From e1268bc7de8517be7a09ed7ae53c49386f405610 Mon Sep 17 00:00:00 2001 From: Dennis Groenen Date: Wed, 28 Sep 2011 22:29:09 +0200 Subject: [PATCH] kernel-power v49 -> kernel-bfs --- kernel-bfs-2.6.28/debian/fmtxpower-bfs | 7 - .../debian/patches/board-rx51-peripherals.diff | 12 - .../debian/patches/bq27x00-maemo.diff | 90 + kernel-bfs-2.6.28/debian/patches/bq27x00-reg.diff | 205 + .../debian/patches/bq27x00-rx51-board.diff | 12 + .../debian/patches/bq27x00-upstream.diff | 1027 + .../debian/patches/bq27x00_battery.diff | 974 - .../debian/patches/fmtx_lock_power.diff | 80 - .../debian/patches/nokia-20093908+0m5.diff |402313 ++++++++++++++++++++ .../debian/patches/nokia-20094102.3+0m5.diff |210409 ++++++++++ .../debian/patches/nokia-20094102.6+0m5.diff | 1541 + .../debian/patches/nokia-20094803.3+0m5.diff |291981 +------------- .../patches/power-supply-ignore-enodata.diff | 37 + kernel-bfs-2.6.28/debian/patches/radio-si4713.diff | 195 + kernel-bfs-2.6.28/debian/patches/series | 18 +- kernel-bfs-2.6.28/debian/rules | 2 - 16 files changed, 623245 insertions(+), 285658 deletions(-) delete mode 100644 kernel-bfs-2.6.28/debian/fmtxpower-bfs delete mode 100644 kernel-bfs-2.6.28/debian/patches/board-rx51-peripherals.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/bq27x00-maemo.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/bq27x00-reg.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/bq27x00-rx51-board.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/bq27x00-upstream.diff delete mode 100644 kernel-bfs-2.6.28/debian/patches/bq27x00_battery.diff delete mode 100644 kernel-bfs-2.6.28/debian/patches/fmtx_lock_power.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/nokia-20093908+0m5.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/nokia-20094102.3+0m5.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/nokia-20094102.6+0m5.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/power-supply-ignore-enodata.diff create mode 100644 kernel-bfs-2.6.28/debian/patches/radio-si4713.diff diff --git a/kernel-bfs-2.6.28/debian/fmtxpower-bfs b/kernel-bfs-2.6.28/debian/fmtxpower-bfs deleted file mode 100644 index 966ea60..0000000 --- a/kernel-bfs-2.6.28/debian/fmtxpower-bfs +++ /dev/null @@ -1,7 +0,0 @@ -start on started rcS-late - -script - echo 120 > /sys/class/i2c-adapter/i2c-2/2-0063/power_level - echo 1 > /sys/class/i2c-adapter/i2c-2/2-0063/lock - -end script diff --git a/kernel-bfs-2.6.28/debian/patches/board-rx51-peripherals.diff b/kernel-bfs-2.6.28/debian/patches/board-rx51-peripherals.diff deleted file mode 100644 index 92e0619..0000000 --- a/kernel-bfs-2.6.28/debian/patches/board-rx51-peripherals.diff +++ /dev/null @@ -1,12 +0,0 @@ ---- kernel-power-2.6.28/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-01-23 03:53:13.000000000 +0100 -+++ kernel-power-2.6.28/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-01-23 04:02:21.000000000 +0100 -@@ -572,6 +580,9 @@ static struct i2c_board_info __initdata - I2C_BOARD_INFO("tsl2563", 0x29), - .platform_data = &rx51_tsl2563_platform_data, - }, -+ { -+ I2C_BOARD_INFO("bq27200", 0x55), -+ }, - }; - - static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = { diff --git a/kernel-bfs-2.6.28/debian/patches/bq27x00-maemo.diff b/kernel-bfs-2.6.28/debian/patches/bq27x00-maemo.diff new file mode 100644 index 0000000..5d6f5dc --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/bq27x00-maemo.diff @@ -0,0 +1,90 @@ +--- kernel-power-2.6.28/drivers/power/bq27x00_battery.c 2011-09-21 13:16:45.007978008 
+0200 ++++ kernel-power-2.6.28/drivers/power/bq27x00_battery.c 2011-09-21 13:49:06.207956191 +0200 +@@ -36,7 +36,12 @@ + #include + #include + +-#include ++#define CONFIG_BATTERY_BQ27X00_I2C ++ ++struct bq27000_platform_data { ++ const char *name; ++ int (*read)(struct device *dev, unsigned int); ++}; + + #define DRIVER_VERSION "1.2.0" + +@@ -113,7 +118,9 @@ static enum power_supply_property bq27x0 + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, ++/* + POWER_SUPPLY_PROP_CAPACITY_LEVEL, ++*/ + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, +@@ -122,8 +129,10 @@ static enum power_supply_property bq27x0 + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, ++/* + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_ENERGY_NOW, ++*/ + }; + + static unsigned int poll_interval = 360; +@@ -353,7 +362,9 @@ static void bq27x00_battery_poll(struct + + if (poll_interval > 0) { + /* The timer does not have to be accurate. */ ++ /* + set_timer_slack(&di->work.timer, poll_interval * HZ / 4); ++ */ + schedule_delayed_work(&di->work, poll_interval * HZ); + } + } +@@ -423,6 +434,7 @@ static int bq27x00_battery_status(struct + return 0; + } + ++/* + static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di, + union power_supply_propval *val) + { +@@ -452,6 +464,7 @@ static int bq27x00_battery_capacity_leve + + return 0; + } ++*/ + + /* + * Return the battery Voltage in milivolts +@@ -520,9 +533,11 @@ static int bq27x00_battery_get_property( + case POWER_SUPPLY_PROP_CAPACITY: + ret = bq27x00_simple_value(di->cache.capacity, val); + break; ++/* + case POWER_SUPPLY_PROP_CAPACITY_LEVEL: + ret = bq27x00_battery_capacity_level(di, val); + break; ++*/ + case POWER_SUPPLY_PROP_TEMP: + ret = bq27x00_simple_value(di->cache.temperature, val); + break; +@@ -547,12 +562,14 @@ static int bq27x00_battery_get_property( + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + ret = bq27x00_simple_value(di->charge_design_full, val); + break; ++/* + case POWER_SUPPLY_PROP_CYCLE_COUNT: + ret = bq27x00_simple_value(di->cache.cycle_count, val); + break; + case POWER_SUPPLY_PROP_ENERGY_NOW: + ret = bq27x00_simple_value(di->cache.energy, val); + break; ++*/ + default: + return -EINVAL; + } diff --git a/kernel-bfs-2.6.28/debian/patches/bq27x00-reg.diff b/kernel-bfs-2.6.28/debian/patches/bq27x00-reg.diff new file mode 100644 index 0000000..11a9950 --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/bq27x00-reg.diff @@ -0,0 +1,205 @@ +diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c +index 46db966..6a6224e 100644 +--- a/drivers/power/bq27x00_battery.c ++++ b/drivers/power/bq27x00_battery.c +@@ -29,12 +29,15 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include + #include + #include + #include ++#include + + #include + +@@ -89,8 +92,34 @@ struct bq27x00_reg_cache { + int flags; + }; + ++#define BQ27X00_READ_REG _IO(MISC_MAJOR, 0) ++ ++/** ++ * struct bq27x00_reg_parms - User data for ioctl call BQ27X00_READ_REG ++ * @reg: Battery register ++ * @single: 1 if register is 8bit, 0 if 16bit ++ * @ret: value of register reg ++ * Ioctl call BQ27X00_READ_REG can be used to read battery register. ++ * If bq27x00_battery is loaded, it is not possible to use i2c-get ++ * to get status of battery registers, so this ioctl can be used. 
++ */ ++struct bq27x00_reg_parms { ++ int reg; ++ int single; ++ int ret; ++}; ++ ++struct bq27x00_reg_device { ++ struct miscdevice miscdev; ++ struct bq27x00_device_info *di; ++ struct bq27x00_reg_device *next, *prev; ++}; ++ ++static struct bq27x00_reg_device *bq27x00_reg_devices = NULL; ++ + struct bq27x00_device_info { + struct device *dev; ++ struct bq27x00_reg_device *regdev; + int id; + enum bq27x00_chip chip; + +@@ -141,6 +153,109 @@ static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg, + return di->bus.read(di, reg, single); + } + ++/* Code for register device access */ ++ ++static long bq27x00_battery_reg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ int ret; ++ int minor = iminor(filp->f_dentry->d_inode); ++ struct bq27x00_reg_parms param; ++ struct bq27x00_reg_device *regdev = bq27x00_reg_devices; ++ ++ while (regdev) { ++ if (regdev->miscdev.minor == minor) ++ break; ++ regdev = regdev->next; ++ } ++ ++ if (!regdev) ++ return -ENXIO; ++ ++ if (cmd != BQ27X00_READ_REG) ++ return -EINVAL; ++ ++ ret = copy_from_user(¶m, (void __user *)arg, sizeof(param)); ++ if (ret != 0) ++ return -EACCES; ++ ++ param.ret = bq27x00_read(regdev->di, param.reg, param.single); ++ ++ ret = copy_to_user((void __user *)arg, ¶m, sizeof(param)); ++ if (ret != 0) ++ return -EACCES; ++ ++ return 0; ++} ++ ++static int bq27x00_battery_reg_open(struct inode *inode, struct file *file) ++{ ++ if (!try_module_get(THIS_MODULE)) ++ return -EPERM; ++ ++ return 0; ++} ++ ++static int bq27x00_battery_reg_release(struct inode *inode, struct file *file) ++{ ++ module_put(THIS_MODULE); ++ return 0; ++} ++ ++static struct file_operations bq27x00_reg_fileops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = bq27x00_battery_reg_ioctl, ++ .open = bq27x00_battery_reg_open, ++ .release = bq27x00_battery_reg_release, ++}; ++ ++static int bq27x00_battery_reg_init(struct bq27x00_device_info *di) ++{ ++ struct bq27x00_reg_device *regdev; ++ int ret; ++ ++ regdev = kzalloc(sizeof *regdev, GFP_KERNEL); ++ if (!regdev) ++ return -ENOMEM; ++ ++ regdev->miscdev.minor = MISC_DYNAMIC_MINOR; ++ regdev->miscdev.name = di->bat.name; ++ regdev->miscdev.fops = &bq27x00_reg_fileops; ++ regdev->di = di; ++ ++ ret = misc_register(®dev->miscdev); ++ if (ret != 0) { ++ kfree(regdev); ++ return ret; ++ } ++ ++ di->regdev = regdev; ++ ++ if (bq27x00_reg_devices) ++ bq27x00_reg_devices->prev = regdev; ++ ++ regdev->prev = NULL; ++ regdev->next = bq27x00_reg_devices; ++ bq27x00_reg_devices = regdev; ++ ++ return 0; ++} ++ ++static void bq27x00_battery_reg_exit(struct bq27x00_device_info *di) ++{ ++ misc_deregister(&di->regdev->miscdev); ++ ++ if (di->regdev->next) ++ di->regdev->next->prev = di->regdev->prev; ++ ++ if (di->regdev->prev) ++ di->regdev->prev->next = di->regdev->next; ++ ++ if (di->regdev == bq27x00_reg_devices) ++ bq27x00_reg_devices = NULL; ++ ++ kfree(di->regdev); ++} ++ + /* + * Return the battery Relative State-of-Charge + * Or < 0 if something fails. 
+@@ -839,6 +839,9 @@ static int bq27x00_battery_probe(struct + + i2c_set_clientdata(client, di); + ++ if (bq27x00_battery_reg_init(di)) ++ di->regdev = NULL; ++ + return 0; + + batt_failed_3: +@@ -859,6 +862,9 @@ static int bq27x00_battery_remove(struct + + bq27x00_powersupply_unregister(di); + ++ if (di->regdev) ++ bq27x00_battery_reg_exit(di); ++ + kfree(di->bat.name); + + mutex_lock(&battery_mutex); +@@ -829,6 +944,9 @@ static int __devinit bq27000_battery_probe(struct platform_device *pdev) + if (ret) + goto err_free; + ++ if (bq27x00_battery_reg_init(di)) ++ di->regdev = NULL; ++ + return 0; + + err_free: +@@ -844,6 +962,9 @@ static int __devexit bq27000_battery_remove(struct platform_device *pdev) + + bq27x00_powersupply_unregister(di); + ++ if (di->regdev) ++ bq27x00_battery_reg_exit(di); ++ + platform_set_drvdata(pdev, NULL); + kfree(di); + diff --git a/kernel-bfs-2.6.28/debian/patches/bq27x00-rx51-board.diff b/kernel-bfs-2.6.28/debian/patches/bq27x00-rx51-board.diff new file mode 100644 index 0000000..92e0619 --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/bq27x00-rx51-board.diff @@ -0,0 +1,12 @@ +--- kernel-power-2.6.28/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-01-23 03:53:13.000000000 +0100 ++++ kernel-power-2.6.28/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-01-23 04:02:21.000000000 +0100 +@@ -572,6 +580,9 @@ static struct i2c_board_info __initdata + I2C_BOARD_INFO("tsl2563", 0x29), + .platform_data = &rx51_tsl2563_platform_data, + }, ++ { ++ I2C_BOARD_INFO("bq27200", 0x55), ++ }, + }; + + static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = { diff --git a/kernel-bfs-2.6.28/debian/patches/bq27x00-upstream.diff b/kernel-bfs-2.6.28/debian/patches/bq27x00-upstream.diff new file mode 100644 index 0000000..abe9637 --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/bq27x00-upstream.diff @@ -0,0 +1,1027 @@ +--- kernel-power-2.6.28/drivers/power/bq27x00_battery.c 2011-09-15 00:34:44.600070307 +0200 ++++ kernel-power-2.6.28/drivers/power/bq27x00_battery.c 2011-09-21 13:16:45.007978008 +0200 +@@ -3,6 +3,8 @@ + * + * Copyright (C) 2008 Rodolfo Giometti + * Copyright (C) 2008 Eurotech S.p.A. ++ * Copyright (C) 2010-2011 Lars-Peter Clausen ++ * Copyright (C) 2011 Pali Rohár + * + * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. + * +@@ -15,6 +17,13 @@ + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ ++ ++/* ++ * Datasheets: ++ * http://focus.ti.com/docs/prod/folders/print/bq27000.html ++ * http://focus.ti.com/docs/prod/folders/print/bq27500.html ++ */ ++ + #include + #include + #include +@@ -24,144 +33,455 @@ + #include + #include + #include ++#include + #include + +-#define DRIVER_VERSION "1.0.0" ++#include ++ ++#define DRIVER_VERSION "1.2.0" + + #define BQ27x00_REG_TEMP 0x06 + #define BQ27x00_REG_VOLT 0x08 +-#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */ + #define BQ27x00_REG_AI 0x14 + #define BQ27x00_REG_FLAGS 0x0A ++#define BQ27x00_REG_TTE 0x16 ++#define BQ27x00_REG_TTF 0x18 ++#define BQ27x00_REG_TTECP 0x26 ++#define BQ27x00_REG_NAC 0x0C /* Nominal available capaciy */ ++#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */ ++#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */ ++#define BQ27x00_REG_AE 0x22 /* Available enery */ ++ ++#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ ++#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */ ++#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */ ++#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */ ++#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */ ++#define BQ27000_FLAG_FC BIT(5) ++#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */ ++ ++#define BQ27500_REG_SOC 0x2C ++#define BQ27500_REG_DCAP 0x3C /* Design capacity */ ++#define BQ27500_FLAG_DSG BIT(0) /* Discharging */ ++#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */ ++#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */ ++#define BQ27500_FLAG_CHG BIT(8) /* Charging */ ++#define BQ27500_FLAG_FC BIT(9) /* Fully charged */ + +-/* If the system has several batteries we need a different name for each +- * of them... 
+- */ +-static DEFINE_IDR(battery_id); +-static DEFINE_MUTEX(battery_mutex); ++#define BQ27000_RS 20 /* Resistor sense */ + + struct bq27x00_device_info; + struct bq27x00_access_methods { +- int (*read)(u8 reg, int *rt_value, int b_single, +- struct bq27x00_device_info *di); ++ int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); ++}; ++ ++enum bq27x00_chip { BQ27000, BQ27500 }; ++ ++struct bq27x00_reg_cache { ++ int temperature; ++ int time_to_empty; ++ int time_to_empty_avg; ++ int time_to_full; ++ int charge_full; ++ int cycle_count; ++ int capacity; ++ int energy; ++ int flags; + }; + + struct bq27x00_device_info { + struct device *dev; + int id; +- int voltage_uV; +- int current_uA; +- int temp_C; +- int charge_rsoc; +- struct bq27x00_access_methods *bus; ++ enum bq27x00_chip chip; ++ ++ struct bq27x00_reg_cache cache; ++ int charge_design_full; ++ ++ unsigned long last_update; ++ struct delayed_work work; ++ + struct power_supply bat; + +- struct i2c_client *client; ++ struct bq27x00_access_methods bus; ++ ++ struct mutex lock; + }; + + static enum power_supply_property bq27x00_battery_props[] = { ++ POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, ++ POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, ++ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, ++ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, ++ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, ++ POWER_SUPPLY_PROP_TECHNOLOGY, ++ POWER_SUPPLY_PROP_CHARGE_FULL, ++ POWER_SUPPLY_PROP_CHARGE_NOW, ++ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, ++ POWER_SUPPLY_PROP_CYCLE_COUNT, ++ POWER_SUPPLY_PROP_ENERGY_NOW, + }; + ++static unsigned int poll_interval = 360; ++module_param(poll_interval, uint, 0644); ++MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \ ++ "0 disables polling"); ++ + /* + * Common code for BQ27x00 devices + */ + +-static int bq27x00_read(u8 reg, int *rt_value, int b_single, +- struct bq27x00_device_info *di) ++static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg, ++ bool single) + { +- int ret; ++ return di->bus.read(di, reg, single); ++} + +- ret = di->bus->read(reg, rt_value, b_single, di); +- *rt_value = be16_to_cpu(*rt_value); ++/* ++ * Return the battery Relative State-of-Charge ++ * Or < 0 if something fails. ++ */ ++static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di) ++{ ++ int rsoc; + +- return ret; ++ if (di->chip == BQ27500) ++ rsoc = bq27x00_read(di, BQ27500_REG_SOC, false); ++ else ++ rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true); ++ ++ if (rsoc < 0) ++ dev_err(di->dev, "error reading relative State-of-Charge\n"); ++ ++ return rsoc; + } + + /* +- * Return the battery temperature in Celcius degrees ++ * Return a battery charge value in µAh + * Or < 0 if something fails. + */ +-static int bq27x00_battery_temperature(struct bq27x00_device_info *di) ++static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg) + { +- int ret; +- int temp = 0; ++ int charge; + +- ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di); +- if (ret) { ++ charge = bq27x00_read(di, reg, false); ++ if (charge < 0) { ++ dev_err(di->dev, "error reading charge register %02x: %d\n", reg, charge); ++ return charge; ++ } ++ ++ if (di->chip == BQ27500) ++ charge *= 1000; ++ else ++ charge = charge * 3570 / BQ27000_RS; ++ ++ return charge; ++} ++ ++/* ++ * Return the battery Nominal available capaciy in µAh ++ * Or < 0 if something fails. 
++ */ ++static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di) ++{ ++ return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC); ++} ++ ++/* ++ * Return the battery Last measured discharge in µAh ++ * Or < 0 if something fails. ++ */ ++static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di) ++{ ++ return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD); ++} ++ ++/* ++ * Return the battery Initial last measured discharge in µAh ++ * Or < 0 if something fails. ++ */ ++static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di) ++{ ++ int ilmd; ++ ++ if (di->chip == BQ27500) ++ ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false); ++ else ++ ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true); ++ ++ if (ilmd < 0) { ++ dev_err(di->dev, "error reading initial last measured discharge\n"); ++ return ilmd; ++ } ++ ++ if (di->chip == BQ27500) ++ ilmd *= 1000; ++ else ++ ilmd = ilmd * 256 * 3570 / BQ27000_RS; ++ ++ return ilmd; ++} ++ ++/* ++ * Return the battery Available energy in µWh ++ * Or < 0 if something fails. ++ */ ++static int bq27x00_battery_read_energy(struct bq27x00_device_info *di) ++{ ++ int ae; ++ ++ ae = bq27x00_read(di, BQ27x00_REG_AE, false); ++ if (ae < 0) { ++ dev_err(di->dev, "error reading available energy\n"); ++ return ae; ++ } ++ ++ if (di->chip == BQ27500) ++ ae *= 1000; ++ else ++ ae = ae * 29200 / BQ27000_RS; ++ ++ return ae; ++} ++ ++/* ++ * Return the battery temperature in tenths of degree Celsius ++ * Or < 0 if something fails. ++ */ ++static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di) ++{ ++ int temp; ++ ++ temp = bq27x00_read(di, BQ27x00_REG_TEMP, false); ++ if (temp < 0) { + dev_err(di->dev, "error reading temperature\n"); +- return ret; ++ return temp; + } + +- return (temp >> 2) - 273; ++ if (di->chip == BQ27500) ++ temp -= 2731; ++ else ++ temp = ((temp * 5) - 5463) / 2; ++ ++ return temp; + } + + /* +- * Return the battery Voltage in milivolts ++ * Return the battery Cycle count total + * Or < 0 if something fails. + */ +-static int bq27x00_battery_voltage(struct bq27x00_device_info *di) ++static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di) + { +- int ret; +- int volt = 0; ++ int cyct; + +- ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di); +- if (ret) { +- dev_err(di->dev, "error reading voltage\n"); +- return ret; ++ cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false); ++ if (cyct < 0) ++ dev_err(di->dev, "error reading cycle count total\n"); ++ ++ return cyct; ++} ++ ++/* ++ * Read a time register. ++ * Return < 0 if something fails. ++ */ ++static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg) ++{ ++ int tval; ++ ++ tval = bq27x00_read(di, reg, false); ++ if (tval < 0) { ++ dev_err(di->dev, "error reading time register %02x: %d\n", reg, tval); ++ return tval; ++ } ++ ++ if (tval == 65535) ++ return -ENODATA; ++ ++ return tval * 60; ++} ++ ++static void bq27x00_update(struct bq27x00_device_info *di) ++{ ++ struct bq27x00_reg_cache cache = {0, }; ++ bool is_bq27500 = di->chip == BQ27500; ++ ++ cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500); ++ if (cache.flags >= 0) { ++ if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) { ++ dev_err(di->dev, "battery is not calibrated! 
ignoring capacity values\n"); ++ cache.capacity = -ENODATA; ++ cache.energy = -ENODATA; ++ cache.time_to_empty = -ENODATA; ++ cache.time_to_empty_avg = -ENODATA; ++ cache.time_to_full = -ENODATA; ++ cache.charge_full = -ENODATA; ++ } else { ++ cache.capacity = bq27x00_battery_read_rsoc(di); ++ cache.energy = bq27x00_battery_read_energy(di); ++ cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE); ++ cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); ++ cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); ++ cache.charge_full = bq27x00_battery_read_lmd(di); ++ } ++ cache.temperature = bq27x00_battery_read_temperature(di); ++ cache.cycle_count = bq27x00_battery_read_cyct(di); ++ ++ /* We only have to read charge design full once */ ++ if (di->charge_design_full <= 0) ++ di->charge_design_full = bq27x00_battery_read_ilmd(di); ++ } ++ ++ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) { ++ di->cache = cache; ++ power_supply_changed(&di->bat); + } + +- return volt; ++ di->last_update = jiffies; ++} ++ ++static void bq27x00_battery_poll(struct work_struct *work) ++{ ++ struct bq27x00_device_info *di = ++ container_of(work, struct bq27x00_device_info, work.work); ++ ++ bq27x00_update(di); ++ ++ if (poll_interval > 0) { ++ /* The timer does not have to be accurate. */ ++ set_timer_slack(&di->work.timer, poll_interval * HZ / 4); ++ schedule_delayed_work(&di->work, poll_interval * HZ); ++ } + } + + /* +- * Return the battery average current ++ * Return the battery average current in µA + * Note that current can be negative signed as well + * Or 0 if something fails. + */ +-static int bq27x00_battery_current(struct bq27x00_device_info *di) ++static int bq27x00_battery_current(struct bq27x00_device_info *di, ++ union power_supply_propval *val) + { +- int ret; +- int curr = 0; +- int flags = 0; ++ int curr; ++ int flags; + +- ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di); +- if (ret) { +- dev_err(di->dev, "error reading current\n"); +- return 0; ++ curr = bq27x00_read(di, BQ27x00_REG_AI, false); ++ if (curr < 0) { ++ dev_err(di->dev, "error reading current"); ++ return curr; ++ } ++ ++ if (di->chip == BQ27500) { ++ /* bq27500 returns signed value */ ++ val->intval = (int)((s16)curr) * 1000; ++ } else { ++ flags = bq27x00_read(di, BQ27x00_REG_FLAGS, false); ++ if (flags & BQ27000_FLAG_CHGS) { ++ dev_dbg(di->dev, "negative current!\n"); ++ curr = -curr; ++ } ++ ++ val->intval = curr * 3570 / BQ27000_RS; ++ } ++ ++ return 0; ++} ++ ++static int bq27x00_battery_status(struct bq27x00_device_info *di, ++ union power_supply_propval *val) ++{ ++ int status; ++ ++ if (di->chip == BQ27500) { ++ if (di->cache.flags & BQ27500_FLAG_FC) ++ status = POWER_SUPPLY_STATUS_FULL; ++ else if (di->cache.flags & BQ27500_FLAG_DSG) ++ status = POWER_SUPPLY_STATUS_DISCHARGING; ++ else if (di->cache.flags & BQ27500_FLAG_CHG) ++ status = POWER_SUPPLY_STATUS_CHARGING; ++ else if (power_supply_am_i_supplied(&di->bat)) ++ status = POWER_SUPPLY_STATUS_NOT_CHARGING; ++ else ++ status = POWER_SUPPLY_STATUS_UNKNOWN; ++ } else { ++ if (di->cache.flags & BQ27000_FLAG_FC) ++ status = POWER_SUPPLY_STATUS_FULL; ++ else if (di->cache.flags & BQ27000_FLAG_CHGS) ++ status = POWER_SUPPLY_STATUS_CHARGING; ++ else if (power_supply_am_i_supplied(&di->bat)) ++ status = POWER_SUPPLY_STATUS_NOT_CHARGING; ++ else ++ status = POWER_SUPPLY_STATUS_DISCHARGING; + } +- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); +- if (ret < 0) { +- dev_err(di->dev, "error reading flags\n"); +- return 
0; +- } +- if ((flags & (1 << 7)) != 0) { +- dev_dbg(di->dev, "negative current!\n"); +- return -curr; ++ ++ val->intval = status; ++ ++ return 0; ++} ++ ++static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di, ++ union power_supply_propval *val) ++{ ++ int level; ++ ++ if (di->chip == BQ27500) { ++ if (di->cache.flags & BQ27500_FLAG_FC) ++ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; ++ else if (di->cache.flags & BQ27500_FLAG_SOC1) ++ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; ++ else if (di->cache.flags & BQ27500_FLAG_SOCF) ++ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; ++ else ++ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; ++ } else { ++ if (di->cache.flags & BQ27000_FLAG_FC) ++ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; ++ else if (di->cache.flags & BQ27000_FLAG_EDV1) ++ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; ++ else if (di->cache.flags & BQ27000_FLAG_EDVF) ++ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; ++ else ++ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + } +- return curr; ++ ++ val->intval = level; ++ ++ return 0; + } + + /* +- * Return the battery Relative State-of-Charge ++ * Return the battery Voltage in milivolts + * Or < 0 if something fails. + */ +-static int bq27x00_battery_rsoc(struct bq27x00_device_info *di) ++static int bq27x00_battery_voltage(struct bq27x00_device_info *di, ++ union power_supply_propval *val) + { +- int ret; +- int rsoc = 0; ++ int volt; + +- ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di); +- if (ret) { +- dev_err(di->dev, "error reading relative State-of-Charge\n"); +- return ret; ++ volt = bq27x00_read(di, BQ27x00_REG_VOLT, false); ++ if (volt < 0) { ++ dev_err(di->dev, "error reading voltage\n"); ++ return volt; + } + +- return rsoc >> 8; ++ val->intval = volt * 1000; ++ ++ return 0; ++} ++ ++static int bq27x00_simple_value(int value, ++ union power_supply_propval *val) ++{ ++ if (value < 0) ++ return value; ++ ++ val->intval = value; ++ ++ return 0; + } + + #define to_bq27x00_device_info(x) container_of((x), \ +@@ -171,89 +491,167 @@ static int bq27x00_battery_get_property( + enum power_supply_property psp, + union power_supply_propval *val) + { ++ int ret = 0; + struct bq27x00_device_info *di = to_bq27x00_device_info(psy); + ++ mutex_lock(&di->lock); ++ if (time_is_before_jiffies(di->last_update + 5 * HZ)) { ++ cancel_delayed_work_sync(&di->work); ++ bq27x00_battery_poll(&di->work.work); ++ } ++ mutex_unlock(&di->lock); ++ ++ if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) ++ return -ENODEV; ++ + switch (psp) { ++ case POWER_SUPPLY_PROP_STATUS: ++ ret = bq27x00_battery_status(di, val); ++ break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: ++ ret = bq27x00_battery_voltage(di, val); ++ break; + case POWER_SUPPLY_PROP_PRESENT: +- val->intval = bq27x00_battery_voltage(di); +- if (psp == POWER_SUPPLY_PROP_PRESENT) +- val->intval = val->intval <= 0 ? 0 : 1; ++ val->intval = di->cache.flags < 0 ? 
0 : 1; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: +- val->intval = bq27x00_battery_current(di); ++ ret = bq27x00_battery_current(di, val); + break; + case POWER_SUPPLY_PROP_CAPACITY: +- val->intval = bq27x00_battery_rsoc(di); ++ ret = bq27x00_simple_value(di->cache.capacity, val); ++ break; ++ case POWER_SUPPLY_PROP_CAPACITY_LEVEL: ++ ret = bq27x00_battery_capacity_level(di, val); + break; + case POWER_SUPPLY_PROP_TEMP: +- val->intval = bq27x00_battery_temperature(di); ++ ret = bq27x00_simple_value(di->cache.temperature, val); ++ break; ++ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: ++ ret = bq27x00_simple_value(di->cache.time_to_empty, val); ++ break; ++ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: ++ ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val); ++ break; ++ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: ++ ret = bq27x00_simple_value(di->cache.time_to_full, val); ++ break; ++ case POWER_SUPPLY_PROP_TECHNOLOGY: ++ val->intval = POWER_SUPPLY_TECHNOLOGY_LION; ++ break; ++ case POWER_SUPPLY_PROP_CHARGE_NOW: ++ ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val); ++ break; ++ case POWER_SUPPLY_PROP_CHARGE_FULL: ++ ret = bq27x00_simple_value(di->cache.charge_full, val); ++ break; ++ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: ++ ret = bq27x00_simple_value(di->charge_design_full, val); ++ break; ++ case POWER_SUPPLY_PROP_CYCLE_COUNT: ++ ret = bq27x00_simple_value(di->cache.cycle_count, val); ++ break; ++ case POWER_SUPPLY_PROP_ENERGY_NOW: ++ ret = bq27x00_simple_value(di->cache.energy, val); + break; + default: + return -EINVAL; + } + +- return 0; ++ return ret; + } + +-static void bq27x00_powersupply_init(struct bq27x00_device_info *di) ++static void bq27x00_external_power_changed(struct power_supply *psy) + { ++ struct bq27x00_device_info *di = to_bq27x00_device_info(psy); ++ ++ cancel_delayed_work_sync(&di->work); ++ schedule_delayed_work(&di->work, 0); ++} ++ ++static int bq27x00_powersupply_init(struct bq27x00_device_info *di) ++{ ++ int ret; ++ + di->bat.type = POWER_SUPPLY_TYPE_BATTERY; + di->bat.properties = bq27x00_battery_props; + di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); + di->bat.get_property = bq27x00_battery_get_property; +- di->bat.external_power_changed = NULL; ++ di->bat.external_power_changed = bq27x00_external_power_changed; ++ ++ INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll); ++ mutex_init(&di->lock); ++ ++ ret = power_supply_register(di->dev, &di->bat); ++ if (ret) { ++ dev_err(di->dev, "failed to register battery: %d\n", ret); ++ return ret; ++ } ++ ++ dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION); ++ ++ bq27x00_update(di); ++ ++ return 0; + } + +-/* +- * BQ27200 specific code ++static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di) ++{ ++ cancel_delayed_work_sync(&di->work); ++ ++ power_supply_unregister(&di->bat); ++ ++ mutex_destroy(&di->lock); ++} ++ ++ ++/* i2c specific code */ ++#ifdef CONFIG_BATTERY_BQ27X00_I2C ++ ++/* If the system has several batteries we need a different name for each ++ * of them... 
+ */ ++static DEFINE_IDR(battery_id); ++static DEFINE_MUTEX(battery_mutex); + +-static int bq27200_read(u8 reg, int *rt_value, int b_single, +- struct bq27x00_device_info *di) ++static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single) + { +- struct i2c_client *client = di->client; +- struct i2c_msg msg[1]; ++ struct i2c_client *client = to_i2c_client(di->dev); ++ struct i2c_msg msg[2]; + unsigned char data[2]; +- int err; ++ int ret; + + if (!client->adapter) + return -ENODEV; + +- msg->addr = client->addr; +- msg->flags = 0; +- msg->len = 1; +- msg->buf = data; +- +- data[0] = reg; +- err = i2c_transfer(client->adapter, msg, 1); +- +- if (err >= 0) { +- if (!b_single) +- msg->len = 2; +- else +- msg->len = 1; ++ msg[0].addr = client->addr; ++ msg[0].flags = 0; ++ msg[0].buf = ® ++ msg[0].len = sizeof(reg); ++ msg[1].addr = client->addr; ++ msg[1].flags = I2C_M_RD; ++ msg[1].buf = data; ++ if (single) ++ msg[1].len = 1; ++ else ++ msg[1].len = 2; + +- msg->flags = I2C_M_RD; +- err = i2c_transfer(client->adapter, msg, 1); +- if (err >= 0) { +- if (!b_single) +- *rt_value = get_unaligned_be16(data); +- else +- *rt_value = data[0]; ++ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); ++ if (ret < 0) ++ return ret; + +- return 0; +- } +- } +- return err; ++ if (!single) ++ ret = get_unaligned_le16(data); ++ else ++ ret = data[0]; ++ ++ return ret; + } + +-static int bq27200_battery_probe(struct i2c_client *client, ++static int bq27x00_battery_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { + char *name; + struct bq27x00_device_info *di; +- struct bq27x00_access_methods *bus; + int num; + int retval = 0; + +@@ -267,7 +665,7 @@ static int bq27200_battery_probe(struct + if (retval < 0) + return retval; + +- name = kasprintf(GFP_KERNEL, "bq27200-%d", num); ++ name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); + if (!name) { + dev_err(&client->dev, "failed to allocate device name\n"); + retval = -ENOMEM; +@@ -280,37 +678,20 @@ static int bq27200_battery_probe(struct + retval = -ENOMEM; + goto batt_failed_2; + } +- di->id = num; + +- bus = kzalloc(sizeof(*bus), GFP_KERNEL); +- if (!bus) { +- dev_err(&client->dev, "failed to allocate access method " +- "data\n"); +- retval = -ENOMEM; +- goto batt_failed_3; +- } +- +- i2c_set_clientdata(client, di); ++ di->id = num; + di->dev = &client->dev; ++ di->chip = id->driver_data; + di->bat.name = name; +- bus->read = &bq27200_read; +- di->bus = bus; +- di->client = client; ++ di->bus.read = &bq27x00_read_i2c; + +- bq27x00_powersupply_init(di); +- +- retval = power_supply_register(&client->dev, &di->bat); +- if (retval) { +- dev_err(&client->dev, "failed to register battery\n"); +- goto batt_failed_4; +- } ++ if (bq27x00_powersupply_init(di)) ++ goto batt_failed_3; + +- dev_info(&client->dev, "support ver. 
%s enabled\n", DRIVER_VERSION); ++ i2c_set_clientdata(client, di); + + return 0; + +-batt_failed_4: +- kfree(bus); + batt_failed_3: + kfree(di); + batt_failed_2: +@@ -323,11 +704,11 @@ batt_failed_1: + return retval; + } + +-static int bq27200_battery_remove(struct i2c_client *client) ++static int bq27x00_battery_remove(struct i2c_client *client) + { + struct bq27x00_device_info *di = i2c_get_clientdata(client); + +- power_supply_unregister(&di->bat); ++ bq27x00_powersupply_unregister(di); + + kfree(di->bat.name); + +@@ -340,31 +721,180 @@ static int bq27200_battery_remove(struct + return 0; + } + +-/* +- * Module stuff +- */ +- +-static const struct i2c_device_id bq27200_id[] = { +- { "bq27200", 0 }, ++static const struct i2c_device_id bq27x00_id[] = { ++ { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */ ++ { "bq27500", BQ27500 }, + {}, + }; ++MODULE_DEVICE_TABLE(i2c, bq27x00_id); ++ ++static struct i2c_driver bq27x00_battery_driver = { ++ .driver = { ++ .name = "bq27x00-battery", ++ }, ++ .probe = bq27x00_battery_probe, ++ .remove = bq27x00_battery_remove, ++ .id_table = bq27x00_id, ++}; ++ ++static inline int bq27x00_battery_i2c_init(void) ++{ ++ int ret = i2c_add_driver(&bq27x00_battery_driver); ++ if (ret) ++ printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n"); ++ ++ return ret; ++} ++ ++static inline void bq27x00_battery_i2c_exit(void) ++{ ++ i2c_del_driver(&bq27x00_battery_driver); ++} ++ ++#else ++ ++static inline int bq27x00_battery_i2c_init(void) { return 0; } ++static inline void bq27x00_battery_i2c_exit(void) {}; ++ ++#endif ++ ++/* platform specific code */ ++#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM ++ ++static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg, ++ bool single) ++{ ++ struct device *dev = di->dev; ++ struct bq27000_platform_data *pdata = dev->platform_data; ++ unsigned int timeout = 3; ++ int upper, lower; ++ int temp; ++ ++ if (!single) { ++ /* Make sure the value has not changed in between reading the ++ * lower and the upper part */ ++ upper = pdata->read(dev, reg + 1); ++ do { ++ temp = upper; ++ if (upper < 0) ++ return upper; ++ ++ lower = pdata->read(dev, reg); ++ if (lower < 0) ++ return lower; ++ ++ upper = pdata->read(dev, reg + 1); ++ } while (temp != upper && --timeout); ++ ++ if (timeout == 0) ++ return -EIO; ++ ++ return (upper << 8) | lower; ++ } ++ ++ return pdata->read(dev, reg); ++} ++ ++static int __devinit bq27000_battery_probe(struct platform_device *pdev) ++{ ++ struct bq27x00_device_info *di; ++ struct bq27000_platform_data *pdata = pdev->dev.platform_data; ++ int ret; ++ ++ if (!pdata) { ++ dev_err(&pdev->dev, "no platform_data supplied\n"); ++ return -EINVAL; ++ } ++ ++ if (!pdata->read) { ++ dev_err(&pdev->dev, "no hdq read callback supplied\n"); ++ return -EINVAL; ++ } ++ ++ di = kzalloc(sizeof(*di), GFP_KERNEL); ++ if (!di) { ++ dev_err(&pdev->dev, "failed to allocate device info data\n"); ++ return -ENOMEM; ++ } ++ ++ platform_set_drvdata(pdev, di); ++ ++ di->dev = &pdev->dev; ++ di->chip = BQ27000; ++ ++ di->bat.name = pdata->name ?: dev_name(&pdev->dev); ++ di->bus.read = &bq27000_read_platform; + +-static struct i2c_driver bq27200_battery_driver = { ++ ret = bq27x00_powersupply_init(di); ++ if (ret) ++ goto err_free; ++ ++ return 0; ++ ++err_free: ++ platform_set_drvdata(pdev, NULL); ++ kfree(di); ++ ++ return ret; ++} ++ ++static int __devexit bq27000_battery_remove(struct platform_device *pdev) ++{ ++ struct bq27x00_device_info *di = platform_get_drvdata(pdev); ++ ++ 
bq27x00_powersupply_unregister(di); ++ ++ platform_set_drvdata(pdev, NULL); ++ kfree(di); ++ ++ return 0; ++} ++ ++static struct platform_driver bq27000_battery_driver = { ++ .probe = bq27000_battery_probe, ++ .remove = __devexit_p(bq27000_battery_remove), + .driver = { +- .name = "bq27200-battery", ++ .name = "bq27000-battery", ++ .owner = THIS_MODULE, + }, +- .probe = bq27200_battery_probe, +- .remove = bq27200_battery_remove, +- .id_table = bq27200_id, + }; + ++static inline int bq27x00_battery_platform_init(void) ++{ ++ int ret = platform_driver_register(&bq27000_battery_driver); ++ if (ret) ++ printk(KERN_ERR "Unable to register BQ27000 platform driver\n"); ++ ++ return ret; ++} ++ ++static inline void bq27x00_battery_platform_exit(void) ++{ ++ platform_driver_unregister(&bq27000_battery_driver); ++} ++ ++#else ++ ++static inline int bq27x00_battery_platform_init(void) { return 0; } ++static inline void bq27x00_battery_platform_exit(void) {}; ++ ++#endif ++ ++/* ++ * Module stuff ++ */ ++ + static int __init bq27x00_battery_init(void) + { + int ret; + +- ret = i2c_add_driver(&bq27200_battery_driver); ++ ret = bq27x00_battery_i2c_init(); ++ if (ret) ++ return ret; ++ ++ ret = bq27x00_battery_platform_init(); + if (ret) +- printk(KERN_ERR "Unable to register BQ27200 driver\n"); ++ bq27x00_battery_i2c_exit(); + + return ret; + } +@@ -372,7 +902,8 @@ module_init(bq27x00_battery_init); + + static void __exit bq27x00_battery_exit(void) + { +- i2c_del_driver(&bq27200_battery_driver); ++ bq27x00_battery_platform_exit(); ++ bq27x00_battery_i2c_exit(); + } + module_exit(bq27x00_battery_exit); + diff --git a/kernel-bfs-2.6.28/debian/patches/bq27x00_battery.diff b/kernel-bfs-2.6.28/debian/patches/bq27x00_battery.diff deleted file mode 100644 index b46ed98..0000000 --- a/kernel-bfs-2.6.28/debian/patches/bq27x00_battery.diff +++ /dev/null @@ -1,974 +0,0 @@ ---- kernel-power-2.6.28/drivers/power/bq27x00_battery.c 2011-05-01 01:48:44.000000000 +0200 -+++ kernel-power-2.6.28/drivers/power/bq27x00_battery.c 2011-05-01 01:51:12.000000000 +0200 -@@ -3,6 +3,8 @@ - * - * Copyright (C) 2008 Rodolfo Giometti - * Copyright (C) 2008 Eurotech S.p.A. -+ * Copyright (C) 2010-2011 Lars-Peter Clausen -+ * Copyright (C) 2011 Pali Rohár - * - * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. - * -@@ -15,6 +17,13 @@ - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
- * - */ -+ -+/* -+ * Datasheets: -+ * http://focus.ti.com/docs/prod/folders/print/bq27000.html -+ * http://focus.ti.com/docs/prod/folders/print/bq27500.html -+ */ -+ - #include - #include - #include -@@ -24,144 +33,407 @@ - #include - #include - #include -+#include - #include - --#define DRIVER_VERSION "1.0.0" -+#define CONFIG_BATTERY_BQ27X00_I2C -+ -+struct bq27000_platform_data { -+ const char *name; -+ int (*read)(struct device *dev, unsigned int); -+}; -+ -+#define DRIVER_VERSION "1.2.0" - - #define BQ27x00_REG_TEMP 0x06 - #define BQ27x00_REG_VOLT 0x08 --#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */ - #define BQ27x00_REG_AI 0x14 - #define BQ27x00_REG_FLAGS 0x0A -+#define BQ27x00_REG_TTE 0x16 -+#define BQ27x00_REG_TTF 0x18 -+#define BQ27x00_REG_TTECP 0x26 -+#define BQ27x00_REG_NAC 0x0C /* Nominal available capaciy */ -+#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */ -+#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */ -+#define BQ27x00_REG_AE 0x22 /* Available enery */ -+ -+#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ -+#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */ -+#define BQ27000_FLAG_CHGS BIT(7) -+#define BQ27000_FLAG_FC BIT(5) -+ -+#define BQ27500_REG_SOC 0x2C -+#define BQ27500_REG_DCAP 0x3C /* Design capacity */ -+#define BQ27500_FLAG_DSC BIT(0) -+#define BQ27500_FLAG_FC BIT(9) - --/* If the system has several batteries we need a different name for each -- * of them... -- */ --static DEFINE_IDR(battery_id); --static DEFINE_MUTEX(battery_mutex); -+#define BQ27000_RS 20 /* Resistor sense */ - - struct bq27x00_device_info; - struct bq27x00_access_methods { -- int (*read)(u8 reg, int *rt_value, int b_single, -- struct bq27x00_device_info *di); -+ int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); -+}; -+ -+enum bq27x00_chip { BQ27000, BQ27500 }; -+ -+struct bq27x00_reg_cache { -+ int temperature; -+ int time_to_empty; -+ int time_to_empty_avg; -+ int time_to_full; -+ int charge_full; -+ int capacity; -+ int flags; -+ -+ int current_now; - }; - - struct bq27x00_device_info { - struct device *dev; - int id; -- int voltage_uV; -- int current_uA; -- int temp_C; -- int charge_rsoc; -- struct bq27x00_access_methods *bus; -+ enum bq27x00_chip chip; -+ -+ struct bq27x00_reg_cache cache; -+ int charge_design_full; -+ -+ unsigned long last_update; -+ struct delayed_work work; -+ - struct power_supply bat; - -- struct i2c_client *client; -+ struct bq27x00_access_methods bus; -+ -+ struct mutex lock; - }; - - static enum power_supply_property bq27x00_battery_props[] = { -+ POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_TEMP, -+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, -+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, -+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, -+ POWER_SUPPLY_PROP_TECHNOLOGY, -+ POWER_SUPPLY_PROP_CHARGE_FULL, -+ POWER_SUPPLY_PROP_CHARGE_NOW, -+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - }; - -+static unsigned int poll_interval = 360; -+module_param(poll_interval, uint, 0644); -+MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \ -+ "0 disables polling"); -+ - /* - * Common code for BQ27x00 devices - */ - --static int bq27x00_read(u8 reg, int *rt_value, int b_single, -- struct bq27x00_device_info *di) -+static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg, -+ bool single) - { -- int ret; -+ return di->bus.read(di, reg, single); -+} -+ -+/* -+ * Return the battery 
Relative State-of-Charge -+ * Or < 0 if something fails. -+ */ -+static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di) -+{ -+ int rsoc; - -- ret = di->bus->read(reg, rt_value, b_single, di); -- *rt_value = be16_to_cpu(*rt_value); -+ if (di->chip == BQ27500) -+ rsoc = bq27x00_read(di, BQ27500_REG_SOC, false); -+ else -+ rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true); - -- return ret; -+ if (rsoc < 0) -+ dev_err(di->dev, "error reading relative State-of-Charge\n"); -+ -+ return rsoc; - } - - /* -- * Return the battery temperature in Celcius degrees -+ * Return a battery charge value in µAh - * Or < 0 if something fails. - */ --static int bq27x00_battery_temperature(struct bq27x00_device_info *di) -+static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg) - { -- int ret; -- int temp = 0; -+ int charge; - -- ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di); -- if (ret) { -- dev_err(di->dev, "error reading temperature\n"); -- return ret; -+ charge = bq27x00_read(di, reg, false); -+ if (charge < 0) { -+ dev_err(di->dev, "error reading nominal available capacity\n"); -+ return charge; - } - -- return (temp >> 2) - 273; -+ if (di->chip == BQ27500) -+ charge *= 1000; -+ else -+ charge = charge * 3570 / BQ27000_RS; -+ -+ return charge; - } - - /* -- * Return the battery Voltage in milivolts -+ * Return the battery Nominal available capaciy in µAh - * Or < 0 if something fails. - */ --static int bq27x00_battery_voltage(struct bq27x00_device_info *di) -+static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di) - { -- int ret; -- int volt = 0; -+ return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC); -+} - -- ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di); -- if (ret) { -- dev_err(di->dev, "error reading voltage\n"); -- return ret; -+/* -+ * Return the battery Last measured discharge in µAh -+ * Or < 0 if something fails. -+ */ -+static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di) -+{ -+ return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD); -+} -+ -+/* -+ * Return the battery Initial last measured discharge in µAh -+ * Or < 0 if something fails. -+ */ -+static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di) -+{ -+ int ilmd; -+ -+ if (di->chip == BQ27500) -+ ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false); -+ else -+ ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true); -+ -+ if (ilmd < 0) { -+ dev_err(di->dev, "error reading initial last measured discharge\n"); -+ return ilmd; - } - -- return volt; -+ if (di->chip == BQ27500) -+ ilmd *= 1000; -+ else -+ ilmd = ilmd * 256 * 3570 / BQ27000_RS; -+ -+ return ilmd; - } - - /* -- * Return the battery average current -- * Note that current can be negative signed as well -- * Or 0 if something fails. -+ * Return the battery Cycle count total -+ * Or < 0 if something fails. - */ --static int bq27x00_battery_current(struct bq27x00_device_info *di) -+static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di) - { -- int ret; -- int curr = 0; -- int flags = 0; -+ int cyct; - -- ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di); -- if (ret) { -- dev_err(di->dev, "error reading current\n"); -- return 0; -+ cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false); -+ if (cyct < 0) -+ dev_err(di->dev, "error reading cycle count total\n"); -+ -+ return cyct; -+} -+ -+/* -+ * Read a time register. -+ * Return < 0 if something fails. 
-+ */ -+static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg) -+{ -+ int tval; -+ -+ tval = bq27x00_read(di, reg, false); -+ if (tval < 0) { -+ dev_err(di->dev, "error reading register %02x: %d\n", reg, tval); -+ return tval; - } -- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); -- if (ret < 0) { -- dev_err(di->dev, "error reading flags\n"); -+ -+ if (tval == 65535) - return 0; -+ -+ return tval * 60; -+} -+ -+static void bq27x00_update(struct bq27x00_device_info *di) -+{ -+ struct bq27x00_reg_cache cache = {0, }; -+ bool is_bq27500 = di->chip == BQ27500; -+ -+ cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500); -+ if (cache.flags >= 0) { -+ cache.capacity = bq27x00_battery_read_rsoc(di); -+ cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false); -+ cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE); -+ cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); -+ cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); -+ cache.charge_full = bq27x00_battery_read_lmd(di); -+ -+ if (!is_bq27500) -+ cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false); -+ -+ /* We only have to read charge design full once */ -+ if (di->charge_design_full <= 0) -+ di->charge_design_full = bq27x00_battery_read_ilmd(di); - } -- if ((flags & (1 << 7)) != 0) { -- dev_dbg(di->dev, "negative current!\n"); -- return -curr; -+ -+ /* Ignore current_now which is a snapshot of the current battery state -+ * and is likely to be different even between two consecutive reads */ -+ if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) { -+ di->cache = cache; -+ power_supply_changed(&di->bat); -+ } -+ -+ di->last_update = jiffies; -+} -+ -+static void bq27x00_battery_poll(struct work_struct *work) -+{ -+ struct bq27x00_device_info *di = -+ container_of(work, struct bq27x00_device_info, work.work); -+ -+ bq27x00_update(di); -+ -+ if (poll_interval > 0) { -+ schedule_delayed_work(&di->work, poll_interval * HZ); - } -- return curr; - } - -+ - /* -- * Return the battery Relative State-of-Charge -+ * Return the battery temperature in tenths of degree Celsius - * Or < 0 if something fails. - */ --static int bq27x00_battery_rsoc(struct bq27x00_device_info *di) -+static int bq27x00_battery_temperature(struct bq27x00_device_info *di, -+ union power_supply_propval *val) - { -- int ret; -- int rsoc = 0; -+ if (di->cache.temperature < 0) -+ return di->cache.temperature; - -- ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di); -- if (ret) { -- dev_err(di->dev, "error reading relative State-of-Charge\n"); -- return ret; -+ if (di->chip == BQ27500) -+ val->intval = di->cache.temperature - 2731; -+ else -+ val->intval = ((di->cache.temperature * 5) - 5463) / 2; -+ -+ return 0; -+} -+ -+/* -+ * Return the battery average current in µA -+ * Note that current can be negative signed as well -+ * Or 0 if something fails. 
-+ */ -+static int bq27x00_battery_current(struct bq27x00_device_info *di, -+ union power_supply_propval *val) -+{ -+ int curr; -+ -+ if (di->chip == BQ27500) -+ curr = bq27x00_read(di, BQ27x00_REG_AI, false); -+ else -+ curr = di->cache.current_now; -+ -+ if (curr < 0) -+ return curr; -+ -+ if (di->chip == BQ27500) { -+ /* bq27500 returns signed value */ -+ val->intval = (int)((s16)curr) * 1000; -+ } else { -+ if (di->cache.flags & BQ27000_FLAG_CHGS) { -+ dev_dbg(di->dev, "negative current!\n"); -+ curr = -curr; -+ } -+ -+ val->intval = curr * 3570 / BQ27000_RS; - } - -- return rsoc >> 8; -+ return 0; -+} -+ -+static int bq27x00_battery_status(struct bq27x00_device_info *di, -+ union power_supply_propval *val) -+{ -+ int status; -+ -+ if (di->chip == BQ27500) { -+ if (di->cache.flags & BQ27500_FLAG_FC) -+ status = POWER_SUPPLY_STATUS_FULL; -+ else if (di->cache.flags & BQ27500_FLAG_DSC) -+ status = POWER_SUPPLY_STATUS_DISCHARGING; -+ else -+ status = POWER_SUPPLY_STATUS_CHARGING; -+ } else { -+ if (di->cache.flags & BQ27000_FLAG_FC) -+ status = POWER_SUPPLY_STATUS_FULL; -+ else if (di->cache.flags & BQ27000_FLAG_CHGS) -+ status = POWER_SUPPLY_STATUS_CHARGING; -+ else if (power_supply_am_i_supplied(&di->bat)) -+ status = POWER_SUPPLY_STATUS_NOT_CHARGING; -+ else -+ status = POWER_SUPPLY_STATUS_DISCHARGING; -+ } -+ -+ val->intval = status; -+ -+ return 0; -+} -+ -+/* -+ * Return the battery Voltage in milivolts -+ * Or < 0 if something fails. -+ */ -+static int bq27x00_battery_voltage(struct bq27x00_device_info *di, -+ union power_supply_propval *val) -+{ -+ int volt; -+ -+ volt = bq27x00_read(di, BQ27x00_REG_VOLT, false); -+ if (volt < 0) -+ return volt; -+ -+ val->intval = volt * 1000; -+ -+ return 0; -+} -+ -+/* -+ * Return the battery Available energy in µWh -+ * Or < 0 if something fails. -+ */ -+static int bq27x00_battery_energy(struct bq27x00_device_info *di, -+ union power_supply_propval *val) -+{ -+ int ae; -+ -+ ae = bq27x00_read(di, BQ27x00_REG_AE, false); -+ if (ae < 0) { -+ dev_err(di->dev, "error reading available energy\n"); -+ return ae; -+ } -+ -+ if (di->chip == BQ27500) -+ ae *= 1000; -+ else -+ ae = ae * 29200 / BQ27000_RS; -+ -+ val->intval = ae; -+ -+ return 0; -+} -+ -+ -+static int bq27x00_simple_value(int value, -+ union power_supply_propval *val) -+{ -+ if (value < 0) -+ return value; -+ -+ val->intval = value; -+ -+ return 0; - } - - #define to_bq27x00_device_info(x) container_of((x), \ -@@ -171,89 +443,161 @@ static int bq27x00_battery_get_property( - enum power_supply_property psp, - union power_supply_propval *val) - { -+ int ret = 0; - struct bq27x00_device_info *di = to_bq27x00_device_info(psy); - -+ mutex_lock(&di->lock); -+ if (time_is_before_jiffies(di->last_update + 5 * HZ)) { -+ cancel_delayed_work_sync(&di->work); -+ bq27x00_battery_poll(&di->work.work); -+ } -+ mutex_unlock(&di->lock); -+ -+ if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) -+ return -ENODEV; -+ - switch (psp) { -+ case POWER_SUPPLY_PROP_STATUS: -+ ret = bq27x00_battery_status(di, val); -+ break; - case POWER_SUPPLY_PROP_VOLTAGE_NOW: -+ ret = bq27x00_battery_voltage(di, val); -+ break; - case POWER_SUPPLY_PROP_PRESENT: -- val->intval = bq27x00_battery_voltage(di); -- if (psp == POWER_SUPPLY_PROP_PRESENT) -- val->intval = val->intval <= 0 ? 0 : 1; -+ val->intval = di->cache.flags < 0 ? 
0 : 1; - break; - case POWER_SUPPLY_PROP_CURRENT_NOW: -- val->intval = bq27x00_battery_current(di); -+ ret = bq27x00_battery_current(di, val); - break; - case POWER_SUPPLY_PROP_CAPACITY: -- val->intval = bq27x00_battery_rsoc(di); -+ ret = bq27x00_simple_value(di->cache.capacity, val); - break; - case POWER_SUPPLY_PROP_TEMP: -- val->intval = bq27x00_battery_temperature(di); -+ ret = bq27x00_battery_temperature(di, val); -+ break; -+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: -+ ret = bq27x00_simple_value(di->cache.time_to_empty, val); -+ break; -+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: -+ ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val); -+ break; -+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: -+ ret = bq27x00_simple_value(di->cache.time_to_full, val); -+ break; -+ case POWER_SUPPLY_PROP_TECHNOLOGY: -+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION; -+ break; -+ case POWER_SUPPLY_PROP_CHARGE_NOW: -+ ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val); -+ break; -+ case POWER_SUPPLY_PROP_CHARGE_FULL: -+ ret = bq27x00_simple_value(di->cache.charge_full, val); -+ break; -+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: -+ ret = bq27x00_simple_value(di->charge_design_full, val); -+ break; -+ case POWER_SUPPLY_PROP_ENERGY_NOW: -+ ret = bq27x00_battery_energy(di, val); - break; - default: - return -EINVAL; - } - -- return 0; -+ return ret; - } - --static void bq27x00_powersupply_init(struct bq27x00_device_info *di) -+static void bq27x00_external_power_changed(struct power_supply *psy) - { -+ struct bq27x00_device_info *di = to_bq27x00_device_info(psy); -+ -+ cancel_delayed_work_sync(&di->work); -+ schedule_delayed_work(&di->work, 0); -+} -+ -+static int bq27x00_powersupply_init(struct bq27x00_device_info *di) -+{ -+ int ret; -+ - di->bat.type = POWER_SUPPLY_TYPE_BATTERY; - di->bat.properties = bq27x00_battery_props; - di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); - di->bat.get_property = bq27x00_battery_get_property; -- di->bat.external_power_changed = NULL; -+ di->bat.external_power_changed = bq27x00_external_power_changed; -+ -+ INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll); -+ mutex_init(&di->lock); -+ -+ ret = power_supply_register(di->dev, &di->bat); -+ if (ret) { -+ dev_err(di->dev, "failed to register battery: %d\n", ret); -+ return ret; -+ } -+ -+ dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION); -+ -+ bq27x00_update(di); -+ -+ return 0; - } - --/* -- * BQ27200 specific code -+static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di) -+{ -+ cancel_delayed_work_sync(&di->work); -+ -+ power_supply_unregister(&di->bat); -+ -+ mutex_destroy(&di->lock); -+} -+ -+ -+/* i2c specific code */ -+#ifdef CONFIG_BATTERY_BQ27X00_I2C -+ -+/* If the system has several batteries we need a different name for each -+ * of them... 
- */ -+static DEFINE_IDR(battery_id); -+static DEFINE_MUTEX(battery_mutex); - --static int bq27200_read(u8 reg, int *rt_value, int b_single, -- struct bq27x00_device_info *di) -+static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single) - { -- struct i2c_client *client = di->client; -- struct i2c_msg msg[1]; -+ struct i2c_client *client = to_i2c_client(di->dev); -+ struct i2c_msg msg[2]; - unsigned char data[2]; -- int err; -+ int ret; - - if (!client->adapter) - return -ENODEV; - -- msg->addr = client->addr; -- msg->flags = 0; -- msg->len = 1; -- msg->buf = data; -- -- data[0] = reg; -- err = i2c_transfer(client->adapter, msg, 1); -- -- if (err >= 0) { -- if (!b_single) -- msg->len = 2; -- else -- msg->len = 1; -+ msg[0].addr = client->addr; -+ msg[0].flags = 0; -+ msg[0].buf = ® -+ msg[0].len = sizeof(reg); -+ msg[1].addr = client->addr; -+ msg[1].flags = I2C_M_RD; -+ msg[1].buf = data; -+ if (single) -+ msg[1].len = 1; -+ else -+ msg[1].len = 2; - -- msg->flags = I2C_M_RD; -- err = i2c_transfer(client->adapter, msg, 1); -- if (err >= 0) { -- if (!b_single) -- *rt_value = get_unaligned_be16(data); -- else -- *rt_value = data[0]; -+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); -+ if (ret < 0) -+ return ret; - -- return 0; -- } -- } -- return err; -+ if (!single) -+ ret = get_unaligned_le16(data); -+ else -+ ret = data[0]; -+ -+ return ret; - } - --static int bq27200_battery_probe(struct i2c_client *client, -+static int bq27x00_battery_probe(struct i2c_client *client, - const struct i2c_device_id *id) - { - char *name; - struct bq27x00_device_info *di; -- struct bq27x00_access_methods *bus; - int num; - int retval = 0; - -@@ -267,7 +611,7 @@ static int bq27200_battery_probe(struct - if (retval < 0) - return retval; - -- name = kasprintf(GFP_KERNEL, "bq27200-%d", num); -+ name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); - if (!name) { - dev_err(&client->dev, "failed to allocate device name\n"); - retval = -ENOMEM; -@@ -280,37 +624,20 @@ static int bq27200_battery_probe(struct - retval = -ENOMEM; - goto batt_failed_2; - } -- di->id = num; - -- bus = kzalloc(sizeof(*bus), GFP_KERNEL); -- if (!bus) { -- dev_err(&client->dev, "failed to allocate access method " -- "data\n"); -- retval = -ENOMEM; -- goto batt_failed_3; -- } -- -- i2c_set_clientdata(client, di); -+ di->id = num; - di->dev = &client->dev; -+ di->chip = id->driver_data; - di->bat.name = name; -- bus->read = &bq27200_read; -- di->bus = bus; -- di->client = client; -- -- bq27x00_powersupply_init(di); -+ di->bus.read = &bq27x00_read_i2c; - -- retval = power_supply_register(&client->dev, &di->bat); -- if (retval) { -- dev_err(&client->dev, "failed to register battery\n"); -- goto batt_failed_4; -- } -+ if (bq27x00_powersupply_init(di)) -+ goto batt_failed_3; - -- dev_info(&client->dev, "support ver. 
%s enabled\n", DRIVER_VERSION); -+ i2c_set_clientdata(client, di); - - return 0; - --batt_failed_4: -- kfree(bus); - batt_failed_3: - kfree(di); - batt_failed_2: -@@ -323,11 +650,11 @@ batt_failed_1: - return retval; - } - --static int bq27200_battery_remove(struct i2c_client *client) -+static int bq27x00_battery_remove(struct i2c_client *client) - { - struct bq27x00_device_info *di = i2c_get_clientdata(client); - -- power_supply_unregister(&di->bat); -+ bq27x00_powersupply_unregister(di); - - kfree(di->bat.name); - -@@ -340,31 +667,180 @@ static int bq27200_battery_remove(struct - return 0; - } - --/* -- * Module stuff -- */ -- --static const struct i2c_device_id bq27200_id[] = { -- { "bq27200", 0 }, -+static const struct i2c_device_id bq27x00_id[] = { -+ { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */ -+ { "bq27500", BQ27500 }, - {}, - }; -+MODULE_DEVICE_TABLE(i2c, bq27x00_id); -+ -+static struct i2c_driver bq27x00_battery_driver = { -+ .driver = { -+ .name = "bq27x00-battery", -+ }, -+ .probe = bq27x00_battery_probe, -+ .remove = bq27x00_battery_remove, -+ .id_table = bq27x00_id, -+}; -+ -+static inline int bq27x00_battery_i2c_init(void) -+{ -+ int ret = i2c_add_driver(&bq27x00_battery_driver); -+ if (ret) -+ printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n"); -+ -+ return ret; -+} -+ -+static inline void bq27x00_battery_i2c_exit(void) -+{ -+ i2c_del_driver(&bq27x00_battery_driver); -+} -+ -+#else -+ -+static inline int bq27x00_battery_i2c_init(void) { return 0; } -+static inline void bq27x00_battery_i2c_exit(void) {}; -+ -+#endif -+ -+/* platform specific code */ -+#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM -+ -+static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg, -+ bool single) -+{ -+ struct device *dev = di->dev; -+ struct bq27000_platform_data *pdata = dev->platform_data; -+ unsigned int timeout = 3; -+ int upper, lower; -+ int temp; -+ -+ if (!single) { -+ /* Make sure the value has not changed in between reading the -+ * lower and the upper part */ -+ upper = pdata->read(dev, reg + 1); -+ do { -+ temp = upper; -+ if (upper < 0) -+ return upper; -+ -+ lower = pdata->read(dev, reg); -+ if (lower < 0) -+ return lower; -+ -+ upper = pdata->read(dev, reg + 1); -+ } while (temp != upper && --timeout); -+ -+ if (timeout == 0) -+ return -EIO; -+ -+ return (upper << 8) | lower; -+ } -+ -+ return pdata->read(dev, reg); -+} -+ -+static int __devinit bq27000_battery_probe(struct platform_device *pdev) -+{ -+ struct bq27x00_device_info *di; -+ struct bq27000_platform_data *pdata = pdev->dev.platform_data; -+ int ret; -+ -+ if (!pdata) { -+ dev_err(&pdev->dev, "no platform_data supplied\n"); -+ return -EINVAL; -+ } -+ -+ if (!pdata->read) { -+ dev_err(&pdev->dev, "no hdq read callback supplied\n"); -+ return -EINVAL; -+ } -+ -+ di = kzalloc(sizeof(*di), GFP_KERNEL); -+ if (!di) { -+ dev_err(&pdev->dev, "failed to allocate device info data\n"); -+ return -ENOMEM; -+ } -+ -+ platform_set_drvdata(pdev, di); -+ -+ di->dev = &pdev->dev; -+ di->chip = BQ27000; -+ -+ di->bat.name = pdata->name ?: dev_name(&pdev->dev); -+ di->bus.read = &bq27000_read_platform; - --static struct i2c_driver bq27200_battery_driver = { -+ ret = bq27x00_powersupply_init(di); -+ if (ret) -+ goto err_free; -+ -+ return 0; -+ -+err_free: -+ platform_set_drvdata(pdev, NULL); -+ kfree(di); -+ -+ return ret; -+} -+ -+static int __devexit bq27000_battery_remove(struct platform_device *pdev) -+{ -+ struct bq27x00_device_info *di = platform_get_drvdata(pdev); -+ -+ 
bq27x00_powersupply_unregister(di); -+ -+ platform_set_drvdata(pdev, NULL); -+ kfree(di); -+ -+ return 0; -+} -+ -+static struct platform_driver bq27000_battery_driver = { -+ .probe = bq27000_battery_probe, -+ .remove = __devexit_p(bq27000_battery_remove), - .driver = { -- .name = "bq27200-battery", -+ .name = "bq27000-battery", -+ .owner = THIS_MODULE, - }, -- .probe = bq27200_battery_probe, -- .remove = bq27200_battery_remove, -- .id_table = bq27200_id, - }; - -+static inline int bq27x00_battery_platform_init(void) -+{ -+ int ret = platform_driver_register(&bq27000_battery_driver); -+ if (ret) -+ printk(KERN_ERR "Unable to register BQ27000 platform driver\n"); -+ -+ return ret; -+} -+ -+static inline void bq27x00_battery_platform_exit(void) -+{ -+ platform_driver_unregister(&bq27000_battery_driver); -+} -+ -+#else -+ -+static inline int bq27x00_battery_platform_init(void) { return 0; } -+static inline void bq27x00_battery_platform_exit(void) {}; -+ -+#endif -+ -+/* -+ * Module stuff -+ */ -+ - static int __init bq27x00_battery_init(void) - { - int ret; - -- ret = i2c_add_driver(&bq27200_battery_driver); -+ ret = bq27x00_battery_i2c_init(); -+ if (ret) -+ return ret; -+ -+ ret = bq27x00_battery_platform_init(); - if (ret) -- printk(KERN_ERR "Unable to register BQ27200 driver\n"); -+ bq27x00_battery_i2c_exit(); - - return ret; - } -@@ -372,7 +848,8 @@ module_init(bq27x00_battery_init); - - static void __exit bq27x00_battery_exit(void) - { -- i2c_del_driver(&bq27200_battery_driver); -+ bq27x00_battery_platform_exit(); -+ bq27x00_battery_i2c_exit(); - } - module_exit(bq27x00_battery_exit); - diff --git a/kernel-bfs-2.6.28/debian/patches/fmtx_lock_power.diff b/kernel-bfs-2.6.28/debian/patches/fmtx_lock_power.diff deleted file mode 100644 index 19d893c..0000000 --- a/kernel-bfs-2.6.28/debian/patches/fmtx_lock_power.diff +++ /dev/null @@ -1,80 +0,0 @@ ---- - drivers/media/radio/radio-si4713.c | 58 +------------------------------------ - 1 files changed, 3 insertions(+), 55 deletions(-) - -Index: kernel-power-2.6.28/drivers/media/radio/radio-si4713.c -=================================================================== ---- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.c -+++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.c -@@ -200,8 +200,8 @@ - - sscanf(buf, "%d", &l); - --// if (l != 0) --// config_locked = 1; -+ if (l != 0) -+ config_locked = 1; - - return count; - } -@@ -468,59 +468,7 @@ - if (cmd != LOCK_LOW_POWER && cmd != RELEASE_LOW_POWER) - return video_ioctl2(inode, file, cmd, arg); - -- pl = si4713_get_power_level(si4713_dev); -- -- if (pl < 0) { -- rval = pl; -- goto exit; -- } -- -- if (copy_from_user(&pow, (void __user *)arg, sizeof(pow))) { -- rval = -EFAULT; -- goto exit; -- } -- -- if (cmd == LOCK_LOW_POWER) { -- -- if (pid_count == APP_MAX_NUM) { -- rval = -EPERM; -- goto exit; -- } -- -- if (pid_count == 0) { -- if (pow > pl) { -- rval = -EINVAL; -- goto exit; -- } else { -- /* Set max possible power level */ -- max_pl = pl; -- min_pl = pow; -- } -- } -- -- rval = register_pid(current->pid); -- -- if (rval) -- goto exit; -- -- /* Lower min power level if asked */ -- if (pow < min_pl) -- min_pl = pow; -- else -- pow = min_pl; -- -- } else { /* RELEASE_LOW_POWER */ -- rval = unregister_pid(current->pid); -- -- if (rval) -- goto exit; -- -- if (pid_count == 0) { -- if (pow > max_pl) -- pow = max_pl; -- } -- } -- rval = si4713_set_power_level(si4713_dev, pow); -+ rval = 0; - exit: - return rval; - } diff --git 
a/kernel-bfs-2.6.28/debian/patches/nokia-20093908+0m5.diff b/kernel-bfs-2.6.28/debian/patches/nokia-20093908+0m5.diff new file mode 100644 index 0000000..dd7c82a --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/nokia-20093908+0m5.diff @@ -0,0 +1,402313 @@ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/boot/compressed/head.S kernel-2.6.28-20093908+0m5/arch/arm/boot/compressed/head.S +--- linux-omap-2.6.28-omap1/arch/arm/boot/compressed/head.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/boot/compressed/head.S 2011-09-04 11:31:05.000000000 +0200 +@@ -717,6 +717,9 @@ __armv7_mmu_cache_off: + bl __armv7_mmu_cache_flush + mov r0, #0 + mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB ++ mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC ++ mcr p15, 0, r0, c7, c10, 4 @ DSB ++ mcr p15, 0, r0, c7, c5, 4 @ ISB + mov pc, r12 + + __arm6_mmu_cache_off: +@@ -778,12 +781,13 @@ __armv6_mmu_cache_flush: + __armv7_mmu_cache_flush: + mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 + tst r10, #0xf << 16 @ hierarchical cache (ARMv7) +- beq hierarchical + mov r10, #0 ++ beq hierarchical + mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D + b iflush + hierarchical: +- stmfd sp!, {r0-r5, r7, r9-r11} ++ mcr p15, 0, r10, c7, c10, 5 @ DMB ++ stmfd sp!, {r0-r5, r7, r9, r11} + mrc p15, 1, r0, c0, c0, 1 @ read clidr + ands r3, r0, #0x7000000 @ extract loc from clidr + mov r3, r3, lsr #23 @ left align loc bit field +@@ -820,12 +824,14 @@ skip: + cmp r3, r10 + bgt loop1 + finished: ++ ldmfd sp!, {r0-r5, r7, r9, r11} + mov r10, #0 @ swith back to cache level 0 + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr +- ldmfd sp!, {r0-r5, r7, r9-r11} + iflush: ++ mcr p15, 0, r10, c7, c10, 4 @ DSB + mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB +- mcr p15, 0, r10, c7, c10, 4 @ drain WB ++ mcr p15, 0, r10, c7, c10, 4 @ DSB ++ mcr p15, 0, r10, c7, c5, 4 @ ISB + mov pc, lr + + __armv5tej_mmu_cache_flush: +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/configs/omap_3430sdp_min_defconfig kernel-2.6.28-20093908+0m5/arch/arm/configs/omap_3430sdp_min_defconfig +--- linux-omap-2.6.28-omap1/arch/arm/configs/omap_3430sdp_min_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/configs/omap_3430sdp_min_defconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1106 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.27-rc7-omap1 ++# Fri Sep 26 14:56:56 2008 ++# ++CONFIG_ARM=y ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_GENERIC_GPIO=y ++CONFIG_GENERIC_TIME=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_MMU=y ++# CONFIG_NO_IOPORT is not set ++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_HARDIRQS_SW_RESEND=y ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++# CONFIG_ARCH_HAS_ILOG2_U32 is not set ++# CONFIG_ARCH_HAS_ILOG2_U64 is not set ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_ARCH_SUPPORTS_AOUT=y ++CONFIG_ZONE_DMA=y ++CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y ++CONFIG_VECTORS_BASE=0xffff0000 ++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++CONFIG_LOCALVERSION_AUTO=y ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++# CONFIG_POSIX_MQUEUE is not set ++CONFIG_BSD_PROCESS_ACCT=y ++# CONFIG_BSD_PROCESS_ACCT_V3 is not set ++# 
CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_CGROUPS is not set ++CONFIG_GROUP_SCHED=y ++CONFIG_FAIR_GROUP_SCHED=y ++# CONFIG_RT_GROUP_SCHED is not set ++CONFIG_USER_SCHED=y ++# CONFIG_CGROUP_SCHED is not set ++CONFIG_SYSFS_DEPRECATED=y ++CONFIG_SYSFS_DEPRECATED_V2=y ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++# CONFIG_SYSCTL_SYSCALL is not set ++CONFIG_KALLSYMS=y ++# CONFIG_KALLSYMS_ALL is not set ++CONFIG_KALLSYMS_EXTRA_PASS=y ++CONFIG_HOTPLUG=y ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_COMPAT_BRK=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_ANON_INODES=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_VM_EVENT_COUNTERS=y ++CONFIG_SLAB=y ++# CONFIG_SLUB is not set ++# CONFIG_SLOB is not set ++# CONFIG_PROFILING is not set ++# CONFIG_MARKERS is not set ++CONFIG_HAVE_OPROFILE=y ++# CONFIG_KPROBES is not set ++# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set ++# CONFIG_HAVE_IOREMAP_PROT is not set ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++# CONFIG_HAVE_ARCH_TRACEHOOK is not set ++# CONFIG_HAVE_DMA_ATTRS is not set ++# CONFIG_USE_GENERIC_SMP_HELPERS is not set ++CONFIG_HAVE_CLK=y ++CONFIG_PROC_PAGE_MONITOR=y ++CONFIG_HAVE_GENERIC_DMA_COHERENT=y ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++# CONFIG_TINY_SHMEM is not set ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++# CONFIG_MODULE_FORCE_LOAD is not set ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_MODULE_FORCE_UNLOAD is not set ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SRCVERSION_ALL=y ++CONFIG_KMOD=y ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_LSF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++CONFIG_IOSCHED_AS=y ++CONFIG_IOSCHED_DEADLINE=y ++CONFIG_IOSCHED_CFQ=y ++CONFIG_DEFAULT_AS=y ++# CONFIG_DEFAULT_DEADLINE is not set ++# CONFIG_DEFAULT_CFQ is not set ++# CONFIG_DEFAULT_NOOP is not set ++CONFIG_DEFAULT_IOSCHED="anticipatory" ++CONFIG_CLASSIC_RCU=y ++ ++# ++# System Type ++# ++# CONFIG_ARCH_AAEC2000 is not set ++# CONFIG_ARCH_INTEGRATOR is not set ++# CONFIG_ARCH_REALVIEW is not set ++# CONFIG_ARCH_VERSATILE is not set ++# CONFIG_ARCH_AT91 is not set ++# CONFIG_ARCH_CLPS7500 is not set ++# CONFIG_ARCH_CLPS711X is not set ++# CONFIG_ARCH_EBSA110 is not set ++# CONFIG_ARCH_EP93XX is not set ++# CONFIG_ARCH_FOOTBRIDGE is not set ++# CONFIG_ARCH_NETX is not set ++# CONFIG_ARCH_H720X is not set ++# CONFIG_ARCH_IMX is not set ++# CONFIG_ARCH_IOP13XX is not set ++# CONFIG_ARCH_IOP32X is not set ++# CONFIG_ARCH_IOP33X is not set ++# CONFIG_ARCH_IXP23XX is not set ++# CONFIG_ARCH_IXP2000 is not set ++# CONFIG_ARCH_IXP4XX is not set ++# CONFIG_ARCH_L7200 is not set ++# CONFIG_ARCH_KIRKWOOD is not set ++# CONFIG_ARCH_KS8695 is not set ++# CONFIG_ARCH_NS9XXX is not set ++# CONFIG_ARCH_LOKI is not set ++# CONFIG_ARCH_MV78XX0 is not set ++# CONFIG_ARCH_MXC is not set ++# CONFIG_ARCH_ORION5X is not set ++# CONFIG_ARCH_PNX4008 is not set ++# CONFIG_ARCH_PXA is not set ++# CONFIG_ARCH_RPC is not set ++# CONFIG_ARCH_SA1100 is not set ++# CONFIG_ARCH_S3C2410 is not set ++# CONFIG_ARCH_SHARK is not set ++# CONFIG_ARCH_LH7A40X is not set ++# CONFIG_ARCH_DAVINCI is not set ++CONFIG_ARCH_OMAP=y ++# 
CONFIG_ARCH_MSM7X00A is not set ++ ++# ++# TI OMAP Implementations ++# ++CONFIG_ARCH_OMAP_OTG=y ++# CONFIG_ARCH_OMAP1 is not set ++# CONFIG_ARCH_OMAP2 is not set ++CONFIG_ARCH_OMAP3=y ++ ++# ++# OMAP Feature Selections ++# ++# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set ++# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set ++CONFIG_OMAP_SMARTREFLEX=y ++# CONFIG_OMAP_SMARTREFLEX_TESTING is not set ++CONFIG_OMAP_RESET_CLOCKS=y ++CONFIG_OMAP_BOOT_TAG=y ++CONFIG_OMAP_BOOT_REASON=y ++# CONFIG_OMAP_COMPONENT_VERSION is not set ++# CONFIG_OMAP_GPIO_SWITCH is not set ++CONFIG_OMAP_MUX=y ++CONFIG_OMAP_MUX_DEBUG=y ++CONFIG_OMAP_MUX_WARNINGS=y ++# CONFIG_OMAP_MCBSP is not set ++# CONFIG_OMAP_MMU_FWK is not set ++# CONFIG_OMAP_MBOX_FWK is not set ++# CONFIG_OMAP_MPU_TIMER is not set ++CONFIG_OMAP_32K_TIMER=y ++CONFIG_OMAP_32K_TIMER_HZ=128 ++CONFIG_OMAP_DM_TIMER=y ++CONFIG_OMAP_LL_DEBUG_UART1=y ++# CONFIG_OMAP_LL_DEBUG_UART2 is not set ++# CONFIG_OMAP_LL_DEBUG_UART3 is not set ++CONFIG_OMAP_SERIAL_WAKE=y ++# CONFIG_OMAP_PM_NOOP is not set ++CONFIG_OMAP_PM_SRF=y ++CONFIG_ARCH_OMAP34XX=y ++CONFIG_ARCH_OMAP3430=y ++ ++# ++# OMAP Board Type ++# ++# CONFIG_MACH_OMAP_LDP is not set ++CONFIG_MACH_OMAP_3430SDP=y ++# CONFIG_MACH_OMAP3EVM is not set ++# CONFIG_MACH_OMAP3_BEAGLE is not set ++# CONFIG_MACH_OVERO is not set ++# CONFIG_MACH_OMAP3_PANDORA is not set ++CONFIG_OMAP_TICK_GPTIMER=1 ++ ++# ++# Boot options ++# ++ ++# ++# Power management ++# ++ ++# ++# Processor Type ++# ++CONFIG_CPU_32=y ++CONFIG_CPU_32v6K=y ++CONFIG_CPU_V7=y ++CONFIG_CPU_32v7=y ++CONFIG_CPU_ABRT_EV7=y ++CONFIG_CPU_PABRT_IFAR=y ++CONFIG_CPU_CACHE_V7=y ++CONFIG_CPU_CACHE_VIPT=y ++CONFIG_CPU_COPY_V6=y ++CONFIG_CPU_TLB_V7=y ++CONFIG_CPU_HAS_ASID=y ++CONFIG_CPU_CP15=y ++CONFIG_CPU_CP15_MMU=y ++ ++# ++# Processor Features ++# ++CONFIG_ARM_THUMB=y ++# CONFIG_ARM_THUMBEE is not set ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_BPREDICT_DISABLE is not set ++CONFIG_HAS_TLS_REG=y ++# CONFIG_OUTER_CACHE is not set ++ ++# ++# Bus support ++# ++# CONFIG_PCI_SYSCALL is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++# CONFIG_PCCARD is not set ++ ++# ++# Kernel Features ++# ++CONFIG_TICK_ONESHOT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y ++# CONFIG_PREEMPT is not set ++CONFIG_HZ=128 ++CONFIG_AEABI=y ++CONFIG_OABI_COMPAT=y ++CONFIG_ARCH_FLATMEM_HAS_HOLES=y ++# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++# CONFIG_SPARSEMEM_STATIC is not set ++# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set ++CONFIG_PAGEFLAGS_EXTENDED=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_RESOURCES_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=1 ++CONFIG_BOUNCE=y ++CONFIG_VIRT_TO_BUS=y ++# CONFIG_LEDS is not set ++CONFIG_ALIGNMENT_TRAP=y ++ ++# ++# Boot options ++# ++CONFIG_ZBOOT_ROM_TEXT=0x0 ++CONFIG_ZBOOT_ROM_BSS=0x0 ++CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/home/user/buildroot ip=192.168.0.2:192.168.0.1:192.168.0.1:255.255.255.0:tgt:eth0:off rw console=ttyS2,115200n8" ++# CONFIG_XIP_KERNEL is not set ++# CONFIG_KEXEC is not set ++ ++# ++# CPUIdle ++# ++# CONFIG_CPU_IDLE is not set ++ ++# ++# CPU Frequency scaling ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_TABLE=y ++# CONFIG_CPU_FREQ_DEBUG is not set ++CONFIG_CPU_FREQ_STAT=y ++# CONFIG_CPU_FREQ_STAT_DETAILS is not set ++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y ++# 
CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set ++# CONFIG_CPU_FREQ_GOV_USERSPACE is not set ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set ++ ++# ++# Floating point emulation ++# ++ ++# ++# At least one emulation must be selected ++# ++CONFIG_FPE_NWFPE=y ++# CONFIG_FPE_NWFPE_XP is not set ++# CONFIG_FPE_FASTFPE is not set ++CONFIG_VFP=y ++CONFIG_VFPv3=y ++# CONFIG_NEON is not set ++ ++# ++# Userspace binary formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_BINFMT_AOUT is not set ++CONFIG_BINFMT_MISC=y ++ ++# ++# Power management options ++# ++CONFIG_PM=y ++# CONFIG_PM_DEBUG is not set ++CONFIG_PM_SLEEP=y ++CONFIG_SUSPEND=y ++CONFIG_SUSPEND_FREEZER=y ++# CONFIG_APM_EMULATION is not set ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++# CONFIG_IP_MULTICAST is not set ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++CONFIG_INET_XFRM_MODE_TRANSPORT=y ++CONFIG_INET_XFRM_MODE_TUNNEL=y ++CONFIG_INET_XFRM_MODE_BEET=y ++# CONFIG_INET_LRO is not set ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETWORK_SECMARK is not set ++# CONFIG_NETFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++# CONFIG_NET_SCHED is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++# CONFIG_BT is not set ++# CONFIG_AF_RXRPC is not set ++ ++# ++# Wireless ++# ++# CONFIG_CFG80211 is not set ++# CONFIG_WIRELESS_EXT is not set ++# CONFIG_MAC80211 is not set ++# CONFIG_IEEE80211 is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++# CONFIG_FW_LOADER is not set ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not set ++# CONFIG_SYS_HYPERVISOR is not set ++# CONFIG_CONNECTOR is not set ++# CONFIG_MTD is not set ++# CONFIG_PARPORT is not set ++# CONFIG_BLK_DEV is not set ++# CONFIG_MISC_DEVICES is not set 
++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI=y ++CONFIG_SCSI_DMA=y ++# CONFIG_SCSI_TGT is not set ++# CONFIG_SCSI_NETLINK is not set ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=y ++# CONFIG_CHR_DEV_ST is not set ++# CONFIG_CHR_DEV_OSST is not set ++# CONFIG_BLK_DEV_SR is not set ++# CONFIG_CHR_DEV_SG is not set ++# CONFIG_CHR_DEV_SCH is not set ++ ++# ++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs ++# ++# CONFIG_SCSI_MULTI_LUN is not set ++# CONFIG_SCSI_CONSTANTS is not set ++# CONFIG_SCSI_LOGGING is not set ++# CONFIG_SCSI_SCAN_ASYNC is not set ++CONFIG_SCSI_WAIT_SCAN=m ++ ++# ++# SCSI Transports ++# ++# CONFIG_SCSI_SPI_ATTRS is not set ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++CONFIG_SCSI_LOWLEVEL=y ++# CONFIG_ISCSI_TCP is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_DH is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++# CONFIG_TUN is not set ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++CONFIG_MII=y ++# CONFIG_AX88796 is not set ++CONFIG_SMC91X=y ++# CONFIG_DM9000 is not set ++# CONFIG_SMC911X is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_B44 is not set ++CONFIG_NETDEV_1000=y ++CONFIG_NETDEV_10000=y ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++# CONFIG_WLAN_80211 is not set ++# CONFIG_IWLWIFI_LEDS is not set ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_KEYBOARD_SUNKBD is not set ++# CONFIG_KEYBOARD_LKKBD is not set ++# CONFIG_KEYBOARD_XTKBD is not set ++# CONFIG_KEYBOARD_NEWTON is not set ++# CONFIG_KEYBOARD_STOWAWAY is not set ++CONFIG_KEYBOARD_TWL4030=y ++# CONFIG_KEYBOARD_LM8323 is not set ++# CONFIG_KEYBOARD_GPIO is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_MK712 is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set ++# CONFIG_TOUCHSCREEN_UCB1400 is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++# CONFIG_INPUT_MISC is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y 
++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=32 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ++CONFIG_SERIAL_8250_EXTENDED=y ++CONFIG_SERIAL_8250_MANY_PORTS=y ++CONFIG_SERIAL_8250_SHARE_IRQ=y ++CONFIG_SERIAL_8250_DETECT_IRQ=y ++CONFIG_SERIAL_8250_RSA=y ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_IPMI_HANDLER is not set ++CONFIG_HW_RANDOM=y ++# CONFIG_NVRAM is not set ++# CONFIG_R3964 is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_TCG_TPM is not set ++CONFIG_I2C=y ++CONFIG_I2C_BOARDINFO=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_HELPER_AUTO=y ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_GPIO is not set ++# CONFIG_I2C_OCORES is not set ++CONFIG_I2C_OMAP=y ++# CONFIG_I2C_SIMTEC is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_PARPORT_LIGHT is not set ++# CONFIG_I2C_TAOS_EVM is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_STUB is not set ++ ++# ++# Miscellaneous I2C Chip support ++# ++# CONFIG_DS1682 is not set ++# CONFIG_AT24 is not set ++# CONFIG_SENSORS_EEPROM is not set ++# CONFIG_SENSORS_PCF8574 is not set ++# CONFIG_PCF8575 is not set ++# CONFIG_SENSORS_PCA9539 is not set ++# CONFIG_SENSORS_PCF8591 is not set ++# CONFIG_ISP1301_OMAP is not set ++# CONFIG_TPS65010 is not set ++# CONFIG_SENSORS_TLV320AIC23 is not set ++# CONFIG_TWL4030_MADC is not set ++CONFIG_TWL4030_USB=y ++# CONFIG_TWL4030_PWRBUTTON is not set ++# CONFIG_TWL4030_POWEROFF is not set ++# CONFIG_SENSORS_MAX6875 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++# CONFIG_LP5521 is not set ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# CONFIG_I2C_DEBUG_CHIP is not set ++# CONFIG_SPI is not set ++CONFIG_ARCH_REQUIRE_GPIOLIB=y ++CONFIG_GPIOLIB=y ++# CONFIG_DEBUG_GPIO is not set ++# CONFIG_GPIO_SYSFS is not set ++ ++# ++# I2C GPIO expanders: ++# ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCF857X is not set ++# CONFIG_GPIO_TWL4030 is not set ++ ++# ++# PCI GPIO expanders: ++# ++ ++# ++# SPI GPIO expanders: ++# ++# CONFIG_W1 is not set ++# CONFIG_POWER_SUPPLY is not set ++# CONFIG_HWMON is not set ++# CONFIG_WATCHDOG is not set ++ ++# ++# Sonics Silicon Backplane ++# ++CONFIG_SSB_POSSIBLE=y ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_ASIC3 is not set ++# CONFIG_HTC_EGPIO is not set ++# CONFIG_HTC_PASIC3 is not set ++CONFIG_TWL4030_CORE=y ++# CONFIG_TWL4030_POWER is not set ++# CONFIG_MFD_TMIO is not set ++# CONFIG_MFD_T7L66XB is not set ++# CONFIG_MFD_TC6387XB is not set ++# CONFIG_MFD_TC6393XB is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++# CONFIG_VIDEO_DEV is not set ++# CONFIG_DVB_CORE is not set ++# CONFIG_VIDEO_MEDIA is not set ++ ++# ++# Multimedia drivers ++# ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++# CONFIG_FB is not set ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++ ++# ++# Display 
device support ++# ++# CONFIG_DISPLAY_SUPPORT is not set ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++# CONFIG_SOUND is not set ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_MMC is not set ++# CONFIG_NEW_LEDS is not set ++CONFIG_RTC_LIB=y ++# CONFIG_RTC_CLASS is not set ++# CONFIG_DMADEVICES is not set ++ ++# ++# Voltage and Current regulators ++# ++# CONFIG_REGULATOR is not set ++# CONFIG_REGULATOR_FIXED_VOLTAGE is not set ++# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set ++# CONFIG_REGULATOR_BQ24022 is not set ++# CONFIG_UIO is not set ++ ++# ++# CBUS support ++# ++# CONFIG_CBUS is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=y ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++CONFIG_EXT3_FS=y ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT4DEV_FS is not set ++CONFIG_JBD=y ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++CONFIG_QUOTA=y ++# CONFIG_QUOTA_NETLINK_INTERFACE is not set ++CONFIG_PRINT_QUOTA_WARNING=y ++# CONFIG_QFMT_V1 is not set ++CONFIG_QFMT_V2=y ++CONFIG_QUOTACTL=y ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++# CONFIG_FUSE_FS is not set ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++CONFIG_PROC_SYSCTL=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++ ++# ++# Miscellaneous filesystems ++# ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_CRAMFS is not set ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++CONFIG_NFS_V4=y ++CONFIG_ROOT_NFS=y ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=y ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=y ++CONFIG_SUNRPC_GSS=y ++CONFIG_RPCSEC_GSS_KRB5=y ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++# CONFIG_EFI_PARTITION 
is not set ++# CONFIG_SYSV68_PARTITION is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++# CONFIG_PRINTK_TIME is not set ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++CONFIG_MAGIC_SYSRQ=y ++# CONFIG_UNUSED_SYMBOLS is not set ++# CONFIG_DEBUG_FS is not set ++# CONFIG_HEADERS_CHECK is not set ++CONFIG_DEBUG_KERNEL=y ++# CONFIG_DEBUG_SHIRQ is not set ++CONFIG_DETECT_SOFTLOCKUP=y ++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 ++CONFIG_SCHED_DEBUG=y ++# CONFIG_SCHEDSTATS is not set ++# CONFIG_TIMER_STATS is not set ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_SLAB is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_RT_MUTEX_TESTER is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++CONFIG_DEBUG_MUTEXES=y ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_DEBUG_KOBJECT is not set ++# CONFIG_DEBUG_BUGVERBOSE is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_WRITECOUNT is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_LIST is not set ++# CONFIG_DEBUG_SG is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_FAULT_INJECTION is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_HAVE_FTRACE=y ++CONFIG_HAVE_DYNAMIC_FTRACE=y ++# CONFIG_FTRACE is not set ++# CONFIG_IRQSOFF_TRACER is not set ++# CONFIG_SCHED_TRACER is not set ++# CONFIG_CONTEXT_SWITCH_TRACER is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++# CONFIG_DEBUG_USER is not set ++# CONFIG_DEBUG_ERRORS is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++CONFIG_DEBUG_LL=y ++# CONFIG_DEBUG_ICEDCC is not set ++ ++# ++# Security options ++# ++# CONFIG_KEYS is not set ++# CONFIG_SECURITY is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set 
++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_BLKCIPHER=y ++CONFIG_CRYPTO_MANAGER=y ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++CONFIG_CRYPTO_CBC=y ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++CONFIG_CRYPTO_ECB=m ++# CONFIG_CRYPTO_LRW is not set ++CONFIG_CRYPTO_PCBC=m ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++CONFIG_CRYPTO_MD5=y ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++# CONFIG_CRYPTO_AES is not set ++# CONFIG_CRYPTO_ANUBIS is not set ++# CONFIG_CRYPTO_ARC4 is not set ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++CONFIG_CRYPTO_DES=y ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++# CONFIG_CRYPTO_DEFLATE is not set ++# CONFIG_CRYPTO_LZO is not set ++CONFIG_CRYPTO_HW=y ++ ++# ++# Library routines ++# ++CONFIG_BITREVERSE=y ++# CONFIG_GENERIC_FIND_FIRST_BIT is not set ++# CONFIG_GENERIC_FIND_NEXT_BIT is not set ++CONFIG_CRC_CCITT=y ++# CONFIG_CRC16 is not set ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++CONFIG_CRC32=y ++# CONFIG_CRC7 is not set ++CONFIG_LIBCRC32C=y ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT=y ++CONFIG_HAS_DMA=y +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/configs/rx51_defconfig kernel-2.6.28-20093908+0m5/arch/arm/configs/rx51_defconfig +--- linux-omap-2.6.28-omap1/arch/arm/configs/rx51_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/configs/rx51_defconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2032 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.28-omap1 ++# Thu Jul 9 09:32:52 2009 ++# ++CONFIG_ARM=y ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_GENERIC_GPIO=y ++CONFIG_GENERIC_TIME=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_MMU=y ++# CONFIG_NO_IOPORT is not set ++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_HARDIRQS_SW_RESEND=y ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++# CONFIG_ARCH_HAS_ILOG2_U32 is not set ++# CONFIG_ARCH_HAS_ILOG2_U64 is not set ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y ++CONFIG_OPROFILE_OMAP_GPTIMER=y ++CONFIG_OPROFILE_ARMV7=y ++CONFIG_VECTORS_BASE=0xffff0000 
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_LOCK_KERNEL=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++CONFIG_LOCALVERSION_AUTO=y ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++# CONFIG_BSD_PROCESS_ACCT_V3 is not set ++# CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_CGROUPS=y ++# CONFIG_CGROUP_DEBUG is not set ++# CONFIG_CGROUP_NS is not set ++CONFIG_CGROUP_FREEZER=y ++# CONFIG_CGROUP_DEVICE is not set ++CONFIG_GROUP_SCHED=y ++CONFIG_FAIR_GROUP_SCHED=y ++# CONFIG_RT_GROUP_SCHED is not set ++CONFIG_USER_SCHED=y ++# CONFIG_CGROUP_SCHED is not set ++# CONFIG_CGROUP_CPUACCT is not set ++CONFIG_RESOURCE_COUNTERS=y ++CONFIG_MM_OWNER=y ++CONFIG_CGROUP_MEM_RES_CTLR=y ++CONFIG_SYSFS_DEPRECATED=y ++CONFIG_SYSFS_DEPRECATED_V2=y ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_KALLSYMS=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_KALLSYMS_EXTRA_PASS=y ++CONFIG_HOTPLUG=y ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_COMPAT_BRK=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_ANON_INODES=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_VM_EVENT_COUNTERS=y ++CONFIG_SLUB_DEBUG=y ++# CONFIG_SLAB is not set ++CONFIG_SLUB=y ++# CONFIG_SLOB is not set ++CONFIG_PROFILING=y ++# CONFIG_MARKERS is not set ++CONFIG_OPROFILE=m ++CONFIG_HAVE_OPROFILE=y ++CONFIG_KPROBES=y ++CONFIG_KRETPROBES=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_GENERIC_DMA_COHERENT=y ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++# CONFIG_TINY_SHMEM is not set ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SRCVERSION_ALL=y ++CONFIG_KMOD=y ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_LSF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++# CONFIG_IOSCHED_AS is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_IOSCHED_CFQ=y ++# CONFIG_DEFAULT_AS is not set ++# CONFIG_DEFAULT_DEADLINE is not set ++CONFIG_DEFAULT_CFQ=y ++# CONFIG_DEFAULT_NOOP is not set ++CONFIG_DEFAULT_IOSCHED="cfq" ++CONFIG_CLASSIC_RCU=y ++CONFIG_FREEZER=y ++ ++# ++# System Type ++# ++# CONFIG_ARCH_AAEC2000 is not set ++# CONFIG_ARCH_INTEGRATOR is not set ++# CONFIG_ARCH_REALVIEW is not set ++# CONFIG_ARCH_VERSATILE is not set ++# CONFIG_ARCH_AT91 is not set ++# CONFIG_ARCH_CLPS7500 is not set ++# CONFIG_ARCH_CLPS711X is not set ++# CONFIG_ARCH_EBSA110 is not set ++# CONFIG_ARCH_EP93XX is not set ++# CONFIG_ARCH_FOOTBRIDGE is not set ++# CONFIG_ARCH_NETX is not set ++# CONFIG_ARCH_H720X is not set ++# CONFIG_ARCH_IMX is not set ++# CONFIG_ARCH_IOP13XX is not set ++# CONFIG_ARCH_IOP32X is not set ++# CONFIG_ARCH_IOP33X is not set ++# CONFIG_ARCH_IXP23XX is not set ++# CONFIG_ARCH_IXP2000 is not set ++# CONFIG_ARCH_IXP4XX is not set ++# CONFIG_ARCH_L7200 is not set ++# CONFIG_ARCH_KIRKWOOD is not set ++# CONFIG_ARCH_KS8695 is not set ++# CONFIG_ARCH_NS9XXX is not set 
++# CONFIG_ARCH_LOKI is not set ++# CONFIG_ARCH_MV78XX0 is not set ++# CONFIG_ARCH_MXC is not set ++# CONFIG_ARCH_ORION5X is not set ++# CONFIG_ARCH_PNX4008 is not set ++# CONFIG_ARCH_PXA is not set ++# CONFIG_ARCH_RPC is not set ++# CONFIG_ARCH_SA1100 is not set ++# CONFIG_ARCH_S3C2410 is not set ++# CONFIG_ARCH_SHARK is not set ++# CONFIG_ARCH_LH7A40X is not set ++# CONFIG_ARCH_DAVINCI is not set ++CONFIG_ARCH_OMAP=y ++# CONFIG_ARCH_MSM is not set ++ ++# ++# TI OMAP Implementations ++# ++CONFIG_ARCH_OMAP_OTG=y ++# CONFIG_ARCH_OMAP1 is not set ++# CONFIG_ARCH_OMAP2 is not set ++CONFIG_ARCH_OMAP3=y ++ ++# ++# OMAP Feature Selections ++# ++# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set ++# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set ++CONFIG_OMAP_SMARTREFLEX=y ++# CONFIG_OMAP_SMARTREFLEX_TESTING is not set ++CONFIG_OMAP_RESET_CLOCKS=y ++CONFIG_OMAP_BOOT_TAG=y ++CONFIG_OMAP_BOOT_REASON=y ++CONFIG_OMAP_COMPONENT_VERSION=y ++CONFIG_OMAP_GPIO_SWITCH=y ++CONFIG_OMAP_MUX=y ++CONFIG_OMAP_MUX_DEBUG=y ++CONFIG_OMAP_MUX_WARNINGS=y ++CONFIG_OMAP_MCBSP=y ++# CONFIG_OMAP_MMU_FWK is not set ++# CONFIG_OMAP_MBOX_FWK is not set ++CONFIG_OMAP_IOMMU=m ++# CONFIG_OMAP_IOMMU_DEBUG is not set ++# CONFIG_OMAP_MPU_TIMER is not set ++CONFIG_OMAP_32K_TIMER=y ++# CONFIG_OMAP3_DEBOBS is not set ++CONFIG_OMAP_32K_TIMER_HZ=128 ++CONFIG_OMAP_TICK_GPTIMER=1 ++CONFIG_OMAP_DM_TIMER=y ++# CONFIG_OMAP_LL_DEBUG_UART1 is not set ++# CONFIG_OMAP_LL_DEBUG_UART2 is not set ++CONFIG_OMAP_LL_DEBUG_UART3=y ++CONFIG_OMAP_SERIAL_WAKE=y ++# CONFIG_OMAP_PM_NONE is not set ++# CONFIG_OMAP_PM_NOOP is not set ++CONFIG_OMAP_PM_SRF=y ++CONFIG_ARCH_OMAP34XX=y ++CONFIG_ARCH_OMAP3430=y ++ ++# ++# OMAP Board Type ++# ++CONFIG_MACH_NOKIA_RX51=y ++CONFIG_VIDEO_MACH_RX51=m ++# CONFIG_VIDEO_MACH_RX51_OLD_I2C is not set ++CONFIG_MACH_NOKIA_RX71=y ++# CONFIG_MACH_OMAP_LDP is not set ++# CONFIG_MACH_OMAP_3430SDP is not set ++# CONFIG_MACH_OMAP3EVM is not set ++# CONFIG_MACH_OMAP3_BEAGLE is not set ++# CONFIG_MACH_OVERO is not set ++# CONFIG_MACH_OMAP3_PANDORA is not set ++# CONFIG_RX51_CAMERA_BUTTON is not set ++ ++# ++# Boot options ++# ++ ++# ++# Power management ++# ++ ++# ++# Processor Type ++# ++CONFIG_CPU_32=y ++CONFIG_CPU_32v6K=y ++CONFIG_CPU_V7=y ++CONFIG_CPU_32v7=y ++CONFIG_CPU_ABRT_EV7=y ++CONFIG_CPU_PABRT_IFAR=y ++CONFIG_CPU_CACHE_V7=y ++CONFIG_CPU_CACHE_VIPT=y ++CONFIG_CPU_COPY_V6=y ++CONFIG_CPU_TLB_V7=y ++CONFIG_CPU_HAS_ASID=y ++CONFIG_CPU_CP15=y ++CONFIG_CPU_CP15_MMU=y ++ ++# ++# Processor Features ++# ++CONFIG_ARM_THUMB=y ++# CONFIG_ARM_THUMBEE is not set ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_BPREDICT_DISABLE is not set ++CONFIG_HAS_TLS_REG=y ++# CONFIG_OUTER_CACHE is not set ++ ++# ++# Bus support ++# ++# CONFIG_PCI_SYSCALL is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++# CONFIG_PCCARD is not set ++ ++# ++# Kernel Features ++# ++CONFIG_TICK_ONESHOT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y ++CONFIG_VMSPLIT_3G=y ++# CONFIG_VMSPLIT_2G is not set ++# CONFIG_VMSPLIT_1G is not set ++CONFIG_PAGE_OFFSET=0xC0000000 ++CONFIG_PREEMPT=y ++CONFIG_HZ=128 ++CONFIG_AEABI=y ++# CONFIG_OABI_COMPAT is not set ++CONFIG_ARCH_FLATMEM_HAS_HOLES=y ++# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set ++# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++CONFIG_PAGEFLAGS_EXTENDED=y 
++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_RESOURCES_64BIT is not set ++# CONFIG_PHYS_ADDR_T_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=0 ++CONFIG_VIRT_TO_BUS=y ++CONFIG_UNEVICTABLE_LRU=y ++# CONFIG_LEDS is not set ++CONFIG_ALIGNMENT_TRAP=y ++ ++# ++# Boot options ++# ++CONFIG_ZBOOT_ROM_TEXT=0x0 ++CONFIG_ZBOOT_ROM_BSS=0x0 ++CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0" ++# CONFIG_XIP_KERNEL is not set ++CONFIG_KEXEC=y ++CONFIG_ATAGS_PROC=y ++ ++# ++# CPU Power Management ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_TABLE=y ++# CONFIG_CPU_FREQ_DEBUG is not set ++CONFIG_CPU_FREQ_STAT=y ++# CONFIG_CPU_FREQ_STAT_DETAILS is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set ++CONFIG_CPU_FREQ_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set ++CONFIG_CPU_IDLE=y ++CONFIG_CPU_IDLE_GOV_LADDER=y ++CONFIG_CPU_IDLE_GOV_MENU=y ++ ++# ++# Floating point emulation ++# ++ ++# ++# At least one emulation must be selected ++# ++CONFIG_VFP=y ++CONFIG_VFPv3=y ++CONFIG_NEON=y ++ ++# ++# Userspace binary formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_HAVE_AOUT=y ++# CONFIG_BINFMT_AOUT is not set ++CONFIG_BINFMT_MISC=y ++ ++# ++# Power management options ++# ++CONFIG_PM=y ++CONFIG_PM_DEBUG=y ++# CONFIG_PM_VERBOSE is not set ++CONFIG_CAN_PM_TRACE=y ++CONFIG_PM_SLEEP=y ++CONFIG_SUSPEND=y ++CONFIG_SUSPEND_FREEZER=y ++# CONFIG_APM_EMULATION is not set ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++# CONFIG_IP_MULTICAST is not set ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++CONFIG_INET_XFRM_MODE_TRANSPORT=y ++CONFIG_INET_XFRM_MODE_TUNNEL=y ++CONFIG_INET_XFRM_MODE_BEET=y ++# CONFIG_INET_LRO is not set ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETLABEL is not set ++# CONFIG_NETWORK_SECMARK is not set ++CONFIG_NETFILTER=y ++# CONFIG_NETFILTER_DEBUG is not set ++CONFIG_NETFILTER_ADVANCED=y ++ ++# ++# Core Netfilter Configuration ++# ++# CONFIG_NETFILTER_NETLINK_QUEUE is not set ++# CONFIG_NETFILTER_NETLINK_LOG is not set ++# CONFIG_NF_CONNTRACK is not set ++CONFIG_NETFILTER_XTABLES=m ++# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set ++# CONFIG_NETFILTER_XT_TARGET_MARK is not set ++# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set ++# 
CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set ++# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set ++# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set ++# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set ++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set ++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set ++# CONFIG_NETFILTER_XT_MATCH_ESP is not set ++# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set ++# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set ++# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set ++# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set ++# CONFIG_NETFILTER_XT_MATCH_MAC is not set ++# CONFIG_NETFILTER_XT_MATCH_MARK is not set ++# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set ++# CONFIG_NETFILTER_XT_MATCH_OWNER is not set ++# CONFIG_NETFILTER_XT_MATCH_POLICY is not set ++# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set ++# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set ++# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set ++# CONFIG_NETFILTER_XT_MATCH_REALM is not set ++# CONFIG_NETFILTER_XT_MATCH_RECENT is not set ++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set ++# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set ++# CONFIG_NETFILTER_XT_MATCH_STRING is not set ++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set ++# CONFIG_NETFILTER_XT_MATCH_TIME is not set ++# CONFIG_NETFILTER_XT_MATCH_U32 is not set ++# CONFIG_IP_VS is not set ++ ++# ++# IP: Netfilter Configuration ++# ++# CONFIG_NF_DEFRAG_IPV4 is not set ++# CONFIG_IP_NF_QUEUE is not set ++CONFIG_IP_NF_IPTABLES=m ++# CONFIG_IP_NF_MATCH_ADDRTYPE is not set ++# CONFIG_IP_NF_MATCH_AH is not set ++# CONFIG_IP_NF_MATCH_ECN is not set ++# CONFIG_IP_NF_MATCH_TTL is not set ++CONFIG_IP_NF_FILTER=m ++# CONFIG_IP_NF_TARGET_REJECT is not set ++# CONFIG_IP_NF_TARGET_LOG is not set ++# CONFIG_IP_NF_TARGET_ULOG is not set ++# CONFIG_IP_NF_TARGET_IDLETIMER is not set ++# CONFIG_IP_NF_MANGLE is not set ++# CONFIG_IP_NF_RAW is not set ++# CONFIG_IP_NF_SECURITY is not set ++# CONFIG_IP_NF_ARPTABLES is not set ++CONFIG_IP_NF_HB=m ++# CONFIG_IP_NF_HB_DEBUG is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++CONFIG_PHONET=m ++# CONFIG_NET_SCHED is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_NET_TCPPROBE is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++CONFIG_BT=m ++CONFIG_BT_L2CAP=m ++CONFIG_BT_SCO=m ++CONFIG_BT_RFCOMM=m ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_HIDP=m ++ ++# ++# Bluetooth device drivers ++# ++# CONFIG_BT_HCIUSB is not set ++# CONFIG_BT_HCIBTUSB is not set ++# CONFIG_BT_HCIBTSDIO is not set ++# CONFIG_BT_HCIUART is not set ++# CONFIG_BT_HCIBCM203X is not set ++# CONFIG_BT_HCIBPA10X is not set ++# CONFIG_BT_HCIBFUSB is not set ++# CONFIG_BT_HCIBRF6150 is not set ++CONFIG_BT_HCIH4P=m ++# CONFIG_BT_HCIVHCI is not set ++# CONFIG_AF_RXRPC is not set ++CONFIG_WIRELESS=y ++CONFIG_CFG80211=y ++CONFIG_NL80211=y ++CONFIG_WIRELESS_OLD_REGULATORY=y ++CONFIG_WIRELESS_EXT=y ++CONFIG_WIRELESS_EXT_SYSFS=y ++CONFIG_MAC80211=m ++ ++# ++# Rate control algorithm selection ++# ++# CONFIG_MAC80211_RC_PID is not set ++CONFIG_MAC80211_RC_MINSTREL=y ++# 
CONFIG_MAC80211_RC_DEFAULT_PID is not set ++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y ++CONFIG_MAC80211_RC_DEFAULT="minstrel" ++# CONFIG_MAC80211_MESH is not set ++# CONFIG_MAC80211_LEDS is not set ++# CONFIG_MAC80211_DEBUGFS is not set ++# CONFIG_MAC80211_DEBUG_MENU is not set ++# CONFIG_IEEE80211 is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER_PATH="" ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++CONFIG_FW_LOADER=y ++CONFIG_FIRMWARE_IN_KERNEL=y ++CONFIG_EXTRA_FIRMWARE="" ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not set ++# CONFIG_SYS_HYPERVISOR is not set ++CONFIG_CONNECTOR=y ++CONFIG_PROC_EVENTS=y ++CONFIG_MTD=y ++# CONFIG_MTD_DEBUG is not set ++# CONFIG_MTD_CONCAT is not set ++CONFIG_MTD_PARTITIONS=y ++# CONFIG_MTD_REDBOOT_PARTS is not set ++# CONFIG_MTD_CMDLINE_PARTS is not set ++# CONFIG_MTD_AFS_PARTS is not set ++# CONFIG_MTD_AR7_PARTS is not set ++ ++# ++# User Modules And Translation Layers ++# ++CONFIG_MTD_CHAR=y ++# CONFIG_MTD_BLKDEVS is not set ++# CONFIG_MTD_BLOCK is not set ++# CONFIG_MTD_BLOCK_RO is not set ++# CONFIG_FTL is not set ++# CONFIG_NFTL is not set ++# CONFIG_INFTL is not set ++# CONFIG_RFD_FTL is not set ++# CONFIG_SSFDC is not set ++CONFIG_MTD_OOPS=y ++ ++# ++# RAM/ROM/Flash chip drivers ++# ++# CONFIG_MTD_CFI is not set ++# CONFIG_MTD_JEDECPROBE is not set ++CONFIG_MTD_MAP_BANK_WIDTH_1=y ++CONFIG_MTD_MAP_BANK_WIDTH_2=y ++CONFIG_MTD_MAP_BANK_WIDTH_4=y ++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set ++CONFIG_MTD_CFI_I1=y ++CONFIG_MTD_CFI_I2=y ++# CONFIG_MTD_CFI_I4 is not set ++# CONFIG_MTD_CFI_I8 is not set ++# CONFIG_MTD_RAM is not set ++# CONFIG_MTD_ROM is not set ++# CONFIG_MTD_ABSENT is not set ++ ++# ++# Mapping drivers for chip access ++# ++# CONFIG_MTD_COMPLEX_MAPPINGS is not set ++# CONFIG_MTD_PLATRAM is not set ++ ++# ++# Self-contained MTD device drivers ++# ++# CONFIG_MTD_DATAFLASH is not set ++# CONFIG_MTD_M25P80 is not set ++# CONFIG_MTD_SLRAM is not set ++# CONFIG_MTD_PHRAM is not set ++# CONFIG_MTD_MTDRAM is not set ++# CONFIG_MTD_BLOCK2MTD is not set ++ ++# ++# Disk-On-Chip Device Drivers ++# ++# CONFIG_MTD_DOC2000 is not set ++# CONFIG_MTD_DOC2001 is not set ++# CONFIG_MTD_DOC2001PLUS is not set ++# CONFIG_MTD_NAND is not set ++CONFIG_MTD_ONENAND=y ++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set ++# CONFIG_MTD_ONENAND_GENERIC is not set ++CONFIG_MTD_ONENAND_OMAP2=y ++# CONFIG_MTD_ONENAND_OTP is not set ++# CONFIG_MTD_ONENAND_2X_PROGRAM is not set ++# CONFIG_MTD_ONENAND_SIM is not set ++ ++# ++# UBI - Unsorted block images ++# ++CONFIG_MTD_UBI=y ++CONFIG_MTD_UBI_WL_THRESHOLD=4096 ++CONFIG_MTD_UBI_BEB_RESERVE=1 ++# CONFIG_MTD_UBI_GLUEBI is not set ++ ++# ++# UBI debugging options ++# ++# CONFIG_MTD_UBI_DEBUG is not set ++# CONFIG_PARPORT is not set ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_COW_COMMON is not set ++CONFIG_BLK_DEV_LOOP=y ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_NBD is not set ++# CONFIG_BLK_DEV_UB is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++# CONFIG_BLK_DEV_XIP is not set ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++CONFIG_MISC_DEVICES=y ++# CONFIG_EEPROM_93CX6 is not set ++CONFIG_NOKIA_AV_DETECT=m ++# CONFIG_ICS932S401 is not set ++CONFIG_OMAP_STI=y ++CONFIG_OMAP_STI_CONSOLE=y ++# CONFIG_ENCLOSURE_SERVICES is not set 
++# CONFIG_C2PORT is not set ++CONFIG_OMAP_SSI=m ++CONFIG_SSI_MCSAAB_IMP=m ++CONFIG_SSI_CMT_SPEECH=m ++# CONFIG_SSI_CMT_SPEECH_DEBUG is not set ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI=m ++CONFIG_SCSI_DMA=y ++# CONFIG_SCSI_TGT is not set ++# CONFIG_SCSI_NETLINK is not set ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=m ++# CONFIG_CHR_DEV_ST is not set ++# CONFIG_CHR_DEV_OSST is not set ++# CONFIG_BLK_DEV_SR is not set ++# CONFIG_CHR_DEV_SG is not set ++# CONFIG_CHR_DEV_SCH is not set ++ ++# ++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs ++# ++CONFIG_SCSI_MULTI_LUN=y ++# CONFIG_SCSI_CONSTANTS is not set ++# CONFIG_SCSI_LOGGING is not set ++CONFIG_SCSI_SCAN_ASYNC=y ++CONFIG_SCSI_WAIT_SCAN=m ++ ++# ++# SCSI Transports ++# ++# CONFIG_SCSI_SPI_ATTRS is not set ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++CONFIG_SCSI_LOWLEVEL=y ++# CONFIG_ISCSI_TCP is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_DH is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++CONFIG_TUN=m ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++CONFIG_MII=m ++# CONFIG_AX88796 is not set ++CONFIG_SMC91X=m ++# CONFIG_DM9000 is not set ++# CONFIG_ENC28J60 is not set ++# CONFIG_SMC911X is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set ++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set ++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set ++# CONFIG_B44 is not set ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++CONFIG_WLAN_80211=y ++# CONFIG_LIBERTAS is not set ++# CONFIG_LIBERTAS_THINFIRM is not set ++# CONFIG_USB_ZD1201 is not set ++# CONFIG_USB_NET_RNDIS_WLAN is not set ++# CONFIG_RTL8187 is not set ++# CONFIG_MAC80211_HWSIM is not set ++# CONFIG_P54_COMMON is not set ++# CONFIG_IWLWIFI_LEDS is not set ++# CONFIG_HOSTAP is not set ++# CONFIG_B43 is not set ++# CONFIG_B43LEGACY is not set ++# CONFIG_ZD1211RW is not set ++# CONFIG_RT2X00 is not set ++CONFIG_WL12XX=y ++CONFIG_WL1251=m ++# CONFIG_WL1271 is not set ++ ++# ++# USB Network Adapters ++# ++# CONFIG_USB_CATC is not set ++# CONFIG_USB_KAWETH is not set ++# CONFIG_USB_PEGASUS is not set ++# CONFIG_USB_RTL8150 is not set ++# CONFIG_USB_USBNET is not set ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_KEYBOARD_SUNKBD is not set ++# CONFIG_KEYBOARD_LKKBD is not set ++# CONFIG_KEYBOARD_XTKBD is not set ++# 
CONFIG_KEYBOARD_NEWTON is not set ++# CONFIG_KEYBOARD_STOWAWAY is not set ++CONFIG_KEYBOARD_TWL4030=y ++# CONFIG_KEYBOARD_LM8323 is not set ++# CONFIG_KEYBOARD_GPIO is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_TOUCHSCREEN_ADS7846 is not set ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_MK712 is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set ++CONFIG_TOUCHSCREEN_TSC2005=m ++# CONFIG_TOUCHSCREEN_TSC210X is not set ++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++CONFIG_INPUT_LIRC=y ++CONFIG_LIRC_DEV=m ++CONFIG_LIRC_RX51=m ++CONFIG_INPUT_MISC=y ++# CONFIG_INPUT_ATI_REMOTE is not set ++# CONFIG_INPUT_ATI_REMOTE2 is not set ++# CONFIG_INPUT_KEYSPAN_REMOTE is not set ++# CONFIG_INPUT_POWERMATE is not set ++# CONFIG_INPUT_YEALINK is not set ++# CONFIG_INPUT_CM109 is not set ++CONFIG_INPUT_UINPUT=m ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=4 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ++# CONFIG_SERIAL_8250_EXTENDED is not set ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_IPMI_HANDLER is not set ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_OMAP3_ROM=y ++# CONFIG_NVRAM is not set ++# CONFIG_R3964 is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_TCG_TPM is not set ++CONFIG_I2C=y ++CONFIG_I2C_BOARDINFO=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_HELPER_AUTO=y ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_GPIO is not set ++# CONFIG_I2C_OCORES is not set ++CONFIG_I2C_OMAP=y ++# CONFIG_I2C_SIMTEC is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_PARPORT_LIGHT is not set ++# CONFIG_I2C_TAOS_EVM is not set ++# CONFIG_I2C_TINY_USB is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_STUB is not set ++ ++# ++# Miscellaneous I2C Chip support ++# ++# CONFIG_DS1682 is not set ++# CONFIG_AT24 is not set ++# CONFIG_SENSORS_EEPROM is not set ++CONFIG_TPA6130A2=y ++# CONFIG_SENSORS_PCF8574 is not set ++# CONFIG_PCF8575 is not set ++# CONFIG_SENSORS_PCA9539 is not set ++# CONFIG_SENSORS_PCF8591 is not set ++# CONFIG_TPS65010 is not set ++CONFIG_TWL4030_MADC=y ++CONFIG_TWL4030_PWRBUTTON=y ++CONFIG_TWL4030_POWEROFF=y ++# CONFIG_SENSORS_MAX6875 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++CONFIG_SENSORS_TSL2563=m ++# CONFIG_LP5521 is not set ++CONFIG_LIS302DL=m ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# CONFIG_I2C_DEBUG_CHIP is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++ ++# ++# SPI Master Controller Drivers ++# ++# CONFIG_SPI_BITBANG 
is not set ++CONFIG_SPI_OMAP24XX=y ++ ++# ++# SPI Protocol Masters ++# ++# CONFIG_SPI_AT25 is not set ++# CONFIG_SPI_TSC210X is not set ++# CONFIG_SPI_TSC2301 is not set ++# CONFIG_SPI_SPIDEV is not set ++# CONFIG_SPI_TLE62X0 is not set ++CONFIG_ARCH_REQUIRE_GPIOLIB=y ++CONFIG_GPIOLIB=y ++# CONFIG_DEBUG_GPIO is not set ++CONFIG_GPIO_SYSFS=y ++ ++# ++# Memory mapped GPIO expanders: ++# ++ ++# ++# I2C GPIO expanders: ++# ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCF857X is not set ++CONFIG_GPIO_TWL4030=y ++ ++# ++# PCI GPIO expanders: ++# ++ ++# ++# SPI GPIO expanders: ++# ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MCP23S08 is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_SUPPLY is not set ++CONFIG_HWMON=y ++# CONFIG_HWMON_VID is not set ++# CONFIG_SENSORS_AD7414 is not set ++# CONFIG_SENSORS_AD7418 is not set ++# CONFIG_SENSORS_ADCXX is not set ++# CONFIG_SENSORS_ADM1021 is not set ++# CONFIG_SENSORS_ADM1025 is not set ++# CONFIG_SENSORS_ADM1026 is not set ++# CONFIG_SENSORS_ADM1029 is not set ++# CONFIG_SENSORS_ADM1031 is not set ++# CONFIG_SENSORS_ADM9240 is not set ++# CONFIG_SENSORS_ADT7462 is not set ++# CONFIG_SENSORS_ADT7470 is not set ++# CONFIG_SENSORS_ADT7473 is not set ++# CONFIG_SENSORS_ATXP1 is not set ++# CONFIG_SENSORS_DS1621 is not set ++# CONFIG_SENSORS_F71805F is not set ++# CONFIG_SENSORS_F71882FG is not set ++# CONFIG_SENSORS_F75375S is not set ++# CONFIG_SENSORS_GL518SM is not set ++# CONFIG_SENSORS_GL520SM is not set ++# CONFIG_SENSORS_IT87 is not set ++# CONFIG_SENSORS_LM63 is not set ++# CONFIG_SENSORS_LM70 is not set ++# CONFIG_SENSORS_LM75 is not set ++# CONFIG_SENSORS_LM77 is not set ++# CONFIG_SENSORS_LM78 is not set ++# CONFIG_SENSORS_LM80 is not set ++# CONFIG_SENSORS_LM83 is not set ++# CONFIG_SENSORS_LM85 is not set ++# CONFIG_SENSORS_LM87 is not set ++# CONFIG_SENSORS_LM90 is not set ++# CONFIG_SENSORS_LM92 is not set ++# CONFIG_SENSORS_LM93 is not set ++# CONFIG_SENSORS_MAX1111 is not set ++# CONFIG_SENSORS_MAX1619 is not set ++# CONFIG_SENSORS_MAX6650 is not set ++# CONFIG_SENSORS_PC87360 is not set ++# CONFIG_SENSORS_PC87427 is not set ++# CONFIG_SENSORS_DME1737 is not set ++# CONFIG_SENSORS_SMSC47M1 is not set ++# CONFIG_SENSORS_SMSC47M192 is not set ++# CONFIG_SENSORS_SMSC47B397 is not set ++# CONFIG_SENSORS_ADS7828 is not set ++# CONFIG_SENSORS_THMC50 is not set ++# CONFIG_SENSORS_VT1211 is not set ++# CONFIG_SENSORS_W83781D is not set ++# CONFIG_SENSORS_W83791D is not set ++# CONFIG_SENSORS_W83792D is not set ++# CONFIG_SENSORS_W83793 is not set ++# CONFIG_SENSORS_W83L785TS is not set ++# CONFIG_SENSORS_W83L786NG is not set ++# CONFIG_SENSORS_W83627HF is not set ++# CONFIG_SENSORS_W83627EHF is not set ++# CONFIG_SENSORS_TSC210X is not set ++CONFIG_SENSORS_OMAP34XX=y ++# CONFIG_HWMON_DEBUG_CHIP is not set ++# CONFIG_THERMAL is not set ++# CONFIG_THERMAL_HWMON is not set ++CONFIG_WATCHDOG=y ++# CONFIG_WATCHDOG_NOWAYOUT is not set ++ ++# ++# Watchdog Device Drivers ++# ++# CONFIG_SOFT_WATCHDOG is not set ++CONFIG_OMAP_WATCHDOG=m ++CONFIG_TWL4030_WATCHDOG=m ++ ++# ++# USB-based Watchdog Cards ++# ++# CONFIG_USBPCWATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++ ++# ++# Sonics Silicon Backplane ++# ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_ASIC3 is not set ++# CONFIG_HTC_EGPIO is not set ++# CONFIG_HTC_PASIC3 is not set ++CONFIG_TWL4030_CORE=y ++CONFIG_TWL4030_POWER=y ++# CONFIG_MFD_TMIO is not set 
++# CONFIG_MFD_T7L66XB is not set ++# CONFIG_MFD_TC6387XB is not set ++# CONFIG_MFD_TC6393XB is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM8350_I2C is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++CONFIG_VIDEO_DEV=m ++CONFIG_VIDEO_V4L2_COMMON=m ++CONFIG_VIDEO_ALLOW_V4L1=y ++CONFIG_VIDEO_V4L1_COMPAT=y ++# CONFIG_DVB_CORE is not set ++CONFIG_VIDEO_MEDIA=m ++ ++# ++# Multimedia drivers ++# ++# CONFIG_MEDIA_ATTACH is not set ++CONFIG_MEDIA_TUNER=m ++CONFIG_MEDIA_TUNER_CUSTOMIZE=y ++# CONFIG_MEDIA_TUNER_SIMPLE is not set ++# CONFIG_MEDIA_TUNER_TDA8290 is not set ++# CONFIG_MEDIA_TUNER_TDA827X is not set ++# CONFIG_MEDIA_TUNER_TDA18271 is not set ++# CONFIG_MEDIA_TUNER_TDA9887 is not set ++# CONFIG_MEDIA_TUNER_TEA5761 is not set ++# CONFIG_MEDIA_TUNER_TEA5767 is not set ++# CONFIG_MEDIA_TUNER_MT20XX is not set ++# CONFIG_MEDIA_TUNER_MT2060 is not set ++# CONFIG_MEDIA_TUNER_MT2266 is not set ++# CONFIG_MEDIA_TUNER_MT2131 is not set ++# CONFIG_MEDIA_TUNER_QT1010 is not set ++# CONFIG_MEDIA_TUNER_XC2028 is not set ++# CONFIG_MEDIA_TUNER_XC5000 is not set ++# CONFIG_MEDIA_TUNER_MXL5005S is not set ++# CONFIG_MEDIA_TUNER_MXL5007T is not set ++CONFIG_VIDEO_V4L2=m ++CONFIG_VIDEO_V4L1=m ++CONFIG_VIDEOBUF_GEN=m ++CONFIG_VIDEOBUF_DMA_SG=m ++CONFIG_VIDEO_CAPTURE_DRIVERS=y ++# CONFIG_VIDEO_ADV_DEBUG is not set ++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set ++# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set ++ ++# ++# Encoders/decoders and other helper chips ++# ++ ++# ++# Audio decoders ++# ++# CONFIG_VIDEO_TVAUDIO is not set ++# CONFIG_VIDEO_TDA7432 is not set ++# CONFIG_VIDEO_TDA9840 is not set ++# CONFIG_VIDEO_TDA9875 is not set ++# CONFIG_VIDEO_TEA6415C is not set ++# CONFIG_VIDEO_TEA6420 is not set ++# CONFIG_VIDEO_MSP3400 is not set ++# CONFIG_VIDEO_CS5345 is not set ++# CONFIG_VIDEO_CS53L32A is not set ++# CONFIG_VIDEO_M52790 is not set ++# CONFIG_VIDEO_TLV320AIC23B is not set ++# CONFIG_VIDEO_WM8775 is not set ++# CONFIG_VIDEO_WM8739 is not set ++# CONFIG_VIDEO_VP27SMPX is not set ++ ++# ++# Video decoders ++# ++# CONFIG_VIDEO_BT819 is not set ++# CONFIG_VIDEO_BT856 is not set ++# CONFIG_VIDEO_BT866 is not set ++# CONFIG_VIDEO_KS0127 is not set ++# CONFIG_VIDEO_OV7670 is not set ++# CONFIG_VIDEO_TCM825X is not set ++CONFIG_VIDEO_ET8EK8=m ++CONFIG_VIDEO_AD5820=m ++CONFIG_VIDEO_ADP1653=m ++# CONFIG_VIDEO_SAA7110 is not set ++# CONFIG_VIDEO_SAA7111 is not set ++# CONFIG_VIDEO_SAA7114 is not set ++# CONFIG_VIDEO_SAA711X is not set ++# CONFIG_VIDEO_SAA717X is not set ++# CONFIG_VIDEO_SAA7191 is not set ++# CONFIG_VIDEO_TVP5150 is not set ++# CONFIG_VIDEO_VPX3220 is not set ++CONFIG_VIDEO_SMIA_SENSOR=m ++ ++# ++# Video and audio decoders ++# ++# CONFIG_VIDEO_CX25840 is not set ++ ++# ++# MPEG video encoders ++# ++# CONFIG_VIDEO_CX2341X is not set ++ ++# ++# Video encoders ++# ++# CONFIG_VIDEO_SAA7127 is not set ++# CONFIG_VIDEO_SAA7185 is not set ++# CONFIG_VIDEO_ADV7170 is not set ++# CONFIG_VIDEO_ADV7175 is not set ++ ++# ++# Video improvement chips ++# ++# CONFIG_VIDEO_UPD64031A is not set ++# CONFIG_VIDEO_UPD64083 is not set ++# CONFIG_VIDEO_VIVI is not set ++# CONFIG_VIDEO_CPIA is not set ++# CONFIG_VIDEO_CPIA2 is not set ++# CONFIG_VIDEO_SAA5246A is not set ++# CONFIG_VIDEO_SAA5249 is not set ++CONFIG_VIDEO_OMAP3=m ++CONFIG_VIDEO_SMIAREGS=m ++# CONFIG_SOC_CAMERA is not set ++CONFIG_V4L_USB_DRIVERS=y ++# CONFIG_USB_VIDEO_CLASS is not set ++# CONFIG_USB_GSPCA is not set ++# CONFIG_VIDEO_PVRUSB2 is not set ++# 
CONFIG_VIDEO_EM28XX is not set ++# CONFIG_VIDEO_USBVISION is not set ++# CONFIG_USB_VICAM is not set ++# CONFIG_USB_IBMCAM is not set ++# CONFIG_USB_KONICAWC is not set ++# CONFIG_USB_QUICKCAM_MESSENGER is not set ++# CONFIG_USB_ET61X251 is not set ++# CONFIG_VIDEO_OVCAMCHIP is not set ++# CONFIG_USB_OV511 is not set ++# CONFIG_USB_SE401 is not set ++# CONFIG_USB_SN9C102 is not set ++# CONFIG_USB_STV680 is not set ++# CONFIG_USB_ZC0301 is not set ++# CONFIG_USB_PWC is not set ++# CONFIG_USB_ZR364XX is not set ++# CONFIG_USB_STKWEBCAM is not set ++# CONFIG_USB_S2255 is not set ++CONFIG_RADIO_ADAPTERS=y ++CONFIG_I2C_SI4713=m ++# CONFIG_USB_DSBR is not set ++# CONFIG_USB_SI470X is not set ++CONFIG_I2C_BCM2048=m ++# CONFIG_USB_MR800 is not set ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++CONFIG_PVR=m ++# CONFIG_DRM_VER_ORIG is not set ++# CONFIG_DRM_VER_TUNGSTEN is not set ++# CONFIG_DRM_TUNGSTEN is not set ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++CONFIG_FB=y ++# CONFIG_FIRMWARE_EDID is not set ++# CONFIG_FB_DDC is not set ++# CONFIG_FB_BOOT_VESA_SUPPORT is not set ++CONFIG_FB_CFB_FILLRECT=y ++CONFIG_FB_CFB_COPYAREA=y ++CONFIG_FB_CFB_IMAGEBLIT=y ++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set ++# CONFIG_FB_SYS_FILLRECT is not set ++# CONFIG_FB_SYS_COPYAREA is not set ++# CONFIG_FB_SYS_IMAGEBLIT is not set ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++# CONFIG_FB_SYS_FOPS is not set ++# CONFIG_FB_SVGALIB is not set ++# CONFIG_FB_MACMODES is not set ++# CONFIG_FB_BACKLIGHT is not set ++# CONFIG_FB_MODE_HELPERS is not set ++# CONFIG_FB_TILEBLITTING is not set ++ ++# ++# Frame buffer hardware drivers ++# ++# CONFIG_FB_UVESA is not set ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_MB862XX is not set ++CONFIG_FB_OMAP_BOOTLOADER_INIT=y ++CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=14 ++CONFIG_OMAP2_DSS=y ++CONFIG_OMAP2_DSS_VRAM_SIZE=0 ++# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set ++# CONFIG_OMAP2_DSS_RFBI is not set ++CONFIG_OMAP2_DSS_VENC=y ++CONFIG_OMAP2_DSS_SDI=y ++# CONFIG_OMAP2_DSS_DSI is not set ++# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set ++CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0 ++ ++# ++# OMAP2/3 Display Device Drivers ++# ++# CONFIG_PANEL_NEVADA is not set ++CONFIG_PANEL_ACX565AKM=y ++# CONFIG_PANEL_GENERIC is not set ++# CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C is not set ++# CONFIG_PANEL_SHARP_LS037V7DW01 is not set ++CONFIG_FB_OMAP2=y ++# CONFIG_FB_OMAP2_DEBUG_SUPPORT is not set ++# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set ++CONFIG_FB_OMAP2_NUM_FBS=3 ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++CONFIG_BACKLIGHT_CLASS_DEVICE=y ++# CONFIG_BACKLIGHT_CORGI is not set ++ ++# ++# Display device support ++# ++CONFIG_DISPLAY_SUPPORT=y ++ ++# ++# Display hardware drivers ++# ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++# CONFIG_FRAMEBUFFER_CONSOLE is not set ++# CONFIG_LOGO is not set ++CONFIG_SOUND=y ++# CONFIG_SOUND_OSS_CORE is not set ++CONFIG_SND=y ++CONFIG_SND_TIMER=y ++CONFIG_SND_PCM=y ++CONFIG_SND_JACK=y ++# CONFIG_SND_SEQUENCER is not set ++# CONFIG_SND_MIXER_OSS is not set ++# CONFIG_SND_PCM_OSS is not set ++# CONFIG_SND_DYNAMIC_MINORS is not set ++CONFIG_SND_SUPPORT_OLD_API=y ++CONFIG_SND_VERBOSE_PROCFS=y ++# CONFIG_SND_VERBOSE_PRINTK is not set ++# CONFIG_SND_DEBUG is not set ++CONFIG_SND_DRIVERS=y ++# CONFIG_SND_DUMMY is not set ++# CONFIG_SND_MTPAV is not set ++# CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_MPU401 is not 
set ++CONFIG_SND_ARM=y ++CONFIG_SND_SPI=y ++# CONFIG_SND_USB is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_OMAP_SOC=y ++CONFIG_SND_OMAP_SOC_MCBSP=y ++CONFIG_SND_OMAP_SOC_RX51=y ++# CONFIG_SND_SOC_ALL_CODECS is not set ++CONFIG_SND_SOC_TLV320AIC3X=y ++# CONFIG_SOUND_PRIME is not set ++CONFIG_HID_SUPPORT=y ++CONFIG_HID=m ++# CONFIG_HID_DEBUG is not set ++# CONFIG_HIDRAW is not set ++ ++# ++# USB Input Devices ++# ++CONFIG_USB_HID=m ++# CONFIG_HID_PID is not set ++# CONFIG_USB_HIDDEV is not set ++ ++# ++# USB HID Boot Protocol drivers ++# ++# CONFIG_USB_KBD is not set ++# CONFIG_USB_MOUSE is not set ++ ++# ++# Special HID drivers ++# ++# CONFIG_HID_COMPAT is not set ++# CONFIG_HID_A4TECH is not set ++# CONFIG_HID_APPLE is not set ++# CONFIG_HID_BELKIN is not set ++# CONFIG_HID_BRIGHT is not set ++# CONFIG_HID_CHERRY is not set ++# CONFIG_HID_CHICONY is not set ++# CONFIG_HID_CYPRESS is not set ++# CONFIG_HID_DELL is not set ++# CONFIG_HID_EZKEY is not set ++# CONFIG_HID_GYRATION is not set ++# CONFIG_HID_LOGITECH is not set ++# CONFIG_HID_MICROSOFT is not set ++# CONFIG_HID_MONTEREY is not set ++# CONFIG_HID_PANTHERLORD is not set ++# CONFIG_HID_PETALYNX is not set ++# CONFIG_HID_SAMSUNG is not set ++# CONFIG_HID_SONY is not set ++# CONFIG_HID_SUNPLUS is not set ++# CONFIG_THRUSTMASTER_FF is not set ++# CONFIG_ZEROPLUS_FF is not set ++CONFIG_USB_SUPPORT=y ++CONFIG_USB_ARCH_HAS_HCD=y ++CONFIG_USB_ARCH_HAS_OHCI=y ++CONFIG_USB_ARCH_HAS_EHCI=y ++CONFIG_USB=y ++# CONFIG_USB_DEBUG is not set ++# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set ++ ++# ++# Miscellaneous USB options ++# ++CONFIG_USB_DEVICEFS=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_DYNAMIC_MINORS=y ++CONFIG_USB_SUSPEND=y ++CONFIG_USB_OTG=y ++CONFIG_USB_OTG_WHITELIST=y ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++# CONFIG_USB_WUSB is not set ++# CONFIG_USB_WUSB_CBAF is not set ++ ++# ++# USB Host Controller Drivers ++# ++# CONFIG_USB_C67X00_HCD is not set ++# CONFIG_USB_EHCI_HCD is not set ++# CONFIG_USB_ISP116X_HCD is not set ++# CONFIG_USB_OHCI_HCD is not set ++# CONFIG_USB_SL811_HCD is not set ++# CONFIG_USB_R8A66597_HCD is not set ++# CONFIG_USB_HWA_HCD is not set ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_USB_MUSB_SOC=y ++ ++# ++# OMAP 343x high speed USB support ++# ++# CONFIG_USB_MUSB_HOST is not set ++# CONFIG_USB_MUSB_PERIPHERAL is not set ++CONFIG_USB_MUSB_OTG=y ++CONFIG_USB_GADGET_MUSB_HDRC=y ++CONFIG_USB_MUSB_HDRC_HCD=y ++# CONFIG_MUSB_PIO_ONLY is not set ++CONFIG_USB_INVENTRA_DMA=y ++# CONFIG_USB_TI_CPPI_DMA is not set ++CONFIG_USB_MUSB_DEBUG=y ++CONFIG_MUSB_PROC_FS=y ++ ++# ++# USB Device Class drivers ++# ++# CONFIG_USB_ACM is not set ++# CONFIG_USB_PRINTER is not set ++# CONFIG_USB_WDM is not set ++# CONFIG_USB_TMC is not set ++ ++# ++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; ++# ++ ++# ++# see USB_STORAGE Help for more information ++# ++CONFIG_USB_STORAGE=m ++# CONFIG_USB_STORAGE_DEBUG is not set ++# CONFIG_USB_STORAGE_DATAFAB is not set ++# CONFIG_USB_STORAGE_FREECOM is not set ++# CONFIG_USB_STORAGE_ISD200 is not set ++# CONFIG_USB_STORAGE_DPCM is not set ++# CONFIG_USB_STORAGE_USBAT is not set ++# CONFIG_USB_STORAGE_SDDR09 is not set ++# CONFIG_USB_STORAGE_SDDR55 is not set ++# CONFIG_USB_STORAGE_JUMPSHOT is not set ++# CONFIG_USB_STORAGE_ALAUDA is not set ++# CONFIG_USB_STORAGE_ONETOUCH is not set ++# CONFIG_USB_STORAGE_KARMA is not set ++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set ++CONFIG_USB_LIBUSUAL=y ++ ++# ++# USB Imaging devices ++# ++# CONFIG_USB_MDC800 is not set ++# 
CONFIG_USB_MICROTEK is not set ++ ++# ++# USB port drivers ++# ++# CONFIG_USB_SERIAL is not set ++ ++# ++# USB Miscellaneous drivers ++# ++# CONFIG_USB_EMI62 is not set ++# CONFIG_USB_EMI26 is not set ++# CONFIG_USB_ADUTUX is not set ++# CONFIG_USB_SEVSEG is not set ++# CONFIG_USB_RIO500 is not set ++# CONFIG_USB_LEGOTOWER is not set ++# CONFIG_USB_LCD is not set ++# CONFIG_USB_BERRY_CHARGE is not set ++# CONFIG_USB_LED is not set ++# CONFIG_USB_CYPRESS_CY7C63 is not set ++# CONFIG_USB_CYTHERM is not set ++# CONFIG_USB_PHIDGET is not set ++# CONFIG_USB_IDMOUSE is not set ++# CONFIG_USB_FTDI_ELAN is not set ++# CONFIG_USB_APPLEDISPLAY is not set ++# CONFIG_USB_LD is not set ++# CONFIG_USB_TRANCEVIBRATOR is not set ++# CONFIG_USB_IOWARRIOR is not set ++CONFIG_USB_TEST=m ++# CONFIG_USB_ISIGHTFW is not set ++# CONFIG_USB_VST is not set ++CONFIG_USB_GADGET=m ++# CONFIG_USB_GADGET_DEBUG is not set ++# CONFIG_USB_GADGET_DEBUG_FILES is not set ++# CONFIG_USB_GADGET_DEBUG_FS is not set ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_SELECTED=y ++# CONFIG_USB_GADGET_AT91 is not set ++# CONFIG_USB_GADGET_ATMEL_USBA is not set ++# CONFIG_USB_GADGET_FSL_USB2 is not set ++# CONFIG_USB_GADGET_LH7A40X is not set ++# CONFIG_USB_GADGET_OMAP is not set ++# CONFIG_USB_GADGET_PXA25X is not set ++# CONFIG_USB_GADGET_PXA27X is not set ++# CONFIG_USB_GADGET_S3C2410 is not set ++# CONFIG_USB_GADGET_M66592 is not set ++# CONFIG_USB_GADGET_AMD5536UDC is not set ++# CONFIG_USB_GADGET_FSL_QE is not set ++# CONFIG_USB_GADGET_NET2280 is not set ++# CONFIG_USB_GADGET_GOKU is not set ++# CONFIG_USB_GADGET_DUMMY_HCD is not set ++CONFIG_USB_GADGET_DUALSPEED=y ++CONFIG_USB_ZERO=m ++# CONFIG_USB_ZERO_HNPTEST is not set ++# CONFIG_USB_ETH is not set ++# CONFIG_USB_GADGETFS is not set ++CONFIG_USB_FILE_STORAGE=m ++CONFIG_USB_FILE_STORAGE_TEST=y ++# CONFIG_USB_G_SERIAL is not set ++# CONFIG_USB_MIDI_GADGET is not set ++# CONFIG_USB_G_PRINTER is not set ++# CONFIG_USB_CDC_COMPOSITE is not set ++CONFIG_USB_G_NOKIA=m ++CONFIG_USB_G_SOFTUPD=m ++ ++# ++# OTG and related infrastructure ++# ++CONFIG_USB_OTG_UTILS=y ++# CONFIG_USB_GPIO_VBUS is not set ++# CONFIG_ISP1301_OMAP is not set ++CONFIG_TWL4030_USB=y ++CONFIG_MMC=m ++# CONFIG_MMC_DEBUG is not set ++CONFIG_MMC_UNSAFE_RESUME=y ++ ++# ++# MMC/SD/SDIO Card Drivers ++# ++CONFIG_MMC_BLOCK=m ++# CONFIG_MMC_BLOCK_BOUNCE is not set ++# CONFIG_SDIO_UART is not set ++# CONFIG_MMC_TEST is not set ++ ++# ++# MMC/SD/SDIO Host Controller Drivers ++# ++# CONFIG_MMC_SDHCI is not set ++CONFIG_MMC_OMAP_HS=m ++# CONFIG_MMC_SPI is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=m ++ ++# ++# LED drivers ++# ++# CONFIG_LEDS_OMAP is not set ++# CONFIG_LEDS_OMAP_PWM is not set ++# CONFIG_LEDS_PCA9532 is not set ++# CONFIG_LEDS_GPIO is not set ++# CONFIG_LEDS_PCA955X is not set ++CONFIG_LEDS_TWL4030_VIBRA=m ++CONFIG_LEDS_LP5523=m ++ ++# ++# LED Triggers ++# ++CONFIG_LEDS_TRIGGERS=y ++CONFIG_LEDS_TRIGGER_TIMER=y ++CONFIG_LEDS_TRIGGER_HEARTBEAT=y ++# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set ++# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set ++CONFIG_RTC_LIB=y ++CONFIG_RTC_CLASS=m ++ ++# ++# RTC interfaces ++# ++CONFIG_RTC_INTF_SYSFS=y ++CONFIG_RTC_INTF_PROC=y ++CONFIG_RTC_INTF_DEV=y ++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set ++# CONFIG_RTC_DRV_TEST is not set ++ ++# ++# I2C RTC drivers ++# ++# CONFIG_RTC_DRV_DS1307 is not set ++# CONFIG_RTC_DRV_DS1374 is not set ++# CONFIG_RTC_DRV_DS1672 is not set ++# CONFIG_RTC_DRV_MAX6900 is not set ++# 
CONFIG_RTC_DRV_RS5C372 is not set ++# CONFIG_RTC_DRV_ISL1208 is not set ++# CONFIG_RTC_DRV_X1205 is not set ++# CONFIG_RTC_DRV_PCF8563 is not set ++# CONFIG_RTC_DRV_PCF8583 is not set ++# CONFIG_RTC_DRV_M41T80 is not set ++CONFIG_RTC_DRV_TWL4030=m ++# CONFIG_RTC_DRV_S35390A is not set ++# CONFIG_RTC_DRV_FM3130 is not set ++# CONFIG_RTC_DRV_RX8581 is not set ++ ++# ++# SPI RTC drivers ++# ++# CONFIG_RTC_DRV_M41T94 is not set ++# CONFIG_RTC_DRV_DS1305 is not set ++# CONFIG_RTC_DRV_DS1390 is not set ++# CONFIG_RTC_DRV_MAX6902 is not set ++# CONFIG_RTC_DRV_R9701 is not set ++# CONFIG_RTC_DRV_RS5C348 is not set ++# CONFIG_RTC_DRV_DS3234 is not set ++ ++# ++# Platform RTC drivers ++# ++# CONFIG_RTC_DRV_CMOS is not set ++# CONFIG_RTC_DRV_DS1286 is not set ++# CONFIG_RTC_DRV_DS1511 is not set ++# CONFIG_RTC_DRV_DS1553 is not set ++# CONFIG_RTC_DRV_DS1742 is not set ++# CONFIG_RTC_DRV_STK17TA8 is not set ++# CONFIG_RTC_DRV_M48T86 is not set ++# CONFIG_RTC_DRV_M48T35 is not set ++# CONFIG_RTC_DRV_M48T59 is not set ++# CONFIG_RTC_DRV_BQ4802 is not set ++# CONFIG_RTC_DRV_V3020 is not set ++ ++# ++# on-CPU RTC drivers ++# ++# CONFIG_DMADEVICES is not set ++CONFIG_REGULATOR=y ++# CONFIG_REGULATOR_DEBUG is not set ++# CONFIG_REGULATOR_FIXED_VOLTAGE is not set ++# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set ++# CONFIG_REGULATOR_BQ24022 is not set ++CONFIG_REGULATOR_TWL4030=y ++# CONFIG_UIO is not set ++ ++# ++# CBUS support ++# ++# CONFIG_CBUS is not set ++CONFIG_MPU_BRIDGE=m ++CONFIG_BRIDGE_DVFS=y ++CONFIG_BRIDGE_MEMPOOL_SIZE=0x412800 ++# CONFIG_BRIDGE_DEBUG is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=m ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++CONFIG_EXT3_FS=m ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT4_FS is not set ++CONFIG_JBD=m ++# CONFIG_JBD_DEBUG is not set ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++CONFIG_QUOTA=y ++# CONFIG_QUOTA_NETLINK_INTERFACE is not set ++CONFIG_PRINT_QUOTA_WARNING=y ++# CONFIG_QFMT_V1 is not set ++CONFIG_QFMT_V2=y ++CONFIG_QUOTACTL=y ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++CONFIG_FUSE_FS=m ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++ ++# ++# Miscellaneous filesystems ++# ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_JFFS2_FS is not set ++CONFIG_UBIFS_FS=y ++# CONFIG_UBIFS_FS_XATTR is not set ++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set ++CONFIG_UBIFS_FS_LZO=y ++CONFIG_UBIFS_FS_ZLIB=y ++# CONFIG_UBIFS_FS_DEBUG is not set ++CONFIG_CRAMFS=y ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# 
CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++CONFIG_NFS_V4=y ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=m ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=m ++CONFIG_SUNRPC_GSS=m ++# CONFIG_SUNRPC_REGISTER_V4 is not set ++CONFIG_RPCSEC_GSS_KRB5=m ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++# CONFIG_EFI_PARTITION is not set ++# CONFIG_SYSV68_PARTITION is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++CONFIG_PRINTK_TIME=y ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++CONFIG_MAGIC_SYSRQ=y ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_CHECK is not set ++CONFIG_DEBUG_KERNEL=y ++# CONFIG_DEBUG_SHIRQ is not set ++# CONFIG_DETECT_SOFTLOCKUP is not set ++# CONFIG_SCHED_DEBUG is not set ++# CONFIG_SCHEDSTATS is not set ++CONFIG_TIMER_STATS=y ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_SLUB_DEBUG_ON is not set ++# CONFIG_SLUB_STATS is not set ++# CONFIG_DEBUG_PREEMPT is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_RT_MUTEX_TESTER is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++# CONFIG_DEBUG_MUTEXES is not set ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT 
is not set ++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_DEBUG_KOBJECT is not set ++CONFIG_DEBUG_BUGVERBOSE=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_WRITECOUNT is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_LIST is not set ++# CONFIG_DEBUG_SG is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_KPROBES_SANITY_TEST is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set ++# CONFIG_LKDTM is not set ++# CONFIG_FAULT_INJECTION is not set ++# CONFIG_LATENCYTOP is not set ++# CONFIG_SYSCTL_SYSCALL_CHECK is not set ++CONFIG_HAVE_FUNCTION_TRACER=y ++ ++# ++# Tracers ++# ++# CONFIG_FUNCTION_TRACER is not set ++# CONFIG_IRQSOFF_TRACER is not set ++# CONFIG_PREEMPT_TRACER is not set ++# CONFIG_SCHED_TRACER is not set ++# CONFIG_CONTEXT_SWITCH_TRACER is not set ++# CONFIG_BOOT_TRACER is not set ++# CONFIG_STACK_TRACER is not set ++# CONFIG_DYNAMIC_PRINTK_DEBUG is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++# CONFIG_DEBUG_USER is not set ++# CONFIG_DEBUG_ERRORS is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_DEBUG_LL is not set ++ ++# ++# Security options ++# ++# CONFIG_KEYS is not set ++CONFIG_SECURITY=y ++# CONFIG_SECURITYFS is not set ++# CONFIG_SECURITY_NETWORK is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set ++# CONFIG_SECURITY_ROOTPLUG is not set ++CONFIG_SECURITY_LOWMEM=y ++CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=4096 ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_ALGAPI2=y ++CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_BLKCIPHER=y ++CONFIG_CRYPTO_BLKCIPHER2=y ++CONFIG_CRYPTO_HASH2=y ++CONFIG_CRYPTO_RNG2=y ++CONFIG_CRYPTO_MANAGER=y ++CONFIG_CRYPTO_MANAGER2=y ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++CONFIG_CRYPTO_CBC=y ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++CONFIG_CRYPTO_ECB=y ++# CONFIG_CRYPTO_LRW is not set ++CONFIG_CRYPTO_PCBC=m ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++CONFIG_CRYPTO_MD5=y ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++CONFIG_CRYPTO_AES=y ++# CONFIG_CRYPTO_ANUBIS is not set ++CONFIG_CRYPTO_ARC4=y ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++CONFIG_CRYPTO_DES=y ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# 
CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=y ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_HW=y ++ ++# ++# Library routines ++# ++CONFIG_BITREVERSE=y ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++CONFIG_CRC32=y ++CONFIG_CRC7=m ++CONFIG_LIBCRC32C=y ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT=y ++CONFIG_HAS_DMA=y +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/configs/rx51_tiny_defconfig kernel-2.6.28-20093908+0m5/arch/arm/configs/rx51_tiny_defconfig +--- linux-omap-2.6.28-omap1/arch/arm/configs/rx51_tiny_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/configs/rx51_tiny_defconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1877 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.28-rc9-omap1 ++# Sun Dec 21 19:26:10 2008 ++# ++CONFIG_ARM=y ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_GENERIC_GPIO=y ++CONFIG_GENERIC_TIME=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_MMU=y ++# CONFIG_NO_IOPORT is not set ++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_HARDIRQS_SW_RESEND=y ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++# CONFIG_ARCH_HAS_ILOG2_U32 is not set ++# CONFIG_ARCH_HAS_ILOG2_U64 is not set ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y ++CONFIG_VECTORS_BASE=0xffff0000 ++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_LOCK_KERNEL=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++CONFIG_LOCALVERSION_AUTO=y ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++# CONFIG_BSD_PROCESS_ACCT_V3 is not set ++# CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_CGROUPS is not set ++CONFIG_GROUP_SCHED=y ++CONFIG_FAIR_GROUP_SCHED=y ++# CONFIG_RT_GROUP_SCHED is not set ++CONFIG_USER_SCHED=y ++# CONFIG_CGROUP_SCHED is not set ++CONFIG_SYSFS_DEPRECATED=y ++CONFIG_SYSFS_DEPRECATED_V2=y ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++# CONFIG_SYSCTL_SYSCALL is not set ++CONFIG_KALLSYMS=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_KALLSYMS_EXTRA_PASS=y ++CONFIG_HOTPLUG=y ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_COMPAT_BRK=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_ANON_INODES=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_VM_EVENT_COUNTERS=y ++CONFIG_SLAB=y ++# CONFIG_SLUB is not set ++# CONFIG_SLOB is not set ++# CONFIG_PROFILING is not set ++# CONFIG_MARKERS is not set ++CONFIG_HAVE_OPROFILE=y ++CONFIG_KPROBES=y ++CONFIG_KRETPROBES=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_GENERIC_DMA_COHERENT=y ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++# CONFIG_TINY_SHMEM is not set 
++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SRCVERSION_ALL=y ++CONFIG_KMOD=y ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_LSF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++# CONFIG_IOSCHED_AS is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_IOSCHED_CFQ=y ++# CONFIG_DEFAULT_AS is not set ++# CONFIG_DEFAULT_DEADLINE is not set ++CONFIG_DEFAULT_CFQ=y ++# CONFIG_DEFAULT_NOOP is not set ++CONFIG_DEFAULT_IOSCHED="cfq" ++CONFIG_CLASSIC_RCU=y ++CONFIG_FREEZER=y ++ ++# ++# System Type ++# ++# CONFIG_ARCH_AAEC2000 is not set ++# CONFIG_ARCH_INTEGRATOR is not set ++# CONFIG_ARCH_REALVIEW is not set ++# CONFIG_ARCH_VERSATILE is not set ++# CONFIG_ARCH_AT91 is not set ++# CONFIG_ARCH_CLPS7500 is not set ++# CONFIG_ARCH_CLPS711X is not set ++# CONFIG_ARCH_EBSA110 is not set ++# CONFIG_ARCH_EP93XX is not set ++# CONFIG_ARCH_FOOTBRIDGE is not set ++# CONFIG_ARCH_NETX is not set ++# CONFIG_ARCH_H720X is not set ++# CONFIG_ARCH_IMX is not set ++# CONFIG_ARCH_IOP13XX is not set ++# CONFIG_ARCH_IOP32X is not set ++# CONFIG_ARCH_IOP33X is not set ++# CONFIG_ARCH_IXP23XX is not set ++# CONFIG_ARCH_IXP2000 is not set ++# CONFIG_ARCH_IXP4XX is not set ++# CONFIG_ARCH_L7200 is not set ++# CONFIG_ARCH_KIRKWOOD is not set ++# CONFIG_ARCH_KS8695 is not set ++# CONFIG_ARCH_NS9XXX is not set ++# CONFIG_ARCH_LOKI is not set ++# CONFIG_ARCH_MV78XX0 is not set ++# CONFIG_ARCH_MXC is not set ++# CONFIG_ARCH_ORION5X is not set ++# CONFIG_ARCH_PNX4008 is not set ++# CONFIG_ARCH_PXA is not set ++# CONFIG_ARCH_RPC is not set ++# CONFIG_ARCH_SA1100 is not set ++# CONFIG_ARCH_S3C2410 is not set ++# CONFIG_ARCH_SHARK is not set ++# CONFIG_ARCH_LH7A40X is not set ++# CONFIG_ARCH_DAVINCI is not set ++CONFIG_ARCH_OMAP=y ++# CONFIG_ARCH_MSM is not set ++ ++# ++# TI OMAP Implementations ++# ++CONFIG_ARCH_OMAP_OTG=y ++# CONFIG_ARCH_OMAP1 is not set ++# CONFIG_ARCH_OMAP2 is not set ++CONFIG_ARCH_OMAP3=y ++ ++# ++# OMAP Feature Selections ++# ++# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set ++# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set ++CONFIG_OMAP_SMARTREFLEX=y ++# CONFIG_OMAP_SMARTREFLEX_TESTING is not set ++CONFIG_OMAP_RESET_CLOCKS=y ++CONFIG_OMAP_BOOT_TAG=y ++CONFIG_OMAP_BOOT_REASON=y ++CONFIG_OMAP_COMPONENT_VERSION=y ++CONFIG_OMAP_GPIO_SWITCH=y ++CONFIG_OMAP_MUX=y ++CONFIG_OMAP_MUX_DEBUG=y ++CONFIG_OMAP_MUX_WARNINGS=y ++CONFIG_OMAP_MCBSP=y ++# CONFIG_OMAP_MMU_FWK is not set ++# CONFIG_OMAP_MBOX_FWK is not set ++# CONFIG_OMAP_MPU_TIMER is not set ++CONFIG_OMAP_32K_TIMER=y ++# CONFIG_OMAP3_DEBOBS is not set ++CONFIG_OMAP_32K_TIMER_HZ=128 ++CONFIG_OMAP_TICK_GPTIMER=1 ++CONFIG_OMAP_DM_TIMER=y ++# CONFIG_OMAP_LL_DEBUG_UART1 is not set ++# CONFIG_OMAP_LL_DEBUG_UART2 is not set ++CONFIG_OMAP_LL_DEBUG_UART3=y ++CONFIG_OMAP_SERIAL_WAKE=y ++# CONFIG_OMAP_PM_NONE is not set ++# CONFIG_OMAP_PM_NOOP is not set ++CONFIG_OMAP_PM_SRF=y ++CONFIG_ARCH_OMAP34XX=y ++CONFIG_ARCH_OMAP3430=y ++ ++# ++# OMAP Board Type ++# ++CONFIG_MACH_NOKIA_RX51=y ++# CONFIG_VIDEO_MACH_RX51 is not set ++# CONFIG_VIDEO_MACH_RX51_OLD_I2C is not set ++CONFIG_MACH_NOKIA_RX71=y ++# CONFIG_MACH_OMAP_LDP is not set ++# CONFIG_MACH_OMAP_3430SDP is not set ++# CONFIG_MACH_OMAP3EVM is not set ++# CONFIG_MACH_OMAP3_BEAGLE is not set ++# CONFIG_MACH_OVERO is not set ++# CONFIG_MACH_OMAP3_PANDORA is not set ++ ++# ++# 
Boot options ++# ++ ++# ++# Power management ++# ++ ++# ++# Processor Type ++# ++CONFIG_CPU_32=y ++CONFIG_CPU_32v6K=y ++CONFIG_CPU_V7=y ++CONFIG_CPU_32v7=y ++CONFIG_CPU_ABRT_EV7=y ++CONFIG_CPU_PABRT_IFAR=y ++CONFIG_CPU_CACHE_V7=y ++CONFIG_CPU_CACHE_VIPT=y ++CONFIG_CPU_COPY_V6=y ++CONFIG_CPU_TLB_V7=y ++CONFIG_CPU_HAS_ASID=y ++CONFIG_CPU_CP15=y ++CONFIG_CPU_CP15_MMU=y ++ ++# ++# Processor Features ++# ++CONFIG_ARM_THUMB=y ++# CONFIG_ARM_THUMBEE is not set ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_BPREDICT_DISABLE is not set ++CONFIG_HAS_TLS_REG=y ++# CONFIG_OUTER_CACHE is not set ++ ++# ++# Bus support ++# ++# CONFIG_PCI_SYSCALL is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++# CONFIG_PCCARD is not set ++ ++# ++# Kernel Features ++# ++CONFIG_TICK_ONESHOT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y ++CONFIG_VMSPLIT_3G=y ++# CONFIG_VMSPLIT_2G is not set ++# CONFIG_VMSPLIT_1G is not set ++CONFIG_PAGE_OFFSET=0xC0000000 ++CONFIG_PREEMPT=y ++CONFIG_HZ=128 ++CONFIG_AEABI=y ++# CONFIG_OABI_COMPAT is not set ++CONFIG_ARCH_FLATMEM_HAS_HOLES=y ++# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set ++# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++CONFIG_PAGEFLAGS_EXTENDED=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_RESOURCES_64BIT is not set ++# CONFIG_PHYS_ADDR_T_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=0 ++CONFIG_VIRT_TO_BUS=y ++CONFIG_UNEVICTABLE_LRU=y ++# CONFIG_LEDS is not set ++CONFIG_ALIGNMENT_TRAP=y ++ ++# ++# Boot options ++# ++CONFIG_ZBOOT_ROM_TEXT=0x0 ++CONFIG_ZBOOT_ROM_BSS=0x0 ++CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log" ++# CONFIG_XIP_KERNEL is not set ++CONFIG_KEXEC=y ++CONFIG_ATAGS_PROC=y ++ ++# ++# CPU Power Management ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_TABLE=y ++# CONFIG_CPU_FREQ_DEBUG is not set ++CONFIG_CPU_FREQ_STAT=y ++# CONFIG_CPU_FREQ_STAT_DETAILS is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set ++CONFIG_CPU_FREQ_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set ++CONFIG_CPU_IDLE=y ++CONFIG_CPU_IDLE_GOV_LADDER=y ++CONFIG_CPU_IDLE_GOV_MENU=y ++ ++# ++# Floating point emulation ++# ++ ++# ++# At least one emulation must be selected ++# ++CONFIG_VFP=y ++CONFIG_VFPv3=y ++CONFIG_NEON=y ++ ++# ++# Userspace binary formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_HAVE_AOUT=y ++# CONFIG_BINFMT_AOUT is not set ++CONFIG_BINFMT_MISC=y ++ ++# ++# Power management options ++# ++CONFIG_PM=y ++CONFIG_PM_DEBUG=y ++# CONFIG_PM_VERBOSE is not set ++CONFIG_CAN_PM_TRACE=y ++CONFIG_PM_SLEEP=y ++CONFIG_SUSPEND=y ++CONFIG_SUSPEND_FREEZER=y ++# CONFIG_APM_EMULATION is not set ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is 
not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++# CONFIG_IP_MULTICAST is not set ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++CONFIG_INET_XFRM_MODE_TRANSPORT=y ++CONFIG_INET_XFRM_MODE_TUNNEL=y ++CONFIG_INET_XFRM_MODE_BEET=y ++# CONFIG_INET_LRO is not set ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETLABEL is not set ++# CONFIG_NETWORK_SECMARK is not set ++CONFIG_NETFILTER=y ++# CONFIG_NETFILTER_DEBUG is not set ++CONFIG_NETFILTER_ADVANCED=y ++ ++# ++# Core Netfilter Configuration ++# ++# CONFIG_NETFILTER_NETLINK_QUEUE is not set ++# CONFIG_NETFILTER_NETLINK_LOG is not set ++# CONFIG_NF_CONNTRACK is not set ++CONFIG_NETFILTER_XTABLES=m ++# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set ++# CONFIG_NETFILTER_XT_TARGET_MARK is not set ++# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set ++# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set ++# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set ++# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set ++# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set ++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set ++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set ++# CONFIG_NETFILTER_XT_MATCH_ESP is not set ++# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set ++# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set ++# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set ++# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set ++# CONFIG_NETFILTER_XT_MATCH_MAC is not set ++# CONFIG_NETFILTER_XT_MATCH_MARK is not set ++# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set ++# CONFIG_NETFILTER_XT_MATCH_OWNER is not set ++# CONFIG_NETFILTER_XT_MATCH_POLICY is not set ++# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set ++# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set ++# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set ++# CONFIG_NETFILTER_XT_MATCH_REALM is not set ++# CONFIG_NETFILTER_XT_MATCH_RECENT is not set ++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set ++# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set ++# CONFIG_NETFILTER_XT_MATCH_STRING is not set ++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set ++# CONFIG_NETFILTER_XT_MATCH_TIME is not set ++# CONFIG_NETFILTER_XT_MATCH_U32 is not set ++# CONFIG_IP_VS is not set ++ ++# ++# IP: Netfilter Configuration ++# ++# CONFIG_NF_DEFRAG_IPV4 is not set ++# CONFIG_IP_NF_QUEUE is not set ++CONFIG_IP_NF_IPTABLES=m ++# CONFIG_IP_NF_MATCH_ADDRTYPE is not set ++# CONFIG_IP_NF_MATCH_AH is not set ++# CONFIG_IP_NF_MATCH_ECN is not set ++# CONFIG_IP_NF_MATCH_TTL is not set ++CONFIG_IP_NF_FILTER=m ++# CONFIG_IP_NF_TARGET_REJECT is not set ++# CONFIG_IP_NF_TARGET_LOG is not set ++# CONFIG_IP_NF_TARGET_ULOG is not set ++# CONFIG_IP_NF_TARGET_IDLETIMER is not set ++# CONFIG_IP_NF_MANGLE is not set ++# CONFIG_IP_NF_RAW is not set ++# CONFIG_IP_NF_SECURITY is not set ++# CONFIG_IP_NF_ARPTABLES is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not 
set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++# CONFIG_NET_SCHED is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_NET_TCPPROBE is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++CONFIG_BT=m ++CONFIG_BT_L2CAP=m ++CONFIG_BT_SCO=m ++CONFIG_BT_RFCOMM=m ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_HIDP=m ++ ++# ++# Bluetooth device drivers ++# ++# CONFIG_BT_HCIUSB is not set ++# CONFIG_BT_HCIBTUSB is not set ++# CONFIG_BT_HCIBTSDIO is not set ++# CONFIG_BT_HCIUART is not set ++# CONFIG_BT_HCIBCM203X is not set ++# CONFIG_BT_HCIBPA10X is not set ++# CONFIG_BT_HCIBFUSB is not set ++# CONFIG_BT_HCIBRF6150 is not set ++CONFIG_BT_HCIH4P=m ++# CONFIG_BT_HCIVHCI is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_PHONET is not set ++CONFIG_WIRELESS=y ++CONFIG_CFG80211=y ++CONFIG_NL80211=y ++CONFIG_WIRELESS_OLD_REGULATORY=y ++CONFIG_WIRELESS_EXT=y ++CONFIG_WIRELESS_EXT_SYSFS=y ++CONFIG_MAC80211=m ++ ++# ++# Rate control algorithm selection ++# ++CONFIG_MAC80211_RC_PID=y ++# CONFIG_MAC80211_RC_MINSTREL is not set ++CONFIG_MAC80211_RC_DEFAULT_PID=y ++# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set ++CONFIG_MAC80211_RC_DEFAULT="pid" ++# CONFIG_MAC80211_MESH is not set ++# CONFIG_MAC80211_LEDS is not set ++# CONFIG_MAC80211_DEBUGFS is not set ++# CONFIG_MAC80211_DEBUG_MENU is not set ++# CONFIG_IEEE80211 is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++CONFIG_FW_LOADER=y ++CONFIG_FIRMWARE_IN_KERNEL=y ++CONFIG_EXTRA_FIRMWARE="" ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not set ++# CONFIG_SYS_HYPERVISOR is not set ++# CONFIG_CONNECTOR is not set ++CONFIG_MTD=y ++# CONFIG_MTD_DEBUG is not set ++# CONFIG_MTD_CONCAT is not set ++CONFIG_MTD_PARTITIONS=y ++# CONFIG_MTD_REDBOOT_PARTS is not set ++# CONFIG_MTD_CMDLINE_PARTS is not set ++# CONFIG_MTD_AFS_PARTS is not set ++# CONFIG_MTD_AR7_PARTS is not set ++ ++# ++# User Modules And Translation Layers ++# ++CONFIG_MTD_CHAR=y ++# CONFIG_MTD_BLKDEVS is not set ++# CONFIG_MTD_BLOCK is not set ++# CONFIG_MTD_BLOCK_RO is not set ++# CONFIG_FTL is not set ++# CONFIG_NFTL is not set ++# CONFIG_INFTL is not set ++# CONFIG_RFD_FTL is not set ++# CONFIG_SSFDC is not set ++CONFIG_MTD_OOPS=y ++ ++# ++# RAM/ROM/Flash chip drivers ++# ++# CONFIG_MTD_CFI is not set ++# CONFIG_MTD_JEDECPROBE is not set ++CONFIG_MTD_MAP_BANK_WIDTH_1=y ++CONFIG_MTD_MAP_BANK_WIDTH_2=y ++CONFIG_MTD_MAP_BANK_WIDTH_4=y ++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set ++CONFIG_MTD_CFI_I1=y ++CONFIG_MTD_CFI_I2=y ++# CONFIG_MTD_CFI_I4 is not set ++# CONFIG_MTD_CFI_I8 is not set ++CONFIG_MTD_CFI_INTELEXT=y ++# CONFIG_MTD_CFI_AMDSTD is not set ++# CONFIG_MTD_CFI_STAA is not set ++CONFIG_MTD_CFI_UTIL=y ++# CONFIG_MTD_RAM is not set ++# CONFIG_MTD_ROM is not set ++# CONFIG_MTD_ABSENT is not set ++ ++# ++# Mapping drivers for chip access ++# ++# CONFIG_MTD_COMPLEX_MAPPINGS is not set ++# CONFIG_MTD_PHYSMAP is not set ++# CONFIG_MTD_ARM_INTEGRATOR is not set 
++# CONFIG_MTD_OMAP_NOR is not set ++# CONFIG_MTD_PLATRAM is not set ++ ++# ++# Self-contained MTD device drivers ++# ++# CONFIG_MTD_DATAFLASH is not set ++# CONFIG_MTD_M25P80 is not set ++# CONFIG_MTD_SLRAM is not set ++# CONFIG_MTD_PHRAM is not set ++# CONFIG_MTD_MTDRAM is not set ++# CONFIG_MTD_BLOCK2MTD is not set ++ ++# ++# Disk-On-Chip Device Drivers ++# ++# CONFIG_MTD_DOC2000 is not set ++# CONFIG_MTD_DOC2001 is not set ++# CONFIG_MTD_DOC2001PLUS is not set ++# CONFIG_MTD_NAND is not set ++CONFIG_MTD_ONENAND=y ++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set ++# CONFIG_MTD_ONENAND_GENERIC is not set ++CONFIG_MTD_ONENAND_OMAP2=y ++# CONFIG_MTD_ONENAND_OTP is not set ++# CONFIG_MTD_ONENAND_2X_PROGRAM is not set ++# CONFIG_MTD_ONENAND_SIM is not set ++ ++# ++# UBI - Unsorted block images ++# ++CONFIG_MTD_UBI=y ++CONFIG_MTD_UBI_WL_THRESHOLD=4096 ++CONFIG_MTD_UBI_BEB_RESERVE=1 ++# CONFIG_MTD_UBI_GLUEBI is not set ++ ++# ++# UBI debugging options ++# ++# CONFIG_MTD_UBI_DEBUG is not set ++# CONFIG_PARPORT is not set ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_COW_COMMON is not set ++CONFIG_BLK_DEV_LOOP=y ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_NBD is not set ++# CONFIG_BLK_DEV_UB is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++# CONFIG_BLK_DEV_XIP is not set ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++CONFIG_MISC_DEVICES=y ++# CONFIG_EEPROM_93CX6 is not set ++# CONFIG_ICS932S401 is not set ++# CONFIG_OMAP_STI is not set ++# CONFIG_ENCLOSURE_SERVICES is not set ++# CONFIG_C2PORT is not set ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI=m ++CONFIG_SCSI_DMA=y ++# CONFIG_SCSI_TGT is not set ++# CONFIG_SCSI_NETLINK is not set ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=m ++# CONFIG_CHR_DEV_ST is not set ++# CONFIG_CHR_DEV_OSST is not set ++# CONFIG_BLK_DEV_SR is not set ++# CONFIG_CHR_DEV_SG is not set ++# CONFIG_CHR_DEV_SCH is not set ++ ++# ++# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs ++# ++CONFIG_SCSI_MULTI_LUN=y ++# CONFIG_SCSI_CONSTANTS is not set ++# CONFIG_SCSI_LOGGING is not set ++CONFIG_SCSI_SCAN_ASYNC=y ++CONFIG_SCSI_WAIT_SCAN=m ++ ++# ++# SCSI Transports ++# ++# CONFIG_SCSI_SPI_ATTRS is not set ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++CONFIG_SCSI_LOWLEVEL=y ++# CONFIG_ISCSI_TCP is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_DH is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++CONFIG_TUN=m ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++CONFIG_MII=m ++# CONFIG_AX88796 is not set ++CONFIG_SMC91X=m ++# CONFIG_DM9000 is not set ++# CONFIG_ENC28J60 is not set ++# CONFIG_SMC911X is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set ++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set ++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set ++# CONFIG_B44 is not set ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++CONFIG_WLAN_80211=y ++# CONFIG_LIBERTAS is not set ++# CONFIG_LIBERTAS_THINFIRM is not set ++# CONFIG_USB_ZD1201 is not set ++# CONFIG_USB_NET_RNDIS_WLAN is not set ++# CONFIG_RTL8187 is not set ++# CONFIG_MAC80211_HWSIM is not set ++# CONFIG_P54_COMMON is not set ++# CONFIG_IWLWIFI_LEDS is not set ++# CONFIG_HOSTAP is not set ++# CONFIG_B43 is not set ++# CONFIG_B43LEGACY is not set ++# CONFIG_ZD1211RW is not set ++# CONFIG_RT2X00 is not set ++ ++# ++# USB Network Adapters ++# ++# CONFIG_USB_CATC is not set ++# CONFIG_USB_KAWETH is not set ++# CONFIG_USB_PEGASUS is not set ++# CONFIG_USB_RTL8150 is not set ++# CONFIG_USB_USBNET is not set ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_KEYBOARD_SUNKBD is not set ++# CONFIG_KEYBOARD_LKKBD is not set ++# CONFIG_KEYBOARD_XTKBD is not set ++# CONFIG_KEYBOARD_NEWTON is not set ++# CONFIG_KEYBOARD_STOWAWAY is not set ++# CONFIG_KEYBOARD_TWL4030 is not set ++# CONFIG_KEYBOARD_LM8323 is not set ++# CONFIG_KEYBOARD_GPIO is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_TOUCHSCREEN_ADS7846 is not set ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_MK712 is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 
++CONFIG_TOUCHSCREEN_TSC2005=m ++# CONFIG_TOUCHSCREEN_TSC210X is not set ++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_INPUT_ATI_REMOTE is not set ++# CONFIG_INPUT_ATI_REMOTE2 is not set ++# CONFIG_INPUT_KEYSPAN_REMOTE is not set ++# CONFIG_INPUT_POWERMATE is not set ++# CONFIG_INPUT_YEALINK is not set ++# CONFIG_INPUT_CM109 is not set ++CONFIG_INPUT_UINPUT=m ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=4 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ++# CONFIG_SERIAL_8250_EXTENDED is not set ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_IPMI_HANDLER is not set ++CONFIG_HW_RANDOM=m ++# CONFIG_NVRAM is not set ++# CONFIG_R3964 is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_TCG_TPM is not set ++CONFIG_I2C=y ++CONFIG_I2C_BOARDINFO=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_HELPER_AUTO=y ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_GPIO is not set ++# CONFIG_I2C_OCORES is not set ++CONFIG_I2C_OMAP=y ++# CONFIG_I2C_SIMTEC is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_PARPORT_LIGHT is not set ++# CONFIG_I2C_TAOS_EVM is not set ++# CONFIG_I2C_TINY_USB is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_STUB is not set ++ ++# ++# Miscellaneous I2C Chip support ++# ++# CONFIG_DS1682 is not set ++# CONFIG_AT24 is not set ++# CONFIG_SENSORS_EEPROM is not set ++# CONFIG_SENSORS_PCF8574 is not set ++# CONFIG_PCF8575 is not set ++# CONFIG_SENSORS_PCA9539 is not set ++# CONFIG_SENSORS_PCF8591 is not set ++# CONFIG_TPS65010 is not set ++CONFIG_TWL4030_MADC=y ++CONFIG_TWL4030_PWRBUTTON=y ++CONFIG_TWL4030_POWEROFF=y ++# CONFIG_SENSORS_MAX6875 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++CONFIG_SENSORS_TSL2563=m ++# CONFIG_LP5521 is not set ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# CONFIG_I2C_DEBUG_CHIP is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++ ++# ++# SPI Master Controller Drivers ++# ++# CONFIG_SPI_BITBANG is not set ++CONFIG_SPI_OMAP24XX=y ++ ++# ++# SPI Protocol Masters ++# ++# CONFIG_SPI_AT25 is not set ++# CONFIG_SPI_TSC210X is not set ++# CONFIG_SPI_TSC2301 is not set ++# CONFIG_SPI_SPIDEV is not set ++# CONFIG_SPI_TLE62X0 is not set ++CONFIG_ARCH_REQUIRE_GPIOLIB=y ++CONFIG_GPIOLIB=y ++# CONFIG_DEBUG_GPIO is not set ++CONFIG_GPIO_SYSFS=y ++ ++# ++# Memory mapped GPIO expanders: ++# ++ ++# ++# I2C GPIO expanders: ++# ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCF857X is not set ++CONFIG_GPIO_TWL4030=y ++ ++# ++# PCI GPIO expanders: ++# ++ ++# ++# SPI GPIO expanders: ++# ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MCP23S08 is not set ++# CONFIG_W1 is not set ++# CONFIG_POWER_SUPPLY is not set ++CONFIG_HWMON=y ++# CONFIG_HWMON_VID is not set ++# CONFIG_SENSORS_AD7414 is not set ++# 
CONFIG_SENSORS_AD7418 is not set ++# CONFIG_SENSORS_ADCXX is not set ++# CONFIG_SENSORS_ADM1021 is not set ++# CONFIG_SENSORS_ADM1025 is not set ++# CONFIG_SENSORS_ADM1026 is not set ++# CONFIG_SENSORS_ADM1029 is not set ++# CONFIG_SENSORS_ADM1031 is not set ++# CONFIG_SENSORS_ADM9240 is not set ++# CONFIG_SENSORS_ADT7462 is not set ++# CONFIG_SENSORS_ADT7470 is not set ++# CONFIG_SENSORS_ADT7473 is not set ++# CONFIG_SENSORS_ATXP1 is not set ++# CONFIG_SENSORS_DS1621 is not set ++# CONFIG_SENSORS_F71805F is not set ++# CONFIG_SENSORS_F71882FG is not set ++# CONFIG_SENSORS_F75375S is not set ++# CONFIG_SENSORS_GL518SM is not set ++# CONFIG_SENSORS_GL520SM is not set ++# CONFIG_SENSORS_IT87 is not set ++# CONFIG_SENSORS_LM63 is not set ++# CONFIG_SENSORS_LM70 is not set ++# CONFIG_SENSORS_LM75 is not set ++# CONFIG_SENSORS_LM77 is not set ++# CONFIG_SENSORS_LM78 is not set ++# CONFIG_SENSORS_LM80 is not set ++# CONFIG_SENSORS_LM83 is not set ++# CONFIG_SENSORS_LM85 is not set ++# CONFIG_SENSORS_LM87 is not set ++# CONFIG_SENSORS_LM90 is not set ++# CONFIG_SENSORS_LM92 is not set ++# CONFIG_SENSORS_LM93 is not set ++# CONFIG_SENSORS_MAX1111 is not set ++# CONFIG_SENSORS_MAX1619 is not set ++# CONFIG_SENSORS_MAX6650 is not set ++# CONFIG_SENSORS_PC87360 is not set ++# CONFIG_SENSORS_PC87427 is not set ++# CONFIG_SENSORS_DME1737 is not set ++# CONFIG_SENSORS_SMSC47M1 is not set ++# CONFIG_SENSORS_SMSC47M192 is not set ++# CONFIG_SENSORS_SMSC47B397 is not set ++# CONFIG_SENSORS_ADS7828 is not set ++# CONFIG_SENSORS_THMC50 is not set ++# CONFIG_SENSORS_VT1211 is not set ++# CONFIG_SENSORS_W83781D is not set ++# CONFIG_SENSORS_W83791D is not set ++# CONFIG_SENSORS_W83792D is not set ++# CONFIG_SENSORS_W83793 is not set ++# CONFIG_SENSORS_W83L785TS is not set ++# CONFIG_SENSORS_W83L786NG is not set ++# CONFIG_SENSORS_W83627HF is not set ++# CONFIG_SENSORS_W83627EHF is not set ++# CONFIG_SENSORS_TSC210X is not set ++CONFIG_SENSORS_OMAP34XX=y ++# CONFIG_HWMON_DEBUG_CHIP is not set ++# CONFIG_THERMAL is not set ++# CONFIG_THERMAL_HWMON is not set ++CONFIG_WATCHDOG=y ++# CONFIG_WATCHDOG_NOWAYOUT is not set ++ ++# ++# Watchdog Device Drivers ++# ++# CONFIG_SOFT_WATCHDOG is not set ++CONFIG_OMAP_WATCHDOG=m ++ ++# ++# USB-based Watchdog Cards ++# ++# CONFIG_USBPCWATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++ ++# ++# Sonics Silicon Backplane ++# ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_ASIC3 is not set ++# CONFIG_HTC_EGPIO is not set ++# CONFIG_HTC_PASIC3 is not set ++CONFIG_TWL4030_CORE=y ++CONFIG_TWL4030_POWER=y ++# CONFIG_MFD_TMIO is not set ++# CONFIG_MFD_T7L66XB is not set ++# CONFIG_MFD_TC6387XB is not set ++# CONFIG_MFD_TC6393XB is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM8350_I2C is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++CONFIG_VIDEO_DEV=m ++CONFIG_VIDEO_V4L2_COMMON=m ++CONFIG_VIDEO_ALLOW_V4L1=y ++CONFIG_VIDEO_V4L1_COMPAT=y ++# CONFIG_DVB_CORE is not set ++CONFIG_VIDEO_MEDIA=m ++ ++# ++# Multimedia drivers ++# ++# CONFIG_MEDIA_ATTACH is not set ++CONFIG_MEDIA_TUNER=m ++# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set ++CONFIG_MEDIA_TUNER_SIMPLE=m ++CONFIG_MEDIA_TUNER_TDA8290=m ++CONFIG_MEDIA_TUNER_TDA9887=m ++CONFIG_MEDIA_TUNER_TEA5761=m ++CONFIG_MEDIA_TUNER_TEA5767=m ++CONFIG_MEDIA_TUNER_MT20XX=m ++CONFIG_MEDIA_TUNER_XC2028=m ++CONFIG_MEDIA_TUNER_XC5000=m ++CONFIG_VIDEO_V4L2=m ++CONFIG_VIDEO_V4L1=m 
++CONFIG_VIDEO_CAPTURE_DRIVERS=y ++# CONFIG_VIDEO_ADV_DEBUG is not set ++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set ++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y ++# CONFIG_VIDEO_VIVI is not set ++# CONFIG_VIDEO_CPIA is not set ++# CONFIG_VIDEO_CPIA2 is not set ++# CONFIG_VIDEO_SAA5246A is not set ++# CONFIG_VIDEO_SAA5249 is not set ++# CONFIG_SOC_CAMERA is not set ++CONFIG_V4L_USB_DRIVERS=y ++# CONFIG_USB_VIDEO_CLASS is not set ++# CONFIG_USB_GSPCA is not set ++# CONFIG_VIDEO_PVRUSB2 is not set ++# CONFIG_VIDEO_EM28XX is not set ++# CONFIG_VIDEO_USBVISION is not set ++# CONFIG_USB_VICAM is not set ++# CONFIG_USB_IBMCAM is not set ++# CONFIG_USB_KONICAWC is not set ++# CONFIG_USB_QUICKCAM_MESSENGER is not set ++# CONFIG_USB_ET61X251 is not set ++# CONFIG_VIDEO_OVCAMCHIP is not set ++# CONFIG_USB_OV511 is not set ++# CONFIG_USB_SE401 is not set ++# CONFIG_USB_SN9C102 is not set ++# CONFIG_USB_STV680 is not set ++# CONFIG_USB_ZC0301 is not set ++# CONFIG_USB_PWC is not set ++# CONFIG_USB_ZR364XX is not set ++# CONFIG_USB_STKWEBCAM is not set ++# CONFIG_USB_S2255 is not set ++CONFIG_RADIO_ADAPTERS=y ++# CONFIG_USB_DSBR is not set ++# CONFIG_USB_SI470X is not set ++# CONFIG_USB_MR800 is not set ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++CONFIG_FB=y ++# CONFIG_FIRMWARE_EDID is not set ++# CONFIG_FB_DDC is not set ++# CONFIG_FB_BOOT_VESA_SUPPORT is not set ++# CONFIG_FB_CFB_FILLRECT is not set ++# CONFIG_FB_CFB_COPYAREA is not set ++# CONFIG_FB_CFB_IMAGEBLIT is not set ++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set ++# CONFIG_FB_SYS_FILLRECT is not set ++# CONFIG_FB_SYS_COPYAREA is not set ++# CONFIG_FB_SYS_IMAGEBLIT is not set ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++# CONFIG_FB_SYS_FOPS is not set ++# CONFIG_FB_SVGALIB is not set ++# CONFIG_FB_MACMODES is not set ++# CONFIG_FB_BACKLIGHT is not set ++# CONFIG_FB_MODE_HELPERS is not set ++# CONFIG_FB_TILEBLITTING is not set ++ ++# ++# Frame buffer hardware drivers ++# ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_MB862XX is not set ++# CONFIG_FB_OMAP is not set ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++ ++# ++# Display device support ++# ++CONFIG_DISPLAY_SUPPORT=y ++ ++# ++# Display hardware drivers ++# ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++# CONFIG_FRAMEBUFFER_CONSOLE is not set ++# CONFIG_LOGO is not set ++CONFIG_SOUND=y ++# CONFIG_SOUND_OSS_CORE is not set ++CONFIG_SND=y ++CONFIG_SND_TIMER=y ++CONFIG_SND_PCM=y ++# CONFIG_SND_SEQUENCER is not set ++# CONFIG_SND_MIXER_OSS is not set ++# CONFIG_SND_PCM_OSS is not set ++# CONFIG_SND_DYNAMIC_MINORS is not set ++CONFIG_SND_SUPPORT_OLD_API=y ++CONFIG_SND_VERBOSE_PROCFS=y ++# CONFIG_SND_VERBOSE_PRINTK is not set ++# CONFIG_SND_DEBUG is not set ++CONFIG_SND_DRIVERS=y ++# CONFIG_SND_DUMMY is not set ++# CONFIG_SND_MTPAV is not set ++# CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_MPU401 is not set ++CONFIG_SND_ARM=y ++CONFIG_SND_SPI=y ++# CONFIG_SND_USB is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_OMAP_SOC=y ++# CONFIG_SND_SOC_ALL_CODECS is not set ++# CONFIG_SOUND_PRIME is not set ++CONFIG_HID_SUPPORT=y ++CONFIG_HID=m ++# CONFIG_HID_DEBUG is not set ++# CONFIG_HIDRAW is not set ++ ++# ++# USB Input Devices ++# ++CONFIG_USB_HID=m ++# CONFIG_HID_PID is not set ++# CONFIG_USB_HIDDEV is not set ++ ++# ++# USB HID Boot Protocol drivers ++# ++# CONFIG_USB_KBD is not set ++# 
CONFIG_USB_MOUSE is not set ++ ++# ++# Special HID drivers ++# ++CONFIG_HID_COMPAT=y ++CONFIG_HID_A4TECH=m ++CONFIG_HID_APPLE=m ++CONFIG_HID_BELKIN=m ++CONFIG_HID_BRIGHT=m ++CONFIG_HID_CHERRY=m ++CONFIG_HID_CHICONY=m ++CONFIG_HID_CYPRESS=m ++CONFIG_HID_DELL=m ++CONFIG_HID_EZKEY=m ++CONFIG_HID_GYRATION=m ++CONFIG_HID_LOGITECH=m ++# CONFIG_LOGITECH_FF is not set ++# CONFIG_LOGIRUMBLEPAD2_FF is not set ++CONFIG_HID_MICROSOFT=m ++CONFIG_HID_MONTEREY=m ++CONFIG_HID_PANTHERLORD=m ++# CONFIG_PANTHERLORD_FF is not set ++CONFIG_HID_PETALYNX=m ++CONFIG_HID_SAMSUNG=m ++CONFIG_HID_SONY=m ++CONFIG_HID_SUNPLUS=m ++# CONFIG_THRUSTMASTER_FF is not set ++# CONFIG_ZEROPLUS_FF is not set ++CONFIG_USB_SUPPORT=y ++CONFIG_USB_ARCH_HAS_HCD=y ++CONFIG_USB_ARCH_HAS_OHCI=y ++CONFIG_USB_ARCH_HAS_EHCI=y ++CONFIG_USB=m ++CONFIG_USB_DEBUG=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++ ++# ++# Miscellaneous USB options ++# ++CONFIG_USB_DEVICEFS=y ++CONFIG_USB_DEVICE_CLASS=y ++# CONFIG_USB_DYNAMIC_MINORS is not set ++CONFIG_USB_SUSPEND=y ++CONFIG_USB_OTG=y ++CONFIG_USB_OTG_WHITELIST=y ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++# CONFIG_USB_WUSB is not set ++# CONFIG_USB_WUSB_CBAF is not set ++ ++# ++# USB Host Controller Drivers ++# ++# CONFIG_USB_C67X00_HCD is not set ++# CONFIG_USB_EHCI_HCD is not set ++# CONFIG_USB_ISP116X_HCD is not set ++# CONFIG_USB_OHCI_HCD is not set ++# CONFIG_USB_SL811_HCD is not set ++# CONFIG_USB_R8A66597_HCD is not set ++# CONFIG_USB_HWA_HCD is not set ++CONFIG_USB_MUSB_HDRC=m ++CONFIG_USB_MUSB_SOC=y ++ ++# ++# OMAP 343x high speed USB support ++# ++# CONFIG_USB_MUSB_HOST is not set ++# CONFIG_USB_MUSB_PERIPHERAL is not set ++CONFIG_USB_MUSB_OTG=y ++CONFIG_USB_GADGET_MUSB_HDRC=y ++CONFIG_USB_MUSB_HDRC_HCD=y ++# CONFIG_MUSB_PIO_ONLY is not set ++CONFIG_USB_INVENTRA_DMA=y ++# CONFIG_USB_TI_CPPI_DMA is not set ++# CONFIG_USB_MUSB_DEBUG is not set ++ ++# ++# USB Device Class drivers ++# ++# CONFIG_USB_ACM is not set ++# CONFIG_USB_PRINTER is not set ++# CONFIG_USB_WDM is not set ++# CONFIG_USB_TMC is not set ++ ++# ++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; ++# ++ ++# ++# see USB_STORAGE Help for more information ++# ++CONFIG_USB_STORAGE=m ++# CONFIG_USB_STORAGE_DEBUG is not set ++# CONFIG_USB_STORAGE_DATAFAB is not set ++# CONFIG_USB_STORAGE_FREECOM is not set ++# CONFIG_USB_STORAGE_ISD200 is not set ++# CONFIG_USB_STORAGE_DPCM is not set ++# CONFIG_USB_STORAGE_USBAT is not set ++# CONFIG_USB_STORAGE_SDDR09 is not set ++# CONFIG_USB_STORAGE_SDDR55 is not set ++# CONFIG_USB_STORAGE_JUMPSHOT is not set ++# CONFIG_USB_STORAGE_ALAUDA is not set ++# CONFIG_USB_STORAGE_ONETOUCH is not set ++# CONFIG_USB_STORAGE_KARMA is not set ++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set ++CONFIG_USB_LIBUSUAL=y ++ ++# ++# USB Imaging devices ++# ++# CONFIG_USB_MDC800 is not set ++# CONFIG_USB_MICROTEK is not set ++ ++# ++# USB port drivers ++# ++# CONFIG_USB_SERIAL is not set ++ ++# ++# USB Miscellaneous drivers ++# ++# CONFIG_USB_EMI62 is not set ++# CONFIG_USB_EMI26 is not set ++# CONFIG_USB_ADUTUX is not set ++# CONFIG_USB_SEVSEG is not set ++# CONFIG_USB_RIO500 is not set ++# CONFIG_USB_LEGOTOWER is not set ++# CONFIG_USB_LCD is not set ++# CONFIG_USB_BERRY_CHARGE is not set ++# CONFIG_USB_LED is not set ++# CONFIG_USB_CYPRESS_CY7C63 is not set ++# CONFIG_USB_CYTHERM is not set ++# CONFIG_USB_PHIDGET is not set ++# CONFIG_USB_IDMOUSE is not set ++# CONFIG_USB_FTDI_ELAN is not set ++# CONFIG_USB_APPLEDISPLAY is not set ++# CONFIG_USB_LD is not set ++# 
CONFIG_USB_TRANCEVIBRATOR is not set ++# CONFIG_USB_IOWARRIOR is not set ++CONFIG_USB_TEST=m ++# CONFIG_USB_ISIGHTFW is not set ++# CONFIG_USB_VST is not set ++CONFIG_USB_GADGET=m ++CONFIG_USB_GADGET_DEBUG=y ++CONFIG_USB_GADGET_DEBUG_FILES=y ++CONFIG_USB_GADGET_DEBUG_FS=y ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_SELECTED=y ++# CONFIG_USB_GADGET_AT91 is not set ++# CONFIG_USB_GADGET_ATMEL_USBA is not set ++# CONFIG_USB_GADGET_FSL_USB2 is not set ++# CONFIG_USB_GADGET_LH7A40X is not set ++# CONFIG_USB_GADGET_OMAP is not set ++# CONFIG_USB_GADGET_PXA25X is not set ++# CONFIG_USB_GADGET_PXA27X is not set ++# CONFIG_USB_GADGET_S3C2410 is not set ++# CONFIG_USB_GADGET_M66592 is not set ++# CONFIG_USB_GADGET_AMD5536UDC is not set ++# CONFIG_USB_GADGET_FSL_QE is not set ++# CONFIG_USB_GADGET_NET2280 is not set ++# CONFIG_USB_GADGET_GOKU is not set ++# CONFIG_USB_GADGET_DUMMY_HCD is not set ++CONFIG_USB_GADGET_DUALSPEED=y ++CONFIG_USB_ZERO=m ++# CONFIG_USB_ZERO_HNPTEST is not set ++# CONFIG_USB_ETH is not set ++# CONFIG_USB_GADGETFS is not set ++CONFIG_USB_FILE_STORAGE=m ++# CONFIG_USB_FILE_STORAGE_TEST is not set ++# CONFIG_USB_G_SERIAL is not set ++# CONFIG_USB_MIDI_GADGET is not set ++# CONFIG_USB_G_PRINTER is not set ++# CONFIG_USB_CDC_COMPOSITE is not set ++ ++# ++# OTG and related infrastructure ++# ++CONFIG_USB_OTG_UTILS=y ++# CONFIG_USB_GPIO_VBUS is not set ++# CONFIG_ISP1301_OMAP is not set ++CONFIG_TWL4030_USB=m ++CONFIG_MMC=m ++# CONFIG_MMC_DEBUG is not set ++# CONFIG_MMC_UNSAFE_RESUME is not set ++ ++# ++# MMC/SD/SDIO Card Drivers ++# ++CONFIG_MMC_BLOCK=m ++# CONFIG_MMC_BLOCK_BOUNCE is not set ++# CONFIG_SDIO_UART is not set ++# CONFIG_MMC_TEST is not set ++ ++# ++# MMC/SD/SDIO Host Controller Drivers ++# ++# CONFIG_MMC_SDHCI is not set ++CONFIG_MMC_OMAP_HS=m ++# CONFIG_MMC_SPI is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=m ++ ++# ++# LED drivers ++# ++# CONFIG_LEDS_OMAP is not set ++# CONFIG_LEDS_OMAP_PWM is not set ++# CONFIG_LEDS_PCA9532 is not set ++# CONFIG_LEDS_GPIO is not set ++# CONFIG_LEDS_PCA955X is not set ++ ++# ++# LED Triggers ++# ++# CONFIG_LEDS_TRIGGERS is not set ++CONFIG_RTC_LIB=y ++CONFIG_RTC_CLASS=m ++ ++# ++# RTC interfaces ++# ++CONFIG_RTC_INTF_SYSFS=y ++CONFIG_RTC_INTF_PROC=y ++CONFIG_RTC_INTF_DEV=y ++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set ++# CONFIG_RTC_DRV_TEST is not set ++ ++# ++# I2C RTC drivers ++# ++# CONFIG_RTC_DRV_DS1307 is not set ++# CONFIG_RTC_DRV_DS1374 is not set ++# CONFIG_RTC_DRV_DS1672 is not set ++# CONFIG_RTC_DRV_MAX6900 is not set ++# CONFIG_RTC_DRV_RS5C372 is not set ++# CONFIG_RTC_DRV_ISL1208 is not set ++# CONFIG_RTC_DRV_X1205 is not set ++# CONFIG_RTC_DRV_PCF8563 is not set ++# CONFIG_RTC_DRV_PCF8583 is not set ++# CONFIG_RTC_DRV_M41T80 is not set ++CONFIG_RTC_DRV_TWL4030=m ++# CONFIG_RTC_DRV_S35390A is not set ++# CONFIG_RTC_DRV_FM3130 is not set ++# CONFIG_RTC_DRV_RX8581 is not set ++ ++# ++# SPI RTC drivers ++# ++# CONFIG_RTC_DRV_M41T94 is not set ++# CONFIG_RTC_DRV_DS1305 is not set ++# CONFIG_RTC_DRV_DS1390 is not set ++# CONFIG_RTC_DRV_MAX6902 is not set ++# CONFIG_RTC_DRV_R9701 is not set ++# CONFIG_RTC_DRV_RS5C348 is not set ++# CONFIG_RTC_DRV_DS3234 is not set ++ ++# ++# Platform RTC drivers ++# ++# CONFIG_RTC_DRV_CMOS is not set ++# CONFIG_RTC_DRV_DS1286 is not set ++# CONFIG_RTC_DRV_DS1511 is not set ++# CONFIG_RTC_DRV_DS1553 is not set ++# CONFIG_RTC_DRV_DS1742 is not set ++# CONFIG_RTC_DRV_STK17TA8 is not set ++# CONFIG_RTC_DRV_M48T86 is not 
set ++# CONFIG_RTC_DRV_M48T35 is not set ++# CONFIG_RTC_DRV_M48T59 is not set ++# CONFIG_RTC_DRV_BQ4802 is not set ++# CONFIG_RTC_DRV_V3020 is not set ++ ++# ++# on-CPU RTC drivers ++# ++# CONFIG_DMADEVICES is not set ++# CONFIG_REGULATOR is not set ++# CONFIG_UIO is not set ++ ++# ++# CBUS support ++# ++# CONFIG_CBUS is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=m ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++CONFIG_EXT3_FS=m ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT4_FS is not set ++CONFIG_JBD=m ++# CONFIG_JBD_DEBUG is not set ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++CONFIG_QUOTA=y ++# CONFIG_QUOTA_NETLINK_INTERFACE is not set ++CONFIG_PRINT_QUOTA_WARNING=y ++# CONFIG_QFMT_V1 is not set ++CONFIG_QFMT_V2=y ++CONFIG_QUOTACTL=y ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++CONFIG_FUSE_FS=m ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++ ++# ++# Miscellaneous filesystems ++# ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_JFFS2_FS is not set ++CONFIG_UBIFS_FS=y ++# CONFIG_UBIFS_FS_XATTR is not set ++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set ++CONFIG_UBIFS_FS_LZO=y ++CONFIG_UBIFS_FS_ZLIB=y ++# CONFIG_UBIFS_FS_DEBUG is not set ++CONFIG_CRAMFS=y ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++CONFIG_NFS_V4=y ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=m ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=m ++CONFIG_SUNRPC_GSS=m ++# CONFIG_SUNRPC_REGISTER_V4 is not set ++CONFIG_RPCSEC_GSS_KRB5=m ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++# CONFIG_EFI_PARTITION is not set ++# CONFIG_SYSV68_PARTITION is not set 
++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++CONFIG_PRINTK_TIME=y ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++CONFIG_MAGIC_SYSRQ=y ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_CHECK is not set ++CONFIG_DEBUG_KERNEL=y ++# CONFIG_DEBUG_SHIRQ is not set ++CONFIG_DETECT_SOFTLOCKUP=y ++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 ++CONFIG_SCHED_DEBUG=y ++# CONFIG_SCHEDSTATS is not set ++CONFIG_TIMER_STATS=y ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_SLAB is not set ++CONFIG_DEBUG_PREEMPT=y ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_RT_MUTEX_TESTER is not set ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_DEBUG_MUTEXES=y ++CONFIG_DEBUG_LOCK_ALLOC=y ++CONFIG_PROVE_LOCKING=y ++CONFIG_LOCKDEP=y ++CONFIG_LOCK_STAT=y ++# CONFIG_DEBUG_LOCKDEP is not set ++CONFIG_TRACE_IRQFLAGS=y ++CONFIG_DEBUG_SPINLOCK_SLEEP=y ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++CONFIG_STACKTRACE=y ++# CONFIG_DEBUG_KOBJECT is not set ++# CONFIG_DEBUG_BUGVERBOSE is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_WRITECOUNT is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_LIST is not set ++# CONFIG_DEBUG_SG is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_KPROBES_SANITY_TEST is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set ++# CONFIG_LKDTM is not set ++# CONFIG_FAULT_INJECTION is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_HAVE_FUNCTION_TRACER=y ++ ++# ++# Tracers ++# ++# CONFIG_FUNCTION_TRACER is not set ++# CONFIG_IRQSOFF_TRACER is not set ++# CONFIG_PREEMPT_TRACER is not set ++# CONFIG_SCHED_TRACER is not set ++# CONFIG_CONTEXT_SWITCH_TRACER is not set ++# CONFIG_BOOT_TRACER is not set ++# CONFIG_STACK_TRACER is not set ++# CONFIG_DYNAMIC_PRINTK_DEBUG is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is 
not set ++# CONFIG_DEBUG_USER is not set ++# CONFIG_DEBUG_ERRORS is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_DEBUG_LL is not set ++ ++# ++# Security options ++# ++# CONFIG_KEYS is not set ++CONFIG_SECURITY=y ++# CONFIG_SECURITYFS is not set ++# CONFIG_SECURITY_NETWORK is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set ++CONFIG_SECURITY_LOWMEM=y ++CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_ALGAPI2=y ++CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_BLKCIPHER=y ++CONFIG_CRYPTO_BLKCIPHER2=y ++CONFIG_CRYPTO_HASH2=y ++CONFIG_CRYPTO_RNG2=y ++CONFIG_CRYPTO_MANAGER=y ++CONFIG_CRYPTO_MANAGER2=y ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++CONFIG_CRYPTO_CBC=y ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++CONFIG_CRYPTO_ECB=y ++# CONFIG_CRYPTO_LRW is not set ++CONFIG_CRYPTO_PCBC=m ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++CONFIG_CRYPTO_MD5=y ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++CONFIG_CRYPTO_AES=y ++# CONFIG_CRYPTO_ANUBIS is not set ++CONFIG_CRYPTO_ARC4=y ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++CONFIG_CRYPTO_DES=y ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=y ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_HW=y ++ ++# ++# Library routines ++# ++CONFIG_BITREVERSE=y ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++CONFIG_CRC32=y ++CONFIG_CRC7=m ++CONFIG_LIBCRC32C=y ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT=y ++CONFIG_HAS_DMA=y +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/configs/rx71_tiny_defconfig kernel-2.6.28-20093908+0m5/arch/arm/configs/rx71_tiny_defconfig +--- linux-omap-2.6.28-omap1/arch/arm/configs/rx71_tiny_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/configs/rx71_tiny_defconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1839 @@ ++# ++# Automatically generated make config: don't edit ++# Linux kernel version: 2.6.28-rc4-omap1 ++# Wed Nov 12 21:53:14 2008 ++# ++CONFIG_ARM=y ++CONFIG_SYS_SUPPORTS_APM_EMULATION=y ++CONFIG_GENERIC_GPIO=y 
++CONFIG_GENERIC_TIME=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_MMU=y ++# CONFIG_NO_IOPORT is not set ++CONFIG_GENERIC_HARDIRQS=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_HAVE_LATENCYTOP_SUPPORT=y ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_HARDIRQS_SW_RESEND=y ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_RWSEM_GENERIC_SPINLOCK=y ++# CONFIG_ARCH_HAS_ILOG2_U32 is not set ++# CONFIG_ARCH_HAS_ILOG2_U64 is not set ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y ++CONFIG_VECTORS_BASE=0xffff0000 ++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" ++ ++# ++# General setup ++# ++CONFIG_EXPERIMENTAL=y ++CONFIG_BROKEN_ON_SMP=y ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++CONFIG_LOCALVERSION="" ++CONFIG_LOCALVERSION_AUTO=y ++CONFIG_SWAP=y ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++# CONFIG_BSD_PROCESS_ACCT_V3 is not set ++# CONFIG_TASKSTATS is not set ++# CONFIG_AUDIT is not set ++# CONFIG_IKCONFIG is not set ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_CGROUPS is not set ++CONFIG_GROUP_SCHED=y ++CONFIG_FAIR_GROUP_SCHED=y ++# CONFIG_RT_GROUP_SCHED is not set ++CONFIG_USER_SCHED=y ++# CONFIG_CGROUP_SCHED is not set ++CONFIG_SYSFS_DEPRECATED=y ++CONFIG_SYSFS_DEPRECATED_V2=y ++# CONFIG_RELAY is not set ++# CONFIG_NAMESPACES is not set ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="" ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_SYSCTL=y ++CONFIG_EMBEDDED=y ++CONFIG_UID16=y ++# CONFIG_SYSCTL_SYSCALL is not set ++CONFIG_KALLSYMS=y ++# CONFIG_KALLSYMS_ALL is not set ++CONFIG_KALLSYMS_EXTRA_PASS=y ++CONFIG_HOTPLUG=y ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_COMPAT_BRK=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_ANON_INODES=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_VM_EVENT_COUNTERS=y ++CONFIG_SLAB=y ++# CONFIG_SLUB is not set ++# CONFIG_SLOB is not set ++# CONFIG_PROFILING is not set ++# CONFIG_MARKERS is not set ++CONFIG_HAVE_OPROFILE=y ++CONFIG_KPROBES=y ++CONFIG_KRETPROBES=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_GENERIC_DMA_COHERENT=y ++CONFIG_SLABINFO=y ++CONFIG_RT_MUTEXES=y ++# CONFIG_TINY_SHMEM is not set ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SRCVERSION_ALL=y ++CONFIG_KMOD=y ++CONFIG_BLOCK=y ++# CONFIG_LBD is not set ++# CONFIG_BLK_DEV_IO_TRACE is not set ++# CONFIG_LSF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++ ++# ++# IO Schedulers ++# ++CONFIG_IOSCHED_NOOP=y ++# CONFIG_IOSCHED_AS is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_IOSCHED_CFQ=y ++# CONFIG_DEFAULT_AS is not set ++# CONFIG_DEFAULT_DEADLINE is not set ++CONFIG_DEFAULT_CFQ=y ++# CONFIG_DEFAULT_NOOP is not set ++CONFIG_DEFAULT_IOSCHED="cfq" ++CONFIG_CLASSIC_RCU=y ++CONFIG_FREEZER=y ++ ++# ++# System Type ++# ++# CONFIG_ARCH_AAEC2000 is not set ++# CONFIG_ARCH_INTEGRATOR is not set ++# CONFIG_ARCH_REALVIEW is not set ++# CONFIG_ARCH_VERSATILE is not set ++# CONFIG_ARCH_AT91 is not set ++# CONFIG_ARCH_CLPS7500 is not set ++# CONFIG_ARCH_CLPS711X is not set ++# CONFIG_ARCH_EBSA110 is not set ++# CONFIG_ARCH_EP93XX is not set ++# CONFIG_ARCH_FOOTBRIDGE is not set ++# CONFIG_ARCH_NETX is not set ++# CONFIG_ARCH_H720X is not set ++# CONFIG_ARCH_IMX is not set ++# CONFIG_ARCH_IOP13XX is not set ++# 
CONFIG_ARCH_IOP32X is not set ++# CONFIG_ARCH_IOP33X is not set ++# CONFIG_ARCH_IXP23XX is not set ++# CONFIG_ARCH_IXP2000 is not set ++# CONFIG_ARCH_IXP4XX is not set ++# CONFIG_ARCH_L7200 is not set ++# CONFIG_ARCH_KIRKWOOD is not set ++# CONFIG_ARCH_KS8695 is not set ++# CONFIG_ARCH_NS9XXX is not set ++# CONFIG_ARCH_LOKI is not set ++# CONFIG_ARCH_MV78XX0 is not set ++# CONFIG_ARCH_MXC is not set ++# CONFIG_ARCH_ORION5X is not set ++# CONFIG_ARCH_PNX4008 is not set ++# CONFIG_ARCH_PXA is not set ++# CONFIG_ARCH_RPC is not set ++# CONFIG_ARCH_SA1100 is not set ++# CONFIG_ARCH_S3C2410 is not set ++# CONFIG_ARCH_SHARK is not set ++# CONFIG_ARCH_LH7A40X is not set ++# CONFIG_ARCH_DAVINCI is not set ++CONFIG_ARCH_OMAP=y ++# CONFIG_ARCH_MSM is not set ++ ++# ++# TI OMAP Implementations ++# ++CONFIG_ARCH_OMAP_OTG=y ++# CONFIG_ARCH_OMAP1 is not set ++# CONFIG_ARCH_OMAP2 is not set ++CONFIG_ARCH_OMAP3=y ++ ++# ++# OMAP Feature Selections ++# ++# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set ++# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set ++CONFIG_OMAP_SMARTREFLEX=y ++# CONFIG_OMAP_SMARTREFLEX_TESTING is not set ++CONFIG_OMAP_RESET_CLOCKS=y ++CONFIG_OMAP_BOOT_TAG=y ++CONFIG_OMAP_BOOT_REASON=y ++CONFIG_OMAP_COMPONENT_VERSION=y ++CONFIG_OMAP_GPIO_SWITCH=y ++CONFIG_OMAP_MUX=y ++CONFIG_OMAP_MUX_DEBUG=y ++CONFIG_OMAP_MUX_WARNINGS=y ++CONFIG_OMAP_MCBSP=y ++# CONFIG_OMAP_MMU_FWK is not set ++# CONFIG_OMAP_MBOX_FWK is not set ++# CONFIG_OMAP_MPU_TIMER is not set ++CONFIG_OMAP_32K_TIMER=y ++CONFIG_OMAP_32K_TIMER_HZ=128 ++CONFIG_OMAP_DM_TIMER=y ++# CONFIG_OMAP_LL_DEBUG_UART1 is not set ++# CONFIG_OMAP_LL_DEBUG_UART2 is not set ++CONFIG_OMAP_LL_DEBUG_UART3=y ++CONFIG_OMAP_SERIAL_WAKE=y ++CONFIG_ARCH_OMAP34XX=y ++CONFIG_ARCH_OMAP3430=y ++ ++# ++# OMAP Board Type ++# ++CONFIG_MACH_NOKIA_RX51=y ++CONFIG_MACH_NOKIA_RX71=y ++# CONFIG_MACH_OMAP_LDP is not set ++# CONFIG_MACH_OMAP_3430SDP is not set ++# CONFIG_MACH_OMAP3EVM is not set ++# CONFIG_MACH_OMAP3_BEAGLE is not set ++# CONFIG_MACH_OVERO is not set ++# CONFIG_MACH_OMAP3_PANDORA is not set ++CONFIG_OMAP_TICK_GPTIMER=1 ++ ++# ++# Boot options ++# ++ ++# ++# Power management ++# ++ ++# ++# Processor Type ++# ++CONFIG_CPU_32=y ++CONFIG_CPU_32v6K=y ++CONFIG_CPU_V7=y ++CONFIG_CPU_32v7=y ++CONFIG_CPU_ABRT_EV7=y ++CONFIG_CPU_PABRT_IFAR=y ++CONFIG_CPU_CACHE_V7=y ++CONFIG_CPU_CACHE_VIPT=y ++CONFIG_CPU_COPY_V6=y ++CONFIG_CPU_TLB_V7=y ++CONFIG_CPU_HAS_ASID=y ++CONFIG_CPU_CP15=y ++CONFIG_CPU_CP15_MMU=y ++ ++# ++# Processor Features ++# ++CONFIG_ARM_THUMB=y ++# CONFIG_ARM_THUMBEE is not set ++# CONFIG_CPU_ICACHE_DISABLE is not set ++# CONFIG_CPU_DCACHE_DISABLE is not set ++# CONFIG_CPU_BPREDICT_DISABLE is not set ++CONFIG_HAS_TLS_REG=y ++# CONFIG_OUTER_CACHE is not set ++ ++# ++# Bus support ++# ++# CONFIG_PCI_SYSCALL is not set ++# CONFIG_ARCH_SUPPORTS_MSI is not set ++# CONFIG_PCCARD is not set ++ ++# ++# Kernel Features ++# ++CONFIG_TICK_ONESHOT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y ++CONFIG_VMSPLIT_3G=y ++# CONFIG_VMSPLIT_2G is not set ++# CONFIG_VMSPLIT_1G is not set ++CONFIG_PAGE_OFFSET=0xC0000000 ++# CONFIG_PREEMPT is not set ++CONFIG_HZ=128 ++CONFIG_AEABI=y ++# CONFIG_OABI_COMPAT is not set ++CONFIG_ARCH_FLATMEM_HAS_HOLES=y ++# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set ++# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set ++CONFIG_SELECT_MEMORY_MODEL=y ++CONFIG_FLATMEM_MANUAL=y ++# CONFIG_DISCONTIGMEM_MANUAL is not set ++# CONFIG_SPARSEMEM_MANUAL is not set ++CONFIG_FLATMEM=y ++CONFIG_FLAT_NODE_MEM_MAP=y ++CONFIG_PAGEFLAGS_EXTENDED=y 
++CONFIG_SPLIT_PTLOCK_CPUS=4 ++# CONFIG_RESOURCES_64BIT is not set ++# CONFIG_PHYS_ADDR_T_64BIT is not set ++CONFIG_ZONE_DMA_FLAG=0 ++CONFIG_VIRT_TO_BUS=y ++CONFIG_UNEVICTABLE_LRU=y ++# CONFIG_LEDS is not set ++CONFIG_ALIGNMENT_TRAP=y ++ ++# ++# Boot options ++# ++CONFIG_ZBOOT_ROM_TEXT=0x0 ++CONFIG_ZBOOT_ROM_BSS=0x0 ++CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=4 root=ubi0:rootfs rootfstype=ubifs rw console=ttyMTD5" ++# CONFIG_XIP_KERNEL is not set ++# CONFIG_KEXEC is not set ++ ++# ++# CPU Power Management ++# ++# CONFIG_CPU_FREQ is not set ++CONFIG_CPU_IDLE=y ++CONFIG_CPU_IDLE_GOV_LADDER=y ++CONFIG_CPU_IDLE_GOV_MENU=y ++ ++# ++# Floating point emulation ++# ++ ++# ++# At least one emulation must be selected ++# ++CONFIG_VFP=y ++CONFIG_VFPv3=y ++CONFIG_NEON=y ++ ++# ++# Userspace binary formats ++# ++CONFIG_BINFMT_ELF=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_HAVE_AOUT=y ++# CONFIG_BINFMT_AOUT is not set ++CONFIG_BINFMT_MISC=y ++ ++# ++# Power management options ++# ++CONFIG_PM=y ++CONFIG_PM_DEBUG=y ++# CONFIG_PM_VERBOSE is not set ++CONFIG_CAN_PM_TRACE=y ++CONFIG_PM_SLEEP=y ++CONFIG_SUSPEND=y ++CONFIG_SUSPEND_FREEZER=y ++# CONFIG_APM_EMULATION is not set ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++CONFIG_NET=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_MMAP is not set ++CONFIG_UNIX=y ++CONFIG_XFRM=y ++# CONFIG_XFRM_USER is not set ++# CONFIG_XFRM_SUB_POLICY is not set ++# CONFIG_XFRM_MIGRATE is not set ++# CONFIG_XFRM_STATISTICS is not set ++CONFIG_NET_KEY=y ++# CONFIG_NET_KEY_MIGRATE is not set ++CONFIG_INET=y ++# CONFIG_IP_MULTICAST is not set ++# CONFIG_IP_ADVANCED_ROUTER is not set ++CONFIG_IP_FIB_HASH=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE is not set ++# CONFIG_ARPD is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++# CONFIG_INET_XFRM_TUNNEL is not set ++# CONFIG_INET_TUNNEL is not set ++CONFIG_INET_XFRM_MODE_TRANSPORT=y ++CONFIG_INET_XFRM_MODE_TUNNEL=y ++CONFIG_INET_XFRM_MODE_BEET=y ++# CONFIG_INET_LRO is not set ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_NETLABEL is not set ++# CONFIG_NETWORK_SECMARK is not set ++CONFIG_NETFILTER=y ++# CONFIG_NETFILTER_DEBUG is not set ++CONFIG_NETFILTER_ADVANCED=y ++ ++# ++# Core Netfilter Configuration ++# ++# CONFIG_NETFILTER_NETLINK_QUEUE is not set ++# CONFIG_NETFILTER_NETLINK_LOG is not set ++# CONFIG_NF_CONNTRACK is not set ++CONFIG_NETFILTER_XTABLES=m ++# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set ++# CONFIG_NETFILTER_XT_TARGET_MARK is not set ++# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set ++# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set ++# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set ++# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set ++# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set ++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set ++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set ++# CONFIG_NETFILTER_XT_MATCH_ESP is not set ++# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set ++# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set ++# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set ++# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set ++# CONFIG_NETFILTER_XT_MATCH_MAC is not set ++# CONFIG_NETFILTER_XT_MATCH_MARK is not set ++# 
CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set ++# CONFIG_NETFILTER_XT_MATCH_OWNER is not set ++# CONFIG_NETFILTER_XT_MATCH_POLICY is not set ++# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set ++# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set ++# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set ++# CONFIG_NETFILTER_XT_MATCH_REALM is not set ++# CONFIG_NETFILTER_XT_MATCH_RECENT is not set ++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set ++# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set ++# CONFIG_NETFILTER_XT_MATCH_STRING is not set ++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set ++# CONFIG_NETFILTER_XT_MATCH_TIME is not set ++# CONFIG_NETFILTER_XT_MATCH_U32 is not set ++# CONFIG_IP_VS is not set ++ ++# ++# IP: Netfilter Configuration ++# ++# CONFIG_NF_DEFRAG_IPV4 is not set ++# CONFIG_IP_NF_QUEUE is not set ++CONFIG_IP_NF_IPTABLES=m ++# CONFIG_IP_NF_MATCH_ADDRTYPE is not set ++# CONFIG_IP_NF_MATCH_AH is not set ++# CONFIG_IP_NF_MATCH_ECN is not set ++# CONFIG_IP_NF_MATCH_TTL is not set ++CONFIG_IP_NF_FILTER=m ++# CONFIG_IP_NF_TARGET_REJECT is not set ++# CONFIG_IP_NF_TARGET_LOG is not set ++# CONFIG_IP_NF_TARGET_ULOG is not set ++# CONFIG_IP_NF_TARGET_IDLETIMER is not set ++# CONFIG_IP_NF_MANGLE is not set ++# CONFIG_IP_NF_RAW is not set ++# CONFIG_IP_NF_SECURITY is not set ++# CONFIG_IP_NF_ARPTABLES is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_DECNET is not set ++# CONFIG_LLC2 is not set ++# CONFIG_IPX is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_ECONET is not set ++# CONFIG_WAN_ROUTER is not set ++# CONFIG_NET_SCHED is not set ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# CONFIG_NET_TCPPROBE is not set ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_IRDA is not set ++CONFIG_BT=m ++CONFIG_BT_L2CAP=m ++CONFIG_BT_SCO=m ++CONFIG_BT_RFCOMM=m ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_HIDP=m ++ ++# ++# Bluetooth device drivers ++# ++# CONFIG_BT_HCIUSB is not set ++# CONFIG_BT_HCIBTUSB is not set ++# CONFIG_BT_HCIBTSDIO is not set ++# CONFIG_BT_HCIUART is not set ++# CONFIG_BT_HCIBCM203X is not set ++# CONFIG_BT_HCIBPA10X is not set ++# CONFIG_BT_HCIBFUSB is not set ++# CONFIG_BT_HCIBRF6150 is not set ++CONFIG_BT_HCIH4P=m ++# CONFIG_BT_HCIVHCI is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_PHONET is not set ++CONFIG_WIRELESS=y ++CONFIG_CFG80211=y ++CONFIG_NL80211=y ++CONFIG_WIRELESS_OLD_REGULATORY=y ++CONFIG_WIRELESS_EXT=y ++CONFIG_WIRELESS_EXT_SYSFS=y ++CONFIG_MAC80211=m ++ ++# ++# Rate control algorithm selection ++# ++CONFIG_MAC80211_RC_PID=y ++# CONFIG_MAC80211_RC_MINSTREL is not set ++CONFIG_MAC80211_RC_DEFAULT_PID=y ++# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set ++CONFIG_MAC80211_RC_DEFAULT="pid" ++# CONFIG_MAC80211_MESH is not set ++# CONFIG_MAC80211_LEDS is not set ++# CONFIG_MAC80211_DEBUGFS is not set ++# CONFIG_MAC80211_DEBUG_MENU is not set ++# CONFIG_IEEE80211 is not set ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++ ++# ++# Device Drivers ++# ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_STANDALONE=y ++CONFIG_PREVENT_FIRMWARE_BUILD=y ++CONFIG_FW_LOADER=y ++CONFIG_FIRMWARE_IN_KERNEL=y ++CONFIG_EXTRA_FIRMWARE="" ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not 
set ++# CONFIG_SYS_HYPERVISOR is not set ++# CONFIG_CONNECTOR is not set ++CONFIG_MTD=y ++# CONFIG_MTD_DEBUG is not set ++CONFIG_MTD_CONCAT=y ++CONFIG_MTD_PARTITIONS=y ++# CONFIG_MTD_REDBOOT_PARTS is not set ++CONFIG_MTD_CMDLINE_PARTS=y ++# CONFIG_MTD_AFS_PARTS is not set ++# CONFIG_MTD_AR7_PARTS is not set ++ ++# ++# User Modules And Translation Layers ++# ++CONFIG_MTD_CHAR=y ++CONFIG_MTD_BLKDEVS=y ++CONFIG_MTD_BLOCK=y ++# CONFIG_FTL is not set ++# CONFIG_NFTL is not set ++# CONFIG_INFTL is not set ++# CONFIG_RFD_FTL is not set ++# CONFIG_SSFDC is not set ++CONFIG_MTD_OOPS=y ++ ++# ++# RAM/ROM/Flash chip drivers ++# ++CONFIG_MTD_CFI=y ++# CONFIG_MTD_JEDECPROBE is not set ++CONFIG_MTD_GEN_PROBE=y ++# CONFIG_MTD_CFI_ADV_OPTIONS is not set ++CONFIG_MTD_MAP_BANK_WIDTH_1=y ++CONFIG_MTD_MAP_BANK_WIDTH_2=y ++CONFIG_MTD_MAP_BANK_WIDTH_4=y ++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set ++CONFIG_MTD_CFI_I1=y ++CONFIG_MTD_CFI_I2=y ++# CONFIG_MTD_CFI_I4 is not set ++# CONFIG_MTD_CFI_I8 is not set ++CONFIG_MTD_CFI_INTELEXT=y ++# CONFIG_MTD_CFI_AMDSTD is not set ++# CONFIG_MTD_CFI_STAA is not set ++CONFIG_MTD_CFI_UTIL=y ++# CONFIG_MTD_RAM is not set ++# CONFIG_MTD_ROM is not set ++# CONFIG_MTD_ABSENT is not set ++ ++# ++# Mapping drivers for chip access ++# ++# CONFIG_MTD_COMPLEX_MAPPINGS is not set ++# CONFIG_MTD_PHYSMAP is not set ++# CONFIG_MTD_ARM_INTEGRATOR is not set ++# CONFIG_MTD_OMAP_NOR is not set ++# CONFIG_MTD_PLATRAM is not set ++ ++# ++# Self-contained MTD device drivers ++# ++# CONFIG_MTD_DATAFLASH is not set ++# CONFIG_MTD_M25P80 is not set ++# CONFIG_MTD_SLRAM is not set ++# CONFIG_MTD_PHRAM is not set ++# CONFIG_MTD_MTDRAM is not set ++# CONFIG_MTD_BLOCK2MTD is not set ++ ++# ++# Disk-On-Chip Device Drivers ++# ++# CONFIG_MTD_DOC2000 is not set ++# CONFIG_MTD_DOC2001 is not set ++# CONFIG_MTD_DOC2001PLUS is not set ++# CONFIG_MTD_NAND is not set ++CONFIG_MTD_ONENAND=y ++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set ++# CONFIG_MTD_ONENAND_GENERIC is not set ++CONFIG_MTD_ONENAND_OMAP2=y ++# CONFIG_MTD_ONENAND_OTP is not set ++# CONFIG_MTD_ONENAND_2X_PROGRAM is not set ++# CONFIG_MTD_ONENAND_SIM is not set ++ ++# ++# UBI - Unsorted block images ++# ++CONFIG_MTD_UBI=y ++CONFIG_MTD_UBI_WL_THRESHOLD=4096 ++CONFIG_MTD_UBI_BEB_RESERVE=1 ++# CONFIG_MTD_UBI_GLUEBI is not set ++ ++# ++# UBI debugging options ++# ++# CONFIG_MTD_UBI_DEBUG is not set ++# CONFIG_PARPORT is not set ++CONFIG_BLK_DEV=y ++# CONFIG_BLK_DEV_COW_COMMON is not set ++CONFIG_BLK_DEV_LOOP=y ++# CONFIG_BLK_DEV_CRYPTOLOOP is not set ++# CONFIG_BLK_DEV_NBD is not set ++# CONFIG_BLK_DEV_UB is not set ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=16 ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++# CONFIG_BLK_DEV_XIP is not set ++# CONFIG_CDROM_PKTCDVD is not set ++# CONFIG_ATA_OVER_ETH is not set ++CONFIG_MISC_DEVICES=y ++# CONFIG_EEPROM_93CX6 is not set ++# CONFIG_OMAP_STI is not set ++# CONFIG_ENCLOSURE_SERVICES is not set ++CONFIG_HAVE_IDE=y ++# CONFIG_IDE is not set ++ ++# ++# SCSI device support ++# ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI=m ++CONFIG_SCSI_DMA=y ++# CONFIG_SCSI_TGT is not set ++# CONFIG_SCSI_NETLINK is not set ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=m ++# CONFIG_CHR_DEV_ST is not set ++# CONFIG_CHR_DEV_OSST is not set ++# CONFIG_BLK_DEV_SR is not set ++# CONFIG_CHR_DEV_SG is not set ++# CONFIG_CHR_DEV_SCH is not set ++ ++# ++# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs ++# ++CONFIG_SCSI_MULTI_LUN=y ++# CONFIG_SCSI_CONSTANTS is not set ++# CONFIG_SCSI_LOGGING is not set ++CONFIG_SCSI_SCAN_ASYNC=y ++CONFIG_SCSI_WAIT_SCAN=m ++ ++# ++# SCSI Transports ++# ++# CONFIG_SCSI_SPI_ATTRS is not set ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++CONFIG_SCSI_LOWLEVEL=y ++# CONFIG_ISCSI_TCP is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_DH is not set ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++CONFIG_NETDEVICES=y ++# CONFIG_DUMMY is not set ++# CONFIG_BONDING is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_EQUALIZER is not set ++CONFIG_TUN=m ++# CONFIG_VETH is not set ++# CONFIG_PHYLIB is not set ++CONFIG_NET_ETHERNET=y ++CONFIG_MII=m ++# CONFIG_AX88796 is not set ++CONFIG_SMC91X=m ++# CONFIG_DM9000 is not set ++# CONFIG_ENC28J60 is not set ++# CONFIG_SMC911X is not set ++# CONFIG_IBM_NEW_EMAC_ZMII is not set ++# CONFIG_IBM_NEW_EMAC_RGMII is not set ++# CONFIG_IBM_NEW_EMAC_TAH is not set ++# CONFIG_IBM_NEW_EMAC_EMAC4 is not set ++# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set ++# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set ++# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set ++# CONFIG_B44 is not set ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++ ++# ++# Wireless LAN ++# ++# CONFIG_WLAN_PRE80211 is not set ++CONFIG_WLAN_80211=y ++# CONFIG_LIBERTAS is not set ++# CONFIG_LIBERTAS_THINFIRM is not set ++# CONFIG_USB_ZD1201 is not set ++# CONFIG_USB_NET_RNDIS_WLAN is not set ++# CONFIG_RTL8187 is not set ++# CONFIG_MAC80211_HWSIM is not set ++# CONFIG_P54_COMMON is not set ++# CONFIG_IWLWIFI_LEDS is not set ++# CONFIG_HOSTAP is not set ++# CONFIG_B43 is not set ++# CONFIG_B43LEGACY is not set ++# CONFIG_ZD1211RW is not set ++# CONFIG_RT2X00 is not set ++ ++# ++# USB Network Adapters ++# ++# CONFIG_USB_CATC is not set ++# CONFIG_USB_KAWETH is not set ++# CONFIG_USB_PEGASUS is not set ++# CONFIG_USB_RTL8150 is not set ++# CONFIG_USB_USBNET is not set ++# CONFIG_WAN is not set ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_NETPOLL is not set ++# CONFIG_NET_POLL_CONTROLLER is not set ++# CONFIG_ISDN is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_POLLDEV is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_KEYBOARD_SUNKBD is not set ++# CONFIG_KEYBOARD_LKKBD is not set ++# CONFIG_KEYBOARD_XTKBD is not set ++# CONFIG_KEYBOARD_NEWTON is not set ++# CONFIG_KEYBOARD_STOWAWAY is not set ++# CONFIG_KEYBOARD_LM8323 is not set ++# CONFIG_KEYBOARD_GPIO is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_TOUCHSCREEN_ADS7846 is not set ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_MK712 is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set ++CONFIG_TOUCHSCREEN_TSC2005=m ++# 
CONFIG_TOUCHSCREEN_TSC2102 is not set ++# CONFIG_TOUCHSCREEN_TSC210X is not set ++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_INPUT_ATI_REMOTE is not set ++# CONFIG_INPUT_ATI_REMOTE2 is not set ++# CONFIG_INPUT_KEYSPAN_REMOTE is not set ++# CONFIG_INPUT_POWERMATE is not set ++# CONFIG_INPUT_YEALINK is not set ++# CONFIG_INPUT_CM109 is not set ++CONFIG_INPUT_UINPUT=m ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++ ++# ++# Character devices ++# ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_DEVKMEM=y ++# CONFIG_SERIAL_NONSTANDARD is not set ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_NR_UARTS=4 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ++# CONFIG_SERIAL_8250_EXTENDED is not set ++ ++# ++# Non-8250 serial port support ++# ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_IPMI_HANDLER is not set ++CONFIG_HW_RANDOM=m ++# CONFIG_NVRAM is not set ++# CONFIG_R3964 is not set ++# CONFIG_RAW_DRIVER is not set ++# CONFIG_TCG_TPM is not set ++CONFIG_I2C=y ++CONFIG_I2C_BOARDINFO=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_HELPER_AUTO=y ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_GPIO is not set ++# CONFIG_I2C_OCORES is not set ++CONFIG_I2C_OMAP=y ++# CONFIG_I2C_SIMTEC is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_PARPORT_LIGHT is not set ++# CONFIG_I2C_TAOS_EVM is not set ++# CONFIG_I2C_TINY_USB is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_STUB is not set ++ ++# ++# Miscellaneous I2C Chip support ++# ++# CONFIG_DS1682 is not set ++# CONFIG_AT24 is not set ++# CONFIG_SENSORS_EEPROM is not set ++# CONFIG_SENSORS_PCF8574 is not set ++# CONFIG_PCF8575 is not set ++# CONFIG_SENSORS_PCA9539 is not set ++# CONFIG_SENSORS_PCF8591 is not set ++# CONFIG_ISP1301_OMAP is not set ++# CONFIG_TPS65010 is not set ++# CONFIG_SENSORS_TLV320AIC23 is not set ++CONFIG_TWL4030_MADC=y ++CONFIG_TWL4030_USB=y ++CONFIG_TWL4030_PWRBUTTON=y ++CONFIG_TWL4030_POWEROFF=y ++# CONFIG_SENSORS_MAX6875 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++CONFIG_SENSORS_TSL2563=m ++# CONFIG_LP5521 is not set ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# CONFIG_I2C_DEBUG_CHIP is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++ ++# ++# SPI Master Controller Drivers ++# ++# CONFIG_SPI_BITBANG is not set ++CONFIG_SPI_OMAP24XX=y ++ ++# ++# SPI Protocol Masters ++# ++# CONFIG_SPI_AT25 is not set ++# CONFIG_SPI_TSC2101 is not set ++# CONFIG_SPI_TSC2102 is not set ++# CONFIG_SPI_TSC210X is not set ++# CONFIG_SPI_TSC2301 is not set ++# CONFIG_SPI_SPIDEV is not set ++# CONFIG_SPI_TLE62X0 is not set ++CONFIG_ARCH_REQUIRE_GPIOLIB=y ++CONFIG_GPIOLIB=y ++# CONFIG_DEBUG_GPIO is not set ++CONFIG_GPIO_SYSFS=y ++ ++# ++# I2C GPIO expanders: ++# ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCF857X is not set ++CONFIG_GPIO_TWL4030=y ++ ++# ++# PCI GPIO expanders: ++# ++ ++# ++# SPI GPIO expanders: ++# ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MCP23S08 is not set ++# CONFIG_W1 is not set ++# 
CONFIG_POWER_SUPPLY is not set ++CONFIG_HWMON=y ++# CONFIG_HWMON_VID is not set ++# CONFIG_SENSORS_AD7414 is not set ++# CONFIG_SENSORS_AD7418 is not set ++# CONFIG_SENSORS_ADCXX is not set ++# CONFIG_SENSORS_ADM1021 is not set ++# CONFIG_SENSORS_ADM1025 is not set ++# CONFIG_SENSORS_ADM1026 is not set ++# CONFIG_SENSORS_ADM1029 is not set ++# CONFIG_SENSORS_ADM1031 is not set ++# CONFIG_SENSORS_ADM9240 is not set ++# CONFIG_SENSORS_ADT7470 is not set ++# CONFIG_SENSORS_ADT7473 is not set ++# CONFIG_SENSORS_ATXP1 is not set ++# CONFIG_SENSORS_DS1621 is not set ++# CONFIG_SENSORS_F71805F is not set ++# CONFIG_SENSORS_F71882FG is not set ++# CONFIG_SENSORS_F75375S is not set ++# CONFIG_SENSORS_GL518SM is not set ++# CONFIG_SENSORS_GL520SM is not set ++# CONFIG_SENSORS_IT87 is not set ++# CONFIG_SENSORS_LM63 is not set ++# CONFIG_SENSORS_LM70 is not set ++# CONFIG_SENSORS_LM75 is not set ++# CONFIG_SENSORS_LM77 is not set ++# CONFIG_SENSORS_LM78 is not set ++# CONFIG_SENSORS_LM80 is not set ++# CONFIG_SENSORS_LM83 is not set ++# CONFIG_SENSORS_LM85 is not set ++# CONFIG_SENSORS_LM87 is not set ++# CONFIG_SENSORS_LM90 is not set ++# CONFIG_SENSORS_LM92 is not set ++# CONFIG_SENSORS_LM93 is not set ++# CONFIG_SENSORS_MAX1111 is not set ++# CONFIG_SENSORS_MAX1619 is not set ++# CONFIG_SENSORS_MAX6650 is not set ++# CONFIG_SENSORS_PC87360 is not set ++# CONFIG_SENSORS_PC87427 is not set ++# CONFIG_SENSORS_DME1737 is not set ++# CONFIG_SENSORS_SMSC47M1 is not set ++# CONFIG_SENSORS_SMSC47M192 is not set ++# CONFIG_SENSORS_SMSC47B397 is not set ++# CONFIG_SENSORS_ADS7828 is not set ++# CONFIG_SENSORS_THMC50 is not set ++# CONFIG_SENSORS_VT1211 is not set ++# CONFIG_SENSORS_W83781D is not set ++# CONFIG_SENSORS_W83791D is not set ++# CONFIG_SENSORS_W83792D is not set ++# CONFIG_SENSORS_W83793 is not set ++# CONFIG_SENSORS_W83L785TS is not set ++# CONFIG_SENSORS_W83L786NG is not set ++# CONFIG_SENSORS_W83627HF is not set ++# CONFIG_SENSORS_W83627EHF is not set ++# CONFIG_SENSORS_TSC210X is not set ++CONFIG_SENSORS_OMAP34XX=y ++# CONFIG_HWMON_DEBUG_CHIP is not set ++# CONFIG_THERMAL is not set ++# CONFIG_THERMAL_HWMON is not set ++CONFIG_WATCHDOG=y ++# CONFIG_WATCHDOG_NOWAYOUT is not set ++ ++# ++# Watchdog Device Drivers ++# ++# CONFIG_SOFT_WATCHDOG is not set ++CONFIG_OMAP_WATCHDOG=m ++ ++# ++# USB-based Watchdog Cards ++# ++# CONFIG_USBPCWATCHDOG is not set ++ ++# ++# Sonics Silicon Backplane ++# ++CONFIG_SSB_POSSIBLE=y ++# CONFIG_SSB is not set ++ ++# ++# Multifunction device drivers ++# ++# CONFIG_MFD_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_ASIC3 is not set ++# CONFIG_HTC_EGPIO is not set ++# CONFIG_HTC_PASIC3 is not set ++CONFIG_TWL4030_CORE=y ++# CONFIG_TWL4030_POWER is not set ++# CONFIG_MFD_TMIO is not set ++# CONFIG_MFD_T7L66XB is not set ++# CONFIG_MFD_TC6387XB is not set ++# CONFIG_MFD_TC6393XB is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM8350_I2C is not set ++ ++# ++# Multimedia devices ++# ++ ++# ++# Multimedia core support ++# ++CONFIG_VIDEO_DEV=m ++CONFIG_VIDEO_V4L2_COMMON=m ++CONFIG_VIDEO_ALLOW_V4L1=y ++CONFIG_VIDEO_V4L1_COMPAT=y ++# CONFIG_DVB_CORE is not set ++CONFIG_VIDEO_MEDIA=m ++ ++# ++# Multimedia drivers ++# ++# CONFIG_MEDIA_ATTACH is not set ++CONFIG_MEDIA_TUNER=m ++# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set ++CONFIG_MEDIA_TUNER_SIMPLE=m ++CONFIG_MEDIA_TUNER_TDA8290=m ++CONFIG_MEDIA_TUNER_TDA9887=m ++CONFIG_MEDIA_TUNER_TEA5761=m ++CONFIG_MEDIA_TUNER_TEA5767=m ++CONFIG_MEDIA_TUNER_MT20XX=m 
++CONFIG_MEDIA_TUNER_XC2028=m ++CONFIG_MEDIA_TUNER_XC5000=m ++CONFIG_VIDEO_V4L2=m ++CONFIG_VIDEO_V4L1=m ++CONFIG_VIDEO_CAPTURE_DRIVERS=y ++# CONFIG_VIDEO_ADV_DEBUG is not set ++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set ++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y ++# CONFIG_VIDEO_VIVI is not set ++# CONFIG_VIDEO_CPIA is not set ++# CONFIG_VIDEO_CPIA2 is not set ++# CONFIG_VIDEO_SAA5246A is not set ++# CONFIG_VIDEO_SAA5249 is not set ++# CONFIG_SOC_CAMERA is not set ++CONFIG_V4L_USB_DRIVERS=y ++# CONFIG_USB_VIDEO_CLASS is not set ++# CONFIG_USB_GSPCA is not set ++# CONFIG_VIDEO_PVRUSB2 is not set ++# CONFIG_VIDEO_EM28XX is not set ++# CONFIG_VIDEO_USBVISION is not set ++# CONFIG_USB_VICAM is not set ++# CONFIG_USB_IBMCAM is not set ++# CONFIG_USB_KONICAWC is not set ++# CONFIG_USB_QUICKCAM_MESSENGER is not set ++# CONFIG_USB_ET61X251 is not set ++# CONFIG_VIDEO_OVCAMCHIP is not set ++# CONFIG_USB_OV511 is not set ++# CONFIG_USB_SE401 is not set ++# CONFIG_USB_SN9C102 is not set ++# CONFIG_USB_STV680 is not set ++# CONFIG_USB_ZC0301 is not set ++# CONFIG_USB_PWC is not set ++# CONFIG_USB_ZR364XX is not set ++# CONFIG_USB_STKWEBCAM is not set ++# CONFIG_USB_S2255 is not set ++CONFIG_RADIO_ADAPTERS=y ++# CONFIG_USB_DSBR is not set ++# CONFIG_USB_SI470X is not set ++# CONFIG_USB_MR800 is not set ++# CONFIG_DAB is not set ++ ++# ++# Graphics support ++# ++# CONFIG_VGASTATE is not set ++# CONFIG_VIDEO_OUTPUT_CONTROL is not set ++CONFIG_FB=y ++# CONFIG_FIRMWARE_EDID is not set ++# CONFIG_FB_DDC is not set ++# CONFIG_FB_BOOT_VESA_SUPPORT is not set ++CONFIG_FB_CFB_FILLRECT=y ++CONFIG_FB_CFB_COPYAREA=y ++CONFIG_FB_CFB_IMAGEBLIT=y ++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set ++# CONFIG_FB_SYS_FILLRECT is not set ++# CONFIG_FB_SYS_COPYAREA is not set ++# CONFIG_FB_SYS_IMAGEBLIT is not set ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++# CONFIG_FB_SYS_FOPS is not set ++# CONFIG_FB_SVGALIB is not set ++# CONFIG_FB_MACMODES is not set ++# CONFIG_FB_BACKLIGHT is not set ++# CONFIG_FB_MODE_HELPERS is not set ++# CONFIG_FB_TILEBLITTING is not set ++ ++# ++# Frame buffer hardware drivers ++# ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_MB862XX is not set ++CONFIG_FB_OMAP=y ++# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set ++# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set ++CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=6 ++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set ++ ++# ++# Display device support ++# ++CONFIG_DISPLAY_SUPPORT=y ++ ++# ++# Display hardware drivers ++# ++ ++# ++# Console display driver support ++# ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_DUMMY_CONSOLE=y ++# CONFIG_FRAMEBUFFER_CONSOLE is not set ++# CONFIG_LOGO is not set ++CONFIG_SOUND=y ++# CONFIG_SOUND_OSS_CORE is not set ++CONFIG_SND=y ++CONFIG_SND_TIMER=y ++CONFIG_SND_PCM=y ++# CONFIG_SND_SEQUENCER is not set ++# CONFIG_SND_MIXER_OSS is not set ++# CONFIG_SND_PCM_OSS is not set ++# CONFIG_SND_DYNAMIC_MINORS is not set ++CONFIG_SND_SUPPORT_OLD_API=y ++CONFIG_SND_VERBOSE_PROCFS=y ++# CONFIG_SND_VERBOSE_PRINTK is not set ++# CONFIG_SND_DEBUG is not set ++CONFIG_SND_DRIVERS=y ++# CONFIG_SND_DUMMY is not set ++# CONFIG_SND_MTPAV is not set ++# CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_MPU401 is not set ++CONFIG_SND_ARM=y ++CONFIG_SND_SPI=y ++# CONFIG_SND_USB is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_OMAP_SOC=y ++# CONFIG_SND_SOC_ALL_CODECS is not set ++# CONFIG_SOUND_PRIME is not set ++CONFIG_HID_SUPPORT=y ++CONFIG_HID=m ++# CONFIG_HID_DEBUG is not set ++# CONFIG_HIDRAW is not set ++ ++# ++# 
USB Input Devices ++# ++CONFIG_USB_HID=m ++# CONFIG_HID_PID is not set ++# CONFIG_USB_HIDDEV is not set ++ ++# ++# USB HID Boot Protocol drivers ++# ++# CONFIG_USB_KBD is not set ++# CONFIG_USB_MOUSE is not set ++ ++# ++# Special HID drivers ++# ++CONFIG_HID_COMPAT=y ++CONFIG_HID_A4TECH=m ++CONFIG_HID_APPLE=m ++CONFIG_HID_BELKIN=m ++CONFIG_HID_BRIGHT=m ++CONFIG_HID_CHERRY=m ++CONFIG_HID_CHICONY=m ++CONFIG_HID_CYPRESS=m ++CONFIG_HID_DELL=m ++CONFIG_HID_EZKEY=m ++CONFIG_HID_GYRATION=m ++CONFIG_HID_LOGITECH=m ++# CONFIG_LOGITECH_FF is not set ++# CONFIG_LOGIRUMBLEPAD2_FF is not set ++CONFIG_HID_MICROSOFT=m ++CONFIG_HID_MONTEREY=m ++CONFIG_HID_PANTHERLORD=m ++# CONFIG_PANTHERLORD_FF is not set ++CONFIG_HID_PETALYNX=m ++CONFIG_HID_SAMSUNG=m ++CONFIG_HID_SONY=m ++CONFIG_HID_SUNPLUS=m ++# CONFIG_THRUSTMASTER_FF is not set ++# CONFIG_ZEROPLUS_FF is not set ++CONFIG_USB_SUPPORT=y ++CONFIG_USB_ARCH_HAS_HCD=y ++CONFIG_USB_ARCH_HAS_OHCI=y ++CONFIG_USB_ARCH_HAS_EHCI=y ++CONFIG_USB=m ++CONFIG_USB_DEBUG=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++ ++# ++# Miscellaneous USB options ++# ++CONFIG_USB_DEVICEFS=y ++CONFIG_USB_DEVICE_CLASS=y ++# CONFIG_USB_DYNAMIC_MINORS is not set ++CONFIG_USB_SUSPEND=y ++CONFIG_USB_OTG=y ++CONFIG_USB_OTG_WHITELIST=y ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++# CONFIG_USB_WUSB is not set ++# CONFIG_USB_WUSB_CBAF is not set ++ ++# ++# USB Host Controller Drivers ++# ++# CONFIG_USB_C67X00_HCD is not set ++# CONFIG_USB_EHCI_HCD is not set ++# CONFIG_USB_ISP116X_HCD is not set ++# CONFIG_USB_ISP1760_HCD is not set ++# CONFIG_USB_OHCI_HCD is not set ++# CONFIG_USB_SL811_HCD is not set ++# CONFIG_USB_R8A66597_HCD is not set ++# CONFIG_USB_HWA_HCD is not set ++CONFIG_USB_MUSB_HDRC=m ++CONFIG_USB_MUSB_SOC=y ++ ++# ++# OMAP 343x high speed USB support ++# ++# CONFIG_USB_MUSB_HOST is not set ++# CONFIG_USB_MUSB_PERIPHERAL is not set ++CONFIG_USB_MUSB_OTG=y ++CONFIG_USB_GADGET_MUSB_HDRC=y ++CONFIG_USB_MUSB_HDRC_HCD=y ++# CONFIG_MUSB_PIO_ONLY is not set ++CONFIG_USB_INVENTRA_DMA=y ++# CONFIG_USB_TI_CPPI_DMA is not set ++# CONFIG_USB_MUSB_DEBUG is not set ++ ++# ++# USB Device Class drivers ++# ++# CONFIG_USB_ACM is not set ++# CONFIG_USB_PRINTER is not set ++# CONFIG_USB_WDM is not set ++# CONFIG_USB_TMC is not set ++ ++# ++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' ++# ++ ++# ++# may also be needed; see USB_STORAGE Help for more information ++# ++CONFIG_USB_STORAGE=m ++# CONFIG_USB_STORAGE_DEBUG is not set ++# CONFIG_USB_STORAGE_DATAFAB is not set ++# CONFIG_USB_STORAGE_FREECOM is not set ++# CONFIG_USB_STORAGE_ISD200 is not set ++# CONFIG_USB_STORAGE_DPCM is not set ++# CONFIG_USB_STORAGE_USBAT is not set ++# CONFIG_USB_STORAGE_SDDR09 is not set ++# CONFIG_USB_STORAGE_SDDR55 is not set ++# CONFIG_USB_STORAGE_JUMPSHOT is not set ++# CONFIG_USB_STORAGE_ALAUDA is not set ++# CONFIG_USB_STORAGE_ONETOUCH is not set ++# CONFIG_USB_STORAGE_KARMA is not set ++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set ++CONFIG_USB_LIBUSUAL=y ++ ++# ++# USB Imaging devices ++# ++# CONFIG_USB_MDC800 is not set ++# CONFIG_USB_MICROTEK is not set ++ ++# ++# USB port drivers ++# ++# CONFIG_USB_SERIAL is not set ++ ++# ++# USB Miscellaneous drivers ++# ++# CONFIG_USB_EMI62 is not set ++# CONFIG_USB_EMI26 is not set ++# CONFIG_USB_ADUTUX is not set ++# CONFIG_USB_SEVSEG is not set ++# CONFIG_USB_RIO500 is not set ++# CONFIG_USB_LEGOTOWER is not set ++# CONFIG_USB_LCD is not set ++# CONFIG_USB_BERRY_CHARGE is not set ++# CONFIG_USB_LED is not set ++# CONFIG_USB_CYPRESS_CY7C63 is not set ++# 
CONFIG_USB_CYTHERM is not set ++# CONFIG_USB_PHIDGET is not set ++# CONFIG_USB_IDMOUSE is not set ++# CONFIG_USB_FTDI_ELAN is not set ++# CONFIG_USB_APPLEDISPLAY is not set ++# CONFIG_USB_LD is not set ++# CONFIG_USB_TRANCEVIBRATOR is not set ++# CONFIG_USB_IOWARRIOR is not set ++CONFIG_USB_TEST=m ++# CONFIG_USB_ISIGHTFW is not set ++# CONFIG_USB_VST is not set ++CONFIG_USB_GADGET=m ++CONFIG_USB_GADGET_DEBUG=y ++CONFIG_USB_GADGET_DEBUG_FILES=y ++CONFIG_USB_GADGET_DEBUG_FS=y ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_SELECTED=y ++# CONFIG_USB_GADGET_AT91 is not set ++# CONFIG_USB_GADGET_ATMEL_USBA is not set ++# CONFIG_USB_GADGET_FSL_USB2 is not set ++# CONFIG_USB_GADGET_LH7A40X is not set ++# CONFIG_USB_GADGET_OMAP is not set ++# CONFIG_USB_GADGET_PXA25X is not set ++# CONFIG_USB_GADGET_PXA27X is not set ++# CONFIG_USB_GADGET_S3C2410 is not set ++# CONFIG_USB_GADGET_M66592 is not set ++# CONFIG_USB_GADGET_AMD5536UDC is not set ++# CONFIG_USB_GADGET_FSL_QE is not set ++# CONFIG_USB_GADGET_NET2280 is not set ++# CONFIG_USB_GADGET_GOKU is not set ++# CONFIG_USB_GADGET_DUMMY_HCD is not set ++CONFIG_USB_GADGET_DUALSPEED=y ++CONFIG_USB_ZERO=m ++# CONFIG_USB_ZERO_HNPTEST is not set ++# CONFIG_USB_ETH is not set ++# CONFIG_USB_GADGETFS is not set ++CONFIG_USB_FILE_STORAGE=m ++# CONFIG_USB_FILE_STORAGE_TEST is not set ++# CONFIG_USB_G_SERIAL is not set ++# CONFIG_USB_MIDI_GADGET is not set ++# CONFIG_USB_G_PRINTER is not set ++# CONFIG_USB_CDC_COMPOSITE is not set ++CONFIG_MMC=m ++# CONFIG_MMC_DEBUG is not set ++# CONFIG_MMC_UNSAFE_RESUME is not set ++ ++# ++# MMC/SD/SDIO Card Drivers ++# ++CONFIG_MMC_BLOCK=m ++CONFIG_MMC_BLOCK_BOUNCE=y ++# CONFIG_SDIO_UART is not set ++# CONFIG_MMC_TEST is not set ++ ++# ++# MMC/SD/SDIO Host Controller Drivers ++# ++# CONFIG_MMC_SDHCI is not set ++CONFIG_MMC_OMAP_HS=m ++# CONFIG_MMC_SPI is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_ACCESSIBILITY is not set ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=m ++ ++# ++# LED drivers ++# ++# CONFIG_LEDS_OMAP is not set ++# CONFIG_LEDS_OMAP_PWM is not set ++# CONFIG_LEDS_PCA9532 is not set ++# CONFIG_LEDS_GPIO is not set ++# CONFIG_LEDS_PCA955X is not set ++ ++# ++# LED Triggers ++# ++# CONFIG_LEDS_TRIGGERS is not set ++CONFIG_RTC_LIB=y ++CONFIG_RTC_CLASS=m ++ ++# ++# RTC interfaces ++# ++CONFIG_RTC_INTF_SYSFS=y ++CONFIG_RTC_INTF_PROC=y ++CONFIG_RTC_INTF_DEV=y ++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set ++# CONFIG_RTC_DRV_TEST is not set ++ ++# ++# I2C RTC drivers ++# ++# CONFIG_RTC_DRV_DS1307 is not set ++# CONFIG_RTC_DRV_DS1374 is not set ++# CONFIG_RTC_DRV_DS1672 is not set ++# CONFIG_RTC_DRV_MAX6900 is not set ++# CONFIG_RTC_DRV_RS5C372 is not set ++# CONFIG_RTC_DRV_ISL1208 is not set ++# CONFIG_RTC_DRV_X1205 is not set ++# CONFIG_RTC_DRV_PCF8563 is not set ++# CONFIG_RTC_DRV_PCF8583 is not set ++# CONFIG_RTC_DRV_M41T80 is not set ++CONFIG_RTC_DRV_TWL4030=m ++# CONFIG_RTC_DRV_S35390A is not set ++# CONFIG_RTC_DRV_FM3130 is not set ++ ++# ++# SPI RTC drivers ++# ++# CONFIG_RTC_DRV_M41T94 is not set ++# CONFIG_RTC_DRV_DS1305 is not set ++# CONFIG_RTC_DRV_MAX6902 is not set ++# CONFIG_RTC_DRV_R9701 is not set ++# CONFIG_RTC_DRV_RS5C348 is not set ++# CONFIG_RTC_DRV_DS3234 is not set ++ ++# ++# Platform RTC drivers ++# ++# CONFIG_RTC_DRV_CMOS is not set ++# CONFIG_RTC_DRV_DS1286 is not set ++# CONFIG_RTC_DRV_DS1511 is not set ++# CONFIG_RTC_DRV_DS1553 is not set ++# CONFIG_RTC_DRV_DS1742 is not set ++# CONFIG_RTC_DRV_STK17TA8 is not set ++# CONFIG_RTC_DRV_M48T86 is not set ++# CONFIG_RTC_DRV_M48T35 is not set ++# 
CONFIG_RTC_DRV_M48T59 is not set ++# CONFIG_RTC_DRV_BQ4802 is not set ++# CONFIG_RTC_DRV_V3020 is not set ++ ++# ++# on-CPU RTC drivers ++# ++# CONFIG_DMADEVICES is not set ++# CONFIG_REGULATOR is not set ++# CONFIG_UIO is not set ++ ++# ++# CBUS support ++# ++# CONFIG_CBUS is not set ++ ++# ++# File systems ++# ++CONFIG_EXT2_FS=m ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XIP is not set ++CONFIG_EXT3_FS=m ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT4_FS is not set ++CONFIG_JBD=m ++# CONFIG_JBD_DEBUG is not set ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++# CONFIG_FS_POSIX_ACL is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_XFS_FS is not set ++# CONFIG_OCFS2_FS is not set ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY=y ++CONFIG_INOTIFY_USER=y ++CONFIG_QUOTA=y ++# CONFIG_QUOTA_NETLINK_INTERFACE is not set ++CONFIG_PRINT_QUOTA_WARNING=y ++# CONFIG_QFMT_V1 is not set ++CONFIG_QFMT_V2=y ++CONFIG_QUOTACTL=y ++# CONFIG_AUTOFS_FS is not set ++# CONFIG_AUTOFS4_FS is not set ++CONFIG_FUSE_FS=m ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++ ++# ++# DOS/FAT/NT Filesystems ++# ++CONFIG_FAT_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_CODEPAGE=437 ++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" ++# CONFIG_NTFS_FS is not set ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++# CONFIG_TMPFS_POSIX_ACL is not set ++# CONFIG_HUGETLB_PAGE is not set ++# CONFIG_CONFIGFS_FS is not set ++ ++# ++# Miscellaneous filesystems ++# ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_JFFS2_FS is not set ++CONFIG_UBIFS_FS=y ++# CONFIG_UBIFS_FS_XATTR is not set ++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set ++CONFIG_UBIFS_FS_LZO=y ++CONFIG_UBIFS_FS_ZLIB=y ++# CONFIG_UBIFS_FS_DEBUG is not set ++CONFIG_CRAMFS=y ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++# CONFIG_NFS_V3_ACL is not set ++CONFIG_NFS_V4=y ++# CONFIG_NFSD is not set ++CONFIG_LOCKD=m ++CONFIG_LOCKD_V4=y ++CONFIG_NFS_COMMON=y ++CONFIG_SUNRPC=m ++CONFIG_SUNRPC_GSS=m ++# CONFIG_SUNRPC_REGISTER_V4 is not set ++CONFIG_RPCSEC_GSS_KRB5=m ++# CONFIG_RPCSEC_GSS_SPKM3 is not set ++# CONFIG_SMB_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_NCP_FS is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++# CONFIG_EFI_PARTITION is not set ++# CONFIG_SYSV68_PARTITION is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" 
++CONFIG_NLS_CODEPAGE_437=y ++# CONFIG_NLS_CODEPAGE_737 is not set ++# CONFIG_NLS_CODEPAGE_775 is not set ++# CONFIG_NLS_CODEPAGE_850 is not set ++# CONFIG_NLS_CODEPAGE_852 is not set ++# CONFIG_NLS_CODEPAGE_855 is not set ++# CONFIG_NLS_CODEPAGE_857 is not set ++# CONFIG_NLS_CODEPAGE_860 is not set ++# CONFIG_NLS_CODEPAGE_861 is not set ++# CONFIG_NLS_CODEPAGE_862 is not set ++# CONFIG_NLS_CODEPAGE_863 is not set ++# CONFIG_NLS_CODEPAGE_864 is not set ++# CONFIG_NLS_CODEPAGE_865 is not set ++# CONFIG_NLS_CODEPAGE_866 is not set ++# CONFIG_NLS_CODEPAGE_869 is not set ++# CONFIG_NLS_CODEPAGE_936 is not set ++# CONFIG_NLS_CODEPAGE_950 is not set ++# CONFIG_NLS_CODEPAGE_932 is not set ++# CONFIG_NLS_CODEPAGE_949 is not set ++# CONFIG_NLS_CODEPAGE_874 is not set ++# CONFIG_NLS_ISO8859_8 is not set ++# CONFIG_NLS_CODEPAGE_1250 is not set ++# CONFIG_NLS_CODEPAGE_1251 is not set ++# CONFIG_NLS_ASCII is not set ++CONFIG_NLS_ISO8859_1=y ++# CONFIG_NLS_ISO8859_2 is not set ++# CONFIG_NLS_ISO8859_3 is not set ++# CONFIG_NLS_ISO8859_4 is not set ++# CONFIG_NLS_ISO8859_5 is not set ++# CONFIG_NLS_ISO8859_6 is not set ++# CONFIG_NLS_ISO8859_7 is not set ++# CONFIG_NLS_ISO8859_9 is not set ++# CONFIG_NLS_ISO8859_13 is not set ++# CONFIG_NLS_ISO8859_14 is not set ++# CONFIG_NLS_ISO8859_15 is not set ++# CONFIG_NLS_KOI8_R is not set ++# CONFIG_NLS_KOI8_U is not set ++# CONFIG_NLS_UTF8 is not set ++# CONFIG_DLM is not set ++ ++# ++# Kernel hacking ++# ++# CONFIG_PRINTK_TIME is not set ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_WARN=1024 ++CONFIG_MAGIC_SYSRQ=y ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_HEADERS_CHECK is not set ++CONFIG_DEBUG_KERNEL=y ++# CONFIG_DEBUG_SHIRQ is not set ++CONFIG_DETECT_SOFTLOCKUP=y ++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 ++CONFIG_SCHED_DEBUG=y ++# CONFIG_SCHEDSTATS is not set ++CONFIG_TIMER_STATS=y ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_SLAB is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_RT_MUTEX_TESTER is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++CONFIG_DEBUG_MUTEXES=y ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++CONFIG_DEBUG_SPINLOCK_SLEEP=y ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_DEBUG_KOBJECT is not set ++# CONFIG_DEBUG_BUGVERBOSE is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_WRITECOUNT is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_LIST is not set ++# CONFIG_DEBUG_SG is not set ++CONFIG_FRAME_POINTER=y ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_KPROBES_SANITY_TEST is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set ++# CONFIG_LKDTM is not set ++# CONFIG_FAULT_INJECTION is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_HAVE_FUNCTION_TRACER=y ++ ++# ++# Tracers ++# ++# CONFIG_FUNCTION_TRACER is not set ++# CONFIG_IRQSOFF_TRACER is not set ++# CONFIG_SCHED_TRACER is not set ++# CONFIG_CONTEXT_SWITCH_TRACER is not set ++# CONFIG_BOOT_TRACER is not set ++# CONFIG_STACK_TRACER is not set ++# CONFIG_DYNAMIC_PRINTK_DEBUG is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++# CONFIG_DEBUG_USER is not set ++# CONFIG_DEBUG_ERRORS is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_DEBUG_LL is not set ++ ++# ++# 
Security options ++# ++# CONFIG_KEYS is not set ++CONFIG_SECURITY=y ++# CONFIG_SECURITYFS is not set ++# CONFIG_SECURITY_NETWORK is not set ++# CONFIG_SECURITY_FILE_CAPABILITIES is not set ++CONFIG_SECURITY_LOWMEM=y ++CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++# CONFIG_CRYPTO_FIPS is not set ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_AEAD=y ++CONFIG_CRYPTO_BLKCIPHER=y ++CONFIG_CRYPTO_HASH=y ++CONFIG_CRYPTO_RNG=y ++CONFIG_CRYPTO_MANAGER=y ++# CONFIG_CRYPTO_GF128MUL is not set ++# CONFIG_CRYPTO_NULL is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# CONFIG_CRYPTO_TEST is not set ++ ++# ++# Authenticated Encryption with Associated Data ++# ++# CONFIG_CRYPTO_CCM is not set ++# CONFIG_CRYPTO_GCM is not set ++# CONFIG_CRYPTO_SEQIV is not set ++ ++# ++# Block modes ++# ++CONFIG_CRYPTO_CBC=y ++# CONFIG_CRYPTO_CTR is not set ++# CONFIG_CRYPTO_CTS is not set ++CONFIG_CRYPTO_ECB=y ++# CONFIG_CRYPTO_LRW is not set ++CONFIG_CRYPTO_PCBC=m ++# CONFIG_CRYPTO_XTS is not set ++ ++# ++# Hash modes ++# ++# CONFIG_CRYPTO_HMAC is not set ++# CONFIG_CRYPTO_XCBC is not set ++ ++# ++# Digest ++# ++# CONFIG_CRYPTO_CRC32C is not set ++# CONFIG_CRYPTO_MD4 is not set ++CONFIG_CRYPTO_MD5=y ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_RMD128 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_RMD256 is not set ++# CONFIG_CRYPTO_RMD320 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++# CONFIG_CRYPTO_SHA256 is not set ++# CONFIG_CRYPTO_SHA512 is not set ++# CONFIG_CRYPTO_TGR192 is not set ++# CONFIG_CRYPTO_WP512 is not set ++ ++# ++# Ciphers ++# ++CONFIG_CRYPTO_AES=y ++# CONFIG_CRYPTO_ANUBIS is not set ++CONFIG_CRYPTO_ARC4=y ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++CONFIG_CRYPTO_DES=y ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_KHAZAD is not set ++# CONFIG_CRYPTO_SALSA20 is not set ++# CONFIG_CRYPTO_SEED is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_TEA is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=y ++ ++# ++# Random Number Generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_HW=y ++ ++# ++# Library routines ++# ++CONFIG_BITREVERSE=y ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC_ITU_T is not set ++CONFIG_CRC32=y ++CONFIG_CRC7=m ++CONFIG_LIBCRC32C=y ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_PLIST=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT=y ++CONFIG_HAS_DMA=y +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/cache.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/cache.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/cache.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/cache.h 2011-09-04 11:31:05.000000000 +0200 +@@ -4,7 +4,23 @@ + #ifndef __ASMARM_CACHE_H + #define __ASMARM_CACHE_H + +-#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_SHIFT 6 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + ++/* ++ * Memory returned by kmalloc() may be used for DMA, so we must make ++ * sure that all such allocations are cache aligned. Otherwise, ++ * unrelated code may cause parts of the buffer to be read into the ++ * cache before the transfer is done, causing old data to be seen by ++ * the CPU. 
++ */ ++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES ++ ++/* ++ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. ++ */ ++#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) ++#define ARCH_SLAB_MINALIGN 8 ++#endif ++ + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/elf.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/elf.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/elf.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/elf.h 2011-09-04 11:31:05.000000000 +0200 +@@ -45,11 +45,13 @@ typedef struct user_fp elf_fpregset_t; + #define EF_ARM_HASENTRY 0x00000002 /* All */ + #define EF_ARM_RELEXEC 0x00000001 /* All */ + +-#define R_ARM_NONE 0 +-#define R_ARM_PC24 1 +-#define R_ARM_ABS32 2 +-#define R_ARM_CALL 28 +-#define R_ARM_JUMP24 29 ++#define R_ARM_NONE 0 ++#define R_ARM_PC24 1 ++#define R_ARM_ABS32 2 ++#define R_ARM_CALL 28 ++#define R_ARM_JUMP24 29 ++#define R_ARM_MOVW_ABS_NC 43 ++#define R_ARM_MOVT_ABS 44 + + /* + * These are used to set parameters in the core dumps. +@@ -94,6 +96,10 @@ extern int arm_elf_read_implies_exec(con + #define USE_ELF_CORE_DUMP + #define ELF_EXEC_PAGESIZE 4096 + ++int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); ++ ++#define ELF_CORE_COPY_TASK_REGS dump_task_regs ++ + /* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/hwcap.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/hwcap.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/hwcap.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/hwcap.h 2011-09-04 11:31:05.000000000 +0200 +@@ -16,6 +16,9 @@ + #define HWCAP_IWMMXT 512 + #define HWCAP_CRUNCH 1024 + #define HWCAP_THUMBEE 2048 ++#define HWCAP_NEON 4096 ++#define HWCAP_VFPv3 8192 ++#define HWCAP_VFPv3D16 16384 + + #if defined(__KERNEL__) && !defined(__ASSEMBLY__) + /* +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/io.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/io.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/io.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/io.h 2011-09-04 11:31:05.000000000 +0200 +@@ -75,6 +75,12 @@ extern void __iomem * __arm_ioremap(unsi + extern void __iounmap(volatile void __iomem *addr); + + /* ++ * external interface to remap single page with appropriate type ++ */ ++extern int ioremap_page(unsigned long virt, unsigned long phys, ++ unsigned int mtype); ++ ++/* + * Bad read/write accesses... 
+ */ + extern void __readwrite_bug(const char *fn); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/mach/map.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/mach/map.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/mach/map.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/mach/map.h 2011-09-04 11:31:05.000000000 +0200 +@@ -26,6 +26,7 @@ struct map_desc { + #define MT_HIGH_VECTORS 8 + #define MT_MEMORY 9 + #define MT_ROM 10 ++#define MT_MEMORY_NONCACHED 11 + + #ifdef CONFIG_MMU + extern void iotable_init(struct map_desc *, int); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/page.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/page.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/page.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/page.h 2011-09-04 11:31:05.000000000 +0200 +@@ -188,13 +188,6 @@ typedef struct page *pgtable_t; + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +-/* +- * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. +- */ +-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +-#define ARCH_SLAB_MINALIGN 8 +-#endif +- + #include + + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/ptrace.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/ptrace.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/ptrace.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/ptrace.h 2011-09-04 11:31:05.000000000 +0200 +@@ -27,6 +27,8 @@ + /* PTRACE_SYSCALL is 24 */ + #define PTRACE_GETCRUNCHREGS 25 + #define PTRACE_SETCRUNCHREGS 26 ++#define PTRACE_GETVFPREGS 27 ++#define PTRACE_SETVFPREGS 28 + + /* + * PSR bits +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/scatterlist.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/scatterlist.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/scatterlist.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/scatterlist.h 2011-09-04 11:31:05.000000000 +0200 +@@ -24,4 +24,6 @@ struct scatterlist { + #define sg_dma_address(sg) ((sg)->dma_address) + #define sg_dma_len(sg) ((sg)->length) + ++#define ARCH_HAS_SG_CHAIN ++ + #endif /* _ASMARM_SCATTERLIST_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/thread_info.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/thread_info.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/thread_info.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/thread_info.h 2011-09-04 11:31:05.000000000 +0200 +@@ -113,6 +113,8 @@ extern void iwmmxt_task_restore(struct t + extern void iwmmxt_task_release(struct thread_info *); + extern void iwmmxt_task_switch(struct thread_info *); + ++extern void vfp_sync_state(struct thread_info *thread); ++ + #endif + + /* +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/tlb.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/tlb.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/tlb.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/tlb.h 2011-09-04 11:31:05.000000000 +0200 +@@ -36,6 +36,8 @@ + struct mmu_gather { + struct mm_struct *mm; + unsigned int fullmm; ++ unsigned long range_start; ++ unsigned long range_end; + }; + + DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +@@ -63,7 +65,19 @@ tlb_finish_mmu(struct mmu_gather *tlb, u + put_cpu_var(mmu_gathers); + } + +-#define 
tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0) ++/* ++ * Memorize the range for the TLB flush. ++ */ ++static inline void ++tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) ++{ ++ if (!tlb->fullmm) { ++ if (addr < tlb->range_start) ++ tlb->range_start = addr; ++ if (addr + PAGE_SIZE > tlb->range_end) ++ tlb->range_end = addr + PAGE_SIZE; ++ } ++} + + /* + * In the case of tlb vma handling, we can optimise these away in the +@@ -73,15 +87,18 @@ tlb_finish_mmu(struct mmu_gather *tlb, u + static inline void + tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) + { +- if (!tlb->fullmm) ++ if (!tlb->fullmm) { + flush_cache_range(vma, vma->vm_start, vma->vm_end); ++ tlb->range_start = TASK_SIZE; ++ tlb->range_end = 0; ++ } + } + + static inline void + tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) + { +- if (!tlb->fullmm) +- flush_tlb_range(vma, vma->vm_start, vma->vm_end); ++ if (!tlb->fullmm && tlb->range_end > 0) ++ flush_tlb_range(vma, tlb->range_start, tlb->range_end); + } + + #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/ucontext.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/ucontext.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/ucontext.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/ucontext.h 2011-09-04 11:31:05.000000000 +0200 +@@ -59,23 +59,19 @@ struct iwmmxt_sigframe { + #endif /* CONFIG_IWMMXT */ + + #ifdef CONFIG_VFP +-#if __LINUX_ARM_ARCH__ < 6 +-/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra +- * word after the registers, and a word of padding at the end for +- * alignment. */ + #define VFP_MAGIC 0x56465001 +-#define VFP_STORAGE_SIZE 152 +-#else +-#define VFP_MAGIC 0x56465002 +-#define VFP_STORAGE_SIZE 144 +-#endif + + struct vfp_sigframe + { + unsigned long magic; + unsigned long size; +- union vfp_state storage; +-}; ++ struct user_vfp ufp; ++ unsigned long reserved; ++} __attribute__((__aligned__(8))); ++ ++/* 8 byte for magic and size, 260 byte for ufp and 4 byte padding */ ++#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe) ++ + #endif /* CONFIG_VFP */ + + /* +@@ -91,7 +87,7 @@ struct aux_sigframe { + #ifdef CONFIG_IWMMXT + struct iwmmxt_sigframe iwmmxt; + #endif +-#if 0 && defined CONFIG_VFP /* Not yet saved. */ ++#ifdef CONFIG_VFP + struct vfp_sigframe vfp; + #endif + /* Something that isn't a valid magic number for any coprocessor. */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/include/asm/user.h kernel-2.6.28-20093908+0m5/arch/arm/include/asm/user.h +--- linux-omap-2.6.28-omap1/arch/arm/include/asm/user.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/include/asm/user.h 2011-09-04 11:31:05.000000000 +0200 +@@ -81,4 +81,13 @@ struct user{ + #define HOST_TEXT_START_ADDR (u.start_code) + #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + ++/* ++ * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 ++ * are ignored by the ptrace system call. 
++ */ ++struct user_vfp { ++ unsigned long long fpregs[32]; ++ unsigned long fpscr; ++}; ++ + #endif /* _ARM_USER_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/Kconfig kernel-2.6.28-20093908+0m5/arch/arm/Kconfig +--- linux-omap-2.6.28-omap1/arch/arm/Kconfig 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/Kconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -161,6 +161,12 @@ config GENERIC_HARDIRQS_NO__DO_IRQ + + if OPROFILE + ++config OPROFILE_OMAP_GPTIMER ++ def_bool y ++ depends on ARCH_OMAP ++ select CONFIG_OMAP_32K_TIMER ++ select CONFIG_OMAP_DM_TIMER ++ + config OPROFILE_ARMV6 + def_bool y + depends on CPU_V6 && !SMP +@@ -1281,6 +1287,8 @@ source "drivers/cbus/Kconfig" + source "drivers/dsp/dspgateway/Kconfig" + endif + ++source "drivers/dsp/bridge/Kconfig" ++ + endmenu + + source "fs/Kconfig" +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/elf.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/elf.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/elf.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/elf.c 2011-09-04 11:31:05.000000000 +0200 +@@ -74,9 +74,9 @@ EXPORT_SYMBOL(elf_set_personality); + */ + int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) + { +- if (executable_stack != EXSTACK_ENABLE_X) ++ if (executable_stack != EXSTACK_DISABLE_X) + return 1; +- if (cpu_architecture() <= CPU_ARCH_ARMv6) ++ if (cpu_architecture() < CPU_ARCH_ARMv6) + return 1; + return 0; + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/entry-armv.S kernel-2.6.28-20093908+0m5/arch/arm/kernel/entry-armv.S +--- linux-omap-2.6.28-omap1/arch/arm/kernel/entry-armv.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/entry-armv.S 2011-09-04 11:31:05.000000000 +0200 +@@ -650,6 +650,7 @@ ENTRY(fp_enter) + no_fp: mov pc, lr + + __und_usr_unknown: ++ enable_irq + mov r0, sp + adr lr, ret_from_exception + b do_undefinstr +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/head.S kernel-2.6.28-20093908+0m5/arch/arm/kernel/head.S +--- linux-omap-2.6.28-omap1/arch/arm/kernel/head.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/head.S 2011-09-04 11:31:05.000000000 +0200 +@@ -282,7 +282,7 @@ __create_page_tables: + .endif + str r6, [r0] + +-#ifdef CONFIG_DEBUG_LL ++#if defined(CONFIG_DEBUG_LL) || defined(CONFIG_DEBUG_SPINLOCK) + ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags + /* + * Map in IO space for serial debugging. +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/irq.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/irq.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/irq.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -98,12 +98,6 @@ unlock: + return 0; + } + +-/* Handle bad interrupts */ +-static struct irq_desc bad_irq_desc = { +- .handle_irq = handle_bad_irq, +- .lock = SPIN_LOCK_UNLOCKED +-}; +- + /* + * do_IRQ handles all hardware IRQ's. Decoded IRQs should not + * come via this function. Instead, they should provide their +@@ -119,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(u + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. 
+ */ +- if (irq >= NR_IRQS) +- handle_bad_irq(irq, &bad_irq_desc); +- else ++ if (unlikely(irq >= NR_IRQS)) { ++ if (printk_ratelimit()) ++ printk(KERN_WARNING "Bad IRQ%u\n", irq); ++ ack_bad_irq(irq); ++ } else { + generic_handle_irq(irq); ++ } + + /* AT91 specific workaround */ + irq_finish(irq); +@@ -160,10 +157,6 @@ void __init init_IRQ(void) + for (irq = 0; irq < NR_IRQS; irq++) + irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; + +-#ifdef CONFIG_SMP +- bad_irq_desc.affinity = CPU_MASK_ALL; +- bad_irq_desc.cpu = smp_processor_id(); +-#endif + init_arch_irq(); + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/module.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/module.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/module.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/module.c 2011-09-04 11:31:05.000000000 +0200 +@@ -132,6 +132,21 @@ apply_relocate(Elf32_Shdr *sechdrs, cons + *(u32 *)loc |= offset & 0x00ffffff; + break; + ++ case R_ARM_MOVW_ABS_NC: ++ case R_ARM_MOVT_ABS: ++ offset = *(u32 *)loc; ++ offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff); ++ offset = (offset ^ 0x8000) - 0x8000; ++ ++ offset += sym->st_value; ++ if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS) ++ offset >>= 16; ++ ++ *(u32 *)loc &= 0xfff0f000; ++ *(u32 *)loc |= ((offset & 0xf000) << 4) | ++ (offset & 0x0fff); ++ break; ++ + default: + printk(KERN_ERR "%s: unknown relocation: %u\n", + module->name, ELF32_R_TYPE(rel->r_info)); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/process.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/process.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/process.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/process.c 2011-09-04 11:31:05.000000000 +0200 +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -199,6 +200,33 @@ void machine_restart(char * __unused) + arm_pm_restart(reboot_mode); + } + ++static ssize_t reboot_mode_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%c\n", reboot_mode); ++} ++ ++static ssize_t reboot_mode_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t n) ++{ ++ if (n < 1) ++ return -EINVAL; ++ reboot_mode = buf[0]; ++ ++ return n; ++} ++ ++static struct kobj_attribute reboot_mode_attr = ++ __ATTR(reboot_mode, 0644, reboot_mode_show, reboot_mode_store); ++ ++static int __init reboot_mode_sysfs_init(void) ++{ ++ return sysfs_create_file(kernel_kobj, &reboot_mode_attr.attr); ++} ++ ++__initcall(reboot_mode_sysfs_init); ++ + void __show_regs(struct pt_regs *regs) + { + unsigned long flags; +@@ -321,6 +349,15 @@ copy_thread(int nr, unsigned long clone_ + } + + /* ++ * Fill in the task's elfregs structure for a core dump ++ */ ++int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) ++{ ++ elf_core_copy_regs(elfregs, task_pt_regs(t)); ++ return 1; ++} ++ ++/* + * fill in the fpe structure for a core dump... + */ + int dump_fpu (struct pt_regs *regs, struct user_fp *fp) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/ptrace.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/ptrace.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/ptrace.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/ptrace.c 2011-09-04 11:31:05.000000000 +0200 +@@ -653,6 +653,54 @@ static int ptrace_setcrunchregs(struct t + } + #endif + ++#ifdef CONFIG_VFP ++/* ++ * Get the child VFP state. 
++ */ ++static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data) ++{ ++ struct thread_info *thread = task_thread_info(tsk); ++ union vfp_state *vfp = &thread->vfpstate; ++ struct user_vfp __user *ufp = data; ++ ++ vfp_sync_state(thread); ++ ++ /* copy the floating point registers */ ++ if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, ++ sizeof(vfp->hard.fpregs))) ++ return -EFAULT; ++ ++ /* copy the status and control register */ ++ if (put_user(vfp->hard.fpscr, &ufp->fpscr)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++/* ++ * Set the child VFP state. ++ */ ++static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) ++{ ++ struct thread_info *thread = task_thread_info(tsk); ++ union vfp_state *vfp = &thread->vfpstate; ++ struct user_vfp __user *ufp = data; ++ ++ vfp_sync_state(thread); ++ ++ /* copy the floating point registers */ ++ if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, ++ sizeof(vfp->hard.fpregs))) ++ return -EFAULT; ++ ++ /* copy the status and control register */ ++ if (get_user(vfp->hard.fpscr, &ufp->fpscr)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif ++ + long arch_ptrace(struct task_struct *child, long request, long addr, long data) + { + int ret; +@@ -775,6 +823,16 @@ long arch_ptrace(struct task_struct *chi + break; + #endif + ++#ifdef CONFIG_VFP ++ case PTRACE_GETVFPREGS: ++ ret = ptrace_getvfpregs(child, (void __user *)data); ++ break; ++ ++ case PTRACE_SETVFPREGS: ++ ret = ptrace_setvfpregs(child, (void __user *)data); ++ break; ++#endif ++ + default: + ret = ptrace_request(child, request, addr, data); + break; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/setup.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/setup.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/setup.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/setup.c 2011-09-04 11:31:05.000000000 +0200 +@@ -772,6 +772,10 @@ static const char *hwcap_str[] = { + "java", + "iwmmxt", + "crunch", ++ "thumbee", ++ "neon", ++ "vfpv3", ++ "vfpv3d16", + NULL + }; + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/kernel/signal.c kernel-2.6.28-20093908+0m5/arch/arm/kernel/signal.c +--- linux-omap-2.6.28-omap1/arch/arm/kernel/signal.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/kernel/signal.c 2011-09-04 11:31:05.000000000 +0200 +@@ -196,6 +196,67 @@ static int restore_iwmmxt_context(struct + + #endif + ++#ifdef CONFIG_VFP ++ ++static int preserve_vfp_context(struct vfp_sigframe __user *frame) ++{ ++ struct thread_info *thread = current_thread_info(); ++ struct vfp_hard_struct *h = &thread->vfpstate.hard; ++ const unsigned long magic = VFP_MAGIC; ++ const unsigned long size = VFP_STORAGE_SIZE; ++ int err = 0; ++ ++ vfp_sync_state(thread); ++ __put_user_error(magic, &frame->magic, err); ++ __put_user_error(size, &frame->size, err); ++ ++ /* ++ * Copy the floating point registers. There can be unused ++ * registers see asm/hwcap.h for details. ++ */ ++ err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, ++ sizeof(h->fpregs)); ++ /* ++ * Copy the status and control register. ++ */ ++ __put_user_error(h->fpscr, &frame->ufp.fpscr, err); ++ ++ return err ? 
-EFAULT : 0; ++} ++ ++static int restore_vfp_context(struct vfp_sigframe __user *frame) ++{ ++ struct thread_info *thread = current_thread_info(); ++ struct vfp_hard_struct *h = &thread->vfpstate.hard; ++ unsigned long magic; ++ unsigned long size; ++ int err = 0; ++ ++ vfp_sync_state(thread); ++ __get_user_error(magic, &frame->magic, err); ++ __get_user_error(size, &frame->size, err); ++ ++ if (err) ++ return -EFAULT; ++ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) ++ return -EINVAL; ++ ++ /* ++ * Copy the floating point registers. There can be unused ++ * registers see asm/hwcap.h for details. ++ */ ++ err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, ++ sizeof(h->fpregs)); ++ /* ++ * Copy the status and control register. ++ */ ++ __get_user_error(h->fpscr, &frame->ufp.fpscr, err); ++ ++ return err ? -EFAULT : 0; ++} ++ ++#endif ++ + /* + * Do a signal return; undo the signal stack. These are aligned to 64-bit. + */ +@@ -254,8 +315,8 @@ static int restore_sigframe(struct pt_re + err |= restore_iwmmxt_context(&aux->iwmmxt); + #endif + #ifdef CONFIG_VFP +-// if (err == 0) +-// err |= vfp_restore_state(&sf->aux.vfp); ++ if (err == 0) ++ err |= restore_vfp_context(&aux->vfp); + #endif + + return err; +@@ -369,8 +430,8 @@ setup_sigframe(struct sigframe __user *s + err |= preserve_iwmmxt_context(&aux->iwmmxt); + #endif + #ifdef CONFIG_VFP +-// if (err == 0) +-// err |= vfp_save_state(&sf->aux.vfp); ++ if (err == 0) ++ err |= preserve_vfp_context(&aux->vfp); + #endif + __put_user_error(0, &aux->end_magic, err); + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-davinci/usb.c kernel-2.6.28-20093908+0m5/arch/arm/mach-davinci/usb.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-davinci/usb.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-davinci/usb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -76,29 +76,6 @@ static struct platform_device usb_dev = + .num_resources = ARRAY_SIZE(usb_resources), + }; + +-#ifdef CONFIG_USB_MUSB_OTG +- +-static struct otg_transceiver *xceiv; +- +-struct otg_transceiver *otg_get_transceiver(void) +-{ +- if (xceiv) +- get_device(xceiv->dev); +- return xceiv; +-} +-EXPORT_SYMBOL(otg_get_transceiver); +- +-int otg_set_transceiver(struct otg_transceiver *x) +-{ +- if (xceiv && x) +- return -EBUSY; +- xceiv = x; +- return 0; +-} +-EXPORT_SYMBOL(otg_set_transceiver); +- +-#endif +- + void __init setup_usb(unsigned mA, unsigned potpgt_msec) + { + usb_data.power = mA / 2; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap1/board-nokia770.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap1/board-nokia770.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap1/board-nokia770.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap1/board-nokia770.c 2011-09-04 11:31:05.000000000 +0200 +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -32,7 +33,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap1/mcbsp.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap1/mcbsp.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap1/mcbsp.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap1/mcbsp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -26,81 +26,8 @@ + #define DPS_RSTCT2_PER_EN (1 << 0) + #define DSP_RSTCT2_WD_PER_EN (1 << 1) + +-struct mcbsp_internal_clk { +- struct clk clk; +- struct clk **childs; +- int n_childs; +-}; +- + #if defined(CONFIG_ARCH_OMAP15XX) || 
defined(CONFIG_ARCH_OMAP16XX) +-static void omap_mcbsp_clk_init(struct mcbsp_internal_clk *mclk) +-{ +- const char *clk_names[] = { "dsp_ck", "api_ck", "dspxor_ck" }; +- int i; +- +- mclk->n_childs = ARRAY_SIZE(clk_names); +- mclk->childs = kzalloc(mclk->n_childs * sizeof(struct clk *), +- GFP_KERNEL); +- +- for (i = 0; i < mclk->n_childs; i++) { +- /* We fake a platform device to get correct device id */ +- struct platform_device pdev; +- +- pdev.dev.bus = &platform_bus_type; +- pdev.id = mclk->clk.id; +- mclk->childs[i] = clk_get(&pdev.dev, clk_names[i]); +- if (IS_ERR(mclk->childs[i])) +- printk(KERN_ERR "Could not get clock %s (%d).\n", +- clk_names[i], mclk->clk.id); +- } +-} +- +-static int omap_mcbsp_clk_enable(struct clk *clk) +-{ +- struct mcbsp_internal_clk *mclk = container_of(clk, +- struct mcbsp_internal_clk, clk); +- int i; +- +- for (i = 0; i < mclk->n_childs; i++) +- clk_enable(mclk->childs[i]); +- return 0; +-} +- +-static void omap_mcbsp_clk_disable(struct clk *clk) +-{ +- struct mcbsp_internal_clk *mclk = container_of(clk, +- struct mcbsp_internal_clk, clk); +- int i; +- +- for (i = 0; i < mclk->n_childs; i++) +- clk_disable(mclk->childs[i]); +-} +- +-static struct mcbsp_internal_clk omap_mcbsp_clks[] = { +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 1, +- .enable = omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 3, +- .enable = omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +-}; +- +-#define omap_mcbsp_clks_size ARRAY_SIZE(omap_mcbsp_clks) +-#else +-#define omap_mcbsp_clks_size 0 +-static struct mcbsp_internal_clk __initdata *omap_mcbsp_clks; +-static inline void omap_mcbsp_clk_init(struct mcbsp_internal_clk *mclk) +-{ } ++const char *clk_names[] = { "dsp_ck", "api_ck", "dspxor_ck" }; + #endif + + static void omap1_mcbsp_request(unsigned int id) +@@ -165,8 +92,9 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_McBSP1RX, + .tx_irq = INT_McBSP1TX, + .ops = &omap1_mcbsp_ops, +- .clk_name = "mcbsp_clk", +- }, ++ .clk_names = clk_names, ++ .num_clks = 3, ++ }, + { + .phys_base = OMAP1510_MCBSP2_BASE, + .dma_rx_sync = OMAP_DMA_MCBSP2_RX, +@@ -182,7 +110,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_McBSP3RX, + .tx_irq = INT_McBSP3TX, + .ops = &omap1_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 3, + }, + }; + #define OMAP15XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap15xx_mcbsp_pdata) +@@ -200,7 +129,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_McBSP1RX, + .tx_irq = INT_McBSP1TX, + .ops = &omap1_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 3, + }, + { + .phys_base = OMAP1610_MCBSP2_BASE, +@@ -217,7 +147,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_McBSP3RX, + .tx_irq = INT_McBSP3TX, + .ops = &omap1_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 3, + }, + }; + #define OMAP16XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap16xx_mcbsp_pdata) +@@ -228,15 +159,6 @@ static struct omap_mcbsp_platform_data o + + int __init omap1_mcbsp_init(void) + { +- int i; +- +- for (i = 0; i < omap_mcbsp_clks_size; i++) { +- if (cpu_is_omap15xx() || cpu_is_omap16xx()) { +- omap_mcbsp_clk_init(&omap_mcbsp_clks[i]); +- clk_register(&omap_mcbsp_clks[i].clk); +- } +- } +- + if (cpu_is_omap730()) + omap_mcbsp_count = OMAP730_MCBSP_PDATA_SZ; + if (cpu_is_omap15xx()) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-apollon.c 
kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-apollon.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-apollon.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-apollon.c 2011-09-04 11:31:05.000000000 +0200 +@@ -323,7 +323,7 @@ out: + + static void __init omap_apollon_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + apollon_init_smc91x(); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-generic.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-generic.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-generic.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-generic.c 2011-09-04 11:31:05.000000000 +0200 +@@ -33,7 +33,7 @@ + + static void __init omap_generic_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-h4.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-h4.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-h4.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-h4.c 2011-09-04 11:31:05.000000000 +0200 +@@ -348,7 +348,7 @@ static void __init h4_init_flash(void) + + static void __init omap_h4_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + h4_init_flash(); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-ldp.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-ldp.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-ldp.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-ldp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -345,7 +345,7 @@ static inline void __init ldp_init_smc91 + + static void __init omap_ldp_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + ldp_init_smc911x(); +@@ -529,7 +529,7 @@ static void __init omap_ldp_init(void) + msecure_init(); + ads7846_dev_init(); + omap_serial_init(); +- usb_musb_init(); ++ usb_musb_init(NULL); + twl4030_mmc_init(mmc); + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-n800.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-n800.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-n800.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-n800.c 2011-09-04 11:31:05.000000000 +0200 +@@ -26,6 +26,8 @@ + #include + #include + #include ++#include ++ + #include + #include + #include +@@ -38,7 +40,6 @@ + #include + #include + #include +-#include + #include + + #include <../drivers/cbus/tahvo.h> +@@ -50,6 +51,9 @@ + #define N800_DAV_IRQ_GPIO 103 + #define N800_TSC2301_RESET_GPIO 118 + ++#define N810_TSC2005_IRQ_GPIO 106 ++#define N810_TSC2005_RESET_GPIO 94 ++ + #ifdef CONFIG_MACH_NOKIA_N810 + static s16 rx44_keymap[LM8323_KEYMAP_SIZE] = { + [0x01] = KEY_Q, +@@ -122,7 +126,7 @@ static struct lm8323_platform_data lm832 + + void __init nokia_n800_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + +@@ -305,7 +309,7 @@ static void tsc2301_dev_init(void) + gpio_direction_input(gpio); + tsc2301_config.keyb_int = gpio_to_irq(gpio); + 
} else { +- printk(KERN_ERR "unable to get KBD GPIO"); ++ printk(KERN_ERR "unable to get KBD GPIO\n"); + } + + gpio = N800_DAV_IRQ_GPIO; +@@ -314,7 +318,7 @@ static void tsc2301_dev_init(void) + gpio_direction_input(gpio); + tsc2301_config.dav_int = gpio_to_irq(gpio); + } else { +- printk(KERN_ERR "unable to get DAV GPIO"); ++ printk(KERN_ERR "unable to get DAV GPIO\n"); + } + } + +@@ -362,8 +366,9 @@ static struct omap2_mcspi_device_config + + #ifdef CONFIG_TOUCHSCREEN_TSC2005 + static struct tsc2005_platform_data tsc2005_config = { +- .reset_gpio = 94, +- .dav_gpio = 106 ++ .esd_timeout = 8*1000, /* ms of inactivity before we check */ ++ ++ .set_reset = NULL + }; + + static struct omap2_mcspi_device_config tsc2005_mcspi_config = { +@@ -464,6 +469,35 @@ static void __init tsc2005_set_config(vo + } + } + ++#ifdef CONFIG_TOUCHSCREEN_TSC2005 ++static void n810_tsc2005_set_reset(bool enable) ++{ ++ gpio_set_value(N810_TSC2005_RESET_GPIO, enable); ++} ++#endif ++ ++static void __init n810_init_tsc2005(void) ++{ ++#ifdef CONFIG_TOUCHSCREEN_TSC2005 ++ int r; ++ ++ r = gpio_request(N810_TSC2005_IRQ_GPIO, "tsc2005 DAV IRQ"); ++ if (r >= 0) ++ gpio_direction_input(N810_TSC2005_IRQ_GPIO); ++ else ++ printk(KERN_ERR "unable to get DAV GPIO\n"); ++ ++ r = gpio_request(N810_TSC2005_RESET_GPIO, "tsc2005 reset"); ++ if (r >= 0) { ++ gpio_direction_output(N810_TSC2005_RESET_GPIO, 1); ++ tsc2005_config.set_reset = n810_tsc2005_set_reset; ++ } else { ++ printk(KERN_ERR "unable to get tsc2005 reset GPIO\n"); ++ tsc2005_config.esd_timeout = 0; ++ } ++#endif ++} ++ + #if defined(CONFIG_CBUS_RETU) && defined(CONFIG_LEDS_OMAP_PWM) + + void retu_keypad_led_set_power(struct omap_pwm_led_platform_data *self, +@@ -675,6 +709,7 @@ void __init nokia_n800_common_init(void) + ARRAY_SIZE(n800_spi_board_info)); + if (machine_is_nokia_n810()) { + tsc2005_set_config(); ++ n810_init_tsc2005(); + spi_register_board_info(n810_spi_board_info, + ARRAY_SIZE(n810_spi_board_info)); + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-n800-flash.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-n800-flash.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-n800-flash.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-n800-flash.c 2011-09-04 11:31:05.000000000 +0200 +@@ -19,7 +19,7 @@ + #include + #include + +-struct mtd_partition n800_partitions[ONENAND_MAX_PARTITIONS]; ++struct mtd_partition n800_partitions[8]; + + int n800_onenand_setup(void __iomem *, int freq); + +@@ -38,9 +38,21 @@ static struct platform_device n800_onena + }, + }; + ++static unsigned short omap2_onenand_readw(void __iomem *addr) ++{ ++ return readw(addr); ++} ++ ++static void omap2_onenand_writew(unsigned short value, void __iomem *addr) ++{ ++ writew(value, addr); ++} ++ + static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base) + { + struct gpmc_timings t; ++ u32 reg; ++ int err; + + const int t_cer = 15; + const int t_avdp = 12; +@@ -53,6 +65,11 @@ static int omap2_onenand_set_async_mode( + const int t_wpl = 40; + const int t_wph = 30; + ++ /* Ensure sync read and sync write are disabled */ ++ reg = omap2_onenand_readw(onenand_base + ONENAND_REG_SYS_CFG1); ++ reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE; ++ omap2_onenand_writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); ++ + memset(&t, 0, sizeof(t)); + t.sync_clk = 0; + t.cs_on = 0; +@@ -84,17 +101,16 @@ static int omap2_onenand_set_async_mode( + GPMC_CONFIG1_DEVICESIZE_16 | + 
GPMC_CONFIG1_MUXADDDATA); + +- return gpmc_cs_set_timings(cs, &t); +-} +- +-static unsigned short omap2_onenand_readw(void __iomem *addr) +-{ +- return readw(addr); +-} ++ err = gpmc_cs_set_timings(cs, &t); ++ if (err) ++ return err; ++ ++ /* Ensure sync read and sync write are disabled */ ++ reg = omap2_onenand_readw(onenand_base + ONENAND_REG_SYS_CFG1); ++ reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE; ++ omap2_onenand_writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); + +-static void omap2_onenand_writew(unsigned short value, void __iomem *addr) +-{ +- writew(value, addr); ++ return 0; + } + + static void set_onenand_cfg(void __iomem *onenand_base, int latency, +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap-bt.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap-bt.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap-bt.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap-bt.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,98 @@ ++/* ++ * Nokia RX-51 platform-specific data for Bluetooth ++ * ++ * Copyright (C) 2005, 2006 Nokia Corporation ++ * Contact: Ville Tervo ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++static struct platform_device omap_bt_device = { ++ .name = "hci_h4p", ++ .id = -1, ++ .num_resources = 0, ++}; ++ ++static ssize_t hci_h4p_store_bdaddr(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct omap_bluetooth_config *bt_config = ++ omap_bt_device.dev.platform_data; ++ unsigned int bdaddr[6]; ++ int ret, i; ++ ++ ret = sscanf(buf, "%2x:%2x:%2x:%2x:%2x:%2x\n", ++ &bdaddr[0], &bdaddr[1], &bdaddr[2], ++ &bdaddr[3], &bdaddr[4], &bdaddr[5]); ++ ++ if (ret != 6) ++ return -EINVAL; ++ ++ for (i = 0; i < 6; i++) ++ bt_config->bd_addr[i] = bdaddr[i] & 0xff; ++ ++ return count; ++} ++ ++static ssize_t hci_h4p_show_bdaddr(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct omap_bluetooth_config *bt_config = ++ omap_bt_device.dev.platform_data; ++ ++ return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", ++ bt_config->bd_addr[0], ++ bt_config->bd_addr[1], ++ bt_config->bd_addr[2], ++ bt_config->bd_addr[3], ++ bt_config->bd_addr[4], ++ bt_config->bd_addr[5]); ++} ++ ++static DEVICE_ATTR(bdaddr, S_IRUGO | S_IWUSR, hci_h4p_show_bdaddr, ++ hci_h4p_store_bdaddr); ++int hci_h4p_sysfs_create_files(struct device *dev) ++{ ++ return device_create_file(dev, &dev_attr_bdaddr); ++} ++ ++void __init omap_bt_init(struct omap_bluetooth_config *bt_config) ++{ ++ int err; ++ omap_bt_device.dev.platform_data = bt_config; ++ ++ err = platform_device_register(&omap_bt_device); ++ if (err < 0) { ++ printk(KERN_ERR "Omap bluetooth device registration failed\n"); ++ return; ++ } ++ ++ err = 
hci_h4p_sysfs_create_files(&omap_bt_device.dev); ++ if (err < 0) { ++ dev_err(&omap_bt_device.dev, ++ "Omap bluetooth sysfs entry registration failed\n"); ++ platform_device_unregister(&omap_bt_device); ++ return; ++ } ++} +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap2evm.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap2evm.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap2evm.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap2evm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -277,7 +277,7 @@ static struct twl4030_keypad_data omap2e + + static void __init omap2_evm_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + omap2evm_init_smc911x(); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap3beagle.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap3beagle.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap3beagle.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap3beagle.c 2011-09-04 11:31:05.000000000 +0200 +@@ -43,10 +43,13 @@ + #include + #include + #include ++#include ++#include + + #include "twl4030-generic-scripts.h" + #include "mmc-twl4030.h" +- ++#include "pm.h" ++#include "omap3-opp.h" + + #define GPMC_CS0_BASE 0x60 + #define GPMC_CS_SIZE 0x30 +@@ -233,7 +236,11 @@ static int __init omap3_beagle_i2c_init( + + static void __init omap3_beagle_init_irq(void) + { +- omap2_init_common_hw(mt46h32m32lf6_sdrc_params); ++ omap2_init_common_hw(mt46h32m32lf6_sdrc_params, ++ mt46h32m32lf6_sdrc_params, ++ omap3_mpu_rate_table, ++ omap3_dsp_rate_table, ++ omap3_l3_rate_table); + omap_init_irq(); + omap_gpio_init(); + } +@@ -363,9 +370,13 @@ static void __init omap3_beagle_init(voi + /* REVISIT leave DVI powered down until it's needed ... 
*/ + gpio_direction_output(170, true); + +- usb_musb_init(); ++ usb_musb_init(NULL); + usb_ehci_init(); + omap3beagle_flash_init(); ++ ++ /* Ensure SDRC pins are mux'd for self-refresh */ ++ omap_cfg_reg(H16_34XX_SDRC_CKE0); ++ omap_cfg_reg(H17_34XX_SDRC_CKE1); + } + + static void __init omap3_beagle_map_io(void) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap3evm.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap3evm.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap3evm.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap3evm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -217,7 +217,10 @@ struct spi_board_info omap3evm_spi_board + + static void __init omap3_evm_init_irq(void) + { +- omap2_init_common_hw(mt46h32m32lf6_sdrc_params); ++ omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL, ++ omap3_mpu_rate_table, ++ omap3_dsp_rate_table, ++ omap3_l3_rate_table); + omap_init_irq(); + omap_gpio_init(); + omap3evm_init_smc911x(); +@@ -256,7 +259,7 @@ static void __init omap3_evm_init(void) + + omap_serial_init(); + twl4030_mmc_init(mmc); +- usb_musb_init(); ++ usb_musb_init(NULL); + usb_ehci_init(); + omap3evm_flash_init(); + ads7846_dev_init(); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap3pandora.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap3pandora.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-omap3pandora.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-omap3pandora.c 2011-09-04 11:31:05.000000000 +0200 +@@ -157,6 +157,7 @@ static struct twl4030_hsmmc_info omap3pa + .gpio_cd = -EINVAL, + .gpio_wp = 127, + .ext_clock = 1, ++ .transceiver = true, + }, + {} /* Terminator */ + }; +@@ -214,7 +215,8 @@ static int __init omap3pandora_i2c_init( + + static void __init omap3pandora_init_irq(void) + { +- omap2_init_common_hw(mt46h32m32lf6_sdrc_params); ++ omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL, ++ NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + } +@@ -296,7 +298,7 @@ static void __init omap3pandora_init(voi + omap_serial_init(); + spi_register_board_info(omap3pandora_spi_board_info, + ARRAY_SIZE(omap3pandora_spi_board_info)); +- usb_musb_init(); ++ usb_musb_init(NULL); + usb_ehci_init(); + omap3pandora_flash_init(); + omap3pandora_ads7846_init(); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-overo.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-overo.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-overo.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-overo.c 2011-09-04 11:31:05.000000000 +0200 +@@ -44,6 +44,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -184,7 +185,9 @@ static int __init overo_i2c_init(void) + + static void __init overo_init_irq(void) + { +- omap2_init_common_hw(mt46h32m32lf6_sdrc_params); ++ omap2_init_common_hw(mt46h32m32lf6_sdrc_params, ++ mt46h32m32lf6_sdrc_params, ++ NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + } +@@ -219,6 +222,7 @@ static struct twl4030_hsmmc_info mmc[] _ + .wires = 4, + .gpio_cd = -EINVAL, + .gpio_wp = -EINVAL, ++ .transceiver = true, + }, + {} /* Terminator */ + }; +@@ -231,10 +235,14 @@ static void __init overo_init(void) + omap_board_config_size = ARRAY_SIZE(overo_config); + omap_serial_init(); + twl4030_mmc_init(mmc); +- usb_musb_init(); ++ usb_musb_init(NULL); + usb_ehci_init(); + overo_flash_init(); + ++ /* Ensure SDRC pins are 
mux'd for self-refresh */ ++ omap_cfg_reg(H16_34XX_SDRC_CKE0); ++ omap_cfg_reg(H17_34XX_SDRC_CKE1); ++ + if ((gpio_request(OVERO_GPIO_W2W_NRESET, + "OVERO_GPIO_W2W_NRESET") == 0) && + (gpio_direction_output(OVERO_GPIO_W2W_NRESET, 1) == 0)) { +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-audio.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-audio.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-audio.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-audio.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,165 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx51-audio.c ++ * ++ * Copyright (C) 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../drivers/media/radio/radio-si4713.h" ++#include ++#include ++#include ++ ++#define RX51_FMTX_RESET_GPIO 163 ++#define RX51_FMTX_IRQ 53 ++#define RX51_FMRX_IRQ 43 ++#define RX51_HEADPHN_EN_GPIO 98 ++#define RX51_ECI0_GPIO 61 ++#define RX51_ECI1_GPIO 62 ++#define RX51_HEADPHONE_GPIO 177 ++ ++static int si4713_set_power(int power) ++{ ++ /* Make sure VAUX1 is enabled before we rise reset line */ ++ if (power) ++ twl4030_enable_regulator(RES_VAUX1); ++ ++ if (!power) ++ udelay(1); ++ gpio_set_value(RX51_FMTX_RESET_GPIO, power); ++ udelay(50); ++ ++ /* As reset line is down, no need to keep VAUX1 */ ++ if (!power) ++ twl4030_disable_regulator(RES_VAUX1); ++ ++ return 0; ++} ++ ++static struct si4713_platform_data rx51_si4713_platform_data = { ++ .set_power = si4713_set_power, ++}; ++ ++static void __init rx51_init_si4713(void) ++{ ++ int r; ++ ++ r = gpio_request(RX51_FMTX_RESET_GPIO, "si4713"); ++ if (r < 0) { ++ printk(KERN_ERR "Failed to request gpio for FMTx rst\n"); ++ return; ++ } ++ ++ gpio_direction_output(RX51_FMTX_RESET_GPIO, 0); ++} ++ ++static void __init rx51_init_bcm2048(void) ++{ ++ int gpio_irq; ++ ++ gpio_irq = gpio_request(RX51_FMRX_IRQ, "BCM2048"); ++ if (gpio_irq < 0) { ++ printk(KERN_ERR "Failed to request gpio for FMRX IRQ\n"); ++ return; ++ } ++ ++ gpio_direction_input(RX51_FMRX_IRQ); ++} ++ ++static int tpa6130a2_set_power(int state) ++{ ++ gpio_set_value(RX51_HEADPHN_EN_GPIO, !!state); ++ return 0; ++} ++ ++static struct tpa6130a2_platform_data rx51_tpa6130a2_platform_data = { ++ .set_power = tpa6130a2_set_power, ++}; ++ ++static void __init rx51_init_tpa6130a2(void) ++{ ++ int r; ++ ++ r = gpio_request(RX51_HEADPHN_EN_GPIO, "tpa6130a2"); ++ if (r < 0) { ++ printk(KERN_ERR "Failed to request shutdown gpio " ++ "for TPA6130a2 chip\n"); ++ } ++ ++ gpio_direction_output(RX51_HEADPHN_EN_GPIO, 0); ++ ++ return; ++} ++ ++static struct nokia_av_platform_data rx51_nokia_av_platform_data = { ++ .eci0_gpio = RX51_ECI0_GPIO, ++ .eci1_gpio = RX51_ECI1_GPIO, ++ .headph_gpio = RX51_HEADPHONE_GPIO, ++}; ++ ++static struct platform_device rx51_nokia_av_device = { ++ .name = "nokia-av", ++ .id = -1, ++ .dev = { ++ .platform_data = &rx51_nokia_av_platform_data, ++ }, ++}; ++ ++static struct platform_device *rx51_audio_devices[] = { ++ &rx51_nokia_av_device, ++}; ++ ++static struct i2c_board_info __initdata rx51_audio_i2c_board_info_2[] = { ++ { ++ I2C_BOARD_INFO(SI4713_NAME, SI4713_I2C_ADDR_BUSEN_HIGH), ++ .type = "si4713", ++ .irq = OMAP_GPIO_IRQ(RX51_FMTX_IRQ), ++ .platform_data = 
&rx51_si4713_platform_data, ++ }, ++ { ++ I2C_BOARD_INFO("aic34b_dummy", 0x19), ++ }, ++ { ++ I2C_BOARD_INFO("tpa6130a2", 0x60), ++ .platform_data = &rx51_tpa6130a2_platform_data, ++ }, ++}; ++ ++static struct i2c_board_info __initdata rx51_audio_i2c_board_info_3[] = { ++ { ++ I2C_BOARD_INFO(BCM2048_NAME, BCM2048_I2C_ADDR), ++ .irq = OMAP_GPIO_IRQ(RX51_FMRX_IRQ), ++ }, ++}; ++ ++static int __init rx51_audio_init(void) ++{ ++ if (!(machine_is_nokia_rx51() || machine_is_nokia_rx71())) ++ return 0; ++ ++ platform_add_devices(rx51_audio_devices, ++ ARRAY_SIZE(rx51_audio_devices)); ++ ++ rx51_init_tpa6130a2(); ++ rx51_init_si4713(); ++ rx51_init_bcm2048(); ++ i2c_register_board_info(2, rx51_audio_i2c_board_info_2, ++ ARRAY_SIZE(rx51_audio_i2c_board_info_2)); ++ i2c_register_board_info(3, rx51_audio_i2c_board_info_3, ++ ARRAY_SIZE(rx51_audio_i2c_board_info_3)); ++ ++ return 0; ++} ++ ++subsys_initcall(rx51_audio_init); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,194 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx51.c ++ * ++ * Copyright (C) 2007, 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "omap3-opp.h" ++#include "pm.h" ++ ++#include ++#include ++#include ++#include "cm.h" ++ ++#define RX51_USB_TRANSCEIVER_RST_GPIO 67 ++ ++extern int omap_init_fb(void); ++extern void rx51_video_mem_init(void); ++ ++static struct omap_uart_config rx51_uart_config = { ++ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)), ++}; ++ ++static struct omap_board_config_kernel rx51_config[] = { ++ { OMAP_TAG_UART, &rx51_uart_config }, ++}; ++ ++static struct omap_bluetooth_config rx51_bt_config = { ++ .chip_type = BT_CHIP_BCM, ++ .bt_wakeup_gpio = 37, ++ .host_wakeup_gpio = 101, ++ .reset_gpio = 91, ++ .bt_uart = 2, ++ .bt_sysclk = BT_SYSCLK_38_4, ++}; ++ ++static void __init rx51_init_irq(void) ++{ ++ struct omap_sdrc_params *sdrc_params; ++ ++ sdrc_params = rx51_get_sdram_timings(); ++ ++ omap2_init_common_hw(sdrc_params, sdrc_params, ++ omap3_mpu_rate_table, ++ omap3_dsp_rate_table, ++ omap3_l3_rate_table); ++ omap_init_irq(); ++ omap_gpio_init(); ++} ++ ++static void __init rx51_pm_init(void) ++{ ++ struct prm_setup_times prm_setup = { ++ .clksetup = 81, ++ .voltsetup_time1 = 270, ++ .voltsetup_time2 = 150, ++ .voltoffset = 17, ++ .voltsetup2 = 37, ++ }; ++ ++ omap3_set_prm_setup_times(&prm_setup); ++} ++ ++static void __init rx51_xceiv_init(void) ++{ ++ if (gpio_request(RX51_USB_TRANSCEIVER_RST_GPIO, NULL) < 0) ++ BUG(); ++ gpio_direction_output(RX51_USB_TRANSCEIVER_RST_GPIO, 1); ++} ++ ++static int rx51_xceiv_reset(void) ++{ ++ /* make sure the transceiver is awake */ ++ msleep(15); ++ /* only reset powered transceivers */ ++ if (!gpio_get_value(RX51_USB_TRANSCEIVER_RST_GPIO)) ++ return 0; ++ gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, 0); ++ msleep(1); ++ 
gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, 1); ++ msleep(15); ++ ++ return 0; ++} ++ ++static int rx51_xceiv_power(bool power) ++{ ++ unsigned long timeout; ++ ++ if (!power) { ++ /* Let musb go stdby before powering down the transceiver */ ++ timeout = jiffies + msecs_to_jiffies(100); ++ while (!time_after(jiffies, timeout)) ++ if (cm_read_mod_reg(CORE_MOD, CM_IDLEST1) ++ & OMAP3430ES2_ST_HSOTGUSB_STDBY_MASK) ++ break; ++ if (!(cm_read_mod_reg(CORE_MOD, CM_IDLEST1) ++ & OMAP3430ES2_ST_HSOTGUSB_STDBY_MASK)) ++ WARN(1, "could not put musb to sleep\n"); ++ } ++ gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, power); ++ ++ return 0; ++} ++ ++/** ++ * rx51_usb_set_pm_limits - sets omap3-related pm constraints ++ * @dev: musb's device pointer ++ * @set: set or clear constraints ++ * ++ * For now we only need mpu wakeup latency mpu frequency, if we ++ * need anything else we just add the logic here and the driver ++ * is already handling what needs to be handled. ++ */ ++static void rx51_usb_set_pm_limits(struct device *dev, bool set) ++{ ++ omap_pm_set_max_mpu_wakeup_lat(dev, set ? 10 : -1); ++ omap_pm_set_min_mpu_freq(dev, set ? 500000000 : 0); ++} ++ ++static struct musb_board_data rx51_musb_data = { ++ .xceiv_reset = rx51_xceiv_reset, ++ .xceiv_power = rx51_xceiv_power, ++ .set_pm_limits = rx51_usb_set_pm_limits, ++}; ++ ++static void __init rx51_init(void) ++{ ++ rx51_xceiv_init(); ++ usb_musb_init(&rx51_musb_data); ++ omap_serial_init(); ++ rx51_pm_init(); ++ /* ++ * With this early call work around a current clock framework problem ++ * where enabling and then disabling a clock disables a root clock ++ * used by another child clock. In our case this would happen with ++ * hsmmc which is normally initialized before fb. ++ */ ++ omap_init_fb(); ++ omap_bt_init(&rx51_bt_config); ++} ++ ++static void __init rx51_map_io(void) ++{ ++ omap_board_config = rx51_config; ++ omap_board_config_size = ARRAY_SIZE(rx51_config); ++ omap2_set_globals_343x(); ++ omap2_map_common_io(); ++ rx51_video_mem_init(); ++} ++ ++MACHINE_START(NOKIA_RX51, "Nokia RX-51 board") ++ /* Maintainer: Lauri Leukkunen */ ++ .phys_io = 0x48000000, ++ .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc, ++ .boot_params = 0x80000100, ++ .map_io = rx51_map_io, ++ .init_irq = rx51_init_irq, ++ .init_machine = rx51_init, ++ .timer = &omap_timer, ++MACHINE_END +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-camera-base.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-camera-base.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-camera-base.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-camera-base.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,154 @@ ++/* ++ * arch/arm/mach-omap2/board-rx51-camera-base.c ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * ++ * Contact: Sakari Ailus ++ * Tuukka Toivonen ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++ ++#if defined CONFIG_VIDEO_MACH_RX51 || defined CONFIG_VIDEO_MACH_RX51_MODULE ++ ++#include "../../../drivers/media/video/et8ek8.h" ++#include "../../../drivers/media/video/smia-sensor.h" ++ ++#include ++#include ++ ++#include "board-rx51-camera.h" ++ ++/* Fake platform data begins here. */ ++ ++static int fake_rx51_camera_g_priv(struct v4l2_int_device *s, void *priv) ++{ ++ return -EBUSY; ++} ++ ++struct et8ek8_platform_data rx51_et8ek8_platform_data = { ++ .g_priv = fake_rx51_camera_g_priv, ++}; ++EXPORT_SYMBOL(rx51_et8ek8_platform_data); ++ ++struct ad5820_platform_data rx51_ad5820_platform_data = { ++ .g_priv = fake_rx51_camera_g_priv, ++}; ++EXPORT_SYMBOL(rx51_ad5820_platform_data); ++ ++struct adp1653_platform_data rx51_adp1653_platform_data = { ++ .g_priv = fake_rx51_camera_g_priv, ++}; ++EXPORT_SYMBOL(rx51_adp1653_platform_data); ++ ++struct smia_sensor_platform_data rx51_smia_sensor_platform_data = { ++ .g_priv = fake_rx51_camera_g_priv, ++}; ++EXPORT_SYMBOL(rx51_smia_sensor_platform_data); ++ ++static struct i2c_board_info rx51_camera_board_info_2[] __initdata = { ++#ifdef CONFIG_VIDEO_MACH_RX51_OLD_I2C ++#if defined (CONFIG_VIDEO_ET8EK8) || defined (CONFIG_VIDEO_ET8EK8_MODULE) ++ { ++ I2C_BOARD_INFO(ET8EK8_NAME, ET8EK8_I2C_ADDR), ++ .platform_data = &rx51_et8ek8_platform_data, ++ }, ++#endif ++#if defined (CONFIG_VIDEO_AD5820) || defined (CONFIG_VIDEO_AD5820_MODULE) ++ { ++ I2C_BOARD_INFO(AD5820_NAME, AD5820_I2C_ADDR), ++ .platform_data = &rx51_ad5820_platform_data, ++ }, ++#endif ++#else /* CONFIG_VIDEO_MACH_RX51_OLD_I2C */ ++#if defined(CONFIG_VIDEO_ADP1653) || defined(CONFIG_VIDEO_ADP1653_MODULE) ++ { ++ I2C_BOARD_INFO(ADP1653_NAME, ADP1653_I2C_ADDR), ++ .platform_data = &rx51_adp1653_platform_data, ++ }, ++#endif ++#endif ++#if defined(CONFIG_VIDEO_SMIA_SENSOR) || defined(CONFIG_VIDEO_SMIA_SENSOR_MODULE) ++ { ++ I2C_BOARD_INFO(SMIA_SENSOR_NAME, SMIA_SENSOR_I2C_ADDR), ++ .platform_data = &rx51_smia_sensor_platform_data, ++ }, ++#endif ++}; ++ ++static struct i2c_board_info rx51_camera_board_info_3[] __initdata = { ++#ifdef CONFIG_VIDEO_MACH_RX51_OLD_I2C ++#if defined (CONFIG_VIDEO_ADP1653) || defined (CONFIG_VIDEO_ADP1653_MODULE) ++ { ++ I2C_BOARD_INFO(ADP1653_NAME, ADP1653_I2C_ADDR), ++ .platform_data = &rx51_adp1653_platform_data, ++ }, ++#endif ++#else /* CONFIG_VIDEO_MACH_RX51_OLD_I2C */ ++#if defined (CONFIG_VIDEO_ET8EK8) || defined (CONFIG_VIDEO_ET8EK8_MODULE) ++ { ++ I2C_BOARD_INFO(ET8EK8_NAME, ET8EK8_I2C_ADDR), ++ .platform_data = &rx51_et8ek8_platform_data, ++ }, ++#endif ++#if defined (CONFIG_VIDEO_AD5820) || defined (CONFIG_VIDEO_AD5820_MODULE) ++ { ++ I2C_BOARD_INFO(AD5820_NAME, AD5820_I2C_ADDR), ++ .platform_data = &rx51_ad5820_platform_data, ++ }, ++#endif ++#endif ++}; ++ ++static int __init rx51_camera_base_init(void) ++{ ++ int err; ++ ++ if (!(machine_is_nokia_rx51() || machine_is_nokia_rx71())) ++ return 0; ++ ++ /* I2C */ ++ err = i2c_register_board_info(2, rx51_camera_board_info_2, ++ ARRAY_SIZE(rx51_camera_board_info_2)); ++ if (err) { ++ printk(KERN_ERR ++ "%s: failed to register rx51_camera_board_info_2\n", ++ __func__); ++ return err; ++ } ++ err = i2c_register_board_info(3, rx51_camera_board_info_3, ++ ARRAY_SIZE(rx51_camera_board_info_3)); ++ if 
(err) { ++ printk(KERN_ERR ++ "%s: failed to register rx51_camera_board_info_3\n", ++ __func__); ++ return err; ++ } ++ ++ return 0; ++} ++ ++arch_initcall(rx51_camera_base_init); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-camera.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-camera.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-camera.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-camera.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,754 @@ ++/* ++ * arch/arm/mach-omap2/board-rx51-camera.c ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * ++ * Contact: Sakari Ailus ++ * Tuukka Toivonen ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "../../../drivers/media/video/omap34xxcam.h" ++#include "../../../drivers/media/video/isp/ispreg.h" ++#include "../../../drivers/media/video/et8ek8.h" ++#include "../../../drivers/media/video/smia-sensor.h" ++ ++#include ++#include ++#include ++ ++#include "board-rx51-camera.h" ++ ++#define ADP1653_GPIO_ENABLE 88 /* Used for resetting ADP1653 */ ++#define ADP1653_GPIO_INT 167 /* Fault interrupt */ ++#define ADP1653_GPIO_STROBE 126 /* Pin used in cam_strobe mode -> ++ * control using ISP drivers */ ++ ++#define STINGRAY_RESET_GPIO 102 ++#define ACMELITE_RESET_GPIO 97 /* Used also to MUX between cameras */ ++ ++#define RX51_CAMERA_STINGRAY 0 ++#define RX51_CAMERA_ACMELITE 1 ++ ++#define RX51_SENSOR 1 ++#define RX51_LENS 2 ++ ++#define GPIO_DIR_OUTPUT 0 ++ ++/* ++ * ++ * Power control ++ * ++ */ ++ ++/* Assign camera to peripheral power group P3 */ ++#define CAMERA_DEV_GRP (0x4 << 5) ++#define VAUX2_1V8 0x05 ++#define VAUX3_1V8 0x01 ++#define VAUX4_2V8 0x09 ++ ++/* Earlier rx51 builds require VAUX3. */ ++#define NEEDS_VAUX3 (system_rev >= 0x100 && system_rev < 0x900) ++ ++static struct rx51_camera { ++ int okay; ++ int inuse; ++} rx51_camera[2]; ++ ++static DEFINE_MUTEX(rx51_camera_mutex); ++ ++/* Acquires the given slave `which' for camera if possible. ++ * Returns the bitmask containing previously acquired slaves for the device. ++ */ ++static int rx51_camera_acquire(int camera, int which) ++{ ++ int other = 1 - camera; ++ int old_which; ++ ++ if (!rx51_camera[camera].okay) ++ return -EINVAL; ++ ++ if (rx51_camera[other].inuse) ++ return -EBUSY; ++ ++ old_which = rx51_camera[camera].inuse; ++ rx51_camera[camera].inuse |= which; ++ ++ return old_which; ++} ++ ++/* Releases the given slave `which' for camera. ++ * Returns the bitmask containing still acquired slaves for the device. 
++ */ ++static int rx51_camera_release(int camera, int which) ++{ ++ rx51_camera[camera].inuse &= ~which; ++ ++ return rx51_camera[camera].inuse; ++} ++ ++static int rx51_camera_power_on_nolock(int camera) ++{ ++ int rval; ++ ++ /* Reset Stingray */ ++ gpio_set_value(STINGRAY_RESET_GPIO, 0); ++ ++ /* Mux to Stingray and reset Acme Lite */ ++ gpio_set_value(ACMELITE_RESET_GPIO, 0); ++ ++ /* VAUX2=1.8 V (muxer voltage) */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ VAUX2_1V8, TWL4030_VAUX2_DEDICATED); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ CAMERA_DEV_GRP, TWL4030_VAUX2_DEV_GRP); ++ if (rval) ++ goto out; ++ ++ /* Off & sleep -> Active state */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0xEE, TWL4030_VAUX2_REMAP); ++ if (rval) ++ goto out; ++ ++ /* VAUX4=2.8 V (camera VANA) */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ VAUX4_2V8, TWL4030_VAUX4_DEDICATED); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ CAMERA_DEV_GRP, TWL4030_VAUX4_DEV_GRP); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0xEE, TWL4030_VAUX4_REMAP); ++ if (rval) ++ goto out; ++ ++ if (NEEDS_VAUX3) { ++ /* VAUX3=1.8 V (camera VDIG) */ ++ printk(KERN_INFO "%s: VAUX3 on for old board\n", __func__); ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ VAUX3_1V8, ++ TWL4030_VAUX3_DEDICATED); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ CAMERA_DEV_GRP, ++ TWL4030_VAUX3_DEV_GRP); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0xEE, TWL4030_VAUX3_REMAP); ++ if (rval) ++ goto out; ++ } ++ ++ /* Let the voltages stabilize */ ++ udelay(15); ++ ++ /* XSHUTDOWN on, enable camera and set muxer */ ++ gpio_set_value(camera == RX51_CAMERA_STINGRAY ? ++ STINGRAY_RESET_GPIO : ACMELITE_RESET_GPIO, 1); ++ ++ /* CONTROL_CSIRXFE */ ++ omap_writel( ++ /* ++ * CSIb receiver data/clock or data/strobe mode ++ * ++ * Stingray uses data/strobe. ++ */ ++ ((camera ? 
0 : 1) << 10) ++ | BIT(12) /* Enable differential transceiver */ ++ | BIT(13) /* Disable reset */ ++ , OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE); ++ ++ /* Let the voltages stabilize */ ++ udelay(15); ++ ++ return 0; ++ ++out: ++ printk(KERN_ALERT "%s: Error %d in writing to TWL4030!\n", __func__, ++ rval); ++ ++ return rval; ++} ++ ++static int rx51_camera_power_on(int camera, int which) ++{ ++ int rval; ++ ++ mutex_lock(&rx51_camera_mutex); ++ ++ rval = rx51_camera_acquire(camera, which); ++ ++ if (!rval) ++ rval = rx51_camera_power_on_nolock(camera); ++ else if (rval > 0) ++ rval = 0; ++ ++ mutex_unlock(&rx51_camera_mutex); ++ ++ if (rval < 0) ++ printk(KERN_INFO "%s: power_on camera %d which %d failed\n", ++ __func__, camera, which); ++ ++ return rval; ++} ++ ++static void rx51_camera_power_off_nolock(int camera) ++{ ++ int rval; ++ ++ /* Reset cameras */ ++ gpio_set_value(STINGRAY_RESET_GPIO, 0); ++ gpio_set_value(ACMELITE_RESET_GPIO, 0); ++ ++ /* VAUX2 (muxer voltage) off */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0, TWL4030_VAUX2_DEV_GRP); ++ if (rval) ++ goto out; ++ /* Off & sleep -> Off state */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0x00, TWL4030_VAUX2_REMAP); ++ if (rval) ++ goto out; ++ ++ /* VAUX4 (camera VANA) off */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0, TWL4030_VAUX4_DEV_GRP); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0x00, TWL4030_VAUX4_REMAP); ++ if (rval) ++ goto out; ++ ++ if (NEEDS_VAUX3) { ++ printk(KERN_INFO "%s: VAUX3 off for old board\n", __func__); ++ /* VAUX3 (camera VDIG) off */ ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0, TWL4030_VAUX3_DEV_GRP); ++ if (rval) ++ goto out; ++ rval = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ 0x00, TWL4030_VAUX3_REMAP); ++ if (rval) ++ goto out; ++ } ++ ++ return; ++ ++out: ++ printk(KERN_ALERT "%s: Error %d in writing to TWL4030!\n", __func__, ++ rval); ++} ++ ++static void rx51_camera_power_off(int camera, int which) ++{ ++ int rval; ++ ++ mutex_lock(&rx51_camera_mutex); ++ ++ rval = rx51_camera_release(camera, which); ++ if (!rval) ++ rx51_camera_power_off_nolock(camera); ++ ++ mutex_unlock(&rx51_camera_mutex); ++} ++ ++static void __init rx51_stingray_init(void) ++{ ++ if (gpio_request(STINGRAY_RESET_GPIO, "stingray reset") != 0) { ++ printk(KERN_INFO "%s: unable to acquire Stingray reset gpio\n", ++ __FUNCTION__); ++ return; ++ } ++ ++ /* XSHUTDOWN off, reset */ ++ gpio_direction_output(STINGRAY_RESET_GPIO, 0); ++ rx51_camera_power_off_nolock(RX51_CAMERA_STINGRAY); ++ rx51_camera[RX51_CAMERA_STINGRAY].okay = 1; ++ rx51_camera[RX51_CAMERA_STINGRAY].inuse = 0; ++} ++ ++static void __init rx51_acmelite_init(void) ++{ ++ if (gpio_request(ACMELITE_RESET_GPIO, "acmelite reset") != 0) { ++ printk(KERN_INFO "%s: unable to acquire Acme Lite reset gpio\n", ++ __FUNCTION__); ++ return; ++ } ++ ++ /* XSHUTDOWN off, reset */ ++ gpio_direction_output(ACMELITE_RESET_GPIO, 0); ++ rx51_camera_power_off_nolock(RX51_CAMERA_ACMELITE); ++ rx51_camera[RX51_CAMERA_ACMELITE].okay = 1; ++ rx51_camera[RX51_CAMERA_ACMELITE].inuse = 0; ++} ++ ++static int __init rx51_adp1653_init(void) ++{ ++ int err; ++ ++ err = gpio_request(ADP1653_GPIO_ENABLE, "adp1653 enable"); ++ if (err) { ++ printk(KERN_ERR ADP1653_NAME ++ " Failed to request EN gpio\n"); ++ err = -ENODEV; ++ goto err_omap_request_gpio; ++ } ++ ++ err = gpio_request(ADP1653_GPIO_INT, "adp1653 interrupt"); ++ if (err) { ++ printk(KERN_ERR 
ADP1653_NAME " Failed to request IRQ gpio\n"); ++ err = -ENODEV; ++ goto err_omap_request_gpio_2; ++ } ++ ++ err = gpio_request(ADP1653_GPIO_STROBE, "adp1653 strobe"); ++ if (err) { ++ printk(KERN_ERR ADP1653_NAME ++ " Failed to request STROBE gpio\n"); ++ err = -ENODEV; ++ goto err_omap_request_gpio_3; ++ } ++ ++ gpio_direction_output(ADP1653_GPIO_ENABLE, 0); ++ gpio_direction_input(ADP1653_GPIO_INT); ++ gpio_direction_output(ADP1653_GPIO_STROBE, 0); ++ ++ return 0; ++ ++err_omap_request_gpio_3: ++ gpio_free(ADP1653_GPIO_INT); ++ ++err_omap_request_gpio_2: ++ gpio_free(ADP1653_GPIO_ENABLE); ++ ++err_omap_request_gpio: ++ return err; ++} ++ ++static int __init rx51_camera_hw_init(void) ++{ ++ int rval; ++ ++ rval = rx51_adp1653_init(); ++ if (rval) ++ return rval; ++ ++ mutex_init(&rx51_camera_mutex); ++ rx51_stingray_init(); ++ rx51_acmelite_init(); ++ ++ return 0; ++} ++ ++/* ++ * ++ * Stingray ++ * ++ */ ++ ++#define STINGRAY_XCLK OMAP34XXCAM_XCLK_A ++ ++static struct isp_interface_config rx51_stingray_config = { ++ .ccdc_par_ser = ISP_CSIB, ++ .dataline_shift = 0, ++ .hsvs_syncdetect = ISPCTRL_SYNC_DETECT_VSRISE, ++ .strobe = 0, ++ .prestrobe = 0, ++ .shutter = 0, ++ .wait_hs_vs = 1, ++ .u = { ++ .csi = { ++ .crc = 1, ++ .mode = 0, ++ .edge = 1, ++ .signalling = 1, ++ .strobe_clock_inv = 0, ++ .vs_edge = 0, ++ .channel = 0, ++ .vpclk = 1, ++ .data_start = 0, ++ }, ++ }, ++}; ++ ++static struct omap34xxcam_hw_config rx51_stingray_omap34xxcam_hw_config = { ++ .dev_index = 0, ++ .dev_minor = 0, ++ .dev_type = OMAP34XXCAM_SLAVE_SENSOR, ++ .u = { ++ .sensor = { ++ .sensor_isp = 0, ++ .capture_mem = PAGE_ALIGN(2608 * 1966 * 2) * 2, ++ .ival_default = { 1, 30 }, ++ }, ++ }, ++}; ++ ++static int rx51_stingray_configure_interface(struct v4l2_int_device *s, ++ struct smia_mode *mode) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ static const int S = 8; ++ ++ /* Configure sensor interface. */ ++ rx51_stingray_config.u.csi.format = mode->pixel_format; ++ rx51_stingray_config.u.csi.data_size = mode->window_height; ++ /* Calculate average pixel clock per line. Assume buffers can spread ++ * the data over horizontal blanking time. Rounding upwards. 
*/ ++ rx51_stingray_config.pixelclk = ++ mode->window_width ++ * (((mode->pixel_clock + (1<> S) + mode->width - 1) ++ / mode->width; ++ rx51_stingray_config.pixelclk <<= S; ++ return isp_configure_interface(vdev->cam->isp, &rx51_stingray_config); ++} ++ ++static int rx51_stingray_set_xclk(struct v4l2_int_device *s, int hz) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ ++ isp_set_xclk(vdev->cam->isp, hz, STINGRAY_XCLK); ++ ++ return 0; ++} ++ ++static int rx51_stingray_power_on(struct v4l2_int_device *s) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ struct device *dev = vdev->cam->isp; ++ int rval; ++ ++ omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, 400000); ++ ++ rval = rx51_camera_power_on(RX51_CAMERA_STINGRAY, RX51_SENSOR); ++ ++ if (rval) ++ omap_pm_set_min_bus_tput(dev, 2, 0); ++ ++ return rval; ++} ++ ++static int rx51_stingray_power_off(struct v4l2_int_device *s) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ struct device *dev = vdev->cam->isp; ++ ++ rx51_camera_power_off(RX51_CAMERA_STINGRAY, RX51_SENSOR); ++ ++ omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, 0); ++ ++ return 0; ++} ++ ++static int rx51_stingray_g_priv(struct v4l2_int_device *s, void *priv) ++{ ++ *(struct omap34xxcam_hw_config *)priv = ++ rx51_stingray_omap34xxcam_hw_config; ++ ++ return 0; ++} ++ ++static struct et8ek8_platform_data et8ek8_tmp_platform_data; ++ ++static struct et8ek8_platform_data et8ek8_my_platform_data = { ++ .g_priv = rx51_stingray_g_priv, ++ .configure_interface = rx51_stingray_configure_interface, ++ .set_xclk = rx51_stingray_set_xclk, ++ .power_on = rx51_stingray_power_on, ++ .power_off = rx51_stingray_power_off, ++}; ++ ++/* ++ * ++ * AD5820 ++ * ++ */ ++ ++static struct omap34xxcam_hw_config ad5820_omap34xxcam_hw_config = { ++ .dev_index = 0, ++ .dev_minor = 0, ++ .dev_type = OMAP34XXCAM_SLAVE_LENS, ++ .u = { ++ .lens = { ++ }, ++ }, ++}; ++ ++static int ad5820_g_priv(struct v4l2_int_device *s, void *priv) ++{ ++ *(struct omap34xxcam_hw_config *)priv = ad5820_omap34xxcam_hw_config; ++ ++ return 0; ++} ++ ++static int ad5820_s_power(struct v4l2_int_device *s, enum v4l2_power state) ++{ ++ if (state == V4L2_POWER_ON) ++ return rx51_camera_power_on(RX51_CAMERA_STINGRAY, RX51_LENS); ++ else ++ rx51_camera_power_off(RX51_CAMERA_STINGRAY, RX51_LENS); ++ ++ return 0; ++} ++ ++static struct ad5820_platform_data ad5820_tmp_platform_data; ++ ++static struct ad5820_platform_data ad5820_my_platform_data = { ++ .g_priv = ad5820_g_priv, ++ .s_power = ad5820_s_power, ++ ++}; ++ ++/* ++ * ++ * ADP1653 ++ * ++ */ ++ ++static struct omap34xxcam_hw_config adp1653_omap34xxcam_hw_config = { ++ .dev_index = 0, ++ .dev_minor = 0, ++ .dev_type = OMAP34XXCAM_SLAVE_FLASH, ++ .u = { ++ .flash = { ++ }, ++ }, ++}; ++ ++static int rx51_adp1653_g_priv(struct v4l2_int_device *s, void *priv) ++{ ++ *(struct omap34xxcam_hw_config *)priv = adp1653_omap34xxcam_hw_config; ++ ++ return 0; ++} ++ ++static int rx51_adp1653_power_on(struct v4l2_int_device *s) ++{ ++ gpio_set_value(ADP1653_GPIO_ENABLE, 1); ++ ++ /* Some delay is apparently required. 
*/ ++ udelay(20); ++ ++ return 0; ++} ++ ++static int rx51_adp1653_power_off(struct v4l2_int_device *s) ++{ ++ gpio_set_value(ADP1653_GPIO_ENABLE, 0); ++ ++ return 0; ++} ++ ++static struct adp1653_platform_data adp1653_tmp_platform_data; ++ ++static struct adp1653_platform_data adp1653_my_platform_data = { ++ .g_priv = rx51_adp1653_g_priv, ++ .power_on = rx51_adp1653_power_on, ++ .power_off = rx51_adp1653_power_off, ++ /* Must be limited to 500 ms in RX-51 */ ++ .max_flash_timeout = 500000, /* us */ ++ /* Must be limited to 320 mA in RX-51 B3 and newer hardware */ ++ .max_flash_intensity = 19, ++ /* Must be limited to 50 mA in RX-51 */ ++ .max_torch_intensity = 1, ++ .max_indicator_intensity = ADP1653_REG_OUT_SEL_ILED_MAX, ++}; ++ ++/* ++ * ++ * Acmelite ++ * ++ */ ++ ++#define ACMELITE_XCLK OMAP34XXCAM_XCLK_A ++ ++static struct isp_interface_config rx51_acmelite_config = { ++ .ccdc_par_ser = ISP_CSIB, ++ .dataline_shift = 0, ++ .hsvs_syncdetect = ISPCTRL_SYNC_DETECT_VSRISE, ++ .strobe = 0, ++ .prestrobe = 0, ++ .shutter = 0, ++ .wait_hs_vs = 1, ++ .u = { ++ .csi = { ++ .crc = 0, ++ .mode = 0, ++ .edge = 1, ++ .signalling = 0, ++ .strobe_clock_inv = 0, ++ .vs_edge = 0, ++ .channel = 0, ++ .vpclk = 2, ++ .data_start = 4, ++ .format = V4L2_PIX_FMT_SGRBG10, ++ }, ++ }, ++}; ++ ++static struct omap34xxcam_hw_config rx51_acmelite_omap34xxcam_hw_config = { ++ .dev_index = 1, ++ .dev_minor = 1, ++ .dev_type = OMAP34XXCAM_SLAVE_SENSOR, ++ .u = { ++ .sensor = { ++ .sensor_isp = 0, ++ .capture_mem = PAGE_ALIGN(648 * 488 * 2) * 4, ++ .ival_default = { 1, 30 }, ++ }, ++ }, ++}; ++ ++static int rx51_acmelite_configure_interface(struct v4l2_int_device *s, ++ int width, int height) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ ++ /* Configure sensor interface. 
*/ ++ rx51_acmelite_config.u.csi.data_size = height; ++ ++ return isp_configure_interface(vdev->cam->isp, &rx51_acmelite_config); ++} ++ ++static int rx51_acmelite_set_xclk(struct v4l2_int_device *s, int hz) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ ++ isp_set_xclk(vdev->cam->isp, hz, ACMELITE_XCLK); ++ ++ return 0; ++} ++ ++static int rx51_acmelite_power_on(struct v4l2_int_device *s) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ struct device *dev = vdev->cam->isp; ++ int rval; ++ ++ omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, 400000); ++ ++ rval = rx51_camera_power_on(RX51_CAMERA_ACMELITE, RX51_SENSOR); ++ ++ if (rval) ++ omap_pm_set_min_bus_tput(dev, 2, 0); ++ ++ return rval; ++} ++ ++static int rx51_acmelite_power_off(struct v4l2_int_device *s) ++{ ++ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; ++ struct device *dev = vdev->cam->isp; ++ ++ rx51_camera_power_off(RX51_CAMERA_ACMELITE, RX51_SENSOR); ++ ++ omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, 0); ++ ++ return 0; ++} ++ ++static int rx51_acmelite_g_priv(struct v4l2_int_device *s, void *priv) ++{ ++ *(struct omap34xxcam_hw_config *)priv = ++ rx51_acmelite_omap34xxcam_hw_config; ++ ++ return 0; ++} ++ ++static struct smia_sensor_platform_data smia_sensor_tmp_platform_data; ++ ++static struct smia_sensor_platform_data smia_sensor_my_platform_data = { ++ .g_priv = rx51_acmelite_g_priv, ++ .configure_interface = rx51_acmelite_configure_interface, ++ .set_xclk = rx51_acmelite_set_xclk, ++ .power_on = rx51_acmelite_power_on, ++ .power_off = rx51_acmelite_power_off, ++}; ++ ++/* ++ * ++ * Init it all ++ * ++ */ ++ ++static int __init rx51_camera_init(void) { ++ int err; ++ ++ err = rx51_camera_hw_init(); ++ if (err) ++ return err; ++ ++ /* ADP1653 */ ++ adp1653_tmp_platform_data = rx51_adp1653_platform_data; ++ rx51_adp1653_platform_data = adp1653_my_platform_data; ++ ++ /* Stingray */ ++ et8ek8_tmp_platform_data = rx51_et8ek8_platform_data; ++ rx51_et8ek8_platform_data = et8ek8_my_platform_data; ++ ++ /* AD5820 */ ++ ad5820_tmp_platform_data = rx51_ad5820_platform_data; ++ rx51_ad5820_platform_data = ad5820_my_platform_data; ++ ++ /* Acmelite */ ++ smia_sensor_tmp_platform_data = rx51_smia_sensor_platform_data; ++ rx51_smia_sensor_platform_data = smia_sensor_my_platform_data; ++ ++ /* FIXME: can't unload yet. */ ++ __module_get(THIS_MODULE); ++ ++ /* FIXME: somehow initiate device init. 
*/ ++ v4l2_int_device_try_attach_all(); ++ ++ return 0; ++} ++ ++static void __exit rx51_camera_exit(void) { ++ rx51_et8ek8_platform_data = et8ek8_tmp_platform_data; ++ rx51_ad5820_platform_data = ad5820_tmp_platform_data; ++ rx51_adp1653_platform_data = adp1653_tmp_platform_data; ++ rx51_smia_sensor_platform_data = smia_sensor_tmp_platform_data; ++ ++ gpio_free(ADP1653_GPIO_ENABLE); ++ gpio_free(ADP1653_GPIO_INT); ++ gpio_free(ADP1653_GPIO_STROBE); ++} ++ ++module_init(rx51_camera_init); ++module_exit(rx51_camera_exit); ++ ++MODULE_LICENSE("GPL"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-camera.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-camera.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-camera.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-camera.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,33 @@ ++/* ++ * arch/arm/mach-omap2/board-rx51-camera.h ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * ++ * Contact: Sakari Ailus ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ * ++ */ ++ ++#ifndef BOARD_RX51_CAMERA_H ++#define BOARD_RX51_CAMERA_H ++ ++extern struct et8ek8_platform_data rx51_et8ek8_platform_data; ++extern struct mt9p012_platform_data rx51_mt9p012_platform_data; ++extern struct ad5820_platform_data rx51_ad5820_platform_data; ++extern struct adp1653_platform_data rx51_adp1653_platform_data; ++extern struct smia_sensor_platform_data rx51_smia_sensor_platform_data; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-flash.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-flash.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-flash.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-flash.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,100 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx51-flash.c ++ * ++ * Copyright (C) 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++ ++#include "mmc-twl4030.h" ++ ++#define RX51_FLASH_CS 0 ++#define VAUX3_DEV_GRP 0x1F ++#define SYSTEM_REV_B_USES_VAUX3 0x1699 ++#define SYSTEM_REV_S_USES_VAUX3 0x7 ++ ++extern struct mtd_partition n800_partitions[ONENAND_MAX_PARTITIONS]; ++extern int n800_onenand_setup(void __iomem *onenand_base, int freq); ++extern void __init n800_flash_init(void); ++ ++static struct flash_platform_data rx51_flash_data = { ++ .map_name = "cfi_probe", ++ .width = 2, ++ .parts = n800_partitions, ++ .nr_parts = ARRAY_SIZE(n800_partitions), ++}; ++ ++static struct resource rx51_flash_resource = { ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct platform_device rx51_flash_device = { ++ .name = "omapflash", ++ .id = 0, ++ .dev = { ++ .platform_data = &rx51_flash_data, ++ }, ++ .num_resources = 1, ++ .resource = &rx51_flash_resource, ++}; ++ ++static struct platform_device *rx51_flash_devices[] = { ++ &rx51_flash_device, ++}; ++ ++static struct twl4030_hsmmc_info mmc[] __initdata = { ++ { ++ .name = "external", ++ .mmc = 1, ++ .wires = 4, ++ .cover_only = true, ++ .gpio_cd = 160, ++ .gpio_wp = -EINVAL, ++ .power_saving = true, ++ .caps = MMC_CAP_SD_ONLY, ++ }, ++ { ++ .name = "internal", ++ .mmc = 2, ++ .wires = 8, ++ .gpio_cd = -EINVAL, ++ .gpio_wp = -EINVAL, ++ .vsim_18v = true, ++ .power_saving = true, ++ .caps = MMC_CAP_MMC_ONLY | MMC_CAP_NONREMOVABLE, ++ }, ++ {} /* Terminator */ ++}; ++ ++static int __init rx51_flash_init(void) ++{ ++ if (!(machine_is_nokia_rx51() || machine_is_nokia_rx71())) ++ return 0; ++ ++ if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) || ++ system_rev >= SYSTEM_REV_B_USES_VAUX3) ++ mmc[1].vmmc_dev_grp = VAUX3_DEV_GRP; ++ else ++ mmc[1].power_saving = false; ++ ++ platform_add_devices(rx51_flash_devices, ARRAY_SIZE(rx51_flash_devices)); ++ n800_flash_init(); ++ twl4030_mmc_init(mmc); ++ return 0; ++} ++ ++subsys_initcall(rx51_flash_init); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-network.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-network.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-network.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-network.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,24 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx51-network.c ++ * ++ * Copyright (C) 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++ ++#include ++ ++static int __init rx51_network_init(void) ++{ ++ if (!(machine_is_nokia_rx51() || machine_is_nokia_rx71())) ++ return 0; ++ ++ return 0; ++} ++ ++subsys_initcall(rx51_network_init); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-peripherals.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-peripherals.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-peripherals.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,632 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx51-flash.c ++ * ++ * Copyright (C) 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../../drivers/input/lirc/lirc_rx51.h" ++ ++#define RX51_DEBUG_BASE 0x08000000 /* debug board */ ++#define RX51_ETHR_START RX51_DEBUG_BASE ++#define RX51_ETHR_GPIO_IRQ 54 ++ ++#define RX51_TSC2005_RESET_GPIO 104 ++#define RX51_TSC2005_IRQ_GPIO 100 ++#define RX51_LP5523_IRQ_GPIO 55 ++#define RX51_LP5523_CHIP_EN_GPIO 41 ++ ++#define RX51_SMC91X_CS 1 ++ ++#define RX51_WL12XX_POWER_GPIO 87 ++#define RX51_WL12XX_IRQ_GPIO 42 ++ ++static void rx51_wl12xx_set_power(bool enable); ++static void rx51_tsc2005_set_reset(bool enable); ++ ++static struct resource rx51_smc91x_resources[] = { ++ [0] = { ++ .start = RX51_ETHR_START, ++ .end = RX51_ETHR_START + SZ_4K, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = OMAP_GPIO_IRQ(RX51_ETHR_GPIO_IRQ), ++ .end = 0, ++ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, ++ }, ++}; ++ ++static struct platform_device rx51_smc91x_device = { ++ .name = "smc91x", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(rx51_smc91x_resources), ++ .resource = rx51_smc91x_resources, ++}; ++ ++static struct tsc2005_platform_data tsc2005_config = { ++ .ts_x_plate_ohm = 280, ++ .ts_hw_avg = 0, ++ .ts_touch_pressure = 1500, ++ .ts_stab_time = 1000, ++ .ts_pressure_max = 2048, ++ .ts_pressure_fudge = 2, ++ .ts_x_max = 4096, ++ .ts_x_fudge = 4, ++ .ts_y_max = 4096, ++ .ts_y_fudge = 7, ++ ++ .esd_timeout = 8*1000, /* ms of inactivity before we check */ ++ ++ .set_reset = NULL, ++}; ++ ++static struct lis302dl_platform_data rx51_lis302dl_data = { ++ .int1_gpio = 181, ++ .int2_gpio = 180, ++}; ++ ++static struct lp5523_led_config rx51_lp5523_led_config[] = { ++ { ++ .name = "kb1", ++ .led_nr = 0, ++ .led_current = 50, ++ }, { ++ .name = "kb2", ++ .led_nr = 1, ++ .led_current = 50, ++ }, { ++ .name = "kb3", ++ .led_nr = 2, ++ .led_current = 50, ++ }, { ++ .name = "kb4", ++ .led_nr = 3, ++ .led_current = 50, ++ }, { ++ .name = "b", ++ .led_nr = 4, ++ .led_current = 50, ++ }, { ++ .name = "g", ++ .led_nr = 5, ++ .led_current = 50, ++ }, { ++ .name = "r", ++ .led_nr = 6, ++ .led_current = 50, ++ }, { ++ .name = "kb5", ++ .led_nr = 7, ++ .led_current = 50, ++ }, { ++ .name = "kb6", ++ .led_nr = 8, ++ .led_current = 50, ++ } ++}; ++ ++static struct lp5523_platform_data rx51_lp5523_platform_data = { ++ .led_config = rx51_lp5523_led_config, ++ .num_leds = 9, ++ .irq = 
OMAP_GPIO_IRQ(RX51_LP5523_IRQ_GPIO), ++ .chip_en = RX51_LP5523_CHIP_EN_GPIO, ++}; ++ ++static struct tsl2563_platform_data rx51_tsl2563_platform_data = { ++ .cover_comp_gain = 16, ++}; ++ ++static struct wl12xx_platform_data wl12xx_pdata = { ++ .set_power = rx51_wl12xx_set_power, ++}; ++ ++static struct omap2_mcspi_device_config tsc2005_mcspi_config = { ++ .turbo_mode = 0, ++ .single_channel = 1, ++}; ++ ++static struct omap2_mcspi_device_config wl12xx_mcspi_config = { ++ .turbo_mode = 0, ++ .single_channel = 1, ++}; ++ ++static struct spi_board_info rx51_peripherals_spi_board_info[] = { ++ [0] = { ++ .modalias = "tsc2005", ++ .bus_num = 1, ++ .chip_select = 0, ++ .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO), ++ .max_speed_hz = 6000000, ++ .controller_data = &tsc2005_mcspi_config, ++ .platform_data = &tsc2005_config, ++ }, ++ [1] = { ++ .modalias = "wl12xx", ++ .bus_num = 4, ++ .chip_select = 0, ++ .max_speed_hz = 48000000, ++ .mode = SPI_MODE_3, ++ .controller_data = &wl12xx_mcspi_config, ++ .platform_data = &wl12xx_pdata, ++ }, ++}; ++ ++static int rx51_keymap[] = { ++ KEY(0, 0, KEY_Q), ++ KEY(0, 1, KEY_W), ++ KEY(0, 2, KEY_E), ++ KEY(0, 3, KEY_R), ++ KEY(0, 4, KEY_T), ++ KEY(0, 5, KEY_Y), ++ KEY(0, 6, KEY_U), ++ KEY(0, 7, KEY_I), ++ KEY(1, 0, KEY_O), ++ KEY(1, 1, KEY_D), ++ KEY(1, 2, KEY_DOT), ++ KEY(1, 3, KEY_V), ++ KEY(1, 4, KEY_DOWN), ++ KEY(1, 7, KEY_F7), ++ KEY(2, 0, KEY_P), ++ KEY(2, 1, KEY_F), ++ KEY(2, 2, KEY_UP), ++ KEY(2, 3, KEY_B), ++ KEY(2, 4, KEY_RIGHT), ++ KEY(2, 7, KEY_F8), ++ KEY(3, 0, KEY_COMMA), ++ KEY(3, 1, KEY_G), ++ KEY(3, 2, KEY_ENTER), ++ KEY(3, 3, KEY_N), ++ KEY(4, 0, KEY_BACKSPACE), ++ KEY(4, 1, KEY_H), ++ KEY(4, 3, KEY_M), ++ KEY(4, 4, KEY_LEFTCTRL), ++ KEY(5, 1, KEY_J), ++ KEY(5, 2, KEY_Z), ++ KEY(5, 3, KEY_SPACE), ++ KEY(5, 4, KEY_RIGHTALT), ++ KEY(6, 0, KEY_A), ++ KEY(6, 1, KEY_K), ++ KEY(6, 2, KEY_X), ++ KEY(6, 3, KEY_SPACE), ++ KEY(6, 4, KEY_LEFTSHIFT), ++ KEY(7, 0, KEY_S), ++ KEY(7, 1, KEY_L), ++ KEY(7, 2, KEY_C), ++ KEY(7, 3, KEY_LEFT), ++ KEY(0xff, 2, KEY_F9), ++ KEY(0xff, 4, KEY_F10), ++ KEY(0xff, 5, KEY_F11), ++}; ++ ++static struct twl4030_keypad_data rx51_kp_data = { ++ .rows = 8, ++ .cols = 8, ++ .keymap = rx51_keymap, ++ .keymapsize = ARRAY_SIZE(rx51_keymap), ++ .rep = 1, ++}; ++ ++static struct camera_button_platform_data rx51_cam_button_data = { ++ .shutter = 69, ++ .focus = 68, ++}; ++ ++static struct platform_device rx51_camera_button_device = { ++ .name = "camera_button", ++ .id = -1, ++ .dev = { ++ .platform_data = &rx51_cam_button_data, ++ }, ++}; ++ ++static struct lirc_rx51_platform_data rx51_lirc_data = { ++ .set_max_mpu_wakeup_lat = omap_pm_set_max_mpu_wakeup_lat, ++ .pwm_timer = 9, /* Use GPT 9 for CIR */ ++}; ++ ++static struct platform_device rx51_lirc_device = { ++ .name = "lirc_rx51", ++ .id = -1, ++ .dev = { ++ .platform_data = &rx51_lirc_data, ++ }, ++}; ++ ++static struct platform_device *rx51_peripherals_devices[] = { ++ &rx51_smc91x_device, ++ &rx51_camera_button_device, ++ &rx51_lirc_device, ++}; ++ ++static void __init rx51_init_smc91x(void) ++{ ++ int eth_cs; ++ unsigned long cs_mem_base; ++ unsigned int rate; ++ struct clk *l3ck; ++ ++ eth_cs = RX51_SMC91X_CS; ++ ++ l3ck = clk_get(NULL, "core_l3_ck"); ++ if (IS_ERR(l3ck)) ++ rate = 100000000; ++ else ++ rate = clk_get_rate(l3ck); ++ ++ if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) { ++ printk(KERN_ERR "Failed to request GPMC mem for smc91x\n"); ++ return; ++ } ++ ++ rx51_smc91x_resources[0].start = cs_mem_base + 0x0; ++ rx51_smc91x_resources[0].end = cs_mem_base + 
0xf; ++ udelay(100); ++ ++ if (gpio_request(RX51_ETHR_GPIO_IRQ, "SMC91X irq") < 0) { ++ printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n", ++ RX51_ETHR_GPIO_IRQ); ++ return; ++ } ++ gpio_direction_input(RX51_ETHR_GPIO_IRQ); ++} ++ ++static void __init rx51_init_lp5523(void) ++{ ++ int r; ++ ++ r = gpio_request(RX51_LP5523_IRQ_GPIO, "lp5523 IRQ"); ++ if (r >= 0) ++ gpio_direction_input(RX51_LP5523_IRQ_GPIO); ++ else ++ printk(KERN_ERR "unable to get lp5523 IRQ GPIO\n"); ++ ++ r = gpio_request(RX51_LP5523_CHIP_EN_GPIO, "lp5523 CHIP EN"); ++ ++ if (r >= 0) { ++ r = gpio_direction_output(RX51_LP5523_CHIP_EN_GPIO, 1); ++ if (r < 0) ++ printk(KERN_ERR "unable to set lp5523 CHIP EN GPIO to output\n"); ++ } else { ++ printk(KERN_ERR "unable to get lp5523 CHIP EN GPIO\n"); ++ } ++} ++ ++static void __init rx51_init_tsc2005(void) ++{ ++ int r; ++ ++ r = gpio_request(RX51_TSC2005_IRQ_GPIO, "tsc2005 DAV IRQ"); ++ if (r >= 0) ++ gpio_direction_input(RX51_TSC2005_IRQ_GPIO); ++ else ++ printk(KERN_ERR "unable to get DAV GPIO\n"); ++ ++ r = gpio_request(RX51_TSC2005_RESET_GPIO, "tsc2005 reset"); ++ if (r >= 0) { ++ gpio_direction_output(RX51_TSC2005_RESET_GPIO, 1); ++ tsc2005_config.set_reset = rx51_tsc2005_set_reset; ++ } else { ++ printk(KERN_ERR "unable to get tsc2005 reset GPIO\n"); ++ tsc2005_config.esd_timeout = 0; ++ } ++} ++ ++static void rx51_tsc2005_set_reset(bool enable) ++{ ++ gpio_set_value(RX51_TSC2005_RESET_GPIO, enable); ++} ++ ++static void rx51_wl12xx_set_power(bool enable) ++{ ++ gpio_set_value(RX51_WL12XX_POWER_GPIO, enable); ++} ++ ++static void __init rx51_init_wl12xx(void) ++{ ++ int irq, ret; ++ ++ ret = gpio_request(RX51_WL12XX_POWER_GPIO, "wl12xx power"); ++ if (ret < 0) ++ goto error; ++ ++ ret = gpio_direction_output(RX51_WL12XX_POWER_GPIO, 0); ++ if (ret < 0) ++ goto err_power; ++ ++ ret = gpio_request(RX51_WL12XX_IRQ_GPIO, "wl12xx irq"); ++ if (ret < 0) ++ goto err_power; ++ ++ ret = gpio_direction_input(RX51_WL12XX_IRQ_GPIO); ++ if (ret < 0) ++ goto err_irq; ++ ++ irq = gpio_to_irq(RX51_WL12XX_IRQ_GPIO); ++ if (irq < 0) ++ goto err_irq; ++ ++ rx51_peripherals_spi_board_info[1].irq = irq; ++ ++ return; ++ ++err_irq: ++ gpio_free(RX51_WL12XX_IRQ_GPIO); ++ ++err_power: ++ gpio_free(RX51_WL12XX_POWER_GPIO); ++ ++error: ++ printk(KERN_ERR "wl12xx board initialisation failed\n"); ++ wl12xx_pdata.set_power = NULL; ++ ++ /* ++ * Now rx51_peripherals_spi_board_info[1].irq is zero and ++ * set_power is null, and wl12xx_probe() will fail. ++ */ ++} ++ ++static struct twl4030_usb_data rx51_usb_data = { ++ .usb_mode = T2_USB_MODE_ULPI, ++}; ++ ++static struct twl4030_madc_platform_data rx51_madc_data = { ++ .irq_line = 1, ++}; ++ ++static struct twl4030_gpio_platform_data rx51_gpio_data = { ++ .gpio_base = OMAP_MAX_GPIO_LINES, ++ .irq_base = TWL4030_GPIO_IRQ_BASE, ++ .irq_end = TWL4030_GPIO_IRQ_END, ++}; ++ ++static struct twl4030_ins sleep_on_seq[] = { ++/* ++ * Turn off everything. ++ */ ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_SLEEP), 2}, ++}; ++ ++static struct twl4030_script sleep_on_script = { ++ .script = sleep_on_seq, ++ .size = ARRAY_SIZE(sleep_on_seq), ++ .number_of_events = 1, ++ .events[0] = { ++ .offset = 0, ++ .event = TRITON_SLEEP, ++ }, ++}; ++ ++static struct twl4030_ins wakeup_seq[] = { ++/* ++ * Reenable everything. 
++ */ ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2}, ++}; ++ ++static struct twl4030_script wakeup_script = { ++ .script = wakeup_seq, ++ .size = ARRAY_SIZE(wakeup_seq), ++ .number_of_events = 2, ++ .events = { ++ [0] = { ++ .offset = 0, ++ .event = TRITON_WAKEUP12, ++ }, ++ [1] = { ++ .offset = 0, ++ .event = TRITON_WAKEUP3, ++ }, ++ }, ++}; ++ ++static struct twl4030_ins wrst_seq[] = { ++/* ++ * Reset twl4030. ++ * Reset VDD1 regulator. ++ * Reset VDD2 regulator. ++ * Reset VPLL1 regulator. ++ * Enable sysclk output. ++ * Reenable twl4030. ++ */ ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 2}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 1, RES_STATE_ACTIVE), ++ 0x13}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 3, RES_STATE_OFF), 0x13}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_VDD1, RES_STATE_WRST), 0x13}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_VDD2, RES_STATE_WRST), 0x13}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_VPLL1, RES_STATE_WRST), 0x35}, ++ {MSG_SINGULAR(DEV_GRP_P3, RES_HFCLKOUT, RES_STATE_ACTIVE), 2}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE), 2}, ++}; ++ ++static struct twl4030_script wrst_script = { ++ .script = wrst_seq, ++ .size = ARRAY_SIZE(wrst_seq), ++ .number_of_events = 1, ++ .events[0] = { ++ .offset = 0, ++ .event = TRITON_WRST, ++ }, ++}; ++ ++static struct twl4030_script *twl4030_scripts[] = { ++ &sleep_on_script, ++ &wakeup_script, ++ &wrst_script, ++}; ++ ++static struct twl4030_resconfig twl4030_rconfig[] = { ++ ++ { .resource = RES_VDD1, .devgroup = -1, .type = 1, .type2 = -1, ++ .remap = 0 }, ++ { .resource = RES_VDD2, .devgroup = -1, .type = 1, .type2 = -1, ++ .remap = 0 }, ++ { .resource = RES_VPLL1, .devgroup = -1, .type = 1, .type2 = -1, ++ .remap = 0 }, ++ { .resource = RES_VPLL2, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VAUX1, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VAUX2, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VAUX3, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VAUX4, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VMMC1, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VMMC2, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VDAC, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VSIM, .devgroup = -1, .type = -1, .type2 = 3, ++ .remap = -1 }, ++ { .resource = RES_VINTANA1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = -1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_VINTANA2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = 1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_VINTDIG, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = -1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_VIO, .devgroup = DEV_GRP_P3, ++ .type = 1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_CLKEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = 1, .type2 = -1 , .remap = -1 }, ++ { .resource = RES_REGEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = 1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_NRES_PWRON, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = 1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_SYSEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, ++ .type = 1, .type2 = -1, .remap = -1 }, ++ { .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P3, .type = 1, ++ .type2 = -1, .remap = -1 }, ++ { .resource = RES_32KCLKOUT, .devgroup = -1, .type = 1, 
.type2 = -1, ++ .remap = -1 }, ++ { .resource = RES_RESET, .devgroup = -1, .type = 1, .type2 = -1, ++ .remap = -1 }, ++ { .resource = RES_Main_Ref, .devgroup = -1, .type = 1, .type2 = -1, ++ .remap = -1 }, ++ { 0, 0}, ++}; ++ ++static struct twl4030_power_data rx51_t2scripts_data = { ++ .scripts = twl4030_scripts, ++ .scripts_size = ARRAY_SIZE(twl4030_scripts), ++ .resource_config = twl4030_rconfig, ++}; ++ ++ ++extern struct regulator_init_data rx51_vdac_data; ++ ++static struct twl4030_platform_data rx51_twldata = { ++ .irq_base = TWL4030_IRQ_BASE, ++ .irq_end = TWL4030_IRQ_END, ++ ++ /* platform_data for children goes here */ ++ .gpio = &rx51_gpio_data, ++ .keypad = &rx51_kp_data, ++ .madc = &rx51_madc_data, ++ .power = &rx51_t2scripts_data, ++ .usb = &rx51_usb_data, ++ ++ /* LDOs */ ++ .vdac = &rx51_vdac_data, ++}; ++ ++static struct omap_ssi_board_config ssi_board_config = { ++ .num_ports = 1, ++ .cawake_gpio = { 151 }, ++}; ++ ++static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = { ++ { ++ I2C_BOARD_INFO("twl4030", 0x48), ++ .flags = I2C_CLIENT_WAKE, ++ .irq = INT_34XX_SYS_NIRQ, ++ .platform_data = &rx51_twldata, ++ }, ++}; ++ ++static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = { ++ { ++ I2C_BOARD_INFO("lp5523", 0x32), ++ .platform_data = &rx51_lp5523_platform_data, ++ }, ++ { ++ I2C_BOARD_INFO("tsl2563", 0x29), ++ .platform_data = &rx51_tsl2563_platform_data, ++ }, ++}; ++ ++static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = { ++ { ++ I2C_BOARD_INFO("lis302dl", 0x1d), ++ .platform_data = &rx51_lis302dl_data, ++ }, ++}; ++ ++static int __init rx51_i2c_init(void) ++{ ++ omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1, ++ ARRAY_SIZE(rx51_peripherals_i2c_board_info_1)); ++ omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, ++ ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); ++ omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3, ++ ARRAY_SIZE(rx51_peripherals_i2c_board_info_3)); ++ ++ return 0; ++} ++ ++ ++static int __init rx51_peripherals_init(void) ++{ ++ if (!machine_is_nokia_rx51()) ++ return 0; ++ ++ rx51_init_wl12xx(); ++ ++ platform_add_devices(rx51_peripherals_devices, ++ ARRAY_SIZE(rx51_peripherals_devices)); ++ spi_register_board_info(rx51_peripherals_spi_board_info, ++ ARRAY_SIZE(rx51_peripherals_spi_board_info)); ++ rx51_i2c_init(); ++ rx51_init_smc91x(); ++ rx51_init_tsc2005(); ++ rx51_init_lp5523(); ++ omap_ssi_config(&ssi_board_config); ++ return 0; ++} ++ ++subsys_initcall(rx51_peripherals_init); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-sdram.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-sdram.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-sdram.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-sdram.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,252 @@ ++/* ++ * SDRC register values for the Nokia Memories ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * ++ * Lauri Leukkunen ++ * ++ * Original code by Juha Yrjölä ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++ ++/* In picoseconds, except for tREF (ns), tXP, tCKE, tWTR (clks) */ ++struct sdram_timings { ++ u32 casl; ++ u32 tDAL; ++ u32 tDPL; ++ u32 tRRD; ++ u32 tRCD; ++ u32 tRP; ++ u32 tRAS; ++ u32 tRC; ++ u32 tRFC; ++ u32 tXSR; ++ ++ u32 tREF; /* in ns */ ++ ++ u32 tXP; ++ u32 tCKE; ++ u32 tWTR; ++}; ++ ++struct sdram_info { ++ u8 row_lines; ++}; ++ ++ ++struct omap_sdrc_params rx51_sdrc_params[4]; ++ ++static const struct sdram_timings rx51_timings[] = { ++ { ++ .casl = 3, ++ .tDAL = 33000, ++ .tDPL = 15000, ++ .tRRD = 12000, ++ .tRCD = 22500, ++ .tRP = 18000, ++ .tRAS = 42000, ++ .tRC = 66000, ++ .tRFC = 138000, ++ .tXSR = 200000, ++ ++ .tREF = 7800, ++ ++ .tXP = 2, ++ .tCKE = 2, ++ .tWTR = 2 ++ }, ++}; ++ ++#define CM_BASE 0x48004000 ++ ++#define CM_CLKSEL_CORE 0x0a40 ++#define CM_CLKSEL1_PLL 0x0d40 ++ ++#define PRM_CLKSEL 0x48306d40 ++#define PRM_CLKSRC_CTRL 0x48307270 ++ ++static u32 cm_base = CM_BASE; ++ ++static inline u32 cm_read_reg(int idx) ++{ ++ return *(u32 *)OMAP2_IO_ADDRESS(cm_base + idx); ++} ++ ++static const unsigned long sys_clk_rate_table[] = { ++ 12000, 13000, 19200, 26000, 38400, 16800 ++}; ++ ++static unsigned long sdrc_get_fclk_period(long rate) ++{ ++ /* In picoseconds */ ++ return 1000000000 / rate; ++} ++ ++static unsigned int sdrc_ps_to_ticks(unsigned int time_ps, long rate) ++{ ++ unsigned long tick_ps; ++ ++ /* Calculate in picosecs to yield more exact results */ ++ tick_ps = sdrc_get_fclk_period(rate); ++ ++ return (time_ps + tick_ps - 1) / tick_ps; ++} ++#undef DEBUG ++#ifdef DEBUG ++static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit, ++ int ticks, long rate, const char *name) ++#else ++static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit, ++ int ticks) ++#endif ++{ ++ int mask, nr_bits; ++ ++ nr_bits = end_bit - st_bit + 1; ++ if (ticks >= 1 << nr_bits) ++ return -1; ++ mask = (1 << nr_bits) - 1; ++ *regval &= ~(mask << st_bit); ++ *regval |= ticks << st_bit; ++#ifdef DEBUG ++ printk(KERN_INFO "SDRC %s: %i ticks %i ns\n", name, ticks, ++ (unsigned int)sdrc_get_fclk_period(rate) * ticks / ++ 1000); ++#endif ++ ++ return 0; ++} ++ ++#ifdef DEBUG ++#define SDRC_SET_ONE(reg, st, end, field, rate) \ ++ if (set_sdrc_timing_regval((reg), (st), (end), \ ++ rx51_timings->field, \ ++ (rate), #field) < 0) \ ++ err = -1; ++ ++#else ++#define SDRC_SET_ONE(reg, st, end, field, rate) \ ++ if (set_sdrc_timing_regval((reg), (st), (end), rx51_timings->field) \ ++ < 0) \ ++ err = -1; ++ ++#endif ++ ++#ifdef DEBUG ++static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit, ++ int time, long rate, const char *name) ++#else ++static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit, ++ int time, long rate) ++#endif ++{ ++ int ticks, ret; ++ ret = 0; ++ ++ if (time == 0) ++ ticks = 0; ++ else ++ ticks = sdrc_ps_to_ticks(time, rate); ++ ++#ifdef DEBUG ++ ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks, ++ rate, name); ++#else ++ ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks); ++#endif ++ ++ return ret; ++} ++ ++#ifdef DEBUG ++#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \ ++ if (set_sdrc_timing_regval_ps((reg), (st), (end), \ ++ rx51_timings->field, \ ++ (rate), #field) < 0) \ ++ err = -1; ++ ++#else ++#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \ ++ if (set_sdrc_timing_regval_ps((reg), (st), (end), \ ++ rx51_timings->field, (rate)) < 0) \ ++ err = -1; ++#endif ++ ++static 
int sdrc_timings(int id, long rate) ++{ ++ u32 ticks_per_us; ++ u32 rfr, l; ++ u32 actim_ctrla = 0, actim_ctrlb = 0; ++ u32 rfr_ctrl; ++ int err = 0; ++ long l3_rate = rate / 1000; ++ ++#ifdef DEBUG ++ printk(KERN_INFO "SDRC CLK rate: %i MHz\n", (int)l3_rate/1000); ++#endif ++ ++ SDRC_SET_ONE_PS(&actim_ctrla, 0, 4, tDAL, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 6, 8, tDPL, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 9, 11, tRRD, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 12, 14, tRCD, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 15, 17, tRP, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 18, 21, tRAS, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 22, 26, tRC, l3_rate); ++ SDRC_SET_ONE_PS(&actim_ctrla, 27, 31, tRFC, l3_rate); ++ ++ SDRC_SET_ONE_PS(&actim_ctrlb, 0, 7, tXSR, l3_rate); ++ ++ SDRC_SET_ONE(&actim_ctrlb, 8, 10, tXP, l3_rate); ++ SDRC_SET_ONE(&actim_ctrlb, 12, 14, tCKE, l3_rate); ++ SDRC_SET_ONE(&actim_ctrlb, 16, 17, tWTR, l3_rate); ++ ++ ticks_per_us = sdrc_ps_to_ticks(1000000, l3_rate); ++ rfr = rx51_timings[0].tREF * ticks_per_us / 1000; ++ if (rfr > 65535 + 50) ++ rfr = 65535; ++ else ++ rfr -= 50; ++ ++#ifdef DEBUG ++ printk(KERN_INFO "SDRC tREF: %i ticks\n", rfr); ++#endif ++ ++ l = rfr << 8; ++ rfr_ctrl = l | 0x1; /* autorefresh, reload counter with 1xARCV */ ++ ++ rx51_sdrc_params[id].rate = rate; ++ rx51_sdrc_params[id].actim_ctrla = actim_ctrla; ++ rx51_sdrc_params[id].actim_ctrlb = actim_ctrlb; ++ rx51_sdrc_params[id].rfr_ctrl = rfr_ctrl; ++ rx51_sdrc_params[id].mr = 0x32; ++ ++ rx51_sdrc_params[id + 1].rate = 0; ++ ++ return err; ++} ++ ++struct omap_sdrc_params *rx51_get_sdram_timings(void) ++{ ++ int err; ++ ++ err = sdrc_timings(0, 41500000); ++ err |= sdrc_timings(1, 83000000); ++ err |= sdrc_timings(2, 166000000); ++ ++ return &rx51_sdrc_params[0]; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-video.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-video.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx51-video.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx51-video.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,363 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx51-video.c ++ * ++ * Copyright (C) 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include <../drivers/video/omap2/displays/panel-acx565akm.h> ++ ++#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) ++ ++static struct omap2_mcspi_device_config mipid_mcspi_config = { ++ .turbo_mode = 0, ++ .single_channel = 1, ++}; ++ ++static int twl4030_bklight_level = -1; /* load from HW at first read */ ++ ++#define TWL4030_PWM0_ENABLE (1 << 2) ++#define TWL4030_PWM0_CLK_ENABLE (1 << 0) ++ ++static const int twl4030_bklight_max = 127; ++static int twl4030_bklight_initialized; ++ ++static int twl4030_get_bklight_level(void) ++{ ++ if (twl4030_bklight_level == -1) { ++ u8 reg; ++ ++ twl4030_i2c_read_u8(TWL4030_MODULE_INTBR, ®, 0x0c); ++ if (reg & TWL4030_PWM0_ENABLE) { ++ twl4030_i2c_read_u8(TWL4030_MODULE_PWM0, ®, 0x01); ++ twl4030_bklight_level = reg; ++ } else { ++ twl4030_bklight_level = 0; ++ } ++ } ++ ++ return twl4030_bklight_level; ++} ++ ++static void twl4030_set_bklight_level(int level) ++{ ++ u8 reg; ++ ++ if (!twl4030_bklight_initialized) { ++ /* Mux GPIO6 as PWM0 : PMBR1 = xxxx01xx */ ++ twl4030_i2c_read_u8(TWL4030_MODULE_INTBR, ®, 0x0d); ++ reg &= ~(3 << 2); ++ reg |= (1 << 2); ++ twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, reg, 0x0d); ++ ++ twl4030_bklight_initialized = 1; ++ } ++ ++ twl4030_i2c_read_u8(TWL4030_MODULE_INTBR, ®, 0x0c); ++ ++ if (level != 0) { ++ /* Configure the duty cycle. */ ++ twl4030_i2c_write_u8(TWL4030_MODULE_PWM0, 0, 0x00); ++ twl4030_i2c_write_u8(TWL4030_MODULE_PWM0, level, 0x01); ++ ++ /* Enable clock for PWM0 a few microseconds before PWM0 itself. ++ This is not mentioned in TWL4030 spec. but some older boards ++ don't set backlight level properly from time to time ++ without this delay. 
*/ ++ reg |= TWL4030_PWM0_CLK_ENABLE; ++ twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, reg, 0x0c); ++ udelay(50); ++ reg |= TWL4030_PWM0_ENABLE; ++ } else { ++ /* Disable PWM0 before disabling its clock, see comment above */ ++ reg &= ~TWL4030_PWM0_ENABLE; ++ twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, reg, 0x0c); ++ udelay(50); ++ reg &= ~TWL4030_PWM0_CLK_ENABLE; ++ } ++ ++ twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, reg, 0x0c); ++ ++ twl4030_bklight_level = level; ++} ++ ++static struct spi_board_info rx51_video_spi_board_info[] = { ++ [0] = { ++ .modalias = "acx565akm", ++ .bus_num = 1, ++ .chip_select = 2, ++ .max_speed_hz = 6000000, ++ .controller_data = &mipid_mcspi_config, ++ }, ++}; ++ ++/* acx565akm LCD Panel */ ++static int acx565akm_enable(struct omap_display *display) ++{ ++ if (display->hw_config.panel_reset_gpio != -1) { ++ pr_debug("Release LCD reset\n"); ++ gpio_set_value(display->hw_config.panel_reset_gpio, 1); ++ } ++ ++ return 0; ++} ++ ++static void acx565akm_disable(struct omap_display *display) ++{ ++ if (display->hw_config.panel_reset_gpio != -1) { ++ pr_debug("Enable LCD reset\n"); ++ gpio_set_value(display->hw_config.panel_reset_gpio, 0); ++ } ++} ++ ++static int rx51_set_backlight_level(struct omap_display *display, int level) ++{ ++ twl4030_set_bklight_level(level); ++ ++ return 0; ++} ++ ++static int rx51_get_backlight_level(struct omap_display *display) ++{ ++ return twl4030_get_bklight_level(); ++} ++ ++static struct acx565akm_panel_data acx565akm_data = { ++ .bc_connected = 1, ++}; ++ ++static struct omap_dss_display_config acx565akm_display_data = { ++ .type = OMAP_DISPLAY_TYPE_SDI, ++ .name = "lcd", ++ .panel_name = "panel-acx565akm", ++ .panel_enable = acx565akm_enable, ++ .panel_disable = acx565akm_disable, ++ .panel_reset_gpio = -1, /* set later from tag data */ ++ .max_backlight_level = 127, ++ .set_backlight = rx51_set_backlight_level, ++ .get_backlight = rx51_get_backlight_level, ++ .panel_data = &acx565akm_data, ++ .u.sdi = { ++ .datapairs = 2, ++ }, ++}; ++ ++static void __init acx565akm_dev_init(void) ++{ ++ const struct omap_lcd_config *conf; ++ ++ conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config); ++ if (conf != NULL) { ++ int ret = gpio_request(conf->nreset_gpio, "acx565akm-reset"); ++ if (ret) { ++ printk(KERN_ERR "Failed to request GPIO %d for " ++ "acx565akm reset\n", conf->nreset_gpio); ++ } else { ++ gpio_direction_output(conf->nreset_gpio, 1); ++ acx565akm_display_data.panel_reset_gpio = ++ conf->nreset_gpio; ++ } ++ } ++} ++ ++/* TV-out */ ++static struct omap_dss_display_config venc_display_data = { ++ .type = OMAP_DISPLAY_TYPE_VENC, ++ .name = "tv", ++ .u.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE, ++}; ++ ++/* DSS */ ++static struct omap_dss_board_info rx51_dss_data = { ++ .get_last_off_on_transaction_id = get_last_off_on_transaction_id, ++ .set_min_bus_tput = omap_pm_set_min_bus_tput, ++ .num_displays = 2, ++ .displays = { ++ &acx565akm_display_data, ++ &venc_display_data, ++ }, ++ .fifo_thresholds = { ++ [OMAP_DSS_GFX] = { .low = 2944, .high = 3008, }, ++ }, ++}; ++ ++static struct platform_device rx51_dss_device = { ++ .name = "omapdss", ++ .id = -1, ++ .dev = { ++ .platform_data = &rx51_dss_data, ++ }, ++}; ++ ++static struct platform_device *rx51_video_devices[] = { ++ &rx51_dss_device, ++}; ++ ++/* TV-OUT (VDAC) regulator */ ++static struct regulator_consumer_supply rx51_vdac_supply = { ++ .supply = "vdac", ++ .dev = &rx51_dss_device.dev, ++}; ++ ++struct regulator_init_data rx51_vdac_data = { ++ .constraints = { ++ .name = 
"VDAC_18", ++ .min_uV = 1800000, ++ .max_uV = 1800000, ++ .apply_uV = true, ++ .valid_modes_mask = REGULATOR_MODE_NORMAL ++ | REGULATOR_MODE_STANDBY, ++ .valid_ops_mask = REGULATOR_CHANGE_MODE ++ | REGULATOR_CHANGE_STATUS, ++ }, ++ .num_consumer_supplies = 1, ++ .consumer_supplies = &rx51_vdac_supply, ++}; ++ ++static struct omapfb_platform_data omapfb_config; ++ ++static size_t rx51_vrfb_min_phys_size(int bpp) ++{ ++ unsigned bytespp = bpp >> 3; ++ size_t landscape; ++ size_t portrait; ++ ++ /* For physical screen resolution of 800x480. */ ++ landscape = omap_vrfb_min_phys_size(800, 480, bytespp); ++ portrait = omap_vrfb_min_phys_size(480, 800, bytespp); ++ ++ return max(landscape, portrait); ++} ++ ++ ++static void __init rx51_add_gfx_fb(u32 paddr, size_t size, enum omapfb_color_format format) ++{ ++ omapfb_config.mem_desc.region_cnt = 1; ++ omapfb_config.mem_desc.region[0].paddr = paddr; ++ omapfb_config.mem_desc.region[0].size = size; ++ omapfb_config.mem_desc.region[0].format = format; ++ omapfb_config.mem_desc.region[0].format_used = 1; ++ omapfb_set_platform_data(&omapfb_config); ++} ++ ++static void __init rx51_detect_vram(size_t vid_plane_mem_size) ++{ ++ unsigned long vram_paddr; ++ size_t vram_size; ++ unsigned long gfx_paddr; ++ size_t gfx_size; ++ enum omapfb_color_format format; ++ ++ gfx_paddr = dss_boottime_get_plane_base(0); ++ ++ if (gfx_paddr == -1UL) ++ return; ++ ++ gfx_size = rx51_vrfb_min_phys_size(dss_boottime_get_plane_bpp(0)); ++ format = dss_boottime_get_plane_format(0); ++ ++ vram_size = PAGE_ALIGN(gfx_size) + PAGE_ALIGN(vid_plane_mem_size); ++ vram_paddr = gfx_paddr + PAGE_ALIGN(gfx_size) - vram_size; ++ ++ rx51_add_gfx_fb(gfx_paddr, gfx_size, format); ++ ++ if (reserve_bootmem(vram_paddr, vram_size, BOOTMEM_EXCLUSIVE) < 0) { ++ pr_err("FB: can't reserve VRAM region\n"); ++ return; ++ } ++ ++ if (omap_vram_add_region(vram_paddr, vram_size) < 0) { ++ free_bootmem(vram_paddr, vram_size); ++ pr_err("Can't set VRAM region\n"); ++ return; ++ } ++ ++ pr_info("VRAM: %zd bytes at 0x%lx. (Detected %zd at %#lx)\n", ++ vram_size, vram_paddr, gfx_size, gfx_paddr); ++} ++ ++static void __init rx51_alloc_vram(size_t vid_plane_mem_size) ++{ ++ unsigned long vram_paddr; ++ size_t vram_size; ++ size_t gfx_size; ++ ++ gfx_size = rx51_vrfb_min_phys_size(16); ++ ++ vram_size = PAGE_ALIGN(gfx_size) + PAGE_ALIGN(vid_plane_mem_size); ++ vram_paddr = virt_to_phys(alloc_bootmem_pages(vram_size)); ++ BUG_ON(vram_paddr & ~PAGE_MASK); ++ ++ rx51_add_gfx_fb(vram_paddr, gfx_size, OMAPFB_COLOR_RGB565); ++ ++ if (omap_vram_add_region(vram_paddr, vram_size) < 0) { ++ free_bootmem(vram_paddr, vram_size); ++ pr_err("Can't set VRAM region\n"); ++ return; ++ } ++ ++ pr_info("VRAM: %zd bytes at 0x%lx\n", vram_size, vram_paddr); ++} ++ ++ ++void __init rx51_video_mem_init(void) ++{ ++ size_t vid_plane_mem_size; ++ ++ /* 2 VID planes, 2 buffers, 2 bytes per pixel, 864x648 resolution. 
*/ ++ vid_plane_mem_size = 2 * 2 * PAGE_ALIGN(2 * 864 * 648); ++ ++ if (dss_boottime_plane_is_enabled(0)) ++ rx51_detect_vram(vid_plane_mem_size); ++ else ++ rx51_alloc_vram(vid_plane_mem_size); ++} ++ ++static int __init rx51_video_init(void) ++{ ++ if (!machine_is_nokia_rx51()) ++ return 0; ++ ++ platform_add_devices(rx51_video_devices, ARRAY_SIZE(rx51_video_devices)); ++ spi_register_board_info(rx51_video_spi_board_info, ++ ARRAY_SIZE(rx51_video_spi_board_info)); ++ acx565akm_dev_init(); ++ return 0; ++} ++ ++subsys_initcall(rx51_video_init); ++ ++#else ++ ++void __init rx51_video_mem_init(void) ++{ ++} ++ ++#endif /* CONFIG_FB_OMAP2 || CONFIG_FB_OMAP2_MODULE */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx71.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx71.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx71.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx71.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,140 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx71.c ++ * ++ * Copyright (C) 2007 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* MPU speeds */ ++#define S600M 600000000 ++#define S550M 550000000 ++#define S500M 500000000 ++#define S250M 250000000 ++#define S125M 125000000 ++ ++/* DSP speeds */ ++#define S430M 430000000 ++#define S400M 400000000 ++#define S360M 360000000 ++#define S180M 180000000 ++#define S90M 90000000 ++ ++/* L3 speeds */ ++#define S41M 41500000 ++#define S83M 83000000 ++#define S166M 166000000 ++ ++static struct omap_opp rx71_mpu_rate_table[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {S125M, VDD1_OPP1, 0x20}, ++ /*OPP2*/ ++ {S250M, VDD1_OPP2, 0x2c}, ++ /*OPP3*/ ++ {S500M, VDD1_OPP3, 0x31}, ++ /*OPP4*/ ++ {S550M, VDD1_OPP4, 0x37}, ++ /*OPP5*/ ++ {S600M, VDD1_OPP5, 0x42}, ++}; ++ ++static struct omap_opp rx71_l3_rate_table[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {S41M, VDD2_OPP1, 0x1e}, ++ /*OPP2*/ ++ {S83M, VDD2_OPP2, 0x20}, ++ /*OPP3*/ ++ {S166M, VDD2_OPP3, 0x2C}, ++}; ++ ++struct omap_opp rx71_dsp_rate_table[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {S90M, VDD1_OPP1, 0x1c}, ++ /*OPP2*/ ++ {S180M, VDD1_OPP2, 0x20}, ++ /*OPP3*/ ++ {S360M, VDD1_OPP3, 0x30}, ++ /*OPP4*/ ++ {S400M, VDD1_OPP4, 0x36}, ++ /*OPP5*/ ++ {S430M, VDD1_OPP5, 0x3C}, ++}; ++ ++static struct omap_uart_config rx71_uart_config = { ++ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)), ++}; ++ ++static struct omap_board_config_kernel rx71_config[] = { ++ { OMAP_TAG_UART, &rx71_uart_config }, ++}; ++ ++static void __init rx71_init_irq(void) ++{ ++ struct omap_sdrc_params *sdrc_params; ++ ++ sdrc_params = rx51_get_sdram_timings(); ++ ++ omap2_init_common_hw(sdrc_params, sdrc_params, ++ rx71_mpu_rate_table, ++ rx71_dsp_rate_table, ++ rx71_l3_rate_table); ++ ++ omap_init_irq(); ++ omap_gpio_init(); ++} ++ ++static void __init rx71_init(void) ++{ ++ omap_board_config = rx71_config; ++ omap_board_config_size = ARRAY_SIZE(rx71_config); ++ usb_musb_init(NULL); ++ omap_serial_init(); ++} ++ ++static void __init rx71_map_io(void) ++{ ++ omap2_set_globals_343x(); ++ omap2_map_common_io(); ++} ++ 
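As a point of reference for the rx51_get_sdram_timings() call in rx71_init_irq() above, the rounding done by sdrc_ps_to_ticks() and sdrc_timings() in board-rx51-sdram.c can be reproduced in a few lines of ordinary C. The snippet below is only an illustrative sketch of that arithmetic at the 166 MHz operating point (the 65535 cap on the refresh counter is omitted for brevity); it is a standalone userspace program, not part of the patch.

#include <stdio.h>

/* Same ceiling division as sdrc_ps_to_ticks(): rate in kHz, time in ps. */
static unsigned int ps_to_ticks(unsigned int time_ps, long rate_khz)
{
	unsigned long tick_ps = 1000000000UL / rate_khz;   /* ps per SDRC clock */

	return (time_ps + tick_ps - 1) / tick_ps;          /* round up */
}

int main(void)
{
	long rate = 166000;                                /* 166 MHz, in kHz */
	unsigned int trp = ps_to_ticks(18000, rate);       /* tRP = 18 ns -> 3 ticks */
	unsigned int rfr = 7800 * ps_to_ticks(1000000, rate) / 1000 - 50;
	                                                   /* tREF = 7.8 us, minus the 50-tick margin */

	printf("tRP = %u ticks, RFR counter = %u, rfr_ctrl = 0x%x\n",
	       trp, rfr, (rfr << 8) | 0x1);
	return 0;
}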
++MACHINE_START(NOKIA_RX71, "Nokia RX-71 board") ++ /* Maintainer: Ilkka Koskinen */ ++ .phys_io = 0x48000000, ++ .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc, ++ .boot_params = 0x80000100, ++ .map_io = rx71_map_io, ++ .init_irq = rx71_init_irq, ++ .init_machine = rx71_init, ++ .timer = &omap_timer, ++MACHINE_END +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx71-peripherals.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx71-peripherals.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-rx71-peripherals.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-rx71-peripherals.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,179 @@ ++/* ++ * linux/arch/arm/mach-omap2/board-rx71-peripherals.c ++ * ++ * Copyright (C) 2008 Nokia ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static struct spi_board_info rx71_peripherals_spi_board_info[] = { ++}; ++ ++static int rx71_keymap[] = { ++ /* Col, Row, Key */ ++ ++ KEY(0, 0, KEY_RESERVED), ++ KEY(0, 1, KEY_RESERVED), ++ KEY(0, 2, KEY_RESERVED), ++ KEY(0, 3, KEY_RESERVED), ++ KEY(0, 4, KEY_RESERVED), ++ KEY(0, 5, KEY_M), ++ KEY(0, 6, KEY_KPMINUS), ++ KEY(0, 7, KEY_RESERVED), ++ ++ KEY(1, 0, KEY_RESERVED), ++ KEY(1, 1, KEY_RESERVED), ++ KEY(1, 2, KEY_RESERVED), ++ KEY(1, 3, KEY_KPPLUS), ++ KEY(1, 4, KEY_DELETE), ++ KEY(1, 5, KEY_RESERVED), ++ KEY(1, 6, KEY_RESERVED), ++ KEY(1, 7, KEY_RESERVED), ++ ++ KEY(2, 0, KEY_RESERVED), ++ KEY(2, 1, KEY_RESERVED), ++ KEY(2, 2, KEY_RESERVED), ++ KEY(2, 3, KEY_RESERVED), ++ KEY(2, 4, KEY_5), ++ KEY(2, 5, KEY_9), ++ KEY(2, 6, KEY_KPASTERISK), ++ KEY(2, 7, KEY_RESERVED), ++ ++ KEY(3, 0, KEY_RESERVED), ++ KEY(3, 1, KEY_RESERVED), ++ KEY(3, 2, KEY_RESERVED), ++ KEY(3, 3, KEY_6), ++ KEY(3, 4, KEY_7), ++ KEY(3, 5, KEY_0), ++ KEY(3, 6, KEY_RESERVED), ++ KEY(3, 7, KEY_RESERVED), ++ ++ KEY(4, 0, KEY_RESERVED), ++ KEY(4, 1, KEY_RESERVED), ++ KEY(4, 2, KEY_RESERVED), ++ KEY(4, 3, KEY_8), ++ KEY(4, 4, KEY_ENTER), ++ KEY(4, 5, KEY_RESERVED), ++ KEY(4, 6, KEY_4), ++ KEY(4, 7, KEY_RESERVED), ++ ++ KEY(5, 0, KEY_BACKSPACE), ++ KEY(5, 1, KEY_RESERVED), ++ KEY(5, 2, KEY_F2), ++ KEY(5, 3, KEY_F3), ++ KEY(5, 4, KEY_F5), ++ KEY(5, 5, KEY_F4), ++ KEY(5, 6, KEY_RESERVED), ++ KEY(5, 7, KEY_RESERVED), ++ ++ KEY(6, 0, KEY_RESERVED), ++ KEY(6, 1, KEY_RESERVED), ++ KEY(6, 2, KEY_RESERVED), ++ KEY(6, 3, KEY_RESERVED), ++ KEY(6, 4, KEY_RESERVED), ++ KEY(6, 5, KEY_RESERVED), ++ KEY(6, 6, KEY_RESERVED), ++ KEY(6, 7, KEY_RESERVED), ++ ++ KEY(7, 0, KEY_RESERVED), ++ KEY(7, 1, KEY_RESERVED), ++ KEY(7, 2, KEY_RESERVED), ++ KEY(7, 3, KEY_RESERVED), ++ KEY(7, 4, KEY_RESERVED), ++ KEY(7, 5, KEY_RESERVED), ++ KEY(7, 6, KEY_RESERVED), ++ KEY(7, 7, KEY_RESERVED), ++}; ++ ++static struct twl4030_keypad_data rx71_kp_data = { ++ .rows = 8, ++ .cols = 8, ++ .keymap = rx71_keymap, ++ .keymapsize = ARRAY_SIZE(rx71_keymap), ++ .rep = 1, ++}; ++ ++static struct platform_device *rx71_peripherals_devices[] = { ++}; ++ ++static struct twl4030_usb_data rx71_usb_data = { ++ .usb_mode = T2_USB_MODE_ULPI, ++}; ++ ++static struct twl4030_madc_platform_data rx71_madc_data = { ++ .irq_line = 1, ++}; ++ ++static struct twl4030_gpio_platform_data 
rx71_gpio_data = { ++ .gpio_base = OMAP_MAX_GPIO_LINES, ++ .irq_base = TWL4030_GPIO_IRQ_BASE, ++ .irq_end = TWL4030_GPIO_IRQ_END, ++}; ++ ++static struct twl4030_platform_data rx71_twldata = { ++ .irq_base = TWL4030_IRQ_BASE, ++ .irq_end = TWL4030_IRQ_END, ++ ++ /* platform_data for children goes here */ ++ .gpio = &rx71_gpio_data, ++ .keypad = &rx71_kp_data, ++ .madc = &rx71_madc_data, ++ .usb = &rx71_usb_data, ++}; ++ ++static struct i2c_board_info __initdata rx71_peripherals_i2c_board_info_1[] = { ++ { ++ I2C_BOARD_INFO("twl4030", 0x48), ++ .flags = I2C_CLIENT_WAKE, ++ .irq = INT_34XX_SYS_NIRQ, ++ .platform_data = &rx71_twldata, ++ }, ++}; ++ ++static int __init rx71_i2c_init(void) ++{ ++ omap_register_i2c_bus(1, 2600, rx71_peripherals_i2c_board_info_1, ++ ARRAY_SIZE(rx71_peripherals_i2c_board_info_1)); ++ omap_register_i2c_bus(2, 100, NULL, 0); ++ omap_register_i2c_bus(3, 400, NULL, 0); ++ return 0; ++} ++ ++ ++static int __init rx71_peripherals_init(void) ++{ ++ if (!machine_is_nokia_rx71()) ++ return 0; ++ ++ platform_add_devices(rx71_peripherals_devices, ++ ARRAY_SIZE(rx71_peripherals_devices)); ++ spi_register_board_info(rx71_peripherals_spi_board_info, ++ ARRAY_SIZE(rx71_peripherals_spi_board_info)); ++ rx71_i2c_init(); ++ return 0; ++} ++ ++subsys_initcall(rx71_peripherals_init); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-2430sdp.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-2430sdp.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-2430sdp.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-2430sdp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -323,7 +323,7 @@ out: + + static void __init omap_2430sdp_init_irq(void) + { +- omap2_init_common_hw(NULL); ++ omap2_init_common_hw(NULL, NULL, NULL, NULL, NULL); + omap_init_irq(); + omap_gpio_init(); + sdp2430_init_smc91x(); +@@ -411,7 +411,7 @@ static void __init omap_2430sdp_init(voi + msecure_init(); + + sdp2430_flash_init(); +- usb_musb_init(); ++ usb_musb_init(NULL); + + spi_register_board_info(sdp2430_spi_board_info, + ARRAY_SIZE(sdp2430_spi_board_info)); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-3430sdp.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-3430sdp.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/board-3430sdp.c 2011-09-04 11:32:09.963211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/board-3430sdp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -39,13 +39,17 @@ + #include + #include + #include ++#include + + #include + #include + #include ++#include + + #include "sdram-qimonda-hyb18m512160af-6.h" + #include "mmc-twl4030.h" ++#include "pm.h" ++#include "omap3-opp.h" + + #define CONFIG_DISABLE_HFCLK 1 + +@@ -282,7 +286,10 @@ static inline void __init sdp3430_init_s + + static void __init omap_3430sdp_init_irq(void) + { +- omap2_init_common_hw(hyb18m512160af6_sdrc_params); ++ omap2_init_common_hw(hyb18m512160af6_sdrc_params, NULL, ++ omap3_mpu_rate_table, ++ omap3_dsp_rate_table, ++ omap3_l3_rate_table); + omap_init_irq(); + omap_gpio_init(); + sdp3430_init_smc91x(); +@@ -478,7 +485,7 @@ static void __init omap_3430sdp_init(voi + sdp3430_flash_init(); + msecure_init(); + omap_serial_init(); +- usb_musb_init(); ++ usb_musb_init(NULL); + usb_ehci_init(); + twl4030_mmc_init(mmc); + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock.c 2011-09-04 11:32:09.963211266 +0200 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock.c 2011-09-04 11:31:05.000000000 +0200 +@@ -591,8 +591,8 @@ u32 omap2_clksel_round_rate_div(struct c + const struct clksel_rate *clkr; + u32 last_div = 0; + +- printk(KERN_INFO "clock: clksel_round_rate_div: %s target_rate %ld\n", +- clk->name, target_rate); ++ pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n", ++ clk->name, target_rate); + + *new_div = 1; + +@@ -606,7 +606,7 @@ u32 omap2_clksel_round_rate_div(struct c + + /* Sanity check */ + if (clkr->div <= last_div) +- printk(KERN_ERR "clock: clksel_rate table not sorted " ++ pr_err("clock: clksel_rate table not sorted " + "for clock %s", clk->name); + + last_div = clkr->div; +@@ -618,7 +618,7 @@ u32 omap2_clksel_round_rate_div(struct c + } + + if (!clkr->div) { +- printk(KERN_ERR "clock: Could not find divisor for target " ++ pr_err("clock: Could not find divisor for target " + "rate %ld for clock %s parent %s\n", target_rate, + clk->name, clk->parent->name); + return ~0; +@@ -626,8 +626,8 @@ u32 omap2_clksel_round_rate_div(struct c + + *new_div = clkr->div; + +- printk(KERN_INFO "clock: new_div = %d, new_rate = %ld\n", *new_div, +- (clk->parent->rate / clkr->div)); ++ pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div, ++ (clk->parent->rate / clkr->div)); + + return (clk->parent->rate / clkr->div); + } +@@ -871,6 +871,36 @@ struct clk *omap2_clk_get_parent(struct + return clk->parent; + } + ++/** ++ * omap2_clk_round_rate_parent - return the rate for @clk if parent were changed ++ * @clk: struct clk that may change parents ++ * @new_parent: the struct clk that @clk may be reparented under ++ * ++ * Given a struct clk @clk and a new parent struct clk @new_parent, ++ * determine what @clk's rate would be after the reparent operation. ++ * Returns the new clock rate or -EINVAL upon error. ++ */ ++long omap2_clk_round_rate_parent(struct clk *clk, struct clk *new_parent) ++{ ++ u32 field_val, parent_div; ++ long rate; ++ ++ if (!clk->clksel || !new_parent) ++ return -EINVAL; ++ ++ parent_div = _omap2_clksel_get_src_field(new_parent, clk, &field_val); ++ if (!parent_div) ++ return -EINVAL; ++ ++ /* CLKSEL clocks follow their parents' rates, divided by a divisor */ ++ rate = new_parent->rate; ++ if (parent_div > 0) ++ rate /= parent_div; ++ ++ return rate; ++} ++ ++ + /* DPLL rate rounding code */ + + /** +@@ -1082,6 +1112,8 @@ void omap2_clk_disable_unused(struct clk + omap2_clk_disable(clk); + } else + _omap2_clk_disable(clk); ++ if (clk->clkdm.ptr != NULL) ++ pwrdm_clkdm_state_switch(clk->clkdm.ptr); + } + #endif + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clockdomain.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clockdomain.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clockdomain.c 2011-09-04 11:32:09.973211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clockdomain.c 2011-09-04 11:31:05.000000000 +0200 +@@ -133,6 +133,36 @@ static void _clkdm_del_autodeps(struct c + } + } + ++/* ++ * _omap2_clkdm_set_hwsup - set the hwsup idle transition bit ++ * @clkdm: struct clockdomain * ++ * @enable: int 0 to disable, 1 to enable ++ * ++ * Internal helper for actually switching the bit that controls hwsup ++ * idle transitions for clkdm. 
++ */ ++static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable) ++{ ++ u32 v; ++ ++ if (cpu_is_omap24xx()) { ++ if (enable) ++ v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO; ++ else ++ v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO; ++ } else if (cpu_is_omap34xx()) { ++ if (enable) ++ v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO; ++ else ++ v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO; ++ } else { ++ BUG(); ++ } ++ ++ cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask, ++ v << __ffs(clkdm->clktrctrl_mask), ++ clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL); ++} + + static struct clockdomain *_clkdm_lookup(const char *name) + { +@@ -295,7 +325,8 @@ struct clockdomain *clkdm_lookup(const c + * anything else to indicate failure; or -EINVAL if the function pointer + * is null. + */ +-int clkdm_for_each(int (*fn)(struct clockdomain *clkdm)) ++int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user), ++ void *user) + { + struct clockdomain *clkdm; + int ret = 0; +@@ -305,7 +336,7 @@ int clkdm_for_each(int (*fn)(struct cloc + + mutex_lock(&clkdm_mutex); + list_for_each_entry(clkdm, &clkdm_list, node) { +- ret = (*fn)(clkdm); ++ ret = (*fn)(clkdm, user); + if (ret) + break; + } +@@ -451,8 +482,6 @@ int omap2_clkdm_wakeup(struct clockdomai + */ + void omap2_clkdm_allow_idle(struct clockdomain *clkdm) + { +- u32 v; +- + if (!clkdm) + return; + +@@ -468,18 +497,9 @@ void omap2_clkdm_allow_idle(struct clock + if (atomic_read(&clkdm->usecount) > 0) + _clkdm_add_autodeps(clkdm); + +- if (cpu_is_omap24xx()) +- v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO; +- else if (cpu_is_omap34xx()) +- v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO; +- else +- BUG(); +- ++ _omap2_clkdm_set_hwsup(clkdm, 1); + +- cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask, +- v << __ffs(clkdm->clktrctrl_mask), +- clkdm->pwrdm.ptr->prcm_offs, +- CM_CLKSTCTRL); ++ pwrdm_clkdm_state_switch(clkdm); + } + + /** +@@ -493,8 +513,6 @@ void omap2_clkdm_allow_idle(struct clock + */ + void omap2_clkdm_deny_idle(struct clockdomain *clkdm) + { +- u32 v; +- + if (!clkdm) + return; + +@@ -507,16 +525,7 @@ void omap2_clkdm_deny_idle(struct clockd + pr_debug("clockdomain: disabling automatic idle transitions for %s\n", + clkdm->name); + +- if (cpu_is_omap24xx()) +- v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO; +- else if (cpu_is_omap34xx()) +- v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO; +- else +- BUG(); +- +- cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask, +- v << __ffs(clkdm->clktrctrl_mask), +- clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL); ++ _omap2_clkdm_set_hwsup(clkdm, 0); + + if (atomic_read(&clkdm->usecount) > 0) + _clkdm_del_autodeps(clkdm); +@@ -562,12 +571,17 @@ int omap2_clkdm_clk_enable(struct clockd + v = omap2_clkdm_clktrctrl_read(clkdm); + + if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) || +- (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) ++ (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) { ++ /* Disable HW transitions when we are changing deps */ ++ _omap2_clkdm_set_hwsup(clkdm, 0); + _clkdm_add_autodeps(clkdm); +- else ++ _omap2_clkdm_set_hwsup(clkdm, 1); ++ } else { + omap2_clkdm_wakeup(clkdm); ++ } + + pwrdm_wait_transition(clkdm->pwrdm.ptr); ++ pwrdm_clkdm_state_switch(clkdm); + + return 0; + } +@@ -615,10 +629,16 @@ int omap2_clkdm_clk_disable(struct clock + v = omap2_clkdm_clktrctrl_read(clkdm); + + if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) || +- (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) ++ (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) { ++ /* Disable HW transitions when we are changing deps */ ++ 
_omap2_clkdm_set_hwsup(clkdm, 0); + _clkdm_del_autodeps(clkdm); +- else ++ _omap2_clkdm_set_hwsup(clkdm, 1); ++ } else { + omap2_clkdm_sleep(clkdm); ++ } ++ ++ pwrdm_clkdm_state_switch(clkdm); + + return 0; + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clockdomains.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clockdomains.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clockdomains.h 2011-09-04 11:32:09.973211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clockdomains.h 2011-09-04 11:31:05.000000000 +0200 +@@ -40,7 +40,7 @@ static struct clockdomain cm_clkdm = { + static struct clockdomain virt_opp_clkdm = { + .name = "virt_opp_clkdm", + .pwrdm = { .name = "wkup_pwrdm" }, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430), + }; + + /* +@@ -185,7 +185,7 @@ static struct clockdomain sgx_clkdm = { + .pwrdm = { .name = "sgx_pwrdm" }, + .flags = CLKDM_CAN_HWSUP_SWSUP, + .clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), + }; + + /* +@@ -198,7 +198,7 @@ static struct clockdomain sgx_clkdm = { + static struct clockdomain d2d_clkdm = { + .name = "d2d_clkdm", + .pwrdm = { .name = "core_pwrdm" }, +- .flags = CLKDM_CAN_HWSUP, ++ .flags = CLKDM_CAN_HWSUP_SWSUP, + .clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_D2D_MASK, + .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), + }; +@@ -240,7 +240,7 @@ static struct clockdomain usbhost_clkdm + .pwrdm = { .name = "usbhost_pwrdm" }, + .flags = CLKDM_CAN_HWSUP_SWSUP, + .clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), + }; + + static struct clockdomain per_clkdm = { +@@ -290,7 +290,7 @@ static struct clockdomain dpll4_clkdm = + static struct clockdomain dpll5_clkdm = { + .name = "dpll5_clkdm", + .pwrdm = { .name = "dpll5_pwrdm" }, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), + }; + + #endif /* CONFIG_ARCH_OMAP34XX */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock.h 2011-09-04 11:32:09.973211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock.h 2011-09-04 11:31:05.000000000 +0200 +@@ -41,6 +41,7 @@ int omap2_clk_register(struct clk *clk); + int omap2_clk_enable(struct clk *clk); + void omap2_clk_disable(struct clk *clk); + long omap2_clk_round_rate(struct clk *clk, unsigned long rate); ++long omap2_clk_round_rate_parent(struct clk *clk, struct clk *new_parent); + int omap2_clk_set_rate(struct clk *clk, unsigned long rate); + int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent); + int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock24xx.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock24xx.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock24xx.c 2011-09-04 11:32:09.973211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock24xx.c 2011-09-04 11:31:05.000000000 +0200 +@@ -448,6 +448,7 @@ static struct clk_functions omap2_clk_fu + .clk_enable = omap2_clk_enable, + .clk_disable = omap2_clk_disable, + .clk_round_rate = omap2_clk_round_rate, ++ .clk_round_rate_parent = omap2_clk_round_rate_parent, + .clk_set_rate = omap2_clk_set_rate, + 
.clk_set_parent = omap2_clk_set_parent, + .clk_get_parent = omap2_clk_get_parent, +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock34xx.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock34xx.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock34xx.c 2011-09-04 11:32:09.973211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock34xx.c 2011-09-04 11:31:05.000000000 +0200 +@@ -26,6 +26,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -38,6 +40,7 @@ + #include "prm-regbits-34xx.h" + #include "cm.h" + #include "cm-regbits-34xx.h" ++#include "pm.h" + + /* CM_AUTOIDLE_PLL*.AUTO_* bit values */ + #define DPLL_AUTOIDLE_DISABLE 0x0 +@@ -45,6 +48,22 @@ + + #define MAX_DPLL_WAIT_TRIES 1000000 + ++#define MIN_SDRC_DLL_LOCK_FREQ 83000000 ++ ++#define CYCLES_PER_MHZ 1000000 ++ ++/* Scale factor for fixed-point arith in omap3_core_dpll_m2_set_rate() */ ++#define SDRC_MPURATE_SCALE 8 ++ ++/* 2^SDRC_MPURATE_BASE_SHIFT: MPU MHz that SDRC_MPURATE_LOOPS is defined for */ ++#define SDRC_MPURATE_BASE_SHIFT 11 ++ ++/* ++ * SDRC_MPURATE_LOOPS: Number of MPU loops to execute at ++ * 2^MPURATE_BASE_SHIFT MHz for SDRC to stabilize ++ */ ++#define SDRC_MPURATE_LOOPS 96 ++ + /** + * omap3_dpll_recalc - recalculate DPLL rate + * @clk: DPLL struct clk +@@ -117,7 +136,7 @@ static u16 _omap3_dpll_compute_freqsel(s + unsigned long fint; + u16 f = 0; + +- fint = clk->parent->rate / (n + 1); ++ fint = clk->parent->rate / n; + + pr_debug("clock: fint is %lu\n", fint); + +@@ -456,8 +475,12 @@ static int omap3_noncore_dpll_set_rate(s + static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) + { + u32 new_div = 0; +- unsigned long validrate, sdrcrate; +- struct omap_sdrc_params *sp; ++ u32 unlock_dll = 0; ++ u32 c; ++ unsigned long validrate, sdrcrate, mpurate; ++ struct omap_sdrc_params *sdrc_cs0; ++ struct omap_sdrc_params *sdrc_cs1; ++ int ret; + + if (!clk || !rate) + return -EINVAL; +@@ -465,34 +488,67 @@ static int omap3_core_dpll_m2_set_rate(s + if (clk != &dpll3_m2_ck) + return -EINVAL; + +- if (rate == clk->rate) +- return 0; +- + validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); + if (validrate != rate) + return -EINVAL; + + sdrcrate = sdrc_ick.rate; + if (rate > clk->rate) +- sdrcrate <<= ((rate / clk->rate) - 1); ++ sdrcrate <<= ((rate / clk->rate) >> 1); + else +- sdrcrate >>= ((clk->rate / rate) - 1); ++ sdrcrate >>= ((clk->rate / rate) >> 1); + +- sp = omap2_sdrc_get_params(sdrcrate); +- if (!sp) ++ ret = omap2_sdrc_get_params(sdrcrate, &sdrc_cs0, &sdrc_cs1); ++ if (ret) + return -EINVAL; + +- pr_info("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate, +- validrate); +- pr_info("clock: SDRC timing params used: %08x %08x %08x\n", +- sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb); ++ if (sdrcrate < MIN_SDRC_DLL_LOCK_FREQ) { ++ pr_debug("clock: will unlock SDRC DLL\n"); ++ unlock_dll = 1; ++ } + +- /* REVISIT: SRAM code doesn't support other M2 divisors yet */ +- WARN_ON(new_div != 1 && new_div != 2); ++ /* ++ * XXX This only needs to be done when the CPU frequency changes ++ */ ++ mpurate = arm_fck.rate / CYCLES_PER_MHZ; ++ c = (mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT; ++ c += 1; /* for safety */ ++ c *= SDRC_MPURATE_LOOPS; ++ c >>= SDRC_MPURATE_SCALE; ++ if (c == 0) ++ c = 1; ++ ++ /* Increase delay for OPP2 & OPP3 by one to avoid random crashes */ ++ if (c == 12 || c == 23) ++ c++; ++ pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate, ++ validrate); ++ 
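/*
 * Illustration of the fixed-point loop-count calculation above (not part
 * of the patch): SDRC_MPURATE_LOOPS = 96 delay loops are specified for a
 * 2^SDRC_MPURATE_BASE_SHIFT = 2048 MHz MPU clock, so with truncating
 * integer division c works out to roughly 96 * MHz / 2048, e.g.:
 *
 *   600 MHz: (600 * 256) / 2048 = 75,  (75 + 1) * 96 / 256 = 28 loops
 *   500 MHz: (500 * 256) / 2048 = 62,  (62 + 1) * 96 / 256 = 23 -> 24
 *   250 MHz: (250 * 256) / 2048 = 31,  (31 + 1) * 96 / 256 = 12 -> 13
 *   125 MHz: (125 * 256) / 2048 = 15,  (15 + 1) * 96 / 256 = 6 loops
 *
 * The special-cased values 12 and 23 appear to correspond to the 250 MHz
 * (OPP2) and 500 MHz (OPP3) rates listed in the MPU OPP tables elsewhere
 * in this patch, which is why only those two are bumped by one.
 */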
pr_debug("clock: SDRC CS0 timing params used:" ++ " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n", ++ sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, ++ sdrc_cs0->actim_ctrlb, sdrc_cs0->mr); ++ if (sdrc_cs1) ++ pr_debug("clock: SDRC CS1 timing params used: " ++ " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n", ++ sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla, ++ sdrc_cs1->actim_ctrlb, sdrc_cs1->mr); + +- /* REVISIT: Add SDRC_MR changing to this code also */ +- omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla, +- sp->actim_ctrlb, new_div); ++ /* ++ * Only the SDRC RFRCTRL value is seen to be safe to be ++ * changed during dvfs. ++ * The ACTiming values are left unchanged and should be ++ * the ones programmed by the bootloader for higher OPP. ++ */ ++ if (sdrc_cs1) ++ omap3_configure_core_dpll( ++ new_div, unlock_dll, c, rate > clk->rate, ++ sdrc_cs0->rfr_ctrl, sdrc_cs0->mr, ++ sdrc_cs1->rfr_ctrl, sdrc_cs1->mr); ++ else ++ omap3_configure_core_dpll( ++ new_div, unlock_dll, c, rate > clk->rate, ++ sdrc_cs0->rfr_ctrl, sdrc_cs0->mr, ++ 0, 0); + + return 0; + } +@@ -603,7 +659,7 @@ static void omap3_clkoutx2_recalc(struct + pclk = pclk->parent; + + /* clk does not have a DPLL as a parent? */ +- WARN_ON(!pclk); ++ BUG_ON(!pclk); + + dd = pclk->dpll_data; + +@@ -630,15 +686,51 @@ static void omap3_clkoutx2_recalc(struct + */ + #if defined(CONFIG_ARCH_OMAP3) + ++#ifdef CONFIG_CPU_FREQ ++static struct cpufreq_frequency_table freq_table[MAX_VDD1_OPP+1]; ++ ++void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table) ++{ ++ struct omap_opp *prcm; ++ int i = 0; ++ ++ if (!mpu_opps) ++ return; ++ ++ prcm = mpu_opps + MAX_VDD1_OPP; ++ for (; prcm->rate; prcm--) { ++ freq_table[i].index = i; ++ freq_table[i].frequency = prcm->rate / 1000; ++ i++; ++ } ++ ++ if (i == 0) { ++ printk(KERN_WARNING "%s: failed to initialize frequency \ ++ table\n", ++ __func__); ++ return; ++ } ++ ++ freq_table[i].index = i; ++ freq_table[i].frequency = CPUFREQ_TABLE_END; ++ ++ *table = &freq_table[0]; ++} ++#endif ++ + static struct clk_functions omap2_clk_functions = { + .clk_register = omap2_clk_register, + .clk_enable = omap2_clk_enable, + .clk_disable = omap2_clk_disable, + .clk_round_rate = omap2_clk_round_rate, ++ .clk_round_rate_parent = omap2_clk_round_rate_parent, + .clk_set_rate = omap2_clk_set_rate, + .clk_set_parent = omap2_clk_set_parent, + .clk_get_parent = omap2_clk_get_parent, + .clk_disable_unused = omap2_clk_disable_unused, ++#ifdef CONFIG_CPU_FREQ ++ .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table, ++#endif + }; + + /* +@@ -760,13 +852,6 @@ int __init omap2_clk_init(void) + */ + clk_enable_init_clocks(); + +- /* Avoid sleeping during omap2_clk_prepare_for_reboot() */ +- /* REVISIT: not yet ready for 343x */ +-#if 0 +- vclk = clk_get(NULL, "virt_prcm_set"); +- sclk = clk_get(NULL, "sys_ck"); +-#endif + return 0; + } +- +-#endif ++#endif /* CONFIG_ARCH_OMAP3 */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock34xx.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock34xx.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/clock34xx.h 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/clock34xx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -20,6 +20,7 @@ + #define __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H + + #include ++#include + + #include "clock.h" + #include "cm.h" +@@ -817,6 +818,8 @@ static struct clk dpll4_m5_ck = { + .flags = CLOCK_IN_OMAP343X | PARENT_CONTROLS_CLOCK, + .clkdm = { .name = "dpll4_clkdm" }, + .recalc = &omap2_clksel_recalc, ++ .set_rate = 
&omap2_clksel_set_rate, ++ .round_rate = &omap2_clksel_round_rate, + }; + + /* The PWRDN bit is apparently only available on 3430ES2 and above */ +@@ -1283,6 +1286,39 @@ static struct clk d2d_26m_fck = { + .recalc = &followparent_recalc, + }; + ++static struct clk modem_fck = { ++ .name = "modem_fck", ++ .parent = &sys_ck, ++ .prcm_mod = CORE_MOD, ++ .enable_reg = CM_FCLKEN1, ++ .enable_bit = OMAP3430_EN_MODEM_SHIFT, ++ .flags = CLOCK_IN_OMAP343X, ++ .clkdm = { .name = "d2d_clkdm" }, ++ .recalc = &followparent_recalc, ++}; ++ ++static struct clk sad2d_ick = { ++ .name = "sad2d_ick", ++ .parent = &l3_ick, ++ .prcm_mod = CORE_MOD, ++ .enable_reg = CM_ICLKEN1, ++ .enable_bit = OMAP3430_EN_SAD2D_SHIFT, ++ .flags = CLOCK_IN_OMAP343X, ++ .clkdm = { .name = "d2d_clkdm" }, ++ .recalc = &followparent_recalc, ++}; ++ ++static struct clk mad2d_ick = { ++ .name = "mad2d_ick", ++ .parent = &l3_ick, ++ .prcm_mod = CORE_MOD, ++ .enable_reg = CM_ICLKEN3, ++ .enable_bit = OMAP3430_EN_MAD2D_SHIFT, ++ .flags = CLOCK_IN_OMAP343X, ++ .clkdm = { .name = "d2d_clkdm" }, ++ .recalc = &followparent_recalc, ++}; ++ + static const struct clksel omap343x_gpt_clksel[] = { + { .parent = &omap_32k_fck, .rates = gpt_32k_rates }, + { .parent = &sys_ck, .rates = gpt_sys_rates }, +@@ -2187,8 +2223,6 @@ static struct clk usb_l4_ick = { + .recalc = &omap2_clksel_recalc, + }; + +-/* XXX MDM_INTC_ICK, SAD2D_ICK ?? */ +- + /* SECURITY_L4_ICK2 based clocks */ + + static struct clk security_l4_ick2 = { +@@ -3402,6 +3436,9 @@ static struct clk *onchip_34xx_clks[] __ + &sgx_fck, + &sgx_ick, + &d2d_26m_fck, ++ &modem_fck, ++ &sad2d_ick, ++ &mad2d_ick, + &gpt10_fck, + &gpt11_fck, + &cpefuse_fck, +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/cm.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/cm.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/cm.h 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/cm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -74,6 +74,7 @@ + #define OMAP3430ES2_CM_CLKEN2 0x0004 + #define OMAP3430ES2_CM_FCLKEN3 0x0008 + #define OMAP3430_CM_IDLEST_PLL CM_IDLEST2 ++#define OMAP3430_CM_IDLEST3 0x0028 + #define OMAP3430_CM_AUTOIDLE_PLL CM_AUTOIDLE2 + #define OMAP3430ES2_CM_AUTOIDLE2_PLL CM_AUTOIDLE2 + #define OMAP3430_CM_CLKSEL1 CM_CLKSEL +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/cm-regbits-34xx.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/cm-regbits-34xx.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/cm-regbits-34xx.h 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/cm-regbits-34xx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -145,6 +145,8 @@ + #define OMAP3430_CLKACTIVITY_MPU_MASK (1 << 0) + + /* CM_FCLKEN1_CORE specific bits */ ++#define OMAP3430_EN_MODEM (1 << 31) ++#define OMAP3430_EN_MODEM_SHIFT 31 + + /* CM_ICLKEN1_CORE specific bits */ + #define OMAP3430_EN_ICR (1 << 29) +@@ -161,6 +163,8 @@ + #define OMAP3430_EN_MAILBOXES_SHIFT 7 + #define OMAP3430_EN_OMAPCTRL (1 << 6) + #define OMAP3430_EN_OMAPCTRL_SHIFT 6 ++#define OMAP3430_EN_SAD2D (1 << 3) ++#define OMAP3430_EN_SAD2D_SHIFT 3 + #define OMAP3430_EN_SDRC (1 << 1) + #define OMAP3430_EN_SDRC_SHIFT 1 + +@@ -176,6 +180,10 @@ + #define OMAP3430_EN_DES1 (1 << 0) + #define OMAP3430_EN_DES1_SHIFT 0 + ++/* CM_ICLKEN3_CORE */ ++#define OMAP3430_EN_MAD2D_SHIFT 3 ++#define OMAP3430_EN_MAD2D (1 << 3) ++ + /* CM_FCLKEN3_CORE specific bits */ + #define OMAP3430ES2_EN_TS_SHIFT 1 + #define OMAP3430ES2_EN_TS_MASK (1 << 1) +@@ -231,6 +239,8 @@ + #define 
OMAP3430ES2_ST_CPEFUSE_MASK (1 << 0) + + /* CM_AUTOIDLE1_CORE */ ++#define OMAP3430_AUTO_MODEM (1 << 31) ++#define OMAP3430_AUTO_MODEM_SHIFT 31 + #define OMAP3430ES2_AUTO_MMC3 (1 << 30) + #define OMAP3430ES2_AUTO_MMC3_SHIFT 30 + #define OMAP3430ES2_AUTO_ICR (1 << 29) +@@ -287,6 +297,8 @@ + #define OMAP3430_AUTO_HSOTGUSB_SHIFT 4 + #define OMAP3430ES1_AUTO_D2D (1 << 3) + #define OMAP3430ES1_AUTO_D2D_SHIFT 3 ++#define OMAP3430_AUTO_SAD2D (1 << 3) ++#define OMAP3430_AUTO_SAD2D_SHIFT 3 + #define OMAP3430_AUTO_SSI (1 << 0) + #define OMAP3430_AUTO_SSI_SHIFT 0 + +@@ -308,6 +320,8 @@ + #define OMAP3430ES2_AUTO_USBTLL (1 << 2) + #define OMAP3430ES2_AUTO_USBTLL_SHIFT 2 + #define OMAP3430ES2_AUTO_USBTLL_MASK (1 << 2) ++#define OMAP3430_AUTO_MAD2D_SHIFT 3 ++#define OMAP3430_AUTO_MAD2D (1 << 3) + + /* CM_CLKSEL_CORE */ + #define OMAP3430_CLKSEL_SSI_SHIFT 8 +@@ -666,6 +680,7 @@ + #define OMAP3430_CLKSEL_GPT2_SHIFT 0 + + /* CM_SLEEPDEP_PER specific bits */ ++#define OMAP3430_CM_SLEEPDEP_PER_EN_MPU (1 << 1) + #define OMAP3430_CM_SLEEPDEP_PER_EN_IVA2 (1 << 2) + + /* CM_CLKSTCTRL_PER */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/control.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/control.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/control.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/control.c 2011-09-04 11:31:05.000000000 +0200 +@@ -17,9 +17,126 @@ + + #include + #include ++#include ++#include "cm-regbits-34xx.h" ++#include "prm-regbits-34xx.h" ++#include "cm.h" ++#include "prm.h" ++#include "sdrc.h" + + static void __iomem *omap2_ctrl_base; + ++#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) ++struct omap3_scratchpad { ++ u32 boot_config_ptr; ++ u32 public_restore_ptr; ++ u32 secure_ram_restore_ptr; ++ u32 sdrc_module_semaphore; ++ u32 prcm_block_offset; ++ u32 sdrc_block_offset; ++}; ++ ++struct omap3_scratchpad_prcm_block { ++ u32 prm_clksrc_ctrl; ++ u32 prm_clksel; ++ u32 cm_clksel_core; ++ u32 cm_clksel_wkup; ++ u32 cm_clken_pll; ++ u32 cm_autoidle_pll; ++ u32 cm_clksel1_pll; ++ u32 cm_clksel2_pll; ++ u32 cm_clksel3_pll; ++ u32 cm_clken_pll_mpu; ++ u32 cm_autoidle_pll_mpu; ++ u32 cm_clksel1_pll_mpu; ++ u32 cm_clksel2_pll_mpu; ++ u32 prcm_block_size; ++}; ++ ++struct omap3_scratchpad_sdrc_block { ++ u16 sysconfig; ++ u16 cs_cfg; ++ u16 sharing; ++ u16 err_type; ++ u32 dll_a_ctrl; ++ u32 dll_b_ctrl; ++ u32 power; ++ u32 cs_0; ++ u32 mcfg_0; ++ u16 mr_0; ++ u16 emr_1_0; ++ u16 emr_2_0; ++ u16 emr_3_0; ++ u32 actim_ctrla_0; ++ u32 actim_ctrlb_0; ++ u32 rfr_ctrl_0; ++ u32 cs_1; ++ u32 mcfg_1; ++ u16 mr_1; ++ u16 emr_1_1; ++ u16 emr_2_1; ++ u16 emr_3_1; ++ u32 actim_ctrla_1; ++ u32 actim_ctrlb_1; ++ u32 rfr_ctrl_1; ++ u16 dcdl_1_ctrl; ++ u16 dcdl_2_ctrl; ++ u32 flags; ++ u32 block_size; ++}; ++ ++void *omap3_secure_ram_storage; ++ ++/* ++ * This is used to store ARM registers in SDRAM before attempting ++ * an MPU OFF. The save and restore happens from the SRAM sleep code. ++ * The address is stored in scratchpad, so that it can be used ++ * during the restore path. 
++ */ ++u32 omap3_arm_context[128]; ++u32 omap3_aux_ctrl[2] = { 0x1, 0x0 }; ++ ++struct omap3_control_regs { ++ u32 sysconfig; ++ u32 devconf0; ++ u32 mem_dftrw0; ++ u32 mem_dftrw1; ++ u32 msuspendmux_0; ++ u32 msuspendmux_1; ++ u32 msuspendmux_2; ++ u32 msuspendmux_3; ++ u32 msuspendmux_4; ++ u32 msuspendmux_5; ++ u32 sec_ctrl; ++ u32 devconf1; ++ u32 csirxfe; ++ u32 iva2_bootaddr; ++ u32 iva2_bootmod; ++ u32 debobs_0; ++ u32 debobs_1; ++ u32 debobs_2; ++ u32 debobs_3; ++ u32 debobs_4; ++ u32 debobs_5; ++ u32 debobs_6; ++ u32 debobs_7; ++ u32 debobs_8; ++ u32 prog_io0; ++ u32 prog_io1; ++ u32 dss_dpll_spreading; ++ u32 core_dpll_spreading; ++ u32 per_dpll_spreading; ++ u32 usbhost_dpll_spreading; ++ u32 pbias_lite; ++ u32 temp_sensor; ++ u32 sramldo4; ++ u32 sramldo5; ++ u32 csi; ++}; ++ ++static struct omap3_control_regs control_context; ++#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */ ++ + #define OMAP_CTRL_REGADDR(reg) (omap2_ctrl_base + (reg)) + + void __init omap2_set_globals_control(struct omap_globals *omap2_globals) +@@ -62,3 +179,257 @@ void omap_ctrl_writel(u32 val, u16 offse + __raw_writel(val, OMAP_CTRL_REGADDR(offset)); + } + ++#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) ++/* ++ * Clears the scratchpad contents in case of cold boot- ++ * called during bootup ++ */ ++void omap3_clear_scratchpad_contents(void) ++{ ++ u32 max_offset = OMAP343X_SCRATCHPAD_ROM_OFFSET; ++ u32 *v_addr; ++ u32 offset = 0; ++ v_addr = OMAP2_IO_ADDRESS(OMAP343X_SCRATCHPAD_ROM); ++ if (prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) & ++ OMAP3430_GLOBAL_COLD_RST) { ++ for ( ; offset <= max_offset; offset += 0x4) ++ __raw_writel(0x0, (v_addr + offset)); ++ prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST, OMAP3430_GR_MOD, ++ OMAP3_PRM_RSTST_OFFSET); ++ } ++} ++ ++/* Populate the scratchpad structure with restore structure */ ++void omap3_save_scratchpad_contents(void) ++{ ++ void * __iomem scratchpad_address; ++ u32 arm_context_addr; ++ struct omap3_scratchpad scratchpad_contents; ++ struct omap3_scratchpad_prcm_block prcm_block_contents; ++ struct omap3_scratchpad_sdrc_block sdrc_block_contents; ++ ++ /* Populate the Scratchpad contents */ ++ scratchpad_contents.boot_config_ptr = 0x0; ++ if (omap_rev() != OMAP3430_REV_ES3_0 && ++ omap_rev() != OMAP3430_REV_ES3_1) ++ scratchpad_contents.public_restore_ptr = ++ virt_to_phys(get_restore_pointer()); ++ else ++ scratchpad_contents.public_restore_ptr = ++ virt_to_phys(get_es3_restore_pointer()); ++ if (omap_type() == OMAP2_DEVICE_TYPE_GP) ++ scratchpad_contents.secure_ram_restore_ptr = 0x0; ++ else ++ scratchpad_contents.secure_ram_restore_ptr = ++ (u32) __pa(omap3_secure_ram_storage); ++ scratchpad_contents.sdrc_module_semaphore = 0x0; ++ scratchpad_contents.prcm_block_offset = 0x2C; ++ scratchpad_contents.sdrc_block_offset = 0x64; ++ ++ /* Populate the PRCM block contents */ ++ prcm_block_contents.prm_clksrc_ctrl = prm_read_mod_reg(OMAP3430_GR_MOD, ++ OMAP3_PRM_CLKSRC_CTRL_OFFSET); ++ prcm_block_contents.prm_clksel = prm_read_mod_reg(OMAP3430_CCR_MOD, ++ OMAP3_PRM_CLKSEL_OFFSET); ++ prcm_block_contents.cm_clksel_core = ++ cm_read_mod_reg(CORE_MOD, CM_CLKSEL); ++ prcm_block_contents.cm_clksel_wkup = ++ cm_read_mod_reg(WKUP_MOD, CM_CLKSEL); ++ prcm_block_contents.cm_clken_pll = ++ cm_read_mod_reg(PLL_MOD, CM_CLKEN); ++ prcm_block_contents.cm_autoidle_pll = ++ cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL); ++ prcm_block_contents.cm_clksel1_pll = ++ cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL); ++ prcm_block_contents.cm_clksel2_pll = ++ 
cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL); ++ prcm_block_contents.cm_clksel3_pll = ++ cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3); ++ prcm_block_contents.cm_clken_pll_mpu = ++ cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL); ++ prcm_block_contents.cm_autoidle_pll_mpu = ++ cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL); ++ prcm_block_contents.cm_clksel1_pll_mpu = ++ cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL); ++ prcm_block_contents.cm_clksel2_pll_mpu = ++ cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL); ++ prcm_block_contents.prcm_block_size = 0x0; ++ ++ /* Populate the SDRC block contents */ ++ sdrc_block_contents.sysconfig = ++ (sdrc_read_reg(SDRC_SYSCONFIG) & 0xFFFF); ++ sdrc_block_contents.cs_cfg = ++ (sdrc_read_reg(SDRC_CS_CFG) & 0xFFFF); ++ sdrc_block_contents.sharing = ++ (sdrc_read_reg(SDRC_SHARING) & 0xFFFF); ++ sdrc_block_contents.err_type = ++ (sdrc_read_reg(SDRC_ERR_TYPE) & 0xFFFF); ++ sdrc_block_contents.dll_a_ctrl = sdrc_read_reg(SDRC_DLLA_CTRL); ++ sdrc_block_contents.dll_b_ctrl = 0x0; ++ sdrc_block_contents.power = (sdrc_read_reg(SDRC_POWER) & ++ ~(SDRC_POWER_AUTOCOUNT_MASK|SDRC_POWER_CLKCTRL_MASK)) | ++ (1 << SDRC_POWER_AUTOCOUNT_SHIFT) | ++ SDRC_SELF_REFRESH_ON_AUTOCOUNT; ++ sdrc_block_contents.cs_0 = 0x0; ++ sdrc_block_contents.mcfg_0 = sdrc_read_reg(SDRC_MCFG_0); ++ sdrc_block_contents.mr_0 = (sdrc_read_reg(SDRC_MR_0) & 0xFFFF); ++ sdrc_block_contents.emr_1_0 = 0x0; ++ sdrc_block_contents.emr_2_0 = 0x0; ++ sdrc_block_contents.emr_3_0 = 0x0; ++ sdrc_block_contents.actim_ctrla_0 = ++ sdrc_read_reg(SDRC_ACTIM_CTRL_A_0); ++ sdrc_block_contents.actim_ctrlb_0 = ++ sdrc_read_reg(SDRC_ACTIM_CTRL_B_0); ++ sdrc_block_contents.rfr_ctrl_0 = ++ sdrc_read_reg(SDRC_RFR_CTRL_0); ++ sdrc_block_contents.cs_1 = 0x0; ++ sdrc_block_contents.mcfg_1 = sdrc_read_reg(SDRC_MCFG_1); ++ sdrc_block_contents.mr_1 = sdrc_read_reg(SDRC_MR_1) & 0xFFFF; ++ sdrc_block_contents.emr_1_1 = 0x0; ++ sdrc_block_contents.emr_2_1 = 0x0; ++ sdrc_block_contents.emr_3_1 = 0x0; ++ sdrc_block_contents.actim_ctrla_1 = ++ sdrc_read_reg(SDRC_ACTIM_CTRL_A_1); ++ sdrc_block_contents.actim_ctrlb_1 = ++ sdrc_read_reg(SDRC_ACTIM_CTRL_B_1); ++ sdrc_block_contents.rfr_ctrl_1 = ++ sdrc_read_reg(SDRC_RFR_CTRL_1); ++ sdrc_block_contents.dcdl_1_ctrl = 0x0; ++ sdrc_block_contents.dcdl_2_ctrl = 0x0; ++ sdrc_block_contents.flags = 0x0; ++ sdrc_block_contents.block_size = 0x0; ++ ++ arm_context_addr = virt_to_phys(omap3_arm_context); ++ ++ /* Copy all the contents to the scratchpad location */ ++ scratchpad_address = OMAP2_IO_ADDRESS(OMAP343X_SCRATCHPAD); ++ memcpy_toio(scratchpad_address, &scratchpad_contents, ++ sizeof(scratchpad_contents)); ++ /* Scratchpad contents being 32 bits, a divide by 4 done here */ ++ memcpy_toio(scratchpad_address + ++ scratchpad_contents.prcm_block_offset, ++ &prcm_block_contents, sizeof(prcm_block_contents)); ++ memcpy_toio(scratchpad_address + ++ scratchpad_contents.sdrc_block_offset, ++ &sdrc_block_contents, sizeof(sdrc_block_contents)); ++ /* ++ * Copies the address of the location in SDRAM where ARM ++ * registers get saved during a MPU OFF transition. 
++ */ ++ memcpy_toio(scratchpad_address + ++ scratchpad_contents.sdrc_block_offset + ++ sizeof(sdrc_block_contents), &arm_context_addr, 4); ++} ++ ++void omap3_control_save_context(void) ++{ ++ control_context.sysconfig = omap_ctrl_readl(OMAP2_CONTROL_SYSCONFIG); ++ control_context.devconf0 = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0); ++ control_context.mem_dftrw0 = ++ omap_ctrl_readl(OMAP343X_CONTROL_MEM_DFTRW0); ++ control_context.mem_dftrw1 = ++ omap_ctrl_readl(OMAP343X_CONTROL_MEM_DFTRW1); ++ control_context.msuspendmux_0 = ++ omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_0); ++ control_context.msuspendmux_1 = ++ omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_1); ++ control_context.msuspendmux_2 = ++ omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_2); ++ control_context.msuspendmux_3 = ++ omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_3); ++ control_context.msuspendmux_4 = ++ omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_4); ++ control_context.msuspendmux_5 = ++ omap_ctrl_readl(OMAP2_CONTROL_MSUSPENDMUX_5); ++ control_context.sec_ctrl = omap_ctrl_readl(OMAP2_CONTROL_SEC_CTRL); ++ control_context.devconf1 = omap_ctrl_readl(OMAP343X_CONTROL_DEVCONF1); ++ control_context.csirxfe = omap_ctrl_readl(OMAP343X_CONTROL_CSIRXFE); ++ control_context.iva2_bootaddr = ++ omap_ctrl_readl(OMAP343X_CONTROL_IVA2_BOOTADDR); ++ control_context.iva2_bootmod = ++ omap_ctrl_readl(OMAP343X_CONTROL_IVA2_BOOTMOD); ++ control_context.debobs_0 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(0)); ++ control_context.debobs_1 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(1)); ++ control_context.debobs_2 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(2)); ++ control_context.debobs_3 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(3)); ++ control_context.debobs_4 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(4)); ++ control_context.debobs_5 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(5)); ++ control_context.debobs_6 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(6)); ++ control_context.debobs_7 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(7)); ++ control_context.debobs_8 = omap_ctrl_readl(OMAP343X_CONTROL_DEBOBS(8)); ++ control_context.prog_io0 = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO0); ++ control_context.prog_io1 = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1); ++ control_context.dss_dpll_spreading = ++ omap_ctrl_readl(OMAP343X_CONTROL_DSS_DPLL_SPREADING); ++ control_context.core_dpll_spreading = ++ omap_ctrl_readl(OMAP343X_CONTROL_CORE_DPLL_SPREADING); ++ control_context.per_dpll_spreading = ++ omap_ctrl_readl(OMAP343X_CONTROL_PER_DPLL_SPREADING); ++ control_context.usbhost_dpll_spreading = ++ omap_ctrl_readl(OMAP343X_CONTROL_USBHOST_DPLL_SPREADING); ++ control_context.pbias_lite = ++ omap_ctrl_readl(OMAP343X_CONTROL_PBIAS_LITE); ++ control_context.temp_sensor = ++ omap_ctrl_readl(OMAP343X_CONTROL_TEMP_SENSOR); ++ control_context.sramldo4 = omap_ctrl_readl(OMAP343X_CONTROL_SRAMLDO4); ++ control_context.sramldo5 = omap_ctrl_readl(OMAP343X_CONTROL_SRAMLDO5); ++ control_context.csi = omap_ctrl_readl(OMAP343X_CONTROL_CSI); ++ return; ++} ++ ++void omap3_control_restore_context(void) ++{ ++ omap_ctrl_writel(control_context.sysconfig, OMAP2_CONTROL_SYSCONFIG); ++ omap_ctrl_writel(control_context.devconf0, OMAP2_CONTROL_DEVCONF0); ++ omap_ctrl_writel(control_context.mem_dftrw0, ++ OMAP343X_CONTROL_MEM_DFTRW0); ++ omap_ctrl_writel(control_context.mem_dftrw1, ++ OMAP343X_CONTROL_MEM_DFTRW1); ++ omap_ctrl_writel(control_context.msuspendmux_0, ++ OMAP2_CONTROL_MSUSPENDMUX_0); ++ omap_ctrl_writel(control_context.msuspendmux_1, ++ OMAP2_CONTROL_MSUSPENDMUX_1); ++ 
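/*
 * For orientation (not part of the patch), the scratchpad image written by
 * omap3_save_scratchpad_contents() above ends up laid out as follows,
 * assuming the structs are packed exactly as declared:
 *
 *   0x00  struct omap3_scratchpad header (6 x u32; bytes up to 0x2c are
 *         not written by this function)
 *   0x2c  struct omap3_scratchpad_prcm_block (prcm_block_offset,
 *         14 x u32 = 0x38 bytes)
 *   0x64  struct omap3_scratchpad_sdrc_block (sdrc_block_offset, 0x58 bytes)
 *   ~0xbc physical address of omap3_arm_context[], appended by the final
 *         memcpy_toio() so the restore path can locate the saved ARM
 *         registers
 */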
omap_ctrl_writel(control_context.msuspendmux_2, ++ OMAP2_CONTROL_MSUSPENDMUX_2); ++ omap_ctrl_writel(control_context.msuspendmux_3, ++ OMAP2_CONTROL_MSUSPENDMUX_3); ++ omap_ctrl_writel(control_context.msuspendmux_4, ++ OMAP2_CONTROL_MSUSPENDMUX_4); ++ omap_ctrl_writel(control_context.msuspendmux_5, ++ OMAP2_CONTROL_MSUSPENDMUX_5); ++ omap_ctrl_writel(control_context.sec_ctrl, OMAP2_CONTROL_SEC_CTRL); ++ omap_ctrl_writel(control_context.devconf1, OMAP343X_CONTROL_DEVCONF1); ++ omap_ctrl_writel(control_context.csirxfe, OMAP343X_CONTROL_CSIRXFE); ++ omap_ctrl_writel(control_context.iva2_bootaddr, ++ OMAP343X_CONTROL_IVA2_BOOTADDR); ++ omap_ctrl_writel(control_context.iva2_bootmod, ++ OMAP343X_CONTROL_IVA2_BOOTMOD); ++ omap_ctrl_writel(control_context.debobs_0, OMAP343X_CONTROL_DEBOBS(0)); ++ omap_ctrl_writel(control_context.debobs_1, OMAP343X_CONTROL_DEBOBS(1)); ++ omap_ctrl_writel(control_context.debobs_2, OMAP343X_CONTROL_DEBOBS(2)); ++ omap_ctrl_writel(control_context.debobs_3, OMAP343X_CONTROL_DEBOBS(3)); ++ omap_ctrl_writel(control_context.debobs_4, OMAP343X_CONTROL_DEBOBS(4)); ++ omap_ctrl_writel(control_context.debobs_5, OMAP343X_CONTROL_DEBOBS(5)); ++ omap_ctrl_writel(control_context.debobs_6, OMAP343X_CONTROL_DEBOBS(6)); ++ omap_ctrl_writel(control_context.debobs_7, OMAP343X_CONTROL_DEBOBS(7)); ++ omap_ctrl_writel(control_context.debobs_8, OMAP343X_CONTROL_DEBOBS(8)); ++ omap_ctrl_writel(control_context.prog_io0, OMAP343X_CONTROL_PROG_IO0); ++ omap_ctrl_writel(control_context.prog_io1, OMAP343X_CONTROL_PROG_IO1); ++ omap_ctrl_writel(control_context.dss_dpll_spreading, ++ OMAP343X_CONTROL_DSS_DPLL_SPREADING); ++ omap_ctrl_writel(control_context.core_dpll_spreading, ++ OMAP343X_CONTROL_CORE_DPLL_SPREADING); ++ omap_ctrl_writel(control_context.per_dpll_spreading, ++ OMAP343X_CONTROL_PER_DPLL_SPREADING); ++ omap_ctrl_writel(control_context.usbhost_dpll_spreading, ++ OMAP343X_CONTROL_USBHOST_DPLL_SPREADING); ++ omap_ctrl_writel(control_context.pbias_lite, ++ OMAP343X_CONTROL_PBIAS_LITE); ++ omap_ctrl_writel(control_context.temp_sensor, ++ OMAP343X_CONTROL_TEMP_SENSOR); ++ omap_ctrl_writel(control_context.sramldo4, OMAP343X_CONTROL_SRAMLDO4); ++ omap_ctrl_writel(control_context.sramldo5, OMAP343X_CONTROL_SRAMLDO5); ++ omap_ctrl_writel(control_context.csi, OMAP343X_CONTROL_CSI); ++ return; ++} ++#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/cpuidle34xx.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/cpuidle34xx.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/cpuidle34xx.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/cpuidle34xx.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,315 @@ ++/* ++ * linux/arch/arm/mach-omap2/cpuidle34xx.c ++ * ++ * OMAP3 CPU IDLE Routines ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * Rajendra Nayak ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * Karthik Dasu ++ * ++ * Copyright (C) 2006 Nokia Corporation ++ * Tony Lindgren ++ * ++ * Copyright (C) 2005 Texas Instruments, Inc. ++ * Richard Woodruff ++ * ++ * Based on pm.c for omap2 ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pm.h" ++ ++#ifdef CONFIG_CPU_IDLE ++ ++#define OMAP3_MAX_STATES 7 ++#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */ ++#define OMAP3_STATE_C2 1 /* C2 - MPU inactive + Core inactive */ ++#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */ ++#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */ ++#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */ ++#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */ ++#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */ ++ ++struct omap3_processor_cx { ++ u8 valid; ++ u8 type; ++ u32 sleep_latency; ++ u32 wakeup_latency; ++ u32 mpu_state; ++ u32 core_state; ++ u32 threshold; ++ u32 flags; ++}; ++ ++struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES]; ++struct omap3_processor_cx current_cx_state; ++struct powerdomain *mpu_pd, *core_pd, *per_pd; ++ ++static int omap3_idle_bm_check(void) ++{ ++ if (!omap3_can_sleep()) ++ return 1; ++ return 0; ++} ++ ++static int _cpuidle_allow_idle(struct powerdomain *pwrdm, ++ struct clockdomain *clkdm) ++{ ++ omap2_clkdm_allow_idle(clkdm); ++ return 0; ++} ++ ++static int _cpuidle_deny_idle(struct powerdomain *pwrdm, ++ struct clockdomain *clkdm) ++{ ++ omap2_clkdm_deny_idle(clkdm); ++ return 0; ++} ++ ++/** ++ * omap3_enter_idle - Programs OMAP3 to enter the specified state ++ * @dev: cpuidle device ++ * @state: The target state to be programmed ++ * ++ * Called from the CPUidle framework to program the device to the ++ * specified target state selected by the governor. ++ */ ++static int omap3_enter_idle(struct cpuidle_device *dev, ++ struct cpuidle_state *state) ++{ ++ struct omap3_processor_cx *cx = cpuidle_get_statedata(state); ++ struct timespec ts_preidle, ts_postidle, ts_idle; ++ u32 mpu_state = cx->mpu_state, core_state = cx->core_state; ++ ++ current_cx_state = *cx; ++ ++ /* Used to keep track of the total time in idle */ ++ getnstimeofday(&ts_preidle); ++ ++ local_irq_disable(); ++ local_fiq_disable(); ++ ++ if (!enable_off_mode) { ++ if (mpu_state < PWRDM_POWER_RET) ++ mpu_state = PWRDM_POWER_RET; ++ if (core_state < PWRDM_POWER_RET) ++ core_state = PWRDM_POWER_RET; ++ } ++ ++ pwrdm_set_next_pwrst(mpu_pd, mpu_state); ++ pwrdm_set_next_pwrst(core_pd, core_state); ++ ++ if (omap_irq_pending() || need_resched()) ++ goto return_sleep_time; ++ ++ if (cx->type == OMAP3_STATE_C1) { ++ pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); ++ pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); ++ } ++ ++ /* Execute ARM wfi */ ++ omap_sram_idle(); ++ ++ if (cx->type == OMAP3_STATE_C1) { ++ pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); ++ pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); ++ } ++ ++return_sleep_time: ++ getnstimeofday(&ts_postidle); ++ ts_idle = timespec_sub(ts_postidle, ts_preidle); ++ ++ local_irq_enable(); ++ local_fiq_enable(); ++ ++ return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; ++} ++ ++/** ++ * omap3_enter_idle_bm - Checks for any bus activity ++ * @dev: cpuidle device ++ * @state: The target state to be programmed ++ * ++ * Called from the CPUidle framework for C states with CPUIDLE_FLAG_CHECK_BM ++ * flag set. 
This function checks for any pending bus activity and then ++ * programs the device to the specified or a lower possible state ++ */ ++static int omap3_enter_idle_bm(struct cpuidle_device *dev, ++ struct cpuidle_state *state) ++{ ++ struct cpuidle_state *new_state = state; ++ ++ if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) { ++ BUG_ON(!dev->safe_state); ++ new_state = dev->safe_state; ++ } ++ ++ dev->last_state = new_state; ++ return omap3_enter_idle(dev, new_state); ++} ++ ++DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); ++ ++/* omap3_init_power_states - Initialises the OMAP3 specific C states. ++ * ++ * Below is the desciption of each C state. ++ * C1 . MPU WFI + Core active ++ * C2 . MPU inactive + Core inactive ++ * C3 . MPU CSWR + Core inactive ++ * C4 . MPU OFF + Core inactive ++ * C5 . MPU CSWR + Core CSWR ++ * C6 . MPU OFF + Core CSWR ++ * C7 . MPU OFF + Core OFF ++ */ ++void omap_init_power_states(void) ++{ ++ /* C1 . MPU WFI + Core active */ ++ omap3_power_states[OMAP3_STATE_C1].valid = 1; ++ omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1; ++ omap3_power_states[OMAP3_STATE_C1].sleep_latency = 58; ++ omap3_power_states[OMAP3_STATE_C1].wakeup_latency = 52; ++ omap3_power_states[OMAP3_STATE_C1].threshold = 5; ++ omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON; ++ omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON; ++ omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID; ++ ++ /* C2 . MPU WFI + Core inactive */ ++ omap3_power_states[OMAP3_STATE_C2].valid = 1; ++ omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2; ++ omap3_power_states[OMAP3_STATE_C2].sleep_latency = 73; ++ omap3_power_states[OMAP3_STATE_C2].wakeup_latency = 164; ++ omap3_power_states[OMAP3_STATE_C2].threshold = 30; ++ omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON; ++ omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON; ++ omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID | ++ CPUIDLE_FLAG_CHECK_BM; ++ ++ /* C3 . MPU CSWR + Core inactive */ ++ omap3_power_states[OMAP3_STATE_C3].valid = 0; ++ omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3; ++ omap3_power_states[OMAP3_STATE_C3].sleep_latency = 90; ++ omap3_power_states[OMAP3_STATE_C3].wakeup_latency = 267; ++ omap3_power_states[OMAP3_STATE_C3].threshold = 113872; /* vs. C2 */ ++ omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET; ++ omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON; ++ omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID | ++ CPUIDLE_FLAG_CHECK_BM; ++ ++ /* C4 . MPU OFF + Core inactive */ ++ omap3_power_states[OMAP3_STATE_C4].valid = 0; ++ omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4; ++ omap3_power_states[OMAP3_STATE_C4].sleep_latency = 4130; ++ omap3_power_states[OMAP3_STATE_C4].wakeup_latency = 2130; ++ omap3_power_states[OMAP3_STATE_C4].threshold = 619328; /* vs. C2 */ ++ omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF; ++ omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON; ++ omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID | ++ CPUIDLE_FLAG_CHECK_BM; ++ ++ /* C5 . 
MPU CSWR + Core CSWR*/ ++ omap3_power_states[OMAP3_STATE_C5].valid = 1; ++ omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5; ++ omap3_power_states[OMAP3_STATE_C5].sleep_latency = 596; ++ omap3_power_states[OMAP3_STATE_C5].wakeup_latency = 1000; ++ omap3_power_states[OMAP3_STATE_C5].threshold = 7971; ++ omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET; ++ omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET; ++ omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID | ++ CPUIDLE_FLAG_CHECK_BM; ++ ++ /* C6 . MPU OFF + Core CSWR */ ++ omap3_power_states[OMAP3_STATE_C6].valid = 0; ++ omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6; ++ omap3_power_states[OMAP3_STATE_C6].sleep_latency = 4600; ++ omap3_power_states[OMAP3_STATE_C6].wakeup_latency = 2850; ++ omap3_power_states[OMAP3_STATE_C6].threshold = 2801100; /* vs. C5 */ ++ omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF; ++ omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET; ++ omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID | ++ CPUIDLE_FLAG_CHECK_BM; ++ ++ /* C7 . MPU OFF + Core OFF */ ++ omap3_power_states[OMAP3_STATE_C7].valid = 1; ++ omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7; ++ omap3_power_states[OMAP3_STATE_C7].sleep_latency = 4760; ++ omap3_power_states[OMAP3_STATE_C7].wakeup_latency = 7780; ++ omap3_power_states[OMAP3_STATE_C7].threshold = 610082; ++ omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF; ++ omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF; ++ omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID | ++ CPUIDLE_FLAG_CHECK_BM; ++} ++ ++struct cpuidle_driver omap3_idle_driver = { ++ .name = "omap3_idle", ++ .owner = THIS_MODULE, ++}; ++ ++/** ++ * omap3_idle_init - Init routine for OMAP3 idle ++ * ++ * Registers the OMAP3 specific cpuidle driver with the cpuidle ++ * framework with the valid set of states. ++ */ ++int omap3_idle_init(void) ++{ ++ int i, count = 0; ++ struct omap3_processor_cx *cx; ++ struct cpuidle_state *state; ++ struct cpuidle_device *dev; ++ ++ omap3_save_scratchpad_contents(); ++ ++ mpu_pd = pwrdm_lookup("mpu_pwrdm"); ++ core_pd = pwrdm_lookup("core_pwrdm"); ++ per_pd = pwrdm_lookup("per_pwrdm"); ++ ++ omap_init_power_states(); ++ cpuidle_register_driver(&omap3_idle_driver); ++ ++ dev = &per_cpu(omap3_idle_dev, smp_processor_id()); ++ ++ for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { ++ cx = &omap3_power_states[i]; ++ state = &dev->states[count]; ++ ++ if (!cx->valid) ++ continue; ++ cpuidle_set_statedata(state, cx); ++ state->exit_latency = cx->sleep_latency + cx->wakeup_latency; ++ state->target_residency = cx->threshold; ++ state->flags = cx->flags; ++ state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ? 
++ omap3_enter_idle_bm : omap3_enter_idle; ++ if (cx->type == OMAP3_STATE_C1) ++ dev->safe_state = state; ++ sprintf(state->name, "C%d", count+1); ++ count++; ++ } ++ ++ if (!count) ++ return -EINVAL; ++ dev->state_count = count; ++ ++ if (cpuidle_register_device(dev)) { ++ printk(KERN_ERR "%s: CPUidle register device failed\n", ++ __func__); ++ return -EIO; ++ } ++ return 0; ++} ++#endif /* CONFIG_CPU_IDLE */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/debobs.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/debobs.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/debobs.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/debobs.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,239 @@ ++/* ++ * arch/arm/mach-omap2/debobs.c ++ * ++ * Handle debobs pads ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * ++ * Written by Peter De Schrijver ++ * ++ * This file is subject to the terms and conditions of the GNU General ++ * Public License. See the file "COPYING" in the main directory of this ++ * archive for more details. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ETK_GPIO_BEGIN 12 ++#define ETK_GPIO(i) (ETK_GPIO_BEGIN + i) ++#define NUM_OF_DEBOBS_PADS 18 ++ ++static int debobs_initialized; ++ ++enum debobs_pad_mode { ++ GPIO = 0, ++ OBS = 1, ++ ETK = 2, ++ NO_MODE = 3, ++}; ++ ++static char *debobs_pad_mode_names[] = { ++ [GPIO] = "GPIO", ++ [OBS] = "OBS", ++ [ETK] = "ETK", ++}; ++ ++struct obs { ++ u16 offset; ++ u8 value; ++ u8 mask; ++}; ++ ++struct debobs_pad { ++ enum debobs_pad_mode mode; ++ struct obs core_obs; ++ struct obs wakeup_obs; ++}; ++ ++static struct debobs_pad debobs_pads[NUM_OF_DEBOBS_PADS]; ++ ++static int debobs_mode_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = inode->i_private; ++ ++ return 0; ++} ++ ++static ssize_t debobs_mode_read(struct file *file, char __user *user_buf, ++ size_t count, loff_t *ppos) ++{ ++ char buffer[10]; ++ int size; ++ int pad_number = (int)file->private_data; ++ struct debobs_pad *e = &debobs_pads[pad_number]; ++ ++ size = snprintf(buffer, sizeof(buffer), "%s\n", ++ debobs_pad_mode_names[e->mode]); ++ return simple_read_from_buffer(user_buf, count, ppos, buffer, size); ++} ++ ++static ssize_t debobs_mode_write(struct file *file, const char __user *user_buf, ++ size_t count, loff_t *ppos) ++{ ++ char buffer[10]; ++ int buf_size, i, pad_number; ++ u16 muxmode = OMAP34XX_MUX_MODE7; ++ ++ memset(buffer, 0, sizeof(buffer)); ++ buf_size = min(count, (sizeof(buffer)-1)); ++ ++ if (copy_from_user(buffer, user_buf, buf_size)) ++ return -EFAULT; ++ ++ pad_number = (int)file->private_data; ++ ++ for (i = 0; i < NO_MODE; i++) { ++ if (!strnicmp(debobs_pad_mode_names[i], ++ buffer, ++ strlen(debobs_pad_mode_names[i]))) { ++ switch (i) { ++ case ETK: ++ muxmode = OMAP34XX_MUX_MODE0; ++ break; ++ case GPIO: ++ muxmode = OMAP34XX_MUX_MODE4; ++ break; ++ case OBS: ++ muxmode = OMAP34XX_MUX_MODE7; ++ break; ++ } ++ omap_ctrl_writew(muxmode, 
++ OMAP343X_PADCONF_ETK(pad_number)); ++ debobs_pads[pad_number].mode = i; ++ ++ return count; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations debobs_mode_fops = { ++ .open = debobs_mode_open, ++ .read = debobs_mode_read, ++ .write = debobs_mode_write, ++}; ++ ++static int debobs_get(void *data, u64 *val) ++{ ++ struct obs *o = data; ++ ++ *val = o->value; ++ ++ return 0; ++} ++ ++static int debobs_set(void *data, u64 val) ++{ ++ struct obs *o = data; ++ ++ val &= BIT(o->mask) - 1; ++ ++ omap_ctrl_writeb(val, o->offset); ++ o->value = val; ++ ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(debobs_fops, debobs_get, debobs_set, "%llu\n"); ++ ++static inline int __init _new_debobs_pad(struct debobs_pad *pad, char *name, ++ int number, struct dentry *root) ++{ ++ struct dentry *d; ++ struct obs *o; ++ ++ d = debugfs_create_dir(name, root); ++ if (IS_ERR(d)) ++ return PTR_ERR(d); ++ ++ omap_ctrl_writew(OMAP34XX_MUX_MODE4, OMAP343X_PADCONF_ETK(number)); ++ gpio_direction_input(ETK_GPIO(number)); ++ gpio_export(ETK_GPIO(number), 1); ++ (void) debugfs_create_file("mode", S_IRUGO | S_IWUGO, d, ++ (void *)number, &debobs_mode_fops); ++ ++ o = &pad->core_obs; ++ o->offset = OMAP343X_CONTROL_DEBOBS(number); ++ o->value = omap_ctrl_readw(o->offset); ++ o->mask = 7; ++ (void) debugfs_create_file("coreobs", S_IRUGO | S_IWUGO, d, o, ++ &debobs_fops); ++ ++ o = &pad->wakeup_obs; ++ o->offset = OMAP343X_CONTROL_WKUP_DEBOBSMUX(number); ++ o->value = omap_ctrl_readb(o->offset); ++ o->mask = 5; ++ (void) debugfs_create_file("wakeupobs", S_IRUGO | S_IWUGO, d, o, ++ &debobs_fops); ++ ++ return 0; ++} ++ ++/* Public functions */ ++ ++void debug_gpio_set(unsigned gpio, int value) ++{ ++ if (!debobs_initialized) ++ return ; ++ ++ WARN_ON(gpio >= NUM_OF_DEBOBS_PADS); ++ if (gpio < NUM_OF_DEBOBS_PADS) ++ __gpio_set_value(ETK_GPIO(gpio), value); ++} ++ ++int debug_gpio_get(unsigned gpio) ++{ ++ if (!debobs_initialized) ++ return -EINVAL; ++ ++ WARN_ON(gpio >= NUM_OF_DEBOBS_PADS); ++ if (gpio < NUM_OF_DEBOBS_PADS) ++ return __gpio_get_value(ETK_GPIO(gpio)); ++ ++ return -EINVAL; ++} ++ ++int __init init_debobs(void) ++{ ++ struct dentry *debobs_root; ++ int i, err; ++ char name[10]; ++ ++ debobs_root = debugfs_create_dir("debobs", NULL); ++ if (IS_ERR(debobs_root)) ++ return PTR_ERR(debobs_root); ++ ++ for (i = 0; i < NUM_OF_DEBOBS_PADS; i++) { ++ snprintf(name, sizeof(name), "hw_dbg%d", i); ++ if (!gpio_request(ETK_GPIO(i), name)) { ++ err = _new_debobs_pad(&debobs_pads[i], name, i, ++ debobs_root); ++ } else ++ gpio_free(ETK_GPIO(i)); ++ } ++ ++ debobs_initialized = 1; ++ ++ return 0; ++} ++ ++late_initcall_sync(init_debobs); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/devices.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/devices.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/devices.c 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/devices.c 2011-09-04 11:31:05.000000000 +0200 +@@ -56,10 +56,60 @@ static inline void omap_init_camera(void + + #elif defined(CONFIG_VIDEO_OMAP3) || defined(CONFIG_VIDEO_OMAP3_MODULE) + +-static struct resource cam_resources[] = { ++static struct resource omap3isp_resources[] = { + { +- .start = OMAP34XX_CAMERA_BASE, +- .end = OMAP34XX_CAMERA_BASE + 0x1B70, ++ .start = OMAP3430_ISP_BASE, ++ .end = OMAP3430_ISP_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_CBUFF_BASE, ++ .end = OMAP3430_ISP_CBUFF_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_CCP2_BASE, ++ .end 
= OMAP3430_ISP_CCP2_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_CCDC_BASE, ++ .end = OMAP3430_ISP_CCDC_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_HIST_BASE, ++ .end = OMAP3430_ISP_HIST_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_H3A_BASE, ++ .end = OMAP3430_ISP_H3A_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_PREV_BASE, ++ .end = OMAP3430_ISP_PREV_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_RESZ_BASE, ++ .end = OMAP3430_ISP_RESZ_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_SBL_BASE, ++ .end = OMAP3430_ISP_SBL_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_CSI2A_BASE, ++ .end = OMAP3430_ISP_CSI2A_END, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3430_ISP_CSI2PHY_BASE, ++ .end = OMAP3430_ISP_CSI2PHY_END, + .flags = IORESOURCE_MEM, + }, + { +@@ -68,16 +118,16 @@ static struct resource cam_resources[] = + } + }; + +-static struct platform_device omap_cam_device = { +- .name = "omap34xxcam", ++static struct platform_device omap3isp_device = { ++ .name = "omap3isp", + .id = -1, +- .num_resources = ARRAY_SIZE(cam_resources), +- .resource = cam_resources, ++ .num_resources = ARRAY_SIZE(omap3isp_resources), ++ .resource = omap3isp_resources, + }; + + static inline void omap_init_camera(void) + { +- platform_device_register(&omap_cam_device); ++ platform_device_register(&omap3isp_device); + } + #else + static inline void omap_init_camera(void) +@@ -163,6 +213,10 @@ static struct resource sti_resources[] = + .flags = IORESOURCE_IRQ, + } + }; ++ ++/* Emulation pin manager */ ++static void epm_init(void) { } ++ + #elif defined(CONFIG_ARCH_OMAP3) + + #define OMAP3_SDTI_BASE 0x54500000 +@@ -181,6 +235,39 @@ static struct resource sti_resources[] = + } + }; + ++#define EPM_BASE 0x5401D000 ++ ++#define EPM_CONTROL_0 0x50 ++#define EPM_CONTROL_2 0x58 ++ ++/* Emulation pin manager */ ++static void epm_init(void) ++{ ++ void __iomem *epm_base; ++ ++ epm_base = ioremap(EPM_BASE, 256); ++ if (unlikely(!epm_base)) { ++ printk(KERN_ERR "EPM cannot be ioremapped\n"); ++ return; ++ } ++ ++ __raw_writel(1 << 30, epm_base + EPM_CONTROL_2); ++ ++ /* ++ * EMU0 (dbgp0) pin as XTI clk ++ * EMU1 (dbgp1) pin as XTI d0 ++ */ ++ __raw_writel(0x00000078, epm_base + EPM_CONTROL_0); ++ ++ /* ++ * TRACEDATA[13] (dbgp17) pin as XTI d1 ++ * TRACEDATA[14] (dbgp18) pin as XTI d2 ++ * TRACEDATA[15] (dbgp19) pin as XTI d3 ++ */ ++ __raw_writel(0x80007770, epm_base + EPM_CONTROL_2); ++ iounmap(epm_base); ++} ++ + #endif + + static struct platform_device sti_device = { +@@ -192,8 +279,19 @@ static struct platform_device sti_device + + static inline void omap_init_sti(void) + { ++ const struct omap_sti_console_config *info; ++ ++ info = omap_get_config(OMAP_TAG_STI_CONSOLE, ++ struct omap_sti_console_config); ++ ++ if (!info) ++ return; ++ ++ epm_init(); ++ + platform_device_register(&sti_device); + } ++ + #else + static inline void omap_init_sti(void) {} + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/dspbridge.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/dspbridge.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/dspbridge.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/dspbridge.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,72 @@ ++/* ++ * TI's dspbridge platform device registration ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
++ * Copyright (C) 2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++ ++#include ++ ++#include ++ ++static struct platform_device *dspbridge_pdev; ++ ++static struct dspbridge_platform_data dspbridge_pdata __initdata = { ++ .dsp_set_min_opp = omap_pm_dsp_set_min_opp, ++ .dsp_get_opp = omap_pm_dsp_get_opp, ++ .cpu_set_freq = omap_pm_cpu_set_freq, ++ .cpu_get_freq = omap_pm_cpu_get_freq, ++}; ++ ++static int __init dspbridge_init(void) ++{ ++ struct platform_device *pdev; ++ int err = -ENOMEM; ++ struct dspbridge_platform_data *pdata = &dspbridge_pdata; ++ ++ pdata->phys_mempool_base = dspbridge_get_mempool_base(); ++ ++ if (pdata->phys_mempool_base) { ++ pdata->phys_mempool_size = CONFIG_BRIDGE_MEMPOOL_SIZE; ++ pr_info("%s: %x bytes @ %x\n", __func__, ++ pdata->phys_mempool_size, pdata->phys_mempool_base); ++ } ++ ++ pdev = platform_device_alloc("C6410", -1); ++ if (!pdev) ++ goto err_out; ++ ++ err = platform_device_add_data(pdev, pdata, sizeof(*pdata)); ++ if (err) ++ goto err_out; ++ ++ err = platform_device_add(pdev); ++ if (err) ++ goto err_out; ++ ++ dspbridge_pdev = pdev; ++ return 0; ++ ++err_out: ++ platform_device_put(pdev); ++ return err; ++} ++module_init(dspbridge_init); ++ ++static void __exit dspbridge_exit(void) ++{ ++ platform_device_unregister(dspbridge_pdev); ++} ++module_exit(dspbridge_exit); ++ ++MODULE_AUTHOR("Hiroshi DOYU"); ++MODULE_DESCRIPTION("TI's dspbridge platform device registration"); ++MODULE_LICENSE("GPL v2"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/gpmc.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/gpmc.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/gpmc.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/gpmc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -54,10 +54,38 @@ + #define GPMC_CHUNK_SHIFT 24 /* 16 MB */ + #define GPMC_SECTION_SHIFT 28 /* 128 MB */ + ++/* Structure to save gpmc cs context */ ++struct gpmc_cs_config { ++ u32 config1; ++ u32 config2; ++ u32 config3; ++ u32 config4; ++ u32 config5; ++ u32 config6; ++ u32 config7; ++ int is_valid; ++}; ++ ++/* ++ * Structure to save/restore gpmc context ++ * to support core off on OMAP3 ++ */ ++struct omap3_gpmc_regs { ++ u32 sysconfig; ++ u32 irqenable; ++ u32 timeout_ctrl; ++ u32 config; ++ u32 prefetch_config1; ++ u32 prefetch_config2; ++ u32 prefetch_control; ++ struct gpmc_cs_config cs_context[GPMC_CS_NUM]; ++}; ++ + static struct resource gpmc_mem_root; + static struct resource gpmc_cs_mem[GPMC_CS_NUM]; + static DEFINE_SPINLOCK(gpmc_mem_lock); + static unsigned gpmc_cs_map; ++static struct omap3_gpmc_regs gpmc_context; + + static void __iomem *gpmc_base; + +@@ -449,3 +477,68 @@ void __init gpmc_init(void) + + gpmc_mem_init(); + } ++ ++#ifdef CONFIG_ARCH_OMAP3 ++void omap3_gpmc_save_context() ++{ ++ int i; ++ gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); ++ gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); ++ gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); ++ gpmc_context.config = gpmc_read_reg(GPMC_CONFIG); ++ gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1); ++ gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2); ++ gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL); ++ for (i = 0; i < GPMC_CS_NUM; i++) { ++ 
gpmc_context.cs_context[i].is_valid = ++ (gpmc_cs_read_reg(i, GPMC_CS_CONFIG7)) ++ & GPMC_CONFIG7_CSVALID; ++ if (gpmc_context.cs_context[i].is_valid) { ++ gpmc_context.cs_context[i].config1 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG1); ++ gpmc_context.cs_context[i].config2 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG2); ++ gpmc_context.cs_context[i].config3 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG3); ++ gpmc_context.cs_context[i].config4 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG4); ++ gpmc_context.cs_context[i].config5 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG5); ++ gpmc_context.cs_context[i].config6 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG6); ++ gpmc_context.cs_context[i].config7 = ++ gpmc_cs_read_reg(i, GPMC_CS_CONFIG7); ++ } ++ } ++} ++ ++void omap3_gpmc_restore_context() ++{ ++ int i; ++ gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); ++ gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); ++ gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); ++ gpmc_write_reg(GPMC_CONFIG, gpmc_context.config); ++ gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1); ++ gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2); ++ gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control); ++ for (i = 0; i < GPMC_CS_NUM; i++) { ++ if (gpmc_context.cs_context[i].is_valid) { ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG1, ++ gpmc_context.cs_context[i].config1); ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG2, ++ gpmc_context.cs_context[i].config2); ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG3, ++ gpmc_context.cs_context[i].config3); ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG4, ++ gpmc_context.cs_context[i].config4); ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG5, ++ gpmc_context.cs_context[i].config5); ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG6, ++ gpmc_context.cs_context[i].config6); ++ gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, ++ gpmc_context.cs_context[i].config7); ++ } ++ } ++} ++#endif /* CONFIG_ARCH_OMAP3 */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/id.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/id.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/id.c 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/id.c 2011-09-04 11:31:05.000000000 +0200 +@@ -194,9 +194,13 @@ void __init omap34xx_check_revision(void + omap_revision = OMAP3430_REV_ES3_0; + rev_name = "ES3.0"; + break; ++ case 4: ++ omap_revision = OMAP3430_REV_ES3_1; ++ rev_name = "ES3.1"; ++ break; + default: + /* Use the latest known revision as default */ +- omap_revision = OMAP3430_REV_ES3_0; ++ omap_revision = OMAP3430_REV_ES3_1; + rev_name = "Unknown revision\n"; + } + } +@@ -235,8 +239,13 @@ void __init omap2_check_revision(void) + omap_chip.oc = CHIP_IS_OMAP3430; + if (omap_rev() == OMAP3430_REV_ES1_0) + omap_chip.oc |= CHIP_IS_OMAP3430ES1; +- else if (omap_rev() > OMAP3430_REV_ES1_0) ++ else if (omap_rev() >= OMAP3430_REV_ES2_0 && ++ omap_rev() <= OMAP3430_REV_ES2_1) + omap_chip.oc |= CHIP_IS_OMAP3430ES2; ++ else if (omap_rev() == OMAP3430_REV_ES3_0) ++ omap_chip.oc |= CHIP_IS_OMAP3430ES3_0; ++ else if (omap_rev() == OMAP3430_REV_ES3_1) ++ omap_chip.oc |= CHIP_IS_OMAP3430ES3_1; + } else { + pr_err("Uninitialized omap_chip, please fix!\n"); + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/io.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/io.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/io.c 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/io.c 2011-09-04 11:31:05.000000000 +0200 +@@ -19,16 +19,19 @@ + #include + 
#include + #include ++#include ++#include + + #include + + #include + #include +-#include + #include + #include + #include + ++#include "omapdev-common.h" ++ + #include "clock.h" + + #include +@@ -38,6 +41,10 @@ + #include + #include "clockdomains.h" + ++#include ++ ++#include ++ + /* + * The machine specific code may provide the extra mapping besides the + * default mapping provided here. +@@ -192,14 +199,58 @@ void __init omap2_map_common_io(void) + omap2_check_revision(); + omap_sram_init(); + omapfb_reserve_sdram(); ++ dspbridge_reserve_sdram(); + } + +-void __init omap2_init_common_hw(struct omap_sdrc_params *sp) ++/* ++ * omap2_init_reprogram_sdrc - reprogram SDRC timing parameters ++ * ++ * Sets the CORE DPLL3 M2 divider to the same value that it's at ++ * currently. This has the effect of setting the SDRC SDRAM AC timing ++ * registers to the values currently defined by the kernel. Currently ++ * only defined for OMAP3; will return 0 if called on OMAP2. Returns ++ * -EINVAL if the dpll3_m2_ck cannot be found, 0 if called on OMAP2, ++ * or passes along the return value of clk_set_rate(). ++ */ ++static int __init _omap2_init_reprogram_sdrc(void) ++{ ++ struct clk *dpll3_m2_ck; ++ int v = -EINVAL; ++ ++ if (!cpu_is_omap34xx()) ++ return 0; ++ ++ dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck"); ++ if (!dpll3_m2_ck) ++ return -EINVAL; ++ ++ pr_info("Reprogramming SDRC\n"); ++ v = clk_set_rate(dpll3_m2_ck, clk_get_rate(dpll3_m2_ck)); ++ if (v) ++ pr_err("dpll3_m2_clk rate change failed: %d\n", v); ++ ++ clk_put(dpll3_m2_ck); ++ ++ return v; ++} ++ ++void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0, ++ struct omap_sdrc_params *sdrc_cs1, ++ struct omap_opp *mpu_opps, ++ struct omap_opp *dsp_opps, ++ struct omap_opp *l3_opps) + { + omap2_mux_init(); ++ /* The OPP tables have to be registered before a clk init */ ++ omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps); + pwrdm_init(powerdomains_omap); + clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps); ++ omapdev_init(omapdevs); + omap2_clk_init(); +- omap2_sdrc_init(sp); ++ omap_pm_if_init(); ++ omap2_sdrc_init(sdrc_cs0, sdrc_cs1); ++ ++ _omap2_init_reprogram_sdrc(); ++ + gpmc_init(); + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/iommu2.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/iommu2.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/iommu2.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/iommu2.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,326 @@ ++/* ++ * omap iommu: omap2/3 architecture specific functions ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU , ++ * Paul Mundt and Toshihiro Kobayashi ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++ ++/* ++ * omap2 architecture specific register bit definitions ++ */ ++#define IOMMU_ARCH_VERSION 0x00000011 ++ ++/* SYSCONF */ ++#define MMU_SYS_IDLE_SHIFT 3 ++#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT) ++#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT) ++#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT) ++#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT) ++ ++#define MMU_SYS_SOFTRESET (1 << 1) ++#define MMU_SYS_AUTOIDLE 1 ++ ++/* SYSSTATUS */ ++#define MMU_SYS_RESETDONE 1 ++ ++/* IRQSTATUS & IRQENABLE */ ++#define MMU_IRQ_MULTIHITFAULT (1 << 4) ++#define MMU_IRQ_TABLEWALKFAULT (1 << 3) ++#define MMU_IRQ_EMUMISS (1 << 2) ++#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) ++#define MMU_IRQ_TLBMISS (1 << 0) ++#define MMU_IRQ_MASK \ ++ (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_EMUMISS | \ ++ MMU_IRQ_TRANSLATIONFAULT) ++ ++/* MMU_CNTL */ ++#define MMU_CNTL_SHIFT 1 ++#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) ++#define MMU_CNTL_EML_TLB (1 << 3) ++#define MMU_CNTL_TWL_EN (1 << 2) ++#define MMU_CNTL_MMU_EN (1 << 1) ++ ++#define get_cam_va_mask(pgsz) \ ++ (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ ++ ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ ++ ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ ++ ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) ++ ++static int omap2_iommu_enable(struct iommu *obj) ++{ ++ u32 l, pa; ++ unsigned long timeout; ++ ++ if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) ++ return -EINVAL; ++ ++ pa = virt_to_phys(obj->iopgd); ++ if (!IS_ALIGNED(pa, SZ_16K)) ++ return -EINVAL; ++ ++ iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG); ++ ++ timeout = jiffies + msecs_to_jiffies(20); ++ do { ++ l = iommu_read_reg(obj, MMU_SYSSTATUS); ++ if (l & MMU_SYS_RESETDONE) ++ break; ++ } while (time_after(jiffies, timeout)); ++ ++ if (!(l & MMU_SYS_RESETDONE)) { ++ dev_err(obj->dev, "can't take mmu out of reset\n"); ++ return -ENODEV; ++ } ++ ++ l = iommu_read_reg(obj, MMU_REVISION); ++ dev_info(obj->dev, "%s: version %d.%d\n", obj->name, ++ (l >> 4) & 0xf, l & 0xf); ++ ++ l = iommu_read_reg(obj, MMU_SYSCONFIG); ++ l &= ~MMU_SYS_IDLE_MASK; ++ l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE); ++ iommu_write_reg(obj, l, MMU_SYSCONFIG); ++ ++ iommu_write_reg(obj, MMU_IRQ_MASK, MMU_IRQENABLE); ++ iommu_write_reg(obj, pa, MMU_TTB); ++ ++ l = iommu_read_reg(obj, MMU_CNTL); ++ l &= ~MMU_CNTL_MASK; ++ l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); ++ iommu_write_reg(obj, l, MMU_CNTL); ++ ++ return 0; ++} ++ ++static void omap2_iommu_disable(struct iommu *obj) ++{ ++ u32 l = iommu_read_reg(obj, MMU_CNTL); ++ ++ l &= ~MMU_CNTL_MASK; ++ iommu_write_reg(obj, l, MMU_CNTL); ++ iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG); ++ ++ dev_dbg(obj->dev, "%s is shutting down\n", obj->name); ++} ++ ++static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra) ++{ ++ int i; ++ u32 stat, da; ++ const char *err_msg[] = { ++ "tlb miss", ++ "translation fault", ++ "emulation miss", ++ "table walk fault", ++ "multi hit fault", ++ }; ++ ++ stat = iommu_read_reg(obj, MMU_IRQSTATUS); ++ stat &= MMU_IRQ_MASK; ++ if (!stat) ++ return 0; ++ ++ da = iommu_read_reg(obj, MMU_FAULT_AD); ++ *ra = da; ++ ++ dev_err(obj->dev, "%s:\tda:%08x ", __func__, da); ++ ++ for (i = 0; i < ARRAY_SIZE(err_msg); i++) { ++ if (stat & (1 << i)) ++ printk("%s ", err_msg[i]); ++ } ++ printk("\n"); ++ ++ iommu_write_reg(obj, stat, MMU_IRQSTATUS); ++ return stat; ++} ++ ++static void 
omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr) ++{ ++ cr->cam = iommu_read_reg(obj, MMU_READ_CAM); ++ cr->ram = iommu_read_reg(obj, MMU_READ_RAM); ++} ++ ++static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr) ++{ ++ iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); ++ iommu_write_reg(obj, cr->ram, MMU_RAM); ++} ++ ++static u32 omap2_cr_to_virt(struct cr_regs *cr) ++{ ++ u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; ++ u32 mask = get_cam_va_mask(cr->cam & page_size); ++ ++ return cr->cam & mask; ++} ++ ++static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e) ++{ ++ struct cr_regs *cr; ++ ++ if (e->da & ~(get_cam_va_mask(e->pgsz))) { ++ dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, ++ e->da); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ cr = kmalloc(sizeof(*cr), GFP_KERNEL); ++ if (!cr) ++ return ERR_PTR(-ENOMEM); ++ ++ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz; ++ cr->ram = e->pa | e->endian | e->elsz | e->mixed; ++ ++ return cr; ++} ++ ++static inline int omap2_cr_valid(struct cr_regs *cr) ++{ ++ return cr->cam & MMU_CAM_V; ++} ++ ++static u32 omap2_get_pte_attr(struct iotlb_entry *e) ++{ ++ u32 attr; ++ ++ attr = e->mixed << 5; ++ attr |= e->endian; ++ attr |= e->elsz >> 3; ++ attr <<= ((e->pgsz & MMU_CAM_PGSZ_4K) ? 0 : 6); ++ ++ return attr; ++} ++ ++static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf) ++{ ++ char *p = buf; ++ ++ /* FIXME: Need more detail analysis of cam/ram */ ++ p += sprintf(p, "%08x %08x\n", cr->cam, cr->ram); ++ ++ return p - buf; ++} ++ ++#define pr_reg(name) \ ++ p += sprintf(p, "%20s: %08x\n", \ ++ __stringify(name), iommu_read_reg(obj, MMU_##name)); ++ ++static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf) ++{ ++ char *p = buf; ++ ++ pr_reg(REVISION); ++ pr_reg(SYSCONFIG); ++ pr_reg(SYSSTATUS); ++ pr_reg(IRQSTATUS); ++ pr_reg(IRQENABLE); ++ pr_reg(WALKING_ST); ++ pr_reg(CNTL); ++ pr_reg(FAULT_AD); ++ pr_reg(TTB); ++ pr_reg(LOCK); ++ pr_reg(LD_TLB); ++ pr_reg(CAM); ++ pr_reg(RAM); ++ pr_reg(GFLUSH); ++ pr_reg(FLUSH_ENTRY); ++ pr_reg(READ_CAM); ++ pr_reg(READ_RAM); ++ pr_reg(EMU_FAULT_AD); ++ ++ return p - buf; ++} ++ ++static void omap2_iommu_save_ctx(struct iommu *obj) ++{ ++ int i; ++ u32 *p = obj->ctx; ++ ++ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { ++ p[i] = iommu_read_reg(obj, i * sizeof(u32)); ++ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); ++ } ++ ++ BUG_ON(p[0] != IOMMU_ARCH_VERSION); ++} ++ ++static void omap2_iommu_restore_ctx(struct iommu *obj) ++{ ++ int i; ++ u32 *p = obj->ctx; ++ ++ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { ++ iommu_write_reg(obj, p[i], i * sizeof(u32)); ++ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); ++ } ++ ++ BUG_ON(p[0] != IOMMU_ARCH_VERSION); ++} ++ ++static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) ++{ ++ e->da = cr->cam & MMU_CAM_VATAG_MASK; ++ e->pa = cr->ram & MMU_RAM_PADDR_MASK; ++ e->valid = cr->cam & MMU_CAM_V; ++ e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; ++ e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; ++ e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; ++ e->mixed = cr->ram & MMU_RAM_MIXED; ++} ++ ++static const struct iommu_functions omap2_iommu_ops = { ++ .version = IOMMU_ARCH_VERSION, ++ ++ .enable = omap2_iommu_enable, ++ .disable = omap2_iommu_disable, ++ .fault_isr = omap2_iommu_fault_isr, ++ ++ .tlb_read_cr = omap2_tlb_read_cr, ++ .tlb_load_cr = omap2_tlb_load_cr, ++ ++ .cr_to_e = omap2_cr_to_e, ++ .cr_to_virt = omap2_cr_to_virt, ++ .alloc_cr = 
omap2_alloc_cr, ++ .cr_valid = omap2_cr_valid, ++ .dump_cr = omap2_dump_cr, ++ ++ .get_pte_attr = omap2_get_pte_attr, ++ ++ .save_ctx = omap2_iommu_save_ctx, ++ .restore_ctx = omap2_iommu_restore_ctx, ++ .dump_ctx = omap2_iommu_dump_ctx, ++}; ++ ++static int __init omap2_iommu_init(void) ++{ ++ return install_iommu_arch(&omap2_iommu_ops); ++} ++module_init(omap2_iommu_init); ++ ++static void __exit omap2_iommu_exit(void) ++{ ++ uninstall_iommu_arch(&omap2_iommu_ops); ++} ++module_exit(omap2_iommu_exit); ++ ++MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); ++MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions"); ++MODULE_LICENSE("GPL v2"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/irq.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/irq.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/irq.c 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -25,6 +25,10 @@ + #define INTC_SYSSTATUS 0x0014 + #define INTC_SIR 0x0040 + #define INTC_CONTROL 0x0048 ++#define INTC_PROTECTION 0x004C ++#define INTC_IDLE 0x0050 ++#define INTC_THRESHOLD 0x0068 ++#define INTC_MIR0 0x0084 + #define INTC_MIR_CLEAR0 0x0088 + #define INTC_MIR_SET0 0x008c + #define INTC_PENDING_IRQ0 0x0098 +@@ -49,6 +53,18 @@ static struct omap_irq_bank { + }, + }; + ++/* Structure to save interrupt controller context */ ++struct omap3_intc_regs { ++ u32 sysconfig; ++ u32 protection; ++ u32 idle; ++ u32 threshold; ++ u32 ilr[INTCPS_NR_IRQS]; ++ u32 mir[INTCPS_NR_MIR_REGS]; ++}; ++ ++static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)]; ++ + /* INTC bank register get/set */ + + static void intc_bank_write_reg(u32 val, struct omap_irq_bank *bank, u16 reg) +@@ -73,9 +89,9 @@ static int omap_check_spurious(unsigned + u32 sir, spurious; + + sir = intc_bank_read_reg(&irq_banks[0], INTC_SIR); +- spurious = sir >> 6; ++ spurious = sir >> 7; + +- if (spurious > 1) { ++ if (spurious) { + printk(KERN_WARNING "Spurious irq %i: 0x%08x, please flush " + "posted write for irq %i\n", + irq, sir, previous_irq); +@@ -158,8 +174,10 @@ static void __init omap_irq_bank_init_on + while (!(intc_bank_read_reg(bank, INTC_SYSSTATUS) & 0x1)) + /* Wait for reset to complete */; + +- /* Enable autoidle */ ++ /* Do not enable autoidle as it seems to cause problems */ ++#if 0 + intc_bank_write_reg(1 << 0, bank, INTC_SYSCONFIG); ++#endif + } + + int omap_irq_pending(void) +@@ -212,3 +230,53 @@ void __init omap_init_irq(void) + } + } + ++#ifdef CONFIG_ARCH_OMAP3 ++void omap3_intc_save_context(void) ++{ ++ int ind = 0, i = 0; ++ for (ind = 0; ind < ARRAY_SIZE(irq_banks); ind++) { ++ struct omap_irq_bank *bank = irq_banks + ind; ++ intc_context[ind].sysconfig = ++ intc_bank_read_reg(bank, INTC_SYSCONFIG); ++ intc_context[ind].protection = ++ intc_bank_read_reg(bank, INTC_PROTECTION); ++ intc_context[ind].idle = ++ intc_bank_read_reg(bank, INTC_IDLE); ++ intc_context[ind].threshold = ++ intc_bank_read_reg(bank, INTC_THRESHOLD); ++ for (i = 0; i < INTCPS_NR_IRQS; i++) ++ intc_context[ind].ilr[i] = ++ intc_bank_read_reg(bank, (0x100 + 0x4*i)); ++ for (i = 0; i < INTCPS_NR_MIR_REGS; i++) ++ intc_context[ind].mir[i] = ++ intc_bank_read_reg(&irq_banks[0], INTC_MIR0 + ++ (0x20 * i)); ++ } ++} ++ ++void omap3_intc_restore_context(void) ++{ ++ int ind = 0, i = 0; ++ ++ for (ind = 0; ind < ARRAY_SIZE(irq_banks); ind++) { ++ struct omap_irq_bank *bank = irq_banks + ind; ++ intc_bank_write_reg(intc_context[ind].sysconfig, ++ bank, 
INTC_SYSCONFIG); ++ intc_bank_write_reg(intc_context[ind].sysconfig, ++ bank, INTC_SYSCONFIG); ++ intc_bank_write_reg(intc_context[ind].protection, ++ bank, INTC_PROTECTION); ++ intc_bank_write_reg(intc_context[ind].idle, ++ bank, INTC_IDLE); ++ intc_bank_write_reg(intc_context[ind].threshold, ++ bank, INTC_THRESHOLD); ++ for (i = 0; i < INTCPS_NR_IRQS; i++) ++ intc_bank_write_reg(intc_context[ind].ilr[i], ++ bank, (0x100 + 0x4*i)); ++ for (i = 0; i < INTCPS_NR_MIR_REGS; i++) ++ intc_bank_write_reg(intc_context[ind].mir[i], ++ &irq_banks[0], INTC_MIR0 + (0x20 * i)); ++ } ++ /* MIRs are saved and restore with other PRCM registers */ ++} ++#endif /* CONFIG_ARCH_OMAP3 */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/Kconfig kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/Kconfig +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/Kconfig 2011-09-04 11:32:09.983211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/Kconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -53,6 +53,27 @@ config MACH_NOKIA_N810_WIMAX + depends on MACH_NOKIA_N800 + select MACH_NOKIA_N810 + ++config MACH_NOKIA_RX51 ++ bool "Nokia RX-51 board" ++ depends on ARCH_OMAP3 && ARCH_OMAP34XX ++ select VIDEO_ET8EK8 if VIDEO_OMAP3 && VIDEO_HELPER_CHIPS_AUTO ++ select VIDEO_AD5820 if VIDEO_OMAP3 && VIDEO_HELPER_CHIPS_AUTO ++ select VIDEO_ADP1653 if VIDEO_OMAP3 && VIDEO_HELPER_CHIPS_AUTO ++ select VIDEO_SMIA_SENSOR if VIDEO_OMAP3 && VIDEO_HELPER_CHIPS_AUTO ++ select VIDEO_MACH_RX51 if VIDEO_OMAP3 && VIDEO_HELPER_CHIPS_AUTO ++ ++config VIDEO_MACH_RX51 ++ tristate "Nokia RX-51 board camera" ++ depends on MACH_NOKIA_RX51 && VIDEO_DEV && VIDEO_OMAP3 && TWL4030_CORE ++ ++config VIDEO_MACH_RX51_OLD_I2C ++ bool "Camera related devices on I2C bus 2 instead of 3" ++ default n ++ ++config MACH_NOKIA_RX71 ++ bool "Nokia RX-71 board" ++ depends on MACH_NOKIA_RX51 ++ + config MACH_OMAP2_TUSB6010 + bool + depends on ARCH_OMAP2 && ARCH_OMAP2420 +@@ -132,3 +153,9 @@ config MACH_OVERO + config MACH_OMAP3_PANDORA + bool "OMAP3 Pandora" + depends on ARCH_OMAP3 && ARCH_OMAP34XX ++ ++config RX51_CAMERA_BUTTON ++ tristate "RX51 Camera Button" ++ depends on VIDEO_MACH_RX51 ++ help ++ Say Y here if you want to support the rx51 camera button +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/Makefile kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/Makefile +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/Makefile 2011-09-04 11:32:09.993211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/Makefile 2011-09-04 11:31:05.000000000 +0200 +@@ -5,7 +5,7 @@ + # Common support + obj-y := irq.o id.o io.o sdrc.o control.o prcm.o clock.o mux.o \ + devices.o serial.o gpmc.o timer-gp.o powerdomain.o \ +- clockdomain.o ++ clockdomain.o omapdev.o + + obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o + +@@ -23,7 +23,7 @@ ifeq ($(CONFIG_PM),y) + obj-y += pm.o + obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o + obj-$(CONFIG_ARCH_OMAP24XX) += sleep24xx.o +-obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o ++obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o cpuidle34xx.o + obj-$(CONFIG_PM_DEBUG) += pm-debug.o + endif + +@@ -33,6 +33,9 @@ obj-$(CONFIG_OMAP_SMARTREFLEX) += smart + # Clock framework + obj-$(CONFIG_ARCH_OMAP2) += clock24xx.o + obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o ++obj-$(CONFIG_OMAP_PM_SRF) += resource34xx.o ++ ++obj-$(CONFIG_MPU_BRIDGE) += dspbridge.o + + # DSP + obj-$(CONFIG_OMAP_MMU_FWK) += mmu_mach.o +@@ -40,6 +43,14 @@ obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_m + mailbox_mach-objs := mailbox.o + mmu_mach-objs := mmu.o + ++# Debobs 
++obj-$(CONFIG_OMAP3_DEBOBS) += debobs.o ++ ++iommu-y += iommu2.o ++iommu-$(CONFIG_ARCH_OMAP3) += omap3-iommu.o ++ ++obj-$(CONFIG_OMAP_IOMMU) += $(iommu-y) ++ + # Specific board support + obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o + obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o board-h4-mmc.o +@@ -76,6 +87,23 @@ obj-$(CONFIG_MACH_NOKIA_N800) += board- + board-n800-dsp.o \ + board-n800-camera.o + obj-$(CONFIG_MACH_NOKIA_N810) += board-n810.o ++obj-$(CONFIG_VIDEO_MACH_RX51) += board-rx51-camera.o ++obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \ ++ board-omap-bt.o \ ++ board-n800-flash.o \ ++ board-rx51-audio.o \ ++ board-rx51-flash.o \ ++ board-rx51-sdram.o \ ++ board-rx51-video.o \ ++ board-rx51-network.o \ ++ board-rx51-camera-base.o \ ++ board-rx51-peripherals.o \ ++ mmc-twl4030.o \ ++ ssi.o \ ++ usb-musb.o ++obj-$(CONFIG_MACH_NOKIA_RX71) += board-rx71.o \ ++ board-rx71-peripherals.o ++ + obj-$(CONFIG_MACH_OVERO) += board-overo.o \ + mmc-twl4030.o \ + usb-musb.o \ +@@ -89,3 +117,4 @@ obj-$(CONFIG_MACH_OMAP3_PANDORA) += boar + # TUSB 6010 chips + obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o + ++obj-$(CONFIG_RX51_CAMERA_BUTTON) += rx51_camera_btn.o +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mcbsp.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mcbsp.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mcbsp.c 2011-09-04 11:32:09.993211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mcbsp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -22,111 +22,7 @@ + #include + #include + +-struct mcbsp_internal_clk { +- struct clk clk; +- struct clk **childs; +- int n_childs; +-}; +- +-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) +-static void omap_mcbsp_clk_init(struct mcbsp_internal_clk *mclk) +-{ +- const char *clk_names[] = { "mcbsp_ick", "mcbsp_fck" }; +- int i; +- +- mclk->n_childs = ARRAY_SIZE(clk_names); +- mclk->childs = kzalloc(mclk->n_childs * sizeof(struct clk *), +- GFP_KERNEL); +- +- for (i = 0; i < mclk->n_childs; i++) { +- /* We fake a platform device to get correct device id */ +- struct platform_device pdev; +- +- pdev.dev.bus = &platform_bus_type; +- pdev.id = mclk->clk.id; +- mclk->childs[i] = clk_get(&pdev.dev, clk_names[i]); +- if (IS_ERR(mclk->childs[i])) +- printk(KERN_ERR "Could not get clock %s (%d).\n", +- clk_names[i], mclk->clk.id); +- } +-} +- +-static int omap_mcbsp_clk_enable(struct clk *clk) +-{ +- struct mcbsp_internal_clk *mclk = container_of(clk, +- struct mcbsp_internal_clk, clk); +- int i; +- +- for (i = 0; i < mclk->n_childs; i++) +- clk_enable(mclk->childs[i]); +- return 0; +-} +- +-static void omap_mcbsp_clk_disable(struct clk *clk) +-{ +- struct mcbsp_internal_clk *mclk = container_of(clk, +- struct mcbsp_internal_clk, clk); +- int i; +- +- for (i = 0; i < mclk->n_childs; i++) +- clk_disable(mclk->childs[i]); +-} +- +-static struct mcbsp_internal_clk omap_mcbsp_clks[] = { +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 1, +- .clkdm = { .name = "virt_opp_clkdm" }, +- .enable = omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 2, +- .clkdm = { .name = "virt_opp_clkdm" }, +- .enable = omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 3, +- .clkdm = { .name = "virt_opp_clkdm" }, +- .enable = omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 4, +- .clkdm = { .name = "virt_opp_clkdm" }, +- .enable = 
omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +- { +- .clk = { +- .name = "mcbsp_clk", +- .id = 5, +- .clkdm = { .name = "virt_opp_clkdm" }, +- .enable = omap_mcbsp_clk_enable, +- .disable = omap_mcbsp_clk_disable, +- }, +- }, +-}; +- +-#define omap_mcbsp_clks_size ARRAY_SIZE(omap_mcbsp_clks) +-#else +-#define omap_mcbsp_clks_size 0 +-static struct mcbsp_internal_clk __initdata *omap_mcbsp_clks; +-static inline void omap_mcbsp_clk_init(struct clk *clk) +-{ } +-#endif ++const char *clk_names[] = { "mcbsp_ick", "mcbsp_fck" }; + + static void omap2_mcbsp2_mux_setup(void) + { +@@ -159,7 +55,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP1_IRQ_RX, + .tx_irq = INT_24XX_MCBSP1_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + { + .phys_base = OMAP24XX_MCBSP2_BASE, +@@ -168,7 +65,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP2_IRQ_RX, + .tx_irq = INT_24XX_MCBSP2_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + }; + #define OMAP2420_MCBSP_PDATA_SZ ARRAY_SIZE(omap2420_mcbsp_pdata) +@@ -186,7 +84,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP1_IRQ_RX, + .tx_irq = INT_24XX_MCBSP1_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + { + .phys_base = OMAP24XX_MCBSP2_BASE, +@@ -195,7 +94,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP2_IRQ_RX, + .tx_irq = INT_24XX_MCBSP2_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + { + .phys_base = OMAP2430_MCBSP3_BASE, +@@ -204,7 +104,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP3_IRQ_RX, + .tx_irq = INT_24XX_MCBSP3_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + { + .phys_base = OMAP2430_MCBSP4_BASE, +@@ -213,7 +114,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP4_IRQ_RX, + .tx_irq = INT_24XX_MCBSP4_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + { + .phys_base = OMAP2430_MCBSP5_BASE, +@@ -222,7 +124,8 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP5_IRQ_RX, + .tx_irq = INT_24XX_MCBSP5_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, + }, + }; + #define OMAP2430_MCBSP_PDATA_SZ ARRAY_SIZE(omap2430_mcbsp_pdata) +@@ -240,25 +143,33 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP1_IRQ_RX, + .tx_irq = INT_24XX_MCBSP1_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, ++ .buffer_size = 0x6F, + }, + { + .phys_base = OMAP34XX_MCBSP2_BASE, ++ .phys_base_st = OMAP34XX_MCBSP2_ST_BASE, + .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX, + .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX, + .rx_irq = INT_24XX_MCBSP2_IRQ_RX, + .tx_irq = INT_24XX_MCBSP2_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, ++ .buffer_size = 0x3FF, + }, + { + .phys_base = OMAP34XX_MCBSP3_BASE, ++ .phys_base_st = OMAP34XX_MCBSP3_ST_BASE, + .dma_rx_sync = OMAP24XX_DMA_MCBSP3_RX, + .dma_tx_sync = OMAP24XX_DMA_MCBSP3_TX, + .rx_irq = INT_24XX_MCBSP3_IRQ_RX, + .tx_irq = INT_24XX_MCBSP3_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ 
.clk_names = clk_names, ++ .num_clks = 2, ++ .buffer_size = 0x6F, + }, + { + .phys_base = OMAP34XX_MCBSP4_BASE, +@@ -267,7 +178,9 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP4_IRQ_RX, + .tx_irq = INT_24XX_MCBSP4_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, ++ .buffer_size = 0x6F, + }, + { + .phys_base = OMAP34XX_MCBSP5_BASE, +@@ -276,7 +189,9 @@ static struct omap_mcbsp_platform_data o + .rx_irq = INT_24XX_MCBSP5_IRQ_RX, + .tx_irq = INT_24XX_MCBSP5_IRQ_TX, + .ops = &omap2_mcbsp_ops, +- .clk_name = "mcbsp_clk", ++ .clk_names = clk_names, ++ .num_clks = 2, ++ .buffer_size = 0x6F, + }, + }; + #define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata) +@@ -287,14 +202,6 @@ static struct omap_mcbsp_platform_data o + + static int __init omap2_mcbsp_init(void) + { +- int i; +- +- for (i = 0; i < omap_mcbsp_clks_size; i++) { +- /* Once we call clk_get inside init, we do not register it */ +- omap_mcbsp_clk_init(&omap_mcbsp_clks[i]); +- clk_register(&omap_mcbsp_clks[i].clk); +- } +- + if (cpu_is_omap2420()) + omap_mcbsp_count = OMAP2420_MCBSP_PDATA_SZ; + if (cpu_is_omap2430()) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/memory.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/memory.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/memory.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/memory.c 2008-12-25 00:26:37.000000000 +0100 +@@ -0,0 +1,191 @@ ++/* ++ * linux/arch/arm/mach-omap2/memory.c ++ * ++ * Memory timing related functions for OMAP24XX ++ * ++ * Copyright (C) 2005 Texas Instruments Inc. ++ * Richard Woodruff ++ * ++ * Copyright (C) 2005 Nokia Corporation ++ * Tony Lindgren ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "prm.h" ++ ++#include "memory.h" ++#include "sdrc.h" ++ ++void __iomem *omap2_sdrc_base; ++void __iomem *omap2_sms_base; ++ ++static struct memory_timings mem_timings; ++static u32 curr_perf_level = CORE_CLK_SRC_DPLL_X2; ++ ++u32 omap2_memory_get_slow_dll_ctrl(void) ++{ ++ return mem_timings.slow_dll_ctrl; ++} ++ ++u32 omap2_memory_get_fast_dll_ctrl(void) ++{ ++ return mem_timings.fast_dll_ctrl; ++} ++ ++u32 omap2_memory_get_type(void) ++{ ++ return mem_timings.m_type; ++} ++ ++/* ++ * Check the DLL lock state, and return tue if running in unlock mode. ++ * This is needed to compensate for the shifted DLL value in unlock mode. ++ */ ++u32 omap2_dll_force_needed(void) ++{ ++ /* dlla and dllb are a set */ ++ u32 dll_state = sdrc_read_reg(SDRC_DLLA_CTRL); ++ ++ if ((dll_state & (1 << 2)) == (1 << 2)) ++ return 1; ++ else ++ return 0; ++} ++ ++/* ++ * 'level' is the value to store to CM_CLKSEL2_PLL.CORE_CLK_SRC. 
++ * Practical values are CORE_CLK_SRC_DPLL (for CORE_CLK = DPLL_CLK) or ++ * CORE_CLK_SRC_DPLL_X2 (for CORE_CLK = * DPLL_CLK * 2) ++ */ ++u32 omap2_reprogram_sdrc(u32 level, u32 force) ++{ ++ u32 dll_ctrl, m_type; ++ u32 prev = curr_perf_level; ++ unsigned long flags; ++ ++ if ((curr_perf_level == level) && !force) ++ return prev; ++ ++ if (level == CORE_CLK_SRC_DPLL) { ++ dll_ctrl = omap2_memory_get_slow_dll_ctrl(); ++ } else if (level == CORE_CLK_SRC_DPLL_X2) { ++ dll_ctrl = omap2_memory_get_fast_dll_ctrl(); ++ } else { ++ return prev; ++ } ++ ++ m_type = omap2_memory_get_type(); ++ ++ local_irq_save(flags); ++ __raw_writel(0xffff, OMAP24XX_PRCM_VOLTSETUP); ++ omap2_sram_reprogram_sdrc(level, dll_ctrl, m_type); ++ curr_perf_level = level; ++ local_irq_restore(flags); ++ ++ return prev; ++} ++ ++#if !defined(CONFIG_ARCH_OMAP2) ++void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl, ++ u32 base_cs, u32 force_unlock) ++{ ++} ++void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, ++ u32 mem_type) ++{ ++} ++#endif ++ ++void omap2_init_memory_params(u32 force_lock_to_unlock_mode) ++{ ++ unsigned long dll_cnt; ++ u32 fast_dll = 0; ++ ++ mem_timings.m_type = !((sdrc_read_reg(SDRC_MR_0) & 0x3) == 0x1); /* DDR = 1, SDR = 0 */ ++ ++ /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others. ++ * In the case of 2422, its ok to use CS1 instead of CS0. ++ */ ++ if (cpu_is_omap2422()) ++ mem_timings.base_cs = 1; ++ else ++ mem_timings.base_cs = 0; ++ ++ if (mem_timings.m_type != M_DDR) ++ return; ++ ++ /* With DDR we need to determine the low frequency DLL value */ ++ if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL)) ++ mem_timings.dll_mode = M_UNLOCK; ++ else ++ mem_timings.dll_mode = M_LOCK; ++ ++ if (mem_timings.base_cs == 0) { ++ fast_dll = sdrc_read_reg(SDRC_DLLA_CTRL); ++ dll_cnt = sdrc_read_reg(SDRC_DLLA_STATUS) & 0xff00; ++ } else { ++ fast_dll = sdrc_read_reg(SDRC_DLLB_CTRL); ++ dll_cnt = sdrc_read_reg(SDRC_DLLB_STATUS) & 0xff00; ++ } ++ if (force_lock_to_unlock_mode) { ++ fast_dll &= ~0xff00; ++ fast_dll |= dll_cnt; /* Current lock mode */ ++ } ++ /* set fast timings with DLL filter disabled */ ++ mem_timings.fast_dll_ctrl = (fast_dll | (3 << 8)); ++ ++ /* No disruptions, DDR will be offline & C-ABI not followed */ ++ omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl, ++ mem_timings.fast_dll_ctrl, ++ mem_timings.base_cs, ++ force_lock_to_unlock_mode); ++ mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */ ++ ++ /* Turn status into unlock ctrl */ ++ mem_timings.slow_dll_ctrl |= ++ ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2)); ++ ++ /* 90 degree phase for anything below 133Mhz + disable DLL filter */ ++ mem_timings.slow_dll_ctrl |= ((1 << 1) | (3 << 8)); ++} ++ ++void __init omap2_set_globals_memory(struct omap_globals *omap2_globals) ++{ ++ omap2_sdrc_base = omap2_globals->sdrc; ++ omap2_sms_base = omap2_globals->sms; ++} ++ ++/* turn on smart idle modes for SDRAM scheduler and controller */ ++void __init omap2_init_memory(void) ++{ ++ u32 l; ++ ++ if (!cpu_is_omap2420()) ++ return; ++ ++ l = sms_read_reg(SMS_SYSCONFIG); ++ l &= ~(0x3 << 3); ++ l |= (0x2 << 3); ++ sms_write_reg(l, SMS_SYSCONFIG); ++ ++ l = sdrc_read_reg(SDRC_SYSCONFIG); ++ l &= ~(0x3 << 3); ++ l |= (0x2 << 3); ++ sdrc_write_reg(l, SDRC_SYSCONFIG); ++} +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/memory.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/memory.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/memory.h 1970-01-01 01:00:00.000000000 +0100 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/memory.h 2008-12-25 00:26:37.000000000 +0100 +@@ -0,0 +1,43 @@ ++/* ++ * linux/arch/arm/mach-omap2/memory.h ++ * ++ * Interface for memory timing related functions for OMAP24XX ++ * ++ * Copyright (C) 2005 Texas Instruments Inc. ++ * Richard Woodruff ++ * ++ * Copyright (C) 2005 Nokia Corporation ++ * Tony Lindgren ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#ifndef ARCH_ARM_MACH_OMAP2_MEMORY_H ++#define ARCH_ARM_MACH_OMAP2_MEMORY_H ++ ++/* Memory timings */ ++#define M_DDR 1 ++#define M_LOCK_CTRL (1 << 2) ++#define M_UNLOCK 0 ++#define M_LOCK 1 ++ ++struct memory_timings { ++ u32 m_type; /* ddr = 1, sdr = 0 */ ++ u32 dll_mode; /* use lock mode = 1, unlock mode = 0 */ ++ u32 slow_dll_ctrl; /* unlock mode, dll value for slow speed */ ++ u32 fast_dll_ctrl; /* unlock mode, dll value for fast speed */ ++ u32 base_cs; /* base chip select to use for calculations */ ++}; ++ ++extern void omap2_init_memory_params(u32 force_lock_to_unlock_mode); ++extern u32 omap2_memory_get_slow_dll_ctrl(void); ++extern u32 omap2_memory_get_fast_dll_ctrl(void); ++extern u32 omap2_memory_get_type(void); ++u32 omap2_dll_force_needed(void); ++u32 omap2_reprogram_sdrc(u32 level, u32 force); ++void __init omap2_init_memory(void); ++void __init gpmc_init(void); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mmc-twl4030.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mmc-twl4030.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mmc-twl4030.c 2011-09-04 11:32:09.993211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mmc-twl4030.c 2011-09-04 11:31:05.000000000 +0200 +@@ -16,39 +16,119 @@ + #include + #include + #include ++#include + #include + + #include + #include + #include + #include ++#include + + #include "mmc-twl4030.h" + ++#define MMCHS1 (L4_34XX_BASE + 0x9C000) ++#define MMCHS2 (L4_34XX_BASE + 0xB4000) ++#define MMCHS3 (L4_34XX_BASE + 0xAD000) ++#define MAX_MMC 3 ++#define MMCHS_SYSCONFIG 0x0010 ++#define MMCHS_SYSCONFIG_SWRESET (1 << 1) ++#define MMCHS_SYSSTATUS 0x0014 ++#define MMCHS_SYSSTATUS_RESETDONE (1 << 0) ++ ++static struct platform_device dummy_pdev = { ++ .dev = { ++ .bus = &platform_bus_type, ++ }, ++}; ++ ++/** ++ * hsmmc_reset() - Full reset of each HS-MMC controller ++ * ++ * Ensure that each MMC controller is fully reset. Controllers ++ * left in an unknown state (by bootloaer) may prevent retention ++ * or OFF-mode. This is especially important in cases where the ++ * MMC driver is not enabled, _or_ built as a module. ++ * ++ * In order for reset to work, interface, functional and debounce ++ * clocks must be enabled. The debounce clock comes from func_32k_clk ++ * and is not under SW control, so we only enable i- and f-clocks. 
++ **/ ++static void __init hsmmc_reset(void) ++{ ++ u32 i, base[MAX_MMC] = {MMCHS1, MMCHS2, MMCHS3}; ++ ++ for (i = 0; i < MAX_MMC; i++) { ++ u32 v; ++ struct clk *iclk, *fclk; ++ struct device *dev = &dummy_pdev.dev; ++ ++ dummy_pdev.id = i; ++ iclk = clk_get(dev, "mmchs_ick"); ++ if (iclk && clk_enable(iclk)) ++ iclk = NULL; ++ ++ fclk = clk_get(dev, "mmchs_fck"); ++ if (fclk && clk_enable(fclk)) ++ fclk = NULL; ++ ++ if (!iclk || !fclk) { ++ printk(KERN_WARNING ++ "%s: Unable to enable clocks for MMC%d, " ++ "cannot reset.\n", __func__, i); ++ break; ++ } ++ ++ omap_writel(MMCHS_SYSCONFIG_SWRESET, base[i] + MMCHS_SYSCONFIG); ++ v = omap_readl(base[i] + MMCHS_SYSSTATUS); ++ while (!(omap_readl(base[i] + MMCHS_SYSSTATUS) & ++ MMCHS_SYSSTATUS_RESETDONE)) ++ cpu_relax(); ++ ++ if (fclk) { ++ clk_disable(fclk); ++ clk_put(fclk); ++ } ++ if (iclk) { ++ clk_disable(iclk); ++ clk_put(iclk); ++ } ++ } ++} ++ + #if defined(CONFIG_TWL4030_CORE) && \ + (defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)) + + #define LDO_CLR 0x00 + #define VSEL_S2_CLR 0x40 ++#define VMMC_DEV_GRP_P1 0x20 ++#define DEDICATED_OFFSET 3 ++#define VMMC_DEV_GRP(c) (c->twl_vmmc_dev_grp) + ++#define VAUX3_DEV_GRP 0x1F + #define VMMC1_DEV_GRP 0x27 +-#define VMMC1_CLR 0x00 ++#define VMMC2_DEV_GRP 0x2B ++#define VSIM_DEV_GRP 0x37 ++ + #define VMMC1_315V 0x03 + #define VMMC1_300V 0x02 + #define VMMC1_285V 0x01 + #define VMMC1_185V 0x00 +-#define VMMC1_DEDICATED 0x2A + +-#define VMMC2_DEV_GRP 0x2B +-#define VMMC2_CLR 0x40 + #define VMMC2_315V 0x0c + #define VMMC2_300V 0x0b + #define VMMC2_285V 0x0a ++#define VMMC2_280V 0x09 + #define VMMC2_260V 0x08 + #define VMMC2_185V 0x06 +-#define VMMC2_DEDICATED 0x2E + +-#define VMMC_DEV_GRP_P1 0x20 ++#define VAUX3_300V 0x04 ++#define VAUX3_280V 0x03 ++#define VAUX3_250V 0x02 ++#define VAUX3_180V 0x01 ++#define VAUX3_150V 0x00 ++ ++#define VSIM_18V 0x03 + + static u16 control_pbias_offset; + static u16 control_devconf1_offset; +@@ -58,16 +138,14 @@ static u16 control_devconf1_offset; + static struct twl_mmc_controller { + struct omap_mmc_platform_data *mmc; + u8 twl_vmmc_dev_grp; +- u8 twl_mmc_dedicated; +- char name[HSMMC_NAME_LEN]; ++ bool vsim_18v; ++ char name[HSMMC_NAME_LEN + 1]; + } hsmmc[] = { + { + .twl_vmmc_dev_grp = VMMC1_DEV_GRP, +- .twl_mmc_dedicated = VMMC1_DEDICATED, + }, + { + .twl_vmmc_dev_grp = VMMC2_DEV_GRP, +- .twl_mmc_dedicated = VMMC2_DEDICATED, + }, + }; + +@@ -98,6 +176,14 @@ static int twl_mmc_get_ro(struct device + return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); + } + ++static int twl_mmc_get_cover_state(struct device *dev, int slot) ++{ ++ struct omap_mmc_platform_data *mmc = dev->platform_data; ++ ++ /* NOTE: assumes card detect signal is active-low */ ++ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); ++} ++ + /* + * MMC Slot Initialization. 
+ */ +@@ -141,7 +227,6 @@ static void twl_mmc_cleanup(struct devic + } + + #ifdef CONFIG_PM +- + static int twl_mmc_suspend(struct device *dev, int slot) + { + struct omap_mmc_platform_data *mmc = dev->platform_data; +@@ -163,73 +248,274 @@ static int twl_mmc_resume(struct device + #define twl_mmc_resume NULL + #endif + ++#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) ++ ++static int twl4030_mmc_get_context_loss(struct device *dev) ++{ ++ return omap_pm_get_dev_context_loss_count(dev); ++} ++ ++#else ++#define twl4030_mmc_get_context_loss NULL ++#endif ++ + /* + * Sets the MMC voltage in twl4030 + */ +-static int twl_mmc_set_voltage(struct twl_mmc_controller *c, int vdd) ++ ++#define MMC1_OCR (MMC_VDD_165_195 \ ++ |MMC_VDD_28_29|MMC_VDD_29_30|MMC_VDD_30_31|MMC_VDD_31_32) ++#define MMC2_OCR (MMC_VDD_165_195 \ ++ |MMC_VDD_25_26|MMC_VDD_26_27|MMC_VDD_27_28 \ ++ |MMC_VDD_28_29|MMC_VDD_29_30|MMC_VDD_30_31|MMC_VDD_31_32) ++ ++#define VMMC1_ID 5 ++#define VMMC2_ID 6 ++#define VAUX3_ID 3 ++#define VSIM_ID 9 ++#define BAD_ID 255 ++ ++static int twl_mmc_i2c_wait(void) ++{ ++ int ret, timeout = 100; ++ u8 status; ++ ++ do { ++ ret = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, ++ &status, 0x14); ++ if (ret) ++ return ret; ++ ++ if (!(status & 1)) ++ return 0; ++ ++ msleep(10); ++ ++ } while (--timeout > 0); ++ ++ return -1; ++} ++ ++static int twl_mmc_send_pb_msg(u16 msg) + { + int ret; +- u8 vmmc, dev_grp_val; ++ u8 pwb_state; + +- switch (1 << vdd) { +- case MMC_VDD_35_36: +- case MMC_VDD_34_35: +- case MMC_VDD_33_34: +- case MMC_VDD_32_33: +- case MMC_VDD_31_32: +- case MMC_VDD_30_31: +- if (c->twl_vmmc_dev_grp == VMMC1_DEV_GRP) +- vmmc = VMMC1_315V; +- else +- vmmc = VMMC2_315V; +- break; +- case MMC_VDD_29_30: +- if (c->twl_vmmc_dev_grp == VMMC1_DEV_GRP) +- vmmc = VMMC1_315V; +- else +- vmmc = VMMC2_300V; +- break; +- case MMC_VDD_27_28: +- case MMC_VDD_26_27: +- if (c->twl_vmmc_dev_grp == VMMC1_DEV_GRP) +- vmmc = VMMC1_285V; +- else +- vmmc = VMMC2_285V; +- break; +- case MMC_VDD_25_26: +- case MMC_VDD_24_25: +- case MMC_VDD_23_24: +- case MMC_VDD_22_23: +- case MMC_VDD_21_22: +- case MMC_VDD_20_21: +- if (c->twl_vmmc_dev_grp == VMMC1_DEV_GRP) +- vmmc = VMMC1_285V; +- else +- vmmc = VMMC2_260V; +- break; +- case MMC_VDD_165_195: +- if (c->twl_vmmc_dev_grp == VMMC1_DEV_GRP) +- vmmc = VMMC1_185V; +- else +- vmmc = VMMC2_185V; +- break; ++ ret = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, ++ &pwb_state, 0x14); ++ if (ret) ++ return ret; ++ ++ ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ++ pwb_state | (1 << 1), 0x14); ++ if (ret) ++ return ret; ++ ++ ret = twl_mmc_i2c_wait(); ++ if (ret) ++ goto out; ++ ++ ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ++ msg >> 8, 0x15); ++ if (ret) ++ goto out; ++ ++ ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ++ msg & 0xff, 0x16); ++ if (ret) ++ goto out; ++ ++ ret = twl_mmc_i2c_wait(); ++ if (ret) ++ goto out; ++ ++out: ++ /* restore the previous state of TWL4030 */ ++ twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ++ pwb_state, 0x14); ++ ++ return ret; ++} ++ ++static u8 dev_grp_to_id(u8 vmmc_dev_grp) ++{ ++ switch (vmmc_dev_grp) { ++ case VMMC1_DEV_GRP: ++ return VMMC1_ID; ++ case VMMC2_DEV_GRP: ++ return VMMC2_ID; ++ case VAUX3_DEV_GRP: ++ return VAUX3_ID; ++ case VSIM_DEV_GRP: ++ return VSIM_ID; + default: +- vmmc = 0; +- break; ++ return BAD_ID; + } ++} ++ ++static int twl_mmc_regulator_set_mode(u8 vmmc_dev_grp, int sleep) ++{ ++ u8 reg_id = dev_grp_to_id(vmmc_dev_grp); ++ u16 msg; ++ ++ if (reg_id == BAD_ID) ++ return 
-EINVAL; + +- if (vmmc) +- dev_grp_val = VMMC_DEV_GRP_P1; /* Power up */ ++ if (sleep) ++ msg = MSG_SINGULAR(DEV_GRP_P1, reg_id, RES_STATE_SLEEP); + else +- dev_grp_val = LDO_CLR; /* Power down */ ++ msg = MSG_SINGULAR(DEV_GRP_P1, reg_id, RES_STATE_ACTIVE); ++ ++ return twl_mmc_send_pb_msg(msg); ++} + ++static int twl_mmc_enable_regulator(u8 vmmc_dev_grp) ++{ ++ int ret; ++ u16 msg; ++ u8 reg_id = dev_grp_to_id(vmmc_dev_grp); ++ ++ if (reg_id == BAD_ID) { ++ printk(KERN_ERR "twl_mmc_enable_regulator: unknown dev grp\n"); ++ return -1; ++ } ++ ++ /* add regulator to dev grp P1 */ + ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, +- dev_grp_val, c->twl_vmmc_dev_grp); ++ VMMC_DEV_GRP_P1, vmmc_dev_grp); ++ if (ret) ++ return ret; ++ ++ /* construct message to enable regulator on P1 */ ++ msg = (1 << 13) | (reg_id << 4) | 0xe; ++ ++ return twl_mmc_send_pb_msg(msg); ++} ++ ++static int twl_mmc_set_regulator(u8 vmmc_dev_grp, u8 vmmc) ++{ ++ int ret; ++ ++ ret = twl_mmc_enable_regulator(vmmc_dev_grp); + if (ret) + return ret; + + ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, +- vmmc, c->twl_mmc_dedicated); ++ vmmc, vmmc_dev_grp + DEDICATED_OFFSET); ++ ++ return ret; ++} ++ ++static int twl_mmc_shutdown_regulator(u8 vmmc_dev_grp) ++{ ++ return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, ++ LDO_CLR, vmmc_dev_grp); ++} ++ ++static int twl_mmc_set_voltage(struct twl_mmc_controller *c, int vdd) ++{ ++ int ret; ++ u8 vmmc; ++ ++ if (c->twl_vmmc_dev_grp == VMMC1_DEV_GRP) { ++ /* VMMC1: max 220 mA. And for 8-bit mode, ++ * VSIM: max 50 mA ++ */ ++ switch (1 << vdd) { ++ case MMC_VDD_165_195: ++ vmmc = VMMC1_185V; ++ /* and VSIM_180V */ ++ break; ++ case MMC_VDD_28_29: ++ vmmc = VMMC1_285V; ++ /* and VSIM_280V */ ++ break; ++ case MMC_VDD_29_30: ++ case MMC_VDD_30_31: ++ vmmc = VMMC1_300V; ++ /* and VSIM_300V */ ++ break; ++ case MMC_VDD_31_32: ++ vmmc = VMMC1_315V; ++ /* error if VSIM needed */ ++ break; ++ default: ++ vmmc = 0; ++ break; ++ } ++ } else if (c->twl_vmmc_dev_grp == VAUX3_DEV_GRP) { ++ /* VAUX3: max 200 mA */ ++ switch (1 << vdd) { ++ case MMC_VDD_165_195: ++ vmmc = VAUX3_180V; ++ break; ++ case MMC_VDD_25_26: ++ case MMC_VDD_26_27: ++ vmmc = VAUX3_250V; ++ break; ++ case MMC_VDD_27_28: ++ vmmc = VAUX3_280V; ++ break; ++ case MMC_VDD_28_29: ++ vmmc = VAUX3_280V; ++ break; ++ case MMC_VDD_29_30: ++ case MMC_VDD_30_31: ++ vmmc = VAUX3_300V; ++ break; ++ case MMC_VDD_31_32: ++ vmmc = VAUX3_300V; ++ break; ++ default: ++ vmmc = 0; ++ break; ++ } ++ } else if (c->twl_vmmc_dev_grp == VMMC2_DEV_GRP) { ++ /* VMMC2: max 100 mA */ ++ switch (1 << vdd) { ++ case MMC_VDD_165_195: ++ vmmc = VMMC2_185V; ++ break; ++ case MMC_VDD_25_26: ++ case MMC_VDD_26_27: ++ vmmc = VMMC2_260V; ++ break; ++ case MMC_VDD_27_28: ++ vmmc = VMMC2_280V; ++ break; ++ case MMC_VDD_28_29: ++ vmmc = VMMC2_285V; ++ break; ++ case MMC_VDD_29_30: ++ case MMC_VDD_30_31: ++ vmmc = VMMC2_300V; ++ break; ++ case MMC_VDD_31_32: ++ vmmc = VMMC2_315V; ++ break; ++ default: ++ vmmc = 0; ++ break; ++ } ++ } else { ++ return 0; ++ } ++ ++ if (vmmc) { ++ ret = twl_mmc_set_regulator(VMMC_DEV_GRP(c), vmmc); ++ if (ret) ++ return ret; ++ ++ if (c->vsim_18v) ++ ret = twl_mmc_set_regulator(VSIM_DEV_GRP, VSIM_18V); ++ } else { ++ ret = twl_mmc_shutdown_regulator(VMMC_DEV_GRP(c)); ++ if (ret) ++ return ret; ++ ++ if (c->vsim_18v) ++ ret = twl_mmc_shutdown_regulator(VSIM_DEV_GRP); ++ } + + return ret; + } +@@ -242,6 +528,14 @@ static int twl_mmc1_set_power(struct dev + struct twl_mmc_controller *c = &hsmmc[0]; + struct 
omap_mmc_platform_data *mmc = dev->platform_data; + ++ /* ++ * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the ++ * card using the same TWL VMMC1 supply (hsmmc[0]); OMAP has both ++ * 1.8V and 3.0V modes, controlled by the PBIAS register. ++ * ++ * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which ++ * is most naturally TWL VSIM; those pins also use PBIAS. ++ */ + if (power_on) { + if (cpu_is_omap2430()) { + reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1); +@@ -298,6 +592,12 @@ static int twl_mmc2_set_power(struct dev + struct twl_mmc_controller *c = &hsmmc[1]; + struct omap_mmc_platform_data *mmc = dev->platform_data; + ++ /* ++ * Assume TWL VMMC2 (hsmmc[1]) is used only to power the card ... OMAP ++ * VDDS is used to power the pins, optionally with a transceiver to ++ * support cards using voltages other than VDDS (1.8V nominal). When a ++ * transceiver is used, DAT3..7 are muxed as transceiver control pins. ++ */ + if (power_on) { + if (mmc->slots[0].internal_clock) { + u32 reg; +@@ -314,6 +614,72 @@ static int twl_mmc2_set_power(struct dev + return ret; + } + ++static int twl_mmc1_set_sleep(struct device *dev, int slot, int sleep, int vdd, ++ int cardsleep) ++{ ++ struct twl_mmc_controller *c = &hsmmc[0]; ++ int err; ++ ++ if (!c->vsim_18v) ++ return twl_mmc_regulator_set_mode(c->twl_vmmc_dev_grp, sleep); ++ ++ if (cardsleep) { ++ /* VCC can be turned off if card is asleep */ ++ c->vsim_18v = 0; ++ if (sleep) ++ err = twl_mmc1_set_power(dev, slot, 0, 0); ++ else ++ err = twl_mmc1_set_power(dev, slot, 1, vdd); ++ c->vsim_18v = 1; ++ } else ++ err = twl_mmc_regulator_set_mode(c->twl_vmmc_dev_grp, sleep); ++ if (err) ++ return err; ++ return twl_mmc_regulator_set_mode(VSIM_DEV_GRP, sleep); ++} ++ ++static int twl_mmc2_set_sleep(struct device *dev, int slot, int sleep, int vdd, ++ int cardsleep) ++{ ++ struct twl_mmc_controller *c = &hsmmc[1]; ++ ++ int err; ++ ++ if (!c->vsim_18v) ++ return twl_mmc_regulator_set_mode(c->twl_vmmc_dev_grp, sleep); ++ ++ if (cardsleep) { ++ /* VCC can be turned off if card is asleep */ ++ c->vsim_18v = 0; ++ if (sleep) ++ err = twl_mmc2_set_power(dev, slot, 0, 0); ++ else ++ err = twl_mmc2_set_power(dev, slot, 1, vdd); ++ c->vsim_18v = 1; ++ } else ++ err = twl_mmc_regulator_set_mode(c->twl_vmmc_dev_grp, sleep); ++ if (err) ++ return err; ++ return twl_mmc_regulator_set_mode(VSIM_DEV_GRP, sleep); ++} ++ ++#if defined(CONFIG_BRIDGE_DVFS) ++/* ++ * This handler can be used for setting other DVFS/PM constraints: ++ * intr latency, wakeup latency, DMA start latency, bus throughput ++ * according to API in mach/omap-pm.h ++ * Currently we only set constraints for MPU frequency which forces ++ * VDD1 to stay at OPP3. ++ */ ++#define MMC_MIN_MPU_FREQUENCY 500000000 /* S500M at OPP3 */ ++static void mmc_set_pm_constraints(struct device *dev, int on) ++{ ++ omap_pm_set_min_mpu_freq(dev, (on ? 
MMC_MIN_MPU_FREQUENCY : 0)); ++} ++#else ++#define mmc_set_pm_constraints NULL ++#endif ++ + static struct omap_mmc_platform_data *hsmmc_data[OMAP34XX_NR_MMC] __initdata; + + void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) +@@ -321,6 +687,7 @@ void __init twl4030_mmc_init(struct twl4 + struct twl4030_hsmmc_info *c; + int nr_hsmmc = ARRAY_SIZE(hsmmc_data); + ++ hsmmc_reset(); + if (cpu_is_omap2430()) { + control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE; + control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1; +@@ -349,13 +716,12 @@ void __init twl4030_mmc_init(struct twl4 + return; + } + +- sprintf(twl->name, "mmc%islot%i", c->mmc, 1); ++ if (c->name) ++ strncpy(twl->name, c->name, HSMMC_NAME_LEN); ++ else ++ sprintf(twl->name, "mmc%islot%i", c->mmc, 1); + mmc->slots[0].name = twl->name; + mmc->nr_slots = 1; +- mmc->slots[0].ocr_mask = MMC_VDD_165_195 | +- MMC_VDD_26_27 | MMC_VDD_27_28 | +- MMC_VDD_29_30 | +- MMC_VDD_30_31 | MMC_VDD_31_32; + mmc->slots[0].wires = c->wires; + mmc->slots[0].internal_clock = !c->ext_clock; + mmc->dma_mask = 0xffffffff; +@@ -369,10 +735,18 @@ void __init twl4030_mmc_init(struct twl4 + + mmc->slots[0].switch_pin = c->gpio_cd; + mmc->slots[0].card_detect_irq = gpio_to_irq(c->gpio_cd); +- mmc->slots[0].card_detect = twl_mmc_card_detect; ++ if (c->cover_only) ++ mmc->slots[0].get_cover_state = twl_mmc_get_cover_state; ++ else ++ mmc->slots[0].card_detect = twl_mmc_card_detect; + } else + mmc->slots[0].switch_pin = -EINVAL; + ++ mmc->get_context_loss_count = ++ twl4030_mmc_get_context_loss; ++ ++ mmc->set_pm_constraints = mmc_set_pm_constraints; ++ + /* write protect normally uses an OMAP gpio */ + if (gpio_is_valid(c->gpio_wp)) { + gpio_request(c->gpio_wp, "mmc_wp"); +@@ -383,6 +757,11 @@ void __init twl4030_mmc_init(struct twl4 + } else + mmc->slots[0].gpio_wp = -EINVAL; + ++ if (c->power_saving) ++ mmc->slots[0].power_saving = 1; ++ ++ mmc->slots[0].caps = c->caps; ++ + /* NOTE: we assume OMAP's MMC1 and MMC2 use + * the TWL4030's VMMC1 and VMMC2, respectively; + * and that OMAP's MMC3 isn't used. 
+@@ -391,9 +770,23 @@ void __init twl4030_mmc_init(struct twl4 + switch (c->mmc) { + case 1: + mmc->slots[0].set_power = twl_mmc1_set_power; ++ mmc->slots[0].set_sleep = twl_mmc1_set_sleep; ++ mmc->slots[0].ocr_mask = MMC1_OCR; + break; + case 2: + mmc->slots[0].set_power = twl_mmc2_set_power; ++ mmc->slots[0].set_sleep = twl_mmc2_set_sleep; ++ if (c->vmmc_dev_grp) ++ twl->twl_vmmc_dev_grp = c->vmmc_dev_grp; ++ if (c->transceiver) ++ mmc->slots[0].ocr_mask = MMC2_OCR; ++ else if (c->vsim_18v) { ++ mmc->slots[0].ocr_mask = MMC_VDD_27_28 | ++ MMC_VDD_28_29 | MMC_VDD_29_30 | ++ MMC_VDD_30_31 | MMC_VDD_31_32; ++ twl->vsim_18v = true; ++ } else ++ mmc->slots[0].ocr_mask = MMC_VDD_165_195; + break; + default: + pr_err("MMC%d configuration not supported!\n", c->mmc); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mmc-twl4030.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mmc-twl4030.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mmc-twl4030.h 2011-09-04 11:32:09.993211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mmc-twl4030.h 2011-09-04 11:31:05.000000000 +0200 +@@ -9,9 +9,16 @@ + struct twl4030_hsmmc_info { + u8 mmc; /* controller 1/2/3 */ + u8 wires; /* 1/4/8 wires */ ++ bool transceiver; /* MMC-2 option */ ++ bool ext_clock; /* use external pin for input clock */ ++ u8 vmmc_dev_grp; /* override default regulator */ ++ bool vsim_18v; /* MMC-2 option */ ++ bool cover_only; /* No card detect - just cover switch */ ++ bool power_saving; /* Try to sleep or power off when possible */ ++ unsigned long caps; /* MMC host capabilities */ + int gpio_cd; /* or -EINVAL */ + int gpio_wp; /* or -EINVAL */ +- int ext_clock:1; /* use external pin for input clock */ ++ char *name; /* or NULL for default */ + }; + + #if defined(CONFIG_TWL4030_CORE) && \ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mux.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mux.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/mux.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/mux.c 2011-09-04 11:31:05.000000000 +0200 +@@ -472,6 +472,11 @@ MUX_CFG_34XX("AF5_34XX_GPIO142", 0x170, + MUX_CFG_34XX("AE5_34XX_GPIO143", 0x172, + OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT) + ++/* OMAP3 SDRC CKE signals to SDR/DDR ram chips */ ++MUX_CFG_34XX("H16_34XX_SDRC_CKE0", 0x262, ++ OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT) ++MUX_CFG_34XX("H17_34XX_SDRC_CKE1", 0x264, ++ OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT) + }; + + #define OMAP34XX_PINS_SZ ARRAY_SIZE(omap34xx_pins) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,177 @@ ++/* ++ * omapdev device registration and handling code ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Copyright (C) 2007-2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ */ ++#undef DEBUG ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++/* odev_list contains all registered struct omapdevs */ ++static LIST_HEAD(odev_list); ++ ++ ++/* Private functions */ ++ ++/* ++ * _omapdev_lookup - look up an OMAP module pointer by its name ++ * @name: name of the OMAP module to look up ++ * ++ * Finds a registered OMAP module by its name, returning a pointer to ++ * it. Returns a pointer to the struct omapdev if found, or NULL ++ * otherwise. ++ */ ++static struct omapdev *_omapdev_lookup(const char *name) ++{ ++ struct omapdev *odev, *temp_odev; ++ ++ if (!name) ++ return NULL; ++ ++ odev = NULL; ++ ++ list_for_each_entry(temp_odev, &odev_list, node) { ++ if (!strcmp(name, temp_odev->name)) { ++ odev = temp_odev; ++ break; ++ } ++ } ++ ++ return odev; ++} ++ ++/* ++ * _omapdev_register - register an OMAP module ++ * @odev: struct omapdev * to register ++ * ++ * Adds a OMAP module to the internal OMAP module list. Returns ++ * -EINVAL if given a null pointer, -EEXIST if a OMAP module is ++ * already registered by the provided name, or 0 upon success. ++ */ ++static int _omapdev_register(struct omapdev *odev) ++{ ++ struct powerdomain *pwrdm; ++ ++ if (!odev) ++ return -EINVAL; ++ ++ if (!omap_chip_is(odev->omap_chip)) ++ return -EINVAL; ++ ++ if (_omapdev_lookup(odev->name)) ++ return -EEXIST; ++ ++ pwrdm = pwrdm_lookup(odev->pwrdm.name); ++ if (!pwrdm) { ++ pr_debug("omapdev: cannot register %s: bad powerdomain\n", ++ odev->name); ++ return -EINVAL; ++ } ++ odev->pwrdm.ptr = pwrdm; ++ ++ list_add(&odev->node, &odev_list); ++ ++ pr_debug("omapdev: registered %s\n", odev->name); ++ ++ return 0; ++} ++ ++ ++ ++/* Public functions */ ++ ++ ++/** ++ * omapdev_init - set up the OMAP module layer ++ * @odevs: ptr to a array of omapdev ptrs to register ++ * ++ * Loop through the list of OMAP modules, registering them all. No ++ * return value. ++ */ ++void omapdev_init(struct omapdev **odevs) ++{ ++ struct omapdev **d = NULL; ++ ++ if (!list_empty(&odev_list)) { ++ pr_debug("omapdev: init already called\n"); ++ return; ++ } ++ ++ for (d = odevs; *d; d++) { ++ int v; ++ ++ if (!omap_chip_is((*d)->omap_chip)) ++ continue; ++ ++ v = _omapdev_register(*d); ++ if (ERR_PTR(v)) ++ pr_err("omapdev: could not register %s\n", ++ (*d)->name); ++ } ++} ++ ++ ++/** ++ * omapdev_get_pwrdm - return pwrdm pointer associated with the device ++ * @omapdev: omapdev * ++ * ++ */ ++struct powerdomain *omapdev_get_pwrdm(struct omapdev *odev) ++{ ++ if (!odev) ++ return NULL; ++ ++ return odev->pwrdm.ptr; ++} ++ ++ ++/** ++ * omapdev_find_pdev - look up an OMAP module by platform_device ++ * @pdev_name: platform_device name to find ++ * @pdev_id: platform_device id to find ++ * ++ * Finds a registered OMAP module by the platform_device name and ID ++ * that is associated with it in the omapdev structure. If multiple ++ * records exist, simply returns the 'first' record that it finds - ++ * this is probably not optimal behavior, but should work for current ++ * purposes. Returns a pointer to the struct omapdev if found, or ++ * NULL otherwise. 
++ */ ++struct omapdev *omapdev_find_pdev(struct platform_device *pdev) ++{ ++ struct omapdev *odev, *temp_odev; ++ ++ if (!pdev) ++ return NULL; ++ ++ odev = NULL; ++ ++ list_for_each_entry(temp_odev, &odev_list, node) { ++ if (temp_odev->pdev_name && ++ !strcmp(pdev->name, temp_odev->pdev_name) && ++ pdev->id == temp_odev->pdev_id) { ++ odev = temp_odev; ++ break; ++ } ++ } ++ ++ return odev; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev-common.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev-common.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev-common.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev-common.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,309 @@ ++/* ++ * OMAP on-chip devices present on OMAP2/3 ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Copyright (C) 2007-2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#ifndef ARCH_ARM_MACH_OMAP2_OMAPDEV_COMMON_H ++#define ARCH_ARM_MACH_OMAP2_OMAPDEV_COMMON_H ++ ++#include ++ ++#include "omapdev242x.h" ++#include "omapdev243x.h" ++#include "omapdev3xxx.h" ++ ++static struct omapdev *omapdevs[] = { ++ ++#ifdef CONFIG_ARCH_OMAP2420 ++ &mpu_242x_omapdev, ++ &iva_242x_omapdev, ++ &gfx_242x_omapdev, ++ &prcm_242x_omapdev, ++ &l3_242x_omapdev, ++ &l4_core_242x_omapdev, ++ &dsp_242x_omapdev, ++ &dsp_mmu_242x_omapdev, ++ &control_242x_omapdev, ++ &tap_242x_omapdev, ++ &gpio2_242x_omapdev, ++ &gpio3_242x_omapdev, ++ &gpio4_242x_omapdev, ++ &gptimer12_242x_omapdev, ++ &uart3_242x_omapdev, ++ &mcbsp2_242x_omapdev, ++ &wdtimer4_242x_omapdev, ++ &gptimer2_242x_omapdev, ++ &gptimer3_242x_omapdev, ++ &gptimer4_242x_omapdev, ++ &gptimer5_242x_omapdev, ++ &gptimer6_242x_omapdev, ++ &gptimer7_242x_omapdev, ++ &gptimer8_242x_omapdev, ++ &gptimer9_242x_omapdev, ++ &etb_242x_omapdev, ++ &cwt_242x_omapdev, ++ &xti_242x_omapdev, ++ &dap_242x_omapdev, ++ &dsi_242x_omapdev, ++ &dsi_pll_242x_omapdev, ++ &dss_242x_omapdev, ++ &dispc_242x_omapdev, ++ &rfbi_242x_omapdev, ++ &venc_242x_omapdev, ++ &fac_242x_omapdev, ++ &cam_242x_omapdev, ++ &cam_core_242x_omapdev, ++ &cam_dma_242x_omapdev, ++ &cam_mmu_242x_omapdev, ++ &mpu_intc_242x_omapdev, ++ &sms_242x_omapdev, ++ &gpmc_242x_omapdev, ++ &sdrc_242x_omapdev, ++ &ocm_ram_242x_omapdev, ++ &ocm_rom_242x_omapdev, ++ &ssi_242x_omapdev, ++ &ohci_242x_omapdev, ++ &otg_242x_omapdev, ++ &sdma_242x_omapdev, ++ &i2c1_242x_omapdev, ++ &i2c2_242x_omapdev, ++ &uart1_242x_omapdev, ++ &uart2_242x_omapdev, ++ &mcbsp1_242x_omapdev, ++ &gptimer10_242x_omapdev, ++ &gptimer11_242x_omapdev, ++ &mailbox_242x_omapdev, ++ &mcspi1_242x_omapdev, ++ &mcspi2_242x_omapdev, ++ &mg_242x_omapdev, ++ &hdq_242x_omapdev, ++ &mspro_242x_omapdev, ++ &wdtimer3_242x_omapdev, ++ &vlynq_242x_omapdev, ++ &eac_242x_omapdev, ++ &mmc_242x_omapdev, ++ &gptimer1_242x_omapdev, ++ &omap_32ksynct_242x_omapdev, ++ &gpio1_242x_omapdev, ++ &wdtimer2_242x_omapdev, ++ &wdtimer1_242x_omapdev, ++ &rng_242x_omapdev, ++ &sha1md5_242x_omapdev, ++ &des_242x_omapdev, ++ &aes_242x_omapdev, ++ &pka_242x_omapdev, ++#endif ++ ++#ifdef CONFIG_ARCH_OMAP2430 ++ &mpu_243x_omapdev, ++ &iva2_243x_omapdev, ++ &iva2_mmu_243x_omapdev, ++ &gfx_243x_omapdev, ++ &l3_243x_omapdev, ++ &l4_core_243x_omapdev, ++ &l4_wkup_243x_omapdev, ++ &dsp_243x_omapdev, ++ &control_243x_omapdev, 
++ &tap_243x_omapdev, ++ &gpio2_243x_omapdev, ++ &gpio3_243x_omapdev, ++ &gpio4_243x_omapdev, ++ &gptimer12_243x_omapdev, ++ &uart3_243x_omapdev, ++ &mcbsp2_243x_omapdev, ++ &wdtimer4_243x_omapdev, ++ &gptimer2_243x_omapdev, ++ &gptimer3_243x_omapdev, ++ &gptimer4_243x_omapdev, ++ &gptimer5_243x_omapdev, ++ &gptimer6_243x_omapdev, ++ &gptimer7_243x_omapdev, ++ &gptimer8_243x_omapdev, ++ &gptimer9_243x_omapdev, ++ &etb_243x_omapdev, ++ &cwt_243x_omapdev, ++ &xti_243x_omapdev, ++ &dap_243x_omapdev, ++ &dsi_243x_omapdev, ++ &dsi_pll_243x_omapdev, ++ &dss_243x_omapdev, ++ &dispc_243x_omapdev, ++ &rfbi_243x_omapdev, ++ &venc_243x_omapdev, ++ &fac_243x_omapdev, ++ &cam_243x_omapdev, ++ &cam_core_243x_omapdev, ++ &cam_dma_243x_omapdev, ++ &cam_mmu_243x_omapdev, ++ &mpu_intc_243x_omapdev, ++ &modem_intc_243x_omapdev, ++ &sms_243x_omapdev, ++ &gpmc_243x_omapdev, ++ &sdrc_243x_omapdev, ++ &ocm_ram_243x_omapdev, ++ &ocm_rom_243x_omapdev, ++ &sad2d_243x_omapdev, ++ &ssi_243x_omapdev, ++ &ohci_243x_omapdev, ++ &fsotg_243x_omapdev, ++ &hsotg_243x_omapdev, ++ &sdma_243x_omapdev, ++ &i2c1_243x_omapdev, ++ &i2c2_243x_omapdev, ++ &uart1_243x_omapdev, ++ &uart2_243x_omapdev, ++ &mcbsp1_243x_omapdev, ++ &gptimer10_243x_omapdev, ++ &gptimer11_243x_omapdev, ++ &mailbox_243x_omapdev, ++ &mcspi1_243x_omapdev, ++ &mcspi2_243x_omapdev, ++ &mg_243x_omapdev, ++ &hdq_243x_omapdev, ++ &mspro_243x_omapdev, ++ &mcbsp5_243x_omapdev, ++ &hsmmc1_243x_omapdev, ++ &hsmmc2_243x_omapdev, ++ &mcspi3_243x_omapdev, ++ &gptimer1_243x_omapdev, ++ &prm_243x_omapdev, ++ &cm_243x_omapdev, ++ &omap_32ksynct_243x_omapdev, ++ &gpio1_243x_omapdev, ++ &wdtimer2_243x_omapdev, ++ &wdtimer1_243x_omapdev, ++ &rng_243x_omapdev, ++ &sha1md5_243x_omapdev, ++ &des_243x_omapdev, ++ &aes_243x_omapdev, ++ &pka_243x_omapdev, ++ &modem_243x_omapdev, ++ &icr_243x_omapdev, ++ &mcbsp3_243x_omapdev, ++ &mcbsp4_243x_omapdev, ++ &gpio5_243x_omapdev, ++#endif ++ ++#ifdef CONFIG_ARCH_OMAP3 ++ &mpu_3xxx_omapdev, ++ &iva2_3xxx_omapdev, ++ &iva2_mmu_3xxx_omapdev, ++ &gfx_3xxx_omapdev, ++ &l3_3xxx_omapdev, ++ &l4_core_3xxx_omapdev, ++ &l4_wkup_3xxx_omapdev, ++ &mpu_intc_3xxx_omapdev, ++ &modem_intc_3xxx_omapdev, ++ &sms_3xxx_omapdev, ++ &gpmc_3xxx_omapdev, ++ &sdrc_3xxx_omapdev, ++ &ocm_ram_3xxx_omapdev, ++ &ocm_rom_3xxx_omapdev, ++ &sad2d_3xxx_omapdev, ++ &ssi_3xxx_omapdev, ++ &sdma_3xxx_omapdev, ++ &i2c1_3xxx_omapdev, ++ &i2c2_3xxx_omapdev, ++ &uart1_3xxx_omapdev, ++ &uart2_3xxx_omapdev, ++ &mcbsp1_3xxx_omapdev, ++ &gptimer10_3xxx_omapdev, ++ &gptimer11_3xxx_omapdev, ++ &mailbox_3xxx_omapdev, ++ &mcspi1_3xxx_omapdev, ++ &mcspi2_3xxx_omapdev, ++ &mg_3xxx_omapdev, ++ &hdq_3xxx_omapdev, ++ &mspro_3xxx_omapdev, ++ &mcbsp5_3xxx_omapdev, ++ &hsmmc1_3xxx_omapdev, ++ &hsmmc2_3xxx_omapdev, ++ &mcspi3_3xxx_omapdev, ++ &gptimer1_3xxx_omapdev, ++ &prm_3xxx_omapdev, ++ &cm_3xxx_omapdev, ++ &omap_32ksynct_3xxx_omapdev, ++ &gpio1_3xxx_omapdev, ++ &wdtimer2_3xxx_omapdev, ++ &wdtimer1_3xxx_omapdev, ++ &rng_3xxx_omapdev, ++ &sha1md5_3xxx_omapdev, ++ &des_3xxx_omapdev, ++ &aes_3xxx_omapdev, ++ &pka_3xxx_omapdev, ++ &neon_3xxx_omapdev, ++ &sgx_3xxx_omapdev, ++ &l4_per_3xxx_omapdev, ++ &l4_emu_3xxx_omapdev, ++ &icr_3xxx_omapdev, ++ &wugen_3xxx_omapdev, ++ &mad2d_3xxx_omapdev, ++ &control_3xxx_omapdev, ++ &i2c3_3xxx_omapdev, ++ &hsmmc3_3xxx_omapdev, ++ &mcspi4_3xxx_omapdev, ++ &sr1_3xxx_omapdev, ++ &sr2_3xxx_omapdev, ++ &usbhost_es1_3xxx_omapdev, ++ &usbotg_es1_3xxx_omapdev, ++ &uart3_3xxx_omapdev, ++ &mcbsp2_3xxx_omapdev, ++ &mcbsp3_3xxx_omapdev, ++ &mcbsp4_3xxx_omapdev, ++ 
&mcbsp2_sidetone_3xxx_omapdev, ++ &mcbsp3_sidetone_3xxx_omapdev, ++ &wdtimer3_3xxx_omapdev, ++ &gptimer2_3xxx_omapdev, ++ &gptimer3_3xxx_omapdev, ++ &gptimer4_3xxx_omapdev, ++ &gptimer5_3xxx_omapdev, ++ &gptimer6_3xxx_omapdev, ++ &gptimer7_3xxx_omapdev, ++ &gptimer8_3xxx_omapdev, ++ &gptimer9_3xxx_omapdev, ++ &gpio2_3xxx_omapdev, ++ &gpio3_3xxx_omapdev, ++ &gpio4_3xxx_omapdev, ++ &gpio5_3xxx_omapdev, ++ &gpio6_3xxx_omapdev, ++ &tap_3xxx_omapdev, ++ &mpuemu_3xxx_omapdev, ++ &tpiu_3xxx_omapdev, ++ &etb_3xxx_omapdev, ++ &dapctl_3xxx_omapdev, ++ &sdti_3xxx_omapdev, ++ &dap_3xxx_omapdev, ++ &usbhost_3xxx_omapdev, ++ &usbotg_3xxx_omapdev, ++ &usbtll_3xxx_omapdev, ++ &dsi_3xxx_omapdev, ++ &dsi_phy_3xxx_omapdev, ++ &dsi_pll_3xxx_omapdev, ++ &dss_3xxx_omapdev, ++ &dispc_3xxx_omapdev, ++ &rfbi_3xxx_omapdev, ++ &venc_3xxx_omapdev, ++ &isp_3xxx_omapdev, ++ &isp_cbuff_3xxx_omapdev, ++ &ccp2_3xxx_omapdev, ++ &ccdc_3xxx_omapdev, ++ &hist_3xxx_omapdev, ++ &h3a_3xxx_omapdev, ++ &prv_3xxx_omapdev, ++ &rsz_3xxx_omapdev, ++ &sbl_3xxx_omapdev, ++ &csi2_3xxx_omapdev, ++#endif ++ ++ NULL, ++}; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev242x.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev242x.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev242x.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev242x.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,581 @@ ++/* ++ * TI OCP devices present on OMAP242x ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Copyright (C) 2007-2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#ifndef ARCH_ARM_MACH_OMAP2_OMAPDEV_242X_H ++#define ARCH_ARM_MACH_OMAP2_OMAPDEV_242X_H ++ ++#include ++ ++#include ++#include ++ ++#ifdef CONFIG_ARCH_OMAP2420 ++ ++/* 242x data from 2420 TRM ES2.1.1 ES2.2 Rev Q */ ++ ++/* MPU */ ++ ++static struct omapdev mpu_242x_omapdev = { ++ .name = "mpu_omapdev", ++ .pwrdm = { .name = "mpu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++ ++/* DSP/IVA pwrdm */ ++ ++/* This is IVA1, the ARM7 core on 2420 */ ++static struct omapdev iva_242x_omapdev = { ++ .name = "iva_omapdev", ++ .pwrdm = { .name = "dsp_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* GFX */ ++ ++/* XXX VGP/MBX split ? 
*/ ++static struct omapdev gfx_242x_omapdev = { ++ .name = "gfx_omapdev", ++ .pwrdm = { .name = "gfx_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* WKUP */ ++ ++static struct omapdev prcm_242x_omapdev = { ++ .name = "prcm_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* CORE */ ++ ++/* L3 bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l3_242x_omapdev = { ++ .name = "l3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* L4_CORE bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l4_core_242x_omapdev = { ++ .name = "l4_core_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dsp_242x_omapdev = { ++ .name = "dsp_omapdev", ++ .pwrdm = { .name = "dsp_pwrdm" }, ++ .pdev_name = "dsp", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dsp_mmu_242x_omapdev = { ++ .name = "dsp_mmu_omapdev", ++ .pwrdm = { .name = "dsp_pwrdm" }, ++ .pdev_name = "dsp", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* Wakeup */ ++ ++/* on CORE pwrdm in OMAP3 */ ++static struct omapdev control_242x_omapdev = { ++ .name = "control_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev tap_242x_omapdev = { ++ .name = "tap_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* GPIO2-4 is on PER_PWRDM on OMAP3 */ ++static struct omapdev gpio2_242x_omapdev = { ++ .name = "gpio2_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gpio3_242x_omapdev = { ++ .name = "gpio3_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gpio4_242x_omapdev = { ++ .name = "gpio4_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer12_242x_omapdev = { ++ .name = "gptimer12_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev uart3_242x_omapdev = { ++ .name = "uart3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mcbsp2_242x_omapdev = { ++ .name = "mcbsp2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* aka the "IVA2 wdtimer" */ ++static struct omapdev wdtimer4_242x_omapdev = { ++ .name = "wdtimer4_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer2_242x_omapdev = { ++ .name = "gptimer2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer3_242x_omapdev = { ++ .name = "gptimer3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer4_242x_omapdev = { ++ .name = "gptimer4_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), 
++}; ++ ++static struct omapdev gptimer5_242x_omapdev = { ++ .name = "gptimer5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer6_242x_omapdev = { ++ .name = "gptimer6_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer7_242x_omapdev = { ++ .name = "gptimer7_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer8_242x_omapdev = { ++ .name = "gptimer8_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer9_242x_omapdev = { ++ .name = "gptimer9_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev etb_242x_omapdev = { ++ .name = "etb_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev cwt_242x_omapdev = { ++ .name = "cwt_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev xti_242x_omapdev = { ++ .name = "xti_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "sti", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dap_242x_omapdev = { ++ .name = "dap_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dsi_242x_omapdev = { ++ .name = "dsi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dsi_pll_242x_omapdev = { ++ .name = "dsi_pll_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dss_242x_omapdev = { ++ .name = "dss_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev dispc_242x_omapdev = { ++ .name = "dispc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev rfbi_242x_omapdev = { ++ .name = "rfbi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev venc_242x_omapdev = { ++ .name = "venc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev fac_242x_omapdev = { ++ .name = "fac_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev cam_242x_omapdev = { ++ .name = "cam_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev cam_core_242x_omapdev = { ++ .name = "cam_core_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev cam_dma_242x_omapdev = { ++ 
.name = "cam_dma_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev cam_mmu_242x_omapdev = { ++ .name = "cam_mmu_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* Connected to the ARM1136 peripheral port, not an L3/L4 interconnect */ ++static struct omapdev mpu_intc_242x_omapdev = { ++ .name = "mpu_intc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev sms_242x_omapdev = { ++ .name = "sms_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gpmc_242x_omapdev = { ++ .name = "gpmc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev sdrc_242x_omapdev = { ++ .name = "sdrc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev ocm_ram_242x_omapdev = { ++ .name = "ocm_ram_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev ocm_rom_242x_omapdev = { ++ .name = "ocm_rom_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev ssi_242x_omapdev = { ++ .name = "ssi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev ohci_242x_omapdev = { ++ .name = "ohci_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "ohci", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev otg_242x_omapdev = { ++ .name = "otg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_otg", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev sdma_242x_omapdev = { ++ .name = "sdma_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev i2c1_242x_omapdev = { ++ .name = "i2c1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev i2c2_242x_omapdev = { ++ .name = "i2c2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev uart1_242x_omapdev = { ++ .name = "uart1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev uart2_242x_omapdev = { ++ .name = "uart2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mcbsp1_242x_omapdev = { ++ .name = "mcbsp1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer10_242x_omapdev = { ++ .name = "gptimer10_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = 
OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer11_242x_omapdev = { ++ .name = "gptimer11_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mailbox_242x_omapdev = { ++ .name = "mailbox_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mailbox", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mcspi1_242x_omapdev = { ++ .name = "mcspi1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mcspi2_242x_omapdev = { ++ .name = "mcspi2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mg_242x_omapdev = { ++ .name = "mg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev hdq_242x_omapdev = { ++ .name = "hdq_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_hdq", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mspro_242x_omapdev = { ++ .name = "mspro_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* Not present on 2430 - present on 3430 in PER pwrdm */ ++static struct omapdev wdtimer3_242x_omapdev = { ++ .name = "wdtimer3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev vlynq_242x_omapdev = { ++ .name = "vlynq_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev eac_242x_omapdev = { ++ .name = "eac_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xx-eac", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev mmc_242x_omapdev = { ++ .name = "mmc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mmci-omap", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gptimer1_242x_omapdev = { ++ .name = "gptimer1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev omap_32ksynct_242x_omapdev = { ++ .name = "32ksynct_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev gpio1_242x_omapdev = { ++ .name = "gpio1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* aka the "omap wdtimer" on 2430 or the "mpu wdtimer" on 3430 */ ++static struct omapdev wdtimer2_242x_omapdev = { ++ .name = "wdtimer2_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .pdev_name = "omap_wdt", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* Secure-mode devices */ ++ ++/* aka the "secure wdtimer" */ ++static struct omapdev wdtimer1_242x_omapdev = { ++ .name = "wdtimer1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev rng_242x_omapdev = { ++ .name = "rng_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_rng", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++/* XXX is 
the slash in this pdev_name going to wreck sysfs? */ ++static struct omapdev sha1md5_242x_omapdev = { ++ .name = "sha1md5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "OMAP SHA1/MD5", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev des_242x_omapdev = { ++ .name = "des_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev aes_242x_omapdev = { ++ .name = "aes_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++static struct omapdev pka_242x_omapdev = { ++ .name = "pka_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420), ++}; ++ ++#endif /* CONFIG_ARCH_OMAP2420 */ ++ ++ ++#endif ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev243x.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev243x.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev243x.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev243x.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,661 @@ ++/* ++ * TI OCP devices present on OMAP243x ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Copyright (C) 2007-2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#ifndef ARCH_ARM_MACH_OMAP2_OMAPDEV_243X_H ++#define ARCH_ARM_MACH_OMAP2_OMAPDEV_243X_H ++ ++#include ++ ++#include ++#include ++ ++#ifdef CONFIG_ARCH_OMAP2430 ++ ++/* 2430 data from 2430 TRM ES2.1 Rev G */ ++ ++/* XXX add IVA2.1 WUGEN for 2430/3430 ? */ ++ ++ ++/* MPU */ ++ ++static struct omapdev mpu_243x_omapdev = { ++ .name = "mpu_omapdev", ++ .pwrdm = { .name = "mpu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* IVA2/DSP */ ++ ++/* dsp_omapdev is what is used on OMAP243x */ ++static struct omapdev iva2_243x_omapdev = { ++ .name = "iva2_omapdev", ++ .pwrdm = { .name = "iva2_pwrdm" }, ++ .pdev_name = "dsp", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev iva2_mmu_243x_omapdev = { ++ .name = "iva2_mmu_omapdev", ++ .pwrdm = { .name = "iva2_pwrdm" }, ++ .pdev_name = "dsp", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++ ++/* GFX */ ++ ++/* XXX VGP/MBX split ? 
*/ ++static struct omapdev gfx_243x_omapdev = { ++ .name = "gfx_omapdev", ++ .pwrdm = { .name = "gfx_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++ ++/* CORE pwrdm */ ++ ++/* L3 bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l3_243x_omapdev = { ++ .name = "l3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* L4_CORE bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l4_core_243x_omapdev = { ++ .name = "l4_core_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* L4_WKUP bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l4_wkup_243x_omapdev = { ++ .name = "l4_wkup_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev dsp_243x_omapdev = { ++ .name = "dsp_omapdev", ++ .pwrdm = { .name = "dsp_pwrdm" }, ++ .pdev_name = "dsp", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* Wakeup */ ++ ++/* on CORE pwrdm in OMAP3 */ ++static struct omapdev control_243x_omapdev = { ++ .name = "control_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev tap_243x_omapdev = { ++ .name = "tap_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* GPIO2-4 is on PER_PWRDM on OMAP3 */ ++static struct omapdev gpio2_243x_omapdev = { ++ .name = "gpio2_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gpio3_243x_omapdev = { ++ .name = "gpio3_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gpio4_243x_omapdev = { ++ .name = "gpio4_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer12_243x_omapdev = { ++ .name = "gptimer12_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev uart3_243x_omapdev = { ++ .name = "uart3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcbsp2_243x_omapdev = { ++ .name = "mcbsp2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* aka the "IVA2 wdtimer" */ ++static struct omapdev wdtimer4_243x_omapdev = { ++ .name = "wdtimer4_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "wdt", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer2_243x_omapdev = { ++ .name = "gptimer2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer3_243x_omapdev = { ++ .name = "gptimer3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer4_243x_omapdev = { ++ .name = "gptimer4_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer5_243x_omapdev = { ++ .name = "gptimer5_omapdev", ++ .pwrdm = { .name = 
"core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer6_243x_omapdev = { ++ .name = "gptimer6_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer7_243x_omapdev = { ++ .name = "gptimer7_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer8_243x_omapdev = { ++ .name = "gptimer8_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer9_243x_omapdev = { ++ .name = "gptimer9_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev etb_243x_omapdev = { ++ .name = "etb_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev cwt_243x_omapdev = { ++ .name = "cwt_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev xti_243x_omapdev = { ++ .name = "xti_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "sti", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev dap_243x_omapdev = { ++ .name = "dap_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev dsi_243x_omapdev = { ++ .name = "dsi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev dsi_pll_243x_omapdev = { ++ .name = "dsi_pll_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev dss_243x_omapdev = { ++ .name = "dss_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev dispc_243x_omapdev = { ++ .name = "dispc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev rfbi_243x_omapdev = { ++ .name = "rfbi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev venc_243x_omapdev = { ++ .name = "venc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev fac_243x_omapdev = { ++ .name = "fac_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev cam_243x_omapdev = { ++ .name = "cam_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev cam_core_243x_omapdev = { ++ .name = "cam_core_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev cam_dma_243x_omapdev = { ++ .name = "cam_dma_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, 
++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev cam_mmu_243x_omapdev = { ++ .name = "cam_mmu_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap24xxcam", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* Connected to the ARM1136 peripheral port, not an L3/L4 interconnect */ ++static struct omapdev mpu_intc_243x_omapdev = { ++ .name = "mpu_intc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* XXX guessing on this one; TRM does not cover it well */ ++static struct omapdev modem_intc_243x_omapdev = { ++ .name = "modem_intc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev sms_243x_omapdev = { ++ .name = "sms_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gpmc_243x_omapdev = { ++ .name = "gpmc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev sdrc_243x_omapdev = { ++ .name = "sdrc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev ocm_ram_243x_omapdev = { ++ .name = "ocm_ram_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev ocm_rom_243x_omapdev = { ++ .name = "ocm_rom_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev sad2d_243x_omapdev = { ++ .name = "sad2d_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev ssi_243x_omapdev = { ++ .name = "ssi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev ohci_243x_omapdev = { ++ .name = "ohci_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "ohci", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev fsotg_243x_omapdev = { ++ .name = "fsotg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_otg", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev hsotg_243x_omapdev = { ++ .name = "hsotg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_otg", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev sdma_243x_omapdev = { ++ .name = "sdma_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev i2c1_243x_omapdev = { ++ .name = "i2c1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev i2c2_243x_omapdev = { ++ .name = "i2c2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev uart1_243x_omapdev = { ++ .name = "uart1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev uart2_243x_omapdev = { ++ .name = "uart2_omapdev", ++ .pwrdm = { .name = 
"core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcbsp1_243x_omapdev = { ++ .name = "mcbsp1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer10_243x_omapdev = { ++ .name = "gptimer10_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gptimer11_243x_omapdev = { ++ .name = "gptimer11_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mailbox_243x_omapdev = { ++ .name = "mailbox_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mailbox", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcspi1_243x_omapdev = { ++ .name = "mcspi1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcspi2_243x_omapdev = { ++ .name = "mcspi2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mg_243x_omapdev = { ++ .name = "mg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev hdq_243x_omapdev = { ++ .name = "hdq_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_hdq", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mspro_243x_omapdev = { ++ .name = "mspro_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcbsp5_243x_omapdev = { ++ .name = "mcbsp5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 5, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev hsmmc1_243x_omapdev = { ++ .name = "hsmmc1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mmci-omap", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev hsmmc2_243x_omapdev = { ++ .name = "hsmmc2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mmci-omap", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcspi3_243x_omapdev = { ++ .name = "mcspi3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 3, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++ ++ ++/* WKUP */ ++ ++static struct omapdev gptimer1_243x_omapdev = { ++ .name = "gptimer1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev prm_243x_omapdev = { ++ .name = "prm_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev cm_243x_omapdev = { ++ .name = "cm_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev omap_32ksynct_243x_omapdev = { ++ .name = "32ksynct_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev 
gpio1_243x_omapdev = { ++ .name = "gpio1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* aka the "omap wdtimer" on 2430 or the "mpu wdtimer" on 3430 */ ++static struct omapdev wdtimer2_243x_omapdev = { ++ .name = "wdtimer2_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .pdev_name = "omap_wdt", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* Secure-mode devices */ ++ ++/* aka the "secure wdtimer" */ ++static struct omapdev wdtimer1_243x_omapdev = { ++ .name = "wdtimer1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev rng_243x_omapdev = { ++ .name = "rng_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_rng", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev sha1md5_243x_omapdev = { ++ .name = "sha1md5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "OMAP SHA1/MD5", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev des_243x_omapdev = { ++ .name = "des_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev aes_243x_omapdev = { ++ .name = "aes_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev pka_243x_omapdev = { ++ .name = "pka_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++ ++static struct omapdev modem_243x_omapdev = { ++ .name = "modem_omapdev", ++ .pwrdm = { .name = "mdm_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev icr_243x_omapdev = { ++ .name = "icr_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++/* These two McBSP instances are in PER on 3430 */ ++static struct omapdev mcbsp3_243x_omapdev = { ++ .name = "mcbsp3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 3, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev mcbsp4_243x_omapdev = { ++ .name = "mcbsp4_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 4, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++static struct omapdev gpio5_243x_omapdev = { ++ .name = "gpio5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430), ++}; ++ ++#endif /* CONFIG_ARCH_OMAP2430 */ ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev3xxx.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev3xxx.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omapdev3xxx.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omapdev3xxx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,804 @@ ++/* ++ * TI OCP devices present on OMAP3xxx ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Copyright (C) 2007-2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ */ ++ ++#ifndef ARCH_ARM_MACH_OMAP2_OMAPDEV_3XXX_H ++#define ARCH_ARM_MACH_OMAP2_OMAPDEV_3XXX_H ++ ++#include ++ ++#include ++#include ++ ++#ifdef CONFIG_ARCH_OMAP3 ++ ++/* 3xxx data from the 34xx ES2 TRM Rev F */ ++ ++/* MPU */ ++ ++static struct omapdev mpu_3xxx_omapdev = { ++ .name = "mpu_3xxx_omapdev", ++ .pwrdm = { .name = "mpu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* IVA2/DSP */ ++ ++/* dsp_omapdev is what is used on OMAP242x */ ++static struct omapdev iva2_3xxx_omapdev = { ++ .name = "iva2_3xxx_omapdev", ++ .pwrdm = { .name = "iva2_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev iva2_mmu_3xxx_omapdev = { ++ .name = "iva2_mmu_3xxx_omapdev", ++ .pwrdm = { .name = "iva2_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++ ++/* GFX */ ++ ++/* XXX VGP/MBX split ? */ ++static struct omapdev gfx_3xxx_omapdev = { ++ .name = "gfx_omapdev", ++ .pwrdm = { .name = "gfx_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1), ++}; ++ ++ ++/* CORE pwrdm */ ++ ++/* L3 bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l3_3xxx_omapdev = { ++ .name = "l3_3xxx_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* L4_CORE bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l4_core_3xxx_omapdev = { ++ .name = "l4_core_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* L4_WKUP bus configuration: RT, AP, LA, PM blocks */ ++static struct omapdev l4_wkup_3xxx_omapdev = { ++ .name = "l4_wkup_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mpu_intc_3xxx_omapdev = { ++ .name = "mpu_intc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* XXX guessing on this one; TRM does not cover it well */ ++static struct omapdev modem_intc_3xxx_omapdev = { ++ .name = "modem_intc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sms_3xxx_omapdev = { ++ .name = "sms_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpmc_3xxx_omapdev = { ++ .name = "gpmc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sdrc_3xxx_omapdev = { ++ .name = "sdrc_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev ocm_ram_3xxx_omapdev = { ++ .name = "ocm_ram_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev ocm_rom_3xxx_omapdev = { ++ .name = "ocm_rom_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sad2d_3xxx_omapdev = { ++ .name = "sad2d_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev ssi_3xxx_omapdev = { ++ .name = "ssi_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_ssi", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sdma_3xxx_omapdev = { ++ .name = "sdma_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = 
OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev i2c1_3xxx_omapdev = { ++ .name = "i2c1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev i2c2_3xxx_omapdev = { ++ .name = "i2c2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev uart1_3xxx_omapdev = { ++ .name = "uart1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev uart2_3xxx_omapdev = { ++ .name = "uart2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp1_3xxx_omapdev = { ++ .name = "mcbsp1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer10_3xxx_omapdev = { ++ .name = "gptimer10_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer11_3xxx_omapdev = { ++ .name = "gptimer11_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mailbox_3xxx_omapdev = { ++ .name = "mailbox_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mailbox", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcspi1_3xxx_omapdev = { ++ .name = "mcspi1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcspi2_3xxx_omapdev = { ++ .name = "mcspi2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mg_3xxx_omapdev = { ++ .name = "mg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev hdq_3xxx_omapdev = { ++ .name = "hdq_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap_hdq", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mspro_3xxx_omapdev = { ++ .name = "mspro_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp5_3xxx_omapdev = { ++ .name = "mcbsp5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 5, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev hsmmc1_3xxx_omapdev = { ++ .name = "hsmmc1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mmci-omap-hs", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev hsmmc2_3xxx_omapdev = { ++ .name = "hsmmc2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mmci-omap-hs", ++ .pdev_id = 1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcspi3_3xxx_omapdev = { ++ .name = "mcspi3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ 
.pdev_id = 3, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++ ++ ++/* WKUP */ ++ ++static struct omapdev gptimer1_3xxx_omapdev = { ++ .name = "gptimer1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev prm_3xxx_omapdev = { ++ .name = "prm_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev cm_3xxx_omapdev = { ++ .name = "cm_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev omap_32ksynct_3xxx_omapdev = { ++ .name = "32ksynct_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpio1_3xxx_omapdev = { ++ .name = "gpio1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* aka the "omap wdtimer" on 2430 or the "mpu wdtimer" on 3430 */ ++static struct omapdev wdtimer2_3xxx_omapdev = { ++ .name = "wdtimer2_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .pdev_name = "omap_wdt", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* Secure-mode devices */ ++ ++/* aka the "secure wdtimer" */ ++static struct omapdev wdtimer1_3xxx_omapdev = { ++ .name = "wdtimer1_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev rng_3xxx_omapdev = { ++ .name = "rng_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .pdev_name = "omap_rng", ++ .pdev_id = -1, ++}; ++ ++static struct omapdev sha1md5_3xxx_omapdev = { ++ .name = "sha1md5_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .pdev_name = "OMAP SHA1/MD5", ++ .pdev_id = -1, ++}; ++ ++static struct omapdev des_3xxx_omapdev = { ++ .name = "des_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev aes_3xxx_omapdev = { ++ .name = "aes_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev pka_3xxx_omapdev = { ++ .name = "pka_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* NEON */ ++ ++static struct omapdev neon_3xxx_omapdev = { ++ .name = "neon_omapdev", ++ .pwrdm = { .name = "neon_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* IVA2 */ ++ ++/* XXX include IVA2 async bridges ? */ ++ ++/* SGX/GFX */ ++ ++static struct omapdev sgx_3xxx_omapdev = { ++ .name = "sgx_omapdev", ++ .pwrdm = { .name = "sgx_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), ++}; ++ ++/* CORE */ ++ ++static struct omapdev l4_per_3xxx_omapdev = { ++ .name = "l4_per_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev l4_emu_3xxx_omapdev = { ++ .name = "l4_emu_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* XXX ICR is present on 2430 & 3430, but is in WKUP on 2430 */ ++static struct omapdev icr_3xxx_omapdev = { ++ .name = "icr_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* IVA2 interrupt controller - XXX 2430 also ? 
*/ ++static struct omapdev wugen_3xxx_omapdev = { ++ .name = "wugen_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* XXX temperature sensor (what is the second one for ?) */ ++ ++/* XXX CWT is on 2430 at least, what about 2420? */ ++ ++static struct omapdev mad2d_3xxx_omapdev = { ++ .name = "mad2d_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* CONTROL/SCM moved into CORE pwrdm on 3430 */ ++static struct omapdev control_3xxx_omapdev = { ++ .name = "control_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev i2c3_3xxx_omapdev = { ++ .name = "i2c3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "i2c_omap", ++ .pdev_id = 3, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev hsmmc3_3xxx_omapdev = { ++ .name = "hsmmc3_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "mmci-omap-hs", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), ++}; ++ ++static struct omapdev mcspi4_3xxx_omapdev = { ++ .name = "mcspi4_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .pdev_name = "omap2_mcspi", ++ .pdev_id = 4, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sr1_3xxx_omapdev = { ++ .name = "sr1_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sr2_3xxx_omapdev = { ++ .name = "sr2_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev usbhost_es1_3xxx_omapdev = { ++ .name = "usbhost_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1), ++}; ++ ++static struct omapdev usbotg_es1_3xxx_omapdev = { ++ .name = "usbotg_omapdev", ++ .pwrdm = { .name = "core_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1), ++}; ++ ++/* L4-PER */ ++ ++static struct omapdev uart3_3xxx_omapdev = { ++ .name = "uart3_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .pdev_name = "serial8250", ++ .pdev_id = PLAT8250_DEV_PLATFORM, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp2_3xxx_omapdev = { ++ .name = "mcbsp2_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 2, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp3_3xxx_omapdev = { ++ .name = "mcbsp3_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 3, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp4_3xxx_omapdev = { ++ .name = "mcbsp4_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .pdev_name = "omap-mcbsp", ++ .pdev_id = 3, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp2_sidetone_3xxx_omapdev = { ++ .name = "mcbsp2_sidetone_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev mcbsp3_sidetone_3xxx_omapdev = { ++ .name = "mcbsp3_sidetone_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* On 2420 also, but in CORE pwrdm */ ++/* aka the "iva2" wdtimer */ ++static struct omapdev wdtimer3_3xxx_omapdev = { ++ .name = "wdtimer3_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = 
OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer2_3xxx_omapdev = { ++ .name = "gptimer2_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer3_3xxx_omapdev = { ++ .name = "gptimer3_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer4_3xxx_omapdev = { ++ .name = "gptimer4_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer5_3xxx_omapdev = { ++ .name = "gptimer5_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer6_3xxx_omapdev = { ++ .name = "gptimer6_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer7_3xxx_omapdev = { ++ .name = "gptimer7_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer8_3xxx_omapdev = { ++ .name = "gptimer8_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gptimer9_3xxx_omapdev = { ++ .name = "gptimer9_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpio2_3xxx_omapdev = { ++ .name = "gpio2_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpio3_3xxx_omapdev = { ++ .name = "gpio3_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpio4_3xxx_omapdev = { ++ .name = "gpio4_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpio5_3xxx_omapdev = { ++ .name = "gpio5_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev gpio6_3xxx_omapdev = { ++ .name = "gpio6_omapdev", ++ .pwrdm = { .name = "per_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* L4-WAKEUP */ ++ ++static struct omapdev tap_3xxx_omapdev = { ++ .name = "tap_omapdev", ++ .pwrdm = { .name = "wkup_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++ ++/* L4-EMU */ ++ ++static struct omapdev mpuemu_3xxx_omapdev = { ++ .name = "mpuemu_omapdev", ++ .pwrdm = { .name = "emu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev tpiu_3xxx_omapdev = { ++ .name = "tpiu_omapdev", ++ .pwrdm = { .name = "emu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev etb_3xxx_omapdev = { ++ .name = "etb_omapdev", ++ .pwrdm = { .name = "emu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev dapctl_3xxx_omapdev = { ++ .name = "dapctl_omapdev", ++ .pwrdm = { .name = "emu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sdti_3xxx_omapdev = { ++ .name = "sdti_omapdev", ++ .pwrdm = { .name = "emu_pwrdm" }, ++ .pdev_name = "sti", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev dap_3xxx_omapdev = { ++ .name = "dap_omapdev", ++ .pwrdm = { .name = "emu_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), 
++}; ++ ++/* USBHOST */ ++ ++static struct omapdev usbhost_3xxx_omapdev = { ++ .name = "usbhost_omapdev", ++ .pwrdm = { .name = "usbhost_pwrdm" }, ++ .pdev_name = "ehci-omap", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), ++}; ++ ++static struct omapdev usbotg_3xxx_omapdev = { ++ .name = "usbotg_omapdev", ++ .pwrdm = { .name = "usbhost_pwrdm" }, ++ .pdev_name = "musb_hdrc", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), ++}; ++ ++static struct omapdev usbtll_3xxx_omapdev = { ++ .name = "usbtll_omapdev", ++ .pwrdm = { .name = "usbhost_pwrdm" }, ++ .pdev_name = "ehci-omap", ++ .pdev_id = 0, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++/* DSS */ ++ ++static struct omapdev dsi_3xxx_omapdev = { ++ .name = "dsi_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev dsi_phy_3xxx_omapdev = { ++ .name = "dsi_phy_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev dsi_pll_3xxx_omapdev = { ++ .name = "dsi_pll_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev dss_3xxx_omapdev = { ++ .name = "dss_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev dispc_3xxx_omapdev = { ++ .name = "dispc_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev rfbi_3xxx_omapdev = { ++ .name = "rfbi_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev venc_3xxx_omapdev = { ++ .name = "venc_omapdev", ++ .pwrdm = { .name = "dss_pwrdm" }, ++ .pdev_name = "omapdss", ++ .pdev_id = -1, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++ ++/* CAM */ ++ ++ ++static struct omapdev isp_3xxx_omapdev = { ++ .name = "isp_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev isp_cbuff_3xxx_omapdev = { ++ .name = "isp_cbuff_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev ccp2_3xxx_omapdev = { ++ .name = "ccp2_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev ccdc_3xxx_omapdev = { ++ .name = "ccdc_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev hist_3xxx_omapdev = { ++ .name = "hist_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev h3a_3xxx_omapdev = { ++ .name = "h3a_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev prv_3xxx_omapdev = { ++ .name = "prv_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev rsz_3xxx_omapdev = { ++ .name = "rsz_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = 
OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev sbl_3xxx_omapdev = { ++ .name = "sbl_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++static struct omapdev csi2_3xxx_omapdev = { ++ .name = "csi2_omapdev", ++ .pwrdm = { .name = "cam_pwrdm" }, ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++}; ++ ++#endif /* CONFIG_ARCH_OMAP3 */ ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omap3-iommu.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omap3-iommu.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omap3-iommu.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omap3-iommu.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,108 @@ ++/* ++ * omap iommu: omap3 device registration ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++ ++#include ++ ++#define OMAP3_MMU1_BASE 0x480bd400 ++#define OMAP3_MMU2_BASE 0x5d000000 ++#define OMAP3_MMU1_IRQ 24 ++#define OMAP3_MMU2_IRQ 28 ++ ++static struct resource omap3_iommu_res[] = { ++ { /* Camera ISP MMU */ ++ .start = OMAP3_MMU1_BASE, ++ .end = OMAP3_MMU1_BASE + MMU_REG_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3_MMU1_IRQ, ++ .flags = IORESOURCE_IRQ, ++ }, ++ { /* IVA2.2 MMU */ ++ .start = OMAP3_MMU2_BASE, ++ .end = OMAP3_MMU2_BASE + MMU_REG_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = OMAP3_MMU2_IRQ, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++#define NR_IOMMU_RES (ARRAY_SIZE(omap3_iommu_res) / 2) ++ ++static const struct iommu_platform_data omap3_iommu_pdata[] __initconst = { ++ { ++ .name = "isp", ++ .nr_tlb_entries = 8, ++ .clk_name = "cam_ick", ++ }, ++#if 0 /* FIXME: Disabled until dspbridge starts to use iommu */ ++ { ++ .name = "iva2", ++ .nr_tlb_entries = 32, ++ .clk_name = "iva2_ck", ++ }, ++#endif ++}; ++#define NR_IOMMU_DEVICES ARRAY_SIZE(omap3_iommu_pdata) ++ ++static struct platform_device *omap3_iommu_pdev[NR_IOMMU_DEVICES]; ++ ++static int __init omap3_iommu_init(void) ++{ ++ int i, err; ++ ++ for (i = 0; i < NR_IOMMU_DEVICES; i++) { ++ struct platform_device *pdev; ++ ++ pdev = platform_device_alloc("omap-iommu", i + 1); ++ if (!pdev) { ++ err = -ENOMEM; ++ goto err_out; ++ } ++ err = platform_device_add_resources(pdev, ++ &omap3_iommu_res[2 * i], NR_IOMMU_RES); ++ if (err) ++ goto err_out; ++ err = platform_device_add_data(pdev, &omap3_iommu_pdata[i], ++ sizeof(omap3_iommu_pdata[0])); ++ if (err) ++ goto err_out; ++ err = platform_device_add(pdev); ++ if (err) ++ goto err_out; ++ omap3_iommu_pdev[i] = pdev; ++ } ++ return 0; ++ ++err_out: ++ while (i--) ++ platform_device_put(omap3_iommu_pdev[i]); ++ return err; ++} ++module_init(omap3_iommu_init); ++ ++static void __exit omap3_iommu_exit(void) ++{ ++ int i; ++ ++ for (i = 0; i < NR_IOMMU_DEVICES; i++) ++ platform_device_unregister(omap3_iommu_pdev[i]); ++} ++module_exit(omap3_iommu_exit); ++ ++MODULE_AUTHOR("Hiroshi DOYU"); ++MODULE_DESCRIPTION("omap iommu: omap3 device registration"); ++MODULE_LICENSE("GPL v2"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omap3-opp.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omap3-opp.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/omap3-opp.h 1970-01-01 01:00:00.000000000 +0100 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/omap3-opp.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,62 @@ ++#ifndef __OMAP3_OPP_H_ ++#define __OMAP3_OPP_H_ ++ ++#include ++ ++/* MPU speeds */ ++#define S600M 600000000 ++#define S550M 550000000 ++#define S500M 500000000 ++#define S250M 250000000 ++#define S125M 125000000 ++ ++/* DSP speeds */ ++#define S430M 430000000 ++#define S400M 400000000 ++#define S360M 360000000 ++#define S180M 180000000 ++#define S90M 90000000 ++ ++/* L3 speeds */ ++#define S83M 83000000 ++#define S166M 166000000 ++ ++static struct omap_opp omap3_mpu_rate_table[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {0, VDD1_OPP1, 0x1E}, ++ /*OPP2*/ ++ {S250M, VDD1_OPP2, 0x26}, ++ /*OPP3*/ ++ {S500M, VDD1_OPP3, 0x30}, ++ /*OPP4*/ ++ {S550M, VDD1_OPP4, 0x36}, ++ /*OPP5*/ ++ {S600M, VDD1_OPP5, 0x3C}, ++}; ++ ++static struct omap_opp omap3_l3_rate_table[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {0, VDD2_OPP1, 0x1E}, ++ /*OPP2*/ ++ {S83M, VDD2_OPP2, 0x24}, ++ /*OPP3*/ ++ {S166M, VDD2_OPP3, 0x2C}, ++}; ++ ++static struct omap_opp omap3_dsp_rate_table[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {S90M, VDD1_OPP1, 0x1E}, ++ /*OPP2*/ ++ {S180M, VDD1_OPP2, 0x26}, ++ /*OPP3*/ ++ {S360M, VDD1_OPP3, 0x30}, ++ /*OPP4*/ ++ {S400M, VDD1_OPP4, 0x36}, ++ /*OPP5*/ ++ {S430M, VDD1_OPP5, 0x3C}, ++}; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -23,17 +23,31 @@ + + #include + #include ++#include ++#include + + #include + #include + #include + + #include ++#include ++#include ++#include ++#include ++ ++#include "prm-regbits-34xx.h" + #include "pm.h" ++#include "smartreflex.h" + + unsigned short enable_dyn_sleep; + unsigned short clocks_off_while_idle; ++unsigned short enable_off_mode; ++EXPORT_SYMBOL(enable_off_mode); ++unsigned short voltage_off_while_idle; + atomic_t sleep_block = ATOMIC_INIT(0); ++static int vdd1_locked; ++static int vdd2_locked; + + static ssize_t idle_show(struct kobject *, struct kobj_attribute *, char *); + static ssize_t idle_store(struct kobject *k, struct kobj_attribute *, +@@ -45,6 +59,28 @@ static struct kobj_attribute sleep_while + static struct kobj_attribute clocks_off_while_idle_attr = + __ATTR(clocks_off_while_idle, 0644, idle_show, idle_store); + ++static struct kobj_attribute enable_off_mode_attr = ++ __ATTR(enable_off_mode, 0644, idle_show, idle_store); ++ ++static struct kobj_attribute voltage_off_while_idle_attr = ++ __ATTR(voltage_off_while_idle, 0644, idle_show, idle_store); ++ ++#ifdef CONFIG_OMAP_PM_SRF ++static ssize_t vdd_opp_show(struct kobject *, struct kobj_attribute *, char *); ++static ssize_t vdd_opp_store(struct kobject *k, struct kobj_attribute *, ++ const char *buf, size_t n); ++static struct kobj_attribute vdd1_opp_attr = ++ __ATTR(vdd1_opp, 0644, vdd_opp_show, vdd_opp_store); ++ ++static struct kobj_attribute vdd2_opp_attr = ++ __ATTR(vdd2_opp, 0644, vdd_opp_show, vdd_opp_store); ++static struct kobj_attribute vdd1_lock_attr = ++ __ATTR(vdd1_lock, 0644, vdd_opp_show, vdd_opp_store); ++static struct kobj_attribute vdd2_lock_attr = ++ __ATTR(vdd2_lock, 0644, vdd_opp_show, vdd_opp_store); ++ ++#endif ++ + static ssize_t idle_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) + { +@@ -52,6 +88,10 @@ static ssize_t idle_show(struct kobject + return sprintf(buf, "%hu\n", enable_dyn_sleep); 
+ else if (attr == &clocks_off_while_idle_attr) + return sprintf(buf, "%hu\n", clocks_off_while_idle); ++ else if (attr == &enable_off_mode_attr) ++ return sprintf(buf, "%hu\n", enable_off_mode); ++ else if (attr == &voltage_off_while_idle_attr) ++ return sprintf(buf, "%hu\n", voltage_off_while_idle); + else + return -EINVAL; + } +@@ -67,15 +107,100 @@ static ssize_t idle_store(struct kobject + return -EINVAL; + } + +- if (attr == &sleep_while_idle_attr) ++ if (attr == &sleep_while_idle_attr) { + enable_dyn_sleep = value; +- else if (attr == &clocks_off_while_idle_attr) ++ } else if (attr == &clocks_off_while_idle_attr) { + clocks_off_while_idle = value; ++ } else if (attr == &enable_off_mode_attr) { ++ enable_off_mode = value; ++ omap3_pm_off_mode_enable(enable_off_mode); ++ } else if (attr == &voltage_off_while_idle_attr) { ++ voltage_off_while_idle = value; ++ if (voltage_off_while_idle) ++ prm_set_mod_reg_bits(OMAP3430_SEL_OFF, OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTCTRL_OFFSET); ++ else ++ prm_clear_mod_reg_bits(OMAP3430_SEL_OFF, ++ OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTCTRL_OFFSET); ++ } else { ++ return -EINVAL; ++ } ++ return n; ++} ++ ++#ifdef CONFIG_OMAP_PM_SRF ++static ssize_t vdd_opp_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ if (attr == &vdd1_opp_attr) ++ return sprintf(buf, "%hu\n", resource_get_level("vdd1_opp")); ++ else if (attr == &vdd2_opp_attr) ++ return sprintf(buf, "%hu\n", resource_get_level("vdd2_opp")); ++ else if (attr == &vdd1_lock_attr) ++ return sprintf(buf, "%hu\n", resource_get_opp_lock(PRCM_VDD1)); ++ else if (attr == &vdd2_lock_attr) ++ return sprintf(buf, "%hu\n", resource_get_opp_lock(PRCM_VDD2)); + else + return -EINVAL; ++} ++ ++static ssize_t vdd_opp_store(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, size_t n) ++{ ++ unsigned short value; ++ int flags = 0; ++ ++ if (sscanf(buf, "%hu", &value) != 1) ++ return -EINVAL; ++ ++ /* Check locks */ ++ if (attr == &vdd1_lock_attr) { ++ flags = OPP_IGNORE_LOCK; ++ attr = &vdd1_opp_attr; ++ if (vdd1_locked && value == 0) { ++ resource_unlock_opp(PRCM_VDD1); ++ resource_refresh(); ++ vdd1_locked = 0; ++ return n; ++ } ++ if (vdd1_locked == 0 && value != 0) { ++ resource_lock_opp(PRCM_VDD1); ++ vdd1_locked = 1; ++ } ++ } else if (attr == &vdd2_lock_attr) { ++ flags = OPP_IGNORE_LOCK; ++ attr = &vdd2_opp_attr; ++ if (vdd2_locked && value == 0) { ++ resource_unlock_opp(PRCM_VDD2); ++ resource_refresh(); ++ vdd2_locked = 0; ++ return n; ++ } ++ if (vdd2_locked == 0 && value != 0) { ++ resource_lock_opp(PRCM_VDD2); ++ vdd2_locked = 1; ++ } ++ } + ++ if (attr == &vdd1_opp_attr) { ++ if (value < 1 || value > 5) { ++ printk(KERN_ERR "vdd_opp_store: Invalid value\n"); ++ return -EINVAL; ++ } ++ resource_set_opp_level(PRCM_VDD1, value, flags); ++ } else if (attr == &vdd2_opp_attr) { ++ if (value < 1 || value > 3) { ++ printk(KERN_ERR "vdd_opp_store: Invalid value\n"); ++ return -EINVAL; ++ } ++ resource_set_opp_level(PRCM_VDD2, value, flags); ++ } else { ++ return -EINVAL; ++ } + return n; + } ++#endif + + void omap2_block_sleep(void) + { +@@ -90,6 +215,21 @@ void omap2_allow_sleep(void) + BUG_ON(i < 0); + } + ++int get_last_off_on_transaction_id(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct omapdev *odev = omapdev_find_pdev(pdev); ++ struct powerdomain *pwrdm; ++ ++ if (odev) { ++ pwrdm = omapdev_get_pwrdm(odev); ++ if (pwrdm) ++ return pwrdm->state_counter[0] & INT_MAX; ++ } ++ ++ return 0; ++} ++ + static int __init 
omap_pm_init(void) + { + int error = -1; +@@ -112,7 +252,47 @@ static int __init omap_pm_init(void) + &clocks_off_while_idle_attr.attr); + if (error) + printk(KERN_ERR "sysfs_create_file failed: %d\n", error); ++ error = sysfs_create_file(power_kobj, ++ &enable_off_mode_attr.attr); ++ if (error) { ++ printk(KERN_ERR "sysfs_create_file failed: %d\n", error); ++ return error; ++ } ++#ifdef CONFIG_OMAP_PM_SRF ++ error = sysfs_create_file(power_kobj, ++ &vdd1_opp_attr.attr); ++ if (error) { ++ printk(KERN_ERR "sysfs_create_file failed: %d\n", error); ++ return error; ++ } ++ error = sysfs_create_file(power_kobj, ++ &vdd2_opp_attr.attr); ++ if (error) { ++ printk(KERN_ERR "sysfs_create_file failed: %d\n", error); ++ return error; ++ } ++ ++ error = sysfs_create_file(power_kobj, &vdd1_lock_attr.attr); ++ if (error) { ++ printk(KERN_ERR "sysfs_create_file failed: %d\n", error); ++ return error; ++ } + ++ error = sysfs_create_file(power_kobj, &vdd2_lock_attr.attr); ++ if (error) { ++ printk(KERN_ERR "sysfs_create_file failed: %d\n", error); ++ return error; ++ } ++#endif ++ voltage_off_while_idle = 0; ++ /* Going to 0V on anything under ES2.1 will eventually cause a crash */ ++ if (omap_rev() > OMAP3430_REV_ES2_0) { ++ error = sysfs_create_file(power_kobj, ++ &voltage_off_while_idle_attr.attr); ++ if (error) ++ printk(KERN_ERR "sysfs_create_file failed: %d\n", ++ error); ++ } + return error; + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm-debug.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm-debug.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm-debug.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm-debug.c 2011-09-04 11:31:05.000000000 +0200 +@@ -26,9 +26,12 @@ + #include + #include + #include ++#include + + #include + #include ++#include ++#include + + #include "prm.h" + #include "cm.h" +@@ -153,4 +156,424 @@ void omap2_pm_dump(int mode, int resume, + printk("%-20s: 0x%08x\n", regs[i].name, regs[i].val); + } + ++#ifdef CONFIG_DEBUG_FS ++#include ++#include ++ ++static void pm_dbg_regset_store(u32 *ptr); ++ ++struct dentry *pm_dbg_dir; ++ ++static int pm_dbg_init_done; ++ ++enum { ++ DEBUG_FILE_COUNTERS = 0, ++ DEBUG_FILE_TIMERS, ++ DEBUG_FILE_RESOURCES ++}; ++ ++struct pm_module_def { ++ char name[8]; /* Name of the module */ ++ short type; /* CM or PRM */ ++ unsigned short offset; ++ int low; /* First register address on this module */ ++ int high; /* Last register address on this module */ ++}; ++ ++#define MOD_CM 0 ++#define MOD_PRM 1 ++ ++static const struct pm_module_def pm_dbg_reg_modules[] = { ++ { "IVA2", MOD_CM, OMAP3430_IVA2_MOD, 0, 0x4c }, ++ { "OCP", MOD_CM, OCP_MOD, 0, 0x10 }, ++ { "MPU", MOD_CM, MPU_MOD, 4, 0x4c }, ++ { "CORE", MOD_CM, CORE_MOD, 0, 0x4c }, ++ { "SGX", MOD_CM, OMAP3430ES2_SGX_MOD, 0, 0x4c }, ++ { "WKUP", MOD_CM, WKUP_MOD, 0, 0x40 }, ++ { "CCR", MOD_CM, PLL_MOD, 0, 0x70 }, ++ { "DSS", MOD_CM, OMAP3430_DSS_MOD, 0, 0x4c }, ++ { "CAM", MOD_CM, OMAP3430_CAM_MOD, 0, 0x4c }, ++ { "PER", MOD_CM, OMAP3430_PER_MOD, 0, 0x4c }, ++ { "EMU", MOD_CM, OMAP3430_EMU_MOD, 0x40, 0x54 }, ++ { "NEON", MOD_CM, OMAP3430_NEON_MOD, 0x20, 0x48 }, ++ { "USB", MOD_CM, OMAP3430ES2_USBHOST_MOD, 0, 0x4c }, ++ ++ { "IVA2", MOD_PRM, OMAP3430_IVA2_MOD, 0x50, 0xfc }, ++ { "OCP", MOD_PRM, OCP_MOD, 4, 0x1c }, ++ { "MPU", MOD_PRM, MPU_MOD, 0x58, 0xe8 }, ++ { "CORE", MOD_PRM, CORE_MOD, 0x58, 0xf8 }, ++ { "SGX", MOD_PRM, OMAP3430ES2_SGX_MOD, 0x58, 0xe8 }, ++ { "WKUP", MOD_PRM, WKUP_MOD, 0xa0, 0xb0 }, ++ { "CCR", MOD_PRM, PLL_MOD, 0x40, 
0x70 }, ++ { "DSS", MOD_PRM, OMAP3430_DSS_MOD, 0x58, 0xe8 }, ++ { "CAM", MOD_PRM, OMAP3430_CAM_MOD, 0x58, 0xe8 }, ++ { "PER", MOD_PRM, OMAP3430_PER_MOD, 0x58, 0xe8 }, ++ { "EMU", MOD_PRM, OMAP3430_EMU_MOD, 0x58, 0xe4 }, ++ { "GLBL", MOD_PRM, OMAP3430_GR_MOD, 0x20, 0xe4 }, ++ { "NEON", MOD_PRM, OMAP3430_NEON_MOD, 0x58, 0xe8 }, ++ { "USB", MOD_PRM, OMAP3430ES2_USBHOST_MOD, 0x58, 0xe8 }, ++ { "", 0, 0, 0, 0 }, ++}; ++ ++#define PM_DBG_MAX_REG_SETS 4 ++ ++static void *pm_dbg_reg_set[PM_DBG_MAX_REG_SETS]; ++ ++static int pm_dbg_get_regset_size(void) ++{ ++ static int regset_size; ++ ++ if (regset_size == 0) { ++ int i = 0; ++ ++ while (pm_dbg_reg_modules[i].name[0] != 0) { ++ regset_size += pm_dbg_reg_modules[i].high + ++ 4 - pm_dbg_reg_modules[i].low; ++ i++; ++ } ++ } ++ return regset_size; ++} ++ ++static int pm_dbg_show_regs(struct seq_file *s, void *unused) ++{ ++ int i, j; ++ unsigned long val; ++ int reg_set = (int)s->private; ++ u32 *ptr; ++ void *store = NULL; ++ int regs; ++ int linefeed; ++ ++ if (reg_set == 0) { ++ store = kmalloc(pm_dbg_get_regset_size(), GFP_KERNEL); ++ ptr = store; ++ pm_dbg_regset_store(ptr); ++ } else { ++ ptr = pm_dbg_reg_set[reg_set - 1]; ++ } ++ ++ i = 0; ++ ++ while (pm_dbg_reg_modules[i].name[0] != 0) { ++ regs = 0; ++ linefeed = 0; ++ if (pm_dbg_reg_modules[i].type == MOD_CM) ++ seq_printf(s, "MOD: CM_%s (%08x)\n", ++ pm_dbg_reg_modules[i].name, ++ (u32)(OMAP2_CM_BASE + ++ pm_dbg_reg_modules[i].offset)); ++ else ++ seq_printf(s, "MOD: PRM_%s (%08x)\n", ++ pm_dbg_reg_modules[i].name, ++ (u32)(OMAP2_PRM_BASE + ++ pm_dbg_reg_modules[i].offset)); ++ ++ for (j = pm_dbg_reg_modules[i].low; ++ j <= pm_dbg_reg_modules[i].high; j += 4) { ++ val = *(ptr++); ++ if (val != 0) { ++ regs++; ++ if (linefeed) { ++ seq_printf(s, "\n"); ++ linefeed = 0; ++ } ++ seq_printf(s, " %02x => %08lx", j, val); ++ if (regs % 4 == 0) ++ linefeed = 1; ++ } ++ } ++ seq_printf(s, "\n"); ++ i++; ++ } ++ ++ if (store != NULL) ++ kfree(store); ++ ++ return 0; ++} ++ ++static void pm_dbg_regset_store(u32 *ptr) ++{ ++ int i, j; ++ u32 val; ++ ++ i = 0; ++ ++ while (pm_dbg_reg_modules[i].name[0] != 0) { ++ for (j = pm_dbg_reg_modules[i].low; ++ j <= pm_dbg_reg_modules[i].high; j += 4) { ++ if (pm_dbg_reg_modules[i].type == MOD_CM) ++ val = cm_read_mod_reg( ++ pm_dbg_reg_modules[i].offset, j); ++ else ++ val = prm_read_mod_reg( ++ pm_dbg_reg_modules[i].offset, j); ++ *(ptr++) = val; ++ } ++ i++; ++ } ++} ++ ++int pm_dbg_regset_save(int reg_set) ++{ ++ if (pm_dbg_reg_set[reg_set-1] == NULL) ++ return -EINVAL; ++ ++ pm_dbg_regset_store(pm_dbg_reg_set[reg_set-1]); ++ ++ return 0; ++} ++ ++static const char pwrdm_state_names[][4] = { ++ "OFF", ++ "RET", ++ "INA", ++ "ON" ++}; ++ ++void pm_dbg_update_time(struct powerdomain *pwrdm, int prev) ++{ ++ s64 t; ++ struct timespec now; ++ ++ if (!pm_dbg_init_done) ++ return ; ++ ++ /* Update timer for previous state */ ++ getnstimeofday(&now); ++ t = timespec_to_ns(&now); ++ ++ pwrdm->state_timer[prev] += t - pwrdm->timer; ++ ++ pwrdm->timer = t; ++} ++ ++static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user) ++{ ++ struct seq_file *s = (struct seq_file *)user; ++ ++ if (strcmp(clkdm->name, "emu_clkdm") == 0 || ++ strcmp(clkdm->name, "wkup_clkdm") == 0 || ++ strncmp(clkdm->name, "dpll", 4) == 0) ++ return 0; ++ ++ seq_printf(s, "%s->%s (%d)", clkdm->name, ++ clkdm->pwrdm.ptr->name, ++ atomic_read(&clkdm->usecount)); ++ seq_printf(s, "\n"); ++ ++ return 0; ++} ++ ++static int pwrdm_dbg_show_counter(struct powerdomain *pwrdm, void *user) 
++{ ++ struct seq_file *s = (struct seq_file *)user; ++ int i; ++ ++ if (strcmp(pwrdm->name, "emu_pwrdm") == 0 || ++ strcmp(pwrdm->name, "wkup_pwrdm") == 0 || ++ strncmp(pwrdm->name, "dpll", 4) == 0) ++ return 0; ++ ++ if (pwrdm->state != pwrdm_read_pwrst(pwrdm)) ++ printk(KERN_ERR "pwrdm state mismatch(%s) %d != %d\n", ++ pwrdm->name, pwrdm->state, pwrdm_read_pwrst(pwrdm)); ++ ++ seq_printf(s, "%s (%s)", pwrdm->name, ++ pwrdm_state_names[pwrdm->state]); ++ for (i = 0; i < 4; i++) ++ seq_printf(s, ",%s:%d", pwrdm_state_names[i], ++ pwrdm->state_counter[i]); ++ ++ seq_printf(s, "\n"); ++ ++ return 0; ++} ++ ++static int pwrdm_dbg_show_timer(struct powerdomain *pwrdm, void *user) ++{ ++ struct seq_file *s = (struct seq_file *)user; ++ int i; ++ ++ if (strcmp(pwrdm->name, "emu_pwrdm") == 0 || ++ strcmp(pwrdm->name, "wkup_pwrdm") == 0 || ++ strncmp(pwrdm->name, "dpll", 4) == 0) ++ return 0; ++ ++ pwrdm_state_switch(pwrdm); ++ ++ seq_printf(s, "%s (%s)", pwrdm->name, ++ pwrdm_state_names[pwrdm->state]); ++ ++ for (i = 0; i < 4; i++) ++ seq_printf(s, ",%s:%lld", pwrdm_state_names[i], ++ pwrdm->state_timer[i]); ++ ++ seq_printf(s, "\n"); ++ return 0; ++} ++ ++static int pm_dbg_show_counters(struct seq_file *s, void *unused) ++{ ++ pwrdm_for_each(pwrdm_dbg_show_counter, s); ++ clkdm_for_each(clkdm_dbg_show_counter, s); ++ ++ return 0; ++} ++ ++static int pm_dbg_show_timers(struct seq_file *s, void *unused) ++{ ++ pwrdm_for_each(pwrdm_dbg_show_timer, s); ++ return 0; ++} ++ ++static int pm_dbg_open(struct inode *inode, struct file *file) ++{ ++ switch ((int)inode->i_private) { ++ case DEBUG_FILE_COUNTERS: ++ return single_open(file, pm_dbg_show_counters, ++ &inode->i_private); ++ case DEBUG_FILE_RESOURCES: ++ return single_open(file, resource_dump_reqs, ++ &inode->i_private); ++ case DEBUG_FILE_TIMERS: ++ default: ++ return single_open(file, pm_dbg_show_timers, ++ &inode->i_private); ++ }; ++} ++ ++static int pm_dbg_reg_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, pm_dbg_show_regs, inode->i_private); ++} ++ ++static const struct file_operations debug_fops = { ++ .open = pm_dbg_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static const struct file_operations debug_reg_fops = { ++ .open = pm_dbg_reg_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++int pm_dbg_regset_init(int reg_set) ++{ ++ char name[2]; ++ ++ if (reg_set < 1 || reg_set > PM_DBG_MAX_REG_SETS || ++ pm_dbg_reg_set[reg_set-1] != NULL) ++ return -EINVAL; ++ ++ pm_dbg_reg_set[reg_set-1] = ++ kmalloc(pm_dbg_get_regset_size(), GFP_KERNEL); ++ ++ if (pm_dbg_reg_set[reg_set-1] == NULL) ++ return -ENOMEM; ++ ++ if (pm_dbg_dir != NULL) { ++ sprintf(name, "%d", reg_set); ++ ++ (void) debugfs_create_file(name, S_IRUGO, ++ pm_dbg_dir, (void *)reg_set, &debug_reg_fops); ++ } ++ ++ return 0; ++} ++ ++static int pwrdm_suspend_get(void *data, u64 *val) ++{ ++ *val = omap3_pm_get_suspend_state((struct powerdomain *)data); ++ ++ if (*val >= 0) ++ return 0; ++ return *val; ++} ++ ++static int pwrdm_suspend_set(void *data, u64 val) ++{ ++ return omap3_pm_set_suspend_state((struct powerdomain *)data, (int)val); ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get, ++ pwrdm_suspend_set, "%llu\n"); ++ ++static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir) ++{ ++ int i; ++ s64 t; ++ struct timespec now; ++ struct dentry *d; ++ ++ getnstimeofday(&now); ++ t = timespec_to_ns(&now); ++ ++ for (i = 0; i < 4; i++) 
++ pwrdm->state_timer[i] = 0; ++ ++ pwrdm->timer = t; ++ ++ if (strncmp(pwrdm->name, "dpll", 4) == 0) ++ return 0; ++ ++ d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir); ++ ++ (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d, ++ (void *)pwrdm, &pwrdm_suspend_fops); ++ ++ return 0; ++} ++ ++static int __init pm_dbg_init(void) ++{ ++ int i; ++ struct dentry *d; ++ char name[2]; ++ ++ printk(KERN_INFO "pm_dbg_init()\n"); ++ ++ d = debugfs_create_dir("pm_debug", NULL); ++ if (IS_ERR(d)) ++ return PTR_ERR(d); ++ ++ (void) debugfs_create_file("count", S_IRUGO, ++ d, (void *)DEBUG_FILE_COUNTERS, &debug_fops); ++ (void) debugfs_create_file("time", S_IRUGO, ++ d, (void *)DEBUG_FILE_TIMERS, &debug_fops); ++ (void) debugfs_create_file("resources", S_IRUGO, ++ d, (void *)DEBUG_FILE_RESOURCES, &debug_fops); ++ ++ pwrdm_for_each(pwrdms_setup, (void *)d); ++ ++ pm_dbg_dir = debugfs_create_dir("registers", d); ++ if (IS_ERR(pm_dbg_dir)) ++ return PTR_ERR(pm_dbg_dir); ++ ++ (void) debugfs_create_file("current", S_IRUGO, ++ pm_dbg_dir, (void *)0, &debug_reg_fops); ++ ++ for (i = 0; i < PM_DBG_MAX_REG_SETS; i++) ++ if (pm_dbg_reg_set[i] != NULL) { ++ sprintf(name, "%d", i+1); ++ (void) debugfs_create_file(name, S_IRUGO, ++ pm_dbg_dir, (void *)(i+1), &debug_reg_fops); ++ ++ } ++ ++ pm_dbg_init_done = 1; ++ ++ return 0; ++} ++late_initcall(pm_dbg_init); ++ ++#else ++void pm_dbg_update_time(struct powerdomain *pwrdm, int prev) {} ++#endif ++ + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -12,23 +12,72 @@ + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ ++#include + + extern int omap2_pm_init(void); + extern int omap3_pm_init(void); + ++#ifdef CONFIG_CPU_IDLE ++int omap3_idle_init(void); ++#else ++static inline int omap3_idle_init(void) { return 0; } ++#endif ++ + extern unsigned short enable_dyn_sleep; + extern unsigned short clocks_off_while_idle; ++extern unsigned short enable_off_mode; ++extern unsigned short voltage_off_while_idle; + extern atomic_t sleep_block; ++extern void *omap3_secure_ram_storage; + + extern void omap2_block_sleep(void); + extern void omap2_allow_sleep(void); ++#ifdef CONFIG_ARCH_OMAP3 ++struct prm_setup_times { ++ u16 clksetup; ++ u16 voltsetup_time1; ++ u16 voltsetup_time2; ++ u16 voltoffset; ++ u16 voltsetup2; ++}; ++#endif ++ ++extern void vfp_pm_save_context(void); ++extern void vfp_pm_restore_context(void); ++extern void vfp_enable(void); ++#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) ++extern void omap3_pm_off_mode_enable(int); ++extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm); ++extern int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state); ++extern void omap3_set_prm_setup_times(struct prm_setup_times *setup_times); ++#else ++#define omap3_pm_off_mode_enable(int) do {} while (0); ++#define omap3_pm_get_suspend_state(pwrdm) do {} while (0); ++#define omap3_pm_set_suspend_state(pwrdm, state) do {} while (0); ++#define omap3_set_prm_setup_times(setup_times) do {} while (0); ++#endif ++extern int set_pwrdm_state(struct powerdomain *pwrdm, u32 state); ++extern int resource_set_opp_level(int res, u32 target_level, int flags); ++extern int resource_access_opp_lock(int res, int delta); ++#define resource_lock_opp(res) resource_access_opp_lock(res, 1) ++#define resource_unlock_opp(res) resource_access_opp_lock(res, -1) ++#define resource_get_opp_lock(res) resource_access_opp_lock(res, 0) + ++#define OPP_IGNORE_LOCK 0x1 + + #ifdef CONFIG_PM_DEBUG + extern void omap2_pm_dump(int mode, int resume, unsigned int us); + extern int omap2_pm_debug; ++extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev); ++extern int pm_dbg_regset_save(int reg_set); ++extern int pm_dbg_regset_init(int reg_set); ++extern int resource_dump_reqs(struct seq_file *s, void *unused); + #else + #define omap2_pm_dump(mode, resume, us) do {} while (0); + #define omap2_pm_debug 0 ++#define pm_dbg_update_time(pwrdm, prev) do {} while (0); ++#define pm_dbg_regset_save(reg_set) do {} while (0); ++#define pm_dbg_regset_init(reg_set) do {} while (0); ++#define resource_dump_reqs(s,unused) do {} while (0); + #endif /* CONFIG_PM_DEBUG */ + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm24xx.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm24xx.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm24xx.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm24xx.c 2011-09-04 11:31:05.000000000 +0200 +@@ -114,7 +114,7 @@ static void omap2_enter_full_retention(v + l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL; + omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0); + +- omap2_gpio_prepare_for_retention(); ++ omap2_gpio_prepare_for_idle(PWRDM_POWER_RET); + + if (omap2_pm_debug) { + omap2_pm_dump(0, 0, 0); +@@ -147,7 +147,7 @@ no_sleep: + tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC; + omap2_pm_dump(0, 1, tmp); + } +- omap2_gpio_resume_after_retention(); ++ omap2_gpio_resume_after_idle(); + + clk_enable(osc_ck); + +@@ -349,7 +349,7 @@ static struct platform_suspend_ops omap_ + .valid = suspend_valid_only_mem, + }; + +-static int 
_pm_clkdm_enable_hwsup(struct clockdomain *clkdm) ++static int _pm_clkdm_enable_hwsup(struct clockdomain *clkdm, void *unused) + { + omap2_clkdm_allow_idle(clkdm); + return 0; +@@ -401,7 +401,7 @@ static void __init prcm_setup_regs(void) + omap2_clkdm_sleep(gfx_clkdm); + + /* Enable clockdomain hardware-supervised control for all clkdms */ +- clkdm_for_each(_pm_clkdm_enable_hwsup); ++ clkdm_for_each(_pm_clkdm_enable_hwsup, NULL); + + /* Enable clock autoidle for all domains */ + cm_write_mod_reg(OMAP24XX_AUTO_CAM | +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm34xx.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm34xx.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/pm34xx.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/pm34xx.c 2011-09-04 11:31:05.000000000 +0200 +@@ -7,6 +7,9 @@ + * Tony Lindgren + * Jouni Hogander + * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * Rajendra Nayak ++ * + * Copyright (C) 2005 Texas Instruments, Inc. + * Richard Woodruff + * +@@ -23,14 +26,27 @@ + #include + #include + #include ++#include + + #include + #include + #include ++#include + #include + #include ++#include + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include + + #include "cm.h" + #include "cm-regbits-34xx.h" +@@ -39,11 +55,67 @@ + #include "prm.h" + #include "pm.h" + #include "smartreflex.h" ++#include "sdrc.h" ++ ++/* Scratchpad offsets */ ++#define OMAP343X_TABLE_ADDRESS_OFFSET 0x31 ++#define OMAP343X_TABLE_VALUE_OFFSET 0x30 ++#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0x32 ++ ++/* IDLEST bitmasks for core status checks */ ++#define CORE_IDLEST1_ALL (\ ++ OMAP3430ES2_ST_MMC3_MASK|OMAP3430_ST_ICR_MASK|\ ++ OMAP3430_ST_AES2_MASK|OMAP3430_ST_SHA12_MASK|\ ++ OMAP3430_ST_DES2_MASK|OMAP3430_ST_MMC2_MASK|\ ++ OMAP3430_ST_MMC1_MASK|OMAP3430_ST_MSPRO_MASK|\ ++ OMAP3430_ST_HDQ_MASK|OMAP3430_ST_MCSPI4_MASK|\ ++ OMAP3430_ST_MCSPI3_MASK|OMAP3430_ST_MCSPI2_MASK|\ ++ OMAP3430_ST_MCSPI1_MASK|OMAP3430_ST_I2C3_MASK|\ ++ OMAP3430_ST_I2C2_MASK|OMAP3430_ST_I2C1_MASK|\ ++ OMAP3430_ST_UART2_MASK|OMAP3430_ST_UART1_MASK|\ ++ OMAP3430_ST_GPT11_MASK|OMAP3430_ST_GPT10_MASK|\ ++ OMAP3430_ST_MCBSP5_MASK|OMAP3430_ST_MCBSP1_MASK|\ ++ OMAP3430ES2_ST_HSOTGUSB_STDBY_MASK|\ ++ OMAP3430ES2_ST_SSI_IDLE_MASK|OMAP3430_ST_SDMA_MASK|\ ++ OMAP3430_ST_SSI_STDBY_MASK|OMAP3430_ST_D2D_MASK) ++#define CORE_IDLEST2_ALL (\ ++ OMAP3430_ST_PKA_MASK|OMAP3430_ST_AES1_MASK|\ ++ OMAP3430_ST_RNG_MASK|OMAP3430_ST_SHA11_MASK|\ ++ OMAP3430_ST_DES1_MASK) ++#define CORE_IDLEST3_ALL (\ ++ OMAP3430ES2_ST_USBTLL_MASK|OMAP3430ES2_ST_CPEFUSE_MASK) ++#define PER_IDLEST_ALL (\ ++ OMAP3430_ST_WDT3_MASK|OMAP3430_ST_MCBSP4_MASK|\ ++ OMAP3430_ST_MCBSP3_MASK|OMAP3430_ST_MCBSP2_MASK|\ ++ OMAP3430_ST_GPT9_MASK|OMAP3430_ST_GPT8_MASK|\ ++ OMAP3430_ST_GPT7_MASK|OMAP3430_ST_GPT6_MASK|\ ++ OMAP3430_ST_GPT5_MASK|OMAP3430_ST_GPT4_MASK|\ ++ OMAP3430_ST_GPT3_MASK|OMAP3430_ST_GPT2_MASK) ++ ++#define SGX_IDLEST_ALL OMAP_ST_GFX ++#define DSS_IDLEST_ALL (\ ++ OMAP3430ES2_ST_DSS_STDBY_MASK|\ ++ OMAP3430ES2_ST_DSS_IDLE_MASK) ++#define CAM_IDLEST_ALL OMAP3430_ST_CAM ++ ++#define OMAP343X_SSI_PORT1_BASE 0x48058000 ++#define CONTROL_PADCONF_MCBSP4_DX 0x158 ++#define CONTROL_PADCONF_UART1_TX 0x14c ++ ++static u16 ssi_rx_rdy; ++static u16 ssi_tx_dat; ++static u16 ssi_tx_flag; ++static int ssi_pads_saved; ++ ++/* Interrupt controller control register offset */ ++#define INTC_CONTROL 0x48 + + struct power_state { + struct powerdomain *pwrdm; + u32 next_state; 
++#ifdef CONFIG_SUSPEND + u32 saved_state; ++#endif + struct list_head node; + }; + +@@ -51,15 +123,248 @@ static LIST_HEAD(pwrst_list); + + static void (*_omap_sram_idle)(u32 *addr, int save_state); + ++static int (*_omap_save_secure_sram)(u32 *addr); ++ + static void (*saved_idle)(void); + +-static struct powerdomain *mpu_pwrdm; ++static struct powerdomain *mpu_pwrdm, *neon_pwrdm; ++static struct powerdomain *core_pwrdm, *per_pwrdm; ++static struct powerdomain *cam_pwrdm, *iva2_pwrdm, *dss_pwrdm, *usb_pwrdm; ++ ++static struct prm_setup_times prm_setup = { ++ .clksetup = 0xff, ++ .voltsetup_time1 = 0xfff, ++ .voltsetup_time2 = 0xfff, ++ .voltoffset = 0xff, ++ .voltsetup2 = 0xff, ++}; ++ ++static inline void omap3_per_save_context(void) ++{ ++ omap3_gpio_save_context(); ++} ++ ++static inline void omap3_per_restore_context(void) ++{ ++ omap3_gpio_restore_context(); ++} ++ ++static void omap3_enable_io_chain(void) ++{ ++ int timeout = 0; ++ ++ if (omap_rev() >= OMAP3430_REV_ES3_1) { ++ prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN, WKUP_MOD, PM_WKEN); ++ /* Do a readback to assure write has been done */ ++ prm_read_mod_reg(WKUP_MOD, PM_WKEN); ++ ++ while (!(prm_read_mod_reg(WKUP_MOD, PM_WKST) & ++ OMAP3430_ST_IO_CHAIN)) { ++ timeout++; ++ if (timeout > 1000) { ++ printk(KERN_ERR "Wake up daisy chain " ++ "activation failed.\n"); ++ return; ++ } ++ prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN, WKUP_MOD, PM_WKST); ++ } ++ } ++} ++ ++static void omap3_disable_io_chain(void) ++{ ++ if (omap_rev() >= OMAP3430_REV_ES3_1) ++ prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN, WKUP_MOD, PM_WKEN); ++} ++ ++/* ++ * The following 4 helper functions are a workaround for a hardware bug ++ * which causes SSI_RX_RDY, SSI_TX_DAT and SSI_TX_FLAG to be raised ++ * erronously after resume from off mode. We work around this issue by ++ * putting these pads into tristate mode and enable their internal pull ++ * down before we enter off * mode. After resuming off mode, we reset ++ * the SSI module and then restore the configuration of these pads to their ++ * original state. 
++ */ ++ ++static void save_ssi_padconf(void) ++{ ++ ++ ssi_rx_rdy = omap_ctrl_readw(OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_MCBSP4_DX); ++ ssi_tx_dat = omap_ctrl_readw(OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_UART1_TX); ++ ssi_tx_flag = omap_ctrl_readw(OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_UART1_TX + 2); ++} ++ ++static void ssi_padconf_save_mode(void) ++{ ++ u32 fck_core1; ++ ++ ssi_pads_saved = 0; ++ ++ fck_core1 = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); ++ ++ if (fck_core1 & 1) ++ return ; ++ ++ /* Set pad to save mode and enable pulldown */ ++ omap_ctrl_writew(0x10f, OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_MCBSP4_DX); ++ omap_ctrl_writew(0x10f, OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_UART1_TX); ++ omap_ctrl_writew(0x10f, OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_UART1_TX + 2); ++ ++ ssi_pads_saved = 1; ++} ++ ++static void reset_ssi(void) ++{ ++ int timeout = 0; ++ ++ /* reset the SSI module */ ++ ++ cm_set_mod_reg_bits(0x1, CORE_MOD, CM_ICLKEN); ++ cm_set_mod_reg_bits(0x1, CORE_MOD, CM_FCLKEN); ++ ++ while (cm_read_mod_reg(CORE_MOD, CM_IDLEST1) & ++ OMAP3430ES2_ST_SSI_IDLE_MASK) { ++ timeout++; ++ if (timeout > 1000) ++ break; ++ } ++ ++ omap_writel(SSI_SOFTRESET, OMAP343X_SSI_PORT1_BASE + ++ SSI_SYS_SYSCONFIG_REG); ++ dsb(); ++ ++ timeout = 0; ++ while (!(omap_readl(SSI_SYS_SYSSTATUS_REG + OMAP343X_SSI_PORT1_BASE) ++ & SSI_RESETDONE)) { ++ timeout++; ++ if (timeout > 1000) ++ break; ++ } ++ ++ cm_clear_mod_reg_bits(0x1, CORE_MOD, CM_ICLKEN); ++ cm_clear_mod_reg_bits(0x1, CORE_MOD, CM_FCLKEN); ++} ++ ++static void restore_ssi_padconf(void) ++{ ++ if (ssi_pads_saved) { ++ /* restore the SSI pads configuration */ ++ omap_ctrl_writew(ssi_rx_rdy, OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_MCBSP4_DX); ++ omap_ctrl_writew(ssi_tx_dat, OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_UART1_TX); ++ omap_ctrl_writew(ssi_tx_flag, OMAP2_CONTROL_PADCONFS + ++ CONTROL_PADCONF_UART1_TX + 2); ++ ssi_pads_saved = 0; ++ } ++} ++ ++int pm_check_idle(void) ++{ ++ if ((cm_read_mod_reg(CORE_MOD, CM_IDLEST1) & CORE_IDLEST1_ALL) ++ != CORE_IDLEST1_ALL) ++ return 0; ++ if ((cm_read_mod_reg(CORE_MOD, CM_IDLEST2) & CORE_IDLEST2_ALL) ++ != CORE_IDLEST2_ALL) ++ return 0; ++ if ((cm_read_mod_reg(CORE_MOD, OMAP3430_CM_IDLEST3) & CORE_IDLEST3_ALL) ++ != CORE_IDLEST3_ALL) ++ return 0; ++ if ((cm_read_mod_reg(OMAP3430_PER_MOD, CM_IDLEST1) & PER_IDLEST_ALL) ++ != PER_IDLEST_ALL) ++ return 0; ++ if (cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_IDLEST1) != SGX_IDLEST_ALL) ++ return 0; ++ if (cm_read_mod_reg(OMAP3430_CAM_MOD, CM_IDLEST1) != CAM_IDLEST_ALL) ++ return 0; ++ if (cm_read_mod_reg(OMAP3430_DSS_MOD, CM_IDLEST1) != DSS_IDLEST_ALL) ++ return 0; ++ return 1; ++} ++ ++static void omap3_core_save_context(void) ++{ ++ u32 control_padconf_off; ++ ++ /* Save the padconf registers */ ++ control_padconf_off = ++ omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF); ++ control_padconf_off |= START_PADCONF_SAVE; ++ omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF); ++ /* wait for the save to complete */ ++ while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS) ++ & PADCONF_SAVE_DONE)) ++ ; ++ /* Save the Interrupt controller context */ ++ omap3_intc_save_context(); ++ /* Save the GPMC context */ ++ omap3_gpmc_save_context(); ++ /* The VRFB context is saved while it's being configured */ ++ /* Save the system control module context, padconf already save above*/ ++ omap3_control_save_context(); ++ omap_dma_global_context_save(); ++} ++ ++static void omap3_core_restore_context(void) ++{ ++ 
/* Restore the control module context, padconf restored by h/w */ ++ omap3_control_restore_context(); ++ /* Restore the GPMC context */ ++ omap3_gpmc_restore_context(); ++ /* Restore the VRFB context */ ++ omap_vrfb_restore_context(); ++ /* Restore the interrupt controller context */ ++ omap3_intc_restore_context(); ++ omap_dma_global_context_restore(); ++} ++ ++/* ++ * FIXME: This function should be called before entering off-mode after ++ * OMAP3 secure services have been accessed. Currently it is only called ++ * once during boot sequence, but this works as we are not using secure ++ * services. ++ */ ++static void omap3_save_secure_ram_context(u32 target_mpu_state) ++{ ++ u32 ret; ++ ++ if (omap_type() != OMAP2_DEVICE_TYPE_GP) { ++ /* ++ * MPU next state must be set to POWER_ON temporarily, ++ * otherwise the WFI executed inside the ROM code ++ * will hang the system. ++ */ ++ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); ++ ret = _omap_save_secure_sram((u32 *) ++ __pa(omap3_secure_ram_storage)); ++ pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state); ++ /* Following is for error tracking, it should not happen */ ++ if (ret) { ++ printk(KERN_ERR "save_secure_sram() returns %08x\n", ++ ret); ++ while (1) ++ ; ++ } ++ } ++} + + /* PRCM Interrupt Handler for wakeups */ + static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) + { + u32 wkst, irqstatus_mpu; +- u32 fclk, iclk; ++ u32 fclk, iclk, clken_pll; ++ ++ /* Ensure that DPLL4_M2X2_CLK path is powered up */ ++ clken_pll = cm_read_mod_reg(OMAP3430_CCR_MOD, CM_CLKEN); ++ cm_clear_mod_reg_bits(1 << OMAP3430_PWRDN_96M_SHIFT, ++ OMAP3430_CCR_MOD, CM_CLKEN); + + /* WKUP */ + wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST); +@@ -69,7 +374,7 @@ static irqreturn_t prcm_interrupt_handle + cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN); + cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN); + prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST); +- while (prm_read_mod_reg(WKUP_MOD, PM_WKST)); ++ prm_read_mod_reg(WKUP_MOD, PM_WKST); + cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN); + cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN); + } +@@ -82,7 +387,7 @@ static irqreturn_t prcm_interrupt_handle + cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1); + cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1); + prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1); +- while (prm_read_mod_reg(CORE_MOD, PM_WKST1)); ++ prm_read_mod_reg(CORE_MOD, PM_WKST1); + cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1); + cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1); + } +@@ -93,7 +398,7 @@ static irqreturn_t prcm_interrupt_handle + cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3); + cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3); + prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3); +- while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3)); ++ prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3); + cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3); + cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3); + } +@@ -106,7 +411,7 @@ static irqreturn_t prcm_interrupt_handle + cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN); + cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN); + prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST); +- while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST)); ++ prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST); + cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN); + cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN); + } +@@ -125,8 +430,7 @@ static irqreturn_t prcm_interrupt_handle + CM_FCLKEN); + prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD, + PM_WKST); +- 
while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, +- PM_WKST)); ++ prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST); + cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD, + CM_ICLKEN); + cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD, +@@ -134,17 +438,61 @@ static irqreturn_t prcm_interrupt_handle + } + } + ++ cm_write_mod_reg(clken_pll, OMAP3430_CCR_MOD, CM_CLKEN); ++ + irqstatus_mpu = prm_read_mod_reg(OCP_MOD, + OMAP2_PRM_IRQSTATUS_MPU_OFFSET); + prm_write_mod_reg(irqstatus_mpu, OCP_MOD, + OMAP2_PRM_IRQSTATUS_MPU_OFFSET); + +- while (prm_read_mod_reg(OCP_MOD, OMAP2_PRM_IRQSTATUS_MPU_OFFSET)); ++ prm_read_mod_reg(OCP_MOD, OMAP2_PRM_IRQSTATUS_MPU_OFFSET); + + return IRQ_HANDLED; + } + +-static void omap_sram_idle(void) ++static void restore_control_register(u32 val) ++{ ++ __asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val)); ++} ++ ++void omap3_save_neon_context(void) ++{ ++#ifdef CONFIG_VFP ++ vfp_pm_save_context(); ++#endif ++} ++ ++void omap3_restore_neon_context(void) ++{ ++#ifdef CONFIG_VFP ++ vfp_enable(); ++ vfp_pm_restore_context(); ++#endif ++} ++ ++/* Function to restore the table entry that was modified for enabling MMU */ ++static void restore_table_entry(void) ++{ ++ u32 *scratchpad_address; ++ u32 previous_value, control_reg_value; ++ u32 *address; ++ scratchpad_address = OMAP2_IO_ADDRESS(OMAP343X_SCRATCHPAD); ++ /* Get address of entry that was modified */ ++ address = (u32 *)__raw_readl(scratchpad_address ++ + OMAP343X_TABLE_ADDRESS_OFFSET); ++ /* Get the previous value which needs to be restored */ ++ previous_value = __raw_readl(scratchpad_address ++ + OMAP343X_TABLE_VALUE_OFFSET); ++ address = __va(address); ++ *address = previous_value; ++ flush_tlb_all(); ++ control_reg_value = __raw_readl(scratchpad_address ++ + OMAP343X_CONTROL_REG_VALUE_OFFSET); ++ /* This will enable caches and prediction */ ++ restore_control_register(control_reg_value); ++} ++ ++void omap_sram_idle(void) + { + /* Variable to tell what needs to be saved and restored + * in omap_sram_idle*/ +@@ -152,91 +500,261 @@ static void omap_sram_idle(void) + /* save_state = 1 => Only L1 and logic lost */ + /* save_state = 2 => Only L2 lost */ + /* save_state = 3 => L1, L2 and logic lost */ +- int save_state = 0, mpu_next_state; ++ int save_state = 0; ++ int mpu_next_state = PWRDM_POWER_ON; ++ int neon_next_state = PWRDM_POWER_ON; ++ int per_next_state = PWRDM_POWER_ON; ++ int core_next_state = PWRDM_POWER_ON; ++ int dss_state = PWRDM_POWER_ON; ++ int iva2_state = PWRDM_POWER_ON; ++ int usb_state = PWRDM_POWER_ON; ++ int core_prev_state, per_prev_state; ++ u32 sdrc_pwr = 0; ++ int per_state_modified = 0; ++ int core_saved_state = PWRDM_POWER_ON; + + if (!_omap_sram_idle) + return; + ++ pwrdm_clear_all_prev_pwrst(mpu_pwrdm); ++ pwrdm_clear_all_prev_pwrst(neon_pwrdm); ++ pwrdm_clear_all_prev_pwrst(core_pwrdm); ++ pwrdm_clear_all_prev_pwrst(per_pwrdm); ++ + mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); + switch (mpu_next_state) { ++ case PWRDM_POWER_ON: + case PWRDM_POWER_RET: + /* No need to save context */ + save_state = 0; + break; ++ case PWRDM_POWER_OFF: ++ save_state = 3; ++ break; + default: + /* Invalid state */ + printk(KERN_ERR "Invalid mpu state in sram_idle\n"); + return; + } +- /* Disable smartreflex before entering WFI */ +- disable_smartreflex(SR1); +- disable_smartreflex(SR2); +- +- omap2_gpio_prepare_for_retention(); +- omap_uart_prepare_idle(0); +- omap_uart_prepare_idle(1); +- omap_uart_prepare_idle(2); +- +- _omap_sram_idle(NULL, save_state); +- +- omap_uart_resume_idle(2); +- 
omap_uart_resume_idle(1); +- omap_uart_resume_idle(0); +- omap2_gpio_resume_after_retention(); +- +- /* Enable smartreflex after WFI */ +- enable_smartreflex(SR1); +- enable_smartreflex(SR2); +-} + +-/* +- * Check if functional clocks are enabled before entering +- * sleep. This function could be behind CONFIG_PM_DEBUG +- * when all drivers are configuring their sysconfig registers +- * properly and using their clocks properly. +- */ +-static int omap3_fclks_active(void) +-{ +- u32 fck_core1 = 0, fck_core3 = 0, fck_sgx = 0, fck_dss = 0, +- fck_cam = 0, fck_per = 0, fck_usbhost = 0; ++ pwrdm_pre_transition(); + +- fck_core1 = cm_read_mod_reg(CORE_MOD, +- CM_FCLKEN1); +- if (omap_rev() > OMAP3430_REV_ES1_0) { +- fck_core3 = cm_read_mod_reg(CORE_MOD, +- OMAP3430ES2_CM_FCLKEN3); +- fck_sgx = cm_read_mod_reg(OMAP3430ES2_SGX_MOD, +- CM_FCLKEN); +- fck_usbhost = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, +- CM_FCLKEN); +- } else +- fck_sgx = cm_read_mod_reg(GFX_MOD, +- OMAP3430ES2_CM_FCLKEN3); +- fck_dss = cm_read_mod_reg(OMAP3430_DSS_MOD, +- CM_FCLKEN); +- fck_cam = cm_read_mod_reg(OMAP3430_CAM_MOD, +- CM_FCLKEN); +- fck_per = cm_read_mod_reg(OMAP3430_PER_MOD, +- CM_FCLKEN); +- +- /* Ignore UART clocks. These are handled by UART core (serial.c) */ +- fck_core1 &= ~(OMAP3430_EN_UART1 | OMAP3430_EN_UART2); +- fck_per &= ~OMAP3430_EN_UART3; +- +- if (fck_core1 | fck_core3 | fck_sgx | fck_dss | +- fck_cam | fck_per | fck_usbhost) +- return 1; +- return 0; ++ /* NEON control */ ++ if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON) { ++ pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state); ++ neon_next_state = mpu_next_state; ++ if (neon_next_state == PWRDM_POWER_OFF) ++ omap3_save_neon_context(); ++ } ++ ++ /* Get powerdomain state data */ ++ core_next_state = pwrdm_read_next_pwrst(core_pwrdm); ++ dss_state = pwrdm_read_pwrst(dss_pwrdm); ++ iva2_state = pwrdm_read_pwrst(iva2_pwrdm); ++ usb_state = pwrdm_read_pwrst(usb_pwrdm); ++ per_next_state = pwrdm_read_next_pwrst(per_pwrdm); ++ ++ /* Check if PER domain can enter OFF or not */ ++ if (per_next_state == PWRDM_POWER_OFF) { ++ if ((cm_read_mod_reg(OMAP3430_PER_MOD, CM_IDLEST) & ++ PER_IDLEST_ALL) != PER_IDLEST_ALL) { ++ per_next_state = PWRDM_POWER_RET; ++ pwrdm_set_next_pwrst(per_pwrdm, per_next_state); ++ per_state_modified = 1; ++ } ++ } ++ /* ++ * Check whether core will enter idle or not. This is needed ++ * because I/O pad wakeup will fail if core stays on and PER ++ * enters off. This will also prevent unnecessary core context ++ * save / restore. 
++ */ ++ if (core_next_state < PWRDM_POWER_ON) { ++ core_saved_state = core_next_state; ++ if ((cm_read_mod_reg(CORE_MOD, CM_IDLEST1) & CORE_IDLEST1_ALL) ++ != CORE_IDLEST1_ALL || ++ (cm_read_mod_reg(CORE_MOD, CM_IDLEST2) & CORE_IDLEST2_ALL) ++ != CORE_IDLEST2_ALL || ++ (cm_read_mod_reg(CORE_MOD, OMAP3430_CM_IDLEST3) & ++ CORE_IDLEST3_ALL) != CORE_IDLEST3_ALL) { ++ core_next_state = PWRDM_POWER_ON; ++ pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_ON); ++ } else if (core_next_state == PWRDM_POWER_OFF && ++ (dss_state == PWRDM_POWER_ON || ++ iva2_state >= PWRDM_POWER_RET || ++ usb_state >= PWRDM_POWER_RET || ++ per_next_state >= PWRDM_POWER_RET)) { ++ core_next_state = PWRDM_POWER_RET; ++ pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET); ++ } ++ } ++ ++ /* PER */ ++ if (per_next_state < PWRDM_POWER_ON) { ++ omap_uart_prepare_idle(2); ++ omap2_gpio_prepare_for_idle(per_next_state); ++ if (per_next_state == PWRDM_POWER_OFF) { ++ if (core_next_state == PWRDM_POWER_ON) { ++ per_next_state = PWRDM_POWER_RET; ++ pwrdm_set_next_pwrst(per_pwrdm, per_next_state); ++ per_state_modified = 1; ++ } else ++ omap3_per_save_context(); ++ } ++ } ++ ++ if (pwrdm_read_pwrst(cam_pwrdm) == PWRDM_POWER_ON) ++ omap2_clkdm_deny_idle(mpu_pwrdm->pwrdm_clkdms[0]); ++ ++ /* ++ * Disable smartreflex before entering WFI. ++ * Only needed if we are going to enter retention. ++ */ ++ if (mpu_next_state < PWRDM_POWER_ON) ++ disable_smartreflex(SR1); ++ if (core_next_state < PWRDM_POWER_ON) ++ disable_smartreflex(SR2); ++ ++ /* CORE */ ++ if (core_next_state < PWRDM_POWER_ON) { ++ omap_uart_prepare_idle(0); ++ omap_uart_prepare_idle(1); ++ if (core_next_state == PWRDM_POWER_OFF) { ++ prm_set_mod_reg_bits(OMAP3430_AUTO_OFF, ++ OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTCTRL_OFFSET); ++ ssi_padconf_save_mode(); ++ omap3_core_save_context(); ++ omap3_prcm_save_context(); ++ } ++ /* Enable IO-PAD and IO-CHAIN wakeups */ ++ prm_set_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN); ++ omap3_enable_io_chain(); ++ } ++ ++ /* ++ * Force SDRAM controller to self-refresh mode after timeout on ++ * autocount. This is needed on ES3.0 to avoid SDRAM controller ++ * hang-ups. ++ */ ++ if (omap_rev() >= OMAP3430_REV_ES3_0 && ++ omap_type() != OMAP2_DEVICE_TYPE_GP && ++ core_next_state == PWRDM_POWER_OFF) { ++ sdrc_pwr = sdrc_read_reg(SDRC_POWER); ++ sdrc_write_reg((sdrc_pwr & ++ ~(SDRC_POWER_AUTOCOUNT_MASK|SDRC_POWER_CLKCTRL_MASK)) | ++ (1 << SDRC_POWER_AUTOCOUNT_SHIFT) | ++ SDRC_SELF_REFRESH_ON_AUTOCOUNT, SDRC_POWER); ++ } ++ ++ /* Write voltage setup times which are changed dynamically */ ++ if (core_next_state == PWRDM_POWER_OFF) { ++ prm_write_mod_reg(0, OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTSETUP1_OFFSET); ++ prm_write_mod_reg(prm_setup.voltsetup2, OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTSETUP2_OFFSET); ++ prm_write_mod_reg(prm_setup.clksetup, OMAP3430_GR_MOD, ++ OMAP3_PRM_CLKSETUP_OFFSET); ++ } else { ++ prm_write_mod_reg((prm_setup.voltsetup_time2 << ++ OMAP3430_SETUP_TIME2_SHIFT) | ++ (prm_setup.voltsetup_time1 << ++ OMAP3430_SETUP_TIME1_SHIFT), ++ OMAP3430_GR_MOD, OMAP3_PRM_VOLTSETUP1_OFFSET); ++ prm_write_mod_reg(0, OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTSETUP2_OFFSET); ++ /* ++ * Use static 1 as only HF_CLKOUT is turned off. ++ * Value taken from application note SWPA152 ++ */ ++ prm_write_mod_reg(0x1, OMAP3430_GR_MOD, ++ OMAP3_PRM_CLKSETUP_OFFSET); ++ } ++ ++ /* ++ * omap3_arm_context is the location where ARM registers ++ * get saved. The restore path then reads from this ++ * location and restores them back. 
++ */ ++ _omap_sram_idle(omap3_arm_context, save_state); ++ ++ /* Restore normal SDRAM settings */ ++ if (omap_rev() >= OMAP3430_REV_ES3_0 && ++ omap_type() != OMAP2_DEVICE_TYPE_GP && ++ core_next_state == PWRDM_POWER_OFF) ++ sdrc_write_reg(sdrc_pwr, SDRC_POWER); ++ ++ /* Restore table entry modified during MMU restoration */ ++ if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF) ++ restore_table_entry(); ++ ++ if (neon_next_state == PWRDM_POWER_OFF) ++ omap3_restore_neon_context(); ++ ++ /* CORE */ ++ if (core_next_state < PWRDM_POWER_ON) { ++ core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); ++ if (core_prev_state == PWRDM_POWER_OFF) { ++ omap3_core_restore_context(); ++ omap3_prcm_restore_context(); ++ omap3_sram_restore_context(); ++ omap2_sms_restore_context(); ++ reset_ssi(); ++ } ++ omap_uart_resume_idle(0); ++ omap_uart_resume_idle(1); ++ if (core_next_state == PWRDM_POWER_OFF) { ++ prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF, ++ OMAP3430_GR_MOD, ++ OMAP3_PRM_VOLTCTRL_OFFSET); ++ restore_ssi_padconf(); ++ } ++ } ++ ++ /* ++ * Enable smartreflex after WFI. Only needed if we ++ * entered retention. ++ */ ++ if (mpu_next_state < PWRDM_POWER_ON) ++ enable_smartreflex(SR1); ++ if (core_next_state < PWRDM_POWER_ON) ++ enable_smartreflex(SR2); ++ ++ if (core_saved_state != core_next_state) ++ pwrdm_set_next_pwrst(core_pwrdm, core_saved_state); ++ ++ /* PER */ ++ if (per_next_state < PWRDM_POWER_ON) { ++ per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm); ++ if (per_prev_state == PWRDM_POWER_OFF) { ++ omap3_per_restore_context(); ++ omap3_gpio_restore_pad_context(0); ++ } else if (per_next_state == PWRDM_POWER_OFF) ++ omap3_gpio_restore_pad_context(1); ++ omap2_gpio_resume_after_idle(); ++ omap_uart_resume_idle(2); ++ if (per_state_modified) ++ pwrdm_set_next_pwrst(per_pwrdm, PWRDM_POWER_OFF); ++ } ++ ++ /* Disable IO-PAD and IO-CHAIN wakeup */ ++ if (core_next_state < PWRDM_POWER_ON) { ++ prm_clear_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN); ++ omap3_disable_io_chain(); ++ } ++ ++ ++ pwrdm_post_transition(); ++ ++ omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]); + } + +-static int omap3_can_sleep(void) ++int omap3_can_sleep(void) + { + if (!enable_dyn_sleep) + return 0; + if (!omap_uart_can_sleep()) + return 0; +- if (omap3_fclks_active()) +- return 0; + if (atomic_read(&sleep_block) > 0) + return 0; + return 1; +@@ -245,7 +763,7 @@ static int omap3_can_sleep(void) + /* This sets pwrdm state (other than mpu & core. Currently only ON & + * RET are supported. Function is assuming that clkdm doesn't have + * hw_sup mode enabled. 
*/ +-static int set_pwrdm_state(struct powerdomain *pwrdm, u32 state) ++int set_pwrdm_state(struct powerdomain *pwrdm, u32 state) + { + u32 cur_state; + int sleep_switch = 0; +@@ -280,6 +798,7 @@ static int set_pwrdm_state(struct powerd + if (sleep_switch) { + omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]); + pwrdm_wait_transition(pwrdm); ++ pwrdm_state_switch(pwrdm); + } + + err: +@@ -294,7 +813,7 @@ static void omap3_pm_idle(void) + if (!omap3_can_sleep()) + goto out; + +- if (omap_irq_pending()) ++ if (omap_irq_pending() || need_resched()) + goto out; + + omap_sram_idle(); +@@ -304,6 +823,7 @@ out: + local_irq_enable(); + } + ++#ifdef CONFIG_SUSPEND + static int omap3_pm_prepare(void) + { + saved_idle = pm_idle; +@@ -321,6 +841,12 @@ static int omap3_pm_suspend(void) + pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm); + /* Set ones wanted by suspend */ + list_for_each_entry(pwrst, &pwrst_list, node) { ++ /* Special handling for IVA2, just use current sleep state */ ++ if (pwrst->pwrdm == iva2_pwrdm) { ++ state = pwrdm_read_pwrst(pwrst->pwrdm); ++ if (state < PWRDM_POWER_ON) ++ pwrst->next_state = state; ++ } + if (set_pwrdm_state(pwrst->pwrdm, pwrst->next_state)) + goto restore; + if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm)) +@@ -328,12 +854,13 @@ static int omap3_pm_suspend(void) + } + + omap_uart_prepare_suspend(); ++ /* ACK pending interrupts */ ++ omap_writel(1, OMAP34XX_IC_BASE + INTC_CONTROL); + omap_sram_idle(); + + restore: + /* Restore next_pwrsts */ + list_for_each_entry(pwrst, &pwrst_list, node) { +- set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); + state = pwrdm_read_prev_pwrst(pwrst->pwrdm); + if (state > pwrst->next_state) { + printk(KERN_INFO "Powerdomain (%s) didn't enter " +@@ -341,6 +868,7 @@ restore: + pwrst->pwrdm->name, pwrst->next_state); + ret = -1; + } ++ set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); + } + if (ret) + printk(KERN_ERR "Could not enter target state in pm_suspend\n"); +@@ -372,12 +900,28 @@ static void omap3_pm_finish(void) + pm_idle = saved_idle; + } + ++/* Hooks to enable / disable UART interrupts during suspend */ ++static int omap3_pm_begin(suspend_state_t state) ++{ ++ omap_uart_enable_irqs(0); ++ return 0; ++} ++ ++static void omap3_pm_end(void) ++{ ++ omap_uart_enable_irqs(1); ++ return; ++} ++ + static struct platform_suspend_ops omap_pm_ops = { ++ .begin = omap3_pm_begin, ++ .end = omap3_pm_end, + .prepare = omap3_pm_prepare, + .enter = omap3_pm_enter, + .finish = omap3_pm_finish, + .valid = suspend_valid_only_mem, + }; ++#endif /* CONFIG_SUSPEND */ + + + /** +@@ -424,6 +968,8 @@ static void __init omap3_iva_idle(void) + + static void __init prcm_setup_regs(void) + { ++ struct clk *clk; ++ + /* reset modem */ + prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON | + OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST, +@@ -439,6 +985,23 @@ static void __init prcm_setup_regs(void) + prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP); + prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP); + if (omap_rev() > OMAP3430_REV_ES1_0) { ++ ++ /* ++ * This workaround is needed to prevent SGX and USBHOST from ++ * failing to transition to RET/OFF after a warm reset in OFF ++ * mode. Workaround sets a sleepdep of each of these domains ++ * with MPU, waits for a min 2 sysclk cycles and clears the sleepdep. 
++ */ ++ cm_write_mod_reg(OMAP3430_CM_SLEEPDEP_PER_EN_MPU, ++ OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(OMAP3430_CM_SLEEPDEP_PER_EN_MPU, ++ OMAP3430ES2_SGX_MOD, OMAP3430_CM_SLEEPDEP); ++ udelay(100); ++ cm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, ++ OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, ++ OMAP3430_CM_SLEEPDEP); ++ + prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP); + prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP); + } else +@@ -449,6 +1012,7 @@ static void __init prcm_setup_regs(void) + * Note that in the long run this should be done by clockfw + */ + cm_write_mod_reg( ++ OMAP3430_AUTO_MODEM | + OMAP3430ES2_AUTO_MMC3 | + OMAP3430ES2_AUTO_ICR | + OMAP3430_AUTO_AES2 | +@@ -476,7 +1040,7 @@ static void __init prcm_setup_regs(void) + OMAP3430_AUTO_OMAPCTRL | + OMAP3430ES1_AUTO_FSHOSTUSB | + OMAP3430_AUTO_HSOTGUSB | +- OMAP3430ES1_AUTO_D2D | /* This is es1 only */ ++ OMAP3430_AUTO_SAD2D | + OMAP3430_AUTO_SSI, + CORE_MOD, CM_AUTOIDLE1); + +@@ -490,6 +1054,7 @@ static void __init prcm_setup_regs(void) + + if (omap_rev() > OMAP3430_REV_ES1_0) { + cm_write_mod_reg( ++ OMAP3430_AUTO_MAD2D | + OMAP3430ES2_AUTO_USBTLL, + CORE_MOD, CM_AUTOIDLE3); + } +@@ -542,6 +1107,8 @@ static void __init prcm_setup_regs(void) + CM_AUTOIDLE); + } + ++ omap_ctrl_writel(OMAP3430_AUTOIDLE, OMAP2_CONTROL_SYSCONFIG); ++ + /* + * Set all plls to autoidle. This is needed until autoidle is + * enabled by clockfw +@@ -570,9 +1137,8 @@ static void __init prcm_setup_regs(void) + OMAP3_PRM_CLKSRC_CTRL_OFFSET); + + /* setup wakup source */ +- prm_write_mod_reg(OMAP3430_EN_IO | OMAP3430_EN_GPIO1 | +- OMAP3430_EN_GPT1 | OMAP3430_EN_GPT12, +- WKUP_MOD, PM_WKEN); ++ prm_write_mod_reg(OMAP3430_EN_GPIO1 | OMAP3430_EN_GPT1 | ++ OMAP3430_EN_GPT12, WKUP_MOD, PM_WKEN); + /* No need to write EN_IO, that is always enabled */ + prm_write_mod_reg(OMAP3430_EN_GPIO1 | OMAP3430_EN_GPT1 | + OMAP3430_EN_GPT12, +@@ -583,9 +1149,73 @@ static void __init prcm_setup_regs(void) + OCP_MOD, OMAP2_PRM_IRQENABLE_MPU_OFFSET); + + omap3_iva_idle(); ++ ++ /* ++ * Permanently enable USB interface clock, needed for the ++ * OTG_SYSCONFIG save / restore hack ++ */ ++ clk = clk_get(NULL, "hsotgusb_ick"); ++ clk_enable(clk); + } + +-static int __init pwrdms_setup(struct powerdomain *pwrdm) ++void omap3_pm_off_mode_enable(int enable) ++{ ++ struct power_state *pwrst; ++ u32 state; ++ ++ if (enable) ++ state = PWRDM_POWER_OFF; ++ else ++ state = PWRDM_POWER_RET; ++ ++#ifdef CONFIG_OMAP_PM_SRF ++ resource_lock_opp(PRCM_VDD1); ++ resource_lock_opp(PRCM_VDD2); ++ if (resource_refresh()) ++ printk(KERN_ERR "Error: could not refresh resources\n"); ++ resource_unlock_opp(PRCM_VDD1); ++ resource_unlock_opp(PRCM_VDD2); ++#endif ++ list_for_each_entry(pwrst, &pwrst_list, node) { ++ pwrst->next_state = state; ++ set_pwrdm_state(pwrst->pwrdm, state); ++ } ++} ++ ++int omap3_pm_get_suspend_state(struct powerdomain *pwrdm) ++{ ++ struct power_state *pwrst; ++ ++ list_for_each_entry(pwrst, &pwrst_list, node) { ++ if (pwrst->pwrdm == pwrdm) ++ return pwrst->next_state; ++ } ++ return -EINVAL; ++} ++ ++int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state) ++{ ++ struct power_state *pwrst; ++ ++ list_for_each_entry(pwrst, &pwrst_list, node) { ++ if (pwrst->pwrdm == pwrdm) { ++ pwrst->next_state = state; ++ return 0; ++ } ++ } ++ return -EINVAL; ++} ++ ++void omap3_set_prm_setup_times(struct prm_setup_times *setup_times) ++{ ++ prm_setup.clksetup = setup_times->clksetup; ++ prm_setup.voltsetup_time1 = 
setup_times->voltsetup_time1; ++ prm_setup.voltsetup_time2 = setup_times->voltsetup_time2; ++ prm_setup.voltoffset = setup_times->voltoffset; ++ prm_setup.voltsetup2 = setup_times->voltsetup2; ++} ++ ++static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) + { + struct power_state *pwrst; + +@@ -610,7 +1240,7 @@ static int __init pwrdms_setup(struct po + * supported. Initiate sleep transition for other clockdomains, if + * they are not used + */ +-static int __init clkdms_setup(struct clockdomain *clkdm) ++static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) + { + if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO) + omap2_clkdm_allow_idle(clkdm); +@@ -620,6 +1250,15 @@ static int __init clkdms_setup(struct cl + return 0; + } + ++void omap_push_sram_idle(void) ++{ ++ _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, ++ omap34xx_cpu_suspend_sz); ++ if (omap_type() != OMAP2_DEVICE_TYPE_GP) ++ _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, ++ save_secure_ram_context_sz); ++} ++ + int __init omap3_pm_init(void) + { + struct power_state *pwrst, *tmp; +@@ -640,13 +1279,13 @@ int __init omap3_pm_init(void) + goto err1; + } + +- ret = pwrdm_for_each(pwrdms_setup); ++ ret = pwrdm_for_each(pwrdms_setup, NULL); + if (ret) { + printk(KERN_ERR "Failed to setup powerdomains\n"); + goto err2; + } + +- (void) clkdm_for_each(clkdms_setup); ++ (void) clkdm_for_each(clkdms_setup, NULL); + + mpu_pwrdm = pwrdm_lookup("mpu_pwrdm"); + if (mpu_pwrdm == NULL) { +@@ -654,12 +1293,54 @@ int __init omap3_pm_init(void) + goto err2; + } + +- _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, +- omap34xx_cpu_suspend_sz); ++ neon_pwrdm = pwrdm_lookup("neon_pwrdm"); ++ per_pwrdm = pwrdm_lookup("per_pwrdm"); ++ core_pwrdm = pwrdm_lookup("core_pwrdm"); ++ cam_pwrdm = pwrdm_lookup("cam_pwrdm"); ++ iva2_pwrdm = pwrdm_lookup("iva2_pwrdm"); ++ dss_pwrdm = pwrdm_lookup("dss_pwrdm"); ++ usb_pwrdm = pwrdm_lookup("usbhost_pwrdm"); + ++ omap_push_sram_idle(); ++ ++#ifdef CONFIG_SUSPEND + suspend_set_ops(&omap_pm_ops); ++#endif /* CONFIG_SUSPEND */ + + pm_idle = omap3_pm_idle; ++ omap3_idle_init(); ++ ++ pwrdm_add_wkdep(neon_pwrdm, mpu_pwrdm); ++ /* ++ * REVISIT: This wkdep is only necessary when GPIO2-6 are enabled for ++ * IO-pad wakeup. 
Otherwise it will unnecessarily waste power ++ * waking up PER with every CORE wakeup - see ++ * http://marc.info/?l=linux-omap&m=121852150710062&w=2 ++ */ ++ pwrdm_add_wkdep(per_pwrdm, core_pwrdm); ++ ++ if (omap_type() != OMAP2_DEVICE_TYPE_GP) { ++ omap3_secure_ram_storage = ++ kmalloc(0x803F, GFP_KERNEL); ++ if (!omap3_secure_ram_storage) ++ printk(KERN_ERR "Memory allocation failed when" ++ "allocating for secure sram context\n"); ++ } ++ omap3_save_scratchpad_contents(); ++ ++ save_ssi_padconf(); ++ ++ if (omap_type() != OMAP2_DEVICE_TYPE_GP) { ++ local_irq_disable(); ++ local_fiq_disable(); ++ ++ omap_dma_global_context_save(); ++ omap3_save_secure_ram_context(PWRDM_POWER_ON); ++ omap_dma_global_context_restore(); ++ ++ local_irq_enable(); ++ local_fiq_enable(); ++ } + + err1: + return ret; +@@ -672,8 +1353,21 @@ err2: + return ret; + } + ++/* PRM_VC_CMD_VAL_0 specific bits */ ++#define OMAP3430_VC_CMD_VAL0_ON 0x30 ++#define OMAP3430_VC_CMD_VAL0_ONLP 0x1E ++#define OMAP3430_VC_CMD_VAL0_RET 0x1E ++#define OMAP3430_VC_CMD_VAL0_OFF 0x30 ++ ++/* PRM_VC_CMD_VAL_1 specific bits */ ++#define OMAP3430_VC_CMD_VAL1_ON 0x2C ++#define OMAP3430_VC_CMD_VAL1_ONLP 0x1E ++#define OMAP3430_VC_CMD_VAL1_RET 0x1E ++#define OMAP3430_VC_CMD_VAL1_OFF 0x2C ++ + static void __init configure_vc(void) + { ++ + prm_write_mod_reg((R_SRI2C_SLAVE_ADDR << OMAP3430_SMPS_SA1_SHIFT) | + (R_SRI2C_SLAVE_ADDR << OMAP3430_SMPS_SA0_SHIFT), + OMAP3430_GR_MOD, OMAP3_PRM_VC_SMPS_SA_OFFSET); +@@ -703,22 +1397,13 @@ static void __init configure_vc(void) + OMAP3430_GR_MOD, + OMAP3_PRM_VC_I2C_CFG_OFFSET); + +- /* Setup voltctrl and other setup times */ +- prm_write_mod_reg(OMAP3430_AUTO_RET, OMAP3430_GR_MOD, +- OMAP3_PRM_VOLTCTRL_OFFSET); +- +- prm_write_mod_reg(OMAP3430_CLKSETUP_DURATION, OMAP3430_GR_MOD, +- OMAP3_PRM_CLKSETUP_OFFSET); +- prm_write_mod_reg((OMAP3430_VOLTSETUP_TIME2 << +- OMAP3430_SETUP_TIME2_SHIFT) | +- (OMAP3430_VOLTSETUP_TIME1 << +- OMAP3430_SETUP_TIME1_SHIFT), +- OMAP3430_GR_MOD, OMAP3_PRM_VOLTSETUP1_OFFSET); ++ /* Setup value for voltctrl */ ++ prm_write_mod_reg(OMAP3430_AUTO_RET, ++ OMAP3430_GR_MOD, OMAP3_PRM_VOLTCTRL_OFFSET); + +- prm_write_mod_reg(OMAP3430_VOLTOFFSET_DURATION, OMAP3430_GR_MOD, ++ /* Write static setup times */ ++ prm_write_mod_reg(prm_setup.voltoffset, OMAP3430_GR_MOD, + OMAP3_PRM_VOLTOFFSET_OFFSET); +- prm_write_mod_reg(OMAP3430_VOLTSETUP2_DURATION, OMAP3430_GR_MOD, +- OMAP3_PRM_VOLTSETUP2_OFFSET); + } + + static int __init omap3_pm_early_init(void) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/powerdomain.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/powerdomain.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/powerdomain.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/powerdomain.c 2011-09-04 11:31:05.000000000 +0200 +@@ -35,6 +35,13 @@ + #include + #include + ++#include "pm.h" ++ ++enum { ++ PWRDM_STATE_NOW = 0, ++ PWRDM_STATE_PREV, ++}; ++ + /* pwrdm_list contains all registered struct powerdomains */ + static LIST_HEAD(pwrdm_list); + +@@ -102,6 +109,65 @@ static struct powerdomain *_pwrdm_deps_l + return pd->pwrdm; + } + ++static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) ++{ ++ ++ int prev; ++ int state; ++ ++ if (pwrdm == NULL) ++ return -EINVAL; ++ ++ state = pwrdm_read_pwrst(pwrdm); ++ ++ switch (flag) { ++ case PWRDM_STATE_NOW: ++ prev = pwrdm->state; ++ break; ++ case PWRDM_STATE_PREV: ++ prev = pwrdm_read_prev_pwrst(pwrdm); ++ if (pwrdm->state != prev) ++ pwrdm->state_counter[prev]++; ++ break; ++ 
default: ++ return -EINVAL; ++ } ++ ++ if (state != prev) ++ pwrdm->state_counter[state]++; ++ ++ pm_dbg_update_time(pwrdm, prev); ++ ++ pwrdm->state = state; ++ ++ return 0; ++} ++ ++static int _pwrdm_pre_transition_cb(struct powerdomain *pwrdm, void *unused) ++{ ++ pwrdm_clear_all_prev_pwrst(pwrdm); ++ _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW); ++ return 0; ++} ++ ++static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused) ++{ ++ _pwrdm_state_switch(pwrdm, PWRDM_STATE_PREV); ++ return 0; ++} ++ ++static __init void _pwrdm_setup(struct powerdomain *pwrdm) ++{ ++ int i; ++ ++ for (i = 0; i < 4; i++) ++ pwrdm->state_counter[i] = 0; ++ ++ pwrdm_wait_transition(pwrdm); ++ pwrdm->state = pwrdm_read_pwrst(pwrdm); ++ pwrdm->state_counter[pwrdm->state] = 1; ++ ++} + + /* Public functions */ + +@@ -117,9 +183,12 @@ void pwrdm_init(struct powerdomain **pwr + { + struct powerdomain **p = NULL; + +- if (pwrdm_list) +- for (p = pwrdm_list; *p; p++) ++ if (pwrdm_list) { ++ for (p = pwrdm_list; *p; p++) { + pwrdm_register(*p); ++ _pwrdm_setup(*p); ++ } ++ } + } + + /** +@@ -217,7 +286,8 @@ struct powerdomain *pwrdm_lookup(const c + * anything else to indicate failure; or -EINVAL if the function + * pointer is null. + */ +-int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm)) ++int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), ++ void *user) + { + struct powerdomain *temp_pwrdm; + unsigned long flags; +@@ -228,7 +298,7 @@ int pwrdm_for_each(int (*fn)(struct powe + + read_lock_irqsave(&pwrdm_rwlock, flags); + list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { +- ret = (*fn)(temp_pwrdm); ++ ret = (*fn)(temp_pwrdm, user); + if (ret) + break; + } +@@ -1110,4 +1180,36 @@ int pwrdm_wait_transition(struct powerdo + return 0; + } + ++int pwrdm_state_switch(struct powerdomain *pwrdm) ++{ ++ return _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW); ++} ++ ++int pwrdm_clkdm_state_switch(struct clockdomain *clkdm) ++{ ++ if (clkdm != NULL && clkdm->pwrdm.ptr != NULL) { ++ pwrdm_wait_transition(clkdm->pwrdm.ptr); ++ return pwrdm_state_switch(clkdm->pwrdm.ptr); ++ } ++ ++ return -EINVAL; ++} ++int pwrdm_clk_state_switch(struct clk *clk) ++{ ++ if (clk != NULL && clk->clkdm.ptr != NULL) ++ return pwrdm_clkdm_state_switch(clk->clkdm.ptr); ++ return -EINVAL; ++} ++ ++int pwrdm_pre_transition(void) ++{ ++ pwrdm_for_each(_pwrdm_pre_transition_cb, NULL); ++ return 0; ++} ++ ++int pwrdm_post_transition(void) ++{ ++ pwrdm_for_each(_pwrdm_post_transition_cb, NULL); ++ return 0; ++} + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/powerdomains.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/powerdomains.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/powerdomains.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/powerdomains.h 2011-09-04 11:31:05.000000000 +0200 +@@ -171,8 +171,8 @@ static struct powerdomain *powerdomains_ + &iva2_pwrdm, + &mpu_34xx_pwrdm, + &neon_pwrdm, +- &core_34xx_es1_pwrdm, +- &core_34xx_es2_pwrdm, ++ &core_34xx_pre_es3_1_pwrdm, ++ &core_34xx_es3_1_pwrdm, + &cam_pwrdm, + &dss_pwrdm, + &per_pwrdm, +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/powerdomains34xx.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/powerdomains34xx.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/powerdomains34xx.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/powerdomains34xx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -200,10 +200,12 @@ static struct powerdomain mpu_34xx_pwrdm + }; + + 
/* No wkdeps or sleepdeps for 34xx core apparently */ +-static struct powerdomain core_34xx_es1_pwrdm = { ++static struct powerdomain core_34xx_pre_es3_1_pwrdm = { + .name = "core_pwrdm", + .prcm_offs = CORE_MOD, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 | ++ CHIP_IS_OMAP3430ES2 | ++ CHIP_IS_OMAP3430ES3_0), + .pwrsts = PWRSTS_OFF_RET_ON, + .dep_bit = OMAP3430_EN_CORE_SHIFT, + .banks = 2, +@@ -218,10 +220,10 @@ static struct powerdomain core_34xx_es1_ + }; + + /* No wkdeps or sleepdeps for 34xx core apparently */ +-static struct powerdomain core_34xx_es2_pwrdm = { ++static struct powerdomain core_34xx_es3_1_pwrdm = { + .name = "core_pwrdm", + .prcm_offs = CORE_MOD, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES3_1), + .pwrsts = PWRSTS_OFF_RET_ON, + .dep_bit = OMAP3430_EN_CORE_SHIFT, + .flags = PWRDM_HAS_HDWR_SAR, /* for USBTLL only */ +@@ -263,7 +265,7 @@ static struct powerdomain dss_pwrdm = { + static struct powerdomain sgx_pwrdm = { + .name = "sgx_pwrdm", + .prcm_offs = OMAP3430ES2_SGX_MOD, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), + .wkdep_srcs = gfx_sgx_wkdeps, + .sleepdep_srcs = cam_gfx_sleepdeps, + /* XXX This is accurate for 3430 SGX, but what about GFX? */ +@@ -331,12 +333,18 @@ static struct powerdomain neon_pwrdm = { + static struct powerdomain usbhost_pwrdm = { + .name = "usbhost_pwrdm", + .prcm_offs = OMAP3430ES2_USBHOST_MOD, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), + .wkdep_srcs = per_usbhost_wkdeps, + .sleepdep_srcs = dss_per_usbhost_sleepdeps, + .pwrsts = PWRSTS_OFF_RET_ON, + .pwrsts_logic_ret = PWRDM_POWER_RET, +- .flags = PWRDM_HAS_HDWR_SAR, /* for USBHOST ctrlr only */ ++ /* ++ * REVISIT: Enabling usb host save and restore mechanism seems to ++ * leave the usb host domain permanently in ACTIVE mode after ++ * changing the usb host power domain state from OFF to active once. ++ * Disabling for now. ++ */ ++ /*.flags = PWRDM_HAS_HDWR_SAR,*/ /* for USBHOST ctrlr only */ + .banks = 1, + .pwrsts_mem_ret = { + [0] = PWRDM_POWER_RET, /* MEMRETSTATE */ +@@ -373,7 +381,7 @@ static struct powerdomain dpll4_pwrdm = + static struct powerdomain dpll5_pwrdm = { + .name = "dpll5_pwrdm", + .prcm_offs = PLL_MOD, +- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2), ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), + }; + + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/prcm.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/prcm.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/prcm.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/prcm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -7,6 +7,9 @@ + * + * Written by Tony Lindgren + * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * Rajendra Nayak ++ * + * Some pieces of code Copyright (C) 2005 Texas Instruments, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify +@@ -20,14 +23,103 @@ + + #include + #include ++#include ++#include + + #include "clock.h" ++#include "cm.h" + #include "prm.h" + #include "prm-regbits-24xx.h" + ++#define OTG_SYSCONFIG (OMAP34XX_HSUSB_OTG_BASE + 0x404) ++ + static void __iomem *prm_base; + static void __iomem *cm_base; + ++struct omap3_prcm_regs { ++ u32 control_padconf_sys_nirq; ++ u32 iva2_cm_clksel1; ++ u32 iva2_cm_clksel2; ++ u32 cm_sysconfig; ++ u32 sgx_cm_clksel; ++ u32 wkup_cm_clksel; ++ u32 dss_cm_clksel; ++ u32 cam_cm_clksel; ++ u32 per_cm_clksel; ++ u32 emu_cm_clksel; ++ u32 emu_cm_clkstctrl; ++ u32 pll_cm_autoidle2; ++ u32 pll_cm_clksel4; ++ u32 pll_cm_clksel5; ++ u32 pll_cm_clken; ++ u32 pll_cm_clken2; ++ u32 cm_polctrl; ++ u32 iva2_cm_fclken; ++ u32 iva2_cm_clken_pll; ++ u32 core_cm_fclken1; ++ u32 core_cm_fclken3; ++ u32 sgx_cm_fclken; ++ u32 wkup_cm_fclken; ++ u32 dss_cm_fclken; ++ u32 cam_cm_fclken; ++ u32 per_cm_fclken; ++ u32 usbhost_cm_fclken; ++ u32 core_cm_iclken1; ++ u32 core_cm_iclken2; ++ u32 core_cm_iclken3; ++ u32 sgx_cm_iclken; ++ u32 wkup_cm_iclken; ++ u32 dss_cm_iclken; ++ u32 cam_cm_iclken; ++ u32 per_cm_iclken; ++ u32 usbhost_cm_iclken; ++ u32 iva2_cm_autiidle2; ++ u32 mpu_cm_autoidle2; ++ u32 pll_cm_autoidle; ++ u32 iva2_cm_clkstctrl; ++ u32 mpu_cm_clkstctrl; ++ u32 core_cm_clkstctrl; ++ u32 sgx_cm_clkstctrl; ++ u32 dss_cm_clkstctrl; ++ u32 cam_cm_clkstctrl; ++ u32 per_cm_clkstctrl; ++ u32 neon_cm_clkstctrl; ++ u32 usbhost_cm_clkstctrl; ++ u32 core_cm_autoidle1; ++ u32 core_cm_autoidle2; ++ u32 core_cm_autoidle3; ++ u32 wkup_cm_autoidle; ++ u32 dss_cm_autoidle; ++ u32 cam_cm_autoidle; ++ u32 per_cm_autoidle; ++ u32 usbhost_cm_autoidle; ++ u32 sgx_cm_sleepdep; ++ u32 dss_cm_sleepdep; ++ u32 cam_cm_sleepdep; ++ u32 per_cm_sleepdep; ++ u32 usbhost_cm_sleepdep; ++ u32 cm_clkout_ctrl; ++ u32 prm_clkout_ctrl; ++ u32 sgx_pm_wkdep; ++ u32 dss_pm_wkdep; ++ u32 cam_pm_wkdep; ++ u32 per_pm_wkdep; ++ u32 neon_pm_wkdep; ++ u32 usbhost_pm_wkdep; ++ u32 core_pm_mpugrpsel1; ++ u32 iva2_pm_ivagrpsel1; ++ u32 core_pm_mpugrpsel3; ++ u32 core_pm_ivagrpsel3; ++ u32 wkup_pm_mpugrpsel; ++ u32 wkup_pm_ivagrpsel; ++ u32 per_pm_mpugrpsel; ++ u32 per_pm_ivagrpsel; ++ u32 wkup_pm_wken; ++ u32 otg_sysconfig; ++}; ++ ++struct omap3_prcm_regs prcm_context; ++ + u32 omap_prcm_get_reset_sources(void) + { + /* XXX This presumably needs modification for 34XX */ +@@ -35,6 +127,12 @@ u32 omap_prcm_get_reset_sources(void) + } + EXPORT_SYMBOL(omap_prcm_get_reset_sources); + ++/* ++ * HACK for RX51 boards previous to B3 which ++ * doesn't have a reset line to isp1707 transceiver ++ */ ++extern void musb_emergency_stop(void); ++ + /* Resets clock rates and reboots the system. Only called from system.h */ + void omap_prcm_arch_reset(char mode) + { +@@ -43,12 +141,24 @@ void omap_prcm_arch_reset(char mode) + + if (cpu_is_omap24xx()) + prcm_offs = WKUP_MOD; +- else if (cpu_is_omap34xx()) ++ else if (cpu_is_omap34xx()) { ++ u32 l; ++ + prcm_offs = OMAP3430_GR_MOD; +- else ++ l = ('B' << 24) | ('M' << 16) | mode; ++ /* Reserve the first word in scratchpad for communicating ++ * with the boot ROM. 
*/ ++ omap_writel(l, OMAP343X_SCRATCHPAD + 4); ++ } else + WARN_ON(1); + +- prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs, RM_RSTCTRL); ++ /* ++ * HACK for RX51 boards previous to B3 which ++ * doesn't have a reset line to isp1707 transceiver ++ */ ++ musb_emergency_stop(); ++ ++ prm_set_mod_reg_bits(OMAP_RST_GS, prcm_offs, RM_RSTCTRL); + } + + static inline u32 __omap_prcm_read(void __iomem *base, s16 module, u16 reg) +@@ -125,3 +235,314 @@ void __init omap2_set_globals_prcm(struc + prm_base = omap2_globals->prm; + cm_base = omap2_globals->cm; + } ++ ++#ifdef CONFIG_ARCH_OMAP3 ++void omap3_prcm_save_context(void) ++{ ++ prcm_context.control_padconf_sys_nirq = ++ omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_SYSNIRQ); ++ prcm_context.iva2_cm_clksel1 = ++ cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1); ++ prcm_context.iva2_cm_clksel2 = ++ cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2); ++ prcm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG); ++ prcm_context.sgx_cm_clksel = ++ cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL); ++ prcm_context.wkup_cm_clksel = cm_read_mod_reg(WKUP_MOD, CM_CLKSEL); ++ prcm_context.dss_cm_clksel = ++ cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL); ++ prcm_context.cam_cm_clksel = ++ cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL); ++ prcm_context.per_cm_clksel = ++ cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL); ++ prcm_context.emu_cm_clksel = ++ cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1); ++ prcm_context.emu_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSTCTRL); ++ prcm_context.pll_cm_autoidle2 = ++ cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2); ++ prcm_context.pll_cm_clksel4 = ++ cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4); ++ prcm_context.pll_cm_clksel5 = ++ cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5); ++ prcm_context.pll_cm_clken = ++ cm_read_mod_reg(PLL_MOD, CM_CLKEN); ++ prcm_context.pll_cm_clken2 = ++ cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2); ++ prcm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL); ++ prcm_context.iva2_cm_fclken = ++ cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN); ++ prcm_context.iva2_cm_clken_pll = cm_read_mod_reg(OMAP3430_IVA2_MOD, ++ OMAP3430_CM_CLKEN_PLL); ++ prcm_context.core_cm_fclken1 = ++ cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); ++ prcm_context.core_cm_fclken3 = ++ cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3); ++ prcm_context.sgx_cm_fclken = ++ cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN); ++ prcm_context.wkup_cm_fclken = ++ cm_read_mod_reg(WKUP_MOD, CM_FCLKEN); ++ prcm_context.dss_cm_fclken = ++ cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN); ++ prcm_context.cam_cm_fclken = ++ cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN); ++ prcm_context.per_cm_fclken = ++ cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN); ++ prcm_context.usbhost_cm_fclken = ++ cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN); ++ prcm_context.core_cm_iclken1 = ++ cm_read_mod_reg(CORE_MOD, CM_ICLKEN1); ++ prcm_context.core_cm_iclken2 = ++ cm_read_mod_reg(CORE_MOD, CM_ICLKEN2); ++ prcm_context.core_cm_iclken3 = ++ cm_read_mod_reg(CORE_MOD, CM_ICLKEN3); ++ prcm_context.sgx_cm_iclken = ++ cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN); ++ prcm_context.wkup_cm_iclken = ++ cm_read_mod_reg(WKUP_MOD, CM_ICLKEN); ++ prcm_context.dss_cm_iclken = ++ cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN); ++ prcm_context.cam_cm_iclken = ++ cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN); ++ prcm_context.per_cm_iclken = ++ cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN); ++ prcm_context.usbhost_cm_iclken = ++ cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN); 
++ prcm_context.iva2_cm_autiidle2 = ++ cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2); ++ prcm_context.mpu_cm_autoidle2 = ++ cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2); ++ prcm_context.pll_cm_autoidle = ++ cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); ++ prcm_context.iva2_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSTCTRL); ++ prcm_context.mpu_cm_clkstctrl = ++ cm_read_mod_reg(MPU_MOD, CM_CLKSTCTRL); ++ prcm_context.core_cm_clkstctrl = ++ cm_read_mod_reg(CORE_MOD, CM_CLKSTCTRL); ++ prcm_context.sgx_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSTCTRL); ++ prcm_context.dss_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSTCTRL); ++ prcm_context.cam_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSTCTRL); ++ prcm_context.per_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSTCTRL); ++ prcm_context.neon_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430_NEON_MOD, CM_CLKSTCTRL); ++ prcm_context.usbhost_cm_clkstctrl = ++ cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_CLKSTCTRL); ++ prcm_context.core_cm_autoidle1 = ++ cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1); ++ prcm_context.core_cm_autoidle2 = ++ cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2); ++ prcm_context.core_cm_autoidle3 = ++ cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3); ++ prcm_context.wkup_cm_autoidle = ++ cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE); ++ prcm_context.dss_cm_autoidle = ++ cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE); ++ prcm_context.cam_cm_autoidle = ++ cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE); ++ prcm_context.per_cm_autoidle = ++ cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); ++ prcm_context.usbhost_cm_autoidle = ++ cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE); ++ prcm_context.sgx_cm_sleepdep = ++ cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP3430_CM_SLEEPDEP); ++ prcm_context.dss_cm_sleepdep = ++ cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP); ++ prcm_context.cam_cm_sleepdep = ++ cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP); ++ prcm_context.per_cm_sleepdep = ++ cm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_CM_SLEEPDEP); ++ prcm_context.usbhost_cm_sleepdep = ++ cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP); ++ prcm_context.cm_clkout_ctrl = cm_read_mod_reg(OMAP3430_CCR_MOD, ++ OMAP3430_CM_CLKOUT_CTRL_OFFSET); ++ prcm_context.prm_clkout_ctrl = prm_read_mod_reg(OMAP3430_CCR_MOD, ++ OMAP3_PRM_CLKOUT_CTRL_OFFSET); ++ prcm_context.sgx_pm_wkdep = ++ prm_read_mod_reg(OMAP3430ES2_SGX_MOD, PM_WKDEP); ++ prcm_context.dss_pm_wkdep = ++ prm_read_mod_reg(OMAP3430_DSS_MOD, PM_WKDEP); ++ prcm_context.cam_pm_wkdep = ++ prm_read_mod_reg(OMAP3430_CAM_MOD, PM_WKDEP); ++ prcm_context.per_pm_wkdep = ++ prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKDEP); ++ prcm_context.neon_pm_wkdep = ++ prm_read_mod_reg(OMAP3430_NEON_MOD, PM_WKDEP); ++ prcm_context.usbhost_pm_wkdep = ++ prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKDEP); ++ prcm_context.core_pm_mpugrpsel1 = ++ prm_read_mod_reg(CORE_MOD, OMAP3430_PM_MPUGRPSEL1); ++ prcm_context.iva2_pm_ivagrpsel1 = ++ prm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_PM_IVAGRPSEL1); ++ prcm_context.core_pm_mpugrpsel3 = ++ prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_MPUGRPSEL3); ++ prcm_context.core_pm_ivagrpsel3 = ++ prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3); ++ prcm_context.wkup_pm_mpugrpsel = ++ prm_read_mod_reg(WKUP_MOD, OMAP3430_PM_MPUGRPSEL); ++ prcm_context.wkup_pm_ivagrpsel = ++ prm_read_mod_reg(WKUP_MOD, OMAP3430_PM_IVAGRPSEL); ++ prcm_context.per_pm_mpugrpsel = ++ prm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL); ++ 
prcm_context.per_pm_ivagrpsel = ++ prm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL); ++ prcm_context.wkup_pm_wken = prm_read_mod_reg(WKUP_MOD, PM_WKEN); ++ /* ++ * OTG_SYSCONFIG must be saved & restored because enabled AUTOIDLE ++ * bit will eventually block sleep due to OMAP bug ++ */ ++ prcm_context.otg_sysconfig = omap_readl(OTG_SYSCONFIG); ++ return; ++} ++ ++void omap3_prcm_restore_context(void) ++{ ++ omap_ctrl_writel(prcm_context.control_padconf_sys_nirq, ++ OMAP343X_CONTROL_PADCONF_SYSNIRQ); ++ cm_write_mod_reg(prcm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD, ++ CM_CLKSEL1); ++ cm_write_mod_reg(prcm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD, ++ CM_CLKSEL2); ++ __raw_writel(prcm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG); ++ cm_write_mod_reg(prcm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD, ++ CM_CLKSEL); ++ cm_write_mod_reg(prcm_context.wkup_cm_clksel, WKUP_MOD, CM_CLKSEL); ++ cm_write_mod_reg(prcm_context.dss_cm_clksel, OMAP3430_DSS_MOD, ++ CM_CLKSEL); ++ cm_write_mod_reg(prcm_context.cam_cm_clksel, OMAP3430_CAM_MOD, ++ CM_CLKSEL); ++ cm_write_mod_reg(prcm_context.per_cm_clksel, OMAP3430_PER_MOD, ++ CM_CLKSEL); ++ cm_write_mod_reg(prcm_context.emu_cm_clksel, OMAP3430_EMU_MOD, ++ CM_CLKSEL1); ++ cm_write_mod_reg(prcm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.pll_cm_autoidle2, PLL_MOD, ++ CM_AUTOIDLE2); ++ cm_write_mod_reg(prcm_context.pll_cm_clksel4, PLL_MOD, ++ OMAP3430ES2_CM_CLKSEL4); ++ cm_write_mod_reg(prcm_context.pll_cm_clksel5, PLL_MOD, ++ OMAP3430ES2_CM_CLKSEL5); ++ cm_write_mod_reg(prcm_context.pll_cm_clken, PLL_MOD, CM_CLKEN); ++ cm_write_mod_reg(prcm_context.pll_cm_clken2, PLL_MOD, ++ OMAP3430ES2_CM_CLKEN2); ++ __raw_writel(prcm_context.cm_polctrl, OMAP3430_CM_POLCTRL); ++ cm_write_mod_reg(prcm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD, ++ CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD, ++ OMAP3430_CM_CLKEN_PLL); ++ cm_write_mod_reg(prcm_context.core_cm_fclken1, CORE_MOD, CM_FCLKEN1); ++ cm_write_mod_reg(prcm_context.core_cm_fclken3, CORE_MOD, ++ OMAP3430ES2_CM_FCLKEN3); ++ cm_write_mod_reg(prcm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD, ++ CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.dss_cm_fclken, OMAP3430_DSS_MOD, ++ CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.cam_cm_fclken, OMAP3430_CAM_MOD, ++ CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.per_cm_fclken, OMAP3430_PER_MOD, ++ CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.usbhost_cm_fclken, ++ OMAP3430ES2_USBHOST_MOD, CM_FCLKEN); ++ cm_write_mod_reg(prcm_context.core_cm_iclken1, CORE_MOD, CM_ICLKEN1); ++ cm_write_mod_reg(prcm_context.core_cm_iclken2, CORE_MOD, CM_ICLKEN2); ++ cm_write_mod_reg(prcm_context.core_cm_iclken3, CORE_MOD, CM_ICLKEN3); ++ cm_write_mod_reg(prcm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD, ++ CM_ICLKEN); ++ cm_write_mod_reg(prcm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN); ++ cm_write_mod_reg(prcm_context.dss_cm_iclken, OMAP3430_DSS_MOD, ++ CM_ICLKEN); ++ cm_write_mod_reg(prcm_context.cam_cm_iclken, OMAP3430_CAM_MOD, ++ CM_ICLKEN); ++ cm_write_mod_reg(prcm_context.per_cm_iclken, OMAP3430_PER_MOD, ++ CM_ICLKEN); ++ cm_write_mod_reg(prcm_context.usbhost_cm_iclken, ++ OMAP3430ES2_USBHOST_MOD, CM_ICLKEN); ++ cm_write_mod_reg(prcm_context.iva2_cm_autiidle2, OMAP3430_IVA2_MOD, ++ CM_AUTOIDLE2); ++ cm_write_mod_reg(prcm_context.mpu_cm_autoidle2, MPU_MOD, CM_AUTOIDLE2); ++ cm_write_mod_reg(prcm_context.pll_cm_autoidle, PLL_MOD, 
CM_AUTOIDLE); ++ cm_write_mod_reg(prcm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.mpu_cm_clkstctrl, MPU_MOD, CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.core_cm_clkstctrl, CORE_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.sgx_cm_clkstctrl, OMAP3430ES2_SGX_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.per_cm_clkstctrl, OMAP3430_PER_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD, ++ CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.usbhost_cm_clkstctrl, ++ OMAP3430ES2_USBHOST_MOD, CM_CLKSTCTRL); ++ cm_write_mod_reg(prcm_context.core_cm_autoidle1, CORE_MOD, ++ CM_AUTOIDLE1); ++ cm_write_mod_reg(prcm_context.core_cm_autoidle2, CORE_MOD, ++ CM_AUTOIDLE2); ++ cm_write_mod_reg(prcm_context.core_cm_autoidle3, CORE_MOD, ++ CM_AUTOIDLE3); ++ cm_write_mod_reg(prcm_context.wkup_cm_autoidle, WKUP_MOD, CM_AUTOIDLE); ++ cm_write_mod_reg(prcm_context.dss_cm_autoidle, OMAP3430_DSS_MOD, ++ CM_AUTOIDLE); ++ cm_write_mod_reg(prcm_context.cam_cm_autoidle, OMAP3430_CAM_MOD, ++ CM_AUTOIDLE); ++ cm_write_mod_reg(prcm_context.per_cm_autoidle, OMAP3430_PER_MOD, ++ CM_AUTOIDLE); ++ cm_write_mod_reg(prcm_context.usbhost_cm_autoidle, ++ OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE); ++ cm_write_mod_reg(prcm_context.sgx_cm_sleepdep, OMAP3430ES2_SGX_MOD, ++ OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(prcm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD, ++ OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(prcm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD, ++ OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(prcm_context.per_cm_sleepdep, OMAP3430_PER_MOD, ++ OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(prcm_context.usbhost_cm_sleepdep, ++ OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP); ++ cm_write_mod_reg(prcm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD, ++ OMAP3430_CM_CLKOUT_CTRL_OFFSET); ++ prm_write_mod_reg(prcm_context.prm_clkout_ctrl, OMAP3430_CCR_MOD, ++ OMAP3_PRM_CLKOUT_CTRL_OFFSET); ++ prm_write_mod_reg(prcm_context.sgx_pm_wkdep, OMAP3430ES2_SGX_MOD, ++ PM_WKDEP); ++ prm_write_mod_reg(prcm_context.dss_pm_wkdep, OMAP3430_DSS_MOD, ++ PM_WKDEP); ++ prm_write_mod_reg(prcm_context.cam_pm_wkdep, OMAP3430_CAM_MOD, ++ PM_WKDEP); ++ prm_write_mod_reg(prcm_context.per_pm_wkdep, OMAP3430_PER_MOD, ++ PM_WKDEP); ++ prm_write_mod_reg(prcm_context.neon_pm_wkdep, OMAP3430_NEON_MOD, ++ PM_WKDEP); ++ prm_write_mod_reg(prcm_context.usbhost_pm_wkdep, ++ OMAP3430ES2_USBHOST_MOD, PM_WKDEP); ++ prm_write_mod_reg(prcm_context.core_pm_mpugrpsel1, CORE_MOD, ++ OMAP3430_PM_MPUGRPSEL1); ++ prm_write_mod_reg(prcm_context.iva2_pm_ivagrpsel1, OMAP3430_IVA2_MOD, ++ OMAP3430_PM_IVAGRPSEL1); ++ prm_write_mod_reg(prcm_context.core_pm_mpugrpsel3, CORE_MOD, ++ OMAP3430ES2_PM_MPUGRPSEL3); ++ prm_write_mod_reg(prcm_context.core_pm_ivagrpsel3, CORE_MOD, ++ OMAP3430ES2_PM_IVAGRPSEL3); ++ prm_write_mod_reg(prcm_context.wkup_pm_mpugrpsel, WKUP_MOD, ++ OMAP3430_PM_MPUGRPSEL); ++ prm_write_mod_reg(prcm_context.wkup_pm_ivagrpsel, WKUP_MOD, ++ OMAP3430_PM_IVAGRPSEL); ++ prm_write_mod_reg(prcm_context.per_pm_mpugrpsel, OMAP3430_PER_MOD, ++ OMAP3430_PM_MPUGRPSEL); ++ prm_write_mod_reg(prcm_context.per_pm_ivagrpsel, OMAP3430_PER_MOD, ++ OMAP3430_PM_IVAGRPSEL); ++ prm_write_mod_reg(prcm_context.wkup_pm_wken, WKUP_MOD, PM_WKEN); ++ omap_writel(prcm_context.otg_sysconfig, OTG_SYSCONFIG); ++ return; ++} 
++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/prm.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/prm.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/prm.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/prm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -137,9 +137,11 @@ + + #define OMAP3430_PM_MPUGRPSEL 0x00a4 + #define OMAP3430_PM_MPUGRPSEL1 OMAP3430_PM_MPUGRPSEL ++#define OMAP3430ES2_PM_MPUGRPSEL3 0x00f8 + + #define OMAP3430_PM_IVAGRPSEL 0x00a8 + #define OMAP3430_PM_IVAGRPSEL1 OMAP3430_PM_IVAGRPSEL ++#define OMAP3430ES2_PM_IVAGRPSEL3 0x00f4 + + #define OMAP3430_PM_PREPWSTST 0x00e8 + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/prm-regbits-34xx.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/prm-regbits-34xx.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/prm-regbits-34xx.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/prm-regbits-34xx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -365,6 +365,7 @@ + /* PM_PREPWSTST_GFX specific bits */ + + /* PM_WKEN_WKUP specific bits */ ++#define OMAP3430_EN_IO_CHAIN (1 << 16) + #define OMAP3430_EN_IO (1 << 8) + #define OMAP3430_EN_GPIO1 (1 << 3) + +@@ -373,6 +374,7 @@ + /* PM_IVA2GRPSEL_WKUP specific bits */ + + /* PM_WKST_WKUP specific bits */ ++#define OMAP3430_ST_IO_CHAIN (1 << 16) + #define OMAP3430_ST_IO (1 << 8) + + /* PRM_CLKSEL */ +@@ -409,7 +411,7 @@ + /* PM_PREPWSTST_CAM specific bits */ + + /* PM_PWSTCTRL_USBHOST specific bits */ +-#define OMAP3430ES2_SAVEANDRESTORE_SHIFT (1 << 4) ++#define OMAP3430ES2_SAVEANDRESTORE_SHIFT 4 + + /* RM_RSTST_PER specific bits */ + +@@ -462,18 +464,6 @@ + #define OMAP3430_VC_CMD_OFF_SHIFT 0 + #define OMAP3430_VC_CMD_OFF_MASK (0xFF << 0) + +-/* PRM_VC_CMD_VAL_0 specific bits */ +-#define OMAP3430_VC_CMD_VAL0_ON (0x3 << 4) +-#define OMAP3430_VC_CMD_VAL0_ONLP (0x3 << 3) +-#define OMAP3430_VC_CMD_VAL0_RET (0x3 << 3) +-#define OMAP3430_VC_CMD_VAL0_OFF (0x3 << 4) +- +-/* PRM_VC_CMD_VAL_1 specific bits */ +-#define OMAP3430_VC_CMD_VAL1_ON (0xB << 2) +-#define OMAP3430_VC_CMD_VAL1_ONLP (0x3 << 3) +-#define OMAP3430_VC_CMD_VAL1_RET (0x3 << 3) +-#define OMAP3430_VC_CMD_VAL1_OFF (0xB << 2) +- + /* PRM_VC_CH_CONF */ + #define OMAP3430_CMD1 (1 << 20) + #define OMAP3430_RACEN1 (1 << 19) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/resource34xx.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/resource34xx.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/resource34xx.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/resource34xx.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,456 @@ ++/* ++ * linux/arch/arm/mach-omap2/resource34xx.c ++ * OMAP3 resource init/change_level/validate_level functions ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Rajendra Nayak ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ * History: ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "smartreflex.h" ++#include "resource34xx.h" ++#include "pm.h" ++#include "cm.h" ++#include "cm-regbits-34xx.h" ++ ++/** ++ * init_latency - Initializes the mpu/core latency resource. 
++ * @resp: Latency resource to be initalized ++ * ++ * No return value. ++ */ ++void init_latency(struct shared_resource *resp) ++{ ++ resp->no_of_users = 0; ++ resp->curr_level = RES_DEFAULTLEVEL; ++ *((u8 *)resp->resource_data) = 0; ++ return; ++} ++ ++/** ++ * set_latency - Adds/Updates and removes the CPU_DMA_LATENCY in *pm_qos_params. ++ * @resp: resource pointer ++ * @latency: target latency to be set ++ * ++ * Returns 0 on success, or error values as returned by ++ * pm_qos_update_requirement/pm_qos_add_requirement. ++ */ ++int set_latency(struct shared_resource *resp, u32 latency) ++{ ++ u8 *pm_qos_req_added; ++ ++ if (resp->curr_level == latency) ++ return 0; ++ else ++ /* Update the resources current level */ ++ resp->curr_level = latency; ++ ++ pm_qos_req_added = resp->resource_data; ++ if (latency == RES_DEFAULTLEVEL) ++ /* No more users left, remove the pm_qos_req if present */ ++ if (*pm_qos_req_added) { ++ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, ++ resp->name); ++ *pm_qos_req_added = 0; ++ return 0; ++ } ++ ++ if (*pm_qos_req_added) { ++ return pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, ++ resp->name, latency); ++ } else { ++ *pm_qos_req_added = 1; ++ return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, ++ resp->name, latency); ++ } ++} ++ ++/** ++ * init_pd_latency - Initializes the power domain latency resource. ++ * @resp: Power Domain Latency resource to be initialized. ++ * ++ * No return value. ++ */ ++void init_pd_latency(struct shared_resource *resp) ++{ ++ struct pd_latency_db *pd_lat_db; ++ ++ resp->no_of_users = 0; ++ if (enable_off_mode) ++ resp->curr_level = PD_LATENCY_OFF; ++ else ++ resp->curr_level = PD_LATENCY_RET; ++ pd_lat_db = resp->resource_data; ++ /* Populate the power domain associated with the latency resource */ ++ pd_lat_db->pd = pwrdm_lookup(pd_lat_db->pwrdm_name); ++ set_pwrdm_state(pd_lat_db->pd, resp->curr_level); ++ return; ++} ++ ++/** ++ * set_pd_latency - Updates the curr_level of the power domain resource. ++ * @resp: Power domain latency resource. ++ * @latency: New latency value acceptable. ++ * ++ * This function maps the latency in microsecs to the acceptable ++ * Power domain state using the latency DB. ++ * It then programs the power domain to enter the target state. ++ * Always returns 0. 
++ */ ++int set_pd_latency(struct shared_resource *resp, u32 latency) ++{ ++ u32 pd_lat_level, ind; ++ struct pd_latency_db *pd_lat_db; ++ struct powerdomain *pwrdm; ++ ++ pd_lat_db = resp->resource_data; ++ pwrdm = pd_lat_db->pd; ++ pd_lat_level = PD_LATENCY_OFF; ++ /* using the latency db map to the appropriate PD state */ ++ for (ind = 0; ind < PD_LATENCY_MAXLEVEL; ind++) { ++ if (pd_lat_db->latency[ind] < latency) { ++ pd_lat_level = ind; ++ break; ++ } ++ } ++ ++ if (!enable_off_mode && pd_lat_level == PD_LATENCY_OFF) ++ pd_lat_level = PD_LATENCY_RET; ++ ++ resp->curr_level = pd_lat_level; ++ set_pwrdm_state(pwrdm, pd_lat_level); ++ return 0; ++} ++ ++static struct shared_resource *vdd1_resp; ++static struct shared_resource *vdd2_resp; ++static struct device dummy_mpu_dev; ++static struct device dummy_dsp_dev; ++static struct device vdd2_dev; ++static int vdd1_lock; ++static int vdd2_lock; ++static struct clk *dpll1_clk, *dpll2_clk, *dpll3_clk; ++static int curr_vdd1_opp; ++static int curr_vdd2_opp; ++ ++DEFINE_MUTEX(dvfs_mutex); ++ ++static unsigned short get_opp(struct omap_opp *opp_freq_table, ++ unsigned long freq) ++{ ++ struct omap_opp *prcm_config; ++ prcm_config = opp_freq_table; ++ ++ if (prcm_config->rate <= freq) ++ return prcm_config->opp_id; /* Return the Highest OPP */ ++ for (; prcm_config->rate; prcm_config--) ++ if (prcm_config->rate < freq) ++ return (prcm_config+1)->opp_id; ++ else if (prcm_config->rate == freq) ++ return prcm_config->opp_id; ++ /* Return the least OPP */ ++ return (prcm_config+1)->opp_id; ++} ++ ++/** ++ * init_opp - Initialize the OPP resource ++ */ ++void init_opp(struct shared_resource *resp) ++{ ++ resp->no_of_users = 0; ++ ++ if (!mpu_opps || !dsp_opps || !l3_opps) ++ return; ++ ++ /* Initialize the current level of the OPP resource ++ * to the opp set by u-boot. ++ */ ++ if (strcmp(resp->name, "vdd1_opp") == 0) { ++ vdd1_resp = resp; ++ dpll1_clk = clk_get(NULL, "dpll1_ck"); ++ dpll2_clk = clk_get(NULL, "dpll2_ck"); ++ resp->curr_level = get_opp(mpu_opps + MAX_VDD1_OPP, ++ dpll1_clk->rate); ++ curr_vdd1_opp = resp->curr_level; ++ } else if (strcmp(resp->name, "vdd2_opp") == 0) { ++ vdd2_resp = resp; ++ dpll3_clk = clk_get(NULL, "dpll3_m2_ck"); ++ resp->curr_level = get_opp(l3_opps + MAX_VDD2_OPP, ++ dpll2_clk->rate); ++ curr_vdd2_opp = resp->curr_level; ++ } ++ return; ++} ++ ++int resource_access_opp_lock(int res, int delta) ++{ ++ if (res == PRCM_VDD1) { ++ vdd1_lock += delta; ++ return vdd1_lock; ++ } else if (res == PRCM_VDD2) { ++ vdd2_lock += delta; ++ return vdd2_lock; ++ } ++ return -EINVAL; ++} ++ ++#ifndef CONFIG_CPU_FREQ ++static unsigned long compute_lpj(unsigned long ref, u_int div, u_int mult) ++{ ++ unsigned long new_jiffy_l, new_jiffy_h; ++ ++ /* ++ * Recalculate loops_per_jiffy. We do it this way to ++ * avoid math overflow on 32-bit machines. Maybe we ++ * should make this architecture dependent? If you have ++ * a better way of doing this, please replace! 
++ * ++ * new = old * mult / div ++ */ ++ new_jiffy_h = ref / div; ++ new_jiffy_l = (ref % div) / 100; ++ new_jiffy_h *= mult; ++ new_jiffy_l = new_jiffy_l * mult / div; ++ ++ return new_jiffy_h + new_jiffy_l * 100; ++} ++#endif ++ ++static int program_opp_freq(int res, int target_level, int current_level) ++{ ++ int ret = 0, l3_div; ++ int *curr_opp; ++ ++ if (res == PRCM_VDD1) { ++ curr_opp = &curr_vdd1_opp; ++ clk_set_rate(dpll1_clk, mpu_opps[target_level].rate); ++ clk_set_rate(dpll2_clk, dsp_opps[target_level].rate); ++#ifndef CONFIG_CPU_FREQ ++ /*Update loops_per_jiffy if processor speed is being changed*/ ++ loops_per_jiffy = compute_lpj(loops_per_jiffy, ++ mpu_opps[current_level].rate/1000, ++ mpu_opps[target_level].rate/1000); ++#endif ++ } else { ++ curr_opp = &curr_vdd2_opp; ++ l3_div = cm_read_mod_reg(CORE_MOD, CM_CLKSEL) & ++ OMAP3430_CLKSEL_L3_MASK; ++ ret = clk_set_rate(dpll3_clk, ++ l3_opps[target_level].rate * l3_div); ++ } ++ if (ret) ++ return current_level; ++#ifdef CONFIG_PM ++ omap3_save_scratchpad_contents(); ++#endif ++ *curr_opp = target_level; ++ return target_level; ++} ++ ++static int program_opp(int res, struct omap_opp *opp, int target_level, ++ int current_level) ++{ ++ int i, ret = 0, raise, sr_status; ++#ifdef CONFIG_OMAP_SMARTREFLEX ++ unsigned long t_opp, c_opp; ++ ++ t_opp = ID_VDD(res) | ID_OPP_NO(opp[target_level].opp_id); ++ c_opp = ID_VDD(res) | ID_OPP_NO(opp[current_level].opp_id); ++#endif ++ if (target_level > current_level) ++ raise = 1; ++ else ++ raise = 0; ++ ++#ifdef CONFIG_OMAP_SMARTREFLEX ++ sr_status = sr_stop_vddautocomap((get_vdd(t_opp) == PRCM_VDD1) ? ++ SR1 : SR2); ++#endif ++ for (i = 0; i < 2; i++) { ++ if (i == raise) ++ ret = program_opp_freq(res, target_level, ++ current_level); ++#ifdef CONFIG_OMAP_SMARTREFLEX ++ else ++ sr_voltagescale_vcbypass(t_opp, c_opp, ++ opp[target_level].vsel, ++ opp[current_level].vsel); ++#endif ++ } ++#ifdef CONFIG_OMAP_SMARTREFLEX ++ if (sr_status) ++ sr_start_vddautocomap((get_vdd(t_opp) == PRCM_VDD1) ? 
SR1 : SR2, ++ opp[target_level].opp_id); ++#endif ++ ++ return ret; ++} ++ ++int resource_set_opp_level(int res, u32 target_level, int flags) ++{ ++ unsigned long mpu_freq, mpu_old_freq; ++#ifdef CONFIG_CPU_FREQ ++ struct cpufreq_freqs freqs_notify; ++#endif ++ struct shared_resource *resp; ++ ++ if (res == PRCM_VDD1) ++ resp = vdd1_resp; ++ else if (res == PRCM_VDD2) ++ resp = vdd2_resp; ++ else ++ return 0; ++ ++ if (resp->curr_level == target_level) ++ return 0; ++ ++ if (!mpu_opps || !dsp_opps || !l3_opps) ++ return 0; ++ ++ mutex_lock(&dvfs_mutex); ++ ++ if (res == PRCM_VDD1) { ++ if (!(flags & OPP_IGNORE_LOCK) && vdd1_lock) { ++ mutex_unlock(&dvfs_mutex); ++ return 0; ++ } ++ mpu_old_freq = mpu_opps[resp->curr_level].rate; ++ mpu_freq = mpu_opps[target_level].rate; ++#ifdef CONFIG_CPU_FREQ ++ freqs_notify.old = mpu_old_freq/1000; ++ freqs_notify.new = mpu_freq/1000; ++ freqs_notify.cpu = 0; ++ /* Send pre notification to CPUFreq */ ++ cpufreq_notify_transition(&freqs_notify, CPUFREQ_PRECHANGE); ++#endif ++ ++ resp->curr_level = program_opp(res, mpu_opps, target_level, ++ resp->curr_level); ++ ++#ifdef CONFIG_CPU_FREQ ++ /* Send a post notification to CPUFreq */ ++ cpufreq_notify_transition(&freqs_notify, CPUFREQ_POSTCHANGE); ++#endif ++ } else { ++ if (!(flags & OPP_IGNORE_LOCK) && vdd2_lock) { ++ mutex_unlock(&dvfs_mutex); ++ return 0; ++ } ++ ++ resp->curr_level = program_opp(res, l3_opps, target_level, ++ resp->curr_level); ++ } ++ mutex_unlock(&dvfs_mutex); ++ return 0; ++} ++ ++int set_opp(struct shared_resource *resp, u32 target_level) ++{ ++ unsigned long tput; ++ unsigned long req_l3_freq; ++ int ind; ++ ++ if (resp == vdd1_resp) { ++ if (target_level < 3) ++ resource_release_locked("vdd2_opp", &vdd2_dev); ++ ++ resource_set_opp_level(PRCM_VDD1, target_level, 0); ++ /* ++ * For VDD1 OPP3 and above, make sure the interconnect ++ * is at 100Mhz or above. ++ * throughput in KiB/s for 100 Mhz = 100 * 1000 * 4. ++ */ ++ if (target_level >= 3) ++ resource_request_locked("vdd2_opp", &vdd2_dev, 400000); ++ } else if (resp == vdd2_resp) { ++ tput = target_level; ++ ++ /* Convert the tput in KiB/s to Bus frequency in MHz */ ++ req_l3_freq = (tput * 1000)/4; ++ ++ for (ind = 2; ind <= MAX_VDD2_OPP; ind++) ++ if ((l3_opps + ind)->rate >= req_l3_freq) { ++ target_level = ind; ++ break; ++ } ++ ++ /* Set the highest OPP possible */ ++ if (ind > MAX_VDD2_OPP) ++ target_level = ind-1; ++ resource_set_opp_level(PRCM_VDD2, target_level, 0); ++ } ++ return 0; ++} ++ ++/** ++ * validate_opp - Validates if valid VDD1 OPP's are passed as the ++ * target_level. ++ * VDD2 OPP levels are passed as L3 throughput, which are then mapped ++ * to an appropriate OPP. ++ */ ++int validate_opp(struct shared_resource *resp, u32 target_level) ++{ ++ return 0; ++} ++ ++/** ++ * init_freq - Initialize the frequency resource. ++ */ ++void init_freq(struct shared_resource *resp) ++{ ++ char *linked_res_name; ++ resp->no_of_users = 0; ++ ++ if (!mpu_opps || !dsp_opps) ++ return; ++ ++ linked_res_name = (char *)resp->resource_data; ++ /* Initialize the current level of the Freq resource ++ * to the frequency set by u-boot. 
++ */ ++ if (strcmp(resp->name, "mpu_freq") == 0) ++ /* MPU freq in Mhz */ ++ resp->curr_level = mpu_opps[curr_vdd1_opp].rate; ++ else if (strcmp(resp->name, "dsp_freq") == 0) ++ /* DSP freq in Mhz */ ++ resp->curr_level = dsp_opps[curr_vdd1_opp].rate; ++ return; ++} ++ ++int set_freq(struct shared_resource *resp, u32 target_level) ++{ ++ unsigned int vdd1_opp; ++ ++ if (!mpu_opps || !dsp_opps) ++ return 0; ++ ++ if (strcmp(resp->name, "mpu_freq") == 0) { ++ vdd1_opp = get_opp(mpu_opps + MAX_VDD1_OPP, target_level); ++ resource_request_locked("vdd1_opp", &dummy_mpu_dev, vdd1_opp); ++ } else if (strcmp(resp->name, "dsp_freq") == 0) { ++ vdd1_opp = get_opp(dsp_opps + MAX_VDD1_OPP, target_level); ++ resource_request_locked("vdd1_opp", &dummy_dsp_dev, vdd1_opp); ++ } ++ resp->curr_level = target_level; ++ return 0; ++} ++ ++int validate_freq(struct shared_resource *resp, u32 target_level) ++{ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/resource34xx.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/resource34xx.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/resource34xx.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/resource34xx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,304 @@ ++/* ++ * linux/arch/arm/mach-omap2/resource34xx.h ++ * ++ * OMAP3 resource definitions ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Rajendra Nayak ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ * ++ * History: ++ * ++ */ ++ ++#ifndef __ARCH_ARM_MACH_OMAP2_RESOURCE_H ++#define __ARCH_ARM_MACH_OMAP2_RESOURCE_H ++ ++#include ++#include ++#include ++#include ++#include ++#include "resource34xx_mutex.h" ++ ++extern int sr_voltagescale_vcbypass(u32 t_opp, u32 c_opp, u8 t_vsel, u8 c_vsel); ++ ++/* ++ * mpu_latency/core_latency are used to control the cpuidle C state. ++ */ ++void init_latency(struct shared_resource *resp); ++int set_latency(struct shared_resource *resp, u32 target_level); ++ ++static u8 mpu_qos_req_added; ++static u8 core_qos_req_added; ++ ++static struct shared_resource_ops lat_res_ops = { ++ .init = init_latency, ++ .change_level = set_latency, ++}; ++ ++static struct shared_resource mpu_latency = { ++ .name = "mpu_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &mpu_qos_req_added, ++ .ops = &lat_res_ops, ++}; ++ ++static struct shared_resource core_latency = { ++ .name = "core_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &core_qos_req_added, ++ .ops = &lat_res_ops, ++}; ++ ++/* ++ * Power domain Latencies are used to control the target Power ++ * domain state once all clocks for the power domain ++ * are released. 
++ */ ++void init_pd_latency(struct shared_resource *resp); ++int set_pd_latency(struct shared_resource *resp, u32 target_level); ++ ++/* Power Domain Latency levels */ ++#define PD_LATENCY_OFF 0x0 ++#define PD_LATENCY_RET 0x1 ++#define PD_LATENCY_INACT 0x2 ++#define PD_LATENCY_ON 0x3 ++ ++#define PD_LATENCY_MAXLEVEL 0x4 ++ ++struct pd_latency_db { ++ char *pwrdm_name; ++ struct powerdomain *pd; ++ /* Latencies for each state transition, stored in us */ ++ unsigned long latency[PD_LATENCY_MAXLEVEL]; ++}; ++ ++static struct shared_resource_ops pd_lat_res_ops = { ++ .init = init_pd_latency, ++ .change_level = set_pd_latency, ++}; ++ ++static struct shared_resource core_pwrdm_latency = { ++ .name = "core_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &core_qos_req_added, ++ .ops = &lat_res_ops, ++}; ++ ++#if !defined(CONFIG_MPU_BRIDGE) && !defined(CONFIG_MPU_BRIDGE_MODULE) ++static struct pd_latency_db iva2_pwrdm_lat_db = { ++ .pwrdm_name = "iva2_pwrdm", ++ .latency[PD_LATENCY_OFF] = 1100, ++ .latency[PD_LATENCY_RET] = 350, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource iva2_pwrdm_latency = { ++ .name = "iva2_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &iva2_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++#endif ++ ++static struct pd_latency_db gfx_pwrdm_lat_db = { ++ .pwrdm_name = "gfx_pwrdm", ++ .latency[PD_LATENCY_OFF] = 1000, ++ .latency[PD_LATENCY_RET] = 100, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct pd_latency_db sgx_pwrdm_lat_db = { ++ .pwrdm_name = "sgx_pwrdm", ++ .latency[PD_LATENCY_OFF] = 1000, ++ .latency[PD_LATENCY_RET] = 100, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource gfx_pwrdm_latency = { ++ .name = "gfx_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1), ++ .resource_data = &gfx_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++static struct shared_resource sgx_pwrdm_latency = { ++ .name = "sgx_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), ++ .resource_data = &sgx_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++static struct pd_latency_db dss_pwrdm_lat_db = { ++ .pwrdm_name = "dss_pwrdm", ++ .latency[PD_LATENCY_OFF] = 70, ++ .latency[PD_LATENCY_RET] = 20, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource dss_pwrdm_latency = { ++ .name = "dss_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &dss_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++static struct pd_latency_db cam_pwrdm_lat_db = { ++ .pwrdm_name = "cam_pwrdm", ++ .latency[PD_LATENCY_OFF] = 850, ++ .latency[PD_LATENCY_RET] = 35, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource cam_pwrdm_latency = { ++ .name = "cam_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &cam_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++static struct pd_latency_db per_pwrdm_lat_db = { ++ .pwrdm_name = "per_pwrdm", ++ .latency[PD_LATENCY_OFF] = 200, ++ .latency[PD_LATENCY_RET] = 110, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource per_pwrdm_latency = { ++ .name = "per_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &per_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; 
++ ++static struct pd_latency_db neon_pwrdm_lat_db = { ++ .pwrdm_name = "neon_pwrdm", ++ .latency[PD_LATENCY_OFF] = 200, ++ .latency[PD_LATENCY_RET] = 35, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource neon_pwrdm_latency = { ++ .name = "neon_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &neon_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++static struct pd_latency_db usbhost_pwrdm_lat_db = { ++ .pwrdm_name = "usbhost_pwrdm", ++ .latency[PD_LATENCY_OFF] = 800, ++ .latency[PD_LATENCY_RET] = 150, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource usbhost_pwrdm_latency = { ++ .name = "usbhost_pwrdm_latency", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2), ++ .resource_data = &usbhost_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++static struct pd_latency_db emu_pwrdm_lat_db = { ++ .pwrdm_name = "emu_pwrdm", ++ .latency[PD_LATENCY_OFF] = 1000, ++ .latency[PD_LATENCY_RET] = 100, ++ .latency[PD_LATENCY_INACT] = -1, ++ .latency[PD_LATENCY_ON] = 0 ++}; ++ ++static struct shared_resource emu_pwrdm_latency = { ++ .name = "emu_pwrdm", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &emu_pwrdm_lat_db, ++ .ops = &pd_lat_res_ops, ++}; ++ ++void init_opp(struct shared_resource *resp); ++int set_opp(struct shared_resource *resp, u32 target_level); ++int validate_opp(struct shared_resource *resp, u32 target_level); ++void init_freq(struct shared_resource *resp); ++int set_freq(struct shared_resource *resp, u32 target_level); ++int validate_freq(struct shared_resource *resp, u32 target_level); ++ ++static struct shared_resource_ops opp_res_ops = { ++ .init = init_opp, ++ .change_level = set_opp, ++ .validate_level = validate_opp, ++}; ++ ++static struct shared_resource vdd1_opp = { ++ .name = "vdd1_opp", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .ops = &opp_res_ops, ++}; ++ ++static struct shared_resource vdd2_opp = { ++ .name = "vdd2_opp", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .ops = &opp_res_ops, ++}; ++ ++static char linked_res[] = "vdd1_opp"; ++ ++static struct shared_resource_ops freq_res_ops = { ++ .init = init_freq, ++ .change_level = set_freq, ++ .validate_level = validate_freq, ++}; ++ ++static struct shared_resource mpu_freq = { ++ .name = "mpu_freq", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &linked_res, ++ .ops = &freq_res_ops, ++}; ++ ++static struct shared_resource dsp_freq = { ++ .name = "dsp_freq", ++ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), ++ .resource_data = &linked_res, ++ .ops = &freq_res_ops, ++}; ++ ++struct shared_resource *resources_omap[] __initdata = { ++ &mpu_latency, ++ &core_latency, ++ /* Power Domain Latency resources */ ++ &core_pwrdm_latency, ++#if !defined(CONFIG_MPU_BRIDGE) && !defined(CONFIG_MPU_BRIDGE_MODULE) ++ &iva2_pwrdm_latency, ++#endif ++ &gfx_pwrdm_latency, ++ &sgx_pwrdm_latency, ++ &dss_pwrdm_latency, ++ &cam_pwrdm_latency, ++ &per_pwrdm_latency, ++ &neon_pwrdm_latency, ++ &usbhost_pwrdm_latency, ++ &emu_pwrdm_latency, ++ /* OPP/frequency resources */ ++ &vdd1_opp, ++ &vdd2_opp, ++ &mpu_freq, ++ &dsp_freq, ++ NULL ++}; ++ ++#endif /* __ARCH_ARM_MACH_OMAP2_RESOURCE_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/resource34xx_mutex.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/resource34xx_mutex.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/resource34xx_mutex.h 1970-01-01 01:00:00.000000000 +0100 
++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/resource34xx_mutex.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,9 @@ ++#ifndef __ARCH_ARM_MACH_OMAP2_RESOURCE_MUTEX_H ++#define __ARCH_ARM_MACH_OMAP2_RESOURCE_MUTEX_H ++ ++extern struct mutex dvfs_mutex; ++ ++#endif ++ ++ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/rx51_camera_btn.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/rx51_camera_btn.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/rx51_camera_btn.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/rx51_camera_btn.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,247 @@ ++/** ++ * arch/arm/mach-omap2/rx51_camera_btn.c ++ * ++ * Driver for sending camera button events to input-layer ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * ++ * Written by Henrik Saari ++ * ++ * This file is subject to the terms and conditions of the GNU General ++ * Public License. See the file "COPYING" in the main directory of this ++ * archive for more details. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CAM_IRQ_FLAGS (IRQF_TRIGGER_RISING | \ ++ IRQF_TRIGGER_FALLING) ++ ++struct rx51_cam_button { ++ struct input_dev *input; ++ ++ int focus; ++ int shutter; ++}; ++ ++static irqreturn_t rx51_cam_shutter_irq(int irq, void *_button) ++{ ++ struct rx51_cam_button *button = _button; ++ int gpio; ++ ++ gpio = irq_to_gpio(irq); ++ input_report_key(button->input, KEY_CAMERA, ++ !gpio_get_value(gpio)); ++ input_sync(button->input); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t rx51_cam_focus_irq(int irq, void *_button) ++{ ++ struct rx51_cam_button *button = _button; ++ int gpio; ++ ++ gpio = irq_to_gpio(irq); ++ input_report_key(button->input, KEY_F10, ++ !gpio_get_value(gpio)); ++ input_sync(button->input); ++ ++ return IRQ_HANDLED; ++} ++ ++/* SYSFS */ ++static ssize_t rx51_show_focus(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct rx51_cam_button *button = dev_get_drvdata(dev); ++ ++ /* report 1 when button is pressed. */ ++ return sprintf(buf, "%d\n", !gpio_get_value(button->focus)); ++} ++ ++static DEVICE_ATTR(focus_btn, S_IRUGO, rx51_show_focus, NULL); ++ ++static ssize_t rx51_show_shutter(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct rx51_cam_button *button = dev_get_drvdata(dev); ++ ++ /* report 1 when button is pressed. 
*/ ++ return sprintf(buf, "%d\n", !gpio_get_value(button->shutter)); ++} ++ ++static DEVICE_ATTR(shutter_btn, S_IRUGO, rx51_show_shutter, NULL); ++ ++static int __init rx51_camera_btn_probe(struct platform_device *pdev) ++{ ++ struct camera_button_platform_data *pdata = pdev->dev.platform_data; ++ struct rx51_cam_button *button; ++ int err = 0; ++ ++ button = kzalloc(sizeof(*button), GFP_KERNEL); ++ if (!button) { ++ err = -ENOMEM; ++ goto err_alloc; ++ } ++ ++ if (!pdata || !pdata->shutter) { ++ dev_err(&pdev->dev, "Missing platform_data\n"); ++ err = -EINVAL; ++ goto err_pdata; ++ } ++ ++ err = gpio_request(pdata->shutter, "shutter"); ++ if (err) { ++ dev_err(&pdev->dev, "Cannot request gpio %d\n", pdata->shutter); ++ goto err_shutter; ++ } ++ gpio_direction_input(pdata->shutter); ++ ++ err = request_irq(gpio_to_irq(pdata->shutter), rx51_cam_shutter_irq, ++ CAM_IRQ_FLAGS, "cam_shuter_btn", button); ++ if (err) { ++ dev_err(&pdev->dev, "Could not request irq %d\n", ++ gpio_to_irq(pdata->shutter)); ++ goto err_irq_shutter; ++ } ++ button->shutter = pdata->shutter; ++ ++ if (pdata->focus) { ++ err = gpio_request(pdata->focus, "focus"); ++ if (err) { ++ dev_err(&pdev->dev, "Cannot request gpio %d\n", ++ pdata->focus); ++ goto err_focus; ++ } ++ ++ gpio_direction_input(pdata->focus); ++ ++ err = request_irq(gpio_to_irq(pdata->focus), ++ rx51_cam_focus_irq, CAM_IRQ_FLAGS, ++ "cam_focus_btn", button); ++ if (err) { ++ dev_err(&pdev->dev, "Could not request irq\n"); ++ goto err_irq_focus; ++ } ++ button->focus = pdata->focus; ++ } ++ ++ dev_set_drvdata(&pdev->dev, button); ++ ++ button->input = input_allocate_device(); ++ if (!button->input) { ++ dev_err(&pdev->dev, "Unable to allocate input device\n"); ++ err = -ENOMEM; ++ goto err_input_alloc; ++ } ++ ++ button->input->evbit[0] = BIT_MASK(EV_KEY); ++ button->input->keybit[BIT_WORD(KEY_CAMERA)] = ++ BIT_MASK(KEY_CAMERA); ++ button->input->name = "camera button"; ++ ++ if (pdata->focus) ++ button->input->keybit[BIT_WORD(KEY_F10)] = ++ BIT_MASK(KEY_F10); ++ ++ err = input_register_device(button->input); ++ if (err) ++ goto err_input_reg; ++ ++ if (device_create_file(&pdev->dev, &dev_attr_focus_btn)) ++ dev_err(&pdev->dev, "Could not create sysfs file\n"); ++ if (device_create_file(&pdev->dev, &dev_attr_shutter_btn)) ++ dev_err(&pdev->dev, "Could not create sysfs file\n"); ++ ++ dev_info(&pdev->dev, "Camera button driver initialized\n"); ++ ++ return 0; ++ ++err_input_reg: ++ input_free_device(button->input); ++ ++err_input_alloc: ++ dev_set_drvdata(&pdev->dev, NULL); ++ if (pdata->focus) ++ free_irq(gpio_to_irq(pdata->focus), button); ++ ++err_irq_focus: ++ if (pdata->focus) ++ gpio_free(pdata->focus); ++ ++err_focus: ++ free_irq(gpio_to_irq(button->shutter), button); ++ ++err_irq_shutter: ++ gpio_free(pdata->shutter); ++ ++err_shutter: ++err_pdata: ++ kfree(button); ++ ++err_alloc: ++ return err; ++} ++ ++static int __exit rx51_camera_btn_remove(struct platform_device *pdev) ++{ ++ struct rx51_cam_button *button = dev_get_drvdata(&pdev->dev); ++ ++ free_irq(gpio_to_irq(button->shutter), button); ++ gpio_free(button->shutter); ++ ++ if (button->focus) { ++ free_irq(gpio_to_irq(button->focus), button); ++ gpio_free(button->focus); ++ } ++ device_remove_file(&pdev->dev, &dev_attr_focus_btn); ++ device_remove_file(&pdev->dev, &dev_attr_shutter_btn); ++ ++ input_unregister_device(button->input); ++ input_free_device(button->input); ++ ++ return 0; ++} ++ ++static struct platform_driver rx51_cam_button_driver = { ++ .probe = rx51_camera_btn_probe, ++ 
.remove = __exit_p(rx51_camera_btn_remove), ++ .driver = { ++ .name = "camera_button", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init rx51_camera_btn_init(void) ++{ ++ return platform_driver_register(&rx51_cam_button_driver); ++} ++module_init(rx51_camera_btn_init); ++ ++static void __exit rx51_camera_btn_exit(void) ++{ ++ platform_driver_unregister(&rx51_cam_button_driver); ++} ++module_exit(rx51_camera_btn_exit); ++ ++MODULE_ALIAS("platform:camera_button"); ++MODULE_DESCRIPTION("Rx-51 Camera Button"); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Nokia Corporation"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h 2011-09-04 11:31:05.000000000 +0200 +@@ -20,34 +20,48 @@ + /* XXX Using ARE = 0x1 (no autorefresh burst) -- can this be changed? */ + static struct omap_sdrc_params mt46h32m32lf6_sdrc_params[] = { + [0] = { +- .rate = 165941176, ++ .rate = 166000000, + .actim_ctrla = 0x9a9db4c6, + .actim_ctrlb = 0x00011217, + .rfr_ctrl = 0x0004dc01, + .mr = 0x00000032, + }, + [1] = { ++ .rate = 165941176, ++ .actim_ctrla = 0x9a9db4c6, ++ .actim_ctrlb = 0x00011217, ++ .rfr_ctrl = 0x0004dc01, ++ .mr = 0x00000032, ++ }, ++ [2] = { + .rate = 133333333, + .actim_ctrla = 0x7a19b485, + .actim_ctrlb = 0x00011213, + .rfr_ctrl = 0x0003de01, + .mr = 0x00000032, + }, +- [2] = { ++ [3] = { ++ .rate = 83000000, ++ .actim_ctrla = 0x51512283, ++ .actim_ctrlb = 0x0001120c, ++ .rfr_ctrl = 0x00025501, ++ .mr = 0x00000032, ++ }, ++ [4] = { + .rate = 82970588, + .actim_ctrla = 0x51512283, + .actim_ctrlb = 0x0001120c, + .rfr_ctrl = 0x00025501, + .mr = 0x00000032, + }, +- [3] = { ++ [5] = { + .rate = 66666666, + .actim_ctrla = 0x410d2243, + .actim_ctrlb = 0x0001120a, + .rfr_ctrl = 0x0001d601, + .mr = 0x00000032, + }, +- [4] = { ++ [6] = { + .rate = 0 + }, + }; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sdrc.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sdrc.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sdrc.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sdrc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -32,39 +32,88 @@ + #include + #include "sdrc.h" + +-static struct omap_sdrc_params *sdrc_init_params; ++static struct omap_sdrc_params *sdrc_init_params_cs0, *sdrc_init_params_cs1; + + void __iomem *omap2_sdrc_base; + void __iomem *omap2_sms_base; + ++struct omap2_sms_regs { ++ u32 sms_sysconfig; ++}; ++ ++static struct omap2_sms_regs sms_context; ++ ++/* SDRC_POWER register bits */ ++#define SDRC_POWER_EXTCLKDIS_SHIFT 3 ++#define SDRC_POWER_PWDENA_SHIFT 2 ++#define SDRC_POWER_PAGEPOLICY_SHIFT 0 ++ ++/** ++ * omap2_sms_save_context - Save SMS registers ++ * ++ * Save SMS registers that need to be restored after off mode. ++ */ ++void omap2_sms_save_context(void) ++{ ++ sms_context.sms_sysconfig = sms_read_reg(SMS_SYSCONFIG); ++} ++ ++/** ++ * omap2_sms_restore_context - Restore SMS registers ++ * ++ * Restore SMS registers that need to be Restored after off mode. 
++ */ ++void omap2_sms_restore_context(void) ++{ ++ sms_write_reg(sms_context.sms_sysconfig, SMS_SYSCONFIG); ++} + + /** + * omap2_sdrc_get_params - return SDRC register values for a given clock rate + * @r: SDRC clock rate (in Hz) ++ * @sdrc_cs0: chip select 0 ram timings ** ++ * @sdrc_cs1: chip select 1 ram timings ** + * + * Return pre-calculated values for the SDRC_ACTIM_CTRLA, +- * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL, and SDRC_MR registers, for a given +- * SDRC clock rate 'r'. These parameters control various timing +- * delays in the SDRAM controller that are expressed in terms of the +- * number of SDRC clock cycles to wait; hence the clock rate +- * dependency. Note that sdrc_init_params must be sorted rate +- * descending. Also assumes that both chip-selects use the same +- * timing parameters. Returns a struct omap_sdrc_params * upon +- * success, or NULL upon failure. ++ * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL, and SDRC_MR registers in sdrc_cs[01] ++ * structs, for a given SDRC clock rate 'r'. ++ * These parameters control various timing delays in the SDRAM controller ++ * that are expressed in terms of the number of SDRC clock cycles to ++ * wait; hence the clock rate dependency. ++ * ++ * Supports 2 different timing parameters for both chip selects. ++ * ++ * Note 1: the sdrc_init_params_cs[01] must be sorted rate descending. ++ * Note 2: If sdrc_init_params_cs_1 is not NULL it must be of same size ++ * as sdrc_init_params_cs_0. ++ * ++ * Fills in the struct omap_sdrc_params * for each chip select. ++ * Returns 0 upon success or -1 upon failure. + */ +-struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r) ++int omap2_sdrc_get_params(unsigned long r, ++ struct omap_sdrc_params **sdrc_cs0, ++ struct omap_sdrc_params **sdrc_cs1) + { +- struct omap_sdrc_params *sp; ++ struct omap_sdrc_params *sp0, *sp1; + +- sp = sdrc_init_params; ++ if (!sdrc_init_params_cs0) ++ return -1; + +- while (sp->rate && sp->rate != r) +- sp++; ++ sp0 = sdrc_init_params_cs0; ++ sp1 = sdrc_init_params_cs1; + +- if (!sp->rate) +- return NULL; +- +- return sp; ++ while (sp0->rate && sp0->rate != r) { ++ sp0++; ++ if (sdrc_init_params_cs1) ++ sp1++; ++ } ++ ++ if (!sp0->rate) ++ return -1; ++ ++ *sdrc_cs0 = sp0; ++ *sdrc_cs1 = sp1; ++ return 0; + } + + +@@ -74,8 +123,17 @@ void __init omap2_set_globals_sdrc(struc + omap2_sms_base = omap2_globals->sms; + } + +-/* turn on smart idle modes for SDRAM scheduler and controller */ +-void __init omap2_sdrc_init(struct omap_sdrc_params *sp) ++/** ++ * omap2_sdrc_init - initialize SMS, SDRC devices on boot ++ * @sdrc_cs[01]: pointers to a null-terminated list of struct omap_sdrc_params ++ * Support for 2 chip selects timings ++ * ++ * Turn on smart idle modes for SDRAM scheduler and controller. ++ * Bootloaders should make proper configuration for SDRC since kernel ++ * is running from SDRAM. 
++ */ ++void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0, ++ struct omap_sdrc_params *sdrc_cs1) + { + u32 l; + +@@ -89,5 +147,8 @@ void __init omap2_sdrc_init(struct omap_ + l |= (0x2 << 3); + sdrc_write_reg(l, SDRC_SYSCONFIG); + +- sdrc_init_params = sp; ++ sdrc_init_params_cs0 = sdrc_cs0; ++ sdrc_init_params_cs1 = sdrc_cs1; ++ ++ omap2_sms_save_context(); + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/serial.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/serial.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/serial.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/serial.c 2011-09-04 11:31:05.000000000 +0200 +@@ -138,8 +138,6 @@ static inline void omap_uart_enable_cloc + #ifdef CONFIG_PM + #ifdef CONFIG_ARCH_OMAP3 + +-static int enable_off_mode; /* to be removed by full off-mode patches */ +- + static void omap_uart_save_context(struct omap_uart_state *uart) + { + u16 lcr = 0; +@@ -245,7 +243,8 @@ static void omap_uart_allow_sleep(struct + if (!uart->clocked) + return; + +- omap_uart_smart_idle_enable(uart, 1); ++ if (serial_read_reg(uart->p, UART_LSR) & UART_LSR_TEMT) ++ omap_uart_smart_idle_enable(uart, 1); + uart->can_sleep = 1; + del_timer(&uart->timer); + } +@@ -266,7 +265,11 @@ void omap_uart_prepare_idle(int num) + continue; + + if (num == uart->num && uart->can_sleep) { +- omap_uart_disable_clocks(uart); ++ if (serial_read_reg(uart->p, UART_LSR) & ++ UART_LSR_TEMT) ++ omap_uart_disable_clocks(uart); ++ else ++ omap_uart_smart_idle_enable(uart, 0); + return; + } + } +@@ -339,8 +342,14 @@ int omap_uart_can_sleep(void) + static irqreturn_t omap_uart_interrupt(int irq, void *dev_id) + { + struct omap_uart_state *uart = dev_id; ++ u8 lsr; + +- omap_uart_block_sleep(uart); ++ lsr = serial_read_reg(uart->p, UART_LSR); ++ /* Check for receive interrupt */ ++ if (lsr & UART_LSR_DR) ++ omap_uart_block_sleep(uart); ++ if (lsr & UART_LSR_TEMT && uart->can_sleep) ++ omap_uart_smart_idle_enable(uart, 1); + + return IRQ_NONE; + } +@@ -434,6 +443,20 @@ static void omap_uart_idle_init(struct o + WARN_ON(ret); + } + ++void omap_uart_enable_irqs(int enable) ++{ ++ int ret; ++ struct omap_uart_state *uart; ++ ++ list_for_each_entry(uart, &uart_list, node) { ++ if (enable) ++ ret = request_irq(uart->p->irq, omap_uart_interrupt, ++ IRQF_SHARED, "serial idle", (void *)uart); ++ else ++ free_irq(uart->p->irq, (void *)uart); ++ } ++} ++ + static ssize_t sleep_timeout_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +@@ -453,8 +476,11 @@ static ssize_t sleep_timeout_store(struc + return -EINVAL; + } + sleep_timeout = value * HZ; +- list_for_each_entry(uart, &uart_list, node) ++ list_for_each_entry(uart, &uart_list, node) { + uart->timeout = sleep_timeout; ++ if (timer_pending(&uart->timer)) ++ mod_timer(&uart->timer, jiffies + sleep_timeout); ++ } + return n; + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sleep34xx.S kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sleep34xx.S +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sleep34xx.S 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sleep34xx.S 2011-09-04 11:31:05.000000000 +0200 +@@ -26,6 +26,7 @@ + */ + #include + #include ++#include + #include + #include + #include +@@ -35,15 +36,32 @@ + + #define PM_PREPWSTST_CORE_V OMAP34XX_PRM_REGADDR(CORE_MOD, \ + OMAP3430_PM_PREPWSTST) ++#define PM_PREPWSTST_CORE_P 0x48306AE8 + #define PM_PREPWSTST_MPU_V OMAP34XX_PRM_REGADDR(MPU_MOD, \ + OMAP3430_PM_PREPWSTST) +-#define 
PM_PWSTCTRL_MPU_P OMAP34XX_PRM_REGADDR(MPU_MOD, PM_PWSTCTRL) ++#define CM_IDLEST1_CORE_V IO_ADDRESS(OMAP3430_CM_BASE + 0x220) ++ ++/* ++ * This is the physical address of the register as specified ++ * by the _P. To be used while the MMU is still disabled. ++ */ ++#define PM_PWSTCTRL_MPU_P (OMAP3430_PRM_BASE + MPU_MOD + PM_PWSTCTRL) ++#define SRAM_BASE_P 0x40200000 ++#define CONTROL_STAT 0x480022F0 + #define SCRATCHPAD_MEM_OFFS 0x310 /* Move this as correct place is + * available */ +-#define SCRATCHPAD_BASE_P OMAP343X_CTRL_REGADDR(\ +- OMAP343X_CONTROL_MEM_WKUP +\ +- SCRATCHPAD_MEM_OFFS) ++#define SCRATCHPAD_BASE_P (OMAP343X_CTRL_BASE + OMAP343X_CONTROL_MEM_WKUP\ ++ + SCRATCHPAD_MEM_OFFS) + #define SDRC_POWER_V OMAP34XX_SDRC_REGADDR(SDRC_POWER) ++#define SDRC_SYSCONFIG_P (OMAP343X_SDRC_BASE + SDRC_SYSCONFIG) ++#define SDRC_MR_0_P (OMAP343X_SDRC_BASE + SDRC_MR_0) ++#define SDRC_EMR2_0_P (OMAP343X_SDRC_BASE + SDRC_EMR2_0) ++#define SDRC_MANUAL_0_P (OMAP343X_SDRC_BASE + SDRC_MANUAL_0) ++#define SDRC_MR_1_P (OMAP343X_SDRC_BASE + SDRC_MR_1) ++#define SDRC_EMR2_1_P (OMAP343X_SDRC_BASE + SDRC_EMR2_1) ++#define SDRC_MANUAL_1_P (OMAP343X_SDRC_BASE + SDRC_MANUAL_1) ++#define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS) ++#define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL) + + .text + /* Function call to get the restore pointer for resume from OFF */ +@@ -52,7 +70,93 @@ ENTRY(get_restore_pointer) + adr r0, restore + ldmfd sp!, {pc} @ restore regs and return + ENTRY(get_restore_pointer_sz) +- .word . - get_restore_pointer_sz ++ .word . - get_restore_pointer ++ ++ .text ++/* Function call to get the restore pointer for for ES3 to resume from OFF */ ++ENTRY(get_es3_restore_pointer) ++ stmfd sp!, {lr} @ save registers on stack ++ adr r0, restore_es3 ++ ldmfd sp!, {pc} @ restore regs and return ++ENTRY(get_es3_restore_pointer_sz) ++ .word . - get_es3_restore_pointer ++ ++ENTRY(es3_sdrc_fix) ++ ldr r4, sdrc_syscfg @ get config addr ++ ldr r5, [r4] @ get value ++ tst r5, #0x100 @ is part access blocked ++ it eq ++ biceq r5, r5, #0x100 @ clear bit if set ++ str r5, [r4] @ write back change ++ ldr r4, sdrc_mr_0 @ get config addr ++ ldr r5, [r4] @ get value ++ str r5, [r4] @ write back change ++ ldr r4, sdrc_emr2_0 @ get config addr ++ ldr r5, [r4] @ get value ++ str r5, [r4] @ write back change ++ ldr r4, sdrc_manual_0 @ get config addr ++ mov r5, #0x2 @ autorefresh command ++ str r5, [r4] @ kick off refreshes ++ ldr r4, sdrc_mr_1 @ get config addr ++ ldr r5, [r4] @ get value ++ str r5, [r4] @ write back change ++ ldr r4, sdrc_emr2_1 @ get config addr ++ ldr r5, [r4] @ get value ++ str r5, [r4] @ write back change ++ ldr r4, sdrc_manual_1 @ get config addr ++ mov r5, #0x2 @ autorefresh command ++ str r5, [r4] @ kick off refreshes ++ bx lr ++sdrc_syscfg: ++ .word SDRC_SYSCONFIG_P ++sdrc_mr_0: ++ .word SDRC_MR_0_P ++sdrc_emr2_0: ++ .word SDRC_EMR2_0_P ++sdrc_manual_0: ++ .word SDRC_MANUAL_0_P ++sdrc_mr_1: ++ .word SDRC_MR_1_P ++sdrc_emr2_1: ++ .word SDRC_EMR2_1_P ++sdrc_manual_1: ++ .word SDRC_MANUAL_1_P ++ENTRY(es3_sdrc_fix_sz) ++ .word . 
- es3_sdrc_fix ++ ++/* Function to call rom code to save secure ram context */ ++ENTRY(save_secure_ram_context) ++ stmfd sp!, {r1-r12, lr} @ save registers on stack ++save_secure_ram_debug: ++ /* b save_secure_ram_debug */ @ enable to debug save code ++ adr r3, api_params @ r3 points to parameters ++ str r0, [r3,#0x4] @ r0 has sdram address ++ ldr r12, high_mask ++ and r3, r3, r12 ++ ldr r12, sram_phy_addr_mask ++ orr r3, r3, r12 ++ mov r0, #25 @ set service ID for PPA ++ mov r12, r0 @ copy secure service ID in r12 ++ mov r1, #0 @ set task id for ROM code in r1 ++ mov r2, #4 @ set some flags in r2, r6 ++ mov r6, #0xff ++ mcr p15, 0, r0, c7, c10, 4 @ data write barrier ++ mcr p15, 0, r0, c7, c10, 5 @ data memory barrier ++ .word 0xE1600071 @ call SMI monitor (smi #1) ++ nop ++ nop ++ nop ++ nop ++ ldmfd sp!, {r1-r12, pc} ++sram_phy_addr_mask: ++ .word SRAM_BASE_P ++high_mask: ++ .word 0xffff ++api_params: ++ .word 0x4, 0x0, 0x0, 0x1, 0x1 ++ENTRY(save_secure_ram_context_sz) ++ .word . - save_secure_ram_context ++ + /* + * Forces OMAP into idle state + * +@@ -93,11 +197,29 @@ loop: + nop + nop + nop +- bl i_dll_wait ++ bl wait_sdrc_ok + + ldmfd sp!, {r0-r12, pc} @ restore regs and return ++restore_es3: ++ /*b restore_es3*/ @ Enable to debug restore code ++ ldr r5, pm_prepwstst_core_p ++ ldr r4, [r5] ++ and r4, r4, #0x3 ++ cmp r4, #0x0 @ Check if previous power state of CORE is OFF ++ bne restore ++ adr r0, es3_sdrc_fix ++ ldr r1, sram_base ++ ldr r2, es3_sdrc_fix_sz ++ mov r2, r2, ror #2 ++copy_to_sram: ++ ldmia r0!, {r3} @ val = *src ++ stmia r1!, {r3} @ *dst = val ++ subs r2, r2, #0x1 @ num_words-- ++ bne copy_to_sram ++ ldr r1, sram_base ++ blx r1 + restore: +- /* b restore*/ @ Enable to debug restore code ++ /* b restore*/ @ Enable to debug restore code + /* Check what was the reason for mpu reset and store the reason in r9*/ + /* 1 - Only L1 and logic lost */ + /* 2 - Only L2 lost - In this case, we wont be here */ +@@ -109,9 +231,53 @@ restore: + moveq r9, #0x3 @ MPU OFF => L1 and L2 lost + movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation + bne logic_l1_restore ++ ldr r0, control_stat ++ ldr r1, [r0] ++ and r1, #0x700 ++ cmp r1, #0x300 ++ beq l2_inv_gp ++ mov r0, #40 @ set service ID for PPA ++ mov r12, r0 @ copy secure Service ID in r12 ++ mov r1, #0 @ set task id for ROM code in r1 ++ mov r2, #4 @ set some flags in r2, r6 ++ mov r6, #0xff ++ adr r3, l2_inv_api_params @ r3 points to dummy parameters ++ mcr p15, 0, r0, c7, c10, 4 @ data write barrier ++ mcr p15, 0, r0, c7, c10, 5 @ data memory barrier ++ .word 0xE1600071 @ call SMI monitor (smi #1) ++ /* Write to Aux control register to set some bits */ ++ mov r0, #42 @ set service ID for PPA ++ mov r12, r0 @ copy secure Service ID in r12 ++ mov r1, #0 @ set task id for ROM code in r1 ++ mov r2, #4 @ set some flags in r2, r6 ++ mov r6, #0xff ++ ldr r3, write_aux_control_params @ r3 points to parameters ++ ldr r4, phys_offset ++ adds r3, r3, r4 ++ ldr r4, page_offset ++ subs r3, r3, r4 ++ mcr p15, 0, r0, c7, c10, 4 @ data write barrier ++ mcr p15, 0, r0, c7, c10, 5 @ data memory barrier ++ .word 0xE1600071 @ call SMI monitor (smi #1) ++ ++ b logic_l1_restore ++l2_inv_api_params: ++ .word 0x1, 0x00 ++write_aux_control_params: ++ .word omap3_aux_ctrl ++l2_inv_gp: + /* Execute smi to invalidate L2 cache */ + mov r12, #0x1 @ set up to invalide L2 +-smi: .word 0xE1600070 @ Call SMI monitor (smieq) ++smi: .word 0xE1600070 @ Call SMI monitor (smieq) ++ /* Write to Aux control register to set some bits */ ++ ldr r1, 
write_aux_control_params ++ ldr r0, phys_offset ++ adds r1, r1, r0 ++ ldr r0, page_offset ++ subs r1, r1, r0 ++ ldr r0, [r1, #4] ++ mov r12, #0x3 ++ .word 0xE1600070 @ Call SMI monitor (smieq) + logic_l1_restore: + mov r1, #0 + /* Invalidate all instruction caches to PoU +@@ -288,6 +454,9 @@ usettbr0: + save_context_wfi: + /*b save_context_wfi*/ @ enable to debug save code + mov r8, r0 /* Store SDRAM address in r8 */ ++ mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary Control Register ++ ldr r5, write_aux_control_params ++ str r4, [r5, #4] + /* Check what that target sleep state is:stored in r1*/ + /* 1 - Only L1 and logic lost */ + /* 2 - Only L2 lost */ +@@ -499,33 +668,59 @@ skip_l2_inval: + nop + nop + nop +- bl i_dll_wait ++ bl wait_sdrc_ok + /* restore regs and return */ + ldmfd sp!, {r0-r12, pc} + +-i_dll_wait: +- ldr r4, clk_stabilize_delay +- +-i_dll_delay: +- subs r4, r4, #0x1 +- bne i_dll_delay +- ldr r4, sdrc_power +- ldr r5, [r4] +- bic r5, r5, #0x40 +- str r5, [r4] +- bx lr ++/* Make sure SDRC accesses are ok */ ++wait_sdrc_ok: ++ ldr r4, cm_idlest1_core ++ ldr r5, [r4] ++ and r5, r5, #0x2 ++ cmp r5, #0 ++ bne wait_sdrc_ok ++ ldr r4, sdrc_power ++ ldr r5, [r4] ++ bic r5, r5, #0x40 ++ str r5, [r4] ++wait_dll_lock: ++ /* Is dll in lock mode? */ ++ ldr r4, sdrc_dlla_ctrl ++ ldr r5, [r4] ++ tst r5, #0x4 ++ bxne lr ++ /* wait till dll locks */ ++ ldr r4, sdrc_dlla_status ++ ldr r5, [r4] ++ and r5, r5, #0x4 ++ cmp r5, #0x4 ++ bne wait_dll_lock ++ bx lr ++ ++phys_offset: ++ .word PHYS_OFFSET ++page_offset: ++ .word PAGE_OFFSET ++cm_idlest1_core: ++ .word CM_IDLEST1_CORE_V ++sdrc_dlla_status: ++ .word SDRC_DLLA_STATUS_V ++sdrc_dlla_ctrl: ++ .word SDRC_DLLA_CTRL_V + pm_prepwstst_core: + .word PM_PREPWSTST_CORE_V ++pm_prepwstst_core_p: ++ .word PM_PREPWSTST_CORE_P + pm_prepwstst_mpu: + .word PM_PREPWSTST_MPU_V + pm_pwstctrl_mpu: + .word PM_PWSTCTRL_MPU_P + scratchpad_base: + .word SCRATCHPAD_BASE_P ++sram_base: ++ .word SRAM_BASE_P + 0x8000 + sdrc_power: + .word SDRC_POWER_V +-context_mem: +- .word 0x803E3E14 + clk_stabilize_delay: + .word 0x000001FF + assoc_mask: +@@ -540,5 +735,7 @@ table_entry: + .word 0x00000C02 + cache_pred_disable_mask: + .word 0xFFFFE7FB ++control_stat: ++ .word CONTROL_STAT + ENTRY(omap34xx_cpu_suspend_sz) + .word . - omap34xx_cpu_suspend +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/smartreflex.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/smartreflex.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/smartreflex.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/smartreflex.c 2011-09-04 11:31:05.000000000 +0200 +@@ -26,18 +26,52 @@ + #include + #include + #include ++#include "resource34xx_mutex.h" + + #include + #include + #include ++#include ++#include + + #include "prm.h" + #include "smartreflex.h" + #include "prm-regbits-34xx.h" + +-/* XXX: These should be relocated where-ever the OPP implementation will be */ +-u32 current_vdd1_opp; +-u32 current_vdd2_opp; ++/* ++ * VP_TRANXDONE_TIMEOUT: maximum microseconds to wait for the VP to ++ * indicate that any pending transactions are complete. [The current ++ * 62 microsecond timeout was measured empirically by Nishanth Menon ++ * during an overnight run; its granularity is ~ 30.5 microseconds, since ++ * it was measured with the 32KiHz sync timer; see bug 133793] ++ */ ++#define VP_TRANXDONE_TIMEOUT 62 ++ ++/* ++ * VP_IDLE_TIMEOUT: maximum microseconds to wait for the VP to enter ++ * IDLE. 
[The current 3.472 millisecond timeout was measured ++ * empirically by Nishanth Menon during an overnight run; its ++ * granularity is ~ 30.5 microseconds, since it was measured with the ++ * 32KiHz sync timer; see bug 133793] ++ */ ++#define VP_IDLE_TIMEOUT 3472 ++ ++/* ++ * SR_DISABLE_TIMEOUT: maximum microseconds to wait for the SR to ++ * disable. [The current 3.472 millisecond timeout was measured ++ * empirically by Nishanth Menon during an overnight run; its ++ * granularity is ~ 30.5 microseconds, since it was measured with the ++ * 32KiHz sync timer; see bug 133793] ++ */ ++#define SR_DISABLE_TIMEOUT 3472 ++ ++/* ++ * SR_DISABLE_MAX_ATTEMPTS: arbitrary value intended to avoid system ++ * crashes if the SR disable process fails the first few times. The ++ * kernel will WARN() for every timeout, but will BUG() after ++ * SR_DISABLE_MAX_ATTEMPTS. ++ */ ++#define SR_DISABLE_MAX_ATTEMPTS 4 + + struct omap_sr { + int srid; +@@ -85,8 +119,8 @@ static int sr_clk_enable(struct omap_sr + } + + /* set fclk- active , iclk- idle */ +- sr_modify_reg(sr, ERRCONFIG, SR_CLKACTIVITY_MASK, +- SR_CLKACTIVITY_IOFF_FON); ++ sr_modify_reg(sr, ERRCONFIG, SR_CLKACTIVITY_MASK | ++ ERRCONFIG_INTERRUPT_STATUS_MASK, SR_CLKACTIVITY_IOFF_FON); + + return 0; + } +@@ -94,8 +128,8 @@ static int sr_clk_enable(struct omap_sr + static void sr_clk_disable(struct omap_sr *sr) + { + /* set fclk, iclk- idle */ +- sr_modify_reg(sr, ERRCONFIG, SR_CLKACTIVITY_MASK, +- SR_CLKACTIVITY_IOFF_FOFF); ++ sr_modify_reg(sr, ERRCONFIG, SR_CLKACTIVITY_MASK | ++ ERRCONFIG_INTERRUPT_STATUS_MASK, SR_CLKACTIVITY_IOFF_FOFF); + + clk_disable(sr->clk); + sr->is_sr_reset = 1; +@@ -148,14 +182,14 @@ static u32 cal_test_nvalue(u32 sennval, + + static void sr_set_clk_length(struct omap_sr *sr) + { +- struct clk *osc_sys_ck; +- u32 sys_clk = 0; ++ struct clk *sys_ck; ++ u32 sys_clk_speed; + +- osc_sys_ck = clk_get(NULL, "osc_sys_ck"); +- sys_clk = clk_get_rate(osc_sys_ck); +- clk_put(osc_sys_ck); ++ sys_ck = clk_get(NULL, "sys_ck"); ++ sys_clk_speed = clk_get_rate(sys_ck); ++ clk_put(sys_ck); + +- switch (sys_clk) { ++ switch (sys_clk_speed) { + case 12000000: + sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK; + break; +@@ -172,7 +206,7 @@ static void sr_set_clk_length(struct oma + sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK; + break; + default : +- printk(KERN_ERR "Invalid sysclk value: %d\n", sys_clk); ++ printk(KERN_ERR "Invalid sysclk value: %d\n", sys_clk_speed); + break; + } + } +@@ -183,7 +217,6 @@ static void sr_set_efuse_nvalues(struct + sr->senn_mod = (omap_ctrl_readl(OMAP343X_CONTROL_FUSE_SR) & + OMAP343X_SR1_SENNENABLE_MASK) >> + OMAP343X_SR1_SENNENABLE_SHIFT; +- + sr->senp_mod = (omap_ctrl_readl(OMAP343X_CONTROL_FUSE_SR) & + OMAP343X_SR1_SENPENABLE_MASK) >> + OMAP343X_SR1_SENPENABLE_SHIFT; +@@ -251,11 +284,19 @@ static void sr_set_nvalues(struct omap_s + static void sr_configure_vp(int srid) + { + u32 vpconfig; ++ u8 curr_opp_no; + + if (srid == SR1) { +- vpconfig = PRM_VP1_CONFIG_ERROROFFSET | PRM_VP1_CONFIG_ERRORGAIN +- | PRM_VP1_CONFIG_INITVOLTAGE +- | PRM_VP1_CONFIG_TIMEOUTEN; ++ curr_opp_no = resource_get_level("vdd1_opp"); ++ ++ vpconfig = PRM_VP1_CONFIG_ERROROFFSET | ++ PRM_VP1_CONFIG_TIMEOUTEN | ++ mpu_opps[curr_opp_no].vsel << ++ OMAP3430_INITVOLTAGE_SHIFT; ++ ++ vpconfig |= (curr_opp_no > SR_MAX_LOW_OPP) ? 
++ PRM_VP1_CONFIG_ERRORGAIN_HIGHOPP : ++ PRM_VP1_CONFIG_ERRORGAIN_LOWOPP; + + prm_write_mod_reg(vpconfig, OMAP3430_GR_MOD, + OMAP3_PRM_VP1_CONFIG_OFFSET); +@@ -277,15 +318,30 @@ static void sr_configure_vp(int srid) + + /* Trigger initVDD value copy to voltage processor */ + prm_set_mod_reg_bits(PRM_VP1_CONFIG_INITVDD, OMAP3430_GR_MOD, +- OMAP3_PRM_VP1_CONFIG_OFFSET); ++ OMAP3_PRM_VP1_CONFIG_OFFSET); ++ + /* Clear initVDD copy trigger bit */ + prm_clear_mod_reg_bits(PRM_VP1_CONFIG_INITVDD, OMAP3430_GR_MOD, +- OMAP3_PRM_VP1_CONFIG_OFFSET); ++ OMAP3_PRM_VP1_CONFIG_OFFSET); ++ ++ /* Force update of voltage */ ++ prm_set_mod_reg_bits(OMAP3430_FORCEUPDATE, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_CONFIG_OFFSET); ++ /* Clear force bit */ ++ prm_clear_mod_reg_bits(OMAP3430_FORCEUPDATE, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_CONFIG_OFFSET); + + } else if (srid == SR2) { +- vpconfig = PRM_VP2_CONFIG_ERROROFFSET | PRM_VP2_CONFIG_ERRORGAIN +- | PRM_VP2_CONFIG_INITVOLTAGE +- | PRM_VP2_CONFIG_TIMEOUTEN; ++ curr_opp_no = resource_get_level("vdd2_opp"); ++ ++ vpconfig = PRM_VP2_CONFIG_ERROROFFSET | ++ PRM_VP2_CONFIG_TIMEOUTEN | ++ l3_opps[curr_opp_no].vsel << ++ OMAP3430_INITVOLTAGE_SHIFT; ++ ++ vpconfig |= (curr_opp_no > SR_MAX_LOW_OPP) ? ++ PRM_VP2_CONFIG_ERRORGAIN_HIGHOPP : ++ PRM_VP2_CONFIG_ERRORGAIN_LOWOPP; + + prm_write_mod_reg(vpconfig, OMAP3430_GR_MOD, + OMAP3_PRM_VP2_CONFIG_OFFSET); +@@ -306,11 +362,19 @@ static void sr_configure_vp(int srid) + OMAP3_PRM_VP2_VLIMITTO_OFFSET); + + /* Trigger initVDD value copy to voltage processor */ +- prm_set_mod_reg_bits(PRM_VP2_CONFIG_INITVDD, OMAP3430_GR_MOD, +- OMAP3_PRM_VP2_CONFIG_OFFSET); +- /* Reset initVDD copy trigger bit */ +- prm_clear_mod_reg_bits(PRM_VP2_CONFIG_INITVDD, OMAP3430_GR_MOD, +- OMAP3_PRM_VP2_CONFIG_OFFSET); ++ prm_set_mod_reg_bits(PRM_VP1_CONFIG_INITVDD, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ ++ /* Clear initVDD copy trigger bit */ ++ prm_clear_mod_reg_bits(PRM_VP1_CONFIG_INITVDD, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ ++ /* Force update of voltage */ ++ prm_set_mod_reg_bits(OMAP3430_FORCEUPDATE, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ /* Clear force bit */ ++ prm_clear_mod_reg_bits(OMAP3430_FORCEUPDATE, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); + + } + } +@@ -339,8 +403,8 @@ static void sr_configure(struct omap_sr + SR1_AVGWEIGHT_SENNAVGWEIGHT); + + sr_modify_reg(sr, ERRCONFIG, (SR_ERRWEIGHT_MASK | +- SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK), +- (SR1_ERRWEIGHT | SR1_ERRMAXLIMIT | SR1_ERRMINLIMIT)); ++ SR_ERRMAXLIMIT_MASK) | ERRCONFIG_INTERRUPT_STATUS_MASK, ++ (SR1_ERRWEIGHT | SR1_ERRMAXLIMIT)); + + } else if (sr->srid == SR2) { + sr_config = SR2_SRCONFIG_ACCUMDATA | +@@ -355,16 +419,96 @@ static void sr_configure(struct omap_sr + sr_write_reg(sr, AVGWEIGHT, SR2_AVGWEIGHT_SENPAVGWEIGHT | + SR2_AVGWEIGHT_SENNAVGWEIGHT); + sr_modify_reg(sr, ERRCONFIG, (SR_ERRWEIGHT_MASK | +- SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK), +- (SR2_ERRWEIGHT | SR2_ERRMAXLIMIT | SR2_ERRMINLIMIT)); ++ SR_ERRMAXLIMIT_MASK) | ERRCONFIG_INTERRUPT_STATUS_MASK, ++ (SR2_ERRWEIGHT | SR2_ERRMAXLIMIT)); + + } + sr->is_sr_reset = 0; + } + ++static int sr_reset_voltage(int srid) ++{ ++ u32 target_opp_no, vsel = 0; ++ u32 reg_addr = 0; ++ u32 loop_cnt = 0, retries_cnt = 0; ++ u32 vc_bypass_value; ++ u32 t2_smps_steps = 0; ++ u32 t2_smps_delay = 0; ++ u32 prm_vp1_voltage, prm_vp2_voltage, vp_config_offs; ++ u32 errorgain; ++ ++ if (srid == SR1) { ++ target_opp_no = sr1.req_opp_no; ++ vsel = mpu_opps[target_opp_no].vsel; ++ reg_addr = 
R_VDD1_SR_CONTROL; ++ prm_vp1_voltage = prm_read_mod_reg(OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_VOLTAGE_OFFSET); ++ t2_smps_steps = abs(vsel - prm_vp1_voltage); ++ errorgain = (target_opp_no > SR_MAX_LOW_OPP) ? ++ PRM_VP1_CONFIG_ERRORGAIN_HIGHOPP : ++ PRM_VP1_CONFIG_ERRORGAIN_LOWOPP; ++ vp_config_offs = OMAP3_PRM_VP1_CONFIG_OFFSET; ++ } else if (srid == SR2) { ++ target_opp_no = sr2.req_opp_no; ++ vsel = l3_opps[target_opp_no].vsel; ++ reg_addr = R_VDD2_SR_CONTROL; ++ prm_vp2_voltage = prm_read_mod_reg(OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_VOLTAGE_OFFSET); ++ t2_smps_steps = abs(vsel - prm_vp2_voltage); ++ errorgain = (target_opp_no > SR_MAX_LOW_OPP) ? ++ PRM_VP2_CONFIG_ERRORGAIN_HIGHOPP : ++ PRM_VP2_CONFIG_ERRORGAIN_LOWOPP; ++ vp_config_offs = OMAP3_PRM_VP2_CONFIG_OFFSET; ++ } else { ++ WARN(1, "Bad SR ID %d", srid); ++ return SR_FAIL; ++ } ++ ++ prm_rmw_mod_reg_bits(OMAP3430_ERRORGAIN_MASK, errorgain, ++ OMAP3430_GR_MOD, vp_config_offs); ++ ++ vc_bypass_value = (vsel << OMAP3430_DATA_SHIFT) | ++ (reg_addr << OMAP3430_REGADDR_SHIFT) | ++ (R_SRI2C_SLAVE_ADDR << OMAP3430_SLAVEADDR_SHIFT); ++ ++ prm_write_mod_reg(vc_bypass_value, OMAP3430_GR_MOD, ++ OMAP3_PRM_VC_BYPASS_VAL_OFFSET); ++ ++ vc_bypass_value = prm_set_mod_reg_bits(OMAP3430_VALID, OMAP3430_GR_MOD, ++ OMAP3_PRM_VC_BYPASS_VAL_OFFSET); ++ ++ while ((vc_bypass_value & OMAP3430_VALID) != 0x0) { ++ loop_cnt++; ++ if (retries_cnt > 10) { ++ printk(KERN_INFO "Loop count exceeded in check SR I2C" ++ "write\n"); ++ return SR_FAIL; ++ } ++ if (loop_cnt > 50) { ++ retries_cnt++; ++ loop_cnt = 0; ++ udelay(10); ++ } ++ vc_bypass_value = prm_read_mod_reg(OMAP3430_GR_MOD, ++ OMAP3_PRM_VC_BYPASS_VAL_OFFSET); ++ } ++ ++ /* ++ * T2 SMPS slew rate (min) 4mV/uS, step size 12.5mV, ++ * 2us added as buffer. ++ */ ++ t2_smps_delay = ((t2_smps_steps * 125) / 40) + 2; ++ udelay(t2_smps_delay); ++ ++ return SR_PASS; ++} ++ + static int sr_enable(struct omap_sr *sr, u32 target_opp_no) + { +- u32 nvalue_reciprocal; ++ u32 nvalue_reciprocal, v; ++ u8 errminlimit; ++ ++ BUG_ON(!(mpu_opps && l3_opps)); + + sr->req_opp_no = target_opp_no; + +@@ -415,40 +559,190 @@ static int sr_enable(struct omap_sr *sr, + sr_write_reg(sr, NVALUERECIPROCAL, nvalue_reciprocal); + + /* Enable the interrupt */ +- sr_modify_reg(sr, ERRCONFIG, +- (ERRCONFIG_VPBOUNDINTEN | ERRCONFIG_VPBOUNDINTST), ++ sr_modify_reg(sr, ERRCONFIG, (ERRCONFIG_VPBOUNDINTEN | ++ ERRCONFIG_INTERRUPT_STATUS_MASK), + (ERRCONFIG_VPBOUNDINTEN | ERRCONFIG_VPBOUNDINTST)); ++ + if (sr->srid == SR1) { ++ errminlimit = (target_opp_no > SR_MAX_LOW_OPP) ? ++ SR1_ERRMINLIMIT_HIGHOPP : SR1_ERRMINLIMIT_LOWOPP; ++ ++ /* set/latch init voltage */ ++ v = prm_read_mod_reg(OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_CONFIG_OFFSET); ++ v &= ~(OMAP3430_INITVOLTAGE_MASK | OMAP3430_INITVDD); ++ v |= mpu_opps[target_opp_no].vsel << ++ OMAP3430_INITVOLTAGE_SHIFT; ++ prm_write_mod_reg(v, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_CONFIG_OFFSET); ++ /* write1 to latch */ ++ prm_set_mod_reg_bits(OMAP3430_INITVDD, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_CONFIG_OFFSET); ++ /* write2 clear */ ++ prm_clear_mod_reg_bits(OMAP3430_INITVDD, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP1_CONFIG_OFFSET); + /* Enable VP1 */ + prm_set_mod_reg_bits(PRM_VP1_CONFIG_VPENABLE, OMAP3430_GR_MOD, +- OMAP3_PRM_VP1_CONFIG_OFFSET); ++ OMAP3_PRM_VP1_CONFIG_OFFSET); + } else if (sr->srid == SR2) { ++ errminlimit = (target_opp_no > SR_MAX_LOW_OPP) ? 
++ SR2_ERRMINLIMIT_HIGHOPP : SR2_ERRMINLIMIT_LOWOPP; ++ ++ /* set/latch init voltage */ ++ v = prm_read_mod_reg(OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ v &= ~(OMAP3430_INITVOLTAGE_MASK | OMAP3430_INITVDD); ++ v |= l3_opps[target_opp_no].vsel << ++ OMAP3430_INITVOLTAGE_SHIFT; ++ prm_write_mod_reg(v, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ /* write1 to latch */ ++ prm_set_mod_reg_bits(OMAP3430_INITVDD, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ /* write2 clear */ ++ prm_clear_mod_reg_bits(OMAP3430_INITVDD, OMAP3430_GR_MOD, ++ OMAP3_PRM_VP2_CONFIG_OFFSET); + /* Enable VP2 */ + prm_set_mod_reg_bits(PRM_VP2_CONFIG_VPENABLE, OMAP3430_GR_MOD, +- OMAP3_PRM_VP2_CONFIG_OFFSET); ++ OMAP3_PRM_VP2_CONFIG_OFFSET); ++ } else { ++ WARN(1, "Bad SR ID %d", sr->srid); ++ return SR_FAIL; + } + ++ sr_modify_reg(sr, ERRCONFIG, SR_ERRMINLIMIT_MASK | ++ ERRCONFIG_INTERRUPT_STATUS_MASK, errminlimit); ++ + /* SRCONFIG - enable SR */ + sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE); + return SR_TRUE; + } + +-static void sr_disable(struct omap_sr *sr) ++static void vp_disable(struct omap_sr *sr) + { +- sr->is_sr_reset = 1; +- +- /* SRCONFIG - disable SR */ +- sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, ~SRCONFIG_SRENABLE); ++ u32 vp_config_offs, vp_status_offs; ++ u32 vp_tranxdone_st; ++ int c = 0, v; + + if (sr->srid == SR1) { +- /* Disable VP1 */ +- prm_clear_mod_reg_bits(PRM_VP1_CONFIG_VPENABLE, OMAP3430_GR_MOD, +- OMAP3_PRM_VP1_CONFIG_OFFSET); ++ vp_config_offs = OMAP3_PRM_VP1_CONFIG_OFFSET; ++ vp_status_offs = OMAP3_PRM_VP1_STATUS_OFFSET; ++ vp_tranxdone_st = OMAP3430_VP1_TRANXDONE_ST; + } else if (sr->srid == SR2) { +- /* Disable VP2 */ +- prm_clear_mod_reg_bits(PRM_VP2_CONFIG_VPENABLE, OMAP3430_GR_MOD, +- OMAP3_PRM_VP2_CONFIG_OFFSET); ++ vp_config_offs = OMAP3_PRM_VP2_CONFIG_OFFSET; ++ vp_status_offs = OMAP3_PRM_VP2_STATUS_OFFSET; ++ vp_tranxdone_st = OMAP3430_VP2_TRANXDONE_ST; ++ } else { ++ WARN(1, "Bad SR ID"); ++ return; + } ++ ++ /* ++ * Clear all pending TransactionDone int/st here ++ * XXX Do we need to make sure this INTEN bit is masked so the ++ * PRCM ISR isn't called? ++ */ ++ do { ++ prm_write_mod_reg(vp_tranxdone_st, OCP_MOD, ++ OMAP2_PRM_IRQSTATUS_MPU_OFFSET); ++ v = prm_read_mod_reg(OCP_MOD, OMAP2_PRM_IRQSTATUS_MPU_OFFSET); ++ v &= vp_tranxdone_st; ++ /* ++ * XXX This udelay(1) will wait for longer than 1 ++ * microsecond when switching to a lower OPP, since ++ * loops_per_jiffy is not yet updated at this point ++ */ ++ if (v) ++ udelay(1); ++ c++; ++ } while (v && c < VP_TRANXDONE_TIMEOUT); ++ ++ /* XXX Need clarity from TI on what to do if the timeout is reached */ ++ WARN(c == VP_TRANXDONE_TIMEOUT, "VP: TRANXDONE timeout exceeded"); ++ ++ /* Disable VP */ ++ prm_clear_mod_reg_bits(OMAP3430_VPENABLE, OMAP3430_GR_MOD, ++ vp_config_offs); ++ ++ /* Wait for VP to be in IDLE - typical latency < 1 microsecond */ ++ c = 0; ++ while (c < VP_IDLE_TIMEOUT && ++ !(prm_read_mod_reg(OMAP3430_GR_MOD, vp_status_offs) & ++ OMAP3430_VPINIDLE)) { ++ /* ++ * XXX This udelay(1) will wait for longer than 1 ++ * microsecond when switching to a lower OPP, since ++ * loops_per_jiffy is not yet updated at this point ++ */ ++ udelay(1); ++ c++; ++ } ++ ++ /* XXX Need clarity from TI on what to do if the timeout is reached */ ++ WARN(c == VP_IDLE_TIMEOUT, "VP: IDLE timeout exceeded"); ++} ++ ++static void sr_disable(struct omap_sr *sr) ++{ ++ u32 srconfig; ++ int c; ++ u8 retries = 0; ++ ++ /* Check to see if SR is already disabled. 
If so, skip */ ++ srconfig = sr_read_reg(sr, SRCONFIG); ++ if (!(srconfig & SRCONFIG_SRENABLE)) { ++ /* XXX In callers, add disable VP after sr_clk_disable() etc */ ++ sr->is_sr_reset = 1; ++ return; ++ } ++ ++ /* Enable MCUDisableAcknowledge interrupt */ ++ sr_modify_reg(sr, ERRCONFIG, ERRCONFIG_MCUDISACKINTEN | ++ ERRCONFIG_INTERRUPT_STATUS_MASK, ++ ERRCONFIG_MCUDISACKINTEN); ++ ++ /* Clear SREnable */ ++ srconfig &= ~SRCONFIG_SRENABLE; ++ sr_write_reg(sr, SRCONFIG, srconfig); ++ ++ /* Disable VPBOUND interrupt enable and status */ ++ sr_modify_reg(sr, ERRCONFIG, ERRCONFIG_VPBOUNDINTEN | ++ ERRCONFIG_INTERRUPT_STATUS_MASK, ++ ERRCONFIG_VPBOUNDINTST); ++ ++ do { ++ c = 0; ++ /* Wait for SR to be disabled - typical time < 1 microsecond */ ++ while (c < SR_DISABLE_TIMEOUT && ++ !(sr_read_reg(sr, ERRCONFIG) & ERRCONFIG_MCUDISACKINTST)) { ++ /* ++ * XXX This udelay(1) will wait for longer than 1 ++ * microsecond when switching to a lower OPP, since ++ * loops_per_jiffy is not yet updated at this point ++ */ ++ udelay(1); ++ c++; ++ } ++ ++ /* Could be due to a board-level I2C4 problem */ ++ WARN(c == SR_DISABLE_TIMEOUT, "SR disable timed out - " ++ "should never happen"); ++ ++ } while ((c == SR_DISABLE_TIMEOUT) && ++ (++retries < SR_DISABLE_MAX_ATTEMPTS)); ++ ++ WARN(retries == SR_DISABLE_MAX_ATTEMPTS, "SR voltage change failed " ++ "despite %d retries - should never happen - system will likely " ++ "crash soon", SR_DISABLE_MAX_ATTEMPTS); ++ ++ /* Disable MCUDisableAck interrupt and clear pending */ ++ sr_modify_reg(sr, ERRCONFIG, (ERRCONFIG_MCUDISACKINTEN | ++ ERRCONFIG_INTERRUPT_STATUS_MASK), ++ ERRCONFIG_MCUDISACKINTST); ++ ++ /* Disable SR func clk - done by sr_clk_disable() */ ++ ++ sr->is_sr_reset = 1; + } + + +@@ -460,19 +754,16 @@ void sr_start_vddautocomap(int srid, u32 + sr = &sr1; + else if (srid == SR2) + sr = &sr2; ++ else ++ return; + + if (sr->is_sr_reset == 1) { + sr_clk_enable(sr); + sr_configure(sr); + } + +- if (sr->is_autocomp_active == 1) +- printk(KERN_WARNING "SR%d: VDD autocomp is already active\n", +- srid); +- + sr->is_autocomp_active = 1; + if (!sr_enable(sr, target_opp_no)) { +- printk(KERN_WARNING "SR%d: VDD autocomp not activated\n", srid); + sr->is_autocomp_active = 0; + if (sr->is_sr_reset == 1) + sr_clk_disable(sr); +@@ -488,18 +779,19 @@ int sr_stop_vddautocomap(int srid) + sr = &sr1; + else if (srid == SR2) + sr = &sr2; ++ else ++ return -EINVAL; + + if (sr->is_autocomp_active == 1) { ++ vp_disable(sr); + sr_disable(sr); + sr_clk_disable(sr); + sr->is_autocomp_active = 0; ++ /* Reset the volatage for current OPP */ ++ sr_reset_voltage(srid); + return SR_TRUE; +- } else { +- printk(KERN_WARNING "SR%d: VDD autocomp is not active\n", +- srid); ++ } else + return SR_FALSE; +- } +- + } + EXPORT_SYMBOL(sr_stop_vddautocomap); + +@@ -512,16 +804,15 @@ void enable_smartreflex(int srid) + sr = &sr1; + else if (srid == SR2) + sr = &sr2; ++ else ++ return; + + if (sr->is_autocomp_active == 1) { + if (sr->is_sr_reset == 1) { + /* Enable SR clks */ + sr_clk_enable(sr); + +- if (srid == SR1) +- target_opp_no = get_opp_no(current_vdd1_opp); +- else if (srid == SR2) +- target_opp_no = get_opp_no(current_vdd2_opp); ++ target_opp_no = sr->req_opp_no; + + sr_configure(sr); + +@@ -539,64 +830,80 @@ void disable_smartreflex(int srid) + sr = &sr1; + else if (srid == SR2) + sr = &sr2; ++ else ++ return; + + if (sr->is_autocomp_active == 1) { + if (sr->is_sr_reset == 0) { + + sr->is_sr_reset = 1; +- /* SRCONFIG - disable SR */ +- sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, +- 
~SRCONFIG_SRENABLE); +- ++ vp_disable(sr); ++ sr_disable(sr); + /* Disable SR clk */ + sr_clk_disable(sr); +- if (sr->srid == SR1) { +- /* Disable VP1 */ +- prm_clear_mod_reg_bits(PRM_VP1_CONFIG_VPENABLE, +- OMAP3430_GR_MOD, +- OMAP3_PRM_VP1_CONFIG_OFFSET); +- } else if (sr->srid == SR2) { +- /* Disable VP2 */ +- prm_clear_mod_reg_bits(PRM_VP2_CONFIG_VPENABLE, +- OMAP3430_GR_MOD, +- OMAP3_PRM_VP2_CONFIG_OFFSET); +- } ++ /* Reset the volatage for current OPP */ ++ sr_reset_voltage(srid); + } + } + } + + /* Voltage Scaling using SR VCBYPASS */ +-int sr_voltagescale_vcbypass(u32 target_opp, u8 vsel) ++int sr_voltagescale_vcbypass(u32 target_opp, u32 current_opp, ++ u8 target_vsel, u8 current_vsel) + { +- int sr_status = 0; +- u32 vdd, target_opp_no; ++ u32 vdd, target_opp_no, current_opp_no; + u32 vc_bypass_value; + u32 reg_addr = 0; + u32 loop_cnt = 0, retries_cnt = 0; ++ u32 t2_smps_steps = 0; ++ u32 t2_smps_delay = 0; ++ u32 vc_cmd_val_offs, vp_config_offs; ++ u32 errorgain; ++ struct omap_sr *sr; + + vdd = get_vdd(target_opp); + target_opp_no = get_opp_no(target_opp); ++ current_opp_no = get_opp_no(current_opp); + + if (vdd == PRCM_VDD1) { +- sr_status = sr_stop_vddautocomap(SR1); ++ t2_smps_steps = abs(target_vsel - current_vsel); ++ errorgain = (target_opp_no > SR_MAX_LOW_OPP) ? ++ PRM_VP1_CONFIG_ERRORGAIN_HIGHOPP : ++ PRM_VP1_CONFIG_ERRORGAIN_LOWOPP; + +- prm_rmw_mod_reg_bits(OMAP3430_VC_CMD_ON_MASK, +- (vsel << OMAP3430_VC_CMD_ON_SHIFT), +- OMAP3430_GR_MOD, +- OMAP3_PRM_VC_CMD_VAL_0_OFFSET); ++ vc_cmd_val_offs = OMAP3_PRM_VC_CMD_VAL_0_OFFSET; ++ vp_config_offs = OMAP3_PRM_VP1_CONFIG_OFFSET; + reg_addr = R_VDD1_SR_CONTROL; +- ++ sr = &sr1; + } else if (vdd == PRCM_VDD2) { +- sr_status = sr_stop_vddautocomap(SR2); ++ t2_smps_steps = abs(target_vsel - current_vsel); ++ errorgain = (target_opp_no > SR_MAX_LOW_OPP) ? ++ PRM_VP2_CONFIG_ERRORGAIN_HIGHOPP : ++ PRM_VP2_CONFIG_ERRORGAIN_LOWOPP; + +- prm_rmw_mod_reg_bits(OMAP3430_VC_CMD_ON_MASK, +- (vsel << OMAP3430_VC_CMD_ON_SHIFT), +- OMAP3430_GR_MOD, +- OMAP3_PRM_VC_CMD_VAL_1_OFFSET); ++ vc_cmd_val_offs = OMAP3_PRM_VC_CMD_VAL_1_OFFSET; ++ vp_config_offs = OMAP3_PRM_VP2_CONFIG_OFFSET; + reg_addr = R_VDD2_SR_CONTROL; ++ sr = &sr2; ++ } else { ++ WARN(1, "SR: invalid VDD in vcbypass scale"); ++ return SR_FAIL; + } + +- vc_bypass_value = (vsel << OMAP3430_DATA_SHIFT) | ++ if (sr->is_autocomp_active) { ++ WARN(1, "SR: Must not transmit VCBYPASS command while SR is " ++ "active"); ++ return SR_FAIL; ++ } ++ ++ prm_rmw_mod_reg_bits(OMAP3430_ERRORGAIN_MASK, errorgain, ++ OMAP3430_GR_MOD, vp_config_offs); ++ ++ prm_rmw_mod_reg_bits(OMAP3430_VC_CMD_ON_MASK, ++ (target_vsel << OMAP3430_VC_CMD_ON_SHIFT), ++ OMAP3430_GR_MOD, vc_cmd_val_offs); ++ ++ vc_bypass_value = (target_vsel << OMAP3430_DATA_SHIFT) | + (reg_addr << OMAP3430_REGADDR_SHIFT) | + (R_SRI2C_SLAVE_ADDR << OMAP3430_SLAVEADDR_SHIFT); + +@@ -622,14 +929,13 @@ int sr_voltagescale_vcbypass(u32 target_ + OMAP3_PRM_VC_BYPASS_VAL_OFFSET); + } + +- udelay(T2_SMPS_UPDATE_DELAY); ++ /* ++ * T2 SMPS slew rate (min) 4mV/uS, step size 12.5mV, ++ * 2us added as buffer. 
++ */ ++ t2_smps_delay = ((t2_smps_steps * 125) / 40) + 2; ++ udelay(t2_smps_delay); + +- if (sr_status) { +- if (vdd == PRCM_VDD1) +- sr_start_vddautocomap(SR1, target_opp_no); +- else if (vdd == PRCM_VDD2) +- sr_start_vddautocomap(SR2, target_opp_no); +- } + + return SR_PASS; + } +@@ -645,7 +951,6 @@ static ssize_t omap_sr_vdd1_autocomp_sto + struct kobj_attribute *attr, + const char *buf, size_t n) + { +- u32 current_vdd1opp_no; + unsigned short value; + + if (sscanf(buf, "%hu", &value) != 1 || (value > 1)) { +@@ -653,12 +958,20 @@ static ssize_t omap_sr_vdd1_autocomp_sto + return -EINVAL; + } + +- current_vdd1opp_no = get_opp_no(current_vdd1_opp); ++ mutex_lock(&dvfs_mutex); + +- if (value == 0) ++ if (value == 0) { + sr_stop_vddautocomap(SR1); +- else ++ } else { ++ u32 current_vdd1opp_no = resource_get_level("vdd1_opp"); ++ if (IS_ERR_VALUE(current_vdd1opp_no)) { ++ mutex_unlock(&dvfs_mutex); ++ return -ENODEV; ++ } + sr_start_vddautocomap(SR1, current_vdd1opp_no); ++ } ++ ++ mutex_unlock(&dvfs_mutex); + + return n; + } +@@ -691,13 +1004,17 @@ static ssize_t omap_sr_vdd2_autocomp_sto + return -EINVAL; + } + +- current_vdd2opp_no = get_opp_no(current_vdd2_opp); ++ mutex_lock(&dvfs_mutex); ++ ++ current_vdd2opp_no = resource_get_level("vdd2_opp"); + + if (value == 0) + sr_stop_vddautocomap(SR2); + else + sr_start_vddautocomap(SR2, current_vdd2opp_no); + ++ mutex_unlock(&dvfs_mutex); ++ + return n; + } + +@@ -710,20 +1027,34 @@ static struct kobj_attribute sr_vdd2_aut + .store = omap_sr_vdd2_autocomp_store, + }; + ++static ssize_t omap_sr_opp1_efuse_show(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ char *buf) ++{ ++ return sprintf(buf, "%08x\n", sr1.opp1_nvalue); ++} + ++static struct kobj_attribute sr_opp1_efuse = { ++ .attr = { ++ .name = "OPP1Efuse", ++ .mode = 0444, ++ }, ++ .show = omap_sr_opp1_efuse_show, ++}; + + static int __init omap3_sr_init(void) + { + int ret = 0; + u8 RdReg; + +- if (omap_rev() > OMAP3430_REV_ES1_0) { +- current_vdd1_opp = PRCM_VDD1_OPP3; +- current_vdd2_opp = PRCM_VDD2_OPP3; +- } else { +- current_vdd1_opp = PRCM_VDD1_OPP1; +- current_vdd2_opp = PRCM_VDD1_OPP1; +- } ++ /* Enable SR on T2 */ ++ ret = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &RdReg, ++ R_DCDC_GLOBAL_CFG); ++ ++ RdReg |= DCDC_GLOBAL_CFG_ENABLE_SRFLX; ++ ret |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, RdReg, ++ R_DCDC_GLOBAL_CFG); ++ + if (cpu_is_omap34xx()) { + sr1.clk = clk_get(NULL, "sr1_fck"); + sr2.clk = clk_get(NULL, "sr2_fck"); +@@ -738,14 +1069,6 @@ static int __init omap3_sr_init(void) + sr_set_nvalues(&sr2); + sr_configure_vp(SR2); + +- /* Enable SR on T2 */ +- ret = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &RdReg, +- R_DCDC_GLOBAL_CFG); +- +- RdReg |= DCDC_GLOBAL_CFG_ENABLE_SRFLX; +- ret |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, RdReg, +- R_DCDC_GLOBAL_CFG); +- + printk(KERN_INFO "SmartReflex driver initialized\n"); + + ret = sysfs_create_file(power_kobj, &sr_vdd1_autocomp.attr); +@@ -756,6 +1079,10 @@ static int __init omap3_sr_init(void) + if (ret) + printk(KERN_ERR "sysfs_create_file failed: %d\n", ret); + ++ ret = sysfs_create_file(power_kobj, &sr_opp1_efuse.attr); ++ if (ret) ++ printk(KERN_ERR "sysfs_create_file failed for OPP data: %d\n", ret); ++ + return 0; + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/smartreflex.h kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/smartreflex.h +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/smartreflex.h 2011-09-04 11:32:10.003211266 +0200 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/smartreflex.h 2011-09-04 11:31:05.000000000 +0200 +@@ -48,7 +48,8 @@ + + /* PRM_VP1_CONFIG */ + #define PRM_VP1_CONFIG_ERROROFFSET (0x00 << 24) +-#define PRM_VP1_CONFIG_ERRORGAIN (0x20 << 16) ++#define PRM_VP1_CONFIG_ERRORGAIN_LOWOPP (0x0C << 16) /* OPPs 1,2 */ ++#define PRM_VP1_CONFIG_ERRORGAIN_HIGHOPP (0x18 << 16) /* OPPs 3,4,5 */ + + #define PRM_VP1_CONFIG_INITVOLTAGE (0x30 << 8) /* 1.2 volt */ + #define PRM_VP1_CONFIG_TIMEOUTEN (0x1 << 3) +@@ -66,12 +67,13 @@ + + /* PRM_VP1_VLIMITTO */ + #define PRM_VP1_VLIMITTO_VDDMAX (0x3C << 24) +-#define PRM_VP1_VLIMITTO_VDDMIN (0x0 << 16) +-#define PRM_VP1_VLIMITTO_TIMEOUT (0xFFFF << 0) ++#define PRM_VP1_VLIMITTO_VDDMIN (0x14 << 16) ++#define PRM_VP1_VLIMITTO_TIMEOUT (0xF00 << 0) + + /* PRM_VP2_CONFIG */ + #define PRM_VP2_CONFIG_ERROROFFSET (0x00 << 24) +-#define PRM_VP2_CONFIG_ERRORGAIN (0x20 << 16) ++#define PRM_VP2_CONFIG_ERRORGAIN_LOWOPP (0x0C << 16) /* OPPs 1,2 */ ++#define PRM_VP2_CONFIG_ERRORGAIN_HIGHOPP (0x18 << 16) /* OPPs 3,4,5 */ + + #define PRM_VP2_CONFIG_INITVOLTAGE (0x30 << 8) /* 1.2 volt */ + #define PRM_VP2_CONFIG_TIMEOUTEN (0x1 << 3) +@@ -89,8 +91,8 @@ + + /* PRM_VP2_VLIMITTO */ + #define PRM_VP2_VLIMITTO_VDDMAX (0x2C << 24) +-#define PRM_VP2_VLIMITTO_VDDMIN (0x0 << 16) +-#define PRM_VP2_VLIMITTO_TIMEOUT (0xFFFF << 0) ++#define PRM_VP2_VLIMITTO_VDDMIN (0x18 << 16) ++#define PRM_VP2_VLIMITTO_TIMEOUT (0xF00 << 0) + + /* SRCONFIG */ + #define SR1_SRCONFIG_ACCUMDATA (0x1F4 << 22) +@@ -139,13 +141,34 @@ + #define ERRCONFIG_VPBOUNDINTEN (0x1 << 31) + #define ERRCONFIG_VPBOUNDINTST (0x1 << 30) + ++#define ERRCONFIG_MCUDISACKINTEN (0x1 << 23) ++#define ERRCONFIG_MCUDISACKINTST (0x1 << 22) ++ ++/* Status Bits */ ++#define ERRCONFIG_MCUACCUMINTST (0x1 << 28) ++#define ERRCONFIG_MCUVALIDINTST (0x1 << 26) ++#define ERRCONFIG_MCUBOUNDINTST (0x1 << 24) ++#define ERRCONFIG_RESERVED (0x1 << 19) ++ ++/* WARNING: Ensure all access to errconfig register skips ++ * clearing intst bits to ensure that we dont clear status ++ * bits unwantedly.. esp vpbound ++ */ ++#define ERRCONFIG_INTERRUPT_STATUS_MASK (ERRCONFIG_VPBOUNDINTST |\ ++ ERRCONFIG_MCUACCUMINTST |\ ++ ERRCONFIG_MCUVALIDINTST |\ ++ ERRCONFIG_MCUBOUNDINTST |\ ++ ERRCONFIG_MCUDISACKINTST | ERRCONFIG_RESERVED) ++ + #define SR1_ERRWEIGHT (0x07 << 16) + #define SR1_ERRMAXLIMIT (0x02 << 8) +-#define SR1_ERRMINLIMIT (0xFA << 0) ++#define SR1_ERRMINLIMIT_LOWOPP (0xF4 << 0) /* OPP1, 2 */ ++#define SR1_ERRMINLIMIT_HIGHOPP (0xF9 << 0) /* OPP3, 4, 5 */ + + #define SR2_ERRWEIGHT (0x07 << 16) + #define SR2_ERRMAXLIMIT (0x02 << 8) +-#define SR2_ERRMINLIMIT (0xF9 << 0) ++#define SR2_ERRMINLIMIT_LOWOPP (0xF4 << 0) /* OPP1, 2 */ ++#define SR2_ERRMINLIMIT_HIGHOPP (0xF9 << 0) /* OPP3, 4, 5 */ + + /* T2 SMART REFLEX */ + #define R_SRI2C_SLAVE_ADDR 0x12 +@@ -230,6 +253,9 @@ + #define PRCM_NO_VDD2_OPPS 3 + /* XXX: end remove/move */ + ++/* SR_MAX_LOW_OPP: the highest of the "low OPPs", 1 and 2. 
*/ ++#define SR_MAX_LOW_OPP 2 ++ + /* XXX: find more appropriate place for these once DVFS is in place */ + extern u32 current_vdd1_opp; + extern u32 current_vdd2_opp; +@@ -248,7 +274,7 @@ extern u32 current_vdd2_opp; + #ifdef CONFIG_OMAP_SMARTREFLEX + void enable_smartreflex(int srid); + void disable_smartreflex(int srid); +-int sr_voltagescale_vcbypass(u32 target_opp, u8 vsel); ++int sr_voltagescale_vcbypass(u32 t_opp, u32 c_opp, u8 t_vsel, u8 c_vsel); + void sr_start_vddautocomap(int srid, u32 target_opp_no); + int sr_stop_vddautocomap(int srid); + #else +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sram34xx.S kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sram34xx.S +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/sram34xx.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/sram34xx.S 2011-09-04 11:31:05.000000000 +0200 +@@ -3,13 +3,12 @@ + * + * Omap3 specific functions that need to be run in internal SRAM + * +- * (C) Copyright 2007 +- * Texas Instruments Inc. +- * Rajendra Nayak ++ * Copyright (C) 2004, 2007, 2008 Texas Instruments, Inc. ++ * Copyright (C) 2008 Nokia Corporation + * +- * (C) Copyright 2004 +- * Texas Instruments, ++ * Rajendra Nayak + * Richard Woodruff ++ * Paul Walmsley + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as +@@ -37,121 +36,180 @@ + + .text + ++/* r1 parameters */ ++#define SDRC_NO_UNLOCK_DLL 0x0 ++#define SDRC_UNLOCK_DLL 0x1 ++ ++/* SDRC_DLLA_CTRL bit settings */ ++#define DLLIDLE_MASK 0x4 ++ ++/* SDRC_DLLA_STATUS bit settings */ ++#define LOCKSTATUS_MASK 0x4 ++ ++/* SDRC_POWER bit settings */ ++#define SRFRONIDLEREQ_MASK 0x40 ++#define PWDENA_MASK 0x4 ++ ++/* CM_IDLEST1_CORE bit settings */ ++#define ST_SDRC_MASK 0x2 ++ ++/* CM_ICLKEN1_CORE bit settings */ ++#define EN_SDRC_MASK 0x2 ++ ++/* CM_CLKSEL1_PLL bit settings */ ++#define CORE_DPLL_CLKOUT_DIV_SHIFT 0x1b ++ + /* +- * Change frequency of core dpll +- * r0 = sdrc_rfr_ctrl r1 = sdrc_actim_ctrla r2 = sdrc_actim_ctrlb r3 = M2 ++ * omap3_sram_configure_core_dpll - change DPLL3 M2 divider ++ * ++ * Params passed in registers: ++ * r0 = new M2 divider setting (only 1 and 2 supported right now) ++ * r1 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for ++ * SDRC rates < 83MHz ++ * r2 = number of MPU cycles to wait for SDRC to stabilize after ++ * reprogramming the SDRC when switching to a slower MPU speed ++ * r3 = increasing SDRC rate? (1 = yes, 0 = no) ++ * ++ * Params passed via the stack. 
The needed params will be copied in SRAM ++ * before use by the code in SRAM (SDRAM is not accessible during SDRC ++ * reconfiguration): ++ * new SDRC_RFR_CTRL_0 register contents ++ * new SDRC_MR_0 register value ++ * new SDRC_RFR_CTRL_1 register contents ++ * new SDRC_MR_1 register value ++ * ++ * If the param SDRC_RFR_CTRL_1 is 0, the parameters ++ * are not programmed into the SDRC CS1 registers + */ + ENTRY(omap3_sram_configure_core_dpll) + stmfd sp!, {r1-r12, lr} @ store regs to stack +- cmp r3, #0x2 +- blne configure_sdrc +- cmp r3, #0x2 ++ ++ @ pull the extra args off the stack ++ @ and store them in SRAM ++ ldr r4, [sp, #52] ++ str r4, omap_sdrc_rfr_ctrl_0_val ++ ldr r4, [sp, #56] ++ str r4, omap_sdrc_mr_0_val ++ ldr r4, [sp, #60] ++ str r4, omap_sdrc_rfr_ctrl_1_val ++ cmp r4, #0 @ if SDRC_RFR_CTRL_1 is 0, ++ beq skip_cs1_params @ do not use cs1 params ++ ldr r4, [sp, #64] ++ str r4, omap_sdrc_mr_1_val ++skip_cs1_params: ++ dsb @ flush buffered writes to interconnect ++ ++ cmp r3, #1 @ if increasing SDRC clk rate, ++ bleq configure_sdrc @ program the SDRC regs early (for RFR) ++ cmp r1, #SDRC_UNLOCK_DLL @ set the intended DLL state ++ bleq unlock_dll + blne lock_dll +- cmp r3, #0x1 +- blne unlock_dll +- bl sdram_in_selfrefresh @ put the SDRAM in self refresh +- bl configure_core_dpll +- bl enable_sdrc +- cmp r3, #0x1 +- blne wait_dll_unlock +- cmp r3, #0x2 ++ bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC ++ bl configure_core_dpll @ change the DPLL3 M2 divider ++ mov r12, r2 ++ bl wait_clk_stable @ wait for SDRC to stabilize ++ bl enable_sdrc @ take SDRC out of idle ++ cmp r1, #SDRC_UNLOCK_DLL @ wait for DLL status to change ++ bleq wait_dll_unlock + blne wait_dll_lock +- cmp r3, #0x1 +- blne configure_sdrc ++ cmp r3, #1 @ if increasing SDRC clk rate, ++ beq return_to_sdram @ return to SDRAM code, otherwise, ++ bl configure_sdrc @ reprogram SDRC regs now ++return_to_sdram: ++ isb @ prevent speculative exec past here + mov r0, #0 @ return value + ldmfd sp!, {r1-r12, pc} @ restore regs and return + unlock_dll: +- ldr r4, omap3_sdrc_dlla_ctrl +- ldr r5, [r4] +- orr r5, r5, #0x4 +- str r5, [r4] ++ ldr r11, omap3_sdrc_dlla_ctrl ++ ldr r12, [r11] ++ orr r12, r12, #DLLIDLE_MASK ++ str r12, [r11] @ (no OCP barrier needed) + bx lr + lock_dll: +- ldr r4, omap3_sdrc_dlla_ctrl +- ldr r5, [r4] +- bic r5, r5, #0x4 +- str r5, [r4] ++ ldr r11, omap3_sdrc_dlla_ctrl ++ ldr r12, [r11] ++ bic r12, r12, #DLLIDLE_MASK ++ str r12, [r11] @ (no OCP barrier needed) + bx lr + sdram_in_selfrefresh: +- mov r5, #0x0 @ Move 0 to R5 +- mcr p15, 0, r5, c7, c10, 5 @ memory barrier +- ldr r4, omap3_sdrc_power @ read the SDRC_POWER register +- ldr r5, [r4] @ read the contents of SDRC_POWER +- orr r5, r5, #0x40 @ enable self refresh on idle req +- str r5, [r4] @ write back to SDRC_POWER register +- ldr r4, omap3_cm_iclken1_core @ read the CM_ICLKEN1_CORE reg +- ldr r5, [r4] +- bic r5, r5, #0x2 @ disable iclk bit for SRDC +- str r5, [r4] ++ ldr r11, omap3_sdrc_power @ read the SDRC_POWER register ++ ldr r12, [r11] @ read the contents of SDRC_POWER ++ mov r9, r12 @ keep a copy of SDRC_POWER bits ++ orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle ++ bic r12, r12, #PWDENA_MASK @ clear PWDENA ++ str r12, [r11] @ write back to SDRC_POWER register ++ ldr r12, [r11] @ posted-write barrier for SDRC ++idle_sdrc: ++ ldr r11, omap3_cm_iclken1_core @ read the CM_ICLKEN1_CORE reg ++ ldr r12, [r11] ++ bic r12, r12, #EN_SDRC_MASK @ disable iclk bit for SDRC ++ str r12, [r11] + wait_sdrc_idle: +- ldr 
r4, omap3_cm_idlest1_core +- ldr r5, [r4] +- and r5, r5, #0x2 @ check for SDRC idle +- cmp r5, #2 ++ ldr r11, omap3_cm_idlest1_core ++ ldr r12, [r11] ++ and r12, r12, #ST_SDRC_MASK @ check for SDRC idle ++ cmp r12, #ST_SDRC_MASK + bne wait_sdrc_idle + bx lr + configure_core_dpll: +- ldr r4, omap3_cm_clksel1_pll +- ldr r5, [r4] +- ldr r6, core_m2_mask_val @ modify m2 for core dpll +- and r5, r5, r6 +- orr r5, r5, r3, lsl #0x1B @ r3 contains the M2 val +- str r5, [r4] +- mov r5, #0x800 @ wait for the clock to stabilise +- cmp r3, #2 +- bne wait_clk_stable ++ ldr r11, omap3_cm_clksel1_pll ++ ldr r12, [r11] ++ ldr r10, core_m2_mask_val @ modify m2 for core dpll ++ and r12, r12, r10 ++ orr r12, r12, r0, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT ++ str r12, [r11] ++ ldr r12, [r11] @ posted-write barrier for CM + bx lr + wait_clk_stable: +- subs r5, r5, #1 ++ subs r12, r12, #1 + bne wait_clk_stable +- nop +- nop +- nop +- nop +- nop +- nop +- nop +- nop +- nop +- nop + bx lr + enable_sdrc: +- ldr r4, omap3_cm_iclken1_core +- ldr r5, [r4] +- orr r5, r5, #0x2 @ enable iclk bit for SDRC +- str r5, [r4] ++ ldr r11, omap3_cm_iclken1_core ++ ldr r12, [r11] ++ orr r12, r12, #EN_SDRC_MASK @ enable iclk bit for SDRC ++ str r12, [r11] + wait_sdrc_idle1: +- ldr r4, omap3_cm_idlest1_core +- ldr r5, [r4] +- and r5, r5, #0x2 +- cmp r5, #0 ++ ldr r11, omap3_cm_idlest1_core ++ ldr r12, [r11] ++ and r12, r12, #ST_SDRC_MASK ++ cmp r12, #0 + bne wait_sdrc_idle1 +- ldr r4, omap3_sdrc_power +- ldr r5, [r4] +- bic r5, r5, #0x40 +- str r5, [r4] ++restore_sdrc_power_val: ++ ldr r11, omap3_sdrc_power ++ str r9, [r11] @ restore SDRC_POWER, no barrier needed + bx lr + wait_dll_lock: +- ldr r4, omap3_sdrc_dlla_status +- ldr r5, [r4] +- and r5, r5, #0x4 +- cmp r5, #0x4 ++ ldr r11, omap3_sdrc_dlla_status ++ ldr r12, [r11] ++ and r12, r12, #LOCKSTATUS_MASK ++ cmp r12, #LOCKSTATUS_MASK + bne wait_dll_lock + bx lr + wait_dll_unlock: +- ldr r4, omap3_sdrc_dlla_status +- ldr r5, [r4] +- and r5, r5, #0x4 +- cmp r5, #0x0 ++ ldr r11, omap3_sdrc_dlla_status ++ ldr r12, [r11] ++ and r12, r12, #LOCKSTATUS_MASK ++ cmp r12, #0x0 + bne wait_dll_unlock + bx lr + configure_sdrc: +- ldr r4, omap3_sdrc_rfr_ctrl +- str r0, [r4] +- ldr r4, omap3_sdrc_actim_ctrla +- str r1, [r4] +- ldr r4, omap3_sdrc_actim_ctrlb +- str r2, [r4] ++ ldr r12, omap_sdrc_rfr_ctrl_0_val @ fetch value from SRAM ++ ldr r11, omap3_sdrc_rfr_ctrl_0 @ fetch addr from SRAM ++ str r12, [r11] @ store ++ ldr r12, omap_sdrc_mr_0_val ++ ldr r11, omap3_sdrc_mr_0 ++ str r12, [r11] ++ ldr r12, omap_sdrc_rfr_ctrl_1_val ++ cmp r12, #0 @ if SDRC_RFR_CTRL_1 is 0, ++ beq skip_cs1_prog @ do not program cs1 params ++ ldr r11, omap3_sdrc_rfr_ctrl_1 ++ str r12, [r11] ++ ldr r12, omap_sdrc_mr_1_val ++ ldr r11, omap3_sdrc_mr_1 ++ str r12, [r11] ++skip_cs1_prog: ++ ldr r12, [r11] @ posted-write barrier for SDRC + bx lr + + omap3_sdrc_power: +@@ -162,12 +220,24 @@ omap3_cm_idlest1_core: + .word OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST) + omap3_cm_iclken1_core: + .word OMAP34XX_CM_REGADDR(CORE_MOD, CM_ICLKEN1) +-omap3_sdrc_rfr_ctrl: ++ ++omap3_sdrc_rfr_ctrl_0: + .word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_0) +-omap3_sdrc_actim_ctrla: +- .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0) +-omap3_sdrc_actim_ctrlb: +- .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0) ++omap3_sdrc_rfr_ctrl_1: ++ .word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_1) ++omap3_sdrc_mr_0: ++ .word OMAP34XX_SDRC_REGADDR(SDRC_MR_0) ++omap3_sdrc_mr_1: ++ .word OMAP34XX_SDRC_REGADDR(SDRC_MR_1) ++omap_sdrc_rfr_ctrl_0_val: ++ .word 0xDEADBEEF 
++omap_sdrc_rfr_ctrl_1_val: ++ .word 0xDEADBEEF ++omap_sdrc_mr_0_val: ++ .word 0xDEADBEEF ++omap_sdrc_mr_1_val: ++ .word 0xDEADBEEF ++ + omap3_sdrc_dlla_status: + .word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS) + omap3_sdrc_dlla_ctrl: +@@ -177,3 +247,4 @@ core_m2_mask_val: + + ENTRY(omap3_sram_configure_core_dpll_sz) + .word . - omap3_sram_configure_core_dpll ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/ssi.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/ssi.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/ssi.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/ssi.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,497 @@ ++/* ++ * arch/arm/mach-omap2/ssi.c ++ * ++ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. ++ * ++ * Contact: Carlos Chinea ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "clock.h" ++#include ++#include "cm.h" ++#include "cm-regbits-34xx.h" ++ ++#define SSI_RATE_CHANGE 1 ++ ++/** ++ * struct ssi_internal_clk - Generic virtual ssi clock ++ * @clk: clock data ++ * @nb: notfier block for the DVFS notification chain ++ * @childs: Array of SSI FCK and ICK clocks ++ * @n_childs: Number of clocks in childs array ++ * @rate_change: Tracks if we are in the middle of a clock rate change ++ * @pdev: Reference to the SSI platform device associated to the clock ++ * @drv_nb: Reference to driver nb, use to propagate the DVFS notification ++ */ ++struct ssi_internal_clk { ++ struct clk clk; ++ struct notifier_block nb; ++ ++ struct clk **childs; ++ int n_childs; ++ ++ unsigned int rate_change:1; ++ ++ struct platform_device *pdev; ++ struct notifier_block *drv_nb; ++}; ++ ++static struct ssi_internal_clk ssi_clock; ++ ++static void ssi_set_mode(struct platform_device *pdev, u32 mode) ++{ ++ struct ssi_platform_data *pdata = pdev->dev.platform_data; ++ void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start); ++ int port; ++ ++ for (port = 1; port <= pdata->num_ports; port++) { ++ outl(mode, base + SSI_SST_MODE_REG(port)); ++ outl(mode, base + SSI_SSR_MODE_REG(port)); ++ } ++} ++ ++static void ssi_save_mode(struct platform_device *pdev) ++{ ++ struct ssi_platform_data *pdata = pdev->dev.platform_data; ++ void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start); ++ struct port_ctx *p; ++ int port; ++ ++ for (port = 1; port <= pdata->num_ports; port++) { ++ p = &pdata->ctx.pctx[port - 1]; ++ p->sst.mode = inl(base + SSI_SST_MODE_REG(port)); ++ p->ssr.mode = inl(base + SSI_SSR_MODE_REG(port)); ++ } ++} ++ ++static void ssi_restore_mode(struct platform_device *pdev) ++{ ++ struct ssi_platform_data *pdata = pdev->dev.platform_data; ++ void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start); ++ struct port_ctx *p; ++ int port; ++ ++ for (port = 1; 
port <= pdata->num_ports; port++) { ++ p = &pdata->ctx.pctx[port - 1]; ++ outl(p->sst.mode, base + SSI_SST_MODE_REG(port)); ++ outl(p->ssr.mode, base + SSI_SSR_MODE_REG(port)); ++ } ++} ++ ++static int ssi_clk_event(struct notifier_block *nb, unsigned long event, ++ void *data) ++{ ++ struct ssi_internal_clk *ssi_clk = ++ container_of(nb, struct ssi_internal_clk, nb); ++ switch (event) { ++ case CLK_PRE_RATE_CHANGE: ++ ssi_clk->drv_nb->notifier_call(ssi_clk->drv_nb, event, data); ++ ssi_clk->rate_change = 1; ++ if (ssi_clk->clk.usecount > 0) { ++ ssi_save_mode(ssi_clk->pdev); ++ ssi_set_mode(ssi_clk->pdev, SSI_MODE_SLEEP); ++ } ++ break; ++ case CLK_ABORT_RATE_CHANGE: ++ case CLK_POST_RATE_CHANGE: ++ if ((ssi_clk->clk.usecount > 0) && (ssi_clk->rate_change)) ++ ssi_restore_mode(ssi_clk->pdev); ++ ++ ssi_clk->rate_change = 0; ++ ssi_clk->drv_nb->notifier_call(ssi_clk->drv_nb, event, data); ++ break; ++ default: ++ break; ++ } ++ ++ return NOTIFY_DONE; ++} ++ ++static int ssi_clk_notifier_register(struct clk *clk, struct notifier_block *nb) ++{ ++ struct ssi_internal_clk *ssi_clk; ++ ++ if (!clk || !nb) ++ return -EINVAL; ++ ++ ssi_clk = container_of(clk, struct ssi_internal_clk, clk); ++ ssi_clk->drv_nb = nb; ++ ssi_clk->nb.priority = nb->priority; ++ /* NOTE: We only want notifications from the functional clock */ ++ return clk_notifier_register(ssi_clk->childs[1], &ssi_clk->nb); ++} ++ ++static int ssi_clk_notifier_unregister(struct clk *clk, ++ struct notifier_block *nb) ++{ ++ struct ssi_internal_clk *ssi_clk; ++ ++ if (!clk || !nb) ++ return -EINVAL; ++ ++ ssi_clk = container_of(clk, struct ssi_internal_clk, clk); ++ ssi_clk->drv_nb = NULL; ++ return clk_notifier_unregister(ssi_clk->childs[1], &ssi_clk->nb); ++} ++ ++static void ssi_save_ctx(struct platform_device *pdev) ++{ ++ struct ssi_platform_data *pdata = pdev->dev.platform_data; ++ void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start); ++ struct port_ctx *p; ++ int port; ++ ++ pdata->ctx.loss_count = ++ omap_pm_get_dev_context_loss_count(&pdev->dev); ++ ++ pdata->ctx.sysconfig = inl(base + SSI_SYS_SYSCONFIG_REG); ++ pdata->ctx.gdd_gcr = inl(base + SSI_GDD_GCR_REG); ++ for (port = 1; port <= pdata->num_ports; port++) { ++ p = &pdata->ctx.pctx[port - 1]; ++ p->sys_mpu_enable[0] = inl(base + ++ SSI_SYS_MPU_ENABLE_REG(port, 0)); ++ p->sys_mpu_enable[1] = inl(base + ++ SSI_SYS_MPU_ENABLE_REG(port, 1)); ++ p->sst.frame_size = inl(base + ++ SSI_SST_FRAMESIZE_REG(port)); ++ p->sst.divisor = inl(base + SSI_SST_DIVISOR_REG(port)); ++ p->sst.channels = inl(base + SSI_SST_CHANNELS_REG(port)); ++ p->sst.arb_mode = inl(base + SSI_SST_ARBMODE_REG(port)); ++ p->ssr.frame_size = inl(base + ++ SSI_SSR_FRAMESIZE_REG(port)); ++ p->ssr.timeout = inl(base + SSI_SSR_TIMEOUT_REG(port)); ++ p->ssr.channels = inl(base + SSI_SSR_CHANNELS_REG(port)); ++ } ++} ++ ++static void ssi_restore_ctx(struct platform_device *pdev) ++{ ++ struct ssi_platform_data *pdata = pdev->dev.platform_data; ++ void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start); ++ struct port_ctx *p; ++ int port; ++ int loss_count; ++ ++ ++ loss_count = omap_pm_get_dev_context_loss_count(&pdev->dev); ++#if 0 ++ if (loss_count == pdata->ctx.loss_count) ++ return; ++#endif ++ outl(pdata->ctx.sysconfig, base + SSI_SYS_SYSCONFIG_REG); ++ outl(pdata->ctx.gdd_gcr, base + SSI_GDD_GCR_REG); ++ for (port = 1; port <= pdata->num_ports; port++) { ++ p = &pdata->ctx.pctx[port - 1]; ++ outl(p->sys_mpu_enable[0], base + ++ SSI_SYS_MPU_ENABLE_REG(port, 0)); ++ outl(p->sys_mpu_enable[1], 
base + ++ SSI_SYS_MPU_ENABLE_REG(port, 1)); ++ outl(p->sst.frame_size, base + ++ SSI_SST_FRAMESIZE_REG(port)); ++ outl(p->sst.divisor, base + SSI_SST_DIVISOR_REG(port)); ++ outl(p->sst.channels, base + SSI_SST_CHANNELS_REG(port)); ++ outl(p->sst.arb_mode, base + SSI_SST_ARBMODE_REG(port)); ++ outl(p->ssr.frame_size, base + ++ SSI_SSR_FRAMESIZE_REG(port)); ++ outl(p->ssr.timeout, base + SSI_SSR_TIMEOUT_REG(port)); ++ outl(p->ssr.channels, base + SSI_SSR_CHANNELS_REG(port)); ++ } ++} ++ ++static void ssi_pdev_release(struct device *dev) ++{ ++} ++ ++/* ++ * NOTE: We abuse a little bit the struct port_ctx to use it also for ++ * initialization. ++ */ ++static struct port_ctx ssi_port_ctx[] = { ++ [0] = { ++ .sst.mode = SSI_MODE_FRAME, ++ .sst.frame_size = SSI_FRAMESIZE_DEFAULT, ++ .sst.divisor = 1, ++ .sst.channels = SSI_CHANNELS_DEFAULT, ++ .sst.arb_mode = SSI_ARBMODE_ROUNDROBIN, ++ .ssr.mode = SSI_MODE_FRAME, ++ .ssr.frame_size = SSI_FRAMESIZE_DEFAULT, ++ .ssr.channels = SSI_CHANNELS_DEFAULT, ++ .ssr.timeout = SSI_TIMEOUT_DEFAULT, ++ }, ++}; ++ ++static struct ssi_platform_data ssi_pdata = { ++ .num_ports = ARRAY_SIZE(ssi_port_ctx), ++ .ctx.pctx = ssi_port_ctx, ++ .clk_notifier_register = ssi_clk_notifier_register, ++ .clk_notifier_unregister = ssi_clk_notifier_unregister, ++}; ++ ++static struct resource ssi_resources[] = { ++ [0] = { ++ .start = 0x48058000, ++ .end = 0x4805bbff, ++ .name = "omap_ssi_iomem", ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = 67, ++ .end = 67, ++ .name = "ssi_p1_mpu_irq0", ++ .flags = IORESOURCE_IRQ, ++ }, ++ [2] = { ++ .start = 69, ++ .end = 69, ++ .name = "ssi_p1_mpu_irq1", ++ .flags = IORESOURCE_IRQ, ++ }, ++ [3] = { ++ .start = 68, ++ .end = 68, ++ .name = "ssi_p2_mpu_irq0", ++ .flags = IORESOURCE_IRQ, ++ }, ++ [4] = { ++ .start = 70, ++ .end = 70, ++ .name = "ssi_p2_mpu_irq1", ++ .flags = IORESOURCE_IRQ, ++ }, ++ [5] = { ++ .start = 71, ++ .end = 71, ++ .name = "ssi_gdd", ++ .flags = IORESOURCE_IRQ, ++ }, ++ [6] = { ++ .start = 151, ++ .end = 0, ++ .name = "ssi_p1_cawake_gpio", ++ .flags = IORESOURCE_IRQ | IORESOURCE_UNSET, ++ }, ++ [7] = { ++ .start = 0, ++ .end = 0, ++ .name = "ssi_p2_cawake_gpio", ++ .flags = IORESOURCE_IRQ | IORESOURCE_UNSET, ++ }, ++}; ++ ++static struct platform_device ssi_pdev = { ++ .name = "omap_ssi", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(ssi_resources), ++ .resource = ssi_resources, ++ .dev = { ++ .release = ssi_pdev_release, ++ .platform_data = &ssi_pdata, ++ }, ++}; ++ ++#define __SSI_CLK_FIX__ ++#ifdef __SSI_CLK_FIX__ ++/* ++ * FIXME: TO BE REMOVED. ++ * This hack allows us to ensure that clocks are stable before accessing ++ * SSI controller registers. To be removed when PM functionalty is in place. ++ */ ++static int check_ssi_active(void) ++{ ++ u32 reg; ++ unsigned long dl = jiffies + msecs_to_jiffies(500); ++ void __iomem *cm_idlest1 = OMAP2_IO_ADDRESS(0x48004a20); ++ ++ reg = inl(cm_idlest1); ++ while ((!(reg & 0x01)) && (time_before(jiffies, dl))) ++ reg = inl(cm_idlest1); ++ ++ if (!(reg & 0x01)) { /* ST_SSI */ ++ pr_err("SSI is still in STANDBY ! 
(BUG !?)\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++#endif /* __SSI_CLK_FIX__ */ ++ ++static int ssi_clk_init(struct ssi_internal_clk *ssi_clk) ++{ ++ const char *clk_names[] = { "ssi_ick", "ssi_ssr_fck" }; ++ int i; ++ int j; ++ ++ ssi_clk->n_childs = ARRAY_SIZE(clk_names); ++ ssi_clk->childs = kzalloc(ssi_clk->n_childs * sizeof(*ssi_clk->childs), ++ GFP_KERNEL); ++ if (!ssi_clk->childs) ++ return -ENOMEM; ++ ++ for (i = 0; i < ssi_clk->n_childs; i++) { ++ ssi_clk->childs[i] = clk_get(&ssi_clk->pdev->dev, clk_names[i]); ++ if (IS_ERR(ssi_clk->childs[i])) { ++ pr_err("Unable to get SSI clock: %s", clk_names[i]); ++ for (j = i - 1; j >= 0; j--) ++ clk_put(ssi_clk->childs[j]); ++ return -ENODEV; ++ } ++ } ++ ++ return 0; ++} ++ ++static void disable_dpll3_autoidle(void) ++{ ++ u32 v; ++ ++ v = cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); ++ v &= ~0x7; ++ cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE); ++} ++ ++static void enable_dpll3_autoidle(void) ++{ ++ u32 v; ++ ++ v = cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); ++ v |= 1; ++ cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE); ++} ++ ++static int ssi_clk_enable(struct clk *clk) ++{ ++ struct ssi_internal_clk *ssi_clk = ++ container_of(clk, struct ssi_internal_clk, clk); ++ int err; ++ int i; ++ ++ disable_dpll3_autoidle(); ++ ++ for (i = 0; i < ssi_clk->n_childs; i++) { ++ err = omap2_clk_enable(ssi_clk->childs[i]); ++ if (unlikely(err < 0)) ++ goto rollback; ++ } ++#ifdef __SSI_CLK_FIX__ ++ /* ++ * FIXME: To be removed ++ * Wait until the SSI controller has the clocks stable ++ */ ++ check_ssi_active(); ++#endif ++ ssi_restore_ctx(ssi_clk->pdev); ++ if (!ssi_clk->rate_change) ++ ssi_restore_mode(ssi_clk->pdev); ++ ++ return 0; ++rollback: ++ pr_err("Error on SSI clk child %d\n", i); ++ for (i = i - 1; i >= 0; i--) ++ omap2_clk_disable(ssi_clk->childs[i]); ++ ++ enable_dpll3_autoidle(); ++ ++ return err; ++} ++ ++static void ssi_clk_disable(struct clk *clk) ++{ ++ struct ssi_internal_clk *ssi_clk = ++ container_of(clk, struct ssi_internal_clk, clk); ++ int i; ++ ++ if (!ssi_clk->rate_change) { ++ ssi_save_mode(ssi_clk->pdev); ++ ssi_set_mode(ssi_clk->pdev, SSI_MODE_SLEEP); ++ } ++ /* Save ctx in all ports */ ++ ssi_save_ctx(ssi_clk->pdev); ++ ++ for (i = 0; i < ssi_clk->n_childs; i++) ++ omap2_clk_disable(ssi_clk->childs[i]); ++ ++ enable_dpll3_autoidle(); ++ ++} ++ ++int omap_ssi_config(struct omap_ssi_board_config *ssi_config) ++{ ++ int port; ++ int cawake_gpio; ++ ++ ssi_pdata.num_ports = ssi_config->num_ports; ++ for (port = 0; port < ssi_config->num_ports; port++) { ++ cawake_gpio = ssi_config->cawake_gpio[port]; ++ if (cawake_gpio < 0) ++ continue; /* Nothing to do */ ++ ++ if (gpio_request(cawake_gpio, "CAWAKE") < 0) { ++ dev_err(&ssi_pdev.dev, "FAILED to request CAWAKE" ++ " GPIO %d\n", cawake_gpio); ++ return -EBUSY; ++ } ++ ++ gpio_direction_input(cawake_gpio); ++ ssi_resources[6 + port].start = gpio_to_irq(cawake_gpio); ++ ssi_resources[6 + port].flags &= ~IORESOURCE_UNSET; ++ ssi_resources[6 + port].flags |= IORESOURCE_IRQ_HIGHEDGE | ++ IORESOURCE_IRQ_LOWEDGE; ++ } ++ return 0; ++} ++ ++static struct ssi_internal_clk ssi_clock = { ++ .clk = { ++ .name = "ssi_clk", ++ .id = -1, ++ .enable = ssi_clk_enable, ++ .disable = ssi_clk_disable, ++ .clkdm = { .name = "core_l4_clkdm", }, ++ }, ++ .nb = { ++ .notifier_call = ssi_clk_event, ++ .priority = INT_MAX, ++ }, ++ .pdev = &ssi_pdev, ++}; ++ ++static int __init omap_ssi_init(void) ++{ ++ int err; ++ ++ ssi_clk_init(&ssi_clock); ++ clk_register(&ssi_clock.clk); ++ ++ err = 
platform_device_register(&ssi_pdev); ++ if (err < 0) { ++ pr_err("Unable to register SSI platform device: %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++subsys_initcall(omap_ssi_init); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/timer-gp.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/timer-gp.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/timer-gp.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/timer-gp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -81,9 +81,21 @@ static void omap2_gp_timer_set_mode(enum + case CLOCK_EVT_MODE_ONESHOT: + break; + case CLOCK_EVT_MODE_UNUSED: +- case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_RESUME: + break; ++ case CLOCK_EVT_MODE_SHUTDOWN: ++ /* ++ * Wait for min period x 2 to make sure that timer is ++ * stopped ++ */ ++ udelay(evt->min_delta_ns / 500); ++ /* ++ * Clear possibly pending interrupt, this will occasionally ++ * generate spurious timer IRQs during suspend but this ++ * is okay, as another option is not to enter suspend at all ++ */ ++ omap_dm_timer_write_status(gptimer, OMAP_TIMER_INT_OVERFLOW); ++ break; + } + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mach-omap2/usb-musb.c kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/usb-musb.c +--- linux-omap-2.6.28-omap1/arch/arm/mach-omap2/usb-musb.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mach-omap2/usb-musb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -32,6 +32,13 @@ + #include + #include + ++#define SIDLEMODE 3 ++#define SMARTIDLE (2 << SIDLEMODE) ++#define AUTOIDLE (1 << 0) ++#define FORCESTDBY (1 << 0) ++#define OTG_SYSCONFIG (OMAP34XX_HSUSB_OTG_BASE + 0x404) ++#define OTG_FORCESTDBY (OMAP34XX_HSUSB_OTG_BASE + 0x414) ++ + #ifdef CONFIG_USB_MUSB_SOC + static struct resource musb_resources[] = { + [0] = { +@@ -156,13 +163,22 @@ static struct platform_device musb_devic + #endif + + +-void __init usb_musb_init(void) ++void __init usb_musb_init(struct musb_board_data *board) + { ++ musb_plat.board = board; ++ + #ifdef CONFIG_USB_MUSB_SOC + if (platform_device_register(&musb_device) < 0) { + printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); + return; + } + #endif ++#if !defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) ++ /* Force MUSB to standby if not used */ ++ if (cpu_is_omap34xx()) { ++ omap_writel(AUTOIDLE, OTG_SYSCONFIG); ++ omap_writel(FORCESTDBY, OTG_FORCESTDBY); ++ } ++#endif + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mm/cache-v7.S kernel-2.6.28-20093908+0m5/arch/arm/mm/cache-v7.S +--- linux-omap-2.6.28-omap1/arch/arm/mm/cache-v7.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mm/cache-v7.S 2011-09-04 11:31:05.000000000 +0200 +@@ -26,6 +26,7 @@ + * - mm - mm_struct describing address space + */ + ENTRY(v7_flush_dcache_all) ++ dmb @ ensure ordering with previous memory accesses + mrc p15, 1, r0, c0, c0, 1 @ read clidr + ands r3, r0, #0x7000000 @ extract loc from clidr + mov r3, r3, lsr #23 @ left align loc bit field +@@ -64,6 +65,7 @@ skip: + finished: + mov r10, #0 @ swith back to cache level 0 + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr ++ dsb + isb + mov pc, lr + ENDPROC(v7_flush_dcache_all) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mm/fault.c kernel-2.6.28-20093908+0m5/arch/arm/mm/fault.c +--- linux-omap-2.6.28-omap1/arch/arm/mm/fault.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mm/fault.c 2011-09-04 11:31:05.000000000 +0200 +@@ -22,6 +22,19 @@ + + #include "fault.h" + ++/* ++ 
* Fault status register encodings. We steal bit 31 for our own purposes. ++ */ ++#define FSR_LNX_PF (1 << 31) ++#define FSR_WRITE (1 << 11) ++#define FSR_FS4 (1 << 10) ++#define FSR_FS3_0 (15) ++ ++static inline int fsr_fs(unsigned int fsr) ++{ ++ return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; ++} ++ + + #ifdef CONFIG_KPROBES + static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr) +@@ -172,18 +185,35 @@ void do_bad_area(unsigned long addr, uns + #define VM_FAULT_BADMAP 0x010000 + #define VM_FAULT_BADACCESS 0x020000 + +-static int ++/* ++ * Check that the permissions on the VMA allow for the fault which occurred. ++ * If we encountered a write fault, we must have write permission, otherwise ++ * we allow any permission. ++ */ ++static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) ++{ ++ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; ++ ++ if (fsr & FSR_WRITE) ++ mask = VM_WRITE; ++ if (fsr & FSR_LNX_PF) ++ mask = VM_EXEC; ++ ++ return vma->vm_flags & mask ? false : true; ++} ++ ++static int __kprobes + __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, + struct task_struct *tsk) + { + struct vm_area_struct *vma; +- int fault, mask; ++ int fault; + + vma = find_vma(mm, addr); + fault = VM_FAULT_BADMAP; +- if (!vma) ++ if (unlikely(!vma)) + goto out; +- if (vma->vm_start > addr) ++ if (unlikely(vma->vm_start > addr)) + goto check_stack; + + /* +@@ -191,14 +221,10 @@ __do_page_fault(struct mm_struct *mm, un + * memory access, so we can handle it. + */ + good_area: +- if (fsr & (1 << 11)) /* write? */ +- mask = VM_WRITE; +- else +- mask = VM_READ|VM_EXEC|VM_WRITE; +- +- fault = VM_FAULT_BADACCESS; +- if (!(vma->vm_flags & mask)) ++ if (access_error(fsr, vma)) { ++ fault = VM_FAULT_BADACCESS; + goto out; ++ } + + /* + * If for any reason at all we couldn't handle +@@ -206,7 +232,7 @@ good_area: + * than endlessly redo the fault. 
+ */ + survive: +- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11)); ++ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & FSR_WRITE); + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; +@@ -268,6 +294,13 @@ do_page_fault(unsigned long addr, unsign + if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) + goto no_context; + down_read(&mm->mmap_sem); ++ } else { ++ /* ++ * The above down_read_trylock() might have succeeded in ++ * which case, we'll have missed the might_sleep() from ++ * down_read() ++ */ ++ might_sleep(); + } + + fault = __do_page_fault(mm, addr, fsr, tsk); +@@ -463,10 +496,10 @@ hook_fault_code(int nr, int (*fn)(unsign + asmlinkage void __exception + do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + { +- const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6); ++ const struct fsr_info *inf = fsr_info + fsr_fs(fsr); + struct siginfo info; + +- if (!inf->fn(addr, fsr, regs)) ++ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) + return; + + printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", +@@ -482,6 +515,6 @@ do_DataAbort(unsigned long addr, unsigne + asmlinkage void __exception + do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) + { +- do_translation_fault(addr, 0, regs); ++ do_translation_fault(addr, FSR_LNX_PF, regs); + } + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mm/ioremap.c kernel-2.6.28-20093908+0m5/arch/arm/mm/ioremap.c +--- linux-omap-2.6.28-omap1/arch/arm/mm/ioremap.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/mm/ioremap.c 2011-09-04 11:31:05.000000000 +0200 +@@ -110,6 +110,17 @@ static int remap_area_pages(unsigned lon + return err; + } + ++int ioremap_page(unsigned long virt, unsigned long phys, unsigned int mtype) ++{ ++ const struct mem_type *type; ++ ++ type = get_mem_type(mtype); ++ if (!type) ++ return -EINVAL; ++ ++ return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, type); ++} ++EXPORT_SYMBOL(ioremap_page); + + void __check_kvm_seq(struct mm_struct *mm) + { +@@ -138,7 +149,7 @@ void __check_kvm_seq(struct mm_struct *m + */ + static void unmap_area_sections(unsigned long virt, unsigned long size) + { +- unsigned long addr = virt, end = virt + (size & ~SZ_1M); ++ unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); + pgd_t *pgd; + + flush_cache_vunmap(addr, end); +@@ -337,10 +348,7 @@ void __iounmap(volatile void __iomem *io + void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); + #ifndef CONFIG_SMP + struct vm_struct **p, *tmp; +-#endif +- unsigned int section_mapping = 0; + +-#ifndef CONFIG_SMP + /* + * If this is a section based mapping we need to handle it + * specially as the VM subsystem does not know how to handle +@@ -352,11 +360,8 @@ void __iounmap(volatile void __iomem *io + for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { + if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { + if (tmp->flags & VM_ARM_SECTION_MAPPING) { +- *p = tmp->next; + unmap_area_sections((unsigned long)tmp->addr, + tmp->size); +- kfree(tmp); +- section_mapping = 1; + } + break; + } +@@ -364,7 +369,6 @@ void __iounmap(volatile void __iomem *io + write_unlock(&vmlist_lock); + #endif + +- if (!section_mapping) +- vunmap(addr); ++ vunmap(addr); + } + EXPORT_SYMBOL(__iounmap); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/mm/mmu.c kernel-2.6.28-20093908+0m5/arch/arm/mm/mmu.c +--- linux-omap-2.6.28-omap1/arch/arm/mm/mmu.c 2008-12-25 00:26:37.000000000 +0100 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/mm/mmu.c 2011-09-04 11:31:05.000000000 +0200 +@@ -242,6 +242,10 @@ static struct mem_type mem_types[] = { + .prot_sect = PMD_TYPE_SECT, + .domain = DOMAIN_KERNEL, + }, ++ [MT_MEMORY_NONCACHED] = { ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, ++ .domain = DOMAIN_KERNEL, ++ }, + }; + + const struct mem_type *get_mem_type(unsigned int type) +@@ -405,9 +409,28 @@ static void __init build_mem_type_table( + kern_pgprot |= L_PTE_SHARED; + vecs_pgprot |= L_PTE_SHARED; + mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; + #endif + } + ++ /* ++ * Non-cacheable Normal - intended for memory areas that must ++ * not cause cache line evictions when used ++ */ ++ if (cpu_arch >= CPU_ARCH_ARMv6) { ++ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { ++ /* Non-cacheable Normal is XCB = 001 */ ++ mem_types[MT_MEMORY_NONCACHED].prot_sect |= ++ PMD_SECT_BUFFERED; ++ } else { ++ /* For both ARMv6 and non-TEX-remapping ARMv7 */ ++ mem_types[MT_MEMORY_NONCACHED].prot_sect |= ++ PMD_SECT_TEX(1); ++ } ++ } else { ++ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; ++ } ++ + for (i = 0; i < 16; i++) { + unsigned long v = pgprot_val(protection_map[i]); + protection_map[i] = __pgprot(v | user_pgprot); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/oprofile/backtrace.c kernel-2.6.28-20093908+0m5/arch/arm/oprofile/backtrace.c +--- linux-omap-2.6.28-omap1/arch/arm/oprofile/backtrace.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/oprofile/backtrace.c 2011-09-04 11:31:05.000000000 +0200 +@@ -33,41 +33,54 @@ static int report_trace(struct stackfram + return *depth == 0; + } + +-/* +- * The registers we're interested in are at the end of the variable +- * length saved register structure. 
The fp points at the end of this +- * structure so the address of this struct is: +- * (struct frame_tail *)(xxx->fp)-1 +- */ +-struct frame_tail { +- struct frame_tail *fp; +- unsigned long sp; +- unsigned long lr; +-} __attribute__((packed)); +- +-static struct frame_tail* user_backtrace(struct frame_tail *tail) ++static void **user_backtrace(struct pt_regs * const regs, ++ void **frame, int step) + { +- struct frame_tail buftail[2]; ++ void *frame_data[4]; ++ int instr; + +- /* Also check accessibility of one struct frame_tail beyond */ +- if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) +- return NULL; +- if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) +- return NULL; ++ void *ret_addr; ++ void **next_frame; + +- oprofile_add_trace(buftail[0].lr); ++ if (!access_ok(VERIFY_READ, frame - 3, sizeof(frame_data))) ++ return 0; ++ if (__copy_from_user_inatomic(frame_data, frame - 3, ++ sizeof(frame_data))) ++ return 0; ++ ++ if (access_ok(VERIFY_READ, (int *)frame_data[3] - 2, sizeof(instr)) && ++ __copy_from_user_inatomic(&instr, (int *)frame_data[3] - 2, ++ sizeof(instr)) == 0 && ++ (instr & 0xFFFFD800) == 0xE92DD800) { ++ /* Standard APCS frame */ ++ ret_addr = frame_data[2]; ++ next_frame = frame_data[0]; ++ } else if (step != 0 || ++ (unsigned long)frame_data[2] - (unsigned long)regs->ARM_sp < ++ (unsigned long)frame_data[3] - (unsigned long)regs->ARM_sp) { ++ /* Heuristic detection: codesourcery optimized normal frame */ ++ ret_addr = frame_data[3]; ++ next_frame = frame_data[2]; ++ } else { ++ /* Heuristic detection: codesourcery optimized leaf frame */ ++ ret_addr = (void *)regs->ARM_lr; ++ next_frame = frame_data[3]; ++ } + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ +- if (tail >= buftail[0].fp) ++ if (next_frame <= frame) + return NULL; + +- return buftail[0].fp-1; ++ oprofile_add_trace((unsigned long)ret_addr); ++ ++ return next_frame; + } + + void arm_backtrace(struct pt_regs * const regs, unsigned int depth) + { +- struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1; ++ int step = 0; ++ void **frame = (void **)regs->ARM_fp; + + if (!user_mode(regs)) { + unsigned long base = ((unsigned long)regs) & ~(THREAD_SIZE - 1); +@@ -76,6 +89,6 @@ void arm_backtrace(struct pt_regs * cons + return; + } + +- while (depth-- && tail && !((unsigned long) tail & 3)) +- tail = user_backtrace(tail); ++ while (depth-- && frame && !((unsigned long) frame & 3)) ++ frame = user_backtrace(regs, frame, step++); + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/oprofile/common.c kernel-2.6.28-20093908+0m5/arch/arm/oprofile/common.c +--- linux-omap-2.6.28-omap1/arch/arm/oprofile/common.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/oprofile/common.c 2011-09-04 11:31:05.000000000 +0200 +@@ -133,6 +133,11 @@ int __init oprofile_arch_init(struct opr + + ops->backtrace = arm_backtrace; + ++/* comes first, so that it can be overrided by a better implementation */ ++#ifdef CONFIG_OPROFILE_OMAP_GPTIMER ++ spec = &op_omap_gptimer_spec; ++#endif ++ + #ifdef CONFIG_CPU_XSCALE + spec = &op_xscale_spec; + #endif +@@ -144,11 +149,11 @@ int __init oprofile_arch_init(struct opr + #ifdef CONFIG_OPROFILE_MPCORE + spec = &op_mpcore_spec; + #endif +- ++/* + #ifdef CONFIG_OPROFILE_ARMV7 + spec = &op_armv7_spec; + #endif +- ++*/ + if (spec) { + ret = spec->init(); + if (ret < 0) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/oprofile/Makefile kernel-2.6.28-20093908+0m5/arch/arm/oprofile/Makefile +--- 
linux-omap-2.6.28-omap1/arch/arm/oprofile/Makefile 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/oprofile/Makefile 2011-09-04 11:31:05.000000000 +0200 +@@ -8,6 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drive + + oprofile-y := $(DRIVER_OBJS) common.o backtrace.o + oprofile-$(CONFIG_CPU_XSCALE) += op_model_xscale.o ++oprofile-$(CONFIG_OPROFILE_OMAP_GPTIMER) += op_model_omap_gptimer.o + oprofile-$(CONFIG_OPROFILE_ARM11_CORE) += op_model_arm11_core.o + oprofile-$(CONFIG_OPROFILE_ARMV6) += op_model_v6.o + oprofile-$(CONFIG_OPROFILE_MPCORE) += op_model_mpcore.o +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/oprofile/op_arm_model.h kernel-2.6.28-20093908+0m5/arch/arm/oprofile/op_arm_model.h +--- linux-omap-2.6.28-omap1/arch/arm/oprofile/op_arm_model.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/oprofile/op_arm_model.h 2011-09-04 11:31:05.000000000 +0200 +@@ -24,6 +24,8 @@ struct op_arm_model_spec { + extern struct op_arm_model_spec op_xscale_spec; + #endif + ++extern struct op_arm_model_spec op_omap_gptimer_spec; ++ + extern struct op_arm_model_spec op_armv6_spec; + extern struct op_arm_model_spec op_mpcore_spec; + extern struct op_arm_model_spec op_armv7_spec; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/oprofile/op_model_omap_gptimer.c kernel-2.6.28-20093908+0m5/arch/arm/oprofile/op_model_omap_gptimer.c +--- linux-omap-2.6.28-omap1/arch/arm/oprofile/op_model_omap_gptimer.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/oprofile/op_model_omap_gptimer.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,93 @@ ++/** ++ * OMAP gptimer based event monitor driver for oprofile ++ * ++ * Copyright (C) 2009 Nokia Corporation ++ * Contact: Siarhei Siamashka ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "op_counter.h" ++#include "op_arm_model.h" ++ ++static struct omap_dm_timer *gptimer; ++ ++static int gptimer_init(void) ++{ ++ return 0; ++} ++ ++static int gptimer_setup(void) ++{ ++ return 0; ++} ++ ++static irqreturn_t gptimer_interrupt(int irq, void *arg) ++{ ++ omap_dm_timer_write_status(gptimer, OMAP_TIMER_INT_OVERFLOW); ++ oprofile_add_sample(get_irq_regs(), 0); ++ return IRQ_HANDLED; ++} ++ ++static int gptimer_start(void) ++{ ++ int err; ++ u32 count = counter_config[0].count; ++ ++ BUG_ON(gptimer != NULL); ++ /* First try to request timers from CORE power domain for OMAP3 */ ++ if (cpu_is_omap34xx()) { ++ gptimer = omap_dm_timer_request_specific(10); ++ if (gptimer == NULL) ++ gptimer = omap_dm_timer_request_specific(11); ++ } ++ /* Just any timer would be fine */ ++ if (gptimer == NULL) ++ gptimer = omap_dm_timer_request(); ++ if (gptimer == NULL) ++ return -ENODEV; ++ ++ omap_dm_timer_set_source(gptimer, OMAP_TIMER_SRC_32_KHZ); ++ err = request_irq(omap_dm_timer_get_irq(gptimer), gptimer_interrupt, ++ IRQF_DISABLED, "oprofile gptimer", NULL); ++ if (err) { ++ omap_dm_timer_free(gptimer); ++ gptimer = NULL; ++ printk(KERN_ERR "oprofile: unable to request gptimer IRQ\n"); ++ return err; ++ } ++ ++ if (count < 1) ++ count = 1; ++ ++ omap_dm_timer_set_load_start(gptimer, 1, 0xffffffff - count); ++ omap_dm_timer_set_int_enable(gptimer, OMAP_TIMER_INT_OVERFLOW); ++ return 0; ++} ++ ++static void gptimer_stop(void) ++{ ++ omap_dm_timer_set_int_enable(gptimer, 0); ++ free_irq(omap_dm_timer_get_irq(gptimer), NULL); ++ omap_dm_timer_free(gptimer); ++ gptimer = NULL; ++} ++ ++struct op_arm_model_spec op_omap_gptimer_spec = { ++ .init = gptimer_init, ++ .num_counters = 1, ++ .setup_ctrs = gptimer_setup, ++ .start = gptimer_start, ++ .stop = gptimer_stop, ++ .name = "arm/omap-gptimer", ++}; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/clock.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/clock.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/clock.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/clock.c 2011-09-04 11:31:05.000000000 +0200 +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -34,6 +35,8 @@ static DEFINE_SPINLOCK(clockfw_lock); + + static struct clk_functions *arch_clock; + ++static LIST_HEAD(clk_notifier_list); ++ + /** + * omap_clk_for_each_child - call callback on each child clock of clk + * @clk: struct clk * to use as the "parent" +@@ -95,6 +98,19 @@ static int _do_propagate_rate(struct clk + } + + /** ++ * _clk_free_notifier_chain - safely remove struct clk_notifier ++ * @cn: struct clk_notifier * ++ * ++ * Removes the struct clk_notifier @cn from the clk_notifier_list and ++ * frees it. ++ */ ++static void _clk_free_notifier_chain(struct clk_notifier *cn) ++{ ++ list_del(&cn->node); ++ kfree(cn); ++} ++ ++/** + * omap_clk_add_child - add a child clock @clk2 to @clk + * @clk: parent struct clk * + * @clk2: new child struct clk * +@@ -170,6 +186,101 @@ void omap_clk_del_child(struct clk *clk, + } + } + ++/** ++ * omap_clk_notify - call clk notifier chain ++ * @clk: struct clk * that is changing rate ++ * @msg: clk notifier type (i.e., CLK_POST_RATE_CHANGE; see mach/clock.h) ++ * @old_rate: old rate ++ * @new_rate: new rate ++ * ++ * Triggers a notifier call chain on the post-clk-rate-change notifier ++ * for clock 'clk'. 
Passes a pointer to the struct clk and the ++ * previous and current rates to the notifier callback. Intended to be ++ * called by internal clock code only. No return value. ++ */ ++static void omap_clk_notify(struct clk *clk, unsigned long msg, ++ unsigned long old_rate, unsigned long new_rate) ++{ ++ struct clk_notifier *cn; ++ struct clk_notifier_data cnd; ++ ++ cnd.clk = clk; ++ cnd.old_rate = old_rate; ++ cnd.new_rate = new_rate; ++ ++ list_for_each_entry(cn, &clk_notifier_list, node) { ++ if (cn->clk == clk) { ++ blocking_notifier_call_chain(&cn->notifier_head, msg, ++ &cnd); ++ break; ++ } ++ } ++} ++ ++/** ++ * omap_clk_notify_downstream - trigger clock change notifications ++ * @clk: struct clk * to start the notifications with ++ * @msg: notifier msg - see "Clk notifier callback types" in mach/clock.h ++ * @param2: (not used - any u8 will do) ++ * ++ * Call clock change notifiers on clocks starting with @clk and including ++ * all of @clk's downstream children clocks. Returns NOTIFY_DONE. ++ */ ++static int omap_clk_notify_downstream(struct clk *clk, unsigned long msg, ++ u8 param2) ++{ ++ if (!clk->notifier_count) ++ return NOTIFY_DONE; ++ ++ omap_clk_notify(clk, msg, clk->rate, clk->temp_rate); ++ ++ if (!omap_clk_has_children(clk)) ++ return NOTIFY_DONE; ++ ++ return omap_clk_for_each_child(clk, msg, 0, omap_clk_notify_downstream); ++} ++ ++ ++/** ++ * _clk_pre_notify_set_parent - handle pre-notification for clk_set_parent() ++ * @clk: struct clk * changing parent ++ * ++ * When @clk is ready to change its parent, handle pre-notification. ++ * If the architecture does not have an ++ * arch_clock->clk_round_rate_parent() defined, this code will be unable ++ * to verify that the selected parent is valid, and also unable to pass the ++ * post-parent-change clock rate to the notifier. Returns any error from ++ * clk_round_rate_parent() or 0 upon success. ++ */ ++static int _clk_pre_notify_set_parent(struct clk *clk, struct clk *parent) ++{ ++ long rate; ++ ++ if (!clk->notifier_count) ++ return 0; ++ ++ if (!arch_clock->clk_round_rate_parent) { ++ pr_warning("clock: clk_set_parent(): WARNING: " ++ "clk_round_rate_parent() undefined: pre-notifiers " ++ "will get bogus rate\n"); ++ ++ rate = 0; ++ } else { ++ rate = arch_clock->clk_round_rate_parent(clk, parent); ++ }; ++ ++ if (IS_ERR_VALUE(rate)) ++ return rate; ++ ++ clk->temp_rate = rate; ++ propagate_rate(clk, TEMP_RATE); ++ ++ omap_clk_notify_downstream(clk, CLK_PRE_RATE_CHANGE, 0); ++ ++ return 0; ++} ++ ++ + /*------------------------------------------------------------------------- + * Standard clock functions defined in include/linux/clk.h + *-------------------------------------------------------------------------*/ +@@ -306,10 +417,20 @@ int clk_set_rate(struct clk *clk, unsign + { + unsigned long flags; + int ret = -EINVAL; ++ int msg; + + if (clk == NULL || IS_ERR(clk)) + return ret; + ++ mutex_lock(&clocks_mutex); ++ ++ if (clk->notifier_count) { ++ clk->temp_rate = rate; ++ propagate_rate(clk, TEMP_RATE); ++ ++ omap_clk_notify_downstream(clk, CLK_PRE_RATE_CHANGE, 0); ++ } ++ + spin_lock_irqsave(&clockfw_lock, flags); + + if (arch_clock->clk_set_rate) { +@@ -321,6 +442,12 @@ int clk_set_rate(struct clk *clk, unsign + + spin_unlock_irqrestore(&clockfw_lock, flags); + ++ msg = (ret) ? 
CLK_ABORT_RATE_CHANGE : CLK_POST_RATE_CHANGE; ++ ++ omap_clk_notify_downstream(clk, msg, 0); ++ ++ mutex_unlock(&clocks_mutex); ++ + return ret; + } + EXPORT_SYMBOL(clk_set_rate); +@@ -330,10 +457,17 @@ int clk_set_parent(struct clk *clk, stru + unsigned long flags; + struct clk *prev_parent; + int ret = -EINVAL; ++ int msg; + + if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent)) + return ret; + ++ mutex_lock(&clocks_mutex); ++ ++ ret = _clk_pre_notify_set_parent(clk, parent); ++ if (IS_ERR_VALUE(ret)) ++ goto csp_out; ++ + spin_lock_irqsave(&clockfw_lock, flags); + + if (arch_clock->clk_set_parent) { +@@ -349,6 +483,13 @@ int clk_set_parent(struct clk *clk, stru + + spin_unlock_irqrestore(&clockfw_lock, flags); + ++ msg = (ret) ? CLK_ABORT_RATE_CHANGE : CLK_POST_RATE_CHANGE; ++ ++ omap_clk_notify_downstream(clk, msg, 0); ++ ++csp_out: ++ mutex_unlock(&clocks_mutex); ++ + return ret; + } + EXPORT_SYMBOL(clk_set_parent); +@@ -535,6 +676,122 @@ void clk_init_cpufreq_table(struct cpufr + EXPORT_SYMBOL(clk_init_cpufreq_table); + #endif + ++/* Clk notifier implementation */ ++ ++/** ++ * clk_notifier_register - add a clock parameter change notifier ++ * @clk: struct clk * to watch ++ * @nb: struct notifier_block * with callback info ++ * ++ * Request notification for changes to the clock 'clk'. This uses a ++ * blocking notifier. Callback code must not call into the clock ++ * framework, as clocks_mutex is held. Pre-notifier callbacks will be ++ * passed the previous and new rate of the clock. ++ * ++ * clk_notifier_register() must be called from process ++ * context. Returns -EINVAL if called with null arguments, -ENOMEM ++ * upon allocation failure; otherwise, passes along the return value ++ * of blocking_notifier_chain_register(). ++ */ ++int clk_notifier_register(struct clk *clk, struct notifier_block *nb) ++{ ++ struct clk_notifier *cn = NULL, *cn_new = NULL; ++ int r; ++ struct clk *clkp; ++ ++ if (!clk || !nb) ++ return -EINVAL; ++ ++ mutex_lock(&clocks_mutex); ++ ++ list_for_each_entry(cn, &clk_notifier_list, node) ++ if (cn->clk == clk) ++ break; ++ ++ if (cn->clk != clk) { ++ cn_new = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL); ++ if (!cn_new) { ++ r = -ENOMEM; ++ goto cnr_out; ++ }; ++ ++ cn_new->clk = clk; ++ BLOCKING_INIT_NOTIFIER_HEAD(&cn_new->notifier_head); ++ ++ list_add(&cn_new->node, &clk_notifier_list); ++ cn = cn_new; ++ } ++ ++ r = blocking_notifier_chain_register(&cn->notifier_head, nb); ++ if (!IS_ERR_VALUE(r)) { ++ clkp = clk; ++ do { ++ clkp->notifier_count++; ++ } while ((clkp = clkp->parent)); ++ } else { ++ if (cn_new) ++ _clk_free_notifier_chain(cn); ++ } ++ ++cnr_out: ++ mutex_unlock(&clocks_mutex); ++ ++ return r; ++} ++EXPORT_SYMBOL(clk_notifier_register); ++ ++/** ++ * clk_notifier_unregister - remove a clock change notifier ++ * @clk: struct clk * ++ * @nb: struct notifier_block * with callback info ++ * ++ * Request no further notification for changes to clock 'clk'. ++ * Returns -EINVAL if called with null arguments; otherwise, passes ++ * along the return value of blocking_notifier_chain_unregister(). 
++ */ ++int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) ++{ ++ struct clk_notifier *cn = NULL; ++ struct clk *clkp; ++ int r = -EINVAL; ++ ++ if (!clk || !nb) ++ return -EINVAL; ++ ++ mutex_lock(&clocks_mutex); ++ ++ list_for_each_entry(cn, &clk_notifier_list, node) ++ if (cn->clk == clk) ++ break; ++ ++ if (cn->clk != clk) { ++ r = -ENOENT; ++ goto cnu_out; ++ }; ++ ++ r = blocking_notifier_chain_unregister(&cn->notifier_head, nb); ++ if (!IS_ERR_VALUE(r)) { ++ clkp = clk; ++ do { ++ clkp->notifier_count--; ++ } while ((clkp = clkp->parent)); ++ } ++ ++ /* ++ * XXX ugh, layering violation. There should be some ++ * support in the notifier code for this. ++ */ ++ if (!cn->notifier_head.head) ++ _clk_free_notifier_chain(cn); ++ ++cnu_out: ++ mutex_unlock(&clocks_mutex); ++ ++ return r; ++} ++EXPORT_SYMBOL(clk_notifier_unregister); ++ ++ + /*-------------------------------------------------------------------------*/ + + #ifdef CONFIG_OMAP_RESET_CLOCKS +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/common.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/common.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/common.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/common.c 2011-09-04 11:31:05.000000000 +0200 +@@ -46,6 +46,12 @@ int omap_bootloader_tag_len; + struct omap_board_config_kernel *omap_board_config; + int omap_board_config_size; + ++#ifdef CONFIG_OMAP_PM_NONE ++struct omap_opp *mpu_opps; ++struct omap_opp *dsp_opps; ++struct omap_opp *l3_opps; ++#endif ++ + #ifdef CONFIG_OMAP_BOOT_TAG + + static int __init parse_tag_omap(const struct tag *tag) +@@ -220,20 +226,16 @@ static struct clocksource clocksource_32 + }; + + /* +- * Rounds down to nearest nsec. +- */ +-unsigned long long omap_32k_ticks_to_nsecs(unsigned long ticks_32k) +-{ +- return cyc2ns(&clocksource_32k, ticks_32k); +-} +- +-/* + * Returns current time from boot in nsecs. It's OK for this to wrap + * around for now, as it's just a relative time stamp. + */ + unsigned long long sched_clock(void) + { +- return omap_32k_ticks_to_nsecs(omap_32k_read()); ++ unsigned long long ret; ++ ++ ret = (unsigned long long)omap_32k_read(); ++ ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; ++ return ret; + } + + static int __init omap_init_clocksource_32k(void) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/cpu-omap.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/cpu-omap.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/cpu-omap.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/cpu-omap.c 2011-09-04 11:31:05.000000000 +0200 +@@ -8,6 +8,10 @@ + * + * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King + * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Updated to support OMAP3 ++ * Rajendra Nayak ++ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+@@ -25,6 +29,9 @@ + #include + #include + #include ++#if defined(CONFIG_ARCH_OMAP3) && !defined(CONFIG_OMAP_PM_NONE) ++#include ++#endif + + #define VERY_HI_RATE 900000000 + +@@ -32,6 +39,8 @@ static struct cpufreq_frequency_table *f + + #ifdef CONFIG_ARCH_OMAP1 + #define MPU_CLK "mpu" ++#elif CONFIG_ARCH_OMAP3 ++#define MPU_CLK "arm_fck" + #else + #define MPU_CLK "virt_prcm_set" + #endif +@@ -73,23 +82,25 @@ static int omap_target(struct cpufreq_po + unsigned int target_freq, + unsigned int relation) + { ++#ifdef CONFIG_ARCH_OMAP1 + struct cpufreq_freqs freqs; ++#endif + int ret = 0; + + /* Ensure desired rate is within allowed range. Some govenors + * (ondemand) will just pass target_freq=0 to get the minimum. */ +- if (target_freq < policy->cpuinfo.min_freq) +- target_freq = policy->cpuinfo.min_freq; +- if (target_freq > policy->cpuinfo.max_freq) +- target_freq = policy->cpuinfo.max_freq; ++ if (target_freq < policy->min) ++ target_freq = policy->min; ++ if (target_freq > policy->max) ++ target_freq = policy->max; + ++#ifdef CONFIG_ARCH_OMAP1 + freqs.old = omap_getspeed(0); + freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000; + freqs.cpu = 0; + + if (freqs.old == freqs.new) + return ret; +- + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + #ifdef CONFIG_CPU_FREQ_DEBUG + printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n", +@@ -97,7 +108,18 @@ static int omap_target(struct cpufreq_po + #endif + ret = clk_set_rate(mpu_clk, freqs.new * 1000); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +- ++#elif defined(CONFIG_ARCH_OMAP3) && !defined(CONFIG_OMAP_PM_NONE) ++ if (mpu_opps) { ++ int ind; ++ for (ind = 1; ind <= MAX_VDD1_OPP; ind++) { ++ if (mpu_opps[ind].rate/1000 >= target_freq) { ++ omap_pm_cpu_set_freq ++ (mpu_opps[ind].rate); ++ break; ++ } ++ } ++ } ++#endif + return ret; + } + +@@ -126,9 +148,13 @@ static int __init omap_cpu_init(struct c + VERY_HI_RATE) / 1000; + } + +- /* FIXME: what's the actual transition time? 
*/ +- policy->cpuinfo.transition_latency = 10 * 1000 * 1000; ++ clk_set_rate(mpu_clk, policy->cpuinfo.max_freq * 1000); ++ ++ policy->min = policy->cpuinfo.min_freq; ++ policy->max = policy->cpuinfo.max_freq; ++ policy->cur = omap_getspeed(0); + ++ policy->cpuinfo.transition_latency = 300 * 1000; + return 0; + } + +@@ -159,7 +185,7 @@ static int __init omap_cpufreq_init(void + return cpufreq_register_driver(&omap_driver); + } + +-arch_initcall(omap_cpufreq_init); ++late_initcall(omap_cpufreq_init); + + /* + * if ever we want to remove this, upon cleanup call: +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/devices.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/devices.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/devices.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/devices.c 2011-09-04 11:31:05.000000000 +0200 +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -89,6 +90,33 @@ EXPORT_SYMBOL(dsp_kfunc_device_register) + static inline void omap_init_dsp(void) { } + #endif /* CONFIG_OMAP_DSP */ + ++#if defined(CONFIG_MPU_BRIDGE) || defined(CONFIG_MPU_BRIDGE_MODULE) ++ ++static unsigned long dspbridge_phys_mempool_base; ++ ++void dspbridge_reserve_sdram(void) ++{ ++ void *va; ++ unsigned long size = CONFIG_BRIDGE_MEMPOOL_SIZE; ++ ++ if (!size) ++ return; ++ ++ va = __alloc_bootmem_nopanic(size, SZ_1M, 0); ++ if (!va) { ++ pr_err("%s: Failed to bootmem allocation(%lu bytes)\n", ++ __func__, size); ++ return; ++ } ++ dspbridge_phys_mempool_base = virt_to_phys(va); ++} ++ ++unsigned long dspbridge_get_mempool_base(void) ++{ ++ return dspbridge_phys_mempool_base; ++} ++EXPORT_SYMBOL(dspbridge_get_mempool_base); ++#endif + /*-------------------------------------------------------------------------*/ + #if defined(CONFIG_KEYBOARD_OMAP) || defined(CONFIG_KEYBOARD_OMAP_MODULE) + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/dma.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/dma.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/dma.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -51,6 +51,12 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTS + + static int enable_1510_mode; + ++static struct omap_dma_global_context_registers { ++ u32 dma_irqenable_l0; ++ u32 dma_ocp_sysconfig; ++ u32 dma_gcr; ++} omap_dma_global_context; ++ + struct omap_dma_lch { + int next_lch; + int dev_id; +@@ -309,41 +315,62 @@ EXPORT_SYMBOL(omap_set_dma_transfer_para + + void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color) + { +- u16 w; +- + BUG_ON(omap_dma_in_1510_mode()); + +- if (cpu_class_is_omap2()) { +- REVISIT_24XX(); +- return; +- } ++ if (cpu_class_is_omap1()) { ++ u16 w; + +- w = dma_read(CCR2(lch)); +- w &= ~0x03; ++ w = dma_read(CCR2(lch)); ++ w &= ~0x03; + +- switch (mode) { +- case OMAP_DMA_CONSTANT_FILL: +- w |= 0x01; +- break; +- case OMAP_DMA_TRANSPARENT_COPY: +- w |= 0x02; +- break; +- case OMAP_DMA_COLOR_DIS: +- break; +- default: +- BUG(); ++ switch (mode) { ++ case OMAP_DMA_CONSTANT_FILL: ++ w |= 0x01; ++ break; ++ case OMAP_DMA_TRANSPARENT_COPY: ++ w |= 0x02; ++ break; ++ case OMAP_DMA_COLOR_DIS: ++ break; ++ default: ++ BUG(); ++ } ++ dma_write(w, CCR2(lch)); ++ ++ w = dma_read(LCH_CTRL(lch)); ++ w &= ~0x0f; ++ /* Default is channel type 2D */ ++ if (mode) { ++ dma_write((u16)color, COLOR_L(lch)); ++ dma_write((u16)(color >> 16), COLOR_U(lch)); ++ w |= 1; /* Channel type G */ ++ } ++ dma_write(w, 
LCH_CTRL(lch)); + } +- dma_write(w, CCR2(lch)); + +- w = dma_read(LCH_CTRL(lch)); +- w &= ~0x0f; +- /* Default is channel type 2D */ +- if (mode) { +- dma_write((u16)color, COLOR_L(lch)); +- dma_write((u16)(color >> 16), COLOR_U(lch)); +- w |= 1; /* Channel type G */ ++ if (cpu_class_is_omap2()) { ++ u32 val; ++ ++ val = dma_read(CCR(lch)); ++ val &= ~((1 << 17) | (1 << 16)); ++ ++ switch (mode) { ++ case OMAP_DMA_CONSTANT_FILL: ++ val |= 1 << 16; ++ break; ++ case OMAP_DMA_TRANSPARENT_COPY: ++ val |= 1 << 17; ++ break; ++ case OMAP_DMA_COLOR_DIS: ++ break; ++ default: ++ BUG(); ++ } ++ dma_write(val, CCR(lch)); ++ ++ color &= 0xffffff; ++ dma_write(color, COLOR(lch)); + } +- dma_write(w, LCH_CTRL(lch)); + } + EXPORT_SYMBOL(omap_set_dma_color_mode); + +@@ -759,19 +786,12 @@ void omap_free_dma(int lch) + { + unsigned long flags; + +- spin_lock_irqsave(&dma_chan_lock, flags); + if (dma_chan[lch].dev_id == -1) { + pr_err("omap_dma: trying to free unallocated DMA channel %d\n", + lch); +- spin_unlock_irqrestore(&dma_chan_lock, flags); + return; + } + +- dma_chan[lch].dev_id = -1; +- dma_chan[lch].next_lch = -1; +- dma_chan[lch].callback = NULL; +- spin_unlock_irqrestore(&dma_chan_lock, flags); +- + if (cpu_class_is_omap1()) { + /* Disable all DMA interrupts for the channel. */ + dma_write(0, CICR(lch)); +@@ -797,6 +817,12 @@ void omap_free_dma(int lch) + dma_write(0, CCR(lch)); + omap_clear_dma(lch); + } ++ ++ spin_lock_irqsave(&dma_chan_lock, flags); ++ dma_chan[lch].dev_id = -1; ++ dma_chan[lch].next_lch = -1; ++ dma_chan[lch].callback = NULL; ++ spin_unlock_irqrestore(&dma_chan_lock, flags); + } + EXPORT_SYMBOL(omap_free_dma); + +@@ -1889,18 +1915,18 @@ static int omap2_dma_handle_ch(int ch) + status = dma_read(CSR(ch)); + } + ++ dma_write(status, CSR(ch)); ++ + if (likely(dma_chan[ch].callback != NULL)) + dma_chan[ch].callback(ch, status, dma_chan[ch].data); + +- dma_write(status, CSR(ch)); +- + return 0; + } + + /* STATUS register count is from 1-32 while our is 0-31 */ + static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id) + { +- u32 val; ++ u32 val, enable_reg; + int i; + + val = dma_read(IRQSTATUS_L0); +@@ -1909,6 +1935,8 @@ static irqreturn_t omap2_dma_irq_handler + printk(KERN_WARNING "Spurious DMA IRQ\n"); + return IRQ_HANDLED; + } ++ enable_reg = dma_read(IRQENABLE_L0); ++ val &= enable_reg; /* Dispatch only relevant interrupts */ + for (i = 0; i < dma_lch_count && val != 0; i++) { + if (val & 1) + omap2_dma_handle_ch(i); +@@ -2301,6 +2329,54 @@ void omap_stop_lcd_dma(void) + } + EXPORT_SYMBOL(omap_stop_lcd_dma); + ++void omap_dma_global_context_save(void) ++{ ++ omap_dma_global_context.dma_irqenable_l0 = ++ dma_read(IRQENABLE_L0); ++ omap_dma_global_context.dma_ocp_sysconfig = ++ dma_read(OCP_SYSCONFIG); ++ omap_dma_global_context.dma_gcr = dma_read(GCR); ++} ++EXPORT_SYMBOL(omap_dma_global_context_save); ++ ++void omap_dma_global_context_restore(void) ++{ ++ int ch; ++ ++ dma_write(omap_dma_global_context.dma_gcr, GCR); ++ dma_write(omap_dma_global_context.dma_ocp_sysconfig, ++ OCP_SYSCONFIG); ++ dma_write(omap_dma_global_context.dma_irqenable_l0, ++ IRQENABLE_L0); ++ ++ /* ++ * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared ++ * after secure sram context save and restore. Hence we need to ++ * manually clear those IRQs to avoid spurious interrupts. This ++ * affects only secure devices. 
++ */ ++ if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP)) { ++ dma_write(0x3 , IRQSTATUS_L0); ++ } ++ ++ for (ch = 0; ch < dma_chan_count; ch++) ++ if (dma_chan[ch].dev_id != -1) ++ omap_clear_dma(ch); ++} ++EXPORT_SYMBOL(omap_dma_global_context_restore); ++ ++void omap_dma_disable_irq(int lch) ++{ ++ u32 val; ++ ++ if (cpu_class_is_omap2()) { ++ /* Disable interrupts */ ++ val = dma_read(IRQENABLE_L0); ++ val &= ~(1 << lch); ++ dma_write(val, IRQENABLE_L0); ++ } ++} ++ + /*----------------------------------------------------------------------------*/ + + static int __init omap_init_dma(void) +@@ -2417,8 +2493,8 @@ static int __init omap_init_dma(void) + if (cpu_class_is_omap2()) + setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq); + +- /* Enable smartidle idlemodes and autoidle */ + if (cpu_is_omap34xx()) { ++ /* Enable smartidle idlemodes and autoidle */ + u32 v = dma_read(OCP_SYSCONFIG); + v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK | + DMA_SYSCONFIG_SIDLEMODE_MASK | +@@ -2427,6 +2503,13 @@ static int __init omap_init_dma(void) + DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) | + DMA_SYSCONFIG_AUTOIDLE); + dma_write(v , OCP_SYSCONFIG); ++ /* reserve dma channels 0 and 1 in high security devices */ ++ if (omap_type() != OMAP2_DEVICE_TYPE_GP) { ++ printk(KERN_INFO "Reserving DMA channels 0 and 1 for " ++ "HS ROM code\n"); ++ dma_chan[0].dev_id = 0; ++ dma_chan[1].dev_id = 1; ++ } + } + + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/dmtimer.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/dmtimer.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/dmtimer.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/dmtimer.c 2011-09-04 11:31:05.000000000 +0200 +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -318,6 +319,7 @@ static void omap_dm_timer_reset(struct o + l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG); + l |= 0x02 << 3; /* Set to smart-idle mode */ + l |= 0x2 << 8; /* Set clock activity to perserve f-clock on idle */ ++ l |= 0x1 << 0; /* Set autoidle */ + + /* + * Enable wake-up on OMAP2 CPUs. 
+@@ -360,6 +362,7 @@ struct omap_dm_timer *omap_dm_timer_requ + + return timer; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_request); + + struct omap_dm_timer *omap_dm_timer_request_specific(int id) + { +@@ -383,6 +386,7 @@ struct omap_dm_timer *omap_dm_timer_requ + + return timer; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific); + + void omap_dm_timer_free(struct omap_dm_timer *timer) + { +@@ -393,6 +397,7 @@ void omap_dm_timer_free(struct omap_dm_t + WARN_ON(!timer->reserved); + timer->reserved = 0; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_free); + + void omap_dm_timer_enable(struct omap_dm_timer *timer) + { +@@ -404,6 +409,7 @@ void omap_dm_timer_enable(struct omap_dm + + timer->enabled = 1; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_enable); + + void omap_dm_timer_disable(struct omap_dm_timer *timer) + { +@@ -415,11 +421,13 @@ void omap_dm_timer_disable(struct omap_d + + timer->enabled = 0; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_disable); + + int omap_dm_timer_get_irq(struct omap_dm_timer *timer) + { + return timer->irq; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq); + + #if defined(CONFIG_ARCH_OMAP1) + +@@ -450,6 +458,7 @@ __u32 omap_dm_timer_modify_idlect_mask(_ + + return inputmask; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask); + + #elif defined(CONFIG_ARCH_OMAP2) || defined (CONFIG_ARCH_OMAP3) + +@@ -457,6 +466,7 @@ struct clk *omap_dm_timer_get_fclk(struc + { + return timer->fclk; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk); + + __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask) + { +@@ -464,6 +474,7 @@ __u32 omap_dm_timer_modify_idlect_mask(_ + + return 0; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask); + + #endif + +@@ -471,6 +482,7 @@ void omap_dm_timer_trigger(struct omap_d + { + omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_trigger); + + void omap_dm_timer_start(struct omap_dm_timer *timer) + { +@@ -482,6 +494,7 @@ void omap_dm_timer_start(struct omap_dm_ + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + } + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_start); + + void omap_dm_timer_stop(struct omap_dm_timer *timer) + { +@@ -493,6 +506,7 @@ void omap_dm_timer_stop(struct omap_dm_t + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + } + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_stop); + + #ifdef CONFIG_ARCH_OMAP1 + +@@ -505,6 +519,7 @@ void omap_dm_timer_set_source(struct oma + l |= source << n; + omap_writel(l, MOD_CONF_CTRL_1); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_source); + + #else + +@@ -521,6 +536,7 @@ void omap_dm_timer_set_source(struct oma + * cause an abort. 
*/ + __delay(150000); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_source); + + #endif + +@@ -539,6 +555,7 @@ void omap_dm_timer_set_load(struct omap_ + + omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_load); + + /* Optimized set_load which removes costly spin wait in timer_start */ + void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, +@@ -558,6 +575,7 @@ void omap_dm_timer_set_load_start(struct + omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, load); + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start); + + void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, + unsigned int match) +@@ -572,6 +590,7 @@ void omap_dm_timer_set_match(struct omap + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_match); + + void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, + int toggle, int trigger) +@@ -588,6 +607,7 @@ void omap_dm_timer_set_pwm(struct omap_d + l |= trigger << 10; + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm); + + void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler) + { +@@ -601,6 +621,7 @@ void omap_dm_timer_set_prescaler(struct + } + omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler); + + void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, + unsigned int value) +@@ -608,6 +629,7 @@ void omap_dm_timer_set_int_enable(struct + omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value); + omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, value); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable); + + unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer) + { +@@ -617,11 +639,13 @@ unsigned int omap_dm_timer_read_status(s + + return l; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_read_status); + + void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value) + { + omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_write_status); + + unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer) + { +@@ -631,11 +655,13 @@ unsigned int omap_dm_timer_read_counter( + + return l; + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter); + + void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value) + { + omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value); + } ++EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter); + + int omap_dm_timers_active(void) + { +@@ -656,6 +682,7 @@ int omap_dm_timers_active(void) + } + return 0; + } ++EXPORT_SYMBOL_GPL(omap_dm_timers_active); + + int __init omap_dm_timer_init(void) + { +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/dss_boottime.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/dss_boottime.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/dss_boottime.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/dss_boottime.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,421 @@ ++/* ++ * File: arch/arm/plat-omap/dss.c ++ * ++ * OMAP Display Subsystem helper functions ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * Author: Imre Deak ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as 
published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++ ++#define DSS_BASE 0x48050000 ++#define DSS_SYSCONFIG 0x0010 ++#define DSS_SYSSTATUS 0x0014 ++ ++#define DISPC_BASE 0x48050400 ++ ++#define DISPC_SYSCONFIG 0x0010 ++#define DISPC_SYSSTATUS 0x0014 ++#define DISPC_CONTROL 0x0040 ++ ++/* Base address offsets into DISPC_BASE for the 3 planes */ ++#define DISPC_GFX_BASE 0x0000 ++#define DISPC_VID1_BASE 0x00BC ++#define DISPC_VID2_BASE 0x014C ++ ++/* Register offsets into GFX / VIDx plane base addresses */ ++#define DISPC_GFX_BA0 0x0080 ++#define DISPC_GFX_SIZE 0x008C ++#define DISPC_GFX_ATTRIBUTES 0x00A0 ++#define DISPC_VID_BA0 0x0000 ++#define DISPC_VID_ATTRIBUTES 0x0010 ++#define DISPC_VID_PICTURE_SIZE 0x0028 ++ ++#define OMAP3430_DSS_ICK_REG 0x48004e10 ++#define OMAP3430_DSS_ICK_BIT (1 << 0) ++#define OMAP3430_DSS_FCK_REG 0x48004e00 ++#define OMAP3430_DSS_FCK_BIT (1 << 0) ++ ++static struct clk *dss_fclk; ++static struct clk *dss_iclk; ++static struct clk *digit_fclk; ++ ++static const u32 at_reg[3] = { ++ DISPC_GFX_BASE + DISPC_GFX_ATTRIBUTES, ++ DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES, ++ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES ++}; ++ ++static const u32 ba0_reg[3] = { ++ DISPC_GFX_BASE + DISPC_GFX_BA0, ++ DISPC_VID1_BASE + DISPC_VID_BA0, ++ DISPC_VID2_BASE + DISPC_VID_BA0, ++}; ++ ++static const u32 siz_reg[3] = { ++ DISPC_GFX_BASE + DISPC_GFX_SIZE, ++ DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE, ++ DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE, ++}; ++ ++int dss_boottime_get_clocks(void) ++{ ++ static const char *dss_ick = "dss_ick"; ++ static const char *dss1_fck = cpu_is_omap34xx() ? ++ "dss1_alwon_fck" : "dss1_fck"; ++ static const char *digit_fck = cpu_is_omap34xx() ? 
++ "dss_tv_fck" : "dss_54m_fck"; ++ ++ BUG_ON(dss_iclk || dss_fclk || digit_fclk); ++ ++ dss_iclk = clk_get(NULL, dss_ick); ++ if (IS_ERR(dss_iclk)) ++ return PTR_ERR(dss_iclk); ++ ++ dss_fclk = clk_get(NULL, dss1_fck); ++ if (IS_ERR(dss_fclk)) { ++ clk_put(dss_iclk); ++ dss_iclk = NULL; ++ return PTR_ERR(dss_fclk); ++ } ++ ++ digit_fclk = clk_get(NULL, digit_fck); ++ if (IS_ERR(digit_fclk)) { ++ clk_put(dss_iclk); ++ clk_put(dss_fclk); ++ dss_iclk = NULL; ++ dss_fclk = NULL; ++ return PTR_ERR(digit_fclk); ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(dss_boottime_get_clocks); ++ ++void dss_boottime_put_clocks(void) ++{ ++ clk_put(digit_fclk); ++ clk_put(dss_iclk); ++ clk_put(dss_fclk); ++ digit_fclk = NULL; ++ dss_iclk = NULL; ++ dss_fclk = NULL; ++} ++EXPORT_SYMBOL_GPL(dss_boottime_put_clocks); ++ ++int dss_boottime_enable_clocks(void) ++{ ++ int r; ++ ++ BUG_ON(!dss_fclk || !dss_iclk); ++ ++ if ((r = clk_enable(dss_iclk)) < 0) ++ return r; ++ ++ if ((r = clk_enable(dss_fclk)) < 0) ++ return r; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(dss_boottime_enable_clocks); ++ ++void dss_boottime_disable_clocks(void) ++{ ++ BUG_ON(!dss_fclk || !dss_iclk); ++ ++ clk_disable(dss_iclk); ++ clk_disable(dss_fclk); ++} ++EXPORT_SYMBOL_GPL(dss_boottime_disable_clocks); ++ ++static int __init enable_digit_clocks(void) ++{ ++ BUG_ON(!digit_fclk); ++ ++ return clk_enable(digit_fclk); ++} ++ ++static void __init disable_digit_clocks(void) ++{ ++ BUG_ON(!digit_fclk); ++ ++ clk_disable(digit_fclk); ++} ++ ++/** ++ * dispc_read_reg Read a DISPC register ++ * @reg - DISPC register to read ++ * ++ * Assumes that clocks are on. ++ */ ++static u32 __init dispc_read_reg(int reg) ++{ ++ return omap_readl(DISPC_BASE + reg); ++} ++ ++/** ++ * dispc_write_reg Write a DISPC register ++ * @reg - DISPC register to write ++ * @val - value to write ++ * ++ * Assumes that clocks are on. ++ */ ++static void __init dispc_write_reg(int reg, u32 val) ++{ ++ omap_writel(val, DISPC_BASE + reg); ++} ++ ++/** ++ * dss_read_reg Read a DSS register ++ * @reg - DSS register to read ++ * ++ * Assumes that clocks are on. ++ */ ++static u32 __init dss_read_reg(int reg) ++{ ++ return omap_readl(DSS_BASE + reg); ++} ++ ++/** ++ * dss_write_reg Write a DSS register ++ * @reg - DSS register to write ++ * @val - value to write ++ * ++ * Assumes that clocks are on. ++ */ ++static void __init dss_write_reg(int reg, u32 val) ++{ ++ omap_writel(val, DSS_BASE + reg); ++} ++ ++/** ++ * dss_boottime_plane_is_enabled Determines whether the plane is enabled ++ * @plane_idx - index of the plane to do the check for ++ * ++ * Return 1 if plane is enabled 0 otherwise. ++ * ++ * Since clock FW might not be initialized yet we can't use the clk_* ++ * interface. ++ */ ++int __init dss_boottime_plane_is_enabled(int plane_idx) ++{ ++ if (cpu_is_omap3430()) { ++ u32 l; ++ ++ if (plane_idx >= 3) ++ return 0; ++ ++ if (!(omap_readl(OMAP3430_DSS_ICK_REG) & OMAP3430_DSS_ICK_BIT)) ++ return 0; ++ ++ if (!(omap_readl(OMAP3430_DSS_FCK_REG) & OMAP3430_DSS_FCK_BIT)) ++ return 0; ++ ++ l = dispc_read_reg(DISPC_CONTROL); ++ /* LCD out enabled ? */ ++ if (!(l & 1)) ++ /* No, don't take over memory */ ++ return 0; ++ ++ l = dispc_read_reg(at_reg[plane_idx]); ++ /* Plane enabled ? */ ++ if (!(l & 1)) ++ /* No, don't take over memory */ ++ return 0; ++ ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * dss_boottime_get_plane_base - get base address for a plane's FB ++ * @plane_idx - plane index ++ * ++ * Return physical base address if plane is enabled, otherwise -1. 
++ */ ++u32 __init dss_boottime_get_plane_base(int plane_idx) ++{ ++ if (dss_boottime_plane_is_enabled(plane_idx)) ++ return dispc_read_reg(ba0_reg[plane_idx]); ++ else ++ return -1UL; ++} ++ ++static const struct { ++ enum omapfb_color_format format; ++ int bpp; ++} mode_info[] = { ++ { OMAPFB_COLOR_CLUT_1BPP, 1, }, ++ { OMAPFB_COLOR_CLUT_2BPP, 2, }, ++ { OMAPFB_COLOR_CLUT_4BPP, 4, }, ++ { OMAPFB_COLOR_CLUT_8BPP, 8, }, ++ { OMAPFB_COLOR_RGB444, 16, }, ++ { OMAPFB_COLOR_ARGB16, 16, }, ++ { OMAPFB_COLOR_RGB565, 16, }, ++ { 0, 0, }, /* id=0x07, reserved */ ++ { OMAPFB_COLOR_RGB24U, 32, }, ++ { OMAPFB_COLOR_RGB24P, 24, }, ++ { OMAPFB_COLOR_YUY422, 16, }, ++ { OMAPFB_COLOR_YUV422, 16, }, ++ { OMAPFB_COLOR_ARGB32, 32, }, ++ { OMAPFB_COLOR_RGBA32, 32, }, ++ { OMAPFB_COLOR_RGBX32, 32, }, ++}; ++ ++static unsigned __init get_plane_mode(int plane_idx) ++{ ++ u32 l; ++ ++ l = dispc_read_reg(at_reg[plane_idx]); ++ l = (l >> 1) & 0x0f; ++ if (l == 0x07 || l >= ARRAY_SIZE(mode_info)) ++ BUG(); ++ /* For the GFX plane YUV2 and UYVY modes are not defined. */ ++ if (plane_idx == 0 && (l == 0x0a || l == 0x0b)) ++ BUG(); ++ return l; ++} ++ ++/** ++ * dss_boottime_get_plane_format - get color format for a plane's FB ++ * @plane_idx - plane index ++ * ++ * Return plane color format if plane is enabled, otherwise -1. ++ */ ++enum omapfb_color_format __init dss_boottime_get_plane_format(int plane_idx) ++{ ++ unsigned mode; ++ ++ if (!dss_boottime_plane_is_enabled(plane_idx)) ++ return -1; ++ ++ mode = get_plane_mode(plane_idx); ++ ++ return mode_info[mode].format; ++} ++ ++int __init dss_boottime_get_plane_bpp(int plane_idx) ++{ ++ unsigned mode; ++ unsigned bpp; ++ ++ if (!dss_boottime_plane_is_enabled(plane_idx)) ++ return -1; ++ ++ mode = get_plane_mode(plane_idx); ++ bpp = mode_info[mode].bpp; ++ ++ return bpp; ++} ++ ++/** ++ * dss_boottime_get_plane_size - get size of a plane's FB ++ * @plane_idx - plane index ++ * ++ * Return the size of a plane's FB based on it's color format and width/height ++ * if the plane is enabled, otherwise -1. ++ */ ++size_t __init dss_boottime_get_plane_size(int plane_idx) ++{ ++ u32 l; ++ unsigned bpp; ++ unsigned x, y; ++ size_t size; ++ ++ if (!dss_boottime_plane_is_enabled(plane_idx)) ++ return -1; ++ ++ bpp = dss_boottime_get_plane_bpp(plane_idx); ++ ++ l = dispc_read_reg(siz_reg[plane_idx]); ++ x = l & ((1 << 11) - 1); ++ x++; ++ l >>= 16; ++ y = l & ((1 << 11) - 1); ++ y++; ++ ++ size = x * y * bpp / 8; ++ ++ size = PAGE_ALIGN(size); ++ ++ return size; ++} ++ ++/** ++ * dss_boottime_reset - reset DSS ++ * ++ * This can only be called after dss_boottime_get_clocks has been called. ++ */ ++int __init dss_boottime_reset(void) ++{ ++ int tmo = 100000; ++ u32 l; ++ ++ BUG_ON(!dss_fclk || !dss_iclk || !digit_fclk); ++ ++ /* Reset monitoring works only w/ the 54M clk */ ++ if (dss_boottime_enable_clocks() < 0) ++ goto err1; ++ ++ if (enable_digit_clocks() < 0) ++ goto err2; ++ ++ /* Resetting DSS right after enabling clocks, or if ++ * bootloader has enabled the display, seems to put ++ * DSS sometimes in an invalid state. 
Disabling output ++ * and waiting after enabling clocks seem to fix this */ ++ ++ /* disable LCD & DIGIT output */ ++ dispc_write_reg(DISPC_CONTROL, dispc_read_reg(DISPC_CONTROL) & ~0x3); ++ msleep(50); ++ ++ /* Soft reset */ ++ l = dss_read_reg(DSS_SYSCONFIG); ++ l |= 1 << 1; ++ dss_write_reg(DSS_SYSCONFIG, l); ++ ++ while (!(dss_read_reg(DSS_SYSSTATUS) & 1)) { ++ if (!--tmo) ++ goto err3; ++ } ++ ++ disable_digit_clocks(); ++ dss_boottime_disable_clocks(); ++ ++ return 0; ++ ++err3: ++ disable_digit_clocks(); ++err2: ++ dss_boottime_disable_clocks(); ++err1: ++ return -ENODEV; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/fb.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/fb.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/fb.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/fb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -27,14 +27,43 @@ + #include + #include + #include ++#include + #include ++#include + + #include + #include + + #include + #include +-#include ++ ++#include ++ ++static int __initdata fb_initialized; ++ ++#if !defined(CONFIG_FB_OMAP) && !defined(CONFIG_FB_OMAP2) ++ ++static void __init reset_dss(void) ++{ ++ pr_info("FB: resetting DSS\n"); ++ ++ if (dss_boottime_get_clocks() < 0) { ++ pr_err("can't get DSS clocks\n"); ++ return; ++ } ++ if (dss_boottime_enable_clocks() < 0) { ++ pr_err("can't enable DSS clocks\n"); ++ dss_boottime_put_clocks(); ++ return; ++ } ++ if (dss_boottime_reset() < 0) ++ pr_err("can't reset DSS"); ++ ++ dss_boottime_disable_clocks(); ++ dss_boottime_put_clocks(); ++} ++ ++#endif + + #if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE) + +@@ -70,13 +99,17 @@ static inline int range_included(unsigne + + + /* Check if there is an overlapping region. */ +-static int fbmem_region_reserved(unsigned long start, size_t size) ++static int fbmem_region_reserved(int region_idx, ++ unsigned long start, size_t size) + { + struct omapfb_mem_region *rg; + int i; + + rg = &omapfb_config.mem_desc.region[0]; + for (i = 0; i < OMAPFB_PLANE_NUM; i++, rg++) { ++ if (i == region_idx) ++ /* Don't check against self. */ ++ continue; + if (!rg->paddr) + /* Empty slot. */ + continue; +@@ -90,7 +123,8 @@ static int fbmem_region_reserved(unsigne + * Get the region_idx`th region from board config/ATAG and convert it to + * our internal format. 
+ */ +-static int get_fbmem_region(int region_idx, struct omapfb_mem_region *rg) ++static int __init _get_fbmem_region(int region_idx, ++ struct omapfb_mem_region *rg) + { + const struct omap_fbmem_config *conf; + u32 paddr; +@@ -113,6 +147,103 @@ static int get_fbmem_region(int region_i + return 0; + } + ++static void __init get_all_regions(void) ++{ ++ int i; ++ ++ i = 0; ++ while (i < OMAPFB_PLANE_NUM) { ++ struct omapfb_mem_region rg; ++ ++ if (_get_fbmem_region(i, &rg) < 0) ++ break; ++ omapfb_config.mem_desc.region[i] = rg; ++ i++; ++ } ++ omapfb_config.mem_desc.region_cnt = i; ++} ++ ++ ++static int __init get_fbmem_region(int region_idx, ++ struct omapfb_mem_region *rg) ++{ ++ if (region_idx >= omapfb_config.mem_desc.region_cnt) ++ return -ENOENT; ++ *rg = omapfb_config.mem_desc.region[region_idx]; ++ ++ return 0; ++} ++ ++#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT ++ ++static void __init detect_hw_base_addr(void) ++{ ++ int ridx; ++ ++ for (ridx = 0; ridx < OMAPFB_PLANE_NUM; ridx++) { ++ struct omapfb_mem_region rg; ++ u32 paddr; ++ ++ if (get_fbmem_region(ridx, &rg) < 0) ++ break; ++ paddr = dss_boottime_get_plane_base(ridx); ++ if (paddr == -1UL) ++ continue; ++ ++ rg.paddr = paddr; ++ omapfb_config.mem_desc.region[ridx] = rg; ++ } ++} ++ ++#ifdef CONFIG_FB_OMAP ++static void __init enable_used_clocks(void) ++{ ++ int i; ++ ++ for (i = 0; i < OMAPFB_PLANE_NUM; i++) ++ if (dss_boottime_plane_is_enabled(i)) ++ break; ++ if (i == OMAPFB_PLANE_NUM) ++ /* No planes active */ ++ return; ++ ++ if (dss_boottime_get_clocks() < 0) { ++ pr_err("Can't get DSS clocks\n"); ++ return; ++ } ++ ++ if (dss_boottime_enable_clocks() < 0) { ++ pr_err("Can't enable DSS clocks\n"); ++ dss_boottime_put_clocks(); ++ } ++ ++ return; ++} ++#endif /* CONFIG_FB_OMAP */ ++ ++ ++#else ++static void inline __init detect_hw_base_addr(void) ++{ ++} ++ ++static void inline __init enable_used_clocks(void) ++{ ++} ++#endif /* FB_OMAP_BOOTLOADER_INIT */ ++ ++static void __init init_regions(void) ++{ ++ static int regions_inited; ++ ++ if (regions_inited) ++ return; ++ ++ regions_inited = 1; ++ get_all_regions(); ++ detect_hw_base_addr(); ++} ++ + static int set_fbmem_region_type(struct omapfb_mem_region *rg, int mem_type, + unsigned long mem_start, + unsigned long mem_size) +@@ -157,7 +288,7 @@ static int check_fbmem_region(int region + * Fixed region for the given RAM range. Check if it's already + * reserved by the FB code or someone else. 
+ */ +- if (fbmem_region_reserved(paddr, size) || ++ if (fbmem_region_reserved(region_idx, paddr, size) || + !range_included(paddr, size, start_avail, size_avail)) { + printk(KERN_ERR "Trying to use reserved memory " + "for FB region %d\n", region_idx); +@@ -181,6 +312,8 @@ void __init omapfb_reserve_sdram(void) + if (config_invalid) + return; + ++ init_regions(); ++ + bdata = NODE_DATA(0)->bdata; + sdram_start = bdata->node_min_pfn << PAGE_SHIFT; + sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start; +@@ -201,19 +334,22 @@ void __init omapfb_reserve_sdram(void) + sdram_start, sdram_size) < 0 || + (rg.type != OMAPFB_MEMTYPE_SDRAM)) + continue; +- BUG_ON(omapfb_config.mem_desc.region[i].size); + if (check_fbmem_region(i, &rg, sdram_start, sdram_size) < 0) { + config_invalid = 1; + return; + } ++ + if (rg.paddr) { +- reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT); ++ if (reserve_bootmem(rg.paddr, rg.size, ++ BOOTMEM_EXCLUSIVE) < 0) { ++ config_invalid = 1; ++ return; ++ } + reserved += rg.size; + } + omapfb_config.mem_desc.region[i] = rg; + configured_regions++; + } +- omapfb_config.mem_desc.region_cnt = i; + if (reserved) + pr_info("Reserving %lu bytes SDRAM for frame buffer\n", + reserved); +@@ -243,6 +379,8 @@ unsigned long omapfb_reserve_sram(unsign + if (config_invalid) + return 0; + ++ init_regions(); ++ + reserved = 0; + pend_avail = pstart_avail + size_avail; + for (i = 0; ; i++) { +@@ -260,7 +398,6 @@ unsigned long omapfb_reserve_sram(unsign + sram_pstart, sram_size) < 0 || + (rg.type != OMAPFB_MEMTYPE_SRAM)) + continue; +- BUG_ON(omapfb_config.mem_desc.region[i].size); + + if (check_fbmem_region(i, &rg, pstart_avail, size_avail) < 0) { + config_invalid = 1; +@@ -291,7 +428,6 @@ unsigned long omapfb_reserve_sram(unsign + omapfb_config.mem_desc.region[i] = rg; + configured_regions++; + } +- omapfb_config.mem_desc.region_cnt = i; + if (reserved) + pr_info("Reserving %lu bytes SRAM for frame buffer\n", + reserved); +@@ -303,12 +439,13 @@ void omapfb_set_ctrl_platform_data(void + omapfb_config.ctrl_platform_data = data; + } + +-static inline int omap_init_fb(void) ++int __init omap_init_fb(void) + { + const struct omap_lcd_config *conf; + +- if (config_invalid) ++ if (fb_initialized || config_invalid) + return 0; ++ fb_initialized = 1; + if (configured_regions != omapfb_config.mem_desc.region_cnt) { + printk(KERN_ERR "Invalid FB mem configuration entries\n"); + return 0; +@@ -322,10 +459,69 @@ static inline int omap_init_fb(void) + } + omapfb_config.lcd = *conf; + ++#if defined (CONFIG_FB_OMAP_MODULE) ++ reset_dss(); ++#else ++ enable_used_clocks(); ++#endif + return platform_device_register(&omap_fb_device); + } + +-arch_initcall(omap_init_fb); ++#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) ++ ++static u64 omap_fb_dma_mask = ~(u32)0; ++static struct omapfb_platform_data omapfb_config; ++ ++static struct platform_device omap_fb_device = { ++ .name = "omapfb", ++ .id = -1, ++ .dev = { ++ .dma_mask = &omap_fb_dma_mask, ++ .coherent_dma_mask = ~(u32)0, ++ .platform_data = &omapfb_config, ++ }, ++ .num_resources = 0, ++}; ++ ++void omapfb_set_platform_data(struct omapfb_platform_data *data) ++{ ++ omapfb_config = *data; ++} ++ ++#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT ++static void __init enable_used_clocks(void) ++{ ++ if (!dss_boottime_plane_is_enabled(0)) ++ return; ++ ++ if (dss_boottime_get_clocks() < 0) { ++ pr_err("Can't get DSS clocks\n"); ++ return; ++ } ++ ++ if (dss_boottime_enable_clocks() < 0) { ++ pr_err("Can't enable DSS clocks\n"); ++ 
dss_boottime_put_clocks(); ++ } ++ ++ return; ++} ++#else ++static void inline __init enable_used_clocks(void) { } ++#endif /* FB_OMAP_BOOTLOADER_INIT */ ++ ++int __init omap_init_fb(void) ++{ ++ if (fb_initialized) ++ return 0; ++ fb_initialized = 1; ++#if defined (CONFIG_FB_OMAP2_MODULE) ++ reset_dss(); ++#else ++ enable_used_clocks(); ++#endif ++ return platform_device_register(&omap_fb_device); ++} + + #else + +@@ -339,5 +535,17 @@ unsigned long omapfb_reserve_sram(unsign + return 0; + } + ++int __init omap_init_fb(void) ++{ ++ if (fb_initialized) ++ return 0; ++ fb_initialized = 1; ++ reset_dss(); ++ ++ return 0; ++} + + #endif ++ ++arch_initcall(omap_init_fb); ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/gpio.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/gpio.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/gpio.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/gpio.c 2011-09-04 11:31:05.000000000 +0200 +@@ -20,10 +20,13 @@ + #include + + #include ++#include + #include + #include + #include + #include ++#include ++#include + + /* + * OMAP1510 GPIO registers +@@ -153,6 +156,7 @@ struct gpio_bank { + spinlock_t lock; + struct gpio_chip chip; + struct clk *dbck; ++ u32 dbck_enable_mask; + }; + + #define METHOD_MPUIO 0 +@@ -219,6 +223,74 @@ static struct gpio_bank gpio_bank_34xx[6 + { OMAP34XX_GPIO6_BASE, INT_34XX_GPIO_BANK6, IH_GPIO_BASE + 160, METHOD_GPIO_24XX }, + }; + ++#define OMAP34XX_PAD_SAFE_MODE 0x7 ++#define OMAP34XX_PAD_IN_PU_GPIO 0x11c ++#define OMAP34XX_PAD_IN_PD_GPIO 0x10c ++#define OMAP34XX_PAD_WAKE_EN (1 << 14) ++ ++struct omap3_gpio_regs { ++ u32 sysconfig; ++ u32 irqenable1; ++ u32 irqenable2; ++ u32 wake_en; ++ u32 ctrl; ++ u32 oe; ++ u32 leveldetect0; ++ u32 leveldetect1; ++ u32 risingdetect; ++ u32 fallingdetect; ++ u32 dataout; ++}; ++ ++static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS]; ++ ++/* GPIO -> PAD init configuration struct */ ++struct gpio_pad_range { ++ /* Range start GPIO # */ ++ u16 min; ++ /* Range end GPIO # */ ++ u16 max; ++ /* Start pad config offset */ ++ u16 offset; ++}; ++ ++/* ++ * Defines GPIO to padconfig mapping. For example first definition tells ++ * us that there is a range of GPIOs 34...43 which have their padconfigs ++ * starting from offset 0x7a, i.e. gpio 34->0x7a, 35->0x7c, 36->0x7e ... etc. 
++ */ ++static const struct gpio_pad_range gpio_pads_config[] = { ++ { 34, 43, 0x7a }, ++ { 44, 51, 0x9e }, ++ { 52, 59, 0xb0 }, ++ { 60, 62, 0xc6 }, ++ { 63, 111, 0xce }, ++ { 167, 167, 0x130 }, ++ { 126, 126, 0x132 }, ++ { 112, 166, 0x134 }, ++ { 120, 122, 0x1a2 }, ++ { 124, 125, 0x1a8 }, ++ { 130, 131, 0x1ac }, ++ { 169, 169, 0x1b0 }, ++ { 188, 191, 0x1b2 }, ++ { 168, 168, 0x1be }, ++ { 183, 185, 0x1c0 }, ++ { 170, 182, 0x1c6 }, ++ { 0, 0, 0x1e0 }, ++ { 186, 186, 0x1e2 }, ++ { 12, 29, 0x5d8 }, ++}; ++ ++/* GPIO -> PAD config mapping for OMAP3 */ ++struct gpio_pad { ++ s16 gpio; ++ u16 offset; ++ u16 save; ++}; ++ ++#define OMAP34XX_GPIO_AMT (32 * OMAP34XX_NR_GPIOS) ++ ++struct gpio_pad *gpio_pads; + #endif + + static struct gpio_bank *gpio_bank; +@@ -471,6 +543,7 @@ void omap_set_gpio_debounce(int gpio, in + goto done; + + if (cpu_is_omap34xx()) { ++ bank->dbck_enable_mask = val; + if (enable) + clk_enable(bank->dbck); + else +@@ -506,6 +579,9 @@ static inline void set_24xx_gpio_trigger + { + void __iomem *base = bank->base; + u32 gpio_bit = 1 << gpio; ++ struct gpio_pad *pad; ++ int gpio_num; ++ u16 val; + + MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, + trigger & IRQ_TYPE_LEVEL_LOW); +@@ -517,13 +593,30 @@ static inline void set_24xx_gpio_trigger + trigger & IRQ_TYPE_EDGE_FALLING); + + if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { ++ pad = gpio_pads; ++ ++ gpio_num = bank->virtual_irq_start - IH_GPIO_BASE + gpio; ++ /* Find the pad corresponding the GPIO */ ++ while (pad->gpio >= 0 && pad->gpio != gpio_num) ++ pad++; ++ /* Enable / disable pad wakeup */ ++ if (pad->gpio == gpio_num) { ++ val = omap_ctrl_readw(pad->offset); ++ if (trigger) ++ val |= OMAP34XX_PAD_WAKE_EN; ++ else ++ val &= ~(u16)OMAP34XX_PAD_WAKE_EN; ++ omap_ctrl_writew(val, pad->offset); ++ } + if (trigger != 0) + __raw_writel(1 << gpio, bank->base + + OMAP24XX_GPIO_SETWKUENA); + else + __raw_writel(1 << gpio, bank->base + + OMAP24XX_GPIO_CLEARWKUENA); +- } else { ++ } ++ /* This part needs to be executed always for OMAP34xx */ ++ if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) { + if (trigger != 0) + bank->enabled_non_wakeup_gpios |= gpio_bit; + else +@@ -691,8 +784,12 @@ static void _clear_gpio_irqbank(struct g + + /* Workaround for clearing DSP GPIO interrupts to allow retention */ + #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) ++ reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2; + if (cpu_is_omap24xx() || cpu_is_omap34xx()) +- __raw_writel(gpio_mask, bank->base + OMAP24XX_GPIO_IRQSTATUS2); ++ __raw_writel(gpio_mask, reg); ++ ++ /* Flush posted write for the irq status to avoid spurious interrupts */ ++ __raw_readl(reg); + #endif + } + +@@ -837,13 +934,10 @@ static int _set_gpio_wakeup(struct gpio_ + case METHOD_MPUIO: + case METHOD_GPIO_1610: + spin_lock_irqsave(&bank->lock, flags); +- if (enable) { ++ if (enable) + bank->suspend_wakeup |= (1 << gpio); +- enable_irq_wake(bank->irq); +- } else { +- disable_irq_wake(bank->irq); ++ else + bank->suspend_wakeup &= ~(1 << gpio); +- } + spin_unlock_irqrestore(&bank->lock, flags); + return 0; + #endif +@@ -856,13 +950,10 @@ static int _set_gpio_wakeup(struct gpio_ + return -EINVAL; + } + spin_lock_irqsave(&bank->lock, flags); +- if (enable) { ++ if (enable) + bank->suspend_wakeup |= (1 << gpio); +- enable_irq_wake(bank->irq); +- } else { +- disable_irq_wake(bank->irq); ++ else + bank->suspend_wakeup &= ~(1 << gpio); +- } + spin_unlock_irqrestore(&bank->lock, flags); + return 0; + #endif +@@ -1059,6 +1150,7 @@ static void 
gpio_mask_irq(unsigned int i + struct gpio_bank *bank = get_irq_chip_data(irq); + + _set_gpio_irqenable(bank, gpio, 0); ++ _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE); + } + + static void gpio_unmask_irq(unsigned int irq) +@@ -1066,6 +1158,11 @@ static void gpio_unmask_irq(unsigned int + unsigned int gpio = irq - IH_GPIO_BASE; + struct gpio_bank *bank = get_irq_chip_data(irq); + unsigned int irq_mask = 1 << get_gpio_index(gpio); ++ struct irq_desc *desc = irq_to_desc(irq); ++ u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK; ++ ++ if (trigger) ++ _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); + + /* For level-triggered GPIOs, the clearing must be done after + * the HW source is cleared, thus after the handler has run */ +@@ -1275,6 +1372,68 @@ static struct clk * gpio5_fck; + + #if defined(CONFIG_ARCH_OMAP3) + static struct clk *gpio_iclks[OMAP34XX_NR_GPIOS]; ++ ++/* ++ * Following pad init code in addition to the context / restore hooks are ++ * needed to fix glitches in GPIO outputs during off-mode. See OMAP3 ++ * errate section 1.158 ++ */ ++static int __init omap3_gpio_pads_init(void) ++{ ++ int i, j, min, max, gpio_amt; ++ u16 offset; ++ u16 *gpio_pad_map; ++ ++ gpio_amt = 0; ++ ++ gpio_pad_map = kzalloc(sizeof(u16) * OMAP34XX_GPIO_AMT, GFP_KERNEL); ++ if (gpio_pad_map == NULL) { ++ printk(KERN_ERR "FATAL: Failed to allocate gpio_pad_map\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(gpio_pads_config); i++) { ++ min = gpio_pads_config[i].min; ++ max = gpio_pads_config[i].max; ++ offset = gpio_pads_config[i].offset; ++ ++ for (j = min; j <= max; j++) { ++ /* ++ * Check if pad has been configured as GPIO. ++ * First module (gpio 0...31) is ignored as it is ++ * in wakeup domain and does not need special ++ * handling during off mode. ++ */ ++ if (j > 31 && (omap_ctrl_readw(offset) & ++ OMAP34XX_MUX_MODE7) == OMAP34XX_MUX_MODE4) { ++ gpio_pad_map[j] = offset; ++ gpio_amt++; ++ } ++ offset += 2; ++ } ++ } ++ gpio_pads = kmalloc(sizeof(struct gpio_pad) * (gpio_amt + 1), ++ GFP_KERNEL); ++ ++ if (gpio_pads == NULL) { ++ printk(KERN_ERR "FATAL: Failed to allocate gpio_pads\n"); ++ kfree(gpio_pad_map); ++ return -ENOMEM; ++ } ++ ++ gpio_amt = 0; ++ for (i = 0; i < OMAP34XX_GPIO_AMT; i++) { ++ if (gpio_pad_map[i] != 0) { ++ gpio_pads[gpio_amt].gpio = i; ++ gpio_pads[gpio_amt].offset = gpio_pad_map[i]; ++ gpio_amt++; ++ } ++ } ++ gpio_pads[gpio_amt].gpio = -1; ++ kfree(gpio_pad_map); ++ return 0; ++} ++early_initcall(omap3_gpio_pads_init); + #endif + + /* This lock class tells lockdep that GPIO irqs are in a different +@@ -1439,7 +1598,8 @@ static int __init _omap_gpio_init(void) + + /* Initialize interface clock ungated, module enabled */ + __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL); +- if (i < ARRAY_SIZE(non_wakeup_gpios)) ++ if (cpu_is_omap24xx() && ++ i < ARRAY_SIZE(non_wakeup_gpios)) + bank->non_wakeup_gpios = non_wakeup_gpios[i]; + gpio_count = 32; + } +@@ -1606,16 +1766,27 @@ static struct sys_device omap_gpio_devic + + static int workaround_enabled; + +-void omap2_gpio_prepare_for_retention(void) ++void omap2_gpio_prepare_for_idle(int power_state) + { + int i, c = 0; ++ int min = 0; + +- /* Remove triggering for all non-wakeup GPIOs. Otherwise spurious +- * IRQs will be generated. See OMAP2420 Errata item 1.101. 
*/ +- for (i = 0; i < gpio_bank_count; i++) { ++ if (cpu_is_omap34xx()) ++ min = 1; ++ ++ for (i = min; i < gpio_bank_count; i++) { + struct gpio_bank *bank = &gpio_bank[i]; + u32 l1, l2; + ++ if (cpu_is_omap34xx() && bank->dbck_enable_mask) ++ clk_disable(bank->dbck); ++ ++ if (power_state > PWRDM_POWER_OFF) ++ continue; ++ ++ /* If going to OFF, remove triggering for all ++ * non-wakeup GPIOs. Otherwise spurious IRQs will be ++ * generated. See OMAP2420 Errata item 1.101. */ + if (!(bank->enabled_non_wakeup_gpios)) + continue; + #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) +@@ -1627,10 +1798,12 @@ void omap2_gpio_prepare_for_retention(vo + bank->saved_risingdetect = l2; + l1 &= ~bank->enabled_non_wakeup_gpios; + l2 &= ~bank->enabled_non_wakeup_gpios; +-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) +- __raw_writel(l1, bank->base + OMAP24XX_GPIO_FALLINGDETECT); +- __raw_writel(l2, bank->base + OMAP24XX_GPIO_RISINGDETECT); +-#endif ++ if (cpu_is_omap24xx()) { ++ __raw_writel(l1, bank->base + ++ OMAP24XX_GPIO_FALLINGDETECT); ++ __raw_writel(l2, bank->base + ++ OMAP24XX_GPIO_RISINGDETECT); ++ } + c++; + } + if (!c) { +@@ -1640,15 +1813,22 @@ void omap2_gpio_prepare_for_retention(vo + workaround_enabled = 1; + } + +-void omap2_gpio_resume_after_retention(void) ++void omap2_gpio_resume_after_idle(void) + { + int i; ++ int min = 0; + +- if (!workaround_enabled) +- return; +- for (i = 0; i < gpio_bank_count; i++) { ++ if (cpu_is_omap34xx()) ++ min = 1; ++ for (i = min; i < gpio_bank_count; i++) { + struct gpio_bank *bank = &gpio_bank[i]; +- u32 l; ++ u32 l, gen, gen0, gen1; ++ ++ if (cpu_is_omap34xx() && bank->dbck_enable_mask) ++ clk_enable(bank->dbck); ++ ++ if (!workaround_enabled) ++ continue; + + if (!(bank->enabled_non_wakeup_gpios)) + continue; +@@ -1666,14 +1846,33 @@ void omap2_gpio_resume_after_retention(v + l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN); + #endif + l ^= bank->saved_datain; +- l &= bank->non_wakeup_gpios; +- if (l) { ++ l &= bank->enabled_non_wakeup_gpios; ++ ++ /* ++ * No need to generate IRQs for the rising edge for gpio IRQs ++ * configured with falling edge only; and vice versa. ++ */ ++ gen0 = l & bank->saved_fallingdetect; ++ gen0 &= bank->saved_datain; ++ ++ gen1 = l & bank->saved_risingdetect; ++ gen1 &= ~(bank->saved_datain); ++ ++ /* FIXME: Consider GPIO IRQs with level detections properly! 
*/ ++ gen = l & (~(bank->saved_fallingdetect) & ++ ~(bank->saved_risingdetect)); ++ /* Consider all GPIO IRQs needed to be updated */ ++ gen |= gen0 | gen1; ++ ++ if (gen) { + u32 old0, old1; + #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) + old0 = __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0); + old1 = __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1); +- __raw_writel(old0 | l, bank->base + OMAP24XX_GPIO_LEVELDETECT0); +- __raw_writel(old1 | l, bank->base + OMAP24XX_GPIO_LEVELDETECT1); ++ __raw_writel(old0 | gen, bank->base + ++ OMAP24XX_GPIO_LEVELDETECT0); ++ __raw_writel(old1 | gen, bank->base + ++ OMAP24XX_GPIO_LEVELDETECT1); + __raw_writel(old0, bank->base + OMAP24XX_GPIO_LEVELDETECT0); + __raw_writel(old1, bank->base + OMAP24XX_GPIO_LEVELDETECT1); + #endif +@@ -1684,6 +1883,145 @@ void omap2_gpio_resume_after_retention(v + + #endif + ++#ifdef CONFIG_ARCH_OMAP34XX ++/* save the registers of bank 2-6 */ ++void omap3_gpio_save_context(void) ++{ ++ int i; ++ struct gpio_bank *bank; ++ int n; ++ u16 offset, conf; ++ u32 out, pin; ++ struct gpio_pad *pad; ++ u32 tmp_oe[OMAP34XX_NR_GPIOS]; ++ /* saving banks from 2-6 only */ ++ for (i = 1; i < gpio_bank_count; i++) { ++ bank = &gpio_bank[i]; ++ gpio_context[i].sysconfig = ++ __raw_readl(bank->base + OMAP24XX_GPIO_SYSCONFIG); ++ gpio_context[i].irqenable1 = ++ __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1); ++ gpio_context[i].irqenable2 = ++ __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2); ++ gpio_context[i].wake_en = ++ __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN); ++ gpio_context[i].ctrl = ++ __raw_readl(bank->base + OMAP24XX_GPIO_CTRL); ++ gpio_context[i].oe = ++ __raw_readl(bank->base + OMAP24XX_GPIO_OE); ++ tmp_oe[i] = gpio_context[i].oe; ++ gpio_context[i].leveldetect0 = ++ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0); ++ gpio_context[i].leveldetect1 = ++ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1); ++ gpio_context[i].risingdetect = ++ __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT); ++ gpio_context[i].fallingdetect = ++ __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT); ++ gpio_context[i].dataout = ++ __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT); ++ } ++ pad = gpio_pads; ++ ++ if (pad == NULL) ++ return; ++ ++ while (pad->gpio >= 0) { ++ /* n = gpio number, 0..191 */ ++ n = pad->gpio; ++ /* i = gpio bank, 0..5 */ ++ i = n >> 5; ++ /* offset of padconf register */ ++ offset = pad->offset; ++ bank = &gpio_bank[i]; ++ /* bit position of gpio in the bank 0..31 */ ++ pin = 1 << (n & 0x1f); ++ ++ /* check if gpio is configured as output => need hack */ ++ if (!(tmp_oe[i] & pin)) { ++ /* save current padconf setting */ ++ pad->save = omap_ctrl_readw(offset); ++ out = gpio_context[i].dataout; ++ if (out & pin) ++ /* High: PU + input */ ++ conf = OMAP34XX_PAD_IN_PU_GPIO; ++ else ++ /* Low: PD + input */ ++ conf = OMAP34XX_PAD_IN_PD_GPIO; ++ /* Set PAD to GPIO + input */ ++ omap_ctrl_writew(conf, offset); ++ /* Set GPIO to input */ ++ tmp_oe[i] |= pin; ++ __raw_writel(tmp_oe[i], ++ bank->base + OMAP24XX_GPIO_OE); ++ /* Set PAD to safe mode */ ++ omap_ctrl_writew(conf | OMAP34XX_PAD_SAFE_MODE, offset); ++ } else ++ pad->save = 0; ++ pad++; ++ } ++} ++EXPORT_SYMBOL(omap3_gpio_save_context); ++ ++/* restore the required registers of bank 2-6 */ ++void omap3_gpio_restore_context(void) ++{ ++ int i; ++ for (i = 1; i < gpio_bank_count; i++) { ++ struct gpio_bank *bank = &gpio_bank[i]; ++ __raw_writel(gpio_context[i].sysconfig, ++ bank->base + OMAP24XX_GPIO_SYSCONFIG); ++ 
__raw_writel(gpio_context[i].irqenable1, ++ bank->base + OMAP24XX_GPIO_IRQENABLE1); ++ __raw_writel(gpio_context[i].irqenable2, ++ bank->base + OMAP24XX_GPIO_IRQENABLE2); ++ __raw_writel(gpio_context[i].wake_en, ++ bank->base + OMAP24XX_GPIO_WAKE_EN); ++ __raw_writel(gpio_context[i].ctrl, ++ bank->base + OMAP24XX_GPIO_CTRL); ++ __raw_writel(gpio_context[i].leveldetect0, ++ bank->base + OMAP24XX_GPIO_LEVELDETECT0); ++ __raw_writel(gpio_context[i].leveldetect1, ++ bank->base + OMAP24XX_GPIO_LEVELDETECT1); ++ __raw_writel(gpio_context[i].risingdetect, ++ bank->base + OMAP24XX_GPIO_RISINGDETECT); ++ __raw_writel(gpio_context[i].fallingdetect, ++ bank->base + OMAP24XX_GPIO_FALLINGDETECT); ++ __raw_writel(gpio_context[i].dataout, ++ bank->base + OMAP24XX_GPIO_DATAOUT); ++ __raw_writel(gpio_context[i].oe, ++ bank->base + OMAP24XX_GPIO_OE); ++ } ++} ++EXPORT_SYMBOL(omap3_gpio_restore_context); ++ ++void omap3_gpio_restore_pad_context(int restore_oe) ++{ ++ struct gpio_pad *pad; ++ int i; ++ ++ pad = gpio_pads; ++ ++ if (restore_oe) { ++ for (i = 1; i < gpio_bank_count; i++) { ++ struct gpio_bank *bank = &gpio_bank[i]; ++ __raw_writel(gpio_context[i].oe, ++ bank->base + OMAP24XX_GPIO_OE); ++ } ++ } ++ ++ if (pad == NULL) ++ return; ++ ++ while (pad->gpio >= 0) { ++ if (pad->save) ++ omap_ctrl_writew(pad->save, pad->offset); ++ pad++; ++ } ++} ++EXPORT_SYMBOL(omap3_gpio_restore_pad_context); ++#endif ++ + /* + * This may get called early from board specific init + * for boards that have interrupts routed via FPGA. +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/gpio-switch.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/gpio-switch.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/gpio-switch.c 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/gpio-switch.c 2011-09-04 11:31:05.000000000 +0200 +@@ -37,6 +37,8 @@ struct gpio_switch { + u16 debounce_rising; + u16 debounce_falling; + ++ int disabled; ++ + void (* notify)(void *data, int state); + void *notify_data; + +@@ -151,6 +153,8 @@ static ssize_t gpio_sw_state_show(struct + struct gpio_switch *sw = dev_get_drvdata(dev); + const char **str; + ++ if (sw->disabled) ++ sw->state = gpio_sw_get_state(sw); + str = get_sw_str(sw); + return sprintf(buf, "%s\n", str[sw->state]); + } +@@ -182,6 +186,42 @@ static ssize_t gpio_sw_direction_show(st + + static DEVICE_ATTR(direction, S_IRUGO, gpio_sw_direction_show, NULL); + ++static ssize_t gpio_sw_disable_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ struct gpio_switch *sw = dev_get_drvdata(dev); ++ unsigned long res; ++ ++ if (strict_strtoul(buf, 10, &res) < 0) ++ return -EINVAL; ++ ++ if (!!res == sw->disabled) ++ goto out; ++ sw->disabled = !!res; ++ ++ if (res) { ++ disable_irq(gpio_to_irq(sw->gpio)); ++ } else { ++ sw->state = gpio_sw_get_state(sw); ++ enable_irq(gpio_to_irq(sw->gpio)); ++ } ++out: ++ return count; ++} ++ ++static ssize_t gpio_sw_disable_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct gpio_switch *sw = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%u\n", sw->disabled); ++} ++ ++static DEVICE_ATTR(disable, S_IRUGO | S_IWUSR, gpio_sw_disable_show, ++ gpio_sw_disable_store); + + static irqreturn_t gpio_sw_irq_handler(int irq, void *arg) + { +@@ -286,17 +326,24 @@ static int __init new_switch(struct gpio + + /* input: 1, output: 0 */ + direction = !(sw->flags & OMAP_GPIO_SWITCH_FLAG_OUTPUT); +- if (direction) ++ if (direction) { + 
gpio_direction_input(sw->gpio); +- else +- gpio_direction_output(sw->gpio, true); ++ sw->state = gpio_sw_get_state(sw); ++ } else { ++ int state = sw->state = !!(sw->flags & ++ OMAP_GPIO_SWITCH_FLAG_OUTPUT_INIT_ACTIVE); + +- sw->state = gpio_sw_get_state(sw); ++ if (sw->flags & OMAP_GPIO_SWITCH_FLAG_INVERTED) ++ state = !state; ++ gpio_direction_output(sw->gpio, state); ++ } + + r = 0; + r |= device_create_file(&sw->pdev.dev, &dev_attr_state); + r |= device_create_file(&sw->pdev.dev, &dev_attr_type); + r |= device_create_file(&sw->pdev.dev, &dev_attr_direction); ++ if (direction) ++ r |= device_create_file(&sw->pdev.dev, &dev_attr_disable); + if (r) + printk(KERN_ERR "gpio-switch: attribute file creation " + "failed for %s\n", sw->name); +@@ -454,6 +501,9 @@ static void gpio_sw_cleanup(void) + device_remove_file(&sw->pdev.dev, &dev_attr_type); + device_remove_file(&sw->pdev.dev, &dev_attr_direction); + ++ if (!(sw->flags & OMAP_GPIO_SWITCH_FLAG_OUTPUT)) ++ device_remove_file(&sw->pdev.dev, &dev_attr_disable); ++ + platform_device_unregister(&sw->pdev); + gpio_free(sw->gpio); + old = sw; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/brddefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/brddefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/brddefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/brddefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,54 @@ ++/* ++ * brddefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== brddefs.h ======== ++ * Description: ++ * Global BRD constants and types, shared between WSX, WCD, and WMD. ++ * ++ *! Revision History: ++ *! ================ ++ *! 31-Jan-2000 rr: Comment Exec changed to Monitor ++ *! 22-Jul-1999 jeh Added BRD_LOADED state. ++ *! 26-Mar-1997 gp: Added BRD_SYNCINIT state. ++ *! 11-Dec-1996 cr: Added BRD_LASTSTATE definition. ++ *! 11-Jul-1996 gp: Added missing u32 callback argument to BRD_CALLBACK. ++ *! 10-Jun-1996 gp: Created from board.h and brd.h. ++ */ ++ ++#ifndef BRDDEFS_ ++#define BRDDEFS_ ++ ++/* platform status values */ ++#define BRD_STOPPED 0x0 /* No Monitor Loaded, Not running. */ ++#define BRD_IDLE 0x1 /* Monitor Loaded, but suspended. */ ++#define BRD_RUNNING 0x2 /* Monitor loaded, and executing. */ ++#define BRD_UNKNOWN 0x3 /* Board state is indeterminate. */ ++#define BRD_SYNCINIT 0x4 ++#define BRD_LOADED 0x5 ++#define BRD_LASTSTATE BRD_LOADED /* Set to highest legal board state. 
*/ ++#define BRD_SLEEP_TRANSITION 0x6 /* Sleep transition in progress */ ++#define BRD_HIBERNATION 0x7 /* MPU initiated hibernation */ ++#define BRD_RETENTION 0x8 /* Retention mode */ ++#define BRD_DSP_HIBERNATION 0x9 /* DSP initiated hibernation */ ++#define BRD_ERROR 0xA /* Board state is Error */ ++ typedef u32 BRD_STATUS; ++ ++/* BRD Object */ ++ struct BRD_OBJECT; ++ ++#endif /* BRDDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cfgdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cfgdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cfgdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cfgdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,116 @@ ++/* ++ * cfgdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== cfgdefs.h ======== ++ * Purpose: ++ * Global CFG constants and types, shared between class and mini driver. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 kc Removed wIOPort* in CFG_HOSTRES. ++ *! 06-Sep-2000 jeh Added channel info to CFG_HOSTRES. ++ *! 09-May-2000 rr: CFG_HOSTRES now support multiple windows for PCI support. ++ *! 31-Jan-2000 rr: Comments changed after code review. ++ *! 06-Jan-2000 rr: Bus Type included in CFG_HOSTRES. ++ *! 12-Nov-1999 rr: CFG_HOSTRES member names changed. ++ *! 25-Oct-1999 rr: Modified the CFG_HOSTRES Structure ++ *! PCMCIA ISR Register/Unregister fxn removed.. ++ *! New flag PCCARD introduced during compile time. ++ *! 10-Sep-1999 ww: Added PCMCIA ISR Register/Unregister fxn. ++ *! 01-Sep-1999 ag: Removed NT/95 specific fields in CFG_HOSTRES ++ *! 27-Oct-1997 cr: Updated CFG_HOSTRES struct to support 1+ IRQs per board. ++ *! 17-Sep-1997 gp: Tacked some NT config info to end of CFG_HOSTRES structure. ++ *! 12-Dec-1996 cr: Cleaned up after code review. ++ *! 14-Nov-1996 gp: Renamed from wsxcfg.h ++ *! 19-Jun-1996 cr: Created. ++ */ ++ ++#ifndef CFGDEFS_ ++#define CFGDEFS_ ++ ++/* Maximum length of module search path. */ ++#define CFG_MAXSEARCHPATHLEN 255 ++ ++/* Maximum length of general paths. */ ++#define CFG_MAXPATH 255 ++ ++/* Host Resources: */ ++#define CFG_MAXMEMREGISTERS 9 ++#define CFG_MAXIOPORTS 20 ++#define CFG_MAXIRQS 7 ++#define CFG_MAXDMACHANNELS 7 ++ ++/* IRQ flag */ ++#define CFG_IRQSHARED 0x01 /* IRQ can be shared */ ++ ++/* DSP Resources: */ ++#define CFG_DSPMAXMEMTYPES 10 ++#define CFG_DEFAULT_NUM_WINDOWS 1 /* We support only one window. */ ++ ++/* A platform-related device handle: */ ++ struct CFG_DEVNODE; ++ ++/* ++ * Host resource structure. 
++ */ ++ struct CFG_HOSTRES { ++ u32 wNumMemWindows; /* Set to default */ ++ /* This is the base.memory */ ++ u32 dwMemBase[CFG_MAXMEMREGISTERS]; /* SHM virtual address */ ++ u32 dwMemLength[CFG_MAXMEMREGISTERS]; /* Length of the Base */ ++ u32 dwMemPhys[CFG_MAXMEMREGISTERS]; /* SHM Physical address */ ++ u8 bIRQRegisters; /* IRQ Number */ ++ u8 bIRQAttrib; /* IRQ Attribute */ ++ u32 dwOffsetForMonitor; /* The Shared memory starts from ++ * dwMemBase + this offset */ ++ /* ++ * Info needed by NODE for allocating channels to communicate with RMS: ++ * dwChnlOffset: Offset of RMS channels. Lower channels are ++ * reserved. ++ * dwChnlBufSize: Size of channel buffer to send to RMS ++ * dwNumChnls: Total number of channels (including reserved). ++ */ ++ u32 dwChnlOffset; ++ u32 dwChnlBufSize; ++ u32 dwNumChnls; ++ void __iomem *dwPrmBase; ++ void __iomem *dwCmBase; ++ void __iomem *dwPerBase; ++ u32 dwPerPmBase; ++ u32 dwCorePmBase; ++ void __iomem *dwWdTimerDspBase; ++ void __iomem *dwMboxBase; ++ void __iomem *dwDmmuBase; ++ void __iomem *dwSysCtrlBase; ++ } ; ++ ++ struct CFG_DSPMEMDESC { ++ u32 uMemType; /* Type of memory. */ ++ u32 ulMin; /* Minimum amount of memory of this type. */ ++ u32 ulMax; /* Maximum amount of memory of this type. */ ++ } ; ++ ++ struct CFG_DSPRES { ++ u32 uChipType; /* DSP chip type. */ ++ u32 uWordSize; /* Number of bytes in a word */ ++ u32 cChips; /* Number of chips. */ ++ u32 cMemTypes; /* Types of memory. */ ++ struct CFG_DSPMEMDESC aMemDesc[CFG_DSPMAXMEMTYPES]; ++ /* DSP Memory types */ ++ } ; ++ ++#endif /* CFGDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cfg.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cfg.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cfg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cfg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,339 @@ ++/* ++ * cfg.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cfg.h ======== ++ * Purpose: ++ * PM Configuration module. ++ * ++ * Private Functions: ++ * CFG_Exit ++ * CFG_GetAutoStart ++ * CFG_GetCDVersion ++ * CFG_GetDevObject ++ * CFG_GetDSPResources ++ * CFG_GetExecFile ++ * CFG_GetHostResources ++ * CFG_GetObject ++ * CFG_GetPerfValue ++ * CFG_GetWMDFileName ++ * CFG_GetZLFile ++ * CFG_Init ++ * CFG_SetDevObject ++ * CFG_SetObject ++ * ++ *! Revision History: ++ *! ================= ++ *! 26-Feb-2003 kc Removed unused CFG fxns. ++ *! 28-Aug-2001 jeh Added CFG_GetLoaderName. ++ *! 26-Jul-2000 rr: Added CFG_GetDCDName to retrieve the DCD Dll name. ++ *! 13-Jul-2000 rr: Added CFG_GetObject & CFG_SetObject. ++ *! 13-Jan-2000 rr: CFG_Get/SetPrivateDword renamed to CFG_Get/SetDevObject. ++ *! CFG_GetWinBRIDGEDir/Directory,CFG_GetSearchPath removed. ++ *! 15-Jan-1998 cr: Code review cleanup. ++ *! 16-Aug-1997 cr: Added explicit cdecl identifiers. ++ *! 12-Dec-1996 gp: Moved CFG_FindInSearchPath to CSP module. ++ *! 
13-Sep-1996 gp: Added CFG_GetBoardName(). ++ *! 22-Jul-1996 gp: Added CFG_GetTraceStr, to retrieve an initial GT trace. ++ *! 26-Jun-1996 cr: Added CFG_FindInSearchPath. ++ *! 25-Jun-1996 cr: Added CFG_GetWinSPOXDir. ++ *! 17-Jun-1996 cr: Added CFG_GetDevNode. ++ *! 11-Jun-1996 cr: Cleaned up for code review. ++ *! 07-Jun-1996 cr: Added CFG_GetExecFile and CFG_GetZLFileName functions. ++ *! 04-Jun-1996 gp: Added AutoStart regkey and accessor function. Placed ++ *! OUT parameters in accessor function param. lists at end. ++ *! 29-May-1996 gp: Moved DEV_HDEVNODE to here and renamed CFG_HDEVNODE. ++ *! 22-May-1996 cr: Added GetHostResources, GetDSPResources, and ++ *! GetWMDFileName services. ++ *! 18-May-1996 gp: Created. ++ */ ++ ++#ifndef CFG_ ++#define CFG_ ++#include ++#include ++ ++/* ++ * ======== CFG_Exit ======== ++ * Purpose: ++ * Discontinue usage of the CFG module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * CFG_Init(void) was previously called. ++ * Ensures: ++ * Resources acquired in CFG_Init(void) are freed. ++ */ ++ extern void CFG_Exit(void); ++ ++/* ++ * ======== CFG_GetAutoStart ======== ++ * Purpose: ++ * Retreive the autostart mask, if any, for this board. ++ * Parameters: ++ * hDevNode: Handle to the DevNode who's WMD we are querying. ++ * pdwAutoStart: Ptr to location for 32 bit autostart mask. ++ * Returns: ++ * DSP_SOK: Success. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_RESOURCENOTAVAIL: Unable to retreive resource. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: *pdwAutoStart contains autostart mask for this devnode. ++ */ ++ extern DSP_STATUS CFG_GetAutoStart(IN struct CFG_DEVNODE *hDevNode, ++ OUT u32 *pdwAutoStart); ++ ++/* ++ * ======== CFG_GetCDVersion ======== ++ * Purpose: ++ * Retrieves the version of the PM Class Driver. ++ * Parameters: ++ * pdwVersion: Ptr to u32 to contain version number upon return. ++ * Returns: ++ * DSP_SOK: Success. pdwVersion contains Class Driver version in ++ * the form: 0xAABBCCDD where AABB is Major version and ++ * CCDD is Minor. ++ * DSP_EFAIL: Failure. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: Success. ++ * else: *pdwVersion is NULL. ++ */ ++ extern DSP_STATUS CFG_GetCDVersion(OUT u32 *pdwVersion); ++ ++/* ++ * ======== CFG_GetDevObject ======== ++ * Purpose: ++ * Retrieve the Device Object handle for a given devnode. ++ * Parameters: ++ * hDevNode: Platform's DevNode handle from which to retrieve value. ++ * pdwValue: Ptr to location to store the value. ++ * Returns: ++ * DSP_SOK: Success. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_INVALIDPOINTER: phDevObject is invalid. ++ * CFG_E_RESOURCENOTAVAIL: The resource is not available. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: *pdwValue is set to the retrieved u32. ++ * else: *pdwValue is set to 0L. ++ */ ++ extern DSP_STATUS CFG_GetDevObject(IN struct CFG_DEVNODE *hDevNode, ++ OUT u32 *pdwValue); ++ ++/* ++ * ======== CFG_GetDSPResources ======== ++ * Purpose: ++ * Get the DSP resources available to a given device. ++ * Parameters: ++ * hDevNode: Handle to the DEVNODE who's resources we are querying. ++ * pDSPResTable: Ptr to a location to store the DSP resource table. ++ * Returns: ++ * DSP_SOK: On success. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_RESOURCENOTAVAIL: The DSP Resource information is not ++ * available ++ * Requires: ++ * CFG initialized. 
++ * Ensures: ++ * DSP_SOK: pDSPResTable points to a filled table of resources allocated ++ * for the specified WMD. ++ */ ++ extern DSP_STATUS CFG_GetDSPResources(IN struct CFG_DEVNODE *hDevNode, ++ OUT struct CFG_DSPRES *pDSPResTable); ++ ++ ++/* ++ * ======== CFG_GetExecFile ======== ++ * Purpose: ++ * Retreive the default executable, if any, for this board. ++ * Parameters: ++ * hDevNode: Handle to the DevNode who's WMD we are querying. ++ * cBufSize: Size of buffer. ++ * pstrExecFile: Ptr to character buf to hold ExecFile. ++ * Returns: ++ * DSP_SOK: Success. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_INVALIDPOINTER: pstrExecFile is invalid. ++ * CFG_E_RESOURCENOTAVAIL: The resource is not available. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: Not more than cBufSize bytes were copied into pstrExecFile, ++ * and *pstrExecFile contains default executable for this ++ * devnode. ++ */ ++ extern DSP_STATUS CFG_GetExecFile(IN struct CFG_DEVNODE *hDevNode, ++ IN u32 cBufSize, ++ OUT char *pstrExecFile); ++ ++/* ++ * ======== CFG_GetHostResources ======== ++ * Purpose: ++ * Get the Host PC allocated resources assigned to a given device. ++ * Parameters: ++ * hDevNode: Handle to the DEVNODE who's resources we are querying. ++ * pHostResTable: Ptr to a location to store the host resource table. ++ * Returns: ++ * DSP_SOK: On success. ++ * CFG_E_INVALIDPOINTER: pHostResTable is invalid. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_RESOURCENOTAVAIL: The resource is not available. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: pHostResTable points to a filled table of resources ++ * allocated for the specified WMD. ++ * ++ */ ++ extern DSP_STATUS CFG_GetHostResources(IN struct CFG_DEVNODE *hDevNode, ++ OUT struct CFG_HOSTRES *pHostResTable); ++ ++/* ++ * ======== CFG_GetObject ======== ++ * Purpose: ++ * Retrieve the Driver Object handle From the Registry ++ * Parameters: ++ * pdwValue: Ptr to location to store the value. ++ * dwType Type of Object to Get ++ * Returns: ++ * DSP_SOK: Success. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: *pdwValue is set to the retrieved u32(non-Zero). ++ * else: *pdwValue is set to 0L. ++ */ ++ extern DSP_STATUS CFG_GetObject(OUT u32 *pdwValue, u32 dwType); ++ ++/* ++ * ======== CFG_GetPerfValue ======== ++ * Purpose: ++ * Retrieve a flag indicating whether PERF should log statistics for the ++ * PM class driver. ++ * Parameters: ++ * pfEnablePerf: Location to store flag. 0 indicates the key was ++ * not found, or had a zero value. A nonzero value ++ * means the key was found and had a nonzero value. ++ * Returns: ++ * Requires: ++ * pfEnablePerf != NULL; ++ * Ensures: ++ */ ++ extern void CFG_GetPerfValue(OUT bool *pfEnablePerf); ++ ++/* ++ * ======== CFG_GetWMDFileName ======== ++ * Purpose: ++ * Get the mini-driver file name for a given device. ++ * Parameters: ++ * hDevNode: Handle to the DevNode who's WMD we are querying. ++ * cBufSize: Size of buffer. ++ * pWMDFileName: Ptr to a character buffer to hold the WMD filename. ++ * Returns: ++ * DSP_SOK: On success. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_RESOURCENOTAVAIL: The filename is not available. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: Not more than cBufSize bytes were copied ++ * into pWMDFileName. 
++ * ++ */ ++ extern DSP_STATUS CFG_GetWMDFileName(IN struct CFG_DEVNODE *hDevNode, ++ IN u32 cBufSize, ++ OUT char *pWMDFileName); ++ ++/* ++ * ======== CFG_GetZLFile ======== ++ * Purpose: ++ * Retreive the ZLFile, if any, for this board. ++ * Parameters: ++ * hDevNode: Handle to the DevNode who's WMD we are querying. ++ * cBufSize: Size of buffer. ++ * pstrZLFileName: Ptr to character buf to hold ZLFileName. ++ * Returns: ++ * DSP_SOK: Success. ++ * CFG_E_INVALIDPOINTER: pstrZLFileName is invalid. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * CFG_E_RESOURCENOTAVAIL: couldn't find the ZLFileName. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: Not more than cBufSize bytes were copied into ++ * pstrZLFileName, and *pstrZLFileName contains ZLFileName ++ * for this devnode. ++ */ ++ extern DSP_STATUS CFG_GetZLFile(IN struct CFG_DEVNODE *hDevNode, ++ IN u32 cBufSize, ++ OUT char *pstrZLFileName); ++ ++/* ++ * ======== CFG_Init ======== ++ * Purpose: ++ * Initialize the CFG module's private state. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * A requirement for each of the other public CFG functions. ++ */ ++ extern bool CFG_Init(void); ++ ++/* ++ * ======== CFG_SetDevObject ======== ++ * Purpose: ++ * Store the Device Object handle for a given devnode. ++ * Parameters: ++ * hDevNode: Platform's DevNode handle we are storing value with. ++ * dwValue: Arbitrary value to store. ++ * Returns: ++ * DSP_SOK: Success. ++ * CFG_E_INVALIDHDEVNODE: hDevNode is invalid. ++ * DSP_EFAIL: Internal Error. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: The Private u32 was successfully set. ++ */ ++ extern DSP_STATUS CFG_SetDevObject(IN struct CFG_DEVNODE *hDevNode, ++ IN u32 dwValue); ++ ++/* ++ * ======== CFG_SetDrvObject ======== ++ * Purpose: ++ * Store the Driver Object handle. ++ * Parameters: ++ * dwValue: Arbitrary value to store. ++ * dwType Type of Object to Store ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Internal Error. ++ * Requires: ++ * CFG initialized. ++ * Ensures: ++ * DSP_SOK: The Private u32 was successfully set. ++ */ ++ extern DSP_STATUS CFG_SetObject(IN u32 dwValue, IN u32 dwType); ++ ++#endif /* CFG_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/clk.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/clk.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/clk.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/clk.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,155 @@ ++/* ++ * clk.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== clk.h ======== ++ * Purpose: Provides Clock functions. ++ * ++ *! Revision History: ++ *! ================ ++ *! 08-May-2007 rg: Moved all clock functions from sync module. 
++ */ ++ ++#ifndef _CLK_H ++#define _CLK_H ++ ++ /* Generic TIMER object: */ ++ struct TIMER_OBJECT; ++ enum SERVICES_ClkId { ++ SERVICESCLK_iva2_ck = 0, ++ SERVICESCLK_mailbox_ick, ++ SERVICESCLK_gpt5_fck, ++ SERVICESCLK_gpt5_ick, ++ SERVICESCLK_gpt6_fck, ++ SERVICESCLK_gpt6_ick, ++ SERVICESCLK_gpt7_fck, ++ SERVICESCLK_gpt7_ick, ++ SERVICESCLK_gpt8_fck, ++ SERVICESCLK_gpt8_ick, ++ SERVICESCLK_wdt3_fck, ++ SERVICESCLK_wdt3_ick, ++ SERVICESCLK_mcbsp1_fck, ++ SERVICESCLK_mcbsp1_ick, ++ SERVICESCLK_mcbsp2_fck, ++ SERVICESCLK_mcbsp2_ick, ++ SERVICESCLK_mcbsp3_fck, ++ SERVICESCLK_mcbsp3_ick, ++ SERVICESCLK_mcbsp4_fck, ++ SERVICESCLK_mcbsp4_ick, ++ SERVICESCLK_mcbsp5_fck, ++ SERVICESCLK_mcbsp5_ick, ++ SERVICESCLK_ssi_fck, ++ SERVICESCLK_ssi_ick, ++ SERVICESCLK_sys_32k_ck, ++ SERVICESCLK_sys_ck, ++ SERVICESCLK_NOT_DEFINED ++ } ; ++ ++/* ++ * ======== CLK_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * CLK initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void CLK_Exit(void); ++ ++/* ++ * ======== CLK_Init ======== ++ * Purpose: ++ * Initializes private state of CLK module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * CLK initialized. ++ */ ++ extern bool CLK_Init(void); ++ ++ ++/* ++ * ======== CLK_Enable ======== ++ * Purpose: ++ * Enables the clock requested. ++ * Parameters: ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Error occured while enabling the clock. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CLK_Enable(IN enum SERVICES_ClkId clk_id); ++ ++/* ++ * ======== CLK_Disable ======== ++ * Purpose: ++ * Disables the clock requested. ++ * Parameters: ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Error occured while disabling the clock. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CLK_Disable(IN enum SERVICES_ClkId clk_id); ++ ++/* ++ * ======== CLK_GetRate ======== ++ * Purpose: ++ * Get the clock rate of requested clock. ++ * Parameters: ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Error occured while Getting the clock rate. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CLK_GetRate(IN enum SERVICES_ClkId clk_id, ++ u32 *speedMhz); ++/* ++ * ======== CLK_Set_32KHz ======== ++ * Purpose: ++ * Set the requested clock to 32KHz. ++ * Parameters: ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Error occured while setting the clock parent to 32KHz. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CLK_Set_32KHz(IN enum SERVICES_ClkId clk_id); ++ extern void SSI_Clk_Prepare(bool FLAG); ++ ++/* ++ * ======== CLK_Get_RefCnt ======== ++ * Purpose: ++ * get the reference count for the clock. ++ * Parameters: ++ * Returns: ++ * s32: Reference Count for the clock. ++ * DSP_EFAIL: Error occured while getting the reference count of a clock. 
++ * Requires: ++ * Ensures: ++ */ ++ extern s32 CLK_Get_UseCnt(IN enum SERVICES_ClkId clk_id); ++ ++#endif /* _SYNC_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cmmdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cmmdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cmmdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cmmdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,135 @@ ++/* ++ * cmmdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cmmdefs.h ======== ++ * Purpose: ++ * Global MEM constants and types. ++ * ++ *! Revision History: ++ *! ================ ++ *! 12-Nov-2001 ag CMM_KERNMAPTYPE added for dsp<->device process addr map'n. ++ *! This allows addr conversion from drvr process <-> DSP addr. ++ *! 29-Aug-2001 ag Added CMM_ALLSEGMENTS. ++ *! 08-Dec-2000 ag Added bus address conversion type CMM_POMAPEMIF2DSPBUS. ++ *! 05-Dec-2000 ag Added default CMM_DEFLTCONVFACTOR & CMM_DEFLTDSPADDROFFSET. ++ *! 29-Oct-2000 ag Added converstion factor for GPP DSP Pa translation. ++ *! 15-Oct-2000 ag Added address translator attributes and defaults. ++ *! 12-Jul-2000 ag Created. ++ */ ++ ++#ifndef CMMDEFS_ ++#define CMMDEFS_ ++ ++#include ++ ++/* Cmm attributes used in CMM_Create() */ ++ struct CMM_MGRATTRS { ++ /* Minimum SM allocation; default 32 bytes. */ ++ u32 ulMinBlockSize; ++ } ; ++ ++/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */ ++ struct CMM_ATTRS { ++ u32 ulSegId; /* 1,2... are SM segments. 0 is not. */ ++ u32 ulAlignment; /* 0,1,2,4....ulMinBlockSize */ ++ } ; ++ ++/* ++ * DSPPa to GPPPa Conversion Factor. ++ * ++ * For typical platforms: ++ * converted Address = PaDSP + ( cFactor * addressToConvert). ++ */ ++ enum CMM_CNVTTYPE { ++ CMM_SUBFROMDSPPA = -1, ++ /* PreOMAP is special case: not simple offset */ ++ CMM_POMAPEMIF2DSPBUS = 0, ++ CMM_ADDTODSPPA = 1 ++ } ; ++ ++#define CMM_DEFLTDSPADDROFFSET 0 ++#define CMM_DEFLTCONVFACTOR CMM_POMAPEMIF2DSPBUS /* PreOMAP DSPBUS<->EMIF */ ++#define CMM_ALLSEGMENTS 0xFFFFFF /* All SegIds */ ++#define CMM_MAXGPPSEGS 1 /* Maximum # of SM segs */ ++ ++/* ++ * SMSEGs are SM segments the DSP allocates from. ++ * ++ * This info is used by the GPP to xlate DSP allocated PAs. ++ */ ++ ++ struct CMM_SEGINFO { ++ u32 dwSegBasePa; /* Start Phys address of SM segment */ ++ /* Total size in bytes of segment: DSP+GPP */ ++ u32 ulTotalSegSize; ++ u32 dwGPPBasePA; /* Start Phys addr of Gpp SM seg */ ++ u32 ulGPPSize; /* Size of Gpp SM seg in bytes */ ++ u32 dwDSPBaseVA; /* DSP virt base byte address */ ++ u32 ulDSPSize; /* DSP seg size in bytes */ ++ /* # of current GPP allocations from this segment */ ++ u32 ulInUseCnt; ++ u32 dwSegBaseVa; /* Start Virt address of SM seg */ ++ ++ } ; ++ ++/* CMM useful information */ ++ struct CMM_INFO { ++ /* # of SM segments registered with this Cmm. 
*/ ++ u32 ulNumGPPSMSegs; ++ /* Total # of allocations outstanding for CMM */ ++ u32 ulTotalInUseCnt; ++ /* Min SM block size allocation from CMM_Create() */ ++ u32 ulMinBlockSize; ++ /* Info per registered SM segment. */ ++ struct CMM_SEGINFO segInfo[CMM_MAXGPPSEGS]; ++ } ; ++ ++/* XlatorCreate attributes */ ++ struct CMM_XLATORATTRS { ++ u32 ulSegId; /* segment Id used for SM allocations */ ++ u32 dwDSPBufs; /* # of DSP-side bufs */ ++ u32 dwDSPBufSize; /* size of DSP-side bufs in GPP bytes */ ++ /* Vm base address alloc'd in client process context */ ++ void *pVmBase; ++ /* dwVmSize must be >= (dwMaxNumBufs * dwMaxSize) */ ++ u32 dwVmSize; ++ } ; ++ ++/* ++ * Cmm translation types. Use to map SM addresses to process context. ++ */ ++ enum CMM_XLATETYPE { ++ CMM_VA2PA = 0, /* Virtual to GPP physical address xlation */ ++ CMM_PA2VA = 1, /* GPP Physical to virtual */ ++ CMM_VA2DSPPA = 2, /* Va to DSP Pa */ ++ CMM_PA2DSPPA = 3, /* GPP Pa to DSP Pa */ ++ CMM_DSPPA2PA = 4, /* DSP Pa to GPP Pa */ ++ } ; ++ ++/* ++ * Used to "map" between device process virt addr and dsp addr. ++ */ ++ enum CMM_KERNMAPTYPE { ++ CMM_KERNVA2DSP = 0, /* Device process context to dsp address. */ ++ CMM_DSP2KERNVA = 1, /* Dsp address to device process context. */ ++ } ; ++ ++ struct CMM_OBJECT; ++ struct CMM_XLATOROBJECT; ++ ++#endif /* CMMDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cmm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cmm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cmm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cmm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,420 @@ ++/* ++ * cmm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== cmm.h ======== ++ * Purpose: ++ * The Communication Memory Management(CMM) module provides shared memory ++ * management services for DSP/BIOS Bridge data streaming and messaging. ++ * Multiple shared memory segments can be registered with CMM. Memory is ++ * coelesced back to the appropriate pool when a buffer is freed. ++ * ++ * The CMM_Xlator[xxx] functions are used for node messaging and data ++ * streaming address translation to perform zero-copy inter-processor ++ * data transfer(GPP<->DSP). A "translator" object is created for a node or ++ * stream object that contains per thread virtual address information. This ++ * translator info is used at runtime to perform SM address translation ++ * to/from the DSP address space. ++ * ++ * ++ * Public Functions: ++ * CMM_CallocBuf ++ * CMM_Create ++ * CMM_Destroy ++ * CMM_Exit ++ * CMM_FreeBuf ++ * CMM_GetHandle ++ * CMM_GetInfo ++ * CMM_Init ++ * CMM_RegisterGPPSMSeg ++ * CMM_UnRegisterGPPSMSeg ++ * CMM_XlatorAllocBuf (Note #1 below) ++ * CMM_XlatorCreate " ++ * CMM_XlatorDelete " ++ * CMM_XlatorFreeBuf " ++ * CMM_XlatorTranslate " ++ * ++ * ++ * Notes: ++ * #1: Used by Node and Stream modules for SM address translation. ++ * ++ *! Revision History: ++ *! 
================ ++ *! 30-Jan-2002 ag Removed unused CMM_Alloc[Free]Desc & CMM_XlatorRegisterPa. ++ *! Renamed CMM_AllocBuf() to CMM_CallocBuf(). ++ *! 29-Aug-2001 ag: Added dsp virt base and size to CMM_RegisterGPPSMSeg(). ++ *! 12-Aug-2001 ag: Added CMM_UnRegisterGPP[DSP}SMSeg[s](). ++ *! 05-Dec-2000 ag: Added param to CMM_XlatorDelete() to force buf cleanup. ++ *! 30-Oct-2000 ag: Added conversion factor to CMM_RegisterDSP[GPP]SMSeg(). ++ *! 12-Oct-2000 ag: Added CMM_Xlator[xxx] functions. ++ *! 10-Aug-2000 ag: Created. ++ *! ++ */ ++ ++#ifndef CMM_ ++#define CMM_ ++ ++#include ++ ++#include ++#include ++ ++/* ++ * ======== CMM_CallocBuf ======== ++ * Purpose: ++ * Allocate memory buffers that can be used for data streaming or ++ * messaging. ++ * Parameters: ++ * hCmmMgr: Cmm Mgr handle. ++ * uSize: Number of bytes to allocate. ++ * pAttr: Attributes of memory to allocate. ++ * ppBufVA: Address of where to place VA. ++ * Returns: ++ * Pointer to a zero'd block of SM memory; ++ * NULL if memory couldn't be allocated, ++ * or if cBytes == 0, ++ * Requires: ++ * Valid hCmmMgr. ++ * CMM initialized. ++ * Ensures: ++ * The returned pointer, if not NULL, points to a valid memory block of ++ * the size requested. ++ * ++ */ ++ extern void *CMM_CallocBuf(struct CMM_OBJECT *hCmmMgr, ++ u32 uSize, struct CMM_ATTRS *pAttrs, ++ OUT void **ppBufVA); ++ ++/* ++ * ======== CMM_Create ======== ++ * Purpose: ++ * Create a communication memory manager object. ++ * Parameters: ++ * phCmmMgr: Location to store a communication manager handle on output. ++ * hDevObject: Handle to a device object. ++ * pMgrAttrs: Comm mem manager attributes. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * DSP_EFAIL: Failed to initialize critical sect sync object. ++ * ++ * Requires: ++ * CMM_Init(void) called. ++ * phCmmMgr != NULL. ++ * pMgrAttrs->ulMinBlockSize >= 4 bytes. ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_Create(OUT struct CMM_OBJECT **phCmmMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CMM_MGRATTRS *pMgrAttrs); ++ ++/* ++ * ======== CMM_Destroy ======== ++ * Purpose: ++ * Destroy the communication memory manager object. ++ * Parameters: ++ * hCmmMgr: Cmm Mgr handle. ++ * bForce: Force deallocation of all cmm memory immediately if set TRUE. ++ * If FALSE, and outstanding allocations will return DSP_EFAIL ++ * status. ++ * Returns: ++ * DSP_SOK: CMM object & resources deleted. ++ * DSP_EFAIL: Unable to free CMM object due to outstanding allocation. ++ * DSP_EHANDLE: Unable to free CMM due to bad handle. ++ * Requires: ++ * CMM is initialized. ++ * hCmmMgr != NULL. ++ * Ensures: ++ * Memory resources used by Cmm Mgr are freed. ++ */ ++ extern DSP_STATUS CMM_Destroy(struct CMM_OBJECT *hCmmMgr, bool bForce); ++ ++/* ++ * ======== CMM_Exit ======== ++ * Purpose: ++ * Discontinue usage of module. Cleanup CMM module if CMM cRef reaches zero. ++ * Parameters: ++ * n/a ++ * Returns: ++ * n/a ++ * Requires: ++ * CMM is initialized. ++ * Ensures: ++ */ ++ extern void CMM_Exit(void); ++ ++/* ++ * ======== CMM_FreeBuf ======== ++ * Purpose: ++ * Free the given buffer. ++ * Parameters: ++ * hCmmMgr: Cmm Mgr handle. ++ * pBuf: Pointer to memory allocated by CMM_CallocBuf(). ++ * ulSegId: SM segment Id used in CMM_Calloc() attrs. ++ * Set to 0 to use default segment. ++ * Returns: ++ * DSP_SOK ++ * DSP_EFAIL ++ * Requires: ++ * CMM initialized. 
++ * pBufPA != NULL ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_FreeBuf(struct CMM_OBJECT *hCmmMgr, ++ void *pBufPA, u32 ulSegId); ++ ++/* ++ * ======== CMM_GetHandle ======== ++ * Purpose: ++ * Return the handle to the cmm mgr for the given device obj. ++ * Parameters: ++ * hProcessor: Handle to a Processor. ++ * phCmmMgr: Location to store the shared memory mgr handle on output. ++ * ++ * Returns: ++ * DSP_SOK: Cmm Mgr opaque handle returned. ++ * DSP_EHANDLE: Invalid handle. ++ * Requires: ++ * phCmmMgr != NULL ++ * hDevObject != NULL ++ * Ensures: ++ */ ++ extern DSP_STATUS CMM_GetHandle(DSP_HPROCESSOR hProcessor, ++ OUT struct CMM_OBJECT **phCmmMgr); ++ ++/* ++ * ======== CMM_GetInfo ======== ++ * Purpose: ++ * Return the current SM and VM utilization information. ++ * Parameters: ++ * hCmmMgr: Handle to a Cmm Mgr. ++ * pCmmInfo: Location to store the Cmm information on output. ++ * ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid handle. ++ * DSP_EINVALIDARG Invalid input argument. ++ * Requires: ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_GetInfo(struct CMM_OBJECT *hCmmMgr, ++ OUT struct CMM_INFO *pCmmInfo); ++ ++/* ++ * ======== CMM_Init ======== ++ * Purpose: ++ * Initializes private state of CMM module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * CMM initialized. ++ */ ++ extern bool CMM_Init(void); ++ ++/* ++ * ======== CMM_RegisterGPPSMSeg ======== ++ * Purpose: ++ * Register a block of SM with the CMM. ++ * Parameters: ++ * hCmmMgr: Handle to a Cmm Mgr. ++ * lpGPPBasePA: GPP Base Physical address. ++ * ulSize: Size in GPP bytes. ++ * dwDSPAddrOffset GPP PA to DSP PA Offset. ++ * cFactor: Add offset if CMM_ADDTODSPPA, sub if CMM_SUBFROMDSPPA. ++ * dwDSPBase: DSP virtual base byte address. ++ * ulDSPSize: Size of DSP segment in bytes. ++ * pulSegId: Address to store segment Id. ++ * ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hCmmMgr handle. ++ * DSP_EINVALIDARG: Invalid input argument. ++ * DSP_EFAIL: Unable to register. ++ * - On success *pulSegId is a valid SM segment ID. ++ * Requires: ++ * ulSize > 0 ++ * pulSegId != NULL ++ * dwGPPBasePA != 0 ++ * cFactor = CMM_ADDTODSPPA || cFactor = CMM_SUBFROMDSPPA ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_RegisterGPPSMSeg(struct CMM_OBJECT *hCmmMgr, ++ unsigned int dwGPPBasePA, ++ u32 ulSize, ++ u32 dwDSPAddrOffset, ++ enum CMM_CNVTTYPE cFactor, ++ unsigned int dwDSPBase, ++ u32 ulDSPSize, ++ u32 *pulSegId, ++ u32 dwGPPBaseBA); ++ ++/* ++ * ======== CMM_UnRegisterGPPSMSeg ======== ++ * Purpose: ++ * Unregister the given memory segment that was previously registered ++ * by CMM_RegisterGPPSMSeg. ++ * Parameters: ++ * hCmmMgr: Handle to a Cmm Mgr. ++ * ulSegId Segment identifier returned by CMM_RegisterGPPSMSeg. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid handle. ++ * DSP_EINVALIDARG: Invalid ulSegId. ++ * DSP_EFAIL: Unable to unregister for unknown reason. ++ * Requires: ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_UnRegisterGPPSMSeg(struct CMM_OBJECT *hCmmMgr, ++ u32 ulSegId); ++ ++/* ++ * ======== CMM_XlatorAllocBuf ======== ++ * Purpose: ++ * Allocate the specified SM buffer and create a local memory descriptor. ++ * Place on the descriptor on the translator's HaQ (Host Alloc'd Queue). ++ * Parameters: ++ * hXlator: Handle to a Xlator object. ++ * pVaBuf: Virtual address ptr(client context) ++ * uPaSize: Size of SM memory to allocate. 
++ * Returns: ++ * Ptr to valid physical address(Pa) of uPaSize bytes, NULL if failed. ++ * Requires: ++ * pVaBuf != 0. ++ * uPaSize != 0. ++ * Ensures: ++ * ++ */ ++ extern void *CMM_XlatorAllocBuf(struct CMM_XLATOROBJECT *hXlator, ++ void *pVaBuf, u32 uPaSize); ++ ++/* ++ * ======== CMM_XlatorCreate ======== ++ * Purpose: ++ * Create a translator(xlator) object used for process specific Va<->Pa ++ * address translation. Node messaging and streams use this to perform ++ * inter-processor(GPP<->DSP) zero-copy data transfer. ++ * Parameters: ++ * phXlator: Address to place handle to a new Xlator handle. ++ * hCmmMgr: Handle to Cmm Mgr associated with this translator. ++ * pXlatorAttrs: Translator attributes used for the client NODE or STREAM. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EINVALIDARG: Bad input Attrs. ++ * DSP_EMEMORY: Insufficient memory(local) for requested resources. ++ * Requires: ++ * phXlator != NULL ++ * hCmmMgr != NULL ++ * pXlatorAttrs != NULL ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_XlatorCreate(OUT struct CMM_XLATOROBJECT **phXlator, ++ struct CMM_OBJECT *hCmmMgr, ++ struct CMM_XLATORATTRS *pXlatorAttrs); ++ ++/* ++ * ======== CMM_XlatorDelete ======== ++ * Purpose: ++ * Delete translator resources ++ * Parameters: ++ * hXlator: handle to translator. ++ * bForce: bForce = TRUE will free XLators SM buffers/dscriptrs. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Bad translator handle. ++ * DSP_EFAIL: Unable to free translator resources. ++ * Requires: ++ * cRefs > 0 ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_XlatorDelete(struct CMM_XLATOROBJECT *hXlator, ++ bool bForce); ++ ++/* ++ * ======== CMM_XlatorFreeBuf ======== ++ * Purpose: ++ * Free SM buffer and descriptor. ++ * Does not free client process VM. ++ * Parameters: ++ * hXlator: handle to translator. ++ * pBufVa Virtual address of PA to free. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Bad translator handle. ++ * Requires: ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_XlatorFreeBuf(struct CMM_XLATOROBJECT *hXlator, ++ void *pBufVa); ++ ++/* ++ * ======== CMM_XlatorInfo ======== ++ * Purpose: ++ * Set/Get process specific "translator" address info. ++ * This is used to perform fast virtaul address translation ++ * for shared memory buffers between the GPP and DSP. ++ * Parameters: ++ * hXlator: handle to translator. ++ * pAddr: Virtual base address of segment. ++ * ulSize: Size in bytes. ++ * uSegId: Segment identifier of SM segment(s) ++ * bSetInfo Set xlator fields if TRUE, else return base addr ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Bad translator handle. ++ * Requires: ++ * (cRefs > 0) ++ * (pAddr != NULL) ++ * (ulSize > 0) ++ * Ensures: ++ * ++ */ ++ extern DSP_STATUS CMM_XlatorInfo(struct CMM_XLATOROBJECT *hXlator, ++ IN OUT u8 **pAddr, ++ u32 ulSize, u32 uSegId, ++ bool bSetInfo); ++ ++/* ++ * ======== CMM_XlatorTranslate ======== ++ * Purpose: ++ * Perform address translation VA<->PA for the specified stream or ++ * message shared memory buffer. ++ * Parameters: ++ * hXlator: handle to translator. ++ * pAddr address of buffer to translate. ++ * xType Type of address xlation. CMM_PA2VA or CMM_VA2PA. ++ * Returns: ++ * Valid address on success, else NULL. 
++ * Requires: ++ * cRefs > 0 ++ * pAddr != NULL ++ * xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA) ++ * Ensures: ++ * ++ */ ++ extern void *CMM_XlatorTranslate(struct CMM_XLATOROBJECT *hXlator, ++ void *pAddr, enum CMM_XLATETYPE xType); ++ ++#endif /* CMM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cod.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cod.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/cod.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/cod.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,433 @@ ++/* ++ * cod.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cod.h ======== ++ * Description: ++ * Code management module for DSPs. This module provides an interface ++ * interface for loading both static and dynamic code objects onto DSP ++ * systems. ++ * ++ * Public Functions: ++ * COD_Close ++ * COD_Create ++ * COD_Delete ++ * COD_Exit ++ * COD_GetBaseLib ++ * COD_GetBaseName ++ * COD_GetLoader ++ * COD_GetSection ++ * COD_GetSymValue ++ * COD_Init ++ * COD_LoadBase ++ * COD_Open ++ * COD_OpenBase ++ * COD_ReadSection ++ * COD_UnloadSection ++ * ++ * Note: ++ * Currently, only static loading is supported. ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Apr-2003 map: Changed DBL to DBLL ++ *! 07-Aug-2002 jeh: Added COD_GetBaseName(). ++ *! 17-Jul-2002 jeh: Added COD_Open(), COD_Close(). ++ *! 15-Mar-2002 jeh: Added DBL_Flags param to COD_OpenBase(). ++ *! 19-Oct-2001 jeh: Added COD_GetBaseLib, COD_GetLoader, (left in ++ *! COD_LoadSection(), COD_UnloadSection(), since they ++ *! may be needed for BridgeLite). ++ *! 07-Sep-2001 jeh: Added COD_LoadSection(), COD_UnloadSection(). ++ *! 11-Jan-2001 jeh: Added COD_OpenBase. ++ *! 29-Sep-2000 kc: Added size param to COD_ReadSection for input buffer ++ *! validation. ++ *! 02-Aug-2000 kc: Added COD_ReadSection. ++ *! 04-Sep-1997 gp: Added CDECL identifier to COD_WRITEFXN (for NT).. ++ *! 18-Aug-1997 cr: Added explicit CDECL identifier. ++ *! 28-Oct-1996 gp: Added COD_GetSection. ++ *! 30-Jul-1996 gp: Added envp[] argument to COD_LoadBase(). ++ *! 12-Jun-1996 gp: Moved OUT param first in _Create(). Updated _Create() ++ *! call to take a ZLFileName. Moved COD_ processor types ++ *! to CFG. ++ *! 29-May-1996 gp: Changed WCD_STATUS to DSP_STATUS. Removed include's. ++ *! 07-May-1996 mg: Created. 
++ * ++ */ ++ ++#ifndef COD_ ++#define COD_ ++ ++#include ++ ++#define COD_MAXPATHLENGTH 255 ++#define COD_TRACEBEG "SYS_PUTCBEG" ++#define COD_TRACEEND "SYS_PUTCEND" ++#define COD_TRACESECT "trace" ++#define COD_TRACEBEGOLD "PUTCBEG" ++#define COD_TRACEENDOLD "PUTCEND" ++ ++#define COD_NOLOAD DBLL_NOLOAD ++#define COD_SYMB DBLL_SYMB ++ ++/* Flags passed to COD_Open */ ++ typedef DBLL_Flags COD_FLAGS; ++ ++/* COD code manager handle */ ++ struct COD_MANAGER; ++ ++/* COD library handle */ ++ struct COD_LIBRARYOBJ; ++ ++/* COD attributes */ ++ struct COD_ATTRS { ++ u32 ulReserved; ++ } ; ++ ++/* ++ * Function prototypes for writing memory to a DSP system, allocating ++ * and freeing DSP memory. ++ */ ++ typedef u32(*COD_WRITEFXN) (void *pPrivRef, u32 ulDspAddr, ++ void *pBuf, u32 ulNumBytes, ++ u32 nMemSpace); ++ ++ ++/* ++ * ======== COD_Close ======== ++ * Purpose: ++ * Close a library opened with COD_Open(). ++ * Parameters: ++ * lib - Library handle returned by COD_Open(). ++ * Returns: ++ * None. ++ * Requires: ++ * COD module initialized. ++ * valid lib. ++ * Ensures: ++ * ++ */ ++ extern void COD_Close(struct COD_LIBRARYOBJ *lib); ++ ++/* ++ * ======== COD_Create ======== ++ * Purpose: ++ * Create an object to manage code on a DSP system. This object can be ++ * used to load an initial program image with arguments that can later ++ * be expanded with dynamically loaded object files. ++ * Symbol table information is managed by this object and can be retrieved ++ * using the COD_GetSymValue() function. ++ * Parameters: ++ * phManager: created manager object ++ * pstrZLFile: ZL DLL filename, of length < COD_MAXPATHLENGTH. ++ * attrs: attributes to be used by this object. A NULL value ++ * will cause default attrs to be used. ++ * Returns: ++ * DSP_SOK: Success. ++ * COD_E_NOZLFUNCTIONS: Could not initialize ZL functions. ++ * COD_E_ZLCREATEFAILED: ZL_Create failed. ++ * DSP_ENOTIMPL: attrs was not NULL. We don't yet support ++ * non default values of attrs. ++ * Requires: ++ * COD module initialized. ++ * pstrZLFile != NULL ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_Create(OUT struct COD_MANAGER **phManager, ++ char *pstrZLFile, ++ IN OPTIONAL CONST struct COD_ATTRS *attrs); ++ ++/* ++ * ======== COD_Delete ======== ++ * Purpose: ++ * Delete a code manager object. ++ * Parameters: ++ * hManager: handle of manager to be deleted ++ * Returns: ++ * None. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * Ensures: ++ */ ++ extern void COD_Delete(struct COD_MANAGER *hManager); ++ ++/* ++ * ======== COD_Exit ======== ++ * Purpose: ++ * Discontinue usage of the COD module. ++ * Parameters: ++ * None. ++ * Returns: ++ * None. ++ * Requires: ++ * COD initialized. ++ * Ensures: ++ * Resources acquired in COD_Init(void) are freed. ++ */ ++ extern void COD_Exit(void); ++ ++/* ++ * ======== COD_GetBaseLib ======== ++ * Purpose: ++ * Get handle to the base image DBL library. ++ * Parameters: ++ * hManager: handle of manager to be deleted ++ * plib: location to store library handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * plib != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_GetBaseLib(struct COD_MANAGER *hManager, ++ struct DBLL_LibraryObj **plib); ++ ++/* ++ * ======== COD_GetBaseName ======== ++ * Purpose: ++ * Get the name of the base image DBL library. ++ * Parameters: ++ * hManager: handle of manager to be deleted ++ * pszName: location to store library name on output. 
++ * uSize: size of name buffer. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Buffer too small. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * pszName != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_GetBaseName(struct COD_MANAGER *hManager, ++ char *pszName, u32 uSize); ++ ++/* ++ * ======== COD_GetEntry ======== ++ * Purpose: ++ * Retrieve the entry point of a loaded DSP program image ++ * Parameters: ++ * hManager: handle of manager to be deleted ++ * pulEntry: pointer to location for entry point ++ * Returns: ++ * DSP_SOK: Success. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * pulEntry != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_GetEntry(struct COD_MANAGER *hManager, ++ u32 *pulEntry); ++ ++/* ++ * ======== COD_GetLoader ======== ++ * Purpose: ++ * Get handle to the DBL loader. ++ * Parameters: ++ * hManager: handle of manager to be deleted ++ * phLoader: location to store loader handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * phLoader != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_GetLoader(struct COD_MANAGER *hManager, ++ struct DBLL_TarObj **phLoader); ++ ++/* ++ * ======== COD_GetSection ======== ++ * Purpose: ++ * Retrieve the starting address and length of a section in the COFF file ++ * given the section name. ++ * Parameters: ++ * lib Library handle returned from COD_Open(). ++ * pstrSect: name of the section, with or without leading "." ++ * puAddr: Location to store address. ++ * puLen: Location to store length. ++ * Returns: ++ * DSP_SOK: Success ++ * COD_E_NOSYMBOLSLOADED: Symbols have not been loaded onto the board. ++ * COD_E_SYMBOLNOTFOUND: The symbol could not be found. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * pstrSect != NULL; ++ * puAddr != NULL; ++ * puLen != NULL; ++ * Ensures: ++ * DSP_SOK: *puAddr and *puLen contain the address and length of the ++ * section. ++ * else: *puAddr == 0 and *puLen == 0; ++ * ++ */ ++ extern DSP_STATUS COD_GetSection(struct COD_LIBRARYOBJ *lib, ++ IN char *pstrSect, ++ OUT u32 *puAddr, ++ OUT u32 *puLen); ++ ++/* ++ * ======== COD_GetSymValue ======== ++ * Purpose: ++ * Retrieve the value for the specified symbol. The symbol is first ++ * searched for literally and then, if not found, searched for as a ++ * C symbol. ++ * Parameters: ++ * lib: library handle returned from COD_Open(). ++ * pstrSymbol: name of the symbol ++ * value: value of the symbol ++ * Returns: ++ * DSP_SOK: Success. ++ * COD_E_NOSYMBOLSLOADED: Symbols have not been loaded onto the board. ++ * COD_E_SYMBOLNOTFOUND: The symbol could not be found. ++ * Requires: ++ * COD module initialized. ++ * Valid hManager. ++ * pstrSym != NULL. ++ * pulValue != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_GetSymValue(struct COD_MANAGER *hManager, ++ IN char *pstrSym, ++ OUT u32 *pulValue); ++ ++/* ++ * ======== COD_Init ======== ++ * Purpose: ++ * Initialize the COD module's private state. ++ * Parameters: ++ * None. ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * A requirement for each of the other public COD functions. ++ */ ++ extern bool COD_Init(void); ++ ++/* ++ * ======== COD_LoadBase ======== ++ * Purpose: ++ * Load the initial program image, optionally with command-line arguments, ++ * on the DSP system managed by the supplied handle. 
The program to be ++ * loaded must be the first element of the args array and must be a fully ++ * qualified pathname. ++ * Parameters: ++ * hMgr: manager to load the code with ++ * nArgc: number of arguments in the args array ++ * args: array of strings for arguments to DSP program ++ * writeFxn: board-specific function to write data to DSP system ++ * pArb: arbitrary pointer to be passed as first arg to writeFxn ++ * envp: array of environment strings for DSP exec. ++ * Returns: ++ * DSP_SOK: Success. ++ * COD_E_OPENFAILED: Failed to open target code. ++ * COD_E_LOADFAILED: Failed to load code onto target. ++ * Requires: ++ * COD module initialized. ++ * hMgr is valid. ++ * nArgc > 0. ++ * aArgs != NULL. ++ * aArgs[0] != NULL. ++ * pfnWrite != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_LoadBase(struct COD_MANAGER *hManager, ++ u32 nArgc, char *aArgs[], ++ COD_WRITEFXN pfnWrite, void *pArb, ++ char *envp[]); ++ ++ ++/* ++ * ======== COD_Open ======== ++ * Purpose: ++ * Open a library for reading sections. Does not load or set the base. ++ * Parameters: ++ * hMgr: manager to load the code with ++ * pszCoffPath: Coff file to open. ++ * flags: COD_NOLOAD (don't load symbols) or COD_SYMB (load ++ * symbols). ++ * pLib: Handle returned that can be used in calls to COD_Close ++ * and COD_GetSection. ++ * Returns: ++ * S_OK: Success. ++ * COD_E_OPENFAILED: Failed to open target code. ++ * Requires: ++ * COD module initialized. ++ * hMgr is valid. ++ * flags == COD_NOLOAD || flags == COD_SYMB. ++ * pszCoffPath != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS COD_Open(struct COD_MANAGER *hMgr, ++ IN char *pszCoffPath, ++ COD_FLAGS flags, ++ OUT struct COD_LIBRARYOBJ **pLib); ++ ++/* ++ * ======== COD_OpenBase ======== ++ * Purpose: ++ * Open base image for reading sections. Does not load the base. ++ * Parameters: ++ * hMgr: manager to load the code with ++ * pszCoffPath: Coff file to open. ++ * flags: Specifies whether to load symbols. ++ * Returns: ++ * DSP_SOK: Success. ++ * COD_E_OPENFAILED: Failed to open target code. ++ * Requires: ++ * COD module initialized. ++ * hMgr is valid. ++ * pszCoffPath != NULL. ++ * Ensures: ++ */ ++extern DSP_STATUS COD_OpenBase(struct COD_MANAGER *hMgr, IN char *pszCoffPath, ++ DBLL_Flags flags); ++ ++/* ++ * ======== COD_ReadSection ======== ++ * Purpose: ++ * Retrieve the content of a code section given the section name. ++ * Parameters: ++ * hManager - manager in which to search for the symbol ++ * pstrSect - name of the section, with or without leading "." ++ * pstrContent - buffer to store content of the section. ++ * Returns: ++ * DSP_SOK: on success, error code on failure ++ * COD_E_NOSYMBOLSLOADED: Symbols have not been loaded onto the board. ++ * COD_E_READFAILED: Failed to read content of code section. ++ * Requires: ++ * COD module initialized. ++ * valid hManager. ++ * pstrSect != NULL; ++ * pstrContent != NULL; ++ * Ensures: ++ * DSP_SOK: *pstrContent stores the content of the named section. 
++ */ ++ extern DSP_STATUS COD_ReadSection(struct COD_LIBRARYOBJ *lib, ++ IN char *pstrSect, ++ OUT char *pstrContent, ++ IN u32 cContentSize); ++ ++ ++ ++#endif /* COD_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/csl.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/csl.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/csl.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/csl.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,135 @@ ++/* ++ * csl.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== csl.h ======== ++ * Purpose: ++ * Platform independent C Standard library functions. ++ * ++ * Public Functions: ++ * CSL_AnsiToWchar ++ * CSL_ByteSwap ++ * CSL_Exit ++ * CSL_Init ++ * CSL_NumToAscii ++ * CSL_Strtok ++ * CSL_Strtokr ++ * CSL_WcharToAnsi ++ * ++ *! Revision History: ++ *! ================ ++ *! 07-Aug-2002 jeh: Added CSL_Strtokr(). ++ *! 21-Sep-2001 jeh: Added CSL_Strncmp. ++ *! 22-Nov-2000 map: Added CSL_Atoi and CSL_Strtok ++ *! 19-Nov-2000 kc: Added CSL_ByteSwap(). ++ *! 09-Nov-2000 kc: Added CSL_Strncat. ++ *! 29-Oct-1999 kc: Added CSL_Wstrlen(). ++ *! 20-Sep-1999 ag: Added CSL_Wchar2Ansi(). ++ *! 19-Jan-1998 cr: Code review cleanup (mostly documentation fixes). ++ *! 29-Dec-1997 cr: Changed CSL_lowercase to CSL_Uppercase, added ++ *! CSL_AnsiToWchar. ++ *! 30-Sep-1997 cr: Added explicit cdecl descriptors to fxn definitions. ++ *! 25-Jun-1997 cr: Added CSL_strcmp. ++ *! 12-Jun-1996 gp: Created. ++ */ ++ ++#ifndef CSL_ ++#define CSL_ ++ ++#include ++ ++/* ++ * ======== CSL_Exit ======== ++ * Purpose: ++ * Discontinue usage of the CSL module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * CSL initialized. ++ * Ensures: ++ * Resources acquired in CSL_Init(void) are freed. ++ */ ++ extern void CSL_Exit(void); ++ ++/* ++ * ======== CSL_Init ======== ++ * Purpose: ++ * Initialize the CSL module's private state. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * A requirement for each of the other public CSL functions. ++ */ ++ extern bool CSL_Init(void); ++ ++/* ++ * ======== CSL_NumToAscii ======== ++ * Purpose: ++ * Convert a 1 or 2 digit number to a 2 digit string. ++ * Parameters: ++ * pstrNumber: Buffer to store converted string. ++ * dwNum: Number to convert. ++ * Returns: ++ * Requires: ++ * pstrNumber must be able to hold at least three characters. ++ * Ensures: ++ * pstrNumber will be null terminated. ++ */ ++ extern void CSL_NumToAscii(OUT char *pstrNumber, IN u32 dwNum); ++ ++ ++/* ++ * ======== CSL_Strtok ======== ++ * Purpose: ++ * Tokenize a NULL terminated string ++ * Parameters: ++ * ptstrSrc: pointer to string. ++ * szSeparators: pointer to a string of seperators ++ * Returns: ++ * char * ++ * Requires: ++ * CSL initialized. ++ * ptstrSrc is a valid string pointer. ++ * szSeparators is a valid string pointer. 
++ * Ensures: ++ */ ++ extern char *CSL_Strtok(IN char *ptstrSrc, ++ IN CONST char *szSeparators); ++ ++/* ++ * ======== CSL_Strtokr ======== ++ * Purpose: ++ * Re-entrant version of strtok. ++ * Parameters: ++ * pstrSrc: Pointer to string. May be NULL on subsequent calls. ++ * szSeparators: Pointer to a string of seperators ++ * ppstrCur: Location to store start of string for next call to ++ * to CSL_Strtokr. ++ * Returns: ++ * char * (the token) ++ * Requires: ++ * CSL initialized. ++ * szSeparators != NULL ++ * ppstrCur != NULL ++ * Ensures: ++ */ ++ extern char *CSL_Strtokr(IN char *pstrSrc, ++ IN CONST char *szSeparators, ++ OUT char **ppstrCur); ++ ++#endif /* CSL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbc.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbc.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbc.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbc.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,66 @@ ++/* ++ * dbc.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dbc.h ======== ++ * Purpose: ++ * "Design by Contract" programming macros. ++ * ++ * Public Functions: ++ * DBC_Assert ++ * DBC_Require ++ * DBC_Ensure ++ * ++ * Notes: ++ * Requires that the GT->ERROR function has been defaulted to a valid ++ * error handler for the given execution environment. ++ * ++ * Does not require that GT_init() be called. ++ * ++ *! Revision History: ++ *! ================ ++ *! 11-Aug-2000 ag: Removed include ++ *! 22-Apr-1996 gp: Created. ++ */ ++ ++#ifndef DBC_ ++#define DBC_ ++ ++#ifndef GT_TRACE ++#define GT_TRACE 0 /* 0 = "trace compiled out"; 1 = "trace active" */ ++#endif ++ ++/* Assertion Macros: */ ++#if GT_TRACE ++ ++#include ++ ++#define DBC_Assert(exp) \ ++ if (!(exp)) \ ++ printk("%s, line %d: Assertion (" #exp ") failed.\n", \ ++ __FILE__, __LINE__) ++#define DBC_Require DBC_Assert /* Function Precondition. */ ++#define DBC_Ensure DBC_Assert /* Function Postcondition. */ ++ ++#else ++ ++#define DBC_Assert(exp) {} ++#define DBC_Require(exp) {} ++#define DBC_Ensure(exp) {} ++ ++#endif /* DEBUG */ ++ ++#endif /* DBC_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbdcddef.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbdcddef.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbdcddef.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbdcddef.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,94 @@ ++/* ++ * dbdcddef.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbdcddef.h ======== ++ * Description: ++ * DCD (DSP/BIOS Bridge Configuration Database) constants and types. ++ * ++ *! Revision History: ++ *! ================ ++ *! 03-Dec-2003 map Moved and renamed DCD_OBJTYPE to DSP_DCDOBJTYPE in dbdefs.h ++ *! 05-Dec-2002 map Added DCD_CREATELIBTYPE, DCD_EXECUTELIBTYPE, ++ * DCD_DELETELIBTYPE ++ *! 24-Feb-2003 kc Updated REG entry names to DspBridge. ++ *! 22-Nov-2002 gp Cleaned up comments, formatting. ++ *! 05-Aug-2002 jeh Added DCD_REGISTERFXN. ++ *! 19-Apr-2002 jeh Added DCD_LIBRARYTYPE to DCD_OBJTYPE, dynamic load ++ *! properties to DCD_NODEPROPS. ++ *! 29-Jul-2001 ag Added extended procObj. ++ *! 13-Feb-2001 kc: Named changed from dcdbsdef.h dbdcddef.h. ++ *! 12-Dec-2000 jeh Added DAIS iAlg name to DCD_NODEPROPS. ++ *! 30-Oct-2000 kc: Added #defines for DCD_AutoRegister function. ++ *! 05-Sep-2000 jeh Added DCD_NODEPROPS. ++ *! 12-Aug-2000 kc: Incoroporated the use of types defined in . ++ *! 29-Jul-2000 kc: Created. ++ */ ++ ++#ifndef DBDCDDEF_ ++#define DBDCDDEF_ ++ ++#include ++#include /* for MGR_PROCESSOREXTINFO */ ++ ++/* ++ * The following defines are critical elements for the DCD module: ++ * ++ * - DCD_REGKEY enables DCD functions to locate registered DCD objects. ++ * - DCD_REGISTER_SECTION identifies the COFF section where the UUID of ++ * registered DCD objects are stored. ++ */ ++#define DCD_REGKEY "Software\\TexasInstruments\\DspBridge\\DCD" ++#define DCD_REGISTER_SECTION ".dcd_register" ++ ++/* DCD Manager Object */ ++ struct DCD_MANAGER; ++ ++/* DCD Node Properties */ ++ struct DCD_NODEPROPS { ++ struct DSP_NDBPROPS ndbProps; ++ u32 uMsgSegid; ++ u32 uMsgNotifyType; ++ char *pstrCreatePhaseFxn; ++ char *pstrDeletePhaseFxn; ++ char *pstrExecutePhaseFxn; ++ char *pstrIAlgName; ++ ++ /* Dynamic load properties */ ++ u16 usLoadType; /* Static, dynamic, overlay */ ++ u32 ulDataMemSegMask; /* Data memory requirements */ ++ u32 ulCodeMemSegMask; /* Code memory requirements */ ++ } ; ++ ++/* DCD Generic Object Type */ ++ struct DCD_GENERICOBJ { ++ union dcdObjUnion { ++ struct DCD_NODEPROPS nodeObj; /* node object. */ ++ /* processor object. */ ++ struct DSP_PROCESSORINFO procObj; ++ /* extended proc object (private) */ ++ struct MGR_PROCESSOREXTINFO extProcObj; ++ } objData; ++ } ; ++ ++/* DCD Internal Callback Type */ ++ typedef DSP_STATUS(*DCD_REGISTERFXN) (IN struct DSP_UUID *pUuid, ++ IN enum DSP_DCDOBJTYPE objType, ++ IN void *handle); ++ ++#endif /* DBDCDDEF_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbdcd.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbdcd.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbdcd.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbdcd.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,388 @@ ++/* ++ * dbdcd.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dbdcd.h ======== ++ * Description: ++ * Defines the DSP/BIOS Bridge Configuration Database (DCD) API. ++ * ++ *! Revision History ++ *! ================ ++ *! 03-Dec-2003 map Changed DCD_OBJTYPE to DSP_DCDOBJTYPE ++ *! 24-Feb-2003 kc Updated DCD_AutoUnregister and DCD_GetObjects to simplify ++ *! DCD implementation. ++ *! 05-Aug-2002 jeh Added DCD_GetObjects(). ++ *! 11-Jul-2002 jeh Added DCD_GetDepLibs(), DCD_GetNumDepLibs(). ++ *! 22-Apr-2002 jeh Added DCD_GetLibraryName(). ++ *! 03-Apr-2001 sg Changed error names to have DCD_E* format. ++ *! 13-Feb-2001 kc Name changed from dcdbs.h to dbdcd.h. ++ *! 12-Dec-2000 kc Added DCD_AutoUnregister. ++ *! 09-Nov-2000 kc Updated usage of DCD_EnumerateObject. ++ *! 30-Oct-2000 kc Added DCD_AutoRegister. Updated error DCD error codes. ++ *! 29-Sep-2000 kc Incorporated code review comments. See ++ *! /src/reviews/dcd_review.txt. ++ *! 26-Jul-2000 kc Created. ++ *! ++ */ ++ ++#ifndef DBDCD_ ++#define DBDCD_ ++ ++#include ++#include ++#include ++ ++/* ++ * ======== DCD_AutoRegister ======== ++ * Purpose: ++ * This function automatically registers DCD objects specified in a ++ * special COFF section called ".dcd_register" ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pszCoffPath: Pointer to name of COFF file containing DCD ++ * objects to be registered. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EDCDNOAUTOREGISTER: Unable to find auto-registration section. ++ * DSP_EDCDREADSECT: Unable to read object code section. ++ * DSP_EDCDLOADBASE: Unable to load code base. ++ * DSP_EHANDLE: Invalid DCD_HMANAGER handle.. ++ * Requires: ++ * DCD initialized. ++ * Ensures: ++ * Note: ++ * Due to the DCD database construction, it is essential for a DCD-enabled ++ * COFF file to contain the right COFF sections, especially ++ * ".dcd_register", which is used for auto registration. ++ */ ++ extern DSP_STATUS DCD_AutoRegister(IN struct DCD_MANAGER *hDcdMgr, ++ IN char *pszCoffPath); ++ ++/* ++ * ======== DCD_AutoUnregister ======== ++ * Purpose: ++ * This function automatically unregisters DCD objects specified in a ++ * special COFF section called ".dcd_register" ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pszCoffPath: Pointer to name of COFF file containing ++ * DCD objects to be unregistered. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EDCDNOAUTOREGISTER: Unable to find auto-registration section. ++ * DSP_EDCDREADSECT: Unable to read object code section. ++ * DSP_EDCDLOADBASE: Unable to load code base. ++ * DSP_EHANDLE: Invalid DCD_HMANAGER handle.. ++ * Requires: ++ * DCD initialized. ++ * Ensures: ++ * Note: ++ * Due to the DCD database construction, it is essential for a DCD-enabled ++ * COFF file to contain the right COFF sections, especially ++ * ".dcd_register", which is used for auto unregistration. ++ */ ++ extern DSP_STATUS DCD_AutoUnregister(IN struct DCD_MANAGER *hDcdMgr, ++ IN char *pszCoffPath); ++ ++/* ++ * ======== DCD_CreateManager ======== ++ * Purpose: ++ * This function creates a DCD module manager. ++ * Parameters: ++ * pszZlDllName: Pointer to a DLL name string. ++ * phDcdMgr: A pointer to a DCD manager handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Unable to allocate memory for DCD manager handle. ++ * DSP_EFAIL: General failure. 
++ * Requires: ++ * DCD initialized. ++ * pszZlDllName is non-NULL. ++ * phDcdMgr is non-NULL. ++ * Ensures: ++ * A DCD manager handle is created. ++ */ ++ extern DSP_STATUS DCD_CreateManager(IN char *pszZlDllName, ++ OUT struct DCD_MANAGER **phDcdMgr); ++ ++/* ++ * ======== DCD_DestroyManager ======== ++ * Purpose: ++ * This function destroys a DCD module manager. ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid DCD manager handle. ++ * Requires: ++ * DCD initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_DestroyManager(IN struct DCD_MANAGER *hDcdMgr); ++ ++/* ++ * ======== DCD_EnumerateObject ======== ++ * Purpose: ++ * This function enumerates currently visible DSP/BIOS Bridge objects ++ * and returns the UUID and type of each enumerated object. ++ * Parameters: ++ * cIndex: The object enumeration index. ++ * objType: Type of object to enumerate. ++ * pUuid: Pointer to a DSP_UUID object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Unable to enumerate through the DCD database. ++ * DSP_SENUMCOMPLETE: Enumeration completed. This is not an error code. ++ * Requires: ++ * DCD initialized. ++ * pUuid is a valid pointer. ++ * Ensures: ++ * Details: ++ * This function can be used in conjunction with DCD_GetObjectDef to ++ * retrieve object properties. ++ */ ++ extern DSP_STATUS DCD_EnumerateObject(IN s32 cIndex, ++ IN enum DSP_DCDOBJTYPE objType, ++ OUT struct DSP_UUID *pUuid); ++ ++/* ++ * ======== DCD_Exit ======== ++ * Purpose: ++ * This function cleans up the DCD module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * DCD initialized. ++ * Ensures: ++ */ ++ extern void DCD_Exit(void); ++ ++/* ++ * ======== DCD_GetDepLibs ======== ++ * Purpose: ++ * Given the uuid of a library and size of array of uuids, this function ++ * fills the array with the uuids of all dependent libraries of the input ++ * library. ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pUuid: Pointer to a DSP_UUID for a library. ++ * numLibs: Size of uuid array (number of library uuids). ++ * pDepLibUuids: Array of dependent library uuids to be filled in. ++ * pPersistentDepLibs: Array indicating if corresponding lib is persistent. ++ * phase: phase to obtain correct input library ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EDCDREADSECT: Failure to read section containing library info. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * DCD initialized. ++ * Valid hDcdMgr. ++ * pUuid != NULL ++ * pDepLibUuids != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_GetDepLibs(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ u16 numLibs, ++ OUT struct DSP_UUID *pDepLibUuids, ++ OUT bool *pPersistentDepLibs, ++ IN enum NLDR_PHASE phase); ++ ++/* ++ * ======== DCD_GetNumDepLibs ======== ++ * Purpose: ++ * Given the uuid of a library, determine its number of dependent ++ * libraries. ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pUuid: Pointer to a DSP_UUID for a library. ++ * pNumLibs: Size of uuid array (number of library uuids). ++ * pNumPersLibs: number of persistent dependent library. ++ * phase: Phase to obtain correct input library ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EDCDREADSECT: Failure to read section containing library info. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * DCD initialized. ++ * Valid hDcdMgr. ++ * pUuid != NULL ++ * pNumLibs != NULL. 
++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_GetNumDepLibs(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ OUT u16 *pNumLibs, ++ OUT u16 *pNumPersLibs, ++ IN enum NLDR_PHASE phase); ++ ++/* ++ * ======== DCD_GetLibraryName ======== ++ * Purpose: ++ * This function returns the name of a (dynamic) library for a given ++ * UUID. ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pUuid: Pointer to a DSP_UUID that represents a unique DSP/BIOS ++ * Bridge object. ++ * pstrLibName: Buffer to hold library name. ++ * pdwSize: Contains buffer size. Set to string size on output. ++ * phase: Which phase to load ++ * fPhaseSplit: Are phases in multiple libraries ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * DCD initialized. ++ * Valid hDcdMgr. ++ * pstrLibName != NULL. ++ * pUuid != NULL ++ * pdwSize != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_GetLibraryName(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ IN OUT char *pstrLibName, ++ IN OUT u32 *pdwSize, ++ IN enum NLDR_PHASE phase, ++ OUT bool *fPhaseSplit); ++ ++/* ++ * ======== DCD_GetObjectDef ======== ++ * Purpose: ++ * This function returns the properties/attributes of a DSP/BIOS Bridge ++ * object. ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pUuid: Pointer to a DSP_UUID that represents a unique ++ * DSP/BIOS Bridge object. ++ * objType: The type of DSP/BIOS Bridge object to be ++ * referenced (node, processor, etc). ++ * pObjDef: Pointer to an object definition structure. A ++ * union of various possible DCD object types. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EDCDPARSESECT: Unable to parse content of object code section. ++ * DSP_EDCDREADSECT: Unable to read object code section. ++ * DSP_EDCDGETSECT: Unable to access object code section. ++ * DSP_EDCDLOADBASE: Unable to load code base. ++ * DSP_EFAIL: General failure. ++ * DSP_EHANDLE: Invalid DCD_HMANAGER handle. ++ * Requires: ++ * DCD initialized. ++ * pObjUuid is non-NULL. ++ * pObjDef is non-NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_GetObjectDef(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pObjUuid, ++ IN enum DSP_DCDOBJTYPE objType, ++ OUT struct DCD_GENERICOBJ *pObjDef); ++ ++/* ++ * ======== DCD_GetObjects ======== ++ * Purpose: ++ * This function finds all DCD objects specified in a special ++ * COFF section called ".dcd_register", and for each object, ++ * call a "register" function. The "register" function may perform ++ * various actions, such as 1) register nodes in the node database, 2) ++ * unregister nodes from the node database, and 3) add overlay nodes. ++ * Parameters: ++ * hDcdMgr: A DCD manager handle. ++ * pszCoffPath: Pointer to name of COFF file containing DCD ++ * objects. ++ * registerFxn: Callback fxn to be applied on each located ++ * DCD object. ++ * handle: Handle to pass to callback. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EDCDNOAUTOREGISTER: Unable to find .dcd_register section. ++ * DSP_EDCDREADSECT: Unable to read object code section. ++ * DSP_EDCDLOADBASE: Unable to load code base. ++ * DSP_EHANDLE: Invalid DCD_HMANAGER handle.. ++ * Requires: ++ * DCD initialized. ++ * Ensures: ++ * Note: ++ * Due to the DCD database construction, it is essential for a DCD-enabled ++ * COFF file to contain the right COFF sections, especially ++ * ".dcd_register", which is used for auto registration. 
++ */ ++ extern DSP_STATUS DCD_GetObjects(IN struct DCD_MANAGER *hDcdMgr, ++ IN char *pszCoffPath, ++ DCD_REGISTERFXN registerFxn, ++ void *handle); ++ ++/* ++ * ======== DCD_Init ======== ++ * Purpose: ++ * This function initializes DCD. ++ * Parameters: ++ * Returns: ++ * FALSE: Initialization failed. ++ * TRUE: Initialization succeeded. ++ * Requires: ++ * Ensures: ++ * DCD initialized. ++ */ ++ extern bool DCD_Init(void); ++ ++/* ++ * ======== DCD_RegisterObject ======== ++ * Purpose: ++ * This function registers a DSP/BIOS Bridge object in the DCD database. ++ * Parameters: ++ * pUuid: Pointer to a DSP_UUID that identifies a DSP/BIOS ++ * Bridge object. ++ * objType: Type of object. ++ * pszPathName: Path to the object's COFF file. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Failed to register object. ++ * Requires: ++ * DCD initialized. ++ * pUuid and szPathName are non-NULL values. ++ * objType is a valid type value. ++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_RegisterObject(IN struct DSP_UUID *pUuid, ++ IN enum DSP_DCDOBJTYPE objType, ++ IN char *pszPathName); ++ ++/* ++ * ======== DCD_UnregisterObject ======== ++ * Purpose: ++ * This function de-registers a valid DSP/BIOS Bridge object from the DCD ++ * database. ++ * Parameters: ++ * pUuid: Pointer to a DSP_UUID that identifies a DSP/BIOS Bridge ++ * object. ++ * objType: Type of object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Unable to de-register the specified object. ++ * Requires: ++ * DCD initialized. ++ * pUuid is a non-NULL value. ++ * objType is a valid type value. ++ * Ensures: ++ */ ++ extern DSP_STATUS DCD_UnregisterObject(IN struct DSP_UUID *pUuid, ++ IN enum DSP_DCDOBJTYPE objType); ++ ++#endif /* _DBDCD_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,583 @@ ++/* ++ * dbdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dbdefs.h ======== ++ * Description: ++ * Global definitions and constants for DSP/BIOS Bridge. ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! 08-Mar-2004 sb Added MAPATTR & ELEM_SIZE for Dynamic Memory Mapping feature ++ *! 09-Feb-2004 vp Added processor ID numbers for DSP and IVA ++ *! 06-Feb-2003 kc Removed DSP_POSTMESSAGE. Updated IsValid*Event macros. ++ *! 22-Nov-2002 gp Cleaned up comments, formatting. ++ *! Removed unused DSP_ENUMLASTNODE define. ++ *! 13-Feb-2002 jeh Added uSysStackSize to DSP_NDBPROPS. ++ *! 23-Jan-2002 ag Added #define DSP_SHMSEG0. ++ *! 12-Dec-2001 ag Added DSP_ESTRMMODE error code. ++ *! 04-Dec-2001 jeh Added DSP_ENOTCONNECTED error code. ++ *! 10-Dec-2001 kc: Modified macros and definitions to disable DSP_POSTMESSAGE. 
++ *! 01-Nov-2001 jeh Added DSP_EOVERLAYMEMORY. ++ *! 18-Oct-2001 ag Added DSP_STRMMODE type. ++ *! Added DSP_ENOTSHAREDMEM. ++ *! 21-Sep-2001 ag Added additional error codes. ++ *! 07-Jun-2001 sg Made DSPStream_AllocateBuffer fxn name plural. ++ *! 11-May-2001 jeh Changed DSP_NODE_MIN_PRIORITY from 0 to 1. Removed hNode ++ *! from DSP_NODEINFO. ++ *! 02-Apr-2001 sg Added missing error codes, rearranged codes, switched to ++ *! hex offsets, renamed some codes to match API spec. ++ *! 16-Jan-2001 jeh Added DSP_ESYMBOL, DSP_EUUID. ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. ++ *! 05-Dec-2000 ag: Added DSP_RMSxxx user available message command codes. ++ *! 09-Nov-2000 rr: Added DSP_PROCEESORRESTART define; Removed DSP_PBUFFER. ++ *! Added DSP_DCD_ENOAUTOREGISTER, DSP_EUSER1-16, DSP_ESTRMFUL ++ *! Removed DSP_EDONE. Macros's modified. ++ *! 23-Oct-2000 jeh Replaced DSP_STREAMSTATECHANGE with DSP_STREAMDONE. ++ *! 09-Oct-2000 jeh Updated to version 0.9 DSP Bridge API spec. ++ *! 29-Sep-2000 kc Added error codes for DCD and REG to simplify use of ++ *! these codes within the RM module. ++ *! 27-Sep-2000 jeh Added segid, alignment, uNumBufs to DSP_STREAMATTRIN. ++ *! 29-Aug-2000 jeh Added DSP_NODETYPE enum, changed DSP_EALREADYATTACHED to ++ *! DSP_EALREADYCONNECTED. Changed scStreamConnection[1] ++ *! to scStreamConnection[16] in DSP_NODEINFO structure. ++ *! Added DSP_NOTIFICATION, DSP_STRMATTR. PSTRING changed ++ *! back to TCHAR * and moved to dbtype.h. ++ *! 11-Aug-2000 rr: Macros to check valid events and notify masks added. ++ *! 09-Aug-2000 rr: Changed PSTRING to *s8 ++ *! 07-Aug-2000 rr: PROC_IDLE/SYNCINIT/UNKNOWN state removed. ++ *! 20-Jul-2000 rr: Updated to version 0.8 ++ *! 17-Jul-2000 rr: New PROC states added to the DSP_PROCSTATE. ++ *! 27-Jun-2000 rr: Created from dspapi.h ++ */ ++ ++#ifndef DBDEFS_ ++#define DBDEFS_ ++ ++#include ++ ++#include /* GPP side type definitions */ ++#include /* DSP/BIOS type definitions */ ++#include /* Types shared between GPP and DSP */ ++ ++#define PG_SIZE_4K 4096 ++#define PG_MASK(pg_size) (~((pg_size)-1)) ++#define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size)) ++#define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size)) ++ ++/* API return value and calling convention */ ++#define DBAPI DSP_STATUS ++ ++/* Infinite time value for the uTimeout parameter to DSPStream_Select() */ ++#define DSP_FOREVER (-1) ++ ++/* Maximum length of node name, used in DSP_NDBPROPS */ ++#define DSP_MAXNAMELEN 32 ++ ++/* uNotifyType values for the RegisterNotify() functions. 
*/ ++#define DSP_SIGNALEVENT 0x00000001 ++ ++/* Types of events for processors */ ++#define DSP_PROCESSORSTATECHANGE 0x00000001 ++#define DSP_PROCESSORATTACH 0x00000002 ++#define DSP_PROCESSORDETACH 0x00000004 ++#define DSP_PROCESSORRESTART 0x00000008 ++ ++/* DSP exception events (DSP/BIOS and DSP MMU fault) */ ++#define DSP_MMUFAULT 0x00000010 ++#define DSP_SYSERROR 0x00000020 ++#define DSP_EXCEPTIONABORT 0x00000300 ++#define DSP_PWRERROR 0x00000080 ++ ++/* IVA exception events (IVA MMU fault) */ ++#define IVA_MMUFAULT 0x00000040 ++/* Types of events for nodes */ ++#define DSP_NODESTATECHANGE 0x00000100 ++#define DSP_NODEMESSAGEREADY 0x00000200 ++ ++/* Types of events for streams */ ++#define DSP_STREAMDONE 0x00001000 ++#define DSP_STREAMIOCOMPLETION 0x00002000 ++ ++/* Handle definition representing the GPP node in DSPNode_Connect() calls */ ++#define DSP_HGPPNODE 0xFFFFFFFF ++ ++/* Node directions used in DSPNode_Connect() */ ++#define DSP_TONODE 1 ++#define DSP_FROMNODE 2 ++ ++/* Define Node Minimum and Maximum Priorities */ ++#define DSP_NODE_MIN_PRIORITY 1 ++#define DSP_NODE_MAX_PRIORITY 15 ++ ++/* Pre-Defined Message Command Codes available to user: */ ++#define DSP_RMSUSERCODESTART RMS_USER /* Start of RMS user cmd codes */ ++/* end of user codes */ ++#define DSP_RMSUSERCODEEND (RMS_USER + RMS_MAXUSERCODES); ++#define DSP_RMSBUFDESC RMS_BUFDESC /* MSG contains SM buffer description */ ++ ++/* Shared memory identifier for MEM segment named "SHMSEG0" */ ++#define DSP_SHMSEG0 (u32)(-1) ++ ++/* Processor ID numbers */ ++#define DSP_UNIT 0 ++#define IVA_UNIT 1 ++ ++#define DSPWORD unsigned char ++#define DSPWORDSIZE sizeof(DSPWORD) ++ ++/* Success & Failure macros */ ++#define DSP_SUCCEEDED(Status) likely((s32)(Status) >= 0) ++#define DSP_FAILED(Status) unlikely((s32)(Status) < 0) ++ ++/* Power control enumerations */ ++#define PROC_PWRCONTROL 0x8070 ++ ++#define PROC_PWRMGT_ENABLE (PROC_PWRCONTROL + 0x3) ++#define PROC_PWRMGT_DISABLE (PROC_PWRCONTROL + 0x4) ++ ++/* Bridge Code Version */ ++#define BRIDGE_VERSION_CODE 333 ++ ++#define MAX_PROFILES 16 ++ ++/* Types defined for 'Bridge API */ ++ typedef u32 DSP_STATUS; /* API return code type */ ++ ++ typedef HANDLE DSP_HNODE; /* Handle to a DSP Node object */ ++ typedef HANDLE DSP_HPROCESSOR; /* Handle to a Processor object */ ++ typedef HANDLE DSP_HSTREAM; /* Handle to a Stream object */ ++ ++ typedef u32 DSP_PROCFAMILY; /* Processor family */ ++ typedef u32 DSP_PROCTYPE; /* Processor type (w/in family) */ ++ typedef u32 DSP_RTOSTYPE; /* Type of DSP RTOS */ ++ ++/* Handy Macros */ ++#define IsValidProcEvent(x) (((x) == 0) || (((x) & (DSP_PROCESSORSTATECHANGE | \ ++ DSP_PROCESSORATTACH | \ ++ DSP_PROCESSORDETACH | \ ++ DSP_PROCESSORRESTART | \ ++ DSP_NODESTATECHANGE | \ ++ DSP_STREAMDONE | \ ++ DSP_STREAMIOCOMPLETION | \ ++ DSP_MMUFAULT | \ ++ DSP_SYSERROR | \ ++ DSP_PWRERROR)) && \ ++ !((x) & ~(DSP_PROCESSORSTATECHANGE | \ ++ DSP_PROCESSORATTACH | \ ++ DSP_PROCESSORDETACH | \ ++ DSP_PROCESSORRESTART | \ ++ DSP_NODESTATECHANGE | \ ++ DSP_STREAMDONE | \ ++ DSP_STREAMIOCOMPLETION | \ ++ DSP_MMUFAULT | \ ++ DSP_SYSERROR | \ ++ DSP_PWRERROR)))) ++ ++#define IsValidNodeEvent(x) (((x) == 0) || (((x) & (DSP_NODESTATECHANGE | \ ++ DSP_NODEMESSAGEREADY)) && \ ++ !((x) & ~(DSP_NODESTATECHANGE | \ ++ DSP_NODEMESSAGEREADY)))) ++ ++#define IsValidStrmEvent(x) (((x) == 0) || (((x) & (DSP_STREAMDONE | \ ++ DSP_STREAMIOCOMPLETION)) && \ ++ !((x) & ~(DSP_STREAMDONE | \ ++ DSP_STREAMIOCOMPLETION)))) ++ ++#define IsValidNotifyMask(x) ((x) & DSP_SIGNALEVENT) 
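A short worked example may help here (an editorial sketch, not part of the patch): it exercises the event-mask validation macros defined above using only the DSP_* event bits from this header; the variable names are invented for illustration.

	/* Two node events combined: accepted by IsValidNodeEvent(), and
	 * DSP_SIGNALEVENT is the only bit IsValidNotifyMask() allows. */
	u32 uEventMask  = DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY;
	u32 uNotifyMask = DSP_SIGNALEVENT;

	if (IsValidNodeEvent(uEventMask) && IsValidNotifyMask(uNotifyMask)) {
		/* mask is well formed; safe to use when registering a
		 * node notification */
	}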
++ ++/* The Node UUID structure */ ++ struct DSP_UUID { ++ u32 ulData1; ++ u16 usData2; ++ u16 usData3; ++ u8 ucData4; ++ u8 ucData5; ++ u8 ucData6[6]; ++ }; ++ ++/* DCD types */ ++ enum DSP_DCDOBJTYPE { ++ DSP_DCDNODETYPE, ++ DSP_DCDPROCESSORTYPE, ++ DSP_DCDLIBRARYTYPE, ++ DSP_DCDCREATELIBTYPE, ++ DSP_DCDEXECUTELIBTYPE, ++ DSP_DCDDELETELIBTYPE ++ } ; ++ ++/* Processor states */ ++ enum DSP_PROCSTATE { ++ PROC_STOPPED, ++ PROC_LOADED, ++ PROC_RUNNING, ++ PROC_ERROR ++ } ; ++ ++/* ++ * Node types: Message node, task node, xDAIS socket node, and ++ * device node. _NODE_GPP is used when defining a stream connection ++ * between a task or socket node and the GPP. ++ * ++ */ ++ enum NODE_TYPE { ++ NODE_DEVICE, ++ NODE_TASK, ++ NODE_DAISSOCKET, ++ NODE_MESSAGE, ++ NODE_GPP ++ } ; ++ ++/* ++ * ======== NODE_STATE ======== ++ * Internal node states. ++ */ ++ enum NODE_STATE { ++ NODE_ALLOCATED, ++ NODE_CREATED, ++ NODE_RUNNING, ++ NODE_PAUSED, ++ NODE_DONE, ++ NODE_CREATING, ++ NODE_STARTING, ++ NODE_PAUSING, ++ NODE_TERMINATING, ++ NODE_DELETING, ++ } ; ++ ++/* Stream states */ ++ enum DSP_STREAMSTATE { ++ STREAM_IDLE, ++ STREAM_READY, ++ STREAM_PENDING, ++ STREAM_DONE ++ } ; ++ ++/* Stream connect types */ ++ enum DSP_CONNECTTYPE { ++ CONNECTTYPE_NODEOUTPUT, ++ CONNECTTYPE_GPPOUTPUT, ++ CONNECTTYPE_NODEINPUT, ++ CONNECTTYPE_GPPINPUT ++ } ; ++ ++/* Stream mode types */ ++ enum DSP_STRMMODE { ++ STRMMODE_PROCCOPY, /* Processor(s) copy stream data payloads */ ++ STRMMODE_ZEROCOPY, /* Strm buffer ptrs swapped no data copied */ ++ STRMMODE_LDMA, /* Local DMA : OMAP's System-DMA device */ ++ STRMMODE_RDMA /* Remote DMA: OMAP's DSP-DMA device */ ++ } ; ++ ++/* Resource Types */ ++ enum DSP_RESOURCEINFOTYPE { ++ DSP_RESOURCE_DYNDARAM = 0, ++ DSP_RESOURCE_DYNSARAM, ++ DSP_RESOURCE_DYNEXTERNAL, ++ DSP_RESOURCE_DYNSRAM, ++ DSP_RESOURCE_PROCLOAD ++ } ; ++ ++/* Memory Segment Types */ ++ enum DSP_MEMTYPE { ++ DSP_DYNDARAM = 0, ++ DSP_DYNSARAM, ++ DSP_DYNEXTERNAL, ++ DSP_DYNSRAM ++ } ; ++ ++/* Memory Flush Types */ ++ enum DSP_FLUSHTYPE { ++ PROC_INVALIDATE_MEM = 0, ++ PROC_WRITEBACK_MEM, ++ PROC_WRITEBACK_INVALIDATE_MEM, ++ } ; ++ ++/* Memory Segment Status Values */ ++ struct DSP_MEMSTAT { ++ u32 ulSize; ++ u32 ulTotalFreeSize; ++ u32 ulLenMaxFreeBlock; ++ u32 ulNumFreeBlocks; ++ u32 ulNumAllocBlocks; ++ } ; ++ ++/* Processor Load information Values */ ++ struct DSP_PROCLOADSTAT { ++ u32 uCurrLoad; ++ u32 uPredictedLoad; ++ u32 uCurrDspFreq; ++ u32 uPredictedFreq; ++ } ; ++ ++/* Attributes for STRM connections between nodes */ ++ struct DSP_STRMATTR { ++ u32 uSegid; /* Memory segment on DSP to allocate buffers */ ++ u32 uBufsize; /* Buffer size (DSP words) */ ++ u32 uNumBufs; /* Number of buffers */ ++ u32 uAlignment; /* Buffer alignment */ ++ u32 uTimeout; /* Timeout for blocking STRM calls */ ++ enum DSP_STRMMODE lMode; /* mode of stream when opened */ ++ /* DMA chnl id if DSP_STRMMODE is LDMA or RDMA */ ++ u32 uDMAChnlId; ++ u32 uDMAPriority; /* DMA channel priority 0=lowest, >0=high */ ++ } ; ++ ++/* The DSP_CBDATA structure */ ++ struct DSP_CBDATA { ++ u32 cbData; ++ u8 cData[1]; ++ } ; ++ ++/* The DSP_MSG structure */ ++ struct DSP_MSG { ++ u32 dwCmd; ++ u32 dwArg1; ++ u32 dwArg2; ++ } ; ++ ++/* The DSP_RESOURCEREQMTS structure for node's resource requirements */ ++ struct DSP_RESOURCEREQMTS { ++ u32 cbStruct; ++ u32 uStaticDataSize; ++ u32 uGlobalDataSize; ++ u32 uProgramMemSize; ++ u32 uWCExecutionTime; ++ u32 uWCPeriod; ++ u32 uWCDeadline; ++ u32 uAvgExectionTime; ++ u32 uMinimumPeriod; ++ } ; ++ 
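As a quick illustration of how these attribute structures are meant to be filled in (an editorial sketch assuming only the constants and types declared above, not code taken from the patch), a processor-copy stream attribute block could be set up like this:

	struct DSP_STRMATTR strmAttrs;

	strmAttrs.uSegid       = DSP_SHMSEG0;       /* shared-memory segment "SHMSEG0" */
	strmAttrs.uBufsize     = 1024;              /* buffer size in DSP words */
	strmAttrs.uNumBufs     = 4;                 /* number of stream buffers */
	strmAttrs.uAlignment   = 0;                 /* default alignment */
	strmAttrs.uTimeout     = DSP_FOREVER;       /* block indefinitely on STRM calls */
	strmAttrs.lMode        = STRMMODE_PROCCOPY; /* processors copy stream payloads */
	strmAttrs.uDMAChnlId   = 0;                 /* only used for LDMA/RDMA modes */
	strmAttrs.uDMAPriority = 0;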
++/* ++ * The DSP_STREAMCONNECT structure describes a stream connection ++ * between two nodes, or between a node and the GPP ++ */ ++ struct DSP_STREAMCONNECT { ++ u32 cbStruct; ++ enum DSP_CONNECTTYPE lType; ++ u32 uThisNodeStreamIndex; ++ DSP_HNODE hConnectedNode; ++ struct DSP_UUID uiConnectedNodeID; ++ u32 uConnectedNodeStreamIndex; ++ } ; ++ ++ struct DSP_NODEPROFS { ++ u32 ulHeapSize; ++ } ; ++ ++/* The DSP_NDBPROPS structure reports the attributes of a node */ ++ struct DSP_NDBPROPS { ++ u32 cbStruct; ++ struct DSP_UUID uiNodeID; ++ char acName[DSP_MAXNAMELEN]; ++ enum NODE_TYPE uNodeType; ++ u32 bCacheOnGPP; ++ struct DSP_RESOURCEREQMTS dspResourceReqmts; ++ s32 iPriority; ++ u32 uStackSize; ++ u32 uSysStackSize; ++ u32 uStackSeg; ++ u32 uMessageDepth; ++ u32 uNumInputStreams; ++ u32 uNumOutputStreams; ++ u32 uTimeout; ++ u32 uCountProfiles; /* Number of supported profiles */ ++ /* Array of profiles */ ++ struct DSP_NODEPROFS aProfiles[MAX_PROFILES]; ++ u32 uStackSegName; /* Stack Segment Name */ ++ } ; ++ ++ /* The DSP_NODEATTRIN structure describes the attributes of a ++ * node client */ ++ struct DSP_NODEATTRIN { ++ u32 cbStruct; ++ s32 iPriority; ++ u32 uTimeout; ++ u32 uProfileID; ++ /* Reserved, for Bridge Internal use only */ ++ u32 uHeapSize; ++ void *pGPPVirtAddr; /* Reserved, for Bridge Internal use only */ ++ } ; ++ ++ /* The DSP_NODEINFO structure is used to retrieve information ++ * about a node */ ++ struct DSP_NODEINFO { ++ u32 cbStruct; ++ struct DSP_NDBPROPS nbNodeDatabaseProps; ++ u32 uExecutionPriority; ++ enum NODE_STATE nsExecutionState; ++ DSP_HNODE hDeviceOwner; ++ u32 uNumberStreams; ++ struct DSP_STREAMCONNECT scStreamConnection[16]; ++ u32 uNodeEnv; ++ } ; ++ ++ /* The DSP_NODEATTR structure describes the attributes of a node */ ++ struct DSP_NODEATTR { ++ u32 cbStruct; ++ struct DSP_NODEATTRIN inNodeAttrIn; ++ u32 uInputs; ++ u32 uOutputs; ++ struct DSP_NODEINFO iNodeInfo; ++ } ; ++ ++/* ++ * Notification type: either the name of an opened event, or an event or ++ * window handle. 
++ */ ++ struct DSP_NOTIFICATION { ++ char *psName; ++ HANDLE handle; ++ } ; ++ ++/* The DSP_PROCESSORATTRIN structure describes the attributes of a processor */ ++ struct DSP_PROCESSORATTRIN{ ++ u32 cbStruct; ++ u32 uTimeout; ++ } ; ++ ++ enum chipTypes { ++ DSPTYPE_55 = 6, ++ IVA_ARM7 = 0x97, ++ DSPTYPE_64 = 0x99 ++ }; ++ ++/* ++ * The DSP_PROCESSORINFO structure describes basic capabilities of a ++ * DSP processor ++ */ ++ struct DSP_PROCESSORINFO { ++ u32 cbStruct; ++ DSP_PROCFAMILY uProcessorFamily; ++ DSP_PROCTYPE uProcessorType; ++ u32 uClockRate; ++ u32 ulInternalMemSize; ++ u32 ulExternalMemSize; ++ u32 uProcessorID; ++ DSP_RTOSTYPE tyRunningRTOS; ++ s32 nNodeMinPriority; ++ s32 nNodeMaxPriority; ++ } ; ++ ++/* Error information of last DSP exception signalled to the GPP */ ++ struct DSP_ERRORINFO { ++ u32 dwErrMask; ++ u32 dwVal1; ++ u32 dwVal2; ++ u32 dwVal3; ++ } ; ++ ++/* The DSP_PROCESSORSTATE structure describes the state of a DSP processor */ ++ struct DSP_PROCESSORSTATE { ++ u32 cbStruct; ++ enum DSP_PROCSTATE iState; ++ struct DSP_ERRORINFO errInfo; ++ } ; ++ ++/* ++ * The DSP_RESOURCEINFO structure is used to retrieve information about a ++ * processor's resources ++ */ ++ struct DSP_RESOURCEINFO { ++ u32 cbStruct; ++ enum DSP_RESOURCEINFOTYPE uResourceType; ++ union { ++ u32 ulResource; ++ struct DSP_MEMSTAT memStat; ++ struct DSP_PROCLOADSTAT procLoadStat; ++ } result; ++ } ; ++ ++/* ++ * The DSP_STREAMATTRIN structure describes the attributes of a stream, ++ * including segment and alignment of data buffers allocated with ++ * DSPStream_AllocateBuffers(), if applicable ++ */ ++ struct DSP_STREAMATTRIN { ++ u32 cbStruct; ++ u32 uTimeout; ++ u32 uSegment; ++ u32 uAlignment; ++ u32 uNumBufs; ++ enum DSP_STRMMODE lMode; ++ u32 uDMAChnlId; ++ u32 uDMAPriority; ++ } ; ++ ++/* The DSP_BUFFERATTR structure describes the attributes of a data buffer */ ++ struct DSP_BUFFERATTR { ++ u32 cbStruct; ++ u32 uSegment; ++ u32 uAlignment; ++ } ; ++ ++/* ++ * The DSP_STREAMINFO structure is used to retrieve information ++ * about a stream. 
++ */ ++ struct DSP_STREAMINFO { ++ u32 cbStruct; ++ u32 uNumberBufsAllowed; ++ u32 uNumberBufsInStream; ++ u32 ulNumberBytes; ++ HANDLE hSyncObjectHandle; ++ enum DSP_STREAMSTATE ssStreamState; ++ } ; ++ ++/* DMM MAP attributes ++It is a bit mask with each bit value indicating a specific attribute ++bit 0 - GPP address type (user virtual=0, physical=1) ++bit 1 - MMU Endianism (Big Endian=1, Little Endian=0) ++bit 2 - MMU mixed page attribute (Mixed/ CPUES=1, TLBES =0) ++bit 3 - MMU element size = 8bit (valid only for non mixed page entries) ++bit 4 - MMU element size = 16bit (valid only for non mixed page entries) ++bit 5 - MMU element size = 32bit (valid only for non mixed page entries) ++bit 6 - MMU element size = 64bit (valid only for non mixed page entries) ++*/ ++ ++/* Types of mapping attributes */ ++ ++/* MPU address is virtual and needs to be translated to physical addr */ ++#define DSP_MAPVIRTUALADDR 0x00000000 ++#define DSP_MAPPHYSICALADDR 0x00000001 ++ ++/* Mapped data is big endian */ ++#define DSP_MAPBIGENDIAN 0x00000002 ++#define DSP_MAPLITTLEENDIAN 0x00000000 ++ ++/* Element size is based on DSP r/w access size */ ++#define DSP_MAPMIXEDELEMSIZE 0x00000004 ++ ++/* ++ * Element size for MMU mapping (8, 16, 32, or 64 bit) ++ * Ignored if DSP_MAPMIXEDELEMSIZE enabled ++ */ ++#define DSP_MAPELEMSIZE8 0x00000008 ++#define DSP_MAPELEMSIZE16 0x00000010 ++#define DSP_MAPELEMSIZE32 0x00000020 ++#define DSP_MAPELEMSIZE64 0x00000040 ++ ++#define DSP_MAPVMALLOCADDR 0x00000080 ++ ++#define DSP_MAPDONOTLOCK 0x00000100 ++ ++ ++#define GEM_CACHE_LINE_SIZE 128 ++#define GEM_L1P_PREFETCH_SIZE 128 ++ ++#endif /* DBDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbg.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbg.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,110 @@ ++/* ++ * dbg.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dbg.h ======== ++ * Purpose: ++ * Provide debugging services for 'Bridge Mini Drivers. ++ * ++ * Public Functions: ++ * DBG_Exit ++ * DBG_Init ++ * DBG_Printf ++ * DBG_Trace ++ * ++ * Notes: ++ * WMD's must not call DBG_Init or DBG_Exit. ++ * ++ *! Revision History: ++ *! ================ ++ *! 03-Feb-2000 rr: DBG Levels redefined. ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 10-Oct-1997 cr: Added DBG_Printf service. ++ *! 29-May-1996 gp: Removed WCD_ prefix. ++ *! 15-May-1996 gp: Created. ++ */ ++ ++#ifndef DBG_ ++#define DBG_ ++#include ++#include ++ ++/* Levels of trace debug messages: */ ++#define DBG_ENTER (u8)(0x01) /* Function entry point. 
*/ ++#define DBG_LEVEL1 (u8)(0x02) /* Display debugging state/varibles */ ++#define DBG_LEVEL2 (u8)(0x04) /* Display debugging state/varibles */ ++#define DBG_LEVEL3 (u8)(0x08) /* Display debugging state/varibles */ ++#define DBG_LEVEL4 (u8)(0x10) /* Display debugging state/varibles */ ++#define DBG_LEVEL5 (u8)(0x20) /* Module Init, Exit */ ++#define DBG_LEVEL6 (u8)(0x40) /* Warn SERVICES Failures */ ++#define DBG_LEVEL7 (u8)(0x80) /* Warn Critical Errors */ ++ ++#if (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE ++ ++/* ++ * ======== DBG_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * DBG initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void DBG_Exit(void); ++ ++/* ++ * ======== DBG_Init ======== ++ * Purpose: ++ * Initializes private state of DBG module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ */ ++ extern bool DBG_Init(void); ++ ++/* ++ * ======== DBG_Trace ======== ++ * Purpose: ++ * Output a trace message to the debugger, if the given trace level ++ * is unmasked. ++ * Parameters: ++ * bLevel: Trace level. ++ * pstrFormat: sprintf-style format string. ++ * ...: Arguments for format string. ++ * Returns: ++ * DSP_SOK: Success, or trace level masked. ++ * DSP_EFAIL: On Error. ++ * Requires: ++ * DBG initialized. ++ * Ensures: ++ * Debug message is printed to debugger output window, if trace level ++ * is unmasked. ++ */ ++ extern DSP_STATUS DBG_Trace(IN u8 bLevel, IN char *pstrFormat, ...); ++#else ++ ++#define DBG_Exit(void) do {} while (0) ++#define DBG_Init(void) true ++#define DBG_Trace(bLevel, pstrFormat, args...) do {} while (0) ++ ++#endif /* (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE */ ++ ++#endif /* DBG_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbldefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbldefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbldefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbldefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,155 @@ ++/* ++ * dbldefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbldefs.h ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Mar-2002 jeh Added DBL_Fxns type (to make it easier to switch ++ *! between different loaders). ++ *! 28-Sep-2001 jeh Created from zl.h. ++ */ ++#ifndef DBLDEFS_ ++#define DBLDEFS_ ++ ++/* ++ * Bit masks for DBL_Flags. 
++ */ ++#define DBL_NOLOAD 0x0 /* Don't load symbols, code, or data */ ++#define DBL_SYMB 0x1 /* load symbols */ ++#define DBL_CODE 0x2 /* load code */ ++#define DBL_DATA 0x4 /* load data */ ++#define DBL_DYNAMIC 0x8 /* dynamic load */ ++#define DBL_BSS 0x20 /* Unitialized section */ ++ ++#define DBL_MAXPATHLENGTH 255 ++ ++ ++ ++/* ++ * ======== DBL_Flags ======== ++ * Specifies whether to load code, data, or symbols ++ */ ++typedef s32 DBL_Flags; ++ ++/* ++ * ======== DBL_SectInfo ======== ++ * For collecting info on overlay sections ++ */ ++struct DBL_SectInfo { ++ const char *name; /* name of section */ ++ u32 runAddr; /* run address of section */ ++ u32 loadAddr; /* load address of section */ ++ u32 size; /* size of section (target MAUs) */ ++ DBL_Flags type; /* Code, data, or BSS */ ++} ; ++ ++/* ++ * ======== DBL_Symbol ======== ++ * (Needed for dynamic load library) ++ */ ++struct DBL_Symbol { ++ u32 value; ++}; ++ ++/* ++ * ======== DBL_AllocFxn ======== ++ * Allocate memory function. Allocate or reserve (if reserved == TRUE) ++ * "size" bytes of memory from segment "space" and return the address in ++ * *dspAddr (or starting at *dspAddr if reserve == TRUE). Returns 0 on ++ * success, or an error code on failure. ++ */ ++typedef s32(*DBL_AllocFxn) (void *hdl, s32 space, u32 size, u32 align, ++ u32 *dspAddr, s32 segId, s32 req, bool reserved); ++ ++ ++ ++/* ++ * ======== DBL_FreeFxn ======== ++ * Free memory function. Free, or unreserve (if reserved == TRUE) "size" ++ * bytes of memory from segment "space" ++ */ ++typedef bool(*DBL_FreeFxn) (void *hdl, u32 addr, s32 space, u32 size, ++ bool reserved); ++ ++/* ++ * ======== DBL_LogWriteFxn ======== ++ * Function to call when writing data from a section, to log the info. ++ * Can be NULL if no logging is required. ++ */ ++typedef DSP_STATUS(*DBL_LogWriteFxn) (void *handle, struct DBL_SectInfo *sect, ++ u32 addr, u32 nBytes); ++ ++ ++/* ++ * ======== DBL_SymLookup ======== ++ * Symbol lookup function - Find the symbol name and return its value. ++ * ++ * Parameters: ++ * handle - Opaque handle ++ * pArg - Opaque argument. ++ * name - Name of symbol to lookup. ++ * sym - Location to store address of symbol structure. ++ * ++ * Returns: ++ * TRUE: Success (symbol was found). ++ * FALSE: Failed to find symbol. ++ */ ++typedef bool(*DBL_SymLookup) (void *handle, void *pArg, void *rmmHandle, ++ const char *name, struct DBL_Symbol **sym); ++ ++ ++/* ++ * ======== DBL_WriteFxn ======== ++ * Write memory function. Write "n" HOST bytes of memory to segment "mtype" ++ * starting at address "dspAddr" from the buffer "buf". The buffer is ++ * formatted as an array of words appropriate for the DSP. ++ */ ++typedef s32(*DBL_WriteFxn) (void *hdl, u32 dspAddr, void *buf, ++ u32 n, s32 mtype); ++ ++/* ++ * ======== DBL_Attrs ======== ++ */ ++struct DBL_Attrs { ++ DBL_AllocFxn alloc; ++ DBL_FreeFxn free; ++ void *rmmHandle; /* Handle to pass to alloc, free functions */ ++ DBL_WriteFxn write; ++ void *wHandle; /* Handle to pass to write, cinit function */ ++ ++ DBL_LogWriteFxn logWrite; ++ void *logWriteHandle; ++ ++ /* Symbol matching function and handle to pass to it */ ++ DBL_SymLookup symLookup; ++ void *symHandle; ++ void *symArg; ++ ++ /* ++ * These file manipulation functions should be compatible with the ++ * "C" run time library functions of the same name. 
++ */ ++ s32(*fread) (void *, size_t, size_t, void *); ++ s32(*fseek) (void *, long, int); ++ s32(*ftell) (void *); ++ s32(*fclose) (void *); ++ void *(*fopen) (const char *, const char *); ++} ; ++ ++#endif /* DBLDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbl.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbl.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbl.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbl.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,354 @@ ++/* ++ * dbl.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbl.h ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Mar-2002 jeh Pass DBL_Symbol pointer to DBL_getAddr, DBL_getCAddr ++ *! to accomodate dynamic loader library. ++ *! 20-Nov-2001 jeh Removed DBL_loadArgs(). ++ *! 24-Sep-2001 jeh Code review changes. ++ *! 07-Sep-2001 jeh Added DBL_LoadSect(), DBL_UnloadSect(). ++ *! 05-Jun-2001 jeh Created based on zl.h. ++ */ ++ ++#ifndef DBL_ ++#define DBL_ ++ ++#include ++#include ++ ++/* ++ * ======== DBL_close ======== ++ * Close library opened with DBL_open. ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * Ensures: ++ */ ++ extern void DBL_close(struct DBL_LibraryObj *lib); ++ ++/* ++ * ======== DBL_create ======== ++ * Create a target object by specifying the alloc, free, and write ++ * functions for the target. ++ * Parameters: ++ * pTarget - Location to store target handle on output. ++ * pAttrs - Attributes. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failed. ++ * Requires: ++ * DBL initialized. ++ * pAttrs != NULL. ++ * pTarget != NULL; ++ * Ensures: ++ * Success: *pTarget != NULL. ++ * Failure: *pTarget == NULL. ++ */ ++ extern DSP_STATUS DBL_create(struct DBL_TargetObj **pTarget, ++ struct DBL_Attrs *pAttrs); ++ ++/* ++ * ======== DBL_delete ======== ++ * Delete target object and free resources for any loaded libraries. ++ * Parameters: ++ * target - Handle returned from DBL_Create(). ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * Ensures: ++ */ ++ extern void DBL_delete(struct DBL_TargetObj *target); ++ ++/* ++ * ======== DBL_exit ======== ++ * Discontinue use of DBL module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * cRefs > 0. ++ * Ensures: ++ * cRefs >= 0. ++ */ ++ extern void DBL_exit(void); ++ ++/* ++ * ======== DBL_getAddr ======== ++ * Get address of name in the specified library. ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * name - Name of symbol ++ * ppSym - Location to store symbol address on output. ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Symbol not found. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * name != NULL. ++ * pAddr != NULL. 
++ * Ensures: ++ */ ++ extern bool DBL_getAddr(struct DBL_LibraryObj *lib, char *name, ++ struct DBL_Symbol **ppSym); ++ ++/* ++ * ======== DBL_getAttrs ======== ++ * Retrieve the attributes of the target. ++ * Parameters: ++ * target - Handle returned from DBL_Create(). ++ * pAttrs - Location to store attributes on output. ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * pAttrs != NULL. ++ * Ensures: ++ */ ++ extern void DBL_getAttrs(struct DBL_TargetObj *target, ++ struct DBL_Attrs *pAttrs); ++ ++/* ++ * ======== DBL_getCAddr ======== ++ * Get address of "C" name in the specified library. ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * name - Name of symbol ++ * ppSym - Location to store symbol address on output. ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Symbol not found. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * name != NULL. ++ * pAddr != NULL. ++ * Ensures: ++ */ ++ extern bool DBL_getCAddr(struct DBL_LibraryObj *lib, char *name, ++ struct DBL_Symbol **ppSym); ++ ++/* ++ * ======== DBL_getEntry ======== ++ * Get program entry point. ++ * ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * pEntry - Location to store entry address on output. ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Failure. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * pEntry != NULL. ++ * Ensures: ++ */ ++ extern bool DBL_getEntry(struct DBL_LibraryObj *lib, u32 *pEntry); ++ ++/* ++ * ======== DBL_getSect ======== ++ * Get address and size of a named section. ++ * Parameters: ++ * lib - Library handle returned from DBL_open(). ++ * name - Name of section. ++ * pAddr - Location to store section address on output. ++ * pSize - Location to store section size on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Section not found. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * name != NULL. ++ * pAddr != NULL; ++ * pSize != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DBL_getSect(struct DBL_LibraryObj *lib, char *name, ++ u32 *pAddr, u32 *pSize); ++ ++/* ++ * ======== DBL_init ======== ++ * Initialize DBL module. ++ * Parameters: ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Failure. ++ * Requires: ++ * cRefs >= 0. ++ * Ensures: ++ * Success: cRefs > 0. ++ * Failure: cRefs >= 0. ++ */ ++ extern bool DBL_init(void); ++ ++/* ++ * ======== DBL_load ======== ++ * Add symbols/code/data defined in file to that already present on ++ * the target. ++ * ++ * Parameters: ++ * lib - Library handle returned from DBL_open(). ++ * flags - Specifies whether loading code, data, and/or symbols. ++ * attrs - May contain write, alloc, and free functions. ++ * pulEntry - Location to store program entry on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFREAD: File read failed. ++ * DSP_EFWRITE: Write to target failed. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * pEntry != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DBL_load(struct DBL_LibraryObj *lib, DBL_Flags flags, ++ struct DBL_Attrs *attrs, u32 *pEntry); ++ ++/* ++ * ======== DBL_loadSect ======== ++ * Load a named section from an library (for overlay support). ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * sectName - Name of section to load. ++ * attrs - Contains write function and handle to pass to it. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Section not found. ++ * DSP_EFWRITE: Write function failed. ++ * Requires: ++ * Valid lib. ++ * sectName != NULL. ++ * attrs != NULL. ++ * attrs->write != NULL. 
++ * Ensures: ++ */ ++ extern DSP_STATUS DBL_loadSect(struct DBL_LibraryObj *lib, ++ char *sectName, ++ struct DBL_Attrs *attrs); ++ ++/* ++ * ======== DBL_open ======== ++ * DBL_open() returns a library handle that can be used to load/unload ++ * the symbols/code/data via DBL_load()/DBL_unload(). ++ * Parameters: ++ * target - Handle returned from DBL_create(). ++ * file - Name of file to open. ++ * flags - Specifies whether to load symbols now. ++ * pLib - Location to store library handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EFOPEN: File open failure. ++ * DSP_EFREAD: File read failure. ++ * DSP_ECORRUPTFILE: Unable to determine target type. ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * file != NULL. ++ * pLib != NULL. ++ * struct DBL_Attrs fopen function non-NULL. ++ * Ensures: ++ * Success: Valid *pLib. ++ * Failure: *pLib == NULL. ++ */ ++ extern DSP_STATUS DBL_open(struct DBL_TargetObj *target, char *file, ++ DBL_Flags flags, ++ struct DBL_LibraryObj **pLib); ++ ++/* ++ * ======== DBL_readSect ======== ++ * Read COFF section into a character buffer. ++ * Parameters: ++ * lib - Library handle returned from DBL_open(). ++ * name - Name of section. ++ * pBuf - Buffer to write section contents into. ++ * size - Buffer size ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Named section does not exists. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * name != NULL. ++ * pBuf != NULL. ++ * size != 0. ++ * Ensures: ++ */ ++ extern DSP_STATUS DBL_readSect(struct DBL_LibraryObj *lib, char *name, ++ char *pBuf, u32 size); ++ ++/* ++ * ======== DBL_setAttrs ======== ++ * Set the attributes of the target. ++ * Parameters: ++ * target - Handle returned from DBL_create(). ++ * pAttrs - New attributes. ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * pAttrs != NULL. ++ * Ensures: ++ */ ++ extern void DBL_setAttrs(struct DBL_TargetObj *target, ++ struct DBL_Attrs *pAttrs); ++ ++/* ++ * ======== DBL_unload ======== ++ * Remove the symbols/code/data corresponding to the library lib. ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * attrs - Contains free() function and handle to pass to it. ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * Ensures: ++ */ ++ extern void DBL_unload(struct DBL_LibraryObj *lib, ++ struct DBL_Attrs *attrs); ++ ++/* ++ * ======== DBL_unloadSect ======== ++ * Unload a named section from an library (for overlay support). ++ * Parameters: ++ * lib - Handle returned from DBL_open(). ++ * sectName - Name of section to load. ++ * attrs - Contains free() function and handle to pass to it. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Named section not found. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * sectName != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DBL_unloadSect(struct DBL_LibraryObj *lib, ++ char *sectName, ++ struct DBL_Attrs *attrs); ++ ++#endif /* DBL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dblldefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dblldefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dblldefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dblldefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,509 @@ ++/* ++ * dblldefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dblldefs.h ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Apr-2003 map Consolidated DBL into DBLL name ++ *! 19-Mar-2002 jeh Added DBL_Fxns type (to make it easier to switch ++ *! between different loaders). ++ *! 28-Sep-2001 jeh Created from zl.h. ++ */ ++#ifndef DBLLDEFS_ ++#define DBLLDEFS_ ++ ++/* ++ * Bit masks for DBL_Flags. ++ */ ++#define DBLL_NOLOAD 0x0 /* Don't load symbols, code, or data */ ++#define DBLL_SYMB 0x1 /* load symbols */ ++#define DBLL_CODE 0x2 /* load code */ ++#define DBLL_DATA 0x4 /* load data */ ++#define DBLL_DYNAMIC 0x8 /* dynamic load */ ++#define DBLL_BSS 0x20 /* Unitialized section */ ++ ++#define DBLL_MAXPATHLENGTH 255 ++ ++ ++/* ++ * ======== DBLL_Target ======== ++ * ++ */ ++struct DBLL_TarObj; ++ ++/* ++ * ======== DBLL_Flags ======== ++ * Specifies whether to load code, data, or symbols ++ */ ++typedef s32 DBLL_Flags; ++ ++/* ++ * ======== DBLL_Library ======== ++ * ++ */ ++struct DBLL_LibraryObj; ++ ++/* ++ * ======== DBLL_SectInfo ======== ++ * For collecting info on overlay sections ++ */ ++struct DBLL_SectInfo { ++ const char *name; /* name of section */ ++ u32 runAddr; /* run address of section */ ++ u32 loadAddr; /* load address of section */ ++ u32 size; /* size of section (target MAUs) */ ++ DBLL_Flags type; /* Code, data, or BSS */ ++} ; ++ ++/* ++ * ======== DBLL_Symbol ======== ++ * (Needed for dynamic load library) ++ */ ++struct DBLL_Symbol { ++ u32 value; ++}; ++ ++/* ++ * ======== DBLL_AllocFxn ======== ++ * Allocate memory function. Allocate or reserve (if reserved == TRUE) ++ * "size" bytes of memory from segment "space" and return the address in ++ * *dspAddr (or starting at *dspAddr if reserve == TRUE). Returns 0 on ++ * success, or an error code on failure. ++ */ ++typedef s32(*DBLL_AllocFxn) (void *hdl, s32 space, u32 size, u32 align, ++ u32 *dspAddr, s32 segId, s32 req, ++ bool reserved); ++ ++/* ++ * ======== DBLL_CloseFxn ======== ++ */ ++typedef s32(*DBLL_FCloseFxn) (void *); ++ ++/* ++ * ======== DBLL_FreeFxn ======== ++ * Free memory function. Free, or unreserve (if reserved == TRUE) "size" ++ * bytes of memory from segment "space" ++ */ ++typedef bool(*DBLL_FreeFxn) (void *hdl, u32 addr, s32 space, u32 size, ++ bool reserved); ++ ++/* ++ * ======== DBLL_FOpenFxn ======== ++ */ ++typedef void *(*DBLL_FOpenFxn) (const char *, const char *); ++ ++/* ++ * ======== DBLL_LogWriteFxn ======== ++ * Function to call when writing data from a section, to log the info. ++ * Can be NULL if no logging is required. ++ */ ++typedef DSP_STATUS(*DBLL_LogWriteFxn)(void *handle, struct DBLL_SectInfo *sect, ++ u32 addr, u32 nBytes); ++ ++/* ++ * ======== DBLL_ReadFxn ======== ++ */ ++typedef s32(*DBLL_ReadFxn) (void *, size_t, size_t, void *); ++ ++/* ++ * ======== DBLL_SeekFxn ======== ++ */ ++typedef s32(*DBLL_SeekFxn) (void *, long, int); ++ ++/* ++ * ======== DBLL_SymLookup ======== ++ * Symbol lookup function - Find the symbol name and return its value. 
++ * ++ * Parameters: ++ * handle - Opaque handle ++ * pArg - Opaque argument. ++ * name - Name of symbol to lookup. ++ * sym - Location to store address of symbol structure. ++ * ++ * Returns: ++ * TRUE: Success (symbol was found). ++ * FALSE: Failed to find symbol. ++ */ ++typedef bool(*DBLL_SymLookup) (void *handle, void *pArg, void *rmmHandle, ++ const char *name, struct DBLL_Symbol **sym); ++ ++/* ++ * ======== DBLL_TellFxn ======== ++ */ ++typedef s32(*DBLL_TellFxn) (void *); ++ ++/* ++ * ======== DBLL_WriteFxn ======== ++ * Write memory function. Write "n" HOST bytes of memory to segment "mtype" ++ * starting at address "dspAddr" from the buffer "buf". The buffer is ++ * formatted as an array of words appropriate for the DSP. ++ */ ++typedef s32(*DBLL_WriteFxn) (void *hdl, u32 dspAddr, void *buf, ++ u32 n, s32 mtype); ++ ++/* ++ * ======== DBLL_Attrs ======== ++ */ ++struct DBLL_Attrs { ++ DBLL_AllocFxn alloc; ++ DBLL_FreeFxn free; ++ void *rmmHandle; /* Handle to pass to alloc, free functions */ ++ DBLL_WriteFxn write; ++ void *wHandle; /* Handle to pass to write, cinit function */ ++ bool baseImage; ++ DBLL_LogWriteFxn logWrite; ++ void *logWriteHandle; ++ ++ /* Symbol matching function and handle to pass to it */ ++ DBLL_SymLookup symLookup; ++ void *symHandle; ++ void *symArg; ++ ++ /* ++ * These file manipulation functions should be compatible with the ++ * "C" run time library functions of the same name. ++ */ ++ s32(*fread) (void *, size_t, size_t, void *); ++ s32(*fseek) (void *, long, int); ++ s32(*ftell) (void *); ++ s32(*fclose) (void *); ++ void *(*fopen) (const char *, const char *); ++} ; ++ ++/* ++ * ======== DBLL_close ======== ++ * Close library opened with DBLL_open. ++ * Parameters: ++ * lib - Handle returned from DBLL_open(). ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * Ensures: ++ */ ++typedef void(*DBLL_CloseFxn) (struct DBLL_LibraryObj *library); ++ ++/* ++ * ======== DBLL_create ======== ++ * Create a target object, specifying the alloc, free, and write functions. ++ * Parameters: ++ * pTarget - Location to store target handle on output. ++ * pAttrs - Attributes. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failed. ++ * Requires: ++ * DBL initialized. ++ * pAttrs != NULL. ++ * pTarget != NULL; ++ * Ensures: ++ * Success: *pTarget != NULL. ++ * Failure: *pTarget == NULL. ++ */ ++typedef DSP_STATUS(*DBLL_CreateFxn)(struct DBLL_TarObj **pTarget, ++ struct DBLL_Attrs *attrs); ++ ++/* ++ * ======== DBLL_delete ======== ++ * Delete target object and free resources for any loaded libraries. ++ * Parameters: ++ * target - Handle returned from DBLL_Create(). ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * Ensures: ++ */ ++typedef void(*DBLL_DeleteFxn) (struct DBLL_TarObj *target); ++ ++/* ++ * ======== DBLL_exit ======== ++ * Discontinue use of DBL module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * cRefs > 0. ++ * Ensures: ++ * cRefs >= 0. ++ */ ++typedef void(*DBLL_ExitFxn) (void); ++ ++/* ++ * ======== DBLL_getAddr ======== ++ * Get address of name in the specified library. ++ * Parameters: ++ * lib - Handle returned from DBLL_open(). ++ * name - Name of symbol ++ * ppSym - Location to store symbol address on output. ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Symbol not found. ++ * Requires: ++ * DBL initialized. ++ * Valid library. ++ * name != NULL. ++ * ppSym != NULL. 
++ * Ensures: ++ */ ++typedef bool(*DBLL_GetAddrFxn) (struct DBLL_LibraryObj *lib, char *name, ++ struct DBLL_Symbol **ppSym); ++ ++/* ++ * ======== DBLL_getAttrs ======== ++ * Retrieve the attributes of the target. ++ * Parameters: ++ * target - Handle returned from DBLL_Create(). ++ * pAttrs - Location to store attributes on output. ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * pAttrs != NULL. ++ * Ensures: ++ */ ++typedef void(*DBLL_GetAttrsFxn) (struct DBLL_TarObj *target, ++ struct DBLL_Attrs *attrs); ++ ++/* ++ * ======== DBLL_getCAddr ======== ++ * Get address of "C" name on the specified library. ++ * Parameters: ++ * lib - Handle returned from DBLL_open(). ++ * name - Name of symbol ++ * ppSym - Location to store symbol address on output. ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Symbol not found. ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * name != NULL. ++ * ppSym != NULL. ++ * Ensures: ++ */ ++typedef bool(*DBLL_GetCAddrFxn) (struct DBLL_LibraryObj *lib, char *name, ++ struct DBLL_Symbol **ppSym); ++ ++/* ++ * ======== DBLL_getSect ======== ++ * Get address and size of a named section. ++ * Parameters: ++ * lib - Library handle returned from DBLL_open(). ++ * name - Name of section. ++ * pAddr - Location to store section address on output. ++ * pSize - Location to store section size on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Section not found. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * name != NULL. ++ * pAddr != NULL; ++ * pSize != NULL. ++ * Ensures: ++ */ ++typedef DSP_STATUS(*DBLL_GetSectFxn) (struct DBLL_LibraryObj *lib, char *name, ++ u32 *addr, u32 *size); ++ ++/* ++ * ======== DBLL_init ======== ++ * Initialize DBL module. ++ * Parameters: ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Failure. ++ * Requires: ++ * cRefs >= 0. ++ * Ensures: ++ * Success: cRefs > 0. ++ * Failure: cRefs >= 0. ++ */ ++typedef bool(*DBLL_InitFxn) (void); ++ ++/* ++ * ======== DBLL_load ======== ++ * Load library onto the target. ++ * ++ * Parameters: ++ * lib - Library handle returned from DBLL_open(). ++ * flags - Load code, data and/or symbols. ++ * attrs - May contain alloc, free, and write function. ++ * pulEntry - Location to store program entry on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFREAD: File read failed. ++ * DSP_EFWRITE: Write to target failed. ++ * DSP_EDYNLOAD: Failure in dynamic loader library. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * pEntry != NULL. ++ * Ensures: ++ */ ++typedef DSP_STATUS(*DBLL_LoadFxn) (struct DBLL_LibraryObj *lib, ++ DBLL_Flags flags, ++ struct DBLL_Attrs *attrs, u32 *entry); ++ ++/* ++ * ======== DBLL_loadSect ======== ++ * Load a named section from an library (for overlay support). ++ * Parameters: ++ * lib - Handle returned from DBLL_open(). ++ * sectName - Name of section to load. ++ * attrs - Contains write function and handle to pass to it. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Section not found. ++ * DSP_EFWRITE: Write function failed. ++ * DSP_ENOTIMPL: Function not implemented. ++ * Requires: ++ * Valid lib. ++ * sectName != NULL. ++ * attrs != NULL. ++ * attrs->write != NULL. ++ * Ensures: ++ */ ++typedef DSP_STATUS(*DBLL_LoadSectFxn) (struct DBLL_LibraryObj *lib, ++ char *pszSectName, ++ struct DBLL_Attrs *attrs); ++ ++/* ++ * ======== DBLL_open ======== ++ * DBLL_open() returns a library handle that can be used to load/unload ++ * the symbols/code/data via DBLL_load()/DBLL_unload(). 
++ * Parameters: ++ * target - Handle returned from DBLL_create(). ++ * file - Name of file to open. ++ * flags - If flags & DBLL_SYMB, load symbols. ++ * pLib - Location to store library handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EFOPEN: File open failure. ++ * DSP_EFREAD: File read failure. ++ * DSP_ECORRUPTFILE: Unable to determine target type. ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * file != NULL. ++ * pLib != NULL. ++ * DBLL_Attrs fopen function non-NULL. ++ * Ensures: ++ * Success: Valid *pLib. ++ * Failure: *pLib == NULL. ++ */ ++typedef DSP_STATUS(*DBLL_OpenFxn) (struct DBLL_TarObj *target, char *file, ++ DBLL_Flags flags, ++ struct DBLL_LibraryObj **pLib); ++ ++/* ++ * ======== DBLL_readSect ======== ++ * Read COFF section into a character buffer. ++ * Parameters: ++ * lib - Library handle returned from DBLL_open(). ++ * name - Name of section. ++ * pBuf - Buffer to write section contents into. ++ * size - Buffer size ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Named section does not exists. ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * name != NULL. ++ * pBuf != NULL. ++ * size != 0. ++ * Ensures: ++ */ ++typedef DSP_STATUS(*DBLL_ReadSectFxn) (struct DBLL_LibraryObj *lib, char *name, ++ char *content, u32 uContentSize); ++ ++/* ++ * ======== DBLL_setAttrs ======== ++ * Set the attributes of the target. ++ * Parameters: ++ * target - Handle returned from DBLL_create(). ++ * pAttrs - New attributes. ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid target. ++ * pAttrs != NULL. ++ * Ensures: ++ */ ++typedef void(*DBLL_SetAttrsFxn)(struct DBLL_TarObj *target, ++ struct DBLL_Attrs *attrs); ++ ++/* ++ * ======== DBLL_unload ======== ++ * Unload library loaded with DBLL_load(). ++ * Parameters: ++ * lib - Handle returned from DBLL_open(). ++ * attrs - Contains free() function and handle to pass to it. ++ * Returns: ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * Ensures: ++ */ ++typedef void(*DBLL_UnloadFxn) (struct DBLL_LibraryObj *library, ++ struct DBLL_Attrs *attrs); ++ ++/* ++ * ======== DBLL_unloadSect ======== ++ * Unload a named section from an library (for overlay support). ++ * Parameters: ++ * lib - Handle returned from DBLL_open(). ++ * sectName - Name of section to load. ++ * attrs - Contains free() function and handle to pass to it. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ENOSECT: Named section not found. ++ * DSP_ENOTIMPL ++ * Requires: ++ * DBL initialized. ++ * Valid lib. ++ * sectName != NULL. 
++ * Ensures: ++ */ ++typedef DSP_STATUS(*DBLL_UnloadSectFxn) (struct DBLL_LibraryObj *lib, ++ char *pszSectName, ++ struct DBLL_Attrs *attrs); ++ ++struct DBLL_Fxns { ++ DBLL_CloseFxn closeFxn; ++ DBLL_CreateFxn createFxn; ++ DBLL_DeleteFxn deleteFxn; ++ DBLL_ExitFxn exitFxn; ++ DBLL_GetAttrsFxn getAttrsFxn; ++ DBLL_GetAddrFxn getAddrFxn; ++ DBLL_GetCAddrFxn getCAddrFxn; ++ DBLL_GetSectFxn getSectFxn; ++ DBLL_InitFxn initFxn; ++ DBLL_LoadFxn loadFxn; ++ DBLL_LoadSectFxn loadSectFxn; ++ DBLL_OpenFxn openFxn; ++ DBLL_ReadSectFxn readSectFxn; ++ DBLL_SetAttrsFxn setAttrsFxn; ++ DBLL_UnloadFxn unloadFxn; ++ DBLL_UnloadSectFxn unloadSectFxn; ++} ; ++ ++#endif /* DBLDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbll.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbll.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbll.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbll.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,70 @@ ++/* ++ * dbll.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbll.h ======== ++ * DSP/BIOS Bridge Dynamic load library module interface. Function header ++ * comments are in the file dblldefs.h. ++ * ++ *! Revision History ++ *! ================ ++ *! 31-Jul-2002 jeh Removed function comments (now in dblldefs.h). ++ *! 17-Apr-2002 jeh Created based on zl.h. 
++ */ ++ ++#ifndef DBLL_ ++#define DBLL_ ++ ++#include ++#include ++ ++ extern void DBLL_close(struct DBLL_LibraryObj *lib); ++ extern DSP_STATUS DBLL_create(struct DBLL_TarObj **pTarget, ++ struct DBLL_Attrs *pAttrs); ++ extern void DBLL_delete(struct DBLL_TarObj *target); ++ extern void DBLL_exit(void); ++ extern bool DBLL_getAddr(struct DBLL_LibraryObj *lib, char *name, ++ struct DBLL_Symbol **ppSym); ++ extern void DBLL_getAttrs(struct DBLL_TarObj *target, ++ struct DBLL_Attrs *pAttrs); ++ extern bool DBLL_getCAddr(struct DBLL_LibraryObj *lib, char *name, ++ struct DBLL_Symbol **ppSym); ++ extern DSP_STATUS DBLL_getSect(struct DBLL_LibraryObj *lib, char *name, ++ u32 *pAddr, u32 *pSize); ++ extern bool DBLL_init(void); ++ extern DSP_STATUS DBLL_load(struct DBLL_LibraryObj *lib, ++ DBLL_Flags flags, ++ struct DBLL_Attrs *attrs, u32 *pEntry); ++ extern DSP_STATUS DBLL_loadSect(struct DBLL_LibraryObj *lib, ++ char *sectName, ++ struct DBLL_Attrs *attrs); ++ extern DSP_STATUS DBLL_open(struct DBLL_TarObj *target, char *file, ++ DBLL_Flags flags, ++ struct DBLL_LibraryObj **pLib); ++ extern DSP_STATUS DBLL_readSect(struct DBLL_LibraryObj *lib, ++ char *name, ++ char *pBuf, u32 size); ++ extern void DBLL_setAttrs(struct DBLL_TarObj *target, ++ struct DBLL_Attrs *pAttrs); ++ extern void DBLL_unload(struct DBLL_LibraryObj *lib, ++ struct DBLL_Attrs *attrs); ++ extern DSP_STATUS DBLL_unloadSect(struct DBLL_LibraryObj *lib, ++ char *sectName, ++ struct DBLL_Attrs *attrs); ++ ++#endif /* DBLL_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbof.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbof.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbof.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbof.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,117 @@ ++/* ++ * dbof.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbof.h ======== ++ * Description: ++ * Defines and typedefs for DSP/BIOS Bridge Object File Format (DBOF). ++ * ++ *! Revision History ++ *! ================ ++ *! 12-Jul-2002 jeh Added defines for DBOF_SectHdr page. ++ *! 12-Oct-2001 jeh Converted to std.h format. ++ *! 07-Sep-2001 jeh Added overlay support. ++ *! 06-Jul-2001 jeh Created. ++ */ ++ ++#ifndef DBOF_ ++#define DBOF_ ++ ++/* Enough to hold DCD section names: 32 digit ID + underscores */ ++#define DBOF_DCDSECTNAMELEN 40 ++ ++/* Values for DBOF_SectHdr page field. 
*/ ++#define DBOF_PROGRAM 0 ++#define DBOF_DATA 1 ++#define DBOF_CINIT 2 ++ ++/* ++ * ======== DBOF_FileHdr ======== ++ */ ++ struct DBOF_FileHdr { ++ u32 magic; /* COFF magic number */ ++ u32 entry; /* Program entry point */ ++ u16 numSymbols; /* Number of bridge symbols */ ++ u16 numDCDSects; /* Number of DCD sections */ ++ u16 numSects; /* Number of sections to load */ ++ u16 numOvlySects; /* Number of overlay sections */ ++ u32 symOffset; /* Offset in file to symbols */ ++ u32 dcdSectOffset; /* Offset to DCD sections */ ++ u32 loadSectOffset; /* Offset to loadable sections */ ++ u32 ovlySectOffset; /* Offset to overlay data */ ++ u16 version; /* DBOF version number */ ++ u16 resvd; /* Reserved for future use */ ++ } ; ++ ++/* ++ * ======== DBOF_DCDSectHdr ======== ++ */ ++ struct DBOF_DCDSectHdr { ++ u32 size; /* Sect size (target MAUs) */ ++ char name[DBOF_DCDSECTNAMELEN]; /* DCD section name */ ++ } ; ++ ++/* ++ * ======== DBOF_OvlySectHdr ======== ++ */ ++ struct DBOF_OvlySectHdr { ++ u16 nameLen; /* Length of section name */ ++ u16 numCreateSects; /* # of sects loaded for create phase */ ++ u16 numDeleteSects; /* # of sects loaded for delete phase */ ++ u16 numExecuteSects; /* # of sects loaded for execute phase */ ++ ++ /* ++ * Number of sections where load/unload phase is not specified. ++ * These sections will be loaded when create phase sects are ++ * loaded, and unloaded when the delete phase is unloaded. ++ */ ++ u16 numOtherSects; ++ u16 resvd; /* Reserved for future use */ ++ }; ++ ++/* ++ * ======== DBOF_OvlySectData ======== ++ */ ++ struct DBOF_OvlySectData { ++ u32 loadAddr; /* Section load address */ ++ u32 runAddr; /* Section run address */ ++ u32 size; /* Section size (target MAUs) */ ++ u16 page; /* Memory page number */ ++ u16 resvd; /* Reserved */ ++ } ; ++ ++/* ++ * ======== DBOF_SectHdr ======== ++ */ ++ struct DBOF_SectHdr { ++ u32 addr; /* Section address */ ++ u32 size; /* Section size (target MAUs) */ ++ u16 page; /* Page number */ ++ u16 resvd; /* Reserved for future use */ ++ } ; ++ ++/* ++ * ======== DBOF_SymbolHdr ======== ++ */ ++ struct DBOF_SymbolHdr { ++ u32 value; /* Symbol value */ ++ u16 nameLen; /* Length of symbol name */ ++ u16 resvd; /* Reserved for future use */ ++ } ; ++ ++#endif /* DBOF_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbreg.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbreg.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbreg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbreg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,113 @@ ++/* ++ * dbreg.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbreg.h ======== ++ * Purpose: ++ * Registry keys for use in Linux. This is the clearinghouse for ++ * registry definitions, hopefully eliminating overlapping between ++ * modules. ++ * ++ *! Revision History: ++ *! ================ ++ *! 
10-Apr-2003 vp: Added macro for subkey TCWORDSWAP. ++ *! 21-Mar-2003 sb: Added macro for subkey SHMSize ++ *! 27-Aug-2001 jeh Added WSXREG_LOADERFILENAME. ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. ++ *! 29-Nov-2000 rr: Added WSXREG_DSPTYPE_55 as 6. ++ *! 06-Sep-2000 jeh: Added WSXREG_CHNLOFFSET, WSXREG_NUMCHNLS, ++ *! WSXREG_CHNLBUFSIZE. ++ *! 26-Aug-2000 rr: MEMBASE expanded to 9 entries. ++ *! 26-Jul-2000 rr: Added WSXREG_DCDNAME for the DCD Dll name. It will ++ *! live under WSXREG_WINSPOXCONFIG. ++ *! 17-Jul-2000 rr: REG_MGR_OBJECT and REG_DRV_OBJECT defined. They ++ *! are stored in the Registrty under WSXREG_WINSPOXCONFIG ++ *! when they are created in DSP_Init. WSXREG_DEVOBJECT ++ *! and WSXREG_MGROBJECT defined. ++ *! 11-Dec-1999 ag: Renamed Isa to IsaBus due to conflict with ceddk.h. ++ *! 12-Nov-1999 rr: New Registry Defnitions. ++ *! 15-Oct-1999 rr: New entry for DevObject created. WSXREG_DEVOBJECT ++ *! under WSXREG_DDSPDRIVERPATH ++ *! 10-Nov-1997 cr: Added WSXREG_INFPATH, WSXREG_WINDEVICEPATH, ++ *! WSXREG_WINCURVERSION ++ *! 21-Oct-1997 cr: Added WSXREG_BUSTYPE. ++ *! 08-Sep-1997 cr: Added WSXREG_SERVICES, WSXREG_SERVICENAME and ++ *! WSXREG_CLASSINDEX. ++ *! 30-Aug-1997 cr: Added WSXREG_SOFTWAREPATHNT & WSXREG_WBCLASSGUID. ++ *! 24-Mar-1997 gp: Added MAXCHIPINFOSUBKEY def. ++ *! 18-Feb-1997 cr: Changed Version1.1 -> Version1.0 ++ *! 12-Feb-1997 cr: Changed WinSPOX -> WinBRIDGE. ++ *! 11-Dec-1996 gp: Added Perf key name in WinSPOX Config. ++ *! 22-Jul-1996 gp: Added Trace key name. ++ *! 30-May-1996 cr: Created. ++ */ ++ ++#ifndef DBREG_ ++#define DBREG_ 1 /* Defined as "1" so InstallShield programs compile. */ ++ ++#define REG_MGR_OBJECT 1 ++#define REG_DRV_OBJECT 2 ++/* general registry definitions */ ++#define MAXREGPATHLENGTH 255 /* Max registry path length. Also the ++ max registry value length. */ ++#define DSPTYPE_55 6 /* This is the DSP Chip type for 55 */ ++#define DSPTYPE_64 0x99 ++#define IVA_ARM7 0x97 /* This is the DSP Chip type for IVA/ARM7 */ ++ ++#define DSPPROCTYPE_C55 5510 ++#define DSPPROCTYPE_C64 6410 ++#define IVAPROCTYPE_ARM7 470 ++/* registry */ ++#define DEVNODESTRING "DevNode" /* u32 devnode */ ++#define CONFIG "Software\\TexasInstruments\\DirectDSP\\Config" ++#define DRVOBJECT "DrvObject" ++#define MGROBJECT "MgrObject" ++#define CLASS "Device" /* device class */ ++#define TRACE "Trace" /* GT Trace settings. */ ++#define PERFR "Perf" /* Enable perf bool. 
*/ ++#define ROOT "Root" /* root dir */ ++ ++/* MiniDriver related definitions */ ++/* The following definitions are under "Drivers\\DirectDSP\\Device\\XXX " ++ * Where XXX is the device or board name */ ++ ++#define WMDFILENAME "MiniDriver" /* WMD entry name */ ++#define CHIPTYPE "ChipType" /* Chip type */ ++#define CHIPNUM "NumChips" /* Number of chips */ ++#define NUMPROCS "NumOfProcessors" /* Number of processors */ ++#define DEFEXEC "DefaultExecutable" /* Default executable */ ++#define AUTOSTART "AutoStart" /* Statically load flag */ ++#define IVAAUTOSTART "IvaAutoStart" /* Statically load flag */ ++#define BOARDNAME "BoardName" /* Name of the Board */ ++#define UNITNUMBER "UnitNumber" /* Unit # of the Board */ ++#define BUSTYPE "BusType" /* Bus type board is on */ ++#define BUSNUMBER "BusNumber" /* Bus number board is on */ ++#define CURRENTCONFIG "CurrentConfig" /* Current resources */ ++#define PCIVENDEVID "VendorDeviceId" /* The board's id */ ++#define INFPATH "InfPath" /* wmd's inf filename */ ++#define DEVOBJECT "DevObject" ++#define ZLFILENAME "ZLFileName" /* Name of ZL file */ ++#define WORDSIZE "WordSize" /* NumBytes in DSP Word */ ++#define SHMSIZE "SHMSize" /* Size of SHM reservd on MPU */ ++#define IVAEXTMEMSIZE "IVAEXTMEMSize" /* IVA External Memeory size */ ++#define TCWORDSWAP "TCWordSwap" /* Traffic Contoller Word Swap */ ++#define DSPRESOURCES "DspTMSResources" /* C55 DSP resurces on OMAP */ ++#define IVA1RESOURCES "ARM7IvaResources" /* ARM7 IVA resurces on OMAP */ ++#define PHYSMEMPOOLBASE "PhysicalMemBase" /* Physical mem passed to driver */ ++#define PHYSMEMPOOLSIZE "PhysicalMemSize" /* Physical mem passed to driver */ ++ ++#endif /* DBREG_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbtype.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbtype.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dbtype.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dbtype.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,103 @@ ++/* ++ * dbtype.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dbtype.h ======== ++ * Description: ++ * This header defines data types for DSP/BIOS Bridge APIs and device ++ * driver modules. It also defines the Hungarian ++ * prefix to use for each base type. ++ * ++ * ++ *! Revision History: ++ *! ================= ++ *! 23-Nov-2002 gp: Purpose -> Description in file header. ++ *! 13-Feb-2001 kc: Name changed from ddsptype.h dbtype.h. ++ *! 09-Oct-2000 jeh Added CHARACTER. ++ *! 11-Aug-2000 ag: Added 'typedef void void'. ++ *! 08-Apr-2000 ww: Cloned. ++ */ ++ ++#ifndef DBTYPE_ ++#define DBTYPE_ ++ ++/*============================================================================*/ ++/* Argument specification syntax */ ++/*============================================================================*/ ++ ++#ifndef IN ++#define IN /* Following parameter is for input. 
*/ ++#endif ++ ++#ifndef OUT ++#define OUT /* Following parameter is for output. */ ++#endif ++ ++#ifndef OPTIONAL ++#define OPTIONAL /* Function may optionally use previous parameter. */ ++#endif ++ ++#ifndef CONST ++#define CONST const ++#endif ++ ++/*============================================================================*/ ++/* Boolean constants */ ++/*============================================================================*/ ++ ++#ifndef FALSE ++#define FALSE 0 ++#endif ++#ifndef TRUE ++#define TRUE 1 ++#endif ++ ++/*============================================================================*/ ++/* NULL (Definition is language specific) */ ++/*============================================================================*/ ++ ++#ifndef NULL ++#define NULL ((void *)0) /* Null pointer. */ ++#endif ++ ++/*============================================================================*/ ++/* NULL character (normally used for string termination) */ ++/*============================================================================*/ ++ ++#ifndef NULL_CHAR ++#define NULL_CHAR '\0' /* Null character. */ ++#endif ++ ++/*============================================================================*/ ++/* Basic Type definitions (with Prefixes for Hungarian notation) */ ++/*============================================================================*/ ++ ++#ifndef OMAPBRIDGE_TYPES ++#define OMAPBRIDGE_TYPES ++typedef volatile unsigned short REG_UWORD16; ++#endif ++ ++typedef void *HANDLE; /* h */ ++ ++#define TEXT(x) x ++ ++#define DLLIMPORT ++#define DLLEXPORT ++ ++/* Define DSPAPIDLL correctly in dspapi.h */ ++#define _DSPSYSDLL32_ ++ ++#endif /* DBTYPE_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/_dcd.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/_dcd.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/_dcd.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/_dcd.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,212 @@ ++/* ++ * _dcd.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _dcd.h ======== ++ * Description: ++ * Includes the wrapper functions called directly by the ++ * DeviceIOControl interface. ++ * ++ * Public Functions: ++ * WCD_CallDevIOCtl ++ * WCD_Init ++ * WCD_InitComplete2 ++ * WCD_Exit ++ * WRAP_* ++ * ++ * Notes: ++ * Compiled with CDECL calling convention. ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature ++ *! 30-Jan-2002 ag Renamed CMMWRAP_AllocBuf to CMMWRAP_CallocBuf. ++ *! 22-Nov-2000 kc: Added MGRWRAP_GetPerf_Data to acquire PERF stats. ++ *! 27-Oct-2000 jeh Added NODEWRAP_AllocMsgBuf, NODEWRAP_FreeMsgBuf. Removed ++ *! NODEWRAP_GetMessageStream. ++ *! 10-Oct-2000 ag: Added user CMM wrappers. ++ *! 04-Aug-2000 rr: MEMWRAP and UTIL_Wrap added. ++ *! 27-Jul-2000 rr: NODEWRAP, STRMWRAP added. ++ *! 
27-Jun-2000 rr: MGRWRAP fxns added.IFDEF to build for PM or DSP/BIOS Bridge ++ *! 03-Dec-1999 rr: WCD_InitComplete2 enabled for BRD_AutoStart. ++ *! 09-Nov-1999 kc: Added MEMRY. ++ *! 02-Nov-1999 ag: Added CHNL. ++ *! 08-Oct-1999 rr: Utilwrap_Testdll fxn added ++ *! 24-Sep-1999 rr: header changed from _wcd.h to _dcd.h ++ *! 09-Sep-1997 gp: Created. ++ */ ++ ++#ifndef _WCD_ ++#define _WCD_ ++ ++#include ++ ++/* ++ * ======== WCD_CallDevIOCtl ======== ++ * Purpose: ++ * Call the (wrapper) function for the corresponding WCD IOCTL. ++ * Parameters: ++ * cmd: IOCTL id, base 0. ++ * args: Argument structure. ++ * pResult: ++ * Returns: ++ * DSP_SOK if command called; DSP_EINVALIDARG if command not in IOCTL ++ * table. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS WCD_CallDevIOCtl(unsigned int cmd, ++ union Trapped_Args *args, ++ u32 *pResult, void *pr_ctxt); ++ ++/* ++ * ======== WCD_Init ======== ++ * Purpose: ++ * Initialize WCD modules, and export WCD services to WMD's. ++ * This procedure is called when the class driver is loaded. ++ * Parameters: ++ * Returns: ++ * TRUE if success; FALSE otherwise. ++ * Requires: ++ * Ensures: ++ */ ++ extern bool WCD_Init(void); ++ ++/* ++ * ======== WCD_InitComplete2 ======== ++ * Purpose: ++ * Perform any required WCD, and WMD initialization which ++ * cannot not be performed in WCD_Init(void) or DEV_StartDevice() due ++ * to the fact that some services are not yet ++ * completely initialized. ++ * Parameters: ++ * Returns: ++ * DSP_SOK: Allow this device to load ++ * DSP_EFAIL: Failure. ++ * Requires: ++ * WCD initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS WCD_InitComplete2(void); ++ ++/* ++ * ======== WCD_Exit ======== ++ * Purpose: ++ * Exit all modules initialized in WCD_Init(void). ++ * This procedure is called when the class driver is unloaded. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * WCD_Init(void) was previously called. ++ * Ensures: ++ * Resources acquired in WCD_Init(void) are freed. 
++ */ ++ extern void WCD_Exit(void); ++ ++/* MGR wrapper functions */ ++ extern u32 MGRWRAP_EnumNode_Info(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 MGRWRAP_EnumProc_Info(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 MGRWRAP_RegisterObject(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 MGRWRAP_UnregisterObject(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 MGRWRAP_WaitForBridgeEvents(union Trapped_Args *args, ++ void *pr_ctxt); ++ ++#ifndef RES_CLEANUP_DISABLE ++ extern u32 MGRWRAP_GetProcessResourcesInfo(union Trapped_Args *args, ++ void *pr_ctxt); ++#endif ++ ++ ++/* CPRC (Processor) wrapper Functions */ ++ extern u32 PROCWRAP_Attach(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_Ctrl(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_Detach(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_EnumNode_Info(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 PROCWRAP_EnumResources(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 PROCWRAP_GetState(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_GetTrace(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_Load(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_RegisterNotify(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 PROCWRAP_Start(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_ReserveMemory(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 PROCWRAP_UnReserveMemory(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 PROCWRAP_Map(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_UnMap(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_FlushMemory(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 PROCWRAP_Stop(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 PROCWRAP_InvalidateMemory(union Trapped_Args *args, ++ void *pr_ctxt); ++ ++/* NODE wrapper functions */ ++ extern u32 NODEWRAP_Allocate(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_AllocMsgBuf(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_ChangePriority(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_Connect(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_Create(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_Delete(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_FreeMsgBuf(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_GetAttr(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_GetMessage(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_Pause(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_PutMessage(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_RegisterNotify(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_Run(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 NODEWRAP_Terminate(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 NODEWRAP_GetUUIDProps(union Trapped_Args *args, ++ void *pr_ctxt); ++ ++/* STRM wrapper functions */ ++ extern u32 STRMWRAP_AllocateBuffer(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 STRMWRAP_Close(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 STRMWRAP_FreeBuffer(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 STRMWRAP_GetEventHandle(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 STRMWRAP_GetInfo(union Trapped_Args *args, void 
*pr_ctxt); ++ extern u32 STRMWRAP_Idle(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 STRMWRAP_Issue(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 STRMWRAP_Open(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 STRMWRAP_Reclaim(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 STRMWRAP_RegisterNotify(union Trapped_Args *args, ++ void *pr_ctxt); ++ extern u32 STRMWRAP_Select(union Trapped_Args *args, void *pr_ctxt); ++ ++ extern u32 CMMWRAP_CallocBuf(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 CMMWRAP_FreeBuf(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 CMMWRAP_GetHandle(union Trapped_Args *args, void *pr_ctxt); ++ extern u32 CMMWRAP_GetInfo(union Trapped_Args *args, void *pr_ctxt); ++ ++#endif /* _WCD_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dehdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dehdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dehdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dehdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,42 @@ ++/* ++ * dehdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dehdefs.h ======== ++ * Purpose: ++ * Definition for mini-driver module DEH. ++ * ++ *! Revision History: ++ *! ================ ++ *! 17-Dec-2001 ag: added #include for shared mailbox codes. ++ *! 10-Dec-2001 kc: added DEH error base value and error max value. ++ *! 11-Sep-2001 kc: created. ++ */ ++ ++#ifndef DEHDEFS_ ++#define DEHDEFS_ ++ ++#include /* shared mailbox codes */ ++ ++/* DEH object manager */ ++ struct DEH_MGR; ++ ++/* Magic code used to determine if DSP signaled exception. */ ++#define DEH_BASE MBX_DEH_BASE ++#define DEH_USERS_BASE MBX_DEH_USERS_BASE ++#define DEH_LIMIT MBX_DEH_LIMIT ++ ++#endif /* _DEHDEFS_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/devdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/devdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/devdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/devdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,35 @@ ++/* ++ * devdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== devdefs.h ======== ++ * Purpose: ++ * Definition of common include typedef between wmd.h and dev.h. 
Required ++ * to break circular dependency between WMD and DEV include files. ++ * ++ *! Revision History: ++ *! ================ ++ *! 12-Nov-1996 gp: Renamed from dev1.h. ++ *! 30-May-1996 gp: Broke out from dev.h ++ */ ++ ++#ifndef DEVDEFS_ ++#define DEVDEFS_ ++ ++/* WCD Device Object */ ++ struct DEV_OBJECT; ++ ++#endif /* DEVDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dev.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dev.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dev.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dev.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,785 @@ ++/* ++ * dev.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dev.h ======== ++ * Description: ++ * 'Bridge Mini-driver device operations. ++ * ++ * Public Functions: ++ * DEV_BrdWriteFxn ++ * DEV_CreateDevice ++ * DEV_Create2 ++ * DEV_Destroy2 ++ * DEV_DestroyDevice ++ * DEV_GetChnlMgr ++ * DEV_GetCmmMgr ++ * DEV_GetCodMgr ++ * DEV_GetDehMgr ++ * DEV_GetDevNode ++ * DEV_GetDSPWordSize ++ * DEV_GetFirst ++ * DEV_GetIntfFxns ++ * DEV_GetIOMgr ++ * DEV_GetMsgMgr ++ * DEV_GetNext ++ * DEV_GetNodeManager ++ * DEV_GetSymbol ++ * DEV_GetWMDContext ++ * DEV_Exit ++ * DEV_Init ++ * DEV_InsertProcObject ++ * DEV_IsLocked ++ * DEV_NotifyClient ++ * DEV_RegisterNotify ++ * DEV_ReleaseCodMgr ++ * DEV_RemoveDevice ++ * DEV_RemoveProcObject ++ * DEV_SetChnlMgr ++ * DEV_SetMsgMgr ++ * DEV_SetLockOwner ++ * DEV_StartDevice ++ * ++ *! Revision History: ++ *! ================ ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature - Dev_GetDmmMgr ++ *! 09-Feb-2004 vp Added functions required for IVA ++ *! 25-Feb-2003 swa PMGR Code Review changes incorporated ++ *! 05-Nov-2001 kc: Added DEV_GetDehMgr. ++ *! 05-Dec-2000 jeh Added DEV_SetMsgMgr. ++ *! 29-Nov-2000 rr: Incorporated code review changes. ++ *! 17-Nov-2000 jeh Added DEV_GetMsgMgr. ++ *! 05-Oct-2000 rr: DEV_Create2 & DEV_Destroy2 Added. ++ *! 02-Oct-2000 rr: Added DEV_GetNodeManager. ++ *! 11-Aug-2000 ag: Added DEV_GetCmmMgr() for shared memory management. ++ *! 10-Aug-2000 rr: DEV_InsertProcObject/RemoveProcObject added. ++ *! 06-Jun-2000 jeh Added DEV_GetSymbol(). ++ *! 05-Nov-1999 kc: Updated function prototypes. ++ *! 08-Oct-1997 cr: Added explicit CDECL function identifiers. ++ *! 07-Nov-1996 gp: Updated for code review. ++ *! 22-Oct-1996 gp: Added DEV_CleanupProcessState(). ++ *! 29-May-1996 gp: Changed DEV_HDEVNODE --> CFG_HDEVNODE. ++ *! 18-May-1996 gp: Created. ++ */ ++ ++#ifndef DEV_ ++#define DEV_ ++ ++/* ----------------------------------- Module Dependent Headers */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++ ++/* ++ * ======== DEV_BrdWriteFxn ======== ++ * Purpose: ++ * Exported function to be used as the COD write function. 
This function ++ * is passed a handle to a DEV_hObject by ZL in pArb, then calls the ++ * device's WMD_BRD_Write() function. ++ * Parameters: ++ * pArb: Handle to a Device Object. ++ * hDevContext: Handle to mini-driver defined device info. ++ * dwDSPAddr: Address on DSP board (Destination). ++ * pHostBuf: Pointer to host buffer (Source). ++ * ulNumBytes: Number of bytes to transfer. ++ * ulMemType: Memory space on DSP to which to transfer. ++ * Returns: ++ * Number of bytes written. Returns 0 if the DEV_hObject passed in via ++ * pArb is invalid. ++ * Requires: ++ * DEV Initialized. ++ * pHostBuf != NULL ++ * Ensures: ++ */ ++ extern u32 DEV_BrdWriteFxn(void *pArb, ++ u32 ulDspAddr, ++ void *pHostBuf, ++ u32 ulNumBytes, u32 nMemSpace); ++ ++/* ++ * ======== DEV_CreateDevice ======== ++ * Purpose: ++ * Called by the operating system to load the 'Bridge Mini Driver for a ++ * 'Bridge device. ++ * Parameters: ++ * phDevObject: Ptr to location to receive the device object handle. ++ * pstrWMDFileName: Name of WMD PE DLL file to load. If the absolute ++ * path is not provided, the file is loaded through ++ * 'Bridge's module search path. ++ * pHostConfig: Host configuration information, to be passed down ++ * to the WMD when WMD_DEV_Create() is called. ++ * pDspConfig: DSP resources, to be passed down to the WMD when ++ * WMD_DEV_Create() is called. ++ * hDevNode: Platform (Windows) specific device node. ++ * Returns: ++ * DSP_SOK: Module is loaded, device object has been created ++ * DSP_EMEMORY: Insufficient memory to create needed resources. ++ * DEV_E_NEWWMD: The WMD was compiled for a newer version of WCD. ++ * DEV_E_NULLWMDINTF: WMD passed back a NULL Fxn Interface Struct Ptr ++ * DEV_E_NOCODMODULE: No ZL file name was specified in the registry ++ * for this hDevNode. ++ * LDR_E_FILEUNABLETOOPEN: Unable to open the specified WMD. ++ * LDR_E_NOMEMORY: PELDR is out of resources. ++ * DSP_EFAIL: Unable to find WMD entry point function. ++ * COD_E_NOZLFUNCTIONS: One or more ZL functions exports not found. ++ * COD_E_ZLCREATEFAILED: Unable to load ZL DLL. ++ * Requires: ++ * DEV Initialized. ++ * phDevObject != NULL. ++ * pstrWMDFileName != NULL. ++ * pHostConfig != NULL. ++ * pDspConfig != NULL. ++ * Ensures: ++ * DSP_SOK: *phDevObject will contain handle to the new device object. ++ * Otherwise, does not create the device object, ensures the WMD module is ++ * unloaded, and sets *phDevObject to NULL. ++ */ ++ extern DSP_STATUS DEV_CreateDevice(OUT struct DEV_OBJECT ++ **phDevObject, ++ IN CONST char *pstrWMDFileName, ++ IN CONST struct CFG_HOSTRES ++ *pHostConfig, ++ IN CONST struct CFG_DSPRES ++ *pDspConfig, ++ struct CFG_DEVNODE *hDevNode); ++ ++/* ++ * ======== DEV_CreateIVADevice ======== ++ * Purpose: ++ * Called by the operating system to load the 'Bridge Mini Driver for IVA. ++ * Parameters: ++ * phDevObject: Ptr to location to receive the device object handle. ++ * pstrWMDFileName: Name of WMD PE DLL file to load. If the absolute ++ * path is not provided, the file is loaded through ++ * 'Bridge's module search path. ++ * pHostConfig: Host configuration information, to be passed down ++ * to the WMD when WMD_DEV_Create() is called. ++ * pDspConfig: DSP resources, to be passed down to the WMD when ++ * WMD_DEV_Create() is called. ++ * hDevNode: Platform (Windows) specific device node. ++ * Returns: ++ * DSP_SOK: Module is loaded, device object has been created ++ * DSP_EMEMORY: Insufficient memory to create needed resources. 
++ * DEV_E_NEWWMD: The WMD was compiled for a newer version of WCD. ++ * DEV_E_NULLWMDINTF: WMD passed back a NULL Fxn Interface Struct Ptr ++ * DEV_E_NOCODMODULE: No ZL file name was specified in the registry ++ * for this hDevNode. ++ * LDR_E_FILEUNABLETOOPEN: Unable to open the specified WMD. ++ * LDR_E_NOMEMORY: PELDR is out of resources. ++ * DSP_EFAIL: Unable to find WMD entry point function. ++ * COD_E_NOZLFUNCTIONS: One or more ZL functions exports not found. ++ * COD_E_ZLCREATEFAILED: Unable to load ZL DLL. ++ * Requires: ++ * DEV Initialized. ++ * phDevObject != NULL. ++ * pstrWMDFileName != NULL. ++ * pHostConfig != NULL. ++ * pDspConfig != NULL. ++ * Ensures: ++ * DSP_SOK: *phDevObject will contain handle to the new device object. ++ * Otherwise, does not create the device object, ensures the WMD module is ++ * unloaded, and sets *phDevObject to NULL. ++ */ ++ extern DSP_STATUS DEV_CreateIVADevice(OUT struct DEV_OBJECT ++ **phDevObject, ++ IN CONST char *pstrWMDFileName, ++ IN CONST struct CFG_HOSTRES *pHostConfig, ++ IN CONST struct CFG_DSPRES *pDspConfig, ++ struct CFG_DEVNODE *hDevNode); ++ ++/* ++ * ======== DEV_Create2 ======== ++ * Purpose: ++ * After successful loading of the image from WCD_InitComplete2 ++ * (PROC Auto_Start) or PROC_Load this fxn is called. This creates ++ * the Node Manager and updates the DEV Object. ++ * Parameters: ++ * hDevObject: Handle to device object created with DEV_CreateDevice(). ++ * Returns: ++ * DSP_SOK: Successful Creation of Node Manager ++ * DSP_EFAIL: Some Error Occurred. ++ * Requires: ++ * DEV Initialized ++ * Valid hDevObject ++ * Ensures: ++ * DSP_SOK and hDevObject->hNodeMgr != NULL ++ * else hDevObject->hNodeMgr == NULL ++ */ ++ extern DSP_STATUS DEV_Create2(IN struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== DEV_Destroy2 ======== ++ * Purpose: ++ * Destroys the Node manager for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with DEV_CreateDevice(). ++ * Returns: ++ * DSP_SOK: Successful Creation of Node Manager ++ * DSP_EFAIL: Some Error Occurred. ++ * Requires: ++ * DEV Initialized ++ * Valid hDevObject ++ * Ensures: ++ * DSP_SOK and hDevObject->hNodeMgr == NULL ++ * else DSP_EFAIL. ++ */ ++ extern DSP_STATUS DEV_Destroy2(IN struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== DEV_DestroyDevice ======== ++ * Purpose: ++ * Destroys the channel manager for this device, if any, calls ++ * WMD_DEV_Destroy(), and then attempts to unload the WMD module. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * DSP_EFAIL: The WMD failed it's WMD_DEV_Destroy() function. ++ * Requires: ++ * DEV Initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS DEV_DestroyDevice(struct DEV_OBJECT ++ *hDevObject); ++ ++/* ++ * ======== DEV_GetChnlMgr ======== ++ * Purpose: ++ * Retrieve the handle to the channel manager created for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * *phMgr: Ptr to location to store handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phMgr != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phMgr contains a handle to a channel manager object, ++ * or NULL. ++ * else: *phMgr is NULL. 
++ */ ++ extern DSP_STATUS DEV_GetChnlMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct CHNL_MGR **phMgr); ++ ++/* ++ * ======== DEV_GetCmmMgr ======== ++ * Purpose: ++ * Retrieve the handle to the shared memory manager created for this ++ * device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * *phMgr: Ptr to location to store handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phMgr != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phMgr contains a handle to a channel manager object, ++ * or NULL. ++ * else: *phMgr is NULL. ++ */ ++ extern DSP_STATUS DEV_GetCmmMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct CMM_OBJECT **phMgr); ++ ++/* ++ * ======== DEV_GetDmmMgr ======== ++ * Purpose: ++ * Retrieve the handle to the dynamic memory manager created for this ++ * device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * *phMgr: Ptr to location to store handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phMgr != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phMgr contains a handle to a channel manager object, ++ * or NULL. ++ * else: *phMgr is NULL. ++ */ ++ extern DSP_STATUS DEV_GetDmmMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct DMM_OBJECT **phMgr); ++ ++/* ++ * ======== DEV_GetCodMgr ======== ++ * Purpose: ++ * Retrieve the COD manager create for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * *phCodMgr: Ptr to location to store handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phCodMgr != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phCodMgr contains a handle to a COD manager object. ++ * else: *phCodMgr is NULL. ++ */ ++ extern DSP_STATUS DEV_GetCodMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct COD_MANAGER **phCodMgr); ++ ++/* ++ * ======== DEV_GetDehMgr ======== ++ * Purpose: ++ * Retrieve the DEH manager created for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with DEV_CreateDevice(). ++ * *phDehMgr: Ptr to location to store handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phDehMgr != NULL. ++ * DEH Initialized. ++ * Ensures: ++ * DSP_SOK: *phDehMgr contains a handle to a DEH manager object. ++ * else: *phDehMgr is NULL. ++ */ ++ extern DSP_STATUS DEV_GetDehMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct DEH_MGR **phDehMgr); ++ ++/* ++ * ======== DEV_GetDevNode ======== ++ * Purpose: ++ * Retrieve the platform specific device ID for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * phDevNode: Ptr to location to get the device node handle. ++ * Returns: ++ * DSP_SOK: In Win95, returns a DEVNODE in *hDevNode; In NT, ??? ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phDevNode != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phDevNode contains a platform specific device ID; ++ * else: *phDevNode is NULL. ++ */ ++ extern DSP_STATUS DEV_GetDevNode(struct DEV_OBJECT *hDevObject, ++ OUT struct CFG_DEVNODE **phDevNode); ++ ++/* ++ * ======== DEV_GetDevType ======== ++ * Purpose: ++ * Retrieve the platform specific device ID for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). 
++ * phDevNode: Ptr to location to get the device node handle. ++ * Returns: ++ * DSP_SOK: Success ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phDevNode != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phDevNode contains a platform specific device ID; ++ * else: *phDevNode is NULL. ++ */ ++ extern DSP_STATUS DEV_GetDevType(struct DEV_OBJECT *hdevObject, ++ u32 *devType); ++ ++/* ++ * ======== DEV_GetFirst ======== ++ * Purpose: ++ * Retrieve the first Device Object handle from an internal linked list of ++ * of DEV_OBJECTs maintained by DEV. ++ * Parameters: ++ * Returns: ++ * NULL if there are no device objects stored; else ++ * a valid DEV_HOBJECT. ++ * Requires: ++ * No calls to DEV_CreateDevice or DEV_DestroyDevice (which my modify the ++ * internal device object list) may occur between calls to DEV_GetFirst ++ * and DEV_GetNext. ++ * Ensures: ++ * The DEV_HOBJECT returned is valid. ++ * A subsequent call to DEV_GetNext will return the next device object in ++ * the list. ++ */ ++ extern struct DEV_OBJECT *DEV_GetFirst(void); ++ ++/* ++ * ======== DEV_GetIntfFxns ======== ++ * Purpose: ++ * Retrieve the WMD interface function structure for the loaded WMD. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * *ppIntfFxns: Ptr to location to store fxn interface. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * ppIntfFxns != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *ppIntfFxns contains a pointer to the WMD interface; ++ * else: *ppIntfFxns is NULL. ++ */ ++ extern DSP_STATUS DEV_GetIntfFxns(struct DEV_OBJECT *hDevObject, ++ OUT struct WMD_DRV_INTERFACE **ppIntfFxns); ++ ++/* ++ * ======== DEV_GetIOMgr ======== ++ * Purpose: ++ * Retrieve the handle to the IO manager created for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * *phMgr: Ptr to location to store handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phMgr != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phMgr contains a handle to an IO manager object. ++ * else: *phMgr is NULL. ++ */ ++ extern DSP_STATUS DEV_GetIOMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct IO_MGR **phMgr); ++ ++/* ++ * ======== DEV_GetNext ======== ++ * Purpose: ++ * Retrieve the next Device Object handle from an internal linked list of ++ * of DEV_OBJECTs maintained by DEV, after having previously called ++ * DEV_GetFirst() and zero or more DEV_GetNext ++ * Parameters: ++ * hDevObject: Handle to the device object returned from a previous ++ * call to DEV_GetFirst() or DEV_GetNext(). ++ * Returns: ++ * NULL if there are no further device objects on the list or hDevObject ++ * was invalid; ++ * else the next valid DEV_HOBJECT in the list. ++ * Requires: ++ * No calls to DEV_CreateDevice or DEV_DestroyDevice (which my modify the ++ * internal device object list) may occur between calls to DEV_GetFirst ++ * and DEV_GetNext. ++ * Ensures: ++ * The DEV_HOBJECT returned is valid. ++ * A subsequent call to DEV_GetNext will return the next device object in ++ * the list. ++ */ ++ extern struct DEV_OBJECT *DEV_GetNext(struct DEV_OBJECT ++ *hDevObject); ++ ++/* ++ * ========= DEV_GetMsgMgr ======== ++ * Purpose: ++ * Retrieve the MSG Manager Handle from the DevObject. ++ * Parameters: ++ * hDevObject: Handle to the Dev Object ++ * phMsgMgr: Location where MSG Manager handle will be returned. 
++ * Returns: ++ * Requires: ++ * DEV Initialized. ++ * Valid hDevObject. ++ * phNodeMgr != NULL. ++ * Ensures: ++ */ ++ extern void DEV_GetMsgMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct MSG_MGR **phMsgMgr); ++ ++/* ++ * ========= DEV_GetNodeManager ======== ++ * Purpose: ++ * Retrieve the Node Manager Handle from the DevObject. It is an ++ * accessor function ++ * Parameters: ++ * hDevObject: Handle to the Dev Object ++ * phNodeMgr: Location where Handle to the Node Manager will be ++ * returned.. ++ * Returns: ++ * DSP_SOK: Success ++ * DSP_EHANDLE: Invalid Dev Object handle. ++ * Requires: ++ * DEV Initialized. ++ * phNodeMgr is not null ++ * Ensures: ++ * DSP_SOK: *phNodeMgr contains a handle to a Node manager object. ++ * else: *phNodeMgr is NULL. ++ */ ++ extern DSP_STATUS DEV_GetNodeManager(struct DEV_OBJECT ++ *hDevObject, ++ OUT struct NODE_MGR **phNodeMgr); ++ ++/* ++ * ======== DEV_GetSymbol ======== ++ * Purpose: ++ * Get the value of a symbol in the currently loaded program. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * pstrSym: Name of symbol to look up. ++ * pulValue: Ptr to symbol value. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * COD_E_NOSYMBOLSLOADED: Symbols have not been loaded onto the board. ++ * COD_E_SYMBOLNOTFOUND: The symbol could not be found. ++ * Requires: ++ * pstrSym != NULL. ++ * pulValue != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *pulValue contains the symbol value; ++ */ ++ extern DSP_STATUS DEV_GetSymbol(struct DEV_OBJECT *hDevObject, ++ IN CONST char *pstrSym, ++ OUT u32 *pulValue); ++ ++/* ++ * ======== DEV_GetWMDContext ======== ++ * Purpose: ++ * Retrieve the WMD Context handle, as returned by the WMD_Create fxn. ++ * Parameters: ++ * hDevObject: Handle to device object created with DEV_CreateDevice() ++ * *phWmdContext: Ptr to location to store context handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * phWmdContext != NULL. ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: *phWmdContext contains context handle; ++ * else: *phWmdContext is NULL; ++ */ ++ extern DSP_STATUS DEV_GetWMDContext(struct DEV_OBJECT *hDevObject, ++ OUT struct WMD_DEV_CONTEXT **phWmdContext); ++ ++/* ++ * ======== DEV_Exit ======== ++ * Purpose: ++ * Decrement reference count, and free resources when reference count is ++ * 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * DEV is initialized. ++ * Ensures: ++ * When reference count == 0, DEV's private resources are freed. ++ */ ++ extern void DEV_Exit(void); ++ ++/* ++ * ======== DEV_Init ======== ++ * Purpose: ++ * Initialize DEV's private state, keeping a reference count on each call. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * TRUE: A requirement for the other public DEV functions. ++ */ ++ extern bool DEV_Init(void); ++ ++/* ++ * ======== DEV_IsLocked ======== ++ * Purpose: ++ * Predicate function to determine if the device has been ++ * locked by a client for exclusive access. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * Returns: ++ * DSP_SOK: TRUE: device has been locked. ++ * DSP_SFALSE: FALSE: device not locked. ++ * DSP_EHANDLE: hDevObject was invalid. ++ * Requires: ++ * DEV Initialized. 
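
/*
 * A minimal usage sketch for the DEV iteration and accessor functions
 * documented above: walk the internal device list with DEV_GetFirst() /
 * DEV_GetNext() and query each device for its channel manager and a DSP
 * symbol. The function name, the symbol string and the DSP_SUCCEEDED()
 * status macro are assumptions made for this illustration only.
 */
static void dev_walk_example(void)
{
	struct DEV_OBJECT *hDev;
	struct CHNL_MGR *hChnlMgr;
	u32 symValue;

	if (!DEV_Init())
		return;

	/* No DEV_CreateDevice()/DEV_DestroyDevice() may run during the walk */
	for (hDev = DEV_GetFirst(); hDev != NULL; hDev = DEV_GetNext(hDev)) {
		if (DSP_SUCCEEDED(DEV_GetChnlMgr(hDev, &hChnlMgr)))
			/* hChnlMgr is the device's channel manager (or NULL) */
			(void)hChnlMgr;

		if (DSP_SUCCEEDED(DEV_GetSymbol(hDev, "_EXAMPLE_SYM", &symValue)))
			/* symValue now holds the symbol's value */
			(void)symValue;
	}

	DEV_Exit();
}
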
++ * Ensures: ++ */ ++ extern DSP_STATUS DEV_IsLocked(IN struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== DEV_InsertProcObject ======== ++ * Purpose: ++ * Inserts the Processor Object into the List of PROC Objects ++ * kept in the DEV Object ++ * Parameters: ++ * hProcObject: Handle to the Proc Object ++ * hDevObject Handle to the Dev Object ++ * bAttachedNew Specifies if there are already processors attached ++ * Returns: ++ * DSP_SOK: Successfully inserted into the list ++ * Requires: ++ * hProcObject is not NULL ++ * hDevObject is a valid handle to the DEV. ++ * DEV Initialized. ++ * List(of Proc object in Dev) Exists. ++ * Ensures: ++ * DSP_SOK & the PROC Object is inserted and the list is not empty ++ * Details: ++ * If the List of Proc Object is empty bAttachedNew is TRUE, it indicated ++ * this is the first Processor attaching. ++ * If it is False, there are already processors attached. ++ */ ++ extern DSP_STATUS DEV_InsertProcObject(IN struct DEV_OBJECT ++ *hDevObject, ++ IN u32 hProcObject, ++ OUT bool * ++ pbAlreadyAttached); ++ ++/* ++ * ======== DEV_RemoveProcObject ======== ++ * Purpose: ++ * Search for and remove a Proc object from the given list maintained ++ * by the DEV ++ * Parameters: ++ * pProcObject: Ptr to ProcObject to insert. ++ * pDevObject: Ptr to Dev Object where the list is. ++ * pbAlreadyAttached: Ptr to return the bool ++ * Returns: ++ * DSP_SOK: If successful. ++ * DSP_EFAIL Failure to Remove the PROC Object from the list ++ * Requires: ++ * DevObject is Valid ++ * hProcObject != 0 ++ * pDevObject->procList != NULL ++ * !LST_IsEmpty(pDevObject->procList) ++ * pbAlreadyAttached !=NULL ++ * Ensures: ++ * Details: ++ * List will be deleted when the DEV is destroyed. ++ * ++ */ ++ extern DSP_STATUS DEV_RemoveProcObject(struct DEV_OBJECT ++ *hDevObject, ++ u32 hProcObject); ++ ++/* ++ * ======== DEV_NotifyClients ======== ++ * Purpose: ++ * Notify all clients of this device of a change in device status. ++ * Clients may include multiple users of BRD, as well as CHNL. ++ * This function is asychronous, and may be called by a timer event ++ * set up by a watchdog timer. ++ * Parameters: ++ * hDevObject: Handle to device object created with DEV_CreateDevice(). ++ * ulStatus: A status word, most likely a BRD_STATUS. ++ * Returns: ++ * DSP_SOK: All registered clients were asynchronously notified. ++ * DSP_EINVALIDARG: Invalid hDevObject. ++ * Requires: ++ * DEV Initialized. ++ * Ensures: ++ * DSP_SOK: Notifications are queued by the operating system to be ++ * delivered to clients. This function does not ensure that ++ * the notifications will ever be delivered. ++ */ ++ extern DSP_STATUS DEV_NotifyClients(struct DEV_OBJECT *hDevObject, ++ u32 ulStatus); ++ ++ ++ ++/* ++ * ======== DEV_RemoveDevice ======== ++ * Purpose: ++ * Destroys the Device Object created by DEV_StartDevice. ++ * Parameters: ++ * hDevNode: Device node as it is know to OS. ++ * Returns: ++ * DSP_SOK: If success; ++ * Otherwise. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS DEV_RemoveDevice(struct CFG_DEVNODE *hDevNode); ++ ++/* ++ * ======== DEV_SetChnlMgr ======== ++ * Purpose: ++ * Set the channel manager for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * hMgr: Handle to a channel manager, or NULL. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * DEV Initialized. 
++ * Ensures: ++ */ ++ extern DSP_STATUS DEV_SetChnlMgr(struct DEV_OBJECT *hDevObject, ++ struct CHNL_MGR *hMgr); ++ ++/* ++ * ======== DEV_SetMsgMgr ======== ++ * Purpose: ++ * Set the Message manager for this device. ++ * Parameters: ++ * hDevObject: Handle to device object created with DEV_CreateDevice(). ++ * hMgr: Handle to a message manager, or NULL. ++ * Returns: ++ * Requires: ++ * DEV Initialized. ++ * Ensures: ++ */ ++ extern void DEV_SetMsgMgr(struct DEV_OBJECT *hDevObject, ++ struct MSG_MGR *hMgr); ++ ++/* ++ * ======== DEV_StartDevice ======== ++ * Purpose: ++ * Initializes the new device with the WinBRIDGE environment. This ++ * involves querying CM for allocated resources, querying the registry ++ * for necessary dsp resources (requested in the INF file), and using ++ * this information to create a WinBRIDGE device object. ++ * Parameters: ++ * hDevNode: Device node as it is know to OS. ++ * Returns: ++ * DSP_SOK: If success; ++ * Otherwise. ++ * Requires: ++ * DEV initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS DEV_StartDevice(struct CFG_DEVNODE *hDevNode); ++ ++#endif /* DEV_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dispdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dispdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dispdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dispdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,45 @@ ++/* ++ * dispdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dispdefs.h ======== ++ * Description: ++ * Global DISP constants and types, shared by PROCESSOR, NODE, and DISP. ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Aug-2000 jeh Added fields to DISP_ATTRS. ++ *! 06-Jul-2000 jeh Created. ++ */ ++ ++#ifndef DISPDEFS_ ++#define DISPDEFS_ ++ ++ struct DISP_OBJECT; ++ ++/* Node Dispatcher attributes */ ++ struct DISP_ATTRS { ++ u32 ulChnlOffset; /* Offset of channel ids reserved for RMS */ ++ /* Size of buffer for sending data to RMS */ ++ u32 ulChnlBufSize; ++ DSP_PROCFAMILY procFamily; /* eg, 5000 */ ++ DSP_PROCTYPE procType; /* eg, 5510 */ ++ HANDLE hReserved1; /* Reserved for future use. */ ++ u32 hReserved2; /* Reserved for future use. */ ++ } ; ++ ++#endif /* DISPDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/disp.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/disp.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/disp.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/disp.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,236 @@ ++/* ++ * disp.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== disp.h ======== ++ * ++ * Description: ++ * DSP/BIOS Bridge Node Dispatcher. ++ * ++ * Public Functions: ++ * DISP_Create ++ * DISP_Delete ++ * DISP_Exit ++ * DISP_Init ++ * DISP_NodeChangePriority ++ * DISP_NodeCreate ++ * DISP_NodeDelete ++ * DISP_NodeRun ++ * ++ *! Revision History: ++ *! ================= ++ *! 28-Jan-2003 map Removed DISP_DoCinit(). ++ *! 15-May-2002 jeh Added DISP_DoCinit(). ++ *! 24-Apr-2002 jeh Added DISP_MemWrite(). ++ *! 07-Sep-2001 jeh Added DISP_MemCopy(). ++ *! 10-May-2001 jeh Code review cleanup. ++ *! 08-Aug-2000 jeh Removed DISP_NodeTerminate since it no longer uses RMS. ++ *! 17-Jul-2000 jeh Updates to function headers. ++ *! 19-Jun-2000 jeh Created. ++ */ ++ ++#ifndef DISP_ ++#define DISP_ ++ ++#include ++#include ++#include ++#include ++ ++/* ++ * ======== DISP_Create ======== ++ * Create a NODE Dispatcher object. This object handles the creation, ++ * deletion, and execution of nodes on the DSP target, through communication ++ * with the Resource Manager Server running on the target. Each NODE ++ * Manager object should have exactly one NODE Dispatcher. ++ * ++ * Parameters: ++ * phDispObject: Location to store node dispatcher object on output. ++ * hDevObject: Device for this processor. ++ * pDispAttrs: Node dispatcher attributes. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * DSP_EFAIL: Unable to create dispatcher. ++ * Requires: ++ * DISP_Init(void) called. ++ * pDispAttrs != NULL. ++ * hDevObject != NULL. ++ * phDispObject != NULL. ++ * Ensures: ++ * DSP_SOK: IsValid(*phDispObject). ++ * error: *phDispObject == NULL. ++ */ ++ extern DSP_STATUS DISP_Create(OUT struct DISP_OBJECT **phDispObject, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct DISP_ATTRS *pDispAttrs); ++ ++/* ++ * ======== DISP_Delete ======== ++ * Delete the NODE Dispatcher. ++ * ++ * Parameters: ++ * hDispObject: Node Dispatcher object. ++ * Returns: ++ * Requires: ++ * DISP_Init(void) called. ++ * Valid hDispObject. ++ * Ensures: ++ * hDispObject is invalid. ++ */ ++ extern void DISP_Delete(struct DISP_OBJECT *hDispObject); ++ ++/* ++ * ======== DISP_Exit ======== ++ * Discontinue usage of DISP module. ++ * ++ * Parameters: ++ * Returns: ++ * Requires: ++ * DISP_Init(void) previously called. ++ * Ensures: ++ * Any resources acquired in DISP_Init(void) will be freed when last DISP ++ * client calls DISP_Exit(void). ++ */ ++ extern void DISP_Exit(void); ++ ++/* ++ * ======== DISP_Init ======== ++ * Initialize the DISP module. ++ * ++ * Parameters: ++ * Returns: ++ * TRUE if initialization succeeded, FALSE otherwise. ++ * Ensures: ++ */ ++ extern bool DISP_Init(void); ++ ++/* ++ * ======== DISP_NodeChangePriority ======== ++ * Change the priority of a node currently running on the target. ++ * ++ * Parameters: ++ * hDispObject: Node Dispatcher object. ++ * hNode: Node object representing a node currently ++ * allocated or running on the DSP. ++ * ulFxnAddress: Address of RMS function for changing priority. ++ * nodeEnv: Address of node's environment structure. 
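
/*
 * A rough sketch of the DISP lifecycle described above: the NODE manager
 * creates exactly one dispatcher against its device object and deletes it
 * on teardown. The attribute values, the example function name and the
 * DSP_SUCCEEDED() macro are placeholders for this illustration, not what
 * the bridge actually uses.
 */
static DSP_STATUS disp_lifecycle_example(struct DEV_OBJECT *hDevObject)
{
	struct DISP_OBJECT *hDisp = NULL;
	struct DISP_ATTRS dispAttrs;
	DSP_STATUS status;

	if (!DISP_Init())
		return DSP_EFAIL;

	dispAttrs.ulChnlOffset = 0;		/* channel ids reserved for RMS */
	dispAttrs.ulChnlBufSize = 0x400;	/* buffer for sending data to RMS */
	/* procFamily, procType, etc. would be filled in by the real caller */

	status = DISP_Create(&hDisp, hDevObject, &dispAttrs);
	if (DSP_SUCCEEDED(status))
		DISP_Delete(hDisp);

	DISP_Exit();
	return status;
}
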
++ * nPriority: New priority level to set node's priority to. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * Requires: ++ * DISP_Init(void) called. ++ * Valid hDispObject. ++ * hNode != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DISP_NodeChangePriority(struct DISP_OBJECT ++ *hDispObject, ++ struct NODE_OBJECT *hNode, ++ u32 ulFxnAddr, ++ NODE_ENV nodeEnv, ++ s32 nPriority); ++ ++/* ++ * ======== DISP_NodeCreate ======== ++ * Create a node on the DSP by remotely calling the node's create function. ++ * ++ * Parameters: ++ * hDispObject: Node Dispatcher object. ++ * hNode: Node handle obtained from NODE_Allocate(). ++ * ulFxnAddr: Address or RMS create node function. ++ * ulCreateFxn: Address of node's create function. ++ * pArgs: Arguments to pass to RMS node create function. ++ * pNodeEnv: Location to store node environment pointer on ++ * output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ETASK: Unable to create the node's task or process on the DSP. ++ * DSP_ESTREAM: Stream creation failure on the DSP. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_EUSER: A user-defined failure occurred. ++ * DSP_EFAIL: A failure occurred, unable to create node. ++ * Requires: ++ * DISP_Init(void) called. ++ * Valid hDispObject. ++ * pArgs != NULL. ++ * hNode != NULL. ++ * pNodeEnv != NULL. ++ * NODE_GetType(hNode) != NODE_DEVICE. ++ * Ensures: ++ */ ++ extern DSP_STATUS DISP_NodeCreate(struct DISP_OBJECT *hDispObject, ++ struct NODE_OBJECT *hNode, ++ u32 ulFxnAddr, ++ u32 ulCreateFxn, ++ IN CONST struct NODE_CREATEARGS ++ *pArgs, ++ OUT NODE_ENV *pNodeEnv); ++ ++/* ++ * ======== DISP_NodeDelete ======== ++ * Delete a node on the DSP by remotely calling the node's delete function. ++ * ++ * Parameters: ++ * hDispObject: Node Dispatcher object. ++ * hNode: Node object representing a node currently ++ * loaded on the DSP. ++ * ulFxnAddr: Address or RMS delete node function. ++ * ulDeleteFxn: Address of node's delete function. ++ * nodeEnv: Address of node's environment structure. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * Requires: ++ * DISP_Init(void) called. ++ * Valid hDispObject. ++ * hNode != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS DISP_NodeDelete(struct DISP_OBJECT *hDispObject, ++ struct NODE_OBJECT *hNode, ++ u32 ulFxnAddr, ++ u32 ulDeleteFxn, NODE_ENV nodeEnv); ++ ++/* ++ * ======== DISP_NodeRun ======== ++ * Start execution of a node's execute phase, or resume execution of a node ++ * that has been suspended (via DISP_NodePause()) on the DSP. ++ * ++ * Parameters: ++ * hDispObject: Node Dispatcher object. ++ * hNode: Node object representing a node to be executed ++ * on the DSP. ++ * ulFxnAddr: Address or RMS node execute function. ++ * ulExecuteFxn: Address of node's execute function. ++ * nodeEnv: Address of node's environment structure. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * Requires: ++ * DISP_Init(void) called. ++ * Valid hDispObject. ++ * hNode != NULL. 
++ * Ensures: ++ */ ++ extern DSP_STATUS DISP_NodeRun(struct DISP_OBJECT *hDispObject, ++ struct NODE_OBJECT *hNode, ++ u32 ulFxnAddr, ++ u32 ulExecuteFxn, NODE_ENV nodeEnv); ++ ++#endif /* DISP_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dmm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dmm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dmm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dmm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,84 @@ ++/* ++ * dmm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dmm.h ======== ++ * Purpose: ++ * The Dynamic Memory Mapping(DMM) module manages the DSP Virtual address ++ * space that can be directly mapped to any MPU buffer or memory region ++ * ++ * Public Functions: ++ * ++ *! Revision History: ++ *! ================ ++ *! 20-Feb-2004 sb: Created. ++ *! ++ */ ++ ++#ifndef DMM_ ++#define DMM_ ++ ++#include ++ ++ struct DMM_OBJECT; ++ ++/* DMM attributes used in DMM_Create() */ ++ struct DMM_MGRATTRS { ++ u32 reserved; ++ } ; ++ ++#define DMMPOOLSIZE 0x4000000 ++ ++/* ++ * ======== DMM_GetHandle ======== ++ * Purpose: ++ * Return the dynamic memory manager object for this device. ++ * This is typically called from the client process. ++ */ ++ ++ extern DSP_STATUS DMM_GetHandle(DSP_HPROCESSOR hProcessor, ++ OUT struct DMM_OBJECT **phDmmMgr); ++ ++ extern DSP_STATUS DMM_ReserveMemory(struct DMM_OBJECT *hDmmMgr, ++ u32 size, ++ u32 *pRsvAddr); ++ ++ extern DSP_STATUS DMM_UnReserveMemory(struct DMM_OBJECT *hDmmMgr, ++ u32 rsvAddr); ++ ++ extern DSP_STATUS DMM_MapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, ++ u32 size); ++ ++ extern DSP_STATUS DMM_UnMapMemory(struct DMM_OBJECT *hDmmMgr, ++ u32 addr, ++ u32 *pSize); ++ ++ extern DSP_STATUS DMM_Destroy(struct DMM_OBJECT *hDmmMgr); ++ ++ extern DSP_STATUS DMM_DeleteTables(struct DMM_OBJECT *hDmmMgr); ++ ++ extern DSP_STATUS DMM_Create(OUT struct DMM_OBJECT **phDmmMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct DMM_MGRATTRS *pMgrAttrs); ++ ++ extern bool DMM_Init(void); ++ ++ extern void DMM_Exit(void); ++ ++ extern DSP_STATUS DMM_CreateTables(struct DMM_OBJECT *hDmmMgr, ++ u32 addr, u32 size); ++#endif /* DMM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dpc.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dpc.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dpc.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dpc.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,167 @@ ++/* ++ * dpc.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
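
/*
 * A sketch of the reserve -> map -> unmap -> unreserve sequence implied by
 * the DMM interface above. The size handling and the DSP_SUCCEEDED()/
 * DSP_FAILED() status macros are assumptions made for this illustration.
 */
static DSP_STATUS dmm_map_example(DSP_HPROCESSOR hProcessor, u32 size)
{
	struct DMM_OBJECT *hDmmMgr;
	u32 rsvAddr, unmapSize;
	DSP_STATUS status;

	status = DMM_GetHandle(hProcessor, &hDmmMgr);
	if (DSP_FAILED(status))
		return status;

	/* carve a range out of the DSP virtual address pool */
	status = DMM_ReserveMemory(hDmmMgr, size, &rsvAddr);
	if (DSP_SUCCEEDED(status)) {
		status = DMM_MapMemory(hDmmMgr, rsvAddr, size);
		if (DSP_SUCCEEDED(status))
			DMM_UnMapMemory(hDmmMgr, rsvAddr, &unmapSize);
		DMM_UnReserveMemory(hDmmMgr, rsvAddr);
	}

	return status;
}
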
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dpc.h ======== ++ * Purpose: ++ * Deferred Procedure Call(DPC) Services. ++ * ++ * Public Functions: ++ * DPC_Cancel ++ * DPC_Create ++ * DPC_Destroy ++ * DPC_Exit ++ * DPC_Init ++ * DPC_Schedule ++ * ++ *! Revision History: ++ *! ================ ++ *! 31-Jan-2000 rr: DPC_Destroy ensures Suceess and DPC Object is NULL. ++ *! 21-Jan-2000 ag: Updated comments per code review. ++ *! 06-Jan-2000 ag: Removed DPC_[Lower|Raise]IRQL[From|To]DispatchLevel. ++ *! 14-Jan-1998 gp: Added DPC_[Lower|Raise]IRQL[From|To]DispatchLevel. ++ *! 18-Aug-1997 cr: Added explicit CDECL identifiers. ++ *! 28-Jul-1996 gp: Created. ++ */ ++ ++#ifndef DPC_ ++#define DPC_ ++ ++ struct DPC_OBJECT; ++ ++/* ++ * ======== DPC_PROC ======== ++ * Purpose: ++ * Deferred processing routine. Typically scheduled from an ISR to ++ * complete I/O processing. ++ * Parameters: ++ * pRefData: Ptr to user data: passed in via ISR_ScheduleDPC. ++ * Returns: ++ * Requires: ++ * The DPC should not block, or otherwise acquire resources. ++ * Interrupts to the processor are enabled. ++ * DPC_PROC executes in a critical section. ++ * Ensures: ++ * This DPC will not be reenterred on the same thread. ++ * However, the DPC may take hardware interrupts during execution. ++ * Interrupts to the processor are enabled. ++ */ ++ typedef void(*DPC_PROC) (void *pRefData); ++ ++/* ++ * ======== DPC_Cancel ======== ++ * Purpose: ++ * Cancel a DPC previously scheduled by DPC_Schedule. ++ * Parameters: ++ * hDPC: A DPC object handle created in DPC_Create(). ++ * Returns: ++ * DSP_SOK: Scheduled DPC, if any, is cancelled. ++ * DSP_SFALSE: No DPC is currently scheduled for execution. ++ * DSP_EHANDLE: Invalid hDPC. ++ * Requires: ++ * Ensures: ++ * If the DPC has already executed, is executing, or was not yet ++ * scheduled, this function will have no effect. ++ */ ++ extern DSP_STATUS DPC_Cancel(IN struct DPC_OBJECT *hDPC); ++ ++/* ++ * ======== DPC_Create ======== ++ * Purpose: ++ * Create a DPC object, allowing a client's own DPC procedure to be ++ * scheduled for a call with client reference data. ++ * Parameters: ++ * phDPC: Pointer to location to store DPC object. ++ * pfnDPC: Client's DPC procedure. ++ * pRefData: Pointer to user-defined reference data. ++ * Returns: ++ * DSP_SOK: DPC object created. ++ * DSP_EPOINTER: phDPC == NULL or pfnDPC == NULL. ++ * DSP_EMEMORY: Insufficient memory. ++ * Requires: ++ * Must not be called at interrupt time. ++ * Ensures: ++ * DSP_SOK: DPC object is created; ++ * else: *phDPC is set to NULL. ++ */ ++ extern DSP_STATUS DPC_Create(OUT struct DPC_OBJECT **phDPC, ++ IN DPC_PROC pfnDPC, ++ IN void *pRefData); ++ ++/* ++ * ======== DPC_Destroy ======== ++ * Purpose: ++ * Cancel the last scheduled DPC, and deallocate a DPC object previously ++ * allocated with DPC_Create().Frees the Object only if the thread and ++ * the events are terminated successfuly. ++ * Parameters: ++ * hDPC: A DPC object handle created in DPC_Create(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDPC. ++ * Requires: ++ * All DPC's scheduled for the DPC object must have completed their ++ * processing. 
++ * Ensures: ++ * (SUCCESS && hDPC is NULL) or DSP_EFAILED status ++ */ ++ extern DSP_STATUS DPC_Destroy(IN struct DPC_OBJECT *hDPC); ++ ++/* ++ * ======== DPC_Exit ======== ++ * Purpose: ++ * Discontinue usage of the DPC module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * DPC_Init(void) was previously called. ++ * Ensures: ++ * Resources acquired in DPC_Init(void) are freed. ++ */ ++ extern void DPC_Exit(void); ++ ++/* ++ * ======== DPC_Init ======== ++ * Purpose: ++ * Initialize the DPC module's private state. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * A requirement for each of the other public DPC functions. ++ */ ++ extern bool DPC_Init(void); ++ ++/* ++ * ======== DPC_Schedule ======== ++ * Purpose: ++ * Schedule a deferred procedure call to be executed at a later time. ++ * Latency and order of DPC execution is platform specific. ++ * Parameters: ++ * hDPC: A DPC object handle created in DPC_Create(). ++ * Returns: ++ * DSP_SOK: An event is scheduled for deferred processing. ++ * DSP_EHANDLE: Invalid hDPC. ++ * Requires: ++ * See requirements for DPC_PROC. ++ * Ensures: ++ * DSP_SOK: The DPC will not be called before this function returns. ++ */ ++ extern DSP_STATUS DPC_Schedule(IN struct DPC_OBJECT *hDPC); ++ ++#endif /* DPC_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/drvdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/drvdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/drvdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/drvdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,34 @@ ++/* ++ * drvdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== drvdefs.h ======== ++ * Purpose: ++ * Definition of common include typedef between wmd.h and drv.h. ++ * ++ *! Revision History: ++ *! ================ ++ *! 17-Jul-2000 rr: Created ++ */ ++ ++#ifndef DRVDEFS_ ++#define DRVDEFS_ ++ ++/* WCD Driver Object */ ++ struct DRV_OBJECT; ++ ++#endif /* DRVDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/drv.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/drv.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,450 @@ ++/* ++ * drv.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
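
/*
 * A minimal sketch of the DPC call sequence documented above: create a DPC
 * object, schedule it (typically from an ISR), then tear it down once all
 * scheduled work has completed. The function names and the DSP_SUCCEEDED()
 * macro are illustrative assumptions.
 */
static void dpc_work_example(void *pRefData)
{
	/* deferred I/O completion work; must not block (see DPC_PROC above) */
}

static DSP_STATUS dpc_lifecycle_example(void)
{
	struct DPC_OBJECT *hDPC = NULL;
	DSP_STATUS status;

	if (!DPC_Init())
		return DSP_EFAIL;

	status = DPC_Create(&hDPC, dpc_work_example, NULL);
	if (DSP_SUCCEEDED(status)) {
		status = DPC_Schedule(hDPC);	/* dpc_work_example runs later */
		DPC_Cancel(hDPC);		/* no effect if it already ran */
		DPC_Destroy(hDPC);
	}

	DPC_Exit();
	return status;
}
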
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== drv.h ======== ++ * Purpose: ++ * DRV Resource allocation module. Driver Object gets Created ++ * at the time of Loading. It holds the List of Device Objects ++ * in the Syste, ++ * ++ * Public Functions: ++ * DRV_Create ++ * DRV_Destroy ++ * DRV_Exit ++ * DRV_GetDevObject ++ * DRV_GetDevExtension ++ * DRV_GetFirstDevObject ++ * DRV_GetNextDevObject ++ * DRV_GetNextDevExtension ++ * DRV_Init ++ * DRV_InsertDevObject ++ * DRV_RemoveDevObject ++ * DRV_RequestResources ++ * DRV_ReleaseResources ++ * ++ *! Revision History ++ *! ================ ++ *! 10-Feb-2004 vp: Added OMAP24xx specific definitions. ++ *! 14-Aug-2000 rr: Cleaned up. ++ *! 27-Jul-2000 rr: DRV_RequestResources split into two(Request and Release) ++ *! Device extension created to hold the DevNodeString. ++ *! 17-Jul-2000 rr: Driver Object holds the list of Device Objects. ++ *! Added DRV_Create, DRV_Destroy, DRV_GetDevObject, ++ *! DRV_GetFirst/NextDevObject, DRV_Insert/RemoveDevObject. ++ *! 12-Nov-1999 rr: New Flag defines for DRV_ASSIGN and DRV_RELEASE ++ *! 25-Oct-1999 rr: Resource Structure removed. ++ *! 15-Oct-1999 rr: New Resource structure created. ++ *! 05-Oct-1999 rr: Added DRV_RequestResources ++ *! Removed fxn'sDRV_RegisterMiniDriver(), ++ *! DRV_UnRegisterMiniDriver() ++ *! Removed Structures DSP_DRIVER & DRV_EXTENSION. ++ *! ++ *! 24-Sep-1999 rr: Added DRV_EXTENSION and DSP_DRIVER structures. ++ *! ++ */ ++ ++#ifndef DRV_ ++#define DRV_ ++ ++#include ++ ++#include ++ ++#define DRV_ASSIGN 1 ++#define DRV_RELEASE 0 ++ ++/* Provide the DSP Internal memory windows that can be accessed from L3 address ++ * space */ ++ ++#define OMAP_GEM_BASE 0x107F8000 ++#define OMAP_DSP_SIZE 0x00720000 ++ ++/* MEM1 is L2 RAM + L2 Cache space */ ++#define OMAP_DSP_MEM1_BASE 0x5C7F8000 ++#define OMAP_DSP_MEM1_SIZE 0x18000 ++#define OMAP_DSP_GEM1_BASE 0x107F8000 ++ ++ ++/* MEM2 is L1P RAM/CACHE space */ ++#define OMAP_DSP_MEM2_BASE 0x5CE00000 ++#define OMAP_DSP_MEM2_SIZE 0x8000 ++#define OMAP_DSP_GEM2_BASE 0x10E00000 ++ ++/* MEM3 is L1D RAM/CACHE space */ ++#define OMAP_DSP_MEM3_BASE 0x5CF04000 ++#define OMAP_DSP_MEM3_SIZE 0x14000 ++#define OMAP_DSP_GEM3_BASE 0x10F04000 ++ ++ ++#define OMAP_IVA2_PRM_BASE 0x48306000 ++#define OMAP_IVA2_PRM_SIZE 0x1000 ++ ++#define OMAP_IVA2_CM_BASE 0x48004000 ++#define OMAP_IVA2_CM_SIZE 0x1000 ++ ++#define OMAP_PER_CM_BASE 0x48005000 ++#define OMAP_PER_CM_SIZE 0x1000 ++ ++#define OMAP_PER_PRM_BASE 0x48307000 ++#define OMAP_PER_PRM_SIZE 0x1000 ++ ++#define OMAP_CORE_PRM_BASE 0x48306A00 ++#define OMAP_CORE_PRM_SIZE 0x1000 ++ ++#define OMAP_SYSC_BASE 0x48002000 ++#define OMAP_SYSC_SIZE 0x1000 ++ ++#define OMAP_MBOX_BASE 0x48094000 ++#define OMAP_MBOX_SIZE 0x1000 ++ ++#define OMAP_DMMU_BASE 0x5D000000 ++#define OMAP_DMMU_SIZE 0x1000 ++ ++#define OMAP_PRCM_VDD1_DOMAIN 1 ++#define OMAP_PRCM_VDD2_DOMAIN 2 ++ ++#ifndef RES_CLEANUP_DISABLE ++ ++/* GPP PROCESS CLEANUP Data structures */ ++ ++/* New structure (member of process context) abstracts NODE resource info */ ++struct NODE_RES_OBJECT { ++ DSP_HNODE hNode; ++ s32 nodeAllocated; /* Node status */ ++ s32 heapAllocated; /* Heap status */ ++ s32 streamsAllocated; /* Streams status */ ++ struct NODE_RES_OBJECT *next; ++} ; ++ ++/* New structure (member of process context) abstracts DMM resource info */ ++struct 
DMM_RES_OBJECT { ++ s32 dmmAllocated; /* DMM status */ ++ u32 ulMpuAddr; ++ u32 ulDSPAddr; ++ u32 ulDSPResAddr; ++ u32 dmmSize; ++ HANDLE hProcessor; ++ struct DMM_RES_OBJECT *next; ++} ; ++ ++/* New structure (member of process context) abstracts DMM resource info */ ++struct DSPHEAP_RES_OBJECT { ++ s32 heapAllocated; /* DMM status */ ++ u32 ulMpuAddr; ++ u32 ulDSPAddr; ++ u32 ulDSPResAddr; ++ u32 heapSize; ++ HANDLE hProcessor; ++ struct DSPHEAP_RES_OBJECT *next; ++} ; ++ ++/* New structure (member of process context) abstracts stream resource info */ ++struct STRM_RES_OBJECT { ++ s32 streamAllocated; /* Stream status */ ++ DSP_HSTREAM hStream; ++ u32 uNumBufs; ++ u32 uDir; ++ struct STRM_RES_OBJECT *next; ++} ; ++ ++/* Overall Bridge process resource usage state */ ++enum GPP_PROC_RES_STATE { ++ PROC_RES_ALLOCATED, ++ PROC_RES_FREED ++} ; ++ ++/* Process Context */ ++struct PROCESS_CONTEXT{ ++ /* Process State */ ++ enum GPP_PROC_RES_STATE resState; ++ ++ /* Process ID (Same as UNIX process ID) */ ++ u32 pid; ++ ++ /* Pointer to next process context ++ * (To maintain a linked list of process contexts) */ ++ struct PROCESS_CONTEXT *next; ++ ++ /* List of Processors */ ++ struct list_head processor_list; ++ spinlock_t proc_list_lock; ++ ++ /* DSP Node resources */ ++ struct NODE_RES_OBJECT *pNodeList; ++ ++ /* DMM resources */ ++ struct DMM_RES_OBJECT *pDMMList; ++ ++ /* DSP Heap resources */ ++ struct DSPHEAP_RES_OBJECT *pDSPHEAPList; ++ ++ /* Stream resources */ ++ struct STRM_RES_OBJECT *pSTRMList; ++} ; ++#endif ++ ++/* ++ * ======== DRV_Create ======== ++ * Purpose: ++ * Creates the Driver Object. This is done during the driver loading. ++ * There is only one Driver Object in the DSP/BIOS Bridge. ++ * Parameters: ++ * phDrvObject: Location to store created DRV Object handle. ++ * Returns: ++ * DSP_SOK: Sucess ++ * DSP_EMEMORY: Failed in Memory allocation ++ * DSP_EFAIL: General Failure ++ * Requires: ++ * DRV Initialized (cRefs > 0 ) ++ * phDrvObject != NULL. ++ * Ensures: ++ * DSP_SOK: - *phDrvObject is a valid DRV interface to the device. ++ * - List of DevObject Created and Initialized. ++ * - List of DevNode String created and intialized. ++ * - Registry is updated with the DRV Object. ++ * !DSP_SOK: DRV Object not created ++ * Details: ++ * There is one Driver Object for the Driver representing ++ * the driver itself. It contains the list of device ++ * Objects and the list of Device Extensions in the system. ++ * Also it can hold other neccessary ++ * information in its storage area. ++ */ ++ extern DSP_STATUS DRV_Create(struct DRV_OBJECT **phDrvObject); ++ ++/* ++ * ======== DRV_Destroy ======== ++ * Purpose: ++ * destroys the Dev Object list, DrvExt list ++ * and destroy the DRV object ++ * Called upon driver unLoading.or unsuccesful loading of the driver. ++ * Parameters: ++ * hDrvObject: Handle to Driver object . ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Failed to destroy DRV Object ++ * Requires: ++ * DRV Initialized (cRegs > 0 ) ++ * hDrvObject is not NULL and a valid DRV handle . ++ * List of DevObject is Empty. ++ * List of DrvExt is Empty ++ * Ensures: ++ * DSP_SOK: - DRV Object destroyed and hDrvObject is not a valid ++ * DRV handle. ++ * - Registry is updated with "0" as the DRV Object. ++ */ ++ extern DSP_STATUS DRV_Destroy(struct DRV_OBJECT *hDrvObject); ++ ++/* ++ * ======== DRV_Exit ======== ++ * Purpose: ++ * Exit the DRV module, freeing any modules initialized in DRV_Init. 
++ * Parameters: ++ * Returns: ++ * Requires: ++ * Ensures: ++ */ ++ extern void DRV_Exit(void); ++ ++/* ++ * ======== DRV_GetFirstDevObject ======== ++ * Purpose: ++ * Returns the Ptr to the FirstDev Object in the List ++ * Parameters: ++ * Requires: ++ * DRV Initialized ++ * Returns: ++ * dwDevObject: Ptr to the First Dev Object as a u32 ++ * 0 if it fails to retrieve the First Dev Object ++ * Ensures: ++ */ ++ extern u32 DRV_GetFirstDevObject(void); ++ ++/* ++ * ======== DRV_GetFirstDevExtension ======== ++ * Purpose: ++ * Returns the Ptr to the First Device Extension in the List ++ * Parameters: ++ * Requires: ++ * DRV Initialized ++ * Returns: ++ * dwDevExtension: Ptr to the First Device Extension as a u32 ++ * 0: Failed to Get the Device Extension ++ * Ensures: ++ */ ++ extern u32 DRV_GetFirstDevExtension(void); ++ ++/* ++ * ======== DRV_GetDevObject ======== ++ * Purpose: ++ * Given a index, returns a handle to DevObject from the list ++ * Parameters: ++ * hDrvObject: Handle to the Manager ++ * phDevObject: Location to store the Dev Handle ++ * Requires: ++ * DRV Initialized ++ * uIndex >= 0 ++ * hDrvObject is not NULL and Valid DRV Object ++ * phDevObject is not NULL ++ * Device Object List not Empty ++ * Returns: ++ * DSP_SOK: Success ++ * DSP_EFAIL: Failed to Get the Dev Object ++ * Ensures: ++ * DSP_SOK: *phDevObject != NULL ++ * DSP_EFAIL: *phDevObject = NULL ++ */ ++ extern DSP_STATUS DRV_GetDevObject(u32 uIndex, ++ struct DRV_OBJECT *hDrvObject, ++ struct DEV_OBJECT **phDevObject); ++ ++/* ++ * ======== DRV_GetNextDevObject ======== ++ * Purpose: ++ * Returns the Ptr to the Next Device Object from the the List ++ * Parameters: ++ * hDevObject: Handle to the Device Object ++ * Requires: ++ * DRV Initialized ++ * hDevObject != 0 ++ * Returns: ++ * dwDevObject: Ptr to the Next Dev Object as a u32 ++ * 0: If it fail to get the next Dev Object. ++ * Ensures: ++ */ ++ extern u32 DRV_GetNextDevObject(u32 hDevObject); ++ ++/* ++ * ======== DRV_GetNextDevExtension ======== ++ * Purpose: ++ * Returns the Ptr to the Next Device Extension from the the List ++ * Parameters: ++ * hDevExtension: Handle to the Device Extension ++ * Requires: ++ * DRV Initialized ++ * hDevExtension != 0. ++ * Returns: ++ * dwDevExtension: Ptr to the Next Dev Extension ++ * 0: If it fail to Get the next Dev Extension ++ * Ensures: ++ */ ++ extern u32 DRV_GetNextDevExtension(u32 hDevExtension); ++ ++/* ++ * ======== DRV_Init ======== ++ * Purpose: ++ * Initialize the DRV module. ++ * Parameters: ++ * Returns: ++ * TRUE if success; FALSE otherwise. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS DRV_Init(void); ++ ++/* ++ * ======== DRV_InsertDevObject ======== ++ * Purpose: ++ * Insert a DeviceObject into the list of Driver object. ++ * Parameters: ++ * hDrvObject: Handle to DrvObject ++ * hDevObject: Handle to DeviceObject to insert. ++ * Returns: ++ * DSP_SOK: If successful. ++ * DSP_EFAIL: General Failure: ++ * Requires: ++ * hDrvObject != NULL and Valid DRV Handle. ++ * hDevObject != NULL. ++ * Ensures: ++ * DSP_SOK: Device Object is inserted and the List is not empty. ++ */ ++ extern DSP_STATUS DRV_InsertDevObject(struct DRV_OBJECT *hDrvObject, ++ struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== DRV_RemoveDevObject ======== ++ * Purpose: ++ * Search for and remove a Device object from the given list of Device Obj ++ * objects. ++ * Parameters: ++ * hDrvObject: Handle to DrvObject ++ * hDevObject: Handle to DevObject to Remove ++ * Returns: ++ * DSP_SOK: Success. 
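
/*
 * A small sketch of walking the driver object's device list with the DRV
 * accessors above. Handles are passed around as u32 values with 0 marking
 * the end of the list; treating such a handle as a DEV_OBJECT pointer is
 * an assumption made for this illustration.
 */
static void drv_list_example(void)
{
	u32 hDevObject;

	for (hDevObject = DRV_GetFirstDevObject(); hDevObject != 0;
	     hDevObject = DRV_GetNextDevObject(hDevObject)) {
		struct DEV_OBJECT *pDev = (struct DEV_OBJECT *)hDevObject;

		/* pDev can now be handed to the DEV_* accessors */
		(void)pDev;
	}
}
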
++ * DSP_EFAIL: Unable to find pDevObject. ++ * Requires: ++ * hDrvObject != NULL and a Valid DRV Handle. ++ * hDevObject != NULL. ++ * List exists and is not empty. ++ * Ensures: ++ * List either does not exist (NULL), or is not empty if it does exist. ++*/ ++ extern DSP_STATUS DRV_RemoveDevObject(struct DRV_OBJECT *hDrvObject, ++ struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== DRV_RequestResources ======== ++ * Purpose: ++ * Assigns the Resources or Releases them. ++ * Parameters: ++ * dwContext: Path to the driver Registry Key. ++ * pDevNodeString: Ptr to DevNode String stored in the Device Ext. ++ * Returns: ++ * TRUE if success; FALSE otherwise. ++ * Requires: ++ * Ensures: ++ * The Resources are assigned based on Bus type. ++ * The hardware is initialized. Resource information is ++ * gathered from the Registry(ISA, PCMCIA)or scanned(PCI) ++ * Resource structure is stored in the registry which will be ++ * later used by the CFG module. ++ */ ++ extern DSP_STATUS DRV_RequestResources(IN u32 dwContext, ++ OUT u32 *pDevNodeString); ++ ++/* ++ * ======== DRV_ReleaseResources ======== ++ * Purpose: ++ * Assigns the Resources or Releases them. ++ * Parameters: ++ * dwContext: Path to the driver Registry Key. ++ * hDrvObject: Handle to the Driver Object. ++ * Returns: ++ * TRUE if success; FALSE otherwise. ++ * Requires: ++ * Ensures: ++ * The Resources are released based on Bus type. ++ * Resource structure is deleted from the registry ++ */ ++ extern DSP_STATUS DRV_ReleaseResources(IN u32 dwContext, ++ struct DRV_OBJECT *hDrvObject); ++ ++/* ++ * ======== DRV_ProcFreeDMMRes ======== ++ * Purpose: ++ * Actual DMM De-Allocation. ++ * Parameters: ++ * hPCtxt: Path to the driver Registry Key. ++ * Returns: ++ * DSP_SOK if success; ++ */ ++ ++ ++ extern DSP_STATUS DRV_ProcFreeDMMRes(HANDLE hPCtxt); ++ ++#endif /* DRV_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dspdrv.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dspdrv.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dspdrv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dspdrv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,89 @@ ++/* ++ * dspdrv.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dspdrv.h ======== ++ * Purpose: ++ * This is the Stream Interface for the DDSP Class driver. ++ * All Device operations are performed via DeviceIOControl. ++ * Read, Seek and Write not used. ++ * ++ * Public Functions ++ * DSP_Close ++ * DSP_Deinit ++ * DSP_Init ++ * DSP_IOControl ++ * DSP_Open ++ * DSP_PowerUp ++ * DSP_PowerDown ++ * ++ *! Revision History ++ *! ================ ++ *! 28-Jan-2000 rr: Type void changed to Void. ++ *! 02-Dec-1999 rr: MAX_DEV define moved from wcdce.c file.Code cleaned up. ++ *! 12-Nov-1999 rr: "#include removed. ++ *! 05-Oct-1999 rr Renamed the file name to wcdce.h Removed Bus Specific ++ *! code and #defines to PCCARD.h. ++ *! 
24-Sep-1999 rr Changed the DSP_COMMON_WINDOW_SIZE to 0x4000(16k) for the ++ *! Memory windows. ++ *! 16-Jul-1999 ag Adapted from rkw's CAC Bullet driver. ++ *! ++ */ ++ ++#if !defined __DSPDRV_h__ ++#define __DSPDRV_h__ ++ ++#define MAX_DEV 10 /* Max support of 10 devices */ ++ ++/* ++ * ======== DSP_Deinit ======== ++ * Purpose: ++ * This function is called by Device Manager to de-initialize a device. ++ * This function is not called by applications. ++ * Parameters: ++ * dwDeviceContext:Handle to the device context. The XXX_Init function ++ * creates and returns this identifier. ++ * Returns: ++ * TRUE indicates the device successfully de-initialized. Otherwise it ++ * returns FALSE. ++ * Requires: ++ * dwDeviceContext!= NULL. For a built in device this should never ++ * get called. ++ * Ensures: ++ */ ++extern bool DSP_Deinit(u32 dwDeviceContext); ++ ++/* ++ * ======== DSP_Init ======== ++ * Purpose: ++ * This function is called by Device Manager to initialize a device. ++ * This function is not called by applications ++ * Parameters: ++ * dwContext: Specifies a pointer to a string containing the registry ++ * path to the active key for the stream interface driver. ++ * HKEY_LOCAL_MACHINE\Drivers\Active ++ * Returns: ++ * Returns a handle to the device context created. This is the our actual ++ * Device Object representing the DSP Device instance. ++ * Requires: ++ * Ensures: ++ * Succeeded: device context > 0 ++ * Failed: device Context = 0 ++ */ ++extern u32 DSP_Init(OUT u32 *initStatus); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dynamic_loader.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dynamic_loader.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/dynamic_loader.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/dynamic_loader.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,505 @@ ++/* ++ * dynamic_loader.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++ ++#ifndef _DYNAMIC_LOADER_H_ ++#define _DYNAMIC_LOADER_H_ ++#include ++#include ++ ++/* ++ * Dynamic Loader ++ * ++ * The function of the dynamic loader is to load a "module" containing ++ * instructions for a "target" processor into that processor. In the process ++ * it assigns memory for the module, resolves symbol references made by the ++ * module, and remembers symbols defined by the module. 
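
/*
 * A minimal sketch of the class-driver entry points from dspdrv.h above:
 * DSP_Init() returns a non-zero device context on success, which is later
 * handed back to DSP_Deinit(). Purely illustrative; the function name is
 * an assumption.
 */
static void dspdrv_bringup_example(void)
{
	u32 initStatus = 0;
	u32 devContext = DSP_Init(&initStatus);

	if (devContext != 0)
		DSP_Deinit(devContext);
}
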
++ * ++ * The dynamic loader is parameterized for a particular system by 4 classes ++ * that supply the module and system specific functions it requires ++ */ ++ /* The read functions for the module image to be loaded */ ++ struct Dynamic_Loader_Stream; ++ ++ /* This class defines "host" symbol and support functions */ ++ struct Dynamic_Loader_Sym; ++ ++ /* This class defines the allocator for "target" memory */ ++ struct Dynamic_Loader_Allocate; ++ ++ /* This class defines the copy-into-target-memory functions */ ++ struct Dynamic_Loader_Initialize; ++ ++/* ++ * Option flags to modify the behavior of module loading ++ */ ++#define DLOAD_INITBSS 0x1 /* initialize BSS sections to zero */ ++#define DLOAD_BIGEND 0x2 /* require big-endian load module */ ++#define DLOAD_LITTLE 0x4 /* require little-endian load module */ ++ ++ typedef void *DLOAD_mhandle; /* module handle for loaded modules */ ++ ++/***************************************************************************** ++ * Procedure Dynamic_Load_Module ++ * ++ * Parameters: ++ * module The input stream that supplies the module image ++ * syms Host-side symbol table and malloc/free functions ++ * alloc Target-side memory allocation ++ * init Target-side memory initialization, or NULL for symbol read only ++ * options Option flags DLOAD_* ++ * mhandle A module handle for use with Dynamic_Unload ++ * ++ * Effect: ++ * The module image is read using *module. Target storage for the new image is ++ * obtained from *alloc. Symbols defined and referenced by the module are ++ * managed using *syms. The image is then relocated and references resolved ++ * as necessary, and the resulting executable bits are placed into target memory ++ * using *init. ++ * ++ * Returns: ++ * On a successful load, a module handle is placed in *mhandle, and zero is ++ * returned. On error, the number of errors detected is returned. Individual ++ * errors are reported during the load process using syms->Error_Report(). ++ *****************************************************************************/ ++ extern int Dynamic_Load_Module( ++ /* the source for the module image*/ ++ struct Dynamic_Loader_Stream *module, ++ /* host support for symbols and storage*/ ++ struct Dynamic_Loader_Sym *syms, ++ /* the target memory allocator*/ ++ struct Dynamic_Loader_Allocate *alloc, ++ /* the target memory initializer*/ ++ struct Dynamic_Loader_Initialize *init, ++ unsigned options, /* option flags*/ ++ /* the returned module handle*/ ++ DLOAD_mhandle *mhandle ++ ); ++ ++/***************************************************************************** ++ * Procedure Dynamic_Open_Module ++ * ++ * Parameters: ++ * module The input stream that supplies the module image ++ * syms Host-side symbol table and malloc/free functions ++ * alloc Target-side memory allocation ++ * init Target-side memory initialization, or NULL for symbol read only ++ * options Option flags DLOAD_* ++ * mhandle A module handle for use with Dynamic_Unload ++ * ++ * Effect: ++ * The module image is read using *module. Target storage for the new image is ++ * obtained from *alloc. Symbols defined and referenced by the module are ++ * managed using *syms. The image is then relocated and references resolved ++ * as necessary, and the resulting executable bits are placed into target memory ++ * using *init. ++ * ++ * Returns: ++ * On a successful load, a module handle is placed in *mhandle, and zero is ++ * returned. On error, the number of errors detected is returned. 
Individual ++ * errors are reported during the load process using syms->Error_Report(). ++ *****************************************************************************/ ++ extern int Dynamic_Open_Module( ++ /* the source for the module image */ ++ struct Dynamic_Loader_Stream *module, ++ /* host support for symbols and storage */ ++ struct Dynamic_Loader_Sym *syms, ++ /* the target memory allocator */ ++ struct Dynamic_Loader_Allocate *alloc, ++ /* the target memory initializer */ ++ struct Dynamic_Loader_Initialize *init, ++ unsigned options, /* option flags */ ++ /* the returned module handle */ ++ DLOAD_mhandle *mhandle ++ ); ++ ++/***************************************************************************** ++ * Procedure Dynamic_Unload_Module ++ * ++ * Parameters: ++ * mhandle A module handle from Dynamic_Load_Module ++ * syms Host-side symbol table and malloc/free functions ++ * alloc Target-side memory allocation ++ * ++ * Effect: ++ * The module specified by mhandle is unloaded. Unloading causes all ++ * target memory to be deallocated, all symbols defined by the module to ++ * be purged, and any host-side storage used by the dynamic loader for ++ * this module to be released. ++ * ++ * Returns: ++ * Zero for success. On error, the number of errors detected is returned. ++ * Individual errors are reported using syms->Error_Report(). ++ *****************************************************************************/ ++ extern int Dynamic_Unload_Module(DLOAD_mhandle mhandle, /* the module ++ * handle*/ ++ /* host support for symbols and ++ * storage */ ++ struct Dynamic_Loader_Sym *syms, ++ /* the target memory allocator*/ ++ struct Dynamic_Loader_Allocate *alloc, ++ /* the target memory initializer*/ ++ struct Dynamic_Loader_Initialize *init ++ ); ++ ++/***************************************************************************** ++ ***************************************************************************** ++ * A class used by the dynamic loader for input of the module image ++ ***************************************************************************** ++ *****************************************************************************/ ++ struct Dynamic_Loader_Stream { ++/* public: */ ++ /************************************************************************* ++ * read_buffer ++ * ++ * PARAMETERS : ++ * buffer Pointer to the buffer to fill ++ * bufsiz Amount of data desired in sizeof() units ++ * ++ * EFFECT : ++ * Reads the specified amount of data from the module input stream ++ * into the specified buffer. Returns the amount of data read in sizeof() ++ * units (which if less than the specification, represents an error). ++ * ++ * NOTES: ++ * In release 1 increments the file position by the number of bytes read ++ * ++ *************************************************************************/ ++ int (*read_buffer) (struct Dynamic_Loader_Stream *thisptr, ++ void *buffer, unsigned bufsiz); ++ ++ /************************************************************************* ++ * set_file_posn (release 1 only) ++ * ++ * PARAMETERS : ++ * posn Desired file position relative to start of file in sizeof() units. ++ * ++ * EFFECT : ++ * Adjusts the internal state of the stream object so that the next ++ * read_buffer call will begin to read at the specified offset from ++ * the beginning of the input module. Returns 0 for success, non-zero ++ * for failure. 
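
/*
 * A sketch of driving the dynamic loader with the four interface objects
 * described above. The my_* instances and the function name are
 * placeholders; in the bridge they are supplied by the host-side loader
 * support code.
 */
static int dynload_example(struct Dynamic_Loader_Stream *my_stream,
			   struct Dynamic_Loader_Sym *my_syms,
			   struct Dynamic_Loader_Allocate *my_alloc,
			   struct Dynamic_Loader_Initialize *my_init)
{
	DLOAD_mhandle mhandle;
	int errs;

	/* zero-fill BSS while loading; errs is the number of errors seen */
	errs = Dynamic_Load_Module(my_stream, my_syms, my_alloc, my_init,
				   DLOAD_INITBSS, &mhandle);
	if (errs != 0)
		return errs;

	/* ... the loaded module runs on the target ... */

	return Dynamic_Unload_Module(mhandle, my_syms, my_alloc, my_init);
}
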
++ * ++ *************************************************************************/ ++ int (*set_file_posn) (struct Dynamic_Loader_Stream *thisptr, ++ /* to be eliminated in release 2*/ ++ unsigned int posn); ++ ++ }; ++ ++/***************************************************************************** ++ ***************************************************************************** ++ * A class used by the dynamic loader for symbol table support and ++ * miscellaneous host-side functions ++ ***************************************************************************** ++ *****************************************************************************/ ++ ++ typedef u32 LDR_ADDR; ++ ++/* ++ * the structure of a symbol known to the dynamic loader ++ */ ++ struct dynload_symbol { ++ LDR_ADDR value; ++ } ; ++ ++ struct Dynamic_Loader_Sym { ++/* public: */ ++ /************************************************************************* ++ * Find_Matching_Symbol ++ * ++ * PARAMETERS : ++ * name The name of the desired symbol ++ * ++ * EFFECT : ++ * Locates a symbol matching the name specified. A pointer to the ++ * symbol is returned if it exists; 0 is returned if no such symbol is ++ * found. ++ * ++ *************************************************************************/ ++ struct dynload_symbol *(*Find_Matching_Symbol) ++ (struct Dynamic_Loader_Sym * ++ thisptr, ++ const char *name); ++ ++ /************************************************************************* ++ * Add_To_Symbol_Table ++ * ++ * PARAMETERS : ++ * nname Pointer to the name of the new symbol ++ * moduleid An opaque module id assigned by the dynamic loader ++ * ++ * EFFECT : ++ * The new symbol is added to the table. A pointer to the symbol is ++ * returned, or NULL is returned for failure. ++ * ++ * NOTES: ++ * It is permissible for this function to return NULL; the effect is that ++ * the named symbol will not be available to resolve references in ++ * subsequent loads. Returning NULL will not cause the current load ++ * to fail. ++ *************************************************************************/ ++ struct dynload_symbol *(*Add_To_Symbol_Table) ++ (struct Dynamic_Loader_Sym * ++ thisptr, ++ const char *nname, ++ unsigned moduleid); ++ ++ /************************************************************************* ++ * Purge_Symbol_Table ++ * ++ * PARAMETERS : ++ * moduleid An opaque module id assigned by the dynamic loader ++ * ++ * EFFECT : ++ * Each symbol in the symbol table whose moduleid matches the argument ++ * is removed from the table. ++ *************************************************************************/ ++ void (*Purge_Symbol_Table) (struct Dynamic_Loader_Sym *thisptr, ++ unsigned moduleid); ++ ++ /************************************************************************* ++ * Allocate ++ * ++ * PARAMETERS : ++ * memsiz size of desired memory in sizeof() units ++ * ++ * EFFECT : ++ * Returns a pointer to some "host" memory for use by the dynamic ++ * loader, or NULL for failure. ++ * This function is serves as a replaceable form of "malloc" to ++ * allow the user to configure the memory usage of the dynamic loader. ++ *************************************************************************/ ++ void *(*Allocate) (struct Dynamic_Loader_Sym *thisptr, ++ unsigned memsiz); ++ ++ /************************************************************************* ++ * Deallocate ++ * ++ * PARAMETERS : ++ * memptr pointer to previously allocated memory ++ * ++ * EFFECT : ++ * Releases the previously allocated "host" memory. 
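
/*
 * One way a caller might supply the module image to the loader: a
 * memory-backed Dynamic_Loader_Stream that embeds the interface struct in
 * a private object and recovers it with a cast inside the callbacks. The
 * buf_stream type and the use of memcpy() are assumptions for this sketch.
 */
struct buf_stream {
	struct Dynamic_Loader_Stream strm;	/* must be the first member */
	const u8 *image;			/* complete module image */
	unsigned size;				/* image size in bytes */
	unsigned posn;				/* current read offset */
};

static int buf_read_buffer(struct Dynamic_Loader_Stream *thisptr,
			   void *buffer, unsigned bufsiz)
{
	struct buf_stream *bs = (struct buf_stream *)thisptr;
	unsigned avail = bs->size - bs->posn;
	unsigned count = (bufsiz < avail) ? bufsiz : avail;

	memcpy(buffer, bs->image + bs->posn, count);
	bs->posn += count;
	return count;	/* a short read is treated as an error by the loader */
}

static int buf_set_file_posn(struct Dynamic_Loader_Stream *thisptr,
			     unsigned int posn)
{
	struct buf_stream *bs = (struct buf_stream *)thisptr;

	if (posn > bs->size)
		return 1;	/* non-zero indicates failure */
	bs->posn = posn;
	return 0;
}
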
++ *************************************************************************/ ++ void (*Deallocate) (struct Dynamic_Loader_Sym *thisptr, ++ void *memptr); ++ ++ /************************************************************************* ++ * Error_Report ++ * ++ * PARAMETERS : ++ * errstr pointer to an error string ++ * args additional arguments ++ * ++ * EFFECT : ++ * This function provides an error reporting interface for the dynamic ++ * loader. The error string and arguments are designed as for the ++ * library function vprintf. ++ *************************************************************************/ ++ void (*Error_Report) (struct Dynamic_Loader_Sym *thisptr, ++ const char *errstr, va_list args); ++ ++ }; /* class Dynamic_Loader_Sym */ ++ ++/***************************************************************************** ++ ***************************************************************************** ++ * A class used by the dynamic loader to allocate and deallocate target memory. ++ ***************************************************************************** ++ *****************************************************************************/ ++ ++ struct LDR_SECTION_INFO { ++ /* Name of the memory section assigned at build time */ ++ const char *name; ++ LDR_ADDR run_addr; /* execution address of the section */ ++ LDR_ADDR load_addr; /* load address of the section */ ++ LDR_ADDR size; /* size of the section in addressable units */ ++#ifndef _BIG_ENDIAN ++ u16 page; /* memory page or view */ ++ u16 type; /* one of the section types below */ ++#else ++ u16 type; /* one of the section types below */ ++ u16 page; /* memory page or view */ ++#endif ++ /* a context field for use by Dynamic_Loader_Allocate; ++ * ignored but maintained by the dynamic loader */ ++ u32 context; ++ } ; ++ ++/* use this macro to extract type of section from LDR_SECTION_INFO.type field */ ++#define DLOAD_SECTION_TYPE(typeinfo) (typeinfo & 0xF) ++ ++/* type of section to be allocated */ ++#define DLOAD_TEXT 0 ++#define DLOAD_DATA 1 ++#define DLOAD_BSS 2 ++ /* internal use only, run-time cinit will be of type DLOAD_DATA */ ++#define DLOAD_CINIT 3 ++ ++ struct Dynamic_Loader_Allocate { ++/* public: */ ++ ++ /************************************************************************* ++ * Function allocate ++ * ++ * Parameters: ++ * info A pointer to an information block for the section ++ * align The alignment of the storage in target AUs ++ * ++ * Effect: ++ * Allocates target memory for the specified section and fills in the ++ * load_addr and run_addr fields of the section info structure. Returns TRUE ++ * for success, FALSE for failure. ++ * ++ * Notes: ++ * Frequently load_addr and run_addr are the same, but if they are not ++ * load_addr is used with Dynamic_Loader_Initialize, and run_addr is ++ * used for almost all relocations. This function should always initialize ++ * both fields. ++ *************************************************************************/ ++ int (*Allocate) (struct Dynamic_Loader_Allocate *thisptr, ++ struct LDR_SECTION_INFO *info, unsigned align); ++ ++ /************************************************************************* ++ * Function deallocate ++ * ++ * Parameters: ++ * info A pointer to an information block for the section ++ * ++ * Effect: ++ * Releases the target memory previously allocated. ++ * ++ * Notes: ++ * The content of the info->name field is undefined on call to this function. 
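[Editorial note] Dynamic_Loader_Sym is a table of host-supplied callbacks: the loader itself never allocates memory or prints errors directly. As a purely editorial sketch (not part of the patch), a kernel-side client could back Allocate/Deallocate/Error_Report with kzalloc/kfree/vprintk roughly as below; the symbol-table callbacks are omitted and the struct name my_loader_sym is invented for illustration.

    /* Editorial sketch, assuming the dynamic_loader.h declarations above are visible. */
    #include <linux/kernel.h>
    #include <linux/slab.h>

    struct my_loader_sym {
            struct Dynamic_Loader_Sym sym;  /* must stay first: thisptr points here */
            /* ... private client state ... */
    };

    static void *my_allocate(struct Dynamic_Loader_Sym *thisptr, unsigned memsiz)
    {
            return kzalloc(memsiz, GFP_KERNEL);     /* "host" memory for the loader */
    }

    static void my_deallocate(struct Dynamic_Loader_Sym *thisptr, void *memptr)
    {
            kfree(memptr);
    }

    static void my_error_report(struct Dynamic_Loader_Sym *thisptr,
                                const char *errstr, va_list args)
    {
            vprintk(errstr, args);          /* Error_Report is specified like vprintf */
    }

    static void my_sym_init(struct my_loader_sym *s)
    {
            s->sym.Allocate = my_allocate;
            s->sym.Deallocate = my_deallocate;
            s->sym.Error_Report = my_error_report;
            /* Find_Matching_Symbol / Add_To_Symbol_Table / Purge_Symbol_Table omitted */
    }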
++ *************************************************************************/ ++ void (*Deallocate) (struct Dynamic_Loader_Allocate *thisptr, ++ struct LDR_SECTION_INFO *info); ++ ++ }; /* class Dynamic_Loader_Allocate */ ++ ++/***************************************************************************** ++ ***************************************************************************** ++ * A class used by the dynamic loader to load data into a target. This class ++ * provides the interface-specific functions needed to load data. ++ ***************************************************************************** ++ *****************************************************************************/ ++ ++ struct Dynamic_Loader_Initialize { ++/* public: */ ++ /************************************************************************* ++ * Function connect ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Connect to the initialization interface. Returns TRUE for success, ++ * FALSE for failure. ++ * ++ * Notes: ++ * This function is called prior to use of any other functions in ++ * this interface. ++ *************************************************************************/ ++ int (*connect) (struct Dynamic_Loader_Initialize *thisptr); ++ ++ /************************************************************************* ++ * Function readmem ++ * ++ * Parameters: ++ * bufr Pointer to a word-aligned buffer for the result ++ * locn Target address of first data element ++ * info Section info for the section in which the address resides ++ * bytsiz Size of the data to be read in sizeof() units ++ * ++ * Effect: ++ * Fills the specified buffer with data from the target. Returns TRUE for ++ * success, FALSE for failure. ++ *************************************************************************/ ++ int (*readmem) (struct Dynamic_Loader_Initialize *thisptr, ++ void *bufr, ++ LDR_ADDR locn, ++ struct LDR_SECTION_INFO *info, ++ unsigned bytsiz); ++ ++ /************************************************************************* ++ * Function writemem ++ * ++ * Parameters: ++ * bufr Pointer to a word-aligned buffer of data ++ * locn Target address of first data element to be written ++ * info Section info for the section in which the address resides ++ * bytsiz Size of the data to be written in sizeof() units ++ * ++ * Effect: ++ * Writes the specified buffer to the target. Returns TRUE for success, ++ * FALSE for failure. ++ *************************************************************************/ ++ int (*writemem) (struct Dynamic_Loader_Initialize *thisptr, ++ void *bufr, ++ LDR_ADDR locn, ++ struct LDR_SECTION_INFO *info, ++ unsigned bytsiz); ++ ++ /************************************************************************* ++ * Function fillmem ++ * ++ * Parameters: ++ * locn Target address of first data element to be written ++ * info Section info for the section in which the address resides ++ * bytsiz Size of the data to be written in sizeof() units ++ * val Value to be written in each byte ++ * Effect: ++ * Fills the specified area of target memory. Returns TRUE for success, ++ * FALSE for failure. 
++ *************************************************************************/ ++ int (*fillmem) (struct Dynamic_Loader_Initialize *thisptr, ++ LDR_ADDR locn, struct LDR_SECTION_INFO *info, ++ unsigned bytsiz, unsigned val); ++ ++ /************************************************************************* ++ * Function execute ++ * ++ * Parameters: ++ * start Starting address ++ * ++ * Effect: ++ * The target code at the specified starting address is executed. ++ * ++ * Notes: ++ * This function is called at the end of the dynamic load process ++ * if the input module has specified a starting address. ++ *************************************************************************/ ++ int (*execute) (struct Dynamic_Loader_Initialize *thisptr, ++ LDR_ADDR start); ++ ++ /************************************************************************* ++ * Function release ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Releases the connection to the load interface. ++ * ++ * Notes: ++ * This function is called at the end of the dynamic load process. ++ *************************************************************************/ ++ void (*release) (struct Dynamic_Loader_Initialize *thisptr); ++ ++ }; /* class Dynamic_Loader_Initialize */ ++ ++#endif /* _DYNAMIC_LOADER_H_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/errbase.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/errbase.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/errbase.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/errbase.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,509 @@ ++/* ++ * errbase.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== errbase.h ======== ++ * Description: ++ * Central repository for DSP/BIOS Bridge error and status code. ++ * ++ * Error codes are of the form: ++ * []_E ++ * ++ * Success codes are of the form: ++ * []_S ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Jan-2003 map Added DSP_SALREADYLOADED for persistent library checking ++ *! 23-Nov-2002 gp: Minor comment cleanup. ++ *! 13-May-2002 sg Added DSP_SALREADYASLEEP and DSP_SALREADYWAKE. ++ *! 18-Feb-2002 mk: Added DSP_EOVERLAYMEMORY, EFWRITE, ENOSECT. ++ *! 31-Jan-2002 mk: Added definitions of DSP_STRUE and DSP_SFALSE. ++ *! 29-Jan-2002 mk: Added definition of CFG_E_INSUFFICIENTBUFSIZE. ++ *! 24-Oct-2001 sp: Consolidated all the error codes into this file. ++ *! 24-Jul-2001 mk: Type-casted all definitions of WSX_STATUS types for ++ *! removal of compile warnings. ++ *! 22-Nov-1999 kc: Changes from code review. ++ *! 18-Aug-1999 rr: Ported From WSX. ++ *! 29-May-1996 gp: Removed WCD_ and WMD_ error ranges. Redefined format of ++ *! error codes. ++ *! 10-May-1996 gp: Created. 
++ */ ++ ++#ifndef ERRBASE_ ++#define ERRBASE_ ++ ++/* Base of generic errors and component errors */ ++#define DSP_SBASE (DSP_STATUS)0x00008000 ++#define DSP_EBASE (DSP_STATUS)0x80008000 ++ ++#define DSP_COMP_EBASE (DSP_STATUS)0x80040200 ++#define DSP_COMP_ELAST (DSP_STATUS)0x80047fff ++ ++/* SUCCESS Codes */ ++ ++/* Generic success code */ ++#define DSP_SOK (DSP_SBASE + 0) ++ ++/* GPP is already attached to this DSP processor */ ++#define DSP_SALREADYATTACHED (DSP_SBASE + 1) ++ ++/* This is the last object available for enumeration. */ ++#define DSP_SENUMCOMPLETE (DSP_SBASE + 2) ++ ++/* The DSP is already asleep. */ ++#define DSP_SALREADYASLEEP (DSP_SBASE + 3) ++ ++/* The DSP is already awake. */ ++#define DSP_SALREADYAWAKE (DSP_SBASE + 4) ++ ++/* TRUE */ ++#define DSP_STRUE (DSP_SBASE + 5) ++ ++/* FALSE */ ++#define DSP_SFALSE (DSP_SBASE + 6) ++ ++/* A library contains no dependent library references */ ++#define DSP_SNODEPENDENTLIBS (DSP_SBASE + 7) ++ ++/* A persistent library is already loaded by the dynamic loader */ ++#define DSP_SALREADYLOADED (DSP_SBASE + 8) ++ ++/* Some error occured, but it is OK to continue */ ++#define DSP_OKTO_CONTINUE (DSP_SBASE + 9) ++ ++/* FAILURE Codes */ ++ ++/* The caller does not have access privileges to call this function */ ++#define DSP_EACCESSDENIED (DSP_EBASE + 0) ++ ++/* The Specified Connection already exists */ ++#define DSP_EALREADYCONNECTED (DSP_EBASE + 1) ++ ++/* The GPP must be detached from the DSP before this function is called */ ++#define DSP_EATTACHED (DSP_EBASE + 2) ++ ++/* During enumeration a change in the number or properties of the objects ++ * has occurred. */ ++#define DSP_ECHANGEDURINGENUM (DSP_EBASE + 3) ++ ++/* An error occurred while parsing the DSP executable file */ ++#define DSP_ECORRUPTFILE (DSP_EBASE + 4) ++ ++/* A failure occurred during a delete operation */ ++#define DSP_EDELETE (DSP_EBASE + 5) ++ ++/* The specified direction is invalid */ ++#define DSP_EDIRECTION (DSP_EBASE + 6) ++ ++/* A stream has been issued the maximum number of buffers allowed in the ++ * stream at once ; buffers must be reclaimed from the stream before any ++ * more can be issued. */ ++#define DSP_ESTREAMFULL (DSP_EBASE + 7) ++ ++/* A general failure occurred */ ++#define DSP_EFAIL (DSP_EBASE + 8) ++ ++/* The specified executable file could not be found. */ ++#define DSP_EFILE (DSP_EBASE + 9) ++ ++/* The specified handle is invalid. */ ++#define DSP_EHANDLE (DSP_EBASE + 0xa) ++ ++/* An invalid argument was specified. */ ++#define DSP_EINVALIDARG (DSP_EBASE + 0xb) ++ ++/* A memory allocation failure occurred. */ ++#define DSP_EMEMORY (DSP_EBASE + 0xc) ++ ++/* The requested operation is invalid for this node type. */ ++#define DSP_ENODETYPE (DSP_EBASE + 0xd) ++ ++/* No error text was found for the specified error code. */ ++#define DSP_ENOERRTEXT (DSP_EBASE + 0xe) ++ ++/* No more connections can be made for this node. */ ++#define DSP_ENOMORECONNECTIONS (DSP_EBASE + 0xf) ++ ++/* The indicated operation is not supported. */ ++#define DSP_ENOTIMPL (DSP_EBASE + 0x10) ++ ++/* I/O is currently pending. */ ++#define DSP_EPENDING (DSP_EBASE + 0x11) ++ ++/* An invalid pointer was specified. */ ++#define DSP_EPOINTER (DSP_EBASE + 0x12) ++ ++/* A parameter is specified outside its valid range. */ ++#define DSP_ERANGE (DSP_EBASE + 0x13) ++ ++/* An invalid size parameter was specified. */ ++#define DSP_ESIZE (DSP_EBASE + 0x14) ++ ++/* A stream creation failure occurred on the DSP. 
*/ ++#define DSP_ESTREAM (DSP_EBASE + 0x15) ++ ++/* A task creation failure occurred on the DSP. */ ++#define DSP_ETASK (DSP_EBASE + 0x16) ++ ++/* A timeout occurred before the requested operation could complete. */ ++ ++#define DSP_ETIMEOUT (DSP_EBASE + 0x17) ++ ++/* A data truncation occurred, e.g., when requesting a descriptive error ++ * string, not enough space was allocated for the complete error message. */ ++ ++#define DSP_ETRUNCATED (DSP_EBASE + 0x18) ++ ++/* A parameter is invalid. */ ++#define DSP_EVALUE (DSP_EBASE + 0x1a) ++ ++/* The state of the specified object is incorrect for the requested ++ * operation. */ ++#define DSP_EWRONGSTATE (DSP_EBASE + 0x1b) ++ ++/* Symbol not found in the COFF file. DSPNode_Create will return this if ++ * the iAlg function table for an xDAIS socket is not found in the COFF file. ++ * In this case, force the symbol to be linked into the COFF file. ++ * DSPNode_Create, DSPNode_Execute, and DSPNode_Delete will return this if ++ * the create, execute, or delete phase function, respectively, could not be ++ * found in the COFF file. */ ++#define DSP_ESYMBOL (DSP_EBASE + 0x1c) ++ ++/* UUID not found in registry. */ ++#define DSP_EUUID (DSP_EBASE + 0x1d) ++ ++/* Unable to read content of DCD data section ; this is typically caused by ++ * improperly configured nodes. */ ++#define DSP_EDCDREADSECT (DSP_EBASE + 0x1e) ++ ++/* Unable to decode DCD data section content ; this is typically caused by ++ * changes to DSP/BIOS Bridge data structures. */ ++#define DSP_EDCDPARSESECT (DSP_EBASE + 0x1f) ++ ++/* Unable to get pointer to DCD data section ; this is typically caused by ++ * improperly configured UUIDs. */ ++#define DSP_EDCDGETSECT (DSP_EBASE + 0x20) ++ ++/* Unable to load file containing DCD data section ; this is typically ++ * caused by a missing COFF file. */ ++#define DSP_EDCDLOADBASE (DSP_EBASE + 0x21) ++ ++/* The specified COFF file does not contain a valid node registration ++ * section. */ ++#define DSP_EDCDNOAUTOREGISTER (DSP_EBASE + 0x22) ++ ++/* A requested resource is not available. */ ++#define DSP_ERESOURCE (DSP_EBASE + 0x28) ++ ++/* A critical error has occurred, and the DSP is being re-started. */ ++#define DSP_ERESTART (DSP_EBASE + 0x29) ++ ++/* A DSP memory free operation failed. */ ++#define DSP_EFREE (DSP_EBASE + 0x2a) ++ ++/* A DSP I/O free operation failed. */ ++#define DSP_EIOFREE (DSP_EBASE + 0x2b) ++ ++/* Multiple instances are not allowed. */ ++#define DSP_EMULINST (DSP_EBASE + 0x2c) ++ ++/* A specified entity was not found. */ ++#define DSP_ENOTFOUND (DSP_EBASE + 0x2d) ++ ++/* A DSP I/O resource is not available. */ ++#define DSP_EOUTOFIO (DSP_EBASE + 0x2e) ++ ++/* A shared memory buffer contained in a message or stream could not be ++ * mapped to the GPP client process's virtual space. 
*/ ++#define DSP_ETRANSLATE (DSP_EBASE + 0x2f) ++ ++/* File or section load write function failed to write to DSP */ ++#define DSP_EFWRITE (DSP_EBASE + 0x31) ++ ++/* Unable to find a named section in DSP executable */ ++#define DSP_ENOSECT (DSP_EBASE + 0x32) ++ ++/* Unable to open file */ ++#define DSP_EFOPEN (DSP_EBASE + 0x33) ++ ++/* Unable to read file */ ++#define DSP_EFREAD (DSP_EBASE + 0x34) ++ ++/* A non-existent memory segment identifier was specified */ ++#define DSP_EOVERLAYMEMORY (DSP_EBASE + 0x37) ++ ++/* Invalid segment ID */ ++#define DSP_EBADSEGID (DSP_EBASE + 0x38) ++ ++/* Invalid alignment */ ++#define DSP_EALIGNMENT (DSP_EBASE + 0x39) ++ ++/* Invalid stream mode */ ++#define DSP_ESTRMMODE (DSP_EBASE + 0x3a) ++ ++/* Nodes not connected */ ++#define DSP_ENOTCONNECTED (DSP_EBASE + 0x3b) ++ ++/* Not shared memory */ ++#define DSP_ENOTSHAREDMEM (DSP_EBASE + 0x3c) ++ ++/* Error occurred in a dynamic loader library function */ ++#define DSP_EDYNLOAD (DSP_EBASE + 0x3d) ++ ++/* Device in 'sleep/suspend' mode due to DPM */ ++#define DSP_EDPMSUSPEND (DSP_EBASE + 0x3e) ++ ++/* A node-specific error has occurred. */ ++#define DSP_EUSER1 (DSP_EBASE + 0x40) ++#define DSP_EUSER2 (DSP_EBASE + 0x41) ++#define DSP_EUSER3 (DSP_EBASE + 0x42) ++#define DSP_EUSER4 (DSP_EBASE + 0x43) ++#define DSP_EUSER5 (DSP_EBASE + 0x44) ++#define DSP_EUSER6 (DSP_EBASE + 0x45) ++#define DSP_EUSER7 (DSP_EBASE + 0x46) ++#define DSP_EUSER8 (DSP_EBASE + 0x47) ++#define DSP_EUSER9 (DSP_EBASE + 0x48) ++#define DSP_EUSER10 (DSP_EBASE + 0x49) ++#define DSP_EUSER11 (DSP_EBASE + 0x4a) ++#define DSP_EUSER12 (DSP_EBASE + 0x4b) ++#define DSP_EUSER13 (DSP_EBASE + 0x4c) ++#define DSP_EUSER14 (DSP_EBASE + 0x4d) ++#define DSP_EUSER15 (DSP_EBASE + 0x4e) ++#define DSP_EUSER16 (DSP_EBASE + 0x4f) ++ ++/* FAILURE Codes : DEV */ ++#define DEV_EBASE (DSP_COMP_EBASE + 0x000) ++ ++/* The mini-driver expected a newer version of the class driver. */ ++#define DEV_E_NEWWMD (DEV_EBASE + 0x00) ++ ++/* WMD_DRV_Entry function returned a NULL function interface table. */ ++#define DEV_E_NULLWMDINTF (DEV_EBASE + 0x01) ++ ++/* FAILURE Codes : LDR */ ++#define LDR_EBASE (DSP_COMP_EBASE + 0x100) ++ ++/* Insufficient memory to export class driver services. */ ++#define LDR_E_NOMEMORY (LDR_EBASE + 0x00) ++ ++/* Unable to find WMD file in system directory. */ ++#define LDR_E_FILEUNABLETOOPEN (LDR_EBASE + 0x01) ++ ++/* FAILURE Codes : CFG */ ++#define CFG_EBASE (DSP_COMP_EBASE + 0x200) ++ ++/* Invalid pointer passed into a configuration module function */ ++#define CFG_E_INVALIDPOINTER (CFG_EBASE + 0x00) ++ ++/* Invalid device node handle passed into a configuration module function. */ ++#define CFG_E_INVALIDHDEVNODE (CFG_EBASE + 0x01) ++ ++/* Unable to retrieve resource information from the registry. */ ++#define CFG_E_RESOURCENOTAVAIL (CFG_EBASE + 0x02) ++ ++/* Unable to find board name key in registry. */ ++#define CFG_E_INVALIDBOARDNAME (CFG_EBASE + 0x03) ++ ++/* Unable to find a device node in registry with given unit number. */ ++#define CFG_E_INVALIDUNITNUM (CFG_EBASE + 0x04) ++ ++/* Insufficient buffer size */ ++#define CFG_E_INSUFFICIENTBUFSIZE (CFG_EBASE + 0x05) ++ ++/* FAILURE Codes : BRD */ ++#define BRD_EBASE (DSP_COMP_EBASE + 0x300) ++ ++/* Board client does not have sufficient access rights for this operation. */ ++#define BRD_E_ACCESSDENIED (BRD_EBASE + 0x00) ++ ++/* Unable to find trace buffer symbols in the DSP executable COFF file. 
*/ ++#define BRD_E_NOTRACEBUFFER (BRD_EBASE + 0x01) ++ ++/* Attempted to auto-start board, but no default DSP executable configured. */ ++#define BRD_E_NOEXEC (BRD_EBASE + 0x02) ++ ++/* The operation failed because it was started from a wrong state */ ++#define BRD_E_WRONGSTATE (BRD_EBASE + 0x03) ++ ++/* FAILURE Codes : COD */ ++#define COD_EBASE (DSP_COMP_EBASE + 0x400) ++ ++/* No symbol table is loaded for this board. */ ++#define COD_E_NOSYMBOLSLOADED (COD_EBASE + 0x00) ++ ++/* Symbol not found in for this board. */ ++#define COD_E_SYMBOLNOTFOUND (COD_EBASE + 0x01) ++ ++/* ZL DLL module is not exporting the correct function interface. */ ++#define COD_E_NOZLFUNCTIONS (COD_EBASE + 0x02) ++ ++/* Unable to initialize the ZL COFF parsing module. */ ++#define COD_E_ZLCREATEFAILED (COD_EBASE + 0x03) ++ ++/* Unable to open DSP executable COFF file. */ ++#define COD_E_OPENFAILED (COD_EBASE + 0x04) ++ ++/* Unable to parse DSP executable COFF file. */ ++#define COD_E_LOADFAILED (COD_EBASE + 0x05) ++ ++/* Unable to read DSP executable COFF file. */ ++#define COD_E_READFAILED (COD_EBASE + 0x06) ++ ++/* FAILURE Codes : CHNL */ ++#define CHNL_EBASE (DSP_COMP_EBASE + 0x500) ++ ++/* Attempt to created channel manager with too many channels. */ ++#define CHNL_E_MAXCHANNELS (CHNL_EBASE + 0x00) ++ ++/* No channel manager exists for this mini-driver. */ ++#define CHNL_E_NOMGR (CHNL_EBASE + 0x01) ++ ++/* No free channels are available. */ ++#define CHNL_E_OUTOFSTREAMS (CHNL_EBASE + 0x02) ++ ++/* Channel ID is out of range. */ ++#define CHNL_E_BADCHANID (CHNL_EBASE + 0x03) ++ ++/* Channel is already in use. */ ++#define CHNL_E_CHANBUSY (CHNL_EBASE + 0x04) ++ ++/* Invalid channel mode argument. */ ++#define CHNL_E_BADMODE (CHNL_EBASE + 0x05) ++ ++/* dwTimeOut parameter was CHNL_IOCNOWAIT, yet no I/O completions were ++ * queued. */ ++#define CHNL_E_NOIOC (CHNL_EBASE + 0x06) ++ ++/* I/O has been cancelled on this channel. */ ++#define CHNL_E_CANCELLED (CHNL_EBASE + 0x07) ++ ++/* End of stream was already requested on this output channel. */ ++#define CHNL_E_EOS (CHNL_EBASE + 0x09) ++ ++/* Unable to create the channel event object. */ ++#define CHNL_E_CREATEEVENT (CHNL_EBASE + 0x0A) ++ ++/* Board name and unit number do not identify a valid board name. */ ++#define CHNL_E_BRDID (CHNL_EBASE + 0x0B) ++ ++/* Invalid IRQ configured for this WMD for this system. */ ++#define CHNL_E_INVALIDIRQ (CHNL_EBASE + 0x0C) ++ ++/* DSP word size of zero configured for this device. */ ++#define CHNL_E_INVALIDWORDSIZE (CHNL_EBASE + 0x0D) ++ ++/* A zero length memory base was specified for a shared memory class driver. */ ++#define CHNL_E_INVALIDMEMBASE (CHNL_EBASE + 0x0E) ++ ++/* Memory map is not configured, or unable to map physical to linear ++ * address. */ ++#define CHNL_E_NOMEMMAP (CHNL_EBASE + 0x0F) ++ ++/* Attempted to create a channel manager when one already exists. */ ++#define CHNL_E_MGREXISTS (CHNL_EBASE + 0x10) ++ ++/* Unable to plug channel ISR for configured IRQ. */ ++#define CHNL_E_ISR (CHNL_EBASE + 0x11) ++ ++/* No free I/O request packets are available. */ ++#define CHNL_E_NOIORPS (CHNL_EBASE + 0x12) ++ ++/* Buffer size is larger than the size of physical channel. */ ++#define CHNL_E_BUFSIZE (CHNL_EBASE + 0x13) ++ ++/* User cannot mark end of stream on an input channel. */ ++#define CHNL_E_NOEOS (CHNL_EBASE + 0x14) ++ ++/* Wait for flush operation on an output channel timed out. 
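[Editorial note] Since every status is just a base plus an offset, the owning component can be read straight off a code's value. A couple of worked examples, computed from the #defines above:

    /*
     * DSP_SOK          = 0x00008000 + 0x00  = 0x00008000  (generic success)
     * DSP_EHANDLE      = 0x80008000 + 0x0a  = 0x8000800a  (generic failure)
     * CHNL_EBASE       = 0x80040200 + 0x500 = 0x80040700
     * CHNL_E_BADCHANID = CHNL_EBASE + 0x03  = 0x80040703
     *
     * All component codes therefore land inside the
     * DSP_COMP_EBASE..DSP_COMP_ELAST window (0x80040200..0x80047fff).
     */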
*/ ++#define CHNL_E_WAITTIMEOUT (CHNL_EBASE + 0x15) ++ ++/* User supplied hEvent must be specified with pstrEventName attribute */ ++#define CHNL_E_BADUSEREVENT (CHNL_EBASE + 0x16) ++ ++/* Illegal user event name specified */ ++#define CHNL_E_USEREVENTNAME (CHNL_EBASE + 0x17) ++ ++/* Unable to prepare buffer specified */ ++#define CHNL_E_PREPFAILED (CHNL_EBASE + 0x18) ++ ++/* Unable to Unprepare buffer specified */ ++#define CHNL_E_UNPREPFAILED (CHNL_EBASE + 0x19) ++ ++/* FAILURE Codes : SYNC */ ++#define SYNC_EBASE (DSP_COMP_EBASE + 0x600) ++ ++/* Wait on a kernel event failed. */ ++#define SYNC_E_FAIL (SYNC_EBASE + 0x00) ++ ++/* Timeout expired while waiting for event to be signalled. */ ++#define SYNC_E_TIMEOUT (SYNC_EBASE + 0x01) ++ ++/* FAILURE Codes : WMD */ ++#define WMD_EBASE (DSP_COMP_EBASE + 0x700) ++ ++/* A test of hardware assumptions or integrity failed. */ ++#define WMD_E_HARDWARE (WMD_EBASE + 0x00) ++ ++/* One or more configuration parameters violated WMD hardware assumptions. */ ++#define WMD_E_BADCONFIG (WMD_EBASE + 0x01) ++ ++/* Timeout occurred waiting for a response from the hardware. */ ++#define WMD_E_TIMEOUT (WMD_EBASE + 0x02) ++ ++/* FAILURE Codes : REG */ ++#define REG_EBASE (DSP_COMP_EBASE + 0x800) ++ ++/* Invalid subkey parameter. */ ++#define REG_E_INVALIDSUBKEY (REG_EBASE + 0x00) ++ ++/* Invalid entry parameter. */ ++#define REG_E_INVALIDENTRY (REG_EBASE + 0x01) ++ ++/* No more registry values. */ ++#define REG_E_NOMOREITEMS (REG_EBASE + 0x02) ++ ++/* Insufficient space to hold data in registry value. */ ++#define REG_E_MOREDATA (REG_EBASE + 0x03) ++ ++/* FAILURE Codes : KFILE */ ++#define KFILE_EBASE (DSP_COMP_EBASE + 0x900) ++ ++/* Invalid file handle. */ ++#define E_KFILE_INVALIDHANDLE (KFILE_EBASE + 0x01) ++ ++/* Bad file name. */ ++#define E_KFILE_BADFILENAME (KFILE_EBASE + 0x02) ++ ++/* Invalid file mode. */ ++#define E_KFILE_INVALIDMODE (KFILE_EBASE + 0x03) ++ ++/* No resources available. */ ++#define E_KFILE_NORESOURCES (KFILE_EBASE + 0x04) ++ ++/* Invalid file buffer . */ ++#define E_KFILE_INVALIDBUFFER (KFILE_EBASE + 0x05) ++ ++/* Bad origin argument. */ ++#define E_KFILE_BADORIGINFLAG (KFILE_EBASE + 0x06) ++ ++/* Invalid file offset value. */ ++#define E_KFILE_INVALIDOFFSET (KFILE_EBASE + 0x07) ++ ++/* General KFILE error condition */ ++#define E_KFILE_ERROR (KFILE_EBASE + 0x08) ++ ++#endif /* ERRBASE_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gb.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gb.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gb.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gb.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,85 @@ ++/* ++ * gb.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== gb.h ======== ++ * Generic bitmap manager. ++ * ++ *! Revision History ++ *! 
================ ++ */ ++ ++#ifndef GB_ ++#define GB_ ++ ++#define GB_NOBITS (~0) ++#include ++typedef s32 GB_BitNum; ++struct GB_TMap; ++ ++/* ++ * ======== GB_clear ======== ++ * Clear the bit in position bitn in the bitmap map. Bit positions are ++ * zero based. ++ */ ++ ++extern void GB_clear(struct GB_TMap *map, GB_BitNum bitn); ++ ++/* ++ * ======== GB_create ======== ++ * Create a bit map with len bits. Initially all bits are cleared. ++ */ ++ ++extern struct GB_TMap *GB_create(GB_BitNum len); ++ ++/* ++ * ======== GB_delete ======== ++ * Delete previously created bit map ++ */ ++ ++extern void GB_delete(struct GB_TMap *map); ++ ++/* ++ * ======== GB_findandset ======== ++ * Finds a clear bit, sets it, and returns the position ++ */ ++ ++extern GB_BitNum GB_findandset(struct GB_TMap *map); ++ ++/* ++ * ======== GB_minclear ======== ++ * GB_minclear returns the minimum clear bit position. If no bit is ++ * clear, GB_minclear returns -1. ++ */ ++extern GB_BitNum GB_minclear(struct GB_TMap *map); ++ ++/* ++ * ======== GB_set ======== ++ * Set the bit in position bitn in the bitmap map. Bit positions are ++ * zero based. ++ */ ++ ++extern void GB_set(struct GB_TMap *map, GB_BitNum bitn); ++ ++/* ++ * ======== GB_test ======== ++ * Returns TRUE if the bit in position bitn is set in map; otherwise ++ * GB_test returns FALSE. Bit positions are zero based. ++ */ ++ ++extern bool GB_test(struct GB_TMap *map, GB_BitNum bitn); ++ ++#endif /*GB_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/getsection.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/getsection.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/getsection.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/getsection.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,118 @@ ++/* ++ * getsection.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++#ifndef _GETSECTION_H_ ++#define _GETSECTION_H_ ++ ++ ++#include "dynamic_loader.h" ++ ++/* ++ * Get Section Information ++ * ++ * This file provides an API add-on to the dynamic loader that allows the user ++ * to query section information and extract section data from dynamic load ++ * modules. ++ * ++ * NOTE: ++ * Functions in this API assume that the supplied Dynamic_Loader_Stream object ++ * supports the set_file_posn method. ++ */ ++ ++ /* opaque handle for module information */ ++ typedef void *DLOAD_module_info; ++ ++/* ++ * Procedure DLOAD_module_open ++ * ++ * Parameters: ++ * module The input stream that supplies the module image ++ * syms Host-side malloc/free and error reporting functions. ++ * Other methods are unused. ++ * ++ * Effect: ++ * Reads header information from a dynamic loader module using the specified ++ * stream object, and returns a handle for the module information. This ++ * handle may be used in subsequent query calls to obtain information ++ * contained in the module. 
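[Editorial note] gb.h is small enough to show in use. A minimal editorial sketch follows (not part of the patch); the header does not document what GB_findandset() returns when no bit is free, so error handling is omitted here.

    /* Editorial sketch of the gb.h bitmap API. */
    static void gb_example(void)
    {
            struct GB_TMap *ids = GB_create(32);    /* 32 ids, all initially clear */
            GB_BitNum id = GB_findandset(ids);      /* lowest clear bit, now set */

            if (GB_test(ids, id))                   /* reads back as set */
                    GB_clear(ids, id);              /* release the id again */

            GB_delete(ids);
    }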
++ * ++ * Returns: ++ * NULL if an error is encountered, otherwise a module handle for use ++ * in subsequent operations. ++ */ ++ extern DLOAD_module_info DLOAD_module_open(struct Dynamic_Loader_Stream ++ *module, ++ struct Dynamic_Loader_Sym ++ *syms); ++ ++/* ++ * Procedure DLOAD_GetSectionInfo ++ * ++ * Parameters: ++ * minfo Handle from DLOAD_module_open for this module ++ * sectionName Pointer to the string name of the section desired ++ * sectionInfo Address of a section info structure pointer to be initialized ++ * ++ * Effect: ++ * Finds the specified section in the module information, and fills in ++ * the provided LDR_SECTION_INFO structure. ++ * ++ * Returns: ++ * TRUE for success, FALSE for section not found ++ */ ++ extern int DLOAD_GetSectionInfo(DLOAD_module_info minfo, ++ const char *sectionName, ++ const struct LDR_SECTION_INFO ++ ** const sectionInfo); ++ ++/* ++ * Procedure DLOAD_GetSection ++ * ++ * Parameters: ++ * minfo Handle from DLOAD_module_open for this module ++ * sectionInfo Pointer to a section info structure for the desired section ++ * sectionData Buffer to contain the section initialized data ++ * ++ * Effect: ++ * Copies the initialized data for the specified section into the ++ * supplied buffer. ++ * ++ * Returns: ++ * TRUE for success, FALSE for section not found ++ */ ++ extern int DLOAD_GetSection(DLOAD_module_info minfo, ++ const struct LDR_SECTION_INFO *sectionInfo, ++ void *sectionData); ++ ++/* ++ * Procedure DLOAD_module_close ++ * ++ * Parameters: ++ * minfo Handle from DLOAD_module_open for this module ++ * ++ * Effect: ++ * Releases any storage associated with the module handle. On return, ++ * the module handle is invalid. ++ * ++ * Returns: ++ * Zero for success. On error, the number of errors detected is returned. ++ * Individual errors are reported using syms->Error_Report(), where syms was ++ * an argument to DLOAD_module_open ++ */ ++ extern void DLOAD_module_close(DLOAD_module_info minfo); ++ ++#endif /* _GETSECTION_H_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gh.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gh.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gh.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gh.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,37 @@ ++/* ++ * gh.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== gh.h ======== ++ * ++ *! Revision History ++ *! 
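[Editorial note] The four getsection.h entry points compose into a simple open/query/copy/close sequence. An editorial sketch (not part of the patch; dump_section and its arguments are invented for illustration, and note that the DLOAD_module_close prototype returns void even though its comment describes an error count):

    /* Editorial sketch; 'strm' and 'syms' are assumed already-initialized host objects. */
    static int dump_section(struct Dynamic_Loader_Stream *strm,
                            struct Dynamic_Loader_Sym *syms,
                            const char *name, void *buf)
    {
            const struct LDR_SECTION_INFO *info;
            DLOAD_module_info mod = DLOAD_module_open(strm, syms);

            if (!mod)
                    return -1;              /* module header could not be read */

            if (DLOAD_GetSectionInfo(mod, name, &info))
                    DLOAD_GetSection(mod, info, buf); /* caller sized buf from info->size */

            DLOAD_module_close(mod);
            return 0;
    }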
================ ++ */ ++ ++#ifndef GH_ ++#define GH_ ++#include ++ ++extern struct GH_THashTab *GH_create(u16 maxBucket, u16 valSize, ++ u16(*hash) (void *, u16), bool(*match) (void *, void *), ++ void(*delete) (void *)); ++extern void GH_delete(struct GH_THashTab *hashTab); ++extern void GH_exit(void); ++extern void *GH_find(struct GH_THashTab *hashTab, void *key); ++extern void GH_init(void); ++extern void *GH_insert(struct GH_THashTab *hashTab, void *key, void *value); ++#endif /* GH_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,64 @@ ++/* ++ * gs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== gs.h ======== ++ * Memory allocation/release wrappers. This module allows clients to ++ * avoid OS spacific issues related to memory allocation. It also provides ++ * simple diagnostic capabilities to assist in the detection of memory ++ * leaks. ++ *! Revision History ++ *! ================ ++ */ ++ ++#ifndef GS_ ++#define GS_ ++ ++/* ++ * ======== GS_alloc ======== ++ * Alloc size bytes of space. Returns pointer to space ++ * allocated, otherwise NULL. ++ */ ++extern void *GS_alloc(u32 size); ++ ++/* ++ * ======== GS_exit ======== ++ * Module exit. Do not change to "#define GS_init()"; in ++ * some environments this operation must actually do some work! ++ */ ++extern void GS_exit(void); ++ ++/* ++ * ======== GS_free ======== ++ * Free space allocated by GS_alloc() or GS_calloc(). ++ */ ++extern void GS_free(void *ptr); ++ ++/* ++ * ======== GS_frees ======== ++ * Free space allocated by GS_alloc() or GS_calloc() and assert that ++ * the size of the allocation is size bytes. ++ */ ++extern void GS_frees(void *ptr, u32 size); ++ ++/* ++ * ======== GS_init ======== ++ * Module initialization. Do not change to "#define GS_init()"; in ++ * some environments this operation must actually do some work! ++ */ ++extern void GS_init(void); ++ ++#endif /*GS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gt.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gt.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/gt.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/gt.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,317 @@ ++/* ++ * gt.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
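[Editorial note] gh.h only declares the hash-table entry points, so the callback semantics have to be inferred. The sketch below (editorial, not part of the patch) simply assumes each stored value begins with its u32 key, which keeps the guessed match() contract consistent whichever pair of arguments it is handed.

    /* Editorial sketch of plausible gh.h callbacks; semantics are assumptions. */
    static u16 key_hash(void *key, u16 max_bucket)
    {
            return (u16)(*(u32 *)key % max_bucket);
    }

    static bool key_match(void *a, void *b)
    {
            return *(u32 *)a == *(u32 *)b;  /* values assumed to start with their key */
    }

    static void val_delete(void *val)
    {
            /* per-value cleanup; nothing to do in this sketch */
    }

    static struct GH_THashTab *make_table(void)
    {
            GH_init();
            return GH_create(16, sizeof(u32), key_hash, key_match, val_delete);
    }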
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== gt.h ======== ++ * Purpose: ++ * There are two definitions that affect which portions of trace ++ * are acutally compiled into the client: GT_TRACE and GT_ASSERT. If ++ * GT_TRACE is set to 0 then all trace statements (except for assertions) ++ * will be compiled out of the client. If GT_ASSERT is set to 0 then ++ * assertions will be compiled out of the client. GT_ASSERT can not be ++ * set to 0 unless GT_TRACE is also set to 0 (i.e. GT_TRACE == 1 implies ++ * GT_ASSERT == 1). ++ * ++ *! Revision History ++ *! ================ ++ *! 02-Feb-2000 rr: Renamed this file to gtce.h. GT CLASS and trace definitions ++ *! are WinCE Specific. ++ *! 03-Jan-1997 ge Replaced "GT_" prefix to GT_Config structure members ++ *! to eliminate preprocessor confusion with other macros. ++ */ ++#include ++#ifndef GT_ ++#define GT_ ++ ++#ifndef GT_TRACE ++#define GT_TRACE 0 /* 0 = "trace compiled out"; 1 = "trace active" */ ++#endif ++ ++#include ++ ++#if !defined(GT_ASSERT) || GT_TRACE ++#define GT_ASSERT 1 ++#endif ++ ++struct GT_Config { ++ Fxn PRINTFXN; ++ Fxn PIDFXN; ++ Fxn TIDFXN; ++ Fxn ERRORFXN; ++}; ++ ++extern struct GT_Config *GT; ++ ++struct GT_Mask { ++ char *modName; ++ u8 *flags; ++} ; ++ ++/* ++ * New GT Class defenitions. ++ * ++ * The following are the explanations and how it could be used in the code ++ * ++ * - GT_ENTER On Entry to Functions ++ * ++ * - GT_1CLASS Display level of debugging status- Object/Automatic ++ * variables ++ * - GT_2CLASS ---- do ---- ++ * ++ * - GT_3CLASS ---- do ---- + It can be used(recommended) for debug ++ * status in the ISR, IST ++ * - GT_4CLASS ---- do ---- ++ * ++ * - GT_5CLASS Display entry for module init/exit functions ++ * ++ * - GT_6CLASS Warn whenever SERVICES function fails ++ * ++ * - GT_7CLASS Warn failure of Critical failures ++ * ++ */ ++ ++#define GT_ENTER ((u8)0x01) ++#define GT_1CLASS ((u8)0x02) ++#define GT_2CLASS ((u8)0x04) ++#define GT_3CLASS ((u8)0x08) ++#define GT_4CLASS ((u8)0x10) ++#define GT_5CLASS ((u8)0x20) ++#define GT_6CLASS ((u8)0x40) ++#define GT_7CLASS ((u8)0x80) ++ ++#ifdef _LINT_ ++ ++/* LINTLIBRARY */ ++ ++/* ++ * ======== GT_assert ======== ++ */ ++/* ARGSUSED */ ++void GT_assert(struct GT_Mask mask, s32 expr) ++{ ++} ++ ++/* ++ * ======== GT_config ======== ++ */ ++/* ARGSUSED */ ++void GT_config(struct GT_Config config) ++{ ++} ++ ++/* ++ * ======== GT_create ======== ++ */ ++/* ARGSUSED */ ++void GT_create(struct GT_Mask *mask, char *modName) ++{ ++} ++ ++/* ++ * ======== GT_curLine ======== ++ * Purpose: ++ * Returns the current source code line number. Is useful for performing ++ * branch testing using trace. 
For example, ++ * ++ * GT_1trace(curTrace, GT_1CLASS, ++ * "in module XX_mod, executing line %u\n", GT_curLine()); ++ */ ++/* ARGSUSED */ ++u16 GT_curLine(void) ++{ ++ return (u16)NULL; ++} ++ ++/* ++ * ======== GT_exit ======== ++ */ ++/* ARGSUSED */ ++void GT_exit(void) ++{ ++} ++ ++/* ++ * ======== GT_init ======== ++ */ ++/* ARGSUSED */ ++void GT_init(void) ++{ ++} ++ ++/* ++ * ======== GT_query ======== ++ */ ++/* ARGSUSED */ ++bool GT_query(struct GT_Mask mask, u8 class) ++{ ++ return false; ++} ++ ++/* ++ * ======== GT_set ======== ++ * sets trace mask according to settings ++ */ ++ ++/* ARGSUSED */ ++void GT_set(char *settings) ++{ ++} ++ ++/* ++ * ======== GT_setprintf ======== ++ * sets printf function ++ */ ++ ++/* ARGSUSED */ ++void GT_setprintf(Fxn fxn) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_0trace(struct GT_Mask mask, u8 class, char *format) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_1trace(struct GT_Mask mask, u8 class, char *format, ...) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_2trace(struct GT_Mask mask, u8 class, char *format, ...) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_3trace(struct GT_Mask mask, u8 class, char *format, ...) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_4trace(struct GT_Mask mask, u8 class, char *format, ...) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_5trace(struct GT_Mask mask, u8 class, char *format, ...) ++{ ++} ++ ++/* ARGSUSED */ ++void GT_6trace(struct GT_Mask mask, u8 class, char *format, ...) ++{ ++} ++ ++#else ++ ++#define GT_BOUND 26 /* 26 letters in alphabet */ ++ ++extern void _GT_create(struct GT_Mask *mask, char *modName); ++ ++#define GT_exit() ++ ++extern void GT_init(void); ++extern void _GT_set(char *str); ++extern s32 _GT_trace(struct GT_Mask *mask, char *format, ...); ++ ++#if GT_ASSERT == 0 ++ ++#define GT_assert(mask, expr) ++#define GT_config(config) ++#define GT_configInit(config) ++#define GT_seterror(fxn) ++ ++#else ++ ++extern struct GT_Config _GT_params; ++ ++#define GT_assert(mask, expr) \ ++ (!(expr) ? \ ++ printk("assertion violation: %s, line %d\n", \ ++ __FILE__, __LINE__), NULL : NULL) ++ ++#define GT_config(config) (_GT_params = *(config)) ++#define GT_configInit(config) (*(config) = _GT_params) ++#define GT_seterror(fxn) (_GT_params.ERRORFXN = (Fxn)(fxn)) ++ ++#endif ++ ++#if GT_TRACE == 0 ++ ++#define GT_curLine() ((u16)__LINE__) ++#define GT_create(mask, modName) ++#define GT_exit() ++#define GT_init() ++#define GT_set(settings) ++#define GT_setprintf(fxn) ++ ++#define GT_query(mask, class) false ++ ++#define GT_0trace(mask, class, format) do {} while (0) ++#define GT_1trace(mask, class, format, arg1) do {} while (0) ++#define GT_2trace(mask, class, format, arg1, arg2) do {} while (0) ++#define GT_3trace(mask, class, format, arg1, arg2, arg3) do {} while (0) ++#define GT_4trace(mask, class, format, arg1, arg2, arg3, arg4) do {} while (0) ++#define GT_5trace(mask, class, format, arg1, arg2, arg3, arg4, arg5) \ ++ do {} while (0) ++#define GT_6trace(mask, class, format, arg1, arg2, arg3, arg4, arg5, arg6) \ ++ do {} while (0) ++ ++#else /* GT_TRACE == 1 */ ++ ++ ++#define GT_create(mask, modName) _GT_create((mask), (modName)) ++#define GT_curLine() ((u16)__LINE__) ++#define GT_set(settings) _GT_set(settings) ++#define GT_setprintf(fxn) (_GT_params.PRINTFXN = (Fxn)(fxn)) ++ ++#define GT_query(mask, class) ((*(mask).flags & (class))) ++ ++#define GT_0trace(mask, class, format) \ ++ ((*(mask).flags & (class)) ? \ ++ _GT_trace(&(mask), (format)) : 0) ++ ++#define GT_1trace(mask, class, format, arg1) \ ++ ((*(mask).flags & (class)) ? 
\ ++ _GT_trace(&(mask), (format), (arg1)) : 0) ++ ++#define GT_2trace(mask, class, format, arg1, arg2) \ ++ ((*(mask).flags & (class)) ? \ ++ _GT_trace(&(mask), (format), (arg1), (arg2)) : 0) ++ ++#define GT_3trace(mask, class, format, arg1, arg2, arg3) \ ++ ((*(mask).flags & (class)) ? \ ++ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3)) : 0) ++ ++#define GT_4trace(mask, class, format, arg1, arg2, arg3, arg4) \ ++ ((*(mask).flags & (class)) ? \ ++ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3), (arg4)) : 0) ++ ++#define GT_5trace(mask, class, format, arg1, arg2, arg3, arg4, arg5) \ ++ ((*(mask).flags & (class)) ? \ ++ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3), (arg4), (arg5)) : 0) ++ ++#define GT_6trace(mask, class, format, arg1, arg2, arg3, arg4, arg5, arg6) \ ++ ((*(mask).flags & (class)) ? \ ++ _GT_trace(&(mask), (format), (arg1), (arg2), (arg3), (arg4), (arg5), \ ++ (arg6)) : 0) ++ ++#endif /* GT_TRACE */ ++ ++#endif /* _LINT_ */ ++ ++#endif /* GTCE_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/host_os.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/host_os.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/host_os.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/host_os.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,90 @@ ++/* ++ * host_os.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== windows.h ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Mar-2004 sb Added cacheflush.h to support Dynamic Memory Mapping feature ++ *! 
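[Editorial note] In practice a module creates one GT_Mask and routes everything through the GT_Ntrace macros, so the calls compile away entirely when GT_TRACE is 0. An editorial sketch of that convention (not part of the patch; the mask name and module tag are invented):

    /* Editorial sketch of gt.h usage. */
    static struct GT_Mask exTrace;          /* left unused when GT_TRACE == 0 */

    static int example_init(void)
    {
            GT_create(&exTrace, "EXMP");    /* register this module's trace mask */
            GT_0trace(exTrace, GT_ENTER, "example_init: entered\n");
            GT_1trace(exTrace, GT_5CLASS, "example_init: at line %u\n",
                      GT_curLine());
            GT_assert(exTrace, 1 + 1 == 2); /* compiled out only if GT_ASSERT == 0 */
            return 0;
    }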
16-Feb-2004 sb Added headers required for consistent_alloc ++ */ ++ ++#ifndef _HOST_OS_H_ ++#define _HOST_OS_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* TODO -- Remove, once BP defines them */ ++#define INT_MAIL_MPU_IRQ 26 ++#define INT_DSP_MMU_IRQ 28 ++ ++struct dspbridge_platform_data { ++ void (*dsp_set_min_opp)(u8 opp_id); ++ u8 (*dsp_get_opp)(void); ++ void (*cpu_set_freq)(unsigned long f); ++ unsigned long (*cpu_get_freq)(void); ++ unsigned long mpu_speed[6]; ++ ++ u32 phys_mempool_base; ++ u32 phys_mempool_size; ++}; ++ ++#define PRCM_VDD1 1 ++ ++extern struct platform_device *omap_dspbridge_dev; ++ ++#if defined(CONFIG_MPU_BRIDGE) || defined(CONFIG_MPU_BRIDGE_MODULE) ++extern void dspbridge_reserve_sdram(void); ++#else ++static inline void dspbridge_reserve_sdram(void) {} ++#endif ++ ++extern unsigned long dspbridge_get_mempool_base(void); ++#endif ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnldefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnldefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnldefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnldefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,92 @@ ++/* ++ * chnldefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnldefs.h ======== ++ * Purpose: ++ * System-wide channel objects and constants. ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Jan-2002 ag Added cBufSize to IOC. ++ *! 05-Jan-2000 ag: Text format cleanup. ++ *! 02-Dec-1999 ag: Added new chnl attribute pstrEventName. ++ *! 12-Nov-1999 kc: Enabled hEvent attribute for tests. ++ *! 01-Nov-1999 ag: hEvent attribute not supported(yet). ++ *! 16-Jan-1997 gp: Moved private stuff into chnlpriv.h ++ *! 14-Jan-1997 gp: Updated based on code review feedback: ++ *! Removed CHNL_MODENOWAIT, CHNL_MODEDIRECT, ++ *! 03-Jan-1997 gp: Added channel class library types. ++ *! 14-Dec-1996 gp: Moved uChnlId field from CHNL_ATTRS to CHNL_Open(). ++ *! 10-Dec-1996 gp: Added CHNL_IsTimedOut() macro. ++ *! 14-Nov-1996 gp: Renamed from wsxchnl.h. ++ *! 09-Sep-1996 gp: Added hReserved2 field to CHNL_ATTRS. Updated CHNL_INFO. ++ *! 10-Jul-1996 gp: Created from channel.h. ++ */ ++ ++#ifndef CHNLDEFS_ ++#define CHNLDEFS_ ++ ++/* Channel id option. */ ++#define CHNL_PICKFREE (~0UL) /* Let manager pick a free channel. */ ++ ++/* Channel manager limits: */ ++#define CHNL_INITIOREQS 4 /* Default # of I/O requests. */ ++ ++/* Channel modes */ ++#define CHNL_MODETODSP 0x0000 /* Data streaming to the DSP. */ ++#define CHNL_MODEFROMDSP 0x0001 /* Data streaming from the DSP. 
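[Editorial note] host_os.h also carries the board-level glue: dspbridge_platform_data is what machine code hands to the bridge driver. A purely editorial sketch of filling it (not part of the patch; the carveout address and size are placeholders, not values from this kernel):

    /* Editorial sketch of board-side platform data; values are placeholders. */
    static struct dspbridge_platform_data dspbridge_pdata = {
            .phys_mempool_base = 0x87000000,        /* example DSP carveout start */
            .phys_mempool_size = 0x00600000,        /* example carveout size (6 MB) */
            /* .dsp_set_min_opp / .dsp_get_opp / .cpu_set_freq / .cpu_get_freq
             * would point at the platform's DVFS hooks. */
    };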
*/ ++ ++/* GetIOCompletion flags */ ++#define CHNL_IOCINFINITE 0xffffffff /* Wait forever for IO completion. */ ++#define CHNL_IOCNOWAIT 0x0 /* Dequeue an IOC, if available. */ ++ ++/* IO Completion Record status: */ ++#define CHNL_IOCSTATCOMPLETE 0x0000 /* IO Completed. */ ++#define CHNL_IOCSTATCANCEL 0x0002 /* IO was cancelled */ ++#define CHNL_IOCSTATTIMEOUT 0x0008 /* Wait for IOC timed out. */ ++#define CHNL_IOCSTATEOS 0x8000 /* End Of Stream reached. */ ++ ++/* Macros for checking I/O Completion status: */ ++#define CHNL_IsEOS(ioc) (ioc.status & CHNL_IOCSTATEOS) ++#define CHNL_IsIOComplete(ioc) (!(ioc.status & ~CHNL_IOCSTATEOS)) ++#define CHNL_IsIOCancelled(ioc) (ioc.status & CHNL_IOCSTATCANCEL) ++#define CHNL_IsTimedOut(ioc) (ioc.status & CHNL_IOCSTATTIMEOUT) ++ ++/* CHNL types: */ ++ typedef u32 CHNL_MODE; /* Channel transfer mode. */ ++ ++/* Channel attributes: */ ++ struct CHNL_ATTRS { ++ u32 uIOReqs; /* Max # of preallocated I/O requests. */ ++ HANDLE hEvent; /* User supplied auto-reset event object. */ ++ char *pstrEventName; /* Ptr to name of user event object. */ ++ HANDLE hReserved1; /* Reserved for future use. */ ++ u32 hReserved2; /* Reserved for future use. */ ++ ++ }; ++ ++/* I/O completion record: */ ++ struct CHNL_IOC { ++ void *pBuf; /* Buffer to be filled/emptied. */ ++ u32 cBytes; /* Bytes transferred. */ ++ u32 cBufSize; /* Actual buffer size in bytes */ ++ u32 status; /* Status of IO completion. */ ++ u32 dwArg; /* User argument associated with pBuf. */ ++ } ; ++ ++#endif /* CHNLDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnl.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnl.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnl.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnl.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,170 @@ ++/* ++ * chnl.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnl.h ======== ++ * Description: ++ * WCD channel interface: multiplexes data streams through the single ++ * physical link managed by a mini-driver. ++ * ++ * Public Functions: ++ * CHNL_AddIOReq ++ * CHNL_AllocBuffer ++ * CHNL_CancelIO ++ * CHNL_Close ++ * CHNL_CloseOrphans ++ * CHNL_Create ++ * CHNL_Destroy ++ * CHNL_Exit ++ * CHNL_FlushIO ++ * CHNL_FreeBuffer ++ * CHNL_GetEventHandle ++ * CHNL_GetHandle ++ * CHNL_GetIOCompletion ++ * CHNL_GetId ++ * CHNL_GetMgr ++ * CHNL_GetMode ++ * CHNL_GetPosition ++ * CHNL_GetProcessHandle ++ * CHNL_Init ++ * CHNL_Open ++ * ++ * Notes: ++ * See DSP API chnl.h for more details. ++ * ++ *! Revision History: ++ *! ================ ++ *! 14-Jan-1997 gp: Updated based on code review feedback. ++ *! 24-Oct-1996 gp: Move CloseOrphans into here from dspsys. ++ *! 09-Sep-1996 gp: Added CHNL_GetProcessID() and CHNL_GetHandle(). ++ *! 10-Jul-1996 gp: Created. 
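[Editorial note] The status macros above are meant to be applied to a dequeued CHNL_IOC. An editorial sketch of a consumer (not part of the patch; consume() is a hypothetical stand-in for whatever uses the data):

    /* Editorial sketch of checking an I/O completion record. */
    static void consume(void *buf, u32 nbytes)
    {
            /* stand-in for the real data consumer */
    }

    static void handle_ioc(struct CHNL_IOC ioc, bool *stream_ended)
    {
            if (CHNL_IsTimedOut(ioc))
                    return;                         /* CHNL_IOCNOWAIT and nothing was queued */

            if (CHNL_IsIOComplete(ioc))
                    consume(ioc.pBuf, ioc.cBytes);  /* ioc.dwArg still identifies the request */

            if (CHNL_IsEOS(ioc))
                    *stream_ended = true;           /* DSP marked end of stream */
    }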
++ */ ++ ++#ifndef CHNL_ ++#define CHNL_ ++ ++#include ++ ++/* ++ * ======== CHNL_Close ======== ++ * Purpose: ++ * Ensures all pending I/O on this channel is cancelled, discards all ++ * queued I/O completion notifications, then frees the resources allocated ++ * for this channel, and makes the corresponding logical channel id ++ * available for subsequent use. ++ * Parameters: ++ * hChnl: Channel object handle. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnl. ++ * Requires: ++ * CHNL_Init(void) called. ++ * No thread must be blocked on this channel's I/O completion event. ++ * Ensures: ++ * DSP_SOK: The I/O completion event for this channel is freed. ++ * hChnl is no longer valid. ++ */ ++ extern DSP_STATUS CHNL_Close(struct CHNL_OBJECT *hChnl); ++ ++ ++/* ++ * ======== CHNL_Create ======== ++ * Purpose: ++ * Create a channel manager object, responsible for opening new channels ++ * and closing old ones for a given board. ++ * Parameters: ++ * phChnlMgr: Location to store a channel manager object on output. ++ * hDevObject: Handle to a device object. ++ * pMgrAttrs: Channel manager attributes. ++ * pMgrAttrs->cChannels: Max channels ++ * pMgrAttrs->bIRQ: Channel's I/O IRQ number. ++ * pMgrAttrs->fShared: TRUE if the IRQ is shareable. ++ * pMgrAttrs->uWordSize: DSP Word size in equivalent PC bytes.. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: hDevObject is invalid. ++ * DSP_EINVALIDARG: cChannels is 0. ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * CHNL_E_ISR: Unable to plug channel ISR for configured IRQ. ++ * CHNL_E_MAXCHANNELS: This manager cannot handle this many channels. ++ * CHNL_E_INVALIDIRQ: Invalid IRQ number. Must be 0 <= bIRQ <= 15. ++ * CHNL_E_INVALIDWORDSIZE: Invalid DSP word size. Must be > 0. ++ * CHNL_E_INVALIDMEMBASE: Invalid base address for DSP communications. ++ * CHNL_E_MGREXISTS: Channel manager already exists for this device. ++ * Requires: ++ * CHNL_Init(void) called. ++ * phChnlMgr != NULL. ++ * pMgrAttrs != NULL. ++ * Ensures: ++ * DSP_SOK: Subsequent calls to CHNL_Create() for the same ++ * board without an intervening call to ++ * CHNL_Destroy() will fail. ++ */ ++ extern DSP_STATUS CHNL_Create(OUT struct CHNL_MGR **phChnlMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CHNL_MGRATTRS *pMgrAttrs); ++ ++/* ++ * ======== CHNL_Destroy ======== ++ * Purpose: ++ * Close all open channels, and destroy the channel manager. ++ * Parameters: ++ * hChnlMgr: Channel manager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: hChnlMgr was invalid. ++ * Requires: ++ * CHNL_Init(void) called. ++ * Ensures: ++ * DSP_SOK: Cancels I/O on each open channel. ++ * Closes each open channel. ++ * CHNL_Create may subsequently be called for the ++ * same board. ++ */ ++ extern DSP_STATUS CHNL_Destroy(struct CHNL_MGR *hChnlMgr); ++ ++/* ++ * ======== CHNL_Exit ======== ++ * Purpose: ++ * Discontinue usage of the CHNL module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * CHNL_Init(void) previously called. ++ * Ensures: ++ * Resources, if any acquired in CHNL_Init(void), are freed when the last ++ * client of CHNL calls CHNL_Exit(void). ++ */ ++ extern void CHNL_Exit(void); ++ ++ ++/* ++ * ======== CHNL_Init ======== ++ * Purpose: ++ * Initialize the CHNL module's private state. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occurred. ++ * Requires: ++ * Ensures: ++ * A requirement for each of the other public CHNL functions. 
++ */ ++ extern bool CHNL_Init(void); ++ ++ ++ ++#endif /* CHNL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnlpriv.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnlpriv.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnlpriv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnlpriv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,136 @@ ++/* ++ * chnlpriv.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnlpriv.h ======== ++ * Description: ++ * Private channel header shared between DSPSYS, WCD and WMD modules. ++ * ++ * Public Functions: ++ * None. ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 05-Jan-2002 ag Added cChannels(total # of chnls) to CHNL_MGRINFO struct. ++ *! Added private CHNL_[PCPY][ZCPY][DDMA]. ++ *! 17-Nov-2000 jeh Removed IRQ, shared memory from CHNL_MGRATTRS, since these ++ *! now belong to IO_ATTRS. ++ *! 21-Jan-2000 ag: Code review comments added. ++ *! 05-Jan-2000 ag: Text format cleanup. ++ *! 11-Dec-1999 ag: Added CHNL_MAXLOCKPAGES for CHNL_PrepareBuffer(). ++ *! 04-Dec-1999 ag: Added CHNL_MAXEVTNAMELEN for i/o compl named event support. ++ *! 01-Nov-1999 ag: CHNL_MAXCHANNELS set to 16 for 16-bit DSPs. ++ *! 27-Oct-1997 cr: Expanded CHNL_MAXIRQ from 0x0f to 0xff. ++ *! 16-Jan-1997 gp: Moved symbols into here from chnldefs.h. ++ *! 03-Jan-1997 gp: Added CHNL_MAXIRQ define. ++ *! 09-Dec-1996 gp: Removed CHNL_STATEIDLE. ++ *! 15-Jul-1996 gp: Created. ++ */ ++ ++#ifndef CHNLPRIV_ ++#define CHNLPRIV_ ++ ++#include ++#include ++#include ++ ++/* CHNL Object validation signatures: */ ++#define CHNL_MGRSIGNATURE 0x52474D43 /* "CMGR" (in reverse). */ ++#define CHNL_SIGNATURE 0x4C4E4843 /* "CHNL" (in reverse). */ ++ ++/* Channel manager limits: */ ++#define CHNL_MAXCHANNELS 32 /* Max channels available per transport */ ++ ++ ++/* ++ * Trans port channel Id definitions:(must match dsp-side). ++ * ++ * For CHNL_MAXCHANNELS = 16: ++ * ++ * ChnlIds: ++ * 0-15 (PCPY) - transport 0) ++ * 16-31 (DDMA) - transport 1) ++ * 32-47 (ZCPY) - transport 2) ++ */ ++#define CHNL_PCPY 0 /* Proc-copy transport 0 */ ++ ++#define CHNL_MAXIRQ 0xff /* Arbitrarily large number. */ ++ ++/* The following modes are private: */ ++#define CHNL_MODEUSEREVENT 0x1000 /* User provided the channel event. */ ++#define CHNL_MODEMASK 0x1001 ++ ++/* Higher level channel states: */ ++#define CHNL_STATEREADY 0x0000 /* Channel ready for I/O. */ ++#define CHNL_STATECANCEL 0x0001 /* I/O was cancelled. */ ++#define CHNL_STATEEOS 0x0002 /* End Of Stream reached. */ ++ ++/* Determine if user supplied an event for this channel: */ ++#define CHNL_IsUserEvent(mode) (mode & CHNL_MODEUSEREVENT) ++ ++/* Macros for checking mode: */ ++#define CHNL_IsInput(mode) (mode & CHNL_MODEFROMDSP) ++#define CHNL_IsOutput(mode) (!CHNL_IsInput(mode)) ++ ++/* Types of channel class libraries: */ ++#define CHNL_TYPESM 1 /* Shared memory driver. 
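[Editorial note] Taken together, the chnl.h comments describe a strict bring-up order: CHNL_Init, then CHNL_Create per device, with CHNL_Destroy/CHNL_Exit unwinding it. An editorial sketch (not part of the patch; the word size is illustrative, and CHNL_MGRATTRS is the two-field struct declared further down in chnlpriv.h):

    /* Editorial sketch of the documented bring-up order; hDev is assumed valid. */
    static DSP_STATUS bring_up_channels(struct DEV_OBJECT *hDev,
                                        struct CHNL_MGR **mgr)
    {
            struct CHNL_MGRATTRS attrs = {
                    .cChannels = CHNL_MAXCHANNELS,  /* 32, per chnlpriv.h */
                    .uWordSize = 2,                 /* example DSP word size in bytes */
            };
            DSP_STATUS status;

            if (!CHNL_Init())
                    return DSP_EFAIL;

            status = CHNL_Create(mgr, hDev, &attrs);
            if (status != DSP_SOK)
                    CHNL_Exit();                    /* undo CHNL_Init on failure */

            return status;
    }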
*/ ++#define CHNL_TYPEBM 2 /* Bus Mastering driver. */ ++ ++/* Max string length of channel I/O completion event name - change if needed */ ++#define CHNL_MAXEVTNAMELEN 32 ++ ++/* Max memory pages lockable in CHNL_PrepareBuffer() - change if needed */ ++#define CHNL_MAXLOCKPAGES 64 ++ ++/* Channel info. */ ++ struct CHNL_INFO { ++ struct CHNL_MGR *hChnlMgr; /* Owning channel manager. */ ++ u32 dwID; /* Channel ID. */ ++ HANDLE hEvent; /* Channel I/O completion event. */ ++ /*Abstraction of I/O completion event.*/ ++ struct SYNC_OBJECT *hSyncEvent; ++ u32 dwMode; /* Channel mode. */ ++ u32 dwState; /* Current channel state. */ ++ u32 cPosition; /* Total bytes transferred. */ ++ u32 cIOCs; /* Number of IOCs in queue. */ ++ u32 cIOReqs; /* Number of IO Requests in queue. */ ++ u32 hProcess; /* Process owning this channel. */ ++ /* ++ * Name of channel I/O completion event. Not required in Linux ++ */ ++ char szEventName[CHNL_MAXEVTNAMELEN + 1]; ++ } ; ++ ++/* Channel manager info: */ ++ struct CHNL_MGRINFO { ++ u32 dwType; /* Type of channel class library. */ ++ /* Channel handle, given the channel id. */ ++ struct CHNL_OBJECT *hChnl; ++ u32 cOpenChannels; /* Number of open channels. */ ++ u32 cChannels; /* total # of chnls supported */ ++ } ; ++ ++/* Channel Manager Attrs: */ ++ struct CHNL_MGRATTRS { ++ /* Max number of channels this manager can use. */ ++ u32 cChannels; ++ u32 uWordSize; /* DSP Word size. */ ++ } ; ++ ++#endif /* CHNLPRIV_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnl_sm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnl_sm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/chnl_sm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/chnl_sm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,168 @@ ++/* ++ * chnl_sm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnl_sm.h ======== ++ * Description: ++ * Prototypes for channel lower edge functions for a WinBRIDGE mini driver ++ * implementing data transfer via shared memory. ++ * ++ * Public Functions: ++ * CHNLSM_DisableInterrupt; ++ * CHNLSM_EnableInterrupt; ++ * CHNLSM_ISR; ++ * CHNLSM_Read; ++ * CHNLSM_UpdateSHMLength; ++ * CHNLSM_Write; ++ * ++ * Notes: ++ * These lower edge functions must be implemented by the WMD writer. ++ * Currently, CHNLSM_Read() and CHNLSM_Write() are not called, but must ++ * be defined to link. ++ * ++ */ ++ ++#ifndef CHNLSM_ ++#define CHNLSM_ ++ ++#include ++ ++/* ++ * ======== CHNLSM_DisableInterrupt ======== ++ * Purpose: ++ * Disable interrupts from the DSP board to the PC. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * Returns: ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CHNLSM_DisableInterrupt(struct WMD_DEV_CONTEXT* ++ hDevContext); ++ ++/* ++ * ======== CHNLSM_EnableInterrupt ======== ++ * Purpose: ++ * Enable interrupts from the DSP board to the PC. 
++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * Returns: ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CHNLSM_EnableInterrupt(struct WMD_DEV_CONTEXT* ++ hDevContext); ++ ++/* ++ * ======== CHNLSM_InterruptDSP2 ======== ++ * Purpose: ++ * Set interrupt value & send an interrupt to the DSP processor(s). ++ * This is typicaly used when mailbox interrupt mechanisms allow data ++ * to be associated with interrupt such as for OMAP's CMD/DATA regs. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * wMbVal: Value associated with interrupt(e.g. mailbox value). ++ * Returns: ++ * DSP_SOK: Interrupt sent; ++ * else: Unable to send interrupt. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CHNLSM_InterruptDSP2(struct WMD_DEV_CONTEXT* ++ hDevContext, u16 wMbVal); ++ ++/* ++ * ======== CHNLSM_ISR ======== ++ * Purpose: ++ * Mini-driver's ISR, called by WCD when the board interrupts the host. ++ * Parameters: ++ * hDevContext: Handle to the mini-driver defined device info. ++ * pfSchedDPC: Set to TRUE to schedule a deferred procedure call ++ * to advance the channel protocol. The channel class ++ * library will call the WMD's CHNLSM_DPC routine during ++ * its own DPC, before dispatching I/O. ++ * The channel class library should ignore *pfSchedDPC when ++ * CHNLSM_ISR returns FALSE. ++ * pwMBRegVal: Value of mailbox register. ++ * Returns: ++ * TRUE if this interrupt is was generated by the DSP board. ++ * FALSE otherwise. ++ * Requires: ++ * Interrupts to the host processor are disabled on entry. ++ * Must only call functions which are in page locked memory. ++ * Must only call asynchronous OS services. ++ * The EOI for this interrupt has already been sent to the PIC. ++ * Ensures: ++ * If the interrupt is *not* shared, this routine must return TRUE. ++ */ ++ extern bool CHNLSM_ISR(struct WMD_DEV_CONTEXT *hDevContext, ++ OUT bool *pfSchedDPC, ++ OUT u16 *pwIntrVal); ++ ++/* ++ * ======== CHNLSM_Read ======== ++ * Purpose: ++ * Read data from DSP board memory into a Host buffer. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * pHostBuf: Pointer to host buffer (Destination). ++ * dwDSPAddr: Address on DSP board (Source). ++ * ulNumBytes: Number of bytes to transfer. ++ * Returns: ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CHNLSM_Read(struct WMD_DEV_CONTEXT *hDevContext, ++ OUT u8 *pHostBuf, ++ u32 dwDSPAddr, u32 ulNumBytes); ++ ++/* ++ * ======== CHNLSM_UpdateSHMLength ======== ++ * Purpose: ++ * Allow the minidriver a chance to override the SHM length as reported ++ * to the mini driver (chnl_sm.lib) by Windows Plug and Play. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * pSHMLength: Pointer to size of SHM window (in DSP words). ++ * Returns: ++ * TRUE if pSHMLength updated; FALSE otherwise. ++ * Requires: ++ * pSHMLength != NULL. ++ * Ensures: ++ * No more than sizeof(u32) bytes written to *pSHMLength ++ */ ++ extern bool CHNLSM_UpdateSHMLength(struct WMD_DEV_CONTEXT *hDevContext, ++ IN OUT u32 *pSHMLength); ++ ++/* ++ * ======== CHNLSM_Write ======== ++ * Purpose: ++ * Write data from a Host buffer to DSP board memory. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * pHostBuf: Pointer to host buffer (Source). ++ * dwDSPAddr: Address on DSP board (Destination). ++ * ulNumBytes: Number of bytes to transfer. 
++ * Returns: ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS CHNLSM_Write(struct WMD_DEV_CONTEXT *hDevContext, ++ IN u8 *pHostBuf, ++ u32 dwDSPAddr, u32 ulNumBytes); ++ ++#endif /* CHNLSM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/_chnl_sm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,212 @@ ++/* ++ * _chnl_sm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _chnl_sm.h ======== ++ * Description: ++ * Private header file defining channel manager and channel objects for ++ * a shared memory channel driver. ++ * ++ * Public Functions: ++ * None. ++ * ++ * Notes: ++ * Shared between the modules implementing the shared memory channel class ++ * library. ++ * ++ *! Revision History: ++ *! ================ ++ *! 15-Oct-2002 kc Removed legacy PERF code. ++ *! 12-Jan-2002 ag Removed unused gppReqIO & ddmaChnlId DDMA fields. ++ *! Added zero-copy chnl descriptor array: zchnldesc. ++ *! 21-Dec-2001 ag Moved descPaGpp to private chnl obj from chnl descriptor. ++ *! 20-May-2001 ag/jeh Removed fShmSyms field from CHNL_MGR. ++ *! 04-Feb-2001 ag DSP-DMA support added. ++ *! 26-Oct-2000 jeh Added arg and resvd to SHM control structure. Added dwArg ++ *! to CHNL_IRP. ++ *! 16-Oct-2000 jeh Removed #ifdef DEBUG from around channel object's cIOCs ++ *! field, added cIOReqs. ++ *! 20-Jan-2000 ag: Incorporated code review comments. ++ *! 05-Jan-2000 ag: Text format cleanup. ++ *! 03-Nov-1999 ag: Added szEventName[] to CHNL object for name event support. ++ *! 02-Nov-1999 ag: _SHM_BEG & _END Syms from COFF now used for IO and SM CLASS. ++ *! 27-Oct-1999 jeh Define SHM structure to work for 16-bit targets. ++ *! 25-May-1999 jg: Added target side symbol names for share memory buffer ++ *! 03-Jan-1997 gp: Added fSharedIRQ field. ++ *! 22-Oct-1996 gp: Made dwProcessID a handle. ++ *! 09-Sep-1996 gp: Added dwProcessID field to CHNL_OBJECT. ++ *! 13-Aug-1996 gp: Created. ++ */ ++ ++#ifndef _CHNL_SM_ ++#define _CHNL_SM_ ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* ++ * These target side symbols define the beginning and ending addresses ++ * of shared memory buffer. They are defined in the *cfg.cmd file by ++ * cdb code. 
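++ *
++ * As a rough sketch, the Bridge side can resolve these names against the
++ * loaded DSP image to locate the SM window; a DEV_GetSymbol()-style lookup
++ * is assumed here purely for illustration and is not declared by this
++ * header:
++ *
++ *   u32 ulShmBase = 0, ulShmLimit = 0;
++ *   DEV_GetSymbol(hDevObject, CHNL_SHARED_BUFFER_BASE_SYM, &ulShmBase);
++ *   DEV_GetSymbol(hDevObject, CHNL_SHARED_BUFFER_LIMIT_SYM, &ulShmLimit);
++ *   ... the window then spans [ulShmBase, ulShmLimit) in DSP address space ...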
++ */ ++#define CHNL_SHARED_BUFFER_BASE_SYM "_SHM_BEG" ++#define CHNL_SHARED_BUFFER_LIMIT_SYM "_SHM_END" ++#define BRIDGEINIT_BIOSGPTIMER "_BRIDGEINIT_BIOSGPTIMER" ++#define BRIDGEINIT_LOADMON_GPTIMER "_BRIDGEINIT_LOADMON_GPTIMER" ++ ++#ifndef _CHNL_WORDSIZE ++#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 2 bytes/word */ ++#endif ++ ++#define MAXOPPS 16 ++ ++struct oppTableEntry { ++ u32 voltage; ++ u32 frequency; ++ u32 minFreq; ++ u32 maxFreq; ++} ; ++ ++struct oppStruct { ++ u32 currOppPt; ++ u32 numOppPts; ++ struct oppTableEntry oppPoint[MAXOPPS]; ++} ; ++ ++/* Request to MPU */ ++struct oppRqstStruct { ++ u32 rqstDspFreq; ++ u32 rqstOppPt; ++}; ++ ++/* Info to MPU */ ++struct loadMonStruct { ++ u32 currDspLoad; ++ u32 currDspFreq; ++ u32 predDspLoad; ++ u32 predDspFreq; ++}; ++ ++ enum SHM_DESCTYPE { ++ SHM_CURROPP = 0, ++ SHM_OPPINFO = 1, ++ SHM_GETOPP = 2, /* Get DSP requested OPP info */ ++ } ; ++ ++/* Structure in shared between DSP and PC for communication.*/ ++ struct SHM { ++ u32 dspFreeMask; /* Written by DSP, read by PC. */ ++ u32 hostFreeMask; /* Written by PC, read by DSP */ ++ ++ u32 inputFull; /* Input channel has unread data. */ ++ u32 inputId; /* Channel for which input is available. */ ++ u32 inputSize; /* Size of data block (in DSP words). */ ++ ++ u32 outputFull; /* Output channel has unread data. */ ++ u32 outputId; /* Channel for which output is available. */ ++ u32 outputSize; /* Size of data block (in DSP words). */ ++ ++ u32 arg; /* Arg for Issue/Reclaim (23 bits for 55x). */ ++ u32 resvd; /* Keep structure size even for 32-bit DSPs */ ++ ++ /* Operating Point structure */ ++ struct oppStruct oppTableStruct; ++ /* Operating Point Request structure */ ++ struct oppRqstStruct oppRequest; ++ /* load monitor information structure*/ ++ struct loadMonStruct loadMonInfo; ++ char dummy[184]; /* padding to 256 byte boundary */ ++ u32 shm_dbg_var[64]; /* shared memory debug variables */ ++ } ; ++ ++ /* Channel Manager: only one created per board: */ ++ struct CHNL_MGR { ++ u32 dwSignature; /* Used for object validation */ ++ /* Function interface to WMD */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct IO_MGR *hIOMgr; /* IO manager */ ++ /* Device this board represents */ ++ struct DEV_OBJECT *hDevObject; ++ ++ /* These fields initialized in WMD_CHNL_Create(): */ ++ u32 dwOutputMask; /* Host output channels w/ full buffers */ ++ u32 dwLastOutput; /* Last output channel fired from DPC */ ++ /* Critical section object handle */ ++ struct SYNC_CSOBJECT *hCSObj; ++ u32 uWordSize; /* Size in bytes of DSP word */ ++ u32 cChannels; /* Total number of channels */ ++ u32 cOpenChannels; /* Total number of open channels */ ++ struct CHNL_OBJECT **apChannel; /* Array of channels */ ++ u32 dwType; /* Type of channel class library */ ++ /* If no SHM syms, return for CHNL_Open */ ++ DSP_STATUS chnlOpenStatus; ++ } ; ++ ++/* ++ * Channel: up to CHNL_MAXCHANNELS per board or if DSP-DMA supported then ++ * up to CHNL_MAXCHANNELS + CHNL_MAXDDMACHNLS per board. 
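++ *
++ * A channel object is normally reached through its owning manager's
++ * apChannel array, e.g. (illustrative sketch; range and signature checks
++ * omitted):
++ *
++ *   struct CHNL_OBJECT *pChnl = pChnlMgr->apChannel[uChnlId];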
++ */ ++ struct CHNL_OBJECT { ++ u32 dwSignature; /* Used for object validation */ ++ /* Pointer back to channel manager */ ++ struct CHNL_MGR *pChnlMgr; ++ u32 uId; /* Channel id */ ++ u32 dwState; /* Current channel state */ ++ u32 uMode; /* Chnl mode and attributes */ ++ /* Chnl I/O completion event (user mode) */ ++ HANDLE hUserEvent; ++ /* Abstract syncronization object */ ++ struct SYNC_OBJECT *hSyncEvent; ++ /* Name of Sync event */ ++ char szEventName[SYNC_MAXNAMELENGTH + 1]; ++ u32 hProcess; /* Process which created this channel */ ++ u32 pCBArg; /* Argument to use with callback */ ++ struct LST_LIST *pIORequests; /* List of IOR's to driver */ ++ s32 cIOCs; /* Number of IOC's in queue */ ++ s32 cIOReqs; /* Number of IORequests in queue */ ++ s32 cChirps; /* Initial number of free Irps */ ++ /* List of IOC's from driver */ ++ struct LST_LIST *pIOCompletions; ++ struct LST_LIST *pFreeList; /* List of free Irps */ ++ struct NTFY_OBJECT *hNtfy; ++ u32 cBytesMoved; /* Total number of bytes transfered */ ++ ++ /* For DSP-DMA */ ++ ++ /* Type of chnl transport:CHNL_[PCPY][DDMA] */ ++ u32 uChnlType; ++ } ; ++ ++/* I/O Request/completion packet: */ ++ struct CHNL_IRP { ++ struct LST_ELEM link; /* Link to next CHIRP in queue. */ ++ /* Buffer to be filled/emptied. (User) */ ++ u8 *pHostUserBuf; ++ /* Buffer to be filled/emptied. (System) */ ++ u8 *pHostSysBuf; ++ u32 dwArg; /* Issue/Reclaim argument. */ ++ u32 uDspAddr; /* Transfer address on DSP side. */ ++ u32 cBytes; /* Bytes transferred. */ ++ u32 cBufSize; /* Actual buffer size when allocated. */ ++ u32 status; /* Status of IO completion. */ ++ } ; ++ ++#endif /* _CHNL_SM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/iodefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/iodefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/iodefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/iodefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,45 @@ ++/* ++ * iodefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== iodefs.h ======== ++ * Description: ++ * System-wide channel objects and constants. ++ * ++ *! Revision History: ++ *! ================ ++ *! 07-Nov-2000 jeh Created. ++ */ ++ ++#ifndef IODEFS_ ++#define IODEFS_ ++ ++#define IO_MAXIRQ 0xff /* Arbitrarily large number. */ ++ ++/* IO Objects: */ ++ struct IO_MGR; ++ ++/* IO manager attributes: */ ++ struct IO_ATTRS { ++ u8 bIRQ; /* Channel's I/O IRQ number. */ ++ bool fShared; /* TRUE if the IRQ is shareable. */ ++ u32 uWordSize; /* DSP Word size. */ ++ u32 dwSMBase; /* Physical base address of shared memory. */ ++ u32 uSMLength; /* Size (in bytes) of shared memory. 
*/ ++ } ; ++ ++#endif /* IODEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/io.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/io.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/io.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/io.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,132 @@ ++/* ++ * io.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== io.h ======== ++ * Description: ++ * The io module manages IO between CHNL and MSG. ++ * ++ * Public Functions: ++ * IO_Create ++ * IO_Destroy ++ * IO_Exit ++ * IO_Init ++ * IO_OnLoaded ++ * ++ * ++ *! Revision History: ++ *! ================ ++ *! 07-Nov-2000 jeh Created. ++ */ ++ ++#ifndef IO_ ++#define IO_ ++ ++#include ++#include ++ ++#include ++ ++/* ++ * ======== IO_Create ======== ++ * Purpose: ++ * Create an IO manager object, responsible for managing IO between ++ * CHNL and MSG. ++ * Parameters: ++ * phChnlMgr: Location to store a channel manager object on ++ * output. ++ * hDevObject: Handle to a device object. ++ * pMgrAttrs: IO manager attributes. ++ * pMgrAttrs->bIRQ: I/O IRQ number. ++ * pMgrAttrs->fShared: TRUE if the IRQ is shareable. ++ * pMgrAttrs->uWordSize: DSP Word size in equivalent PC bytes.. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * CHNL_E_ISR: Unable to plug channel ISR for configured IRQ. ++ * CHNL_E_INVALIDIRQ: Invalid IRQ number. Must be 0 <= bIRQ <= 15. ++ * CHNL_E_INVALIDWORDSIZE: Invalid DSP word size. Must be > 0. ++ * CHNL_E_INVALIDMEMBASE: Invalid base address for DSP communications. ++ * Requires: ++ * IO_Init(void) called. ++ * phIOMgr != NULL. ++ * pMgrAttrs != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS IO_Create(OUT struct IO_MGR **phIOMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct IO_ATTRS *pMgrAttrs); ++ ++/* ++ * ======== IO_Destroy ======== ++ * Purpose: ++ * Destroy the IO manager. ++ * Parameters: ++ * hIOMgr: IOmanager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: hIOMgr was invalid. ++ * Requires: ++ * IO_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS IO_Destroy(struct IO_MGR *hIOMgr); ++ ++/* ++ * ======== IO_Exit ======== ++ * Purpose: ++ * Discontinue usage of the IO module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * IO_Init(void) previously called. ++ * Ensures: ++ * Resources, if any acquired in IO_Init(void), are freed when the last ++ * client of IO calls IO_Exit(void). ++ */ ++ extern void IO_Exit(void); ++ ++/* ++ * ======== IO_Init ======== ++ * Purpose: ++ * Initialize the IO module's private state. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occurred. ++ * Requires: ++ * Ensures: ++ * A requirement for each of the other public CHNL functions. 
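++ * Notes:
++ * The IO_ATTRS block passed to IO_Create() above is typically filled from
++ * the values named in its Parameters section; a minimal sketch follows
++ * (the IRQ number and word size are placeholders, not taken from this
++ * header, and DSP_SUCCEEDED() is assumed from the bridge status
++ * definitions):
++ *
++ *   struct IO_MGR *hIOMgr;
++ *   struct IO_ATTRS ioAttrs = { .bIRQ = 10, .fShared = true, .uWordSize = 2 };
++ *
++ *   if (IO_Init() && DSP_SUCCEEDED(IO_Create(&hIOMgr, hDevObject, &ioAttrs)))
++ *       ... dispatch I/O, then IO_Destroy(hIOMgr) and IO_Exit() ...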
++ */ ++ extern bool IO_Init(void); ++ ++/* ++ * ======== IO_OnLoaded ======== ++ * Purpose: ++ * Called when a program is loaded so IO manager can update its ++ * internal state. ++ * Parameters: ++ * hIOMgr: IOmanager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: hIOMgr was invalid. ++ * Requires: ++ * IO_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS IO_OnLoaded(struct IO_MGR *hIOMgr); ++ ++#endif /* CHNL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/io_sm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/io_sm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/io_sm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/io_sm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,335 @@ ++/* ++ * io_sm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== io_sm.h ======== ++ * Description: ++ * IO dispatcher for a shared memory channel driver. ++ * Also, includes macros to simulate SHM via port io calls. ++ * ++ * Public Functions: ++ * IO_Dispatch ++ * IO_DPC ++ * IO_ISR ++ * IO_RequestChnl ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 01-Mar-2004 vp: Added IVA releated functions. ++ *! 23-Apr-2003 sb: Fixed mailbox deadlock ++ *! 06-Feb-2003 kc Added IO_DDMAClearChnlDesc and IO_DDZCClearChnlDesc. ++ *! 21-Dec-2001 ag Removed unused param in IO_DDMAInitChnlDesc(). ++ * Updated comments. Removed #ifdef CHNL_NOIPCINTR. ++ *! 05-Nov-2001 kc Updated IO_CALLISR macro. ++ *! 01-May-2001 jeh Removed IO_RequestMsg. ++ *! 29-Mar-2001 ag Added #ifdef CHNL_NOIPCINTR. ++ *! 04-Dec-2000 jeh Added IO_RequestMsg. ++ *! 26-Oct-2000 jeh Added IO_GetLong, IO_SetLong, IO_ReadValueLong, and ++ *! IO_WriteValueLong, for passing arg in SHM structure. ++ *! 20-Jan-2000 ag: Updated header comments per code review. ++ *! 05-Jan-2000 ag: Text format clean-up. ++ *! 02-Nov-1999 ag: Updated header descriptions. ++ *! 25-May-1999 jg: Removed assumption of 4 bytes / word. ++ *! 12-Aug-1996 gp: Created. ++ */ ++ ++#ifndef IOSM_ ++#define IOSM_ ++ ++#include ++#include ++ ++#include ++ ++#define IO_INPUT 0 ++#define IO_OUTPUT 1 ++#define IO_SERVICE 2 ++#define IO_MAXSERVICE IO_SERVICE ++ ++#define IO_MGRSIGNATURE 0x494f4D43 /* "IOGR" */ ++ ++#define DSPFieldAddr(type, field, base, wordsize) \ ++ ((((s32)&(((type *)0)->field)) / wordsize) + (u32)base) ++ ++/* Access can be different SM access word size (e.g. 
16/32 bit words) */ ++#define IO_SetValue(pContext, type, base, field, value) (base->field = value) ++#define IO_GetValue(pContext, type, base, field) (base->field) ++#define IO_OrValue(pContext, type, base, field, value) (base->field |= value) ++#define IO_AndValue(pContext, type, base, field, value) (base->field &= value) ++#define IO_SetLong(pContext, type, base, field, value) (base->field = value) ++#define IO_GetLong(pContext, type, base, field) (base->field) ++ ++#define IO_DisableInterrupt(h) CHNLSM_DisableInterrupt(h) ++#define IO_EnableInterrupt(h) CHNLSM_EnableInterrupt(h) ++#define IO_CALLISR(h, pFlag, pwMBRegVal) CHNLSM_ISR(h, pFlag, pwMBRegVal) ++ ++/* ++ * ======== IO_CancelChnl ======== ++ * Purpose: ++ * Cancel IO on a given channel. ++ * Parameters: ++ * hIOMgr: IO Manager. ++ * ulChnl: Index of channel to cancel IO on. ++ * Returns: ++ * Requires: ++ * Valid hIOMgr. ++ * Ensures: ++ */ ++ extern void IO_CancelChnl(struct IO_MGR *hIOMgr, u32 ulChnl); ++ ++/* ++ * ======== IO_DPC ======== ++ * Purpose: ++ * Deferred procedure call for shared memory channel driver ISR. Carries ++ * out the dispatch of I/O. ++ * Parameters: ++ * pRefData: Pointer to reference data registered via a call to ++ * DPC_Create(). ++ * Returns: ++ * Requires: ++ * Must not block. ++ * Must not acquire resources. ++ * All data touched must be locked in memory if running in kernel mode. ++ * Ensures: ++ * Non-preemptible (but interruptible). ++ */ ++ extern void IO_DPC(IN OUT void *pRefData); ++ ++/* ++ * ======== IO_ISR ======== ++ * Purpose: ++ * Main interrupt handler for the shared memory WMD channel manager. ++ * Calls the WMD's CHNLSM_ISR to determine if this interrupt is ours, then ++ * schedules a DPC to dispatch I/O.. ++ * Parameters: ++ * pRefData: Pointer to the channel manager object for this board. ++ * Set in an initial call to ISR_Install(). ++ * Returns: ++ * TRUE if interrupt handled; FALSE otherwise. ++ * Requires: ++ * Must be in locked memory if executing in kernel mode. ++ * Must only call functions which are in locked memory if Kernel mode. ++ * Must only call asynchronous services. ++ * Interrupts are disabled and EOI for this interrupt has been sent. ++ * Ensures: ++ */ ++ irqreturn_t IO_ISR(int irq, IN void *pRefData); ++/* ++ * ======== IO_RequestChnl ======== ++ * Purpose: ++ * Request I/O from the DSP. Sets flags in shared memory, then interrupts ++ * the DSP. ++ * Parameters: ++ * hIOMgr: IO manager handle. ++ * pChnl: Ptr to the channel requesting I/O. ++ * iMode: Mode of channel: {IO_INPUT | IO_OUTPUT}. ++ * Returns: ++ * Requires: ++ * pChnl != NULL ++ * Ensures: ++ */ ++ extern void IO_RequestChnl(struct IO_MGR *hIOMgr, ++ struct CHNL_OBJECT *pChnl, ++ u32 iMode, OUT u16 *pwMbVal); ++ ++/* ++ * ======== IO_Schedule ======== ++ * Purpose: ++ * Schedule DPC for IO. ++ * Parameters: ++ * pIOMgr: Ptr to a I/O manager. ++ * Returns: ++ * Requires: ++ * pChnl != NULL ++ * Ensures: ++ */ ++ extern void IO_Schedule(struct IO_MGR *hIOMgr); ++ ++/* ++ * DSP-DMA IO functions ++ */ ++ ++/* ++ * ======== IO_DDMAInitChnlDesc ======== ++ * Purpose: ++ * Initialize DSP DMA channel descriptor. ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * uDDMAChnlId: DDMA channel identifier. 
++ * uNumDesc: Number of buffer descriptors(equals # of IOReqs & ++ * Chirps) ++ * pDsp: Dsp address; ++ * Returns: ++ * Requires: ++ * uDDMAChnlId < DDMA_MAXDDMACHNLS ++ * uNumDesc > 0 ++ * pVa != NULL ++ * pDspPa != NULL ++ * ++ * Ensures: ++ */ ++ extern void IO_DDMAInitChnlDesc(struct IO_MGR *hIOMgr, u32 uDDMAChnlId, ++ u32 uNumDesc, void *pDsp); ++ ++/* ++ * ======== IO_DDMAClearChnlDesc ======== ++ * Purpose: ++ * Clear DSP DMA channel descriptor. ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * uDDMAChnlId: DDMA channel identifier. ++ * Returns: ++ * Requires: ++ * uDDMAChnlId < DDMA_MAXDDMACHNLS ++ * Ensures: ++ */ ++ extern void IO_DDMAClearChnlDesc(struct IO_MGR *hIOMgr, ++ u32 uDDMAChnlId); ++ ++/* ++ * ======== IO_DDMARequestChnl ======== ++ * Purpose: ++ * Request channel DSP-DMA from the DSP. Sets up SM descriptors and ++ * control fields in shared memory. ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * pChnl: Ptr to channel object ++ * pChirp: Ptr to channel i/o request packet. ++ * Returns: ++ * Requires: ++ * pChnl != NULL ++ * pChnl->cIOReqs > 0 ++ * pChirp != NULL ++ * Ensures: ++ */ ++ extern void IO_DDMARequestChnl(struct IO_MGR *hIOMgr, ++ struct CHNL_OBJECT *pChnl, ++ struct CHNL_IRP *pChirp, ++ OUT u16 *pwMbVal); ++ ++/* ++ * Zero-copy IO functions ++ */ ++ ++/* ++ * ======== IO_DDZCInitChnlDesc ======== ++ * Purpose: ++ * Initialize ZCPY channel descriptor. ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * uZId: zero-copy channel identifier. ++ * Returns: ++ * Requires: ++ * uDDMAChnlId < DDMA_MAXZCPYCHNLS ++ * hIOMgr != Null ++ * Ensures: ++ */ ++ extern void IO_DDZCInitChnlDesc(struct IO_MGR *hIOMgr, u32 uZId); ++ ++/* ++ * ======== IO_DDZCClearChnlDesc ======== ++ * Purpose: ++ * Clear DSP ZC channel descriptor. ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * uChnlId: ZC channel identifier. ++ * Returns: ++ * Requires: ++ * hIOMgr is valid ++ * uChnlId < DDMA_MAXZCPYCHNLS ++ * Ensures: ++ */ ++ extern void IO_DDZCClearChnlDesc(struct IO_MGR *hIOMgr, u32 uChnlId); ++ ++/* ++ * ======== IO_DDZCRequestChnl ======== ++ * Purpose: ++ * Request zero-copy channel transfer. Sets up SM descriptors and ++ * control fields in shared memory. ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * pChnl: Ptr to channel object ++ * pChirp: Ptr to channel i/o request packet. ++ * Returns: ++ * Requires: ++ * pChnl != NULL ++ * pChnl->cIOReqs > 0 ++ * pChirp != NULL ++ * Ensures: ++ */ ++ extern void IO_DDZCRequestChnl(struct IO_MGR *hIOMgr, ++ struct CHNL_OBJECT *pChnl, ++ struct CHNL_IRP *pChirp, ++ OUT u16 *pwMbVal); ++ ++/* ++ * ======== IO_SHMsetting ======== ++ * Purpose: ++ * Sets the shared memory setting ++ * Parameters: ++ * hIOMgr: Handle to a I/O manager. ++ * desc: Shared memory type ++ * pArgs: Ptr to SHM setting ++ * Returns: ++ * Requires: ++ * hIOMgr != NULL ++ * pArgs != NULL ++ * Ensures: ++ */ ++ extern DSP_STATUS IO_SHMsetting(IN struct IO_MGR *hIOMgr, ++ IN enum SHM_DESCTYPE desc, ++ IN void *pArgs); ++ ++/* ++ * Misc functions for the CHNL_IO shared memory library: ++ */ ++ ++/* Maximum channel bufsize that can be used. 
*/ ++ extern u32 IO_BufSize(struct IO_MGR *hIOMgr); ++ ++ extern u32 IO_ReadValue(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr); ++ ++ extern void IO_WriteValue(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr, u32 dwValue); ++ ++ extern u32 IO_ReadValueLong(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr); ++ ++ extern void IO_WriteValueLong(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr, u32 dwValue); ++ ++ extern void IO_OrSetValue(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr, u32 dwValue); ++ ++ extern void IO_AndSetValue(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr, u32 dwValue); ++ ++ extern void IO_IntrDSP2(IN struct IO_MGR *pIOMgr, IN u16 wMbVal); ++ ++ extern void IO_SM_init(void); ++ ++/* ++ * ========PrintDspTraceBuffer ======== ++ * Print DSP tracebuffer. ++ */ ++ extern DSP_STATUS PrintDspTraceBuffer(struct WMD_DEV_CONTEXT ++ *hWmdContext); ++ ++#endif /* IOSM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/kfile.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/kfile.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/kfile.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/kfile.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,216 @@ ++/* ++ * kfile.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== kfile.h ======== ++ * Purpose: ++ * Provide file I/O management capabilities. ++ * ++ * Public Functions: ++ * KFILE_Close ++ * KFILE_Exit ++ * KFILE_Init ++ * KFILE_Open ++ * KFILE_Read ++ * KFILE_Seek ++ * KFILE_Tell ++ * KFILE_Write ++ * ++ * Notes: ++ * The KFILE module is not like most of the other DSP/BIOS Bridge modules ++ * in that it doesn't return WSX_STATUS type values. Rather, it's ++ * prototypes are meant to match the stdio file prototypes ++ * (ie, fopen, fclose, etc.). ++ * ++ *! Revision History ++ *! ================ ++ *! 29-Oct-1999 kc: Clean up for code review. ++ *! 07-Jan-1998 cr: Clean up for code review. ++ *! 15-Aug-1997 cr: Added E_KFILE_ERROR for general error condition. ++ *! 04-Aug-1997 cr: Added explicit CDECL descriptions. ++ *! 11-Nov-1996 cr: Implemented changes based on code review. ++ *! 05-Nov-1996 cr: Cleaned up for code review. ++ *! 29-May-1996 gp: Added requirement that size != 0 in _Write() and _Read(). ++ *! 28-May-1996 mg: Changed return values for Read/Write. ++ *! 14-Dec-1995 cr: Created. ++ */ ++ ++#ifndef KFILE_ ++#define KFILE_ ++ ++/* ++ * Constants for KFILE_Seek. Note that these MUST be the same definitions as ++ * those defined for fseek. ++ */ ++#define KFILE_SEEK_SET 0x00 /* seek from beginning of file */ ++#define KFILE_SEEK_CUR 0x01 /* seek from current position */ ++#define KFILE_SEEK_END 0x02 /* seek from end of file */ ++ ++ struct KFILE_FileObj; ++ ++/* ++ * ======== KFILE_Close ======== ++ * Purpose: ++ * This function closes a file's stream. ++ * Parameters: ++ * hFile: Handle of the file stream returned by KFILE_Open. 
++ * Returns: ++ * E_KFILE_INVALIDHANDLE: bad handle. ++ * 0: success. ++ * E_KFILE_ERROR: unable to close specified handle. ++ * Requires: ++ * KFILE initialized. ++ * Ensures: ++ */ ++ extern s32 KFILE_Close(IN struct KFILE_FileObj *hFile); ++ ++/* ++ * ======== KFILE_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * KFILE initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void KFILE_Exit(void); ++ ++/* ++ * ======== KFILE_Init ======== ++ * Purpose: ++ * Initializes private state of KFILE module. ++ * Parameters: ++ * Returns: ++ * TRUE if success, else FALSE. ++ * Requires: ++ * Ensures: ++ * KFILE initialized. ++ */ ++ extern bool KFILE_Init(void); ++ ++/* ++ * ======== KFILE_Open ======== ++ * Purpose: ++ * Opens a file for use. ++ * Parameters: ++ * pszFileName: Full path to name of the file to open. ++ * pszMode: String containing open status. Only the first ++ * character of the string is examined, for either ++ * "r" (read) or "w" (write) mode. ++ * Returns: ++ * A valid file handle if success, else NULL. ++ * Requires: ++ * - KFILE initialized. ++ * - pszMode != NULL. ++ * - pszFileName != NULL. ++ * Ensures: ++ */ ++ extern struct KFILE_FileObj *KFILE_Open(IN CONST char *filename, ++ IN CONST char *mode); ++ ++/* ++ * ======== KFILE_Read ======== ++ * Purpose: ++ * This function reads a specified number of bytes into a buffer. ++ * Parameters: ++ * pBuffer: Array to which the file data is copied. ++ * cSize: Number of characters in each object. ++ * cCount: Number of objects to read in. ++ * hFile: Handle of the file stream returned by KFILE_Open. ++ * Returns: ++ * E_KFILE_INVALIDHANDLE: bad file handle. ++ * E_KFILE_ERROR: general failure. ++ * > 0: success; # of objects read from file. ++ * Requires: ++ * KFILE initialized. ++ * pBuffer is a valid pointer. ++ * Ensures: ++ */ ++ extern s32 KFILE_Read(OUT void __user*buffer, ++ IN s32 size, IN s32 count, ++ IN struct KFILE_FileObj *hFile); ++ ++/* ++ * ======== KFILE_Seek ======== ++ * Purpose: ++ * This function sets the file position indicator. NOTE: we don't ++ * support seeking beyond the boundaries of a file. ++ * Parameters: ++ * hFile: Handle of the file stream returned by KFILE_Open. ++ * offset: Number of bytes from the origin to move. ++ * origin: File reference point, one of the following values: ++ * KFILE_SEEK_SET: Seek from beginning of file. ++ * KFILE_SEEK_CUR: Seek from current position. ++ * KFILE_SEEK_END: Seek from end of file. ++ * Returns: ++ * 0: success. ++ * E_KFILE_INVALIDHANDLE: bad handle. ++ * E_KFILE_BADORIGIN: invalid origin paramater. ++ * E_KFILE_ERROR: general failure. ++ * Requires: ++ * KFILE initialized. ++ * Ensures: ++ */ ++ extern s32 KFILE_Seek(IN struct KFILE_FileObj *hFile, ++ IN s32 offset, IN s32 origin); ++ ++/* ++ * ======== KFILE_Tell ======== ++ * Purpose: ++ * This function reports the current value of the position indicator. ++ * Parameters: ++ * hFile: Handle of the file stream returned by KFILE_Open. ++ * Return value: ++ * > 0: success; returns # of bytes the position indicator is from ++ * beginning of file. ++ * E_KFILE_ERROR: general failure. ++ * E_KFILE_INVALIDHANDLE: bad file handle. ++ * Requires: ++ * KFILE initialized. 
++ * Ensures: ++ */ ++ extern s32 KFILE_Tell(IN struct KFILE_FileObj *hFile); ++ ++/* ++ * ======== KFILE_Write ======== ++ * Purpose: ++ * This function writes a number of objects to the stream. ++ * Parameters: ++ * pBuffer: Array from which the file data is written. ++ * cSize: Number of characters in each object. ++ * cCount: Number of objects to write out. ++ * hFile: Handle of the file stream returned by KFILE_Open. ++ * Returns: ++ * E_KFILE_INVALIDHANDLE: bad file handle. ++ * E_KFILE_ERROR: general failure. ++ * > 0: success; # of objects written to file. ++ * Requires: ++ * KFILE initialized. ++ * pBuffer != NULL. ++ * Postcondition: ++ * The file position indicator is advanced by the number of ++ * characters written. ++ */ ++ extern s32 KFILE_Write(OUT void *buffer, ++ IN s32 size, ++ IN s32 count, ++ IN struct KFILE_FileObj *hFile); ++ ++#endif /* KFILE_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/ldr.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/ldr.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/ldr.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/ldr.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,51 @@ ++/* ++ * ldr.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== ldr.h ======== ++ * Purpose: ++ * Provide module loading services and symbol export services. ++ * ++ * Public Functions: ++ * LDR_Exit ++ * LDR_FreeModule ++ * LDR_GetProcAddress ++ * LDR_Init ++ * LDR_LoadModule ++ * ++ * Notes: ++ * This service is meant to be used by modules of the DSP/BIOS Bridge ++ * class driver. ++ * ++ *! Revision History: ++ *! ================ ++ *! 22-Nov-1999 kc: Changes from code review. ++ *! 12-Nov-1999 kc: Removed declaration of unused loader object. ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 12-Jan-1998 cr: Cleaned up for code review. ++ *! 04-Aug-1997 cr: Added explicit CDECL identifiers. ++ *! 11-Nov-1996 cr: Cleaned up for code review. ++ *! 16-May-1996 gp: Created. ++ */ ++ ++#ifndef LDR_ ++#define LDR_ ++ ++/* Loader objects: */ ++ struct LDR_MODULE; ++ ++#endif /* LDR_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/list.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/list.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/list.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/list.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,296 @@ ++/* ++ * list.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== list.h ======== ++ * Purpose: ++ * Declarations of list management control structures and definitions ++ * of inline list management functions. ++ * ++ * Public Functions: ++ * LST_Create ++ * LST_Delete ++ * LST_Exit ++ * LST_First ++ * LST_GetHead ++ * LST_InitElem ++ * LST_Init ++ * LST_InsertBefore ++ * LST_IsEmpty ++ * LST_Next ++ * LST_PutTail ++ * LST_RemoveElem ++ * ++ * Notes: ++ * ++ *! Revision History ++ *! ================ ++ *! 10-Aug-2000 ag: Added LST_InsertBefore(). ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 16-Aug-1997 cr: added explicit identifiers. ++ *! 10-Aug-1996 gp: Acquired from SMM for WinSPOX v.1.1; renamed identifiers. ++ *! 21-Oct-1994 dh4: Cleaned / commented for code review. ++ *! 08-Jun-1994 dh4: Converted to SPM (added extern "C"). ++ */ ++ ++#ifndef LIST_ ++#define LIST_ ++ ++#include ++ ++#define LST_IsEmpty(l) (((l)->head.next == &(l)->head)) ++ ++ struct LST_ELEM { ++ struct LST_ELEM *next; ++ struct LST_ELEM *prev; ++ struct LST_ELEM *self; ++ } ; ++ ++ struct LST_LIST { ++ struct LST_ELEM head; ++ } ; ++ ++/* ++ * ======== LST_Create ======== ++ * Purpose: ++ * Allocates and initializes a circular list. ++ * Details: ++ * Uses portable MEM_Calloc() function to allocate a list containing ++ * a single element and initializes that element to indicate that it ++ * is the "end of the list" (i.e., the list is empty). ++ * An empty list is indicated by the "next" pointer in the element ++ * at the head of the list pointing to the head of the list, itself. ++ * Parameters: ++ * Returns: ++ * Pointer to beginning of created list (success) ++ * NULL --> Allocation failed ++ * Requires: ++ * LST initialized. ++ * Ensures: ++ * Notes: ++ * The created list contains a single element. This element is the ++ * "empty" element, because its "next" and "prev" pointers point at ++ * the same location (the element itself). ++ */ ++ extern struct LST_LIST *LST_Create(void); ++ ++/* ++ * ======== LST_Delete ======== ++ * Purpose: ++ * Removes a list by freeing its control structure's memory space. ++ * Details: ++ * Uses portable MEM_Free() function to deallocate the memory ++ * block pointed at by the input parameter. ++ * Parameters: ++ * pList: Pointer to list control structure of list to be deleted ++ * Returns: ++ * Void ++ * Requires: ++ * - LST initialized. ++ * - pList != NULL. ++ * Ensures: ++ * Notes: ++ * Must ONLY be used for empty lists, because it does not walk the ++ * chain of list elements. Calling this function on a non-empty list ++ * will cause a memory leak. ++ */ ++ extern void LST_Delete(IN struct LST_LIST *pList); ++ ++/* ++ * ======== LST_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * LST initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void LST_Exit(void); ++ ++/* ++ * ======== LST_First ======== ++ * Purpose: ++ * Returns a pointer to the first element of the list, or NULL if the list ++ * is empty. ++ * Parameters: ++ * pList: Pointer to list control structure. ++ * Returns: ++ * Pointer to first list element, or NULL. ++ * Requires: ++ * - LST initialized. ++ * - pList != NULL. 
++ * Ensures: ++ */ ++ extern struct LST_ELEM *LST_First(IN struct LST_LIST *pList); ++ ++/* ++ * ======== LST_GetHead ======== ++ * Purpose: ++ * Pops the head off the list and returns a pointer to it. ++ * Details: ++ * If the list is empty, returns NULL. ++ * Else, removes the element at the head of the list, making the next ++ * element the head of the list. ++ * The head is removed by making the tail element of the list point its ++ * "next" pointer at the next element after the head, and by making the ++ * "prev" pointer of the next element after the head point at the tail ++ * element. So the next element after the head becomes the new head of ++ * the list. ++ * Parameters: ++ * pList: Pointer to list control structure of list whose head ++ * element is to be removed ++ * Returns: ++ * Pointer to element that was at the head of the list (success) ++ * NULL No elements in list ++ * Requires: ++ * - head.self must be correctly set to &head. ++ * - LST initialized. ++ * - pList != NULL. ++ * Ensures: ++ * Notes: ++ * Because the tail of the list points forward (its "next" pointer) to ++ * the head of the list, and the head of the list points backward (its ++ * "prev" pointer) to the tail of the list, this list is circular. ++ */ ++ extern struct LST_ELEM *LST_GetHead(IN struct LST_LIST *pList); ++ ++/* ++ * ======== LST_Init ======== ++ * Purpose: ++ * Initializes private state of LST module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE otherwise. ++ * Requires: ++ * Ensures: ++ * LST initialized. ++ */ ++ extern bool LST_Init(void); ++ ++/* ++ * ======== LST_InitElem ======== ++ * Purpose: ++ * Initializes a list element to default (cleared) values ++ * Details: ++ * Parameters: ++ * pElem: Pointer to list element to be reset ++ * Returns: ++ * Requires: ++ * LST initialized. ++ * Ensures: ++ * Notes: ++ * This function must not be called to "reset" an element in the middle ++ * of a list chain -- that would break the chain. ++ * ++ */ ++ extern void LST_InitElem(IN struct LST_ELEM *pListElem); ++ ++/* ++ * ======== LST_InsertBefore ======== ++ * Purpose: ++ * Insert the element before the existing element. ++ * Parameters: ++ * pList: Pointer to list control structure. ++ * pElem: Pointer to element in list to insert. ++ * pElemExisting: Pointer to existing list element. ++ * Returns: ++ * Requires: ++ * - LST initialized. ++ * - pList != NULL. ++ * - pElem != NULL. ++ * - pElemExisting != NULL. ++ * Ensures: ++ */ ++ extern void LST_InsertBefore(IN struct LST_LIST *pList, ++ IN struct LST_ELEM *pElem, ++ IN struct LST_ELEM *pElemExisting); ++ ++/* ++ * ======== LST_Next ======== ++ * Purpose: ++ * Returns a pointer to the next element of the list, or NULL if the next ++ * element is the head of the list or the list is empty. ++ * Parameters: ++ * pList: Pointer to list control structure. ++ * pCurElem: Pointer to element in list to remove. ++ * Returns: ++ * Pointer to list element, or NULL. ++ * Requires: ++ * - LST initialized. ++ * - pList != NULL. ++ * - pCurElem != NULL. ++ * Ensures: ++ */ ++ extern struct LST_ELEM *LST_Next(IN struct LST_LIST *pList, ++ IN struct LST_ELEM *pCurElem); ++ ++/* ++ * ======== LST_PutTail ======== ++ * Purpose: ++ * Adds the specified element to the tail of the list ++ * Details: ++ * Sets new element's "prev" pointer to the address previously held by ++ * the head element's prev pointer. This is the previous tail member of ++ * the list. ++ * Sets the new head's prev pointer to the address of the element. 
++ * Sets next pointer of the previous tail member of the list to point to ++ * the new element (rather than the head, which it had been pointing at). ++ * Sets new element's next pointer to the address of the head element. ++ * Sets head's prev pointer to the address of the new element. ++ * Parameters: ++ * pList: Pointer to list control structure to which *pElem will be ++ * added ++ * pElem: Pointer to list element to be added ++ * Returns: ++ * Void ++ * Requires: ++ * *pElem and *pList must both exist. ++ * pElem->self = pElem before pElem is passed to this function. ++ * LST initialized. ++ * Ensures: ++ * Notes: ++ * Because the tail is always "just before" the head of the list (the ++ * tail's "next" pointer points at the head of the list, and the head's ++ * "prev" pointer points at the tail of the list), the list is circular. ++ * Warning: if pElem->self is not set beforehand, LST_GetHead() will ++ * return an erroneous pointer when it is called for this element. ++ */ ++ extern void LST_PutTail(IN struct LST_LIST *pList, ++ IN struct LST_ELEM *pListElem); ++ ++/* ++ * ======== LST_RemoveElem ======== ++ * Purpose: ++ * Removes (unlinks) the given element from the list, if the list is not ++ * empty. Does not free the list element. ++ * Parameters: ++ * pList: Pointer to list control structure. ++ * pCurElem: Pointer to element in list to remove. ++ * Returns: ++ * Requires: ++ * - LST initialized. ++ * - pList != NULL. ++ * - pCurElem != NULL. ++ * Ensures: ++ */ ++extern void LST_RemoveElem(IN struct LST_LIST *pList, ++ IN struct LST_ELEM *pCurElem); ++ ++#endif /* LIST_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mbx_sh.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mbx_sh.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mbx_sh.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mbx_sh.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,213 @@ ++/* ++ * mbx_sh.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== mbx_sh.h ======== ++ * Definitions for shared mailbox cmd/data values.(used on both ++ * the GPP and DSP sides). ++ * ++ * Bridge usage of OMAP mailbox 1 is determined by the "class" of the ++ * mailbox interrupt's cmd value received. The class value are defined ++ * as a bit (10 thru 15) being set. ++ * ++ * Note: Only 16 bits of each is used. Other 16 bit data reg available. ++ * ++ * 16 bit Mbx bit defns: ++ * ++ * A). Exception/Error handling (Module DEH) : class = 0. ++ * ++ * 15 10 0 ++ * --------------------------------- ++ * |0|0|0|0|0|0|x|x|x|x|x|x|x|x|x|x| ++ * --------------------------------- ++ * | (class) | (module specific) | ++ * ++ * ++ * ++ * B: DSP-DMA link driver channels (DDMA) : class = 1. 
++ * ++ * 15 10 0 ++ * --------------------------------- ++ * |0|0|0|0|0|1|b|b|b|b|b|c|c|c|c|c| ++ * --------------------------------- ++ * | (class) | (module specific) | ++ * ++ * where b -> buffer index (32 DDMA buffers/chnl max) ++ * c -> channel Id (32 DDMA chnls max) ++ * ++ * ++ * ++ * ++ * C: Proc-copy link driver channels (PCPY) : class = 2. ++ * ++ * 15 10 0 ++ * --------------------------------- ++ * |0|0|0|0|1|0|x|x|x|x|x|x|x|x|x|x| ++ * --------------------------------- ++ * | (class) | (module specific) | ++ * ++ * ++ * D: Zero-copy link driver channels (DDZC) : class = 4. ++ * ++ * 15 10 0 ++ * --------------------------------- ++ * |0|0|0|1|0|0|x|x|x|x|x|c|c|c|c|c| ++ * --------------------------------- ++ * | (class) | (module specific) | ++ * ++ * where x -> not used ++ * c -> channel Id (32 ZCPY chnls max) ++ * ++ * ++ * E: Power management : class = 8. ++ * ++ * 15 10 0 ++ * --------------------------------- ++ * |0|0|1|0|0|0|x|x|x|x|x|c|c|c|c|c| ++ ++ * 0010 00xx xxxc cccc ++ * 0010 00nn pppp qqqq ++ * nn: ++ * 00 = reserved ++ * 01 = pwr state change ++ * 10 = opp pre-change ++ * 11 = opp post-change ++ * ++ * if nn = pwr state change: ++ * pppp = don't care ++ * qqqq: ++ * 0010 = hibernate ++ * 0010 0001 0000 0010 ++ * 0110 = retention ++ * 0010 0001 0000 0110 ++ * others reserved ++ * ++ * if nn = opp pre-change: ++ * pppp = current opp ++ * qqqq = next opp ++ * ++ * if nn = opp post-change: ++ * pppp = prev opp ++ * qqqq = current opp ++ * ++ * --------------------------------- ++ * | (class) | (module specific) | ++ * ++ * where x -> not used ++ * c -> Power management command ++ * ++ * ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Sep-2002 mr Added DEH reset const ++ *! 24-Apr-2002 sg Added more PM commands. ++ *! 04-Mar-2002 gv Added MBX_PM_CLASS ++ *! 22-Jan-2002 ag Bug fix in MBX_SETZCPYVAL(x) macro. ++ *! 21-Dec-2001 ag Added bit masks defns. ++ *! 17-Dec-2001 ag: created. ++ */ ++ ++#ifndef _MBX_SH_H ++#define _MBX_SH_H ++ ++#define MBX_CLASS_MSK 0xFC00 /* Class bits are 10 thru 15 */ ++#define MBX_VALUE_MSK 0x03FF /* Value is 0 thru 9 */ ++ ++#define MBX_DEH_CLASS 0x0000 /* DEH owns Mbx INTR */ ++#define MBX_DDMA_CLASS 0x0400 /* DSP-DMA link drvr chnls owns INTR */ ++#define MBX_PCPY_CLASS 0x0800 /* PROC-COPY " */ ++#define MBX_ZCPY_CLASS 0x1000 /* ZERO-COPY " */ ++#define MBX_PM_CLASS 0x2000 /* Power Management */ ++#define MBX_DBG_CLASS 0x4000 /* For debugging purpose */ ++ ++/* ++ * Exception Handler codes ++ * Magic code used to determine if DSP signaled exception. ++ */ ++#define MBX_DEH_BASE 0x0 ++#define MBX_DEH_USERS_BASE 0x100 /* 256 */ ++#define MBX_DEH_LIMIT 0x3FF /* 1023 */ ++#define MBX_DEH_RESET 0x101 /* DSP RESET (DEH) */ ++#define MBX_DEH_EMMU 0X103 /*DSP MMU FAULT RECOVERY*/ ++ ++/* ++ * Link driver command/status codes. 
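++ *
++ * For illustration, a zero-copy command word for channel 5 combines the
++ * class and value fields described above (equivalent to what
++ * MBX_SETZCPYVAL() further below produces):
++ *
++ *   u16 cmd   = MBX_ZCPY_CLASS | (5 & MBX_VALUE_MSK);   -> 0x1005
++ *   u16 class = cmd & MBX_CLASS_MSK;                     -> MBX_ZCPY_CLASS
++ *   u16 chnl  = cmd & MBX_VALUE_MSK;                     -> 5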
++ */ ++/* DSP-DMA */ ++#define MBX_DDMA_NUMCHNLBITS 5 /* # chnl Id: # bits available */ ++#define MBX_DDMA_CHNLSHIFT 0 /* # of bits to shift */ ++#define MBX_DDMA_CHNLMSK 0x01F /* bits 0 thru 4 */ ++ ++#define MBX_DDMA_NUMBUFBITS 5 /* buffer index: # of bits avail */ ++#define MBX_DDMA_BUFSHIFT (MBX_DDMA_NUMCHNLBITS + MBX_DDMA_CHNLSHIFT) ++#define MBX_DDMA_BUFMSK 0x3E0 /* bits 5 thru 9 */ ++ ++/* Zero-Copy */ ++#define MBX_ZCPY_NUMCHNLBITS 5 /* # chnl Id: # bits available */ ++#define MBX_ZCPY_CHNLSHIFT 0 /* # of bits to shift */ ++#define MBX_ZCPY_CHNLMSK 0x01F /* bits 0 thru 4 */ ++ ++/* Power Management Commands */ ++#define MBX_PM_DSPIDLE (MBX_PM_CLASS + 0x0) ++#define MBX_PM_DSPWAKEUP (MBX_PM_CLASS + 0x1) ++#define MBX_PM_EMERGENCYSLEEP (MBX_PM_CLASS + 0x2) ++#define MBX_PM_SLEEPUNTILRESTART (MBX_PM_CLASS + 0x3) ++#define MBX_PM_DSPGLOBALIDLE_OFF (MBX_PM_CLASS + 0x4) ++#define MBX_PM_DSPGLOBALIDLE_ON (MBX_PM_CLASS + 0x5) ++#define MBX_PM_SETPOINT_PRENOTIFY (MBX_PM_CLASS + 0x6) ++#define MBX_PM_SETPOINT_POSTNOTIFY (MBX_PM_CLASS + 0x7) ++#define MBX_PM_DSPRETN (MBX_PM_CLASS + 0x8) ++#define MBX_PM_DSPRETENTION (MBX_PM_CLASS + 0x8) ++#define MBX_PM_DSPHIBERNATE (MBX_PM_CLASS + 0x9) ++#define MBX_PM_HIBERNATE_EN (MBX_PM_CLASS + 0xA) ++#define MBX_PM_OPP_REQ (MBX_PM_CLASS + 0xB) ++#define MBX_PM_OPP_CHG (MBX_PM_CLASS + 0xC) ++ ++#define MBX_PM_TYPE_MASK 0x0300 ++#define MBX_PM_TYPE_PWR_CHNG 0x0100 ++#define MBX_PM_TYPE_OPP_PRECHNG 0x0200 ++#define MBX_PM_TYPE_OPP_POSTCHNG 0x0300 ++#define MBX_PM_TYPE_OPP_MASK 0x0300 ++#define MBX_PM_OPP_PRECHNG (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG) ++/* DSP to MPU */ ++#define MBX_PM_OPP_CHNG(OPP) (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG | (OPP)) ++#define MBX_PM_RET (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0006) ++#define MBX_PM_HIB (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0002) ++#define MBX_PM_OPP_1 0 ++#define MBX_PM_OPP_2 1 ++#define MBX_PM_OPP_3 2 ++#define MBX_PM_OPP_4 3 ++#define MBX_OLDOPP_EXTRACT(OPPMSG) ((0x00F0 & (OPPMSG)) >> 4) ++#define MBX_NEWOPP_EXTRACT(OPPMSG) (0x000F & (OPPMSG)) ++#define MBX_PREVOPP_EXTRACT(OPPMSG) ((0x00F0 & (OPPMSG)) >> 4) ++#define MBX_CUROPP_EXTRACT(OPPMSG) (0x000F & (OPPMSG)) ++ ++/* Bridge Debug Commands */ ++#define MBX_DBG_SYSPRINTF (MBX_DBG_CLASS + 0x0) ++ ++/* ++ * Useful macros ++ */ ++/* DSP-DMA channel */ ++#define MBX_SETDDMAVAL(x, y) (MBX_DDMA_CLASS | (x << MBX_DDMA_BUFSHIFT) | \ ++ (y << MBX_DDMA_CHNLSHIFT)) ++ ++/* Zero-Copy channel */ ++#define MBX_SETZCPYVAL(x) (MBX_ZCPY_CLASS | (x << MBX_ZCPY_CHNLSHIFT)) ++ ++#endif /* _MBX_SH_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/memdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/memdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/memdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/memdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,52 @@ ++/* ++ * memdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++ ++/* ++ * ======== memdefs.h ======== ++ * Purpose: ++ * Global MEM constants and types, shared between WSX, WCD, and WMD. ++ * ++ *! Revision History: ++ *! ================ ++ *! 28-Aug-2001 ag: Added MEM_[SET][GET]VIRTUALSEGID. ++ *! 10-Aug-1999 kc: Based on wsx-c18. ++ *! 15-Nov-1996 gp: Renamed from wsxmem.h and moved to kwinos. ++ *! 21-Aug-1996 cr: Created from mem.h. ++ */ ++ ++#ifndef MEMDEFS_ ++#define MEMDEFS_ ++ ++/* Memory Pool Attributes: */ ++ enum MEM_POOLATTRS { ++ MEM_PAGED = 0, ++ MEM_NONPAGED = 1, ++ MEM_LARGEVIRTMEM = 2 ++ } ; ++ ++/* ++ * MEM_VIRTUALSEGID is used by Node & Strm to access virtual address space in ++ * the correct client process context. ++ */ ++#define MEM_SETVIRTUALSEGID 0x10000000 ++#define MEM_GETVIRTUALSEGID 0x20000000 ++#define MEM_MASKVIRTUALSEGID (MEM_SETVIRTUALSEGID | MEM_GETVIRTUALSEGID) ++ ++#define TO_VIRTUAL_UNCACHED(x) x ++#define INTREG_TO_VIRTUAL_UNCACHED(x) x ++ ++#endif /* MEMDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mem.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mem.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mem.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mem.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,357 @@ ++/* ++ * mem.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== mem.h ======== ++ * Purpose: ++ * Memory management and address mapping services for the DSP/BIOS Bridge ++ * class driver and mini-driver. ++ * ++ * Public Functions: ++ * MEM_Alloc ++ * MEM_AllocObject ++ * MEM_AllocPhysMem ++ * MEM_Calloc ++ * MEM_Exit ++ * MEM_FlushCache ++ * MEM_Free ++ * MEM_FreeObject ++ * MEM_FreePhysMem ++ * MEM_GetNumPages ++ * MEM_Init ++ * MEM_IsValidHandle ++ * MEM_LinearAddress ++ * MEM_PageLock ++ * MEM_PageUnlock ++ * MEM_UnMapLinearAddress ++ * MEM_VirtualToPhysical ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb: Added Alloc/Free PhysMem, FlushCache, VirtualToPhysical ++ *! 01-Sep-2001 ag: Cleaned up notes for MEM_LinearAddress() does not ++ *! require phys address to be page aligned! ++ *! 02-Dec-1999 rr: stdwin.h included for retail build ++ *! 12-Nov-1999 kc: Added warning about use of MEM_LinearAddress. ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 10-Aug-1999 kc: Based on wsx-c18. ++ *! 07-Jan-1998 gp: Added MEM_AllocUMB and MEM_UMBFree for User Mapped Buffers ++ *! used by WMD_CHNL. ++ *! 23-Dec-1997 cr: Code review cleanup, removed dead Ring 3 code. ++ *! 04-Aug-1997 cr: Added explicit CDECL identifiers. ++ *! 01-Nov-1996 gp: Updated based on code review. ++ *! 04-Sep-1996 gp: Added MEM_PageLock() and MEM_PageUnlock() services. ++ *! 14-Aug-1996 mg: Added MEM_GetPhysAddr() and MEM_GetNumPages() ++ *! 25-Jul-1996 gp: Added MEM_IsValidHandle() macro. ++ *! 10-May-1996 gp: Added MEM_Calloc(). ++ *! 25-Apr-1996 gp: Added MEM_PhysicalAddress() ++ *! 
17-Apr-1996 gp: Added MEM_Exit function; updated to latest naming standard. ++ *! 08-Apr-1996 gp: Created. ++ */ ++ ++#ifndef MEM_ ++#define MEM_ ++ ++#include ++#include ++ ++/* ++ * ======== MEM_Alloc ======== ++ * Purpose: ++ * Allocate memory from the paged or non-paged pools. ++ * Parameters: ++ * cBytes: Number of bytes to allocate. ++ * type: Type of memory to allocate; one of: ++ * MEM_PAGED: Allocate from pageable memory. ++ * MEM_NONPAGED: Allocate from page locked memory. ++ * Returns: ++ * Pointer to a block of memory; ++ * NULL if memory couldn't be allocated, if cBytes == 0, or if type is ++ * not one of MEM_PAGED or MEM_NONPAGED. ++ * Requires: ++ * MEM initialized. ++ * Ensures: ++ * The returned pointer, if not NULL, points to a valid memory block of ++ * the size requested. ++ */ ++ extern void *MEM_Alloc(IN u32 cBytes, IN enum MEM_POOLATTRS type); ++ ++/* ++ * ======== MEM_AllocObject ======== ++ * Purpose: ++ * Allocate an object, and set it's signature. ++ * Parameters: ++ * pObj: Pointer to the new object. ++ * Obj: Type of the object to allocate. ++ * Signature: Magic field value. Must be non-zero. ++ * Returns: ++ * Requires: ++ * Same requirements as MEM_Calloc(); and ++ * The object structure has a dwSignature field. The compiler ensures ++ * this requirement. ++ * Ensures: ++ * A subsequent call to MEM_IsValidHandle() will succeed for this object. ++ */ ++#define MEM_AllocObject(pObj, Obj, Signature) \ ++{ \ ++ pObj = MEM_Calloc(sizeof(Obj), MEM_NONPAGED); \ ++ if (pObj) { \ ++ pObj->dwSignature = Signature; \ ++ } \ ++} ++ ++/* ======== MEM_AllocPhysMem ======== ++ * Purpose: ++ * Allocate physically contiguous, uncached memory ++ * Parameters: ++ * cBytes: Number of bytes to allocate. ++ * ulAlign: Alignment Mask. ++ * pPhysicalAddress: Physical address of allocated memory. ++ * Returns: ++ * Pointer to a block of memory; ++ * NULL if memory couldn't be allocated, or if cBytes == 0. ++ * Requires: ++ * MEM initialized. ++ * Ensures: ++ * The returned pointer, if not NULL, points to a valid memory block of ++ * the size requested. Returned physical address refers to physical ++ * location of memory. ++ */ ++ extern void *MEM_AllocPhysMem(IN u32 cBytes, ++ IN u32 ulAlign, ++ OUT u32 *pPhysicalAddress); ++ ++/* ++ * ======== MEM_Calloc ======== ++ * Purpose: ++ * Allocate zero-initialized memory from the paged or non-paged pools. ++ * Parameters: ++ * cBytes: Number of bytes to allocate. ++ * type: Type of memory to allocate; one of: ++ * MEM_PAGED: Allocate from pageable memory. ++ * MEM_NONPAGED: Allocate from page locked memory. ++ * Returns: ++ * Pointer to a block of zeroed memory; ++ * NULL if memory couldn't be allocated, if cBytes == 0, or if type is ++ * not one of MEM_PAGED or MEM_NONPAGED. ++ * Requires: ++ * MEM initialized. ++ * Ensures: ++ * The returned pointer, if not NULL, points to a valid memory block ++ * of the size requested. ++ */ ++ extern void *MEM_Calloc(IN u32 cBytes, IN enum MEM_POOLATTRS type); ++ ++/* ++ * ======== MEM_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * MEM is initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void MEM_Exit(void); ++ ++/* ++ * ======== MEM_FlushCache ======== ++ * Purpose: ++ * Performs system cache sync with discard ++ * Parameters: ++ * pMemBuf: Pointer to memory region to be flushed. 
++ * pMemBuf: Size of the memory region to be flushed. ++ * Returns: ++ * Requires: ++ * MEM is initialized. ++ * Ensures: ++ * Cache is synchronized ++ */ ++ extern void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType); ++ ++/* ++ * ======== MEM_Free ======== ++ * Purpose: ++ * Free the given block of system memory. ++ * Parameters: ++ * pMemBuf: Pointer to memory allocated by MEM_Calloc/Alloc(). ++ * Returns: ++ * Requires: ++ * MEM initialized. ++ * pMemBuf is a valid memory address returned by MEM_Calloc/Alloc(). ++ * Ensures: ++ * pMemBuf is no longer a valid pointer to memory. ++ */ ++ extern void MEM_Free(IN void *pMemBuf); ++ ++/* ++ * ======== MEM_VFree ======== ++ * Purpose: ++ * Free the given block of system memory in virtual space. ++ * Parameters: ++ * pMemBuf: Pointer to memory allocated by MEM_Calloc/Alloc() ++ * using vmalloc. ++ * Returns: ++ * Requires: ++ * MEM initialized. ++ * pMemBuf is a valid memory address returned by MEM_Calloc/Alloc() ++ * using vmalloc. ++ * Ensures: ++ * pMemBuf is no longer a valid pointer to memory. ++ */ ++ extern void MEM_VFree(IN void *pMemBuf); ++ ++/* ++ * ======== MEM_FreePhysMem ======== ++ * Purpose: ++ * Free the given block of physically contiguous memory. ++ * Parameters: ++ * pVirtualAddress: Pointer to virtual memory region allocated ++ * by MEM_AllocPhysMem(). ++ * pPhysicalAddress: Pointer to physical memory region allocated ++ * by MEM_AllocPhysMem(). ++ * cBytes: Size of the memory region allocated by MEM_AllocPhysMem(). ++ * Returns: ++ * Requires: ++ * MEM initialized. ++ * pVirtualAddress is a valid memory address returned by ++ * MEM_AllocPhysMem() ++ * Ensures: ++ * pVirtualAddress is no longer a valid pointer to memory. ++ */ ++ extern void MEM_FreePhysMem(void *pVirtualAddress, ++ u32 pPhysicalAddress, u32 cBytes); ++ ++/* ++ * ======== MEM_FreeObject ======== ++ * Purpose: ++ * Utility macro to invalidate an object's signature, and deallocate it. ++ * Parameters: ++ * pObj: Pointer to the object to free. ++ * Returns: ++ * Requires: ++ * Same requirements as MEM_Free(). ++ * Ensures: ++ * A subsequent call to MEM_IsValidHandle() will fail for this object. ++ */ ++#define MEM_FreeObject(pObj) \ ++{ \ ++ pObj->dwSignature = 0x00; \ ++ MEM_Free(pObj); \ ++} ++ ++/* ++ * ======== MEM_GetNumPages ======== ++ * Purpose: ++ * Calculate the number of pages corresponding to the supplied buffer. ++ * Parameters: ++ * pAddr: Linear (virtual) address of the buffer. ++ * cBytes: Number of bytes in the buffer. ++ * Returns: ++ * Number of pages. ++ * Requires: ++ * MEM initialized. ++ * Ensures: ++ * If cBytes > 0, number of pages returned > 0. ++ */ ++ extern s32 MEM_GetNumPages(IN void *pAddr, IN u32 cBytes); ++ ++/* ++ * ======== MEM_Init ======== ++ * Purpose: ++ * Initializes private state of MEM module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * MEM initialized. ++ */ ++ extern bool MEM_Init(void); ++ ++/* ++ * ======== MEM_IsValidHandle ======== ++ * Purpose: ++ * Validate the object handle. ++ * Parameters: ++ * hObj: Handle to object created with MEM_AllocObject(). ++ * Sig: Expected signature u32. ++ * Returns: ++ * TRUE if handle is valid; FALSE otherwise. ++ * Requires: ++ * The object structure has a dwSignature field. Ensured by compiler. 
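A minimal sketch of the MEM_AllocPhysMem()/MEM_FreePhysMem() pairing documented above, assuming a page-sized scratch buffer; the 4096-byte size, the zero alignment mask and the helper names are placeholders.

static void *grab_scratch_page(u32 *pPhysAddr)
{
    /* one physically contiguous, uncached block; alignment mask of 0 */
    return MEM_AllocPhysMem(4096, 0, pPhysAddr);
}

static void drop_scratch_page(void *pVirt, u32 physAddr)
{
    if (pVirt)
        MEM_FreePhysMem(pVirt, physAddr, 4096);
}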
++ * Ensures: ++ */ ++#define MEM_IsValidHandle(hObj, Sig) \ ++ ((hObj != NULL) && (hObj->dwSignature == Sig)) ++ ++/* ++ * ======== MEM_LinearAddress ======== ++ * Purpose: ++ * Get the linear address corresponding to the given physical address. ++ * Parameters: ++ * pPhysAddr: Physical address to be mapped. ++ * cBytes: Number of bytes in physical range to map. ++ * Returns: ++ * The corresponding linear address, or NULL if unsuccessful. ++ * Requires: ++ * MEM initialized. ++ * Ensures: ++ * Notes: ++ * If valid linear address is returned, be sure to call ++ * MEM_UnmapLinearAddress(). ++ */ ++#define MEM_LinearAddress(pPhyAddr, cBytes) pPhyAddr ++ ++/* ++ * ======== MEM_UnmapLinearAddress ======== ++ * Purpose: ++ * Unmap the linear address mapped in MEM_LinearAddress. ++ * Parameters: ++ * pBaseAddr: Ptr to mapped memory (as returned by MEM_LinearAddress()). ++ * Returns: ++ * Requires: ++ * - MEM initialized. ++ * - pBaseAddr is a valid linear address mapped in MEM_LinearAddress. ++ * Ensures: ++ * - pBaseAddr no longer points to a valid linear address. ++ */ ++#define MEM_UnmapLinearAddress(pBaseAddr) {} ++ ++/* ++ * ======== MEM_ExtPhysPoolInit ======== ++ * Purpose: ++ * Uses the physical memory chunk passed for internal consitent memory ++ * allocations. ++ * physical address based on the page frame address. ++ * Parameters: ++ * poolPhysBase starting address of the physical memory pool. ++ * poolSize size of the physical memory pool. ++ * Returns: ++ * none. ++ * Requires: ++ * - MEM initialized. ++ * - valid physical address for the base and size > 0 ++ */ ++ extern void MEM_ExtPhysPoolInit(IN u32 poolPhysBase, ++ IN u32 poolSize); ++ ++#endif /* MEM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mgr.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mgr.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mgr.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mgr.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,234 @@ ++/* ++ * mgr.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== mgr.h ======== ++ * Description: ++ * This is the Class driver RM module interface. ++ * ++ * Public Functions: ++ * MGR_Create ++ * MGR_Destroy ++ * MGR_EnumNodeInfo ++ * MGR_EnumProcessorInfo ++ * MGR_Exit ++ * MGR_GetDCDHandle ++ * MGR_Init ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 15-Oct-2002 kc: Removed legacy PERF definitions. ++ *! 11-Jul-2001 jeh Added CFG_HDEVNODE parameter to MGR_Create(). ++ *! 22-Nov-2000 kc: Added MGR_GetPerfData for acquiring PERF stats. ++ *! 03-Nov-2000 rr: Added MGR_GetDCDHandle. Modified after code review. ++ *! 25-Sep-2000 rr: Updated to Version 0.9 ++ *! 14-Aug-2000 rr: Cleaned up. ++ *! 07-Aug-2000 rr: MGR_Create does the job of Loading DCD Dll. ++ *! 27-Jul-2000 rr: Updated to ver 0.8 of DSPAPI(types). ++ *! 20-Jun-2000 rr: Created. 
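The MEM_AllocObject()/MEM_IsValidHandle()/MEM_FreeObject() trio above implements a signature-checked object pattern. A minimal sketch follows, using a hypothetical XYZ_OBJECT with the required dwSignature field and an arbitrary signature value.

#define XYZ_SIGNATURE 0x5a595a58        /* arbitrary example signature */

struct XYZ_OBJECT {
    u32 dwSignature;    /* required by MEM_AllocObject/MEM_IsValidHandle */
    u32 state;
};

static DSP_STATUS xyz_create(struct XYZ_OBJECT **ppObj)
{
    struct XYZ_OBJECT *pObj = NULL;

    MEM_AllocObject(pObj, struct XYZ_OBJECT, XYZ_SIGNATURE);
    if (!MEM_IsValidHandle(pObj, XYZ_SIGNATURE))
        return DSP_EMEMORY;

    pObj->state = 0;
    *ppObj = pObj;
    return DSP_SOK;
}

static void xyz_destroy(struct XYZ_OBJECT *pObj)
{
    if (MEM_IsValidHandle(pObj, XYZ_SIGNATURE))
        MEM_FreeObject(pObj);   /* clears dwSignature, then frees */
}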
++ */ ++ ++#ifndef MGR_ ++#define MGR_ ++ ++#include ++ ++#define MAX_EVENTS 32 ++ ++/* ++ * ======== MGR_WaitForBridgeEvents ======== ++ * Purpose: ++ * Block on any Bridge event(s) ++ * Parameters: ++ * aNotifications : array of pointers to notification objects. ++ * uCount : number of elements in above array ++ * puIndex : index of signaled event object ++ * uTimeout : timeout interval in milliseocnds ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_ETIMEOUT : Wait timed out. *puIndex is undetermined. ++ * Details: ++ */ ++ ++ DSP_STATUS MGR_WaitForBridgeEvents(struct DSP_NOTIFICATION ++ **aNotifications, ++ u32 uCount, OUT u32 *puIndex, ++ u32 uTimeout); ++ ++/* ++ * ======== MGR_Create ======== ++ * Purpose: ++ * Creates the Manager Object. This is done during the driver loading. ++ * There is only one Manager Object in the DSP/BIOS Bridge. ++ * Parameters: ++ * phMgrObject: Location to store created MGR Object handle. ++ * hDevNode: Device object as known to Windows system. ++ * Returns: ++ * DSP_SOK: Success ++ * DSP_EMEMORY: Failed to Create the Object ++ * DSP_EFAIL: General Failure ++ * Requires: ++ * MGR Initialized (cRefs > 0 ) ++ * phMgrObject != NULL. ++ * Ensures: ++ * DSP_SOK: *phMgrObject is a valid MGR interface to the device. ++ * MGR Object stores the DCD Manager Handle. ++ * MGR Object stored in the Regsitry. ++ * !DSP_SOK: MGR Object not created ++ * Details: ++ * DCD Dll is loaded and MGR Object stores the handle of the DLL. ++ */ ++ extern DSP_STATUS MGR_Create(OUT struct MGR_OBJECT **hMgrObject, ++ struct CFG_DEVNODE *hDevNode); ++ ++/* ++ * ======== MGR_Destroy ======== ++ * Purpose: ++ * Destroys the MGR object. Called upon driver unloading. ++ * Parameters: ++ * hMgrObject: Handle to Manager object . ++ * Returns: ++ * DSP_SOK: Success. ++ * DCD Manager freed; MGR Object destroyed; ++ * MGR Object deleted from the Registry. ++ * DSP_EFAIL: Failed to destroy MGR Object ++ * Requires: ++ * MGR Initialized (cRefs > 0 ) ++ * hMgrObject is a valid MGR handle . ++ * Ensures: ++ * DSP_SOK: MGR Object destroyed and hMgrObject is Invalid MGR ++ * Handle. ++ */ ++ extern DSP_STATUS MGR_Destroy(struct MGR_OBJECT *hMgrObject); ++ ++/* ++ * ======== MGR_EnumNodeInfo ======== ++ * Purpose: ++ * Enumerate and get configuration information about nodes configured ++ * in the node database. ++ * Parameters: ++ * uNode: The node index (base 0). ++ * pNDBProps: Ptr to the DSP_NDBPROPS structure for output. ++ * uNDBPropsSize: Size of the DSP_NDBPROPS structure. ++ * puNumNodes: Location where the number of nodes configured ++ * in the database will be returned. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EINVALIDARG: Parameter uNode is > than the number of nodes. ++ * configutred in the system ++ * DSP_ECHANGEDURINGENUM: During Enumeration there has been a change in ++ * the number of nodes configured or in the ++ * the properties of the enumerated nodes. ++ * DSP_EFAIL: Failed to querry the Node Data Base ++ * Requires: ++ * pNDBPROPS is not null ++ * uNDBPropsSize >= sizeof(DSP_NDBPROPS) ++ * puNumNodes is not null ++ * MGR Initialized (cRefs > 0 ) ++ * Ensures: ++ * SUCCESS on successful retreival of data and *puNumNodes > 0 OR ++ * DSP_FAILED && *puNumNodes == 0. 
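A small sketch of waiting on more than one bridge notification at once, as MGR_WaitForBridgeEvents() above allows; the two notification pointers, the one-second timeout and the helper name are assumptions.

static DSP_STATUS wait_on_two_events(struct DSP_NOTIFICATION *pNotifyA,
                                     struct DSP_NOTIFICATION *pNotifyB,
                                     u32 *puWhich)
{
    struct DSP_NOTIFICATION *aNotifications[2];

    aNotifications[0] = pNotifyA;
    aNotifications[1] = pNotifyB;

    /* blocks until one object is signalled or 1000 ms elapse */
    return MGR_WaitForBridgeEvents(aNotifications, 2, puWhich, 1000);
}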
++ * Details: ++ */ ++ extern DSP_STATUS MGR_EnumNodeInfo(u32 uNode, ++ OUT struct DSP_NDBPROPS *pNDBProps, ++ u32 uNDBPropsSize, ++ OUT u32 *puNumNodes); ++ ++/* ++ * ======== MGR_EnumProcessorInfo ======== ++ * Purpose: ++ * Enumerate and get configuration information about available DSP ++ * processors ++ * Parameters: ++ * uProcessor: The processor index (zero-based). ++ * pProcessorInfo: Ptr to the DSP_PROCESSORINFO structure . ++ * uProcessorInfoSize: Size of DSP_PROCESSORINFO structure. ++ * puNumProcs: Location where the number of DSPs configured ++ * in the database will be returned ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EINVALIDARG: Parameter uProcessor is > than the number of ++ * DSP Processors in the system. ++ * DSP_EFAIL: Failed to querry the Node Data Base ++ * Requires: ++ * pProcessorInfo is not null ++ * puNumProcs is not null ++ * uProcessorInfoSize >= sizeof(DSP_PROCESSORINFO) ++ * MGR Initialized (cRefs > 0 ) ++ * Ensures: ++ * SUCCESS on successful retreival of data and *puNumProcs > 0 OR ++ * DSP_FAILED && *puNumProcs == 0. ++ * Details: ++ */ ++ extern DSP_STATUS MGR_EnumProcessorInfo(u32 uProcessor, ++ OUT struct DSP_PROCESSORINFO * ++ pProcessorInfo, ++ u32 uProcessorInfoSize, ++ OUT u32 *puNumProcs); ++/* ++ * ======== MGR_Exit ======== ++ * Purpose: ++ * Decrement reference count, and free resources when reference count is ++ * 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * MGR is initialized. ++ * Ensures: ++ * When reference count == 0, MGR's private resources are freed. ++ */ ++ extern void MGR_Exit(void); ++ ++/* ++ * ======== MGR_GetDCDHandle ======== ++ * Purpose: ++ * Retrieves the MGR handle. Accessor Function ++ * Parameters: ++ * hMGRHandle: Handle to the Manager Object ++ * phDCDHandle: Ptr to receive the DCD Handle. ++ * Returns: ++ * DSP_SOK: Sucess ++ * DSP_EFAIL: Failure to get the Handle ++ * Requires: ++ * MGR is initialized. ++ * phDCDHandle != NULL ++ * Ensures: ++ * DSP_SOK and *phDCDHandle != NULL || ++ * DSP_EFAIL and *phDCDHandle == NULL ++ */ ++ extern DSP_STATUS MGR_GetDCDHandle(IN struct MGR_OBJECT ++ *hMGRHandle, ++ OUT u32 *phDCDHandle); ++ ++/* ++ * ======== MGR_Init ======== ++ * Purpose: ++ * Initialize MGR's private state, keeping a reference count on each ++ * call. Intializes the DCD. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * TRUE: A requirement for the other public MGR functions. ++ */ ++ extern bool MGR_Init(void); ++ ++#endif /* MGR_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mgrpriv.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mgrpriv.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/mgrpriv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/mgrpriv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,55 @@ ++/* ++ * mgrpriv.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
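To make the enumeration contract above concrete, here is a rough loop over the node database; DSP_SUCCEEDED is assumed to be the usual status-check macro from the bridge headers, and the helper name is made up.

static DSP_STATUS dump_node_database(void)
{
    struct DSP_NDBPROPS ndbProps;
    u32 uNumNodes = 0;
    u32 uNode = 0;
    DSP_STATUS status;

    do {
        /* uNumNodes is refreshed on every call */
        status = MGR_EnumNodeInfo(uNode, &ndbProps, sizeof(ndbProps),
                                  &uNumNodes);
        uNode++;
    } while (DSP_SUCCEEDED(status) && uNode < uNumNodes);

    return status;
}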
++ */ ++ ++ ++/* ++ * ======== mgrpriv.h ======== ++ * Description: ++ * Global MGR constants and types, shared by PROC, MGR, and WCD. ++ * ++ *! Revision History: ++ *! ================ ++ *! 29-July-2001 ag: added MGR_PROCESSOREXTINFO. ++ *! 05-July-2000 rr: Created ++ */ ++ ++#ifndef MGRPRIV_ ++#define MGRPRIV_ ++ ++/* ++ * OMAP1510 specific ++ */ ++#define MGR_MAXTLBENTRIES 32 ++ ++/* RM MGR Object */ ++ struct MGR_OBJECT; ++ ++ struct MGR_TLBENTRY { ++ u32 ulDspVirt; /* DSP virtual address */ ++ u32 ulGppPhys; /* GPP physical address */ ++ } ; ++ ++/* ++ * The DSP_PROCESSOREXTINFO structure describes additional extended ++ * capabilities of a DSP processor not exposed to user. ++ */ ++ struct MGR_PROCESSOREXTINFO { ++ struct DSP_PROCESSORINFO tyBasic; /* user processor info */ ++ /* private dsp mmu entries */ ++ struct MGR_TLBENTRY tyTlb[MGR_MAXTLBENTRIES]; ++ } ; ++ ++#endif /* MGRPRIV_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/msgdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/msgdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/msgdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/msgdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,43 @@ ++/* ++ * msgdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== msgdefs.h ======== ++ * Description: ++ * Global MSG constants and types. ++ * ++ *! Revision History ++ *! ================ ++ *! 09-May-2001 jeh Removed MSG_TODSP, MSG_FROMDSP. ++ *! 17-Nov-2000 jeh Added MSGMGR_SIGNATURE. ++ *! 12-Sep-2000 jeh Created. ++ */ ++ ++#ifndef MSGDEFS_ ++#define MSGDEFS_ ++ ++#define MSGMGR_SIGNATURE 0x4d47534d /* "MGSM" */ ++ ++/* MSG Objects: */ ++ struct MSG_MGR; ++ struct MSG_QUEUE; ++ ++/* Function prototype for callback to be called on RMS_EXIT message received */ ++ typedef void(*MSG_ONEXIT) (HANDLE h, s32 nStatus); ++ ++#endif /* MSGDEFS_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/msg.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/msg.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/msg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/msg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,106 @@ ++/* ++ * msg.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== msg.h ======== ++ * Description: ++ * DSP/BIOS Bridge MSG Module. 
++ * ++ * Public Functions: ++ * MSG_Create ++ * MSG_Delete ++ * MSG_Exit ++ * MSG_Init ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================= ++ *! 17-Nov-2000 jeh Removed MSG_Get, MSG_Put, MSG_CreateQueue, ++ *! MSG_DeleteQueue, and MSG_RegisterNotify, since these ++ *! are now part of mini-driver. ++ *! 12-Sep-2000 jeh Created. ++ */ ++ ++#ifndef MSG_ ++#define MSG_ ++ ++#include ++#include ++ ++/* ++ * ======== MSG_Create ======== ++ * Purpose: ++ * Create an object to manage message queues. Only one of these objects ++ * can exist per device object. The MSG manager must be created before ++ * the IO Manager. ++ * Parameters: ++ * phMsgMgr: Location to store MSG manager handle on output. ++ * hDevObject: The device object. ++ * msgCallback: Called whenever an RMS_EXIT message is received. ++ * Returns: ++ * Requires: ++ * MSG_Init(void) called. ++ * phMsgMgr != NULL. ++ * hDevObject != NULL. ++ * msgCallback != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS MSG_Create(OUT struct MSG_MGR **phMsgMgr, ++ struct DEV_OBJECT *hDevObject, ++ MSG_ONEXIT msgCallback); ++ ++/* ++ * ======== MSG_Delete ======== ++ * Purpose: ++ * Delete a MSG manager allocated in MSG_Create(). ++ * Parameters: ++ * hMsgMgr: Handle returned from MSG_Create(). ++ * Returns: ++ * Requires: ++ * MSG_Init(void) called. ++ * Valid hMsgMgr. ++ * Ensures: ++ */ ++ extern void MSG_Delete(struct MSG_MGR *hMsgMgr); ++ ++/* ++ * ======== MSG_Exit ======== ++ * Purpose: ++ * Discontinue usage of MSG module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * MSG_Init(void) successfully called before. ++ * Ensures: ++ * Any resources acquired in MSG_Init(void) will be freed when last MSG ++ * client calls MSG_Exit(void). ++ */ ++ extern void MSG_Exit(void); ++ ++/* ++ * ======== MSG_Init ======== ++ * Purpose: ++ * Initialize the MSG module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialization succeeded, FALSE otherwise. ++ * Ensures: ++ */ ++ extern bool MSG_Init(void); ++ ++#endif /* MSG_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nldrdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nldrdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nldrdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nldrdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,307 @@ ++/* ++ * nldrdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== nldrdefs.h ======== ++ * Description: ++ * Global Dynamic + static/overlay Node loader (NLDR) constants and types. ++ * ++ *! Revision History ++ *! ================ ++ *! 07-Apr-2003 map Consolidated dldrdefs.h into nldrdefs.h ++ *! 05-Aug-2002 jeh Created. ++ */ ++ ++#ifndef NLDRDEFS_ ++#define NLDRDEFS_ ++ ++#include ++#include ++ ++#define NLDR_MAXPATHLENGTH 255 ++/* NLDR Objects: */ ++ struct NLDR_OBJECT; ++ struct NLDR_NODEOBJECT; ++ ++/* ++ * ======== NLDR_LOADTYPE ======== ++ * Load types for a node. Must match values in node.h55. 
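A minimal bring-up sketch for the MSG manager documented above; my_on_exit is a hypothetical RMS_EXIT callback matching the MSG_ONEXIT typedef, and the device object is assumed to come from the caller.

static void my_on_exit(HANDLE h, s32 nStatus)
{
    /* hypothetical callback: node identified by h exited with nStatus */
}

static DSP_STATUS msg_setup(struct DEV_OBJECT *hDevObject,
                            struct MSG_MGR **phMsgMgr)
{
    DSP_STATUS status;

    if (!MSG_Init())
        return DSP_EFAIL;

    status = MSG_Create(phMsgMgr, hDevObject, my_on_exit);
    if (DSP_FAILED(status))
        MSG_Exit();

    return status;
}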
++ */ ++ enum NLDR_LOADTYPE { ++ NLDR_STATICLOAD, /* Linked in base image, not overlay */ ++ NLDR_DYNAMICLOAD, /* Dynamically loaded node */ ++ NLDR_OVLYLOAD /* Linked in base image, overlay node */ ++ } ; ++ ++/* ++ * ======== NLDR_OVLYFXN ======== ++ * Causes code or data to be copied from load address to run address. This ++ * is the "COD_WRITEFXN" that gets passed to the DBLL_Library and is used as ++ * the ZL write function. ++ * ++ * Parameters: ++ * pPrivRef: Handle to identify the node. ++ * ulDspRunAddr: Run address of code or data. ++ * ulDspLoadAddr: Load address of code or data. ++ * ulNumBytes: Number of (GPP) bytes to copy. ++ * nMemSpace: RMS_CODE or RMS_DATA. ++ * Returns: ++ * ulNumBytes: Success. ++ * 0: Failure. ++ * Requires: ++ * Ensures: ++ */ ++ typedef u32(*NLDR_OVLYFXN) (void *pPrivRef, u32 ulDspRunAddr, ++ u32 ulDspLoadAddr, ++ u32 ulNumBytes, u32 nMemSpace); ++ ++/* ++ * ======== NLDR_WRITEFXN ======== ++ * Write memory function. Used for dynamic load writes. ++ * Parameters: ++ * pPrivRef: Handle to identify the node. ++ * ulDspAddr: Address of code or data. ++ * pBuf: Code or data to be written ++ * ulNumBytes: Number of (GPP) bytes to write. ++ * nMemSpace: DBLL_DATA or DBLL_CODE. ++ * Returns: ++ * ulNumBytes: Success. ++ * 0: Failure. ++ * Requires: ++ * Ensures: ++ */ ++ typedef u32(*NLDR_WRITEFXN) (void *pPrivRef, ++ u32 ulDspAddr, void *pBuf, ++ u32 ulNumBytes, u32 nMemSpace); ++ ++/* ++ * ======== NLDR_ATTRS ======== ++ * Attributes passed to NLDR_Create function. ++ */ ++ struct NLDR_ATTRS { ++ NLDR_OVLYFXN pfnOvly; ++ NLDR_WRITEFXN pfnWrite; ++ u16 usDSPWordSize; ++ u16 usDSPMauSize; ++ } ; ++ ++/* ++ * ======== NLDR_PHASE ======== ++ * Indicates node create, delete, or execute phase function. ++ */ ++ enum NLDR_PHASE { ++ NLDR_CREATE, ++ NLDR_DELETE, ++ NLDR_EXECUTE, ++ NLDR_NOPHASE ++ } ; ++ ++/* ++ * Typedefs of loader functions imported from a DLL, or defined in a ++ * function table. ++ */ ++ ++/* ++ * ======== NLDR_Allocate ======== ++ * Allocate resources to manage the loading of a node on the DSP. ++ * ++ * Parameters: ++ * hNldr: Handle of loader that will load the node. ++ * pPrivRef: Handle to identify the node. ++ * pNodeProps: Pointer to a DCD_NODEPROPS for the node. ++ * phNldrNode: Location to store node handle on output. This handle ++ * will be passed to NLDR_Load/NLDR_Unload. ++ * pfPhaseSplit: pointer to boolean variable referenced in node.c ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory on GPP. ++ * Requires: ++ * NLDR_Init(void) called. ++ * Valid hNldr. ++ * pNodeProps != NULL. ++ * phNldrNode != NULL. ++ * Ensures: ++ * DSP_SOK: IsValidNode(*phNldrNode). ++ * error: *phNldrNode == NULL. ++ */ ++ typedef DSP_STATUS(*NLDR_ALLOCATEFXN) (struct NLDR_OBJECT *hNldr, ++ void *pPrivRef, ++ IN CONST struct DCD_NODEPROPS ++ *pNodeProps, ++ OUT struct NLDR_NODEOBJECT ++ **phNldrNode, ++ OUT bool *pfPhaseSplit); ++ ++/* ++ * ======== NLDR_Create ======== ++ * Create a loader object. This object handles the loading and unloading of ++ * create, delete, and execute phase functions of nodes on the DSP target. ++ * ++ * Parameters: ++ * phNldr: Location to store loader handle on output. ++ * hDevObject: Device for this processor. ++ * pAttrs: Loader attributes. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * Requires: ++ * NLDR_Init(void) called. ++ * phNldr != NULL. ++ * hDevObject != NULL. ++ * pAttrs != NULL. ++ * Ensures: ++ * DSP_SOK: Valid *phNldr. 
++ * error: *phNldr == NULL. ++ */ ++ typedef DSP_STATUS(*NLDR_CREATEFXN) (OUT struct NLDR_OBJECT **phNldr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct NLDR_ATTRS ++ *pAttrs); ++ ++/* ++ * ======== NLDR_Delete ======== ++ * Delete the NLDR loader. ++ * ++ * Parameters: ++ * hNldr: Node manager object. ++ * Returns: ++ * Requires: ++ * NLDR_Init(void) called. ++ * Valid hNldr. ++ * Ensures: ++ * hNldr invalid ++ */ ++ typedef void(*NLDR_DELETEFXN) (struct NLDR_OBJECT *hNldr); ++ ++/* ++ * ======== NLDR_Exit ======== ++ * Discontinue usage of NLDR module. ++ * ++ * Parameters: ++ * Returns: ++ * Requires: ++ * NLDR_Init(void) successfully called before. ++ * Ensures: ++ * Any resources acquired in NLDR_Init(void) will be freed when last NLDR ++ * client calls NLDR_Exit(void). ++ */ ++ typedef void(*NLDR_EXITFXN) (void); ++ ++/* ++ * ======== NLDR_Free ======== ++ * Free resources allocated in NLDR_Allocate. ++ * ++ * Parameters: ++ * hNldrNode: Handle returned from NLDR_Allocate(). ++ * Returns: ++ * Requires: ++ * NLDR_Init(void) called. ++ * Valid hNldrNode. ++ * Ensures: ++ */ ++ typedef void(*NLDR_FREEFXN) (struct NLDR_NODEOBJECT *hNldrNode); ++ ++/* ++ * ======== NLDR_GetFxnAddr ======== ++ * Get address of create, delete, or execute phase function of a node on ++ * the DSP. ++ * ++ * Parameters: ++ * hNldrNode: Handle returned from NLDR_Allocate(). ++ * pstrFxn: Name of function. ++ * pulAddr: Location to store function address. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ESYMBOL: Address of function not found. ++ * Requires: ++ * NLDR_Init(void) called. ++ * Valid hNldrNode. ++ * pulAddr != NULL; ++ * pstrFxn != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*NLDR_GETFXNADDRFXN) (struct NLDR_NODEOBJECT ++ *hNldrNode, ++ char *pstrFxn, u32 *pulAddr); ++ ++/* ++ * ======== NLDR_Init ======== ++ * Initialize the NLDR module. ++ * ++ * Parameters: ++ * Returns: ++ * TRUE if initialization succeeded, FALSE otherwise. ++ * Ensures: ++ */ ++ typedef bool(*NLDR_INITFXN) (void); ++ ++/* ++ * ======== NLDR_Load ======== ++ * Load create, delete, or execute phase function of a node on the DSP. ++ * ++ * Parameters: ++ * hNldrNode: Handle returned from NLDR_Allocate(). ++ * phase: Type of function to load (create, delete, or execute). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory on GPP. ++ * DSP_EOVERLAYMEMORY: Can't overlay phase because overlay memory ++ * is already in use. ++ * DSP_EDYNLOAD: Failure in dynamic loader library. ++ * DSP_EFWRITE: Failed to write phase's code or date to target. ++ * Requires: ++ * NLDR_Init(void) called. ++ * Valid hNldrNode. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*NLDR_LOADFXN) (struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase); ++ ++/* ++ * ======== NLDR_Unload ======== ++ * Unload create, delete, or execute phase function of a node on the DSP. ++ * ++ * Parameters: ++ * hNldrNode: Handle returned from NLDR_Allocate(). ++ * phase: Node function to unload (create, delete, or execute). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory on GPP. ++ * Requires: ++ * NLDR_Init(void) called. ++ * Valid hNldrNode. 
++ * Ensures: ++ */ ++ typedef DSP_STATUS(*NLDR_UNLOADFXN) (struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase); ++ ++/* ++ * ======== NLDR_FXNS ======== ++ */ ++ struct NLDR_FXNS { ++ NLDR_ALLOCATEFXN pfnAllocate; ++ NLDR_CREATEFXN pfnCreate; ++ NLDR_DELETEFXN pfnDelete; ++ NLDR_EXITFXN pfnExit; ++ NLDR_FREEFXN pfnFree; ++ NLDR_GETFXNADDRFXN pfnGetFxnAddr; ++ NLDR_INITFXN pfnInit; ++ NLDR_LOADFXN pfnLoad; ++ NLDR_UNLOADFXN pfnUnload; ++ } ; ++ ++#endif /* NLDRDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nldr.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nldr.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nldr.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nldr.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,81 @@ ++/* ++ * nldr.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== nldr.h ======== ++ * ++ * Description: ++ * DSP/BIOS Bridge dynamic loader interface. See the file dldrdefs.h ++ * for a description of these functions. ++ * ++ * Public Functions: ++ * NLDR_Allocate ++ * NLDR_Create ++ * NLDR_Delete ++ * NLDR_Exit ++ * NLDR_Free ++ * NLDR_GetFxnAddr ++ * NLDR_Init ++ * NLDR_Load ++ * NLDR_Unload ++ * ++ * Notes: ++ * ++ *! Revision History ++ *! ================ ++ *! 31-Jul-2002 jeh Removed function header comments. ++ *! 17-Apr-2002 jeh Created. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#ifndef NLDR_ ++#define NLDR_ ++ ++ extern DSP_STATUS NLDR_Allocate(struct NLDR_OBJECT *hNldr, ++ void *pPrivRef, ++ IN CONST struct DCD_NODEPROPS ++ *pNodeProps, ++ OUT struct NLDR_NODEOBJECT **phNldrNode, ++ IN bool *pfPhaseSplit); ++ ++ extern DSP_STATUS NLDR_Create(OUT struct NLDR_OBJECT **phNldr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct NLDR_ATTRS *pAttrs); ++ ++ extern void NLDR_Delete(struct NLDR_OBJECT *hNldr); ++ extern void NLDR_Exit(void); ++ extern void NLDR_Free(struct NLDR_NODEOBJECT *hNldrNode); ++ ++ extern DSP_STATUS NLDR_GetFxnAddr(struct NLDR_NODEOBJECT *hNldrNode, ++ char *pstrFxn, u32 *pulAddr); ++ ++ extern DSP_STATUS NLDR_GetRmmManager(struct NLDR_OBJECT *hNldrObject, ++ OUT struct RMM_TargetObj ++ **phRmmMgr); ++ ++ extern bool NLDR_Init(void); ++ extern DSP_STATUS NLDR_Load(struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase); ++ extern DSP_STATUS NLDR_Unload(struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase); ++ ++#endif /* NLDR_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nodedefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nodedefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nodedefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nodedefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,40 @@ ++/* ++ * nodedefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
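The NLDR_FXNS table above is the shape in which the loader entry points are handed around; a sketch of binding it to the NLDR_* functions declared in nldr.h follows (the variable name is arbitrary).

static struct NLDR_FXNS nldrFxns = {
    .pfnAllocate   = NLDR_Allocate,
    .pfnCreate     = NLDR_Create,
    .pfnDelete     = NLDR_Delete,
    .pfnExit       = NLDR_Exit,
    .pfnFree       = NLDR_Free,
    .pfnGetFxnAddr = NLDR_GetFxnAddr,
    .pfnInit       = NLDR_Init,
    .pfnLoad       = NLDR_Load,
    .pfnUnload     = NLDR_Unload,
};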
++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== nodedefs.h ======== ++ * Description: ++ * Global NODE constants and types, shared by PROCESSOR, NODE, and DISP. ++ * ++ *! Revision History ++ *! ================ ++ *! 23-Apr-2001 jeh Removed NODE_MGRATTRS. ++ *! 21-Sep-2000 jeh Removed NODE_TYPE enum. ++ *! 17-Jul-2000 jeh Changed order of node types to match rms_sh.h. ++ *! 20-Jun-2000 jeh Created. ++ */ ++ ++#ifndef NODEDEFS_ ++#define NODEDEFS_ ++ ++#define NODE_SUSPENDEDPRI -1 ++ ++/* NODE Objects: */ ++ struct NODE_MGR; ++ struct NODE_OBJECT; ++ ++#endif /* NODEDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/node.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/node.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/node.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/node.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,622 @@ ++/* ++ * node.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== node.h ======== ++ * Description: ++ * DSP/BIOS Bridge Node Manager. ++ * ++ * Public Functions: ++ * NODE_Allocate ++ * NODE_AllocMsgBuf ++ * NODE_ChangePriority ++ * NODE_Connect ++ * NODE_Create ++ * NODE_CreateMgr ++ * NODE_Delete ++ * NODE_DeleteMgr ++ * NODE_EnumNodes ++ * NODE_Exit ++ * NODE_FreeMsgBuf ++ * NODE_GetAttr ++ * NODE_GetMessage ++ * NODE_GetProcessor ++ * NODE_Init ++ * NODE_OnExit ++ * NODE_Pause ++ * NODE_PutMessage ++ * NODE_RegisterNotify ++ * NODE_Run ++ * NODE_Terminate ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================= ++ *! 23-Apr-2001 jeh Updated with code review changes. ++ *! 16-Jan-2001 jeh Added DSP_ESYMBOL, DSP_EUUID to return codes. ++ *! 17-Nov-2000 jeh Added NODE_OnExit(). ++ *! 27-Oct-2000 jeh Added timeouts to NODE_GetMessage, NODE_PutMessage. ++ *! 12-Oct-2000 jeh Changed NODE_EnumNodeInfo to NODE_EnumNodes. Removed ++ *! NODE_RegisterAllNodes(). ++ *! 07-Sep-2000 jeh Changed type HANDLE in NODE_RegisterNotify to ++ *! DSP_HNOTIFICATION. Added DSP_STRMATTR param to ++ *! NODE_Connect(). Removed NODE_GetMessageStream(). ++ *! 17-Jul-2000 jeh Updated function header descriptions. ++ *! 19-Jun-2000 jeh Created. ++ */ ++ ++#ifndef NODE_ ++#define NODE_ ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++/* ++ * ======== NODE_Allocate ======== ++ * Purpose: ++ * Allocate GPP resources to manage a node on the DSP. ++ * Parameters: ++ * hProcessor: Handle of processor that is allocating the node. ++ * pNodeId: Pointer to a DSP_UUID for the node. 
++ * pArgs: Optional arguments to be passed to the node. ++ * pAttrIn: Optional pointer to node attributes (priority, ++ * timeout...) ++ * phNode: Location to store node handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory on GPP. ++ * DSP_EUUID: Node UUID has not been registered. ++ * DSP_ESYMBOL: iAlg functions not found for a DAIS node. ++ * DSP_ERANGE: pAttrIn != NULL and pAttrIn->iPriority out of ++ * range. ++ * DSP_EFAIL: A failure occured, unable to allocate node. ++ * DSP_EWRONGSTATE: Proccessor is not in the running state. ++ * Requires: ++ * NODE_Init(void) called. ++ * hProcessor != NULL. ++ * pNodeId != NULL. ++ * phNode != NULL. ++ * Ensures: ++ * DSP_SOK: IsValidNode(*phNode). ++ * error: *phNode == NULL. ++ */ ++ extern DSP_STATUS NODE_Allocate(struct PROC_OBJECT *hProcessor, ++ IN CONST struct DSP_UUID *pNodeId, ++ OPTIONAL IN CONST struct DSP_CBDATA ++ *pArgs, ++ OPTIONAL IN CONST struct DSP_NODEATTRIN ++ *pAttrIn, ++ OUT struct NODE_OBJECT **phNode, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== NODE_AllocMsgBuf ======== ++ * Purpose: ++ * Allocate and Prepare a buffer whose descriptor will be passed to a ++ * Node within a (DSP_MSG)message ++ * Parameters: ++ * hNode: The node handle. ++ * uSize: The size of the buffer to be allocated. ++ * pAttr: Pointer to a DSP_BUFFERATTR structure. ++ * pBuffer: Location to store the address of the allocated ++ * buffer on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid node handle. ++ * DSP_EMEMORY: Insufficent memory. ++ * DSP_EFAIL: General Failure. ++ * DSP_ESIZE: Invalid Size. ++ * Requires: ++ * NODE_Init(void) called. ++ * pBuffer != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_AllocMsgBuf(struct NODE_OBJECT *hNode, ++ u32 uSize, ++ OPTIONAL struct DSP_BUFFERATTR ++ *pAttr, ++ OUT u8 **pBuffer); ++ ++/* ++ * ======== NODE_ChangePriority ======== ++ * Purpose: ++ * Change the priority of an allocated node. ++ * Parameters: ++ * hNode: Node handle returned from NODE_Allocate. ++ * nPriority: New priority level to set node's priority to. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ERANGE: nPriority is out of range. ++ * DSP_ENODETYPE: The specified node is not a task node. ++ * DSP_EWRONGSTATE: Node is not in the NODE_ALLOCATED, NODE_PAUSED, ++ * or NODE_RUNNING state. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_ERESTART: A critical error has occurred and the DSP is ++ * being restarted. ++ * DSP_EFAIL: Unable to change node's runtime priority level. ++ * Requires: ++ * NODE_Init(void) called. ++ * Ensures: ++ * DSP_SOK && (Node's current priority == nPriority) ++ */ ++ extern DSP_STATUS NODE_ChangePriority(struct NODE_OBJECT *hNode, ++ s32 nPriority); ++ ++/* ++ * ======== NODE_CloseOrphans ======== ++ * Purpose: ++ * Delete all nodes whose owning processor is being destroyed. ++ * Parameters: ++ * hNodeMgr: Node manager object. ++ * hProc: Handle to processor object being destroyed. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Unable to delete all nodes belonging to hProc. ++ * Requires: ++ * Valid hNodeMgr. ++ * hProc != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_CloseOrphans(struct NODE_MGR *hNodeMgr, ++ struct PROC_OBJECT *hProc); ++ ++/* ++ * ======== NODE_Connect ======== ++ * Purpose: ++ * Connect two nodes on the DSP, or a node on the DSP to the GPP. 
In the ++ * case that the connnection is being made between a node on the DSP and ++ * the GPP, one of the node handles (either hNode1 or hNode2) must be ++ * the constant NODE_HGPPNODE. ++ * Parameters: ++ * hNode1: Handle of first node to connect to second node. If ++ * this is a connection from the GPP to hNode2, hNode1 ++ * must be the constant NODE_HGPPNODE. Otherwise, hNode1 ++ * must be a node handle returned from a successful call ++ * to Node_Allocate(). ++ * hNode2: Handle of second node. Must be either NODE_HGPPNODE ++ * if this is a connection from DSP node to GPP, or a ++ * node handle returned from a successful call to ++ * NODE_Allocate(). ++ * uStream1: Output stream index on first node, to be connected ++ * to second node's input stream. Value must range from ++ * 0 <= uStream1 < number of output streams. ++ * uStream2: Input stream index on second node. Value must range ++ * from 0 <= uStream2 < number of input streams. ++ * pAttrs: Stream attributes (NULL ==> use defaults). ++ * pConnParam: A pointer to a DSP_CBDATA structure that defines ++ * connection parameter for device nodes to pass to DSP ++ * side. ++ * If the value of this parameter is NULL, then this API ++ * behaves like DSPNode_Connect. This parameter will have ++ * length of the string and the null terminated string in ++ * DSP_CBDATA struct. This can be extended in future tp ++ * pass binary data. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode1 or hNode2. ++ * DSP_EMEMORY: Insufficient host memory. ++ * DSP_EVALUE: A stream index parameter is invalid. ++ * DSP_EALREADYCONNECTED: A connection already exists for one of the ++ * indices uStream1 or uStream2. ++ * DSP_EWRONGSTATE: Either hNode1 or hNode2 is not in the ++ * NODE_ALLOCATED state. ++ * DSP_ENOMORECONNECTIONS: No more connections available. ++ * DSP_EFAIL: Attempt to make an illegal connection (eg, ++ * Device node to device node, or device node to ++ * GPP), the two nodes are on different DSPs. ++ * Requires: ++ * NODE_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_Connect(struct NODE_OBJECT *hNode1, ++ u32 uStream1, ++ struct NODE_OBJECT *hNode2, ++ u32 uStream2, ++ OPTIONAL IN struct DSP_STRMATTR *pAttrs, ++ OPTIONAL IN struct DSP_CBDATA ++ *pConnParam); ++ ++/* ++ * ======== NODE_Create ======== ++ * Purpose: ++ * Create a node on the DSP by remotely calling the node's create ++ * function. If necessary, load code that contains the node's create ++ * function. ++ * Parameters: ++ * hNode: Node handle returned from NODE_Allocate(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ESYMBOL: Create function not found in the COFF file. ++ * DSP_EWRONGSTATE: Node is not in the NODE_ALLOCATED state. ++ * DSP_EMEMORY: Memory allocation failure on the DSP. ++ * DSP_ETASK: Unable to create node's task or process on the DSP. ++ * DSP_ESTREAM: Stream creation failure on the DSP. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_EUSER1-16: A user-defined failure occurred on the DSP. ++ * DSP_EFAIL: A failure occurred, unable to create node. ++ * Requires: ++ * NODE_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_Create(struct NODE_OBJECT *hNode); ++ ++/* ++ * ======== NODE_CreateMgr ======== ++ * Purpose: ++ * Create a NODE Manager object. This object handles the creation, ++ * deletion, and execution of nodes on the DSP target. The NODE Manager ++ * also maintains a pipe map of used and available node connections. 
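As a concrete reading of the host-connection case above, the sketch below attaches output stream 0 of a node to the GPP with default attributes; NODE_HGPPNODE is the host end-point constant referred to in the description, and depending on how it is defined elsewhere a cast may be required.

static DSP_STATUS connect_stream0_to_host(struct NODE_OBJECT *hNode)
{
    /* NULL attributes mean defaults; no device-node connect parameters */
    return NODE_Connect(hNode, 0, NODE_HGPPNODE, 0, NULL, NULL);
}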
++ * Each DEV object should have exactly one NODE Manager object. ++ * ++ * Parameters: ++ * phNodeMgr: Location to store node manager handle on output. ++ * hDevObject: Device for this processor. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * NODE_Init(void) called. ++ * phNodeMgr != NULL. ++ * hDevObject != NULL. ++ * Ensures: ++ * DSP_SOK: Valide *phNodeMgr. ++ * error: *phNodeMgr == NULL. ++ */ ++ extern DSP_STATUS NODE_CreateMgr(OUT struct NODE_MGR **phNodeMgr, ++ struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== NODE_Delete ======== ++ * Purpose: ++ * Delete resources allocated in NODE_Allocate(). If the node was ++ * created, delete the node on the DSP by remotely calling the node's ++ * delete function. Loads the node's delete function if necessary. ++ * GPP side resources are freed after node's delete function returns. ++ * Parameters: ++ * hNode: Node handle returned from NODE_Allocate(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_EDELETE: A deletion failure occurred. ++ * DSP_EUSER1-16: Node specific failure occurred on the DSP. ++ * DSP_EFAIL: A failure occurred in deleting the node. ++ * DSP_ESYMBOL: Delete function not found in the COFF file. ++ * Requires: ++ * NODE_Init(void) called. ++ * Ensures: ++ * DSP_SOK: hNode is invalid. ++ */ ++ extern DSP_STATUS NODE_Delete(struct NODE_OBJECT *hNode, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== NODE_DeleteMgr ======== ++ * Purpose: ++ * Delete the NODE Manager. ++ * Parameters: ++ * hNodeMgr: Node manager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * Requires: ++ * NODE_Init(void) called. ++ * Valid hNodeMgr. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_DeleteMgr(struct NODE_MGR *hNodeMgr); ++ ++/* ++ * ======== NODE_EnumNodes ======== ++ * Purpose: ++ * Enumerate the nodes currently allocated for the DSP. ++ * Parameters: ++ * hNodeMgr: Node manager returned from NODE_CreateMgr(). ++ * aNodeTab: Array to copy node handles into. ++ * uNodeTabSize: Number of handles that can be written to aNodeTab. ++ * puNumNodes: Location where number of node handles written to ++ * aNodeTab will be written. ++ * puAllocated: Location to write total number of allocated nodes. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ESIZE: aNodeTab is too small to hold all node handles. ++ * Requires: ++ * Valid hNodeMgr. ++ * aNodeTab != NULL || uNodeTabSize == 0. ++ * puNumNodes != NULL. ++ * puAllocated != NULL. ++ * Ensures: ++ * - (DSP_ESIZE && *puNumNodes == 0) ++ * - || (DSP_SOK && *puNumNodes <= uNodeTabSize) && ++ * (*puAllocated == *puNumNodes) ++ */ ++ extern DSP_STATUS NODE_EnumNodes(struct NODE_MGR *hNodeMgr, ++ IN DSP_HNODE *aNodeTab, ++ u32 uNodeTabSize, ++ OUT u32 *puNumNodes, ++ OUT u32 *puAllocated); ++ ++/* ++ * ======== NODE_Exit ======== ++ * Purpose: ++ * Discontinue usage of NODE module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * NODE_Init(void) successfully called before. ++ * Ensures: ++ * Any resources acquired in NODE_Init(void) will be freed when last NODE ++ * client calls NODE_Exit(void). ++ */ ++ extern void NODE_Exit(void); ++ ++/* ++ * ======== NODE_FreeMsgBuf ======== ++ * Purpose: ++ * Free a message buffer previously allocated with NODE_AllocMsgBuf. ++ * Parameters: ++ * hNode: The node handle. ++ * pBuffer: (Address) Buffer allocated by NODE_AllocMsgBuf. 
++ * pAttr: Same buffer attributes passed to NODE_AllocMsgBuf. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid node handle. ++ * DSP_EFAIL: Failure to free the buffer. ++ * Requires: ++ * NODE_Init(void) called. ++ * pBuffer != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_FreeMsgBuf(struct NODE_OBJECT *hNode, ++ IN u8 *pBuffer, ++ OPTIONAL struct DSP_BUFFERATTR ++ *pAttr); ++ ++/* ++ * ======== NODE_GetAttr ======== ++ * Purpose: ++ * Copy the current attributes of the specified node into a DSP_NODEATTR ++ * structure. ++ * Parameters: ++ * hNode: Node object allocated from NODE_Allocate(). ++ * pAttr: Pointer to DSP_NODEATTR structure to copy node's ++ * attributes. ++ * uAttrSize: Size of pAttr. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * Requires: ++ * NODE_Init(void) called. ++ * pAttr != NULL. ++ * Ensures: ++ * DSP_SOK: *pAttrs contains the node's current attributes. ++ */ ++ extern DSP_STATUS NODE_GetAttr(struct NODE_OBJECT *hNode, ++ OUT struct DSP_NODEATTR *pAttr, ++ u32 uAttrSize); ++ ++/* ++ * ======== NODE_GetMessage ======== ++ * Purpose: ++ * Retrieve a message from a node on the DSP. The node must be either a ++ * message node, task node, or XDAIS socket node. ++ * If a message is not available, this function will block until a ++ * message is available, or the node's timeout value is reached. ++ * Parameters: ++ * hNode: Node handle returned from NODE_Allocate(). ++ * pMessage: Pointer to DSP_MSG structure to copy the ++ * message into. ++ * uTimeout: Timeout in milliseconds to wait for message. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ENODETYPE: Cannot retrieve messages from this type of node. ++ * DSP_ETIMEOUT: Timeout occurred and no message is available. ++ * DSP_EFAIL: Error occurred while trying to retrieve a message. ++ * Requires: ++ * NODE_Init(void) called. ++ * pMessage != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_GetMessage(struct NODE_OBJECT *hNode, ++ OUT struct DSP_MSG *pMessage, ++ u32 uTimeout); ++ ++/* ++ * ======== NODE_GetNldrObj ======== ++ * Purpose: ++ * Retrieve the Nldr manager ++ * Parameters: ++ * hNodeMgr: Node Manager ++ * phNldrObj: Pointer to a Nldr manager handle ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_GetNldrObj(struct NODE_MGR *hNodeMgr, ++ OUT struct NLDR_OBJECT **phNldrObj); ++ ++/* ++ * ======== NODE_Init ======== ++ * Purpose: ++ * Initialize the NODE module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialization succeeded, FALSE otherwise. ++ * Ensures: ++ */ ++ extern bool NODE_Init(void); ++ ++/* ++ * ======== NODE_OnExit ======== ++ * Purpose: ++ * Gets called when RMS_EXIT is received for a node. PROC needs to pass ++ * this function as a parameter to MSG_Create(). This function then gets ++ * called by the mini-driver when an exit message for a node is received. ++ * Parameters: ++ * hNode: Handle of the node that the exit message is for. ++ * nStatus: Return status of the node's execute phase. ++ * Returns: ++ * Ensures: ++ */ ++ void NODE_OnExit(struct NODE_OBJECT *hNode, s32 nStatus); ++ ++/* ++ * ======== NODE_Pause ======== ++ * Purpose: ++ * Suspend execution of a node currently running on the DSP. ++ * Parameters: ++ * hNode: Node object representing a node currently ++ * running on the DSP. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ENODETYPE: Node is not a task or socket node. 
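A round-trip sketch for the message-buffer helpers above, with a placeholder 1024-byte size and default (NULL) buffer attributes.

static DSP_STATUS msg_buf_roundtrip(struct NODE_OBJECT *hNode)
{
    u8 *pBuffer = NULL;
    DSP_STATUS status;

    status = NODE_AllocMsgBuf(hNode, 1024, NULL, &pBuffer);
    if (DSP_SUCCEEDED(status))
        status = NODE_FreeMsgBuf(hNode, pBuffer, NULL);

    return status;
}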
++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_EWRONGSTSATE: Node is not in NODE_RUNNING state. ++ * DSP_EFAIL: Failed to pause node. ++ * Requires: ++ * NODE_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_Pause(struct NODE_OBJECT *hNode); ++ ++/* ++ * ======== NODE_PutMessage ======== ++ * Purpose: ++ * Send a message to a message node, task node, or XDAIS socket node. ++ * This function will block until the message stream can accommodate ++ * the message, or a timeout occurs. The message will be copied, so Msg ++ * can be re-used immediately after return. ++ * Parameters: ++ * hNode: Node handle returned by NODE_Allocate(). ++ * pMsg: Location of message to be sent to the node. ++ * uTimeout: Timeout in msecs to wait. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ENODETYPE: Messages can't be sent to this type of node. ++ * DSP_ETIMEOUT: Timeout occurred before message could be set. ++ * DSP_EWRONGSTATE: Node is in invalid state for sending messages. ++ * DSP_EFAIL: Unable to send message. ++ * Requires: ++ * NODE_Init(void) called. ++ * pMsg != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_PutMessage(struct NODE_OBJECT *hNode, ++ IN CONST struct DSP_MSG *pMsg, ++ u32 uTimeout); ++ ++/* ++ * ======== NODE_RegisterNotify ======== ++ * Purpose: ++ * Register to be notified on specific events for this node. ++ * Parameters: ++ * hNode: Node handle returned by NODE_Allocate(). ++ * uEventMask: Mask of types of events to be notified about. ++ * uNotifyType: Type of notification to be sent. ++ * hNotification: Handle to be used for notification. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_EMEMORY: Insufficient memory on GPP. ++ * DSP_EVALUE: uEventMask is invalid. ++ * DSP_ENOTIMPL: Notification type specified by uNotifyType is not ++ * supported. ++ * Requires: ++ * NODE_Init(void) called. ++ * hNotification != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_RegisterNotify(struct NODE_OBJECT *hNode, ++ u32 uEventMask, u32 uNotifyType, ++ struct DSP_NOTIFICATION ++ *hNotification); ++ ++/* ++ * ======== NODE_Run ======== ++ * Purpose: ++ * Start execution of a node's execute phase, or resume execution of ++ * a node that has been suspended (via NODE_Pause()) on the DSP. Load ++ * the node's execute function if necessary. ++ * Parameters: ++ * hNode: Node object representing a node currently ++ * running on the DSP. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ENODETYPE: hNode doesn't represent a message, task or dais ++ * socket node. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_EWRONGSTSATE: Node is not in NODE_PAUSED or NODE_CREATED state. ++ * DSP_EFAIL: Unable to start or resume execution. ++ * DSP_ESYMBOL: Execute function not found in the COFF file. ++ * Requires: ++ * NODE_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_Run(struct NODE_OBJECT *hNode); ++ ++/* ++ * ======== NODE_Terminate ======== ++ * Purpose: ++ * Signal a node running on the DSP that it should exit its execute ++ * phase function. ++ * Parameters: ++ * hNode: Node object representing a node currently ++ * running on the DSP. ++ * pStatus: Location to store execute-phase function return ++ * value (DSP_EUSER1-16). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ETIMEOUT: A timeout occurred before the DSP responded. ++ * DSP_ENODETYPE: Type of node specified cannot be terminated. 
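The blocking semantics of NODE_PutMessage() and NODE_GetMessage() above suggest the following send-then-wait sketch; the 5000 ms timeouts are placeholders and the DSP_MSG contents are left to the caller.

static DSP_STATUS echo_once(struct NODE_OBJECT *hNode, struct DSP_MSG *pMsg)
{
    DSP_STATUS status;

    /* wait up to 5 s for queue space, then up to 5 s for the reply */
    status = NODE_PutMessage(hNode, pMsg, 5000);
    if (DSP_SUCCEEDED(status))
        status = NODE_GetMessage(hNode, pMsg, 5000);

    return status;
}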
++ * DSP_EWRONGSTATE: Operation not valid for the current node state. ++ * DSP_EFAIL: Unable to terminate the node. ++ * Requires: ++ * NODE_Init(void) called. ++ * pStatus != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_Terminate(struct NODE_OBJECT *hNode, ++ OUT DSP_STATUS *pStatus); ++ ++ ++ ++/* ++ * ======== NODE_GetUUIDProps ======== ++ * Purpose: ++ * Fetch Node properties given the UUID ++ * Parameters: ++ * ++ */ ++ extern DSP_STATUS NODE_GetUUIDProps(DSP_HPROCESSOR hProcessor, ++ IN CONST struct DSP_UUID *pNodeId, ++ OUT struct DSP_NDBPROPS ++ *pNodeProps); ++ ++#endif /* NODE_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nodepriv.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nodepriv.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/nodepriv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/nodepriv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,202 @@ ++/* ++ * nodepriv.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== nodepriv.h ======== ++ * Description: ++ * Private node header shared by NODE and DISP. ++ * ++ * Public Functions: ++ * NODE_GetChannelId ++ * NODE_GetStrmMgr ++ * NODE_GetTimeout ++ * NODE_GetType ++ * NODE_GetLoadType ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Nov-2002 map Added NODE_GetLoadType ++ *! 13-Feb-2002 jeh Added uSysStackSize to NODE_TASKARGS. ++ *! 23-Apr-2001 jeh Removed unused typedefs, defines. ++ *! 10-Oct-2000 jeh Added alignment to NODE_STRMDEF. ++ *! 20-Jun-2000 jeh Created. ++ */ ++ ++#ifndef NODEPRIV_ ++#define NODEPRIV_ ++ ++#include ++#include ++#include ++ ++/* DSP address of node environment structure */ ++ typedef u32 NODE_ENV; ++ ++/* ++ * Node create structures ++ */ ++ ++/* Message node */ ++ struct NODE_MSGARGS { ++ u32 uMaxMessages; /* Max # of simultaneous messages for node */ ++ u32 uSegid; /* Segment for allocating message buffers */ ++ u32 uNotifyType; /* Notify type (SEM_post, SWI_post, etc.) 
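Putting the allocate/create/run/terminate/delete calls above in order, a rough life-cycle sketch looks like this; the processor handle, node UUID and process context are assumed to exist in the caller, and error handling is reduced to the essentials.

static DSP_STATUS run_node_once(struct PROC_OBJECT *hProc,
                                struct DSP_UUID *pNodeUuid,
                                struct PROCESS_CONTEXT *pr_ctxt)
{
    struct NODE_OBJECT *hNode = NULL;
    DSP_STATUS exitStatus = DSP_SOK;
    DSP_STATUS status;

    status = NODE_Allocate(hProc, pNodeUuid, NULL, NULL, &hNode, pr_ctxt);
    if (DSP_FAILED(status))
        return status;

    status = NODE_Create(hNode);
    if (DSP_SUCCEEDED(status))
        status = NODE_Run(hNode);
    if (DSP_SUCCEEDED(status))
        status = NODE_Terminate(hNode, &exitStatus);

    /* GPP-side resources are always released */
    NODE_Delete(hNode, pr_ctxt);

    return status;
}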
*/ ++ u32 uArgLength; /* Length in 32-bit words of arg data block */ ++ u8 *pData; /* Argument data for node */ ++ } ; ++ ++ struct NODE_STRMDEF { ++ u32 uBufsize; /* Size of buffers for SIO stream */ ++ u32 uNumBufs; /* max # of buffers in SIO stream at once */ ++ u32 uSegid; /* Memory segment id to allocate buffers */ ++ u32 uTimeout; /* Timeout for blocking SIO calls */ ++ u32 uAlignment; /* Buffer alignment */ ++ char *szDevice; /* Device name for stream */ ++ } ; ++ ++/* Task node */ ++ struct NODE_TASKARGS { ++ struct NODE_MSGARGS msgArgs; ++ s32 nPriority; ++ u32 uStackSize; ++ u32 uSysStackSize; ++ u32 uStackSeg; ++ u32 uDSPHeapResAddr; /* DSP virtual heap address */ ++ u32 uDSPHeapAddr; /* DSP virtual heap address */ ++ u32 uHeapSize; /* Heap size */ ++ u32 uGPPHeapAddr; /* GPP virtual heap address */ ++ u32 uProfileID; /* Profile ID */ ++ u32 uNumInputs; ++ u32 uNumOutputs; ++ u32 ulDaisArg; /* Address of iAlg object */ ++ struct NODE_STRMDEF *strmInDef; ++ struct NODE_STRMDEF *strmOutDef; ++ } ; ++ ++/* ++ * ======== NODE_CREATEARGS ======== ++ */ ++ struct NODE_CREATEARGS { ++ union { ++ struct NODE_MSGARGS msgArgs; ++ struct NODE_TASKARGS taskArgs; ++ } asa; ++ } ; ++ ++/* ++ * ======== NODE_GetChannelId ======== ++ * Purpose: ++ * Get the channel index reserved for a stream connection between the ++ * host and a node. This index is reserved when NODE_Connect() is called ++ * to connect the node with the host. This index should be passed to ++ * the CHNL_Open function when the stream is actually opened. ++ * Parameters: ++ * hNode: Node object allocated from NODE_Allocate(). ++ * uDir: Input (DSP_TONODE) or output (DSP_FROMNODE). ++ * uIndex: Stream index. ++ * pulId: Location to store channel index. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_ENODETYPE: Not a task or DAIS socket node. ++ * DSP_EVALUE: The node's stream corresponding to uIndex and uDir ++ * is not a stream to or from the host. ++ * Requires: ++ * NODE_Init(void) called. ++ * Valid uDir. ++ * pulId != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_GetChannelId(struct NODE_OBJECT *hNode, ++ u32 uDir, ++ u32 uIndex, OUT u32 *pulId); ++ ++/* ++ * ======== NODE_GetStrmMgr ======== ++ * Purpose: ++ * Get the STRM manager for a node. ++ * Parameters: ++ * hNode: Node allocated with NODE_Allocate(). ++ * phStrmMgr: Location to store STRM manager on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * Requires: ++ * phStrmMgr != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS NODE_GetStrmMgr(struct NODE_OBJECT *hNode, ++ struct STRM_MGR **phStrmMgr); ++ ++/* ++ * ======== NODE_GetTimeout ======== ++ * Purpose: ++ * Get the timeout value of a node. ++ * Parameters: ++ * hNode: Node allocated with NODE_Allocate(), or DSP_HGPPNODE. ++ * Returns: ++ * Node's timeout value. ++ * Requires: ++ * Valid hNode. ++ * Ensures: ++ */ ++ extern u32 NODE_GetTimeout(struct NODE_OBJECT *hNode); ++ ++/* ++ * ======== NODE_GetType ======== ++ * Purpose: ++ * Get the type (device, message, task, or XDAIS socket) of a node. ++ * Parameters: ++ * hNode: Node allocated with NODE_Allocate(), or DSP_HGPPNODE. ++ * Returns: ++ * Node type: NODE_DEVICE, NODE_TASK, NODE_XDAIS, or NODE_GPP. ++ * Requires: ++ * Valid hNode. ++ * Ensures: ++ */ ++ extern enum NODE_TYPE NODE_GetType(struct NODE_OBJECT *hNode); ++ ++/* ++ * ======== GetNodeInfo ======== ++ * Purpose: ++ * Get node information without holding semaphore. 
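NODE_GetChannelId() and NODE_GetType() are typically used together when the host opens the channel reserved by NODE_Connect(). A small sketch under those assumptions (hNode already connected to the GPP; include paths assumed):

/* Sketch: look up the channel index reserved for host stream 0 into the
 * node, as documented above.  Only task and xDAIS socket nodes carry
 * host streams, so the type is checked first. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/nodepriv.h>

static DSP_STATUS host_input_channel(struct NODE_OBJECT *hNode, u32 *pulId)
{
	enum NODE_TYPE type = NODE_GetType(hNode);

	if (type != NODE_TASK && type != NODE_XDAIS)
		return DSP_ENODETYPE;

	/* Index 0, direction DSP_TONODE: the stream the host writes into.
	 * The returned index would then be handed to CHNL_Open(). */
	return NODE_GetChannelId(hNode, DSP_TONODE, 0, pulId);
}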
++ * Parameters: ++ * hNode: Node allocated with NODE_Allocate(), or DSP_HGPPNODE. ++ * Returns: ++ * Node info: priority, device owner, no. of streams, execution state ++ * NDB properties. ++ * Requires: ++ * Valid hNode. ++ * Ensures: ++ */ ++ extern void GetNodeInfo(struct NODE_OBJECT *hNode, ++ struct DSP_NODEINFO *pNodeInfo); ++ ++/* ++ * ======== NODE_GetLoadType ======== ++ * Purpose: ++ * Get the load type (dynamic, overlay, static) of a node. ++ * Parameters: ++ * hNode: Node allocated with NODE_Allocate(), or DSP_HGPPNODE. ++ * Returns: ++ * Node type: NLDR_DYNAMICLOAD, NLDR_OVLYLOAD, NLDR_STATICLOAD ++ * Requires: ++ * Valid hNode. ++ * Ensures: ++ */ ++ extern enum NLDR_LOADTYPE NODE_GetLoadType(struct NODE_OBJECT *hNode); ++ ++#endif /* NODEPRIV_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/ntfy.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/ntfy.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/ntfy.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/ntfy.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,146 @@ ++/* ++ * ntfy.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== ntfy.h ======== ++ * Purpose: ++ * Manage lists of notification events. ++ * ++ * Public Functions: ++ * NTFY_Create ++ * NTFY_Delete ++ * NTFY_Exit ++ * NTFY_Init ++ * NTFY_Notify ++ * NTFY_Register ++ * ++ *! Revision History: ++ *! ================= ++ *! 05-Nov-2001 kc: Updated NTFY_Register. ++ *! 07-Sep-2000 jeh Created. ++ */ ++ ++#ifndef NTFY_ ++#define NTFY_ ++ ++ struct NTFY_OBJECT; ++ ++/* ++ * ======== NTFY_Create ======== ++ * Purpose: ++ * Create an empty list of notifications. ++ * Parameters: ++ * phNtfy: Location to store handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * Requires: ++ * NTFY_Init(void) called. ++ * phNtfy != NULL. ++ * Ensures: ++ * DSP_SUCCEEDED(status) <==> IsValid(*phNtfy). ++ */ ++ extern DSP_STATUS NTFY_Create(OUT struct NTFY_OBJECT **phNtfy); ++ ++/* ++ * ======== NTFY_Delete ======== ++ * Purpose: ++ * Free resources allocated in NTFY_Create. ++ * Parameters: ++ * hNtfy: Handle returned from NTFY_Create(). ++ * Returns: ++ * Requires: ++ * NTFY_Init(void) called. ++ * IsValid(hNtfy). ++ * Ensures: ++ */ ++ extern void NTFY_Delete(IN struct NTFY_OBJECT *hNtfy); ++ ++/* ++ * ======== NTFY_Exit ======== ++ * Purpose: ++ * Discontinue usage of NTFY module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * NTFY_Init(void) successfully called before. ++ * Ensures: ++ */ ++ extern void NTFY_Exit(void); ++ ++/* ++ * ======== NTFY_Init ======== ++ * Purpose: ++ * Initialize the NTFY module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialization succeeded, FALSE otherwise. 
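A module that owns a notification list follows a simple create/delete bracket around these calls; the NTFY_Register()/NTFY_Notify() entry points declared next in this header would be used from its register-notify and event paths. A minimal sketch, with include paths assumed:

/* Sketch: per-module notification list lifetime. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/ntfy.h>

static struct NTFY_OBJECT *myNtfy;

static DSP_STATUS my_module_init(void)
{
	if (!NTFY_Init())
		return DSP_EFAIL;

	return NTFY_Create(&myNtfy);	/* DSP_SOK <==> valid *phNtfy */
}

static void my_module_exit(void)
{
	if (myNtfy) {
		NTFY_Delete(myNtfy);
		myNtfy = NULL;
	}
	NTFY_Exit();
}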
++ * Ensures: ++ */ ++ extern bool NTFY_Init(void); ++ ++/* ++ * ======== NTFY_Notify ======== ++ * Purpose: ++ * Execute notify function (signal event or post message) for every ++ * element in the notification list that is to be notified about the ++ * event specified in uEventMask. ++ * Parameters: ++ * hNtfy: Handle returned from NTFY_Create(). ++ * uEventMask: The type of event that has occurred. ++ * Returns: ++ * Requires: ++ * NTFY_Init(void) called. ++ * IsValid(hNtfy). ++ * Ensures: ++ */ ++ extern void NTFY_Notify(IN struct NTFY_OBJECT *hNtfy, ++ IN u32 uEventMask); ++ ++/* ++ * ======== NTFY_Register ======== ++ * Purpose: ++ * Add a notification element to the list. If the notification is already ++ * registered, and uEventMask != 0, the notification will get posted for ++ * events specified in the new event mask. If the notification is already ++ * registered and uEventMask == 0, the notification will be unregistered. ++ * Parameters: ++ * hNtfy: Handle returned from NTFY_Create(). ++ * hNotification: Handle to a DSP_NOTIFICATION object. ++ * uEventMask: Events to be notified about. ++ * uNotifyType: Type of notification: DSP_SIGNALEVENT. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory. ++ * DSP_EVALUE: uEventMask is 0 and hNotification was not ++ * previously registered. ++ * DSP_EHANDLE: NULL hNotification, hNotification event name ++ * too long, or hNotification event name NULL. ++ * Requires: ++ * NTFY_Init(void) called. ++ * IsValid(hNtfy). ++ * hNotification != NULL. ++ * uNotifyType is DSP_SIGNALEVENT ++ * Ensures: ++ */ ++ extern DSP_STATUS NTFY_Register(IN struct NTFY_OBJECT *hNtfy, ++ IN struct DSP_NOTIFICATION ++ *hNotification, ++ IN u32 uEventMask, ++ IN u32 uNotifyType); ++ ++#endif /* NTFY_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/proc.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/proc.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/proc.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/proc.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,676 @@ ++/* ++ * proc.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== proc.h ======== ++ * Description: ++ * This is the Class driver RM module interface. ++ * ++ * Public Functions: ++ * PROC_Attach ++ * PROC_Create ++ * PROC_Ctrl (OEM-function) ++ * PROC_Destroy ++ * PROC_Detach ++ * PROC_EnumNodes ++ * PROC_Exit ++ * PROC_FlushMemory ++ * PROC_GetDevObject (OEM-function) ++ * PROC_GetResourceInfo ++ * PROC_GetState ++ * PROC_GetProcessorId ++ * PROC_GetTrace (OEM-function) ++ * PROC_Init ++ * PROC_Load (OEM-function) ++ * PROC_Map ++ * PROC_NotifyAllclients ++ * PROC_NotifyClients (OEM-function) ++ * PROC_RegisterNotify ++ * PROC_ReserveMemory ++ * PROC_Start (OEM-function) ++ * PROC_UnMap ++ * PROC_UnReserveMemory ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! 
08-Mar-2004 sb Added the Dynamic Memory Mapping APIs ++ *! 09-Feb-2003 vp: Added PROC_GetProcessorID function ++ *! 29-Nov-2000 rr: Incorporated code review changes. ++ *! 28-Sep-2000 rr: Updated to Version 0.9. ++ *! 10-Aug-2000 rr: PROC_NotifyClients, PROC_GetProcessorHandle Added ++ *! 27-Jul-2000 rr: Updated to ver 0.8 of DSPAPI(types). GetTrace added. ++ *! 27-Jun-2000 rr: Created from dspapi.h ++ */ ++ ++#ifndef PROC_ ++#define PROC_ ++ ++#include ++#include ++#include ++ ++/* The PROC_OBJECT structure. */ ++struct PROC_OBJECT { ++ struct LST_ELEM link; /* Link to next PROC_OBJECT */ ++ u32 dwSignature; /* Used for object validation */ ++ struct DEV_OBJECT *hDevObject; /* Device this PROC represents */ ++ u32 hProcess; /* Process owning this Processor */ ++ struct MGR_OBJECT *hMgrObject; /* Manager Object Handle */ ++ u32 uAttachCount; /* Processor attach count */ ++ u32 uProcessor; /* Processor number */ ++ u32 uTimeout; /* Time out count */ ++ enum DSP_PROCSTATE sState; /* Processor state */ ++ u32 ulUnit; /* DDSP unit number */ ++ bool bIsAlreadyAttached; /* ++ * True if the Device below has ++ * GPP Client attached ++ */ ++ struct NTFY_OBJECT *hNtfy; /* Manages notifications */ ++ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD Context Handle */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ char *g_pszLastCoff; ++ struct list_head proc_object; ++}; ++ ++/* ++ * ======== PROC_Attach ======== ++ * Purpose: ++ * Prepare for communication with a particular DSP processor, and return ++ * a handle to the processor object. The PROC Object gets created ++ * Parameters: ++ * uProcessor : The processor index (zero-based). ++ * hMgrObject : Handle to the Manager Object ++ * pAttrIn : Ptr to the DSP_PROCESSORATTRIN structure. ++ * A NULL value means use default values. ++ * phProcessor : Ptr to location to store processor handle. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EFAIL : General failure. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_SALREADYATTACHED: Success; Processor already attached. ++ * Requires: ++ * phProcessor != NULL. ++ * PROC Initialized. ++ * Ensures: ++ * DSP_EFAIL, and *phProcessor == NULL, OR ++ * Success and *phProcessor is a Valid Processor handle OR ++ * DSP_SALREADYATTACHED and *phProcessor is a Valid Processor. ++ * Details: ++ * When pAttrIn is NULL, the default timeout value is 10 seconds. ++ */ ++ extern DSP_STATUS PROC_Attach(u32 uProcessor, ++ OPTIONAL CONST struct DSP_PROCESSORATTRIN ++ *pAttrIn, ++ OUT DSP_HPROCESSOR *phProcessor, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== PROC_AutoStart ========= ++ * Purpose: ++ * A Particular device gets loaded with the default image ++ * if the AutoStart flag is set. ++ * Parameters: ++ * hDevObject : Handle to the Device ++ * Returns: ++ * DSP_SOK : On Successful Loading ++ * DSP_EFILE : No DSP exec file found. ++ * DSP_EFAIL : General Failure ++ * Requires: ++ * hDevObject != NULL. ++ * hDevNode != NULL. ++ * PROC Initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS PROC_AutoStart(struct CFG_DEVNODE *hDevNode, ++ struct DEV_OBJECT *hDevObject); ++ ++/* ++ * ======== PROC_Ctrl ======== ++ * Purpose: ++ * Pass control information to the GPP device driver managing the DSP ++ * processor. This will be an OEM-only function, and not part of the ++ * 'Bridge application developer's API. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * dwCmd : Private driver IOCTL cmd ID. ++ * pArgs : Ptr to an driver defined argument structure. 
++ * Returns: ++ * DSP_SOK : SUCCESS ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_ETIMEOUT: A Timeout Occured before the Control information ++ * could be sent. ++ * DSP_EACCESSDENIED: Client does not have the access rights required ++ * to call this function. ++ * DSP_ERESTART: A Critical error has occured and the DSP is being ++ * restarted. ++ * DSP_EFAIL : General Failure. ++ * Requires: ++ * PROC Initialized. ++ * Ensures ++ * Details: ++ * This function Calls WMD_BRD_Ioctl. ++ */ ++ extern DSP_STATUS PROC_Ctrl(DSP_HPROCESSOR hProcessor, ++ u32 dwCmd, IN struct DSP_CBDATA *pArgs); ++ ++/* ++ * ======== PROC_Detach ======== ++ * Purpose: ++ * Close a DSP processor and de-allocate all (GPP) resources reserved ++ * for it. The Processor Object is deleted. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : InValid Handle. ++ * DSP_EFAIL : General failure. ++ * Requires: ++ * PROC Initialized. ++ * Ensures: ++ * PROC Object is destroyed. ++ */ ++ extern DSP_STATUS PROC_Detach(DSP_HPROCESSOR hProcessor, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== PROC_EnumNodes ======== ++ * Purpose: ++ * Enumerate the nodes currently allocated on a processor. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * aNodeTab : The first Location of an array allocated for node ++ * handles. ++ * uNodeTabSize: The number of (DSP_HNODE) handles that can be held ++ * to the memory the client has allocated for aNodeTab ++ * puNumNodes : Location where DSPProcessor_EnumNodes will return ++ * the number of valid handles written to aNodeTab ++ * puAllocated : Location where DSPProcessor_EnumNodes will return ++ * the number of nodes that are allocated on the DSP. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_ESIZE : The amount of memory allocated for aNodeTab is ++ * insufficent. That is the number of nodes actually ++ * allocated on the DSP is greater than the value ++ * specified for uNodeTabSize. ++ * DSP_EFAIL : Unable to get Resource Information. ++ * Details: ++ * Requires ++ * puNumNodes is not NULL. ++ * puAllocated is not NULL. ++ * aNodeTab is not NULL. ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_EnumNodes(DSP_HPROCESSOR hProcessor, ++ IN DSP_HNODE *aNodeTab, ++ IN u32 uNodeTabSize, ++ OUT u32 *puNumNodes, ++ OUT u32 *puAllocated); ++ ++/* ++ * ======== PROC_GetResourceInfo ======== ++ * Purpose: ++ * Enumerate the resources currently available on a processor. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * uResourceType: Type of resource . ++ * pResourceInfo: Ptr to the DSP_RESOURCEINFO structure. ++ * uResourceInfoSize: Size of the structure. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EWRONGSTATE: The processor is not in the PROC_RUNNING state. ++ * DSP_ETIMEOUT: A timeout occured before the DSP responded to the ++ * querry. ++ * DSP_ERESTART: A Critical error has occured and the DSP is being ++ * restarted. ++ * DSP_EFAIL : Unable to get Resource Information ++ * Requires: ++ * pResourceInfo is not NULL. ++ * Parameter uResourceType is Valid.[TBD] ++ * uResourceInfoSize is >= sizeof DSP_RESOURCEINFO struct. ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ * This function currently returns ++ * DSP_ENOTIMPL, and does not write any data to the pResourceInfo struct. 
++ */ ++ extern DSP_STATUS PROC_GetResourceInfo(DSP_HPROCESSOR hProcessor, ++ u32 uResourceType, ++ OUT struct DSP_RESOURCEINFO * ++ pResourceInfo, ++ u32 uResourceInfoSize); ++ ++/* ++ * ======== PROC_Exit ======== ++ * Purpose: ++ * Decrement reference count, and free resources when reference count is ++ * 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * PROC is initialized. ++ * Ensures: ++ * When reference count == 0, PROC's private resources are freed. ++ */ ++ extern void PROC_Exit(void); ++ ++/* ++ * ======== PROC_GetDevObject ========= ++ * Purpose: ++ * Returns the DEV Hanlde for a given Processor handle ++ * Parameters: ++ * hProcessor : Processor Handle ++ * phDevObject : Location to store the DEV Handle. ++ * Returns: ++ * DSP_SOK : Success; *phDevObject has Dev handle ++ * DSP_EFAIL : Failure; *phDevObject is zero. ++ * Requires: ++ * phDevObject is not NULL ++ * PROC Initialized. ++ * Ensures: ++ * DSP_SOK : *phDevObject is not NULL ++ * DSP_EFAIL : *phDevObject is NULL. ++ */ ++ extern DSP_STATUS PROC_GetDevObject(DSP_HPROCESSOR hProcessor, ++ struct DEV_OBJECT **phDevObject); ++ ++/* ++ * ======== PROC_Init ======== ++ * Purpose: ++ * Initialize PROC's private state, keeping a reference count on each ++ * call. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * TRUE: A requirement for the other public PROC functions. ++ */ ++ extern bool PROC_Init(void); ++ ++/* ++ * ======== PROC_GetState ======== ++ * Purpose: ++ * Report the state of the specified DSP processor. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pProcStatus : Ptr to location to store the DSP_PROCESSORSTATE ++ * structure. ++ * uStateInfoSize: Size of DSP_PROCESSORSTATE. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure while querying processor state. ++ * Requires: ++ * pProcStatus is not NULL ++ * uStateInfoSize is >= than the size of DSP_PROCESSORSTATE structure. ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_GetState(DSP_HPROCESSOR hProcessor, ++ OUT struct DSP_PROCESSORSTATE ++ *pProcStatus, ++ u32 uStateInfoSize); ++ ++/* ++ * ======== PROC_GetProcessorID ======== ++ * Purpose: ++ * Report the state of the specified DSP processor. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * procID : Processor ID ++ * ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure while querying processor state. ++ * Requires: ++ * pProcStatus is not NULL ++ * uStateInfoSize is >= than the size of DSP_PROCESSORSTATE structure. ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_GetProcessorId(DSP_HPROCESSOR hProcessor, ++ u32 *procID); ++ ++/* ++ * ======== PROC_GetTrace ======== ++ * Purpose: ++ * Retrieve the trace buffer from the specified DSP processor. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pBuf : Ptr to buffer to hold trace output. ++ * uMaxSize : Maximum size of the output buffer. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure while retireving processor trace ++ * Buffer. ++ * Requires: ++ * pBuf is not NULL ++ * uMaxSize is > 0. ++ * PROC Initialized. 
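The attach/query/detach calls above combine into the usual read-only inspection path on the GPP side. A minimal sketch, assuming the PROCESS_CONTEXT comes from the bridge driver's per-process bookkeeping and that NULL attributes select the documented 10 s default timeout (include paths assumed):

/* Sketch: attach to processor 0, query its state and node count, detach. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/proc.h>

static DSP_STATUS query_dsp(struct PROCESS_CONTEXT *pr_ctxt)
{
	DSP_HPROCESSOR hProc;
	struct DSP_PROCESSORSTATE state;
	DSP_HNODE nodes[8];
	u32 num_nodes = 0, allocated = 0;
	DSP_STATUS status;

	status = PROC_Attach(0, NULL, &hProc, pr_ctxt);
	if (DSP_FAILED(status))
		return status;

	status = PROC_GetState(hProc, &state, sizeof(state));
	if (DSP_SUCCEEDED(status))
		status = PROC_EnumNodes(hProc, nodes, 8,
					&num_nodes, &allocated);

	PROC_Detach(hProc, pr_ctxt);
	return status;
}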
++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_GetTrace(DSP_HPROCESSOR hProcessor, u8 *pBuf, ++ u32 uMaxSize); ++ ++/* ++ * ======== PROC_Load ======== ++ * Purpose: ++ * Reset a processor and load a new base program image. ++ * This will be an OEM-only function. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * iArgc : The number of Arguments(strings)in the aArgV[] ++ * aArgv : An Array of Arguments(Unicode Strings) ++ * aEnvp : An Array of Environment settings(Unicode Strings) ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EFILE : The DSP Execuetable was not found. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_ECORRUTFILE: Unable to Parse the DSP Execuetable ++ * DSP_EATTACHED: Abort because a GPP Client is attached to the ++ * specified Processor ++ * DSP_EACCESSDENIED: Client does not have the required access rights ++ * to reset and load the Processor ++ * DSP_EFAIL : Unable to Load the Processor ++ * Requires: ++ * aArgv is not NULL ++ * iArgc is > 0 ++ * PROC Initialized. ++ * Ensures: ++ * Success and ProcState == PROC_LOADED ++ * or DSP_FAILED status. ++ * Details: ++ * Does not implement access rights to control which GPP application ++ * can load the processor. ++ */ ++ extern DSP_STATUS PROC_Load(DSP_HPROCESSOR hProcessor, ++ IN CONST s32 iArgc, IN CONST char **aArgv, ++ IN CONST char **aEnvp); ++ ++/* ++ * ======== PROC_RegisterNotify ======== ++ * Purpose: ++ * Register to be notified of specific processor events ++ * Parameters: ++ * hProcessor : The processor handle. ++ * uEventMask : Mask of types of events to be notified about. ++ * uNotifyType : Type of notification to be sent. ++ * hNotification: Handle to be used for notification. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle or hNotification. ++ * DSP_EVALUE : Parameter uEventMask is Invalid ++ * DSP_ENOTIMP : The notification type specified in uNotifyMask ++ * is not supported. ++ * DSP_EFAIL : Unable to register for notification. ++ * Requires: ++ * hNotification is not NULL ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_RegisterNotify(DSP_HPROCESSOR hProcessor, ++ u32 uEventMask, u32 uNotifyType, ++ struct DSP_NOTIFICATION ++ *hNotification); ++ ++/* ++ * ======== PROC_NotifyClients ======== ++ * Purpose: ++ * Notify the Processor Clients ++ * Parameters: ++ * hProc : The processor handle. ++ * uEvents : Event to be notified about. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : Failure to Set or Reset the Event ++ * Requires: ++ * uEvents is Supported or Valid type of Event ++ * hProc is a valid handle ++ * PROC Initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS PROC_NotifyClients(DSP_HPROCESSOR hProc, ++ u32 uEvents); ++ ++/* ++ * ======== PROC_NotifyAllClients ======== ++ * Purpose: ++ * Notify the Processor Clients ++ * Parameters: ++ * hProc : The processor handle. ++ * uEvents : Event to be notified about. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : Failure to Set or Reset the Event ++ * Requires: ++ * uEvents is Supported or Valid type of Event ++ * hProc is a valid handle ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ * NODE And STRM would use this function to notify their clients ++ * about the state changes in NODE or STRM. 
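On the OEM path, PROC_Load() is normally followed by PROC_Start(), which is declared a little further below in this header. A minimal loader sketch under that assumption (image path and include paths are illustrative):

/* Sketch (OEM path): reset the DSP with a new base image, then start it. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/proc.h>

static DSP_STATUS load_and_start(DSP_HPROCESSOR hProc, const char *image)
{
	const char *argv[2] = { image, NULL };	/* iArgc counts argv[] strings */
	DSP_STATUS status;

	status = PROC_Load(hProc, 1, argv, NULL);	/* -> PROC_LOADED */
	if (DSP_FAILED(status))
		return status;

	return PROC_Start(hProc);			/* -> PROC_RUNNING */
}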
++ */ ++ extern DSP_STATUS PROC_NotifyAllClients(DSP_HPROCESSOR hProc, ++ u32 uEvents); ++ ++/* ++ * ======== PROC_Start ======== ++ * Purpose: ++ * Start a processor running. ++ * Processor must be in PROC_LOADED state. ++ * This will be an OEM-only function, and not part of the 'Bridge ++ * application developer's API. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EWRONGSTATE: Processor is not in PROC_LOADED state. ++ * DSP_EFAIL : Unable to start the processor. ++ * Requires: ++ * PROC Initialized. ++ * Ensures: ++ * Success and ProcState == PROC_RUNNING or DSP_FAILED status. ++ * Details: ++ */ ++ extern DSP_STATUS PROC_Start(DSP_HPROCESSOR hProcessor); ++ ++/* ++ * ======== PROC_Stop ======== ++ * Purpose: ++ * Start a processor running. ++ * Processor must be in PROC_LOADED state. ++ * This will be an OEM-only function, and not part of the 'Bridge ++ * application developer's API. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EWRONGSTATE: Processor is not in PROC_LOADED state. ++ * DSP_EFAIL : Unable to start the processor. ++ * Requires: ++ * PROC Initialized. ++ * Ensures: ++ * Success and ProcState == PROC_RUNNING or DSP_FAILED status. ++ * Details: ++ */ ++ extern DSP_STATUS PROC_Stop(DSP_HPROCESSOR hProcessor); ++ ++/* ++ * ======== PROC_FlushMemory ======== ++ * Purpose: ++ * Flushes a buffer from the MPU data cache. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pMpuAddr : Buffer start address ++ * ulSize : Buffer size ++ * ulFlags : Reserved. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure. ++ * Requires: ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ * All the arguments are currently ignored. ++ */ ++ extern DSP_STATUS PROC_FlushMemory(DSP_HPROCESSOR hProcessor, ++ void *pMpuAddr, ++ u32 ulSize, u32 ulFlags); ++ ++ ++/* ++ * ======== PROC_InvalidateMemory ======== ++ * Purpose: ++ * Invalidates a buffer from the MPU data cache. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pMpuAddr : Buffer start address ++ * ulSize : Buffer size ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure. ++ * Requires: ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ * All the arguments are currently ignored. ++ */ ++ extern DSP_STATUS PROC_InvalidateMemory(DSP_HPROCESSOR hProcessor, ++ void *pMpuAddr, ++ u32 ulSize); ++ ++/* ++ * ======== PROC_Map ======== ++ * Purpose: ++ * Maps a MPU buffer to DSP address space. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pMpuAddr : Starting address of the memory region to map. ++ * ulSize : Size of the memory region to map. ++ * pReqAddr : Requested DSP start address. Offset-adjusted actual ++ * mapped address is in the last argument. ++ * ppMapAddr : Ptr to DSP side mapped u8 address. ++ * ulMapAttr : Optional endianness attributes, virt to phys flag. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure. ++ * DSP_EMEMORY : MPU side memory allocation error. ++ * DSP_ENOTFOUND : Cannot find a reserved region starting with this ++ * : address. ++ * Requires: ++ * pMpuAddr is not NULL ++ * ulSize is not zero ++ * ppMapAddr is not NULL ++ * PROC Initialized. 
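The dynamic memory mapping (DMM) calls form a reserve/map/unmap/unreserve sequence; PROC_ReserveMemory(), PROC_UnMap() and PROC_UnReserveMemory() are declared just below this point. A minimal sketch of that sequence, with the mapping attributes (0 here), the buffer origin, and the include paths all assumed for illustration:

/* Sketch: map an MPU buffer into reserved DSP virtual space and tear it
 * down again. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/proc.h>

static DSP_STATUS map_buffer(DSP_HPROCESSOR hProc, void *mpu_buf, u32 size,
			     struct PROCESS_CONTEXT *pr_ctxt)
{
	void *rsv_addr = NULL, *map_addr = NULL;
	DSP_STATUS status;

	status = PROC_ReserveMemory(hProc, size, &rsv_addr);
	if (DSP_FAILED(status))
		return status;

	status = PROC_Map(hProc, mpu_buf, size, rsv_addr, &map_addr,
			  0 /* default map attributes (assumed) */, pr_ctxt);
	if (DSP_SUCCEEDED(status)) {
		/* ... hand map_addr to the DSP, then undo the mapping ... */
		PROC_UnMap(hProc, map_addr, pr_ctxt);
	}

	PROC_UnReserveMemory(hProc, rsv_addr);
	return status;
}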
++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_Map(DSP_HPROCESSOR hProcessor, ++ void *pMpuAddr, ++ u32 ulSize, ++ void *pReqAddr, ++ void **ppMapAddr, u32 ulMapAttr, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== PROC_ReserveMemory ======== ++ * Purpose: ++ * Reserve a virtually contiguous region of DSP address space. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * ulSize : Size of the address space to reserve. ++ * ppRsvAddr : Ptr to DSP side reserved u8 address. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure. ++ * DSP_EMEMORY : Cannot reserve chunk of this size. ++ * Requires: ++ * ppRsvAddr is not NULL ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_ReserveMemory(DSP_HPROCESSOR hProcessor, ++ u32 ulSize, void **ppRsvAddr); ++ ++/* ++ * ======== PROC_UnMap ======== ++ * Purpose: ++ * Removes a MPU buffer mapping from the DSP address space. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pMapAddr : Starting address of the mapped memory region. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure. ++ * DSP_ENOTFOUND : Cannot find a mapped region starting with this ++ * : address. ++ * Requires: ++ * pMapAddr is not NULL ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_UnMap(DSP_HPROCESSOR hProcessor, void *pMapAddr, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== PROC_UnReserveMemory ======== ++ * Purpose: ++ * Frees a previously reserved region of DSP address space. ++ * Parameters: ++ * hProcessor : The processor handle. ++ * pRsvAddr : Ptr to DSP side reservedBYTE address. ++ * Returns: ++ * DSP_SOK : Success. ++ * DSP_EHANDLE : Invalid processor handle. ++ * DSP_EFAIL : General failure. ++ * DSP_ENOTFOUND : Cannot find a reserved region starting with this ++ * : address. ++ * Requires: ++ * pRsvAddr is not NULL ++ * PROC Initialized. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS PROC_UnReserveMemory(DSP_HPROCESSOR hProcessor, ++ void *pRsvAddr); ++ ++#endif /* PROC_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/procpriv.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/procpriv.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/procpriv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/procpriv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,35 @@ ++/* ++ * procpriv.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== procpriv.h ======== ++ * Description: ++ * Global PROC constants and types, shared by PROC, MGR, and WCD. ++ * ++ *! Revision History: ++ *! ================ ++ *! 
05-July-2000 rr: Created ++ */ ++ ++#ifndef PROCPRIV_ ++#define PROCPRIV_ ++ ++/* RM PROC Object */ ++ struct PROC_OBJECT; ++ ++#endif /* PROCPRIV_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/pwr.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/pwr.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/pwr.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/pwr.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,129 @@ ++/* ++ * pwr.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== pwr.h ======== ++ * ++ * Public Functions: ++ * ++ * PWR_SleepDSP ++ * PWR_WakeDSP ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 06-Jun-2002 sg Replaced dspdefs.h with includes of dbdefs.h and errbase.h. ++ *! 13-May-2002 sg Added DSP_SAREADYASLEEP and DSP_SALREADYAWAKE. ++ *! 09-May-2002 sg Updated, added timeouts. ++ *! 02-May-2002 sg Initial. ++ */ ++ ++#ifndef PWR_ ++#define PWR_ ++ ++#include ++#include ++#include ++ ++/* ++ * ======== PWR_SleepDSP ======== ++ * Signal the DSP to go to sleep. ++ * ++ * Parameters: ++ * sleepCode: New sleep state for DSP. (Initially, valid codes ++ * are PWR_DEEPSLEEP or PWR_EMERGENCYDEEPSLEEP; both of ++ * these codes will simply put the DSP in deep sleep.) ++ * ++ * timeout: Maximum time (msec) that PWR should wait for ++ * confirmation that the DSP sleep state has been ++ * reached. If PWR should simply send the command to ++ * the DSP to go to sleep and then return (i.e., ++ * asynchrounous sleep), the timeout should be ++ * specified as zero. ++ * ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_SALREADYASLEEP: Success, but the DSP was already asleep. ++ * DSP_EINVALIDARG: The specified sleepCode is not supported. ++ * DSP_ETIMEOUT: A timeout occured while waiting for DSP sleep ++ * confirmation. ++ * DSP_EFAIL: General failure, unable to send sleep command to ++ * the DSP. ++ */ ++ extern DSP_STATUS PWR_SleepDSP(IN CONST u32 sleepCode, ++ IN CONST u32 timeout); ++ ++/* ++ * ======== PWR_WakeDSP ======== ++ * Signal the DSP to wake from sleep. ++ * ++ * Parameters: ++ * timeout: Maximum time (msec) that PWR should wait for ++ * confirmation that the DSP is awake. If PWR should ++ * simply send a command to the DSP to wake and then ++ * return (i.e., asynchrounous wake), timeout should ++ * be specified as zero. ++ * ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_SALREADYAWAKE: Success, but the DSP was already awake. ++ * DSP_ETIMEOUT: A timeout occured while waiting for wake ++ * confirmation. ++ * DSP_EFAIL: General failure, unable to send wake command to ++ * the DSP. ++ */ ++ extern DSP_STATUS PWR_WakeDSP(IN CONST u32 timeout); ++ ++/* ++ * ======== PWR_PM_PreScale ======== ++ * Prescale notification to DSP. ++ * ++ * Parameters: ++ * voltage_domain: The voltage domain for which notification is sent ++ * level: The level of voltage domain ++ * ++ * Returns: ++ * DSP_SOK: Success. 
++ * DSP_SALREADYAWAKE: Success, but the DSP was already awake. ++ * DSP_ETIMEOUT: A timeout occured while waiting for wake ++ * confirmation. ++ * DSP_EFAIL: General failure, unable to send wake command to ++ * the DSP. ++ */ ++ extern DSP_STATUS PWR_PM_PreScale(IN u16 voltage_domain, u32 level); ++ ++/* ++ * ======== PWR_PM_PostScale ======== ++ * PostScale notification to DSP. ++ * ++ * Parameters: ++ * voltage_domain: The voltage domain for which notification is sent ++ * level: The level of voltage domain ++ * ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_SALREADYAWAKE: Success, but the DSP was already awake. ++ * DSP_ETIMEOUT: A timeout occured while waiting for wake ++ * confirmation. ++ * DSP_EFAIL: General failure, unable to send wake command to ++ * the DSP. ++ */ ++ extern DSP_STATUS PWR_PM_PostScale(IN u16 voltage_domain, ++ u32 level); ++ ++#endif /* PWR_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/pwr_sh.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/pwr_sh.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/pwr_sh.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/pwr_sh.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,41 @@ ++/* ++ * pwr_sh.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== pwr_sh.h ======== ++ * ++ * Power Manager shared definitions (used on both GPP and DSP sides). ++ * ++ *! Revision History ++ *! ================ ++ *! 17-Apr-2002 sg: Initial. ++ */ ++ ++#ifndef PWR_SH_ ++#define PWR_SH_ ++ ++#include ++ ++/* valid sleep command codes that can be sent by GPP via mailbox: */ ++#define PWR_DEEPSLEEP MBX_PM_DSPIDLE ++#define PWR_EMERGENCYDEEPSLEEP MBX_PM_EMERGENCYSLEEP ++#define PWR_SLEEPUNTILRESTART MBX_PM_SLEEPUNTILRESTART ++#define PWR_WAKEUP MBX_PM_DSPWAKEUP ++#define PWR_AUTOENABLE MBX_PM_PWRENABLE ++#define PWR_AUTODISABLE MBX_PM_PWRDISABLE ++#define PWR_RETENTION MBX_PM_DSPRETN ++ ++#endif /* PWR_SH_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/reg.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/reg.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/reg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/reg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,257 @@ ++/* ++ * reg.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
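PWR_SleepDSP() and PWR_WakeDSP() pair naturally with the sleep-code constants that pwr_sh.h maps onto mailbox commands. A minimal sketch, assuming a 100 ms confirmation wait and the usual dspbridge/ include paths:

/* Sketch: put the DSP into deep sleep and wake it again, waiting up to
 * 100 ms for each transition (a timeout of 0 would make the call async). */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/pwr.h>
#include <dspbridge/pwr_sh.h>

static DSP_STATUS cycle_dsp_power(void)
{
	DSP_STATUS status;

	status = PWR_SleepDSP(PWR_DEEPSLEEP, 100);
	if (DSP_FAILED(status))
		return status;

	return PWR_WakeDSP(100);
}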
++ */ ++ ++ ++/* ++ * ======== reg.h ======== ++ * Purpose: ++ * Provide registry functions. ++ * ++ * Public Functions: ++ * REG_DeleteValue ++ * REG_EnumKey ++ * REG_EnumValue ++ * REG_Exit ++ * REG_GetValue ++ * REG_Init ++ * REG_SetValue ++ * ++ *! Revision History: ++ *! ================= ++ *! 30-Oct-2000 kc: Updated REG_SetValue & REG_GetValue; renamed ++ *! REG_DeleteEntry to REG_DeleteValue. ++ *! 29-Sep-2000 kc: Updated a REG functions for code review. ++ *! 12-Aug-2000 kc: Renamed REG_EnumValue to REG_EnumKey. Re-implemented ++ *! REG_EnumValue. ++ *! 03-Feb-2000 rr: REG_EnumValue Fxn Added ++ *! 13-Dec-1999 rr: windows.h removed ++ *! 02-Dec-1999 rr: windows.h included for retail build ++ *! 22-Nov-1999 kc: Changes from code review. ++ *! 29-Dec-1997 cr: Changes from code review. ++ *! 27-Oct-1997 cr: Added REG_DeleteValue. ++ *! 20-Oct-1997 cr: Added ability to pass bValue = NULL to REG_GetValue ++ *! and return size of reg entry in pdwValueSize. ++ *! 29-Sep-1997 cr: Added REG_SetValue ++ *! 29-Aug-1997 cr: Created. ++ */ ++ ++#ifndef _REG_H ++#define _REG_H ++ ++#include ++ ++/* ------------------------- Defines, Data Structures, Typedefs for Linux */ ++#ifndef UNDER_CE ++ ++#ifndef REG_SZ ++#define REG_SZ 1 ++#endif ++ ++#ifndef REG_BINARY ++#define REG_BINARY 3 ++#endif ++ ++#ifndef REG_DWORD ++#define REG_DWORD 4 ++#endif ++ ++#endif /* UNDER_CE */ ++ ++#define REG_MAXREGPATHLENGTH 255 ++ ++/* ++ * ======== REG_DeleteValue ======== ++ * Purpose: ++ * Deletes a registry entry. NOTE: A registry entry is not the same as ++ * a registry key. ++ * Parameters: ++ * phKey: Currently reserved; must be NULL. ++ * pstrSubkey: Path to key to open. ++ * pstrValue: Name of entry to delete. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * - REG initialized. ++ * - pstrSubkey & pstrValue are non-NULL values. ++ * - phKey is NULL. ++ * - length of pstrSubkey < REG_MAXREGPATHLENGTH. ++ * - length of pstrValue < REG_MAXREGPATHLENGTH. ++ * Ensures: ++ * Details: ++ */ ++ extern DSP_STATUS REG_DeleteValue(OPTIONAL IN HANDLE *phKey, ++ IN CONST char *pstrSubkey, ++ IN CONST char *pstrValue); ++ ++/* ++ * ======== REG_EnumKey ======== ++ * Purpose: ++ * Enumerates subkeys of the specified path to the registry key ++ * Retrieves the name of the subkey(given the index) and ++ * appends with the orignal path to form the full path. ++ * Parameters: ++ * phKey: Currently reserved; must be NULL. ++ * pstrKey The name of the registry key to be enumerated. ++ * dwIndex Specifies the index of the subkey to retrieve. ++ * pstrSubkey: Pointer to buffer that receives full path name of the ++ * specified key + the sub-key ++ * pdwValueSize: Specifies bytes of memory pstrSubkey points to on input, ++ * on output, specifies actual memory bytes written into. ++ * If there is no sub key,pdwValueSize returns NULL. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * - REG initialized. ++ * - pstrKey is non-NULL value. ++ * - pdwValueSize is a valid pointer. ++ * - phKey is NULL. ++ * - length of pstrKey < REG_MAXREGPATHLENGTH. ++ * Ensures: ++ * - strlen(pstrSubkey) is > strlen(pstrKey) && ++ * - strlen(pstrSubkey) is < REG_MAXREGPATHLENGTH ++ */ ++ extern DSP_STATUS REG_EnumKey(OPTIONAL IN HANDLE *phKey, ++ IN u32 dwIndex, IN CONST char *pstrKey, ++ IN OUT char *pstrSubkey, ++ IN OUT u32 *pdwValueSize); ++ ++/* ++ * ======== REG_EnumValue ======== ++ * Purpose: ++ * Enumerates values of a specified key. 
Retrieves each value name and ++ * the data associated with the value. ++ * Parameters: ++ * phKey: Currently reserved; must be NULL. ++ * dwIndex: Specifies the index of the value to retrieve. ++ * pstrKey: The name of the registry key to be enumerated. ++ * pstrValue: Pointer to buffer that receives the name of the value. ++ * pdwValueSize: Specifies bytes of memory pstrValue points to on input, ++ * On output, specifies actual memory bytes written into. ++ * If there is no value, pdwValueSize returns NULL ++ * pstrData: Pointer to buffer that receives the data of a value. ++ * pdwDataSize: Specifies bytes of memory in pstrData on input and ++ * bytes of memory written into pstrData on output. ++ * If there is no data, pdwDataSize returns NULL. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * REG initialized. ++ * phKey is NULL. ++ * pstrKey is a non-NULL value. ++ * pstrValue, pstrData, pdwValueSize and pdwDataSize are valid pointers. ++ * Length of pstrKey is less than REG_MAXREGPATHLENGTH. ++ * Ensures: ++ */ ++ extern DSP_STATUS REG_EnumValue(IN HANDLE *phKey, ++ IN u32 dwIndex, ++ IN CONST char *pstrKey, ++ IN OUT char *pstrValue, ++ IN OUT u32 *pdwValueSize, ++ IN OUT char *pstrData, ++ IN OUT u32 *pdwDataSize); ++ ++/* ++ * ======== REG_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * REG initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void REG_Exit(void); ++ ++/* ++ * ======== REG_GetValue ======== ++ * Purpose: ++ * Retrieve a value from the registry. ++ * Parameters: ++ * phKey: Currently reserved; must be NULL. ++ * pstrSubkey: Path to key to open. ++ * pstrEntry: Name of entry to retrieve. ++ * pbValue: Upon return, points to retrieved value. ++ * pdwValueSize: Specifies bytes of memory pbValue points to on input, ++ * on output, specifies actual memory bytes written into. ++ * If pbValue is NULL, pdwValueSize reports the size of ++ * the entry in pstrEntry. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * - REG initialized. ++ * - pstrSubkey & pstrEntry are non-NULL values. ++ * - pbValue is a valid pointer. ++ * - phKey is NULL. ++ * - length of pstrSubkey < REG_MAXREGPATHLENGTH. ++ * - length of pstrEntry < REG_MAXREGPATHLENGTH. ++ * Ensures: ++ */ ++ extern DSP_STATUS REG_GetValue(OPTIONAL IN HANDLE *phKey, ++ IN CONST char *pstrSubkey, ++ IN CONST char *pstrEntry, ++ OUT u8 *pbValue, ++ IN OUT u32 *pdwValueSize); ++ ++/* ++ * ======== REG_Init ======== ++ * Purpose: ++ * Initializes private state of REG module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * REG initialized. ++ */ ++ extern bool REG_Init(void); ++ ++/* ++ * ======== REG_SetValue ======== ++ * Purpose: ++ * Set a value in the registry. ++ * Parameters: ++ * phKey: Handle to open reg key, or NULL if pSubkey is full path. ++ * pstrSubkey: Path to key to open, could be based on phKey. ++ * pstrEntry: Name of entry to set. ++ * dwType: Data type of new registry value. ++ * pbValue: Points to buffer containing new data. ++ * dwValueSize: Specifies bytes of memory bValue points to. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * - REG initialized. ++ * - pstrSubkey & pstrEntry are non-NULL values. ++ * - pbValue is a valid pointer. 
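REG_GetValue() and REG_SetValue() (the latter declared just below) are the basic store/fetch pair of this registry layer. A minimal round-trip sketch, assuming REG_Init() has already been called, phKey passed as NULL per the Requires clauses, and placeholder subkey/entry names:

/* Sketch: store one DWORD-sized entry and read it back. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/reg.h>

static DSP_STATUS save_and_load_timeout(u32 *pTimeout)
{
	u32 value = *pTimeout;
	u32 size = sizeof(value);
	DSP_STATUS status;

	/* "Drivers\\DspBridge" / "Timeout" are example names only. */
	status = REG_SetValue(NULL, "Drivers\\DspBridge", "Timeout",
			      REG_DWORD, (u8 *)&value, sizeof(value));
	if (DSP_FAILED(status))
		return status;

	return REG_GetValue(NULL, "Drivers\\DspBridge", "Timeout",
			    (u8 *)pTimeout, &size);
}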
++ * - phKey is NULL. ++ * - dwValuSize > 0. ++ * - length of pstrSubkey < REG_MAXREGPATHLENGTH. ++ * - length of pstrEntry < REG_MAXREGPATHLENGTH. ++ * Ensures: ++ */ ++ extern DSP_STATUS REG_SetValue(OPTIONAL IN HANDLE *phKey, ++ IN CONST char *pstrSubKey, ++ IN CONST char *pstrEntry, ++ IN CONST u32 dwType, ++ IN u8 *pbValue, IN u32 dwValueSize); ++ ++#endif /* _REG_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/resourcecleanup.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/resourcecleanup.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/resourcecleanup.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/resourcecleanup.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,83 @@ ++/* ++ * resourcecleanup.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef RES_CLEANUP_DISABLE ++ ++#include ++#include ++ ++ ++extern DSP_STATUS DRV_GetProcCtxtList(struct PROCESS_CONTEXT **pPctxt, ++ struct DRV_OBJECT *hDrvObject); ++ ++extern DSP_STATUS DRV_InsertProcContext(struct DRV_OBJECT *hDrVObject, ++ HANDLE hPCtxt); ++ ++extern DSP_STATUS DRV_RemoveAllDMMResElements(HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_RemoveAllNodeResElements(HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_ProcUpdatestate(HANDLE pCtxt, ++ enum GPP_PROC_RES_STATE resState); ++ ++extern DSP_STATUS DRV_ProcSetPID(HANDLE pCtxt, s32 hProcess); ++ ++extern DSP_STATUS DRV_RemoveAllResources(HANDLE pPctxt); ++ ++extern DSP_STATUS DRV_RemoveProcContext(struct DRV_OBJECT *hDRVObject, ++ HANDLE hPCtxt); ++ ++extern DSP_STATUS DRV_GetNodeResElement(HANDLE hNode, HANDLE nodeRes, ++ HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_InsertNodeResElement(HANDLE hNode, HANDLE nodeRes, ++ HANDLE pCtxt); ++ ++extern void DRV_ProcNodeUpdateHeapStatus(HANDLE hNodeRes, s32 status); ++ ++extern DSP_STATUS DRV_RemoveNodeResElement(HANDLE nodeRes, HANDLE status); ++ ++extern void DRV_ProcNodeUpdateStatus(HANDLE hNodeRes, s32 status); ++ ++extern DSP_STATUS DRV_UpdateDMMResElement(HANDLE dmmRes, u32 pMpuAddr, ++ u32 ulSize, u32 pReqAddr, ++ u32 ppMapAddr, HANDLE hProcesso); ++ ++extern DSP_STATUS DRV_InsertDMMResElement(HANDLE dmmRes, HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_GetDMMResElement(u32 pMapAddr, HANDLE dmmRes, ++ HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_RemoveDMMResElement(HANDLE dmmRes, HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_ProcUpdateSTRMRes(u32 uNumBufs, HANDLE STRMRes, ++ HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_ProcInsertSTRMResElement(HANDLE hStrm, HANDLE STRMRes, ++ HANDLE pPctxt); ++ ++extern DSP_STATUS DRV_GetSTRMResElement(HANDLE hStrm, HANDLE STRMRes, ++ HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_ProcRemoveSTRMResElement(HANDLE STRMRes, HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_RemoveAllSTRMResElements(HANDLE pCtxt); ++ ++extern DSP_STATUS DRV_ProcDisplayResInfo(u8 *pBuf, u32 *pSize); ++ ++extern enum NODE_STATE NODE_GetState(HANDLE hNode); ++ ++#endif +diff -Nurp 
linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/rmm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/rmm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/rmm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/rmm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,199 @@ ++/* ++ * rmm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== rmm.h ======== ++ * ++ * This memory manager provides general heap management and arbitrary ++ * alignment for any number of memory segments, and management of overlay ++ * memory. ++ * ++ * Public functions: ++ * RMM_alloc ++ * RMM_create ++ * RMM_delete ++ * RMM_exit ++ * RMM_free ++ * RMM_init ++ * ++ *! Revision History ++ *! ================ ++ *! 25-Jun-2002 jeh Added RMM_Addr. Removed RMM_reserve, RMM_stat. ++ *! 15-Oct-2001 jeh Based on rm.h in gen tree. ++ */ ++ ++#ifndef RMM_ ++#define RMM_ ++ ++/* ++ * ======== RMM_Addr ======== ++ * DSP address + segid ++ */ ++struct RMM_Addr { ++ u32 addr; ++ s32 segid; ++} ; ++ ++/* ++ * ======== RMM_Segment ======== ++ * Memory segment on the DSP available for remote allocations. ++ */ ++struct RMM_Segment { ++ u32 base; /* Base of the segment */ ++ u32 length; /* Size of the segment (target MAUs) */ ++ s32 space; /* Code or data */ ++ u32 number; /* Number of Allocated Blocks */ ++} ; ++ ++/* ++ * ======== RMM_Target ======== ++ */ ++struct RMM_TargetObj; ++ ++/* ++ * ======== RMM_alloc ======== ++ * ++ * RMM_alloc is used to remotely allocate or reserve memory on the DSP. ++ * ++ * Parameters: ++ * target - Target returned from RMM_create(). ++ * segid - Memory segment to allocate from. ++ * size - Size (target MAUS) to allocate. ++ * align - alignment. ++ * dspAddr - If reserve is FALSE, the location to store allocated ++ * address on output, otherwise, the DSP address to ++ * reserve. ++ * reserve - If TRUE, reserve the memory specified by dspAddr. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation on GPP failed. ++ * DSP_EOVERLAYMEMORY: Cannot "allocate" overlay memory because it's ++ * already in use. ++ * Requires: ++ * RMM initialized. ++ * Valid target. ++ * dspAddr != NULL. ++ * size > 0 ++ * reserve || target->numSegs > 0. ++ * Ensures: ++ */ ++extern DSP_STATUS RMM_alloc(struct RMM_TargetObj *target, u32 segid, u32 size, ++ u32 align, u32 *dspAdr, bool reserve); ++ ++/* ++ * ======== RMM_create ======== ++ * Create a target object with memory segments for remote allocation. If ++ * segTab == NULL or numSegs == 0, memory can only be reserved through ++ * RMM_alloc(). ++ * ++ * Parameters: ++ * pTarget: - Location to store target on output. ++ * segTab: - Table of memory segments. ++ * numSegs: - Number of memory segments. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failed. ++ * Requires: ++ * RMM initialized. ++ * pTarget != NULL. ++ * numSegs == 0 || segTab != NULL. ++ * Ensures: ++ * Success: Valid *pTarget. 
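RMM_alloc() operates on a target built from a table of RMM_Segment descriptors; RMM_create(), RMM_free() and RMM_delete() are declared below in this header. A minimal sketch, assuming RMM_init() has already succeeded and using made-up segment base/length values and segment index 0:

/* Sketch: one-segment remote allocator - allocate 1 KB of target data
 * space, free it, and destroy the target. */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/errbase.h>
#include <dspbridge/rmm.h>

static DSP_STATUS rmm_demo(void)
{
	struct RMM_Segment seg = {
		.base   = 0x20000000,	/* example DSP address */
		.length = 0x10000,	/* 64K target MAUs */
		.space  = 1,		/* data space (example value) */
		.number = 0,
	};
	struct RMM_TargetObj *target = NULL;
	u32 dsp_addr = 0;
	DSP_STATUS status;

	status = RMM_create(&target, &seg, 1);
	if (DSP_FAILED(status))
		return status;

	status = RMM_alloc(target, 0, 1024,
			   0 /* default alignment (assumed) */,
			   &dsp_addr, false);
	if (DSP_SUCCEEDED(status))
		RMM_free(target, 0, dsp_addr, 1024, false);

	RMM_delete(target);
	return status;
}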
++ * Failure: *pTarget == NULL. ++ */ ++extern DSP_STATUS RMM_create(struct RMM_TargetObj **pTarget, ++ struct RMM_Segment segTab[], u32 numSegs); ++ ++/* ++ * ======== RMM_delete ======== ++ * Delete target allocated in RMM_create(). ++ * ++ * Parameters: ++ * target - Target returned from RMM_create(). ++ * Returns: ++ * Requires: ++ * RMM initialized. ++ * Valid target. ++ * Ensures: ++ */ ++extern void RMM_delete(struct RMM_TargetObj *target); ++ ++/* ++ * ======== RMM_exit ======== ++ * Exit the RMM module ++ * ++ * Parameters: ++ * Returns: ++ * Requires: ++ * RMM_init successfully called. ++ * Ensures: ++ */ ++extern void RMM_exit(void); ++ ++/* ++ * ======== RMM_free ======== ++ * Free or unreserve memory allocated through RMM_alloc(). ++ * ++ * Parameters: ++ * target: - Target returned from RMM_create(). ++ * segid: - Segment of memory to free. ++ * dspAddr: - Address to free or unreserve. ++ * size: - Size of memory to free or unreserve. ++ * reserved: - TRUE if memory was reserved only, otherwise FALSE. ++ * Returns: ++ * Requires: ++ * RMM initialized. ++ * Valid target. ++ * reserved || segid < target->numSegs. ++ * reserve || [dspAddr, dspAddr + size] is a valid memory range. ++ * Ensures: ++ */ ++extern bool RMM_free(struct RMM_TargetObj *target, u32 segid, u32 dspAddr, ++ u32 size, bool reserved); ++ ++/* ++ * ======== RMM_init ======== ++ * Initialize the RMM module ++ * ++ * Parameters: ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Failure. ++ * Requires: ++ * Ensures: ++ */ ++extern bool RMM_init(void); ++ ++/* ++ * ======== RMM_stat ======== ++ * Obtain memory segment status ++ * ++ * Parameters: ++ * segid: Segment ID of the dynamic loading segment. ++ * pMemStatBuf: Pointer to allocated buffer into which memory stats are ++ * placed. ++ * Returns: ++ * TRUE: Success. ++ * FALSE: Failure. ++ * Requires: ++ * segid < target->numSegs ++ * Ensures: ++ */ ++extern bool RMM_stat(struct RMM_TargetObj *target, enum DSP_MEMTYPE segid, ++ struct DSP_MEMSTAT *pMemStatBuf); ++ ++#endif /* RMM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/rms_sh.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/rms_sh.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/rms_sh.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/rms_sh.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,125 @@ ++/* ++ * rms_sh.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== rms_sh.h ======== ++ * ++ * DSP/BIOS Bridge Resource Manager Server shared definitions (used on both ++ * GPP and DSP sides). ++ * ++ *! Revision History ++ *! ================ ++ *! 24-Mar-2003 vp Merged updates required for CCS2.2 transition. ++ *! 24-Feb-2003 kc Rearranged order of node types to temporarily support ++ *! legacy message node code ++ *! 23-Nov-2002 gp Converted tabs -> spaces, to fix formatting. ++ *! 13-Feb-2002 jeh Added sysstacksize element to RMS_MoreTaskArgs. ++ *! 
11-Dec-2000 sg Added 'misc' element to RMS_MoreTaskArgs. ++ *! 04-Dec-2000 ag Added RMS_BUFDESC command code. ++ *! C/R code value changed to allow ORing of system/user codes. ++ *! 10-Oct-2000 sg Added 'align' field to RMS_StrmDef. ++ *! 09-Oct-2000 sg Moved pre-defined message codes here from rmsdefs.h. ++ *! 02-Oct-2000 sg Changed ticks to msec. ++ *! 24-Aug-2000 sg Moved definitions that will be exposed to app developers ++ *! to a separate file, rmsdefs.h. ++ *! 10-Aug-2000 sg Added RMS_COMMANDBUFSIZE and RMS_RESPONSEBUFSIZE; added ++ *! pre-defined command/response codes; more comments. ++ *! 09-Aug-2000 sg Added RMS_ETASK. ++ *! 08-Aug-2000 jeh Define RMS_WORD for GPP, rename DSP_MSG to RMS_DSPMSG. ++ *! Added RMS_MsgArgs, RMS_MoreTaskArgs. ++ *! 25-Jul-2000 sg: Changed SIO to STRM. ++ *! 30-Jun-2000 sg: Initial. ++ */ ++ ++#ifndef RMS_SH_ ++#define RMS_SH_ ++ ++#include ++ ++/* Node Types: */ ++#define RMS_TASK 1 /* Task node */ ++#define RMS_DAIS 2 /* xDAIS socket node */ ++#define RMS_MSG 3 /* Message node */ ++ ++/* Memory Types: */ ++#define RMS_CODE 0 /* Program space */ ++#define RMS_DATA 1 /* Data space */ ++#define RMS_IO 2 /* I/O space */ ++ ++/* RM Server Command and Response Buffer Sizes: */ ++#define RMS_COMMANDBUFSIZE 256 /* Size of command buffer */ ++#define RMS_RESPONSEBUFSIZE 16 /* Size of response buffer */ ++ ++/* Pre-Defined Command/Response Codes: */ ++#define RMS_EXIT 0x80000000 /* GPP->Node: shutdown */ ++#define RMS_EXITACK 0x40000000 /* Node->GPP: ack shutdown */ ++#define RMS_BUFDESC 0x20000000 /* Arg1 SM buf, Arg2 is SM size */ ++#define RMS_KILLTASK 0x10000000 /* GPP->Node: Kill Task */ ++#define RMS_USER 0x0 /* Start of user-defined msg codes */ ++#define RMS_MAXUSERCODES 0xfff /* Maximum user defined C/R Codes */ ++ ++ ++/* RM Server RPC Command Structure: */ ++ struct RMS_Command { ++ RMS_WORD fxn; /* Server function address */ ++ RMS_WORD arg1; /* First argument */ ++ RMS_WORD arg2; /* Second argument */ ++ RMS_WORD data; /* Function-specific data array */ ++ } ; ++ ++/* ++ * The RMS_StrmDef structure defines the parameters for both input and output ++ * streams, and is passed to a node's create function. ++ */ ++ struct RMS_StrmDef { ++ RMS_WORD bufsize; /* Buffer size (in DSP words) */ ++ RMS_WORD nbufs; /* Max number of bufs in stream */ ++ RMS_WORD segid; /* Segment to allocate buffers */ ++ RMS_WORD align; /* Alignment for allocated buffers */ ++ RMS_WORD timeout; /* Timeout (msec) for blocking calls */ ++ RMS_CHAR name[1]; /* Device Name (terminated by '\0') */ ++ } ; ++ ++/* Message node create args structure: */ ++ struct RMS_MsgArgs { ++ RMS_WORD maxMessages; /* Max # simultaneous msgs to node */ ++ RMS_WORD segid; /* Mem segment for NODE_allocMsgBuf */ ++ RMS_WORD notifyType; /* Type of message notification */ ++ RMS_WORD argLength; /* Length (in DSP chars) of arg data */ ++ RMS_WORD argData; /* Arg data for node */ ++ } ; ++ ++/* Partial task create args structure */ ++ struct RMS_MoreTaskArgs { ++ RMS_WORD priority; /* Task's runtime priority level */ ++ RMS_WORD stackSize; /* Task's stack size */ ++ RMS_WORD sysstackSize; /* Task's system stack size (55x) */ ++ RMS_WORD stackSeg; /* Memory segment for task's stack */ ++ RMS_WORD heapAddr; /* base address of the node memory heap in ++ * external memory (DSP virtual address) */ ++ RMS_WORD heapSize; /* size in MAUs of the node memory heap in ++ * external memory */ ++ RMS_WORD misc; /* Misc field. 
Not used for 'normal' ++ * task nodes; for xDAIS socket nodes ++ * specifies the IALG_Fxn pointer. ++ */ ++ /* # input STRM definition structures */ ++ RMS_WORD numInputStreams; ++ } ; ++ ++#endif /* RMS_SH_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/rmstypes.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/rmstypes.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/rmstypes.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/rmstypes.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,40 @@ ++/* ++ * rmstypes.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== rmstypes.h ======== ++ * ++ * DSP/BIOS Bridge Resource Manager Server shared data type definitions. ++ * ++ *! Revision History ++ *! ================ ++ *! 06-Oct-2000 sg Added LgFxn type. ++ *! 05-Oct-2000 sg Changed RMS_STATUS to LgUns. ++ *! 31-Aug-2000 sg Added RMS_DSPMSG. ++ *! 25-Aug-2000 sg Initial. ++ */ ++ ++#ifndef RMSTYPES_ ++#define RMSTYPES_ ++#include ++/* ++ * DSP-side definitions. ++ */ ++#include ++typedef u32 RMS_WORD; ++typedef char RMS_CHAR; ++ ++#endif /* RMSTYPES_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/services.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/services.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/services.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/services.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,63 @@ ++/* ++ * services.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== services.h ======== ++ * Purpose: ++ * Provide loading and unloading of SERVICES modules. ++ * ++ * Public Functions: ++ * SERVICES_Exit(void) ++ * SERVICES_Init(void) ++ * ++ *! Revision History: ++ *! ================ ++ *! 01-Feb-2000 kc: Created. ++ */ ++ ++#ifndef SERVICES_ ++#define SERVICES_ ++ ++#include ++/* ++ * ======== SERVICES_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * SERVICES initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void SERVICES_Exit(void); ++ ++/* ++ * ======== SERVICES_Init ======== ++ * Purpose: ++ * Initializes SERVICES modules. ++ * Parameters: ++ * Returns: ++ * TRUE if all modules initialized; otherwise FALSE. 
++ * Requires: ++ * Ensures: ++ * SERVICES modules initialized. ++ */ ++ extern bool SERVICES_Init(void); ++ ++#endif /* SERVICES_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/std.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/std.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/std.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/std.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,143 @@ ++/* ++ * std.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== std.h ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 16-Feb-2004 vp GNU compiler 3.x defines inline keyword. Added ++ *! appropriate macros not to redefine inline keyword in ++ *! this file. ++ *! 24-Oct-2002 ashu defined _TI_ and _FIXED_ symbols for 28x. ++ *! 24-Oct-2002 ashu defined _TI_ for 24x. ++ *! 01-Mar-2002 kln changed LARGE_MODEL and Arg definition for 28x ++ *! 01-Feb-2002 kln added definitions for 28x ++ *! 08-Dec-2000 kw: added 'ArgToInt' and 'ArgToPtr' macros ++ *! 30-Nov-2000 mf: Added _64_, _6x_; removed _7d_ ++ *! 30-May-2000 srid: Added __TMS320C55X__ for 55x; Arg is void * for 55 . ++ *! 18-Jun-1999 dr: Added '_TI_', fixed __inline for SUN4, added inline ++ *! 10-Feb-1999 rt: Added '55' support, changed 54's symbol to _TMS320C5XX ++ *! 29-Aug-1998 mf: fixed typo, removed obsolete targets ++ *! 08-Jun-1998 mf: _67_ is synonym for _7d_ ++ *! 10-Oct-1997 rt; added _7d_ for Raytheon C7DSP triggered by _TMS320C6700 ++ *! 04-Aug-1997 cc: added _29_ for _TMS320C2XX ++ *! 11-Jul-1997 dlr: _5t_, and STD_SPOXTASK keyword for Tasking ++ *! 12-Jun-1997 mf: _TMS320C60 -> _TMS320C6200 ++ *! 13-Feb-1997 mf: _62_, with 32-bit LgInt ++ *! 26-Nov-1996 kw: merged bios-c00's and wsx-a27's changes ++ *! *and* revision history ++ *! 12-Sep-1996 kw: added C54x #ifdef's ++ *! 21-Aug-1996 mf: removed #define main smain for _21_ ++ *! 14-May-1996 gp: def'd out INT, FLOAT, and COMPLEX defines for WSX. ++ *! 11-Apr-1996 kw: define _W32_ based on _WIN32 (defined by MS compiler) ++ *! 07-Mar-1996 mg: added Win32 support ++ *! 06-Sep-1995 dh: added _77_ dynamic stack support via fxns77.h ++ *! 27-Jun-1995 dh: added _77_ support ++ *! 16-Mar-1995 mf: for _21_: #define main smain ++ *! 01-Mar-1995 mf: set _20_ and _60_ (as well as _21_ for both) ++ *! 22-Feb-1995 mf: Float is float for _SUN_ and _80_ ++ *! 22-Dec-1994 mf: Added _80_ definition, for PP or MP. ++ *! 09-Dec-1994 mf: Added _53_ definition. ++ *! Added definitions of _30_, etc. ++ *! 23-Aug-1994 dh removed _21_ special case (kw) ++ *! 17-Aug-1994 dh added _51_ support ++ *! 03-Aug-1994 kw updated _80_ support ++ *! 30-Jun-1994 kw added _80_ support ++ *! 05-Apr-1994 kw: Added _SUN_ to _FLOAT_ definition ++ *! 01-Mar-1994 kw: Made Bool an int (was u16) for _56_ (more efficient). ++ *! Added _53_ support. 
++ */ ++ ++#ifndef STD_ ++#define STD_ ++ ++#include ++ ++/* ++ * ======== _TI_ ======== ++ * _TI_ is defined for all TI targets ++ */ ++#if defined(_29_) || defined(_30_) || defined(_40_) || defined(_50_) || \ ++ defined(_54_) || defined(_55_) || defined(_6x_) || defined(_80_) || \ ++ defined(_28_) || defined(_24_) ++#define _TI_ 1 ++#endif ++ ++/* ++ * ======== _FLOAT_ ======== ++ * _FLOAT_ is defined for all targets that natively support floating point ++ */ ++#if defined(_SUN_) || defined(_30_) || defined(_40_) || defined(_67_) || \ ++ defined(_80_) ++#define _FLOAT_ 1 ++#endif ++ ++/* ++ * ======== _FIXED_ ======== ++ * _FIXED_ is defined for all fixed point target architectures ++ */ ++#if defined(_29_) || defined(_50_) || defined(_54_) || defined(_55_) || \ ++ defined(_62_) || defined(_64_) || defined(_28_) ++#define _FIXED_ 1 ++#endif ++ ++/* ++ * ======== _TARGET_ ======== ++ * _TARGET_ is defined for all target architectures (as opposed to ++ * host-side software) ++ */ ++#if defined(_FIXED_) || defined(_FLOAT_) ++#define _TARGET_ 1 ++#endif ++ ++/* ++ * 8, 16, 32-bit type definitions ++ * ++ * Sm* - 8-bit type ++ * Md* - 16-bit type ++ * Lg* - 32-bit type ++ * ++ * *s32 - signed type ++ * *u32 - unsigned type ++ * *Bits - unsigned type (bit-maps) ++ */ ++ ++/* ++ * Aliases for standard C types ++ */ ++ ++typedef s32(*Fxn) (void); /* generic function type */ ++ ++#ifndef NULL ++#define NULL 0 ++#endif ++ ++ ++/* ++ * These macros are used to cast 'Arg' types to 's32' or 'Ptr'. ++ * These macros were added for the 55x since Arg is not the same ++ * size as s32 and Ptr in 55x large model. ++ */ ++#if defined(_28l_) || defined(_55l_) ++#define ArgToInt(A) ((s32)((long)(A) & 0xffff)) ++#define ArgToPtr(A) ((Ptr)(A)) ++#else ++#define ArgToInt(A) ((s32)(A)) ++#define ArgToPtr(A) ((Ptr)(A)) ++#endif ++ ++#endif /* STD_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/strmdefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/strmdefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/strmdefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/strmdefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,57 @@ ++/* ++ * strmdefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== strmdefs.h ======== ++ * Purpose: ++ * Global STRM constants and types. ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Nov-2001 ag Added STRM_INFO.. ++ *! 25-Sep-2000 jeh Created. ++ */ ++ ++#ifndef STRMDEFS_ ++#define STRMDEFS_ ++ ++#define STRM_MAXEVTNAMELEN 32 ++ ++ struct STRM_MGR; ++ ++ struct STRM_OBJECT; ++ ++ struct STRM_ATTR { ++ HANDLE hUserEvent; ++ char *pstrEventName; ++ void *pVirtBase; /* Process virtual base address of ++ * mapped SM */ ++ u32 ulVirtSize; /* Size of virtual space in bytes */ ++ struct DSP_STREAMATTRIN *pStreamAttrIn; ++ } ; ++ ++ struct STRM_INFO { ++ enum DSP_STRMMODE lMode; /* transport mode of ++ * stream(DMA, ZEROCOPY..) 
*/ ++ u32 uSegment; /* Segment strm allocs from. 0 is local mem */ ++ void *pVirtBase; /* " " Stream'process virt base */ ++ struct DSP_STREAMINFO *pUser; /* User's stream information ++ * returned */ ++ } ; ++ ++#endif /* STRMDEFS_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/strm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/strm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/strm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/strm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,446 @@ ++/* ++ * strm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== strm.h ======== ++ * Description: ++ * DSPBridge Stream Manager. ++ * ++ * Public Functions: ++ * STRM_AllocateBuffer ++ * STRM_Close ++ * STRM_Create ++ * STRM_Delete ++ * STRM_Exit ++ * STRM_FreeBuffer ++ * STRM_GetEventHandle ++ * STRM_GetInfo ++ * STRM_Idle ++ * STRM_Init ++ * STRM_Issue ++ * STRM_Open ++ * STRM_PrepareBuffer ++ * STRM_Reclaim ++ * STRM_RegisterNotify ++ * STRM_Select ++ * STRM_UnprepareBuffer ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================= ++ *! 15-Nov-2001 ag Changed DSP_STREAMINFO to STRM_INFO in STRM_GetInfo(). ++ *! Added DSP_ESIZE error to STRM_AllocateBuffer(). ++ *! 07-Jun-2001 sg Made DSPStream_AllocateBuffer fxn name plural. ++ *! 10-May-2001 jeh Code review cleanup. ++ *! 13-Feb-2001 kc DSP/BIOS Bridge name updates. ++ *! 06-Feb-2001 kc Updated DBC_Ensure for STRM_Select(). ++ *! 23-Oct-2000 jeh Allow NULL STRM_ATTRS passed to STRM_Open(). ++ *! 25-Sep-2000 jeh Created. ++ */ ++ ++#ifndef STRM_ ++#define STRM_ ++ ++#include ++ ++#include ++#include ++ ++/* ++ * ======== STRM_AllocateBuffer ======== ++ * Purpose: ++ * Allocate data buffer(s) for use with a stream. ++ * Parameter: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * uSize: Size (GPP bytes) of the buffer(s). ++ * uNumBufs: Number of buffers to allocate. ++ * apBuffer: Array to hold buffer addresses. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_EMEMORY: Insufficient memory. ++ * DSP_EFAIL: Failure occurred, unable to allocate buffers. ++ * DSP_ESIZE: uSize must be > 0 bytes. ++ * Requires: ++ * STRM_Init(void) called. ++ * apBuffer != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_AllocateBuffer(struct STRM_OBJECT *hStrm, ++ u32 uSize, ++ OUT u8 **apBuffer, ++ u32 uNumBufs, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== STRM_Close ======== ++ * Purpose: ++ * Close a stream opened with STRM_Open(). ++ * Parameter: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_EPENDING: Some data buffers issued to the stream have not ++ * been reclaimed. ++ * DSP_EFAIL: Failure to close stream. ++ * Requires: ++ * STRM_Init(void) called. 
++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_Close(struct STRM_OBJECT *hStrm, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== STRM_Create ======== ++ * Purpose: ++ * Create a STRM manager object. This object holds information about the ++ * device needed to open streams. ++ * Parameters: ++ * phStrmMgr: Location to store handle to STRM manager object on ++ * output. ++ * hDev: Device for this processor. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * DSP_EFAIL: General failure. ++ * Requires: ++ * STRM_Init(void) called. ++ * phStrmMgr != NULL. ++ * hDev != NULL. ++ * Ensures: ++ * DSP_SOK: Valid *phStrmMgr. ++ * error: *phStrmMgr == NULL. ++ */ ++ extern DSP_STATUS STRM_Create(OUT struct STRM_MGR **phStrmMgr, ++ struct DEV_OBJECT *hDev); ++ ++/* ++ * ======== STRM_Delete ======== ++ * Purpose: ++ * Delete the STRM Object. ++ * Parameters: ++ * hStrmMgr: Handle to STRM manager object from STRM_Create. ++ * Returns: ++ * Requires: ++ * STRM_Init(void) called. ++ * Valid hStrmMgr. ++ * Ensures: ++ * hStrmMgr is not valid. ++ */ ++ extern void STRM_Delete(struct STRM_MGR *hStrmMgr); ++ ++/* ++ * ======== STRM_Exit ======== ++ * Purpose: ++ * Discontinue usage of STRM module. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * STRM_Init(void) successfully called before. ++ * Ensures: ++ */ ++ extern void STRM_Exit(void); ++ ++/* ++ * ======== STRM_FreeBuffer ======== ++ * Purpose: ++ * Free buffer(s) allocated with STRM_AllocateBuffer. ++ * Parameter: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * apBuffer: Array containing buffer addresses. ++ * uNumBufs: Number of buffers to be freed. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid stream handle. ++ * DSP_EFAIL: Failure occurred, unable to free buffers. ++ * Requires: ++ * STRM_Init(void) called. ++ * apBuffer != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_FreeBuffer(struct STRM_OBJECT *hStrm, ++ u8 **apBuffer, u32 uNumBufs, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== STRM_GetEventHandle ======== ++ * Purpose: ++ * Get stream's user event handle. This function is used when closing ++ * a stream, so the event can be closed. ++ * Parameter: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * phEvent: Location to store event handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * Requires: ++ * STRM_Init(void) called. ++ * phEvent != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_GetEventHandle(struct STRM_OBJECT *hStrm, ++ OUT HANDLE *phEvent); ++ ++/* ++ * ======== STRM_GetInfo ======== ++ * Purpose: ++ * Get information about a stream. User's DSP_STREAMINFO is contained ++ * in STRM_INFO struct. STRM_INFO also contains Bridge private info. ++ * Parameters: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * pStreamInfo: Location to store stream info on output. ++ * uSteamInfoSize: Size of user's DSP_STREAMINFO structure. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_ESIZE: uStreamInfoSize < sizeof(DSP_STREAMINFO). ++ * DSP_EFAIL: Unable to get stream info. ++ * Requires: ++ * STRM_Init(void) called. ++ * pStreamInfo != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_GetInfo(struct STRM_OBJECT *hStrm, ++ OUT struct STRM_INFO *pStreamInfo, ++ u32 uStreamInfoSize); ++ ++/* ++ * ======== STRM_Idle ======== ++ * Purpose: ++ * Idle a stream and optionally flush output data buffers. 
++ * If this is an output stream and fFlush is TRUE, all data currently ++ * enqueued will be discarded. ++ * If this is an output stream and fFlush is FALSE, this function ++ * will block until all currently buffered data is output, or the timeout ++ * specified has been reached. ++ * After a successful call to STRM_Idle(), all buffers can immediately ++ * be reclaimed. ++ * Parameters: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * fFlush: If TRUE, discard output buffers. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_ETIMEOUT: A timeout occurred before the stream could be idled. ++ * DSP_ERESTART: A critical error occurred, DSP is being restarted. ++ * DSP_EFAIL: Unable to idle stream. ++ * Requires: ++ * STRM_Init(void) called. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_Idle(struct STRM_OBJECT *hStrm, bool fFlush); ++ ++/* ++ * ======== STRM_Init ======== ++ * Purpose: ++ * Initialize the STRM module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialization succeeded, FALSE otherwise. ++ * Requires: ++ * Ensures: ++ */ ++ extern bool STRM_Init(void); ++ ++/* ++ * ======== STRM_Issue ======== ++ * Purpose: ++ * Send a buffer of data to a stream. ++ * Parameters: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * pBuf: Pointer to buffer of data to be sent to the stream. ++ * ulBytes: Number of bytes of data in the buffer. ++ * ulBufSize: Actual buffer size in bytes. ++ * dwArg: A user argument that travels with the buffer. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_ESTREAMFULL: The stream is full. ++ * DSP_EFAIL: Failure occurred, unable to issue buffer. ++ * Requires: ++ * STRM_Init(void) called. ++ * pBuf != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_Issue(struct STRM_OBJECT *hStrm, IN u8 *pBuf, ++ u32 ulBytes, u32 ulBufSize, ++ IN u32 dwArg); ++ ++/* ++ * ======== STRM_Open ======== ++ * Purpose: ++ * Open a stream for sending/receiving data buffers to/from a task of ++ * DAIS socket node on the DSP. ++ * Parameters: ++ * hNode: Node handle returned from NODE_Allocate(). ++ * uDir: DSP_TONODE or DSP_FROMNODE. ++ * uIndex: Stream index. ++ * pAttr: Pointer to structure containing attributes to be ++ * applied to stream. Cannot be NULL. ++ * phStrm: Location to store stream handle on output. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hNode. ++ * DSP_EDIRECTION: Invalid uDir. ++ * DSP_EVALUE: Invalid uIndex. ++ * DSP_ENODETYPE: hNode is not a task or DAIS socket node. ++ * DSP_EFAIL: Unable to open stream. ++ * Requires: ++ * STRM_Init(void) called. ++ * phStrm != NULL. ++ * pAttr != NULL. ++ * Ensures: ++ * DSP_SOK: *phStrm is valid. ++ * error: *phStrm == NULL. ++ */ ++ extern DSP_STATUS STRM_Open(struct NODE_OBJECT *hNode, u32 uDir, ++ u32 uIndex, IN struct STRM_ATTR *pAttr, ++ OUT struct STRM_OBJECT **phStrm, ++ struct PROCESS_CONTEXT *pr_ctxt); ++ ++/* ++ * ======== STRM_PrepareBuffer ======== ++ * Purpose: ++ * Prepare a data buffer not allocated by DSPStream_AllocateBuffers() ++ * for use with a stream. ++ * Parameter: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * uSize: Size (GPP bytes) of the buffer. ++ * pBuffer: Buffer address. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_EFAIL: Failure occurred, unable to prepare buffer. ++ * Requires: ++ * STRM_Init(void) called. ++ * pBuffer != NULL. 
++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_PrepareBuffer(struct STRM_OBJECT *hStrm, ++ u32 uSize, ++ u8 *pBuffer); ++ ++/* ++ * ======== STRM_Reclaim ======== ++ * Purpose: ++ * Request a buffer back from a stream. ++ * Parameters: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * pBufPtr: Location to store pointer to reclaimed buffer. ++ * pulBytes: Location where number of bytes of data in the ++ * buffer will be written. ++ * pulBufSize: Location where actual buffer size will be written. ++ * pdwArg: Location where user argument that travels with ++ * the buffer will be written. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_ETIMEOUT: A timeout occurred before a buffer could be ++ * retrieved. ++ * DSP_EFAIL: Failure occurred, unable to reclaim buffer. ++ * Requires: ++ * STRM_Init(void) called. ++ * pBufPtr != NULL. ++ * pulBytes != NULL. ++ * pdwArg != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_Reclaim(struct STRM_OBJECT *hStrm, ++ OUT u8 **pBufPtr, u32 *pulBytes, ++ u32 *pulBufSize, u32 *pdwArg); ++ ++/* ++ * ======== STRM_RegisterNotify ======== ++ * Purpose: ++ * Register to be notified on specific events for this stream. ++ * Parameters: ++ * hStrm: Stream handle returned by STRM_Open(). ++ * uEventMask: Mask of types of events to be notified about. ++ * uNotifyType: Type of notification to be sent. ++ * hNotification: Handle to be used for notification. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_EMEMORY: Insufficient memory on GPP. ++ * DSP_EVALUE: uEventMask is invalid. ++ * DSP_ENOTIMPL: Notification type specified by uNotifyType is not ++ * supported. ++ * Requires: ++ * STRM_Init(void) called. ++ * hNotification != NULL. ++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_RegisterNotify(struct STRM_OBJECT *hStrm, ++ u32 uEventMask, u32 uNotifyType, ++ struct DSP_NOTIFICATION ++ *hNotification); ++ ++/* ++ * ======== STRM_Select ======== ++ * Purpose: ++ * Select a ready stream. ++ * Parameters: ++ * aStrmTab: Array of stream handles returned from STRM_Open(). ++ * nStrms: Number of stream handles in array. ++ * pMask: Location to store mask of ready streams on output. ++ * uTimeout: Timeout value (milliseconds). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ERANGE: nStrms out of range. ++ ++ * DSP_EHANDLE: Invalid stream handle in array. ++ * DSP_ETIMEOUT: A timeout occurred before a stream became ready. ++ * DSP_EFAIL: Failure occurred, unable to select a stream. ++ * Requires: ++ * STRM_Init(void) called. ++ * aStrmTab != NULL. ++ * nStrms > 0. ++ * pMask != NULL. ++ * Ensures: ++ * DSP_SOK: *pMask != 0 || uTimeout == 0. ++ * Error: *pMask == 0. ++ */ ++ extern DSP_STATUS STRM_Select(IN struct STRM_OBJECT **aStrmTab, ++ u32 nStrms, ++ OUT u32 *pMask, u32 uTimeout); ++ ++/* ++ * ======== STRM_UnprepareBuffer ======== ++ * Purpose: ++ * Unprepare a data buffer that was previously prepared for a stream ++ * with DSPStream_PrepareBuffer(), and that will no longer be used with ++ * the stream. ++ * Parameter: ++ * hStrm: Stream handle returned from STRM_Open(). ++ * uSize: Size (GPP bytes) of the buffer. ++ * pBuffer: Buffer address. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hStrm. ++ * DSP_EFAIL: Failure occurred, unable to unprepare buffer. ++ * Requires: ++ * STRM_Init(void) called. ++ * pBuffer != NULL. 
++ * Ensures: ++ */ ++ extern DSP_STATUS STRM_UnprepareBuffer(struct STRM_OBJECT *hStrm, ++ u32 uSize, ++ u8 *pBuffer); ++ ++#endif /* STRM_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/sync.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/sync.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/sync.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/sync.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,340 @@ ++/* ++ * sync.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== sync.h ======== ++ * Purpose: ++ * Provide synchronization services. ++ * ++ * Public Functions: ++ * SYNC_CloseEvent ++ * SYNC_DeleteCS ++ * SYNC_EnterCS ++ * SYNC_Exit ++ * SYNC_Init ++ * SYNC_InitializeCS ++ * SYNC_LeaveCS ++ * SYNC_OpenEvent ++ * SYNC_PostMessage ++ * SYNC_ResetEvent ++ * SYNC_SetEvent ++ * SYNC_WaitOnEvent ++ * SYNC_WaitOnMultipleEvents ++ * ++ *! Revision History: ++ *! ================ ++ *! 05-Oct-2000 jeh Added SYNC_WaitOnMultipleEvents(). ++ *! 01-Dec-1999 ag: Added #define SYNC_MAXNAMELENGTH. ++ *! 04-Nov-1999 kc: Added critical section functions and objects to SYNC. ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 24-Sep-1999 kc: Added WinCE notes. ++ *! 20-Oct-1997 gp: Removed unused SYNC_ critical section and must complete fxns ++ *! Added SYNC_HOBJECT, SYNC_ATTRS, and object validation, and ++ *! merged SYNC_DestroyEvent into SYNC_CloseEvent, and merged ++ *! SYNC_CreateEvent into SYNC_OpenEvent. ++ *! 07-Oct-1997 gp: Added SYNC_Create/DestroyEvent (for NT testing). ++ *! 06-Oct-1997 gp: Added SYNC_OpenEvent. ++ *! 03-Jun-1997 gp: Added SYNC_{Begin|End}CritSection() functions. ++ *! 03-Jan-1997 gp: Added SYNC_INFINITE define. ++ *! 05-Aug-1996 gp: Created. ++ */ ++ ++#ifndef _SYNC_H ++#define _SYNC_H ++ ++#define SIGNATURECS 0x53435953 /* "SYCS" (in reverse) */ ++#define SIGNATUREDPCCS 0x53445953 /* "SYDS" (in reverse) */ ++ ++/* Special timeout value indicating an infinite wait: */ ++#define SYNC_INFINITE 0xffffffff ++ ++/* Maximum string length of a named event */ ++#define SYNC_MAXNAMELENGTH 32 ++ ++/* Generic SYNC object: */ ++ struct SYNC_OBJECT; ++ ++/* Generic SYNC CS object: */ ++struct SYNC_CSOBJECT { ++ u32 dwSignature; /* used for object validation */ ++ struct semaphore sem; ++} ; ++ ++/* SYNC object attributes: */ ++ struct SYNC_ATTRS { ++ HANDLE hUserEvent; /* Platform's User Mode synch. object. */ ++ HANDLE hKernelEvent; /* Platform's Kernel Mode sync. object. */ ++ u32 dwReserved1; /* For future expansion. */ ++ u32 dwReserved2; /* For future expansion. */ ++ } ; ++ ++/* ++ * ======== SYNC_CloseEvent ======== ++ * Purpose: ++ * Close this event handle, freeing resources allocated in SYNC_OpenEvent ++ * if necessary. ++ * Parameters: ++ * hEvent: Handle to a synchronization event, created/opened in ++ * SYNC_OpenEvent. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EFAIL: Failed to close event handle. ++ * DSP_EHANDLE: Invalid handle. 
++ * Requires: ++ * SYNC initialized. ++ * Ensures: ++ * Any subsequent usage of hEvent would be invalid. ++ */ ++ extern DSP_STATUS SYNC_CloseEvent(IN struct SYNC_OBJECT *hEvent); ++ ++/* ++ * ======== SYNC_DeleteCS ======== ++ * Purpose: ++ * Delete a critical section. ++ * Parameters: ++ * hCSObj: critical section handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid handle. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_DeleteCS(IN struct SYNC_CSOBJECT *hCSObj); ++ ++/* ++ * ======== SYNC_EnterCS ======== ++ * Purpose: ++ * Enter the critical section. ++ * Parameters: ++ * hCSObj: critical section handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid handle. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_EnterCS(IN struct SYNC_CSOBJECT *hCSObj); ++ ++/* ++ * ======== SYNC_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * SYNC initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern void SYNC_Exit(void); ++ ++/* ++ * ======== SYNC_Init ======== ++ * Purpose: ++ * Initializes private state of SYNC module. ++ * Parameters: ++ * Returns: ++ * TRUE if initialized; FALSE if error occured. ++ * Requires: ++ * Ensures: ++ * SYNC initialized. ++ */ ++ extern bool SYNC_Init(void); ++ ++/* ++ * ======== SYNC_InitializeCS ======== ++ * Purpose: ++ * Initialize the critical section. ++ * Parameters: ++ * hCSObj: critical section handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Out of memory. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_InitializeCS(OUT struct SYNC_CSOBJECT **phCSObj); ++ ++/* ++ * ======== SYNC_InitializeDPCCS ======== ++ * Purpose: ++ * Initialize the critical section between process context and DPC. ++ * Parameters: ++ * hCSObj: critical section handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Out of memory. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_InitializeDPCCS(OUT struct SYNC_CSOBJECT ++ **phCSObj); ++ ++/* ++ * ======== SYNC_LeaveCS ======== ++ * Purpose: ++ * Leave the critical section. ++ * Parameters: ++ * hCSObj: critical section handle. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid handle. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_LeaveCS(IN struct SYNC_CSOBJECT *hCSObj); ++ ++/* ++ * ======== SYNC_OpenEvent ======== ++ * Purpose: ++ * Create/open and initialize an event object for thread synchronization, ++ * which is initially in the non-signalled state. ++ * Parameters: ++ * phEvent: Pointer to location to receive the event object handle. ++ * pAttrs: Pointer to SYNC_ATTRS object containing initial SYNC ++ * SYNC_OBJECT attributes. If this pointer is NULL, then ++ * SYNC_OpenEvent will create and manage an OS specific ++ * syncronization object. ++ * pAttrs->hUserEvent: Platform's User Mode synchronization object. ++ * ++ * The behaviour of the SYNC methods depend on the value of ++ * the hUserEvent attr: ++ * ++ * 1. (hUserEvent == NULL): ++ * A user mode event is created. ++ * 2. (hUserEvent != NULL): ++ * A user mode event is supplied by the caller of SYNC_OpenEvent(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Unable to create user mode event. ++ * DSP_EMEMORY: Insufficient memory. ++ * DSP_EINVALIDARG SYNC_ATTRS values are invalid. ++ * Requires: ++ * - SYNC initialized. ++ * - phEvent != NULL. 
++ * Ensures: ++ * If function succeeded, pEvent->hEvent must be a valid event handle. ++ */ ++ extern DSP_STATUS SYNC_OpenEvent(OUT struct SYNC_OBJECT **phEvent, ++ IN OPTIONAL struct SYNC_ATTRS ++ *pAttrs); ++ ++/* ++ * ========= SYNC_PostMessage ======== ++ * Purpose: ++ * To post a windows message ++ * Parameters: ++ * hWindow: Handle to the window ++ * uMsg: Message to be posted ++ * Returns: ++ * DSP_SOK: Success ++ * DSP_EFAIL: Post message failed ++ * DSP_EHANDLE: Invalid Window handle ++ * Requires: ++ * SYNC initialized ++ * Ensures ++ */ ++ extern DSP_STATUS SYNC_PostMessage(IN HANDLE hWindow, IN u32 uMsg); ++ ++/* ++ * ======== SYNC_ResetEvent ======== ++ * Purpose: ++ * Reset a syncronization event object state to non-signalled. ++ * Parameters: ++ * hEvent: Handle to a sync event. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EFAIL: Failed to reset event. ++ * DSP_EHANDLE: Invalid handle. ++ * Requires: ++ * SYNC initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_ResetEvent(IN struct SYNC_OBJECT *hEvent); ++ ++/* ++ * ======== SYNC_SetEvent ======== ++ * Purpose: ++ * Signal the event. Will unblock one waiting thread. ++ * Parameters: ++ * hEvent: Handle to an event object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Failed to signal event. ++ * DSP_EHANDLE: Invalid handle. ++ * Requires: ++ * SYNC initialized. ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_SetEvent(IN struct SYNC_OBJECT *hEvent); ++ ++/* ++ * ======== SYNC_WaitOnEvent ======== ++ * Purpose: ++ * Wait for an event to be signalled, up to the specified timeout. ++ * Parameters: ++ * hEvent: Handle to an event object. ++ * dwTimeOut: The time-out interval, in milliseconds. ++ * The function returns if the interval elapses, even if ++ * the object's state is nonsignaled. ++ * If zero, the function tests the object's state and ++ * returns immediately. ++ * If SYNC_INFINITE, the function's time-out interval ++ * never elapses. ++ * Returns: ++ * DSP_SOK: The object was signalled. ++ * DSP_EHANDLE: Invalid handle. ++ * SYNC_E_FAIL: Wait failed, possibly because the process terminated. ++ * SYNC_E_TIMEOUT: Timeout expired while waiting for event to be signalled. ++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_WaitOnEvent(IN struct SYNC_OBJECT *hEvent, ++ IN u32 dwTimeOut); ++ ++/* ++ * ======== SYNC_WaitOnMultipleEvents ======== ++ * Purpose: ++ * Wait for any of an array of events to be signalled, up to the ++ * specified timeout. ++ * Note: dwTimeOut must be SYNC_INFINITE to signal infinite wait. ++ * Parameters: ++ * hSyncEvents: Array of handles to event objects. ++ * uCount: Number of event handles. ++ * dwTimeOut: The time-out interval, in milliseconds. ++ * The function returns if the interval elapses, even if ++ * no event is signalled. ++ * If zero, the function tests the object's state and ++ * returns immediately. ++ * If SYNC_INFINITE, the function's time-out interval ++ * never elapses. ++ * puIndex: Location to store index of event that was signalled. ++ * Returns: ++ * DSP_SOK: The object was signalled. ++ * SYNC_E_FAIL: Wait failed, possibly because the process terminated. ++ * SYNC_E_TIMEOUT: Timeout expired before event was signalled. ++ * DSP_EMEMORY: Memory allocation failed. 
++ * Requires: ++ * Ensures: ++ */ ++ extern DSP_STATUS SYNC_WaitOnMultipleEvents(IN struct SYNC_OBJECT ++ **hSyncEvents, ++ IN u32 uCount, ++ IN u32 dwTimeout, ++ OUT u32 *puIndex); ++ ++#endif /* _SYNC_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/utildefs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/utildefs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/utildefs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/utildefs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,51 @@ ++/* ++ * utildefs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== utildefs.h ======== ++ * Purpose: ++ * Global UTIL constants and types, shared between WCD and DSPSYS. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 kc Removed wIOPort* entries from UTIL_HOSTCONFIG. ++ *! 12-Aug-2000 ag Added UTIL_SYSINFO typedef. ++ *! 08-Oct-1999 rr Adopted for WinCE where test fxns will be added in util.h ++ *! 26-Dec-1996 cr Created. ++ */ ++ ++#ifndef UTILDEFS_ ++#define UTILDEFS_ ++ ++/* constants taken from configmg.h */ ++#define UTIL_MAXMEMREGS 9 ++#define UTIL_MAXIOPORTS 20 ++#define UTIL_MAXIRQS 7 ++#define UTIL_MAXDMACHNLS 7 ++ ++/* misc. constants */ ++#define UTIL_MAXARGVS 10 ++ ++/* Platform specific important info */ ++ struct UTIL_SYSINFO { ++ /* Granularity of page protection; usually 1k or 4k */ ++ u32 dwPageSize; ++ u32 dwAllocationGranularity; /* VM granularity, usually 64K */ ++ u32 dwNumberOfProcessors; /* Used as sanity check */ ++ } ; ++ ++#endif /* UTILDEFS_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/util.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/util.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/util.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/util.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,122 @@ ++/* ++ * util.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== util.h ======== ++ * Purpose: ++ * Provide general purpose utility functions. ++ * ++ * Public Functions: ++ * UTIL_CDTestDll ++ * UTIL_CmdLineToArgs ++ * UTIL_Exit ++ * UTIL_GetSysInfo ++ * UTIL_Init ++ */ ++ ++#ifndef _UTIL_H ++#define _UTIL_H ++ ++#include ++#include ++ ++#include ++ ++/* ++ * ======== UTIL_CDTestDll ======== ++ * Purpose: ++ * Provides test entry point in class driver context. 
++ * Parameters: ++ * cArgc: test module command line input count. ++ * ppArgv: test module command line args. ++ * Returns: ++ * 0 if successful, a negative value otherwise. ++ * Requires: ++ * UTIL initialized. ++ * Ensures: ++ */ ++ extern u32 UTIL_CDTestDll(IN s32 cArgc, IN char **ppArgv); ++ ++/* ++ * ======== UTIL_CmdLineToArgs ======== ++ * Purpose: ++ * This function re-creates C-style cmd line argc & argv from WinMain() ++ * cmd line args. ++ * Parameters: ++ * s8 *pszProgName - The name of the program currently being executed. ++ * s8 *argv[] - The argument vector. ++ * s8 *pCmdLine - The pointer to the command line. ++ * bool fHasProgName - Indicats whether a program name is supplied. ++ * Returns: ++ * Returns the number of arguments found. ++ * Requires: ++ * UTIL initialized. ++ * Ensures: ++ */ ++ extern s32 UTIL_CmdLineToArgs(IN char *pszProgName, ++ IN char *argv[UTIL_MAXARGVS], ++ IN char *pCmdLine, IN bool fHasProgName); ++ ++/* ++ * ======== UTIL_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ * Parameters: ++ * Returns: ++ * Requires: ++ * UTIL initialized. ++ * Ensures: ++ * Resources used by module are freed when cRef reaches zero. ++ */ ++ extern inline void UTIL_Exit(void) ++ { ++ } ++/* ++ * ======== UTIL_GetSysInfo ======== ++ * Purpose: ++ * This function return platform specific system information. ++ * ++ * Parameters: ++ * pSysInfo - address to store the system information. ++ * Returns: ++ * DSP_SOK ++ * S_FAIL ++ * Requires: ++ * UTIL initialized. ++ * pSysInfo != NULL ++ * Ensures: ++ */ ++ extern DSP_STATUS UTIL_GetSysInfo(OUT struct UTIL_SYSINFO *pSysInfo); ++ ++/* ++ * ======== UTIL_Init ======== ++ * Purpose: ++ * Initializes private state of UTIL module. ++ * Parameters: ++ * Returns: ++ * TRUE if success, else FALSE. ++ * Requires: ++ * Ensures: ++ * UTIL initialized. ++ */ ++ extern inline bool UTIL_Init(void) ++ { ++ return true; ++ } ++ ++#endif /* _UTIL_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/uuidutil.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/uuidutil.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/uuidutil.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/uuidutil.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,74 @@ ++/* ++ * uuidutil.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== uuidutil.h ======== ++ * Description: ++ * This file contains the specification of UUID helper functions. ++ * ++ *! Revision History ++ *! ================ ++ *! 09-Nov-2000 kc: Modified description of UUID utility functions. ++ *! 29-Sep-2000 kc: Appended "UUID_" prefix to UUID helper functions. ++ *! 10-Aug-2000 kc: Created. ++ *! ++ */ ++ ++#ifndef UUIDUTIL_ ++#define UUIDUTIL_ ++ ++#define MAXUUIDLEN 37 ++ ++/* ++ * ======== UUID_UuidToString ======== ++ * Purpose: ++ * Converts a DSP_UUID to an ANSI string. 
++ * Parameters: ++ * pUuid: Pointer to a DSP_UUID object. ++ * pszUuid: Pointer to a buffer to receive a NULL-terminated UUID ++ * string. ++ * size: Maximum size of the pszUuid string. ++ * Returns: ++ * Requires: ++ * pUuid & pszUuid are non-NULL values. ++ * Ensures: ++ * Lenghth of pszUuid is less than MAXUUIDLEN. ++ * Details: ++ * UUID string limit currently set at MAXUUIDLEN. ++ */ ++ void UUID_UuidToString(IN struct DSP_UUID *pUuid, OUT char *pszUuid, ++ s32 size); ++ ++/* ++ * ======== UUID_UuidFromString ======== ++ * Purpose: ++ * Converts an ANSI string to a DSP_UUID. ++ * Parameters: ++ * pszUuid: Pointer to a string that represents a DSP_UUID object. ++ * pUuid: Pointer to a DSP_UUID object. ++ * Returns: ++ * Requires: ++ * pUuid & pszUuid are non-NULL values. ++ * Ensures: ++ * Details: ++ * We assume the string representation of a UUID has the following format: ++ * "12345678_1234_1234_1234_123456789abc". ++ */ ++ extern void UUID_UuidFromString(IN char *pszUuid, ++ OUT struct DSP_UUID *pUuid); ++ ++#endif /* UUIDUTIL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wcd.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wcd.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wcd.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wcd.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,61 @@ ++/* ++ * wcd.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wcd.h ======== ++ * Description: ++ * 'Bridge class driver library functions, object definitions, and ++ * return error/status codes. To be included by 'Bridge mini drivers. ++ * ++ * Public Functions: ++ * See mem.h and dbg.h. ++ * ++ * Notes: ++ * 'Bridge Class Driver services exported to WMD's are initialized by the ++ * WCD on behalf of the WMD. WMD's must not call module Init/Exit ++ * functions. ++ * ++ * To ensure WMD binary compatibility across different platforms, ++ * for the same processor, a WMD must restrict its usage of system ++ * services to those exported by the 'Bridge class library. ++ * ++ *! Revision History: ++ *! ================ ++ *! 07-Jun-2000 jeh Added dev.h ++ *! 01-Nov-1999 ag: #WINCE# WCD_MAJOR_VERSION=8 & WCD_MINOR_VERSION=0 to match ++ *! dll stamps. ++ *! 0.80 - 0.89 Alpha, 0.90 - 0.99 Beta, 1.00 - 1.10 FCS. ++ *! 17-Sep-1997 gp: Changed size of CFG_HOSTRES structure; and ISR_Install API; ++ *! Changed WCD_MINOR_VERSION 3 -> 4. ++ *! 15-Sep-1997 gp: Moved WCD_(Un)registerMinidriver to drv. ++ *! 25-Jul-1997 cr: Added WCD_UnregisterMinidriver. ++ *! 22-Jul-1997 cr: Added WCD_RegisterMinidriver, WCD_MINOR_VERSION 2 -> 3. ++ *! 12-Nov-1996 gp: Defined port io macros. ++ *! 07-Nov-1996 gp: Updated for code review. ++ *! 16-Jul-1996 gp: Added CHNL fxns; updated WCD lib version to 2. ++ *! 10-May-1996 gp: Separated WMD def.s' into wmd.h. ++ *! 03-May-1996 gp: Created. 
++ */ ++ ++#ifndef WCD_ ++#define WCD_ ++ ++/* This WCD Library Version: */ ++#define WCD_MAJOR_VERSION (u32)8 /* .8x - Alpha, .9x - Beta, 1.x FCS */ ++#define WCD_MINOR_VERSION (u32)0 ++ ++#endif /* WCD_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wcdioctl.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wcdioctl.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wcdioctl.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wcdioctl.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,519 @@ ++/* ++ * wcdioctl.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wcdioctl.h ======== ++ * Purpose: ++ * Contains structures and commands that are used for interaction ++ * between the DDSP API and class driver. ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping structs & offsets ++ *! 15-Oct-2002 kc Updated definitions for private PERF module. ++ *! 16-Aug-2002 map Added ARGS_MGR_REGISTEROBJECT & ARGS_MGR_UNREGISTEROBJECT ++ *! Added CMD_MGR_REGISTEROBJECT_OFFSET & ++ *! CMD_MGR_UNREGISTEROBJECT_OFFSET ++ *! 15-Jan-2002 ag Added actaul bufSize to ARGS_STRM_[RECLAIM][ISSUE]. ++ *! 15-Nov-2001 ag change to STRMINFO in ARGS_STRM_GETINFO. ++ *! 11-Sep-2001 ag ARGS_CMM_GETHANDLE defn uses DSP_HPROCESSOR. ++ *! 23-Apr-2001 jeh Added pStatus to NODE_TERMINATE args. ++ *! 13-Feb-2001 kc DSP/BIOS Bridge name updates. ++ *! 22-Nov-2000 kc: Added CMD_MGR_GETPERF_DATA_OFFSET for acquiring PERF stats. ++ *! 27-Oct-2000 jeh Added timeouts to NODE_GETMESSAGE, NODE_PUTMESSAGE args. ++ *! Removed NODE_GETMESSAGESTRM args. ++ *! 11-Oct-2000 ag: Added SM mgr(CMM) args. ++ *! 27-Sep-2000 jeh Removed struct DSP_BUFFERATTR param from ++ *! ARGS_STRM_ALLOCATEBUFFER. ++ *! 25-Sep-2000 rr: Updated to Version 0.9 ++ *! 07-Sep-2000 jeh Changed HANDLE to DSP_HNOTIFICATION in RegisterNotify args. ++ *! Added DSP_STRMATTR to DSPNode_Connect args. ++ *! 04-Aug-2000 rr: MEM and UTIL added to RM. ++ *! 27-Jul-2000 rr: NODE, MGR,STRM and PROC added ++ *! 27-Jun-2000 rr: Modifed to Use either PM or DSP/BIOS Bridge ++ *! IFDEF to build for PM or DSP/BIOS Bridge ++ *! 28-Jan-2000 rr: NT_CMD_FROM_OFFSET moved out to dsptrap.h ++ *! 24-Jan-2000 rr: Merged with Scott's code. ++ *! 21-Jan-2000 sg: In ARGS_CHNL_GETMODE changed mode to be u32 to be ++ *! consistent with chnldefs.h. ++ *! 11-Jan-2000 rr: CMD_CFG_GETCDVERSION_OFFSET added. ++ *! 12-Nov-1999 rr: CMD_BRD_MONITOR_OFFSET added ++ *! 09-Nov-1999 kc: Added MEMRY and enabled CMD_BRD_IOCTL_OFFSET. ++ *! 05-Nov-1999 ag: Added CHNL. ++ *! 02-Nov-1999 kc: Removed field from ARGS_UTIL_TESTDLL. ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 08-Oct-1999 rr: Util control offsets added. ++ *! 13-Sep-1999 kc: Added ARGS_UTIL_TESTDLL for PM test infrastructure. ++ *! 19-Aug-1999 rr: Created from WSX. 
Minimal Implementaion of BRD_Start and BRD ++ *! and BRD_Stop. IOCTL Offsets and CTRL Code. ++ */ ++ ++#ifndef WCDIOCTL_ ++#define WCDIOCTL_ ++ ++#include ++#include ++#include ++#include ++ ++union Trapped_Args { ++ ++ /* MGR Module */ ++ struct { ++ u32 uNode; ++ struct DSP_NDBPROPS __user *pNDBProps; ++ u32 uNDBPropsSize; ++ u32 __user *puNumNodes; ++ } ARGS_MGR_ENUMNODE_INFO; ++ ++ struct { ++ u32 uProcessor; ++ struct DSP_PROCESSORINFO __user *pProcessorInfo; ++ u32 uProcessorInfoSize; ++ u32 __user *puNumProcs; ++ } ARGS_MGR_ENUMPROC_INFO; ++ ++ struct { ++ struct DSP_UUID *pUuid; ++ enum DSP_DCDOBJTYPE objType; ++ char *pszPathName; ++ } ARGS_MGR_REGISTEROBJECT; ++ ++ struct { ++ struct DSP_UUID *pUuid; ++ enum DSP_DCDOBJTYPE objType; ++ } ARGS_MGR_UNREGISTEROBJECT; ++ ++ struct { ++ struct DSP_NOTIFICATION __user*__user *aNotifications; ++ u32 uCount; ++ u32 __user *puIndex; ++ u32 uTimeout; ++ } ARGS_MGR_WAIT; ++ ++ /* PROC Module */ ++ struct { ++ u32 uProcessor; ++ struct DSP_PROCESSORATTRIN __user *pAttrIn; ++ DSP_HPROCESSOR __user *phProcessor; ++ } ARGS_PROC_ATTACH; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u32 dwCmd; ++ struct DSP_CBDATA __user *pArgs; ++ } ARGS_PROC_CTRL; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ } ARGS_PROC_DETACH; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ DSP_HNODE __user *aNodeTab; ++ u32 uNodeTabSize; ++ u32 __user *puNumNodes; ++ u32 __user *puAllocated; ++ } ARGS_PROC_ENUMNODE_INFO; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u32 uResourceType; ++ struct DSP_RESOURCEINFO *pResourceInfo; ++ u32 uResourceInfoSize; ++ } ARGS_PROC_ENUMRESOURCES; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ struct DSP_PROCESSORSTATE __user *pProcStatus; ++ u32 uStateInfoSize; ++ } ARGS_PROC_GETSTATE; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u8 __user *pBuf; ++ ++ #ifndef RES_CLEANUP_DISABLE ++ u8 __user *pSize; ++ #endif ++ u32 uMaxSize; ++ } ARGS_PROC_GETTRACE; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ s32 iArgc; ++ char __user*__user *aArgv; ++ char *__user *aEnvp; ++ } ARGS_PROC_LOAD; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u32 uEventMask; ++ u32 uNotifyType; ++ struct DSP_NOTIFICATION __user *hNotification; ++ } ARGS_PROC_REGISTER_NOTIFY; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ } ARGS_PROC_START; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u32 ulSize; ++ void *__user *ppRsvAddr; ++ } ARGS_PROC_RSVMEM; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u32 ulSize; ++ void *pRsvAddr; ++ } ARGS_PROC_UNRSVMEM; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ void *pMpuAddr; ++ u32 ulSize; ++ void *pReqAddr; ++ void *__user *ppMapAddr; ++ u32 ulMapAttr; ++ } ARGS_PROC_MAPMEM; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ u32 ulSize; ++ void *pMapAddr; ++ } ARGS_PROC_UNMAPMEM; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ void *pMpuAddr; ++ u32 ulSize; ++ u32 ulFlags; ++ } ARGS_PROC_FLUSHMEMORY; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ } ARGS_PROC_STOP; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ void *pMpuAddr; ++ u32 ulSize; ++ } ARGS_PROC_INVALIDATEMEMORY; ++ ++ ++ /* NODE Module */ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ struct DSP_UUID __user *pNodeID; ++ struct DSP_CBDATA __user *pArgs; ++ struct DSP_NODEATTRIN __user *pAttrIn; ++ DSP_HNODE __user *phNode; ++ } ARGS_NODE_ALLOCATE; ++ ++ struct { ++ DSP_HNODE hNode; ++ u32 uSize; ++ struct DSP_BUFFERATTR __user *pAttr; ++ u8 *__user *pBuffer; ++ } ARGS_NODE_ALLOCMSGBUF; ++ ++ struct { ++ DSP_HNODE hNode; ++ s32 iPriority; ++ } 
ARGS_NODE_CHANGEPRIORITY; ++ ++ struct { ++ DSP_HNODE hNode; ++ u32 uStream; ++ DSP_HNODE hOtherNode; ++ u32 uOtherStream; ++ struct DSP_STRMATTR __user *pAttrs; ++ struct DSP_CBDATA __user *pConnParam; ++ } ARGS_NODE_CONNECT; ++ ++ struct { ++ DSP_HNODE hNode; ++ } ARGS_NODE_CREATE; ++ ++ struct { ++ DSP_HNODE hNode; ++ } ARGS_NODE_DELETE; ++ ++ struct { ++ DSP_HNODE hNode; ++ struct DSP_BUFFERATTR __user *pAttr; ++ u8 *pBuffer; ++ } ARGS_NODE_FREEMSGBUF; ++ ++ struct { ++ DSP_HNODE hNode; ++ struct DSP_NODEATTR __user *pAttr; ++ u32 uAttrSize; ++ } ARGS_NODE_GETATTR; ++ ++ struct { ++ DSP_HNODE hNode; ++ struct DSP_MSG __user *pMessage; ++ u32 uTimeout; ++ } ARGS_NODE_GETMESSAGE; ++ ++ struct { ++ DSP_HNODE hNode; ++ } ARGS_NODE_PAUSE; ++ ++ struct { ++ DSP_HNODE hNode; ++ struct DSP_MSG __user *pMessage; ++ u32 uTimeout; ++ } ARGS_NODE_PUTMESSAGE; ++ ++ struct { ++ DSP_HNODE hNode; ++ u32 uEventMask; ++ u32 uNotifyType; ++ struct DSP_NOTIFICATION __user *hNotification; ++ } ARGS_NODE_REGISTERNOTIFY; ++ ++ struct { ++ DSP_HNODE hNode; ++ } ARGS_NODE_RUN; ++ ++ struct { ++ DSP_HNODE hNode; ++ DSP_STATUS __user *pStatus; ++ } ARGS_NODE_TERMINATE; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ struct DSP_UUID __user *pNodeID; ++ struct DSP_NDBPROPS __user *pNodeProps; ++ } ARGS_NODE_GETUUIDPROPS; ++ ++ /* STRM module */ ++ ++ struct { ++ DSP_HSTREAM hStream; ++ u32 uSize; ++ u8 *__user *apBuffer; ++ u32 uNumBufs; ++ } ARGS_STRM_ALLOCATEBUFFER; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ } ARGS_STRM_CLOSE; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ u8 *__user *apBuffer; ++ u32 uNumBufs; ++ } ARGS_STRM_FREEBUFFER; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ HANDLE *phEvent; ++ } ARGS_STRM_GETEVENTHANDLE; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ struct STRM_INFO __user *pStreamInfo; ++ u32 uStreamInfoSize; ++ } ARGS_STRM_GETINFO; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ bool bFlush; ++ } ARGS_STRM_IDLE; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ u8 *pBuffer; ++ u32 dwBytes; ++ u32 dwBufSize; ++ u32 dwArg; ++ } ARGS_STRM_ISSUE; ++ ++ struct { ++ DSP_HNODE hNode; ++ u32 uDirection; ++ u32 uIndex; ++ struct STRM_ATTR __user *pAttrIn; ++ DSP_HSTREAM __user *phStream; ++ } ARGS_STRM_OPEN; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ u8 *__user *pBufPtr; ++ u32 __user *pBytes; ++ u32 __user *pBufSize; ++ u32 __user *pdwArg; ++ } ARGS_STRM_RECLAIM; ++ ++ struct { ++ DSP_HSTREAM hStream; ++ u32 uEventMask; ++ u32 uNotifyType; ++ struct DSP_NOTIFICATION __user *hNotification; ++ } ARGS_STRM_REGISTERNOTIFY; ++ ++ struct { ++ DSP_HSTREAM __user *aStreamTab; ++ u32 nStreams; ++ u32 __user *pMask; ++ u32 uTimeout; ++ } ARGS_STRM_SELECT; ++ ++ /* CMM Module */ ++ struct { ++ struct CMM_OBJECT *hCmmMgr; ++ u32 uSize; ++ struct CMM_ATTRS *pAttrs; ++ OUT void **ppBufVA; ++ } ARGS_CMM_ALLOCBUF; ++ ++ struct { ++ struct CMM_OBJECT *hCmmMgr; ++ void *pBufPA; ++ u32 ulSegId; ++ } ARGS_CMM_FREEBUF; ++ ++ struct { ++ DSP_HPROCESSOR hProcessor; ++ struct CMM_OBJECT *__user *phCmmMgr; ++ } ARGS_CMM_GETHANDLE; ++ ++ struct { ++ struct CMM_OBJECT *hCmmMgr; ++ struct CMM_INFO __user *pCmmInfo; ++ } ARGS_CMM_GETINFO; ++ ++ /* MEM Module */ ++ struct { ++ u32 cBytes; ++ enum MEM_POOLATTRS type; ++ void *pMem; ++ } ARGS_MEM_ALLOC; ++ ++ struct { ++ u32 cBytes; ++ enum MEM_POOLATTRS type; ++ void *pMem; ++ } ARGS_MEM_CALLOC; ++ ++ struct { ++ void *pMem; ++ } ARGS_MEM_FREE; ++ ++ struct { ++ void *pBuffer; ++ u32 cSize; ++ void *pLockedBuffer; ++ } ARGS_MEM_PAGELOCK; ++ ++ struct { ++ void *pBuffer; ++ u32 cSize; ++ } 
ARGS_MEM_PAGEUNLOCK; ++ ++ /* UTIL module */ ++ struct { ++ s32 cArgc; ++ char **ppArgv; ++ } ARGS_UTIL_TESTDLL; ++} ; ++ ++#define CMD_BASE 1 ++ ++/* MGR module offsets */ ++#define CMD_MGR_BASE_OFFSET CMD_BASE ++#define CMD_MGR_ENUMNODE_INFO_OFFSET (CMD_MGR_BASE_OFFSET + 0) ++#define CMD_MGR_ENUMPROC_INFO_OFFSET (CMD_MGR_BASE_OFFSET + 1) ++#define CMD_MGR_REGISTEROBJECT_OFFSET (CMD_MGR_BASE_OFFSET + 2) ++#define CMD_MGR_UNREGISTEROBJECT_OFFSET (CMD_MGR_BASE_OFFSET + 3) ++#define CMD_MGR_WAIT_OFFSET (CMD_MGR_BASE_OFFSET + 4) ++ ++#ifndef RES_CLEANUP_DISABLE ++#define CMD_MGR_RESOUCES_OFFSET (CMD_MGR_BASE_OFFSET + 5) ++#define CMD_MGR_END_OFFSET CMD_MGR_RESOUCES_OFFSET ++#else ++#define CMD_MGR_END_OFFSET CMD_MGR_WAIT_OFFSET ++#endif ++ ++#define CMD_PROC_BASE_OFFSET (CMD_MGR_END_OFFSET + 1) ++#define CMD_PROC_ATTACH_OFFSET (CMD_PROC_BASE_OFFSET + 0) ++#define CMD_PROC_CTRL_OFFSET (CMD_PROC_BASE_OFFSET + 1) ++#define CMD_PROC_DETACH_OFFSET (CMD_PROC_BASE_OFFSET + 2) ++#define CMD_PROC_ENUMNODE_OFFSET (CMD_PROC_BASE_OFFSET + 3) ++#define CMD_PROC_ENUMRESOURCES_OFFSET (CMD_PROC_BASE_OFFSET + 4) ++#define CMD_PROC_GETSTATE_OFFSET (CMD_PROC_BASE_OFFSET + 5) ++#define CMD_PROC_GETTRACE_OFFSET (CMD_PROC_BASE_OFFSET + 6) ++#define CMD_PROC_LOAD_OFFSET (CMD_PROC_BASE_OFFSET + 7) ++#define CMD_PROC_REGISTERNOTIFY_OFFSET (CMD_PROC_BASE_OFFSET + 8) ++#define CMD_PROC_START_OFFSET (CMD_PROC_BASE_OFFSET + 9) ++#define CMD_PROC_RSVMEM_OFFSET (CMD_PROC_BASE_OFFSET + 10) ++#define CMD_PROC_UNRSVMEM_OFFSET (CMD_PROC_BASE_OFFSET + 11) ++#define CMD_PROC_MAPMEM_OFFSET (CMD_PROC_BASE_OFFSET + 12) ++#define CMD_PROC_UNMAPMEM_OFFSET (CMD_PROC_BASE_OFFSET + 13) ++#define CMD_PROC_FLUSHMEMORY_OFFSET (CMD_PROC_BASE_OFFSET + 14) ++#define CMD_PROC_STOP_OFFSET (CMD_PROC_BASE_OFFSET + 15) ++#define CMD_PROC_INVALIDATEMEMORY_OFFSET (CMD_PROC_BASE_OFFSET + 16) ++#define CMD_PROC_END_OFFSET CMD_PROC_INVALIDATEMEMORY_OFFSET ++ ++ ++#define CMD_NODE_BASE_OFFSET (CMD_PROC_END_OFFSET + 1) ++#define CMD_NODE_ALLOCATE_OFFSET (CMD_NODE_BASE_OFFSET + 0) ++#define CMD_NODE_ALLOCMSGBUF_OFFSET (CMD_NODE_BASE_OFFSET + 1) ++#define CMD_NODE_CHANGEPRIORITY_OFFSET (CMD_NODE_BASE_OFFSET + 2) ++#define CMD_NODE_CONNECT_OFFSET (CMD_NODE_BASE_OFFSET + 3) ++#define CMD_NODE_CREATE_OFFSET (CMD_NODE_BASE_OFFSET + 4) ++#define CMD_NODE_DELETE_OFFSET (CMD_NODE_BASE_OFFSET + 5) ++#define CMD_NODE_FREEMSGBUF_OFFSET (CMD_NODE_BASE_OFFSET + 6) ++#define CMD_NODE_GETATTR_OFFSET (CMD_NODE_BASE_OFFSET + 7) ++#define CMD_NODE_GETMESSAGE_OFFSET (CMD_NODE_BASE_OFFSET + 8) ++#define CMD_NODE_PAUSE_OFFSET (CMD_NODE_BASE_OFFSET + 9) ++#define CMD_NODE_PUTMESSAGE_OFFSET (CMD_NODE_BASE_OFFSET + 10) ++#define CMD_NODE_REGISTERNOTIFY_OFFSET (CMD_NODE_BASE_OFFSET + 11) ++#define CMD_NODE_RUN_OFFSET (CMD_NODE_BASE_OFFSET + 12) ++#define CMD_NODE_TERMINATE_OFFSET (CMD_NODE_BASE_OFFSET + 13) ++#define CMD_NODE_GETUUIDPROPS_OFFSET (CMD_NODE_BASE_OFFSET + 14) ++#define CMD_NODE_END_OFFSET CMD_NODE_GETUUIDPROPS_OFFSET ++ ++#define CMD_STRM_BASE_OFFSET (CMD_NODE_END_OFFSET + 1) ++#define CMD_STRM_ALLOCATEBUFFER_OFFSET (CMD_STRM_BASE_OFFSET + 0) ++#define CMD_STRM_CLOSE_OFFSET (CMD_STRM_BASE_OFFSET + 1) ++#define CMD_STRM_FREEBUFFER_OFFSET (CMD_STRM_BASE_OFFSET + 2) ++#define CMD_STRM_GETEVENTHANDLE_OFFSET (CMD_STRM_BASE_OFFSET + 3) ++#define CMD_STRM_GETINFO_OFFSET (CMD_STRM_BASE_OFFSET + 4) ++#define CMD_STRM_IDLE_OFFSET (CMD_STRM_BASE_OFFSET + 5) ++#define CMD_STRM_ISSUE_OFFSET (CMD_STRM_BASE_OFFSET + 6) ++#define CMD_STRM_OPEN_OFFSET (CMD_STRM_BASE_OFFSET + 7) 
++#define CMD_STRM_RECLAIM_OFFSET (CMD_STRM_BASE_OFFSET + 8) ++#define CMD_STRM_REGISTERNOTIFY_OFFSET (CMD_STRM_BASE_OFFSET + 9) ++#define CMD_STRM_SELECT_OFFSET (CMD_STRM_BASE_OFFSET + 10) ++#define CMD_STRM_END_OFFSET CMD_STRM_SELECT_OFFSET ++ ++/* Communication Memory Manager (UCMM) */ ++#define CMD_CMM_BASE_OFFSET (CMD_STRM_END_OFFSET + 1) ++#define CMD_CMM_ALLOCBUF_OFFSET (CMD_CMM_BASE_OFFSET + 0) ++#define CMD_CMM_FREEBUF_OFFSET (CMD_CMM_BASE_OFFSET + 1) ++#define CMD_CMM_GETHANDLE_OFFSET (CMD_CMM_BASE_OFFSET + 2) ++#define CMD_CMM_GETINFO_OFFSET (CMD_CMM_BASE_OFFSET + 3) ++#define CMD_CMM_END_OFFSET CMD_CMM_GETINFO_OFFSET ++ ++#define CMD_BASE_END_OFFSET CMD_CMM_END_OFFSET ++#endif /* WCDIOCTL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmddeh.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmddeh.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmddeh.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmddeh.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,66 @@ ++/* ++ * wmddeh.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wmddeh.h ======== ++ * Description: ++ * Defines upper edge DEH functions required by all WMD/WCD driver ++ * interface tables. ++ * ++ * Public Functions: ++ * WMD_DEH_Create ++ * IVA_DEH_Create ++ * WMD_DEH_Destroy ++ * WMD_DEH_GetInfo ++ * WMD_DEH_RegisterNotify ++ * WMD_DEH_Notify ++ * ++ * Notes: ++ * Function comment headers reside with the function typedefs in wmd.h. ++ * ++ *! Revision History: ++ *! ================ ++ *! 26-Dec-2004 hn: added IVA_DEH_Create. ++ *! 13-Sep-2001 kc: created. ++ */ ++ ++#ifndef WMDDEH_ ++#define WMDDEH_ ++ ++#include ++ ++#include ++ ++ extern DSP_STATUS WMD_DEH_Create(OUT struct DEH_MGR **phDehMgr, ++ struct DEV_OBJECT *hDevObject); ++ ++ extern DSP_STATUS WMD_DEH_Destroy(struct DEH_MGR *hDehMgr); ++ ++ extern DSP_STATUS WMD_DEH_GetInfo(struct DEH_MGR *hDehMgr, ++ struct DSP_ERRORINFO *pErrInfo); ++ ++ extern DSP_STATUS WMD_DEH_RegisterNotify(struct DEH_MGR *hDehMgr, ++ u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION ++ *hNotification); ++ ++ extern void WMD_DEH_Notify(struct DEH_MGR *hDehMgr, ++ u32 ulEventMask, u32 dwErrInfo); ++ ++ extern void WMD_DEH_ReleaseDummyMem(void); ++#endif /* WMDDEH_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmd.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmd.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmd.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmd.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1193 @@ ++/* ++ * wmd.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
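[Editor's aside on the wcdioctl command table above] The CMD_*_OFFSET values chain each module's commands into one contiguous number space running from CMD_BASE to CMD_BASE_END_OFFSET, so a dispatcher can classify a request by range alone. A minimal sketch of such a range check, assuming only the macros defined above (the enum and function names below are illustrative, not part of this patch):

/* Illustrative only: classify a WCD command code by the ranges above. */
enum wcd_cmd_class {
	WCD_CMD_MGR, WCD_CMD_PROC, WCD_CMD_NODE,
	WCD_CMD_STRM, WCD_CMD_CMM, WCD_CMD_INVALID
};

static enum wcd_cmd_class wcd_classify_cmd(u32 code)
{
	if (code >= CMD_MGR_BASE_OFFSET && code <= CMD_MGR_END_OFFSET)
		return WCD_CMD_MGR;
	if (code >= CMD_PROC_BASE_OFFSET && code <= CMD_PROC_END_OFFSET)
		return WCD_CMD_PROC;
	if (code >= CMD_NODE_BASE_OFFSET && code <= CMD_NODE_END_OFFSET)
		return WCD_CMD_NODE;
	if (code >= CMD_STRM_BASE_OFFSET && code <= CMD_STRM_END_OFFSET)
		return WCD_CMD_STRM;
	if (code >= CMD_CMM_BASE_OFFSET && code <= CMD_CMM_END_OFFSET)
		return WCD_CMD_CMM;
	return WCD_CMD_INVALID;
}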
++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wmd.h ======== ++ * Purpose: ++ * 'Bridge mini driver entry point and interface function declarations. ++ * ++ * Public Functions: ++ * WMD_DRV_Entry ++ * ++ * Notes: ++ * The 'Bridge class driver obtains it's function interface to ++ * the 'Bridge mini driver via a call to WMD_DRV_Entry(). ++ * ++ * 'Bridge Class Driver services exported to WMD's are initialized by the ++ * WCD on behalf of the WMD. ++ * ++ * WMD function DBC Requires and Ensures are also made by the WCD on ++ * behalf of the WMD, to simplify the WMD code. ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping APIs - WMD_BRD_MemMap/UnMap ++ *! 01-Mar-2004 vp Added filename argument to WMD_DRV_Entry function. ++ *! 29-Aug-2002 map Added WMD_BRD_MemWrite() ++ *! 26-Aug-2002 map Added WMD_BRD_MemCopy() ++ *! 07-Jan-2002 ag Added cBufSize to WMD_CHNL_AddIOReq(). ++ *! 05-Nov-2001 kc: Added error handling DEH functions. ++ *! 06-Dec-2000 jeh Added uEventMask to WMD_MSG_RegisterNotify(). ++ *! 17-Nov-2000 jeh Added WMD_MSG and WMD_IO definitions. ++ *! 01-Nov-2000 jeh Added more error codes to WMD_CHNL_RegisterNotify(). ++ *! 13-Oct-2000 jeh Added dwArg to WMD_CHNL_AddIOReq(), added WMD_CHNL_IDLE ++ *! and WMD_CHNL_RegisterNotify for DSPStream support. ++ *! 17-Jan-2000 rr: WMD_BRD_SETSTATE Added. ++ *! 30-Jul-1997 gp: Split wmd IOCTL space into reserved and private. ++ *! 07-Nov-1996 gp: Updated for code review. ++ *! 18-Oct-1996 gp: Added WMD_E_HARDWARE return code from WMD_BRD_Monitor. ++ *! 09-Sep-1996 gp: Subtly altered the semantics of WMD_CHNL_GetInfo(). ++ *! 02-Aug-1996 gp: Ensured on BRD_Start that interrupts to the PC are enabled. ++ *! 11-Jul-1996 gp: Added CHNL interface. Note stronger DBC_Require conditions. ++ *! 29-May-1996 gp: Removed WCD_ prefix from functions imported from WCD.LIB. ++ *! 29-May-1996 gp: Made OUT param first in WMD_DEV_Create(). ++ *! 09-May-1996 gp: Created. ++ */ ++ ++#ifndef WMD_ ++#define WMD_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * Any IOCTLS at or above this value are reserved for standard WMD ++ * interfaces. ++ */ ++#define WMD_RESERVEDIOCTLBASE 0x8000 ++ ++/* Handle to mini-driver's private device context. */ ++ struct WMD_DEV_CONTEXT; ++ ++/*---------------------------------------------------------------------------*/ ++/* 'Bridge MINI DRIVER FUNCTION TYPES */ ++/*---------------------------------------------------------------------------*/ ++ ++/* ++ * ======== WMD_BRD_Monitor ======== ++ * Purpose: ++ * Bring the board to the BRD_IDLE (monitor) state. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device context. ++ * Returns: ++ * DSP_SOK: Success. ++ * WMD_E_HARDWARE: A test of hardware assumptions/integrity failed. ++ * WMD_E_TIMEOUT: Timeout occured waiting for a response from hardware. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL ++ * Ensures: ++ * DSP_SOK: Board is in BRD_IDLE state; ++ * else: Board state is indeterminate. 
++ */ ++ typedef DSP_STATUS( ++ *WMD_BRD_MONITOR) (struct WMD_DEV_CONTEXT ++ *hDevContext); ++ ++/* ++ * ======== WMD_BRD_SETSTATE ======== ++ * Purpose: ++ * Sets the Mini driver state ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * ulBrdState: Board state ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL; ++ * ulBrdState <= BRD_LASTSTATE. ++ * Ensures: ++ * ulBrdState <= BRD_LASTSTATE. ++ * Update the Board state to the specified state. ++ */ ++ typedef DSP_STATUS( ++ *WMD_BRD_SETSTATE) (struct WMD_DEV_CONTEXT ++ *hDevContext, u32 ulBrdState); ++ ++/* ++ * ======== WMD_BRD_Start ======== ++ * Purpose: ++ * Bring board to the BRD_RUNNING (start) state. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device context. ++ * dwDSPAddr: DSP address at which to start execution. ++ * Returns: ++ * DSP_SOK: Success. ++ * WMD_E_TIMEOUT: Timeout occured waiting for a response from hardware. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL ++ * Board is in monitor (BRD_IDLE) state. ++ * Ensures: ++ * DSP_SOK: Board is in BRD_RUNNING state. ++ * Interrupts to the PC are enabled. ++ * else: Board state is indeterminate. ++ */ ++ typedef DSP_STATUS(*WMD_BRD_START) (struct WMD_DEV_CONTEXT ++ *hDevContext, u32 dwDSPAddr); ++ ++/* ++ * ======== WMD_BRD_MemCopy ======== ++ * Purpose: ++ * Copy memory from one DSP address to another ++ * Parameters: ++ * pDevContext: Pointer to context handle ++ * ulDspDestAddr: DSP address to copy to ++ * ulDspSrcAddr: DSP address to copy from ++ * ulNumBytes: Number of bytes to copy ++ * ulMemType: What section of memory to copy to ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * pDevContext != NULL ++ * Ensures: ++ * DSP_SOK: Board is in BRD_RUNNING state. ++ * Interrupts to the PC are enabled. ++ * else: Board state is indeterminate. ++ */ ++ typedef DSP_STATUS(*WMD_BRD_MEMCOPY) (struct WMD_DEV_CONTEXT ++ *hDevContext, ++ u32 ulDspDestAddr, ++ u32 ulDspSrcAddr, ++ u32 ulNumBytes, u32 ulMemType); ++/* ++ * ======== WMD_BRD_MemWrite ======== ++ * Purpose: ++ * Write a block of host memory into a DSP address, into a given memory ++ * space. Unlike WMD_BRD_Write, this API does reset the DSP ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * dwDSPAddr: Address on DSP board (Destination). ++ * pHostBuf: Pointer to host buffer (Source). ++ * ulNumBytes: Number of bytes to transfer. ++ * ulMemType: Memory space on DSP to which to transfer. ++ * Returns: ++ * DSP_SOK: Success. ++ * WMD_E_TIMEOUT: Timeout occured waiting for a response from hardware. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL; ++ * pHostBuf != NULL. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_BRD_MEMWRITE) (struct WMD_DEV_CONTEXT ++ *hDevContext, ++ IN u8 *pHostBuf, ++ u32 dwDSPAddr, u32 ulNumBytes, ++ u32 ulMemType); ++ ++/* ++ * ======== WMD_BRD_MemMap ======== ++ * Purpose: ++ * Map a MPU memory region to a DSP/IVA memory space ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * ulMpuAddr: MPU memory region start address. ++ * ulVirtAddr: DSP/IVA memory region u8 address. ++ * ulNumBytes: Number of bytes to map. ++ * mapAttrs: Mapping attributes (e.g. endianness). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Other, unspecified error. 
++ * Requires: ++ * hDevContext != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_BRD_MEMMAP) (struct WMD_DEV_CONTEXT ++ *hDevContext, u32 ulMpuAddr, ++ u32 ulVirtAddr, u32 ulNumBytes, ++ u32 ulMapAttrs); ++ ++/* ++ * ======== WMD_BRD_MemUnMap ======== ++ * Purpose: ++ * UnMap an MPU memory region from DSP/IVA memory space ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * ulVirtAddr: DSP/IVA memory region u8 address. ++ * ulNumBytes: Number of bytes to unmap. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_BRD_MEMUNMAP) (struct WMD_DEV_CONTEXT ++ *hDevContext, ++ u32 ulVirtAddr, ++ u32 ulNumBytes); ++ ++/* ++ * ======== WMD_BRD_Stop ======== ++ * Purpose: ++ * Bring board to the BRD_STOPPED state. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device context. ++ * Returns: ++ * DSP_SOK: Success. ++ * WMD_E_TIMEOUT: Timeout occured waiting for a response from hardware. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL ++ * Ensures: ++ * DSP_SOK: Board is in BRD_STOPPED (stop) state; ++ * Interrupts to the PC are disabled. ++ * else: Board state is indeterminate. ++ */ ++ typedef DSP_STATUS(*WMD_BRD_STOP) (struct WMD_DEV_CONTEXT ++ *hDevContext); ++ ++/* ++ * ======== WMD_BRD_Status ======== ++ * Purpose: ++ * Report the current state of the board. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device context. ++ * pdwState: Ptr to BRD status variable. ++ * Returns: ++ * DSP_SOK: ++ * Requires: ++ * pdwState != NULL; ++ * hDevContext != NULL ++ * Ensures: ++ * *pdwState is one of {BRD_STOPPED, BRD_IDLE, BRD_RUNNING, BRD_UNKNOWN}; ++ */ ++ typedef DSP_STATUS(* ++ WMD_BRD_STATUS) (struct WMD_DEV_CONTEXT *hDevContext, ++ OUT BRD_STATUS * pdwState); ++ ++/* ++ * ======== WMD_BRD_Read ======== ++ * Purpose: ++ * Read a block of DSP memory, from a given memory space, into a host ++ * buffer. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * pHostBuf: Pointer to host buffer (Destination). ++ * dwDSPAddr: Address on DSP board (Source). ++ * ulNumBytes: Number of bytes to transfer. ++ * ulMemType: Memory space on DSP from which to transfer. ++ * Returns: ++ * DSP_SOK: Success. ++ * WMD_E_TIMEOUT: Timeout occured waiting for a response from hardware. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL; ++ * pHostBuf != NULL. ++ * Ensures: ++ * Will not write more than ulNumBytes bytes into pHostBuf. ++ */ ++typedef DSP_STATUS(*WMD_BRD_READ) (struct WMD_DEV_CONTEXT *hDevContext, ++ OUT u8 *pHostBuf, ++ u32 dwDSPAddr, ++ u32 ulNumBytes, ++ u32 ulMemType); ++ ++/* ++ * ======== WMD_BRD_Write ======== ++ * Purpose: ++ * Write a block of host memory into a DSP address, into a given memory ++ * space. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * dwDSPAddr: Address on DSP board (Destination). ++ * pHostBuf: Pointer to host buffer (Source). ++ * ulNumBytes: Number of bytes to transfer. ++ * ulMemType: Memory space on DSP to which to transfer. ++ * Returns: ++ * DSP_SOK: Success. ++ * WMD_E_TIMEOUT: Timeout occured waiting for a response from hardware. ++ * DSP_EFAIL: Other, unspecified error. ++ * Requires: ++ * hDevContext != NULL; ++ * pHostBuf != NULL. 
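[Editor's aside on the WMD_BRD_* hooks above] Taken together they describe a small board state machine: Monitor parks the DSP in BRD_IDLE, MemWrite loads an image without starting it, and Start moves it to BRD_RUNNING. A hedged sketch of how a class driver might sequence them through a mini-driver's function pointers; the wmd_boot_dsp helper, its arguments and the ulMemType value are illustrative, not part of this patch:

/* Illustrative bring-up sequence; assumes the function pointers come from
 * the WMD_DRV_INTERFACE table defined later in this header. */
static DSP_STATUS wmd_boot_dsp(struct WMD_DEV_CONTEXT *hDevContext,
			       WMD_BRD_MONITOR pfnBrdMonitor,
			       WMD_BRD_MEMWRITE pfnBrdMemWrite,
			       WMD_BRD_START pfnBrdStart,
			       u8 *pImage, u32 ulImageBytes,
			       u32 dwLoadAddr, u32 dwEntryPoint)
{
	DSP_STATUS status;

	status = (*pfnBrdMonitor)(hDevContext);	/* board -> BRD_IDLE */
	if (status != DSP_SOK)
		return status;

	/* Load the program image while the board sits in monitor state. */
	status = (*pfnBrdMemWrite)(hDevContext, pImage, dwLoadAddr,
				   ulImageBytes, 0 /* ulMemType, assumed */);
	if (status != DSP_SOK)
		return status;

	/* board -> BRD_RUNNING, execution begins at dwEntryPoint */
	return (*pfnBrdStart)(hDevContext, dwEntryPoint);
}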
++ * Ensures: ++ */ ++typedef DSP_STATUS(*WMD_BRD_WRITE)(struct WMD_DEV_CONTEXT *hDevContext, ++ IN u8 *pHostBuf, ++ u32 dwDSPAddr, ++ u32 ulNumBytes, ++ u32 ulMemType); ++ ++/* ++ * ======== WMD_CHNL_Create ======== ++ * Purpose: ++ * Create a channel manager object, responsible for opening new channels ++ * and closing old ones for a given 'Bridge board. ++ * Parameters: ++ * phChnlMgr: Location to store a channel manager object on output. ++ * hDevObject: Handle to a device object. ++ * pMgrAttrs: Channel manager attributes. ++ * pMgrAttrs->cChannels: Max channels ++ * pMgrAttrs->bIRQ: Channel's I/O IRQ number. ++ * pMgrAttrs->fShared: TRUE if the IRQ is shareable. ++ * pMgrAttrs->uWordSize: DSP Word size in equivalent PC bytes.. ++ * pMgrAttrs->dwSMBase: Base physical address of shared memory, if any. ++ * pMgrAttrs->uSMLength: Bytes of shared memory block. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * CHNL_E_ISR: Unable to plug ISR for given IRQ. ++ * CHNL_E_NOMEMMAP: Couldn't map physical address to a virtual one. ++ * Requires: ++ * phChnlMgr != NULL. ++ * pMgrAttrs != NULL ++ * pMgrAttrs field are all valid: ++ * 0 < cChannels <= CHNL_MAXCHANNELS. ++ * bIRQ <= 15. ++ * uWordSize > 0. ++ * IsValidHandle(hDevObject) ++ * No channel manager exists for this board. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_CREATE)(OUT struct CHNL_MGR ++ **phChnlMgr, ++ struct DEV_OBJECT ++ *hDevObject, ++ IN CONST struct ++ CHNL_MGRATTRS *pMgrAttrs); ++ ++/* ++ * ======== WMD_CHNL_Destroy ======== ++ * Purpose: ++ * Close all open channels, and destroy the channel manager. ++ * Parameters: ++ * hChnlMgr: Channel manager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: hChnlMgr was invalid. ++ * Requires: ++ * Ensures: ++ * DSP_SOK: Cancels I/O on each open channel. Closes each open channel. ++ * CHNL_Create may subsequently be called for the same device. ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_DESTROY) (struct CHNL_MGR ++ *hChnlMgr); ++/* ++ * ======== WMD_DEH_Notify ======== ++ * Purpose: ++ * When notified of DSP error, take appropriate action. ++ * Parameters: ++ * hDehMgr: Handle to DEH manager object. ++ * ulEventMask: Indicate the type of exception ++ * dwErrInfo: Error information ++ * Returns: ++ * ++ * Requires: ++ * hDehMgr != NULL; ++ * ulEventMask with a valid exception ++ * Ensures: ++ */ ++ typedef void (*WMD_DEH_NOTIFY)(struct DEH_MGR *hDehMgr, ++ u32 ulEventMask, u32 dwErrInfo); ++ ++ ++/* ++ * ======== WMD_CHNL_Open ======== ++ * Purpose: ++ * Open a new half-duplex channel to the DSP board. ++ * Parameters: ++ * phChnl: Location to store a channel object handle. ++ * hChnlMgr: Handle to channel manager, as returned by CHNL_GetMgr(). ++ * uMode: One of {CHNL_MODETODSP, CHNL_MODEFROMDSP} specifies ++ * direction of data transfer. ++ * uChnlId: If CHNL_PICKFREE is specified, the channel manager will ++ * select a free channel id (default); ++ * otherwise this field specifies the id of the channel. ++ * pAttrs: Channel attributes. Attribute fields are as follows: ++ * pAttrs->uIOReqs: Specifies the maximum number of I/O requests which can ++ * be pending at any given time. All request packets are ++ * preallocated when the channel is opened. ++ * pAttrs->hEvent: This field allows the user to supply an auto reset ++ * event object for channel I/O completion notifications. ++ * It is the responsibility of the user to destroy this ++ * object AFTER closing the channel. 
++ * This channel event object can be retrieved using ++ * CHNL_GetEventHandle(). ++ * pAttrs->hReserved: The kernel mode handle of this event object. ++ * ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: hChnlMgr is invalid. ++ * DSP_EMEMORY: Insufficient memory for requested resources. ++ * DSP_EINVALIDARG: Invalid number of IOReqs. ++ * CHNL_E_OUTOFSTREAMS: No free channels available. ++ * CHNL_E_BADCHANID: Channel ID is out of range. ++ * CHNL_E_CHANBUSY: Channel is in use. ++ * CHNL_E_NOIORPS: No free IO request packets available for ++ * queuing. ++ * Requires: ++ * phChnl != NULL. ++ * pAttrs != NULL. ++ * pAttrs->hEvent is a valid event handle. ++ * pAttrs->hReserved is the kernel mode handle for pAttrs->hEvent. ++ * Ensures: ++ * DSP_SOK: *phChnl is a valid channel. ++ * else: *phChnl is set to NULL if (phChnl != NULL); ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_OPEN) (OUT struct CHNL_OBJECT ++ **phChnl, ++ struct CHNL_MGR *hChnlMgr, ++ CHNL_MODE uMode, ++ u32 uChnlId, ++ CONST IN OPTIONAL struct ++ CHNL_ATTRS *pAttrs); ++ ++/* ++ * ======== WMD_CHNL_Close ======== ++ * Purpose: ++ * Ensures all pending I/O on this channel is cancelled, discards all ++ * queued I/O completion notifications, then frees the resources allocated ++ * for this channel, and makes the corresponding logical channel id ++ * available for subsequent use. ++ * Parameters: ++ * hChnl: Handle to a channel object. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnl. ++ * Requires: ++ * No thread must be blocked on this channel's I/O completion event. ++ * Ensures: ++ * DSP_SOK: hChnl is no longer valid. ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_CLOSE) (struct CHNL_OBJECT *hChnl); ++ ++/* ++ * ======== WMD_CHNL_AddIOReq ======== ++ * Purpose: ++ * Enqueue an I/O request for data transfer on a channel to the DSP. ++ * The direction (mode) is specified in the channel object. Note the DSP ++ * address is specified for channels opened in direct I/O mode. ++ * Parameters: ++ * hChnl: Channel object handle. ++ * pHostBuf: Host buffer address source. ++ * cBytes: Number of PC bytes to transfer. A zero value indicates ++ * that this buffer is the last in the output channel. ++ * A zero value is invalid for an input channel. ++ *! cBufSize: Actual buffer size in host bytes. ++ * dwDspAddr: DSP address for transfer. (Currently ignored). ++ * dwArg: A user argument that travels with the buffer. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnl. ++ * DSP_EPOINTER: pHostBuf is invalid. ++ * CHNL_E_NOEOS: User cannot mark EOS on an input channel. ++ * CHNL_E_CANCELLED: I/O has been cancelled on this channel. No further ++ * I/O is allowed. ++ * CHNL_E_EOS: End of stream was already marked on a previous ++ * IORequest on this channel. No further I/O is expected. ++ * CHNL_E_BUFSIZE: Buffer submitted to this output channel is larger than ++ * the size of the physical shared memory output window. ++ * Requires: ++ * Ensures: ++ * DSP_SOK: The buffer will be transferred if the channel is ready; ++ * otherwise, will be queued for transfer when the channel becomes ++ * ready. In any case, notifications of I/O completion are ++ * asynchronous. ++ * If cBytes is 0 for an output channel, subsequent CHNL_AddIOReq's ++ * on this channel will fail with error code CHNL_E_EOS. The ++ * corresponding IOC for this I/O request will have its status flag ++ * set to CHNL_IOCSTATEOS. 
++ */ ++ typedef DSP_STATUS(*WMD_CHNL_ADDIOREQ) (struct CHNL_OBJECT ++ *hChnl, ++ void *pHostBuf, ++ u32 cBytes, ++ u32 cBufSize, ++ OPTIONAL u32 dwDspAddr, ++ u32 dwArg); ++ ++/* ++ * ======== WMD_CHNL_GetIOC ======== ++ * Purpose: ++ * Dequeue an I/O completion record, which contains information about the ++ * completed I/O request. ++ * Parameters: ++ * hChnl: Channel object handle. ++ * dwTimeOut: A value of CHNL_IOCNOWAIT will simply dequeue the ++ * first available IOC. ++ * pIOC: On output, contains host buffer address, bytes ++ * transferred, and status of I/O completion. ++ * pIOC->status: See chnldefs.h. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hChnl. ++ * DSP_EPOINTER: pIOC is invalid. ++ * CHNL_E_NOIOC: CHNL_IOCNOWAIT was specified as the dwTimeOut parameter ++ * yet no I/O completions were queued. ++ * Requires: ++ * dwTimeOut == CHNL_IOCNOWAIT. ++ * Ensures: ++ * DSP_SOK: if there are any remaining IOC's queued before this call ++ * returns, the channel event object will be left in a signalled ++ * state. ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_GETIOC) (struct CHNL_OBJECT *hChnl, ++ u32 dwTimeOut, ++ OUT struct CHNL_IOC *pIOC); ++ ++/* ++ * ======== WMD_CHNL_CancelIO ======== ++ * Purpose: ++ * Return all I/O requests to the client which have not yet been ++ * transferred. The channel's I/O completion object is ++ * signalled, and all the I/O requests are queued as IOC's, with the ++ * status field set to CHNL_IOCSTATCANCEL. ++ * This call is typically used in abort situations, and is a prelude to ++ * CHNL_Close(); ++ * Parameters: ++ * hChnl: Channel object handle. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnl. ++ * Requires: ++ * Ensures: ++ * Subsequent I/O requests to this channel will not be accepted. ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_CANCELIO) (struct CHNL_OBJECT ++ *hChnl); ++ ++/* ++ * ======== WMD_CHNL_FlushIO ======== ++ * Purpose: ++ * For an output stream (to the DSP), indicates if any IO requests are in ++ * the output request queue. For input streams (from the DSP), will ++ * cancel all pending IO requests. ++ * Parameters: ++ * hChnl: Channel object handle. ++ * dwTimeOut: Timeout value for flush operation. ++ * Returns: ++ * DSP_SOK: Success; ++ * S_CHNLIOREQUEST: Returned if any IORequests are in the output queue. ++ * DSP_EHANDLE: Invalid hChnl. ++ * Requires: ++ * Ensures: ++ * DSP_SOK: No I/O requests will be pending on this channel. ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_FLUSHIO) (struct CHNL_OBJECT *hChnl, ++ u32 dwTimeOut); ++ ++/* ++ * ======== WMD_CHNL_GetInfo ======== ++ * Purpose: ++ * Retrieve information related to a channel. ++ * Parameters: ++ * hChnl: Handle to a valid channel object, or NULL. ++ * pInfo: Location to store channel info. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnl. ++ * DSP_EPOINTER: pInfo == NULL. ++ * Requires: ++ * Ensures: ++ * DSP_SOK: pInfo points to a filled in CHNL_INFO struct, ++ * if (pInfo != NULL). ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_GETINFO) (struct CHNL_OBJECT *hChnl, ++ OUT struct CHNL_INFO ++ *pChnlInfo); ++ ++/* ++ * ======== WMD_CHNL_GetMgrInfo ======== ++ * Purpose: ++ * Retrieve information related to the channel manager. ++ * Parameters: ++ * hChnlMgr: Handle to a valid channel manager, or NULL. ++ * uChnlID: Channel ID. ++ * pMgrInfo: Location to store channel manager info. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnlMgr. ++ * DSP_EPOINTER: pMgrInfo == NULL. ++ * CHNL_E_BADCHANID: Invalid channel ID. 
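[Editor's aside on the channel I/O hooks above] AddIOReq and GetIOC form the asynchronous half of the channel interface: buffers are queued for transfer and completions come back later as CHNL_IOC records. A hedged sketch of one request/completion round trip through these pointers; apart from the typedefs and CHNL_IOCNOWAIT, the names and argument values are illustrative:

/* Illustrative single-buffer transfer; error handling trimmed. */
static DSP_STATUS wmd_xfer_one(struct CHNL_OBJECT *hChnl,
			       WMD_CHNL_ADDIOREQ pfnChnlAddIOReq,
			       WMD_CHNL_GETIOC pfnChnlGetIOC,
			       void *pHostBuf, u32 cBytes)
{
	struct CHNL_IOC ioc;
	DSP_STATUS status;

	/* dwDspAddr is currently ignored per the comment above; dwArg just
	 * travels with the buffer and reappears in the completion record. */
	status = (*pfnChnlAddIOReq)(hChnl, pHostBuf, cBytes, cBytes,
				    0 /* dwDspAddr */, 0 /* dwArg */);
	if (status != DSP_SOK)
		return status;

	/* GetIOC requires CHNL_IOCNOWAIT, so the caller waits on the
	 * channel's event object and then dequeues the completion. */
	return (*pfnChnlGetIOC)(hChnl, CHNL_IOCNOWAIT, &ioc);
}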
++ * Requires: ++ * Ensures: ++ * DSP_SOK: pMgrInfo points to a filled in CHNL_MGRINFO ++ * struct, if (pMgrInfo != NULL). ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_GETMGRINFO) (struct CHNL_MGR ++ *hChnlMgr, ++ u32 uChnlID, ++ OUT struct CHNL_MGRINFO ++ *pMgrInfo); ++ ++/* ++ * ======== WMD_CHNL_Idle ======== ++ * Purpose: ++ * Idle a channel. If this is an input channel, or if this is an output ++ * channel and fFlush is TRUE, all currently enqueued buffers will be ++ * dequeued (data discarded for output channel). ++ * If this is an output channel and fFlush is FALSE, this function ++ * will block until all currently buffered data is output, or the timeout ++ * specified has been reached. ++ * ++ * Parameters: ++ * hChnl: Channel object handle. ++ * dwTimeOut: If output channel and fFlush is FALSE, timeout value ++ * to wait for buffers to be output. (Not used for ++ * input channel). ++ * fFlush: If output channel and fFlush is TRUE, discard any ++ * currently buffered data. If FALSE, wait for currently ++ * buffered data to be output, or timeout, whichever ++ * occurs first. fFlush is ignored for input channel. ++ * Returns: ++ * DSP_SOK: Success; ++ * DSP_EHANDLE: Invalid hChnl. ++ * CHNL_E_WAITTIMEOUT: Timeout occured before channel could be idled. ++ * Requires: ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_IDLE) (struct CHNL_OBJECT *hChnl, ++ u32 dwTimeOut, ++ bool fFlush); ++ ++/* ++ * ======== WMD_CHNL_RegisterNotify ======== ++ * Purpose: ++ * Register for notification of events on a channel. ++ * Parameters: ++ * hChnl: Channel object handle. ++ * uEventMask: Type of events to be notified about: IO completion ++ * (DSP_STREAMIOCOMPLETION) or end of stream ++ * (DSP_STREAMDONE). ++ * uNotifyType: DSP_SIGNALEVENT. ++ * hNotification: Handle of a DSP_NOTIFICATION object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory. ++ * DSP_EVALUE: uEventMask is 0 and hNotification was not ++ * previously registered. ++ * DSP_EHANDLE: NULL hNotification, hNotification event name ++ * too long, or hNotification event name NULL. ++ * Requires: ++ * Valid hChnl. ++ * hNotification != NULL. ++ * (uEventMask & ~(DSP_STREAMIOCOMPLETION | DSP_STREAMDONE)) == 0. ++ * uNotifyType == DSP_SIGNALEVENT. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_CHNL_REGISTERNOTIFY) ++ (struct CHNL_OBJECT *hChnl, ++ u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification); ++ ++/* ++ * ======== WMD_DEV_Create ======== ++ * Purpose: ++ * Complete creation of the device object for this board. ++ * Parameters: ++ * phDevContext: Ptr to location to store a WMD device context. ++ * hDevObject: Handle to a Device Object, created and managed by WCD. ++ * pConfig: Ptr to configuration parameters provided by the Windows ++ * Configuration Manager during device loading. ++ * pDspConfig: DSP resources, as specified in the registry key for this ++ * device. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Unable to allocate memory for device context. ++ * WMD_E_BADCONFIG: One or more of the host or DSP configuration ++ * parameters did not satisfy hardware assumptions ++ * made by this WMD. ++ * Requires: ++ * phDevContext != NULL; ++ * hDevObject != NULL; ++ * pConfig != NULL; ++ * pDspConfig != NULL; ++ * Fields in pConfig and pDspConfig contain valid values. ++ * Ensures: ++ * DSP_SOK: All mini-driver specific DSP resource and other ++ * board context has been allocated. ++ * DSP_EMEMORY: WMD failed to allocate resources. ++ * Any acquired resources have been freed. 
The WCD will ++ * not call WMD_DEV_Destroy() if WMD_DEV_Create() fails. ++ * Details: ++ * Called during the CONFIGMG's Device_Init phase. Based on host and ++ * DSP configuration information, create a board context, a handle to ++ * which is passed into other WMD BRD and CHNL functions. The ++ * board context contains state information for the device. Since the ++ * addresses of all IN pointer parameters may be invalid when this ++ * function returns, they must not be stored into the device context ++ * structure. ++ */ ++ typedef DSP_STATUS(*WMD_DEV_CREATE) (OUT struct WMD_DEV_CONTEXT ++ **phDevContext, ++ struct DEV_OBJECT ++ *hDevObject, ++ IN CONST struct CFG_HOSTRES ++ *pConfig, ++ IN CONST struct CFG_DSPRES ++ *pDspConfig); ++ ++/* ++ * ======== WMD_DEV_Ctrl ======== ++ * Purpose: ++ * Mini-driver specific interface. ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device info. ++ * dwCmd: WMD defined command code. ++ * pArgs: Pointer to an arbitrary argument structure. ++ * Returns: ++ * DSP_SOK or DSP_EFAIL. Actual command error codes should be passed back ++ * in the pArgs structure, and are defined by the WMD implementor. ++ * Requires: ++ * All calls are currently assumed to be synchronous. There are no ++ * IOCTL completion routines provided. ++ * Ensures: ++ */ ++typedef DSP_STATUS(*WMD_DEV_CTRL)(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwCmd, ++ IN OUT void *pArgs); ++ ++/* ++ * ======== WMD_DEV_Destroy ======== ++ * Purpose: ++ * Deallocate WMD device extension structures and all other resources ++ * acquired by the mini-driver. ++ * No calls to other mini driver functions may subsequently ++ * occur, except for WMD_DEV_Create(). ++ * Parameters: ++ * hDevContext: Handle to mini-driver defined device information. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Failed to release a resource previously acquired. ++ * Requires: ++ * hDevContext != NULL; ++ * Ensures: ++ * DSP_SOK: Device context is freed. ++ */ ++ typedef DSP_STATUS(*WMD_DEV_DESTROY) (struct WMD_DEV_CONTEXT ++ *hDevContext); ++ ++/* ++ * ======== WMD_DEH_Create ======== ++ * Purpose: ++ * Create an object that manages DSP exceptions from the GPP. ++ * Parameters: ++ * phDehMgr: Location to store DEH manager on output. ++ * hDevObject: Handle to DEV object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EFAIL: Creation failed. ++ * Requires: ++ * hDevObject != NULL; ++ * phDehMgr != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_DEH_CREATE) (OUT struct DEH_MGR ++ **phDehMgr, ++ struct DEV_OBJECT ++ *hDevObject); ++ ++/* ++ * ======== WMD_DEH_Destroy ======== ++ * Purpose: ++ * Destroy the DEH object. ++ * Parameters: ++ * hDehMgr: Handle to DEH manager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Destroy failed. ++ * Requires: ++ * hDehMgr != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_DEH_DESTROY) (struct DEH_MGR *hDehMgr); ++ ++/* ++ * ======== WMD_DEH_RegisterNotify ======== ++ * Purpose: ++ * Register for DEH event notification. ++ * Parameters: ++ * hDehMgr: Handle to DEH manager object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Destroy failed. ++ * Requires: ++ * hDehMgr != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_DEH_REGISTERNOTIFY) ++ (struct DEH_MGR *hDehMgr, ++ u32 uEventMask, u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification); ++ ++/* ++ * ======== WMD_DEH_GetInfo ======== ++ * Purpose: ++ * Get DSP exception info. 
++ * Parameters: ++ * phDehMgr: Location to store DEH manager on output. ++ * pErrInfo: Ptr to error info structure. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Creation failed. ++ * Requires: ++ * phDehMgr != NULL; ++ * pErrorInfo != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_DEH_GETINFO) (struct DEH_MGR *phDehMgr, ++ struct DSP_ERRORINFO *pErrInfo); ++ ++/* ++ * ======== WMD_IO_Create ======== ++ * Purpose: ++ * Create an object that manages I/O between CHNL and MSG. ++ * Parameters: ++ * phIOMgr: Location to store IO manager on output. ++ * hChnlMgr: Handle to channel manager. ++ * hMsgMgr: Handle to message manager. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EFAIL: Creation failed. ++ * Requires: ++ * hDevObject != NULL; ++ * Channel manager already created; ++ * Message manager already created; ++ * pMgrAttrs != NULL; ++ * phIOMgr != NULL; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_IO_CREATE) (OUT struct IO_MGR **phIOMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct IO_ATTRS *pMgrAttrs); ++ ++/* ++ * ======== WMD_IO_Destroy ======== ++ * Purpose: ++ * Destroy object created in WMD_IO_Create. ++ * Parameters: ++ * hIOMgr: IO Manager. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Memory allocation failure. ++ * DSP_EFAIL: Creation failed. ++ * Requires: ++ * Valid hIOMgr; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_IO_DESTROY) (struct IO_MGR *hIOMgr); ++ ++/* ++ * ======== WMD_IO_OnLoaded ======== ++ * Purpose: ++ * Called whenever a program is loaded to update internal data. For ++ * example, if shared memory is used, this function would update the ++ * shared memory location and address. ++ * Parameters: ++ * hIOMgr: IO Manager. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Internal failure occurred. ++ * Requires: ++ * Valid hIOMgr; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_IO_ONLOADED) (struct IO_MGR *hIOMgr); ++ ++/* ++ * ======== WMD_IO_GETPROCLOAD ======== ++ * Purpose: ++ * Called to get the Processor's current and predicted load ++ * Parameters: ++ * hIOMgr: IO Manager. ++ * pProcLoadStat Processor Load statistics ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EFAIL: Internal failure occurred. ++ * Requires: ++ * Valid hIOMgr; ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_IO_GETPROCLOAD)(struct IO_MGR *hIOMgr, ++ struct DSP_PROCLOADSTAT *pProcLoadStat); ++ ++/* ++ * ======== WMD_MSG_Create ======== ++ * Purpose: ++ * Create an object to manage message queues. Only one of these objects ++ * can exist per device object. ++ * Parameters: ++ * phMsgMgr: Location to store MSG manager on output. ++ * hDevObject: Handle to a device object. ++ * msgCallback: Called whenever an RMS_EXIT message is received. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory. ++ * Requires: ++ * phMsgMgr != NULL. ++ * msgCallback != NULL. ++ * hDevObject != NULL. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_MSG_CREATE) ++ (OUT struct MSG_MGR **phMsgMgr, ++ struct DEV_OBJECT *hDevObject, ++ MSG_ONEXIT msgCallback); ++ ++/* ++ * ======== WMD_MSG_CreateQueue ======== ++ * Purpose: ++ * Create a MSG queue for sending or receiving messages from a Message ++ * node on the DSP. ++ * Parameters: ++ * hMsgMgr: MSG queue manager handle returned from ++ * WMD_MSG_Create. ++ * phMsgQueue: Location to store MSG queue on output. ++ * dwId: Identifier for messages (node environment pointer). ++ * uMaxMsgs: Max number of simultaneous messages for the node. 
++ * h: Handle passed to hMsgMgr->msgCallback(). ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory. ++ * Requires: ++ * phMsgQueue != NULL. ++ * h != NULL. ++ * uMaxMsgs > 0. ++ * Ensures: ++ * phMsgQueue !=NULL <==> DSP_SOK. ++ */ ++ typedef DSP_STATUS(*WMD_MSG_CREATEQUEUE) ++ (struct MSG_MGR *hMsgMgr, ++ OUT struct MSG_QUEUE **phMsgQueue, ++ u32 dwId, u32 uMaxMsgs, HANDLE h); ++ ++/* ++ * ======== WMD_MSG_Delete ======== ++ * Purpose: ++ * Delete a MSG manager allocated in WMD_MSG_Create(). ++ * Parameters: ++ * hMsgMgr: Handle returned from WMD_MSG_Create(). ++ * Returns: ++ * Requires: ++ * Valid hMsgMgr. ++ * Ensures: ++ */ ++ typedef void(*WMD_MSG_DELETE) (struct MSG_MGR *hMsgMgr); ++ ++/* ++ * ======== WMD_MSG_DeleteQueue ======== ++ * Purpose: ++ * Delete a MSG queue allocated in WMD_MSG_CreateQueue. ++ * Parameters: ++ * hMsgQueue: Handle to MSG queue returned from ++ * WMD_MSG_CreateQueue. ++ * Returns: ++ * Requires: ++ * Valid hMsgQueue. ++ * Ensures: ++ */ ++ typedef void(*WMD_MSG_DELETEQUEUE) (struct MSG_QUEUE *hMsgQueue); ++ ++/* ++ * ======== WMD_MSG_Get ======== ++ * Purpose: ++ * Get a message from a MSG queue. ++ * Parameters: ++ * hMsgQueue: Handle to MSG queue returned from ++ * WMD_MSG_CreateQueue. ++ * pMsg: Location to copy message into. ++ * uTimeout: Timeout to wait for a message. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ETIMEOUT: Timeout occurred. ++ * DSP_EFAIL: No frames available for message (uMaxMessages too ++ * small). ++ * Requires: ++ * Valid hMsgQueue. ++ * pMsg != NULL. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_MSG_GET) (struct MSG_QUEUE *hMsgQueue, ++ struct DSP_MSG *pMsg, ++ u32 uTimeout); ++ ++/* ++ * ======== WMD_MSG_Put ======== ++ * Purpose: ++ * Put a message onto a MSG queue. ++ * Parameters: ++ * hMsgQueue: Handle to MSG queue returned from ++ * WMD_MSG_CreateQueue. ++ * pMsg: Pointer to message. ++ * uTimeout: Timeout to wait for a message. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_ETIMEOUT: Timeout occurred. ++ * DSP_EFAIL: No frames available for message (uMaxMessages too ++ * small). ++ * Requires: ++ * Valid hMsgQueue. ++ * pMsg != NULL. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_MSG_PUT) (struct MSG_QUEUE *hMsgQueue, ++ IN CONST struct DSP_MSG *pMsg, ++ u32 uTimeout); ++ ++/* ++ * ======== WMD_MSG_RegisterNotify ======== ++ * Purpose: ++ * Register notification for when a message is ready. ++ * Parameters: ++ * hMsgQueue: Handle to MSG queue returned from ++ * WMD_MSG_CreateQueue. ++ * uEventMask: Type of events to be notified about: Must be ++ * DSP_NODEMESSAGEREADY, or 0 to unregister. ++ * uNotifyType: DSP_SIGNALEVENT. ++ * hNotification: Handle of notification object. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Insufficient memory. ++ * Requires: ++ * Valid hMsgQueue. ++ * hNotification != NULL. ++ * uNotifyType == DSP_SIGNALEVENT. ++ * uEventMask == DSP_NODEMESSAGEREADY || uEventMask == 0. ++ * Ensures: ++ */ ++ typedef DSP_STATUS(*WMD_MSG_REGISTERNOTIFY) ++ (struct MSG_QUEUE *hMsgQueue, ++ u32 uEventMask, u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification); ++ ++/* ++ * ======== WMD_MSG_SetQueueId ======== ++ * Purpose: ++ * Set message queue id to node environment. Allows WMD_MSG_CreateQueue ++ * to be called in NODE_Allocate, before the node environment is known. ++ * Parameters: ++ * hMsgQueue: Handle to MSG queue returned from ++ * WMD_MSG_CreateQueue. ++ * dwId: Node environment pointer. ++ * Returns: ++ * Requires: ++ * Valid hMsgQueue. ++ * dwId != 0. 
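[Editor's aside on the MSG hooks above] WMD_MSG_SetQueueId exists so that a queue can be created in NODE_Allocate, before the node environment pointer that normally identifies it is known. A hedged sketch of that two-step pattern followed by one message exchange; the helper name, the placeholder id, uMaxMsgs and the timeout values are illustrative, and the timeout unit is assumed to be milliseconds:

/* Illustrative: create the queue early, bind it to the node environment
 * once that pointer exists, then exchange one message. */
static DSP_STATUS wmd_msg_example(struct MSG_MGR *hMsgMgr,
				  WMD_MSG_CREATEQUEUE pfnMsgCreateQueue,
				  WMD_MSG_SETQUEUEID pfnMsgSetQueueId,
				  WMD_MSG_PUT pfnMsgPut,
				  WMD_MSG_GET pfnMsgGet,
				  HANDLE hArg, u32 dwNodeEnv,
				  struct DSP_MSG *pMsg)
{
	struct MSG_QUEUE *hMsgQueue;
	DSP_STATUS status;

	/* Node environment not known yet: use a placeholder id. */
	status = (*pfnMsgCreateQueue)(hMsgMgr, &hMsgQueue, 0 /* dwId */,
				      8 /* uMaxMsgs */, hArg);
	if (status != DSP_SOK)
		return status;

	/* Later, once the node environment pointer is available: */
	(*pfnMsgSetQueueId)(hMsgQueue, dwNodeEnv);

	status = (*pfnMsgPut)(hMsgQueue, pMsg, 100 /* uTimeout, assumed ms */);
	if (status == DSP_SOK)
		status = (*pfnMsgGet)(hMsgQueue, pMsg, 100);

	return status;
}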
++ * Ensures: ++ */ ++ typedef void(*WMD_MSG_SETQUEUEID) (struct MSG_QUEUE *hMsgQueue, ++ u32 dwId); ++ ++/* ++ * 'Bridge Mini Driver (WMD) interface function table. ++ * ++ * The information in this table is filled in by the specific mini-driver, ++ * and copied into the 'Bridge class driver's own space. If any interface ++ * function field is set to a value of NULL, then the class driver will ++ * consider that function not implemented, and return the error code ++ * DSP_ENOTIMPL when a WMD client attempts to call that function. ++ * ++ * This function table contains WCD version numbers, which are used by the ++ * WMD loader to help ensure backwards compatility between older WMD's and a ++ * newer 'Bridge Class Driver. These must be set to WCD_MAJOR_VERSION ++ * and WCD_MINOR_VERSION, respectively. ++ * ++ * A mini-driver need not export a CHNL interface. In this case, *all* of ++ * the WMD_CHNL_* entries must be set to NULL. ++ */ ++ struct WMD_DRV_INTERFACE { ++ u32 dwWCDMajorVersion; /* Set to WCD_MAJOR_VERSION. */ ++ u32 dwWCDMinorVersion; /* Set to WCD_MINOR_VERSION. */ ++ WMD_DEV_CREATE pfnDevCreate; /* Create device context */ ++ WMD_DEV_DESTROY pfnDevDestroy; /* Destroy device context */ ++ WMD_DEV_CTRL pfnDevCntrl; /* Optional vendor interface */ ++ WMD_BRD_MONITOR pfnBrdMonitor; /* Load and/or start monitor */ ++ WMD_BRD_START pfnBrdStart; /* Start DSP program. */ ++ WMD_BRD_STOP pfnBrdStop; /* Stop/reset board. */ ++ WMD_BRD_STATUS pfnBrdStatus; /* Get current board status. */ ++ WMD_BRD_READ pfnBrdRead; /* Read board memory */ ++ WMD_BRD_WRITE pfnBrdWrite; /* Write board memory. */ ++ WMD_BRD_SETSTATE pfnBrdSetState; /* Sets the Board State */ ++ WMD_BRD_MEMCOPY pfnBrdMemCopy; /* Copies DSP Memory */ ++ WMD_BRD_MEMWRITE pfnBrdMemWrite; /* Write DSP Memory w/o halt */ ++ WMD_BRD_MEMMAP pfnBrdMemMap; /* Maps MPU mem to DSP mem */ ++ WMD_BRD_MEMUNMAP pfnBrdMemUnMap; /* Unmaps MPU mem to DSP mem */ ++ WMD_CHNL_CREATE pfnChnlCreate; /* Create channel manager. */ ++ WMD_CHNL_DESTROY pfnChnlDestroy; /* Destroy channel manager. */ ++ WMD_CHNL_OPEN pfnChnlOpen; /* Create a new channel. */ ++ WMD_CHNL_CLOSE pfnChnlClose; /* Close a channel. */ ++ WMD_CHNL_ADDIOREQ pfnChnlAddIOReq; /* Req I/O on a channel. */ ++ WMD_CHNL_GETIOC pfnChnlGetIOC; /* Wait for I/O completion. */ ++ WMD_CHNL_CANCELIO pfnChnlCancelIO; /* Cancl I/O on a channel. */ ++ WMD_CHNL_FLUSHIO pfnChnlFlushIO; /* Flush I/O. */ ++ WMD_CHNL_GETINFO pfnChnlGetInfo; /* Get channel specific info */ ++ /* Get channel manager info. */ ++ WMD_CHNL_GETMGRINFO pfnChnlGetMgrInfo; ++ WMD_CHNL_IDLE pfnChnlIdle; /* Idle the channel */ ++ /* Register for notif. */ ++ WMD_CHNL_REGISTERNOTIFY pfnChnlRegisterNotify; ++ WMD_DEH_CREATE pfnDehCreate; /* Create DEH manager */ ++ WMD_DEH_DESTROY pfnDehDestroy; /* Destroy DEH manager */ ++ WMD_DEH_NOTIFY pfnDehNotify; /* Notify of DSP error */ ++ /* register for deh notif. */ ++ WMD_DEH_REGISTERNOTIFY pfnDehRegisterNotify; ++ WMD_DEH_GETINFO pfnDehGetInfo; /* register for deh notif. 
*/ ++ WMD_IO_CREATE pfnIOCreate; /* Create IO manager */ ++ WMD_IO_DESTROY pfnIODestroy; /* Destroy IO manager */ ++ WMD_IO_ONLOADED pfnIOOnLoaded; /* Notify of program loaded */ ++ /* Get Processor's current and predicted load */ ++ WMD_IO_GETPROCLOAD pfnIOGetProcLoad; ++ WMD_MSG_CREATE pfnMsgCreate; /* Create message manager */ ++ /* Create message queue */ ++ WMD_MSG_CREATEQUEUE pfnMsgCreateQueue; ++ WMD_MSG_DELETE pfnMsgDelete; /* Delete message manager */ ++ /* Delete message queue */ ++ WMD_MSG_DELETEQUEUE pfnMsgDeleteQueue; ++ WMD_MSG_GET pfnMsgGet; /* Get a message */ ++ WMD_MSG_PUT pfnMsgPut; /* Send a message */ ++ /* Register for notif. */ ++ WMD_MSG_REGISTERNOTIFY pfnMsgRegisterNotify; ++ /* Set message queue id */ ++ WMD_MSG_SETQUEUEID pfnMsgSetQueueId; ++ } ; ++ ++/* ++ * ======== WMD_DRV_Entry ======== ++ * Purpose: ++ * Registers WMD functions with the class driver. Called only once ++ * by the WCD. The caller will first check WCD version compatibility, and ++ * then copy the interface functions into its own memory space. ++ * Parameters: ++ * ppDrvInterface Pointer to a location to receive a pointer to the ++ * mini driver interface. ++ * Returns: ++ * Requires: ++ * The code segment this function resides in must expect to be discarded ++ * after completion. ++ * Ensures: ++ * ppDrvInterface pointer initialized to WMD's function interface. ++ * No system resources are acquired by this function. ++ * Details: ++ * Win95: Called during the Device_Init phase. ++ */ ++ void WMD_DRV_Entry(OUT struct WMD_DRV_INTERFACE **ppDrvInterface, ++ IN CONST char *pstrWMDFileName); ++ ++#endif /* WMD_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdchnl.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdchnl.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdchnl.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdchnl.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,90 @@ ++/* ++ * wmdchnl.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wmdchnl.h ======== ++ * Description: ++ * Declares the upper edge channel class library functions required by ++ * all WMD / WCD driver interface tables. These functions are implemented ++ * by every class of WMD channel library. ++ * ++ * Public Functions: ++ * ++ * Notes: ++ * The function comment headers reside with the function typedefs in wmd.h. ++ * ++ *! Revision History: ++ *! ================ ++ *! 07-Jan-2002 ag Added cBufSize to WMD_CHNL_AddIOReq(). ++ *! 13-Oct-2000 jeh Added dwArg parameter to WMD_CHNL_AddIOReq(), added ++ *! WMD_CHNL_Idle and WMD_CHNL_RegisterNotify for DSPStream ++ *! support. ++ *! 11-Jul-1996 gp: Created. 
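[Editor's aside on the wmd.h interface table above] The comment spells out the contract: the mini-driver fills in struct WMD_DRV_INTERFACE once, the class driver copies it, and any hook left NULL is reported to clients as DSP_ENOTIMPL. A hedged sketch of a minimal WMD_DRV_Entry; which hooks are filled in below is illustrative, using the upper-edge entry points declared in wmddeh.h above and in wmdchnl.h, wmdio.h and wmdmsg.h elsewhere in this patch:

/* Illustrative only: a mini-driver's interface table. */
static struct WMD_DRV_INTERFACE drvInterface = {
	.dwWCDMajorVersion = WCD_MAJOR_VERSION,
	.dwWCDMinorVersion = WCD_MINOR_VERSION,
	/* Device and board hooks (pfnDevCreate, pfnBrdStart, ...) would be
	 * the mini-driver's own implementations; every hook left NULL is
	 * reported to clients as DSP_ENOTIMPL. */
	.pfnChnlCreate = WMD_CHNL_Create,
	.pfnChnlDestroy = WMD_CHNL_Destroy,
	.pfnChnlOpen = WMD_CHNL_Open,
	.pfnChnlClose = WMD_CHNL_Close,
	.pfnChnlAddIOReq = WMD_CHNL_AddIOReq,
	.pfnChnlGetIOC = WMD_CHNL_GetIOC,
	.pfnChnlCancelIO = WMD_CHNL_CancelIO,
	.pfnChnlFlushIO = WMD_CHNL_FlushIO,
	.pfnChnlGetInfo = WMD_CHNL_GetInfo,
	.pfnChnlGetMgrInfo = WMD_CHNL_GetMgrInfo,
	.pfnChnlIdle = WMD_CHNL_Idle,
	.pfnChnlRegisterNotify = WMD_CHNL_RegisterNotify,
	/* The remaining DEH, IO and MSG hooks are wired the same way. */
	.pfnDehCreate = WMD_DEH_Create,
	.pfnIOCreate = WMD_IO_Create,
	.pfnMsgCreate = WMD_MSG_Create,
};

void WMD_DRV_Entry(OUT struct WMD_DRV_INTERFACE **ppDrvInterface,
		   IN CONST char *pstrWMDFileName)
{
	/* The WCD first checks the WCD version fields and then copies the
	 * table into its own memory, so a static object is sufficient. */
	*ppDrvInterface = &drvInterface;
}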
++ */ ++ ++#ifndef WMDCHNL_ ++#define WMDCHNL_ ++ ++ extern DSP_STATUS WMD_CHNL_Create(OUT struct CHNL_MGR **phChnlMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CHNL_MGRATTRS ++ *pMgrAttrs); ++ ++ extern DSP_STATUS WMD_CHNL_Destroy(struct CHNL_MGR *hChnlMgr); ++ ++ extern DSP_STATUS WMD_CHNL_Open(OUT struct CHNL_OBJECT **phChnl, ++ struct CHNL_MGR *hChnlMgr, ++ CHNL_MODE uMode, ++ u32 uChnlId, ++ CONST IN OPTIONAL struct CHNL_ATTRS ++ *pAttrs); ++ ++ extern DSP_STATUS WMD_CHNL_Close(struct CHNL_OBJECT *hChnl); ++ ++ extern DSP_STATUS WMD_CHNL_AddIOReq(struct CHNL_OBJECT *hChnl, ++ void *pHostBuf, ++ u32 cBytes, u32 cBufSize, ++ OPTIONAL u32 dwDspAddr, ++ u32 dwArg); ++ ++ extern DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, ++ u32 dwTimeOut, ++ OUT struct CHNL_IOC *pIOC); ++ ++ extern DSP_STATUS WMD_CHNL_CancelIO(struct CHNL_OBJECT *hChnl); ++ ++ extern DSP_STATUS WMD_CHNL_FlushIO(struct CHNL_OBJECT *hChnl, ++ u32 dwTimeOut); ++ ++ extern DSP_STATUS WMD_CHNL_GetInfo(struct CHNL_OBJECT *hChnl, ++ OUT struct CHNL_INFO *pInfo); ++ ++ extern DSP_STATUS WMD_CHNL_GetMgrInfo(struct CHNL_MGR *hChnlMgr, ++ u32 uChnlID, ++ OUT struct CHNL_MGRINFO ++ *pMgrInfo); ++ ++ extern DSP_STATUS WMD_CHNL_Idle(struct CHNL_OBJECT *hChnl, ++ u32 dwTimeOut, bool fFlush); ++ ++ extern DSP_STATUS WMD_CHNL_RegisterNotify(struct CHNL_OBJECT *hChnl, ++ u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION ++ *hNotification); ++ ++#endif /* WMDCHNL_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdioctl.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdioctl.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdioctl.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdioctl.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,91 @@ ++/* ++ * wmdioctl.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wmdioctl.h ======== ++ * Description: ++ * BRIDGE Minidriver BRD_IOCtl reserved command definitions. ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Apr-2004 sb Updated HW typedefs ++ *! 16-Feb-2004 vp Added MMU endianness attributes to WMDIOCTL_EXTPROC ++ *! 21-Mar-2003 sb Changed WMDIOCTL_NUMOFMMUTLB from 7 to 32 ++ *! 14-May-2001 sg Added codes for PWR. ++ *! 10-Aug-2001 ag Added _SETMMUCONFIG ioctl used for DSP-MMU init. ++ *! 16-Nov-1999 rajesh ? ++ *! 18-Jun-1998 ag Moved EMIF, SDRAM_C, & CE space init to ENBLEXTMEM ioctl. ++ *! Added ENBLEXTMEM, RESETDSP, UNRESETDSP & ASSERTSIG ioctls. ++ *! 07-Jun-1998 ag Added JTAG_SELECT, MAP_TBC, GET_CONFIGURATION ioctls. ++ *! 26-Jan-1998 jeh: Added START, RECV, and SEND ioctls. ++ *! 07-Nov-1997 nn: Added command to interrupt DSP for interrupt test. ++ *! 20-Oct-1997 nn: Added commands for getting and resetting interrupt count. ++ *! 17-Oct-1997 gp: Moved to src/wmd. Standardized prefix. ++ *! 08-Oct-1997 nn: Created. 
++ */ ++ ++#ifndef WMDIOCTL_ ++#define WMDIOCTL_ ++ ++/* ------------------------------------ Hardware Abstraction Layer */ ++#include ++#include ++ ++/* Any IOCTLS at or above this value are reserved for standard WMD interfaces.*/ ++#define WMDIOCTL_RESERVEDBASE 0x8000 ++ ++#define WMDIOCTL_CHNLREAD (WMDIOCTL_RESERVEDBASE + 0x10) ++#define WMDIOCTL_CHNLWRITE (WMDIOCTL_RESERVEDBASE + 0x20) ++#define WMDIOCTL_GETINTRCOUNT (WMDIOCTL_RESERVEDBASE + 0x30) ++#define WMDIOCTL_RESETINTRCOUNT (WMDIOCTL_RESERVEDBASE + 0x40) ++#define WMDIOCTL_INTERRUPTDSP (WMDIOCTL_RESERVEDBASE + 0x50) ++#define WMDIOCTL_SETMMUCONFIG (WMDIOCTL_RESERVEDBASE + 0x60) /* DMMU */ ++#define WMDIOCTL_PWRCONTROL (WMDIOCTL_RESERVEDBASE + 0x70) /* PWR */ ++ ++/* attention, modifiers: ++ * Some of these control enumerations are made visible to user for power ++ * control, so any changes to this list, should also be updated in the user ++ * header file 'dbdefs.h' ***/ ++/* These ioctls are reserved for PWR power commands for the DSP */ ++#define WMDIOCTL_DEEPSLEEP (WMDIOCTL_PWRCONTROL + 0x0) ++#define WMDIOCTL_EMERGENCYSLEEP (WMDIOCTL_PWRCONTROL + 0x1) ++#define WMDIOCTL_WAKEUP (WMDIOCTL_PWRCONTROL + 0x2) ++#define WMDIOCTL_PWRENABLE (WMDIOCTL_PWRCONTROL + 0x3) ++#define WMDIOCTL_PWRDISABLE (WMDIOCTL_PWRCONTROL + 0x4) ++#define WMDIOCTL_CLK_CTRL (WMDIOCTL_PWRCONTROL + 0x7) ++#define WMDIOCTL_PWR_HIBERNATE (WMDIOCTL_PWRCONTROL + 0x8) /*DSP Initiated ++ * Hibernate*/ ++#define WMDIOCTL_PRESCALE_NOTIFY (WMDIOCTL_PWRCONTROL + 0x9) ++#define WMDIOCTL_POSTSCALE_NOTIFY (WMDIOCTL_PWRCONTROL + 0xA) ++#define WMDIOCTL_CONSTRAINT_REQUEST (WMDIOCTL_PWRCONTROL + 0xB) ++ ++/* Number of actual DSP-MMU TLB entrries */ ++#define WMDIOCTL_NUMOFMMUTLB 32 ++ ++struct WMDIOCTL_EXTPROC { ++ u32 ulDspVa; /* DSP virtual address */ ++ u32 ulGppPa; /* GPP physical address */ ++ /* GPP virtual address. __va does not work for ioremapped addresses */ ++ u32 ulGppVa; ++ u32 ulSize; /* Size of the mapped memory in bytes */ ++ enum HW_Endianism_t endianism; ++ enum HW_MMUMixedSize_t mixedMode; ++ enum HW_ElementSize_t elemSize; ++}; ++ ++#endif /* WMDIOCTL_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdio.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdio.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdio.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdio.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,53 @@ ++/* ++ * wmdio.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wmdio.h ======== ++ * Description: ++ * Declares the upper edge IO functions required by ++ * all WMD / WCD driver interface tables. ++ * ++ * Public Functions: ++ * ++ * Notes: ++ * Function comment headers reside with the function typedefs in wmd.h. ++ * ++ *! Revision History: ++ *! ================ ++ *! 27-Feb-2004 vp Added IVA releated function. ++ *! 06-Nov-2000 jeh Created. 
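[Editor's aside on the wmdioctl.h codes above] These WMDIOCTL_* values are the dwCmd numbers that reach a mini-driver through its WMD_DEV_CTRL hook (pfnDevCntrl in the wmd.h interface table). A hedged sketch of issuing the PWR sleep/wake commands that way; the helper name and the NULL pArgs are illustrative, and real callers may pass command-specific argument structures instead:

/* Illustrative: request DSP sleep or wake through the vendor control hook. */
static DSP_STATUS wmd_pwr_request(struct WMD_DEV_CONTEXT *hDevContext,
				  WMD_DEV_CTRL pfnDevCntrl, int wantSleep)
{
	u32 dwCmd = wantSleep ? WMDIOCTL_DEEPSLEEP : WMDIOCTL_WAKEUP;

	/* Command-specific error codes come back through pArgs when the
	 * command defines an argument structure; none is passed here. */
	return (*pfnDevCntrl)(hDevContext, dwCmd, NULL);
}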
++ */ ++ ++#ifndef WMDIO_ ++#define WMDIO_ ++ ++#include ++#include ++ ++ extern DSP_STATUS WMD_IO_Create(OUT struct IO_MGR **phIOMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct IO_ATTRS *pMgrAttrs); ++ ++ extern DSP_STATUS WMD_IO_Destroy(struct IO_MGR *hIOMgr); ++ ++ extern DSP_STATUS WMD_IO_OnLoaded(struct IO_MGR *hIOMgr); ++ ++ extern DSP_STATUS IVA_IO_OnLoaded(struct IO_MGR *hIOMgr); ++ extern DSP_STATUS WMD_IO_GetProcLoad(IN struct IO_MGR *hIOMgr, ++ OUT struct DSP_PROCLOADSTAT *pProcStat); ++ ++#endif /* WMDIO_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdmsg.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdmsg.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/dspbridge/wmdmsg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/dspbridge/wmdmsg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,70 @@ ++/* ++ * wmdmsg.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== wmdmsg.h ======== ++ * Description: ++ * Declares the upper edge message class library functions required by ++ * all WMD / WCD driver interface tables. These functions are ++ * implemented by every class of WMD channel library. ++ * ++ * Public Functions: ++ * ++ * Notes: ++ * Function comment headers reside with the function typedefs in wmd.h. ++ * ++ *! Revision History: ++ *! ================ ++ *! 06-Dec-2000 jeh Added uEventMask to WMD_MSG_RegisterNotify(). Added ++ *! WMD_MSG_SetQueueId(). ++ *! 17-Nov-2000 jeh Created. ++ */ ++ ++#ifndef WMDMSG_ ++#define WMDMSG_ ++ ++#include ++ ++ extern DSP_STATUS WMD_MSG_Create(OUT struct MSG_MGR **phMsgMgr, ++ struct DEV_OBJECT *hDevObject, ++ MSG_ONEXIT msgCallback); ++ ++ extern DSP_STATUS WMD_MSG_CreateQueue(struct MSG_MGR *hMsgMgr, ++ OUT struct MSG_QUEUE **phMsgQueue, ++ u32 dwId, u32 uMaxMsgs, ++ HANDLE h); ++ ++ extern void WMD_MSG_Delete(struct MSG_MGR *hMsgMgr); ++ ++ extern void WMD_MSG_DeleteQueue(struct MSG_QUEUE *hMsgQueue); ++ ++ extern DSP_STATUS WMD_MSG_Get(struct MSG_QUEUE *hMsgQueue, ++ struct DSP_MSG *pMsg, u32 uTimeout); ++ ++ extern DSP_STATUS WMD_MSG_Put(struct MSG_QUEUE *hMsgQueue, ++ IN CONST struct DSP_MSG *pMsg, ++ u32 uTimeout); ++ ++ extern DSP_STATUS WMD_MSG_RegisterNotify(struct MSG_QUEUE *hMsgQueue, ++ u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION ++ *hNotification); ++ ++ extern void WMD_MSG_SetQueueId(struct MSG_QUEUE *hMsgQueue, u32 dwId); ++ ++#endif /* WMDMSG_ */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/aic23.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/aic23.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/aic23.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/aic23.h 2008-12-25 00:26:37.000000000 +0100 +@@ -0,0 +1,116 @@ ++/* ++ * arch/arm/plat-omap/include/mach/aic23.h ++ * ++ * Hardware definitions for TI TLV320AIC23 audio codec ++ * ++ * Copyright (C) 2002 RidgeRun, Inc. 
++ * Author: Steve Johnson ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++ ++#ifndef __ASM_ARCH_AIC23_H ++#define __ASM_ARCH_AIC23_H ++ ++// Codec TLV320AIC23 ++#define LEFT_LINE_VOLUME_ADDR 0x00 ++#define RIGHT_LINE_VOLUME_ADDR 0x01 ++#define LEFT_CHANNEL_VOLUME_ADDR 0x02 ++#define RIGHT_CHANNEL_VOLUME_ADDR 0x03 ++#define ANALOG_AUDIO_CONTROL_ADDR 0x04 ++#define DIGITAL_AUDIO_CONTROL_ADDR 0x05 ++#define POWER_DOWN_CONTROL_ADDR 0x06 ++#define DIGITAL_AUDIO_FORMAT_ADDR 0x07 ++#define SAMPLE_RATE_CONTROL_ADDR 0x08 ++#define DIGITAL_INTERFACE_ACT_ADDR 0x09 ++#define RESET_CONTROL_ADDR 0x0F ++ ++// Left (right) line input volume control register ++#define LRS_ENABLED 0x0100 ++#define LIM_MUTED 0x0080 ++#define LIV_DEFAULT 0x0017 ++#define LIV_MAX 0x001f ++#define LIV_MIN 0x0000 ++ ++// Left (right) channel headphone volume control register ++#define LZC_ON 0x0080 ++#define LHV_DEFAULT 0x0079 ++#define LHV_MAX 0x007f ++#define LHV_MIN 0x0000 ++ ++// Analog audio path control register ++#define STA_REG(x) ((x)<<6) ++#define STE_ENABLED 0x0020 ++#define DAC_SELECTED 0x0010 ++#define BYPASS_ON 0x0008 ++#define INSEL_MIC 0x0004 ++#define MICM_MUTED 0x0002 ++#define MICB_20DB 0x0001 ++ ++// Digital audio path control register ++#define DACM_MUTE 0x0008 ++#define DEEMP_32K 0x0002 ++#define DEEMP_44K 0x0004 ++#define DEEMP_48K 0x0006 ++#define ADCHP_ON 0x0001 ++ ++// Power control down register ++#define DEVICE_POWER_OFF 0x0080 ++#define CLK_OFF 0x0040 ++#define OSC_OFF 0x0020 ++#define OUT_OFF 0x0010 ++#define DAC_OFF 0x0008 ++#define ADC_OFF 0x0004 ++#define MIC_OFF 0x0002 ++#define LINE_OFF 0x0001 ++ ++// Digital audio interface register ++#define MS_MASTER 0x0040 ++#define LRSWAP_ON 0x0020 ++#define LRP_ON 0x0010 ++#define IWL_16 0x0000 ++#define IWL_20 0x0004 ++#define IWL_24 0x0008 ++#define IWL_32 0x000C ++#define FOR_I2S 0x0002 ++#define FOR_DSP 0x0003 ++ ++// Sample rate control register ++#define CLKOUT_HALF 0x0080 ++#define CLKIN_HALF 0x0040 ++#define BOSR_384fs 0x0002 // BOSR_272fs when in USB mode ++#define USB_CLK_ON 0x0001 ++#define SR_MASK 0xf ++#define CLKOUT_SHIFT 7 ++#define CLKIN_SHIFT 6 ++#define SR_SHIFT 2 ++#define BOSR_SHIFT 1 ++ ++// Digital interface register ++#define ACT_ON 0x0001 ++ ++#define TLV320AIC23ID1 (0x1a) // cs low ++#define TLV320AIC23ID2 (0x1b) // cs high ++ ++void aic23_power_up(void); ++void 
aic23_power_down(void); ++ ++#endif /* __ASM_ARCH_AIC23_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/board-nokia.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/board-nokia.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/board-nokia.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/board-nokia.h 2011-09-04 11:31:05.000000000 +0200 +@@ -32,6 +32,7 @@ extern void n800_mmc_slot1_cover_handler + + #define BT_CHIP_CSR 1 + #define BT_CHIP_TI 2 ++#define BT_CHIP_BCM 3 + + #define BT_SYSCLK_12 1 + #define BT_SYSCLK_38_4 2 +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/board-rx51.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/board-rx51.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/board-rx51.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/board-rx51.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,47 @@ ++/* ++ * linux/include/asm-arm/arch-omap/board-rx51.h ++ * ++ * Copyright (C) 2007 Nokia ++ * ++ * Hardware definitions for Nokia RX-51 ++ * based on board-3430sdp.h ++ * ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#ifndef __ASM_ARCH_OMAP_BOARD_RX51_H ++#define __ASM_ARCH_OMAP_BOARD_RX51_H ++ ++#include ++ ++#ifdef CONFIG_USB_MUSB_SOC ++extern void rx51_usb_init(void); ++#else ++static inline void rx51_usb_init(void) { } ++#endif ++ ++extern void omap_bt_init(struct omap_bluetooth_config *bt_config); ++ ++struct omap_sdrc_params *rx51_get_sdram_timings(void); ++ ++#endif /* __ASM_ARCH_OMAP_BOARD_RX51_H */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/board-rx71.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/board-rx71.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/board-rx71.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/board-rx71.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,41 @@ ++/* ++ * linux/include/asm-arm/arch-omap/board-rx71.h ++ * ++ * Copyright (C) 2007 Nokia ++ * ++ * Hardware definitions for Nokia RX-71 ++ * based on board-3430sdp.h ++ * ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#ifndef __ASM_ARCH_OMAP_BOARD_RX71_H ++#define __ASM_ARCH_OMAP_BOARD_RX71_H ++ ++#include ++ ++extern void n800_bt_init(void); ++ ++struct omap_sdrc_params *rx51_get_sdram_timings(void); ++ ++#endif /* __ASM_ARCH_OMAP_BOARD_RX71_H */ ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/clockdomain.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/clockdomain.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/clockdomain.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/clockdomain.h 2011-09-04 11:31:05.000000000 +0200 +@@ -95,7 +95,8 @@ int clkdm_register(struct clockdomain *c + int clkdm_unregister(struct clockdomain *clkdm); + struct clockdomain *clkdm_lookup(const char *name); + +-int clkdm_for_each(int (*fn)(struct clockdomain *clkdm)); ++int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user), ++ void *user); + struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm); + + void omap2_clkdm_allow_idle(struct clockdomain *clkdm); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/clock.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/clock.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/clock.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/clock.h 2011-09-04 11:31:05.000000000 +0200 +@@ -10,6 +10,8 @@ + * published by the Free Software Foundation. + */ + ++#include ++ + #ifndef __ARCH_ARM_OMAP_CLOCK_H + #define __ARCH_ARM_OMAP_CLOCK_H + +@@ -75,6 +77,40 @@ struct clk_child { + u8 flags; + }; + ++/** ++ * struct clk_notifier - associate a clk with a notifier ++ * @clk: struct clk * to associate the notifier with ++ * @notifier_head: a blocking_notifier_head for this clk ++ * @node: linked list pointers ++ * ++ * A list of struct clk_notifier is maintained by the notifier code. ++ * An entry is created whenever code registers the first notifier on a ++ * particular @clk. Future notifiers on that @clk are added to the ++ * @notifier_head. ++ */ ++struct clk_notifier { ++ struct clk *clk; ++ struct blocking_notifier_head notifier_head; ++ struct list_head node; ++}; ++ ++/** ++ * struct clk_notifier_data - rate data to pass to the notifier callback ++ * @clk: struct clk * being changed ++ * @old_rate: previous rate of this clock ++ * @new_rate: new rate of this clock ++ * ++ * For a pre-notifier, old_rate is the clock's rate before this rate ++ * change, and new_rate is what the rate will be in the future. For a ++ * post-notifier, old_rate and new_rate are both set to the clock's ++ * current rate (this was done to optimize the implementation). 
++ */ ++struct clk_notifier_data { ++ struct clk *clk; ++ unsigned long old_rate; ++ unsigned long new_rate; ++}; ++ + struct clk { + struct list_head node; + const char *name; +@@ -91,6 +127,7 @@ struct clk { + void (*init)(struct clk *); + int (*enable)(struct clk *); + void (*disable)(struct clk *); ++ u16 notifier_count; + __u8 enable_bit; + __s8 usecount; + u8 idlest_bit; +@@ -121,6 +158,8 @@ struct clk_functions { + int (*clk_enable)(struct clk *clk); + void (*clk_disable)(struct clk *clk); + long (*clk_round_rate)(struct clk *clk, unsigned long rate); ++ long (*clk_round_rate_parent)(struct clk *clk, ++ struct clk *parent); + int (*clk_set_rate)(struct clk *clk, unsigned long rate); + int (*clk_set_parent)(struct clk *clk, struct clk *parent); + struct clk * (*clk_get_parent)(struct clk *clk); +@@ -144,6 +183,8 @@ extern void followparent_recalc(struct c + extern void clk_allow_idle(struct clk *clk); + extern void clk_deny_idle(struct clk *clk); + extern void clk_enable_init_clocks(void); ++extern int clk_notifier_register(struct clk *clk, struct notifier_block *nb); ++extern int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); + #ifdef CONFIG_CPU_FREQ + extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table); + #endif +@@ -201,4 +242,31 @@ void omap_clk_del_child(struct clk *clk, + #define CLK_REG_IN_PRM (1 << 0) + #define CLK_REG_IN_SCM (1 << 1) + ++/* ++ * Clk notifier callback types ++ * ++ * Since the notifier is called with interrupts disabled, any actions ++ * taken by callbacks must be extremely fast and lightweight. ++ * ++ * CLK_PRE_RATE_CHANGE - called after all callbacks have approved the ++ * rate change, immediately before the clock rate is changed, to ++ * indicate that the rate change will proceed. Drivers must ++ * immediately terminate any operations that will be affected by ++ * the rate change. Callbacks must always return NOTIFY_DONE. ++ * ++ * CLK_ABORT_RATE_CHANGE: called if the rate change failed for some ++ * reason after CLK_PRE_RATE_CHANGE. In this case, all registered ++ * notifiers on the clock will be called with ++ * CLK_ABORT_RATE_CHANGE. Callbacks must always return ++ * NOTIFY_DONE. ++ * ++ * CLK_POST_RATE_CHANGE - called after the clock rate change has ++ * successfully completed. Callbacks must always return ++ * NOTIFY_DONE. 
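(For illustration: a consumer of these rate-change events hooks in through the standard notifier-block mechanism. The sketch below is a minimal assumed example rather than part of the patch; my_dev, my_clk_rate_cb and the "fck" clock id are placeholders, and it relies only on clk_notifier_register() plus the CLK_*_RATE_CHANGE event codes defined just below. Since the chain runs with interrupts disabled, the callback keeps its work small and always returns NOTIFY_DONE.)

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
/* clk_notifier_register() and struct clk_notifier_data come from the
 * mach/clock.h additions above */

struct my_dev {
	struct notifier_block nb;	/* embedded so container_of() works */
	unsigned long cur_rate;
};

static int my_clk_rate_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct my_dev *md = container_of(nb, struct my_dev, nb);

	switch (event) {
	case CLK_PRE_RATE_CHANGE:
		/* stop work that depends on the old rate; keep this fast,
		 * the notifiers are called with interrupts disabled */
		break;
	case CLK_POST_RATE_CHANGE:
		md->cur_rate = cnd->new_rate;	/* adopt the new rate */
		break;
	case CLK_ABORT_RATE_CHANGE:
		/* change failed; the old rate is still in effect */
		break;
	}
	return NOTIFY_DONE;
}

static int my_hook_rate_notifier(struct device *dev, struct my_dev *md)
{
	struct clk *fck = clk_get(dev, "fck");	/* "fck" is a placeholder id */
	int ret;

	if (IS_ERR(fck))
		return PTR_ERR(fck);
	md->nb.notifier_call = my_clk_rate_cb;
	ret = clk_notifier_register(fck, &md->nb);
	if (ret)
		clk_put(fck);
	return ret;
}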
++ * ++ */ ++#define CLK_PRE_RATE_CHANGE 1 ++#define CLK_ABORT_RATE_CHANGE 2 ++#define CLK_POST_RATE_CHANGE 3 ++ + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/common.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/common.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/common.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/common.h 2011-09-04 11:31:05.000000000 +0200 +@@ -33,7 +33,7 @@ struct sys_timer; + + extern void omap_map_common_io(void); + extern struct sys_timer omap_timer; +-#ifdef CONFIG_I2C_OMAP ++#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE) + extern int omap_register_i2c_bus(int bus_id, u32 clkrate, + struct i2c_board_info const *info, + unsigned len); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/control.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/control.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/control.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/control.h 2011-09-04 11:31:05.000000000 +0200 +@@ -109,6 +109,8 @@ + #define OMAP24XX_CONTROL_TEST_KEY_8 (OMAP2_CONTROL_GENERAL + 0x00e0) + #define OMAP24XX_CONTROL_TEST_KEY_9 (OMAP2_CONTROL_GENERAL + 0x00e4) + ++#define OMAP343X_CONTROL_PADCONF_SYSNIRQ (OMAP2_CONTROL_INTERFACE + 0x01b0) ++ + /* 34xx-only CONTROL_GENERAL register offsets */ + #define OMAP343X_CONTROL_PADCONF_OFF (OMAP2_CONTROL_GENERAL + 0x0000) + #define OMAP343X_CONTROL_MEM_DFTRW0 (OMAP2_CONTROL_GENERAL + 0x0008) +@@ -150,8 +152,51 @@ + #define OMAP343X_CONTROL_FUSE_SR (OMAP2_CONTROL_GENERAL + 0x0130) + #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) + #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) +-#define OMAP343X_CONTROL_PBIAS_LITE (OMAP2_CONTROL_GENERAL + 0x02b0) +-#define OMAP343X_CONTROL_TEMP_SENSOR (OMAP2_CONTROL_GENERAL + 0x02b4) ++#define OMAP343X_CONTROL_DEBOBS(i) (OMAP2_CONTROL_GENERAL + 0x01B0 \ ++ + ((i) >> 1) * 4 + (!(i) & 1) * 2) ++#define OMAP343X_CONTROL_PROG_IO0 (OMAP2_CONTROL_GENERAL + 0x01D4) ++#define OMAP343X_CONTROL_PROG_IO1 (OMAP2_CONTROL_GENERAL + 0x01D8) ++#define OMAP343X_CONTROL_DSS_DPLL_SPREADING (OMAP2_CONTROL_GENERAL + 0x01E0) ++#define OMAP343X_CONTROL_CORE_DPLL_SPREADING (OMAP2_CONTROL_GENERAL + 0x01E4) ++#define OMAP343X_CONTROL_PER_DPLL_SPREADING (OMAP2_CONTROL_GENERAL + 0x01E8) ++#define OMAP343X_CONTROL_USBHOST_DPLL_SPREADING (OMAP2_CONTROL_GENERAL + 0x01EC) ++#define OMAP343X_CONTROL_PBIAS_LITE (OMAP2_CONTROL_GENERAL + 0x02B0) ++#define OMAP343X_CONTROL_TEMP_SENSOR (OMAP2_CONTROL_GENERAL + 0x02B4) ++#define OMAP343X_CONTROL_SRAMLDO4 (OMAP2_CONTROL_GENERAL + 0x02B8) ++#define OMAP343X_CONTROL_SRAMLDO5 (OMAP2_CONTROL_GENERAL + 0x02C0) ++#define OMAP343X_CONTROL_CSI (OMAP2_CONTROL_GENERAL + 0x02C4) ++ ++ ++/* 34xx PADCONF register offsets */ ++#define OMAP343X_PADCONF_ETK(i) (OMAP2_CONTROL_PADCONFS + 0x5a8 + \ ++ (i)*2) ++#define OMAP343X_PADCONF_ETK_CLK OMAP343X_PADCONF_ETK(0) ++#define OMAP343X_PADCONF_ETK_CTL OMAP343X_PADCONF_ETK(1) ++#define OMAP343X_PADCONF_ETK_D0 OMAP343X_PADCONF_ETK(2) ++#define OMAP343X_PADCONF_ETK_D1 OMAP343X_PADCONF_ETK(3) ++#define OMAP343X_PADCONF_ETK_D2 OMAP343X_PADCONF_ETK(4) ++#define OMAP343X_PADCONF_ETK_D3 OMAP343X_PADCONF_ETK(5) ++#define OMAP343X_PADCONF_ETK_D4 OMAP343X_PADCONF_ETK(6) ++#define OMAP343X_PADCONF_ETK_D5 OMAP343X_PADCONF_ETK(7) ++#define OMAP343X_PADCONF_ETK_D6 OMAP343X_PADCONF_ETK(8) 
++#define OMAP343X_PADCONF_ETK_D7 OMAP343X_PADCONF_ETK(9) ++#define OMAP343X_PADCONF_ETK_D8 OMAP343X_PADCONF_ETK(10) ++#define OMAP343X_PADCONF_ETK_D9 OMAP343X_PADCONF_ETK(11) ++#define OMAP343X_PADCONF_ETK_D10 OMAP343X_PADCONF_ETK(12) ++#define OMAP343X_PADCONF_ETK_D11 OMAP343X_PADCONF_ETK(13) ++#define OMAP343X_PADCONF_ETK_D12 OMAP343X_PADCONF_ETK(14) ++#define OMAP343X_PADCONF_ETK_D13 OMAP343X_PADCONF_ETK(15) ++#define OMAP343X_PADCONF_ETK_D14 OMAP343X_PADCONF_ETK(16) ++#define OMAP343X_PADCONF_ETK_D15 OMAP343X_PADCONF_ETK(17) ++ ++/* 34xx GENERAL_WKUP regist offsets */ ++#define OMAP343X_CONTROL_WKUP_DEBOBSMUX(i) (OMAP343X_CONTROL_GENERAL_WKUP + \ ++ 0x008 + (i)) ++#define OMAP343X_CONTROL_WKUP_DEBOBS0 (OMAP343X_CONTROL_GENERAL_WKUP + 0x008) ++#define OMAP343X_CONTROL_WKUP_DEBOBS1 (OMAP343X_CONTROL_GENERAL_WKUP + 0x00C) ++#define OMAP343X_CONTROL_WKUP_DEBOBS2 (OMAP343X_CONTROL_GENERAL_WKUP + 0x010) ++#define OMAP343X_CONTROL_WKUP_DEBOBS3 (OMAP343X_CONTROL_GENERAL_WKUP + 0x014) ++#define OMAP343X_CONTROL_WKUP_DEBOBS4 (OMAP343X_CONTROL_GENERAL_WKUP + 0x018) + + /* + * REVISIT: This list of registers is not comprehensive - there are more +@@ -209,8 +254,12 @@ + #define OMAP2_PBIASLITEVMODE0 (1 << 0) + + /* CONTROL_PADCONF_X bits */ +-#define OMAP3_PADCONF_WAKEUPEVENT0 (1 << 15) +-#define OMAP3_PADCONF_WAKEUPENABLE0 (1 << 14) ++#define OMAP3_PADCONF_WAKEUPEVENT0 (1 << 15) ++#define OMAP3_PADCONF_WAKEUPENABLE0 (1 << 14) ++ ++#define OMAP343X_SCRATCHPAD_ROM (OMAP343X_CTRL_BASE + 0x860) ++#define OMAP343X_SCRATCHPAD (OMAP343X_CTRL_BASE + 0x910) ++#define OMAP343X_SCRATCHPAD_ROM_OFFSET 0x19C + + /* CONTROL_IVA2_BOOTMOD bits */ + #define OMAP3_IVA2_BOOTMOD_SHIFT 0 +@@ -226,6 +275,15 @@ extern u32 omap_ctrl_readl(u16 offset); + extern void omap_ctrl_writeb(u8 val, u16 offset); + extern void omap_ctrl_writew(u16 val, u16 offset); + extern void omap_ctrl_writel(u32 val, u16 offset); ++ ++extern void omap3_save_scratchpad_contents(void); ++extern void omap3_clear_scratchpad_contents(void); ++extern u32 *get_restore_pointer(void); ++extern u32 *get_es3_restore_pointer(void); ++extern u32 omap3_arm_context[128]; ++extern void omap3_control_save_context(void); ++extern void omap3_control_restore_context(void); ++ + #else + #define omap_ctrl_base_get() 0 + #define omap_ctrl_readb(x) 0 +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/cpu.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/cpu.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/cpu.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/cpu.h 2011-09-04 11:31:05.000000000 +0200 +@@ -339,6 +339,7 @@ IS_OMAP_TYPE(3430, 0x3430) + #define OMAP3430_REV_ES2_0 0x34301034 + #define OMAP3430_REV_ES2_1 0x34302034 + #define OMAP3430_REV_ES3_0 0x34303034 ++#define OMAP3430_REV_ES3_1 0x34304034 + + /* + * omap_chip bits +@@ -354,13 +355,27 @@ IS_OMAP_TYPE(3430, 0x3430) + * use omap_chip_is(). 
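(For illustration: run-time checks against these chip masks go through omap_chip_is(). A minimal sketch, assuming the OMAP_CHIP_INIT() initializer for struct omap_chip_id that this cpu.h is expected to provide, and using the CHIP_GE_OMAP3430ES2 mask introduced just below; my_apply_es2_workaround is a made-up caller.)

static void my_apply_es2_workaround(void)
{
	/* OMAP_CHIP_INIT() assumed from cpu.h; it fills struct omap_chip_id */
	const struct omap_chip_id es2_plus = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2);

	if (omap_chip_is(es2_plus)) {
		/* ES2.0 or later silicon: the newer behaviour is safe */
	} else {
		/* ES1.0: keep the conservative path */
	}
}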
+ * + */ +-#define CHIP_IS_OMAP2420 (1 << 0) +-#define CHIP_IS_OMAP2430 (1 << 1) +-#define CHIP_IS_OMAP3430 (1 << 2) +-#define CHIP_IS_OMAP3430ES1 (1 << 3) +-#define CHIP_IS_OMAP3430ES2 (1 << 4) ++#define CHIP_IS_OMAP2420 (1 << 0) ++#define CHIP_IS_OMAP2430 (1 << 1) ++#define CHIP_IS_OMAP3430 (1 << 2) ++#define CHIP_IS_OMAP3430ES1 (1 << 3) ++#define CHIP_IS_OMAP3430ES2 (1 << 4) ++#define CHIP_IS_OMAP3430ES3_0 (1 << 5) ++#define CHIP_IS_OMAP3430ES3_1 (1 << 6) ++ ++#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430) ++ ++/* ++ * "GE" here represents "greater than or equal to" in terms of ES ++ * levels. So CHIP_GE_OMAP3430ES2 is intended to match all OMAP3430 ++ * chips at ES2 and beyond, but not, for example, any OMAP lines after ++ * OMAP3. ++ */ ++#define CHIP_GE_OMAP3430ES2 (CHIP_IS_OMAP3430ES2 | \ ++ CHIP_IS_OMAP3430ES3_0 | \ ++ CHIP_IS_OMAP3430ES3_1) ++#define CHIP_GE_OMAP3430ES3_1 (CHIP_IS_OMAP3430ES3_1) + +-#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430) + + int omap_chip_is(struct omap_chip_id oci); + int omap_type(void); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/debobs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/debobs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/debobs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/debobs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,7 @@ ++#ifndef __DEBOBS_H ++#define __DEBOBS_H ++ ++void debug_gpio_set(unsigned gpio, int value); ++int debug_gpio_get(unsigned gpio); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/display.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/display.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/display.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/display.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,588 @@ ++/* ++ * linux/include/asm-arm/arch-omap/display.h ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * Author: Tomi Valkeinen ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program. If not, see . 
++ */ ++ ++#ifndef __ASM_ARCH_OMAP_DISPLAY_H ++#define __ASM_ARCH_OMAP_DISPLAY_H ++ ++#include ++#include ++#include ++ ++#define DISPC_IRQ_FRAMEDONE (1 << 0) ++#define DISPC_IRQ_VSYNC (1 << 1) ++#define DISPC_IRQ_EVSYNC_EVEN (1 << 2) ++#define DISPC_IRQ_EVSYNC_ODD (1 << 3) ++#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4) ++#define DISPC_IRQ_PROG_LINE_NUM (1 << 5) ++#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6) ++#define DISPC_IRQ_GFX_END_WIN (1 << 7) ++#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8) ++#define DISPC_IRQ_OCP_ERR (1 << 9) ++#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10) ++#define DISPC_IRQ_VID1_END_WIN (1 << 11) ++#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12) ++#define DISPC_IRQ_VID2_END_WIN (1 << 13) ++#define DISPC_IRQ_SYNC_LOST (1 << 14) ++#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15) ++#define DISPC_IRQ_WAKEUP (1 << 16) ++ ++enum omap_display_type { ++ OMAP_DISPLAY_TYPE_NONE = 0, ++ OMAP_DISPLAY_TYPE_DPI = 1 << 0, ++ OMAP_DISPLAY_TYPE_DBI = 1 << 1, ++ OMAP_DISPLAY_TYPE_SDI = 1 << 2, ++ OMAP_DISPLAY_TYPE_DSI = 1 << 3, ++ OMAP_DISPLAY_TYPE_VENC = 1 << 4, ++}; ++ ++enum omap_plane { ++ OMAP_DSS_GFX = 0, ++ OMAP_DSS_VIDEO1 = 1, ++ OMAP_DSS_VIDEO2 = 2 ++}; ++ ++enum omap_channel { ++ OMAP_DSS_CHANNEL_LCD = 0, ++ OMAP_DSS_CHANNEL_DIGIT = 1, ++}; ++ ++enum omap_color_mode { ++ OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */ ++ OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */ ++ OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */ ++ OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */ ++ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */ ++ OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */ ++ OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */ ++ OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */ ++ OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */ ++ OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */ ++ OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */ ++ OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ ++ OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ ++ OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ ++ ++ OMAP_DSS_COLOR_GFX_OMAP2 = ++ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | ++ OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 | ++ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 | ++ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P, ++ ++ OMAP_DSS_COLOR_VID_OMAP2 = ++ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | ++ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 | ++ OMAP_DSS_COLOR_UYVY, ++ ++ OMAP_DSS_COLOR_GFX_OMAP3 = ++ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | ++ OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 | ++ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 | ++ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | ++ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 | ++ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, ++ ++ OMAP_DSS_COLOR_VID1_OMAP3 = ++ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 | ++ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P | ++ OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY, ++ ++ OMAP_DSS_COLOR_VID2_OMAP3 = ++ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 | ++ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | ++ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 | ++ OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 | ++ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, ++}; ++ ++enum omap_lcd_display_type { ++ OMAP_DSS_LCD_DISPLAY_STN, ++ OMAP_DSS_LCD_DISPLAY_TFT, ++}; ++ ++enum omap_dss_load_mode { ++ OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, ++ OMAP_DSS_LOAD_CLUT_ONLY = 1, ++ OMAP_DSS_LOAD_FRAME_ONLY = 2, ++ OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3, ++}; ++ ++enum omap_dss_color_key_type { ++ 
OMAP_DSS_COLOR_KEY_GFX_DST = 0, ++ OMAP_DSS_COLOR_KEY_VID_SRC = 1, ++}; ++ ++enum omap_rfbi_te_mode { ++ OMAP_DSS_RFBI_TE_MODE_1 = 1, ++ OMAP_DSS_RFBI_TE_MODE_2 = 2, ++}; ++ ++enum omap_panel_config { ++ OMAP_DSS_LCD_IVS = 1<<0, ++ OMAP_DSS_LCD_IHS = 1<<1, ++ OMAP_DSS_LCD_IPC = 1<<2, ++ OMAP_DSS_LCD_IEO = 1<<3, ++ OMAP_DSS_LCD_RF = 1<<4, ++ OMAP_DSS_LCD_ONOFF = 1<<5, ++ ++ OMAP_DSS_LCD_TFT = 1<<20, ++}; ++ ++enum omap_dss_venc_type { ++ OMAP_DSS_VENC_TYPE_COMPOSITE, ++ OMAP_DSS_VENC_TYPE_SVIDEO, ++}; ++ ++struct omap_display; ++struct omap_panel; ++struct omap_ctrl; ++ ++/* RFBI */ ++ ++struct rfbi_timings { ++ int cs_on_time; ++ int cs_off_time; ++ int we_on_time; ++ int we_off_time; ++ int re_on_time; ++ int re_off_time; ++ int we_cycle_time; ++ int re_cycle_time; ++ int cs_pulse_width; ++ int access_time; ++ ++ int clk_div; ++ ++ u32 tim[5]; /* set by rfbi_convert_timings() */ ++ ++ int converted; ++}; ++ ++void omap_rfbi_write_command(const void *buf, u32 len); ++void omap_rfbi_read_data(void *buf, u32 len); ++void omap_rfbi_write_data(const void *buf, u32 len); ++void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, ++ u16 x, u16 y, ++ u16 w, u16 h); ++int omap_rfbi_enable_te(bool enable, unsigned line); ++int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, ++ unsigned hs_pulse_time, unsigned vs_pulse_time, ++ int hs_pol_inv, int vs_pol_inv, int extif_div); ++ ++/* DSI */ ++int dsi_vc_dcs_write(int channel, u8 *data, int len); ++int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len); ++int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen); ++int dsi_vc_set_max_rx_packet_size(int channel, u16 len); ++int dsi_vc_send_null(int channel); ++ ++/* Board specific data */ ++struct omap_dss_display_config { ++ enum omap_display_type type; ++ ++ union { ++ struct { ++ u8 data_lines; ++ } dpi; ++ ++ struct { ++ u8 channel; ++ u8 data_lines; ++ } rfbi; ++ ++ struct { ++ u8 datapairs; ++ unsigned pad_off_pe : 1; /* pull pads if disabled */ ++ unsigned pad_off_pu : 1; /* pull up */ ++ } sdi; ++ ++ struct { ++ u8 clk_lane; ++ u8 clk_pol; ++ u8 data1_lane; ++ u8 data1_pol; ++ u8 data2_lane; ++ u8 data2_pol; ++ unsigned long lp_clk_hz; ++ unsigned long ddr_clk_hz; ++ ++ bool ext_te; ++ u8 ext_te_gpio; ++ } dsi; ++ ++ struct { ++ enum omap_dss_venc_type type; ++ } venc; ++ } u; ++ ++ int panel_reset_gpio; ++ int ctrl_reset_gpio; ++ ++ int max_backlight_level; ++ ++ const char *name; /* for debug */ ++ const char *ctrl_name; ++ const char *panel_name; ++ ++ void *panel_data; ++ void *ctrl_data; ++ ++ /* platform specific enable/disable */ ++ int (*panel_enable)(struct omap_display *display); ++ void (*panel_disable)(struct omap_display *display); ++ int (*ctrl_enable)(struct omap_display *display); ++ void (*ctrl_disable)(struct omap_display *display); ++ int (*set_backlight)(struct omap_display *display, ++ int level); ++ int (*get_backlight)(struct omap_display *display); ++}; ++ ++struct device; ++ ++/* Board specific data */ ++struct omap_dss_board_info { ++ int (*get_last_off_on_transaction_id)(struct device *dev); ++ void (*set_min_bus_tput)(struct device *dev, u8 agent_id, unsigned long r); ++ int (*dsi_power_up)(void); ++ void (*dsi_power_down)(void); ++ struct { ++ u16 low; ++ u16 high; ++ } fifo_thresholds[3]; ++ int num_displays; ++ struct omap_dss_display_config *displays[]; ++}; ++ ++struct omap_ctrl { ++ struct module *owner; ++ ++ const char *name; ++ ++ int (*init)(struct omap_display *display); ++ void (*cleanup)(struct omap_display *display); ++ int 
(*enable)(struct omap_display *display); ++ void (*disable)(struct omap_display *display); ++ int (*suspend)(struct omap_display *display); ++ int (*resume)(struct omap_display *display); ++ void (*setup_update)(struct omap_display *display, ++ u16 x, u16 y, u16 w, u16 h); ++ ++ int (*enable_te)(struct omap_display *display, bool enable); ++ int (*wait_for_te)(struct omap_display *display); ++ ++ u8 (*get_rotate)(struct omap_display *display); ++ int (*set_rotate)(struct omap_display *display, u8 rotate); ++ ++ bool (*get_mirror)(struct omap_display *display); ++ int (*set_mirror)(struct omap_display *display, bool enable); ++ ++ int (*run_test)(struct omap_display *display, int test); ++ int (*memory_read)(struct omap_display *display, ++ void *buf, size_t size, ++ u16 x, u16 y, u16 w, u16 h); ++ ++ u8 pixel_size; ++ ++ struct rfbi_timings timings; ++ ++ void *priv; ++}; ++ ++struct omap_video_timings { ++ /* Unit: pixels */ ++ u16 x_res; ++ /* Unit: pixels */ ++ u16 y_res; ++ /* Unit: KHz */ ++ u32 pixel_clock; ++ /* Unit: pixel clocks */ ++ u16 hsw; /* Horizontal synchronization pulse width */ ++ /* Unit: pixel clocks */ ++ u16 hfp; /* Horizontal front porch */ ++ /* Unit: pixel clocks */ ++ u16 hbp; /* Horizontal back porch */ ++ /* Unit: line clocks */ ++ u16 vsw; /* Vertical synchronization pulse width */ ++ /* Unit: line clocks */ ++ u16 vfp; /* Vertical front porch */ ++ /* Unit: line clocks */ ++ u16 vbp; /* Vertical back porch */ ++ ++}; ++ ++#ifdef CONFIG_OMAP2_DSS_VENC ++/* Hardcoded timings for tv modes. Venc only uses these to ++ * identify the mode, and does not actually use the configs ++ * itself. However, the configs should be something that ++ * a normal monitor can also show */ ++const extern struct omap_video_timings omap_dss_pal_timings; ++const extern struct omap_video_timings omap_dss_ntsc_timings; ++#endif ++ ++struct omap_panel { ++ struct module *owner; ++ ++ const char *name; ++ ++ int (*init)(struct omap_display *display); ++ void (*cleanup)(struct omap_display *display); ++ int (*remove)(struct omap_display *display); ++ int (*enable)(struct omap_display *display); ++ void (*disable)(struct omap_display *display); ++ int (*suspend)(struct omap_display *display); ++ int (*resume)(struct omap_display *display); ++ int (*run_test)(struct omap_display *display, int test); ++ ++ struct omap_video_timings timings; ++ ++ int acbi; /* ac-bias pin transitions per interrupt */ ++ /* Unit: line clocks */ ++ int acb; /* ac-bias pin frequency */ ++ ++ enum omap_panel_config config; ++ ++ u8 recommended_bpp; ++ ++ void *priv; ++}; ++ ++/* XXX perhaps this should be removed */ ++enum omap_dss_overlay_managers { ++ OMAP_DSS_OVL_MGR_LCD, ++ OMAP_DSS_OVL_MGR_TV, ++}; ++ ++struct omap_overlay_manager; ++ ++enum omap_dss_rotation_type { ++ OMAP_DSS_ROT_DMA = 0, ++ OMAP_DSS_ROT_VRFB = 1, ++}; ++ ++/* clockwise rotation angle */ ++enum omap_dss_rotation_angle { ++ OMAP_DSS_ROT_0 = 0, ++ OMAP_DSS_ROT_90 = 1, ++ OMAP_DSS_ROT_180 = 2, ++ OMAP_DSS_ROT_270 = 3, ++}; ++ ++struct omap_overlay_info { ++ bool enabled; ++ ++ u32 paddr; ++ void __iomem *vaddr; ++ u16 screen_width; ++ u16 width; ++ u16 height; ++ enum omap_color_mode color_mode; ++ u8 rotation; ++ enum omap_dss_rotation_type rotation_type; ++ bool mirror; ++ ++ u16 pos_x; ++ u16 pos_y; ++ u16 out_width; /* if 0, out_width == width */ ++ u16 out_height; /* if 0, out_height == height */ ++ u8 global_alpha; ++ u16 fifo_threshold_low; ++ u16 fifo_threshold_high; ++}; ++ ++enum omap_overlay_caps { ++ OMAP_DSS_OVL_CAP_SCALE = 1 << 
0, ++ OMAP_DSS_OVL_CAP_DISPC = 1 << 1, ++}; ++ ++struct omap_overlay { ++ struct kobject kobj; ++ struct list_head list; ++ ++ const char *name; ++ int id; ++ struct omap_overlay_manager *manager; ++ enum omap_color_mode supported_modes; ++ struct omap_overlay_info info; ++ enum omap_overlay_caps caps; ++ ++ int (*set_manager)(struct omap_overlay *ovl, ++ struct omap_overlay_manager *mgr); ++ int (*unset_manager)(struct omap_overlay *ovl); ++ ++ int (*set_overlay_info)(struct omap_overlay *ovl, ++ struct omap_overlay_info *info); ++ void (*get_overlay_info)(struct omap_overlay *ovl, ++ struct omap_overlay_info *info); ++}; ++ ++enum omap_overlay_manager_caps { ++ OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0, ++}; ++ ++struct omap_overlay_manager { ++ struct kobject kobj; ++ struct list_head list; ++ ++ const char *name; ++ int id; ++ enum omap_overlay_manager_caps caps; ++ struct omap_display *display; ++ int num_overlays; ++ struct omap_overlay **overlays; ++ enum omap_display_type supported_displays; ++ ++ int (*set_display)(struct omap_overlay_manager *mgr, ++ struct omap_display *display); ++ int (*unset_display)(struct omap_overlay_manager *mgr); ++ ++ int (*apply)(struct omap_overlay_manager *mgr); ++ ++ void (*set_default_color)(struct omap_overlay_manager *mgr, u32 color); ++ u32 (*get_default_color)(struct omap_overlay_manager *mgr); ++ bool (*get_alpha_blending_status)(struct omap_overlay_manager *mgr); ++ bool (*get_trans_key_status)(struct omap_overlay_manager *mgr); ++ void (*get_trans_key_type_and_value)(struct omap_overlay_manager *mgr, ++ enum omap_dss_color_key_type *type, ++ u32 *trans_key); ++ void (*set_trans_key_type_and_value)(struct omap_overlay_manager *mgr, ++ enum omap_dss_color_key_type type, ++ u32 trans_key); ++ void (*enable_trans_key)(struct omap_overlay_manager *mgr, ++ bool enable); ++ void (*enable_alpha_blending)(struct omap_overlay_manager *mgr, ++ bool enable); ++}; ++ ++enum omap_display_caps { ++ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0, ++}; ++ ++enum omap_dss_update_mode { ++ OMAP_DSS_UPDATE_DISABLED = 0, ++ OMAP_DSS_UPDATE_AUTO, ++ OMAP_DSS_UPDATE_MANUAL, ++}; ++ ++enum omap_dss_display_state { ++ OMAP_DSS_DISPLAY_UNINITIALIZED = 0, ++ OMAP_DSS_DISPLAY_DISABLED, ++ OMAP_DSS_DISPLAY_ACTIVE, ++ OMAP_DSS_DISPLAY_SUSPENDED, ++}; ++ ++struct omap_display { ++ struct kobject kobj; ++ struct list_head list; ++ ++ /*atomic_t ref_count;*/ ++ int ref_count; ++ /* helper variable for driver suspend/resume */ ++ int activate_after_resume; ++ ++ enum omap_display_type type; ++ const char *name; ++ ++ enum omap_display_caps caps; ++ ++ struct omap_overlay_manager *manager; ++ ++ enum omap_dss_display_state state; ++ ++ struct omap_dss_display_config hw_config; /* board specific data */ ++ struct omap_ctrl *ctrl; /* static common data */ ++ struct omap_panel *panel; /* static common data */ ++ ++ int (*enable)(struct omap_display *display); ++ void (*disable)(struct omap_display *display); ++ ++ int (*suspend)(struct omap_display *display); ++ int (*resume)(struct omap_display *display); ++ ++ void (*get_resolution)(struct omap_display *display, ++ u16 *xres, u16 *yres); ++ int (*get_recommended_bpp)(struct omap_display *display); ++ ++ int (*check_timings)(struct omap_display *display, ++ struct omap_video_timings *timings); ++ void (*set_timings)(struct omap_display *display, ++ struct omap_video_timings *timings); ++ void (*get_timings)(struct omap_display *display, ++ struct omap_video_timings *timings); ++ int (*update)(struct omap_display *display, ++ u16 x, u16 
y, u16 w, u16 h); ++ int (*sync)(struct omap_display *display); ++ int (*wait_vsync)(struct omap_display *display); ++ ++ int (*set_update_mode)(struct omap_display *display, ++ enum omap_dss_update_mode); ++ enum omap_dss_update_mode (*get_update_mode) ++ (struct omap_display *display); ++ ++ int (*enable_te)(struct omap_display *display, bool enable); ++ int (*get_te)(struct omap_display *display); ++ ++ u8 (*get_rotate)(struct omap_display *display); ++ int (*set_rotate)(struct omap_display *display, u8 rotate); ++ ++ bool (*get_mirror)(struct omap_display *display); ++ int (*set_mirror)(struct omap_display *display, bool enable); ++ ++ int (*run_test)(struct omap_display *display, int test); ++ int (*memory_read)(struct omap_display *display, ++ void *buf, size_t size, ++ u16 x, u16 y, u16 w, u16 h); ++ ++ void (*configure_overlay)(struct omap_overlay *overlay); ++ ++ int (*set_wss)(struct omap_display *display, u32 wss); ++ u32 (*get_wss)(struct omap_display *display); ++}; ++ ++int omap_dss_get_num_displays(void); ++struct omap_display *omap_dss_get_display(int no); ++void omap_dss_put_display(struct omap_display *display); ++ ++void omap_dss_register_ctrl(struct omap_ctrl *ctrl); ++void omap_dss_unregister_ctrl(struct omap_ctrl *ctrl); ++ ++void omap_dss_register_panel(struct omap_panel *panel); ++void omap_dss_unregister_panel(struct omap_panel *panel); ++ ++int omap_dss_get_num_overlay_managers(void); ++struct omap_overlay_manager *omap_dss_get_overlay_manager(int num); ++ ++int omap_dss_get_num_overlays(void); ++struct omap_overlay *omap_dss_get_overlay(int num); ++ ++typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); ++int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); ++int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); ++ ++void omap_dispc_set_plane_ba0(enum omap_channel, enum omap_plane plane, u32 paddr); ++ ++int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout); ++int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, ++ unsigned long timeout); ++ ++void omap_dss_maximize_min_bus_tput(void); ++void omap_dss_update_min_bus_tput(void); ++ ++void omap_dss_lock(void); ++void omap_dss_unlock(void); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/dma.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/dma.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/dma.h 2011-09-04 11:32:10.003211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/dma.h 2011-09-04 11:31:05.000000000 +0200 +@@ -144,6 +144,7 @@ + #define OMAP_DMA4_CSSA_U(n) 0 + #define OMAP_DMA4_CDSA_L(n) 0 + #define OMAP_DMA4_CDSA_U(n) 0 ++#define OMAP1_DMA_COLOR(n) 0 + + /*----------------------------------------------------------------------------*/ + +@@ -543,6 +544,11 @@ extern void omap_set_dma_dst_endian_type + extern void omap_set_dma_src_endian_type(int lch, enum end_type etype); + extern int omap_get_dma_index(int lch, int *ei, int *fi); + ++void omap_dma_global_context_save(void); ++void omap_dma_global_context_restore(void); ++ ++extern void omap_dma_disable_irq(int lch); ++ + /* Chaining APIs */ + #ifndef CONFIG_ARCH_OMAP1 + extern int omap_request_dma_chain(int dev_id, const char *dev_name, +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/dss_boottime.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/dss_boottime.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/dss_boottime.h 1970-01-01 01:00:00.000000000 +0100 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/dss_boottime.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,16 @@ ++#ifndef _DSS_BOOTTIME_H ++#define _DSS_BOOTTIME_H ++ ++int dss_boottime_get_clocks(void); ++void dss_boottime_put_clocks(void); ++int dss_boottime_enable_clocks(void); ++void dss_boottime_disable_clocks(void); ++u32 dss_boottime_get_plane_base(int pidx); ++enum omapfb_color_format dss_boottime_get_plane_format(int pidx); ++int dss_boottime_get_plane_bpp(int plane_idx); ++size_t dss_boottime_get_plane_size(int pidx); ++int dss_boottime_plane_is_enabled(int pdix); ++int dss_boottime_reset(void); ++ ++#endif ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpioexpander.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpioexpander.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpioexpander.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpioexpander.h 2008-12-25 00:26:37.000000000 +0100 +@@ -0,0 +1,35 @@ ++/* ++ * arch/arm/plat-omap/include/mach/gpioexpander.h ++ * ++ * ++ * Copyright (C) 2004 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef __ASM_ARCH_OMAP_GPIOEXPANDER_H ++#define __ASM_ARCH_OMAP_GPIOEXPANDER_H ++ ++/* Function Prototypes for GPIO Expander functions */ ++ ++#ifdef CONFIG_GPIOEXPANDER_OMAP ++int read_gpio_expa(u8 *, int); ++int write_gpio_expa(u8 , int); ++#else ++static inline int read_gpio_expa(u8 *val, int addr) ++{ ++ return 0; ++} ++static inline int write_gpio_expa(u8 val, int addr) ++{ ++ return 0; ++} ++#endif ++ ++#endif /* __ASM_ARCH_OMAP_GPIOEXPANDER_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpio.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpio.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpio.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpio.h 2011-09-04 11:31:05.000000000 +0200 +@@ -71,11 +71,13 @@ + IH_GPIO_BASE + (nr)) + + extern int omap_gpio_init(void); /* Call from board init only */ +-extern void omap2_gpio_prepare_for_retention(void); +-extern void omap2_gpio_resume_after_retention(void); ++extern void omap2_gpio_prepare_for_idle(int power_state); ++extern void omap2_gpio_resume_after_idle(void); + extern void omap_set_gpio_debounce(int gpio, int enable); + extern void omap_set_gpio_debounce_time(int gpio, int enable); +- ++extern void omap3_gpio_save_context(void); ++extern void omap3_gpio_restore_context(void); ++extern void omap3_gpio_restore_pad_context(int restore_oe); + /*-------------------------------------------------------------------------*/ + + /* Wrappers for "new style" GPIO calls, using the new infrastructure +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpio-switch.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpio-switch.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpio-switch.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpio-switch.h 2011-09-04 11:31:05.000000000 +0200 +@@ -24,11 
+24,12 @@ + * low -> inactive + * + */ +-#define OMAP_GPIO_SWITCH_TYPE_COVER 0x0000 +-#define OMAP_GPIO_SWITCH_TYPE_CONNECTION 0x0001 +-#define OMAP_GPIO_SWITCH_TYPE_ACTIVITY 0x0002 +-#define OMAP_GPIO_SWITCH_FLAG_INVERTED 0x0001 +-#define OMAP_GPIO_SWITCH_FLAG_OUTPUT 0x0002 ++#define OMAP_GPIO_SWITCH_TYPE_COVER 0x0000 ++#define OMAP_GPIO_SWITCH_TYPE_CONNECTION 0x0001 ++#define OMAP_GPIO_SWITCH_TYPE_ACTIVITY 0x0002 ++#define OMAP_GPIO_SWITCH_FLAG_INVERTED 0x0001 ++#define OMAP_GPIO_SWITCH_FLAG_OUTPUT 0x0002 ++#define OMAP_GPIO_SWITCH_FLAG_OUTPUT_INIT_ACTIVE 0x0004 + + struct omap_gpio_switch { + const char *name; +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpmc.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpmc.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/gpmc.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/gpmc.h 2011-09-04 11:31:05.000000000 +0200 +@@ -62,6 +62,7 @@ + #define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1)) + #define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2)) + #define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3)) ++#define GPMC_CONFIG7_CSVALID (1 << 6) + + /* + * Note that all values in this struct are in nanoseconds, while +@@ -113,6 +114,8 @@ extern int gpmc_cs_request(int cs, unsig + extern void gpmc_cs_free(int cs); + extern int gpmc_cs_set_reserved(int cs, int reserved); + extern int gpmc_cs_reserved(int cs); ++extern void omap3_gpmc_save_context(void); ++extern void omap3_gpmc_restore_context(void); + extern void __init gpmc_init(void); + + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/hardware.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/hardware.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/hardware.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/hardware.h 2011-09-04 11:31:05.000000000 +0200 +@@ -322,6 +322,15 @@ + #include "board-nokia.h" + #endif + ++#ifdef CONFIG_MACH_NOKIA_RX51 ++#include "board-rx51.h" ++#endif ++ ++#ifdef CONFIG_MACH_NOKIA_RX71 ++#include "board-rx71.h" ++#endif ++ ++ + #ifdef CONFIG_MACH_OMAP_2430SDP + #include "board-2430sdp.h" + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/io.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/io.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/io.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/io.h 2011-09-04 11:31:05.000000000 +0200 +@@ -188,12 +188,17 @@ + #define omap_writel(v,a) __raw_writel(v, IO_ADDRESS(a)) + + struct omap_sdrc_params; ++struct omap_opp; + + extern void omap1_map_common_io(void); + extern void omap1_init_common_hw(void); + + extern void omap2_map_common_io(void); +-extern void omap2_init_common_hw(struct omap_sdrc_params *sp); ++extern void omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0, ++ struct omap_sdrc_params *sdrc_cs1, ++ struct omap_opp *mpu_opps, ++ struct omap_opp *dsp_opps, ++ struct omap_opp *l3_opps); + + #define __arch_ioremap(p,s,t) omap_ioremap(p,s,t) + #define __arch_iounmap(v) omap_iounmap(v) +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/iommu.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/iommu.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/iommu.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/iommu.h 
2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,164 @@ ++/* ++ * omap iommu: main structures ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#ifndef __MACH_IOMMU_H ++#define __MACH_IOMMU_H ++ ++struct iotlb_entry { ++ u32 da; ++ u32 pa; ++ u32 pgsz, prsvd, valid; ++ union { ++ u16 ap; ++ struct { ++ u32 endian, elsz, mixed; ++ }; ++ }; ++}; ++ ++struct iommu { ++ const char *name; ++ struct module *owner; ++ struct clk *clk; ++ void __iomem *regbase; ++ struct device *dev; ++ ++ unsigned int refcount; ++ struct mutex iommu_lock; /* global for this whole object */ ++ ++ /* ++ * We don't change iopgd for a situation like pgd for a task, ++ * but share it globally for each iommu. ++ */ ++ u32 *iopgd; ++ spinlock_t page_table_lock; /* protect iopgd */ ++ ++ int nr_tlb_entries; ++ ++ struct list_head mmap; ++ struct mutex mmap_lock; /* protect mmap */ ++ ++ int (*isr)(struct iommu *obj); ++ ++ void *ctx; /* iommu context: registres saved area */ ++}; ++ ++struct cr_regs { ++ union { ++ struct { ++ u16 cam_l; ++ u16 cam_h; ++ }; ++ u32 cam; ++ }; ++ union { ++ struct { ++ u16 ram_l; ++ u16 ram_h; ++ }; ++ u32 ram; ++ }; ++}; ++ ++struct iotlb_lock { ++ short base; ++ short vict; ++}; ++ ++/* architecture specific functions */ ++struct iommu_functions { ++ unsigned long version; ++ ++ int (*enable)(struct iommu *obj); ++ void (*disable)(struct iommu *obj); ++ u32 (*fault_isr)(struct iommu *obj, u32 *ra); ++ ++ void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr); ++ void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr); ++ ++ struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e); ++ int (*cr_valid)(struct cr_regs *cr); ++ u32 (*cr_to_virt)(struct cr_regs *cr); ++ void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); ++ ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf); ++ ++ u32 (*get_pte_attr)(struct iotlb_entry *e); ++ ++ void (*save_ctx)(struct iommu *obj); ++ void (*restore_ctx)(struct iommu *obj); ++ ssize_t (*dump_ctx)(struct iommu *obj, char *buf); ++}; ++ ++struct iommu_platform_data { ++ const char *name; ++ const char *clk_name; ++ const int nr_tlb_entries; ++}; ++ ++#include ++ ++/* ++ * utilities for super page(16MB, 1MB, 64KB and 4KB) ++ */ ++ ++#define iopgsz_max(bytes) \ ++ (((bytes) >= SZ_16M) ? SZ_16M : \ ++ ((bytes) >= SZ_1M) ? SZ_1M : \ ++ ((bytes) >= SZ_64K) ? SZ_64K : \ ++ ((bytes) >= SZ_4K) ? SZ_4K : 0) ++ ++#define bytes_to_iopgsz(bytes) \ ++ (((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M : \ ++ ((bytes) == SZ_1M) ? MMU_CAM_PGSZ_1M : \ ++ ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K : \ ++ ((bytes) == SZ_4K) ? MMU_CAM_PGSZ_4K : -1) ++ ++#define iopgsz_to_bytes(iopgsz) \ ++ (((iopgsz) == MMU_CAM_PGSZ_16M) ? SZ_16M : \ ++ ((iopgsz) == MMU_CAM_PGSZ_1M) ? SZ_1M : \ ++ ((iopgsz) == MMU_CAM_PGSZ_64K) ? SZ_64K : \ ++ ((iopgsz) == MMU_CAM_PGSZ_4K) ? 
SZ_4K : 0) ++ ++#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0) ++ ++/* ++ * global functions ++ */ ++extern u32 iommu_arch_version(void); ++ ++extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); ++extern u32 iotlb_cr_to_virt(struct cr_regs *cr); ++ ++extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e); ++extern void flush_iotlb_page(struct iommu *obj, u32 da); ++extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end); ++extern void flush_iotlb_all(struct iommu *obj); ++ ++extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e); ++extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova); ++ ++extern struct iommu *iommu_get(const char *name); ++extern void iommu_put(struct iommu *obj); ++ ++extern void iommu_save_ctx(struct iommu *obj); ++extern void iommu_restore_ctx(struct iommu *obj); ++ ++extern int install_iommu_arch(const struct iommu_functions *ops); ++extern void uninstall_iommu_arch(const struct iommu_functions *ops); ++ ++extern int foreach_iommu_device(void *data, ++ int (*fn)(struct device *, void *)); ++ ++extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf); ++extern size_t dump_tlb_entries(struct iommu *obj, char *buf); ++ ++#endif /* __MACH_IOMMU_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/iommu2.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/iommu2.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/iommu2.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/iommu2.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,94 @@ ++/* ++ * omap iommu: omap2 architecture specific definitions ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
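(For illustration: a client of the iommu interface above typically looks up its MMU by name, fills a struct iotlb_entry and hands it to iopgtable_store_entry(). A minimal sketch, assuming an "isp" instance name and that iommu_get() returns an ERR_PTR on failure; the MMU_CAM_*/MMU_RAM_* attribute bits are the ones defined in iommu2.h just below.)

#include <linux/err.h>

static int my_map_one_page(u32 da, u32 pa)
{
	struct iommu *obj;
	struct iotlb_entry e = {
		.da    = da,			/* device (virtual) address */
		.pa    = pa,			/* physical address */
		.valid = 1,
		.pgsz  = MMU_CAM_PGSZ_4K,	/* 4 KiB page, see iommu2.h below */
	};
	int err;

	e.endian = MMU_RAM_ENDIAN_LITTLE;	/* RAM attribute bits, iommu2.h */
	e.elsz   = MMU_RAM_ELSZ_8;

	obj = iommu_get("isp");			/* "isp" is a placeholder name */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = iopgtable_store_entry(obj, &e);	/* install the mapping */
	if (err)
		iommu_put(obj);
	return err;
}
/* tear-down would use iopgtable_clear_entry(obj, da) and iommu_put(obj) */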
++ */ ++ ++#ifndef __MACH_IOMMU2_H ++#define __MACH_IOMMU2_H ++ ++/* ++ * MMU Register offsets ++ */ ++#define MMU_REVISION 0x00 ++#define MMU_SYSCONFIG 0x10 ++#define MMU_SYSSTATUS 0x14 ++#define MMU_IRQSTATUS 0x18 ++#define MMU_IRQENABLE 0x1c ++#define MMU_WALKING_ST 0x40 ++#define MMU_CNTL 0x44 ++#define MMU_FAULT_AD 0x48 ++#define MMU_TTB 0x4c ++#define MMU_LOCK 0x50 ++#define MMU_LD_TLB 0x54 ++#define MMU_CAM 0x58 ++#define MMU_RAM 0x5c ++#define MMU_GFLUSH 0x60 ++#define MMU_FLUSH_ENTRY 0x64 ++#define MMU_READ_CAM 0x68 ++#define MMU_READ_RAM 0x6c ++#define MMU_EMU_FAULT_AD 0x70 ++ ++#define MMU_REG_SIZE 256 ++ ++/* ++ * MMU Register bit definitions ++ */ ++#define MMU_LOCK_BASE_SHIFT 10 ++#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) ++#define MMU_LOCK_BASE(x) \ ++ ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT) ++ ++#define MMU_LOCK_VICT_SHIFT 4 ++#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT) ++#define MMU_LOCK_VICT(x) \ ++ ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT) ++ ++#define MMU_CAM_VATAG_SHIFT 12 ++#define MMU_CAM_VATAG_MASK \ ++ ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) ++#define MMU_CAM_P (1 << 3) ++#define MMU_CAM_V (1 << 2) ++#define MMU_CAM_PGSZ_MASK 3 ++#define MMU_CAM_PGSZ_1M (0 << 0) ++#define MMU_CAM_PGSZ_64K (1 << 0) ++#define MMU_CAM_PGSZ_4K (2 << 0) ++#define MMU_CAM_PGSZ_16M (3 << 0) ++ ++#define MMU_RAM_PADDR_SHIFT 12 ++#define MMU_RAM_PADDR_MASK \ ++ ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) ++#define MMU_RAM_ENDIAN_SHIFT 9 ++#define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT) ++#define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT) ++#define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT) ++#define MMU_RAM_ELSZ_SHIFT 7 ++#define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT) ++#define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT) ++#define MMU_RAM_ELSZ_16 (1 << MMU_RAM_ELSZ_SHIFT) ++#define MMU_RAM_ELSZ_32 (2 << MMU_RAM_ELSZ_SHIFT) ++#define MMU_RAM_ELSZ_NONE (3 << MMU_RAM_ELSZ_SHIFT) ++#define MMU_RAM_MIXED_SHIFT 6 ++#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT) ++#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK ++ ++/* ++ * register accessors ++ */ ++static inline u32 iommu_read_reg(struct iommu *obj, size_t offs) ++{ ++ return __raw_readl(obj->regbase + offs); ++} ++ ++static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs) ++{ ++ __raw_writel(val, obj->regbase + offs); ++} ++ ++#endif /* __MACH_IOMMU2_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/iovmm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/iovmm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/iovmm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/iovmm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,94 @@ ++/* ++ * omap iommu: simple virtual address space management ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#ifndef __IOMMU_MMAP_H ++#define __IOMMU_MMAP_H ++ ++struct iovm_struct { ++ struct iommu *iommu; /* iommu object which this belongs to */ ++ u32 da_start; /* area definition */ ++ u32 da_end; ++ u32 flags; /* IOVMF_: see below */ ++ struct list_head list; /* linked in ascending order */ ++ const struct sg_table *sgt; /* keep 'page' <-> 'da' mapping */ ++ void *va; /* mpu side mapped address */ ++}; ++ ++/* ++ * IOVMF_FLAGS: attribute for iommu virtual memory area(iovma) ++ * ++ * lower 16 bit is used for h/w and upper 16 bit is for s/w. ++ */ ++#define IOVMF_SW_SHIFT 16 ++#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT) ++#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1) ++#define IOVMF_SW_MASK (~IOVMF_HW_MASK)UL ++ ++/* ++ * iovma: h/w flags derived from cam and ram attribute ++ */ ++#define IOVMF_CAM_MASK (~((1 << 10) - 1)) ++#define IOVMF_RAM_MASK (~IOVMF_CAM_MASK) ++ ++#define IOVMF_PGSZ_MASK (3 << 0) ++#define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M ++#define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K ++#define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K ++#define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M ++ ++#define IOVMF_ENDIAN_MASK (1 << 9) ++#define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG ++#define IOVMF_ENDIAN_LITTLE MMU_RAM_ENDIAN_LITTLE ++ ++#define IOVMF_ELSZ_MASK (3 << 7) ++#define IOVMF_ELSZ_8 MMU_RAM_ELSZ_8 ++#define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16 ++#define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32 ++#define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE ++ ++#define IOVMF_MIXED_MASK (1 << 6) ++#define IOVMF_MIXED MMU_RAM_MIXED ++ ++/* ++ * iovma: s/w flags, used for mapping and umapping internally. ++ */ ++#define IOVMF_MMIO (1 << IOVMF_SW_SHIFT) ++#define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT) ++#define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT) ++ ++/* "superpages" is supported just with physically linear pages */ ++#define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT)) ++#define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT)) ++#define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT)) ++ ++#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) ++#define IOVMF_DA_ANON (2 << (4 + IOVMF_SW_SHIFT)) ++#define IOVMF_DA_MASK (3 << (4 + IOVMF_SW_SHIFT)) ++ ++ ++extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da); ++extern u32 iommu_vmap(struct iommu *obj, u32 da, ++ const struct sg_table *sgt, u32 flags); ++extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da); ++extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, ++ u32 flags); ++extern void iommu_vfree(struct iommu *obj, const u32 da); ++extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, ++ u32 flags); ++extern void iommu_kunmap(struct iommu *obj, u32 da); ++extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, ++ u32 flags); ++extern void iommu_kfree(struct iommu *obj, u32 da); ++ ++extern void *da_to_va(struct iommu *obj, u32 da); ++ ++#endif /* __IOMMU_MMAP_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/irqs.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/irqs.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/irqs.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/irqs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -382,9 +382,14 @@ + + #define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32)) + ++#define INTCPS_NR_MIR_REGS 3 ++#define INTCPS_NR_IRQS 96 ++ + #ifndef __ASSEMBLY__ + extern void omap_init_irq(void); + extern int omap_irq_pending(void); ++void omap3_intc_save_context(void); ++void omap3_intc_restore_context(void); + #endif + + #include +diff -Nurp 
linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/isp_user.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/isp_user.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/isp_user.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/isp_user.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,692 @@ ++/* ++ * isp_user.h ++ * ++ * Include file for OMAP ISP module in TI's OMAP3. ++ * ++ * Copyright (C) 2009 Texas Instruments, Inc. ++ * ++ * Contributors: ++ * Mohit Jalori ++ * Sergio Aguirre ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef OMAP_ISP_USER_H ++#define OMAP_ISP_USER_H ++ ++/* ISP Private IOCTLs */ ++#define VIDIOC_PRIVATE_ISP_CCDC_CFG \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct ispccdc_update_config) ++#define VIDIOC_PRIVATE_ISP_PRV_CFG \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct ispprv_update_config) ++#define VIDIOC_PRIVATE_ISP_AEWB_CFG \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct isph3a_aewb_config) ++#define VIDIOC_PRIVATE_ISP_AEWB_REQ \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct isph3a_aewb_data) ++#define VIDIOC_PRIVATE_ISP_HIST_CFG \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct isp_hist_config) ++#define VIDIOC_PRIVATE_ISP_HIST_REQ \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct isp_hist_data) ++#define VIDIOC_PRIVATE_ISP_AF_CFG \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct af_configuration) ++#define VIDIOC_PRIVATE_ISP_AF_REQ \ ++ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct isp_af_data) ++ ++/* AE/AWB related structures and flags*/ ++ ++/* Flags for update field */ ++#define REQUEST_STATISTICS (1 << 0) ++#define SET_COLOR_GAINS (1 << 1) ++#define SET_DIGITAL_GAIN (1 << 2) ++#define SET_EXPOSURE (1 << 3) ++#define SET_ANALOG_GAIN (1 << 4) ++ ++#define MAX_FRAME_COUNT 0x0FFF ++#define MAX_FUTURE_FRAMES 10 ++ ++#define MAX_SATURATION_LIM 1023 ++#define MIN_WIN_H 2 ++#define MAX_WIN_H 256 ++#define MIN_WIN_W 6 ++#define MAX_WIN_W 256 ++#define MAX_WINVC 128 ++#define MAX_WINHC 36 ++#define MAX_WINSTART 4095 ++#define MIN_SUB_INC 2 ++#define MAX_SUB_INC 32 ++ ++/* Range Constants */ ++#define AF_IIRSH_MIN 0 ++#define AF_IIRSH_MAX 4094 ++#define AF_PAXEL_HORIZONTAL_COUNT_MIN 0 ++#define AF_PAXEL_HORIZONTAL_COUNT_MAX 35 ++#define AF_PAXEL_VERTICAL_COUNT_MIN 0 ++#define AF_PAXEL_VERTICAL_COUNT_MAX 127 ++#define AF_PAXEL_INCREMENT_MIN 0 ++#define AF_PAXEL_INCREMENT_MAX 14 ++#define AF_PAXEL_HEIGHT_MIN 0 ++#define AF_PAXEL_HEIGHT_MAX 127 ++#define AF_PAXEL_WIDTH_MIN 0 ++#define AF_PAXEL_WIDTH_MAX 127 ++#define AF_PAXEL_HZSTART_MIN 2 ++#define AF_PAXEL_HZSTART_MAX 4094 ++ ++#define AF_PAXEL_VTSTART_MIN 0 ++#define AF_PAXEL_VTSTART_MAX 4095 ++#define AF_THRESHOLD_MAX 255 ++#define AF_COEF_MAX 4095 ++#define AF_PAXEL_SIZE 48 ++ ++/** ++ * struct isph3a_aewb_config - AE AWB configuration reset values. ++ * saturation_limit: Saturation limit. ++ * @win_height: Window Height. Range 2 - 256, even values only. ++ * @win_width: Window Width. Range 6 - 256, even values only. ++ * @ver_win_count: Vertical Window Count. Range 1 - 128. ++ * @hor_win_count: Horizontal Window Count. Range 1 - 36. 
++ * @ver_win_start: Vertical Window Start. Range 0 - 4095. ++ * @hor_win_start: Horizontal Window Start. Range 0 - 4095. ++ * @blk_ver_win_start: Black Vertical Windows Start. Range 0 - 4095. ++ * @blk_win_height: Black Window Height. Range 2 - 256, even values only. ++ * @subsample_ver_inc: Subsample Vertical points increment Range 2 - 32, even ++ * values only. ++ * @subsample_hor_inc: Subsample Horizontal points increment Range 2 - 32, even ++ * values only. ++ * @alaw_enable: AEW ALAW EN flag. ++ * @aewb_enable: AE AWB stats generation EN flag. ++ */ ++struct isph3a_aewb_config { ++ __u16 saturation_limit; ++ __u16 win_height; ++ __u16 win_width; ++ __u16 ver_win_count; ++ __u16 hor_win_count; ++ __u16 ver_win_start; ++ __u16 hor_win_start; ++ __u16 blk_ver_win_start; ++ __u16 blk_win_height; ++ __u16 subsample_ver_inc; ++ __u16 subsample_hor_inc; ++ __u8 alaw_enable; ++ __u8 aewb_enable; ++}; ++ ++/** ++ * struct isph3a_aewb_data - Structure of data sent to or received from user ++ * @h3a_aewb_statistics_buf: Pointer to pass to user. ++ * @shutter: Shutter speed. ++ * @gain: Sensor analog Gain. ++ * @shutter_cap: Shutter speed for capture. ++ * @gain_cap: Sensor Gain for capture. ++ * @dgain: White balance digital gain. ++ * @wb_gain_b: White balance color gain blue. ++ * @wb_gain_r: White balance color gain red. ++ * @wb_gain_gb: White balance color gain green blue. ++ * @wb_gain_gr: White balance color gain green red. ++ * @frame_number: Frame number of requested stats. ++ * @curr_frame: Current frame number being processed. ++ * @update: Bitwise flags to update parameters. ++ * @ts: Timestamp of returned framestats. ++ * @field_count: Sequence number of returned framestats. ++ */ ++struct isph3a_aewb_data { ++ void *h3a_aewb_statistics_buf; ++ __u32 shutter; ++ __u16 gain; ++ __u32 shutter_cap; ++ __u16 gain_cap; ++ __u16 dgain; ++ __u16 wb_gain_b; ++ __u16 wb_gain_r; ++ __u16 wb_gain_gb; ++ __u16 wb_gain_gr; ++ __u16 frame_number; ++ __u16 curr_frame; ++ __u8 update; ++ struct timeval ts; ++ __u32 config_counter; ++ unsigned long field_count; ++}; ++ ++ ++/* Histogram related structs */ ++ ++/* Flags for number of bins */ ++#define HIST_BINS_32 0 ++#define HIST_BINS_64 1 ++#define HIST_BINS_128 2 ++#define HIST_BINS_256 3 ++#define HIST_MEM_SIZE_BINS(n) ((n)*16) ++ ++#define HIST_MEM_SIZE 1024 ++#define HIST_MIN_REGIONS 1 ++#define HIST_MAX_REGIONS 4 ++#define HIST_MAX_WB_GAIN 255 ++#define HIST_MIN_WB_GAIN 0 ++#define HIST_MAX_BIT_WIDTH 14 ++#define HIST_MIN_BIT_WIDTH 8 ++#define HIST_MAX_BUFF 5 ++#define HIST_MAX_WG 4 ++ ++/* Source */ ++#define HIST_SOURCE_CCDC 0 ++#define HIST_SOURCE_MEM 1 ++ ++/* CFA pattern */ ++#define HIST_CFA_BAYER 0 ++#define HIST_CFA_FOVEONX3 1 ++ ++struct isp_hist_config { ++ __u8 enable; ++ __u8 source; /* CCDC or memory */ ++ __u8 input_bit_width; /* Needed o know the size per pixel */ ++ __u8 num_acc_frames; /* Num of frames to be processed and accumulated ++ for each generated histogram frame */ ++ __u8 hist_h_v_info; /* frame-input width and height if source is ++ * memory */ ++ __u16 hist_radd; /* frame-input address in memory */ ++ __u16 hist_radd_off; /* line-offset for frame-input */ ++ __u16 hist_bins; /* number of bins: 32, 64, 128, or 256 */ ++ __u8 cfa; /* BAYER or FOVEON X3 */ ++ __u8 wg[HIST_MAX_WG]; /* White Balance Gain */ ++ __u8 num_regions; /* number of regions to be configured */ ++ __u32 reg_hor[HIST_MAX_REGIONS]; /* Regions size and position */ ++ __u32 reg_ver[HIST_MAX_REGIONS]; /* Regions size and position */ ++}; ++ ++struct 
isp_hist_data { ++ __u32 *hist_statistics_buf; /* Pointer to pass to user */ ++ __u8 update; ++ __u16 frame_number; ++ __u16 curr_frame; ++ __u32 config_counter; ++ struct timeval ts; ++}; ++ ++/* Auto Focus related structs */ ++ ++#define AF_NUMBER_OF_COEF 11 ++ ++/* Flags for update field */ ++#define REQUEST_STATISTICS (1 << 0) ++#define LENS_DESIRED_POSITION (1 << 1) ++#define LENS_CURRENT_POSITION (1 << 2) ++ ++/** ++ * struct isp_af_xtrastats - Extra statistics related to AF generated stats. ++ * @ts: Timestamp when the frame gets delivered to the user. ++ * @field_count: Field count of the frame delivered to the user. ++ * @lens_position: Lens position when the stats are being generated. ++ */ ++struct isp_af_xtrastats { ++ struct timeval ts; ++ unsigned long field_count; ++ __u16 lens_position; /* deprecated */ ++}; ++ ++/** ++ * struct isp_af_data - AF statistics data to transfer between driver and user. ++ * @af_statistics_buf: Pointer to pass to user. ++ * @lens_current_position: Read value of lens absolute position. ++ * @desired_lens_direction: Lens desired location. ++ * @update: Bitwise flags to update parameters. ++ * @frame_number: Data for which frame is desired/given. ++ * @curr_frame: Current frame number being processed by AF module. ++ * @xtrastats: Extra statistics structure. ++ */ ++struct isp_af_data { ++ void *af_statistics_buf; ++ __u16 lens_current_position; /* deprecated */ ++ __u16 desired_lens_direction; /* deprecated */ ++ __u16 update; ++ __u16 frame_number; ++ __u16 curr_frame; ++ __u32 config_counter; ++ struct isp_af_xtrastats xtrastats; ++}; ++ ++/* enum used for status of specific feature */ ++enum af_alaw_enable { ++ H3A_AF_ALAW_DISABLE = 0, ++ H3A_AF_ALAW_ENABLE = 1 ++}; ++ ++enum af_hmf_enable { ++ H3A_AF_HMF_DISABLE = 0, ++ H3A_AF_HMF_ENABLE = 1 ++}; ++ ++enum af_config_flag { ++ H3A_AF_CFG_DISABLE = 0, ++ H3A_AF_CFG_ENABLE = 1 ++}; ++ ++enum af_mode { ++ ACCUMULATOR_SUMMED = 0, ++ ACCUMULATOR_PEAK = 1 ++}; ++ ++/* Red, Green, and blue pixel location in the AF windows */ ++enum rgbpos { ++ GR_GB_BAYER = 0, /* GR and GB as Bayer pattern */ ++ RG_GB_BAYER = 1, /* RG and GB as Bayer pattern */ ++ GR_BG_BAYER = 2, /* GR and BG as Bayer pattern */ ++ RG_BG_BAYER = 3, /* RG and BG as Bayer pattern */ ++ GG_RB_CUSTOM = 4, /* GG and RB as custom pattern */ ++ RB_GG_CUSTOM = 5 /* RB and GG as custom pattern */ ++}; ++ ++/* Contains the information regarding the Horizontal Median Filter */ ++struct af_hmf { ++ enum af_hmf_enable enable; /* Status of Horizontal Median Filter */ ++ unsigned int threshold; /* Threshhold Value for Horizontal Median ++ * Filter ++ */ ++}; ++ ++/* Contains the information regarding the IIR Filters */ ++struct af_iir { ++ unsigned int hz_start_pos; /* IIR Start Register Value */ ++ int coeff_set0[AF_NUMBER_OF_COEF]; /* ++ * IIR Filter Coefficient for ++ * Set 0 ++ */ ++ int coeff_set1[AF_NUMBER_OF_COEF]; /* ++ * IIR Filter Coefficient for ++ * Set 1 ++ */ ++}; ++ ++/* Contains the information regarding the Paxels Structure in AF Engine */ ++struct af_paxel { ++ unsigned int width; /* Width of the Paxel */ ++ unsigned int height; /* Height of the Paxel */ ++ unsigned int hz_start; /* Horizontal Start Position */ ++ unsigned int vt_start; /* Vertical Start Position */ ++ unsigned int hz_cnt; /* Horizontal Count */ ++ unsigned int vt_cnt; /* vertical Count */ ++ unsigned int line_incr; /* Line Increment */ ++}; ++/* Contains the parameters required for hardware set up of AF Engine */ ++struct af_configuration { ++ enum af_alaw_enable 
alaw_enable; /*ALWAW status */ ++ struct af_hmf hmf_config; /*HMF configurations */ ++ enum rgbpos rgb_pos; /*RGB Positions */ ++ struct af_iir iir_config; /*IIR filter configurations */ ++ struct af_paxel paxel_config; /*Paxel parameters */ ++ enum af_mode mode; /*Accumulator mode */ ++ enum af_config_flag af_config; /*Flag indicates Engine is configured */ ++}; ++ ++/* ISP CCDC structs */ ++ ++/* Abstraction layer CCDC configurations */ ++#define ISP_ABS_CCDC_ALAW (1 << 0) ++#define ISP_ABS_CCDC_LPF (1 << 1) ++#define ISP_ABS_CCDC_BLCLAMP (1 << 2) ++#define ISP_ABS_CCDC_BCOMP (1 << 3) ++#define ISP_ABS_CCDC_FPC (1 << 4) ++#define ISP_ABS_CCDC_CULL (1 << 5) ++#define ISP_ABS_CCDC_COLPTN (1 << 6) ++#define ISP_ABS_CCDC_CONFIG_LSC (1 << 7) ++#define ISP_ABS_TBL_LSC (1 << 8) ++ ++#define RGB_MAX 3 ++ ++/* Enumeration constants for Alaw input width */ ++enum alaw_ipwidth { ++ ALAW_BIT12_3 = 0x3, ++ ALAW_BIT11_2 = 0x4, ++ ALAW_BIT10_1 = 0x5, ++ ALAW_BIT9_0 = 0x6 ++}; ++ ++/* Enumeration constants for Video Port */ ++enum vpin { ++ BIT12_3 = 3, ++ BIT11_2 = 4, ++ BIT10_1 = 5, ++ BIT9_0 = 6 ++}; ++ ++enum vpif_freq { ++ PIXCLKBY2, ++ PIXCLKBY3_5, ++ PIXCLKBY4_5, ++ PIXCLKBY5_5, ++ PIXCLKBY6_5 ++}; ++ ++/** ++ * struct ispccdc_lsc_config - Structure for LSC configuration. ++ * @offset: Table Offset of the gain table. ++ * @gain_mode_n: Vertical dimension of a paxel in LSC configuration. ++ * @gain_mode_m: Horizontal dimension of a paxel in LSC configuration. ++ * @gain_format: Gain table format. ++ * @fmtsph: Start pixel horizontal from start of the HS sync pulse. ++ * @fmtlnh: Number of pixels in horizontal direction to use for the data ++ * reformatter. ++ * @fmtslv: Start line from start of VS sync pulse for the data reformatter. ++ * @fmtlnv: Number of lines in vertical direction for the data reformatter. ++ * @initial_x: X position, in pixels, of the first active pixel in reference ++ * to the first active paxel. Must be an even number. ++ * @initial_y: Y position, in pixels, of the first active pixel in reference ++ * to the first active paxel. Must be an even number. ++ * @size: Size of LSC gain table. Filled when loaded from userspace. ++ */ ++struct ispccdc_lsc_config { ++ __u16 offset; ++ __u8 gain_mode_n; ++ __u8 gain_mode_m; ++ __u8 gain_format; ++ __u16 fmtsph; ++ __u16 fmtlnh; ++ __u16 fmtslv; ++ __u16 fmtlnv; ++ __u8 initial_x; ++ __u8 initial_y; ++ __u32 size; ++}; ++ ++/** ++ * struct ispccdc_bclamp - Structure for Optical & Digital black clamp subtract ++ * @obgain: Optical black average gain. ++ * @obstpixel: Start Pixel w.r.t. HS pulse in Optical black sample. ++ * @oblines: Optical Black Sample lines. ++ * @oblen: Optical Black Sample Length. ++ * @dcsubval: Digital Black Clamp subtract value. ++ */ ++struct ispccdc_bclamp { ++ __u8 obgain; ++ __u8 obstpixel; ++ __u8 oblines; ++ __u8 oblen; ++ __u16 dcsubval; ++}; ++ ++/** ++ * ispccdc_fpc - Structure for FPC ++ * @fpnum: Number of faulty pixels to be corrected in the frame. ++ * @fpcaddr: Memory address of the FPC Table ++ */ ++struct ispccdc_fpc { ++ __u16 fpnum; ++ __u32 fpcaddr; ++}; ++ ++/** ++ * ispccdc_blcomp - Structure for Black Level Compensation parameters. ++ * @b_mg: B/Mg pixels. 2's complement. -128 to +127. ++ * @gb_g: Gb/G pixels. 2's complement. -128 to +127. ++ * @gr_cy: Gr/Cy pixels. 2's complement. -128 to +127. ++ * @r_ye: R/Ye pixels. 2's complement. -128 to +127. 
++ */ ++struct ispccdc_blcomp { ++ __u8 b_mg; ++ __u8 gb_g; ++ __u8 gr_cy; ++ __u8 r_ye; ++}; ++ ++/** ++ * struct ispccdc_vp - Structure for Video Port parameters ++ * @bitshift_sel: Video port input select. 3 - bits 12-3, 4 - bits 11-2, ++ * 5 - bits 10-1, 6 - bits 9-0. ++ * @freq_sel: Video port data ready frequency. 1 - 1/3.5, 2 - 1/4.5, ++ * 3 - 1/5.5, 4 - 1/6.5. ++ */ ++struct ispccdc_vp { ++ enum vpin bitshift_sel; ++ enum vpif_freq freq_sel; ++}; ++ ++/** ++ * ispccdc_culling - Structure for Culling parameters. ++ * @v_pattern: Vertical culling pattern. ++ * @h_odd: Horizontal Culling pattern for odd lines. ++ * @h_even: Horizontal Culling pattern for even lines. ++ */ ++struct ispccdc_culling { ++ __u8 v_pattern; ++ __u16 h_odd; ++ __u16 h_even; ++}; ++ ++/** ++ * ispccdc_update_config - Structure for CCDC configuration. ++ * @update: Specifies which CCDC registers should be updated. ++ * @flag: Specifies which CCDC functions should be enabled. ++ * @alawip: Enable/Disable A-Law compression. ++ * @bclamp: Black clamp control register. ++ * @blcomp: Black level compensation value for RGrGbB Pixels. 2's complement. ++ * @fpc: Number of faulty pixels corrected in the frame, address of FPC table. ++ * @cull: Cull control register. ++ * @colptn: Color pattern of the sensor. ++ * @lsc: Pointer to LSC gain table. ++ */ ++struct ispccdc_update_config { ++ __u16 update; ++ __u16 flag; ++ enum alaw_ipwidth alawip; ++ struct ispccdc_bclamp *bclamp; ++ struct ispccdc_blcomp *blcomp; ++ struct ispccdc_fpc *fpc; ++ struct ispccdc_lsc_config *lsc_cfg; ++ struct ispccdc_culling *cull; ++ __u32 colptn; ++ __u8 *lsc; ++}; ++ ++/* Preview configuration */ ++ ++/*Abstraction layer preview configurations*/ ++#define ISP_ABS_PREV_LUMAENH (1 << 0) ++#define ISP_ABS_PREV_INVALAW (1 << 1) ++#define ISP_ABS_PREV_HRZ_MED (1 << 2) ++#define ISP_ABS_PREV_CFA (1 << 3) ++#define ISP_ABS_PREV_CHROMA_SUPP (1 << 4) ++#define ISP_ABS_PREV_WB (1 << 5) ++#define ISP_ABS_PREV_BLKADJ (1 << 6) ++#define ISP_ABS_PREV_RGB2RGB (1 << 7) ++#define ISP_ABS_PREV_COLOR_CONV (1 << 8) ++#define ISP_ABS_PREV_YC_LIMIT (1 << 9) ++#define ISP_ABS_PREV_DEFECT_COR (1 << 10) ++#define ISP_ABS_PREV_GAMMABYPASS (1 << 11) ++#define ISP_ABS_TBL_NF (1 << 12) ++#define ISP_ABS_TBL_REDGAMMA (1 << 13) ++#define ISP_ABS_TBL_GREENGAMMA (1 << 14) ++#define ISP_ABS_TBL_BLUEGAMMA (1 << 15) ++ ++#define ISPPRV_NF_TBL_SIZE 64 ++#define ISPPRV_CFA_TBL_SIZE 576 ++#define ISPPRV_GAMMA_TBL_SIZE 1024 ++#define ISPPRV_YENH_TBL_SIZE 128 ++ ++/** ++ * struct ispprev_hmed - Structure for Horizontal Median Filter. ++ * @odddist: Distance between consecutive pixels of same color in the odd line. ++ * @evendist: Distance between consecutive pixels of same color in the even ++ * line. ++ * @thres: Horizontal median filter threshold. ++ */ ++struct ispprev_hmed { ++ __u8 odddist; ++ __u8 evendist; ++ __u8 thres; ++}; ++ ++/* ++ * Enumeration for CFA Formats supported by preview ++ */ ++enum cfa_fmt { ++ CFAFMT_BAYER, CFAFMT_SONYVGA, CFAFMT_RGBFOVEON, ++ CFAFMT_DNSPL, CFAFMT_HONEYCOMB, CFAFMT_RRGGBBFOVEON ++}; ++ ++/** ++ * struct ispprev_cfa - Structure for CFA Inpterpolation. ++ * @cfafmt: CFA Format Enum value supported by preview. ++ * @cfa_gradthrs_vert: CFA Gradient Threshold - Vertical. ++ * @cfa_gradthrs_horz: CFA Gradient Threshold - Horizontal. ++ * @cfa_table: Pointer to the CFA table. 
++ */ ++struct ispprev_cfa { ++ enum cfa_fmt cfafmt; ++ __u8 cfa_gradthrs_vert; ++ __u8 cfa_gradthrs_horz; ++ __u32 *cfa_table; ++}; ++ ++/** ++ * struct ispprev_csup - Structure for Chrominance Suppression. ++ * @gain: Gain. ++ * @thres: Threshold. ++ * @hypf_en: Flag to enable/disable the High Pass Filter. ++ */ ++struct ispprev_csup { ++ __u8 gain; ++ __u8 thres; ++ __u8 hypf_en; ++}; ++ ++/** ++ * struct ispprev_wbal - Structure for White Balance. ++ * @dgain: Digital gain (U10Q8). ++ * @coef3: White balance gain - COEF 3 (U8Q5). ++ * @coef2: White balance gain - COEF 2 (U8Q5). ++ * @coef1: White balance gain - COEF 1 (U8Q5). ++ * @coef0: White balance gain - COEF 0 (U8Q5). ++ */ ++struct ispprev_wbal { ++ __u16 dgain; ++ __u8 coef3; ++ __u8 coef2; ++ __u8 coef1; ++ __u8 coef0; ++}; ++ ++/** ++ * struct ispprev_blkadj - Structure for Black Adjustment. ++ * @red: Black level offset adjustment for Red in 2's complement format ++ * @green: Black level offset adjustment for Green in 2's complement format ++ * @blue: Black level offset adjustment for Blue in 2's complement format ++ */ ++struct ispprev_blkadj { ++ /*Black level offset adjustment for Red in 2's complement format */ ++ __u8 red; ++ /*Black level offset adjustment for Green in 2's complement format */ ++ __u8 green; ++ /* Black level offset adjustment for Blue in 2's complement format */ ++ __u8 blue; ++}; ++ ++/** ++ * struct ispprev_rgbtorgb - Structure for RGB to RGB Blending. ++ * @matrix: Blending values(S12Q8 format) ++ * [RR] [GR] [BR] ++ * [RG] [GG] [BG] ++ * [RB] [GB] [BB] ++ * @offset: Blending offset value for R,G,B in 2's complement integer format. ++ */ ++struct ispprev_rgbtorgb { ++ __u16 matrix[3][3]; ++ __u16 offset[3]; ++}; ++ ++/** ++ * struct ispprev_csc - Structure for Color Space Conversion from RGB-YCbYCr ++ * @matrix: Color space conversion coefficients(S10Q8) ++ * [CSCRY] [CSCGY] [CSCBY] ++ * [CSCRCB] [CSCGCB] [CSCBCB] ++ * [CSCRCR] [CSCGCR] [CSCBCR] ++ * @offset: CSC offset values for Y offset, CB offset and CR offset respectively ++ */ ++struct ispprev_csc { ++ __u16 matrix[RGB_MAX][RGB_MAX]; ++ __s16 offset[RGB_MAX]; ++}; ++ ++/** ++ * struct ispprev_yclimit - Structure for Y, C Value Limit. ++ * @minC: Minimum C value ++ * @maxC: Maximum C value ++ * @minY: Minimum Y value ++ * @maxY: Maximum Y value ++ */ ++struct ispprev_yclimit { ++ __u8 minC; ++ __u8 maxC; ++ __u8 minY; ++ __u8 maxY; ++}; ++ ++/** ++ * struct ispprev_dcor - Structure for Defect correction. ++ * @couplet_mode_en: Flag to enable or disable the couplet dc Correction in NF ++ * @detect_correct: Thresholds for correction bit 0:10 detect 16:25 correct ++ */ ++struct ispprev_dcor { ++ __u8 couplet_mode_en; ++ __u32 detect_correct[4]; ++}; ++ ++/** ++ * struct ispprev_nf - Structure for Noise Filter ++ * @spread: Spread value to be used in Noise Filter ++ * @table: Pointer to the Noise Filter table ++ */ ++struct ispprev_nf { ++ __u8 spread; ++ __u32 table[ISPPRV_NF_TBL_SIZE]; ++}; ++ ++/** ++ * struct ispprv_update_config - Structure for Preview Configuration (user). ++ * @update: Specifies which ISP Preview registers should be updated. ++ * @flag: Specifies which ISP Preview functions should be enabled. ++ * @yen: Pointer to luma enhancement table. ++ * @shading_shift: 3bit value of shift used in shading compensation. ++ * @prev_hmed: Pointer to structure containing the odd and even distance. ++ * between the pixels in the image along with the filter threshold. 
++ * @prev_cfa: Pointer to structure containing the CFA interpolation table, CFA. ++ * format in the image, vertical and horizontal gradient threshold. ++ * @csup: Pointer to Structure for Chrominance Suppression coefficients. ++ * @prev_wbal: Pointer to structure for White Balance. ++ * @prev_blkadj: Pointer to structure for Black Adjustment. ++ * @rgb2rgb: Pointer to structure for RGB to RGB Blending. ++ * @prev_csc: Pointer to structure for Color Space Conversion from RGB-YCbYCr. ++ * @yclimit: Pointer to structure for Y, C Value Limit. ++ * @prev_dcor: Pointer to structure for defect correction. ++ * @prev_nf: Pointer to structure for Noise Filter ++ * @red_gamma: Pointer to red gamma correction table. ++ * @green_gamma: Pointer to green gamma correction table. ++ * @blue_gamma: Pointer to blue gamma correction table. ++ */ ++struct ispprv_update_config { ++ __u16 update; ++ __u16 flag; ++ void *yen; ++ __u32 shading_shift; ++ struct ispprev_hmed *prev_hmed; ++ struct ispprev_cfa *prev_cfa; ++ struct ispprev_csup *csup; ++ struct ispprev_wbal *prev_wbal; ++ struct ispprev_blkadj *prev_blkadj; ++ struct ispprev_rgbtorgb *rgb2rgb; ++ struct ispprev_csc *prev_csc; ++ struct ispprev_yclimit *yclimit; ++ struct ispprev_dcor *prev_dcor; ++ struct ispprev_nf *prev_nf; ++ __u32 *red_gamma; ++ __u32 *green_gamma; ++ __u32 *blue_gamma; ++}; ++ ++#endif /* OMAP_ISP_USER_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/lcd_mipid.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/lcd_mipid.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/lcd_mipid.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/lcd_mipid.h 2011-09-04 11:31:05.000000000 +0200 +@@ -16,7 +16,15 @@ enum mipid_test_result { + struct mipid_platform_data { + int nreset_gpio; + int data_lines; ++ ++ int bc_connected : 1; /* Backlight ctrl pin */ ++ int kbbc_connected : 1; /* Keyboard backlight ctrl pin */ ++ + void (*shutdown)(struct mipid_platform_data *pdata); ++ void (*set_bklight_level)(struct mipid_platform_data *pdata, ++ int level); ++ int (*get_bklight_level)(struct mipid_platform_data *pdata); ++ int (*get_bklight_max)(struct mipid_platform_data *pdata); + }; + + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/mcbsp.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/mcbsp.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/mcbsp.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/mcbsp.h 2011-09-04 11:31:05.000000000 +0200 +@@ -49,7 +49,9 @@ + + #define OMAP34XX_MCBSP1_BASE 0x48074000 + #define OMAP34XX_MCBSP2_BASE 0x49022000 ++#define OMAP34XX_MCBSP2_ST_BASE 0x49028000 + #define OMAP34XX_MCBSP3_BASE 0x49024000 ++#define OMAP34XX_MCBSP3_ST_BASE 0x4902A000 + #define OMAP34XX_MCBSP4_BASE 0x49026000 + #define OMAP34XX_MCBSP5_BASE 0x48096000 + +@@ -130,8 +132,22 @@ + #define OMAP_MCBSP_REG_XCERG 0x74 + #define OMAP_MCBSP_REG_XCERH 0x78 + #define OMAP_MCBSP_REG_SYSCON 0x8C ++#define OMAP_MCBSP_REG_THRSH2 0x90 ++#define OMAP_MCBSP_REG_THRSH1 0x94 ++#define OMAP_MCBSP_REG_IRQST 0xA0 ++#define OMAP_MCBSP_REG_IRQEN 0xA4 ++#define OMAP_MCBSP_REG_WAKEUPEN 0xA8 + #define OMAP_MCBSP_REG_XCCR 0xAC + #define OMAP_MCBSP_REG_RCCR 0xB0 ++#define OMAP_MCBSP_REG_SSELCR 0xBC ++ ++#define OMAP_ST_REG_REV 0x00 ++#define OMAP_ST_REG_SYSCONFIG 0x10 ++#define OMAP_ST_REG_IRQSTATUS 0x18 ++#define OMAP_ST_REG_IRQENABLE 0x1C ++#define OMAP_ST_REG_SGAINCR 0x24 
++#define OMAP_ST_REG_SFIRCR 0x28 ++#define OMAP_ST_REG_SSELCR 0x2C + + #define AUDIO_MCBSP_DATAWRITE (OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1) + #define AUDIO_MCBSP_DATAREAD (OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1) +@@ -245,8 +261,50 @@ + #define RDISABLE 0x0001 + + /********************** McBSP SYSCONFIG bit definitions ********************/ ++#define CLOCKACTIVITY(value) ((value)<<8) ++#define SIDLEMODE(value) ((value)<<3) ++#define ENAWAKEUP 0x0004 + #define SOFTRST 0x0002 + ++/********************** McBSP IRQSTATUS bit definitions ********************/ ++#define IRQST_XRDY (1<<10) ++#define IRQST_RRDY (1<<3) ++ ++/********************** McBSP WAKEUPEN bit definitions *********************/ ++#define XEMPTYEOFEN 0x4000 ++#define XRDYEN 0x0400 ++#define XEOFEN 0x0200 ++#define XFSXEN 0x0100 ++#define XSYNCERREN 0x0080 ++#define RRDYEN 0x0008 ++#define REOFEN 0x0004 ++#define RFSREN 0x0002 ++#define RSYNCERREN 0x0001 ++#define WAKEUPEN_ALL (XRDYEN | RRDYEN) ++ ++/********************** McBSP SSELCR bit definitions ***********************/ ++#define SIDETONEEN 0x0400 ++ ++/********************** McBSP Sidetone SYSCONFIG bit definitions ***********/ ++#define ST_AUTOIDLE 0x0001 ++ ++/********************** McBSP Sidetone SGAINCR bit definitions *************/ ++#define ST_CH1GAIN(value) ((value<<16)) /* Bits 16:31 */ ++#define ST_CH0GAIN(value) (value) /* Bits 0:15 */ ++ ++/********************** McBSP Sidetone SFIRCR bit definitions **************/ ++#define ST_FIRCOEFF(value) (value) /* Bits 0:15 */ ++ ++/********************** McBSP Sidetone SSELCR bit definitions **************/ ++#define ST_COEFFWRDONE 0x0004 ++#define ST_COEFFWREN 0x0002 ++#define ST_SIDETONEEN 0x0001 ++ ++/********************** McBSP DMA operating modes **************************/ ++#define MCBSP_DMA_MODE_ELEMENT 0 ++#define MCBSP_DMA_MODE_THRESHOLD 1 ++#define MCBSP_DMA_MODE_FRAME 2 ++ + /* we don't do multichannel for now */ + struct omap_mcbsp_reg_cfg { + u16 spcr2; +@@ -340,7 +398,23 @@ struct omap_mcbsp_platform_data { + u8 dma_rx_sync, dma_tx_sync; + u16 rx_irq, tx_irq; + struct omap_mcbsp_ops *ops; +- char const *clk_name; ++ char const **clk_names; ++ int num_clks; ++#ifdef CONFIG_ARCH_OMAP34XX ++ /* Sidetone block for McBSP 2 and 3 */ ++ unsigned long phys_base_st; ++ u16 buffer_size; ++#endif ++}; ++ ++struct omap_mcbsp_st_data { ++ void __iomem *io_base_st; ++ int enabled; ++ int running; ++ s16 taps[128]; /* Sidetone filter coefficients */ ++ int nr_taps; /* Number of filter coefficients in use */ ++ s16 ch0gain; ++ s16 ch1gain; + }; + + struct omap_mcbsp { +@@ -372,7 +446,15 @@ struct omap_mcbsp { + /* Protect the field .free, while checking if the mcbsp is in use */ + spinlock_t lock; + struct omap_mcbsp_platform_data *pdata; +- struct clk *clk; ++ struct clk **clks; ++ int num_clks; ++ ++#ifdef CONFIG_ARCH_OMAP34XX ++ struct omap_mcbsp_st_data *st_data; ++ int dma_op_mode; ++ u16 max_tx_thres; ++ u16 max_rx_thres; ++#endif + }; + extern struct omap_mcbsp **mcbsp_ptr; + extern int omap_mcbsp_count; +@@ -381,10 +463,27 @@ int omap_mcbsp_init(void); + void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config, + int size); + void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config); ++#ifdef CONFIG_ARCH_OMAP34XX ++void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold); ++void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold); ++u16 omap_mcbsp_get_max_tx_threshold(unsigned int id); ++u16 omap_mcbsp_get_max_rx_threshold(unsigned int id); 
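
A minimal kernel-side sketch of how the new McBSP FIFO threshold helpers declared in this hunk might be used, assuming a driver that has already obtained the McBSP instance with omap_mcbsp_request(); the function and variable names in the sketch are illustrative only.

	/* Illustrative sketch: clamp a requested transmit threshold to the
	 * maximum advertised for this McBSP instance before programming it. */
	#include <linux/types.h>
	#include <mach/mcbsp.h>

	static void example_program_tx_threshold(unsigned int id, u16 wanted)
	{
		u16 max = omap_mcbsp_get_max_tx_threshold(id);

		if (wanted > max)
			wanted = max;

		omap_mcbsp_set_tx_threshold(id, wanted);
	}
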
++int omap_mcbsp_get_dma_op_mode(unsigned int id); ++#else ++static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold) ++{ } ++static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold) ++{ } ++static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; } ++static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; } ++static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; } ++#endif + int omap_mcbsp_request(unsigned int id); + void omap_mcbsp_free(unsigned int id); + void omap_mcbsp_start(unsigned int id); + void omap_mcbsp_stop(unsigned int id); ++void omap_mcbsp_xmit_enable(unsigned int id, u8 enable); ++void omap_mcbsp_recv_enable(unsigned int id, u8 enable); + void omap_mcbsp_xmit_word(unsigned int id, u32 word); + u32 omap_mcbsp_recv_word(unsigned int id); + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/mmc.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/mmc.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/mmc.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/mmc.h 2011-09-04 11:31:05.000000000 +0200 +@@ -57,6 +57,12 @@ struct omap_mmc_platform_data { + int (*suspend)(struct device *dev, int slot); + int (*resume)(struct device *dev, int slot); + ++ /* Return context loss count due to PM states changing */ ++ int (*get_context_loss_count)(struct device *dev); ++ ++ /* set/drop DVFS/PM constraints */ ++ void (*set_pm_constraints)(struct device *dev, int on); ++ + u64 dma_mask; + + struct omap_mmc_slot_data { +@@ -77,6 +83,13 @@ struct omap_mmc_platform_data { + + /* use the internal clock */ + unsigned internal_clock:1; ++ ++ /* Try to sleep or power off when possible */ ++ unsigned power_saving:1; ++ ++ /* MMC host capabilities */ ++ unsigned long caps; ++ + s16 power_pin; + + int switch_pin; /* gpio (card detect) */ +@@ -85,6 +98,8 @@ struct omap_mmc_platform_data { + int (* set_bus_mode)(struct device *dev, int slot, int bus_mode); + int (* set_power)(struct device *dev, int slot, int power_on, int vdd); + int (* get_ro)(struct device *dev, int slot); ++ int (*set_sleep)(struct device *dev, int slot, int sleep, ++ int vdd, int cardsleep); + + /* return MMC cover switch state, can be NULL if not supported. + * +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/mux.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/mux.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/mux.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/mux.h 2011-09-04 11:31:05.000000000 +0200 +@@ -795,7 +795,11 @@ enum omap34xx_index { + AF6_34XX_GPIO140_UP, + AE6_34XX_GPIO141, + AF5_34XX_GPIO142, +- AE5_34XX_GPIO143 ++ AE5_34XX_GPIO143, ++ ++ /* OMAP3 SDRC CKE signals to SDR/DDR ram chips */ ++ H16_34XX_SDRC_CKE0, ++ H17_34XX_SDRC_CKE1, + }; + + struct omap_mux_cfg { +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omapdev.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omapdev.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omapdev.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omapdev.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,58 @@ ++/* ++ * OMAP on-chip device: structure and function call definitions ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. 
++ * Copyright (C) 2007-2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++#ifndef ARCH_ARM_PLAT_OMAP_OMAPDEV_H ++#define ARCH_ARM_PLAT_OMAP_OMAPDEV_H ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++/** ++ * struct omapdev - OMAP on-chip hardware devices ++ * @name: name of the device - should match TRM ++ * @pwrdm: powerdomain that the device resides in ++ * @omap_chip: OMAP chips this omapdev is valid for ++ * @pdev_name: platform_device name associated with this omapdev (if any) ++ * @pdev_id: platform_device id associated with this omapdev (if any) ++ * ++ */ ++struct omapdev { ++ ++ const char *name; ++ ++ union { ++ const char *name; ++ struct powerdomain *ptr; ++ } pwrdm; ++ ++ const struct omap_chip_id omap_chip; ++ ++ const char *pdev_name; ++ ++ const int pdev_id; ++ ++ struct list_head node; ++}; ++ ++ ++void omapdev_init(struct omapdev **odev_list); ++ ++struct powerdomain *omapdev_get_pwrdm(struct omapdev *odev); ++ ++struct omapdev *omapdev_find_pdev(struct platform_device *pdev); ++ ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omapfb.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omapfb.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omapfb.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omapfb.h 2008-12-25 00:26:37.000000000 +0100 +@@ -276,8 +276,8 @@ typedef int (*omapfb_notifier_callback_t + void *fbi); + + struct omapfb_mem_region { +- u32 paddr; +- void __iomem *vaddr; ++ dma_addr_t paddr; ++ void *vaddr; + unsigned long size; + u8 type; /* OMAPFB_PLANE_MEM_* */ + unsigned alloc:1; /* allocated by the driver */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omap-pm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omap-pm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omap-pm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omap-pm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,406 @@ ++/* ++ * omap-pm.h - OMAP power management interface ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * Copyright (C) 2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * Interface developed by (in alphabetical order): Karthik Dasu, Jouni ++ * Högander, Tony Lindgren, Rajendra Nayak, Sakari Poussa, ++ * Veeramanikandan Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, ++ * Richard Woodruff ++ */ ++ ++#ifndef ASM_ARM_ARCH_OMAP_OMAP_PM_H ++#define ASM_ARM_ARCH_OMAP_OMAP_PM_H ++ ++#include ++#include ++ ++#include "powerdomain.h" ++ ++/** ++ * struct omap_opp - clock frequency-to-OPP ID table for DSP, MPU ++ * @rate: target clock rate ++ * @opp_id: OPP ID ++ * @min_vdd: minimum VDD1 voltage (in millivolts) for this OPP ++ * ++ * Operating performance point data. Can vary by OMAP chip and board. ++ */ ++struct omap_opp { ++ unsigned long rate; ++ u8 opp_id; ++ u16 vsel; ++}; ++ ++extern struct omap_opp *mpu_opps; ++extern struct omap_opp *dsp_opps; ++extern struct omap_opp *l3_opps; ++ ++/* ++ * agent_id values for use with omap_pm_set_min_bus_tput(): ++ * ++ * OCP_INITIATOR_AGENT is only valid for devices that can act as ++ * initiators -- it represents the device's L3 interconnect ++ * connection. 
OCP_TARGET_AGENT represents the device's L4 ++ * interconnect connection. ++ */ ++#define OCP_TARGET_AGENT 1 ++#define OCP_INITIATOR_AGENT 2 ++ ++/** ++ * omap_pm_if_early_init - OMAP PM init code called before clock fw init ++ * @mpu_opp_table: array ptr to struct omap_opp for MPU ++ * @dsp_opp_table: array ptr to struct omap_opp for DSP ++ * @l3_opp_table : array ptr to struct omap_opp for CORE ++ * ++ * Initialize anything that must be configured before the clock ++ * framework starts. The "_if_" is to avoid name collisions with the ++ * PM idle-loop code. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline int __init omap_pm_if_early_init(struct omap_opp *mpu_opp_table, ++ struct omap_opp *dsp_opp_table, ++ struct omap_opp *l3_opp_table) { return 0; } ++#else ++int __init omap_pm_if_early_init(struct omap_opp *mpu_opp_table, ++ struct omap_opp *dsp_opp_table, ++ struct omap_opp *l3_opp_table); ++#endif ++ ++/** ++ * omap_pm_if_init - OMAP PM init code called after clock fw init ++ * ++ * The main initialization code. OPP tables are passed in here. The ++ * "_if_" is to avoid name collisions with the PM idle-loop code. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline int __init omap_pm_if_init(void) { return 0; } ++#else ++int __init omap_pm_if_init(void); ++#endif ++ ++/** ++ * omap_pm_if_exit - OMAP PM exit code ++ * ++ * Exit code; currently unused. The "_if_" is to avoid name ++ * collisions with the PM idle-loop code. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_if_exit(void) { } ++#else ++void omap_pm_if_exit(void); ++#endif ++ ++/* ++ * Device-driver-originated constraints (via board-*.c files, platform_data) ++ */ ++ ++ ++/** ++ * omap_pm_set_max_mpu_wakeup_lat - set the maximum MPU wakeup latency ++ * @dev: struct device * requesting the constraint ++ * @t: maximum MPU wakeup latency in microseconds ++ * ++ * Request that the maximum interrupt latency for the MPU to be no ++ * greater than 't' microseconds. "Interrupt latency" in this case is ++ * defined as the elapsed time from the occurrence of a hardware or ++ * timer interrupt to the time when the device driver's interrupt ++ * service routine has been entered by the MPU. ++ * ++ * It is intended that underlying PM code will use this information to ++ * determine what power state to put the MPU powerdomain into, and ++ * possibly the CORE powerdomain as well, since interrupt handling ++ * code currently runs from SDRAM. Advanced PM or board*.c code may ++ * also configure interrupt controller priorities, OCP bus priorities, ++ * CPU speed(s), etc. ++ * ++ * This function will not affect device wakeup latency, e.g., time ++ * elapsed from when a device driver enables a hardware device with ++ * clk_enable(), to when the device is ready for register access or ++ * other use. To control this device wakeup latency, use ++ * set_max_dev_wakeup_lat() ++ * ++ * Multiple calls to set_max_mpu_wakeup_lat() will replace the ++ * previous t value. To remove the latency target for the MPU, call ++ * with t = -1. ++ * ++ * No return value. 
++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t) { } ++#else ++void omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t); ++#endif ++ ++ ++/** ++ * omap_pm_set_min_bus_tput - set minimum bus throughput needed by device ++ * @dev: struct device * requesting the constraint ++ * @tbus_id: interconnect to operate on (OCP_{INITIATOR,TARGET}_AGENT) ++ * @r: minimum throughput (in KiB/s) ++ * ++ * Request that the minimum data throughput on the OCP interconnect ++ * attached to device 'dev' interconnect agent 'tbus_id' be no less ++ * than 'r' KiB/s. ++ * ++ * It is expected that the OMAP PM or bus code will use this ++ * information to set the interconnect clock to run at the lowest ++ * possible speed that satisfies all current system users. The PM or ++ * bus code will adjust the estimate based on its model of the bus, so ++ * device driver authors should attempt to specify an accurate ++ * quantity for their device use case, and let the PM or bus code ++ * overestimate the numbers as necessary to handle request/response ++ * latency, other competing users on the system, etc. On OMAP2/3, if ++ * a driver requests a minimum L4 interconnect speed constraint, the ++ * code will also need to add an minimum L3 interconnect speed ++ * constraint, ++ * ++ * Multiple calls to set_min_bus_tput() will replace the previous rate ++ * value for this device. To remove the interconnect throughput ++ * restriction for this device, call with r = 0. ++ * ++ * No return value. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r) { } ++#else ++void omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r); ++#endif ++ ++/** ++ * omap_pm_set_min_mpu_freq - set minimum MPU frequency needed by device ++ * @dev: struct device * requesting the constraint ++ * @r: minimum MPU frequency (in Hz) ++ * ++ * Request that the minimum MPU frequency be no less than 'r' Hz. ++ * ++ * Multiple calls to set_min_mpu_freq() will replace the previous rate value ++ * for this device. To remove the frequency restriction for this device, ++ * call with r = 0. ++ * ++ * No return value. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_set_min_mpu_freq(struct device *dev, ++ unsigned long r) { } ++#else ++void omap_pm_set_min_mpu_freq(struct device *dev, unsigned long r); ++#endif ++ ++ ++/** ++ * omap_pm_set_max_dev_wakeup_lat - set the maximum device enable latency ++ * @dev: struct device * ++ * @t: maximum device wakeup latency in microseconds ++ * ++ * Request that the maximum amount of time necessary for a device to ++ * become accessible after its clocks are enabled should be no greater ++ * than 't' microseconds. Specifically, this represents the time from ++ * when a device driver enables device clocks with clk_enable(), to ++ * when the register reads and writes on the device will succeed. ++ * This function should be called before clk_disable() is called, ++ * since the power state transition decision may be made during ++ * clk_disable(). ++ * ++ * It is intended that underlying PM code will use this information to ++ * determine what power state to put the powerdomain enclosing this ++ * device into. ++ * ++ * Multiple calls to set_max_dev_wakeup_lat() will replace the ++ * previous wakeup latency values for this device. To remove the wakeup ++ * latency restriction for this device, call with t = -1. ++ * ++ * No return value. 
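
A short sketch, assuming a hypothetical device driver, of the set-then-remove pattern these constraint interfaces describe; the numeric latency and throughput targets are invented for the example and are not taken from the patch.

	/* Illustrative sketch: place PM constraints while a latency-sensitive
	 * transfer is active and release them afterwards, using the
	 * interfaces declared above.  Per their documentation, -1 and 0
	 * remove the MPU wakeup latency and bus throughput constraints. */
	#include <linux/device.h>
	#include <mach/omap-pm.h>

	static void example_start_streaming(struct device *dev)
	{
		omap_pm_set_max_mpu_wakeup_lat(dev, 100);	/* 100 us (assumed) */
		omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, 200000);
	}

	static void example_stop_streaming(struct device *dev)
	{
		omap_pm_set_max_mpu_wakeup_lat(dev, -1);
		omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, 0);
	}
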
++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_set_max_dev_wakeup_lat(struct device *dev, long t) { } ++#else ++void omap_pm_set_max_dev_wakeup_lat(struct device *dev, long t); ++#endif ++ ++ ++/** ++ * omap_pm_set_max_sdma_lat - set the maximum system DMA transfer start latency ++ * @dev: struct device * ++ * @t: maximum DMA transfer start latency in microseconds ++ * ++ * Request that the maximum system DMA transfer start latency for this ++ * device 'dev' should be no greater than 't' microseconds. "DMA ++ * transfer start latency" here is defined as the elapsed time from ++ * when a device (e.g., McBSP) requests that a system DMA transfer ++ * start or continue, to the time at which data starts to flow into ++ * that device from the system DMA controller. ++ * ++ * It is intended that underlying PM code will use this information to ++ * determine what power state to put the CORE powerdomain into. ++ * ++ * Since system DMA transfers may not involve the MPU, this function ++ * will not affect MPU wakeup latency. Use set_max_cpu_lat() to do ++ * so. Similarly, this function will not affect device wakeup latency ++ * -- use set_max_dev_wakeup_lat() to affect that. ++ * ++ * Multiple calls to set_max_sdma_lat() will replace the previous t ++ * value for this device. To remove the maximum DMA latency for this ++ * device, call with t = -1. ++ * ++ * No return value. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_set_max_sdma_lat(struct device *dev, long t) { } ++#else ++void omap_pm_set_max_sdma_lat(struct device *dev, long t); ++#endif ++ ++ ++/* ++ * DSP Bridge-specific constraints ++ */ ++ ++/** ++ * omap_pm_dsp_get_opp_table - get OPP->DSP clock frequency table ++ * ++ * Intended for use by DSPBridge. Returns an array of OPP->DSP clock ++ * frequency entries. The final item in the array should have .rate = ++ * .opp_id = 0. ++ */ ++const struct omap_opp *omap_pm_dsp_get_opp_table(void); ++ ++/** ++ * omap_pm_dsp_set_min_opp - receive desired OPP target ID from DSP Bridge ++ * @opp_id: target DSP OPP ID ++ * ++ * Set a minimum OPP ID for the DSP. This is intended to be called ++ * only from the DSP Bridge MPU-side driver. Unfortunately, the only ++ * information that code receives from the DSP/BIOS load estimator is the ++ * target OPP ID; hence, this interface. No return value. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_dsp_set_min_opp(u8 opp_id) { } ++#else ++void omap_pm_dsp_set_min_opp(u8 opp_id); ++#endif ++ ++/** ++ * omap_pm_dsp_get_opp - report the current DSP OPP ID ++ * ++ * Report the current OPP for the DSP. Since on OMAP3, the DSP and ++ * MPU share a single voltage domain, the OPP ID returned back may ++ * represent a higher DSP speed than the OPP requested via ++ * omap_pm_dsp_set_min_opp(). ++ * ++ * Returns the current VDD1 OPP ID, or 0 upon error. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline u8 omap_pm_dsp_get_opp(void) { return 0; } ++#else ++u8 omap_pm_dsp_get_opp(void); ++#endif ++ ++ ++/* ++ * CPUFreq-originated constraint ++ * ++ * In the future, this should be handled by custom OPP clocktype ++ * functions. ++ */ ++ ++/** ++ * omap_pm_cpu_get_freq_table - return a cpufreq_frequency_table array ptr ++ * ++ * Provide a frequency table usable by CPUFreq for the current chip/board. ++ * Returns a pointer to a struct cpufreq_frequency_table array or NULL ++ * upon error. 
++ */ ++struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void); ++ ++/** ++ * omap_pm_cpu_set_freq - set the current minimum MPU frequency ++ * @f: MPU frequency in Hz ++ * ++ * Set the current minimum CPU frequency. The actual CPU frequency ++ * used could end up higher if the DSP requested a higher OPP. ++ * Intended to be called by plat-omap/cpu_omap.c:omap_target(). No ++ * return value. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_cpu_set_freq(unsigned long f) { } ++#else ++void omap_pm_cpu_set_freq(unsigned long f); ++#endif ++ ++/** ++ * omap_pm_cpu_get_freq - report the current CPU frequency ++ * ++ * Returns the current MPU frequency, or 0 upon error. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline unsigned long omap_pm_cpu_get_freq(void) { return 0; } ++#else ++unsigned long omap_pm_cpu_get_freq(void); ++#endif ++ ++ ++/* ++ * Device context loss tracking ++ */ ++ ++/** ++ * omap_pm_get_dev_context_loss_count - return count of times dev has lost ctx ++ * @dev: struct device * ++ * ++ * This function returns the number of times that the device @dev has ++ * lost its internal context. This generally occurs on a powerdomain ++ * transition to OFF. Drivers use this as an optimization to avoid restoring ++ * context if the device hasn't lost it. To use, drivers should initially ++ * call this in their context save functions and store the result. Early in ++ * the driver's context restore function, the driver should call this function ++ * again, and compare the result to the stored counter. If they differ, the ++ * driver must restore device context. If the number of context losses ++ * exceeds the maximum positive integer, the function will wrap to 0 and ++ * continue counting. Returns the number of context losses for this device, ++ * or -EINVAL upon error. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline int omap_pm_get_dev_context_loss_count(struct device *dev) { return 0; } ++#else ++int omap_pm_get_dev_context_loss_count(struct device *dev); ++#endif ++ ++ ++/* ++ * Powerdomain usecounting hooks ++ */ ++ ++/** ++ * omap_pm_pwrdm_active - indicate that a power domain has become active ++ * @pwrdm: struct powerdomain * ++ * ++ * Notify the OMAP PM layer that the power domain 'pwrdm' has become active, ++ * presumably due to a device driver enabling an underlying clock. This ++ * function is intended to be called by the clockdomain code, not by drivers. ++ * No return value. ++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_pwrdm_active(struct powerdomain *pwrdm) { } ++#else ++void omap_pm_pwrdm_active(struct powerdomain *pwrdm); ++#endif ++ ++ ++/** ++ * omap_pm_pwrdm_inactive - indicate that a power domain has become inactive ++ * @pwrdm: struct powerdomain * ++ * ++ * Notify the OMAP PM layer that the power domain 'pwrdm' has become ++ * inactive, presumably due to a device driver disabling an underlying ++ * clock. This function is intended to be called by the clockdomain ++ * code, not by drivers. No return value. 
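
A brief sketch of the save/compare pattern described for omap_pm_get_dev_context_loss_count() above; the driver-data structure and the register-restore helper are hypothetical names introduced only for illustration.

	/* Illustrative sketch: store the context-loss count when saving
	 * context and reload registers on restore only if it has changed. */
	#include <linux/device.h>
	#include <mach/omap-pm.h>

	struct example_drvdata {
		struct device *dev;
		int ctx_loss_count;
	};

	static void example_reload_registers(struct example_drvdata *d)
	{
		/* ... re-program the device's registers here ... */
	}

	static void example_context_save(struct example_drvdata *d)
	{
		d->ctx_loss_count = omap_pm_get_dev_context_loss_count(d->dev);
	}

	static void example_context_restore(struct example_drvdata *d)
	{
		int now = omap_pm_get_dev_context_loss_count(d->dev);

		/* Context was lost (power domain hit OFF) since the save. */
		if (now != d->ctx_loss_count)
			example_reload_registers(d);
	}
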
++ */ ++#ifdef CONFIG_OMAP_PM_NONE ++static inline void omap_pm_pwrdm_inactive(struct powerdomain *pwrdm) { } ++#else ++void omap_pm_pwrdm_inactive(struct powerdomain *pwrdm); ++#endif ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omap34xx.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omap34xx.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/omap34xx.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/omap34xx.h 2011-09-04 11:31:05.000000000 +0200 +@@ -47,8 +47,36 @@ + #define OMAP34XX_GPMC_BASE 0x6E000000 + #define OMAP343X_SCM_BASE 0x48002000 + #define OMAP343X_CTRL_BASE OMAP343X_SCM_BASE ++#define OMAP34XX_HS_BASE (L4_34XX_BASE + 0x65000) + + #define OMAP34XX_IC_BASE 0x48200000 ++ ++#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000) ++#define OMAP3430_ISP_CBUFF_BASE (OMAP3430_ISP_BASE + 0x0100) ++#define OMAP3430_ISP_CCP2_BASE (OMAP3430_ISP_BASE + 0x0400) ++#define OMAP3430_ISP_CCDC_BASE (OMAP3430_ISP_BASE + 0x0600) ++#define OMAP3430_ISP_HIST_BASE (OMAP3430_ISP_BASE + 0x0A00) ++#define OMAP3430_ISP_H3A_BASE (OMAP3430_ISP_BASE + 0x0C00) ++#define OMAP3430_ISP_PREV_BASE (OMAP3430_ISP_BASE + 0x0E00) ++#define OMAP3430_ISP_RESZ_BASE (OMAP3430_ISP_BASE + 0x1000) ++#define OMAP3430_ISP_SBL_BASE (OMAP3430_ISP_BASE + 0x1200) ++#define OMAP3430_ISP_MMU_BASE (OMAP3430_ISP_BASE + 0x1400) ++#define OMAP3430_ISP_CSI2A_BASE (OMAP3430_ISP_BASE + 0x1800) ++#define OMAP3430_ISP_CSI2PHY_BASE (OMAP3430_ISP_BASE + 0x1970) ++ ++#define OMAP3430_ISP_END (OMAP3430_ISP_BASE + 0x06F) ++#define OMAP3430_ISP_CBUFF_END (OMAP3430_ISP_CBUFF_BASE + 0x077) ++#define OMAP3430_ISP_CCP2_END (OMAP3430_ISP_CCP2_BASE + 0x1EF) ++#define OMAP3430_ISP_CCDC_END (OMAP3430_ISP_CCDC_BASE + 0x0A7) ++#define OMAP3430_ISP_HIST_END (OMAP3430_ISP_HIST_BASE + 0x047) ++#define OMAP3430_ISP_H3A_END (OMAP3430_ISP_H3A_BASE + 0x05F) ++#define OMAP3430_ISP_PREV_END (OMAP3430_ISP_PREV_BASE + 0x09F) ++#define OMAP3430_ISP_RESZ_END (OMAP3430_ISP_RESZ_BASE + 0x0AB) ++#define OMAP3430_ISP_SBL_END (OMAP3430_ISP_SBL_BASE + 0x0FB) ++#define OMAP3430_ISP_MMU_END (OMAP3430_ISP_MMU_BASE + 0x06F) ++#define OMAP3430_ISP_CSI2A_END (OMAP3430_ISP_CSI2A_BASE + 0x16F) ++#define OMAP3430_ISP_CSI2PHY_END (OMAP3430_ISP_CSI2PHY_BASE + 0x007) ++ + #define OMAP34XX_IVA_INTC_BASE 0x40000000 + #define OMAP34XX_HSUSB_OTG_BASE (L4_34XX_BASE + 0xAB000) + #define OMAP34XX_HSUSB_HOST_BASE (L4_34XX_BASE + 0x64000) +@@ -56,7 +84,6 @@ + #define OMAP34XX_SR1_BASE 0x480C9000 + #define OMAP34XX_SR2_BASE 0x480CB000 + +-#define OMAP34XX_CAMERA_BASE (L4_34XX_BASE + 0xBC000) + #define OMAP34XX_MAILBOX_BASE (L4_34XX_BASE + 0x94000) + + +@@ -73,5 +100,23 @@ + #define OMAP34XX_DSP_MEM_BASE (OMAP34XX_DSP_BASE + 0x0) + #define OMAP34XX_DSP_IPI_BASE (OMAP34XX_DSP_BASE + 0x1000000) + #define OMAP34XX_DSP_MMU_BASE (OMAP34XX_DSP_BASE + 0x2000000) ++ ++/* VDD1 OPPS */ ++#define VDD1_OPP1 0x1 ++#define VDD1_OPP2 0x2 ++#define VDD1_OPP3 0x3 ++#define VDD1_OPP4 0x4 ++#define VDD1_OPP5 0x5 ++ ++/* VDD2 OPPS */ ++#define VDD2_OPP1 0x1 ++#define VDD2_OPP2 0x2 ++#define VDD2_OPP3 0x3 ++ ++#define MIN_VDD1_OPP VDD1_OPP1 ++#define MAX_VDD1_OPP VDD1_OPP5 ++#define MIN_VDD2_OPP VDD2_OPP1 ++#define MAX_VDD2_OPP VDD2_OPP3 ++ + #endif /* __ASM_ARCH_OMAP34XX_H */ + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/pm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/pm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/pm.h 2011-09-04 11:32:10.013211266 
+0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/pm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -135,19 +135,25 @@ void clk_allow_idle(struct clk *clk); + + extern void omap_pm_idle(void); + extern void omap_pm_suspend(void); ++extern void omap_sram_idle(void); ++extern int pm_check_idle(void); + #ifdef CONFIG_PM + extern void omap2_block_sleep(void); + extern void omap2_allow_sleep(void); ++int get_last_off_on_transaction_id(struct device *dev); + #else + static inline void omap2_block_sleep(void) { } + static inline void omap2_allow_sleep(void) { } ++static inline int get_last_off_on_transaction_id(struct device *dev) { return 0; } + #endif ++extern int omap3_can_sleep(void); + extern void omap730_cpu_suspend(unsigned short, unsigned short); + extern void omap1510_cpu_suspend(unsigned short, unsigned short); + extern void omap1610_cpu_suspend(unsigned short, unsigned short); + extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, + void __iomem *sdrc_power); + extern void omap34xx_cpu_suspend(u32 *addr, int save_state); ++extern void save_secure_ram_context(u32 *addr); + extern void omap730_idle_loop_suspend(void); + extern void omap1510_idle_loop_suspend(void); + extern void omap1610_idle_loop_suspend(void); +@@ -163,6 +169,7 @@ extern unsigned int omap1510_idle_loop_s + extern unsigned int omap1610_idle_loop_suspend_sz; + extern unsigned int omap24xx_idle_loop_suspend_sz; + extern unsigned int omap34xx_suspend_sz; ++extern unsigned int save_secure_ram_context_sz; + + #ifdef CONFIG_OMAP_SERIAL_WAKE + extern void omap_serial_wake_trigger(int enable); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/powerdomain.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/powerdomain.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/powerdomain.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/powerdomain.h 2011-09-04 11:31:05.000000000 +0200 +@@ -117,6 +117,13 @@ struct powerdomain { + + struct list_head node; + ++ int state; ++ unsigned state_counter[4]; ++ ++#ifdef CONFIG_PM_DEBUG ++ s64 timer; ++ s64 state_timer[4]; ++#endif + }; + + +@@ -126,7 +133,8 @@ int pwrdm_register(struct powerdomain *p + int pwrdm_unregister(struct powerdomain *pwrdm); + struct powerdomain *pwrdm_lookup(const char *name); + +-int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm)); ++int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), ++ void *user); + + int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); + int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); +@@ -164,4 +172,9 @@ bool pwrdm_has_hdwr_sar(struct powerdoma + + int pwrdm_wait_transition(struct powerdomain *pwrdm); + ++int pwrdm_state_switch(struct powerdomain *pwrdm); ++int pwrdm_clkdm_state_switch(struct clockdomain *clkdm); ++int pwrdm_pre_transition(void); ++int pwrdm_post_transition(void); ++ + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/prcm.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/prcm.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/prcm.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/prcm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -30,6 +30,11 @@ void cm_write_mod_reg(u32 val, s16 modul + u32 cm_read_mod_reg(s16 module, u16 idx); + u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx); + ++#define START_PADCONF_SAVE 0x2 
++#define PADCONF_SAVE_DONE 0x1 ++ ++void omap3_prcm_save_context(void); ++void omap3_prcm_restore_context(void); + #endif + + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/resource.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/resource.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/resource.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/resource.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,84 @@ ++/* ++ * linux/include/asm-arm/arch-omap/resource.h ++ * Structure definitions for Shared resource Framework ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Written by Rajendra Nayak ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ * ++ * History: ++ * ++ */ ++ ++#ifndef __ARCH_ARM_OMAP_RESOURCE_H ++#define __ARCH_ARM_OMAP_RESOURCE_H ++ ++#include ++#include ++#include ++#include ++ ++#define RES_DEFAULTLEVEL 0x0 ++ ++struct shared_resource_ops; /* forward declaration */ ++ ++/* Used to model a Shared Multilevel Resource */ ++struct shared_resource { ++ /* Resource name */ ++ char *name; ++ /* Used to represent the OMAP chip types containing this res */ ++ const struct omap_chip_id omap_chip; ++ /* Total no of users at any point of this resource */ ++ u8 no_of_users; ++ /* Current level of this resource */ ++ u32 curr_level; ++ /* Used to store any resource specific data */ ++ void *resource_data; ++ /* List of all the current users for this resource */ ++ struct list_head users_list; ++ /* Shared resource operations */ ++ struct shared_resource_ops *ops; ++ struct list_head node; ++}; ++ ++struct shared_resource_ops { ++ /* Init function for the resource */ ++ void (*init)(struct shared_resource *res); ++ /* Function to change the level of the resource */ ++ int (*change_level)(struct shared_resource *res, u32 target_level); ++ /* Function to validate the requested level of the resource */ ++ int (*validate_level)(struct shared_resource *res, u32 target_level); ++}; ++ ++/* Used to represent a user of a shared resource */ ++struct users_list { ++ /* Device pointer used to uniquely identify the user */ ++ struct device *dev; ++ /* Current level as requested for the resource by the user */ ++ u32 level; ++ struct list_head node; ++ u8 usage; ++}; ++ ++extern struct shared_resource *resources_omap[]; ++/* Shared resource Framework API's */ ++void resource_init(struct shared_resource **resources); ++int resource_refresh(void); ++int resource_register(struct shared_resource *res); ++int resource_unregister(struct shared_resource *res); ++int resource_request(const char *name, struct device *dev, ++ unsigned long level); ++int resource_release(const char *name, struct device *dev); ++int resource_request_locked(const char *name, struct device *dev, ++ unsigned long level); ++int resource_release_locked(const char *name, struct device *dev); ++int resource_get_level(const char *name); ++ ++#endif /* __ARCH_ARM_OMAP_RESOURCE_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/sdrc.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/sdrc.h +--- 
linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/sdrc.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/sdrc.h 2011-09-04 11:31:05.000000000 +0200 +@@ -31,14 +31,18 @@ + #define SDRC_POWER 0x070 + #define SDRC_MCFG_0 0x080 + #define SDRC_MR_0 0x084 ++#define SDRC_EMR2_0 0x08c + #define SDRC_ACTIM_CTRL_A_0 0x09c + #define SDRC_ACTIM_CTRL_B_0 0x0a0 + #define SDRC_RFR_CTRL_0 0x0a4 ++#define SDRC_MANUAL_0 0x0a8 + #define SDRC_MCFG_1 0x0B0 + #define SDRC_MR_1 0x0B4 ++#define SDRC_EMR2_1 0x0BC + #define SDRC_ACTIM_CTRL_A_1 0x0C4 + #define SDRC_ACTIM_CTRL_B_1 0x0C8 + #define SDRC_RFR_CTRL_1 0x0D4 ++#define SDRC_MANUAL_1 0x0D8 + + /* + * These values represent the number of memory clock cycles between +@@ -70,6 +74,12 @@ + #define SDRC_RFR_CTRL_110MHz (0x0002da01 | 1) /* Need to calc */ + #define SDRC_RFR_CTRL_BYPASS (0x00005000 | 1) /* Need to calc */ + ++/* SDRC POWER regbits */ ++#define SDRC_POWER_AUTOCOUNT_SHIFT 8 ++#define SDRC_POWER_AUTOCOUNT_MASK (0xffff << SDRC_POWER_AUTOCOUNT_SHIFT) ++#define SDRC_POWER_CLKCTRL_SHIFT 4 ++#define SDRC_POWER_CLKCTRL_MASK (0x3 << SDRC_POWER_CLKCTRL_SHIFT) ++#define SDRC_SELF_REFRESH_ON_AUTOCOUNT (0x2 << SDRC_POWER_CLKCTRL_SHIFT) + + /* + * SMS register access +@@ -108,9 +118,13 @@ struct omap_sdrc_params { + u32 mr; + }; + +-void __init omap2_sdrc_init(struct omap_sdrc_params *); +-struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r); +- ++void omap2_sms_save_context(void); ++void omap2_sms_restore_context(void); ++void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0, ++ struct omap_sdrc_params *sdrc_cs1); ++int omap2_sdrc_get_params(unsigned long r, ++ struct omap_sdrc_params **sdrc_cs0, ++ struct omap_sdrc_params **sdrc_cs1); + #ifdef CONFIG_ARCH_OMAP2 + + struct memory_timings { +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/serial.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/serial.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/serial.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/serial.h 2011-09-04 11:31:05.000000000 +0200 +@@ -47,6 +47,7 @@ extern void omap_uart_check_wakeup(void) + extern void omap_uart_prepare_suspend(void); + extern void omap_uart_prepare_idle(int num); + extern void omap_uart_resume_idle(int num); ++extern void omap_uart_enable_irqs(int enable); + #endif + + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/sram.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/sram.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/sram.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/sram.h 2011-09-04 11:31:05.000000000 +0200 +@@ -21,9 +21,12 @@ extern void omap2_sram_reprogram_sdrc(u3 + u32 mem_type); + extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass); + +-extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, +- u32 sdrc_actim_ctrla, +- u32 sdrc_actim_ctrlb, u32 m2); ++extern u32 omap3_configure_core_dpll( ++ u32 m2, u32 unlock_dll, u32 f, u32 inc, ++ u32 sdrc_rfr_ctrl_0, u32 sdrc_mr_0, ++ u32 sdrc_rfr_ctrl_1, u32 sdrc_mr_1); ++ ++extern void omap3_sram_restore_context(void); + + /* Do not use these */ + extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl); +@@ -58,9 +61,18 @@ extern void omap243x_sram_reprogram_sdrc + extern unsigned long omap243x_sram_reprogram_sdrc_sz; + + +-extern u32 omap3_sram_configure_core_dpll(u32 
sdrc_rfr_ctrl, +- u32 sdrc_actim_ctrla, +- u32 sdrc_actim_ctrlb, u32 m2); ++extern u32 omap3_sram_configure_core_dpll( ++ u32 m2, u32 unlock_dll, u32 f, u32 inc, ++ u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0, ++ u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0, ++ u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1, ++ u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1); + extern unsigned long omap3_sram_configure_core_dpll_sz; + ++#ifdef CONFIG_PM ++extern void omap_push_sram_idle(void); ++#else ++static inline void omap_push_sram_idle(void) {} ++#endif /* CONFIG_PM */ ++ + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/ssi.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/ssi.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/ssi.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/ssi.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,303 @@ ++/* ++ * mach/ssi.h ++ * ++ * Hardware definitions for SSI. ++ * ++ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. ++ * ++ * Contact: Carlos Chinea ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#ifndef __SSI_REGS_H__ ++#define __SSI_REGS_H__ ++ ++#define SSI_PORT_OFFSET 0x1000 ++ ++/* ++ * GDD base addr : 0x48059000 ++ */ ++#define SSI_GDD_OFFSET 0x1000 ++#define SSI_GDD_BASE SSI_GDD_OFFSET /* 0x9000 */ ++ ++/* ++ * SST base addr: ++ * port 1: 0x4805a000 ++ * port 2: 0x4805b000 ++ */ ++#define SSI_SST_OFFSET 0x2000 ++#define SSI_SST_BASE(port) (SSI_SST_OFFSET + (((port) - 1) *\ ++ (SSI_PORT_OFFSET))) ++/* ++ * SSR base addr: ++ * port 1: 0x4805a800 ++ * port 2: 0x4805b800 ++ */ ++#define SSI_SSR_OFFSET 0x2800 ++#define SSI_SSR_BASE(port) (SSI_SSR_OFFSET + (((port) - 1) *\ ++ (SSI_PORT_OFFSET))) ++/* ++ * SSI SYS registers ++ */ ++#define SSI_SYS_REVISION_REG 0x0000 ++# define SSI_REV_MASK 0x000000ff ++# define SSI_REV_MAJOR 0xf0 ++# define SSI_REV_MINOR 0x0f ++ ++#define SSI_SYS_SYSCONFIG_REG 0x0010 ++# define SSI_AUTOIDLE (1 << 0) ++# define SSI_SOFTRESET (1 << 1) ++# define SSI_SIDLEMODE_FORCE 0 ++# define SSI_SIDLEMODE_NO (1 << 3) ++# define SSI_SIDLEMODE_SMART (1 << 4) ++# define SSI_SIDLEMODE_MASK 0x00000018 ++# define SSI_MIDLEMODE_FORCE 0 ++# define SSI_MIDLEMODE_NO (1 << 12) ++# define SSI_MIDLEMODE_SMART (1 << 13) ++# define SSI_MIDLEMODE_MASK 0x00003000 ++ ++#define SSI_SYS_SYSSTATUS_REG 0x0014 ++# define SSI_RESETDONE 1 ++ ++#define SSI_SYS_MPU_STATUS_BASE 0x0808 ++#define SSI_SYS_MPU_STATUS_PORT_OFFSET 0x10 ++#define SSI_SYS_MPU_STATUS_IRQ_OFFSET 2 ++ ++#define SSI_SYS_MPU_STATUS_REG(port, irq) \ ++ (SSI_SYS_MPU_STATUS_BASE +\ ++ ((((port) - 1) * SSI_SYS_MPU_STATUS_PORT_OFFSET) +\ ++ ((irq) * SSI_SYS_MPU_STATUS_IRQ_OFFSET))) ++ ++#define SSI_SYS_MPU_ENABLE_BASE 0X080c ++#define SSI_SYS_MPU_ENABLE_PORT_OFFSET 0x10 ++#define SSI_SYS_MPU_ENABLE_IRQ_OFFSET 8 ++ ++#define SSI_SYS_MPU_ENABLE_REG(port, irq) \ ++ 
(SSI_SYS_MPU_ENABLE_BASE +\ ++ ((((port) - 1) * SSI_SYS_MPU_ENABLE_PORT_OFFSET) +\ ++ ((irq) * SSI_SYS_MPU_ENABLE_IRQ_OFFSET))) ++ ++# define SSI_SST_DATAACCEPT(channel) (1 << (channel)) ++# define SSI_SSR_DATAAVAILABLE(channel) (1 << ((channel) + 8)) ++# define SSI_SSR_DATAOVERRUN(channel) (1 << ((channel) + 16)) ++# define SSI_ERROROCCURED (1 << 24) ++# define SSI_BREAKDETECTED (1 << 25) ++ ++#define SSI_SYS_GDD_MPU_IRQ_STATUS_REG 0x0800 ++#define SSI_SYS_GDD_MPU_IRQ_ENABLE_REG 0x0804 ++# define SSI_GDD_LCH(channel) (1 << (channel)) ++ ++#define SSI_SYS_WAKE_OFFSET 0x10 ++#define SSI_SYS_WAKE_BASE 0x0c00 ++#define SSI_SYS_WAKE_REG(port) (SSI_SYS_WAKE_BASE +\ ++ (((port) - 1) * SSI_SYS_WAKE_OFFSET)) ++#define SSI_SYS_CLEAR_WAKE_BASE 0x0c04 ++#define SSI_SYS_CLEAR_WAKE_REG(port) (SSI_SYS_CLEAR_WAKE_BASE +\ ++ (((port) - 1) * SSI_SYS_WAKE_OFFSET)) ++#define SSI_SYS_SET_WAKE_BASE 0x0c08 ++#define SSI_SYS_SET_WAKE_REG(port) (SSI_SYS_SET_WAKE_BASE +\ ++ (((port) - 1) * SSI_SYS_WAKE_OFFSET)) ++# define SSI_WAKE(channel) (1 << (channel)) ++# define SSI_WAKE_MASK 0xff ++ ++/* ++ * SSI SST registers ++ */ ++#define SSI_SST_ID_REG(port) (SSI_SST_BASE(port) + 0x0000) ++#define SSI_SST_MODE_REG(port) (SSI_SST_BASE(port) + 0x0004) ++# define SSI_MODE_VAL_MASK 3 ++# define SSI_MODE_SLEEP 0 ++# define SSI_MODE_STREAM 1 ++# define SSI_MODE_FRAME 2 ++# define SSI_MODE_MULTIPOINTS 3 ++#define SSI_SST_FRAMESIZE_REG(port) (SSI_SST_BASE(port) + 0x0008) ++# define SSI_FRAMESIZE_DEFAULT 31 ++#define SSI_SST_TXSTATE_REG(port) (SSI_SST_BASE(port) + 0X000c) ++# define TXSTATE_IDLE 0 ++#define SSI_SST_BUFSTATE_REG(port) (SSI_SST_BASE(port) + 0x0010) ++# define NOTFULL(channel) (1 << (channel)) ++#define SSI_SST_DIVISOR_REG(port) (SSI_SST_BASE(port) + 0x0018) ++# define SSI_DIVISOR_DEFAULT 1 ++ ++#define SSI_SST_BREAK_REG(port) (SSI_SST_BASE(port) + 0x0020) ++#define SSI_SST_CHANNELS_REG(port) (SSI_SST_BASE(port) + 0x0024) ++# define SSI_CHANNELS_DEFAULT 4 ++ ++#define SSI_SST_ARBMODE_REG(port) (SSI_SST_BASE(port) + 0x0028) ++# define SSI_ARBMODE_ROUNDROBIN 0 ++# define SSI_ARBMODE_PRIORITY 1 ++ ++#define SSI_SST_BUFFER_BASE(port) (SSI_SST_BASE(port) + 0x0080) ++#define SSI_SST_BUFFER_CH_REG(port, channel) (SSI_SST_BUFFER_BASE(port) +\ ++ ((channel) * 4)) ++ ++#define SSI_SST_SWAPBUF_BASE(port) (SSI_SST_BASE(port) + 0X00c0) ++#define SSI_SST_SWAPBUF_CH_REG(port, channel) (SSI_SST_SWAPBUF_BASE(port) +\ ++ ((channel) * 4)) ++/* ++ * SSI SSR registers ++ */ ++#define SSI_SSR_ID_REG(port) (SSI_SSR_BASE(port) + 0x0000) ++#define SSI_SSR_MODE_REG(port) (SSI_SSR_BASE(port) + 0x0004) ++#define SSI_SSR_FRAMESIZE_REG(port) (SSI_SSR_BASE(port) + 0x0008) ++#define SSI_SSR_RXSTATE_REG(port) (SSI_SSR_BASE(port) + 0x000c) ++#define SSI_SSR_BUFSTATE_REG(port) (SSI_SSR_BASE(port) + 0x0010) ++# define NOTEMPTY(channel) (1 << (channel)) ++#define SSI_SSR_BREAK_REG(port) (SSI_SSR_BASE(port) + 0x001c) ++#define SSI_SSR_ERROR_REG(port) (SSI_SSR_BASE(port) + 0x0020) ++#define SSI_SSR_ERRORACK_REG(port) (SSI_SSR_BASE(port) + 0x0024) ++#define SSI_SSR_OVERRUN_REG(port) (SSI_SSR_BASE(port) + 0x002c) ++#define SSI_SSR_OVERRUNACK_REG(port) (SSI_SSR_BASE(port) + 0x0030) ++#define SSI_SSR_TIMEOUT_REG(port) (SSI_SSR_BASE(port) + 0x0034) ++# define SSI_TIMEOUT_DEFAULT 0 ++#define SSI_SSR_CHANNELS_REG(port) (SSI_SSR_BASE(port) + 0x0028) ++ ++#define SSI_SSR_BUFFER_BASE(port) (SSI_SSR_BASE(port) + 0x0080) ++#define SSI_SSR_BUFFER_CH_REG(port, channel) (SSI_SSR_BUFFER_BASE(port) +\ ++ ((channel) * 4)) ++ ++#define SSI_SSR_SWAPBUF_BASE(port) 
(SSI_SSR_BASE(port) + 0x00c0) ++#define SSI_SSR_SWAPBUF_CH_REG(port, channel) (SSI_SSR_SWAPBUF_BASE +\ ++ ((channel) * 4)) ++/* ++ * SSI GDD registers ++ */ ++#define SSI_GDD_HW_ID_REG (SSI_GDD_BASE + 0x0000) ++#define SSI_GDD_PPORT_ID_REG (SSI_GDD_BASE + 0x0010) ++#define SSI_GDD_MPORT_ID_REG (SSI_GDD_BASE + 0x0014) ++ ++#define SSI_GDD_PPORT_SR_REG (SSI_GDD_BASE + 0x0020) ++# define SSI_PPORT_ACTIVE_LCH_NUMBER_MASK 0Xff ++ ++#define SSI_GDD_MPORT_SR_REG (SSI_GDD_BASE + 0x0024) ++# define SSI_MPORT_ACTIVE_LCH_NUMBER_MASK 0xff ++ ++#define SSI_GDD_TEST_REG (SSI_GDD_BASE + 0x0040) ++# define SSI_TEST 1 ++ ++#define SSI_GDD_GCR_REG (SSI_GDD_BASE + 0x0100) ++# define SSI_CLK_AUTOGATING_ON (1 << 3) ++# define SSI_FREE (1 << 2) ++# define SSI_SWITCH_OFF (1 << 0) ++ ++#define SSI_GDD_GRST_REG (SSI_GDD_BASE + 0x0200) ++# define SSI_SWRESET 1 ++ ++#define SSI_GDD_CSDP_BASE (SSI_GDD_BASE + 0x0800) ++#define SSI_GDD_CSDP_OFFSET 0x40 ++#define SSI_GDD_CSDP_REG(channel) (SSI_GDD_CSDP_BASE +\ ++ ((channel) * SSI_GDD_CSDP_OFFSET)) ++# define SSI_DST_BURST_EN_MASK 0xC000 ++# define SSI_DST_SINGLE_ACCESS0 0 ++# define SSI_DST_SINGLE_ACCESS (1 << 14) ++# define SSI_DST_BURST_4X32_BIT (2 << 14) ++# define SSI_DST_BURST_8x32_BIT (3 << 14) ++ ++# define SSI_DST_MASK 0x1e00 ++# define SSI_DST_MEMORY_PORT (8 << 9) ++# define SSI_DST_PERIPHERAL_PORT (9 << 9) ++ ++# define SSI_SRC_BURST_EN_MASK 0x0180 ++# define SSI_SRC_SINGLE_ACCESS0 0 ++# define SSI_SRC_SINGLE_ACCESS (1 << 7) ++# define SSI_SRC_BURST_4x32_BIT (2 << 7) ++# define SSI_SRC_BURST_8x32_BIT (3 << 7) ++ ++# define SSI_SRC_MASK 0x003c ++# define SSI_SRC_MEMORY_PORT (8 << 2) ++# define SSI_SRC_PERIPHERAL_PORT (9 << 2) ++ ++# define SSI_DATA_TYPE_MASK 3 ++# define SSI_DATA_TYPE_S32 2 ++ ++#define SSI_GDD_CCR_BASE (SSI_GDD_BASE + 0x0802) ++#define SSI_GDD_CCR_OFFSET 0x40 ++#define SSI_GDD_CCR_REG(channel) (SSI_GDD_CCR_BASE +\ ++ ((channel) * SSI_GDD_CCR_OFFSET)) ++# define SSI_DST_AMODE_MASK (3 << 14) ++# define SSI_DST_AMODE_CONST 0 ++# define SSI_DST_AMODE_POSTINC (1 << 12) ++ ++# define SSI_SRC_AMODE_MASK (3 << 12) ++# define SSI_SRC_AMODE_CONST 0 ++# define SSI_SRC_AMODE_POSTINC (1 << 12) ++ ++# define SSI_CCR_ENABLE (1 << 7) ++ ++# define SSI_CCR_SYNC_MASK 0X001f ++ ++#define SSI_GDD_CICR_BASE (SSI_GDD_BASE + 0x0804) ++#define SSI_GDD_CICR_OFFSET 0x40 ++#define SSI_GDD_CICR_REG(channel) (SSI_GDD_CICR_BASE +\ ++ ((channel) * SSI_GDD_CICR_OFFSET)) ++# define SSI_BLOCK_IE (1 << 5) ++# define SSI_HALF_IE (1 << 2) ++# define SSI_TOUT_IE (1 << 0) ++ ++#define SSI_GDD_CSR_BASE (SSI_GDD_BASE + 0x0806) ++#define SSI_GDD_CSR_OFFSET 0x40 ++#define SSI_GDD_CSR_REG(channel) (SSI_GDD_CSR_BASE +\ ++ ((channel) * SSI_GDD_CSR_OFFSET)) ++# define SSI_CSR_SYNC (1 << 6) ++# define SSI_CSR_BLOCK (1 << 5) ++# define SSI_CSR_HALF (1 << 2) ++# define SSI_CSR_TOUR (1 << 0) ++ ++#define SSI_GDD_CSSA_BASE (SSI_GDD_BASE + 0x0808) ++#define SSI_GDD_CSSA_OFFSET 0x40 ++#define SSI_GDD_CSSA_REG(channel) (SSI_GDD_CSSA_BASE +\ ++ ((channel) * SSI_GDD_CSSA_OFFSET)) ++ ++#define SSI_GDD_CDSA_BASE (SSI_GDD_BASE + 0x080c) ++#define SSI_GDD_CDSA_OFFSET 0x40 ++#define SSI_GDD_CDSA_REG(channel) (SSI_GDD_CDSA_BASE +\ ++ ((channel) * SSI_GDD_CDSA_OFFSET)) ++ ++#define SSI_GDD_CEN_BASE (SSI_GDD_BASE + 0x0810) ++#define SSI_GDD_CEN_OFFSET 0x40 ++#define SSI_GDD_CEN_REG(channel) (SSI_GDD_CEN_BASE +\ ++ ((channel) * SSI_GDD_CEN_OFFSET)) ++ ++#define SSI_GDD_CSAC_BASE (SSI_GDD_BASE + 0x0818) ++#define SSI_GDD_CSAC_OFFSET 0x40 ++#define SSI_GDD_CSAC_REG(channel) (SSI_GDD_CSAC_BASE +\ ++ ((channel) 
* SSI_GDD_CSAC_OFFSET)) ++ ++#define SSI_GDD_CDAC_BASE (SSI_GDD_BASE + 0x081a) ++#define SSI_GDD_CDAC_OFFSET 0x40 ++#define SSI_GDD_CDAC_REG(channel) (SSI_GDD_CDAC_BASE +\ ++ ((channel) * SSI_GDD_CDAC_OFFSET)) ++ ++#define SSI_GDD_CLNK_CTRL_BASE (SSI_GDD_BASE + 0x0828) ++#define SSI_GDD_CLNK_CTRL_OFFSET 0x40 ++#define SSI_GDD_CLNK_CTRL_REG(channel) (SSI_GDD_CLNK_CTRL_BASE +\ ++ ((channel) * SSI_GDD_CLNK_CTRL_OFFSET)) ++# define SSI_ENABLE_LNK (1 << 15) ++# define SSI_STOP_LNK (1 << 14) ++# define NEXT_CH_ID_MASK 0xf ++ ++/** ++ * struct omap_ssi_config - SSI board configuration ++ * @num_ports: Number of ports in use ++ * @cawake_line: Array of cawake gpio lines ++ */ ++struct omap_ssi_board_config { ++ unsigned int num_ports; ++ int cawake_gpio[2]; ++}; ++ ++extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config); ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/usb.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/usb.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/usb.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/usb.h 2011-09-04 11:31:05.000000000 +0200 +@@ -27,7 +27,8 @@ + #define UDC_BASE OMAP2_UDC_BASE + #define OMAP_OHCI_BASE OMAP2_OHCI_BASE + +-void __init usb_musb_init(void); ++struct musb_board_data; ++void __init usb_musb_init(struct musb_board_data *); + void __init usb_ehci_init(void); + + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/usb-musb.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/usb-musb.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/usb-musb.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/usb-musb.h 2011-09-04 11:31:05.000000000 +0200 +@@ -29,7 +29,8 @@ + #ifndef __ASM_ARCH_OMAP_USB_MUSB_H + #define __ASM_ARCH_OMAP_USB_MUSB_H + +-extern void usb_musb_init(void); ++struct musb_board_data; ++extern void usb_musb_init(struct musb_board_data *); + + #endif /* __ASM_ARCH_OMAP_USB_MUSB_H */ + +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/vram.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/vram.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/vram.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/vram.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * File: arch/arm/plat-omap/include/mach/vram.h ++ * ++ * Copyright (C) 2009 Nokia Corporation ++ * Author: Tomi Valkeinen ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
++ */ ++ ++#ifndef __OMAPVRAM_H ++#define __OMAPVRAM_H ++ ++#include ++ ++extern int omap_vram_add_region(unsigned long paddr, size_t size); ++extern int omap_vram_free(unsigned long paddr, size_t size); ++extern int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr); ++extern int omap_vram_reserve(unsigned long paddr, size_t size); ++extern void omap_vram_get_info(unsigned long *vram, unsigned long *free_vram, ++ unsigned long *largest_free_block); ++extern void omap2_set_sdram_vram(u32 size, u32 start); ++extern void omap2_set_sram_vram(u32 size, u32 start); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/vrfb.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/vrfb.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/include/mach/vrfb.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/include/mach/vrfb.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,56 @@ ++/* ++ * File: arch/arm/plat-omap/include/mach/vrfb.h ++ * ++ * VRFB ++ * ++ * Copyright (C) 2009 Nokia Corporation ++ * Author: Tomi Valkeinen ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ */ ++ ++#ifndef __VRFB_H ++#define __VRFB_H ++ ++#include ++#define OMAP_VRFB_LINE_LEN 2048 ++ ++struct vrfb ++{ ++ u8 context; ++ void __iomem *vaddr[4]; ++ unsigned long paddr[4]; ++ u16 xres; ++ u16 yres; ++ u16 xoffset; ++ u16 yoffset; ++ u8 bytespp; ++}; ++ ++extern int omap_vrfb_request_ctx(struct vrfb *vrfb); ++extern void omap_vrfb_release_ctx(struct vrfb *vrfb); ++extern void omap_vrfb_suspend_ctx(struct vrfb *vrfb); ++extern void omap_vrfb_resume_ctx(struct vrfb *vrfb); ++extern void omap_vrfb_adjust_size(u16 *width, u16 *height, ++ u8 bytespp); ++extern u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp); ++extern u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp); ++extern void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr, ++ u16 width, u16 height, ++ enum omap_color_mode color_mode); ++extern int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot); ++extern void omap_vrfb_restore_context(void); ++ ++#endif /* __VRFB_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/iommu.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iommu.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/iommu.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iommu.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,998 @@ ++/* ++ * omap iommu: tlb and pagetable primitives ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU , ++ * Paul Mundt and Toshihiro Kobayashi ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++#include "iopgtable.h" ++ ++/* accommodate the difference between omap1 and omap2/3 */ ++static const struct iommu_functions *arch_iommu; ++ ++static struct platform_driver omap_iommu_driver; ++static struct kmem_cache *iopte_cachep; ++ ++/** ++ * install_iommu_arch - Install archtecure specific iommu functions ++ * @ops: a pointer to architecture specific iommu functions ++ * ++ * There are several kind of iommu algorithm(tlb, pagetable) among ++ * omap series. This interface installs such an iommu algorighm. ++ **/ ++int install_iommu_arch(const struct iommu_functions *ops) ++{ ++ if (arch_iommu) ++ return -EBUSY; ++ ++ arch_iommu = ops; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(install_iommu_arch); ++ ++/** ++ * uninstall_iommu_arch - Uninstall archtecure specific iommu functions ++ * @ops: a pointer to architecture specific iommu functions ++ * ++ * This interface uninstalls the iommu algorighm installed previously. ++ **/ ++void uninstall_iommu_arch(const struct iommu_functions *ops) ++{ ++ if (arch_iommu != ops) ++ pr_err("%s: not your arch\n", __func__); ++ ++ arch_iommu = NULL; ++} ++EXPORT_SYMBOL_GPL(uninstall_iommu_arch); ++ ++/** ++ * iommu_save_ctx - Save registers for pm off-mode support ++ * @obj: target iommu ++ **/ ++void iommu_save_ctx(struct iommu *obj) ++{ ++ arch_iommu->save_ctx(obj); ++} ++EXPORT_SYMBOL_GPL(iommu_save_ctx); ++ ++/** ++ * iommu_restore_ctx - Restore registers for pm off-mode support ++ * @obj: target iommu ++ **/ ++void iommu_restore_ctx(struct iommu *obj) ++{ ++ arch_iommu->restore_ctx(obj); ++} ++EXPORT_SYMBOL_GPL(iommu_restore_ctx); ++ ++/** ++ * iommu_arch_version - Return running iommu arch version ++ **/ ++u32 iommu_arch_version(void) ++{ ++ return arch_iommu->version; ++} ++EXPORT_SYMBOL_GPL(iommu_arch_version); ++ ++static int iommu_enable(struct iommu *obj) ++{ ++ int err; ++ ++ if (!obj) ++ return -EINVAL; ++ ++ clk_enable(obj->clk); ++ ++ err = arch_iommu->enable(obj); ++ ++ clk_disable(obj->clk); ++ return err; ++} ++ ++static void iommu_disable(struct iommu *obj) ++{ ++ if (!obj) ++ return; ++ ++ clk_enable(obj->clk); ++ ++ arch_iommu->disable(obj); ++ ++ clk_disable(obj->clk); ++} ++ ++/* ++ * TLB operations ++ */ ++void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) ++{ ++ BUG_ON(!cr || !e); ++ ++ arch_iommu->cr_to_e(cr, e); ++} ++EXPORT_SYMBOL_GPL(iotlb_cr_to_e); ++ ++static inline int iotlb_cr_valid(struct cr_regs *cr) ++{ ++ if (!cr) ++ return -EINVAL; ++ ++ return arch_iommu->cr_valid(cr); ++} ++ ++static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, ++ struct iotlb_entry *e) ++{ ++ if (!e) ++ return NULL; ++ ++ return arch_iommu->alloc_cr(obj, e); ++} ++ ++u32 iotlb_cr_to_virt(struct cr_regs *cr) ++{ ++ return arch_iommu->cr_to_virt(cr); ++} ++EXPORT_SYMBOL_GPL(iotlb_cr_to_virt); ++ ++static u32 get_iopte_attr(struct iotlb_entry *e) ++{ ++ return arch_iommu->get_pte_attr(e); ++} ++ ++static u32 iommu_report_fault(struct iommu *obj, u32 *da) ++{ ++ return arch_iommu->fault_isr(obj, da); ++} ++ ++static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) ++{ ++ u32 val; ++ ++ val = iommu_read_reg(obj, MMU_LOCK); ++ ++ l->base = MMU_LOCK_BASE(val); ++ l->vict = MMU_LOCK_VICT(val); ++ ++ BUG_ON(l->base != 0); /* Currently no preservation is used */ ++} ++ ++static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) ++{ ++ u32 val; ++ ++ BUG_ON(l->base != 0); /* Currently no 
preservation is used */ ++ ++ val = (l->base << MMU_LOCK_BASE_SHIFT); ++ val |= (l->vict << MMU_LOCK_VICT_SHIFT); ++ ++ iommu_write_reg(obj, val, MMU_LOCK); ++} ++ ++static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr) ++{ ++ arch_iommu->tlb_read_cr(obj, cr); ++} ++ ++static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) ++{ ++ arch_iommu->tlb_load_cr(obj, cr); ++ ++ iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); ++ iommu_write_reg(obj, 1, MMU_LD_TLB); ++} ++ ++/** ++ * iotlb_dump_cr - Dump an iommu tlb entry into buf ++ * @obj: target iommu ++ * @cr: contents of cam and ram register ++ * @buf: output buffer ++ **/ ++static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, ++ char *buf) ++{ ++ BUG_ON(!cr || !buf); ++ ++ return arch_iommu->dump_cr(obj, cr, buf); ++} ++ ++/** ++ * load_iotlb_entry - Set an iommu tlb entry ++ * @obj: target iommu ++ * @e: an iommu tlb entry info ++ **/ ++int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) ++{ ++ int i; ++ int err = 0; ++ struct iotlb_lock l; ++ struct cr_regs *cr; ++ ++ if (!obj || !obj->nr_tlb_entries || !e) ++ return -EINVAL; ++ ++ clk_enable(obj->clk); ++ ++ for (i = 0; i < obj->nr_tlb_entries; i++) { ++ struct cr_regs tmp; ++ ++ iotlb_lock_get(obj, &l); ++ l.vict = i; ++ iotlb_lock_set(obj, &l); ++ iotlb_read_cr(obj, &tmp); ++ if (!iotlb_cr_valid(&tmp)) ++ break; ++ } ++ ++ if (i == obj->nr_tlb_entries) { ++ dev_dbg(obj->dev, "%s: full: no entry\n", __func__); ++ err = -EBUSY; ++ goto out; ++ } ++ ++ cr = iotlb_alloc_cr(obj, e); ++ if (IS_ERR(cr)) { ++ clk_disable(obj->clk); ++ return PTR_ERR(cr); ++ } ++ ++ iotlb_load_cr(obj, cr); ++ kfree(cr); ++ ++ /* increment victim for next tlb load */ ++ if (++l.vict == obj->nr_tlb_entries) ++ l.vict = 0; ++ iotlb_lock_set(obj, &l); ++out: ++ clk_disable(obj->clk); ++ return err; ++} ++EXPORT_SYMBOL_GPL(load_iotlb_entry); ++ ++/** ++ * flush_iotlb_page - Clear an iommu tlb entry ++ * @obj: target iommu ++ * @da: iommu device virtual address ++ * ++ * Clear an iommu tlb entry which includes 'da' address. ++ **/ ++void flush_iotlb_page(struct iommu *obj, u32 da) ++{ ++ struct iotlb_lock l; ++ int i; ++ ++ clk_enable(obj->clk); ++ ++ for (i = 0; i < obj->nr_tlb_entries; i++) { ++ struct cr_regs cr; ++ u32 start; ++ size_t bytes; ++ ++ iotlb_lock_get(obj, &l); ++ l.vict = i; ++ iotlb_lock_set(obj, &l); ++ iotlb_read_cr(obj, &cr); ++ if (!iotlb_cr_valid(&cr)) ++ continue; ++ ++ start = iotlb_cr_to_virt(&cr); ++ bytes = iopgsz_to_bytes(cr.cam & 3); ++ ++ if ((start <= da) && (da < start + bytes)) { ++ dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", ++ __func__, start, da, bytes); ++ iotlb_load_cr(obj, &cr); ++ iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); ++ } ++ } ++ clk_disable(obj->clk); ++ ++ if (i == obj->nr_tlb_entries) ++ dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); ++} ++EXPORT_SYMBOL_GPL(flush_iotlb_page); ++ ++/** ++ * flush_iotlb_range - Clear an iommu tlb entries ++ * @obj: target iommu ++ * @start: iommu device virtual address(start) ++ * @end: iommu device virtual address(end) ++ * ++ * Clear an iommu tlb entry which includes 'da' address. 
++ **/ ++void flush_iotlb_range(struct iommu *obj, u32 start, u32 end) ++{ ++ u32 da = start; ++ ++ while (da < end) { ++ flush_iotlb_page(obj, da); ++ /* FIXME: Optimize for multiple page size */ ++ da += IOPTE_SIZE; ++ } ++} ++EXPORT_SYMBOL_GPL(flush_iotlb_range); ++ ++/** ++ * flush_iotlb_all - Clear all iommu tlb entries ++ * @obj: target iommu ++ **/ ++void flush_iotlb_all(struct iommu *obj) ++{ ++ struct iotlb_lock l; ++ ++ clk_enable(obj->clk); ++ ++ l.base = 0; ++ l.vict = 0; ++ iotlb_lock_set(obj, &l); ++ ++ iommu_write_reg(obj, 1, MMU_GFLUSH); ++ ++ clk_disable(obj->clk); ++} ++EXPORT_SYMBOL_GPL(flush_iotlb_all); ++ ++#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) ++ ++ssize_t iommu_dump_ctx(struct iommu *obj, char *buf) ++{ ++ ssize_t bytes; ++ ++ if (!obj || !buf) ++ return -EINVAL; ++ ++ clk_enable(obj->clk); ++ ++ bytes = arch_iommu->dump_ctx(obj, buf); ++ ++ clk_disable(obj->clk); ++ ++ return bytes; ++} ++EXPORT_SYMBOL_GPL(iommu_dump_ctx); ++ ++static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs) ++{ ++ int i; ++ struct iotlb_lock saved, l; ++ struct cr_regs *p = crs; ++ ++ clk_enable(obj->clk); ++ ++ iotlb_lock_get(obj, &saved); ++ memcpy(&l, &saved, sizeof(saved)); ++ ++ for (i = 0; i < obj->nr_tlb_entries; i++) { ++ struct cr_regs tmp; ++ ++ iotlb_lock_get(obj, &l); ++ l.vict = i; ++ iotlb_lock_set(obj, &l); ++ iotlb_read_cr(obj, &tmp); ++ if (!iotlb_cr_valid(&tmp)) ++ continue; ++ ++ *p++ = tmp; ++ } ++ iotlb_lock_set(obj, &saved); ++ clk_disable(obj->clk); ++ ++ return p - crs; ++} ++ ++/** ++ * dump_tlb_entries - dump cr arrays to given buffer ++ * @obj: target iommu ++ * @buf: output buffer ++ **/ ++size_t dump_tlb_entries(struct iommu *obj, char *buf) ++{ ++ int i, n; ++ struct cr_regs *cr; ++ char *p = buf; ++ ++ cr = kcalloc(obj->nr_tlb_entries, sizeof(*cr), GFP_KERNEL); ++ if (!cr) ++ return 0; ++ ++ n = __dump_tlb_entries(obj, cr); ++ for (i = 0; i < n; i++) ++ p += iotlb_dump_cr(obj, cr + i, p); ++ kfree(cr); ++ ++ return p - buf; ++} ++EXPORT_SYMBOL_GPL(dump_tlb_entries); ++ ++int foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) ++{ ++ return driver_for_each_device(&omap_iommu_driver.driver, ++ NULL, data, fn); ++} ++EXPORT_SYMBOL_GPL(foreach_iommu_device); ++ ++#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ ++ ++/* ++ * H/W pagetable operations ++ */ ++static void flush_iopgd_range(u32 *first, u32 *last) ++{ ++ /* FIXME: L2 cache should be taken care of if it exists */ ++ do { ++ asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" ++ : : "r" (first)); ++ first += L1_CACHE_BYTES / sizeof(*first); ++ } while (first <= last); ++} ++ ++static void flush_iopte_range(u32 *first, u32 *last) ++{ ++ /* FIXME: L2 cache should be taken care of if it exists */ ++ do { ++ asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" ++ : : "r" (first)); ++ first += L1_CACHE_BYTES / sizeof(*first); ++ } while (first <= last); ++} ++ ++static void iopte_free(u32 *iopte) ++{ ++ /* Note: freed iopte's must be clean ready for re-use */ ++ kmem_cache_free(iopte_cachep, iopte); ++} ++ ++static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da) ++{ ++ u32 *iopte; ++ ++ /* a table has already existed */ ++ if (*iopgd) ++ goto pte_ready; ++ ++ /* ++ * do the allocation outside the page table lock ++ */ ++ spin_unlock(&obj->page_table_lock); ++ iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL); ++ spin_lock(&obj->page_table_lock); ++ ++ if (!*iopgd) { ++ if (!iopte) ++ return ERR_PTR(-ENOMEM); ++ ++ *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; ++ 
flush_iopgd_range(iopgd, iopgd); ++ ++ dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); ++ } else { ++ /* We raced, free the reduniovant table */ ++ iopte_free(iopte); ++ } ++ ++pte_ready: ++ iopte = iopte_offset(iopgd, da); ++ ++ dev_vdbg(obj->dev, ++ "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", ++ __func__, da, iopgd, *iopgd, iopte, *iopte); ++ ++ return iopte; ++} ++ ++static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) ++{ ++ u32 *iopgd = iopgd_offset(obj, da); ++ ++ *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; ++ flush_iopgd_range(iopgd, iopgd); ++ return 0; ++} ++ ++static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) ++{ ++ u32 *iopgd = iopgd_offset(obj, da); ++ int i; ++ ++ for (i = 0; i < 16; i++) ++ *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; ++ flush_iopgd_range(iopgd, iopgd + 15); ++ return 0; ++} ++ ++static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) ++{ ++ u32 *iopgd = iopgd_offset(obj, da); ++ u32 *iopte = iopte_alloc(obj, iopgd, da); ++ ++ if (IS_ERR(iopte)) ++ return PTR_ERR(iopte); ++ ++ *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; ++ flush_iopte_range(iopte, iopte); ++ ++ dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", ++ __func__, da, pa, iopte, *iopte); ++ ++ return 0; ++} ++ ++static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) ++{ ++ u32 *iopgd = iopgd_offset(obj, da); ++ u32 *iopte = iopte_alloc(obj, iopgd, da); ++ int i; ++ ++ if (IS_ERR(iopte)) ++ return PTR_ERR(iopte); ++ ++ for (i = 0; i < 16; i++) ++ *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; ++ flush_iopte_range(iopte, iopte + 15); ++ return 0; ++} ++ ++static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e) ++{ ++ int (*fn)(struct iommu *, u32, u32, u32); ++ u32 prot; ++ int err; ++ ++ if (!obj || !e) ++ return -EINVAL; ++ ++ switch (e->pgsz) { ++ case MMU_CAM_PGSZ_16M: ++ fn = iopgd_alloc_super; ++ break; ++ case MMU_CAM_PGSZ_1M: ++ fn = iopgd_alloc_section; ++ break; ++ case MMU_CAM_PGSZ_64K: ++ fn = iopte_alloc_large; ++ break; ++ case MMU_CAM_PGSZ_4K: ++ fn = iopte_alloc_page; ++ break; ++ default: ++ fn = NULL; ++ BUG(); ++ break; ++ } ++ ++ prot = get_iopte_attr(e); ++ ++ spin_lock(&obj->page_table_lock); ++ err = fn(obj, e->da, e->pa, prot); ++ spin_unlock(&obj->page_table_lock); ++ ++ return err; ++} ++ ++/** ++ * iopgtable_store_entry - Make an iommu pte entry ++ * @obj: target iommu ++ * @e: an iommu tlb entry info ++ **/ ++int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e) ++{ ++ int err; ++ ++ flush_iotlb_page(obj, e->da); ++ err = iopgtable_store_entry_core(obj, e); ++#ifdef PREFETCH_IOTLB ++ if (!err) ++ load_iotlb_entry(obj, e); ++#endif ++ return err; ++} ++EXPORT_SYMBOL_GPL(iopgtable_store_entry); ++ ++/** ++ * iopgtable_lookup_entry - Lookup an iommu pte entry ++ * @obj: target iommu ++ * @da: iommu device virtual address ++ * @ppgd: iommu pgd entry pointer to be returned ++ * @ppte: iommu pte entry pointer to be returned ++ **/ ++void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte) ++{ ++ u32 *iopgd, *iopte = NULL; ++ ++ iopgd = iopgd_offset(obj, da); ++ if (!*iopgd) ++ goto out; ++ ++ if (*iopgd & IOPGD_TABLE) ++ iopte = iopte_offset(iopgd, da); ++out: ++ *ppgd = iopgd; ++ *ppte = iopte; ++} ++EXPORT_SYMBOL_GPL(iopgtable_lookup_entry); ++ ++static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da) ++{ ++ size_t bytes; ++ u32 *iopgd = iopgd_offset(obj, da); 
++ int nent = 1; ++ ++ if (!*iopgd) ++ return 0; ++ ++ if (*iopgd & IOPGD_TABLE) { ++ int i; ++ u32 *iopte = iopte_offset(iopgd, da); ++ ++ bytes = IOPTE_SIZE; ++ if (*iopte & IOPTE_LARGE) { ++ nent *= 16; ++ /* rewind to the 1st entry */ ++ iopte = (u32 *)((u32)iopte & IOLARGE_MASK); ++ } ++ bytes *= nent; ++ memset(iopte, 0, nent * sizeof(*iopte)); ++ flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); ++ ++ /* ++ * do table walk to check if this table is necessary or not ++ */ ++ iopte = iopte_offset(iopgd, 0); ++ for (i = 0; i < PTRS_PER_IOPTE; i++) ++ if (iopte[i]) ++ goto out; ++ ++ iopte_free(iopte); ++ nent = 1; /* for the next L1 entry */ ++ } else { ++ bytes = IOPGD_SIZE; ++ if (*iopgd & IOPGD_SUPER) { ++ nent *= 16; ++ /* rewind to the 1st entry */ ++ iopgd = (u32 *)((u32)iopgd & IOSUPER_MASK); ++ } ++ bytes *= nent; ++ } ++ memset(iopgd, 0, nent * sizeof(*iopgd)); ++ flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); ++out: ++ return bytes; ++} ++ ++/** ++ * iopgtable_clear_entry - Remove an iommu pte entry ++ * @obj: target iommu ++ * @da: iommu device virtual address ++ **/ ++size_t iopgtable_clear_entry(struct iommu *obj, u32 da) ++{ ++ size_t bytes; ++ ++ spin_lock(&obj->page_table_lock); ++ ++ bytes = iopgtable_clear_entry_core(obj, da); ++ flush_iotlb_page(obj, da); ++ ++ spin_unlock(&obj->page_table_lock); ++ ++ return bytes; ++} ++EXPORT_SYMBOL_GPL(iopgtable_clear_entry); ++ ++static void iopgtable_clear_entry_all(struct iommu *obj) ++{ ++ int i; ++ ++ spin_lock(&obj->page_table_lock); ++ ++ for (i = 0; i < PTRS_PER_IOPGD; i++) { ++ u32 da; ++ u32 *iopgd; ++ ++ da = i << IOPGD_SHIFT; ++ iopgd = iopgd_offset(obj, da); ++ ++ if (!*iopgd) ++ continue; ++ ++ if (*iopgd & IOPGD_TABLE) ++ iopte_free(iopte_offset(iopgd, 0)); ++ ++ *iopgd = 0; ++ flush_iopgd_range(iopgd, iopgd); ++ } ++ ++ flush_iotlb_all(obj); ++ ++ spin_unlock(&obj->page_table_lock); ++} ++ ++/* ++ * Device IOMMU generic operations ++ */ ++static irqreturn_t iommu_fault_handler(int irq, void *data) ++{ ++ u32 stat, da; ++ u32 *iopgd, *iopte; ++ int err = -EIO; ++ struct iommu *obj = data; ++ ++ if (!obj->refcount) ++ return IRQ_NONE; ++ ++ /* Dynamic loading TLB or PTE */ ++ if (obj->isr) ++ err = obj->isr(obj); ++ ++ if (!err) ++ return IRQ_HANDLED; ++ ++ clk_enable(obj->clk); ++ stat = iommu_report_fault(obj, &da); ++ clk_disable(obj->clk); ++ if (!stat) ++ return IRQ_HANDLED; ++ ++ iopgd = iopgd_offset(obj, da); ++ ++ if (!(*iopgd & IOPGD_TABLE)) { ++ dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__, ++ da, iopgd, *iopgd); ++ return IRQ_NONE; ++ } ++ ++ iopte = iopte_offset(iopgd, da); ++ ++ dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", ++ __func__, da, iopgd, *iopgd, iopte, *iopte); ++ ++ return IRQ_NONE; ++} ++ ++static int device_match_by_alias(struct device *dev, void *data) ++{ ++ struct iommu *obj = to_iommu(dev); ++ const char *name = data; ++ ++ pr_debug("%s: %s %s\n", __func__, obj->name, name); ++ ++ return strcmp(obj->name, name) == 0; ++} ++ ++/** ++ * iommu_get - Get iommu handler ++ * @name: target iommu name ++ **/ ++struct iommu *iommu_get(const char *name) ++{ ++ int err = -ENOMEM; ++ struct device *dev; ++ struct iommu *obj; ++ ++ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, ++ device_match_by_alias); ++ if (!dev) ++ return ERR_PTR(-ENODEV); ++ ++ obj = to_iommu(dev); ++ ++ mutex_lock(&obj->iommu_lock); ++ ++ if (obj->refcount++ == 0) { ++ err = iommu_enable(obj); ++ if (err) ++ goto err_enable; ++ 
flush_iotlb_all(obj); ++ } ++ ++ if (!try_module_get(obj->owner)) ++ goto err_module; ++ ++ mutex_unlock(&obj->iommu_lock); ++ ++ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); ++ return obj; ++ ++err_module: ++ if (obj->refcount == 1) ++ iommu_disable(obj); ++err_enable: ++ obj->refcount--; ++ mutex_unlock(&obj->iommu_lock); ++ return ERR_PTR(err); ++} ++EXPORT_SYMBOL_GPL(iommu_get); ++ ++/** ++ * iommu_put - Put back iommu handler ++ * @obj: target iommu ++ **/ ++void iommu_put(struct iommu *obj) ++{ ++ if (!obj && IS_ERR(obj)) ++ return; ++ ++ mutex_lock(&obj->iommu_lock); ++ ++ if (--obj->refcount == 0) ++ iommu_disable(obj); ++ ++ module_put(obj->owner); ++ ++ mutex_unlock(&obj->iommu_lock); ++ ++ dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); ++} ++EXPORT_SYMBOL_GPL(iommu_put); ++ ++/* ++ * OMAP Device MMU(IOMMU) detection ++ */ ++static int __devinit omap_iommu_probe(struct platform_device *pdev) ++{ ++ int err = -ENODEV; ++ void *p; ++ int irq; ++ struct iommu *obj; ++ struct resource *res; ++ struct iommu_platform_data *pdata = pdev->dev.platform_data; ++ ++ if (pdev->num_resources != 2) ++ return -EINVAL; ++ ++ obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); ++ if (!obj) ++ return -ENOMEM; ++ ++ obj->clk = clk_get(&pdev->dev, pdata->clk_name); ++ if (IS_ERR(obj->clk)) ++ goto err_clk; ++ ++ obj->nr_tlb_entries = pdata->nr_tlb_entries; ++ obj->name = pdata->name; ++ obj->dev = &pdev->dev; ++ obj->ctx = (void *)obj + sizeof(*obj); ++ ++ mutex_init(&obj->iommu_lock); ++ mutex_init(&obj->mmap_lock); ++ spin_lock_init(&obj->page_table_lock); ++ INIT_LIST_HEAD(&obj->mmap); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ err = -ENODEV; ++ goto err_mem; ++ } ++ obj->regbase = ioremap(res->start, resource_size(res)); ++ if (!obj->regbase) { ++ err = -ENOMEM; ++ goto err_mem; ++ } ++ ++ res = request_mem_region(res->start, resource_size(res), ++ dev_name(&pdev->dev)); ++ if (!res) { ++ err = -EIO; ++ goto err_mem; ++ } ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ err = -ENODEV; ++ goto err_irq; ++ } ++ err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, ++ dev_name(&pdev->dev), obj); ++ if (err < 0) ++ goto err_irq; ++ platform_set_drvdata(pdev, obj); ++ ++ p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE)); ++ if (!p) { ++ err = -ENOMEM; ++ goto err_pgd; ++ } ++ memset(p, 0, IOPGD_TABLE_SIZE); ++ clean_dcache_area(p, IOPGD_TABLE_SIZE); ++ obj->iopgd = p; ++ ++ BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE)); ++ ++ dev_info(&pdev->dev, "%s registered\n", obj->name); ++ return 0; ++ ++err_pgd: ++ free_irq(irq, obj); ++err_irq: ++ release_mem_region(res->start, resource_size(res)); ++ iounmap(obj->regbase); ++err_mem: ++ clk_put(obj->clk); ++err_clk: ++ kfree(obj); ++ return err; ++} ++ ++static int __devexit omap_iommu_remove(struct platform_device *pdev) ++{ ++ int irq; ++ struct resource *res; ++ struct iommu *obj = platform_get_drvdata(pdev); ++ ++ platform_set_drvdata(pdev, NULL); ++ ++ iopgtable_clear_entry_all(obj); ++ free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE)); ++ ++ irq = platform_get_irq(pdev, 0); ++ free_irq(irq, obj); ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ release_mem_region(res->start, resource_size(res)); ++ iounmap(obj->regbase); ++ ++ clk_put(obj->clk); ++ dev_info(&pdev->dev, "%s removed\n", obj->name); ++ kfree(obj); ++ return 0; ++} ++ ++static struct platform_driver omap_iommu_driver = { ++ .probe = omap_iommu_probe, ++ 
.remove = __devexit_p(omap_iommu_remove), ++ .driver = { ++ .name = "omap-iommu", ++ }, ++}; ++ ++static void iopte_cachep_ctor(void *iopte) ++{ ++ clean_dcache_area(iopte, IOPTE_TABLE_SIZE); ++} ++ ++static int __init omap_iommu_init(void) ++{ ++ struct kmem_cache *p; ++ const unsigned long flags = SLAB_HWCACHE_ALIGN; ++ size_t align = 1 << 10; /* L2 pagetable alignement */ ++ ++ p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, ++ iopte_cachep_ctor); ++ if (!p) ++ return -ENOMEM; ++ iopte_cachep = p; ++ ++ return platform_driver_register(&omap_iommu_driver); ++} ++module_init(omap_iommu_init); ++ ++static void __exit omap_iommu_exit(void) ++{ ++ kmem_cache_destroy(iopte_cachep); ++ ++ platform_driver_unregister(&omap_iommu_driver); ++} ++module_exit(omap_iommu_exit); ++ ++MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); ++MODULE_ALIAS("platform:omap-iommu"); ++MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); ++MODULE_LICENSE("GPL v2"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/iommu-debug.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iommu-debug.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/iommu-debug.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iommu-debug.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,334 @@ ++/* ++ * omap iommu: debugfs interface ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "iopgtable.h" ++ ++#define MAXCOLUMN 100 /* for short messages */ ++ ++static DEFINE_MUTEX(iommu_debug_lock); ++static char local_buffer[SZ_4K]; ++ ++static struct dentry *iommu_debug_root; ++ ++static ssize_t debug_read_ver(struct file *file, char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ u32 ver = iommu_arch_version(); ++ char buf[MAXCOLUMN], *p = buf; ++ ++ p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); ++ ++ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); ++} ++ ++static ssize_t debug_read_regs(struct file *file, char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ struct iommu *obj = file->private_data; ++ char *p = local_buffer; ++ ssize_t bytes; ++ ++ mutex_lock(&iommu_debug_lock); ++ p += iommu_dump_ctx(obj, p); ++ bytes = simple_read_from_buffer(userbuf, count, ppos, local_buffer, ++ p - local_buffer); ++ mutex_unlock(&iommu_debug_lock); ++ return bytes; ++} ++ ++static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ struct iommu *obj = file->private_data; ++ char *p = local_buffer; ++ ssize_t bytes; ++ ++ mutex_lock(&iommu_debug_lock); ++ p += sprintf(p, "%8s %8s\n", "cam:", "ram:"); ++ p += sprintf(p, "-----------------------------------------\n"); ++ p += dump_tlb_entries(obj, p); ++ bytes = simple_read_from_buffer(userbuf, count, ppos, local_buffer, ++ p - local_buffer); ++ mutex_unlock(&iommu_debug_lock); ++ return bytes; ++} ++ ++static ssize_t debug_write_pagetable(struct file *file, ++ const char __user *userbuf, size_t count, loff_t *ppos) ++{ ++ struct iotlb_entry e; ++ struct cr_regs cr; ++ int err; ++ struct iommu *obj = file->private_data; ++ char buf[MAXCOLUMN], *p = buf; ++ ++ count = min(count, 
sizeof(buf)); ++ ++ mutex_lock(&iommu_debug_lock); ++ if (copy_from_user(p, userbuf, count)) { ++ mutex_unlock(&iommu_debug_lock); ++ return -EFAULT; ++ } ++ ++ sscanf(p, "%x %x", &cr.cam, &cr.ram); ++ if (!cr.cam || !cr.ram) { ++ mutex_unlock(&iommu_debug_lock); ++ return -EINVAL; ++ } ++ ++ iotlb_cr_to_e(&cr, &e); ++ err = iopgtable_store_entry(obj, &e); ++ if (err) ++ dev_err(obj->dev, "%s: fail to store cr\n", __func__); ++ ++ mutex_unlock(&iommu_debug_lock); ++ return count; ++} ++ ++static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ int i; ++ u32 *iopgd; ++ struct iommu *obj = file->private_data; ++ char *p = local_buffer; ++ ssize_t bytes; ++ ++ mutex_lock(&iommu_debug_lock); ++ ++ p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); ++ p += sprintf(p, "-----------------------------------------\n"); ++ ++ spin_lock(&obj->page_table_lock); ++ ++ iopgd = iopgd_offset(obj, 0); ++ for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { ++ int j; ++ u32 *iopte; ++ ++ if (!*iopgd) ++ continue; ++ ++ if (!(*iopgd & IOPGD_TABLE)) { ++ u32 da; ++ ++ da = i << IOPGD_SHIFT; ++ p += sprintf(p, "1: %08x %08x\n", da, *iopgd); ++ continue; ++ } ++ ++ iopte = iopte_offset(iopgd, 0); ++ ++ for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { ++ u32 da; ++ ++ if (!*iopte) ++ continue; ++ ++ da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); ++ p += sprintf(p, "2: %08x %08x\n", da, *iopte); ++ } ++ } ++ spin_unlock(&obj->page_table_lock); ++ ++ bytes = simple_read_from_buffer(userbuf, count, ppos, local_buffer, ++ p - local_buffer); ++ mutex_unlock(&iommu_debug_lock); ++ return bytes; ++} ++ ++static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ struct iommu *obj = file->private_data; ++ char *p = local_buffer; ++ struct iovm_struct *tmp; ++ int uninitialized_var(i); ++ ssize_t bytes; ++ ++ mutex_lock(&iommu_debug_lock); ++ ++ p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n", ++ "No", "start", "end", "size", "flags"); ++ p += sprintf(p, "-------------------------------------------------\n"); ++ ++ list_for_each_entry(tmp, &obj->mmap, list) { ++ size_t len; ++ ++ len = tmp->da_end - tmp->da_start; ++ p += sprintf(p, "%3d %08x-%08x %6x %8x\n", ++ i, tmp->da_start, tmp->da_end, len, tmp->flags); ++ i++; ++ } ++ bytes = simple_read_from_buffer(userbuf, count, ppos, local_buffer, ++ p - local_buffer); ++ mutex_unlock(&iommu_debug_lock); ++ return bytes; ++} ++ ++static ssize_t debug_read_mem(struct file *file, char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ struct iommu *obj = file->private_data; ++ char *p = local_buffer; ++ struct iovm_struct *area; ++ ssize_t bytes; ++ ++ mutex_lock(&iommu_debug_lock); ++ ++ area = find_iovm_area(obj, (u32)ppos); ++ if (IS_ERR(area)) { ++ mutex_unlock(&iommu_debug_lock); ++ return -EINVAL; ++ } ++ memcpy(p, area->va, count); ++ p += count; ++ ++ bytes = simple_read_from_buffer(userbuf, count, ppos, local_buffer, ++ p - local_buffer); ++ mutex_unlock(&iommu_debug_lock); ++ return bytes; ++} ++ ++static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, ++ size_t count, loff_t *ppos) ++{ ++ struct iommu *obj = file->private_data; ++ struct iovm_struct *area; ++ char *p = local_buffer; ++ ++ count = min(count, sizeof(local_buffer)); ++ ++ mutex_lock(&iommu_debug_lock); ++ ++ if (copy_from_user(p, userbuf, count)) { ++ mutex_unlock(&iommu_debug_lock); ++ return -EFAULT; ++ } ++ ++ area = find_iovm_area(obj, (u32)ppos); ++ if (IS_ERR(area)) { ++ 
mutex_unlock(&iommu_debug_lock); ++ return -EINVAL; ++ } ++ memcpy(area->va, p, count); ++ mutex_unlock(&iommu_debug_lock); ++ return count; ++} ++ ++static int debug_open_generic(struct inode *inode, struct file *file) ++{ ++ file->private_data = inode->i_private; ++ return 0; ++} ++ ++#define DEBUG_FOPS(name) \ ++ static const struct file_operations debug_##name##_fops = { \ ++ .open = debug_open_generic, \ ++ .read = debug_read_##name, \ ++ .write = debug_write_##name, \ ++ }; ++ ++#define DEBUG_FOPS_RO(name) \ ++ static const struct file_operations debug_##name##_fops = { \ ++ .open = debug_open_generic, \ ++ .read = debug_read_##name, \ ++ }; ++ ++DEBUG_FOPS_RO(ver); ++DEBUG_FOPS_RO(regs); ++DEBUG_FOPS_RO(tlb); ++DEBUG_FOPS(pagetable); ++DEBUG_FOPS_RO(mmap); ++DEBUG_FOPS(mem); ++ ++#define __DEBUG_ADD_FILE(attr, mode) \ ++ { \ ++ struct dentry *dent; \ ++ dent = debugfs_create_file(#attr, mode, parent, \ ++ obj, &debug_##attr##_fops); \ ++ if (!dent) \ ++ return -ENOMEM; \ ++ } ++ ++#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600) ++#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400) ++ ++static int iommu_debug_register(struct device *dev, void *data) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct iommu *obj = platform_get_drvdata(pdev); ++ struct dentry *d, *parent; ++ ++ if (!obj || !obj->dev) ++ return -EINVAL; ++ ++ d = debugfs_create_dir(obj->name, iommu_debug_root); ++ if (!d) ++ return -ENOMEM; ++ parent = d; ++ ++ d = debugfs_create_u8("nr_tlb_entries", 400, parent, ++ (u8 *)&obj->nr_tlb_entries); ++ if (!d) ++ return -ENOMEM; ++ ++ DEBUG_ADD_FILE_RO(ver); ++ DEBUG_ADD_FILE_RO(regs); ++ DEBUG_ADD_FILE_RO(tlb); ++ DEBUG_ADD_FILE(pagetable); ++ DEBUG_ADD_FILE_RO(mmap); ++ DEBUG_ADD_FILE(mem); ++ ++ return 0; ++} ++ ++static int __init iommu_debug_init(void) ++{ ++ struct dentry *d; ++ int err; ++ ++ d = debugfs_create_dir("iommu", NULL); ++ if (!d) ++ return -ENOMEM; ++ iommu_debug_root = d; ++ ++ err = foreach_iommu_device(d, iommu_debug_register); ++ if (err) ++ goto err_out; ++ return 0; ++ ++err_out: ++ debugfs_remove_recursive(iommu_debug_root); ++ return err; ++} ++module_init(iommu_debug_init) ++ ++static void __exit iommu_debugfs_exit(void) ++{ ++ debugfs_remove_recursive(iommu_debug_root); ++} ++module_exit(iommu_debugfs_exit) ++ ++MODULE_DESCRIPTION("omap iommu: debugfs interface"); ++MODULE_AUTHOR("Hiroshi DOYU "); ++MODULE_LICENSE("GPL v2"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/iopgtable.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iopgtable.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/iopgtable.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iopgtable.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,72 @@ ++/* ++ * omap iommu: pagetable definitions ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#ifndef __PLAT_OMAP_IOMMU_H ++#define __PLAT_OMAP_IOMMU_H ++ ++#define IOPGD_SHIFT 20 ++#define IOPGD_SIZE (1 << IOPGD_SHIFT) ++#define IOPGD_MASK (~(IOPGD_SIZE - 1)) ++#define IOSECTION_MASK IOPGD_MASK ++#define PTRS_PER_IOPGD (1 << (32 - IOPGD_SHIFT)) ++#define IOPGD_TABLE_SIZE (PTRS_PER_IOPGD * sizeof(u32)) ++ ++#define IOSUPER_SIZE (IOPGD_SIZE << 4) ++#define IOSUPER_MASK (~(IOSUPER_SIZE - 1)) ++ ++#define IOPTE_SHIFT 12 ++#define IOPTE_SIZE (1 << IOPTE_SHIFT) ++#define IOPTE_MASK (~(IOPTE_SIZE - 1)) ++#define IOPAGE_MASK IOPTE_MASK ++#define PTRS_PER_IOPTE (1 << (IOPGD_SHIFT - IOPTE_SHIFT)) ++#define IOPTE_TABLE_SIZE (PTRS_PER_IOPTE * sizeof(u32)) ++ ++#define IOLARGE_SIZE (IOPTE_SIZE << 4) ++#define IOLARGE_MASK (~(IOLARGE_SIZE - 1)) ++ ++#define IOPGD_TABLE (1 << 0) ++#define IOPGD_SECTION (2 << 0) ++#define IOPGD_SUPER (1 << 18 | 2 << 0) ++ ++#define IOPTE_SMALL (2 << 0) ++#define IOPTE_LARGE (1 << 0) ++ ++#define iopgd_index(da) (((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1)) ++#define iopgd_offset(obj, da) ((obj)->iopgd + iopgd_index(da)) ++ ++#define iopte_paddr(iopgd) (*iopgd & ~((1 << 10) - 1)) ++#define iopte_vaddr(iopgd) ((u32 *)phys_to_virt(iopte_paddr(iopgd))) ++ ++#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1)) ++#define iopte_offset(iopgd, da) (iopte_vaddr(iopgd) + iopte_index(da)) ++ ++static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, ++ u32 flags) ++{ ++ memset(e, 0, sizeof(*e)); ++ ++ e->da = da; ++ e->pa = pa; ++ e->valid = 1; ++ /* FIXME: add OMAP1 support */ ++ e->pgsz = flags & MMU_CAM_PGSZ_MASK; ++ e->endian = flags & MMU_RAM_ENDIAN_MASK; ++ e->elsz = flags & MMU_RAM_ELSZ_MASK; ++ e->mixed = flags & MMU_RAM_MIXED_MASK; ++ ++ return iopgsz_to_bytes(e->pgsz); ++} ++ ++#define to_iommu(dev) \ ++ (struct iommu *)platform_get_drvdata(to_platform_device(dev)) ++ ++#endif /* __PLAT_OMAP_IOMMU_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/iovmm.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iovmm.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/iovmm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/iovmm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,893 @@ ++/* ++ * omap iommu: simple virtual address space management ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++#include "iopgtable.h" ++ ++/* ++ * A device driver needs to create address mappings between: ++ * ++ * - iommu/device address ++ * - physical address ++ * - mpu virtual address ++ * ++ * There are 4 possible patterns for them: ++ * ++ * |iova/ mapping iommu_ page ++ * | da pa va (d)-(p)-(v) function type ++ * --------------------------------------------------------------------------- ++ * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s ++ * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s ++ * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s ++ * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n* ++ * ++ * ++ * 'iova': device iommu virtual address ++ * 'da': alias of 'iova' ++ * 'pa': physical address ++ * 'va': mpu virtual address ++ * ++ * 'c': contiguous memory area ++ * 'd': dicontiguous memory area ++ * 'a': anonymous memory allocation ++ * '()': optional feature ++ * ++ * 'n': a normal page(4KB) size is used. ++ * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used. ++ * ++ * '*': not yet, but feasible. ++ */ ++ ++static struct kmem_cache *iovm_area_cachep; ++ ++/* return total bytes of sg buffers */ ++static size_t sgtable_len(const struct sg_table *sgt) ++{ ++ unsigned int i, total = 0; ++ struct scatterlist *sg; ++ ++ if (!sgt) ++ return 0; ++ ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) { ++ size_t bytes; ++ ++ bytes = sg_dma_len(sg); ++ ++ if (!iopgsz_ok(bytes)) { ++ pr_err("%s: sg[%d] not iommu pagesize(%x)\n", ++ __func__, i, bytes); ++ return 0; ++ } ++ ++ total += bytes; ++ } ++ ++ return total; ++} ++#define sgtable_ok(x) (!!sgtable_len(x)) ++ ++/* ++ * calculate the optimal number sg elements from total bytes based on ++ * iommu superpages ++ */ ++static unsigned int sgtable_nents(size_t bytes) ++{ ++ int i; ++ unsigned int nr_entries; ++ const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; ++ ++ if (!IS_ALIGNED(bytes, PAGE_SIZE)) { ++ pr_err("%s: wrong size %08x\n", __func__, bytes); ++ return 0; ++ } ++ ++ nr_entries = 0; ++ for (i = 0; i < ARRAY_SIZE(pagesize); i++) { ++ if (bytes >= pagesize[i]) { ++ nr_entries += (bytes / pagesize[i]); ++ bytes %= pagesize[i]; ++ } ++ } ++ BUG_ON(bytes); ++ ++ return nr_entries; ++} ++ ++/* allocate and initialize sg_table header(a kind of 'superblock') */ ++static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags) ++{ ++ unsigned int nr_entries; ++ int err; ++ struct sg_table *sgt; ++ ++ if (!bytes) ++ return ERR_PTR(-EINVAL); ++ ++ if (!IS_ALIGNED(bytes, PAGE_SIZE)) ++ return ERR_PTR(-EINVAL); ++ ++ /* FIXME: IOVMF_DA_FIXED should support 'superpages' */ ++ if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) { ++ nr_entries = sgtable_nents(bytes); ++ if (!nr_entries) ++ return ERR_PTR(-EINVAL); ++ } else ++ nr_entries = bytes / PAGE_SIZE; ++ ++ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); ++ if (!sgt) ++ return ERR_PTR(-ENOMEM); ++ ++ err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); ++ if (err) ++ return ERR_PTR(err); ++ ++ pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); ++ ++ return sgt; ++} ++ ++/* free sg_table header(a kind of superblock) */ ++static void sgtable_free(struct sg_table *sgt) ++{ ++ if (!sgt) ++ return; ++ ++ sg_free_table(sgt); ++ kfree(sgt); ++ ++ pr_debug("%s: sgt:%p\n", __func__, sgt); ++} ++ ++/* map 'sglist' to a contiguous mpu virtual area and return 'va' */ ++static void *vmap_sg(const struct sg_table *sgt) ++{ ++ u32 va; ++ size_t total; ++ unsigned int i; ++ struct 
scatterlist *sg; ++ struct vm_struct *new; ++ ++ total = sgtable_len(sgt); ++ if (!total) ++ return ERR_PTR(-EINVAL); ++ ++ new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); ++ if (!new) ++ return ERR_PTR(-ENOMEM); ++ va = (u32)new->addr; ++ ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) { ++ size_t bytes; ++ u32 pa; ++ int err; ++ ++ pa = sg_phys(sg); ++ bytes = sg_dma_len(sg); ++ ++ BUG_ON(bytes != PAGE_SIZE); ++ ++ err = ioremap_page(va, pa, MT_DEVICE); ++ if (err) ++ goto err_out; ++ ++ va += bytes; ++ } ++ ++ flush_cache_vmap(new->addr, total); ++ return new->addr; ++ ++err_out: ++ WARN_ON(1); /* FIXME: cleanup some mpu mappings */ ++ vunmap(new->addr); ++ return ERR_PTR(-EAGAIN); ++} ++ ++static inline void vunmap_sg(const void *va) ++{ ++ vunmap(va); ++} ++ ++static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) ++{ ++ struct iovm_struct *tmp; ++ ++ list_for_each_entry(tmp, &obj->mmap, list) { ++ if ((da >= tmp->da_start) && (da < tmp->da_end)) { ++ size_t len; ++ ++ len = tmp->da_end - tmp->da_start; ++ ++ dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", ++ __func__, tmp->da_start, da, tmp->da_end, len, ++ tmp->flags); ++ ++ return tmp; ++ } ++ } ++ ++ return NULL; ++} ++ ++/** ++ * find_iovm_area - find iovma which includes @da ++ * @da: iommu device virtual address ++ * ++ * Find the existing iovma starting at @da ++ */ ++struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) ++{ ++ struct iovm_struct *area; ++ ++ mutex_lock(&obj->mmap_lock); ++ area = __find_iovm_area(obj, da); ++ mutex_unlock(&obj->mmap_lock); ++ ++ return area; ++} ++EXPORT_SYMBOL_GPL(find_iovm_area); ++ ++/* ++ * This finds the hole(area) which fits the requested address and len ++ * in iovmas mmap, and returns the new allocated iovma. 
++ */ ++static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, ++ size_t bytes, u32 flags) ++{ ++ struct iovm_struct *new, *tmp; ++ u32 start, prev_end, alignement; ++ ++ if (!obj || !bytes) ++ return ERR_PTR(-EINVAL); ++ ++ start = da; ++ alignement = PAGE_SIZE; ++ ++ if (flags & IOVMF_DA_ANON) { ++ /* ++ * Reserve the first page for NULL ++ */ ++ start = PAGE_SIZE; ++ if (flags & IOVMF_LINEAR) ++ alignement = iopgsz_max(bytes); ++ start = roundup(start, alignement); ++ } ++ ++ tmp = NULL; ++ if (list_empty(&obj->mmap)) ++ goto found; ++ ++ prev_end = 0; ++ list_for_each_entry(tmp, &obj->mmap, list) { ++ ++ if ((prev_end <= start) && (start + bytes < tmp->da_start)) ++ goto found; ++ ++ if (flags & IOVMF_DA_ANON) ++ start = roundup(tmp->da_end, alignement); ++ ++ prev_end = tmp->da_end; ++ } ++ ++ if ((start >= prev_end) && (ULONG_MAX - start >= bytes)) ++ goto found; ++ ++ dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", ++ __func__, da, bytes, flags); ++ ++ return ERR_PTR(-EINVAL); ++ ++found: ++ new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); ++ if (!new) ++ return ERR_PTR(-ENOMEM); ++ ++ new->iommu = obj; ++ new->da_start = start; ++ new->da_end = start + bytes; ++ new->flags = flags; ++ ++ /* ++ * keep ascending order of iovmas ++ */ ++ if (tmp) ++ list_add_tail(&new->list, &tmp->list); ++ else ++ list_add(&new->list, &obj->mmap); ++ ++ dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", ++ __func__, new->da_start, start, new->da_end, bytes, flags); ++ ++ return new; ++} ++ ++static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) ++{ ++ size_t bytes; ++ ++ BUG_ON(!obj || !area); ++ ++ bytes = area->da_end - area->da_start; ++ ++ dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", ++ __func__, area->da_start, area->da_end, bytes, area->flags); ++ ++ list_del(&area->list); ++ kmem_cache_free(iovm_area_cachep, area); ++} ++ ++/** ++ * da_to_va - convert (d) to (v) ++ * @obj: objective iommu ++ * @da: iommu device virtual address ++ * @va: mpu virtual address ++ * ++ * Returns mpu virtual addr which corresponds to a given device virtual addr ++ */ ++void *da_to_va(struct iommu *obj, u32 da) ++{ ++ void *va = NULL; ++ struct iovm_struct *area; ++ ++ mutex_lock(&obj->mmap_lock); ++ ++ area = __find_iovm_area(obj, da); ++ if (!area) { ++ dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); ++ goto out; ++ } ++ va = area->va; ++ mutex_unlock(&obj->mmap_lock); ++out: ++ return va; ++} ++EXPORT_SYMBOL_GPL(da_to_va); ++ ++static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) ++{ ++ unsigned int i; ++ struct scatterlist *sg; ++ void *va = _va; ++ void *va_end; ++ ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) { ++ struct page *pg; ++ const size_t bytes = PAGE_SIZE; ++ ++ /* ++ * iommu 'superpage' isn't supported with 'iommu_vmalloc()' ++ */ ++ pg = vmalloc_to_page(va); ++ BUG_ON(!pg); ++ sg_set_page(sg, pg, bytes, 0); ++ ++ va += bytes; ++ } ++ ++ va_end = _va + PAGE_SIZE * i; ++ flush_cache_vmap(_va, va_end); ++} ++ ++static inline void sgtable_drain_vmalloc(struct sg_table *sgt) ++{ ++ /* ++ * Actually this is not necessary at all, just exists for ++ * consistency of the code readibility. 
++ */ ++ BUG_ON(!sgt); ++} ++ ++static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len) ++{ ++ unsigned int i; ++ struct scatterlist *sg; ++ void *va; ++ ++ va = phys_to_virt(pa); ++ ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) { ++ size_t bytes; ++ ++ bytes = iopgsz_max(len); ++ ++ BUG_ON(!iopgsz_ok(bytes)); ++ ++ sg_set_buf(sg, phys_to_virt(pa), bytes); ++ /* ++ * 'pa' is cotinuous(linear). ++ */ ++ pa += bytes; ++ len -= bytes; ++ } ++ BUG_ON(len); ++ ++ clean_dcache_area(va, len); ++} ++ ++static inline void sgtable_drain_kmalloc(struct sg_table *sgt) ++{ ++ /* ++ * Actually this is not necessary at all, just exists for ++ * consistency of the code readibility ++ */ ++ BUG_ON(!sgt); ++} ++ ++/* create 'da' <-> 'pa' mapping from 'sgt' */ ++static int map_iovm_area(struct iommu *obj, struct iovm_struct *new, ++ const struct sg_table *sgt, u32 flags) ++{ ++ int err; ++ unsigned int i, j; ++ struct scatterlist *sg; ++ u32 da = new->da_start; ++ ++ if (!obj || !new || !sgt) ++ return -EINVAL; ++ ++ BUG_ON(!sgtable_ok(sgt)); ++ ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) { ++ u32 pa; ++ int pgsz; ++ size_t bytes; ++ struct iotlb_entry e; ++ ++ pa = sg_phys(sg); ++ bytes = sg_dma_len(sg); ++ ++ flags &= ~IOVMF_PGSZ_MASK; ++ pgsz = bytes_to_iopgsz(bytes); ++ if (pgsz < 0) ++ goto err_out; ++ flags |= pgsz; ++ ++ pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, ++ i, da, pa, bytes); ++ ++ iotlb_init_entry(&e, da, pa, flags); ++ err = iopgtable_store_entry(obj, &e); ++ if (err) ++ goto err_out; ++ ++ da += bytes; ++ } ++ return 0; ++ ++err_out: ++ da = new->da_start; ++ ++ for_each_sg(sgt->sgl, sg, i, j) { ++ size_t bytes; ++ ++ bytes = iopgtable_clear_entry(obj, da); ++ ++ BUG_ON(!iopgsz_ok(bytes)); ++ ++ da += bytes; ++ } ++ return err; ++} ++ ++/* release 'da' <-> 'pa' mapping */ ++static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) ++{ ++ u32 start; ++ size_t total = area->da_end - area->da_start; ++ ++ BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); ++ ++ start = area->da_start; ++ while (total > 0) { ++ size_t bytes; ++ ++ bytes = iopgtable_clear_entry(obj, start); ++ if (bytes == 0) ++ bytes = PAGE_SIZE; ++ else ++ dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", ++ __func__, start, bytes, area->flags); ++ ++ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); ++ ++ total -= bytes; ++ start += bytes; ++ } ++ BUG_ON(total); ++} ++ ++/* template function for all unmapping */ ++static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, ++ void (*fn)(const void *), u32 flags) ++{ ++ struct sg_table *sgt = NULL; ++ struct iovm_struct *area; ++ ++ if (!IS_ALIGNED(da, PAGE_SIZE)) { ++ dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); ++ return NULL; ++ } ++ ++ mutex_lock(&obj->mmap_lock); ++ ++ area = __find_iovm_area(obj, da); ++ if (!area) { ++ dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); ++ goto out; ++ } ++ ++ if ((area->flags & flags) != flags) { ++ dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, ++ area->flags); ++ goto out; ++ } ++ sgt = (struct sg_table *)area->sgt; ++ ++ unmap_iovm_area(obj, area); ++ ++ fn(area->va); ++ ++ dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, ++ area->da_start, da, area->da_end, ++ area->da_end - area->da_start, area->flags); ++ ++ free_iovm_area(obj, area); ++out: ++ mutex_unlock(&obj->mmap_lock); ++ ++ return sgt; ++} ++ ++static u32 map_iommu_region(struct iommu *obj, u32 da, ++ const struct sg_table *sgt, void *va, size_t bytes, u32 flags) ++{ ++ int err = -ENOMEM; ++ 
struct iovm_struct *new; ++ ++ mutex_lock(&obj->mmap_lock); ++ ++ new = alloc_iovm_area(obj, da, bytes, flags); ++ if (IS_ERR(new)) { ++ err = PTR_ERR(new); ++ goto err_alloc_iovma; ++ } ++ new->va = va; ++ new->sgt = sgt; ++ ++ if (map_iovm_area(obj, new, sgt, new->flags)) ++ goto err_map; ++ ++ mutex_unlock(&obj->mmap_lock); ++ ++ dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", ++ __func__, new->da_start, bytes, new->flags, va); ++ ++ return new->da_start; ++ ++err_map: ++ free_iovm_area(obj, new); ++err_alloc_iovma: ++ mutex_unlock(&obj->mmap_lock); ++ return err; ++} ++ ++static inline u32 __iommu_vmap(struct iommu *obj, u32 da, ++ const struct sg_table *sgt, void *va, size_t bytes, u32 flags) ++{ ++ return map_iommu_region(obj, da, sgt, va, bytes, flags); ++} ++ ++/** ++ * iommu_vmap - (d)-(p)-(v) address mapper ++ * @obj: objective iommu ++ * @sgt: address of scatter gather table ++ * @flags: iovma and page property ++ * ++ * Creates 1-n-1 mapping with given @sgt and returns @da. ++ * All @sgt element must be io page size aligned. ++ */ ++u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, ++ u32 flags) ++{ ++ size_t bytes; ++ void *va = NULL; ++ ++ if (!obj || !obj->dev || !sgt) ++ return -EINVAL; ++ ++ bytes = sgtable_len(sgt); ++ if (!bytes) ++ return -EINVAL; ++ bytes = PAGE_ALIGN(bytes); ++ ++ if (flags & IOVMF_MMIO) { ++ va = vmap_sg(sgt); ++ if (IS_ERR(va)) ++ return PTR_ERR(va); ++ } ++ ++ flags &= IOVMF_HW_MASK; ++ flags |= IOVMF_DISCONT; ++ flags |= IOVMF_MMIO; ++ flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); ++ ++ da = __iommu_vmap(obj, da, sgt, va, bytes, flags); ++ if (IS_ERR_VALUE(da)) ++ vunmap_sg(va); ++ ++ return da; ++} ++EXPORT_SYMBOL_GPL(iommu_vmap); ++ ++/** ++ * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' ++ * @obj: objective iommu ++ * @da: iommu device virtual address ++ * ++ * Free the iommu virtually contiguous memory area starting at ++ * @da, which was returned by 'iommu_vmap()'. ++ */ ++struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) ++{ ++ struct sg_table *sgt; ++ /* ++ * 'sgt' is allocated before 'iommu_vmalloc()' is called. ++ * Just returns 'sgt' to the caller to free ++ */ ++ sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); ++ if (!sgt) ++ dev_dbg(obj->dev, "%s: No sgt\n", __func__); ++ return sgt; ++} ++EXPORT_SYMBOL_GPL(iommu_vunmap); ++ ++/** ++ * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper ++ * @obj: objective iommu ++ * @da: contiguous iommu virtual memory ++ * @bytes: allocation size ++ * @flags: iovma and page property ++ * ++ * Allocate @bytes linearly and creates 1-n-1 mapping and returns ++ * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set. ++ */ ++u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) ++{ ++ void *va; ++ struct sg_table *sgt; ++ ++ if (!obj || !obj->dev || !bytes) ++ return -EINVAL; ++ ++ bytes = PAGE_ALIGN(bytes); ++ ++ va = vmalloc(bytes); ++ if (!va) ++ return -ENOMEM; ++ ++ sgt = sgtable_alloc(bytes, flags); ++ if (IS_ERR(sgt)) { ++ da = PTR_ERR(sgt); ++ goto err_sgt_alloc; ++ } ++ sgtable_fill_vmalloc(sgt, va); ++ ++ flags &= IOVMF_HW_MASK; ++ flags |= IOVMF_DISCONT; ++ flags |= IOVMF_ALLOC; ++ flags |= (da ? 
IOVMF_DA_FIXED : IOVMF_DA_ANON); ++ ++ da = __iommu_vmap(obj, da, sgt, va, bytes, flags); ++ if (IS_ERR_VALUE(da)) ++ goto err_iommu_vmap; ++ ++ return da; ++ ++err_iommu_vmap: ++ sgtable_drain_vmalloc(sgt); ++ sgtable_free(sgt); ++err_sgt_alloc: ++ vfree(va); ++ return da; ++} ++EXPORT_SYMBOL_GPL(iommu_vmalloc); ++ ++/** ++ * iommu_vfree - release memory allocated by 'iommu_vmalloc()' ++ * @obj: objective iommu ++ * @da: iommu device virtual address ++ * ++ * Frees the iommu virtually continuous memory area starting at ++ * @da, as obtained from 'iommu_vmalloc()'. ++ */ ++void iommu_vfree(struct iommu *obj, const u32 da) ++{ ++ struct sg_table *sgt; ++ ++ sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); ++ if (!sgt) ++ dev_dbg(obj->dev, "%s: No sgt\n", __func__); ++ sgtable_free(sgt); ++} ++EXPORT_SYMBOL_GPL(iommu_vfree); ++ ++static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va, ++ size_t bytes, u32 flags) ++{ ++ struct sg_table *sgt; ++ ++ sgt = sgtable_alloc(bytes, flags); ++ if (IS_ERR(sgt)) ++ return PTR_ERR(sgt); ++ ++ sgtable_fill_kmalloc(sgt, pa, bytes); ++ ++ da = map_iommu_region(obj, da, sgt, va, bytes, flags); ++ if (IS_ERR_VALUE(da)) { ++ sgtable_drain_kmalloc(sgt); ++ sgtable_free(sgt); ++ } ++ ++ return da; ++} ++ ++/** ++ * iommu_kmap - (d)-(p)-(v) address mapper ++ * @obj: objective iommu ++ * @da: contiguous iommu virtual memory ++ * @pa: contiguous physical memory ++ * @flags: iovma and page property ++ * ++ * Creates 1-1-1 mapping and returns @da again, which can be ++ * adjusted if 'IOVMF_DA_ANON' is set. ++ */ ++u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, ++ u32 flags) ++{ ++ void *va; ++ ++ if (!obj || !obj->dev || !bytes) ++ return -EINVAL; ++ ++ bytes = PAGE_ALIGN(bytes); ++ ++ va = ioremap(pa, bytes); ++ if (!va) ++ return -ENOMEM; ++ ++ flags &= IOVMF_HW_MASK; ++ flags |= IOVMF_LINEAR; ++ flags |= IOVMF_MMIO; ++ flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON); ++ ++ da = __iommu_kmap(obj, da, pa, va, bytes, flags); ++ if (IS_ERR_VALUE(da)) ++ iounmap(va); ++ ++ return da; ++} ++EXPORT_SYMBOL_GPL(iommu_kmap); ++ ++/** ++ * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' ++ * @obj: objective iommu ++ * @da: iommu device virtual address ++ * ++ * Frees the iommu virtually contiguous memory area starting at ++ * @da, which was passed to and was returned by'iommu_kmap()'. ++ */ ++void iommu_kunmap(struct iommu *obj, u32 da) ++{ ++ struct sg_table *sgt; ++ typedef void (*func_t)(const void *); ++ ++ sgt = unmap_vm_area(obj, da, (func_t)__iounmap, ++ IOVMF_LINEAR | IOVMF_MMIO); ++ if (!sgt) ++ dev_dbg(obj->dev, "%s: No sgt\n", __func__); ++ sgtable_free(sgt); ++} ++EXPORT_SYMBOL_GPL(iommu_kunmap); ++ ++/** ++ * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper ++ * @obj: objective iommu ++ * @da: contiguous iommu virtual memory ++ * @bytes: bytes for allocation ++ * @flags: iovma and page property ++ * ++ * Allocate @bytes linearly and creates 1-1-1 mapping and returns ++ * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set. ++ */ ++u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) ++{ ++ void *va; ++ u32 pa; ++ ++ if (!obj || !obj->dev || !bytes) ++ return -EINVAL; ++ ++ bytes = PAGE_ALIGN(bytes); ++ ++ va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); ++ if (!va) ++ return -ENOMEM; ++ pa = virt_to_phys(va); ++ ++ flags &= IOVMF_HW_MASK; ++ flags |= IOVMF_LINEAR; ++ flags |= IOVMF_ALLOC; ++ flags |= (da ? 
IOVMF_DA_FIXED : IOVMF_DA_ANON); ++ ++ da = __iommu_kmap(obj, da, pa, va, bytes, flags); ++ if (IS_ERR_VALUE(da)) ++ kfree(va); ++ ++ return da; ++} ++EXPORT_SYMBOL_GPL(iommu_kmalloc); ++ ++/** ++ * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' ++ * @obj: objective iommu ++ * @da: iommu device virtual address ++ * ++ * Frees the iommu virtually contiguous memory area starting at ++ * @da, which was passed to and was returned by'iommu_kmalloc()'. ++ */ ++void iommu_kfree(struct iommu *obj, u32 da) ++{ ++ struct sg_table *sgt; ++ ++ sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); ++ if (!sgt) ++ dev_dbg(obj->dev, "%s: No sgt\n", __func__); ++ sgtable_free(sgt); ++} ++EXPORT_SYMBOL_GPL(iommu_kfree); ++ ++ ++static int __init iovmm_init(void) ++{ ++ const unsigned long flags = SLAB_HWCACHE_ALIGN; ++ struct kmem_cache *p; ++ ++ p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, ++ flags, NULL); ++ if (!p) ++ return -ENOMEM; ++ iovm_area_cachep = p; ++ ++ return 0; ++} ++module_init(iovmm_init); ++ ++static void __exit iovmm_exit(void) ++{ ++ kmem_cache_destroy(iovm_area_cachep); ++} ++module_exit(iovmm_exit); ++ ++MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); ++MODULE_AUTHOR("Hiroshi DOYU "); ++MODULE_LICENSE("GPL v2"); +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/Kconfig kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/Kconfig +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/Kconfig 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/Kconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -58,7 +58,7 @@ config OMAP_DEBUG_CLOCKDOMAIN + + config OMAP_SMARTREFLEX + bool "SmartReflex support" +- depends on ARCH_OMAP34XX && TWL4030_CORE ++ depends on ARCH_OMAP34XX && TWL4030_CORE && PM + help + Say Y if you want to enable SmartReflex. + +@@ -182,6 +182,21 @@ config OMAP_MBOX_FWK + Say Y here if you want to use OMAP Mailbox framework support for + DSP, IVA1.0 and IVA2 in OMAP1/2/3. + ++config OMAP_IOMMU ++ tristate "IOMMU support" ++ depends on ARCH_OMAP ++ default n ++ help ++ Say Y here if you want to use OMAP IOMMU support for IVA2 and ++ Camera in OMAP3. ++ ++config OMAP_IOMMU_DEBUG ++ tristate "IOMMU debug support" ++ depends on ARCH_OMAP && DEBUG_FS && m ++ default n ++ help ++ Use debugfs to investigate iommu ++ + choice + prompt "System timer" + default OMAP_MPU_TIMER +@@ -205,6 +220,13 @@ config OMAP_32K_TIMER + + endchoice + ++config OMAP3_DEBOBS ++ bool "OMAP 3430 Debug observability support" ++ depends on ARCH_OMAP3 && DEBUG_FS ++ default n ++ help ++ Use ETK pads for debug observability ++ + config OMAP_32K_TIMER_HZ + int "Kernel internal timer frequency for 32KHz timer" + range 32 1024 +@@ -260,6 +282,23 @@ config OMAP_SERIAL_WAKE + to data on the serial RX line. This allows you to wake the + system from serial console. 
+ ++choice ++ prompt "OMAP PM layer selection" ++ depends on ARCH_OMAP ++ default OMAP_PM_NOOP ++ ++config OMAP_PM_NONE ++ bool "No PM layer" ++ ++config OMAP_PM_NOOP ++ bool "No-op/debug PM layer" ++ ++config OMAP_PM_SRF ++ depends on PM ++ bool "PM layer implemented using SRF" ++ ++endchoice ++ + endmenu + + endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/mailbox.h kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/mailbox.h +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/mailbox.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/mailbox.h 2008-12-25 00:26:37.000000000 +0100 +@@ -0,0 +1,100 @@ ++/* ++ * Mailbox internal functions ++ * ++ * Copyright (C) 2006 Nokia Corporation ++ * Written by: Hiroshi DOYU ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ */ ++ ++#ifndef __ARCH_ARM_PLAT_MAILBOX_H ++#define __ARCH_ARM_PLAT_MAILBOX_H ++ ++/* ++ * Mailbox sequence bit API ++ */ ++#if defined(CONFIG_ARCH_OMAP1) ++# define MBOX_USE_SEQ_BIT ++#elif defined(CONFIG_ARCH_OMAP2) ++# define MBOX_USE_SEQ_BIT ++#endif ++ ++#ifdef MBOX_USE_SEQ_BIT ++/* seq_rcv should be initialized with any value other than ++ * 0 and 1 << 31, to allow either value for the first ++ * message. */ ++static inline void mbox_seq_init(struct omap_mbox *mbox) ++{ ++ /* any value other than 0 and 1 << 31 */ ++ mbox->seq_rcv = 0xffffffff; ++} ++ ++static inline void mbox_seq_toggle(struct omap_mbox *mbox, mbox_msg_t * msg) ++{ ++ /* add seq_snd to msg */ ++ *msg = (*msg & 0x7fffffff) | mbox->seq_snd; ++ /* flip seq_snd */ ++ mbox->seq_snd ^= 1 << 31; ++} ++ ++static inline int mbox_seq_test(struct omap_mbox *mbox, mbox_msg_t msg) ++{ ++ mbox_msg_t seq = msg & (1 << 31); ++ if (seq == mbox->seq_rcv) ++ return -1; ++ mbox->seq_rcv = seq; ++ return 0; ++} ++#else ++static inline void mbox_seq_init(struct omap_mbox *mbox) ++{ ++} ++static inline void mbox_seq_toggle(struct omap_mbox *mbox, mbox_msg_t * msg) ++{ ++} ++static inline int mbox_seq_test(struct omap_mbox *mbox, mbox_msg_t msg) ++{ ++ return 0; ++} ++#endif ++ ++/* Mailbox FIFO handle functions */ ++static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox) ++{ ++ return mbox->ops->fifo_read(mbox); ++} ++static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) ++{ ++ mbox->ops->fifo_write(mbox, msg); ++} ++static inline int mbox_fifo_empty(struct omap_mbox *mbox) ++{ ++ return mbox->ops->fifo_empty(mbox); ++} ++static inline int mbox_fifo_full(struct omap_mbox *mbox) ++{ ++ return mbox->ops->fifo_full(mbox); ++} ++ ++/* Mailbox IRQ handle functions */ ++static inline void enable_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) ++{ ++ mbox->ops->enable_irq(mbox, irq); ++} ++static inline void disable_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) ++{ ++ mbox->ops->disable_irq(mbox, irq); ++} ++static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) ++{ ++ if (mbox->ops->ack_irq) ++ mbox->ops->ack_irq(mbox, irq); ++} ++static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) ++{ ++ return mbox->ops->is_irq(mbox, irq); ++} ++ ++#endif /* __ARCH_ARM_PLAT_MAILBOX_H */ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/Makefile kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/Makefile +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/Makefile 2011-09-04 11:32:10.013211266 +0200 ++++ 
kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/Makefile 2011-09-04 11:31:05.000000000 +0200 +@@ -4,7 +4,7 @@ + + # Common support + obj-y := common.o sram.o clock.o devices.o dma.o mux.o gpio.o \ +- usb.o fb.o io.o ++ usb.o fb.o dss_boottime.o vram.o vrfb.o io.o + obj-m := + obj-n := + obj- := +@@ -13,6 +13,8 @@ obj- := + obj-$(CONFIG_ARCH_OMAP16XX) += ocpi.o + + obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o ++obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o ++obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o + + obj-$(CONFIG_CPU_FREQ) += cpu-omap.o + obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o +@@ -21,7 +23,8 @@ obj-$(CONFIG_OMAP_COMPONENT_VERSION) += + obj-$(CONFIG_OMAP_GPIO_SWITCH) += gpio-switch.o + obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o + obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o +-obj-$(CONFIG_I2C_OMAP) += i2c.o ++i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o ++obj-y += $(i2c-omap-m) $(i2c-omap-y) + + # OMAP MMU framework + obj-$(CONFIG_OMAP_MMU_FWK) += mmu.o +@@ -29,3 +32,6 @@ obj-$(CONFIG_OMAP_MMU_FWK) += mmu.o + # OMAP mailbox framework + obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox.o + ++obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-noop.o ++obj-$(CONFIG_OMAP_PM_SRF) += omap-pm-srf.o \ ++ resource.o +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/mcbsp.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/mcbsp.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/mcbsp.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/mcbsp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -26,6 +26,9 @@ + + #include + #include ++#ifdef CONFIG_ARCH_OMAP34XX ++#include "../mach-omap2/cm-regbits-34xx.h" ++#endif + + struct omap_mcbsp **mcbsp_ptr; + int omap_mcbsp_count; +@@ -54,6 +57,14 @@ int omap_mcbsp_read(void __iomem *io_bas + #define omap_mcbsp_check_valid_id(id) (id < omap_mcbsp_count) + #define id_to_mcbsp_ptr(id) mcbsp_ptr[id]; + ++#define OMAP_ST_READ(base, reg) \ ++ omap_mcbsp_read(base, OMAP_ST_REG_##reg) ++#define OMAP_ST_WRITE(base, reg, val) \ ++ omap_mcbsp_write(base, OMAP_ST_REG_##reg, val) ++ ++#define omap_st_check_valid_id(id) ((id == 1 || id == 2) && \ ++ id < omap_mcbsp_count) ++ + static void omap_mcbsp_dump_reg(u8 id) + { + struct omap_mcbsp *mcbsp = id_to_mcbsp_ptr(id); +@@ -91,11 +102,20 @@ static void omap_mcbsp_dump_reg(u8 id) + static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id) + { + struct omap_mcbsp *mcbsp_tx = dev_id; ++ u16 irqst_spcr2; + +- dev_dbg(mcbsp_tx->dev, "TX IRQ callback : 0x%x\n", +- OMAP_MCBSP_READ(mcbsp_tx->io_base, SPCR2)); ++ irqst_spcr2 = OMAP_MCBSP_READ(mcbsp_tx->io_base, SPCR2); ++ dev_dbg(mcbsp_tx->dev, "TX IRQ callback : 0x%x\n", irqst_spcr2); + +- complete(&mcbsp_tx->tx_irq_completion); ++ if (irqst_spcr2 & XSYNC_ERR) { ++ dev_err(mcbsp_tx->dev, "TX Frame Sync Error! 
: 0x%x\n",
++ irqst_spcr2);
++ /* Writing zero to XSYNC_ERR clears the IRQ */
++ OMAP_MCBSP_WRITE(mcbsp_tx->io_base, SPCR2,
++ irqst_spcr2 & ~(XSYNC_ERR));
++ } else {
++ complete(&mcbsp_tx->tx_irq_completion);
++ }
+
+ return IRQ_HANDLED;
+ }
+@@ -103,11 +123,20 @@ static irqreturn_t omap_mcbsp_tx_irq_han
+ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
+ {
+ struct omap_mcbsp *mcbsp_rx = dev_id;
++ u16 irqst_spcr1;
+
+- dev_dbg(mcbsp_rx->dev, "RX IRQ callback : 0x%x\n",
+- OMAP_MCBSP_READ(mcbsp_rx->io_base, SPCR2));
++ irqst_spcr1 = OMAP_MCBSP_READ(mcbsp_rx->io_base, SPCR1);
++ dev_dbg(mcbsp_rx->dev, "RX IRQ callback : 0x%x\n", irqst_spcr1);
+
+- complete(&mcbsp_rx->rx_irq_completion);
++ if (irqst_spcr1 & RSYNC_ERR) {
++ dev_err(mcbsp_rx->dev, "RX Frame Sync Error! : 0x%x\n",
++ irqst_spcr1);
++ /* Writing zero to RSYNC_ERR clears the IRQ */
++ OMAP_MCBSP_WRITE(mcbsp_rx->io_base, SPCR1,
++ irqst_spcr1 & ~(RSYNC_ERR));
++ } else {
++ complete(&mcbsp_rx->rx_irq_completion);
++ }
+
+ return IRQ_HANDLED;
+ }
+@@ -180,6 +209,115 @@ void omap_mcbsp_config(unsigned int id,
+ }
+ EXPORT_SYMBOL(omap_mcbsp_config);
+
++#ifdef CONFIG_ARCH_OMAP34XX
++/*
++ * omap_mcbsp_set_tx_threshold configures how to deal
++ * with the transmit threshold. The threshold value and handler can be
++ * configured here.
++ */
++void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
++{
++ struct omap_mcbsp *mcbsp;
++ void __iomem *io_base;
++
++ if (!cpu_is_omap34xx())
++ return;
++
++ if (!omap_mcbsp_check_valid_id(id)) {
++ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
++ return;
++ }
++ mcbsp = id_to_mcbsp_ptr(id);
++ io_base = mcbsp->io_base;
++
++ OMAP_MCBSP_WRITE(io_base, THRSH2, threshold);
++}
++EXPORT_SYMBOL(omap_mcbsp_set_tx_threshold);
++
++/*
++ * omap_mcbsp_set_rx_threshold configures how to deal
++ * with the receive threshold. The threshold value and handler can be
++ * configured here.
++ */ ++void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold) ++{ ++ struct omap_mcbsp *mcbsp; ++ void __iomem *io_base; ++ ++ if (!cpu_is_omap34xx()) ++ return; ++ ++ if (!omap_mcbsp_check_valid_id(id)) { ++ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); ++ return; ++ } ++ mcbsp = id_to_mcbsp_ptr(id); ++ io_base = mcbsp->io_base; ++ ++ OMAP_MCBSP_WRITE(io_base, THRSH1, threshold); ++} ++EXPORT_SYMBOL(omap_mcbsp_set_rx_threshold); ++ ++/* ++ * omap_mcbsp_get_max_tx_thres just return the current configured ++ * maximum threshold for transmission ++ */ ++u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) ++{ ++ struct omap_mcbsp *mcbsp; ++ ++ if (!omap_mcbsp_check_valid_id(id)) { ++ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); ++ return -ENODEV; ++ } ++ mcbsp = id_to_mcbsp_ptr(id); ++ ++ return mcbsp->max_tx_thres; ++} ++EXPORT_SYMBOL(omap_mcbsp_get_max_tx_threshold); ++ ++/* ++ * omap_mcbsp_get_max_rx_thres just return the current configured ++ * maximum threshold for reception ++ */ ++u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) ++{ ++ struct omap_mcbsp *mcbsp; ++ ++ if (!omap_mcbsp_check_valid_id(id)) { ++ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); ++ return -ENODEV; ++ } ++ mcbsp = id_to_mcbsp_ptr(id); ++ ++ return mcbsp->max_rx_thres; ++} ++EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold); ++ ++/* ++ * omap_mcbsp_get_dma_op_mode just return the current configured ++ * operating mode for the mcbsp channel ++ */ ++int omap_mcbsp_get_dma_op_mode(unsigned int id) ++{ ++ struct omap_mcbsp *mcbsp; ++ int dma_op_mode; ++ ++ if (!omap_mcbsp_check_valid_id(id)) { ++ printk(KERN_ERR "%s: Invalid id (%u)\n", __func__, id + 1); ++ return -ENODEV; ++ } ++ mcbsp = id_to_mcbsp_ptr(id); ++ ++ spin_lock_irq(&mcbsp->lock); ++ dma_op_mode = mcbsp->dma_op_mode; ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return dma_op_mode; ++} ++EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode); ++#endif ++ + /* + * We can choose between IRQ based or polled IO. + * This needs to be called before omap_mcbsp_request(). 
+@@ -194,18 +332,18 @@ int omap_mcbsp_set_io_type(unsigned int + } + mcbsp = id_to_mcbsp_ptr(id); + +- spin_lock(&mcbsp->lock); ++ spin_lock_irq(&mcbsp->lock); + + if (!mcbsp->free) { + dev_err(mcbsp->dev, "McBSP%d is currently in use\n", + mcbsp->id); +- spin_unlock(&mcbsp->lock); ++ spin_unlock_irq(&mcbsp->lock); + return -EINVAL; + } + + mcbsp->io_type = io_type; + +- spin_unlock(&mcbsp->lock); ++ spin_unlock_irq(&mcbsp->lock); + + return 0; + } +@@ -214,7 +352,10 @@ EXPORT_SYMBOL(omap_mcbsp_set_io_type); + int omap_mcbsp_request(unsigned int id) + { + struct omap_mcbsp *mcbsp; ++ unsigned long flags; ++ int i; + int err; ++ u16 syscon; + + if (!omap_mcbsp_check_valid_id(id)) { + printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); +@@ -225,18 +366,41 @@ int omap_mcbsp_request(unsigned int id) + if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->request) + mcbsp->pdata->ops->request(id); + +- clk_enable(mcbsp->clk); +- +- spin_lock(&mcbsp->lock); ++ spin_lock_irqsave(&mcbsp->lock, flags); + if (!mcbsp->free) { + dev_err(mcbsp->dev, "McBSP%d is currently in use\n", + mcbsp->id); +- spin_unlock(&mcbsp->lock); ++ spin_unlock_irqrestore(&mcbsp->lock, flags); + return -1; + } + + mcbsp->free = 0; +- spin_unlock(&mcbsp->lock); ++ spin_unlock_irqrestore(&mcbsp->lock, flags); ++ ++ for (i = 0; i < mcbsp->num_clks; i++) ++ clk_enable(mcbsp->clks[i]); ++ ++ /* ++ * Enable wakup behavior, smart idle and all wakeups ++ * REVISIT: some wakeups may be unnecessary ++ */ ++ if (cpu_is_omap34xx()) { ++ syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON); ++ syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03)); ++ ++ spin_lock_irq(&mcbsp->lock); ++ if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) { ++ syscon |= (ENAWAKEUP | SIDLEMODE(0x02) | ++ CLOCKACTIVITY(0x02)); ++ OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, ++ WAKEUPEN_ALL); ++ } else { ++ syscon |= SIDLEMODE(0x01); ++ } ++ spin_unlock_irq(&mcbsp->lock); ++ ++ OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon); ++ } + + /* + * Make sure that transmitter, receiver and sample-rate generator are +@@ -276,6 +440,9 @@ EXPORT_SYMBOL(omap_mcbsp_request); + void omap_mcbsp_free(unsigned int id) + { + struct omap_mcbsp *mcbsp; ++ unsigned long flags; ++ int i; ++ u16 syscon; + + if (!omap_mcbsp_check_valid_id(id)) { + printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); +@@ -286,18 +453,41 @@ void omap_mcbsp_free(unsigned int id) + if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free) + mcbsp->pdata->ops->free(id); + +- clk_disable(mcbsp->clk); ++ /* ++ * Disable wakup behavior, smart idle and all wakeups ++ */ ++ if (cpu_is_omap34xx()) { ++ syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON); ++ syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03)); ++ /* ++ * HW bug workaround - If no_idle mode is taken, we need to ++ * go to smart_idle before going to always_idle, or the ++ * device will not hit retention anymore. 
++ */ ++ syscon |= SIDLEMODE(0x02); ++ OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon); ++ ++ syscon &= ~(SIDLEMODE(0x03)); ++ OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon); ++ ++ OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, 0); ++ } + +- spin_lock(&mcbsp->lock); ++ spin_lock_irqsave(&mcbsp->lock, flags); + if (mcbsp->free) { + dev_err(mcbsp->dev, "McBSP%d was not reserved\n", + mcbsp->id); +- spin_unlock(&mcbsp->lock); ++ spin_unlock_irqrestore(&mcbsp->lock, flags); + return; + } ++ spin_unlock_irqrestore(&mcbsp->lock, flags); + ++ for (i = mcbsp->num_clks - 1; i >= 0; i--) ++ clk_disable(mcbsp->clks[i]); ++ ++ spin_lock_irqsave(&mcbsp->lock, flags); + mcbsp->free = 1; +- spin_unlock(&mcbsp->lock); ++ spin_unlock_irqrestore(&mcbsp->lock, flags); + + if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) { + /* Free IRQs */ +@@ -307,6 +497,165 @@ void omap_mcbsp_free(unsigned int id) + } + EXPORT_SYMBOL(omap_mcbsp_free); + ++#ifdef CONFIG_ARCH_OMAP34XX ++static void omap_st_enable(struct omap_mcbsp *mcbsp) ++{ ++ struct omap_mcbsp_st_data *st_data; ++ void __iomem *io_base_mcbsp; ++ void __iomem *io_base_st; ++ unsigned int w; ++ ++ io_base_mcbsp = mcbsp->io_base; ++ st_data = mcbsp->st_data; ++ io_base_st = st_data->io_base_st; ++ ++ /* ++ * Sidetone uses McBSP ICLK - which must not idle when sidetones ++ * are enabled or sidetones start sounding ugly. ++ */ ++ w = cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); ++ w &= ~(mcbsp->id - 1); ++ cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE); ++ ++ /* Enable McBSP Sidetone */ ++ w = OMAP_MCBSP_READ(io_base_mcbsp, SSELCR); ++ OMAP_MCBSP_WRITE(io_base_mcbsp, SSELCR, w | SIDETONEEN); ++ ++ w = OMAP_ST_READ(io_base_st, SYSCONFIG); ++ OMAP_ST_WRITE(io_base_st, SYSCONFIG, w & ~(ST_AUTOIDLE)); ++ ++ /* Enable Sidetone from Sidetone Core */ ++ w = OMAP_ST_READ(io_base_st, SSELCR); ++ OMAP_ST_WRITE(io_base_st, SSELCR, w | ST_SIDETONEEN); ++} ++ ++static void omap_st_disable(struct omap_mcbsp *mcbsp) ++{ ++ struct omap_mcbsp_st_data *st_data; ++ void __iomem *io_base_mcbsp; ++ void __iomem *io_base_st; ++ unsigned int w; ++ ++ io_base_mcbsp = mcbsp->io_base; ++ st_data = mcbsp->st_data; ++ io_base_st = st_data->io_base_st; ++ ++ w = OMAP_ST_READ(io_base_st, SSELCR); ++ OMAP_ST_WRITE(io_base_st, SSELCR, w & ~(ST_SIDETONEEN)); ++ ++ w = OMAP_ST_READ(io_base_st, SYSCONFIG); ++ OMAP_ST_WRITE(io_base_st, SYSCONFIG, w | ST_AUTOIDLE); ++ ++ w = OMAP_MCBSP_READ(io_base_mcbsp, SSELCR); ++ OMAP_MCBSP_WRITE(io_base_mcbsp, SSELCR, w & ~(SIDETONEEN)); ++ ++ w = cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE); ++ w |= (mcbsp->id - 1); ++ cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE); ++} ++ ++static void omap_st_enable_autoidle(struct omap_mcbsp *mcbsp) ++{ ++ struct omap_mcbsp_st_data *st_data; ++ void __iomem *io_base_st; ++ unsigned int w; ++ ++ st_data = mcbsp->st_data; ++ io_base_st = st_data->io_base_st; ++ ++ w = OMAP_ST_READ(io_base_st, SYSCONFIG); ++ OMAP_ST_WRITE(io_base_st, SYSCONFIG, w | ST_AUTOIDLE); ++} ++ ++static void omap_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir) ++{ ++ struct omap_mcbsp_st_data *st_data; ++ void __iomem *io_base; ++ u16 w, i; ++ ++ st_data = mcbsp->st_data; ++ io_base = st_data->io_base_st; ++ ++ w = OMAP_ST_READ(io_base, SYSCONFIG); ++ OMAP_ST_WRITE(io_base, SYSCONFIG, w & ~(ST_AUTOIDLE)); ++ ++ w = OMAP_ST_READ(io_base, SSELCR); ++ ++ if (w & ST_COEFFWREN) ++ OMAP_ST_WRITE(io_base, SSELCR, w & ~(ST_COEFFWREN)); ++ ++ OMAP_ST_WRITE(io_base, SSELCR, w | ST_COEFFWREN); ++ ++ for (i = 0; i < 128; i++) ++ OMAP_ST_WRITE(io_base, 
SFIRCR, fir[i]); ++ ++ i = 0; ++ ++ w = OMAP_ST_READ(io_base, SSELCR); ++ while (!(w & ST_COEFFWRDONE) && (++i < 1000)) ++ w = OMAP_ST_READ(io_base, SSELCR); ++ ++ OMAP_ST_WRITE(io_base, SSELCR, w & ~(ST_COEFFWREN)); ++ ++ if (i == 1000) ++ dev_err(mcbsp->dev, "McBSP FIR load error!\n"); ++} ++ ++static void omap_st_chgain(struct omap_mcbsp *mcbsp, s16 ch0gain, s16 ch1gain) ++{ ++ struct omap_mcbsp_st_data *st_data; ++ void __iomem *io_base; ++ u16 w; ++ ++ st_data = mcbsp->st_data; ++ io_base = st_data->io_base_st; ++ ++ w = OMAP_ST_READ(io_base, SYSCONFIG); ++ OMAP_ST_WRITE(io_base, SYSCONFIG, w & ~(ST_AUTOIDLE)); ++ ++ w = OMAP_ST_READ(io_base, SSELCR); ++ ++ OMAP_ST_WRITE(io_base, SGAINCR, ST_CH0GAIN(ch0gain) | \ ++ ST_CH1GAIN(ch1gain)); ++} ++ ++static void omap_st_start(struct omap_mcbsp *mcbsp) ++{ ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&mcbsp->lock, flags); ++ if (st_data) { ++ omap_st_fir_write(mcbsp, mcbsp->st_data->taps); ++ omap_st_chgain(mcbsp, ++ mcbsp->st_data->ch0gain, ++ mcbsp->st_data->ch1gain); ++ if (st_data->enabled) ++ omap_st_enable(mcbsp); ++ else ++ omap_st_enable_autoidle(mcbsp); ++ st_data->running = 1; ++ } ++ spin_unlock_irqrestore(&mcbsp->lock, flags); ++} ++ ++static void omap_st_stop(struct omap_mcbsp *mcbsp) ++{ ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&mcbsp->lock, flags); ++ if (st_data && st_data->running) { ++ omap_st_disable(mcbsp); ++ st_data->running = 0; ++ } ++ spin_unlock_irqrestore(&mcbsp->lock, flags); ++} ++#else ++static inline void omap_st_start(struct omap_mcbsp *mcbsp) {} ++static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {} ++#endif /* CONFIG_ARCH_OMAP34XX */ ++ + /* + * Here we start the McBSP, by enabling the sample + * generator, both transmitter and receivers, +@@ -325,6 +674,9 @@ void omap_mcbsp_start(unsigned int id) + mcbsp = id_to_mcbsp_ptr(id); + io_base = mcbsp->io_base; + ++ if (cpu_is_omap34xx()) ++ omap_st_start(mcbsp); ++ + mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7; + mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7; + +@@ -339,7 +691,8 @@ void omap_mcbsp_start(unsigned int id) + w = OMAP_MCBSP_READ(io_base, SPCR1); + OMAP_MCBSP_WRITE(io_base, SPCR1, w | 1); + +- udelay(100); ++ /* Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec */ ++ udelay(500); + + /* Start frame sync */ + w = OMAP_MCBSP_READ(io_base, SPCR2); +@@ -375,9 +728,64 @@ void omap_mcbsp_stop(unsigned int id) + /* Reset the sample rate generator */ + w = OMAP_MCBSP_READ(io_base, SPCR2); + OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6)); ++ ++ if (cpu_is_omap34xx()) ++ omap_st_stop(mcbsp); + } + EXPORT_SYMBOL(omap_mcbsp_stop); + ++void omap_mcbsp_xmit_enable(unsigned int id, u8 enable) ++{ ++ struct omap_mcbsp *mcbsp; ++ void __iomem *io_base; ++ u16 w; ++ ++ if (!(cpu_is_omap2430() || cpu_is_omap34xx())) ++ return; ++ ++ if (!omap_mcbsp_check_valid_id(id)) { ++ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); ++ return; ++ } ++ ++ mcbsp = id_to_mcbsp_ptr(id); ++ io_base = mcbsp->io_base; ++ ++ w = OMAP_MCBSP_READ(io_base, XCCR); ++ ++ if (enable) ++ OMAP_MCBSP_WRITE(io_base, XCCR, w & ~(XDISABLE)); ++ else ++ OMAP_MCBSP_WRITE(io_base, XCCR, w | XDISABLE); ++} ++EXPORT_SYMBOL(omap_mcbsp_xmit_enable); ++ ++void omap_mcbsp_recv_enable(unsigned int id, u8 enable) ++{ ++ struct omap_mcbsp *mcbsp; ++ void __iomem *io_base; ++ u16 w; ++ ++ if (!(cpu_is_omap2430() || 
cpu_is_omap34xx())) ++ return; ++ ++ if (!omap_mcbsp_check_valid_id(id)) { ++ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1); ++ return; ++ } ++ ++ mcbsp = id_to_mcbsp_ptr(id); ++ io_base = mcbsp->io_base; ++ ++ w = OMAP_MCBSP_READ(io_base, RCCR); ++ ++ if (enable) ++ OMAP_MCBSP_WRITE(io_base, RCCR, w & ~(RDISABLE)); ++ else ++ OMAP_MCBSP_WRITE(io_base, RCCR, w | RDISABLE); ++} ++EXPORT_SYMBOL(omap_mcbsp_recv_enable); ++ + /* polled mcbsp i/o operations */ + int omap_mcbsp_pollwrite(unsigned int id, u16 buf) + { +@@ -863,6 +1271,323 @@ void omap_mcbsp_set_spi_mode(unsigned in + } + EXPORT_SYMBOL(omap_mcbsp_set_spi_mode); + ++#ifdef CONFIG_ARCH_OMAP34XX ++#define max_thres(m) (mcbsp->pdata->buffer_size) ++#define valid_threshold(m, val) ((val) <= max_thres(m)) ++#define THRESHOLD_PROP_BUILDER(prop) \ ++static ssize_t prop##_show(struct device *dev, \ ++ struct device_attribute *attr, char *buf) \ ++{ \ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \ ++ \ ++ return sprintf(buf, "%u\n", mcbsp->prop); \ ++} \ ++ \ ++static ssize_t prop##_store(struct device *dev, \ ++ struct device_attribute *attr, \ ++ const char *buf, size_t size) \ ++{ \ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \ ++ unsigned long val; \ ++ int status; \ ++ \ ++ status = strict_strtoul(buf, 0, &val); \ ++ if (status) \ ++ return status; \ ++ \ ++ if (!valid_threshold(mcbsp, val)) \ ++ return -EDOM; \ ++ \ ++ mcbsp->prop = val; \ ++ return size; \ ++} \ ++ \ ++static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store); ++ ++THRESHOLD_PROP_BUILDER(max_tx_thres); ++THRESHOLD_PROP_BUILDER(max_rx_thres); ++ ++static ssize_t dma_op_mode_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ int dma_op_mode; ++ ++ spin_lock_irq(&mcbsp->lock); ++ dma_op_mode = mcbsp->dma_op_mode; ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return sprintf(buf, "%d\n", dma_op_mode); ++} ++ ++static ssize_t dma_op_mode_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ unsigned long val; ++ int status; ++ ++ status = strict_strtoul(buf, 0, &val); ++ if (status) ++ return status; ++ ++ spin_lock_irq(&mcbsp->lock); ++ ++ if (!mcbsp->free) { ++ size = -EBUSY; ++ goto unlock; ++ } ++ ++ if (val > MCBSP_DMA_MODE_FRAME || val < MCBSP_DMA_MODE_ELEMENT) { ++ size = -EINVAL; ++ goto unlock; ++ } ++ ++ mcbsp->dma_op_mode = val; ++ ++unlock: ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return size; ++} ++ ++static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store); ++ ++static ssize_t st_enable_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ ++ return sprintf(buf, "%d\n", st_data->enabled); ++} ++ ++static ssize_t st_enable_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ unsigned long val; ++ int status; ++ ++ status = strict_strtoul(buf, 0, &val); ++ if (status) ++ return status; ++ ++ spin_lock_irq(&mcbsp->lock); ++ st_data->enabled = !!val; ++ ++ if (st_data->running) { ++ if (st_data->enabled) ++ omap_st_enable(mcbsp); ++ else ++ omap_st_disable(mcbsp); ++ } ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return size; ++} ++ ++static DEVICE_ATTR(st_enable, 0644, 
st_enable_show, st_enable_store); ++ ++static ssize_t st_taps_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ ssize_t status = 0; ++ int i; ++ ++ spin_lock_irq(&mcbsp->lock); ++ for (i = 0; i < st_data->nr_taps; i++) ++ status += sprintf(&buf[status], (i ? ", %d" : "%d"), ++ st_data->taps[i]); ++ if (i) ++ status += sprintf(&buf[status], "\n"); ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return status; ++} ++ ++static ssize_t st_taps_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ int val, tmp, status, i = 0; ++ ++ spin_lock_irq(&mcbsp->lock); ++ memset(st_data->taps, 0, sizeof(st_data->taps)); ++ st_data->nr_taps = 0; ++ ++ do { ++ status = sscanf(buf, "%d%n", &val, &tmp); ++ if (status < 0 || status == 0) { ++ size = -EINVAL; ++ goto out; ++ } ++ if (val < -32768 || val > 32767) { ++ size = -EINVAL; ++ goto out; ++ } ++ st_data->taps[i++] = val; ++ buf += tmp; ++ if (*buf != ',') ++ break; ++ buf++; ++ } while (1); ++ ++ st_data->nr_taps = i; ++ ++out: ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return size; ++} ++ ++static DEVICE_ATTR(st_taps, 0644, st_taps_show, st_taps_store); ++ ++static ssize_t st_chgain_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ ++ if (strcmp("st_ch0gain", attr->attr.name) == 0) ++ return sprintf(buf, "%d\n", st_data->ch0gain); ++ else ++ return sprintf(buf, "%d\n", st_data->ch1gain); ++} ++ ++static ssize_t st_chgain_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ long val; ++ int status; ++ ++ status = strict_strtol(buf, 0, &val); ++ if (status) ++ return status; ++ if (val < -32768 || val > 32767) ++ return -EINVAL; ++ ++ spin_lock_irq(&mcbsp->lock); ++ if (strcmp("st_ch0gain", attr->attr.name) == 0) ++ st_data->ch0gain = val; ++ else ++ st_data->ch1gain = val; ++ ++ if (st_data->running) ++ omap_st_chgain(mcbsp, ++ mcbsp->st_data->ch0gain, ++ mcbsp->st_data->ch1gain); ++ spin_unlock_irq(&mcbsp->lock); ++ ++ return size; ++} ++ ++static DEVICE_ATTR(st_ch0gain, 0644, st_chgain_show, st_chgain_store); ++static DEVICE_ATTR(st_ch1gain, 0644, st_chgain_show, st_chgain_store); ++ ++static const struct attribute *additional_attrs[] = { ++ &dev_attr_max_tx_thres.attr, ++ &dev_attr_max_rx_thres.attr, ++ &dev_attr_dma_op_mode.attr, ++ NULL, ++}; ++ ++static const struct attribute_group additional_attr_group = { ++ .attrs = (struct attribute **)additional_attrs, ++}; ++ ++static inline int __devinit omap_additional_add(struct platform_device *pdev) ++{ ++ return sysfs_create_group(&pdev->dev.kobj, &additional_attr_group); ++} ++ ++static inline void __devexit omap_additional_rem(struct platform_device *pdev) ++{ ++ sysfs_remove_group(&pdev->dev.kobj, &additional_attr_group); ++} ++ ++static const struct attribute *sidetone_attrs[] = { ++ &dev_attr_st_enable.attr, ++ &dev_attr_st_taps.attr, ++ &dev_attr_st_ch0gain.attr, ++ &dev_attr_st_ch1gain.attr, ++ NULL, ++}; ++ ++static const struct attribute_group sidetone_attr_group = { ++ .attrs = (struct attribute **)sidetone_attrs, ++}; ++ ++int 
__devinit omap_st_add(struct platform_device *pdev) ++{ ++ struct omap_mcbsp_platform_data *pdata = pdev->dev.platform_data; ++ struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev); ++ struct omap_mcbsp_st_data *st_data; ++ int err; ++ ++ st_data = kzalloc(sizeof(*mcbsp->st_data), GFP_KERNEL); ++ if (!st_data) { ++ err = -ENOMEM; ++ goto err1; ++ } ++ ++ st_data->io_base_st = ioremap(pdata->phys_base_st, SZ_4K); ++ if (!st_data->io_base_st) { ++ err = -ENOMEM; ++ goto err2; ++ } ++ ++ err = sysfs_create_group(&pdev->dev.kobj, &sidetone_attr_group); ++ if (err) ++ goto err3; ++ ++ mcbsp->st_data = st_data; ++ return 0; ++ ++err3: ++ iounmap(st_data->io_base_st); ++err2: ++ kfree(st_data); ++err1: ++ return err; ++ ++} ++ ++static void __devexit omap_st_remove(struct platform_device *pdev) ++{ ++ struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev); ++ struct omap_mcbsp_st_data *st_data = mcbsp->st_data; ++ ++ if (st_data) { ++ sysfs_remove_group(&pdev->dev.kobj, &sidetone_attr_group); ++ iounmap(st_data->io_base_st); ++ kfree(st_data); ++ } ++} ++#else ++static inline int __devinit omap_st_add(struct platform_device *pdev) ++{ ++ return 0; ++} ++static inline void __devexit omap_st_remove(struct platform_device *pdev) {} ++static inline int __devinit omap_additional_add(struct platform_device *pdev) ++{ ++ return 0; ++} ++static inline void __devexit omap_additional_rem(struct platform_device *pdev) ++{ } ++#endif /* CONFIG_ARCH_OMAP34XX */ ++ + /* + * McBSP1 and McBSP3 are directly mapped on 1610 and 1510. + * 730 has only 2 McBSP, and both of them are MPU peripherals. +@@ -872,6 +1597,7 @@ static int __devinit omap_mcbsp_probe(st + struct omap_mcbsp_platform_data *pdata = pdev->dev.platform_data; + struct omap_mcbsp *mcbsp; + int id = pdev->id - 1; ++ int i; + int ret = 0; + + if (!pdata) { +@@ -916,22 +1642,59 @@ static int __devinit omap_mcbsp_probe(st + mcbsp->dma_rx_sync = pdata->dma_rx_sync; + mcbsp->dma_tx_sync = pdata->dma_tx_sync; + +- if (pdata->clk_name) +- mcbsp->clk = clk_get(&pdev->dev, pdata->clk_name); +- if (IS_ERR(mcbsp->clk)) { +- dev_err(&pdev->dev, +- "Invalid clock configuration for McBSP%d.\n", +- mcbsp->id); +- ret = PTR_ERR(mcbsp->clk); +- goto err_clk; ++ if (pdata->num_clks) { ++ mcbsp->num_clks = pdata->num_clks; ++ mcbsp->clks = kzalloc(mcbsp->num_clks * sizeof(struct clk *), ++ GFP_KERNEL); ++ if (!mcbsp->clks) { ++ ret = -ENOMEM; ++ goto exit; ++ } ++ for (i = 0; i < mcbsp->num_clks; i++) { ++ mcbsp->clks[i] = clk_get(&pdev->dev, pdata->clk_names[i]); ++ if (IS_ERR(mcbsp->clks[i])) { ++ dev_err(&pdev->dev, ++ "Invalid %s configuration for McBSP%d.\n", ++ pdata->clk_names[i], mcbsp->id); ++ ret = PTR_ERR(mcbsp->clks[i]); ++ goto err_clk; ++ } ++ } ++ + } + + mcbsp->pdata = pdata; + mcbsp->dev = &pdev->dev; + platform_set_drvdata(pdev, mcbsp); ++#ifdef CONFIG_ARCH_OMAP34XX ++ if (cpu_is_omap34xx()) { ++ mcbsp->max_tx_thres = max_thres(mcbsp); ++ mcbsp->max_rx_thres = max_thres(mcbsp); ++ mcbsp->dma_op_mode = MCBSP_DMA_MODE_THRESHOLD; ++ } else { ++ mcbsp->max_tx_thres = -EINVAL; ++ mcbsp->max_rx_thres = -EINVAL; ++ mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT; ++ } ++#endif ++ ++ if (cpu_is_omap34xx()) { ++ if (mcbsp->id == 2 || mcbsp->id == 3) ++ if (omap_st_add(pdev)) ++ dev_warn(&pdev->dev, ++ "Unable to create sidetone controls\n"); ++ ++ if (omap_additional_add(pdev)) ++ dev_warn(&pdev->dev, ++ "Unable to create threshold controls\n"); ++ } ++ + return 0; + + err_clk: ++ while (i--) ++ clk_put(mcbsp->clks[i]); ++ kfree(mcbsp->clks); + 
iounmap(mcbsp->io_base); + err_ioremap: + mcbsp->free = 0; +@@ -942,6 +1705,7 @@ exit: + static int __devexit omap_mcbsp_remove(struct platform_device *pdev) + { + struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev); ++ int i; + + platform_set_drvdata(pdev, NULL); + if (mcbsp) { +@@ -950,12 +1714,25 @@ static int __devexit omap_mcbsp_remove(s + mcbsp->pdata->ops->free) + mcbsp->pdata->ops->free(mcbsp->id); + +- clk_disable(mcbsp->clk); +- clk_put(mcbsp->clk); ++ if (cpu_is_omap34xx()) { ++ if (mcbsp->id == 2 || mcbsp->id == 3) ++ omap_st_remove(pdev); ++ ++ omap_additional_rem(pdev); ++ } ++ ++ for (i = mcbsp->num_clks - 1; i >= 0; i--) { ++ clk_disable(mcbsp->clks[i]); ++ clk_put(mcbsp->clks[i]); ++ } + + iounmap(mcbsp->io_base); + +- mcbsp->clk = NULL; ++ if (mcbsp->num_clks) { ++ kfree(mcbsp->clks); ++ mcbsp->clks = NULL; ++ mcbsp->num_clks = 0; ++ } + mcbsp->free = 0; + mcbsp->dev = NULL; + } +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/omap-pm-noop.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/omap-pm-noop.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/omap-pm-noop.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/omap-pm-noop.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,355 @@ ++/* ++ * omap-pm-noop.c - OMAP power management interface - dummy version ++ * ++ * This code implements the OMAP power management interface to ++ * drivers, CPUIdle, CPUFreq, and DSP Bridge. It is strictly for ++ * debug/demonstration use, as it does nothing but printk() whenever a ++ * function is called (when DEBUG is defined, below) ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * Copyright (C) 2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * Interface developed by (in alphabetical order): ++ * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan ++ * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff ++ */ ++ ++#undef DEBUG ++ ++#include ++#include ++#include ++ ++/* Interface documentation is in mach/omap-pm.h */ ++#include ++ ++#include ++ ++struct omap_opp *dsp_opps; ++struct omap_opp *mpu_opps; ++struct omap_opp *l3_opps; ++ ++/* ++ * Device-driver-originated constraints (via board-*.c files) ++ */ ++ ++void omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t) ++{ ++ if (!dev || t < -1) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (t == -1) ++ pr_debug("OMAP PM: remove max MPU wakeup latency constraint: " ++ "dev %s\n", dev_name(dev)); ++ else ++ pr_debug("OMAP PM: add max MPU wakeup latency constraint: " ++ "dev %s, t = %ld usec\n", dev_name(dev), t); ++ ++ /* ++ * For current Linux, this needs to map the MPU to a ++ * powerdomain, then go through the list of current max lat ++ * constraints on the MPU and find the smallest. If ++ * the latency constraint has changed, the code should ++ * recompute the state to enter for the next powerdomain ++ * state. ++ * ++ * TI CDP code can call constraint_set here. 
++ */ ++} ++ ++void omap_pm_set_min_mpu_freq(struct device *dev, unsigned long r) ++{ ++ if (!dev) { ++ WARN_ON(1); ++ return; ++ } ++ return; ++} ++ ++void omap_pm_set_min_mpu_freq(struct device *dev, unsigned long r) ++{ ++ if (!dev) { ++ WARN_ON(1); ++ return; ++ } ++} ++ ++void omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r) ++{ ++ if (!dev || (agent_id != OCP_INITIATOR_AGENT && ++ agent_id != OCP_TARGET_AGENT)) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (r == 0) ++ pr_debug("OMAP PM: remove min bus tput constraint: " ++ "dev %s for agent_id %d\n", dev_name(dev), agent_id); ++ else ++ pr_debug("OMAP PM: add min bus tput constraint: " ++ "dev %s for agent_id %d: rate %ld KiB\n", ++ dev_name(dev), agent_id, r); ++ ++ /* ++ * This code should model the interconnect and compute the ++ * required clock frequency, convert that to a VDD2 OPP ID, then ++ * set the VDD2 OPP appropriately. ++ * ++ * TI CDP code can call constraint_set here on the VDD2 OPP. ++ */ ++} ++EXPORT_SYMBOL(omap_pm_set_min_bus_tput); ++ ++void omap_pm_set_max_dev_wakeup_lat(struct device *dev, long t) ++{ ++ if (!dev || t < -1) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (t == -1) ++ pr_debug("OMAP PM: remove max device latency constraint: " ++ "dev %s\n", dev_name(dev)); ++ else ++ pr_debug("OMAP PM: add max device latency constraint: " ++ "dev %s, t = %ld usec\n", dev_name(dev), t); ++ ++ /* ++ * For current Linux, this needs to map the device to a ++ * powerdomain, then go through the list of current max lat ++ * constraints on that powerdomain and find the smallest. If ++ * the latency constraint has changed, the code should ++ * recompute the state to enter for the next powerdomain ++ * state. Conceivably, this code should also determine ++ * whether to actually disable the device clocks or not, ++ * depending on how long it takes to re-enable the clocks. ++ * ++ * TI CDP code can call constraint_set here. ++ */ ++} ++ ++void omap_pm_set_max_sdma_lat(struct device *dev, long t) ++{ ++ if (!dev || t < -1) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (t == -1) ++ pr_debug("OMAP PM: remove max DMA latency constraint: " ++ "dev %s\n", dev_name(dev)); ++ else ++ pr_debug("OMAP PM: add max DMA latency constraint: " ++ "dev %s, t = %ld usec\n", dev_name(dev), t); ++ ++ /* ++ * For current Linux PM QOS params, this code should scan the ++ * list of maximum CPU and DMA latencies and select the ++ * smallest, then set cpu_dma_latency pm_qos_param ++ * accordingly. ++ * ++ * For future Linux PM QOS params, with separate CPU and DMA ++ * latency params, this code should just set the dma_latency param. ++ * ++ * TI CDP code can call constraint_set here. ++ */ ++ ++} ++ ++ ++/* ++ * DSP Bridge-specific constraints ++ */ ++ ++const struct omap_opp *omap_pm_dsp_get_opp_table(void) ++{ ++ pr_debug("OMAP PM: DSP request for OPP table\n"); ++ ++ /* ++ * Return DSP frequency table here: The final item in the ++ * array should have .rate = .opp_id = 0. ++ */ ++ ++ return NULL; ++} ++ ++void omap_pm_dsp_set_min_opp(u8 opp_id) ++{ ++ if (opp_id == 0) { ++ WARN_ON(1); ++ return; ++ } ++ ++ pr_debug("OMAP PM: DSP requests minimum VDD1 OPP to be %d\n", opp_id); ++ ++ /* ++ * ++ * For l-o dev tree, our VDD1 clk is keyed on OPP ID, so we ++ * can just test to see which is higher, the CPU's desired OPP ++ * ID or the DSP's desired OPP ID, and use whichever is ++ * highest. ++ * ++ * In CDP12.14+, the VDD1 OPP custom clock that controls the DSP ++ * rate is keyed on MPU speed, not the OPP ID. 
So we need to ++ * map the OPP ID to the MPU speed for use with clk_set_rate() ++ * if it is higher than the current OPP clock rate. ++ * ++ */ ++} ++EXPORT_SYMBOL(omap_pm_dsp_set_min_opp); ++ ++ ++u8 omap_pm_dsp_get_opp(void) ++{ ++ pr_debug("OMAP PM: DSP requests current DSP OPP ID\n"); ++ ++ /* ++ * For l-o dev tree, call clk_get_rate() on VDD1 OPP clock ++ * ++ * CDP12.14+: ++ * Call clk_get_rate() on the OPP custom clock, map that to an ++ * OPP ID using the tables defined in board-*.c/chip-*.c files. ++ */ ++ ++ return 0; ++} ++EXPORT_SYMBOL(omap_pm_dsp_get_opp); ++ ++/* ++ * CPUFreq-originated constraint ++ * ++ * In the future, this should be handled by custom OPP clocktype ++ * functions. ++ */ ++ ++struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void) ++{ ++ pr_debug("OMAP PM: CPUFreq request for frequency table\n"); ++ ++ /* ++ * Return CPUFreq frequency table here: loop over ++ * all VDD1 clkrates, pull out the mpu_ck frequencies, build ++ * table ++ */ ++ ++ return NULL; ++} ++ ++void omap_pm_cpu_set_freq(unsigned long f) ++{ ++ if (f == 0) { ++ WARN_ON(1); ++ return; ++ } ++ ++ pr_debug("OMAP PM: CPUFreq requests CPU frequency to be set to %lu\n", ++ f); ++ ++ /* ++ * For l-o dev tree, determine whether MPU freq or DSP OPP id ++ * freq is higher. Find the OPP ID corresponding to the ++ * higher frequency. Call clk_round_rate() and clk_set_rate() ++ * on the OPP custom clock. ++ * ++ * CDP should just be able to set the VDD1 OPP clock rate here. ++ */ ++} ++EXPORT_SYMBOL(omap_pm_cpu_set_freq); ++ ++unsigned long omap_pm_cpu_get_freq(void) ++{ ++ pr_debug("OMAP PM: CPUFreq requests current CPU frequency\n"); ++ ++ /* ++ * Call clk_get_rate() on the mpu_ck. ++ */ ++ ++ return 0; ++} ++EXPORT_SYMBOL(omap_pm_cpu_get_freq); ++ ++/* ++ * Device context loss tracking ++ */ ++ ++int omap_pm_get_dev_context_loss_count(struct device *dev) ++{ ++ if (!dev) { ++ WARN_ON(1); ++ return -EINVAL; ++ }; ++ ++ pr_debug("OMAP PM: returning context loss count for dev %s\n", ++ dev_name(dev)); ++ ++ /* ++ * Map the device to the powerdomain. Return the powerdomain ++ * off counter. ++ */ ++ ++ return 0; ++} ++ ++/* ++ * Powerdomain usecounting hooks ++ */ ++ ++void omap_pm_pwrdm_active(struct powerdomain *pwrdm) ++{ ++ if (!pwrdm) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ pr_debug("OMAP PM: powerdomain %s is becoming active\n", pwrdm->name); ++ ++ /* ++ * CDP code apparently will need these for the enable_power_domain() ++ * and disable_power_domain() functions. ++ */ ++} ++ ++void omap_pm_pwrdm_inactive(struct powerdomain *pwrdm) ++{ ++ if (!pwrdm) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ pr_debug("OMAP PM: powerdomain %s is becoming inactive\n", ++ pwrdm->name); ++ ++ /* ++ * CDP code apparently will need these for the enable_power_domain() ++ * and disable_power_domain() functions. 
++ */ ++} ++ ++/* ++ * Should be called before clk framework since clk fw will call ++ * omap_pm_pwrdm_{in,}active() ++ */ ++int __init omap_pm_if_early_init(struct omap_opp *mpu_opp_table, ++ struct omap_opp *dsp_opp_table, ++ struct omap_opp *l3_opp_table) ++{ ++ mpu_opps = mpu_opp_table; ++ dsp_opps = dsp_opp_table; ++ l3_opps = l3_opp_table; ++ return 0; ++} ++ ++/* Must be called after clock framework is initialized */ ++int __init omap_pm_if_init(void) ++{ ++ return 0; ++} ++ ++void omap_pm_if_exit(void) ++{ ++ /* Deallocate CPUFreq frequency table here */ ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/omap-pm-srf.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/omap-pm-srf.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/omap-pm-srf.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/omap-pm-srf.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,357 @@ ++/* ++ * omap-pm-srf.c - OMAP power management interface implemented ++ * using Shared resource framework ++ * ++ * This code implements the OMAP power management interface to ++ * drivers, CPUIdle, CPUFreq, and DSP Bridge. It is strictly for ++ * debug/demonstration use, as it does nothing but printk() whenever a ++ * function is called (when DEBUG is defined, below) ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * Copyright (C) 2008 Nokia Corporation ++ * Paul Walmsley ++ * ++ * Interface developed by (in alphabetical order): ++ * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan ++ * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff ++ */ ++ ++#undef DEBUG ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct omap_opp *dsp_opps; ++struct omap_opp *mpu_opps; ++struct omap_opp *l3_opps; ++ ++#define LAT_RES_POSTAMBLE "_latency" ++#define MAX_LATENCY_RES_NAME 30 ++ ++/** ++ * get_lat_res_name - gets the latency resource name given a power domain name ++ * @pwrdm_name: Name of the power domain. ++ * @lat_name: Buffer in which latency resource name is populated ++ * @size: Max size of the latency resource name ++ * ++ * Returns the latency resource name populated in lat_name. 
++ */ ++void get_lat_res_name(const char *pwrdm_name, char **lat_name, int size) ++{ ++ strcpy(*lat_name, ""); ++ WARN_ON(strlen(pwrdm_name) + strlen(LAT_RES_POSTAMBLE) > size); ++ strcpy(*lat_name, pwrdm_name); ++ strcat(*lat_name, LAT_RES_POSTAMBLE); ++ return; ++} ++ ++/* ++ * Device-driver-originated constraints (via board-*.c files) ++ */ ++ ++void omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t) ++{ ++ if (!dev || t < -1) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (t == -1) { ++ pr_debug("OMAP PM: remove max MPU wakeup latency constraint: " ++ "dev %s\n", dev_name(dev)); ++ resource_release("mpu_latency", dev); ++ } else { ++ pr_debug("OMAP PM: add max MPU wakeup latency constraint: " ++ "dev %s, t = %ld usec\n", dev_name(dev), t); ++ resource_request("mpu_latency", dev, t); ++ } ++} ++ ++void omap_pm_set_min_mpu_freq(struct device *dev, unsigned long r) ++{ ++ if (!dev) { ++ WARN_ON(1); ++ return; ++ } ++ ++ if (r == 0) ++ resource_release("mpu_freq", dev); ++ else ++ resource_request("mpu_freq", dev, r); ++} ++EXPORT_SYMBOL(omap_pm_set_min_mpu_freq); ++ ++void omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r) ++{ ++ if (!dev || (agent_id != OCP_INITIATOR_AGENT && ++ agent_id != OCP_TARGET_AGENT)) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (r == 0) { ++ pr_debug("OMAP PM: remove min bus tput constraint: " ++ "dev %s for agent_id %d\n", dev_name(dev), agent_id); ++ resource_release("vdd2_opp", dev); ++ } else { ++ pr_debug("OMAP PM: add min bus tput constraint: " ++ "dev %s for agent_id %d: rate %ld KiB\n", ++ dev_name(dev), agent_id, r); ++ resource_request("vdd2_opp", dev, r); ++ } ++} ++EXPORT_SYMBOL(omap_pm_set_min_bus_tput); ++ ++void omap_pm_set_max_dev_wakeup_lat(struct device *dev, long t) ++{ ++ struct omapdev *odev; ++ struct powerdomain *pwrdm_dev; ++ struct platform_device *pdev; ++ char *lat_res_name; ++ ++ if (!dev || t < -1) { ++ WARN_ON(1); ++ return; ++ }; ++ /* Look for the devices Power Domain */ ++ /* ++ * WARNING! If device is not a platform device, container_of will ++ * return a pointer to unknown memory! ++ * TODO: Either change omap-pm interface to support only platform ++ * devices, or change the underlying omapdev implementation to ++ * support normal devices. ++ */ ++ pdev = container_of(dev, struct platform_device, dev); ++ ++ /* Try to catch non platform devices. 
*/ ++ if (pdev->name == NULL) { ++ printk(KERN_ERR "OMAP-PM: Error: platform device not valid\n"); ++ return; ++ } ++ ++ odev = omapdev_find_pdev(pdev); ++ if (odev) { ++ pwrdm_dev = omapdev_get_pwrdm(odev); ++ } else { ++ printk(KERN_ERR "OMAP-PM: Error: Could not find omapdev " ++ "for %s\n", pdev->name); ++ return; ++ } ++ ++ lat_res_name = kmalloc(MAX_LATENCY_RES_NAME, GFP_KERNEL); ++ if (!lat_res_name) { ++ printk(KERN_ERR "OMAP-PM: FATAL ERROR: kmalloc failed\n"); ++ return; ++ } ++ get_lat_res_name(pwrdm_dev->name, &lat_res_name, MAX_LATENCY_RES_NAME); ++ ++ if (t == -1) { ++ pr_debug("OMAP PM: remove max device latency constraint: " ++ "dev %s\n", dev_name(dev)); ++ resource_release(lat_res_name, dev); ++ } else { ++ pr_debug("OMAP PM: add max device latency constraint: " ++ "dev %s, t = %ld usec\n", dev_name(dev), t); ++ resource_request(lat_res_name, dev, t); ++ } ++ ++ kfree(lat_res_name); ++ return; ++} ++ ++void omap_pm_set_max_sdma_lat(struct device *dev, long t) ++{ ++ if (!dev || t < -1) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ if (t == -1) { ++ pr_debug("OMAP PM: remove max DMA latency constraint: " ++ "dev %s\n", dev_name(dev)); ++ resource_release("core_latency", dev); ++ } else { ++ pr_debug("OMAP PM: add max DMA latency constraint: " ++ "dev %s, t = %ld usec\n", dev_name(dev), t); ++ resource_request("core_latency", dev, t); ++ } ++} ++ ++static struct device dummy_dsp_dev; ++ ++/* ++ * DSP Bridge-specific constraints ++ */ ++const struct omap_opp *omap_pm_dsp_get_opp_table(void) ++{ ++ pr_debug("OMAP PM: DSP request for OPP table\n"); ++ ++ /* ++ * Return DSP frequency table here: The final item in the ++ * array should have .rate = .opp_id = 0. ++ */ ++ ++ return NULL; ++} ++EXPORT_SYMBOL(omap_pm_dsp_get_opp_table); ++ ++void omap_pm_dsp_set_min_opp(u8 opp_id) ++{ ++ if (opp_id == 0) { ++ WARN_ON(1); ++ return; ++ } ++ ++ pr_debug("OMAP PM: DSP requests minimum VDD1 OPP to be %d\n", opp_id); ++ ++ /* ++ * For now pass a dummy_dev struct for SRF to identify the caller. ++ * Maybe its good to have DSP pass this as an argument ++ */ ++ resource_request("vdd1_opp", &dummy_dsp_dev, opp_id); ++ return; ++} ++EXPORT_SYMBOL(omap_pm_dsp_set_min_opp); ++ ++u8 omap_pm_dsp_get_opp(void) ++{ ++ pr_debug("OMAP PM: DSP requests current DSP OPP ID\n"); ++ return resource_get_level("vdd1_opp"); ++ return 0; ++} ++EXPORT_SYMBOL(omap_pm_dsp_get_opp); ++ ++/* ++ * CPUFreq-originated constraint ++ * ++ * In the future, this should be handled by custom OPP clocktype ++ * functions. 
++ */ ++ ++struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void) ++{ ++ pr_debug("OMAP PM: CPUFreq request for frequency table\n"); ++ ++ /* ++ * Return CPUFreq frequency table here: loop over ++ * all VDD1 clkrates, pull out the mpu_ck frequencies, build ++ * table ++ */ ++ ++ return NULL; ++} ++ ++static struct device dummy_cpufreq_dev; ++ ++void omap_pm_cpu_set_freq(unsigned long f) ++{ ++ if (f == 0) { ++ WARN_ON(1); ++ return; ++ } ++ ++ pr_debug("OMAP PM: CPUFreq requests CPU frequency to be set to %lu\n", ++ f); ++ ++ resource_request("mpu_freq", &dummy_cpufreq_dev, f); ++ return; ++} ++EXPORT_SYMBOL(omap_pm_cpu_set_freq); ++ ++unsigned long omap_pm_cpu_get_freq(void) ++{ ++ pr_debug("OMAP PM: CPUFreq requests current CPU frequency\n"); ++ return resource_get_level("mpu_freq"); ++} ++EXPORT_SYMBOL(omap_pm_cpu_get_freq); ++ ++/* ++ * Device context loss tracking ++ */ ++ ++int omap_pm_get_dev_context_loss_count(struct device *dev) ++{ ++ if (!dev) { ++ WARN_ON(1); ++ return -EINVAL; ++ }; ++ ++ pr_debug("OMAP PM: returning context loss count for dev %s\n", ++ dev_name(dev)); ++ ++ /* ++ * Map the device to the powerdomain. Return the powerdomain ++ * off counter. ++ */ ++ ++ return get_last_off_on_transaction_id(dev); ++} ++ ++/* ++ * Powerdomain usecounting hooks ++ */ ++ ++void omap_pm_pwrdm_active(struct powerdomain *pwrdm) ++{ ++ if (!pwrdm) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ pr_debug("OMAP PM: powerdomain %s is becoming active\n", pwrdm->name); ++ ++ /* ++ * CDP code apparently will need these for the enable_power_domain() ++ * and disable_power_domain() functions. ++ */ ++} ++ ++void omap_pm_pwrdm_inactive(struct powerdomain *pwrdm) ++{ ++ if (!pwrdm) { ++ WARN_ON(1); ++ return; ++ }; ++ ++ pr_debug("OMAP PM: powerdomain %s is becoming inactive\n", ++ pwrdm->name); ++ ++ /* ++ * CDP code apparently will need these for the enable_power_domain() ++ * and disable_power_domain() functions. ++ */ ++} ++ ++/* ++ * Should be called before clk framework since clk fw will call ++ * omap_pm_pwrdm_{in,}active() ++ */ ++int __init omap_pm_if_early_init(struct omap_opp *mpu_opp_table, ++ struct omap_opp *dsp_opp_table, ++ struct omap_opp *l3_opp_table) ++{ ++ mpu_opps = mpu_opp_table; ++ dsp_opps = dsp_opp_table; ++ l3_opps = l3_opp_table; ++ return 0; ++} ++ ++/* Must be called after clock framework is initialized */ ++int __init omap_pm_if_init(void) ++{ ++ resource_init(resources_omap); ++ return 0; ++} ++ ++void omap_pm_if_exit(void) ++{ ++ /* Deallocate CPUFreq frequency table here */ ++} +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/resource.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/resource.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/resource.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/resource.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,492 @@ ++/* ++ * linux/arch/arm/plat-omap/resource.c ++ * Shared Resource Framework API implementation ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * Rajendra Nayak ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ * History: ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++/* ++ * This is for statically defining the users pool. This static pool is ++ * used early at bootup till kmalloc becomes available. ++ */ ++#define MAX_USERS 10 ++#define UNUSED 0x0 ++#define DYNAMIC_ALLOC 0x1 ++#define STATIC_ALLOC 0x2 ++ ++/* res_list contains all registered struct shared_resource */ ++static LIST_HEAD(res_list); ++ ++/* res_mutex protects res_list add and del ops */ ++static DECLARE_MUTEX(res_mutex); ++ ++/* Static Pool of users for a resource used till kmalloc becomes available */ ++struct users_list usr_list[MAX_USERS]; ++ ++/* Private/Internal functions */ ++ ++/** ++ * _resource_lookup - loop up a resource by its name, return a pointer ++ * @name: The name of the resource to lookup ++ * ++ * Looks for a registered resource by its name. Returns a pointer to ++ * the struct shared_resource if found, else returns NULL. ++ * The function is not lock safe. ++ */ ++static struct shared_resource *_resource_lookup(const char *name) ++{ ++ struct shared_resource *res, *tmp_res; ++ ++ if (!name) ++ return NULL; ++ ++ res = NULL; ++ ++ list_for_each_entry(tmp_res, &res_list, node) { ++ if (!strcmp(name, tmp_res->name)) { ++ res = tmp_res; ++ break; ++ } ++ } ++ return res; ++} ++ ++/** ++ * resource_lookup - loop up a resource by its name, return a pointer ++ * @name: The name of the resource to lookup ++ * ++ * Looks for a registered resource by its name. Returns a pointer to ++ * the struct shared_resource if found, else returns NULL. ++ * The function holds mutex and takes care of atomicity. ++ */ ++static struct shared_resource *resource_lookup(const char *name) ++{ ++ struct shared_resource *res; ++ ++ if (!name) ++ return NULL; ++ down(&res_mutex); ++ res = _resource_lookup(name); ++ up(&res_mutex); ++ ++ return res; ++} ++ ++/** ++ * update_resource_level - Regenerates and updates the curr_level of the res ++ * @resp: Pointer to the resource ++ * ++ * This function looks at all the users of the given resource and the levels ++ * requested by each of them, and recomputes a target level for the resource ++ * acceptable to all its current usres. It then calls platform specific ++ * change_level to change the level of the resource. ++ * Returns 0 on success, else a non-zero value returned by the platform ++ * specific change_level function. ++ **/ ++static int update_resource_level(struct shared_resource *resp) ++{ ++ struct users_list *user; ++ unsigned long target_level; ++ int ret; ++ ++ /* Regenerate the target_value for the resource */ ++ target_level = RES_DEFAULTLEVEL; ++ list_for_each_entry(user, &resp->users_list, node) ++ if (user->level > target_level) ++ target_level = user->level; ++ ++ pr_debug("SRF: Changing Level for resource %s to %ld\n", ++ resp->name, target_level); ++ ret = resp->ops->change_level(resp, target_level); ++ if (ret) { ++ printk(KERN_ERR "Unable to Change" ++ "level for resource %s to %ld\n", ++ resp->name, target_level); ++ } ++ return ret; ++} ++ ++/** ++ * get_user - gets a new users_list struct from static pool or dynamically ++ * ++ * This function initally looks for availability in the static pool and ++ * tries to dynamcially allocate only once the static pool is empty. ++ * We hope that during bootup by the time we hit a case of dynamic allocation ++ * slab initialization would have happened. ++ * Returns a pointer users_list struct on success. On dynamic allocation failure ++ * returns a ERR_PTR(-ENOMEM). 
++ */ ++static struct users_list *get_user(void) ++{ ++ int ind = 0; ++ struct users_list *user; ++ ++ /* See if something available in the static pool */ ++ while (ind < MAX_USERS) { ++ if (usr_list[ind].usage == UNUSED) ++ break; ++ else ++ ind++; ++ } ++ if (ind < MAX_USERS) { ++ /* Pick from the static pool */ ++ user = &usr_list[ind]; ++ user->usage = STATIC_ALLOC; ++ } else { ++ /* By this time we hope slab is initialized */ ++ if (slab_is_available()) { ++ user = kmalloc(sizeof(struct users_list), GFP_KERNEL); ++ if (!user) { ++ printk(KERN_ERR "SRF:FATAL ERROR: kmalloc" ++ "failed\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ user->usage = DYNAMIC_ALLOC; ++ } else { ++ /* Dynamic alloc not available yet */ ++ printk(KERN_ERR "SRF: FATAL ERROR: users_list" ++ "initial POOL EMPTY before slab init\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ return user; ++} ++ ++/** ++ * free_user - frees the dynamic users_list and marks the static one unused ++ * @user: The struct users_list to be freed ++ * ++ * Looks at the usage flag and either frees the users_list if it was ++ * dynamically allocated, and if its from the static pool, marks it unused. ++ * No return value. ++ */ ++void free_user(struct users_list *user) ++{ ++ if (user->usage == DYNAMIC_ALLOC) { ++ kfree(user); ++ } else { ++ user->usage = UNUSED; ++ user->level = RES_DEFAULTLEVEL; ++ user->dev = NULL; ++ } ++} ++ ++/** ++ * resource_init - Initializes the Shared resource framework. ++ * @resources: List of all the resources modelled ++ * ++ * Loops through the list of resources and registers all that ++ * are available for the current CPU. ++ * No return value ++ */ ++void resource_init(struct shared_resource **resources) ++{ ++ struct shared_resource **resp; ++ int ind; ++ ++ pr_debug("Initializing Shared Resource Framework\n"); ++ ++ if (!cpu_is_omap34xx()) { ++ /* This CPU is not supported */ ++ printk(KERN_WARNING "Shared Resource Framework does not" ++ "support this CPU type.\n"); ++ WARN_ON(1); ++ } ++ ++ /* Init the users_list POOL */ ++ for (ind = 0; ind < MAX_USERS; ind++) { ++ usr_list[ind].usage = UNUSED; ++ usr_list[ind].dev = NULL; ++ usr_list[ind].level = RES_DEFAULTLEVEL; ++ } ++ ++ if (resources) ++ for (resp = resources; *resp; resp++) ++ resource_register(*resp); ++} ++ ++/** ++ * resource_refresh - Refresh the states of all current resources ++ * ++ * If a condition in power domains has changed that requires refreshing ++ * power domain states, this function can be used to restore correct ++ * states according to shared resources. ++ * Returns 0 on success, non-zero, if some resource cannot be refreshed. ++ */ ++int resource_refresh(void) ++{ ++ struct shared_resource *resp = NULL; ++ int ret = 0; ++ ++ down(&res_mutex); ++ list_for_each_entry(resp, &res_list, node) { ++ ret = update_resource_level(resp); ++ if (ret) ++ break; ++ } ++ up(&res_mutex); ++ return ret; ++} ++ ++/** ++ * resource_register - registers and initializes a resource ++ * @res: struct shared_resource * to register ++ * ++ * Initializes the given resource and adds it to the resource list ++ * for the current CPU. ++ * Returns 0 on success, -EINVAL if given a NULL pointer, -EEXIST if the ++ * resource is already registered. 
++ */ ++int resource_register(struct shared_resource *resp) ++{ ++ if (!resp) ++ return -EINVAL; ++ ++ if (!omap_chip_is(resp->omap_chip)) ++ return -EINVAL; ++ ++ /* Verify that the resource is not already registered */ ++ if (resource_lookup(resp->name)) ++ return -EEXIST; ++ ++ INIT_LIST_HEAD(&resp->users_list); ++ ++ down(&res_mutex); ++ /* Add the resource to the resource list */ ++ list_add(&resp->node, &res_list); ++ ++ /* Call the resource specific init*/ ++ if (resp->ops->init) ++ resp->ops->init(resp); ++ ++ up(&res_mutex); ++ pr_debug("resource: registered %s\n", resp->name); ++ ++ return 0; ++} ++EXPORT_SYMBOL(resource_register); ++ ++/** ++ * resource_unregister - unregister a resource ++ * @res: struct shared_resource * to unregister ++ * ++ * Removes a resource from the resource list. ++ * Returns 0 on success, -EINVAL if passed a NULL pointer. ++ */ ++int resource_unregister(struct shared_resource *resp) ++{ ++ if (!resp) ++ return -EINVAL; ++ ++ down(&res_mutex); ++ /* delete the resource from the resource list */ ++ list_del(&resp->node); ++ up(&res_mutex); ++ ++ pr_debug("resource: unregistered %s\n", resp->name); ++ ++ return 0; ++} ++EXPORT_SYMBOL(resource_unregister); ++ ++/** ++ * resource_request - Request for a required level of a resource ++ * @name: The name of the resource requested ++ * @dev: Uniquely identifes the caller ++ * @level: The requested level for the resource ++ * ++ * This function recomputes the target level of the resource based on ++ * the level requested by the user. The level of the resource is ++ * changed to the target level, if it is not the same as the existing level ++ * of the resource. Multiple calls to this function by the same device will ++ * replace the previous level requested ++ * Returns 0 on success, -EINVAL if the resource name passed in invalid. ++ * -ENOMEM if no static pool available or dynamic allocations fails. ++ * Else returns a non-zero error value returned by one of the failing ++ * shared_resource_ops. ++ */ ++ ++int resource_request_locked(const char *name, struct device *dev, ++ unsigned long level) ++{ ++ struct shared_resource *resp; ++ struct users_list *user; ++ int found = 0, ret = 0; ++ ++ resp = _resource_lookup(name); ++ if (!resp) { ++ printk(KERN_ERR "resource_request: Invalid resource name\n"); ++ return -EINVAL; ++ } ++ ++ /* Call the resource specific validate function */ ++ if (resp->ops->validate_level) { ++ ret = resp->ops->validate_level(resp, level); ++ if (ret) ++ return ret; ++ } ++ ++ list_for_each_entry(user, &resp->users_list, node) { ++ if (user->dev == dev) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) { ++ /* First time user */ ++ user = get_user(); ++ if (IS_ERR(user)) ++ return -ENOMEM; ++ user->dev = dev; ++ list_add(&user->node, &resp->users_list); ++ resp->no_of_users++; ++ } ++ user->level = level; ++ ++ /* ++ * Recompute and set the current level for the resource. ++ * NOTE: update_resource level moved out of spin_lock, as it may call ++ * pm_qos_add_requirement, which does a kzmalloc. This won't be allowed ++ * in iterrupt context. The spin_lock still protects add/remove users. 
++ */ ++ return update_resource_level(resp); ++} ++ ++int resource_request(const char *name, struct device *dev, ++ unsigned long level) ++{ ++ int ret; ++ down(&res_mutex); ++ ret = resource_request_locked(name, dev, level); ++ up(&res_mutex); ++ return ret; ++} ++ ++EXPORT_SYMBOL(resource_request); ++ ++ ++ ++/** ++ * resource_release - Release a previously requested level of a resource ++ * @name: The name of the resource to be released ++ * @dev: Uniquely identifes the caller ++ * ++ * This function recomputes the target level of the resource after removing ++ * the level requested by the user. The level of the resource is ++ * changed to the target level, if it is not the same as the existing level ++ * of the resource. ++ * Returns 0 on success, -EINVAL if the resource name or dev structure ++ * is invalid. ++ */ ++int resource_release_locked(const char *name, struct device *dev) ++{ ++ struct shared_resource *resp; ++ struct users_list *user; ++ int found = 0; ++ ++ resp = _resource_lookup(name); ++ if (!resp) { ++ printk(KERN_ERR "resource_release: Invalid resource name\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(user, &resp->users_list, node) { ++ if (user->dev == dev) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) { ++ /* No such user exists */ ++ return -EINVAL; ++ } ++ ++ resp->no_of_users--; ++ list_del(&user->node); ++ free_user(user); ++ ++ /* Recompute and set the current level for the resource */ ++ return update_resource_level(resp); ++} ++ ++int resource_release(const char *name, struct device *dev) ++{ ++ int ret; ++ down(&res_mutex); ++ ret = resource_release_locked(name, dev); ++ up(&res_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(resource_release); ++ ++/** ++ * resource_get_level - Returns the current level of the resource ++ * @name: Name of the resource ++ * ++ * Returns the current level of the resource if found, else returns ++ * -EINVAL if the resource name is invalid. 
++ */ ++int resource_get_level(const char *name) ++{ ++ struct shared_resource *resp; ++ u32 ret; ++ ++ down(&res_mutex); ++ resp = _resource_lookup(name); ++ if (!resp) { ++ printk(KERN_ERR "resource_release: Invalid resource name\n"); ++ up(&res_mutex); ++ return -EINVAL; ++ } ++ ret = resp->curr_level; ++ up(&res_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(resource_get_level); ++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PM_DEBUG) ++#include ++#include ++int resource_dump_reqs(struct seq_file *s, void *unused) ++{ ++ struct shared_resource *resp; ++ struct users_list *user; ++ char *buf; ++ ++ buf = kmalloc(KSYM_NAME_LEN, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ ++ down(&res_mutex); ++ list_for_each_entry(resp, &res_list, node) { ++ seq_printf(s, "%s:\n", resp->name); ++ list_for_each_entry(user, &resp->users_list, node) { ++ sprint_symbol(buf, (u32)user->dev); ++ seq_printf(s, " %s [%s] : %d\n", ++ user->dev->init_name, ++ buf, ++ user->level); ++ } ++ } ++ up(&res_mutex); ++ kfree(buf); ++ return 0; ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/sram.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/sram.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/sram.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/sram.c 2011-09-04 11:31:05.000000000 +0200 +@@ -41,9 +41,9 @@ + #define OMAP2_SRAM_VA 0xe3000000 + #define OMAP2_SRAM_PUB_VA (OMAP2_SRAM_VA + 0x800) + #define OMAP3_SRAM_PA 0x40200000 +-#define OMAP3_SRAM_VA 0xd7000000 ++#define OMAP3_SRAM_VA 0xe3000000 + #define OMAP3_SRAM_PUB_PA 0x40208000 +-#define OMAP3_SRAM_PUB_VA 0xd7008000 ++#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000) + + #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) + #define SRAM_BOOTLOADER_SZ 0x00 +@@ -122,9 +122,15 @@ void __init omap_detect_sram(void) + if (cpu_class_is_omap2()) { + if (is_sram_locked()) { + if (cpu_is_omap34xx()) { +- omap_sram_base = OMAP3_SRAM_PUB_VA; +- omap_sram_start = OMAP3_SRAM_PUB_PA; +- omap_sram_size = 0x8000; /* 32K */ ++ if (omap_type() == OMAP2_DEVICE_TYPE_GP) { ++ omap_sram_base = OMAP3_SRAM_PUB_VA; ++ omap_sram_start = OMAP3_SRAM_PUB_PA; ++ omap_sram_size = 0x8000; /* 32K */ ++ } else { ++ omap_sram_base = OMAP3_SRAM_PUB_VA; ++ omap_sram_start = OMAP3_SRAM_PUB_PA; ++ omap_sram_size = 0x7000; /* 28K */ ++ } + } else { + omap_sram_base = OMAP2_SRAM_PUB_VA; + omap_sram_start = OMAP2_SRAM_PUB_PA; +@@ -201,6 +207,15 @@ void __init omap_map_sram(void) + base = OMAP3_SRAM_PA; + base = ROUND_DOWN(base, PAGE_SIZE); + omap_sram_io_desc[0].pfn = __phys_to_pfn(base); ++ ++ /* ++ * SRAM must be marked as non-cached on OMAP3 since the ++ * CORE DPLL M2 divider change code (in SRAM) runs with the ++ * SDRAM controller disabled, and if it is marked cached, ++ * the ARM may attempt to write cache lines back to SDRAM ++ * which will cause the system to hang. 
++ */ ++ omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; + } + + omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ +@@ -353,37 +368,42 @@ static inline int omap243x_sram_init(voi + + #ifdef CONFIG_ARCH_OMAP3 + +-static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl, +- u32 sdrc_actim_ctrla, +- u32 sdrc_actim_ctrlb, +- u32 m2); +-u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla, +- u32 sdrc_actim_ctrlb, u32 m2) +- { ++static u32 (*_omap3_sram_configure_core_dpll)( ++ u32 m2, u32 unlock_dll, u32 f, u32 inc, ++ u32 sdrc_rfr_ctrl_0, u32 sdrc_mr_0, ++ u32 sdrc_rfr_ctrl_1, u32 sdrc_mr_1); ++ ++u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc, ++ u32 sdrc_rfr_ctrl_0, u32 sdrc_mr_0, ++ u32 sdrc_rfr_ctrl_1, u32 sdrc_mr_1) ++{ + if (!_omap3_sram_configure_core_dpll) + omap_sram_error(); + +- return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl, +- sdrc_actim_ctrla, +- sdrc_actim_ctrlb, m2); +- } ++ return _omap3_sram_configure_core_dpll( ++ m2, unlock_dll, f, inc, ++ sdrc_rfr_ctrl_0, sdrc_mr_0, ++ sdrc_rfr_ctrl_1, sdrc_mr_1); ++} + +-/* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */ +-void restore_sram_functions(void) ++#ifdef CONFIG_PM ++void omap3_sram_restore_context(void) + { + omap_sram_ceil = omap_sram_base + omap_sram_size; + + _omap3_sram_configure_core_dpll = + omap_sram_push(omap3_sram_configure_core_dpll, + omap3_sram_configure_core_dpll_sz); ++ omap_push_sram_idle(); + } ++#endif /* CONFIG_PM */ + + int __init omap3_sram_init(void) + { + _omap3_sram_configure_core_dpll = + omap_sram_push(omap3_sram_configure_core_dpll, + omap3_sram_configure_core_dpll_sz); +- ++ omap_push_sram_idle(); + return 0; + } + #else +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/vram.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/vram.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/vram.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/vram.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,705 @@ ++/* ++ * linux/arch/arm/plat-omap/vram.c ++ * ++ * Copyright (C) 2008 Nokia Corporation ++ * Author: Tomi Valkeinen ++ * ++ * Some code and ideas taken from drivers/video/omap/ driver ++ * by Imre Deak. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program. If not, see . ++ */ ++ ++/*#define DEBUG*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++#ifdef DEBUG ++#define DBG(format, ...) printk(KERN_DEBUG "VRAM: " format, ## __VA_ARGS__) ++#else ++#define DBG(format, ...) ++#endif ++ ++#define OMAP2_SRAM_START 0x40200000 ++/* Maximum size, in reality this is smaller if SRAM is partially locked. 
*/ ++#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */ ++ ++#define REG_MAP_SIZE(_page_cnt) \ ++ ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8) ++#define REG_MAP_PTR(_rg, _page_nr) \ ++ (((_rg)->map) + (_page_nr) / (sizeof(unsigned long) * 8)) ++#define REG_MAP_MASK(_page_nr) \ ++ (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1))) ++ ++#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) ++ ++/* postponed regions are used to temporarily store region information at boot ++ * time when we cannot yet allocate the region list */ ++#define MAX_POSTPONED_REGIONS 10 ++ ++static bool vram_initialized; ++static int postponed_cnt; ++static struct { ++ unsigned long paddr; ++ size_t size; ++} postponed_regions[MAX_POSTPONED_REGIONS]; ++ ++struct vram_alloc { ++ struct list_head list; ++ unsigned long paddr; ++ unsigned pages; ++}; ++ ++struct vram_region { ++ struct list_head list; ++ struct list_head alloc_list; ++ unsigned long paddr; ++ unsigned pages; ++}; ++ ++static DEFINE_MUTEX(region_mutex); ++static LIST_HEAD(region_list); ++ ++static inline int region_mem_type(unsigned long paddr) ++{ ++ if (paddr >= OMAP2_SRAM_START && ++ paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE) ++ return OMAPFB_MEMTYPE_SRAM; ++ else ++ return OMAPFB_MEMTYPE_SDRAM; ++} ++ ++static struct vram_region *omap_vram_create_region(unsigned long paddr, ++ unsigned pages) ++{ ++ struct vram_region *rm; ++ ++ rm = kzalloc(sizeof(*rm), GFP_KERNEL); ++ ++ if (rm) { ++ INIT_LIST_HEAD(&rm->alloc_list); ++ rm->paddr = paddr; ++ rm->pages = pages; ++ } ++ ++ return rm; ++} ++ ++#if 0 ++static void omap_vram_free_region(struct vram_region *vr) ++{ ++ list_del(&vr->list); ++ kfree(vr); ++} ++#endif ++ ++static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr, ++ unsigned long paddr, unsigned pages) ++{ ++ struct vram_alloc *va; ++ struct vram_alloc *new; ++ ++ new = kzalloc(sizeof(*va), GFP_KERNEL); ++ ++ if (!new) ++ return NULL; ++ ++ new->paddr = paddr; ++ new->pages = pages; ++ ++ list_for_each_entry(va, &vr->alloc_list, list) { ++ if (va->paddr > new->paddr) ++ break; ++ } ++ ++ list_add_tail(&new->list, &va->list); ++ ++ return new; ++} ++ ++static void omap_vram_free_allocation(struct vram_alloc *va) ++{ ++ list_del(&va->list); ++ kfree(va); ++} ++ ++int omap_vram_add_region(unsigned long paddr, size_t size) ++{ ++ struct vram_region *rm; ++ unsigned pages; ++ ++ if (vram_initialized) { ++ DBG("adding region paddr %08lx size %d\n", ++ paddr, size); ++ ++ size &= PAGE_MASK; ++ pages = size >> PAGE_SHIFT; ++ ++ rm = omap_vram_create_region(paddr, pages); ++ if (rm == NULL) ++ return -ENOMEM; ++ ++ list_add(&rm->list, ®ion_list); ++ } else { ++ if (postponed_cnt == MAX_POSTPONED_REGIONS) ++ return -ENOMEM; ++ ++ postponed_regions[postponed_cnt].paddr = paddr; ++ postponed_regions[postponed_cnt].size = size; ++ ++ ++postponed_cnt; ++ } ++ return 0; ++} ++ ++int omap_vram_free(unsigned long paddr, size_t size) ++{ ++ struct vram_region *rm; ++ struct vram_alloc *alloc; ++ unsigned start, end; ++ ++ DBG("free mem paddr %08lx size %d\n", paddr, size); ++ ++ size = PAGE_ALIGN(size); ++ ++ mutex_lock(®ion_mutex); ++ ++ list_for_each_entry(rm, ®ion_list, list) { ++ list_for_each_entry(alloc, &rm->alloc_list, list) { ++ start = alloc->paddr; ++ end = alloc->paddr + (alloc->pages >> PAGE_SHIFT); ++ ++ if (start >= paddr && end < paddr + size) ++ goto found; ++ } ++ } ++ ++ mutex_unlock(®ion_mutex); ++ return -EINVAL; ++ ++found: ++ omap_vram_free_allocation(alloc); ++ ++ mutex_unlock(®ion_mutex); ++ return 0; 
++} ++EXPORT_SYMBOL(omap_vram_free); ++ ++static int _omap_vram_reserve(unsigned long paddr, unsigned pages) ++{ ++ struct vram_region *rm; ++ struct vram_alloc *alloc; ++ size_t size; ++ ++ size = pages << PAGE_SHIFT; ++ ++ list_for_each_entry(rm, ®ion_list, list) { ++ unsigned long start, end; ++ ++ DBG("checking region %lx %d\n", rm->paddr, rm->pages); ++ ++ if (region_mem_type(rm->paddr) != region_mem_type(paddr)) ++ continue; ++ ++ start = rm->paddr; ++ end = start + (rm->pages << PAGE_SHIFT) - 1; ++ if (start > paddr || end < paddr + size - 1) ++ continue; ++ ++ DBG("block ok, checking allocs\n"); ++ ++ list_for_each_entry(alloc, &rm->alloc_list, list) { ++ end = alloc->paddr - 1; ++ ++ if (start <= paddr && end >= paddr + size - 1) ++ goto found; ++ ++ start = alloc->paddr + (alloc->pages << PAGE_SHIFT); ++ } ++ ++ end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1; ++ ++ if (!(start <= paddr && end >= paddr + size - 1)) ++ continue; ++found: ++ DBG("FOUND area start %lx, end %lx\n", start, end); ++ ++ if (omap_vram_create_allocation(rm, paddr, pages) == NULL) ++ return -ENOMEM; ++ ++ return 0; ++ } ++ ++ return -ENOMEM; ++} ++ ++int omap_vram_reserve(unsigned long paddr, size_t size) ++{ ++ unsigned pages; ++ int r; ++ ++ DBG("reserve mem paddr %08lx size %d\n", paddr, size); ++ ++ size = PAGE_ALIGN(size); ++ pages = size >> PAGE_SHIFT; ++ ++ mutex_lock(®ion_mutex); ++ ++ r = _omap_vram_reserve(paddr, pages); ++ ++ mutex_unlock(®ion_mutex); ++ ++ return r; ++} ++EXPORT_SYMBOL(omap_vram_reserve); ++ ++static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data) ++{ ++ struct completion *compl = data; ++ complete(compl); ++} ++ ++static int _omap_vram_clear(u32 paddr, unsigned pages) ++{ ++ struct completion compl; ++ unsigned elem_count; ++ unsigned frame_count; ++ int r; ++ int lch; ++ ++ init_completion(&compl); ++ ++ r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA", ++ _omap_vram_dma_cb, ++ &compl, &lch); ++ if (r) { ++ pr_err("VRAM: request_dma failed for memory clear\n"); ++ return -EBUSY; ++ } ++ ++ elem_count = pages * PAGE_SIZE / 4; ++ frame_count = 1; ++ ++ omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32, ++ elem_count, frame_count, ++ OMAP_DMA_SYNC_ELEMENT, ++ 0, 0); ++ ++ omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC, ++ paddr, 0, 0); ++ ++ omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000); ++ ++ omap_start_dma(lch); ++ ++ if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) { ++ omap_stop_dma(lch); ++ pr_err("VRAM: dma timeout while clearing memory\n"); ++ r = -EIO; ++ goto err; ++ } ++ ++ r = 0; ++err: ++ omap_free_dma(lch); ++ ++ return r; ++} ++ ++static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr) ++{ ++ struct vram_region *rm; ++ struct vram_alloc *alloc; ++ ++ list_for_each_entry(rm, ®ion_list, list) { ++ unsigned long start, end; ++ ++ DBG("checking region %lx %d\n", rm->paddr, rm->pages); ++ ++ if (region_mem_type(rm->paddr) != mtype) ++ continue; ++ ++ start = rm->paddr; ++ ++ list_for_each_entry(alloc, &rm->alloc_list, list) { ++ end = alloc->paddr; ++ ++ if (end - start >= pages << PAGE_SHIFT) ++ goto found; ++ ++ start = alloc->paddr + (alloc->pages << PAGE_SHIFT); ++ } ++ ++ end = rm->paddr + (rm->pages << PAGE_SHIFT); ++found: ++ if (end - start < pages << PAGE_SHIFT) ++ continue; ++ ++ DBG("FOUND %lx, end %lx\n", start, end); ++ ++ alloc = omap_vram_create_allocation(rm, start, pages); ++ if (alloc == NULL) ++ return -ENOMEM; ++ ++ *paddr = start; ++ ++ _omap_vram_clear(start, 
pages); ++ ++ return 0; ++ } ++ ++ return -ENOMEM; ++} ++ ++int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr) ++{ ++ unsigned pages; ++ int r; ++ ++ BUG_ON(mtype > OMAPFB_MEMTYPE_MAX || !size); ++ ++ DBG("alloc mem type %d size %d\n", mtype, size); ++ ++ size = PAGE_ALIGN(size); ++ pages = size >> PAGE_SHIFT; ++ ++ mutex_lock(®ion_mutex); ++ ++ r = _omap_vram_alloc(mtype, pages, paddr); ++ ++ mutex_unlock(®ion_mutex); ++ ++ return r; ++} ++EXPORT_SYMBOL(omap_vram_alloc); ++ ++void omap_vram_get_info(unsigned long *vram, ++ unsigned long *free_vram, ++ unsigned long *largest_free_block) ++{ ++ struct vram_region *vr; ++ struct vram_alloc *va; ++ ++ *vram = 0; ++ *free_vram = 0; ++ *largest_free_block = 0; ++ ++ mutex_lock(®ion_mutex); ++ ++ list_for_each_entry(vr, ®ion_list, list) { ++ unsigned free; ++ unsigned long pa; ++ ++ pa = vr->paddr; ++ *vram += vr->pages << PAGE_SHIFT; ++ ++ list_for_each_entry(va, &vr->alloc_list, list) { ++ free = va->paddr - pa; ++ *free_vram += free; ++ if (free > *largest_free_block) ++ *largest_free_block = free; ++ pa = va->paddr + (va->pages << PAGE_SHIFT); ++ } ++ ++ free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa; ++ *free_vram += free; ++ if (free > *largest_free_block) ++ *largest_free_block = free; ++ } ++ ++ mutex_unlock(®ion_mutex); ++} ++EXPORT_SYMBOL(omap_vram_get_info); ++ ++#ifdef CONFIG_PROC_FS ++static void *r_next(struct seq_file *m, void *v, loff_t *pos) ++{ ++ struct list_head *l = v; ++ ++ (*pos)++; ++ ++ if (list_is_last(l, ®ion_list)) ++ return NULL; ++ ++ return l->next; ++} ++ ++static void *r_start(struct seq_file *m, loff_t *pos) ++{ ++ loff_t p = *pos; ++ struct list_head *l = ®ion_list; ++ ++ mutex_lock(®ion_mutex); ++ ++ do { ++ l = l->next; ++ if (l == ®ion_list) ++ return NULL; ++ } while (p--); ++ ++ return l; ++} ++ ++static void r_stop(struct seq_file *m, void *v) ++{ ++ mutex_unlock(®ion_mutex); ++} ++ ++static int r_show(struct seq_file *m, void *v) ++{ ++ struct vram_region *vr; ++ struct vram_alloc *va; ++ unsigned size; ++ ++ vr = list_entry(v, struct vram_region, list); ++ ++ size = vr->pages << PAGE_SHIFT; ++ ++ seq_printf(m, "%08lx-%08lx (%d bytes)\n", ++ vr->paddr, vr->paddr + size - 1, ++ size); ++ ++ list_for_each_entry(va, &vr->alloc_list, list) { ++ size = va->pages << PAGE_SHIFT; ++ seq_printf(m, " %08lx-%08lx (%d bytes)\n", ++ va->paddr, va->paddr + size - 1, ++ size); ++ } ++ ++ ++ ++ return 0; ++} ++ ++static const struct seq_operations resource_op = { ++ .start = r_start, ++ .next = r_next, ++ .stop = r_stop, ++ .show = r_show, ++}; ++ ++static int vram_open(struct inode *inode, struct file *file) ++{ ++ return seq_open(file, &resource_op); ++} ++ ++static const struct file_operations proc_vram_operations = { ++ .open = vram_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = seq_release, ++}; ++ ++static int __init omap_vram_create_proc(void) ++{ ++ proc_create("omap-vram", 0, NULL, &proc_vram_operations); ++ ++ return 0; ++} ++#endif ++ ++static __init int omap_vram_init(void) ++{ ++ int i, r; ++ ++ vram_initialized = 1; ++ ++ for (i = 0; i < postponed_cnt; i++) ++ omap_vram_add_region(postponed_regions[i].paddr, ++ postponed_regions[i].size); ++ ++#ifdef CONFIG_PROC_FS ++ r = omap_vram_create_proc(); ++ if (r) ++ return -ENOMEM; ++#endif ++ ++ return 0; ++} ++ ++arch_initcall(omap_vram_init); ++ ++/* boottime vram alloc stuff */ ++ ++/* set from board file */ ++static u32 omapfb_sram_vram_start __initdata; ++static u32 omapfb_sram_vram_size __initdata; ++ ++/* set from board 
file */ ++static u32 omapfb_sdram_vram_start __initdata; ++static u32 omapfb_sdram_vram_size __initdata; ++ ++/* set from kernel cmdline */ ++static u32 omapfb_def_sdram_vram_size __initdata; ++static u32 omapfb_def_sdram_vram_start __initdata; ++ ++static void __init omapfb_early_vram(char **p) ++{ ++ omapfb_def_sdram_vram_size = memparse(*p, p); ++ if (**p == ',') ++ omapfb_def_sdram_vram_start = simple_strtoul((*p) + 1, p, 16); ++} ++__early_param("vram=", omapfb_early_vram); ++ ++/* ++ * Called from map_io. We need to call to this early enough so that we ++ * can reserve the fixed SDRAM regions before VM could get hold of them. ++ */ ++void __init omapfb_reserve_sdram(void) ++{ ++ struct bootmem_data *bdata; ++ unsigned long sdram_start, sdram_size; ++ u32 paddr; ++ u32 size = 0; ++ ++ /* cmdline arg overrides the board file definition */ ++ if (omapfb_def_sdram_vram_size) { ++ size = omapfb_def_sdram_vram_size; ++ paddr = omapfb_def_sdram_vram_start; ++ } ++ ++ if (!size) { ++ size = omapfb_sdram_vram_size; ++ paddr = omapfb_sdram_vram_start; ++ } ++ ++#ifdef CONFIG_OMAP2_DSS_VRAM_SIZE ++ if (!size) { ++ size = CONFIG_OMAP2_DSS_VRAM_SIZE * 1024 * 1024; ++ paddr = 0; ++ } ++#endif ++ ++ if (!size) ++ return; ++ ++ size = PAGE_ALIGN(size); ++ ++ bdata = NODE_DATA(0)->bdata; ++ sdram_start = bdata->node_min_pfn << PAGE_SHIFT; ++ sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start; ++ ++ if (paddr) { ++ if ((paddr & ~PAGE_MASK) || paddr < sdram_start || ++ paddr + size > sdram_start + sdram_size) { ++ printk(KERN_ERR "Illegal SDRAM region for VRAM\n"); ++ return; ++ } ++ ++ if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) { ++ pr_err("FB: failed to reserve VRAM\n"); ++ return; ++ } ++ } else { ++ if (size > sdram_size) { ++ printk(KERN_ERR "Illegal SDRAM size for VRAM\n"); ++ return; ++ } ++ ++ paddr = virt_to_phys(alloc_bootmem_pages(size)); ++ BUG_ON(paddr & ~PAGE_MASK); ++ } ++ ++ omap_vram_add_region(paddr, size); ++ ++ pr_info("Reserving %u bytes SDRAM for VRAM\n", size); ++} ++ ++/* ++ * Called at sram init time, before anything is pushed to the SRAM stack. ++ * Because of the stack scheme, we will allocate everything from the ++ * start of the lowest address region to the end of SRAM. This will also ++ * include padding for page alignment and possible holes between regions. ++ * ++ * As opposed to the SDRAM case, we'll also do any dynamic allocations at ++ * this point, since the driver built as a module would have problem with ++ * freeing / reallocating the regions. ++ */ ++unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart, ++ unsigned long sram_vstart, ++ unsigned long sram_size, ++ unsigned long pstart_avail, ++ unsigned long size_avail) ++{ ++ unsigned long pend_avail; ++ unsigned long reserved; ++ u32 paddr; ++ u32 size; ++ ++ paddr = omapfb_sram_vram_start; ++ size = omapfb_sram_vram_size; ++ ++ if (!size) ++ return 0; ++ ++ reserved = 0; ++ pend_avail = pstart_avail + size_avail; ++ ++ if (!paddr) { ++ /* Dynamic allocation */ ++ if ((size_avail & PAGE_MASK) < size) { ++ printk(KERN_ERR "Not enough SRAM for VRAM\n"); ++ return 0; ++ } ++ size_avail = (size_avail - size) & PAGE_MASK; ++ paddr = pstart_avail + size_avail; ++ } ++ ++ if (paddr < sram_pstart || ++ paddr + size > sram_pstart + sram_size) { ++ printk(KERN_ERR "Illegal SRAM region for VRAM\n"); ++ return 0; ++ } ++ ++ /* Reserve everything above the start of the region. 
*/ ++ if (pend_avail - paddr > reserved) ++ reserved = pend_avail - paddr; ++ size_avail = pend_avail - reserved - pstart_avail; ++ ++ omap_vram_add_region(paddr, size); ++ ++ if (reserved) ++ pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved); ++ ++ return reserved; ++} ++ ++void __init omap2_set_sdram_vram(u32 size, u32 start) ++{ ++ omapfb_sdram_vram_start = start; ++ omapfb_sdram_vram_size = size; ++} ++ ++void __init omap2_set_sram_vram(u32 size, u32 start) ++{ ++ omapfb_sram_vram_start = start; ++ omapfb_sram_vram_size = size; ++} ++ ++#endif ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/plat-omap/vrfb.c kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/vrfb.c +--- linux-omap-2.6.28-omap1/arch/arm/plat-omap/vrfb.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/plat-omap/vrfb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,311 @@ ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++/*#define DEBUG*/ ++ ++#ifdef DEBUG ++#define DBG(format, ...) printk(KERN_DEBUG "VRFB: " format, ## __VA_ARGS__) ++#else ++#define DBG(format, ...) ++#endif ++ ++#define SMS_ROT_VIRT_BASE(context, rot) \ ++ (((context >= 4) ? 0xD0000000 : 0x70000000) \ ++ + (0x4000000 * (context)) \ ++ + (0x1000000 * (rot))) ++ ++#define OMAP_VRFB_SIZE (2048 * 2048 * 4) ++ ++#define VRFB_PAGE_WIDTH_EXP 5 /* Assuming SDRAM pagesize= 1024 */ ++#define VRFB_PAGE_HEIGHT_EXP 5 /* 1024 = 2^5 * 2^5 */ ++#define VRFB_PAGE_WIDTH (1 << VRFB_PAGE_WIDTH_EXP) ++#define VRFB_PAGE_HEIGHT (1 << VRFB_PAGE_HEIGHT_EXP) ++#define SMS_IMAGEHEIGHT_OFFSET 16 ++#define SMS_IMAGEWIDTH_OFFSET 0 ++#define SMS_PH_OFFSET 8 ++#define SMS_PW_OFFSET 4 ++#define SMS_PS_OFFSET 0 ++ ++#define OMAP_SMS_BASE 0x6C000000 ++#define SMS_ROT_CONTROL(context) (OMAP_SMS_BASE + 0x180 + 0x10 * context) ++#define SMS_ROT_SIZE(context) (OMAP_SMS_BASE + 0x184 + 0x10 * context) ++#define SMS_ROT_PHYSICAL_BA(context) (OMAP_SMS_BASE + 0x188 + 0x10 * context) ++ ++#define VRFB_NUM_CTXS 12 ++/* bitmap of reserved contexts */ ++static unsigned long ctx_map; ++ ++/* ++ * Access to this happens from client drivers or the PM core after wake-up. ++ * For the first case we require locking at the driver level, for the second ++ * we don't need locking, since no drivers will run until after the wake-up ++ * has finished. ++ */ ++static struct { ++ u32 physical_ba; ++ u32 control; ++ u32 size; ++} vrfb_hw_context[VRFB_NUM_CTXS]; ++ ++static void inline restore_hw_context(int ctx) ++{ ++ omap_writel(vrfb_hw_context[ctx].control, SMS_ROT_CONTROL(ctx)); ++ omap_writel(vrfb_hw_context[ctx].size, SMS_ROT_SIZE(ctx)); ++ omap_writel(vrfb_hw_context[ctx].physical_ba, SMS_ROT_PHYSICAL_BA(ctx)); ++} ++ ++/* ++ * This the extra space needed in the VRFB physical area for VRFB to safely wrap ++ * any memory accesses to the invisible part of the virtual view to the physical ++ * area. 
++ */ ++static inline u32 get_extra_physical_size(u16 width, u8 bytespp) ++{ ++ return (OMAP_VRFB_LINE_LEN - width) * bytespp * VRFB_PAGE_HEIGHT; ++} ++ ++void omap_vrfb_restore_context(void) ++{ ++ int i; ++ unsigned long map = ctx_map; ++ ++ for (i = ffs(map); i; i = ffs(map)) { ++ /* i=1..32 */ ++ i--; ++ map &= ~(1 << i); ++ restore_hw_context(i); ++ } ++} ++ ++void omap_vrfb_adjust_size(u16 *width, u16 *height, ++ u8 bytespp) ++{ ++ *width = ALIGN(*width * bytespp, VRFB_PAGE_WIDTH) / bytespp; ++ *height = ALIGN(*height, VRFB_PAGE_HEIGHT); ++} ++EXPORT_SYMBOL(omap_vrfb_adjust_size); ++ ++u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp) ++{ ++ /* mmap() is page aligned */ ++ height = ALIGN(OMAP_VRFB_LINE_LEN * bytespp * height, PAGE_SIZE) / ++ (OMAP_VRFB_LINE_LEN * bytespp); ++ ++ omap_vrfb_adjust_size(&width, &height, bytespp); ++ ++ if (width > OMAP_VRFB_LINE_LEN) ++ return 0; ++ ++ if (height > 2048) ++ return 0; ++ ++ return (width * height * bytespp) + get_extra_physical_size(width, bytespp); ++} ++EXPORT_SYMBOL(omap_vrfb_min_phys_size); ++ ++u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp) ++{ ++ unsigned long height; ++ unsigned long extra; ++ ++ width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp; ++ ++ if (width > OMAP_VRFB_LINE_LEN) ++ return 0; ++ ++ extra = get_extra_physical_size(width, bytespp); ++ ++ if (phys_size < extra) ++ return 0; ++ ++ height = (phys_size - extra) / (width * bytespp); ++ ++ /* mmap() is page aligned */ ++ height = (OMAP_VRFB_LINE_LEN * bytespp * height & ~PAGE_MASK) / ++ (OMAP_VRFB_LINE_LEN * bytespp); ++ ++ /* Only full tiles */ ++ height &= ~(VRFB_PAGE_HEIGHT - 1); ++ ++ /* Virtual views provided by VRFB are limited to 2048x2048. */ ++ return min(height, 2048UL); ++} ++EXPORT_SYMBOL(omap_vrfb_max_height); ++ ++void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr, ++ u16 width, u16 height, ++ enum omap_color_mode color_mode) ++{ ++ unsigned pixel_size_exp; ++ u16 vrfb_width; ++ u16 vrfb_height; ++ u8 ctx = vrfb->context; ++ u8 bytespp; ++ u32 size; ++ u32 control; ++ ++ DBG("omapfb_set_vrfb(%d, %lx, %dx%d, %d)\n", ctx, paddr, ++ width, height, color_mode); ++ ++ switch (color_mode) { ++ case OMAP_DSS_COLOR_RGB16: ++ case OMAP_DSS_COLOR_ARGB16: ++ bytespp = 2; ++ break; ++ ++ case OMAP_DSS_COLOR_RGB24P: ++ bytespp = 3; ++ break; ++ ++ case OMAP_DSS_COLOR_RGB24U: ++ case OMAP_DSS_COLOR_ARGB32: ++ case OMAP_DSS_COLOR_RGBA32: ++ case OMAP_DSS_COLOR_RGBX32: ++ case OMAP_DSS_COLOR_YUV2: ++ case OMAP_DSS_COLOR_UYVY: ++ bytespp = 4; ++ break; ++ ++ default: ++ BUG(); ++ return; ++ } ++ ++ if (color_mode == OMAP_DSS_COLOR_YUV2 || ++ color_mode == OMAP_DSS_COLOR_UYVY) ++ width >>= 1; ++ ++ if (bytespp == 4) ++ pixel_size_exp = 2; ++ else if (bytespp == 2) ++ pixel_size_exp = 1; ++ else ++ BUG(); ++ ++ vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp; ++ vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT); ++ ++ DBG("vrfb w %u, h %u bytespp %d\n", vrfb_width, vrfb_height, bytespp); ++ ++ size = vrfb_width << SMS_IMAGEWIDTH_OFFSET; ++ size |= vrfb_height << SMS_IMAGEHEIGHT_OFFSET; ++ ++ control = pixel_size_exp << SMS_PS_OFFSET; ++ control |= VRFB_PAGE_WIDTH_EXP << SMS_PW_OFFSET; ++ control |= VRFB_PAGE_HEIGHT_EXP << SMS_PH_OFFSET; ++ ++ vrfb_hw_context[ctx].physical_ba = paddr; ++ vrfb_hw_context[ctx].size = size; ++ vrfb_hw_context[ctx].control = control; ++ ++ omap_writel(paddr, SMS_ROT_PHYSICAL_BA(ctx)); ++ omap_writel(size, SMS_ROT_SIZE(ctx)); ++ omap_writel(control, SMS_ROT_CONTROL(ctx)); ++ ++ DBG("vrfb 
offset pixels %d, %d\n", ++ vrfb_width - width, vrfb_height - height); ++ ++ vrfb->xres = width; ++ vrfb->yres = height; ++ vrfb->xoffset = vrfb_width - width; ++ vrfb->yoffset = vrfb_height - height; ++ vrfb->bytespp = bytespp; ++} ++EXPORT_SYMBOL(omap_vrfb_setup); ++ ++int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot) ++{ ++ unsigned long size = height * OMAP_VRFB_LINE_LEN * vrfb->bytespp; ++ ++ vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size); ++ ++ if (!vrfb->vaddr[rot]) { ++ printk(KERN_ERR "vrfb: ioremap failed\n"); ++ return -ENOMEM; ++ } ++ ++ DBG("ioremapped vrfb area %d of size %lu into %p\n", rot, size, ++ vrfb->vaddr[rot]); ++ ++ return 0; ++} ++EXPORT_SYMBOL(omap_vrfb_map_angle); ++ ++void omap_vrfb_release_ctx(struct vrfb *vrfb) ++{ ++ int rot; ++ int ctx = vrfb->context; ++ ++ if (ctx == 0xff) ++ return; ++ ++ DBG("release ctx %d\n", ctx); ++ ++ if (!(ctx_map & (1 << ctx))) { ++ BUG(); ++ return; ++ } ++ clear_bit(ctx, &ctx_map); ++ ++ for (rot = 0; rot < 4; ++rot) { ++ if(vrfb->paddr[rot]) { ++ release_mem_region(vrfb->paddr[rot], OMAP_VRFB_SIZE); ++ vrfb->paddr[rot] = 0; ++ } ++ } ++ ++ vrfb->context = 0xff; ++} ++EXPORT_SYMBOL(omap_vrfb_release_ctx); ++ ++int omap_vrfb_request_ctx(struct vrfb *vrfb) ++{ ++ int rot; ++ u32 paddr; ++ u8 ctx; ++ ++ DBG("request ctx\n"); ++ ++ for (ctx = 0; ctx < VRFB_NUM_CTXS; ++ctx) ++ if ((ctx_map & (1 << ctx)) == 0) ++ break; ++ ++ if (ctx == VRFB_NUM_CTXS) { ++ printk(KERN_ERR "vrfb: no free contexts\n"); ++ return -EBUSY; ++ } ++ ++ DBG("found free ctx %d\n", ctx); ++ ++ set_bit(ctx, &ctx_map); ++ ++ memset(vrfb, 0, sizeof(*vrfb)); ++ ++ vrfb->context = ctx; ++ ++ for (rot = 0; rot < 4; ++rot) { ++ paddr = SMS_ROT_VIRT_BASE(ctx, rot); ++ if (!request_mem_region(paddr, OMAP_VRFB_SIZE, "vrfb")) { ++ printk(KERN_ERR "vrfb: failed to reserve VRFB " ++ "area for ctx %d, rotation %d\n", ++ ctx, rot * 90); ++ omap_vrfb_release_ctx(vrfb); ++ return -ENOMEM; ++ } ++ ++ vrfb->paddr[rot] = paddr; ++ ++ DBG("VRFB %d/%d: %lx\n", ctx, rot*90, vrfb->paddr[rot]); ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(omap_vrfb_request_ctx); ++ +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/tools/mach-types kernel-2.6.28-20093908+0m5/arch/arm/tools/mach-types +--- linux-omap-2.6.28-omap1/arch/arm/tools/mach-types 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/tools/mach-types 2011-09-04 11:31:05.000000000 +0200 +@@ -12,10 +12,11 @@ + # + # http://www.arm.linux.org.uk/developer/machines/?action=new + # +-# Last update: Thu Sep 25 10:10:50 2008 ++# Last update: Sun Nov 30 16:39:36 2008 + # + # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number + # ++nokia_rx71 MACH_NOKIA_RX71 NOKIA_RX71 6666 + ebsa110 ARCH_EBSA110 EBSA110 0 + riscpc ARCH_RPC RISCPC 1 + nexuspci ARCH_NEXUSPCI NEXUSPCI 3 +@@ -1380,7 +1381,7 @@ holon MACH_HOLON HOLON 1377 + olip8 MACH_OLIP8 OLIP8 1378 + ghi270hg MACH_GHI270HG GHI270HG 1379 + davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380 +-davinci_dm355_evm MACH_DAVINCI_DM350_EVM DAVINCI_DM350_EVM 1381 ++davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381 + blackriver MACH_BLACKRIVER BLACKRIVER 1383 + sandgate_wp MACH_SANDGATEWP SANDGATEWP 1384 + cdotbwsg MACH_CDOTBWSG CDOTBWSG 1385 +@@ -1771,7 +1772,7 @@ axs_ultrax MACH_AXS_ULTRAX AXS_ULTRAX + at572d940deb MACH_AT572D940DEB AT572D940DEB 1780 + davinci_da8xx_evm MACH_DAVINCI_DA8XX_EVM DAVINCI_DA8XX_EVM 1781 + ep9302 MACH_EP9302 EP9302 1782 +-at572d940hfeb MACH_AT572D940HFEB AT572D940HFEB 1783 ++at572d940hfek MACH_AT572D940HFEB 
AT572D940HFEB 1783 + cybook3 MACH_CYBOOK3 CYBOOK3 1784 + wdg002 MACH_WDG002 WDG002 1785 + sg560adsl MACH_SG560ADSL SG560ADSL 1786 +@@ -1899,3 +1900,98 @@ rut100 MACH_RUT100 RUT100 1908 + asusp535 MACH_ASUSP535 ASUSP535 1909 + htcraphael MACH_HTCRAPHAEL HTCRAPHAEL 1910 + sygdg1 MACH_SYGDG1 SYGDG1 1911 ++sygdg2 MACH_SYGDG2 SYGDG2 1912 ++seoul MACH_SEOUL SEOUL 1913 ++salerno MACH_SALERNO SALERNO 1914 ++ucn_s3c64xx MACH_UCN_S3C64XX UCN_S3C64XX 1915 ++msm7201a MACH_MSM7201A MSM7201A 1916 ++lpr1 MACH_LPR1 LPR1 1917 ++armadillo500fx MACH_ARMADILLO500FX ARMADILLO500FX 1918 ++g3evm MACH_G3EVM G3EVM 1919 ++z3_dm355 MACH_Z3_DM355 Z3_DM355 1920 ++w90p910evb MACH_W90P910EVB W90P910EVB 1921 ++w90p920evb MACH_W90P920EVB W90P920EVB 1922 ++w90p950evb MACH_W90P950EVB W90P950EVB 1923 ++w90n960evb MACH_W90N960EVB W90N960EVB 1924 ++camhd MACH_CAMHD CAMHD 1925 ++mvc100 MACH_MVC100 MVC100 1926 ++electrum_200 MACH_ELECTRUM_200 ELECTRUM_200 1927 ++htcjade MACH_HTCJADE HTCJADE 1928 ++memphis MACH_MEMPHIS MEMPHIS 1929 ++imx27sbc MACH_IMX27SBC IMX27SBC 1930 ++lextar MACH_LEXTAR LEXTAR 1931 ++mv88f6281gtw_ge MACH_MV88F6281GTW_GE MV88F6281GTW_GE 1932 ++ncp MACH_NCP NCP 1933 ++z32an_series MACH_Z32AN Z32AN 1934 ++tmq_capd MACH_TMQ_CAPD TMQ_CAPD 1935 ++omap3_wl MACH_OMAP3_WL OMAP3_WL 1936 ++chumby MACH_CHUMBY CHUMBY 1937 ++atsarm9 MACH_ATSARM9 ATSARM9 1938 ++davinci_dm365_evm MACH_DAVINCI_DM365_EVM DAVINCI_DM365_EVM 1939 ++bahamas MACH_BAHAMAS BAHAMAS 1940 ++das MACH_DAS DAS 1941 ++minidas MACH_MINIDAS MINIDAS 1942 ++vk1000 MACH_VK1000 VK1000 1943 ++centro MACH_CENTRO CENTRO 1944 ++ctera_2bay MACH_CTERA_2BAY CTERA_2BAY 1945 ++edgeconnect MACH_EDGECONNECT EDGECONNECT 1946 ++nd27000 MACH_ND27000 ND27000 1947 ++cobra MACH_GEMALTO_COBRA GEMALTO_COBRA 1948 ++ingelabs_comet MACH_INGELABS_COMET INGELABS_COMET 1949 ++pollux_wiz MACH_POLLUX_WIZ POLLUX_WIZ 1950 ++blackstone MACH_BLACKSTONE BLACKSTONE 1951 ++topaz MACH_TOPAZ TOPAZ 1952 ++aixle MACH_AIXLE AIXLE 1953 ++mw998 MACH_MW998 MW998 1954 ++nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955 ++vsc5605ev MACH_VSC5605EV VSC5605EV 1956 ++nt98700dk MACH_NT98700DK NT98700DK 1957 ++icontact MACH_ICONTACT ICONTACT 1958 ++swarco_frcpu MACH_SWARCO_FRCPU SWARCO_FRCPU 1959 ++swarco_scpu MACH_SWARCO_SCPU SWARCO_SCPU 1960 ++bbox_p16 MACH_BBOX_P16 BBOX_P16 1961 ++bstd MACH_BSTD BSTD 1962 ++sbc2440ii MACH_SBC2440II SBC2440II 1963 ++pcm034 MACH_PCM034 PCM034 1964 ++neso MACH_NESO NESO 1965 ++wlnx_9g20 MACH_WLNX_9G20 WLNX_9G20 1966 ++omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967 ++totemnova MACH_TOTEMNOVA TOTEMNOVA 1968 ++c5000 MACH_C5000 C5000 1969 ++unipo_at91sam9263 MACH_UNIPO_AT91SAM9263 UNIPO_AT91SAM9263 1970 ++ethernut5 MACH_ETHERNUT5 ETHERNUT5 1971 ++arm11 MACH_ARM11 ARM11 1972 ++cpuat9260 MACH_CPUAT9260 CPUAT9260 1973 ++cpupxa255 MACH_CPUPXA255 CPUPXA255 1974 ++cpuimx27 MACH_CPUIMX27 CPUIMX27 1975 ++cheflux MACH_CHEFLUX CHEFLUX 1976 ++eb_cpux9k2 MACH_EB_CPUX9K2 EB_CPUX9K2 1977 ++opcotec MACH_OPCOTEC OPCOTEC 1978 ++yt MACH_YT YT 1979 ++motoq MACH_MOTOQ MOTOQ 1980 ++bsb1 MACH_BSB1 BSB1 1981 ++acs5k MACH_ACS5K ACS5K 1982 ++milan MACH_MILAN MILAN 1983 ++quartzv2 MACH_QUARTZV2 QUARTZV2 1984 ++rsvp MACH_RSVP RSVP 1985 ++rmp200 MACH_RMP200 RMP200 1986 ++snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987 ++dsm320 MACH_DSM320 DSM320 1988 ++adsgcm MACH_ADSGCM ADSGCM 1989 ++ase2_400 MACH_ASE2_400 ASE2_400 1990 ++pizza MACH_PIZZA PIZZA 1991 ++spot_ngpl MACH_SPOT_NGPL SPOT_NGPL 1992 ++armata MACH_ARMATA ARMATA 1993 ++exeda MACH_EXEDA EXEDA 1994 ++mx31sf005 MACH_MX31SF005 MX31SF005 1995 ++f5d8231_4_v2 
MACH_F5D8231_4_V2 F5D8231_4_V2 1996 ++q2440 MACH_Q2440 Q2440 1997 ++qq2440 MACH_QQ2440 QQ2440 1998 ++mini2440 MACH_MINI2440 MINI2440 1999 ++colibri300 MACH_COLIBRI300 COLIBRI300 2000 ++jades MACH_JADES JADES 2001 ++spark MACH_SPARK SPARK 2002 ++benzina MACH_BENZINA BENZINA 2003 ++blaze MACH_BLAZE BLAZE 2004 ++linkstation_ls_hgl MACH_LINKSTATION_LS_HGL LINKSTATION_LS_HGL 2005 ++htcvenus MACH_HTCVENUS HTCVENUS 2006 +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/vfp/entry.S kernel-2.6.28-20093908+0m5/arch/arm/vfp/entry.S +--- linux-omap-2.6.28-omap1/arch/arm/vfp/entry.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/vfp/entry.S 2011-09-04 11:31:05.000000000 +0200 +@@ -15,13 +15,16 @@ + * r10 = thread_info structure + * lr = failure return + */ +-#include +-#include +-#include +-#include ++#include + #include ++#include "../kernel/entry-header.S" + + ENTRY(do_vfp) ++#ifdef CONFIG_PREEMPT ++ ldr r4, [r10, #TI_PREEMPT] @ get preempt count ++ add r11, r4, #1 @ increment it ++ str r11, [r10, #TI_PREEMPT] ++#endif + enable_irq + ldr r4, .LCvfp + ldr r11, [r10, #TI_CPU] @ CPU number +@@ -30,6 +33,12 @@ ENTRY(do_vfp) + ENDPROC(do_vfp) + + ENTRY(vfp_null_entry) ++#ifdef CONFIG_PREEMPT ++ get_thread_info r10 ++ ldr r4, [r10, #TI_PREEMPT] @ get preempt count ++ sub r11, r4, #1 @ decrement it ++ str r11, [r10, #TI_PREEMPT] ++#endif + mov pc, lr + ENDPROC(vfp_null_entry) + +@@ -41,6 +50,12 @@ ENDPROC(vfp_null_entry) + + __INIT + ENTRY(vfp_testing_entry) ++#ifdef CONFIG_PREEMPT ++ get_thread_info r10 ++ ldr r4, [r10, #TI_PREEMPT] @ get preempt count ++ sub r11, r4, #1 @ decrement it ++ str r11, [r10, #TI_PREEMPT] ++#endif + ldr r0, VFP_arch_address + str r5, [r0] @ known non-zero value + mov pc, r9 @ we have handled the fault +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/vfp/vfp.h kernel-2.6.28-20093908+0m5/arch/arm/vfp/vfp.h +--- linux-omap-2.6.28-omap1/arch/arm/vfp/vfp.h 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/vfp/vfp.h 2011-09-04 11:31:05.000000000 +0200 +@@ -377,6 +377,11 @@ struct op { + u32 flags; + }; + +-#ifdef CONFIG_SMP + extern void vfp_save_state(void *location, u32 fpexc); ++#if defined(CONFIG_SMP) || defined(CONFIG_PM) ++extern void vfp_restore_state(void *location); ++extern void vfp_pm_save_context(void); ++extern void vfp_pm_restore_context(void); ++extern void vfp_save_regs(u32 *vfp_regs); ++extern void vfp_restore_regs(u32 *vfp_regs); + #endif +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/vfp/vfphw.S kernel-2.6.28-20093908+0m5/arch/arm/vfp/vfphw.S +--- linux-omap-2.6.28-omap1/arch/arm/vfp/vfphw.S 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/vfp/vfphw.S 2011-09-04 11:31:05.000000000 +0200 +@@ -101,9 +101,12 @@ ENTRY(vfp_support_entry) + VFPFSTMIA r4, r5 @ save the working registers + VFPFMRX r5, FPSCR @ current status + tst r1, #FPEXC_EX @ is there additional state to save? +- VFPFMRX r6, FPINST, NE @ FPINST (only if FPEXC.EX is set) +- tstne r1, #FPEXC_FP2V @ is there an FPINST2 to read? +- VFPFMRX r8, FPINST2, NE @ FPINST2 if needed (and present) ++ beq 1f ++ VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set) ++ tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? 
++ beq 1f ++ VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present) ++1: + stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 + @ and point r4 at the word at the + @ start of the register dump +@@ -117,9 +120,12 @@ no_old_VFP_process: + @ FPEXC is in a safe state + ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 + tst r1, #FPEXC_EX @ is there additional state to restore? +- VFPFMXR FPINST, r6, NE @ restore FPINST (only if FPEXC.EX is set) +- tstne r1, #FPEXC_FP2V @ is there an FPINST2 to write? +- VFPFMXR FPINST2, r8, NE @ FPINST2 if needed (and present) ++ beq 1f ++ VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set) ++ tst r1, #FPEXC_FP2V @ is there an FPINST2 to write? ++ beq 1f ++ VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present) ++1: + VFPFMXR FPSCR, r5 @ restore status + + check_for_exception: +@@ -131,6 +137,12 @@ check_for_exception: + VFPFMXR FPEXC, r1 @ restore FPEXC last + sub r2, r2, #4 + str r2, [sp, #S_PC] @ retry the instruction ++#ifdef CONFIG_PREEMPT ++ get_thread_info r10 ++ ldr r4, [r10, #TI_PREEMPT] @ get preempt count ++ sub r11, r4, #1 @ decrement it ++ str r11, [r10, #TI_PREEMPT] ++#endif + mov pc, r9 @ we think we have handled things + + +@@ -149,6 +161,12 @@ look_for_VFP_exceptions: + @ not recognised by VFP + + DBGSTR "not VFP" ++#ifdef CONFIG_PREEMPT ++ get_thread_info r10 ++ ldr r4, [r10, #TI_PREEMPT] @ get preempt count ++ sub r11, r4, #1 @ decrement it ++ str r11, [r10, #TI_PREEMPT] ++#endif + mov pc, lr + + process_exception: +@@ -166,7 +184,6 @@ process_exception: + @ retry the faulted instruction + ENDPROC(vfp_support_entry) + +-#ifdef CONFIG_SMP + ENTRY(vfp_save_state) + @ Save the current VFP state + @ r0 - save location +@@ -175,12 +192,50 @@ ENTRY(vfp_save_state) + VFPFSTMIA r0, r2 @ save the working registers + VFPFMRX r2, FPSCR @ current status + tst r1, #FPEXC_EX @ is there additional state to save? +- VFPFMRX r3, FPINST, NE @ FPINST (only if FPEXC.EX is set) +- tstne r1, #FPEXC_FP2V @ is there an FPINST2 to read? +- VFPFMRX r12, FPINST2, NE @ FPINST2 if needed (and present) ++ beq 1f ++ VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set) ++ tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? ++ beq 1f ++ VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) ++1: + stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 + mov pc, lr + ENDPROC(vfp_save_state) ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PM) ++ENTRY(vfp_restore_state) ++ @ Restore the current VFP state ++ @ r0 - saved location ++ DBGSTR1 "Restore VFP state %p", r0 ++ ldmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 ++ VFPFMXR FPSCR, r2 @ current status ++ tst r1, #FPEXC_EX @ is there additional state to save? ++ beq 1f ++ VFPFMXR FPINST, r3 @ FPINST (only if FPEXC.EX is set) ++ tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? 
++ beq 1f ++ VFPFMXR FPINST2, r12 @ FPINST2 if needed (and present) ++1: ++ mov pc, lr ++ENDPROC(vfp_restore_state) ++#endif ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PM) ++ENTRY(vfp_save_regs) ++ @ Save the current VFP state ++ @ r0 - save location ++ VFPFSTMIA r0, r1 ++ mov pc, lr ++ENDPROC(vfp_save_regs) ++#endif ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PM) ++ENTRY(vfp_restore_regs) ++ @ Save the current VFP state ++ @ r0 - save location ++ VFPFLDMIA r0, r1 ++ mov pc, lr ++ENDPROC(vfp_restore_regs) + #endif + + last_VFP_context_address: +diff -Nurp linux-omap-2.6.28-omap1/arch/arm/vfp/vfpmodule.c kernel-2.6.28-20093908+0m5/arch/arm/vfp/vfpmodule.c +--- linux-omap-2.6.28-omap1/arch/arm/vfp/vfpmodule.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/arch/arm/vfp/vfpmodule.c 2011-09-04 11:31:05.000000000 +0200 +@@ -266,7 +266,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, + * on VFP subarch 1. + */ + vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); +- return; ++ goto exit; + } + + /* +@@ -297,7 +297,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, + * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. + */ + if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) +- return; ++ goto exit; + + /* + * The barrier() here prevents fpinst2 being read +@@ -310,9 +310,11 @@ void VFP_bounce(u32 trigger, u32 fpexc, + exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); + if (exceptions) + vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); ++ exit: ++ preempt_enable(); + } + +-static void vfp_enable(void *unused) ++void vfp_enable(void *unused) + { + u32 access = get_copro_access(); + +@@ -322,6 +324,151 @@ static void vfp_enable(void *unused) + set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); + } + ++#ifdef CONFIG_PM ++#include ++ ++void vfp_pm_save_context(void) ++{ ++ struct thread_info *ti = current_thread_info(); ++ u32 fpexc = fmrx(FPEXC); ++ ++ /* if vfp is on, then save state for resumption */ ++ if (fpexc & FPEXC_EN) { ++ printk(KERN_DEBUG "%s: saving vfp state\n", __func__); ++ vfp_save_state(&(ti->vfpstate.hard.fpexc), fpexc); ++ vfp_save_regs((u32 *)(ti->vfpstate.hard.fpregs)); ++ } else { ++ vfp_enable(NULL); /* enable VFP for now to save context. 
*/ ++ vfp_save_regs((u32 *)(ti->vfpstate.hard.fpregs)); ++ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); ++ } ++} ++ ++static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) ++{ ++ u32 fpexc = fmrx(FPEXC); ++ vfp_pm_save_context(); ++ ++ /* disable, just in case */ ++ if (fpexc & FPEXC_EN) ++ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); ++ ++ /* clear any information we had about last context state */ ++ memset(last_VFP_context, 0, sizeof(last_VFP_context)); ++ ++ return 0; ++} ++ ++void vfp_pm_restore_context(void) ++{ ++ struct thread_info *ti = current_thread_info(); ++ u32 fpexc = fmrx(FPEXC); ++ /* if vfp is on, then save state for resumption */ ++ if (fpexc & FPEXC_EN) { ++ printk(KERN_DEBUG "%s: restoring vfp state\n", __func__); ++ vfp_restore_state(&(ti->vfpstate.hard.fpexc)); ++ vfp_restore_regs((u32 *)(ti->vfpstate.hard.fpregs)); ++ } ++} ++ ++static int vfp_pm_resume(struct sys_device *dev) ++{ ++ /* ensure we have access to the vfp */ ++ vfp_enable(NULL); ++ ++ vfp_pm_restore_context(); ++ /* and disable it to ensure the next usage restores the state */ ++ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); ++ ++ return 0; ++} ++ ++static struct sysdev_class vfp_pm_sysclass = { ++ .name = "vfp", ++ .suspend = vfp_pm_suspend, ++ .resume = vfp_pm_resume, ++}; ++ ++static struct sys_device vfp_pm_sysdev = { ++ .cls = &vfp_pm_sysclass, ++}; ++ ++static void vfp_pm_init(void) ++{ ++ sysdev_class_register(&vfp_pm_sysclass); ++ sysdev_register(&vfp_pm_sysdev); ++} ++ ++ ++#else ++static inline void vfp_pm_init(void) { } ++#endif /* CONFIG_PM */ ++ ++/* ++ * Synchronise the hardware VFP state of a thread with the saved one. ++ * This function is used by the ptrace mechanism and the signal handler path. ++ */ ++void vfp_sync_state(struct thread_info *thread) ++{ ++ unsigned int cpu = get_cpu(); ++ u32 fpexc = fmrx(FPEXC); ++ int vfp_enabled; ++ int self; ++ ++ vfp_enabled = fpexc & FPEXC_EN; ++ self = thread == current_thread_info(); ++#ifdef CONFIG_SMP ++ /* ++ * On SMP systems, the VFP state is automatically saved at every ++ * context switch. We mark the thread VFP state as belonging to a ++ * non-existent CPU so that the saved one will be reloaded when ++ * needed. ++ */ ++ thread->vfpstate.hard.cpu = NR_CPUS; ++ /* ++ * Only the current thread's saved VFP context can be out-of-date. ++ * For others there is nothing else to do, since we already ensured ++ * force loading above. ++ */ ++ if (!self) ++ goto out; ++#endif ++ /* ++ * If the VFP is enabled only the current thread's saved VFP ++ * context can get out-of-date. For other threads the context ++ * was updated when the current thread started to use the VFP. ++ * This also means that the context will be reloaded next time ++ * the thread uses the VFP, so no need to enforce it. ++ */ ++ if (vfp_enabled && !self) ++ goto out; ++ ++ if (!last_VFP_context[cpu]) ++ goto out; ++ ++ /* ++ * Save the last VFP state on this CPU. ++ */ ++ if (!vfp_enabled) ++ fmxr(FPEXC, fpexc | FPEXC_EN); ++ vfp_save_state(last_VFP_context[cpu], fpexc); ++ /* ++ * Disable VFP in case it was enabled so that the force reload ++ * can happen. ++ */ ++ fpexc &= ~FPEXC_EN; ++ fmxr(FPEXC, fpexc); ++ ++ /* ++ * Set the context to NULL to force a reload the next time the thread ++ * uses the VFP. 
++ */ ++ last_VFP_context[cpu] = NULL; ++ ++out: ++ put_cpu(); ++} ++ + #include + + /* +@@ -365,12 +512,34 @@ static int __init vfp_init(void) + vfp_vector = vfp_support_entry; + + thread_register_notifier(&vfp_notifier_block); ++ vfp_pm_init(); + + /* + * We detected VFP, and the support code is + * in place; report VFP support to userspace. + */ + elf_hwcap |= HWCAP_VFP; ++#ifdef CONFIG_VFPv3 ++ if (VFP_arch >= 3) { ++ elf_hwcap |= HWCAP_VFPv3; ++ ++ /* ++ * Check for VFPv3 D16. CPUs in this configuration ++ * only have 16 x 64bit registers. ++ */ ++ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) ++ elf_hwcap |= HWCAP_VFPv3D16; ++ } ++#endif ++#ifdef CONFIG_NEON ++ /* ++ * Check for the presence of the Advanced SIMD ++ * load/store instructions, integer and single ++ * precision floating point operations. ++ */ ++ if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) ++ elf_hwcap |= HWCAP_NEON; ++#endif + } + return 0; + } +diff -Nurp linux-omap-2.6.28-omap1/block/blk-core.c kernel-2.6.28-20093908+0m5/block/blk-core.c +--- linux-omap-2.6.28-omap1/block/blk-core.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/block/blk-core.c 2011-09-04 11:31:05.000000000 +0200 +@@ -1246,7 +1246,7 @@ get_rq: + blk_plug_device(q); + add_request(q, req); + out: +- if (sync) ++ if (sync || (blk_queue_nonrot(q) && rq_data_dir(req) == READ)) + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); + return 0; +diff -Nurp linux-omap-2.6.28-omap1/block/genhd.c kernel-2.6.28-20093908+0m5/block/genhd.c +--- linux-omap-2.6.28-omap1/block/genhd.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/block/genhd.c 2011-09-04 11:31:05.000000000 +0200 +@@ -692,6 +692,20 @@ static void disk_seqf_stop(struct seq_fi + } + } + ++void mtd_diskstats(struct seq_file *seqf); ++ ++static void diskstats_seqf_stop(struct seq_file *seqf, void *v) ++{ ++ struct class_dev_iter *iter = seqf->private; ++ ++ /* stop is called even after start failed :-( */ ++ if (iter) { ++ mtd_diskstats(seqf); ++ class_dev_iter_exit(iter); ++ kfree(iter); ++ } ++} ++ + static void *show_partition_start(struct seq_file *seqf, loff_t *pos) + { + static void *p; +@@ -1010,7 +1024,7 @@ static int diskstats_show(struct seq_fil + static const struct seq_operations diskstats_op = { + .start = disk_seqf_start, + .next = disk_seqf_next, +- .stop = disk_seqf_stop, ++ .stop = diskstats_seqf_stop, + .show = diskstats_show + }; + +diff -Nurp linux-omap-2.6.28-omap1/Documentation/arm/OMAP/DSS kernel-2.6.28-20093908+0m5/Documentation/arm/OMAP/DSS +--- linux-omap-2.6.28-omap1/Documentation/arm/OMAP/DSS 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/arm/OMAP/DSS 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,317 @@ ++OMAP2/3 Display Subsystem ++------------------------- ++ ++This is an almost total rewrite of the OMAP FB driver in drivers/video/omap ++(let's call it DSS1). The main differences between DSS1 and DSS2 are DSI, ++TV-out and multiple display support, but there are lots of small improvements ++also. ++ ++The DSS2 driver (omapdss module) is in arch/arm/plat-omap/dss/, and the FB, ++panel and controller drivers are in drivers/video/omap2/. DSS1 and DSS2 live ++currently side by side, you can choose which one to use. 
++ ++Features ++-------- ++ ++Working and tested features include: ++ ++- MIPI DPI (parallel) output ++- MIPI DSI output in command mode ++- MIPI DBI (RFBI) output ++- SDI output ++- TV output ++- All pieces can be compiled as a module or inside kernel ++- Use DISPC to update any of the outputs ++- Use CPU to update RFBI or DSI output ++- OMAP DISPC planes ++- RGB16, RGB24 packed, RGB24 unpacked ++- YUV2, UYVY ++- Scaling ++- Adjusting DSS FCK to find a good pixel clock ++- Use DSI DPLL to create DSS FCK ++ ++Tested boards include: ++- OMAP3 SDP board ++- Beagle board ++- N810 ++ ++omapdss driver ++-------------- ++ ++The DSS driver does not itself have any support for Linux framebuffer, V4L or ++such like the current ones, but it has an internal kernel API that upper level ++drivers can use. ++ ++The DSS driver models OMAP's overlays, overlay managers and displays in a ++flexible way to enable non-common multi-display configuration. In addition to ++modelling the hardware overlays, omapdss supports virtual overlays and overlay ++managers. These can be used when updating a display with CPU or system DMA. ++ ++Panel and controller drivers ++---------------------------- ++ ++The drivers implement panel or controller specific functionality and are not ++usually visible to users except through omapfb driver. They register ++themselves to the DSS driver. ++ ++omapfb driver ++------------- ++ ++The omapfb driver implements arbitrary number of standard linux framebuffers. ++These framebuffers can be routed flexibly to any overlays, thus allowing very ++dynamic display architecture. ++ ++The driver exports some omapfb specific ioctls, which are compatible with the ++ioctls in the old driver. ++ ++The rest of the non standard features are exported via sysfs. Whether the final ++implementation will use sysfs, or ioctls, is still open. ++ ++V4L2 drivers ++------------ ++ ++V4L2 is being implemented in TI. ++ ++From omapdss point of view the V4L2 drivers should be similar to framebuffer ++driver. ++ ++Architecture ++-------------------- ++ ++Some clarification what the different components do: ++ ++ - Framebuffer is a memory area inside OMAP's SRAM/SDRAM that contains the ++ pixel data for the image. Framebuffer has width and height and color ++ depth. ++ - Overlay defines where the pixels are read from and where they go on the ++ screen. The overlay may be smaller than framebuffer, thus displaying only ++ part of the framebuffer. The position of the overlay may be changed if ++ the overlay is smaller than the display. ++ - Overlay manager combines the overlays in to one image and feeds them to ++ display. ++ - Display is the actual physical display device. ++ ++A framebuffer can be connected to multiple overlays to show the same pixel data ++on all of the overlays. Note that in this case the overlay input sizes must be ++the same, but, in case of video overlays, the output size can be different. Any ++framebuffer can be connected to any overlay. ++ ++An overlay can be connected to one overlay manager. Also DISPC overlays can be ++connected only to DISPC overlay managers, and virtual overlays can be only ++connected to virtual overlays. ++ ++An overlay manager can be connected to one display. There are certain ++restrictions which kinds of displays an overlay manager can be connected: ++ ++ - DISPC TV overlay manager can be only connected to TV display. ++ - Virtual overlay managers can only be connected to DBI or DSI displays. 
++ - DISPC LCD overlay manager can be connected to all displays, except TV ++ display. ++ ++Sysfs ++----- ++The sysfs interface is mainly used for testing. I don't think sysfs ++interface is the best for this in the final version, but I don't quite know ++what would be the best interfaces for these things. ++ ++The sysfs interface is divided to two parts: DSS and FB. ++ ++/sys/class/graphics/fb? directory: ++mirror 0=off, 1=on ++rotate Rotation 0-3 for 0, 90, 180, 270 degrees ++rotate_type 0 = DMA rotation, 1 = VRFB rotation ++overlays List of overlay numbers to which framebuffer pixels go ++phys_addr Physical address of the framebuffer ++virt_addr Virtual address of the framebuffer ++size Size of the framebuffer ++ ++/sys/devices/platform/omapdss/overlay? directory: ++enabled 0=off, 1=on ++input_size width,height (ie. the framebuffer size) ++manager Destination overlay manager name ++name ++output_size width,height ++position x,y ++screen_width width ++global_alpha global alpha 0-255 0=transparent 255=opaque ++ ++/sys/devices/platform/omapdss/manager? directory: ++display Destination display ++name ++alpha_blending_enabled 0=off 1=on ++color_key_enabled 0=off 1=on ++color_key_type gfx-destination video-source ++color_key_value 0 to 2^24 ++default_color default background color RGB24 0 to 2^24 ++ ++/sys/devices/platform/omapdss/display? directory: ++ctrl_name Controller name ++mirror 0=off, 1=on ++update_mode 0=off, 1=auto, 2=manual ++enabled 0=off, 1=on ++name ++rotate Rotation 0-3 for 0, 90, 180, 270 degrees ++timings Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw) ++ When writing, two special timings are accepted for tv-out: ++ "pal" and "ntsc" ++panel_name ++tear_elim Tearing elimination 0=off, 1=on ++ ++There are also some debugfs files at /omapdss/ which show information ++about clocks and registers. ++ ++Examples ++-------- ++ ++The following definitions have been made for the examples below: ++ ++ovl0=/sys/devices/platform/omapdss/overlay0 ++ovl1=/sys/devices/platform/omapdss/overlay1 ++ovl2=/sys/devices/platform/omapdss/overlay2 ++ ++mgr0=/sys/devices/platform/omapdss/manager0 ++mgr1=/sys/devices/platform/omapdss/manager1 ++ ++lcd=/sys/devices/platform/omapdss/display0 ++dvi=/sys/devices/platform/omapdss/display1 ++tv=/sys/devices/platform/omapdss/display2 ++ ++fb0=/sys/class/graphics/fb0 ++fb1=/sys/class/graphics/fb1 ++fb2=/sys/class/graphics/fb2 ++ ++Default setup on OMAP3 SDP ++-------------------------- ++ ++Here's the default setup on OMAP3 SDP board. All planes go to LCD. DVI ++and TV-out are not in use. The columns from left to right are: ++framebuffers, overlays, overlay managers, displays. Framebuffers are ++handled by omapfb, and the rest by the DSS. 
++ ++FB0 --- GFX -\ DVI ++FB1 --- VID1 --+- LCD ---- LCD ++FB2 --- VID2 -/ TV ----- TV ++ ++Example: Switch from LCD to DVI ++---------------------- ++ ++w=`cat $dvi/horizontal | cut -d "," -f 1` ++h=`cat $dvi/vertical | cut -d "," -f 1` ++ ++echo "0" > $lcd/enabled ++echo "" > $mgr0/display ++fbset -fb /dev/fb0 -xres $w -yres $h -vxres $w -vyres $h ++# at this point you have to switch the dvi/lcd dip-switch from the omap board ++echo "dvi" > $mgr0/display ++echo "1" > $dvi/enabled ++ ++After this the configuration looks like: ++ ++FB0 --- GFX -\ -- DVI ++FB1 --- VID1 --+- LCD -/ LCD ++FB2 --- VID2 -/ TV ----- TV ++ ++Example: Clone GFX overlay to LCD and TV ++------------------------------- ++ ++w=`cat $tv/horizontal | cut -d "," -f 1` ++h=`cat $tv/vertical | cut -d "," -f 1` ++ ++echo "0" > $ovl0/enabled ++echo "0" > $ovl1/enabled ++ ++echo "" > $fb1/overlays ++echo "0,1" > $fb0/overlays ++ ++echo "$w,$h" > $ovl1/output_size ++echo "tv" > $ovl1/manager ++ ++echo "1" > $ovl0/enabled ++echo "1" > $ovl1/enabled ++ ++echo "1" > $tv/enabled ++ ++After this the configuration looks like (only relevant parts shown): ++ ++FB0 +-- GFX ---- LCD ---- LCD ++ \- VID1 ---- TV ---- TV ++ ++Misc notes ++---------- ++ ++OMAP FB allocates the framebuffer memory using the OMAP VRAM allocator. ++ ++Using DSI DPLL to generate pixel clock it is possible produce the pixel clock ++of 86.5MHz (max possible), and with that you get 1280x1024@57 output from DVI. ++ ++Rotation and mirroring currently only supports RGB565 and RGB8888 modes. VRFB ++does not support mirroring. ++ ++VRFB rotation requires much more memory than non-rotated framebuffer, so you ++probably need to increase your vram setting before using VRFB rotation. Also, ++many applications may not work with VRFB if they do not pay attention to all ++framebuffer parameters. ++ ++Kernel boot arguments ++--------------------- ++ ++vram= ++ - Amount of total VRAM to preallocate. For example, "10M". omapfb ++ allocates memory for framebuffers from VRAM. ++ ++omapfb.mode=:[,...] ++ - Default video mode for specified displays. For example, ++ "dvi:800x400MR-24@60". See drivers/video/modedb.c. ++ There are also two special modes: "pal" and "ntsc" that ++ can be used to tv out. ++ ++omapfb.vram=:[@][,...] ++ - VRAM allocated for a framebuffer. Normally omapfb allocates vram ++ depending on the display size. With this you can manually allocate ++ more or define the physical address of each framebuffer. For example, ++ "1:4M" to allocate 4M for fb1. ++ ++omapfb.debug= ++ - Enable debug printing. You have to have OMAPFB debug support enabled ++ in kernel config. ++ ++omapfb.test= ++ - Draw test pattern to framebuffer whenever framebuffer settings change. ++ You need to have OMAPFB debug support enabled in kernel config. ++ ++omapfb.vrfb= ++ - Use VRFB rotation for all framebuffers. ++ ++omapfb.rotate= ++ - Default rotation applied to all framebuffers. ++ 0 - 0 degree rotation ++ 1 - 90 degree rotation ++ 2 - 180 degree rotation ++ 3 - 270 degree rotation ++ ++omapfb.mirror= ++ - Default mirror for all framebuffers. Only works with DMA rotation. ++ ++omapdss.def_disp= ++ - Name of default display, to which all overlays will be connected. ++ Common examples are "lcd" or "tv". ++ ++omapdss.debug= ++ - Enable debug printing. You have to have DSS debug support enabled in ++ kernel config. 
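For illustration, the boot options documented above can be combined on a single kernel command line. The values below are taken from the examples earlier in this text and are placeholders, not defaults:

    vram=10M omapfb.mode=dvi:800x400MR-24@60 omapfb.vram=1:4M omapdss.def_disp=lcd omapdss.debug=1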
++ ++TODO ++---- ++ ++DSS locking ++ ++Error checking ++- Lots of checks are missing or implemented just as BUG() ++ ++System DMA update for DSI ++- Can be used for RGB16 and RGB24P modes. Probably not for RGB24U (how ++ to skip the empty byte?) ++ ++OMAP1 support ++- Not sure if needed ++ +diff -Nurp linux-omap-2.6.28-omap1/Documentation/arm/OMAP/omap_pm kernel-2.6.28-20093908+0m5/Documentation/arm/OMAP/omap_pm +--- linux-omap-2.6.28-omap1/Documentation/arm/OMAP/omap_pm 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/arm/OMAP/omap_pm 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,204 @@ ++ ++The OMAP PM interface ++===================== ++ ++This document describes the temporary OMAP PM interface. Driver ++authors use these functions to communicate minimum latency or ++throughput constraints to the kernel power management code. ++Over time, the intention is to merge features from the OMAP PM ++interface into the Linux PM QoS code. ++ ++This document is divided into two parts: ++ ++ - Rationale ++ - Overview ++ ++ ++Rationale: existing PM interfaces are currently not ideal for OMAP ++------------------------------------------------------------------ ++ ++There are two PM interfaces in use with publicly-distributed OMAP ++Linux code: the TI Shared Resource Framework (SRF) and the Linux PM ++QoS parameters code. Neither interface is currently ideal for Linux ++OMAP code. ++ ++TI Shared Resource Framework: ++ ++The TI CDP tree drivers use the TI Shared Resource Framework (SRF) to ++control chip power management. TI got their "CDP" drivers up and ++running quickly with considerable power savings using the SRF. ++However, the SRF has some problems. Many parameters are specified in ++OMAP-specific terms, such as target OPPs (operating performance ++points), rather than in terms of actual latency or throughput ++requirements. OPPs change depending on OMAP silicon revisions or OMAP ++types, and are meaningless for other architectures, so drivers shared ++between OMAP and other architectures would have to #ifdef out the SRF ++constraints. ++ ++Linux PM QoS parameters: ++ ++In February 2008, the mainline Linux kernel added the Linux PM QoS ++parameters code, located in kernel/pm_qos_params.c. (This code ++replaced the latency management code that was present in earlier ++kernels.) Ideally, OMAP drivers would be able to use this Linux PM ++QoS code directly, but the PM QoS code has some drawbacks: ++ ++- It combines some power management parameters that should be kept ++ separate for maximum power savings on OMAP3. For example, in the PM ++ QoS code, CPU and system DMA wakeup latency are combined into one ++ parameter; but on OMAP3, these are distinct parameters. The Linux ++ PM QoS code also combines all network power management knobs into ++ two non-device-specific parameters. OMAP2/3 systems can have ++ different network devices with different power management ++ requirements - for example, a wired Ethernet interface may have ++ different latency and throughput constraints than a WiFi interface. ++ ++- It does not yet cover all of the power management capabilities of ++ the OMAP3 architecture. It does not express latency constraints on ++ a per-device or per-powerdomain basis; it only covers ++ cpu_dma_latency and network throughput and latency, which would not ++ cover most of the OMAP3 devices. ++ ++The result: drivers using the current Linux PM QoS layer directly are ++unlikely to reach the same level of power efficiency as driver code ++using the SRF. 
++ ++So, the SRF provides significant power savings, but expresses power ++constraints in an OMAP- and silicon-revision-specific way; and the PM ++QoS layer expresses PM constraints in a cross-platform manner (in ++terms of fundamental physical units), but does not support ++per-powerdomain constraints and does not support many of the OMAP power ++management features. ++ ++ ++Overview: A medium-term alternative: the OMAP PM interface ++----------------------------------------------------------- ++ ++Drivers need to express PM parameters which: ++ ++- support the range of power management parameters present in the TI SRF; ++ ++- separate the drivers from the underlying PM parameter ++ implementation, whether it is the TI SRF or Linux PM QoS or Linux ++ latency framework or something else; ++ ++- specify PM parameters in terms of fundamental units, such as ++ latency and throughput, rather than units which are specific to OMAP ++ or to particular OMAP variants; ++ ++- allow drivers which are shared with other architectures (e.g., ++ DaVinci) to add these constraints in a way which won't affect non-OMAP ++ systems, ++ ++- can be implemented immediately with minimal disruption of other ++ architectures. ++ ++ ++This document proposes the OMAP PM interface, including the following ++five power management functions for driver code: ++ ++1. Set the maximum MPU wakeup latency: ++ (*pdata->set_max_mpu_wakeup_lat)(struct device *dev, unsigned long t) ++ ++2. Set the maximum device wakeup latency: ++ (*pdata->set_max_dev_wakeup_lat)(struct device *dev, unsigned long t) ++ ++3. Set the maximum system DMA transfer start latency (CORE pwrdm): ++ (*pdata->set_max_sdma_lat)(struct device *dev, long t) ++ ++4. Set the minimum bus throughput needed by a device: ++ (*pdata->set_min_bus_tput)(struct device *dev, u8 agent_id, unsigned long r) ++ ++5. Return the number of times the device has lost context ++ (*pdata->get_dev_context_loss_count)(struct device *dev) ++ ++ ++Further documentation for all OMAP PM interface functions can be ++found in arch/arm/plat-omap/include/mach/omap-pm.h. ++ ++ ++The OMAP PM layer is intended to be temporary ++--------------------------------------------- ++ ++The intention is that eventually the Linux PM QoS layer should support ++the range of power management features present in OMAP3. As this ++happens, existing drivers using the OMAP PM interface can be modified ++to use the Linux PM QoS code; and the OMAP PM interface can disappear. ++ ++ ++Driver usage of the OMAP PM functions ++------------------------------------- ++ ++As the 'pdata' in the above examples indicates, these functions are ++exposed to drivers through function pointers in driver .platform_data ++structures. The function pointers are initialized by the board-*.c ++files to point to the corresponding OMAP PM functions: ++.set_max_dev_wakeup_lat will point to ++omap_pm_set_max_dev_wakeup_lat(), etc. Other architectures which do ++not support these functions should leave these function pointers set ++to NULL. Drivers should use the following idiom: ++ ++ if (pdata->set_max_dev_wakeup_lat) ++ (*pdata->set_max_dev_wakeup_lat)(dev, t); ++ ++The most common usage of these functions will probably be to specify ++the maximum time from when an interrupt occurs, to when the device ++becomes accessible. 
To accomplish this, driver writers should use the ++set_max_mpu_wakeup_lat() function to to constrain the MPU wakeup ++latency, and the set_max_dev_wakeup_lat() function to constrain the ++device wakeup latency (from clk_enable() to accessibility). For ++example, ++ ++ /* Limit MPU wakeup latency */ ++ if (pdata->set_max_mpu_wakeup_lat) ++ (*pdata->set_max_mpu_wakeup_lat)(dev, tc); ++ ++ /* Limit device powerdomain wakeup latency */ ++ if (pdata->set_max_dev_wakeup_lat) ++ (*pdata->set_max_dev_wakeup_lat)(dev, td); ++ ++ /* total wakeup latency in this example: (tc + td) */ ++ ++The PM parameters can be overwritten by calling the function again ++with the new value. The settings can be removed by calling the ++function with a t argument of -1 (except in the case of ++set_max_bus_tput(), which should be called with an r argument of 0). ++ ++The fifth function above, omap_pm_get_dev_context_loss_count(), ++is intended as an optimization to allow drivers to determine whether the ++device has lost its internal context. If context has been lost, the ++driver must restore its internal context before proceeding. ++ ++ ++Other specialized interface functions ++------------------------------------- ++ ++The five functions listed above are intended to be usable by any ++device driver. DSPBridge and CPUFreq have a few special requirements. ++DSPBridge expresses target DSP performance levels in terms of OPP IDs. ++CPUFreq expresses target MPU performance levels in terms of MPU ++frequency. The OMAP PM interface contains functions for these ++specialized cases to convert that input information (OPPs/MPU ++frequency) into the form that the underlying power management ++implementation needs: ++ ++6. (*pdata->dsp_get_opp_table)(void) ++ ++7. (*pdata->dsp_set_min_opp)(u8 opp_id) ++ ++8. (*pdata->dsp_get_opp)(void) ++ ++9. (*pdata->cpu_get_freq_table)(void) ++ ++10. (*pdata->cpu_set_freq)(unsigned long f) ++ ++11. (*pdata->cpu_get_freq)(void) ++ ++ ++There are also functions for use by the clockdomain layer to indicate ++that a powerdomain should wake up or be put to sleep. These are not called ++via .platform_data. ++ ++12. omap_pm_pwrdm_active(struct powerdomain *pwrdm) ++ ++13. omap_pm_pwrdm_inactive(struct powerdomain *pwrdm) +diff -Nurp linux-omap-2.6.28-omap1/Documentation/arm/OMAP/ssi/ssi kernel-2.6.28-20093908+0m5/Documentation/arm/OMAP/ssi/ssi +--- linux-omap-2.6.28-omap1/Documentation/arm/OMAP/ssi/ssi 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/arm/OMAP/ssi/ssi 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,225 @@ ++OMAP SSI API's How To ++===================== ++ ++The Synchronous Serial Interface (SSI) is a high speed communication interface ++that is used for connecting OMAP to a cellular modem engine. ++ ++The SSI interface supports full duplex communication over multiple channels and ++is capable of reaching speeds up to 110 Mbit/s ++ ++I OMAP SSI driver API overview ++----------------------------- ++ ++A) SSI Bus, SSI channels and protocol drivers overview. ++ ++The OMAP SSI driver is intended to be used inside the kernel by protocol drivers. ++ ++The OMAP SSI abstracts the concept of SSI channels by creating an SSI bus an ++attaching SSI channel devices to it.(see Figure 1) ++ ++Protocol drivers will then claim one or more SSI channels, after registering with the OMAP SSI driver. 
++ ++ +---------------------+ +----------------+ ++ + SSI channel device + + SSI protocol + ++ + (omap_ssi.pX-cY) + <-------+ driver + ++ +---------------------+ +----------------+ ++ | | ++(/sys/bus/ssi/devices/omap_ssi.pX-cy) (/sys/bus/ssi/drivers/ssi_protocol) ++ | | +++---------------------------------------------------------------+ +++ SSI bus + +++---------------------------------------------------------------+ ++ ++ Figure 1. ++ ++(NOTE: omap_ssi.pX-cY represents the SSI channel Y on port X from the omap_ssi ++device) ++ ++B) Data transfers ++ ++The OMAP SSI driver exports an asynchronous interface for sending and receiving ++data over the SSI channels. Protocol drivers will register a set of read and write ++completion callbacks for each SSI channel they use. ++ ++Protocol drivers call ssi_write/ssi_read functions to signal the OMAP SSI driver ++that is willing to write/read data to/from a channel. Transfers are completed only ++when the OMAP SSI driver calls the completion callback. ++ ++An SSI channel can simultaneously have both a read and a write request ++pending, however, requests cannot be queued. ++ ++It is safe to call ssi_write/ssi_read functions inside the callbacks functions. ++In fact, a protocol driver should normally re-issue the read request from within ++the read callback, in order to not miss any incoming messages. ++ ++C) Error handling ++ ++SSI is a multi channel interface but the channels share the same physical wires. ++Therefore, any transmission error potentially affects all the protocol drivers ++that sit on top of the SSI driver. Whenever an error occurs, it is broadcasted to ++all protocol drivers. ++ ++Errors are signaled to the protocol drivers through the port_event callback. ++ ++Completion callbacks functions are only called when a transfer has succeed. ++ ++II OMAP SSI API's ++----------------- ++ ++A) Include ++ ++#include ++ ++B) int register_ssi_driver(struct ssi_device_driver *driver); ++ ++Description: Register an SSI protocol driver ++ ++Parameter: A protocol driver declaration (see struct ssi_device_driver) ++ ++B) void unregister_ssi_driver(struct ssi_device_driver *driver); ++ ++Description: Unregister an SSI protocol driver ++ ++Parameter: A protocol driver declaration (see struct ssi_device_driver) ++ ++C) int ssi_open(struct ssi_device *dev); ++ ++Description: Open an SSI device channel ++ ++Parameter: The SSI channel ++ ++D) int ssi_write(struct ssi_device *dev, u32 *data, unsigned int count); ++ ++Description: Send data through an SSI channel. The transfer is only completed ++when the write_complete callback is called ++ ++Parameters: ++ - dev: SSI channel ++ - data: pointer to the data to send ++ - count: number of 32-bit words to be sent ++ ++E) void ssi_write_cancel(struct ssi_device *dev); ++ ++Description: Cancel current pending write operation ++ ++Parameters: SSI channel ++ ++F) int ssi_read(struct ssi_device *dev, u32 *data, unsigned int w_count); ++ ++Description: Receive data through an SSI channel. 
The transfer is only completed ++when the read_complete callback is called ++ ++Parameters: ++ - dev: SSI channel ++ - data: pointer where to store the data ++ - count: number of 32-bit words to be read ++ ++ ++G) void ssi_read_cancel(struct ssi_device *dev); ++ ++Description: Cancel current pending read operation ++ ++Parameters: SSI channel ++ ++H) int ssi_ioctl(struct ssi_device *dev, unsigned int command, void *arg); ++ ++Description: Apply some control command to the port associated to the given ++SSI channel ++ ++Parameters: ++ - dev: SSI channel ++ - command: command to execute ++ - arg: parameter for the control command ++ ++Commands: ++ - SSI_IOCTL_WAKE_UP: ++ Description: Set SSI wakeup line for the channel ++ Parameters: None ++ - SSI_IOCTL_WAKE_DOWN: ++ Description: Unset SSI wakeup line for the channel ++ Parameters: None ++ - SSI_IOCTL_SEND_BREAK: ++ Description: Send a HW BREAK frame in FRAME mode ++ Parameters: None ++ - SSI_IOCTL_WAKE: ++ Description: Get wakeup line status ++ Parameters: Pointer to a u32 variable to return result ++ (Result: 0 means wakeline DOWN, other result means wakeline UP) ++ ++I)void ssi_close(struct ssi_device *dev); ++ ++Description: Close an SSI channel ++ ++Parameters: The SSI channel to close ++ ++J) void ssi_dev_set_cb( struct ssi_device *dev, ++ void (*r_cb)(struct ssi_device *dev), ++ void (*w_cb)(struct ssi_device *dev)); ++ ++Description: Set the read and write callbacks for the SSI channel. This ++function is usually called in the probe function of the SSI protocol driver to ++set completion callbacks for the asynchronous read and write transfer ++ ++Parameters: ++ - dev: SSI channel ++ - r_cb: Pointer to a callback function to signal that a read transfer is ++ completed ++ - w_cb: Pointer to a callback function to signal that a write transfer ++ is completed ++ ++H) struct ssi_device_driver ++ ++Description: Protocol drivers pass this struct to the register_ssi_driver function ++in order to register with the OMAP SSI driver. Among other things it tells the ++OMAP SSI driver which channels the protocol driver wants to allocate for its use ++ ++Declaration: ++struct ssi_device_driver { ++ unsigned long ctrl_mask; ++ unsigned long ch_mask[SSI_MAX_PORTS]; ++ void (*port_event) (int c_id, unsigned int port, ++ unsigned int event, void *arg); ++ int (*probe)(struct ssi_device *dev); ++ int (*remove)(struct ssi_device *dev); ++ int (*suspend)(struct ssi_device *dev, ++ pm_message_t mesg); ++ int (*resume)(struct ssi_device *dev); ++ struct device_driver driver; ++}; ++ ++Fields description: ++ ctrl_mask: SSI block ids to use ++ ch_mask[SSI_MAX_PORTS]: SSI channels to use ++ port_event: Function callback for notifying SSI events ++ (i.e.: error transfer) ++ Parameters: ++ c_id: SSI Block id which generate the event ++ port: Port number which generate the event ++ event: Event code ++ probe: Probe function ++ Parameters: SSI channel ++ remove: Remove function ++ Parameters: SSI channel ++ ++Example: ++ ++static struct ssi_device_driver ssi_protocol_driver = { ++ .ctrl_mask = ANY_SSI_CONTROLLER, ++ .ch_mask[0] = CHANNEL(0) | CHANNEL(1), ++ .port_event = port_event_callback, ++ .probe = ssi_proto_probe, ++ .remove = __devexit_p(ssi_proto_remove), ++ .driver = { ++ .name = "ssi_protocol", ++ }, ++}; ++ ++ ++III OMAP SSI platform_device ++---------------------------- ++ ++[TBD] Explain interface to configure the ssi device controller. 
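Putting sections I and II together, a minimal protocol driver built only from the calls documented above would claim one channel, install its callbacks from probe() and re-issue the read from inside the read callback so that no incoming message is missed. The sketch below is illustrative only: the example_* names are invented for this text, the SSI header from section II.A must be included, and locking, buffer management and the port_event handler are omitted.

/* Illustrative sketch against the documented SSI API; not a complete driver. */

static u32 example_rx_word;			/* one-word receive buffer */

static void example_read_done(struct ssi_device *dev)
{
	/* Consume example_rx_word here, then re-arm the read so the next
	 * incoming message is not lost (see section I.B). */
	ssi_read(dev, &example_rx_word, 1);
}

static void example_write_done(struct ssi_device *dev)
{
	/* The previous ssi_write() has completed; a new write may be issued. */
}

static int example_probe(struct ssi_device *dev)
{
	int err;

	err = ssi_open(dev);
	if (err < 0)
		return err;

	ssi_dev_set_cb(dev, example_read_done, example_write_done);

	return ssi_read(dev, &example_rx_word, 1);	/* arm the first read */
}

static int example_remove(struct ssi_device *dev)
{
	ssi_read_cancel(dev);
	ssi_close(dev);
	return 0;
}

static struct ssi_device_driver example_ssi_driver = {
	.ctrl_mask	= ANY_SSI_CONTROLLER,
	.ch_mask[0]	= CHANNEL(0),
	.probe		= example_probe,
	.remove		= example_remove,
	.driver		= {
		.name	= "ssi_example",
	},
};

/* register_ssi_driver(&example_ssi_driver) would normally be called from the
 * module init function, unregister_ssi_driver() from the module exit path. */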
++ ++================================================= ++Contact: Carlos Chinea ++Copyright (C) 2008 Nokia Corporation. +diff -Nurp linux-omap-2.6.28-omap1/Documentation/DocBook/mac80211.tmpl kernel-2.6.28-20093908+0m5/Documentation/DocBook/mac80211.tmpl +--- linux-omap-2.6.28-omap1/Documentation/DocBook/mac80211.tmpl 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/DocBook/mac80211.tmpl 2011-09-04 11:31:05.000000000 +0200 +@@ -223,6 +223,17 @@ usage should require reading the full do + !Finclude/net/mac80211.h ieee80211_key_flags + + ++ ++ Powersave support ++!Pinclude/net/mac80211.h Powersave support ++ ++ ++ ++ Beacon filter support ++!Pinclude/net/mac80211.h Beacon filter support ++!Finclude/net/mac80211.h ieee80211_beacon_loss ++ ++ + + Multiple queues and QoS support + TBD +diff -Nurp linux-omap-2.6.28-omap1/Documentation/filesystems/ubifs.txt kernel-2.6.28-20093908+0m5/Documentation/filesystems/ubifs.txt +--- linux-omap-2.6.28-omap1/Documentation/filesystems/ubifs.txt 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/filesystems/ubifs.txt 2011-09-04 11:31:05.000000000 +0200 +@@ -79,13 +79,6 @@ Mount options + + (*) == default. + +-norm_unmount (*) commit on unmount; the journal is committed +- when the file-system is unmounted so that the +- next mount does not have to replay the journal +- and it becomes very fast; +-fast_unmount do not commit on unmount; this option makes +- unmount faster, but the next mount slower +- because of the need to replay the journal. + bulk_read read more in one go to take advantage of flash + media that read faster sequentially + no_bulk_read (*) do not bulk-read +@@ -95,6 +88,9 @@ no_chk_data_crc skip checking of CRCs o + of this option is that corruption of the contents + of a file can go unnoticed. + chk_data_crc (*) do not skip checking CRCs on data nodes ++compr=none override default compressor and set it to "none" ++compr=lzo override default compressor and set it to "lzo" ++compr=zlib override default compressor and set it to "zlib" + + + Quick usage instructions +diff -Nurp linux-omap-2.6.28-omap1/Documentation/filesystems/vfat.txt kernel-2.6.28-20093908+0m5/Documentation/filesystems/vfat.txt +--- linux-omap-2.6.28-omap1/Documentation/filesystems/vfat.txt 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/filesystems/vfat.txt 2011-09-04 11:31:05.000000000 +0200 +@@ -132,6 +132,11 @@ rodir -- FAT has the ATTR_RO (read + If you want to use ATTR_RO as read-only flag even for + the directory, set this option. + ++errors=panic|continue|remount-ro ++ -- specify FAT behavior on critical errors: panic, continue ++ without doing anything or remopunt the partition in ++ read-only mode (default behavior). ++ + : 0,1,yes,no,true,false + + TODO +diff -Nurp linux-omap-2.6.28-omap1/Documentation/tidspbridge/README kernel-2.6.28-20093908+0m5/Documentation/tidspbridge/README +--- linux-omap-2.6.28-omap1/Documentation/tidspbridge/README 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/Documentation/tidspbridge/README 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,70 @@ ++ Linux DSP/BIOS Bridge release ++ ++DSP/BIOS Bridge overview ++======================== ++ ++DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more ++attached DSPs. The GPP is considered the master or "host" processor, and the ++attached DSPs are processing resources that can be utilized by applications ++and drivers running on the GPP. 
++ ++The abstraction that DSP/BIOS Bridge supplies, is a direct link between a GPP ++program and a DSP task. This communication link is partitioned into two ++types of sub-links: messaging (short, fixed-length packets) and data ++streaming (multiple, large buffers). Each sub-link operates independently, ++and features in-order delivery of data, meaning that messages are delivered ++in the order they were submitted to the message link, and stream buffers are ++delivered in the order they were submitted to the stream link. ++ ++In addition, a GPP client can specify what inputs and outputs a DSP task ++uses. DSP tasks typically use message objects for passing control and status ++information and stream objects for efficient streaming of real-time data. ++ ++GPP Software Architecture ++========================= ++ ++A GPP application communicates with its associated DSP task running on the ++DSP subsystem using the DSP/BIOS Bridge API. For example, a GPP audio ++application can use the API to pass messages to a DSP task that is managing ++data flowing from analog-to-digital converters (ADCs) to digital-to-analog ++converters (DACs). ++ ++From the perspective of the GPP OS, the DSP is treated as just another ++peripheral device. Most high level GPP OS typically support a device driver ++model, whereby applications can safely access and share a hardware peripheral ++through standard driver interfaces. Therefore, to allow multiple GPP ++applications to share access to the DSP, the GPP side of DSP/BIOS Bridge ++implements a device driver for the DSP. ++ ++Since driver interfaces are not always standard across GPP OS, and to provide ++some level of interoperability of application code using DSP/BIOS Bridge ++between GPP OS, DSP/BIOS Bridge provides a standard library of APIs which ++wrap calls into the device driver. So, rather than calling GPP OS specific ++driver interfaces, applications (and even other device drivers) can use the ++standard API library directly. ++ ++DSP Software Architecture ++========================= ++ ++For DSP/BIOS, DSP/BIOS Bridge adds a device-independent streaming I/O (STRM) ++interface, a messaging interface (NODE), and a Resource Manager (RM) Server. ++The RM Server runs as a task of DSP/BIOS and is subservient to commands ++and queries from the GPP. It executes commands to start and stop DSP signal ++processing nodes in response to GPP programs making requests through the ++(GPP-side) API. ++ ++DSP tasks started by the RM Server are similar to any other DSP task with two ++important differences: they must follow a specific task model consisting of ++three C-callable functions (node create, execute, and delete), with specific ++sets of arguments, and they have a pre-defined task environment established ++by the RM Server. ++ ++Tasks started by the RM Server communicate using the STRM and NODE interfaces ++and act as servers for their corresponding GPP clients, performing signal ++processing functions as requested by messages sent by their GPP client. ++Typically, a DSP task moves data from source devices to sink devices using ++device independent I/O streams, performing application-specific processing ++and transformations on the data while it is moved. For example, an audio ++task might perform audio decompression (ADPCM, MPEG, CELP) on data received ++from a GPP audio driver and then send the decompressed linear samples to a ++digital-to-analog converter. 
+diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/bcm203x.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/bcm203x.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/bcm203x.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/bcm203x.c 2011-09-04 11:31:05.000000000 +0200 +@@ -37,11 +37,6 @@ + + #include + +-#ifndef CONFIG_BT_HCIBCM203X_DEBUG +-#undef BT_DBG +-#define BT_DBG(D...) +-#endif +- + #define VERSION "1.2" + + static struct usb_device_id bcm203x_table[] = { +@@ -199,7 +194,7 @@ static int bcm203x_probe(struct usb_inte + return -EIO; + } + +- BT_DBG("minidrv data %p size %d", firmware->data, firmware->size); ++ BT_DBG("minidrv data %p size %zu", firmware->data, firmware->size); + + size = max_t(uint, firmware->size, 4096); + +@@ -227,7 +222,7 @@ static int bcm203x_probe(struct usb_inte + return -EIO; + } + +- BT_DBG("firmware data %p size %d", firmware->data, firmware->size); ++ BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); + + data->fw_data = kmalloc(firmware->size, GFP_KERNEL); + if (!data->fw_data) { +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/bfusb.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/bfusb.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/bfusb.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/bfusb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -38,11 +38,6 @@ + #include + #include + +-#ifndef CONFIG_BT_HCIBFUSB_DEBUG +-#undef BT_DBG +-#define BT_DBG(D...) +-#endif +- + #define VERSION "1.2" + + static struct usb_driver bfusb_driver; +@@ -221,7 +216,7 @@ static int bfusb_rx_submit(struct bfusb_ + struct sk_buff *skb; + int err, pipe, size = HCI_MAX_FRAME_SIZE + 32; + +- BT_DBG("bfusb %p urb %p", bfusb, urb); ++ BT_DBG("bfusb %p urb %p", data, urb); + + if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC))) + return -ENOMEM; +@@ -262,8 +257,7 @@ static inline int bfusb_recv_block(struc + + if (hdr & 0x10) { + BT_ERR("%s error in block", data->hdev->name); +- if (data->reassembly) +- kfree_skb(data->reassembly); ++ kfree_skb(data->reassembly); + data->reassembly = NULL; + return -EIO; + } +@@ -354,7 +348,7 @@ static void bfusb_rx_complete(struct urb + int count = urb->actual_length; + int err, hdr, len; + +- BT_DBG("bfusb %p urb %p skb %p len %d", bfusb, urb, skb, skb->len); ++ BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); + + read_lock(&data->lock); + +@@ -691,7 +685,7 @@ static int bfusb_probe(struct usb_interf + goto error; + } + +- BT_DBG("firmware data %p size %d", firmware->data, firmware->size); ++ BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); + + if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) { + BT_ERR("Firmware loading failed"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/bpa10x.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/bpa10x.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/bpa10x.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/bpa10x.c 2011-09-04 11:31:05.000000000 +0200 +@@ -35,11 +35,6 @@ + #include + #include + +-#ifndef CONFIG_BT_HCIBPA10X_DEBUG +-#undef BT_DBG +-#define BT_DBG(D...) 
+-#endif +- + #define VERSION "0.10" + + static struct usb_device_id bpa10x_table[] = { +@@ -489,6 +484,8 @@ static int bpa10x_probe(struct usb_inter + + hdev->owner = THIS_MODULE; + ++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); ++ + err = hci_register_dev(hdev); + if (err < 0) { + hci_free_dev(hdev); +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/btsdio.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/btsdio.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/btsdio.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/btsdio.c 2011-09-04 11:31:05.000000000 +0200 +@@ -37,11 +37,6 @@ + #include + #include + +-#ifndef CONFIG_BT_HCIBTSDIO_DEBUG +-#undef BT_DBG +-#define BT_DBG(D...) +-#endif +- + #define VERSION "0.1" + + static const struct sdio_device_id btsdio_table[] = { +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/btusb.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/btusb.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/btusb.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/btusb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -35,31 +35,25 @@ + #include + #include + +-//#define CONFIG_BT_HCIBTUSB_DEBUG +-#ifndef CONFIG_BT_HCIBTUSB_DEBUG +-#undef BT_DBG +-#define BT_DBG(D...) +-#endif +- +-#define VERSION "0.3" ++#define VERSION "0.4" + + static int ignore_dga; + static int ignore_csr; + static int ignore_sniffer; + static int disable_scofix; + static int force_scofix; +-static int reset; ++ ++static int reset = 1; + + static struct usb_driver btusb_driver; + + #define BTUSB_IGNORE 0x01 +-#define BTUSB_RESET 0x02 +-#define BTUSB_DIGIANSWER 0x04 +-#define BTUSB_CSR 0x08 +-#define BTUSB_SNIFFER 0x10 +-#define BTUSB_BCM92035 0x20 +-#define BTUSB_BROKEN_ISOC 0x40 +-#define BTUSB_WRONG_SCO_MTU 0x80 ++#define BTUSB_DIGIANSWER 0x02 ++#define BTUSB_CSR 0x04 ++#define BTUSB_SNIFFER 0x08 ++#define BTUSB_BCM92035 0x10 ++#define BTUSB_BROKEN_ISOC 0x20 ++#define BTUSB_WRONG_SCO_MTU 0x40 + + static struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ +@@ -79,7 +73,7 @@ static struct usb_device_id btusb_table[ + { USB_DEVICE(0x0bdb, 0x1002) }, + + /* Canyon CN-BTU1 with HID interfaces */ +- { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_RESET }, ++ { USB_DEVICE(0x0c10, 0x0000) }, + + { } /* Terminating entry */ + }; +@@ -94,52 +88,36 @@ static struct usb_device_id blacklist_ta + { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, + + /* Broadcom BCM2035 */ +- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, + + /* Broadcom BCM2045 */ +- { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- +- /* Broadcom BCM2046 */ +- { USB_DEVICE(0x0a5c, 0x2146), .driver_info = BTUSB_RESET }, +- { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, +- +- /* Apple MacBook Pro with Broadcom chip */ +- { USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET }, ++ { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* IBM/Lenovo ThinkPad with Broadcom chip */ +- { 
USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- +- /* Targus ACB10US */ +- { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET }, +- { USB_DEVICE(0x0a5c, 0x2154), .driver_info = BTUSB_RESET }, +- +- /* ANYCOM Bluetooth USB-200 and USB-250 */ +- { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET }, ++ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* HP laptop with Broadcom chip */ +- { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell laptop with Broadcom chip */ +- { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell Wireless 370 */ +- { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell Wireless 410 */ +- { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- +- /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ +- { USB_DEVICE(0x045e, 0x009c), .driver_info = BTUSB_RESET }, ++ { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Kensington Bluetooth USB adapter */ +- { USB_DEVICE(0x047d, 0x105d), .driver_info = BTUSB_RESET }, +- { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, + +- /* ISSC Bluetooth Adapter v3.1 */ +- { USB_DEVICE(0x1131, 0x1001), .driver_info = BTUSB_RESET }, ++ /* Belkin F8T012 and F8T013 devices */ ++ { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* RTX Telecom based adapters with buggy SCO support */ + { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, +@@ -148,13 +126,6 @@ static struct usb_device_id blacklist_ta + /* CONWISE Technology based adapters with buggy SCO support */ + { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC }, + +- /* Belkin F8T012 and F8T013 devices */ +- { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, +- +- /* Belkin F8T016 device */ +- { USB_DEVICE(0x050d, 0x016a), .driver_info = BTUSB_RESET }, +- + /* Digianswer devices */ + { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, + { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, +@@ -197,6 +168,8 @@ struct btusb_data { + struct usb_endpoint_descriptor *isoc_tx_ep; + struct usb_endpoint_descriptor *isoc_rx_ep; + ++ __u8 cmdreq_type; ++ + int isoc_altsetting; + }; + +@@ -589,7 +562,7 @@ static int btusb_send_frame(struct sk_bu + return -ENOMEM; + } + +- dr->bRequestType = USB_TYPE_CLASS; ++ dr->bRequestType = data->cmdreq_type; + dr->bRequest = 0; + dr->wIndex = 0; + dr->wValue = 0; +@@ -828,6 +801,8 @@ static int btusb_probe(struct usb_interf + return -ENODEV; + } + ++ data->cmdreq_type = USB_TYPE_CLASS; ++ + data->udev = interface_to_usbdev(intf); + data->intf = intf; + +@@ -862,11 +837,11 @@ static int btusb_probe(struct usb_interf + + hdev->owner = THIS_MODULE; + +- /* interface numbers are hardcoded in the spec */ ++ 
/* Interface numbers are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, 1); + +- if (reset || id->driver_info & BTUSB_RESET) +- set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); ++ if (!reset) ++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + + if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { + if (!disable_scofix) +@@ -876,9 +851,23 @@ static int btusb_probe(struct usb_interf + if (id->driver_info & BTUSB_BROKEN_ISOC) + data->isoc = NULL; + ++ if (id->driver_info & BTUSB_DIGIANSWER) { ++ data->cmdreq_type = USB_TYPE_VENDOR; ++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); ++ } ++ ++ if (id->driver_info & BTUSB_CSR) { ++ struct usb_device *udev = data->udev; ++ ++ /* Old firmware would otherwise execute USB reset */ ++ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) ++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); ++ } ++ + if (id->driver_info & BTUSB_SNIFFER) { + struct usb_device *udev = data->udev; + ++ /* New sniffer firmware has crippled HCI interface */ + if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_bcsp.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_bcsp.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_bcsp.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_bcsp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -47,11 +47,6 @@ + + #include "hci_uart.h" + +-#ifndef CONFIG_BT_HCIUART_DEBUG +-#undef BT_DBG +-#define BT_DBG( A... ) +-#endif +- + #define VERSION "0.3" + + static int txcrc = 1; +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4.c 2011-09-04 11:31:05.000000000 +0200 +@@ -46,11 +46,6 @@ + + #include "hci_uart.h" + +-#ifndef CONFIG_BT_HCIUART_DEBUG +-#undef BT_DBG +-#define BT_DBG( A... ) +-#endif +- + #define VERSION "1.2" + + struct h4_struct { +@@ -107,8 +102,7 @@ static int h4_close(struct hci_uart *hu) + + skb_queue_purge(&h4->txq); + +- if (h4->rx_skb) +- kfree_skb(h4->rx_skb); ++ kfree_skb(h4->rx_skb); + + hu->priv = NULL; + kfree(h4); +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/core.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/core.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/core.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/core.c 2011-09-04 11:31:05.000000000 +0200 +@@ -1,7 +1,7 @@ + /* + * This file is part of hci_h4p bluetooth driver + * +- * Copyright (C) 2005, 2006 Nokia Corporation. ++ * Copyright (C) 2005-2008 Nokia Corporation. 
+ * + * Contact: Ville Tervo + * +@@ -22,7 +22,6 @@ + */ + + #include +- + #include + #include + #include +@@ -30,14 +29,14 @@ + #include + #include + #include +-#include + #include + #include + #include ++#include + #include ++#include + + #include +-#include + #include + #include + +@@ -47,8 +46,6 @@ + + #include "hci_h4p.h" + +-#define PM_TIMEOUT 200 +- + /* This should be used in function that cannot release clocks */ + static void hci_h4p_set_clk(struct hci_h4p_info *info, int *clock, int enable) + { +@@ -58,21 +55,23 @@ static void hci_h4p_set_clk(struct hci_h + if (enable && !*clock) { + NBT_DBG_POWER("Enabling %p\n", clock); + clk_enable(info->uart_fclk); +-#ifdef CONFIG_ARCH_OMAP2 +- if (cpu_is_omap24xx()) { ++#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) ++ if (cpu_is_omap24xx() || cpu_is_omap34xx()) + clk_enable(info->uart_iclk); +- omap2_block_sleep(); +- } + #endif ++ if (atomic_read(&info->clk_users) == 0) ++ hci_h4p_restore_regs(info); ++ atomic_inc(&info->clk_users); + } ++ + if (!enable && *clock) { + NBT_DBG_POWER("Disabling %p\n", clock); ++ if (atomic_dec_and_test(&info->clk_users)) ++ hci_h4p_store_regs(info); + clk_disable(info->uart_fclk); +-#ifdef CONFIG_ARCH_OMAP2 +- if (cpu_is_omap24xx()) { ++#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) ++ if (cpu_is_omap24xx() || cpu_is_omap34xx()) + clk_disable(info->uart_iclk); +- omap2_allow_sleep(); +- } + #endif + } + +@@ -80,48 +79,70 @@ static void hci_h4p_set_clk(struct hci_h + spin_unlock_irqrestore(&info->clocks_lock, flags); + } + ++static void hci_h4p_lazy_clock_release(unsigned long data) ++{ ++ struct hci_h4p_info *info = (struct hci_h4p_info *)data; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->lock, flags); ++ if (!info->tx_enabled) ++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0); ++ spin_unlock_irqrestore(&info->lock, flags); ++} ++ + /* Power management functions */ +-static void hci_h4p_disable_tx(struct hci_h4p_info *info) ++void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable) + { +- NBT_DBG_POWER("\n"); ++ u8 v; + +- if (!info->pm_enabled) +- return; ++ v = hci_h4p_inb(info, UART_OMAP_SYSC); ++ v &= ~(UART_OMAP_SYSC_IDLEMASK); ++ ++ if (enable) ++ v |= UART_OMAP_SYSC_SMART_IDLE; ++ else ++ v |= UART_OMAP_SYSC_NO_IDLE; + +- mod_timer(&info->tx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); ++ hci_h4p_outb(info, UART_OMAP_SYSC, v); + } + +-static void hci_h4p_enable_tx(struct hci_h4p_info *info) ++static void hci_h4p_disable_tx(struct hci_h4p_info *info) + { + NBT_DBG_POWER("\n"); + + if (!info->pm_enabled) + return; + +- del_timer_sync(&info->tx_pm_timer); +- if (info->tx_pm_enabled) { +- info->tx_pm_enabled = 0; +- hci_h4p_set_clk(info, &info->tx_clocks_en, 1); +- gpio_set_value(info->bt_wakeup_gpio, 1); +- } ++ /* Re-enable smart-idle */ ++ hci_h4p_smart_idle(info, 1); ++ ++ gpio_set_value(info->bt_wakeup_gpio, 0); ++ mod_timer(&info->lazy_release, jiffies + msecs_to_jiffies(100)); ++ info->tx_enabled = 0; + } + +-static void hci_h4p_tx_pm_timer(unsigned long data) ++void hci_h4p_enable_tx(struct hci_h4p_info *info) + { +- struct hci_h4p_info *info; +- ++ unsigned long flags; + NBT_DBG_POWER("\n"); + +- info = (struct hci_h4p_info *)data; ++ if (!info->pm_enabled) ++ return; + +- if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { +- gpio_set_value(info->bt_wakeup_gpio, 0); +- hci_h4p_set_clk(info, &info->tx_clocks_en, 0); +- info->tx_pm_enabled = 1; +- } +- else { +- mod_timer(&info->tx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); +- } ++ 
spin_lock_irqsave(&info->lock, flags); ++ del_timer(&info->lazy_release); ++ hci_h4p_set_clk(info, &info->tx_clocks_en, 1); ++ info->tx_enabled = 1; ++ gpio_set_value(info->bt_wakeup_gpio, 1); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ /* ++ * Disable smart-idle as UART TX interrupts ++ * are not wake-up capable ++ */ ++ hci_h4p_smart_idle(info, 0); ++ ++ spin_unlock_irqrestore(&info->lock, flags); + } + + static void hci_h4p_disable_rx(struct hci_h4p_info *info) +@@ -129,49 +150,39 @@ static void hci_h4p_disable_rx(struct hc + if (!info->pm_enabled) + return; + +- mod_timer(&info->rx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); ++ info->rx_enabled = 0; ++ ++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) ++ return; ++ ++ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) ++ return; ++ ++ __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); ++ info->autorts = 0; ++ hci_h4p_set_clk(info, &info->rx_clocks_en, 0); + } + + static void hci_h4p_enable_rx(struct hci_h4p_info *info) + { +- unsigned long flags; +- + if (!info->pm_enabled) + return; + +- del_timer_sync(&info->rx_pm_timer); +- spin_lock_irqsave(&info->lock, flags); +- if (info->rx_pm_enabled) { +- hci_h4p_set_clk(info, &info->rx_clocks_en, 1); +- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_RDI); +- __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); +- info->rx_pm_enabled = 0; +- } +- spin_unlock_irqrestore(&info->lock, flags); +-} ++ hci_h4p_set_clk(info, &info->rx_clocks_en, 1); ++ info->rx_enabled = 1; + +-static void hci_h4p_rx_pm_timer(unsigned long data) +-{ +- unsigned long flags; +- struct hci_h4p_info *info = (struct hci_h4p_info *)data; ++ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) ++ return; + +- spin_lock_irqsave(&info->lock, flags); +- if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_DR)) { +- __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); +- hci_h4p_set_rts(info, 0); +- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) & ~UART_IER_RDI); +- hci_h4p_set_clk(info, &info->rx_clocks_en, 0); +- info->rx_pm_enabled = 1; +- } +- else { +- mod_timer(&info->rx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); +- } +- spin_unlock_irqrestore(&info->lock, flags); ++ __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); ++ info->autorts = 1; + } + + /* Negotiation functions */ + int hci_h4p_send_alive_packet(struct hci_h4p_info *info) + { ++ unsigned long flags; ++ + NBT_DBG("Sending alive packet\n"); + + if (!info->alive_cmd_skb) +@@ -181,7 +192,10 @@ int hci_h4p_send_alive_packet(struct hci + info->alive_cmd_skb = skb_get(info->alive_cmd_skb); + + skb_queue_tail(&info->txq, info->alive_cmd_skb); +- tasklet_schedule(&info->tx_task); ++ spin_lock_irqsave(&info->lock, flags); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ spin_unlock_irqrestore(&info->lock, flags); + + NBT_DBG("Alive packet sent\n"); + +@@ -191,67 +205,75 @@ int hci_h4p_send_alive_packet(struct hci + static void hci_h4p_alive_packet(struct hci_h4p_info *info, struct sk_buff *skb) + { + NBT_DBG("Received alive packet\n"); +- if (skb->data[1] == 0xCC) { +- complete(&info->init_completion); ++ if (skb->data[1] != 0xCC) { ++ dev_err(info->dev, "Could not negotiate hci_h4p settings\n"); ++ info->init_error = -EINVAL; + } + ++ complete(&info->init_completion); + kfree_skb(skb); + } + +-static int hci_h4p_send_negotiation(struct hci_h4p_info *info, struct sk_buff *skb) ++static int hci_h4p_send_negotiation(struct hci_h4p_info *info, ++ struct sk_buff *skb) + { 
++ unsigned long flags;
++ int err;
+ NBT_DBG("Sending negotiation..\n");
+
+ hci_h4p_change_speed(info, INIT_SPEED);
+
++ hci_h4p_set_rts(info, 1);
+ info->init_error = 0;
+ init_completion(&info->init_completion);
+ skb_queue_tail(&info->txq, skb);
+- tasklet_schedule(&info->tx_task);
++ spin_lock_irqsave(&info->lock, flags);
++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
++ UART_IER_THRI);
++ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+- NBT_DBG("Negotiation sent\n");
+- return info->init_error;
+-}
++ if (info->init_error < 0)
++ return info->init_error;
+
+-static void hci_h4p_negotiation_packet(struct hci_h4p_info *info,
+- struct sk_buff *skb)
+-{
+- int err = 0;
++ /* Change to operational settings */
++ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
++ hci_h4p_set_rts(info, 0);
++ hci_h4p_change_speed(info, MAX_BAUD_RATE);
+
+- if (skb->data[1] == 0x20) {
+- /* Change to operational settings */
+- hci_h4p_set_rts(info, 0);
++ err = hci_h4p_wait_for_cts(info, 1, 100);
++ if (err < 0)
++ return err;
+
+- err = hci_h4p_wait_for_cts(info, 0, 100);
+- if (err < 0)
+- goto neg_ret;
++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
++ init_completion(&info->init_completion);
++ err = hci_h4p_send_alive_packet(info);
+
+- hci_h4p_change_speed(info, MAX_BAUD_RATE);
++ if (err < 0)
++ return err;
+
+- err = hci_h4p_wait_for_cts(info, 1, 100);
+- if (err < 0)
+- goto neg_ret;
++ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
++ msecs_to_jiffies(1000)))
++ return -ETIMEDOUT;
+
+- hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS);
++ if (info->init_error < 0)
++ return info->init_error;
+
+- err = hci_h4p_send_alive_packet(info);
+- if (err < 0)
+- goto neg_ret;
+- } else {
++ NBT_DBG("Negotiation successful\n");
++ return 0;
++}
++
++static void hci_h4p_negotiation_packet(struct hci_h4p_info *info,
++ struct sk_buff *skb)
++{
++ if (skb->data[1] != 0x20) {
+ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
+- err = -EINVAL;
+- goto neg_ret;
++ info->init_error = -EINVAL;
+ }
+
+- kfree_skb(skb);
+- return;
+-
+-neg_ret:
+- info->init_error = err;
+ complete(&info->init_completion);
+ kfree_skb(skb);
+ }
+@@ -272,11 +294,14 @@ static int hci_h4p_get_hdr_len(struct hc
+ retval = HCI_SCO_HDR_SIZE;
+ break;
+ case H4_NEG_PKT:
+- retval = 11;
++ retval = 13;
+ break;
+ case H4_ALIVE_PKT:
+ retval = 3;
+ break;
++ case H4_RADIO_PKT:
++ retval = H4_RADIO_HDR_SIZE;
++ break;
+ default:
+ dev_err(info->dev, "Unknown H4 packet type 0x%.2x\n", pkt_type);
+ retval = -1;
+@@ -293,6 +318,7 @@ static unsigned int hci_h4p_get_data_len
+ struct hci_event_hdr *evt_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct hci_sco_hdr *sco_hdr;
++ struct hci_h4p_radio_hdr *radio_hdr;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case H4_EVT_PKT:
+@@ -307,9 +333,11 @@ static unsigned int hci_h4p_get_data_len
+ sco_hdr = (struct hci_sco_hdr *)skb->data;
+ retval = sco_hdr->dlen;
+ break;
+- case H4_NEG_PKT:
+- retval = 0;
++ case H4_RADIO_PKT:
++ radio_hdr = (struct hci_h4p_radio_hdr *)skb->data;
++ retval = radio_hdr->dlen;
+ break;
++ case H4_NEG_PKT:
+ case H4_ALIVE_PKT:
+ retval = 0;
+ break;
+@@ -331,10 +359,72 @@ static inline void hci_h4p_recv_frame(st
+ }
+ }
+
++static inline void hci_h4p_handle_byte(struct hci_h4p_info *info, u8 byte)
++{
++ switch (info->rx_state) {
++ case WAIT_FOR_PKT_TYPE:
++ bt_cb(info->rx_skb)->pkt_type = byte;
++ info->rx_count = hci_h4p_get_hdr_len(info, byte);
++ if (info->rx_count < 0) {
++ info->hdev->stat.err_rx++;
++ kfree_skb(info->rx_skb);
++ info->rx_skb = NULL;
++ } else {
++ info->rx_state = WAIT_FOR_HEADER;
++ }
++ break;
++ case WAIT_FOR_HEADER:
++ info->rx_count--;
++ *skb_put(info->rx_skb, 1) = byte;
++ if (info->rx_count == 0) {
++ info->rx_count = hci_h4p_get_data_len(info,
++ info->rx_skb);
++ if (info->rx_count > skb_tailroom(info->rx_skb)) {
++ dev_err(info->dev, "Too long frame.\n");
++ info->garbage_bytes = info->rx_count -
++ skb_tailroom(info->rx_skb);
++ kfree_skb(info->rx_skb);
++ info->rx_skb = NULL;
++ break;
++ }
++ info->rx_state = WAIT_FOR_DATA;
++
++ if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
++ hci_h4p_negotiation_packet(info, info->rx_skb);
++ info->rx_skb = NULL;
++ info->rx_state = WAIT_FOR_PKT_TYPE;
++ return;
++ }
++ if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
++ hci_h4p_alive_packet(info, info->rx_skb);
++ info->rx_skb = NULL;
++ info->rx_state = WAIT_FOR_PKT_TYPE;
++ return;
++ }
++ }
++ break;
++ case WAIT_FOR_DATA:
++ info->rx_count--;
++ *skb_put(info->rx_skb, 1) = byte;
++ break;
++ default:
++ WARN_ON(1);
++ break;
++ }
++
++ if (info->rx_count == 0) {
++ /* H4+ devices should always send word aligned
++ * packets */
++ if (!(info->rx_skb->len % 2))
++ info->garbage_bytes++;
++ hci_h4p_recv_frame(info, info->rx_skb);
++ info->rx_skb = NULL;
++ }
++}
++
+ static void hci_h4p_rx_tasklet(unsigned long data)
+ {
+ u8 byte;
+- unsigned long flags;
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+
+ NBT_DBG("tasklet woke up\n");
+@@ -347,80 +437,33 @@ static void hci_h4p_rx_tasklet(unsigned
+ continue;
+ }
+ if (info->rx_skb == NULL) {
+- info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC | GFP_DMA);
++ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE,
++ GFP_ATOMIC | GFP_DMA);
+ if (!info->rx_skb) {
+- dev_err(info->dev, "Can't allocate memory for new packet\n");
+- goto finish_task;
++ dev_err(info->dev,
++ "No memory for new packet\n");
++ goto finish_rx;
+ }
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_skb->dev = (void *)info->hdev;
+ }
+ info->hdev->stat.byte_rx++;
+ NBT_DBG_TRANSFER_NF("0x%.2x ", byte);
+- switch (info->rx_state) {
+- case WAIT_FOR_PKT_TYPE:
+- bt_cb(info->rx_skb)->pkt_type = byte;
+- info->rx_count = hci_h4p_get_hdr_len(info, byte);
+- if (info->rx_count < 0) {
+- info->hdev->stat.err_rx++;
+- kfree_skb(info->rx_skb);
+- info->rx_skb = NULL;
+- } else {
+- info->rx_state = WAIT_FOR_HEADER;
+- }
+- break;
+- case WAIT_FOR_HEADER:
+- info->rx_count--;
+- *skb_put(info->rx_skb, 1) = byte;
+- if (info->rx_count == 0) {
+- info->rx_count = hci_h4p_get_data_len(info, info->rx_skb);
+- if (info->rx_count > skb_tailroom(info->rx_skb)) {
+- dev_err(info->dev, "Frame is %ld bytes too long.\n",
+- info->rx_count - skb_tailroom(info->rx_skb));
+- kfree_skb(info->rx_skb);
+- info->rx_skb = NULL;
+- info->garbage_bytes = info->rx_count - skb_tailroom(info->rx_skb);
+- break;
+- }
+- info->rx_state = WAIT_FOR_DATA;
+-
+- if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
+- hci_h4p_negotiation_packet(info, info->rx_skb);
+- info->rx_skb = NULL;
+- info->rx_state = WAIT_FOR_PKT_TYPE;
+- goto finish_task;
+- }
+- if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
+- hci_h4p_alive_packet(info, info->rx_skb);
+- info->rx_skb = NULL;
+- info->rx_state = WAIT_FOR_PKT_TYPE;
+- goto finish_task;
+- }
+- }
+- break;
+- case WAIT_FOR_DATA:
+- info->rx_count--;
+- *skb_put(info->rx_skb, 1) = byte;
+- if (info->rx_count ==
0) { +- /* H4+ devices should allways send word aligned packets */ +- if (!(info->rx_skb->len % 2)) { +- info->garbage_bytes++; +- } +- hci_h4p_recv_frame(info, info->rx_skb); +- info->rx_skb = NULL; +- } +- break; +- default: +- WARN_ON(1); +- break; +- } ++ hci_h4p_handle_byte(info, byte); + } + +-finish_task: +- spin_lock_irqsave(&info->lock, flags); +- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_RDI); +- spin_unlock_irqrestore(&info->lock, flags); ++ if (!info->rx_enabled) { ++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT && ++ info->autorts) { ++ __hci_h4p_set_auto_ctsrts(info, 0 , UART_EFR_RTS); ++ info->autorts = 0; ++ } ++ /* Flush posted write to avoid spurious interrupts */ ++ hci_h4p_inb(info, UART_OMAP_SCR); ++ hci_h4p_set_clk(info, &info->rx_clocks_en, 0); ++ } + ++finish_rx: + NBT_DBG_TRANSFER_NF("\n"); + NBT_DBG("rx_ended\n"); + } +@@ -428,19 +471,48 @@ finish_task: + static void hci_h4p_tx_tasklet(unsigned long data) + { + unsigned int sent = 0; +- unsigned long flags; + struct sk_buff *skb; + struct hci_h4p_info *info = (struct hci_h4p_info *)data; + + NBT_DBG("tasklet woke up\n"); + NBT_DBG_TRANSFER("tx_tasklet woke up\n data "); + ++ if (info->autorts != info->rx_enabled) { ++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { ++ if (info->autorts && !info->rx_enabled) { ++ __hci_h4p_set_auto_ctsrts(info, 0, ++ UART_EFR_RTS); ++ info->autorts = 0; ++ } ++ if (!info->autorts && info->rx_enabled) { ++ __hci_h4p_set_auto_ctsrts(info, 1, ++ UART_EFR_RTS); ++ info->autorts = 1; ++ } ++ } else { ++ hci_h4p_outb(info, UART_OMAP_SCR, ++ hci_h4p_inb(info, UART_OMAP_SCR) | ++ UART_OMAP_SCR_EMPTY_THR); ++ goto finish_tx; ++ } ++ } ++ + skb = skb_dequeue(&info->txq); + if (!skb) { + /* No data in buffer */ + NBT_DBG("skb ready\n"); +- hci_h4p_disable_tx(info); +- return; ++ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { ++ hci_h4p_outb(info, UART_IER, ++ hci_h4p_inb(info, UART_IER) & ++ ~UART_IER_THRI); ++ hci_h4p_inb(info, UART_OMAP_SCR); ++ hci_h4p_disable_tx(info); ++ return; ++ } else ++ hci_h4p_outb(info, UART_OMAP_SCR, ++ hci_h4p_inb(info, UART_OMAP_SCR) | ++ UART_OMAP_SCR_EMPTY_THR); ++ goto finish_tx; + } + + /* Copy data to tx fifo */ +@@ -460,9 +532,15 @@ static void hci_h4p_tx_tasklet(unsigned + skb_queue_head(&info->txq, skb); + } + +- spin_lock_irqsave(&info->lock, flags); +- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_THRI); +- spin_unlock_irqrestore(&info->lock, flags); ++ hci_h4p_outb(info, UART_OMAP_SCR, hci_h4p_inb(info, UART_OMAP_SCR) & ++ ~UART_OMAP_SCR_EMPTY_THR); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ ++finish_tx: ++ /* Flush posted write to avoid spurious interrupts */ ++ hci_h4p_inb(info, UART_OMAP_SCR); ++ + } + + static irqreturn_t hci_h4p_interrupt(int irq, void *data) +@@ -470,13 +548,11 @@ static irqreturn_t hci_h4p_interrupt(int + struct hci_h4p_info *info = (struct hci_h4p_info *)data; + u8 iir, msr; + int ret; +- unsigned long flags; + + ret = IRQ_NONE; + + iir = hci_h4p_inb(info, UART_IIR); + if (iir & UART_IIR_NO_INT) { +- dev_err(info->dev, "Interrupt but no reason irq 0x%.2x\n", iir); + return IRQ_HANDLED; + } + +@@ -495,18 +571,12 @@ static irqreturn_t hci_h4p_interrupt(int + } + + if (iir == UART_IIR_RDI) { +- spin_lock_irqsave(&info->lock, flags); +- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) & ~UART_IER_RDI); +- spin_unlock_irqrestore(&info->lock, flags); +- tasklet_schedule(&info->rx_task); ++ hci_h4p_rx_tasklet((unsigned 
long)data);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_THRI) {
+- spin_lock_irqsave(&info->lock, flags);
+- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) & ~UART_IER_THRI);
+- spin_unlock_irqrestore(&info->lock, flags);
+- tasklet_schedule(&info->tx_task);
++ hci_h4p_tx_tasklet((unsigned long)data);
+ ret = IRQ_HANDLED;
+ }
+
+@@ -529,6 +599,11 @@ static irqreturn_t hci_h4p_wakeup_interr
+
+ should_wakeup = gpio_get_value(info->host_wakeup_gpio);
+ NBT_DBG_POWER("gpio interrupt %d\n", should_wakeup);
++
++ /* Check if we have missed some interrupts */
++ if (info->rx_enabled == should_wakeup)
++ return IRQ_HANDLED;
++
+ if (should_wakeup) {
+ hci_h4p_enable_rx(info);
+ } else {
+@@ -542,16 +617,20 @@ static int hci_h4p_reset(struct hci_h4p_
+ {
+ int err;
+
++ err = hci_h4p_reset_uart(info);
++ if (err < 0) {
++ dev_err(info->dev, "Uart reset failed\n");
++ return err;
++ }
+ hci_h4p_init_uart(info);
+ hci_h4p_set_rts(info, 0);
+
+ gpio_set_value(info->reset_gpio, 0);
+- msleep(100);
+ gpio_set_value(info->bt_wakeup_gpio, 1);
++ msleep(10);
+ gpio_set_value(info->reset_gpio, 1);
+- msleep(100);
+
+- err = hci_h4p_wait_for_cts(info, 1, 10);
++ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err < 0) {
+ dev_err(info->dev, "No cts from bt chip\n");
+ return err;
+@@ -579,6 +658,7 @@ static int hci_h4p_hci_open(struct hci_d
+ int err;
+ struct sk_buff *neg_cmd_skb;
+ struct sk_buff_head fw_queue;
++ unsigned long flags;
+
+ info = hdev->driver_data;
+
+@@ -602,38 +682,45 @@ static int hci_h4p_hci_open(struct hci_d
+ goto err_clean;
+ }
+
+- hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+- hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+-
+- tasklet_enable(&info->tx_task);
+- tasklet_enable(&info->rx_task);
++ info->rx_enabled = 1;
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_count = 0;
+ info->garbage_bytes = 0;
+ info->rx_skb = NULL;
+ info->pm_enabled = 0;
+ init_completion(&info->fw_completion);
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+
+ err = hci_h4p_reset(info);
+ if (err < 0)
+ goto err_clean;
+
++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS);
++ info->autorts = 1;
+ err = hci_h4p_send_negotiation(info, neg_cmd_skb);
+ neg_cmd_skb = NULL;
+ if (err < 0)
+ goto err_clean;
+
++
+ err = hci_h4p_send_fw(info, &fw_queue);
+ if (err < 0) {
+ dev_err(info->dev, "Sending firmware failed.\n");
+ goto err_clean;
+ }
+
++ info->pm_enabled = 1;
++
++ spin_lock_irqsave(&info->lock, flags);
++ info->rx_enabled = gpio_get_value(info->host_wakeup_gpio);
++ hci_h4p_set_clk(info, &info->rx_clocks_en, info->rx_enabled);
++ spin_unlock_irqrestore(&info->lock, flags);
++
++ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
++
+ kfree_skb(info->alive_cmd_skb);
+ info->alive_cmd_skb = NULL;
+- info->pm_enabled = 1;
+- info->tx_pm_enabled = 1;
+- info->rx_pm_enabled = 0;
+ set_bit(HCI_RUNNING, &hdev->flags);
+
+ NBT_DBG("hci up and running\n");
+@@ -641,9 +728,8 @@ static int hci_h4p_hci_open(struct hci_d
+
+ err_clean:
+ hci_h4p_hci_flush(hdev);
+- tasklet_disable(&info->tx_task);
+- tasklet_disable(&info->rx_task);
+ hci_h4p_reset_uart(info);
++ del_timer_sync(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ gpio_set_value(info->reset_gpio, 0);
+@@ -666,13 +752,10 @@ static int hci_h4p_hci_close(struct hci_
+ return 0;
+
+ hci_h4p_hci_flush(hdev);
+- del_timer_sync(&info->tx_pm_timer);
+- del_timer_sync(&info->rx_pm_timer);
+-
tasklet_disable(&info->tx_task); +- tasklet_disable(&info->rx_task); + hci_h4p_set_clk(info, &info->tx_clocks_en, 1); + hci_h4p_set_clk(info, &info->rx_clocks_en, 1); + hci_h4p_reset_uart(info); ++ del_timer_sync(&info->lazy_release); + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + hci_h4p_set_clk(info, &info->rx_clocks_en, 0); + gpio_set_value(info->reset_gpio, 0); +@@ -723,18 +806,20 @@ static int hci_h4p_hci_send_frame(struct + /* We should allways send word aligned data to h4+ devices */ + if (skb->len % 2) { + err = skb_pad(skb, 1); ++ if (!err) ++ *skb_put(skb, 1) = 0x00; + } + if (err) + return err; + +- hci_h4p_enable_tx(info); + skb_queue_tail(&info->txq, skb); +- tasklet_schedule(&info->tx_task); ++ hci_h4p_enable_tx(info); + + return 0; + } + +-static int hci_h4p_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) ++static int hci_h4p_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, ++ unsigned long arg) + { + return -ENOIOCTLCMD; + } +@@ -761,11 +846,12 @@ static int hci_h4p_register_hdev(struct + hdev->send = hci_h4p_hci_send_frame; + hdev->destruct = hci_h4p_hci_destruct; + hdev->ioctl = hci_h4p_hci_ioctl; ++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + + hdev->owner = THIS_MODULE; + + if (hci_register_dev(hdev) < 0) { +- dev_err(info->dev, "hci_h4p: Can't register HCI device %s.\n", hdev->name); ++ dev_err(info->dev, "hci_register failed %s.\n", hdev->name); + return -ENODEV; + } + +@@ -785,28 +871,19 @@ static int hci_h4p_probe(struct platform + + info->dev = &pdev->dev; + info->pm_enabled = 0; +- info->tx_pm_enabled = 0; +- info->rx_pm_enabled = 0; ++ info->tx_enabled = 1; ++ info->rx_enabled = 1; + info->garbage_bytes = 0; + info->tx_clocks_en = 0; + info->rx_clocks_en = 0; +- tasklet_init(&info->tx_task, hci_h4p_tx_tasklet, (unsigned long)info); +- tasklet_init(&info->rx_task, hci_h4p_rx_tasklet, (unsigned long)info); +- /* hci_h4p_hci_open assumes that tasklet is disabled in startup */ +- tasklet_disable(&info->tx_task); +- tasklet_disable(&info->rx_task); ++ irq = 0; + spin_lock_init(&info->lock); + spin_lock_init(&info->clocks_lock); + skb_queue_head_init(&info->txq); +- init_timer(&info->tx_pm_timer); +- info->tx_pm_timer.function = hci_h4p_tx_pm_timer; +- info->tx_pm_timer.data = (unsigned long)info; +- init_timer(&info->rx_pm_timer); +- info->rx_pm_timer.function = hci_h4p_rx_pm_timer; +- info->rx_pm_timer.data = (unsigned long)info; + + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "Could not get Bluetooth config data\n"); ++ kfree(info); + return -ENODATA; + } + +@@ -823,33 +900,30 @@ static int hci_h4p_probe(struct platform + NBT_DBG("Uart: %d\n", bt_config->bt_uart); + NBT_DBG("sysclk: %d\n", info->bt_sysclk); + +- err = gpio_request(info->reset_gpio, "BT reset"); ++ err = gpio_request(info->reset_gpio, "bt_reset"); + if (err < 0) { + dev_err(&pdev->dev, "Cannot get GPIO line %d\n", + info->reset_gpio); +- kfree(info); +- goto cleanup; ++ goto cleanup_setup; + } + +- err = gpio_request(info->bt_wakeup_gpio, "BT wakeup"); ++ err = gpio_request(info->bt_wakeup_gpio, "bt_wakeup"); + if (err < 0) + { + dev_err(info->dev, "Cannot get GPIO line 0x%d", + info->bt_wakeup_gpio); + gpio_free(info->reset_gpio); +- kfree(info); +- goto cleanup; ++ goto cleanup_setup; + } + +- err = gpio_request(info->host_wakeup_gpio, "BT host wakeup"); ++ err = gpio_request(info->host_wakeup_gpio, "host_wakeup"); + if (err < 0) + { + dev_err(info->dev, "Cannot get GPIO line %d", + info->host_wakeup_gpio); + gpio_free(info->reset_gpio); + 
gpio_free(info->bt_wakeup_gpio); +- kfree(info); +- goto cleanup; ++ goto cleanup_setup; + } + + gpio_direction_output(info->reset_gpio, 0); +@@ -866,10 +940,7 @@ static int hci_h4p_probe(struct platform + info->uart_iclk = clk_get(NULL, "uart1_ick"); + info->uart_fclk = clk_get(NULL, "uart1_fck"); + } +- /* FIXME: Use platform_get_resource for the port */ +- info->uart_base = ioremap(OMAP_UART1_BASE, 0x16); +- if (!info->uart_base) +- goto cleanup; ++ info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART1_BASE); + break; + case 2: + if (cpu_is_omap16xx()) { +@@ -880,10 +951,7 @@ static int hci_h4p_probe(struct platform + info->uart_iclk = clk_get(NULL, "uart2_ick"); + info->uart_fclk = clk_get(NULL, "uart2_fck"); + } +- /* FIXME: Use platform_get_resource for the port */ +- info->uart_base = ioremap(OMAP_UART2_BASE, 0x16); +- if (!info->uart_base) +- goto cleanup; ++ info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART2_BASE); + break; + case 3: + if (cpu_is_omap16xx()) { +@@ -894,10 +962,7 @@ static int hci_h4p_probe(struct platform + info->uart_iclk = clk_get(NULL, "uart3_ick"); + info->uart_fclk = clk_get(NULL, "uart3_fck"); + } +- /* FIXME: Use platform_get_resource for the port */ +- info->uart_base = ioremap(OMAP_UART3_BASE, 0x16); +- if (!info->uart_base) +- goto cleanup; ++ info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART3_BASE); + break; + default: + dev_err(info->dev, "No uart defined\n"); +@@ -905,71 +970,83 @@ static int hci_h4p_probe(struct platform + } + + info->irq = irq; +- err = request_irq(irq, hci_h4p_interrupt, 0, "hci_h4p", (void *)info); ++ err = request_irq(irq, hci_h4p_interrupt, IRQF_DISABLED, "hci_h4p", ++ info); + if (err < 0) { + dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n", irq); + goto cleanup; + } + + err = request_irq(gpio_to_irq(info->host_wakeup_gpio), +- hci_h4p_wakeup_interrupt, +- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, +- "hci_h4p_wkup", (void *)info); ++ hci_h4p_wakeup_interrupt, IRQF_TRIGGER_FALLING | ++ IRQF_TRIGGER_RISING | IRQF_DISABLED, ++ "hci_h4p_wkup", info); + if (err < 0) { + dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n", + gpio_to_irq(info->host_wakeup_gpio)); +- free_irq(irq, (void *)info); ++ free_irq(irq, info); ++ goto cleanup; ++ } ++ ++ err = set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1); ++ if (err < 0) { ++ dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n", ++ gpio_to_irq(info->host_wakeup_gpio)); ++ free_irq(irq, info); ++ free_irq(gpio_to_irq(info->host_wakeup_gpio), info); + goto cleanup; + } + ++ init_timer_deferrable(&info->lazy_release); ++ info->lazy_release.function = hci_h4p_lazy_clock_release; ++ info->lazy_release.data = (unsigned long)info; + hci_h4p_set_clk(info, &info->tx_clocks_en, 1); +- hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_CTS | UART_EFR_RTS); +- err = hci_h4p_init_uart(info); ++ err = hci_h4p_reset_uart(info); + if (err < 0) + goto cleanup_irq; ++ hci_h4p_init_uart(info); ++ hci_h4p_set_rts(info, 0); + err = hci_h4p_reset(info); ++ hci_h4p_reset_uart(info); + if (err < 0) + goto cleanup_irq; +- err = hci_h4p_wait_for_cts(info, 1, 10); +- if (err < 0) +- goto cleanup_irq; ++ gpio_set_value(info->reset_gpio, 0); + hci_h4p_set_clk(info, &info->tx_clocks_en, 0); + + platform_set_drvdata(pdev, info); +- err = hci_h4p_sysfs_create_files(info->dev); +- if (err < 0) +- goto cleanup_irq; + + if (hci_h4p_register_hdev(info) < 0) { + dev_err(info->dev, "failed to register hci_h4p hci device\n"); + goto cleanup_irq; + } +- gpio_set_value(info->reset_gpio, 0); + + return 0; + + cleanup_irq: + 
free_irq(irq, (void *)info); +- free_irq(gpio_to_irq(info->host_wakeup_gpio), (void *)info); ++ free_irq(gpio_to_irq(info->host_wakeup_gpio), info); + cleanup: + gpio_set_value(info->reset_gpio, 0); + gpio_free(info->reset_gpio); + gpio_free(info->bt_wakeup_gpio); + gpio_free(info->host_wakeup_gpio); +- kfree(info); + ++cleanup_setup: ++ ++ kfree(info); + return err; + + } + +-static int hci_h4p_remove(struct platform_device *dev) ++static int hci_h4p_remove(struct platform_device *pdev) + { + struct hci_h4p_info *info; + +- info = platform_get_drvdata(dev); ++ info = platform_get_drvdata(pdev); + + hci_h4p_hci_close(info->hdev); +- free_irq(gpio_to_irq(info->host_wakeup_gpio), (void *) info); ++ free_irq(gpio_to_irq(info->host_wakeup_gpio), info); ++ hci_unregister_dev(info->hdev); + hci_free_dev(info->hdev); + gpio_free(info->reset_gpio); + gpio_free(info->bt_wakeup_gpio); +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-bcm.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw-bcm.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-bcm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw-bcm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,161 @@ ++/* ++ * This file is part of hci_h4p bluetooth driver ++ * ++ * Copyright (C) 2005-2008 Nokia Corporation. ++ * ++ * Contact: Ville Tervo ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ * ++ */ ++ ++#include ++#include ++#include ++ ++#include "hci_h4p.h" ++ ++static struct sk_buff_head *fw_q; ++ ++static int inject_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb) ++{ ++ unsigned int offset; ++ int i; ++ struct omap_bluetooth_config *config; ++ ++ config = info->dev->platform_data; ++ ++ if (!config) ++ return -ENODEV; ++ ++ if (skb->len < 10) { ++ dev_info(info->dev, "Valid bluetooth address not found.\n"); ++ return -ENODATA; ++ } ++ ++ offset = 4; ++ skb->data[offset + 5] = config->bd_addr[0]; ++ skb->data[offset + 4] = config->bd_addr[1]; ++ skb->data[offset + 3] = config->bd_addr[2]; ++ skb->data[offset + 2] = config->bd_addr[3]; ++ skb->data[offset + 1] = config->bd_addr[4]; ++ skb->data[offset + 0] = config->bd_addr[5]; ++ ++ for (i = 0; i < 6; i++) { ++ if (config->bd_addr[i] != 0x00) ++ break; ++ } ++ ++ if (i > 5) { ++ dev_info(info->dev, "Valid bluetooth address not found.\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb) ++{ ++ struct sk_buff *fw_skb; ++ int err; ++ unsigned long flags; ++ ++ if (skb->data[5] != 0x00) { ++ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n", ++ skb->data[5]); ++ info->fw_error = -EPROTO; ++ } ++ ++ kfree_skb(skb); ++ ++ fw_skb = skb_dequeue(fw_q); ++ if (fw_skb == NULL || info->fw_error) { ++ complete(&info->fw_completion); ++ return; ++ } ++ ++ if (fw_skb->data[1] == 0x01 && fw_skb->data[2] == 0xfc) { ++ NBT_DBG_FW("Injecting bluetooth address\n"); ++ err = inject_bdaddr(info, fw_skb); ++ if (err < 0) { ++ kfree_skb(fw_skb); ++ info->fw_error = err; ++ complete(&info->fw_completion); ++ return; ++ } ++ } ++ ++ skb_queue_tail(&info->txq, fw_skb); ++ spin_lock_irqsave(&info->lock, flags); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ spin_unlock_irqrestore(&info->lock, flags); ++} ++ ++ ++int hci_h4p_bcm_send_fw(struct hci_h4p_info *info, ++ struct sk_buff_head *fw_queue) ++{ ++ struct sk_buff *skb; ++ unsigned long flags, time; ++ ++ info->fw_error = 0; ++ ++ NBT_DBG_FW("Sending firmware\n"); ++ ++ time = jiffies; ++ ++ fw_q = fw_queue; ++ skb = skb_dequeue(fw_queue); ++ if (!skb) ++ return -ENODATA; ++ ++ NBT_DBG_FW("Sending commands\n"); ++ ++ /* ++ * Disable smart-idle as UART TX interrupts ++ * are not wake-up capable ++ */ ++ hci_h4p_smart_idle(info, 0); ++ ++ /* Check if this is bd_address packet */ ++ init_completion(&info->fw_completion); ++ skb_queue_tail(&info->txq, skb); ++ spin_lock_irqsave(&info->lock, flags); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ spin_unlock_irqrestore(&info->lock, flags); ++ ++ if (!wait_for_completion_timeout(&info->fw_completion, ++ msecs_to_jiffies(2000))) { ++ dev_err(info->dev, "No reply to fw command\n"); ++ return -ETIMEDOUT; ++ } ++ ++ if (info->fw_error) { ++ dev_err(info->dev, "FW error\n"); ++ return -EPROTO; ++ } ++ ++ NBT_DBG_FW("Firmware sent in %d msecs\n", ++ jiffies_to_msecs(jiffies-time)); ++ ++ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); ++ hci_h4p_set_rts(info, 0); ++ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); ++ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw.c 
kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw.c 2011-09-04 11:31:05.000000000 +0200 +@@ -46,6 +46,9 @@ static int hci_h4p_open_firmware(struct + case BT_CHIP_CSR: + err = request_firmware(fw_entry, "bc4fw.bin", info->dev); + break; ++ case BT_CHIP_BCM: ++ err = request_firmware(fw_entry, "bcmfw.bin", info->dev); ++ break; + default: + dev_err(info->dev, "Invalid chip type\n"); + *fw_entry = NULL; +@@ -72,12 +75,18 @@ static int hci_h4p_read_fw_cmd(struct hc + return 0; + } + ++ if (fw_pos + 2 > fw_entry->size) { ++ dev_err(info->dev, "Corrupted firmware image 1\n"); ++ return -EMSGSIZE; ++ } ++ + cmd_len = fw_entry->data[fw_pos++]; +- if (!cmd_len) ++ cmd_len += fw_entry->data[fw_pos++] << 8; ++ if (cmd_len == 0) + return 0; + + if (fw_pos + cmd_len > fw_entry->size) { +- dev_err(info->dev, "Corrupted firmware image\n"); ++ dev_err(info->dev, "Corrupted firmware image 2\n"); + return -EMSGSIZE; + } + +@@ -126,6 +135,9 @@ int hci_h4p_send_fw(struct hci_h4p_info + case BT_CHIP_TI: + err = hci_h4p_brf6150_send_fw(info, fw_queue); + break; ++ case BT_CHIP_BCM: ++ err = hci_h4p_bcm_send_fw(info, fw_queue); ++ break; + default: + dev_err(info->dev, "Don't know how to send firmware\n"); + err = -EINVAL; +@@ -143,6 +155,9 @@ void hci_h4p_parse_fw_event(struct hci_h + case BT_CHIP_TI: + hci_h4p_brf6150_parse_fw_event(info, skb); + break; ++ case BT_CHIP_BCM: ++ hci_h4p_bcm_parse_fw_event(info, skb); ++ break; + default: + dev_err(info->dev, "Don't know how to parse fw event\n"); + info->fw_error = -EINVAL; +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-csr.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw-csr.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-csr.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw-csr.c 2011-09-04 11:31:05.000000000 +0200 +@@ -1,7 +1,7 @@ + /* + * This file is part of hci_h4p bluetooth driver + * +- * Copyright (C) 2005, 2006 Nokia Corporation. ++ * Copyright (C) 2005-2008 Nokia Corporation. 
+ * + * Contact: Ville Tervo + * +@@ -50,6 +50,8 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i + struct sk_buff *skb; + unsigned int offset; + int retries, count, i; ++ unsigned long flags; ++ struct omap_bluetooth_config *config; + + info->fw_error = 0; + +@@ -59,21 +61,27 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i + if (!skb) + return -ENOMSG; + ++ config = info->dev->platform_data; ++ if (!config) { ++ kfree_skb(skb); ++ return -ENODEV; ++ } ++ + /* Check if this is bd_address packet */ + if (skb->data[15] == 0x01 && skb->data[16] == 0x00) { + offset = 21; + skb->data[offset + 1] = 0x00; + skb->data[offset + 5] = 0x00; +- skb->data[offset + 7] = info->bdaddr[0]; +- skb->data[offset + 6] = info->bdaddr[1]; +- skb->data[offset + 4] = info->bdaddr[2]; +- skb->data[offset + 0] = info->bdaddr[3]; +- skb->data[offset + 3] = info->bdaddr[4]; +- skb->data[offset + 2] = info->bdaddr[5]; ++ skb->data[offset + 7] = config->bd_addr[0]; ++ skb->data[offset + 6] = config->bd_addr[1]; ++ skb->data[offset + 4] = config->bd_addr[2]; ++ skb->data[offset + 0] = config->bd_addr[3]; ++ skb->data[offset + 3] = config->bd_addr[4]; ++ skb->data[offset + 2] = config->bd_addr[5]; + } + + for (i = 0; i < 6; i++) { +- if (info->bdaddr[i] != 0x00) ++ if (config->bd_addr[i] != 0x00) + break; + } + +@@ -87,7 +95,10 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i + NBT_DBG_FW("Sending firmware command %d\n", count); + init_completion(&info->fw_completion); + skb_queue_tail(&info->txq, skb); +- tasklet_schedule(&info->tx_task); ++ spin_lock_irqsave(&info->lock, flags); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ spin_unlock_irqrestore(&info->lock, flags); + + skb = skb_dequeue(fw_queue); + if (!skb) +@@ -120,7 +131,7 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i + hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); + + if (hci_h4p_wait_for_cts(info, 1, 100)) { +- dev_err(info->dev, "cts didn't go down after final speed change\n"); ++ dev_err(info->dev, "cts didn't deassert after final speed\n"); + return -ETIMEDOUT; + } + +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-ti.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw-ti.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-ti.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/fw-ti.c 2011-09-04 11:31:05.000000000 +0200 +@@ -1,7 +1,7 @@ + /* + * This file is part of hci_h4p bluetooth driver + * +- * Copyright (C) 2005, 2006 Nokia Corporation. ++ * Copyright (C) 2005-2008 Nokia Corporation. 
+ * + * Contact: Ville Tervo + * +@@ -22,6 +22,7 @@ + */ + + #include ++#include + + #include "hci_h4p.h" + +@@ -55,10 +56,12 @@ ret: + complete(&info->fw_completion); + } + +-int hci_h4p_brf6150_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue) ++int hci_h4p_brf6150_send_fw(struct hci_h4p_info *info, ++ struct sk_buff_head *fw_queue) + { + struct sk_buff *skb; + int err = 0; ++ unsigned long flags; + + info->fw_error = 0; + +@@ -72,7 +75,10 @@ int hci_h4p_brf6150_send_fw(struct hci_h + + init_completion(&info->fw_completion); + skb_queue_tail(&info->txq, skb); +- tasklet_schedule(&info->tx_task); ++ spin_lock_irqsave(&info->lock, flags); ++ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | ++ UART_IER_THRI); ++ spin_unlock_irqrestore(&info->lock, flags); + + if (!wait_for_completion_timeout(&info->fw_completion, HZ)) { + dev_err(info->dev, "Timeout while sending brf6150 fw\n"); +@@ -80,7 +86,8 @@ int hci_h4p_brf6150_send_fw(struct hci_h + } + + if (info->fw_error) { +- dev_err(info->dev, "There was fw_error while sending bfr6150 fw\n"); ++ dev_err(info->dev, ++ "fw_error while sending bfr6150 fw\n"); + return -EPROTO; + } + } +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/hci_h4p.h kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/hci_h4p.h +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/hci_h4p.h 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/hci_h4p.h 2011-09-04 11:31:05.000000000 +0200 +@@ -1,7 +1,7 @@ + /* + * This file is part of hci_h4p bluetooth driver + * +- * Copyright (C) 2005, 2006 Nokia Corporation. ++ * Copyright (C) 2005-2008 Nokia Corporation. + * + * Contact: Ville Tervo + * +@@ -37,6 +37,13 @@ + #define UART_OMAP_SSR_WAKEUP 0x02 + #define UART_OMAP_SSR_TXFULL 0x01 + ++#define UART_OMAP_SYSC_IDLEMODE 0x03 ++#define UART_OMAP_SYSC_IDLEMASK (3 << UART_OMAP_SYSC_IDLEMODE) ++ ++#define UART_OMAP_SYSC_FORCE_IDLE (0 << UART_OMAP_SYSC_IDLEMODE) ++#define UART_OMAP_SYSC_NO_IDLE (1 << UART_OMAP_SYSC_IDLEMODE) ++#define UART_OMAP_SYSC_SMART_IDLE (2 << UART_OMAP_SYSC_IDLEMODE) ++ + #if 0 + #define NBT_DBG(fmt, arg...) 
printk("%s: " fmt "" , __FUNCTION__ , ## arg) + #else +@@ -74,6 +81,7 @@ + #endif + + struct hci_h4p_info { ++ struct timer_list lazy_release; + struct hci_dev *hdev; + spinlock_t lock; + +@@ -81,14 +89,12 @@ struct hci_h4p_info { + unsigned long uart_phys_base; + int irq; + struct device *dev; +- u8 bdaddr[6]; + u8 chip_type; + u8 bt_wakeup_gpio; + u8 host_wakeup_gpio; + u8 reset_gpio; + u8 bt_sysclk; + +- + struct sk_buff_head fw_queue; + struct sk_buff *alive_cmd_skb; + struct completion init_completion; +@@ -97,27 +103,35 @@ struct hci_h4p_info { + int init_error; + + struct sk_buff_head txq; +- struct tasklet_struct tx_task; + + struct sk_buff *rx_skb; + long rx_count; + unsigned long rx_state; + unsigned long garbage_bytes; +- struct tasklet_struct rx_task; + + int pm_enabled; +- int tx_pm_enabled; +- int rx_pm_enabled; +- struct timer_list tx_pm_timer; +- struct timer_list rx_pm_timer; ++ int tx_enabled; ++ int autorts; ++ int rx_enabled; + + int tx_clocks_en; + int rx_clocks_en; + spinlock_t clocks_lock; + struct clk *uart_iclk; + struct clk *uart_fclk; ++ atomic_t clk_users; ++ u16 dll; ++ u16 dlh; ++ u16 ier; ++ u16 mdr1; ++ u16 efr; + }; + ++struct hci_h4p_radio_hdr { ++ __u8 evt; ++ __u8 dlen; ++} __attribute__ ((packed)); ++ + #define MAX_BAUD_RATE 921600 + #define BC4_MAX_BAUD_RATE 3692300 + #define UART_CLOCK 48000000 +@@ -127,6 +141,7 @@ struct hci_h4p_info { + #define INIT_SPEED 120000 + + #define H4_TYPE_SIZE 1 ++#define H4_RADIO_HDR_SIZE 2 + + /* H4+ packet types */ + #define H4_CMD_PKT 0x01 +@@ -135,6 +150,7 @@ struct hci_h4p_info { + #define H4_EVT_PKT 0x04 + #define H4_NEG_PKT 0x06 + #define H4_ALIVE_PKT 0x07 ++#define H4_RADIO_PKT 0x08 + + /* TX states */ + #define WAIT_FOR_PKT_TYPE 1 +@@ -154,6 +170,11 @@ struct hci_bc4_set_bdaddr { + + int hci_h4p_send_alive_packet(struct hci_h4p_info *info); + ++void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, ++ struct sk_buff *skb); ++int hci_h4p_bcm_send_fw(struct hci_h4p_info *info, ++ struct sk_buff_head *fw_queue); ++ + void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, + struct sk_buff *skb); + int hci_h4p_bc4_send_fw(struct hci_h4p_info *info, +@@ -169,6 +190,7 @@ int hci_h4p_send_fw(struct hci_h4p_info + void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb); + + int hci_h4p_sysfs_create_files(struct device *dev); ++void hci_h4p_sysfs_remove_files(struct device *dev); + + void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val); + u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset); +@@ -178,6 +200,10 @@ void __hci_h4p_set_auto_ctsrts(struct hc + void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which); + void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed); + int hci_h4p_reset_uart(struct hci_h4p_info *info); +-int hci_h4p_init_uart(struct hci_h4p_info *info); ++void hci_h4p_init_uart(struct hci_h4p_info *info); ++void hci_h4p_enable_tx(struct hci_h4p_info *info); ++void hci_h4p_store_regs(struct hci_h4p_info *info); ++void hci_h4p_restore_regs(struct hci_h4p_info *info); ++void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable); + + #endif /* __DRIVERS_BLUETOOTH_HCI_H4P_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/Makefile kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/Makefile +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/Makefile 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/Makefile 2011-09-04 11:31:05.000000000 +0200 +@@ 
-4,4 +4,4 @@ + + obj-$(CONFIG_BT_HCIH4P) += hci_h4p.o + +-hci_h4p-objs := core.o fw.o uart.o sysfs.o fw-ti.o fw-csr.o ++hci_h4p-objs := core.o fw.o uart.o fw-ti.o fw-csr.o fw-bcm.o +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/sysfs.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/sysfs.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/sysfs.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/sysfs.c 1970-01-01 01:00:00.000000000 +0100 +@@ -1,74 +0,0 @@ +-/* +- * This file is part of hci_h4p bluetooth driver +- * +- * Copyright (C) 2005, 2006 Nokia Corporation. +- * +- * Contact: Ville Tervo +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License +- * version 2 as published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +- * 02110-1301 USA +- * +- */ +- +-#include +-#include +-#include +-#include +- +-#include "hci_h4p.h" +- +-#ifdef CONFIG_SYSFS +- +-static ssize_t hci_h4p_store_bdaddr(struct device *dev, struct device_attribute *attr, +- const char *buf, size_t count) +-{ +- struct hci_h4p_info *info = (struct hci_h4p_info*)dev_get_drvdata(dev); +- unsigned int bdaddr[6]; +- int ret, i; +- +- ret = sscanf(buf, "%2x:%2x:%2x:%2x:%2x:%2x\n", +- &bdaddr[0], &bdaddr[1], &bdaddr[2], +- &bdaddr[3], &bdaddr[4], &bdaddr[5]); +- +- if (ret != 6) { +- return -EINVAL; +- } +- +- for (i = 0; i < 6; i++) +- info->bdaddr[i] = bdaddr[i] & 0xff; +- +- return count; +-} +- +-static ssize_t hci_h4p_show_bdaddr(struct device *dev, struct device_attribute *attr, +- char *buf) +-{ +- struct hci_h4p_info *info = (struct hci_h4p_info*)dev_get_drvdata(dev); +- +- return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", +- info->bdaddr[0], +- info->bdaddr[1], +- info->bdaddr[2], +- info->bdaddr[3], +- info->bdaddr[4], +- info->bdaddr[5]); +-} +- +-static DEVICE_ATTR(bdaddr, S_IRUGO | S_IWUSR, hci_h4p_show_bdaddr, hci_h4p_store_bdaddr); +-int hci_h4p_sysfs_create_files(struct device *dev) +-{ +- return device_create_file(dev, &dev_attr_bdaddr); +-} +- +-#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/uart.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/uart.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/uart.c 2011-09-04 11:32:10.013211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_h4p/uart.c 2011-09-04 11:31:05.000000000 +0200 +@@ -31,12 +31,12 @@ + + inline void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val) + { +- outb(val, info->uart_base + (offset << 2)); ++ __raw_writeb(val, info->uart_base + (offset << 2)); + } + + inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset) + { +- return inb(info->uart_base + (offset << 2)); ++ return __raw_readb(info->uart_base + (offset << 2)); + } + + void hci_h4p_set_rts(struct hci_h4p_info *info, int active) +@@ -54,14 +54,11 @@ void hci_h4p_set_rts(struct hci_h4p_info + int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, + int timeout_ms) + { +- int okay; + unsigned long timeout; ++ 
int state; + +- okay = 0; + timeout = jiffies + msecs_to_jiffies(timeout_ms); + for (;;) { +- int state; +- + state = hci_h4p_inb(info, UART_MSR) & UART_MSR_CTS; + if (active) { + if (state) +@@ -72,6 +69,7 @@ int hci_h4p_wait_for_cts(struct hci_h4p_ + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; ++ msleep(1); + } + } + +@@ -140,25 +138,60 @@ int hci_h4p_reset_uart(struct hci_h4p_in + return 0; + } + +-int hci_h4p_init_uart(struct hci_h4p_info *info) ++ ++void hci_h4p_store_regs(struct hci_h4p_info *info) ++{ ++ u16 lcr = 0; ++ ++ lcr = hci_h4p_inb(info, UART_LCR); ++ hci_h4p_outb(info, UART_LCR, 0xBF); ++ info->dll = hci_h4p_inb(info, UART_DLL); ++ info->dlh = hci_h4p_inb(info, UART_DLM); ++ info->efr = hci_h4p_inb(info, UART_EFR); ++ hci_h4p_outb(info, UART_LCR, lcr); ++ info->mdr1 = hci_h4p_inb(info, UART_OMAP_MDR1); ++ info->ier = hci_h4p_inb(info, UART_IER); ++} ++ ++void hci_h4p_restore_regs(struct hci_h4p_info *info) + { +- int err; ++ u16 lcr = 0; ++ ++ hci_h4p_init_uart(info); + +- err = hci_h4p_reset_uart(info); +- if (err < 0) +- return err; ++ hci_h4p_outb(info, UART_OMAP_MDR1, 7); ++ lcr = hci_h4p_inb(info, UART_LCR); ++ hci_h4p_outb(info, UART_LCR, 0xBF); ++ hci_h4p_outb(info, UART_DLL, info->dll); /* Set speed */ ++ hci_h4p_outb(info, UART_DLM, info->dlh); ++ hci_h4p_outb(info, UART_EFR, info->efr); ++ hci_h4p_outb(info, UART_LCR, lcr); ++ hci_h4p_outb(info, UART_OMAP_MDR1, info->mdr1); ++ hci_h4p_outb(info, UART_IER, info->ier); ++} ++ ++void hci_h4p_init_uart(struct hci_h4p_info *info) ++{ ++ u8 mcr, efr; + + /* Enable and setup FIFO */ +- hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8); +- hci_h4p_outb(info, UART_OMAP_MDR1, 0x00); /* Make sure UART mode is enabled */ +- hci_h4p_outb(info, UART_OMAP_SCR, 0x80); ++ hci_h4p_outb(info, UART_OMAP_MDR1, 0x00); ++ ++ hci_h4p_outb(info, UART_LCR, 0xbf); ++ efr = hci_h4p_inb(info, UART_EFR); + hci_h4p_outb(info, UART_EFR, UART_EFR_ECB); ++ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); ++ mcr = hci_h4p_inb(info, UART_MCR); + hci_h4p_outb(info, UART_MCR, UART_MCR_TCRTLR); +- hci_h4p_outb(info, UART_TI752_TLR, 0x1f); +- hci_h4p_outb(info, UART_TI752_TCR, 0xef); + hci_h4p_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | +- UART_FCR_CLEAR_XMIT | UART_FCR_R_TRIG_00); ++ UART_FCR_CLEAR_XMIT | (3 << 6) | (0 << 4)); ++ hci_h4p_outb(info, UART_LCR, 0xbf); ++ hci_h4p_outb(info, UART_TI752_TLR, 0xed); ++ hci_h4p_outb(info, UART_TI752_TCR, 0xef); ++ hci_h4p_outb(info, UART_EFR, efr); ++ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); ++ hci_h4p_outb(info, UART_MCR, 0x00); ++ hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8); + hci_h4p_outb(info, UART_IER, UART_IER_RDI); +- +- return 0; ++ hci_h4p_outb(info, UART_OMAP_SYSC, (1 << 0) | (1 << 2) | (2 << 3)); + } +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ldisc.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_ldisc.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ldisc.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_ldisc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -46,11 +46,6 @@ + + #include "hci_uart.h" + +-#ifndef CONFIG_BT_HCIUART_DEBUG +-#undef BT_DBG +-#define BT_DBG( A... 
) +-#endif +- + #define VERSION "2.2" + + static int reset = 0; +@@ -399,8 +394,8 @@ static int hci_uart_register_dev(struct + + hdev->owner = THIS_MODULE; + +- if (reset) +- set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); ++ if (!reset) ++ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ll.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_ll.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ll.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_ll.c 2011-09-04 11:31:05.000000000 +0200 +@@ -163,8 +163,7 @@ static int ll_close(struct hci_uart *hu) + skb_queue_purge(&ll->tx_wait_q); + skb_queue_purge(&ll->txq); + +- if (ll->rx_skb) +- kfree_skb(ll->rx_skb); ++ kfree_skb(ll->rx_skb); + + hu->priv = NULL; + +diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_vhci.c kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_vhci.c +--- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_vhci.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/bluetooth/hci_vhci.c 2011-09-04 11:31:05.000000000 +0200 +@@ -40,11 +40,6 @@ + #include + #include + +-#ifndef CONFIG_BT_HCIVHCI_DEBUG +-#undef BT_DBG +-#define BT_DBG(D...) +-#endif +- + #define VERSION "1.2" + + static int minor = MISC_DYNAMIC_MINOR; +diff -Nurp linux-omap-2.6.28-omap1/drivers/cpufreq/cpufreq_ondemand.c kernel-2.6.28-20093908+0m5/drivers/cpufreq/cpufreq_ondemand.c +--- linux-omap-2.6.28-omap1/drivers/cpufreq/cpufreq_ondemand.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/cpufreq/cpufreq_ondemand.c 2011-09-04 11:31:05.000000000 +0200 +@@ -21,6 +21,8 @@ + #include + #include + #include ++#include ++#include + + /* + * dbs is used in this file as a shortform for demandbased switching +@@ -540,6 +542,87 @@ static inline void dbs_timer_exit(struct + cancel_delayed_work(&dbs_info->work); + } + ++static void dbs_refresh_callback(struct work_struct *unused) ++{ ++ struct cpufreq_policy *policy; ++ struct cpu_dbs_info_s *this_dbs_info; ++ ++ this_dbs_info = &per_cpu(cpu_dbs_info, 0); ++ policy = this_dbs_info->cur_policy; ++ ++ __cpufreq_driver_target(policy, policy->max, ++ CPUFREQ_RELATION_L); ++ this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0, ++ &this_dbs_info->prev_cpu_wall); ++} ++ ++static DECLARE_WORK(dbs_refresh_work, dbs_refresh_callback); ++ ++static void dbs_input_event(struct input_handle *handle, unsigned int type, ++ unsigned int code, int value) ++{ ++ struct cpufreq_policy *policy; ++ struct cpu_dbs_info_s *this_dbs_info; ++ ++ this_dbs_info = &per_cpu(cpu_dbs_info, 0); ++ policy = this_dbs_info->cur_policy; ++ ++ if (policy->cur < policy->max) { ++ policy->cur = policy->max; ++ schedule_work(&dbs_refresh_work); ++ } ++} ++ ++static int dbs_input_connect(struct input_handler *handler, ++ struct input_dev *dev, const struct input_device_id *id) ++{ ++ struct input_handle *handle; ++ int error; ++ ++ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); ++ if (!handle) ++ return -ENOMEM; ++ ++ handle->dev = dev; ++ handle->handler = handler; ++ handle->name = "cpufreq"; ++ ++ error = input_register_handle(handle); ++ if (error) ++ goto err1; ++ ++ error = input_open_device(handle); ++ if (error) ++ goto err2; ++ ++ return 0; ++err1: ++ input_unregister_handle(handle); ++err2: ++ kfree(handle); ++ return error; ++} ++ ++static void dbs_input_disconnect(struct input_handle *handle) ++{ ++ 
input_close_device(handle); ++ input_unregister_handle(handle); ++ kfree(handle); ++} ++ ++static const struct input_device_id dbs_ids[] = { ++ { .driver_info = 1 }, ++ { }, ++}; ++ ++static struct input_handler dbs_input_handler = { ++ .event = dbs_input_event, ++ .connect = dbs_input_connect, ++ .disconnect = dbs_input_disconnect, ++ .name = "cpufreq_ond", ++ .id_table = dbs_ids, ++}; ++ + static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) + { +@@ -597,7 +680,7 @@ static int cpufreq_governor_dbs(struct c + dbs_tuners_ins.sampling_rate = def_sampling_rate; + } + dbs_timer_init(this_dbs_info); +- ++ rc = input_register_handler(&dbs_input_handler); + mutex_unlock(&dbs_mutex); + break; + +@@ -606,6 +689,7 @@ static int cpufreq_governor_dbs(struct c + dbs_timer_exit(this_dbs_info); + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; ++ input_unregister_handler(&dbs_input_handler); + mutex_unlock(&dbs_mutex); + + break; +diff -Nurp linux-omap-2.6.28-omap1/drivers/cpuidle/governors/menu.c kernel-2.6.28-20093908+0m5/drivers/cpuidle/governors/menu.c +--- linux-omap-2.6.28-omap1/drivers/cpuidle/governors/menu.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/cpuidle/governors/menu.c 2011-09-04 11:31:05.000000000 +0200 +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + #define BREAK_FUZZ 4 /* 4 us */ + +@@ -36,6 +37,8 @@ static int menu_select(struct cpuidle_de + struct menu_device *data = &__get_cpu_var(menu_devices); + int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); + int i; ++ int device_not_idle; ++ struct timespec t; + + /* Special case when user has set very strict latency requirement */ + if (unlikely(latency_req == 0)) { +@@ -44,8 +47,11 @@ static int menu_select(struct cpuidle_de + } + + /* determine the expected residency time */ ++ t = ktime_to_timespec(tick_nohz_get_sleep_length()); + data->expected_us = +- (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; ++ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC; ++ ++ device_not_idle = !pm_check_idle(); + + /* find the deepest idle state that satisfies our constraints */ + for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { +@@ -53,7 +59,8 @@ static int menu_select(struct cpuidle_de + + if (s->target_residency > data->expected_us) + break; +- if (s->target_residency > data->predicted_us) ++ if (device_not_idle && ++ s->target_residency > data->predicted_us) + break; + if (s->exit_latency > latency_req) + break; +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/cload.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/cload.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/cload.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/cload.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1851 @@ ++/* ++ * cload.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++#include "header.h" ++ ++#include "module_list.h" ++#define LINKER_MODULES_HEADER ("_" MODULES_HEADER) ++ ++/* ++ * we use the fact that DOFF section records are shaped just like ++ * LDR_SECTION_INFO to reduce our section storage usage. This macro marks ++ * the places where that assumption is made ++ */ ++#define DOFFSEC_IS_LDRSEC(pdoffsec) ((struct LDR_SECTION_INFO *)(pdoffsec)) ++ ++/* ++ * forward references ++ */ ++static void dload_symbols(struct dload_state *dlthis); ++static void dload_data(struct dload_state *dlthis); ++static void allocate_sections(struct dload_state *dlthis); ++static void string_table_free(struct dload_state *dlthis); ++static void symbol_table_free(struct dload_state *dlthis); ++static void section_table_free(struct dload_state *dlthis); ++static void init_module_handle(struct dload_state *dlthis); ++#if BITS_PER_AU > BITS_PER_BYTE ++static char *unpack_name(struct dload_state *dlthis, u32 soffset); ++#endif ++ ++static const char CINITNAME[] = { ".cinit" }; ++static const char LOADER_DLLVIEW_ROOT[] = { "?DLModules?" }; ++ ++/* ++ * Error strings ++ */ ++static const char E_READSTRM[] = { "Error reading %s from input stream" }; ++static const char E_ALLOC[] = { "Syms->Allocate( %d ) failed" }; ++static const char E_TGTALLOC[] = ++ { "Target memory allocate failed, section %s size " FMT_UI32 }; ++static const char E_INITFAIL[] = { "%s to target address " FMT_UI32 " failed" }; ++static const char E_DLVWRITE[] = { "Write to DLLview list failed" }; ++static const char E_ICONNECT[] = { "Connect call to init interface failed" }; ++static const char E_CHECKSUM[] = { "Checksum failed on %s" }; ++ ++/************************************************************************* ++ * Procedure dload_error ++ * ++ * Parameters: ++ * errtxt description of the error, printf style ++ * ... additional information ++ * ++ * Effect: ++ * Reports or records the error as appropriate. ++ ************************************************************************/ ++void dload_error(struct dload_state *dlthis, const char *errtxt, ...) ++{ ++ va_list args; ++ ++ va_start(args, errtxt); ++ dlthis->mysym->Error_Report(dlthis->mysym, errtxt, args); ++ va_end(args); ++ dlthis->dload_errcount += 1; ++ ++} /* dload_error */ ++ ++#define DL_ERROR(zza, zzb) dload_error(dlthis, zza, zzb) ++ ++/************************************************************************* ++ * Procedure dload_syms_error ++ * ++ * Parameters: ++ * errtxt description of the error, printf style ++ * ... additional information ++ * ++ * Effect: ++ * Reports or records the error as appropriate. ++ ************************************************************************/ ++void dload_syms_error(struct Dynamic_Loader_Sym *syms, const char *errtxt, ...) ++{ ++ va_list args; ++ ++ va_start(args, errtxt); ++ syms->Error_Report(syms, errtxt, args); ++ va_end(args); ++} ++ ++/************************************************************************* ++ * Procedure Dynamic_Load_Module ++ * ++ * Parameters: ++ * module The input stream that supplies the module image ++ * syms Host-side symbol table and malloc/free functions ++ * alloc Target-side memory allocation ++ * init Target-side memory initialization ++ * options Option flags DLOAD_* ++ * mhandle A module handle for use with Dynamic_Unload ++ * ++ * Effect: ++ * The module image is read using *module. Target storage for the new ++ * image is ++ * obtained from *alloc. Symbols defined and referenced by the module are ++ * managed using *syms. 
The image is then relocated and references ++ * resolved as necessary, and the resulting executable bits are placed ++ * into target memory using *init. ++ * ++ * Returns: ++ * On a successful load, a module handle is placed in *mhandle, ++ * and zero is returned. On error, the number of errors detected is ++ * returned. Individual errors are reported during the load process ++ * using syms->Error_Report(). ++ ***********************************************************************/ ++int Dynamic_Load_Module(struct Dynamic_Loader_Stream *module, ++ struct Dynamic_Loader_Sym *syms , ++ struct Dynamic_Loader_Allocate *alloc, ++ struct Dynamic_Loader_Initialize *init, ++ unsigned options, DLOAD_mhandle *mhandle) ++{ ++ register unsigned *dp, sz; ++ struct dload_state dl_state; /* internal state for this call */ ++ ++ /* blast our internal state */ ++ dp = (unsigned *)&dl_state; ++ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1) ++ *dp++ = 0; ++ ++ /* Enable _only_ BSS initialization if enabled by user */ ++ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS) ++ dl_state.myoptions = DLOAD_INITBSS; ++ ++ /* Check that mandatory arguments are present */ ++ if (!module || !syms) { ++ dload_error(&dl_state, "Required parameter is NULL"); ++ } else { ++ dl_state.strm = module; ++ dl_state.mysym = syms; ++ dload_headers(&dl_state); ++ if (!dl_state.dload_errcount) ++ dload_strings(&dl_state, false); ++ if (!dl_state.dload_errcount) ++ dload_sections(&dl_state); ++ ++ if (init && !dl_state.dload_errcount) { ++ if (init->connect(init)) { ++ dl_state.myio = init; ++ dl_state.myalloc = alloc; ++ /* do now, before reducing symbols */ ++ allocate_sections(&dl_state); ++ } else ++ dload_error(&dl_state, E_ICONNECT); ++ } ++ ++ if (!dl_state.dload_errcount) { ++ /* fix up entry point address */ ++ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1; ++ if (sref < dl_state.allocated_secn_count) ++ dl_state.dfile_hdr.df_entrypt += ++ dl_state.ldr_sections[sref].run_addr; ++ ++ dload_symbols(&dl_state); ++ } ++ ++ if (init && !dl_state.dload_errcount) ++ dload_data(&dl_state); ++ ++ init_module_handle(&dl_state); ++ ++ /* dl_state.myio is init or 0 at this point. */ ++ if (dl_state.myio) { ++ if ((!dl_state.dload_errcount) && ++ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) && ++ (!init->execute(init, ++ dl_state.dfile_hdr.df_entrypt))) ++ dload_error(&dl_state, ++ "Init->Execute Failed"); ++ init->release(init); ++ } ++ ++ symbol_table_free(&dl_state); ++ section_table_free(&dl_state); ++ string_table_free(&dl_state); ++ ++ if (dl_state.dload_errcount) { ++ Dynamic_Unload_Module(dl_state.myhandle, syms, alloc, ++ init); ++ dl_state.myhandle = NULL; ++ } ++ } ++ ++ if (mhandle) ++ *mhandle = dl_state.myhandle; /* give back the handle */ ++ ++ return dl_state.dload_errcount; ++} /* DLOAD_File */ ++ ++/************************************************************************* ++ * Procedure Dynamic_Open_Module ++ * ++ * Parameters: ++ * module The input stream that supplies the module image ++ * syms Host-side symbol table and malloc/free functions ++ * alloc Target-side memory allocation ++ * init Target-side memory initialization ++ * options Option flags DLOAD_* ++ * mhandle A module handle for use with Dynamic_Unload ++ * ++ * Effect: ++ * The module image is read using *module. Target storage for the new ++ * image is ++ * obtained from *alloc. Symbols defined and referenced by the module are ++ * managed using *syms. 
The image is then relocated and references ++ * resolved as necessary, and the resulting executable bits are placed ++ * into target memory using *init. ++ * ++ * Returns: ++ * On a successful load, a module handle is placed in *mhandle, ++ * and zero is returned. On error, the number of errors detected is ++ * returned. Individual errors are reported during the load process ++ * using syms->Error_Report(). ++ ***********************************************************************/ ++int ++Dynamic_Open_Module(struct Dynamic_Loader_Stream *module, ++ struct Dynamic_Loader_Sym *syms, ++ struct Dynamic_Loader_Allocate *alloc, ++ struct Dynamic_Loader_Initialize *init, ++ unsigned options, DLOAD_mhandle *mhandle) ++{ ++ register unsigned *dp, sz; ++ struct dload_state dl_state; /* internal state for this call */ ++ ++ /* blast our internal state */ ++ dp = (unsigned *)&dl_state; ++ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1) ++ *dp++ = 0; ++ ++ /* Enable _only_ BSS initialization if enabled by user */ ++ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS) ++ dl_state.myoptions = DLOAD_INITBSS; ++ ++ /* Check that mandatory arguments are present */ ++ if (!module || !syms) { ++ dload_error(&dl_state, "Required parameter is NULL"); ++ } else { ++ dl_state.strm = module; ++ dl_state.mysym = syms; ++ dload_headers(&dl_state); ++ if (!dl_state.dload_errcount) ++ dload_strings(&dl_state, false); ++ if (!dl_state.dload_errcount) ++ dload_sections(&dl_state); ++ ++ if (init && !dl_state.dload_errcount) { ++ if (init->connect(init)) { ++ dl_state.myio = init; ++ dl_state.myalloc = alloc; ++ /* do now, before reducing symbols */ ++ allocate_sections(&dl_state); ++ } else ++ dload_error(&dl_state, E_ICONNECT); ++ } ++ ++ if (!dl_state.dload_errcount) { ++ /* fix up entry point address */ ++ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1; ++ if (sref < dl_state.allocated_secn_count) ++ dl_state.dfile_hdr.df_entrypt += ++ dl_state.ldr_sections[sref].run_addr; ++ ++ dload_symbols(&dl_state); ++ } ++ ++ init_module_handle(&dl_state); ++ ++ /* dl_state.myio is either 0 or init at this point. */ ++ if (dl_state.myio) { ++ if ((!dl_state.dload_errcount) && ++ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) && ++ (!init->execute(init, ++ dl_state.dfile_hdr.df_entrypt))) ++ dload_error(&dl_state, ++ "Init->Execute Failed"); ++ init->release(init); ++ } ++ ++ symbol_table_free(&dl_state); ++ section_table_free(&dl_state); ++ string_table_free(&dl_state); ++ ++ if (dl_state.dload_errcount) { ++ Dynamic_Unload_Module(dl_state.myhandle, syms, alloc, ++ init); ++ dl_state.myhandle = NULL; ++ } ++ } ++ ++ if (mhandle) ++ *mhandle = dl_state.myhandle; /* give back the handle */ ++ ++ return dl_state.dload_errcount; ++} /* DLOAD_File */ ++ ++/************************************************************************* ++ * Procedure dload_headers ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Loads the DOFF header and verify record. Deals with any byte-order ++ * issues and checks them for validity. ++ ************************************************************************/ ++#define COMBINED_HEADER_SIZE (sizeof(struct doff_filehdr_t)+ \ ++ sizeof(struct doff_verify_rec_t)) ++ ++void dload_headers(struct dload_state *dlthis) ++{ ++ u32 map; ++ ++ /* Read the header and the verify record as one. 
If we don't get it ++ all, we're done */ ++ if (dlthis->strm->read_buffer(dlthis->strm, &dlthis->dfile_hdr, ++ COMBINED_HEADER_SIZE) != COMBINED_HEADER_SIZE) { ++ DL_ERROR(E_READSTRM, "File Headers"); ++ return; ++ } ++ /* ++ * Verify that we have the byte order of the file correct. ++ * If not, must fix it before we can continue ++ */ ++ map = REORDER_MAP(dlthis->dfile_hdr.df_byte_reshuffle); ++ if (map != REORDER_MAP(BYTE_RESHUFFLE_VALUE)) { ++ /* input is either byte-shuffled or bad */ ++ if ((map & 0xFCFCFCFC) == 0) { /* no obviously bogus bits */ ++ dload_reorder(&dlthis->dfile_hdr, COMBINED_HEADER_SIZE, ++ map); ++ } ++ if (dlthis->dfile_hdr.df_byte_reshuffle != ++ BYTE_RESHUFFLE_VALUE) { ++ /* didn't fix the problem, the byte swap map is bad */ ++ dload_error(dlthis, ++ "Bad byte swap map " FMT_UI32 " in header", ++ dlthis->dfile_hdr.df_byte_reshuffle); ++ return; ++ } ++ dlthis->reorder_map = map; /* keep map for future use */ ++ } ++ ++ /* ++ * Verify checksum of header and verify record ++ */ ++ if (~dload_checksum(&dlthis->dfile_hdr, ++ sizeof(struct doff_filehdr_t)) || ++ ~dload_checksum(&dlthis->verify, ++ sizeof(struct doff_verify_rec_t))) { ++ DL_ERROR(E_CHECKSUM, "header or verify record"); ++ return; ++ } ++#if HOST_ENDIANNESS ++ dlthis->dfile_hdr.df_byte_reshuffle = map; /* put back for later */ ++#endif ++ ++ /* Check for valid target ID */ ++ if ((dlthis->dfile_hdr.df_target_id != TARGET_ID) && ++ -(dlthis->dfile_hdr.df_target_id != TMS470_ID)) { ++ dload_error(dlthis, "Bad target ID 0x%x and TARGET_ID 0x%x", ++ dlthis->dfile_hdr.df_target_id, TARGET_ID); ++ return; ++ } ++ /* Check for valid file format */ ++ if ((dlthis->dfile_hdr.df_doff_version != DOFF0)) { ++ dload_error(dlthis, "Bad DOFF version 0x%x", ++ dlthis->dfile_hdr.df_doff_version); ++ return; ++ } ++ ++ /* ++ * Apply reasonableness checks to count fields ++ */ ++ if (dlthis->dfile_hdr.df_strtab_size > MAX_REASONABLE_STRINGTAB) { ++ dload_error(dlthis, "Excessive string table size " FMT_UI32, ++ dlthis->dfile_hdr.df_strtab_size); ++ return; ++ } ++ if (dlthis->dfile_hdr.df_no_scns > MAX_REASONABLE_SECTIONS) { ++ dload_error(dlthis, "Excessive section count 0x%x", ++ dlthis->dfile_hdr.df_no_scns); ++ return; ++ } ++#ifndef TARGET_ENDIANNESS ++ /* ++ * Check that endianness does not disagree with explicit specification ++ */ ++ if ((dlthis->dfile_hdr.df_flags >> ALIGN_COFF_ENDIANNESS) & ++ dlthis->myoptions & ENDIANNESS_MASK) { ++ dload_error(dlthis, ++ "Input endianness disagrees with specified option"); ++ return; ++ } ++ dlthis->big_e_target = dlthis->dfile_hdr.df_flags & DF_BIG; ++#endif ++ ++} /* dload_headers */ ++ ++/* COFF Section Processing ++ * ++ * COFF sections are read in and retained intact. Each record is embedded ++ * in a new structure that records the updated load and ++ * run addresses of the section */ ++ ++static const char SECN_ERRID[] = { "section" }; ++ ++/************************************************************************* ++ * Procedure dload_sections ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Loads the section records into an internal table. 
++ ************************************************************************/ ++void ++dload_sections(struct dload_state *dlthis) ++{ ++ s16 siz; ++ struct doff_scnhdr_t *shp; ++ unsigned nsecs = dlthis->dfile_hdr.df_no_scns; ++ ++ /* allocate space for the DOFF section records */ ++ siz = nsecs * sizeof(struct doff_scnhdr_t); ++ shp = (struct doff_scnhdr_t *)dlthis->mysym->Allocate(dlthis->mysym, ++ siz); ++ if (!shp) { /* not enough storage */ ++ DL_ERROR(E_ALLOC, siz); ++ return; ++ } ++ dlthis->sect_hdrs = shp; ++ ++ /* read in the section records */ ++ if (dlthis->strm->read_buffer(dlthis->strm, shp, siz) != siz) { ++ DL_ERROR(E_READSTRM, SECN_ERRID); ++ return; ++ } ++ ++ /* if we need to fix up byte order, do it now */ ++ if (dlthis->reorder_map) ++ dload_reorder(shp, siz, dlthis->reorder_map); ++ ++ /* check for validity */ ++ if (~dload_checksum(dlthis->sect_hdrs, siz) != ++ dlthis->verify.dv_scn_rec_checksum) { ++ DL_ERROR(E_CHECKSUM, SECN_ERRID); ++ return; ++ } ++ ++} /* dload_sections */ ++ ++/***************************************************************************** ++ * Procedure allocate_sections ++ * ++ * Parameters: ++ * alloc target memory allocator class ++ * ++ * Effect: ++ * Assigns new (target) addresses for sections ++ *****************************************************************************/ ++static void allocate_sections(struct dload_state *dlthis) ++{ ++ u16 curr_sect, nsecs, siz; ++ struct doff_scnhdr_t *shp; ++ struct LDR_SECTION_INFO *asecs; ++ struct my_handle *hndl; ++ nsecs = dlthis->dfile_hdr.df_no_scns; ++ if (!nsecs) ++ return; ++ if ((dlthis->myalloc == NULL) && ++ (dlthis->dfile_hdr.df_target_scns > 0)) { ++ DL_ERROR("Arg 3 (alloc) required but NULL", 0); ++ return; ++ } ++ /* allocate space for the module handle, which we will ++ * keep for unload purposes */ ++ siz = dlthis->dfile_hdr.df_target_scns * ++ sizeof(struct LDR_SECTION_INFO) + MY_HANDLE_SIZE; ++ hndl = (struct my_handle *)dlthis->mysym->Allocate(dlthis->mysym, siz); ++ if (!hndl) { /* not enough storage */ ++ DL_ERROR(E_ALLOC, siz); ++ return; ++ } ++ /* initialize the handle header */ ++ hndl->dm.hnext = hndl->dm.hprev = hndl; /* circular list */ ++ hndl->dm.hroot = NULL; ++ hndl->dm.dbthis = 0; ++ dlthis->myhandle = hndl; /* save away for return */ ++ /* pointer to the section list of allocated sections */ ++ dlthis->ldr_sections = asecs = hndl->secns; ++ /* * Insert names into all sections, make copies of ++ the sections we allocate */ ++ shp = dlthis->sect_hdrs; ++ for (curr_sect = 0; curr_sect < nsecs; curr_sect++) { ++ u32 soffset = shp->ds_offset; ++#if BITS_PER_AU <= BITS_PER_BYTE ++ /* attempt to insert the name of this section */ ++ if (soffset < dlthis->dfile_hdr.df_strtab_size) ++ DOFFSEC_IS_LDRSEC(shp)->name = dlthis->str_head + ++ soffset; ++ else { ++ dload_error(dlthis, "Bad name offset in section %d", ++ curr_sect); ++ DOFFSEC_IS_LDRSEC(shp)->name = NULL; ++ } ++#endif ++ /* allocate target storage for sections that require it */ ++ if (DS_NEEDS_ALLOCATION(shp)) { ++ *asecs = *DOFFSEC_IS_LDRSEC(shp); ++ asecs->context = 0; /* zero the context field */ ++#if BITS_PER_AU > BITS_PER_BYTE ++ asecs->name = unpack_name(dlthis, soffset); ++ dlthis->debug_string_size = soffset + dlthis->temp_len; ++#else ++ dlthis->debug_string_size = soffset; ++#endif ++ if (dlthis->myalloc != NULL) { ++ if (!dlthis->myalloc->Allocate(dlthis->myalloc, asecs, ++ DS_ALIGNMENT(asecs->type))) { ++ dload_error(dlthis, E_TGTALLOC, asecs->name, ++ asecs->size); ++ return; ++ } ++ } ++ /* keep 
address deltas in original section table */ ++ shp->ds_vaddr = asecs->load_addr - shp->ds_vaddr; ++ shp->ds_paddr = asecs->run_addr - shp->ds_paddr; ++ dlthis->allocated_secn_count += 1; ++ } /* allocate target storage */ ++ shp += 1; ++ asecs += 1; ++ } ++#if BITS_PER_AU <= BITS_PER_BYTE ++ dlthis->debug_string_size += ++ strlen(dlthis->str_head + dlthis->debug_string_size) + 1; ++#endif ++} /* allocate sections */ ++ ++/************************************************************************* ++ * Procedure section_table_free ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Frees any state used by the symbol table. ++ * ++ * WARNING: ++ * This routine is not allowed to declare errors! ++ ************************************************************************/ ++static void section_table_free(struct dload_state *dlthis) ++{ ++ struct doff_scnhdr_t *shp; ++ ++ shp = dlthis->sect_hdrs; ++ if (shp) ++ dlthis->mysym->Deallocate(dlthis->mysym, shp); ++ ++} /* section_table_free */ ++ ++/************************************************************************* ++ * Procedure dload_strings ++ * ++ * Parameters: ++ * sec_names_only If true only read in the "section names" ++ * portion of the string table ++ * ++ * Effect: ++ * Loads the DOFF string table into memory. DOFF keeps all strings in a ++ * big unsorted array. We just read that array into memory in bulk. ++ ************************************************************************/ ++static const char S_STRINGTBL[] = { "string table" }; ++void dload_strings(struct dload_state *dlthis, boolean sec_names_only) ++{ ++ u32 ssiz; ++ char *strbuf; ++ ++ if (sec_names_only) { ++ ssiz = BYTE_TO_HOST(DOFF_ALIGN ++ (dlthis->dfile_hdr.df_scn_name_size)); ++ } else { ++ ssiz = BYTE_TO_HOST(DOFF_ALIGN ++ (dlthis->dfile_hdr.df_strtab_size)); ++ } ++ if (ssiz == 0) ++ return; ++ ++ /* get some memory for the string table */ ++#if BITS_PER_AU > BITS_PER_BYTE ++ strbuf = (char *)dlthis->mysym->Allocate(dlthis->mysym, ssiz + ++ dlthis->dfile_hdr.df_max_str_len); ++#else ++ strbuf = (char *)dlthis->mysym->Allocate(dlthis->mysym, ssiz); ++#endif ++ if (strbuf == NULL) { ++ DL_ERROR(E_ALLOC, ssiz); ++ return; ++ } ++ dlthis->str_head = strbuf; ++#if BITS_PER_AU > BITS_PER_BYTE ++ dlthis->str_temp = strbuf + ssiz; ++#endif ++ /* read in the strings and verify them */ ++ if ((unsigned)(dlthis->strm->read_buffer(dlthis->strm, strbuf, ++ ssiz)) != ssiz) { ++ DL_ERROR(E_READSTRM, S_STRINGTBL); ++ } ++ /* if we need to fix up byte order, do it now */ ++#ifndef _BIG_ENDIAN ++ if (dlthis->reorder_map) ++ dload_reorder(strbuf, ssiz, dlthis->reorder_map); ++ ++ if ((!sec_names_only) && (~dload_checksum(strbuf, ssiz) != ++ dlthis->verify.dv_str_tab_checksum)) { ++ DL_ERROR(E_CHECKSUM, S_STRINGTBL); ++ } ++#else ++ if (dlthis->dfile_hdr.df_byte_reshuffle != ++ HOST_BYTE_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) { ++ /* put strings in big-endian order, not in PC order */ ++ dload_reorder(strbuf, ssiz, HOST_BYTE_ORDER(dlthis->dfile_hdr. ++ df_byte_reshuffle)); ++ } ++ if ((!sec_names_only) && (~dload_reverse_checksum(strbuf, ssiz) != ++ dlthis->verify.dv_str_tab_checksum)) { ++ DL_ERROR(E_CHECKSUM, S_STRINGTBL); ++ } ++#endif ++} /* dload_strings */ ++ ++/************************************************************************* ++ * Procedure string_table_free ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Frees any state used by the string table. ++ * ++ * WARNING: ++ * This routine is not allowed to declare errors! 
++ *************************************************************************/ ++static void string_table_free(struct dload_state *dlthis) ++{ ++ if (dlthis->str_head) ++ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->str_head); ++ ++} /* string_table_free */ ++ ++/* ++ * Symbol Table Maintenance Functions ++ * ++ * COFF symbols are read by dload_symbols(), which is called after ++ * sections have been allocated. Symbols which might be used in ++ * relocation (ie, not debug info) are retained in an internal temporary ++ * compressed table (type Local_Symbol). A particular symbol is recovered ++ * by index by calling dload_find_symbol(). dload_find_symbol ++ * reconstructs a more explicit representation (type SLOTVEC) which is ++ * used by reloc.c ++ */ ++/* real size of debug header */ ++#define DBG_HDR_SIZE (sizeof(struct dll_module) - sizeof(struct dll_sect)) ++ ++static const char SYM_ERRID[] = { "symbol" }; ++ ++/************************************************************************** ++ * Procedure dload_symbols ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Reads in symbols and retains ones that might be needed for relocation ++ * purposes. ++ ************************************************************************/ ++/* size of symbol buffer no bigger than target data buffer, to limit stack ++ * usage*/ ++#define MY_SYM_BUF_SIZ (BYTE_TO_HOST(IMAGE_PACKET_SIZE)/\ ++ sizeof(struct doff_syment_t)) ++ ++static void dload_symbols(struct dload_state *dlthis) ++{ ++ u32 s_count, siz, dsiz, symbols_left; ++ u32 checks; ++ struct Local_Symbol *sp; ++ struct dynload_symbol *symp; ++ struct dynload_symbol *newsym; ++ ++ s_count = dlthis->dfile_hdr.df_no_syms; ++ if (s_count == 0) ++ return; ++ ++ /* We keep a local symbol table for all of the symbols in the input. ++ * This table contains only section & value info, as we do not have ++ * to do any name processing for locals. We reuse this storage ++ * as a temporary for .dllview record construction. ++ * Allocate storage for the whole table.*/ ++ siz = s_count * sizeof(struct Local_Symbol); ++ dsiz = DBG_HDR_SIZE + ++ (sizeof(struct dll_sect) * dlthis->allocated_secn_count) + ++ BYTE_TO_HOST_ROUND(dlthis->debug_string_size + 1); ++ if (dsiz > siz) ++ siz = dsiz; /* larger of symbols and .dllview temp */ ++ sp = (struct Local_Symbol *)dlthis->mysym->Allocate(dlthis->mysym, siz); ++ if (!sp) { ++ DL_ERROR(E_ALLOC, siz); ++ return; ++ } ++ dlthis->local_symtab = sp; ++ /* Read the symbols in the input, store them in the table, and post any ++ * globals to the global symbol table. In the process, externals ++ become defined from the global symbol table */ ++ checks = dlthis->verify.dv_sym_tab_checksum; ++ symbols_left = s_count; ++ do { /* read all symbols */ ++ char *sname; ++ u32 val; ++ s32 delta; ++ struct doff_syment_t *input_sym; ++ unsigned syms_in_buf; ++ struct doff_syment_t my_sym_buf[MY_SYM_BUF_SIZ]; ++ input_sym = my_sym_buf; ++ syms_in_buf = symbols_left > MY_SYM_BUF_SIZ ? 
++ MY_SYM_BUF_SIZ : symbols_left; ++ siz = syms_in_buf * sizeof(struct doff_syment_t); ++ if (dlthis->strm->read_buffer(dlthis->strm, input_sym, siz) != ++ siz) { ++ DL_ERROR(E_READSTRM, SYM_ERRID); ++ return; ++ } ++ if (dlthis->reorder_map) ++ dload_reorder(input_sym, siz, dlthis->reorder_map); ++ ++ checks += dload_checksum(input_sym, siz); ++ do { /* process symbols in buffer */ ++ symbols_left -= 1; ++ /* attempt to derive the name of this symbol */ ++ sname = NULL; ++ if (input_sym->dn_offset > 0) { ++#if BITS_PER_AU <= BITS_PER_BYTE ++ if ((u32) input_sym->dn_offset < ++ dlthis->dfile_hdr.df_strtab_size) ++ sname = dlthis->str_head + ++ BYTE_TO_HOST(input_sym->dn_offset); ++ else ++ dload_error(dlthis, ++ "Bad name offset in symbol %d", ++ symbols_left); ++#else ++ sname = unpack_name(dlthis, ++ input_sym->dn_offset); ++#endif ++ } ++ val = input_sym->dn_value; ++ delta = 0; ++ sp->sclass = input_sym->dn_sclass; ++ sp->secnn = input_sym->dn_scnum; ++ /* if this is an undefined symbol, ++ * define it (or fail) now */ ++ if (sp->secnn == DN_UNDEF) { ++ /* pointless for static undefined */ ++ if (input_sym->dn_sclass != DN_EXT) ++ goto loop_cont; ++ ++ /* try to define symbol from previously ++ * loaded images */ ++ symp = dlthis->mysym->Find_Matching_Symbol ++ (dlthis->mysym, sname); ++ if (!symp) { ++ DL_ERROR ++ ("Undefined external symbol %s", ++ sname); ++ goto loop_cont; ++ } ++ val = delta = symp->value; ++ goto loop_cont; ++ } ++ /* symbol defined by this module */ ++ if (sp->secnn > 0) { /* symbol references a section */ ++ if ((unsigned)sp->secnn <= ++ dlthis->allocated_secn_count) { ++ /* section was allocated */ ++ struct doff_scnhdr_t *srefp = ++ &dlthis->sect_hdrs ++ [sp->secnn - 1]; ++ ++ if (input_sym->dn_sclass == ++ DN_STATLAB || ++ input_sym->dn_sclass == DN_EXTLAB){ ++ /* load */ ++ delta = srefp->ds_vaddr; ++ } else { ++ /* run */ ++ delta = srefp->ds_paddr; ++ } ++ val += delta; ++ } ++ goto loop_itr; ++ } ++ /* This symbol is an absolute symbol */ ++ if (sp->secnn == DN_ABS && ((sp->sclass == DN_EXT) || ++ (sp->sclass == DN_EXTLAB))) { ++ symp = dlthis->mysym->Find_Matching_Symbol ++ (dlthis->mysym, sname); ++ if (!symp) ++ goto loop_itr; ++ /* This absolute symbol is already defined. */ ++ if (symp->value == input_sym->dn_value) { ++ /* If symbol values are equal, continue ++ * but don't add to the global symbol ++ * table */ ++ sp->value = val; ++ sp->delta = delta; ++ sp += 1; ++ input_sym += 1; ++ continue; ++ } else { ++ /* If symbol values are not equal, ++ * return with redefinition error */ ++ DL_ERROR("Absolute symbol %s is " ++ "defined multiple times with " ++ "different values", sname); ++ return; ++ } ++ } ++loop_itr: ++ /* if this is a global symbol, post it to the ++ * global table */ ++ if (input_sym->dn_sclass == DN_EXT || ++ input_sym->dn_sclass == DN_EXTLAB) { ++ /* Keep this global symbol for subsequent ++ * modules. 
Don't complain on error, to allow ++ * symbol API to suppress global symbols */ ++ if (!sname) ++ goto loop_cont; ++ ++ newsym = dlthis->mysym->Add_To_Symbol_Table ++ (dlthis->mysym, sname, ++ (unsigned)dlthis->myhandle); ++ if (newsym) ++ newsym->value = val; ++ ++ } /* global */ ++loop_cont: ++ sp->value = val; ++ sp->delta = delta; ++ sp += 1; ++ input_sym += 1; ++ } while ((syms_in_buf -= 1) > 0); /* process sym in buffer */ ++ } while (symbols_left > 0); /* read all symbols */ ++ if (~checks) ++ dload_error(dlthis, "Checksum of symbols failed"); ++ ++} /* dload_symbols */ ++ ++/***************************************************************************** ++ * Procedure symbol_table_free ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Frees any state used by the symbol table. ++ * ++ * WARNING: ++ * This routine is not allowed to declare errors! ++ *****************************************************************************/ ++static void symbol_table_free(struct dload_state *dlthis) ++{ ++ if (dlthis->local_symtab) { ++ if (dlthis->dload_errcount) { /* blow off our symbols */ ++ dlthis->mysym->Purge_Symbol_Table(dlthis->mysym, ++ (unsigned)dlthis->myhandle); ++ } ++ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->local_symtab); ++ } ++} /* symbol_table_free */ ++ ++/* .cinit Processing ++ * ++ * The dynamic loader does .cinit interpretation. cload_cinit() ++ * acts as a special write-to-target function, in that it takes relocated ++ * data from the normal data flow, and interprets it as .cinit actions. ++ * Because the normal data flow does not necessarily process the whole ++ * .cinit section in one buffer, cload_cinit() must be prepared to ++ * interpret the data piecemeal. A state machine is used for this ++ * purpose. ++ */ ++ ++/* The following are only for use by reloc.c and things it calls */ ++static const struct LDR_SECTION_INFO CINIT_INFO_INIT = { CINITNAME, 0, 0, ++ (LDR_ADDR) -1, 0, DLOAD_BSS, 0 }; ++ ++/************************************************************************* ++ * Procedure cload_cinit ++ * ++ * Parameters: ++ * ipacket Pointer to data packet to be loaded ++ * ++ * Effect: ++ * Interprets the data in the buffer as .cinit data, and performs the ++ * appropriate initializations. ++ ************************************************************************/ ++static void cload_cinit(struct dload_state *dlthis, ++ struct image_packet_t *ipacket) ++{ ++#if TDATA_TO_HOST(CINIT_COUNT)*BITS_PER_AU > 16 ++ s32 init_count, left; ++#else ++ s16 init_count, left; ++#endif ++ unsigned char *pktp = ipacket->i_bits; ++ unsigned char *pktend = pktp + ++ BYTE_TO_HOST_ROUND(ipacket->i_packet_size); ++ int temp; ++ LDR_ADDR atmp; ++ struct LDR_SECTION_INFO cinit_info; ++ ++ /* PROCESS ALL THE INITIALIZATION RECORDS IN THE BUFFER. */ ++ while (true) { ++ left = pktend - pktp; ++ switch (dlthis->cinit_state) { ++ case CI_count: /* count field */ ++ if (left < TDATA_TO_HOST(CINIT_COUNT)) ++ goto loopexit; ++ temp = dload_unpack(dlthis, (TgtAU_t *)pktp, ++ CINIT_COUNT * TDATA_AU_BITS, 0, ++ ROP_SGN); ++ pktp += TDATA_TO_HOST(CINIT_COUNT); ++ /* negative signifies BSS table, zero means done */ ++ if (temp <= 0) { ++ dlthis->cinit_state = CI_done; ++ break; ++ } ++ dlthis->cinit_count = temp; ++ dlthis->cinit_state = CI_address; ++ break; ++#if CINIT_ALIGN < CINIT_ADDRESS ++ case CI_partaddress: ++ pktp -= TDATA_TO_HOST(CINIT_ALIGN); ++ /* back up pointer into space courtesy of caller */ ++ *(uint16_t *)pktp = dlthis->cinit_addr; ++ /* stuff in saved bits !! FALL THRU !! 
*/ ++#endif ++ case CI_address: /* Address field for a copy packet */ ++ if (left < TDATA_TO_HOST(CINIT_ADDRESS)) { ++#if CINIT_ALIGN < CINIT_ADDRESS ++ if (left == TDATA_TO_HOST(CINIT_ALIGN)) { ++ /* address broken into halves */ ++ dlthis->cinit_addr = *(uint16_t *)pktp; ++ /* remember 1st half */ ++ dlthis->cinit_state = CI_partaddress; ++ left = 0; ++ } ++#endif ++ goto loopexit; ++ } ++ atmp = dload_unpack(dlthis, (TgtAU_t *)pktp, ++ CINIT_ADDRESS * TDATA_AU_BITS, 0, ++ ROP_UNS); ++ pktp += TDATA_TO_HOST(CINIT_ADDRESS); ++#if CINIT_PAGE_BITS > 0 ++ dlthis->cinit_page = atmp & ++ ((1 << CINIT_PAGE_BITS) - 1); ++ atmp >>= CINIT_PAGE_BITS; ++#else ++ dlthis->cinit_page = CINIT_DEFAULT_PAGE; ++#endif ++ dlthis->cinit_addr = atmp; ++ dlthis->cinit_state = CI_copy; ++ break; ++ case CI_copy: /* copy bits to the target */ ++ init_count = HOST_TO_TDATA(left); ++ if (init_count > dlthis->cinit_count) ++ init_count = dlthis->cinit_count; ++ if (init_count == 0) ++ goto loopexit; /* get more bits */ ++ cinit_info = CINIT_INFO_INIT; ++ cinit_info.page = dlthis->cinit_page; ++ if (!dlthis->myio->writemem(dlthis->myio, pktp, ++ TDATA_TO_TADDR(dlthis->cinit_addr), ++ &cinit_info, ++ TDATA_TO_HOST(init_count))) { ++ dload_error(dlthis, E_INITFAIL, "write", ++ dlthis->cinit_addr); ++ } ++ dlthis->cinit_count -= init_count; ++ if (dlthis->cinit_count <= 0) { ++ dlthis->cinit_state = CI_count; ++ init_count = (init_count + CINIT_ALIGN - 1) & ++ -CINIT_ALIGN; ++ /* align to next init */ ++ } ++ pktp += TDATA_TO_HOST(init_count); ++ dlthis->cinit_addr += init_count; ++ break; ++ case CI_done: /* no more .cinit to do */ ++ return; ++ } /* switch (cinit_state) */ ++ } /* while */ ++ ++loopexit: ++ if (left > 0) { ++ dload_error(dlthis, "%d bytes left over in cinit packet", left); ++ dlthis->cinit_state = CI_done; /* left over bytes are bad */ ++ } ++} /* cload_cinit */ ++ ++/* Functions to interface to reloc.c ++ * ++ * reloc.c is the relocation module borrowed from the linker, with ++ * minimal (we hope) changes for our purposes. cload_sect_data() invokes ++ * this module on a section to relocate and load the image data for that ++ * section. The actual read and write actions are supplied by the global ++ * routines below. ++ */ ++ ++/************************************************************************ ++ * Procedure relocate_packet ++ * ++ * Parameters: ++ * ipacket Pointer to an image packet to relocate ++ * ++ * Effect: ++ * Performs the required relocations on the packet. Returns a checksum ++ * of the relocation operations. ++ ************************************************************************/ ++#define MY_RELOC_BUF_SIZ 8 ++/* careful! exists at the same time as the image buffer*/ ++static int relocate_packet(struct dload_state *dlthis, ++ struct image_packet_t *ipacket, u32 *checks) ++{ ++ u32 rnum; ++ ++ rnum = ipacket->i_num_relocs; ++ do { /* all relocs */ ++ unsigned rinbuf; ++ int siz; ++ struct reloc_record_t *rp, rrec[MY_RELOC_BUF_SIZ]; ++ rp = rrec; ++ rinbuf = rnum > MY_RELOC_BUF_SIZ ? 
MY_RELOC_BUF_SIZ : rnum; ++ siz = rinbuf * sizeof(struct reloc_record_t); ++ if (dlthis->strm->read_buffer(dlthis->strm, rp, siz) != siz) { ++ DL_ERROR(E_READSTRM, "relocation"); ++ return 0; ++ } ++ /* reorder the bytes if need be */ ++ if (dlthis->reorder_map) ++ dload_reorder(rp, siz, dlthis->reorder_map); ++ ++ *checks += dload_checksum(rp, siz); ++ do { ++ /* perform the relocation operation */ ++ dload_relocate(dlthis, (TgtAU_t *) ipacket->i_bits, rp); ++ rp += 1; ++ rnum -= 1; ++ } while ((rinbuf -= 1) > 0); ++ } while (rnum > 0); /* all relocs */ ++ return 1; ++} /* dload_read_reloc */ ++ ++#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32)) ++ ++/* VERY dangerous */ ++static const char IMAGEPAK[] = { "image packet" }; ++ ++/************************************************************************* ++ * Procedure dload_data ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Read image data from input file, relocate it, and download it to the ++ * target. ++ ************************************************************************/ ++static void dload_data(struct dload_state *dlthis) ++{ ++ u16 curr_sect; ++ struct doff_scnhdr_t *sptr = dlthis->sect_hdrs; ++ struct LDR_SECTION_INFO *lptr = dlthis->ldr_sections; ++#ifdef OPT_ZERO_COPY_LOADER ++ boolean bZeroCopy = false; ++#endif ++ u8 *pDest; ++ ++ struct { ++ struct image_packet_t ipacket; ++ u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)]; ++ } ibuf; ++ ++ /* Indicates whether CINIT processing has occurred */ ++ boolean cinit_processed = false; ++ ++ /* Loop through the sections and load them one at a time. ++ */ ++ for (curr_sect = 0; curr_sect < dlthis->dfile_hdr.df_no_scns; ++ curr_sect += 1) { ++ if (DS_NEEDS_DOWNLOAD(sptr)) { ++ s32 nip; ++ LDR_ADDR image_offset = 0; ++ /* set relocation info for this section */ ++ if (curr_sect < dlthis->allocated_secn_count) ++ dlthis->delta_runaddr = sptr->ds_paddr; ++ else { ++ lptr = DOFFSEC_IS_LDRSEC(sptr); ++ dlthis->delta_runaddr = 0; ++ } ++ dlthis->image_secn = lptr; ++#if BITS_PER_AU > BITS_PER_BYTE ++ lptr->name = unpack_name(dlthis, sptr->ds_offset); ++#endif ++ nip = sptr->ds_nipacks; ++ while ((nip -= 1) >= 0) { /* process packets */ ++ ++ s32 ipsize; ++ u32 checks; ++ /* get the fixed header bits */ ++ if (dlthis->strm->read_buffer(dlthis->strm, ++ &ibuf.ipacket, IPH_SIZE) != IPH_SIZE) { ++ DL_ERROR(E_READSTRM, IMAGEPAK); ++ return; ++ } ++ /* reorder the header if need be */ ++ if (dlthis->reorder_map) { ++ dload_reorder(&ibuf.ipacket, IPH_SIZE, ++ dlthis->reorder_map); ++ } ++ /* now read the rest of the packet */ ++ ipsize = ++ BYTE_TO_HOST(DOFF_ALIGN ++ (ibuf.ipacket.i_packet_size)); ++ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) { ++ DL_ERROR("Bad image packet size %d", ++ ipsize); ++ return; ++ } ++ pDest = ibuf.bufr; ++#ifdef OPT_ZERO_COPY_LOADER ++ bZeroCopy = false; ++ if (DLOAD_SECT_TYPE(sptr) != DLOAD_CINIT) { ++ dlthis->myio->writemem(dlthis->myio, ++ &pDest, lptr->load_addr + ++ image_offset, lptr, 0); ++ bZeroCopy = (pDest != ibuf.bufr); ++ } ++#endif ++ /* End of determination */ ++ ++ if (dlthis->strm->read_buffer(dlthis->strm, ++ ibuf.bufr, ipsize) != ipsize) { ++ DL_ERROR(E_READSTRM, IMAGEPAK); ++ return; ++ } ++ ibuf.ipacket.i_bits = pDest; ++ ++ /* reorder the bytes if need be */ ++#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16) ++ if (dlthis->reorder_map) { ++ dload_reorder(pDest, ipsize, ++ dlthis->reorder_map); ++ } ++ checks = dload_checksum(pDest, ipsize); ++#else ++ if (dlthis->dfile_hdr.df_byte_reshuffle != ++ TARGET_ORDER(REORDER_MAP ++ 
(BYTE_RESHUFFLE_VALUE))) { ++ /* put image bytes in big-endian order, ++ * not PC order */ ++ dload_reorder(pDest, ipsize, ++ TARGET_ORDER ++ (dlthis->dfile_hdr.df_byte_reshuffle)); ++ } ++#if TARGET_AU_BITS > 8 ++ checks = dload_reverse_checksum_16(pDest, ++ ipsize); ++#else ++ checks = dload_reverse_checksum(pDest, ++ ipsize); ++#endif ++#endif ++ ++ checks += dload_checksum(&ibuf.ipacket, ++ IPH_SIZE); ++ /* relocate the image bits as needed */ ++ if (ibuf.ipacket.i_num_relocs) { ++ dlthis->image_offset = image_offset; ++ if (!relocate_packet(dlthis, ++ &ibuf.ipacket, &checks)) ++ return; /* serious error */ ++ } ++ if (~checks) ++ DL_ERROR(E_CHECKSUM, IMAGEPAK); ++ /* stuff the result into target memory */ ++ if (DLOAD_SECT_TYPE(sptr) == DLOAD_CINIT) { ++ cload_cinit(dlthis, &ibuf.ipacket); ++ cinit_processed = true; ++ } else { ++#ifdef OPT_ZERO_COPY_LOADER ++ if (!bZeroCopy) { ++#endif ++ ++ if (!dlthis->myio->writemem ++ (dlthis->myio, ibuf.bufr, ++ lptr->load_addr + image_offset, lptr, ++ BYTE_TO_HOST ++ (ibuf.ipacket.i_packet_size))) { ++ DL_ERROR( ++ "Write to " FMT_UI32 " failed", ++ lptr->load_addr + image_offset); ++ } ++#ifdef OPT_ZERO_COPY_LOADER ++ } ++#endif ++ ++ } ++ image_offset += ++ BYTE_TO_TADDR(ibuf.ipacket.i_packet_size); ++ } /* process packets */ ++ /* if this is a BSS section, we may want to fill it */ ++ if (DLOAD_SECT_TYPE(sptr) != DLOAD_BSS) ++ goto loop_cont; ++ ++ if (!(dlthis->myoptions & DLOAD_INITBSS)) ++ goto loop_cont; ++ ++ if (cinit_processed) { ++ /* Don't clear BSS after load-time ++ * initialization */ ++ DL_ERROR ++ ("Zero-initialization at " FMT_UI32 " after " ++ "load-time initialization!", lptr->load_addr); ++ goto loop_cont; ++ } ++ /* fill the .bss area */ ++ dlthis->myio->fillmem(dlthis->myio, ++ TADDR_TO_HOST(lptr->load_addr), ++ lptr, TADDR_TO_HOST(lptr->size), ++ dload_fill_bss); ++ goto loop_cont; ++ } /* if DS_DOWNLOAD_MASK */ ++ /* If not loading, but BSS, zero initialize */ ++ if (DLOAD_SECT_TYPE(sptr) != DLOAD_BSS) ++ goto loop_cont; ++ ++ if (!(dlthis->myoptions & DLOAD_INITBSS)) ++ goto loop_cont; ++ ++ if (curr_sect >= dlthis->allocated_secn_count) ++ lptr = DOFFSEC_IS_LDRSEC(sptr); ++ ++ if (cinit_processed) { ++ /*Don't clear BSS after load-time initialization */ ++ DL_ERROR( ++ "Zero-initialization at " FMT_UI32 " attempted after " ++ "load-time initialization!", lptr->load_addr); ++ goto loop_cont; ++ } ++ /* fill the .bss area */ ++ dlthis->myio->fillmem(dlthis->myio, ++ TADDR_TO_HOST(lptr->load_addr), lptr, ++ TADDR_TO_HOST(lptr->size), dload_fill_bss); ++loop_cont: ++ sptr += 1; ++ lptr += 1; ++ } /* load sections */ ++} /* dload_data */ ++ ++/************************************************************************* ++ * Procedure dload_reorder ++ * ++ * Parameters: ++ * data 32-bit aligned pointer to data to be byte-swapped ++ * dsiz size of the data to be reordered in sizeof() units. ++ * map 32-bit map defining how to reorder the data. Value ++ * must be REORDER_MAP() of some permutation ++ * of 0x00 01 02 03 ++ * ++ * Effect: ++ * Re-arranges the bytes in each word according to the map specified. 
++ * ++ ************************************************************************/ ++/* mask for byte shift count */ ++#define SHIFT_COUNT_MASK (3 << LOG_BITS_PER_BYTE) ++ ++void dload_reorder(void *data, int dsiz, unsigned int map) ++{ ++ register u32 tmp, tmap, datv; ++ u32 *dp = (u32 *)data; ++ ++ map <<= LOG_BITS_PER_BYTE; /* align map with SHIFT_COUNT_MASK */ ++ do { ++ tmp = 0; ++ datv = *dp; ++ tmap = map; ++ do { ++ tmp |= (datv & BYTE_MASK) << (tmap & SHIFT_COUNT_MASK); ++ tmap >>= BITS_PER_BYTE; ++ } while (datv >>= BITS_PER_BYTE); ++ *dp++ = tmp; ++ } while ((dsiz -= sizeof(u32)) > 0); ++} /* dload_reorder */ ++ ++/************************************************************************* ++ * Procedure dload_checksum ++ * ++ * Parameters: ++ * data 32-bit aligned pointer to data to be checksummed ++ * siz size of the data to be checksummed in sizeof() units. ++ * ++ * Effect: ++ * Returns a checksum of the specified block ++ * ++ ************************************************************************/ ++u32 dload_checksum(void *data, unsigned siz) ++{ ++ u32 sum; ++ u32 *dp; ++ int left; ++ ++ sum = 0; ++ dp = (u32 *)data; ++ for (left = siz; left > 0; left -= sizeof(u32)) ++ sum += *dp++; ++ return sum; ++} /* dload_checksum */ ++ ++#if HOST_ENDIANNESS ++/************************************************************************* ++ * Procedure dload_reverse_checksum ++ * ++ * Parameters: ++ * data 32-bit aligned pointer to data to be checksummed ++ * siz size of the data to be checksummed in sizeof() units. ++ * ++ * Effect: ++ * Returns a checksum of the specified block, which is assumed to be bytes ++ * in big-endian order. ++ * ++ * Notes: ++ * In a big-endian host, things like the string table are stored as bytes ++ * in host order. But dllcreate always checksums in little-endian order. ++ * It is most efficient to just handle the difference a word at a time. ++ * ++ ***********************************************************************/ ++u32 dload_reverse_checksum(void *data, unsigned siz) ++{ ++ u32 sum, temp; ++ u32 *dp; ++ int left; ++ ++ sum = 0; ++ dp = (u32 *)data; ++ ++ for (left = siz; left > 0; left -= sizeof(u32)) { ++ temp = *dp++; ++ sum += temp << BITS_PER_BYTE * 3; ++ sum += temp >> BITS_PER_BYTE * 3; ++ sum += (temp >> BITS_PER_BYTE) & (BYTE_MASK << BITS_PER_BYTE); ++ sum += (temp & (BYTE_MASK << BITS_PER_BYTE)) << BITS_PER_BYTE; ++ } ++ ++ return sum; ++} /* dload_reverse_checksum */ ++ ++#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32) ++u32 dload_reverse_checksum_16(void *data, unsigned siz) ++{ ++ uint_fast32_t sum, temp; ++ u32 *dp; ++ int left; ++ ++ sum = 0; ++ dp = (u32 *)data; ++ ++ for (left = siz; left > 0; left -= sizeof(u32)) { ++ temp = *dp++; ++ sum += temp << BITS_PER_BYTE * 2; ++ sum += temp >> BITS_PER_BYTE * 2; ++ } ++ ++ return sum; ++} /* dload_reverse_checksum_16 */ ++#endif ++#endif ++ ++/************************************************************************* ++ * Procedure swap_words ++ * ++ * Parameters: ++ * data 32-bit aligned pointer to data to be swapped ++ * siz size of the data to be swapped. 
++ * bitmap Bit map of how to swap each 32-bit word; 1 => 2 shorts, ++ * 0 => 1 long ++ * ++ * Effect: ++ * Swaps the specified data according to the specified map ++ * ++ ************************************************************************/ ++static void swap_words(void *data, unsigned siz, unsigned bitmap) ++{ ++ register int i; ++#if TARGET_AU_BITS < 16 ++ register u16 *sp; ++#endif ++ register u32 *lp; ++ ++ siz /= sizeof(u16); ++ ++#if TARGET_AU_BITS < 16 ++ /* pass 1: do all the bytes */ ++ i = siz; ++ sp = (u16 *) data; ++ do { ++ register u16 tmp; ++ tmp = *sp; ++ *sp++ = SWAP16BY8(tmp); ++ } while ((i -= 1) > 0); ++#endif ++ ++#if TARGET_AU_BITS < 32 ++ /* pass 2: fixup the 32-bit words */ ++ i = siz >> 1; ++ lp = (u32 *) data; ++ do { ++ if ((bitmap & 1) == 0) { ++ register u32 tmp; ++ tmp = *lp; ++ *lp = SWAP32BY16(tmp); ++ } ++ lp += 1; ++ bitmap >>= 1; ++ } while ((i -= 1) > 0); ++#endif ++} /* swap_words */ ++ ++/************************************************************************* ++ * Procedure copy_tgt_strings ++ * ++ * Parameters: ++ * dstp Destination address. Assumed to be 32-bit aligned ++ * srcp Source address. Assumed to be 32-bit aligned ++ * charcount Number of characters to copy. ++ * ++ * Effect: ++ * Copies strings from the source (which is in usual .dof file order on ++ * the loading processor) to the destination buffer (which should be in proper ++ * target addressable unit order). Makes sure the last string in the ++ * buffer is NULL terminated (for safety). ++ * Returns the first unused destination address. ++ ************************************************************************/ ++static char *copy_tgt_strings(void *dstp, void *srcp, unsigned charcount) ++{ ++ register TgtAU_t *src = (TgtAU_t *)srcp; ++ register TgtAU_t *dst = (TgtAU_t *)dstp; ++ register int cnt = charcount; ++ do { ++#if TARGET_AU_BITS <= BITS_PER_AU ++ /* byte-swapping issues may exist for strings on target */ ++ *dst++ = *src++; ++#elif TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN) ++ register TgtAU_t tmp; ++ tmp = *src++; ++ *dst++ = SWAP16BY8(tmp); /* right for TARGET_AU_BITS == 16 */ ++#else ++ *dst++ = *src++; ++#endif ++ } while ((cnt -= (sizeof(TgtAU_t) * BITS_PER_AU / BITS_PER_BYTE)) > 0); ++ /*apply force to make sure that the string table has null terminator */ ++#if (BITS_PER_AU == BITS_PER_BYTE) && (TARGET_AU_BITS == BITS_PER_BYTE) ++ dst[-1] = 0; ++#elif TARGET_BIG_ENDIAN ++ dst[-1] &= ~BYTE_MASK; /* big-endian */ ++#else ++ dst[-1] &= (1 << (BITS_PER_AU - BITS_PER_BYTE)) - 1; /* little endian */ ++#endif ++ return (char *)dst; ++} /* copy_tgt_strings */ ++ ++/************************************************************************* ++ * Procedure init_module_handle ++ * ++ * Parameters: ++ * none ++ * ++ * Effect: ++ * Initializes the module handle we use to enable unloading, and installs ++ * the debug information required by the target. ++ * ++ * Notes: ++ * The handle returned from Dynamic_Load_Module needs to encapsulate all the ++ * allocations done for the module, and enable them plus the modules symbols to ++ * be deallocated. 
++ * ++ ************************************************************************/ ++#ifndef _BIG_ENDIAN ++static const struct LDR_SECTION_INFO DLLVIEW_INFO_INIT = { ".dllview", 0, 0, ++ (LDR_ADDR) -1, DBG_LIST_PAGE, DLOAD_DATA, 0 }; ++#else ++static const struct LDR_SECTION_INFO DLLVIEW_INFO_INIT = { ".dllview", 0, 0, ++ (LDR_ADDR) -1, DLOAD_DATA, DBG_LIST_PAGE, 0 }; ++#endif ++static void init_module_handle(struct dload_state *dlthis) ++{ ++ struct my_handle *hndl; ++ u16 curr_sect; ++ struct LDR_SECTION_INFO *asecs; ++ struct dll_module *dbmod; ++ struct dll_sect *dbsec; ++ struct dbg_mirror_root *mlist; ++ register char *cp; ++ struct modules_header mhdr; ++ struct LDR_SECTION_INFO dllview_info; ++ struct dynload_symbol *debug_mirror_sym; ++ hndl = dlthis->myhandle; ++ if (!hndl) ++ return; /* must be errors detected, so forget it */ ++ hndl->secn_count = dlthis->allocated_secn_count << 1; ++#ifndef TARGET_ENDIANNESS ++ if (dlthis->big_e_target) ++ hndl->secn_count += 1; /* flag for big-endian */ ++#endif ++ if (dlthis->dload_errcount) ++ return; /* abandon if errors detected */ ++ /* Locate the symbol that names the header for the CCS debug list ++ of modules. If not found, we just don't generate the debug record. ++ If found, we create our modules list. We make sure to create the ++ LOADER_DLLVIEW_ROOT even if there is no relocation info to record, ++ just to try to put both symbols in the same symbol table and ++ module.*/ ++ debug_mirror_sym = dlthis->mysym->Find_Matching_Symbol(dlthis->mysym, ++ LOADER_DLLVIEW_ROOT); ++ if (!debug_mirror_sym) { ++ struct dynload_symbol *dlmodsym; ++ struct dbg_mirror_root *mlst; ++ ++ /* our root symbol is not yet present; ++ check if we have DLModules defined */ ++ dlmodsym = dlthis->mysym->Find_Matching_Symbol(dlthis->mysym, ++ LINKER_MODULES_HEADER); ++ if (!dlmodsym) ++ return; /* no DLModules list so no debug info */ ++ /* if we have DLModules defined, construct our header */ ++ mlst = (struct dbg_mirror_root *) ++ dlthis->mysym->Allocate(dlthis->mysym, ++ sizeof(struct dbg_mirror_root)); ++ if (!mlst) { ++ DL_ERROR(E_ALLOC, sizeof(struct dbg_mirror_root)); ++ return; ++ } ++ mlst->hnext = NULL; ++ mlst->changes = 0; ++ mlst->refcount = 0; ++ mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value); ++ /* add our root symbol */ ++ debug_mirror_sym = dlthis->mysym->Add_To_Symbol_Table ++ (dlthis->mysym, LOADER_DLLVIEW_ROOT, ++ (unsigned)dlthis->myhandle); ++ if (!debug_mirror_sym) { ++ /* failed, recover memory */ ++ dlthis->mysym->Deallocate(dlthis->mysym, mlst); ++ return; ++ } ++ debug_mirror_sym->value = (u32)mlst; ++ } ++ /* First create the DLLview record and stuff it into the buffer. ++ Then write it to the DSP. 
Record pertinent locations in our hndl, ++ and add it to the per-processor list of handles with debug info.*/ ++#ifndef DEBUG_HEADER_IN_LOADER ++ mlist = (struct dbg_mirror_root *)debug_mirror_sym->value; ++ if (!mlist) ++ return; ++#else ++ mlist = (struct dbg_mirror_root *)&debug_list_header; ++#endif ++ hndl->dm.hroot = mlist; /* set pointer to root into our handle */ ++ if (!dlthis->allocated_secn_count) ++ return; /* no load addresses to be recorded */ ++ /* reuse temporary symbol storage */ ++ dbmod = (struct dll_module *) dlthis->local_symtab; ++ /* Create the DLLview record in the memory we retain for our handle*/ ++ dbmod->num_sects = dlthis->allocated_secn_count; ++ dbmod->timestamp = dlthis->verify.dv_timdat; ++ dbmod->version = INIT_VERSION; ++ dbmod->verification = VERIFICATION; ++ asecs = dlthis->ldr_sections; ++ dbsec = dbmod->sects; ++ for (curr_sect = dlthis->allocated_secn_count; ++ curr_sect > 0; curr_sect -= 1) { ++ dbsec->sect_load_adr = asecs->load_addr; ++ dbsec->sect_run_adr = asecs->run_addr; ++ dbsec += 1; ++ asecs += 1; ++ } ++ /* now cram in the names */ ++ cp = copy_tgt_strings(dbsec, dlthis->str_head, ++ dlthis->debug_string_size); ++ ++ /* round off the size of the debug record, and remember same */ ++ hndl->dm.dbsiz = HOST_TO_TDATA_ROUND(cp - (char *)dbmod); ++ *cp = 0; /* strictly to make our test harness happy */ ++ dllview_info = DLLVIEW_INFO_INIT; ++ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz); ++ /* Initialize memory context to default heap */ ++ dllview_info.context = 0; ++ hndl->dm.context = 0; ++ /* fill in next pointer and size */ ++ if (mlist->hnext) { ++ dbmod->next_module = TADDR_TO_TDATA(mlist->hnext->dm.dbthis); ++ dbmod->next_module_size = mlist->hnext->dm.dbsiz; ++ } else { ++ dbmod->next_module_size = 0; ++ dbmod->next_module = 0; ++ } ++ /* allocate memory for on-DSP DLLview debug record */ ++ if (!dlthis->myalloc) ++ return; ++ if (!dlthis->myalloc->Allocate(dlthis->myalloc, &dllview_info, ++ HOST_TO_TADDR(sizeof(u32)))) { ++ return; ++ } ++ /* Store load address of .dllview section */ ++ hndl->dm.dbthis = dllview_info.load_addr; ++ /* Store memory context (segid) in which .dllview section ++ * was allocated */ ++ hndl->dm.context = dllview_info.context; ++ mlist->refcount += 1; ++ /* swap bytes in the entire debug record, but not the string table */ ++ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) { ++ swap_words(dbmod, (char *)dbsec - (char *)dbmod, ++ DLL_MODULE_BITMAP); ++ } ++ /* Update the DLLview list on the DSP write new record */ ++ if (!dlthis->myio->writemem(dlthis->myio, dbmod, ++ dllview_info.load_addr, &dllview_info, ++ TADDR_TO_HOST(dllview_info.size))) { ++ return; ++ } ++ /* write new header */ ++ mhdr.first_module_size = hndl->dm.dbsiz; ++ mhdr.first_module = TADDR_TO_TDATA(dllview_info.load_addr); ++ /* swap bytes in the module header, if needed */ ++ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) { ++ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), ++ MODULES_HEADER_BITMAP); ++ } ++ dllview_info = DLLVIEW_INFO_INIT; ++ if (!dlthis->myio->writemem(dlthis->myio, &mhdr, mlist->dbthis, ++ &dllview_info, sizeof(struct modules_header) - ++ sizeof(u16))) { ++ return; ++ } ++ /* Add the module handle to this processor's list ++ of handles with debug info */ ++ hndl->dm.hnext = mlist->hnext; ++ if (hndl->dm.hnext) ++ hndl->dm.hnext->dm.hprev = hndl; ++ hndl->dm.hprev = (struct my_handle *) mlist; ++ mlist->hnext = hndl; /* insert after root*/ ++} /* init_module_handle */ ++ 
++/************************************************************************* ++ * Procedure Dynamic_Unload_Module ++ * ++ * Parameters: ++ * mhandle A module handle from Dynamic_Load_Module ++ * syms Host-side symbol table and malloc/free functions ++ * alloc Target-side memory allocation ++ * ++ * Effect: ++ * The module specified by mhandle is unloaded. Unloading causes all ++ * target memory to be deallocated, all symbols defined by the module to ++ * be purged, and any host-side storage used by the dynamic loader for ++ * this module to be released. ++ * ++ * Returns: ++ * Zero for success. On error, the number of errors detected is returned. ++ * Individual errors are reported using syms->Error_Report(). ++ ************************************************************************/ ++int Dynamic_Unload_Module(DLOAD_mhandle mhandle, ++ struct Dynamic_Loader_Sym *syms, ++ struct Dynamic_Loader_Allocate *alloc, ++ struct Dynamic_Loader_Initialize *init) ++{ ++ s16 curr_sect; ++ struct LDR_SECTION_INFO *asecs; ++ struct my_handle *hndl; ++ struct dbg_mirror_root *root; ++ unsigned errcount = 0; ++ struct LDR_SECTION_INFO dllview_info = DLLVIEW_INFO_INIT; ++ struct modules_header mhdr; ++ ++ hndl = (struct my_handle *)mhandle; ++ if (!hndl) ++ return 0; /* if handle is null, nothing to do */ ++ /* Clear out the module symbols ++ * Note that if this is the module that defined MODULES_HEADER ++ (the head of the target debug list) ++ * then this operation will blow away that symbol. ++ It will therefore be impossible for subsequent ++ * operations to add entries to this un-referenceable list.*/ ++ if (!syms) ++ return 1; ++ syms->Purge_Symbol_Table(syms, (unsigned) hndl); ++ /* Deallocate target memory for sections */ ++ asecs = hndl->secns; ++ if (alloc) ++ for (curr_sect = (hndl->secn_count >> 1); curr_sect > 0; ++ curr_sect -= 1) { ++ asecs->name = NULL; ++ alloc->Deallocate(alloc, asecs++); ++ } ++ root = hndl->dm.hroot; ++ if (!root) { ++ /* there is a debug list containing this module */ ++ goto func_end; ++ } ++ if (!hndl->dm.dbthis) { /* target-side dllview record exists */ ++ goto loop_end; ++ } ++ /* Retrieve memory context in which .dllview was allocated */ ++ dllview_info.context = hndl->dm.context; ++ if (hndl->dm.hprev == hndl) ++ goto exitunltgt; ++ ++ /* target-side dllview record is in list */ ++ /* dequeue this record from our GPP-side mirror list */ ++ hndl->dm.hprev->dm.hnext = hndl->dm.hnext; ++ if (hndl->dm.hnext) ++ hndl->dm.hnext->dm.hprev = hndl->dm.hprev; ++ /* Update next_module of previous entry in target list ++ * We are using mhdr here as a surrogate for either a ++ struct modules_header or a dll_module */ ++ if (hndl->dm.hnext) { ++ mhdr.first_module = TADDR_TO_TDATA(hndl->dm.hnext->dm.dbthis); ++ mhdr.first_module_size = hndl->dm.hnext->dm.dbsiz; ++ } else { ++ mhdr.first_module = 0; ++ mhdr.first_module_size = 0; ++ } ++ if (!init) ++ goto exitunltgt; ++ ++ if (!init->connect(init)) { ++ dload_syms_error(syms, E_ICONNECT); ++ errcount += 1; ++ goto exitunltgt; ++ } ++ /* swap bytes in the module header, if needed */ ++ if (TARGET_ENDIANNESS_DIFFERS(hndl->secn_count & 0x1)) { ++ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), ++ MODULES_HEADER_BITMAP); ++ } ++ if (!init->writemem(init, &mhdr, hndl->dm.hprev->dm.dbthis, ++ &dllview_info, sizeof(struct modules_header) - ++ sizeof(mhdr.update_flag))) { ++ dload_syms_error(syms, E_DLVWRITE); ++ errcount += 1; ++ } ++ /* update change counter */ ++ root->changes += 1; ++ if (!init->writemem(init, 
&(root->changes), ++ root->dbthis + HOST_TO_TADDR ++ (sizeof(mhdr.first_module) + ++ sizeof(mhdr.first_module_size)), ++ &dllview_info, ++ sizeof(mhdr.update_flag))) { ++ dload_syms_error(syms, E_DLVWRITE); ++ errcount += 1; ++ } ++ init->release(init); ++exitunltgt: ++ /* release target storage */ ++ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz); ++ dllview_info.load_addr = hndl->dm.dbthis; ++ if (alloc) ++ alloc->Deallocate(alloc, &dllview_info); ++ root->refcount -= 1; ++ /* target-side dllview record exists */ ++loop_end: ++#ifndef DEBUG_HEADER_IN_LOADER ++ if (root->refcount <= 0) { ++ /* if all references gone, blow off the header */ ++ /* our root symbol may be gone due to the Purge above, ++ but if not, do not destroy the root */ ++ if (syms->Find_Matching_Symbol ++ (syms, LOADER_DLLVIEW_ROOT) == NULL) ++ syms->Deallocate(syms, root); ++ } ++#endif ++func_end: ++ /* there is a debug list containing this module */ ++ syms->Deallocate(syms, mhandle); /* release our storage */ ++ return errcount; ++} /* Dynamic_Unload_Module */ ++ ++#if BITS_PER_AU > BITS_PER_BYTE ++/************************************************************************* ++ * Procedure unpack_name ++ * ++ * Parameters: ++ * soffset Byte offset into the string table ++ * ++ * Effect: ++ * Returns a pointer to the string specified by the offset supplied, or ++ * NULL for error. ++ * ++ ************************************************************************/ ++static char *unpack_name(struct dload_state *dlthis, u32 soffset) ++{ ++ u8 tmp, *src; ++ char *dst; ++ ++ if (soffset >= dlthis->dfile_hdr.df_strtab_size) { ++ dload_error(dlthis, "Bad string table offset " FMT_UI32, ++ soffset); ++ return NULL; ++ } ++ src = (uint_least8_t *)dlthis->str_head + ++ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)); ++ dst = dlthis->str_temp; ++ if (soffset & 1) ++ *dst++ = *src++; /* only 1 character in first word */ ++ do { ++ tmp = *src++; ++ *dst = (tmp >> BITS_PER_BYTE); ++ if (!(*dst++)) ++ break; ++ } while ((*dst++ = tmp & BYTE_MASK)); ++ dlthis->temp_len = dst - dlthis->str_temp; ++ /* squirrel away length including terminating null */ ++ return dlthis->str_temp; ++} /* unpack_name */ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dlclasses_hdr.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/dlclasses_hdr.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dlclasses_hdr.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/dlclasses_hdr.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,41 @@ ++/* ++ * dlclasses_hdr.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++ ++ ++#ifndef _DLCLASSES_HDR_H ++#define _DLCLASSES_HDR_H ++ ++/***************************************************************************** ++ ***************************************************************************** ++ * ++ * DLCLASSES_HDR.H ++ * ++ * Sample classes in support of the dynamic loader ++ * ++ * These are just concrete derivations of the virtual ones in dynamic_loader.h ++ * with a few additional interfaces for init, etc. ++ ***************************************************************************** ++ *****************************************************************************/ ++ ++#include ++ ++#include "DLstream.h" ++#include "DLsymtab.h" ++#include "DLalloc.h" ++#include "DLinit.h" ++ ++#endif /* _DLCLASSES_HDR_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dload_internal.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/dload_internal.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dload_internal.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/dload_internal.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,237 @@ ++/* ++ * dload_internal.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++ ++#ifndef __DLOAD_INTERNAL__ ++#define __DLOAD_INTERNAL__ ++ ++#include ++ ++/* ++ * Internal state definitions for the dynamic loader ++ */ ++ ++#define TRUE 1 ++#define FALSE 0 ++typedef int boolean; ++ ++ ++/* type used for relocation intermediate results */ ++typedef s32 RVALUE; ++ ++/* unsigned version of same; must have at least as many bits */ ++typedef u32 URVALUE; ++ ++/* ++ * Dynamic loader configuration constants ++ */ ++/* error issued if input has more sections than this limit */ ++#define REASONABLE_SECTION_LIMIT 100 ++ ++/* (Addressable unit) value used to clear BSS section */ ++#define dload_fill_bss 0 ++ ++/* ++ * Reorder maps explained (?) ++ * ++ * The doff file format defines a 32-bit pattern used to determine the ++ * byte order of an image being read. That value is ++ * BYTE_RESHUFFLE_VALUE == 0x00010203 ++ * For purposes of the reorder routine, we would rather have the all-is-OK ++ * for 32-bits pattern be 0x03020100. This first macro makes the ++ * translation from doff file header value to MAP value: */ ++#define REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303) ++/* This translation is made in dload_headers. Thereafter, the all-is-OK ++ * value for the maps stored in dlthis is REORDER_MAP(BYTE_RESHUFFLE_VALUE). ++ * But sadly, not all bits of the doff file are 32-bit integers. ++ * The notable exceptions are strings and image bits. ++ * Strings obey host byte order: */ ++#if defined(_BIG_ENDIAN) ++#define HOST_BYTE_ORDER(cookedmap) ((cookedmap) ^ 0x3030303) ++#else ++#define HOST_BYTE_ORDER(cookedmap) (cookedmap) ++#endif ++/* Target bits consist of target AUs (could be bytes, or 16-bits, ++ * or 32-bits) stored as an array in host order. 
A target order ++ * map is defined by: */ ++#if !defined(_BIG_ENDIAN) || TARGET_AU_BITS > 16 ++#define TARGET_ORDER(cookedmap) (cookedmap) ++#elif TARGET_AU_BITS > 8 ++#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x2020202) ++#else ++#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x3030303) ++#endif ++ ++/* forward declaration for handle returned by dynamic loader */ ++struct my_handle; ++ ++/* ++ * a list of module handles, which mirrors the debug list on the target ++ */ ++struct dbg_mirror_root { ++ /* must be same as dbg_mirror_list; __DLModules address on target */ ++ u32 dbthis; ++ struct my_handle *hnext; /* must be same as dbg_mirror_list */ ++ u16 changes; /* change counter */ ++ u16 refcount; /* number of modules referencing this root */ ++} ; ++ ++struct dbg_mirror_list { ++ u32 dbthis; ++ struct my_handle *hnext, *hprev; ++ struct dbg_mirror_root *hroot; ++ u16 dbsiz; ++ u32 context; /* Save context for .dllview memory allocation */ ++} ; ++ ++#define VARIABLE_SIZE 1 ++/* ++ * the structure we actually return as an opaque module handle ++ */ ++struct my_handle { ++ struct dbg_mirror_list dm; /* !!! must be first !!! */ ++ /* sections following << 1, LSB is set for big-endian target */ ++ u16 secn_count; ++ struct LDR_SECTION_INFO secns[VARIABLE_SIZE]; ++} ; ++#define MY_HANDLE_SIZE (sizeof(struct my_handle) -\ ++ sizeof(struct LDR_SECTION_INFO)) ++/* real size of my_handle */ ++ ++/* ++ * reduced symbol structure used for symbols during relocation ++ */ ++struct Local_Symbol { ++ s32 value; /* Relocated symbol value */ ++ s32 delta; /* Original value in input file */ ++ s16 secnn; /* section number */ ++ s16 sclass; /* symbol class */ ++} ; ++ ++/* ++ * States of the .cinit state machine ++ */ ++enum cinit_mode { ++ CI_count = 0, /* expecting a count */ ++ CI_address, /* expecting an address */ ++#if CINIT_ALIGN < CINIT_ADDRESS /* handle case of partial address field */ ++ CI_partaddress, /* have only part of the address */ ++#endif ++ CI_copy, /* in the middle of copying data */ ++ CI_done /* end of .cinit table */ ++}; ++ ++/* ++ * The internal state of the dynamic loader, which is passed around as ++ * an object ++ */ ++struct dload_state { ++ struct Dynamic_Loader_Stream *strm; /* The module input stream */ ++ struct Dynamic_Loader_Sym *mysym; /* Symbols for this session */ ++ struct Dynamic_Loader_Allocate *myalloc; /* target memory allocator */ ++ struct Dynamic_Loader_Initialize *myio; /* target memory initializer */ ++ unsigned myoptions; /* Options parameter Dynamic_Load_Module */ ++ ++ char *str_head; /* Pointer to string table */ ++#if BITS_PER_AU > BITS_PER_BYTE ++ char *str_temp; /* Pointer to temporary buffer for strings */ ++ /* big enough to hold longest string */ ++ unsigned temp_len; /* length of last temporary string */ ++ char *xstrings; /* Pointer to buffer for expanded */ ++ /* strings for sec names */ ++#endif ++ /* Total size of strings for DLLView section names */ ++ unsigned debug_string_size; ++ /* Pointer to parallel section info for allocated sections only */ ++ struct doff_scnhdr_t *sect_hdrs; /* Pointer to section table */ ++ struct LDR_SECTION_INFO *ldr_sections; ++#if TMS32060 ++ /* The address of the start of the .bss section */ ++ LDR_ADDR bss_run_base; ++#endif ++ struct Local_Symbol *local_symtab; /* Relocation symbol table */ ++ ++ /* pointer to DL section info for the section being relocated */ ++ struct LDR_SECTION_INFO *image_secn; ++ /* change in run address for current section during relocation */ ++ LDR_ADDR delta_runaddr; ++ 
LDR_ADDR image_offset; /* offset of current packet in section */ ++ enum cinit_mode cinit_state; /* current state of cload_cinit() */ ++ int cinit_count; /* the current count */ ++ LDR_ADDR cinit_addr; /* the current address */ ++ s16 cinit_page; /* the current page */ ++ /* Handle to be returned by Dynamic_Load_Module */ ++ struct my_handle *myhandle; ++ unsigned dload_errcount; /* Total # of errors reported so far */ ++ /* Number of target sections that require allocation and relocation */ ++ unsigned allocated_secn_count; ++#ifndef TARGET_ENDIANNESS ++ boolean big_e_target; /* Target data in big-endian format */ ++#endif ++ /* map for reordering bytes, 0 if not needed */ ++ u32 reorder_map; ++ struct doff_filehdr_t dfile_hdr; /* DOFF file header structure */ ++ struct doff_verify_rec_t verify; /* Verify record */ ++ ++ int relstkidx; /* index into relocation value stack */ ++ /* relocation value stack used in relexp.c */ ++ RVALUE relstk[STATIC_EXPR_STK_SIZE]; ++ ++} ; ++ ++#ifdef TARGET_ENDIANNESS ++#define TARGET_BIG_ENDIAN TARGET_ENDIANNESS ++#else ++#define TARGET_BIG_ENDIAN (dlthis->big_e_target) ++#endif ++ ++/* ++ * Exports from cload.c to rest of the world ++ */ ++extern void dload_error(struct dload_state *dlthis, const char *errtxt, ...); ++extern void dload_syms_error(struct Dynamic_Loader_Sym *syms, ++ const char *errtxt, ...); ++extern void dload_headers(struct dload_state *dlthis); ++extern void dload_strings(struct dload_state *dlthis, boolean sec_names_only); ++extern void dload_sections(struct dload_state *dlthis); ++extern void dload_reorder(void *data, int dsiz, u32 map); ++extern u32 dload_checksum(void *data, unsigned siz); ++ ++#if HOST_ENDIANNESS ++extern uint32_t dload_reverse_checksum(void *data, unsigned siz); ++#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32) ++extern uint32_t dload_reverse_checksum_16(void *data, unsigned siz); ++#endif ++#endif ++ ++#define is_data_scn(zzz) (DLOAD_SECTION_TYPE((zzz)->type) != DLOAD_TEXT) ++#define is_data_scn_num(zzz) \ ++ (DLOAD_SECT_TYPE(&dlthis->sect_hdrs[(zzz)-1]) != DLOAD_TEXT) ++ ++/* ++ * exported by reloc.c ++ */ ++extern void dload_relocate(struct dload_state *dlthis, TgtAU_t *data, ++ struct reloc_record_t *rp); ++ ++extern RVALUE dload_unpack(struct dload_state *dlthis, TgtAU_t *data, ++ int fieldsz, int offset, unsigned sgn); ++ ++extern int dload_repack(struct dload_state *dlthis, RVALUE val, TgtAU_t *data, ++ int fieldsz, int offset, unsigned sgn); ++ ++#endif /* __DLOAD_INTERNAL__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/doff.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/doff.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/doff.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/doff.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,347 @@ ++/* ++ * doff.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++ ++/*****************************************************************************/ ++/* DOFF.H - Structures & definitions used for dynamically */ ++/* loaded modules file format. This format is a reformatted */ ++/* version of COFF.(see coff.h for details) It optimizes the */ ++/* layout for the dynamic loader. */ ++/* */ ++/* .dof files, when viewed as a sequence of 32-bit integers, look the same */ ++/* on big-endian and little-endian machines. */ ++/*****************************************************************************/ ++#ifndef _DOFF_H ++#define _DOFF_H ++ ++#ifndef UINT32_C ++#define UINT32_C(zzz) ((u32)zzz) ++#endif ++ ++#define BYTE_RESHUFFLE_VALUE UINT32_C(0x00010203) ++ ++/* DOFF file header containing fields categorizing the remainder of the file */ ++struct doff_filehdr_t { ++ ++ /* string table size, including filename, in bytes */ ++ u32 df_strtab_size; ++ ++ /* entry point if one exists */ ++ u32 df_entrypt; ++ ++ /* identifies byte ordering of file; ++ * always set to BYTE_RESHUFFLE_VALUE */ ++ u32 df_byte_reshuffle; ++ ++ /* Size of the string table up to and including the last section name */ ++ /* Size includes the name of the COFF file also */ ++ u32 df_scn_name_size; ++ ++#ifndef _BIG_ENDIAN ++ /* number of symbols */ ++ u16 df_no_syms; ++ ++ /* length in bytes of the longest string, including terminating NULL */ ++ /* excludes the name of the file */ ++ u16 df_max_str_len; ++ ++ /* total number of sections including no-load ones */ ++ u16 df_no_scns; ++ ++ /* number of sections containing target code allocated or downloaded */ ++ u16 df_target_scns; ++ ++ /* unique id for dll file format & version */ ++ u16 df_doff_version; ++ ++ /* identifies ISA */ ++ u16 df_target_id; ++ ++ /* useful file flags */ ++ u16 df_flags; ++ ++ /* section reference for entry point, N_UNDEF for none, */ ++ /* N_ABS for absolute address */ ++ s16 df_entry_secn; ++#else ++ /* length of the longest string, including terminating NULL */ ++ u16 df_max_str_len; ++ ++ /* number of symbols */ ++ u16 df_no_syms; ++ ++ /* number of sections containing target code allocated or downloaded */ ++ u16 df_target_scns; ++ ++ /* total number of sections including no-load ones */ ++ u16 df_no_scns; ++ ++ /* identifies ISA */ ++ u16 df_target_id; ++ ++ /* unique id for dll file format & version */ ++ u16 df_doff_version; ++ ++ /* section reference for entry point, N_UNDEF for none, */ ++ /* N_ABS for absolute address */ ++ s16 df_entry_secn; ++ ++ /* useful file flags */ ++ u16 df_flags; ++#endif ++ /* checksum for file header record */ ++ u32 df_checksum; ++ ++} ; ++ ++/* flags in the df_flags field */ ++#define DF_LITTLE 0x100 ++#define DF_BIG 0x200 ++#define DF_BYTE_ORDER (DF_LITTLE | DF_BIG) ++ ++/* Supported processors */ ++#define TMS470_ID 0x97 ++#define LEAD_ID 0x98 ++#define TMS32060_ID 0x99 ++#define LEAD3_ID 0x9c ++ ++/* Primary processor for loading */ ++#if TMS32060 ++#define TARGET_ID TMS32060_ID ++#endif ++ ++/* Verification record containing values used to test integrity of the bits */ ++struct doff_verify_rec_t { ++ ++ /* time and date stamp */ ++ u32 dv_timdat; ++ ++ /* checksum for all section records */ ++ u32 dv_scn_rec_checksum; ++ ++ /* checksum for string table */ ++ u32 dv_str_tab_checksum; ++ ++ /* checksum for symbol table */ ++ u32 dv_sym_tab_checksum; ++ ++ /* checksum for verification record */ ++ u32 dv_verify_rec_checksum; ++ ++} ; ++ ++/* String table is an array of null-terminated strings. The first entry is ++ * the filename, which is added by DLLcreate. 
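
The df_byte_reshuffle field above is what drives the REORDER_MAP()/HOST_BYTE_ORDER()/TARGET_ORDER() machinery in dload_internal.h. A rough host-side model (illustrative only; dload_reorder itself is only declared in this hunk) treats a cooked map as four per-byte source indices: REORDER_MAP(BYTE_RESHUFFLE_VALUE) = 0x00010203 ^ 0x03030303 = 0x03020100 is then the identity, while a byte-swapped input file cooks to 0x00010203 and calls for a full byte reversal of every 32-bit word.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical illustration, not the driver routine: shuffle each 32-bit
 * word of 'data' according to 'map', where map byte i holds the source
 * byte index for destination byte i.  0x03020100 is the identity map;
 * 0x00010203 reverses the bytes of every word. */
static void reorder_words(uint32_t *data, size_t nwords, uint32_t map)
{
        size_t w;
        int i;

        for (w = 0; w < nwords; w++) {
                uint32_t src = data[w], dst = 0;

                for (i = 0; i < 4; i++) {
                        unsigned from = (map >> (8 * i)) & 0xFF;

                        dst |= ((src >> (8 * from)) & 0xFF) << (8 * i);
                }
                data[w] = dst;
        }
}

When the file already matches the host, the cooked map equals the identity value, the loader records reorder_map as 0 ("0 if not needed" in struct dload_state), and the reordering step is skipped altogether.
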
No new structure definitions ++ * are required. ++ */ ++ ++/* Section Records including information on the corresponding image packets */ ++/* ++ * !!WARNING!! ++ * ++ * This structure is expected to match in form LDR_SECTION_INFO in ++ * dynamic_loader.h ++ */ ++ ++struct doff_scnhdr_t { ++ ++ s32 ds_offset; /* offset into string table of name */ ++ s32 ds_paddr; /* RUN address, in target AU */ ++ s32 ds_vaddr; /* LOAD address, in target AU */ ++ s32 ds_size; /* section size, in target AU */ ++#ifndef _BIG_ENDIAN ++ u16 ds_page; /* memory page id */ ++ u16 ds_flags; /* section flags */ ++#else ++ u16 ds_flags; /* section flags */ ++ u16 ds_page; /* memory page id */ ++#endif ++ u32 ds_first_pkt_offset; ++ /* Absolute byte offset into the file */ ++ /* where the first image record resides */ ++ ++ s32 ds_nipacks; /* number of image packets */ ++ ++}; ++ ++/* Symbol table entry */ ++struct doff_syment_t { ++ ++ s32 dn_offset; /* offset into string table of name */ ++ s32 dn_value; /* value of symbol */ ++#ifndef _BIG_ENDIAN ++ s16 dn_scnum; /* section number */ ++ s16 dn_sclass; /* storage class */ ++#else ++ s16 dn_sclass; /* storage class */ ++ s16 dn_scnum; /* section number, 1-based */ ++#endif ++ ++} ; ++ ++/* special values for dn_scnum */ ++#define DN_UNDEF 0 /* undefined symbol */ ++#define DN_ABS (-1) /* value of symbol is absolute */ ++/* special values for dn_sclass */ ++#define DN_EXT 2 ++#define DN_STATLAB 20 ++#define DN_EXTLAB 21 ++ ++/* Default value of image bits in packet */ ++/* Configurable by user on the command line */ ++#define IMAGE_PACKET_SIZE 1024 ++ ++/* An image packet contains a chunk of data from a section along with */ ++/* information necessary for its processing. */ ++struct image_packet_t { ++ ++ s32 i_num_relocs; /* number of relocations for */ ++ /* this packet */ ++ ++ s32 i_packet_size; /* number of bytes in array */ ++ /* "bits" occupied by */ ++ /* valid data. Could be */ ++ /* < IMAGE_PACKET_SIZE to */ ++ /* prevent splitting a */ ++ /* relocation across packets. */ ++ /* Last packet of a section */ ++ /* will most likely contain */ ++ /* < IMAGE_PACKET_SIZE bytes */ ++ /* of valid data */ ++ ++ s32 i_checksum; /* Checksum for image packet */ ++ /* and the corresponding */ ++ /* relocation records */ ++ ++ u8 *i_bits; /* Actual data in section */ ++ ++}; ++ ++/* The relocation structure definition matches the COFF version. Offsets */ ++/* however are relative to the image packet base not the section base. 
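
A note on the i_checksum field above and the dv_*_checksum words in the verify record: judging from the test in getsection.c further down (checks = dload_checksum(...); ... if (~checks) -> error), each stored checksum appears to be chosen so that the plain 32-bit word sum over the record, including the checksum word itself, comes out all-ones. A minimal model of that convention (an assumption, since dload_checksum is only declared, not defined, in this hunk):

#include <assert.h>
#include <stdint.h>

/* Assumed convention: a record verifies when the u32 word sum over it,
 * including its stored checksum word, equals 0xFFFFFFFF. */
static uint32_t word_sum(const uint32_t *p, unsigned nwords)
{
        uint32_t sum = 0;

        while (nwords--)
                sum += *p++;
        return sum;
}

int main(void)
{
        uint32_t rec[4] = { 0x12345678, 0x9ABCDEF0, 0x00000001, 0 };

        rec[3] = ~word_sum(rec, 3);       /* writer: make the total all-ones */
        assert(~word_sum(rec, 4) == 0);   /* reader: same test as getsection.c */
        return 0;
}
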
*/ ++struct reloc_record_t { ++ ++ s32 r_vaddr; ++ ++ /* expressed in target AUs */ ++ ++ union { ++ struct { ++#ifndef _BIG_ENDIAN ++ u8 _offset; /* bit offset of rel fld */ ++ u8 _fieldsz; /* size of rel fld */ ++ u8 _wordsz; /* # bytes containing rel fld */ ++ u8 _dum1; ++ u16 _dum2; ++ u16 _type; ++#else ++ unsigned _dum1:8; ++ unsigned _wordsz:8; /* # bytes containing rel fld */ ++ unsigned _fieldsz:8; /* size of rel fld */ ++ unsigned _offset:8; /* bit offset of rel fld */ ++ u16 _type; ++ u16 _dum2; ++#endif ++ } _r_field; ++ ++ struct { ++ u32 _spc; /* image packet relative PC */ ++#ifndef _BIG_ENDIAN ++ u16 _dum; ++ u16 _type; /* relocation type */ ++#else ++ u16 _type; /* relocation type */ ++ u16 _dum; ++#endif ++ } _r_spc; ++ ++ struct { ++ u32 _uval; /* constant value */ ++#ifndef _BIG_ENDIAN ++ u16 _dum; ++ u16 _type; /* relocation type */ ++#else ++ u16 _type; /* relocation type */ ++ u16 _dum; ++#endif ++ } _r_uval; ++ ++ struct { ++ s32 _symndx; /* 32-bit sym tbl index */ ++#ifndef _BIG_ENDIAN ++ u16 _disp; /* extra addr encode data */ ++ u16 _type; /* relocation type */ ++#else ++ u16 _type; /* relocation type */ ++ u16 _disp; /* extra addr encode data */ ++#endif ++ } _r_sym; ++ } _u_reloc; ++ ++} ; ++ ++/* abbreviations for convenience */ ++#ifndef r_type ++#define r_type _u_reloc._r_sym._type ++#define r_uval _u_reloc._r_uval._uval ++#define r_symndx _u_reloc._r_sym._symndx ++#define r_offset _u_reloc._r_field._offset ++#define r_fieldsz _u_reloc._r_field._fieldsz ++#define r_wordsz _u_reloc._r_field._wordsz ++#define r_disp _u_reloc._r_sym._disp ++#endif ++ ++/*****************************************************************************/ ++/* */ ++/* Important DOFF macros used for file processing */ ++/* */ ++/*****************************************************************************/ ++ ++/* DOFF Versions */ ++#define DOFF0 0 ++ ++/* Return the address/size >= to addr that is at a 32-bit boundary */ ++/* This assumes that a byte is 8 bits */ ++#define DOFF_ALIGN(addr) (((addr) + 3) & ~UINT32_C(3)) ++ ++/*****************************************************************************/ ++/* */ ++/* The DOFF section header flags field is laid out as follows: */ ++/* */ ++/* Bits 0-3 : Section Type */ ++/* Bit 4 : Set when section requires target memory to be allocated by DL */ ++/* Bit 5 : Set when section requires downloading */ ++/* Bits 8-11: Alignment, same as COFF */ ++/* */ ++/*****************************************************************************/ ++ ++/* Enum for DOFF section types (bits 0-3 of flag): See dynamic_loader.h */ ++ ++/* Macros to help processing of sections */ ++#define DLOAD_SECT_TYPE(s_hdr) ((s_hdr)->ds_flags & 0xF) ++ ++/* DS_ALLOCATE indicates whether a section needs space on the target */ ++#define DS_ALLOCATE_MASK 0x10 ++#define DS_NEEDS_ALLOCATION(s_hdr) ((s_hdr)->ds_flags & DS_ALLOCATE_MASK) ++ ++/* DS_DOWNLOAD indicates that the loader needs to copy bits */ ++#define DS_DOWNLOAD_MASK 0x20 ++#define DS_NEEDS_DOWNLOAD(s_hdr) ((s_hdr)->ds_flags & DS_DOWNLOAD_MASK) ++ ++/* Section alignment requirement in AUs */ ++#define DS_ALIGNMENT(ds_flags) (1 << (((ds_flags) >> 8) & 0xF)) ++ ++#endif /* _DOFF_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/getsection.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/getsection.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/getsection.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/getsection.c 2011-09-04 
11:31:05.000000000 +0200 +@@ -0,0 +1,412 @@ ++/* ++ * getsection.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++ ++#include ++#include "header.h" ++ ++/* ++ * Error strings ++ */ ++static const char E_READSTRM[] = { "Error reading %s from input stream" }; ++static const char E_SEEK[] = { "Set file position to %d failed" }; ++static const char E_ISIZ[] = { "Bad image packet size %d" }; ++static const char E_CHECKSUM[] = { "Checksum failed on %s" }; ++static const char E_RELOC[] = { "DLOAD_GetSection unable to read" ++ "sections containing relocation entries"}; ++#if BITS_PER_AU > BITS_PER_BYTE ++static const char E_ALLOC[] = { "Syms->Allocate( %d ) failed" }; ++static const char E_STBL[] = { "Bad string table offset " FMT_UI32 }; ++#endif ++ ++/* ++ * we use the fact that DOFF section records are shaped just like ++ * LDR_SECTION_INFO to reduce our section storage usage. These macros ++ * marks the places where that assumption is made ++ */ ++#define DOFFSEC_IS_LDRSEC(pdoffsec) ((struct LDR_SECTION_INFO *)(pdoffsec)) ++#define LDRSEC_IS_DOFFSEC(ldrsec) ((struct doff_scnhdr_t *)(ldrsec)) ++ ++/***************************************************************/ ++/********************* SUPPORT FUNCTIONS ***********************/ ++/***************************************************************/ ++ ++#if BITS_PER_AU > BITS_PER_BYTE ++/************************************************************************** ++ * Procedure unpack_sec_name ++ * ++ * Parameters: ++ * dlthis Handle from DLOAD_module_open for this module ++ * soffset Byte offset into the string table ++ * dst Place to store the expanded string ++ * ++ * Effect: ++ * Stores a string from the string table into the destination, expanding ++ * it in the process. Returns a pointer just past the end of the stored ++ * string on success, or NULL on failure. ++ * ++ *************************************************************************/ ++static char *unpack_sec_name(struct dload_state *dlthis, ++ u32 soffset, char *dst) ++{ ++ u8 tmp, *src; ++ ++ if (soffset >= dlthis->dfile_hdr.df_scn_name_size) { ++ dload_error(dlthis, E_STBL, soffset); ++ return NULL; ++ } ++ src = (u8 *)dlthis->str_head + ++ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)); ++ if (soffset & 1) ++ *dst++ = *src++; /* only 1 character in first word */ ++ do { ++ tmp = *src++; ++ *dst = (tmp >> BITS_PER_BYTE) ++ if (!(*dst++)) ++ break; ++ } while ((*dst++ = tmp & BYTE_MASK)); ++ ++ return dst; ++} ++ ++/************************************************************************** ++ * Procedure expand_sec_names ++ * ++ * Parameters: ++ * dlthis Handle from DLOAD_module_open for this module ++ * ++ * Effect: ++ * Allocates a buffer, unpacks and copies strings from string table into it. ++ * Stores a pointer to the buffer into a state variable. 
++ **************************************************************************/ ++static void expand_sec_names(struct dload_state *dlthis) ++{ ++ char *xstrings, *curr, *next; ++ u32 xsize; ++ u16 sec; ++ struct LDR_SECTION_INFO *shp; ++ /* assume worst-case size requirement */ ++ xsize = dlthis->dfile_hdr.df_max_str_len * dlthis->dfile_hdr.df_no_scns; ++ xstrings = (char *)dlthis->mysym->Allocate(dlthis->mysym, xsize); ++ if (xstrings == NULL) { ++ dload_error(dlthis, E_ALLOC, xsize); ++ return; ++ } ++ dlthis->xstrings = xstrings; ++ /* For each sec, copy and expand its name */ ++ curr = xstrings; ++ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { ++ shp = DOFFSEC_IS_LDRSEC(&dlthis->sect_hdrs[sec]); ++ next = unpack_sec_name(dlthis, *(u32 *) &shp->name, curr); ++ if (next == NULL) ++ break; /* error */ ++ shp->name = curr; ++ curr = next; ++ } ++} ++ ++#endif ++ ++/***************************************************************/ ++/********************* EXPORTED FUNCTIONS **********************/ ++/***************************************************************/ ++ ++/************************************************************************** ++ * Procedure DLOAD_module_open ++ * ++ * Parameters: ++ * module The input stream that supplies the module image ++ * syms Host-side malloc/free and error reporting functions. ++ * Other methods are unused. ++ * ++ * Effect: ++ * Reads header information from a dynamic loader module using the ++ specified ++ * stream object, and returns a handle for the module information. This ++ * handle may be used in subsequent query calls to obtain information ++ * contained in the module. ++ * ++ * Returns: ++ * NULL if an error is encountered, otherwise a module handle for use ++ * in subsequent operations. 
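
unpack_sec_name() and expand_sec_names() above exist for hosts whose addressable unit is wider than a byte (the BITS_PER_AU > BITS_PER_BYTE case): section names arrive packed two characters per 16-bit word, high byte first, and have to be expanded before they can be compared with strcmp(). A stand-alone sketch of that unpacking on an ordinary byte host (illustrative only; it ignores the odd-offset case the driver also handles):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Expand a string stored two characters per 16-bit word, high byte first.
 * Returns the number of characters written, including the final NUL. */
static unsigned unpack_string(const uint16_t *src, char *dst)
{
        unsigned n = 0;

        for (;;) {
                uint16_t w = *src++;
                char hi = (char)(w >> 8);
                char lo = (char)(w & 0xFF);

                dst[n++] = hi;
                if (hi == '\0')
                        return n;
                dst[n++] = lo;
                if (lo == '\0')
                        return n;
        }
}

int main(void)
{
        /* ".bss" packed into two 16-bit words plus a terminating NUL word */
        const uint16_t packed[3] = { ('.' << 8) | 'b', ('s' << 8) | 's', 0 };
        char buf[8];

        assert(unpack_string(packed, buf) == 5);
        assert(strcmp(buf, ".bss") == 0);
        return 0;
}
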
++ **************************************************************************/ ++DLOAD_module_info DLOAD_module_open(struct Dynamic_Loader_Stream *module, ++ struct Dynamic_Loader_Sym *syms) ++{ ++ struct dload_state *dlthis; /* internal state for this call */ ++ unsigned *dp, sz; ++ u32 sec_start; ++#if BITS_PER_AU <= BITS_PER_BYTE ++ u16 sec; ++#endif ++ ++ /* Check that mandatory arguments are present */ ++ if (!module || !syms) { ++ if (syms != NULL) ++ dload_syms_error(syms, "Required parameter is NULL"); ++ ++ return NULL; ++ } ++ ++ dlthis = (struct dload_state *) ++ syms->Allocate(syms, sizeof(struct dload_state)); ++ if (!dlthis) { ++ /* not enough storage */ ++ dload_syms_error(syms, "Can't allocate module info"); ++ return NULL; ++ } ++ ++ /* clear our internal state */ ++ dp = (unsigned *)dlthis; ++ for (sz = sizeof(struct dload_state) / sizeof(unsigned); ++ sz > 0; sz -= 1) ++ *dp++ = 0; ++ ++ dlthis->strm = module; ++ dlthis->mysym = syms; ++ ++ /* read in the doff image and store in our state variable */ ++ dload_headers(dlthis); ++ ++ if (!dlthis->dload_errcount) ++ dload_strings(dlthis, true); ++ ++ /* skip ahead past the unread portion of the string table */ ++ sec_start = sizeof(struct doff_filehdr_t) + ++ sizeof(struct doff_verify_rec_t) + ++ BYTE_TO_HOST(DOFF_ALIGN(dlthis->dfile_hdr.df_strtab_size)); ++ ++ if (dlthis->strm->set_file_posn(dlthis->strm, sec_start) != 0) { ++ dload_error(dlthis, E_SEEK, sec_start); ++ return NULL; ++ } ++ ++ if (!dlthis->dload_errcount) ++ dload_sections(dlthis); ++ ++ if (dlthis->dload_errcount) { ++ DLOAD_module_close(dlthis); /* errors, blow off our state */ ++ dlthis = NULL; ++ return NULL; ++ } ++#if BITS_PER_AU > BITS_PER_BYTE ++ /* Expand all section names from the string table into the */ ++ /* state variable, and convert section names from a relative */ ++ /* string table offset to a pointers to the expanded string. */ ++ expand_sec_names(dlthis); ++#else ++ /* Convert section names from a relative string table offset */ ++ /* to a pointer into the string table. */ ++ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { ++ struct LDR_SECTION_INFO *shp = ++ DOFFSEC_IS_LDRSEC(&dlthis->sect_hdrs[sec]); ++ shp->name = dlthis->str_head + *(u32 *)&shp->name; ++ } ++#endif ++ ++ return dlthis; ++} ++ ++/*************************************************************************** ++ * Procedure DLOAD_GetSectionInfo ++ * ++ * Parameters: ++ * minfo Handle from DLOAD_module_open for this module ++ * sectionName Pointer to the string name of the section desired ++ * sectionInfo Address of a section info structure pointer to be ++ * initialized ++ * ++ * Effect: ++ * Finds the specified section in the module information, and initializes ++ * the provided struct LDR_SECTION_INFO pointer. 
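
The sec_start computation inside DLOAD_module_open() above encodes the on-disk layout: DOFF file header, then verify record, then the string table rounded up to a 32-bit boundary by DOFF_ALIGN, then the section records. A worked example of the rounding under the usual byte-addressed host assumption, where BYTE_TO_HOST is the identity (the string-table size is made up for illustration):

#include <assert.h>
#include <stdint.h>

/* Same rounding as DOFF_ALIGN in doff.h: round up to a 32-bit boundary. */
#define ALIGN4(x) (((x) + 3u) & ~UINT32_C(3))

int main(void)
{
        assert(ALIGN4(1001) == 1004);   /* 1001-byte string table pads to 1004 */
        assert(ALIGN4(1004) == 1004);   /* already aligned sizes are unchanged */
        /* section records then begin at
         * sizeof(struct doff_filehdr_t) + sizeof(struct doff_verify_rec_t)
         * + ALIGN4(df_strtab_size), which is exactly the sec_start seek above */
        return 0;
}
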
++ * ++ * Returns: ++ * true for success, false for section not found ++ **************************************************************************/ ++int DLOAD_GetSectionInfo(DLOAD_module_info minfo, const char *sectionName, ++ const struct LDR_SECTION_INFO **const sectionInfo) ++{ ++ struct dload_state *dlthis; ++ struct LDR_SECTION_INFO *shp; ++ u16 sec; ++ ++ dlthis = (struct dload_state *)minfo; ++ if (!dlthis) ++ return false; ++ ++ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { ++ shp = DOFFSEC_IS_LDRSEC(&dlthis->sect_hdrs[sec]); ++ if (strcmp(sectionName, shp->name) == 0) { ++ *sectionInfo = shp; ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32)) ++#define REVERSE_REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303) ++ ++/************************************************************************** ++ * Procedure DLOAD_GetSection ++ * ++ * Parameters: ++ * minfo Handle from DLOAD_module_open for this module ++ * sectionInfo Pointer to a section info structure for the desired ++ * section ++ * sectionData Buffer to contain the section initialized data ++ * ++ * Effect: ++ * Copies the initialized data for the specified section into the ++ * supplied buffer. ++ * ++ * Returns: ++ * true for success, false for section not found ++ **************************************************************************/ ++int DLOAD_GetSection(DLOAD_module_info minfo, ++ const struct LDR_SECTION_INFO *sectionInfo, void *sectionData) ++{ ++ struct dload_state *dlthis; ++ u32 pos; ++ struct doff_scnhdr_t *sptr = NULL; ++ s32 nip; ++ struct image_packet_t ipacket; ++ s32 ipsize; ++ u32 checks; ++ s8 *dest = (s8 *)sectionData; ++ ++ dlthis = (struct dload_state *)minfo; ++ if (!dlthis) ++ return false; ++ sptr = LDRSEC_IS_DOFFSEC(sectionInfo); ++ if (sptr == NULL) ++ return false; ++ ++ /* skip ahead to the start of the first packet */ ++ pos = BYTE_TO_HOST(DOFF_ALIGN((u32) sptr->ds_first_pkt_offset)); ++ if (dlthis->strm->set_file_posn(dlthis->strm, pos) != 0) { ++ dload_error(dlthis, E_SEEK, pos); ++ return false; ++ } ++ ++ nip = sptr->ds_nipacks; ++ while ((nip -= 1) >= 0) { /* for each packet */ ++ /* get the fixed header bits */ ++ if (dlthis->strm-> ++ read_buffer(dlthis->strm, &ipacket, IPH_SIZE) != IPH_SIZE) { ++ dload_error(dlthis, E_READSTRM, "image packet"); ++ return false; ++ } ++ /* reorder the header if need be */ ++ if (dlthis->reorder_map) ++ dload_reorder(&ipacket, IPH_SIZE, dlthis->reorder_map); ++ ++ /* Now read the packet image bits. Note: round the size up to ++ * the next multiple of 4 bytes; this is what checksum ++ * routines want. */ ++ ipsize = BYTE_TO_HOST(DOFF_ALIGN(ipacket.i_packet_size)); ++ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) { ++ dload_error(dlthis, E_ISIZ, ipsize); ++ return false; ++ } ++ if (dlthis->strm->read_buffer ++ (dlthis->strm, dest, ipsize) != ipsize) { ++ dload_error(dlthis, E_READSTRM, "image packet"); ++ return false; ++ } ++ /* reorder the bytes if need be */ ++#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16) ++ if (dlthis->reorder_map) ++ dload_reorder(dest, ipsize, dlthis->reorder_map); ++ ++ checks = dload_checksum(dest, ipsize); ++#else ++ if (dlthis->dfile_hdr.df_byte_reshuffle != ++ TARGET_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) { ++ /* put image bytes in big-endian order, not PC order */ ++ dload_reorder(dest, ipsize, ++ TARGET_ORDER(dlthis->dfile_hdr. 
++ df_byte_reshuffle)); ++ } ++#if TARGET_AU_BITS > 8 ++ checks = dload_reverse_checksum_16(dest, ipsize); ++#else ++ checks = dload_reverse_checksum(dest, ipsize); ++#endif ++#endif ++ checks += dload_checksum(&ipacket, IPH_SIZE); ++ ++ /* NYI: unable to handle relocation entries here. Reloc ++ * entries referring to fields that span the packet boundaries ++ * may result in packets of sizes that are not multiple of ++ * 4 bytes. Our checksum implementation works on 32-bit words ++ * only. */ ++ if (ipacket.i_num_relocs != 0) { ++ dload_error(dlthis, E_RELOC, ipsize); ++ return false; ++ } ++ ++ if (~checks) { ++ dload_error(dlthis, E_CHECKSUM, "image packet"); ++ return false; ++ } ++ ++ /*Advance destination ptr by the size of the just-read packet*/ ++ dest += ipsize; ++ } ++ ++ return true; ++} ++ ++/*************************************************************************** ++ * Procedure DLOAD_module_close ++ * ++ * Parameters: ++ * minfo Handle from DLOAD_module_open for this module ++ * ++ * Effect: ++ * Releases any storage associated with the module handle. On return, ++ * the module handle is invalid. ++ * ++ * Returns: ++ * Zero for success. On error, the number of errors detected is returned. ++ * Individual errors are reported using syms->Error_Report(), where syms was ++ * an argument to DLOAD_module_open ++ **************************************************************************/ ++void DLOAD_module_close(DLOAD_module_info minfo) ++{ ++ struct dload_state *dlthis; ++ ++ dlthis = (struct dload_state *)minfo; ++ if (!dlthis) ++ return; ++ ++ if (dlthis->str_head) ++ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->str_head); ++ ++ if (dlthis->sect_hdrs) ++ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->sect_hdrs); ++ ++#if BITS_PER_AU > BITS_PER_BYTE ++ if (dlthis->xstrings) ++ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->xstrings); ++ ++#endif ++ ++ dlthis->mysym->Deallocate(dlthis->mysym, dlthis); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/header.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/header.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/header.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/header.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,59 @@ ++/* ++ * header.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++ ++#define TRUE 1 ++#define FALSE 0 ++#ifndef NULL ++#define NULL 0 ++#endif ++ ++#include ++#define DL_STRCMP strcmp ++ ++/* maximum parenthesis nesting in relocation stack expressions */ ++#define STATIC_EXPR_STK_SIZE 10 ++ ++#include ++typedef unsigned int uint_least32_t; ++typedef unsigned short int uint_least16_t; ++ ++#include "doff.h" ++#include ++#include "params.h" ++#include "dload_internal.h" ++#include "reloc_table.h" ++ ++/* ++ * Plausibility limits ++ * ++ * These limits are imposed upon the input DOFF file as a check for validity. ++ * They are hard limits, in that the load will fail if they are exceeded. 
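
Taken together, DLOAD_module_open(), DLOAD_GetSectionInfo(), DLOAD_GetSection() and DLOAD_module_close() above form a small query API over a DOFF image. A hypothetical caller might drive it as sketched below; my_stream, my_syms, dump_text_section and the ".text" name are illustrative, and the DLOAD_module_info / Dynamic_Loader_* declarations are assumed to come from dynamic_loader.h, which is not part of this hunk:

/* Sketch only: errors are reported through my_syms->Error_Report as usual. */
extern struct Dynamic_Loader_Stream my_stream;    /* assumed to exist */
extern struct Dynamic_Loader_Sym my_syms;         /* assumed to exist */

static int dump_text_section(void *buf)
{
        DLOAD_module_info mod;
        const struct LDR_SECTION_INFO *sect;
        int ok = 0;

        mod = DLOAD_module_open(&my_stream, &my_syms);
        if (!mod)
                return 0;
        /* buf must be large enough for the section (see its size field) */
        if (DLOAD_GetSectionInfo(mod, ".text", &sect))
                ok = DLOAD_GetSection(mod, sect, buf);
        DLOAD_module_close(mod);
        return ok;
}
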
++ * The numbers selected are arbitrary, in that the loader implementation does ++ * not require these limits. ++ */ ++ ++/* maximum number of bytes in string table */ ++#define MAX_REASONABLE_STRINGTAB (0x100000) ++/* maximum number of code,data,etc. sections */ ++#define MAX_REASONABLE_SECTIONS (200) ++/* maximum number of linker symbols */ ++#define MAX_REASONABLE_SYMBOLS (100000) ++ ++/* shift count to align F_BIG with DLOAD_LITTLE */ ++#define ALIGN_COFF_ENDIANNESS 7 ++#define ENDIANNESS_MASK (DF_BYTE_ORDER >> ALIGN_COFF_ENDIANNESS) +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/module_list.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/module_list.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/module_list.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/module_list.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,161 @@ ++/* ++ * dspbridge/mpu_driver/src/dynload/module_list.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/*============================================================================ ++ Filename: module_list.h ++ ++ Copyright (C) 2002 Texas Instruments Incorporated ++ ++ ++ This C header file gives the layout of the data structure created by the ++ dynamic loader to describe the set of modules loaded into the DSP. ++ ++ Linked List Structure: ++ ---------------------- ++ The data structure defined here is a singly-linked list. The list ++ represents the set of modules which are currently loaded in the DSP memory. ++ The first entry in the list is a header record which contains a flag ++ representing the state of the list. The rest of the entries in the list ++ are module records. ++ ++ Global symbol _DLModules designates the first record in the list (i.e. the ++ header record). This symbol must be defined in any program that wishes to ++ use DLLview plug-in. ++ ++ String Representation: ++ ---------------------- ++ The string names of the module and its sections are stored in a block of ++ memory which follows the module record itself. The strings are ordered: ++ module name first, followed by section names in order from the first ++ section to the last. String names are tightly packed arrays of 8-bit ++ characters (two characters per 16-bit word on the C55x). Strings are ++ zero-byte-terminated. ++ ++ Creating and updating the list: ++------------------------------- ++ Upon loading a new module into the DSP memory the dynamic loader inserts a ++new module record as the first module record in the list. The fields of ++ this module record are initialized to reflect the properties of the module. ++ The dynamic loader does NOT increment the flag/counter in the list's header ++ record. ++ ++ Upon unloading a module from the DSP memory the dynamic loader removes the ++module's record from this list. The dynamic loader also increments the ++ flag/counter in the list's header record to indicate that the list has been ++ changed. 
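
The on-target list described here can be mirrored from the host with nothing more than the two record layouts declared just below: modules_header begins with first_module, and every dll_module begins with next_module, so a walk simply chases 32-bit links until it reads zero. A rough sketch (read_target_u32 is a hypothetical accessor for target memory, and the C55x word-addressing caveats noted in the structure comments are ignored):

#include <stdint.h>

extern uint32_t read_target_u32(uint32_t target_addr);   /* assumed helper */

static unsigned count_loaded_modules(uint32_t dlmodules_addr)
{
        /* modules_header.first_module is the first 32-bit word of the header */
        uint32_t module = read_target_u32(dlmodules_addr);
        unsigned n = 0;

        while (module != 0) {
                n++;
                /* dll_module.next_module is likewise the record's first word */
                module = read_target_u32(module);
        }
        return n;
}
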
++ ++============================================================================*/ ++ ++#ifndef _MODULE_LIST_H_ ++#define _MODULE_LIST_H_ ++ ++#include ++ ++/* Global pointer to the modules_header structure*/ ++#define MODULES_HEADER "_DLModules" ++#define MODULES_HEADER_NO_UNDERSCORE "DLModules" ++ ++/* Initial version number*/ ++#define INIT_VERSION 1 ++ ++/* Verification number -- to be recorded in each module record */ ++#define VERIFICATION 0x79 ++ ++/* forward declarations */ ++struct dll_module; ++struct dll_sect; ++ ++/* the first entry in the list is the modules_header record; ++ * its address is contained in the global _DLModules pointer */ ++struct modules_header { ++ ++ /* Address of the first dll_module record in the list or NULL. ++ Note: for C55x this is a word address (C55x data is word-addressable)*/ ++ u32 first_module; ++ ++ /* Combined storage size (in target addressable units) of the ++ * dll_module record which follows this header record, or zero ++ * if the list is empty. This size includes the module's string table. ++ * Note: for C55x the unit is a 16-bit word */ ++ u16 first_module_size; ++ ++ /* Counter is incremented whenever a module record is removed from ++ * the list */ ++ u16 update_flag; ++ ++} ; ++ ++/* for each 32-bits in above structure, a bitmap, LSB first, whose bits are: ++ * 0 => a 32-bit value, 1 => 2 16-bit values */ ++#define MODULES_HEADER_BITMAP 0x2 /* swapping bitmap for type modules_header */ ++ ++/* information recorded about each section in a module */ ++struct dll_sect { ++ ++ /* Load-time address of the section. ++ * Note: for C55x this is a byte address for program sections, and ++ * a word address for data sections. C55x program memory is ++ * byte-addressable, while data memory is word-addressable. */ ++ u32 sect_load_adr; ++ ++ /* Run-time address of the section. ++ * Note 1: for C55x this is a byte address for program sections, and ++ * a word address for data sections. ++ * Note 2: for C55x two most significant bits of this field indicate ++ * the section type: '00' for a code section, '11' for a data section ++ * (C55 addresses are really only 24-bits wide). */ ++ u32 sect_run_adr; ++ ++} ; ++ ++/* the rest of the entries in the list are module records */ ++struct dll_module { ++ ++ /* Address of the next dll_module record in the list, or 0 if this is ++ * the last record in the list. ++ * Note: for C55x this is a word address (C55x data is ++ * word-addressable) */ ++ u32 next_module; ++ ++ /* Combined storage size (in target addressable units) of the ++ * dll_module record which follows this one, or zero if this is the ++ * last record in the list. This size includes the module's string ++ * table. ++ * Note: for C55x the unit is a 16-bit word. 
*/ ++ u16 next_module_size; ++ ++ /* version number of the tooling; set to INIT_VERSION for Phase 1 */ ++ u16 version; ++ ++ /* the verification word; set to VERIFICATION */ ++ u16 verification; ++ ++ /* Number of sections in the sects array */ ++ u16 num_sects; ++ ++ /* Module's "unique" id; copy of the timestamp from the host ++ * COFF file */ ++ u32 timestamp; ++ ++ /* Array of num_sects elements of the module's section records */ ++ struct dll_sect sects[1]; ++} ; ++ ++/* for each 32 bits in above structure, a bitmap, LSB first, whose bits are: ++ * 0 => a 32-bit value, 1 => 2 16-bit values */ ++#define DLL_MODULE_BITMAP 0x6 /* swapping bitmap for type dll_module */ ++ ++#endif /* _MODULE_LIST_H_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/params.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/params.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/params.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/params.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,231 @@ ++/* ++ * params.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++ ++/****************************************************************************** ++ * ++ * This file defines host and target properties for all machines ++ * supported by the dynamic loader. To be tedious... ++ * ++ * host == the machine on which the dynamic loader runs ++ * target == the machine that the dynamic loader is loading ++ * ++ * Host and target may or may not be the same, depending upon the particular ++ * use. ++ *****************************************************************************/ ++ ++/****************************************************************************** ++ * ++ * Host Properties ++ * ++ *****************************************************************************/ ++ ++#define BITS_PER_BYTE 8 /* bits in the standard PC/SUN byte */ ++#define LOG_BITS_PER_BYTE 3 /* log base 2 of same */ ++#define BYTE_MASK ((1U<> 16)) ++#define SWAP16BY8(zz) (((zz) << 8) | ((zz) >> 8)) ++ ++/* !! don't be tempted to insert type definitions here; use !! 
*/ ++ ++/****************************************************************************** ++ * ++ * Target Properties ++ * ++ *****************************************************************************/ ++ ++ ++/*--------------------------------------------------------------------------*/ ++/* TMS320C6x Target Specific Parameters (byte-addressable) */ ++/*--------------------------------------------------------------------------*/ ++#if TMS32060 ++#define MEMORG 0x0L /* Size of configured memory */ ++#define MEMSIZE 0x0L /* (full address space) */ ++ ++#define CINIT_ALIGN 8 /* alignment of cinit record in TDATA AUs */ ++#define CINIT_COUNT 4 /* width of count field in TDATA AUs */ ++#define CINIT_ADDRESS 4 /* width of address field in TDATA AUs */ ++#define CINIT_PAGE_BITS 0 /* Number of LSBs of address that ++ * are page number */ ++ ++#define LENIENT_SIGNED_RELEXPS 0 /* DOES SIGNED ALLOW MAX UNSIGNED */ ++ ++#undef TARGET_ENDIANNESS /* may be big or little endian */ ++ ++/* align a target address to a word boundary */ ++#define TARGET_WORD_ALIGN(zz) (((zz) + 0x3) & -0x4) ++#endif ++ ++ ++/*-------------------------------------------------------------------------- ++ * ++ * DEFAULT SETTINGS and DERIVED PROPERTIES ++ * ++ * This section establishes defaults for values not specified above ++ *--------------------------------------------------------------------------*/ ++#ifndef TARGET_AU_BITS ++#define TARGET_AU_BITS 8 /* width of the target addressable unit */ ++#define LOG_TARGET_AU_BITS 3 /* log2 of same */ ++#endif ++ ++#ifndef CINIT_DEFAULT_PAGE ++#define CINIT_DEFAULT_PAGE 0 /* default .cinit page number */ ++#endif ++ ++#ifndef DATA_RUN2LOAD ++#define DATA_RUN2LOAD(zz) (zz) /* translate data run address to load address */ ++#endif ++ ++#ifndef DBG_LIST_PAGE ++#define DBG_LIST_PAGE 0 /* page number for .dllview section */ ++#endif ++ ++#ifndef TARGET_WORD_ALIGN ++/* align a target address to a word boundary */ ++#define TARGET_WORD_ALIGN(zz) (zz) ++#endif ++ ++#ifndef TDATA_TO_TADDR ++#define TDATA_TO_TADDR(zz) (zz) /* target data address to target AU address */ ++#define TADDR_TO_TDATA(zz) (zz) /* target AU address to target data address */ ++#define TDATA_AU_BITS TARGET_AU_BITS /* bits per data AU */ ++#define LOG_TDATA_AU_BITS LOG_TARGET_AU_BITS ++#endif ++ ++/* ++ * ++ * Useful properties and conversions derived from the above ++ * ++ */ ++ ++/* ++ * Conversions between host and target addresses ++ */ ++#if LOG_BITS_PER_AU == LOG_TARGET_AU_BITS ++/* translate target addressable unit to host address */ ++#define TADDR_TO_HOST(x) (x) ++/* translate host address to target addressable unit */ ++#define HOST_TO_TADDR(x) (x) ++#elif LOG_BITS_PER_AU > LOG_TARGET_AU_BITS ++#define TADDR_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS)) ++#define HOST_TO_TADDR(x) ((x) << (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS)) ++#else ++#define TADDR_TO_HOST(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU)) ++#define HOST_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU)) ++#endif ++ ++#if LOG_BITS_PER_AU == LOG_TDATA_AU_BITS ++/* translate target addressable unit to host address */ ++#define TDATA_TO_HOST(x) (x) ++/* translate host address to target addressable unit */ ++#define HOST_TO_TDATA(x) (x) ++/* translate host address to target addressable unit, round up */ ++#define HOST_TO_TDATA_ROUND(x) (x) ++/* byte offset to host offset, rounded up for TDATA size */ ++#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x) ++#elif LOG_BITS_PER_AU > LOG_TDATA_AU_BITS ++#define TDATA_TO_HOST(x) 
((x) >> (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) ++#define HOST_TO_TDATA(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) ++#define HOST_TO_TDATA_ROUND(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) ++#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x) ++#else ++#define TDATA_TO_HOST(x) ((x) << (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) ++#define HOST_TO_TDATA(x) ((x) >> (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) ++#define HOST_TO_TDATA_ROUND(x) (((x) +\ ++ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))-1) >>\ ++ (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) ++#define BYTE_TO_HOST_TDATA_ROUND(x) (BYTE_TO_HOST((x) +\ ++ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_BYTE))-1) &\ ++ -(TDATA_AU_BITS/BITS_PER_AU)) ++#endif ++ ++/* ++ * Input in DOFF format is always expresed in bytes, regardless of loading host ++ * so we wind up converting from bytes to target and host units even when the ++ * host is not a byte machine. ++ */ ++#if LOG_BITS_PER_AU == LOG_BITS_PER_BYTE ++#define BYTE_TO_HOST(x) (x) ++#define BYTE_TO_HOST_ROUND(x) (x) ++#define HOST_TO_BYTE(x) (x) ++#elif LOG_BITS_PER_AU >= LOG_BITS_PER_BYTE ++#define BYTE_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) ++#define BYTE_TO_HOST_ROUND(x) ((x + (BITS_PER_AU/BITS_PER_BYTE-1)) >>\ ++ (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) ++#define HOST_TO_BYTE(x) ((x) << (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) ++#else ++/* lets not try to deal with sub-8-bit byte machines */ ++#endif ++ ++#if LOG_TARGET_AU_BITS == LOG_BITS_PER_BYTE ++/* translate target addressable unit to byte address */ ++#define TADDR_TO_BYTE(x) (x) ++/* translate byte address to target addressable unit */ ++#define BYTE_TO_TADDR(x) (x) ++#elif LOG_TARGET_AU_BITS > LOG_BITS_PER_BYTE ++#define TADDR_TO_BYTE(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE)) ++#define BYTE_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE)) ++#else ++/* lets not try to deal with sub-8-bit byte machines */ ++#endif ++ ++#ifdef _BIG_ENDIAN ++#define HOST_ENDIANNESS 1 ++#else ++#define HOST_ENDIANNESS 0 ++#endif ++ ++#ifdef TARGET_ENDIANNESS ++#define TARGET_ENDIANNESS_DIFFERS(rtend) (HOST_ENDIANNESS^TARGET_ENDIANNESS) ++#elif HOST_ENDIANNESS ++#define TARGET_ENDIANNESS_DIFFERS(rtend) (!(rtend)) ++#else ++#define TARGET_ENDIANNESS_DIFFERS(rtend) (rtend) ++#endif ++ ++/* the unit in which we process target image data */ ++#if TARGET_AU_BITS <= 8 ++typedef u8 TgtAU_t; ++#elif TARGET_AU_BITS <= 16 ++typedef u16 TgtAU_t; ++#else ++typedef u32 TgtAU_t; ++#endif ++ ++/* size of that unit */ ++#if TARGET_AU_BITS < BITS_PER_AU ++#define TGTAU_BITS BITS_PER_AU ++#define LOG_TGTAU_BITS LOG_BITS_PER_AU ++#else ++#define TGTAU_BITS TARGET_AU_BITS ++#define LOG_TGTAU_BITS LOG_TARGET_AU_BITS ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/reloc.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/reloc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,425 @@ ++/* ++ * reloc.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
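
Before reloc.c begins: the conversion macros that close params.h above all reduce to fixed shifts once the host and target unit widths are known. Written out with concrete numbers for a hypothetical 16-bit-addressable host (LOG_BITS_PER_AU = 4) loading a byte-addressable target (LOG_TARGET_AU_BITS = 3):

#include <assert.h>

int main(void)
{
        /* BYTE_TO_HOST: 10 input bytes occupy 5 host (16-bit) units */
        assert((10 >> (4 - 3)) == 5);
        /* BYTE_TO_HOST_ROUND: 11 bytes round up to 6 host units */
        assert(((11 + 1) >> (4 - 3)) == 6);
        /* TADDR_TO_HOST: target byte address 7 falls in host unit 3 */
        assert((7 >> (4 - 3)) == 3);
        return 0;
}
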
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#include "header.h" ++ ++#if TMS32060 ++/* the magic symbol for the start of BSS */ ++static const char BSSSYMBOL[] = {".bss"}; ++#endif ++ ++#if TMS32060 ++#include "reloc_table_c6000.c" ++#endif ++ ++#if TMS32060 ++/* From coff.h - ignore these relocation operations */ ++#define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */ ++#define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */ ++#define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */ ++#endif ++ ++/************************************************************************** ++ * Procedure dload_unpack ++ * ++ * Parameters: ++ * data pointer to storage unit containing lowest host address of ++ * image data ++ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(RVALUE)*BITS_PER_AU ++ * offset Offset from LSB, 0 <= offset < BITS_PER_AU ++ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) ++ * ++ * Effect: ++ * Extracts the specified field and returns it. ++ **************************************************************************/ ++RVALUE dload_unpack(struct dload_state *dlthis, TgtAU_t *data, int fieldsz, ++ int offset, unsigned sgn) ++{ ++ register RVALUE objval; ++ register int shift, direction; ++ register TgtAU_t *dp = data; ++ ++ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value*/ ++ /* * collect up enough bits to contain the desired field */ ++ if (TARGET_BIG_ENDIAN) { ++ dp += (fieldsz + offset) >> LOG_TGTAU_BITS; ++ direction = -1; ++ } else ++ direction = 1; ++ objval = *dp >> offset; ++ shift = TGTAU_BITS - offset; ++ while (shift <= fieldsz) { ++ dp += direction; ++ objval += (RVALUE)*dp << shift; ++ shift += TGTAU_BITS; ++ } ++ ++ /* * sign or zero extend the value appropriately */ ++ if (sgn == ROP_UNS) ++ objval &= (2 << fieldsz) - 1; ++ else { ++ shift = sizeof(RVALUE) * BITS_PER_AU-1 - fieldsz; ++ objval = (objval << shift) >> shift; ++ } ++ ++ return objval; ++ ++} /* dload_unpack */ ++ ++ ++/************************************************************************** ++ * Procedure dload_repack ++ * ++ * Parameters: ++ * val Value to insert ++ * data Pointer to storage unit containing lowest host address of ++ * image data ++ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(RVALUE)*BITS_PER_AU ++ * offset Offset from LSB, 0 <= offset < BITS_PER_AU ++ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) ++ * ++ * Effect: ++ * Stuffs the specified value in the specified field. Returns 0 for ++ * success ++ * or 1 if the value will not fit in the specified field according to the ++ * specified signedness rule. 
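
dload_unpack() above gathers host AUs starting from the lowest address (or the highest, on a big-endian target), shifts away the bit offset, masks down to the field width, and finally sign- or zero-extends the result. The same idea reduced to a stand-alone routine for a little-endian, byte-addressed image (illustrative only, not the driver routine; fieldsz is assumed small enough for the result to fit in 32 bits):

#include <assert.h>
#include <stdint.h>

static int32_t extract_field(const uint8_t *data, int fieldsz, int offset,
                             int is_signed)
{
        uint64_t acc = 0;
        int i;

        for (i = 0; i < 8; i++)                 /* gather enough input bytes */
                acc |= (uint64_t)data[i] << (8 * i);
        acc = (acc >> offset) & ((UINT64_C(1) << fieldsz) - 1);
        if (is_signed && (acc >> (fieldsz - 1)))        /* sign bit set? */
                return (int32_t)((int64_t)acc - (INT64_C(1) << fieldsz));
        return (int32_t)acc;
}

int main(void)
{
        /* 10-bit field at bit offset 5 of the little-endian value 0x7FE0 */
        const uint8_t img[8] = { 0xE0, 0x7F, 0, 0, 0, 0, 0, 0 };

        assert(extract_field(img, 10, 5, 0) == 0x3FF);  /* zero-extended */
        assert(extract_field(img, 10, 5, 1) == -1);     /* sign-extended */
        return 0;
}
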
++ **************************************************************************/ ++static const unsigned char ovf_limit[] = {1, 2, 2}; ++int dload_repack(struct dload_state *dlthis, RVALUE val, TgtAU_t *data, ++ int fieldsz, int offset, unsigned sgn) ++{ ++ register URVALUE objval, mask; ++ register int shift, direction; ++ register TgtAU_t *dp = data; ++ ++ ++ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */ ++ /* clip the bits */ ++ mask = ((UINT32_C(2) << fieldsz) - 1); ++ objval = (val & mask); ++ /* * store the bits through the specified mask */ ++ if (TARGET_BIG_ENDIAN) { ++ dp += (fieldsz + offset) >> LOG_TGTAU_BITS; ++ direction = -1; ++ } else ++ direction = 1; ++ ++ /* insert LSBs */ ++ *dp = (*dp & ~(mask << offset)) + (objval << offset); ++ shift = TGTAU_BITS-offset; ++ /* align mask and objval with AU boundary */ ++ objval >>= shift; ++ mask >>= shift; ++ ++ while (mask) { ++ dp += direction; ++ *dp = (*dp & ~mask) + objval; ++ objval >>= TGTAU_BITS; ++ mask >>= TGTAU_BITS; ++ } ++ ++ /* ++ * check for overflow ++ */ ++ if (sgn) { ++ unsigned tmp = (val >> fieldsz) + (sgn & 0x1); ++ if (tmp > ovf_limit[sgn-1]) ++ return 1; ++ } ++ return 0; ++ ++} /* dload_repack */ ++ ++/* lookup table for the scaling amount in a C6x instruction */ ++#if TMS32060 ++#define SCALE_BITS 4 /* there are 4 bits in the scale field */ ++#define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */ ++static const u8 C60_Scale[SCALE_MASK+1] = { ++ 1, 0, 0, 0, 1, 1, 2, 2 ++}; ++#endif ++ ++/************************************************************************** ++ * Procedure dload_relocate ++ * ++ * Parameters: ++ * data Pointer to base of image data ++ * rp Pointer to relocation operation ++ * ++ * Effect: ++ * Performs the specified relocation operation ++ **************************************************************************/ ++void dload_relocate(struct dload_state *dlthis, TgtAU_t *data, ++ struct reloc_record_t *rp) ++{ ++ RVALUE val = 0; ++ RVALUE reloc_amt = 0; ++ unsigned int fieldsz = 0; ++ unsigned int offset = 0; ++ unsigned int reloc_info = 0; ++ unsigned int reloc_action = 0; ++ register int rx = 0; ++ RVALUE *stackp = NULL; ++ int top; ++ struct Local_Symbol *svp = NULL; ++#ifdef RFV_SCALE ++ unsigned int scale = 0; ++#endif ++ ++ rx = HASH_FUNC(rp->r_type); ++ while (rop_map1[rx] != rp->r_type) { ++ rx = HASH_L(rop_map2[rx]); ++ if (rx < 0) { ++#if TMS32060 ++ switch (rp->r_type) { ++ case R_C60ALIGN: ++ case R_C60NOCMP: ++ case R_C60FPHEAD: ++ /* Ignore these reloc types and return */ ++ break; ++ default: ++ /* Unknown reloc type, print error and return */ ++ dload_error(dlthis, "Bad coff operator 0x%x", rp->r_type); ++ } ++#else ++ dload_error(dlthis, "Bad coff operator 0x%x", rp->r_type); ++#endif ++ return; ++ } ++ } ++ rx = HASH_I(rop_map2[rx]); ++ if ((rx < (sizeof(rop_action)/sizeof(uint_least16_t))) ++ && (rx < (sizeof(rop_info)/sizeof(uint_least16_t))) && (rx > 0)) { ++ reloc_action = rop_action[rx]; reloc_info = rop_info[rx]; ++ } else { ++ dload_error(dlthis, "Buffer Overflow - Array Index Out of Bounds"); ++ } ++ ++ /* Compute the relocation amount for the referenced symbol, if any */ ++ reloc_amt = rp->r_uval; ++ if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */ ++ if ((u32)rp->r_symndx < dlthis->dfile_hdr.df_no_syms) { ++ /* real symbol reference */ ++ svp = &dlthis->local_symtab[rp->r_symndx]; ++ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? 
++ svp->delta : svp->value; ++ } ++ /* reloc references current section */ ++ else if (rp->r_symndx == -1) ++ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? ++ dlthis->delta_runaddr : dlthis->image_secn->run_addr; ++ } /* relocation uses a symbol reference */ ++ /* Handle stack adjustment */ ++ val = 0; ++ top = RFV_STK(reloc_info); ++ if (top) { ++ top += dlthis->relstkidx - RSTK_UOP; ++ if (top >= STATIC_EXPR_STK_SIZE) { ++ dload_error(dlthis, ++ "Expression stack overflow in %s at offset " ++ FMT_UI32, dlthis->image_secn->name, ++ rp->r_vaddr + dlthis->image_offset); ++ return; ++ } ++ val = dlthis->relstk[dlthis->relstkidx]; ++ dlthis->relstkidx = top; ++ stackp = &dlthis->relstk[top]; ++ } ++ /* Derive field position and size, if we need them */ ++ if (reloc_info & ROP_RW) { /* read or write action in our future */ ++ fieldsz = RFV_WIDTH(reloc_action); ++ if (fieldsz) { /* field info from table */ ++ offset = RFV_POSN(reloc_action); ++ if (TARGET_BIG_ENDIAN) ++ /* make sure r_vaddr is the lowest target ++ * address containing bits */ ++ rp->r_vaddr += RFV_BIGOFF(reloc_info); ++ } else { /* field info from relocation op */ ++ fieldsz = rp->r_fieldsz; offset = rp->r_offset; ++ if (TARGET_BIG_ENDIAN) ++ /* make sure r_vaddr is the lowest target ++ address containing bits */ ++ rp->r_vaddr += (rp->r_wordsz - offset - fieldsz) ++ >> LOG_TARGET_AU_BITS; ++ } ++ data = (TgtAU_t *)((char *)data + TADDR_TO_HOST(rp->r_vaddr)); ++ /* compute lowest host location of referenced data */ ++#if BITS_PER_AU > TARGET_AU_BITS ++ /* conversion from target address to host address may lose ++ address bits; add loss to offset */ ++ if (TARGET_BIG_ENDIAN) { ++ offset += -((rp->r_vaddr << LOG_TARGET_AU_BITS) + ++ offset + fieldsz) & ++ (BITS_PER_AU-TARGET_AU_BITS); ++ } else { ++ offset += (rp->r_vaddr << LOG_TARGET_AU_BITS) & ++ (BITS_PER_AU-1); ++ } ++#endif ++#ifdef RFV_SCALE ++ scale = RFV_SCALE(reloc_info); ++#endif ++ } ++ /* read the object value from the current image, if so ordered */ ++ if (reloc_info & ROP_R) { /* relocation reads current image value */ ++ val = dload_unpack(dlthis, data, fieldsz, offset, ++ RFV_SIGN(reloc_info)); ++#ifdef RFV_SCALE ++ val <<= scale; ++#endif ++ } ++ /* perform the necessary arithmetic */ ++ switch (RFV_ACTION(reloc_action)) { /* relocation actions */ ++ case RACT_VAL: ++ break; ++ case RACT_ASGN: ++ val = reloc_amt; ++ break; ++ case RACT_ADD: ++ val += reloc_amt; ++ break; ++ case RACT_PCR: ++ /*----------------------------------------------------------- ++ * Handle special cases of jumping from absolute sections ++ * (special reloc type) or to absolute destination ++ * (symndx == -1). In either case, set the appropriate ++ * relocation amount to 0. 
++ *-----------------------------------------------------------*/ ++ if (rp->r_symndx == -1) ++ reloc_amt = 0; ++ val += reloc_amt - dlthis->delta_runaddr; ++ break; ++ case RACT_ADDISP: ++ val += rp->r_disp + reloc_amt; ++ break; ++ case RACT_ASGPC: ++ val = dlthis->image_secn->run_addr + reloc_amt; ++ break; ++ case RACT_PLUS: ++ if (stackp != NULL) ++ val += *stackp; ++ break; ++ case RACT_SUB: ++ if (stackp != NULL) ++ val = *stackp - val; ++ break; ++ case RACT_NEG: ++ val = -val; ++ break; ++ case RACT_MPY: ++ if (stackp != NULL) ++ val *= *stackp; ++ break; ++ case RACT_DIV: ++ if (stackp != NULL) ++ val = *stackp / val; ++ break; ++ case RACT_MOD: ++ if (stackp != NULL) ++ val = *stackp % val; ++ break; ++ case RACT_SR: ++ if (val >= sizeof(RVALUE) * BITS_PER_AU) ++ val = 0; ++ else if (stackp != NULL) ++ val = (URVALUE)*stackp >> val; ++ break; ++ case RACT_ASR: ++ if (val >= sizeof(RVALUE)*BITS_PER_AU) ++ val = sizeof(RVALUE)*BITS_PER_AU - 1; ++ else if (stackp != NULL) ++ val = *stackp >> val; ++ break; ++ case RACT_SL: ++ if (val >= sizeof(RVALUE)*BITS_PER_AU) ++ val = 0; ++ else if (stackp != NULL) ++ val = *stackp << val; ++ break; ++ case RACT_AND: ++ if (stackp != NULL) ++ val &= *stackp; ++ break; ++ case RACT_OR: ++ if (stackp != NULL) ++ val |= *stackp; ++ break; ++ case RACT_XOR: ++ if (stackp != NULL) ++ val ^= *stackp; ++ break; ++ case RACT_NOT: ++ val = ~val; ++ break; ++#if TMS32060 ++ case RACT_C6SECT: ++ /* actually needed address of secn containing symbol */ ++ if (svp != NULL) { ++ if (rp->r_symndx >= 0) ++ if (svp->secnn > 0) ++ reloc_amt = dlthis->ldr_sections ++ [svp->secnn-1].run_addr; ++ } ++ /* !!! FALL THRU !!! */ ++ case RACT_C6BASE: ++ if (dlthis->bss_run_base == 0) { ++ struct dynload_symbol *symp; ++ symp = dlthis->mysym->Find_Matching_Symbol ++ (dlthis->mysym, BSSSYMBOL); ++ /* lookup value of global BSS base */ ++ if (symp) ++ dlthis->bss_run_base = symp->value; ++ else ++ dload_error(dlthis, ++ "Global BSS base referenced in %s offset"\ ++ FMT_UI32 " but not defined", ++ dlthis->image_secn->name, ++ rp->r_vaddr + dlthis->image_offset); ++ } ++ reloc_amt -= dlthis->bss_run_base; ++ /* !!! FALL THRU !!! 
*/ ++ case RACT_C6DSPL: ++ /* scale factor determined by 3 LSBs of field */ ++ scale = C60_Scale[val & SCALE_MASK]; ++ offset += SCALE_BITS; ++ fieldsz -= SCALE_BITS; ++ val >>= SCALE_BITS; /* ignore the scale field hereafter */ ++ val <<= scale; ++ val += reloc_amt; /* do the usual relocation */ ++ if (((1 << scale)-1) & val) ++ dload_error(dlthis, ++ "Unaligned reference in %s offset " FMT_UI32, ++ dlthis->image_secn->name, ++ rp->r_vaddr + dlthis->image_offset); ++ break; ++#endif ++ } /* relocation actions */ ++ /* * Put back result as required */ ++ if (reloc_info & ROP_W) { /* relocation writes image value */ ++#ifdef RFV_SCALE ++ val >>= scale; ++#endif ++ if (dload_repack(dlthis, val, data, fieldsz, offset, ++ RFV_SIGN(reloc_info))) { ++ dload_error(dlthis, "Relocation value " FMT_UI32 ++ " overflows %d bits in %s offset " FMT_UI32, val, ++ fieldsz, dlthis->image_secn->name, ++ dlthis->image_offset + rp->r_vaddr); ++ } ++ } else if (top) ++ *stackp = val; ++} /* reloc_value */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table_c6000.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/reloc_table_c6000.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table_c6000.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/reloc_table_c6000.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,258 @@ ++/* ++ * reloc_table_c6000.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++ ++/* Tables generated for c6000 */ ++ ++#define HASH_FUNC(zz) (((((zz) + 1) * UINT32_C(1845)) >> 11) & 63) ++#define HASH_L(zz) ((zz) >> 8) ++#define HASH_I(zz) ((zz) & 0xFF) ++ ++static const u16 rop_map1[] = { ++ 0, ++ 1, ++ 2, ++ 20, ++ 4, ++ 5, ++ 6, ++ 15, ++ 80, ++ 81, ++ 82, ++ 83, ++ 84, ++ 85, ++ 86, ++ 87, ++ 17, ++ 18, ++ 19, ++ 21, ++ 16, ++ 16394, ++ 16404, ++ 65535, ++ 65535, ++ 65535, ++ 65535, ++ 65535, ++ 65535, ++ 32, ++ 65535, ++ 65535, ++ 65535, ++ 65535, ++ 65535, ++ 65535, ++ 40, ++ 112, ++ 113, ++ 65535, ++ 16384, ++ 16385, ++ 16386, ++ 16387, ++ 16388, ++ 16389, ++ 16390, ++ 16391, ++ 16392, ++ 16393, ++ 16395, ++ 16396, ++ 16397, ++ 16398, ++ 16399, ++ 16400, ++ 16401, ++ 16402, ++ 16403, ++ 16405, ++ 16406, ++ 65535, ++ 65535, ++ 65535 ++}; ++ ++static const s16 rop_map2[] = { ++ -256, ++ -255, ++ -254, ++ -245, ++ -253, ++ -252, ++ -251, ++ -250, ++ -241, ++ -240, ++ -239, ++ -238, ++ -237, ++ -236, ++ 1813, ++ 5142, ++ -248, ++ -247, ++ 778, ++ -244, ++ -249, ++ -221, ++ -211, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -243, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -242, ++ -233, ++ -232, ++ -1, ++ -231, ++ -230, ++ -229, ++ -228, ++ -227, ++ -226, ++ -225, ++ -224, ++ -223, ++ 5410, ++ -220, ++ -219, ++ -218, ++ -217, ++ -216, ++ -215, ++ -214, ++ -213, ++ 5676, ++ -210, ++ -209, ++ -1, ++ -1, ++ -1 ++}; ++ ++static const u16 rop_action[] = { ++ 2560, ++ 2304, ++ 2304, ++ 2432, ++ 2432, ++ 2560, ++ 2176, ++ 2304, ++ 2560, ++ 3200, ++ 3328, ++ 3584, ++ 3456, ++ 2304, ++ 4208, ++ 20788, ++ 21812, ++ 3415, ++ 3245, ++ 2311, ++ 4359, ++ 19764, ++ 2311, ++ 3191, ++ 3280, ++ 6656, ++ 7680, ++ 8704, ++ 9728, ++ 10752, ++ 11776, ++ 12800, ++ 13824, ++ 14848, ++ 15872, ++ 16896, ++ 17920, ++ 18944, ++ 0, ++ 0, ++ 0, ++ 0, ++ 1536, ++ 1536, ++ 1536, ++ 5632, ++ 512, ++ 0 ++}; ++ ++static const u16 rop_info[] = { ++ 0, ++ 35, ++ 35, ++ 35, ++ 35, ++ 35, ++ 35, ++ 35, ++ 35, ++ 39, ++ 39, ++ 39, ++ 39, ++ 35, ++ 34, ++ 283, ++ 299, ++ 4135, ++ 4391, ++ 291, ++ 33059, ++ 283, ++ 295, ++ 4647, ++ 4135, ++ 64, ++ 64, ++ 128, ++ 64, ++ 64, ++ 64, ++ 64, ++ 64, ++ 64, ++ 64, ++ 64, ++ 64, ++ 128, ++ 201, ++ 197, ++ 74, ++ 70, ++ 208, ++ 196, ++ 200, ++ 192, ++ 192, ++ 66 ++}; +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/reloc_table.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/dynload/reloc_table.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,102 @@ ++/* ++ * reloc_table.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++ ++#ifndef __RELOC_TABLE_H__ ++#define __RELOC_TABLE_H__ ++/* ++ * Table of relocation operator properties ++ */ ++#include ++ ++/* How does this relocation operation access the program image? 
*/ ++#define ROP_N 0 /* does not access image */ ++#define ROP_R 1 /* read from image */ ++#define ROP_W 2 /* write to image */ ++#define ROP_RW 3 /* read from and write to image */ ++ ++/* For program image access, what are the overflow rules for the bit field? */ ++/* Beware! Procedure repack depends on this encoding */ ++#define ROP_ANY 0 /* no overflow ever, just truncate the value */ ++#define ROP_SGN 1 /* signed field */ ++#define ROP_UNS 2 /* unsigned field */ ++#define ROP_MAX 3 /* allow maximum range of either signed or unsigned */ ++ ++/* How does the relocation operation use the symbol reference */ ++#define ROP_IGN 0 /* no symbol is referenced */ ++#define ROP_LIT 0 /* use rp->r_uval literal field */ ++#define ROP_SYM 1 /* symbol value is used in relocation */ ++#define ROP_SYMD 2 /* delta value vs last link is used */ ++ ++/* How does the reloc op use the stack? */ ++#define RSTK_N 0 /* Does not use */ ++#define RSTK_POP 1 /* Does a POP */ ++#define RSTK_UOP 2 /* Unary op, stack position unaffected */ ++#define RSTK_PSH 3 /* Does a push */ ++ ++/* ++ * Computational actions performed by the dynamic loader ++ */ ++enum Dload_Actions { ++ RACT_VAL, /* don't alter the current val (from stack or mem fetch) */ ++ RACT_ASGN, /* set value to reference amount (from symbol reference) */ ++ RACT_ADD, /* add reference to value */ ++ RACT_PCR, /* add reference minus PC delta to value */ ++ RACT_ADDISP, /* add reference plus r_disp */ ++ RACT_ASGPC, /* set value to section address plus reference */ ++ ++ RACT_PLUS, /* stack + */ ++ RACT_SUB, /* stack - */ ++ RACT_NEG, /* stack unary - */ ++ ++ RACT_MPY, /* stack * */ ++ RACT_DIV, /* stack / */ ++ RACT_MOD, /* stack % */ ++ ++ RACT_SR, /* stack unsigned >> */ ++ RACT_ASR, /* stack signed >> */ ++ RACT_SL, /* stack << */ ++ RACT_AND, /* stack & */ ++ RACT_OR, /* stack | */ ++ RACT_XOR, /* stack ^ */ ++ RACT_NOT, /* stack ~ */ ++ RACT_C6SECT, /* for C60 R_SECT op */ ++ RACT_C6BASE, /* for C60 R_BASE op */ ++ RACT_C6DSPL, /* for C60 scaled 15-bit displacement */ ++ RACT_PCR23T /* for ARM Thumb long branch */ ++}; ++ ++/* ++ * macros used to extract values ++ */ ++#define RFV_POSN(aaa) ((aaa) & 0xF) ++#define RFV_WIDTH(aaa) (((aaa) >> 4) & 0x3F) ++#define RFV_ACTION(aaa) ((aaa) >> 10) ++ ++#define RFV_SIGN(iii) (((iii) >> 2) & 0x3) ++#define RFV_SYM(iii) (((iii) >> 4) & 0x3) ++#define RFV_STK(iii) (((iii) >> 6) & 0x3) ++#define RFV_ACCS(iii) ((iii) & 0x3) ++ ++#if (TMS32060) ++#define RFV_SCALE(iii) ((iii) >> 11) ++#define RFV_BIGOFF(iii) (((iii) >> 8) & 0x7) ++#else ++#define RFV_BIGOFF(iii) ((iii) >> 8) ++#endif ++ ++#endif /* __RELOC_TABLE_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gb.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gb.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gb.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,182 @@ ++/* ++ * gb.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++ ++/* ++ * ======== gb.c ======== ++ * Description: Generic bitmap operations. ++ * ++ *! Revision History ++ *! ================ ++ *! 24-Feb-2003 vp Code review updates. ++ *! 17-Dec-2002 map Fixed GB_minset(), GB_empty(), and GB_full(), ++ *! to ensure only 'len' bits are considered in the map ++ *! 18-Oct-2002 sb Ported to Linux platform. ++ *! 06-Dec-2001 jeh Fixed bug in GB_minclear(). ++ *! ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++/* ----------------------------------- This */ ++#include ++#include ++ ++typedef GB_BitNum GB_WordNum; ++ ++struct GB_TMap { ++ GB_BitNum len; ++ GB_WordNum wcnt; ++ u32 *words; ++}; ++ ++/* ++ * ======== GB_clear ======== ++ * purpose: ++ * Clears a bit in the bit map. ++ */ ++ ++void GB_clear(struct GB_TMap *map, GB_BitNum bitn) ++{ ++ u32 mask; ++ ++ mask = 1L << (bitn % BITS_PER_LONG); ++ map->words[bitn / BITS_PER_LONG] &= ~mask; ++} ++ ++/* ++ * ======== GB_create ======== ++ * purpose: ++ * Creates a bit map. ++ */ ++ ++struct GB_TMap *GB_create(GB_BitNum len) ++{ ++ struct GB_TMap *map; ++ GB_WordNum i; ++ map = (struct GB_TMap *)GS_alloc(sizeof(struct GB_TMap)); ++ if (map != NULL) { ++ map->len = len; ++ map->wcnt = len / BITS_PER_LONG + 1; ++ map->words = (u32 *)GS_alloc(map->wcnt * sizeof(u32)); ++ if (map->words != NULL) { ++ for (i = 0; i < map->wcnt; i++) ++ map->words[i] = 0L; ++ ++ } else { ++ GS_frees(map, sizeof(struct GB_TMap)); ++ map = NULL; ++ } ++ } ++ ++ return map; ++} ++ ++/* ++ * ======== GB_delete ======== ++ * purpose: ++ * Frees a bit map. ++ */ ++ ++void GB_delete(struct GB_TMap *map) ++{ ++ GS_frees(map->words, map->wcnt * sizeof(u32)); ++ GS_frees(map, sizeof(struct GB_TMap)); ++} ++ ++/* ++ * ======== GB_findandset ======== ++ * purpose: ++ * Finds a free bit and sets it. ++ */ ++GB_BitNum GB_findandset(struct GB_TMap *map) ++{ ++ GB_BitNum bitn; ++ ++ bitn = GB_minclear(map); ++ ++ if (bitn != GB_NOBITS) ++ GB_set(map, bitn); ++ ++ return bitn; ++} ++ ++/* ++ * ======== GB_minclear ======== ++ * purpose: ++ * returns the location of the first unset bit in the bit map. ++ */ ++GB_BitNum GB_minclear(struct GB_TMap *map) ++{ ++ GB_BitNum bit_location = 0; ++ GB_BitNum bitAcc = 0; ++ GB_WordNum i; ++ GB_BitNum bit; ++ u32 *word; ++ ++ for (word = map->words, i = 0; i < map->wcnt; word++, i++) { ++ if (~*word) { ++ for (bit = 0; bit < BITS_PER_LONG; bit++, bitAcc++) { ++ if (bitAcc == map->len) ++ return GB_NOBITS; ++ ++ if (~*word & (1L << bit)) { ++ bit_location = i * BITS_PER_LONG + bit; ++ return bit_location; ++ } ++ ++ } ++ } else { ++ bitAcc += BITS_PER_LONG; ++ } ++ } ++ ++ return GB_NOBITS; ++} ++ ++/* ++ * ======== GB_set ======== ++ * purpose: ++ * Sets a bit in the bit map. ++ */ ++ ++void GB_set(struct GB_TMap *map, GB_BitNum bitn) ++{ ++ u32 mask; ++ ++ mask = 1L << (bitn % BITS_PER_LONG); ++ map->words[bitn / BITS_PER_LONG] |= mask; ++} ++ ++/* ++ * ======== GB_test ======== ++ * purpose: ++ * Returns true if the bit is set in the specified location. ++ */ ++ ++bool GB_test(struct GB_TMap *map, GB_BitNum bitn) ++{ ++ bool state; ++ u32 mask; ++ u32 word; ++ ++ mask = 1L << (bitn % BITS_PER_LONG); ++ word = map->words[bitn / BITS_PER_LONG]; ++ state = word & mask ? 
TRUE : FALSE; ++ ++ return state; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gh.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gh.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gh.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gh.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,191 @@ ++/* ++ * gh.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== gh.c ======== ++ */ ++ ++#include ++ ++#include ++ ++#include ++ ++#include ++ ++struct Elem { ++ struct Elem *next; ++ u8 data[1]; ++}; ++ ++struct GH_THashTab { ++ u16 maxBucket; ++ u16 valSize; ++ struct Elem **buckets; ++ u16(*hash) (void *, u16); ++ bool(*match) (void *, void *); ++ void(*delete) (void *); ++}; ++ ++static void Nop(void *p); ++static s32 curInit; ++static void myfree(void *ptr, s32 size); ++ ++/* ++ * ======== GH_create ======== ++ */ ++ ++struct GH_THashTab *GH_create(u16 maxBucket, u16 valSize, ++ u16(*hash)(void *, u16), bool(*match)(void *, void *), ++ void(*delete)(void *)) ++{ ++ struct GH_THashTab *hashTab; ++ u16 i; ++ hashTab = (struct GH_THashTab *)GS_alloc(sizeof(struct GH_THashTab)); ++ if (hashTab == NULL) ++ return NULL; ++ hashTab->maxBucket = maxBucket; ++ hashTab->valSize = valSize; ++ hashTab->hash = hash; ++ hashTab->match = match; ++ hashTab->delete = delete == NULL ? 
Nop : delete; ++ ++ hashTab->buckets = (struct Elem **) ++ GS_alloc(sizeof(struct Elem *) * maxBucket); ++ if (hashTab->buckets == NULL) { ++ GH_delete(hashTab); ++ return NULL; ++ } ++ ++ for (i = 0; i < maxBucket; i++) ++ hashTab->buckets[i] = NULL; ++ ++ return hashTab; ++} ++ ++/* ++ * ======== GH_delete ======== ++ */ ++void GH_delete(struct GH_THashTab *hashTab) ++{ ++ struct Elem *elem, *next; ++ u16 i; ++ ++ if (hashTab != NULL) { ++ if (hashTab->buckets != NULL) { ++ for (i = 0; i < hashTab->maxBucket; i++) { ++ for (elem = hashTab->buckets[i]; elem != NULL; ++ elem = next) { ++ next = elem->next; ++ (*hashTab->delete) (elem->data); ++ myfree(elem, sizeof(struct Elem) - 1 + ++ hashTab->valSize); ++ } ++ } ++ ++ myfree(hashTab->buckets, sizeof(struct Elem *) ++ * hashTab->maxBucket); ++ } ++ ++ myfree(hashTab, sizeof(struct GH_THashTab)); ++ } ++} ++ ++/* ++ * ======== GH_exit ======== ++ */ ++ ++void GH_exit(void) ++{ ++ if (curInit-- == 1) ++ GS_exit(); ++ ++} ++ ++/* ++ * ======== GH_find ======== ++ */ ++ ++void *GH_find(struct GH_THashTab *hashTab, void *key) ++{ ++ struct Elem *elem; ++ ++ elem = hashTab->buckets[(*hashTab->hash)(key, hashTab->maxBucket)]; ++ ++ for (; elem; elem = elem->next) { ++ if ((*hashTab->match)(key, elem->data)) ++ return elem->data; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * ======== GH_init ======== ++ */ ++ ++void GH_init(void) ++{ ++ if (curInit++ == 0) ++ GS_init(); ++} ++ ++/* ++ * ======== GH_insert ======== ++ */ ++ ++void *GH_insert(struct GH_THashTab *hashTab, void *key, void *value) ++{ ++ struct Elem *elem; ++ u16 i; ++ char *src, *dst; ++ ++ elem = (struct Elem *)GS_alloc(sizeof(struct Elem) - 1 + ++ hashTab->valSize); ++ if (elem != NULL) { ++ ++ dst = (char *)elem->data; ++ src = (char *)value; ++ for (i = 0; i < hashTab->valSize; i++) ++ *dst++ = *src++; ++ ++ i = (*hashTab->hash)(key, hashTab->maxBucket); ++ elem->next = hashTab->buckets[i]; ++ hashTab->buckets[i] = elem; ++ ++ return elem->data; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * ======== Nop ======== ++ */ ++/* ARGSUSED */ ++static void Nop(void *p) ++{ ++ p = p; /* stifle compiler warning */ ++} ++ ++/* ++ * ======== myfree ======== ++ */ ++static void myfree(void *ptr, s32 size) ++{ ++ GS_free(ptr); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gs.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gs.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gs.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gs.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,106 @@ ++/* ++ * gs.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== gs.c ======== ++ * Description: ++ * General storage memory allocator services. ++ * ++ *! Revision History ++ *! ================ ++ *! 29-Sep-1999 ag: Un-commented MEM_Init in GS_init(). ++ *! 14-May-1997 mg: Modified to use new GS API for GS_free() and GS_frees(). ++ *! 06-Nov-1996 gp: Re-commented MEM_Init in GS_init(). GS needs GS_Exit(). ++ *! 
21-Oct-1996 db: Un-commented MEM_Init in GS_init(). ++ *! 21-May-1996 mg: Created from original stdlib implementation. ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Globals */ ++static u32 cumsize; ++ ++/* ++ * ======== GS_alloc ======== ++ * purpose: ++ * Allocates memory of the specified size. ++ */ ++void *GS_alloc(u32 size) ++{ ++ void *p; ++ ++ p = MEM_Calloc(size, MEM_PAGED); ++ if (p == NULL) ++ return NULL; ++ cumsize += size; ++ return p; ++} ++ ++/* ++ * ======== GS_exit ======== ++ * purpose: ++ * Discontinue the usage of the GS module. ++ */ ++void GS_exit(void) ++{ ++ MEM_Exit(); ++} ++ ++/* ++ * ======== GS_free ======== ++ * purpose: ++ * Frees the memory. ++ */ ++void GS_free(void *ptr) ++{ ++ MEM_Free(ptr); ++ /* ack! no size info */ ++ /* cumsize -= size; */ ++} ++ ++/* ++ * ======== GS_frees ======== ++ * purpose: ++ * Frees the memory. ++ */ ++void GS_frees(void *ptr, u32 size) ++{ ++ MEM_Free(ptr); ++ cumsize -= size; ++} ++ ++/* ++ * ======== GS_init ======== ++ * purpose: ++ * Initializes the GS module. ++ */ ++void GS_init(void) ++{ ++ static bool curInit; ++ ++ if (curInit == false) { ++ curInit = MEM_Init(); /* which can't fail currently. */ ++ } ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gt.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gt.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gt.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/gt.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,348 @@ ++/* ++ * gt.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== gt.c ======== ++ * Description: This module implements the trace mechanism for bridge. ++ * ++ *! Revision History ++ *! ================ ++ *! 16-May-1997 dr Changed GT_Config member names to conform to coding ++ *! standards. ++ *! 23-Apr-1997 ge Check for GT->TIDFXN for NULL before calling it. ++ *! 03-Jan-1997 ge Changed GT_Config structure member names to eliminate ++ *! preprocessor confusion with other macros. 
++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++#define GT_WILD '*' ++ ++#define GT_CLEAR '=' ++#define GT_ON '+' ++#define GT_OFF '-' ++ ++enum GT_State { ++ GT_SEP, ++ GT_FIRST, ++ GT_SECOND, ++ GT_OP, ++ GT_DIGITS ++} ; ++ ++#ifdef CONFIG_BRIDGE_DEBUG ++static char *GT_1format = "%s - %d: "; ++static char *GT_2format = "%s - %d(%d): "; ++#endif /* CONFIG_BRIDGE_DEBUG */ ++ ++static unsigned char *GT_tMask[GT_BOUND]; ++ ++static bool curInit; ++static char *separator; ++static unsigned char tabMem[GT_BOUND][sizeof(unsigned char) * GT_BOUND]; ++ ++static void error(char *string); ++static void setMask(s16 index1, s16 index2, char op, unsigned char mask); ++ ++/* ++ * ======== _GT_create ======== ++ * purpose: ++ * Creates GT mask. ++ */ ++void _GT_create(struct GT_Mask *mask, char *modName) ++{ ++ mask->modName = modName; ++ mask->flags = &(GT_tMask[modName[0] - 'A'][modName[1] - 'A']); ++} ++ ++/* ++ * ======== GT_init ======== ++ * purpose: ++ * Initializes GT module. ++ */ ++#ifdef GT_init ++#undef GT_init ++#endif ++void GT_init(void) ++{ ++ register unsigned char index1; ++ register unsigned char index2; ++ ++ if (!curInit) { ++ curInit = true; ++ ++ separator = " ,;/"; ++ ++ for (index1 = 0; index1 < GT_BOUND; index1++) { ++ GT_tMask[index1] = tabMem[index1]; ++ for (index2 = 0; index2 < GT_BOUND; index2++) { ++ /* no tracing */ ++ GT_tMask[index1][index2] = 0x00; ++ } ++ } ++ } ++} ++ ++/* ++ * ======== _GT_set ======== ++ * purpose: ++ * Sets the trace string format. ++ */ ++ ++void _GT_set(char *str) ++{ ++ enum GT_State state; ++ char *sep; ++ s16 index1 = GT_BOUND; /* indicates all values */ ++ s16 index2 = GT_BOUND; /* indicates all values */ ++ char op = GT_CLEAR; ++ bool maskValid; ++ s16 digit; ++ register unsigned char mask = 0x0; /* no tracing */ ++ ++ if (str == NULL) ++ return; ++ ++ maskValid = false; ++ state = GT_SEP; ++ while (*str != '\0') { ++ switch ((s32) state) { ++ case (s32) GT_SEP: ++ maskValid = false; ++ sep = separator; ++ while (*sep != '\0') { ++ if (*str == *sep) { ++ str++; ++ break; ++ } else { ++ sep++; ++ } ++ } ++ if (*sep == '\0') ++ state = GT_FIRST; ++ ++ break; ++ case (s32) GT_FIRST: ++ if (*str == GT_WILD) { ++ /* indicates all values */ ++ index1 = GT_BOUND; ++ /* indicates all values */ ++ index2 = GT_BOUND; ++ state = GT_OP; ++ } else { ++ if (*str >= 'a') ++ index1 = (s16) (*str - 'a'); ++ else ++ index1 = (s16) (*str - 'A'); ++ if ((index1 >= 0) && (index1 < GT_BOUND)) ++ state = GT_SECOND; ++ else ++ state = GT_SEP; ++ } ++ str++; ++ break; ++ case (s32) GT_SECOND: ++ if (*str == GT_WILD) { ++ index2 = GT_BOUND; /* indicates all values */ ++ state = GT_OP; ++ str++; ++ } else { ++ if (*str >= 'a') ++ index2 = (s16) (*str - 'a'); ++ else ++ index2 = (s16) (*str - 'A'); ++ if ((index2 >= 0) && (index2 < GT_BOUND)) { ++ state = GT_OP; ++ str++; ++ } else { ++ state = GT_SEP; ++ } ++ } ++ break; ++ case (s32) GT_OP: ++ op = *str; ++ mask = 0x0; /* no tracing */ ++ switch (op) { ++ case (s32) GT_CLEAR: ++ maskValid = true; ++ case (s32) GT_ON: ++ case (s32) GT_OFF: ++ state = GT_DIGITS; ++ str++; ++ break; ++ default: ++ state = GT_SEP; ++ break; ++ } ++ break; ++ case (s32) GT_DIGITS: ++ digit = (s16) (*str - '0'); ++ if ((digit >= 0) && (digit <= 7)) { ++ mask |= (0x01 << digit); ++ maskValid = true; ++ str++; ++ } else { ++ if (maskValid == true) { ++ setMask(index1, index2, op, mask); ++ maskValid = false; ++ } ++ state = GT_SEP; ++ } ++ 
break; ++ default: ++ error("illegal trace mask"); ++ break; ++ } ++ } ++ ++ if (maskValid) ++ setMask(index1, index2, op, mask); ++} ++ ++/* ++ * ======== _GT_trace ======== ++ * purpose: ++ * Prints the input string onto standard output ++ */ ++ ++s32 _GT_trace(struct GT_Mask *mask, char *format, ...) ++{ ++ s32 arg1, arg2, arg3, arg4, arg5, arg6; ++ va_list va; ++ ++ va_start(va, format); ++ ++ arg1 = va_arg(va, s32); ++ arg2 = va_arg(va, s32); ++ arg3 = va_arg(va, s32); ++ arg4 = va_arg(va, s32); ++ arg5 = va_arg(va, s32); ++ arg6 = va_arg(va, s32); ++ ++ va_end(va); ++#ifdef DEBUG ++ if (GT->PIDFXN == NULL) { ++ printk(GT_1format, mask->modName, GT->TIDFXN ? ++ (*GT->TIDFXN)() : 0); ++ } else { ++ printk(GT_2format, mask->modName, (*GT->PIDFXN)(), ++ GT->TIDFXN ? (*GT->TIDFXN)() : 0); ++ } ++#endif ++ printk(format, arg1, arg2, arg3, arg4, arg5, arg6); ++ ++ return 0; ++} ++ ++/* ++ * ======== error ======== ++ * purpose: ++ * Prints errors onto the standard output. ++ */ ++static void error(char *string) ++{ ++ printk("GT: %s", string); ++} ++ ++/* ++ * ======== setmask ======== ++ * purpose: ++ * Sets mask for the GT module. ++ */ ++ ++static void setMask(s16 index1, s16 index2, char op, u8 mask) ++{ ++ register s16 index; ++ ++ if (index1 < GT_BOUND) { ++ if (index2 < GT_BOUND) { ++ switch (op) { ++ case (s32) GT_CLEAR: ++ GT_tMask[index1][index2] = mask; ++ break; ++ case (s32) GT_ON: ++ GT_tMask[index1][index2] |= mask; ++ break; ++ case (s32) GT_OFF: ++ GT_tMask[index1][index2] &= ~mask; ++ break; ++ default: ++ error("illegal trace mask"); ++ break; ++ } ++ } else { ++ for (index2--; index2 >= 0; index2--) { ++ switch (op) { ++ case (s32) GT_CLEAR: ++ GT_tMask[index1][index2] = mask; ++ break; ++ case (s32) GT_ON: ++ GT_tMask[index1][index2] |= mask; ++ break; ++ case (s32) GT_OFF: ++ GT_tMask[index1][index2] &= ~mask; ++ break; ++ default: ++ error("illegal trace mask"); ++ break; ++ } ++ } ++ } ++ } else { ++ for (index1--; index1 >= 0; index1--) { ++ if (index2 < GT_BOUND) { ++ switch (op) { ++ case (s32) GT_CLEAR: ++ GT_tMask[index1][index2] = mask; ++ break; ++ case (s32) GT_ON: ++ GT_tMask[index1][index2] |= mask; ++ break; ++ case (s32) GT_OFF: ++ GT_tMask[index1][index2] &= ~mask; ++ break; ++ default: ++ error("illegal trace mask"); ++ break; ++ } ++ } else { ++ index = GT_BOUND; ++ for (index--; index >= 0; index--) { ++ switch (op) { ++ case (s32) GT_CLEAR: ++ GT_tMask[index1][index] = mask; ++ break; ++ case (s32) GT_ON: ++ GT_tMask[index1][index] |= mask; ++ break; ++ case (s32) GT_OFF: ++ GT_tMask[index1][index] &= ++ ~mask; ++ break; ++ default: ++ error("illegal trace mask"); ++ break; ++ } ++ } ++ } ++ } ++ } ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/_gt_para.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/_gt_para.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/_gt_para.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/_gt_para.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,107 @@ ++/* ++ * _gt_para.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _gt_para.c ======== ++ * Description: ++ * Configuration parameters for GT. This file is separated from ++ * gt.c so that GT_assert() can reference the error function without ++ * forcing the linker to include all the code for GT_set(), GT_init(), ++ * etc. into a fully bound image. Thus, GT_assert() can be retained in ++ * a program for which GT_?trace() has been compiled out. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 vp: Code Review Updates. ++ *! 18-Oct-2002 sb: Ported to Linux platform. ++ *! 03-Jul-2001 rr: Removed kfuncs.h because of build errors. ++ *! 07-Dec-1999 ag: Fxn error now causes a WinCE DebugBreak; ++ *! 30-Aug-1999 ag: Now uses GP_printf for printf and error. ++ *! ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++ ++/* ----------------------------------- Function Prototypes */ ++static void error(char *msg, ...); ++static s32 GT_nop(void); ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++ ++struct GT_Config _GT_params = { ++ (Fxn) printk, /* printf */ ++ (Fxn) NULL, /* procid */ ++ (Fxn) GT_nop, /* taskid */ ++ (Fxn) error, /* error */ ++}; ++ ++/* ----------------------------------- Globals */ ++struct GT_Config *GT = &_GT_params; ++ ++/* ++ * ======== GT_nop ======== ++ */ ++static s32 GT_nop(void) ++{ ++ return 0; ++} ++ ++/* ++ * ======== error ======== ++ * purpose: ++ * Prints error onto the standard output. ++ */ ++static void error(char *fmt, ...) ++{ ++ s32 arg1, arg2, arg3, arg4, arg5, arg6; ++ ++ va_list va; ++ ++ va_start(va, fmt); ++ ++ arg1 = va_arg(va, s32); ++ arg2 = va_arg(va, s32); ++ arg3 = va_arg(va, s32); ++ arg4 = va_arg(va, s32); ++ arg5 = va_arg(va, s32); ++ arg6 = va_arg(va, s32); ++ ++ va_end(va); ++ ++ printk("ERROR: "); ++ printk(fmt, arg1, arg2, arg3, arg4, arg5, arg6); ++ ++#if defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT) ++ if (in_interrupt()) { ++ printk(KERN_INFO "Not stopping after error since ISR/DPC " ++ "are disabled\n"); ++ } else { ++ set_current_state(TASK_INTERRUPTIBLE); ++ flush_signals(current); ++ schedule(); ++ flush_signals(current); ++ printk(KERN_INFO "Signaled in error function\n"); ++ } ++#endif ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/uuidutil.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/uuidutil.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/uuidutil.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/gen/uuidutil.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,238 @@ ++/* ++ * uuidutil.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++ ++/* ++ * ======== uuidutil.c ======== ++ * Description: ++ * This file contains the implementation of UUID helper functions. ++ * ++ *! Revision History ++ *! ================ ++ *! 23-Feb-2003 vp: Code review updates. ++ *! 18-Oct-2003 vp: Ported to Linux platform. ++ *! 31-Aug-2000 rr: UUID_UuidFromString bug fixed. ++ *! 29-Aug-2000 rr: Modified UUID_UuidFromString. ++ *! 09-Nov-2000 kc: Modified UUID_UuidFromString to simplify implementation. ++ *! 30-Oct-2000 kc: Modified UUID utility module function prefix. ++ *! 10-Aug-2000 kc: Created. ++ *! ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ++ * ======== UUID_UuidToString ======== ++ * Purpose: ++ * Converts a struct DSP_UUID to a string. ++ * Note: snprintf format specifier is: ++ * %[flags] [width] [.precision] [{h | l | I64 | L}]type ++ */ ++void UUID_UuidToString(IN struct DSP_UUID *pUuid, OUT char *pszUuid, ++ IN s32 size) ++{ ++ s32 i; /* return result from snprintf. */ ++ ++ DBC_Require(pUuid && pszUuid); ++ ++ i = snprintf(pszUuid, size, ++ "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X", ++ pUuid->ulData1, pUuid->usData2, pUuid->usData3, ++ pUuid->ucData4, pUuid->ucData5, pUuid->ucData6[0], ++ pUuid->ucData6[1], pUuid->ucData6[2], pUuid->ucData6[3], ++ pUuid->ucData6[4], pUuid->ucData6[5]); ++ ++ DBC_Ensure(i != -1); ++} ++ ++/* ++ * ======== htoi ======== ++ * Purpose: ++ * Converts a hex value to a decimal integer. ++ */ ++ ++static int htoi(char c) ++{ ++ switch (c) { ++ case '0': ++ return 0; ++ case '1': ++ return 1; ++ case '2': ++ return 2; ++ case '3': ++ return 3; ++ case '4': ++ return 4; ++ case '5': ++ return 5; ++ case '6': ++ return 6; ++ case '7': ++ return 7; ++ case '8': ++ return 8; ++ case '9': ++ return 9; ++ case 'A': ++ return 10; ++ case 'B': ++ return 11; ++ case 'C': ++ return 12; ++ case 'D': ++ return 13; ++ case 'E': ++ return 14; ++ case 'F': ++ return 15; ++ case 'a': ++ return 10; ++ case 'b': ++ return 11; ++ case 'c': ++ return 12; ++ case 'd': ++ return 13; ++ case 'e': ++ return 14; ++ case 'f': ++ return 15; ++ } ++ return 0; ++} ++ ++/* ++ * ======== UUID_UuidFromString ======== ++ * Purpose: ++ * Converts a string to a struct DSP_UUID. 
++ */ ++void UUID_UuidFromString(IN char *pszUuid, OUT struct DSP_UUID *pUuid) ++{ ++ char c; ++ s32 i, j; ++ s32 result; ++ char *temp = pszUuid; ++ ++ result = 0; ++ for (i = 0; i < 8; i++) { ++ /* Get first character in string */ ++ c = *temp; ++ ++ /* Increase the results by new value */ ++ result *= 16; ++ result += htoi(c); ++ ++ /* Go to next character in string */ ++ temp++; ++ } ++ pUuid->ulData1 = result; ++ ++ /* Step over underscore */ ++ temp++; ++ ++ result = 0; ++ for (i = 0; i < 4; i++) { ++ /* Get first character in string */ ++ c = *temp; ++ ++ /* Increase the results by new value */ ++ result *= 16; ++ result += htoi(c); ++ ++ /* Go to next character in string */ ++ temp++; ++ } ++ pUuid->usData2 = (u16)result; ++ ++ /* Step over underscore */ ++ temp++; ++ ++ result = 0; ++ for (i = 0; i < 4; i++) { ++ /* Get first character in string */ ++ c = *temp; ++ ++ /* Increase the results by new value */ ++ result *= 16; ++ result += htoi(c); ++ ++ /* Go to next character in string */ ++ temp++; ++ } ++ pUuid->usData3 = (u16)result; ++ ++ /* Step over underscore */ ++ temp++; ++ ++ result = 0; ++ for (i = 0; i < 2; i++) { ++ /* Get first character in string */ ++ c = *temp; ++ ++ /* Increase the results by new value */ ++ result *= 16; ++ result += htoi(c); ++ ++ /* Go to next character in string */ ++ temp++; ++ } ++ pUuid->ucData4 = (u8)result; ++ ++ result = 0; ++ for (i = 0; i < 2; i++) { ++ /* Get first character in string */ ++ c = *temp; ++ ++ /* Increase the results by new value */ ++ result *= 16; ++ result += htoi(c); ++ ++ /* Go to next character in string */ ++ temp++; ++ } ++ pUuid->ucData5 = (u8)result; ++ ++ /* Step over underscore */ ++ temp++; ++ ++ for (j = 0; j < 6; j++) { ++ result = 0; ++ for (i = 0; i < 2; i++) { ++ /* Get first character in string */ ++ c = *temp; ++ ++ /* Increase the results by new value */ ++ result *= 16; ++ result += htoi(c); ++ ++ /* Go to next character in string */ ++ temp++; ++ } ++ pUuid->ucData6[j] = (u8)result; ++ } ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/EasiGlobal.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/EasiGlobal.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/EasiGlobal.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/EasiGlobal.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,42 @@ ++/* ++ * EasiGlobal.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef __EASIGLOBAL_H ++#define __EASIGLOBAL_H ++#include ++ ++/* ++ * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE ++ * ++ * DESCRIPTION: Defines used to describe register types for EASI-checker tests. ++ */ ++ ++#define READ_ONLY 1 ++#define WRITE_ONLY 2 ++#define READ_WRITE 3 ++ ++/* ++ * MACRO: _DEBUG_LEVEL_1_EASI ++ * ++ * DESCRIPTION: A MACRO which can be used to indicate that a particular beach ++ * register access function was called. ++ * ++ * NOTE: We currently dont use this functionality. 
++ */ ++#define _DEBUG_LEVEL_1_EASI(easiNum) ((void)0) ++ ++#endif /* __EASIGLOBAL_H */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/GlobalTypes.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/GlobalTypes.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/GlobalTypes.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/GlobalTypes.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,325 @@ ++/* ++ * GlobalTypes.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== GlobalTypes.h ======== ++ * Description: ++ * Global HW definitions ++ * ++ *! Revision History: ++ *! ================ ++ *! 16 Feb 2003 sb: Initial version ++ */ ++#ifndef __GLOBALTYPES_H ++#define __GLOBALTYPES_H ++ ++/* ++ * Definition: TRUE, FALSE ++ * ++ * DESCRIPTION: Boolean Definitions ++ */ ++#ifndef TRUE ++#define FALSE 0 ++#define TRUE (!(FALSE)) ++#endif ++ ++/* ++ * Definition: NULL ++ * ++ * DESCRIPTION: Invalid pointer ++ */ ++#ifndef NULL ++#define NULL (void *)0 ++#endif ++ ++/* ++ * Definition: RET_CODE_BASE ++ * ++ * DESCRIPTION: Base value for return code offsets ++ */ ++#define RET_CODE_BASE 0 ++ ++/* ++ * Definition: *BIT_OFFSET ++ * ++ * DESCRIPTION: offset in bytes from start of 32-bit word. ++ */ ++#define LOWER_16BIT_OFFSET 0 ++#define UPPER_16BIT_OFFSET 2 ++ ++#define LOWER_8BIT_OFFSET 0 ++#define LOWER_MIDDLE_8BIT_OFFSET 1 ++#define UPPER_MIDDLE_8BIT_OFFSET 2 ++#define UPPER_8BIT_OFFSET 3 ++ ++#define LOWER_8BIT_OF16_OFFSET 0 ++#define UPPER_8BIT_OF16_OFFSET 1 ++ ++/* ++ * Definition: *BIT_SHIFT ++ * ++ * DESCRIPTION: offset in bits from start of 32-bit word. ++ */ ++#define LOWER_16BIT_SHIFT 0 ++#define UPPER_16BIT_SHIFT 16 ++ ++#define LOWER_8BIT_SHIFT 0 ++#define LOWER_MIDDLE_8BIT_SHIFT 8 ++#define UPPER_MIDDLE_8BIT_SHIFT 16 ++#define UPPER_8BIT_SHIFT 24 ++ ++#define LOWER_8BIT_OF16_SHIFT 0 ++#define UPPER_8BIT_OF16_SHIFT 8 ++ ++ ++/* ++ * Definition: LOWER_16BIT_MASK ++ * ++ * DESCRIPTION: 16 bit mask used for inclusion of lower 16 bits i.e. mask out ++ * the upper 16 bits ++ */ ++#define LOWER_16BIT_MASK 0x0000FFFF ++ ++ ++/* ++ * Definition: LOWER_8BIT_MASK ++ * ++ * DESCRIPTION: 8 bit masks used for inclusion of 8 bits i.e. 
mask out ++ * the upper 16 bits ++ */ ++#define LOWER_8BIT_MASK 0x000000FF ++ ++/* ++ * Definition: RETURN_32BITS_FROM_16LOWER_AND_16UPPER(lower16Bits, upper16Bits) ++ * ++ * DESCRIPTION: Returns a 32 bit value given a 16 bit lower value and a 16 ++ * bit upper value ++ */ ++#define RETURN_32BITS_FROM_16LOWER_AND_16UPPER(lower16Bits,upper16Bits)\ ++ (((((u32)lower16Bits) & LOWER_16BIT_MASK)) | \ ++ (((((u32)upper16Bits) & LOWER_16BIT_MASK) << UPPER_16BIT_SHIFT))) ++ ++/* ++ * Definition: RETURN_16BITS_FROM_8LOWER_AND_8UPPER(lower16Bits, upper16Bits) ++ * ++ * DESCRIPTION: Returns a 16 bit value given a 8 bit lower value and a 8 ++ * bit upper value ++ */ ++#define RETURN_16BITS_FROM_8LOWER_AND_8UPPER(lower8Bits,upper8Bits)\ ++ (((((u32)lower8Bits) & LOWER_8BIT_MASK)) | \ ++ (((((u32)upper8Bits) & LOWER_8BIT_MASK) << UPPER_8BIT_OF16_SHIFT))) ++ ++/* ++ * Definition: RETURN_32BITS_FROM_4_8BIT_VALUES(lower8Bits, lowerMiddle8Bits, ++ * lowerUpper8Bits, upper8Bits) ++ * ++ * DESCRIPTION: Returns a 32 bit value given four 8 bit values ++ */ ++#define RETURN_32BITS_FROM_4_8BIT_VALUES(lower8Bits, lowerMiddle8Bits,\ ++ lowerUpper8Bits, upper8Bits)\ ++ (((((u32)lower8Bits) & LOWER_8BIT_MASK)) | \ ++ (((((u32)lowerMiddle8Bits) & LOWER_8BIT_MASK) <<\ ++ LOWER_MIDDLE_8BIT_SHIFT)) | \ ++ (((((u32)lowerUpper8Bits) & LOWER_8BIT_MASK) <<\ ++ UPPER_MIDDLE_8BIT_SHIFT)) | \ ++ (((((u32)upper8Bits) & LOWER_8BIT_MASK) <<\ ++ UPPER_8BIT_SHIFT))) ++ ++/* ++ * Definition: READ_LOWER_16BITS_OF_32(value32bits) ++ * ++ * DESCRIPTION: Returns a 16 lower bits of 32bit value ++ */ ++#define READ_LOWER_16BITS_OF_32(value32bits)\ ++ ((u16)((u32)(value32bits) & LOWER_16BIT_MASK)) ++ ++/* ++ * Definition: READ_UPPER_16BITS_OF_32(value32bits) ++ * ++ * DESCRIPTION: Returns a 16 lower bits of 32bit value ++ */ ++#define READ_UPPER_16BITS_OF_32(value32bits)\ ++ (((u16)((u32)(value32bits) >> UPPER_16BIT_SHIFT)) &\ ++ LOWER_16BIT_MASK) ++ ++ ++/* ++ * Definition: READ_LOWER_8BITS_OF_32(value32bits) ++ * ++ * DESCRIPTION: Returns a 8 lower bits of 32bit value ++ */ ++#define READ_LOWER_8BITS_OF_32(value32bits)\ ++ ((u8)((u32)(value32bits) & LOWER_8BIT_MASK)) ++ ++/* ++ * Definition: READ_LOWER_MIDDLE_8BITS_OF_32(value32bits) ++ * ++ * DESCRIPTION: Returns a 8 lower middle bits of 32bit value ++ */ ++#define READ_LOWER_MIDDLE_8BITS_OF_32(value32bits)\ ++ (((u8)((u32)(value32bits) >> LOWER_MIDDLE_8BIT_SHIFT)) &\ ++ LOWER_8BIT_MASK) ++ ++/* ++ * Definition: READ_LOWER_MIDDLE_8BITS_OF_32(value32bits) ++ * ++ * DESCRIPTION: Returns a 8 lower middle bits of 32bit value ++ */ ++#define READ_UPPER_MIDDLE_8BITS_OF_32(value32bits)\ ++ (((u8)((u32)(value32bits) >> LOWER_MIDDLE_8BIT_SHIFT)) &\ ++ LOWER_8BIT_MASK) ++ ++/* ++ * Definition: READ_UPPER_8BITS_OF_32(value32bits) ++ * ++ * DESCRIPTION: Returns a 8 upper bits of 32bit value ++ */ ++#define READ_UPPER_8BITS_OF_32(value32bits)\ ++ (((u8)((u32)(value32bits) >> UPPER_8BIT_SHIFT)) & LOWER_8BIT_MASK) ++ ++ ++/* ++ * Definition: READ_LOWER_8BITS_OF_16(value16bits) ++ * ++ * DESCRIPTION: Returns a 8 lower bits of 16bit value ++ */ ++#define READ_LOWER_8BITS_OF_16(value16bits)\ ++ ((u8)((u16)(value16bits) & LOWER_8BIT_MASK)) ++ ++/* ++ * Definition: READ_UPPER_8BITS_OF_16(value32bits) ++ * ++ * DESCRIPTION: Returns a 8 upper bits of 16bit value ++ */ ++#define READ_UPPER_8BITS_OF_16(value16bits)\ ++ (((u8)((u32)(value16bits) >> UPPER_8BIT_SHIFT)) & LOWER_8BIT_MASK) ++ ++ ++ ++/* UWORD16: 16 bit tpyes */ ++ ++ ++/* REG_UWORD8, REG_WORD8: 8 bit register types */ ++typedef volatile unsigned 
char REG_UWORD8; ++typedef volatile signed char REG_WORD8; ++ ++/* REG_UWORD16, REG_WORD16: 16 bit register types */ ++#ifndef OMAPBRIDGE_TYPES ++typedef volatile unsigned short REG_UWORD16; ++#endif ++typedef volatile short REG_WORD16; ++ ++/* REG_UWORD32, REG_WORD32: 32 bit register types */ ++typedef volatile unsigned long REG_UWORD32; ++ ++/* FLOAT ++ * ++ * Type to be used for floating point calculation. Note that floating point ++ * calculation is very CPU expensive, and you should only use if you ++ * absolutely need this. */ ++ ++ ++/* boolean_t: Boolean Type True, False */ ++/* ReturnCode_t: Return codes to be returned by all library functions */ ++typedef enum ReturnCode_label { ++ RET_OK = 0, ++ RET_FAIL = -1, ++ RET_BAD_NULL_PARAM = -2, ++ RET_PARAM_OUT_OF_RANGE = -3, ++ RET_INVALID_ID = -4, ++ RET_EMPTY = -5, ++ RET_FULL = -6, ++ RET_TIMEOUT = -7, ++ RET_INVALID_OPERATION = -8, ++ ++ /* Add new error codes at end of above list */ ++ ++ RET_NUM_RET_CODES /* this should ALWAYS be LAST entry */ ++} ReturnCode_t, *pReturnCode_t; ++ ++/* MACRO: RD_MEM_8, WR_MEM_8 ++ * ++ * DESCRIPTION: 32 bit memory access macros ++ */ ++#define RD_MEM_8(addr) ((u8)(*((u8 *)(addr)))) ++#define WR_MEM_8(addr, data) (*((u8 *)(addr)) = (u8)(data)) ++ ++/* MACRO: RD_MEM_8_VOLATILE, WR_MEM_8_VOLATILE ++ * ++ * DESCRIPTION: 8 bit register access macros ++ */ ++#define RD_MEM_8_VOLATILE(addr) ((u8)(*((REG_UWORD8 *)(addr)))) ++#define WR_MEM_8_VOLATILE(addr, data) (*((REG_UWORD8 *)(addr)) = (u8)(data)) ++ ++ ++/* ++ * MACRO: RD_MEM_16, WR_MEM_16 ++ * ++ * DESCRIPTION: 16 bit memory access macros ++ */ ++#define RD_MEM_16(addr) ((u16)(*((u16 *)(addr)))) ++#define WR_MEM_16(addr, data) (*((u16 *)(addr)) = (u16)(data)) ++ ++/* ++ * MACRO: RD_MEM_16_VOLATILE, WR_MEM_16_VOLATILE ++ * ++ * DESCRIPTION: 16 bit register access macros ++ */ ++#define RD_MEM_16_VOLATILE(addr) ((u16)(*((REG_UWORD16 *)(addr)))) ++#define WR_MEM_16_VOLATILE(addr, data) (*((REG_UWORD16 *)(addr)) =\ ++ (u16)(data)) ++ ++/* ++ * MACRO: RD_MEM_32, WR_MEM_32 ++ * ++ * DESCRIPTION: 32 bit memory access macros ++ */ ++#define RD_MEM_32(addr) ((u32)(*((u32 *)(addr)))) ++#define WR_MEM_32(addr, data) (*((u32 *)(addr)) = (u32)(data)) ++ ++/* ++ * MACRO: RD_MEM_32_VOLATILE, WR_MEM_32_VOLATILE ++ * ++ * DESCRIPTION: 32 bit register access macros ++ */ ++#define RD_MEM_32_VOLATILE(addr) ((u32)(*((REG_UWORD32 *)(addr)))) ++#define WR_MEM_32_VOLATILE(addr, data) (*((REG_UWORD32 *)(addr)) =\ ++ (u32)(data)) ++ ++/* Not sure if this all belongs here */ ++ ++#define CHECK_RETURN_VALUE(actualValue, expectedValue, returnCodeIfMismatch,\ ++ spyCodeIfMisMatch) ++#define CHECK_RETURN_VALUE_RET(actualValue, expectedValue, returnCodeIfMismatch) ++#define CHECK_RETURN_VALUE_RES(actualValue, expectedValue, spyCodeIfMisMatch) ++#define CHECK_RETURN_VALUE_RET_VOID(actualValue, expectedValue,\ ++ spyCodeIfMisMatch) ++ ++#define CHECK_INPUT_PARAM(actualValue, invalidValue, returnCodeIfMismatch,\ ++ spyCodeIfMisMatch) ++#define CHECK_INPUT_PARAM_NO_SPY(actualValue, invalidValue,\ ++ returnCodeIfMismatch) ++#define CHECK_INPUT_RANGE(actualValue, minValidValue, maxValidValue,\ ++ returnCodeIfMismatch, spyCodeIfMisMatch) ++#define CHECK_INPUT_RANGE_NO_SPY(actualValue, minValidValue, maxValidValue,\ ++ returnCodeIfMismatch) ++#define CHECK_INPUT_RANGE_MIN0(actualValue, maxValidValue,\ ++ returnCodeIfMismatch, spyCodeIfMisMatch) ++#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actualValue, maxValidValue,\ ++ returnCodeIfMismatch) ++ ++#endif /* __GLOBALTYPES_H */ +diff -Nurp 
linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_defs.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_defs.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_defs.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_defs.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,73 @@ ++/* ++ * hw_defs.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== hw_defs.h ======== ++ * Description: ++ * Global HW definitions ++ * ++ *! Revision History: ++ *! ================ ++ *! 19 Apr 2004 sb: Added generic page size, endianness and element size defns ++ *! 16 Feb 2003 sb: Initial version ++ */ ++#ifndef __HW_DEFS_H ++#define __HW_DEFS_H ++ ++#include ++ ++/* Page size */ ++#define HW_PAGE_SIZE_4KB 0x1000 ++#define HW_PAGE_SIZE_64KB 0x10000 ++#define HW_PAGE_SIZE_1MB 0x100000 ++#define HW_PAGE_SIZE_16MB 0x1000000 ++ ++/* HW_STATUS: return type for HW API */ ++typedef long HW_STATUS; ++ ++/* HW_SetClear_t: Enumerated Type used to set and clear any bit */ ++enum HW_SetClear_t { ++ HW_CLEAR, ++ HW_SET ++} ; ++ ++/* HW_Endianism_t: Enumerated Type used to specify the endianism ++ * Do NOT change these values. They are used as bit fields. */ ++enum HW_Endianism_t { ++ HW_LITTLE_ENDIAN, ++ HW_BIG_ENDIAN ++ ++} ; ++ ++/* HW_ElementSize_t: Enumerated Type used to specify the element size ++ * Do NOT change these values. They are used as bit fields. */ ++enum HW_ElementSize_t { ++ HW_ELEM_SIZE_8BIT, ++ HW_ELEM_SIZE_16BIT, ++ HW_ELEM_SIZE_32BIT, ++ HW_ELEM_SIZE_64BIT ++ ++} ; ++ ++/* HW_IdleMode_t: Enumerated Type used to specify Idle modes */ ++ enum HW_IdleMode_t { ++ HW_FORCE_IDLE, ++ HW_NO_IDLE, ++ HW_SMART_IDLE ++ } ; ++ ++#endif /* __HW_DEFS_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_dspssC64P.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_dspssC64P.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,56 @@ ++/* ++ * hw_dspss64P.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== hw_dspss64P.c ======== ++ * Description: ++ * API definitions to configure DSP Subsystem modules like IPI ++ * ++ *! Revision History: ++ *! ================ ++ *! 19 Apr 2004 sb: Implemented HW_DSPSS_IPIEndianismSet ++ *! 
16 Feb 2003 sb: Initial version ++ */ ++ ++/* PROJECT SPECIFIC INCLUDE FILES */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* HW FUNCTIONS */ ++HW_STATUS HW_DSPSS_BootModeSet(const void __iomem *baseAddress, ++ enum HW_DSPSYSC_BootMode_t bootMode, ++ const u32 bootAddress) ++{ ++ HW_STATUS status = RET_OK; ++ u32 offset = SYSC_IVA2BOOTMOD_OFFSET; ++ u32 alignedBootAddr; ++ ++ /* if Boot mode it DIRECT BOOT, check that the bootAddress is ++ * aligned to atleast 1K :: TODO */ ++ __raw_writel(bootMode, (baseAddress) + offset); ++ ++ offset = SYSC_IVA2BOOTADDR_OFFSET; ++ ++ alignedBootAddr = bootAddress & SYSC_IVA2BOOTADDR_MASK; ++ ++ __raw_writel(alignedBootAddr, (baseAddress) + offset); ++ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_dspssC64P.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_dspssC64P.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,48 @@ ++/* ++ * hw_dspssC64P.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== hw_dspss.h ======== ++ * Description: ++ * DSP Subsystem API declarations ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb: Removed redundant argument from HW_DSPSS_IPIEndianismSet ++ *! Moved endianness and element size to generic hw_defs.h ++ *! 16 Feb 2003 sb: Initial version ++ */ ++ ++#ifndef __HW_DSPSS_H ++#define __HW_DSPSS_H ++#include ++ ++ enum HW_DSPSYSC_BootMode_t { ++ HW_DSPSYSC_DIRECTBOOT = 0x0, ++ HW_DSPSYSC_IDLEBOOT = 0x1, ++ HW_DSPSYSC_SELFLOOPBOOT = 0x2, ++ HW_DSPSYSC_USRBOOTSTRAP = 0x3, ++ HW_DSPSYSC_DEFAULTRESTORE = 0x4 ++ } ; ++ ++#define HW_DSP_IDLEBOOT_ADDR 0x007E0000 ++ ++ extern HW_STATUS HW_DSPSS_BootModeSet(const void __iomem *baseAddress, ++ enum HW_DSPSYSC_BootMode_t bootMode, ++ const u32 bootAddress); ++ ++#endif /* __HW_DSPSS_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mbox.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mbox.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,247 @@ ++/* ++ * hw_mbox.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== hw_mbox.c ======== ++ * Description: ++ * Mailbox messaging & configuration API definitions ++ * ++ *! 
Revision History: ++ *! ================ ++ *! 16 Feb 2003 sb: Initial version ++ */ ++ ++#include ++#include "MLBRegAcM.h" ++#include ++#include ++ ++/* width in bits of MBOX Id */ ++#define HW_MBOX_ID_WIDTH 2 ++ ++struct MAILBOX_CONTEXT mboxsetting = { ++ .sysconfig = 2 << 3 | 1, /* SMART/AUTO-IDLE */ ++}; ++ ++/* Saves the mailbox context */ ++HW_STATUS HW_MBOX_saveSettings(void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ ++ mboxsetting.sysconfig = MLBMAILBOX_SYSCONFIGReadRegister32(baseAddress); ++ /* Get current enable status */ ++ mboxsetting.irqEnable0 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 ++ (baseAddress, HW_MBOX_U0_ARM); ++ mboxsetting.irqEnable1 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 ++ (baseAddress, HW_MBOX_U1_DSP1); ++ return status; ++} ++ ++/* Restores the mailbox context */ ++HW_STATUS HW_MBOX_restoreSettings(void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ /* Restor IRQ enable status */ ++ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, HW_MBOX_U0_ARM, ++ mboxsetting.irqEnable0); ++ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, HW_MBOX_U1_DSP1, ++ mboxsetting.irqEnable1); ++ /* Restore Sysconfig register */ ++ MLBMAILBOX_SYSCONFIGWriteRegister32(baseAddress, mboxsetting.sysconfig); ++ return status; ++} ++ ++/* Reads a u32 from the sub module message box Specified. if there are no ++ * messages in the mailbox then and error is returned. */ ++HW_STATUS HW_MBOX_MsgRead(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, u32 *const pReadValue) ++{ ++ HW_STATUS status = RET_OK; ++ ++ /* Check input parameters */ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_PARAM(pReadValue, NULL, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* Read 32-bit message in mail box */ ++ *pReadValue = MLBMAILBOX_MESSAGE___0_15ReadRegister32(baseAddress, ++ (u32)mailBoxId); ++ ++ return status; ++} ++ ++/* Writes a u32 from the sub module message box Specified. */ ++HW_STATUS HW_MBOX_MsgWrite(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, const u32 writeValue) ++{ ++ HW_STATUS status = RET_OK; ++ ++ /* Check input parameters */ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* Write 32-bit value to mailbox */ ++ MLBMAILBOX_MESSAGE___0_15WriteRegister32(baseAddress, (u32)mailBoxId, ++ (u32)writeValue); ++ ++ return status; ++} ++ ++/* Gets number of messages in a specified mailbox. */ ++HW_STATUS HW_MBOX_NumMsgGet(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, u32 *const pNumMsg) ++{ ++ HW_STATUS status = RET_OK; ++ ++ /* Check input parameters */ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_PARAM(pNumMsg, NULL, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ ++ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* Get number of messages available for MailBox */ ++ *pNumMsg = MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(baseAddress, ++ (u32)mailBoxId); ++ ++ return status; ++} ++ ++/* Enables the specified IRQ. 
*/ ++HW_STATUS HW_MBOX_EventEnable(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ const HW_MBOX_UserId_t userId, ++ const u32 events) ++{ ++ HW_STATUS status = RET_OK; ++ u32 irqEnableReg; ++ ++ /* Check input parameters */ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(enableIrq, HW_MBOX_INT_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(userId, HW_MBOX_USER_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* Get current enable status */ ++ irqEnableReg = MLBMAILBOX_IRQENABLE___0_3ReadRegister32(baseAddress, ++ (u32)userId); ++ ++ /* update enable value */ ++ irqEnableReg |= ((u32)(events)) << (((u32)(mailBoxId)) * ++ HW_MBOX_ID_WIDTH); ++ ++ /* write new enable status */ ++ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, (u32)userId, ++ (u32)irqEnableReg); ++ ++ mboxsetting.sysconfig = MLBMAILBOX_SYSCONFIGReadRegister32(baseAddress); ++ /* Get current enable status */ ++ mboxsetting.irqEnable0 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 ++ (baseAddress, HW_MBOX_U0_ARM); ++ mboxsetting.irqEnable1 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 ++ (baseAddress, HW_MBOX_U1_DSP1); ++ return status; ++} ++ ++/* Disables the specified IRQ. */ ++HW_STATUS HW_MBOX_EventDisable(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ const HW_MBOX_UserId_t userId, ++ const u32 events) ++{ ++ HW_STATUS status = RET_OK; ++ u32 irqDisableReg; ++ ++ /* Check input parameters */ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(disableIrq, HW_MBOX_INT_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(userId, HW_MBOX_USER_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* Get current enable status */ ++ irqDisableReg = MLBMAILBOX_IRQENABLE___0_3ReadRegister32(baseAddress, ++ (u32)userId); ++ ++ /* update enable value */ ++ irqDisableReg &= ~(((u32)(events)) << (((u32)(mailBoxId)) * ++ HW_MBOX_ID_WIDTH)); ++ ++ /* write new enable status */ ++ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, (u32)userId, ++ (u32)irqDisableReg); ++ ++ return status; ++} ++ ++/* Sets the status of the specified IRQ. 
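The shift by mailBoxId * HW_MBOX_ID_WIDTH in HW_MBOX_EventEnable()/HW_MBOX_EventDisable() above gives every mailbox a two-bit field in MAILBOX_IRQENABLE_n: bit 0 is new-message and bit 1 is not-full for mailbox 0, bits 2 and 3 for mailbox 1, and so on. A small worked example, with the constants copied from hw_mbox.c and hw_mbox.h in this patch:

    /* Worked example of the per-mailbox IRQENABLE field layout. */
    #define HW_MBOX_ID_WIDTH        2       /* as in hw_mbox.c  */
    #define HW_MBOX_INT_NEW_MSG     0x1     /* as in hw_mbox.h  */

    static inline u32 mbox_newmsg_bit(u32 mailbox_id)
    {
            /* mailbox 2: 0x1 << (2 * 2) = 0x10, i.e. bit 4 */
            return HW_MBOX_INT_NEW_MSG << (mailbox_id * HW_MBOX_ID_WIDTH);
    }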
*/ ++HW_STATUS HW_MBOX_EventAck(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, const HW_MBOX_UserId_t userId, ++ const u32 event) ++{ ++ HW_STATUS status = RET_OK; ++ u32 irqStatusReg; ++ ++ /* Check input parameters */ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + ++ RES_INVALID_INPUT_PARAM); ++ ++ CHECK_INPUT_RANGE_MIN0(irqStatus, HW_MBOX_INT_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(userId, HW_MBOX_USER_MAX, RET_INVALID_ID, ++ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* calculate status to write */ ++ irqStatusReg = ((u32)event) << (((u32)(mailBoxId)) * ++ HW_MBOX_ID_WIDTH); ++ ++ /* clear Irq Status for specified mailbox/User Id */ ++ MLBMAILBOX_IRQSTATUS___0_3WriteRegister32(baseAddress, (u32)userId, ++ (u32)irqStatusReg); ++ ++ /* ++ * FIXME: Replace all this custom register access with standard ++ * __raw_read/write(). ++ * ++ * FIXME: Replace all interrupt handlers with standard linux style ++ * interrupt handlers. ++ * ++ * FIXME: Replace direct access to PRCM registers with omap standard ++ * PRCM register access. ++ * ++ * Flush posted write for the irq status to avoid spurious interrupts. ++ */ ++ MLBMAILBOX_IRQSTATUS___0_3ReadRegister32(baseAddress, (u32)userId); ++ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mbox.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mbox.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,328 @@ ++/* ++ * hw_mbox.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== hw_mbox.h ======== ++ * Description: ++ * HW Mailbox API and types definitions ++ * ++ *! Revision History: ++ *! ================ ++ *! 16 Feb 2003 sb: Initial version ++ */ ++#ifndef __MBOX_H ++#define __MBOX_H ++ ++/* Bitmasks for Mailbox interrupt sources */ ++#define HW_MBOX_INT_NEW_MSG 0x1 ++#define HW_MBOX_INT_NOT_FULL 0x2 ++#define HW_MBOX_INT_ALL 0x3 ++ ++/* Maximum number of messages that mailbox can hald at a time. 
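Together with HW_MBOX_MsgRead(), the acknowledge path of HW_MBOX_EventAck() above would typically run from an interrupt handler; the read-back it performs flushes the posted IRQSTATUS write, so the handler needs no extra barrier. A rough sketch follows; the IRQ registration, the mbox_base mapping and the use of the standard Linux handler prototype are assumptions, not part of this patch.

    /* Sketch: ARM-side handling of a new-message interrupt. */
    static irqreturn_t mbox_isr(int irq, void *data)
    {
            void __iomem *mbox_base = data;         /* assumed mapping */
            u32 msg;

            if (HW_MBOX_MsgRead(mbox_base, HW_MBOX_ID_0, &msg) == RET_OK)
                    HW_MBOX_EventAck(mbox_base, HW_MBOX_ID_0, HW_MBOX_U0_ARM,
                                     HW_MBOX_INT_NEW_MSG);

            return IRQ_HANDLED;
    }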
*/ ++#define HW_MBOX_MAX_NUM_MESSAGES 4 ++ ++/* HW_MBOX_Id_t: Enumerated Type used to specify Mailbox Sub Module Id Number */ ++typedef enum HW_MBOX_Id_label { ++ HW_MBOX_ID_0, ++ HW_MBOX_ID_1, ++ HW_MBOX_ID_2, ++ HW_MBOX_ID_3, ++ HW_MBOX_ID_4, ++ HW_MBOX_ID_5 ++ ++} HW_MBOX_Id_t, *pHW_MBOX_Id_t; ++ ++/* HW_MBOX_UserId_t: Enumerated Type used to specify Mail box User Id */ ++typedef enum HW_MBOX_UserId_label { ++ HW_MBOX_U0_ARM, ++ HW_MBOX_U1_DSP1, ++ HW_MBOX_U2_DSP2, ++ HW_MBOX_U3_ARM ++ ++} HW_MBOX_UserId_t, *pHW_MBOX_UserId_t; ++ ++/* Mailbox context settings */ ++struct MAILBOX_CONTEXT { ++ u32 sysconfig; ++ u32 irqEnable0; ++ u32 irqEnable1; ++}; ++ ++/* ++* FUNCTION : HW_MBOX_MsgRead ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of Mailbox module ++* ++* Identifier : mailBoxId ++* Type : const HW_MBOX_Id_t ++* Description : Mail Box Sub module Id to read ++* ++* OUTPUTS: ++* ++* Identifier : pReadValue ++* Type : u32 *const ++* Description : Value read from MailBox ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM Address/ptr Paramater was set to 0/NULL ++* RET_INVALID_ID Invalid Id used ++* RET_EMPTY Mailbox empty ++* ++* PURPOSE: : this function reads a u32 from the sub module message ++* box Specified. if there are no messages in the mailbox ++* then and error is returned. ++*/ ++extern HW_STATUS HW_MBOX_MsgRead(const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ u32 *const pReadValue); ++ ++/* ++* FUNCTION : HW_MBOX_MsgWrite ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of Mailbox module ++* ++* Identifier : mailBoxId ++* Type : const HW_MBOX_Id_t ++* Description : Mail Box Sub module Id to write ++* ++* Identifier : writeValue ++* Type : const u32 ++* Description : Value to write to MailBox ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL ++* RET_INVALID_ID Invalid Id used ++* ++* PURPOSE: : this function writes a u32 from the sub module message ++* box Specified. ++*/ ++extern HW_STATUS HW_MBOX_MsgWrite( ++ const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ const u32 writeValue ++ ); ++ ++/* ++* FUNCTION : HW_MBOX_NumMsgGet ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of Mailbox module ++* ++* Identifier : mailBoxId ++* Type : const HW_MBOX_Id_t ++* Description : Mail Box Sub module Id to get num messages ++* ++* OUTPUTS: ++* ++* Identifier : pNumMsg ++* Type : u32 *const ++* Description : Number of messages in mailbox ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL ++* RET_INVALID_ID Inavlid ID input at parameter ++* ++* PURPOSE: : this function gets number of messages in a specified mailbox. 
++*/ ++extern HW_STATUS HW_MBOX_NumMsgGet( ++ const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ u32 *const pNumMsg ++ ); ++ ++/* ++* FUNCTION : HW_MBOX_EventEnable ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL ++* ++* Identifier : mailBoxId ++* Type : const HW_MBOX_Id_t ++* Description : Mail Box Sub module Id to enable ++* ++* Identifier : userId ++* Type : const HW_MBOX_UserId_t ++* Description : Mail box User Id to enable ++* ++* Identifier : enableIrq ++* Type : const u32 ++* Description : Irq value to enable ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM A Pointer Paramater was set to NULL ++* RET_INVALID_ID Invalid Id used ++* ++* PURPOSE: : this function enables the specified IRQ. ++*/ ++extern HW_STATUS HW_MBOX_EventEnable( ++ const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ const HW_MBOX_UserId_t userId, ++ const u32 events ++ ); ++ ++/* ++* FUNCTION : HW_MBOX_EventDisable ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL ++* ++* Identifier : mailBoxId ++* Type : const HW_MBOX_Id_t ++* Description : Mail Box Sub module Id to disable ++* ++* Identifier : userId ++* Type : const HW_MBOX_UserId_t ++* Description : Mail box User Id to disable ++* ++* Identifier : enableIrq ++* Type : const u32 ++* Description : Irq value to disable ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM A Pointer Paramater was set to NULL ++* RET_INVALID_ID Invalid Id used ++* ++* PURPOSE: : this function disables the specified IRQ. ++*/ ++extern HW_STATUS HW_MBOX_EventDisable( ++ const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ const HW_MBOX_UserId_t userId, ++ const u32 events ++ ); ++ ++/* ++* FUNCTION : HW_MBOX_EventAck ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of Mailbox module ++* ++* Identifier : mailBoxId ++* Type : const HW_MBOX_Id_t ++* Description : Mail Box Sub module Id to set ++* ++* Identifier : userId ++* Type : const HW_MBOX_UserId_t ++* Description : Mail box User Id to set ++* ++* Identifier : irqStatus ++* Type : const u32 ++* Description : The value to write IRQ status ++* ++* OUTPUTS: ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM Address Paramater was set to 0 ++* RET_INVALID_ID Invalid Id used ++* ++* PURPOSE: : this function sets the status of the specified IRQ. 
++*/ ++extern HW_STATUS HW_MBOX_EventAck( ++ const void __iomem *baseAddress, ++ const HW_MBOX_Id_t mailBoxId, ++ const HW_MBOX_UserId_t userId, ++ const u32 event ++ ); ++ ++/* ++* FUNCTION : HW_MBOX_saveSettings ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of Mailbox module ++* ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL ++* RET_INVALID_ID Invalid Id used ++* RET_EMPTY Mailbox empty ++* ++* PURPOSE: : this function saves the context of mailbox ++*/ ++extern HW_STATUS HW_MBOX_saveSettings(void __iomem *baseAddres); ++ ++/* ++* FUNCTION : HW_MBOX_restoreSettings ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of Mailbox module ++* ++* ++* RETURNS: ++* ++* Type : ReturnCode_t ++* Description : RET_OK No errors occured ++* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL ++* RET_INVALID_ID Invalid Id used ++* RET_EMPTY Mailbox empty ++* ++* PURPOSE: : this function restores the context of mailbox ++*/ ++extern HW_STATUS HW_MBOX_restoreSettings(void __iomem *baseAddres); ++ ++static inline void HW_MBOX_initSettings(void __iomem *baseAddres) ++{ ++ HW_MBOX_restoreSettings(baseAddres); ++} ++ ++#endif /* __MBOX_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mmu.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mmu.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,599 @@ ++/* ++ * hw_mmu.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== hw_mmu.c ======== ++ * Description: ++ * API definitions to setup MMU TLB and PTE ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb TLBAdd and TLBFlush input the page size in bytes instead ++ of an enum. TLBAdd inputs mapping attributes struct instead ++ of individual arguments. ++ Removed MMU.h and other cosmetic updates. ++ *! 08-Mar-2004 sb Added the Page Table Management APIs ++ *! 
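The save/restore pair declared above exists so the ARM side can preserve SYSCONFIG and both IRQENABLE registers across a power transition that loses mailbox context; HW_MBOX_initSettings() is simply restoreSettings() under another name. A minimal ordering sketch (the suspend/resume hooks themselves are assumptions for illustration):

    /* Sketch: preserve mailbox state across a power transition. */
    static void mbox_suspend(void __iomem *mbox_base)
    {
            HW_MBOX_saveSettings(mbox_base);        /* before context is lost */
    }

    static void mbox_resume(void __iomem *mbox_base)
    {
            HW_MBOX_restoreSettings(mbox_base);     /* after the domain is back */
    }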
16 Feb 2003 sb: Initial version ++ */ ++ ++#include ++#include ++#include "MMURegAcM.h" ++#include ++#include ++#include ++ ++#define MMU_BASE_VAL_MASK 0xFC00 ++#define MMU_PAGE_MAX 3 ++#define MMU_ELEMENTSIZE_MAX 3 ++#define MMU_ADDR_MASK 0xFFFFF000 ++#define MMU_TTB_MASK 0xFFFFC000 ++#define MMU_SECTION_ADDR_MASK 0xFFF00000 ++#define MMU_SSECTION_ADDR_MASK 0xFF000000 ++#define MMU_PAGE_TABLE_MASK 0xFFFFFC00 ++#define MMU_LARGE_PAGE_MASK 0xFFFF0000 ++#define MMU_SMALL_PAGE_MASK 0xFFFFF000 ++ ++#define MMU_LOAD_TLB 0x00000001 ++ ++/* HW_MMUPageSize_t: Enumerated Type used to specify the MMU Page Size(SLSS) */ ++enum HW_MMUPageSize_t { ++ HW_MMU_SECTION, ++ HW_MMU_LARGE_PAGE, ++ HW_MMU_SMALL_PAGE, ++ HW_MMU_SUPERSECTION ++} ; ++ ++/* ++* FUNCTION : MMU_FlushEntry ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of MMU module ++* ++* RETURNS: ++* ++* Type : HW_STATUS ++* Description : RET_OK -- No errors occured ++* RET_BAD_NULL_PARAM -- A Pointer ++* Paramater was set to NULL ++* ++* PURPOSE: : Flush the TLB entry pointed by the ++* lock counter register ++* even if this entry is set protected ++* ++* METHOD: : Check the Input parameter and Flush a ++* single entry in the TLB. ++*/ ++static HW_STATUS MMU_FlushEntry(const void __iomem *baseAddress); ++ ++/* ++* FUNCTION : MMU_SetCAMEntry ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* TypE : const u32 ++* Description : Base Address of instance of MMU module ++* ++* Identifier : pageSize ++* TypE : const u32 ++* Description : It indicates the page size ++* ++* Identifier : preservedBit ++* Type : const u32 ++* Description : It indicates the TLB entry is preserved entry ++* or not ++* ++* Identifier : validBit ++* Type : const u32 ++* Description : It indicates the TLB entry is valid entry or not ++* ++* ++* Identifier : virtualAddrTag ++* Type : const u32 ++* Description : virtual Address ++* ++* RETURNS: ++* ++* Type : HW_STATUS ++* Description : RET_OK -- No errors occured ++* RET_BAD_NULL_PARAM -- A Pointer Paramater ++* was set to NULL ++* RET_PARAM_OUT_OF_RANGE -- Input Parameter out ++* of Range ++* ++* PURPOSE: : Set MMU_CAM reg ++* ++* METHOD: : Check the Input parameters and set the CAM entry. ++*/ ++static HW_STATUS MMU_SetCAMEntry(const void __iomem *baseAddress, ++ const u32 pageSize, ++ const u32 preservedBit, ++ const u32 validBit, ++ const u32 virtualAddrTag); ++ ++/* ++* FUNCTION : MMU_SetRAMEntry ++* ++* INPUTS: ++* ++* Identifier : baseAddress ++* Type : const u32 ++* Description : Base Address of instance of MMU module ++* ++* Identifier : physicalAddr ++* Type : const u32 ++* Description : Physical Address to which the corresponding ++* virtual Address shouldpoint ++* ++* Identifier : endianism ++* Type : HW_Endianism_t ++* Description : endianism for the given page ++* ++* Identifier : elementSize ++* Type : HW_ElementSize_t ++* Description : The element size ( 8,16, 32 or 64 bit) ++* ++* Identifier : mixedSize ++* Type : HW_MMUMixedSize_t ++* Description : Element Size to follow CPU or TLB ++* ++* RETURNS: ++* ++* Type : HW_STATUS ++* Description : RET_OK -- No errors occured ++* RET_BAD_NULL_PARAM -- A Pointer Paramater ++* was set to NULL ++* RET_PARAM_OUT_OF_RANGE -- Input Parameter ++* out of Range ++* ++* PURPOSE: : Set MMU_CAM reg ++* ++* METHOD: : Check the Input parameters and set the RAM entry. 
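The *_ADDR_MASK constants defined near the top of hw_mmu.c above simply align an address to the mapping size they name. A compact worked example (mask values copied from the definitions above; the sample address is arbitrary):

    /* Worked example of the alignment masks for a sample address. */
    /*
     *   0x80345678 & MMU_SSECTION_ADDR_MASK (0xFF000000) = 0x80000000  16 MB
     *   0x80345678 & MMU_SECTION_ADDR_MASK  (0xFFF00000) = 0x80300000   1 MB
     *   0x80345678 & MMU_LARGE_PAGE_MASK    (0xFFFF0000) = 0x80340000  64 KB
     *   0x80345678 & MMU_SMALL_PAGE_MASK    (0xFFFFF000) = 0x80345000   4 KB
     */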
++*/ ++static HW_STATUS MMU_SetRAMEntry(const void __iomem *baseAddress, ++ const u32 physicalAddr, ++ enum HW_Endianism_t endianism, ++ enum HW_ElementSize_t elementSize, ++ enum HW_MMUMixedSize_t mixedSize); ++ ++/* HW FUNCTIONS */ ++ ++HW_STATUS HW_MMU_Enable(const void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_CNTLMMUEnableWrite32(baseAddress, HW_SET); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_Disable(const void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_CNTLMMUEnableWrite32(baseAddress, HW_CLEAR); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_NumLockedSet(const void __iomem *baseAddress, ++ u32 numLockedEntries) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_LOCKBaseValueWrite32(baseAddress, numLockedEntries); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_VictimNumSet(const void __iomem *baseAddress, ++ u32 victimEntryNum) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_LOCKCurrentVictimWrite32(baseAddress, victimEntryNum); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_EventAck(const void __iomem *baseAddress, u32 irqMask) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_IRQSTATUSWriteRegister32(baseAddress, irqMask); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_EventDisable(const void __iomem *baseAddress, ++ u32 irqMask) ++{ ++ HW_STATUS status = RET_OK; ++ u32 irqReg; ++ ++ irqReg = MMUMMU_IRQENABLEReadRegister32(baseAddress); ++ ++ MMUMMU_IRQENABLEWriteRegister32(baseAddress, irqReg & ~irqMask); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_EventEnable(const void __iomem *baseAddress, u32 irqMask) ++{ ++ HW_STATUS status = RET_OK; ++ u32 irqReg; ++ ++ irqReg = MMUMMU_IRQENABLEReadRegister32(baseAddress); ++ ++ MMUMMU_IRQENABLEWriteRegister32(baseAddress, irqReg | irqMask); ++ ++ return status; ++} ++ ++ ++HW_STATUS HW_MMU_EventStatus(const void __iomem *baseAddress, u32 *irqMask) ++{ ++ HW_STATUS status = RET_OK; ++ ++ *irqMask = MMUMMU_IRQSTATUSReadRegister32(baseAddress); ++ ++ return status; ++} ++ ++ ++HW_STATUS HW_MMU_FaultAddrRead(const void __iomem *baseAddress, u32 *addr) ++{ ++ HW_STATUS status = RET_OK; ++ ++ /*Check the input Parameters*/ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* read values from register */ ++ *addr = MMUMMU_FAULT_ADReadRegister32(baseAddress); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_TTBSet(const void __iomem *baseAddress, u32 TTBPhysAddr) ++{ ++ HW_STATUS status = RET_OK; ++ u32 loadTTB; ++ ++ /*Check the input Parameters*/ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ ++ loadTTB = TTBPhysAddr & ~0x7FUL; ++ /* write values to register */ ++ MMUMMU_TTBWriteRegister32(baseAddress, loadTTB); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_TWLEnable(const void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_CNTLTWLEnableWrite32(baseAddress, HW_SET); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_TWLDisable(const void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ ++ MMUMMU_CNTLTWLEnableWrite32(baseAddress, HW_CLEAR); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_TLBFlush(const void __iomem *baseAddress, u32 virtualAddr, ++ u32 pageSize) ++{ ++ HW_STATUS status = RET_OK; ++ u32 virtualAddrTag; ++ enum HW_MMUPageSize_t pgSizeBits; ++ ++ switch (pageSize) { ++ case HW_PAGE_SIZE_4KB: ++ pgSizeBits = HW_MMU_SMALL_PAGE; ++ break; ++ ++ case HW_PAGE_SIZE_64KB: ++ pgSizeBits = HW_MMU_LARGE_PAGE; ++ break; ++ ++ case HW_PAGE_SIZE_1MB: ++ 
pgSizeBits = HW_MMU_SECTION; ++ break; ++ ++ case HW_PAGE_SIZE_16MB: ++ pgSizeBits = HW_MMU_SUPERSECTION; ++ break; ++ ++ default: ++ return RET_FAIL; ++ } ++ ++ /* Generate the 20-bit tag from virtual address */ ++ virtualAddrTag = ((virtualAddr & MMU_ADDR_MASK) >> 12); ++ ++ MMU_SetCAMEntry(baseAddress, pgSizeBits, 0, 0, virtualAddrTag); ++ ++ MMU_FlushEntry(baseAddress); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_TLBAdd(const void __iomem *baseAddress, ++ u32 physicalAddr, ++ u32 virtualAddr, ++ u32 pageSize, ++ u32 entryNum, ++ struct HW_MMUMapAttrs_t *mapAttrs, ++ enum HW_SetClear_t preservedBit, ++ enum HW_SetClear_t validBit) ++{ ++ HW_STATUS status = RET_OK; ++ u32 lockReg; ++ u32 virtualAddrTag; ++ enum HW_MMUPageSize_t mmuPgSize; ++ ++ /*Check the input Parameters*/ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(pageSize, MMU_PAGE_MAX, RET_PARAM_OUT_OF_RANGE, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(mapAttrs->elementSize, MMU_ELEMENTSIZE_MAX, ++ RET_PARAM_OUT_OF_RANGE, RES_MMU_BASE + ++ RES_INVALID_INPUT_PARAM); ++ ++ switch (pageSize) { ++ case HW_PAGE_SIZE_4KB: ++ mmuPgSize = HW_MMU_SMALL_PAGE; ++ break; ++ ++ case HW_PAGE_SIZE_64KB: ++ mmuPgSize = HW_MMU_LARGE_PAGE; ++ break; ++ ++ case HW_PAGE_SIZE_1MB: ++ mmuPgSize = HW_MMU_SECTION; ++ break; ++ ++ case HW_PAGE_SIZE_16MB: ++ mmuPgSize = HW_MMU_SUPERSECTION; ++ break; ++ ++ default: ++ return RET_FAIL; ++ } ++ ++ lockReg = MMUMMU_LOCKReadRegister32(baseAddress); ++ ++ /* Generate the 20-bit tag from virtual address */ ++ virtualAddrTag = ((virtualAddr & MMU_ADDR_MASK) >> 12); ++ ++ /* Write the fields in the CAM Entry Register */ ++ MMU_SetCAMEntry(baseAddress, mmuPgSize, preservedBit, validBit, ++ virtualAddrTag); ++ ++ /* Write the different fields of the RAM Entry Register */ ++ /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit)*/ ++ MMU_SetRAMEntry(baseAddress, physicalAddr, mapAttrs->endianism, ++ mapAttrs->elementSize, mapAttrs->mixedSize); ++ ++ /* Update the MMU Lock Register */ ++ /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1)*/ ++ MMUMMU_LOCKCurrentVictimWrite32(baseAddress, entryNum); ++ ++ /* Enable loading of an entry in TLB by writing 1 ++ into LD_TLB_REG register */ ++ MMUMMU_LD_TLBWriteRegister32(baseAddress, MMU_LOAD_TLB); ++ ++ ++ MMUMMU_LOCKWriteRegister32(baseAddress, lockReg); ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_PteSet(const u32 pgTblVa, ++ u32 physicalAddr, ++ u32 virtualAddr, ++ u32 pageSize, ++ struct HW_MMUMapAttrs_t *mapAttrs) ++{ ++ HW_STATUS status = RET_OK; ++ u32 pteAddr, pteVal; ++ s32 numEntries = 1; ++ ++ switch (pageSize) { ++ case HW_PAGE_SIZE_4KB: ++ pteAddr = HW_MMU_PteAddrL2(pgTblVa, ++ virtualAddr & MMU_SMALL_PAGE_MASK); ++ pteVal = ((physicalAddr & MMU_SMALL_PAGE_MASK) | ++ (mapAttrs->endianism << 9) | ++ (mapAttrs->elementSize << 4) | ++ (mapAttrs->mixedSize << 11) | 2 ++ ); ++ break; ++ ++ case HW_PAGE_SIZE_64KB: ++ numEntries = 16; ++ pteAddr = HW_MMU_PteAddrL2(pgTblVa, ++ virtualAddr & MMU_LARGE_PAGE_MASK); ++ pteVal = ((physicalAddr & MMU_LARGE_PAGE_MASK) | ++ (mapAttrs->endianism << 9) | ++ (mapAttrs->elementSize << 4) | ++ (mapAttrs->mixedSize << 11) | 1 ++ ); ++ break; ++ ++ case HW_PAGE_SIZE_1MB: ++ pteAddr = HW_MMU_PteAddrL1(pgTblVa, ++ virtualAddr & MMU_SECTION_ADDR_MASK); ++ pteVal = ((((physicalAddr & MMU_SECTION_ADDR_MASK) | ++ (mapAttrs->endianism << 15) | ++ (mapAttrs->elementSize << 10) | ++ 
(mapAttrs->mixedSize << 17)) & ++ ~0x40000) | 0x2 ++ ); ++ break; ++ ++ case HW_PAGE_SIZE_16MB: ++ numEntries = 16; ++ pteAddr = HW_MMU_PteAddrL1(pgTblVa, ++ virtualAddr & MMU_SSECTION_ADDR_MASK); ++ pteVal = (((physicalAddr & MMU_SSECTION_ADDR_MASK) | ++ (mapAttrs->endianism << 15) | ++ (mapAttrs->elementSize << 10) | ++ (mapAttrs->mixedSize << 17) ++ ) | 0x40000 | 0x2 ++ ); ++ break; ++ ++ case HW_MMU_COARSE_PAGE_SIZE: ++ pteAddr = HW_MMU_PteAddrL1(pgTblVa, ++ virtualAddr & MMU_SECTION_ADDR_MASK); ++ pteVal = (physicalAddr & MMU_PAGE_TABLE_MASK) | 1; ++ break; ++ ++ default: ++ return RET_FAIL; ++ } ++ ++ while (--numEntries >= 0) ++ ((u32 *)pteAddr)[numEntries] = pteVal; ++ ++ return status; ++} ++ ++HW_STATUS HW_MMU_PteClear(const u32 pgTblVa, ++ u32 virtualAddr, ++ u32 pgSize) ++{ ++ HW_STATUS status = RET_OK; ++ u32 pteAddr; ++ s32 numEntries = 1; ++ ++ switch (pgSize) { ++ case HW_PAGE_SIZE_4KB: ++ pteAddr = HW_MMU_PteAddrL2(pgTblVa, ++ virtualAddr & MMU_SMALL_PAGE_MASK); ++ break; ++ ++ case HW_PAGE_SIZE_64KB: ++ numEntries = 16; ++ pteAddr = HW_MMU_PteAddrL2(pgTblVa, ++ virtualAddr & MMU_LARGE_PAGE_MASK); ++ break; ++ ++ case HW_PAGE_SIZE_1MB: ++ case HW_MMU_COARSE_PAGE_SIZE: ++ pteAddr = HW_MMU_PteAddrL1(pgTblVa, ++ virtualAddr & MMU_SECTION_ADDR_MASK); ++ break; ++ ++ case HW_PAGE_SIZE_16MB: ++ numEntries = 16; ++ pteAddr = HW_MMU_PteAddrL1(pgTblVa, ++ virtualAddr & MMU_SSECTION_ADDR_MASK); ++ break; ++ ++ default: ++ return RET_FAIL; ++ } ++ ++ while (--numEntries >= 0) ++ ((u32 *)pteAddr)[numEntries] = 0; ++ ++ return status; ++} ++ ++/* MMU_FlushEntry */ ++static HW_STATUS MMU_FlushEntry(const void __iomem *baseAddress) ++{ ++ HW_STATUS status = RET_OK; ++ u32 flushEntryData = 0x1; ++ ++ /*Check the input Parameters*/ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ ++ /* write values to register */ ++ MMUMMU_FLUSH_ENTRYWriteRegister32(baseAddress, flushEntryData); ++ ++ return status; ++} ++ ++/* MMU_SetCAMEntry */ ++static HW_STATUS MMU_SetCAMEntry(const void __iomem *baseAddress, ++ const u32 pageSize, ++ const u32 preservedBit, ++ const u32 validBit, ++ const u32 virtualAddrTag) ++{ ++ HW_STATUS status = RET_OK; ++ u32 mmuCamReg; ++ ++ /*Check the input Parameters*/ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ ++ mmuCamReg = (virtualAddrTag << 12); ++ mmuCamReg = (mmuCamReg) | (pageSize) | (validBit << 2) | ++ (preservedBit << 3) ; ++ ++ /* write values to register */ ++ MMUMMU_CAMWriteRegister32(baseAddress, mmuCamReg); ++ ++ return status; ++} ++ ++/* MMU_SetRAMEntry */ ++static HW_STATUS MMU_SetRAMEntry(const void __iomem *baseAddress, ++ const u32 physicalAddr, ++ enum HW_Endianism_t endianism, ++ enum HW_ElementSize_t elementSize, ++ enum HW_MMUMixedSize_t mixedSize) ++{ ++ HW_STATUS status = RET_OK; ++ u32 mmuRamReg; ++ ++ /*Check the input Parameters*/ ++ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, ++ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); ++ CHECK_INPUT_RANGE_MIN0(elementSize, MMU_ELEMENTSIZE_MAX, ++ RET_PARAM_OUT_OF_RANGE, RES_MMU_BASE + ++ RES_INVALID_INPUT_PARAM); ++ ++ ++ mmuRamReg = (physicalAddr & MMU_ADDR_MASK); ++ mmuRamReg = (mmuRamReg) | ((endianism << 9) | (elementSize << 7) | ++ (mixedSize << 6)); ++ ++ /* write values to register */ ++ MMUMMU_RAMWriteRegister32(baseAddress, mmuRamReg); ++ ++ return status; ++ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.h 
kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mmu.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_mmu.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,177 @@ ++/* ++ * hw_mmu.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== hw_mmu.h ======== ++ * Description: ++ * MMU types and API declarations ++ * ++ *! Revision History: ++ *! ================ ++ *! 19-Apr-2004 sb Moved & renamed endianness, page size, element size ++ TLBAdd takes in MMUMapAttrs instead of separate arguments ++ *! 08-Mar-2004 sb Added the Page Table management APIs ++ *! 16 Feb 2003 sb: Initial version ++ */ ++#ifndef __HW_MMU_H ++#define __HW_MMU_H ++ ++#include ++ ++/* Bitmasks for interrupt sources */ ++#define HW_MMU_TRANSLATION_FAULT 0x2 ++#define HW_MMU_ALL_INTERRUPTS 0x1F ++ ++#define HW_MMU_COARSE_PAGE_SIZE 0x400 ++ ++/* HW_MMUMixedSize_t: Enumerated Type used to specify whether to follow ++ CPU/TLB Element size */ ++enum HW_MMUMixedSize_t { ++ HW_MMU_TLBES, ++ HW_MMU_CPUES ++ ++} ; ++ ++/* HW_MMUMapAttrs_t: Struct containing MMU mapping attributes */ ++struct HW_MMUMapAttrs_t { ++ enum HW_Endianism_t endianism; ++ enum HW_ElementSize_t elementSize; ++ enum HW_MMUMixedSize_t mixedSize; ++ bool donotlockmpupage; ++} ; ++ ++extern HW_STATUS HW_MMU_Enable(const void __iomem *baseAddress); ++ ++extern HW_STATUS HW_MMU_Disable(const void __iomem *baseAddress); ++ ++extern HW_STATUS HW_MMU_NumLockedSet(const void __iomem *baseAddress, ++ u32 numLockedEntries); ++ ++extern HW_STATUS HW_MMU_VictimNumSet(const void __iomem *baseAddress, ++ u32 victimEntryNum); ++ ++/* For MMU faults */ ++extern HW_STATUS HW_MMU_EventAck(const void __iomem *baseAddress, ++ u32 irqMask); ++ ++extern HW_STATUS HW_MMU_EventDisable(const void __iomem *baseAddress, ++ u32 irqMask); ++ ++extern HW_STATUS HW_MMU_EventEnable(const void __iomem *baseAddress, ++ u32 irqMask); ++ ++extern HW_STATUS HW_MMU_EventStatus(const void __iomem *baseAddress, ++ u32 *irqMask); ++ ++extern HW_STATUS HW_MMU_FaultAddrRead(const void __iomem *baseAddress, ++ u32 *addr); ++ ++/* Set the TT base address */ ++extern HW_STATUS HW_MMU_TTBSet(const void __iomem *baseAddress, ++ u32 TTBPhysAddr); ++ ++extern HW_STATUS HW_MMU_TWLEnable(const void __iomem *baseAddress); ++ ++extern HW_STATUS HW_MMU_TWLDisable(const void __iomem *baseAddress); ++ ++extern HW_STATUS HW_MMU_TLBFlush(const void __iomem *baseAddress, ++ u32 virtualAddr, ++ u32 pageSize); ++ ++extern HW_STATUS HW_MMU_TLBAdd(const void __iomem *baseAddress, ++ u32 physicalAddr, ++ u32 virtualAddr, ++ u32 pageSize, ++ u32 entryNum, ++ struct HW_MMUMapAttrs_t *mapAttrs, ++ enum HW_SetClear_t preservedBit, ++ enum HW_SetClear_t validBit); ++ ++ ++/* For PTEs */ ++extern HW_STATUS HW_MMU_PteSet(const u32 pgTblVa, ++ u32 physicalAddr, ++ u32 virtualAddr, ++ u32 pageSize, ++ struct HW_MMUMapAttrs_t *mapAttrs); ++ ++extern HW_STATUS HW_MMU_PteClear(const u32 pgTblVa, ++ u32 pgSize, ++ u32 
virtualAddr); ++ ++static inline u32 HW_MMU_PteAddrL1(u32 L1_base, u32 va) ++{ ++ u32 pteAddr; ++ u32 VA_31_to_20; ++ ++ VA_31_to_20 = va >> (20 - 2); /* Left-shift by 2 here itself */ ++ VA_31_to_20 &= 0xFFFFFFFCUL; ++ pteAddr = L1_base + VA_31_to_20; ++ ++ return pteAddr; ++} ++ ++static inline u32 HW_MMU_PteAddrL2(u32 L2_base, u32 va) ++{ ++ u32 pteAddr; ++ ++ pteAddr = (L2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC); ++ ++ return pteAddr; ++} ++ ++static inline u32 HW_MMU_PteCoarseL1(u32 pteVal) ++{ ++ u32 pteCoarse; ++ ++ pteCoarse = pteVal & 0xFFFFFC00; ++ ++ return pteCoarse; ++} ++ ++static inline u32 HW_MMU_PteSizeL1(u32 pteVal) ++{ ++ u32 pteSize = 0; ++ ++ if ((pteVal & 0x3) == 0x1) { ++ /* Points to L2 PT */ ++ pteSize = HW_MMU_COARSE_PAGE_SIZE; ++ } ++ ++ if ((pteVal & 0x3) == 0x2) { ++ if (pteVal & (1 << 18)) ++ pteSize = HW_PAGE_SIZE_16MB; ++ else ++ pteSize = HW_PAGE_SIZE_1MB; ++ } ++ ++ return pteSize; ++} ++ ++static inline u32 HW_MMU_PteSizeL2(u32 pteVal) ++{ ++ u32 pteSize = 0; ++ ++ if (pteVal & 0x2) ++ pteSize = HW_PAGE_SIZE_4KB; ++ else if (pteVal & 0x1) ++ pteSize = HW_PAGE_SIZE_64KB; ++ ++ return pteSize; ++} ++ ++#endif /* __HW_MMU_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_prcm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_prcm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,167 @@ ++/* ++ * hw_prcm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== hw_prcm.c ======== ++ * Description: ++ * API definitions to configure PRCM (Power, Reset & Clocks Manager) ++ * ++ *! Revision History: ++ *! ================ ++ *! 
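The inline helpers above index the ARM-format tables directly: each 1 MB of virtual space owns one 4-byte first-level entry, so the L1 slot for a VA is L1_base + ((va >> 20) << 2), and HW_MMU_PteSizeL1() tells section-type entries (low bits 0b10, bit 18 selecting 16 MB) apart from pointers to a 1 KB coarse L2 table (low bits 0b01). One worked value with illustrative addresses is below; note in passing that the HW_MMU_PteClear() prototype above lists pgSize before virtualAddr while the definition in hw_mmu.c takes virtualAddr first, so callers should double-check the argument order.

    /* Worked example for HW_MMU_PteAddrL1() with illustrative addresses. */
    static inline u32 example_l1_slot(void)
    {
            /* (0x80300000 >> 20) << 2 = 0x200C, so the slot is 0x9FFF200C */
            return HW_MMU_PteAddrL1(0x9FFF0000, 0x80300000);
    }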
16 Feb 2003 sb: Initial version ++ */ ++ ++#include ++#include "PRCMRegAcM.h" ++#include ++#include ++ ++static HW_STATUS HW_RST_WriteVal(const void __iomem *baseAddress, ++ enum HW_RstModule_t r, ++ enum HW_SetClear_t val); ++ ++HW_STATUS HW_RST_Reset(const void __iomem *baseAddress, enum HW_RstModule_t r) ++{ ++ return HW_RST_WriteVal(baseAddress, r, HW_SET); ++} ++ ++HW_STATUS HW_RST_UnReset(const void __iomem *baseAddress, enum HW_RstModule_t r) ++{ ++ return HW_RST_WriteVal(baseAddress, r, HW_CLEAR); ++} ++ ++static HW_STATUS HW_RST_WriteVal(const void __iomem *baseAddress, ++ enum HW_RstModule_t r, ++ enum HW_SetClear_t val) ++{ ++ HW_STATUS status = RET_OK; ++ ++ switch (r) { ++ case HW_RST1_IVA2: ++ PRM_RSTCTRL_IVA2RST1_DSPWrite32(baseAddress, val); ++ break; ++ case HW_RST2_IVA2: ++ PRM_RSTCTRL_IVA2RST2_DSPWrite32(baseAddress, val); ++ break; ++ case HW_RST3_IVA2: ++ PRM_RSTCTRL_IVA2RST3_DSPWrite32(baseAddress, val); ++ break; ++ default: ++ status = RET_FAIL; ++ break; ++ } ++ return status; ++} ++ ++HW_STATUS HW_PWR_IVA2StateGet(const void __iomem *baseAddress, ++ enum HW_PwrModule_t p, enum HW_PwrState_t *value) ++{ ++ HW_STATUS status = RET_OK; ++ u32 temp; ++ ++ switch (p) { ++ case HW_PWR_DOMAIN_DSP: ++ /* wait until Transition is complete */ ++ do { ++ /* mdelay(1); */ ++ temp = PRCMPM_PWSTST_IVA2InTransitionRead32 ++ (baseAddress); ++ ++ } while (temp); ++ ++ temp = PRCMPM_PWSTST_IVA2ReadRegister32(baseAddress); ++ *value = PRCMPM_PWSTST_IVA2PowerStateStGet32(temp); ++ break; ++ ++ default: ++ status = RET_FAIL; ++ break; ++ } ++ return status; ++} ++ ++HW_STATUS HW_PWRST_IVA2RegGet(const void __iomem *baseAddress, u32 *value) ++{ ++ HW_STATUS status = RET_OK; ++ ++ *value = PRCMPM_PWSTST_IVA2ReadRegister32(baseAddress); ++ ++ return status; ++} ++ ++ ++HW_STATUS HW_PWR_IVA2PowerStateSet(const void __iomem *baseAddress, ++ enum HW_PwrModule_t p, ++ enum HW_PwrState_t value) ++{ ++ HW_STATUS status = RET_OK; ++ ++ switch (p) { ++ case HW_PWR_DOMAIN_DSP: ++ switch (value) { ++ case HW_PWR_STATE_ON: ++ PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32(baseAddress); ++ break; ++ case HW_PWR_STATE_RET: ++ PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32(baseAddress); ++ break; ++ case HW_PWR_STATE_OFF: ++ PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32(baseAddress); ++ break; ++ default: ++ status = RET_FAIL; ++ break; ++ } ++ break; ++ ++ default: ++ status = RET_FAIL; ++ break; ++ } ++ ++ return status; ++} ++ ++HW_STATUS HW_PWR_CLKCTRL_IVA2RegSet(const void __iomem *baseAddress, ++ enum HW_TransitionState_t val) ++{ ++ HW_STATUS status = RET_OK; ++ ++ PRCMCM_CLKSTCTRL_IVA2WriteRegister32(baseAddress, val); ++ ++ return status; ++ ++} ++ ++HW_STATUS HW_RSTST_RegGet(const void __iomem *baseAddress, ++ enum HW_RstModule_t m, u32 *value) ++{ ++ HW_STATUS status = RET_OK; ++ ++ *value = PRCMRM_RSTST_DSPReadRegister32(baseAddress); ++ ++ return status; ++} ++ ++HW_STATUS HW_RSTCTRL_RegGet(const void __iomem *baseAddress, ++ enum HW_RstModule_t m, u32 *value) ++{ ++ HW_STATUS status = RET_OK; ++ ++ *value = PRCMRM_RSTCTRL_DSPReadRegister32(baseAddress); ++ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_prcm.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/hw_prcm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,169 @@ ++/* ++ * hw_prcm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
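As a usage illustration of the PRCM helpers defined in hw_prcm.c above, the sketch below shows one plausible ordering for a software-supervised IVA2 power-down; it is not the bridge driver's literal sequence. prcm_base is a hypothetical mapped PRM/CM base, and HW_SW_SUP_SLEEP is the transition value declared in hw_prcm.h below.

    /* Sketch: request IVA2 off and wait for the transition to settle. */
    static void iva2_request_off(void __iomem *prcm_base)
    {
            enum HW_PwrState_t state;

            HW_PWR_IVA2PowerStateSet(prcm_base, HW_PWR_DOMAIN_DSP,
                                     HW_PWR_STATE_OFF);
            HW_PWR_CLKCTRL_IVA2RegSet(prcm_base, HW_SW_SUP_SLEEP);

            /* HW_PWR_IVA2StateGet() spins until the in-transition bit clears */
            HW_PWR_IVA2StateGet(prcm_base, HW_PWR_DOMAIN_DSP, &state);
    }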
++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== hw_prcm.h ======== ++ * Description: ++ * PRCM types and API declarations ++ * ++ *! Revision History: ++ *! ================ ++ *! 16 Feb 2003 sb: Initial version ++ */ ++ ++#ifndef __HW_PRCM_H ++#define __HW_PRCM_H ++ ++/* HW_ClkModule: Enumerated Type used to specify the clock domain */ ++ ++enum HW_ClkModule_t { ++/* DSP Domain */ ++ HW_CLK_DSP_CPU, ++ HW_CLK_DSP_IPI_MMU, ++ HW_CLK_IVA_ARM, ++ HW_CLK_IVA_COP, /* IVA Coprocessor */ ++ ++/* Core Domain */ ++ HW_CLK_FN_WDT4, /* Functional Clock */ ++ HW_CLK_FN_WDT3, ++ HW_CLK_FN_UART2, ++ HW_CLK_FN_UART1, ++ HW_CLK_GPT5, ++ HW_CLK_GPT6, ++ HW_CLK_GPT7, ++ HW_CLK_GPT8, ++ ++ HW_CLK_IF_WDT4, /* Interface Clock */ ++ HW_CLK_IF_WDT3, ++ HW_CLK_IF_UART2, ++ HW_CLK_IF_UART1, ++ HW_CLK_IF_MBOX ++ ++} ; ++ ++enum HW_ClkSubsys_t { ++ HW_CLK_DSPSS, ++ HW_CLK_IVASS ++} ; ++ ++/* HW_GPtimers: General purpose timers */ ++enum HW_GPtimer_t { ++ HW_GPT5 = 5, ++ HW_GPT6 = 6, ++ HW_GPT7 = 7, ++ HW_GPT8 = 8 ++} ; ++ ++ ++/* GP timers Input clock type: General purpose timers */ ++enum HW_Clocktype_t { ++ HW_CLK_32KHz = 0, ++ HW_CLK_SYS = 1, ++ HW_CLK_EXT = 2 ++} ; ++ ++/* HW_ClkDiv: Clock divisors */ ++enum HW_ClkDiv_t { ++ HW_CLK_DIV_1 = 0x1, ++ HW_CLK_DIV_2 = 0x2, ++ HW_CLK_DIV_3 = 0x3, ++ HW_CLK_DIV_4 = 0x4, ++ HW_CLK_DIV_6 = 0x6, ++ HW_CLK_DIV_8 = 0x8, ++ HW_CLK_DIV_12 = 0xC ++} ; ++ ++/* HW_RstModule: Enumerated Type used to specify the module to be reset */ ++enum HW_RstModule_t { ++ HW_RST1_IVA2, /* Reset the DSP */ ++ HW_RST2_IVA2, /* Reset MMU and LEON HWa */ ++ HW_RST3_IVA2 /* Reset LEON sequencer */ ++} ; ++ ++/* HW_PwrModule: Enumerated Type used to specify the power domain */ ++enum HW_PwrModule_t { ++/* Domains */ ++ HW_PWR_DOMAIN_CORE, ++ HW_PWR_DOMAIN_MPU, ++ HW_PWR_DOMAIN_WAKEUP, ++ HW_PWR_DOMAIN_DSP, ++ ++/* Sub-domains */ ++ HW_PWR_DSP_IPI, /* IPI = Intrusive Port Interface */ ++ HW_PWR_IVA_ISP /* ISP = Intrusive Slave Port */ ++} ; ++ ++enum HW_PwrState_t { ++ HW_PWR_STATE_OFF, ++ HW_PWR_STATE_RET, ++ HW_PWR_STATE_INACT, ++ HW_PWR_STATE_ON = 3 ++} ; ++ ++enum HW_ForceState_t { ++ HW_FORCE_OFF, ++ HW_FORCE_ON ++} ; ++ ++enum HW_IdleState_t { ++ HW_ACTIVE, ++ HW_STANDBY ++ ++} ; ++ ++enum HW_TransitionState_t { ++ HW_AUTOTRANS_DIS, ++ HW_SW_SUP_SLEEP, ++ HW_SW_SUP_WAKEUP, ++ HW_AUTOTRANS_EN ++} ; ++ ++ ++extern HW_STATUS HW_RST_Reset(const void __iomem *baseAddress, ++ enum HW_RstModule_t r); ++ ++extern HW_STATUS HW_RST_UnReset(const void __iomem *baseAddress, ++ enum HW_RstModule_t r); ++ ++extern HW_STATUS HW_RSTCTRL_RegGet(const void __iomem *baseAddress, ++ enum HW_RstModule_t p, ++ u32 *value); ++extern HW_STATUS HW_RSTST_RegGet(const void __iomem *baseAddress, ++ enum HW_RstModule_t p, u32 *value); ++ ++extern HW_STATUS HW_PWR_PowerStateSet(const u32 baseAddress, ++ enum HW_PwrModule_t p, ++ enum HW_PwrState_t value); ++ ++extern HW_STATUS HW_CLK_SetInputClock(const u32 baseAddress, ++ enum HW_GPtimer_t gpt, ++ enum HW_Clocktype_t c); ++ ++extern HW_STATUS HW_PWR_IVA2StateGet(const void __iomem *baseAddress, ++ enum HW_PwrModule_t p, ++ enum 
HW_PwrState_t *value); ++ ++extern HW_STATUS HW_PWRST_IVA2RegGet(const void __iomem *baseAddress, ++ u32 *value); ++ ++extern HW_STATUS HW_PWR_IVA2PowerStateSet(const void __iomem *baseAddress, ++ enum HW_PwrModule_t p, ++ enum HW_PwrState_t value); ++ ++extern HW_STATUS HW_PWR_CLKCTRL_IVA2RegSet(const void __iomem *baseAddress, ++ enum HW_TransitionState_t val); ++ ++#endif /* __HW_PRCM_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IPIAccInt.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/IPIAccInt.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IPIAccInt.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/IPIAccInt.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,41 @@ ++/* ++ * IPIAccInt.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef _IPI_ACC_INT_H ++#define _IPI_ACC_INT_H ++ ++/* Bitfield mask and offset declarations */ ++#define SYSC_IVA2BOOTMOD_OFFSET 0x404 ++#define SYSC_IVA2BOOTADDR_OFFSET 0x400 ++#define SYSC_IVA2BOOTADDR_MASK 0xfffffc00 ++ ++ ++/* The following represent the enumerated values for each bitfield */ ++ ++enum IPIIPI_SYSCONFIGAutoIdleE { ++ IPIIPI_SYSCONFIGAutoIdleclkfree = 0x0000, ++ IPIIPI_SYSCONFIGAutoIdleautoclkgate = 0x0001 ++} ; ++ ++enum IPIIPI_ENTRYElemSizeValueE { ++ IPIIPI_ENTRYElemSizeValueElemSz8b = 0x0000, ++ IPIIPI_ENTRYElemSizeValueElemSz16b = 0x0001, ++ IPIIPI_ENTRYElemSizeValueElemSz32b = 0x0002, ++ IPIIPI_ENTRYElemSizeValueReserved = 0x0003 ++} ; ++ ++#endif /* _IPI_ACC_INT_H */ ++/* EOF */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IVA2RegAcM.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/IVA2RegAcM.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IVA2RegAcM.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/IVA2RegAcM.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,28 @@ ++/* ++ * IVA1RegAcM.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
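The IVA2BOOTMOD/IVA2BOOTADDR offsets in IPIAccInt.h above are the same registers that HW_DSPSS_BootModeSet() programs, and hw_prcm.h pairs them with the three IVA2 reset lines. A heavily simplified bring-up sketch follows; the ordering is one plausible sequence for illustration, not the sequence the bridge driver actually uses, and prm_base/sysc_base are hypothetical mapped bases.

    /* Sketch: hold the DSP in reset, program the boot registers, release. */
    static void dsp_bringup(void __iomem *prm_base, void __iomem *sysc_base)
    {
            HW_RST_Reset(prm_base, HW_RST1_IVA2);   /* hold the DSP core   */
            HW_RST_Reset(prm_base, HW_RST2_IVA2);   /* hold MMU/LEON HWa   */

            HW_DSPSS_BootModeSet(sysc_base, HW_DSPSYSC_IDLEBOOT,
                                 HW_DSP_IDLEBOOT_ADDR);

            HW_RST_UnReset(prm_base, HW_RST2_IVA2);
            HW_RST_UnReset(prm_base, HW_RST1_IVA2); /* DSP starts fetching */
    }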
++ */ ++ ++ ++ ++#ifndef _IVA2_REG_ACM_H ++#define _IVA2_REG_ACM_H ++ ++#include ++#include ++ ++#define SYSC_IVA2BOOTMOD_OFFSET 0x404 ++#define SYSC_IVA2BOOTADDR_OFFSET 0x400 ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBAccInt.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MLBAccInt.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBAccInt.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MLBAccInt.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,132 @@ ++/* ++ * MLBAccInt.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++#ifndef _MLB_ACC_INT_H ++#define _MLB_ACC_INT_H ++ ++/* Mappings of level 1 EASI function numbers to function names */ ++ ++#define EASIL1_MLBMAILBOX_SYSCONFIGReadRegister32 (MLB_BASE_EASIL1 + 3) ++#define EASIL1_MLBMAILBOX_SYSCONFIGWriteRegister32 (MLB_BASE_EASIL1 + 4) ++#define EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeRead32 (MLB_BASE_EASIL1 + 7) ++#define EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeWrite32 (MLB_BASE_EASIL1 + 17) ++#define EASIL1_MLBMAILBOX_SYSCONFIGSoftResetWrite32 (MLB_BASE_EASIL1 + 29) ++#define EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleRead32 \ ++ (MLB_BASE_EASIL1 + 33) ++#define EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleWrite32 (MLB_BASE_EASIL1 + 39) ++#define EASIL1_MLBMAILBOX_SYSSTATUSResetDoneRead32 (MLB_BASE_EASIL1 + 44) ++#define EASIL1_MLBMAILBOX_MESSAGE___0_15ReadRegister32 \ ++ (MLB_BASE_EASIL1 + 50) ++#define EASIL1_MLBMAILBOX_MESSAGE___0_15WriteRegister32 \ ++ (MLB_BASE_EASIL1 + 51) ++#define EASIL1_MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32 \ ++ (MLB_BASE_EASIL1 + 56) ++#define EASIL1_MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32 \ ++ (MLB_BASE_EASIL1 + 57) ++#define EASIL1_MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32 \ ++ (MLB_BASE_EASIL1 + 60) ++#define EASIL1_MLBMAILBOX_IRQSTATUS___0_3ReadRegister32 \ ++ (MLB_BASE_EASIL1 + 62) ++#define EASIL1_MLBMAILBOX_IRQSTATUS___0_3WriteRegister32 \ ++ (MLB_BASE_EASIL1 + 63) ++#define EASIL1_MLBMAILBOX_IRQENABLE___0_3ReadRegister32 \ ++ (MLB_BASE_EASIL1 + 192) ++#define EASIL1_MLBMAILBOX_IRQENABLE___0_3WriteRegister32 \ ++ (MLB_BASE_EASIL1 + 193) ++ ++/* Register set MAILBOX_MESSAGE___REGSET_0_15 address offset, bank address ++ * increment and number of banks */ ++ ++#define MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET (u32)(0x0040) ++#define MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP (u32)(0x0004) ++ ++/* Register offset address definitions relative to register set ++ * MAILBOX_MESSAGE___REGSET_0_15 */ ++ ++#define MLB_MAILBOX_MESSAGE___0_15_OFFSET (u32)(0x0) ++ ++ ++/* Register set MAILBOX_FIFOSTATUS___REGSET_0_15 address offset, bank address ++ * increment and number of banks */ ++ ++#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET (u32)(0x0080) ++#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP (u32)(0x0004) ++ ++/* Register offset address definitions relative to register set ++ * MAILBOX_FIFOSTATUS___REGSET_0_15 */ ++ ++#define MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET (u32)(0x0) ++ ++ ++/* Register set MAILBOX_MSGSTATUS___REGSET_0_15 address offset, 
bank address ++ * increment and number of banks */ ++ ++#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET (u32)(0x00c0) ++#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP (u32)(0x0004) ++ ++/* Register offset address definitions relative to register set ++ * MAILBOX_MSGSTATUS___REGSET_0_15 */ ++ ++#define MLB_MAILBOX_MSGSTATUS___0_15_OFFSET (u32)(0x0) ++ ++ ++/* Register set MAILBOX_IRQSTATUS___REGSET_0_3 address offset, bank address ++ * increment and number of banks */ ++ ++#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET (u32)(0x0100) ++#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP (u32)(0x0008) ++ ++/* Register offset address definitions relative to register set ++ * MAILBOX_IRQSTATUS___REGSET_0_3 */ ++ ++#define MLB_MAILBOX_IRQSTATUS___0_3_OFFSET (u32)(0x0) ++ ++ ++/* Register set MAILBOX_IRQENABLE___REGSET_0_3 address offset, bank address ++ * increment and number of banks */ ++ ++#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET (u32)(0x0104) ++#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP (u32)(0x0008) ++ ++/* Register offset address definitions relative to register set ++ * MAILBOX_IRQENABLE___REGSET_0_3 */ ++ ++#define MLB_MAILBOX_IRQENABLE___0_3_OFFSET (u32)(0x0) ++ ++ ++/* Register offset address definitions */ ++ ++#define MLB_MAILBOX_SYSCONFIG_OFFSET (u32)(0x10) ++#define MLB_MAILBOX_SYSSTATUS_OFFSET (u32)(0x14) ++ ++ ++/* Bitfield mask and offset declarations */ ++ ++#define MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK (u32)(0x18) ++#define MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET (u32)(3) ++#define MLB_MAILBOX_SYSCONFIG_SoftReset_MASK (u32)(0x2) ++#define MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET (u32)(1) ++#define MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK (u32)(0x1) ++#define MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET (u32)(0) ++#define MLB_MAILBOX_SYSSTATUS_ResetDone_MASK (u32)(0x1) ++#define MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET (u32)(0) ++#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK (u32)(0x1) ++#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET (u32)(0) ++#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK (u32)(0x7f) ++#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET (u32)(0) ++ ++#endif /* _MLB_ACC_INT_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBRegAcM.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MLBRegAcM.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBRegAcM.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MLBRegAcM.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,201 @@ ++/* ++ * MLBRegAcM.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
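The banked register sets declared in MLBAccInt.h above are addressed as base + REGSET_OFFSET + bank * STEP, so the message register of mailbox n sits at 0x40 + 4*n from the MLB base. A tiny worked example with the constants copied from above:

    /* Worked example of banked mailbox-register addressing. */
    #define MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET  0x0040
    #define MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP    0x0004

    static inline u32 mbox_message_reg_offset(u32 bank)
    {
            /* bank 0 -> 0x40, bank 3 -> 0x4c, bank 15 -> 0x7c */
            return MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +
                   bank * MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP;
    }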
++ */ ++ ++#ifndef _MLB_REG_ACM_H ++#define _MLB_REG_ACM_H ++ ++#include ++#include ++#include ++#include "MLBAccInt.h" ++ ++#if defined(USE_LEVEL_1_MACROS) ++ ++#define MLBMAILBOX_SYSCONFIGReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGReadRegister32),\ ++ __raw_readl(((baseAddress))+ \ ++ MLB_MAILBOX_SYSCONFIG_OFFSET)) ++ ++ ++#define MLBMAILBOX_SYSCONFIGWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGWriteRegister32);\ ++ __raw_writel(newValue, ((baseAddress))+offset);\ ++} ++ ++ ++#define MLBMAILBOX_SYSCONFIGSIdleModeRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\ ++ MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\ ++ MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)) ++ ++ ++#define MLBMAILBOX_SYSCONFIGSIdleModeWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ ++ register u32 data = __raw_readl(((u32)(baseAddress)) +\ ++ offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeWrite32);\ ++ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\ ++ newValue <<= MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\ ++ newValue &= MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define MLBMAILBOX_SYSCONFIGSoftResetWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSoftResetWrite32);\ ++ data &= ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK);\ ++ newValue <<= MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET;\ ++ newValue &= MLB_MAILBOX_SYSCONFIG_SoftReset_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define MLBMAILBOX_SYSCONFIGAutoIdleRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\ ++ MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\ ++ MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET)) ++ ++ ++#define MLBMAILBOX_SYSCONFIGAutoIdleWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleWrite32);\ ++ data &= ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK);\ ++ newValue <<= MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET;\ ++ newValue &= MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define MLBMAILBOX_SYSSTATUSResetDoneRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSSTATUSResetDoneRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (MLB_MAILBOX_SYSSTATUS_OFFSET)))) &\ ++ MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\ ++ MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET)) ++ ++ ++#define MLBMAILBOX_MESSAGE___0_15ReadRegister32(baseAddress, bank)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_MESSAGE___0_15ReadRegister32),\ ++ __raw_readl(((baseAddress))+\ ++ (MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\ ++ MLB_MAILBOX_MESSAGE___0_15_OFFSET+(\ ++ 
(bank)*MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP)))) ++ ++ ++#define MLBMAILBOX_MESSAGE___0_15WriteRegister32(baseAddress, bank, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\ ++ MLB_MAILBOX_MESSAGE___0_15_OFFSET +\ ++ ((bank)*MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_MESSAGE___0_15WriteRegister32);\ ++ __raw_writel(newValue, ((baseAddress))+offset);\ ++} ++ ++ ++#define MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32(baseAddress, bank)\ ++ (_DEBUG_LEVEL_1_EASI(\ ++ EASIL1_MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+\ ++ (MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\ ++ MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+\ ++ ((bank)*MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP)))) ++ ++ ++#define MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32(baseAddress, bank)\ ++ (_DEBUG_LEVEL_1_EASI(\ ++ EASIL1_MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32),\ ++ (((__raw_readl(((baseAddress))+\ ++ (MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\ ++ MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+\ ++ ((bank)*MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP)))) &\ ++ MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK) >>\ ++ MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET)) ++ ++ ++#define MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(baseAddress, bank)\ ++ (_DEBUG_LEVEL_1_EASI(\ ++ EASIL1_MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32),\ ++ (((__raw_readl(((baseAddress))+\ ++ (MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET +\ ++ MLB_MAILBOX_MSGSTATUS___0_15_OFFSET+\ ++ ((bank)*MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP)))) &\ ++ MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK) >>\ ++ MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET)) ++ ++ ++#define MLBMAILBOX_IRQSTATUS___0_3ReadRegister32(baseAddress, bank)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQSTATUS___0_3ReadRegister32),\ ++ __raw_readl(((baseAddress))+\ ++ (MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\ ++ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+\ ++ ((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) ++ ++ ++#define MLBMAILBOX_IRQSTATUS___0_3WriteRegister32(baseAddress, bank, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\ ++ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\ ++ ((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQSTATUS___0_3WriteRegister32);\ ++ __raw_writel(newValue, ((baseAddress))+offset);\ ++} ++ ++ ++#define MLBMAILBOX_IRQENABLE___0_3ReadRegister32(baseAddress, bank)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQENABLE___0_3ReadRegister32),\ ++ __raw_readl(((baseAddress))+\ ++ (MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\ ++ MLB_MAILBOX_IRQENABLE___0_3_OFFSET+\ ++ ((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) ++ ++ ++#define MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, bank, value)\ ++{\ ++ const u32 offset = MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\ ++ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\ ++ ((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQENABLE___0_3WriteRegister32);\ ++ __raw_writel(newValue, ((baseAddress))+offset);\ ++} ++ ++ ++#endif /* USE_LEVEL_1_MACROS */ ++ ++#endif /* _MLB_REG_ACM_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMUAccInt.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MMUAccInt.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMUAccInt.h 1970-01-01 01:00:00.000000000 +0100 
++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MMUAccInt.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,76 @@ ++/* ++ * MMUAccInt.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef _MMU_ACC_INT_H ++#define _MMU_ACC_INT_H ++ ++/* Mappings of level 1 EASI function numbers to function names */ ++ ++#define EASIL1_MMUMMU_SYSCONFIGReadRegister32 (MMU_BASE_EASIL1 + 3) ++#define EASIL1_MMUMMU_SYSCONFIGIdleModeWrite32 (MMU_BASE_EASIL1 + 17) ++#define EASIL1_MMUMMU_SYSCONFIGAutoIdleWrite32 (MMU_BASE_EASIL1 + 39) ++#define EASIL1_MMUMMU_IRQSTATUSWriteRegister32 (MMU_BASE_EASIL1 + 51) ++#define EASIL1_MMUMMU_IRQENABLEReadRegister32 (MMU_BASE_EASIL1 + 102) ++#define EASIL1_MMUMMU_IRQENABLEWriteRegister32 (MMU_BASE_EASIL1 + 103) ++#define EASIL1_MMUMMU_WALKING_STTWLRunningRead32 (MMU_BASE_EASIL1 + 156) ++#define EASIL1_MMUMMU_CNTLTWLEnableRead32 (MMU_BASE_EASIL1 + 174) ++#define EASIL1_MMUMMU_CNTLTWLEnableWrite32 (MMU_BASE_EASIL1 + 180) ++#define EASIL1_MMUMMU_CNTLMMUEnableWrite32 (MMU_BASE_EASIL1 + 190) ++#define EASIL1_MMUMMU_FAULT_ADReadRegister32 (MMU_BASE_EASIL1 + 194) ++#define EASIL1_MMUMMU_TTBWriteRegister32 (MMU_BASE_EASIL1 + 198) ++#define EASIL1_MMUMMU_LOCKReadRegister32 (MMU_BASE_EASIL1 + 203) ++#define EASIL1_MMUMMU_LOCKWriteRegister32 (MMU_BASE_EASIL1 + 204) ++#define EASIL1_MMUMMU_LOCKBaseValueRead32 (MMU_BASE_EASIL1 + 205) ++#define EASIL1_MMUMMU_LOCKCurrentVictimRead32 (MMU_BASE_EASIL1 + 209) ++#define EASIL1_MMUMMU_LOCKCurrentVictimWrite32 (MMU_BASE_EASIL1 + 211) ++#define EASIL1_MMUMMU_LOCKCurrentVictimSet32 (MMU_BASE_EASIL1 + 212) ++#define EASIL1_MMUMMU_LD_TLBReadRegister32 (MMU_BASE_EASIL1 + 213) ++#define EASIL1_MMUMMU_LD_TLBWriteRegister32 (MMU_BASE_EASIL1 + 214) ++#define EASIL1_MMUMMU_CAMWriteRegister32 (MMU_BASE_EASIL1 + 226) ++#define EASIL1_MMUMMU_RAMWriteRegister32 (MMU_BASE_EASIL1 + 268) ++#define EASIL1_MMUMMU_FLUSH_ENTRYWriteRegister32 (MMU_BASE_EASIL1 + 322) ++ ++/* Register offset address definitions */ ++#define MMU_MMU_SYSCONFIG_OFFSET 0x10 ++#define MMU_MMU_IRQSTATUS_OFFSET 0x18 ++#define MMU_MMU_IRQENABLE_OFFSET 0x1c ++#define MMU_MMU_WALKING_ST_OFFSET 0x40 ++#define MMU_MMU_CNTL_OFFSET 0x44 ++#define MMU_MMU_FAULT_AD_OFFSET 0x48 ++#define MMU_MMU_TTB_OFFSET 0x4c ++#define MMU_MMU_LOCK_OFFSET 0x50 ++#define MMU_MMU_LD_TLB_OFFSET 0x54 ++#define MMU_MMU_CAM_OFFSET 0x58 ++#define MMU_MMU_RAM_OFFSET 0x5c ++#define MMU_MMU_GFLUSH_OFFSET 0x60 ++#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64 ++/* Bitfield mask and offset declarations */ ++#define MMU_MMU_SYSCONFIG_IdleMode_MASK 0x18 ++#define MMU_MMU_SYSCONFIG_IdleMode_OFFSET 3 ++#define MMU_MMU_SYSCONFIG_AutoIdle_MASK 0x1 ++#define MMU_MMU_SYSCONFIG_AutoIdle_OFFSET 0 ++#define MMU_MMU_WALKING_ST_TWLRunning_MASK 0x1 ++#define MMU_MMU_WALKING_ST_TWLRunning_OFFSET 0 ++#define MMU_MMU_CNTL_TWLEnable_MASK 0x4 ++#define MMU_MMU_CNTL_TWLEnable_OFFSET 2 ++#define MMU_MMU_CNTL_MMUEnable_MASK 0x2 ++#define MMU_MMU_CNTL_MMUEnable_OFFSET 1 ++#define MMU_MMU_LOCK_BaseValue_MASK 0xfc00 ++#define MMU_MMU_LOCK_BaseValue_OFFSET 10 
++#define MMU_MMU_LOCK_CurrentVictim_MASK 0x3f0 ++#define MMU_MMU_LOCK_CurrentVictim_OFFSET 4 ++ ++#endif /* _MMU_ACC_INT_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMURegAcM.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MMURegAcM.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMURegAcM.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/MMURegAcM.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,253 @@ ++/* ++ * MMURegAcM.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++#ifndef _MMU_REG_ACM_H ++#define _MMU_REG_ACM_H ++ ++#include ++#include ++#include ++ ++#include "MMUAccInt.h" ++ ++#if defined(USE_LEVEL_1_MACROS) ++ ++ ++#define MMUMMU_SYSCONFIGReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_SYSCONFIGReadRegister32),\ ++ __raw_readl((baseAddress)+MMU_MMU_SYSCONFIG_OFFSET)) ++ ++ ++#define MMUMMU_SYSCONFIGIdleModeWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_SYSCONFIGIdleModeWrite32);\ ++ data &= ~(MMU_MMU_SYSCONFIG_IdleMode_MASK);\ ++ newValue <<= MMU_MMU_SYSCONFIG_IdleMode_OFFSET;\ ++ newValue &= MMU_MMU_SYSCONFIG_IdleMode_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, baseAddress+offset);\ ++} ++ ++ ++#define MMUMMU_SYSCONFIGAutoIdleWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_SYSCONFIGAutoIdleWrite32);\ ++ data &= ~(MMU_MMU_SYSCONFIG_AutoIdle_MASK);\ ++ newValue <<= MMU_MMU_SYSCONFIG_AutoIdle_OFFSET;\ ++ newValue &= MMU_MMU_SYSCONFIG_AutoIdle_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, baseAddress+offset);\ ++} ++ ++ ++#define MMUMMU_IRQSTATUSReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQSTATUSReadRegister32),\ ++ __raw_readl((baseAddress)+MMU_MMU_IRQSTATUS_OFFSET)) ++ ++ ++#define MMUMMU_IRQSTATUSWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQSTATUSWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_IRQENABLEReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQENABLEReadRegister32),\ ++ __raw_readl((baseAddress)+MMU_MMU_IRQENABLE_OFFSET)) ++ ++ ++#define MMUMMU_IRQENABLEWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQENABLEWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_WALKING_STTWLRunningRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_WALKING_STTWLRunningRead32),\ ++ (((__raw_readl(((baseAddress)+(MMU_MMU_WALKING_ST_OFFSET))))\ ++ & 
MMU_MMU_WALKING_ST_TWLRunning_MASK) >>\ ++ MMU_MMU_WALKING_ST_TWLRunning_OFFSET)) ++ ++ ++#define MMUMMU_CNTLTWLEnableRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CNTLTWLEnableRead32),\ ++ (((__raw_readl(((baseAddress)+(MMU_MMU_CNTL_OFFSET)))) &\ ++ MMU_MMU_CNTL_TWLEnable_MASK) >>\ ++ MMU_MMU_CNTL_TWLEnable_OFFSET)) ++ ++ ++#define MMUMMU_CNTLTWLEnableWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_CNTL_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CNTLTWLEnableWrite32);\ ++ data &= ~(MMU_MMU_CNTL_TWLEnable_MASK);\ ++ newValue <<= MMU_MMU_CNTL_TWLEnable_OFFSET;\ ++ newValue &= MMU_MMU_CNTL_TWLEnable_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, baseAddress+offset);\ ++} ++ ++ ++#define MMUMMU_CNTLMMUEnableWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_CNTL_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CNTLMMUEnableWrite32);\ ++ data &= ~(MMU_MMU_CNTL_MMUEnable_MASK);\ ++ newValue <<= MMU_MMU_CNTL_MMUEnable_OFFSET;\ ++ newValue &= MMU_MMU_CNTL_MMUEnable_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, baseAddress+offset);\ ++} ++ ++ ++#define MMUMMU_FAULT_ADReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_FAULT_ADReadRegister32),\ ++ __raw_readl((baseAddress)+MMU_MMU_FAULT_AD_OFFSET)) ++ ++ ++#define MMUMMU_TTBWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_TTB_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_TTBWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_LOCKReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKReadRegister32),\ ++ __raw_readl((baseAddress)+MMU_MMU_LOCK_OFFSET)) ++ ++ ++#define MMUMMU_LOCKWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_LOCK_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_LOCKBaseValueRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKBaseValueRead32),\ ++ (((__raw_readl(((baseAddress)+(MMU_MMU_LOCK_OFFSET)))) &\ ++ MMU_MMU_LOCK_BaseValue_MASK) >>\ ++ MMU_MMU_LOCK_BaseValue_OFFSET)) ++ ++ ++#define MMUMMU_LOCKBaseValueWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_LOCK_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKBaseValueWrite32);\ ++ data &= ~(MMU_MMU_LOCK_BaseValue_MASK);\ ++ newValue <<= MMU_MMU_LOCK_BaseValue_OFFSET;\ ++ newValue &= MMU_MMU_LOCK_BaseValue_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, baseAddress+offset);\ ++} ++ ++ ++#define MMUMMU_LOCKCurrentVictimRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKCurrentVictimRead32),\ ++ (((__raw_readl(((baseAddress)+(MMU_MMU_LOCK_OFFSET)))) &\ ++ MMU_MMU_LOCK_CurrentVictim_MASK) >>\ ++ MMU_MMU_LOCK_CurrentVictim_OFFSET)) ++ ++ ++#define MMUMMU_LOCKCurrentVictimWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_LOCK_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKCurrentVictimWrite32);\ ++ data &= ~(MMU_MMU_LOCK_CurrentVictim_MASK);\ ++ newValue <<= MMU_MMU_LOCK_CurrentVictim_OFFSET;\ ++ 
newValue &= MMU_MMU_LOCK_CurrentVictim_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, baseAddress+offset);\ ++} ++ ++ ++#define MMUMMU_LOCKCurrentVictimSet32(var, value)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKCurrentVictimSet32),\ ++ (((var) & ~(MMU_MMU_LOCK_CurrentVictim_MASK)) |\ ++ (((value) << MMU_MMU_LOCK_CurrentVictim_OFFSET) &\ ++ MMU_MMU_LOCK_CurrentVictim_MASK))) ++ ++ ++#define MMUMMU_LD_TLBReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LD_TLBReadRegister32),\ ++ __raw_readl((baseAddress)+MMU_MMU_LD_TLB_OFFSET)) ++ ++ ++#define MMUMMU_LD_TLBWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_LD_TLB_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LD_TLBWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_CAMWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_CAM_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CAMWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_RAMWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_RAM_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_RAMWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define MMUMMU_FLUSH_ENTRYWriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\ ++ register u32 newValue = (value);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_FLUSH_ENTRYWriteRegister32);\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#endif /* USE_LEVEL_1_MACROS */ ++ ++#endif /* _MMU_REG_ACM_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMAccInt.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/PRCMAccInt.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMAccInt.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/PRCMAccInt.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,300 @@ ++/* ++ * PRCMAccInt.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++#ifndef _PRCM_ACC_INT_H ++#define _PRCM_ACC_INT_H ++ ++/* Mappings of level 1 EASI function numbers to function names */ ++ ++#define EASIL1_PRCMPRCM_CLKCFG_CTRLValid_configWriteClk_valid32 \ ++ (PRCM_BASE_EASIL1 + 349) ++#define EASIL1_PRCMCM_FCLKEN1_COREReadRegister32 (PRCM_BASE_EASIL1 + 743) ++#define EASIL1_PRCMCM_FCLKEN1_COREEN_GPT8Write32 (PRCM_BASE_EASIL1 + 951) ++#define EASIL1_PRCMCM_FCLKEN1_COREEN_GPT7Write32 (PRCM_BASE_EASIL1 + 961) ++#define EASIL1_PRCMCM_ICLKEN1_COREReadRegister32 \ ++ (PRCM_BASE_EASIL1 + 1087) ++#define EASIL1_PRCMCM_ICLKEN1_COREEN_MAILBOXESWrite32 \ ++ (PRCM_BASE_EASIL1 + 1105) ++#define EASIL1_PRCMCM_ICLKEN1_COREEN_GPT8Write32 \ ++ (PRCM_BASE_EASIL1 + 1305) ++#define EASIL1_PRCMCM_ICLKEN1_COREEN_GPT7Write32 \ ++ (PRCM_BASE_EASIL1 + 1315) ++#define EASIL1_PRCMCM_CLKSEL1_CORECLKSEL_L3ReadIssel132 \ ++ (PRCM_BASE_EASIL1 + 2261) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8Write32k32 \ ++ (PRCM_BASE_EASIL1 + 2364) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteSys32 \ ++ (PRCM_BASE_EASIL1 + 2365) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteExt32 \ ++ (PRCM_BASE_EASIL1 + 2366) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7Write32k32 \ ++ (PRCM_BASE_EASIL1 + 2380) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteSys32 \ ++ (PRCM_BASE_EASIL1 + 2381) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteExt32 \ ++ (PRCM_BASE_EASIL1 + 2382) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteSys32 \ ++ (PRCM_BASE_EASIL1 + 2397) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteExt32 \ ++ (PRCM_BASE_EASIL1 + 2398) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteSys32 \ ++ (PRCM_BASE_EASIL1 + 2413) ++#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteExt32 \ ++ (PRCM_BASE_EASIL1 + 2414) ++#define EASIL1_PRCMCM_CLKSEL1_PLLAPLLs_ClkinRead32 \ ++ (PRCM_BASE_EASIL1 + 3747) ++#define EASIL1_PRCMCM_FCLKEN_DSPEN_DSPWrite32 (PRCM_BASE_EASIL1 + 3834) ++#define EASIL1_PRCMCM_ICLKEN_DSPEN_DSP_IPIWrite32 \ ++ (PRCM_BASE_EASIL1 + 3846) ++#define EASIL1_PRCMCM_IDLEST_DSPReadRegister32 (PRCM_BASE_EASIL1 + 3850) ++#define EASIL1_PRCMCM_IDLEST_DSPST_IPIRead32 (PRCM_BASE_EASIL1 + 3857) ++#define EASIL1_PRCMCM_IDLEST_DSPST_DSPRead32 (PRCM_BASE_EASIL1 + 3863) ++#define EASIL1_PRCMCM_AUTOIDLE_DSPAUTO_DSP_IPIWrite32 \ ++ (PRCM_BASE_EASIL1 + 3877) ++#define EASIL1_PRCMCM_CLKSEL_DSPSYNC_DSPWrite32 (PRCM_BASE_EASIL1 + 3927) ++#define EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSP_IFWrite32 \ ++ (PRCM_BASE_EASIL1 + 3941) ++#define EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSPWrite32 \ ++ (PRCM_BASE_EASIL1 + 3965) ++#define EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPRead32 \ ++ (PRCM_BASE_EASIL1 + 3987) ++#define EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPWrite32 \ ++ (PRCM_BASE_EASIL1 + 3993) ++#define EASIL1_PRCMRM_RSTCTRL_DSPReadRegister32 (PRCM_BASE_EASIL1 + 3997) ++#define EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32 \ ++ (PRCM_BASE_EASIL1 + 4025) ++#define EASIL1_PRCMRM_RSTST_DSPReadRegister32 (PRCM_BASE_EASIL1 + 4029) ++#define EASIL1_PRCMRM_RSTST_DSPWriteRegister32 (PRCM_BASE_EASIL1 + 4030) ++#define EASIL1_PRCMPM_PWSTCTRL_DSPForceStateWrite32 \ ++ (PRCM_BASE_EASIL1 + 4165) ++#define EASIL1_PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32 \ ++ (PRCM_BASE_EASIL1 + 4193) ++#define EASIL1_PRCMPM_PWSTST_DSPReadRegister32 (PRCM_BASE_EASIL1 + 4197) ++#define EASIL1_PRCMPM_PWSTST_DSPInTransitionRead32 \ ++ (PRCM_BASE_EASIL1 + 4198) ++#define EASIL1_PRCMPM_PWSTST_DSPPowerStateStGet32 \ ++ (PRCM_BASE_EASIL1 + 4235) ++#define EASIL1_CM_FCLKEN_PER_GPT5WriteRegister32 \ ++ (PRCM_BASE_EASIL1 + 4368) 
++#define EASIL1_CM_ICLKEN_PER_GPT5WriteRegister32 \ ++ (PRCM_BASE_EASIL1 + 4370) ++#define EASIL1_CM_CLKSEL_PER_GPT5Write32k32 (PRCM_BASE_EASIL1 + 4372) ++#define EASIL1_CM_CLKSEL_PER_GPT6Write32k32 (PRCM_BASE_EASIL1 + 4373) ++#define EASIL1_PRCMCM_CLKSTCTRL_IVA2WriteRegister32 \ ++ (PRCM_BASE_EASIL1 + 4374) ++#define EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32 \ ++ (PRCM_BASE_EASIL1 + 4375) ++#define EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32 \ ++ (PRCM_BASE_EASIL1 + 4376) ++#define EASIL1_PRCMPM_PWSTST_IVA2InTransitionRead32 \ ++ (PRCM_BASE_EASIL1 + 4377) ++#define EASIL1_PRCMPM_PWSTST_IVA2PowerStateStGet32 \ ++ (PRCM_BASE_EASIL1 + 4378) ++#define EASIL1_PRCMPM_PWSTST_IVA2ReadRegister32 (PRCM_BASE_EASIL1 + 4379) ++ ++/* Register offset address definitions */ ++ ++#define PRCM_PRCM_CLKCFG_CTRL_OFFSET (u32)(0x80) ++#define PRCM_CM_FCLKEN1_CORE_OFFSET (u32)(0x200) ++#define PRCM_CM_ICLKEN1_CORE_OFFSET (u32)(0x210) ++#define PRCM_CM_CLKSEL2_CORE_OFFSET (u32)(0x244) ++#define PRCM_CM_CLKSEL1_PLL_OFFSET (u32)(0x540) ++#define PRCM_CM_ICLKEN_DSP_OFFSET (u32)(0x810) ++#define PRCM_CM_IDLEST_DSP_OFFSET (u32)(0x820) ++#define PRCM_CM_AUTOIDLE_DSP_OFFSET (u32)(0x830) ++#define PRCM_CM_CLKSEL_DSP_OFFSET (u32)(0x840) ++#define PRCM_CM_CLKSTCTRL_DSP_OFFSET (u32)(0x848) ++#define PRCM_RM_RSTCTRL_DSP_OFFSET (u32)(0x050) ++#define PRCM_RM_RSTST_DSP_OFFSET (u32)(0x058) ++#define PRCM_PM_PWSTCTRL_DSP_OFFSET (u32)(0x8e0) ++#define PRCM_PM_PWSTST_DSP_OFFSET (u32)(0x8e4) ++#define PRCM_PM_PWSTST_IVA2_OFFSET (u32)(0xE4) ++#define PRCM_PM_PWSTCTRL_IVA2_OFFSET (u32)(0xE0) ++#define PRCM_CM_CLKSTCTRL_IVA2_OFFSET (u32)(0x48) ++#define CM_CLKSEL_PER_OFFSET (u32)(0x40) ++ ++/* Bitfield mask and offset declarations */ ++ ++#define PRCM_PRCM_CLKCFG_CTRL_Valid_config_MASK (u32)(0x1) ++#define PRCM_PRCM_CLKCFG_CTRL_Valid_config_OFFSET (u32)(0) ++ ++#define PRCM_CM_FCLKEN1_CORE_EN_GPT8_MASK (u32)(0x400) ++#define PRCM_CM_FCLKEN1_CORE_EN_GPT8_OFFSET (u32)(10) ++ ++#define PRCM_CM_FCLKEN1_CORE_EN_GPT7_MASK (u32)(0x200) ++#define PRCM_CM_FCLKEN1_CORE_EN_GPT7_OFFSET (u32)(9) ++ ++#define PRCM_CM_ICLKEN1_CORE_EN_GPT8_MASK (u32)(0x400) ++#define PRCM_CM_ICLKEN1_CORE_EN_GPT8_OFFSET (u32)(10) ++ ++#define PRCM_CM_ICLKEN1_CORE_EN_GPT7_MASK (u32)(0x200) ++#define PRCM_CM_ICLKEN1_CORE_EN_GPT7_OFFSET (u32)(9) ++ ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK (u32)(0xc000) ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET (u32)(14) ++ ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK (u32)(0x3000) ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET (u32)(12) ++ ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_MASK (u32)(0xc00) ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_OFFSET (u32)(10) ++ ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_MASK (u32)(0x300) ++#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_OFFSET (u32)(8) ++ ++#define PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_MASK (u32)(0x3800000) ++#define PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_OFFSET (u32)(23) ++ ++#define PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_MASK (u32)(0x2) ++#define PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_OFFSET (u32)(1) ++ ++#define PRCM_CM_IDLEST_DSP_ST_IPI_MASK (u32)(0x2) ++#define PRCM_CM_IDLEST_DSP_ST_IPI_OFFSET (u32)(1) ++ ++#define PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_MASK (u32)(0x2) ++#define PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_OFFSET (u32)(1) ++ ++#define PRCM_CM_CLKSEL_DSP_SYNC_DSP_MASK (u32)(0x80) ++#define PRCM_CM_CLKSEL_DSP_SYNC_DSP_OFFSET (u32)(7) ++ ++#define PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_MASK (u32)(0x60) ++#define PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_OFFSET (u32)(5) ++ ++#define 
PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_MASK (u32)(0x1f) ++#define PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_OFFSET (u32)(0) ++ ++#define PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK (u32)(0x1) ++#define PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_OFFSET (u32)(0) ++ ++#define PRCM_PM_PWSTCTRL_DSP_ForceState_MASK (u32)(0x40000) ++#define PRCM_PM_PWSTCTRL_DSP_ForceState_OFFSET (u32)(18) ++ ++#define PRCM_PM_PWSTCTRL_DSP_PowerState_MASK (u32)(0x3) ++#define PRCM_PM_PWSTCTRL_DSP_PowerState_OFFSET (u32)(0) ++ ++#define PRCM_PM_PWSTCTRL_IVA2_PowerState_MASK (u32)(0x3) ++#define PRCM_PM_PWSTCTRL_IVA2_PowerState_OFFSET (u32)(0) ++ ++#define PRCM_PM_PWSTST_DSP_InTransition_MASK (u32)(0x100000) ++#define PRCM_PM_PWSTST_DSP_InTransition_OFFSET (u32)(20) ++ ++#define PRCM_PM_PWSTST_IVA2_InTransition_MASK (u32)(0x100000) ++#define PRCM_PM_PWSTST_IVA2_InTransition_OFFSET (u32)(20) ++ ++#define PRCM_PM_PWSTST_DSP_PowerStateSt_MASK (u32)(0x3) ++#define PRCM_PM_PWSTST_DSP_PowerStateSt_OFFSET (u32)(0) ++ ++#define PRCM_PM_PWSTST_IVA2_PowerStateSt_MASK (u32)(0x3) ++#define PRCM_PM_PWSTST_IVA2_PowerStateSt_OFFSET (u32)(0) ++ ++#define CM_FCLKEN_PER_OFFSET (u32)(0x0) ++#define CM_FCLKEN_PER_GPT5_OFFSET (u32)(6) ++#define CM_FCLKEN_PER_GPT5_MASK (u32)(0x40) ++ ++#define CM_FCLKEN_PER_GPT6_OFFSET (u32)(7) ++#define CM_FCLKEN_PER_GPT6_MASK (u32)(0x80) ++ ++#define CM_ICLKEN_PER_OFFSET (u32)(0x10) ++#define CM_ICLKEN_PER_GPT5_OFFSET (u32)(6) ++#define CM_ICLKEN_PER_GPT5_MASK (u32)(0x40) ++ ++#define CM_ICLKEN_PER_GPT6_OFFSET (u32)(7) ++#define CM_ICLKEN_PER_GPT6_MASK (u32)(0x80) ++ ++#define CM_CLKSEL_PER_GPT5_OFFSET (u32)(3) ++#define CM_CLKSEL_PER_GPT5_MASK (u32)(0x8) ++ ++#define CM_CLKSEL_PER_GPT6_OFFSET (u32)(4) ++#define CM_CLKSEL_PER_GPT6_MASK (u32)(0x10) ++ ++ ++#define CM_FCLKEN_IVA2_OFFSET (u32)(0x0) ++#define CM_FCLKEN_IVA2_EN_MASK (u32)(0x1) ++#define CM_FCLKEN_IVA2_EN_OFFSET (u32)(0x0) ++ ++#define CM_IDLEST_IVA2_OFFSET (u32)(0x20) ++#define CM_IDLEST_IVA2_ST_IVA2_MASK (u32) (0x01) ++#define CM_IDLEST_IVA2_ST_IVA2_OFFSET (u32) (0x00) ++ ++#define CM_FCLKEN1_CORE_OFFSET (u32)(0xA00) ++ ++#define CM_ICLKEN1_CORE_OFFSET (u32)(0xA10) ++#define CM_ICLKEN1_CORE_EN_MAILBOXES_MASK (u32)(0x00000080) /* bit 7 */ ++#define CM_ICLKEN1_CORE_EN_MAILBOXES_OFFSET (u32)(7) ++ ++#define CM_CLKSTCTRL_IVA2_OFFSET (u32)(0x0) ++#define CM_CLKSTCTRL_IVA2_MASK (u32)(0x3) ++ ++ ++#define PRM_RSTCTRL_IVA2_OFFSET (u32)(0x50) ++#define PRM_RSTCTRL_IVA2_RST1_MASK (u32)(0x1) ++#define PRM_RSTCTRL_IVA2_RST1_OFFSET (u32)(0x0) ++#define PRM_RSTCTRL_IVA2_RST2_MASK (u32)(0x2) ++#define PRM_RSTCTRL_IVA2_RST2_OFFSET (u32)(0x1) ++#define PRM_RSTCTRL_IVA2_RST3_MASK (u32)(0x4) ++#define PRM_RSTCTRL_IVA2_RST3_OFFSET (u32)(0x2) ++ ++ ++/* The following represent the enumerated values for each bitfield */ ++ ++enum PRCMPRCM_CLKCFG_CTRLValid_configE { ++ PRCMPRCM_CLKCFG_CTRLValid_configUpdated = 0x0000, ++ PRCMPRCM_CLKCFG_CTRLValid_configClk_valid = 0x0001 ++} ; ++ ++enum PRCMCM_CLKSEL2_CORECLKSEL_GPT8E { ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT832k = 0x0000, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT8Sys = 0x0001, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT8Ext = 0x0002, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT8Reserved = 0x0003 ++} ; ++ ++enum PRCMCM_CLKSEL2_CORECLKSEL_GPT7E { ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT732k = 0x0000, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT7Sys = 0x0001, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT7Ext = 0x0002, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT7Reserved = 0x0003 ++} ; ++ ++enum PRCMCM_CLKSEL2_CORECLKSEL_GPT6E { ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT632k = 0x0000, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT6Sys = 0x0001, ++ 
PRCMCM_CLKSEL2_CORECLKSEL_GPT6Ext = 0x0002, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT6Reserved = 0x0003 ++} ; ++ ++enum PRCMCM_CLKSEL2_CORECLKSEL_GPT5E { ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT532k = 0x0000, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT5Sys = 0x0001, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT5Ext = 0x0002, ++ PRCMCM_CLKSEL2_CORECLKSEL_GPT5Reserved = 0x0003 ++} ; ++ ++enum PRCMPM_PWSTCTRL_DSPPowerStateE { ++ PRCMPM_PWSTCTRL_DSPPowerStateON = 0x0000, ++ PRCMPM_PWSTCTRL_DSPPowerStateRET = 0x0001, ++ PRCMPM_PWSTCTRL_DSPPowerStateReserved = 0x0002, ++ PRCMPM_PWSTCTRL_DSPPowerStateOFF = 0x0003 ++} ; ++ ++enum PRCMPM_PWSTCTRL_IVA2PowerStateE { ++ PRCMPM_PWSTCTRL_IVA2PowerStateON = 0x0003, ++ PRCMPM_PWSTCTRL_IVA2PowerStateRET = 0x0001, ++ PRCMPM_PWSTCTRL_IVA2PowerStateReserved = 0x0002, ++ PRCMPM_PWSTCTRL_IVA2PowerStateOFF = 0x0000 ++} ; ++ ++#endif /* _PRCM_ACC_INT_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMRegAcM.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/PRCMRegAcM.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMRegAcM.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/hw/PRCMRegAcM.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,670 @@ ++/* ++ * PRCMRegAcM.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#ifndef _PRCM_REG_ACM_H ++#define _PRCM_REG_ACM_H ++ ++#include ++#include ++ ++#include ++ ++#include "PRCMAccInt.h" ++ ++#if defined(USE_LEVEL_1_MACROS) ++ ++#define PRCMPRCM_CLKCFG_CTRLValid_configWriteClk_valid32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_PRCM_CLKCFG_CTRL_OFFSET;\ ++ const u32 newValue = \ ++ (u32)PRCMPRCM_CLKCFG_CTRLValid_configClk_valid <<\ ++ PRCM_PRCM_CLKCFG_CTRL_Valid_config_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(\ ++ EASIL1_PRCMPRCM_CLKCFG_CTRLValid_configWriteClk_valid32);\ ++ data &= ~(PRCM_PRCM_CLKCFG_CTRL_Valid_config_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define CM_FCLKEN_PERReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+CM_FCLKEN_PER_OFFSET)) ++ ++ ++#define CM_ICLKEN_PERReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+CM_ICLKEN_PER_OFFSET)) ++ ++ ++#define CM_FCLKEN_PER_GPT5WriteRegister32(baseAddress,value)\ ++{\ ++ const u32 offset = CM_FCLKEN_PER_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_CM_FCLKEN_PER_GPT5WriteRegister32);\ ++ data &= ~(CM_FCLKEN_PER_GPT5_MASK);\ ++ newValue <<= CM_FCLKEN_PER_GPT5_OFFSET;\ ++ newValue &= CM_FCLKEN_PER_GPT5_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ ++} ++ ++ ++#define CM_FCLKEN_PER_GPT6WriteRegister32(baseAddress,value)\ ++{\ ++ const u32 offset = CM_FCLKEN_PER_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ 
register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_CM_FCLKEN_PER_GPT5WriteRegister32);\ ++ data &= ~(CM_FCLKEN_PER_GPT6_MASK);\ ++ newValue <<= CM_FCLKEN_PER_GPT6_OFFSET;\ ++ newValue &= CM_FCLKEN_PER_GPT6_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ ++} ++ ++ ++#define CM_ICLKEN_PER_GPT5WriteRegister32(baseAddress,value)\ ++{\ ++ const u32 offset = CM_ICLKEN_PER_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_CM_ICLKEN_PER_GPT5WriteRegister32);\ ++ data &= ~(CM_ICLKEN_PER_GPT5_MASK);\ ++ newValue <<= CM_ICLKEN_PER_GPT5_OFFSET;\ ++ newValue &= CM_ICLKEN_PER_GPT5_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ ++} ++ ++ ++#define CM_ICLKEN_PER_GPT6WriteRegister32(baseAddress,value)\ ++{\ ++ const u32 offset = CM_ICLKEN_PER_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_CM_ICLKEN_PER_GPT5WriteRegister32);\ ++ data &= ~(CM_ICLKEN_PER_GPT6_MASK);\ ++ newValue <<= CM_ICLKEN_PER_GPT6_OFFSET;\ ++ newValue &= CM_ICLKEN_PER_GPT6_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ ++} ++ ++ ++#define CM_FCLKEN1_COREReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+CM_FCLKEN1_CORE_OFFSET)) ++ ++ ++#define PRCMCM_FCLKEN1_COREEN_GPT8Write32(baseAddress,value)\ ++{\ ++ const u32 offset = PRCM_CM_FCLKEN1_CORE_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREEN_GPT8Write32);\ ++ data &= ~(PRCM_CM_FCLKEN1_CORE_EN_GPT8_MASK);\ ++ newValue <<= PRCM_CM_FCLKEN1_CORE_EN_GPT8_OFFSET;\ ++ newValue &= PRCM_CM_FCLKEN1_CORE_EN_GPT8_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_FCLKEN1_COREEN_GPT7Write32(baseAddress,value)\ ++{\ ++ const u32 offset = PRCM_CM_FCLKEN1_CORE_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREEN_GPT7Write32);\ ++ data &= ~(PRCM_CM_FCLKEN1_CORE_EN_GPT7_MASK);\ ++ newValue <<= PRCM_CM_FCLKEN1_CORE_EN_GPT7_OFFSET;\ ++ newValue &= PRCM_CM_FCLKEN1_CORE_EN_GPT7_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define CM_ICLKEN1_COREReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+CM_ICLKEN1_CORE_OFFSET)) ++ ++ ++#define CM_ICLKEN1_COREEN_MAILBOXESWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = CM_ICLKEN1_CORE_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREEN_MAILBOXESWrite32);\ ++ data &= ~(CM_ICLKEN1_CORE_EN_MAILBOXES_MASK);\ ++ newValue <<= CM_ICLKEN1_CORE_EN_MAILBOXES_OFFSET;\ ++ newValue &= CM_ICLKEN1_CORE_EN_MAILBOXES_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_ICLKEN1_COREEN_GPT8Write32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_ICLKEN1_CORE_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ 
register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREEN_GPT8Write32);\ ++ data &= ~(PRCM_CM_ICLKEN1_CORE_EN_GPT8_MASK);\ ++ newValue <<= PRCM_CM_ICLKEN1_CORE_EN_GPT8_OFFSET;\ ++ newValue &= PRCM_CM_ICLKEN1_CORE_EN_GPT8_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_ICLKEN1_COREEN_GPT7Write32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_ICLKEN1_CORE_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREEN_GPT7Write32);\ ++ data &= ~(PRCM_CM_ICLKEN1_CORE_EN_GPT7_MASK);\ ++ newValue <<= PRCM_CM_ICLKEN1_CORE_EN_GPT7_OFFSET;\ ++ newValue &= PRCM_CM_ICLKEN1_CORE_EN_GPT7_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT8Write32k32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT832k <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8Write32k32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteSys32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT8Sys <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteSys32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteExt32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT8Ext <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteExt32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT7Write32k32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT732k <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7Write32k32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteSys32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT7Sys <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteSys32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteExt32(baseAddress)\ ++{\ ++ const u32 offset = 
PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT7Ext <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteExt32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteSys32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT6Sys <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteSys32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteExt32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT6Ext <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteExt32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define CM_CLKSEL_PER_GPT5Write32k32(baseAddress)\ ++{\ ++ const u32 offset = CM_CLKSEL_PER_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT532k <<\ ++ CM_CLKSEL_PER_GPT5_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_CM_CLKSEL_PER_GPT5Write32k32);\ ++ data &= ~(CM_CLKSEL_PER_GPT5_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define CM_CLKSEL_PER_GPT6Write32k32(baseAddress)\ ++{\ ++ const u32 offset = CM_CLKSEL_PER_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT532k <<\ ++ CM_CLKSEL_PER_GPT6_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_CM_CLKSEL_PER_GPT6Write32k32);\ ++ data &= ~(CM_CLKSEL_PER_GPT6_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteSys32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT5Sys <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteSys32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteExt32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ ++ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT5Ext <<\ ++ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_OFFSET;\ ++ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteExt32);\ ++ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL1_PLLAPLLs_ClkinRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL1_PLLAPLLs_ClkinRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ 
(PRCM_CM_CLKSEL1_PLL_OFFSET)))) &\ ++ PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_MASK) >>\ ++ PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_OFFSET)) ++ ++ ++#define CM_FCLKEN_IVA2EN_DSPWrite32(baseAddress,value)\ ++{\ ++ const u32 offset = CM_FCLKEN_IVA2_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN_DSPEN_DSPWrite32);\ ++ data &= ~(CM_FCLKEN_IVA2_EN_MASK);\ ++ newValue <<= CM_FCLKEN_IVA2_EN_OFFSET;\ ++ newValue &= CM_FCLKEN_IVA2_EN_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_ICLKEN_DSPEN_DSP_IPIWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_ICLKEN_DSP_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN_DSPEN_DSP_IPIWrite32);\ ++ data &= ~(PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_MASK);\ ++ newValue <<= PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_OFFSET;\ ++ newValue &= PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_IDLEST_DSPReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_IDLEST_DSPReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+PRCM_CM_IDLEST_DSP_OFFSET)) ++ ++ ++#define PRCMCM_IDLEST_DSPST_IPIRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_IDLEST_DSPST_IPIRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (PRCM_CM_IDLEST_DSP_OFFSET)))) &\ ++ PRCM_CM_IDLEST_DSP_ST_IPI_MASK) >>\ ++ PRCM_CM_IDLEST_DSP_ST_IPI_OFFSET)) ++ ++ ++#define PRM_IDLEST_IVA2ST_IVA2Read32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_IDLEST_DSPST_DSPRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (CM_IDLEST_IVA2_OFFSET)))) &\ ++ CM_IDLEST_IVA2_ST_IVA2_MASK) >>\ ++ CM_IDLEST_IVA2_ST_IVA2_OFFSET)) ++ ++ ++#define PRCMCM_AUTOIDLE_DSPAUTO_DSP_IPIWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_AUTOIDLE_DSP_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_AUTOIDLE_DSPAUTO_DSP_IPIWrite32);\ ++ data &= ~(PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_MASK);\ ++ newValue <<= PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_OFFSET;\ ++ newValue &= PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL_DSPSYNC_DSPWrite32(baseAddress,value)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL_DSP_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL_DSPSYNC_DSPWrite32);\ ++ data &= ~(PRCM_CM_CLKSEL_DSP_SYNC_DSP_MASK);\ ++ newValue <<= PRCM_CM_CLKSEL_DSP_SYNC_DSP_OFFSET;\ ++ newValue &= PRCM_CM_CLKSEL_DSP_SYNC_DSP_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL_DSPCLKSEL_DSP_IFWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL_DSP_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSP_IFWrite32);\ ++ data &= ~(PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_MASK);\ ++ newValue <<= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_OFFSET;\ ++ newValue &= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, 
(u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSEL_DSPCLKSEL_DSPWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSEL_DSP_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSPWrite32);\ ++ data &= ~(PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_MASK);\ ++ newValue <<= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_OFFSET;\ ++ newValue &= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSTCTRL_IVA2WriteRegister32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSTCTRL_IVA2_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSTCTRL_IVA2WriteRegister32);\ ++ data &= ~(CM_CLKSTCTRL_IVA2_MASK);\ ++ newValue <<= CM_CLKSTCTRL_IVA2_OFFSET;\ ++ newValue &= CM_CLKSTCTRL_IVA2_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define PRCMCM_CLKSTCTRL_DSPAutostate_DSPRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (PRCM_CM_CLKSTCTRL_DSP_OFFSET)))) &\ ++ PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK) >>\ ++ PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_OFFSET)) ++ ++ ++#define PRCMCM_CLKSTCTRL_DSPAutostate_DSPWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_CM_CLKSTCTRL_DSP_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPWrite32);\ ++ data &= ~(PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK);\ ++ newValue <<= PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_OFFSET;\ ++ newValue &= PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMRM_RSTCTRL_DSPReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPReadRegister32),\ ++ __raw_readl(((baseAddress))+PRCM_RM_RSTCTRL_DSP_OFFSET)) ++ ++ ++#define PRM_RSTCTRL_IVA2RST1_DSPWrite32(baseAddress,value)\ ++{\ ++ const u32 offset = PRM_RSTCTRL_IVA2_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32);\ ++ data &= ~(PRM_RSTCTRL_IVA2_RST1_MASK);\ ++ newValue <<= PRM_RSTCTRL_IVA2_RST1_OFFSET;\ ++ newValue &= PRM_RSTCTRL_IVA2_RST1_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define PRM_RSTCTRL_IVA2RST2_DSPWrite32(baseAddress,value)\ ++{\ ++ const u32 offset = PRM_RSTCTRL_IVA2_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32);\ ++ data &= ~(PRM_RSTCTRL_IVA2_RST2_MASK);\ ++ newValue <<= PRM_RSTCTRL_IVA2_RST2_OFFSET;\ ++ newValue &= PRM_RSTCTRL_IVA2_RST2_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define PRM_RSTCTRL_IVA2RST3_DSPWrite32(baseAddress,value)\ ++{\ ++ const u32 offset = PRM_RSTCTRL_IVA2_OFFSET;\ ++ register u32 data =\ ++ __raw_readl(((baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32);\ ++ data &= ~(PRM_RSTCTRL_IVA2_RST3_MASK);\ ++ newValue 
<<= PRM_RSTCTRL_IVA2_RST3_OFFSET;\ ++ newValue &= PRM_RSTCTRL_IVA2_RST3_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (baseAddress)+offset);\ ++} ++ ++ ++#define PRCMRM_RSTST_DSPReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTST_DSPReadRegister32),\ ++ __raw_readl(((baseAddress))+PRCM_RM_RSTST_DSP_OFFSET)) ++ ++ ++#define PRCMRM_RSTST_DSPWriteRegister32(baseAddress,value)\ ++{\ ++ const u32 offset = PRCM_RM_RSTST_DSP_OFFSET;\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTST_DSPWriteRegister32);\ ++ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ ++} ++ ++ ++#define PRCMPM_PWSTCTRL_DSPForceStateWrite32(baseAddress, value)\ ++{\ ++ const u32 offset = PRCM_PM_PWSTCTRL_DSP_OFFSET;\ ++ register u32 data = \ ++ __raw_readl(((u32)(baseAddress))+offset);\ ++ register u32 newValue = ((u32)(value));\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_DSPForceStateWrite32);\ ++ data &= ~(PRCM_PM_PWSTCTRL_DSP_ForceState_MASK);\ ++ newValue <<= PRCM_PM_PWSTCTRL_DSP_ForceState_OFFSET;\ ++ newValue &= PRCM_PM_PWSTCTRL_DSP_ForceState_MASK;\ ++ newValue |= data;\ ++ __raw_writel(newValue, (u32)(baseAddress)+offset);\ ++} ++ ++ ++#define PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_PM_PWSTCTRL_IVA2_OFFSET;\ ++ const u32 newValue = (u32)PRCMPM_PWSTCTRL_IVA2PowerStateON <<\ ++ PRCM_PM_PWSTCTRL_IVA2_PowerState_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32);\ ++ data &= ~(PRCM_PM_PWSTCTRL_IVA2_PowerState_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (baseAddress)+offset);\ ++} ++ ++ ++#define PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_PM_PWSTCTRL_IVA2_OFFSET;\ ++ const u32 newValue = (u32)PRCMPM_PWSTCTRL_IVA2PowerStateOFF <<\ ++ PRCM_PM_PWSTCTRL_IVA2_PowerState_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32);\ ++ data &= ~(PRCM_PM_PWSTCTRL_IVA2_PowerState_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (baseAddress)+offset);\ ++} ++ ++ ++#define PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32(baseAddress)\ ++{\ ++ const u32 offset = PRCM_PM_PWSTCTRL_DSP_OFFSET;\ ++ const u32 newValue = (u32)PRCMPM_PWSTCTRL_DSPPowerStateRET <<\ ++ PRCM_PM_PWSTCTRL_DSP_PowerState_OFFSET;\ ++ register u32 data = __raw_readl((baseAddress)+offset);\ ++ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32);\ ++ data &= ~(PRCM_PM_PWSTCTRL_DSP_PowerState_MASK);\ ++ data |= newValue;\ ++ __raw_writel(data, (baseAddress)+offset);\ ++} ++ ++ ++#define PRCMPM_PWSTST_DSPReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_DSPReadRegister32),\ ++ __raw_readl(((u32)(baseAddress))+PRCM_PM_PWSTST_DSP_OFFSET)) ++ ++ ++#define PRCMPM_PWSTST_IVA2ReadRegister32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_IVA2ReadRegister32),\ ++ __raw_readl((baseAddress) + PRCM_PM_PWSTST_IVA2_OFFSET)) ++ ++ ++#define PRCMPM_PWSTST_DSPInTransitionRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_DSPInTransitionRead32),\ ++ (((__raw_readl((((u32)(baseAddress))+\ ++ (PRCM_PM_PWSTST_DSP_OFFSET)))) &\ ++ PRCM_PM_PWSTST_DSP_InTransition_MASK) >>\ ++ PRCM_PM_PWSTST_DSP_InTransition_OFFSET)) ++ ++ ++#define PRCMPM_PWSTST_IVA2InTransitionRead32(baseAddress)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_IVA2InTransitionRead32),\ ++ (((__raw_readl((((baseAddress))+\ ++ 
(PRCM_PM_PWSTST_IVA2_OFFSET)))) &\ ++ PRCM_PM_PWSTST_IVA2_InTransition_MASK) >>\ ++ PRCM_PM_PWSTST_IVA2_InTransition_OFFSET)) ++ ++ ++#define PRCMPM_PWSTST_DSPPowerStateStGet32(var)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_DSPPowerStateStGet32),\ ++ (u32)((((u32)(var)) & PRCM_PM_PWSTST_DSP_PowerStateSt_MASK) >>\ ++ PRCM_PM_PWSTST_DSP_PowerStateSt_OFFSET)) ++ ++ ++#define PRCMPM_PWSTST_IVA2PowerStateStGet32(var)\ ++ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_IVA2PowerStateStGet32),\ ++ (u32)((((u32)(var)) & PRCM_PM_PWSTST_IVA2_PowerStateSt_MASK) >>\ ++ PRCM_PM_PWSTST_IVA2_PowerStateSt_OFFSET)) ++ ++ ++#endif /* USE_LEVEL_1_MACROS */ ++ ++#endif /* _PRCM_REG_ACM_H */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kbuild kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/Kbuild +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kbuild 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/Kbuild 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,39 @@ ++obj-$(CONFIG_MPU_BRIDGE) += bridgedriver.o ++ ++libgen = gen/gb.o gen/gt.o gen/gs.o gen/gh.o gen/_gt_para.o gen/uuidutil.o ++libservices = services/csl.o services/mem.o services/list.o services/dpc.o \ ++ services/kfile.o services/sync.o \ ++ services/clk.o services/cfg.o services/reg.o \ ++ services/regsup.o services/ntfy.o \ ++ services/dbg.o services/services.o ++libwmd = wmd/chnl_sm.o wmd/msg_sm.o wmd/io_sm.o wmd/tiomap3430.o \ ++ wmd/tiomap3430_pwr.o wmd/tiomap_sm.o wmd/tiomap_io.o \ ++ wmd/mmu_fault.o wmd/ue_deh.o ++libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/wcd.o \ ++ pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o ++librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ ++ rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ ++ rmgr/nldr.o rmgr/drv_interface.o ++libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o ++libhw = hw/hw_prcm.o hw/hw_dspssC64P.o hw/hw_mmu.o hw/hw_mbox.o ++ ++bridgedriver-objs = $(libgen) $(libservices) $(libwmd) $(libpmgr) $(librmgr) \ ++ $(libdload) $(libhw) ++ ++# Debug ++ifeq ($(CONFIG_BRIDGE_DEBUG),y) ++ccflags-y += -DGT_TRACE -DDEBUG ++endif ++ ++#Machine dependent ++ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ ++ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \ ++ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS ++ ++#Header files ++ccflags-y += -Idrivers/dsp/bridge/services ++ccflags-y += -Idrivers/dsp/bridge/wmd ++ccflags-y += -Idrivers/dsp/bridge/pmgr ++ccflags-y += -Idrivers/dsp/bridge/rmgr ++ccflags-y += -Idrivers/dsp/bridge/hw ++ccflags-y += -Iarch/arm +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kconfig kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/Kconfig +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/Kconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,36 @@ ++# ++# DSP Bridge Driver Support ++# ++ ++menuconfig MPU_BRIDGE ++ tristate "DSP Bridge driver" ++ default n ++ help ++ DSP/BIOS Bridge is designed for platforms that contain a GPP and ++ one or more attached DSPs. The GPP is considered the master or ++ "host" processor, and the attached DSPs are processing resources ++ that can be utilized by applications and drivers running on the GPP. 
++ ++config BRIDGE_DVFS ++ bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)" ++ depends on MPU_BRIDGE && OMAP_PM_SRF ++ default n ++ help ++ DVFS allows DSP Bridge to initiate the operating point change to ++ scale the chip voltage and frequency in order to match the ++ performance and power consumption to the current processing ++ requirements. ++ ++config BRIDGE_MEMPOOL_SIZE ++ hex "Physical memory pool size (Byte)" ++ depends on MPU_BRIDGE ++ default 0x600000 ++ help ++ Allocate specified size of memory at booting time to avoid allocation ++ failure under heavy memory fragmentation after some use time. ++ ++config BRIDGE_DEBUG ++ bool "DSP Bridge Debug Support" ++ depends on MPU_BRIDGE ++ help ++ Say Y to enable Bridge debugging capabilities +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cmm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/cmm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cmm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/cmm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1291 @@ ++/* ++ * cmm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cmm.c ======== ++ * Purpose: ++ * The Communication(Shared) Memory Management(CMM) module provides ++ * shared memory management services for DSP/BIOS Bridge data streaming ++ * and messaging. ++ * ++ * Multiple shared memory segments can be registered with CMM. ++ * Each registered SM segment is represented by a SM "allocator" that ++ * describes a block of physically contiguous shared memory used for ++ * future allocations by CMM. ++ * ++ * Memory is coelesced back to the appropriate heap when a buffer is ++ * freed. ++ * ++ * Public Functions: ++ * CMM_CallocBuf ++ * CMM_Create ++ * CMM_Destroy ++ * CMM_Exit ++ * CMM_FreeBuf ++ * CMM_GetHandle ++ * CMM_GetInfo ++ * CMM_Init ++ * CMM_RegisterGPPSMSeg ++ * CMM_UnRegisterGPPSMSeg ++ * ++ * The CMM_Xlator[xxx] routines below are used by Node and Stream ++ * to perform SM address translation to the client process address space. ++ * A "translator" object is created by a node/stream for each SM seg used. ++ * ++ * Translator Routines: ++ * CMM_XlatorAllocBuf ++ * CMM_XlatorCreate ++ * CMM_XlatorDelete ++ * CMM_XlatorFreeBuf ++ * CMM_XlatorInfo ++ * CMM_XlatorTranslate ++ * ++ * Private Functions: ++ * AddToFreeList ++ * GetAllocator ++ * GetFreeBlock ++ * GetNode ++ * GetSlot ++ * UnRegisterGPPSMSeg ++ * ++ * Notes: ++ * Va: Virtual address. ++ * Pa: Physical or kernel system address. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 16-Feb-2002 ag Code review cleanup. ++ *! PreOMAP address translation no longner supported. ++ *! 30-Jan-2002 ag Updates to CMM_XlatorTranslate() per TII, ANSI C++ ++ *! warnings. ++ *! 27-Jan-2002 ag Removed unused CMM_[Alloc][Free]Desc() & #ifdef USELOOKUP, ++ *! & unused VALIDATECMM and VaPaConvert(). ++ *! Removed bFastXlate from CMM_XLATOR. Always fast lookup. ++ *! 
03-Jan-2002 ag Clear SM in CMM_AllocBuf(). Renamed to CMM_CallocBuf(). ++ *! 13-Nov-2001 ag Now delete pNodeFreeListHead and nodes in CMM_Destroy(). ++ *! 28-Aug-2001 ag CMM_GetHandle() returns CMM Mgr hndle given HPROCESSOR. ++ *! Removed unused CMM_[Un]RegisterDSPSMSeg() & ++ * CMM_[Un}ReserveVirtSpace fxns. Some cleanup. ++ *! 12-Aug-2001 ag Exposed CMM_UnRegisterGPP[DSP]SMSeg. ++ *! 13-Feb-2001 kc DSP/BIOS Bridge name update. ++ *! 21-Dec-2000 rr GetFreeBlock checks for pAllocator. ++ *! 09-Dec-2000 ag Added GPPPA2DSPPA, DSPPA2GPPPA macros. ++ *! 05-Dec-2000 ag CMM_XlatorDelete() optionally frees SM bufs and descriptors. ++ *! 30-Oct-2000 ag Buf size bug fixed in CMM_AllocBuf() causing leak. ++ *! Revamped XlatorTranslate() routine. ++ *! 10-Oct-2000 ag Added CMM_Xlator[xxx] functions. ++ *! 02-Aug-2000 ag Created. ++ *! ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++/* Object signatures */ ++#define CMMSIGNATURE 0x004d4d43 /* "CMM" (in reverse) */ ++#define SMEMSIGNATURE 0x4D454D53 /* "SMEM" SM space */ ++#define CMMXLATESIGNATURE 0x584d4d43 /* "CMMX" CMM Xlator */ ++ ++#define NEXT_PA(pNode) (pNode->dwPA + pNode->ulSize) ++ ++/* Other bus/platform translations */ ++#define DSPPA2GPPPA(base, x, y) ((x)+(y)) ++#define GPPPA2DSPPA(base, x, y) ((x)-(y)) ++ ++/* ++ * Allocators define a block of contiguous memory used for future allocations. ++ * ++ * sma - shared memory allocator. ++ * vma - virtual memory allocator.(not used). ++ */ ++struct CMM_ALLOCATOR { /* sma */ ++ u32 dwSignature; /* SMA allocator signature SMEMSIGNATURE */ ++ unsigned int dwSmBase; /* Start of physical SM block */ ++ u32 ulSmSize; /* Size of SM block in bytes */ ++ unsigned int dwVmBase; /* Start of VM block. (Dev driver ++ * context for 'sma') */ ++ u32 dwDSPPhysAddrOffset; /* DSP PA to GPP PA offset for this ++ * SM space */ ++ /* CMM_ADDTO[SUBFROM]DSPPA, _POMAPEMIF2DSPBUS */ ++ enum CMM_CNVTTYPE cFactor; ++ unsigned int dwDSPBase; /* DSP virt base byte address */ ++ u32 ulDSPSize; /* DSP seg size in bytes */ ++ struct CMM_OBJECT *hCmmMgr; /* back ref to parent mgr */ ++ struct LST_LIST *pFreeListHead; /* node list of available memory */ ++ struct LST_LIST *pInUseListHead; /* node list of memory in use */ ++} ; ++ ++struct CMM_XLATOR { /* Pa<->Va translator object */ ++ u32 dwSignature; /* "CMMX" */ ++ struct CMM_OBJECT *hCmmMgr; /* CMM object this translator associated */ ++ /* ++ * Client process virtual base address that corresponds to phys SM ++ * base address for translator's ulSegId. ++ * Only 1 segment ID currently supported. ++ */ ++ unsigned int dwVirtBase; /* virtual base address */ ++ u32 ulVirtSize; /* size of virt space in bytes */ ++ u32 ulSegId; /* Segment Id */ ++} ; ++ ++/* CMM Mgr */ ++struct CMM_OBJECT { ++ u32 dwSignature; /* Used for object validation */ ++ /* ++ * Cmm Lock is used to serialize access mem manager for multi-threads. 
++ */ ++ struct SYNC_CSOBJECT *hCmmLock; /* Lock to access cmm mgr */ ++ struct LST_LIST *pNodeFreeListHead; /* Free list of memory nodes */ ++ u32 ulMinBlockSize; /* Min SM block; default 16 bytes */ ++ u32 dwPageSize; /* Memory Page size (1k/4k) */ ++ /* GPP SM segment ptrs */ ++ struct CMM_ALLOCATOR *paGPPSMSegTab[CMM_MAXGPPSEGS]; ++} ; ++ ++/* Default CMM Mgr attributes */ ++static struct CMM_MGRATTRS CMM_DFLTMGRATTRS = { ++ 16 /* ulMinBlockSize, min block size(bytes) allocated by cmm mgr */ ++}; ++ ++/* Default allocation attributes */ ++static struct CMM_ATTRS CMM_DFLTALCTATTRS = { ++ 1 /* ulSegId, default segment Id for allocator */ ++}; ++ ++/* Address translator default attrs */ ++static struct CMM_XLATORATTRS CMM_DFLTXLATORATTRS = { ++ 1, /* ulSegId, does not have to match CMM_DFLTALCTATTRS ulSegId */ ++ 0, /* dwDSPBufs */ ++ 0, /* dwDSPBufSize */ ++ NULL, /* pVmBase */ ++ 0, /* dwVmSize */ ++}; ++ ++/* SM node representing a block of memory. */ ++struct CMM_MNODE { ++ struct LST_ELEM link; /* must be 1st element */ ++ u32 dwPA; /* Phys addr */ ++ u32 dwVA; /* Virtual address in device process context */ ++ u32 ulSize; /* SM block size in bytes */ ++ u32 hClientProc; /* Process that allocated this mem block */ ++} ; ++ ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask CMM_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static u32 cRefs; /* module reference count */ ++ ++/* ----------------------------------- Function Prototypes */ ++static void AddToFreeList(struct CMM_ALLOCATOR *pAllocator, ++ struct CMM_MNODE *pNode); ++static struct CMM_ALLOCATOR *GetAllocator(struct CMM_OBJECT *pCmmMgr, ++ u32 ulSegId); ++static struct CMM_MNODE *GetFreeBlock(struct CMM_ALLOCATOR *pAllocator, ++ u32 uSize); ++static struct CMM_MNODE *GetNode(struct CMM_OBJECT *pCmmMgr, u32 dwPA, ++ u32 dwVA, u32 ulSize); ++/* get available slot for new allocator */ ++static s32 GetSlot(struct CMM_OBJECT *hCmmMgr); ++static void UnRegisterGPPSMSeg(struct CMM_ALLOCATOR *pSMA); ++ ++/* ++ * ======== CMM_CallocBuf ======== ++ * Purpose: ++ * Allocate a SM buffer, zero contents, and return the physical address ++ * and optional driver context virtual address(ppBufVA). ++ * ++ * The freelist is sorted in increasing size order. Get the first ++ * block that satifies the request and sort the remaining back on ++ * the freelist; if large enough. The kept block is placed on the ++ * inUseList. 
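The "must be 1st element" note on struct CMM_MNODE is what makes the (struct LST_ELEM *) casts throughout this file legal: because the link is the first member, a pointer to the node and a pointer to its embedded list element share the same address. A minimal standalone sketch of the same pattern, with hypothetical names in place of the bridge's LST API:

#include <stdio.h>
#include <stddef.h>

/* Generic list element; plays the role of struct LST_ELEM. */
struct list_elem {
        struct list_elem *next;
};

/* Node embeds the element as its FIRST member, like struct CMM_MNODE. */
struct mem_node {
        struct list_elem link;   /* must be 1st element */
        unsigned long pa;        /* physical address of the block */
        unsigned long size;      /* block size in bytes */
};

int main(void)
{
        struct mem_node node = { { NULL }, 0x80000000UL, 4096UL };
        struct list_elem *elem = (struct list_elem *)&node;

        /* Same address: list code can hand back elements and the caller
         * can cast them to the containing node, as cmm.c does. */
        struct mem_node *back = (struct mem_node *)elem;
        printf("pa=%#lx size=%lu\n", back->pa, back->size);
        return 0;
}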
++ */ ++void *CMM_CallocBuf(struct CMM_OBJECT *hCmmMgr, u32 uSize, ++ struct CMM_ATTRS *pAttrs, OUT void **ppBufVA) ++{ ++ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; ++ void *pBufPA = NULL; ++ struct CMM_MNODE *pNode = NULL; ++ struct CMM_MNODE *pNewNode = NULL; ++ struct CMM_ALLOCATOR *pAllocator = NULL; ++ u32 uDeltaSize; ++ u8 *pByte = NULL; ++ s32 cnt; ++ ++ if (pAttrs == NULL) ++ pAttrs = &CMM_DFLTALCTATTRS; ++ ++ if (ppBufVA != NULL) ++ *ppBufVA = NULL; ++ ++ if ((MEM_IsValidHandle(pCmmMgr, CMMSIGNATURE)) && (uSize != 0)) { ++ if (pAttrs->ulSegId > 0) { ++ /* SegId > 0 is SM */ ++ /* get the allocator object for this segment id */ ++ pAllocator = GetAllocator(pCmmMgr, pAttrs->ulSegId); ++ /* keep block size a multiple of ulMinBlockSize */ ++ uSize = ((uSize - 1) & ~(pCmmMgr->ulMinBlockSize - 1)) ++ + pCmmMgr->ulMinBlockSize; ++ SYNC_EnterCS(pCmmMgr->hCmmLock); ++ pNode = GetFreeBlock(pAllocator, uSize); ++ } ++ if (pNode) { ++ uDeltaSize = (pNode->ulSize - uSize); ++ if (uDeltaSize >= pCmmMgr->ulMinBlockSize) { ++ /* create a new block with the leftovers and ++ * add to freelist */ ++ pNewNode = GetNode(pCmmMgr, pNode->dwPA + uSize, ++ pNode->dwVA + uSize, ++ (u32)uDeltaSize); ++ /* leftovers go free */ ++ AddToFreeList(pAllocator, pNewNode); ++ /* adjust our node's size */ ++ pNode->ulSize = uSize; ++ } ++ /* Tag node with client process requesting allocation ++ * We'll need to free up a process's alloc'd SM if the ++ * client process goes away. ++ */ ++ /* Return TGID instead of process handle */ ++ pNode->hClientProc = current->tgid; ++ ++ /* put our node on InUse list */ ++ LST_PutTail(pAllocator->pInUseListHead, ++ (struct LST_ELEM *)pNode); ++ pBufPA = (void *)pNode->dwPA; /* physical address */ ++ /* clear mem */ ++ pByte = (u8 *)pNode->dwVA; ++ for (cnt = 0; cnt < (s32) uSize; cnt++, pByte++) ++ *pByte = 0; ++ ++ if (ppBufVA != NULL) { ++ /* Virtual address */ ++ *ppBufVA = (void *)pNode->dwVA; ++ } ++ } ++ GT_3trace(CMM_debugMask, GT_3CLASS, ++ "CMM_CallocBuf dwPA %x, dwVA %x uSize" ++ "%x\n", pNode->dwPA, pNode->dwVA, uSize); ++ SYNC_LeaveCS(pCmmMgr->hCmmLock); ++ } ++ return pBufPA; ++} ++ ++/* ++ * ======== CMM_Create ======== ++ * Purpose: ++ * Create a communication memory manager object. 
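The size adjustment at the top of CMM_CallocBuf rounds the request up to the next multiple of ulMinBlockSize; the expression relies on that minimum being a power of two (the default is 16). A small standalone check of the arithmetic, outside the driver:

#include <assert.h>
#include <stdio.h>

/* Round size up to the next multiple of min_block (min_block must be a
 * power of two), using the same expression as CMM_CallocBuf. */
static unsigned int round_to_block(unsigned int size, unsigned int min_block)
{
        return ((size - 1) & ~(min_block - 1)) + min_block;
}

int main(void)
{
        assert(round_to_block(1, 16)   == 16);
        assert(round_to_block(16, 16)  == 16);
        assert(round_to_block(17, 16)  == 32);
        assert(round_to_block(100, 16) == 112);
        printf("all rounding checks passed\n");
        return 0;
}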
++ */ ++DSP_STATUS CMM_Create(OUT struct CMM_OBJECT **phCmmMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CMM_MGRATTRS *pMgrAttrs) ++{ ++ struct CMM_OBJECT *pCmmObject = NULL; ++ DSP_STATUS status = DSP_SOK; ++ struct UTIL_SYSINFO sysInfo; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phCmmMgr != NULL); ++ ++ GT_3trace(CMM_debugMask, GT_ENTER, ++ "CMM_Create: phCmmMgr: 0x%x\thDevObject: " ++ "0x%x\tpMgrAttrs: 0x%x\n", phCmmMgr, hDevObject, pMgrAttrs); ++ *phCmmMgr = NULL; ++ /* create, zero, and tag a cmm mgr object */ ++ MEM_AllocObject(pCmmObject, struct CMM_OBJECT, CMMSIGNATURE); ++ if (pCmmObject != NULL) { ++ if (pMgrAttrs == NULL) ++ pMgrAttrs = &CMM_DFLTMGRATTRS; /* set defaults */ ++ ++ /* 4 bytes minimum */ ++ DBC_Assert(pMgrAttrs->ulMinBlockSize >= 4); ++ /* save away smallest block allocation for this cmm mgr */ ++ pCmmObject->ulMinBlockSize = pMgrAttrs->ulMinBlockSize; ++ /* save away the systems memory page size */ ++ sysInfo.dwPageSize = PAGE_SIZE; ++ sysInfo.dwAllocationGranularity = PAGE_SIZE; ++ sysInfo.dwNumberOfProcessors = 1; ++ if (DSP_SUCCEEDED(status)) { ++ GT_1trace(CMM_debugMask, GT_5CLASS, ++ "CMM_Create: Got system page size" ++ "= 0x%x\t\n", sysInfo.dwPageSize); ++ pCmmObject->dwPageSize = sysInfo.dwPageSize; ++ } else { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_Create: failed to get system" ++ "page size\n"); ++ pCmmObject->dwPageSize = 0; ++ status = DSP_EFAIL; ++ } ++ /* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by ++ * MEM_AllocObject */ ++ if (DSP_SUCCEEDED(status)) { ++ /* create node free list */ ++ pCmmObject->pNodeFreeListHead = LST_Create(); ++ if (pCmmObject->pNodeFreeListHead == NULL) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_Create: LST_Create() " ++ "failed \n"); ++ status = DSP_EMEMORY; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_InitializeCS(&pCmmObject->hCmmLock); ++ ++ if (DSP_SUCCEEDED(status)) ++ *phCmmMgr = pCmmObject; ++ else ++ CMM_Destroy(pCmmObject, true); ++ ++ } else { ++ GT_0trace(CMM_debugMask, GT_6CLASS, ++ "CMM_Create: Object Allocation " ++ "Failure(CMM Object)\n"); ++ status = DSP_EMEMORY; ++ } ++ return status; ++} ++ ++/* ++ * ======== CMM_Destroy ======== ++ * Purpose: ++ * Release the communication memory manager resources. 
++ */ ++DSP_STATUS CMM_Destroy(struct CMM_OBJECT *hCmmMgr, bool bForce) ++{ ++ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; ++ struct CMM_INFO tempInfo; ++ DSP_STATUS status = DSP_SOK; ++ s32 nSlot; ++ struct CMM_MNODE *pNode; ++ ++ DBC_Require(cRefs > 0); ++ if (!MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { ++ status = DSP_EHANDLE; ++ return status; ++ } ++ SYNC_EnterCS(pCmmMgr->hCmmLock); ++ /* If not force then fail if outstanding allocations exist */ ++ if (!bForce) { ++ /* Check for outstanding memory allocations */ ++ status = CMM_GetInfo(hCmmMgr, &tempInfo); ++ if (DSP_SUCCEEDED(status)) { ++ if (tempInfo.ulTotalInUseCnt > 0) { ++ /* outstanding allocations */ ++ status = DSP_EFAIL; ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* UnRegister SM allocator */ ++ for (nSlot = 0; nSlot < CMM_MAXGPPSEGS; nSlot++) { ++ if (pCmmMgr->paGPPSMSegTab[nSlot] != NULL) { ++ UnRegisterGPPSMSeg(pCmmMgr-> ++ paGPPSMSegTab[nSlot]); ++ /* Set slot to NULL for future reuse */ ++ pCmmMgr->paGPPSMSegTab[nSlot] = NULL; ++ } ++ } ++ } ++ if (pCmmMgr->pNodeFreeListHead != NULL) { ++ /* Free the free nodes */ ++ while (!LST_IsEmpty(pCmmMgr->pNodeFreeListHead)) { ++ /* (struct LST_ELEM*) pNode = ++ * LST_GetHead(pCmmMgr->pNodeFreeListHead);*/ ++ pNode = (struct CMM_MNODE *)LST_GetHead(pCmmMgr-> ++ pNodeFreeListHead); ++ MEM_Free(pNode); ++ } ++ /* delete NodeFreeList list */ ++ LST_Delete(pCmmMgr->pNodeFreeListHead); ++ } ++ SYNC_LeaveCS(pCmmMgr->hCmmLock); ++ if (DSP_SUCCEEDED(status)) { ++ /* delete CS & cmm mgr object */ ++ SYNC_DeleteCS(pCmmMgr->hCmmLock); ++ MEM_FreeObject(pCmmMgr); ++ } ++ return status; ++} ++ ++/* ++ * ======== CMM_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ */ ++void CMM_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(CMM_debugMask, GT_ENTER, ++ "exiting CMM_Exit,ref count:0x%x\n", cRefs); ++} ++ ++/* ++ * ======== CMM_FreeBuf ======== ++ * Purpose: ++ * Free the given buffer. ++ */ ++DSP_STATUS CMM_FreeBuf(struct CMM_OBJECT *hCmmMgr, void *pBufPA, u32 ulSegId) ++{ ++ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; ++ DSP_STATUS status = DSP_EPOINTER; ++ struct CMM_MNODE *pCurNode = NULL; ++ struct CMM_ALLOCATOR *pAllocator = NULL; ++ struct CMM_ATTRS *pAttrs; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pBufPA != NULL); ++ GT_1trace(CMM_debugMask, GT_ENTER, "CMM_FreeBuf pBufPA %x\n", pBufPA); ++ if (ulSegId == 0) { ++ pAttrs = &CMM_DFLTALCTATTRS; ++ ulSegId = pAttrs->ulSegId; ++ } ++ if (!(MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) || !(ulSegId > 0)) { ++ status = DSP_EHANDLE; ++ return status; ++ } ++ /* get the allocator for this segment id */ ++ pAllocator = GetAllocator(pCmmMgr, ulSegId); ++ if (pAllocator != NULL) { ++ SYNC_EnterCS(pCmmMgr->hCmmLock); ++ pCurNode = (struct CMM_MNODE *)LST_First(pAllocator-> ++ pInUseListHead); ++ while (pCurNode) { ++ if ((u32)pBufPA == pCurNode->dwPA) { ++ /* Found it */ ++ LST_RemoveElem(pAllocator->pInUseListHead, ++ (struct LST_ELEM *)pCurNode); ++ /* back to freelist */ ++ AddToFreeList(pAllocator, pCurNode); ++ status = DSP_SOK; /* all right! */ ++ break; ++ } ++ /* next node. */ ++ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> ++ pInUseListHead, (struct LST_ELEM *)pCurNode); ++ } ++ SYNC_LeaveCS(pCmmMgr->hCmmLock); ++ } ++ return status; ++} ++ ++/* ++ * ======== CMM_GetHandle ======== ++ * Purpose: ++ * Return the communication memory manager object for this device. 
++ * This is typically called from the client process. ++ */ ++DSP_STATUS CMM_GetHandle(DSP_HPROCESSOR hProcessor, ++ OUT struct CMM_OBJECT **phCmmMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phCmmMgr != NULL); ++ if (hProcessor != NULL) ++ status = PROC_GetDevObject(hProcessor, &hDevObject); ++ else ++ hDevObject = DEV_GetFirst(); /* default */ ++ ++ if (DSP_SUCCEEDED(status)) ++ status = DEV_GetCmmMgr(hDevObject, phCmmMgr); ++ ++ return status; ++} ++ ++/* ++ * ======== CMM_GetInfo ======== ++ * Purpose: ++ * Return the current memory utilization information. ++ */ ++DSP_STATUS CMM_GetInfo(struct CMM_OBJECT *hCmmMgr, ++ OUT struct CMM_INFO *pCmmInfo) ++{ ++ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; ++ u32 ulSeg; ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_ALLOCATOR *pAltr; ++ struct CMM_MNODE *pCurNode = NULL; ++ ++ DBC_Require(pCmmInfo != NULL); ++ ++ if (!MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { ++ status = DSP_EHANDLE; ++ return status; ++ } ++ SYNC_EnterCS(pCmmMgr->hCmmLock); ++ pCmmInfo->ulNumGPPSMSegs = 0; /* # of SM segments */ ++ pCmmInfo->ulTotalInUseCnt = 0; /* Total # of outstanding alloc */ ++ pCmmInfo->ulMinBlockSize = pCmmMgr->ulMinBlockSize; /* min block size */ ++ /* check SM memory segments */ ++ for (ulSeg = 1; ulSeg <= CMM_MAXGPPSEGS; ulSeg++) { ++ /* get the allocator object for this segment id */ ++ pAltr = GetAllocator(pCmmMgr, ulSeg); ++ if (pAltr != NULL) { ++ pCmmInfo->ulNumGPPSMSegs++; ++ pCmmInfo->segInfo[ulSeg - 1].dwSegBasePa = ++ pAltr->dwSmBase - pAltr->ulDSPSize; ++ pCmmInfo->segInfo[ulSeg - 1].ulTotalSegSize = ++ pAltr->ulDSPSize + pAltr->ulSmSize; ++ pCmmInfo->segInfo[ulSeg - 1].dwGPPBasePA = ++ pAltr->dwSmBase; ++ pCmmInfo->segInfo[ulSeg - 1].ulGPPSize = ++ pAltr->ulSmSize; ++ pCmmInfo->segInfo[ulSeg - 1].dwDSPBaseVA = ++ pAltr->dwDSPBase; ++ pCmmInfo->segInfo[ulSeg - 1].ulDSPSize = ++ pAltr->ulDSPSize; ++ pCmmInfo->segInfo[ulSeg - 1].dwSegBaseVa = ++ pAltr->dwVmBase - pAltr->ulDSPSize; ++ pCmmInfo->segInfo[ulSeg - 1].ulInUseCnt = 0; ++ pCurNode = (struct CMM_MNODE *)LST_First(pAltr-> ++ pInUseListHead); ++ /* Count inUse blocks */ ++ while (pCurNode) { ++ pCmmInfo->ulTotalInUseCnt++; ++ pCmmInfo->segInfo[ulSeg - 1].ulInUseCnt++; ++ /* next node. */ ++ pCurNode = (struct CMM_MNODE *)LST_Next(pAltr-> ++ pInUseListHead, ++ (struct LST_ELEM *)pCurNode); ++ } ++ } ++ } /* end for */ ++ SYNC_LeaveCS(pCmmMgr->hCmmLock); ++ return status; ++} ++ ++/* ++ * ======== CMM_Init ======== ++ * Purpose: ++ * Initializes private state of CMM module. ++ */ ++bool CMM_Init(void) ++{ ++ bool fRetval = true; ++ ++ DBC_Require(cRefs >= 0); ++ if (cRefs == 0) { ++ /* Set the Trace mask */ ++ /* "CM" for Comm Memory manager */ ++ GT_create(&CMM_debugMask, "CM"); ++ } ++ if (fRetval) ++ cRefs++; ++ ++ GT_1trace(CMM_debugMask, GT_ENTER, ++ "Entered CMM_Init,ref count:0x%x\n", cRefs); ++ ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} ++ ++/* ++ * ======== CMM_RegisterGPPSMSeg ======== ++ * Purpose: ++ * Register a block of SM with the CMM to be used for later GPP SM ++ * allocations. 
++ */ ++DSP_STATUS CMM_RegisterGPPSMSeg(struct CMM_OBJECT *hCmmMgr, u32 dwGPPBasePA, ++ u32 ulSize, u32 dwDSPAddrOffset, ++ enum CMM_CNVTTYPE cFactor, u32 dwDSPBase, ++ u32 ulDSPSize, u32 *pulSegId, ++ u32 dwGPPBaseVA) ++{ ++ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; ++ struct CMM_ALLOCATOR *pSMA = NULL; ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_MNODE *pNewNode; ++ s32 nSlot; ++ ++ DBC_Require(ulSize > 0); ++ DBC_Require(pulSegId != NULL); ++ DBC_Require(dwGPPBasePA != 0); ++ DBC_Require(dwGPPBaseVA != 0); ++ DBC_Require((cFactor <= CMM_ADDTODSPPA) && ++ (cFactor >= CMM_SUBFROMDSPPA)); ++ GT_6trace(CMM_debugMask, GT_ENTER, ++ "CMM_RegisterGPPSMSeg dwGPPBasePA %x " ++ "ulSize %x dwDSPAddrOffset %x dwDSPBase %x ulDSPSize %x " ++ "dwGPPBaseVA %x\n", dwGPPBasePA, ulSize, dwDSPAddrOffset, ++ dwDSPBase, ulDSPSize, dwGPPBaseVA); ++ if (!MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { ++ status = DSP_EHANDLE; ++ return status; ++ } ++ /* make sure we have room for another allocator */ ++ SYNC_EnterCS(pCmmMgr->hCmmLock); ++ nSlot = GetSlot(pCmmMgr); ++ if (nSlot < 0) { ++ /* get a slot number */ ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ /* Check if input ulSize is big enough to alloc at least one block */ ++ if (DSP_SUCCEEDED(status)) { ++ if (ulSize < pCmmMgr->ulMinBlockSize) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_RegisterGPPSMSeg: " ++ "ulSize too small\n"); ++ status = DSP_EINVALIDARG; ++ goto func_end; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* create, zero, and tag an SM allocator object */ ++ MEM_AllocObject(pSMA, struct CMM_ALLOCATOR, SMEMSIGNATURE); ++ } ++ if (pSMA != NULL) { ++ pSMA->hCmmMgr = hCmmMgr; /* ref to parent */ ++ pSMA->dwSmBase = dwGPPBasePA; /* SM Base phys */ ++ pSMA->ulSmSize = ulSize; /* SM segment size in bytes */ ++ pSMA->dwVmBase = dwGPPBaseVA; ++ pSMA->dwDSPPhysAddrOffset = dwDSPAddrOffset; ++ pSMA->cFactor = cFactor; ++ pSMA->dwDSPBase = dwDSPBase; ++ pSMA->ulDSPSize = ulDSPSize; ++ if (pSMA->dwVmBase == 0) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_RegisterGPPSMSeg: Error" ++ "MEM_LinearAddress()\n"); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* return the actual segment identifier */ ++ *pulSegId = (u32) nSlot + 1; ++ /* create memory free list */ ++ pSMA->pFreeListHead = LST_Create(); ++ if (pSMA->pFreeListHead == NULL) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_RegisterGPPSMSeg: " ++ "Out Of Memory \n"); ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* create memory in-use list */ ++ pSMA->pInUseListHead = LST_Create(); ++ if (pSMA->pInUseListHead == NULL) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_RegisterGPPSMSeg: " ++ "LST_Create failed\n"); ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Get a mem node for this hunk-o-memory */ ++ pNewNode = GetNode(pCmmMgr, dwGPPBasePA, ++ pSMA->dwVmBase, ulSize); ++ /* Place node on the SM allocator's free list */ ++ if (pNewNode) { ++ LST_PutTail(pSMA->pFreeListHead, ++ (struct LST_ELEM *)pNewNode); ++ } else { ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ } ++ if (DSP_FAILED(status)) { ++ /* Cleanup allocator */ ++ UnRegisterGPPSMSeg(pSMA); ++ } ++ } else { ++ GT_0trace(CMM_debugMask, GT_6CLASS, ++ "CMM_RegisterGPPSMSeg: SMA Object " ++ "Allocation Failure\n"); ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ /* make entry */ ++ if (DSP_SUCCEEDED(status)) ++ pCmmMgr->paGPPSMSegTab[nSlot] = pSMA; ++ ++func_end: ++ 
SYNC_LeaveCS(pCmmMgr->hCmmLock); ++ return status; ++} ++ ++/* ++ * ======== CMM_UnRegisterGPPSMSeg ======== ++ * Purpose: ++ * UnRegister GPP SM segments with the CMM. ++ */ ++DSP_STATUS CMM_UnRegisterGPPSMSeg(struct CMM_OBJECT *hCmmMgr, u32 ulSegId) ++{ ++ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_ALLOCATOR *pSMA; ++ u32 ulId = ulSegId; ++ ++ DBC_Require(ulSegId > 0); ++ if (MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { ++ if (ulSegId == CMM_ALLSEGMENTS) ++ ulId = 1; ++ ++ if ((ulId > 0) && (ulId <= CMM_MAXGPPSEGS)) { ++ while (ulId <= CMM_MAXGPPSEGS) { ++ SYNC_EnterCS(pCmmMgr->hCmmLock); ++ /* slot = segId-1 */ ++ pSMA = pCmmMgr->paGPPSMSegTab[ulId - 1]; ++ if (pSMA != NULL) { ++ UnRegisterGPPSMSeg(pSMA); ++ /* Set alctr ptr to NULL for future ++ * reuse */ ++ pCmmMgr->paGPPSMSegTab[ulId - 1] = NULL; ++ } else if (ulSegId != CMM_ALLSEGMENTS) { ++ status = DSP_EFAIL; ++ } ++ SYNC_LeaveCS(pCmmMgr->hCmmLock); ++ if (ulSegId != CMM_ALLSEGMENTS) ++ break; ++ ++ ulId++; ++ } /* end while */ ++ } else { ++ status = DSP_EINVALIDARG; ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_UnRegisterGPPSMSeg: Bad " ++ "segment Id\n"); ++ } ++ } else { ++ status = DSP_EHANDLE; ++ } ++ return status; ++} ++ ++/* ++ * ======== UnRegisterGPPSMSeg ======== ++ * Purpose: ++ * UnRegister the SM allocator by freeing all its resources and ++ * nulling cmm mgr table entry. ++ * Note: ++ * This routine is always called within cmm lock crit sect. ++ */ ++static void UnRegisterGPPSMSeg(struct CMM_ALLOCATOR *pSMA) ++{ ++ struct CMM_MNODE *pCurNode = NULL; ++ struct CMM_MNODE *pNextNode = NULL; ++ ++ DBC_Require(pSMA != NULL); ++ if (pSMA->pFreeListHead != NULL) { ++ /* free nodes on free list */ ++ pCurNode = (struct CMM_MNODE *)LST_First(pSMA->pFreeListHead); ++ while (pCurNode) { ++ pNextNode = (struct CMM_MNODE *)LST_Next(pSMA-> ++ pFreeListHead, ++ (struct LST_ELEM *)pCurNode); ++ LST_RemoveElem(pSMA->pFreeListHead, ++ (struct LST_ELEM *)pCurNode); ++ MEM_Free((void *) pCurNode); ++ /* next node. */ ++ pCurNode = pNextNode; ++ } ++ LST_Delete(pSMA->pFreeListHead); /* delete freelist */ ++ /* free nodes on InUse list */ ++ pCurNode = (struct CMM_MNODE *)LST_First(pSMA->pInUseListHead); ++ while (pCurNode) { ++ pNextNode = (struct CMM_MNODE *)LST_Next(pSMA-> ++ pInUseListHead, ++ (struct LST_ELEM *)pCurNode); ++ LST_RemoveElem(pSMA->pInUseListHead, ++ (struct LST_ELEM *)pCurNode); ++ MEM_Free((void *) pCurNode); ++ /* next node. */ ++ pCurNode = pNextNode; ++ } ++ LST_Delete(pSMA->pInUseListHead); /* delete InUse list */ ++ } ++ if ((void *) pSMA->dwVmBase != NULL) ++ MEM_UnmapLinearAddress((void *) pSMA->dwVmBase); ++ ++ /* Free allocator itself */ ++ MEM_FreeObject(pSMA); ++} ++ ++/* ++ * ======== GetSlot ======== ++ * Purpose: ++ * An available slot # is returned. Returns negative on failure. ++ */ ++static s32 GetSlot(struct CMM_OBJECT *pCmmMgr) ++{ ++ s32 nSlot = -1; /* neg on failure */ ++ DBC_Require(pCmmMgr != NULL); ++ /* get first available slot in cmm mgr SMSegTab[] */ ++ for (nSlot = 0; nSlot < CMM_MAXGPPSEGS; nSlot++) { ++ if (pCmmMgr->paGPPSMSegTab[nSlot] == NULL) ++ break; ++ ++ } ++ if (nSlot == CMM_MAXGPPSEGS) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_RegisterGPPSMSeg: Allocator " ++ "entry failure, max exceeded\n"); ++ nSlot = -1; /* failed */ ++ } ++ return nSlot; ++} ++ ++/* ++ * ======== GetNode ======== ++ * Purpose: ++ * Get a memory node from freelist or create a new one. 
++ */ ++static struct CMM_MNODE *GetNode(struct CMM_OBJECT *pCmmMgr, u32 dwPA, ++ u32 dwVA, u32 ulSize) ++{ ++ struct CMM_MNODE *pNode = NULL; ++ ++ DBC_Require(pCmmMgr != NULL); ++ DBC_Require(dwPA != 0); ++ DBC_Require(dwVA != 0); ++ DBC_Require(ulSize != 0); ++ /* Check cmm mgr's node freelist */ ++ if (LST_IsEmpty(pCmmMgr->pNodeFreeListHead)) { ++ pNode = (struct CMM_MNODE *)MEM_Calloc(sizeof(struct CMM_MNODE), ++ MEM_PAGED); ++ } else { ++ /* surely a valid element */ ++ /* (struct LST_ELEM*) pNode = LST_GetHead(pCmmMgr-> ++ * pNodeFreeListHead);*/ ++ pNode = (struct CMM_MNODE *)LST_GetHead(pCmmMgr-> ++ pNodeFreeListHead); ++ } ++ if (pNode == NULL) { ++ GT_0trace(CMM_debugMask, GT_7CLASS, "GetNode: Out Of Memory\n"); ++ } else { ++ LST_InitElem((struct LST_ELEM *) pNode); /* set self */ ++ pNode->dwPA = dwPA; /* Physical addr of start of block */ ++ pNode->dwVA = dwVA; /* Virtual " " */ ++ pNode->ulSize = ulSize; /* Size of block */ ++ } ++ return pNode; ++} ++ ++/* ++ * ======== DeleteNode ======== ++ * Purpose: ++ * Put a memory node on the cmm nodelist for later use. ++ * Doesn't actually delete the node. Heap thrashing friendly. ++ */ ++static void DeleteNode(struct CMM_OBJECT *pCmmMgr, struct CMM_MNODE *pNode) ++{ ++ DBC_Require(pNode != NULL); ++ LST_InitElem((struct LST_ELEM *) pNode); /* init .self ptr */ ++ LST_PutTail(pCmmMgr->pNodeFreeListHead, (struct LST_ELEM *) pNode); ++} ++ ++/* ++ * ====== GetFreeBlock ======== ++ * Purpose: ++ * Scan the free block list and return the first block that satisfies ++ * the size. ++ */ ++static struct CMM_MNODE *GetFreeBlock(struct CMM_ALLOCATOR *pAllocator, ++ u32 uSize) ++{ ++ if (pAllocator) { ++ struct CMM_MNODE *pCurNode = (struct CMM_MNODE *) ++ LST_First(pAllocator->pFreeListHead); ++ while (pCurNode) { ++ if (uSize <= (u32) pCurNode->ulSize) { ++ LST_RemoveElem(pAllocator->pFreeListHead, ++ (struct LST_ELEM *)pCurNode); ++ return pCurNode; ++ } ++ /* next node. */ ++ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> ++ pFreeListHead, (struct LST_ELEM *)pCurNode); ++ } ++ } ++ return NULL; ++} ++ ++/* ++ * ======== AddToFreeList ======== ++ * Purpose: ++ * Coelesce node into the freelist in ascending size order. ++ */ ++static void AddToFreeList(struct CMM_ALLOCATOR *pAllocator, ++ struct CMM_MNODE *pNode) ++{ ++ struct CMM_MNODE *pNodePrev = NULL; ++ struct CMM_MNODE *pNodeNext = NULL; ++ struct CMM_MNODE *pCurNode; ++ u32 dwThisPA; ++ u32 dwNextPA; ++ ++ DBC_Require(pNode != NULL); ++ DBC_Require(pAllocator != NULL); ++ dwThisPA = pNode->dwPA; ++ dwNextPA = NEXT_PA(pNode); ++ pCurNode = (struct CMM_MNODE *)LST_First(pAllocator->pFreeListHead); ++ while (pCurNode) { ++ if (dwThisPA == NEXT_PA(pCurNode)) { ++ /* found the block ahead of this one */ ++ pNodePrev = pCurNode; ++ } else if (dwNextPA == pCurNode->dwPA) { ++ pNodeNext = pCurNode; ++ } ++ if ((pNodePrev == NULL) || (pNodeNext == NULL)) { ++ /* next node. 
*/ ++ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> ++ pFreeListHead, (struct LST_ELEM *)pCurNode); ++ } else { ++ /* got 'em */ ++ break; ++ } ++ } /* while */ ++ if (pNodePrev != NULL) { ++ /* combine with previous block */ ++ LST_RemoveElem(pAllocator->pFreeListHead, ++ (struct LST_ELEM *)pNodePrev); ++ /* grow node to hold both */ ++ pNode->ulSize += pNodePrev->ulSize; ++ pNode->dwPA = pNodePrev->dwPA; ++ pNode->dwVA = pNodePrev->dwVA; ++ /* place node on mgr nodeFreeList */ ++ DeleteNode((struct CMM_OBJECT *)pAllocator->hCmmMgr, pNodePrev); ++ } ++ if (pNodeNext != NULL) { ++ /* combine with next block */ ++ LST_RemoveElem(pAllocator->pFreeListHead, ++ (struct LST_ELEM *)pNodeNext); ++ /* grow da node */ ++ pNode->ulSize += pNodeNext->ulSize; ++ /* place node on mgr nodeFreeList */ ++ DeleteNode((struct CMM_OBJECT *)pAllocator->hCmmMgr, pNodeNext); ++ } ++ /* Now, let's add to freelist in increasing size order */ ++ pCurNode = (struct CMM_MNODE *)LST_First(pAllocator->pFreeListHead); ++ while (pCurNode) { ++ if (pNode->ulSize <= pCurNode->ulSize) ++ break; ++ ++ /* next node. */ ++ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> ++ pFreeListHead, (struct LST_ELEM *)pCurNode); ++ } ++ /* if pCurNode is NULL then add our pNode to the end of the freelist */ ++ if (pCurNode == NULL) { ++ LST_PutTail(pAllocator->pFreeListHead, ++ (struct LST_ELEM *)pNode); ++ } else { ++ /* insert our node before the current traversed node */ ++ LST_InsertBefore(pAllocator->pFreeListHead, ++ (struct LST_ELEM *)pNode, ++ (struct LST_ELEM *)pCurNode); ++ } ++} ++ ++/* ++ * ======== GetAllocator ======== ++ * Purpose: ++ * Return the allocator for the given SM Segid. ++ * SegIds: 1,2,3..max. ++ */ ++static struct CMM_ALLOCATOR *GetAllocator(struct CMM_OBJECT *pCmmMgr, ++ u32 ulSegId) ++{ ++ struct CMM_ALLOCATOR *pAllocator = NULL; ++ ++ DBC_Require(pCmmMgr != NULL); ++ DBC_Require((ulSegId > 0) && (ulSegId <= CMM_MAXGPPSEGS)); ++ pAllocator = pCmmMgr->paGPPSMSegTab[ulSegId - 1]; ++ if (pAllocator != NULL) { ++ /* make sure it's for real */ ++ if (!MEM_IsValidHandle(pAllocator, SMEMSIGNATURE)) { ++ pAllocator = NULL; ++ DBC_Assert(false); ++ } ++ } ++ return pAllocator; ++} ++ ++/* ++ * ======== CMM_XlatorCreate ======== ++ * Purpose: ++ * Create an address translator object. ++ */ ++DSP_STATUS CMM_XlatorCreate(OUT struct CMM_XLATOROBJECT **phXlator, ++ struct CMM_OBJECT *hCmmMgr, ++ struct CMM_XLATORATTRS *pXlatorAttrs) ++{ ++ struct CMM_XLATOR *pXlatorObject = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phXlator != NULL); ++ DBC_Require(hCmmMgr != NULL); ++ GT_3trace(CMM_debugMask, GT_ENTER, ++ "CMM_XlatorCreate: phXlator: 0x%x\t" ++ "phCmmMgr: 0x%x\tpXlAttrs: 0x%x\n", phXlator, ++ hCmmMgr, pXlatorAttrs); ++ *phXlator = NULL; ++ if (pXlatorAttrs == NULL) ++ pXlatorAttrs = &CMM_DFLTXLATORATTRS; /* set defaults */ ++ ++ MEM_AllocObject(pXlatorObject, struct CMM_XLATOR, CMMXLATESIGNATURE); ++ if (pXlatorObject != NULL) { ++ pXlatorObject->hCmmMgr = hCmmMgr; /* ref back to CMM */ ++ pXlatorObject->ulSegId = pXlatorAttrs->ulSegId; /* SM segId */ ++ } else { ++ GT_0trace(CMM_debugMask, GT_6CLASS, ++ "CMM_XlatorCreate: Object Allocation" ++ "Failure(CMM Xlator)\n"); ++ status = DSP_EMEMORY; ++ } ++ if (DSP_SUCCEEDED(status)) ++ *phXlator = (struct CMM_XLATOROBJECT *) pXlatorObject; ++ ++ return status; ++} ++ ++/* ++ * ======== CMM_XlatorDelete ======== ++ * Purpose: ++ * Free the Xlator resources. ++ * VM gets freed later. 
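AddToFreeList merges a freed block with any physically adjacent free blocks before re-inserting it in size order, which is what keeps the shared-memory pool from fragmenting into unusably small pieces. The adjacency test in isolation, on plain address ranges with hypothetical types rather than the driver's list API:

#include <stdio.h>

struct block {
        unsigned long pa;    /* start physical address */
        unsigned long size;  /* length in bytes */
};

#define NEXT_PA(b) ((b).pa + (b).size)   /* same idea as cmm.c's NEXT_PA() */

/* Merge b into a when the two ranges are physically adjacent;
 * returns 1 on merge, 0 otherwise. */
static int try_merge(struct block *a, const struct block *b)
{
        if (NEXT_PA(*a) == b->pa) {              /* b follows a */
                a->size += b->size;
                return 1;
        }
        if (NEXT_PA(*b) == a->pa) {              /* b precedes a */
                a->pa = b->pa;
                a->size += b->size;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct block freed = { 0x1000, 0x100 };
        struct block prev  = { 0x0F00, 0x100 };  /* ends exactly at 0x1000 */
        struct block next  = { 0x1100, 0x200 };  /* starts where freed ends */

        try_merge(&freed, &prev);
        try_merge(&freed, &next);
        printf("coalesced: pa=%#lx size=%#lx\n", freed.pa, freed.size);
        /* prints pa=0xf00 size=0x400 */
        return 0;
}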
++ */ ++DSP_STATUS CMM_XlatorDelete(struct CMM_XLATOROBJECT *hXlator, bool bForce) ++{ ++ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ ++ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { ++ MEM_FreeObject(pXlator); ++ } else { ++ status = DSP_EHANDLE; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== CMM_XlatorAllocBuf ======== ++ */ ++void *CMM_XlatorAllocBuf(struct CMM_XLATOROBJECT *hXlator, void *pVaBuf, ++ u32 uPaSize) ++{ ++ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; ++ void *pBuf = NULL; ++ struct CMM_ATTRS attrs; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hXlator != NULL); ++ DBC_Require(pXlator->hCmmMgr != NULL); ++ DBC_Require(pVaBuf != NULL); ++ DBC_Require(uPaSize > 0); ++ DBC_Require(pXlator->ulSegId > 0); ++ ++ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { ++ attrs.ulSegId = pXlator->ulSegId; ++ *(volatile u32 *)pVaBuf = 0; ++ /* Alloc SM */ ++ pBuf = CMM_CallocBuf(pXlator->hCmmMgr, uPaSize, &attrs, NULL); ++ if (pBuf) { ++ /* convert to translator(node/strm) process Virtual ++ * address */ ++ *(volatile u32 **)pVaBuf = ++ (u32 *)CMM_XlatorTranslate(hXlator, ++ pBuf, CMM_PA2VA); ++ } ++ } ++ return pBuf; ++} ++ ++/* ++ * ======== CMM_XlatorFreeBuf ======== ++ * Purpose: ++ * Free the given SM buffer and descriptor. ++ * Does not free virtual memory. ++ */ ++DSP_STATUS CMM_XlatorFreeBuf(struct CMM_XLATOROBJECT *hXlator, void *pBufVa) ++{ ++ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; ++ DSP_STATUS status = DSP_EFAIL; ++ void *pBufPa = NULL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pBufVa != NULL); ++ DBC_Require(pXlator->ulSegId > 0); ++ ++ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { ++ /* convert Va to Pa so we can free it. */ ++ pBufPa = CMM_XlatorTranslate(hXlator, pBufVa, CMM_VA2PA); ++ if (pBufPa) { ++ status = CMM_FreeBuf(pXlator->hCmmMgr, pBufPa, ++ pXlator->ulSegId); ++ if (DSP_FAILED(status)) { ++ /* Uh oh, this shouldn't happen. Descriptor ++ * gone! */ ++ GT_2trace(CMM_debugMask, GT_7CLASS, ++ "Cannot free DMA/ZCPY buffer" ++ "not allocated by MPU. PA %x, VA %x\n", ++ pBufPa, pBufVa); ++ DBC_Assert(false); /* CMM is leaking mem! */ ++ } ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== CMM_XlatorInfo ======== ++ * Purpose: ++ * Set/Get translator info. 
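The PA<->VA translation that CMM_XlatorAllocBuf and CMM_XlatorFreeBuf rely on (CMM_XlatorTranslate, further down) reduces to base-plus-offset arithmetic: a shared-memory physical address and the client's virtual mapping of the same segment differ by a constant, so each direction is one subtraction, one addition and a range check. A minimal sketch under that assumption, with illustrative field names rather than the driver's structures:

#include <stdio.h>

/* One shared-memory segment as the translator sees it:
 * seg_pa_base..seg_pa_base+seg_size maps to virt_base..virt_base+seg_size. */
struct xlate_seg {
        unsigned long seg_pa_base;  /* physical base of the segment */
        unsigned long virt_base;    /* client-process virtual base */
        unsigned long seg_size;     /* segment length in bytes */
};

/* Physical -> virtual; returns 0 when the result falls outside the mapping,
 * mirroring the range check in CMM_XlatorTranslate. */
static unsigned long pa_to_va(const struct xlate_seg *s, unsigned long pa)
{
        unsigned long off = pa - s->seg_pa_base;
        unsigned long va = s->virt_base + off;

        if (va < s->virt_base || va >= s->virt_base + s->seg_size)
                return 0;
        return va;
}

/* Virtual -> physical is the same offset applied the other way. */
static unsigned long va_to_pa(const struct xlate_seg *s, unsigned long va)
{
        return s->seg_pa_base + (va - s->virt_base);
}

int main(void)
{
        struct xlate_seg seg = { 0x87000000UL, 0x40000000UL, 0x100000UL };
        unsigned long va = pa_to_va(&seg, 0x87001000UL);

        printf("va=%#lx back to pa=%#lx\n", va, va_to_pa(&seg, va));
        return 0;
}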
++ */ ++DSP_STATUS CMM_XlatorInfo(struct CMM_XLATOROBJECT *hXlator, IN OUT u8 **pAddr, ++ u32 ulSize, u32 uSegId, bool bSetInfo) ++{ ++ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pAddr != NULL); ++ DBC_Require((uSegId > 0) && (uSegId <= CMM_MAXGPPSEGS)); ++ ++ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { ++ if (bSetInfo) { ++ /* set translators virtual address range */ ++ pXlator->dwVirtBase = (u32)*pAddr; ++ pXlator->ulVirtSize = ulSize; ++ GT_2trace(CMM_debugMask, GT_3CLASS, ++ "pXlator->dwVirtBase %x, " ++ "ulVirtSize %x\n", pXlator->dwVirtBase, ++ pXlator->ulVirtSize); ++ } else { /* return virt base address */ ++ *pAddr = (u8 *)pXlator->dwVirtBase; ++ } ++ } else { ++ status = DSP_EHANDLE; ++ } ++ return status; ++} ++ ++/* ++ * ======== CMM_XlatorTranslate ======== ++ */ ++void *CMM_XlatorTranslate(struct CMM_XLATOROBJECT *hXlator, void *pAddr, ++ enum CMM_XLATETYPE xType) ++{ ++ u32 dwAddrXlate = 0; ++ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; ++ struct CMM_OBJECT *pCmmMgr = NULL; ++ struct CMM_ALLOCATOR *pAlctr = NULL; ++ u32 dwOffset = 0; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pAddr != NULL); ++ DBC_Require((xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA)); ++ ++ if (!MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) ++ goto loop_cont; ++ ++ pCmmMgr = (struct CMM_OBJECT *)pXlator->hCmmMgr; ++ /* get this translator's default SM allocator */ ++ DBC_Assert(pXlator->ulSegId > 0); ++ pAlctr = pCmmMgr->paGPPSMSegTab[pXlator->ulSegId - 1]; ++ if (!MEM_IsValidHandle(pAlctr, SMEMSIGNATURE)) ++ goto loop_cont; ++ ++ if ((xType == CMM_VA2DSPPA) || (xType == CMM_VA2PA) || ++ (xType == CMM_PA2VA)) { ++ if (xType == CMM_PA2VA) { ++ /* Gpp Va = Va Base + offset */ ++ dwOffset = (u8 *)pAddr - (u8 *)(pAlctr->dwSmBase - ++ pAlctr->ulDSPSize); ++ dwAddrXlate = pXlator->dwVirtBase + dwOffset; ++ /* Check if translated Va base is in range */ ++ if ((dwAddrXlate < pXlator->dwVirtBase) || ++ (dwAddrXlate >= ++ (pXlator->dwVirtBase + pXlator->ulVirtSize))) { ++ dwAddrXlate = 0; /* bad address */ ++ GT_0trace(CMM_debugMask, GT_7CLASS, ++ "CMM_XlatorTranslate: " ++ "Virt addr out of range\n"); ++ } ++ } else { ++ /* Gpp PA = Gpp Base + offset */ ++ dwOffset = (u8 *)pAddr - (u8 *)pXlator->dwVirtBase; ++ dwAddrXlate = pAlctr->dwSmBase - pAlctr->ulDSPSize + ++ dwOffset; ++ } ++ } else { ++ dwAddrXlate = (u32)pAddr; ++ } ++ /*Now convert address to proper target physical address if needed*/ ++ if ((xType == CMM_VA2DSPPA) || (xType == CMM_PA2DSPPA)) { ++ /* Got Gpp Pa now, convert to DSP Pa */ ++ dwAddrXlate = GPPPA2DSPPA((pAlctr->dwSmBase - pAlctr-> ++ ulDSPSize), dwAddrXlate, ++ pAlctr->dwDSPPhysAddrOffset * ++ pAlctr->cFactor); ++ } else if (xType == CMM_DSPPA2PA) { ++ /* Got DSP Pa, convert to GPP Pa */ ++ dwAddrXlate = DSPPA2GPPPA(pAlctr->dwSmBase - pAlctr->ulDSPSize, ++ dwAddrXlate, ++ pAlctr->dwDSPPhysAddrOffset * ++ pAlctr->cFactor); ++ } ++loop_cont: ++ if (!dwAddrXlate) { ++ GT_2trace(CMM_debugMask, GT_7CLASS, ++ "CMM_XlatorTranslate: Can't translate" ++ " address: 0x%x xType %x\n", pAddr, xType); ++ } else { ++ GT_3trace(CMM_debugMask, GT_3CLASS, ++ "CMM_XlatorTranslate: pAddr %x, xType" ++ " %x, dwAddrXlate %x\n", pAddr, xType, dwAddrXlate); ++ } ++ return (void *)dwAddrXlate; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cod.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/cod.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cod.c 1970-01-01 
01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/cod.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,684 @@ ++/* ++ * cod.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cod.c ======== ++ * This module implements DSP code management for the DSP/BIOS Bridge ++ * environment. It is mostly a thin wrapper. ++ * ++ * This module provides an interface for loading both static and ++ * dynamic code objects onto DSP systems. ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Apr-2003 map: Consolidated DBL to DBLL loader name ++ *! 24-Feb-2003 swa: PMGR Code review comments incorporated. ++ *! 18-Apr-2002 jeh: Added DBL function tables. ++ *! 20-Nov-2001 jeh: Removed call to ZL_loadArgs function. ++ *! 19-Oct-2001 jeh: Access DBL as a static library. Added COD_GetBaseLib, ++ *! COD_GetLoader, removed COD_LoadSection, COD_UnloadSection. ++ *! 07-Sep-2001 jeh: Added COD_LoadSection(), COD_UnloadSection(). ++ *! 07-Aug-2001 rr: hMgr->baseLib is updated after zlopen in COD_LoadBase. ++ *! 18-Apr-2001 jeh: Check for fLoaded flag before ZL_unload, to allow ++ *! COD_OpenBase to be used. ++ *! 11-Jan-2001 jeh: Added COD_OpenBase (not used yet, since there is an ++ *! occasional crash). ++ *! 02-Aug-2000 kc: Added COD_ReadSection to COD module. Incorporates use ++ *! of ZL_readSect (new function in ZL module). ++ *! 28-Feb-2000 rr: New GT Usage Implementation ++ *! 08-Dec-1999 ag: Removed x86 specific __asm int 3. ++ *! 02-Oct-1999 ag: Added #ifdef DEBUGINT3COD for debug. ++ *! 20-Sep-1999 ag: Removed call to GT_set(). ++ *! 04-Jun-1997 cr: Added validation of argc/argv pair in COD_LoadBase, as it ++ *! is a requirement to ZL_loadArgs. ++ *! 31-May-1997 cr: Changed COD_LoadBase argc value from u32 to int, added ++ *! DSP_ENOTIMPL return value to COD_Create when attrs != NULL. ++ *! 29-May-1997 cr: Added debugging support. ++ *! 24-Oct-1996 gp: Added COD_GetSection(). ++ *! 18-Jun-1996 gp: Updated GetSymValue() to check for lib; updated E_ codes. ++ *! 12-Jun-1996 gp: Imported CSL_ services for strcpyn(); Added ref counting. ++ *! 20-May-1996 mg: Adapted for new MEM and LDR modules. ++ *! 08-May-1996 mg: Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++/* Include appropriate loader header file */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* magic number for handle validation */ ++#define MAGIC 0xc001beef ++ ++/* macro to validate COD manager handles */ ++#define IsValid(h) ((h) != NULL && (h)->ulMagic == MAGIC) ++ ++/* ++ * ======== COD_MANAGER ======== ++ */ ++struct COD_MANAGER { ++ struct DBLL_TarObj *target; ++ struct DBLL_LibraryObj *baseLib; ++ bool fLoaded; /* Base library loaded? */ ++ u32 ulEntry; ++ struct LDR_MODULE *hDll; ++ struct DBLL_Fxns fxns; ++ struct DBLL_Attrs attrs; ++ char szZLFile[COD_MAXPATHLENGTH]; ++ u32 ulMagic; ++} ; ++ ++/* ++ * ======== COD_LIBRARYOBJ ======== ++ */ ++struct COD_LIBRARYOBJ { ++ struct DBLL_LibraryObj *dbllLib; ++ struct COD_MANAGER *hCodMgr; ++} ; ++ ++static u32 cRefs = 0L; ++ ++#if GT_TRACE ++static struct GT_Mask COD_debugMask = { NULL, NULL }; ++#endif ++ ++static struct DBLL_Fxns dbllFxns = { ++ (DBLL_CloseFxn) DBLL_close, ++ (DBLL_CreateFxn) DBLL_create, ++ (DBLL_DeleteFxn) DBLL_delete, ++ (DBLL_ExitFxn) DBLL_exit, ++ (DBLL_GetAttrsFxn) DBLL_getAttrs, ++ (DBLL_GetAddrFxn) DBLL_getAddr, ++ (DBLL_GetCAddrFxn) DBLL_getCAddr, ++ (DBLL_GetSectFxn) DBLL_getSect, ++ (DBLL_InitFxn) DBLL_init, ++ (DBLL_LoadFxn) DBLL_load, ++ (DBLL_LoadSectFxn) DBLL_loadSect, ++ (DBLL_OpenFxn) DBLL_open, ++ (DBLL_ReadSectFxn) DBLL_readSect, ++ (DBLL_SetAttrsFxn) DBLL_setAttrs, ++ (DBLL_UnloadFxn) DBLL_unload, ++ (DBLL_UnloadSectFxn) DBLL_unloadSect, ++}; ++ ++static bool NoOp(void); ++ ++/* ++ * ======== COD_Close ======== ++ */ ++void COD_Close(struct COD_LIBRARYOBJ *lib) ++{ ++ struct COD_MANAGER *hMgr; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(lib != NULL); ++ DBC_Require(IsValid(((struct COD_LIBRARYOBJ *)lib)->hCodMgr)); ++ ++ hMgr = lib->hCodMgr; ++ hMgr->fxns.closeFxn(lib->dbllLib); ++ ++ MEM_Free(lib); ++} ++ ++/* ++ * ======== COD_Create ======== ++ * Purpose: ++ * Create an object to manage code on a DSP system. ++ * This object can be used to load an initial program image with ++ * arguments that can later be expanded with ++ * dynamically loaded object files. 
++ * ++ */ ++DSP_STATUS COD_Create(OUT struct COD_MANAGER **phMgr, char *pstrDummyFile, ++ IN OPTIONAL CONST struct COD_ATTRS *attrs) ++{ ++ struct COD_MANAGER *hMgrNew; ++ struct DBLL_Attrs zlAttrs; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phMgr != NULL); ++ ++ GT_3trace(COD_debugMask, GT_ENTER, ++ "Entered COD_Create, Args: \t\nphMgr: " ++ "0x%x\t\npstrDummyFile: 0x%x\t\nattr: 0x%x\n", ++ phMgr, pstrDummyFile, attrs); ++ /* assume failure */ ++ *phMgr = NULL; ++ ++ /* we don't support non-default attrs yet */ ++ if (attrs != NULL) ++ return DSP_ENOTIMPL; ++ ++ hMgrNew = MEM_Calloc(sizeof(struct COD_MANAGER), MEM_NONPAGED); ++ if (hMgrNew == NULL) { ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "COD_Create: Out Of Memory\n"); ++ return DSP_EMEMORY; ++ } ++ ++ hMgrNew->ulMagic = MAGIC; ++ ++ /* Set up loader functions */ ++ hMgrNew->fxns = dbllFxns; ++ ++ /* initialize the ZL module */ ++ hMgrNew->fxns.initFxn(); ++ ++ zlAttrs.alloc = (DBLL_AllocFxn)NoOp; ++ zlAttrs.free = (DBLL_FreeFxn)NoOp; ++ zlAttrs.fread = (DBLL_ReadFxn)KFILE_Read; ++ zlAttrs.fseek = (DBLL_SeekFxn)KFILE_Seek; ++ zlAttrs.ftell = (DBLL_TellFxn)KFILE_Tell; ++ zlAttrs.fclose = (DBLL_FCloseFxn)KFILE_Close; ++ zlAttrs.fopen = (DBLL_FOpenFxn)KFILE_Open; ++ zlAttrs.symLookup = NULL; ++ zlAttrs.baseImage = true; ++ zlAttrs.logWrite = NULL; ++ zlAttrs.logWriteHandle = NULL; ++ zlAttrs.write = NULL; ++ zlAttrs.rmmHandle = NULL; ++ zlAttrs.wHandle = NULL; ++ zlAttrs.symHandle = NULL; ++ zlAttrs.symArg = NULL; ++ ++ hMgrNew->attrs = zlAttrs; ++ ++ status = hMgrNew->fxns.createFxn(&hMgrNew->target, &zlAttrs); ++ ++ if (DSP_FAILED(status)) { ++ COD_Delete(hMgrNew); ++ GT_1trace(COD_debugMask, GT_7CLASS, ++ "COD_Create:ZL Create Failed: 0x%x\n", status); ++ return COD_E_ZLCREATEFAILED; ++ } ++ ++ /* return the new manager */ ++ *phMgr = hMgrNew; ++ GT_1trace(COD_debugMask, GT_1CLASS, ++ "COD_Create: Success CodMgr: 0x%x\n", *phMgr); ++ return DSP_SOK; ++} ++ ++/* ++ * ======== COD_Delete ======== ++ * Purpose: ++ * Delete a code manager object. ++ */ ++void COD_Delete(struct COD_MANAGER *hMgr) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hMgr)); ++ ++ GT_1trace(COD_debugMask, GT_ENTER, "COD_Delete:hMgr 0x%x\n", hMgr); ++ if (hMgr->baseLib) { ++ if (hMgr->fLoaded) ++ hMgr->fxns.unloadFxn(hMgr->baseLib, &hMgr->attrs); ++ ++ hMgr->fxns.closeFxn(hMgr->baseLib); ++ } ++ if (hMgr->target) { ++ hMgr->fxns.deleteFxn(hMgr->target); ++ hMgr->fxns.exitFxn(); ++ } ++ hMgr->ulMagic = ~MAGIC; ++ MEM_Free(hMgr); ++} ++ ++/* ++ * ======== COD_Exit ======== ++ * Purpose: ++ * Discontinue usage of the COD module. ++ * ++ */ ++void COD_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(COD_debugMask, GT_ENTER, ++ "Entered COD_Exit, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== COD_GetBaseLib ======== ++ * Purpose: ++ * Get handle to the base image DBL library. 
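COD never calls the DBLL loader directly: COD_Create copies the static dbllFxns table into the manager and every later operation goes through hMgr->fxns, so in principle a different loader could be bound per manager. A stripped-down sketch of that indirection, using hypothetical loader names:

#include <stdio.h>

/* Loader entry points, analogous to struct DBLL_Fxns. */
struct loader_fxns {
        int  (*open)(const char *path);
        void (*close)(int handle);
};

/* One concrete loader implementation. */
static int  file_open(const char *path) { printf("open %s\n", path); return 1; }
static void file_close(int handle)      { printf("close %d\n", handle); }

static const struct loader_fxns file_loader = { file_open, file_close };

/* The manager keeps its own copy of the table and only calls through it,
 * the way COD_MANAGER keeps hMgr->fxns. */
struct manager {
        struct loader_fxns fxns;
        int base;
};

int main(void)
{
        struct manager mgr;

        mgr.fxns = file_loader;      /* bind this manager to one loader */
        mgr.base = mgr.fxns.open("baseimage.dof");
        mgr.fxns.close(mgr.base);
        return 0;
}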
++ */ ++DSP_STATUS COD_GetBaseLib(struct COD_MANAGER *hManager, ++ struct DBLL_LibraryObj **plib) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hManager)); ++ DBC_Require(plib != NULL); ++ ++ *plib = (struct DBLL_LibraryObj *) hManager->baseLib; ++ ++ return status; ++} ++ ++/* ++ * ======== COD_GetBaseName ======== ++ */ ++DSP_STATUS COD_GetBaseName(struct COD_MANAGER *hManager, char *pszName, ++ u32 uSize) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hManager)); ++ DBC_Require(pszName != NULL); ++ ++ if (uSize <= COD_MAXPATHLENGTH) ++ strncpy(pszName, hManager->szZLFile, uSize); ++ else ++ status = DSP_EFAIL; ++ ++ return status; ++} ++ ++/* ++ * ======== COD_GetEntry ======== ++ * Purpose: ++ * Retrieve the entry point of a loaded DSP program image ++ * ++ */ ++DSP_STATUS COD_GetEntry(struct COD_MANAGER *hManager, u32 *pulEntry) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hManager)); ++ DBC_Require(pulEntry != NULL); ++ ++ *pulEntry = hManager->ulEntry; ++ ++ GT_1trace(COD_debugMask, GT_ENTER, "COD_GetEntry:ulEntr 0x%x\n", ++ *pulEntry); ++ ++ return DSP_SOK; ++} ++ ++/* ++ * ======== COD_GetLoader ======== ++ * Purpose: ++ * Get handle to the DBLL loader. ++ */ ++DSP_STATUS COD_GetLoader(struct COD_MANAGER *hManager, ++ struct DBLL_TarObj **phLoader) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hManager)); ++ DBC_Require(phLoader != NULL); ++ ++ *phLoader = (struct DBLL_TarObj *)hManager->target; ++ ++ return status; ++} ++ ++/* ++ * ======== COD_GetSection ======== ++ * Purpose: ++ * Retrieve the starting address and length of a section in the COFF file ++ * given the section name. ++ */ ++DSP_STATUS COD_GetSection(struct COD_LIBRARYOBJ *lib, IN char *pstrSect, ++ OUT u32 *puAddr, OUT u32 *puLen) ++{ ++ struct COD_MANAGER *hManager; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(lib != NULL); ++ DBC_Require(IsValid(lib->hCodMgr)); ++ DBC_Require(pstrSect != NULL); ++ DBC_Require(puAddr != NULL); ++ DBC_Require(puLen != NULL); ++ ++ GT_4trace(COD_debugMask, GT_ENTER, ++ "Entered COD_GetSection Args \t\n lib: " ++ "0x%x\t\npstrsect: 0x%x\t\npuAddr: 0x%x\t\npuLen: 0x%x\n", ++ lib, pstrSect, puAddr, puLen); ++ *puAddr = 0; ++ *puLen = 0; ++ if (lib != NULL) { ++ hManager = lib->hCodMgr; ++ status = hManager->fxns.getSectFxn(lib->dbllLib, pstrSect, ++ puAddr, puLen); ++ if (DSP_FAILED(status)) { ++ GT_1trace(COD_debugMask, GT_7CLASS, ++ "COD_GetSection: Section %s not" ++ "found\n", pstrSect); ++ } ++ } else { ++ status = COD_E_NOSYMBOLSLOADED; ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "COD_GetSection:No Symbols loaded\n"); ++ } ++ ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((*puAddr == 0) && (*puLen == 0))); ++ ++ return status; ++} ++ ++/* ++ * ======== COD_GetSymValue ======== ++ * Purpose: ++ * Retrieve the value for the specified symbol. The symbol is first ++ * searched for literally and then, if not found, searched for as a ++ * C symbol. 
++ * ++ */ ++DSP_STATUS COD_GetSymValue(struct COD_MANAGER *hMgr, char *pstrSym, ++ u32 *pulValue) ++{ ++ struct DBLL_Symbol *pSym; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hMgr)); ++ DBC_Require(pstrSym != NULL); ++ DBC_Require(pulValue != NULL); ++ ++ GT_3trace(COD_debugMask, GT_ENTER, "Entered COD_GetSymValue Args \t\n" ++ "hMgr: 0x%x\t\npstrSym: 0x%x\t\npulValue: 0x%x\n", ++ hMgr, pstrSym, pulValue); ++ if (hMgr->baseLib) { ++ if (!hMgr->fxns.getAddrFxn(hMgr->baseLib, pstrSym, &pSym)) { ++ if (!hMgr->fxns.getCAddrFxn(hMgr->baseLib, pstrSym, ++ &pSym)) { ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "COD_GetSymValue: " ++ "Symbols not found\n"); ++ return COD_E_SYMBOLNOTFOUND; ++ } ++ } ++ } else { ++ GT_0trace(COD_debugMask, GT_7CLASS, "COD_GetSymValue: " ++ "No Symbols loaded\n"); ++ return COD_E_NOSYMBOLSLOADED; ++ } ++ ++ *pulValue = pSym->value; ++ ++ return DSP_SOK; ++} ++ ++/* ++ * ======== COD_Init ======== ++ * Purpose: ++ * Initialize the COD module's private state. ++ * ++ */ ++bool COD_Init(void) ++{ ++ bool fRetVal = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!COD_debugMask.flags); ++ GT_create(&COD_debugMask, "CO"); ++ } ++ ++ if (fRetVal) ++ cRefs++; ++ ++ ++ GT_1trace(COD_debugMask, GT_1CLASS, ++ "Entered COD_Init, ref count: 0x%x\n", cRefs); ++ DBC_Ensure((fRetVal && cRefs > 0) || (!fRetVal && cRefs >= 0)); ++ return fRetVal; ++} ++ ++/* ++ * ======== COD_LoadBase ======== ++ * Purpose: ++ * Load the initial program image, optionally with command-line arguments, ++ * on the DSP system managed by the supplied handle. The program to be ++ * loaded must be the first element of the args array and must be a fully ++ * qualified pathname. ++ * Details: ++ * if nArgc doesn't match the number of arguments in the aArgs array, the ++ * aArgs array is searched for a NULL terminating entry, and argc is ++ * recalculated to reflect this. In this way, we can support NULL ++ * terminating aArgs arrays, if nArgc is very large. ++ */ ++DSP_STATUS COD_LoadBase(struct COD_MANAGER *hMgr, u32 nArgc, char *aArgs[], ++ COD_WRITEFXN pfnWrite, void *pArb, char *envp[]) ++{ ++ DBLL_Flags flags; ++ struct DBLL_Attrs saveAttrs; ++ struct DBLL_Attrs newAttrs; ++ DSP_STATUS status; ++ u32 i; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hMgr)); ++ DBC_Require(nArgc > 0); ++ DBC_Require(aArgs != NULL); ++ DBC_Require(aArgs[0] != NULL); ++ DBC_Require(pfnWrite != NULL); ++ DBC_Require(hMgr->baseLib != NULL); ++ ++ GT_6trace(COD_debugMask, GT_ENTER, ++ "Entered COD_LoadBase, hMgr: 0x%x\n \t" ++ "nArgc: 0x%x\n\taArgs: 0x%x\n\tpfnWrite: 0x%x\n\tpArb:" ++ " 0x%x\n \tenvp: 0x%x\n", hMgr, nArgc, aArgs, pfnWrite, ++ pArb, envp); ++ /* ++ * Make sure every argv[] stated in argc has a value, or change argc to ++ * reflect true number in NULL terminated argv array. 
++ */ ++ for (i = 0; i < nArgc; i++) { ++ if (aArgs[i] == NULL) { ++ nArgc = i; ++ break; ++ } ++ } ++ ++ /* set the write function for this operation */ ++ hMgr->fxns.getAttrsFxn(hMgr->target, &saveAttrs); ++ ++ newAttrs = saveAttrs; ++ newAttrs.write = (DBLL_WriteFxn)pfnWrite; ++ newAttrs.wHandle = pArb; ++ newAttrs.alloc = (DBLL_AllocFxn)NoOp; ++ newAttrs.free = (DBLL_FreeFxn)NoOp; ++ newAttrs.logWrite = NULL; ++ newAttrs.logWriteHandle = NULL; ++ ++ /* Load the image */ ++ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; ++ status = hMgr->fxns.loadFxn(hMgr->baseLib, flags, &newAttrs, ++ &hMgr->ulEntry); ++ if (DSP_FAILED(status)) { ++ hMgr->fxns.closeFxn(hMgr->baseLib); ++ GT_1trace(COD_debugMask, GT_7CLASS, ++ "COD_LoadBase: COD Load failed: " ++ "0x%x\n", status); ++ } ++ if (DSP_SUCCEEDED(status)) ++ hMgr->fLoaded = true; ++ else ++ hMgr->baseLib = NULL; ++ ++ return status; ++} ++ ++/* ++ * ======== COD_Open ======== ++ * Open library for reading sections. ++ */ ++DSP_STATUS COD_Open(struct COD_MANAGER *hMgr, IN char *pszCoffPath, ++ COD_FLAGS flags, struct COD_LIBRARYOBJ **pLib) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct COD_LIBRARYOBJ *lib = NULL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hMgr)); ++ DBC_Require(pszCoffPath != NULL); ++ DBC_Require(flags == COD_NOLOAD || flags == COD_SYMB); ++ DBC_Require(pLib != NULL); ++ ++ GT_4trace(COD_debugMask, GT_ENTER, "Entered COD_Open, hMgr: 0x%x\n\t " ++ "pszCoffPath: 0x%x\tflags: 0x%x\tlib: 0x%x\n", hMgr, ++ pszCoffPath, flags, pLib); ++ ++ *pLib = NULL; ++ ++ lib = MEM_Calloc(sizeof(struct COD_LIBRARYOBJ), MEM_NONPAGED); ++ if (lib == NULL) { ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "COD_Open: Out Of Memory\n"); ++ status = DSP_EMEMORY; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ lib->hCodMgr = hMgr; ++ status = hMgr->fxns.openFxn(hMgr->target, pszCoffPath, flags, ++ &lib->dbllLib); ++ if (DSP_FAILED(status)) { ++ GT_1trace(COD_debugMask, GT_7CLASS, ++ "COD_Open failed: 0x%x\n", status); ++ } else { ++ *pLib = lib; ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== COD_OpenBase ======== ++ * Purpose: ++ * Open base image for reading sections. ++ */ ++DSP_STATUS COD_OpenBase(struct COD_MANAGER *hMgr, IN char *pszCoffPath, ++ DBLL_Flags flags) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DBLL_LibraryObj *lib; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValid(hMgr)); ++ DBC_Require(pszCoffPath != NULL); ++ ++ GT_2trace(COD_debugMask, GT_ENTER, ++ "Entered COD_OpenBase, hMgr: 0x%x\n\t" ++ "pszCoffPath: 0x%x\n", hMgr, pszCoffPath); ++ ++ /* if we previously opened a base image, close it now */ ++ if (hMgr->baseLib) { ++ if (hMgr->fLoaded) { ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "Base Image is already loaded. 
" ++ "Unloading it...\n"); ++ hMgr->fxns.unloadFxn(hMgr->baseLib, &hMgr->attrs); ++ hMgr->fLoaded = false; ++ } ++ hMgr->fxns.closeFxn(hMgr->baseLib); ++ hMgr->baseLib = NULL; ++ } else { ++ GT_0trace(COD_debugMask, GT_1CLASS, ++ "COD_OpenBase: Opening the base image ...\n"); ++ } ++ status = hMgr->fxns.openFxn(hMgr->target, pszCoffPath, flags, &lib); ++ if (DSP_FAILED(status)) { ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "COD_OpenBase: COD Open failed\n"); ++ } else { ++ /* hang onto the library for subsequent sym table usage */ ++ hMgr->baseLib = lib; ++ strncpy(hMgr->szZLFile, pszCoffPath, COD_MAXPATHLENGTH - 1); ++ hMgr->szZLFile[COD_MAXPATHLENGTH - 1] = '\0'; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== COD_ReadSection ======== ++ * Purpose: ++ * Retrieve the content of a code section given the section name. ++ */ ++DSP_STATUS COD_ReadSection(struct COD_LIBRARYOBJ *lib, IN char *pstrSect, ++ OUT char *pstrContent, IN u32 cContentSize) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(lib != NULL); ++ DBC_Require(IsValid(lib->hCodMgr)); ++ DBC_Require(pstrSect != NULL); ++ DBC_Require(pstrContent != NULL); ++ ++ GT_4trace(COD_debugMask, GT_ENTER, "Entered COD_ReadSection Args: 0x%x," ++ " 0x%x, 0x%x, 0x%x\n", lib, pstrSect, pstrContent, ++ cContentSize); ++ ++ if (lib != NULL) { ++ status = lib->hCodMgr->fxns.readSectFxn(lib->dbllLib, pstrSect, ++ pstrContent, ++ cContentSize); ++ if (DSP_FAILED(status)) { ++ GT_1trace(COD_debugMask, GT_7CLASS, ++ "COD_ReadSection failed: 0x%lx\n", status); ++ } ++ } else { ++ status = COD_E_NOSYMBOLSLOADED; ++ GT_0trace(COD_debugMask, GT_7CLASS, ++ "COD_ReadSection: No Symbols loaded\n"); ++ } ++ return status; ++} ++ ++/* ++ * ======== NoOp ======== ++ * Purpose: ++ * No Operation. ++ * ++ */ ++static bool NoOp(void) ++{ ++ return true; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbl.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dbl.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbl.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dbl.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1385 @@ ++/* ++ * dbl.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbl.c ======== ++ * Dynamic BOF Loader library. Contains functions related to ++ * loading and unloading symbols/code/data on DSP. ++ * Also contains other support functions. ++ * ++ *! Revision History ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 24-May-2002 jeh Free DCD sects in DBL_close(). ++ *! 19-Mar-2002 jeh Changes made to match dynamic loader (dbll.c): Pass ++ *! DBL_Library to DBL_getAddr() instead of DBL_Target, ++ *! eliminate scope param, use DBL_Symbol. Pass attrs to ++ *! DBL_load(), DBL_unload(). ++ *! 20-Nov-2001 jeh Removed DBL_loadArgs(). ++ *! 07-Sep-2001 jeh Added overlay support. ++ *! 31-Jul-2001 jeh Include windows.h. ++ *! 06-Jun-2001 jeh Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++#define DBL_TARGSIGNATURE 0x544c4244 /* "TLBD" */ ++#define DBL_LIBSIGNATURE 0x4c4c4244 /* "LLBD" */ ++ ++#define C54TARG 0 ++#define C55TARG 1 ++#define NUMTARGS 2 ++ ++#define C54MAGIC 0x98 /* Magic number for TI C54 COF */ ++#define C55MAGIC 0x9c /* Magic number for LEAD3 (C55) COF */ ++ ++/* Three task phases */ ++#define CREATEPHASE 0 ++#define DELETEPHASE 1 ++#define EXECUTEPHASE 2 ++#define NONE 3 /* For overlay section with phase not specified */ ++ ++/* Default load buffer size */ ++#define LOADBUFSIZE 0x800 ++ ++#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \ ++ (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF)) ++ ++#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF)) ++ ++/* ++ * Macros for accessing the following types of overlay data within a ++ * structure of type OvlyData: ++ * - Overlay data not associated with a particular phase ++ * - Create phase overlay data ++ * - Delete phase overlay data ++ * - Execute phase overlay data ++ */ ++#define numOtherSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numOtherSects) ++#define numCreateSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numCreateSects) ++#define numDeleteSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numDeleteSects) ++#define numExecuteSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numExecuteSects) ++#define otherOffset(pOvlyData) 0 ++#define createOffset(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numOtherSects) ++#define deleteOffset(pOvlyData) (createOffset(pOvlyData) + \ ++ (pOvlyData->hdr.dbofHdr.numCreateSects)) ++#define executeOffset(pOvlyData) (deleteOffset(pOvlyData) + \ ++ (pOvlyData->hdr.dbofHdr.numDeleteSects)) ++/* ++ * ======== OvlyHdr ======== ++ */ ++struct OvlyHdr { ++ struct DBOF_OvlySectHdr dbofHdr; ++ char *pName; /* Name of overlay section */ ++ u16 createRef; /* Reference count for create phase */ ++ u16 deleteRef; /* Reference count for delete phase */ ++ u16 executeRef; /* Execute phase ref count */ ++ u16 otherRef; /* Unspecified phase ref count */ ++} ; ++ ++/* ++ * ======== OvlyData ======== ++ */ ++struct OvlyData { ++ struct OvlyHdr hdr; ++ struct DBOF_OvlySectData data[1]; ++} ; ++ ++/* ++ * ======== Symbol ======== ++ */ ++struct Symbol { ++ struct DBL_Symbol sym; ++ char *pSymName; ++}; ++ ++/* ++ * ======== DCDSect ======== ++ */ ++struct DCDSect { ++ struct DBOF_DCDSectHdr sectHdr; ++ char *pData; ++} ; ++ ++/* ++ * ======== DBL_TargetObj ======== ++ */ ++struct DBL_TargetObj { ++ u32 dwSignature; /* For object validation */ ++ struct DBL_Attrs dblAttrs; /* file read, write, etc. functions */ ++ char *pBuf; /* Load buffer */ ++}; ++ ++/* ++ * ======== TargetInfo ======== ++ */ ++struct TargetInfo { ++ u16 dspType; /* eg, C54TARG, C55TARG */ ++ u32 magic; /* COFF magic number, identifies target type */ ++ u16 wordSize; /* Size of a DSP word */ ++ u16 mauSize; /* Size of minimum addressable unit */ ++ u16 charSize; /* For C55x, mausize = 1, but charsize = 2 */ ++} ; ++ ++/* ++ * ======== DBL_LibraryObj ======== ++ * Represents a library loaded on a target. 
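SWAPLONG and SWAPWORD above are plain endian swaps, applied when a DBOF file's byte order does not match the host's (the byteSwapped flag in DBL_LibraryObj below). Their effect is easy to verify in isolation; the macros are reproduced here so the check stands alone:

#include <assert.h>
#include <stdio.h>

/* Same definitions as dbl.c, copied so this check is self-contained. */
#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \
                     (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF))
#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF))

int main(void)
{
        assert(SWAPLONG(0x12345678UL) == 0x78563412UL);
        assert(SWAPWORD(0xBEEF) == 0xEFBE);
        printf("byte-swap checks passed\n");
        return 0;
}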
++ */ ++struct DBL_LibraryObj { ++ u32 dwSignature; /* For object validation */ ++ struct DBL_TargetObj *pTarget; /* Target for this library */ ++ struct KFILE_FileObj *file; /* DBOF file handle */ ++ bool byteSwapped; /* Are bytes swapped? */ ++ struct DBOF_FileHdr fileHdr; /* Header of DBOF file */ ++ u16 nSymbols; /* Number of DSP/Bridge symbols */ ++ struct Symbol *symbols; /* Table of DSP/Bridge symbols */ ++ u16 nDCDSects; /* Number of DCD sections */ ++ u16 nOvlySects; /* Number of overlay nodes */ ++ struct DCDSect *dcdSects; /* DCD section data */ ++ struct OvlyData **ppOvlyData; /* Array of overlay section data */ ++ struct TargetInfo *pTargetInfo; /* Entry in targetTab[] below */ ++} ; ++ ++#if GT_TRACE ++static struct GT_Mask DBL_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static u32 cRefs; /* module reference count */ ++ ++static u32 magicTab[NUMTARGS] = { C54MAGIC, C55MAGIC }; ++ ++static struct TargetInfo targetTab[] = { ++ /* targ magic wordsize mausize charsize */ ++ {C54TARG, C54MAGIC, 2, 2, 2}, /* C54 */ ++ {C55TARG, C55MAGIC, 2, 1, 2}, /* C55 */ ++}; ++ ++static void freeSects(struct DBL_TargetObj *dbl, struct OvlyData *pOvlyData, ++ s32 offset, s32 nSects); ++static DSP_STATUS loadSect(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib); ++static DSP_STATUS readDCDSects(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib); ++static DSP_STATUS readHeader(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib); ++static DSP_STATUS readOvlySects(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib); ++static DSP_STATUS readSymbols(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib); ++ ++/* ++ * ======== DBL_close ======== ++ * Purpose: ++ * Close library opened with DBL_open. ++ */ ++void DBL_close(struct DBL_LibraryObj *lib) ++{ ++ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; ++ u16 i; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); ++ ++ GT_1trace(DBL_debugMask, GT_ENTER, "DBL_close: lib: 0x%x\n", lib); ++ ++ /* Free symbols */ ++ if (pdblLib->symbols) { ++ for (i = 0; i < pdblLib->nSymbols; i++) { ++ if (pdblLib->symbols[i].pSymName) ++ MEM_Free(pdblLib->symbols[i].pSymName); ++ ++ } ++ MEM_Free(pdblLib->symbols); ++ } ++ ++ /* Free DCD sects */ ++ if (pdblLib->dcdSects) { ++ for (i = 0; i < pdblLib->nDCDSects; i++) { ++ if (pdblLib->dcdSects[i].pData) ++ MEM_Free(pdblLib->dcdSects[i].pData); ++ ++ } ++ MEM_Free(pdblLib->dcdSects); ++ } ++ ++ /* Free overlay sects */ ++ if (pdblLib->ppOvlyData) { ++ for (i = 0; i < pdblLib->nOvlySects; i++) { ++ if (pdblLib->ppOvlyData[i]) { ++ if (pdblLib->ppOvlyData[i]->hdr.pName) { ++ MEM_Free(pdblLib->ppOvlyData[i]-> ++ hdr.pName); ++ } ++ MEM_Free(pdblLib->ppOvlyData[i]); ++ } ++ } ++ MEM_Free(pdblLib->ppOvlyData); ++ } ++ ++ /* Close the file */ ++ if (pdblLib->file) ++ (*pdblLib->pTarget->dblAttrs.fclose) (pdblLib->file); ++ ++ ++ MEM_FreeObject(pdblLib); ++} ++ ++/* ++ * ======== DBL_create ======== ++ * Purpose: ++ * Create a target object by specifying the alloc, free, and ++ * write functions for the target. 
++ */ ++DSP_STATUS DBL_create(struct DBL_TargetObj **pTarget, struct DBL_Attrs *pAttrs) ++{ ++ struct DBL_TargetObj *pdblTarget = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pAttrs != NULL); ++ DBC_Require(pTarget != NULL); ++ ++ GT_2trace(DBL_debugMask, GT_ENTER, ++ "DBL_create: pTarget: 0x%x pAttrs: 0x%x\n", ++ pTarget, pAttrs); ++ /* Allocate DBL target object */ ++ MEM_AllocObject(pdblTarget, struct DBL_TargetObj, DBL_TARGSIGNATURE); ++ if (pdblTarget == NULL) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "DBL_create: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } else { ++ pdblTarget->dblAttrs = *pAttrs; ++ /* Allocate buffer for loading target */ ++ pdblTarget->pBuf = MEM_Calloc(LOADBUFSIZE, MEM_PAGED); ++ if (pdblTarget->pBuf == NULL) ++ status = DSP_EMEMORY; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ *pTarget = pdblTarget; ++ } else { ++ *pTarget = NULL; ++ if (pdblTarget) ++ DBL_delete(pdblTarget); ++ ++ } ++ DBC_Ensure(DSP_SUCCEEDED(status) && ++ ((MEM_IsValidHandle((*pTarget), DBL_TARGSIGNATURE)) || ++ (DSP_FAILED(status) && *pTarget == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DBL_delete ======== ++ * Purpose: ++ * Delete target object and free resources for any loaded libraries. ++ */ ++void DBL_delete(struct DBL_TargetObj *target) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); ++ ++ GT_1trace(DBL_debugMask, GT_ENTER, ++ "DBL_delete: target: 0x%x\n", target); ++ ++ if (target->pBuf) ++ MEM_Free(target->pBuf); ++ ++ MEM_FreeObject(target); ++} ++ ++/* ++ * ======== DBL_exit ======== ++ * Purpose ++ * Discontinue usage of DBL module. ++ */ ++void DBL_exit() ++{ ++ DBC_Require(cRefs > 0); ++ cRefs--; ++ GT_1trace(DBL_debugMask, GT_5CLASS, ++ "DBL_exit() ref count: 0x%x\n", cRefs); ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== DBL_getAddr ======== ++ * Purpose: ++ * Get address of name in the specified library. ++ */ ++bool DBL_getAddr(struct DBL_LibraryObj *lib, char *name, ++ struct DBL_Symbol **ppSym) ++{ ++ bool retVal = false; ++ struct Symbol *symbol; ++ u16 i; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); ++ DBC_Require(name != NULL); ++ DBC_Require(ppSym != NULL); ++ ++ GT_3trace(DBL_debugMask, GT_ENTER, ++ "DBL_getAddr: libt: 0x%x name: %s pAddr: " ++ "0x%x\n", lib, name, ppSym); ++ for (i = 0; i < lib->nSymbols; i++) { ++ symbol = &lib->symbols[i]; ++ if (CSL_Strcmp(name, symbol->pSymName) == 0) { ++ /* Found it */ ++ *ppSym = &lib->symbols[i].sym; ++ retVal = true; ++ break; ++ } ++ } ++ return retVal; ++} ++ ++/* ++ * ======== DBL_getAttrs ======== ++ * Purpose: ++ * Retrieve the attributes of the target. ++ */ ++void DBL_getAttrs(struct DBL_TargetObj *target, struct DBL_Attrs *pAttrs) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); ++ DBC_Require(pAttrs != NULL); ++ GT_2trace(DBL_debugMask, GT_ENTER, "DBL_getAttrs: target: 0x%x pAttrs: " ++ "0x%x\n", target, pAttrs); ++ *pAttrs = target->dblAttrs; ++} ++ ++/* ++ * ======== DBL_getCAddr ======== ++ * Purpose: ++ * Get address of "C" name in the specified library. 
++ */ ++bool DBL_getCAddr(struct DBL_LibraryObj *lib, char *name, ++ struct DBL_Symbol **ppSym) ++{ ++ bool retVal = false; ++ struct Symbol *symbol; ++ u16 i; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); ++ DBC_Require(name != NULL); ++ DBC_Require(ppSym != NULL); ++ ++ GT_3trace(DBL_debugMask, GT_ENTER, ++ "DBL_getCAddr: target: 0x%x name:%s pAddr:" ++ " 0x%x\n", lib, name, ppSym); ++ for (i = 0; i < lib->nSymbols; i++) { ++ symbol = &lib->symbols[i]; ++ if ((CSL_Strcmp(name, symbol->pSymName) == 0) || ++ (CSL_Strcmp(name, symbol->pSymName + 1) == 0 && ++ symbol->pSymName[0] == '_')) { ++ /* Found it */ ++ *ppSym = &lib->symbols[i].sym; ++ retVal = true; ++ break; ++ } ++ } ++ return retVal; ++} ++ ++/* ++ * ======== DBL_getEntry ======== ++ * Purpose: ++ * Get program entry point. ++ * ++ */ ++bool DBL_getEntry(struct DBL_LibraryObj *lib, u32 *pEntry) ++{ ++ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); ++ DBC_Require(pEntry != NULL); ++ ++ GT_2trace(DBL_debugMask, GT_ENTER, ++ "DBL_getEntry: lib: 0x%x pEntry: 0x%x\n", lib, pEntry); ++ *pEntry = pdblLib->fileHdr.entry; ++ ++ return true; ++} ++ ++/* ++ * ======== DBL_getSect ======== ++ * Purpose: ++ * Get address and size of a named section. ++ */ ++DSP_STATUS DBL_getSect(struct DBL_LibraryObj *lib, char *name, u32 *pAddr, ++ u32 *pSize) ++{ ++ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; ++ u16 i; ++ DSP_STATUS status = DSP_ENOSECT; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(name != NULL); ++ DBC_Require(pAddr != NULL); ++ DBC_Require(pSize != NULL); ++ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); ++ ++ GT_4trace(DBL_debugMask, GT_ENTER, ++ "DBL_getSect: lib: 0x%x name: %s pAddr:" ++ " 0x%x pSize: 0x%x\n", lib, name, pAddr, pSize); ++ ++ /* ++ * Check for DCD and overlay sections. Overlay loader uses DBL_getSect ++ * to determine whether or not a node has overlay sections. ++ * DCD section names begin with '.' ++ */ ++ if (name[0] == '.') { ++ /* Get DCD section size (address is 0, since it's a NOLOAD). */ ++ for (i = 0; i < pdblLib->nDCDSects; i++) { ++ if (CSL_Strcmp(pdblLib->dcdSects[i].sectHdr.name, ++ name) == 0) { ++ *pAddr = 0; ++ *pSize = pdblLib->dcdSects[i].sectHdr.size * ++ pdblLib->pTargetInfo->mauSize; ++ status = DSP_SOK; ++ break; ++ } ++ } ++ } else { ++ /* Check for overlay section */ ++ for (i = 0; i < pdblLib->nOvlySects; i++) { ++ if (CSL_Strcmp(pdblLib->ppOvlyData[i]->hdr.pName, ++ name) == 0) { ++ /* Address and size are meaningless */ ++ *pAddr = 0; ++ *pSize = 0; ++ status = DSP_SOK; ++ break; ++ } ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DBL_init ======== ++ * Purpose: ++ * Initialize DBL module. ++ */ ++bool DBL_init(void) ++{ ++ bool retVal = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!DBL_debugMask.flags); ++ GT_create(&DBL_debugMask, "BL"); /* "BL" for dBL */ ++ ++ } ++ ++ if (retVal) ++ cRefs++; ++ ++ ++ GT_1trace(DBL_debugMask, GT_5CLASS, "DBL_init(), ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((retVal && (cRefs > 0)) || (!retVal && (cRefs >= 0))); ++ ++ return retVal; ++} ++ ++/* ++ * ======== DBL_load ======== ++ * Purpose: ++ * Add symbols/code/data defined in file to that already present ++ * on the target. 
++ */ ++DSP_STATUS DBL_load(struct DBL_LibraryObj *lib, DBL_Flags flags, ++ struct DBL_Attrs *attrs, u32 *pEntry) ++{ ++ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; ++ struct DBL_TargetObj *dbl; ++ u16 i; ++ u16 nSects; ++ DSP_STATUS status = DSP_EFAIL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); ++ DBC_Require(pEntry != NULL); ++ DBC_Require(attrs != NULL); ++ ++ GT_4trace(DBL_debugMask, GT_ENTER, "DBL_load: lib: 0x%x flags: " ++ "0x%x attrs: 0x%x pEntry: 0x%x\n", lib, flags, attrs, pEntry); ++ ++ dbl = pdblLib->pTarget; ++ *pEntry = pdblLib->fileHdr.entry; ++ nSects = pdblLib->fileHdr.numSects; ++ dbl->dblAttrs = *attrs; ++ ++ for (i = 0; i < nSects; i++) { ++ /* Load the section at the current file offset */ ++ status = loadSect(dbl, lib); ++ if (DSP_FAILED(status)) ++ break; ++ ++ } ++ ++ /* Done with file, we can close it */ ++ if (pdblLib->file) { ++ (*pdblLib->pTarget->dblAttrs.fclose) (pdblLib->file); ++ pdblLib->file = NULL; ++ } ++ return status; ++} ++ ++/* ++ * ======== DBL_loadSect ======== ++ * Purpose: ++ * Load a named section from an library (for overlay support). ++ */ ++DSP_STATUS DBL_loadSect(struct DBL_LibraryObj *lib, char *sectName, ++ struct DBL_Attrs *attrs) ++{ ++ struct DBL_TargetObj *dbl; ++ s32 i; ++ s32 phase; ++ s32 offset = -1; ++ s32 nSects = -1; ++ s32 allocdSects = 0; ++ u32 loadAddr; ++ u32 runAddr; ++ u32 size; ++ u32 space; ++ u32 ulBytes; ++ u16 mauSize; ++ u16 wordSize; ++ u16 *phaseRef = NULL; ++ u16 *otherRef = NULL; ++ char *name = NULL; ++ struct OvlyData *pOvlyData; ++ DSP_STATUS status = DSP_ENOSECT; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); ++ DBC_Require(sectName != NULL); ++ DBC_Require(attrs != NULL); ++ DBC_Require(attrs->write != NULL); ++ GT_3trace(DBL_debugMask, GT_ENTER, ++ "DBL_loadSect: lib: 0x%x sectName: %s " ++ "attrs: 0x%x\n", lib, sectName, attrs); ++ dbl = lib->pTarget; ++ mauSize = lib->pTargetInfo->mauSize; ++ wordSize = lib->pTargetInfo->wordSize; ++ /* Check for match of sect name in overlay table */ ++ for (i = 0; i < lib->nOvlySects; i++) { ++ name = lib->ppOvlyData[i]->hdr.pName; ++ if (!CSL_Strncmp(name, sectName, CSL_Strlen(name))) { ++ /* Match found */ ++ status = DSP_SOK; ++ break; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ DBC_Assert(i < lib->nOvlySects); ++ pOvlyData = lib->ppOvlyData[i]; ++ /* ++ * If node overlay, phase will be encoded in name. If not node ++ * overlay, set phase to NONE. ++ */ ++ phase = (CSL_Strcmp(name, sectName)) ? ++ CSL_Atoi(sectName + CSL_Strlen(sectName) - 1) : NONE; ++ /* Get reference count of node phase to be loaded, offset into ++ * overlay data array, and number of sections to overlay. */ ++ switch (phase) { ++ case NONE: ++ /* Not a node overlay */ ++ phaseRef = &pOvlyData->hdr.otherRef; ++ nSects = numOtherSects(pOvlyData); ++ offset = otherOffset(pOvlyData); ++ break; ++ case CREATEPHASE: ++ phaseRef = &pOvlyData->hdr.createRef; ++ otherRef = &pOvlyData->hdr.otherRef; ++ if (*otherRef) { ++ /* The overlay sections where node phase was ++ * not specified, have already been loaded. */ ++ nSects = numCreateSects(pOvlyData); ++ offset = createOffset(pOvlyData); ++ } else { ++ /* Overlay sections where node phase was not ++ * specified get loaded at create time, along ++ * with create sects. 
*/ ++ nSects = numCreateSects(pOvlyData) + ++ numOtherSects(pOvlyData); ++ offset = otherOffset(pOvlyData); ++ } ++ break; ++ case DELETEPHASE: ++ phaseRef = &pOvlyData->hdr.deleteRef; ++ nSects = numDeleteSects(pOvlyData); ++ offset = deleteOffset(pOvlyData); ++ break; ++ case EXECUTEPHASE: ++ phaseRef = &pOvlyData->hdr.executeRef; ++ nSects = numExecuteSects(pOvlyData); ++ offset = executeOffset(pOvlyData); ++ break; ++ default: ++ /* ERROR */ ++ DBC_Assert(false); ++ break; ++ } ++ /* Do overlay if reference count is 0 */ ++ if (!(*phaseRef)) { ++ /* "Allocate" all sections */ ++ for (i = 0; i < nSects; i++) { ++ runAddr = pOvlyData->data[offset + i].runAddr; ++ size = pOvlyData->data[offset + i].size; ++ space = pOvlyData->data[offset + i].page; ++ status = (dbl->dblAttrs.alloc)(dbl->dblAttrs. ++ rmmHandle, space, size, 0, ++ &runAddr, true); ++ if (DSP_FAILED(status)) ++ break; ++ ++ allocdSects++; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Load sections */ ++ for (i = 0; i < nSects; i++) { ++ loadAddr = pOvlyData->data[offset + i]. ++ loadAddr; ++ runAddr = pOvlyData->data[offset + i]. ++ runAddr; ++ size = pOvlyData->data[offset + i]. ++ size; ++ space = pOvlyData->data[offset + i]. ++ page; ++ /* Convert to word address, call ++ * write function */ ++ loadAddr /= (wordSize / mauSize); ++ runAddr /= (wordSize / mauSize); ++ ulBytes = size * mauSize; ++ if ((*attrs->write)(attrs->wHandle, ++ runAddr, (void *)loadAddr, ulBytes, ++ space) != ulBytes) { ++ GT_0trace(DBL_debugMask, ++ GT_6CLASS, ++ "DBL_loadSect: write" ++ " failed\n"); ++ status = DSP_EFWRITE; ++ break; ++ } ++ } ++ } ++ /* Free sections on failure */ ++ if (DSP_FAILED(status)) ++ freeSects(dbl, pOvlyData, offset, allocdSects); ++ ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Increment reference counts */ ++ if (otherRef) ++ *otherRef = *otherRef + 1; ++ ++ *phaseRef = *phaseRef + 1; ++ } ++ return status; ++} ++ ++/* ++ * ======== DBL_open ======== ++ * Purpose: ++ * DBL_open() returns a library handle that can be used to ++ * load/unload the symbols/code/data via DBL_load()/DBL_unload(). 
++ */ ++DSP_STATUS DBL_open(struct DBL_TargetObj *target, char *file, DBL_Flags flags, ++ struct DBL_LibraryObj **pLib) ++{ ++ struct DBL_LibraryObj *pdblLib = NULL; ++ u16 nSymbols; ++ u16 nDCDSects; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); ++ DBC_Require(target->dblAttrs.fopen != NULL); ++ DBC_Require(file != NULL); ++ DBC_Require(pLib != NULL); ++ ++ GT_3trace(DBL_debugMask, GT_ENTER, "DBL_open: target: 0x%x file: %s " ++ "pLib: 0x%x\n", target, file, pLib); ++ /* Allocate DBL library object */ ++ MEM_AllocObject(pdblLib, struct DBL_LibraryObj, DBL_LIBSIGNATURE); ++ if (pdblLib == NULL) ++ status = DSP_EMEMORY; ++ ++ /* Open the file */ ++ if (DSP_SUCCEEDED(status)) { ++ pdblLib->pTarget = target; ++ pdblLib->file = (*target->dblAttrs.fopen)(file, "rb"); ++ if (pdblLib->file == NULL) ++ status = DSP_EFOPEN; ++ ++ } ++ /* Read file header */ ++ if (DSP_SUCCEEDED(status)) { ++ status = readHeader(target, pdblLib); ++ if (DSP_FAILED(status)) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "DBL_open(): Failed to read file header\n"); ++ } ++ } ++ /* Allocate symbol table */ ++ if (DSP_SUCCEEDED(status)) { ++ nSymbols = pdblLib->nSymbols = pdblLib->fileHdr.numSymbols; ++ pdblLib->symbols = MEM_Calloc(nSymbols * sizeof(struct Symbol), ++ MEM_PAGED); ++ if (pdblLib->symbols == NULL) ++ status = DSP_EMEMORY; ++ ++ } ++ /* Read all the symbols */ ++ if (DSP_SUCCEEDED(status)) { ++ status = readSymbols(target, pdblLib); ++ if (DSP_FAILED(status)) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "DBL_open(): Failed to read symbols\n"); ++ } ++ } ++ /* Allocate DCD sect table */ ++ if (DSP_SUCCEEDED(status)) { ++ nDCDSects = pdblLib->nDCDSects = pdblLib->fileHdr.numDCDSects; ++ pdblLib->dcdSects = MEM_Calloc(nDCDSects * ++ sizeof(struct DCDSect), MEM_PAGED); ++ if (pdblLib->dcdSects == NULL) ++ status = DSP_EMEMORY; ++ ++ } ++ /* Read DCD sections */ ++ if (DSP_SUCCEEDED(status)) { ++ status = readDCDSects(target, pdblLib); ++ if (DSP_FAILED(status)) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "DBL_open(): Failed to read DCD sections\n"); ++ } ++ } ++ /* Read overlay sections */ ++ if (DSP_SUCCEEDED(status)) { ++ status = readOvlySects(target, pdblLib); ++ if (DSP_FAILED(status)) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "DBL_open(): Failed to read " ++ "overlay sections\n"); ++ } ++ } ++ if (DSP_FAILED(status)) { ++ *pLib = NULL; ++ if (pdblLib != NULL) ++ DBL_close((struct DBL_LibraryObj *) pdblLib); ++ ++ } else { ++ *pLib = pdblLib; ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && ++ (MEM_IsValidHandle((*pLib), DBL_LIBSIGNATURE))) || ++ (DSP_FAILED(status) && *pLib == NULL)); ++ return status; ++} ++ ++/* ++ * ======== DBL_readSect ======== ++ * Purpose: ++ * Read COFF section into a character buffer. ++ */ ++DSP_STATUS DBL_readSect(struct DBL_LibraryObj *lib, char *name, char *pContent, ++ u32 size) ++{ ++ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; ++ u16 i; ++ u32 mauSize; ++ u32 max; ++ DSP_STATUS status = DSP_ENOSECT; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); ++ DBC_Require(name != NULL); ++ DBC_Require(pContent != NULL); ++ DBC_Require(size != 0); ++ GT_4trace(DBL_debugMask, GT_ENTER, "DBL_readSect: lib: 0x%x name: %s " ++ "pContent: 0x%x size: 0x%x\n", lib, name, pContent, size); ++ ++ mauSize = pdblLib->pTargetInfo->mauSize; ++ ++ /* Attempt to find match with DCD section names. 
*/ ++ for (i = 0; i < pdblLib->nDCDSects; i++) { ++ if (CSL_Strcmp(pdblLib->dcdSects[i].sectHdr.name, name) == 0) { ++ /* Match found */ ++ max = pdblLib->dcdSects[i].sectHdr.size * mauSize; ++ max = (max > size) ? size : max; ++ memcpy(pContent, pdblLib->dcdSects[i].pData, max); ++ status = DSP_SOK; ++ break; ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DBL_setAttrs ======== ++ * Purpose: ++ * Set the attributes of the target. ++ */ ++void DBL_setAttrs(struct DBL_TargetObj *target, struct DBL_Attrs *pAttrs) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); ++ DBC_Require(pAttrs != NULL); ++ ++ GT_2trace(DBL_debugMask, GT_ENTER, "DBL_setAttrs: target: 0x%x pAttrs: " ++ "0x%x\n", target, pAttrs); ++ ++ target->dblAttrs = *pAttrs; ++} ++ ++/* ++ * ======== DBL_unload ======== ++ * Purpose: ++ * Remove the symbols/code/data corresponding to the library lib. ++ */ ++void DBL_unload(struct DBL_LibraryObj *lib, struct DBL_Attrs *attrs) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); ++ ++ GT_1trace(DBL_debugMask, GT_ENTER, "DBL_unload: lib: 0x%x\n", lib); ++ ++ /* Nothing to do for static loading */ ++} ++ ++/* ++ * ======== DBL_unloadSect ======== ++ * Purpose: ++ * Unload a named section from an library (for overlay support). ++ */ ++DSP_STATUS DBL_unloadSect(struct DBL_LibraryObj *lib, char *sectName, ++ struct DBL_Attrs *attrs) ++{ ++ struct DBL_TargetObj *dbl; ++ s32 i; ++ s32 phase; ++ s32 offset = -1; ++ s32 nSects = -1; ++ u16 *phaseRef = NULL; ++ u16 *otherRef = NULL; ++ char *pName = NULL; ++ struct OvlyData *pOvlyData; ++ DSP_STATUS status = DSP_ENOSECT; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); ++ DBC_Require(sectName != NULL); ++ ++ GT_2trace(DBL_debugMask, GT_ENTER, ++ "DBL_unloadSect: lib: 0x%x sectName: %s\n", lib, sectName); ++ dbl = lib->pTarget; ++ /* Check for match of sect name in overlay table */ ++ for (i = 0; i < lib->nOvlySects; i++) { ++ pName = lib->ppOvlyData[i]->hdr.pName; ++ if (!CSL_Strncmp(pName, sectName, CSL_Strlen(pName))) { ++ /* Match found */ ++ status = DSP_SOK; ++ break; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ DBC_Assert(i < lib->nOvlySects); ++ pOvlyData = lib->ppOvlyData[i]; ++ /* If node overlay, phase will be encoded in name. */ ++ phase = (CSL_Strcmp(pName, sectName)) ? 
++ CSL_Atoi(sectName + CSL_Strlen(sectName) - 1) : NONE; ++ switch (phase) { ++ case NONE: ++ nSects = numOtherSects(pOvlyData); ++ phaseRef = &pOvlyData->hdr.otherRef; ++ offset = otherOffset(pOvlyData); ++ break; ++ case CREATEPHASE: ++ nSects = numCreateSects(pOvlyData); ++ offset = createOffset(pOvlyData); ++ phaseRef = &pOvlyData->hdr.createRef; ++ break; ++ case DELETEPHASE: ++ nSects = numDeleteSects(pOvlyData); ++ offset = deleteOffset(pOvlyData); ++ phaseRef = &pOvlyData->hdr.deleteRef; ++ otherRef = &pOvlyData->hdr.otherRef; ++ break; ++ case EXECUTEPHASE: ++ nSects = numExecuteSects(pOvlyData); ++ offset = executeOffset(pOvlyData); ++ phaseRef = &pOvlyData->hdr.executeRef; ++ break; ++ default: ++ /* ERROR */ ++ DBC_Assert(false); ++ break; ++ } ++ if (*phaseRef) { ++ *phaseRef = *phaseRef - 1; ++ if (*phaseRef == 0) { ++ /* Unload overlay sections for phase */ ++ freeSects(dbl, pOvlyData, offset, nSects); ++ } ++ if (phase == DELETEPHASE) { ++ DBC_Assert(*otherRef); ++ *otherRef = *otherRef - 1; ++ if (*otherRef == 0) { ++ /* Unload other overlay sections */ ++ nSects = numOtherSects(pOvlyData); ++ offset = otherOffset(pOvlyData); ++ freeSects(dbl, pOvlyData, offset, ++ nSects); ++ } ++ } ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== freeSects ======== ++ * Purpose: ++ * Free section ++ */ ++static void freeSects(struct DBL_TargetObj *dbl, struct OvlyData *pOvlyData, ++ s32 offset, s32 nSects) ++{ ++ u32 runAddr; ++ u32 size; ++ u32 space; ++ s32 i; ++ ++ for (i = 0; i < nSects; i++) { ++ runAddr = pOvlyData->data[offset + i].runAddr; ++ size = pOvlyData->data[offset + i].size; ++ space = pOvlyData->data[offset + i].page; ++ if (!(dbl->dblAttrs.free) ++ (dbl->dblAttrs.rmmHandle, space, runAddr, size, true)) { ++ /* ++ * Free function will not fail for overlay, unless ++ * address passed in is bad. 
++ */
++ DBC_Assert(false);
++ }
++ }
++}
++
++/*
++ * ======== loadSect ========
++ * Purpose:
++ * Load section to target
++ */
++static DSP_STATUS loadSect(struct DBL_TargetObj *dbl,
++ struct DBL_LibraryObj *pdblLib)
++{
++ struct DBOF_SectHdr sectHdr;
++ char *pBuf;
++ struct KFILE_FileObj *file;
++ u32 space;
++ u32 addr;
++ u32 total;
++ u32 nWords = 0;
++ u32 nBytes = 0;
++ u16 mauSize;
++ u32 bufSize;
++ DSP_STATUS status = DSP_SOK;
++
++ file = pdblLib->file;
++ mauSize = pdblLib->pTargetInfo->mauSize;
++ bufSize = LOADBUFSIZE / mauSize;
++ pBuf = dbl->pBuf;
++
++ /* Read the section header */
++ if ((*dbl->dblAttrs.fread)(&sectHdr, sizeof(struct DBOF_SectHdr),
++ 1, file) != 1) {
++ GT_0trace(DBL_debugMask, GT_6CLASS,
++ "Failed to read DCD sect header\n");
++ status = DSP_EFREAD;
++ } else {
++ if (pdblLib->byteSwapped) {
++ sectHdr.size = SWAPLONG(sectHdr.size);
++ sectHdr.addr = SWAPLONG(sectHdr.addr);
++ sectHdr.page = SWAPWORD(sectHdr.page);
++ }
++ }
++ if (DSP_SUCCEEDED(status)) {
++ addr = sectHdr.addr;
++ space = sectHdr.page;
++ for (total = sectHdr.size; total > 0; total -= nWords) {
++ nWords = min(total, bufSize);
++ nBytes = nWords * mauSize;
++ /* Read section data */
++ if ((*dbl->dblAttrs.fread)(pBuf, nBytes, 1,
++ file) != 1) {
++ GT_0trace(DBL_debugMask, GT_6CLASS,
++ "Failed to read DCD sect header\n");
++ status = DSP_EFREAD;
++ break;
++ }
++ /* Write section to target */
++ if (!(*dbl->dblAttrs.write)(dbl->dblAttrs.wHandle,
++ addr, pBuf, nBytes, space)) {
++ GT_0trace(DBL_debugMask, GT_6CLASS,
++ "Failed to write section data\n");
++ status = DSP_EFWRITE;
++ break;
++ }
++ addr += nWords;
++ }
++ }
++ return status;
++}
++
++/*
++ * ======== readDCDSects ========
++ * Purpose:
++ * Read DCD sections.
++ */
++static DSP_STATUS readDCDSects(struct DBL_TargetObj *dbl,
++ struct DBL_LibraryObj *pdblLib)
++{
++ struct DBOF_DCDSectHdr *pSectHdr;
++ struct DCDSect *pSect;
++ struct KFILE_FileObj *file;
++ u16 nSects;
++ u16 i;
++ u16 mauSize;
++ DSP_STATUS status = DSP_SOK;
++
++ file = pdblLib->file;
++ mauSize = pdblLib->pTargetInfo->mauSize;
++ nSects = pdblLib->fileHdr.numDCDSects;
++ for (i = 0; i < nSects; i++) {
++ pSect = &pdblLib->dcdSects[i];
++ pSectHdr = &pdblLib->dcdSects[i].sectHdr;
++ /* Read sect header */
++ if ((*dbl->dblAttrs.fread)(pSectHdr,
++ sizeof(struct DBOF_DCDSectHdr), 1, file) != 1) {
++ GT_0trace(DBL_debugMask, GT_6CLASS,
++ "Failed to read DCD sect header\n");
++ status = DSP_EFREAD;
++ break;
++ }
++ if (pdblLib->byteSwapped)
++ pSectHdr->size = SWAPLONG(pSectHdr->size);
++
++ pSect->pData = (char *)MEM_Calloc(pSectHdr->size *
++ mauSize, MEM_PAGED);
++ if (pSect->pData == NULL) {
++ GT_2trace(DBL_debugMask, GT_6CLASS,
++ "Memory allocation for sect %s "
++ "data failed: Size: 0x%lx\n", pSectHdr->name,
++ pSectHdr->size);
++ status = DSP_EMEMORY;
++ break;
++ }
++ /* Read DCD sect data */
++ if ((*dbl->dblAttrs.fread)(pSect->pData, mauSize,
++ pSectHdr->size, file) != pSectHdr->size) {
++ GT_0trace(DBL_debugMask, GT_6CLASS,
++ "Failed to read DCD sect data\n");
++ status = DSP_EFREAD;
++ break;
++ }
++ }
++
++ return status;
++}
++
++/*
++ * ======== readHeader ========
++ * Purpose:
++ * Read Header.
++ */ ++static DSP_STATUS readHeader(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib) ++{ ++ struct KFILE_FileObj *file; ++ s32 i; ++ struct DBOF_FileHdr *pHdr; ++ u32 swapMagic; ++ DSP_STATUS status = DSP_SOK; ++ ++ pdblLib->byteSwapped = false; ++ file = pdblLib->file; ++ pHdr = &pdblLib->fileHdr; ++ if ((*dbl->dblAttrs.fread)(pHdr, sizeof(struct DBOF_FileHdr), 1, ++ file) != 1) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "readHeader: Failed to read file header\n"); ++ status = DSP_EFREAD; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Determine if byte swapped */ ++ for (i = 0; i < NUMTARGS; i++) { ++ swapMagic = SWAPLONG(pHdr->magic); ++ if (pHdr->magic == magicTab[i] || swapMagic == ++ magicTab[i]) { ++ if (swapMagic == magicTab[i]) { ++ pdblLib->byteSwapped = true; ++ pHdr->magic = SWAPLONG(pHdr->magic); ++ pHdr->entry = SWAPLONG(pHdr->entry); ++ pHdr->symOffset = SWAPLONG(pHdr-> ++ symOffset); ++ pHdr->dcdSectOffset = SWAPLONG(pHdr-> ++ dcdSectOffset); ++ pHdr->loadSectOffset = SWAPLONG(pHdr-> ++ loadSectOffset); ++ pHdr->ovlySectOffset = SWAPLONG(pHdr-> ++ ovlySectOffset); ++ pHdr->numSymbols = SWAPWORD(pHdr-> ++ numSymbols); ++ pHdr->numDCDSects = SWAPWORD(pHdr-> ++ numDCDSects); ++ pHdr->numSects = SWAPWORD(pHdr-> ++ numSects); ++ pHdr->numOvlySects = SWAPWORD(pHdr-> ++ numOvlySects); ++ } ++ break; ++ } ++ } ++ if (i == NUMTARGS) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "readHeader: Failed to determine" ++ " target type\n"); ++ status = DSP_ECORRUPTFILE; ++ } else { ++ pdblLib->pTargetInfo = &targetTab[i]; ++ GT_1trace(DBL_debugMask, GT_ENTER, ++ "COF type: 0x%lx\n", pHdr->magic); ++ GT_1trace(DBL_debugMask, GT_ENTER, ++ "Entry point:0x%lx\n", pHdr->entry); ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== readOvlySects ======== ++ * Purpose: ++ * Read Overlay Sections ++ */ ++static DSP_STATUS readOvlySects(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib) ++{ ++ struct DBOF_OvlySectHdr hdr; ++ struct DBOF_OvlySectData *pData; ++ struct OvlyData *pOvlyData; ++ char *pName; ++ struct KFILE_FileObj *file; ++ u16 i, j; ++ u16 nSects; ++ u16 n; ++ DSP_STATUS status = DSP_SOK; ++ ++ pdblLib->nOvlySects = nSects = pdblLib->fileHdr.numOvlySects; ++ file = pdblLib->file; ++ if (nSects > 0) { ++ pdblLib->ppOvlyData = MEM_Calloc(nSects * sizeof(OvlyData *), ++ MEM_PAGED); ++ if (pdblLib->ppOvlyData == NULL) { ++ GT_0trace(DBL_debugMask, GT_7CLASS, ++ "Failed to allocatate overlay " ++ "data memory\n"); ++ status = DSP_EMEMORY; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Read overlay data for each node */ ++ for (i = 0; i < nSects; i++) { ++ /* Read overlay section header */ ++ if ((*dbl->dblAttrs.fread)(&hdr, ++ sizeof(struct DBOF_OvlySectHdr), 1, file) != 1) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "Failed to read overlay sect" ++ " header\n"); ++ status = DSP_EFREAD; ++ break; ++ } ++ if (pdblLib->byteSwapped) { ++ hdr.nameLen = SWAPWORD(hdr.nameLen); ++ hdr.numCreateSects = ++ SWAPWORD(hdr.numCreateSects); ++ hdr.numDeleteSects = ++ SWAPWORD(hdr.numDeleteSects); ++ hdr.numExecuteSects = ++ SWAPWORD(hdr.numExecuteSects); ++ hdr.numOtherSects = ++ SWAPWORD(hdr.numOtherSects); ++ hdr.resvd = SWAPWORD(hdr.resvd); ++ } ++ n = hdr.numCreateSects + hdr.numDeleteSects + ++ hdr.numExecuteSects + hdr.numOtherSects; ++ ++ /* Allocate memory for node's overlay data */ ++ pOvlyData = (struct OvlyData *)MEM_Calloc ++ (sizeof(struct OvlyHdr) + ++ n * sizeof(struct DBOF_OvlySectData), ++ MEM_PAGED); ++ if (pOvlyData == NULL) { ++ GT_0trace(DBL_debugMask, 
GT_7CLASS, ++ "Failed to allocatate ovlyay" ++ " data memory\n"); ++ status = DSP_EMEMORY; ++ break; ++ } ++ pOvlyData->hdr.dbofHdr = hdr; ++ pdblLib->ppOvlyData[i] = pOvlyData; ++ /* Allocate memory for section name */ ++ pName = (char *)MEM_Calloc(hdr.nameLen + 1, MEM_PAGED); ++ if (pName == NULL) { ++ GT_0trace(DBL_debugMask, GT_7CLASS, ++ "Failed to allocatate ovlyay" ++ " section name\n"); ++ status = DSP_EMEMORY; ++ break; ++ } ++ pOvlyData->hdr.pName = pName; ++ /* Read the overlay section name */ ++ if ((*dbl->dblAttrs.fread)(pName, sizeof(char), ++ hdr.nameLen, file) != hdr.nameLen) { ++ GT_0trace(DBL_debugMask, GT_7CLASS, ++ "readOvlySects: Unable to " ++ "read overlay name.\n"); ++ status = DSP_EFREAD; ++ break; ++ } ++ /* Read the overlay section data */ ++ pData = pOvlyData->data; ++ if ((*dbl->dblAttrs.fread)(pData, ++ sizeof(struct DBOF_OvlySectData), n, file) != n) { ++ GT_0trace(DBL_debugMask, GT_7CLASS, ++ "readOvlySects: Unable to " ++ "read overlay data.\n"); ++ status = DSP_EFREAD; ++ break; ++ } ++ /* Swap overlay data, if necessary */ ++ if (pdblLib->byteSwapped) { ++ for (j = 0; j < n; j++) { ++ pData[j].loadAddr = ++ SWAPLONG(pData[j].loadAddr); ++ pData[j].runAddr = ++ SWAPLONG(pData[j].runAddr); ++ pData[j].size = ++ SWAPLONG(pData[j].size); ++ pData[j].page = ++ SWAPWORD(pData[j].page); ++ } ++ } ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== readSymbols ======== ++ * Purpose: ++ * Read Symbols ++ */ ++static DSP_STATUS readSymbols(struct DBL_TargetObj *dbl, ++ struct DBL_LibraryObj *pdblLib) ++{ ++ struct DBOF_SymbolHdr symHdr; ++ struct KFILE_FileObj *file; ++ u16 i; ++ u16 nSymbols; ++ u16 len; ++ char *pName = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ file = pdblLib->file; ++ ++ nSymbols = pdblLib->fileHdr.numSymbols; ++ ++ for (i = 0; i < nSymbols; i++) { ++ /* Read symbol value */ ++ if ((*dbl->dblAttrs.fread)(&symHdr, ++ sizeof(struct DBOF_SymbolHdr), 1, file) != 1) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "Failed to read symbol value\n"); ++ status = DSP_EFREAD; ++ break; ++ } ++ if (pdblLib->byteSwapped) { ++ symHdr.nameLen = SWAPWORD(symHdr.nameLen); ++ symHdr.value = SWAPLONG(symHdr.value); ++ } ++ /* Allocate buffer for symbol name */ ++ len = symHdr.nameLen; ++ pName = (char *)MEM_Calloc(len + 1, MEM_PAGED); ++ if (pName == NULL) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ break; ++ } ++ pdblLib->symbols[i].pSymName = pName; ++ pdblLib->symbols[i].sym.value = symHdr.value; ++ /* Read symbol name */ ++ if ((*dbl->dblAttrs.fread) (pName, sizeof(char), len, file) != ++ len) { ++ GT_0trace(DBL_debugMask, GT_6CLASS, ++ "Failed to read symbol value\n"); ++ status = DSP_EFREAD; ++ break; ++ } else { ++ pName[len] = '\0'; ++ GT_2trace(DBL_debugMask, GT_ENTER, ++ "Symbol: %s Value: 0x%lx\n", ++ pName, symHdr.value); ++ } ++ } ++ return status; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbll.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dbll.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbll.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dbll.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1572 @@ ++/* ++ * dbll.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== dbll.c ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 25-Apr-2030 map: Fixed symbol redefinition bug + unload and return error ++ *! 08-Apr-2003 map: Consolidated DBL with DBLL loader name ++ *! 24-Mar-2003 map: Updated findSymbol to support dllview update ++ *! 23-Jan-2003 map: Updated rmmAlloc to support memory granularity ++ *! 21-Nov-2002 map: Combine fopen and DLOAD_module_open to increase ++ *! performance on start. ++ *! 04-Oct-2002 map: Integrated new TIP dynamic loader w/ DOF api. ++ *! 27-Sep-2002 map: Changed handle passed to RemoteFree, instead of ++ *! RMM_free; added GT_trace to rmmDealloc ++ *! 20-Sep-2002 map: Updated from Code Review ++ *! 08-Aug-2002 jeh: Updated to support overlays. ++ *! 25-Jun-2002 jeh: Pass RMM_Addr object to alloc function in rmmAlloc(). ++ *! 20-Mar-2002 jeh: Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* Dynamic loader library interface */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++#define DBLL_TARGSIGNATURE 0x544c4c44 /* "TLLD" */ ++#define DBLL_LIBSIGNATURE 0x4c4c4c44 /* "LLLD" */ ++ ++/* Number of buckets for symbol hash table */ ++#define MAXBUCKETS 211 ++ ++/* Max buffer length */ ++#define MAXEXPR 128 ++ ++#ifndef UINT32_C ++#define UINT32_C(zzz) ((uint32_t)zzz) ++#endif ++#define DOFF_ALIGN(x) (((x) + 3) & ~UINT32_C(3)) ++ ++/* ++ * ======== struct DBLL_TarObj* ======== ++ * A target may have one or more libraries of symbols/code/data loaded ++ * onto it, where a library is simply the symbols/code/data contained ++ * in a DOFF file. ++ */ ++/* ++ * ======== DBLL_TarObj ======== ++ */ ++struct DBLL_TarObj { ++ u32 dwSignature; /* For object validation */ ++ struct DBLL_Attrs attrs; ++ struct DBLL_LibraryObj *head; /* List of all opened libraries */ ++} ; ++ ++/* ++ * The following 4 typedefs are "super classes" of the dynamic loader ++ * library types used in dynamic loader functions (dynamic_loader.h). 
++ */ ++/* ++ * ======== DBLLStream ======== ++ * Contains Dynamic_Loader_Stream ++ */ ++struct DBLLStream { ++ struct Dynamic_Loader_Stream dlStream; ++ struct DBLL_LibraryObj *lib; ++} ; ++ ++/* ++ * ======== DBLLSymbol ======== ++ */ ++struct DBLLSymbol { ++ struct Dynamic_Loader_Sym dlSymbol; ++ struct DBLL_LibraryObj *lib; ++} ; ++ ++/* ++ * ======== DBLLAlloc ======== ++ */ ++ struct DBLLAlloc { ++ struct Dynamic_Loader_Allocate dlAlloc; ++ struct DBLL_LibraryObj *lib; ++} ; ++ ++/* ++ * ======== DBLLInit ======== ++ */ ++struct DBLLInit { ++ struct Dynamic_Loader_Initialize dlInit; ++ struct DBLL_LibraryObj *lib; ++}; ++ ++/* ++ * ======== DBLL_Library ======== ++ * A library handle is returned by DBLL_Open() and is passed to DBLL_load() ++ * to load symbols/code/data, and to DBLL_unload(), to remove the ++ * symbols/code/data loaded by DBLL_load(). ++ */ ++ ++/* ++ * ======== DBLL_LibraryObj ======== ++ */ ++ struct DBLL_LibraryObj { ++ u32 dwSignature; /* For object validation */ ++ struct DBLL_LibraryObj *next; /* Next library in target's list */ ++ struct DBLL_LibraryObj *prev; /* Previous in the list */ ++ struct DBLL_TarObj *pTarget; /* target for this library */ ++ ++ /* Objects needed by dynamic loader */ ++ struct DBLLStream stream; ++ struct DBLLSymbol symbol; ++ struct DBLLAlloc allocate; ++ struct DBLLInit init; ++ DLOAD_mhandle mHandle; ++ ++ char *fileName; /* COFF file name */ ++ void *fp; /* Opaque file handle */ ++ u32 entry; /* Entry point */ ++ DLOAD_mhandle desc; /* desc of DOFF file loaded */ ++ u32 openRef; /* Number of times opened */ ++ u32 loadRef; /* Number of times loaded */ ++ struct GH_THashTab *symTab; /* Hash table of symbols */ ++ u32 ulPos; ++} ; ++ ++/* ++ * ======== Symbol ======== ++ */ ++struct Symbol { ++ struct DBLL_Symbol value; ++ char *name; ++} ; ++extern bool bSymbolsReloaded; ++ ++static void dofClose(struct DBLL_LibraryObj *zlLib); ++static DSP_STATUS dofOpen(struct DBLL_LibraryObj *zlLib); ++static s32 NoOp(struct Dynamic_Loader_Initialize *thisptr, void *bufr, ++ LDR_ADDR locn, struct LDR_SECTION_INFO *info, unsigned bytsiz); ++ ++/* ++ * Functions called by dynamic loader ++ * ++ */ ++/* Dynamic_Loader_Stream */ ++static int readBuffer(struct Dynamic_Loader_Stream *this, void *buffer, ++ unsigned bufsize); ++static int setFilePosn(struct Dynamic_Loader_Stream *this, unsigned int pos); ++/* Dynamic_Loader_Sym */ ++static struct dynload_symbol *findSymbol(struct Dynamic_Loader_Sym *this, ++ const char *name); ++static struct dynload_symbol *addToSymbolTable(struct Dynamic_Loader_Sym *this, ++ const char *name, ++ unsigned moduleId); ++static struct dynload_symbol *findInSymbolTable(struct Dynamic_Loader_Sym *this, ++ const char *name, ++ unsigned moduleid); ++static void purgeSymbolTable(struct Dynamic_Loader_Sym *this, ++ unsigned moduleId); ++static void *allocate(struct Dynamic_Loader_Sym *this, unsigned memsize); ++static void deallocate(struct Dynamic_Loader_Sym *this, void *memPtr); ++static void errorReport(struct Dynamic_Loader_Sym *this, const char *errstr, ++ va_list args); ++/* Dynamic_Loader_Allocate */ ++static int rmmAlloc(struct Dynamic_Loader_Allocate *this, ++ struct LDR_SECTION_INFO *info, unsigned align); ++static void rmmDealloc(struct Dynamic_Loader_Allocate *this, ++ struct LDR_SECTION_INFO *info); ++ ++/* Dynamic_Loader_Initialize */ ++static int connect(struct Dynamic_Loader_Initialize *this); ++static int readMem(struct Dynamic_Loader_Initialize *this, void *buf, ++ LDR_ADDR addr, struct LDR_SECTION_INFO *info, 
++ unsigned nbytes); ++static int writeMem(struct Dynamic_Loader_Initialize *this, void *buf, ++ LDR_ADDR addr, struct LDR_SECTION_INFO *info, ++ unsigned nbytes); ++static int fillMem(struct Dynamic_Loader_Initialize *this, LDR_ADDR addr, ++ struct LDR_SECTION_INFO *info, unsigned nbytes, ++ unsigned val); ++static int execute(struct Dynamic_Loader_Initialize *this, LDR_ADDR start); ++static void release(struct Dynamic_Loader_Initialize *this); ++ ++/* symbol table hash functions */ ++static u16 nameHash(void *name, u16 maxBucket); ++static bool nameMatch(void *name, void *sp); ++static void symDelete(void *sp); ++ ++#if GT_TRACE ++static struct GT_Mask DBLL_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static u32 cRefs; /* module reference count */ ++ ++/* Symbol Redefinition */ ++static int bRedefinedSymbol; ++static int bGblSearch = 1; ++ ++/* ++ * ======== DBLL_close ======== ++ */ ++void DBLL_close(struct DBLL_LibraryObj *zlLib) ++{ ++ struct DBLL_TarObj *zlTarget; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); ++ DBC_Require(zlLib->openRef > 0); ++ zlTarget = zlLib->pTarget; ++ GT_1trace(DBLL_debugMask, GT_ENTER, "DBLL_close: lib: 0x%x\n", zlLib); ++ zlLib->openRef--; ++ if (zlLib->openRef == 0) { ++ /* Remove library from list */ ++ if (zlTarget->head == zlLib) ++ zlTarget->head = zlLib->next; ++ ++ if (zlLib->prev) ++ (zlLib->prev)->next = zlLib->next; ++ ++ if (zlLib->next) ++ (zlLib->next)->prev = zlLib->prev; ++ ++ /* Free DOF resources */ ++ dofClose(zlLib); ++ if (zlLib->fileName) ++ MEM_Free(zlLib->fileName); ++ ++ /* remove symbols from symbol table */ ++ if (zlLib->symTab) ++ GH_delete(zlLib->symTab); ++ ++ /* remove the library object itself */ ++ MEM_FreeObject(zlLib); ++ zlLib = NULL; ++ } ++} ++ ++/* ++ * ======== DBLL_create ======== ++ */ ++DSP_STATUS DBLL_create(struct DBLL_TarObj **pTarget, struct DBLL_Attrs *pAttrs) ++{ ++ struct DBLL_TarObj *pzlTarget; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pAttrs != NULL); ++ DBC_Require(pTarget != NULL); ++ ++ GT_2trace(DBLL_debugMask, GT_ENTER, ++ "DBLL_create: pTarget: 0x%x pAttrs: " ++ "0x%x\n", pTarget, pAttrs); ++ /* Allocate DBL target object */ ++ MEM_AllocObject(pzlTarget, struct DBLL_TarObj, DBLL_TARGSIGNATURE); ++ if (pTarget != NULL) { ++ if (pzlTarget == NULL) { ++ GT_0trace(DBLL_debugMask, GT_6CLASS, ++ "DBLL_create: Memory allocation" ++ " failed\n"); ++ *pTarget = NULL; ++ status = DSP_EMEMORY; ++ } else { ++ pzlTarget->attrs = *pAttrs; ++ *pTarget = (struct DBLL_TarObj *)pzlTarget; ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle(((struct DBLL_TarObj *)(*pTarget)), ++ DBLL_TARGSIGNATURE)) || (DSP_FAILED(status) && ++ *pTarget == NULL)); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DBLL_delete ======== ++ */ ++void DBLL_delete(struct DBLL_TarObj *target) ++{ ++ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); ++ ++ GT_1trace(DBLL_debugMask, GT_ENTER, "DBLL_delete: target: 0x%x\n", ++ target); ++ ++ if (zlTarget != NULL) ++ MEM_FreeObject(zlTarget); ++ ++} ++ ++/* ++ * ======== DBLL_exit ======== ++ * Discontinue usage of DBL module. 
++ */ ++void DBLL_exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(DBLL_debugMask, GT_5CLASS, "DBLL_exit() ref count: 0x%x\n", ++ cRefs); ++ ++ if (cRefs == 0) { ++ MEM_Exit(); ++ CSL_Exit(); ++ GH_exit(); ++#if GT_TRACE ++ DBLL_debugMask.flags = NULL; ++#endif ++ } ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== DBLL_getAddr ======== ++ * Get address of name in the specified library. ++ */ ++bool DBLL_getAddr(struct DBLL_LibraryObj *zlLib, char *name, ++ struct DBLL_Symbol **ppSym) ++{ ++ struct Symbol *sym; ++ bool status = false; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); ++ DBC_Require(name != NULL); ++ DBC_Require(ppSym != NULL); ++ DBC_Require(zlLib->symTab != NULL); ++ ++ GT_3trace(DBLL_debugMask, GT_ENTER, ++ "DBLL_getAddr: lib: 0x%x name: %s pAddr:" ++ " 0x%x\n", zlLib, name, ppSym); ++ sym = (struct Symbol *)GH_find(zlLib->symTab, name); ++ if (sym != NULL) { ++ *ppSym = &sym->value; ++ status = true; ++ } ++ return status; ++} ++ ++/* ++ * ======== DBLL_getAttrs ======== ++ * Retrieve the attributes of the target. ++ */ ++void DBLL_getAttrs(struct DBLL_TarObj *target, struct DBLL_Attrs *pAttrs) ++{ ++ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); ++ DBC_Require(pAttrs != NULL); ++ ++ if ((pAttrs != NULL) && (zlTarget != NULL)) ++ *pAttrs = zlTarget->attrs; ++ ++} ++ ++/* ++ * ======== DBLL_getCAddr ======== ++ * Get address of a "C" name in the specified library. ++ */ ++bool DBLL_getCAddr(struct DBLL_LibraryObj *zlLib, char *name, ++ struct DBLL_Symbol **ppSym) ++{ ++ struct Symbol *sym; ++ char cname[MAXEXPR + 1]; ++ bool status = false; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); ++ DBC_Require(ppSym != NULL); ++ DBC_Require(zlLib->symTab != NULL); ++ DBC_Require(name != NULL); ++ ++ cname[0] = '_'; ++ ++ strncpy(cname + 1, name, sizeof(cname) - 2); ++ cname[MAXEXPR] = '\0'; /* insure '\0' string termination */ ++ ++ /* Check for C name, if not found */ ++ sym = (struct Symbol *)GH_find(zlLib->symTab, cname); ++ ++ if (sym != NULL) { ++ *ppSym = &sym->value; ++ status = true; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DBLL_getSect ======== ++ * Get the base address and size (in bytes) of a COFF section. ++ */ ++DSP_STATUS DBLL_getSect(struct DBLL_LibraryObj *lib, char *name, u32 *pAddr, ++ u32 *pSize) ++{ ++ u32 uByteSize; ++ bool fOpenedDoff = false; ++ const struct LDR_SECTION_INFO *sect = NULL; ++ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(name != NULL); ++ DBC_Require(pAddr != NULL); ++ DBC_Require(pSize != NULL); ++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); ++ ++ GT_4trace(DBLL_debugMask, GT_ENTER, ++ "DBLL_getSect: lib: 0x%x name: %s pAddr:" ++ " 0x%x pSize: 0x%x\n", lib, name, pAddr, pSize); ++ /* If DOFF file is not open, we open it. 
*/
++ if (zlLib != NULL) {
++ if (zlLib->fp == NULL) {
++ status = dofOpen(zlLib);
++ if (DSP_SUCCEEDED(status))
++ fOpenedDoff = true;
++
++ } else {
++ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp,
++ zlLib->ulPos, SEEK_SET);
++ }
++ } else {
++ status = DSP_EHANDLE;
++ }
++ if (DSP_SUCCEEDED(status)) {
++ uByteSize = 1;
++ if (DLOAD_GetSectionInfo(zlLib->desc, name, &sect)) {
++ *pAddr = sect->load_addr;
++ *pSize = sect->size * uByteSize;
++ /* Make sure size is even for good swap */
++ if (*pSize % 2)
++ (*pSize)++;
++
++ /* Align size */
++ *pSize = DOFF_ALIGN(*pSize);
++ } else {
++ status = DSP_ENOSECT;
++ }
++ }
++ if (fOpenedDoff) {
++ dofClose(zlLib);
++ fOpenedDoff = false;
++ }
++
++ return status;
++}
++
++/*
++ * ======== DBLL_init ========
++ */
++bool DBLL_init(void)
++{
++ bool retVal = true;
++
++ DBC_Require(cRefs >= 0);
++
++ if (cRefs == 0) {
++ DBC_Assert(!DBLL_debugMask.flags);
++ GT_create(&DBLL_debugMask, "DL"); /* "DL" for dbDL */
++ GH_init();
++ CSL_Init();
++ retVal = MEM_Init();
++ if (!retVal)
++ MEM_Exit();
++
++ }
++
++ if (retVal)
++ cRefs++;
++
++
++ GT_1trace(DBLL_debugMask, GT_5CLASS, "DBLL_init(), ref count: 0x%x\n",
++ cRefs);
++
++ DBC_Ensure((retVal && (cRefs > 0)) || (!retVal && (cRefs >= 0)));
++
++ return retVal;
++}
++
++/*
++ * ======== DBLL_load ========
++ */
++DSP_STATUS DBLL_load(struct DBLL_LibraryObj *lib, DBLL_Flags flags,
++ struct DBLL_Attrs *attrs, u32 *pEntry)
++{
++ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib;
++ struct DBLL_TarObj *dbzl;
++ bool gotSymbols = true;
++ s32 err;
++ DSP_STATUS status = DSP_SOK;
++ bool fOpenedDoff = false;
++ DBC_Require(cRefs > 0);
++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE));
++ DBC_Require(pEntry != NULL);
++ DBC_Require(attrs != NULL);
++
++ GT_4trace(DBLL_debugMask, GT_ENTER,
++ "DBLL_load: lib: 0x%x flags: 0x%x pEntry:"
++ " 0x%x\n", lib, flags, attrs, pEntry);
++ /*
++ * Load if not already loaded.
++ */
++ if (zlLib->loadRef == 0 || !(flags & DBLL_DYNAMIC)) {
++ dbzl = zlLib->pTarget;
++ dbzl->attrs = *attrs;
++ /* Create a hash table for symbols if not already created */
++ if (zlLib->symTab == NULL) {
++ gotSymbols = false;
++ zlLib->symTab = GH_create(MAXBUCKETS,
++ sizeof(struct Symbol),
++ nameHash,
++ nameMatch, symDelete);
++ if (zlLib->symTab == NULL)
++ status = DSP_EMEMORY;
++
++ }
++ /*
++ * Set up objects needed by the dynamic loader
++ */
++ /* Stream */
++ zlLib->stream.dlStream.read_buffer = readBuffer;
++ zlLib->stream.dlStream.set_file_posn = setFilePosn;
++ zlLib->stream.lib = zlLib;
++ /* Symbol */
++ zlLib->symbol.dlSymbol.Find_Matching_Symbol = findSymbol;
++ if (gotSymbols) {
++ zlLib->symbol.dlSymbol.Add_To_Symbol_Table =
++ findInSymbolTable;
++ } else {
++ zlLib->symbol.dlSymbol.Add_To_Symbol_Table =
++ addToSymbolTable;
++ }
++ zlLib->symbol.dlSymbol.Purge_Symbol_Table = purgeSymbolTable;
++ zlLib->symbol.dlSymbol.Allocate = allocate;
++ zlLib->symbol.dlSymbol.Deallocate = deallocate;
++ zlLib->symbol.dlSymbol.Error_Report = errorReport;
++ zlLib->symbol.lib = zlLib;
++ /* Allocate */
++ zlLib->allocate.dlAlloc.Allocate = rmmAlloc;
++ zlLib->allocate.dlAlloc.Deallocate = rmmDealloc;
++ zlLib->allocate.lib = zlLib;
++ /* Init */
++ zlLib->init.dlInit.connect = connect;
++ zlLib->init.dlInit.readmem = readMem;
++ zlLib->init.dlInit.writemem = writeMem;
++ zlLib->init.dlInit.fillmem = fillMem;
++ zlLib->init.dlInit.execute = execute;
++ zlLib->init.dlInit.release = release;
++ zlLib->init.lib = zlLib;
++ /* If COFF file is not open, we open it. 
*/ ++ if (zlLib->fp == NULL) { ++ status = dofOpen(zlLib); ++ if (DSP_SUCCEEDED(status)) ++ fOpenedDoff = true; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ zlLib->ulPos = (*(zlLib->pTarget->attrs.ftell)) ++ (zlLib->fp); ++ /* Reset file cursor */ ++ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long)0, ++ SEEK_SET); ++ bSymbolsReloaded = true; ++ /* The 5th argument, DLOAD_INITBSS, tells the DLL ++ * module to zero-init all BSS sections. In general, ++ * this is not necessary and also increases load time. ++ * We may want to make this configurable by the user */ ++ err = Dynamic_Load_Module(&zlLib->stream.dlStream, ++ &zlLib->symbol.dlSymbol, &zlLib->allocate.dlAlloc, ++ &zlLib->init.dlInit, DLOAD_INITBSS, ++ &zlLib->mHandle); ++ ++ if (err != 0) { ++ GT_1trace(DBLL_debugMask, GT_6CLASS, ++ "DBLL_load: " ++ "Dynamic_Load_Module failed: 0x%lx\n", ++ err); ++ status = DSP_EDYNLOAD; ++ } else if (bRedefinedSymbol) { ++ zlLib->loadRef++; ++ DBLL_unload(zlLib, (struct DBLL_Attrs *) attrs); ++ bRedefinedSymbol = false; ++ status = DSP_EDYNLOAD; ++ } else { ++ *pEntry = zlLib->entry; ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) ++ zlLib->loadRef++; ++ ++ /* Clean up DOFF resources */ ++ if (fOpenedDoff) ++ dofClose(zlLib); ++ ++ DBC_Ensure(DSP_FAILED(status) || zlLib->loadRef > 0); ++ return status; ++} ++ ++/* ++ * ======== DBLL_loadSect ======== ++ * Not supported for COFF. ++ */ ++DSP_STATUS DBLL_loadSect(struct DBLL_LibraryObj *zlLib, char *sectName, ++ struct DBLL_Attrs *attrs) ++{ ++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); ++ ++ return DSP_ENOTIMPL; ++} ++ ++/* ++ * ======== DBLL_open ======== ++ */ ++DSP_STATUS DBLL_open(struct DBLL_TarObj *target, char *file, DBLL_Flags flags, ++ struct DBLL_LibraryObj **pLib) ++{ ++ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; ++ struct DBLL_LibraryObj *zlLib = NULL; ++ s32 err; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); ++ DBC_Require(zlTarget->attrs.fopen != NULL); ++ DBC_Require(file != NULL); ++ DBC_Require(pLib != NULL); ++ ++ GT_3trace(DBLL_debugMask, GT_ENTER, ++ "DBLL_open: target: 0x%x file: %s pLib:" ++ " 0x%x\n", target, file, pLib); ++ zlLib = zlTarget->head; ++ while (zlLib != NULL) { ++ if (strcmp(zlLib->fileName, file) == 0) { ++ /* Library is already opened */ ++ zlLib->openRef++; ++ break; ++ } ++ zlLib = zlLib->next; ++ } ++ if (zlLib == NULL) { ++ /* Allocate DBL library object */ ++ MEM_AllocObject(zlLib, struct DBLL_LibraryObj, ++ DBLL_LIBSIGNATURE); ++ if (zlLib == NULL) { ++ GT_0trace(DBLL_debugMask, GT_6CLASS, ++ "DBLL_open: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } else { ++ zlLib->ulPos = 0; ++ /* Increment ref count to allow close on failure ++ * later on */ ++ zlLib->openRef++; ++ zlLib->pTarget = zlTarget; ++ /* Keep a copy of the file name */ ++ zlLib->fileName = MEM_Calloc(strlen(file) + 1, ++ MEM_PAGED); ++ if (zlLib->fileName == NULL) { ++ GT_0trace(DBLL_debugMask, GT_6CLASS, ++ "DBLL_open: Memory " ++ "allocation failed\n"); ++ status = DSP_EMEMORY; ++ } else { ++ strncpy(zlLib->fileName, file, ++ strlen(file) + 1); ++ } ++ zlLib->symTab = NULL; ++ } ++ } ++ /* ++ * Set up objects needed by the dynamic loader ++ */ ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ /* Stream */ ++ zlLib->stream.dlStream.read_buffer = readBuffer; ++ zlLib->stream.dlStream.set_file_posn = setFilePosn; ++ zlLib->stream.lib = zlLib; ++ /* Symbol */ ++ zlLib->symbol.dlSymbol.Add_To_Symbol_Table = 
addToSymbolTable; ++ zlLib->symbol.dlSymbol.Find_Matching_Symbol = findSymbol; ++ zlLib->symbol.dlSymbol.Purge_Symbol_Table = purgeSymbolTable; ++ zlLib->symbol.dlSymbol.Allocate = allocate; ++ zlLib->symbol.dlSymbol.Deallocate = deallocate; ++ zlLib->symbol.dlSymbol.Error_Report = errorReport; ++ zlLib->symbol.lib = zlLib; ++ /* Allocate */ ++ zlLib->allocate.dlAlloc.Allocate = rmmAlloc; ++ zlLib->allocate.dlAlloc.Deallocate = rmmDealloc; ++ zlLib->allocate.lib = zlLib; ++ /* Init */ ++ zlLib->init.dlInit.connect = connect; ++ zlLib->init.dlInit.readmem = readMem; ++ zlLib->init.dlInit.writemem = writeMem; ++ zlLib->init.dlInit.fillmem = fillMem; ++ zlLib->init.dlInit.execute = execute; ++ zlLib->init.dlInit.release = release; ++ zlLib->init.lib = zlLib; ++ if (DSP_SUCCEEDED(status) && zlLib->fp == NULL) ++ status = dofOpen(zlLib); ++ ++ zlLib->ulPos = (*(zlLib->pTarget->attrs.ftell)) (zlLib->fp); ++ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long) 0, SEEK_SET); ++ /* Create a hash table for symbols if flag is set */ ++ if (zlLib->symTab != NULL || !(flags & DBLL_SYMB)) ++ goto func_cont; ++ ++ zlLib->symTab = GH_create(MAXBUCKETS, sizeof(struct Symbol), nameHash, ++ nameMatch, symDelete); ++ if (zlLib->symTab == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ /* Do a fake load to get symbols - set write function to NoOp */ ++ zlLib->init.dlInit.writemem = NoOp; ++ err = Dynamic_Open_Module(&zlLib->stream.dlStream, ++ &zlLib->symbol.dlSymbol, ++ &zlLib->allocate.dlAlloc, ++ &zlLib->init.dlInit, 0, ++ &zlLib->mHandle); ++ if (err != 0) { ++ GT_1trace(DBLL_debugMask, GT_6CLASS, "DBLL_open: " ++ "Dynamic_Load_Module failed: 0x%lx\n", err); ++ status = DSP_EDYNLOAD; ++ } else { ++ /* Now that we have the symbol table, we can unload */ ++ err = Dynamic_Unload_Module(zlLib->mHandle, ++ &zlLib->symbol.dlSymbol, ++ &zlLib->allocate.dlAlloc, ++ &zlLib->init.dlInit); ++ if (err != 0) { ++ GT_1trace(DBLL_debugMask, GT_6CLASS, ++ "DBLL_open: " ++ "Dynamic_Unload_Module failed: 0x%lx\n", ++ err); ++ status = DSP_EDYNLOAD; ++ } ++ zlLib->mHandle = NULL; ++ } ++ } ++func_cont: ++ if (DSP_SUCCEEDED(status)) { ++ if (zlLib->openRef == 1) { ++ /* First time opened - insert in list */ ++ if (zlTarget->head) ++ (zlTarget->head)->prev = zlLib; ++ ++ zlLib->prev = NULL; ++ zlLib->next = zlTarget->head; ++ zlTarget->head = zlLib; ++ } ++ *pLib = (struct DBLL_LibraryObj *)zlLib; ++ } else { ++ *pLib = NULL; ++ if (zlLib != NULL) ++ DBLL_close((struct DBLL_LibraryObj *)zlLib); ++ ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && (zlLib->openRef > 0) && ++ MEM_IsValidHandle(((struct DBLL_LibraryObj *)(*pLib)), ++ DBLL_LIBSIGNATURE)) || (DSP_FAILED(status) && *pLib == NULL)); ++ return status; ++} ++ ++/* ++ * ======== DBLL_readSect ======== ++ * Get the content of a COFF section. ++ */ ++DSP_STATUS DBLL_readSect(struct DBLL_LibraryObj *lib, char *name, ++ char *pContent, u32 size) ++{ ++ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; ++ bool fOpenedDoff = false; ++ u32 uByteSize; /* size of bytes */ ++ u32 ulSectSize; /* size of section */ ++ const struct LDR_SECTION_INFO *sect = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); ++ DBC_Require(name != NULL); ++ DBC_Require(pContent != NULL); ++ DBC_Require(size != 0); ++ ++ GT_4trace(DBLL_debugMask, GT_ENTER, ++ "DBLL_readSect: lib: 0x%x name: %s " ++ "pContent: 0x%x size: 0x%x\n", lib, name, pContent, size); ++ /* If DOFF file is not open, we open it. 
*/
++ if (zlLib != NULL) {
++ if (zlLib->fp == NULL) {
++ status = dofOpen(zlLib);
++ if (DSP_SUCCEEDED(status))
++ fOpenedDoff = true;
++
++ } else {
++ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp,
++ zlLib->ulPos, SEEK_SET);
++ }
++ } else {
++ status = DSP_EHANDLE;
++ }
++ if (DSP_FAILED(status))
++ goto func_cont;
++
++ uByteSize = 1;
++ if (!DLOAD_GetSectionInfo(zlLib->desc, name, &sect)) {
++ status = DSP_ENOSECT;
++ goto func_cont;
++ }
++ /*
++ * Ensure the supplied buffer size is sufficient to store
++ * the section content to be read.
++ */
++ ulSectSize = sect->size * uByteSize;
++ /* Make sure size is even for good swap */
++ if (ulSectSize % 2)
++ ulSectSize++;
++
++ /* Align size */
++ ulSectSize = DOFF_ALIGN(ulSectSize);
++ if (ulSectSize > size) {
++ status = DSP_EFAIL;
++ } else {
++ if (!DLOAD_GetSection(zlLib->desc, sect, pContent))
++ status = DSP_EFREAD;
++
++ }
++func_cont:
++ if (fOpenedDoff) {
++ dofClose(zlLib);
++ fOpenedDoff = false;
++ }
++ return status;
++}
++
++/*
++ * ======== DBLL_setAttrs ========
++ * Set the attributes of the target.
++ */
++void DBLL_setAttrs(struct DBLL_TarObj *target, struct DBLL_Attrs *pAttrs)
++{
++ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target;
++ DBC_Require(cRefs > 0);
++ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE));
++ DBC_Require(pAttrs != NULL);
++ GT_2trace(DBLL_debugMask, GT_ENTER,
++ "DBLL_setAttrs: target: 0x%x pAttrs: "
++ "0x%x\n", target, pAttrs);
++ if ((pAttrs != NULL) && (zlTarget != NULL))
++ zlTarget->attrs = *pAttrs;
++
++}
++
++/*
++ * ======== DBLL_unload ========
++ */
++void DBLL_unload(struct DBLL_LibraryObj *lib, struct DBLL_Attrs *attrs)
++{
++ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib;
++ s32 err = 0;
++
++ DBC_Require(cRefs > 0);
++ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE));
++ DBC_Require(zlLib->loadRef > 0);
++ GT_1trace(DBLL_debugMask, GT_ENTER, "DBLL_unload: lib: 0x%x\n", lib);
++ zlLib->loadRef--;
++ /* Unload only if reference count is 0 */
++ if (zlLib->loadRef != 0)
++ goto func_end;
++
++ zlLib->pTarget->attrs = *attrs;
++ if (zlLib != NULL) {
++ if (zlLib->mHandle) {
++ err = Dynamic_Unload_Module(zlLib->mHandle,
++ &zlLib->symbol.dlSymbol,
++ &zlLib->allocate.dlAlloc, &zlLib->init.dlInit);
++ if (err != 0) {
++ GT_1trace(DBLL_debugMask, GT_5CLASS,
++ "Dynamic_Unload_Module "
++ "failed: 0x%x\n", err);
++ }
++ }
++ /* remove symbols from symbol table */
++ if (zlLib->symTab != NULL) {
++ GH_delete(zlLib->symTab);
++ zlLib->symTab = NULL;
++ }
++ /* delete DOFF desc since it holds *lots* of host OS
++ * resources */
++ dofClose(zlLib);
++ }
++func_end:
++ DBC_Ensure(zlLib->loadRef >= 0);
++}
++
++/*
++ * ======== DBLL_unloadSect ========
++ * Not supported for COFF.
++ */ ++DSP_STATUS DBLL_unloadSect(struct DBLL_LibraryObj *lib, char *sectName, ++ struct DBLL_Attrs *attrs) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(sectName != NULL); ++ GT_2trace(DBLL_debugMask, GT_ENTER, ++ "DBLL_unloadSect: lib: 0x%x sectName: " ++ "%s\n", lib, sectName); ++ return DSP_ENOTIMPL; ++} ++ ++/* ++ * ======== dofClose ======== ++ */ ++static void dofClose(struct DBLL_LibraryObj *zlLib) ++{ ++ if (zlLib->desc) { ++ DLOAD_module_close(zlLib->desc); ++ zlLib->desc = NULL; ++ } ++ /* close file */ ++ if (zlLib->fp) { ++ (zlLib->pTarget->attrs.fclose) (zlLib->fp); ++ zlLib->fp = NULL; ++ } ++} ++ ++/* ++ * ======== dofOpen ======== ++ */ ++static DSP_STATUS dofOpen(struct DBLL_LibraryObj *zlLib) ++{ ++ void *open = *(zlLib->pTarget->attrs.fopen); ++ DSP_STATUS status = DSP_SOK; ++ ++ /* First open the file for the dynamic loader, then open COF */ ++ zlLib->fp = (void *)((DBLL_FOpenFxn)(open))(zlLib->fileName, "rb"); ++ ++ /* Open DOFF module */ ++ if (zlLib->fp && zlLib->desc == NULL) { ++ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long)0, SEEK_SET); ++ zlLib->desc = DLOAD_module_open(&zlLib->stream.dlStream, ++ &zlLib->symbol.dlSymbol); ++ if (zlLib->desc == NULL) { ++ (zlLib->pTarget->attrs.fclose)(zlLib->fp); ++ zlLib->fp = NULL; ++ status = DSP_EFOPEN; ++ } ++ } else { ++ status = DSP_EFOPEN; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== nameHash ======== ++ */ ++static u16 nameHash(void *key, u16 maxBucket) ++{ ++ u16 ret; ++ u16 hash; ++ char *name = (char *)key; ++ ++ DBC_Require(name != NULL); ++ ++ hash = 0; ++ ++ while (*name) { ++ hash <<= 1; ++ hash ^= *name++; ++ } ++ ++ ret = hash % maxBucket; ++ ++ return ret; ++} ++ ++/* ++ * ======== nameMatch ======== ++ */ ++static bool nameMatch(void *key, void *value) ++{ ++ DBC_Require(key != NULL); ++ DBC_Require(value != NULL); ++ ++ if ((key != NULL) && (value != NULL)) { ++ if (strcmp((char *)key, ((struct Symbol *)value)->name) == 0) ++ return true; ++ } ++ return false; ++} ++ ++/* ++ * ======== NoOp ======== ++ */ ++static int NoOp(struct Dynamic_Loader_Initialize *thisptr, void *bufr, ++ LDR_ADDR locn, struct LDR_SECTION_INFO *info, unsigned bytsize) ++{ ++ return 1; ++} ++ ++/* ++ * ======== symDelete ======== ++ */ ++static void symDelete(void *value) ++{ ++ struct Symbol *sp = (struct Symbol *)value; ++ ++ MEM_Free(sp->name); ++} ++ ++/* ++ * Dynamic Loader Functions ++ */ ++ ++/* Dynamic_Loader_Stream */ ++/* ++ * ======== readBuffer ======== ++ */ ++static int readBuffer(struct Dynamic_Loader_Stream *this, void *buffer, ++ unsigned bufsize) ++{ ++ struct DBLLStream *pStream = (struct DBLLStream *)this; ++ struct DBLL_LibraryObj *lib; ++ int bytesRead = 0; ++ ++ DBC_Require(this != NULL); ++ lib = pStream->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ if (lib != NULL) { ++ bytesRead = (*(lib->pTarget->attrs.fread))(buffer, 1, bufsize, ++ lib->fp); ++ } ++ return bytesRead; ++} ++ ++/* ++ * ======== setFilePosn ======== ++ */ ++static int setFilePosn(struct Dynamic_Loader_Stream *this, unsigned int pos) ++{ ++ struct DBLLStream *pStream = (struct DBLLStream *)this; ++ struct DBLL_LibraryObj *lib; ++ int status = 0; /* Success */ ++ ++ DBC_Require(this != NULL); ++ lib = pStream->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ if (lib != NULL) { ++ status = (*(lib->pTarget->attrs.fseek))(lib->fp, (long)pos, ++ SEEK_SET); ++ } ++ ++ return status; ++} ++ ++/* Dynamic_Loader_Sym */ ++ ++/* ++ * ======== findSymbol ======== ++ */ ++static struct 
dynload_symbol *findSymbol(struct Dynamic_Loader_Sym *this, ++ const char *name) ++{ ++ struct dynload_symbol *retSym; ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ struct DBLL_Symbol *pSym = NULL; ++ bool status = false; /* Symbol not found yet */ ++ ++ DBC_Require(this != NULL); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ if (lib != NULL) { ++ if (lib->pTarget->attrs.symLookup) { ++ /* Check current lib + base lib + dep lib + ++ * persistent lib */ ++ status = (*(lib->pTarget->attrs.symLookup)) ++ (lib->pTarget->attrs.symHandle, ++ lib->pTarget->attrs.symArg, ++ lib->pTarget->attrs.rmmHandle, name, &pSym); ++ } else { ++ /* Just check current lib for symbol */ ++ status = DBLL_getAddr((struct DBLL_LibraryObj *)lib, ++ (char *)name, &pSym); ++ if (!status) { ++ status = ++ DBLL_getCAddr((struct DBLL_LibraryObj *)lib, ++ (char *)name, &pSym); ++ } ++ } ++ } ++ ++ if (!status && bGblSearch) { ++ GT_1trace(DBLL_debugMask, GT_6CLASS, ++ "findSymbol: Symbol not found: %s\n", name); ++ } ++ ++ DBC_Assert((status && (pSym != NULL)) || (!status && (pSym == NULL))); ++ ++ retSym = (struct dynload_symbol *)pSym; ++ return retSym; ++} ++ ++/* ++ * ======== findInSymbolTable ======== ++ */ ++static struct dynload_symbol *findInSymbolTable(struct Dynamic_Loader_Sym *this, ++ const char *name, ++ unsigned moduleid) ++{ ++ struct dynload_symbol *retSym; ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ struct Symbol *sym; ++ ++ DBC_Require(this != NULL); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ DBC_Require(lib->symTab != NULL); ++ ++ sym = (struct Symbol *)GH_find(lib->symTab, (char *) name); ++ ++ retSym = (struct dynload_symbol *)&sym->value; ++ return retSym; ++} ++ ++/* ++ * ======== addToSymbolTable ======== ++ */ ++static struct dynload_symbol *addToSymbolTable(struct Dynamic_Loader_Sym *this, ++ const char *name, ++ unsigned moduleId) ++{ ++ struct Symbol *symPtr = NULL; ++ struct Symbol symbol; ++ struct dynload_symbol *pSym = NULL; ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ struct dynload_symbol *retVal; ++ ++ DBC_Require(this != NULL); ++ DBC_Require(name); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ /* Check to see if symbol is already defined in symbol table */ ++ if (!(lib->pTarget->attrs.baseImage)) { ++ bGblSearch = false; ++ pSym = findSymbol(this, name); ++ bGblSearch = true; ++ if (pSym) { ++ bRedefinedSymbol = true; ++ GT_1trace(DBLL_debugMask, GT_6CLASS, ++ "Symbol already defined in " ++ "symbol table: %s\n", name); ++ return NULL; ++ } ++ } ++ /* Allocate string to copy symbol name */ ++ symbol.name = (char *)MEM_Calloc(strlen((char *const)name) + 1, ++ MEM_PAGED); ++ if (symbol.name == NULL) ++ return NULL; ++ ++ if (symbol.name != NULL) { ++ /* Just copy name (value will be filled in by dynamic loader) */ ++ strncpy(symbol.name, (char *const)name, ++ strlen((char *const)name) + 1); ++ ++ /* Add symbol to symbol table */ ++ symPtr = (struct Symbol *)GH_insert(lib->symTab, (void *)name, ++ (void *)&symbol); ++ if (symPtr == NULL) ++ MEM_Free(symbol.name); ++ ++ } ++ if (symPtr != NULL) ++ retVal = (struct dynload_symbol *)&symPtr->value; ++ else ++ retVal = NULL; ++ ++ return retVal; ++} ++ ++/* ++ * ======== purgeSymbolTable ======== ++ */ ++static void purgeSymbolTable(struct Dynamic_Loader_Sym *this, unsigned 
moduleId) ++{ ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ ++ DBC_Require(this != NULL); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ /* May not need to do anything */ ++} ++ ++/* ++ * ======== allocate ======== ++ */ ++static void *allocate(struct Dynamic_Loader_Sym *this, unsigned memsize) ++{ ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ void *buf; ++ ++ DBC_Require(this != NULL); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ buf = MEM_Calloc(memsize, MEM_PAGED); ++ ++ return buf; ++} ++ ++/* ++ * ======== deallocate ======== ++ */ ++static void deallocate(struct Dynamic_Loader_Sym *this, void *memPtr) ++{ ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ ++ DBC_Require(this != NULL); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ MEM_Free(memPtr); ++} ++ ++/* ++ * ======== errorReport ======== ++ */ ++static void errorReport(struct Dynamic_Loader_Sym *this, const char *errstr, ++ va_list args) ++{ ++ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; ++ struct DBLL_LibraryObj *lib; ++ char tempBuf[MAXEXPR]; ++ ++ DBC_Require(this != NULL); ++ lib = pSymbol->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ vsnprintf((char *)tempBuf, MAXEXPR, (char *)errstr, args); ++ GT_1trace(DBLL_debugMask, GT_5CLASS, "%s\n", tempBuf); ++} ++ ++/* Dynamic_Loader_Allocate */ ++ ++/* ++ * ======== rmmAlloc ======== ++ */ ++static int rmmAlloc(struct Dynamic_Loader_Allocate *this, ++ struct LDR_SECTION_INFO *info, unsigned align) ++{ ++ struct DBLLAlloc *pAlloc = (struct DBLLAlloc *)this; ++ struct DBLL_LibraryObj *lib; ++ DSP_STATUS status = DSP_SOK; ++ u32 memType; ++ struct RMM_Addr rmmAddr; ++ s32 retVal = TRUE; ++ unsigned stype = DLOAD_SECTION_TYPE(info->type); ++ char *pToken = NULL; ++ char *szSecLastToken = NULL; ++ char *szLastToken = NULL; ++ char *szSectName = NULL; ++ char *pszCur; ++ s32 tokenLen = 0; ++ s32 segId = -1; ++ s32 req = -1; ++ s32 count = 0; ++ u32 allocSize = 0; ++ ++ DBC_Require(this != NULL); ++ lib = pAlloc->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ memType = (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == DLOAD_BSS) ? 
++ DBLL_BSS : DBLL_DATA; ++ ++ /* Attempt to extract the segment ID and requirement information from ++ the name of the section */ ++ DBC_Require(info->name); ++ tokenLen = strlen((char *)(info->name)) + 1; ++ ++ szSectName = MEM_Calloc(tokenLen, MEM_PAGED); ++ szLastToken = MEM_Calloc(tokenLen, MEM_PAGED); ++ szSecLastToken = MEM_Calloc(tokenLen, MEM_PAGED); ++ ++ if (szSectName == NULL || szSecLastToken == NULL || ++ szLastToken == NULL) { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ strncpy(szSectName, (char *)(info->name), tokenLen); ++ pszCur = szSectName; ++ while ((pToken = strsep(&pszCur, ":")) && *pToken != '\0') { ++ strncpy(szSecLastToken, szLastToken, strlen(szLastToken) + 1); ++ strncpy(szLastToken, pToken, strlen(pToken) + 1); ++ pToken = strsep(&pszCur, ":"); ++ count++; /* optimizes processing*/ ++ } ++ /* If pToken is 0 or 1, and szSecLastToken is DYN_DARAM or DYN_SARAM, ++ or DYN_EXTERNAL, then mem granularity information is present ++ within the section name - only process if there are at least three ++ tokens within the section name (just a minor optimization)*/ ++ if (count >= 3) ++ strict_strtol(szLastToken, 10, (long *)&req); ++ ++ if ((req == 0) || (req == 1)) { ++ if (strcmp(szSecLastToken, "DYN_DARAM") == 0) { ++ segId = 0; ++ } else { ++ if (strcmp(szSecLastToken, "DYN_SARAM") == 0) { ++ segId = 1; ++ } else { ++ if (strcmp(szSecLastToken, ++ "DYN_EXTERNAL") == 0) { ++ segId = 2; ++ } ++ } ++ } ++ if (segId != -1) { ++ GT_2trace(DBLL_debugMask, GT_5CLASS, ++ "Extracted values for memory" ++ " granularity req [%d] segId [%d]\n", ++ req, segId); ++ } ++ } ++ MEM_Free(szSectName); ++ szSectName = NULL; ++ MEM_Free(szLastToken); ++ szLastToken = NULL; ++ MEM_Free(szSecLastToken); ++ szSecLastToken = NULL; ++func_cont: ++ if (memType == DBLL_CODE) ++ allocSize = info->size + GEM_L1P_PREFETCH_SIZE; ++ else ++ allocSize = info->size; ++ /* TODO - ideally, we can pass the alignment requirement also ++ * from here */ ++ if (lib != NULL) { ++ status = (lib->pTarget->attrs.alloc)(lib->pTarget-> ++ attrs.rmmHandle, memType, allocSize, align, ++ (u32 *)&rmmAddr, segId, req, FALSE); ++ } ++ if (DSP_FAILED(status)) { ++ retVal = false; ++ } else { ++ /* RMM gives word address. Need to convert to byte address */ ++ info->load_addr = rmmAddr.addr * DSPWORDSIZE; ++ info->run_addr = info->load_addr; ++ info->context = (u32)rmmAddr.segid; ++ GT_3trace(DBLL_debugMask, GT_5CLASS, ++ "Remote alloc: %s base = 0x%lx len" ++ "= 0x%lx\n", info->name, info->load_addr / DSPWORDSIZE, ++ info->size / DSPWORDSIZE); ++ } ++ return retVal; ++} ++ ++/* ++ * ======== rmmDealloc ======== ++ */ ++static void rmmDealloc(struct Dynamic_Loader_Allocate *this, ++ struct LDR_SECTION_INFO *info) ++{ ++ struct DBLLAlloc *pAlloc = (struct DBLLAlloc *)this; ++ struct DBLL_LibraryObj *lib; ++ u32 segid; ++ DSP_STATUS status = DSP_SOK; ++ unsigned stype = DLOAD_SECTION_TYPE(info->type); ++ u32 memType; ++ u32 freeSize = 0; ++ ++ memType = (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == DLOAD_BSS) ? 
++ DBLL_BSS : DBLL_DATA; ++ DBC_Require(this != NULL); ++ lib = pAlloc->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ /* segid was set by alloc function */ ++ segid = (u32)info->context; ++ if (memType == DBLL_CODE) ++ freeSize = info->size + GEM_L1P_PREFETCH_SIZE; ++ else ++ freeSize = info->size; ++ if (lib != NULL) { ++ status = (lib->pTarget->attrs.free)(lib->pTarget-> ++ attrs.symHandle, segid, info->load_addr / DSPWORDSIZE, ++ freeSize, false); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ GT_2trace(DBLL_debugMask, GT_5CLASS, ++ "Remote dealloc: base = 0x%lx len =" ++ "0x%lx\n", info->load_addr / DSPWORDSIZE, ++ freeSize / DSPWORDSIZE); ++ } ++} ++ ++/* Dynamic_Loader_Initialize */ ++/* ++ * ======== connect ======== ++ */ ++static int connect(struct Dynamic_Loader_Initialize *this) ++{ ++ return true; ++} ++ ++/* ++ * ======== readMem ======== ++ * This function does not need to be implemented. ++ */ ++static int readMem(struct Dynamic_Loader_Initialize *this, void *buf, ++ LDR_ADDR addr, struct LDR_SECTION_INFO *info, ++ unsigned nbytes) ++{ ++ struct DBLLInit *pInit = (struct DBLLInit *)this; ++ struct DBLL_LibraryObj *lib; ++ int bytesRead = 0; ++ ++ DBC_Require(this != NULL); ++ lib = pInit->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ /* Need WMD_BRD_Read function */ ++ return bytesRead; ++} ++ ++/* ++ * ======== writeMem ======== ++ */ ++static int writeMem(struct Dynamic_Loader_Initialize *this, void *buf, ++ LDR_ADDR addr, struct LDR_SECTION_INFO *info, ++ unsigned nBytes) ++{ ++ struct DBLLInit *pInit = (struct DBLLInit *)this; ++ struct DBLL_LibraryObj *lib; ++ struct DBLL_TarObj *pTarget; ++ struct DBLL_SectInfo sectInfo; ++ u32 memType; ++ bool retVal = true; ++ ++ DBC_Require(this != NULL); ++ lib = pInit->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ ++ memType = (DLOAD_SECTION_TYPE(info->type) == DLOAD_TEXT) ? DBLL_CODE : ++ DBLL_DATA; ++ if ((lib != NULL) && ++ ((pTarget = lib->pTarget) != NULL) && ++ (pTarget->attrs.write != NULL)) { ++ retVal = (*pTarget->attrs.write)(pTarget->attrs.wHandle, ++ addr, buf, nBytes, memType); ++ ++ if (pTarget->attrs.logWrite) { ++ sectInfo.name = info->name; ++ sectInfo.runAddr = info->run_addr; ++ sectInfo.loadAddr = info->load_addr; ++ sectInfo.size = info->size; ++ sectInfo.type = memType; ++ /* Pass the information about what we've written to ++ * another module */ ++ (*pTarget->attrs.logWrite)( ++ pTarget->attrs.logWriteHandle, ++ §Info, addr, nBytes); ++ } ++ } ++ return retVal; ++} ++ ++/* ++ * ======== fillMem ======== ++ * Fill nBytes of memory at a given address with a given value by ++ * writing from a buffer containing the given value. Write in ++ * sets of MAXEXPR (128) bytes to avoid large stack buffer issues. ++ */ ++static int fillMem(struct Dynamic_Loader_Initialize *this, LDR_ADDR addr, ++ struct LDR_SECTION_INFO *info, unsigned nBytes, ++ unsigned val) ++{ ++ bool retVal = true; ++ char *pBuf; ++ struct DBLL_LibraryObj *lib; ++ struct DBLLInit *pInit = (struct DBLLInit *)this; ++ ++ DBC_Require(this != NULL); ++ lib = pInit->lib; ++ pBuf = NULL; ++ /* Pass the NULL pointer to writeMem to get the start address of Shared ++ memory. 
This is a trick to just get the start address, there is no ++ writing taking place with this Writemem ++ */ ++ if ((lib->pTarget->attrs.write) != (DBLL_WriteFxn)NoOp) ++ writeMem(this, &pBuf, addr, info, 0); ++ if (pBuf) ++ memset(pBuf, val, nBytes); ++ ++ return retVal; ++} ++ ++/* ++ * ======== execute ======== ++ */ ++static int execute(struct Dynamic_Loader_Initialize *this, LDR_ADDR start) ++{ ++ struct DBLLInit *pInit = (struct DBLLInit *)this; ++ struct DBLL_LibraryObj *lib; ++ bool retVal = true; ++ ++ DBC_Require(this != NULL); ++ lib = pInit->lib; ++ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); ++ /* Save entry point */ ++ if (lib != NULL) ++ lib->entry = (u32)start; ++ ++ return retVal; ++} ++ ++/* ++ * ======== release ======== ++ */ ++static void release(struct Dynamic_Loader_Initialize *this) ++{ ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dev.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dev.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dev.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dev.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1476 @@ ++/* ++ * dev.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dev.c ======== ++ * Description: ++ * Implementation of 'Bridge Mini-driver device operations. ++ * ++ * Public Functions: ++ * DEV_BrdWriteFxn ++ * DEV_CreateDevice ++ * DEV_Create2 ++ * DEV_Destroy2 ++ * DEV_DestroyDevice ++ * DEV_GetChnlMgr ++ * DEV_GetCmmMgr ++ * DEV_GetCodMgr ++ * DEV_GetDehMgr ++ * DEV_GetDevNode ++ * DEV_GetDSPWordSize ++ * DEV_GetFirst ++ * DEV_GetIntfFxns ++ * DEV_GetIOMgr ++ * DEV_GetNext ++ * DEV_GetNodeManager ++ * DEV_GetSymbol ++ * DEV_GetWMDContext ++ * DEV_Exit ++ * DEV_Init ++ * DEV_InsertProcObject ++ * DEV_IsLocked ++ * DEV_NotifyClient ++ * DEV_RegisterNotify ++ * DEV_ReleaseCodMgr ++ * DEV_RemoveDevice ++ * DEV_RemoveProcObject ++ * DEV_SetChnlMgr ++ * DEV_SetMsgMgr ++ * DEV_SetLockOwner ++ * DEV_StartDevice ++ * ++ * Private Functions: ++ * FxnNotImplemented ++ * InitCodMgr ++ * InsertDevObject ++ * IsValidHandle ++ * RemoveDevObject ++ * StoreInterfaceFxns ++ * ++ *! Revision History: ++ *! ================ ++ *! 03-Jan-2005 hn Support for IVA DEH ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature ++ *! 09-Feb-2004 vp Updated to support IVA. ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 29-Nov-2001 jeh Check for DSP_ENOTIMPL status of DEH create function. ++ *! 05-Nov-2001 kc Added support for DEH module. ++ *! 05-Aug-2001 ag Shared memory registration moved to WMD_IO_OnLoaded(). ++ *! 11-Jul-2001 jeh Moved MGR_Create() from DSP_Init() to DEV_StartDevice(). ++ *! 11-Apr-2001 rr: Removed CMM_RegisterGPPSMSeg. ++ *! 02-Apr-2001 rr: CHNL_Create failure is printed out. ++ *! 15-Jan-2001 jeh Removed call to IO_OnLoaded() from DEV_Create2(). ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name update. ++ *! 15-Dec-2000 rr: Dev_Create2 returns error if NODE_CreateMgr fails. ++ *! 
05-Dec-2000 jeh Moved IO_OnLoaded() to PROC_Load. Added DEV_SetMsgMgr. ++ *! 05-Dev-2000 ag SM Heap for messaging registered via CMM_RegisterGPPSMSeg(). ++ *! SM heap base and size currently taken from registry. ++ *! 29-Nov-2000 rr: Incorporated code review changes. ++ *! 17-Nov-2000 jeh Added calls to get IO manager (IO_Create), IO_OnLoaded(). ++ *! 06-Oct-2000 rr: DEV_Destroy2 and DEV_Create2 added. ++ *! 02-Oct-2000 rr: DEV_GetNodeManager added. ++ *! 11-Aug-2000 ag: Added DEV_GetCmmMgr(), CMM_Init() & CMM_Exit(). ++ *! Removed & , added ++ *! 10-Aug-2000 rr: DEV_InsertProcObject/RemoveProcObject added. ++ *! DEV_Cleanup calls PROC_Detach if it is a matching process. ++ *! 27-Jul-2000 rr: DEV is in new directoy DEV and produces devlib.lib ++ *! 17-Jul-2000 rr: DRV Object holds the list of Dev Objects. DEV gets ++ *! the List and Next devices through DRV. ++ *! DEV object has a back pointer to DRV Object. ++ *! 06-Jun-2000 jeh Added DEV_GetSymbol(). ++ *! 09-May-2000 rr: dwMemBase has index for multiple windows need. ++ *! 28-Feb-2000 rr: New GT Usage implemented. ++ *! 03-Feb-2000 rr: GT and Module init/exit Changes.(Done up front from ++ *! SERVICES) ++ *! 31-Jan-2000 rr: Comments changed after code review. ++ *! 21-Jan-2000 rr: windows.h, tchar.h, HMODULE removed. FreeLibrary replaced ++ *! with LDR_FreeModule ++ *! 17-Jan-2000 rr: CFG_Get/SetPrivateDword renamed to CFG_Get/SetDevObject. ++ *! StoreInterfaceFxns stores the new fxn WMD_BRD_SETSTATE. ++ *! 20-Nov-1999 ag: Actual uSMLength = total - monitor offset. ++ *! 12-Nov-1999 rr: bIRQ and IRQAttrib taken from the struct CFG_HOSTRES. ++ *! dMemBase is added with offset for monitor taken from ++ *! registry. ++ *! 31-Oct-1999 ag: Added CHNL support. ++ *! 10-Sep-1999 rr: GT Enabled. DEV_Create will Load the Mini Driver and will ++ *! find its fxn table. Right now lot of things are hardcoded ++ *! as the REG is not ready. ++ *! 10-Jun-1996 rr: Created from WSX ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++#include /* WCD version info. */ ++ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++ ++#define SIGNATURE 0x5f564544 /* "DEV_" (in reverse) */ ++#define MAKEVERSION(major, minor) (major * 10 + minor) ++#define WCDVERSION MAKEVERSION(WCD_MAJOR_VERSION, WCD_MINOR_VERSION) ++ ++/* The WMD device object: */ ++struct DEV_OBJECT { ++ /* LST requires "link" to be first field! */ ++ struct LST_ELEM link; /* Link to next DEV_OBJECT. */ ++ u32 devType; /* Device Type */ ++ u32 dwSignature; /* Used for object validation. */ ++ struct CFG_DEVNODE *hDevNode; /* Platform specific device id */ ++ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD Context Handle */ ++ struct WMD_DRV_INTERFACE intfFxns; /* Function interface to WMD. */ ++ struct BRD_OBJECT *lockOwner; /* Client with exclusive access. 
*/ ++ struct COD_MANAGER *hCodMgr; /* Code manager handle. */ ++ struct CHNL_MGR *hChnlMgr; /* Channel manager. */ ++ struct DEH_MGR *hDehMgr; /* DEH manager. */ ++ struct MSG_MGR *hMsgMgr; /* Message manager. */ ++ struct IO_MGR *hIOMgr; /* IO manager (CHNL, MSG) */ ++ struct CMM_OBJECT *hCmmMgr; /* SM memory manager. */ ++ struct DMM_OBJECT *hDmmMgr; /* Dynamic memory manager. */ ++ struct LDR_MODULE *hModule; /* WMD Module handle. */ ++ u32 uWordSize; /* DSP word size: quick access. */ ++ struct DRV_OBJECT *hDrvObject; /* Driver Object */ ++ struct LST_LIST *procList; /* List of Proceeosr attached to ++ * this device */ ++ struct NODE_MGR *hNodeMgr; ++} ; ++ ++/* ----------------------------------- Globals */ ++static u32 cRefs; /* Module reference count */ ++#if GT_TRACE ++static struct GT_Mask debugMask = { NULL, NULL }; /* For debugging */ ++#endif ++ ++/* ----------------------------------- Function Prototypes */ ++static DSP_STATUS FxnNotImplemented(int arg, ...); ++static DSP_STATUS InitCodMgr(struct DEV_OBJECT *pDevObject); ++static bool IsValidHandle(struct DEV_OBJECT *hObj); ++static void StoreInterfaceFxns(struct WMD_DRV_INTERFACE *pDrvFxns, ++ OUT struct WMD_DRV_INTERFACE *pIntfFxns); ++/* ++ * ======== DEV_BrdWriteFxn ======== ++ * Purpose: ++ * Exported function to be used as the COD write function. This function ++ * is passed a handle to a DEV_hObject, then calls the ++ * device's WMD_BRD_Write() function. ++ */ ++u32 DEV_BrdWriteFxn(void *pArb, u32 ulDspAddr, void *pHostBuf, ++ u32 ulNumBytes, u32 nMemSpace) ++{ ++ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)pArb; ++ u32 ulWritten = 0; ++ DSP_STATUS status; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pHostBuf != NULL); /* Required of BrdWrite(). */ ++ GT_5trace(debugMask, GT_ENTER, ++ "Entered DEV_BrdWriteFxn, pArb: 0x%x\n\t\t" ++ "ulDspAddr: 0x%x\n\t\tpHostBuf: 0x%x\n \t\tulNumBytes: 0x%x\n" ++ "\t\tnMemSpace: 0x%x\n", pArb, ulDspAddr, pHostBuf, ++ ulNumBytes, nMemSpace); ++ if (IsValidHandle(pDevObject)) { ++ /* Require of BrdWrite() */ ++ DBC_Assert(pDevObject->hWmdContext != NULL); ++ status = (*pDevObject->intfFxns.pfnBrdWrite)(pDevObject-> ++ hWmdContext, pHostBuf, ulDspAddr, ulNumBytes, ++ nMemSpace); ++ /* Special case of getting the address only */ ++ if (ulNumBytes == 0) ++ ulNumBytes = 1; ++ if (DSP_SUCCEEDED(status)) ++ ulWritten = ulNumBytes; ++ ++ } ++ GT_1trace(debugMask, GT_ENTER, "Exit DEV_BrdWriteFxn ulWritten: 0x%x\n", ++ ulWritten); ++ return ulWritten; ++} ++ ++/* ++ * ======== DEV_CreateDevice ======== ++ * Purpose: ++ * Called by the operating system to load the PM Mini Driver for a ++ * PM board (device). 
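++ * On success this creates the DEV_OBJECT itself together with its
++ * COD manager and, where applicable, the CHNL, IO, DEH, CMM and DMM
++ * managers, and inserts the new object into the driver object's
++ * list of devices.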
++ */ ++DSP_STATUS DEV_CreateDevice(OUT struct DEV_OBJECT **phDevObject, ++ IN CONST char *pstrWMDFileName, ++ IN CONST struct CFG_HOSTRES *pHostConfig, ++ IN CONST struct CFG_DSPRES *pDspConfig, ++ struct CFG_DEVNODE *hDevNode) ++{ ++ struct LDR_MODULE *hModule = NULL; ++ struct WMD_DRV_INTERFACE *pDrvFxns = NULL; ++ struct DEV_OBJECT *pDevObject = NULL; ++ struct CHNL_MGRATTRS mgrAttrs; ++ struct IO_ATTRS ioMgrAttrs; ++ u32 uNumWindows; ++ struct DRV_OBJECT *hDrvObject = NULL; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDevObject != NULL); ++ DBC_Require(pstrWMDFileName != NULL); ++ DBC_Require(pHostConfig != NULL); ++ DBC_Require(pDspConfig != NULL); ++ ++ GT_5trace(debugMask, GT_ENTER, ++ "Entered DEV_CreateDevice, phDevObject: 0x%x\n" ++ "\t\tpstrWMDFileName: 0x%x\n\t\tpHostConfig:0x%x\n\t\t" ++ "pDspConfig: 0x%x\n\t\tnhDevNode: 0x%x\n", phDevObject, ++ pstrWMDFileName, pHostConfig, pDspConfig, hDevNode); ++ /* Get the WMD interface functions*/ ++ WMD_DRV_Entry(&pDrvFxns, pstrWMDFileName); ++ if (DSP_FAILED(CFG_GetObject((u32 *) &hDrvObject, REG_DRV_OBJECT))) { ++ /* don't propogate CFG errors from this PROC function */ ++ GT_0trace(debugMask, GT_7CLASS, ++ "Failed to get the DRV Object \n"); ++ status = DSP_EFAIL; ++ } ++ /* Create the device object, and pass a handle to the WMD for ++ * storage. */ ++ if (DSP_SUCCEEDED(status)) { ++ DBC_Assert(pDrvFxns); ++ MEM_AllocObject(pDevObject, struct DEV_OBJECT, SIGNATURE); ++ if (pDevObject) { ++ /* Fill out the rest of the Dev Object structure: */ ++ pDevObject->hDevNode = hDevNode; ++ pDevObject->hModule = hModule; ++ pDevObject->hCodMgr = NULL; ++ pDevObject->hChnlMgr = NULL; ++ pDevObject->hDehMgr = NULL; ++ pDevObject->lockOwner = NULL; ++ pDevObject->uWordSize = pDspConfig->uWordSize; ++ pDevObject->hDrvObject = hDrvObject; ++ pDevObject->devType = DSP_UNIT; ++ /* Store this WMD's interface functions, based on its ++ * version. */ ++ StoreInterfaceFxns(pDrvFxns, &pDevObject->intfFxns); ++ /* Call WMD_DEV_CREATE() to get the WMD's device ++ * context handle. 
*/ ++ status = (pDevObject->intfFxns.pfnDevCreate) ++ (&pDevObject->hWmdContext, pDevObject, ++ pHostConfig, pDspConfig); ++ /* Assert WMD_DEV_Create()'s ensure clause: */ ++ DBC_Assert(DSP_FAILED(status) || (pDevObject-> ++ hWmdContext != NULL)); ++ } else { ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_Create: Out Of Memory"); ++ status = DSP_EMEMORY; ++ } ++ } ++ /* Attempt to create the COD manager for this device: */ ++ if (DSP_SUCCEEDED(status)) ++ status = InitCodMgr(pDevObject); ++ ++ /* Attempt to create the channel manager for this device: */ ++ if (DSP_SUCCEEDED(status)) { ++ mgrAttrs.cChannels = CHNL_MAXCHANNELS; ++ ioMgrAttrs.bIRQ = pHostConfig->bIRQRegisters; ++ ioMgrAttrs.fShared = (pHostConfig->bIRQAttrib & CFG_IRQSHARED); ++ ioMgrAttrs.uWordSize = pDspConfig->uWordSize; ++ mgrAttrs.uWordSize = pDspConfig->uWordSize; ++ uNumWindows = pHostConfig->wNumMemWindows; ++ if (uNumWindows) { ++ /* Assume last memory window is for CHNL */ ++ ioMgrAttrs.dwSMBase = pHostConfig->dwMemBase[1] + ++ pHostConfig->dwOffsetForMonitor; ++ ioMgrAttrs.uSMLength = pHostConfig->dwMemLength[1] - ++ pHostConfig->dwOffsetForMonitor; ++ } else { ++ ioMgrAttrs.dwSMBase = 0; ++ ioMgrAttrs.uSMLength = 0; ++ GT_0trace(debugMask, GT_7CLASS, ++ "**There is no memory reserved for " ++ "shared structures**\n"); ++ } ++ status = CHNL_Create(&pDevObject->hChnlMgr, pDevObject, ++ &mgrAttrs); ++ if (status == DSP_ENOTIMPL) { ++ /* It's OK for a device not to have a channel ++ * manager: */ ++ status = DSP_SOK; ++ } ++ /* Create CMM mgr even if Msg Mgr not impl. */ ++ status = CMM_Create(&pDevObject->hCmmMgr, ++ (struct DEV_OBJECT *)pDevObject, NULL); ++ if (DSP_FAILED(status)) { ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_Create: Failed to Create SM " ++ "Manager\n"); ++ } ++ /* Only create IO manager if we have a channel manager */ ++ if (DSP_SUCCEEDED(status) && pDevObject->hChnlMgr) { ++ status = IO_Create(&pDevObject->hIOMgr, pDevObject, ++ &ioMgrAttrs); ++ } ++ /* Only create DEH manager if we have an IO manager */ ++ if (DSP_SUCCEEDED(status)) { ++ /* Instantiate the DEH module */ ++ status = (*pDevObject->intfFxns.pfnDehCreate) ++ (&pDevObject->hDehMgr, pDevObject); ++ } ++ /* Create DMM mgr . */ ++ status = DMM_Create(&pDevObject->hDmmMgr, ++ (struct DEV_OBJECT *)pDevObject, NULL); ++ if (DSP_FAILED(status)) { ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_Create: Failed to Create DMM " ++ "Manager\n"); ++ } ++ } ++ /* Add the new DEV_Object to the global list: */ ++ if (DSP_SUCCEEDED(status)) { ++ LST_InitElem(&pDevObject->link); ++ status = DRV_InsertDevObject(hDrvObject, pDevObject); ++ } ++ /* Create the Processor List */ ++ if (DSP_SUCCEEDED(status)) { ++ pDevObject->procList = LST_Create(); ++ if (!(pDevObject->procList)) { ++ status = DSP_EFAIL; ++ GT_0trace(debugMask, GT_7CLASS, "DEV_Create: " ++ "Failed to Create Proc List"); ++ } ++ } ++ /* If all went well, return a handle to the dev object; ++ * else, cleanup and return NULL in the OUT parameter. 
*/ ++ if (DSP_SUCCEEDED(status)) { ++ *phDevObject = pDevObject; ++ GT_1trace(debugMask, GT_1CLASS, ++ "DEV_CreateDevice Succeeded \nDevObject " ++ "0x%x\n", pDevObject); ++ } else { ++ if (pDevObject && pDevObject->procList) ++ LST_Delete(pDevObject->procList); ++ ++ if (pDevObject && pDevObject->hCodMgr) ++ COD_Delete(pDevObject->hCodMgr); ++ ++ if (pDevObject && pDevObject->hDmmMgr) ++ DMM_Destroy(pDevObject->hDmmMgr); ++ ++ if (pDevObject) ++ MEM_FreeObject(pDevObject); ++ ++ *phDevObject = NULL; ++ GT_0trace(debugMask, GT_7CLASS, "DEV_CreateDevice Failed\n"); ++ } ++ GT_1trace(debugMask, GT_1CLASS, "Exiting DEV_Create: DevObject 0x%x\n", ++ *phDevObject); ++ DBC_Ensure((DSP_SUCCEEDED(status) && IsValidHandle(*phDevObject)) || ++ (DSP_FAILED(status) && !*phDevObject)); ++ return status; ++} ++ ++/* ++ * ======== DEV_Create2 ======== ++ * Purpose: ++ * After successful loading of the image from WCD_InitComplete2 ++ * (PROC Auto_Start) or PROC_Load this fxn is called. This creates ++ * the Node Manager and updates the DEV Object. ++ */ ++DSP_STATUS DEV_Create2(struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValidHandle(hDevObject)); ++ ++ GT_1trace(debugMask, GT_ENTER, ++ "Entered DEV_Create2, hDevObject: 0x%x\n", hDevObject); ++ /* There can be only one Node Manager per DEV object */ ++ DBC_Assert(!pDevObject->hNodeMgr); ++ status = NODE_CreateMgr(&pDevObject->hNodeMgr, hDevObject); ++ if (DSP_FAILED(status)) { ++ GT_1trace(debugMask, GT_7CLASS, ++ "DEV_Create2: NODE_CreateMgr failed, " ++ "0x%x!\n", status); ++ pDevObject->hNodeMgr = NULL; ++ GT_0trace(debugMask, GT_7CLASS, "DEV_Create2: Failed!!\n"); ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && pDevObject->hNodeMgr != NULL) ++ || (DSP_FAILED(status) && pDevObject->hNodeMgr == NULL)); ++ GT_2trace(debugMask, GT_ENTER, ++ "Exiting DEV_Create2, hNodeMgr: 0x%x, status:" ++ " 0x%x\n", pDevObject->hNodeMgr, status); ++ return status; ++} ++ ++/* ++ * ======== DEV_Destroy2 ======== ++ * Purpose: ++ * Destroys the Node manager for this device. ++ */ ++DSP_STATUS DEV_Destroy2(struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValidHandle(hDevObject)); ++ ++ GT_1trace(debugMask, GT_ENTER, ++ "Entered DEV_Destroy2, hDevObject: 0x%x\n", ++ hDevObject); ++ if (pDevObject->hNodeMgr) { ++ if (DSP_FAILED(NODE_DeleteMgr(pDevObject->hNodeMgr))) ++ status = DSP_EFAIL; ++ else ++ pDevObject->hNodeMgr = NULL; ++ ++ } ++ if (DSP_FAILED(status)) ++ GT_0trace(debugMask, GT_7CLASS, "DEV_Destroy2 failed!!\n"); ++ ++ DBC_Ensure((DSP_SUCCEEDED(status) && pDevObject->hNodeMgr == NULL) || ++ DSP_FAILED(status)); ++ GT_2trace(debugMask, GT_ENTER, ++ "Exiting DEV_Destroy2, hNodeMgr: 0x%x, status" ++ " = 0x%x\n", pDevObject->hNodeMgr, status); ++ return status; ++} ++ ++/* ++ * ======== DEV_DestroyDevice ======== ++ * Purpose: ++ * Destroys the channel manager for this device, if any, calls ++ * WMD_DEV_Destroy(), and then attempts to unload the WMD module. 
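++ * Any node, IO, message, DEH, CMM and DMM managers still owned by
++ * the device are released as well before the object itself is freed.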
++ */ ++DSP_STATUS DEV_DestroyDevice(struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(debugMask, GT_ENTER, "Entered DEV_DestroyDevice, hDevObject: " ++ "0x%x\n", hDevObject); ++ if (IsValidHandle(hDevObject)) { ++ if (pDevObject->hCodMgr) ++ COD_Delete(pDevObject->hCodMgr); ++ ++ if (pDevObject->hNodeMgr) ++ NODE_DeleteMgr(pDevObject->hNodeMgr); ++ ++ /* Free the io, channel, and message managers for this board: */ ++ if (pDevObject->hIOMgr) { ++ IO_Destroy(pDevObject->hIOMgr); ++ pDevObject->hIOMgr = NULL; ++ } ++ if (pDevObject->hChnlMgr) { ++ CHNL_Destroy(pDevObject->hChnlMgr); ++ pDevObject->hChnlMgr = NULL; ++ } ++ if (pDevObject->hMsgMgr) ++ MSG_Delete(pDevObject->hMsgMgr); ++ ++ if (pDevObject->hDehMgr) { ++ /* Uninitialize DEH module. */ ++ (*pDevObject->intfFxns.pfnDehDestroy) ++ (pDevObject->hDehMgr); ++ } ++ if (pDevObject->hCmmMgr) ++ CMM_Destroy(pDevObject->hCmmMgr, true); ++ ++ if (pDevObject->hDmmMgr) ++ DMM_Destroy(pDevObject->hDmmMgr); ++ ++ /* Call the driver's WMD_DEV_Destroy() function: */ ++ /* Require of DevDestroy */ ++ DBC_Assert(pDevObject->hWmdContext != NULL); ++ status = (*pDevObject->intfFxns.pfnDevDestroy) ++ (pDevObject->hWmdContext); ++ if (DSP_SUCCEEDED(status)) { ++ if (pDevObject->procList) ++ LST_Delete(pDevObject->procList); ++ ++ /* Remove this DEV_Object from the global list: */ ++ DRV_RemoveDevObject(pDevObject->hDrvObject, pDevObject); ++ /* Free The library * LDR_FreeModule ++ * (pDevObject->hModule);*/ ++ /* Free this dev object: */ ++ MEM_FreeObject(pDevObject); ++ } ++ } else { ++ GT_0trace(debugMask, GT_7CLASS, "DEV_Destroy: Invlaid handle"); ++ status = DSP_EHANDLE; ++ } ++ GT_1trace(debugMask, GT_ENTER, "Exit DEV_destroy: status 0x%x\n", ++ status); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetChnlMgr ======== ++ * Purpose: ++ * Retrieve the handle to the channel manager handle created for this ++ * device. ++ */ ++DSP_STATUS DEV_GetChnlMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct CHNL_MGR **phMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phMgr != NULL); ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetChnlMgr, hDevObject: 0x%x\n\t" ++ "\tphMgr: 0x%x\n", hDevObject, phMgr); ++ if (IsValidHandle(hDevObject)) { ++ *phMgr = pDevObject->hChnlMgr; ++ } else { ++ *phMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetChnlMgr: Invalid handle"); ++ } ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetChnlMgr: status 0x%x\t\n hMgr: " ++ "0x%x\n", status, *phMgr); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phMgr != NULL) && ++ (*phMgr == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetCmmMgr ======== ++ * Purpose: ++ * Retrieve the handle to the shared memory manager created for this ++ * device. 
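++ * *phMgr is set to NULL and DSP_EHANDLE is returned if hDevObject
++ * is not a valid device handle.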
++ */ ++DSP_STATUS DEV_GetCmmMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct CMM_OBJECT **phMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phMgr != NULL); ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetCmmMgr, hDevObject: 0x%x\n\t" ++ "\tphMgr: 0x%x\n", hDevObject, phMgr); ++ if (IsValidHandle(hDevObject)) { ++ *phMgr = pDevObject->hCmmMgr; ++ } else { ++ *phMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetCmmMgr: Invalid handle"); ++ } ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetCmmMgr: status 0x%x\t\nhMgr: " ++ "0x%x\n", status, *phMgr); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phMgr != NULL) && ++ (*phMgr == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetDmmMgr ======== ++ * Purpose: ++ * Retrieve the handle to the dynamic memory manager created for this ++ * device. ++ */ ++DSP_STATUS DEV_GetDmmMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct DMM_OBJECT **phMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phMgr != NULL); ++ ++ GT_2trace(debugMask, GT_ENTER, "Entered DEV_GetDmmMgr, hDevObject: " ++ "0x%x\n\t\tphMgr: 0x%x\n", hDevObject, phMgr); ++ if (IsValidHandle(hDevObject)) { ++ *phMgr = pDevObject->hDmmMgr; ++ } else { ++ *phMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetDmmMgr: Invalid handle"); ++ } ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetDmmMgr: status 0x%x\t\n hMgr: " ++ "0x%x\n", status, *phMgr); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phMgr != NULL) && ++ (*phMgr == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetCodMgr ======== ++ * Purpose: ++ * Retrieve the COD manager create for this device. ++ */ ++DSP_STATUS DEV_GetCodMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct COD_MANAGER **phCodMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phCodMgr != NULL); ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetCodMgr, hDevObject: 0x%x\n\t\t" ++ "phCodMgr: 0x%x\n", hDevObject, phCodMgr); ++ if (IsValidHandle(hDevObject)) { ++ *phCodMgr = pDevObject->hCodMgr; ++ } else { ++ *phCodMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_1trace(debugMask, GT_7CLASS, ++ "DEV_GetCodMgr, invalid handle: 0x%x\n", ++ hDevObject); ++ } ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetCodMgr: status 0x%x\t\n hCodMgr:" ++ " 0x%x\n", status, *phCodMgr); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phCodMgr != NULL) && ++ (*phCodMgr == NULL))); ++ return status; ++} ++ ++/* ++ * ========= DEV_GetDehMgr ======== ++ */ ++DSP_STATUS DEV_GetDehMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct DEH_MGR **phDehMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDehMgr != NULL); ++ DBC_Require(MEM_IsValidHandle(hDevObject, SIGNATURE)); ++ if (IsValidHandle(hDevObject)) { ++ *phDehMgr = hDevObject->hDehMgr; ++ } else { ++ *phDehMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetDehMgr: Invalid handle"); ++ } ++ return status; ++} ++ ++/* ++ * ======== DEV_GetDevNode ======== ++ * Purpose: ++ * Retrieve the platform specific device ID for this device. 
++ */ ++DSP_STATUS DEV_GetDevNode(struct DEV_OBJECT *hDevObject, ++ OUT struct CFG_DEVNODE **phDevNode) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDevNode != NULL); ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetDevNode, hDevObject: 0x%x\n\t" ++ "\tphDevNode: 0x%x\n", hDevObject, phDevNode); ++ if (IsValidHandle(hDevObject)) { ++ *phDevNode = pDevObject->hDevNode; ++ } else { ++ *phDevNode = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetDevNode: Invalid handle"); ++ } ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetDevNode: status 0x%x\t\nhDevNode:" ++ "0x%x\n", status, *phDevNode); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phDevNode != NULL) && ++ (*phDevNode == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetFirst ======== ++ * Purpose: ++ * Retrieve the first Device Object handle from an internal linked list ++ * DEV_OBJECTs maintained by DEV. ++ */ ++struct DEV_OBJECT *DEV_GetFirst(void) ++{ ++ struct DEV_OBJECT *pDevObject = NULL; ++ ++ pDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); ++ ++ DBC_Ensure((pDevObject == NULL) || IsValidHandle(pDevObject)); ++ ++ return pDevObject; ++} ++ ++/* ++ * ======== DEV_GetIntfFxns ======== ++ * Purpose: ++ * Retrieve the WMD interface function structure for the loaded WMD. ++ * ppIntfFxns != NULL. ++ */ ++DSP_STATUS DEV_GetIntfFxns(struct DEV_OBJECT *hDevObject, ++ OUT struct WMD_DRV_INTERFACE **ppIntfFxns) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(ppIntfFxns != NULL); ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetIntfFxns, hDevObject: 0x%x\n\t" ++ "\tppIntfFxns: 0x%x\n", hDevObject, ppIntfFxns); ++ if (IsValidHandle(hDevObject)) { ++ *ppIntfFxns = &pDevObject->intfFxns; ++ } else { ++ *ppIntfFxns = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetIntDxns: Invalid handle"); ++ } ++ GT_2trace(debugMask, GT_ENTER, "Exit DEV_GetIntFxns: status 0x%x\t\n" ++ "ppIntFxns: 0x%x\n", status, *ppIntfFxns); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((ppIntfFxns != NULL) && ++ (*ppIntfFxns == NULL))); ++ return status; ++} ++ ++/* ++ * ========= DEV_GetIOMgr ======== ++ */ ++DSP_STATUS DEV_GetIOMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct IO_MGR **phIOMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phIOMgr != NULL); ++ DBC_Require(MEM_IsValidHandle(hDevObject, SIGNATURE)); ++ ++ if (IsValidHandle(hDevObject)) { ++ *phIOMgr = hDevObject->hIOMgr; ++ } else { ++ *phIOMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, "DEV_GetIOMgr: Invalid handle"); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DEV_GetNext ======== ++ * Purpose: ++ * Retrieve the next Device Object handle from an internal linked list ++ * of DEV_OBJECTs maintained by DEV, after having previously called ++ * DEV_GetFirst() and zero or more DEV_GetNext ++ */ ++struct DEV_OBJECT *DEV_GetNext(struct DEV_OBJECT *hDevObject) ++{ ++ struct DEV_OBJECT *pNextDevObject = NULL; ++ ++ if (IsValidHandle(hDevObject)) { ++ pNextDevObject = (struct DEV_OBJECT *) ++ DRV_GetNextDevObject((u32)hDevObject); ++ } ++ DBC_Ensure((pNextDevObject == NULL) || IsValidHandle(pNextDevObject)); ++ return pNextDevObject; ++} ++ ++/* ++ * ========= DEV_GetMsgMgr ======== ++ */ ++void DEV_GetMsgMgr(struct DEV_OBJECT *hDevObject, ++ OUT struct MSG_MGR **phMsgMgr) ++{ ++ DBC_Require(cRefs > 0); ++ 
DBC_Require(phMsgMgr != NULL); ++ DBC_Require(MEM_IsValidHandle(hDevObject, SIGNATURE)); ++ ++ *phMsgMgr = hDevObject->hMsgMgr; ++} ++ ++/* ++ * ======== DEV_GetNodeManager ======== ++ * Purpose: ++ * Retrieve the Node Manager Handle ++ */ ++DSP_STATUS DEV_GetNodeManager(struct DEV_OBJECT *hDevObject, ++ OUT struct NODE_MGR **phNodeMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phNodeMgr != NULL); ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetNodeManager, hDevObject: 0x%x" ++ "\n\t\tphNodeMgr: 0x%x\n", hDevObject, phNodeMgr); ++ if (IsValidHandle(hDevObject)) { ++ *phNodeMgr = pDevObject->hNodeMgr; ++ } else { ++ *phNodeMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_1trace(debugMask, GT_7CLASS, ++ "DEV_GetNodeManager, invalid handle: 0x" ++ "%x\n", hDevObject); ++ } ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetNodeManager: status 0x%x\t\nhMgr:" ++ " 0x%x\n", status, *phNodeMgr); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phNodeMgr != NULL) && ++ (*phNodeMgr == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetSymbol ======== ++ */ ++DSP_STATUS DEV_GetSymbol(struct DEV_OBJECT *hDevObject, ++ IN CONST char *pstrSym, OUT u32 *pulValue) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct COD_MANAGER *hCodMgr; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pstrSym != NULL && pulValue != NULL); ++ ++ GT_3trace(debugMask, GT_ENTER, ++ "Entered DEV_GetSymbol, hDevObject: 0x%x\n\t\t" ++ "pstrSym: 0x%x\n\t\tpulValue: 0x%x\n", hDevObject, pstrSym, ++ pulValue); ++ if (IsValidHandle(hDevObject)) { ++ status = DEV_GetCodMgr(hDevObject, &hCodMgr); ++ if (DSP_SUCCEEDED(status)) { ++ DBC_Assert(hCodMgr != NULL); ++ status = COD_GetSymValue(hCodMgr, (char *)pstrSym, ++ pulValue); ++ } ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetSymbol: Invalid handle"); ++ } ++ GT_2trace(debugMask, GT_ENTER, "Exit DEV_GetSymbol: status 0x%x\t\n" ++ "hWmdContext: 0x%x\n", status, *pulValue); ++ return status; ++} ++ ++/* ++ * ======== DEV_GetWMDContext ======== ++ * Purpose: ++ * Retrieve the WMD Context handle, as returned by the WMD_Create fxn. ++ */ ++DSP_STATUS DEV_GetWMDContext(struct DEV_OBJECT *hDevObject, ++ OUT struct WMD_DEV_CONTEXT **phWmdContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phWmdContext != NULL); ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_GetWMDContext, hDevObject: 0x%x\n" ++ "\t\tphWmdContext: 0x%x\n", hDevObject, phWmdContext); ++ if (IsValidHandle(hDevObject)) { ++ *phWmdContext = pDevObject->hWmdContext; ++ } else { ++ *phWmdContext = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_GetWMDContext: Invalid handle"); ++ } ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Exit DEV_GetWMDContext: status 0x%x\t\n" ++ "hWmdContext: 0x%x\n", status, *phWmdContext); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phWmdContext != NULL) && ++ (*phWmdContext == NULL))); ++ return status; ++} ++ ++/* ++ * ======== DEV_Exit ======== ++ * Purpose: ++ * Decrement reference count, and free resources when reference count is ++ * 0. 
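++ * When the count reaches zero the CMM and DMM modules are shut down.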
++ */ ++void DEV_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ if (cRefs == 0) { ++ CMM_Exit(); ++ DMM_Exit(); ++ } ++ ++ GT_1trace(debugMask, GT_5CLASS, "Entered DEV_Exit, ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== DEV_Init ======== ++ * Purpose: ++ * Initialize DEV's private state, keeping a reference count on each call. ++ */ ++bool DEV_Init(void) ++{ ++ bool fCmm, fDmm, fRetval = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ /* Set the Trace mask */ ++ DBC_Assert(!debugMask.flags); ++ GT_create(&debugMask, "DV"); /* "DV" for DeVice */ ++ fCmm = CMM_Init(); ++ fDmm = DMM_Init(); ++ ++ fRetval = fCmm && fDmm; ++ ++ if (!fRetval) { ++ if (fCmm) ++ CMM_Exit(); ++ ++ ++ if (fDmm) ++ DMM_Exit(); ++ ++ } ++ } ++ ++ if (fRetval) ++ cRefs++; ++ ++ ++ GT_1trace(debugMask, GT_5CLASS, "Entered DEV_Init, ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} ++ ++/* ++ * ======== DEV_NotifyClients ======== ++ * Purpose: ++ * Notify all clients of this device of a change in device status. ++ */ ++DSP_STATUS DEV_NotifyClients(struct DEV_OBJECT *hDevObject, u32 ulStatus) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ DSP_HPROCESSOR hProcObject; ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_NotifyClients, hDevObject: 0x%x\n" ++ "\t\tulStatus: 0x%x\n", hDevObject, ulStatus); ++ for (hProcObject = (DSP_HPROCESSOR)LST_First(pDevObject->procList); ++ hProcObject != NULL; ++ hProcObject = (DSP_HPROCESSOR)LST_Next(pDevObject->procList, ++ (struct LST_ELEM *)hProcObject)) ++ PROC_NotifyClients(hProcObject, (u32) ulStatus); ++ ++ return status; ++} ++ ++/* ++ * ======== DEV_RemoveDevice ======== ++ */ ++DSP_STATUS DEV_RemoveDevice(struct CFG_DEVNODE *hDevNode) ++{ ++ struct DEV_OBJECT *hDevObject; /* handle to device object */ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject; ++ ++ GT_1trace(debugMask, GT_ENTER, ++ "Entered DEV_RemoveDevice, hDevNode: 0x%x\n", hDevNode); ++ /* Retrieve the device object handle originaly stored with ++ * the DevNode: */ ++ status = CFG_GetDevObject(hDevNode, (u32 *)&hDevObject); ++ if (DSP_SUCCEEDED(status)) { ++ /* Remove the Processor List */ ++ pDevObject = (struct DEV_OBJECT *)hDevObject; ++ /* Destroy the device object. */ ++ status = DEV_DestroyDevice(hDevObject); ++ if (DSP_SUCCEEDED(status)) { ++ /* Null out the handle stored with the DevNode. */ ++ GT_0trace(debugMask, GT_1CLASS, ++ "DEV_RemoveDevice, success"); ++ } ++ } ++ GT_1trace(debugMask, GT_ENTER, "Exit DEV_RemoveDevice, status: 0x%x\n", ++ status); ++ return status; ++} ++ ++/* ++ * ======== DEV_SetChnlMgr ======== ++ * Purpose: ++ * Set the channel manager for this device. ++ */ ++DSP_STATUS DEV_SetChnlMgr(struct DEV_OBJECT *hDevObject, struct CHNL_MGR *hMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = hDevObject; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_SetChnlMgr, hDevObject: 0x%x\n\t" ++ "\thMgr:0x%x\n", hDevObject, hMgr); ++ if (IsValidHandle(hDevObject)) { ++ pDevObject->hChnlMgr = hMgr; ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(debugMask, GT_7CLASS, ++ "DEV_SetChnlMgr, Invalid handle\n"); ++ } ++ DBC_Ensure(DSP_FAILED(status) || (pDevObject->hChnlMgr == hMgr)); ++ return status; ++} ++ ++/* ++ * ======== DEV_SetMsgMgr ======== ++ * Purpose: ++ * Set the message manager for this device. 
++ */ ++void DEV_SetMsgMgr(struct DEV_OBJECT *hDevObject, struct MSG_MGR *hMgr) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValidHandle(hDevObject)); ++ GT_2trace(debugMask, GT_ENTER, ++ "Entered DEV_SetMsgMgr, hDevObject: 0x%x\n\t\t" ++ "hMgr: 0x%x\n", hDevObject, hMgr); ++ hDevObject->hMsgMgr = hMgr; ++} ++ ++/* ++ * ======== DEV_StartDevice ======== ++ * Purpose: ++ * Initializes the new device with the BRIDGE environment. ++ */ ++DSP_STATUS DEV_StartDevice(struct CFG_DEVNODE *hDevNode) ++{ ++ struct DEV_OBJECT *hDevObject = NULL; /* handle to 'Bridge Device */ ++ struct CFG_HOSTRES hostRes; /* resources struct. */ ++ struct CFG_DSPRES dspRes; /* DSP resources struct */ ++ char szWMDFileName[CFG_MAXSEARCHPATHLEN] = "UMA"; /* wmd filename */ ++ DSP_STATUS status; ++ struct MGR_OBJECT *hMgrObject = NULL; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(debugMask, GT_ENTER, ++ "Entered DEV_StartDevice, hDevObject: 0x%x\n", hDevNode); ++ status = CFG_GetHostResources(hDevNode, &hostRes); ++ if (DSP_SUCCEEDED(status)) { ++ /* Get DSP resources of device from Registry: */ ++ status = CFG_GetDSPResources(hDevNode, &dspRes); ++ if (DSP_FAILED(status)) { ++ GT_1trace(debugMask, GT_7CLASS, ++ "Failed to get WMD DSP resources" ++ " from registry: 0x%x ", status); ++ } ++ } else { ++ GT_1trace(debugMask, GT_7CLASS, ++ "Failed to get WMD Host resources " ++ "from registry: 0x%x ", status); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Given all resources, create a device object. */ ++ status = DEV_CreateDevice(&hDevObject, szWMDFileName, &hostRes, ++ &dspRes, hDevNode); ++ if (DSP_SUCCEEDED(status)) { ++ /* Store away the hDevObject with the DEVNODE */ ++ status = CFG_SetDevObject(hDevNode, (u32)hDevObject); ++ if (DSP_FAILED(status)) { ++ /* Clean up */ ++ GT_1trace(debugMask, GT_7CLASS, ++ "Failed to set DevObject in the " ++ "Registry: 0x%x", status); ++ DEV_DestroyDevice(hDevObject); ++ hDevObject = NULL; ++ } ++ } else { ++ GT_1trace(debugMask, GT_7CLASS, ++ "Failed to Create Device: 0x%x", ++ status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Create the Manager Object */ ++ status = MGR_Create(&hMgrObject, hDevNode); ++ } ++ if (DSP_FAILED(status)) { ++ GT_1trace(debugMask, GT_7CLASS, "Failed to MGR object: 0x%x", ++ status); ++ status = DSP_EFAIL; ++ } ++ if (DSP_FAILED(status)) { ++ if (hDevObject) ++ DEV_DestroyDevice(hDevObject); ++ ++ /* Ensure the device extension is NULL */ ++ CFG_SetDevObject(hDevNode, 0L); ++ } ++ GT_1trace(debugMask, GT_ENTER, "Exiting DEV_StartDevice status 0x%x\n", ++ status); ++ return status; ++} ++ ++/* ++ * ======== FxnNotImplemented ======== ++ * Purpose: ++ * Takes the place of a WMD Null Function. ++ * Parameters: ++ * Multiple, optional. ++ * Returns: ++ * DSP_ENOTIMPL: Always. ++ */ ++static DSP_STATUS FxnNotImplemented(int arg, ...) ++{ ++ DBG_Trace(DBG_LEVEL1, ++ "WARNING: Calling a non-implemented WMD function.\n"); ++ ++ return DSP_ENOTIMPL; ++} ++ ++/* ++ * ======== IsValidHandle ======== ++ * Purpose: ++ * Validate the device object handle. ++ * Parameters: ++ * hDevObject: Handle to device object created with ++ * DEV_CreateDevice(). ++ * Returns: ++ * true if handle is valid; false otherwise. ++ * Requires: ++ * Ensures: ++ */ ++static bool IsValidHandle(struct DEV_OBJECT *hObj) ++{ ++ bool retVal; ++ ++ retVal = (hObj != NULL) && (hObj->dwSignature == SIGNATURE); ++ ++ return retVal; ++} ++ ++/* ++ * ======== InitCodMgr ======== ++ * Purpose: ++ * Create a COD manager for this device. 
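++ * The manager is created against a placeholder ("dummy") file name
++ * rather than a real base image.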
++ * Parameters: ++ * pDevObject: Pointer to device object created with ++ * DEV_CreateDevice() ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EHANDLE: Invalid hDevObject. ++ * Requires: ++ * Should only be called once by DEV_CreateDevice() for a given DevObject. ++ * Ensures: ++ */ ++static DSP_STATUS InitCodMgr(struct DEV_OBJECT *pDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ char *szDummyFile = "dummy"; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(!IsValidHandle(pDevObject) || ++ (pDevObject->hCodMgr == NULL)); ++ GT_1trace(debugMask, GT_ENTER, "Entering InitCodMgr pDevObject: 0x%x", ++ pDevObject); ++ status = COD_Create(&pDevObject->hCodMgr, szDummyFile, NULL); ++ GT_1trace(debugMask, GT_ENTER, "Exiting InitCodMgr status 0x%x\n ", ++ status); ++ return status; ++} ++ ++/* ++ * ======== DEV_InsertProcObject ======== ++ * Purpose: ++ * Insert a ProcObject into the list maintained by DEV. ++ * Parameters: ++ * pProcObject: Ptr to ProcObject to insert. ++ * pDevObject: Ptr to Dev Object where the list is. ++ * pbAlreadyAttached: Ptr to return the bool ++ * Returns: ++ * DSP_SOK: If successful. ++ * Requires: ++ * List Exists ++ * hDevObject is Valid handle ++ * DEV Initialized ++ * pbAlreadyAttached != NULL ++ * hProcObject != 0 ++ * Ensures: ++ * DSP_SOK and List is not Empty. ++ */ ++DSP_STATUS DEV_InsertProcObject(struct DEV_OBJECT *hDevObject, ++ u32 hProcObject, ++ OUT bool *pbAlreadyAttached) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)hDevObject; ++ ++ GT_2trace(debugMask, GT_ENTER, ++ "Entering DEV_InsetProcObject pProcObject 0x%x" ++ "pDevObject 0x%x\n", hProcObject, hDevObject); ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValidHandle(pDevObject)); ++ DBC_Require(hProcObject != 0); ++ DBC_Require(pDevObject->procList != NULL); ++ DBC_Require(pbAlreadyAttached != NULL); ++ if (!LST_IsEmpty(pDevObject->procList)) ++ *pbAlreadyAttached = true; ++ ++ /* Add DevObject to tail. */ ++ LST_PutTail(pDevObject->procList, (struct LST_ELEM *)hProcObject); ++ ++ GT_1trace(debugMask, GT_ENTER, ++ "Exiting DEV_InsetProcObject status 0x%x\n", status); ++ DBC_Ensure(DSP_SUCCEEDED(status) && !LST_IsEmpty(pDevObject->procList)); ++ ++ return status; ++} ++ ++/* ++ * ======== DEV_RemoveProcObject ======== ++ * Purpose: ++ * Search for and remove a Proc object from the given list maintained ++ * by the DEV ++ * Parameters: ++ * pProcObject: Ptr to ProcObject to insert. ++ * pDevObject Ptr to Dev Object where the list is. ++ * Returns: ++ * DSP_SOK: If successful. ++ * Requires: ++ * List exists and is not empty ++ * hProcObject != 0 ++ * hDevObject is a valid Dev handle. ++ * Ensures: ++ * Details: ++ * List will be deleted when the DEV is destroyed. ++ */ ++DSP_STATUS DEV_RemoveProcObject(struct DEV_OBJECT *hDevObject, ++ u32 hProcObject) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct LST_ELEM *pCurElem; ++ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)hDevObject; ++ ++ DBC_Require(IsValidHandle(pDevObject)); ++ DBC_Require(hProcObject != 0); ++ DBC_Require(pDevObject->procList != NULL); ++ DBC_Require(!LST_IsEmpty(pDevObject->procList)); ++ ++ GT_1trace(debugMask, GT_ENTER, ++ "Entering DEV_RemoveProcObject hDevObject " ++ "0x%x\n", hDevObject); ++ /* Search list for pDevObject: */ ++ for (pCurElem = LST_First(pDevObject->procList); pCurElem != NULL; ++ pCurElem = LST_Next(pDevObject->procList, pCurElem)) { ++ /* If found, remove it. 
*/ ++ if ((u32)pCurElem == hProcObject) { ++ LST_RemoveElem(pDevObject->procList, pCurElem); ++ status = DSP_SOK; ++ break; ++ } ++ } ++ GT_1trace(debugMask, GT_ENTER, "DEV_RemoveProcObject returning 0x%x\n", ++ status); ++ return status; ++} ++ ++DSP_STATUS DEV_GetDevType(struct DEV_OBJECT *hdevObject, u32 *devType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)hdevObject; ++ ++ *devType = pDevObject->devType; ++ ++ return status; ++} ++ ++/* ++ * ======== StoreInterfaceFxns ======== ++ * Purpose: ++ * Copy the WMD's interface functions into the device object, ++ * ensuring that FxnNotImplemented() is set for: ++ * ++ * 1. All WMD function pointers which are NULL; and ++ * 2. All function slots in the struct DEV_OBJECT structure which have no ++ * corresponding slots in the the WMD's interface, because the WMD ++ * is of an *older* version. ++ * Parameters: ++ * pIntfFxns: Interface Fxn Structure of the WCD's Dev Object. ++ * pDrvFxns: Interface Fxns offered by the WMD during DEV_Create(). ++ * Returns: ++ * Requires: ++ * Input pointers are valid. ++ * WMD is *not* written for a newer WCD. ++ * Ensures: ++ * All function pointers in the dev object's Fxn interface are not NULL. ++ */ ++static void StoreInterfaceFxns(struct WMD_DRV_INTERFACE *pDrvFxns, ++ OUT struct WMD_DRV_INTERFACE *pIntfFxns) ++{ ++ u32 dwWMDVersion; ++ ++ /* Local helper macro: */ ++#define StoreFxn(cast, pfn) \ ++ (pIntfFxns->pfn = ((pDrvFxns->pfn != NULL) ? pDrvFxns->pfn : \ ++ (cast)FxnNotImplemented)) ++ ++ DBC_Require(pIntfFxns != NULL); ++ DBC_Require(pDrvFxns != NULL); ++ DBC_Require(MAKEVERSION(pDrvFxns->dwWCDMajorVersion, ++ pDrvFxns->dwWCDMinorVersion) <= WCDVERSION); ++ dwWMDVersion = MAKEVERSION(pDrvFxns->dwWCDMajorVersion, ++ pDrvFxns->dwWCDMinorVersion); ++ pIntfFxns->dwWCDMajorVersion = pDrvFxns->dwWCDMajorVersion; ++ pIntfFxns->dwWCDMinorVersion = pDrvFxns->dwWCDMinorVersion; ++ /* Install functions up to WCD version .80 (first alpha): */ ++ if (dwWMDVersion > 0) { ++ StoreFxn(WMD_DEV_CREATE, pfnDevCreate); ++ StoreFxn(WMD_DEV_DESTROY, pfnDevDestroy); ++ StoreFxn(WMD_DEV_CTRL, pfnDevCntrl); ++ StoreFxn(WMD_BRD_MONITOR, pfnBrdMonitor); ++ StoreFxn(WMD_BRD_START, pfnBrdStart); ++ StoreFxn(WMD_BRD_STOP, pfnBrdStop); ++ StoreFxn(WMD_BRD_STATUS, pfnBrdStatus); ++ StoreFxn(WMD_BRD_READ, pfnBrdRead); ++ StoreFxn(WMD_BRD_WRITE, pfnBrdWrite); ++ StoreFxn(WMD_BRD_SETSTATE, pfnBrdSetState); ++ StoreFxn(WMD_BRD_MEMCOPY, pfnBrdMemCopy); ++ StoreFxn(WMD_BRD_MEMWRITE, pfnBrdMemWrite); ++ StoreFxn(WMD_BRD_MEMMAP, pfnBrdMemMap); ++ StoreFxn(WMD_BRD_MEMUNMAP, pfnBrdMemUnMap); ++ StoreFxn(WMD_CHNL_CREATE, pfnChnlCreate); ++ StoreFxn(WMD_CHNL_DESTROY, pfnChnlDestroy); ++ StoreFxn(WMD_CHNL_OPEN, pfnChnlOpen); ++ StoreFxn(WMD_CHNL_CLOSE, pfnChnlClose); ++ StoreFxn(WMD_CHNL_ADDIOREQ, pfnChnlAddIOReq); ++ StoreFxn(WMD_CHNL_GETIOC, pfnChnlGetIOC); ++ StoreFxn(WMD_CHNL_CANCELIO, pfnChnlCancelIO); ++ StoreFxn(WMD_CHNL_FLUSHIO, pfnChnlFlushIO); ++ StoreFxn(WMD_CHNL_GETINFO, pfnChnlGetInfo); ++ StoreFxn(WMD_CHNL_GETMGRINFO, pfnChnlGetMgrInfo); ++ StoreFxn(WMD_CHNL_IDLE, pfnChnlIdle); ++ StoreFxn(WMD_CHNL_REGISTERNOTIFY, pfnChnlRegisterNotify); ++ StoreFxn(WMD_DEH_CREATE, pfnDehCreate); ++ StoreFxn(WMD_DEH_DESTROY, pfnDehDestroy); ++ StoreFxn(WMD_DEH_NOTIFY, pfnDehNotify); ++ StoreFxn(WMD_DEH_REGISTERNOTIFY, pfnDehRegisterNotify); ++ StoreFxn(WMD_DEH_GETINFO, pfnDehGetInfo); ++ StoreFxn(WMD_IO_CREATE, pfnIOCreate); ++ StoreFxn(WMD_IO_DESTROY, pfnIODestroy); ++ StoreFxn(WMD_IO_ONLOADED, 
pfnIOOnLoaded); ++ StoreFxn(WMD_IO_GETPROCLOAD, pfnIOGetProcLoad); ++ StoreFxn(WMD_MSG_CREATE, pfnMsgCreate); ++ StoreFxn(WMD_MSG_CREATEQUEUE, pfnMsgCreateQueue); ++ StoreFxn(WMD_MSG_DELETE, pfnMsgDelete); ++ StoreFxn(WMD_MSG_DELETEQUEUE, pfnMsgDeleteQueue); ++ StoreFxn(WMD_MSG_GET, pfnMsgGet); ++ StoreFxn(WMD_MSG_PUT, pfnMsgPut); ++ StoreFxn(WMD_MSG_REGISTERNOTIFY, pfnMsgRegisterNotify); ++ StoreFxn(WMD_MSG_SETQUEUEID, pfnMsgSetQueueId); ++ } ++ /* Add code for any additional functions in newer WMD versions here: */ ++ /* Ensure postcondition: */ ++ DBC_Ensure(pIntfFxns->pfnDevCreate != NULL); ++ DBC_Ensure(pIntfFxns->pfnDevDestroy != NULL); ++ DBC_Ensure(pIntfFxns->pfnDevCntrl != NULL); ++ DBC_Ensure(pIntfFxns->pfnBrdMonitor != NULL); ++ DBC_Ensure(pIntfFxns->pfnBrdStart != NULL); ++ DBC_Ensure(pIntfFxns->pfnBrdStop != NULL); ++ DBC_Ensure(pIntfFxns->pfnBrdStatus != NULL); ++ DBC_Ensure(pIntfFxns->pfnBrdRead != NULL); ++ DBC_Ensure(pIntfFxns->pfnBrdWrite != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlCreate != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlDestroy != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlOpen != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlClose != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlAddIOReq != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlGetIOC != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlCancelIO != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlFlushIO != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlGetInfo != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlGetMgrInfo != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlIdle != NULL); ++ DBC_Ensure(pIntfFxns->pfnChnlRegisterNotify != NULL); ++ DBC_Ensure(pIntfFxns->pfnDehCreate != NULL); ++ DBC_Ensure(pIntfFxns->pfnDehDestroy != NULL); ++ DBC_Ensure(pIntfFxns->pfnDehNotify != NULL); ++ DBC_Ensure(pIntfFxns->pfnDehRegisterNotify != NULL); ++ DBC_Ensure(pIntfFxns->pfnDehGetInfo != NULL); ++ DBC_Ensure(pIntfFxns->pfnIOCreate != NULL); ++ DBC_Ensure(pIntfFxns->pfnIODestroy != NULL); ++ DBC_Ensure(pIntfFxns->pfnIOOnLoaded != NULL); ++ DBC_Ensure(pIntfFxns->pfnIOGetProcLoad != NULL); ++ DBC_Ensure(pIntfFxns->pfnMsgSetQueueId != NULL); ++ ++#undef StoreFxn ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dmm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dmm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dmm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/dmm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,657 @@ ++/* ++ * dmm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
++ */ ++ ++/* ++ * ======== dmm.c ======== ++ * Purpose: ++ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address ++ * space that can be directly mapped to any MPU buffer or memory region ++ * ++ * Public Functions: ++ * DMM_CreateTables ++ * DMM_Create ++ * DMM_Destroy ++ * DMM_Exit ++ * DMM_Init ++ * DMM_MapMemory ++ * DMM_Reset ++ * DMM_ReserveMemory ++ * DMM_UnMapMemory ++ * DMM_UnReserveMemory ++ * ++ * Private Functions: ++ * AddRegion ++ * CreateRegion ++ * GetRegion ++ * GetFreeRegion ++ * GetMappedRegion ++ * ++ * Notes: ++ * Region: Generic memory entitiy having a start address and a size ++ * Chunk: Reserved region ++ * ++ * ++ *! Revision History: ++ *! ================ ++ *! 04-Jun-2008 Hari K : Optimized DMM implementation. Removed linked list ++ *! and instead used Table approach. ++ *! 19-Apr-2004 sb: Integrated Alan's code review updates. ++ *! 17-Mar-2004 ap: Fixed GetRegion for size=0 using tighter bound. ++ *! 20-Feb-2004 sb: Created. ++ *! ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++/* Object signatures */ ++#define DMMSIGNATURE 0x004d4d44 /* "DMM" (in reverse) */ ++ ++#define DMM_ADDR_VIRTUAL(a) \ ++ (((struct MapPage *)(a) - pVirtualMappingTable) * PG_SIZE_4K +\ ++ dynMemMapBeg) ++#define DMM_ADDR_TO_INDEX(a) (((a) - dynMemMapBeg) / PG_SIZE_4K) ++ ++/* DMM Mgr */ ++struct DMM_OBJECT { ++ u32 dwSignature; /* Used for object validation */ ++ /* Dmm Lock is used to serialize access mem manager for ++ * multi-threads. */ ++ struct SYNC_CSOBJECT *hDmmLock; /* Lock to access dmm mgr */ ++}; ++ ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask DMM_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static u32 cRefs; /* module reference count */ ++struct MapPage { ++ u32 RegionSize:15; ++ u32 MappedSize:15; ++ u32 bReserved:1; ++ u32 bMapped:1; ++}; ++ ++/* Create the free list */ ++static struct MapPage *pVirtualMappingTable; ++static u32 iFreeRegion; /* The index of free region */ ++static u32 iFreeSize; ++static u32 dynMemMapBeg; /* The Beginning of dynamic memory mapping */ ++static u32 TableSize;/* The size of virtual and physical pages tables */ ++ ++/* ----------------------------------- Function Prototypes */ ++static struct MapPage *GetRegion(u32 addr); ++static struct MapPage *GetFreeRegion(u32 aSize); ++static struct MapPage *GetMappedRegion(u32 aAddr); ++#ifdef DSP_DMM_DEBUG ++u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr); ++#endif ++ ++/* ======== DMM_CreateTables ======== ++ * Purpose: ++ * Create table to hold the information of physical address ++ * the buffer pages that is passed by the user, and the table ++ * to hold the information of the virtual memory that is reserved ++ * for DSP. 
++ */ ++DSP_STATUS DMM_CreateTables(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 size) ++{ ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ GT_3trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_CreateTables () hDmmMgr %x, addr" ++ " %x, size %x\n", hDmmMgr, addr, size); ++ status = DMM_DeleteTables(pDmmObj); ++ if (DSP_SUCCEEDED(status)) { ++ SYNC_EnterCS(pDmmObj->hDmmLock); ++ dynMemMapBeg = addr; ++ TableSize = PG_ALIGN_HIGH(size, PG_SIZE_4K)/PG_SIZE_4K; ++ /* Create the free list */ ++ pVirtualMappingTable = (struct MapPage *) MEM_Calloc ++ (TableSize * sizeof(struct MapPage), MEM_LARGEVIRTMEM); ++ if (pVirtualMappingTable == NULL) ++ status = DSP_EMEMORY; ++ else { ++ /* On successful allocation, ++ * all entries are zero ('free') */ ++ iFreeRegion = 0; ++ iFreeSize = TableSize*PG_SIZE_4K; ++ pVirtualMappingTable[0].RegionSize = TableSize; ++ } ++ SYNC_LeaveCS(pDmmObj->hDmmLock); ++ } else ++ GT_0trace(DMM_debugMask, GT_7CLASS, ++ "DMM_CreateTables: DMM_DeleteTables" ++ "Failure\n"); ++ ++ GT_1trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_CreateTables status" ++ "0x%x\n", status); ++ return status; ++} ++ ++/* ++ * ======== DMM_Create ======== ++ * Purpose: ++ * Create a dynamic memory manager object. ++ */ ++DSP_STATUS DMM_Create(OUT struct DMM_OBJECT **phDmmMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct DMM_MGRATTRS *pMgrAttrs) ++{ ++ struct DMM_OBJECT *pDmmObject = NULL; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDmmMgr != NULL); ++ ++ GT_3trace(DMM_debugMask, GT_ENTER, ++ "DMM_Create: phDmmMgr: 0x%x hDevObject: " ++ "0x%x pMgrAttrs: 0x%x\n", phDmmMgr, hDevObject, pMgrAttrs); ++ *phDmmMgr = NULL; ++ /* create, zero, and tag a cmm mgr object */ ++ MEM_AllocObject(pDmmObject, struct DMM_OBJECT, DMMSIGNATURE); ++ if (pDmmObject != NULL) { ++ status = SYNC_InitializeCS(&pDmmObject->hDmmLock); ++ if (DSP_SUCCEEDED(status)) ++ *phDmmMgr = pDmmObject; ++ else ++ DMM_Destroy(pDmmObject); ++ } else { ++ GT_0trace(DMM_debugMask, GT_7CLASS, ++ "DMM_Create: Object Allocation " ++ "Failure(DMM Object)\n"); ++ status = DSP_EMEMORY; ++ } ++ GT_2trace(DMM_debugMask, GT_4CLASS, ++ "Leaving DMM_Create status %x pDmmObject %x\n", ++ status, pDmmObject); ++ ++ return status; ++} ++ ++/* ++ * ======== DMM_Destroy ======== ++ * Purpose: ++ * Release the communication memory manager resources. ++ */ ++DSP_STATUS DMM_Destroy(struct DMM_OBJECT *hDmmMgr) ++{ ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ GT_1trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_Destroy () hDmmMgr %x\n", hDmmMgr); ++ DBC_Require(cRefs > 0); ++ if (MEM_IsValidHandle(hDmmMgr, DMMSIGNATURE)) { ++ status = DMM_DeleteTables(pDmmObj); ++ if (DSP_SUCCEEDED(status)) { ++ /* Delete CS & dmm mgr object */ ++ SYNC_DeleteCS(pDmmObj->hDmmLock); ++ MEM_FreeObject(pDmmObj); ++ } else ++ GT_0trace(DMM_debugMask, GT_7CLASS, ++ "DMM_Destroy: DMM_DeleteTables " ++ "Failure\n"); ++ } else ++ status = DSP_EHANDLE; ++ GT_1trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_Destroy status %x\n", ++ status); ++ return status; ++} ++ ++ ++/* ++ * ======== DMM_DeleteTables ======== ++ * Purpose: ++ * Delete DMM Tables. 
++ */ ++DSP_STATUS DMM_DeleteTables(struct DMM_OBJECT *hDmmMgr) ++{ ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ GT_1trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_DeleteTables () hDmmMgr %x\n", hDmmMgr); ++ DBC_Require(cRefs > 0); ++ if (MEM_IsValidHandle(hDmmMgr, DMMSIGNATURE)) { ++ /* Delete all DMM tables */ ++ SYNC_EnterCS(pDmmObj->hDmmLock); ++ ++ if (pVirtualMappingTable != NULL) ++ MEM_VFree(pVirtualMappingTable); ++ ++ SYNC_LeaveCS(pDmmObj->hDmmLock); ++ } else ++ status = DSP_EHANDLE; ++ GT_1trace(DMM_debugMask, GT_4CLASS, ++ "Leaving DMM_DeleteTables status %x\n", status); ++ return status; ++} ++ ++ ++ ++ ++/* ++ * ======== DMM_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ */ ++void DMM_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(DMM_debugMask, GT_ENTER, ++ "exiting DMM_Exit, ref count:0x%x\n", cRefs); ++} ++ ++/* ++ * ======== DMM_GetHandle ======== ++ * Purpose: ++ * Return the dynamic memory manager object for this device. ++ * This is typically called from the client process. ++ */ ++DSP_STATUS DMM_GetHandle(DSP_HPROCESSOR hProcessor, ++ OUT struct DMM_OBJECT **phDmmMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *hDevObject; ++ ++ GT_2trace(DMM_debugMask, GT_ENTER, ++ "DMM_GetHandle: hProcessor %x, phDmmMgr" ++ "%x\n", hProcessor, phDmmMgr); ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDmmMgr != NULL); ++ if (hProcessor != NULL) ++ status = PROC_GetDevObject(hProcessor, &hDevObject); ++ else ++ hDevObject = DEV_GetFirst(); /* default */ ++ ++ if (DSP_SUCCEEDED(status)) ++ status = DEV_GetDmmMgr(hDevObject, phDmmMgr); ++ ++ GT_2trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_GetHandle status %x, " ++ "*phDmmMgr %x\n", status, phDmmMgr ? *phDmmMgr : NULL); ++ return status; ++} ++ ++/* ++ * ======== DMM_Init ======== ++ * Purpose: ++ * Initializes private state of DMM module. ++ */ ++bool DMM_Init(void) ++{ ++ bool fRetval = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ /* Set the Trace mask */ ++ /*"DM" for Dymanic Memory Manager */ ++ GT_create(&DMM_debugMask, "DM"); ++ } ++ ++ if (fRetval) ++ cRefs++; ++ ++ GT_1trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_Init, ref count:0x%x\n", cRefs); ++ ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ pVirtualMappingTable = NULL ; ++ TableSize = 0; ++ ++ return fRetval; ++} ++ ++/* ++ * ======== DMM_MapMemory ======== ++ * Purpose: ++ * Add a mapping block to the reserved chunk. DMM assumes that this block ++ * will be mapped in the DSP/IVA's address space. DMM returns an error if a ++ * mapping overlaps another one. This function stores the info that will be ++ * required later while unmapping the block. 
++ */ ++DSP_STATUS DMM_MapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 size) ++{ ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ struct MapPage *chunk; ++ DSP_STATUS status = DSP_SOK; ++ ++ GT_3trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_MapMemory () hDmmMgr %x, " ++ "addr %x, size %x\n", hDmmMgr, addr, size); ++ SYNC_EnterCS(pDmmObj->hDmmLock); ++ /* Find the Reserved memory chunk containing the DSP block to ++ * be mapped */ ++ chunk = (struct MapPage *)GetRegion(addr); ++ if (chunk != NULL) { ++ /* Mark the region 'mapped', leave the 'reserved' info as-is */ ++ chunk->bMapped = true; ++ chunk->MappedSize = (size/PG_SIZE_4K); ++ } else ++ status = DSP_ENOTFOUND; ++ SYNC_LeaveCS(pDmmObj->hDmmLock); ++ GT_2trace(DMM_debugMask, GT_4CLASS, ++ "Leaving DMM_MapMemory status %x, chunk %x\n", ++ status, chunk); ++ return status; ++} ++ ++/* ++ * ======== DMM_ReserveMemory ======== ++ * Purpose: ++ * Reserve a chunk of virtually contiguous DSP/IVA address space. ++ */ ++DSP_STATUS DMM_ReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 size, ++ u32 *pRsvAddr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ struct MapPage *node; ++ u32 rsvAddr = 0; ++ u32 rsvSize = 0; ++ ++ GT_3trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_ReserveMemory () hDmmMgr %x, " ++ "size %x, pRsvAddr %x\n", hDmmMgr, size, pRsvAddr); ++ SYNC_EnterCS(pDmmObj->hDmmLock); ++ ++ /* Try to get a DSP chunk from the free list */ ++ node = GetFreeRegion(size); ++ if (node != NULL) { ++ /* DSP chunk of given size is available. */ ++ rsvAddr = DMM_ADDR_VIRTUAL(node); ++ /* Calculate the number entries to use */ ++ rsvSize = size/PG_SIZE_4K; ++ if (rsvSize < node->RegionSize) { ++ /* Mark remainder of free region */ ++ node[rsvSize].bMapped = false; ++ node[rsvSize].bReserved = false; ++ node[rsvSize].RegionSize = node->RegionSize - rsvSize; ++ node[rsvSize].MappedSize = 0; ++ } ++ /* GetRegion will return first fit chunk. But we only use what ++ is requested. */ ++ node->bMapped = false; ++ node->bReserved = true; ++ node->RegionSize = rsvSize; ++ node->MappedSize = 0; ++ /* Return the chunk's starting address */ ++ *pRsvAddr = rsvAddr; ++ } else ++ /*dSP chunk of given size is not available */ ++ status = DSP_EMEMORY; ++ ++ SYNC_LeaveCS(pDmmObj->hDmmLock); ++ GT_3trace(DMM_debugMask, GT_4CLASS, ++ "Leaving ReserveMemory status %x, rsvAddr" ++ " %x, rsvSize %x\n", status, rsvAddr, rsvSize); ++ return status; ++} ++ ++ ++/* ++ * ======== DMM_UnMapMemory ======== ++ * Purpose: ++ * Remove the mapped block from the reserved chunk. ++ */ ++DSP_STATUS DMM_UnMapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 *pSize) ++{ ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ struct MapPage *chunk; ++ DSP_STATUS status = DSP_SOK; ++ ++ GT_3trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_UnMapMemory () hDmmMgr %x, " ++ "addr %x, pSize %x\n", hDmmMgr, addr, pSize); ++ SYNC_EnterCS(pDmmObj->hDmmLock); ++ chunk = GetMappedRegion(addr) ; ++ if (chunk == NULL) ++ status = DSP_ENOTFOUND ; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Unmap the region */ ++ *pSize = chunk->MappedSize * PG_SIZE_4K; ++ chunk->bMapped = false; ++ chunk->MappedSize = 0; ++ } ++ SYNC_LeaveCS(pDmmObj->hDmmLock); ++ GT_3trace(DMM_debugMask, GT_ENTER, ++ "Leaving DMM_UnMapMemory status %x, chunk" ++ " %x, *pSize %x\n", status, chunk, *pSize); ++ ++ return status; ++} ++ ++/* ++ * ======== DMM_UnReserveMemory ======== ++ * Purpose: ++ * Free a chunk of reserved DSP/IVA address space. 
++ */ ++DSP_STATUS DMM_UnReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 rsvAddr) ++{ ++ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; ++ struct MapPage *chunk; ++ u32 i; ++ DSP_STATUS status = DSP_SOK; ++ u32 chunkSize; ++ ++ GT_2trace(DMM_debugMask, GT_ENTER, ++ "Entered DMM_UnReserveMemory () hDmmMgr " ++ "%x, rsvAddr %x\n", hDmmMgr, rsvAddr); ++ ++ SYNC_EnterCS(pDmmObj->hDmmLock); ++ ++ /* Find the chunk containing the reserved address */ ++ chunk = GetMappedRegion(rsvAddr); ++ if (chunk == NULL) ++ status = DSP_ENOTFOUND; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Free all the mapped pages for this reserved region */ ++ i = 0; ++ while (i < chunk->RegionSize) { ++ if (chunk[i].bMapped) { ++ /* Remove mapping from the page tables. */ ++ chunkSize = chunk[i].MappedSize; ++ /* Clear the mapping flags */ ++ chunk[i].bMapped = false; ++ chunk[i].MappedSize = 0; ++ i += chunkSize; ++ } else ++ i++; ++ } ++ /* Clear the flags (mark the region 'free') */ ++ chunk->bReserved = false; ++ /* NOTE: We do NOT coalesce free regions here. ++ * Free regions are coalesced in GetRegion(), as it traverses ++ *the whole mapping table ++ */ ++ } ++ SYNC_LeaveCS(pDmmObj->hDmmLock); ++ GT_2trace(DMM_debugMask, GT_ENTER, ++ "Leaving DMM_UnReserveMemory status %x" ++ " chunk %x\n", status, chunk); ++ return status; ++} ++ ++ ++/* ++ * ======== GetRegion ======== ++ * Purpose: ++ * Returns a region containing the specified memory region ++ */ ++static struct MapPage *GetRegion(u32 aAddr) ++{ ++ struct MapPage *currRegion = NULL; ++ u32 i = 0; ++ ++ GT_1trace(DMM_debugMask, GT_ENTER, "Entered GetRegion () " ++ " aAddr %x\n", aAddr); ++ ++ if (pVirtualMappingTable != NULL) { ++ /* find page mapped by this address */ ++ i = DMM_ADDR_TO_INDEX(aAddr); ++ if (i < TableSize) ++ currRegion = pVirtualMappingTable + i; ++ } ++ GT_3trace(DMM_debugMask, GT_4CLASS, ++ "Leaving GetRegion currRegion %x, iFreeRegion %d\n," ++ "iFreeSize %d\n", currRegion, iFreeRegion, iFreeSize) ; ++ return currRegion; ++} ++ ++/* ++ * ======== GetFreeRegion ======== ++ * Purpose: ++ * Returns the requested free region ++ */ ++static struct MapPage *GetFreeRegion(u32 aSize) ++{ ++ struct MapPage *currRegion = NULL; ++ u32 i = 0; ++ u32 RegionSize = 0; ++ u32 nextI = 0; ++ GT_1trace(DMM_debugMask, GT_ENTER, "Entered GetFreeRegion () " ++ "aSize 0x%x\n", aSize); ++ ++ if (pVirtualMappingTable == NULL) ++ return currRegion; ++ if (aSize > iFreeSize) { ++ /* Find the largest free region ++ * (coalesce during the traversal) */ ++ while (i < TableSize) { ++ RegionSize = pVirtualMappingTable[i].RegionSize; ++ nextI = i+RegionSize; ++ if (pVirtualMappingTable[i].bReserved == false) { ++ /* Coalesce, if possible */ ++ if (nextI < TableSize && ++ pVirtualMappingTable[nextI].bReserved ++ == false) { ++ pVirtualMappingTable[i].RegionSize += ++ pVirtualMappingTable[nextI].RegionSize; ++ continue; ++ } ++ RegionSize *= PG_SIZE_4K; ++ if (RegionSize > iFreeSize) { ++ iFreeRegion = i; ++ iFreeSize = RegionSize; ++ } ++ } ++ i = nextI; ++ } ++ } ++ if (aSize <= iFreeSize) { ++ currRegion = pVirtualMappingTable + iFreeRegion; ++ iFreeRegion += (aSize / PG_SIZE_4K); ++ iFreeSize -= aSize; ++ } ++ return currRegion; ++} ++ ++/* ++ * ======== GetMappedRegion ======== ++ * Purpose: ++ * Returns the requestedmapped region ++ */ ++static struct MapPage *GetMappedRegion(u32 aAddr) ++{ ++ u32 i = 0; ++ struct MapPage *currRegion = NULL; ++ GT_1trace(DMM_debugMask, GT_ENTER, "Entered GetMappedRegion () " ++ "aAddr 0x%x\n", aAddr); ++ ++ if 
(pVirtualMappingTable == NULL) ++ return currRegion; ++ ++ i = DMM_ADDR_TO_INDEX(aAddr); ++ if (i < TableSize && (pVirtualMappingTable[i].bMapped || ++ pVirtualMappingTable[i].bReserved)) ++ currRegion = pVirtualMappingTable + i; ++ return currRegion; ++} ++ ++#ifdef DSP_DMM_DEBUG ++u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr) ++{ ++ struct MapPage *curNode = NULL; ++ u32 i; ++ u32 freemem = 0; ++ u32 bigsize = 0; ++ ++ SYNC_EnterCS(hDmmMgr->hDmmLock); ++ ++ if (pVirtualMappingTable != NULL) { ++ for (i = 0; i < TableSize; i += ++ pVirtualMappingTable[i].RegionSize) { ++ curNode = pVirtualMappingTable + i; ++ if (curNode->bReserved == TRUE) { ++ /*printk("RESERVED size = 0x%x, " ++ "Map size = 0x%x\n", ++ (curNode->RegionSize * PG_SIZE_4K), ++ (curNode->bMapped == false) ? 0 : ++ (curNode->MappedSize * PG_SIZE_4K)); ++*/ ++ } else { ++/* printk("UNRESERVED size = 0x%x\n", ++ (curNode->RegionSize * PG_SIZE_4K)); ++*/ ++ freemem += (curNode->RegionSize * PG_SIZE_4K); ++ if (curNode->RegionSize > bigsize) ++ bigsize = curNode->RegionSize; ++ } ++ } ++ } ++ printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n", ++ freemem/(1024*1024)); ++ printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n", ++ (((TableSize * PG_SIZE_4K)-freemem))/(1024*1024)); ++ printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n", ++ (bigsize*PG_SIZE_4K/(1024*1024))); ++ SYNC_LeaveCS(hDmmMgr->hDmmLock); ++ ++ return 0; ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnl.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/chnl.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnl.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/chnl.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,260 @@ ++/* ++ * chnl.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnl.c ======== ++ * Description: ++ * WCD channel interface: multiplexes data streams through the single ++ * physical link managed by a 'Bridge mini-driver. ++ * ++ * Public Functions: ++ * CHNL_Close ++ * CHNL_CloseOrphans ++ * CHNL_Create ++ * CHNL_Destroy ++ * CHNL_Exit ++ * CHNL_GetHandle ++ * CHNL_GetProcessHandle ++ * CHNL_Init ++ * CHNL_Open ++ * ++ * Notes: ++ * This interface is basically a pass through to the WMD CHNL functions, ++ * except for the CHNL_Get() accessor functions which call ++ * WMD_CHNL_GetInfo(). ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 07-Jan-2002 ag CHNL_CloseOrphans() now closes supported # of channels. ++ *! 17-Nov-2000 jeh Removed IRQ, shared memory stuff from CHNL_Create. ++ *! 28-Feb-2000 rr: New GT USage Implementation ++ *! 03-Feb-2000 rr: GT and Module init/exit Changes.(Done up front from ++ *! SERVICES) ++ *! 21-Jan-2000 ag: Added code review comments. ++ *! 13-Jan-2000 rr: CFG_Get/SetPrivateDword renamed to CFG_Get/SetDevObject. ++ *! 08-Dec-1999 ag: CHNL_[Alloc|Free]Buffer bufs taken from client process heap. ++ *! 
02-Dec-1999 ag: Implemented CHNL_GetEventHandle(). ++ *! 17-Nov-1999 ag: CHNL_AllocBuffer() allocs extra word for process mapping. ++ *! 28-Oct-1999 ag: WinCE port. Search for "WinCE" for changes(TBR). ++ *! 07-Jan-1998 gp: CHNL_[Alloc|Free]Buffer now call MEM_UMB functions. ++ *! 22-Oct-1997 gp: Removed requirement in CHNL_Open that hReserved1 != NULL. ++ *! 30-Aug-1997 cr: Renamed cfg.h wbwcd.h b/c of WINNT file name collision. ++ *! 10-Mar-1997 gp: Added GT trace. ++ *! 14-Jan-1997 gp: Updated based on code review feedback. ++ *! 03-Jan-1997 gp: Moved CHNL_AllocBuffer/CHNL_FreeBuffer code from udspsys. ++ *! 14-Dec-1996 gp: Added uChnlId parameter to CHNL_Open(). ++ *! 09-Sep-1996 gp: Added CHNL_GetProcessHandle(). ++ *! 15-Jul-1996 gp: Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Globals */ ++static u32 cRefs; ++#if GT_TRACE ++static struct GT_Mask CHNL_DebugMask = { NULL, NULL }; /* WCD CHNL Mask */ ++#endif ++ ++ ++ ++/* ++ * ======== CHNL_Create ======== ++ * Purpose: ++ * Create a channel manager object, responsible for opening new channels ++ * and closing old ones for a given 'Bridge board. ++ */ ++DSP_STATUS CHNL_Create(OUT struct CHNL_MGR **phChnlMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CHNL_MGRATTRS *pMgrAttrs) ++{ ++ DSP_STATUS status; ++ struct CHNL_MGR *hChnlMgr; ++ struct CHNL_MGR_ *pChnlMgr = NULL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phChnlMgr != NULL); ++ DBC_Require(pMgrAttrs != NULL); ++ ++ GT_3trace(CHNL_DebugMask, GT_ENTER, ++ "Entered CHNL_Create: phChnlMgr: 0x%x\t" ++ "hDevObject: 0x%x\tpMgrAttrs:0x%x\n", ++ phChnlMgr, hDevObject, pMgrAttrs); ++ ++ *phChnlMgr = NULL; ++ ++ /* Validate args: */ ++ if ((0 < pMgrAttrs->cChannels) && ++ (pMgrAttrs->cChannels <= CHNL_MAXCHANNELS)) { ++ status = DSP_SOK; ++ } else if (pMgrAttrs->cChannels == 0) { ++ status = DSP_EINVALIDARG; ++ GT_0trace(CHNL_DebugMask, GT_7CLASS, ++ "CHNL_Create:Invalid Args\n"); ++ } else { ++ status = CHNL_E_MAXCHANNELS; ++ GT_0trace(CHNL_DebugMask, GT_7CLASS, ++ "CHNL_Create:Error Max Channels\n"); ++ } ++ if (pMgrAttrs->uWordSize == 0) { ++ status = CHNL_E_INVALIDWORDSIZE; ++ GT_0trace(CHNL_DebugMask, GT_7CLASS, ++ "CHNL_Create:Invalid Word size\n"); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetChnlMgr(hDevObject, &hChnlMgr); ++ if (DSP_SUCCEEDED(status) && hChnlMgr != NULL) ++ status = CHNL_E_MGREXISTS; ++ ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DEV_GetIntfFxns(hDevObject, &pIntfFxns); ++ /* Let WMD channel module finish the create: */ ++ status = (*pIntfFxns->pfnChnlCreate)(&hChnlMgr, hDevObject, ++ pMgrAttrs); ++ if (DSP_SUCCEEDED(status)) { ++ /* Fill in WCD channel module's fields of the ++ * CHNL_MGR structure */ ++ pChnlMgr = (struct CHNL_MGR_ *)hChnlMgr; ++ pChnlMgr->pIntfFxns = pIntfFxns; ++ /* Finally, return the new channel manager handle: */ ++ *phChnlMgr = hChnlMgr; ++ GT_1trace(CHNL_DebugMask, 
GT_1CLASS, ++ "CHNL_Create: Success pChnlMgr:" ++ "0x%x\n", pChnlMgr); ++ } ++ } ++ ++ GT_2trace(CHNL_DebugMask, GT_ENTER, ++ "Exiting CHNL_Create: pChnlMgr: 0x%x," ++ "status: 0x%x\n", pChnlMgr, status); ++ DBC_Ensure(DSP_FAILED(status) || CHNL_IsValidMgr(pChnlMgr)); ++ ++ return status; ++} ++ ++/* ++ * ======== CHNL_Destroy ======== ++ * Purpose: ++ * Close all open channels, and destroy the channel manager. ++ */ ++DSP_STATUS CHNL_Destroy(struct CHNL_MGR *hChnlMgr) ++{ ++ struct CHNL_MGR_ *pChnlMgr = (struct CHNL_MGR_ *)hChnlMgr; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(CHNL_DebugMask, GT_ENTER, ++ "Entered CHNL_Destroy: hChnlMgr: 0x%x\n", hChnlMgr); ++ if (CHNL_IsValidMgr(pChnlMgr)) { ++ pIntfFxns = pChnlMgr->pIntfFxns; ++ /* Let WMD channel module destroy the CHNL_MGR: */ ++ status = (*pIntfFxns->pfnChnlDestroy)(hChnlMgr); ++ } else { ++ GT_0trace(CHNL_DebugMask, GT_7CLASS, ++ "CHNL_Destroy:Invalid Handle\n"); ++ status = DSP_EHANDLE; ++ } ++ ++ GT_2trace(CHNL_DebugMask, GT_ENTER, ++ "Exiting CHNL_Destroy: pChnlMgr: 0x%x," ++ " status:0x%x\n", pChnlMgr, status); ++ DBC_Ensure(DSP_FAILED(status) || !CHNL_IsValidMgr(pChnlMgr)); ++ ++ return status; ++} ++ ++/* ++ * ======== CHNL_Exit ======== ++ * Purpose: ++ * Discontinue usage of the CHNL module. ++ */ ++void CHNL_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(CHNL_DebugMask, GT_5CLASS, ++ "Entered CHNL_Exit, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++ ++/* ++ * ======== CHNL_Init ======== ++ * Purpose: ++ * Initialize the CHNL module's private state. ++ */ ++bool CHNL_Init(void) ++{ ++ bool fRetval = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!CHNL_DebugMask.flags); ++ GT_create(&CHNL_DebugMask, "CH"); /* "CH" for CHannel */ ++ } ++ ++ if (fRetval) ++ cRefs++; ++ ++ GT_1trace(CHNL_DebugMask, GT_5CLASS, ++ "Entered CHNL_Init, ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} ++ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnlobj.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/chnlobj.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnlobj.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/chnlobj.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,71 @@ ++/* ++ * chnlobj.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnlobj.h ======== ++ * Description: ++ * Structure subcomponents of channel class library channel objects which ++ * are exposed to class driver from mini-driver. ++ * ++ * Public Functions: ++ * None. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 17-Nov-2000 jeh Removed some fields from CHNL_MGR_ to match CHNL_MGR ++ *! structure defined in _chnl_sm.h. ++ *! 
16-Jan-1997 gp: Created from chnlpriv.h ++ */ ++ ++#ifndef CHNLOBJ_ ++#define CHNLOBJ_ ++ ++#include ++#include ++ ++/* Object validateion macros: */ ++#define CHNL_IsValidMgr(h) \ ++ ((h != NULL) && ((h)->dwSignature == CHNL_MGRSIGNATURE)) ++ ++#define CHNL_IsValidChnl(h)\ ++ ((h != NULL) && ((h)->dwSignature == CHNL_SIGNATURE)) ++ ++/* ++ * This struct is the first field in a CHNL_MGR struct, as implemented in ++ * a WMD channel class library. Other, implementation specific fields ++ * follow this structure in memory. ++ */ ++struct CHNL_MGR_ { ++ /* These must be the first fields in a CHNL_MGR struct: */ ++ u32 dwSignature; /* Used for object validation. */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ ++} ; ++ ++/* ++ * This struct is the first field in a CHNL_OBJECT struct, as implemented in ++ * a WMD channel class library. Other, implementation specific fields ++ * follow this structure in memory. ++ */ ++struct CHNL_OBJECT_ { ++ /* These must be the first fields in a CHNL_OBJECT struct: */ ++ u32 dwSignature; /* Used for object validation. */ ++ struct CHNL_MGR_ *pChnlMgr; /* Pointer back to channel manager. */ ++} ; ++ ++#endif /* CHNLOBJ_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/io.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/io.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/io.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/io.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,205 @@ ++/* ++ * io.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== io.c ======== ++ * Description: ++ * IO manager interface: Manages IO between CHNL and MSG. ++ * ++ * Public Functions: ++ * IO_Create ++ * IO_Destroy ++ * IO_Exit ++ * IO_Init ++ * IO_OnLoaded ++ * ++ * Notes: ++ * This interface is basically a pass through to the WMD IO functions. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 04-Apr-2001 rr WSX_STATUS initialized in IO_Create. ++ *! 07-Nov-2000 jeh Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Globals */ ++static u32 cRefs; ++ ++#if GT_TRACE ++static struct GT_Mask IO_DebugMask = { NULL, NULL }; /* WCD IO Mask */ ++#endif ++ ++/* ++ * ======== IO_Create ======== ++ * Purpose: ++ * Create an IO manager object, responsible for managing IO between ++ * CHNL and MSG ++ */ ++DSP_STATUS IO_Create(OUT struct IO_MGR **phIOMgr, struct DEV_OBJECT *hDevObject, ++ IN CONST struct IO_ATTRS *pMgrAttrs) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct IO_MGR *hIOMgr = NULL; ++ struct IO_MGR_ *pIOMgr = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phIOMgr != NULL); ++ DBC_Require(pMgrAttrs != NULL); ++ ++ GT_3trace(IO_DebugMask, GT_ENTER, "Entered IO_Create: phIOMgr: 0x%x\t " ++ "hDevObject: 0x%x\tpMgrAttrs: 0x%x\n", ++ phIOMgr, hDevObject, pMgrAttrs); ++ ++ *phIOMgr = NULL; ++ ++ /* A memory base of 0 implies no memory base: */ ++ if ((pMgrAttrs->dwSMBase != 0) && (pMgrAttrs->uSMLength == 0)) { ++ status = CHNL_E_INVALIDMEMBASE; ++ GT_0trace(IO_DebugMask, GT_7CLASS, ++ "IO_Create:Invalid Mem Base\n"); ++ } ++ ++ if (pMgrAttrs->uWordSize == 0) { ++ status = CHNL_E_INVALIDWORDSIZE; ++ GT_0trace(IO_DebugMask, GT_7CLASS, ++ "IO_Create:Invalid Word size\n"); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ DEV_GetIntfFxns(hDevObject, &pIntfFxns); ++ ++ /* Let WMD channel module finish the create: */ ++ status = (*pIntfFxns->pfnIOCreate)(&hIOMgr, hDevObject, ++ pMgrAttrs); ++ ++ if (DSP_SUCCEEDED(status)) { ++ pIOMgr = (struct IO_MGR_ *) hIOMgr; ++ pIOMgr->pIntfFxns = pIntfFxns; ++ pIOMgr->hDevObject = hDevObject; ++ ++ /* Return the new channel manager handle: */ ++ *phIOMgr = hIOMgr; ++ GT_1trace(IO_DebugMask, GT_1CLASS, ++ "IO_Create: Success hIOMgr: 0x%x\n", ++ hIOMgr); ++ } ++ } ++ ++ GT_2trace(IO_DebugMask, GT_ENTER, ++ "Exiting IO_Create: hIOMgr: 0x%x, status:" ++ " 0x%x\n", hIOMgr, status); ++ ++ return status; ++} ++ ++/* ++ * ======== IO_Destroy ======== ++ * Purpose: ++ * Delete IO manager. ++ */ ++DSP_STATUS IO_Destroy(struct IO_MGR *hIOMgr) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct IO_MGR_ *pIOMgr = (struct IO_MGR_ *)hIOMgr; ++ DSP_STATUS status; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(IO_DebugMask, GT_ENTER, "Entered IO_Destroy: hIOMgr: 0x%x\n", ++ hIOMgr); ++ ++ pIntfFxns = pIOMgr->pIntfFxns; ++ ++ /* Let WMD channel module destroy the IO_MGR: */ ++ status = (*pIntfFxns->pfnIODestroy) (hIOMgr); ++ ++ GT_2trace(IO_DebugMask, GT_ENTER, ++ "Exiting IO_Destroy: pIOMgr: 0x%x, status:" ++ " 0x%x\n", pIOMgr, status); ++ return status; ++} ++ ++/* ++ * ======== IO_Exit ======== ++ * Purpose: ++ * Discontinue usage of the IO module. ++ */ ++void IO_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(IO_DebugMask, GT_5CLASS, ++ "Entered IO_Exit, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== IO_Init ======== ++ * Purpose: ++ * Initialize the IO module's private state. 
++ */ ++bool IO_Init(void) ++{ ++ bool fRetval = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!IO_DebugMask.flags); ++ GT_create(&IO_DebugMask, "IO"); /* "IO" for IO */ ++ } ++ ++ if (fRetval) ++ cRefs++; ++ ++ ++ GT_1trace(IO_DebugMask, GT_5CLASS, ++ "Entered IO_Init, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/ioobj.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/ioobj.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/ioobj.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/ioobj.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,52 @@ ++/* ++ * ioobj.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== ioobj.h ======== ++ * Description: ++ * Structure subcomponents of channel class library IO objects which ++ * are exposed to class driver from mini-driver. ++ * ++ * Public Functions: ++ * None. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 01/16/97 gp: Created from chnlpriv.h ++ */ ++ ++#ifndef IOOBJ_ ++#define IOOBJ_ ++ ++#include ++#include ++ ++/* ++ * This struct is the first field in a IO_MGR struct, as implemented in ++ * a WMD channel class library. Other, implementation specific fields ++ * follow this structure in memory. ++ */ ++struct IO_MGR_ { ++ /* These must be the first fields in a IO_MGR struct: */ ++ u32 dwSignature; /* Used for object validation. */ ++ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD device context. */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ ++ struct DEV_OBJECT *hDevObject; /* Device this board represents. */ ++} ; ++ ++#endif /* IOOBJ_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msg.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/msg.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msg.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/msg.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,173 @@ ++/* ++ * msg.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== msg.c ======== ++ * Description: ++ * DSP/BIOS Bridge MSG Module. ++ * ++ * Public Functions: ++ * MSG_Create ++ * MSG_Delete ++ * MSG_Exit ++ * MSG_Init ++ * ++ *! Revision History: ++ *! ================= ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. 
++ *! 15-May-2001 ag Changed SUCCEEDED to DSP_SUCCEEDED. ++ *! 16-Feb-2001 jeh Fixed some comments. ++ *! 15-Dec-2000 rr MSG_Create returns DSP_EFAIL if pfnMsgCreate fails. ++ *! 12-Sep-2000 jeh Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- Mini Driver */ ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask MSG_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++static u32 cRefs; /* module reference count */ ++ ++/* ++ * ======== MSG_Create ======== ++ * Purpose: ++ * Create an object to manage message queues. Only one of these objects ++ * can exist per device object. ++ */ ++DSP_STATUS MSG_Create(OUT struct MSG_MGR **phMsgMgr, ++ struct DEV_OBJECT *hDevObject, MSG_ONEXIT msgCallback) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct MSG_MGR_ *pMsgMgr; ++ struct MSG_MGR *hMsgMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phMsgMgr != NULL); ++ DBC_Require(msgCallback != NULL); ++ DBC_Require(hDevObject != NULL); ++ ++ GT_3trace(MSG_debugMask, GT_ENTER, "MSG_Create: phMsgMgr: 0x%x\t" ++ "hDevObject: 0x%x\tmsgCallback: 0x%x\n", ++ phMsgMgr, hDevObject, msgCallback); ++ ++ *phMsgMgr = NULL; ++ ++ DEV_GetIntfFxns(hDevObject, &pIntfFxns); ++ ++ /* Let WMD message module finish the create: */ ++ status = (*pIntfFxns->pfnMsgCreate)(&hMsgMgr, hDevObject, msgCallback); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Fill in WCD message module's fields of the MSG_MGR ++ * structure */ ++ pMsgMgr = (struct MSG_MGR_ *)hMsgMgr; ++ pMsgMgr->pIntfFxns = pIntfFxns; ++ ++ /* Finally, return the new message manager handle: */ ++ *phMsgMgr = hMsgMgr; ++ GT_1trace(MSG_debugMask, GT_1CLASS, ++ "MSG_Create: Success pMsgMgr: 0x%x\n", pMsgMgr); ++ } else { ++ status = DSP_EFAIL; ++ } ++ return status; ++} ++ ++/* ++ * ======== MSG_Delete ======== ++ * Purpose: ++ * Delete a MSG manager allocated in MSG_Create(). 
++ */ ++void MSG_Delete(struct MSG_MGR *hMsgMgr) ++{ ++ struct MSG_MGR_ *pMsgMgr = (struct MSG_MGR_ *)hMsgMgr; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pMsgMgr, MSGMGR_SIGNATURE)); ++ ++ GT_1trace(MSG_debugMask, GT_ENTER, "MSG_Delete: hMsgMgr: 0x%x\n", ++ hMsgMgr); ++ ++ pIntfFxns = pMsgMgr->pIntfFxns; ++ ++ /* Let WMD message module destroy the MSG_MGR: */ ++ (*pIntfFxns->pfnMsgDelete)(hMsgMgr); ++ ++ DBC_Ensure(!MEM_IsValidHandle(pMsgMgr, MSGMGR_SIGNATURE)); ++} ++ ++/* ++ * ======== MSG_Exit ======== ++ */ ++void MSG_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ cRefs--; ++ GT_1trace(MSG_debugMask, GT_5CLASS, ++ "Entered MSG_Exit, ref count: 0x%x\n", cRefs); ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== MSG_Init ======== ++ */ ++bool MSG_Init(void) ++{ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!MSG_debugMask.flags); ++ GT_create(&MSG_debugMask, "MS"); /* "MS" for MSg */ ++ } ++ ++ cRefs++; ++ ++ GT_1trace(MSG_debugMask, GT_5CLASS, "MSG_Init(), ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++ ++ return true; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msgobj.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/msgobj.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msgobj.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/msgobj.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,52 @@ ++/* ++ * msgobj.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== msgobj.h ======== ++ * Description: ++ * Structure subcomponents of channel class library MSG objects which ++ * are exposed to class driver from mini-driver. ++ * ++ * Public Functions: ++ * None. ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 17-Nov-2000 jeh Created. ++ */ ++ ++#ifndef MSGOBJ_ ++#define MSGOBJ_ ++ ++#include ++ ++#include ++ ++/* ++ * This struct is the first field in a MSG_MGR struct, as implemented in ++ * a WMD channel class library. Other, implementation specific fields ++ * follow this structure in memory. ++ */ ++struct MSG_MGR_ { ++ /* The first two fields must match those in msgobj.h */ ++ u32 dwSignature; ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ ++}; ++ ++#endif /* MSGOBJ_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/wcd.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/wcd.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/wcd.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/pmgr/wcd.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1747 @@ ++/* ++ * wcd.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== wcd.c ======== ++ * Description: ++ * Common WCD functions, also includes the wrapper ++ * functions called directly by the DeviceIOControl interface. ++ * ++ * Public Functions: ++ * WCD_CallDevIOCtl ++ * WCD_Init ++ * WCD_InitComplete2 ++ * WCD_Exit ++ * WRAP_* ++ * ++ *! Revision History: ++ *! ================ ++ *! 29-Apr-2004 hp Call PROC_AutoStart only for DSP device ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping APIs ++ *! 03-Apr-2003 sb Process environment pointer in PROCWRAP_Load ++ *! 24-Feb-2003 swa PMGR Code review comments incorporated. ++ *! 30-Jan-2002 ag CMMWRAP_AllocBuf name changed to CMMWRAP_CallocBuf ++ *! 15-Jan-2002 ag Added actual bufSize param to STRMWRAP_Reclaim[issue]. ++ *! 14-Dec-2001 rr ARGS_NODE_CONNECT maps the pAttr. ++ *! 03-Oct-2001 rr ARGS_NODE_ALLOCMSGBUF/FREEMSGBUF maps the pAttr. ++ *! 10-Sep-2001 ag Added CMD_CMM_GETHANDLE. ++ *! 23-Apr-2001 jeh Pass pStatus to NODE_Terminate. ++ *! 11-Apr-2001 jeh STRMWRAP_Reclaim embedded pointer is mapped and unmapped. ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. ++ *! 06-Dec-2000 jeh WRAP_MAP2CALLER pointers in RegisterNotify calls. ++ *! 05-Dec-2000 ag: Removed MAP2CALLER in NODEWRAP_FreeMsgBuf(). ++ *! 22-Nov-2000 kc: Added MGRWRAP_GetPerf_Data(). ++ *! 20-Nov-2000 jeh Added MSG_Init()/MSG_Exit(), IO_Init()/IO_Exit(). ++ *! WRAP pointers to handles for PROC_Attach, NODE_Allocate. ++ *! 27-Oct-2000 jeh Added NODEWRAP_AllocMsgBuf, NODEWRAP_FreeMsgBuf. Removed ++ *! NODEWRAP_GetMessageStream. ++ *! 12-Oct-2000 ag: Added user CMM wrappers. ++ *! 05-Oct-2000 rr: WcdInitComplete2 will fail even if one BRD or PROC ++ *! AutoStart fails. ++ *! 25-Sep-2000 rr: Updated to Version 0.9 ++ *! 13-Sep-2000 jeh Pass ARGS_NODE_CONNECT.pAttrs to NODE_Connect(). ++ *! 11-Aug-2000 rr: Part of node enabled. ++ *! 31-Jul-2000 rr: UTIL_Wrap and MEM_Wrap added to RM. ++ *! 27-Jul-2000 rr: PROCWRAP, NODEWRAP and STRMWRAP implemented. ++ *! STRM and some NODE Wrappers are not implemented. ++ *! 27-Jun-2000 rr: MGRWRAP fxns added.IFDEF to build for PM or DSP/BIOS Bridge ++ *! 08-Feb-2000 rr File name changed to wcd.c ++ *! 03-Feb-2000 rr: Module initialization are done by SERVICES init. GT Class ++ *! changes for module init/exit fxns. ++ *! 24-Jan-2000 rr: Merged with Scott's code. ++ *! 21-Jan-1999 sg: Changed ARGS_CHNL_GETMODE field name from pdwMode to pMode. ++ *! 17-Jan-2000 rr: BRD_GetStatus does WRAP_MAP2CALLER for state. ++ *! 14-Dec-1999 ag: Removed _MAP2CALLER in CHNL_GetMgr(). ++ *! 13-Dec-1999 rr: BRDWRAP_GetSymbol, BRDWRAP_GetTrace uses WRAP_MAP2CALLER ++ *! macros.BRDWRAP_Load maps and unmaps embedded pointers. ++ *! 10-Dec-1999 ag: User CHNL bufs mapped in _AddIOReq & _GetIOCompletion. ++ *! 09-Dec-1999 rr: BRDWRAP_Open and CHNLWRAP_GetMgr does not map ++ *! pointer as there was a change in config.c ++ *! 06-Dec-1999 rr: BRD_Read and Write Maps the buf pointers. ++ *! 03-Dec-1999 rr: CHNLWRAP_GetMgr and BRDWRAP_Open maps hDevNode pointer. ++ *! WCD_InitComplete2 Included for BRD_AutoStart. ++ *! 
16-Nov-1999 ag: Map buf to process in CHNLWRAP_AllocBuffer(). ++ *! CHNL_GetMgr() Mapping Fix. ++ *! 10-Nov-1999 ag: Removed unnecessary calls to WRAP_MAP2CALLER. ++ *! 08-Nov-1999 kc: Added MEMRY & enabled BRD_IOCtl for tests. ++ *! 29-Oct-1999 ag: Added CHNL. ++ *! 29-Oct-1999 kc: Added trace statements; added ptr mapping; updated ++ *! use of UTIL module API. ++ *! 29-Oct-1999 rr: Wrapper functions does the Mapping of the Pointers. ++ *! in WinCE all the explicit pointers will be converted ++ *! by the OS during interprocess but not the embedded pointers. ++ *! 16-Oct-1999 kc: Code review cleanup. ++ *! 07-Oct-1999 kc: Added UTILWRAP_TestDll() to run PM test harness. See ++ *! /src/doc/pmtest.doc for more detail. ++ *! 09-Sep-1999 rr: After exactly two years(!). Adopted for WinCE. GT Enabled. ++ *! 09-Sep-1997 gp: Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++#include ++#include ++#include ++ ++ ++/* ----------------------------------- Others */ ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++#ifndef RES_CLEANUP_DISABLE ++#include ++#endif ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define MAX_TRACEBUFLEN 255 ++#define MAX_LOADARGS 16 ++#define MAX_NODES 64 ++#define MAX_STREAMS 16 ++#define MAX_BUFS 64 ++ ++/* Device IOCtl function pointer */ ++struct WCD_Cmd { ++ u32(*fxn)(union Trapped_Args *args, void *pr_ctxt); ++ u32 dwIndex; ++} ; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask WCD_debugMask = { NULL, NULL }; /* Core VxD Mask */ ++#endif ++static u32 WCD_cRefs; ++ ++static inline void __cp_fm_usr(void *to, const void __user *from, ++ DSP_STATUS *err, unsigned long bytes) ++{ ++ if (DSP_FAILED(*err)) ++ return; ++ ++ if (unlikely(!from)) { ++ *err = DSP_EPOINTER; ++ return; ++ } ++ ++ if (unlikely(copy_from_user(to, from, bytes))) { ++ GT_2trace(WCD_debugMask, GT_7CLASS, ++ "%s failed, from=0x%08x\n", __func__, from); ++ *err = DSP_EPOINTER; ++ } ++} ++#define cp_fm_usr(to, from, err, n) \ ++ __cp_fm_usr(to, from, &(err), (n) * sizeof(*(to))) ++ ++static inline void __cp_to_usr(void __user *to, const void *from, ++ DSP_STATUS *err, unsigned long bytes) ++{ ++ if (DSP_FAILED(*err)) ++ return; ++ ++ if (unlikely(!to)) { ++ *err = DSP_EPOINTER; ++ return; ++ } ++ ++ if (unlikely(copy_to_user(to, from, bytes))) { ++ GT_2trace(WCD_debugMask, GT_7CLASS, ++ "%s failed, to=0x%08x\n", __func__, to); ++ *err = DSP_EPOINTER; ++ } ++} ++#define cp_to_usr(to, from, err, n) \ ++ __cp_to_usr(to, from, &(err), (n) * sizeof(*(from))) ++ ++/* ++ * Function table. ++ * The order of these functions MUST be the same as the order of the command ++ * numbers defined in wcdioctl.h This is how an IOCTL number in user mode ++ * turns into a function call in kernel mode. 
++ */ ++static struct WCD_Cmd WCD_cmdTable[] = { ++ /* MGR module */ ++ {MGRWRAP_EnumNode_Info, CMD_MGR_ENUMNODE_INFO_OFFSET}, ++ {MGRWRAP_EnumProc_Info, CMD_MGR_ENUMPROC_INFO_OFFSET}, ++ {MGRWRAP_RegisterObject, CMD_MGR_REGISTEROBJECT_OFFSET}, ++ {MGRWRAP_UnregisterObject, CMD_MGR_UNREGISTEROBJECT_OFFSET}, ++ {MGRWRAP_WaitForBridgeEvents, CMD_MGR_WAIT_OFFSET}, ++#ifndef RES_CLEANUP_DISABLE ++ {MGRWRAP_GetProcessResourcesInfo, CMD_MGR_RESOUCES_OFFSET}, ++#endif ++ /* PROC Module */ ++ {PROCWRAP_Attach, CMD_PROC_ATTACH_OFFSET}, ++ {PROCWRAP_Ctrl, CMD_PROC_CTRL_OFFSET}, ++ {PROCWRAP_Detach, CMD_PROC_DETACH_OFFSET}, ++ {PROCWRAP_EnumNode_Info, CMD_PROC_ENUMNODE_OFFSET}, ++ {PROCWRAP_EnumResources, CMD_PROC_ENUMRESOURCES_OFFSET}, ++ {PROCWRAP_GetState, CMD_PROC_GETSTATE_OFFSET}, ++ {PROCWRAP_GetTrace, CMD_PROC_GETTRACE_OFFSET}, ++ {PROCWRAP_Load, CMD_PROC_LOAD_OFFSET}, ++ {PROCWRAP_RegisterNotify, CMD_PROC_REGISTERNOTIFY_OFFSET}, ++ {PROCWRAP_Start, CMD_PROC_START_OFFSET}, ++ {PROCWRAP_ReserveMemory, CMD_PROC_RSVMEM_OFFSET}, ++ {PROCWRAP_UnReserveMemory, CMD_PROC_UNRSVMEM_OFFSET}, ++ {PROCWRAP_Map, CMD_PROC_MAPMEM_OFFSET}, ++ {PROCWRAP_UnMap, CMD_PROC_UNMAPMEM_OFFSET}, ++ {PROCWRAP_FlushMemory, CMD_PROC_FLUSHMEMORY_OFFSET}, ++ {PROCWRAP_Stop, CMD_PROC_STOP_OFFSET}, ++ {PROCWRAP_InvalidateMemory, CMD_PROC_INVALIDATEMEMORY_OFFSET}, ++ /* NODE Module */ ++ {NODEWRAP_Allocate, CMD_NODE_ALLOCATE_OFFSET}, ++ {NODEWRAP_AllocMsgBuf, CMD_NODE_ALLOCMSGBUF_OFFSET}, ++ {NODEWRAP_ChangePriority, CMD_NODE_CHANGEPRIORITY_OFFSET}, ++ {NODEWRAP_Connect, CMD_NODE_CONNECT_OFFSET}, ++ {NODEWRAP_Create, CMD_NODE_CREATE_OFFSET}, ++ {NODEWRAP_Delete, CMD_NODE_DELETE_OFFSET}, ++ {NODEWRAP_FreeMsgBuf, CMD_NODE_FREEMSGBUF_OFFSET}, ++ {NODEWRAP_GetAttr, CMD_NODE_GETATTR_OFFSET}, ++ {NODEWRAP_GetMessage, CMD_NODE_GETMESSAGE_OFFSET}, ++ {NODEWRAP_Pause, CMD_NODE_PAUSE_OFFSET}, ++ {NODEWRAP_PutMessage, CMD_NODE_PUTMESSAGE_OFFSET}, ++ {NODEWRAP_RegisterNotify, CMD_NODE_REGISTERNOTIFY_OFFSET}, ++ {NODEWRAP_Run, CMD_NODE_RUN_OFFSET}, ++ {NODEWRAP_Terminate, CMD_NODE_TERMINATE_OFFSET}, ++ {NODEWRAP_GetUUIDProps, CMD_NODE_GETUUIDPROPS_OFFSET}, ++ /* STRM wrapper functions */ ++ {STRMWRAP_AllocateBuffer, CMD_STRM_ALLOCATEBUFFER_OFFSET}, ++ {STRMWRAP_Close, CMD_STRM_CLOSE_OFFSET}, ++ {STRMWRAP_FreeBuffer, CMD_STRM_FREEBUFFER_OFFSET}, ++ {STRMWRAP_GetEventHandle, CMD_STRM_GETEVENTHANDLE_OFFSET}, ++ {STRMWRAP_GetInfo, CMD_STRM_GETINFO_OFFSET}, ++ {STRMWRAP_Idle, CMD_STRM_IDLE_OFFSET}, ++ {STRMWRAP_Issue, CMD_STRM_ISSUE_OFFSET}, ++ {STRMWRAP_Open, CMD_STRM_OPEN_OFFSET}, ++ {STRMWRAP_Reclaim, CMD_STRM_RECLAIM_OFFSET}, ++ {STRMWRAP_RegisterNotify, CMD_STRM_REGISTERNOTIFY_OFFSET}, ++ {STRMWRAP_Select, CMD_STRM_SELECT_OFFSET}, ++ /* CMM module */ ++ {CMMWRAP_CallocBuf, CMD_CMM_ALLOCBUF_OFFSET}, ++ {CMMWRAP_FreeBuf, CMD_CMM_FREEBUF_OFFSET}, ++ {CMMWRAP_GetHandle, CMD_CMM_GETHANDLE_OFFSET}, ++ {CMMWRAP_GetInfo, CMD_CMM_GETINFO_OFFSET} ++}; ++ ++/* ++ * ======== WCD_CallDevIOCtl ======== ++ * Purpose: ++ * Call the (wrapper) function for the corresponding WCD IOCTL. 
++ */ ++inline DSP_STATUS WCD_CallDevIOCtl(u32 cmd, union Trapped_Args *args, ++ u32 *pResult, void *pr_ctxt) ++{ ++ if ((cmd < (sizeof(WCD_cmdTable) / sizeof(struct WCD_Cmd)))) { ++ /* make the fxn call via the cmd table */ ++ *pResult = (*WCD_cmdTable[cmd].fxn) (args, pr_ctxt); ++ return DSP_SOK; ++ } else { ++ return DSP_EINVALIDARG; ++ } ++} ++ ++/* ++ * ======== WCD_Exit ======== ++ */ ++void WCD_Exit(void) ++{ ++ DBC_Require(WCD_cRefs > 0); ++ WCD_cRefs--; ++ GT_1trace(WCD_debugMask, GT_5CLASS, ++ "Entered WCD_Exit, ref count: 0x%x\n", WCD_cRefs); ++ if (WCD_cRefs == 0) { ++ /* Release all WCD modules initialized in WCD_Init(). */ ++ COD_Exit(); ++ DEV_Exit(); ++ CHNL_Exit(); ++ MSG_Exit(); ++ IO_Exit(); ++ STRM_Exit(); ++ NTFY_Exit(); ++ DISP_Exit(); ++ NODE_Exit(); ++ PROC_Exit(); ++ MGR_Exit(); ++ RMM_exit(); ++ DRV_Exit(); ++ SERVICES_Exit(); ++ } ++ DBC_Ensure(WCD_cRefs >= 0); ++} ++ ++/* ++ * ======== WCD_Init ======== ++ * Purpose: ++ * Module initialization is done by SERVICES Init. ++ */ ++bool WCD_Init(void) ++{ ++ bool fInit = true; ++ bool fDRV, fDEV, fCOD, fSERVICES, fCHNL, fMSG, fIO; ++ bool fMGR, fPROC, fNODE, fDISP, fNTFY, fSTRM, fRMM; ++#ifdef DEBUG ++ /* runtime check of Device IOCtl array. */ ++ u32 i; ++ for (i = 1; i < (sizeof(WCD_cmdTable) / sizeof(struct WCD_Cmd)); i++) ++ DBC_Assert(WCD_cmdTable[i - 1].dwIndex == i); ++ ++#endif ++ if (WCD_cRefs == 0) { ++ /* initialize all SERVICES modules */ ++ fSERVICES = SERVICES_Init(); ++ /* initialize debugging module */ ++ DBC_Assert(!WCD_debugMask.flags); ++ GT_create(&WCD_debugMask, "CD"); /* CD for class driver */ ++ /* initialize class driver and other modules */ ++ fDRV = DRV_Init(); ++ fMGR = MGR_Init(); ++ fPROC = PROC_Init(); ++ fNODE = NODE_Init(); ++ fDISP = DISP_Init(); ++ fNTFY = NTFY_Init(); ++ fSTRM = STRM_Init(); ++ fRMM = RMM_init(); ++ fCHNL = CHNL_Init(); ++ fMSG = MSG_Init(); ++ fIO = IO_Init(); ++ fDEV = DEV_Init(); ++ fCOD = COD_Init(); ++ fInit = fSERVICES && fDRV && fDEV && fCHNL && fCOD && ++ fMSG && fIO; ++ fInit = fInit && fMGR && fPROC && fRMM; ++ if (!fInit) { ++ if (fSERVICES) ++ SERVICES_Exit(); ++ ++ if (fDRV) ++ DRV_Exit(); ++ ++ if (fMGR) ++ MGR_Exit(); ++ ++ if (fSTRM) ++ STRM_Exit(); ++ ++ if (fPROC) ++ PROC_Exit(); ++ ++ if (fNODE) ++ NODE_Exit(); ++ ++ if (fDISP) ++ DISP_Exit(); ++ ++ if (fNTFY) ++ NTFY_Exit(); ++ ++ if (fCHNL) ++ CHNL_Exit(); ++ ++ if (fMSG) ++ MSG_Exit(); ++ ++ if (fIO) ++ IO_Exit(); ++ ++ if (fDEV) ++ DEV_Exit(); ++ ++ if (fCOD) ++ COD_Exit(); ++ ++ if (fRMM) ++ RMM_exit(); ++ ++ } ++ } ++ if (fInit) ++ WCD_cRefs++; ++ ++ GT_1trace(WCD_debugMask, GT_5CLASS, ++ "Entered WCD_Init, ref count: 0x%x\n", WCD_cRefs); ++ return fInit; ++} ++ ++/* ++ * ======== WCD_InitComplete2 ======== ++ * Purpose: ++ * Perform any required WCD, and WMD initialization which ++ * cannot not be performed in WCD_Init() or DEV_StartDevice() due ++ * to the fact that some services are not yet ++ * completely initialized. ++ * Parameters: ++ * Returns: ++ * DSP_SOK: Allow this device to load ++ * DSP_EFAIL: Failure. ++ * Requires: ++ * WCD initialized. ++ * Ensures: ++ */ ++DSP_STATUS WCD_InitComplete2(void) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CFG_DEVNODE *DevNode; ++ struct DEV_OBJECT *hDevObject; ++ u32 devType; ++ ++ DBC_Require(WCD_cRefs > 0); ++ GT_0trace(WCD_debugMask, GT_ENTER, "Entered WCD_InitComplete\n"); ++ /* Walk the list of DevObjects, get each devnode, and attempting to ++ * autostart the board. Note that this requires COF loading, which ++ * requires KFILE. 
*/ ++ for (hDevObject = DEV_GetFirst(); hDevObject != NULL; ++ hDevObject = DEV_GetNext(hDevObject)) { ++ if (DSP_FAILED(DEV_GetDevNode(hDevObject, &DevNode))) ++ continue; ++ ++ if (DSP_FAILED(DEV_GetDevType(hDevObject, &devType))) ++ continue; ++ ++ if ((devType == DSP_UNIT) || (devType == IVA_UNIT)) { ++ if (DSP_FAILED(PROC_AutoStart(DevNode, hDevObject))) { ++ GT_0trace(WCD_debugMask, GT_1CLASS, ++ "WCD_InitComplete2 Failed\n"); ++ status = DSP_EFAIL; ++ /* break; */ ++ } ++ } else ++ GT_1trace(WCD_debugMask, GT_ENTER, ++ "Ignoring PROC_AutoStart " ++ "for Device Type = 0x%x \n", devType); ++ } /* End For Loop */ ++ GT_1trace(WCD_debugMask, GT_ENTER, ++ "Exiting WCD_InitComplete status 0x%x\n", status); ++ return status; ++} ++ ++/* ++ * ======== MGRWRAP_EnumNode_Info ======== ++ */ ++u32 MGRWRAP_EnumNode_Info(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u8 *pNDBProps; ++ u32 uNumNodes; ++ DSP_STATUS status = DSP_SOK; ++ u32 size = args->ARGS_MGR_ENUMNODE_INFO.uNDBPropsSize; ++ ++ GT_4trace(WCD_debugMask, GT_ENTER, ++ "MGR_EnumNodeInfo: entered args:\n0x%x" ++ " uNode: 0x%x\tpNDBProps: 0x%x\tuNDBPropsSize: " ++ "0x%x\tpuNumNodes\n", args->ARGS_MGR_ENUMNODE_INFO.uNode, ++ args->ARGS_MGR_ENUMNODE_INFO.pNDBProps, ++ args->ARGS_MGR_ENUMNODE_INFO.uNDBPropsSize, ++ args->ARGS_MGR_ENUMNODE_INFO.puNumNodes); ++ pNDBProps = MEM_Alloc(size, MEM_NONPAGED); ++ if (pNDBProps == NULL) ++ status = DSP_EMEMORY; ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = MGR_EnumNodeInfo(args->ARGS_MGR_ENUMNODE_INFO.uNode, ++ (struct DSP_NDBPROPS *)pNDBProps, ++ size, &uNumNodes); ++ } ++ cp_to_usr(args->ARGS_MGR_ENUMNODE_INFO.pNDBProps, pNDBProps, status, ++ size); ++ cp_to_usr(args->ARGS_MGR_ENUMNODE_INFO.puNumNodes, &uNumNodes, status, ++ 1); ++ if (pNDBProps) ++ MEM_Free(pNDBProps); ++ ++ return status; ++} ++ ++/* ++ * ======== MGRWRAP_EnumProc_Info ======== ++ */ ++u32 MGRWRAP_EnumProc_Info(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u8 *pProcessorInfo; ++ u32 uNumProcs; ++ DSP_STATUS status = DSP_SOK; ++ u32 size = args->ARGS_MGR_ENUMPROC_INFO.uProcessorInfoSize; ++ ++ GT_4trace(WCD_debugMask, GT_ENTER, ++ "MGRWRAP_EnumProc_Info: entered args:\n" ++ "0x%x uProcessor: 0x%x\tpProcessorInfo: 0x%x\t" ++ "uProcessorInfoSize: 0x%x\tpuNumProcs \n", ++ args->ARGS_MGR_ENUMPROC_INFO.uProcessor, ++ args->ARGS_MGR_ENUMPROC_INFO.pProcessorInfo, ++ args->ARGS_MGR_ENUMPROC_INFO.uProcessorInfoSize, ++ args->ARGS_MGR_ENUMPROC_INFO.puNumProcs); ++ pProcessorInfo = MEM_Alloc(size, MEM_NONPAGED); ++ if (pProcessorInfo == NULL) ++ status = DSP_EMEMORY; ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = MGR_EnumProcessorInfo(args-> ++ ARGS_MGR_ENUMPROC_INFO.uProcessor, ++ (struct DSP_PROCESSORINFO *)pProcessorInfo, ++ size, &uNumProcs); ++ } ++ cp_to_usr(args->ARGS_MGR_ENUMPROC_INFO.pProcessorInfo, pProcessorInfo, ++ status, size); ++ cp_to_usr(args->ARGS_MGR_ENUMPROC_INFO.puNumProcs, &uNumProcs, ++ status, 1); ++ if (pProcessorInfo) ++ MEM_Free(pProcessorInfo); ++ ++ return status; ++} ++ ++#define WRAP_MAP2CALLER(x) x ++/* ++ * ======== MGRWRAP_RegisterObject ======== ++ */ ++u32 MGRWRAP_RegisterObject(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ struct DSP_UUID pUuid; ++ u32 pathSize = 0; ++ char *pszPathName = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ cp_fm_usr(&pUuid, args->ARGS_MGR_REGISTEROBJECT.pUuid, status, 1); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* pathSize is increased by 1 to accommodate NULL */ ++ pathSize = strlen_user((char *) ++ args->ARGS_MGR_REGISTEROBJECT.pszPathName) + 1; ++ 
pszPathName = MEM_Alloc(pathSize, MEM_NONPAGED); ++ if (!pszPathName) ++ goto func_end; ++ retVal = strncpy_from_user(pszPathName, ++ (char *)args->ARGS_MGR_REGISTEROBJECT.pszPathName, ++ pathSize); ++ if (!retVal) { ++ status = DSP_EPOINTER; ++ goto func_end; ++ } ++ ++ GT_1trace(WCD_debugMask, GT_ENTER, ++ "MGRWRAP_RegisterObject: entered pg2hMsg " ++ "0x%x\n", args->ARGS_MGR_REGISTEROBJECT.pUuid); ++ status = DCD_RegisterObject(&pUuid, ++ args->ARGS_MGR_REGISTEROBJECT.objType, ++ (char *)pszPathName); ++func_end: ++ if (pszPathName) ++ MEM_Free(pszPathName); ++ return status; ++} ++ ++/* ++ * ======== MGRWRAP_UnregisterObject ======== ++ */ ++u32 MGRWRAP_UnregisterObject(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_UUID pUuid; ++ ++ cp_fm_usr(&pUuid, args->ARGS_MGR_REGISTEROBJECT.pUuid, status, 1); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ GT_1trace(WCD_debugMask, GT_ENTER, ++ "MGRWRAP_UnregisterObject: entered pg2hMsg" ++ " 0x%x\n", args->ARGS_MGR_UNREGISTEROBJECT.pUuid); ++ status = DCD_UnregisterObject(&pUuid, ++ args->ARGS_MGR_UNREGISTEROBJECT.objType); ++func_end: ++ return status; ++ ++} ++ ++/* ++ * ======== MGRWRAP_WaitForBridgeEvents ======== ++ */ ++u32 MGRWRAP_WaitForBridgeEvents(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK, real_status = DSP_SOK; ++ struct DSP_NOTIFICATION *aNotifications[MAX_EVENTS]; ++ struct DSP_NOTIFICATION notifications[MAX_EVENTS]; ++ u32 uIndex, i; ++ u32 uCount = args->ARGS_MGR_WAIT.uCount; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "MGRWRAP_WaitForBridgeEvents: entered\n"); ++ ++ if (uCount > MAX_EVENTS) ++ status = DSP_EINVALIDARG; ++ ++ /* get the array of pointers to user structures */ ++ cp_fm_usr(aNotifications, args->ARGS_MGR_WAIT.aNotifications, ++ status, uCount); ++ /* get the events */ ++ for (i = 0; i < uCount; i++) { ++ cp_fm_usr(¬ifications[i], aNotifications[i], status, 1); ++ if (DSP_SUCCEEDED(status)) { ++ /* set the array of pointers to kernel structures*/ ++ aNotifications[i] = ¬ifications[i]; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ real_status = MGR_WaitForBridgeEvents(aNotifications, uCount, ++ &uIndex, args->ARGS_MGR_WAIT.uTimeout); ++ } ++ cp_to_usr(args->ARGS_MGR_WAIT.puIndex, &uIndex, status, 1); ++ return real_status; ++} ++ ++ ++#ifndef RES_CLEANUP_DISABLE ++/* ++ * ======== MGRWRAP_GetProcessResourceInfo ======== ++ */ ++u32 MGRWRAP_GetProcessResourcesInfo(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 uSize = 0; ++ u8 *pBuf = MEM_Alloc(8092, MEM_NONPAGED); ++ status = DRV_ProcDisplayResInfo(pBuf, &uSize); ++ GT_1trace(WCD_debugMask, GT_ENTER, ++ "MGRWRAP_GetProcessResourcesInfo:uSize=%d :\n", uSize); ++ cp_to_usr(args->ARGS_PROC_GETTRACE.pBuf, pBuf, status, uSize); ++ GT_0trace(WCD_debugMask, GT_ENTER, "\n***********" ++ "123MGRWRAP_GetProcessResourcesInfo:**************\n"); ++ GT_0trace(WCD_debugMask, GT_ENTER, "\n***********" ++ "456MGRWRAP_GetProcessResourcesInfo:**************\n"); ++ cp_to_usr(args->ARGS_PROC_GETTRACE.pSize, &uSize, status, 1); ++ MEM_Free(pBuf); ++ return status; ++} ++#endif ++ ++ ++/* ++ * ======== PROCWRAP_Attach ======== ++ */ ++u32 PROCWRAP_Attach(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_HPROCESSOR processor; ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_PROCESSORATTRIN attrIn, *pAttrIn = NULL; ++ ++ GT_3trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_Attach: entered args:\n" "0x%x" ++ " uProcessor: 0x%x\tpAttrIn: 0x%x\tphProcessor \n", ++ 
args->ARGS_PROC_ATTACH.uProcessor, ++ args->ARGS_PROC_ATTACH.pAttrIn, ++ args->ARGS_PROC_ATTACH.phProcessor); ++ /* Optional argument */ ++ if (args->ARGS_PROC_ATTACH.pAttrIn) { ++ cp_fm_usr(&attrIn, args->ARGS_PROC_ATTACH.pAttrIn, status, 1); ++ if (DSP_SUCCEEDED(status)) ++ pAttrIn = &attrIn; ++ else ++ goto func_end; ++ ++ ++ } ++ status = PROC_Attach(args->ARGS_PROC_ATTACH.uProcessor, pAttrIn, ++ &processor, pr_ctxt); ++ cp_to_usr(args->ARGS_PROC_ATTACH.phProcessor, &processor, status, 1); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_Ctrl ======== ++ */ ++u32 PROCWRAP_Ctrl(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 cbDataSize, __user *pSize = (u32 __user *) ++ args->ARGS_PROC_CTRL.pArgs; ++ u8 *pArgs = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ GT_3trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_Ctrl: entered args:\n 0x%x" ++ " uProcessor: 0x%x\tdwCmd: 0x%x\tpArgs \n", ++ args->ARGS_PROC_CTRL.hProcessor, ++ args->ARGS_PROC_CTRL.dwCmd, ++ args->ARGS_PROC_CTRL.pArgs); ++ if (pSize) { ++ if (get_user(cbDataSize, pSize)) { ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ cbDataSize += sizeof(u32); ++ pArgs = MEM_Alloc(cbDataSize, MEM_NONPAGED); ++ if (pArgs == NULL) { ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ ++ cp_fm_usr(pArgs, args->ARGS_PROC_CTRL.pArgs, status, ++ cbDataSize); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = PROC_Ctrl(args->ARGS_PROC_CTRL.hProcessor, ++ args->ARGS_PROC_CTRL.dwCmd, ++ (struct DSP_CBDATA *)pArgs); ++ } ++ ++ /* cp_to_usr(args->ARGS_PROC_CTRL.pArgs, pArgs, status, 1);*/ ++ if (pArgs) ++ MEM_Free(pArgs); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_Detach ======== ++ */ ++u32 PROCWRAP_Detach(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_1trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_Detach: entered args\n0x%x " ++ "hProceesor \n", args->ARGS_PROC_DETACH.hProcessor); ++ retVal = PROC_Detach(args->ARGS_PROC_DETACH.hProcessor, pr_ctxt); ++ ++ return retVal; ++} ++ ++/* ++ * ======== PROCWRAP_EnumNode_Info ======== ++ */ ++u32 PROCWRAP_EnumNode_Info(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ DSP_HNODE aNodeTab[MAX_NODES]; ++ u32 uNumNodes; ++ u32 uAllocated; ++ ++ GT_5trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_EnumNode_Info:entered args:\n0x" ++ "%xhProcessor:0x%x\taNodeTab:0x%x\tuNodeTabSize:" ++ "%0x%x\tpuNumNodes%\n0x%x puAllocated: \n", ++ args->ARGS_PROC_ENUMNODE_INFO.hProcessor, ++ args->ARGS_PROC_ENUMNODE_INFO.aNodeTab, ++ args->ARGS_PROC_ENUMNODE_INFO.uNodeTabSize, ++ args->ARGS_PROC_ENUMNODE_INFO.puNumNodes, ++ args->ARGS_PROC_ENUMNODE_INFO.puAllocated); ++ DBC_Require(args->ARGS_PROC_ENUMNODE_INFO.uNodeTabSize <= MAX_NODES); ++ status = PROC_EnumNodes(args->ARGS_PROC_ENUMNODE_INFO.hProcessor, ++ aNodeTab, ++ args->ARGS_PROC_ENUMNODE_INFO.uNodeTabSize, ++ &uNumNodes, &uAllocated); ++ cp_to_usr(args->ARGS_PROC_ENUMNODE_INFO.aNodeTab, aNodeTab, status, ++ uNumNodes); ++ cp_to_usr(args->ARGS_PROC_ENUMNODE_INFO.puNumNodes, &uNumNodes, ++ status, 1); ++ cp_to_usr(args->ARGS_PROC_ENUMNODE_INFO.puAllocated, &uAllocated, ++ status, 1); ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_FlushMemory ======== ++ */ ++u32 PROCWRAP_FlushMemory(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_FlushMemory: entered\n"); ++ ++ status = PROC_FlushMemory(args->ARGS_PROC_FLUSHMEMORY.hProcessor, ++ args->ARGS_PROC_FLUSHMEMORY.pMpuAddr, ++ args->ARGS_PROC_FLUSHMEMORY.ulSize, ++ 
args->ARGS_PROC_FLUSHMEMORY.ulFlags); ++ return status; ++} ++ ++ ++/* ++ * ======== PROCWRAP_InvalidateMemory ======== ++ */ ++u32 PROCWRAP_InvalidateMemory(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_InvalidateMemory:entered\n"); ++ ++ status = PROC_InvalidateMemory( ++ args->ARGS_PROC_INVALIDATEMEMORY.hProcessor, ++ args->ARGS_PROC_INVALIDATEMEMORY.pMpuAddr, ++ args->ARGS_PROC_INVALIDATEMEMORY.ulSize); ++ return status; ++} ++ ++ ++/* ++ * ======== PROCWRAP_EnumResources ======== ++ */ ++u32 PROCWRAP_EnumResources(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_RESOURCEINFO pResourceInfo; ++ ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ GT_4trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_EnumResources: entered args:\n" ++ "0x%x hProcessor: 0x%x\tuResourceMask: 0x%x\tpResourceInfo" ++ " 0x%x\tuResourceInfoSixe \n", ++ args->ARGS_PROC_ENUMRESOURCES.hProcessor, ++ args->ARGS_PROC_ENUMRESOURCES.uResourceType, ++ args->ARGS_PROC_ENUMRESOURCES.pResourceInfo, ++ args->ARGS_PROC_ENUMRESOURCES.uResourceInfoSize); ++ status = PROC_GetResourceInfo(args->ARGS_PROC_ENUMRESOURCES.hProcessor, ++ args->ARGS_PROC_ENUMRESOURCES.uResourceType, ++ &pResourceInfo, ++ args->ARGS_PROC_ENUMRESOURCES.uResourceInfoSize); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ cp_to_usr(args->ARGS_PROC_ENUMRESOURCES.pResourceInfo, &pResourceInfo, ++ status, 1); ++func_end: ++ return status; ++ ++} ++ ++/* ++ * ======== PROCWRAP_GetState ======== ++ */ ++u32 PROCWRAP_GetState(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ struct DSP_PROCESSORSTATE procStatus; ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_GetState: entered\n"); ++ status = PROC_GetState(args->ARGS_PROC_GETSTATE.hProcessor, &procStatus, ++ args->ARGS_PROC_GETSTATE.uStateInfoSize); ++ cp_to_usr(args->ARGS_PROC_GETSTATE.pProcStatus, &procStatus, status, 1); ++ return status; ++ ++} ++ ++/* ++ * ======== PROCWRAP_GetTrace ======== ++ */ ++u32 PROCWRAP_GetTrace(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ u8 *pBuf; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_GetTrace: entered\n"); ++ ++ DBC_Require(args->ARGS_PROC_GETTRACE.uMaxSize <= MAX_TRACEBUFLEN); ++ ++ pBuf = MEM_Calloc(args->ARGS_PROC_GETTRACE.uMaxSize, MEM_NONPAGED); ++ if (pBuf != NULL) { ++ status = PROC_GetTrace(args->ARGS_PROC_GETTRACE.hProcessor, ++ pBuf, args->ARGS_PROC_GETTRACE.uMaxSize); ++ } else { ++ status = DSP_EMEMORY; ++ } ++ cp_to_usr(args->ARGS_PROC_GETTRACE.pBuf, pBuf, status, ++ args->ARGS_PROC_GETTRACE.uMaxSize); ++ if (pBuf) ++ MEM_Free(pBuf); ++ ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_Load ======== ++ */ ++u32 PROCWRAP_Load(union Trapped_Args *args, void *pr_ctxt) ++{ ++ s32 i, len; ++ DSP_STATUS status = DSP_SOK; ++ char *temp; ++ s32 count = args->ARGS_PROC_LOAD.iArgc; ++ u8 **argv, **envp = NULL; ++ ++ DBC_Require(count > 0); ++ DBC_Require(count <= MAX_LOADARGS); ++ ++ argv = MEM_Alloc(count * sizeof(u8 *), MEM_NONPAGED); ++ if (!argv) { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ ++ cp_fm_usr(argv, args->ARGS_PROC_LOAD.aArgv, status, count); ++ if (DSP_FAILED(status)) { ++ MEM_Free(argv); ++ argv = NULL; ++ goto func_cont; ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (argv[i]) { ++ /* User space pointer to argument */ ++ temp = (char *) argv[i]; ++ /* len is increased by 1 to accommodate NULL */ ++ len = strlen_user((char *)temp) + 1; ++ /* Kernel space pointer to argument */ ++ 
argv[i] = MEM_Alloc(len, MEM_NONPAGED); ++ if (argv[i]) { ++ cp_fm_usr(argv[i], temp, status, len); ++ if (DSP_FAILED(status)) { ++ MEM_Free(argv[i]); ++ argv[i] = NULL; ++ goto func_cont; ++ } ++ } else { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ } ++ } ++ /* TODO: validate this */ ++ if (args->ARGS_PROC_LOAD.aEnvp) { ++ /* number of elements in the envp array including NULL */ ++ count = 0; ++ do { ++ get_user(temp, args->ARGS_PROC_LOAD.aEnvp + count); ++ count++; ++ } while (temp); ++ envp = MEM_Alloc(count * sizeof(u8 *), MEM_NONPAGED); ++ if (!envp) { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ ++ cp_fm_usr(envp, args->ARGS_PROC_LOAD.aEnvp, status, count); ++ if (DSP_FAILED(status)) { ++ MEM_Free(envp); ++ envp = NULL; ++ goto func_cont; ++ } ++ for (i = 0; envp[i]; i++) { ++ /* User space pointer to argument */ ++ temp = (char *)envp[i]; ++ /* len is increased by 1 to accommodate NULL */ ++ len = strlen_user((char *)temp) + 1; ++ /* Kernel space pointer to argument */ ++ envp[i] = MEM_Alloc(len, MEM_NONPAGED); ++ if (envp[i]) { ++ cp_fm_usr(envp[i], temp, status, len); ++ if (DSP_FAILED(status)) { ++ MEM_Free(envp[i]); ++ envp[i] = NULL; ++ goto func_cont; ++ } ++ } else { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ } ++ } ++ GT_5trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_Load, hProcessor: 0x%x\n\tiArgc:" ++ "0x%x\n\taArgv: 0x%x\n\taArgv[0]: %s\n\taEnvp: 0x%0x\n", ++ args->ARGS_PROC_LOAD.hProcessor, ++ args->ARGS_PROC_LOAD.iArgc, args->ARGS_PROC_LOAD.aArgv, ++ argv[0], args->ARGS_PROC_LOAD.aEnvp); ++ if (DSP_SUCCEEDED(status)) { ++ status = PROC_Load(args->ARGS_PROC_LOAD.hProcessor, ++ args->ARGS_PROC_LOAD.iArgc, ++ (CONST char **)argv, (CONST char **)envp); ++ } ++func_cont: ++ if (envp) { ++ i = 0; ++ while (envp[i]) ++ MEM_Free(envp[i++]); ++ ++ MEM_Free(envp); ++ } ++ ++ if (argv) { ++ count = args->ARGS_PROC_LOAD.iArgc; ++ for (i = 0; (i < count) && argv[i]; i++) ++ MEM_Free(argv[i]); ++ ++ MEM_Free(argv); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_Map ======== ++ */ ++u32 PROCWRAP_Map(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ void *pMapAddr; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_Map: entered\n"); ++ status = PROC_Map(args->ARGS_PROC_MAPMEM.hProcessor, ++ args->ARGS_PROC_MAPMEM.pMpuAddr, ++ args->ARGS_PROC_MAPMEM.ulSize, ++ args->ARGS_PROC_MAPMEM.pReqAddr, &pMapAddr, ++ args->ARGS_PROC_MAPMEM.ulMapAttr, pr_ctxt); ++ if (DSP_SUCCEEDED(status)) { ++ if (put_user(pMapAddr, args->ARGS_PROC_MAPMEM.ppMapAddr)) ++ status = DSP_EINVALIDARG; ++ ++ } ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_RegisterNotify ======== ++ */ ++u32 PROCWRAP_RegisterNotify(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ struct DSP_NOTIFICATION notification; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_RegisterNotify: entered\n"); ++ ++ /* Initialize the notification data structure */ ++ notification.psName = NULL; ++ notification.handle = NULL; ++ ++ status = PROC_RegisterNotify(args->ARGS_PROC_REGISTER_NOTIFY.hProcessor, ++ args->ARGS_PROC_REGISTER_NOTIFY.uEventMask, ++ args->ARGS_PROC_REGISTER_NOTIFY.uNotifyType, ++ ¬ification); ++ cp_to_usr(args->ARGS_PROC_REGISTER_NOTIFY.hNotification, ¬ification, ++ status, 1); ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_ReserveMemory ======== ++ */ ++u32 PROCWRAP_ReserveMemory(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ void *pRsvAddr; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_ReserveMemory: 
entered\n"); ++ status = PROC_ReserveMemory(args->ARGS_PROC_RSVMEM.hProcessor, ++ args->ARGS_PROC_RSVMEM.ulSize, &pRsvAddr); ++ if (put_user(pRsvAddr, args->ARGS_PROC_RSVMEM.ppRsvAddr)) ++ status = DSP_EINVALIDARG; ++ ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_Start ======== ++ */ ++u32 PROCWRAP_Start(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_Start: entered\n"); ++ retVal = PROC_Start(args->ARGS_PROC_START.hProcessor); ++ return retVal; ++} ++ ++/* ++ * ======== PROCWRAP_UnMap ======== ++ */ ++u32 PROCWRAP_UnMap(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_UnMap: entered\n"); ++ status = PROC_UnMap(args->ARGS_PROC_UNMAPMEM.hProcessor, ++ args->ARGS_PROC_UNMAPMEM.pMapAddr, pr_ctxt); ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_UnReserveMemory ======== ++ */ ++u32 PROCWRAP_UnReserveMemory(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "PROCWRAP_UnReserveMemory: entered\n"); ++ status = PROC_UnReserveMemory(args->ARGS_PROC_UNRSVMEM.hProcessor, ++ args->ARGS_PROC_UNRSVMEM.pRsvAddr); ++ return status; ++} ++ ++/* ++ * ======== PROCWRAP_Stop ======== ++ */ ++u32 PROCWRAP_Stop(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_Stop: entered\n"); ++ retVal = PROC_Stop(args->ARGS_PROC_STOP.hProcessor); ++ ++ return retVal; ++} ++ ++/* ++ * ======== NODEWRAP_Allocate ======== ++ */ ++u32 NODEWRAP_Allocate(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_UUID nodeId; ++ u32 cbDataSize = 0; ++ u32 __user *pSize = (u32 __user *)args->ARGS_NODE_ALLOCATE.pArgs; ++ u8 *pArgs = NULL; ++ struct DSP_NODEATTRIN attrIn, *pAttrIn = NULL; ++ struct NODE_OBJECT *hNode; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Allocate: entered\n"); ++ ++ /* Optional argument */ ++ if (pSize) { ++ if (get_user(cbDataSize, pSize)) ++ status = DSP_EFAIL; ++ ++ cbDataSize += sizeof(u32); ++ if (DSP_SUCCEEDED(status)) { ++ pArgs = MEM_Alloc(cbDataSize, MEM_NONPAGED); ++ if (pArgs == NULL) ++ status = DSP_EMEMORY; ++ ++ } ++ cp_fm_usr(pArgs, args->ARGS_NODE_ALLOCATE.pArgs, status, ++ cbDataSize); ++ } ++ cp_fm_usr(&nodeId, args->ARGS_NODE_ALLOCATE.pNodeID, status, 1); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ /* Optional argument */ ++ if (args->ARGS_NODE_ALLOCATE.pAttrIn) { ++ cp_fm_usr(&attrIn, args->ARGS_NODE_ALLOCATE.pAttrIn, status, 1); ++ if (DSP_SUCCEEDED(status)) ++ pAttrIn = &attrIn; ++ else ++ status = DSP_EMEMORY; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = NODE_Allocate(args->ARGS_NODE_ALLOCATE.hProcessor, ++ &nodeId, (struct DSP_CBDATA *)pArgs, ++ pAttrIn, &hNode, pr_ctxt); ++ } ++ cp_to_usr(args->ARGS_NODE_ALLOCATE.phNode, &hNode, status, 1); ++func_cont: ++ if (pArgs) ++ MEM_Free(pArgs); ++ ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_AllocMsgBuf ======== ++ */ ++u32 NODEWRAP_AllocMsgBuf(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_BUFFERATTR *pAttr = NULL; ++ struct DSP_BUFFERATTR attr; ++ u8 *pBuffer = NULL; ++ ++ if (args->ARGS_NODE_ALLOCMSGBUF.pAttr) { /* Optional argument */ ++ cp_fm_usr(&attr, args->ARGS_NODE_ALLOCMSGBUF.pAttr, status, 1); ++ if (DSP_SUCCEEDED(status)) ++ pAttr = &attr; ++ ++ } ++ /* IN OUT argument */ ++ cp_fm_usr(&pBuffer, args->ARGS_NODE_ALLOCMSGBUF.pBuffer, status, 1); ++ if 
(DSP_SUCCEEDED(status)) { ++ status = NODE_AllocMsgBuf(args->ARGS_NODE_ALLOCMSGBUF.hNode, ++ args->ARGS_NODE_ALLOCMSGBUF.uSize, ++ pAttr, &pBuffer); ++ } ++ cp_to_usr(args->ARGS_NODE_ALLOCMSGBUF.pBuffer, &pBuffer, status, 1); ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_ChangePriority ======== ++ */ ++u32 NODEWRAP_ChangePriority(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "NODEWRAP_ChangePriority: entered\n"); ++ retVal = NODE_ChangePriority(args->ARGS_NODE_CHANGEPRIORITY.hNode, ++ args->ARGS_NODE_CHANGEPRIORITY.iPriority); ++ ++ return retVal; ++} ++ ++/* ++ * ======== NODEWRAP_Connect ======== ++ */ ++u32 NODEWRAP_Connect(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_STRMATTR attrs; ++ struct DSP_STRMATTR *pAttrs = NULL; ++ u32 cbDataSize; ++ u32 __user *pSize = (u32 __user *)args->ARGS_NODE_CONNECT.pConnParam; ++ u8 *pArgs = NULL; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Connect: entered\n"); ++ ++ /* Optional argument */ ++ if (pSize) { ++ if (get_user(cbDataSize, pSize)) ++ status = DSP_EFAIL; ++ ++ cbDataSize += sizeof(u32); ++ if (DSP_SUCCEEDED(status)) { ++ pArgs = MEM_Alloc(cbDataSize, MEM_NONPAGED); ++ if (pArgs == NULL) { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ ++ } ++ cp_fm_usr(pArgs, args->ARGS_NODE_CONNECT.pConnParam, status, ++ cbDataSize); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ } ++ if (args->ARGS_NODE_CONNECT.pAttrs) { /* Optional argument */ ++ cp_fm_usr(&attrs, args->ARGS_NODE_CONNECT.pAttrs, status, 1); ++ if (DSP_SUCCEEDED(status)) ++ pAttrs = &attrs; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = NODE_Connect(args->ARGS_NODE_CONNECT.hNode, ++ args->ARGS_NODE_CONNECT.uStream, ++ args->ARGS_NODE_CONNECT.hOtherNode, ++ args->ARGS_NODE_CONNECT.uOtherStream, ++ pAttrs, (struct DSP_CBDATA *)pArgs); ++ } ++func_cont: ++ if (pArgs) ++ MEM_Free(pArgs); ++ ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_Create ======== ++ */ ++u32 NODEWRAP_Create(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Create: entered\n"); ++ retVal = NODE_Create(args->ARGS_NODE_CREATE.hNode); ++ ++ return retVal; ++} ++ ++/* ++ * ======== NODEWRAP_Delete ======== ++ */ ++u32 NODEWRAP_Delete(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Delete: entered\n"); ++ retVal = NODE_Delete(args->ARGS_NODE_DELETE.hNode, pr_ctxt); ++ ++ return retVal; ++} ++ ++/* ++ * ======== NODEWRAP_FreeMsgBuf ======== ++ */ ++u32 NODEWRAP_FreeMsgBuf(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_BUFFERATTR *pAttr = NULL; ++ struct DSP_BUFFERATTR attr; ++ if (args->ARGS_NODE_FREEMSGBUF.pAttr) { /* Optional argument */ ++ cp_fm_usr(&attr, args->ARGS_NODE_FREEMSGBUF.pAttr, status, 1); ++ if (DSP_SUCCEEDED(status)) ++ pAttr = &attr; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = NODE_FreeMsgBuf(args->ARGS_NODE_FREEMSGBUF.hNode, ++ args->ARGS_NODE_FREEMSGBUF.pBuffer, ++ pAttr); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_GetAttr ======== ++ */ ++u32 NODEWRAP_GetAttr(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_NODEATTR attr; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_GetAttr: entered\n"); ++ ++ status = NODE_GetAttr(args->ARGS_NODE_GETATTR.hNode, &attr, ++ args->ARGS_NODE_GETATTR.uAttrSize); ++ cp_to_usr(args->ARGS_NODE_GETATTR.pAttr, 
&attr, status, 1); ++ ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_GetMessage ======== ++ */ ++u32 NODEWRAP_GetMessage(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ struct DSP_MSG msg; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_GetMessage: entered\n"); ++ ++ status = NODE_GetMessage(args->ARGS_NODE_GETMESSAGE.hNode, &msg, ++ args->ARGS_NODE_GETMESSAGE.uTimeout); ++ ++ cp_to_usr(args->ARGS_NODE_GETMESSAGE.pMessage, &msg, status, 1); ++ ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_Pause ======== ++ */ ++u32 NODEWRAP_Pause(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Pause: entered\n"); ++ retVal = NODE_Pause(args->ARGS_NODE_PAUSE.hNode); ++ ++ return retVal; ++} ++ ++/* ++ * ======== NODEWRAP_PutMessage ======== ++ */ ++u32 NODEWRAP_PutMessage(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_MSG msg; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_PutMessage: entered\n"); ++ ++ cp_fm_usr(&msg, args->ARGS_NODE_PUTMESSAGE.pMessage, status, 1); ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = NODE_PutMessage(args->ARGS_NODE_PUTMESSAGE.hNode, &msg, ++ args->ARGS_NODE_PUTMESSAGE.uTimeout); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_RegisterNotify ======== ++ */ ++u32 NODEWRAP_RegisterNotify(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_NOTIFICATION notification; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "NODEWRAP_RegisterNotify: entered\n"); ++ ++ /* Initialize the notification data structure */ ++ notification.psName = NULL; ++ notification.handle = NULL; ++ ++ status = NODE_RegisterNotify(args->ARGS_NODE_REGISTERNOTIFY.hNode, ++ args->ARGS_NODE_REGISTERNOTIFY.uEventMask, ++ args->ARGS_NODE_REGISTERNOTIFY.uNotifyType, ++ ¬ification); ++ cp_to_usr(args->ARGS_NODE_REGISTERNOTIFY.hNotification, ¬ification, ++ status, 1); ++ return status; ++} ++ ++/* ++ * ======== NODEWRAP_Run ======== ++ */ ++u32 NODEWRAP_Run(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Run: entered\n"); ++ retVal = NODE_Run(args->ARGS_NODE_RUN.hNode); ++ ++ return retVal; ++} ++ ++/* ++ * ======== NODEWRAP_Terminate ======== ++ */ ++u32 NODEWRAP_Terminate(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ DSP_STATUS tempstatus; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Terminate: entered\n"); ++ ++ status = NODE_Terminate(args->ARGS_NODE_TERMINATE.hNode, &tempstatus); ++ ++ cp_to_usr(args->ARGS_NODE_TERMINATE.pStatus, &tempstatus, status, 1); ++ ++ return status; ++} ++ ++ ++/* ++ * ======== NODEWRAP_GetUUIDProps ======== ++ */ ++u32 NODEWRAP_GetUUIDProps(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_UUID nodeId; ++ struct DSP_NDBPROPS *pnodeProps = NULL; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "NODEWRAP_GetUUIDPropste: entered\n"); ++ ++ ++ cp_fm_usr(&nodeId, args->ARGS_NODE_GETUUIDPROPS.pNodeID, status, 1); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ pnodeProps = MEM_Alloc(sizeof(struct DSP_NDBPROPS), MEM_NONPAGED); ++ if (pnodeProps != NULL) { ++ status = NODE_GetUUIDProps(args-> ++ ARGS_NODE_GETUUIDPROPS.hProcessor, ++ &nodeId, pnodeProps); ++ cp_to_usr(args->ARGS_NODE_GETUUIDPROPS.pNodeProps, pnodeProps, ++ status, 1); ++ } else ++ status = DSP_EMEMORY; ++func_cont: ++ if (pnodeProps) ++ MEM_Free(pnodeProps); ++ return status; ++} ++ ++/* ++ * ======== 
STRMWRAP_AllocateBuffer ======== ++ */ ++u32 STRMWRAP_AllocateBuffer(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status; ++ u8 **apBuffer = NULL; ++ u32 uNumBufs = args->ARGS_STRM_ALLOCATEBUFFER.uNumBufs; ++ ++ DBC_Require(uNumBufs <= MAX_BUFS); ++ ++ apBuffer = MEM_Alloc((uNumBufs * sizeof(u8 *)), MEM_NONPAGED); ++ ++ status = STRM_AllocateBuffer(args->ARGS_STRM_ALLOCATEBUFFER.hStream, ++ args->ARGS_STRM_ALLOCATEBUFFER.uSize, ++ apBuffer, uNumBufs, pr_ctxt); ++ cp_to_usr(args->ARGS_STRM_ALLOCATEBUFFER.apBuffer, apBuffer, status, ++ uNumBufs); ++ if (apBuffer) ++ MEM_Free(apBuffer); ++ ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_Close ======== ++ */ ++u32 STRMWRAP_Close(union Trapped_Args *args, void *pr_ctxt) ++{ ++ return STRM_Close(args->ARGS_STRM_CLOSE.hStream, pr_ctxt); ++} ++ ++/* ++ * ======== STRMWRAP_FreeBuffer ======== ++ */ ++u32 STRMWRAP_FreeBuffer(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u8 **apBuffer = NULL; ++ u32 uNumBufs = args->ARGS_STRM_FREEBUFFER.uNumBufs; ++ ++ DBC_Require(uNumBufs <= MAX_BUFS); ++ ++ apBuffer = MEM_Alloc((uNumBufs * sizeof(u8 *)), MEM_NONPAGED); ++ ++ cp_fm_usr(apBuffer, args->ARGS_STRM_FREEBUFFER.apBuffer, status, ++ uNumBufs); ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = STRM_FreeBuffer(args->ARGS_STRM_FREEBUFFER.hStream, ++ apBuffer, uNumBufs, pr_ctxt); ++ } ++ cp_to_usr(args->ARGS_STRM_FREEBUFFER.apBuffer, apBuffer, status, ++ uNumBufs); ++ if (apBuffer) ++ MEM_Free(apBuffer); ++ ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_GetEventHandle ======== ++ */ ++u32 STRMWRAP_GetEventHandle(union Trapped_Args *args, void *pr_ctxt) ++{ ++ return DSP_ENOTIMPL; ++} ++ ++/* ++ * ======== STRMWRAP_GetInfo ======== ++ */ ++u32 STRMWRAP_GetInfo(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_INFO strmInfo; ++ struct DSP_STREAMINFO user; ++ struct DSP_STREAMINFO *temp; ++ ++ cp_fm_usr(&strmInfo, args->ARGS_STRM_GETINFO.pStreamInfo, status, 1); ++ temp = strmInfo.pUser; ++ ++ strmInfo.pUser = &user; ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = STRM_GetInfo(args->ARGS_STRM_GETINFO.hStream, ++ &strmInfo, args->ARGS_STRM_GETINFO.uStreamInfoSize); ++ } ++ cp_to_usr(temp, strmInfo.pUser, status, 1); ++ strmInfo.pUser = temp; ++ cp_to_usr(args->ARGS_STRM_GETINFO.pStreamInfo, &strmInfo, status, 1); ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_Idle ======== ++ */ ++u32 STRMWRAP_Idle(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 retVal; ++ ++ retVal = STRM_Idle(args->ARGS_STRM_IDLE.hStream, ++ args->ARGS_STRM_IDLE.bFlush); ++ ++ return retVal; ++} ++ ++/* ++ * ======== STRMWRAP_Issue ======== ++ */ ++u32 STRMWRAP_Issue(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ /* No need of doing cp_fm_usr for the user buffer (pBuffer) ++ as this is done in Bridge internal function WMD_CHNL_AddIOReq ++ in chnl_sm.c */ ++ status = STRM_Issue(args->ARGS_STRM_ISSUE.hStream, ++ args->ARGS_STRM_ISSUE.pBuffer, ++ args->ARGS_STRM_ISSUE.dwBytes, ++ args->ARGS_STRM_ISSUE.dwBufSize, ++ args->ARGS_STRM_ISSUE.dwArg); ++ ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_Open ======== ++ */ ++u32 STRMWRAP_Open(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_ATTR attr; ++ struct STRM_OBJECT *pStrm; ++ struct DSP_STREAMATTRIN strmAttrIn; ++ ++ cp_fm_usr(&attr, args->ARGS_STRM_OPEN.pAttrIn, status, 1); ++ ++ if (attr.pStreamAttrIn != NULL) { /* Optional argument */ ++ cp_fm_usr(&strmAttrIn, 
attr.pStreamAttrIn, status, 1); ++ if (DSP_SUCCEEDED(status)) ++ attr.pStreamAttrIn = &strmAttrIn; ++ ++ } ++ status = STRM_Open(args->ARGS_STRM_OPEN.hNode, ++ args->ARGS_STRM_OPEN.uDirection, ++ args->ARGS_STRM_OPEN.uIndex, &attr, &pStrm, ++ pr_ctxt); ++ cp_to_usr(args->ARGS_STRM_OPEN.phStream, &pStrm, status, 1); ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_Reclaim ======== ++ */ ++u32 STRMWRAP_Reclaim(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u8 *pBufPtr; ++ u32 ulBytes; ++ u32 dwArg; ++ u32 ulBufSize; ++ ++ status = STRM_Reclaim(args->ARGS_STRM_RECLAIM.hStream, &pBufPtr, ++ &ulBytes, &ulBufSize, &dwArg); ++ cp_to_usr(args->ARGS_STRM_RECLAIM.pBufPtr, &pBufPtr, status, 1); ++ cp_to_usr(args->ARGS_STRM_RECLAIM.pBytes, &ulBytes, status, 1); ++ cp_to_usr(args->ARGS_STRM_RECLAIM.pdwArg, &dwArg, status, 1); ++ ++ if (args->ARGS_STRM_RECLAIM.pBufSize != NULL) { ++ cp_to_usr(args->ARGS_STRM_RECLAIM.pBufSize, &ulBufSize, ++ status, 1); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_RegisterNotify ======== ++ */ ++u32 STRMWRAP_RegisterNotify(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_NOTIFICATION notification; ++ ++ GT_0trace(WCD_debugMask, GT_ENTER, ++ "NODEWRAP_RegisterNotify: entered\n"); ++ ++ /* Initialize the notification data structure */ ++ notification.psName = NULL; ++ notification.handle = NULL; ++ ++ status = STRM_RegisterNotify(args->ARGS_STRM_REGISTERNOTIFY.hStream, ++ args->ARGS_STRM_REGISTERNOTIFY.uEventMask, ++ args->ARGS_STRM_REGISTERNOTIFY.uNotifyType, ++ ¬ification); ++ cp_to_usr(args->ARGS_STRM_REGISTERNOTIFY.hNotification, ¬ification, ++ status, 1); ++ ++ return status; ++} ++ ++/* ++ * ======== STRMWRAP_Select ======== ++ */ ++u32 STRMWRAP_Select(union Trapped_Args *args, void *pr_ctxt) ++{ ++ u32 mask; ++ struct STRM_OBJECT *aStrmTab[MAX_STREAMS]; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(args->ARGS_STRM_SELECT.nStreams <= MAX_STREAMS); ++ ++ cp_fm_usr(aStrmTab, args->ARGS_STRM_SELECT.aStreamTab, status, ++ args->ARGS_STRM_SELECT.nStreams); ++ if (DSP_SUCCEEDED(status)) { ++ status = STRM_Select(aStrmTab, args->ARGS_STRM_SELECT.nStreams, ++ &mask, args->ARGS_STRM_SELECT.uTimeout); ++ } ++ cp_to_usr(args->ARGS_STRM_SELECT.pMask, &mask, status, 1); ++ return status; ++} ++ ++/* CMM */ ++ ++/* ++ * ======== CMMWRAP_CallocBuf ======== ++ */ ++u32 CMMWRAP_CallocBuf(union Trapped_Args *args, void *pr_ctxt) ++{ ++ /* This operation is done in kernel */ ++ return DSP_ENOTIMPL; ++} ++ ++/* ++ * ======== CMMWRAP_FreeBuf ======== ++ */ ++u32 CMMWRAP_FreeBuf(union Trapped_Args *args, void *pr_ctxt) ++{ ++ /* This operation is done in kernel */ ++ return DSP_ENOTIMPL; ++} ++ ++/* ++ * ======== CMMWRAP_GetHandle ======== ++ */ ++u32 CMMWRAP_GetHandle(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_OBJECT *hCmmMgr; ++ ++ status = CMM_GetHandle(args->ARGS_CMM_GETHANDLE.hProcessor, &hCmmMgr); ++ ++ cp_to_usr(args->ARGS_CMM_GETHANDLE.phCmmMgr, &hCmmMgr, status, 1); ++ ++ return status; ++} ++ ++/* ++ * ======== CMMWRAP_GetInfo ======== ++ */ ++u32 CMMWRAP_GetInfo(union Trapped_Args *args, void *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_INFO cmmInfo; ++ ++ status = CMM_GetInfo(args->ARGS_CMM_GETINFO.hCmmMgr, &cmmInfo); ++ ++ cp_to_usr(args->ARGS_CMM_GETINFO.pCmmInfo, &cmmInfo, status, 1); ++ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dbdcd.c 
kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/dbdcd.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dbdcd.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/dbdcd.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1573 @@ ++/* ++ * dbdcd.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbdcd.c ======== ++ * Description: ++ * This file contains the implementation of the DSP/BIOS Bridge ++ * Configuration Database (DCD). ++ * ++ * Notes: ++ * The fxn DCD_GetObjects can apply a callback fxn to each DCD object ++ * that is located in a specified COFF file. At the moment, ++ * DCD_AutoRegister, DCD_AutoUnregister, and NLDR module all use ++ * DCD_GetObjects. ++ * ++ *! Revision History ++ *! ================ ++ *! 03-Dec-2003 map Changed DCD_OBJTYPE to DSP_DCDOBJTYPE ++ *! 17-Dec-2002 map Modified DCD_GetDepLibs, DCD_GetNumDepLibs, GetDepLibInfo ++ *! to include phase information ++ *! 02-Dec-2002 map Modified DCD_GetLibraryName for phases in different ++ *! libraries ++ *! 26-Feb-2003 kc Updated DCD_AutoUnregister and DCD_GetObjects to simplify ++ *! DCD implementation. ++ *! 17-Jul-2002 jeh Call COD_Open() instead of COD_OpenBase(), call COD_Close() ++ *! 11-Jul-2002 jeh Added DCD_GetDepLibs(), DCD_GetNumDepLibs(). ++ *! 18-Feb-2003 vp Code review updates ++ *! 18-Oct-2002 vp Ported to Linux platform ++ *! 15-Mar-2002 jeh Read dynamic loading memory requirements into node object ++ *! data. Added DCD_GetLibraryName(). ++ *! 13-Feb-2002 jeh Get system stack size in GetAttrsFromBuf(). ++ *! 01-Aug-2001 ag: Added check for PROC "extended" attributes used for ++ *! DSP-MMU setup. These are private attributes. ++ *! 18-Apr-2001 jeh Use COD_OpenBase instead of COD_LoadBase. ++ *! 03-Apr-2001 sg: Changed error names to DSP_EDCD* format. ++ *! 11-Jan-2001 jeh Changes to DCD_GetObjectDef to match node.cdb, proc.cdb. ++ *! 12-Dec-2000 kc: Added DCD_AutoUnregister. MSGNODE, DAISNODE added in ++ *! GetAttrsFromBuf ++ *! 22-Nov-2000 kc: Replaced sprintf() calls with strncat. ++ *! 09-Nov-2000 kc: Optimized DCD module. ++ *! 30-Oct-2000 kc: Added DCD_AutoRegister function; changed local var. names. ++ *! 29-Sep-2000 kc: Added code review changes (src/reviews/dcd_reviews.txt). ++ *! 06-Sep-2000 jeh Get message segid, message notification type. Added Atoi() ++ *! to replace atoi(), until cdb generation can output in ++ *! decimal format. ++ *! 26-Jul-2000 kc: Created. ++ *! 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Global defines. */ ++#define SIGNATURE 0x5f444344 /* "DCD_" (in reverse). */ ++ ++#define IsValidHandle(h) (((h) != NULL) && (h->dwSignature == SIGNATURE)) ++ ++#define MAX_INT2CHAR_LENGTH 16 /* Maximum int2char len of 32 bit int. */ ++ ++/* Name of section containing dependent libraries */ ++#define DEPLIBSECT ".dspbridge_deplibs" ++ ++/* DCD specific structures. */ ++struct DCD_MANAGER { ++ u32 dwSignature; /* Used for object validation. */ ++ struct COD_MANAGER *hCodMgr; /* Handle to COD manager object. */ ++}; ++ ++/* Global reference variables. */ ++static u32 cRefs; ++static u32 cEnumRefs; ++ ++extern struct GT_Mask curTrace; ++ ++/* helper function prototypes. */ ++static s32 Atoi(char *pszBuf); ++ ++static DSP_STATUS GetAttrsFromBuf(char *pszBuf, u32 ulBufSize, ++ enum DSP_DCDOBJTYPE objType, ++ struct DCD_GENERICOBJ *pGenObj); ++ ++static void CompressBuf(char *pszBuf, u32 ulBufSize, s32 cCharSize); ++ ++static char DspChar2GppChar(char *pWord, s32 cDspCharSize); ++ ++static DSP_STATUS GetDepLibInfo(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ IN OUT u16 *pNumLibs, ++ OPTIONAL OUT u16 *pNumPersLibs, ++ OPTIONAL OUT struct DSP_UUID *pDepLibUuids, ++ OPTIONAL OUT bool *pPersistentDepLibs, ++ IN enum NLDR_PHASE phase); ++ ++/* ++ * ======== DCD_AutoRegister ======== ++ * Purpose: ++ * Parses the supplied image and resigsters with DCD. ++ */ ++ ++DSP_STATUS DCD_AutoRegister(IN struct DCD_MANAGER *hDcdMgr, ++ IN char *pszCoffPath) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_AutoRegister: hDcdMgr 0x%x\n", ++ hDcdMgr); ++ ++ if (IsValidHandle(hDcdMgr)) { ++ status = DCD_GetObjects(hDcdMgr, pszCoffPath, ++ (DCD_REGISTERFXN)DCD_RegisterObject, ++ (void *)pszCoffPath); ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_AutoRegister: invalid DCD manager handle.\n"); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_AutoUnregister ======== ++ * Purpose: ++ * Parses the supplied DSP image and unresiters from DCD. ++ */ ++DSP_STATUS DCD_AutoUnregister(IN struct DCD_MANAGER *hDcdMgr, ++ IN char *pszCoffPath) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_AutoUnregister: hDcdMgr 0x%x\n", ++ hDcdMgr); ++ ++ if (IsValidHandle(hDcdMgr)) { ++ status = DCD_GetObjects(hDcdMgr, pszCoffPath, ++ (DCD_REGISTERFXN)DCD_RegisterObject, ++ NULL); ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_AutoUnregister: invalid DCD manager" ++ " handle.\n"); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_CreateManager ======== ++ * Purpose: ++ * Creates DCD manager. 
++ */ ++DSP_STATUS DCD_CreateManager(IN char *pszZlDllName, ++ OUT struct DCD_MANAGER **phDcdMgr) ++{ ++ struct COD_MANAGER *hCodMgr; /* COD manager handle */ ++ struct DCD_MANAGER *pDcdMgr = NULL; /* DCD Manager pointer */ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs >= 0); ++ DBC_Require(phDcdMgr); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_CreateManager: phDcdMgr 0x%x\n", ++ phDcdMgr); ++ ++ status = COD_Create(&hCodMgr, pszZlDllName, NULL); ++ if (DSP_SUCCEEDED(status)) { ++ ++ /* Create a DCD object. */ ++ MEM_AllocObject(pDcdMgr, struct DCD_MANAGER, SIGNATURE); ++ if (pDcdMgr != NULL) { ++ ++ /* Fill out the object. */ ++ pDcdMgr->hCodMgr = hCodMgr; ++ ++ /* Return handle to this DCD interface. */ ++ *phDcdMgr = pDcdMgr; ++ ++ GT_2trace(curTrace, GT_5CLASS, ++ "DCD_CreateManager: pDcdMgr 0x%x, " ++ " hCodMgr 0x%x", pDcdMgr, hCodMgr); ++ } else { ++ status = DSP_EMEMORY; ++ ++ /* ++ * If allocation of DcdManager object failed, delete the ++ * COD manager. ++ */ ++ COD_Delete(hCodMgr); ++ ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_CreateManager: MEM_AllocObject failed\n"); ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_CreateManager: COD_Create failed\n"); ++ } ++ ++ DBC_Ensure((DSP_SUCCEEDED(status)) || ((hCodMgr == NULL) && ++ (status == DSP_EFAIL)) || ((pDcdMgr == NULL) && ++ (status == DSP_EMEMORY))); ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_DestroyManager ======== ++ * Purpose: ++ * Frees DCD Manager object. ++ */ ++DSP_STATUS DCD_DestroyManager(IN struct DCD_MANAGER *hDcdMgr) ++{ ++ struct DCD_MANAGER *pDcdMgr = hDcdMgr; ++ DSP_STATUS status = DSP_EHANDLE; ++ ++ DBC_Require(cRefs >= 0); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_DestroyManager: hDcdMgr 0x%x\n", ++ hDcdMgr); ++ ++ if (IsValidHandle(hDcdMgr)) { ++ ++ /* Delete the COD manager. */ ++ COD_Delete(pDcdMgr->hCodMgr); ++ ++ /* Deallocate a DCD manager object. */ ++ MEM_FreeObject(pDcdMgr); ++ ++ status = DSP_SOK; ++ } else { ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_DestroyManager: invalid DCD manager handle.\n"); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_EnumerateObject ======== ++ * Purpose: ++ * Enumerates objects in the DCD. ++ */ ++DSP_STATUS DCD_EnumerateObject(IN s32 cIndex, IN enum DSP_DCDOBJTYPE objType, ++ OUT struct DSP_UUID *pUuid) ++{ ++ DSP_STATUS status = DSP_SOK; ++ char szRegKey[REG_MAXREGPATHLENGTH]; ++ char szValue[REG_MAXREGPATHLENGTH]; ++ char szData[REG_MAXREGPATHLENGTH]; ++ u32 dwValueSize; ++ u32 dwDataSize; ++ struct DSP_UUID dspUuid; ++ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ ++ u32 dwKeyLen = 0; ++ ++ DBC_Require(cRefs >= 0); ++ DBC_Require(cIndex >= 0); ++ DBC_Require(pUuid != NULL); ++ ++ GT_3trace(curTrace, GT_ENTER, ++ "DCD_EnumerateObject: cIndex %d, objType %d, " ++ " pUuid 0x%x\n", cIndex, objType, pUuid); ++ ++ if ((cIndex != 0) && (cEnumRefs == 0)) { ++ /* ++ * If an enumeration is being performed on an index greater ++ * than zero, then the current cEnumRefs must have been ++ * incremented to greater than zero. ++ */ ++ status = DSP_ECHANGEDURINGENUM; ++ } else { ++ /* Enumerate a specific key in the registry by index. */ ++ dwValueSize = REG_MAXREGPATHLENGTH; ++ dwDataSize = REG_MAXREGPATHLENGTH; ++ ++ /* ++ * Pre-determine final key length. It's length of DCD_REGKEY + ++ * "_\0" + length of szObjType string + terminating NULL. 
++ */ ++ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; ++ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); ++ ++ /* Create proper REG key; concatenate DCD_REGKEY with ++ * objType. */ ++ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); ++ if ((strlen(szRegKey) + strlen("_\0")) < ++ REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, "_\0", 2); ++ } else { ++ status = DSP_EFAIL; ++ } ++ ++ /* This snprintf is guaranteed not to exceed max size of an ++ * integer. */ ++ status = snprintf(szObjType, MAX_INT2CHAR_LENGTH, "%d", ++ objType); ++ ++ if (status == -1) { ++ status = DSP_EFAIL; ++ } else { ++ status = DSP_SOK; ++ if ((strlen(szRegKey) + strlen(szObjType)) < ++ REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, szObjType, ++ strlen(szObjType) + 1); ++ } else { ++ status = DSP_EFAIL; ++ } ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = REG_EnumValue(NULL, cIndex, szRegKey, szValue, ++ &dwValueSize, szData, ++ &dwDataSize); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Create UUID value using string retrieved from ++ * registry. */ ++ UUID_UuidFromString(szValue, &dspUuid); ++ ++ *pUuid = dspUuid; ++ ++ /* Increment cEnumRefs to update reference count. */ ++ cEnumRefs++; ++ ++ status = DSP_SOK; ++ } else if (status == REG_E_NOMOREITEMS) { ++ /* At the end of enumeration. Reset cEnumRefs. */ ++ cEnumRefs = 0; ++ ++ status = DSP_SENUMCOMPLETE; ++ } else { ++ status = DSP_EFAIL; ++ GT_1trace(curTrace, GT_6CLASS, ++ "DCD_EnumerateObject: REG_EnumValue" ++ " failed, status = 0x%x\n", status); ++ } ++ } ++ ++ DBC_Ensure(pUuid || (status == DSP_EFAIL)); ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_Exit ======== ++ * Purpose: ++ * Discontinue usage of the DCD module. ++ */ ++void DCD_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(curTrace, GT_5CLASS, "DCD_Exit: cRefs 0x%x\n", cRefs); ++ ++ cRefs--; ++ if (cRefs == 0) { ++ REG_Exit(); ++ COD_Exit(); ++ MEM_Exit(); ++ } ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== DCD_GetDepLibs ======== ++ */ ++DSP_STATUS DCD_GetDepLibs(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ u16 numLibs, OUT struct DSP_UUID *pDepLibUuids, ++ OUT bool *pPersistentDepLibs, IN enum NLDR_PHASE phase) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValidHandle(hDcdMgr)); ++ DBC_Require(pUuid != NULL); ++ DBC_Require(pDepLibUuids != NULL); ++ DBC_Require(pPersistentDepLibs != NULL); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_GetDepLibs: hDcdMgr 0x%x\n", ++ hDcdMgr); ++ ++ status = GetDepLibInfo(hDcdMgr, pUuid, &numLibs, NULL, pDepLibUuids, ++ pPersistentDepLibs, phase); ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_GetNumDepLibs ======== ++ */ ++DSP_STATUS DCD_GetNumDepLibs(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, OUT u16 *pNumLibs, ++ OUT u16 *pNumPersLibs, IN enum NLDR_PHASE phase) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(IsValidHandle(hDcdMgr)); ++ DBC_Require(pNumLibs != NULL); ++ DBC_Require(pNumPersLibs != NULL); ++ DBC_Require(pUuid != NULL); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_GetNumDepLibs: hDcdMgr 0x%x\n", ++ hDcdMgr); ++ ++ status = GetDepLibInfo(hDcdMgr, pUuid, pNumLibs, pNumPersLibs, ++ NULL, NULL, phase); ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_GetObjectDef ======== ++ * Purpose: ++ * Retrieves the properties of a node or processor based on the UUID and ++ * object type. 
++ */ ++DSP_STATUS DCD_GetObjectDef(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pObjUuid, ++ IN enum DSP_DCDOBJTYPE objType, ++ OUT struct DCD_GENERICOBJ *pObjDef) ++{ ++ struct DCD_MANAGER *pDcdMgr = hDcdMgr; /* pointer to DCD manager */ ++ struct COD_LIBRARYOBJ *lib = NULL; ++ DSP_STATUS status = DSP_SOK; ++ u32 ulAddr = 0; /* Used by COD_GetSection */ ++ u32 ulLen = 0; /* Used by COD_GetSection */ ++ u32 dwBufSize; /* Used by REG functions */ ++ char szRegKey[REG_MAXREGPATHLENGTH]; ++ char *szUuid; /*[MAXUUIDLEN];*/ ++ char szRegData[REG_MAXREGPATHLENGTH]; ++ char szSectName[MAXUUIDLEN + 2]; /* ".[UUID]\0" */ ++ char *pszCoffBuf; ++ u32 dwKeyLen; /* Len of REG key. */ ++ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pObjDef != NULL); ++ DBC_Require(pObjUuid != NULL); ++ ++ GT_4trace(curTrace, GT_ENTER, ++ "DCD_GetObjectDef: hDcdMgr 0x%x, " "objUuid" ++ " 0x%x, objType 0x%x, pObjDef 0x%x\n", hDcdMgr, pObjUuid, ++ objType, pObjDef); ++ szUuid = (char *)MEM_Calloc(MAXUUIDLEN, MEM_PAGED); ++ if (!szUuid) ++ return status = DSP_EMEMORY; ++ ++ if (!IsValidHandle(hDcdMgr)) { ++ status = DSP_EHANDLE; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: invalid " ++ "DCD manager handle.\n"); ++ goto func_end; ++ } ++ /* Pre-determine final key length. It's length of DCD_REGKEY + ++ * "_\0" + length of szObjType string + terminating NULL */ ++ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; ++ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); ++ /* Create proper REG key; concatenate DCD_REGKEY with objType. */ ++ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); ++ ++ if ((strlen(szRegKey) + strlen("_\0")) < REG_MAXREGPATHLENGTH) ++ strncat(szRegKey, "_\0", 2); ++ else ++ status = DSP_EFAIL; ++ ++ status = snprintf(szObjType, MAX_INT2CHAR_LENGTH, "%d", objType); ++ if (status == -1) { ++ status = DSP_EFAIL; ++ } else { ++ status = DSP_SOK; ++ ++ if ((strlen(szRegKey) + strlen(szObjType)) < ++ REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, szObjType, strlen(szObjType) + 1); ++ } else { ++ status = DSP_EFAIL; ++ } ++ /* Create UUID value to set in registry. */ ++ UUID_UuidToString(pObjUuid, szUuid, MAXUUIDLEN); ++ ++ if ((strlen(szRegKey) + MAXUUIDLEN) < REG_MAXREGPATHLENGTH) ++ strncat(szRegKey, szUuid, MAXUUIDLEN); ++ else ++ status = DSP_EFAIL; ++ ++ /* Retrieve paths from the registry based on struct DSP_UUID */ ++ dwBufSize = REG_MAXREGPATHLENGTH; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = REG_GetValue(NULL, szRegKey, szRegKey, (u8 *)szRegData, ++ &dwBufSize); ++ } ++ if (DSP_FAILED(status)) { ++ status = DSP_EUUID; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " ++ "REG_GetValue() failed\n"); ++ goto func_end; ++ } ++ /* Open COFF file. */ ++ status = COD_Open(pDcdMgr->hCodMgr, szRegData, COD_NOLOAD, &lib); ++ if (DSP_FAILED(status)) { ++ status = DSP_EDCDLOADBASE; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " ++ "COD_OpenBase() failed\n"); ++ goto func_end; ++ } ++ /* Ensure szUuid + 1 is not greater than sizeof szSectName. */ ++ DBC_Assert((strlen(szUuid) + 1) < sizeof(szSectName)); ++ /* Create section name based on node UUID. A period is ++ * pre-pended to the UUID string to form the section name. ++ * I.e. ".24BC8D90_BB45_11d4_B756_006008BDB66F" */ ++ strncpy(szSectName, ".", 2); ++ strncat(szSectName, szUuid, strlen(szUuid)); ++ /* Get section information. 
*/ ++ status = COD_GetSection(lib, szSectName, &ulAddr, &ulLen); ++ if (DSP_FAILED(status)) { ++ status = DSP_EDCDGETSECT; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef:" ++ " COD_GetSection() failed\n"); ++ goto func_end; ++ } ++ /* Allocate zeroed buffer. */ ++ pszCoffBuf = MEM_Calloc(ulLen + 4, MEM_PAGED); ++#ifdef _DB_TIOMAP ++ if (strstr(szRegData, "iva") == NULL) { ++ /* Locate section by objectID and read its content. */ ++ status = COD_ReadSection(lib, szSectName, pszCoffBuf, ulLen); ++ } else { ++ status = COD_ReadSection(lib, szSectName, pszCoffBuf, ulLen); ++ GT_0trace(curTrace, GT_4CLASS, ++ "Skipped Byte swap for IVA !!\n"); ++ } ++#else ++ status = COD_ReadSection(lib, szSectName, pszCoffBuf, ulLen); ++#endif ++ if (DSP_SUCCEEDED(status)) { ++ /* Compres DSP buffer to conform to PC format. */ ++ if (strstr(szRegData, "iva") == NULL) { ++ CompressBuf(pszCoffBuf, ulLen, DSPWORDSIZE); ++ } else { ++ CompressBuf(pszCoffBuf, ulLen, 1); ++ GT_0trace(curTrace, GT_4CLASS, "Compressing IVA " ++ "COFF buffer by 1 for IVA !!\n"); ++ } ++ /* Parse the content of the COFF buffer. */ ++ status = GetAttrsFromBuf(pszCoffBuf, ulLen, objType, pObjDef); ++ if (DSP_FAILED(status)) { ++ status = DSP_EDCDPARSESECT; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " ++ "GetAttrsFromBuf() failed\n"); ++ } ++ } else { ++ status = DSP_EDCDREADSECT; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " ++ "COD_ReadSection() failed\n"); ++ } ++ /* Free the previously allocated dynamic buffer. */ ++ MEM_Free(pszCoffBuf); ++func_end: ++ if (lib) ++ COD_Close(lib); ++ ++ if (szUuid) ++ MEM_Free(szUuid); ++ return status; ++} ++ ++/* ++ * ======== DCD_GetObjects ======== ++ */ ++DSP_STATUS DCD_GetObjects(IN struct DCD_MANAGER *hDcdMgr, IN char *pszCoffPath, ++ DCD_REGISTERFXN registerFxn, void *handle) ++{ ++ struct DCD_MANAGER *pDcdMgr = hDcdMgr; /* pointer to DCD manager */ ++ DSP_STATUS status = DSP_SOK; ++ char *pszCoffBuf; ++ char *pszCur; ++ struct COD_LIBRARYOBJ *lib = NULL; ++ u32 ulAddr = 0; /* Used by COD_GetSection */ ++ u32 ulLen = 0; /* Used by COD_GetSection */ ++ char seps[] = ":, "; ++ char *pToken = NULL; ++ struct DSP_UUID dspUuid; ++ s32 cObjectType; ++ ++ DBC_Require(cRefs > 0); ++ GT_1trace(curTrace, GT_ENTER, ++ "DCD_GetObjects: hDcdMgr 0x%x\n", hDcdMgr); ++ if (!IsValidHandle(hDcdMgr)) { ++ status = DSP_EHANDLE; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_GetObjects: invalid DCD manager handle.\n"); ++ goto func_end; ++ } ++ /* Open DSP coff file, don't load symbols. */ ++ status = COD_Open(pDcdMgr->hCodMgr, pszCoffPath, COD_NOLOAD, &lib); ++ if (DSP_FAILED(status)) { ++ status = DSP_EDCDLOADBASE; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_AutoRegister: COD_Open() failed\n"); ++ goto func_cont; ++ } ++ /* Get DCD_RESIGER_SECTION section information. */ ++ status = COD_GetSection(lib, DCD_REGISTER_SECTION, &ulAddr, &ulLen); ++ if (DSP_FAILED(status) || !(ulLen > 0)) { ++ status = DSP_EDCDNOAUTOREGISTER; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_GetObjects: COD_GetSection() " ++ "- no auto register section\n"); ++ goto func_cont; ++ } ++ /* Allocate zeroed buffer. */ ++ pszCoffBuf = MEM_Calloc(ulLen + 4, MEM_PAGED); ++#ifdef _DB_TIOMAP ++ if (strstr(pszCoffPath, "iva") == NULL) { ++ /* Locate section by objectID and read its content. 
*/ ++ status = COD_ReadSection(lib, DCD_REGISTER_SECTION, ++ pszCoffBuf, ulLen); ++ } else { ++ GT_0trace(curTrace, GT_4CLASS, "Skipped Byte swap for IVA!!\n"); ++ status = COD_ReadSection(lib, DCD_REGISTER_SECTION, ++ pszCoffBuf, ulLen); ++ } ++#else ++ status = COD_ReadSection(lib, DCD_REGISTER_SECTION, pszCoffBuf, ulLen); ++#endif ++ if (DSP_SUCCEEDED(status)) { ++ /* Compress DSP buffer to conform to PC format. */ ++ GT_0trace(curTrace, GT_4CLASS, ++ "Successfully read section !!\n"); ++ if (strstr(pszCoffPath, "iva") == NULL) { ++ CompressBuf(pszCoffBuf, ulLen, DSPWORDSIZE); ++ } else { ++ CompressBuf(pszCoffBuf, ulLen, 1); ++ GT_0trace(curTrace, GT_4CLASS, "Compress COFF buffer " ++ "with 1 word for IVA !!\n"); ++ } ++ /* Read from buffer and register object in buffer. */ ++ pszCur = pszCoffBuf; ++ while ((pToken = strsep(&pszCur, seps)) && *pToken != '\0') { ++ /* Retrieve UUID string. */ ++ UUID_UuidFromString(pToken, &dspUuid); ++ /* Retrieve object type */ ++ pToken = strsep(&pszCur, seps); ++ /* Retrieve object type */ ++ cObjectType = Atoi(pToken); ++ /* ++ * Apply registerFxn to the found DCD object. ++ * Possible actions include: ++ * ++ * 1) Register found DCD object. ++ * 2) Unregister found DCD object (when handle == NULL) ++ * 3) Add overlay node. ++ */ ++ GT_1trace(curTrace, GT_4CLASS, "Registering objtype " ++ "%d \n", cObjectType); ++ status = registerFxn(&dspUuid, cObjectType, handle); ++ if (DSP_SUCCEEDED(status)) { ++ GT_1trace(curTrace, GT_5CLASS, ++ "DCD_GetObjects: status 0x%x\n", ++ status); ++ } else { ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_GetObjects: " ++ "registration() failed\n"); ++ /* if error occurs, break from while loop. */ ++ break; ++ } ++ } ++ } else { ++ status = DSP_EDCDREADSECT; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjects: " ++ "COD_ReadSection() failed\n"); ++ } ++ /* Free the previously allocated dynamic buffer. */ ++ MEM_Free(pszCoffBuf); ++func_cont: ++ if (lib) ++ COD_Close(lib); ++ ++func_end: ++ return status; ++} ++ ++/* ++ * ======== DCD_GetLibraryName ======== ++ * Purpose: ++ * Retrieves the library name for the given UUID. ++ * ++ */ ++DSP_STATUS DCD_GetLibraryName(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ IN OUT char *pstrLibName, IN OUT u32 *pdwSize, ++ enum NLDR_PHASE phase, OUT bool *fPhaseSplit) ++{ ++ char szRegKey[REG_MAXREGPATHLENGTH]; ++ char szUuid[MAXUUIDLEN]; ++ u32 dwKeyLen; /* Len of REG key. */ ++ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(pUuid != NULL); ++ DBC_Require(pstrLibName != NULL); ++ DBC_Require(pdwSize != NULL); ++ DBC_Require(IsValidHandle(hDcdMgr)); ++ ++ GT_4trace(curTrace, GT_ENTER, ++ "DCD_GetLibraryName: hDcdMgr 0x%x, pUuid 0x%x, " ++ " pstrLibName 0x%x, pdwSize 0x%x\n", hDcdMgr, pUuid, ++ pstrLibName, pdwSize); ++ /* ++ * Pre-determine final key length. It's length of DCD_REGKEY + ++ * "_\0" + length of szObjType string + terminating NULL. ++ */ ++ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; ++ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); ++ /* Create proper REG key; concatenate DCD_REGKEY with objType. 
*/ ++ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); ++ if ((strlen(szRegKey) + strlen("_\0")) < REG_MAXREGPATHLENGTH) ++ strncat(szRegKey, "_\0", 2); ++ else ++ status = DSP_EFAIL; ++ ++ switch (phase) { ++ case NLDR_CREATE: ++ /* create phase type */ ++ sprintf(szObjType, "%d", DSP_DCDCREATELIBTYPE); ++ break; ++ case NLDR_EXECUTE: ++ /* execute phase type */ ++ sprintf(szObjType, "%d", DSP_DCDEXECUTELIBTYPE); ++ break; ++ case NLDR_DELETE: ++ /* delete phase type */ ++ sprintf(szObjType, "%d", DSP_DCDDELETELIBTYPE); ++ break; ++ case NLDR_NOPHASE: ++ /* known to be a dependent library */ ++ sprintf(szObjType, "%d", DSP_DCDLIBRARYTYPE); ++ break; ++ default: ++ status = -1; ++ DBC_Assert(false); ++ } ++ if (status == -1) { ++ status = DSP_EFAIL; ++ } else { ++ status = DSP_SOK; ++ if ((strlen(szRegKey) + strlen(szObjType)) ++ < REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, szObjType, strlen(szObjType) + 1); ++ } else { ++ status = DSP_EFAIL; ++ } ++ /* Create UUID value to find match in registry. */ ++ UUID_UuidToString(pUuid, szUuid, MAXUUIDLEN); ++ if ((strlen(szRegKey) + MAXUUIDLEN) < ++ REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, szUuid, MAXUUIDLEN); ++ } else { ++ status = DSP_EFAIL; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Retrieve path from the registry based on DSP_UUID */ ++ status = REG_GetValue(NULL, szRegKey, szRegKey, ++ (u8 *)pstrLibName, pdwSize); ++ } ++ /* If can't find, phases might be registered as generic LIBRARYTYPE */ ++ if (DSP_FAILED(status) && phase != NLDR_NOPHASE) { ++ if (fPhaseSplit) ++ *fPhaseSplit = false; ++ ++ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); ++ if ((strlen(szRegKey) + strlen("_\0")) < ++ REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, "_\0", 2); ++ } else { ++ status = DSP_EFAIL; ++ } ++ sprintf(szObjType, "%d", DSP_DCDLIBRARYTYPE); ++ if ((strlen(szRegKey) + strlen(szObjType)) ++ < REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, szObjType, strlen(szObjType) + 1); ++ } else { ++ status = DSP_EFAIL; ++ } ++ UUID_UuidToString(pUuid, szUuid, MAXUUIDLEN); ++ if ((strlen(szRegKey) + MAXUUIDLEN) < REG_MAXREGPATHLENGTH) ++ strncat(szRegKey, szUuid, MAXUUIDLEN); ++ else ++ status = DSP_EFAIL; ++ ++ status = REG_GetValue(NULL, szRegKey, szRegKey, ++ (u8 *)pstrLibName, pdwSize); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_Init ======== ++ * Purpose: ++ * Initialize the DCD module. ++ */ ++bool DCD_Init(void) ++{ ++ bool fInitMEM; ++ bool fInitREG; ++ bool fInitCOD; ++ bool fInit = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_Init: (on enter) cRefs = 0x%x\n", ++ cRefs); ++ ++ if (cRefs == 0) { ++ ++ /* Initialize required modules. */ ++ fInitMEM = MEM_Init(); ++ fInitCOD = COD_Init(); ++ fInitREG = REG_Init(); ++ if (!fInitMEM || !fInitCOD || !fInitREG) { ++ fInit = false; ++ GT_0trace(curTrace, GT_6CLASS, "DCD_Init failed\n"); ++ /* Exit initialized modules. */ ++ if (fInitMEM) ++ MEM_Exit(); ++ ++ if (fInitCOD) ++ COD_Exit(); ++ ++ if (fInitREG) ++ REG_Exit(); ++ ++ } ++ } ++ ++ if (fInit) ++ cRefs++; ++ ++ ++ GT_1trace(curTrace, GT_5CLASS, "DCD_Init: (on exit) cRefs = 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((fInit && (cRefs > 0)) || (!fInit && (cRefs == 0))); ++ ++ return fInit; ++} ++ ++/* ++ * ======== DCD_RegisterObject ======== ++ * Purpose: ++ * Registers a node or a processor with the DCD. ++ * If pszPathName == NULL, unregister the specified DCD object. 
++ */ ++DSP_STATUS DCD_RegisterObject(IN struct DSP_UUID *pUuid, ++ IN enum DSP_DCDOBJTYPE objType, ++ IN char *pszPathName) ++{ ++ DSP_STATUS status = DSP_SOK; ++ char szRegKey[REG_MAXREGPATHLENGTH]; ++ char szUuid[MAXUUIDLEN + 1]; ++ u32 dwPathSize = 0; ++ u32 dwKeyLen; /* Len of REG key. */ ++ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pUuid != NULL); ++ DBC_Require((objType == DSP_DCDNODETYPE) || ++ (objType == DSP_DCDPROCESSORTYPE) || ++ (objType == DSP_DCDLIBRARYTYPE) || ++ (objType == DSP_DCDCREATELIBTYPE) || ++ (objType == DSP_DCDEXECUTELIBTYPE) || ++ (objType == DSP_DCDDELETELIBTYPE)); ++ ++ GT_3trace(curTrace, GT_ENTER, "DCD_RegisterObject: object UUID 0x%x, " ++ "objType %d, szPathName %s\n", pUuid, objType, pszPathName); ++ /* ++ * Pre-determine final key length. It's length of DCD_REGKEY + ++ * "_\0" + length of szObjType string + terminating NULL. ++ */ ++ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; ++ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); ++ /* Create proper REG key; concatenate DCD_REGKEY with objType. */ ++ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); ++ if ((strlen(szRegKey) + strlen("_\0")) < REG_MAXREGPATHLENGTH) ++ strncat(szRegKey, "_\0", 2); ++ else ++ status = DSP_EFAIL; ++ ++ status = snprintf(szObjType, MAX_INT2CHAR_LENGTH, "%d", objType); ++ if (status == -1) { ++ status = DSP_EFAIL; ++ } else { ++ status = DSP_SOK; ++ if ((strlen(szRegKey) + strlen(szObjType)) < ++ REG_MAXREGPATHLENGTH) { ++ strncat(szRegKey, szObjType, strlen(szObjType) + 1); ++ } else { ++ status = DSP_EFAIL; ++ } ++ /* Create UUID value to set in registry. */ ++ UUID_UuidToString(pUuid, szUuid, MAXUUIDLEN); ++ if ((strlen(szRegKey) + MAXUUIDLEN) < REG_MAXREGPATHLENGTH) ++ strncat(szRegKey, szUuid, MAXUUIDLEN); ++ else ++ status = DSP_EFAIL; ++ ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* ++ * If pszPathName != NULL, perform registration, otherwise, ++ * perform unregistration. ++ */ ++ if (pszPathName) { ++ /* Add new reg value (UUID+objType) with COFF path ++ * info. */ ++ dwPathSize = strlen(pszPathName) + 1; ++ status = REG_SetValue(NULL, szRegKey, szRegKey, REG_SZ, ++ (u8 *)pszPathName, dwPathSize); ++ GT_3trace(curTrace, GT_6CLASS, ++ "REG_SetValue REG_SZ=%d, " ++ "(u8 *)pszPathName=%s, dwPathSize=%d\n", ++ REG_SZ, pszPathName, dwPathSize); ++ if (DSP_FAILED(status)) { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_RegisterObject: REG_SetValue failed!\n"); ++ } ++ } else { ++ /* Deregister an existing object. */ ++ status = REG_DeleteValue(NULL, szRegKey, szRegKey); ++ if (DSP_FAILED(status)) { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_6CLASS, ++ "DCD_UnregisterObject: " ++ "REG_DeleteValue failed!\n"); ++ } ++ } ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* ++ * Because the node database has been updated through a ++ * successful object registration/de-registration operation, ++ * we need to reset the object enumeration counter to allow ++ * current enumerations to reflect this update in the node ++ * database. ++ */ ++ ++ cEnumRefs = 0; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DCD_UnregisterObject ======== ++ * Call DCD_Register object with pszPathName set to NULL to ++ * perform actual object de-registration. 
++ */ ++DSP_STATUS DCD_UnregisterObject(IN struct DSP_UUID *pUuid, ++ IN enum DSP_DCDOBJTYPE objType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pUuid != NULL); ++ DBC_Require((objType == DSP_DCDNODETYPE) || ++ (objType == DSP_DCDPROCESSORTYPE) || ++ (objType == DSP_DCDLIBRARYTYPE) || ++ (objType == DSP_DCDCREATELIBTYPE) || ++ (objType == DSP_DCDEXECUTELIBTYPE) || ++ (objType == DSP_DCDDELETELIBTYPE)); ++ ++ GT_2trace(curTrace, GT_ENTER, ++ "DCD_UnregisterObject: object UUID 0x%x, " ++ "objType %d\n", pUuid, objType); ++ ++ /* ++ * When DCD_RegisterObject is called with NULL as pathname, ++ * it indicates an unregister object operation. ++ */ ++ status = DCD_RegisterObject(pUuid, objType, NULL); ++ ++ return status; ++} ++ ++/* ++ ********************************************************************** ++ * DCD Helper Functions ++ ********************************************************************** ++ */ ++ ++/* ++ * ======== Atoi ======== ++ * Purpose: ++ * This function converts strings in decimal or hex format to integers. ++ */ ++static s32 Atoi(char *pszBuf) ++{ ++ s32 result = 0; ++ char *pch = pszBuf; ++ char c; ++ char first; ++ s32 base = 10; ++ s32 len; ++ ++ while (isspace(*pch)) ++ pch++; ++ ++ first = *pch; ++ if (first == '-' || first == '+') { ++ pch++; ++ } else { ++ /* Determine if base 10 or base 16 */ ++ len = strlen(pch); ++ if (len > 1) { ++ c = pch[1]; ++ if ((*pch == '0' && (c == 'x' || c == 'X'))) { ++ base = 16; ++ pch += 2; ++ } ++ c = pch[len - 1]; ++ if (c == 'h' || c == 'H') ++ base = 16; ++ ++ } ++ } ++ ++ while (isdigit(c = *pch) || ((base == 16) && isxdigit(c))) { ++ result *= base; ++ if ('A' <= c && c <= 'F') { ++ c = c - 'A' + 10; ++ } else { ++ if ('a' <= c && c <= 'f') ++ c = c - 'a' + 10; ++ else ++ c -= '0'; ++ ++ } ++ result += c; ++ ++pch; ++ } ++ ++ return result; ++} ++ ++/* ++ * ======== GetAttrsFromBuf ======== ++ * Purpose: ++ * Parse the content of a buffer filled with DSP-side data and ++ * retrieve an object's attributes from it. IMPORTANT: Assume the ++ * buffer has been converted from DSP format to GPP format. ++ */ ++static DSP_STATUS GetAttrsFromBuf(char *pszBuf, u32 ulBufSize, ++ enum DSP_DCDOBJTYPE objType, ++ struct DCD_GENERICOBJ *pGenObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ char seps[] = ", "; ++ char *pszCur; ++ char *token; ++ s32 cLen = 0; ++ u32 i = 0; ++#ifdef _DB_TIOMAP ++ s32 iEntry; ++#endif ++ ++ DBC_Require(pszBuf != NULL); ++ DBC_Require(ulBufSize != 0); ++ DBC_Require((objType == DSP_DCDNODETYPE) ++ || (objType == DSP_DCDPROCESSORTYPE)); ++ DBC_Require(pGenObj != NULL); ++ ++ ++ switch (objType) { ++ case DSP_DCDNODETYPE: ++ /* ++ * Parse COFF sect buffer to retrieve individual tokens used ++ * to fill in object attrs. 
++ */ ++ pszCur = pszBuf; ++ token = strsep(&pszCur, seps); ++ ++ /* u32 cbStruct */ ++ pGenObj->objData.nodeObj.ndbProps.cbStruct = ++ (u32) Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* DSP_UUID uiNodeID */ ++ UUID_UuidFromString(token, ++ &pGenObj->objData.nodeObj.ndbProps.uiNodeID); ++ token = strsep(&pszCur, seps); ++ ++ /* acName */ ++ DBC_Require(token); ++ cLen = strlen(token); ++ if (cLen > DSP_MAXNAMELEN - 1) ++ cLen = DSP_MAXNAMELEN - 1; ++ ++ strncpy(pGenObj->objData.nodeObj.ndbProps.acName, ++ token, cLen); ++ pGenObj->objData.nodeObj.ndbProps.acName[cLen] = '\0'; ++ token = strsep(&pszCur, seps); ++ /* u32 uNodeType */ ++ pGenObj->objData.nodeObj.ndbProps.uNodeType = Atoi(token); ++ token = strsep(&pszCur, seps); ++ /* u32 bCacheOnGPP */ ++ pGenObj->objData.nodeObj.ndbProps.bCacheOnGPP = Atoi(token); ++ token = strsep(&pszCur, seps); ++ /* DSP_RESOURCEREQMTS dspResourceReqmts */ ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts.cbStruct = ++ (u32) Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uStaticDataSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uGlobalDataSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uProgramMemSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uWCExecutionTime = Atoi(token); ++ token = strsep(&pszCur, seps); ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uWCPeriod = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uWCDeadline = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. ++ uAvgExectionTime = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. 
++ uMinimumPeriod = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* s32 iPriority */ ++ pGenObj->objData.nodeObj.ndbProps.iPriority = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uStackSize */ ++ pGenObj->objData.nodeObj.ndbProps.uStackSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uSysStackSize */ ++ pGenObj->objData.nodeObj.ndbProps.uSysStackSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uStackSeg */ ++ pGenObj->objData.nodeObj.ndbProps.uStackSeg = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uMessageDepth */ ++ pGenObj->objData.nodeObj.ndbProps.uMessageDepth = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uNumInputStreams */ ++ pGenObj->objData.nodeObj.ndbProps.uNumInputStreams = ++ Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uNumOutputStreams */ ++ pGenObj->objData.nodeObj.ndbProps.uNumOutputStreams = ++ Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* u32 uTimeout */ ++ pGenObj->objData.nodeObj.ndbProps.uTimeout = ++ Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* char * pstrCreatePhaseFxn */ ++ DBC_Require(token); ++ cLen = strlen(token); ++ pGenObj->objData.nodeObj.pstrCreatePhaseFxn = ++ MEM_Calloc(cLen + 1, MEM_PAGED); ++ strncpy(pGenObj->objData.nodeObj.pstrCreatePhaseFxn, ++ token, cLen); ++ pGenObj->objData.nodeObj.pstrCreatePhaseFxn[cLen] = '\0'; ++ token = strsep(&pszCur, seps); ++ ++ /* char * pstrExecutePhaseFxn */ ++ DBC_Require(token); ++ cLen = strlen(token); ++ pGenObj->objData.nodeObj.pstrExecutePhaseFxn = ++ MEM_Calloc(cLen + 1, MEM_PAGED); ++ strncpy(pGenObj->objData.nodeObj.pstrExecutePhaseFxn, ++ token, cLen); ++ pGenObj->objData.nodeObj.pstrExecutePhaseFxn[cLen] = '\0'; ++ token = strsep(&pszCur, seps); ++ ++ /* char * pstrDeletePhaseFxn */ ++ DBC_Require(token); ++ cLen = strlen(token); ++ pGenObj->objData.nodeObj.pstrDeletePhaseFxn = ++ MEM_Calloc(cLen + 1, MEM_PAGED); ++ strncpy(pGenObj->objData.nodeObj.pstrDeletePhaseFxn, ++ token, cLen); ++ pGenObj->objData.nodeObj.pstrDeletePhaseFxn[cLen] = '\0'; ++ token = strsep(&pszCur, seps); ++ ++ /* Segment id for message buffers */ ++ pGenObj->objData.nodeObj.uMsgSegid = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* Message notification type */ ++ pGenObj->objData.nodeObj.uMsgNotifyType = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ /* char * pstrIAlgName */ ++ if (token) { ++ cLen = strlen(token); ++ pGenObj->objData.nodeObj.pstrIAlgName = ++ MEM_Calloc(cLen + 1, MEM_PAGED); ++ strncpy(pGenObj->objData.nodeObj.pstrIAlgName, ++ token, cLen); ++ pGenObj->objData.nodeObj.pstrIAlgName[cLen] = '\0'; ++ token = strsep(&pszCur, seps); ++ } ++ ++ /* Load type (static, dynamic, or overlay) */ ++ if (token) { ++ pGenObj->objData.nodeObj.usLoadType = Atoi(token); ++ token = strsep(&pszCur, seps); ++ } ++ ++ /* Dynamic load data requirements */ ++ if (token) { ++ pGenObj->objData.nodeObj.ulDataMemSegMask = Atoi(token); ++ token = strsep(&pszCur, seps); ++ } ++ ++ /* Dynamic load code requirements */ ++ if (token) { ++ pGenObj->objData.nodeObj.ulCodeMemSegMask = Atoi(token); ++ token = strsep(&pszCur, seps); ++ } ++ ++ /* Extract node profiles into node properties */ ++ if (token) { ++ ++ pGenObj->objData.nodeObj.ndbProps.uCountProfiles = ++ Atoi(token); ++ for (i = 0; i < pGenObj->objData.nodeObj.ndbProps. ++ uCountProfiles; i++) { ++ token = strsep(&pszCur, seps); ++ if (token) { ++ /* Heap Size for the node */ ++ pGenObj->objData.nodeObj.ndbProps. 
++ aProfiles[i].ulHeapSize = ++ Atoi(token); ++ } ++ } ++ } ++ token = strsep(&pszCur, seps); ++ if (token) { ++ pGenObj->objData.nodeObj.ndbProps.uStackSegName = ++ (u32)(token); ++ } ++ ++ break; ++ ++ case DSP_DCDPROCESSORTYPE: ++ /* ++ * Parse COFF sect buffer to retrieve individual tokens used ++ * to fill in object attrs. ++ */ ++ pszCur = pszBuf; ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.cbStruct = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.uProcessorFamily = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.uProcessorType = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.uClockRate = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.ulInternalMemSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.ulExternalMemSize = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.uProcessorID = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.tyRunningRTOS = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.nNodeMinPriority = Atoi(token); ++ token = strsep(&pszCur, seps); ++ ++ pGenObj->objData.procObj.nNodeMaxPriority = Atoi(token); ++ ++#ifdef _DB_TIOMAP ++ /* Proc object may contain additional(extended) attributes. */ ++ /* attr must match proc.hxx */ ++ for (iEntry = 0; iEntry < 7; iEntry++) { ++ token = strsep(&pszCur, seps); ++ pGenObj->objData.extProcObj.tyTlb[iEntry].ulGppPhys = ++ Atoi(token); ++ ++ token = strsep(&pszCur, seps); ++ pGenObj->objData.extProcObj.tyTlb[iEntry].ulDspVirt = ++ Atoi(token); ++ } ++#endif ++ ++ break; ++ ++ default: ++ status = DSP_EFAIL; ++ break; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== CompressBuffer ======== ++ * Purpose: ++ * Compress the DSP buffer, if necessary, to conform to PC format. ++ */ ++static void CompressBuf(char *pszBuf, u32 ulBufSize, s32 cCharSize) ++{ ++ char *p; ++ char ch; ++ char *q; ++ ++ p = pszBuf; ++ if (p == NULL) ++ return; ++ ++ for (q = pszBuf; q < (pszBuf + ulBufSize);) { ++ ++ ch = DspChar2GppChar(q, cCharSize); ++ if (ch == '\\') { ++ q += cCharSize; ++ ch = DspChar2GppChar(q, cCharSize); ++ switch (ch) { ++ case 't': ++ *p = '\t'; ++ break; ++ ++ case 'n': ++ *p = '\n'; ++ break; ++ ++ case 'r': ++ *p = '\r'; ++ break; ++ ++ case '0': ++ *p = '\0'; ++ break; ++ ++ default: ++ *p = ch; ++ break; ++ } ++ } else { ++ *p = ch; ++ } ++ p++; ++ q += cCharSize; ++ } ++ ++ /* NULL out remainder of buffer. 
*/ ++ while (p < q) ++ *p++ = '\0'; ++ ++} ++ ++/* ++ * ======== DspChar2GppChar ======== ++ * Purpose: ++ * Convert DSP char to host GPP char in a portable manner ++ */ ++static char DspChar2GppChar(char *pWord, s32 cDspCharSize) ++{ ++ char ch = '\0'; ++ char *chSrc; ++ s32 i; ++ ++ for (chSrc = pWord, i = cDspCharSize; i > 0; i--) ++ ch |= *chSrc++; ++ ++ return ch; ++} ++ ++/* ++ * ======== GetDepLibInfo ======== ++ */ ++static DSP_STATUS GetDepLibInfo(IN struct DCD_MANAGER *hDcdMgr, ++ IN struct DSP_UUID *pUuid, ++ IN OUT u16 *pNumLibs, ++ OPTIONAL OUT u16 *pNumPersLibs, ++ OPTIONAL OUT struct DSP_UUID *pDepLibUuids, ++ OPTIONAL OUT bool *pPersistentDepLibs, ++ enum NLDR_PHASE phase) ++{ ++ struct DCD_MANAGER *pDcdMgr = hDcdMgr; /* pointer to DCD manager */ ++ char *pszCoffBuf = NULL; ++ char *pszCur; ++ char *pszFileName = NULL; ++ struct COD_LIBRARYOBJ *lib = NULL; ++ u32 ulAddr = 0; /* Used by COD_GetSection */ ++ u32 ulLen = 0; /* Used by COD_GetSection */ ++ u32 dwDataSize = COD_MAXPATHLENGTH; ++ char seps[] = ", "; ++ char *pToken = NULL; ++ bool fGetUuids = (pDepLibUuids != NULL); ++ u16 nDepLibs = 0; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ ++ DBC_Require(IsValidHandle(hDcdMgr)); ++ DBC_Require(pNumLibs != NULL); ++ DBC_Require(pUuid != NULL); ++ ++ GT_1trace(curTrace, GT_ENTER, "DCD_GetNumDepLibs: hDcdMgr 0x%x\n", ++ hDcdMgr); ++ ++ /* Initialize to 0 dependent libraries, if only counting number of ++ * dependent libraries */ ++ if (!fGetUuids) { ++ *pNumLibs = 0; ++ *pNumPersLibs = 0; ++ } ++ ++ /* Allocate a buffer for file name */ ++ pszFileName = MEM_Calloc(dwDataSize, MEM_PAGED); ++ if (pszFileName == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ /* Get the name of the library */ ++ status = DCD_GetLibraryName(hDcdMgr, pUuid, pszFileName, ++ &dwDataSize, phase, NULL); ++ } ++ /* Open the library */ ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_Open(pDcdMgr->hCodMgr, pszFileName, ++ COD_NOLOAD, &lib); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Get dependent library section information. */ ++ status = COD_GetSection(lib, DEPLIBSECT, &ulAddr, &ulLen); ++ ++ if (DSP_FAILED(status)) { ++ /* Ok, no dependent libraries */ ++ ulLen = 0; ++ status = DSP_SNODEPENDENTLIBS; ++ } ++ } ++ ++ if (DSP_FAILED(status) || !(ulLen > 0)) ++ goto func_cont; ++ ++ /* Allocate zeroed buffer. */ ++ pszCoffBuf = MEM_Calloc(ulLen, MEM_PAGED); ++ if (pszCoffBuf == NULL) ++ status = DSP_EMEMORY; ++ ++ /* Read section contents. */ ++ status = COD_ReadSection(lib, DEPLIBSECT, pszCoffBuf, ulLen); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ /* Compress and format DSP buffer to conform to PC format. */ ++ CompressBuf(pszCoffBuf, ulLen, DSPWORDSIZE); ++ /* Read from buffer */ ++ pszCur = pszCoffBuf; ++ while ((pToken = strsep(&pszCur, seps)) && *pToken != '\0') { ++ if (fGetUuids) { ++ if (nDepLibs >= *pNumLibs) { ++ /* Gone beyond the limit */ ++ break; ++ } else { ++ /* Retrieve UUID string. */ ++ UUID_UuidFromString(pToken, ++ &(pDepLibUuids[nDepLibs])); ++ /* Is this library persistent? */ ++ pToken = strsep(&pszCur, seps); ++ pPersistentDepLibs[nDepLibs] = Atoi(pToken); ++ nDepLibs++; ++ } ++ } else { ++ /* Advanc to next token */ ++ pToken = strsep(&pszCur, seps); ++ if (Atoi(pToken)) ++ (*pNumPersLibs)++; ++ ++ /* Just counting number of dependent libraries */ ++ (*pNumLibs)++; ++ } ++ } ++func_cont: ++ if (lib) ++ COD_Close(lib); ++ ++ /* Free previously allocated dynamic buffers. 
*/ ++ if (pszFileName) ++ MEM_Free(pszFileName); ++ ++ if (pszCoffBuf) ++ MEM_Free(pszCoffBuf); ++ ++ return status; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/disp.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/disp.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/disp.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/disp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,916 @@ ++/* ++ * disp.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== disp.c ======== ++ * ++ * Description: ++ * Node Dispatcher interface. Communicates with Resource Manager Server ++ * (RMS) on DSP. Access to RMS is synchronized in NODE. ++ * ++ * Public Functions: ++ * DISP_Create ++ * DISP_Delete ++ * DISP_Exit ++ * DISP_Init ++ * DISP_NodeChangePriority ++ * DISP_NodeCreate ++ * DISP_NodeDelete ++ * DISP_NodePause ++ * DISP_NodeRun ++ * ++ *! Revision History: ++ *! ================= ++ *! 18-Feb-2003 vp Code review updates ++ *! 18-Oct-2002 vp Ported to Linux platform ++ *! 16-May-2002 jeh Added DISP_DoCinit(). ++ *! 24-Apr-2002 jeh Added DISP_MemWrite(). ++ *! 13-Feb-2002 jeh Pass system stack size to RMS. ++ *! 16-Jan-2002 ag Added bufsize param to _ChnlAddIOReq() fxn ++ *! 10-May-2001 jeh Code Review cleanup. ++ *! 26-Sep-2000 jeh Fixed status values in SendMessage(). ++ *! 19-Jun-2000 jeh Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Link Driver */ ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++#define DISP_SIGNATURE 0x50534944 /* "PSID" */ ++ ++/* Size of a reply from RMS */ ++#define REPLYSIZE (3 * sizeof(RMS_WORD)) ++ ++/* Reserved channel offsets for communication with RMS */ ++#define CHNLTORMSOFFSET 0 ++#define CHNLFROMRMSOFFSET 1 ++ ++#define CHNLIOREQS 1 ++ ++#define SwapWord(x) (((u32)(x) >> 16) | ((u32)(x) << 16)) ++ ++/* ++ * ======== DISP_OBJECT ======== ++ */ ++struct DISP_OBJECT { ++ u32 dwSignature; /* Used for object validation */ ++ struct DEV_OBJECT *hDevObject; /* Device for this processor */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ struct CHNL_MGR *hChnlMgr; /* Channel manager */ ++ struct CHNL_OBJECT *hChnlToDsp; /* Channel for commands to RMS */ ++ struct CHNL_OBJECT *hChnlFromDsp; /* Channel for replies from RMS */ ++ u8 *pBuf; /* Buffer for commands, replies */ ++ u32 ulBufsize; /* pBuf size in bytes */ ++ u32 ulBufsizeRMS; /* pBuf size in RMS words */ ++ u32 uCharSize; /* Size of DSP character */ ++ u32 uWordSize; /* Size of DSP word */ ++ u32 uDataMauSize; /* Size of DSP Data MAU */ ++}; ++ ++static u32 cRefs; ++ ++/* Debug msgs: */ ++#if GT_TRACE ++static struct GT_Mask DISP_DebugMask = { NULL, NULL }; ++#endif ++ ++static void DeleteDisp(struct DISP_OBJECT *hDisp); ++static DSP_STATUS FillStreamDef(RMS_WORD *pdwBuf, u32 *ptotal, u32 offset, ++ struct NODE_STRMDEF strmDef, u32 max, ++ u32 uCharsInRMSWord); ++static DSP_STATUS SendMessage(struct DISP_OBJECT *hDisp, u32 dwTimeout, ++ u32 ulBytes, OUT u32 *pdwArg); ++ ++/* ++ * ======== DISP_Create ======== ++ * Create a NODE Dispatcher object. 
++ */ ++DSP_STATUS DISP_Create(OUT struct DISP_OBJECT **phDispObject, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct DISP_ATTRS *pDispAttrs) ++{ ++ struct DISP_OBJECT *pDisp; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ u32 ulChnlId; ++ struct CHNL_ATTRS chnlAttrs; ++ DSP_STATUS status = DSP_SOK; ++ u32 devType; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDispObject != NULL); ++ DBC_Require(pDispAttrs != NULL); ++ DBC_Require(hDevObject != NULL); ++ ++ GT_3trace(DISP_DebugMask, GT_ENTER, "DISP_Create: phDispObject: 0x%x\t" ++ "hDevObject: 0x%x\tpDispAttrs: 0x%x\n", phDispObject, ++ hDevObject, pDispAttrs); ++ ++ *phDispObject = NULL; ++ ++ /* Allocate Node Dispatcher object */ ++ MEM_AllocObject(pDisp, struct DISP_OBJECT, DISP_SIGNATURE); ++ if (pDisp == NULL) { ++ status = DSP_EMEMORY; ++ GT_0trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Create: MEM_AllocObject() failed!\n"); ++ } else { ++ pDisp->hDevObject = hDevObject; ++ } ++ ++ /* Get Channel manager and WMD function interface */ ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetChnlMgr(hDevObject, &(pDisp->hChnlMgr)); ++ if (DSP_SUCCEEDED(status)) { ++ (void) DEV_GetIntfFxns(hDevObject, &pIntfFxns); ++ pDisp->pIntfFxns = pIntfFxns; ++ } else { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Create: Failed to get " ++ "channel manager! status = 0x%x\n", status); ++ } ++ } ++ ++ /* check device type and decide if streams or messag'ing is used for ++ * RMS/EDS */ ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ status = DEV_GetDevType(hDevObject, &devType); ++ GT_1trace(DISP_DebugMask, GT_6CLASS, "DISP_Create: Creating DISP for " ++ "device = 0x%x\n", devType); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ if (devType != DSP_UNIT) { ++ GT_0trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Create: Unkown device " ++ "type in Device object !! 
\n"); ++ status = DSP_EFAIL; ++ goto func_cont; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pDisp->uCharSize = DSPWORDSIZE; ++ pDisp->uWordSize = DSPWORDSIZE; ++ pDisp->uDataMauSize = DSPWORDSIZE; ++ /* Open channels for communicating with the RMS */ ++ chnlAttrs.uIOReqs = CHNLIOREQS; ++ chnlAttrs.hEvent = NULL; ++ ulChnlId = pDispAttrs->ulChnlOffset + CHNLTORMSOFFSET; ++ status = (*pIntfFxns->pfnChnlOpen)(&(pDisp->hChnlToDsp), ++ pDisp->hChnlMgr, CHNL_MODETODSP, ulChnlId, &chnlAttrs); ++ if (DSP_FAILED(status)) { ++ GT_2trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Create: Channel to RMS " ++ "open failed, chnl id = %d, status = 0x%x\n", ++ ulChnlId, status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ ulChnlId = pDispAttrs->ulChnlOffset + CHNLFROMRMSOFFSET; ++ status = (*pIntfFxns->pfnChnlOpen)(&(pDisp->hChnlFromDsp), ++ pDisp->hChnlMgr, CHNL_MODEFROMDSP, ulChnlId, ++ &chnlAttrs); ++ if (DSP_FAILED(status)) { ++ GT_2trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Create: Channel from RMS " ++ "open failed, chnl id = %d, status = 0x%x\n", ++ ulChnlId, status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Allocate buffer for commands, replies */ ++ pDisp->ulBufsize = pDispAttrs->ulChnlBufSize; ++ pDisp->ulBufsizeRMS = RMS_COMMANDBUFSIZE; ++ pDisp->pBuf = MEM_Calloc(pDisp->ulBufsize, MEM_PAGED); ++ if (pDisp->pBuf == NULL) { ++ status = DSP_EMEMORY; ++ GT_0trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Create: Failed " ++ "to allocate channel buffer!\n"); ++ } ++ } ++func_cont: ++ if (DSP_SUCCEEDED(status)) ++ *phDispObject = pDisp; ++ else ++ DeleteDisp(pDisp); ++ ++ DBC_Ensure(((DSP_FAILED(status)) && ((*phDispObject == NULL))) || ++ ((DSP_SUCCEEDED(status)) && ++ (MEM_IsValidHandle((*phDispObject), DISP_SIGNATURE)))); ++ return status; ++} ++ ++/* ++ * ======== DISP_Delete ======== ++ * Delete the NODE Dispatcher. ++ */ ++void DISP_Delete(struct DISP_OBJECT *hDisp) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); ++ ++ GT_1trace(DISP_DebugMask, GT_ENTER, ++ "DISP_Delete: hDisp: 0x%x\n", hDisp); ++ ++ DeleteDisp(hDisp); ++ ++ DBC_Ensure(!MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); ++} ++ ++/* ++ * ======== DISP_Exit ======== ++ * Discontinue usage of DISP module. ++ */ ++void DISP_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(DISP_DebugMask, GT_5CLASS, ++ "Entered DISP_Exit, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== DISP_Init ======== ++ * Initialize the DISP module. ++ */ ++bool DISP_Init(void) ++{ ++ bool fRetVal = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!DISP_DebugMask.flags); ++ GT_create(&DISP_DebugMask, "DI"); /* "DI" for DIspatcher */ ++ } ++ ++ if (fRetVal) ++ cRefs++; ++ ++ GT_1trace(DISP_DebugMask, GT_5CLASS, ++ "DISP_Init(), ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure((fRetVal && (cRefs > 0)) || (!fRetVal && (cRefs >= 0))); ++ return fRetVal; ++} ++ ++/* ++ * ======== DISP_NodeChangePriority ======== ++ * Change the priority of a node currently running on the target. 
++ */ ++DSP_STATUS DISP_NodeChangePriority(struct DISP_OBJECT *hDisp, ++ struct NODE_OBJECT *hNode, ++ u32 ulRMSFxn, NODE_ENV nodeEnv, ++ s32 nPriority) ++{ ++ u32 dwArg; ++ struct RMS_Command *pCommand; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); ++ DBC_Require(hNode != NULL); ++ ++ GT_5trace(DISP_DebugMask, GT_ENTER, "DISP_NodeChangePriority: hDisp: " ++ "0x%x\thNode: 0x%x\tulRMSFxn: 0x%x\tnodeEnv: 0x%x\tnPriority\n", ++ hDisp, hNode, ulRMSFxn, nodeEnv, nPriority); ++ ++ /* Send message to RMS to change priority */ ++ pCommand = (struct RMS_Command *)(hDisp->pBuf); ++ pCommand->fxn = (RMS_WORD)(ulRMSFxn); ++ pCommand->arg1 = (RMS_WORD)nodeEnv; ++ pCommand->arg2 = nPriority; ++ status = SendMessage(hDisp, NODE_GetTimeout(hNode), ++ sizeof(struct RMS_Command), &dwArg); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeChangePriority failed! " ++ "status = 0x%x\n", status); ++ } ++ return status; ++} ++ ++/* ++ * ======== DISP_NodeCreate ======== ++ * Create a node on the DSP by remotely calling the node's create function. ++ */ ++DSP_STATUS DISP_NodeCreate(struct DISP_OBJECT *hDisp, struct NODE_OBJECT *hNode, ++ u32 ulRMSFxn, u32 ulCreateFxn, ++ IN CONST struct NODE_CREATEARGS *pArgs, ++ OUT NODE_ENV *pNodeEnv) ++{ ++ struct NODE_MSGARGS msgArgs; ++ struct NODE_TASKARGS taskArgs; ++ struct RMS_Command *pCommand; ++ struct RMS_MsgArgs *pMsgArgs; ++ struct RMS_MoreTaskArgs *pMoreTaskArgs; ++ enum NODE_TYPE nodeType; ++ u32 dwLength; ++ RMS_WORD *pdwBuf = NULL; ++ u32 ulBytes; ++ u32 i; ++ u32 total; ++ u32 uCharsInRMSWord; ++ s32 taskArgsOffset; ++ s32 sioInDefOffset; ++ s32 sioOutDefOffset; ++ s32 sioDefsOffset; ++ s32 argsOffset = -1; ++ s32 offset; ++ struct NODE_STRMDEF strmDef; ++ u32 max; ++ DSP_STATUS status = DSP_SOK; ++ struct DSP_NODEINFO nodeInfo; ++ u32 devType; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); ++ DBC_Require(hNode != NULL); ++ DBC_Require(NODE_GetType(hNode) != NODE_DEVICE); ++ DBC_Require(pNodeEnv != NULL); ++ ++ GT_6trace(DISP_DebugMask, GT_ENTER, ++ "DISP_NodeCreate: hDisp: 0x%x\thNode:" ++ " 0x%x\tulRMSFxn: 0x%x\tulCreateFxn: 0x%x\tpArgs: 0x%x\tpNodeEnv:" ++ " 0x%x\n", hDisp, hNode, ulRMSFxn, ulCreateFxn, pArgs, pNodeEnv); ++ ++ status = DEV_GetDevType(hDisp->hDevObject, &devType); ++ ++ GT_1trace(DISP_DebugMask, GT_6CLASS, "DISP_Create: Creating DISP " ++ "for device = 0x%x\n", devType); ++ ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (devType != DSP_UNIT) { ++ GT_1trace(DISP_DebugMask, GT_7CLASS, ++ "DISP_NodeCreate unknown device " ++ "type = 0x%x\n", devType); ++ goto func_end; ++ } ++ DBC_Require(pArgs != NULL); ++ nodeType = NODE_GetType(hNode); ++ msgArgs = pArgs->asa.msgArgs; ++ max = hDisp->ulBufsizeRMS; /*Max # of RMS words that can be sent */ ++ DBC_Assert(max == RMS_COMMANDBUFSIZE); ++ uCharsInRMSWord = sizeof(RMS_WORD) / hDisp->uCharSize; ++ /* Number of RMS words needed to hold arg data */ ++ dwLength = (msgArgs.uArgLength + uCharsInRMSWord - 1) / uCharsInRMSWord; ++ /* Make sure msg args and command fit in buffer */ ++ total = sizeof(struct RMS_Command) / sizeof(RMS_WORD) + ++ sizeof(struct RMS_MsgArgs) ++ / sizeof(RMS_WORD) - 1 + dwLength; ++ if (total >= max) { ++ status = DSP_EFAIL; ++ GT_2trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeCreate: Message args too" ++ " large for buffer! Message args size = %d, max = %d\n", ++ total, max); ++ } ++ /* ++ * Fill in buffer to send to RMS. 
++ * The buffer will have the following format: ++ * ++ * RMS command: ++ * Address of RMS_CreateNode() ++ * Address of node's create function ++ * dummy argument ++ * node type ++ * ++ * Message Args: ++ * max number of messages ++ * segid for message buffer allocation ++ * notification type to use when message is received ++ * length of message arg data ++ * message args data ++ * ++ * Task Args (if task or socket node): ++ * priority ++ * stack size ++ * system stack size ++ * stack segment ++ * misc ++ * number of input streams ++ * pSTRMInDef[] - offsets of STRM definitions for input streams ++ * number of output streams ++ * pSTRMOutDef[] - offsets of STRM definitions for output ++ * streams ++ * STRMInDef[] - array of STRM definitions for input streams ++ * STRMOutDef[] - array of STRM definitions for output streams ++ * ++ * Socket Args (if DAIS socket node): ++ * ++ */ ++ if (DSP_SUCCEEDED(status)) { ++ total = 0; /* Total number of words in buffer so far */ ++ pdwBuf = (RMS_WORD *)hDisp->pBuf; ++ pCommand = (struct RMS_Command *)pdwBuf; ++ pCommand->fxn = (RMS_WORD)(ulRMSFxn); ++ pCommand->arg1 = (RMS_WORD)(ulCreateFxn); ++ if (NODE_GetLoadType(hNode) == NLDR_DYNAMICLOAD) { ++ /* Flush ICACHE on Load */ ++ pCommand->arg2 = 1; /* dummy argument */ ++ } else { ++ /* Do not flush ICACHE */ ++ pCommand->arg2 = 0; /* dummy argument */ ++ } ++ pCommand->data = NODE_GetType(hNode); ++ /* ++ * argsOffset is the offset of the data field in struct ++ * RMS_Command structure. We need this to calculate stream ++ * definition offsets. ++ */ ++ argsOffset = 3; ++ total += sizeof(struct RMS_Command) / sizeof(RMS_WORD); ++ /* Message args */ ++ pMsgArgs = (struct RMS_MsgArgs *) (pdwBuf + total); ++ pMsgArgs->maxMessages = msgArgs.uMaxMessages; ++ pMsgArgs->segid = msgArgs.uSegid; ++ pMsgArgs->notifyType = msgArgs.uNotifyType; ++ pMsgArgs->argLength = msgArgs.uArgLength; ++ total += sizeof(struct RMS_MsgArgs) / sizeof(RMS_WORD) - 1; ++ memcpy(pdwBuf + total, msgArgs.pData, msgArgs.uArgLength); ++ total += dwLength; ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* If node is a task node, copy task create arguments into buffer */ ++ if (nodeType == NODE_TASK || nodeType == NODE_DAISSOCKET) { ++ taskArgs = pArgs->asa.taskArgs; ++ taskArgsOffset = total; ++ total += sizeof(struct RMS_MoreTaskArgs) / sizeof(RMS_WORD) + ++ 1 + taskArgs.uNumInputs + taskArgs.uNumOutputs; ++ /* Copy task arguments */ ++ if (total < max) { ++ total = taskArgsOffset; ++ pMoreTaskArgs = (struct RMS_MoreTaskArgs *)(pdwBuf + ++ total); ++ /* ++ * Get some important info about the node. Note that we ++ * don't just reach into the hNode struct because ++ * that would break the node object's abstraction. 
++ */ ++ GetNodeInfo(hNode, &nodeInfo); ++ GT_2trace(DISP_DebugMask, GT_ENTER, ++ "uExecutionPriority %x, nPriority %x\n", ++ nodeInfo.uExecutionPriority, ++ taskArgs.nPriority); ++ pMoreTaskArgs->priority = nodeInfo.uExecutionPriority; ++ pMoreTaskArgs->stackSize = taskArgs.uStackSize; ++ pMoreTaskArgs->sysstackSize = taskArgs.uSysStackSize; ++ pMoreTaskArgs->stackSeg = taskArgs.uStackSeg; ++ pMoreTaskArgs->heapAddr = taskArgs.uDSPHeapAddr; ++ pMoreTaskArgs->heapSize = taskArgs.uHeapSize; ++ pMoreTaskArgs->misc = taskArgs.ulDaisArg; ++ pMoreTaskArgs->numInputStreams = taskArgs.uNumInputs; ++ total += ++ sizeof(struct RMS_MoreTaskArgs) / sizeof(RMS_WORD); ++ GT_2trace(DISP_DebugMask, GT_7CLASS, ++ "DISP::::uDSPHeapAddr %x, " ++ "uHeapSize %x\n", taskArgs.uDSPHeapAddr, ++ taskArgs.uHeapSize); ++ /* Keep track of pSIOInDef[] and pSIOOutDef[] ++ * positions in the buffer, since this needs to be ++ * filled in later. */ ++ sioInDefOffset = total; ++ total += taskArgs.uNumInputs; ++ pdwBuf[total++] = taskArgs.uNumOutputs; ++ sioOutDefOffset = total; ++ total += taskArgs.uNumOutputs; ++ sioDefsOffset = total; ++ /* Fill SIO defs and offsets */ ++ offset = sioDefsOffset; ++ for (i = 0; i < taskArgs.uNumInputs; i++) { ++ if (DSP_FAILED(status)) ++ break; ++ ++ pdwBuf[sioInDefOffset + i] = ++ (offset - argsOffset) ++ * (sizeof(RMS_WORD) / DSPWORDSIZE); ++ strmDef = taskArgs.strmInDef[i]; ++ status = FillStreamDef(pdwBuf, &total, offset, ++ strmDef, max, uCharsInRMSWord); ++ offset = total; ++ } ++ for (i = 0; (i < taskArgs.uNumOutputs) && ++ (DSP_SUCCEEDED(status)); i++) { ++ pdwBuf[sioOutDefOffset + i] = ++ (offset - argsOffset) ++ * (sizeof(RMS_WORD) / DSPWORDSIZE); ++ strmDef = taskArgs.strmOutDef[i]; ++ status = FillStreamDef(pdwBuf, &total, offset, ++ strmDef, max, uCharsInRMSWord); ++ offset = total; ++ } ++ if (DSP_FAILED(status)) { ++ GT_2trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeCreate: Message" ++ " args to large for buffer! Message args" ++ " size = %d, max = %d\n", total, max); ++ } ++ } else { ++ /* Args won't fit */ ++ status = DSP_EFAIL; ++ GT_2trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeCreate: Message args " ++ " too large for buffer! Message args size = %d" ++ ", max = %d\n", total, max); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ ulBytes = total * sizeof(RMS_WORD); ++ DBC_Assert(ulBytes < (RMS_COMMANDBUFSIZE * sizeof(RMS_WORD))); ++ status = SendMessage(hDisp, NODE_GetTimeout(hNode), ++ ulBytes, pNodeEnv); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeCreate failed! " ++ "status = 0x%x\n", status); ++ } else { ++ /* ++ * Message successfully received from RMS. ++ * Return the status of the Node's create function ++ * on the DSP-side ++ */ ++ status = (((RMS_WORD *)(hDisp->pBuf))[0]); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeCreate, " ++ "DSP-side Node Create failed: 0x%x\n", ++ status); ++ } ++ ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== DISP_NodeDelete ======== ++ * purpose: ++ * Delete a node on the DSP by remotely calling the node's delete function. 
++ * ++ */ ++DSP_STATUS DISP_NodeDelete(struct DISP_OBJECT *hDisp, struct NODE_OBJECT *hNode, ++ u32 ulRMSFxn, u32 ulDeleteFxn, NODE_ENV nodeEnv) ++{ ++ u32 dwArg; ++ struct RMS_Command *pCommand; ++ DSP_STATUS status = DSP_SOK; ++ u32 devType; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); ++ DBC_Require(hNode != NULL); ++ ++ GT_5trace(DISP_DebugMask, GT_ENTER, ++ "DISP_NodeDelete: hDisp: 0x%xthNode: " ++ "0x%x\tulRMSFxn: 0x%x\tulDeleteFxn: 0x%x\tnodeEnv: 0x%x\n", ++ hDisp, hNode, ulRMSFxn, ulDeleteFxn, nodeEnv); ++ ++ status = DEV_GetDevType(hDisp->hDevObject, &devType); ++ ++ if (DSP_SUCCEEDED(status)) { ++ ++ if (devType == DSP_UNIT) { ++ ++ /* ++ * Fill in buffer to send to RMS ++ */ ++ pCommand = (struct RMS_Command *)hDisp->pBuf; ++ pCommand->fxn = (RMS_WORD)(ulRMSFxn); ++ pCommand->arg1 = (RMS_WORD)nodeEnv; ++ pCommand->arg2 = (RMS_WORD)(ulDeleteFxn); ++ pCommand->data = NODE_GetType(hNode); ++ ++ status = SendMessage(hDisp, NODE_GetTimeout(hNode), ++ sizeof(struct RMS_Command), &dwArg); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeDelete failed!" ++ "status = 0x%x\n", status); ++ } else { ++ /* ++ * Message successfully received from RMS. ++ * Return the status of the Node's delete ++ * function on the DSP-side ++ */ ++ status = (((RMS_WORD *)(hDisp->pBuf))[0]); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeDelete, " ++ "DSP-side Node Delete failed: 0x%x\n", ++ status); ++ } ++ } ++ ++ ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== DISP_NodeRun ======== ++ * purpose: ++ * Start execution of a node's execute phase, or resume execution of a node ++ * that has been suspended (via DISP_NodePause()) on the DSP. ++ */ ++DSP_STATUS DISP_NodeRun(struct DISP_OBJECT *hDisp, struct NODE_OBJECT *hNode, ++ u32 ulRMSFxn, u32 ulExecuteFxn, NODE_ENV nodeEnv) ++{ ++ u32 dwArg; ++ struct RMS_Command *pCommand; ++ DSP_STATUS status = DSP_SOK; ++ u32 devType; ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); ++ DBC_Require(hNode != NULL); ++ ++ GT_5trace(DISP_DebugMask, GT_ENTER, "DISP_NodeRun: hDisp: 0x%xthNode: \ ++ 0x%x\tulRMSFxn: 0x%x\tulExecuteFxn: 0x%x\tnodeEnv: 0x%x\n", \ ++ hDisp, hNode, ulRMSFxn, ulExecuteFxn, nodeEnv); ++ ++ status = DEV_GetDevType(hDisp->hDevObject, &devType); ++ ++ if (DSP_SUCCEEDED(status)) { ++ ++ if (devType == DSP_UNIT) { ++ ++ /* ++ * Fill in buffer to send to RMS. ++ */ ++ pCommand = (struct RMS_Command *) hDisp->pBuf; ++ pCommand->fxn = (RMS_WORD) (ulRMSFxn); ++ pCommand->arg1 = (RMS_WORD) nodeEnv; ++ pCommand->arg2 = (RMS_WORD) (ulExecuteFxn); ++ pCommand->data = NODE_GetType(hNode); ++ ++ status = SendMessage(hDisp, NODE_GetTimeout(hNode), ++ sizeof(struct RMS_Command), &dwArg); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeRun failed!" ++ "status = 0x%x\n", status); ++ } else { ++ /* ++ * Message successfully received from RMS. ++ * Return the status of the Node's execute ++ * function on the DSP-side ++ */ ++ status = (((RMS_WORD *)(hDisp->pBuf))[0]); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_NodeRun, DSP-side Node " ++ "Execute failed: 0x%x\n", ++ status); ++ } ++ } ++ ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DeleteDisp ======== ++ * purpose: ++ * Frees the resources allocated for the dispatcher. 
++ */ ++static void DeleteDisp(struct DISP_OBJECT *hDisp) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ ++ if (MEM_IsValidHandle(hDisp, DISP_SIGNATURE)) { ++ pIntfFxns = hDisp->pIntfFxns; ++ ++ /* Free Node Dispatcher resources */ ++ if (hDisp->hChnlFromDsp) { ++ /* Channel close can fail only if the channel handle ++ * is invalid. */ ++ status = (*pIntfFxns->pfnChnlClose) ++ (hDisp->hChnlFromDsp); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Delete: Failed to " ++ "close channel from RMS: 0x%x\n", ++ status); ++ } ++ } ++ if (hDisp->hChnlToDsp) { ++ status = (*pIntfFxns->pfnChnlClose)(hDisp->hChnlToDsp); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "DISP_Delete: Failed to " ++ "close channel to RMS: 0x%x\n", ++ status); ++ } ++ } ++ if (hDisp->pBuf) ++ MEM_Free(hDisp->pBuf); ++ ++ MEM_FreeObject(hDisp); ++ } ++} ++ ++/* ++ * ======== FillStreamDef ======== ++ * purpose: ++ * Fills stream definitions. ++ */ ++static DSP_STATUS FillStreamDef(RMS_WORD *pdwBuf, u32 *ptotal, u32 offset, ++ struct NODE_STRMDEF strmDef, u32 max, ++ u32 uCharsInRMSWord) ++{ ++ struct RMS_StrmDef *pStrmDef; ++ u32 total = *ptotal; ++ u32 uNameLen; ++ u32 dwLength; ++ DSP_STATUS status = DSP_SOK; ++ ++ if (total + sizeof(struct RMS_StrmDef) / sizeof(RMS_WORD) >= max) { ++ status = DSP_EFAIL; ++ } else { ++ pStrmDef = (struct RMS_StrmDef *)(pdwBuf + total); ++ pStrmDef->bufsize = strmDef.uBufsize; ++ pStrmDef->nbufs = strmDef.uNumBufs; ++ pStrmDef->segid = strmDef.uSegid; ++ pStrmDef->align = strmDef.uAlignment; ++ pStrmDef->timeout = strmDef.uTimeout; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* ++ * Since we haven't added the device name yet, subtract ++ * 1 from total. ++ */ ++ total += sizeof(struct RMS_StrmDef) / sizeof(RMS_WORD) - 1; ++ DBC_Require(strmDef.szDevice); ++ dwLength = strlen(strmDef.szDevice) + 1; ++ ++ /* Number of RMS_WORDS needed to hold device name */ ++ uNameLen = (dwLength + uCharsInRMSWord - 1) / uCharsInRMSWord; ++ ++ if (total + uNameLen >= max) { ++ status = DSP_EFAIL; ++ } else { ++ /* ++ * Zero out last word, since the device name may not ++ * extend to completely fill this word. ++ */ ++ pdwBuf[total + uNameLen - 1] = 0; ++ /** TODO USE SERVICES **/ ++ memcpy(pdwBuf + total, strmDef.szDevice, dwLength); ++ total += uNameLen; ++ *ptotal = total; ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== SendMessage ====== ++ * Send command message to RMS, get reply from RMS. ++ */ ++static DSP_STATUS SendMessage(struct DISP_OBJECT *hDisp, u32 dwTimeout, ++ u32 ulBytes, u32 *pdwArg) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct CHNL_OBJECT *hChnl; ++ u32 dwArg = 0; ++ u8 *pBuf; ++ struct CHNL_IOC chnlIOC; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(pdwArg != NULL); ++ ++ *pdwArg = (u32) NULL; ++ pIntfFxns = hDisp->pIntfFxns; ++ hChnl = hDisp->hChnlToDsp; ++ pBuf = hDisp->pBuf; ++ ++ /* Send the command */ ++ status = (*pIntfFxns->pfnChnlAddIOReq) (hChnl, pBuf, ulBytes, 0, ++ 0L, dwArg); ++ ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "SendMessage: Channel AddIOReq to" ++ " RMS failed! Status = 0x%x\n", status); ++ goto func_cont; ++ } ++ status = (*pIntfFxns->pfnChnlGetIOC) (hChnl, dwTimeout, &chnlIOC); ++ if (DSP_SUCCEEDED(status)) { ++ if (!CHNL_IsIOComplete(chnlIOC)) { ++ if (CHNL_IsTimedOut(chnlIOC)) { ++ status = DSP_ETIMEOUT; ++ } else { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "SendMessage failed! 
" ++ "Channel IOC status = 0x%x\n", ++ chnlIOC.status); ++ status = DSP_EFAIL; ++ } ++ } ++ } else { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "SendMessage: Channel GetIOC to" ++ " RMS failed! Status = 0x%x\n", status); ++ } ++func_cont: ++ /* Get the reply */ ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ hChnl = hDisp->hChnlFromDsp; ++ ulBytes = REPLYSIZE; ++ status = (*pIntfFxns->pfnChnlAddIOReq)(hChnl, pBuf, ulBytes, ++ 0, 0L, dwArg); ++ if (DSP_FAILED(status)) { ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "SendMessage: Channel AddIOReq " ++ "from RMS failed! Status = 0x%x\n", status); ++ goto func_end; ++ } ++ status = (*pIntfFxns->pfnChnlGetIOC) (hChnl, dwTimeout, &chnlIOC); ++ if (DSP_SUCCEEDED(status)) { ++ if (CHNL_IsTimedOut(chnlIOC)) { ++ status = DSP_ETIMEOUT; ++ } else if (chnlIOC.cBytes < ulBytes) { ++ /* Did not get all of the reply from the RMS */ ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "SendMessage: Did not get all" ++ "of reply from RMS! Bytes received: %d\n", ++ chnlIOC.cBytes); ++ status = DSP_EFAIL; ++ } else { ++ if (CHNL_IsIOComplete(chnlIOC)) { ++ DBC_Assert(chnlIOC.pBuf == pBuf); ++ status = (*((RMS_WORD *)chnlIOC.pBuf)); ++ *pdwArg = (((RMS_WORD *)(chnlIOC.pBuf))[1]); ++ } else { ++ status = DSP_EFAIL; ++ } ++ } ++ } else { ++ /* GetIOC failed */ ++ GT_1trace(DISP_DebugMask, GT_6CLASS, ++ "SendMessage: Failed to get " ++ "reply from RMS! Status = 0x%x\n", status); ++ } ++func_end: ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/drv.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1840 @@ ++/* ++ * drv.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== drv.c ======== ++ * Description: ++ * DSP/BIOS Bridge resource allocation module. ++ * ++ * Public Functions: ++ * DRV_Create ++ * DRV_Destroy ++ * DRV_Exit ++ * DRV_GetDevObject ++ * DRV_GetDevExtension ++ * DRV_GetFirstDevObject ++ * DRV_GetNextDevObject ++ * DRV_GetNextDevExtension ++ * DRV_Init ++ * DRV_InsertDevObject ++ * DRV_RemoveDevObject ++ * DRV_RequestResources ++ * DRV_ReleaseResources ++ * ++ *! Revision History ++ *! ======== ======== ++ *! 19-Apr-2004 sb: Replaced OS specific APIs with MEM_AllocPhysMem and ++ MEM_FreePhysMem. Fixed warnings. Cosmetic updates. ++ *! 12-Apr-2004 hp: IVA clean up during bridge-uninstall ++ *! 05-Jan-2004 vp: Updated for 24xx platform ++ *! 21-Mar-2003 sb: Get SHM size from registry ++ *! 10-Feb-2003 vp: Code review updates ++ *! 18-Oct-2002 vp: Ported to Linux platform ++ *! 30-Oct-2000 kc: Modified usage of REG_SetValue. ++ *! 06-Sep-2000 jeh Read channel info into struct CFG_HOSTRES in ++ *! RequestISAResources() ++ *! 21-Sep-2000 rr: numwindows is calculated instead of default value in ++ *! RequestISAResources. ++ *! 07-Aug-2000 rr: static list of dev objects removed. ++ *! 
27-Jul-2000 rr: RequestResources split into two(Request and Release) ++ *! Device extension created to hold the DevNodeString. ++ *! 17-Jul-2000 rr: Driver Object holds the list of Device Objects. ++ *! Added DRV_Create, DRV_Destroy, DRV_GetDevObject, ++ *! DRV_GetFirst/NextDevObject, DRV_Insert/RemoveDevObject. ++ *! 09-May-2000 rr: PCI Support is not L301 specific.Use of MEM_Calloc ++ *! instead of MEM_Alloc. ++ *! 28-Mar-2000 rr: PCI Support added. L301 Specific. TBD. ++ *! 03-Feb-2000 rr: GT and Module Init/exit Changes. Merged with kc. ++ *! 19-Jan-2000 rr: DBC_Ensure in RequestPCMCIA moved within PCCARD ifdef ++ *! 29-Dec-1999 rr: PCCard support for any slot.Bus type stored in the ++ *! struct CFG_HOSTRES Structure. ++ *! 17-Dec-1999 rr: if PCCARD_Init fails we return DSP_EFAIL. ++ *! DBC_Ensure checks for sucess and pDevice != NULL ++ *! 11-Dec-1999 ag: #define "Isa" renamed to "IsaBus". ++ *! 09-Dec-1999 rr: windows.h included to remove warnings. ++ *! 02-Dec-1999 rr: struct GT_Mask is with in if DEBUG. Request resources checks ++ *! status while making call to Reg functions. ++ *! 23-Nov-1999 rr: windows.h included ++ *! 19-Nov-1999 rr: DRV_RELEASE bug while setting the registry to zero. ++ *! fixed. ++ *! 12-Nov-1999 rr: RequestResources() reads values from the registry. ++ *! Hardcoded bIRQRegister define removed. ++ *! 05-Nov-1999 rr: Added hardcoded device interrupt. ++ *! 25-Oct-1999 rr: Resource structure removed. Now it uses the Host ++ *! Resource structure directly. ++ *! 15-Oct-1999 rr: Resource Structure modified. See drv.h ++ *! dwBusType taken from the registry.Hard coded ++ *! registry entries removed. ++ *! 05-Oct-1999 rr: Calling DEV_StartDevice moved to wcdce.c. DRV_Register ++ *! MiniDriver has been renamed to DRV_RequestResources. ++ *! DRV_UnRegisterMiniDriver fxn removed. ++ *! 24-Sep-1999 rr: Significant changes to the RegisterMiniDriver fxns. ++ *! Now it is simpler. IT stores the dev node in the ++ *! registry, assign resources and calls the DEV_Start. ++ *! 10-Sep-1999 rr: Register Minidriver modified. ++ *! - Resource structure follows the NT model ++ *! 08-Aug-1999 rr: Adopted for WinCE. Exports Fxns removed. Hull Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++#ifndef RES_CLEANUP_DISABLE ++#include ++#include ++#include ++#include ++#include ++#include ++#endif ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define SIGNATURE 0x5f52474d /* "DRV_" (in reverse) */ ++ ++struct DRV_OBJECT { ++ u32 dwSignature; ++ struct LST_LIST *devList; ++ struct LST_LIST *devNodeString; ++#ifndef RES_CLEANUP_DISABLE ++ struct PROCESS_CONTEXT *procCtxtList; ++#endif ++}; ++ ++/* ++ * This is the Device Extension. 
Named with the Prefix ++ * DRV_ since it is living in this module ++ */ ++struct DRV_EXT { ++ struct LST_ELEM link; ++ char szString[MAXREGPATHLENGTH]; ++}; ++ ++/* ----------------------------------- Globals */ ++static s32 cRefs; ++ ++#if GT_TRACE ++extern struct GT_Mask curTrace; ++#endif ++ ++/* ----------------------------------- Function Prototypes */ ++static DSP_STATUS RequestBridgeResources(u32 dwContext, s32 fRequest); ++static DSP_STATUS RequestBridgeResourcesDSP(u32 dwContext, s32 fRequest); ++ ++#ifndef RES_CLEANUP_DISABLE ++/* GPP PROCESS CLEANUP CODE */ ++ ++static DSP_STATUS PrintProcessInformation(void); ++static DSP_STATUS DRV_ProcFreeNodeRes(HANDLE hPCtxt); ++static DSP_STATUS DRV_ProcFreeSTRMRes(HANDLE hPCtxt); ++extern enum NODE_STATE NODE_GetState(HANDLE hNode); ++ ++/* Get the process context list from driver object */ ++ ++/* Set the Process ID */ ++DSP_STATUS DRV_ProcSetPID(HANDLE hPCtxt, s32 hProcess) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Assert(hPCtxt != NULL); ++ ++ pCtxt->pid = hProcess; ++ return status; ++} ++ ++ ++/* Getting the head of the process context list */ ++DSP_STATUS DRV_GetProcCtxtList(struct PROCESS_CONTEXT **pPctxt, ++ struct DRV_OBJECT *hDrvObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DRV_OBJECT *pDrvObject = (struct DRV_OBJECT *)hDrvObject; ++ ++ DBC_Assert(hDrvObject != NULL); ++ GT_2trace(curTrace, GT_ENTER, ++ "DRV_GetProcCtxtList: 2 *pPctxt:%x, pDrvObject" ++ ":%x", *pPctxt, pDrvObject); ++ *pPctxt = pDrvObject->procCtxtList; ++ GT_2trace(curTrace, GT_ENTER, ++ "DRV_GetProcCtxtList: 3 *pPctxt:%x, pDrvObject" ++ ":%x", *pPctxt, pDrvObject); ++ return status; ++} ++ ++/* Add a new process context to process context list */ ++DSP_STATUS DRV_InsertProcContext(struct DRV_OBJECT *hDrVObject, HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT **pCtxt = (struct PROCESS_CONTEXT **)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct PROCESS_CONTEXT *pCtxtList = NULL; ++ struct DRV_OBJECT *hDRVObject; ++ ++ GT_0trace(curTrace, GT_ENTER, "\n In DRV_InsertProcContext\n"); ++ ++ status = CFG_GetObject((u32 *)&hDRVObject, REG_DRV_OBJECT); ++ DBC_Assert(hDRVObject != NULL); ++ ++ *pCtxt = MEM_Calloc(1 * sizeof(struct PROCESS_CONTEXT), MEM_PAGED); ++ if (!*pCtxt) { ++ pr_err("DSP: MEM_Calloc failed in DRV_InsertProcContext\n"); ++ return DSP_EMEMORY; ++ } ++ ++ spin_lock_init(&(*pCtxt)->proc_list_lock); ++ INIT_LIST_HEAD(&(*pCtxt)->processor_list); ++ ++ GT_0trace(curTrace, GT_ENTER, ++ "\n In DRV_InsertProcContext Calling " ++ "DRV_GetProcCtxtList\n"); ++ DRV_GetProcCtxtList(&pCtxtList, hDRVObject); ++ GT_0trace(curTrace, GT_ENTER, ++ "\n In DRV_InsertProcContext After Calling " ++ "DRV_GetProcCtxtList\n"); ++ if (pCtxtList != NULL) { ++ GT_0trace(curTrace, GT_ENTER, ++ "\n In DRV_InsertProcContext and pCtxt is " ++ "not Null\n"); ++ while (pCtxtList->next != NULL) ++ pCtxtList = pCtxtList->next; ++ ++ pCtxtList->next = *pCtxt; ++ } else { ++ GT_0trace(curTrace, GT_ENTER, ++ "\n In DRV_InsertProcContext and " ++ "pCtxt is Null\n"); ++ hDRVObject->procCtxtList = *pCtxt; ++ } ++ return status; ++} ++ ++/* Delete a process context from process resource context list */ ++DSP_STATUS DRV_RemoveProcContext(struct DRV_OBJECT *hDRVObject, ++ HANDLE pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROCESS_CONTEXT *pr_ctxt_list = NULL; ++ struct PROCESS_CONTEXT *uninitialized_var(ptr_prev); ++ ++ DBC_Assert(hDRVObject != NULL); ++ ++ GT_0trace(curTrace, GT_ENTER, 
"DRV_RemoveProcContext: 12"); ++ DRV_GetProcCtxtList(&pr_ctxt_list, hDRVObject); ++ ++ /* Special condition */ ++ if (pr_ctxt_list == pr_ctxt) { ++ hDRVObject->procCtxtList = NULL; ++ goto func_cont; ++ } ++ ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveProcContext: 13"); ++ while (pr_ctxt_list && (pr_ctxt_list != pr_ctxt)) { ++ ptr_prev = pr_ctxt_list; ++ pr_ctxt_list = pr_ctxt_list->next; ++ GT_0trace(curTrace, GT_ENTER, ++ "DRV_RemoveProcContext: 2"); ++ } ++ ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveProcContext: 3"); ++ ++ if (!pr_ctxt_list) ++ return DSP_ENOTFOUND; ++ else ++ ptr_prev->next = pr_ctxt_list->next; ++ ++func_cont: ++ MEM_Free(pr_ctxt); ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveProcContext: 7"); ++ ++ return status; ++} ++ ++/* Update the state of process context */ ++DSP_STATUS DRV_ProcUpdatestate(HANDLE hPCtxt, enum GPP_PROC_RES_STATE status) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status1 = DSP_SOK; ++ if (pCtxt != NULL) { ++ pCtxt->resState = status; ++ } else { ++ GT_0trace(curTrace, GT_ENTER, ++ "DRV_ProcUpdatestate: Failed to update " ++ "process state"); ++ } ++ return status1; ++} ++ ++/* Allocate and add a node resource element ++* This function is called from .Node_Allocate. */ ++DSP_STATUS DRV_InsertNodeResElement(HANDLE hNode, HANDLE hNodeRes, ++ HANDLE hPCtxt) ++{ ++ struct NODE_RES_OBJECT **pNodeRes = (struct NODE_RES_OBJECT **)hNodeRes; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct NODE_RES_OBJECT *pTempNodeRes = NULL; ++ GT_0trace(curTrace, GT_ENTER, "DRV_InsertNodeResElement: 1"); ++ *pNodeRes = (struct NODE_RES_OBJECT *)MEM_Calloc ++ (1 * sizeof(struct NODE_RES_OBJECT), MEM_PAGED); ++ DBC_Assert(hPCtxt != NULL); ++ if ((*pNodeRes == NULL) || (hPCtxt == NULL)) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_InsertNodeResElement: 12"); ++ status = DSP_EHANDLE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ (*pNodeRes)->hNode = hNode; ++ if (pCtxt->pNodeList != NULL) { ++ pTempNodeRes = pCtxt->pNodeList; ++ while (pTempNodeRes->next != NULL) ++ pTempNodeRes = pTempNodeRes->next; ++ ++ pTempNodeRes->next = *pNodeRes; ++ GT_0trace(curTrace, GT_ENTER, ++ "DRV_InsertNodeResElement: 2"); ++ } else { ++ pCtxt->pNodeList = *pNodeRes; ++ GT_0trace(curTrace, GT_ENTER, ++ "DRV_InsertNodeResElement: 3"); ++ } ++ } ++ GT_0trace(curTrace, GT_ENTER, "DRV_InsertNodeResElement: 4"); ++ return status; ++} ++ ++/* Release all Node resources and its context ++* This is called from .Node_Delete. 
*/ ++DSP_STATUS DRV_RemoveNodeResElement(HANDLE hNodeRes, HANDLE hPCtxt) ++{ ++ struct NODE_RES_OBJECT *pNodeRes = (struct NODE_RES_OBJECT *)hNodeRes; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct NODE_RES_OBJECT *pTempNode2 = pCtxt->pNodeList; ++ struct NODE_RES_OBJECT *pTempNode = pCtxt->pNodeList; ++ ++ DBC_Assert(hPCtxt != NULL); ++ GT_0trace(curTrace, GT_ENTER, "\nDRV_RemoveNodeResElement: 1\n"); ++ while ((pTempNode != NULL) && (pTempNode != pNodeRes)) { ++ pTempNode2 = pTempNode; ++ pTempNode = pTempNode->next; ++ } ++ if (pCtxt->pNodeList == pNodeRes) ++ pCtxt->pNodeList = pNodeRes->next; ++ ++ if (pTempNode == NULL) ++ return DSP_ENOTFOUND; ++ else if (pTempNode2->next != NULL) ++ pTempNode2->next = pTempNode2->next->next; ++ ++ MEM_Free(pTempNode); ++ return status; ++} ++ ++/* Actual Node De-Allocation */ ++static DSP_STATUS DRV_ProcFreeNodeRes(HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct NODE_RES_OBJECT *pNodeList = NULL; ++ struct NODE_RES_OBJECT *pNodeRes = NULL; ++ u32 nState; ++ ++ DBC_Assert(hPCtxt != NULL); ++ pNodeList = pCtxt->pNodeList; ++ while (pNodeList != NULL) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_ProcFreeNodeRes: 1"); ++ pNodeRes = pNodeList; ++ pNodeList = pNodeList->next; ++ if (pNodeRes->nodeAllocated) { ++ nState = NODE_GetState(pNodeRes->hNode) ; ++ GT_1trace(curTrace, GT_5CLASS, ++ "DRV_ProcFreeNodeRes: Node state %x\n", nState); ++ if (nState <= NODE_DELETING) { ++ if ((nState == NODE_RUNNING) || ++ (nState == NODE_PAUSED) || ++ (nState == NODE_TERMINATING)) { ++ GT_1trace(curTrace, GT_5CLASS, ++ "Calling Node_Terminate for Node:" ++ " 0x%x\n", pNodeRes->hNode); ++ status = NODE_Terminate ++ (pNodeRes->hNode, &status); ++ GT_1trace(curTrace, GT_5CLASS, ++ "Calling Node_Delete for Node:" ++ " 0x%x\n", pNodeRes->hNode); ++ status = NODE_Delete(pNodeRes->hNode, ++ pCtxt); ++ GT_1trace(curTrace, GT_5CLASS, ++ "the status after the NodeDelete %x\n", ++ status); ++ } else if ((nState == NODE_ALLOCATED) ++ || (nState == NODE_CREATED)) ++ status = NODE_Delete(pNodeRes->hNode, ++ pCtxt); ++ } ++ } ++ } ++ return status; ++} ++ ++/* Allocate the DMM resource element ++* This is called from Proc_Map. 
after the actual resource is allocated */ ++DSP_STATUS DRV_InsertDMMResElement(HANDLE hDMMRes, HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ struct DMM_RES_OBJECT **pDMMRes = (struct DMM_RES_OBJECT **)hDMMRes; ++ DSP_STATUS status = DSP_SOK; ++ struct DMM_RES_OBJECT *pTempDMMRes = NULL; ++ ++ *pDMMRes = (struct DMM_RES_OBJECT *) ++ MEM_Calloc(1 * sizeof(struct DMM_RES_OBJECT), MEM_PAGED); ++ DBC_Assert(hPCtxt != NULL); ++ GT_0trace(curTrace, GT_ENTER, "DRV_InsertDMMResElement: 1"); ++ if ((*pDMMRes == NULL) || (hPCtxt == NULL)) { ++ GT_0trace(curTrace, GT_5CLASS, "DRV_InsertDMMResElement: 2"); ++ status = DSP_EHANDLE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ if (pCtxt->pDMMList != NULL) { ++ GT_0trace(curTrace, GT_5CLASS, ++ "DRV_InsertDMMResElement: 3"); ++ pTempDMMRes = pCtxt->pDMMList; ++ while (pTempDMMRes->next != NULL) ++ pTempDMMRes = pTempDMMRes->next; ++ ++ pTempDMMRes->next = *pDMMRes; ++ } else { ++ pCtxt->pDMMList = *pDMMRes; ++ GT_0trace(curTrace, GT_5CLASS, ++ "DRV_InsertDMMResElement: 4"); ++ } ++ } ++ GT_0trace(curTrace, GT_ENTER, "DRV_InsertDMMResElement: 5"); ++ return status; ++} ++ ++ ++ ++/* Release DMM resource element context ++* This is called from Proc_UnMap. after the actual resource is freed */ ++DSP_STATUS DRV_RemoveDMMResElement(HANDLE hDMMRes, HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ struct DMM_RES_OBJECT *pDMMRes = (struct DMM_RES_OBJECT *)hDMMRes; ++ DSP_STATUS status = DSP_SOK; ++ struct DMM_RES_OBJECT *pTempDMMRes2 = NULL; ++ struct DMM_RES_OBJECT *pTempDMMRes = NULL; ++ ++ DBC_Assert(hPCtxt != NULL); ++ pTempDMMRes2 = pCtxt->pDMMList; ++ pTempDMMRes = pCtxt->pDMMList; ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 1"); ++ while ((pTempDMMRes != NULL) && (pTempDMMRes != pDMMRes)) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 2"); ++ pTempDMMRes2 = pTempDMMRes; ++ pTempDMMRes = pTempDMMRes->next; ++ } ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 3"); ++ if (pCtxt->pDMMList == pTempDMMRes) ++ pCtxt->pDMMList = pTempDMMRes->next; ++ ++ if (pTempDMMRes == NULL) ++ return DSP_ENOTFOUND; ++ else if (pTempDMMRes2->next != NULL) ++ pTempDMMRes2->next = pTempDMMRes2->next->next; ++ ++ MEM_Free(pDMMRes); ++ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 4"); ++ return status; ++} ++ ++/* Update DMM resource status */ ++DSP_STATUS DRV_UpdateDMMResElement(HANDLE hDMMRes, u32 pMpuAddr, u32 ulSize, ++ u32 pReqAddr, u32 pMapAddr, ++ HANDLE hProcessor) ++{ ++ struct DMM_RES_OBJECT *pDMMRes = (struct DMM_RES_OBJECT *)hDMMRes; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Assert(hDMMRes != NULL); ++ pDMMRes->ulMpuAddr = pMpuAddr; ++ pDMMRes->ulDSPAddr = pMapAddr; ++ pDMMRes->ulDSPResAddr = pReqAddr; ++ pDMMRes->dmmSize = ulSize; ++ pDMMRes->hProcessor = hProcessor; ++ pDMMRes->dmmAllocated = 1; ++ ++ return status; ++} ++ ++/* Actual DMM De-Allocation */ ++DSP_STATUS DRV_ProcFreeDMMRes(HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct DMM_RES_OBJECT *pDMMList = pCtxt->pDMMList; ++ struct DMM_RES_OBJECT *pDMMRes = NULL; ++ ++ DBC_Assert(hPCtxt != NULL); ++ GT_0trace(curTrace, GT_ENTER, "\nDRV_ProcFreeDMMRes: 1\n"); ++ while (pDMMList != NULL) { ++ pDMMRes = pDMMList; ++ pDMMList = pDMMList->next; ++ if (pDMMRes->dmmAllocated) { ++ status = PROC_UnMap(pDMMRes->hProcessor, ++ (void *)pDMMRes->ulDSPResAddr, pCtxt); ++ status = 
PROC_UnReserveMemory(pDMMRes->hProcessor, ++ (void *)pDMMRes->ulDSPResAddr); ++ pDMMRes->dmmAllocated = 0; ++ } ++ } ++ return status; ++} ++ ++ ++/* Release all DMM resources and its context ++* This is called from .bridge_release. */ ++DSP_STATUS DRV_RemoveAllDMMResElements(HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct DMM_RES_OBJECT *pTempDMMRes2 = NULL; ++ struct DMM_RES_OBJECT *pTempDMMRes = NULL; ++ ++ DBC_Assert(pCtxt != NULL); ++ DRV_ProcFreeDMMRes(pCtxt); ++ pTempDMMRes = pCtxt->pDMMList; ++ while (pTempDMMRes != NULL) { ++ pTempDMMRes2 = pTempDMMRes; ++ pTempDMMRes = pTempDMMRes->next; ++ MEM_Free(pTempDMMRes2); ++ } ++ pCtxt->pDMMList = NULL; ++ return status; ++} ++ ++DSP_STATUS DRV_GetDMMResElement(u32 pMapAddr, HANDLE hDMMRes, HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ struct DMM_RES_OBJECT **pDMMRes = (struct DMM_RES_OBJECT **)hDMMRes; ++ DSP_STATUS status = DSP_SOK; ++ struct DMM_RES_OBJECT *pTempDMM2 = NULL; ++ struct DMM_RES_OBJECT *pTempDMM = NULL; ++ ++ DBC_Assert(hPCtxt != NULL); ++ pTempDMM = pCtxt->pDMMList; ++ while ((pTempDMM != NULL) && (pTempDMM->ulDSPAddr != pMapAddr)) { ++ GT_3trace(curTrace, GT_ENTER, ++ "DRV_GetDMMResElement: 2 pTempDMM:%x " ++ "pTempDMM->ulDSPAddr:%x pMapAddr:%x\n", pTempDMM, ++ pTempDMM->ulDSPAddr, pMapAddr); ++ pTempDMM2 = pTempDMM; ++ pTempDMM = pTempDMM->next; ++ } ++ if (pTempDMM != NULL) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_GetDMMResElement: 3"); ++ *pDMMRes = pTempDMM; ++ } else { ++ status = DSP_ENOTFOUND; ++ } GT_0trace(curTrace, GT_ENTER, "DRV_GetDMMResElement: 4"); ++ return status; ++} ++ ++/* Update Node allocation status */ ++void DRV_ProcNodeUpdateStatus(HANDLE hNodeRes, s32 status) ++{ ++ struct NODE_RES_OBJECT *pNodeRes = (struct NODE_RES_OBJECT *)hNodeRes; ++ DBC_Assert(hNodeRes != NULL); ++ pNodeRes->nodeAllocated = status; ++} ++ ++/* Update Node Heap status */ ++void DRV_ProcNodeUpdateHeapStatus(HANDLE hNodeRes, s32 status) ++{ ++ struct NODE_RES_OBJECT *pNodeRes = (struct NODE_RES_OBJECT *)hNodeRes; ++ DBC_Assert(hNodeRes != NULL); ++ pNodeRes->heapAllocated = status; ++} ++ ++/* Release all Node resources and its context ++* This is called from .bridge_release. 
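++* DRV_ProcFreeNodeRes() first terminates and deletes any nodes still allocated;
++* every NODE_RES_OBJECT is then freed and pCtxt->pNodeList is set to NULL.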
++*/ ++DSP_STATUS DRV_RemoveAllNodeResElements(HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct NODE_RES_OBJECT *pTempNode2 = NULL; ++ struct NODE_RES_OBJECT *pTempNode = NULL; ++ ++ DBC_Assert(hPCtxt != NULL); ++ DRV_ProcFreeNodeRes(pCtxt); ++ pTempNode = pCtxt->pNodeList; ++ while (pTempNode != NULL) { ++ pTempNode2 = pTempNode; ++ pTempNode = pTempNode->next; ++ MEM_Free(pTempNode2); ++ } ++ pCtxt->pNodeList = NULL; ++ return status; ++} ++ ++/* Getting the node resource element */ ++ ++DSP_STATUS DRV_GetNodeResElement(HANDLE hNode, HANDLE hNodeRes, HANDLE hPCtxt) ++{ ++ struct NODE_RES_OBJECT **nodeRes = (struct NODE_RES_OBJECT **)hNodeRes; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct NODE_RES_OBJECT *pTempNode2 = NULL; ++ struct NODE_RES_OBJECT *pTempNode = NULL; ++ ++ DBC_Assert(hPCtxt != NULL); ++ pTempNode = pCtxt->pNodeList; ++ GT_0trace(curTrace, GT_ENTER, "DRV_GetNodeResElement: 1"); ++ while ((pTempNode != NULL) && (pTempNode->hNode != hNode)) { ++ pTempNode2 = pTempNode; ++ pTempNode = pTempNode->next; ++ } ++ if (pTempNode != NULL) ++ *nodeRes = pTempNode; ++ else ++ status = DSP_ENOTFOUND; ++ ++ return status; ++} ++ ++ ++ ++/* Allocate the STRM resource element ++* This is called after the actual resource is allocated ++*/ ++DSP_STATUS DRV_ProcInsertSTRMResElement(HANDLE hStreamHandle, HANDLE hSTRMRes, ++ HANDLE hPCtxt) ++{ ++ struct STRM_RES_OBJECT **pSTRMRes = (struct STRM_RES_OBJECT **)hSTRMRes; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_RES_OBJECT *pTempSTRMRes = NULL; ++ DBC_Assert(hPCtxt != NULL); ++ ++ *pSTRMRes = (struct STRM_RES_OBJECT *) ++ MEM_Calloc(1 * sizeof(struct STRM_RES_OBJECT), MEM_PAGED); ++ if ((*pSTRMRes == NULL) || (hPCtxt == NULL)) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_InsertSTRMResElement: 2"); ++ status = DSP_EHANDLE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ (*pSTRMRes)->hStream = hStreamHandle; ++ if (pCtxt->pSTRMList != NULL) { ++ GT_0trace(curTrace, GT_ENTER, ++ "DRV_InsertiSTRMResElement: 3"); ++ pTempSTRMRes = pCtxt->pSTRMList; ++ while (pTempSTRMRes->next != NULL) ++ pTempSTRMRes = pTempSTRMRes->next; ++ ++ pTempSTRMRes->next = *pSTRMRes; ++ } else { ++ pCtxt->pSTRMList = *pSTRMRes; ++ GT_0trace(curTrace, GT_ENTER, ++ "DRV_InsertSTRMResElement: 4"); ++ } ++ } ++ return status; ++} ++ ++ ++ ++/* Release Stream resource element context ++* This function called after the actual resource is freed ++*/ ++DSP_STATUS DRV_ProcRemoveSTRMResElement(HANDLE hSTRMRes, HANDLE hPCtxt) ++{ ++ struct STRM_RES_OBJECT *pSTRMRes = (struct STRM_RES_OBJECT *)hSTRMRes; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_RES_OBJECT *pTempSTRMRes2 = pCtxt->pSTRMList; ++ struct STRM_RES_OBJECT *pTempSTRMRes = pCtxt->pSTRMList; ++ ++ DBC_Assert(hPCtxt != NULL); ++ while ((pTempSTRMRes != NULL) && (pTempSTRMRes != pSTRMRes)) { ++ pTempSTRMRes2 = pTempSTRMRes; ++ pTempSTRMRes = pTempSTRMRes->next; ++ } ++ if (pCtxt->pSTRMList == pTempSTRMRes) ++ pCtxt->pSTRMList = pTempSTRMRes->next; ++ ++ if (pTempSTRMRes == NULL) ++ status = DSP_ENOTFOUND; ++ else if (pTempSTRMRes2->next != NULL) ++ pTempSTRMRes2->next = pTempSTRMRes2->next->next; ++ ++ MEM_Free(pSTRMRes); ++ return status; ++} ++ ++ ++/* Actual Stream De-Allocation */ ++static DSP_STATUS DRV_ProcFreeSTRMRes(HANDLE hPCtxt) ++{ ++ struct 
PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_SOK; ++ u8 **apBuffer = NULL; ++ struct STRM_RES_OBJECT *pSTRMList = NULL; ++ struct STRM_RES_OBJECT *pSTRMRes = NULL; ++ u8 *pBufPtr; ++ u32 ulBytes; ++ u32 dwArg; ++ s32 ulBufSize; ++ ++ ++ DBC_Assert(hPCtxt != NULL); ++ pSTRMList = pCtxt->pSTRMList; ++ while (pSTRMList != NULL) { ++ pSTRMRes = pSTRMList; ++ pSTRMList = pSTRMList->next; ++ if (pSTRMRes->uNumBufs != 0) { ++ apBuffer = MEM_Alloc((pSTRMRes->uNumBufs * ++ sizeof(u8 *)), MEM_NONPAGED); ++ status = STRM_FreeBuffer(pSTRMRes->hStream, apBuffer, ++ pSTRMRes->uNumBufs, pCtxt); ++ MEM_Free(apBuffer); ++ } ++ status = STRM_Close(pSTRMRes->hStream, pCtxt); ++ if (DSP_FAILED(status)) { ++ if (status == DSP_EPENDING) { ++ status = STRM_Reclaim(pSTRMRes->hStream, ++ &pBufPtr, &ulBytes, ++ (u32 *)&ulBufSize, &dwArg); ++ if (DSP_SUCCEEDED(status)) ++ status = STRM_Close(pSTRMRes->hStream, ++ pCtxt); ++ ++ } ++ } ++ } ++ return status1; ++} ++ ++/* Release all Stream resources and its context ++* This is called from .bridge_release. ++*/ ++DSP_STATUS DRV_RemoveAllSTRMResElements(HANDLE hPCtxt) ++{ ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_RES_OBJECT *pTempSTRMRes2 = NULL; ++ struct STRM_RES_OBJECT *pTempSTRMRes = NULL; ++ ++ DBC_Assert(hPCtxt != NULL); ++ DRV_ProcFreeSTRMRes(pCtxt); ++ pTempSTRMRes = pCtxt->pSTRMList; ++ while (pTempSTRMRes != NULL) { ++ pTempSTRMRes2 = pTempSTRMRes; ++ pTempSTRMRes = pTempSTRMRes->next; ++ MEM_Free(pTempSTRMRes2); ++ } ++ pCtxt->pSTRMList = NULL; ++ return status; ++} ++ ++ ++/* Getting the stream resource element */ ++DSP_STATUS DRV_GetSTRMResElement(HANDLE hStrm, HANDLE hSTRMRes, HANDLE hPCtxt) ++{ ++ struct STRM_RES_OBJECT **STRMRes = (struct STRM_RES_OBJECT **)hSTRMRes; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_RES_OBJECT *pTempSTRM2 = NULL; ++ struct STRM_RES_OBJECT *pTempSTRM = pCtxt->pSTRMList; ++ ++ DBC_Assert(hPCtxt != NULL); ++ while ((pTempSTRM != NULL) && (pTempSTRM->hStream != hStrm)) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 2"); ++ pTempSTRM2 = pTempSTRM; ++ pTempSTRM = pTempSTRM->next; ++ } ++ if (pTempSTRM != NULL) { ++ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 3"); ++ *STRMRes = pTempSTRM; ++ } else { ++ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 4"); ++ status = DSP_ENOTFOUND; ++ } ++ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 5"); ++ return status; ++} ++ ++/* Updating the stream resource element */ ++DSP_STATUS DRV_ProcUpdateSTRMRes(u32 uNumBufs, HANDLE hSTRMRes, HANDLE hPCtxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct STRM_RES_OBJECT **STRMRes = (struct STRM_RES_OBJECT **)hSTRMRes; ++ ++ DBC_Assert(hPCtxt != NULL); ++ (*STRMRes)->uNumBufs = uNumBufs; ++ return status; ++} ++ ++/* Displaying the resources allocated by a process */ ++DSP_STATUS DRV_ProcDisplayResInfo(u8 *pBuf1, u32 *pSize) ++{ ++ struct PROCESS_CONTEXT *pCtxt = NULL; ++ struct NODE_RES_OBJECT *pNodeRes = NULL; ++ struct DMM_RES_OBJECT *pDMMRes = NULL; ++ struct STRM_RES_OBJECT *pSTRMRes = NULL; ++ struct DSPHEAP_RES_OBJECT *pDSPHEAPRes = NULL; ++ u32 tempCount = 1; ++ HANDLE hDrvObject = NULL; ++ void *pBuf = pBuf1; ++ u8 pTempBuf[250]; ++ u32 tempStrLen = 0, tempStrLen2 = 0; ++ DSP_STATUS status = DSP_SOK; ++ ++ CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ DRV_GetProcCtxtList(&pCtxt, (struct 
DRV_OBJECT *)hDrvObject); ++ GT_0trace(curTrace, GT_ENTER, "*********************" ++ "DRV_ProcDisplayResourceInfo:*\n"); ++ while (pCtxt != NULL) { ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "-------------------------------------" ++ "-----------------------------------\n"); ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ if (pCtxt->resState == PROC_RES_ALLOCATED) { ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "GPP Process Resource State: " ++ "pCtxt->resState = PROC_RES_ALLOCATED, " ++ " Process ID: %d\n", pCtxt->pid); ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ } else { ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "GPP Resource State: pCtxt->resState" ++ " = PROC_RES_DEALLOCATED, Process ID:%d\n", ++ pCtxt->pid); ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ } ++ pNodeRes = pCtxt->pNodeList; ++ tempCount = 1; ++ while (pNodeRes != NULL) { ++ GT_2trace(curTrace, GT_ENTER, ++ "DRV_ProcDisplayResourceInfo: #:%d " ++ "pCtxt->pNodeList->hNode:%x\n", ++ tempCount, pNodeRes->hNode); ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "Node Resource Information: Node #" ++ " %d Node Handle hNode:0X%x\n", ++ tempCount, (u32)pNodeRes->hNode); ++ pNodeRes = pNodeRes->next; ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ tempCount++; ++ } ++ tempCount = 1; ++ pDSPHEAPRes = pCtxt->pDSPHEAPList; ++ while (pDSPHEAPRes != NULL) { ++ GT_2trace(curTrace, GT_ENTER, ++ "DRV_ProcDisplayResourceInfo: #:%d " ++ "pCtxt->pDSPHEAPRList->ulMpuAddr:%x\n", ++ tempCount, pDSPHEAPRes->ulMpuAddr); ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "DSP Heap Resource Info: HEAP # %d" ++ " Mapped GPP Address: 0x%x, size: 0x%x\n", ++ tempCount, (u32)pDSPHEAPRes->ulMpuAddr, ++ (u32)pDSPHEAPRes->heapSize); ++ pDSPHEAPRes = pDSPHEAPRes->next; ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ tempCount++; ++ } ++ tempCount = 1; ++ pDMMRes = pCtxt->pDMMList; ++ while (pDMMRes != NULL) { ++ GT_2trace(curTrace, GT_ENTER, ++ "DRV_ProcDisplayResourceInfo: #:%d " ++ " pCtxt->pDMMList->ulMpuAddr:%x\n", ++ tempCount, ++ pDMMRes->ulMpuAddr); ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "DMM Resource Info: DMM # %d Mapped" ++ " GPP Address: 0x%x, size: 0x%x\n", ++ tempCount, (u32)pDMMRes->ulMpuAddr, ++ (u32)pDMMRes->dmmSize); ++ pDMMRes = pDMMRes->next; ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ tempCount++; ++ } ++ tempCount = 1; ++ pSTRMRes = pCtxt->pSTRMList; ++ while (pSTRMRes != NULL) { ++ GT_2trace(curTrace, GT_ENTER, ++ "DRV_ProcDisplayResourceInfo: #:%d " ++ "pCtxt->pSTRMList->hStream:%x\n", tempCount, ++ pSTRMRes->hStream); ++ tempStrLen2 = sprintf((char *)pTempBuf, ++ "Stream Resource info: STRM # %d " ++ "Stream Handle: 0x%x \n", ++ tempCount, (u32)pSTRMRes->hStream); ++ pSTRMRes = pSTRMRes->next; ++ tempStrLen2 += 2; ++ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); ++ tempStrLen += tempStrLen2; ++ tempCount++; ++ } ++ pCtxt = pCtxt->next; ++ } ++ *pSize = tempStrLen; ++ status = PrintProcessInformation(); ++ GT_0trace(curTrace, GT_ENTER, "*********************" ++ "DRV_ProcDisplayResourceInfo:**\n"); ++ return status; ++} ++ ++/* ++ * ======== PrintProcessInformation ======== ++ * Purpose: ++ * This function prints the Process's information stored in ++ * the process context 
list. Some of the information that ++ * it displays is Process's state, Node, Stream, DMM, and ++ * Heap information. ++ */ ++static DSP_STATUS PrintProcessInformation(void) ++{ ++ struct DRV_OBJECT *hDrvObject = NULL; ++ struct PROCESS_CONTEXT *pCtxtList = NULL; ++ struct NODE_RES_OBJECT *pNodeRes = NULL; ++ struct DMM_RES_OBJECT *pDMMRes = NULL; ++ struct STRM_RES_OBJECT *pSTRMRes = NULL; ++ struct DSPHEAP_RES_OBJECT *pDSPHEAPRes = NULL; ++ struct PROC_OBJECT *proc_obj_ptr; ++ DSP_STATUS status = DSP_SOK; ++ u32 tempCount; ++ u32 procID; ++ ++ /* Get the Process context list */ ++ CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ DRV_GetProcCtxtList(&pCtxtList, hDrvObject); ++ GT_0trace(curTrace, GT_4CLASS, "\n### Debug information" ++ " for DSP bridge ##\n"); ++ GT_0trace(curTrace, GT_4CLASS, " \n ###The processes" ++ " information is as follows ### \n") ; ++ GT_0trace(curTrace, GT_4CLASS, " =====================" ++ "============ \n"); ++ /* Go through the entries in the Process context list */ ++ while (pCtxtList != NULL) { ++ GT_1trace(curTrace, GT_4CLASS, "\nThe process" ++ " id is %d\n", pCtxtList->pid); ++ GT_0trace(curTrace, GT_4CLASS, " -------------------" ++ "---------\n"); ++ if (pCtxtList->resState == PROC_RES_ALLOCATED) { ++ GT_0trace(curTrace, GT_4CLASS, " \nThe Process" ++ " is in Allocated state\n"); ++ } else { ++ GT_0trace(curTrace, GT_4CLASS, "\nThe Process" ++ " is in DeAllocated state\n"); ++ } ++ ++ spin_lock(&pCtxtList->proc_list_lock); ++ list_for_each_entry(proc_obj_ptr, &pCtxtList->processor_list, ++ proc_object) { ++ PROC_GetProcessorId(proc_obj_ptr, &procID); ++ if (procID == DSP_UNIT) { ++ GT_0trace(curTrace, GT_4CLASS, ++ "\nProcess connected to" ++ " DSP Processor\n"); ++ } else if (procID == IVA_UNIT) { ++ GT_0trace(curTrace, GT_4CLASS, ++ "\nProcess connected to" ++ " IVA Processor\n"); ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, ++ "\n***ERROR:Invalid Processor Id***\n"); ++ } ++ } ++ spin_unlock(&pCtxtList->proc_list_lock); ++ ++ pNodeRes = pCtxtList->pNodeList; ++ tempCount = 1; ++ while (pNodeRes != NULL) { ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n***The Nodes allocated by" ++ " this Process are***\n"); ++ GT_2trace(curTrace, GT_4CLASS, ++ "Node # %d Node Handle hNode:0x%x\n", ++ tempCount, (u32)pNodeRes->hNode); ++ pNodeRes = pNodeRes->next; ++ tempCount++; ++ } ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n ***There are no Nodes" ++ " allocated by this Process***\n"); ++ tempCount = 1; ++ pDSPHEAPRes = pCtxtList->pDSPHEAPList; ++ while (pDSPHEAPRes != NULL) { ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n***The Heaps allocated by" ++ " this Process are***\n"); ++ GT_3trace(curTrace, GT_4CLASS, ++ "DSP Heap Resource Info: HEAP # %d " ++ "Mapped GPP Address:0x%x, Size: 0x%lx\n", ++ tempCount, (u32)pDSPHEAPRes->ulMpuAddr, ++ pDSPHEAPRes->heapSize); ++ pDSPHEAPRes = pDSPHEAPRes->next; ++ tempCount++; ++ } ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n ***There are no Heaps allocated" ++ " by this Process***\n"); ++ tempCount = 1; ++ pDMMRes = pCtxtList->pDMMList; ++ while (pDMMRes != NULL) { ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n ***The DMM resources allocated by" ++ " this Process are***\n"); ++ GT_3trace(curTrace, GT_4CLASS, ++ "DMM Resource Info: DMM # %d " ++ "Mapped GPP Address:0X%lx, Size: 0X%lx\n", ++ tempCount, pDMMRes->ulMpuAddr, ++ pDMMRes->dmmSize); ++ pDMMRes = pDMMRes->next; ++ tempCount++; ++ } ++ if (tempCount == 1) ++ 
GT_0trace(curTrace, GT_4CLASS, ++ "\n ***There are no DMM resources" ++ " allocated by this Process***\n"); ++ tempCount = 1; ++ pSTRMRes = pCtxtList->pSTRMList; ++ while (pSTRMRes != NULL) { ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n***The Stream resources allocated by" ++ " this Process are***\n"); ++ GT_2trace(curTrace, GT_4CLASS, ++ "Stream Resource info: STRM # %d" ++ "Stream Handle:0X%x\n", tempCount, ++ (u32)pSTRMRes->hStream); ++ pSTRMRes = pSTRMRes->next; ++ tempCount++; ++ } ++ if (tempCount == 1) ++ GT_0trace(curTrace, GT_4CLASS, ++ "\n ***There are no Stream resources" ++ "allocated by this Process***\n"); ++ pCtxtList = pCtxtList->next; ++ } ++ return status; ++} ++ ++/* GPP PROCESS CLEANUP CODE END */ ++#endif ++ ++/* ++ * ======== = DRV_Create ======== = ++ * Purpose: ++ * DRV Object gets created only once during Driver Loading. ++ */ ++DSP_STATUS DRV_Create(OUT struct DRV_OBJECT **phDRVObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DRV_OBJECT *pDRVObject = NULL; ++ ++ DBC_Require(phDRVObject != NULL); ++ DBC_Require(cRefs > 0); ++ GT_1trace(curTrace, GT_ENTER, "Entering DRV_Create" ++ " phDRVObject 0x%x\n", phDRVObject); ++ MEM_AllocObject(pDRVObject, struct DRV_OBJECT, SIGNATURE); ++ if (pDRVObject) { ++ /* Create and Initialize List of device objects */ ++ pDRVObject->devList = LST_Create(); ++ if (pDRVObject->devList) { ++ /* Create and Initialize List of device Extension */ ++ pDRVObject->devNodeString = LST_Create(); ++ if (!(pDRVObject->devNodeString)) { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to Create DRV_EXT list "); ++ MEM_FreeObject(pDRVObject); ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to Create Dev List "); ++ MEM_FreeObject(pDRVObject); ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to Allocate Memory for DRV Obj"); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Store the DRV Object in the Registry */ ++ if (DSP_SUCCEEDED ++ (CFG_SetObject((u32) pDRVObject, REG_DRV_OBJECT))) { ++ GT_1trace(curTrace, GT_1CLASS, ++ "DRV Obj Created pDrvObject 0x%x\n ", ++ pDRVObject); ++ *phDRVObject = pDRVObject; ++ } else { ++ /* Free the DRV Object */ ++ status = DSP_EFAIL; ++ MEM_Free(pDRVObject); ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to update the Registry with " ++ "DRV Object "); ++ } ++ } ++ GT_2trace(curTrace, GT_ENTER, ++ "Exiting DRV_Create: phDRVObject: 0x%x\tstatus:" ++ "0x%x\n", phDRVObject, status); ++ DBC_Ensure(DSP_FAILED(status) || ++ MEM_IsValidHandle(pDRVObject, SIGNATURE)); ++ return status; ++} ++ ++/* ++ * ======== DRV_Exit ======== ++ * Purpose: ++ * Discontinue usage of the DRV module. ++ */ ++void DRV_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ GT_0trace(curTrace, GT_5CLASS, "Entering DRV_Exit \n"); ++ ++ cRefs--; ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== = DRV_Destroy ======== = ++ * purpose: ++ * Invoked during bridge de-initialization ++ */ ++DSP_STATUS DRV_Destroy(struct DRV_OBJECT *hDRVObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDRVObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pDRVObject, SIGNATURE)); ++ ++ GT_1trace(curTrace, GT_ENTER, "Entering DRV_Destroy" ++ " hDRVObject 0x%x\n", hDRVObject); ++ /* ++ * Delete the List if it exists.Should not come here ++ * as the DRV_RemoveDevObject and the Last DRV_RequestResources ++ * removes the list if the lists are empty. 
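++ * Any list still present is deleted here as a safety net before the
++ * DRV object itself is freed and the registry entry is reset to zero.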
++ */ ++ if (pDRVObject->devList) { ++ /* Could assert if the list is not empty */ ++ LST_Delete(pDRVObject->devList); ++ } ++ if (pDRVObject->devNodeString) { ++ /* Could assert if the list is not empty */ ++ LST_Delete(pDRVObject->devNodeString); ++ } ++ MEM_FreeObject(pDRVObject); ++ /* Update the DRV Object in Registry to be 0 */ ++ (void)CFG_SetObject(0, REG_DRV_OBJECT); ++ GT_2trace(curTrace, GT_ENTER, ++ "Exiting DRV_Destroy: hDRVObject: 0x%x\tstatus:" ++ "0x%x\n", hDRVObject, status); ++ DBC_Ensure(!MEM_IsValidHandle(pDRVObject, SIGNATURE)); ++ return status; ++} ++ ++/* ++ * ======== DRV_GetDevObject ======== ++ * Purpose: ++ * Given a index, returns a handle to DevObject from the list. ++ */ ++DSP_STATUS DRV_GetDevObject(u32 uIndex, struct DRV_OBJECT *hDrvObject, ++ struct DEV_OBJECT **phDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++#if GT_TRACE /* pDrvObject is used only for Assertions and debug messages.*/ ++ struct DRV_OBJECT *pDrvObject = (struct DRV_OBJECT *)hDrvObject; ++#endif ++ struct DEV_OBJECT *pDevObject; ++ u32 i; ++ DBC_Require(MEM_IsValidHandle(pDrvObject, SIGNATURE)); ++ DBC_Require(phDevObject != NULL); ++ DBC_Require(uIndex >= 0); ++ DBC_Require(cRefs > 0); ++ DBC_Assert(!(LST_IsEmpty(pDrvObject->devList))); ++ GT_3trace(curTrace, GT_ENTER, ++ "Entered DRV_GetDevObject, args:\n\tuIndex: " ++ "0x%x\n\thDrvObject: 0x%x\n\tphDevObject: 0x%x\n", ++ uIndex, hDrvObject, phDevObject); ++ pDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); ++ for (i = 0; i < uIndex; i++) { ++ pDevObject = ++ (struct DEV_OBJECT *)DRV_GetNextDevObject((u32)pDevObject); ++ } ++ if (pDevObject) { ++ *phDevObject = (struct DEV_OBJECT *) pDevObject; ++ status = DSP_SOK; ++ } else { ++ *phDevObject = NULL; ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_7CLASS, ++ "DRV: Could not get the DevObject\n"); ++ } ++ GT_2trace(curTrace, GT_ENTER, ++ "Exiting Drv_GetDevObject\n\tstatus: 0x%x\n\t" ++ "hDevObject: 0x%x\n", status, *phDevObject); ++ return status; ++} ++ ++/* ++ * ======== DRV_GetFirstDevObject ======== ++ * Purpose: ++ * Retrieve the first Device Object handle from an internal linked list of ++ * of DEV_OBJECTs maintained by DRV. ++ */ ++u32 DRV_GetFirstDevObject(void) ++{ ++ u32 dwDevObject = 0; ++ struct DRV_OBJECT *pDrvObject; ++ ++ if (DSP_SUCCEEDED ++ (CFG_GetObject((u32 *)&pDrvObject, REG_DRV_OBJECT))) { ++ if ((pDrvObject->devList != NULL) && ++ !LST_IsEmpty(pDrvObject->devList)) ++ dwDevObject = (u32) LST_First(pDrvObject->devList); ++ } ++ ++ return dwDevObject; ++} ++ ++/* ++ * ======== DRV_GetFirstDevNodeString ======== ++ * Purpose: ++ * Retrieve the first Device Extension from an internal linked list of ++ * of Pointer to DevNode Strings maintained by DRV. ++ */ ++u32 DRV_GetFirstDevExtension(void) ++{ ++ u32 dwDevExtension = 0; ++ struct DRV_OBJECT *pDrvObject; ++ ++ if (DSP_SUCCEEDED ++ (CFG_GetObject((u32 *)&pDrvObject, REG_DRV_OBJECT))) { ++ ++ if ((pDrvObject->devNodeString != NULL) && ++ !LST_IsEmpty(pDrvObject->devNodeString)) { ++ dwDevExtension = (u32)LST_First(pDrvObject-> ++ devNodeString); ++ } ++ } ++ ++ return dwDevExtension; ++} ++ ++/* ++ * ======== DRV_GetNextDevObject ======== ++ * Purpose: ++ * Retrieve the next Device Object handle from an internal linked list of ++ * of DEV_OBJECTs maintained by DRV, after having previously called ++ * DRV_GetFirstDevObject() and zero or more DRV_GetNext. 
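++ * Returns 0 when the end of the list is reached or the list is empty.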
++ */ ++u32 DRV_GetNextDevObject(u32 hDevObject) ++{ ++ u32 dwNextDevObject = 0; ++ struct DRV_OBJECT *pDrvObject; ++ ++ DBC_Require(hDevObject != 0); ++ ++ if (DSP_SUCCEEDED ++ (CFG_GetObject((u32 *)&pDrvObject, REG_DRV_OBJECT))) { ++ ++ if ((pDrvObject->devList != NULL) && ++ !LST_IsEmpty(pDrvObject->devList)) { ++ dwNextDevObject = (u32)LST_Next(pDrvObject->devList, ++ (struct LST_ELEM *)hDevObject); ++ } ++ } ++ return dwNextDevObject; ++} ++ ++/* ++ * ======== DRV_GetNextDevExtension ======== ++ * Purpose: ++ * Retrieve the next Device Extension from an internal linked list of ++ * of pointer to DevNodeString maintained by DRV, after having previously ++ * called DRV_GetFirstDevExtension() and zero or more ++ * DRV_GetNextDevExtension(). ++ */ ++u32 DRV_GetNextDevExtension(u32 hDevExtension) ++{ ++ u32 dwDevExtension = 0; ++ struct DRV_OBJECT *pDrvObject; ++ ++ DBC_Require(hDevExtension != 0); ++ ++ if (DSP_SUCCEEDED(CFG_GetObject((u32 *)&pDrvObject, ++ REG_DRV_OBJECT))) { ++ if ((pDrvObject->devNodeString != NULL) && ++ !LST_IsEmpty(pDrvObject->devNodeString)) { ++ dwDevExtension = (u32)LST_Next(pDrvObject-> ++ devNodeString, ++ (struct LST_ELEM *)hDevExtension); ++ } ++ } ++ ++ return dwDevExtension; ++} ++ ++/* ++ * ======== DRV_Init ======== ++ * Purpose: ++ * Initialize DRV module private state. ++ */ ++DSP_STATUS DRV_Init(void) ++{ ++ s32 fRetval = 1; /* function return value */ ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (fRetval) ++ cRefs++; ++ ++ GT_1trace(curTrace, GT_5CLASS, "Entering DRV_Entry crefs 0x%x \n", ++ cRefs); ++ ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} ++ ++/* ++ * ======== DRV_InsertDevObject ======== ++ * Purpose: ++ * Insert a DevObject into the list of Manager object. ++ */ ++DSP_STATUS DRV_InsertDevObject(struct DRV_OBJECT *hDRVObject, ++ struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDRVObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hDevObject != NULL); ++ DBC_Require(MEM_IsValidHandle(pDRVObject, SIGNATURE)); ++ DBC_Assert(pDRVObject->devList); ++ ++ GT_2trace(curTrace, GT_ENTER, ++ "Entering DRV_InsertProcObject hDRVObject " ++ "0x%x\n, hDevObject 0x%x\n", hDRVObject, hDevObject); ++ ++ LST_PutTail(pDRVObject->devList, (struct LST_ELEM *)hDevObject); ++ ++ GT_1trace(curTrace, GT_ENTER, ++ "Exiting InsertDevObject status 0x%x\n", status); ++ ++ DBC_Ensure(DSP_SUCCEEDED(status) && !LST_IsEmpty(pDRVObject->devList)); ++ ++ return status; ++} ++ ++/* ++ * ======== DRV_RemoveDevObject ======== ++ * Purpose: ++ * Search for and remove a DeviceObject from the given list of DRV ++ * objects. ++ */ ++DSP_STATUS DRV_RemoveDevObject(struct DRV_OBJECT *hDRVObject, ++ struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDRVObject; ++ struct LST_ELEM *pCurElem; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pDRVObject, SIGNATURE)); ++ DBC_Require(hDevObject != NULL); ++ ++ DBC_Require(pDRVObject->devList != NULL); ++ DBC_Require(!LST_IsEmpty(pDRVObject->devList)); ++ ++ GT_2trace(curTrace, GT_ENTER, ++ "Entering DRV_RemoveDevObject hDevObject " ++ "0x%x\n, hDRVObject 0x%x\n", hDevObject, hDRVObject); ++ /* Search list for pProcObject: */ ++ for (pCurElem = LST_First(pDRVObject->devList); pCurElem != NULL; ++ pCurElem = LST_Next(pDRVObject->devList, pCurElem)) { ++ /* If found, remove it. 
*/ ++ if ((struct DEV_OBJECT *) pCurElem == hDevObject) { ++ LST_RemoveElem(pDRVObject->devList, pCurElem); ++ status = DSP_SOK; ++ break; ++ } ++ } ++ /* Remove list if empty. */ ++ if (LST_IsEmpty(pDRVObject->devList)) { ++ LST_Delete(pDRVObject->devList); ++ pDRVObject->devList = NULL; ++ } ++ DBC_Ensure((pDRVObject->devList == NULL) || ++ !LST_IsEmpty(pDRVObject->devList)); ++ GT_1trace(curTrace, GT_ENTER, ++ "DRV_RemoveDevObject returning 0x%x\n", status); ++ return status; ++} ++ ++/* ++ * ======== DRV_RequestResources ======== ++ * Purpose: ++ * Requests resources from the OS. ++ */ ++DSP_STATUS DRV_RequestResources(u32 dwContext, u32 *pDevNodeString) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DRV_OBJECT *pDRVObject; ++ struct DRV_EXT *pszdevNode; ++ ++ DBC_Require(dwContext != 0); ++ DBC_Require(pDevNodeString != NULL); ++ GT_0trace(curTrace, GT_ENTER, "Entering DRV_RequestResources\n"); ++ /* ++ * Allocate memory to hold the string. This will live untill ++ * it is freed in the Release resources. Update the driver object ++ * list. ++ */ ++ if (DSP_SUCCEEDED(CFG_GetObject((u32 *)&pDRVObject, ++ REG_DRV_OBJECT))) { ++ pszdevNode = MEM_Calloc(sizeof(struct DRV_EXT), MEM_NONPAGED); ++ if (pszdevNode) { ++ LST_InitElem(&pszdevNode->link); ++ strncpy(pszdevNode->szString, ++ (char *)dwContext, MAXREGPATHLENGTH - 1); ++ pszdevNode->szString[MAXREGPATHLENGTH - 1] = '\0'; ++ /* Update the Driver Object List */ ++ *pDevNodeString = (u32)pszdevNode->szString; ++ LST_PutTail(pDRVObject->devNodeString, ++ (struct LST_ELEM *)pszdevNode); ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to Allocate Memory devNodeString "); ++ status = DSP_EFAIL; ++ *pDevNodeString = 0; ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to get Driver Object from Registry"); ++ *pDevNodeString = 0; ++ } ++ ++ if (!(strcmp((char *) dwContext, "TIOMAP1510"))) { ++ GT_0trace(curTrace, GT_1CLASS, ++ " Allocating resources for UMA \n"); ++ status = RequestBridgeResourcesDSP(dwContext, DRV_ASSIGN); ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(curTrace, GT_7CLASS, "Unknown Device "); ++ } ++ ++ if (DSP_FAILED(status)) { ++ GT_0trace(curTrace, GT_7CLASS, ++ "Failed to reserve bridge resources "); ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && pDevNodeString != NULL && ++ !LST_IsEmpty(pDRVObject->devNodeString)) || ++ (DSP_FAILED(status) && *pDevNodeString == 0)); ++ ++ return status; ++} ++ ++/* ++ * ======== DRV_ReleaseResources ======== ++ * Purpose: ++ * Releases resources from the OS. ++ */ ++DSP_STATUS DRV_ReleaseResources(u32 dwContext, struct DRV_OBJECT *hDrvObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDrvObject; ++ struct DRV_EXT *pszdevNode; ++ ++ GT_0trace(curTrace, GT_ENTER, "Entering DRV_Release Resources\n"); ++ ++ if (!(strcmp((char *)((struct DRV_EXT *)dwContext)->szString, ++ "TIOMAP1510"))) { ++ GT_0trace(curTrace, GT_1CLASS, ++ " Releasing DSP-Bridge resources \n"); ++ status = RequestBridgeResources(dwContext, DRV_RELEASE); ++ } else { ++ GT_0trace(curTrace, GT_1CLASS, " Unknown device\n"); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(curTrace, GT_1CLASS, ++ "Failed to relese bridge resources\n"); ++ } ++ ++ /* ++ * Irrespective of the status go ahead and clean it ++ * The following will over write the status. 
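++ * The matching DRV_EXT entry is removed from devNodeString and freed,
++ * and the list itself is deleted once it becomes empty.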
++ */ ++ for (pszdevNode = (struct DRV_EXT *)DRV_GetFirstDevExtension(); ++ pszdevNode != NULL; pszdevNode = (struct DRV_EXT *) ++ DRV_GetNextDevExtension((u32)pszdevNode)) { ++ if ((u32)pszdevNode == dwContext) { ++ /* Found it */ ++ /* Delete from the Driver object list */ ++ LST_RemoveElem(pDRVObject->devNodeString, ++ (struct LST_ELEM *)pszdevNode); ++ MEM_Free((void *) pszdevNode); ++ break; ++ } ++ /* Delete the List if it is empty */ ++ if (LST_IsEmpty(pDRVObject->devNodeString)) { ++ LST_Delete(pDRVObject->devNodeString); ++ pDRVObject->devNodeString = NULL; ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== RequestBridgeResources ======== ++ * Purpose: ++ * Reserves shared memory for bridge. ++ */ ++static DSP_STATUS RequestBridgeResources(u32 dwContext, s32 bRequest) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CFG_HOSTRES *pResources; ++ u32 dwBuffSize; ++ ++ struct DRV_EXT *driverExt; ++ u32 shm_size; ++ ++ DBC_Require(dwContext != 0); ++ ++ GT_0trace(curTrace, GT_ENTER, "->RequestBridgeResources \n"); ++ ++ if (!bRequest) { ++ driverExt = (struct DRV_EXT *)dwContext; ++ /* Releasing resources by deleting the registry key */ ++ dwBuffSize = sizeof(struct CFG_HOSTRES); ++ pResources = MEM_Calloc(dwBuffSize, MEM_NONPAGED); ++ if (DSP_FAILED(REG_GetValue(NULL, (char *)driverExt->szString, ++ CURRENTCONFIG, (u8 *)pResources, &dwBuffSize))) { ++ status = CFG_E_RESOURCENOTAVAIL; ++ GT_0trace(curTrace, GT_1CLASS, ++ "REG_GetValue Failed \n"); ++ } else { ++ GT_0trace(curTrace, GT_1CLASS, ++ "REG_GetValue Succeeded \n"); ++ } ++ ++ if (pResources != NULL) { ++ dwBuffSize = sizeof(shm_size); ++ status = REG_GetValue(NULL, CURRENTCONFIG, SHMSIZE, ++ (u8 *)&shm_size, &dwBuffSize); ++ if (DSP_SUCCEEDED(status)) { ++ if ((pResources->dwMemBase[1]) && ++ (pResources->dwMemPhys[1])) { ++ MEM_FreePhysMem((void *)pResources-> ++ dwMemBase[1], pResources->dwMemPhys[1], ++ shm_size); ++ } ++ } else { ++ GT_1trace(curTrace, GT_7CLASS, ++ "Error getting SHM size from registry: " ++ "%x. 
Not calling MEM_FreePhysMem\n", ++ status); ++ } ++ pResources->dwMemBase[1] = 0; ++ pResources->dwMemPhys[1] = 0; ++ ++ if (pResources->dwPrmBase) ++ iounmap(pResources->dwPrmBase); ++ if (pResources->dwCmBase) ++ iounmap(pResources->dwCmBase); ++ if (pResources->dwMboxBase) ++ iounmap(pResources->dwMboxBase); ++ if (pResources->dwMemBase[0]) ++ iounmap((void *)pResources->dwMemBase[0]); ++ if (pResources->dwMemBase[2]) ++ iounmap((void *)pResources->dwMemBase[2]); ++ if (pResources->dwMemBase[3]) ++ iounmap((void *)pResources->dwMemBase[3]); ++ if (pResources->dwMemBase[4]) ++ iounmap((void *)pResources->dwMemBase[4]); ++ if (pResources->dwWdTimerDspBase) ++ iounmap(pResources->dwWdTimerDspBase); ++ if (pResources->dwDmmuBase) ++ iounmap(pResources->dwDmmuBase); ++ if (pResources->dwPerBase) ++ iounmap(pResources->dwPerBase); ++ if (pResources->dwPerPmBase) ++ iounmap((void *)pResources->dwPerPmBase); ++ if (pResources->dwCorePmBase) ++ iounmap((void *)pResources->dwCorePmBase); ++ if (pResources->dwSysCtrlBase) { ++ iounmap(pResources->dwSysCtrlBase); ++ /* don't set pResources->dwSysCtrlBase to null ++ * as it is used in BOARD_Stop */ ++ } ++ pResources->dwPrmBase = NULL; ++ pResources->dwCmBase = NULL; ++ pResources->dwMboxBase = NULL; ++ pResources->dwMemBase[0] = (u32) NULL; ++ pResources->dwMemBase[2] = (u32) NULL; ++ pResources->dwMemBase[3] = (u32) NULL; ++ pResources->dwMemBase[4] = (u32) NULL; ++ pResources->dwWdTimerDspBase = NULL; ++ pResources->dwDmmuBase = NULL; ++ ++ dwBuffSize = sizeof(struct CFG_HOSTRES); ++ status = REG_SetValue(NULL, (char *)driverExt->szString, ++ CURRENTCONFIG, REG_BINARY, (u8 *)pResources, ++ (u32)dwBuffSize); ++ /* Set all the other entries to NULL */ ++ MEM_Free(pResources); ++ } ++ GT_0trace(curTrace, GT_ENTER, " <- RequestBridgeResources \n"); ++ return status; ++ } ++ dwBuffSize = sizeof(struct CFG_HOSTRES); ++ pResources = MEM_Calloc(dwBuffSize, MEM_NONPAGED); ++ if (pResources != NULL) { ++ /* wNumMemWindows must not be more than CFG_MAXMEMREGISTERS */ ++ pResources->wNumMemWindows = 2; ++ /* First window is for DSP internal memory */ ++ ++ pResources->dwPrmBase = ioremap(OMAP_IVA2_PRM_BASE, ++ OMAP_IVA2_PRM_SIZE); ++ pResources->dwCmBase = ioremap(OMAP_IVA2_CM_BASE, ++ OMAP_IVA2_CM_SIZE); ++ pResources->dwMboxBase = ioremap(OMAP_MBOX_BASE, ++ OMAP_MBOX_SIZE); ++ pResources->dwSysCtrlBase = ioremap(OMAP_SYSC_BASE, ++ OMAP_SYSC_SIZE); ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[0] 0x%x\n", ++ pResources->dwMemBase[0]); ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[3] 0x%x\n", ++ pResources->dwMemBase[3]); ++ GT_1trace(curTrace, GT_2CLASS, "dwPrmBase 0x%x\n", ++ pResources->dwPrmBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwCmBase 0x%x\n", ++ pResources->dwCmBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwWdTimerDspBase 0x%x\n", ++ pResources->dwWdTimerDspBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwMboxBase 0x%x\n", ++ pResources->dwMboxBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwDmmuBase 0x%x\n", ++ pResources->dwDmmuBase); ++ ++ /* for 24xx base port is not mapping the mamory for DSP ++ * internal memory TODO Do a ioremap here */ ++ /* Second window is for DSP external memory shared with MPU */ ++ if (DSP_SUCCEEDED(status)) { ++ /* for Linux, these are hard-coded values */ ++ pResources->bIRQRegisters = 0; ++ pResources->bIRQAttrib = 0; ++ pResources->dwOffsetForMonitor = 0; ++ pResources->dwChnlOffset = 0; ++ /* CHNL_MAXCHANNELS */ ++ pResources->dwNumChnls = CHNL_MAXCHANNELS; ++ pResources->dwChnlBufSize = 0x400; ++ dwBuffSize = sizeof(struct 
CFG_HOSTRES); ++ status = REG_SetValue(NULL, (char *) dwContext, ++ CURRENTCONFIG, REG_BINARY, ++ (u8 *)pResources, ++ sizeof(struct CFG_HOSTRES)); ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(curTrace, GT_1CLASS, ++ " Successfully set the registry " ++ "value for CURRENTCONFIG\n"); ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, ++ " Failed to set the registry " ++ "value for CURRENTCONFIG\n"); ++ } ++ } ++ MEM_Free(pResources); ++ } ++ /* End Mem alloc */ ++ return status; ++} ++ ++/* ++ * ======== RequestBridgeResourcesDSP ======== ++ * Purpose: ++ * Reserves shared memory for bridge. ++ */ ++static DSP_STATUS RequestBridgeResourcesDSP(u32 dwContext, s32 bRequest) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CFG_HOSTRES *pResources; ++ u32 dwBuffSize; ++ u32 dmaAddr; ++ u32 shm_size; ++ ++ DBC_Require(dwContext != 0); ++ ++ GT_0trace(curTrace, GT_ENTER, "->RequestBridgeResourcesDSP \n"); ++ ++ dwBuffSize = sizeof(struct CFG_HOSTRES); ++ ++ pResources = MEM_Calloc(dwBuffSize, MEM_NONPAGED); ++ ++ if (pResources != NULL) { ++ if (DSP_FAILED(CFG_GetHostResources((struct CFG_DEVNODE *) ++ dwContext, pResources))) { ++ /* Call CFG_GetHostResources to get reserve resouces */ ++ status = RequestBridgeResources(dwContext, bRequest); ++ if (DSP_SUCCEEDED(status)) { ++ status = CFG_GetHostResources ++ ((struct CFG_DEVNODE *) dwContext, ++ pResources); ++ } ++ } ++ /* wNumMemWindows must not be more than CFG_MAXMEMREGISTERS */ ++ pResources->wNumMemWindows = 4; ++ ++ pResources->dwMemBase[0] = 0; ++ pResources->dwMemBase[2] = (u32)ioremap(OMAP_DSP_MEM1_BASE, ++ OMAP_DSP_MEM1_SIZE); ++ pResources->dwMemBase[3] = (u32)ioremap(OMAP_DSP_MEM2_BASE, ++ OMAP_DSP_MEM2_SIZE); ++ pResources->dwMemBase[4] = (u32)ioremap(OMAP_DSP_MEM3_BASE, ++ OMAP_DSP_MEM3_SIZE); ++ pResources->dwPerBase = ioremap(OMAP_PER_CM_BASE, ++ OMAP_PER_CM_SIZE); ++ pResources->dwPerPmBase = (u32)ioremap(OMAP_PER_PRM_BASE, ++ OMAP_PER_PRM_SIZE); ++ pResources->dwCorePmBase = (u32)ioremap(OMAP_CORE_PRM_BASE, ++ OMAP_CORE_PRM_SIZE); ++ pResources->dwDmmuBase = ioremap(OMAP_DMMU_BASE, ++ OMAP_DMMU_SIZE); ++ pResources->dwWdTimerDspBase = NULL; ++ ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[0] 0x%x\n", ++ pResources->dwMemBase[0]); ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[1] 0x%x\n", ++ pResources->dwMemBase[1]); ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[2] 0x%x\n", ++ pResources->dwMemBase[2]); ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[3] 0x%x\n", ++ pResources->dwMemBase[3]); ++ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[4] 0x%x\n", ++ pResources->dwMemBase[4]); ++ GT_1trace(curTrace, GT_2CLASS, "dwPrmBase 0x%x\n", ++ pResources->dwPrmBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwCmBase 0x%x\n", ++ pResources->dwCmBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwWdTimerDspBase 0x%x\n", ++ pResources->dwWdTimerDspBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwMboxBase 0x%x\n", ++ pResources->dwMboxBase); ++ GT_1trace(curTrace, GT_2CLASS, "dwDmmuBase 0x%x\n", ++ pResources->dwDmmuBase); ++ dwBuffSize = sizeof(shm_size); ++ status = REG_GetValue(NULL, CURRENTCONFIG, SHMSIZE, ++ (u8 *)&shm_size, &dwBuffSize); ++ if (DSP_SUCCEEDED(status)) { ++ /* Allocate Physically contiguous, ++ * non-cacheable memory */ ++ pResources->dwMemBase[1] = ++ (u32)MEM_AllocPhysMem(shm_size, 0x100000, ++ &dmaAddr); ++ if (pResources->dwMemBase[1] == 0) { ++ status = DSP_EMEMORY; ++ GT_0trace(curTrace, GT_7CLASS, ++ "SHM reservation Failed\n"); ++ } else { ++ pResources->dwMemLength[1] = shm_size; ++ pResources->dwMemPhys[1] = dmaAddr; ++ ++ GT_3trace(curTrace, 
GT_1CLASS, ++ "Bridge SHM address 0x%x dmaAddr" ++ " %x size %x\n", ++ pResources->dwMemBase[1], ++ dmaAddr, shm_size); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* for Linux, these are hard-coded values */ ++ pResources->bIRQRegisters = 0; ++ pResources->bIRQAttrib = 0; ++ pResources->dwOffsetForMonitor = 0; ++ pResources->dwChnlOffset = 0; ++ /* CHNL_MAXCHANNELS */ ++ pResources->dwNumChnls = CHNL_MAXCHANNELS; ++ pResources->dwChnlBufSize = 0x400; ++ dwBuffSize = sizeof(struct CFG_HOSTRES); ++ status = REG_SetValue(NULL, (char *)dwContext, ++ CURRENTCONFIG, REG_BINARY, ++ (u8 *)pResources, ++ sizeof(struct CFG_HOSTRES)); ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(curTrace, GT_1CLASS, ++ " Successfully set the registry" ++ " value for CURRENTCONFIG\n"); ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, ++ " Failed to set the registry value" ++ " for CURRENTCONFIG\n"); ++ } ++ } ++ MEM_Free(pResources); ++ } ++ /* End Mem alloc */ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/drv_interface.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/drv_interface.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,739 @@ ++/* ++ * drv_interface.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== linux_driver.c ======== ++ * Description: ++ * DSP/BIOS Bridge driver interface. ++ * ++ * Public Functions: ++ * driver_init ++ * driver_exit ++ * driver_open ++ * driver_release ++ * driver_ioctl ++ * driver_mmap ++ * ++ *! Revision History ++ *! ================ ++ *! 21-Apr-2004 map Deprecated use of MODULE_PARM for kernel versions ++ *! greater than 2.5, use module_param. ++ *! 08-Mar-2004 sb Added the dsp_debug argument, which keeps the DSP in self ++ *! loop after image load and waits in a loop for DSP to start ++ *! 16-Feb-2004 vp Deprecated the usage of MOD_INC_USE_COUNT and ++ *! MOD_DEC_USE_COUNT ++ *! for kernel versions greater than 2.5 ++ *! 20-May-2003 vp Added unregister functions for the DPM. ++ *! 24-Mar-2003 sb Pass pid instead of driverContext to DSP_Close ++ *! 24-Mar-2003 vp Added Power Management support. ++ *! 21-Mar-2003 sb Configure SHM size using insmod argument shm_size ++ *! 10-Feb-2003 vp Updated based on code review comments ++ *! 
18-Oct-2002 sb Created initial version ++ */ ++ ++/* ----------------------------------- Host OS */ ++ ++#include ++#include ++#include ++ ++#ifdef MODULE ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++#ifndef RES_CLEANUP_DISABLE ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#endif ++ ++#include ++#include ++ ++#define BRIDGE_NAME "C6410" ++/* ----------------------------------- Globals */ ++#define DRIVER_NAME "DspBridge" ++#define DRIVER_MAJOR 0 /* Linux assigns our Major device number */ ++#define DRIVER_MINOR 0 /* Linux assigns our Major device number */ ++s32 dsp_debug; ++ ++struct platform_device *omap_dspbridge_dev; ++ ++struct bridge_dev { ++ struct cdev cdev; ++}; ++ ++static struct bridge_dev *bridge_device; ++ ++static struct class *bridge_class; ++ ++static u32 driverContext; ++#ifdef CONFIG_BRIDGE_DEBUG ++static char *GT_str; ++#endif /* CONFIG_BRIDGE_DEBUG */ ++static s32 driver_major = DRIVER_MAJOR; ++static s32 driver_minor = DRIVER_MINOR; ++static char *base_img; ++char *iva_img; ++static char *num_procs = "C55=1"; ++static s32 shm_size = 0x400000; /* 4 MB */ ++static u32 phys_mempool_base; ++static u32 phys_mempool_size; ++static int tc_wordswapon; /* Default value is always false */ ++ ++/* Minimum ACTIVE VDD1 OPP level for reliable DSP operation */ ++unsigned short min_active_opp = 3; ++ ++#ifdef CONFIG_PM ++struct omap34xx_bridge_suspend_data { ++ int suspended; ++ wait_queue_head_t suspend_wq; ++}; ++ ++static struct omap34xx_bridge_suspend_data bridge_suspend_data; ++ ++static int omap34xxbridge_suspend_lockout( ++ struct omap34xx_bridge_suspend_data *s, struct file *f) ++{ ++ if ((s)->suspended) { ++ if ((f)->f_flags & O_NONBLOCK) ++ return DSP_EDPMSUSPEND; ++ wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0); ++ } ++ return 0; ++} ++ ++#endif ++ ++#ifdef DEBUG ++module_param(GT_str, charp, 0); ++MODULE_PARM_DESC(GT_str, "GT string, default = NULL"); ++ ++module_param(dsp_debug, int, 0); ++MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false"); ++#endif ++ ++module_param(driver_major, int, 0); /* Driver's major number */ ++MODULE_PARM_DESC(driver_major, "Major device number, default = 0 (auto)"); ++ ++module_param(driver_minor, int, 0); /* Driver's major number */ ++MODULE_PARM_DESC(driver_minor, "Minor device number, default = 0 (auto)"); ++ ++module_param(base_img, charp, 0); ++MODULE_PARM_DESC(base_img, "DSP base image, default = NULL"); ++ ++module_param(shm_size, int, 0); ++MODULE_PARM_DESC(shm_size, "SHM size, default = 4 MB, minimum = 64 KB"); ++ ++module_param(phys_mempool_base, uint, 0); ++MODULE_PARM_DESC(phys_mempool_base, ++ "Physical memory pool base passed to driver"); ++ ++module_param(phys_mempool_size, uint, 0); ++MODULE_PARM_DESC(phys_mempool_size, ++ "Physical memory pool size passed to driver"); ++module_param(tc_wordswapon, int, 0); ++MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. 
default = 0"); ++ ++module_param(min_active_opp, ushort, S_IRUSR | S_IWUSR); ++MODULE_PARM_DESC(min_active_opp, "Minimum ACTIVE VDD1 OPP Level, default = 3"); ++ ++MODULE_AUTHOR("Texas Instruments"); ++MODULE_LICENSE("GPL"); ++ ++static char *driver_name = DRIVER_NAME; ++ ++#ifdef CONFIG_BRIDGE_DEBUG ++static struct GT_Mask driverTrace; ++#endif /* CONFIG_BRIDGE_DEBUG */ ++ ++static struct file_operations bridge_fops = { ++ .open = bridge_open, ++ .release = bridge_release, ++ .unlocked_ioctl = bridge_ioctl, ++ .mmap = bridge_mmap, ++}; ++ ++#ifdef CONFIG_PM ++static u32 timeOut = 1000; ++#ifdef CONFIG_BRIDGE_DVFS ++static struct clk *clk_handle; ++s32 dsp_max_opps = VDD1_OPP5; ++#endif ++ ++/* Maximum Opps that can be requested by IVA*/ ++/*vdd1 rate table*/ ++#ifdef CONFIG_BRIDGE_DVFS ++const struct omap_opp vdd1_rate_table_bridge[] = { ++ {0, 0, 0}, ++ /*OPP1*/ ++ {S125M, VDD1_OPP1, 0}, ++ /*OPP2*/ ++ {S250M, VDD1_OPP2, 0}, ++ /*OPP3*/ ++ {S500M, VDD1_OPP3, 0}, ++ /*OPP4*/ ++ {S550M, VDD1_OPP4, 0}, ++ /*OPP5*/ ++ {S600M, VDD1_OPP5, 0}, ++}; ++#endif ++#endif ++ ++struct dspbridge_platform_data *omap_dspbridge_pdata; ++ ++u32 vdd1_dsp_freq[6][4] = { ++ {0, 0, 0, 0}, ++ /*OPP1*/ ++ {0, 90000, 0, 86000}, ++ /*OPP2*/ ++ {0, 180000, 80000, 170000}, ++ /*OPP3*/ ++ {0, 360000, 160000, 340000}, ++ /*OPP4*/ ++ {0, 396000, 325000, 376000}, ++ /*OPP5*/ ++ {0, 430000, 355000, 430000}, ++}; ++ ++#ifdef CONFIG_BRIDGE_DVFS ++static int dspbridge_post_scale(struct notifier_block *op, unsigned long level, ++ void *ptr) ++{ ++ PWR_PM_PostScale(PRCM_VDD1, level); ++ return 0; ++} ++ ++static struct notifier_block iva_clk_notifier = { ++ .notifier_call = dspbridge_post_scale, ++ NULL, ++}; ++#endif ++ ++static int __devinit omap34xx_bridge_probe(struct platform_device *pdev) ++{ ++ int status; ++ u32 initStatus; ++ u32 temp; ++ dev_t dev = 0 ; ++ int result; ++#ifdef CONFIG_BRIDGE_DVFS ++ int i = 0; ++#endif ++ struct dspbridge_platform_data *pdata = pdev->dev.platform_data; ++ ++ omap_dspbridge_dev = pdev; ++ ++ /* use 2.6 device model */ ++ if (driver_major) { ++ dev = MKDEV(driver_major, driver_minor); ++ result = register_chrdev_region(dev, 1, driver_name); ++ } else { ++ result = alloc_chrdev_region(&dev, driver_minor, 1, ++ driver_name); ++ driver_major = MAJOR(dev); ++ } ++ ++ if (result < 0) { ++ GT_1trace(driverTrace, GT_7CLASS, "bridge_init: " ++ "Can't get Major %d \n", driver_major); ++ return result; ++ } ++ ++ bridge_device = kmalloc(sizeof(struct bridge_dev), GFP_KERNEL); ++ if (!bridge_device) { ++ result = -ENOMEM; ++ unregister_chrdev_region(dev, 1); ++ return result; ++ } ++ memset(bridge_device, 0, sizeof(struct bridge_dev)); ++ cdev_init(&bridge_device->cdev, &bridge_fops); ++ bridge_device->cdev.owner = THIS_MODULE; ++ bridge_device->cdev.ops = &bridge_fops; ++ ++ status = cdev_add(&bridge_device->cdev, dev, 1); ++ ++ if (status) { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "Failed to add the bridge device \n"); ++ return status; ++ } ++ ++ /* udev support */ ++ bridge_class = class_create(THIS_MODULE, "ti_bridge"); ++ ++ if (IS_ERR(bridge_class)) ++ GT_0trace(driverTrace, GT_7CLASS, ++ "Error creating bridge class \n"); ++ ++ device_create(bridge_class, NULL, MKDEV(driver_major, driver_minor), ++ NULL, "DspBridge"); ++ ++ GT_init(); ++ GT_create(&driverTrace, "LD"); ++ ++#ifdef DEBUG ++ if (GT_str) ++ GT_set(GT_str); ++#elif defined(DDSP_DEBUG_PRODUCT) && GT_TRACE ++ GT_set("**=67"); ++#endif ++ ++ GT_0trace(driverTrace, GT_ENTER, "-> driver_init\n"); ++ ++#ifdef CONFIG_PM ++ /* 
Initialize the wait queue */ ++ if (!status) { ++ bridge_suspend_data.suspended = 0; ++ init_waitqueue_head(&bridge_suspend_data.suspend_wq); ++ } ++#endif ++ ++ SERVICES_Init(); ++ ++ /* Autostart flag. This should be set to true if the DSP image should ++ * be loaded and run during bridge module initialization */ ++ ++ if (base_img) { ++ temp = true; ++ REG_SetValue(NULL, NULL, AUTOSTART, REG_DWORD, (u8 *)&temp, ++ sizeof(temp)); ++ REG_SetValue(NULL, NULL, DEFEXEC, REG_SZ, (u8 *)base_img, ++ strlen(base_img) + 1); ++ } else { ++ temp = false; ++ REG_SetValue(NULL, NULL, AUTOSTART, REG_DWORD, (u8 *)&temp, ++ sizeof(temp)); ++ REG_SetValue(NULL, NULL, DEFEXEC, REG_SZ, (u8 *) "\0", (u32)2); ++ } ++ REG_SetValue(NULL, NULL, NUMPROCS, REG_SZ, (u8 *) num_procs, ++ strlen(num_procs) + 1); ++ ++ if (shm_size >= 0x10000) { /* 64 KB */ ++ initStatus = REG_SetValue(NULL, NULL, SHMSIZE, REG_DWORD, ++ (u8 *)&shm_size, sizeof(shm_size)); ++ } else { ++ initStatus = DSP_EINVALIDARG; ++ status = -1; ++ GT_0trace(driverTrace, GT_7CLASS, ++ "SHM size must be at least 64 KB\n"); ++ } ++ GT_1trace(driverTrace, GT_7CLASS, ++ "requested shm_size = 0x%x\n", shm_size); ++ ++ if (pdata->phys_mempool_base && pdata->phys_mempool_size) { ++ phys_mempool_base = pdata->phys_mempool_base; ++ phys_mempool_size = pdata->phys_mempool_size; ++ } ++ ++ if (phys_mempool_base > 0x0) { ++ initStatus = REG_SetValue(NULL, NULL, PHYSMEMPOOLBASE, ++ REG_DWORD, (u8 *)&phys_mempool_base, ++ sizeof(phys_mempool_base)); ++ } ++ GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_base = 0x%x \n", ++ phys_mempool_base); ++ ++ if (phys_mempool_size > 0x0) { ++ initStatus = REG_SetValue(NULL, NULL, PHYSMEMPOOLSIZE, ++ REG_DWORD, (u8 *)&phys_mempool_size, ++ sizeof(phys_mempool_size)); ++ } ++ GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_size = 0x%x\n", ++ phys_mempool_base); ++ if ((phys_mempool_base > 0x0) && (phys_mempool_size > 0x0)) ++ MEM_ExtPhysPoolInit(phys_mempool_base, phys_mempool_size); ++ if (tc_wordswapon) { ++ GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is enabled\n"); ++ REG_SetValue(NULL, NULL, TCWORDSWAP, REG_DWORD, ++ (u8 *)&tc_wordswapon, sizeof(tc_wordswapon)); ++ } else { ++ GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is disabled\n"); ++ REG_SetValue(NULL, NULL, TCWORDSWAP, ++ REG_DWORD, (u8 *)&tc_wordswapon, ++ sizeof(tc_wordswapon)); ++ } ++ if (DSP_SUCCEEDED(initStatus)) { ++#ifdef CONFIG_BRIDGE_DVFS ++ for (i = 0; i < 6; i++) ++ pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate; ++ ++ clk_handle = clk_get(NULL, "iva2_ck"); ++ if (!clk_handle) { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "clk_get failed to get iva2_ck \n"); ++ } else { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "clk_get PASS to get iva2_ck \n"); ++ } ++ if (!clk_notifier_register(clk_handle, &iva_clk_notifier)) { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "clk_notifier_register PASS for iva2_ck \n"); ++ } else { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "clk_notifier_register FAIL for iva2_ck \n"); ++ } ++ ++ /* ++ * When Smartreflex is ON, DSP requires at least OPP level 3 ++ * to operate reliably. So boost lower OPP levels to OPP3. 
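++		 * The minimum level applied here is taken from the
++		 * min_active_opp module parameter (default 3).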
++ */ ++ if (pdata->dsp_set_min_opp) ++ (*pdata->dsp_set_min_opp)(min_active_opp); ++#endif ++ driverContext = DSP_Init(&initStatus); ++ if (DSP_FAILED(initStatus)) { ++ status = -1; ++ GT_0trace(driverTrace, GT_7CLASS, ++ "DSP/BIOS Bridge initialization Failed\n"); ++ } else { ++ GT_0trace(driverTrace, GT_5CLASS, ++ "DSP/BIOS Bridge driver loaded\n"); ++ } ++ } ++ ++ DBC_Assert(status == 0); ++ DBC_Assert(DSP_SUCCEEDED(initStatus)); ++ GT_0trace(driverTrace, GT_ENTER, " <- driver_init\n"); ++ return status; ++} ++ ++static int __devexit omap34xx_bridge_remove(struct platform_device *pdev) ++{ ++ dev_t devno; ++ bool ret; ++ DSP_STATUS dsp_status = DSP_SOK; ++ HANDLE hDrvObject = NULL; ++ struct PROCESS_CONTEXT *pTmp = NULL; ++ struct PROCESS_CONTEXT *pCtxtclosed = NULL; ++ struct PROC_OBJECT *proc_obj_ptr, *temp; ++ ++ GT_0trace(driverTrace, GT_ENTER, "-> driver_exit\n"); ++ ++ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ if (DSP_FAILED(dsp_status)) ++ goto func_cont; ++ ++#ifdef CONFIG_BRIDGE_DVFS ++ if (!clk_notifier_unregister(clk_handle, &iva_clk_notifier)) { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "clk_notifier_unregister PASS for iva2_ck \n"); ++ } else { ++ GT_0trace(driverTrace, GT_7CLASS, ++ "clk_notifier_unregister FAILED for iva2_ck \n"); ++ } ++#endif /* #ifdef CONFIG_BRIDGE_DVFS */ ++ ++ DRV_GetProcCtxtList(&pCtxtclosed, (struct DRV_OBJECT *)hDrvObject); ++ while (pCtxtclosed != NULL) { ++ GT_1trace(driverTrace, GT_5CLASS, "***Cleanup of " ++ "process***%d\n", pCtxtclosed->pid); ++ DRV_RemoveAllResources(pCtxtclosed); ++ list_for_each_entry_safe(proc_obj_ptr, temp, ++ &pCtxtclosed->processor_list, proc_object) { ++ PROC_Detach(proc_obj_ptr, pCtxtclosed); ++ } ++ pTmp = pCtxtclosed->next; ++ DRV_RemoveProcContext((struct DRV_OBJECT *)hDrvObject, ++ pCtxtclosed); ++ pCtxtclosed = pTmp; ++ } ++ ++ if (driverContext) { ++ /* Put the DSP in reset state */ ++ ret = DSP_Deinit(driverContext); ++ driverContext = 0; ++ DBC_Assert(ret == true); ++ } ++ ++ clk_put(clk_handle); ++ clk_handle = NULL; ++ ++func_cont: ++ SERVICES_Exit(); ++ GT_exit(); ++ ++ devno = MKDEV(driver_major, driver_minor); ++ if (bridge_device) { ++ cdev_del(&bridge_device->cdev); ++ kfree(bridge_device); ++ } ++ unregister_chrdev_region(devno, 1); ++ if (bridge_class) { ++ /* remove the device from sysfs */ ++ device_destroy(bridge_class, MKDEV(driver_major, driver_minor)); ++ class_destroy(bridge_class); ++ ++ } ++ return 0; ++} ++ ++ ++#ifdef CONFIG_PM ++static int bridge_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ u32 status; ++ u32 command = PWR_EMERGENCYDEEPSLEEP; ++ ++ status = PWR_SleepDSP(command, timeOut); ++ if (DSP_FAILED(status)) ++ return -1; ++ ++ bridge_suspend_data.suspended = 1; ++ return 0; ++} ++ ++static int bridge_resume(struct platform_device *pdev) ++{ ++ u32 status; ++ ++ status = PWR_WakeDSP(timeOut); ++ if (DSP_FAILED(status)) ++ return -1; ++ ++ bridge_suspend_data.suspended = 0; ++ wake_up(&bridge_suspend_data.suspend_wq); ++ return 0; ++} ++#else ++#define bridge_suspend NULL ++#define bridge_resume NULL ++#endif ++ ++static struct platform_driver bridge_driver = { ++ .driver = { ++ .name = BRIDGE_NAME, ++ }, ++ .probe = omap34xx_bridge_probe, ++ .remove = __devexit_p(omap34xx_bridge_remove), ++ .suspend = bridge_suspend, ++ .resume = bridge_resume, ++}; ++ ++static int __init bridge_init(void) ++{ ++ return platform_driver_register(&bridge_driver); ++} ++ ++static void __exit bridge_exit(void) ++{ ++ 
platform_driver_unregister(&bridge_driver); ++} ++ ++/* ++ * This function is called when an application opens handle to the ++ * bridge driver. ++ */ ++static int bridge_open(struct inode *ip, struct file *filp) ++{ ++ int status = 0; ++ DSP_STATUS dsp_status; ++ HANDLE hDrvObject; ++ struct PROCESS_CONTEXT *pr_ctxt = NULL; ++ ++ GT_0trace(driverTrace, GT_ENTER, "-> bridge_open\n"); ++ ++ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ if (DSP_SUCCEEDED(dsp_status)) { ++ /* ++ * Allocate a new process context and insert it into global ++ * process context list. ++ */ ++ DRV_InsertProcContext(hDrvObject, &pr_ctxt); ++ if (pr_ctxt) { ++ DRV_ProcUpdatestate(pr_ctxt, PROC_RES_ALLOCATED); ++ DRV_ProcSetPID(pr_ctxt, current->tgid); ++ } else { ++ status = -ENOMEM; ++ } ++ } else { ++ status = -EIO; ++ } ++ ++ filp->private_data = pr_ctxt; ++ ++ GT_0trace(driverTrace, GT_ENTER, "<- bridge_open\n"); ++ return status; ++} ++ ++/* ++ * This function is called when an application closes handle to the bridge ++ * driver. ++ */ ++static int bridge_release(struct inode *ip, struct file *filp) ++{ ++ int status = 0; ++ DSP_STATUS dsp_status; ++ HANDLE hDrvObject; ++ struct PROCESS_CONTEXT *pr_ctxt; ++ struct PROC_OBJECT *proc_obj_ptr, *temp; ++ ++ GT_0trace(driverTrace, GT_ENTER, "-> bridge_release\n"); ++ ++ if (!filp->private_data) { ++ status = -EIO; ++ } else { ++ pr_ctxt = filp->private_data; ++ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ if (DSP_SUCCEEDED(dsp_status)) { ++ flush_signals(current); ++ DRV_RemoveAllResources(pr_ctxt); ++ list_for_each_entry_safe(proc_obj_ptr, temp, ++ &pr_ctxt->processor_list, ++ proc_object) { ++ PROC_Detach(proc_obj_ptr, pr_ctxt); ++ } ++ DRV_RemoveProcContext((struct DRV_OBJECT *)hDrvObject, ++ pr_ctxt); ++ } else { ++ status = -EIO; ++ } ++ filp->private_data = NULL; ++ } ++ ++ GT_0trace(driverTrace, GT_ENTER, "<- bridge_release\n"); ++ return status; ++} ++ ++/* This function provides IO interface to the bridge driver. */ ++static long bridge_ioctl(struct file *filp, unsigned int code, ++ unsigned long args) ++{ ++ int status; ++ u32 retval = DSP_SOK; ++ union Trapped_Args pBufIn; ++ ++ DBC_Require(filp != NULL); ++#ifdef CONFIG_PM ++ status = omap34xxbridge_suspend_lockout(&bridge_suspend_data, filp); ++ if (status != 0) ++ return status; ++#endif ++ ++ GT_0trace(driverTrace, GT_ENTER, " -> driver_ioctl\n"); ++ ++ /* Deduct one for the CMD_BASE. */ ++ code = (code - 1); ++ ++ status = copy_from_user(&pBufIn, (union Trapped_Args *)args, ++ sizeof(union Trapped_Args)); ++ ++ if (status >= 0) { ++ status = WCD_CallDevIOCtl(code, &pBufIn, &retval, ++ filp->private_data); ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = retval; ++ } else { ++ GT_1trace(driverTrace, GT_7CLASS, ++ "IOCTL Failed, code : 0x%x\n", code); ++ status = -1; ++ } ++ ++ } ++ ++ GT_0trace(driverTrace, GT_ENTER, " <- driver_ioctl\n"); ++ ++ return status; ++} ++ ++/* This function maps kernel space memory to user space memory. 
*/ ++static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++#if GT_TRACE ++ u32 offset = vma->vm_pgoff << PAGE_SHIFT; ++#endif ++ u32 status; ++ ++ DBC_Assert(vma->vm_start < vma->vm_end); ++ ++ vma->vm_flags |= VM_RESERVED | VM_IO; ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ ++ GT_6trace(driverTrace, GT_3CLASS, ++ "vm filp %p offset %lx start %lx end %lx" ++ " page_prot %lx flags %lx\n", filp, offset, vma->vm_start, ++ vma->vm_end, vma->vm_page_prot, vma->vm_flags); ++ ++ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, vma->vm_page_prot); ++ if (status != 0) ++ status = -EAGAIN; ++ ++ return status; ++} ++ ++#ifndef RES_CLEANUP_DISABLE ++/* To remove all process resources before removing the process from the ++ * process context list*/ ++DSP_STATUS DRV_RemoveAllResources(HANDLE hPCtxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; ++ if (pCtxt != NULL) { ++ DRV_RemoveAllSTRMResElements(pCtxt); ++ DRV_RemoveAllNodeResElements(pCtxt); ++ DRV_RemoveAllDMMResElements(pCtxt); ++ DRV_ProcUpdatestate(pCtxt, PROC_RES_FREED); ++ } ++ return status; ++} ++#endif ++ ++/* Bridge driver initialization and de-initialization functions */ ++module_init(bridge_init); ++module_exit(bridge_exit); ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/drv_interface.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/drv_interface.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,40 @@ ++/* ++ * drv_interface.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== drv_interface.h ======== ++ * ++ *! Revision History ++ *! ================ ++ *! 24-Mar-2003 vp Added hooks for Power Management Test ++ *! 18-Feb-2003 vp Code review updates ++ *! 
18-Oct-2002 sb Created initial version ++ ++ */ ++ ++#ifndef _DRV_INTERFACE_H_ ++#define _DRV_INTERFACE_H_ ++ ++/* Prototypes for all functions in this bridge */ ++static int __init bridge_init(void); /* Initialize bridge */ ++static void __exit bridge_exit(void); /* Opposite of initialize */ ++static int bridge_open(struct inode *, struct file *); /* Open */ ++static int bridge_release(struct inode *, struct file *); /* Release */ ++static long bridge_ioctl(struct file *, unsigned int, ++ unsigned long); ++static int bridge_mmap(struct file *filp, struct vm_area_struct *vma); ++#endif /* ifndef _DRV_INTERFACE_H_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dspdrv.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/dspdrv.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dspdrv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/dspdrv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,250 @@ ++/* ++ * dspdrv.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dspdrv.c ======== ++ * Description: ++ * Interface to allocate and free bridge resources. ++ * ++ *! Revision History ++ *! ================ ++ *! 12-Apr-2004 hp: Compile IVA only for 24xx. ++ *! 09-Feb-2004 vp: Updated to support IVA. ++ *! 10-Feb-2003 vp: Code review updates. ++ *! 18-oct-2002 vp: Ported to the Linux platform. ++ *! 03-Mar-2002 rr: DSP_Deinit bug fixed (gets the Mgrhandle from registry ++ *! before calling MGR_Destroy. ++ *! 11-Jul-2001 jeh Moved MGR_Create() from DSP_Init() to DEV_StartDevice(). ++ *! 02-Apr-2001 rr: WCD_InitComplete2 return value is not checked thus ++ *! sllowing the class driver to load irrespective of ++ *! the image load. ++ *! 30-Oct-2000 kc: Made changes w.r.t. usage of REG_SetValue. ++ *! 05-Oct-2000 rr: WCD_InitComplete2 return value checked for RM. ++ *! Failure in WCD_InitComplete2 will cause the ++ *! DSP_Init to fail. ++ *! 12-Aug-2000 kc: Changed REG_EnumValue to REG_EnumKey. ++ *! 07-Aug-2000 rr: MGR_Create does the job of loading the DCD Dll. ++ *! 26-Jul-2000 rr: Driver Object holds the DevNodeStrings for each ++ *! DevObjects. Static variables removed. Returns ++ *! the Driver Object in DSP_Init. ++ *! 17-Jul-2000 rr: Driver Object is created in DSP_Init and that holds ++ *! the list of Device objects. ++ *! 07-Jul-2000 rr: RM implementaion started. ++ *! 24-May-2000 ag: Cleaned up debug msgs. ++ *! 02-May-2000 rr: DSP_Open returns GetCallerProcess as dwOpenContext. ++ *! 03-Feb-2000 rr: GT Changes. ++ *! 28-Jan-2000 rr: Code Cleaned up.Type void changed to void. ++ *! DSP_Deinit checks return values.dwCode in ++ *! DSP_IO_CONTROL is decoded(not hard coded) ++ *! 27-Jan-2000 rr: REG_EnumValue Used .EnumerateKey fxn removed. ++ *! 13-Jan-2000 rr: CFG_GetPrivateDword renamed to CFG_GetDevObject. ++ *! 29-Dec-1999 rr: Code Cleaned up ++ *! 09-Dec-1999 rr: EnumerateKey changed for retail build. ++ *! 06-Dec-1999 rr: ArrayofInstalledNode, index and ArrayofInstalledDev ++ *! 
is Global.DevObject stores this pointer as hDevNode. ++ *! 02-Dec-1999 rr: DBG_SetGT and RetailMSG conditionally included. ++ *! Comments changed.Deinit handled.Code cleaned up. ++ *! DSP_IOControl, Close, Deinit returns bool values. ++ *! Calls WCD_InitComplete2 for Board AutoStart. ++ *! 29-Nov-1999 rr: DSP_IOControl returns the result through pBufOut. ++ *! Global Arrays keeps track of installed devices. ++ *! 19-Nov-1999 rr: DSP_Init handles multiple drivers. ++ *! 12-Nov-1999 rr: GetDriverKey and EnumerateKey functions added. ++ *! for multiple mini driver support.PCCARD flag ++ *! checking to include PCMCIA related stuff. ++ *! 25-Oct-1999 rr: GT_Init is called within the Process Attach. ++ *! return value initalized to S_OK upfront in the ++ *! Process Attach. ++ *! 15-Oct-1999 rr: DSP_DeInit handles the return values ++ *! 05-Oct-1999 rr: All the PCMCIA related functions are now in PCCARD.c ++ *! DRV_Request Resources is used instead of the ++ *! RegisterMiniDriver as it sounds close to what we are doing. ++ *! 24-Sep-1999 rr: DRV_RegisterMiniDriver is being called from here. Only ++ *! neccessaryPCMCIA fxns are here. Soon they will move out ++ *! either to a seperate file for bus specific inits. ++ *! 10-Sep-1999 rr: GT Enabled. Considerably changed the driver structure as ++ *! - This is the Class driver. After successfully initialized ++ *! the Class driver will attempt to load the Mini driver. ++ *! - Need to seperate the PCMCIA stuff based on bus type. ++ *! - Changed the name of the file to wcdce.c ++ *! - Made the Media Handle as Global again ++ *! ++ *! 19-Aug-1999 rr: Removed the Global hbhMediaHandle. Included the MemTest. ++ *! Modified the DSP_Init, now three windows are opened. ++ *! Split the driver into PDD so that hardware dependent ++ *! functions will reside in PDD. ++ *! 16-Jul-1999 ag Adapted from rkw's CAC Bullet card driver. ++ *! ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Globals */ ++struct GT_Mask curTrace; ++ ++/* ++ * ======== DSP_Init ======== ++ * Allocates bridge resources. Loads a base image onto DSP, if specified. 
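++ *      Returns the DRV_OBJECT handle (cast to u32) that must later be
++ *      passed to DSP_Deinit(); the DSP_STATUS result is reported through
++ *      *initStatus.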
++ */ ++u32 DSP_Init(OUT u32 *initStatus) ++{ ++ char devNode[MAXREGPATHLENGTH] = "TIOMAP1510"; ++ DSP_STATUS status = DSP_EFAIL; ++ struct DRV_OBJECT *drvObject = NULL; ++ u32 index = 0; ++ u32 deviceNode; ++ u32 deviceNodeString; ++ ++ GT_create(&curTrace, "DD"); ++ ++ GT_0trace(curTrace, GT_ENTER, "Entering DSP_Init \r\n"); ++ ++ if (DSP_FAILED(WCD_Init())) { ++ GT_0trace(curTrace, GT_7CLASS, "DSP_Init Failed \n"); ++ goto func_cont; ++ } /* End WCD_Exit */ ++ if (DSP_FAILED(DRV_Create(&drvObject))) { ++ GT_0trace(curTrace, GT_7CLASS, "DSP_Init:DRV_Create Failed \n"); ++ WCD_Exit(); ++ goto func_cont; ++ } /* End DRV_Create */ ++ GT_0trace(curTrace, GT_5CLASS, "DSP_Init:DRV Created \r\n"); ++ ++ /* Request Resources */ ++ if (DSP_SUCCEEDED(DRV_RequestResources((u32)&devNode, ++ &deviceNodeString))) { ++ /* Attempt to Start the Device */ ++ if (DSP_SUCCEEDED(DEV_StartDevice( ++ (struct CFG_DEVNODE *)deviceNodeString))) { ++ /* Retreive the DevObject from the Registry */ ++ GT_2trace(curTrace, GT_1CLASS, ++ "DSP_Init Succeeded for Device1:" ++ "%d: value: %x\n", index, deviceNodeString); ++ status = DSP_SOK; ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, ++ "DSP_Init:DEV_StartDevice Failed\n"); ++ (void)DRV_ReleaseResources ++ ((u32) deviceNodeString, drvObject); ++ status = DSP_EFAIL; ++ } ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, ++ "DSP_Init:DRV_RequestResources Failed \r\n"); ++ status = DSP_EFAIL; ++ } /* DRV_RequestResources */ ++ index++; ++ ++ /* Unwind whatever was loaded */ ++ if (DSP_FAILED(status)) { ++ /* irrespective of the status of DEV_RemoveDevice we conitinue ++ * unloading. Get the Driver Object iterate through and remove. ++ * Reset the status to E_FAIL to avoid going through ++ * WCD_InitComplete2. */ ++ status = DSP_EFAIL; ++ for (deviceNode = DRV_GetFirstDevExtension(); deviceNode != 0; ++ deviceNode = DRV_GetNextDevExtension(deviceNode)) { ++ (void)DEV_RemoveDevice ++ ((struct CFG_DEVNODE *)deviceNode); ++ (void)DRV_ReleaseResources((u32)deviceNode, ++ drvObject); ++ } ++ /* Remove the Driver Object */ ++ (void)DRV_Destroy(drvObject); ++ drvObject = NULL; ++ WCD_Exit(); ++ GT_0trace(curTrace, GT_7CLASS, ++ "DSP_Init:Logical device Failed to Load\n"); ++ } /* Unwinding the loaded drivers */ ++func_cont: ++ /* Attempt to Start the Board */ ++ if (DSP_SUCCEEDED(status)) { ++ /* BRD_AutoStart could fail if the dsp execuetable is not the ++ * correct one. We should not propagate that error ++ * into the device loader. */ ++ (void)WCD_InitComplete2(); ++ GT_0trace(curTrace, GT_1CLASS, "DSP_Init Succeeded\n"); ++ } else { ++ GT_0trace(curTrace, GT_7CLASS, "DSP_Init Failed\n"); ++ } /* End WCD_InitComplete2 */ ++ DBC_Ensure((DSP_SUCCEEDED(status) && drvObject != NULL) || ++ (DSP_FAILED(status) && drvObject == NULL)); ++ *initStatus = status; ++ /* Return the Driver Object */ ++ return (u32)drvObject; ++} ++ ++/* ++ * ======== DSP_Deinit ======== ++ * Frees the resources allocated for bridge. 
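++ *      deviceContext must be the driver object handle previously
++ *      returned by DSP_Init().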
++ */ ++bool DSP_Deinit(u32 deviceContext) ++{ ++ bool retVal = true; ++ u32 deviceNode; ++ struct MGR_OBJECT *mgrObject = NULL; ++ ++ GT_0trace(curTrace, GT_ENTER, "Entering DSP_Deinit \r\n"); ++ ++ while ((deviceNode = DRV_GetFirstDevExtension()) != 0) { ++ (void)DEV_RemoveDevice((struct CFG_DEVNODE *)deviceNode); ++ ++ (void)DRV_ReleaseResources((u32)deviceNode, ++ (struct DRV_OBJECT *)deviceContext); ++ } ++ ++ (void) DRV_Destroy((struct DRV_OBJECT *) deviceContext); ++ ++ /* Get the Manager Object from Registry ++ * MGR Destroy will unload the DCD dll */ ++ if (DSP_SUCCEEDED(CFG_GetObject((u32 *)&mgrObject, REG_MGR_OBJECT))) ++ (void)MGR_Destroy(mgrObject); ++ ++ WCD_Exit(); ++ ++ return retVal; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/mgr.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/mgr.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/mgr.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/mgr.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,491 @@ ++/* ++ * mgr.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== mgr.c ======== ++ * Description: ++ * Implementation of Manager interface to the device object at the ++ * driver level. This queries the NDB data base and retrieves the ++ * data about Node and Processor. ++ * ++ * ++ *! Revision History: ++ *! ================ ++ *! 12-Feb-2003 vp: Code review updates. ++ *! 18-Oct-2002 vp: Ported to Linux platform ++ *! 01-Aug-2001 ag: Added extended info for DSP-MMU setup support. ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. ++ *! 22-Nov-2000 kc: Added MGR_GetPerfData. ++ *! 03-Nov-2000 rr: Updated after code review. ++ *! 25-Sep-2000 rr: Updated to Version 0.9 ++ *! 10-Aug-2000 rr: dwSignature is not specifically inserted in MGR Obj ++ *! as it is taken care by MEM_AllocObject. stdwin.h added ++ *! for retail build to succeed. ++ *! 07-Aug-2000 rr: MGR_Create does the job of Loading DCD Dll. ++ *! 26-Jul-2000 rr: MGR_Destroy releases the hNDBDll. ++ *! 20-Jun-2000 rr: Created. 
++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define ZLDLLNAME "" ++#define SIGNATURE 0x5f52474d /* "MGR_" (in reverse) */ ++ ++struct MGR_OBJECT { ++ u32 dwSignature; ++ struct DCD_MANAGER *hDcdMgr; /* Proc/Node data manager */ ++}; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask MGR_DebugMask = { NULL, NULL }; ++#endif ++ ++static u32 cRefs; ++ ++/* ++ * ========= MGR_Create ========= ++ * Purpose: ++ * MGR Object gets created only once during driver Loading. ++ */ ++DSP_STATUS MGR_Create(OUT struct MGR_OBJECT **phMgrObject, ++ struct CFG_DEVNODE *hDevNode) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct MGR_OBJECT *pMgrObject = NULL; ++ ++ DBC_Require(phMgrObject != NULL); ++ DBC_Require(cRefs > 0); ++ GT_1trace(MGR_DebugMask, GT_ENTER, ++ "Entering MGR_Create phMgrObject 0x%x\n ", ++ phMgrObject); ++ MEM_AllocObject(pMgrObject, struct MGR_OBJECT, SIGNATURE); ++ if (pMgrObject) { ++ if (DSP_SUCCEEDED(DCD_CreateManager(ZLDLLNAME, ++ &pMgrObject->hDcdMgr))) { ++ /* If succeeded store the handle in the MGR Object */ ++ if (DSP_SUCCEEDED(CFG_SetObject((u32)pMgrObject, ++ REG_MGR_OBJECT))) { ++ *phMgrObject = pMgrObject; ++ GT_0trace(MGR_DebugMask, GT_1CLASS, ++ "MGR_Create:MGR Created\r\n"); ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "MGR_Create:CFG_SetObject " ++ "Failed\r\n"); ++ DCD_DestroyManager(pMgrObject->hDcdMgr); ++ MEM_FreeObject(pMgrObject); ++ } ++ } else { ++ /* failed to Create DCD Manager */ ++ status = DSP_EFAIL; ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "MGR_Create:DCD_ManagerCreate Failed\r\n"); ++ MEM_FreeObject(pMgrObject); ++ } ++ } else { ++ status = DSP_EMEMORY; ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "MGR_Create DSP_FAILED to allocate memory \n"); ++ } ++ GT_2trace(MGR_DebugMask, GT_ENTER, ++ "Exiting MGR_Create: phMgrObject: 0x%x\t" ++ "status: 0x%x\n", phMgrObject, status); ++ DBC_Ensure(DSP_FAILED(status) || ++ MEM_IsValidHandle(pMgrObject, SIGNATURE)); ++ return status; ++} ++ ++/* ++ * ========= MGR_Destroy ========= ++ * This function is invoked during bridge driver unloading.Frees MGR object. 
++ */ ++DSP_STATUS MGR_Destroy(struct MGR_OBJECT *hMgrObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct MGR_OBJECT *pMgrObject = (struct MGR_OBJECT *)hMgrObject; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hMgrObject, SIGNATURE)); ++ ++ GT_1trace(MGR_DebugMask, GT_ENTER, ++ "Entering MGR_Destroy hMgrObject 0x%x\n", hMgrObject); ++ /* Free resources */ ++ if (hMgrObject->hDcdMgr) ++ DCD_DestroyManager(hMgrObject->hDcdMgr); ++ ++ MEM_FreeObject(pMgrObject); ++ /* Update the Registry with NULL for MGR Object */ ++ (void)CFG_SetObject(0, REG_MGR_OBJECT); ++ ++ GT_2trace(MGR_DebugMask, GT_ENTER, ++ "Exiting MGR_Destroy: hMgrObject: 0x%x\t" ++ "status: 0x%x\n", hMgrObject, status); ++ ++ DBC_Ensure(DSP_FAILED(status) || ++ !MEM_IsValidHandle(hMgrObject, SIGNATURE)); ++ ++ return status; ++} ++ ++/* ++ * ======== MGR_EnumNodeInfo ======== ++ * Enumerate and get configuration information about nodes configured ++ * in the node database. ++ */ ++DSP_STATUS MGR_EnumNodeInfo(u32 uNode, OUT struct DSP_NDBPROPS *pNDBProps, ++ u32 uNDBPropsSize, OUT u32 *puNumNodes) ++{ ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_SOK; ++ struct DSP_UUID Uuid, uTempUuid; ++ u32 uTempIndex = 0; ++ u32 uNodeIndex = 0; ++ struct DCD_GENERICOBJ GenObj; ++ struct MGR_OBJECT *pMgrObject = NULL; ++ ++ DBC_Require(pNDBProps != NULL); ++ DBC_Require(puNumNodes != NULL); ++ DBC_Require(uNDBPropsSize >= sizeof(struct DSP_NDBPROPS)); ++ DBC_Require(cRefs > 0); ++ ++ GT_4trace(MGR_DebugMask, GT_ENTER, "Entered Manager_EnumNodeInfo, " ++ "args:\n\t uNode: 0x%x\n\tpNDBProps: 0x%x\n\tuNDBPropsSize:" ++ "0x%x\tpuNumNodes: 0x%x\n", uNode, pNDBProps, ++ uNDBPropsSize, puNumNodes); ++ *puNumNodes = 0; ++ /* Get The Manager Object from the Registry */ ++ if (DSP_FAILED(CFG_GetObject((u32 *)&pMgrObject, ++ REG_MGR_OBJECT))) { ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumNodeInfo:Failed To Get" ++ " MGR Object from Registry\r\n"); ++ goto func_cont; ++ } ++ DBC_Assert(MEM_IsValidHandle(pMgrObject, SIGNATURE)); ++ /* Forever loop till we hit failed or no more items in the ++ * Enumeration. We will exit the loop other than DSP_SOK; */ ++ while (status == DSP_SOK) { ++ status = DCD_EnumerateObject(uTempIndex++, DSP_DCDNODETYPE, ++ &uTempUuid); ++ if (status == DSP_SOK) { ++ uNodeIndex++; ++ if (uNode == (uNodeIndex - 1)) ++ Uuid = uTempUuid; ++ ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ if (uNode > (uNodeIndex - 1)) { ++ status = DSP_EINVALIDARG; ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumNodeInfo: uNode" ++ " is Invalid \r\n"); ++ } else { ++ status1 = DCD_GetObjectDef(pMgrObject->hDcdMgr, ++ (struct DSP_UUID *)&Uuid, ++ DSP_DCDNODETYPE, &GenObj); ++ if (DSP_SUCCEEDED(status1)) { ++ /* Get the Obj def */ ++ *pNDBProps = GenObj.objData.nodeObj.ndbProps; ++ *puNumNodes = uNodeIndex; ++ status = DSP_SOK; ++ } else { ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumNodeInfo: " ++ "Failed to Get Node Info \r\n"); ++ status = DSP_EFAIL; ++ } ++ } ++ } else { ++ /* This could be changed during enum, EFAIL ... 
*/ ++ GT_0trace(MGR_DebugMask, GT_7CLASS, "Manager_EnumNodeInfo: " ++ "Enumeration failure\r\n"); ++ status = DSP_EFAIL; ++ } ++func_cont: ++ GT_4trace(MGR_DebugMask, GT_ENTER, ++ "Exiting Manager_EnumNodeInfo, args:\n\t" ++ "uNode: 0x%x\n\tpNDBProps: 0x%x\n\tuNDBPropsSize:" ++ " 0x%x\tuNumNodes: 0x%x\n", uNode, pNDBProps, ++ uNDBPropsSize, *puNumNodes); ++ DBC_Ensure((DSP_SUCCEEDED(status) && *puNumNodes > 0) || ++ (DSP_FAILED(status) && *puNumNodes == 0)); ++ ++ return status; ++} ++ ++/* ++ * ======== MGR_EnumProcessorInfo ======== ++ * Enumerate and get configuration information about available ++ * DSP processors. ++ */ ++DSP_STATUS MGR_EnumProcessorInfo(u32 uProcessor, ++ OUT struct DSP_PROCESSORINFO *pProcessorInfo, ++ u32 uProcessorInfoSize, OUT u32 *puNumProcs) ++{ ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_SOK; ++ DSP_STATUS status2 = DSP_SOK; ++ struct DSP_UUID uTempUuid; ++ u32 uTempIndex = 0; ++ u32 uProcIndex = 0; ++ struct DCD_GENERICOBJ GenObj; ++ struct MGR_OBJECT *pMgrObject = NULL; ++ struct MGR_PROCESSOREXTINFO *pExtInfo; ++ struct DEV_OBJECT *hDevObject; ++ struct DRV_OBJECT *hDrvObject; ++ s32 devType; ++ struct CFG_DEVNODE *devNode; ++ struct CFG_DSPRES chipResources; ++ bool procDetect = false; ++ ++ DBC_Require(pProcessorInfo != NULL); ++ DBC_Require(puNumProcs != NULL); ++ DBC_Require(uProcessorInfoSize >= sizeof(struct DSP_PROCESSORINFO)); ++ DBC_Require(cRefs > 0); ++ ++ GT_4trace(MGR_DebugMask, GT_ENTER, ++ "Entered Manager_EnumProcessorInfo, " ++ "args:\n\tuProcessor: 0x%x\n\tpProcessorInfo: 0x%x\n\t" ++ "uProcessorInfoSize: 0x%x\tpuNumProcs: 0x%x\n", uProcessor, ++ pProcessorInfo, uProcessorInfoSize, puNumProcs); ++ *puNumProcs = 0; ++ status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ if (DSP_SUCCEEDED(status)) { ++ status = DRV_GetDevObject(uProcessor, hDrvObject, &hDevObject); ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetDevType(hDevObject, (u32 *) &devType); ++ status = DEV_GetDevNode(hDevObject, &devNode); ++ if (devType == DSP_UNIT) { ++ status = CFG_GetDSPResources(devNode, ++ &chipResources); ++ } else { ++ status = DSP_EFAIL; ++ GT_1trace(MGR_DebugMask, GT_7CLASS, ++ "Unsupported dev type gotten" ++ "from device object %d\n", devType); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pProcessorInfo->uProcessorType = ++ chipResources.uChipType; ++ } ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* Get The Manager Object from the Registry */ ++ if (DSP_FAILED(CFG_GetObject((u32 *)&pMgrObject, ++ REG_MGR_OBJECT))) { ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumProcessorInfo: " ++ "Failed To Get MGR Object from Registry\r\n"); ++ goto func_end; ++ } ++ DBC_Assert(MEM_IsValidHandle(pMgrObject, SIGNATURE)); ++ /* Forever loop till we hit no more items in the ++ * Enumeration. 
We will exit the loop other than DSP_SOK; */ ++ while (status1 == DSP_SOK) { ++ status1 = DCD_EnumerateObject(uTempIndex++, ++ DSP_DCDPROCESSORTYPE, ++ &uTempUuid); ++ if (status1 != DSP_SOK) ++ break; ++ ++ uProcIndex++; ++ /* Get the Object properties to find the Device/Processor ++ * Type */ ++ if (procDetect != false) ++ continue; ++ ++ status2 = DCD_GetObjectDef(pMgrObject->hDcdMgr, ++ (struct DSP_UUID *)&uTempUuid, ++ DSP_DCDPROCESSORTYPE, ++ &GenObj); ++ if (DSP_SUCCEEDED(status2)) { ++ /* Get the Obj def */ ++ if (uProcessorInfoSize < ++ sizeof(struct MGR_PROCESSOREXTINFO)) { ++ *pProcessorInfo = GenObj.objData.procObj; ++ } else { ++ /* extended info */ ++ pExtInfo = (struct MGR_PROCESSOREXTINFO *) ++ pProcessorInfo; ++ *pExtInfo = GenObj.objData.extProcObj; ++ } ++ GT_1trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumProcessorInfo: Got" ++ " Proctype from DCD %x \r\n", ++ pProcessorInfo->uProcessorType); ++ /* See if we got the needed processor */ ++ if (devType == DSP_UNIT) { ++ if (pProcessorInfo->uProcessorType == ++ DSPPROCTYPE_C64) ++ procDetect = true; ++ } else if (devType == IVA_UNIT) { ++ if (pProcessorInfo->uProcessorType == ++ IVAPROCTYPE_ARM7) ++ procDetect = true; ++ } ++ /* User applciatiuons aonly check for chip type, so ++ * this clumsy overwrite */ ++ pProcessorInfo->uProcessorType = ++ chipResources.uChipType; ++ } else { ++ GT_1trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumProcessorInfo: " ++ "Failed to Get DCD Processor Info %x \r\n", ++ status2); ++ status = DSP_EFAIL; ++ } ++ } ++ *puNumProcs = uProcIndex; ++ if (procDetect == false) { ++ GT_0trace(MGR_DebugMask, GT_7CLASS, ++ "Manager_EnumProcessorInfo: Failed" ++ " to get Proc info from DCD , so use CFG registry\n"); ++ pProcessorInfo->uProcessorType = chipResources.uChipType; ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== MGR_Exit ======== ++ * Decrement reference count, and free resources when reference count is ++ * 0. ++ */ ++void MGR_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ cRefs--; ++ if (cRefs == 0) ++ DCD_Exit(); ++ ++ GT_1trace(MGR_DebugMask, GT_5CLASS, ++ "Entered MGR_Exit, ref count: 0x%x\n", cRefs); ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== MGR_GetDCDHandle ======== ++ * Retrieves the MGR handle. Accessor Function. ++ */ ++DSP_STATUS MGR_GetDCDHandle(struct MGR_OBJECT *hMGRHandle, ++ OUT u32 *phDCDHandle) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct MGR_OBJECT *pMgrObject = (struct MGR_OBJECT *)hMGRHandle; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDCDHandle != NULL); ++ ++ *phDCDHandle = (u32)NULL; ++ if (MEM_IsValidHandle(pMgrObject, SIGNATURE)) { ++ *phDCDHandle = (u32) pMgrObject->hDcdMgr; ++ status = DSP_SOK; ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && *phDCDHandle != (u32)NULL) || ++ (DSP_FAILED(status) && *phDCDHandle == (u32)NULL)); ++ ++ return status; ++} ++ ++/* ++ * ======== MGR_Init ======== ++ * Initialize MGR's private state, keeping a reference count on each call. 
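++ *      The DCD module is initialised on the first call and is released
++ *      again once MGR_Exit() has brought the reference count back to zero.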
++ */ ++bool MGR_Init(void) ++{ ++ bool fRetval = true; ++ bool fInitDCD = false; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ ++ /* Set the Trace mask */ ++ DBC_Assert(!MGR_DebugMask.flags); ++ ++ GT_create(&MGR_DebugMask, "MG"); /* "MG" for Manager */ ++ fInitDCD = DCD_Init(); /* DCD Module */ ++ ++ if (!fInitDCD) { ++ fRetval = false; ++ GT_0trace(MGR_DebugMask, GT_6CLASS, ++ "MGR_Init failed\n"); ++ } ++ } ++ ++ if (fRetval) ++ cRefs++; ++ ++ ++ GT_1trace(MGR_DebugMask, GT_5CLASS, ++ "Entered MGR_Init, ref count: 0x%x\n", cRefs); ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} ++ ++/* ++ * ======== MGR_WaitForBridgeEvents ======== ++ * Block on any Bridge event(s) ++ */ ++DSP_STATUS MGR_WaitForBridgeEvents(struct DSP_NOTIFICATION **aNotifications, ++ u32 uCount, OUT u32 *puIndex, u32 uTimeout) ++{ ++ DSP_STATUS status; ++ struct SYNC_OBJECT *hSyncEvents[MAX_EVENTS]; ++ u32 i; ++ ++ DBC_Require(uCount < MAX_EVENTS); ++ ++ for (i = 0; i < uCount; i++) ++ hSyncEvents[i] = aNotifications[i]->handle; ++ ++ status = SYNC_WaitOnMultipleEvents(hSyncEvents, uCount, uTimeout, ++ puIndex); ++ ++ return status; ++ ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/nldr.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/nldr.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/nldr.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/nldr.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1967 @@ ++/* ++ * nldr.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== nldr.c ======== ++ * Description: ++ * DSP/BIOS Bridge dynamic + overlay Node loader. ++ * ++ * Public Functions: ++ * NLDR_Allocate ++ * NLDR_Create ++ * NLDR_Delete ++ * NLDR_Exit ++ * NLDR_Free ++ * NLDR_GetFxnAddr ++ * NLDR_Init ++ * NLDR_Load ++ * NLDR_Unload ++ * ++ * Notes: ++ * ++ *! Revision History ++ *! ================ ++ *! 07-Apr-2003 map Removed references to dead DLDR module ++ *! 23-Jan-2003 map Updated RemoteAlloc to support memory granularity ++ *! 20-Jan-2003 map Updated to maintain persistent dependent libraries ++ *! 15-Jan-2003 map Adapted for use with multiple dynamic phase libraries ++ *! 19-Dec-2002 map Fixed overlay bug in AddOvlySect for overlay ++ *! sections > 1024 bytes. ++ *! 13-Dec-2002 map Fixed NLDR_GetFxnAddr bug by searching dependent ++ *! libs for symbols ++ *! 27-Sep-2002 map Added RemoteFree to convert size to words for ++ *! correct deallocation ++ *! 16-Sep-2002 map Code Review Cleanup(from dldr.c) ++ *! 29-Aug-2002 map Adjusted for ARM-side overlay copy ++ *! 05-Aug-2002 jeh Created. 
++ */ ++ ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++#ifdef DEBUG ++#include ++#endif ++ ++/* OS adaptation layer */ ++#include ++#include ++ ++/* Platform manager */ ++#include ++#include ++ ++/* Resource manager */ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define NLDR_SIGNATURE 0x52444c4e /* "RDLN" */ ++#define NLDR_NODESIGNATURE 0x4e444c4e /* "NDLN" */ ++ ++/* Name of section containing dynamic load mem */ ++#define DYNMEMSECT ".dspbridge_mem" ++ ++/* Name of section containing dependent library information */ ++#define DEPLIBSECT ".dspbridge_deplibs" ++ ++/* Max depth of recursion for loading node's dependent libraries */ ++#define MAXDEPTH 5 ++ ++/* Max number of persistent libraries kept by a node */ ++#define MAXLIBS 5 ++ ++/* ++ * Defines for extracting packed dynamic load memory requirements from two ++ * masks. ++ * These defines must match node.cdb and dynm.cdb ++ * Format of data/code mask is: ++ * uuuuuuuu|fueeeeee|fudddddd|fucccccc| ++ * where ++ * u = unused ++ * cccccc = prefered/required dynamic mem segid for create phase data/code ++ * dddddd = prefered/required dynamic mem segid for delete phase data/code ++ * eeeeee = prefered/req. dynamic mem segid for execute phase data/code ++ * f = flag indicating if memory is preferred or required: ++ * f = 1 if required, f = 0 if preferred. ++ * ++ * The 6 bits of the segid are interpreted as follows: ++ * ++ * If the 6th bit (bit 5) is not set, then this specifies a memory segment ++ * between 0 and 31 (a maximum of 32 dynamic loading memory segments). ++ * If the 6th bit (bit 5) is set, segid has the following interpretation: ++ * segid = 32 - Any internal memory segment can be used. ++ * segid = 33 - Any external memory segment can be used. ++ * segid = 63 - Any memory segment can be used (in this case the ++ * required/preferred flag is irrelevant). ++ * ++ */ ++/* Maximum allowed dynamic loading memory segments */ ++#define MAXMEMSEGS 32 ++ ++#define MAXSEGID 3 /* Largest possible (real) segid */ ++#define MEMINTERNALID 32 /* Segid meaning use internal mem */ ++#define MEMEXTERNALID 33 /* Segid meaning use external mem */ ++#define NULLID 63 /* Segid meaning no memory req/pref */ ++#define FLAGBIT 7 /* 7th bit is pref./req. flag */ ++#define SEGMASK 0x3f /* Bits 0 - 5 */ ++ ++#define CREATEBIT 0 /* Create segid starts at bit 0 */ ++#define DELETEBIT 8 /* Delete segid starts at bit 8 */ ++#define EXECUTEBIT 16 /* Execute segid starts at bit 16 */ ++ ++/* ++ * Masks that define memory type. Must match defines in dynm.cdb. ++ */ ++#define DYNM_CODE 0x2 ++#define DYNM_DATA 0x4 ++#define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA) ++#define DYNM_INTERNAL 0x8 ++#define DYNM_EXTERNAL 0x10 ++ ++/* ++ * Defines for packing memory requirement/preference flags for code and ++ * data of each of the node's phases into one mask. ++ * The bit is set if the segid is required for loading code/data of the ++ * given phase. The bit is not set, if the segid is preferred only. ++ * ++ * These defines are also used as indeces into a segid array for the node. ++ * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the ++ * create phase data is required or preferred to be loaded into. 
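++ *
++ *  Illustrative decode (example values only, not taken from any real
++ *  node configuration): with ulDataMemSegMask = 0x00018001,
++ *  (mask >> CREATEBIT) & SEGMASK = 1 and bit (CREATEBIT + FLAGBIT) is
++ *  clear, so create phase data merely prefers segment 1, while
++ *  (mask >> DELETEBIT) & SEGMASK = 0 with bit (DELETEBIT + FLAGBIT) set,
++ *  so delete phase data is required to be loaded into segment 0.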
++ */ ++#define CREATEDATAFLAGBIT 0 ++#define CREATECODEFLAGBIT 1 ++#define EXECUTEDATAFLAGBIT 2 ++#define EXECUTECODEFLAGBIT 3 ++#define DELETEDATAFLAGBIT 4 ++#define DELETECODEFLAGBIT 5 ++#define MAXFLAGS 6 ++ ++#define IsInternal(hNldr, segid) (((segid) <= MAXSEGID && \ ++ hNldr->segTable[(segid)] & DYNM_INTERNAL) || \ ++ (segid) == MEMINTERNALID) ++ ++#define IsExternal(hNldr, segid) (((segid) <= MAXSEGID && \ ++ hNldr->segTable[(segid)] & DYNM_EXTERNAL) || \ ++ (segid) == MEMEXTERNALID) ++ ++#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \ ++ (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF)) ++ ++#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF)) ++ ++ /* ++ * These names may be embedded in overlay sections to identify which ++ * node phase the section should be overlayed. ++ */ ++#define PCREATE "create" ++#define PDELETE "delete" ++#define PEXECUTE "execute" ++ ++#define IsEqualUUID(uuid1, uuid2) (\ ++ ((uuid1).ulData1 == (uuid2).ulData1) && \ ++ ((uuid1).usData2 == (uuid2).usData2) && \ ++ ((uuid1).usData3 == (uuid2).usData3) && \ ++ ((uuid1).ucData4 == (uuid2).ucData4) && \ ++ ((uuid1).ucData5 == (uuid2).ucData5) && \ ++ (strncmp((void *)(uuid1).ucData6, (void *)(uuid2).ucData6, 6)) == 0) ++ ++ /* ++ * ======== MemInfo ======== ++ * Format of dynamic loading memory segment info in coff file. ++ * Must match dynm.h55. ++ */ ++struct MemInfo { ++ u32 segid; /* Dynamic loading memory segment number */ ++ u32 base; ++ u32 len; ++ u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */ ++}; ++ ++/* ++ * ======== LibNode ======== ++ * For maintaining a tree of library dependencies. ++ */ ++struct LibNode { ++ struct DBLL_LibraryObj *lib; /* The library */ ++ u16 nDepLibs; /* Number of dependent libraries */ ++ struct LibNode *pDepLibs; /* Dependent libraries of lib */ ++}; ++ ++/* ++ * ======== OvlySect ======== ++ * Information needed to overlay a section. ++ */ ++struct OvlySect { ++ struct OvlySect *pNextSect; ++ u32 loadAddr; /* Load address of section */ ++ u32 runAddr; /* Run address of section */ ++ u32 size; /* Size of section */ ++ u16 page; /* DBL_CODE, DBL_DATA */ ++}; ++ ++/* ++ * ======== OvlyNode ======== ++ * For maintaining a list of overlay nodes, with sections that need to be ++ * overlayed for each of the nodes phases. ++ */ ++struct OvlyNode { ++ struct DSP_UUID uuid; ++ char *pNodeName; ++ struct OvlySect *pCreateSects; ++ struct OvlySect *pDeleteSects; ++ struct OvlySect *pExecuteSects; ++ struct OvlySect *pOtherSects; ++ u16 nCreateSects; ++ u16 nDeleteSects; ++ u16 nExecuteSects; ++ u16 nOtherSects; ++ u16 createRef; ++ u16 deleteRef; ++ u16 executeRef; ++ u16 otherRef; ++}; ++ ++/* ++ * ======== NLDR_OBJECT ======== ++ * Overlay loader object. 
++ */ ++struct NLDR_OBJECT { ++ u32 dwSignature; /* For object validation */ ++ struct DEV_OBJECT *hDevObject; /* Device object */ ++ struct DCD_MANAGER *hDcdMgr; /* Proc/Node data manager */ ++ struct DBLL_TarObj *dbll; /* The DBL loader */ ++ struct DBLL_LibraryObj *baseLib; /* Base image library */ ++ struct RMM_TargetObj *rmm; /* Remote memory manager for DSP */ ++ struct DBLL_Fxns dbllFxns; /* Loader function table */ ++ struct DBLL_Attrs dbllAttrs; /* attrs to pass to loader functions */ ++ NLDR_OVLYFXN ovlyFxn; /* "write" for overlay nodes */ ++ NLDR_WRITEFXN writeFxn; /* "write" for dynamic nodes */ ++ struct OvlyNode *ovlyTable; /* Table of overlay nodes */ ++ u16 nOvlyNodes; /* Number of overlay nodes in base */ ++ u16 nNode; /* Index for tracking overlay nodes */ ++ u16 nSegs; /* Number of dynamic load mem segs */ ++ u32 *segTable; /* memtypes of dynamic memory segs ++ * indexed by segid ++ */ ++ u16 usDSPMauSize; /* Size of DSP MAU */ ++ u16 usDSPWordSize; /* Size of DSP word */ ++}; ++ ++/* ++ * ======== NLDR_NODEOBJECT ======== ++ * Dynamic node object. This object is created when a node is allocated. ++ */ ++struct NLDR_NODEOBJECT { ++ u32 dwSignature; /* For object validation */ ++ struct NLDR_OBJECT *pNldr; /* Dynamic loader handle */ ++ void *pPrivRef; /* Handle to pass to DBL_WriteFxn */ ++ struct DSP_UUID uuid; /* Node's UUID */ ++ bool fDynamic; /* Dynamically loaded node? */ ++ bool fOverlay; /* Overlay node? */ ++ bool *pfPhaseSplit; /* Multiple phase libraries? */ ++ struct LibNode root; /* Library containing node phase */ ++ struct LibNode createLib; /* Library containing create phase lib */ ++ struct LibNode executeLib; /* Library containing execute phase lib */ ++ struct LibNode deleteLib; /* Library containing delete phase lib */ ++ struct LibNode persLib[MAXLIBS]; /* libs remain loaded until Delete */ ++ s32 nPersLib; /* Number of persistent libraries */ ++ /* Path in lib dependency tree */ ++ struct DBLL_LibraryObj *libPath[MAXDEPTH + 1]; ++ enum NLDR_PHASE phase; /* Node phase currently being loaded */ ++ ++ /* ++ * Dynamic loading memory segments for data and code of each phase. ++ */ ++ u16 segId[MAXFLAGS]; ++ ++ /* ++ * Mask indicating whether each mem segment specified in segId[] ++ * is preferred or required. ++ * For example if (codeDataFlagMask & (1 << EXECUTEDATAFLAGBIT)) != 0, ++ * then it is required to load execute phase data into the memory ++ * specified by segId[EXECUTEDATAFLAGBIT]. 
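++	 *  A segid of NULLID means the corresponding phase has no memory
++	 *  requirement or preference at all.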
++ */ ++ u32 codeDataFlagMask; ++}; ++ ++/* Dynamic loader function table */ ++static struct DBLL_Fxns dbllFxns = { ++ (DBLL_CloseFxn) DBLL_close, ++ (DBLL_CreateFxn) DBLL_create, ++ (DBLL_DeleteFxn) DBLL_delete, ++ (DBLL_ExitFxn) DBLL_exit, ++ (DBLL_GetAttrsFxn) DBLL_getAttrs, ++ (DBLL_GetAddrFxn) DBLL_getAddr, ++ (DBLL_GetCAddrFxn) DBLL_getCAddr, ++ (DBLL_GetSectFxn) DBLL_getSect, ++ (DBLL_InitFxn) DBLL_init, ++ (DBLL_LoadFxn) DBLL_load, ++ (DBLL_LoadSectFxn) DBLL_loadSect, ++ (DBLL_OpenFxn) DBLL_open, ++ (DBLL_ReadSectFxn) DBLL_readSect, ++ (DBLL_SetAttrsFxn) DBLL_setAttrs, ++ (DBLL_UnloadFxn) DBLL_unload, ++ (DBLL_UnloadSectFxn) DBLL_unloadSect, ++}; ++ ++static struct GT_Mask NLDR_debugMask = { NULL, NULL }; /* GT trace variable */ ++static u32 cRefs; /* module reference count */ ++ ++static DSP_STATUS AddOvlyInfo(void *handle, struct DBLL_SectInfo *sectInfo, ++ u32 addr, u32 nBytes); ++static DSP_STATUS AddOvlyNode(struct DSP_UUID *pUuid, ++ enum DSP_DCDOBJTYPE objType, ++ IN void *handle); ++static DSP_STATUS AddOvlySect(struct NLDR_OBJECT *hNldr, ++ struct OvlySect **pList, ++ struct DBLL_SectInfo *pSectInfo, bool *pExists, ++ u32 addr, u32 nBytes); ++static s32 fakeOvlyWrite(void *handle, u32 dspAddr, void *buf, u32 nBytes, ++ s32 mtype); ++static void FreeSects(struct NLDR_OBJECT *hNldr, struct OvlySect *pPhaseSects, ++ u16 nAlloc); ++static bool GetSymbolValue(void *handle, void *pArg, void *rmmHandle, ++ char *symName, struct DBLL_Symbol **sym); ++static DSP_STATUS LoadLib(struct NLDR_NODEOBJECT *hNldrNode, ++ struct LibNode *root, struct DSP_UUID uuid, ++ bool rootPersistent, struct DBLL_LibraryObj **libPath, ++ enum NLDR_PHASE phase, u16 depth); ++static DSP_STATUS LoadOvly(struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase); ++static DSP_STATUS RemoteAlloc(void **pRef, u16 memType, u32 size, ++ u32 align, u32 *dspAddr, ++ OPTIONAL s32 segmentId, OPTIONAL s32 req, ++ bool reserve); ++static DSP_STATUS RemoteFree(void **pRef, u16 space, u32 dspAddr, ++ u32 size, bool reserve); ++ ++static void UnloadLib(struct NLDR_NODEOBJECT *hNldrNode, struct LibNode *root); ++static void UnloadOvly(struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase); ++static bool findInPersistentLibArray(struct NLDR_NODEOBJECT *hNldrNode, ++ struct DBLL_LibraryObj *lib); ++static u32 findLcm(u32 a, u32 b); ++static u32 findGcf(u32 a, u32 b); ++ ++/* ++ * ======== NLDR_Allocate ======== ++ */ ++DSP_STATUS NLDR_Allocate(struct NLDR_OBJECT *hNldr, void *pPrivRef, ++ IN CONST struct DCD_NODEPROPS *pNodeProps, ++ OUT struct NLDR_NODEOBJECT **phNldrNode, ++ IN bool *pfPhaseSplit) ++{ ++ struct NLDR_NODEOBJECT *pNldrNode = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pNodeProps != NULL); ++ DBC_Require(phNldrNode != NULL); ++ DBC_Require(MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); ++ ++ GT_5trace(NLDR_debugMask, GT_ENTER, "NLDR_Allocate(0x%x, 0x%x, 0x%x, " ++ "0x%x, 0x%x)\n", hNldr, pPrivRef, pNodeProps, phNldrNode, ++ pfPhaseSplit); ++ ++ /* Initialize handle in case of failure */ ++ *phNldrNode = NULL; ++ /* Allocate node object */ ++ MEM_AllocObject(pNldrNode, struct NLDR_NODEOBJECT, NLDR_NODESIGNATURE); ++ ++ if (pNldrNode == NULL) { ++ GT_0trace(NLDR_debugMask, GT_6CLASS, "NLDR_Allocate: " ++ "Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } else { ++ pNldrNode->pfPhaseSplit = pfPhaseSplit; ++ pNldrNode->nPersLib = 0; ++ pNldrNode->pNldr = hNldr; ++ pNldrNode->pPrivRef = pPrivRef; ++ /* Save node's UUID. 
*/ ++ pNldrNode->uuid = pNodeProps->ndbProps.uiNodeID; ++ /* ++ * Determine if node is a dynamically loaded node from ++ * ndbProps. ++ */ ++ if (pNodeProps->usLoadType == NLDR_DYNAMICLOAD) { ++ /* Dynamic node */ ++ pNldrNode->fDynamic = true; ++ /* ++ * Extract memory requirements from ndbProps masks ++ */ ++ /* Create phase */ ++ pNldrNode->segId[CREATEDATAFLAGBIT] = (u16) ++ (pNodeProps->ulDataMemSegMask >> CREATEBIT) & ++ SEGMASK; ++ pNldrNode->codeDataFlagMask |= ++ ((pNodeProps->ulDataMemSegMask >> ++ (CREATEBIT + FLAGBIT)) & 1) << ++ CREATEDATAFLAGBIT; ++ pNldrNode->segId[CREATECODEFLAGBIT] = (u16) ++ (pNodeProps->ulCodeMemSegMask >> ++ CREATEBIT) & SEGMASK; ++ pNldrNode->codeDataFlagMask |= ++ ((pNodeProps->ulCodeMemSegMask >> ++ (CREATEBIT + FLAGBIT)) & 1) << ++ CREATECODEFLAGBIT; ++ /* Execute phase */ ++ pNldrNode->segId[EXECUTEDATAFLAGBIT] = (u16) ++ (pNodeProps->ulDataMemSegMask >> ++ EXECUTEBIT) & SEGMASK; ++ pNldrNode->codeDataFlagMask |= ++ ((pNodeProps->ulDataMemSegMask >> ++ (EXECUTEBIT + FLAGBIT)) & 1) << ++ EXECUTEDATAFLAGBIT; ++ pNldrNode->segId[EXECUTECODEFLAGBIT] = (u16) ++ (pNodeProps->ulCodeMemSegMask >> ++ EXECUTEBIT) & SEGMASK; ++ pNldrNode->codeDataFlagMask |= ++ ((pNodeProps->ulCodeMemSegMask >> ++ (EXECUTEBIT + FLAGBIT)) & 1) << ++ EXECUTECODEFLAGBIT; ++ /* Delete phase */ ++ pNldrNode->segId[DELETEDATAFLAGBIT] = (u16) ++ (pNodeProps->ulDataMemSegMask >> DELETEBIT) & ++ SEGMASK; ++ pNldrNode->codeDataFlagMask |= ++ ((pNodeProps->ulDataMemSegMask >> ++ (DELETEBIT + FLAGBIT)) & 1) << ++ DELETEDATAFLAGBIT; ++ pNldrNode->segId[DELETECODEFLAGBIT] = (u16) ++ (pNodeProps->ulCodeMemSegMask >> ++ DELETEBIT) & SEGMASK; ++ pNldrNode->codeDataFlagMask |= ++ ((pNodeProps->ulCodeMemSegMask >> ++ (DELETEBIT + FLAGBIT)) & 1) << ++ DELETECODEFLAGBIT; ++ } else { ++ /* Non-dynamically loaded nodes are part of the ++ * base image */ ++ pNldrNode->root.lib = hNldr->baseLib; ++ /* Check for overlay node */ ++ if (pNodeProps->usLoadType == NLDR_OVLYLOAD) ++ pNldrNode->fOverlay = true; ++ ++ } ++ *phNldrNode = (struct NLDR_NODEOBJECT *) pNldrNode; ++ } ++ /* Cleanup on failure */ ++ if (DSP_FAILED(status) && pNldrNode) ++ NLDR_Free((struct NLDR_NODEOBJECT *) pNldrNode); ++ ++ DBC_Ensure((DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle(((struct NLDR_NODEOBJECT *)(*phNldrNode)), ++ NLDR_NODESIGNATURE)) || (DSP_FAILED(status) && ++ *phNldrNode == NULL)); ++ return status; ++} ++ ++/* ++ * ======== NLDR_Create ======== ++ */ ++DSP_STATUS NLDR_Create(OUT struct NLDR_OBJECT **phNldr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct NLDR_ATTRS *pAttrs) ++{ ++ struct COD_MANAGER *hCodMgr; /* COD manager */ ++ char *pszCoffBuf = NULL; ++ char szZLFile[COD_MAXPATHLENGTH]; ++ struct NLDR_OBJECT *pNldr = NULL; ++ struct DBLL_Attrs saveAttrs; ++ struct DBLL_Attrs newAttrs; ++ DBLL_Flags flags; ++ u32 ulEntry; ++ u16 nSegs = 0; ++ struct MemInfo *pMemInfo; ++ u32 ulLen = 0; ++ u32 ulAddr; ++ struct RMM_Segment *rmmSegs = NULL; ++ u16 i; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ DBC_Require(phNldr != NULL); ++ DBC_Require(hDevObject != NULL); ++ DBC_Require(pAttrs != NULL); ++ DBC_Require(pAttrs->pfnOvly != NULL); ++ DBC_Require(pAttrs->pfnWrite != NULL); ++ GT_3trace(NLDR_debugMask, GT_ENTER, "NLDR_Create(0x%x, 0x%x, 0x%x)\n", ++ phNldr, hDevObject, pAttrs); ++ /* Allocate dynamic loader object */ ++ MEM_AllocObject(pNldr, struct NLDR_OBJECT, NLDR_SIGNATURE); ++ if (pNldr) { ++ pNldr->hDevObject = hDevObject; ++ /* warning, lazy status checking alert! 
*/ ++ status = DEV_GetCodMgr(hDevObject, &hCodMgr); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ status = COD_GetLoader(hCodMgr, &pNldr->dbll); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ status = COD_GetBaseLib(hCodMgr, &pNldr->baseLib); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ status = COD_GetBaseName(hCodMgr, szZLFile, COD_MAXPATHLENGTH); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ status = DSP_SOK; ++ /* end lazy status checking */ ++ pNldr->usDSPMauSize = pAttrs->usDSPMauSize; ++ pNldr->usDSPWordSize = pAttrs->usDSPWordSize; ++ pNldr->dbllFxns = dbllFxns; ++ if (!(pNldr->dbllFxns.initFxn())) ++ status = DSP_EMEMORY; ++ ++ } else { ++ GT_0trace(NLDR_debugMask, GT_6CLASS, "NLDR_Create: " ++ "Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } ++ /* Create the DCD Manager */ ++ if (DSP_SUCCEEDED(status)) ++ status = DCD_CreateManager(NULL, &pNldr->hDcdMgr); ++ ++ /* Get dynamic loading memory sections from base lib */ ++ if (DSP_SUCCEEDED(status)) { ++ status = pNldr->dbllFxns.getSectFxn(pNldr->baseLib, DYNMEMSECT, ++ &ulAddr, &ulLen); ++ if (DSP_SUCCEEDED(status)) { ++ pszCoffBuf = MEM_Calloc(ulLen * pNldr->usDSPMauSize, ++ MEM_PAGED); ++ if (!pszCoffBuf) { ++ GT_0trace(NLDR_debugMask, GT_6CLASS, ++ "NLDR_Create: Memory " ++ "allocation failed\n"); ++ status = DSP_EMEMORY; ++ } ++ } else { ++ /* Ok to not have dynamic loading memory */ ++ status = DSP_SOK; ++ ulLen = 0; ++ GT_1trace(NLDR_debugMask, GT_6CLASS, ++ "NLDR_Create: DBLL_getSect " ++ "failed (no dynamic loading mem segments): " ++ "0x%lx\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status) && ulLen > 0) { ++ /* Read section containing dynamic load mem segments */ ++ status = pNldr->dbllFxns.readSectFxn(pNldr->baseLib, DYNMEMSECT, ++ pszCoffBuf, ulLen); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NLDR_debugMask, GT_6CLASS, ++ "NLDR_Create: DBLL_read Section" ++ "failed: 0x%lx\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status) && ulLen > 0) { ++ /* Parse memory segment data */ ++ nSegs = (u16)(*((u32 *)pszCoffBuf)); ++ if (nSegs > MAXMEMSEGS) { ++ GT_1trace(NLDR_debugMask, GT_6CLASS, ++ "NLDR_Create: Invalid number of " ++ "dynamic load mem segments: 0x%lx\n", nSegs); ++ status = DSP_ECORRUPTFILE; ++ } ++ } ++ /* Parse dynamic load memory segments */ ++ if (DSP_SUCCEEDED(status) && nSegs > 0) { ++ rmmSegs = MEM_Calloc(sizeof(struct RMM_Segment) * nSegs, ++ MEM_PAGED); ++ pNldr->segTable = MEM_Calloc(sizeof(u32) * nSegs, MEM_PAGED); ++ if (rmmSegs == NULL || pNldr->segTable == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ pNldr->nSegs = nSegs; ++ pMemInfo = (struct MemInfo *)(pszCoffBuf + ++ sizeof(u32)); ++ for (i = 0; i < nSegs; i++) { ++ rmmSegs[i].base = (pMemInfo + i)->base; ++ rmmSegs[i].length = (pMemInfo + i)->len; ++ rmmSegs[i].space = 0; ++ pNldr->segTable[i] = (pMemInfo + i)->type; ++#ifdef DEBUG ++ DBG_Trace(DBG_LEVEL7, ++ "** (proc) DLL MEMSEGMENT: %d, Base: 0x%x, " ++ "Length: 0x%x\n", i, rmmSegs[i].base, ++ rmmSegs[i].length); ++#endif ++ } ++ } ++ } ++ /* Create Remote memory manager */ ++ if (DSP_SUCCEEDED(status)) ++ status = RMM_create(&pNldr->rmm, rmmSegs, nSegs); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* set the alloc, free, write functions for loader */ ++ pNldr->dbllFxns.getAttrsFxn(pNldr->dbll, &saveAttrs); ++ newAttrs = saveAttrs; ++ newAttrs.alloc = (DBLL_AllocFxn) RemoteAlloc; ++ newAttrs.free = (DBLL_FreeFxn) RemoteFree; ++ newAttrs.symLookup = (DBLL_SymLookup) GetSymbolValue; ++ newAttrs.symHandle = pNldr; ++ newAttrs.write = (DBLL_WriteFxn) pAttrs->pfnWrite; ++ pNldr->ovlyFxn = pAttrs->pfnOvly; ++ 
pNldr->writeFxn = pAttrs->pfnWrite; ++ pNldr->dbllAttrs = newAttrs; ++ } ++ if (rmmSegs) ++ MEM_Free(rmmSegs); ++ ++ if (pszCoffBuf) ++ MEM_Free(pszCoffBuf); ++ ++ /* Get overlay nodes */ ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetBaseName(hCodMgr, szZLFile, COD_MAXPATHLENGTH); ++ /* lazy check */ ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ /* First count number of overlay nodes */ ++ status = DCD_GetObjects(pNldr->hDcdMgr, szZLFile, AddOvlyNode, ++ (void *) pNldr); ++ /* Now build table of overlay nodes */ ++ if (DSP_SUCCEEDED(status) && pNldr->nOvlyNodes > 0) { ++ /* Allocate table for overlay nodes */ ++ pNldr->ovlyTable = ++ MEM_Calloc(sizeof(struct OvlyNode) * pNldr->nOvlyNodes, ++ MEM_PAGED); ++ /* Put overlay nodes in the table */ ++ pNldr->nNode = 0; ++ status = DCD_GetObjects(pNldr->hDcdMgr, szZLFile, ++ AddOvlyNode, ++ (void *) pNldr); ++ } ++ } ++ /* Do a fake reload of the base image to get overlay section info */ ++ if (DSP_SUCCEEDED(status) && pNldr->nOvlyNodes > 0) { ++ saveAttrs.write = fakeOvlyWrite; ++ saveAttrs.logWrite = AddOvlyInfo; ++ saveAttrs.logWriteHandle = pNldr; ++ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; ++ status = pNldr->dbllFxns.loadFxn(pNldr->baseLib, flags, ++ &saveAttrs, &ulEntry); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ *phNldr = (struct NLDR_OBJECT *) pNldr; ++ } else { ++ if (pNldr) ++ NLDR_Delete((struct NLDR_OBJECT *) pNldr); ++ ++ *phNldr = NULL; ++ } ++ /* FIXME:Temp. Fix. Must be removed */ ++ DBC_Ensure((DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle(((struct NLDR_OBJECT *)*phNldr), ++ NLDR_SIGNATURE)) ++ || (DSP_FAILED(status) && (*phNldr == NULL))); ++ return status; ++} ++ ++/* ++ * ======== NLDR_Delete ======== ++ */ ++void NLDR_Delete(struct NLDR_OBJECT *hNldr) ++{ ++ struct OvlySect *pSect; ++ struct OvlySect *pNext; ++ u16 i; ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); ++ GT_1trace(NLDR_debugMask, GT_ENTER, "NLDR_Delete(0x%x)\n", hNldr); ++ hNldr->dbllFxns.exitFxn(); ++ if (hNldr->rmm) ++ RMM_delete(hNldr->rmm); ++ ++ if (hNldr->segTable) ++ MEM_Free(hNldr->segTable); ++ ++ if (hNldr->hDcdMgr) ++ DCD_DestroyManager(hNldr->hDcdMgr); ++ ++ /* Free overlay node information */ ++ if (hNldr->ovlyTable) { ++ for (i = 0; i < hNldr->nOvlyNodes; i++) { ++ pSect = hNldr->ovlyTable[i].pCreateSects; ++ while (pSect) { ++ pNext = pSect->pNextSect; ++ MEM_Free(pSect); ++ pSect = pNext; ++ } ++ pSect = hNldr->ovlyTable[i].pDeleteSects; ++ while (pSect) { ++ pNext = pSect->pNextSect; ++ MEM_Free(pSect); ++ pSect = pNext; ++ } ++ pSect = hNldr->ovlyTable[i].pExecuteSects; ++ while (pSect) { ++ pNext = pSect->pNextSect; ++ MEM_Free(pSect); ++ pSect = pNext; ++ } ++ pSect = hNldr->ovlyTable[i].pOtherSects; ++ while (pSect) { ++ pNext = pSect->pNextSect; ++ MEM_Free(pSect); ++ pSect = pNext; ++ } ++ } ++ MEM_Free(hNldr->ovlyTable); ++ } ++ MEM_FreeObject(hNldr); ++ DBC_Ensure(!MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); ++} ++ ++/* ++ * ======== NLDR_Exit ======== ++ * Discontinue usage of NLDR module. 
++ */ ++void NLDR_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(NLDR_debugMask, GT_5CLASS, ++ "Entered NLDR_Exit, ref count: 0x%x\n", cRefs); ++ ++ if (cRefs == 0) { ++ RMM_exit(); ++ NLDR_debugMask.flags = NULL; ++ } ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== NLDR_Free ======== ++ */ ++void NLDR_Free(struct NLDR_NODEOBJECT *hNldrNode) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); ++ ++ GT_1trace(NLDR_debugMask, GT_ENTER, "NLDR_Free(0x%x)\n", hNldrNode); ++ ++ MEM_FreeObject(hNldrNode); ++} ++ ++/* ++ * ======== NLDR_GetFxnAddr ======== ++ */ ++DSP_STATUS NLDR_GetFxnAddr(struct NLDR_NODEOBJECT *hNldrNode, char *pstrFxn, ++ u32 *pulAddr) ++{ ++ struct DBLL_Symbol *pSym; ++ struct NLDR_OBJECT *hNldr; ++ DSP_STATUS status = DSP_SOK; ++ bool status1 = false; ++ s32 i = 0; ++ struct LibNode root = { NULL, 0, NULL }; ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); ++ DBC_Require(pulAddr != NULL); ++ DBC_Require(pstrFxn != NULL); ++ GT_3trace(NLDR_debugMask, GT_ENTER, "NLDR_GetFxnAddr(0x%x, %s, 0x%x)\n", ++ hNldrNode, pstrFxn, pulAddr); ++ ++ hNldr = hNldrNode->pNldr; ++ /* Called from NODE_Create(), NODE_Delete(), or NODE_Run(). */ ++ if (hNldrNode->fDynamic && *hNldrNode->pfPhaseSplit) { ++ switch (hNldrNode->phase) { ++ case NLDR_CREATE: ++ root = hNldrNode->createLib; ++ break; ++ case NLDR_EXECUTE: ++ root = hNldrNode->executeLib; ++ break; ++ case NLDR_DELETE: ++ root = hNldrNode->deleteLib; ++ break; ++ default: ++ DBC_Assert(false); ++ break; ++ } ++ } else { ++ /* for Overlay nodes or non-split Dynamic nodes */ ++ root = hNldrNode->root; ++ } ++ status1 = hNldr->dbllFxns.getCAddrFxn(root.lib, pstrFxn, &pSym); ++ if (!status1) ++ status1 = hNldr->dbllFxns.getAddrFxn(root.lib, pstrFxn, &pSym); ++ ++ /* If symbol not found, check dependent libraries */ ++ if (!status1) { ++ for (i = 0; i < root.nDepLibs; i++) { ++ status1 = hNldr->dbllFxns.getAddrFxn(root.pDepLibs[i]. ++ lib, pstrFxn, &pSym); ++ if (!status1) { ++ status1 = hNldr->dbllFxns.getCAddrFxn(root. 
++ pDepLibs[i].lib, pstrFxn, &pSym); ++ } ++ if (status1) { ++ /* Symbol found */ ++ break; ++ } ++ } ++ } ++ /* Check persistent libraries */ ++ if (!status1) { ++ for (i = 0; i < hNldrNode->nPersLib; i++) { ++ status1 = hNldr->dbllFxns.getAddrFxn(hNldrNode-> ++ persLib[i].lib, pstrFxn, &pSym); ++ if (!status1) { ++ status1 = ++ hNldr->dbllFxns.getCAddrFxn(hNldrNode-> ++ persLib[i].lib, pstrFxn, &pSym); ++ } ++ if (status1) { ++ /* Symbol found */ ++ break; ++ } ++ } ++ } ++ ++ if (status1) { ++ *pulAddr = pSym->value; ++ } else { ++ GT_1trace(NLDR_debugMask, GT_6CLASS, ++ "NLDR_GetFxnAddr: Symbol not found: " ++ "%s\n", pstrFxn); ++ status = DSP_ESYMBOL; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== NLDR_GetRmmManager ======== ++ * Given a NLDR object, retrieve RMM Manager Handle ++ */ ++DSP_STATUS NLDR_GetRmmManager(struct NLDR_OBJECT *hNldrObject, ++ OUT struct RMM_TargetObj **phRmmMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct NLDR_OBJECT *pNldrObject = hNldrObject; ++ DBC_Require(phRmmMgr != NULL); ++ GT_2trace(NLDR_debugMask, GT_ENTER, "NLDR_GetRmmManager(0x%x, 0x%x)\n", ++ hNldrObject, phRmmMgr); ++ if (MEM_IsValidHandle(hNldrObject, NLDR_SIGNATURE)) { ++ *phRmmMgr = pNldrObject->rmm; ++ } else { ++ *phRmmMgr = NULL; ++ status = DSP_EHANDLE; ++ GT_0trace(NLDR_debugMask, GT_7CLASS, ++ "NLDR_GetRmmManager:Invalid handle"); ++ } ++ ++ GT_2trace(NLDR_debugMask, GT_ENTER, "Exit NLDR_GetRmmManager: status " ++ "0x%x\n\tphRmmMgr: 0x%x\n", status, *phRmmMgr); ++ ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phRmmMgr != NULL) && ++ (*phRmmMgr == NULL))); ++ ++ return status; ++} ++ ++/* ++ * ======== NLDR_Init ======== ++ * Initialize the NLDR module. ++ */ ++bool NLDR_Init(void) ++{ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!NLDR_debugMask.flags); ++ GT_create(&NLDR_debugMask, "NL"); /* "NL" for NLdr */ ++ ++ RMM_init(); ++ } ++ ++ cRefs++; ++ ++ GT_1trace(NLDR_debugMask, GT_5CLASS, "NLDR_Init(), ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure(cRefs > 0); ++ return true; ++} ++ ++/* ++ * ======== NLDR_Load ======== ++ */ ++DSP_STATUS NLDR_Load(struct NLDR_NODEOBJECT *hNldrNode, enum NLDR_PHASE phase) ++{ ++ struct NLDR_OBJECT *hNldr; ++ struct DSP_UUID libUUID; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); ++ ++ hNldr = hNldrNode->pNldr; ++ ++ GT_2trace(NLDR_debugMask, GT_ENTER, "NLDR_Load(0x%x, 0x%x)\n", ++ hNldrNode, phase); ++ ++ if (hNldrNode->fDynamic) { ++ hNldrNode->phase = phase; ++ ++ libUUID = hNldrNode->uuid; ++ ++ /* At this point, we may not know if node is split into ++ * different libraries. So we'll go ahead and load the ++ * library, and then save the pointer to the appropriate ++ * location after we know. 
*/ ++ ++ status = LoadLib(hNldrNode, &hNldrNode->root, libUUID, false, ++ hNldrNode->libPath, phase, 0); ++ ++ if (DSP_SUCCEEDED(status)) { ++ if (*hNldrNode->pfPhaseSplit) { ++ switch (phase) { ++ case NLDR_CREATE: ++ hNldrNode->createLib = hNldrNode->root; ++ break; ++ ++ case NLDR_EXECUTE: ++ hNldrNode->executeLib = hNldrNode->root; ++ break; ++ ++ case NLDR_DELETE: ++ hNldrNode->deleteLib = hNldrNode->root; ++ break; ++ ++ default: ++ DBC_Assert(false); ++ break; ++ } ++ } ++ } ++ } else { ++ if (hNldrNode->fOverlay) ++ status = LoadOvly(hNldrNode, phase); ++ ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== NLDR_Unload ======== ++ */ ++DSP_STATUS NLDR_Unload(struct NLDR_NODEOBJECT *hNldrNode, enum NLDR_PHASE phase) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct LibNode *pRootLib = NULL; ++ s32 i = 0; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); ++ GT_2trace(NLDR_debugMask, GT_ENTER, "NLDR_Unload(0x%x, 0x%x)\n", ++ hNldrNode, phase); ++ if (hNldrNode != NULL) { ++ if (hNldrNode->fDynamic) { ++ if (*hNldrNode->pfPhaseSplit) { ++ switch (phase) { ++ case NLDR_CREATE: ++ pRootLib = &hNldrNode->createLib; ++ break; ++ case NLDR_EXECUTE: ++ pRootLib = &hNldrNode->executeLib; ++ break; ++ case NLDR_DELETE: ++ pRootLib = &hNldrNode->deleteLib; ++ /* Unload persistent libraries */ ++ for (i = 0; i < hNldrNode->nPersLib; ++ i++) { ++ UnloadLib(hNldrNode, ++ &hNldrNode->persLib[i]); ++ } ++ hNldrNode->nPersLib = 0; ++ break; ++ default: ++ DBC_Assert(false); ++ break; ++ } ++ } else { ++ /* Unload main library */ ++ pRootLib = &hNldrNode->root; ++ } ++ UnloadLib(hNldrNode, pRootLib); ++ } else { ++ if (hNldrNode->fOverlay) ++ UnloadOvly(hNldrNode, phase); ++ ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== AddOvlyInfo ======== ++ */ ++static DSP_STATUS AddOvlyInfo(void *handle, struct DBLL_SectInfo *sectInfo, ++ u32 addr, u32 nBytes) ++{ ++ char *pNodeName; ++ char *pSectName = (char *)sectInfo->name; ++ bool fExists = false; ++ char seps = ':'; ++ char *pch; ++ u16 i; ++ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)handle; ++ DSP_STATUS status = DSP_SOK; ++ ++ /* Is this an overlay section (load address != run address)? */ ++ if (sectInfo->loadAddr == sectInfo->runAddr) ++ goto func_end; ++ ++ /* Find the node it belongs to */ ++ for (i = 0; i < hNldr->nOvlyNodes; i++) { ++ pNodeName = hNldr->ovlyTable[i].pNodeName; ++ DBC_Require(pNodeName); ++ if (strncmp(pNodeName, pSectName + 1, ++ strlen(pNodeName)) == 0) { ++ /* Found the node */ ++ break; ++ } ++ } ++ if (!(i < hNldr->nOvlyNodes)) ++ goto func_end; ++ ++ /* Determine which phase this section belongs to */ ++ for (pch = pSectName + 1; *pch && *pch != seps; pch++) ++ ;; ++ ++ if (*pch) { ++ pch++; /* Skip over the ':' */ ++ if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) { ++ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. ++ pCreateSects, sectInfo, &fExists, addr, nBytes); ++ if (DSP_SUCCEEDED(status) && !fExists) ++ hNldr->ovlyTable[i].nCreateSects++; ++ ++ } else ++ if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) { ++ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. ++ pDeleteSects, sectInfo, &fExists, ++ addr, nBytes); ++ if (DSP_SUCCEEDED(status) && !fExists) ++ hNldr->ovlyTable[i].nDeleteSects++; ++ ++ } else ++ if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) { ++ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. 
++				pExecuteSects, sectInfo, &fExists,
++				addr, nBytes);
++			if (DSP_SUCCEEDED(status) && !fExists)
++				hNldr->ovlyTable[i].nExecuteSects++;
++
++		} else {
++			/* Put in "other" sections */
++			status = AddOvlySect(hNldr, &hNldr->ovlyTable[i].
++				       pOtherSects, sectInfo, &fExists,
++				       addr, nBytes);
++			if (DSP_SUCCEEDED(status) && !fExists)
++				hNldr->ovlyTable[i].nOtherSects++;
++
++		}
++	}
++func_end:
++	return status;
++}
++
++/*
++ * ======== AddOvlyNode =========
++ * Callback function passed to DCD_GetObjects.
++ */
++static DSP_STATUS AddOvlyNode(struct DSP_UUID *pUuid,
++			     enum DSP_DCDOBJTYPE objType,
++			     IN void *handle)
++{
++	struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)handle;
++	char *pNodeName = NULL;
++	char *pBuf = NULL;
++	u32 uLen;
++	struct DCD_GENERICOBJ objDef;
++	DSP_STATUS status = DSP_SOK;
++
++	if (objType != DSP_DCDNODETYPE)
++		goto func_end;
++
++	status = DCD_GetObjectDef(hNldr->hDcdMgr, pUuid, objType, &objDef);
++	if (DSP_FAILED(status))
++		goto func_end;
++
++	/* If overlay node, add to the list */
++	if (objDef.objData.nodeObj.usLoadType == NLDR_OVLYLOAD) {
++		if (hNldr->ovlyTable == NULL) {
++			hNldr->nOvlyNodes++;
++		} else {
++			/* Add node to table */
++			hNldr->ovlyTable[hNldr->nNode].uuid = *pUuid;
++			DBC_Require(objDef.objData.nodeObj.ndbProps.acName);
++			uLen = strlen(objDef.objData.nodeObj.ndbProps.acName);
++			pNodeName = objDef.objData.nodeObj.ndbProps.acName;
++			pBuf = MEM_Calloc(uLen + 1, MEM_PAGED);
++			if (pBuf == NULL) {
++				status = DSP_EMEMORY;
++			} else {
++				strncpy(pBuf, pNodeName, uLen);
++				hNldr->ovlyTable[hNldr->nNode].pNodeName = pBuf;
++				hNldr->nNode++;
++			}
++		}
++	}
++	/* These were allocated in DCD_GetObjectDef */
++	if (objDef.objData.nodeObj.pstrCreatePhaseFxn)
++		MEM_Free(objDef.objData.nodeObj.pstrCreatePhaseFxn);
++
++	if (objDef.objData.nodeObj.pstrExecutePhaseFxn)
++		MEM_Free(objDef.objData.nodeObj.pstrExecutePhaseFxn);
++
++	if (objDef.objData.nodeObj.pstrDeletePhaseFxn)
++		MEM_Free(objDef.objData.nodeObj.pstrDeletePhaseFxn);
++
++	if (objDef.objData.nodeObj.pstrIAlgName)
++		MEM_Free(objDef.objData.nodeObj.pstrIAlgName);
++
++func_end:
++	return status;
++}
++
++/*
++ * ======== AddOvlySect ========
++ */
++static DSP_STATUS AddOvlySect(struct NLDR_OBJECT *hNldr,
++			     struct OvlySect **pList,
++			     struct DBLL_SectInfo *pSectInfo, bool *pExists,
++			     u32 addr, u32 nBytes)
++{
++	struct OvlySect *pNewSect = NULL;
++	struct OvlySect *pLastSect;
++	struct OvlySect *pSect;
++	DSP_STATUS status = DSP_SOK;
++
++	pSect = pLastSect = *pList;
++	*pExists = false;
++	while (pSect) {
++		/*
++		 * Make sure section has not already been added. Multiple
++		 * 'write' calls may be made to load the section.
++ */ ++ if (pSect->loadAddr == addr) { ++ /* Already added */ ++ *pExists = true; ++ break; ++ } ++ pLastSect = pSect; ++ pSect = pSect->pNextSect; ++ } ++ ++ if (!pSect) { ++ /* New section */ ++ pNewSect = MEM_Calloc(sizeof(struct OvlySect), MEM_PAGED); ++ if (pNewSect == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ pNewSect->loadAddr = addr; ++ pNewSect->runAddr = pSectInfo->runAddr + ++ (addr - pSectInfo->loadAddr); ++ pNewSect->size = nBytes; ++ pNewSect->page = pSectInfo->type; ++ } ++ ++ /* Add to the list */ ++ if (DSP_SUCCEEDED(status)) { ++ if (*pList == NULL) { ++ /* First in the list */ ++ *pList = pNewSect; ++ } else { ++ pLastSect->pNextSect = pNewSect; ++ } ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== fakeOvlyWrite ======== ++ */ ++static s32 fakeOvlyWrite(void *handle, u32 dspAddr, void *buf, u32 nBytes, ++ s32 mtype) ++{ ++ return (s32)nBytes; ++} ++ ++/* ++ * ======== FreeSects ======== ++ */ ++static void FreeSects(struct NLDR_OBJECT *hNldr, struct OvlySect *pPhaseSects, ++ u16 nAlloc) ++{ ++ struct OvlySect *pSect = pPhaseSects; ++ u16 i = 0; ++ bool fRet; ++ ++ while (pSect && i < nAlloc) { ++ /* 'Deallocate' */ ++ /* segid - page not supported yet */ ++ /* Reserved memory */ ++ fRet = RMM_free(hNldr->rmm, 0, pSect->runAddr, pSect->size, ++ true); ++ DBC_Assert(fRet); ++ pSect = pSect->pNextSect; ++ i++; ++ } ++} ++ ++/* ++ * ======== GetSymbolValue ======== ++ * Find symbol in library's base image. If not there, check dependent ++ * libraries. ++ */ ++static bool GetSymbolValue(void *handle, void *pArg, void *rmmHandle, ++ char *name, struct DBLL_Symbol **sym) ++{ ++ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)handle; ++ struct NLDR_NODEOBJECT *hNldrNode = (struct NLDR_NODEOBJECT *)rmmHandle; ++ struct LibNode *root = (struct LibNode *)pArg; ++ u16 i; ++ bool status = false; ++ ++ /* check the base image */ ++ status = hNldr->dbllFxns.getAddrFxn(hNldr->baseLib, name, sym); ++ if (!status) ++ status = hNldr->dbllFxns.getCAddrFxn(hNldr->baseLib, name, sym); ++ ++ /* ++ * Check in root lib itself. If the library consists of ++ * multiple object files linked together, some symbols in the ++ * library may need to be resolved. ++ */ ++ if (!status) { ++ status = hNldr->dbllFxns.getAddrFxn(root->lib, name, sym); ++ if (!status) { ++ status = ++ hNldr->dbllFxns.getCAddrFxn(root->lib, name, sym); ++ } ++ } ++ ++ /* ++ * Check in root lib's dependent libraries, but not dependent ++ * libraries' dependents. ++ */ ++ if (!status) { ++ for (i = 0; i < root->nDepLibs; i++) { ++ status = hNldr->dbllFxns.getAddrFxn(root->pDepLibs[i]. ++ lib, name, sym); ++ if (!status) { ++ status = hNldr->dbllFxns.getCAddrFxn(root-> ++ pDepLibs[i].lib, name, sym); ++ } ++ if (status) { ++ /* Symbol found */ ++ break; ++ } ++ } ++ } ++ /* ++ * Check in persistent libraries ++ */ ++ if (!status) { ++ for (i = 0; i < hNldrNode->nPersLib; i++) { ++ status = hNldr->dbllFxns.getAddrFxn(hNldrNode-> ++ persLib[i].lib, name, sym); ++ if (!status) { ++ status = hNldr->dbllFxns.getCAddrFxn ++ (hNldrNode->persLib[i].lib, name, sym); ++ } ++ if (status) { ++ /* Symbol found */ ++ break; ++ } ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== LoadLib ======== ++ * Recursively load library and all its dependent libraries. The library ++ * we're loading is specified by a uuid. 
++ */ ++static DSP_STATUS LoadLib(struct NLDR_NODEOBJECT *hNldrNode, ++ struct LibNode *root, struct DSP_UUID uuid, ++ bool rootPersistent, struct DBLL_LibraryObj **libPath, ++ enum NLDR_PHASE phase, u16 depth) ++{ ++ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; ++ u16 nLibs = 0; /* Number of dependent libraries */ ++ u16 nPLibs = 0; /* Number of persistent libraries */ ++ u16 nLoaded = 0; /* Number of dep. libraries loaded */ ++ u16 i; ++ u32 entry; ++ u32 dwBufSize = NLDR_MAXPATHLENGTH; ++ DBLL_Flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC; ++ struct DBLL_Attrs newAttrs; ++ char *pszFileName = NULL; ++ struct DSP_UUID *depLibUUIDs = NULL; ++ bool *persistentDepLibs = NULL; ++ DSP_STATUS status = DSP_SOK; ++ bool fStatus = false; ++ struct LibNode *pDepLib; ++ ++ if (depth > MAXDEPTH) { ++ /* Error */ ++ DBC_Assert(false); ++ } ++ root->lib = NULL; ++ /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */ ++ pszFileName = MEM_Calloc(DBLL_MAXPATHLENGTH, MEM_PAGED); ++ if (pszFileName == NULL) ++ status = DSP_EMEMORY; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Get the name of the library */ ++ if (depth == 0) { ++ status = DCD_GetLibraryName(hNldrNode->pNldr->hDcdMgr, ++ &uuid, pszFileName, &dwBufSize, phase, ++ hNldrNode->pfPhaseSplit); ++ } else { ++ /* Dependent libraries are registered with a phase */ ++ status = DCD_GetLibraryName(hNldrNode->pNldr->hDcdMgr, ++ &uuid, pszFileName, &dwBufSize, NLDR_NOPHASE, ++ NULL); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Open the library, don't load symbols */ ++ status = hNldr->dbllFxns.openFxn(hNldr->dbll, pszFileName, ++ DBLL_NOLOAD, &root->lib); ++ } ++ /* Done with file name */ ++ if (pszFileName) ++ MEM_Free(pszFileName); ++ ++ /* Check to see if library not already loaded */ ++ if (DSP_SUCCEEDED(status) && rootPersistent) { ++ fStatus = findInPersistentLibArray(hNldrNode, root->lib); ++ /* Close library */ ++ if (fStatus) { ++ hNldr->dbllFxns.closeFxn(root->lib); ++ return DSP_SALREADYLOADED; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Check for circular dependencies. */ ++ for (i = 0; i < depth; i++) { ++ if (root->lib == libPath[i]) { ++ /* This condition could be checked by a ++ * tool at build time. */ ++ status = DSP_EDYNLOAD; ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Add library to current path in dependency tree */ ++ libPath[depth] = root->lib; ++ depth++; ++ /* Get number of dependent libraries */ ++ status = DCD_GetNumDepLibs(hNldrNode->pNldr->hDcdMgr, &uuid, ++ &nLibs, &nPLibs, phase); ++ } ++ DBC_Assert(nLibs >= nPLibs); ++ if (DSP_SUCCEEDED(status)) { ++ if (!(*hNldrNode->pfPhaseSplit)) ++ nPLibs = 0; ++ ++ /* nLibs = #of dependent libraries */ ++ root->nDepLibs = nLibs - nPLibs; ++ if (nLibs > 0) { ++ depLibUUIDs = MEM_Calloc(sizeof(struct DSP_UUID) * ++ nLibs, MEM_PAGED); ++ persistentDepLibs = ++ MEM_Calloc(sizeof(bool) * nLibs, MEM_PAGED); ++ if (!depLibUUIDs || !persistentDepLibs) ++ status = DSP_EMEMORY; ++ ++ if (root->nDepLibs > 0) { ++ /* Allocate arrays for dependent lib UUIDs, ++ * lib nodes */ ++ root->pDepLibs = MEM_Calloc ++ (sizeof(struct LibNode) * ++ (root->nDepLibs), MEM_PAGED); ++ if (!(root->pDepLibs)) ++ status = DSP_EMEMORY; ++ ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Get the dependent library UUIDs */ ++ status = DCD_GetDepLibs(hNldrNode->pNldr-> ++ hDcdMgr, &uuid, nLibs, depLibUUIDs, ++ persistentDepLibs, phase); ++ } ++ } ++ } ++ ++ /* ++ * Recursively load dependent libraries. 
++ */ ++ if (DSP_SUCCEEDED(status) && persistentDepLibs) { ++ for (i = 0; i < nLibs; i++) { ++ /* If root library is NOT persistent, and dep library ++ * is, then record it. If root library IS persistent, ++ * the deplib is already included */ ++ if (!rootPersistent && persistentDepLibs[i] && ++ *hNldrNode->pfPhaseSplit) { ++ if ((hNldrNode->nPersLib) > MAXLIBS) { ++ status = DSP_EDYNLOAD; ++ break; ++ } ++ ++ /* Allocate library outside of phase */ ++ pDepLib = &hNldrNode->persLib[hNldrNode-> ++ nPersLib]; ++ } else { ++ if (rootPersistent) ++ persistentDepLibs[i] = true; ++ ++ ++ /* Allocate library within phase */ ++ pDepLib = &root->pDepLibs[nLoaded]; ++ } ++ ++ if (depLibUUIDs) { ++ status = LoadLib(hNldrNode, pDepLib, ++ depLibUUIDs[i], ++ persistentDepLibs[i], libPath, ++ phase, ++ depth); ++ } else { ++ status = DSP_EMEMORY; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ if ((status != DSP_SALREADYLOADED) && ++ !rootPersistent && persistentDepLibs[i] && ++ *hNldrNode->pfPhaseSplit) { ++ (hNldrNode->nPersLib)++; ++ } else { ++ if (!persistentDepLibs[i] || ++ !(*hNldrNode->pfPhaseSplit)) { ++ nLoaded++; ++ } ++ } ++ } else { ++ break; ++ } ++ } ++ } ++ ++ /* Now we can load the root library */ ++ if (DSP_SUCCEEDED(status)) { ++ newAttrs = hNldr->dbllAttrs; ++ newAttrs.symArg = root; ++ newAttrs.rmmHandle = hNldrNode; ++ newAttrs.wHandle = hNldrNode->pPrivRef; ++ newAttrs.baseImage = false; ++ ++ status = hNldr->dbllFxns.loadFxn(root->lib, flags, &newAttrs, ++ &entry); ++ } ++ ++ /* ++ * In case of failure, unload any dependent libraries that ++ * were loaded, and close the root library. ++ * (Persistent libraries are unloaded from the very top) ++ */ ++ if (DSP_FAILED(status)) { ++ if (phase != NLDR_EXECUTE) { ++ for (i = 0; i < hNldrNode->nPersLib; i++) ++ UnloadLib(hNldrNode, &hNldrNode->persLib[i]); ++ ++ hNldrNode->nPersLib = 0; ++ } ++ for (i = 0; i < nLoaded; i++) ++ UnloadLib(hNldrNode, &root->pDepLibs[i]); ++ ++ if (root->lib) ++ hNldr->dbllFxns.closeFxn(root->lib); ++ ++ } ++ ++ /* Going up one node in the dependency tree */ ++ depth--; ++ ++ if (depLibUUIDs) { ++ MEM_Free(depLibUUIDs); ++ depLibUUIDs = NULL; ++ } ++ ++ if (persistentDepLibs) { ++ MEM_Free(persistentDepLibs); ++ persistentDepLibs = NULL; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== LoadOvly ======== ++ */ ++static DSP_STATUS LoadOvly(struct NLDR_NODEOBJECT *hNldrNode, ++ enum NLDR_PHASE phase) ++{ ++ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; ++ struct OvlyNode *pONode = NULL; ++ struct OvlySect *pPhaseSects = NULL; ++ struct OvlySect *pOtherSects = NULL; ++ u16 i; ++ u16 nAlloc = 0; ++ u16 nOtherAlloc = 0; ++ u16 *pRefCount = NULL; ++ u16 *pOtherRef = NULL; ++ u32 nBytes; ++ struct OvlySect *pSect; ++ DSP_STATUS status = DSP_SOK; ++ ++ /* Find the node in the table */ ++ for (i = 0; i < hNldr->nOvlyNodes; i++) { ++ if (IsEqualUUID(hNldrNode->uuid, hNldr->ovlyTable[i].uuid)) { ++ /* Found it */ ++ pONode = &(hNldr->ovlyTable[i]); ++ break; ++ } ++ } ++ ++ DBC_Assert(i < hNldr->nOvlyNodes); ++ switch (phase) { ++ case NLDR_CREATE: ++ pRefCount = &(pONode->createRef); ++ pOtherRef = &(pONode->otherRef); ++ pPhaseSects = pONode->pCreateSects; ++ pOtherSects = pONode->pOtherSects; ++ break; ++ ++ case NLDR_EXECUTE: ++ pRefCount = &(pONode->executeRef); ++ pPhaseSects = pONode->pExecuteSects; ++ break; ++ ++ case NLDR_DELETE: ++ pRefCount = &(pONode->deleteRef); ++ pPhaseSects = pONode->pDeleteSects; ++ break; ++ ++ default: ++ DBC_Assert(false); ++ break; ++ } ++ ++ DBC_Assert(pRefCount != NULL); ++ if 
(DSP_FAILED(status)) ++ goto func_end; ++ ++ if (pRefCount == NULL) ++ goto func_end; ++ ++ if (*pRefCount != 0) ++ goto func_end; ++ ++ /* 'Allocate' memory for overlay sections of this phase */ ++ pSect = pPhaseSects; ++ while (pSect) { ++ /* allocate */ /* page not supported yet */ ++ /* reserve */ /* align */ ++ status = RMM_alloc(hNldr->rmm, 0, pSect->size, 0, ++ &(pSect->runAddr), true); ++ if (DSP_SUCCEEDED(status)) { ++ pSect = pSect->pNextSect; ++ nAlloc++; ++ } else { ++ break; ++ } ++ } ++ if (pOtherRef && *pOtherRef == 0) { ++ /* 'Allocate' memory for other overlay sections ++ * (create phase) */ ++ if (DSP_SUCCEEDED(status)) { ++ pSect = pOtherSects; ++ while (pSect) { ++ /* page not supported */ /* align */ ++ /* reserve */ ++ status = RMM_alloc(hNldr->rmm, 0, pSect->size, ++ 0, &(pSect->runAddr), true); ++ if (DSP_SUCCEEDED(status)) { ++ pSect = pSect->pNextSect; ++ nOtherAlloc++; ++ } else { ++ break; ++ } ++ } ++ } ++ } ++ if (*pRefCount == 0) { ++ if (DSP_SUCCEEDED(status)) { ++ /* Load sections for this phase */ ++ pSect = pPhaseSects; ++ while (pSect && DSP_SUCCEEDED(status)) { ++ nBytes = (*hNldr->ovlyFxn)(hNldrNode->pPrivRef, ++ pSect->runAddr, pSect->loadAddr, ++ pSect->size, pSect->page); ++ if (nBytes != pSect->size) ++ status = DSP_EFAIL; ++ ++ pSect = pSect->pNextSect; ++ } ++ } ++ } ++ if (pOtherRef && *pOtherRef == 0) { ++ if (DSP_SUCCEEDED(status)) { ++ /* Load other sections (create phase) */ ++ pSect = pOtherSects; ++ while (pSect && DSP_SUCCEEDED(status)) { ++ nBytes = (*hNldr->ovlyFxn)(hNldrNode->pPrivRef, ++ pSect->runAddr, pSect->loadAddr, ++ pSect->size, pSect->page); ++ if (nBytes != pSect->size) ++ status = DSP_EFAIL; ++ ++ pSect = pSect->pNextSect; ++ } ++ } ++ } ++ if (DSP_FAILED(status)) { ++ /* 'Deallocate' memory */ ++ FreeSects(hNldr, pPhaseSects, nAlloc); ++ FreeSects(hNldr, pOtherSects, nOtherAlloc); ++ } ++func_end: ++ if (DSP_SUCCEEDED(status) && (pRefCount != NULL)) { ++ *pRefCount += 1; ++ if (pOtherRef) ++ *pOtherRef += 1; ++ ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== RemoteAlloc ======== ++ */ ++static DSP_STATUS RemoteAlloc(void **pRef, u16 space, u32 size, ++ u32 align, u32 *dspAddr, ++ OPTIONAL s32 segmentId, OPTIONAL s32 req, ++ bool reserve) ++{ ++ struct NLDR_NODEOBJECT *hNode = (struct NLDR_NODEOBJECT *)pRef; ++ struct NLDR_OBJECT *hNldr; ++ struct RMM_TargetObj *rmm; ++ u16 memPhaseBit = MAXFLAGS; ++ u16 segid = 0; ++ u16 i; ++ u16 memType; ++ u32 nWords; ++ struct RMM_Addr *pRmmAddr = (struct RMM_Addr *)dspAddr; ++ bool fReq = false; ++ DSP_STATUS status = DSP_EMEMORY; /* Set to fail */ ++ DBC_Require(MEM_IsValidHandle(hNode, NLDR_NODESIGNATURE)); ++ DBC_Require(space == DBLL_CODE || space == DBLL_DATA || ++ space == DBLL_BSS); ++ hNldr = hNode->pNldr; ++ rmm = hNldr->rmm; ++ /* Convert size to DSP words */ ++ nWords = (size + hNldr->usDSPWordSize - 1) / hNldr->usDSPWordSize; ++ /* Modify memory 'align' to account for DSP cache line size */ ++ align = findLcm(GEM_CACHE_LINE_SIZE, align); ++ GT_1trace(NLDR_debugMask, GT_7CLASS, ++ "RemoteAlloc: memory align to 0x%x \n", align); ++ if (segmentId != -1) { ++ pRmmAddr->segid = segmentId; ++ segid = segmentId; ++ fReq = req; ++ } else { ++ switch (hNode->phase) { ++ case NLDR_CREATE: ++ memPhaseBit = CREATEDATAFLAGBIT; ++ break; ++ case NLDR_DELETE: ++ memPhaseBit = DELETEDATAFLAGBIT; ++ break; ++ case NLDR_EXECUTE: ++ memPhaseBit = EXECUTEDATAFLAGBIT; ++ break; ++ default: ++ DBC_Assert(false); ++ break; ++ } ++ if (space == DBLL_CODE) ++ memPhaseBit++; ++ ++ if 
(memPhaseBit < MAXFLAGS) ++ segid = hNode->segId[memPhaseBit]; ++ ++ /* Determine if there is a memory loading requirement */ ++ if ((hNode->codeDataFlagMask >> memPhaseBit) & 0x1) ++ fReq = true; ++ ++ } ++ memType = (space == DBLL_CODE) ? DYNM_CODE : DYNM_DATA; ++ ++ /* Find an appropriate segment based on space */ ++ if (segid == NULLID) { ++ /* No memory requirements of preferences */ ++ DBC_Assert(!fReq); ++ goto func_cont; ++ } ++ if (segid <= MAXSEGID) { ++ DBC_Assert(segid < hNldr->nSegs); ++ /* Attempt to allocate from segid first. */ ++ pRmmAddr->segid = segid; ++ status = RMM_alloc(rmm, segid, nWords, align, dspAddr, false); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NLDR_debugMask, GT_6CLASS, ++ "RemoteAlloc:Unable allocate " ++ "from segment %d.\n", segid); ++ } ++ } else { ++ /* segid > MAXSEGID ==> Internal or external memory */ ++ DBC_Assert(segid == MEMINTERNALID || segid == MEMEXTERNALID); ++ /* Check for any internal or external memory segment, ++ * depending on segid.*/ ++ memType |= segid == MEMINTERNALID ? ++ DYNM_INTERNAL : DYNM_EXTERNAL; ++ for (i = 0; i < hNldr->nSegs; i++) { ++ if ((hNldr->segTable[i] & memType) != memType) ++ continue; ++ ++ status = RMM_alloc(rmm, i, nWords, align, dspAddr, ++ false); ++ if (DSP_SUCCEEDED(status)) { ++ /* Save segid for freeing later */ ++ pRmmAddr->segid = i; ++ break; ++ } ++ } ++ } ++func_cont: ++ /* Haven't found memory yet, attempt to find any segment that works */ ++ if (status == DSP_EMEMORY && !fReq) { ++ GT_0trace(NLDR_debugMask, GT_6CLASS, ++ "RemoteAlloc: Preferred segment " ++ "unavailable, trying another segment.\n"); ++ for (i = 0; i < hNldr->nSegs; i++) { ++ /* All bits of memType must be set */ ++ if ((hNldr->segTable[i] & memType) != memType) ++ continue; ++ ++ status = RMM_alloc(rmm, i, nWords, align, dspAddr, ++ false); ++ if (DSP_SUCCEEDED(status)) { ++ /* Save segid */ ++ pRmmAddr->segid = i; ++ break; ++ } ++ } ++ } ++ ++ return status; ++} ++ ++static DSP_STATUS RemoteFree(void **pRef, u16 space, u32 dspAddr, ++ u32 size, bool reserve) ++{ ++ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)pRef; ++ struct RMM_TargetObj *rmm; ++ u32 nWords; ++ DSP_STATUS status = DSP_EMEMORY; /* Set to fail */ ++ ++ DBC_Require(MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); ++ ++ rmm = hNldr->rmm; ++ ++ /* Convert size to DSP words */ ++ nWords = (size + hNldr->usDSPWordSize - 1) / hNldr->usDSPWordSize; ++ ++ if (RMM_free(rmm, space, dspAddr, nWords, reserve)) ++ status = DSP_SOK; ++ ++ return status; ++} ++ ++/* ++ * ======== UnloadLib ======== ++ */ ++static void UnloadLib(struct NLDR_NODEOBJECT *hNldrNode, struct LibNode *root) ++{ ++ struct DBLL_Attrs newAttrs; ++ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; ++ u16 i; ++ ++ DBC_Assert(root != NULL); ++ ++ /* Unload dependent libraries */ ++ for (i = 0; i < root->nDepLibs; i++) ++ UnloadLib(hNldrNode, &root->pDepLibs[i]); ++ ++ root->nDepLibs = 0; ++ ++ newAttrs = hNldr->dbllAttrs; ++ newAttrs.rmmHandle = hNldr->rmm; ++ newAttrs.wHandle = hNldrNode->pPrivRef; ++ newAttrs.baseImage = false; ++ newAttrs.symArg = root; ++ ++ if (root->lib) { ++ /* Unload the root library */ ++ hNldr->dbllFxns.unloadFxn(root->lib, &newAttrs); ++ hNldr->dbllFxns.closeFxn(root->lib); ++ } ++ ++ /* Free dependent library list */ ++ if (root->pDepLibs) { ++ MEM_Free(root->pDepLibs); ++ root->pDepLibs = NULL; ++ } ++} ++ ++/* ++ * ======== UnloadOvly ======== ++ */ ++static void UnloadOvly(struct NLDR_NODEOBJECT *hNldrNode, enum NLDR_PHASE phase) ++{ ++ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; 
++	struct OvlyNode *pONode = NULL;
++	struct OvlySect *pPhaseSects = NULL;
++	struct OvlySect *pOtherSects = NULL;
++	u16 i;
++	u16 nAlloc = 0;
++	u16 nOtherAlloc = 0;
++	u16 *pRefCount = NULL;
++	u16 *pOtherRef = NULL;
++	DSP_STATUS status = DSP_SOK;
++
++	/* Find the node in the table */
++	for (i = 0; i < hNldr->nOvlyNodes; i++) {
++		if (IsEqualUUID(hNldrNode->uuid, hNldr->ovlyTable[i].uuid)) {
++			/* Found it */
++			pONode = &(hNldr->ovlyTable[i]);
++			break;
++		}
++	}
++
++	DBC_Assert(i < hNldr->nOvlyNodes);
++	switch (phase) {
++	case NLDR_CREATE:
++		pRefCount = &(pONode->createRef);
++		pPhaseSects = pONode->pCreateSects;
++		nAlloc = pONode->nCreateSects;
++		break;
++	case NLDR_EXECUTE:
++		pRefCount = &(pONode->executeRef);
++		pPhaseSects = pONode->pExecuteSects;
++		nAlloc = pONode->nExecuteSects;
++		break;
++	case NLDR_DELETE:
++		pRefCount = &(pONode->deleteRef);
++		pOtherRef = &(pONode->otherRef);
++		pPhaseSects = pONode->pDeleteSects;
++		/* 'Other' overlay sections are unloaded in the delete phase */
++		pOtherSects = pONode->pOtherSects;
++		nAlloc = pONode->nDeleteSects;
++		nOtherAlloc = pONode->nOtherSects;
++		break;
++	default:
++		DBC_Assert(false);
++		break;
++	}
++	if (DSP_SUCCEEDED(status)) {
++		DBC_Assert(pRefCount && (*pRefCount > 0));
++		if (pRefCount && (*pRefCount > 0)) {
++			*pRefCount -= 1;
++			if (pOtherRef) {
++				DBC_Assert(*pOtherRef > 0);
++				*pOtherRef -= 1;
++			}
++		}
++	}
++	if (pRefCount && (*pRefCount == 0)) {
++		/* 'Deallocate' memory */
++		FreeSects(hNldr, pPhaseSects, nAlloc);
++	}
++	if (pOtherRef && *pOtherRef == 0)
++		FreeSects(hNldr, pOtherSects, nOtherAlloc);
++
++}
++
++/*
++ * ======== findInPersistentLibArray ========
++ */
++static bool findInPersistentLibArray(struct NLDR_NODEOBJECT *hNldrNode,
++				    struct DBLL_LibraryObj *lib)
++{
++	s32 i = 0;
++
++	for (i = 0; i < hNldrNode->nPersLib; i++) {
++		if (lib == hNldrNode->persLib[i].lib)
++			return true;
++
++	}
++
++	return false;
++}
++
++/*
++ * ================ Find LCM (Least Common Multiple) ===
++ */
++static u32 findLcm(u32 a, u32 b)
++{
++	u32 retVal;
++
++	retVal = a * b / findGcf(a, b);
++
++	return retVal;
++}
++
++/*
++ * ================ Find GCF (Greatest Common Factor) ===
++ */
++static u32 findGcf(u32 a, u32 b)
++{
++	u32 c;
++
++	/* Get the GCF (greatest common factor) of the two numbers,
++	 * using the Euclidean algorithm. */
++	while ((c = (a % b))) {
++		a = b;
++		b = c;
++	}
++	return b;
++}
++
+diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/node.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/node.c
+--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/node.c	1970-01-01 01:00:00.000000000 +0100
++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/node.c	2011-09-04 11:31:05.000000000 +0200
+@@ -0,0 +1,3504 @@
++/*
++ * node.c
++ *
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
++ *
++ * Copyright (C) 2005-2006 Texas Instruments, Inc.
++ *
++ * This package is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
++ */
++
++/*
++ * ======== node.c ========
++ *
++ * Description:
++ *      DSP/BIOS Bridge Node Manager.
++ * ++ * Public Functions: ++ * NODE_Allocate ++ * NODE_AllocMsgBuf ++ * NODE_ChangePriority ++ * NODE_Connect ++ * NODE_Create ++ * NODE_CreateMgr ++ * NODE_Delete ++ * NODE_DeleteMgr ++ * NODE_EnumNodes ++ * NODE_Exit ++ * NODE_FreeMsgBuf ++ * NODE_GetAttr ++ * NODE_GetChannelId ++ * NODE_GetMessage ++ * NODE_GetStrmMgr ++ * NODE_Init ++ * NODE_OnExit ++ * NODE_Pause ++ * NODE_PutMessage ++ * NODE_RegisterNotify ++ * NODE_Run ++ * NODE_Terminate ++ * ++ *! Revision History: ++ *! ================= ++ *! 12-Apr-2004 hp Compile IVA only for 24xx ++ *! 09-Feb-2004 vp Updated to support IVA. ++ *! 07-Apr-2003 map Eliminated references to old DLDR ++ *! 26-Mar-2003 vp Commented the call to DSP deep sleep in Node_Delete ++ *! function. ++ *! 18-Feb-2003 vp Code review updates. ++ *! 06-Feb-2003 kc Fixed FreeStream to release streams correctly. ++ *! 23-Jan-2003 map Removed call to DISP_DoCinit within Write() ++ *! 03-Jan-2003 map Only unload code after phase has executed if ++ *! overlay or split dynload phases ++ *! 18-Oct-2002 vp Ported to Linux platform. ++ *! 06-Nov-2002 map Fixed NODE_Run on NODE_PAUSED bug ++ *! 12-Oct-2002 map Fixed DeleteNode bug in NODE_Create ++ *! 11-Sep-2002 rr DeleteNode frees the memory for strmConnect and dcd obj ++ *! 29-Aug-2002 map Modified Ovly and Write to use ARM-side copy ++ *! 22-May-2002 sg Changed use of cbData for PWR calls. ++ *! 17-May-2002 jeh Removed LoadLoaderFxns(). Get address of RMS_cinit() ++ *! function. Call DISP_DoCinit() from Write(), if .cinit. ++ *! 13-May-2002 sg Added timeout to wake/sleep calls. ++ *! 02-May-2002 sg Added wake/sleep of DSP to support "nap" mode. ++ *! 18-Apr-2002 jeh Use dynamic loader if compile flag is set. ++ *! 13-Feb-2002 jeh Get uSysStackSize from DSP_NDBPROPS. ++ *! 07-Jan-2002 ag STRMMODE_ZEROCOPY(shared memory buffer swap) enabled. ++ *! 17-Dec-2001 ag STRMMODE_RDMA(DDMA) enabled. ++ *! 12-Dec-2001 ag Check for valid stream mode in NODE_Connect(). ++ *! 04-Dec-2001 jeh Check for node sufficiently connected in NODE_Create(). ++ *! 15-Nov-2001 jeh Removed DBC_Require(pNode->hXlator != NULL) from ++ *! NODE_AllocMsgBuf(), and check node type != NODE_DEVICE. ++ *! 11-Sep-2001 ag Zero-copy messaging support. ++ *! 28-Aug-2001 jeh Overlay/dynamic loader infrastructure added. Removed ++ *! NODE_GetDispatcher, excess node states. ++ *! 07-Aug-2001 jeh Removed critical section for dispatcher. ++ *! 26-Jul-2001 jeh Get ZL dll name through CFG. ++ *! 05-Jun-2001 jeh Assume DSP_STRMATTRS.uBufsize in GPP bytes. ++ *! 11-May-2001 jeh Some code review cleanup. ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. ++ *! 15-Dec-2000 sg Convert IALG_Fxn address from byte addr to word addr. ++ *! 04-Dec-2000 jeh Call MSG Get and Put functions. ++ *! 04-Dec-2000 ag Added SM support for node messaging. ++ *! 10-Nov-2000 rr: NODE_MIN/MAX Priority is defined in dspdefs.h. ++ *! 27-Oct-2000 jeh Added NODE_AllocMsgBuf(), NODE_FreeMsgBuf(). ++ *! 11-Oct-2000 jeh Changed NODE_EnumNodeInfo to NODE_EnumNodes. Added ++ *! NODE_CloseOrphans(). Remove NODE_RegisterNotifyAllNodes ++ *! 19-Jun-2000 jeh Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Link Driver */ ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++#ifdef DEBUG ++#include ++#include ++#endif ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++/* Static/Dynamic Loader includes */ ++#include ++#include ++ ++#ifndef RES_CLEANUP_DISABLE ++#include ++#include ++#include ++#include ++#endif ++ ++ ++#define NODE_SIGNATURE 0x45444f4e /* "EDON" */ ++#define NODEMGR_SIGNATURE 0x52474d4e /* "RGMN" */ ++ ++#define HOSTPREFIX "/host" ++#define PIPEPREFIX "/dbpipe" ++ ++#define MaxInputs(h) ((h)->dcdProps.objData.nodeObj.ndbProps.uNumInputStreams) ++#define MaxOutputs(h) ((h)->dcdProps.objData.nodeObj.ndbProps.uNumOutputStreams) ++ ++#define NODE_GetPriority(h) ((h)->nPriority) ++#define NODE_SetPriority(hNode, nPriority) ((hNode)->nPriority = nPriority) ++#define NODE_SetState(hNode, state) ((hNode)->nState = state) ++ ++#define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */ ++#define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */ ++ ++#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN) ++#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN) ++ ++#define MAXDEVNAMELEN 32 /* DSP_NDBPROPS.acName size */ ++#define CREATEPHASE 1 ++#define EXECUTEPHASE 2 ++#define DELETEPHASE 3 ++ ++/* Define default STRM parameters */ ++/* ++ * TBD: Put in header file, make global DSP_STRMATTRS with defaults, ++ * or make defaults configurable. 
++ */ ++#define DEFAULTBUFSIZE 32 ++#define DEFAULTNBUFS 2 ++#define DEFAULTSEGID 0 ++#define DEFAULTALIGNMENT 0 ++#define DEFAULTTIMEOUT 10000 ++ ++#define RMSQUERYSERVER 0 ++#define RMSCONFIGURESERVER 1 ++#define RMSCREATENODE 2 ++#define RMSEXECUTENODE 3 ++#define RMSDELETENODE 4 ++#define RMSCHANGENODEPRIORITY 5 ++#define RMSREADMEMORY 6 ++#define RMSWRITEMEMORY 7 ++#define RMSCOPY 8 ++#define MAXTIMEOUT 2000 ++ ++#define NUMRMSFXNS 9 ++ ++#define PWR_TIMEOUT 500 /* default PWR timeout in msec */ ++ ++#define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Address */ ++ ++/* ++ * ======== NODE_MGR ======== ++ */ ++struct NODE_MGR { ++ u32 dwSignature; /* For object validation */ ++ struct DEV_OBJECT *hDevObject; /* Device object */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ struct DCD_MANAGER *hDcdMgr; /* Proc/Node data manager */ ++ struct DISP_OBJECT *hDisp; /* Node dispatcher */ ++ struct LST_LIST *nodeList; /* List of all allocated nodes */ ++ u32 uNumNodes; /* Number of nodes in nodeList */ ++ u32 uNumCreated; /* Number of nodes *created* on DSP */ ++ struct GB_TMap *pipeMap; /* Pipe connection bit map */ ++ struct GB_TMap *pipeDoneMap; /* Pipes that are half free */ ++ struct GB_TMap *chnlMap; /* Channel allocation bit map */ ++ struct GB_TMap *dmaChnlMap; /* DMA Channel allocation bit map */ ++ struct GB_TMap *zChnlMap; /* Zero-Copy Channel alloc bit map */ ++ struct NTFY_OBJECT *hNtfy; /* Manages registered notifications */ ++ struct SYNC_CSOBJECT *hSync; /* For critical sections */ ++ u32 ulFxnAddrs[NUMRMSFXNS]; /* RMS function addresses */ ++ struct MSG_MGR *hMsg; ++ ++ /* Processor properties needed by Node Dispatcher */ ++ u32 ulNumChnls; /* Total number of channels */ ++ u32 ulChnlOffset; /* Offset of chnl ids rsvd for RMS */ ++ u32 ulChnlBufSize; /* Buffer size for data to RMS */ ++ DSP_PROCFAMILY procFamily; /* eg, 5000 */ ++ DSP_PROCTYPE procType; /* eg, 5510 */ ++ u32 uDSPWordSize; /* Size of DSP word on host bytes */ ++ u32 uDSPDataMauSize; /* Size of DSP data MAU */ ++ u32 uDSPMauSize; /* Size of MAU */ ++ s32 nMinPri; /* Minimum runtime priority for node */ ++ s32 nMaxPri; /* Maximum runtime priority for node */ ++ ++ struct STRM_MGR *hStrmMgr; /* STRM manager */ ++ ++ /* Loader properties */ ++ struct NLDR_OBJECT *hNldr; /* Handle to loader */ ++ struct NLDR_FXNS nldrFxns; /* Handle to loader functions */ ++ bool fLoaderInit; /* Loader Init function succeeded? */ ++}; ++ ++/* ++ * ======== CONNECTTYPE ======== ++ */ ++enum CONNECTTYPE { ++ NOTCONNECTED = 0, ++ NODECONNECT, ++ HOSTCONNECT, ++ DEVICECONNECT, ++} ; ++ ++/* ++ * ======== STREAM ======== ++ */ ++struct STREAM { ++ enum CONNECTTYPE type; /* Type of stream connection */ ++ u32 devId; /* pipe or channel id */ ++}; ++ ++/* ++ * ======== NODE_OBJECT ======== ++ */ ++struct NODE_OBJECT { ++ struct LST_ELEM listElem; ++ u32 dwSignature; /* For object validation */ ++ struct NODE_MGR *hNodeMgr; /* The manager of this node */ ++ struct PROC_OBJECT *hProcessor; /* Back pointer to processor */ ++ struct DSP_UUID nodeId; /* Node's ID */ ++ s32 nPriority; /* Node's current priority */ ++ u32 uTimeout; /* Timeout for blocking NODE calls */ ++ u32 uHeapSize; /* Heap Size */ ++ u32 uDSPHeapVirtAddr; /* Heap Size */ ++ u32 uGPPHeapVirtAddr; /* Heap Size */ ++ enum NODE_TYPE nType; /* Type of node: message, task, etc */ ++ enum NODE_STATE nState; /* NODE_ALLOCATED, NODE_CREATED, ... 
*/ ++ u32 uNumInputs; /* Current number of inputs */ ++ u32 uNumOutputs; /* Current number of outputs */ ++ u32 uMaxInputIndex; /* Current max input stream index */ ++ u32 uMaxOutputIndex; /* Current max output stream index */ ++ struct STREAM *inputs; /* Node's input streams */ ++ struct STREAM *outputs; /* Node's output streams */ ++ struct NODE_CREATEARGS createArgs; /* Args for node create function */ ++ NODE_ENV nodeEnv; /* Environment returned by RMS */ ++ struct DCD_GENERICOBJ dcdProps; /* Node properties from DCD */ ++ struct DSP_CBDATA *pArgs; /* Optional args to pass to node */ ++ struct NTFY_OBJECT *hNtfy; /* Manages registered notifications */ ++ char *pstrDevName; /* device name, if device node */ ++ struct SYNC_OBJECT *hSyncDone; /* Synchronize NODE_Terminate */ ++ s32 nExitStatus; /* execute function return status */ ++ ++ /* Information needed for NODE_GetAttr() */ ++ DSP_HNODE hDeviceOwner; /* If dev node, task that owns it */ ++ u32 uNumGPPInputs; /* Current # of from GPP streams */ ++ u32 uNumGPPOutputs; /* Current # of to GPP streams */ ++ /* Current stream connections */ ++ struct DSP_STREAMCONNECT *streamConnect; ++ ++ /* Message queue */ ++ struct MSG_QUEUE *hMsgQueue; ++ ++ /* These fields used for SM messaging */ ++ struct CMM_XLATOROBJECT *hXlator; /* Node's SM address translator */ ++ ++ /* Handle to pass to dynamic loader */ ++ struct NLDR_NODEOBJECT *hNldrNode; ++ bool fLoaded; /* Code is (dynamically) loaded */ ++ bool fPhaseSplit; /* Phases split in many libs or ovly */ ++ ++} ; ++ ++/* Default buffer attributes */ ++static struct DSP_BUFFERATTR NODE_DFLTBUFATTRS = { ++ 0, /* cbStruct */ ++ 1, /* uSegment */ ++ 0, /* uAlignment */ ++}; ++ ++static void DeleteNode(struct NODE_OBJECT *hNode, ++ struct PROCESS_CONTEXT *pr_ctxt); ++static void DeleteNodeMgr(struct NODE_MGR *hNodeMgr); ++static void FillStreamConnect(struct NODE_OBJECT *hNode1, ++ struct NODE_OBJECT *hNode2, u32 uStream1, ++ u32 uStream2); ++static void FillStreamDef(struct NODE_OBJECT *hNode, ++ struct NODE_STRMDEF *pstrmDef, ++ struct DSP_STRMATTR *pAttrs); ++static void FreeStream(struct NODE_MGR *hNodeMgr, struct STREAM stream); ++static DSP_STATUS GetFxnAddress(struct NODE_OBJECT *hNode, u32 *pulFxnAddr, ++ u32 uPhase); ++static DSP_STATUS GetNodeProps(struct DCD_MANAGER *hDcdMgr, ++ struct NODE_OBJECT *hNode, ++ CONST struct DSP_UUID *pNodeId, ++ struct DCD_GENERICOBJ *pdcdProps); ++static DSP_STATUS GetProcProps(struct NODE_MGR *hNodeMgr, ++ struct DEV_OBJECT *hDevObject); ++static DSP_STATUS GetRMSFxns(struct NODE_MGR *hNodeMgr); ++static u32 Ovly(void *pPrivRef, u32 ulDspRunAddr, u32 ulDspLoadAddr, ++ u32 ulNumBytes, u32 nMemSpace); ++static u32 Write(void *pPrivRef, u32 ulDspAddr, void *pBuf, ++ u32 ulNumBytes, u32 nMemSpace); ++ ++#if GT_TRACE ++static struct GT_Mask NODE_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++#ifdef DSP_DMM_DEBUG ++extern u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr); ++#endif ++ ++static u32 cRefs; /* module reference count */ ++ ++/* Dynamic loader functions. 
*/ ++static struct NLDR_FXNS nldrFxns = { ++ NLDR_Allocate, ++ NLDR_Create, ++ NLDR_Delete, ++ NLDR_Exit, ++ NLDR_Free, ++ NLDR_GetFxnAddr, ++ NLDR_Init, ++ NLDR_Load, ++ NLDR_Unload, ++}; ++ ++enum NODE_STATE NODE_GetState(HANDLE hNode) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ if (!MEM_IsValidHandle(pNode, NODE_SIGNATURE)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_GetState:hNode 0x%x\n", pNode); ++ return -1; ++ } else ++ return pNode->nState; ++ ++} ++ ++/* ++ * ======== NODE_Allocate ======== ++ * Purpose: ++ * Allocate GPP resources to manage a node on the DSP. ++ */ ++DSP_STATUS NODE_Allocate(struct PROC_OBJECT *hProcessor, ++ IN CONST struct DSP_UUID *pNodeId, ++ OPTIONAL IN CONST struct DSP_CBDATA *pArgs, ++ OPTIONAL IN CONST struct DSP_NODEATTRIN *pAttrIn, ++ OUT struct NODE_OBJECT **phNode, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ struct NODE_MGR *hNodeMgr; ++ struct DEV_OBJECT *hDevObject; ++ struct NODE_OBJECT *pNode = NULL; ++ enum NODE_TYPE nodeType = NODE_TASK; ++ struct NODE_MSGARGS *pmsgArgs; ++ struct NODE_TASKARGS *ptaskArgs; ++ u32 uNumStreams; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_OBJECT *hCmmMgr = NULL; /* Shared memory manager hndl */ ++ u32 procId; ++ char *label; ++ u32 pulValue; ++ u32 dynextBase; ++ u32 offSet = 0; ++ u32 ulStackSegAddr, ulStackSegVal; ++ u32 ulGppMemBase; ++ struct CFG_HOSTRES hostRes; ++ u32 pMappedAddr = 0; ++ u32 mapAttrs = 0x0; ++ struct DSP_PROCESSORSTATE procStatus; ++#ifdef DSP_DMM_DEBUG ++ struct DMM_OBJECT *hDmmMgr; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++#endif ++ ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE nodeRes; ++#endif ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hProcessor != NULL); ++ DBC_Require(phNode != NULL); ++ DBC_Require(pNodeId != NULL); ++ ++ GT_5trace(NODE_debugMask, GT_ENTER, "NODE_Allocate: \thProcessor: " ++ "0x%x\tpNodeId: 0x%x\tpArgs: 0x%x\tpAttrIn: " ++ "0x%x\tphNode: 0x%x\n", hProcessor, pNodeId, pArgs, pAttrIn, ++ phNode); ++ ++ *phNode = NULL; ++ ++ status = PROC_GetProcessorId(hProcessor, &procId); ++ ++ status = PROC_GetDevObject(hProcessor, &hDevObject); ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetNodeManager(hDevObject, &hNodeMgr); ++ if (hNodeMgr == NULL) ++ status = DSP_EFAIL; ++ ++ } ++ if (procId != DSP_UNIT) ++ goto func_cont; ++ ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in error state then don't attempt ++ to send the message */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: proc Status 0x%x\n", ++ procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ ++ /* Assuming that 0 is not a valid function address */ ++ if (hNodeMgr->ulFxnAddrs[0] == 0) { ++ /* No RMS on target - we currently can't handle this */ ++ GT_0trace(NODE_debugMask, GT_5CLASS, "No RMS functions in base " ++ "image. 
Node allocation fails.\n"); ++ status = DSP_EFAIL; ++ } else { ++ /* Validate pAttrIn fields, if non-NULL */ ++ if (pAttrIn) { ++ /* Check if pAttrIn->iPriority is within range */ ++ if (pAttrIn->iPriority < hNodeMgr->nMinPri || ++ pAttrIn->iPriority > hNodeMgr->nMaxPri) ++ status = DSP_ERANGE; ++ } ++ } ++func_cont: ++ /* Allocate node object and fill in */ ++ if (DSP_FAILED(status)) ++ goto func_cont2; ++ ++ MEM_AllocObject(pNode, struct NODE_OBJECT, NODE_SIGNATURE); ++ if (pNode == NULL) { ++ status = DSP_EMEMORY; ++ goto func_cont1; ++ } ++ pNode->hNodeMgr = hNodeMgr; ++ /* This critical section protects GetNodeProps */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (procId != DSP_UNIT) ++ goto func_cont3; ++ ++ /* Get DSP_NDBPROPS from node database */ ++ status = GetNodeProps(hNodeMgr->hDcdMgr, pNode, pNodeId, ++ &(pNode->dcdProps)); ++ if (DSP_FAILED(status)) ++ goto func_cont3; ++ ++ pNode->nodeId = *pNodeId; ++ pNode->hProcessor = hProcessor; ++ pNode->nType = pNode->dcdProps.objData.nodeObj.ndbProps.uNodeType; ++ pNode->uTimeout = pNode->dcdProps.objData.nodeObj.ndbProps.uTimeout; ++ pNode->nPriority = pNode->dcdProps.objData.nodeObj.ndbProps.iPriority; ++ ++ /* Currently only C64 DSP builds support Node Dynamic * heaps */ ++ /* Allocate memory for node heap */ ++ pNode->createArgs.asa.taskArgs.uHeapSize = 0; ++ pNode->createArgs.asa.taskArgs.uDSPHeapAddr = 0; ++ pNode->createArgs.asa.taskArgs.uDSPHeapResAddr = 0; ++ pNode->createArgs.asa.taskArgs.uGPPHeapAddr = 0; ++ if (!pAttrIn) ++ goto func_cont3; ++ ++ /* Check if we have a user allocated node heap */ ++ if (!(pAttrIn->pGPPVirtAddr)) ++ goto func_cont3; ++ ++ /* check for page aligned Heap size */ ++ if (((pAttrIn->uHeapSize) & (PG_SIZE_4K - 1))) { ++ GT_1trace(NODE_debugMask, GT_7CLASS, ++ "NODE_Allocate: node heap page size" ++ " not aligned to 4K page, size=0x%x \n", ++ pAttrIn->uHeapSize); ++ status = DSP_EINVALIDARG; ++ } else { ++ pNode->createArgs.asa.taskArgs.uHeapSize = pAttrIn->uHeapSize; ++ pNode->createArgs.asa.taskArgs.uGPPHeapAddr = ++ (u32)pAttrIn->pGPPVirtAddr; ++ } ++ if (DSP_FAILED(status)) ++ goto func_cont3; ++ ++ status = PROC_ReserveMemory(hProcessor, ++ pNode->createArgs.asa.taskArgs.uHeapSize + PAGE_SIZE, ++ (void **)&(pNode->createArgs.asa.taskArgs. 
++ uDSPHeapResAddr)); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate:Failed to reserve " ++ "memory for Heap: 0x%x\n", status); ++ } else { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: DSPProcessor_Reserve" ++ " Memory successful: 0x%x\n", status); ++ } ++#ifdef DSP_DMM_DEBUG ++ status = DMM_GetHandle(pProcObject, &hDmmMgr); ++ if (DSP_SUCCEEDED(status)) ++ DMM_MemMapDump(hDmmMgr); ++#endif ++ if (DSP_FAILED(status)) ++ goto func_cont3; ++ ++ mapAttrs |= DSP_MAPLITTLEENDIAN; ++ mapAttrs |= DSP_MAPELEMSIZE32; ++ mapAttrs |= DSP_MAPVIRTUALADDR; ++ status = PROC_Map(hProcessor, (void *)pAttrIn->pGPPVirtAddr, ++ pNode->createArgs.asa.taskArgs.uHeapSize, ++ (void *)pNode->createArgs.asa.taskArgs.uDSPHeapResAddr, ++ (void **)&pMappedAddr, mapAttrs, pr_ctxt); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed to map memory" ++ " for Heap: 0x%x\n", status); ++ } else { ++ pNode->createArgs.asa.taskArgs.uDSPHeapAddr = ++ (u32) pMappedAddr; ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate:DSPProcessor_Map" ++ " successful: 0x%x\n", status); ++ } ++ ++func_cont3: ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++func_cont1: ++ if (pAttrIn != NULL) { ++ /* Overrides of NBD properties */ ++ pNode->uTimeout = pAttrIn->uTimeout; ++ pNode->nPriority = pAttrIn->iPriority; ++ } ++func_cont2: ++ /* Create object to manage notifications */ ++ if (DSP_SUCCEEDED(status)) ++ status = NTFY_Create(&pNode->hNtfy); ++ ++ if (DSP_SUCCEEDED(status)) { ++ nodeType = NODE_GetType(pNode); ++ /* Allocate DSP_STREAMCONNECT array for device, task, and ++ * dais socket nodes. */ ++ if (nodeType != NODE_MESSAGE) { ++ uNumStreams = MaxInputs(pNode) + MaxOutputs(pNode); ++ pNode->streamConnect = MEM_Calloc(uNumStreams * ++ sizeof(struct DSP_STREAMCONNECT), ++ MEM_PAGED); ++ if (uNumStreams > 0 && pNode->streamConnect == NULL) ++ status = DSP_EMEMORY; ++ ++ } ++ if (DSP_SUCCEEDED(status) && (nodeType == NODE_TASK || ++ nodeType == NODE_DAISSOCKET)) { ++ /* Allocate arrays for maintainig stream connections */ ++ pNode->inputs = ++ MEM_Calloc(MaxInputs(pNode) * ++ sizeof(struct STREAM), MEM_PAGED); ++ pNode->outputs = ++ MEM_Calloc(MaxOutputs(pNode) * ++ sizeof(struct STREAM), MEM_PAGED); ++ ptaskArgs = &(pNode->createArgs.asa.taskArgs); ++ ptaskArgs->strmInDef = ++ MEM_Calloc(MaxInputs(pNode) * ++ sizeof(struct NODE_STRMDEF), ++ MEM_PAGED); ++ ptaskArgs->strmOutDef = ++ MEM_Calloc(MaxOutputs(pNode) * ++ sizeof(struct NODE_STRMDEF), ++ MEM_PAGED); ++ if ((MaxInputs(pNode) > 0 && (pNode->inputs == NULL || ++ ptaskArgs->strmInDef == NULL)) || ++ (MaxOutputs(pNode) > 0 && (pNode->outputs == NULL || ++ ptaskArgs->strmOutDef == NULL))) ++ status = DSP_EMEMORY; ++ } ++ } ++ if (DSP_SUCCEEDED(status) && (nodeType != NODE_DEVICE)) { ++ /* Create an event that will be posted when RMS_EXIT is ++ * received. 
*/ ++ status = SYNC_OpenEvent(&pNode->hSyncDone, NULL); ++ if (DSP_SUCCEEDED(status)) { ++ /*Get the shared mem mgr for this nodes dev object */ ++ status = CMM_GetHandle(hProcessor, &hCmmMgr); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed to" ++ " get CMM Mgr handle: 0x%x\n", status); ++ } else { ++ /* Allocate a SM addr translator for this node ++ * w/ deflt attr */ ++ status = CMM_XlatorCreate(&pNode->hXlator, ++ hCmmMgr, NULL); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed" ++ " to create SM translator: 0x%x\n", ++ status); ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Fill in message args */ ++ if ((pArgs != NULL) && (pArgs->cbData > 0)) { ++ pmsgArgs = &(pNode->createArgs.asa.msgArgs); ++ pmsgArgs->pData = MEM_Calloc(pArgs->cbData, ++ MEM_PAGED); ++ if (pmsgArgs->pData == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ pmsgArgs->uArgLength = pArgs->cbData; ++ memcpy(pmsgArgs->pData, pArgs->cData, ++ pArgs->cbData); ++ } ++ } ++ } ++ } ++ ++ if (DSP_SUCCEEDED(status) && nodeType != NODE_DEVICE) { ++ /* Create a message queue for this node */ ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnMsgCreateQueue)(hNodeMgr->hMsg, ++ &pNode->hMsgQueue, 0, ++ pNode->createArgs.asa.msgArgs.uMaxMessages, ++ pNode); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Create object for dynamic loading */ ++ ++ status = hNodeMgr->nldrFxns.pfnAllocate(hNodeMgr->hNldr, ++ (void *) pNode, ++ &pNode->dcdProps.objData.nodeObj, ++ &pNode->hNldrNode, ++ &pNode->fPhaseSplit); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed to " ++ "allocate NLDR node: 0x%x\n", status); ++ } ++ } ++ ++ /* Comapare value read from Node Properties and check if it is same as ++ * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate ++ * GPP Address, Read the value in that address and override the ++ * uStackSeg value in task args */ ++ if (DSP_SUCCEEDED(status) && ++ (char *)pNode->dcdProps.objData.nodeObj.ndbProps.uStackSegName != ++ NULL) { ++ label = MEM_Calloc(sizeof(STACKSEGLABEL)+1, MEM_PAGED); ++ strncpy(label, STACKSEGLABEL, sizeof(STACKSEGLABEL)+1); ++ ++ if (strcmp((char *)pNode->dcdProps.objData.nodeObj. 
++ ndbProps.uStackSegName, label) == 0) { ++ status = hNodeMgr->nldrFxns.pfnGetFxnAddr(pNode-> ++ hNldrNode, "DYNEXT_BEG", &dynextBase); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed to get Address for " ++ "DYNEXT_BEG: 0x%x\n", status); ++ } ++ ++ status = hNodeMgr->nldrFxns.pfnGetFxnAddr(pNode-> ++ hNldrNode, "L1DSRAM_HEAP", &pulValue); ++ ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed to get Address for " ++ "L1DSRAM_HEAP: 0x%x\n", status); ++ } ++ ++ status = CFG_GetHostResources((struct CFG_DEVNODE *) ++ DRV_GetFirstDevExtension(), &hostRes); ++ ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Allocate: Failed to get host resource " ++ "0x%x\n", status); ++ } ++ ++ ulGppMemBase = (u32)hostRes.dwMemBase[1]; ++ offSet = pulValue - dynextBase; ++ ulStackSegAddr = ulGppMemBase + offSet; ++ ulStackSegVal = (u32)*((REG_UWORD32 *) ++ ((u32)(ulStackSegAddr))); ++ ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "StackSegVal =0x%x\n", ulStackSegVal); ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "ulStackSegAddr = 0x%x\n", ulStackSegAddr); ++ ++ pNode->createArgs.asa.taskArgs.uStackSeg = ++ ulStackSegVal; ++ ++ } ++ ++ if (label) ++ MEM_Free(label); ++ ++ } ++ ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Add the node to the node manager's list of allocated ++ * nodes. */ ++ LST_InitElem((struct LST_ELEM *)pNode); ++ NODE_SetState(pNode, NODE_ALLOCATED); ++ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ ++ if (DSP_SUCCEEDED(status)) { ++ LST_PutTail(hNodeMgr->nodeList, ++ (struct LST_ELEM *) pNode); ++ ++(hNodeMgr->uNumNodes); ++ } ++ ++ /* Exit critical section */ ++ (void) SYNC_LeaveCS(hNodeMgr->hSync); ++ ++ /* Preset this to assume phases are split ++ * (for overlay and dll) */ ++ pNode->fPhaseSplit = true; ++ ++ if (DSP_SUCCEEDED(status)) ++ *phNode = pNode; ++ ++ ++ /* Notify all clients registered for DSP_NODESTATECHANGE. */ ++ PROC_NotifyAllClients(hProcessor, DSP_NODESTATECHANGE); ++ } else { ++ /* Cleanup */ ++ if (pNode) ++ DeleteNode(pNode, pr_ctxt); ++ ++ } ++ ++#ifndef RES_CLEANUP_DISABLE ++ if (DSP_SUCCEEDED(status)) { ++ DRV_InsertNodeResElement(*phNode, &nodeRes, pr_ctxt); ++ DRV_ProcNodeUpdateHeapStatus(nodeRes, true); ++ DRV_ProcNodeUpdateStatus(nodeRes, true); ++ } ++#endif ++ DBC_Ensure((DSP_FAILED(status) && (*phNode == NULL)) || ++ (DSP_SUCCEEDED(status) ++ && MEM_IsValidHandle((*phNode), NODE_SIGNATURE))); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_AllocMsgBuf ======== ++ * Purpose: ++ * Allocates buffer for zero copy messaging. 
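++ * The buffer comes from the node's shared-memory translator (CMM). If
++ * pAttr->uSegment carries MEM_SETVIRTUALSEGID or MEM_GETVIRTUALSEGID the
++ * call only records or retrieves the client's virtual address range in the
++ * translator; otherwise a physical SM buffer is allocated from segment 1
++ * (alignments 0, 1, 2 and 4 only).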
++ */ ++DBAPI NODE_AllocMsgBuf(struct NODE_OBJECT *hNode, u32 uSize, ++ OPTIONAL IN OUT struct DSP_BUFFERATTR *pAttr, ++ OUT u8 **pBuffer) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ DSP_STATUS status = DSP_SOK; ++ bool bVirtAddr = false; ++ bool bSetInfo; ++ u32 procId; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pBuffer != NULL); ++ ++ DBC_Require(uSize > 0); ++ ++ GT_4trace(NODE_debugMask, GT_ENTER, ++ "NODE_AllocMsgBuf: hNode: 0x%x\tuSize:" ++ " 0x%x\tpAttr: 0x%x\tpBuffer: %d\n", pNode, uSize, pAttr, ++ pBuffer); ++ ++ if (!MEM_IsValidHandle(pNode, NODE_SIGNATURE)) ++ status = DSP_EHANDLE; ++ else if (NODE_GetType(pNode) == NODE_DEVICE) ++ status = DSP_ENODETYPE; ++ ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (pAttr == NULL) ++ pAttr = &NODE_DFLTBUFATTRS; /* set defaults */ ++ ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ if (procId != DSP_UNIT) { ++ DBC_Assert(NULL); ++ goto func_end; ++ } ++ /* If segment ID includes MEM_SETVIRTUALSEGID then pBuffer is a ++ * virt address, so set this info in this node's translator ++ * object for future ref. If MEM_GETVIRTUALSEGID then retrieve ++ * virtual address from node's translator. */ ++ if ((pAttr->uSegment & MEM_SETVIRTUALSEGID) || ++ (pAttr->uSegment & MEM_GETVIRTUALSEGID)) { ++ bVirtAddr = true; ++ bSetInfo = (pAttr->uSegment & MEM_SETVIRTUALSEGID) ? ++ true : false; ++ pAttr->uSegment &= ~MEM_MASKVIRTUALSEGID; /* clear mask bits */ ++ /* Set/get this node's translators virtual address base/size */ ++ status = CMM_XlatorInfo(pNode->hXlator, pBuffer, uSize, ++ pAttr->uSegment, bSetInfo); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_7CLASS, ++ "NODE_AllocMsgBuf " ++ "failed: 0x%lx\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status) && (!bVirtAddr)) { ++ if (pAttr->uSegment != 1) { ++ /* Node supports single SM segment only. */ ++ status = DSP_EBADSEGID; ++ } ++ /* Arbitrary SM buffer alignment not supported for host side ++ * allocs, but guaranteed for the following alignment ++ * values. */ ++ switch (pAttr->uAlignment) { ++ case 0: ++ case 1: ++ case 2: ++ case 4: ++ break; ++ default: ++ /* alignment value not suportted */ ++ status = DSP_EALIGNMENT; ++ break; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* allocate physical buffer from segId in node's ++ * translator */ ++ (void)CMM_XlatorAllocBuf(pNode->hXlator, pBuffer, ++ uSize); ++ if (*pBuffer == NULL) { ++ GT_0trace(NODE_debugMask, GT_7CLASS, ++ "NODE_AllocMsgBuf: " ++ "ERROR: Out of shared memory.\n"); ++ status = DSP_EMEMORY; ++ } ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_ChangePriority ======== ++ * Purpose: ++ * Change the priority of a node in the allocated state, or that is ++ * currently running or paused on the target. 
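++ * For NODE_ALLOCATED and NODE_PAUSED nodes only the GPP-side priority is
++ * updated; for a NODE_RUNNING node the new priority is also sent to the
++ * DSP resource manager through DISP_NodeChangePriority(). Any other state
++ * returns DSP_EWRONGSTATE.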
++ */ ++DSP_STATUS NODE_ChangePriority(struct NODE_OBJECT *hNode, s32 nPriority) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ struct NODE_MGR *hNodeMgr = NULL; ++ enum NODE_TYPE nodeType; ++ enum NODE_STATE state; ++ DSP_STATUS status = DSP_SOK; ++ u32 procId; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_2trace(NODE_debugMask, GT_ENTER, "NODE_ChangePriority: " ++ "hNode: 0x%x\tnPriority: %d\n", hNode, nPriority); ++ ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ GT_1trace(NODE_debugMask, GT_7CLASS, ++ "Invalid NODE Handle: 0x%x\n", hNode); ++ status = DSP_EHANDLE; ++ } else { ++ hNodeMgr = hNode->hNodeMgr; ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_TASK && nodeType != NODE_DAISSOCKET) ++ status = DSP_ENODETYPE; ++ else if (nPriority < hNodeMgr->nMinPri || ++ nPriority > hNodeMgr->nMaxPri) ++ status = DSP_ERANGE; ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* Enter critical section */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ state = NODE_GetState(hNode); ++ if (state == NODE_ALLOCATED || state == NODE_PAUSED) { ++ NODE_SetPriority(hNode, nPriority); ++ } else { ++ if (state != NODE_RUNNING) { ++ status = DSP_EWRONGSTATE; ++ goto func_cont; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = PROC_GetProcessorId(pNode->hProcessor, ++ &procId); ++ if (procId == DSP_UNIT) { ++ status = DISP_NodeChangePriority(hNodeMgr-> ++ hDisp, hNode, ++ hNodeMgr->ulFxnAddrs[RMSCHANGENODEPRIORITY], ++ hNode->nodeEnv, nPriority); ++ } ++ if (DSP_SUCCEEDED(status)) ++ NODE_SetPriority(hNode, nPriority); ++ ++ } ++ } ++func_cont: ++ /* Leave critical section */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_Connect ======== ++ * Purpose: ++ * Connect two nodes on the DSP, or a node on the DSP to the GPP. ++ */ ++DSP_STATUS NODE_Connect(struct NODE_OBJECT *hNode1, u32 uStream1, ++ struct NODE_OBJECT *hNode2, ++ u32 uStream2, OPTIONAL IN struct DSP_STRMATTR *pAttrs, ++ OPTIONAL IN struct DSP_CBDATA *pConnParam) ++{ ++ struct NODE_MGR *hNodeMgr; ++ char *pstrDevName = NULL; ++ enum NODE_TYPE node1Type = NODE_TASK; ++ enum NODE_TYPE node2Type = NODE_TASK; ++ struct NODE_STRMDEF *pstrmDef; ++ struct NODE_STRMDEF *pInput = NULL; ++ struct NODE_STRMDEF *pOutput = NULL; ++ struct NODE_OBJECT *hDevNode; ++ struct NODE_OBJECT *hNode; ++ struct STREAM *pStream; ++ GB_BitNum pipeId = GB_NOBITS; ++ GB_BitNum chnlId = GB_NOBITS; ++ CHNL_MODE uMode; ++ u32 dwLength; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ GT_5trace(NODE_debugMask, GT_ENTER, ++ "NODE_Connect: hNode1: 0x%x\tuStream1:" ++ " %d\thNode2: 0x%x\tuStream2: %d\tpAttrs: 0x%x\n", hNode1, ++ uStream1, hNode2, uStream2, pAttrs); ++ if (DSP_SUCCEEDED(status)) { ++ if ((hNode1 != (struct NODE_OBJECT *) DSP_HGPPNODE && ++ !MEM_IsValidHandle(hNode1, NODE_SIGNATURE)) || ++ (hNode2 != (struct NODE_OBJECT *) DSP_HGPPNODE && ++ !MEM_IsValidHandle(hNode2, NODE_SIGNATURE))) ++ status = DSP_EHANDLE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* The two nodes must be on the same processor */ ++ if (hNode1 != (struct NODE_OBJECT *)DSP_HGPPNODE && ++ hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE && ++ hNode1->hNodeMgr != hNode2->hNodeMgr) ++ status = DSP_EFAIL; ++ /* Cannot connect a node to itself */ ++ if (hNode1 == hNode2) ++ status = DSP_EFAIL; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* NODE_GetType() will return NODE_GPP if hNode = ++ * DSP_HGPPNODE. 
*/ ++ node1Type = NODE_GetType(hNode1); ++ node2Type = NODE_GetType(hNode2); ++ /* Check stream indices ranges */ ++ if ((node1Type != NODE_GPP && node1Type != NODE_DEVICE && ++ uStream1 >= MaxOutputs(hNode1)) || (node2Type != NODE_GPP && ++ node2Type != NODE_DEVICE && uStream2 >= MaxInputs(hNode2))) ++ status = DSP_EVALUE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* ++ * Only the following types of connections are allowed: ++ * task/dais socket < == > task/dais socket ++ * task/dais socket < == > device ++ * task/dais socket < == > GPP ++ * ++ * ie, no message nodes, and at least one task or dais ++ * socket node. ++ */ ++ if (node1Type == NODE_MESSAGE || node2Type == NODE_MESSAGE || ++ (node1Type != NODE_TASK && node1Type != NODE_DAISSOCKET && ++ node2Type != NODE_TASK && node2Type != NODE_DAISSOCKET)) ++ status = DSP_EFAIL; ++ } ++ /* ++ * Check stream mode. Default is STRMMODE_PROCCOPY. ++ */ ++ if (DSP_SUCCEEDED(status) && pAttrs) { ++ if (pAttrs->lMode != STRMMODE_PROCCOPY) ++ status = DSP_ESTRMMODE; /* illegal stream mode */ ++ ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (node1Type != NODE_GPP) { ++ hNodeMgr = hNode1->hNodeMgr; ++ } else { ++ DBC_Assert(hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE); ++ hNodeMgr = hNode2->hNodeMgr; ++ } ++ /* Enter critical section */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ /* Nodes must be in the allocated state */ ++ if (node1Type != NODE_GPP && NODE_GetState(hNode1) != NODE_ALLOCATED) ++ status = DSP_EWRONGSTATE; ++ ++ if (node2Type != NODE_GPP && NODE_GetState(hNode2) != NODE_ALLOCATED) ++ status = DSP_EWRONGSTATE; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Check that stream indices for task and dais socket nodes ++ * are not already be used. (Device nodes checked later) */ ++ if (node1Type == NODE_TASK || node1Type == NODE_DAISSOCKET) { ++ pOutput = &(hNode1->createArgs.asa.taskArgs. ++ strmOutDef[uStream1]); ++ if (pOutput->szDevice != NULL) ++ status = DSP_EALREADYCONNECTED; ++ ++ } ++ if (node2Type == NODE_TASK || node2Type == NODE_DAISSOCKET) { ++ pInput = &(hNode2->createArgs.asa.taskArgs. ++ strmInDef[uStream2]); ++ if (pInput->szDevice != NULL) ++ status = DSP_EALREADYCONNECTED; ++ ++ } ++ } ++ /* Connecting two task nodes? */ ++ if (DSP_SUCCEEDED(status) && ((node1Type == NODE_TASK || ++ node1Type == NODE_DAISSOCKET) && (node2Type == NODE_TASK || ++ node2Type == NODE_DAISSOCKET))) { ++ /* Find available pipe */ ++ pipeId = GB_findandset(hNodeMgr->pipeMap); ++ if (pipeId == GB_NOBITS) { ++ status = DSP_ENOMORECONNECTIONS; ++ } else { ++ hNode1->outputs[uStream1].type = NODECONNECT; ++ hNode2->inputs[uStream2].type = NODECONNECT; ++ hNode1->outputs[uStream1].devId = pipeId; ++ hNode2->inputs[uStream2].devId = pipeId; ++ pOutput->szDevice = MEM_Calloc(PIPENAMELEN + 1, ++ MEM_PAGED); ++ pInput->szDevice = MEM_Calloc(PIPENAMELEN + 1, ++ MEM_PAGED); ++ if (pOutput->szDevice == NULL || ++ pInput->szDevice == NULL) { ++ /* Undo the connection */ ++ if (pOutput->szDevice) ++ MEM_Free(pOutput->szDevice); ++ ++ if (pInput->szDevice) ++ MEM_Free(pInput->szDevice); ++ ++ pOutput->szDevice = NULL; ++ pInput->szDevice = NULL; ++ GB_clear(hNodeMgr->pipeMap, pipeId); ++ status = DSP_EMEMORY; ++ } else { ++ /* Copy "/dbpipe" name to device names */ ++ sprintf(pOutput->szDevice, "%s%d", ++ PIPEPREFIX, pipeId); ++ strcpy(pInput->szDevice, pOutput->szDevice); ++ } ++ } ++ } ++ /* Connecting task node to host? 
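++ * The GPP end of the stream is the DSP_HGPPNODE pseudo handle. A channel
++ * id is reserved from the bitmap matching the stream mode (proc-copy,
++ * RDMA or zero-copy) and a HOSTPREFIX-based device name is stored in the
++ * create args; the host-side channel itself is only opened later, when
++ * DSPStream_Open is called for this node.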
*/ ++ if (DSP_SUCCEEDED(status) && (node1Type == NODE_GPP || ++ node2Type == NODE_GPP)) { ++ if (node1Type == NODE_GPP) { ++ uMode = CHNL_MODETODSP; ++ } else { ++ DBC_Assert(node2Type == NODE_GPP); ++ uMode = CHNL_MODEFROMDSP; ++ } ++ /* Reserve a channel id. We need to put the name "/host" ++ * in the node's createArgs, but the host ++ * side channel will not be opened until DSPStream_Open is ++ * called for this node. */ ++ if (pAttrs) { ++ if (pAttrs->lMode == STRMMODE_RDMA) { ++ chnlId = GB_findandset(hNodeMgr->dmaChnlMap); ++ /* dma chans are 2nd transport chnl set ++ * ids(e.g. 16-31)*/ ++ (chnlId != GB_NOBITS) ? ++ (chnlId = chnlId + hNodeMgr->ulNumChnls) : ++ chnlId; ++ } else if (pAttrs->lMode == STRMMODE_ZEROCOPY) { ++ chnlId = GB_findandset(hNodeMgr->zChnlMap); ++ /* zero-copy chans are 3nd transport set ++ * (e.g. 32-47) */ ++ (chnlId != GB_NOBITS) ? (chnlId = chnlId + ++ (2 * hNodeMgr->ulNumChnls)) : chnlId; ++ } else { /* must be PROCCOPY */ ++ DBC_Assert(pAttrs->lMode == STRMMODE_PROCCOPY); ++ chnlId = GB_findandset(hNodeMgr->chnlMap); ++ /* e.g. 0-15 */ ++ } ++ } else { ++ /* default to PROCCOPY */ ++ chnlId = GB_findandset(hNodeMgr->chnlMap); ++ } ++ if (chnlId == GB_NOBITS) { ++ status = DSP_ENOMORECONNECTIONS; ++ goto func_cont2; ++ } ++ pstrDevName = MEM_Calloc(HOSTNAMELEN + 1, MEM_PAGED); ++ if (pstrDevName != NULL) ++ goto func_cont2; ++ ++ if (pAttrs) { ++ if (pAttrs->lMode == STRMMODE_RDMA) { ++ GB_clear(hNodeMgr->dmaChnlMap, chnlId - ++ hNodeMgr->ulNumChnls); ++ } else if (pAttrs->lMode == STRMMODE_ZEROCOPY) { ++ GB_clear(hNodeMgr->zChnlMap, chnlId - ++ (2*hNodeMgr->ulNumChnls)); ++ } else { ++ DBC_Assert(pAttrs->lMode == STRMMODE_PROCCOPY); ++ GB_clear(hNodeMgr->chnlMap, chnlId); ++ } ++ } else { ++ GB_clear(hNodeMgr->chnlMap, chnlId); ++ } ++ status = DSP_EMEMORY; ++func_cont2: ++ if (DSP_SUCCEEDED(status)) { ++ if (hNode1 == (struct NODE_OBJECT *) DSP_HGPPNODE) { ++ hNode2->inputs[uStream2].type = HOSTCONNECT; ++ hNode2->inputs[uStream2].devId = chnlId; ++ pInput->szDevice = pstrDevName; ++ } else { ++ hNode1->outputs[uStream1].type = HOSTCONNECT; ++ hNode1->outputs[uStream1].devId = chnlId; ++ pOutput->szDevice = pstrDevName; ++ } ++ sprintf(pstrDevName, "%s%d", HOSTPREFIX, chnlId); ++ } ++ } ++ /* Connecting task node to device node? 
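++ * The device node's device name (plus any pConnParam data) becomes the
++ * stream's szDevice string, and the device node records the task node as
++ * its hDeviceOwner.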
*/ ++ if (DSP_SUCCEEDED(status) && ((node1Type == NODE_DEVICE) || ++ (node2Type == NODE_DEVICE))) { ++ if (node2Type == NODE_DEVICE) { ++ /* node1 == > device */ ++ hDevNode = hNode2; ++ hNode = hNode1; ++ pStream = &(hNode1->outputs[uStream1]); ++ pstrmDef = pOutput; ++ } else { ++ /* device == > node2 */ ++ hDevNode = hNode1; ++ hNode = hNode2; ++ pStream = &(hNode2->inputs[uStream2]); ++ pstrmDef = pInput; ++ } ++ /* Set up create args */ ++ pStream->type = DEVICECONNECT; ++ dwLength = strlen(hDevNode->pstrDevName); ++ if (pConnParam != NULL) { ++ pstrmDef->szDevice = MEM_Calloc(dwLength + 1 + ++ (u32) pConnParam->cbData, ++ MEM_PAGED); ++ } else { ++ pstrmDef->szDevice = MEM_Calloc(dwLength + 1, ++ MEM_PAGED); ++ } ++ if (pstrmDef->szDevice == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ /* Copy device name */ ++ strncpy(pstrmDef->szDevice, hDevNode->pstrDevName, ++ dwLength); ++ if (pConnParam != NULL) { ++ strncat(pstrmDef->szDevice, ++ (char *)pConnParam->cData, ++ (u32)pConnParam->cbData); ++ } ++ hDevNode->hDeviceOwner = hNode; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Fill in create args */ ++ if (node1Type == NODE_TASK || node1Type == NODE_DAISSOCKET) { ++ hNode1->createArgs.asa.taskArgs.uNumOutputs++; ++ FillStreamDef(hNode1, pOutput, pAttrs); ++ } ++ if (node2Type == NODE_TASK || node2Type == NODE_DAISSOCKET) { ++ hNode2->createArgs.asa.taskArgs.uNumInputs++; ++ FillStreamDef(hNode2, pInput, pAttrs); ++ } ++ /* Update hNode1 and hNode2 streamConnect */ ++ if (node1Type != NODE_GPP && node1Type != NODE_DEVICE) { ++ hNode1->uNumOutputs++; ++ if (uStream1 > hNode1->uMaxOutputIndex) ++ hNode1->uMaxOutputIndex = uStream1; ++ ++ } ++ if (node2Type != NODE_GPP && node2Type != NODE_DEVICE) { ++ hNode2->uNumInputs++; ++ if (uStream2 > hNode2->uMaxInputIndex) ++ hNode2->uMaxInputIndex = uStream2; ++ ++ } ++ FillStreamConnect(hNode1, hNode2, uStream1, uStream2); ++ } ++func_cont: ++ /* end of SYNC_EnterCS */ ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_Create ======== ++ * Purpose: ++ * Create a node on the DSP by remotely calling the node's create function. 
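++ * Loads the node's create phase through the dynamic loader if necessary,
++ * dispatches the remote create call via DISP_NodeCreate(), binds the
++ * node's message queue to the returned node environment, and unloads the
++ * create phase again when the phases are split across sections.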
++ */ ++DSP_STATUS NODE_Create(struct NODE_OBJECT *hNode) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ struct NODE_MGR *hNodeMgr; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ u32 ulCreateFxn; ++ enum NODE_TYPE nodeType; ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_SOK; ++ bool bJustWokeDSP = false; ++ struct DSP_CBDATA cbData; ++ u32 procId = 255; ++ struct DSP_PROCESSORSTATE procStatus; ++ struct PROC_OBJECT *hProcessor; ++#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++#endif ++ ++ DBC_Require(cRefs > 0); ++ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Create: hNode: 0x%x\n", ++ hNode); ++ if (!MEM_IsValidHandle(pNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ hProcessor = hNode->hProcessor; ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in error state then don't attempt to create ++ new node */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_Create:" ++ " proc Status 0x%x\n", procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ /* create struct DSP_CBDATA struct for PWR calls */ ++ cbData.cbData = PWR_TIMEOUT; ++ nodeType = NODE_GetType(hNode); ++ hNodeMgr = hNode->hNodeMgr; ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ /* Get access to node dispatcher */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ /* Check node state */ ++ if (NODE_GetState(hNode) != NODE_ALLOCATED) ++ status = DSP_EWRONGSTATE; ++ ++ if (DSP_SUCCEEDED(status)) ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ ++ if (DSP_FAILED(status)) ++ goto func_cont2; ++ ++ if (procId != DSP_UNIT) ++ goto func_cont2; ++ ++ /* Make sure streams are properly connected */ ++ if ((hNode->uNumInputs && hNode->uMaxInputIndex > ++ hNode->uNumInputs - 1) || ++ (hNode->uNumOutputs && hNode->uMaxOutputIndex > ++ hNode->uNumOutputs - 1)) ++ status = DSP_ENOTCONNECTED; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* If node's create function is not loaded, load it */ ++ /* Boost the OPP level to max level that DSP can be requested */ ++#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) ++ if (pdata->cpu_set_freq) { ++ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP3]); ++ ++ if (pdata->dsp_get_opp) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "opp level" ++ "after setting to VDD1_OPP3 is %d\n", ++ (*pdata->dsp_get_opp)()); ++ } ++ } ++#endif ++ status = hNodeMgr->nldrFxns.pfnLoad(hNode->hNldrNode, ++ NLDR_CREATE); ++ /* Get address of node's create function */ ++ if (DSP_SUCCEEDED(status)) { ++ hNode->fLoaded = true; ++ if (nodeType != NODE_DEVICE) { ++ status = GetFxnAddress(hNode, &ulCreateFxn, ++ CREATEPHASE); ++ } ++ } else { ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Create: failed to load" ++ " create code: 0x%x\n", status); ++ } ++ /* Request the lowest OPP level*/ ++#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) ++ if (pdata->cpu_set_freq) { ++ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP1]); ++ ++ if (pdata->dsp_get_opp) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "opp level" ++ "after setting to VDD1_OPP1 is %d\n", ++ (*pdata->dsp_get_opp)()); ++ } ++ } ++#endif ++ /* Get address of iAlg functions, if socket node */ ++ if (DSP_SUCCEEDED(status)) { ++ if (nodeType == NODE_DAISSOCKET) { ++ status = hNodeMgr->nldrFxns.pfnGetFxnAddr ++ 
(hNode->hNldrNode, hNode->dcdProps. ++ objData.nodeObj.pstrIAlgName, ++ &hNode->createArgs.asa.taskArgs. ++ ulDaisArg); ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ if (nodeType != NODE_DEVICE) { ++ status = DISP_NodeCreate(hNodeMgr->hDisp, hNode, ++ hNodeMgr->ulFxnAddrs[RMSCREATENODE], ++ ulCreateFxn, &(hNode->createArgs), ++ &(hNode->nodeEnv)); ++ if (DSP_SUCCEEDED(status)) { ++ /* Set the message queue id to the node env ++ * pointer */ ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ (*pIntfFxns->pfnMsgSetQueueId)(hNode->hMsgQueue, ++ hNode->nodeEnv); ++ } ++ } ++ } ++ /* Phase II/Overlays: Create, execute, delete phases possibly in ++ * different files/sections. */ ++ if (hNode->fLoaded && hNode->fPhaseSplit) { ++ /* If create code was dynamically loaded, we can now unload ++ * it. */ ++ status1 = hNodeMgr->nldrFxns.pfnUnload(hNode->hNldrNode, ++ NLDR_CREATE); ++ hNode->fLoaded = false; ++ } ++ if (DSP_FAILED(status1)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Create: Failed to unload " ++ "create code: 0x%x\n", status1); ++ } ++func_cont2: ++ /* Update node state and node manager state */ ++ if (DSP_SUCCEEDED(status)) { ++ NODE_SetState(hNode, NODE_CREATED); ++ hNodeMgr->uNumCreated++; ++ goto func_cont; ++ } ++ if (status != DSP_EWRONGSTATE) { ++ /* Put back in NODE_ALLOCATED state if error occurred */ ++ NODE_SetState(hNode, NODE_ALLOCATED); ++ } ++ if (procId == DSP_UNIT) { ++ /* If node create failed, see if should sleep DSP now */ ++ if (bJustWokeDSP == true) { ++ /* Check to see if partial create happened on DSP */ ++ if (hNode->nodeEnv == (u32)NULL) { ++ /* No environment allocated on DSP, re-sleep ++ * DSP now */ ++ PROC_Ctrl(hNode->hProcessor, WMDIOCTL_DEEPSLEEP, ++ &cbData); ++ } else { ++ /* Increment count, sleep later when node fully ++ * deleted */ ++ hNodeMgr->uNumCreated++; ++ } ++ } ++ } ++func_cont: ++ /* Free access to node dispatcher */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++func_end: ++ if (DSP_SUCCEEDED(status)) { ++ PROC_NotifyClients(hNode->hProcessor, DSP_NODESTATECHANGE); ++ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== NODE_CreateMgr ======== ++ * Purpose: ++ * Create a NODE Manager object. 
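++ * Created for a given device object; sets up the DCD manager, the node
++ * dispatcher, the STRM manager, the pipe/channel allocation bitmaps and
++ * the dynamic loader used for the nodes' phase code.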
++ */ ++DSP_STATUS NODE_CreateMgr(OUT struct NODE_MGR **phNodeMgr, ++ struct DEV_OBJECT *hDevObject) ++{ ++ u32 i; ++ struct NODE_MGR *pNodeMgr = NULL; ++ struct DISP_ATTRS dispAttrs; ++ char *szZLFile = ""; ++ struct NLDR_ATTRS nldrAttrs; ++ DSP_STATUS status = DSP_SOK; ++ u32 devType; ++ DBC_Require(cRefs > 0); ++ DBC_Require(phNodeMgr != NULL); ++ DBC_Require(hDevObject != NULL); ++ GT_2trace(NODE_debugMask, GT_ENTER, "NODE_CreateMgr: phNodeMgr: 0x%x\t" ++ "hDevObject: 0x%x\n", phNodeMgr, hDevObject); ++ *phNodeMgr = NULL; ++ /* Allocate Node manager object */ ++ MEM_AllocObject(pNodeMgr, struct NODE_MGR, NODEMGR_SIGNATURE); ++ if (pNodeMgr) { ++ pNodeMgr->hDevObject = hDevObject; ++ pNodeMgr->nodeList = LST_Create(); ++ pNodeMgr->pipeMap = GB_create(MAXPIPES); ++ pNodeMgr->pipeDoneMap = GB_create(MAXPIPES); ++ if (pNodeMgr->nodeList == NULL || pNodeMgr->pipeMap == NULL || ++ pNodeMgr->pipeDoneMap == NULL) { ++ status = DSP_EMEMORY; ++ GT_0trace(NODE_debugMask, GT_6CLASS, ++ "NODE_CreateMgr: Memory " ++ "allocation failed\n"); ++ } else { ++ status = NTFY_Create(&pNodeMgr->hNtfy); ++ } ++ pNodeMgr->uNumCreated = 0; ++ } else { ++ GT_0trace(NODE_debugMask, GT_6CLASS, ++ "NODE_CreateMgr: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } ++ /* get devNodeType */ ++ if (DSP_SUCCEEDED(status)) ++ status = DEV_GetDevType(hDevObject, &devType); ++ ++ /* Create the DCD Manager */ ++ if (DSP_SUCCEEDED(status)) { ++ status = DCD_CreateManager(szZLFile, &pNodeMgr->hDcdMgr); ++ if (DSP_SUCCEEDED(status)) ++ status = GetProcProps(pNodeMgr, hDevObject); ++ ++ } ++ /* Create NODE Dispatcher */ ++ if (DSP_SUCCEEDED(status)) { ++ dispAttrs.ulChnlOffset = pNodeMgr->ulChnlOffset; ++ dispAttrs.ulChnlBufSize = pNodeMgr->ulChnlBufSize; ++ dispAttrs.procFamily = pNodeMgr->procFamily; ++ dispAttrs.procType = pNodeMgr->procType; ++ status = DISP_Create(&pNodeMgr->hDisp, hDevObject, &dispAttrs); ++ } ++ /* Create a STRM Manager */ ++ if (DSP_SUCCEEDED(status)) ++ status = STRM_Create(&pNodeMgr->hStrmMgr, hDevObject); ++ ++ if (DSP_SUCCEEDED(status)) { ++ DEV_GetIntfFxns(hDevObject, &pNodeMgr->pIntfFxns); ++ /* Get MSG queue manager */ ++ DEV_GetMsgMgr(hDevObject, &pNodeMgr->hMsg); ++ status = SYNC_InitializeCS(&pNodeMgr->hSync); ++ if (DSP_FAILED(status)) ++ status = DSP_EMEMORY; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pNodeMgr->chnlMap = GB_create(pNodeMgr->ulNumChnls); ++ /* dma chnl map. 
ulNumChnls is # per transport */ ++ pNodeMgr->dmaChnlMap = GB_create(pNodeMgr->ulNumChnls); ++ pNodeMgr->zChnlMap = GB_create(pNodeMgr->ulNumChnls); ++ if ((pNodeMgr->chnlMap == NULL) || ++ (pNodeMgr->dmaChnlMap == NULL) || ++ (pNodeMgr->zChnlMap == NULL)) { ++ status = DSP_EMEMORY; ++ } else { ++ /* Block out reserved channels */ ++ for (i = 0; i < pNodeMgr->ulChnlOffset; i++) ++ GB_set(pNodeMgr->chnlMap, i); ++ ++ /* Block out channels reserved for RMS */ ++ GB_set(pNodeMgr->chnlMap, pNodeMgr->ulChnlOffset); ++ GB_set(pNodeMgr->chnlMap, pNodeMgr->ulChnlOffset + 1); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* NO RM Server on the IVA */ ++ if (devType != IVA_UNIT) { ++ /* Get addresses of any RMS functions loaded */ ++ status = GetRMSFxns(pNodeMgr); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_6CLASS, ++ "NODE_CreateMgr: Failed to" ++ " get RMS functions: status = 0x%x", status); ++ } ++ } ++ } ++ ++ /* Get loader functions and create loader */ ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(NODE_debugMask, GT_1CLASS, ++ "NODE_CreateMgr: using dynamic loader\n"); ++ pNodeMgr->nldrFxns = nldrFxns; /* Dynamic loader functions */ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ nldrAttrs.pfnOvly = Ovly; ++ nldrAttrs.pfnWrite = Write; ++ nldrAttrs.usDSPWordSize = pNodeMgr->uDSPWordSize; ++ nldrAttrs.usDSPMauSize = pNodeMgr->uDSPMauSize; ++ pNodeMgr->fLoaderInit = pNodeMgr->nldrFxns.pfnInit(); ++ status = pNodeMgr->nldrFxns.pfnCreate(&pNodeMgr->hNldr, ++ hDevObject, &nldrAttrs); ++ if (DSP_FAILED(status)) { ++ GT_1trace(NODE_debugMask, GT_6CLASS, ++ "NODE_CreateMgr: Failed to " ++ "create loader: status = 0x%x\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) ++ *phNodeMgr = pNodeMgr; ++ else ++ DeleteNodeMgr(pNodeMgr); ++ ++ DBC_Ensure((DSP_FAILED(status) && (*phNodeMgr == NULL)) || ++ (DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle((*phNodeMgr), NODEMGR_SIGNATURE))); ++ ++ return status; ++} ++ ++/* ++ * ======== NODE_Delete ======== ++ * Purpose: ++ * Delete a node on the DSP by remotely calling the node's delete function. ++ * Loads the node's delete function if necessary. Free GPP side resources ++ * after node's delete function returns. ++ */ ++DSP_STATUS NODE_Delete(struct NODE_OBJECT *hNode, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ struct NODE_MGR *hNodeMgr; ++ struct PROC_OBJECT *hProcessor; ++ struct DISP_OBJECT *hDisp; ++ u32 ulDeleteFxn; ++ enum NODE_TYPE nodeType; ++ enum NODE_STATE state; ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_SOK; ++ struct DSP_CBDATA cbData; ++ u32 procId; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE nodeRes; ++#endif ++ struct DSP_PROCESSORSTATE procStatus; ++ DBC_Require(cRefs > 0); ++ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Delete: hNode: 0x%x\n", ++ hNode); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ /* create struct DSP_CBDATA struct for PWR call */ ++ cbData.cbData = PWR_TIMEOUT; ++ hNodeMgr = hNode->hNodeMgr; ++ hProcessor = hNode->hProcessor; ++ hDisp = hNodeMgr->hDisp; ++ nodeType = NODE_GetType(hNode); ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ /* Enter critical section */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ state = NODE_GetState(hNode); ++ /* Execute delete phase code for non-device node in all cases ++ * except when the node was only allocated. 
Delete phase must be ++ * executed even if create phase was executed, but failed. ++ * If the node environment pointer is non-NULL, the delete phase ++ * code must be executed. */ ++ if (!(state == NODE_ALLOCATED && hNode->nodeEnv == (u32)NULL) && ++ nodeType != NODE_DEVICE) { ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ if (DSP_FAILED(status)) ++ goto func_cont1; ++ ++ if (procId == DSP_UNIT || procId == IVA_UNIT) { ++ /* If node has terminated, execute phase code will ++ * have already been unloaded in NODE_OnExit(). If the ++ * node is PAUSED, the execute phase is loaded, and it ++ * is now ok to unload it. If the node is running, we ++ * will unload the execute phase only after deleting ++ * the node. */ ++ if (state == NODE_PAUSED && hNode->fLoaded && ++ hNode->fPhaseSplit) { ++ /* Ok to unload execute code as long as node ++ * is not * running */ ++ status1 = hNodeMgr->nldrFxns.pfnUnload(hNode-> ++ hNldrNode, NLDR_EXECUTE); ++ hNode->fLoaded = false; ++ NODE_SetState(hNode, NODE_DONE); ++ } ++ /* Load delete phase code if not loaded or if haven't ++ * * unloaded EXECUTE phase */ ++ if ((!(hNode->fLoaded) || (state == NODE_RUNNING)) && ++ hNode->fPhaseSplit) { ++ status = hNodeMgr->nldrFxns.pfnLoad(hNode-> ++ hNldrNode, NLDR_DELETE); ++ if (DSP_SUCCEEDED(status)) { ++ hNode->fLoaded = true; ++ } else { ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Delete: failed to " ++ "load delete code: 0x%x\n", ++ status); ++ } ++ } ++ } ++func_cont1: ++ if (DSP_SUCCEEDED(status)) { ++ /* Unblock a thread trying to terminate the node */ ++ (void)SYNC_SetEvent(hNode->hSyncDone); ++ if (procId == DSP_UNIT) { ++ /* ulDeleteFxn = address of node's delete ++ * function */ ++ status = GetFxnAddress(hNode, &ulDeleteFxn, ++ DELETEPHASE); ++ } else if (procId == IVA_UNIT) ++ ulDeleteFxn = (u32)hNode->nodeEnv; ++ if (DSP_SUCCEEDED(status)) { ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ GT_1trace(NODE_debugMask, GT_4CLASS, ++ "NODE_Delete: proc Status " ++ "0x%x\n", procStatus.iState); ++ if (procStatus.iState != PROC_ERROR) { ++ status = DISP_NodeDelete(hDisp, hNode, ++ hNodeMgr->ulFxnAddrs[RMSDELETENODE], ++ ulDeleteFxn, hNode->nodeEnv); ++ } else ++ NODE_SetState(hNode, NODE_DONE); ++ ++ /* Unload execute, if not unloaded, and delete ++ * function */ ++ if (state == NODE_RUNNING && ++ hNode->fPhaseSplit) { ++ status1 = hNodeMgr->nldrFxns.pfnUnload( ++ hNode->hNldrNode, NLDR_EXECUTE); ++ } ++ if (DSP_FAILED(status1)) { ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Delete: failed to" ++ "unload execute code: 0x%x\n", ++ status1); ++ } ++ status1 = hNodeMgr->nldrFxns.pfnUnload( ++ hNode->hNldrNode, NLDR_DELETE); ++ hNode->fLoaded = false; ++ if (DSP_FAILED(status1)) { ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Delete: failed to" ++ "unload delete code: 0x%x\n", ++ status1); ++ } ++ } ++ } ++ } ++ /* Free host side resources even if a failure occurred */ ++ /* Remove node from hNodeMgr->nodeList */ ++ LST_RemoveElem(hNodeMgr->nodeList, (struct LST_ELEM *) hNode); ++ hNodeMgr->uNumNodes--; ++ /* Decrement count of nodes created on DSP */ ++ if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) && ++ (hNode->nodeEnv != (u32) NULL))) ++ hNodeMgr->uNumCreated--; ++ /* Free host-side resources allocated by NODE_Create() ++ * DeleteNode() fails if SM buffers not freed by client! 
*/ ++#ifndef RES_CLEANUP_DISABLE ++ if (!pr_ctxt) ++ goto func_cont; ++ if (DRV_GetNodeResElement(hNode, &nodeRes, pr_ctxt) != DSP_ENOTFOUND) { ++ GT_0trace(NODE_debugMask, GT_5CLASS, "\nNODE_Delete12:\n"); ++ DRV_ProcNodeUpdateStatus(nodeRes, false); ++ } ++#endif ++func_cont: ++ GT_0trace(NODE_debugMask, GT_ENTER, "\nNODE_Delete13:\n "); ++ DeleteNode(hNode, pr_ctxt); ++#ifndef RES_CLEANUP_DISABLE ++ GT_0trace(NODE_debugMask, GT_5CLASS, "\nNODE_Delete2:\n "); ++ if (pr_ctxt) ++ DRV_RemoveNodeResElement(nodeRes, pr_ctxt); ++#endif ++ GT_0trace(NODE_debugMask, GT_ENTER, "\nNODE_Delete3:\n "); ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ PROC_NotifyClients(hProcessor, DSP_NODESTATECHANGE); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_DeleteMgr ======== ++ * Purpose: ++ * Delete the NODE Manager. ++ */ ++DSP_STATUS NODE_DeleteMgr(struct NODE_MGR *hNodeMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)); ++ ++ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_DeleteMgr: hNodeMgr: 0x%x\n", ++ hNodeMgr); ++ DeleteNodeMgr(hNodeMgr); ++ ++ return status; ++} ++ ++/* ++ * ======== NODE_EnumNodes ======== ++ * Purpose: ++ * Enumerate currently allocated nodes. ++ */ ++DSP_STATUS NODE_EnumNodes(struct NODE_MGR *hNodeMgr, IN DSP_HNODE *aNodeTab, ++ u32 uNodeTabSize, OUT u32 *puNumNodes, ++ OUT u32 *puAllocated) ++{ ++ struct NODE_OBJECT *hNode; ++ u32 i; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)); ++ DBC_Require(aNodeTab != NULL || uNodeTabSize == 0); ++ DBC_Require(puNumNodes != NULL); ++ DBC_Require(puAllocated != NULL); ++ GT_5trace(NODE_debugMask, GT_ENTER, "NODE_EnumNodes: hNodeMgr: 0x%x\t" ++ "aNodeTab: %d\tuNodeTabSize: 0x%x\tpuNumNodes: 0x%x\t" ++ "puAllocated\n", hNodeMgr, aNodeTab, uNodeTabSize, puNumNodes, ++ puAllocated); ++ /* Enter critical section */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ if (hNodeMgr->uNumNodes > uNodeTabSize) { ++ *puAllocated = hNodeMgr->uNumNodes; ++ *puNumNodes = 0; ++ status = DSP_ESIZE; ++ } else { ++ hNode = (struct NODE_OBJECT *)LST_First(hNodeMgr-> ++ nodeList); ++ for (i = 0; i < hNodeMgr->uNumNodes; i++) { ++ DBC_Assert(MEM_IsValidHandle(hNode, ++ NODE_SIGNATURE)); ++ aNodeTab[i] = hNode; ++ hNode = (struct NODE_OBJECT *)LST_Next ++ (hNodeMgr->nodeList, ++ (struct LST_ELEM *)hNode); ++ } ++ *puAllocated = *puNumNodes = hNodeMgr->uNumNodes; ++ } ++ } ++ /* end of SYNC_EnterCS */ ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ return status; ++} ++ ++/* ++ * ======== NODE_Exit ======== ++ * Purpose: ++ * Discontinue usage of NODE module. ++ */ ++void NODE_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "Entered NODE_Exit, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== NODE_FreeMsgBuf ======== ++ * Purpose: ++ * Frees the message buffer. 
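++ * Returns a buffer obtained from NODE_AllocMsgBuf() to the node's
++ * shared-memory translator. Only SM segment 1 is supported, and the
++ * address passed in is the client's virtual address.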
++ */ ++DSP_STATUS NODE_FreeMsgBuf(struct NODE_OBJECT *hNode, IN u8 *pBuffer, ++ OPTIONAL struct DSP_BUFFERATTR *pAttr) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ DSP_STATUS status = DSP_SOK; ++ u32 procId; ++ DBC_Require(cRefs > 0); ++ DBC_Require(pBuffer != NULL); ++ DBC_Require(pNode != NULL); ++ DBC_Require(pNode->hXlator != NULL); ++ GT_3trace(NODE_debugMask, GT_ENTER, "NODE_FreeMsgBuf: hNode: 0x%x\t" ++ "pBuffer: 0x%x\tpAttr: 0x%x\n", hNode, pBuffer, pAttr); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ if (procId == DSP_UNIT) { ++ if (DSP_SUCCEEDED(status)) { ++ if (pAttr == NULL) { ++ /* set defaults */ ++ pAttr = &NODE_DFLTBUFATTRS; ++ } ++ /* Node supports single SM segment only */ ++ if (pAttr->uSegment != 1) ++ status = DSP_EBADSEGID; ++ ++ /* pBuffer is clients Va. */ ++ status = CMM_XlatorFreeBuf(pNode->hXlator, pBuffer); ++ if (DSP_FAILED(status)) ++ status = DSP_EFAIL; ++ else ++ status = DSP_SOK; ++ ++ } ++ } else { ++ DBC_Assert(NULL); /* BUG */ ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_GetAttr ======== ++ * Purpose: ++ * Copy the current attributes of the specified node into a DSP_NODEATTR ++ * structure. ++ */ ++DSP_STATUS NODE_GetAttr(struct NODE_OBJECT *hNode, ++ OUT struct DSP_NODEATTR *pAttr, u32 uAttrSize) ++{ ++ struct NODE_MGR *hNodeMgr; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(cRefs > 0); ++ DBC_Require(pAttr != NULL); ++ DBC_Require(uAttrSize >= sizeof(struct DSP_NODEATTR)); ++ GT_3trace(NODE_debugMask, GT_ENTER, "NODE_GetAttr: hNode: " ++ "0x%x\tpAttr: 0x%x \tuAttrSize: 0x%x\n", hNode, pAttr, ++ uAttrSize); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ hNodeMgr = hNode->hNodeMgr; ++ /* Enter hNodeMgr critical section (since we're accessing ++ * data that could be changed by NODE_ChangePriority() and ++ * NODE_Connect(). */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ pAttr->cbStruct = sizeof(struct DSP_NODEATTR); ++ /* DSP_NODEATTRIN */ ++ pAttr->inNodeAttrIn.cbStruct = ++ sizeof(struct DSP_NODEATTRIN); ++ pAttr->inNodeAttrIn.iPriority = hNode->nPriority; ++ pAttr->inNodeAttrIn.uTimeout = hNode->uTimeout; ++ pAttr->inNodeAttrIn.uHeapSize = ++ hNode->createArgs.asa.taskArgs.uHeapSize; ++ pAttr->inNodeAttrIn.pGPPVirtAddr = (void *) ++ hNode->createArgs.asa.taskArgs.uGPPHeapAddr; ++ pAttr->uInputs = hNode->uNumGPPInputs; ++ pAttr->uOutputs = hNode->uNumGPPOutputs; ++ /* DSP_NODEINFO */ ++ GetNodeInfo(hNode, &(pAttr->iNodeInfo)); ++ } ++ /* end of SYNC_EnterCS */ ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ } ++ return status; ++} ++ ++/* ++ * ======== NODE_GetChannelId ======== ++ * Purpose: ++ * Get the channel index reserved for a stream connection between the ++ * host and a node. 
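++ * uDir selects the node's inputs[] (DSP_TONODE) or outputs[]
++ * (DSP_FROMNODE) table; an id is only returned for HOSTCONNECT entries,
++ * otherwise the call fails with DSP_EVALUE.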
++ */ ++DSP_STATUS NODE_GetChannelId(struct NODE_OBJECT *hNode, u32 uDir, u32 uIndex, ++ OUT u32 *pulId) ++{ ++ enum NODE_TYPE nodeType; ++ DSP_STATUS status = DSP_EVALUE; ++ DBC_Require(cRefs > 0); ++ DBC_Require(uDir == DSP_TONODE || uDir == DSP_FROMNODE); ++ DBC_Require(pulId != NULL); ++ GT_4trace(NODE_debugMask, GT_ENTER, "NODE_GetChannelId: hNode: " ++ "0x%x\tuDir: %d\tuIndex: %d\tpulId: 0x%x\n", hNode, uDir, ++ uIndex, pulId); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ return status; ++ } ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_TASK && nodeType != NODE_DAISSOCKET) { ++ status = DSP_ENODETYPE; ++ return status; ++ } ++ if (uDir == DSP_TONODE) { ++ if (uIndex < MaxInputs(hNode)) { ++ if (hNode->inputs[uIndex].type == HOSTCONNECT) { ++ *pulId = hNode->inputs[uIndex].devId; ++ status = DSP_SOK; ++ } ++ } ++ } else { ++ DBC_Assert(uDir == DSP_FROMNODE); ++ if (uIndex < MaxOutputs(hNode)) { ++ if (hNode->outputs[uIndex].type == HOSTCONNECT) { ++ *pulId = hNode->outputs[uIndex].devId; ++ status = DSP_SOK; ++ } ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== NODE_GetMessage ======== ++ * Purpose: ++ * Retrieve a message from a node on the DSP. ++ */ ++DSP_STATUS NODE_GetMessage(struct NODE_OBJECT *hNode, OUT struct DSP_MSG *pMsg, ++ u32 uTimeout) ++{ ++ struct NODE_MGR *hNodeMgr; ++ enum NODE_TYPE nodeType; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ void *pTmpBuf; ++ struct DSP_PROCESSORSTATE procStatus; ++ struct PROC_OBJECT *hProcessor; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pMsg != NULL); ++ GT_3trace(NODE_debugMask, GT_ENTER, ++ "NODE_GetMessage: hNode: 0x%x\tpMsg: " ++ "0x%x\tuTimeout: 0x%x\n", hNode, pMsg, uTimeout); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ hProcessor = hNode->hProcessor; ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in error state then don't attempt to get the ++ message */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_GetMessage:" ++ " proc Status 0x%x\n", procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ hNodeMgr = hNode->hNodeMgr; ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_MESSAGE && nodeType != NODE_TASK && ++ nodeType != NODE_DAISSOCKET) { ++ status = DSP_ENODETYPE; ++ goto func_end; ++ } ++ /* This function will block unless a message is available. Since ++ * DSPNode_RegisterNotify() allows notification when a message ++ * is available, the system can be designed so that ++ * DSPNode_GetMessage() is only called when a message is ++ * available. */ ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnMsgGet)(hNode->hMsgQueue, pMsg, uTimeout); ++ /* Check if message contains SM descriptor */ ++ if (DSP_FAILED(status) || !(pMsg->dwCmd & DSP_RMSBUFDESC)) ++ goto func_end; ++ ++ /* Translate DSP byte addr to GPP Va. 
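++ * Two-step translation: the DSP byte address is first mapped to a GPP
++ * physical address (CMM_DSPPA2PA) and then to the client's virtual
++ * address (CMM_PA2VA); the size in dwArg2 is scaled from DSP words back
++ * to bytes.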
*/ ++ pTmpBuf = CMM_XlatorTranslate(hNode->hXlator, ++ (void *)(pMsg->dwArg1 * hNode->hNodeMgr->uDSPWordSize), ++ CMM_DSPPA2PA); ++ if (pTmpBuf != NULL) { ++ /* now convert this GPP Pa to Va */ ++ pTmpBuf = CMM_XlatorTranslate(hNode->hXlator, pTmpBuf, ++ CMM_PA2VA); ++ if (pTmpBuf != NULL) { ++ /* Adjust SM size in msg */ ++ pMsg->dwArg1 = (u32) pTmpBuf; ++ pMsg->dwArg2 *= hNode->hNodeMgr->uDSPWordSize; ++ } else { ++ GT_0trace(NODE_debugMask, GT_7CLASS, "NODE_GetMessage: " ++ "Failed SM translation!\n"); ++ status = DSP_ETRANSLATE; ++ } ++ } else { ++ GT_0trace(NODE_debugMask, GT_7CLASS, "NODE_GetMessage: Failed " ++ "SM Pa/Pa translation!\n"); ++ status = DSP_ETRANSLATE; ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_GetNldrObj ======== ++ */ ++DSP_STATUS NODE_GetNldrObj(struct NODE_MGR *hNodeMgr, ++ struct NLDR_OBJECT **phNldrObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct NODE_MGR *pNodeMgr = hNodeMgr; ++ DBC_Require(phNldrObj != NULL); ++ GT_2trace(NODE_debugMask, GT_ENTER, ++ "Entered NODE_GetNldrObj, hNodeMgr: " ++ "0x%x\n\tphNldrObj: 0x%x\n", hNodeMgr, phNldrObj); ++ if (!MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)) ++ status = DSP_EHANDLE; ++ else ++ *phNldrObj = pNodeMgr->hNldr; ++ ++ GT_2trace(NODE_debugMask, GT_ENTER, ++ "Exit NODE_GetNldrObj: status 0x%x\n\t" ++ "phNldrObj: 0x%x\n", status, *phNldrObj); ++ DBC_Ensure(DSP_SUCCEEDED(status) || ((phNldrObj != NULL) && ++ (*phNldrObj == NULL))); ++ return status; ++} ++ ++/* ++ * ======== NODE_GetStrmMgr ======== ++ * Purpose: ++ * Returns the Stream manager. ++ */ ++DSP_STATUS NODE_GetStrmMgr(struct NODE_OBJECT *hNode, ++ struct STRM_MGR **phStrmMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) ++ status = DSP_EHANDLE; ++ else ++ *phStrmMgr = hNode->hNodeMgr->hStrmMgr; ++ ++ return status; ++} ++ ++/* ++ * ======== NODE_GetLoadType ======== ++ */ ++enum NLDR_LOADTYPE NODE_GetLoadType(struct NODE_OBJECT *hNode) ++{ ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_GetLoadType: Failed. hNode:" ++ " 0x%x\n", hNode); ++ return -1; ++ } else ++ return hNode->dcdProps.objData.nodeObj.usLoadType; ++} ++ ++/* ++ * ======== NODE_GetTimeout ======== ++ * Purpose: ++ * Returns the timeout value for this node. ++ */ ++u32 NODE_GetTimeout(struct NODE_OBJECT *hNode) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_GetTimeout: Failed. hNode:" ++ " 0x%x\n", hNode); ++ return 0; ++ } else ++ return hNode->uTimeout; ++} ++ ++/* ++ * ======== NODE_GetType ======== ++ * Purpose: ++ * Returns the node type. ++ */ ++enum NODE_TYPE NODE_GetType(struct NODE_OBJECT *hNode) ++{ ++ enum NODE_TYPE nodeType; ++ ++ if (hNode == (struct NODE_OBJECT *) DSP_HGPPNODE) ++ nodeType = NODE_GPP; ++ else { ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) ++ nodeType = -1; ++ else ++ nodeType = hNode->nType; ++ } ++ return nodeType; ++} ++ ++/* ++ * ======== NODE_Init ======== ++ * Purpose: ++ * Initialize the NODE module. 
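++ * Reference counted; the GT trace mask for the module is created on the
++ * first call.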
++ */ ++bool NODE_Init(void) ++{ ++ bool fRetVal = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!NODE_debugMask.flags); ++ GT_create(&NODE_debugMask, "NO"); /* "NO" for NOde */ ++ } ++ ++ if (fRetVal) ++ cRefs++; ++ ++ GT_1trace(NODE_debugMask, GT_5CLASS, "NODE_Init(), ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((fRetVal && (cRefs > 0)) || (!fRetVal && (cRefs >= 0))); ++ return fRetVal; ++} ++ ++/* ++ * ======== NODE_OnExit ======== ++ * Purpose: ++ * Gets called when RMS_EXIT is received for a node. ++ */ ++void NODE_OnExit(struct NODE_OBJECT *hNode, s32 nStatus) ++{ ++ DBC_Assert(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ /* Set node state to done */ ++ NODE_SetState(hNode, NODE_DONE); ++ hNode->nExitStatus = nStatus; ++ if (hNode->fLoaded && hNode->fPhaseSplit) { ++ (void)hNode->hNodeMgr->nldrFxns.pfnUnload(hNode->hNldrNode, ++ NLDR_EXECUTE); ++ hNode->fLoaded = false; ++ } ++ /* Unblock call to NODE_Terminate */ ++ (void) SYNC_SetEvent(hNode->hSyncDone); ++ /* Notify clients */ ++ PROC_NotifyClients(hNode->hProcessor, DSP_NODESTATECHANGE); ++ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); ++} ++ ++/* ++ * ======== NODE_Pause ======== ++ * Purpose: ++ * Suspend execution of a node currently running on the DSP. ++ */ ++DSP_STATUS NODE_Pause(struct NODE_OBJECT *hNode) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ enum NODE_TYPE nodeType; ++ enum NODE_STATE state; ++ struct NODE_MGR *hNodeMgr; ++ DSP_STATUS status = DSP_SOK; ++ u32 procId; ++ struct DSP_PROCESSORSTATE procStatus; ++ struct PROC_OBJECT *hProcessor; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Pause: hNode: 0x%x\n", hNode); ++ ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } else { ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_TASK && nodeType != NODE_DAISSOCKET) ++ status = DSP_ENODETYPE; ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ ++ if (procId == IVA_UNIT) ++ status = DSP_ENOTIMPL; ++ ++ if (DSP_SUCCEEDED(status)) { ++ hNodeMgr = hNode->hNodeMgr; ++ ++ /* Enter critical section */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ ++ if (DSP_SUCCEEDED(status)) { ++ state = NODE_GetState(hNode); ++ /* Check node state */ ++ if (state != NODE_RUNNING) ++ status = DSP_EWRONGSTATE; ++ ++ hProcessor = hNode->hProcessor; ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in error state then don't attempt ++ to send the message */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, ++ "NODE_Pause: proc Status 0x%x\n", ++ procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = DISP_NodeChangePriority(hNodeMgr-> ++ hDisp, hNode, ++ hNodeMgr->ulFxnAddrs[RMSCHANGENODEPRIORITY], ++ hNode->nodeEnv, NODE_SUSPENDEDPRI); ++ } ++ ++ /* Update state */ ++ if (DSP_SUCCEEDED(status)) { ++ NODE_SetState(hNode, NODE_PAUSED); ++ } else { ++ GT_1trace(NODE_debugMask, GT_6CLASS, ++ "NODE_Pause: Failed. 
hNode:" ++ " 0x%x\n", hNode); ++ } ++ } ++ /* End of SYNC_EnterCS */ ++ /* Leave critical section */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ PROC_NotifyClients(hNode->hProcessor, ++ DSP_NODESTATECHANGE); ++ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_PutMessage ======== ++ * Purpose: ++ * Send a message to a message node, task node, or XDAIS socket node. This ++ * function will block until the message stream can accommodate the ++ * message, or a timeout occurs. ++ */ ++DSP_STATUS NODE_PutMessage(struct NODE_OBJECT *hNode, ++ IN CONST struct DSP_MSG *pMsg, u32 uTimeout) ++{ ++ struct NODE_MGR *hNodeMgr = NULL; ++ enum NODE_TYPE nodeType; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ enum NODE_STATE state; ++ DSP_STATUS status = DSP_SOK; ++ void *pTmpBuf; ++ struct DSP_MSG newMsg; ++ struct DSP_PROCESSORSTATE procStatus; ++ struct PROC_OBJECT *hProcessor; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pMsg != NULL); ++ GT_3trace(NODE_debugMask, GT_ENTER, ++ "NODE_PutMessage: hNode: 0x%x\tpMsg: " ++ "0x%x\tuTimeout: 0x%x\n", hNode, pMsg, uTimeout); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ hProcessor = hNode->hProcessor; ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in bad state then don't attempt sending the ++ message */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_PutMessage:" ++ " proc Status 0x%x\n", procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ hNodeMgr = hNode->hNodeMgr; ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_MESSAGE && nodeType != NODE_TASK && ++ nodeType != NODE_DAISSOCKET) ++ status = DSP_ENODETYPE; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Check node state. Can't send messages to a node after ++ * we've sent the RMS_EXIT command. There is still the ++ * possibility that NODE_Terminate can be called after we've ++ * checked the state. Could add another SYNC object to ++ * prevent this (can't use hNodeMgr->hSync, since we don't ++ * want to block other NODE functions). However, the node may ++ * still exit on its own, before this message is sent. */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ state = NODE_GetState(hNode); ++ if (state == NODE_TERMINATING || state == NODE_DONE) ++ status = DSP_EWRONGSTATE; ++ ++ } ++ /* end of SYNC_EnterCS */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* assign pMsg values to new msg */ ++ newMsg = *pMsg; ++ /* Now, check if message contains a SM buffer descriptor */ ++ if (pMsg->dwCmd & DSP_RMSBUFDESC) { ++ /* Translate GPP Va to DSP physical buf Ptr. 
*/ ++ pTmpBuf = CMM_XlatorTranslate(hNode->hXlator, ++ (void *)newMsg.dwArg1, CMM_VA2DSPPA); ++ if (pTmpBuf != NULL) { ++ /* got translation, convert to MAUs in msg */ ++ if (hNode->hNodeMgr->uDSPWordSize != 0) { ++ newMsg.dwArg1 = ++ (u32)pTmpBuf / ++ hNode->hNodeMgr->uDSPWordSize; ++ /* MAUs */ ++ newMsg.dwArg2 /= hNode->hNodeMgr->uDSPWordSize; ++ } else { ++ GT_0trace(NODE_debugMask, GT_7CLASS, ++ "NODE_PutMessage: " ++ "uDSPWordSize is zero!\n"); ++ status = DSP_EFAIL; /* bad DSPWordSize */ ++ } ++ } else { /* failed to translate buffer address */ ++ GT_0trace(NODE_debugMask, GT_7CLASS, ++ "NODE_PutMessage: Failed to" ++ " translate SM address\n"); ++ status = DSP_ETRANSLATE; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnMsgPut)(hNode->hMsgQueue, ++ &newMsg, uTimeout); ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_RegisterNotify ======== ++ * Purpose: ++ * Register to be notified on specific events for this node. ++ */ ++DSP_STATUS NODE_RegisterNotify(struct NODE_OBJECT *hNode, u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hNotification != NULL); ++ ++ GT_4trace(NODE_debugMask, GT_ENTER, ++ "NODE_RegisterNotify: hNode: 0x%x\t" ++ "uEventMask: 0x%x\tuNotifyType: 0x%x\thNotification: 0x%x\n", ++ hNode, uEventMask, uNotifyType, hNotification); ++ ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ /* Check if event mask is a valid node related event */ ++ if (uEventMask & ~(DSP_NODESTATECHANGE | ++ DSP_NODEMESSAGEREADY)) ++ status = DSP_EVALUE; ++ ++ /* Check if notify type is valid */ ++ if (uNotifyType != DSP_SIGNALEVENT) ++ status = DSP_EVALUE; ++ ++ /* Only one Notification can be registered at a ++ * time - Limitation */ ++ if (uEventMask == (DSP_NODESTATECHANGE | ++ DSP_NODEMESSAGEREADY)) ++ status = DSP_EVALUE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ if (uEventMask == DSP_NODESTATECHANGE) { ++ status = NTFY_Register(hNode->hNtfy, hNotification, ++ uEventMask & DSP_NODESTATECHANGE, uNotifyType); ++ } else { ++ /* Send Message part of event mask to MSG */ ++ pIntfFxns = hNode->hNodeMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnMsgRegisterNotify) ++ (hNode->hMsgQueue, ++ uEventMask & DSP_NODEMESSAGEREADY, uNotifyType, ++ hNotification); ++ } ++ ++ } ++ return status; ++} ++ ++/* ++ * ======== NODE_Run ======== ++ * Purpose: ++ * Start execution of a node's execute phase, or resume execution of a node ++ * that has been suspended (via NODE_NodePause()) on the DSP. Load the ++ * node's execute function if necessary. 
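++ * A NODE_CREATED node has its execute phase loaded (if split) and is
++ * started through DISP_NodeRun(); a NODE_PAUSED node is resumed by
++ * restoring its priority through DISP_NodeChangePriority().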
++ */ ++DSP_STATUS NODE_Run(struct NODE_OBJECT *hNode) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ struct NODE_MGR *hNodeMgr; ++ enum NODE_TYPE nodeType; ++ enum NODE_STATE state; ++ u32 ulExecuteFxn; ++ u32 ulFxnAddr; ++ DSP_STATUS status = DSP_SOK; ++ u32 procId; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct DSP_PROCESSORSTATE procStatus; ++ struct PROC_OBJECT *hProcessor; ++ ++ DBC_Require(cRefs > 0); ++ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Run: hNode: 0x%x\n", hNode); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ hProcessor = hNode->hProcessor; ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in error state then don't attempt to run the node */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_Run:" ++ " proc Status 0x%x\n", procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ nodeType = NODE_GetType(hNode); ++ if (nodeType == NODE_DEVICE) ++ status = DSP_ENODETYPE; ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ hNodeMgr = hNode->hNodeMgr; ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ /* Enter critical section */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ state = NODE_GetState(hNode); ++ if (state != NODE_CREATED && state != NODE_PAUSED) ++ status = DSP_EWRONGSTATE; ++ ++ if (DSP_SUCCEEDED(status)) ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ ++ if (DSP_FAILED(status)) ++ goto func_cont1; ++ ++ if ((procId != DSP_UNIT) && (procId != IVA_UNIT)) ++ goto func_cont1; ++ ++ if (state == NODE_CREATED) { ++ /* If node's execute function is not loaded, load it */ ++ if (!(hNode->fLoaded) && hNode->fPhaseSplit) { ++ status = hNodeMgr->nldrFxns.pfnLoad(hNode->hNldrNode, ++ NLDR_EXECUTE); ++ if (DSP_SUCCEEDED(status)) { ++ hNode->fLoaded = true; ++ } else { ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Run: failed to load " ++ "execute code:0x%x\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Get address of node's execute function */ ++ if (procId == IVA_UNIT) ++ ulExecuteFxn = (u32) hNode->nodeEnv; ++ else { ++ status = GetFxnAddress(hNode, &ulExecuteFxn, ++ EXECUTEPHASE); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ ulFxnAddr = hNodeMgr->ulFxnAddrs[RMSEXECUTENODE]; ++ status = DISP_NodeRun(hNodeMgr->hDisp, hNode, ulFxnAddr, ++ ulExecuteFxn, hNode->nodeEnv); ++ } ++ } else if (state == NODE_PAUSED) { ++ ulFxnAddr = hNodeMgr->ulFxnAddrs[RMSCHANGENODEPRIORITY]; ++ status = DISP_NodeChangePriority(hNodeMgr->hDisp, hNode, ++ ulFxnAddr, hNode->nodeEnv, ++ NODE_GetPriority(hNode)); ++ } else { ++ /* We should never get here */ ++ DBC_Assert(false); ++ } ++func_cont1: ++ /* Update node state. */ ++ if (DSP_SUCCEEDED(status)) ++ NODE_SetState(hNode, NODE_RUNNING); ++ else /* Set state back to previous value */ ++ NODE_SetState(hNode, state); ++ /*End of SYNC_EnterCS */ ++ /* Exit critical section */ ++func_cont: ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ PROC_NotifyClients(hNode->hProcessor, ++ DSP_NODESTATECHANGE); ++ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== NODE_Terminate ======== ++ * Purpose: ++ * Signal a node running on the DSP that it should exit its execute phase ++ * function. 
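++ * Sends RMS_EXIT and waits on hSyncDone for the node to exit. On timeout
++ * it escalates to RMS_KILLTASK and, if the node still does not respond,
++ * reports a DSP_SYSERROR through the DEH manager.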
++ */ ++DSP_STATUS NODE_Terminate(struct NODE_OBJECT *hNode, OUT DSP_STATUS *pStatus) ++{ ++ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; ++ struct NODE_MGR *hNodeMgr = NULL; ++ enum NODE_TYPE nodeType; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ enum NODE_STATE state; ++ struct DSP_MSG msg, killmsg; ++ DSP_STATUS status = DSP_SOK; ++ u32 procId, killTimeOut; ++ struct DEH_MGR *hDehMgr; ++ struct DSP_PROCESSORSTATE procStatus; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pStatus != NULL); ++ ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Terminate: hNode: 0x%x\n", hNode); ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ if (pNode->hProcessor == NULL) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, ++ "NODE_Terminate: pNode->hProcessor = 0x%x\n", ++ pNode->hProcessor); ++ goto func_end; ++ } ++ status = PROC_GetProcessorId(pNode->hProcessor, &procId); ++ ++ if (DSP_SUCCEEDED(status)) { ++ hNodeMgr = hNode->hNodeMgr; ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_TASK && nodeType != ++ NODE_DAISSOCKET) ++ status = DSP_ENODETYPE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Check node state */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ state = NODE_GetState(hNode); ++ if (state != NODE_RUNNING) { ++ status = DSP_EWRONGSTATE; ++ /* Set the exit status if node terminated on ++ * its own. */ ++ if (state == NODE_DONE) ++ *pStatus = hNode->nExitStatus; ++ ++ } else { ++ NODE_SetState(hNode, NODE_TERMINATING); ++ } ++ } ++ /* end of SYNC_EnterCS */ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* ++ * Send exit message. Do not change state to NODE_DONE ++ * here. That will be done in callback. ++ */ ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_Terminate: env = 0x%x\n", hNode->nodeEnv); ++ ++ status = PROC_GetState(pNode->hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ /* If processor is in error state then don't attempt to send ++ * A kill task command */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_Terminate:" ++ " proc Status 0x%x\n", procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_cont; ++ } ++ ++ msg.dwCmd = RMS_EXIT; ++ msg.dwArg1 = hNode->nodeEnv; ++ killmsg.dwCmd = RMS_KILLTASK; ++ killmsg.dwArg1 = hNode->nodeEnv; ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ ++ if (hNode->uTimeout > MAXTIMEOUT) ++ killTimeOut = MAXTIMEOUT; ++ else ++ killTimeOut = (hNode->uTimeout)*2; ++ ++ status = (*pIntfFxns->pfnMsgPut)(hNode->hMsgQueue, &msg, ++ hNode->uTimeout); ++ if (DSP_SUCCEEDED(status)) { ++ /* Wait on synchronization object that will be ++ * posted in the callback on receiving RMS_EXIT ++ * message, or by NODE_Delete. Check for valid hNode, ++ * in case posted by NODE_Delete(). 
*/ ++ status = SYNC_WaitOnEvent(hNode->hSyncDone, ++ killTimeOut/2); ++ if (DSP_FAILED(status)) { ++ if (status == DSP_ETIMEOUT) { ++ status = (*pIntfFxns->pfnMsgPut) ++ (hNode->hMsgQueue, &killmsg, ++ hNode->uTimeout); ++ if (DSP_SUCCEEDED(status)) { ++ status = SYNC_WaitOnEvent ++ (hNode->hSyncDone, ++ killTimeOut/2); ++ if (DSP_FAILED(status)) { ++ /* Here it goes the part ++ * of the simulation of ++ * the DSP exception */ ++ DEV_GetDehMgr(hNodeMgr-> ++ hDevObject, &hDehMgr); ++ if (hDehMgr) { ++ (*pIntfFxns-> ++ pfnDehNotify)(hDehMgr, ++ DSP_SYSERROR, ++ DSP_EXCEPTIONABORT); ++ status = DSP_EFAIL; ++ } ++ } else ++ status = DSP_SOK; ++ } ++ } else ++ status = DSP_EFAIL; ++ } else /* Convert SYNC status to DSP status */ ++ status = DSP_SOK; ++ } ++ } ++func_cont: ++ if (DSP_SUCCEEDED(status)) { ++ /* Enter CS before getting exit status, in case node was ++ * deleted. */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ /* Make sure node wasn't deleted while we blocked */ ++ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { ++ status = DSP_EFAIL; ++ } else { ++ *pStatus = hNode->nExitStatus; ++ GT_1trace(NODE_debugMask, GT_ENTER, ++ "NODE_Terminate: env = 0x%x " ++ "succeeded.\n", hNode->nodeEnv); ++ } ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ } /*End of SYNC_EnterCS */ ++func_end: ++ return status; ++} ++ ++/* ++ * ======== DeleteNode ======== ++ * Purpose: ++ * Free GPP resources allocated in NODE_Allocate() or NODE_Connect(). ++ */ ++static void DeleteNode(struct NODE_OBJECT *hNode, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ struct NODE_MGR *hNodeMgr; ++ struct CMM_XLATOROBJECT *hXlator; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ u32 i; ++ enum NODE_TYPE nodeType; ++ struct STREAM stream; ++ struct NODE_MSGARGS msgArgs; ++ struct NODE_TASKARGS taskArgs; ++#ifdef DSP_DMM_DEBUG ++ struct DMM_OBJECT *hDmmMgr; ++ struct PROC_OBJECT *pProcObject = ++ (struct PROC_OBJECT *)hNode->hProcessor; ++#endif ++ DSP_STATUS status; ++ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ hNodeMgr = hNode->hNodeMgr; ++ if (!MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)) ++ return; ++ hXlator = hNode->hXlator; ++ nodeType = NODE_GetType(hNode); ++ if (nodeType != NODE_DEVICE) { ++ msgArgs = hNode->createArgs.asa.msgArgs; ++ if (msgArgs.pData) ++ MEM_Free(msgArgs.pData); ++ ++ /* Free MSG queue */ ++ if (hNode->hMsgQueue) { ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ (*pIntfFxns->pfnMsgDeleteQueue) (hNode->hMsgQueue); ++ hNode->hMsgQueue = NULL; ++ ++ } ++ if (hNode->hSyncDone) ++ (void) SYNC_CloseEvent(hNode->hSyncDone); ++ ++ /* Free all stream info */ ++ if (hNode->inputs) { ++ for (i = 0; i < MaxInputs(hNode); i++) { ++ stream = hNode->inputs[i]; ++ FreeStream(hNodeMgr, stream); ++ } ++ MEM_Free(hNode->inputs); ++ hNode->inputs = NULL; ++ } ++ if (hNode->outputs) { ++ for (i = 0; i < MaxOutputs(hNode); i++) { ++ stream = hNode->outputs[i]; ++ FreeStream(hNodeMgr, stream); ++ } ++ MEM_Free(hNode->outputs); ++ hNode->outputs = NULL; ++ } ++ taskArgs = hNode->createArgs.asa.taskArgs; ++ if (taskArgs.strmInDef) { ++ for (i = 0; i < MaxInputs(hNode); i++) { ++ if (taskArgs.strmInDef[i].szDevice) { ++ MEM_Free(taskArgs.strmInDef[i]. ++ szDevice); ++ taskArgs.strmInDef[i].szDevice = NULL; ++ } ++ } ++ MEM_Free(taskArgs.strmInDef); ++ taskArgs.strmInDef = NULL; ++ } ++ if (taskArgs.strmOutDef) { ++ for (i = 0; i < MaxOutputs(hNode); i++) { ++ if (taskArgs.strmOutDef[i].szDevice) { ++ MEM_Free(taskArgs.strmOutDef[i]. 
++ szDevice); ++ taskArgs.strmOutDef[i].szDevice = NULL; ++ } ++ } ++ MEM_Free(taskArgs.strmOutDef); ++ taskArgs.strmOutDef = NULL; ++ } ++ if (taskArgs.uDSPHeapResAddr) { ++ status = PROC_UnMap(hNode->hProcessor, ++ (void *)taskArgs.uDSPHeapAddr, ++ pr_ctxt); ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(NODE_debugMask, GT_5CLASS, ++ "DSPProcessor_UnMap succeeded.\n"); ++ } else { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "DSPProcessor_UnMap failed." ++ " Status = 0x%x\n", (u32)status); ++ } ++ status = PROC_UnReserveMemory(hNode->hProcessor, ++ (void *)taskArgs.uDSPHeapResAddr); ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(NODE_debugMask, GT_5CLASS, ++ "DSPProcessor_UnReserveMemory " ++ "succeeded.\n"); ++ } else { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "DSPProcessor_UnReserveMemory " ++ "failed. Status = 0x%x\n", ++ (u32)status); ++ } ++#ifdef DSP_DMM_DEBUG ++ status = DMM_GetHandle(pProcObject, &hDmmMgr); ++ if (DSP_SUCCEEDED(status)) ++ DMM_MemMapDump(hDmmMgr); ++#endif ++ } ++ } ++ if (nodeType != NODE_MESSAGE) { ++ if (hNode->streamConnect) { ++ MEM_Free(hNode->streamConnect); ++ hNode->streamConnect = NULL; ++ } ++ } ++ if (hNode->pstrDevName) { ++ MEM_Free(hNode->pstrDevName); ++ hNode->pstrDevName = NULL; ++ } ++ ++ if (hNode->hNtfy) { ++ NTFY_Delete(hNode->hNtfy); ++ hNode->hNtfy = NULL; ++ } ++ ++ /* These were allocated in DCD_GetObjectDef (via NODE_Allocate) */ ++ if (hNode->dcdProps.objData.nodeObj.pstrCreatePhaseFxn) { ++ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrCreatePhaseFxn); ++ hNode->dcdProps.objData.nodeObj.pstrCreatePhaseFxn = NULL; ++ } ++ ++ if (hNode->dcdProps.objData.nodeObj.pstrExecutePhaseFxn) { ++ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrExecutePhaseFxn); ++ hNode->dcdProps.objData.nodeObj.pstrExecutePhaseFxn = NULL; ++ } ++ ++ if (hNode->dcdProps.objData.nodeObj.pstrDeletePhaseFxn) { ++ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrDeletePhaseFxn); ++ hNode->dcdProps.objData.nodeObj.pstrDeletePhaseFxn = NULL; ++ } ++ ++ if (hNode->dcdProps.objData.nodeObj.pstrIAlgName) { ++ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrIAlgName); ++ hNode->dcdProps.objData.nodeObj.pstrIAlgName = NULL; ++ } ++ ++ /* Free all SM address translator resources */ ++ if (hXlator) { ++ (void) CMM_XlatorDelete(hXlator, TRUE); /* force free */ ++ hXlator = NULL; ++ } ++ ++ if (hNode->hNldrNode) { ++ hNodeMgr->nldrFxns.pfnFree(hNode->hNldrNode); ++ hNode->hNldrNode = NULL; ++ } ++ ++ MEM_FreeObject(hNode); ++ hNode = NULL; ++} ++ ++/* ++ * ======== DeleteNodeMgr ======== ++ * Purpose: ++ * Frees the node manager. 
++ */ ++static void DeleteNodeMgr(struct NODE_MGR *hNodeMgr) ++{ ++ struct NODE_OBJECT *hNode; ++ ++ if (MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)) { ++ /* Free resources */ ++ if (hNodeMgr->hDcdMgr) ++ DCD_DestroyManager(hNodeMgr->hDcdMgr); ++ ++ /* Remove any elements remaining in lists */ ++ if (hNodeMgr->nodeList) { ++ while ((hNode = ++ (struct NODE_OBJECT *)LST_GetHead(hNodeMgr-> ++ nodeList))) ++ DeleteNode(hNode, NULL); ++ ++ DBC_Assert(LST_IsEmpty(hNodeMgr->nodeList)); ++ LST_Delete(hNodeMgr->nodeList); ++ } ++ if (hNodeMgr->hNtfy) ++ NTFY_Delete(hNodeMgr->hNtfy); ++ ++ if (hNodeMgr->pipeMap) ++ GB_delete(hNodeMgr->pipeMap); ++ ++ if (hNodeMgr->pipeDoneMap) ++ GB_delete(hNodeMgr->pipeDoneMap); ++ ++ if (hNodeMgr->chnlMap) ++ GB_delete(hNodeMgr->chnlMap); ++ ++ if (hNodeMgr->dmaChnlMap) ++ GB_delete(hNodeMgr->dmaChnlMap); ++ ++ if (hNodeMgr->zChnlMap) ++ GB_delete(hNodeMgr->zChnlMap); ++ ++ if (hNodeMgr->hDisp) ++ DISP_Delete(hNodeMgr->hDisp); ++ ++ if (hNodeMgr->hSync) ++ SYNC_DeleteCS(hNodeMgr->hSync); ++ ++ if (hNodeMgr->hStrmMgr) ++ STRM_Delete(hNodeMgr->hStrmMgr); ++ ++ /* Delete the loader */ ++ if (hNodeMgr->hNldr) ++ hNodeMgr->nldrFxns.pfnDelete(hNodeMgr->hNldr); ++ ++ if (hNodeMgr->fLoaderInit) ++ hNodeMgr->nldrFxns.pfnExit(); ++ ++ MEM_FreeObject(hNodeMgr); ++ } ++} ++ ++/* ++ * ======== FillStreamConnect ======== ++ * Purpose: ++ * Fills stream information. ++ */ ++static void FillStreamConnect(struct NODE_OBJECT *hNode1, ++ struct NODE_OBJECT *hNode2, ++ u32 uStream1, u32 uStream2) ++{ ++ u32 uStrmIndex; ++ struct DSP_STREAMCONNECT *pStrm1 = NULL; ++ struct DSP_STREAMCONNECT *pStrm2 = NULL; ++ enum NODE_TYPE node1Type = NODE_TASK; ++ enum NODE_TYPE node2Type = NODE_TASK; ++ ++ node1Type = NODE_GetType(hNode1); ++ node2Type = NODE_GetType(hNode2); ++ if (hNode1 != (struct NODE_OBJECT *)DSP_HGPPNODE) { ++ ++ if (node1Type != NODE_DEVICE) { ++ uStrmIndex = hNode1->uNumInputs + ++ hNode1->uNumOutputs - 1; ++ pStrm1 = &(hNode1->streamConnect[uStrmIndex]); ++ pStrm1->cbStruct = sizeof(struct DSP_STREAMCONNECT); ++ pStrm1->uThisNodeStreamIndex = uStream1; ++ } ++ ++ if (hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE) { ++ /* NODE == > NODE */ ++ if (node1Type != NODE_DEVICE) { ++ pStrm1->hConnectedNode = hNode2; ++ pStrm1->uiConnectedNodeID = hNode2->nodeId; ++ pStrm1->uConnectedNodeStreamIndex = uStream2; ++ pStrm1->lType = CONNECTTYPE_NODEOUTPUT; ++ } ++ if (node2Type != NODE_DEVICE) { ++ uStrmIndex = hNode2->uNumInputs + ++ hNode2->uNumOutputs - 1; ++ pStrm2 = &(hNode2->streamConnect[uStrmIndex]); ++ pStrm2->cbStruct = ++ sizeof(struct DSP_STREAMCONNECT); ++ pStrm2->uThisNodeStreamIndex = uStream2; ++ pStrm2->hConnectedNode = hNode1; ++ pStrm2->uiConnectedNodeID = hNode1->nodeId; ++ pStrm2->uConnectedNodeStreamIndex = uStream1; ++ pStrm2->lType = CONNECTTYPE_NODEINPUT; ++ } ++ } else if (node1Type != NODE_DEVICE) ++ pStrm1->lType = CONNECTTYPE_GPPOUTPUT; ++ } else { ++ /* GPP == > NODE */ ++ DBC_Assert(hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE); ++ uStrmIndex = hNode2->uNumInputs + hNode2->uNumOutputs - 1; ++ pStrm2 = &(hNode2->streamConnect[uStrmIndex]); ++ pStrm2->cbStruct = sizeof(struct DSP_STREAMCONNECT); ++ pStrm2->uThisNodeStreamIndex = uStream2; ++ pStrm2->lType = CONNECTTYPE_GPPINPUT; ++ } ++} ++ ++/* ++ * ======== FillStreamDef ======== ++ * Purpose: ++ * Fills Stream attributes. 
++ */ ++static void FillStreamDef(struct NODE_OBJECT *hNode, ++ struct NODE_STRMDEF *pstrmDef, ++ struct DSP_STRMATTR *pAttrs) ++{ ++ struct NODE_MGR *hNodeMgr = hNode->hNodeMgr; ++ ++ if (pAttrs != NULL) { ++ pstrmDef->uNumBufs = pAttrs->uNumBufs; ++ pstrmDef->uBufsize = pAttrs->uBufsize / hNodeMgr-> ++ uDSPDataMauSize; ++ pstrmDef->uSegid = pAttrs->uSegid; ++ pstrmDef->uAlignment = pAttrs->uAlignment; ++ pstrmDef->uTimeout = pAttrs->uTimeout; ++ } else { ++ pstrmDef->uNumBufs = DEFAULTNBUFS; ++ pstrmDef->uBufsize = DEFAULTBUFSIZE / hNodeMgr-> ++ uDSPDataMauSize; ++ pstrmDef->uSegid = DEFAULTSEGID; ++ pstrmDef->uAlignment = DEFAULTALIGNMENT; ++ pstrmDef->uTimeout = DEFAULTTIMEOUT; ++ } ++} ++ ++/* ++ * ======== FreeStream ======== ++ * Purpose: ++ * Updates the channel mask and frees the pipe id. ++ */ ++static void FreeStream(struct NODE_MGR *hNodeMgr, struct STREAM stream) ++{ ++ /* Free up the pipe id unless other node has not yet been deleted. */ ++ if (stream.type == NODECONNECT) { ++ if (GB_test(hNodeMgr->pipeDoneMap, stream.devId)) { ++ /* The other node has already been deleted */ ++ GB_clear(hNodeMgr->pipeDoneMap, stream.devId); ++ GB_clear(hNodeMgr->pipeMap, stream.devId); ++ } else { ++ /* The other node has not been deleted yet */ ++ GB_set(hNodeMgr->pipeDoneMap, stream.devId); ++ } ++ } else if (stream.type == HOSTCONNECT) { ++ if (stream.devId < hNodeMgr->ulNumChnls) { ++ GB_clear(hNodeMgr->chnlMap, stream.devId); ++ } else if (stream.devId < (2 * hNodeMgr->ulNumChnls)) { ++ /* dsp-dma */ ++ GB_clear(hNodeMgr->dmaChnlMap, stream.devId - ++ (1 * hNodeMgr->ulNumChnls)); ++ } else if (stream.devId < (3 * hNodeMgr->ulNumChnls)) { ++ /* zero-copy */ ++ GB_clear(hNodeMgr->zChnlMap, stream.devId - ++ (2 * hNodeMgr->ulNumChnls)); ++ } ++ } ++} ++ ++/* ++ * ======== GetFxnAddress ======== ++ * Purpose: ++ * Retrieves the address for create, execute or delete phase for a node. ++ */ ++static DSP_STATUS GetFxnAddress(struct NODE_OBJECT *hNode, u32 *pulFxnAddr, ++ u32 uPhase) ++{ ++ char *pstrFxnName = NULL; ++ struct NODE_MGR *hNodeMgr = hNode->hNodeMgr; ++ DSP_STATUS status = DSP_SOK; ++ DBC_Require(NODE_GetType(hNode) == NODE_TASK || ++ NODE_GetType(hNode) == NODE_DAISSOCKET || ++ NODE_GetType(hNode) == NODE_MESSAGE); ++ ++ switch (uPhase) { ++ case CREATEPHASE: ++ pstrFxnName = hNode->dcdProps.objData.nodeObj. ++ pstrCreatePhaseFxn; ++ break; ++ case EXECUTEPHASE: ++ pstrFxnName = hNode->dcdProps.objData.nodeObj. ++ pstrExecutePhaseFxn; ++ break; ++ case DELETEPHASE: ++ pstrFxnName = hNode->dcdProps.objData.nodeObj. ++ pstrDeletePhaseFxn; ++ break; ++ default: ++ /* Should never get here */ ++ DBC_Assert(false); ++ break; ++ } ++ ++ status = hNodeMgr->nldrFxns.pfnGetFxnAddr(hNode->hNldrNode, pstrFxnName, ++ pulFxnAddr); ++ ++ return status; ++} ++ ++/* ++ * ======== GetNodeInfo ======== ++ * Purpose: ++ * Retrieves the node information. ++ */ ++void GetNodeInfo(struct NODE_OBJECT *hNode, struct DSP_NODEINFO *pNodeInfo) ++{ ++ u32 i; ++ ++ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ DBC_Require(pNodeInfo != NULL); ++ ++ pNodeInfo->cbStruct = sizeof(struct DSP_NODEINFO); ++ pNodeInfo->nbNodeDatabaseProps = hNode->dcdProps.objData.nodeObj. 
++ ndbProps; ++ pNodeInfo->uExecutionPriority = hNode->nPriority; ++ pNodeInfo->hDeviceOwner = hNode->hDeviceOwner; ++ pNodeInfo->uNumberStreams = hNode->uNumInputs + hNode->uNumOutputs; ++ pNodeInfo->uNodeEnv = hNode->nodeEnv; ++ ++ pNodeInfo->nsExecutionState = NODE_GetState(hNode); ++ ++ /* Copy stream connect data */ ++ for (i = 0; i < hNode->uNumInputs + hNode->uNumOutputs; i++) ++ pNodeInfo->scStreamConnection[i] = hNode->streamConnect[i]; ++ ++} ++ ++/* ++ * ======== GetNodeProps ======== ++ * Purpose: ++ * Retrieve node properties. ++ */ ++static DSP_STATUS GetNodeProps(struct DCD_MANAGER *hDcdMgr, ++ struct NODE_OBJECT *hNode, ++ CONST struct DSP_UUID *pNodeId, ++ struct DCD_GENERICOBJ *pdcdProps) ++{ ++ u32 uLen; ++ struct NODE_MSGARGS *pMsgArgs; ++ struct NODE_TASKARGS *pTaskArgs; ++ enum NODE_TYPE nodeType = NODE_TASK; ++ struct DSP_NDBPROPS *pndbProps = &(pdcdProps->objData.nodeObj.ndbProps); ++ DSP_STATUS status = DSP_SOK; ++#ifdef DEBUG ++ char szUuid[MAXUUIDLEN]; ++#endif ++ ++ status = DCD_GetObjectDef(hDcdMgr, (struct DSP_UUID *)pNodeId, ++ DSP_DCDNODETYPE, pdcdProps); ++ ++ if (DSP_SUCCEEDED(status)) { ++ hNode->nType = nodeType = pndbProps->uNodeType; ++ ++#ifdef DEBUG ++ /* Create UUID value to set in registry. */ ++ UUID_UuidToString((struct DSP_UUID *)pNodeId, szUuid, ++ MAXUUIDLEN); ++ DBG_Trace(DBG_LEVEL7, "\n** (node) UUID: %s\n", szUuid); ++#endif ++ ++ /* Fill in message args that come from NDB */ ++ if (nodeType != NODE_DEVICE) { ++ pMsgArgs = &(hNode->createArgs.asa.msgArgs); ++ pMsgArgs->uSegid = pdcdProps->objData.nodeObj.uMsgSegid; ++ pMsgArgs->uNotifyType = pdcdProps->objData.nodeObj. ++ uMsgNotifyType; ++ pMsgArgs->uMaxMessages = pndbProps->uMessageDepth; ++#ifdef DEBUG ++ DBG_Trace(DBG_LEVEL7, ++ "** (node) Max Number of Messages: 0x%x\n", ++ pMsgArgs->uMaxMessages); ++#endif ++ } else { ++ /* Copy device name */ ++ DBC_Require(pndbProps->acName); ++ uLen = strlen(pndbProps->acName); ++ DBC_Assert(uLen < MAXDEVNAMELEN); ++ hNode->pstrDevName = MEM_Calloc(uLen + 1, MEM_PAGED); ++ if (hNode->pstrDevName == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ strncpy(hNode->pstrDevName, ++ pndbProps->acName, uLen); ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Fill in create args that come from NDB */ ++ if (nodeType == NODE_TASK || nodeType == NODE_DAISSOCKET) { ++ pTaskArgs = &(hNode->createArgs.asa.taskArgs); ++ pTaskArgs->nPriority = pndbProps->iPriority; ++ pTaskArgs->uStackSize = pndbProps->uStackSize; ++ pTaskArgs->uSysStackSize = pndbProps->uSysStackSize; ++ pTaskArgs->uStackSeg = pndbProps->uStackSeg; ++#ifdef DEBUG ++ DBG_Trace(DBG_LEVEL7, ++ "** (node) Priority: 0x%x\n" "** (node) Stack" ++ " Size: 0x%x words\n" "** (node) System Stack" ++ " Size: 0x%x words\n" "** (node) Stack" ++ " Segment: 0x%x\n\n", ++ "** (node) profile count : 0x%x \n \n", ++ pTaskArgs->nPriority, pTaskArgs->uStackSize, ++ pTaskArgs->uSysStackSize, ++ pTaskArgs->uStackSeg, ++ pndbProps->uCountProfiles); ++#endif ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== GetProcProps ======== ++ * Purpose: ++ * Retrieve the processor properties. 
++ */ ++static DSP_STATUS GetProcProps(struct NODE_MGR *hNodeMgr, ++ struct DEV_OBJECT *hDevObject) ++{ ++ struct CFG_DEVNODE *hDevNode; ++ struct CFG_HOSTRES hostRes; ++ DSP_STATUS status = DSP_SOK; ++ ++ status = DEV_GetDevNode(hDevObject, &hDevNode); ++ if (DSP_SUCCEEDED(status)) ++ status = CFG_GetHostResources(hDevNode, &hostRes); ++ ++ if (DSP_SUCCEEDED(status)) { ++ hNodeMgr->ulChnlOffset = hostRes.dwChnlOffset; ++ hNodeMgr->ulChnlBufSize = hostRes.dwChnlBufSize; ++ hNodeMgr->ulNumChnls = hostRes.dwNumChnls; ++ ++ /* ++ * PROC will add an API to get DSP_PROCESSORINFO. ++ * Fill in default values for now. ++ */ ++ /* TODO -- Instead of hard coding, take from registry */ ++ hNodeMgr->procFamily = 6000; ++ hNodeMgr->procType = 6410; ++ hNodeMgr->nMinPri = DSP_NODE_MIN_PRIORITY; ++ hNodeMgr->nMaxPri = DSP_NODE_MAX_PRIORITY; ++ hNodeMgr->uDSPWordSize = DSPWORDSIZE; ++ hNodeMgr->uDSPDataMauSize = DSPWORDSIZE; ++ hNodeMgr->uDSPMauSize = 1; ++ ++ } ++ return status; ++} ++ ++ ++ ++/* ++ * ======== NODE_GetUUIDProps ======== ++ * Purpose: ++ * Fetch Node UUID properties from DCD/DOF file. ++ */ ++DSP_STATUS NODE_GetUUIDProps(DSP_HPROCESSOR hProcessor, ++ IN CONST struct DSP_UUID *pNodeId, ++ OUT struct DSP_NDBPROPS *pNodeProps) ++{ ++ struct NODE_MGR *hNodeMgr = NULL; ++ struct DEV_OBJECT *hDevObject; ++ DSP_STATUS status = DSP_SOK; ++ struct DCD_NODEPROPS dcdNodeProps; ++ struct DSP_PROCESSORSTATE procStatus; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hProcessor != NULL); ++ DBC_Require(pNodeId != NULL); ++ ++ if (hProcessor == NULL || pNodeId == NULL) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ status = PROC_GetState(hProcessor, &procStatus, ++ sizeof(struct DSP_PROCESSORSTATE)); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* If processor is in error state then don't attempt ++ to send the message */ ++ if (procStatus.iState == PROC_ERROR) { ++ GT_1trace(NODE_debugMask, GT_5CLASS, ++ "NODE_GetUUIDProps: proc Status 0x%x\n", ++ procStatus.iState); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ ++ GT_3trace(NODE_debugMask, GT_ENTER, ++ "NODE_GetUUIDProps: " "\thProcessor: " ++ "0x%x\tpNodeId: 0x%x" "\tpNodeProps: 0x%x\n", hProcessor, ++ pNodeId, pNodeProps); ++ ++ status = PROC_GetDevObject(hProcessor, &hDevObject); ++ if (DSP_SUCCEEDED(status) && hDevObject != NULL) { ++ status = DEV_GetNodeManager(hDevObject, &hNodeMgr); ++ if (hNodeMgr == NULL) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ } ++ ++ /* ++ * Enter the critical section. This is needed because ++ * DCD_GetObjectDef will ultimately end up calling DBLL_open/close, ++ * which needs to be protected in order to not corrupt the zlib manager ++ * (COD). ++ */ ++ status = SYNC_EnterCS(hNodeMgr->hSync); ++ ++ if (DSP_SUCCEEDED(status)) { ++ dcdNodeProps.pstrCreatePhaseFxn = NULL; ++ dcdNodeProps.pstrExecutePhaseFxn = NULL; ++ dcdNodeProps.pstrDeletePhaseFxn = NULL; ++ dcdNodeProps.pstrIAlgName = NULL; ++ ++ status = DCD_GetObjectDef(hNodeMgr->hDcdMgr, ++ (struct DSP_UUID *) pNodeId, ++ DSP_DCDNODETYPE, ++ (struct DCD_GENERICOBJ *) &dcdNodeProps); ++ if (DSP_SUCCEEDED(status)) { ++ *pNodeProps = dcdNodeProps.ndbProps; ++ if (dcdNodeProps.pstrCreatePhaseFxn) ++ MEM_Free(dcdNodeProps.pstrCreatePhaseFxn); ++ ++ if (dcdNodeProps.pstrExecutePhaseFxn) ++ MEM_Free(dcdNodeProps.pstrExecutePhaseFxn); ++ ++ if (dcdNodeProps.pstrDeletePhaseFxn) ++ MEM_Free(dcdNodeProps.pstrDeletePhaseFxn); ++ ++ if (dcdNodeProps.pstrIAlgName) ++ MEM_Free(dcdNodeProps.pstrIAlgName); ++ } ++ /* Leave the critical section, we're done. 
*/ ++ (void)SYNC_LeaveCS(hNodeMgr->hSync); ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== GetRMSFxns ======== ++ * Purpose: ++ * Retrieve the RMS functions. ++ */ ++static DSP_STATUS GetRMSFxns(struct NODE_MGR *hNodeMgr) ++{ ++ s32 i; ++ struct DEV_OBJECT *hDev = hNodeMgr->hDevObject; ++ DSP_STATUS status = DSP_SOK; ++ ++ static char *pszFxns[NUMRMSFXNS] = { ++ "RMS_queryServer", /* RMSQUERYSERVER */ ++ "RMS_configureServer", /* RMSCONFIGURESERVER */ ++ "RMS_createNode", /* RMSCREATENODE */ ++ "RMS_executeNode", /* RMSEXECUTENODE */ ++ "RMS_deleteNode", /* RMSDELETENODE */ ++ "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */ ++ "RMS_readMemory", /* RMSREADMEMORY */ ++ "RMS_writeMemory", /* RMSWRITEMEMORY */ ++ "RMS_copy", /* RMSCOPY */ ++ }; ++ ++ for (i = 0; i < NUMRMSFXNS; i++) { ++ status = DEV_GetSymbol(hDev, pszFxns[i], ++ &(hNodeMgr->ulFxnAddrs[i])); ++ if (DSP_FAILED(status)) { ++ if (status == COD_E_SYMBOLNOTFOUND) { ++ /* ++ * May be loaded dynamically (in the future), ++ * but return an error for now. ++ */ ++ GT_1trace(NODE_debugMask, GT_6CLASS, ++ "RMS function: %s " ++ "currently not loaded\n", pszFxns[i]); ++ } else { ++ GT_2trace(NODE_debugMask, GT_6CLASS, ++ "GetRMSFxns: Symbol not " ++ "found: %s\tstatus = 0x%x\n", ++ pszFxns[i], status); ++ break; ++ } ++ } ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== Ovly ======== ++ * Purpose: ++ * Called during overlay.Sends command to RMS to copy a block of data. ++ */ ++static u32 Ovly(void *pPrivRef, u32 ulDspRunAddr, u32 ulDspLoadAddr, ++ u32 ulNumBytes, u32 nMemSpace) ++{ ++ struct NODE_OBJECT *hNode = (struct NODE_OBJECT *)pPrivRef; ++ struct NODE_MGR *hNodeMgr; ++ u32 ulBytes = 0; ++ u32 ulSize; ++ u32 ulTimeout; ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *hWmdContext; ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ ++ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ ++ hNodeMgr = hNode->hNodeMgr; ++ ++ ulSize = ulNumBytes / hNodeMgr->uDSPWordSize; ++ ulTimeout = hNode->uTimeout; ++ ++ /* Call new MemCopy function */ ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ status = DEV_GetWMDContext(hNodeMgr->hDevObject, &hWmdContext); ++ status = (*pIntfFxns->pfnBrdMemCopy)(hWmdContext, ulDspRunAddr, ++ ulDspLoadAddr, ulNumBytes, (u32) nMemSpace); ++ ++ if (DSP_SUCCEEDED(status)) ++ ulBytes = ulNumBytes; ++ ++ return ulBytes; ++} ++ ++/* ++ * ======== Write ======== ++ */ ++static u32 Write(void *pPrivRef, u32 ulDspAddr, void *pBuf, ++ u32 ulNumBytes, u32 nMemSpace) ++{ ++ struct NODE_OBJECT *hNode = (struct NODE_OBJECT *) pPrivRef; ++ struct NODE_MGR *hNodeMgr; ++ u16 memType; ++ u32 ulTimeout; ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *hWmdContext; ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ ++ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); ++ DBC_Require(nMemSpace & DBLL_CODE || nMemSpace & DBLL_DATA); ++ ++ hNodeMgr = hNode->hNodeMgr; ++ ++ ulTimeout = hNode->uTimeout; ++ memType = (nMemSpace & DBLL_CODE) ? 
RMS_CODE : RMS_DATA; ++ ++ /* Call new MemWrite function */ ++ pIntfFxns = hNodeMgr->pIntfFxns; ++ status = DEV_GetWMDContext(hNodeMgr->hDevObject, &hWmdContext); ++ status = (*pIntfFxns->pfnBrdMemWrite) (hWmdContext, pBuf, ulDspAddr, ++ ulNumBytes, memType); ++ ++ return ulNumBytes; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/proc.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/proc.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/proc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/proc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2019 @@ ++/* ++ * proc.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== proc.c ======== ++ * Description: ++ * Processor interface at the driver level. ++ * ++ * Public Functions: ++ * PROC_Attach ++ * PROC_Ctrl ++ * PROC_Detach ++ * PROC_EnumNodes ++ * PROC_GetResourceInfo ++ * PROC_Exit ++ * PROC_FlushMemory ++ * PROC_GetState ++ * PROC_GetProcessorId ++ * PROC_GetTrace ++ * PROC_Init ++ * PROC_Load ++ * PROC_Map ++ * PROC_NotifyClients ++ * PROC_RegisterNotify ++ * PROC_ReserveMemory ++ * PROC_Start ++ * PROC_UnMap ++ * PROC_UnReserveMemory ++ * PROC_InvalidateMemory ++ ++ *! Revision History ++ *! ======== ======== ++ *! 04-Apr-2007 sh Added PROC_InvalidateMemory API ++ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian ++ *! Used MEM_FlushCache instead of OS specific API ++ *! Integrated Alan's code review updates ++ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature ++ *! 08-Mar-2004 vp Added g_pszLastCoff member to PROC_OBJECT. ++ *! This is required for multiprocessor environment. ++ *! 09-Feb-2004 vp Added PROC_GetProcessorID function ++ *! 22-Apr-2003 vp Fixed issue with the string that stores coff file name ++ *! 03-Apr-2003 sb Fix DEH deregistering bug ++ *! 26-Mar-2003 vp Commented the call to DSP deep sleep in PROC_Start function. ++ *! 18-Feb-2003 vp Code review updates. ++ *! 18-Oct-2002 vp Ported to Linux platform. ++ *! 22-May-2002 sg Do IOCTL-to-PWR translation before calling PWR_SleepDSP. ++ *! 14-May-2002 sg Use CSL_Atoi() instead of atoi(). ++ *! 13-May-2002 sg Propagate PWR return codes upwards. ++ *! 07-May-2002 sg Added check for, and call to PWR functions in PROC_Ctrl. ++ *! 02-May-2002 sg Added "nap" mode: put DSP to sleep once booted. ++ *! 01-Apr-2002 jeh Assume word addresses in PROC_GetTrace(). ++ *! 29-Nov-2001 jeh Don't call DEH function if hDehMgr == NULL. ++ *! 05-Nov-2001 kc: Updated PROC_RegisterNotify and PROC_GetState to support ++ *! DEH module. ++ *! 09-Oct-2001 jeh Fix number of bytes calculated in PROC_GetTrace(). ++ *! 11-Sep-2001 jeh Delete MSG manager in PROC_Monitor() to fix memory leak. ++ *! 29-Aug-2001 rr: DCD_AutoRegister and IOOnLoaded moved before COD_LoadBase ++ *! to facilitate the external loading. ++ *! 14-Aug-2001 ag DCD_AutoRegister() now called before IOOnLoaded() fxn. ++ *! 21-Jun-2001 rr: MSG_Create is done only the first time. ++ *! 
02-May-2001 jeh Return failure in PROC_Load if IOOnLoaded function returns ++ *! error other than E_NOTIMPL. ++ *! 03-Apr-2001 sg: Changed DSP_DCD_ENOAUTOREGISTER to DSP_EDCDNOAUTOREGISTER. ++ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. ++ *! 05-Jan-2001 rr: PROC_LOAD MSG_Create error is checked. ++ *! 15-Dec-2000 rr: IoOnLoaded is checked for WSX_STATUS. We fail to load ++ *! if DEV_Create2 fails; ie, no non-RMS targets can be ++ *! loaded. ++ *! 12-Dec-2000 rr: PROC_Start's DEV_Create2 is checked for WSX_STATUS. ++ *! 28-Nov-2000 jeh Added call to IO OnLoaded function to PROC_Load(). ++ *! 29-Nov-2000 rr: Incorporated code review changes. ++ *! 03-Nov-2000 rr: Auto_Register happens after PROC_Load. ++ *! 06-Oct-2000 rr: Updated to ver 0.9. PROC_Start calls DEV_Create2 and ++ *! WMD_BRD_STOP is always followed by DEV_Destroy2. ++ *! 05-Sep-2000 rr: PROC_GetTrace calculates the Trace symbol for 55 in a ++ *! different way. ++ *! 10-Aug-2000 rr: PROC_NotifyClients, PROC_GetProcessorHandle Added ++ *! 07-Aug-2000 rr: PROC_IDLE/SYNCINIT/UNKNOWN state removed. ++ *! WMD fxns are checked for WSX_STATUS. ++ *! PROC_Attach does not alter the state of the BRD. ++ *! PROC_Run removed. ++ *! 04-Aug-2000 rr: All the functions return DSP_EHANDLE if proc handle is ++ *! invalid ++ *! 27-Jul-2000 rr: PROC_GetTrace and PROC_Load implemented. Updated to ++ *! ver 0.8 API. ++ *! 06-Jul-2000 rr: Created. ++ */ ++ ++/* ------------------------------------ Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++/* ----------------------------------- Mini Driver */ ++#include ++#include ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++#include ++ ++#ifndef RES_CLEANUP_DISABLE ++#include ++#endif ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define PROC_SIGNATURE 0x434F5250 /* "PROC" (in reverse). */ ++#define MAXCMDLINELEN 255 ++#define PROC_ENVPROCID "PROC_ID=%d" ++#define MAXPROCIDLEN (8 + 5) ++#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ ++#define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */ ++#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ ++ ++extern char *iva_img; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask PROC_DebugMask = { NULL, NULL }; /* WCD MGR Mask */ ++#endif ++ ++static u32 cRefs; ++ ++struct SYNC_CSOBJECT *hProcLock; /* For critical sections */ ++ ++/* ----------------------------------- Function Prototypes */ ++static DSP_STATUS PROC_Monitor(struct PROC_OBJECT *hProcessor); ++static s32 GetEnvpCount(char **envp); ++static char **PrependEnvp(char **newEnvp, char **envp, s32 cEnvp, s32 cNewEnvp, ++ char *szVar); ++ ++/* ++ * ======== PROC_CleanupAllResources ===== ++ * Purpose: ++ * Funtion to clean the process resources. 
++ * This function is intended to be called when the ++ * processor is in error state ++ */ ++DSP_STATUS PROC_CleanupAllResources(void) ++{ ++ DSP_STATUS dsp_status = DSP_SOK; ++ HANDLE hDrvObject = NULL; ++ struct PROCESS_CONTEXT *pCtxtclosed = NULL; ++ struct PROC_OBJECT *proc_obj_ptr, *temp; ++ ++ GT_0trace(PROC_DebugMask, GT_ENTER, "PROC_CleanupAllResources\n"); ++ ++ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ if (DSP_FAILED(dsp_status)) ++ goto func_end; ++ ++ DRV_GetProcCtxtList(&pCtxtclosed, (struct DRV_OBJECT *)hDrvObject); ++ ++ while (pCtxtclosed != NULL) { ++ if (current->tgid != pCtxtclosed->pid) { ++ GT_1trace(PROC_DebugMask, GT_5CLASS, ++ "***Cleanup of " ++ "process***%d\n", pCtxtclosed->pid); ++ list_for_each_entry_safe(proc_obj_ptr, temp, ++ &pCtxtclosed->processor_list, ++ proc_object) { ++ PROC_Detach(proc_obj_ptr, pCtxtclosed); ++ } ++ } ++ pCtxtclosed = pCtxtclosed->next; ++ } ++ ++ WMD_DEH_ReleaseDummyMem(); ++func_end: ++ return dsp_status; ++} ++ ++/* ++ * ======== PROC_Attach ======== ++ * Purpose: ++ * Prepare for communication with a particular DSP processor, and return ++ * a handle to the processor object. ++ */ ++DSP_STATUS ++PROC_Attach(u32 uProcessor, OPTIONAL CONST struct DSP_PROCESSORATTRIN *pAttrIn, ++ OUT DSP_HPROCESSOR *phProcessor, struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEV_OBJECT *hDevObject; ++ struct PROC_OBJECT *pProcObject = NULL; ++ struct MGR_OBJECT *hMgrObject = NULL; ++ struct DRV_OBJECT *hDrvObject = NULL; ++ u32 devType; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phProcessor != NULL); ++ ++ GT_3trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Attach, args:\n\t" ++ "uProcessor: 0x%x\n\tpAttrIn: 0x%x\n\tphProcessor:" ++ "0x%x\n", uProcessor, pAttrIn, phProcessor); ++ ++ /* Get the Driver and Manager Object Handles */ ++ status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); ++ if (DSP_SUCCEEDED(status)) { ++ status = CFG_GetObject((u32 *)&hMgrObject, REG_MGR_OBJECT); ++ if (DSP_FAILED(status)) { ++ /* don't propogate CFG errors from this PROC function */ ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach: DSP_FAILED to get" ++ "the Manager Object.\n", status); ++ } ++ } else { ++ /* don't propogate CFG errors from this PROC function */ ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach: failed to get the" ++ " DriverObject, 0x%x!\n", status); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Get the Device Object */ ++ status = DRV_GetDevObject(uProcessor, hDrvObject, &hDevObject); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach: failed to get" ++ " DevObject, 0x%x!\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetDevType(hDevObject, &devType); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach: failed to get" ++ " DevType, 0x%x!\n", status); ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* If we made it this far, create the Proceesor object: */ ++ MEM_AllocObject(pProcObject, struct PROC_OBJECT, PROC_SIGNATURE); ++ /* Fill out the Processor Object: */ ++ if (pProcObject == NULL) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach:Out of memeory \n"); ++ status = DSP_EFAIL; ++ goto func_end; ++ } ++ pProcObject->hDevObject = hDevObject; ++ pProcObject->hMgrObject = hMgrObject; ++ pProcObject->uProcessor = devType; ++ /* Store TGID of Caller Process */ ++ pProcObject->hProcess = current->tgid; ++ ++ INIT_LIST_HEAD(&pProcObject->proc_object); ++ ++ if (pAttrIn) 
++ pProcObject->uTimeout = pAttrIn->uTimeout; ++ else ++ pProcObject->uTimeout = PROC_DFLT_TIMEOUT; ++ ++ status = DEV_GetIntfFxns(hDevObject, &pProcObject->pIntfFxns); ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetWMDContext(hDevObject, ++ &pProcObject->hWmdContext); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach Could not" ++ " get the WMD Context.\n", status); ++ MEM_FreeObject(pProcObject); ++ } ++ } else { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach Could not get" ++ " the DEV_ Interface fxns.\n", status); ++ MEM_FreeObject(pProcObject); ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* Create the Notification Object */ ++ /* This is created with no event mask, no notify mask ++ * and no valid handle to the notification. They all get ++ * filled up when PROC_RegisterNotify is called */ ++ status = NTFY_Create(&pProcObject->hNtfy); ++ if (DSP_SUCCEEDED(status)) { ++ /* Insert the Processor Object into the DEV List. ++ * Return handle to this Processor Object: ++ * Find out if the Device is already attached to a ++ * Processor. If so, return AlreadyAttached status */ ++ LST_InitElem(&pProcObject->link); ++ status = DEV_InsertProcObject(pProcObject->hDevObject, ++ (u32)pProcObject, ++ &pProcObject->bIsAlreadyAttached); ++ if (DSP_SUCCEEDED(status)) { ++ if (pProcObject->bIsAlreadyAttached) { ++ status = DSP_SALREADYATTACHED; ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Attach: Processor " ++ "Already Attached!\n"); ++ } ++ } else { ++ if (pProcObject->hNtfy) ++ NTFY_Delete(pProcObject->hNtfy); ++ ++ MEM_FreeObject(pProcObject); ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach: failed to insert " ++ "Proc Object into DEV, 0x%x!\n", status); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ *phProcessor = (DSP_HPROCESSOR)pProcObject; ++ (void)PROC_NotifyClients(pProcObject, ++ DSP_PROCESSORATTACH); ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Attach: Processor " ++ "Attach Success!\n"); ++ } ++ } else { ++ /* Don't leak memory if DSP_FAILED */ ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Attach: Could not allocate " ++ "storage for notification \n"); ++ MEM_FreeObject(pProcObject); ++ } ++#ifndef RES_CLEANUP_DISABLE ++ spin_lock(&pr_ctxt->proc_list_lock); ++ list_add(&pProcObject->proc_object, &pr_ctxt->processor_list); ++ spin_unlock(&pr_ctxt->proc_list_lock); ++#endif ++func_end: ++ DBC_Ensure((status == DSP_EFAIL && *phProcessor == NULL) || ++ (DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) || ++ (status == DSP_SALREADYATTACHED && ++ MEM_IsValidHandle(pProcObject, PROC_SIGNATURE))); ++ GT_2trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Attach, results:\n\t" ++ "status: 0x%x\n\thProcessor: 0x%x\n", status, *phProcessor); ++ ++ return status; ++} ++ ++static DSP_STATUS GetExecFile(struct CFG_DEVNODE *hDevNode, ++ struct DEV_OBJECT *hDevObject, ++ u32 size, char *execFile) ++{ ++ s32 devType; ++ s32 len; ++ ++ DEV_GetDevType(hDevObject, (u32 *) &devType); ++ if (devType == DSP_UNIT) { ++ return CFG_GetExecFile(hDevNode, size, execFile); ++ } else if (devType == IVA_UNIT) { ++ if (iva_img) { ++ len = strlen(iva_img); ++ strncpy(execFile, iva_img, len + 1); ++ return DSP_SOK; ++ } ++ } ++ return DSP_EFILE; ++} ++ ++/* ++ * ======== PROC_AutoStart ======== = ++ * Purpose: ++ * A Particular device gets loaded with the default image ++ * if the AutoStart flag is set. 
++ * Parameters: ++ * hDevObject: Handle to the Device ++ * Returns: ++ * DSP_SOK: On Successful Loading ++ * DSP_EFAIL General Failure ++ * Requires: ++ * hDevObject != NULL ++ * Ensures: ++ */ ++DSP_STATUS PROC_AutoStart(struct CFG_DEVNODE *hDevNode, ++ struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ u32 dwAutoStart = 0; /* autostart flag */ ++ struct PROC_OBJECT *pProcObject; ++ struct PROC_OBJECT *hProcObject; ++ char szExecFile[MAXCMDLINELEN]; ++ char *argv[2]; ++ struct MGR_OBJECT *hMgrObject = NULL; ++ s32 devType; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hDevNode != NULL); ++ DBC_Require(hDevObject != NULL); ++ ++ GT_2trace(PROC_DebugMask, GT_ENTER, ++ "Entered PROC_AutoStart, args:\n\t" ++ "hDevNode: 0x%x\thDevObject: 0x%x\n", hDevNode, hDevObject); ++ /* Create a Dummy PROC Object */ ++ if (DSP_FAILED(CFG_GetObject((u32 *)&hMgrObject, ++ REG_MGR_OBJECT))) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_AutoStart: DSP_FAILED to " ++ "Get MGR Object\n"); ++ goto func_end; ++ } ++ MEM_AllocObject(pProcObject, struct PROC_OBJECT, PROC_SIGNATURE); ++ if (pProcObject == NULL) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_AutoStart: DSP_FAILED " ++ "to Create a dummy Processor\n"); ++ goto func_end; ++ } ++ GT_0trace(PROC_DebugMask, GT_1CLASS, "NTFY Created \n"); ++ pProcObject->hDevObject = hDevObject; ++ pProcObject->hMgrObject = hMgrObject; ++ hProcObject = pProcObject; ++ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, ++ &pProcObject->pIntfFxns))) { ++ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, ++ &pProcObject->hWmdContext))) { ++ status = DSP_SOK; ++ } else { ++ MEM_FreeObject(hProcObject); ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_AutoStart: Failed " ++ "to get WMD Context \n"); ++ } ++ } else { ++ MEM_FreeObject(hProcObject); ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_AutoStart: Failed to " ++ "get IntFxns \n"); ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* Stop the Device, put it into standby mode */ ++ status = PROC_Stop(hProcObject); ++ if (DSP_FAILED(CFG_GetAutoStart(hDevNode, &dwAutoStart)) || ++ !dwAutoStart) { ++ status = DSP_EFAIL; ++ /* DSP_FAILED to Get s32 Fxn or Wmd Context */ ++ GT_0trace(PROC_DebugMask, GT_1CLASS, "PROC_AutoStart: " ++ "CFG_GetAutoStart DSP_FAILED \n"); ++ goto func_cont; ++ } ++ /* Get the default executable for this board... */ ++ DEV_GetDevType(hDevObject, (u32 *)&devType); ++ pProcObject->uProcessor = devType; ++ if (DSP_SUCCEEDED(GetExecFile(hDevNode, hDevObject, ++ sizeof(szExecFile), szExecFile))) { ++ argv[0] = szExecFile; ++ argv[1] = NULL; ++ /* ...and try to load it: */ ++ status = PROC_Load(hProcObject, 1, (CONST char **)argv, NULL); ++ if (DSP_SUCCEEDED(status)) { ++ status = PROC_Start(hProcObject); ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_AutoStart: Processor started " ++ "running\n"); ++ } else { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_AutoStart: DSP_FAILED To " ++ "Start \n"); ++ } ++ } else { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_AutoStart: DSP_FAILED to Load\n"); ++ } ++ } else { ++ status = DSP_EFILE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_AutoStart: " ++ "No Exec file found \n"); ++ } ++func_cont: ++ MEM_FreeObject(hProcObject); ++func_end: ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "Exiting PROC_AutoStart, status:0x%x\n", status); ++ return status; ++} ++ ++/* ++ * ======== PROC_Ctrl ======== ++ * Purpose: ++ * Pass control information to the GPP device driver managing the ++ * DSP processor. 
++ * ++ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge ++ * application developer's API. ++ * Call the WMD_ICOTL Fxn with the Argument This is a Synchronous ++ * Operation. arg can be null. ++ */ ++DSP_STATUS PROC_Ctrl(DSP_HPROCESSOR hProcessor, u32 dwCmd, ++ IN struct DSP_CBDATA *arg) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = hProcessor; ++ u32 timeout = 0; ++ ++ DBC_Require(cRefs > 0); ++ GT_3trace(PROC_DebugMask, GT_ENTER, ++ "Entered PROC_Ctrl, args:\n\thProcessor:" ++ " 0x%x\n\tdwCmd: 0x%x\n\targ: 0x%x\n", hProcessor, dwCmd, arg); ++ ++ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ /* intercept PWR deep sleep command */ ++ if (dwCmd == WMDIOCTL_DEEPSLEEP) { ++ timeout = arg->cbData; ++ status = PWR_SleepDSP(PWR_DEEPSLEEP, timeout); ++ } ++ /* intercept PWR emergency sleep command */ ++ else if (dwCmd == WMDIOCTL_EMERGENCYSLEEP) { ++ timeout = arg->cbData; ++ status = PWR_SleepDSP(PWR_EMERGENCYDEEPSLEEP, timeout); ++ } else if (dwCmd == PWR_DEEPSLEEP) { ++ /* timeout = arg->cbData; */ ++ status = PWR_SleepDSP(PWR_DEEPSLEEP, timeout); ++ } ++ /* intercept PWR wake commands */ ++ else if (dwCmd == WMDIOCTL_WAKEUP) { ++ timeout = arg->cbData; ++ status = PWR_WakeDSP(timeout); ++ } else if (dwCmd == PWR_WAKEUP) { ++ /* timeout = arg->cbData; */ ++ status = PWR_WakeDSP(timeout); ++ } else ++ if (DSP_SUCCEEDED ++ ((*pProcObject->pIntfFxns->pfnDevCntrl) ++ (pProcObject->hWmdContext, dwCmd, arg))) { ++ status = DSP_SOK; ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Ctrl: Failed \n"); ++ } ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Ctrl: InValid Processor Handle \n"); ++ } ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Ctrl, 0x%x\n", ++ status); ++ return status; ++} ++ ++/* ++ * ======== PROC_Detach ======== ++ * Purpose: ++ * Destroys the Processor Object. Removes the notification from the Dev ++ * List. ++ */ ++DSP_STATUS PROC_Detach(DSP_HPROCESSOR hProcessor, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ DBC_Require(cRefs > 0); ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Detach, args:\n\t" ++ "hProcessor: 0x%x\n", hProcessor); ++ ++ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++#ifndef RES_CLEANUP_DISABLE ++ if (pr_ctxt) { ++ spin_lock(&pr_ctxt->proc_list_lock); ++ list_del(&pProcObject->proc_object); ++ spin_unlock(&pr_ctxt->proc_list_lock); ++ } ++#endif ++ /* Notify the Client */ ++ NTFY_Notify(pProcObject->hNtfy, DSP_PROCESSORDETACH); ++ /* Remove the notification memory */ ++ if (pProcObject->hNtfy) ++ NTFY_Delete(pProcObject->hNtfy); ++ ++ if (pProcObject->g_pszLastCoff) { ++ MEM_Free(pProcObject->g_pszLastCoff); ++ pProcObject->g_pszLastCoff = NULL; ++ } ++ /* Remove the Proc from the DEV List */ ++ (void)DEV_RemoveProcObject(pProcObject->hDevObject, ++ (u32)pProcObject); ++ /* Free the Processor Object */ ++ MEM_FreeObject(pProcObject); ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Detach: InValid Processor Handle \n"); ++ } ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Detach, 0x%x\n", ++ status); ++ return status; ++} ++ ++/* ++ * ======== PROC_EnumNodes ======== ++ * Purpose: ++ * Enumerate and get configuration information about nodes allocated ++ * on a DSP processor. 
++ */ ++DSP_STATUS PROC_EnumNodes(DSP_HPROCESSOR hProcessor, OUT DSP_HNODE *aNodeTab, ++ IN u32 uNodeTabSize, OUT u32 *puNumNodes, ++ OUT u32 *puAllocated) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct NODE_MGR *hNodeMgr = NULL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(aNodeTab != NULL || uNodeTabSize == 0); ++ DBC_Require(puNumNodes != NULL); ++ DBC_Require(puAllocated != NULL); ++ ++ GT_5trace(PROC_DebugMask, GT_ENTER, "Entered PROC_EnumNodes, args:\n\t" ++ "hProcessor: 0x%x\n\taNodeTab: 0x%x\n\tuNodeTabSize: " ++ " 0x%x\n\t puNumNodes 0x%x\n\t puAllocated: 0x%x\n", ++ hProcessor, aNodeTab, uNodeTabSize, puNumNodes, ++ puAllocated); ++ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ if (DSP_SUCCEEDED(DEV_GetNodeManager(pProcObject->hDevObject, ++ &hNodeMgr))) { ++ if (hNodeMgr) { ++ status = NODE_EnumNodes(hNodeMgr, aNodeTab, ++ uNodeTabSize, ++ puNumNodes, ++ puAllocated); ++ } ++ } ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_EnumNodes: " ++ "InValid Processor Handle \n"); ++ } ++ GT_6trace(PROC_DebugMask, GT_ENTER, "Exit PROC_EnumNodes, args:\n\t" ++ "hProcessor: 0x%x\n\taNodeTab: 0x%x\n\tuNodeTabSize: " ++ " 0x%x\n\t puNumNodes 0x%x\n\t puAllocated: 0x%x\n\t " ++ "status: 0x%x \n", hProcessor, aNodeTab, uNodeTabSize, ++ puNumNodes, puAllocated, status); ++ ++ return status; ++} ++ ++/* Check if the given area blongs to process virtul memory address space */ ++static int memory_check_vma(unsigned long start, u32 len) ++{ ++ int err = 0; ++ unsigned long end; ++ struct vm_area_struct *vma; ++ ++ end = start + len; ++ if (end <= start) ++ return -EINVAL; ++ ++ down_read(¤t->mm->mmap_sem); ++ ++ while ((vma = find_vma(current->mm, start)) != NULL) { ++ ++ if (vma->vm_start > start) { ++ err = -EINVAL; ++ break; ++ } ++ ++ if (end <= vma->vm_end) ++ break; ++ ++ start = vma->vm_end; ++ } ++ ++ if (!vma) ++ err = -EINVAL; ++ ++ up_read(¤t->mm->mmap_sem); ++ ++ return err; ++} ++ ++static DSP_STATUS proc_memory_sync(DSP_HPROCESSOR hProcessor, void *pMpuAddr, ++ u32 ulSize, u32 ulFlags, ++ enum DSP_FLUSHTYPE FlushMemType) ++{ ++ /* Keep STATUS here for future additions to this function */ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ ++ DBC_Require(cRefs > 0); ++ GT_5trace(PROC_DebugMask, GT_ENTER, ++ "Entered %s, args:\n\t" ++ "hProcessor: 0x%x pMpuAddr: 0x%x ulSize 0x%x, ulFlags 0x%x\n", ++ __func__, hProcessor, pMpuAddr, ulSize, ulFlags); ++ ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "%s: InValid Processor Handle\n", __func__); ++ status = DSP_EHANDLE; ++ goto err_out; ++ } ++ ++ if (memory_check_vma((u32)pMpuAddr, ulSize)) { ++ GT_3trace(PROC_DebugMask, GT_7CLASS, ++ "%s: InValid address parameters\n", ++ __func__, pMpuAddr, ulSize); ++ status = DSP_EHANDLE; ++ goto err_out; ++ } ++ ++ (void)SYNC_EnterCS(hProcLock); ++ MEM_FlushCache(pMpuAddr, ulSize, FlushMemType); ++ (void)SYNC_LeaveCS(hProcLock); ++ ++err_out: ++ GT_2trace(PROC_DebugMask, GT_ENTER, ++ "Leaving %s [0x%x]", __func__, status); ++ ++ return status; ++} ++ ++/* ++ * ======== PROC_FlushMemory ======== ++ * Purpose: ++ * Flush cache ++ */ ++DSP_STATUS PROC_FlushMemory(DSP_HPROCESSOR hProcessor, void *pMpuAddr, ++ u32 ulSize, u32 ulFlags) ++{ ++ enum DSP_FLUSHTYPE mtype = PROC_WRITEBACK_INVALIDATE_MEM; ++ ++ if (ulFlags & 1) ++ mtype = PROC_WRITEBACK_MEM; ++ ++ return 
proc_memory_sync(hProcessor, pMpuAddr, ulSize, ulFlags, mtype); ++} ++ ++/* ++ * ======== PROC_InvalidateMemory ======== ++ * Purpose: ++ * Invalidates the memory specified ++ */ ++DSP_STATUS PROC_InvalidateMemory(DSP_HPROCESSOR hProcessor, void *pMpuAddr, ++ u32 ulSize) ++{ ++ enum DSP_FLUSHTYPE mtype = PROC_INVALIDATE_MEM; ++ ++ return proc_memory_sync(hProcessor, pMpuAddr, ulSize, 0, mtype); ++} ++ ++/* ++ * ======== PROC_GetResourceInfo ======== ++ * Purpose: ++ * Enumerate the resources currently available on a processor. ++ */ ++DSP_STATUS PROC_GetResourceInfo(DSP_HPROCESSOR hProcessor, u32 uResourceType, ++ OUT struct DSP_RESOURCEINFO *pResourceInfo, ++ u32 uResourceInfoSize) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct NODE_MGR *hNodeMgr = NULL; ++ struct NLDR_OBJECT *hNldr = NULL; ++ struct RMM_TargetObj *rmm = NULL; ++ struct IO_MGR *hIOMgr = NULL; /* IO manager handle */ ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pResourceInfo != NULL); ++ DBC_Require(uResourceInfoSize >= sizeof(struct DSP_RESOURCEINFO)); ++ ++ GT_4trace(PROC_DebugMask, GT_ENTER, "Entered PROC_GetResourceInfo,\n\t" ++ "hProcessor: 0x%x\n\tuResourceType: 0x%x\n\tpResourceInfo:" ++ " 0x%x\n\t uResourceInfoSize 0x%x\n", hProcessor, ++ uResourceType, pResourceInfo, uResourceInfoSize); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_GetResourceInfo: InValid " ++ "Processor Handle \n"); ++ goto func_end; ++ } ++ switch (uResourceType) { ++ case DSP_RESOURCE_DYNDARAM: ++ case DSP_RESOURCE_DYNSARAM: ++ case DSP_RESOURCE_DYNEXTERNAL: ++ case DSP_RESOURCE_DYNSRAM: ++ if (DSP_FAILED(DEV_GetNodeManager(pProcObject->hDevObject, ++ &hNodeMgr))) ++ goto func_end; ++ ++ if (DSP_SUCCEEDED(NODE_GetNldrObj(hNodeMgr, &hNldr))) { ++ if (DSP_SUCCEEDED(NLDR_GetRmmManager(hNldr, &rmm))) { ++ DBC_Assert(rmm != NULL); ++ status = DSP_EVALUE; ++ if (RMM_stat(rmm, ++ (enum DSP_MEMTYPE)uResourceType, ++ (struct DSP_MEMSTAT *)&(pResourceInfo-> ++ result.memStat))) ++ status = DSP_SOK; ++ } ++ } ++ break; ++ case DSP_RESOURCE_PROCLOAD: ++ status = DEV_GetIOMgr(pProcObject->hDevObject, &hIOMgr); ++ status = pProcObject->pIntfFxns->pfnIOGetProcLoad(hIOMgr, ++ (struct DSP_PROCLOADSTAT *)&(pResourceInfo-> ++ result.procLoadStat)); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "Error in procLoadStat function 0x%x\n", status); ++ } ++ break; ++ default: ++ status = DSP_EFAIL; ++ break; ++ } ++func_end: ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_GetResourceInfo, " ++ "status 0x%x\n", status); ++ return status; ++} ++ ++/* ++ * ======== PROC_Exit ======== ++ * Purpose: ++ * Decrement reference count, and free resources when reference count is ++ * 0. ++ */ ++void PROC_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ if (hProcLock) ++ (void)SYNC_DeleteCS(hProcLock); ++ ++ cRefs--; ++ ++ GT_1trace(PROC_DebugMask, GT_5CLASS, ++ "Entered PROC_Exit, ref count:0x%x\n", cRefs); ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== PROC_GetDevObject ======== ++ * Purpose: ++ * Return the Dev Object handle for a given Processor. 
++ * ++ */ ++DSP_STATUS PROC_GetDevObject(DSP_HPROCESSOR hProcessor, ++ struct DEV_OBJECT **phDevObject) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phDevObject != NULL); ++ ++ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ *phDevObject = pProcObject->hDevObject; ++ status = DSP_SOK; ++ } else { ++ *phDevObject = NULL; ++ status = DSP_EHANDLE; ++ } ++ ++ DBC_Ensure((DSP_SUCCEEDED(status) && *phDevObject != NULL) || ++ (DSP_FAILED(status) && *phDevObject == NULL)); ++ ++ return status; ++} ++ ++/* ++ * ======== PROC_GetState ======== ++ * Purpose: ++ * Report the state of the specified DSP processor. ++ */ ++DSP_STATUS PROC_GetState(DSP_HPROCESSOR hProcessor, ++ OUT struct DSP_PROCESSORSTATE *pProcStatus, ++ u32 uStateInfoSize) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ BRD_STATUS brdStatus; ++ struct DEH_MGR *hDehMgr; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pProcStatus != NULL); ++ DBC_Require(uStateInfoSize >= sizeof(struct DSP_PROCESSORSTATE)); ++ ++ GT_3trace(PROC_DebugMask, GT_ENTER, "Entering PROC_GetState, args:\n\t" ++ "pProcStatus: 0x%x\n\thProcessor: 0x%x\n\t uStateInfoSize" ++ " 0x%x\n", pProcStatus, hProcessor, uStateInfoSize); ++ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ /* First, retrieve BRD state information */ ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) ++ (pProcObject->hWmdContext, &brdStatus))) { ++ switch (brdStatus) { ++ case BRD_STOPPED: ++ pProcStatus->iState = PROC_STOPPED; ++ break; ++ case BRD_DSP_HIBERNATION: ++ /* Fall through */ ++ case BRD_RUNNING: ++ pProcStatus->iState = PROC_RUNNING; ++ break; ++ case BRD_LOADED: ++ pProcStatus->iState = PROC_LOADED; ++ break; ++ case BRD_ERROR: ++ pProcStatus->iState = PROC_ERROR; ++ break; ++ default: ++ pProcStatus->iState = 0xFF; ++ status = DSP_EFAIL; ++ break; ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_GetState: General Failure" ++ " to read the PROC Status \n"); ++ } ++ /* Next, retrieve error information, if any */ ++ status = DEV_GetDehMgr(pProcObject->hDevObject, &hDehMgr); ++ if (DSP_SUCCEEDED(status) && hDehMgr) { ++ status = (*pProcObject->pIntfFxns->pfnDehGetInfo) ++ (hDehMgr, &(pProcStatus->errInfo)); ++ if (DSP_FAILED(status)) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_GetState: Failed " ++ "retrieve exception info.\n"); ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_GetState: Failed to " ++ "retrieve DEH handle.\n"); ++ } ++ } else { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_GetState:InValid Processor Handle \n"); ++ } ++ GT_2trace(PROC_DebugMask, GT_ENTER, ++ "Exiting PROC_GetState, results:\n\t" ++ "status: 0x%x\n\tpProcStatus: 0x%x\n", status, ++ pProcStatus->iState); ++ return status; ++} ++ ++/* ++ * ======== PROC_GetTrace ======== ++ * Purpose: ++ * Retrieve the current contents of the trace buffer, located on the ++ * Processor. Predefined symbols for the trace buffer must have been ++ * configured into the DSP executable. ++ * Details: ++ * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a ++ * trace buffer, only. Treat it as an undocumented feature. ++ * This call is destructive, meaning the processor is placed in the monitor ++ * state as a result of this function. 
++ */ ++DSP_STATUS PROC_GetTrace(DSP_HPROCESSOR hProcessor, u8 *pBuf, u32 uMaxSize) ++{ ++ DSP_STATUS status; ++ status = DSP_ENOTIMPL; ++ return status; ++} ++ ++/* ++ * ======== PROC_Init ======== ++ * Purpose: ++ * Initialize PROC's private state, keeping a reference count on each call ++ */ ++bool PROC_Init(void) ++{ ++ bool fRetval = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ /* Set the Trace mask */ ++ DBC_Assert(!PROC_DebugMask.flags); ++ GT_create(&PROC_DebugMask, "PR"); /* "PR" for Processor */ ++ ++ (void)SYNC_InitializeCS(&hProcLock); ++ } ++ ++ if (fRetval) ++ cRefs++; ++ ++ GT_1trace(PROC_DebugMask, GT_5CLASS, ++ "Entered PROC_Init, ref count:0x%x\n", cRefs); ++ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); ++ ++ return fRetval; ++} ++ ++/* ++ * ======== PROC_Load ======== ++ * Purpose: ++ * Reset a processor and load a new base program image. ++ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge ++ * application developer's API. ++ */ ++DSP_STATUS PROC_Load(DSP_HPROCESSOR hProcessor, IN CONST s32 iArgc, ++ IN CONST char **aArgv, IN CONST char **aEnvp) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct IO_MGR *hIOMgr; /* IO manager handle */ ++ struct MSG_MGR *hMsgMgr; ++ struct COD_MANAGER *hCodMgr; /* Code manager handle */ ++ char *pargv0; /* temp argv[0] ptr */ ++ char **newEnvp; /* Updated envp[] array. */ ++ char szProcID[MAXPROCIDLEN]; /* Size of "PROC_ID=" */ ++ s32 cEnvp; /* Num elements in envp[]. */ ++ s32 cNewEnvp; /* " " in newEnvp[] */ ++ s32 nProcID = 0; /* Anticipate MP version. */ ++ struct DCD_MANAGER *hDCDHandle; ++ struct DMM_OBJECT *hDmmMgr; ++ u32 dwExtEnd; ++ u32 uProcId; ++#ifdef DEBUG ++ BRD_STATUS uBrdState; ++#endif ++#ifdef OPT_LOAD_TIME_INSTRUMENTATION ++ struct timeval tv1; ++ struct timeval tv2; ++#endif ++ DBC_Require(cRefs > 0); ++ DBC_Require(iArgc > 0); ++ DBC_Require(aArgv != NULL); ++#ifdef OPT_LOAD_TIME_INSTRUMENTATION ++ do_gettimeofday(&tv1); ++#endif ++#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++#endif ++ GT_2trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Load, args:\n\t" ++ "hProcessor: 0x%x\taArgv: 0x%x\n", hProcessor, aArgv[0]); ++ /* Call the WMD_BRD_Load Fxn */ ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Load: Invalid Processor Handle..\n"); ++ goto func_end; ++ } ++ if (pProcObject->bIsAlreadyAttached) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load GPP " ++ "Client is already attached status \n"); ++ } ++ if (DSP_FAILED(DEV_GetCodMgr(pProcObject->hDevObject, &hCodMgr))) { ++ status = DSP_EFAIL; ++ GT_1trace(PROC_DebugMask, GT_7CLASS, "PROC_Load: DSP_FAILED in " ++ "DEV_GetCodMgr status 0x%x \n", status); ++ goto func_end; ++ } ++ status = PROC_Stop(hProcessor); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: DSP_FAILED to Place the" ++ " Processor in Stop Mode(PROC_STOP) status 0x%x \n", ++ status); ++ goto func_end; ++ } ++ /* Place the board in the monitor state. */ ++ status = PROC_Monitor(hProcessor); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: DSP_FAILED to Place the" ++ " Processor in Monitor Mode(PROC_IDLE) status 0x%x\n", ++ status); ++ goto func_end; ++ } ++ /* Save ptr to original argv[0]. 
*/ ++ pargv0 = (char *)aArgv[0]; ++ /*Prepend "PROC_ID="to envp array for target.*/ ++ cEnvp = GetEnvpCount((char **)aEnvp); ++ cNewEnvp = (cEnvp ? (cEnvp + 1) : (cEnvp + 2)); ++ newEnvp = MEM_Calloc(cNewEnvp * sizeof(char **), MEM_PAGED); ++ if (newEnvp) { ++ status = snprintf(szProcID, MAXPROCIDLEN, PROC_ENVPROCID, ++ nProcID); ++ if (status == -1) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Load: " ++ "Proc ID string overflow \n"); ++ status = DSP_EFAIL; ++ } else { ++ newEnvp = PrependEnvp(newEnvp, (char **)aEnvp, cEnvp, ++ cNewEnvp, szProcID); ++ /* Get the DCD Handle */ ++ status = MGR_GetDCDHandle(pProcObject->hMgrObject, ++ (u32 *)&hDCDHandle); ++ if (DSP_SUCCEEDED(status)) { ++ /* Before proceeding with new load, ++ * check if a previously registered COFF ++ * exists. ++ * If yes, unregister nodes in previously ++ * registered COFF. If any error occurred, ++ * set previously registered COFF to NULL. */ ++ if (pProcObject->g_pszLastCoff != NULL) { ++ status = DCD_AutoUnregister(hDCDHandle, ++ pProcObject->g_pszLastCoff); ++ /* Regardless of auto unregister status, ++ * free previously allocated ++ * memory. */ ++ MEM_Free(pProcObject->g_pszLastCoff); ++ pProcObject->g_pszLastCoff = NULL; ++ } ++ } ++ /* On success, do COD_OpenBase() */ ++ status = COD_OpenBase(hCodMgr, (char *)aArgv[0], ++ COD_SYMB); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: COD_OpenBase " ++ "failed (0x%x)\n", status); ++ } ++ } ++ } else { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ " PROC_Load:Out of Memory \n"); ++ status = DSP_EMEMORY; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Auto-register data base */ ++ /* Get the DCD Handle */ ++ status = MGR_GetDCDHandle(pProcObject->hMgrObject, ++ (u32 *)&hDCDHandle); ++ if (DSP_SUCCEEDED(status)) { ++ /* Auto register nodes in specified COFF ++ * file. If registration did not fail, ++ * (status = DSP_SOK or DSP_EDCDNOAUTOREGISTER) ++ * save the name of the COFF file for ++ * de-registration in the future. */ ++ status = DCD_AutoRegister(hDCDHandle, (char *)aArgv[0]); ++ if (status == DSP_EDCDNOAUTOREGISTER) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: No Auto " ++ "Register section. Proceeding..\n"); ++ status = DSP_SOK; ++ } ++ if (DSP_FAILED(status)) { ++ status = DSP_EFAIL; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: Failed to " ++ "Auto Register..\n"); ++ } else { ++ DBC_Assert(pProcObject->g_pszLastCoff == NULL); ++ /* Allocate memory for pszLastCoff */ ++ pProcObject->g_pszLastCoff = MEM_Calloc( ++ (strlen((char *)aArgv[0]) + 1), ++ MEM_PAGED); ++ /* If memory allocated, save COFF file name*/ ++ if (pProcObject->g_pszLastCoff) { ++ strncpy(pProcObject->g_pszLastCoff, ++ (char *)aArgv[0], ++ (strlen((char *)aArgv[0]) + 1)); ++ } ++ } ++ } ++ } ++ /* Update shared memory address and size */ ++ if (DSP_SUCCEEDED(status)) { ++ /* Create the message manager. This must be done ++ * before calling the IOOnLoaded function. 
*/ ++ DEV_GetMsgMgr(pProcObject->hDevObject, &hMsgMgr); ++ if (!hMsgMgr) { ++ status = MSG_Create(&hMsgMgr, pProcObject->hDevObject, ++ (MSG_ONEXIT)NODE_OnExit); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ DEV_SetMsgMgr(pProcObject->hDevObject, hMsgMgr); ++ } ++ if (status == DSP_ENOTIMPL) { ++ /* It's OK not to have a message manager */ ++ status = DSP_SOK; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Set the Device object's message manager */ ++ status = DEV_GetIOMgr(pProcObject->hDevObject, &hIOMgr); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ status = (*pProcObject->pIntfFxns->pfnIOOnLoaded)(hIOMgr); ++ if (status == DSP_ENOTIMPL) { ++ /* Ok not to implement this function */ ++ status = DSP_SOK; ++ } else { ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: Failed to get shared " ++ "memory or message buffer address " ++ "from COFF status 0x%x\n", status); ++ status = DSP_EFAIL; ++ } ++ } ++ } else { ++ status = DSP_EFAIL; ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: DSP_FAILED in " ++ "MSG_Create status 0x%x\n", status); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Now, attempt to load an exec: */ ++ ++ /* Boost the OPP level to Maximum level supported by baseport*/ ++#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) ++ if (pdata->cpu_set_freq) ++ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP5]); ++#endif ++ status = COD_LoadBase(hCodMgr, iArgc, (char **)aArgv, ++ DEV_BrdWriteFxn, ++ pProcObject->hDevObject, NULL); ++ if (DSP_FAILED(status)) { ++ if (status == COD_E_OPENFAILED) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load:Failure to Load the EXE\n"); ++ } ++ if (status == COD_E_SYMBOLNOTFOUND) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load:Could not parse the file\n"); ++ } else { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: DSP_FAILED in " ++ "COD_Load status 0x%x \n", status); ++ } ++ } ++ /* Requesting the lowest opp supported*/ ++#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) ++ if (pdata->cpu_set_freq) ++ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP1]); ++#endif ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Update the Processor status to loaded */ ++ status = (*pProcObject->pIntfFxns->pfnBrdSetState) ++ (pProcObject->hWmdContext, BRD_LOADED); ++ if (DSP_SUCCEEDED(status)) { ++ pProcObject->sState = PROC_LOADED; ++ if (pProcObject->hNtfy) { ++ PROC_NotifyClients(pProcObject, ++ DSP_PROCESSORSTATECHANGE); ++ } ++ } else { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load, pfnBrdSetState " ++ "failed: 0x%x\n", status); ++ status = DSP_EFAIL; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = PROC_GetProcessorId(hProcessor, &uProcId); ++ if (uProcId == DSP_UNIT) { ++ /* Use all available DSP address space after EXTMEM ++ * for DMM */ ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMgr, EXTEND, ++ &dwExtEnd); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Load: Failed on " ++ "COD_GetSymValue %s.\n", ++ EXTEND); ++ } ++ } ++ /* Reset DMM structs and add an initial free chunk*/ ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetDmmMgr(pProcObject->hDevObject, ++ &hDmmMgr); ++ if (DSP_SUCCEEDED(status)) { ++ /* Set dwExtEnd to DMM START u8 ++ * address */ ++ dwExtEnd = (dwExtEnd + 1) * DSPWORDSIZE; ++ /* DMM memory is from EXT_END */ ++ status = DMM_CreateTables(hDmmMgr, ++ dwExtEnd, DMMPOOLSIZE); ++ } ++ } ++ } ++ } ++ /* Restore the original argv[0] */ ++ MEM_Free(newEnvp); ++ aArgv[0] = pargv0; ++#ifdef DEBUG ++ if 
(DSP_SUCCEEDED(status)) { ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) ++ (pProcObject->hWmdContext, &uBrdState))) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Load: Processor Loaded\n"); ++ DBC_Assert(uBrdState == BRD_LOADED); ++ } ++ } ++#endif ++func_end: ++#ifdef DEBUG ++ if (DSP_FAILED(status)) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, "PROC_Load: " ++ "Processor Load Failed.\n"); ++ ++ } ++#endif ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "Exiting PROC_Load, status: 0x%x\n", status); ++ DBC_Ensure((DSP_SUCCEEDED(status) && pProcObject->sState == PROC_LOADED) ++ || DSP_FAILED(status)); ++#ifdef OPT_LOAD_TIME_INSTRUMENTATION ++ do_gettimeofday(&tv2); ++ if (tv2.tv_usec < tv1.tv_usec) { ++ tv2.tv_usec += 1000000; ++ tv2.tv_sec--; ++ } ++ GT_2trace(PROC_DebugMask, GT_1CLASS, ++ "Proc_Load: time to load %d sec and %d usec \n", ++ tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec); ++#endif ++ return status; ++} ++ ++/* ++ * ======== PROC_Map ======== ++ * Purpose: ++ * Maps a MPU buffer to DSP address space. ++ */ ++DSP_STATUS PROC_Map(DSP_HPROCESSOR hProcessor, void *pMpuAddr, u32 ulSize, ++ void *pReqAddr, void **ppMapAddr, u32 ulMapAttr, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ u32 vaAlign; ++ u32 paAlign; ++ struct DMM_OBJECT *hDmmMgr; ++ u32 sizeAlign; ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE dmmRes; ++#endif ++ ++ GT_6trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Map, args:\n\t" ++ "hProcessor %x, pMpuAddr %x, ulSize %x, pReqAddr %x, " ++ "ulMapAttr %x, ppMapAddr %x\n", hProcessor, pMpuAddr, ulSize, ++ pReqAddr, ulMapAttr, ppMapAddr); ++ /* Calculate the page-aligned PA, VA and size */ ++ vaAlign = PG_ALIGN_LOW((u32) pReqAddr, PG_SIZE_4K); ++ paAlign = PG_ALIGN_LOW((u32) pMpuAddr, PG_SIZE_4K); ++ sizeAlign = PG_ALIGN_HIGH(ulSize + (u32)pMpuAddr - paAlign, ++ PG_SIZE_4K); ++ ++ GT_3trace(PROC_DebugMask, GT_ENTER, "PROC_Map: vaAlign %x, paAlign %x, " ++ "sizeAlign %x\n", vaAlign, paAlign, sizeAlign); ++ ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Map: " ++ "InValid Processor Handle \n"); ++ goto func_end; ++ } ++ /* Critical section */ ++ (void)SYNC_EnterCS(hProcLock); ++ status = DMM_GetHandle(pProcObject, &hDmmMgr); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Map: Failed to get DMM Mgr " ++ "handle: 0x%x\n", status); ++ } else { ++ status = DMM_MapMemory(hDmmMgr, vaAlign, sizeAlign); ++ } ++ /* Add mapping to the page tables. */ ++ if (DSP_SUCCEEDED(status)) { ++ ++ status = (*pProcObject->pIntfFxns->pfnBrdMemMap) ++ (pProcObject->hWmdContext, paAlign, vaAlign, sizeAlign, ++ ulMapAttr); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Mapped address = MSB of VA | LSB of PA */ ++ *ppMapAddr = (void *) (vaAlign | ((u32) pMpuAddr & ++ (PG_SIZE_4K - 1))); ++ } else { ++ DMM_UnMapMemory(hDmmMgr, vaAlign, &sizeAlign); ++ } ++ (void)SYNC_LeaveCS(hProcLock); ++ ++#ifndef RES_CLEANUP_DISABLE ++ if (DSP_SUCCEEDED(status)) { ++ DRV_InsertDMMResElement(&dmmRes, pr_ctxt); ++ DRV_UpdateDMMResElement(dmmRes, (u32)pMpuAddr, ulSize, ++ (u32)pReqAddr, (u32)*ppMapAddr, hProcessor); ++ } ++#endif ++func_end: ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Leaving PROC_Map [0x%x]", status); ++ return status; ++} ++ ++/* ++ * ======== PROC_RegisterNotify ======== ++ * Purpose: ++ * Register to be notified of specific processor events. 
++ */ ++DSP_STATUS PROC_RegisterNotify(DSP_HPROCESSOR hProcessor, u32 uEventMask, ++ u32 uNotifyType, struct DSP_NOTIFICATION ++ *hNotification) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct DEH_MGR *hDehMgr; ++ ++ DBC_Require(hNotification != NULL); ++ DBC_Require(cRefs > 0); ++ ++ GT_4trace(PROC_DebugMask, GT_ENTER, ++ "Entered PROC_RegisterNotify, args:\n\t" ++ "hProcessor: 0x%x\n\tuEventMask: 0x%x\n\tuNotifyMask:" ++ " 0x%x\n\t hNotification 0x%x\n", hProcessor, uEventMask, ++ uNotifyType, hNotification); ++ ++ /* Check processor handle */ ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_RegsiterNotify Invalid " ++ "ProcessorHandle 0x%x\n", hProcessor); ++ goto func_end; ++ } ++ /* Check if event mask is a valid processor related event */ ++ if (uEventMask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | ++ DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_MMUFAULT | ++ DSP_SYSERROR | DSP_PWRERROR)) ++ status = DSP_EVALUE; ++ ++ /* Check if notify type is valid */ ++ if (uNotifyType != DSP_SIGNALEVENT) ++ status = DSP_EVALUE; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT, ++ * or DSP_PWRERROR then register event immediately. */ ++ if (uEventMask & ++ ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR)) { ++ status = NTFY_Register(pProcObject->hNtfy, ++ hNotification, uEventMask, uNotifyType); ++ /* Special case alert, special case alert! ++ * If we're trying to *deregister* (i.e. uEventMask ++ * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification, ++ * we have to deregister with the DEH manager. ++ * There's no way to know, based on uEventMask which ++ * manager the notification event was registered with, ++ * so if we're trying to deregister and NTFY_Register ++ * failed, we'll give the deh manager a shot. ++ */ ++ if ((uEventMask == 0) && DSP_FAILED(status)) { ++ status = DEV_GetDehMgr(pProcObject->hDevObject, ++ &hDehMgr); ++ DBC_Assert(pProcObject->pIntfFxns-> ++ pfnDehRegisterNotify); ++ status = (*pProcObject->pIntfFxns-> ++ pfnDehRegisterNotify) ++ (hDehMgr, uEventMask, uNotifyType, ++ hNotification); ++ } ++ } else { ++ status = DEV_GetDehMgr(pProcObject->hDevObject, ++ &hDehMgr); ++ DBC_Assert(pProcObject->pIntfFxns-> ++ pfnDehRegisterNotify); ++ status = (*pProcObject->pIntfFxns->pfnDehRegisterNotify) ++ (hDehMgr, uEventMask, uNotifyType, ++ hNotification); ++ if (DSP_FAILED(status)) ++ status = DSP_EFAIL; ++ ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== PROC_ReserveMemory ======== ++ * Purpose: ++ * Reserve a virtually contiguous region of DSP address space. 
++ */ ++DSP_STATUS PROC_ReserveMemory(DSP_HPROCESSOR hProcessor, u32 ulSize, ++ void **ppRsvAddr) ++{ ++ struct DMM_OBJECT *hDmmMgr; ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ ++ GT_3trace(PROC_DebugMask, GT_ENTER, ++ "Entered PROC_ReserveMemory, args:\n\t" ++ "hProcessor: 0x%x ulSize: 0x%x ppRsvAddr: 0x%x\n", hProcessor, ++ ulSize, ppRsvAddr); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Map: " ++ "InValid Processor Handle \n"); ++ goto func_end; ++ } ++ status = DMM_GetHandle(pProcObject, &hDmmMgr); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, "PROC_ReserveMemory: " ++ "Failed to get DMM Mgr handle: 0x%x\n", status); ++ } else ++ status = DMM_ReserveMemory(hDmmMgr, ulSize, (u32 *)ppRsvAddr); ++ ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Leaving PROC_ReserveMemory [0x%x]", ++ status); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== PROC_Start ======== ++ * Purpose: ++ * Start a processor running. ++ */ ++DSP_STATUS PROC_Start(DSP_HPROCESSOR hProcessor) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct COD_MANAGER *hCodMgr; /* Code manager handle */ ++ u32 dwDspAddr; /* Loaded code's entry point. */ ++#ifdef DEBUG ++ BRD_STATUS uBrdState; ++#endif ++ DBC_Require(cRefs > 0); ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Start, args:\n\t" ++ "hProcessor: 0x%x\n", hProcessor); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Start :InValid Handle \n"); ++ goto func_end; ++ } ++ /* Call the WMD_BRD_Start */ ++ if (pProcObject->sState != PROC_LOADED) { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Start :Wrong state \n"); ++ status = DSP_EWRONGSTATE; ++ goto func_end; ++ } ++ status = DEV_GetCodMgr(pProcObject->hDevObject, &hCodMgr); ++ if (DSP_FAILED(status)) { ++ status = DSP_EFAIL; ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "Processor Start DSP_FAILED " ++ "in Getting DEV_GetCodMgr status 0x%x\n", status); ++ goto func_cont; ++ } ++ status = COD_GetEntry(hCodMgr, &dwDspAddr); ++ if (DSP_FAILED(status)) { ++ status = DSP_EFAIL; ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "Processor Start DSP_FAILED in " ++ "Getting COD_GetEntry status 0x%x\n", status); ++ goto func_cont; ++ } ++ status = (*pProcObject->pIntfFxns->pfnBrdStart) ++ (pProcObject->hWmdContext, dwDspAddr); ++ if (DSP_FAILED(status)) { ++ status = DSP_EFAIL; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Start Failed to Start the board\n"); ++ goto func_cont; ++ } ++ /* Call DEV_Create2 */ ++ status = DEV_Create2(pProcObject->hDevObject); ++ if (DSP_SUCCEEDED(status)) { ++ pProcObject->sState = PROC_RUNNING; ++ /* Deep sleep switces off the peripheral clocks. ++ * we just put the DSP CPU in idle in the idle loop. ++ * so there is no need to send a command to DSP */ ++ ++ if (pProcObject->hNtfy) { ++ PROC_NotifyClients(pProcObject, ++ DSP_PROCESSORSTATECHANGE); ++ } ++ GT_0trace(PROC_DebugMask, GT_1CLASS, "PROC_Start: Processor " ++ "Started and running \n"); ++ } else { ++ /* Failed to Create Node Manager and DISP Object ++ * Stop the Processor from running. 
Put it in STOPPED State */ ++ (void)(*pProcObject->pIntfFxns->pfnBrdStop)(pProcObject-> ++ hWmdContext); ++ status = DSP_EFAIL; ++ pProcObject->sState = PROC_STOPPED; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Start " ++ "Failed to Create the Node Manager\n"); ++ } ++func_cont: ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) { ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) ++ (pProcObject->hWmdContext, &uBrdState))) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Start: Processor State is RUNNING \n"); ++ DBC_Assert(uBrdState != BRD_HIBERNATION); ++ } ++ } ++#endif ++func_end: ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "Exiting PROC_Start, status 0x%x\n", status); ++ DBC_Ensure((DSP_SUCCEEDED(status) && pProcObject->sState == ++ PROC_RUNNING) || DSP_FAILED(status)); ++ return status; ++} ++ ++/* ++ * ======== PROC_Stop ======== ++ * Purpose: ++ * Stop a processor running. ++ */ ++DSP_STATUS PROC_Stop(DSP_HPROCESSOR hProcessor) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct MSG_MGR *hMsgMgr; ++ struct NODE_MGR *hNodeMgr; ++ DSP_HNODE hNode; ++ u32 uNodeTabSize = 1; ++ u32 uNumNodes = 0; ++ u32 uNodesAllocated = 0; ++ BRD_STATUS uBrdState; ++ ++ DBC_Require(cRefs > 0); ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Stop, args:\n\t" ++ "hProcessor: 0x%x\n", hProcessor); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Stop :InValid Handle \n"); ++ goto func_end; ++ } ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) ++ (pProcObject->hWmdContext, &uBrdState))) { ++ /* Clean up all the resources except the current running ++ * process resources */ ++ if (uBrdState == BRD_ERROR) ++ PROC_CleanupAllResources(); ++ } ++ /* check if there are any running nodes */ ++ status = DEV_GetNodeManager(pProcObject->hDevObject, &hNodeMgr); ++ if (DSP_SUCCEEDED(status) && hNodeMgr) { ++ status = NODE_EnumNodes(hNodeMgr, &hNode, uNodeTabSize, ++ &uNumNodes, &uNodesAllocated); ++ if ((status == DSP_ESIZE) || (uNodesAllocated > 0)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "Can't stop device, Active " ++ "nodes = 0x%x \n", uNodesAllocated); ++ return DSP_EWRONGSTATE; ++ } ++ } ++ /* Call the WMD_BRD_Stop */ ++ /* It is OK to stop a device that does n't have nodes OR not started */ ++ status = (*pProcObject->pIntfFxns->pfnBrdStop)(pProcObject-> ++ hWmdContext); ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Stop: Processor Stopped, " ++ "i.e in standby mode \n"); ++ pProcObject->sState = PROC_STOPPED; ++ /* Destory the Node Manager, MSG Manager */ ++ if (DSP_SUCCEEDED(DEV_Destroy2(pProcObject->hDevObject))) { ++ /* Destroy the MSG by calling MSG_Delete */ ++ DEV_GetMsgMgr(pProcObject->hDevObject, &hMsgMgr); ++ if (hMsgMgr) { ++ MSG_Delete(hMsgMgr); ++ DEV_SetMsgMgr(pProcObject->hDevObject, NULL); ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns-> ++ pfnBrdStatus)(pProcObject->hWmdContext, ++ &uBrdState))) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Monitor:Processor Stopped \n"); ++ DBC_Assert(uBrdState == BRD_STOPPED); ++ } ++#endif ++ } else { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Stop Couldn't delete node manager \n"); ++ } ++ } else { ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Stop Failed to Stop the processor/device \n"); ++ } ++func_end: ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Stop, status 0x%x\n", ++ status); ++ ++ return status; ++} 
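/*
 * Illustration (a sketch, not part of the original patch): the page-alignment
 * arithmetic used by PROC_Map() above and PROC_UnMap() below. Assuming
 * PG_ALIGN_LOW() rounds down and PG_ALIGN_HIGH() rounds up to the 4 KiB
 * boundary -- which is what the sizeAlign computation in PROC_Map() implies --
 * a request with pMpuAddr = 0x40021100, ulSize = 0x2000 and a reserved DSP
 * address pReqAddr = 0x20000000 works out to:
 *
 *     paAlign    = PG_ALIGN_LOW(0x40021100, PG_SIZE_4K)             = 0x40021000
 *     vaAlign    = PG_ALIGN_LOW(0x20000000, PG_SIZE_4K)             = 0x20000000
 *     sizeAlign  = PG_ALIGN_HIGH(0x2000 + 0x40021100 - 0x40021000,
 *                                PG_SIZE_4K)                        = 0x3000
 *     *ppMapAddr = vaAlign | (0x40021100 & (PG_SIZE_4K - 1))        = 0x20000100
 *
 * That is, a 0x2000-byte buffer starting 0x100 bytes into a page spans three
 * 4 KiB pages, and the DSP address returned to the caller keeps the original
 * page offset. PROC_UnMap() only needs the mapped address: it recomputes
 * vaAlign the same way and lets DMM_UnMapMemory() report the size to remove
 * from the page tables. The example addresses are hypothetical.
 */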
++ ++/* ++ * ======== PROC_UnMap ======== ++ * Purpose: ++ * Removes a MPU buffer mapping from the DSP address space. ++ */ ++DSP_STATUS PROC_UnMap(DSP_HPROCESSOR hProcessor, void *pMapAddr, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ struct DMM_OBJECT *hDmmMgr; ++ u32 vaAlign; ++ u32 sizeAlign; ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE dmmRes; ++#endif ++ GT_2trace(PROC_DebugMask, GT_ENTER, ++ "Entered PROC_UnMap, args:\n\thProcessor:" ++ "0x%x pMapAddr: 0x%x\n", hProcessor, pMapAddr); ++ ++ vaAlign = PG_ALIGN_LOW((u32) pMapAddr, PG_SIZE_4K); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_UnMap: " ++ "InValid Processor Handle \n"); ++ goto func_end; ++ } ++ ++ status = DMM_GetHandle(hProcessor, &hDmmMgr); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ /* Critical section */ ++ (void)SYNC_EnterCS(hProcLock); ++ if (DSP_FAILED(status)) { ++ GT_1trace(PROC_DebugMask, GT_7CLASS, "PROC_UnMap: " ++ "Failed to get DMM Mgr handle: 0x%x\n", status); ++ } else { ++ /* Update DMM structures. Get the size to unmap. ++ This function returns error if the VA is not mapped */ ++ status = DMM_UnMapMemory(hDmmMgr, (u32) vaAlign, &sizeAlign); ++ } ++ /* Remove mapping from the page tables. */ ++ if (DSP_SUCCEEDED(status)) { ++ status = (*pProcObject->pIntfFxns->pfnBrdMemUnMap) ++ (pProcObject->hWmdContext, vaAlign, sizeAlign); ++ } ++ (void)SYNC_LeaveCS(hProcLock); ++#ifndef RES_CLEANUP_DISABLE ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "PROC_UnMap DRV_GetDMMResElement " ++ "pMapAddr:[0x%x]", pMapAddr); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (pr_ctxt && DRV_GetDMMResElement((u32)pMapAddr, &dmmRes, pr_ctxt) ++ != DSP_ENOTFOUND) ++ DRV_RemoveDMMResElement(dmmRes, pr_ctxt); ++#endif ++func_end: ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "Leaving PROC_UnMap [0x%x]", status); ++ return status; ++} ++ ++/* ++ * ======== PROC_UnReserveMemory ======== ++ * Purpose: ++ * Frees a previously reserved region of DSP address space. ++ */ ++DSP_STATUS PROC_UnReserveMemory(DSP_HPROCESSOR hProcessor, void *pRsvAddr) ++{ ++ struct DMM_OBJECT *hDmmMgr; ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; ++ ++ GT_2trace(PROC_DebugMask, GT_ENTER, ++ "Entered PROC_UnReserveMemory, args:\n\t" ++ "hProcessor: 0x%x pRsvAddr: 0x%x\n", hProcessor, pRsvAddr); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_UnMap: " ++ "InValid Processor Handle \n"); ++ goto func_end; ++ } ++ status = DMM_GetHandle(pProcObject, &hDmmMgr); ++ if (DSP_FAILED(status)) ++ GT_1trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_UnReserveMemory: Failed to get DMM Mgr " ++ "handle: 0x%x\n", status); ++ else ++ status = DMM_UnReserveMemory(hDmmMgr, (u32) pRsvAddr); ++ ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "Leaving PROC_UnReserveMemory [0x%x]", ++ status); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== = PROC_Monitor ======== == ++ * Purpose: ++ * Place the Processor in Monitor State. This is an internal ++ * function and a requirement before Processor is loaded. ++ * This does a WMD_BRD_Stop, DEV_Destroy2 and WMD_BRD_Monitor. ++ * In DEV_Destroy2 we delete the node manager. ++ * Parameters: ++ * hProcObject: Handle to Processor Object ++ * Returns: ++ * DSP_SOK: Processor placed in monitor mode. 
++ * !DSP_SOK: Failed to place processor in monitor mode. ++ * Requires: ++ * Valid Processor Handle ++ * Ensures: ++ * Success: ProcObject state is PROC_IDLE ++ */ ++static DSP_STATUS PROC_Monitor(struct PROC_OBJECT *hProcObject) ++{ ++ DSP_STATUS status = DSP_EFAIL; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcObject; ++ struct MSG_MGR *hMsgMgr; ++#ifdef DEBUG ++ BRD_STATUS uBrdState; ++#endif ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)); ++ ++ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Monitor, args:\n\t" ++ "hProcessor: 0x%x\n", hProcObject); ++ /* This is needed only when Device is loaded when it is ++ * already 'ACTIVE' */ ++ /* Destory the Node Manager, MSG Manager */ ++ if (DSP_SUCCEEDED(DEV_Destroy2(pProcObject->hDevObject))) { ++ /* Destroy the MSG by calling MSG_Delete */ ++ DEV_GetMsgMgr(pProcObject->hDevObject, &hMsgMgr); ++ if (hMsgMgr) { ++ MSG_Delete(hMsgMgr); ++ DEV_SetMsgMgr(pProcObject->hDevObject, NULL); ++ } ++ } ++ /* Place the Board in the Monitor State */ ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdMonitor) ++ (pProcObject->hWmdContext))) { ++ status = DSP_SOK; ++#ifdef DEBUG ++ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) ++ (pProcObject->hWmdContext, &uBrdState))) { ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_Monitor:Processor in " ++ "Monitor State\n"); ++ DBC_Assert(uBrdState == BRD_IDLE); ++ } ++#endif ++ } else { ++ /* Monitor Failure */ ++ GT_0trace(PROC_DebugMask, GT_7CLASS, ++ "PROC_Monitor: Processor Could not" ++ "be put in Monitor mode \n"); ++ } ++ GT_1trace(PROC_DebugMask, GT_ENTER, ++ "Exiting PROC_Monitor, status 0x%x\n", ++ status); ++#ifdef DEBUG ++ DBC_Ensure((DSP_SUCCEEDED(status) && uBrdState == BRD_IDLE) || ++ DSP_FAILED(status)); ++#endif ++ return status; ++} ++ ++/* ++ * ======== GetEnvpCount ======== ++ * Purpose: ++ * Return the number of elements in the envp array, including the ++ * terminating NULL element. ++ */ ++static s32 GetEnvpCount(char **envp) ++{ ++ s32 cRetval = 0; ++ if (envp) { ++ while (*envp++) ++ cRetval++; ++ ++ cRetval += 1; /* Include the terminating NULL in the count. */ ++ } ++ ++ return cRetval; ++} ++ ++/* ++ * ======== PrependEnvp ======== ++ * Purpose: ++ * Prepend an environment variable=value pair to the new envp array, and ++ * copy in the existing var=value pairs in the old envp array. ++ */ ++static char **PrependEnvp(char **newEnvp, char **envp, s32 cEnvp, s32 cNewEnvp, ++ char *szVar) ++{ ++ char **ppEnvp = newEnvp; ++ ++ DBC_Require(newEnvp); ++ ++ /* Prepend new environ var=value string */ ++ *newEnvp++ = szVar; ++ ++ /* Copy user's environment into our own. */ ++ while (cEnvp--) ++ *newEnvp++ = *envp++; ++ ++ /* Ensure NULL terminates the new environment strings array. */ ++ if (cEnvp == 0) ++ *newEnvp = NULL; ++ ++ return ppEnvp; ++} ++ ++/* ++ * ======== PROC_NotifyClients ======== ++ * Purpose: ++ * Notify the processor the events. 
++ */ ++DSP_STATUS PROC_NotifyClients(DSP_HPROCESSOR hProc, u32 uEvents) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProc; ++ ++ DBC_Require(MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)); ++ DBC_Require(IsValidProcEvent(uEvents)); ++ DBC_Require(cRefs > 0); ++ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_NotifyClients: " ++ "InValid Processor Handle \n"); ++ goto func_end; ++ } ++ ++ NTFY_Notify(pProcObject->hNtfy, uEvents); ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_NotifyClients :Signaled. \n"); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== PROC_NotifyAllClients ======== ++ * Purpose: ++ * Notify the processor the events. This includes notifying all clients ++ * attached to a particulat DSP. ++ */ ++DSP_STATUS PROC_NotifyAllClients(DSP_HPROCESSOR hProc, u32 uEvents) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProc; ++ ++ DBC_Require(MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)); ++ DBC_Require(IsValidProcEvent(uEvents)); ++ DBC_Require(cRefs > 0); ++ ++ DEV_NotifyClients(pProcObject->hDevObject, uEvents); ++ ++ GT_0trace(PROC_DebugMask, GT_1CLASS, ++ "PROC_NotifyAllClients :Signaled. \n"); ++ ++ return status; ++} ++ ++/* ++ * ======== PROC_GetProcessorId ======== ++ * Purpose: ++ * Retrieves the processor ID. ++ */ ++DSP_STATUS PROC_GetProcessorId(DSP_HPROCESSOR hProc, u32 *procID) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProc; ++ ++ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) ++ *procID = pProcObject->uProcessor; ++ else { ++ status = DSP_EHANDLE; ++ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_GetProcessorId: " ++ "InValid Processor Handle \n"); ++ } ++ return status; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/pwr.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/pwr.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/pwr.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/pwr.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,184 @@ ++/* ++ * pwr.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== PWR.c ======== ++ * PWR API for controlling DSP power states. ++ * ++ * Public Functions: ++ * PWR_SleepDSP ++ * PWR_WakeDSP ++ * ++ *! Revision History ++ *! ================ ++ *! 18-Feb-2003 vp Code review updates. ++ *! 18-Oct-2002 vp Ported to Linux platform. ++ *! 22-May-2002 sg Do PWR-to-IOCTL code mapping in PWR_SleepDSP. ++ *! 29-Apr-2002 sg Initial. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++ ++/* ----------------------------------- Link Driver */ ++#include ++ ++/* ++ * ======== PWR_SleepDSP ======== ++ * Send command to DSP to enter sleep state. ++ */ ++DSP_STATUS PWR_SleepDSP(IN CONST u32 sleepCode, IN CONST u32 timeout) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct WMD_DEV_CONTEXT *dwContext; ++ DSP_STATUS status = DSP_EFAIL; ++ struct DEV_OBJECT *hDevObject = NULL; ++ u32 ioctlcode = 0; ++ u32 arg = timeout; ++ ++ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); ++ hDevObject != NULL; ++ hDevObject = ++ (struct DEV_OBJECT *)DRV_GetNextDevObject ++ ((u32)hDevObject)) { ++ if (DSP_FAILED(DEV_GetWMDContext(hDevObject, ++ (struct WMD_DEV_CONTEXT **)&dwContext))) { ++ continue; ++ } ++ if (DSP_FAILED(DEV_GetIntfFxns(hDevObject, ++ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { ++ continue; ++ } ++ if (sleepCode == PWR_DEEPSLEEP) ++ ioctlcode = WMDIOCTL_DEEPSLEEP; ++ else if (sleepCode == PWR_EMERGENCYDEEPSLEEP) ++ ioctlcode = WMDIOCTL_EMERGENCYSLEEP; ++ else ++ status = DSP_EINVALIDARG; ++ ++ if (status != DSP_EINVALIDARG) { ++ status = (*pIntfFxns->pfnDevCntrl)(dwContext, ++ ioctlcode, (void *)&arg); ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== PWR_WakeDSP ======== ++ * Send command to DSP to wake it from sleep. ++ */ ++DSP_STATUS PWR_WakeDSP(IN CONST u32 timeout) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct WMD_DEV_CONTEXT *dwContext; ++ DSP_STATUS status = DSP_EFAIL; ++ struct DEV_OBJECT *hDevObject = NULL; ++ u32 arg = timeout; ++ ++ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); ++ hDevObject != NULL; ++ hDevObject = (struct DEV_OBJECT *)DRV_GetNextDevObject ++ ((u32)hDevObject)) { ++ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, ++ (struct WMD_DEV_CONTEXT **)&dwContext))) { ++ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, ++ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { ++ status = (*pIntfFxns->pfnDevCntrl)(dwContext, ++ WMDIOCTL_WAKEUP, (void *)&arg); ++ } ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== PWR_PM_PreScale======== ++ * Sends pre-notification message to DSP. ++ */ ++DSP_STATUS PWR_PM_PreScale(IN u16 voltage_domain, u32 level) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct WMD_DEV_CONTEXT *dwContext; ++ DSP_STATUS status = DSP_EFAIL; ++ struct DEV_OBJECT *hDevObject = NULL; ++ u32 arg[2]; ++ ++ arg[0] = voltage_domain; ++ arg[1] = level; ++ ++ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); ++ hDevObject != NULL; ++ hDevObject = (struct DEV_OBJECT *)DRV_GetNextDevObject ++ ((u32)hDevObject)) { ++ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, ++ (struct WMD_DEV_CONTEXT **)&dwContext))) { ++ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, ++ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { ++ status = (*pIntfFxns->pfnDevCntrl)(dwContext, ++ WMDIOCTL_PRESCALE_NOTIFY, ++ (void *)&arg); ++ } ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== PWR_PM_PostScale======== ++ * Sends post-notification message to DSP. 
++ */ ++DSP_STATUS PWR_PM_PostScale(IN u16 voltage_domain, u32 level) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct WMD_DEV_CONTEXT *dwContext; ++ DSP_STATUS status = DSP_EFAIL; ++ struct DEV_OBJECT *hDevObject = NULL; ++ u32 arg[2]; ++ ++ arg[0] = voltage_domain; ++ arg[1] = level; ++ ++ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); ++ hDevObject != NULL; ++ hDevObject = (struct DEV_OBJECT *)DRV_GetNextDevObject ++ ((u32)hDevObject)) { ++ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, ++ (struct WMD_DEV_CONTEXT **)&dwContext))) { ++ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, ++ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { ++ status = (*pIntfFxns->pfnDevCntrl)(dwContext, ++ WMDIOCTL_POSTSCALE_NOTIFY, ++ (void *)&arg); ++ } ++ } ++ } ++ return status; ++ ++} ++ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/rmm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/rmm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/rmm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/rmm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,604 @@ ++/* ++ * rmm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== rmm.c ======== ++ * Description: ++ * ++ * This memory manager provides general heap management and arbitrary ++ * alignment for any number of memory segments. ++ * ++ * Notes: ++ * ++ * Memory blocks are allocated from the end of the first free memory ++ * block large enough to satisfy the request. Alignment requirements ++ * are satisfied by "sliding" the block forward until its base satisfies ++ * the alignment specification; if this is not possible then the next ++ * free block large enough to hold the request is tried. ++ * ++ * Since alignment can cause the creation of a new free block - the ++ * unused memory formed between the start of the original free block ++ * and the start of the allocated block - the memory manager must free ++ * this memory to prevent a memory leak. ++ * ++ * Overlay memory is managed by reserving through RMM_alloc, and freeing ++ * it through RMM_free. The memory manager prevents DSP code/data that is ++ * overlayed from being overwritten as long as the memory it runs at has ++ * been allocated, and not yet freed. ++ * ++ *! Revision History ++ *! ================ ++ *! 18-Feb-2003 vp Code review updates. ++ *! 18-Oct-2002 vp Ported to Linux Platform. ++ *! 24-Sep-2002 map Updated from Code Review ++ *! 25-Jun-2002 jeh Free from segid passed to RMM_free(). ++ *! 24-Apr-2002 jeh Determine segid based on address in RMM_free(). (No way ++ *! to keep track of segid with dynamic loader library.) ++ *! 16-Oct-2001 jeh Based on gen tree rm.c. Added support for overlays. 
++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++#define RMM_TARGSIGNATURE 0x544d4d52 /* "TMMR" */ ++ ++/* ++ * ======== RMM_Header ======== ++ * This header is used to maintain a list of free memory blocks. ++ */ ++struct RMM_Header { ++ struct RMM_Header *next; /* form a free memory link list */ ++ u32 size; /* size of the free memory */ ++ u32 addr; /* DSP address of memory block */ ++} ; ++ ++/* ++ * ======== RMM_OvlySect ======== ++ * Keeps track of memory occupied by overlay section. ++ */ ++struct RMM_OvlySect { ++ struct LST_ELEM listElem; ++ u32 addr; /* Start of memory section */ ++ u32 size; /* Length (target MAUs) of section */ ++ s32 page; /* Memory page */ ++}; ++ ++/* ++ * ======== RMM_TargetObj ======== ++ */ ++struct RMM_TargetObj { ++ u32 dwSignature; ++ struct RMM_Segment *segTab; ++ struct RMM_Header **freeList; ++ u32 numSegs; ++ struct LST_LIST *ovlyList; /* List of overlay memory in use */ ++}; ++ ++#if GT_TRACE ++static struct GT_Mask RMM_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static u32 cRefs; /* module reference count */ ++ ++static bool allocBlock(struct RMM_TargetObj *target, u32 segid, u32 size, ++ u32 align, u32 *dspAddr); ++static bool freeBlock(struct RMM_TargetObj *target, u32 segid, u32 addr, ++ u32 size); ++ ++/* ++ * ======== RMM_alloc ======== ++ */ ++DSP_STATUS RMM_alloc(struct RMM_TargetObj *target, u32 segid, u32 size, ++ u32 align, u32 *dspAddr, bool reserve) ++{ ++ struct RMM_OvlySect *sect; ++ struct RMM_OvlySect *prevSect = NULL; ++ struct RMM_OvlySect *newSect; ++ u32 addr; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); ++ DBC_Require(dspAddr != NULL); ++ DBC_Require(size > 0); ++ DBC_Require(reserve || (target->numSegs > 0)); ++ DBC_Require(cRefs > 0); ++ ++ GT_6trace(RMM_debugMask, GT_ENTER, ++ "RMM_alloc(0x%lx, 0x%lx, 0x%lx, 0x%lx, " ++ "0x%lx, 0x%lx)\n", target, segid, size, align, dspAddr, ++ reserve); ++ if (!reserve) { ++ if (!allocBlock(target, segid, size, align, dspAddr)) { ++ status = DSP_EMEMORY; ++ } else { ++ /* Increment the number of allocated blocks in this ++ * segment */ ++ target->segTab[segid].number++; ++ } ++ goto func_end; ++ } ++ /* An overlay section - See if block is already in use. If not, ++ * insert into the list in ascending address size. */ ++ addr = *dspAddr; ++ sect = (struct RMM_OvlySect *)LST_First(target->ovlyList); ++ /* Find place to insert new list element. List is sorted from ++ * smallest to largest address. */ ++ while (sect != NULL) { ++ if (addr <= sect->addr) { ++ /* Check for overlap with sect */ ++ if ((addr + size > sect->addr) || (prevSect && ++ (prevSect->addr + prevSect->size > addr))) { ++ status = DSP_EOVERLAYMEMORY; ++ } ++ break; ++ } ++ prevSect = sect; ++ sect = (struct RMM_OvlySect *)LST_Next(target->ovlyList, ++ (struct LST_ELEM *)sect); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* No overlap - allocate list element for new section. 
*/ ++ newSect = MEM_Calloc(sizeof(struct RMM_OvlySect), MEM_PAGED); ++ if (newSect == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ LST_InitElem((struct LST_ELEM *)newSect); ++ newSect->addr = addr; ++ newSect->size = size; ++ newSect->page = segid; ++ if (sect == NULL) { ++ /* Put new section at the end of the list */ ++ LST_PutTail(target->ovlyList, ++ (struct LST_ELEM *)newSect); ++ } else { ++ /* Put new section just before sect */ ++ LST_InsertBefore(target->ovlyList, ++ (struct LST_ELEM *)newSect, ++ (struct LST_ELEM *)sect); ++ } ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== RMM_create ======== ++ */ ++DSP_STATUS RMM_create(struct RMM_TargetObj **pTarget, ++ struct RMM_Segment segTab[], u32 numSegs) ++{ ++ struct RMM_Header *hptr; ++ struct RMM_Segment *sptr, *tmp; ++ struct RMM_TargetObj *target; ++ s32 i; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(pTarget != NULL); ++ DBC_Require(numSegs == 0 || segTab != NULL); ++ ++ GT_3trace(RMM_debugMask, GT_ENTER, ++ "RMM_create(0x%lx, 0x%lx, 0x%lx)\n", ++ pTarget, segTab, numSegs); ++ ++ /* Allocate DBL target object */ ++ MEM_AllocObject(target, struct RMM_TargetObj, RMM_TARGSIGNATURE); ++ ++ if (target == NULL) { ++ GT_0trace(RMM_debugMask, GT_6CLASS, ++ "RMM_create: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ target->numSegs = numSegs; ++ if (!(numSegs > 0)) ++ goto func_cont; ++ ++ /* Allocate the memory for freelist from host's memory */ ++ target->freeList = MEM_Calloc(numSegs * sizeof(struct RMM_Header *), ++ MEM_PAGED); ++ if (target->freeList == NULL) { ++ GT_0trace(RMM_debugMask, GT_6CLASS, ++ "RMM_create: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } else { ++ /* Allocate headers for each element on the free list */ ++ for (i = 0; i < (s32) numSegs; i++) { ++ target->freeList[i] = ++ MEM_Calloc(sizeof(struct RMM_Header), ++ MEM_PAGED); ++ if (target->freeList[i] == NULL) { ++ GT_0trace(RMM_debugMask, GT_6CLASS, ++ "RMM_create: Memory " ++ "allocation failed\n"); ++ status = DSP_EMEMORY; ++ break; ++ } ++ } ++ /* Allocate memory for initial segment table */ ++ target->segTab = MEM_Calloc(numSegs * ++ sizeof(struct RMM_Segment), MEM_PAGED); ++ if (target->segTab == NULL) { ++ GT_0trace(RMM_debugMask, GT_6CLASS, ++ "RMM_create: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } else { ++ /* Initialize segment table and free list */ ++ sptr = target->segTab; ++ for (i = 0, tmp = segTab; numSegs > 0; numSegs--, i++) { ++ *sptr = *tmp; ++ hptr = target->freeList[i]; ++ hptr->addr = tmp->base; ++ hptr->size = tmp->length; ++ hptr->next = NULL; ++ tmp++; ++ sptr++; ++ } ++ } ++ } ++func_cont: ++ /* Initialize overlay memory list */ ++ if (DSP_SUCCEEDED(status)) { ++ target->ovlyList = LST_Create(); ++ if (target->ovlyList == NULL) { ++ GT_0trace(RMM_debugMask, GT_6CLASS, ++ "RMM_create: Memory allocation failed\n"); ++ status = DSP_EMEMORY; ++ } ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ *pTarget = target; ++ } else { ++ *pTarget = NULL; ++ if (target) ++ RMM_delete(target); ++ ++ } ++ ++ DBC_Ensure((DSP_SUCCEEDED(status) && MEM_IsValidHandle((*pTarget), ++ RMM_TARGSIGNATURE)) || (DSP_FAILED(status) && *pTarget == ++ NULL)); ++ ++ return status; ++} ++ ++/* ++ * ======== RMM_delete ======== ++ */ ++void RMM_delete(struct RMM_TargetObj *target) ++{ ++ struct RMM_OvlySect *pSect; ++ struct RMM_Header *hptr; ++ struct RMM_Header *next; ++ u32 i; ++ ++ DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); ++ ++ 
GT_1trace(RMM_debugMask, GT_ENTER, "RMM_delete(0x%lx)\n", target); ++ ++ if (target->segTab != NULL) ++ MEM_Free(target->segTab); ++ ++ if (target->ovlyList) { ++ while ((pSect = (struct RMM_OvlySect *)LST_GetHead ++ (target->ovlyList))) { ++ MEM_Free(pSect); ++ } ++ DBC_Assert(LST_IsEmpty(target->ovlyList)); ++ LST_Delete(target->ovlyList); ++ } ++ ++ if (target->freeList != NULL) { ++ /* Free elements on freelist */ ++ for (i = 0; i < target->numSegs; i++) { ++ hptr = next = target->freeList[i]; ++ while (next) { ++ hptr = next; ++ next = hptr->next; ++ MEM_Free(hptr); ++ } ++ } ++ MEM_Free(target->freeList); ++ } ++ ++ MEM_FreeObject(target); ++} ++ ++/* ++ * ======== RMM_exit ======== ++ */ ++void RMM_exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(RMM_debugMask, GT_5CLASS, "RMM_exit() ref count: 0x%x\n", ++ cRefs); ++ ++ if (cRefs == 0) ++ MEM_Exit(); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== RMM_free ======== ++ */ ++bool RMM_free(struct RMM_TargetObj *target, u32 segid, u32 addr, u32 size, ++ bool reserved) ++ ++{ ++ struct RMM_OvlySect *sect; ++ bool retVal = true; ++ ++ DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); ++ ++ DBC_Require(reserved || segid < target->numSegs); ++ DBC_Require(reserved || (addr >= target->segTab[segid].base && ++ (addr + size) <= (target->segTab[segid].base + ++ target->segTab[segid].length))); ++ ++ GT_5trace(RMM_debugMask, GT_ENTER, ++ "RMM_free(0x%lx, 0x%lx, 0x%lx, 0x%lx, " ++ "0x%lx)\n", target, segid, addr, size, reserved); ++ /* ++ * Free or unreserve memory. ++ */ ++ if (!reserved) { ++ retVal = freeBlock(target, segid, addr, size); ++ if (retVal) ++ target->segTab[segid].number--; ++ ++ } else { ++ /* Unreserve memory */ ++ sect = (struct RMM_OvlySect *)LST_First(target->ovlyList); ++ while (sect != NULL) { ++ if (addr == sect->addr) { ++ DBC_Assert(size == sect->size); ++ /* Remove from list */ ++ LST_RemoveElem(target->ovlyList, ++ (struct LST_ELEM *)sect); ++ MEM_Free(sect); ++ break; ++ } ++ sect = (struct RMM_OvlySect *)LST_Next(target->ovlyList, ++ (struct LST_ELEM *)sect); ++ } ++ if (sect == NULL) ++ retVal = false; ++ ++ } ++ return retVal; ++} ++ ++/* ++ * ======== RMM_init ======== ++ */ ++bool RMM_init(void) ++{ ++ bool retVal = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ DBC_Assert(!RMM_debugMask.flags); ++ GT_create(&RMM_debugMask, "RM"); /* "RM" for RMm */ ++ ++ retVal = MEM_Init(); ++ ++ if (!retVal) ++ MEM_Exit(); ++ ++ } ++ ++ if (retVal) ++ cRefs++; ++ ++ GT_1trace(RMM_debugMask, GT_5CLASS, ++ "RMM_init(), ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((retVal && (cRefs > 0)) || (!retVal && (cRefs >= 0))); ++ ++ return retVal; ++} ++ ++/* ++ * ======== RMM_stat ======== ++ */ ++bool RMM_stat(struct RMM_TargetObj *target, enum DSP_MEMTYPE segid, ++ struct DSP_MEMSTAT *pMemStatBuf) ++{ ++ struct RMM_Header *head; ++ bool retVal = false; ++ u32 maxFreeSize = 0; ++ u32 totalFreeSize = 0; ++ u32 freeBlocks = 0; ++ ++ DBC_Require(pMemStatBuf != NULL); ++ DBC_Assert(target != NULL); ++ ++ if ((u32) segid < target->numSegs) { ++ head = target->freeList[segid]; ++ ++ /* Collect data from freeList */ ++ while (head != NULL) { ++ maxFreeSize = max(maxFreeSize, head->size); ++ totalFreeSize += head->size; ++ freeBlocks++; ++ head = head->next; ++ } ++ ++ /* ulSize */ ++ pMemStatBuf->ulSize = target->segTab[segid].length; ++ ++ /* ulNumFreeBlocks */ ++ pMemStatBuf->ulNumFreeBlocks = freeBlocks; ++ ++ /* ulTotalFreeSize */ ++ pMemStatBuf->ulTotalFreeSize = totalFreeSize; 
++ ++ /* ulLenMaxFreeBlock */ ++ pMemStatBuf->ulLenMaxFreeBlock = maxFreeSize; ++ ++ /* ulNumAllocBlocks */ ++ pMemStatBuf->ulNumAllocBlocks = target->segTab[segid].number; ++ ++ retVal = true; ++ } ++ ++ return retVal; ++} ++ ++/* ++ * ======== balloc ======== ++ * This allocation function allocates memory from the lowest addresses ++ * first. ++ */ ++static bool allocBlock(struct RMM_TargetObj *target, u32 segid, u32 size, ++ u32 align, u32 *dspAddr) ++{ ++ struct RMM_Header *head; ++ struct RMM_Header *prevhead = NULL; ++ struct RMM_Header *next; ++ u32 tmpalign; ++ u32 alignbytes; ++ u32 hsize; ++ u32 allocsize; ++ u32 addr; ++ ++ alignbytes = (align == 0) ? 1 : align; ++ prevhead = NULL; ++ head = target->freeList[segid]; ++ ++ do { ++ hsize = head->size; ++ next = head->next; ++ ++ addr = head->addr; /* alloc from the bottom */ ++ ++ /* align allocation */ ++ (tmpalign = (u32) addr % alignbytes); ++ if (tmpalign != 0) ++ tmpalign = alignbytes - tmpalign; ++ ++ allocsize = size + tmpalign; ++ ++ if (hsize >= allocsize) { /* big enough */ ++ if (hsize == allocsize && prevhead != NULL) { ++ prevhead->next = next; ++ MEM_Free(head); ++ } else { ++ head->size = hsize - allocsize; ++ head->addr += allocsize; ++ } ++ ++ /* free up any hole created by alignment */ ++ if (tmpalign) ++ freeBlock(target, segid, addr, tmpalign); ++ ++ *dspAddr = addr + tmpalign; ++ return true; ++ } ++ ++ prevhead = head; ++ head = next; ++ ++ } while (head != NULL); ++ ++ return false; ++} ++ ++/* ++ * ======== freeBlock ======== ++ * TO DO: freeBlock() allocates memory, which could result in failure. ++ * Could allocate an RMM_Header in RMM_alloc(), to be kept in a pool. ++ * freeBlock() could use an RMM_Header from the pool, freeing as blocks ++ * are coalesced. ++ */ ++static bool freeBlock(struct RMM_TargetObj *target, u32 segid, u32 addr, ++ u32 size) ++{ ++ struct RMM_Header *head; ++ struct RMM_Header *thead; ++ struct RMM_Header *rhead; ++ bool retVal = true; ++ ++ /* Create a memory header to hold the newly free'd block. */ ++ rhead = MEM_Calloc(sizeof(struct RMM_Header), MEM_PAGED); ++ if (rhead == NULL) { ++ retVal = false; ++ } else { ++ /* search down the free list to find the right place for addr */ ++ head = target->freeList[segid]; ++ ++ if (addr >= head->addr) { ++ while (head->next != NULL && addr > head->next->addr) ++ head = head->next; ++ ++ thead = head->next; ++ ++ head->next = rhead; ++ rhead->next = thead; ++ rhead->addr = addr; ++ rhead->size = size; ++ } else { ++ *rhead = *head; ++ head->next = rhead; ++ head->addr = addr; ++ head->size = size; ++ thead = rhead->next; ++ } ++ ++ /* join with upper block, if possible */ ++ if (thead != NULL && (rhead->addr + rhead->size) == ++ thead->addr) { ++ head->next = rhead->next; ++ thead->size = size + thead->size; ++ thead->addr = addr; ++ MEM_Free(rhead); ++ rhead = thead; ++ } ++ ++ /* join with the lower block, if possible */ ++ if ((head->addr + head->size) == rhead->addr) { ++ head->next = rhead->next; ++ head->size = head->size + rhead->size; ++ MEM_Free(rhead); ++ } ++ } ++ ++ return retVal; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/strm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/strm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/strm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/rmgr/strm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1006 @@ ++/* ++ * strm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== strm.c ======== ++ * Description: ++ * DSP/BIOS Bridge Stream Manager. ++ * ++ * Public Functions: ++ * STRM_AllocateBuffer ++ * STRM_Close ++ * STRM_Create ++ * STRM_Delete ++ * STRM_Exit ++ * STRM_FreeBuffer ++ * STRM_GetEventHandle ++ * STRM_GetInfo ++ * STRM_Idle ++ * STRM_Init ++ * STRM_Issue ++ * STRM_Open ++ * STRM_PrepareBuffer ++ * STRM_Reclaim ++ * STRM_RegisterNotify ++ * STRM_Select ++ * STRM_UnprepareBuffer ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================= ++ *! 18-Feb-2003 vp Code review updates. ++ *! 18-Oct-2002 vp Ported to Linux platform. ++ *! 13-Mar-2002 map pStrm init'd to NULL in STRM_Open to prevent error ++ *! 12-Mar-2002 map Changed return var to WSX "wStatus" instead of "status" ++ *! in DEV and CMM function calls to avoid confusion. ++ *! Return DSP_SOK instead of S_OK from API fxns. ++ *! 12-Mar-2002 map Changed FAILED(..) to DSP_FAILED(..) ++ *! 25-Jan-2002 ag Allow neg seg ids(e.g. DSP_SHMSEG0) to denote SM. ++ *! 15-Nov-2001 ag Added STRMMODE & SM for DMA/ZCopy streaming. ++ *! Changed DSP_STREAMINFO to STRM_INFO in STRM_GetInfo(). ++ *! Use strm timeout value for dma flush timeout. ++ *! 09-May-2001 jeh Code review cleanup. ++ *! 06-Feb-2001 kc Updated DBC_Ensure in STRM_Select to check timeout. ++ *! 23-Oct-2000 jeh Allow NULL STRM_ATTRS passed to STRM_Open() for DLL ++ *! tests to pass. ++ *! 25-Sep-2000 jeh Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- Mini Driver */ ++#include ++ ++/* ----------------------------------- Resource Manager */ ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++#ifndef RES_CLEANUP_DISABLE ++#include ++#include ++#include ++#endif ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define STRM_SIGNATURE 0x4d525453 /* "MRTS" */ ++#define STRMMGR_SIGNATURE 0x5254534d /* "RTSM" */ ++ ++#define DEFAULTTIMEOUT 10000 ++#define DEFAULTNUMBUFS 2 ++ ++/* ++ * ======== STRM_MGR ======== ++ * The STRM_MGR contains device information needed to open the underlying ++ * channels of a stream. ++ */ ++struct STRM_MGR { ++ u32 dwSignature; ++ struct DEV_OBJECT *hDev; /* Device for this processor */ ++ struct CHNL_MGR *hChnlMgr; /* Channel manager */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ struct SYNC_CSOBJECT *hSync; /* For critical sections */ ++} ; ++ ++/* ++ * ======== STRM_OBJECT ======== ++ * This object is allocated in STRM_Open(). 
++ */ ++ struct STRM_OBJECT { ++ u32 dwSignature; ++ struct STRM_MGR *hStrmMgr; ++ struct CHNL_OBJECT *hChnl; ++ u32 uDir; /* DSP_TONODE or DSP_FROMNODE */ ++ u32 uTimeout; ++ u32 uNumBufs; /* Max # of bufs allowed in stream */ ++ u32 uNBufsInStrm; /* Current # of bufs in stream */ ++ u32 ulNBytes; /* bytes transferred since idled */ ++ enum DSP_STREAMSTATE strmState; /* STREAM_IDLE, STREAM_READY, ... */ ++ HANDLE hUserEvent; /* Saved for STRM_GetInfo() */ ++ enum DSP_STRMMODE lMode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */ ++ u32 uDMAChnlId; /* DMA chnl id */ ++ u32 uDMAPriority; /* DMA priority:DMAPRI_[LOW][HIGH] */ ++ u32 uSegment; /* >0 is SM segment.=0 is local heap */ ++ u32 uAlignment; /* Alignment for stream bufs */ ++ struct CMM_XLATOROBJECT *hXlator; /* Stream's SM address translator */ ++} ; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask STRM_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++static u32 cRefs; /* module reference count */ ++ ++/* ----------------------------------- Function Prototypes */ ++static DSP_STATUS DeleteStrm(struct STRM_OBJECT *hStrm); ++static void DeleteStrmMgr(struct STRM_MGR *hStrmMgr); ++ ++/* ++ * ======== STRM_AllocateBuffer ======== ++ * Purpose: ++ * Allocates buffers for a stream. ++ */ ++DSP_STATUS STRM_AllocateBuffer(struct STRM_OBJECT *hStrm, u32 uSize, ++ OUT u8 **apBuffer, u32 uNumBufs, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 uAllocated = 0; ++ u32 i; ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE hSTRMRes; ++#endif ++ DBC_Require(cRefs > 0); ++ DBC_Require(apBuffer != NULL); ++ ++ GT_4trace(STRM_debugMask, GT_ENTER, "STRM_AllocateBuffer: hStrm: 0x%x\t" ++ "uSize: 0x%x\tapBuffer: 0x%x\tuNumBufs: 0x%x\n", ++ hStrm, uSize, apBuffer, uNumBufs); ++ if (MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ /* ++ * Allocate from segment specified at time of stream open. ++ */ ++ if (uSize == 0) ++ status = DSP_ESIZE; ++ ++ } ++ if (DSP_FAILED(status)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ for (i = 0; i < uNumBufs; i++) { ++ DBC_Assert(hStrm->hXlator != NULL); ++ (void)CMM_XlatorAllocBuf(hStrm->hXlator, &apBuffer[i], uSize); ++ if (apBuffer[i] == NULL) { ++ GT_0trace(STRM_debugMask, GT_7CLASS, ++ "STRM_AllocateBuffer: " ++ "DSP_FAILED to alloc shared memory.\n"); ++ status = DSP_EMEMORY; ++ uAllocated = i; ++ break; ++ } ++ } ++ if (DSP_FAILED(status)) ++ STRM_FreeBuffer(hStrm, apBuffer, uAllocated, pr_ctxt); ++ ++#ifndef RES_CLEANUP_DISABLE ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (DRV_GetSTRMResElement(hStrm, &hSTRMRes, pr_ctxt) != ++ DSP_ENOTFOUND) { ++ DRV_ProcUpdateSTRMRes(uNumBufs, hSTRMRes, pr_ctxt); ++ } ++#endif ++func_end: ++ return status; ++} ++ ++/* ++ * ======== STRM_Close ======== ++ * Purpose: ++ * Close a stream opened with STRM_Open(). ++ */ ++DSP_STATUS STRM_Close(struct STRM_OBJECT *hStrm, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct CHNL_INFO chnlInfo; ++ DSP_STATUS status = DSP_SOK; ++ ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE hSTRMRes; ++#endif ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(STRM_debugMask, GT_ENTER, "STRM_Close: hStrm: 0x%x\n", hStrm); ++ ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ /* Have all buffers been reclaimed? 
If not, return ++ * DSP_EPENDING */ ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnChnlGetInfo) (hStrm->hChnl, &chnlInfo); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ ++ if (chnlInfo.cIOCs > 0 || chnlInfo.cIOReqs > 0) { ++ status = DSP_EPENDING; ++ } else { ++ ++ status = DeleteStrm(hStrm); ++ ++ if (DSP_FAILED(status)) { ++ /* we already validated the handle. */ ++ DBC_Assert(status != DSP_EHANDLE); ++ ++ /* make sure we return a documented result */ ++ status = DSP_EFAIL; ++ } ++ } ++ } ++#ifndef RES_CLEANUP_DISABLE ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (DRV_GetSTRMResElement(hStrm, &hSTRMRes, pr_ctxt) != ++ DSP_ENOTFOUND) { ++ DRV_ProcRemoveSTRMResElement(hSTRMRes, pr_ctxt); ++ } ++func_end: ++#endif ++ DBC_Ensure(status == DSP_SOK || status == DSP_EHANDLE || ++ status == DSP_EPENDING || status == DSP_EFAIL); ++ ++ return status; ++} ++ ++/* ++ * ======== STRM_Create ======== ++ * Purpose: ++ * Create a STRM manager object. ++ */ ++DSP_STATUS STRM_Create(OUT struct STRM_MGR **phStrmMgr, struct DEV_OBJECT *hDev) ++{ ++ struct STRM_MGR *pStrmMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phStrmMgr != NULL); ++ DBC_Require(hDev != NULL); ++ ++ GT_2trace(STRM_debugMask, GT_ENTER, "STRM_Create: phStrmMgr: " ++ "0x%x\thDev: 0x%x\n", phStrmMgr, hDev); ++ *phStrmMgr = NULL; ++ /* Allocate STRM manager object */ ++ MEM_AllocObject(pStrmMgr, struct STRM_MGR, STRMMGR_SIGNATURE); ++ if (pStrmMgr == NULL) { ++ status = DSP_EMEMORY; ++ GT_0trace(STRM_debugMask, GT_6CLASS, "STRM_Create: " ++ "MEM_AllocObject() failed!\n "); ++ } else { ++ pStrmMgr->hDev = hDev; ++ } ++ /* Get Channel manager and WMD function interface */ ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetChnlMgr(hDev, &(pStrmMgr->hChnlMgr)); ++ if (DSP_SUCCEEDED(status)) { ++ (void) DEV_GetIntfFxns(hDev, &(pStrmMgr->pIntfFxns)); ++ DBC_Assert(pStrmMgr->pIntfFxns != NULL); ++ } else { ++ GT_1trace(STRM_debugMask, GT_6CLASS, "STRM_Create: " ++ "Failed to get channel manager! status = " ++ "0x%x\n", status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_InitializeCS(&pStrmMgr->hSync); ++ ++ if (DSP_SUCCEEDED(status)) ++ *phStrmMgr = pStrmMgr; ++ else ++ DeleteStrmMgr(pStrmMgr); ++ ++ DBC_Ensure(DSP_SUCCEEDED(status) && ++ (MEM_IsValidHandle((*phStrmMgr), STRMMGR_SIGNATURE) || ++ (DSP_FAILED(status) && *phStrmMgr == NULL))); ++ ++ return status; ++} ++ ++/* ++ * ======== STRM_Delete ======== ++ * Purpose: ++ * Delete the STRM Manager Object. ++ */ ++void STRM_Delete(struct STRM_MGR *hStrmMgr) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(MEM_IsValidHandle(hStrmMgr, STRMMGR_SIGNATURE)); ++ ++ GT_1trace(STRM_debugMask, GT_ENTER, "STRM_Delete: hStrmMgr: 0x%x\n", ++ hStrmMgr); ++ ++ DeleteStrmMgr(hStrmMgr); ++ ++ DBC_Ensure(!MEM_IsValidHandle(hStrmMgr, STRMMGR_SIGNATURE)); ++} ++ ++/* ++ * ======== STRM_Exit ======== ++ * Purpose: ++ * Discontinue usage of STRM module. ++ */ ++void STRM_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ cRefs--; ++ ++ GT_1trace(STRM_debugMask, GT_5CLASS, ++ "Entered STRM_Exit, ref count: 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== STRM_FreeBuffer ======== ++ * Purpose: ++ * Frees the buffers allocated for a stream. 
++ */ ++DSP_STATUS STRM_FreeBuffer(struct STRM_OBJECT *hStrm, u8 **apBuffer, ++ u32 uNumBufs, struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 i = 0; ++ ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE hSTRMRes = NULL; ++#endif ++ DBC_Require(cRefs > 0); ++ DBC_Require(apBuffer != NULL); ++ ++ GT_3trace(STRM_debugMask, GT_ENTER, "STRM_FreeBuffer: hStrm: 0x%x\t" ++ "apBuffer: 0x%x\tuNumBufs: 0x%x\n", hStrm, apBuffer, uNumBufs); ++ ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) ++ status = DSP_EHANDLE; ++ ++ if (DSP_SUCCEEDED(status)) { ++ for (i = 0; i < uNumBufs; i++) { ++ DBC_Assert(hStrm->hXlator != NULL); ++ status = CMM_XlatorFreeBuf(hStrm->hXlator, apBuffer[i]); ++ if (DSP_FAILED(status)) { ++ GT_0trace(STRM_debugMask, GT_7CLASS, ++ "STRM_FreeBuffer: DSP_FAILED" ++ " to free shared memory.\n"); ++ break; ++ } ++ apBuffer[i] = NULL; ++ } ++ } ++#ifndef RES_CLEANUP_DISABLE ++ if (DRV_GetSTRMResElement(hStrm, hSTRMRes, pr_ctxt) != ++ DSP_ENOTFOUND) { ++ DRV_ProcUpdateSTRMRes(uNumBufs-i, hSTRMRes, pr_ctxt); ++ } ++#endif ++ return status; ++} ++ ++/* ++ * ======== STRM_GetInfo ======== ++ * Purpose: ++ * Retrieves information about a stream. ++ */ ++DSP_STATUS STRM_GetInfo(struct STRM_OBJECT *hStrm, ++ OUT struct STRM_INFO *pStreamInfo, ++ u32 uStreamInfoSize) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct CHNL_INFO chnlInfo; ++ DSP_STATUS status = DSP_SOK; ++ void *pVirtBase = NULL; /* NULL if no SM used */ ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pStreamInfo != NULL); ++ DBC_Require(uStreamInfoSize >= sizeof(struct STRM_INFO)); ++ ++ GT_3trace(STRM_debugMask, GT_ENTER, "STRM_GetInfo: hStrm: 0x%x\t" ++ "pStreamInfo: 0x%x\tuStreamInfoSize: 0x%x\n", hStrm, ++ pStreamInfo, uStreamInfoSize); ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ if (uStreamInfoSize < sizeof(struct STRM_INFO)) { ++ /* size of users info */ ++ status = DSP_ESIZE; ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnChnlGetInfo) (hStrm->hChnl, &chnlInfo); ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ if (hStrm->hXlator) { ++ /* We have a translator */ ++ DBC_Assert(hStrm->uSegment > 0); ++ CMM_XlatorInfo(hStrm->hXlator, (u8 **)&pVirtBase, 0, ++ hStrm->uSegment, false); ++ } ++ pStreamInfo->uSegment = hStrm->uSegment; ++ pStreamInfo->lMode = hStrm->lMode; ++ pStreamInfo->pVirtBase = pVirtBase; ++ pStreamInfo->pUser->uNumberBufsAllowed = hStrm->uNumBufs; ++ pStreamInfo->pUser->uNumberBufsInStream = chnlInfo.cIOCs + ++ chnlInfo.cIOReqs; ++ /* # of bytes transferred since last call to DSPStream_Idle() */ ++ pStreamInfo->pUser->ulNumberBytes = chnlInfo.cPosition; ++ pStreamInfo->pUser->hSyncObjectHandle = chnlInfo.hEvent; ++ /* Determine stream state based on channel state and info */ ++ if (chnlInfo.dwState & CHNL_STATEEOS) { ++ pStreamInfo->pUser->ssStreamState = STREAM_DONE; ++ } else { ++ if (chnlInfo.cIOCs > 0) ++ pStreamInfo->pUser->ssStreamState = STREAM_READY; ++ else if (chnlInfo.cIOReqs > 0) ++ pStreamInfo->pUser->ssStreamState = STREAM_PENDING; ++ else ++ pStreamInfo->pUser->ssStreamState = STREAM_IDLE; ++ ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== STRM_Idle ======== ++ * Purpose: ++ * Idles a particular stream. 
++ */ ++DSP_STATUS STRM_Idle(struct STRM_OBJECT *hStrm, bool fFlush) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_2trace(STRM_debugMask, GT_ENTER, "STRM_Idle: hStrm: 0x%x\t" ++ "fFlush: 0x%x\n", hStrm, fFlush); ++ ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ ++ status = (*pIntfFxns->pfnChnlIdle) (hStrm->hChnl, ++ hStrm->uTimeout, fFlush); ++ } ++ return status; ++} ++ ++/* ++ * ======== STRM_Init ======== ++ * Purpose: ++ * Initialize the STRM module. ++ */ ++bool STRM_Init(void) ++{ ++ bool fRetVal = true; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++#if GT_TRACE ++ DBC_Assert(!STRM_debugMask.flags); ++ GT_create(&STRM_debugMask, "ST"); /* "ST" for STrm */ ++#endif ++ } ++ ++ if (fRetVal) ++ cRefs++; ++ ++ GT_1trace(STRM_debugMask, GT_5CLASS, "STRM_Init(), ref count: 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((fRetVal && (cRefs > 0)) || (!fRetVal && (cRefs >= 0))); ++ ++ return fRetVal; ++} ++ ++/* ++ * ======== STRM_Issue ======== ++ * Purpose: ++ * Issues a buffer on a stream ++ */ ++DSP_STATUS STRM_Issue(struct STRM_OBJECT *hStrm, IN u8 *pBuf, u32 ulBytes, ++ u32 ulBufSize, u32 dwArg) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ void *pTmpBuf = NULL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pBuf != NULL); ++ ++ GT_4trace(STRM_debugMask, GT_ENTER, "STRM_Issue: hStrm: 0x%x\tpBuf: " ++ "0x%x\tulBytes: 0x%x\tdwArg: 0x%x\n", hStrm, pBuf, ulBytes, ++ dwArg); ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ ++ if (hStrm->uSegment != 0) { ++ pTmpBuf = CMM_XlatorTranslate(hStrm->hXlator, ++ (void *)pBuf, CMM_VA2DSPPA); ++ if (pTmpBuf == NULL) ++ status = DSP_ETRANSLATE; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = (*pIntfFxns->pfnChnlAddIOReq) ++ (hStrm->hChnl, pBuf, ulBytes, ulBufSize, ++ (u32) pTmpBuf, dwArg); ++ } ++ if (DSP_FAILED(status)) { ++ if (status == CHNL_E_NOIORPS) ++ status = DSP_ESTREAMFULL; ++ else ++ status = DSP_EFAIL; ++ ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== STRM_Open ======== ++ * Purpose: ++ * Open a stream for sending/receiving data buffers to/from a task or ++ * XDAIS socket node on the DSP. 
++ */ ++DSP_STATUS STRM_Open(struct NODE_OBJECT *hNode, u32 uDir, u32 uIndex, ++ IN struct STRM_ATTR *pAttr, ++ OUT struct STRM_OBJECT **phStrm, ++ struct PROCESS_CONTEXT *pr_ctxt) ++{ ++ struct STRM_MGR *hStrmMgr; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ u32 ulChnlId; ++ struct STRM_OBJECT *pStrm = NULL; ++ CHNL_MODE uMode; ++ struct CHNL_ATTRS chnlAttrs; ++ DSP_STATUS status = DSP_SOK; ++ struct CMM_OBJECT *hCmmMgr = NULL; /* Shared memory manager hndl */ ++ ++#ifndef RES_CLEANUP_DISABLE ++ HANDLE hSTRMRes; ++#endif ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(phStrm != NULL); ++ DBC_Require(pAttr != NULL); ++ GT_5trace(STRM_debugMask, GT_ENTER, ++ "STRM_Open: hNode: 0x%x\tuDir: 0x%x\t" ++ "uIndex: 0x%x\tpAttr: 0x%x\tphStrm: 0x%x\n", ++ hNode, uDir, uIndex, pAttr, phStrm); ++ *phStrm = NULL; ++ if (uDir != DSP_TONODE && uDir != DSP_FROMNODE) { ++ status = DSP_EDIRECTION; ++ } else { ++ /* Get the channel id from the node (set in NODE_Connect()) */ ++ status = NODE_GetChannelId(hNode, uDir, uIndex, &ulChnlId); ++ } ++ if (DSP_SUCCEEDED(status)) ++ status = NODE_GetStrmMgr(hNode, &hStrmMgr); ++ ++ if (DSP_SUCCEEDED(status)) { ++ MEM_AllocObject(pStrm, struct STRM_OBJECT, STRM_SIGNATURE); ++ if (pStrm == NULL) { ++ status = DSP_EMEMORY; ++ GT_0trace(STRM_debugMask, GT_6CLASS, ++ "STRM_Open: MEM_AllocObject() failed!\n "); ++ } else { ++ pStrm->hStrmMgr = hStrmMgr; ++ pStrm->uDir = uDir; ++ pStrm->strmState = STREAM_IDLE; ++ pStrm->hUserEvent = pAttr->hUserEvent; ++ if (pAttr->pStreamAttrIn != NULL) { ++ pStrm->uTimeout = pAttr->pStreamAttrIn-> ++ uTimeout; ++ pStrm->uNumBufs = pAttr->pStreamAttrIn-> ++ uNumBufs; ++ pStrm->lMode = pAttr->pStreamAttrIn->lMode; ++ pStrm->uSegment = pAttr->pStreamAttrIn-> ++ uSegment; ++ pStrm->uAlignment = pAttr->pStreamAttrIn-> ++ uAlignment; ++ pStrm->uDMAChnlId = pAttr->pStreamAttrIn-> ++ uDMAChnlId; ++ pStrm->uDMAPriority = pAttr->pStreamAttrIn-> ++ uDMAPriority; ++ chnlAttrs.uIOReqs = pAttr->pStreamAttrIn-> ++ uNumBufs; ++ } else { ++ pStrm->uTimeout = DEFAULTTIMEOUT; ++ pStrm->uNumBufs = DEFAULTNUMBUFS; ++ pStrm->lMode = STRMMODE_PROCCOPY; ++ pStrm->uSegment = 0; /* local memory */ ++ pStrm->uAlignment = 0; ++ pStrm->uDMAChnlId = 0; ++ pStrm->uDMAPriority = 0; ++ chnlAttrs.uIOReqs = DEFAULTNUMBUFS; ++ } ++ chnlAttrs.hReserved1 = NULL; ++ /* DMA chnl flush timeout */ ++ chnlAttrs.hReserved2 = pStrm->uTimeout; ++ chnlAttrs.hEvent = NULL; ++ if (pAttr->hUserEvent != NULL) ++ chnlAttrs.hEvent = pAttr->hUserEvent; ++ ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ if ((pAttr->pVirtBase == NULL) || !(pAttr->ulVirtSize > 0)) ++ goto func_cont; ++ ++ DBC_Assert(pStrm->lMode != STRMMODE_LDMA); /* no System DMA */ ++ /* Get the shared mem mgr for this streams dev object */ ++ status = DEV_GetCmmMgr(hStrmMgr->hDev, &hCmmMgr); ++ if (DSP_FAILED(status)) { ++ GT_1trace(STRM_debugMask, GT_6CLASS, "STRM_Open: Failed to get " ++ "CMM Mgr handle: 0x%x\n", status); ++ } else { ++ /*Allocate a SM addr translator for this strm.*/ ++ status = CMM_XlatorCreate(&pStrm->hXlator, hCmmMgr, NULL); ++ if (DSP_FAILED(status)) { ++ GT_1trace(STRM_debugMask, GT_6CLASS, ++ "STRM_Open: Failed to " ++ "create SM translator: 0x%x\n", status); ++ } else { ++ DBC_Assert(pStrm->uSegment > 0); ++ /* Set translators Virt Addr attributes */ ++ status = CMM_XlatorInfo(pStrm->hXlator, ++ (u8 **)&pAttr->pVirtBase, pAttr->ulVirtSize, ++ pStrm->uSegment, true); ++ if (status != DSP_SOK) { ++ GT_0trace(STRM_debugMask, GT_6CLASS, ++ "STRM_Open: ERROR: " ++ "in setting 
CMM_XlatorInfo.\n"); ++ } ++ } ++ } ++func_cont: ++ if (DSP_SUCCEEDED(status)) { ++ /* Open channel */ ++ uMode = (uDir == DSP_TONODE) ? ++ CHNL_MODETODSP : CHNL_MODEFROMDSP; ++ pIntfFxns = hStrmMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnChnlOpen) (&(pStrm->hChnl), ++ hStrmMgr->hChnlMgr, uMode, ulChnlId, &chnlAttrs); ++ if (DSP_FAILED(status)) { ++ /* ++ * over-ride non-returnable status codes so we return ++ * something documented ++ */ ++ if (status != DSP_EMEMORY && status != ++ DSP_EINVALIDARG && status != DSP_EFAIL) { ++ /* ++ * We got a status that's not return-able. ++ * Assert that we got something we were ++ * expecting (DSP_EHANDLE isn't acceptable, ++ * hStrmMgr->hChnlMgr better be valid or we ++ * assert here), and then return DSP_EFAIL. ++ */ ++ DBC_Assert(status == CHNL_E_OUTOFSTREAMS || ++ status == CHNL_E_BADCHANID || ++ status == CHNL_E_CHANBUSY || ++ status == CHNL_E_NOIORPS); ++ status = DSP_EFAIL; ++ } ++ GT_2trace(STRM_debugMask, GT_6CLASS, ++ "STRM_Open: Channel open failed, " ++ "chnl id = %d, status = 0x%x\n", ulChnlId, ++ status); ++ } ++ } ++ if (DSP_SUCCEEDED(status)) ++ *phStrm = pStrm; ++ else ++ (void)DeleteStrm(pStrm); ++ ++#ifndef RES_CLEANUP_DISABLE ++ DRV_ProcInsertSTRMResElement(*phStrm, &hSTRMRes, pr_ctxt); ++#endif ++ ++ /* ensure we return a documented error code */ ++ DBC_Ensure((DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle((*phStrm), STRM_SIGNATURE)) || ++ (*phStrm == NULL && (status == DSP_EHANDLE || ++ status == DSP_EDIRECTION || status == DSP_EVALUE || ++ status == DSP_EFAIL))); ++ return status; ++} ++ ++/* ++ * ======== STRM_Reclaim ======== ++ * Purpose: ++ * Relcaims a buffer from a stream. ++ */ ++DSP_STATUS STRM_Reclaim(struct STRM_OBJECT *hStrm, OUT u8 **pBufPtr, ++ u32 *pulBytes, u32 *pulBufSize, u32 *pdwArg) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct CHNL_IOC chnlIOC; ++ DSP_STATUS status = DSP_SOK; ++ void *pTmpBuf = NULL; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pBufPtr != NULL); ++ DBC_Require(pulBytes != NULL); ++ DBC_Require(pdwArg != NULL); ++ ++ GT_4trace(STRM_debugMask, GT_ENTER, ++ "STRM_Reclaim: hStrm: 0x%x\tpBufPtr: 0x%x" ++ "\tpulBytes: 0x%x\tpdwArg: 0x%x\n", hStrm, pBufPtr, pulBytes, ++ pdwArg); ++ ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ ++ status = (*pIntfFxns->pfnChnlGetIOC)(hStrm->hChnl, hStrm->uTimeout, ++ &chnlIOC); ++ if (DSP_FAILED(status)) { ++ GT_1trace(STRM_debugMask, GT_6CLASS, ++ "STRM_Reclaim: GetIOC failed! " ++ "Status = 0x%x\n", status); ++ } else { ++ *pulBytes = chnlIOC.cBytes; ++ if (pulBufSize) ++ *pulBufSize = chnlIOC.cBufSize; ++ ++ *pdwArg = chnlIOC.dwArg; ++ if (!CHNL_IsIOComplete(chnlIOC)) { ++ if (CHNL_IsTimedOut(chnlIOC)) { ++ status = DSP_ETIMEOUT; ++ } else { ++ /* Allow reclaims after idle to succeed */ ++ if (!CHNL_IsIOCancelled(chnlIOC)) ++ status = DSP_EFAIL; ++ ++ } ++ } ++ /* Translate zerocopy buffer if channel not canceled. */ ++ if (DSP_SUCCEEDED(status) && (!CHNL_IsIOCancelled(chnlIOC)) && ++ (hStrm->lMode == STRMMODE_ZEROCOPY)) { ++ /* ++ * This is a zero-copy channel so chnlIOC.pBuf ++ * contains the DSP address of SM. We need to ++ * translate it to a virtual address for the user ++ * thread to access. ++ * Note: Could add CMM_DSPPA2VA to CMM in the future. 
++ */ ++ pTmpBuf = CMM_XlatorTranslate(hStrm->hXlator, ++ chnlIOC.pBuf, CMM_DSPPA2PA); ++ if (pTmpBuf != NULL) { ++ /* now convert this GPP Pa to Va */ ++ pTmpBuf = CMM_XlatorTranslate(hStrm->hXlator, ++ pTmpBuf, CMM_PA2VA); ++ } ++ if (pTmpBuf == NULL) { ++ GT_0trace(STRM_debugMask, GT_7CLASS, ++ "STRM_Reclaim: Failed " ++ "SM translation!\n"); ++ status = DSP_ETRANSLATE; ++ } ++ chnlIOC.pBuf = pTmpBuf; ++ } ++ *pBufPtr = chnlIOC.pBuf; ++ } ++func_end: ++ /* ensure we return a documented return code */ ++ DBC_Ensure(DSP_SUCCEEDED(status) || status == DSP_EHANDLE || ++ status == DSP_ETIMEOUT || status == DSP_ETRANSLATE || ++ status == DSP_EFAIL); ++ return status; ++} ++ ++/* ++ * ======== STRM_RegisterNotify ======== ++ * Purpose: ++ * Register to be notified on specific events for this stream. ++ */ ++DSP_STATUS STRM_RegisterNotify(struct STRM_OBJECT *hStrm, u32 uEventMask, ++ u32 uNotifyType, struct DSP_NOTIFICATION ++ *hNotification) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(hNotification != NULL); ++ ++ GT_4trace(STRM_debugMask, GT_ENTER, ++ "STRM_RegisterNotify: hStrm: 0x%x\t" ++ "uEventMask: 0x%x\tuNotifyType: 0x%x\thNotification: 0x%x\n", ++ hStrm, uEventMask, uNotifyType, hNotification); ++ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else if ((uEventMask & ~((DSP_STREAMIOCOMPLETION) | ++ DSP_STREAMDONE)) != 0) { ++ status = DSP_EVALUE; ++ } else { ++ if (uNotifyType != DSP_SIGNALEVENT) ++ status = DSP_ENOTIMPL; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ ++ status = (*pIntfFxns->pfnChnlRegisterNotify)(hStrm->hChnl, ++ uEventMask, uNotifyType, hNotification); ++ } ++ /* ensure we return a documented return code */ ++ DBC_Ensure(DSP_SUCCEEDED(status) || status == DSP_EHANDLE || ++ status == DSP_ETIMEOUT || status == DSP_ETRANSLATE || ++ status == DSP_ENOTIMPL || status == DSP_EFAIL); ++ return status; ++} ++ ++/* ++ * ======== STRM_Select ======== ++ * Purpose: ++ * Selects a ready stream. 
++ */ ++DSP_STATUS STRM_Select(IN struct STRM_OBJECT **aStrmTab, u32 nStrms, ++ OUT u32 *pMask, u32 uTimeout) ++{ ++ u32 uIndex; ++ struct CHNL_INFO chnlInfo; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct SYNC_OBJECT **hSyncEvents = NULL; ++ u32 i; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(cRefs > 0); ++ DBC_Require(aStrmTab != NULL); ++ DBC_Require(pMask != NULL); ++ DBC_Require(nStrms > 0); ++ ++ GT_4trace(STRM_debugMask, GT_ENTER, ++ "STRM_Select: aStrmTab: 0x%x \tnStrms: " ++ "0x%x\tpMask: 0x%x\tuTimeout: 0x%x\n", aStrmTab, ++ nStrms, pMask, uTimeout); ++ *pMask = 0; ++ for (i = 0; i < nStrms; i++) { ++ if (!MEM_IsValidHandle(aStrmTab[i], STRM_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ break; ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* Determine which channels have IO ready */ ++ for (i = 0; i < nStrms; i++) { ++ pIntfFxns = aStrmTab[i]->hStrmMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnChnlGetInfo)(aStrmTab[i]->hChnl, ++ &chnlInfo); ++ if (DSP_FAILED(status)) { ++ break; ++ } else { ++ if (chnlInfo.cIOCs > 0) ++ *pMask |= (1 << i); ++ ++ } ++ } ++ if (DSP_SUCCEEDED(status) && uTimeout > 0 && *pMask == 0) { ++ /* Non-zero timeout */ ++ hSyncEvents = (struct SYNC_OBJECT **)MEM_Alloc(nStrms * ++ sizeof(struct SYNC_OBJECT *), MEM_PAGED); ++ if (hSyncEvents == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ for (i = 0; i < nStrms; i++) { ++ pIntfFxns = aStrmTab[i]->hStrmMgr->pIntfFxns; ++ status = (*pIntfFxns->pfnChnlGetInfo) ++ (aStrmTab[i]->hChnl, &chnlInfo); ++ if (DSP_FAILED(status)) ++ break; ++ else ++ hSyncEvents[i] = chnlInfo.hSyncEvent; ++ ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = SYNC_WaitOnMultipleEvents(hSyncEvents, nStrms, ++ uTimeout, &uIndex); ++ if (DSP_SUCCEEDED(status)) { ++ /* Since we waited on the event, we have to ++ * reset it */ ++ SYNC_SetEvent(hSyncEvents[uIndex]); ++ *pMask = 1 << uIndex; ++ } ++ } ++ } ++func_end: ++ if (hSyncEvents) ++ MEM_Free(hSyncEvents); ++ ++ DBC_Ensure((DSP_SUCCEEDED(status) && (*pMask != 0 || uTimeout == 0)) || ++ (DSP_FAILED(status) && *pMask == 0)); ++ ++ return status; ++} ++ ++/* ++ * ======== DeleteStrm ======== ++ * Purpose: ++ * Frees the resources allocated for a stream. ++ */ ++static DSP_STATUS DeleteStrm(struct STRM_OBJECT *hStrm) ++{ ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ DSP_STATUS status = DSP_SOK; ++ ++ if (MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { ++ if (hStrm->hChnl) { ++ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; ++ /* Channel close can fail only if the channel handle ++ * is invalid. */ ++ status = (*pIntfFxns->pfnChnlClose) (hStrm->hChnl); ++ /* Free all SM address translator resources */ ++ if (DSP_SUCCEEDED(status)) { ++ if (hStrm->hXlator) { ++ /* force free */ ++ (void)CMM_XlatorDelete(hStrm->hXlator, ++ true); ++ } ++ } ++ } ++ MEM_FreeObject(hStrm); ++ } else { ++ status = DSP_EHANDLE; ++ } ++ return status; ++} ++ ++/* ++ * ======== DeleteStrmMgr ======== ++ * Purpose: ++ * Frees stream manager. 
++ */ ++static void DeleteStrmMgr(struct STRM_MGR *hStrmMgr) ++{ ++ if (MEM_IsValidHandle(hStrmMgr, STRMMGR_SIGNATURE)) { ++ ++ if (hStrmMgr->hSync) ++ SYNC_DeleteCS(hStrmMgr->hSync); ++ ++ MEM_FreeObject(hStrmMgr); ++ } ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/cfg.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/cfg.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/cfg.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/cfg.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,483 @@ ++/* ++ * cfg.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cfgce.c ======== ++ * Purpose: ++ * Implementation of platform specific config services. ++ * ++ * Private Functions: ++ * CFG_Exit ++ * CFG_GetAutoStart ++ * CFG_GetDevObject ++ * CFG_GetDSPResources ++ * CFG_GetExecFile ++ * CFG_GetHostResources ++ * CFG_GetObject ++ * CFG_Init ++ * CFG_SetDevObject ++ * CFG_SetObject ++ * ++ * ++ *! Revision History: ++ *! ================ ++ *! 26-Arp-2004 hp Support for handling more than one Device. ++ *! 26-Feb-2003 kc Removed unused CFG fxns. ++ *! 10-Nov-2000 rr: CFG_GetBoardName local var initialized. ++ *! 30-Oct-2000 kc: Changed local var. names to use Hungarian notation. ++ *! 10-Aug-2000 rr: Cosmetic changes. ++ *! 26-Jul-2000 rr: Added CFG_GetDCDName. CFG_Get/SetObject(based on a flag) ++ *! replaces CFG_GetMgrObject & CFG_SetMgrObject. ++ *! 17-Jul-2000 rr: Added CFG_GetMgrObject & CFG_SetMgrObject. ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 31-Jan-2000 rr: Comments and bugfixes: modified after code review ++ *! 07-Jan-2000 rr: CFG_GetBoardName Ensure class checks strlen of the ++ *! read value from the registry against the passed in BufSize; ++ *! CFG_GetZLFile,CFG_GetWMDFileName and ++ *! CFG_GetExecFile also modified same way. ++ *! 06-Jan-2000 rr: CFG_GetSearchPath & CFG_GetWinBRIDGEDir removed. ++ *! 09-Dec-1999 rr: CFG_SetDevObject stores the DevNodeString pointer. ++ *! 03-Dec-1999 rr: CFG_GetDevObject reads stored DevObject from Registry. ++ *! CFG_GetDevNode reads the Devnodestring from the registry. ++ *! CFG_SetDevObject stores the registry path as ++ *! DevNodestring in the registry. ++ *! 02-Dec-1999 rr: CFG_debugMask is declared static now. stdwin.h included ++ *! 22-Nov-1999 kc: Added windows.h to remove warnings. ++ *! 25-Oct-1999 rr: CFG_GetHostResources reads the HostResource structure ++ *! from the registry which was set by the DRV Request ++ *! Resources. ++ *! 15-Oct-1999 rr: Changes in CFG_SetPrivateDword & HostResources reflecting ++ *! changes for drv.h resource structure and wsxreg.h new ++ *! entry(DevObject) Hard coded entries removed for those items ++ *! 08-Oct-1999 rr: CFG_SetPrivateDword modified. it sets devobject into the ++ *! registry. CFG_Get HostResources modified for opening up ++ *! two mem winodws. ++ *! 24-Sep-1999 rr: CFG_GetHostResources uses hardcoded Registry calls,uses NT ++ *! 
type of Resource Structure. ++ *! 19-Jul-1999 a0216266: Stubbed from cfgnt.c. ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++struct DRV_EXT { ++ struct LST_ELEM link; ++ char szString[MAXREGPATHLENGTH]; ++}; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask CFG_debugMask = { NULL, NULL }; /* CFG debug Mask */ ++#endif ++ ++/* ++ * ======== CFG_Exit ======== ++ * Purpose: ++ * Discontinue usage of the CFG module. ++ */ ++void CFG_Exit(void) ++{ ++ GT_0trace(CFG_debugMask, GT_5CLASS, "Entered CFG_Exit\n"); ++} ++ ++/* ++ * ======== CFG_GetAutoStart ======== ++ * Purpose: ++ * Retreive the autostart mask, if any, for this board. ++ */ ++DSP_STATUS CFG_GetAutoStart(struct CFG_DEVNODE *hDevNode, ++ OUT u32 *pdwAutoStart) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 dwBufSize; ++ GT_2trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_GetAutoStart: \n\thDevNode:" ++ "0x%x\n\tpdwAutoStart: 0x%x\n", hDevNode, pdwAutoStart); ++ dwBufSize = sizeof(*pdwAutoStart); ++ if (!hDevNode) ++ status = CFG_E_INVALIDHDEVNODE; ++ if (!pdwAutoStart) ++ status = CFG_E_INVALIDPOINTER; ++ if (DSP_SUCCEEDED(status)) { ++ status = REG_GetValue(NULL, (char *)hDevNode, AUTOSTART, ++ (u8 *)pdwAutoStart, &dwBufSize); ++ if (DSP_FAILED(status)) ++ status = CFG_E_RESOURCENOTAVAIL; ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(CFG_debugMask, GT_1CLASS, ++ "CFG_GetAutoStart SUCCESS \n"); ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "CFG_GetAutoStart Failed \n"); ++ } ++#endif ++ DBC_Ensure((status == DSP_SOK && ++ (*pdwAutoStart == 0 || *pdwAutoStart == 1)) ++ || status != DSP_SOK); ++ return status; ++} ++ ++/* ++ * ======== CFG_GetDevObject ======== ++ * Purpose: ++ * Retrieve the Device Object handle for a given devnode. ++ */ ++DSP_STATUS CFG_GetDevObject(struct CFG_DEVNODE *hDevNode, OUT u32 *pdwValue) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 dwBufSize; ++ GT_2trace(CFG_debugMask, GT_ENTER, "Entered CFG_GetDevObject, args: " ++ "\n\thDevNode: 0x%x\n\tpdwValue: 0x%x\n", hDevNode, ++ *pdwValue); ++ if (!hDevNode) ++ status = CFG_E_INVALIDHDEVNODE; ++ ++ if (!pdwValue) ++ status = CFG_E_INVALIDHDEVNODE; ++ ++ dwBufSize = sizeof(pdwValue); ++ if (DSP_SUCCEEDED(status)) { ++ ++ /* check the device string and then call the REG_SetValue*/ ++ if (!(strcmp((char *)((struct DRV_EXT *)hDevNode)->szString, ++ "TIOMAP1510"))) { ++ GT_0trace(CFG_debugMask, GT_1CLASS, ++ "Fetching DSP Device from " ++ "Registry \n"); ++ status = REG_GetValue(NULL, (char *)hDevNode, ++ "DEVICE_DSP", ++ (u8 *)pdwValue, &dwBufSize); ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "Failed to Identify the Device to Fetch \n"); ++ } ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) { ++ GT_1trace(CFG_debugMask, GT_1CLASS, ++ "CFG_GetDevObject SUCCESS DevObject" ++ ": 0x%x\n ", *pdwValue); ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "CFG_GetDevObject Failed \n"); ++ } ++#endif ++ return status; ++} ++ ++/* ++ * ======== CFG_GetDSPResources ======== ++ * Purpose: ++ * Get the DSP resources available to a given device. 
++ */ ++DSP_STATUS CFG_GetDSPResources(struct CFG_DEVNODE *hDevNode, ++ OUT struct CFG_DSPRES *pDSPResTable) ++{ ++ DSP_STATUS status = DSP_SOK; /* return value */ ++ u32 dwResSize; ++ GT_2trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_GetDSPResources, args: " ++ "\n\thDevNode: 0x%x\n\tpDSPResTable: 0x%x\n", ++ hDevNode, pDSPResTable); ++ if (!hDevNode) { ++ status = CFG_E_INVALIDHDEVNODE; ++ } else if (!pDSPResTable) { ++ status = CFG_E_INVALIDPOINTER; ++ } else { ++ status = REG_GetValue(NULL, CONFIG, DSPRESOURCES, ++ (u8 *)pDSPResTable, ++ &dwResSize); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(CFG_debugMask, GT_1CLASS, ++ "CFG_GetDSPResources SUCCESS\n"); ++ } else { ++ status = CFG_E_RESOURCENOTAVAIL; ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "CFG_GetDSPResources Failed \n"); ++ } ++#ifdef DEBUG ++ /* assert that resource values are reasonable */ ++ DBC_Assert(pDSPResTable->uChipType < 256); ++ DBC_Assert(pDSPResTable->uWordSize > 0); ++ DBC_Assert(pDSPResTable->uWordSize < 32); ++ DBC_Assert(pDSPResTable->cChips > 0); ++ DBC_Assert(pDSPResTable->cChips < 256); ++#endif ++ return status; ++} ++ ++/* ++ * ======== CFG_GetExecFile ======== ++ * Purpose: ++ * Retreive the default executable, if any, for this board. ++ */ ++DSP_STATUS CFG_GetExecFile(struct CFG_DEVNODE *hDevNode, u32 ulBufSize, ++ OUT char *pstrExecFile) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 cExecSize = ulBufSize; ++ GT_3trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_GetExecFile:\n\tthDevNode: " ++ "0x%x\n\tulBufSize: 0x%x\n\tpstrExecFile: 0x%x\n", hDevNode, ++ ulBufSize, pstrExecFile); ++ if (!hDevNode) ++ status = CFG_E_INVALIDHDEVNODE; ++ ++ if (!pstrExecFile) ++ status = CFG_E_INVALIDPOINTER; ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = REG_GetValue(NULL, (char *)hDevNode, DEFEXEC, ++ (u8 *)pstrExecFile, &cExecSize); ++ if (DSP_FAILED(status)) ++ status = CFG_E_RESOURCENOTAVAIL; ++ else if (cExecSize > ulBufSize) ++ status = DSP_ESIZE; ++ ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) { ++ GT_1trace(CFG_debugMask, GT_1CLASS, ++ "CFG_GetExecFile SUCCESS Exec File" ++ "name : %s\n ", pstrExecFile); ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "CFG_GetExecFile Failed \n"); ++ } ++#endif ++ DBC_Ensure(((status == DSP_SOK) && ++ (strlen(pstrExecFile) <= ulBufSize)) || (status != DSP_SOK)); ++ return status; ++} ++ ++/* ++ * ======== CFG_GetHostResources ======== ++ * Purpose: ++ * Get the Host allocated resources assigned to a given device. 
++ */ ++DSP_STATUS CFG_GetHostResources(struct CFG_DEVNODE *hDevNode, ++ OUT struct CFG_HOSTRES *pHostResTable) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 dwBufSize; ++ GT_2trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_GetHostResources, args:\n\t" ++ "pHostResTable: 0x%x\n\thDevNode: 0x%x\n", ++ pHostResTable, hDevNode); ++ if (!hDevNode) ++ status = CFG_E_INVALIDHDEVNODE; ++ ++ if (!pHostResTable) ++ status = CFG_E_INVALIDPOINTER; ++ ++ if (DSP_SUCCEEDED(status)) { ++ dwBufSize = sizeof(struct CFG_HOSTRES); ++ if (DSP_FAILED(REG_GetValue(NULL, (char *)hDevNode, ++ CURRENTCONFIG, ++ (u8 *)pHostResTable, &dwBufSize))) { ++ status = CFG_E_RESOURCENOTAVAIL; ++ } ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(CFG_debugMask, GT_1CLASS, ++ "CFG_GetHostResources SUCCESS \n"); ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "CFG_GetHostResources Failed \n"); ++ } ++#endif ++ return status; ++} ++ ++/* ++ * ======== CFG_GetObject ======== ++ * Purpose: ++ * Retrieve the Object handle from the Registry ++ */ ++DSP_STATUS CFG_GetObject(OUT u32 *pdwValue, u32 dwType) ++{ ++ DSP_STATUS status = DSP_EINVALIDARG; ++ u32 dwBufSize; ++ DBC_Require(pdwValue != NULL); ++ GT_1trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_GetObject, args:pdwValue: " ++ "0x%x\n", *pdwValue); ++ dwBufSize = sizeof(pdwValue); ++ switch (dwType) { ++ case (REG_DRV_OBJECT): ++ status = REG_GetValue(NULL, CONFIG, DRVOBJECT, ++ (u8 *)pdwValue, ++ &dwBufSize); ++ break; ++ case (REG_MGR_OBJECT): ++ status = REG_GetValue(NULL, CONFIG, MGROBJECT, ++ (u8 *)pdwValue, ++ &dwBufSize); ++ break; ++ default: ++ break; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ GT_1trace(CFG_debugMask, GT_1CLASS, ++ "CFG_GetObject SUCCESS DrvObject: " ++ "0x%x\n ", *pdwValue); ++ } else { ++ status = CFG_E_RESOURCENOTAVAIL; ++ *pdwValue = 0; ++ GT_0trace(CFG_debugMask, GT_6CLASS, "CFG_GetObject Failed \n"); ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && *pdwValue != 0) || ++ (DSP_FAILED(status) && *pdwValue == 0)); ++ return status; ++} ++ ++/* ++ * ======== CFG_Init ======== ++ * Purpose: ++ * Initialize the CFG module's private state. ++ */ ++bool CFG_Init(void) ++{ ++ struct CFG_DSPRES dspResources; ++ GT_create(&CFG_debugMask, "CF"); /* CF for ConFig */ ++ GT_0trace(CFG_debugMask, GT_5CLASS, "Entered CFG_Init\n"); ++ GT_0trace(CFG_debugMask, GT_5CLASS, "Intializing DSP Registry Info \n"); ++ ++ dspResources.uChipType = DSPTYPE_64; ++ dspResources.cChips = 1; ++ dspResources.uWordSize = DSPWORDSIZE; ++ dspResources.cMemTypes = 0; ++ dspResources.aMemDesc[0].uMemType = 0; ++ dspResources.aMemDesc[0].ulMin = 0; ++ dspResources.aMemDesc[0].ulMax = 0; ++ if (DSP_SUCCEEDED(REG_SetValue(NULL, CONFIG, DSPRESOURCES, REG_BINARY, ++ (u8 *)&dspResources, sizeof(struct CFG_DSPRES)))) { ++ GT_0trace(CFG_debugMask, GT_5CLASS, ++ "Initialized DSP resources in " ++ "Registry \n"); ++ } else ++ GT_0trace(CFG_debugMask, GT_5CLASS, ++ "Failed to Initialize DSP resources" ++ " in Registry \n"); ++ return true; ++} ++ ++/* ++ * ======== CFG_SetDevObject ======== ++ * Purpose: ++ * Store the Device Object handle and devNode pointer for a given devnode. 
++ */ ++DSP_STATUS CFG_SetDevObject(struct CFG_DEVNODE *hDevNode, u32 dwValue) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 dwBuffSize; ++ GT_2trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_SetDevObject, args: \n\t" ++ "hDevNode: 0x%x\n\tdwValue: 0x%x\n", hDevNode, dwValue); ++ if (!hDevNode) ++ status = CFG_E_INVALIDHDEVNODE; ++ ++ dwBuffSize = sizeof(dwValue); ++ if (DSP_SUCCEEDED(status)) { ++ /* Store the WCD device object in the Registry */ ++ ++ if (!(strcmp((char *)hDevNode, "TIOMAP1510"))) { ++ GT_0trace(CFG_debugMask, GT_1CLASS, ++ "Registering the DSP Device \n"); ++ status = REG_SetValue(NULL, (char *)hDevNode, ++ "DEVICE_DSP", REG_DWORD,\ ++ (u8 *)&dwValue, dwBuffSize); ++ if (DSP_SUCCEEDED(status)) { ++ dwBuffSize = sizeof(hDevNode); ++ status = REG_SetValue(NULL, ++ (char *)hDevNode, "DEVNODESTRING_DSP", ++ REG_DWORD, (u8 *)&hDevNode, ++ dwBuffSize); ++ } ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "Failed to Register Device \n"); ++ } ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) { ++ GT_0trace(CFG_debugMask, GT_1CLASS, ++ "CFG_SetDevObject SUCCESS \n"); ++ } else { ++ GT_0trace(CFG_debugMask, GT_6CLASS, ++ "CFG_SetDevObject Failed \n"); ++ } ++#endif ++ return status; ++} ++ ++/* ++ * ======== CFG_SetObject ======== ++ * Purpose: ++ * Store the Driver Object handle ++ */ ++DSP_STATUS CFG_SetObject(u32 dwValue, u32 dwType) ++{ ++ DSP_STATUS status = DSP_EINVALIDARG; ++ u32 dwBuffSize; ++ GT_1trace(CFG_debugMask, GT_ENTER, ++ "Entered CFG_SetObject, args: dwValue: " ++ "0x%x\n", dwValue); ++ dwBuffSize = sizeof(dwValue); ++ switch (dwType) { ++ case (REG_DRV_OBJECT): ++ status = REG_SetValue(NULL, CONFIG, DRVOBJECT, REG_DWORD, ++ (u8 *)&dwValue, dwBuffSize); ++ break; ++ case (REG_MGR_OBJECT): ++ status = REG_SetValue(NULL, CONFIG, MGROBJECT, REG_DWORD, ++ (u8 *) &dwValue, dwBuffSize); ++ break; ++ default: ++ break; ++ } ++#ifdef DEBUG ++ if (DSP_SUCCEEDED(status)) ++ GT_0trace(CFG_debugMask, GT_1CLASS, "CFG_SetObject SUCCESS \n"); ++ else ++ GT_0trace(CFG_debugMask, GT_6CLASS, "CFG_SetObject Failed \n"); ++ ++#endif ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/clk.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/clk.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/clk.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/clk.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,375 @@ ++/* ++ * clk.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== clk.c ======== ++ * Purpose: ++ * Clock and Timer services. ++ * ++ * Public Functions: ++ * CLK_Exit ++ * CLK_Init ++ * CLK_Enable ++ * CLK_Disable ++ * CLK_GetRate ++ * CLK_Set_32KHz ++ *! Revision History: ++ *! ================ ++ *! 08-May-2007 rg: moved all clock functions from sync module. ++ * And added CLK_Set_32KHz, CLK_Set_SysClk. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++ ++typedef volatile unsigned long REG_UWORD32; ++ ++#define SSI_Base 0x48058000 ++ ++#define SSI_BASE IO_ADDRESS(SSI_Base) ++ ++ ++struct SERVICES_Clk_t { ++ struct clk *clk_handle; ++ const char *clk_name; ++ int id; ++}; ++ ++/* The row order of the below array needs to match with the clock enumerations ++ * 'SERVICES_ClkId' provided in the header file.. any changes in the ++ * enumerations needs to be fixed in the array as well */ ++static struct SERVICES_Clk_t SERVICES_Clks[] = { ++ {NULL, "iva2_ck", -1}, ++ {NULL, "mailboxes_ick", -1}, ++ {NULL, "gpt5_fck", -1}, ++ {NULL, "gpt5_ick", -1}, ++ {NULL, "gpt6_fck", -1}, ++ {NULL, "gpt6_ick", -1}, ++ {NULL, "gpt7_fck", -1}, ++ {NULL, "gpt7_ick", -1}, ++ {NULL, "gpt8_fck", -1}, ++ {NULL, "gpt8_ick", -1}, ++ {NULL, "wdt_fck", 3}, ++ {NULL, "wdt_ick", 3}, ++ {NULL, "mcbsp_fck", 1}, ++ {NULL, "mcbsp_ick", 1}, ++ {NULL, "mcbsp_fck", 2}, ++ {NULL, "mcbsp_ick", 2}, ++ {NULL, "mcbsp_fck", 3}, ++ {NULL, "mcbsp_ick", 3}, ++ {NULL, "mcbsp_fck", 4}, ++ {NULL, "mcbsp_ick", 4}, ++ {NULL, "mcbsp_fck", 5}, ++ {NULL, "mcbsp_ick", 5}, ++ {NULL, "ssi_ssr_sst_fck", -1}, ++ {NULL, "ssi_ick", -1}, ++ {NULL, "omap_32k_fck", -1}, ++ {NULL, "sys_ck", -1}, ++ {NULL, ""} ++}; ++ ++/* Generic TIMER object: */ ++struct TIMER_OBJECT { ++ struct timer_list timer; ++}; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask CLK_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++/* ++ * ======== CLK_Exit ======== ++ * Purpose: ++ * Cleanup CLK module. ++ */ ++void CLK_Exit(void) ++{ ++ int i = 0; ++ ++ GT_0trace(CLK_debugMask, GT_5CLASS, "CLK_Exit\n"); ++ /* Relinquish the clock handles */ ++ while (i < SERVICESCLK_NOT_DEFINED) { ++ if (SERVICES_Clks[i].clk_handle) ++ clk_put(SERVICES_Clks[i].clk_handle); ++ ++ SERVICES_Clks[i].clk_handle = NULL; ++ i++; ++ } ++ ++} ++ ++/* ++ * ======== CLK_Init ======== ++ * Purpose: ++ * Initialize CLK module. ++ */ ++bool CLK_Init(void) ++{ ++ static struct platform_device dspbridge_device; ++ struct clk *clk_handle; ++ int i = 0; ++ GT_create(&CLK_debugMask, "CK"); /* CK for CLK */ ++ GT_0trace(CLK_debugMask, GT_5CLASS, "CLK_Init\n"); ++ ++ dspbridge_device.dev.bus = &platform_bus_type; ++ ++ /* Get the clock handles from base port and store locally */ ++ while (i < SERVICESCLK_NOT_DEFINED) { ++ /* get the handle from BP */ ++ dspbridge_device.id = SERVICES_Clks[i].id; ++ ++ clk_handle = clk_get(&dspbridge_device.dev, ++ SERVICES_Clks[i].clk_name); ++ ++ if (!clk_handle) { ++ GT_2trace(CLK_debugMask, GT_7CLASS, ++ "CLK_Init: failed to get Clk handle %s, " ++ "CLK dev id = %d\n", ++ SERVICES_Clks[i].clk_name, ++ SERVICES_Clks[i].id); ++ /* should we fail here?? */ ++ } else { ++ GT_2trace(CLK_debugMask, GT_7CLASS, ++ "CLK_Init: PASS and Clk handle %s, " ++ "CLK dev id = %d\n", ++ SERVICES_Clks[i].clk_name, ++ SERVICES_Clks[i].id); ++ } ++ SERVICES_Clks[i].clk_handle = clk_handle; ++ i++; ++ } ++ ++ return true; ++} ++ ++/* ++ * ======== CLK_Enable ======== ++ * Purpose: ++ * Enable Clock . 
++ * ++*/ ++DSP_STATUS CLK_Enable(IN enum SERVICES_ClkId clk_id) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct clk *pClk; ++ ++ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); ++ GT_2trace(CLK_debugMask, GT_6CLASS, "CLK_Enable: CLK %s, " ++ "CLK dev id = %d\n", SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ ++ pClk = SERVICES_Clks[clk_id].clk_handle; ++ if (pClk) { ++ if (clk_enable(pClk) == 0x0) { ++ /* Success ? */ ++ } else { ++ pr_err("CLK_Enable: failed to Enable CLK %s, " ++ "CLK dev id = %d\n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ status = DSP_EFAIL; ++ } ++ } else { ++ pr_err("CLK_Enable: failed to get CLK %s, CLK dev id = %d\n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ status = DSP_EFAIL; ++ } ++ /* The SSI module need to configured not to have the Forced idle for ++ * master interface. If it is set to forced idle, the SSI module is ++ * transitioning to standby thereby causing the client in the DSP hang ++ * waiting for the SSI module to be active after enabling the clocks ++ */ ++ if (clk_id == SERVICESCLK_ssi_fck) ++ SSI_Clk_Prepare(true); ++ ++ return status; ++} ++/* ++ * ======== CLK_Set_32KHz ======== ++ * Purpose: ++ * To Set parent of a clock to 32KHz. ++ */ ++ ++DSP_STATUS CLK_Set_32KHz(IN enum SERVICES_ClkId clk_id) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct clk *pClk; ++ struct clk *pClkParent; ++ enum SERVICES_ClkId sys_32k_id = SERVICESCLK_sys_32k_ck; ++ pClkParent = SERVICES_Clks[sys_32k_id].clk_handle; ++ ++ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); ++ GT_2trace(CLK_debugMask, GT_6CLASS, "CLK_Set_32KHz: CLK %s, " ++ "CLK dev id = %d is setting to 32KHz \n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ pClk = SERVICES_Clks[clk_id].clk_handle; ++ if (pClk) { ++ if (!(clk_set_parent(pClk, pClkParent) == 0x0)) { ++ GT_2trace(CLK_debugMask, GT_7CLASS, "CLK_Set_32KHz: " ++ "Failed to set to 32KHz %s, CLK dev id = %d\n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ status = DSP_EFAIL; ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== CLK_Disable ======== ++ * Purpose: ++ * Disable the clock. ++ * ++*/ ++DSP_STATUS CLK_Disable(IN enum SERVICES_ClkId clk_id) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct clk *pClk; ++ s32 clkUseCnt; ++ ++ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); ++ GT_2trace(CLK_debugMask, GT_6CLASS, "CLK_Disable: CLK %s, " ++ "CLK dev id = %d\n", SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ ++ pClk = SERVICES_Clks[clk_id].clk_handle; ++ ++ clkUseCnt = CLK_Get_UseCnt(clk_id); ++ if (clkUseCnt == -1) { ++ pr_err("CLK_Disable: failed to get CLK Use count for CLK %s," ++ "CLK dev id = %d\n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ } else if (clkUseCnt == 0) { ++ GT_2trace(CLK_debugMask, GT_4CLASS, "CLK_Disable: CLK %s," ++ "CLK dev id= %d is already disabled\n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ return status; ++ } ++ if (clk_id == SERVICESCLK_ssi_ick) ++ SSI_Clk_Prepare(false); ++ ++ if (pClk) { ++ clk_disable(pClk); ++ } else { ++ pr_err("CLK_Disable: failed to get CLK %s," ++ "CLK dev id = %d\n", ++ SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ status = DSP_EFAIL; ++ } ++ return status; ++} ++ ++/* ++ * ======== CLK_GetRate ======== ++ * Purpose: ++ * GetClock Speed. 
++ * ++ */ ++ ++DSP_STATUS CLK_GetRate(IN enum SERVICES_ClkId clk_id, u32 *speedKhz) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct clk *pClk; ++ u32 clkSpeedHz; ++ ++ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); ++ *speedKhz = 0x0; ++ ++ GT_2trace(CLK_debugMask, GT_7CLASS, "CLK_GetRate: CLK %s, " ++ "CLK dev Id = %d \n", SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ pClk = SERVICES_Clks[clk_id].clk_handle; ++ if (pClk) { ++ clkSpeedHz = clk_get_rate(pClk); ++ *speedKhz = clkSpeedHz / 1000; ++ GT_2trace(CLK_debugMask, GT_6CLASS, ++ "CLK_GetRate: clkSpeedHz = %d , " ++ "speedinKhz=%d\n", clkSpeedHz, *speedKhz); ++ } else { ++ GT_2trace(CLK_debugMask, GT_7CLASS, ++ "CLK_GetRate: failed to get CLK %s, " ++ "CLK dev Id = %d\n", SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ status = DSP_EFAIL; ++ } ++ return status; ++} ++ ++s32 CLK_Get_UseCnt(IN enum SERVICES_ClkId clk_id) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct clk *pClk; ++ s32 useCount = -1; ++ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); ++ ++ pClk = SERVICES_Clks[clk_id].clk_handle; ++ ++ if (pClk) { ++ useCount = pClk->usecount; /* FIXME: usecount shouldn't be used */ ++ } else { ++ GT_2trace(CLK_debugMask, GT_7CLASS, ++ "CLK_GetRate: failed to get CLK %s, " ++ "CLK dev Id = %d\n", SERVICES_Clks[clk_id].clk_name, ++ SERVICES_Clks[clk_id].id); ++ status = DSP_EFAIL; ++ } ++ return useCount; ++} ++ ++void SSI_Clk_Prepare(bool FLAG) ++{ ++ u32 ssi_sysconfig; ++ ssi_sysconfig = __raw_readl((SSI_BASE) + 0x10); ++ ++ if (FLAG) { ++ /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to ++ * no idle ++ */ ++ ssi_sysconfig = 0x1011; ++ } else { ++ /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to ++ * forced idle ++ */ ++ ssi_sysconfig = 0x1; ++ } ++ __raw_writel((u32)ssi_sysconfig, SSI_BASE + 0x10); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/csl.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/csl.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/csl.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/csl.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,173 @@ ++/* ++ * csl.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== cslce.c ======== ++ * Purpose: ++ * Provides platform independent C Standard library functions. ++ * ++ * Public Functions: ++ * CSL_Atoi ++ * CSL_Exit ++ * CSL_Init ++ * CSL_NumToAscii ++ * CSL_Strtokr ++ * ++ *! Revision History: ++ *! ================ ++ *! 07-Aug-2002 jeh: Added CSL_Strtokr(). ++ *! 21-Sep-2001 jeh: Added CSL_Strncmp(). Alphabetized functions. ++ *! 22-Nov-2000 map: Added CSL_Atoi and CSL_Strtok ++ *! 19-Nov-2000 kc: Added CSL_ByteSwap. ++ *! 09-Nov-2000 kc: Added CSL_Strncat. ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 15-Dec-1999 ag: Removed incorrect assertion CSL_NumToAscii() ++ *! 29-Oct-1999 kc: Added CSL_Wstrlen for UNICODE strings. ++ *! 
30-Sep-1999 ag: Removed DBC assertion (!CSL_DebugMask.flags) in ++ * CSP_Init(). ++ *! 20-Sep-1999 ag: Added CSL_WcharToAnsi(). ++ *! Removed call to GT_set(). ++ *! 19-Jan-1998 cr: Code review cleanup. ++ *! 29-Dec-1997 cr: Made platform independant, using MS CRT code, and ++ *! combined csl32.c csl95.c and cslnt.c into csl.c. Also ++ *! changed CSL_lowercase to CSL_Uppercase. ++ *! 21-Aug-1997 gp: Fix to CSL_strcpyn to initialize Source string, the NT way. ++ *! 25-Jun-1997 cr: Created from csl95, added CSL_strcmp. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* Is character c in the string pstrDelim? */ ++#define IsDelimiter(c, pstrDelim) ((c != '\0') && \ ++ (strchr(pstrDelim, c) != NULL)) ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask CSL_DebugMask = { NULL, NULL }; /* GT trace var. */ ++#endif ++ ++/* ++ * ======== CSL_Exit ======== ++ * Purpose: ++ * Discontinue usage of the CSL module. ++ */ ++void CSL_Exit(void) ++{ ++ GT_0trace(CSL_DebugMask, GT_5CLASS, "CSL_Exit\n"); ++} ++ ++/* ++ * ======== CSL_Init ======== ++ * Purpose: ++ * Initialize the CSL module's private state. ++ */ ++bool CSL_Init(void) ++{ ++ GT_create(&CSL_DebugMask, "CS"); ++ ++ GT_0trace(CSL_DebugMask, GT_5CLASS, "CSL_Init\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== CSL_NumToAscii ======== ++ * Purpose: ++ * Convert a 1 or 2 digit number to a 2 digit string. ++ */ ++void CSL_NumToAscii(OUT char *pstrNumber, u32 dwNum) ++{ ++ char tens; ++ ++ DBC_Require(dwNum < 100); ++ ++ if (dwNum < 100) { ++ tens = (char) dwNum / 10; ++ dwNum = dwNum % 10; ++ ++ if (tens) { ++ pstrNumber[0] = tens + '0'; ++ pstrNumber[1] = (char) dwNum + '0'; ++ pstrNumber[2] = '\0'; ++ } else { ++ pstrNumber[0] = (char) dwNum + '0'; ++ pstrNumber[1] = '\0'; ++ } ++ } else { ++ pstrNumber[0] = '\0'; ++ } ++} ++ ++ ++ ++ ++/* ++ * ======= CSL_Strtokr ======= ++ * Purpose: ++ * Re-entrant version of strtok. ++ */ ++char *CSL_Strtokr(IN char *pstrSrc, IN CONST char *szSeparators, ++ OUT char **ppstrLast) ++{ ++ char *pstrTemp; ++ char *pstrToken; ++ ++ DBC_Require(szSeparators != NULL); ++ DBC_Require(ppstrLast != NULL); ++ DBC_Require(pstrSrc != NULL || *ppstrLast != NULL); ++ ++ /* ++ * Set string location to beginning (pstrSrc != NULL) or to the ++ * beginning of the next token. ++ */ ++ pstrTemp = (pstrSrc != NULL) ? pstrSrc : *ppstrLast; ++ if (*pstrTemp == '\0') { ++ pstrToken = NULL; ++ } else { ++ pstrToken = pstrTemp; ++ while (*pstrTemp != '\0' && !IsDelimiter(*pstrTemp, ++ szSeparators)) { ++ pstrTemp++; ++ } ++ if (*pstrTemp != '\0') { ++ while (IsDelimiter(*pstrTemp, szSeparators)) { ++ /* TODO: Shouldn't we do this for ++ * only 1 char?? */ ++ *pstrTemp = '\0'; ++ pstrTemp++; ++ } ++ } ++ ++ /* Location in string for next call */ ++ *ppstrLast = pstrTemp; ++ } ++ ++ return pstrToken; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dbg.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/dbg.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dbg.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/dbg.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,119 @@ ++/* ++ * dbg.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dbgce.c ======== ++ * Purpose: ++ * Provide debugging services for DSP/BIOS Bridge Mini Drivers. ++ * ++ * Public Functions: ++ * DBG_Exit ++ * DBG_Init ++ * DBG_Trace ++ * ++ * Notes: ++ * Requires gt.h. ++ * ++ * This implementation does not create GT masks on a per WMD basis. ++ * There is currently no facility for a WMD to alter the GT mask. ++ * ++ *! Revision History: ++ *! ================ ++ *! 15-Feb-2000 rr: DBG_Trace prints based on the DebugZones. ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 29-Oct-1999 kc: Cleaned up for code review. ++ *! 10-Oct-1997 cr: Added DBG_Printf service. ++ *! 28-May-1997 cr: Added reference counting. ++ *! 23-May-1997 cr: Updated DBG_Trace to new gt interface. ++ *! 29-May-1996 gp: Removed WCD_ prefix. ++ *! 20-May-1996 gp: Remove DEBUG conditional compilation. ++ *! 15-May-1996 gp: Created. ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask DBG_debugMask = { NULL, NULL }; /* GT trace var. */ ++#endif ++ ++#if (defined(DEBUG) || defined (DDSP_DEBUG_PRODUCT)) && GT_TRACE ++ ++/* ++ * ======== DBG_Init ======== ++ * Purpose: ++ * Ensures trace capability is set up for link drivers. ++ */ ++bool DBG_Init(void) ++{ ++ GT_create(&DBG_debugMask, "WD"); /* for WmD (link driver) debug */ ++ ++ GT_0trace(DBG_debugMask, GT_5CLASS, "DBG_Init\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== DBG_Trace ======== ++ * Purpose: ++ * Output a trace message to the debugger, if the given trace level ++ * is unmasked. ++ */ ++DSP_STATUS DBG_Trace(u8 bLevel, char *pstrFormat, ...) ++{ ++ s32 arg1, arg2, arg3, arg4, arg5, arg6; ++ va_list va; ++ ++ va_start(va, pstrFormat); ++ ++ arg1 = va_arg(va, s32); ++ arg2 = va_arg(va, s32); ++ arg3 = va_arg(va, s32); ++ arg4 = va_arg(va, s32); ++ arg5 = va_arg(va, s32); ++ arg6 = va_arg(va, s32); ++ ++ va_end(va); ++ ++ if (bLevel & *(DBG_debugMask).flags) ++ printk(pstrFormat, arg1, arg2, arg3, arg4, arg5, arg6); ++ ++ return DSP_SOK; ++} ++ ++/* ++ * ======== DBG_Exit ======== ++ * Purpose: ++ * Discontinue usage of the DBG module. ++ */ ++void DBG_Exit(void) ++{ ++ GT_0trace(DBG_debugMask, GT_5CLASS, "DBG_Exit\n"); ++} ++ ++#endif /* (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dpc.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/dpc.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dpc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/dpc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,274 @@ ++/* ++ * dpc.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== dpcce.c ======== ++ * Purpose: ++ * Deferred Procedure Call(DPC) Services. ++ * ++ * ++ * Public Functions: ++ * DPC_Create ++ * DPC_Destroy ++ * DPC_Exit ++ * DPC_Init ++ * DPC_Schedule ++ * ++ *! Revision History: ++ *! ================ ++ *! 28-Mar-2001 ag: Added #ifdef CHNL_NOIPCINTR to set DPC thread priority ++ *! to THREAD_PRIORITY_IDLE for polling IPC. ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 31-Jan-2000 rr: Changes after code review.Terminate thread,handle ++ *! modified.DPC_Destroy frees the DPC_Object only on ++ *! Successful termination of the thread and the handle. ++ *! 06-Jan-1999 ag: Format cleanup for code review. ++ *! Removed DPC_[Lower|Raise]IRQL[From|To]DispatchLevel. ++ *! 10-Dec-1999 ag: Added SetProcPermissions in DPC_DeferredProcedure(). ++ *! (Needed to access client(s) CHNL buffers). ++ *! 19-Sep-1999 a0216266: Stubbed from dpcnt.c. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define SIGNATURE 0x5f435044 /* "DPC_" (in reverse). */ ++ ++/* The DPC object, passed to our priority event callback routine: */ ++struct DPC_OBJECT { ++ u32 dwSignature; /* Used for object validation. */ ++ void *pRefData; /* Argument for client's DPC. */ ++ DPC_PROC pfnDPC; /* Client's DPC. */ ++ u32 numRequested; /* Number of requested DPC's. */ ++ u32 numScheduled; /* Number of executed DPC's. */ ++ struct tasklet_struct dpc_tasklet; ++ ++#ifdef DEBUG ++ u32 cEntryCount; /* Number of times DPC reentered. */ ++ u32 numRequestedMax; /* Keep track of max pending DPC's. */ ++#endif ++ ++ spinlock_t dpc_lock; ++}; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask DPC_DebugMask = { NULL, NULL }; /* DPC Debug Mask */ ++#endif ++ ++/* ----------------------------------- Function Prototypes */ ++static void DPC_DeferredProcedure(IN unsigned long pDeferredContext); ++ ++/* ++ * ======== DPC_Create ======== ++ * Purpose: ++ * Create a DPC object, allowing a client's own DPC procedure to be ++ * scheduled for a call with client reference data. ++ */ ++DSP_STATUS DPC_Create(OUT struct DPC_OBJECT **phDPC, DPC_PROC pfnDPC, ++ void *pRefData) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DPC_OBJECT *pDPCObject = NULL; ++ ++ if ((phDPC != NULL) && (pfnDPC != NULL)) { ++ /* ++ * Allocate a DPC object to store information allowing our DPC ++ * callback to dispatch to the client's DPC. 
++ */ ++ MEM_AllocObject(pDPCObject, struct DPC_OBJECT, SIGNATURE); ++ if (pDPCObject != NULL) { ++ tasklet_init(&pDPCObject->dpc_tasklet, ++ DPC_DeferredProcedure, ++ (u32) pDPCObject); ++ /* Fill out our DPC Object: */ ++ pDPCObject->pRefData = pRefData; ++ pDPCObject->pfnDPC = pfnDPC; ++ pDPCObject->numRequested = 0; ++ pDPCObject->numScheduled = 0; ++#ifdef DEBUG ++ pDPCObject->numRequestedMax = 0; ++ pDPCObject->cEntryCount = 0; ++#endif ++ spin_lock_init(&pDPCObject->dpc_lock); ++ *phDPC = pDPCObject; ++ } else { ++ GT_0trace(DPC_DebugMask, GT_6CLASS, ++ "DPC_Create: DSP_EMEMORY\n"); ++ status = DSP_EMEMORY; ++ } ++ } else { ++ GT_0trace(DPC_DebugMask, GT_6CLASS, ++ "DPC_Create: DSP_EPOINTER\n"); ++ status = DSP_EPOINTER; ++ } ++ DBC_Ensure((DSP_FAILED(status) && (!phDPC || (phDPC && *phDPC == NULL))) ++ || DSP_SUCCEEDED(status)); ++ return status; ++} ++ ++/* ++ * ======== DPC_Destroy ======== ++ * Purpose: ++ * Cancel the last scheduled DPC, and deallocate a DPC object previously ++ * allocated with DPC_Create(). Frees the Object only if the thread ++ * and the event terminated successfuly. ++ */ ++DSP_STATUS DPC_Destroy(struct DPC_OBJECT *hDPC) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DPC_OBJECT *pDPCObject = (struct DPC_OBJECT *)hDPC; ++ ++ if (MEM_IsValidHandle(hDPC, SIGNATURE)) { ++ ++ /* Free our DPC object: */ ++ if (DSP_SUCCEEDED(status)) { ++ tasklet_kill(&pDPCObject->dpc_tasklet); ++ MEM_FreeObject(pDPCObject); ++ pDPCObject = NULL; ++ GT_0trace(DPC_DebugMask, GT_2CLASS, ++ "DPC_Destroy: SUCCESS\n"); ++ } ++ } else { ++ GT_0trace(DPC_DebugMask, GT_6CLASS, ++ "DPC_Destroy: DSP_EHANDLE\n"); ++ status = DSP_EHANDLE; ++ } ++ DBC_Ensure((DSP_SUCCEEDED(status) && pDPCObject == NULL) ++ || DSP_FAILED(status)); ++ return status; ++} ++ ++/* ++ * ======== DPC_Exit ======== ++ * Purpose: ++ * Discontinue usage of the DPC module. ++ */ ++void DPC_Exit(void) ++{ ++ GT_0trace(DPC_DebugMask, GT_5CLASS, "Entered DPC_Exit\n"); ++} ++ ++/* ++ * ======== DPC_Init ======== ++ * Purpose: ++ * Initialize the DPC module's private state. ++ */ ++bool DPC_Init(void) ++{ ++ GT_create(&DPC_DebugMask, "DP"); ++ ++ GT_0trace(DPC_DebugMask, GT_5CLASS, "Entered DPC_Init\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== DPC_Schedule ======== ++ * Purpose: ++ * Schedule a deferred procedure call to be executed at a later time. ++ * Latency and order of DPC execution is platform specific. ++ */ ++DSP_STATUS DPC_Schedule(struct DPC_OBJECT *hDPC) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DPC_OBJECT *pDPCObject = (struct DPC_OBJECT *)hDPC; ++ unsigned long flags; ++ ++ GT_1trace(DPC_DebugMask, GT_ENTER, "DPC_Schedule hDPC %x\n", hDPC); ++ if (MEM_IsValidHandle(hDPC, SIGNATURE)) { ++ /* Increment count of DPC's pending. Needs to be protected ++ * from ISRs since this function is called from process ++ * context also. */ ++ spin_lock_irqsave(&hDPC->dpc_lock, flags); ++ pDPCObject->numRequested++; ++ spin_unlock_irqrestore(&hDPC->dpc_lock, flags); ++ tasklet_schedule(&(hDPC->dpc_tasklet)); ++#ifdef DEBUG ++ if (pDPCObject->numRequested > pDPCObject->numScheduled + ++ pDPCObject->numRequestedMax) { ++ pDPCObject->numRequestedMax = pDPCObject->numRequested - ++ pDPCObject->numScheduled; ++ } ++#endif ++ /* If an interrupt occurs between incrementing numRequested and the ++ * assertion below, then DPC will get executed while returning from ++ * ISR, which will complete all requests and make numRequested equal ++ * to numScheduled, firing this assertion. 
This happens only when ++ * DPC is being scheduled in process context */ ++ } else { ++ GT_0trace(DPC_DebugMask, GT_6CLASS, ++ "DPC_Schedule: DSP_EHANDLE\n"); ++ status = DSP_EHANDLE; ++ } ++ GT_1trace(DPC_DebugMask, GT_ENTER, "DPC_Schedule status %x\n", status); ++ return status; ++} ++ ++/* ++ * ======== DeferredProcedure ======== ++ * Purpose: ++ * Main DPC routine. This is called by host OS DPC callback ++ * mechanism with interrupts enabled. ++ */ ++static void DPC_DeferredProcedure(IN unsigned long pDeferredContext) ++{ ++ struct DPC_OBJECT *pDPCObject = (struct DPC_OBJECT *)pDeferredContext; ++ /* read numRequested in local variable */ ++ u32 requested; ++ u32 serviced; ++ ++ DBC_Require(pDPCObject != NULL); ++ requested = pDPCObject->numRequested; ++ serviced = pDPCObject->numScheduled; ++ ++ GT_1trace(DPC_DebugMask, GT_ENTER, "> DPC_DeferredProcedure " ++ "pDeferredContext=%x\n", pDeferredContext); ++ /* Rollover taken care of using != instead of < */ ++ if (serviced != requested) { ++ if (pDPCObject->pfnDPC != NULL) { ++ /* Process pending DPC's: */ ++ do { ++ /* Call client's DPC: */ ++ (*(pDPCObject->pfnDPC))(pDPCObject->pRefData); ++ serviced++; ++ } while (serviced != requested); ++ } ++ pDPCObject->numScheduled = requested; ++ } ++ GT_2trace(DPC_DebugMask, GT_ENTER, ++ "< DPC_DeferredProcedure requested %d" ++ " serviced %d\n", requested, serviced); ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/kfile.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/kfile.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/kfile.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/kfile.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,335 @@ ++/* ++ * kfile.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== kfilece.c ======== ++ * Purpose: ++ * This module provides file i/o services. ++ * ++ * Public Functions: ++ * KFILE_Close ++ * KFILE_Exit ++ * KFILE_Init ++ * KFILE_Open ++ * KFILE_Read ++ * KFILE_Seek ++ * KFILE_Tell ++ * ++ *! Revision History ++ *! ================ ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 22-Nov-1999 kc: Added changes from code review. ++ *! 12-Nov-1999 kc: Enabled CSL for UNICODE/ANSI string conversions. ++ *! 30-Sep-1999 ag: Changed KFILE_Read() GT level from _ENTER to _4CLASS. ++ *! Removed GT_set(). ++ *! 25-Aug-1999 ag: Changed MEM_Calloc allocation type to MEM_PAGED. ++ *! 13-Jul-1999 a0216266(ww - TID): Stubbed from kfilent.c. 
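++ *
++ * All of the calls below run the underlying file operations with
++ * set_fs(get_ds()) so that kernel buffers can be handed to
++ * f_op->read() and f_op->llseek().  A minimal usage sketch (the
++ * path and buffer are illustrative only; the KFILE_ calls and
++ * KFILE_SEEK_SET are the ones defined in this file):
++ *
++ *   struct KFILE_FileObj *hFile;
++ *   char buf[128];
++ *   s32 nItems;
++ *
++ *   hFile = KFILE_Open("/lib/dsp/baseimage.dof", "rb");
++ *   if (hFile) {
++ *           nItems = KFILE_Read(buf, 1, sizeof(buf), hFile);
++ *           KFILE_Seek(hFile, 0, KFILE_SEEK_SET);
++ *           KFILE_Close(hFile);
++ *   }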
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define SIGNATURE 0x4c49464b /* hex code of KFIL (reversed) */ ++#define MAXFILENAMELENGTH 256 ++#define GENERAL_FAILURE 0xffffffff /* SetFilePointer error */ ++ ++/* The KFILE_FileObj abstracts the true file handle from a KFILE handle. */ ++struct KFILE_FileObj { ++ u32 dwSignature; ++ __kernel_pid_t owner_pid; /* PID of process that opened this file */ ++ char *fileName ; ++ bool isOpen ; ++ u32 size ; ++ u32 curPos ; ++ long hInternal; /* internal handle of file */ ++ struct file *fileDesc; ++ ++}; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask KFILE_debugMask = { NULL, NULL }; /* Debug mask */ ++#endif ++ ++/* ++ * ======== KFILE_Close ======== ++ * Purpose: ++ * This function closes a file's stream. ++ */ ++s32 KFILE_Close(struct KFILE_FileObj *hFile) ++{ ++ s32 cRetVal = 0; /* 0 indicates success */ ++ s32 fRetVal = 0; ++ ++ GT_1trace(KFILE_debugMask, GT_ENTER, "KFILE_Close: hFile 0x%x\n", ++ hFile); ++ ++ /* Check for valid handle */ ++ if (MEM_IsValidHandle(hFile, SIGNATURE)) { ++ /* Close file only if opened by the same process (id). Otherwise ++ * Linux closes all open file handles when process exits.*/ ++ fRetVal = filp_close(hFile->fileDesc, NULL) ; ++ if (fRetVal) { ++ cRetVal = E_KFILE_ERROR; ++ GT_1trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Close: sys_close " ++ "returned %d\n", fRetVal); ++ } ++ MEM_FreeObject(hFile); ++ } else { ++ cRetVal = E_KFILE_INVALIDHANDLE; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, "KFILE_Close: " ++ "invalid file handle\n"); ++ } ++ return cRetVal; ++} ++ ++/* ++ * ======== KFILE_Exit ======== ++ * Purpose: ++ * Decrement reference count, and free resources when reference count ++ * is 0. ++ */ ++void KFILE_Exit(void) ++{ ++ GT_0trace(KFILE_debugMask, GT_5CLASS, "KFILE_Exit\n"); ++} ++ ++/* ++ * ======== KFILE_Init ======== ++ */ ++bool KFILE_Init(void) ++{ ++ GT_create(&KFILE_debugMask, "KF"); /* "KF" for KFile */ ++ ++ GT_0trace(KFILE_debugMask, GT_5CLASS, "KFILE_Init\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== KFILE_Open ======== ++ * Purpose: ++ * Open a file for reading ONLY ++ */ ++struct KFILE_FileObj *KFILE_Open(CONST char *pszFileName, CONST char *pszMode) ++{ ++ struct KFILE_FileObj *hFile; /* file handle */ ++ DSP_STATUS status; ++ mm_segment_t fs; ++ ++ struct file*fileDesc = NULL; ++ DBC_Require(pszMode != NULL); ++ DBC_Require(pszFileName != NULL); ++ ++ GT_2trace(KFILE_debugMask, GT_ENTER, ++ "KFILE_Open: pszFileName %s, pszMode " ++ "%s\n", pszFileName, pszMode); ++ ++ /* create a KFILE object */ ++ MEM_AllocObject(hFile, struct KFILE_FileObj, SIGNATURE); ++ ++ if (hFile) { ++ fs = get_fs(); ++ set_fs(get_ds()); ++ /* Third argument is mode (permissions). 
Ignored unless creating file */ ++ fileDesc = filp_open(pszFileName, O_RDONLY, 0); ++ if ((IS_ERR(fileDesc)) || (fileDesc == NULL) || ++ (fileDesc->f_op == NULL) || (fileDesc->f_op->read == NULL) ++ || (fileDesc->f_op->llseek == NULL)) { ++ status = DSP_EFILE; ++ } else { ++ hFile->fileDesc = fileDesc; ++ hFile->fileName = (char *)pszFileName; ++ hFile->isOpen = true; ++ hFile->curPos = 0; ++ hFile->size = fileDesc->f_op->llseek(fileDesc, 0, ++ SEEK_END); ++ fileDesc->f_op->llseek(fileDesc, 0, SEEK_SET); ++ /* Return TGID instead of process handle */ ++ hFile->owner_pid = current->tgid; ++ ++ status = DSP_SOK; ++ } ++ set_fs(fs); ++ if (DSP_FAILED(status)) { ++ /* free memory, and clear handle */ ++ MEM_FreeObject(hFile); ++ hFile = NULL; ++ } ++ } else { ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Open: MEM_AllocObject failed\n"); ++ status = DSP_EMEMORY; ++ } ++ return hFile; ++} ++ ++/* ++ * ======== KFILE_Read ======== ++ * Purpose: ++ * Reads a specified number of bytes into a buffer. ++ */ ++s32 ++KFILE_Read(void __user*pBuffer, s32 cSize, s32 cCount, ++ struct KFILE_FileObj *hFile) ++{ ++ u32 dwBytesRead = 0; ++ s32 cRetVal = 0; ++ mm_segment_t fs; ++ ++ DBC_Require(pBuffer != NULL); ++ ++ GT_4trace(KFILE_debugMask, GT_4CLASS, ++ "KFILE_Read: buffer 0x%x, cSize 0x%x," ++ "cCount 0x%x, hFile 0x%x\n", pBuffer, cSize, cCount, hFile); ++ ++ /* check for valid file handle */ ++ if (MEM_IsValidHandle(hFile, SIGNATURE)) { ++ if ((cSize > 0) && (cCount > 0) && pBuffer) { ++ /* read from file */ ++ fs = get_fs(); ++ set_fs(get_ds()); ++ dwBytesRead = hFile->fileDesc->f_op->read(hFile-> ++ fileDesc, pBuffer, cSize *cCount, ++ &(hFile->fileDesc->f_pos)); ++ set_fs(fs); ++ if (dwBytesRead) { ++ cRetVal = dwBytesRead / cSize; ++ hFile->curPos += dwBytesRead; ++ DBC_Assert((dwBytesRead / cSize) <= \ ++ (u32)cCount); ++ } else { ++ cRetVal = E_KFILE_ERROR; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Read: sys_read() failed\n"); ++ } ++ } else { ++ cRetVal = DSP_EINVALIDARG; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Read: Invalid argument(s)\n"); ++ } ++ } else { ++ cRetVal = E_KFILE_INVALIDHANDLE; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Read: invalid file handle\n"); ++ } ++ ++ return cRetVal; ++} ++ ++/* ++ * ======== KFILE_Seek ======== ++ * Purpose: ++ * Sets the file position indicator. NOTE: we don't support seeking ++ * beyond the boundaries of a file. ++ */ ++s32 KFILE_Seek(struct KFILE_FileObj *hFile, s32 lOffset, s32 cOrigin) ++{ ++ s32 cRetVal = 0; /* 0 for success */ ++ loff_t dwCurPos = 0; ++ ++ struct file *fileDesc = NULL; ++ ++ GT_3trace(KFILE_debugMask, GT_ENTER, "KFILE_Seek: hFile 0x%x, " ++ "lOffset 0x%x, cOrigin 0x%x\n", ++ hFile, lOffset, cOrigin); ++ ++ /* check for valid file handle */ ++ if (MEM_IsValidHandle(hFile, SIGNATURE)) { ++ /* based on the origin flag, move the internal pointer */ ++ ++ fileDesc = hFile->fileDesc; ++ switch (cOrigin) { ++ case KFILE_SEEK_SET: ++ dwCurPos = hFile->fileDesc->f_op->llseek(hFile-> ++ fileDesc, lOffset, SEEK_SET); ++ cRetVal = ((dwCurPos >= 0) ? 0 : E_KFILE_ERROR); ++ break; ++ ++ case KFILE_SEEK_CUR: ++ dwCurPos = hFile->fileDesc->f_op->llseek(hFile-> ++ fileDesc, lOffset, SEEK_CUR); ++ cRetVal = ((dwCurPos >= 0) ? 0 : E_KFILE_ERROR); ++ break; ++ case KFILE_SEEK_END: ++ dwCurPos = hFile->fileDesc->f_op->llseek(hFile-> ++ fileDesc, lOffset, SEEK_END); ++ cRetVal = ((dwCurPos >= 0) ? 
0 : E_KFILE_ERROR); ++ break; ++ default: ++ cRetVal = E_KFILE_BADORIGINFLAG; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Seek:bad origin flag\n"); ++ break; ++ } ++ } else { ++ cRetVal = E_KFILE_INVALIDHANDLE; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Seek:invalid file handle\n"); ++ } ++ return cRetVal; ++} ++ ++/* ++ * ======== KFILE_Tell ======== ++ * Purpose: ++ * Reports the current value of the position indicator. We did not ++ * consider 64 bit long file size, which implies a 4GB file limit ++ * (2 to 32 power). ++ */ ++s32 KFILE_Tell(struct KFILE_FileObj *hFile) ++{ ++ loff_t dwCurPos = 0; ++ s32 lRetVal = E_KFILE_ERROR; ++ ++ GT_1trace(KFILE_debugMask, GT_ENTER, "KFILE_Tell: hFile 0x%x\n", hFile); ++ ++ if (MEM_IsValidHandle(hFile, SIGNATURE)) { ++ ++ /* Get current position. */ ++ dwCurPos = hFile->fileDesc->f_op->llseek(hFile->fileDesc, 0, ++ SEEK_CUR); ++ if (dwCurPos >= 0) ++ lRetVal = dwCurPos; ++ ++ } else { ++ lRetVal = E_KFILE_INVALIDHANDLE; ++ GT_0trace(KFILE_debugMask, GT_6CLASS, ++ "KFILE_Seek:invalid file handle\n"); ++ } ++ return lRetVal; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/list.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/list.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/list.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/list.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,285 @@ ++/* ++ * list.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== listce.c ======== ++ * Purpose ++ * Provides standard circular list handling functions. ++ * ++ * Public Functions: ++ * LST_Create ++ * LST_Delete ++ * LST_Exit ++ * LST_First ++ * LST_GetHead ++ * LST_Init ++ * LST_InitElem ++ * LST_InsertBefore ++ * LST_Next ++ * LST_PutTail ++ * LST_RemoveElem ++ * ++ *! Revision History ++ *! ================ ++ *! 06-Mar-2002 jeh Don't set element self to NULL in LST_RemoveElem(). ++ *! 10-Aug-2000 ag: Added LST_InsertBefore(). ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 22-Nov-1999 kc: Added changes from code review. ++ *! 10-Aug-1999 kc: Based on wsx-c18. ++ *! 16-Jun-1997 gp: Removed unnecessary enabling/disabling of interrupts around ++ *! list manipulation code. ++ *! 22-Oct-1996 gp: Added LST_RemoveElem, and LST_First/LST_Next iterators. ++ *! 10-Aug-1996 gp: Acquired from SMM for WinSPOX v. 1.1; renamed identifiers. ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask LST_debugMask = { NULL, NULL }; /* GT trace var. */ ++#endif ++ ++/* ++ * ======== LST_Create ======== ++ * Purpose: ++ * Allocates and initializes a circular list. 
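++ *
++ * The list is circular with a sentinel head: right after creation
++ * head.next and head.prev both point back at &pList->head (see the
++ * initialization below), which is also the empty-list condition.
++ * Elements embed a struct LST_ELEM and are prepared with
++ * LST_InitElem() before insertion.  Sketch only (struct my_item and
++ * its allocation are made up; the LST_ and MEM_ calls are the ones
++ * in this patch):
++ *
++ *   struct my_item {
++ *           struct LST_ELEM link;    kept first to ease casting
++ *           int payload;
++ *   };
++ *
++ *   struct LST_LIST *pList = LST_Create();
++ *   struct my_item *pItem = MEM_Calloc(sizeof(*pItem), MEM_PAGED);
++ *
++ *   LST_InitElem(&pItem->link);
++ *   LST_PutTail(pList, &pItem->link);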
++ */ ++struct LST_LIST *LST_Create(void) ++{ ++ struct LST_LIST *pList; ++ ++ GT_0trace(LST_debugMask, GT_ENTER, "LST_Create: entered\n"); ++ ++ pList = (struct LST_LIST *) MEM_Calloc(sizeof(struct LST_LIST), ++ MEM_NONPAGED); ++ if (pList != NULL) { ++ pList->head.next = &pList->head; ++ pList->head.prev = &pList->head; ++ pList->head.self = NULL; ++ } ++ ++ return pList; ++} ++ ++/* ++ * ======== LST_Delete ======== ++ * Purpose: ++ * Removes a list by freeing its control structure's memory space. ++ */ ++void LST_Delete(struct LST_LIST *pList) ++{ ++ DBC_Require(pList != NULL); ++ ++ GT_1trace(LST_debugMask, GT_ENTER, "LST_Delete: pList 0x%x\n", pList); ++ ++ MEM_Free(pList); ++} ++ ++/* ++ * ======== LST_Exit ======== ++ * Purpose: ++ * Discontinue usage of the LST module. ++ */ ++void LST_Exit(void) ++{ ++ GT_0trace(LST_debugMask, GT_5CLASS, "LST_Exit\n"); ++} ++ ++/* ++ * ======== LST_First ======== ++ * Purpose: ++ * Returns a pointer to the first element of the list, or NULL if the ++ * list is empty. ++ */ ++struct LST_ELEM *LST_First(struct LST_LIST *pList) ++{ ++ struct LST_ELEM *pElem = NULL; ++ ++ DBC_Require(pList != NULL); ++ ++ GT_1trace(LST_debugMask, GT_ENTER, "LST_First: pList 0x%x\n", pList); ++ ++ if (!LST_IsEmpty(pList)) ++ pElem = pList->head.next; ++ ++ return pElem; ++} ++ ++/* ++ * ======== LST_GetHead ======== ++ * Purpose: ++ * "Pops" the head off the list and returns a pointer to it. ++ */ ++struct LST_ELEM *LST_GetHead(struct LST_LIST *pList) ++{ ++ struct LST_ELEM *pElem; ++ ++ DBC_Require(pList != NULL); ++ ++ GT_1trace(LST_debugMask, GT_ENTER, "LST_GetHead: pList 0x%x\n", pList); ++ ++ if (LST_IsEmpty(pList)) ++ return NULL; ++ ++ /* pElem is always valid because the list cannot be empty ++ * at this point */ ++ pElem = pList->head.next; ++ pList->head.next = pElem->next; ++ pElem->next->prev = &pList->head; ++ ++ return pElem->self; ++} ++ ++/* ++ * ======== LST_Init ======== ++ * Purpose: ++ * Initialize LST module private state. ++ */ ++bool LST_Init(void) ++{ ++ GT_create(&LST_debugMask, "LS"); /* LS for LSt module */ ++ ++ GT_0trace(LST_debugMask, GT_5CLASS, "LST_Init\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== LST_InitElem ======== ++ * Purpose: ++ * Initializes a list element to default (cleared) values ++ */ ++void LST_InitElem(struct LST_ELEM *pElem) ++{ ++ DBC_Require(pElem != NULL); ++ ++ GT_1trace(LST_debugMask, GT_ENTER, "LST_InitElem: pElem 0x%x\n", pElem); ++ ++ if (pElem) { ++ pElem->next = NULL; ++ pElem->prev = NULL; ++ pElem->self = pElem; ++ } ++} ++ ++/* ++ * ======== LST_InsertBefore ======== ++ * Purpose: ++ * Insert the element before the existing element. ++ */ ++void LST_InsertBefore(struct LST_LIST *pList, struct LST_ELEM *pElem, ++ struct LST_ELEM *pElemExisting) ++{ ++ DBC_Require(pList != NULL); ++ DBC_Require(pElem != NULL); ++ DBC_Require(pElemExisting != NULL); ++ ++ GT_3trace(LST_debugMask, GT_ENTER, "LST_InsertBefore: pList 0x%x, " ++ "pElem 0x%x pElemExisting 0x%x\n", pList, pElem, ++ pElemExisting); ++ ++ pElemExisting->prev->next = pElem; ++ pElem->prev = pElemExisting->prev; ++ pElem->next = pElemExisting; ++ pElemExisting->prev = pElem; ++} ++ ++/* ++ * ======== LST_Next ======== ++ * Purpose: ++ * Returns a pointer to the next element of the list, or NULL if the ++ * next element is the head of the list or the list is empty. 
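++ *
++ * Together with LST_First() this gives the usual iteration idiom
++ * (sketch; pList is assumed to have been built as in LST_Create
++ * above, and the loop body is up to the caller):
++ *
++ *   struct LST_ELEM *pElem;
++ *
++ *   for (pElem = LST_First(pList); pElem != NULL;
++ *        pElem = LST_Next(pList, pElem)) {
++ *           ... use the containing element, e.g. via pElem->self ...
++ *   }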
++ */ ++struct LST_ELEM *LST_Next(struct LST_LIST *pList, struct LST_ELEM *pCurElem) ++{ ++ struct LST_ELEM *pNextElem = NULL; ++ ++ DBC_Require(pList != NULL); ++ DBC_Require(pCurElem != NULL); ++ ++ GT_2trace(LST_debugMask, GT_ENTER, ++ "LST_Next: pList 0x%x, pCurElem 0x%x\n", ++ pList, pCurElem); ++ ++ if (!LST_IsEmpty(pList)) { ++ if (pCurElem->next != &pList->head) ++ pNextElem = pCurElem->next; ++ } ++ ++ return pNextElem; ++} ++ ++/* ++ * ======== LST_PutTail ======== ++ * Purpose: ++ * Adds the specified element to the tail of the list ++ */ ++void LST_PutTail(struct LST_LIST *pList, struct LST_ELEM *pElem) ++{ ++ DBC_Require(pList != NULL); ++ DBC_Require(pElem != NULL); ++ ++ GT_2trace(LST_debugMask, GT_ENTER, ++ "LST_PutTail: pList 0x%x, pElem 0x%x\n", ++ pList, pElem); ++ ++ pElem->prev = pList->head.prev; ++ pElem->next = &pList->head; ++ pList->head.prev = pElem; ++ pElem->prev->next = pElem; ++ ++ DBC_Ensure(!LST_IsEmpty(pList)); ++} ++ ++/* ++ * ======== LST_RemoveElem ======== ++ * Purpose: ++ * Removes (unlinks) the given element from the list, if the list is not ++ * empty. Does not free the list element. ++ */ ++void LST_RemoveElem(struct LST_LIST *pList, struct LST_ELEM *pCurElem) ++{ ++ DBC_Require(pList != NULL); ++ DBC_Require(pCurElem != NULL); ++ ++ GT_2trace(LST_debugMask, GT_ENTER, ++ "LST_RemoveElem: pList 0x%x, pCurElem " ++ "0x%x\n", pList, pCurElem); ++ ++ if (!LST_IsEmpty(pList)) { ++ pCurElem->prev->next = pCurElem->next; ++ pCurElem->next->prev = pCurElem->prev; ++ ++ /* set elem fields to NULL to prevent illegal references */ ++ pCurElem->next = NULL; ++ pCurElem->prev = NULL; ++ } ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/mem.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/mem.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/mem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,628 @@ ++/* ++ * mem.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== mem.c ======== ++ * Purpose: ++ * Implementation of platform specific memory services. ++ * ++ * Public Functions: ++ * MEM_Alloc ++ * MEM_AllocPhysMem ++ * MEM_Calloc ++ * MEM_Exit ++ * MEM_FlushCache ++ * MEM_Free ++ * MEM_FreePhysMem ++ * MEM_Init ++ * MEM_ExtPhysPoolInit ++ * ++ *! Revision History: ++ *! ================= ++ *! 18-Jan-2004 hp: Added support for External physical memory pool ++ *! 19-Apr-2004 sb: Added Alloc/Free PhysMem, FlushCache, VirtualToPhysical ++ *! 01-Sep-2001 ag: Code cleanup. ++ *! 02-May-2001 ag: MEM_[UnMap]LinearAddress revamped to align Phys to Virt. ++ *! Set PAGE_PHYSICAL if phy addr <= 512MB. Opposite uSoft doc! ++ *! 29-Aug-2000 rr: MEM_LinearAddress does not check for 512MB for non-x86. ++ *! 28-Mar-2000 rr: MEM_LinearAddress changed.Handles address larger than 512MB ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 
22-Nov-1999 kc: Added changes from code review. ++ *! 16-Aug-1999 kc: modified for WinCE. ++ *! 20-Mar-1999 ag: SP 4 fix in MEM_UMBCalloc(). ++ *! Mdl offset now ORed not added to userBuf. ++ *! 23-Dec-1997 cr: Code review changes. ++ *! 08-Dec-1997 cr: Prepared for code review. ++ *! 24-Jun-1997 cr: Created. ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++/* ----------------------------------- Defines */ ++#define MEM_512MB 0x1fffffff ++#define memInfoSign 0x464E494D /* "MINF" (in reverse). */ ++ ++#ifdef DEBUG ++#define MEM_CHECK /* Use to detect source of memory leaks */ ++#endif ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask MEM_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static u32 cRefs; /* module reference count */ ++ ++static bool extPhysMemPoolEnabled; ++ ++struct extPhysMemPool { ++ u32 physMemBase; ++ u32 physMemSize; ++ u32 virtMemBase; ++ u32 nextPhysAllocPtr; ++}; ++ ++static struct extPhysMemPool extMemPool; ++ ++/* Information about each element allocated on heap */ ++struct memInfo { ++ struct LST_ELEM link; /* Must be first */ ++ size_t size; ++ void *caller; ++ u32 dwSignature; /* Should be last */ ++}; ++ ++#ifdef MEM_CHECK ++ ++/* ++ * This structure holds a linked list to all memory elements allocated on ++ * heap by DSP/BIOS Bridge. This is used to report memory leaks and free ++ * such elements while removing the DSP/BIOS Bridge driver ++ */ ++struct memMan { ++ struct LST_LIST lst; ++ spinlock_t lock; ++}; ++ ++static struct memMan mMan; ++ ++/* ++ * These functions are similar to LST_PutTail and LST_RemoveElem and are ++ * duplicated here to make MEM independent of LST ++ */ ++static inline void MLST_PutTail(struct LST_LIST *pList, struct LST_ELEM *pElem) ++{ ++ pElem->prev = pList->head.prev; ++ pElem->next = &pList->head; ++ pList->head.prev = pElem; ++ pElem->prev->next = pElem; ++ pElem->self = pElem; ++} ++ ++static inline void MLST_RemoveElem(struct LST_LIST *pList, ++ struct LST_ELEM *pCurElem) ++{ ++ pCurElem->prev->next = pCurElem->next; ++ pCurElem->next->prev = pCurElem->prev; ++ pCurElem->next = NULL; ++ pCurElem->prev = NULL; ++} ++ ++static void MEM_Check(void) ++{ ++ struct memInfo *pMem; ++ struct LST_ELEM *last = &mMan.lst.head; ++ struct LST_ELEM *curr = mMan.lst.head.next; ++ ++ if (!LST_IsEmpty(&mMan.lst)) { ++ GT_0trace(MEM_debugMask, GT_7CLASS, "*** MEMORY LEAK ***\n"); ++ GT_0trace(MEM_debugMask, GT_7CLASS, ++ "Addr Size Caller\n"); ++ while (curr != last) { ++ pMem = (struct memInfo *)curr; ++ curr = curr->next; ++ if ((u32)pMem > PAGE_OFFSET && ++ MEM_IsValidHandle(pMem, memInfoSign)) { ++ GT_3trace(MEM_debugMask, GT_7CLASS, ++ "%lx %d\t [<%p>]\n", ++ (u32) pMem + sizeof(struct memInfo), ++ pMem->size, pMem->caller); ++ MLST_RemoveElem(&mMan.lst, ++ (struct LST_ELEM *) pMem); ++ kfree(pMem); ++ } else { ++ GT_1trace(MEM_debugMask, GT_7CLASS, ++ "Invalid allocation or " ++ "Buffer underflow at %x\n", ++ (u32)pMem + sizeof(struct memInfo)); ++ break; ++ } ++ } ++ } ++ DBC_Ensure(LST_IsEmpty(&mMan.lst)); ++} ++ ++#endif ++ ++void MEM_ExtPhysPoolInit(u32 poolPhysBase, u32 poolSize) ++{ ++ u32 poolVirtBase; ++ ++ /* get the virtual address for the physical memory pool passed */ ++ poolVirtBase = 
(u32)ioremap(poolPhysBase, poolSize); ++ ++ if ((void **)poolVirtBase == NULL) { ++ GT_0trace(MEM_debugMask, GT_7CLASS, ++ "[PHYS_POOL]Mapping External " ++ "physical memory to virt failed \n"); ++ extPhysMemPoolEnabled = false; ++ } else { ++ extMemPool.physMemBase = poolPhysBase; ++ extMemPool.physMemSize = poolSize; ++ extMemPool.virtMemBase = poolVirtBase; ++ extMemPool.nextPhysAllocPtr = poolPhysBase; ++ extPhysMemPoolEnabled = true; ++ GT_3trace(MEM_debugMask, GT_1CLASS, ++ "ExtMemory Pool details " "Pool" ++ "Physical mem base = %0x " "Pool Physical mem size " ++ "= %0x" "Pool Virtual mem base = %0x \n", ++ poolPhysBase, poolSize, poolVirtBase); ++ } ++} ++ ++static void MEM_ExtPhysPoolRelease(void) ++{ ++ GT_0trace(MEM_debugMask, GT_1CLASS, ++ "Releasing External memory pool \n"); ++ if (extPhysMemPoolEnabled) { ++ iounmap((void *)(extMemPool.virtMemBase)); ++ extPhysMemPoolEnabled = false; ++ } ++} ++ ++/* ++ * ======== MEM_ExtPhysMemAlloc ======== ++ * Purpose: ++ * Allocate physically contiguous, uncached memory from external memory pool ++ */ ++ ++static void *MEM_ExtPhysMemAlloc(u32 bytes, u32 align, OUT u32 *pPhysAddr) ++{ ++ u32 newAllocPtr; ++ u32 offset; ++ u32 virtAddr; ++ ++ GT_2trace(MEM_debugMask, GT_1CLASS, ++ "Ext Memory Allocation" "bytes=0x%x , " ++ "align=0x%x \n", bytes, align); ++ if (align == 0) { ++ GT_0trace(MEM_debugMask, GT_7CLASS, ++ "ExtPhysical Memory Allocation " ++ "No alignment request in allocation call !! \n"); ++ align = 1; ++ } ++ if (bytes > ((extMemPool.physMemBase + extMemPool.physMemSize) ++ - extMemPool.nextPhysAllocPtr)) { ++ GT_1trace(MEM_debugMask, GT_7CLASS, ++ "ExtPhysical Memory Allocation " ++ "unable to allocate memory for bytes = 0x%x \n", ++ bytes); ++ pPhysAddr = NULL; ++ return NULL; ++ } else { ++ offset = (extMemPool.nextPhysAllocPtr & (align - 1)); ++ if (offset == 0) ++ newAllocPtr = extMemPool.nextPhysAllocPtr; ++ else ++ newAllocPtr = (extMemPool.nextPhysAllocPtr) + ++ (align - offset); ++ if ((newAllocPtr + bytes) <= ++ (extMemPool.physMemBase + extMemPool.physMemSize)) { ++ /* we can allocate */ ++ *pPhysAddr = newAllocPtr; ++ extMemPool.nextPhysAllocPtr = newAllocPtr + bytes; ++ virtAddr = extMemPool.virtMemBase + (newAllocPtr - ++ extMemPool.physMemBase); ++ GT_2trace(MEM_debugMask, GT_1CLASS, ++ "Ext Memory Allocation succedded " ++ "phys address=0x%x , virtaddress=0x%x \n", ++ newAllocPtr, virtAddr); ++ return (void *)virtAddr; ++ } else { ++ *pPhysAddr = 0; ++ return NULL; ++ } ++ } ++} ++ ++/* ++ * ======== MEM_Alloc ======== ++ * Purpose: ++ * Allocate memory from the paged or non-paged pools. ++ */ ++void *MEM_Alloc(u32 cBytes, enum MEM_POOLATTRS type) ++{ ++ struct memInfo *pMem = NULL; ++ ++ GT_2trace(MEM_debugMask, GT_ENTER, ++ "MEM_Alloc: cBytes 0x%x\ttype 0x%x\n", cBytes, type); ++ if (cBytes > 0) { ++ switch (type) { ++ case MEM_NONPAGED: ++ /* If non-paged memory required, see note at top of file. */ ++ case MEM_PAGED: ++#ifndef MEM_CHECK ++ pMem = kmalloc(cBytes, ++ (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL); ++#else ++ pMem = kmalloc(cBytes + sizeof(struct memInfo), ++ (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL); ++ if (pMem) { ++ pMem->size = cBytes; ++ pMem->caller = __builtin_return_address(0); ++ pMem->dwSignature = memInfoSign; ++ ++ spin_lock(&mMan.lock); ++ MLST_PutTail(&mMan.lst, ++ (struct LST_ELEM *)pMem); ++ spin_unlock(&mMan.lock); ++ ++ pMem = (void *)((u32)pMem + ++ sizeof(struct memInfo)); ++ } ++#endif ++ break; ++ case MEM_LARGEVIRTMEM: ++#ifndef MEM_CHECK ++ pMem = vmalloc(cBytes); ++#else ++ pMem = vmalloc(cBytes + sizeof(struct memInfo)); ++ if (pMem) { ++ pMem->size = cBytes; ++ pMem->caller = __builtin_return_address(0); ++ pMem->dwSignature = memInfoSign; ++ ++ spin_lock(&mMan.lock); ++ MLST_PutTail(&mMan.lst, ++ (struct LST_ELEM *) pMem); ++ spin_unlock(&mMan.lock); ++ ++ pMem = (void *)((u32)pMem + ++ sizeof(struct memInfo)); ++ } ++#endif ++ break; ++ ++ default: ++ GT_0trace(MEM_debugMask, GT_6CLASS, ++ "MEM_Alloc: unexpected " ++ "MEM_POOLATTRS value\n"); ++ break; ++ } ++ } ++ ++ return pMem; ++} ++ ++/* ++ * ======== MEM_AllocPhysMem ======== ++ * Purpose: ++ * Allocate physically contiguous, uncached memory ++ */ ++void *MEM_AllocPhysMem(u32 cBytes, u32 ulAlign, OUT u32 *pPhysicalAddress) ++{ ++ void *pVaMem = NULL; ++ dma_addr_t paMem; ++ ++ DBC_Require(cRefs > 0); ++ ++ GT_2trace(MEM_debugMask, GT_ENTER, ++ "MEM_AllocPhysMem: cBytes 0x%x\tulAlign" ++ "0x%x\n", cBytes, ulAlign); ++ ++ if (cBytes > 0) { ++ if (extPhysMemPoolEnabled) { ++ pVaMem = MEM_ExtPhysMemAlloc(cBytes, ulAlign, ++ (u32 *)&paMem); ++ } else ++ pVaMem = dma_alloc_coherent(NULL, cBytes, &paMem, ++ (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL); ++ if (pVaMem == NULL) { ++ *pPhysicalAddress = 0; ++ GT_1trace(MEM_debugMask, GT_6CLASS, ++ "MEM_AllocPhysMem failed: " ++ "0x%x\n", pVaMem); ++ } else { ++ *pPhysicalAddress = paMem; ++ } ++ } ++ return pVaMem; ++} ++ ++/* ++ * ======== MEM_Calloc ======== ++ * Purpose: ++ * Allocate zero-initialized memory from the paged or non-paged pools. ++ */ ++void *MEM_Calloc(u32 cBytes, enum MEM_POOLATTRS type) ++{ ++ struct memInfo *pMem = NULL; ++ ++ GT_2trace(MEM_debugMask, GT_ENTER, ++ "MEM_Calloc: cBytes 0x%x\ttype 0x%x\n", ++ cBytes, type); ++ ++ if (cBytes > 0) { ++ switch (type) { ++ case MEM_NONPAGED: ++ /* If non-paged memory required, see note at top of file. */ ++ case MEM_PAGED: ++#ifndef MEM_CHECK ++ pMem = kmalloc(cBytes, ++ (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL); ++ if (pMem) ++ memset(pMem, 0, cBytes); ++ ++#else ++ pMem = kmalloc(cBytes + sizeof(struct memInfo), ++ (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL); ++ if (pMem) { ++ memset((void *)((u32)pMem + ++ sizeof(struct memInfo)), 0, cBytes); ++ pMem->size = cBytes; ++ pMem->caller = __builtin_return_address(0); ++ pMem->dwSignature = memInfoSign; ++ spin_lock(&mMan.lock); ++ MLST_PutTail(&mMan.lst, ++ (struct LST_ELEM *) pMem); ++ spin_unlock(&mMan.lock); ++ pMem = (void *)((u32)pMem + ++ sizeof(struct memInfo)); ++ } ++#endif ++ break; ++ case MEM_LARGEVIRTMEM: ++#ifndef MEM_CHECK ++ pMem = vmalloc(cBytes); ++ if (pMem) ++ memset(pMem, 0, cBytes); ++#else ++ pMem = vmalloc(cBytes + sizeof(struct memInfo)); ++ if (pMem) { ++ memset((void *)((u32)pMem + ++ sizeof(struct memInfo)), 0, cBytes); ++ pMem->size = cBytes; ++ pMem->caller = __builtin_return_address(0); ++ pMem->dwSignature = memInfoSign; ++ spin_lock(&mMan.lock); ++ MLST_PutTail(&mMan.lst, (struct LST_ELEM *) ++ pMem); ++ spin_unlock(&mMan.lock); ++ pMem = (void *)((u32)pMem + ++ sizeof(struct memInfo)); ++ } ++#endif ++ break; ++ default: ++ GT_1trace(MEM_debugMask, GT_6CLASS, ++ "MEM_Calloc: unexpected " ++ "MEM_POOLATTRS value 0x%x\n", type); ++ break; ++ } ++ } ++ ++ return pMem; ++} ++ ++/* ++ * ======== MEM_Exit ======== ++ * Purpose: ++ * Discontinue usage of the MEM module. ++ */ ++void MEM_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(MEM_debugMask, GT_5CLASS, "MEM_Exit: cRefs 0x%x\n", cRefs); ++ ++ cRefs--; ++#ifdef MEM_CHECK ++ if (cRefs == 0) ++ MEM_Check(); ++ ++#endif ++ MEM_ExtPhysPoolRelease(); ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== MEM_FlushCache ======== ++ * Purpose: ++ * Flush cache ++ */ ++void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType) ++{ ++ DBC_Require(cRefs > 0); ++ ++ switch (FlushType) { ++ /* invalidate only */ ++ case PROC_INVALIDATE_MEM: ++ dmac_inv_range(pMemBuf, pMemBuf + cBytes); ++ outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf + ++ cBytes)); ++ break; ++ /* writeback only */ ++ case PROC_WRITEBACK_MEM: ++ dmac_clean_range(pMemBuf, pMemBuf + cBytes); ++ outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf + ++ cBytes)); ++ break; ++ /* writeback and invalidate */ ++ case PROC_WRITEBACK_INVALIDATE_MEM: ++ dmac_flush_range(pMemBuf, pMemBuf + cBytes); ++ outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf + ++ cBytes)); ++ break; ++ default: ++ GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid " ++ "FlushMemType 0x%x\n", FlushType); ++ break; ++ } ++ ++} ++ ++/* ++ * ======== MEM_VFree ======== ++ * Purpose: ++ * Free the given block of system memory in virtual space. ++ */ ++void MEM_VFree(IN void *pMemBuf) ++{ ++#ifdef MEM_CHECK ++ struct memInfo *pMem = (void *)((u32)pMemBuf - sizeof(struct memInfo)); ++#endif ++ ++ DBC_Require(pMemBuf != NULL); ++ ++ GT_1trace(MEM_debugMask, GT_ENTER, "MEM_VFree: pMemBufs 0x%x\n", ++ pMemBuf); ++ ++ if (pMemBuf) { ++#ifndef MEM_CHECK ++ vfree(pMemBuf); ++#else ++ if (pMem) { ++ if (pMem->dwSignature == memInfoSign) { ++ spin_lock(&mMan.lock); ++ MLST_RemoveElem(&mMan.lst, ++ (struct LST_ELEM *) pMem); ++ spin_unlock(&mMan.lock); ++ pMem->dwSignature = 0; ++ vfree(pMem); ++ } else { ++ GT_1trace(MEM_debugMask, GT_7CLASS, ++ "Invalid allocation or " ++ "Buffer underflow at %x\n", ++ (u32) pMem + sizeof(struct memInfo)); ++ } ++ } ++#endif ++ } ++} ++ ++/* ++ * ======== MEM_Free ======== ++ * Purpose: ++ * Free the given block of system memory. 
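++ *
++ * With MEM_CHECK defined the allocators return a pointer just past
++ * a bookkeeping header, so this function steps back by
++ * sizeof(struct memInfo) to recover it:
++ *
++ *   kmalloc'd block:   [ struct memInfo | caller's cBytes ... ]
++ *                                        ^ pointer handed out
++ *
++ * Only buffers obtained from MEM_Alloc() or MEM_Calloc() with
++ * MEM_PAGED or MEM_NONPAGED belong here; MEM_LARGEVIRTMEM
++ * allocations must be released with MEM_VFree() above instead.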
++ */ ++void MEM_Free(IN void *pMemBuf) ++{ ++#ifdef MEM_CHECK ++ struct memInfo *pMem = (void *)((u32)pMemBuf - sizeof(struct memInfo)); ++#endif ++ ++ DBC_Require(pMemBuf != NULL); ++ ++ GT_1trace(MEM_debugMask, GT_ENTER, "MEM_Free: pMemBufs 0x%x\n", ++ pMemBuf); ++ ++ if (pMemBuf) { ++#ifndef MEM_CHECK ++ kfree(pMemBuf); ++#else ++ if (pMem) { ++ if (pMem->dwSignature == memInfoSign) { ++ spin_lock(&mMan.lock); ++ MLST_RemoveElem(&mMan.lst, ++ (struct LST_ELEM *) pMem); ++ spin_unlock(&mMan.lock); ++ pMem->dwSignature = 0; ++ kfree(pMem); ++ } else { ++ GT_1trace(MEM_debugMask, GT_7CLASS, ++ "Invalid allocation or " ++ "Buffer underflow at %x\n", ++ (u32) pMem + sizeof(struct memInfo)); ++ } ++ } ++#endif ++ } ++} ++ ++/* ++ * ======== MEM_FreePhysMem ======== ++ * Purpose: ++ * Free the given block of physically contiguous memory. ++ */ ++void MEM_FreePhysMem(void *pVirtualAddress, u32 pPhysicalAddress, ++ u32 cBytes) ++{ ++ DBC_Require(cRefs > 0); ++ DBC_Require(pVirtualAddress != NULL); ++ ++ GT_1trace(MEM_debugMask, GT_ENTER, "MEM_FreePhysMem: pVirtualAddress " ++ "0x%x\n", pVirtualAddress); ++ ++ if (!extPhysMemPoolEnabled) ++ dma_free_coherent(NULL, cBytes, pVirtualAddress, ++ pPhysicalAddress); ++} ++ ++/* ++ * ======== MEM_Init ======== ++ * Purpose: ++ * Initialize MEM module private state. ++ */ ++bool MEM_Init(void) ++{ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ GT_create(&MEM_debugMask, "MM"); /* MM for MeM module */ ++ ++#ifdef MEM_CHECK ++ mMan.lst.head.next = &mMan.lst.head; ++ mMan.lst.head.prev = &mMan.lst.head; ++ mMan.lst.head.self = NULL; ++ spin_lock_init(&mMan.lock); ++#endif ++ ++ } ++ ++ cRefs++; ++ ++ GT_1trace(MEM_debugMask, GT_5CLASS, "MEM_Init: cRefs 0x%x\n", cRefs); ++ ++ DBC_Ensure(cRefs > 0); ++ ++ return true; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/ntfy.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/ntfy.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/ntfy.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/ntfy.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,329 @@ ++/* ++ * ntfy.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== ntfyce.c ======== ++ * Purpose: ++ * Manage lists of notification events. ++ * ++ * Public Functions: ++ * NTFY_Create ++ * NTFY_Delete ++ * NTFY_Exit ++ * NTFY_Init ++ * NTFY_Notify ++ * NTFY_Register ++ * ++ *! Revision History: ++ *! ================= ++ *! 06-Feb-2003 kc Removed DSP_POSTMESSAGE related code. ++ *! 05-Nov-2001 kc Updated DSP_HNOTIFICATION structure. ++ *! 10-May-2001 jeh Removed SERVICES module init/exit from NTFY_Init/Exit. ++ *! NTFY_Register() returns DSP_ENOTIMPL for all but ++ *! DSP_SIGNALEVENT. ++ *! 12-Oct-2000 jeh Use MEM_IsValidHandle(). ++ *! 07-Sep-2000 jeh Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define NTFY_SIGNATURE 0x5946544e /* "YFTN" */ ++ ++/* ++ * ======== NTFY_OBJECT ======== ++ */ ++struct NTFY_OBJECT { ++ u32 dwSignature; /* For object validation */ ++ struct LST_LIST *notifyList; /* List of NOTIFICATION objects */ ++ struct SYNC_CSOBJECT *hSync; /* For critical sections */ ++}; ++ ++/* ++ * ======== NOTIFICATION ======== ++ * This object will be created when a client registers for events. ++ */ ++struct NOTIFICATION { ++ struct LST_ELEM listElem; ++ u32 uEventMask; /* Events to be notified about */ ++ u32 uNotifyType; /* Type of notification to be sent */ ++ ++ /* ++ * We keep a copy of the event name to check if the event has ++ * already been registered. (SYNC also keeps a copy of the name). ++ */ ++ char *pstrName; /* Name of event */ ++ HANDLE hEvent; /* Handle for notification */ ++ struct SYNC_OBJECT *hSync; ++}; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask NTFY_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++/* ----------------------------------- Function Prototypes */ ++static void DeleteNotify(struct NOTIFICATION *pNotify); ++ ++/* ++ * ======== NTFY_Create ======== ++ * Purpose: ++ * Create an empty list of notifications. ++ */ ++DSP_STATUS NTFY_Create(struct NTFY_OBJECT **phNtfy) ++{ ++ struct NTFY_OBJECT *pNtfy; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(phNtfy != NULL); ++ ++ *phNtfy = NULL; ++ MEM_AllocObject(pNtfy, struct NTFY_OBJECT, NTFY_SIGNATURE); ++ ++ if (pNtfy) { ++ ++ status = SYNC_InitializeDPCCS(&pNtfy->hSync); ++ if (DSP_SUCCEEDED(status)) { ++ pNtfy->notifyList = LST_Create(); ++ if (pNtfy->notifyList == NULL) { ++ (void) SYNC_DeleteCS(pNtfy->hSync); ++ MEM_FreeObject(pNtfy); ++ status = DSP_EMEMORY; ++ } else { ++ *phNtfy = pNtfy; ++ } ++ } ++ } else { ++ status = DSP_EMEMORY; ++ } ++ ++ DBC_Ensure((DSP_FAILED(status) && *phNtfy == NULL) || ++ (DSP_SUCCEEDED(status) && MEM_IsValidHandle((*phNtfy), ++ NTFY_SIGNATURE))); ++ ++ return status; ++} ++ ++/* ++ * ======== NTFY_Delete ======== ++ * Purpose: ++ * Free resources allocated in NTFY_Create. ++ */ ++void NTFY_Delete(struct NTFY_OBJECT *hNtfy) ++{ ++ struct NOTIFICATION *pNotify; ++ ++ DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); ++ ++ /* Remove any elements remaining in list */ ++ if (hNtfy->notifyList) { ++ while ((pNotify = (struct NOTIFICATION *)LST_GetHead(hNtfy-> ++ notifyList))) { ++ DeleteNotify(pNotify); ++ } ++ DBC_Assert(LST_IsEmpty(hNtfy->notifyList)); ++ LST_Delete(hNtfy->notifyList); ++ } ++ if (hNtfy->hSync) ++ (void)SYNC_DeleteCS(hNtfy->hSync); ++ ++ MEM_FreeObject(hNtfy); ++} ++ ++/* ++ * ======== NTFY_Exit ======== ++ * Purpose: ++ * Discontinue usage of NTFY module. ++ */ ++void NTFY_Exit(void) ++{ ++ GT_0trace(NTFY_debugMask, GT_5CLASS, "Entered NTFY_Exit\n"); ++} ++ ++/* ++ * ======== NTFY_Init ======== ++ * Purpose: ++ * Initialize the NTFY module. 
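++ *
++ * Beyond this one-time setup the module is used roughly as follows
++ * (sketch; MY_EVENT_MASK is a made-up event bit and hNotification
++ * is assumed to be a struct DSP_NOTIFICATION prepared by the
++ * caller, while DSP_SIGNALEVENT and the NTFY_ calls are from this
++ * file):
++ *
++ *   struct NTFY_OBJECT *hNtfy;
++ *
++ *   NTFY_Create(&hNtfy);
++ *   NTFY_Register(hNtfy, hNotification, MY_EVENT_MASK,
++ *                 DSP_SIGNALEVENT);
++ *   ...
++ *   NTFY_Notify(hNtfy, MY_EVENT_MASK);   signals registered clients
++ *   ...
++ *   NTFY_Delete(hNtfy);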
++ */ ++bool NTFY_Init(void) ++{ ++ GT_create(&NTFY_debugMask, "NY"); /* "NY" for NtfY */ ++ ++ GT_0trace(NTFY_debugMask, GT_5CLASS, "NTFY_Init()\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== NTFY_Notify ======== ++ * Purpose: ++ * Execute notify function (signal event) for every ++ * element in the notification list that is to be notified about the ++ * event specified in uEventMask. ++ */ ++void NTFY_Notify(struct NTFY_OBJECT *hNtfy, u32 uEventMask) ++{ ++ struct NOTIFICATION *pNotify; ++ ++ DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); ++ ++ /* ++ * Go through notifyList and notify all clients registered for ++ * uEventMask events. ++ */ ++ ++ (void) SYNC_EnterCS(hNtfy->hSync); ++ ++ pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList); ++ while (pNotify != NULL) { ++ if (pNotify->uEventMask & uEventMask) { ++ /* Notify */ ++ if (pNotify->uNotifyType == DSP_SIGNALEVENT) ++ (void)SYNC_SetEvent(pNotify->hSync); ++ ++ } ++ pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList, ++ (struct LST_ELEM *)pNotify); ++ } ++ ++ (void) SYNC_LeaveCS(hNtfy->hSync); ++} ++ ++/* ++ * ======== NTFY_Register ======== ++ * Purpose: ++ * Add a notification element to the list. If the notification is already ++ * registered, and uEventMask != 0, the notification will get posted for ++ * events specified in the new event mask. If the notification is already ++ * registered and uEventMask == 0, the notification will be unregistered. ++ */ ++DSP_STATUS NTFY_Register(struct NTFY_OBJECT *hNtfy, ++ struct DSP_NOTIFICATION *hNotification, ++ u32 uEventMask, u32 uNotifyType) ++{ ++ struct NOTIFICATION *pNotify; ++ struct SYNC_ATTRS syncAttrs; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); ++ ++ if (hNotification == NULL) ++ status = DSP_EHANDLE; ++ ++ /* Return DSP_ENOTIMPL if uNotifyType is not supported */ ++ if (DSP_SUCCEEDED(status)) { ++ if (!IsValidNotifyMask(uNotifyType)) ++ status = DSP_ENOTIMPL; ++ ++ } ++ ++ if (DSP_FAILED(status)) ++ return status; ++ ++ (void)SYNC_EnterCS(hNtfy->hSync); ++ ++ pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList); ++ while (pNotify != NULL) { ++ /* If there is more than one notification type, each ++ * type may require its own handler code. */ ++ ++ if (hNotification->handle == pNotify->hSync) { ++ /* found */ ++ break; ++ } ++ pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList, ++ (struct LST_ELEM *)pNotify); ++ } ++ if (pNotify == NULL) { ++ /* Not registered */ ++ if (uEventMask == 0) { ++ status = DSP_EVALUE; ++ } else { ++ /* Allocate NOTIFICATION object, add to list */ ++ pNotify = MEM_Calloc(sizeof(struct NOTIFICATION), ++ MEM_PAGED); ++ if (pNotify == NULL) ++ status = DSP_EMEMORY; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ LST_InitElem((struct LST_ELEM *) pNotify); ++ /* If there is more than one notification type, each ++ * type may require its own handler code. 
*/ ++ status = SYNC_OpenEvent(&pNotify->hSync, &syncAttrs); ++ hNotification->handle = pNotify->hSync; ++ ++ if (DSP_SUCCEEDED(status)) { ++ pNotify->uEventMask = uEventMask; ++ pNotify->uNotifyType = uNotifyType; ++ LST_PutTail(hNtfy->notifyList, ++ (struct LST_ELEM *)pNotify); ++ } else { ++ DeleteNotify(pNotify); ++ } ++ } ++ } else { ++ /* Found in list */ ++ if (uEventMask == 0) { ++ /* Remove from list and free */ ++ LST_RemoveElem(hNtfy->notifyList, ++ (struct LST_ELEM *)pNotify); ++ DeleteNotify(pNotify); ++ } else { ++ /* Update notification mask (type shouldn't change) */ ++ pNotify->uEventMask = uEventMask; ++ } ++ } ++ (void)SYNC_LeaveCS(hNtfy->hSync); ++ return status; ++} ++ ++/* ++ * ======== DeleteNotify ======== ++ * Purpose: ++ * Free the notification object. ++ */ ++static void DeleteNotify(struct NOTIFICATION *pNotify) ++{ ++ if (pNotify->hSync) ++ (void) SYNC_CloseEvent(pNotify->hSync); ++ ++ if (pNotify->pstrName) ++ MEM_Free(pNotify->pstrName); ++ ++ MEM_Free(pNotify); ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/reg.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/reg.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/reg.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/reg.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,196 @@ ++/* ++ * reg.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== regce.c ======== ++ * Purpose: ++ * Provide registry functions. ++ * ++ * Public Functions: ++ * REG_DeleteValue ++ * REG_EnumValue ++ * REG_Exit ++ * REG_GetValue ++ * REG_Init ++ * REG_SetValue ++ * ++ *! Revision History: ++ *! ================ ++ * ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include ++#include ++ ++#if GT_TRACE ++struct GT_Mask REG_debugMask = { NULL, NULL }; /* GT trace var. */ ++#endif ++ ++/* ++ * ======== REG_DeleteValue ======== ++ * Deletes a registry entry value. NOTE: A registry entry value is not the ++ * same as * a registry key. 
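++ *
++ * The "registry" here is not a host OS registry but the in-memory
++ * pseudo-registry implemented by regsup.c later in this patch, so a
++ * value lives only as long as the driver.  Storing a value and
++ * reading it back looks roughly like this (sketch; the value name
++ * and data are made up, phKey must be NULL, and the subkey and
++ * dwType arguments are ignored by the regsup implementation):
++ *
++ *   u32 dwVal = 1;
++ *   u32 dwSize = sizeof(dwVal);
++ *
++ *   REG_SetValue(NULL, "", "MyValue", 0, (u8 *)&dwVal, sizeof(dwVal));
++ *   REG_GetValue(NULL, "", "MyValue", (u8 *)&dwVal, &dwSize);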
++ */ ++DSP_STATUS REG_DeleteValue(OPTIONAL IN HANDLE *phKey, IN CONST char *pstrSubkey, ++ IN CONST char *pstrValue) ++{ ++ DSP_STATUS status; ++ DBC_Require(pstrSubkey && pstrValue); ++ DBC_Require(phKey == NULL); ++ DBC_Require(strlen(pstrSubkey) < REG_MAXREGPATHLENGTH); ++ DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH); ++ ++ GT_0trace(REG_debugMask, GT_ENTER, "REG_DeleteValue: entered\n"); ++ ++ /* Note that we don't use phKey */ ++ if (regsupDeleteValue(pstrSubkey, pstrValue) == DSP_SOK) ++ status = DSP_SOK; ++ else ++ status = DSP_EFAIL; ++ ++ return status; ++} ++ ++/* ++ * ======== REG_EnumValue ======== ++ * Enumerates a registry key and retrieve values stored under the key. ++ * We will assume the input pdwValueSize is smaller than ++ * REG_MAXREGPATHLENGTH for implementation purposes. ++ */ ++DSP_STATUS REG_EnumValue(IN HANDLE *phKey, IN u32 dwIndex, ++ IN CONST char *pstrKey, IN OUT char *pstrValue, ++ IN OUT u32 *pdwValueSize, IN OUT char *pstrData, ++ IN OUT u32 *pdwDataSize) ++{ ++ DSP_STATUS status; ++ ++ DBC_Require(pstrKey && pstrValue && pdwValueSize && pstrData && ++ pdwDataSize); ++ DBC_Require(*pdwValueSize <= REG_MAXREGPATHLENGTH); ++ DBC_Require(phKey == NULL); ++ DBC_Require(strlen(pstrKey) < REG_MAXREGPATHLENGTH); ++ ++ GT_0trace(REG_debugMask, GT_ENTER, "REG_EnumValue: entered\n"); ++ ++ status = regsupEnumValue(dwIndex, pstrKey, pstrValue, pdwValueSize, ++ pstrData, pdwDataSize); ++ ++ return status; ++} ++ ++/* ++ * ======== REG_Exit ======== ++ * Discontinue usage of the REG module. ++ */ ++void REG_Exit(void) ++{ ++ GT_0trace(REG_debugMask, GT_5CLASS, "REG_Exit\n"); ++ ++ regsupExit(); ++} ++ ++/* ++ * ======== REG_GetValue ======== ++ * Retrieve a value from the registry. ++ */ ++DSP_STATUS REG_GetValue(OPTIONAL IN HANDLE *phKey, IN CONST char *pstrSubkey, ++ IN CONST char *pstrValue, OUT u8 *pbData, ++ IN OUT u32 *pdwDataSize) ++{ ++ DSP_STATUS status; ++ ++ DBC_Require(pstrSubkey && pstrValue && pbData); ++ DBC_Require(phKey == NULL); ++ DBC_Require(strlen(pstrSubkey) < REG_MAXREGPATHLENGTH); ++ DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH); ++ ++ GT_0trace(REG_debugMask, GT_ENTER, "REG_GetValue: entered\n"); ++ ++ /* We need to use regsup calls... */ ++ /* ...for now we don't need the key handle or */ ++ /* the subkey, all we need is the value to lookup. */ ++ if (regsupGetValue((char *)pstrValue, pbData, pdwDataSize) == DSP_SOK) ++ status = DSP_SOK; ++ else ++ status = DSP_EFAIL; ++ ++ return status; ++} ++ ++/* ++ * ======== REG_Init ======== ++ * Initialize the REG module's private state. ++ */ ++bool REG_Init(void) ++{ ++ bool fInit; ++ ++ GT_create(®_debugMask, "RG"); /* RG for ReG */ ++ ++ fInit = regsupInit(); ++ ++ GT_0trace(REG_debugMask, GT_5CLASS, "REG_Init\n"); ++ ++ return fInit; ++} ++ ++/* ++ * ======== REG_SetValue ======== ++ * Set a value in the registry. ++ */ ++DSP_STATUS REG_SetValue(OPTIONAL IN HANDLE *phKey, IN CONST char *pstrSubkey, ++ IN CONST char *pstrValue, IN CONST u32 dwType, ++ IN u8 *pbData, IN u32 dwDataSize) ++{ ++ DSP_STATUS status; ++ ++ DBC_Require(pstrValue && pbData); ++ DBC_Require(phKey == NULL); ++ DBC_Require(dwDataSize > 0); ++ DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH); ++ ++ /* We need to use regsup calls... */ ++ /* ...for now we don't need the key handle or */ ++ /* the subkey, all we need is the value to lookup. 
*/ ++ if (regsupSetValue((char *)pstrValue, pbData, dwDataSize) == DSP_SOK) ++ status = DSP_SOK; ++ else ++ status = DSP_EFAIL; ++ ++ return status; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/regsup.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/regsup.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,370 @@ ++/* ++ * regsup.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== regsup.c ======== ++ * Purpose: ++ * Provide registry support functions. ++ * ++ *! Revision History: ++ *! ================ ++ *! 28-May-2002 map: Integrated PSI's dspimage update mechanism ++ *! 11-May-2002 gp: Turned PERF "on". ++ *! 21-May-2002 map: Fixed bug in SetValue - if resizing datasize, set ++ *! new size too ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++struct RegValueStruct { ++ char name[BRIDGE_MAX_NAME_SIZE]; /* Name of a given value entry */ ++ u32 dataSize; /* Size of the data */ ++ void *pData; /* Pointer to the actual data */ ++}; ++ ++struct RegKeyStruct { ++ /*The current number of value entries this key has*/ ++ u32 numValueEntries; ++ /* Array of value entries */ ++ struct RegValueStruct values[BRIDGE_MAX_NUM_REG_ENTRIES]; ++}; ++ ++ ++/* Pointer to the registry support key */ ++static struct RegKeyStruct *pRegKey; ++ ++#if GT_TRACE ++extern struct GT_Mask REG_debugMask; /* GT trace var. */ ++/* ++ * ======== printS ======== ++ * Purpose: ++ * Displays printable characters in pBuf, if any. ++ */ ++static inline void printS(void *pBuf) ++{ ++ int pos = 0; ++ if (*(REG_debugMask).flags & (GT_2CLASS)) { ++ while (*(u8 *)((pBuf)+pos) >= ' ' && ++ *(u8 *)((pBuf)+pos) <= '~') { ++ GT_1trace(REG_debugMask, GT_2CLASS, "%c", ++ *(u8 *)((pBuf) + pos++)); ++ } ++ ++ GT_0trace(REG_debugMask, GT_2CLASS, "\n"); ++ } ++} ++#else ++#define printS(pBuf) ++#endif ++ ++/* ++ * ======== regsupInit ======== ++ * Purpose: ++ * Initialize the Registry Support module's private state. ++ */ ++bool regsupInit(void) ++{ ++ if (pRegKey != NULL) ++ return true; ++ ++ /* Need to allocate and setup our registry. */ ++ pRegKey = MEM_Calloc(sizeof(struct RegKeyStruct), MEM_NONPAGED); ++ if (pRegKey == NULL) ++ return false; ++ ++ return true; ++} ++ ++/* ++ * ======== regsupExit ======== ++ * Purpose: ++ * Release all registry support allocations. ++ */ ++void regsupExit(void) ++{ ++ u32 i; ++ ++ /* Make sure data has actually been allocated. */ ++ if (pRegKey == NULL) { ++ /* Nothing initialized.return! 
*/ ++ return; ++ } ++ ++ GT_1trace(REG_debugMask, GT_2CLASS, "pRegKey->numValueEntries %d\n", ++ pRegKey->numValueEntries); ++ ++ /* Now go through each entry and free all resources. */ ++ for (i = 0; ((i < BRIDGE_MAX_NUM_REG_ENTRIES) && ++ (i < pRegKey->numValueEntries)); i++) { ++ if (pRegKey->values[i].name[0] != '\0') { ++ /* We have a valid entry.free it up! */ ++ if (pRegKey->values[i].pData != NULL) { ++ GT_3trace(REG_debugMask, GT_2CLASS, ++ "E %d\t %s DATA %x ", i, ++ pRegKey->values[i].name, ++ *(u32 *)pRegKey->values[i].pData); ++ printS((u8 *)(pRegKey->values[i].pData)); ++ MEM_Free(pRegKey->values[i].pData); ++ } ++ pRegKey->values[i].pData = NULL; ++ pRegKey->values[i].dataSize = 0; ++ pRegKey->values[i].name[0] = '\0'; ++ } ++ } ++ ++ /* Now that all of the resources are freed up, free the main one! */ ++ MEM_Free(pRegKey); ++ ++ /* Don't forget to NULL out the global entry! */ ++ pRegKey = NULL; ++} ++ ++/* ++ * ======== regsupGetValue ======== ++ * Purpose: ++ * Get the value of the entry having the given name. ++ */ ++DSP_STATUS regsupGetValue(char *valName, void *pBuf, u32 *dataSize) ++{ ++ DSP_STATUS retVal = DSP_EFAIL; ++ u32 i; ++ ++ /* Need to search through the entries looking for the right one. */ ++ for (i = 0; i < pRegKey->numValueEntries; i++) { ++ /* See if the name matches. */ ++ if (strncmp(pRegKey->values[i].name, valName, ++ BRIDGE_MAX_NAME_SIZE) == 0) { ++ ++ /* We have a match! Copy out the data. */ ++ memcpy(pBuf, pRegKey->values[i].pData, ++ pRegKey->values[i].dataSize); ++ ++ /* Get the size for the caller. */ ++ *dataSize = pRegKey->values[i].dataSize; ++ ++ /* Set our status to good and exit. */ ++ retVal = DSP_SOK; ++ break; ++ } ++ } ++ ++ if (DSP_SUCCEEDED(retVal)) { ++ GT_2trace(REG_debugMask, GT_2CLASS, "G %s DATA %x ", valName, ++ *(u32 *)pBuf); ++ printS((u8 *)pBuf); ++ } else { ++ GT_1trace(REG_debugMask, GT_3CLASS, "G %s FAILED\n", valName); ++ } ++ ++ return retVal; ++} ++ ++/* ++ * ======== regsupSetValue ======== ++ * Purpose: ++ * Sets the value of the entry having the given name. ++ */ ++DSP_STATUS regsupSetValue(char *valName, void *pBuf, u32 dataSize) ++{ ++ DSP_STATUS retVal = DSP_EFAIL; ++ u32 i; ++ ++ GT_2trace(REG_debugMask, GT_2CLASS, "S %s DATA %x ", valName, ++ *(u32 *)pBuf); ++ printS((u8 *)pBuf); ++ ++ /* Need to search through the entries looking for the right one. */ ++ for (i = 0; i < pRegKey->numValueEntries; i++) { ++ /* See if the name matches. */ ++ if (strncmp(pRegKey->values[i].name, valName, ++ BRIDGE_MAX_NAME_SIZE) == 0) { ++ /* Make sure the new data size is the same. */ ++ if (dataSize != pRegKey->values[i].dataSize) { ++ /* The caller needs a different data size! */ ++ MEM_Free(pRegKey->values[i].pData); ++ pRegKey->values[i].pData = MEM_Alloc(dataSize, ++ MEM_NONPAGED); ++ if (pRegKey->values[i].pData == NULL) ++ break; ++ ++ } ++ ++ /* We have a match! Copy out the data. */ ++ memcpy(pRegKey->values[i].pData, pBuf, dataSize); ++ ++ /* Reset datasize - overwrite if new or same */ ++ pRegKey->values[i].dataSize = dataSize; ++ ++ /* Set our status to good and exit. */ ++ retVal = DSP_SOK; ++ break; ++ } ++ } ++ ++ /* See if we found a match or if this is a new entry */ ++ if (i == pRegKey->numValueEntries) { ++ /* No match, need to make a new entry */ ++ /* First check to see if we can make any more entries. 
*/ ++ if (pRegKey->numValueEntries < BRIDGE_MAX_NUM_REG_ENTRIES) { ++ char *tmp_name = ++ pRegKey->values[pRegKey->numValueEntries].name; ++ strncpy(tmp_name, valName, BRIDGE_MAX_NAME_SIZE - 1); ++ tmp_name[BRIDGE_MAX_NAME_SIZE - 1] = '\0'; ++ pRegKey->values[pRegKey->numValueEntries].pData = ++ MEM_Alloc(dataSize, MEM_NONPAGED); ++ if (pRegKey->values[pRegKey->numValueEntries].pData != ++ NULL) { ++ memcpy(pRegKey-> ++ values[pRegKey->numValueEntries].pData, ++ pBuf, dataSize); ++ pRegKey-> ++ values[pRegKey->numValueEntries].dataSize = ++ dataSize; ++ pRegKey->numValueEntries++; ++ retVal = DSP_SOK; ++ } ++ } else { ++ GT_0trace(REG_debugMask, GT_7CLASS, ++ "MAX NUM REG ENTRIES REACHED\n"); ++ } ++ } ++ ++ return retVal; ++} ++ ++/* ++ * ======== regsupEnumValue ======== ++ * Purpose: ++ * Returns registry "values" and their "data" under a (sub)key. ++ */ ++DSP_STATUS regsupEnumValue(IN u32 dwIndex, IN CONST char *pstrKey, ++ IN OUT char *pstrValue, IN OUT u32 *pdwValueSize, ++ IN OUT char *pstrData, IN OUT u32 *pdwDataSize) ++{ ++ DSP_STATUS retVal = REG_E_INVALIDSUBKEY; ++ u32 i; ++ u32 dwKeyLen; ++ u32 count = 0; ++ ++ DBC_Require(pstrKey); ++ dwKeyLen = strlen(pstrKey); ++ ++ /* Need to search through the entries looking for the right one. */ ++ for (i = 0; i < pRegKey->numValueEntries; i++) { ++ /* See if the name matches. */ ++ if ((strncmp(pRegKey->values[i].name, pstrKey, ++ dwKeyLen) == 0) && count++ == dwIndex) { ++ /* We have a match! Copy out the data. */ ++ memcpy(pstrData, pRegKey->values[i].pData, ++ pRegKey->values[i].dataSize); ++ /* Get the size for the caller. */ ++ *pdwDataSize = pRegKey->values[i].dataSize; ++ *pdwValueSize = strlen(&(pRegKey-> ++ values[i].name[dwKeyLen])); ++ strncpy(pstrValue, ++ &(pRegKey->values[i].name[dwKeyLen]), ++ *pdwValueSize + 1); ++ GT_3trace(REG_debugMask, GT_2CLASS, ++ "E Key %s, Value %s, Data %x ", ++ pstrKey, pstrValue, *(u32 *)pstrData); ++ printS((u8 *)pstrData); ++ /* Set our status to good and exit. */ ++ retVal = DSP_SOK; ++ break; ++ } ++ } ++ ++ if (count && DSP_FAILED(retVal)) ++ retVal = REG_E_NOMOREITEMS; ++ ++ return retVal; ++} ++ ++/* ++ * ======== regsupDeleteValue ======== ++ */ ++DSP_STATUS regsupDeleteValue(IN CONST char *pstrSubkey, ++ IN CONST char *pstrValue) ++{ ++ DSP_STATUS retVal = DSP_EFAIL; ++ u32 i; ++ ++ for (i = 0; ((i < BRIDGE_MAX_NUM_REG_ENTRIES) && ++ (i < pRegKey->numValueEntries)); i++) { ++ /* See if the name matches... */ ++ if (strncmp(pRegKey->values[i].name, pstrValue, ++ BRIDGE_MAX_NAME_SIZE) == 0) { ++ /* We have a match! Delete this key. To delete a ++ * key, we free all resources associated with this ++ * key and, if we're not already the last entry in ++ * the array, we copy that entry into this deleted ++ * key. ++ */ ++ MEM_Free(pRegKey->values[i].pData); ++ if ((pRegKey->numValueEntries - 1) == i) { ++ /* we're deleting the last one */ ++ pRegKey->values[i].name[0] = '\0'; ++ pRegKey->values[i].dataSize = 0; ++ pRegKey->values[i].pData = NULL; ++ } else { ++ /* move the last one here */ ++ strncpy(pRegKey->values[i].name, pRegKey-> ++ values[pRegKey->numValueEntries - 1].name, ++ BRIDGE_MAX_NAME_SIZE); ++ pRegKey->values[i].dataSize = ++ pRegKey-> ++ values[pRegKey->numValueEntries-1].dataSize; ++ pRegKey->values[i].pData = ++ pRegKey-> ++ values[pRegKey->numValueEntries-1].pData; ++ /* don't have to do this, but for ++ * the paranoid... */ ++ pRegKey-> ++ values[pRegKey->numValueEntries-1].name[0] = ++ '\0'; ++ } ++ ++ /* another one bites the dust. 
*/ ++ pRegKey->numValueEntries--; ++ ++ /* Set our status to good and exit... */ ++ retVal = DSP_SOK; ++ break; ++ } ++ } ++ return retVal; ++ ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/regsup.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/regsup.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,58 @@ ++/* ++ * regsup.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== regsup.h ======== ++ * ++ *! Revision History ++ *! ================ ++ */ ++ ++#ifndef _REGSUP_H_ ++#define _REGSUP_H_ ++ ++#define BRIDGE_MAX_NAME_SIZE MAXREGPATHLENGTH ++#define BRIDGE_MAX_NUM_REG_ENTRIES 52 ++ ++/* Init function. MUST be called BEFORE any calls are */ ++/* made into this psuedo-registry!!! Returns TRUE/FALSE for SUCCESS/ERROR */ ++extern bool regsupInit(void); ++ ++/* Release all registry support allocations. */ ++extern void regsupExit(void); ++ ++/* ++ * ======== regsupDeleteValue ======== ++ */ ++extern DSP_STATUS regsupDeleteValue(IN CONST char *pstrSubkey, ++ IN CONST char *pstrValue); ++/* Get the value of the entry having the given name. Returns DSP_SOK */ ++/* if an entry was found and the value retrieved. Returns DSP_EFAIL ++ * otherwise.*/ ++extern DSP_STATUS regsupGetValue(char *valName, void *pBuf, u32 *dataSize); ++ ++/* Sets the value of the entry having the given name. Returns DSP_SOK */ ++/* if an entry was found and the value set. Returns DSP_EFAIL otherwise. */ ++extern DSP_STATUS regsupSetValue(char *valName, void *pBuf, u32 dataSize); ++ ++/* Returns registry "values" and their "data" under a (sub)key. */ ++extern DSP_STATUS regsupEnumValue(IN u32 dwIndex, IN CONST char *pstrKey, ++ IN OUT char *pstrValue, IN OUT u32 *pdwValueSize, ++ IN OUT char *pstrData, IN OUT u32 *pdwDataSize); ++ ++#endif ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/services.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/services.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/services.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/services.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,193 @@ ++/* ++ * services.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== services.c ======== ++ * Purpose: ++ * Provide SERVICES loading. 
++ * ++ * Public Functions: ++ * SERVICES_Exit ++ * SERVICES_Init ++ * ++ * ++ *! Revision History ++ *! ================ ++ *! 20-Nov-2000 rr: NTFY_Init/Exit added. ++ *! 06-Jul-2000 rr: PROC prefix changed to PRCS to accomodate RM. ++ *! 01-Feb-2000 kc: Created. ++ */ ++ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask SERVICES_debugMask = { NULL, NULL }; /* GT trace var. */ ++#endif ++ ++static u32 cRefs; /* SERVICES module reference count */ ++ ++/* ++ * ======== SERVICES_Exit ======== ++ * Purpose: ++ * Discontinue usage of module; free resources when reference count ++ * reaches 0. ++ */ ++void SERVICES_Exit(void) ++{ ++ DBC_Require(cRefs > 0); ++ ++ GT_1trace(SERVICES_debugMask, GT_5CLASS, "SERVICES_Exit: cRefs 0x%x\n", ++ cRefs); ++ ++ cRefs--; ++ if (cRefs == 0) { ++ /* Uninitialize all SERVICES modules here */ ++ NTFY_Exit(); ++ UTIL_Exit(); ++ SYNC_Exit(); ++ CLK_Exit(); ++ REG_Exit(); ++ LST_Exit(); ++ KFILE_Exit(); ++ DPC_Exit(); ++ DBG_Exit(); ++ CSL_Exit(); ++ CFG_Exit(); ++ MEM_Exit(); ++ ++ GT_exit(); ++ } ++ ++ DBC_Ensure(cRefs >= 0); ++} ++ ++/* ++ * ======== SERVICES_Init ======== ++ * Purpose: ++ * Initializes SERVICES modules. ++ */ ++bool SERVICES_Init(void) ++{ ++ bool fInit = true; ++ bool fCFG, fCSL, fDBG, fDPC, fKFILE, fLST, fMEM; ++ bool fREG, fSYNC, fCLK, fUTIL, fNTFY; ++ ++ DBC_Require(cRefs >= 0); ++ ++ if (cRefs == 0) { ++ ++ GT_init(); ++ GT_create(&SERVICES_debugMask, "OS"); /* OS for OSal */ ++ ++ GT_0trace(SERVICES_debugMask, GT_ENTER, ++ "SERVICES_Init: entered\n"); ++ ++ /* Perform required initialization of SERVICES modules. 
*/ ++ fMEM = MEM_Init(); ++ fREG = REG_Init(); ++ fCFG = CFG_Init(); ++ fCSL = CSL_Init(); ++ fDBG = DBG_Init(); ++ fDPC = DPC_Init(); ++ fKFILE = KFILE_Init(); ++ fLST = LST_Init(); ++ /* fREG = REG_Init(); */ ++ fSYNC = SYNC_Init(); ++ fCLK = CLK_Init(); ++ fUTIL = UTIL_Init(); ++ fNTFY = NTFY_Init(); ++ ++ fInit = fCFG && fCSL && fDBG && fDPC && fKFILE && ++ fLST && fMEM && fREG && fSYNC && fCLK && fUTIL; ++ ++ if (!fInit) { ++ if (fNTFY) ++ NTFY_Exit(); ++ ++ if (fUTIL) ++ UTIL_Exit(); ++ ++ if (fSYNC) ++ SYNC_Exit(); ++ ++ if (fCLK) ++ CLK_Exit(); ++ ++ if (fREG) ++ REG_Exit(); ++ ++ if (fLST) ++ LST_Exit(); ++ ++ if (fKFILE) ++ KFILE_Exit(); ++ ++ if (fDPC) ++ DPC_Exit(); ++ ++ if (fDBG) ++ DBG_Exit(); ++ ++ if (fCSL) ++ CSL_Exit(); ++ ++ if (fCFG) ++ CFG_Exit(); ++ ++ if (fMEM) ++ MEM_Exit(); ++ ++ } ++ } ++ ++ if (fInit) ++ cRefs++; ++ ++ GT_1trace(SERVICES_debugMask, GT_5CLASS, "SERVICES_Init: cRefs 0x%x\n", ++ cRefs); ++ ++ DBC_Ensure((fInit && (cRefs > 0)) || (!fInit && (cRefs >= 0))); ++ ++ return fInit; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/sync.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/sync.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/sync.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/services/sync.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,608 @@ ++/* ++ * sync.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== sync.c ======== ++ * Purpose: ++ * Synchronization services. ++ * ++ * Public Functions: ++ * SYNC_CloseEvent ++ * SYNC_DeleteCS ++ * SYNC_EnterCS ++ * SYNC_Exit ++ * SYNC_Init ++ * SYNC_InitializeCS ++ * SYNC_LeaveCS ++ * SYNC_OpenEvent ++ * SYNC_ResetEvent ++ * SYNC_SetEvent ++ * SYNC_WaitOnEvent ++ * SYNC_WaitOnMultipleEvents ++ * ++ *! Revision History: ++ *! ================ ++ *! 05-Nov-2001 kc: Minor cosmetic changes. ++ *! 05-Oct-2000 jeh Added SYNC_WaitOnMultipleEvents(). ++ *! 10-Aug-2000 rr: SYNC_PostMessage added. ++ *! 10-Jul-2000 jeh Modified SYNC_OpenEvent() to handle NULL attrs. ++ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. ++ *! GT Changes. ++ *! 01-Dec-1999 ag: Added optional named event creation in SYNC_OpenEvent(). ++ *! 22-Nov-1999 kc: Added changes from code review. ++ *! 22-Sep-1999 kc: Modified from sync95.c. ++ *! 05-Aug-1996 gp: Created. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define SIGNATURE 0x434e5953 /* "SYNC" (in reverse) */ ++ ++enum wait_state { ++ wo_waiting, ++ wo_signalled ++} ; ++ ++enum sync_state { ++ so_reset, ++ so_signalled ++} ; ++ ++struct WAIT_OBJECT { ++ enum wait_state state; ++ struct SYNC_OBJECT *signalling_event; ++ struct semaphore sem; ++}; ++ ++/* Generic SYNC object: */ ++struct SYNC_OBJECT { ++ u32 dwSignature; /* Used for object validation. */ ++ enum sync_state state; ++ spinlock_t sync_lock; ++ struct WAIT_OBJECT *pWaitObj; ++}; ++ ++struct SYNC_DPCCSOBJECT { ++ u32 dwSignature; /* used for object validation */ ++ spinlock_t sync_dpccs_lock; ++ s32 count; ++} ; ++ ++/* ----------------------------------- Globals */ ++#if GT_TRACE ++static struct GT_Mask SYNC_debugMask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++static int test_and_set(volatile void *ptr, int val) ++{ ++ int ret = val; ++ asm volatile (" swp %0, %0, [%1]" : "+r" (ret) : "r"(ptr) : "memory"); ++ return ret; ++} ++ ++static void timeout_callback(unsigned long hWaitObj); ++ ++/* ++ * ======== SYNC_CloseEvent ======== ++ * Purpose: ++ * Close an existing SYNC event object. ++ */ ++DSP_STATUS SYNC_CloseEvent(struct SYNC_OBJECT *hEvent) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; ++ ++ DBC_Require(pEvent != NULL && pEvent->pWaitObj == NULL); ++ ++ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_CloseEvent: hEvent 0x%x\n", ++ hEvent); ++ ++ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { ++ if (pEvent->pWaitObj) { ++ status = DSP_EFAIL; ++ GT_0trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_CloseEvent: Wait object not NULL\n"); ++ } ++ MEM_FreeObject(pEvent); ++ ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_CloseEvent: invalid " ++ "hEvent handle 0x%x\n", hEvent); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== SYNC_Exit ======== ++ * Purpose: ++ * Cleanup SYNC module. ++ */ ++void SYNC_Exit(void) ++{ ++ GT_0trace(SYNC_debugMask, GT_5CLASS, "SYNC_Exit\n"); ++} ++ ++/* ++ * ======== SYNC_Init ======== ++ * Purpose: ++ * Initialize SYNC module. ++ */ ++bool SYNC_Init(void) ++{ ++ GT_create(&SYNC_debugMask, "SY"); /* SY for SYnc */ ++ ++ GT_0trace(SYNC_debugMask, GT_5CLASS, "SYNC_Init\n"); ++ ++ return true; ++} ++ ++/* ++ * ======== SYNC_OpenEvent ======== ++ * Purpose: ++ * Open a new synchronization event object. 
++ */ ++DSP_STATUS SYNC_OpenEvent(OUT struct SYNC_OBJECT **phEvent, ++ IN OPTIONAL struct SYNC_ATTRS *pAttrs) ++{ ++ struct SYNC_OBJECT *pEvent = NULL; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(phEvent != NULL); ++ ++ GT_2trace(SYNC_debugMask, GT_ENTER, ++ "SYNC_OpenEvent: phEvent 0x%x, pAttrs " ++ "0x%x\n", phEvent, pAttrs); ++ ++ /* Allocate memory for sync object */ ++ MEM_AllocObject(pEvent, struct SYNC_OBJECT, SIGNATURE); ++ if (pEvent != NULL) { ++ pEvent->state = so_reset; ++ pEvent->pWaitObj = NULL; ++ spin_lock_init(&pEvent->sync_lock); ++ } else { ++ status = DSP_EMEMORY; ++ GT_0trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_OpenEvent: MEM_AllocObject failed\n"); ++ } ++ ++ *phEvent = pEvent; ++ ++ return status; ++} ++ ++/* ++ * ======== SYNC_ResetEvent ======== ++ * Purpose: ++ * Reset an event to non-signalled. ++ */ ++DSP_STATUS SYNC_ResetEvent(struct SYNC_OBJECT *hEvent) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; ++ ++ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_ResetEvent: hEvent 0x%x\n", ++ hEvent); ++ ++ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { ++ pEvent->state = so_reset; ++ status = DSP_SOK; ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_ResetEvent: invalid hEvent " ++ "handle 0x%x\n", hEvent); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== SYNC_SetEvent ======== ++ * Purpose: ++ * Set an event to signaled and unblock one waiting thread. ++ * ++ * This function is called from ISR, DPC and user context. Hence interrupts ++ * are disabled to ensure atomicity. ++ */ ++ ++DSP_STATUS SYNC_SetEvent(struct SYNC_OBJECT *hEvent) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; ++ unsigned long flags; ++ ++ GT_1trace(SYNC_debugMask, GT_6CLASS, "SYNC_SetEvent: hEvent 0x%x\n", ++ hEvent); ++ ++ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { ++ spin_lock_irqsave(&hEvent->sync_lock, flags); ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_SetEvent: pEvent->pWaitObj " ++ "= 0x%x \n", pEvent->pWaitObj); ++ if (pEvent->pWaitObj) ++ GT_1trace(SYNC_debugMask, GT_6CLASS, "SYNC_SetEvent: " ++ "pEvent->pWaitObj->state = 0x%x \n", ++ pEvent->pWaitObj->state); ++ if (pEvent->pWaitObj != NULL && ++ test_and_set(&pEvent->pWaitObj->state, ++ wo_signalled) == wo_waiting) { ++ ++ pEvent->state = so_reset; ++ pEvent->pWaitObj->signalling_event = pEvent; ++ up(&pEvent->pWaitObj->sem); ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_SetEvent: Unlock " ++ "Semaphore for hEvent 0x%x\n", hEvent); ++ } else { ++ pEvent->state = so_signalled; ++ } ++ spin_unlock_irqrestore(&hEvent->sync_lock, flags); ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_SetEvent: invalid hEvent " ++ "handle 0x%x\n", hEvent); ++ } ++ return status; ++} ++ ++/* ++ * ======== SYNC_WaitOnEvent ======== ++ * Purpose: ++ * Wait for an event to be signalled, up to the specified timeout. ++ * Note: dwTimeOut must be 0xffffffff to signal infinite wait. 
++ */ ++DSP_STATUS SYNC_WaitOnEvent(struct SYNC_OBJECT *hEvent, u32 dwTimeout) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; ++ u32 temp; ++ ++ GT_2trace(SYNC_debugMask, GT_6CLASS, "SYNC_WaitOnEvent: hEvent 0x%x\n, " ++ "dwTimeOut 0x%x", hEvent, dwTimeout); ++ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { ++ status = SYNC_WaitOnMultipleEvents(&pEvent, 1, dwTimeout, ++ &temp); ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_WaitOnEvent: invalid hEvent" ++ "handle 0x%x\n", hEvent); ++ } ++ return status; ++} ++ ++/* ++ * ======== SYNC_WaitOnMultipleEvents ======== ++ * Purpose: ++ * Wait for any of an array of events to be signalled, up to the ++ * specified timeout. ++ */ ++DSP_STATUS SYNC_WaitOnMultipleEvents(struct SYNC_OBJECT **hSyncEvents, ++ u32 uCount, u32 dwTimeout, ++ OUT u32 *puIndex) ++{ ++ u32 i; ++ DSP_STATUS status = DSP_SOK; ++ u32 curr; ++ struct WAIT_OBJECT *Wp; ++ ++ DBC_Require(uCount > 0); ++ DBC_Require(hSyncEvents != NULL); ++ DBC_Require(puIndex != NULL); ++ ++ for (i = 0; i < uCount; i++) ++ DBC_Require(MEM_IsValidHandle(hSyncEvents[i], SIGNATURE)); ++ ++ GT_4trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_WaitOnMultipleEvents: hSyncEvents:" ++ "0x%x\tuCount: 0x%x" "\tdwTimeout: 0x%x\tpuIndex: 0x%x\n", ++ hSyncEvents, uCount, dwTimeout, puIndex); ++ ++ Wp = MEM_Calloc(sizeof(struct WAIT_OBJECT), MEM_NONPAGED); ++ if (Wp == NULL) ++ return DSP_EMEMORY; ++ ++ Wp->state = wo_waiting; ++ Wp->signalling_event = NULL; ++ init_MUTEX_LOCKED(&(Wp->sem)); ++ ++ for (curr = 0; curr < uCount; curr++) { ++ hSyncEvents[curr]->pWaitObj = Wp; ++ if (hSyncEvents[curr]->state == so_signalled) { ++ GT_0trace(SYNC_debugMask, GT_6CLASS, ++ "Detected signaled Event !!!\n"); ++ if (test_and_set(&(Wp->state), wo_signalled) == ++ wo_waiting) { ++ GT_0trace(SYNC_debugMask, GT_6CLASS, ++ "Setting Signal Event!!!\n"); ++ hSyncEvents[curr]->state = so_reset; ++ Wp->signalling_event = hSyncEvents[curr]; ++ } ++ curr++; /* Will try optimizing later */ ++ break; ++ } ++ } ++ ++ curr--; /* Will try optimizing later */ ++ if (Wp->state != wo_signalled && dwTimeout > 0) { ++ struct timer_list timeout; ++ if (dwTimeout != SYNC_INFINITE) { ++ init_timer(&timeout); ++ timeout.function = timeout_callback; ++ timeout.data = (unsigned long)Wp; ++ timeout.expires = jiffies + dwTimeout * HZ / 1000; ++ add_timer(&timeout); ++ } ++ if (down_interruptible(&(Wp->sem))) { ++ GT_0trace(SYNC_debugMask, GT_7CLASS, "SYNC: " ++ "WaitOnMultipleEvents Interrupted by signal\n"); ++ /* ++ * Most probably we are interrupted by a fake signal ++ * from freezer. Return -ERESTARTSYS so that this ++ * ioctl is restarted, and user space doesn't notice ++ * it. ++ */ ++ status = -ERESTARTSYS; ++ } ++ if (dwTimeout != SYNC_INFINITE) { ++ if (in_interrupt()) { ++ if (!del_timer(&timeout)) { ++ GT_0trace(SYNC_debugMask, GT_7CLASS, ++ "SYNC: Timer expired\n"); ++ } ++ } else { ++ if (!del_timer_sync(&timeout)) { ++ GT_0trace(SYNC_debugMask, GT_7CLASS, ++ "SYNC: Timer expired\n"); ++ } ++ } ++ } ++ } ++ for (i = 0; i <= curr; i++) { ++ if (MEM_IsValidHandle(hSyncEvents[i], SIGNATURE)) { ++ /* Memory corruption here if hSyncEvents[i] is ++ * freed before following statememt. 
*/ ++ hSyncEvents[i]->pWaitObj = NULL; ++ } ++ if (hSyncEvents[i] == Wp->signalling_event) ++ *puIndex = i; ++ ++ } ++ if (Wp->signalling_event == NULL && DSP_SUCCEEDED(status)) { ++ GT_0trace(SYNC_debugMask, GT_7CLASS, ++ "SYNC:Signaling Event NULL!!!(:-\n"); ++ status = DSP_ETIMEOUT; ++ } ++ if (Wp) ++ MEM_Free(Wp); ++ return status; ++} ++ ++static void timeout_callback(unsigned long hWaitObj) ++{ ++ struct WAIT_OBJECT *pWaitObj = (struct WAIT_OBJECT *)hWaitObj; ++ if (test_and_set(&pWaitObj->state, wo_signalled) == wo_waiting) ++ up(&pWaitObj->sem); ++ ++} ++ ++/* ++ * ======== SYNC_DeleteCS ======== ++ */ ++DSP_STATUS SYNC_DeleteCS(struct SYNC_CSOBJECT *hCSObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_CSOBJECT *pCSObj = (struct SYNC_CSOBJECT *)hCSObj; ++ ++ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_DeleteCS\n"); ++ ++ if (MEM_IsValidHandle(hCSObj, SIGNATURECS)) { ++ if (down_trylock(&pCSObj->sem) != 0) { ++ GT_1trace(SYNC_debugMask, GT_7CLASS, ++ "CS in use (locked) while " ++ "deleting! pCSObj=0x%X", pCSObj); ++ DBC_Assert(0); ++ } ++ MEM_FreeObject(hCSObj); ++ } else if (MEM_IsValidHandle(hCSObj, SIGNATUREDPCCS)) { ++ struct SYNC_DPCCSOBJECT *pDPCCSObj = ++ (struct SYNC_DPCCSOBJECT *)hCSObj; ++ if (pDPCCSObj->count != 1) { ++ GT_1trace(SYNC_debugMask, GT_7CLASS, ++ "DPC CS in use (locked) while " ++ "deleting! pCSObj=0x%X", pCSObj); ++ DBC_Assert(0); ++ } ++ MEM_FreeObject(pDPCCSObj); ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_DeleteCS: invalid hCSObj " ++ "handle 0x%x\n", hCSObj); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== SYNC_EnterCS ======== ++ */ ++DSP_STATUS SYNC_EnterCS(struct SYNC_CSOBJECT *hCSObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_CSOBJECT *pCSObj = (struct SYNC_CSOBJECT *)hCSObj; ++ ++ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_EnterCS: hCSObj %p\n", ++ hCSObj); ++ if (MEM_IsValidHandle(hCSObj, SIGNATURECS)) { ++ if (in_interrupt()) { ++ status = DSP_EFAIL; ++ GT_0trace(SYNC_debugMask, GT_7CLASS, ++ "SYNC_EnterCS called from " ++ "ISR/DPC or with ISR/DPC disabled!"); ++ DBC_Assert(0); ++ } else if (down_interruptible(&pCSObj->sem)) { ++ GT_1trace(SYNC_debugMask, GT_7CLASS, ++ "CS interrupted by signal! 
" ++ "pCSObj=0x%X", pCSObj); ++ status = DSP_EFAIL; ++ } ++ } else if (MEM_IsValidHandle(hCSObj, SIGNATUREDPCCS)) { ++ struct SYNC_DPCCSOBJECT *pDPCCSObj = ++ (struct SYNC_DPCCSOBJECT *)hCSObj; ++ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_EnterCS DPC\n"); ++ spin_lock_bh(&pDPCCSObj->sync_dpccs_lock); ++ pDPCCSObj->count--; ++ if (pDPCCSObj->count != 0) { ++ /* FATAL ERROR : Failed to acquire DPC CS */ ++ GT_2trace(SYNC_debugMask, GT_7CLASS, ++ "SYNC_EnterCS DPCCS %x locked," ++ "count %d", pDPCCSObj, pDPCCSObj->count); ++ spin_unlock_bh(&pDPCCSObj->sync_dpccs_lock); ++ DBC_Assert(0); ++ } ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_EnterCS: invalid hCSObj " ++ "handle 0x%x\n", hCSObj); ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== SYNC_InitializeCS ======== ++ */ ++DSP_STATUS SYNC_InitializeCS(OUT struct SYNC_CSOBJECT **phCSObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_CSOBJECT *pCSObj = NULL; ++ ++ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_InitializeCS\n"); ++ ++ /* Allocate memory for sync CS object */ ++ MEM_AllocObject(pCSObj, struct SYNC_CSOBJECT, SIGNATURECS); ++ if (pCSObj != NULL) { ++ init_MUTEX(&pCSObj->sem); ++ } else { ++ status = DSP_EMEMORY; ++ GT_0trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_InitializeCS: MEM_AllocObject" ++ "failed\n"); ++ } ++ /* return CS object */ ++ *phCSObj = pCSObj; ++ DBC_Assert(DSP_FAILED(status) || (pCSObj)); ++ return status; ++} ++ ++DSP_STATUS SYNC_InitializeDPCCS(OUT struct SYNC_CSOBJECT **phCSObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_DPCCSOBJECT *pCSObj = NULL; ++ ++ DBC_Require(phCSObj); ++ ++ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_InitializeDPCCS\n"); ++ ++ if (phCSObj) { ++ /* Allocate memory for sync CS object */ ++ MEM_AllocObject(pCSObj, struct SYNC_DPCCSOBJECT, ++ SIGNATUREDPCCS); ++ if (pCSObj != NULL) { ++ pCSObj->count = 1; ++ spin_lock_init(&pCSObj->sync_dpccs_lock); ++ } else { ++ status = DSP_EMEMORY; ++ GT_0trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_InitializeDPCCS: " ++ "MEM_AllocObject failed\n"); ++ } ++ ++ /* return CS object */ ++ *phCSObj = (struct SYNC_CSOBJECT *)pCSObj; ++ } else { ++ status = DSP_EPOINTER; ++ } ++ ++ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_InitializeDPCCS " ++ "pCSObj %p\n", pCSObj); ++ DBC_Assert(DSP_FAILED(status) || (pCSObj)); ++ ++ return status; ++} ++ ++/* ++ * ======== SYNC_LeaveCS ======== ++ */ ++DSP_STATUS SYNC_LeaveCS(struct SYNC_CSOBJECT *hCSObj) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct SYNC_CSOBJECT *pCSObj = (struct SYNC_CSOBJECT *)hCSObj; ++ ++ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_LeaveCS: hCSObj %p\n", ++ hCSObj); ++ ++ if (MEM_IsValidHandle(hCSObj, SIGNATURECS)) { ++ up(&pCSObj->sem); ++ } else if (MEM_IsValidHandle(hCSObj, SIGNATUREDPCCS)) { ++ struct SYNC_DPCCSOBJECT *pDPCCSObj = ++ (struct SYNC_DPCCSOBJECT *)hCSObj; ++ pDPCCSObj->count++; ++ if (pDPCCSObj->count != 1) { ++ /* FATAL ERROR : Invalid DPC CS count */ ++ GT_2trace(SYNC_debugMask, GT_7CLASS, ++ "SYNC_LeaveCS DPCCS %x, " ++ "Invalid count %d", pDPCCSObj, ++ pDPCCSObj->count); ++ spin_unlock_bh(&pDPCCSObj->sync_dpccs_lock); ++ DBC_Assert(0); ++ spin_lock_bh(&pDPCCSObj->sync_dpccs_lock); ++ } ++ spin_unlock_bh(&pDPCCSObj->sync_dpccs_lock); ++ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_LeaveCS DPC\n"); ++ } else { ++ status = DSP_EHANDLE; ++ GT_1trace(SYNC_debugMask, GT_6CLASS, ++ "SYNC_LeaveCS: invalid hCSObj " ++ "handle 0x%x\n", hCSObj); ++ } ++ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_cmm.h 
kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_cmm.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_cmm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_cmm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,59 @@ ++/* ++ * _cmm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _cmm.h ======== ++ * Description: ++ * Private header file defining CMM manager objects and defines needed ++ * by IO manager to register shared memory regions when DSP base image ++ * is loaded(WMD_IO_OnLoaded). ++ * ++ * Public Functions: ++ * None. ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 24-Aug-2001 ag Created. ++ */ ++ ++#ifndef _CMM_ ++#define _CMM_ ++ ++/* ++ * These target side symbols define the beginning and ending addresses ++ * of the section of shared memory used for shared memory manager CMM. ++ * They are defined in the *cfg.cmd file by cdb code. ++ */ ++#define SHM0_SHARED_BASE_SYM "_SHM0_BEG" ++#define SHM0_SHARED_END_SYM "_SHM0_END" ++#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT" ++ ++/* ++ * Shared Memory Region #0(SHMSEG0) is used in the following way: ++ * ++ * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END) ++ * V V V ++ * ------------------------------------------------------------ ++ * | DSP-side allocations | GPP-side allocations | ++ * ------------------------------------------------------------ ++ * ++ * ++ */ ++ ++#endif /* _CMM_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_deh.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_deh.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_deh.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_deh.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,46 @@ ++/* ++ * _deh.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _deh.h ======== ++ * Description: ++ * Private header for DEH module. ++ * ++ *! Revision History: ++ *! ================ ++ *! 21-Sep-2001 kc: created. ++ */ ++ ++#ifndef _DEH_ ++#define _DEH_ ++ ++#include ++#include ++#include ++ ++#define SIGNATURE 0x5f484544 /* "DEH_" backwards */ ++ ++/* DEH Manager: only one created per board: */ ++struct DEH_MGR { ++ u32 dwSignature; /* Used for object validation. */ ++ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD device context. */ ++ struct NTFY_OBJECT *hNtfy; /* NTFY object */ ++ struct DPC_OBJECT *hMmuFaultDpc; /* DPC object handle. */ ++ struct DSP_ERRORINFO errInfo; /* DSP exception info. 
*/ ++} ; ++ ++#endif /* _DEH_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/chnl_sm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/chnl_sm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/chnl_sm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/chnl_sm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1100 @@ ++/* ++ * chnl_sm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== chnl_sm.c ======== ++ * Description: ++ * Implements upper edge functions for WMD channel module. ++ * ++ * Public Functions: ++ * WMD_CHNL_AddIOReq ++ * WMD_CHNL_CancelIO ++ * WMD_CHNL_Close ++ * WMD_CHNL_Create ++ * WMD_CHNL_Destroy ++ * WMD_CHNL_FlushIO ++ * WMD_CHNL_GetInfo ++ * WMD_CHNL_GetIOC ++ * WMD_CHNL_GetMgrInfo ++ * WMD_CHNL_Idle ++ * WMD_CHNL_Open ++ * ++ * Notes: ++ * The lower edge functions must be implemented by the WMD writer, and ++ * are declared in chnl_sm.h. ++ * ++ * Care is taken in this code to prevent simulataneous access to channel ++ * queues from ++ * 1. Threads. ++ * 2. IO_DPC(), scheduled from the IO_ISR() as an event. ++ * ++ * This is done primarily by: ++ * - Semaphores. ++ * - state flags in the channel object; and ++ * - ensuring the IO_Dispatch() routine, which is called from both ++ * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered. ++ * ++ * Channel Invariant: ++ * There is an important invariant condition which must be maintained per ++ * channel outside of WMD_CHNL_GetIOC() and IO_Dispatch(), violation of ++ * which may cause timeouts and/or failure offunction SYNC_WaitOnEvent. ++ * This invariant condition is: ++ * ++ * LST_Empty(pChnl->pIOCompletions) ==> pChnl->hSyncEvent is reset ++ * and ++ * !LST_Empty(pChnl->pIOCompletions) ==> pChnl->hSyncEvent is set. ++ * ++ *! Revision History: ++ *! ================ ++ *! 10-Feb-2004 sb: Consolidated the MAILBOX_IRQ macro at the top of the file. ++ *! 05-Jan-2004 vp: Updated for 2.6 kernel on 24xx platform. ++ *! 23-Apr-2003 sb: Fixed mailbox deadlock ++ *! 24-Feb-2003 vp: Code Review Updates. ++ *! 18-Oct-2002 vp: Ported to Linux platform ++ *! 29-Aug-2002 rr Changed the SYNC error code return to DSP error code return ++ * in WMD_CHNL_GetIOC. ++ *! 22-Jan-2002 ag Zero-copy support added. ++ *! CMM_CallocBuf() used for SM allocations. ++ *! 04-Feb-2001 ag DSP-DMA support added. ++ *! 22-Nov-2000 kc: Updated usage of PERF_RegisterStat. ++ *! 06-Nov-2000 jeh Move ISR_Install, DPC_Create from CHNL_Create to IO_Create. ++ *! 13-Oct-2000 jeh Added dwArg parameter to WMD_CHNL_AddIOReq(), added ++ *! WMD_CHNL_Idle and WMD_CHNL_RegisterNotify for DSPStream. ++ *! Remove #ifdef DEBUG from around channel cIOCs field. ++ *! 21-Sep-2000 rr: PreOMAP chnl class library acts like a IO class library. ++ *! 25-Sep-2000 ag: MEM_[Unmap]LinearAddress added for #ifdef CHNL_PREOMAP. ++ *! 07-Sep-2000 rr: Added new channel class for PreOMAP. ++ *! 11-Jul-2000 jeh Allow NULL user event in WMD_CHNL_Open(). ++ *! 
06-Jul-2000 rr: Changed prefix PROC to PRCS for process module calls. ++ *! 20-Jan-2000 ag: Incorporated code review comments. ++ *! 05-Jan-2000 ag: Text format cleanup. ++ *! 07-Dec-1999 ag: Now setting ChnlMgr fSharedIRQ flag before ISR_Install(). ++ *! 01-Dec-1999 ag: WMD_CHNL_Open() now accepts named sync event. ++ *! 14-Nov-1999 ag: DPC_Schedule() uncommented. ++ *! 28-Oct-1999 ag: CHNL Attrs userEvent not supported. ++ *! SM addrs taken from COFF(IO) or host resource(SM). ++ *! 25-May-1999 jg: CHNL_IOCLASS boards now get their shared memory buffer ++ *! address and length from symbols defined in the currently ++ *! loaded COFF file. See _chn_sm.h. ++ *! 18-Jun-1997 gp: Moved waiting back to ring 0 to improve performance. ++ *! 22-Jan-1998 gp: Update User's pIOC struct in GetIOC at lower IRQL (NT). ++ *! 16-Jan-1998 gp: Commented out PERF stuff, since it is not all there in NT. ++ *! 13-Jan-1998 gp: Protect IOCTLs from IO_DPC by raising IRQL to DIRQL (NT). ++ *! 22-Oct-1997 gp: Call SYNC_OpenEvent in CHNL_Open, for NT support. ++ *! 18-Jun-1997 gp: Moved waiting back to ring 0 to improve performance. ++ *! 16-Jun-1997 gp: Added call into lower edge CHNL function to allow override ++ *! of the SHM window length reported by Windows CM. ++ *! 05-Jun-1997 gp: Removed unnecessary critical sections. ++ *! 18-Mar-1997 gp: Ensured CHNL_FlushIO on input leaves channel in READY state. ++ *! 06-Jan-1997 gp: ifdefed to support the IO variant of SHM channel class lib. ++ *! 21-Jan-1997 gp: CHNL_Close: set pChnl = NULL for DBC_Ensure(). ++ *! 14-Jan-1997 gp: Updated based on code review feedback. ++ *! 03-Jan-1997 gp: Added CHNL_E_WAITTIMEOUT error return code to CHNL_FlushIO() ++ *! 23-Oct-1996 gp: Tag channel with ring 0 process handle. ++ *! 13-Sep-1996 gp: Added performance statistics for channel. ++ *! 09-Sep-1996 gp: Added WMD_CHNL_GetMgrInfo(). ++ *! 04-Sep-1996 gp: Removed shared memory control struct offset: made zero. ++ *! 01-Aug-1996 gp: Implemented basic channel manager and channel create/delete. ++ *! 17-Jul-1996 gp: Started pseudo coding. ++ *! 11-Jul-1996 gp: Stubbed out. ++ */ ++ ++/* ----------------------------------- OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Mini-Driver */ ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- Define for This */ ++#define USERMODE_ADDR PAGE_OFFSET ++ ++#define MAILBOX_IRQ INT_MAIL_MPU_IRQ ++ ++/* ----------------------------------- Function Prototypes */ ++static struct LST_LIST *CreateChirpList(u32 uChirps); ++ ++static void FreeChirpList(struct LST_LIST *pList); ++ ++static struct CHNL_IRP *MakeNewChirp(void); ++ ++static DSP_STATUS SearchFreeChannel(struct CHNL_MGR *pChnlMgr, ++ OUT u32 *pdwChnl); ++ ++/* ++ * ======== WMD_CHNL_AddIOReq ======== ++ * Enqueue an I/O request for data transfer on a channel to the DSP. ++ * The direction (mode) is specified in the channel object. Note the DSP ++ * address is specified for channels opened in direct I/O mode. 
++ */ ++DSP_STATUS WMD_CHNL_AddIOReq(struct CHNL_OBJECT *hChnl, void *pHostBuf, ++ u32 cBytes, u32 cBufSize, ++ OPTIONAL u32 dwDspAddr, u32 dwArg) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; ++ struct CHNL_IRP *pChirp = NULL; ++ u32 dwState; ++ bool fIsEOS; ++ struct CHNL_MGR *pChnlMgr = pChnl->pChnlMgr; ++ u8 *pHostSysBuf = NULL; ++ bool fSchedDPC = false; ++ u16 wMbVal = 0; ++ ++ DBG_Trace(DBG_ENTER, ++ "> WMD_CHNL_AddIOReq pChnl %p CHNL_IsOutput %x uChnlType " ++ "%x Id %d\n", pChnl, CHNL_IsOutput(pChnl->uMode), ++ pChnl->uChnlType, pChnl->uId); ++ ++ fIsEOS = (cBytes == 0) ? true : false; ++ ++ if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1 && pHostBuf) { ++ if (!(pHostBuf < (void *)USERMODE_ADDR)) { ++ pHostSysBuf = pHostBuf; ++ goto func_cont; ++ } ++ /* if addr in user mode, then copy to kernel space */ ++ pHostSysBuf = MEM_Alloc(cBufSize, MEM_NONPAGED); ++ if (pHostSysBuf == NULL) { ++ status = DSP_EMEMORY; ++ DBG_Trace(DBG_LEVEL7, ++ "No memory to allocate kernel buffer\n"); ++ goto func_cont; ++ } ++ if (CHNL_IsOutput(pChnl->uMode)) { ++ status = copy_from_user(pHostSysBuf, pHostBuf, ++ cBufSize); ++ if (status) { ++ DBG_Trace(DBG_LEVEL7, ++ "Error copying user buffer to " ++ "kernel, %d bytes remaining.\n", ++ status); ++ MEM_Free(pHostSysBuf); ++ pHostSysBuf = NULL; ++ status = DSP_EPOINTER; ++ } ++ } ++ } ++func_cont: ++ /* Validate args: */ ++ if (pHostBuf == NULL) { ++ status = DSP_EPOINTER; ++ } else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else if (fIsEOS && CHNL_IsInput(pChnl->uMode)) { ++ status = CHNL_E_NOEOS; ++ } else { ++ /* Check the channel state: only queue chirp if channel state ++ * allows */ ++ dwState = pChnl->dwState; ++ if (dwState != CHNL_STATEREADY) { ++ if (dwState & CHNL_STATECANCEL) { ++ status = CHNL_E_CANCELLED; ++ } else if ((dwState & CHNL_STATEEOS) ++ && CHNL_IsOutput(pChnl->uMode)) { ++ status = CHNL_E_EOS; ++ } else { ++ /* No other possible states left: */ ++ DBC_Assert(0); ++ } ++ } ++ } ++ /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY ++ * channels. DPCCS is held to avoid race conditions with PCPY channels. ++ * If DPC is scheduled in process context (IO_Schedule) and any ++ * non-mailbox interrupt occurs, that DPC will run and break CS. Hence ++ * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */ ++ SYNC_EnterCS(pChnlMgr->hCSObj); ++ disable_irq(MAILBOX_IRQ); ++ if (pChnl->uChnlType == CHNL_PCPY) { ++ /* This is a processor-copy channel. */ ++ if (DSP_SUCCEEDED(status) && CHNL_IsOutput(pChnl->uMode)) { ++ /* Check buffer size on output channels for fit. 
*/ ++ if (cBytes > IO_BufSize(pChnl->pChnlMgr->hIOMgr)) ++ status = CHNL_E_BUFSIZE; ++ ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Get a free chirp: */ ++ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pFreeList); ++ if (pChirp == NULL) ++ status = CHNL_E_NOIORPS; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Enqueue the chirp on the chnl's IORequest queue: */ ++ pChirp->pHostUserBuf = pChirp->pHostSysBuf = pHostBuf; ++ if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1) ++ pChirp->pHostSysBuf = pHostSysBuf; ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Note: for dma chans dwDspAddr contains dsp address ++ * of SM buffer.*/ ++ DBC_Assert(pChnlMgr->uWordSize != 0); ++ /* DSP address */ ++ pChirp->uDspAddr = dwDspAddr / pChnlMgr->uWordSize; ++ pChirp->cBytes = cBytes; ++ pChirp->cBufSize = cBufSize; ++ /* Only valid for output channel */ ++ pChirp->dwArg = dwArg; ++ pChirp->status = (fIsEOS ? CHNL_IOCSTATEOS : ++ CHNL_IOCSTATCOMPLETE); ++ LST_PutTail(pChnl->pIORequests, (struct LST_ELEM *) ++ pChirp); ++ pChnl->cIOReqs++; ++ DBC_Assert(pChnl->cIOReqs <= pChnl->cChirps); ++ /* If end of stream, update the channel state to prevent ++ * more IOR's: */ ++ if (fIsEOS) ++ pChnl->dwState |= CHNL_STATEEOS; ++ ++ { ++ /* Legacy DSM Processor-Copy */ ++ DBC_Assert(pChnl->uChnlType == CHNL_PCPY); ++ /* Request IO from the DSP */ ++ IO_RequestChnl(pChnlMgr->hIOMgr, pChnl, ++ (CHNL_IsInput(pChnl->uMode) ? ++ IO_INPUT : IO_OUTPUT), &wMbVal); ++ fSchedDPC = true; ++ } ++ } ++ } ++ enable_irq(MAILBOX_IRQ); ++ SYNC_LeaveCS(pChnlMgr->hCSObj); ++ if (wMbVal != 0) ++ IO_IntrDSP2(pChnlMgr->hIOMgr, wMbVal); ++ ++ if (fSchedDPC == true) { ++ /* Schedule a DPC, to do the actual data transfer: */ ++ IO_Schedule(pChnlMgr->hIOMgr); ++ } ++ DBG_Trace(DBG_ENTER, "< WMD_CHNL_AddIOReq pChnl %p\n", pChnl); ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_CancelIO ======== ++ * Return all I/O requests to the client which have not yet been ++ * transferred. The channel's I/O completion object is ++ * signalled, and all the I/O requests are queued as IOC's, with the ++ * status field set to CHNL_IOCSTATCANCEL. ++ * This call is typically used in abort situations, and is a prelude to ++ * CHNL_Close(); ++ */ ++DSP_STATUS WMD_CHNL_CancelIO(struct CHNL_OBJECT *hChnl) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; ++ u32 iChnl = -1; ++ CHNL_MODE uMode; ++ struct CHNL_IRP *pChirp; ++ struct CHNL_MGR *pChnlMgr = NULL; ++ ++ /* Check args: */ ++ if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { ++ iChnl = pChnl->uId; ++ uMode = pChnl->uMode; ++ pChnlMgr = pChnl->pChnlMgr; ++ } else { ++ status = DSP_EHANDLE; ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ /* Mark this channel as cancelled, to prevent further IORequests or ++ * IORequests or dispatching. 
*/ ++ SYNC_EnterCS(pChnlMgr->hCSObj); ++ pChnl->dwState |= CHNL_STATECANCEL; ++ if (LST_IsEmpty(pChnl->pIORequests)) ++ goto func_cont; ++ ++ if (pChnl->uChnlType == CHNL_PCPY) { ++ /* Indicate we have no more buffers available for transfer: */ ++ if (CHNL_IsInput(pChnl->uMode)) { ++ IO_CancelChnl(pChnlMgr->hIOMgr, iChnl); ++ } else { ++ /* Record that we no longer have output buffers ++ * available: */ ++ pChnlMgr->dwOutputMask &= ~(1 << iChnl); ++ } ++ } ++ /* Move all IOR's to IOC queue: */ ++ while (!LST_IsEmpty(pChnl->pIORequests)) { ++ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIORequests); ++ if (pChirp) { ++ pChirp->cBytes = 0; ++ pChirp->status |= CHNL_IOCSTATCANCEL; ++ LST_PutTail(pChnl->pIOCompletions, ++ (struct LST_ELEM *)pChirp); ++ pChnl->cIOCs++; ++ pChnl->cIOReqs--; ++ DBC_Assert(pChnl->cIOReqs >= 0); ++ } ++ } ++func_cont: ++ SYNC_LeaveCS(pChnlMgr->hCSObj); ++func_end: ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_Close ======== ++ * Purpose: ++ * Ensures all pending I/O on this channel is cancelled, discards all ++ * queued I/O completion notifications, then frees the resources allocated ++ * for this channel, and makes the corresponding logical channel id ++ * available for subsequent use. ++ */ ++DSP_STATUS WMD_CHNL_Close(struct CHNL_OBJECT *hChnl) ++{ ++ DSP_STATUS status; ++ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; ++ ++ /* Check args: */ ++ if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ goto func_cont; ++ } ++ { ++ /* Cancel IO: this ensures no further IO requests or ++ * notifications.*/ ++ status = WMD_CHNL_CancelIO(hChnl); ++ } ++func_cont: ++ if (DSP_SUCCEEDED(status)) { ++ /* Assert I/O on this channel is now cancelled: Protects ++ * from IO_DPC. */ ++ DBC_Assert((pChnl->dwState & CHNL_STATECANCEL)); ++ /* Invalidate channel object: Protects from ++ * CHNL_GetIOCompletion(). */ ++ pChnl->dwSignature = 0x0000; ++ /* Free the slot in the channel manager: */ ++ pChnl->pChnlMgr->apChannel[pChnl->uId] = NULL; ++ pChnl->pChnlMgr->cOpenChannels -= 1; ++ if (pChnl->hNtfy) { ++ NTFY_Delete(pChnl->hNtfy); ++ pChnl->hNtfy = NULL; ++ } ++ /* Reset channel event: (NOTE: hUserEvent freed in user ++ * context.). */ ++ if (pChnl->hSyncEvent) { ++ SYNC_ResetEvent(pChnl->hSyncEvent); ++ SYNC_CloseEvent(pChnl->hSyncEvent); ++ pChnl->hSyncEvent = NULL; ++ } ++ /* Free I/O request and I/O completion queues: */ ++ if (pChnl->pIOCompletions) { ++ FreeChirpList(pChnl->pIOCompletions); ++ pChnl->pIOCompletions = NULL; ++ pChnl->cIOCs = 0; ++ } ++ if (pChnl->pIORequests) { ++ FreeChirpList(pChnl->pIORequests); ++ pChnl->pIORequests = NULL; ++ pChnl->cIOReqs = 0; ++ } ++ if (pChnl->pFreeList) { ++ FreeChirpList(pChnl->pFreeList); ++ pChnl->pFreeList = NULL; ++ } ++ /* Release channel object. */ ++ MEM_FreeObject(pChnl); ++ pChnl = NULL; ++ } ++ DBC_Ensure(DSP_FAILED(status) || ++ !MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)); ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_Create ======== ++ * Create a channel manager object, responsible for opening new channels ++ * and closing old ones for a given board. 
++ */ ++DSP_STATUS WMD_CHNL_Create(OUT struct CHNL_MGR **phChnlMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CHNL_MGRATTRS *pMgrAttrs) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_MGR *pChnlMgr = NULL; ++ s32 cChannels; ++#ifdef DEBUG ++ struct CHNL_MGR *hChnlMgr; ++#endif ++ /* Check DBC requirements: */ ++ DBC_Require(phChnlMgr != NULL); ++ DBC_Require(pMgrAttrs != NULL); ++ DBC_Require(pMgrAttrs->cChannels > 0); ++ DBC_Require(pMgrAttrs->cChannels <= CHNL_MAXCHANNELS); ++ DBC_Require(pMgrAttrs->uWordSize != 0); ++#ifdef DEBUG ++ /* This for the purposes of DBC_Require: */ ++ status = DEV_GetChnlMgr(hDevObject, &hChnlMgr); ++ DBC_Require(status != DSP_EHANDLE); ++ DBC_Require(hChnlMgr == NULL); ++#endif ++ if (DSP_SUCCEEDED(status)) { ++ /* Allocate channel manager object: */ ++ MEM_AllocObject(pChnlMgr, struct CHNL_MGR, CHNL_MGRSIGNATURE); ++ if (pChnlMgr) { ++ /* The cChannels attr must equal the # of supported ++ * chnls for each transport(# chnls for PCPY = DDMA = ++ * ZCPY): i.e. pMgrAttrs->cChannels = CHNL_MAXCHANNELS = ++ * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS. */ ++ DBC_Assert(pMgrAttrs->cChannels == CHNL_MAXCHANNELS); ++ cChannels = (CHNL_MAXCHANNELS + (CHNL_MAXCHANNELS * ++ CHNL_PCPY)); ++ /* Create array of channels: */ ++ pChnlMgr->apChannel = MEM_Calloc( ++ sizeof(struct CHNL_OBJECT *) * ++ cChannels, MEM_NONPAGED); ++ if (pChnlMgr->apChannel) { ++ /* Initialize CHNL_MGR object: */ ++ /* Shared memory driver. */ ++ pChnlMgr->dwType = CHNL_TYPESM; ++ pChnlMgr->uWordSize = pMgrAttrs->uWordSize; ++ /* total # chnls supported */ ++ pChnlMgr->cChannels = cChannels; ++ pChnlMgr->cOpenChannels = 0; ++ pChnlMgr->dwOutputMask = 0; ++ pChnlMgr->dwLastOutput = 0; ++ pChnlMgr->hDevObject = hDevObject; ++ if (DSP_SUCCEEDED(status)) { ++ status = SYNC_InitializeDPCCS ++ (&pChnlMgr->hCSObj); ++ } ++ } else { ++ status = DSP_EMEMORY; ++ } ++ } else { ++ status = DSP_EMEMORY; ++ } ++ } ++ if (DSP_FAILED(status)) { ++ WMD_CHNL_Destroy(pChnlMgr); ++ *phChnlMgr = NULL; ++ } else { ++ /* Return channel manager object to caller... */ ++ *phChnlMgr = pChnlMgr; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_Destroy ======== ++ * Purpose: ++ * Close all open channels, and destroy the channel manager. ++ */ ++DSP_STATUS WMD_CHNL_Destroy(struct CHNL_MGR *hChnlMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_MGR *pChnlMgr = hChnlMgr; ++ u32 iChnl; ++ ++ if (MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { ++ /* Close all open channels: */ ++ for (iChnl = 0; iChnl < pChnlMgr->cChannels; iChnl++) { ++ if (DSP_SUCCEEDED ++ (WMD_CHNL_Close(pChnlMgr->apChannel[iChnl]))) { ++ DBC_Assert(pChnlMgr->apChannel[iChnl] == NULL); ++ } ++ } ++ /* release critical section */ ++ if (pChnlMgr->hCSObj) ++ SYNC_DeleteCS(pChnlMgr->hCSObj); ++ ++ /* Free channel manager object: */ ++ if (pChnlMgr->apChannel) ++ MEM_Free(pChnlMgr->apChannel); ++ ++ /* Set hChnlMgr to NULL in device object. */ ++ DEV_SetChnlMgr(pChnlMgr->hDevObject, NULL); ++ /* Free this Chnl Mgr object: */ ++ MEM_FreeObject(hChnlMgr); ++ } else { ++ status = DSP_EHANDLE; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_FlushIO ======== ++ * purpose: ++ * Flushes all the outstanding data requests on a channel. 
++ */ ++DSP_STATUS WMD_CHNL_FlushIO(struct CHNL_OBJECT *hChnl, u32 dwTimeOut) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; ++ CHNL_MODE uMode = -1; ++ struct CHNL_MGR *pChnlMgr; ++ struct CHNL_IOC chnlIOC; ++ /* Check args: */ ++ if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { ++ if ((dwTimeOut == CHNL_IOCNOWAIT) ++ && CHNL_IsOutput(pChnl->uMode)) { ++ status = DSP_EINVALIDARG; ++ } else { ++ uMode = pChnl->uMode; ++ pChnlMgr = pChnl->pChnlMgr; ++ } ++ } else { ++ status = DSP_EHANDLE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Note: Currently, if another thread continues to add IO ++ * requests to this channel, this function will continue to ++ * flush all such queued IO requests. */ ++ if (CHNL_IsOutput(uMode) && (pChnl->uChnlType == CHNL_PCPY)) { ++ /* Wait for IO completions, up to the specified ++ * timeout: */ ++ while (!LST_IsEmpty(pChnl->pIORequests) && ++ DSP_SUCCEEDED(status)) { ++ status = WMD_CHNL_GetIOC(hChnl, dwTimeOut, ++ &chnlIOC); ++ if (DSP_FAILED(status)) ++ continue; ++ ++ if (chnlIOC.status & CHNL_IOCSTATTIMEOUT) ++ status = CHNL_E_WAITTIMEOUT; ++ ++ } ++ } else { ++ status = WMD_CHNL_CancelIO(hChnl); ++ /* Now, leave the channel in the ready state: */ ++ pChnl->dwState &= ~CHNL_STATECANCEL; ++ } ++ } ++ DBC_Ensure(DSP_FAILED(status) || LST_IsEmpty(pChnl->pIORequests)); ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_GetInfo ======== ++ * Purpose: ++ * Retrieve information related to a channel. ++ */ ++DSP_STATUS WMD_CHNL_GetInfo(struct CHNL_OBJECT *hChnl, ++ OUT struct CHNL_INFO *pInfo) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; ++ if (pInfo != NULL) { ++ if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { ++ /* Return the requested information: */ ++ pInfo->hChnlMgr = pChnl->pChnlMgr; ++ pInfo->hEvent = pChnl->hUserEvent; ++ pInfo->dwID = pChnl->uId; ++ pInfo->dwMode = pChnl->uMode; ++ pInfo->cPosition = pChnl->cBytesMoved; ++ pInfo->hProcess = pChnl->hProcess; ++ pInfo->hSyncEvent = pChnl->hSyncEvent; ++ pInfo->cIOCs = pChnl->cIOCs; ++ pInfo->cIOReqs = pChnl->cIOReqs; ++ pInfo->dwState = pChnl->dwState; ++ } else { ++ status = DSP_EHANDLE; ++ } ++ } else { ++ status = DSP_EPOINTER; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_GetIOC ======== ++ * Optionally wait for I/O completion on a channel. Dequeue an I/O ++ * completion record, which contains information about the completed ++ * I/O request. ++ * Note: Ensures Channel Invariant (see notes above). 
++ */ ++DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut, ++ OUT struct CHNL_IOC *pIOC) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; ++ struct CHNL_IRP *pChirp; ++ DSP_STATUS statSync; ++ bool fDequeueIOC = true; ++ struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 }; ++ u8 *pHostSysBuf = NULL; ++ ++ DBG_Trace(DBG_ENTER, "> WMD_CHNL_GetIOC pChnl %p CHNL_IsOutput %x " ++ "uChnlType %x\n", pChnl, CHNL_IsOutput(pChnl->uMode), ++ pChnl->uChnlType); ++ /* Check args: */ ++ if (pIOC == NULL) { ++ status = DSP_EPOINTER; ++ } else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else if (dwTimeOut == CHNL_IOCNOWAIT) { ++ if (LST_IsEmpty(pChnl->pIOCompletions)) ++ status = CHNL_E_NOIOC; ++ ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ ioc.status = CHNL_IOCSTATCOMPLETE; ++ if (dwTimeOut != CHNL_IOCNOWAIT && LST_IsEmpty(pChnl->pIOCompletions)) { ++ if (dwTimeOut == CHNL_IOCINFINITE) ++ dwTimeOut = SYNC_INFINITE; ++ ++ statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut); ++ if (statSync == DSP_ETIMEOUT) { ++ /* No response from DSP */ ++ ioc.status |= CHNL_IOCSTATTIMEOUT; ++ fDequeueIOC = false; ++ } else if (statSync == DSP_EFAIL) { ++ /* This can occur when the user mode thread is ++ * aborted (^C), or when _VWIN32_WaitSingleObject() ++ * fails due to unkown causes. */ ++ /* Even though Wait failed, there may be something in ++ * the Q: */ ++ if (LST_IsEmpty(pChnl->pIOCompletions)) { ++ ioc.status |= CHNL_IOCSTATCANCEL; ++ fDequeueIOC = false; ++ } ++ } ++ } ++ /* See comment in AddIOReq */ ++ SYNC_EnterCS(pChnl->pChnlMgr->hCSObj); ++ disable_irq(MAILBOX_IRQ); ++ if (fDequeueIOC) { ++ /* Dequeue IOC and set pIOC; */ ++ DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions)); ++ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIOCompletions); ++ /* Update pIOC from channel state and chirp: */ ++ if (pChirp) { ++ pChnl->cIOCs--; ++ /* If this is a zero-copy channel, then set IOC's pBuf ++ * to the DSP's address. This DSP address will get ++ * translated to user's virtual addr later. */ ++ { ++ pHostSysBuf = pChirp->pHostSysBuf; ++ ioc.pBuf = pChirp->pHostUserBuf; ++ } ++ ioc.cBytes = pChirp->cBytes; ++ ioc.cBufSize = pChirp->cBufSize; ++ ioc.dwArg = pChirp->dwArg; ++ ioc.status |= pChirp->status; ++ /* Place the used chirp on the free list: */ ++ LST_PutTail(pChnl->pFreeList, (struct LST_ELEM *) ++ pChirp); ++ } else { ++ ioc.pBuf = NULL; ++ ioc.cBytes = 0; ++ } ++ } else { ++ ioc.pBuf = NULL; ++ ioc.cBytes = 0; ++ ioc.dwArg = 0; ++ ioc.cBufSize = 0; ++ } ++ /* Ensure invariant: If any IOC's are queued for this channel... */ ++ if (!LST_IsEmpty(pChnl->pIOCompletions)) { ++ /* Since DSPStream_Reclaim() does not take a timeout ++ * parameter, we pass the stream's timeout value to ++ * WMD_CHNL_GetIOC. We cannot determine whether or not ++ * we have waited in User mode. Since the stream's timeout ++ * value may be non-zero, we still have to set the event. ++ * Therefore, this optimization is taken out. ++ * ++ * if (dwTimeOut == CHNL_IOCNOWAIT) { ++ * ... ensure event is set.. ++ * SYNC_SetEvent(pChnl->hSyncEvent); ++ * } */ ++ SYNC_SetEvent(pChnl->hSyncEvent); ++ } else { ++ /* else, if list is empty, ensure event is reset. 
*/ ++ SYNC_ResetEvent(pChnl->hSyncEvent); ++ } ++ enable_irq(MAILBOX_IRQ); ++ SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj); ++ if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) { ++ if (!(ioc.pBuf < (void *) USERMODE_ADDR)) ++ goto func_cont; ++ ++ /* If the addr is in user mode, then copy it */ ++ if (!pHostSysBuf || !ioc.pBuf) { ++ status = DSP_EPOINTER; ++ DBG_Trace(DBG_LEVEL7, ++ "System buffer NULL in IO completion.\n"); ++ goto func_cont; ++ } ++ if (!CHNL_IsInput(pChnl->uMode)) ++ goto func_cont1; ++ ++ /*pHostUserBuf */ ++ status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes); ++#ifndef RES_CLEANUP_DISABLE ++ if (status) { ++ if (current->flags & PF_EXITING) { ++ DBG_Trace(DBG_LEVEL7, ++ "\n2current->flags == PF_EXITING, " ++ " current->flags;0x%x\n", ++ current->flags); ++ status = 0; ++ } else { ++ DBG_Trace(DBG_LEVEL7, ++ "\n2current->flags != PF_EXITING, " ++ " current->flags;0x%x\n", ++ current->flags); ++ } ++ } ++#endif ++ if (status) { ++ DBG_Trace(DBG_LEVEL7, ++ "Error copying kernel buffer to user, %d" ++ " bytes remaining. in_interupt %d\n", ++ status, in_interrupt()); ++ status = DSP_EPOINTER; ++ } ++func_cont1: ++ MEM_Free(pHostSysBuf); ++ } ++func_cont: ++ /* Update User's IOC block: */ ++ *pIOC = ioc; ++func_end: ++ DBG_Trace(DBG_ENTER, "< WMD_CHNL_GetIOC pChnl %p\n", pChnl); ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_GetMgrInfo ======== ++ * Retrieve information related to the channel manager. ++ */ ++DSP_STATUS WMD_CHNL_GetMgrInfo(struct CHNL_MGR *hChnlMgr, u32 uChnlID, ++ OUT struct CHNL_MGRINFO *pMgrInfo) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_MGR *pChnlMgr = (struct CHNL_MGR *)hChnlMgr; ++ ++ if (pMgrInfo != NULL) { ++ if (uChnlID <= CHNL_MAXCHANNELS) { ++ if (MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { ++ /* Return the requested information: */ ++ pMgrInfo->hChnl = pChnlMgr->apChannel[uChnlID]; ++ pMgrInfo->cOpenChannels = pChnlMgr-> ++ cOpenChannels; ++ pMgrInfo->dwType = pChnlMgr->dwType; ++ /* total # of chnls */ ++ pMgrInfo->cChannels = pChnlMgr->cChannels; ++ } else { ++ status = DSP_EHANDLE; ++ } ++ } else { ++ status = CHNL_E_BADCHANID; ++ } ++ } else { ++ status = DSP_EPOINTER; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_Idle ======== ++ * Idles a particular channel. ++ */ ++DSP_STATUS WMD_CHNL_Idle(struct CHNL_OBJECT *hChnl, u32 dwTimeOut, ++ bool fFlush) ++{ ++ CHNL_MODE uMode; ++ struct CHNL_MGR *pChnlMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(hChnl, CHNL_SIGNATURE)); ++ ++ uMode = hChnl->uMode; ++ pChnlMgr = hChnl->pChnlMgr; ++ ++ if (CHNL_IsOutput(uMode) && !fFlush) { ++ /* Wait for IO completions, up to the specified timeout: */ ++ status = WMD_CHNL_FlushIO(hChnl, dwTimeOut); ++ } else { ++ status = WMD_CHNL_CancelIO(hChnl); ++ ++ /* Reset the byte count and put channel back in ready state. */ ++ hChnl->cBytesMoved = 0; ++ hChnl->dwState &= ~CHNL_STATECANCEL; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_Open ======== ++ * Open a new half-duplex channel to the DSP board. 
++ */ ++DSP_STATUS WMD_CHNL_Open(OUT struct CHNL_OBJECT **phChnl, ++ struct CHNL_MGR *hChnlMgr, CHNL_MODE uMode, ++ u32 uChnlId, CONST IN struct CHNL_ATTRS *pAttrs) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct CHNL_MGR *pChnlMgr = hChnlMgr; ++ struct CHNL_OBJECT *pChnl = NULL; ++ struct SYNC_ATTRS *pSyncAttrs = NULL; ++ struct SYNC_OBJECT *hSyncEvent = NULL; ++ /* Ensure DBC requirements: */ ++ DBC_Require(phChnl != NULL); ++ DBC_Require(pAttrs != NULL); ++ *phChnl = NULL; ++ /* Validate Args: */ ++ if (pAttrs->uIOReqs == 0) { ++ status = DSP_EINVALIDARG; ++ } else { ++ if (!MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { ++ status = DSP_EHANDLE; ++ } else { ++ if (uChnlId != CHNL_PICKFREE) { ++ if (uChnlId >= pChnlMgr->cChannels) { ++ status = CHNL_E_BADCHANID; ++ } else if (pChnlMgr->apChannel[uChnlId] != ++ NULL) { ++ status = CHNL_E_CHANBUSY; ++ } ++ } else { ++ /* Check for free channel */ ++ status = SearchFreeChannel(pChnlMgr, &uChnlId); ++ } ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_end; ++ ++ DBC_Assert(uChnlId < pChnlMgr->cChannels); ++ /* Create channel object: */ ++ MEM_AllocObject(pChnl, struct CHNL_OBJECT, 0x0000); ++ if (!pChnl) { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ /* Protect queues from IO_DPC: */ ++ pChnl->dwState = CHNL_STATECANCEL; ++ /* Allocate initial IOR and IOC queues: */ ++ pChnl->pFreeList = CreateChirpList(pAttrs->uIOReqs); ++ pChnl->pIORequests = CreateChirpList(0); ++ pChnl->pIOCompletions = CreateChirpList(0); ++ pChnl->cChirps = pAttrs->uIOReqs; ++ pChnl->cIOCs = 0; ++ pChnl->cIOReqs = 0; ++ status = SYNC_OpenEvent(&hSyncEvent, pSyncAttrs); ++ if (DSP_SUCCEEDED(status)) { ++ status = NTFY_Create(&pChnl->hNtfy); ++ if (DSP_FAILED(status)) { ++ /* The only failure that could have occurred */ ++ status = DSP_EMEMORY; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ if (pChnl->pIOCompletions && pChnl->pIORequests && ++ pChnl->pFreeList) { ++ /* Initialize CHNL object fields: */ ++ pChnl->pChnlMgr = pChnlMgr; ++ pChnl->uId = uChnlId; ++ pChnl->uMode = uMode; ++ pChnl->hUserEvent = hSyncEvent; /* for Linux */ ++ pChnl->hSyncEvent = hSyncEvent; ++ /* Return TGID instead of process handle */ ++ pChnl->hProcess = current->tgid; ++ pChnl->pCBArg = 0; ++ pChnl->cBytesMoved = 0; ++ /* Default to proc-copy */ ++ pChnl->uChnlType = CHNL_PCPY; ++ } else { ++ status = DSP_EMEMORY; ++ } ++ } else { ++ status = DSP_EINVALIDARG; ++ } ++ if (DSP_FAILED(status)) { ++ /* Free memory */ ++ if (pChnl->pIOCompletions) { ++ FreeChirpList(pChnl->pIOCompletions); ++ pChnl->pIOCompletions = NULL; ++ pChnl->cIOCs = 0; ++ } ++ if (pChnl->pIORequests) { ++ FreeChirpList(pChnl->pIORequests); ++ pChnl->pIORequests = NULL; ++ } ++ if (pChnl->pFreeList) { ++ FreeChirpList(pChnl->pFreeList); ++ pChnl->pFreeList = NULL; ++ } ++ if (hSyncEvent) { ++ SYNC_CloseEvent(hSyncEvent); ++ hSyncEvent = NULL; ++ } ++ if (pChnl->hNtfy) { ++ NTFY_Delete(pChnl->hNtfy); ++ pChnl->hNtfy = NULL; ++ } ++ MEM_FreeObject(pChnl); ++ } ++func_cont: ++ if (DSP_SUCCEEDED(status)) { ++ /* Insert channel object in channel manager: */ ++ pChnlMgr->apChannel[pChnl->uId] = pChnl; ++ SYNC_EnterCS(pChnlMgr->hCSObj); ++ pChnlMgr->cOpenChannels++; ++ SYNC_LeaveCS(pChnlMgr->hCSObj); ++ /* Return result... 
*/ ++ pChnl->dwSignature = CHNL_SIGNATURE; ++ pChnl->dwState = CHNL_STATEREADY; ++ *phChnl = pChnl; ++ } ++func_end: ++ DBC_Ensure((DSP_SUCCEEDED(status) && ++ MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) || ++ (*phChnl == NULL)); ++ return status; ++} ++ ++/* ++ * ======== WMD_CHNL_RegisterNotify ======== ++ * Registers for events on a particular channel. ++ */ ++DSP_STATUS WMD_CHNL_RegisterNotify(struct CHNL_OBJECT *hChnl, u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Assert(!(uEventMask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION))); ++ ++ status = NTFY_Register(hChnl->hNtfy, hNotification, uEventMask, ++ uNotifyType); ++ ++ return status; ++} ++ ++/* ++ * ======== CreateChirpList ======== ++ * Purpose: ++ * Initialize a queue of channel I/O Request/Completion packets. ++ * Parameters: ++ * uChirps: Number of Chirps to allocate. ++ * Returns: ++ * Pointer to queue of IRPs, or NULL. ++ * Requires: ++ * Ensures: ++ */ ++static struct LST_LIST *CreateChirpList(u32 uChirps) ++{ ++ struct LST_LIST *pChirpList; ++ struct CHNL_IRP *pChirp; ++ u32 i; ++ ++ pChirpList = LST_Create(); ++ ++ if (pChirpList) { ++ /* Make N chirps and place on queue. */ ++ for (i = 0; (i < uChirps) && ((pChirp = MakeNewChirp()) != ++ NULL); i++) { ++ LST_PutTail(pChirpList, (struct LST_ELEM *)pChirp); ++ } ++ ++ /* If we couldn't allocate all chirps, free those allocated: */ ++ if (i != uChirps) { ++ FreeChirpList(pChirpList); ++ pChirpList = NULL; ++ } ++ } ++ ++ return pChirpList; ++} ++ ++/* ++ * ======== FreeChirpList ======== ++ * Purpose: ++ * Free the queue of Chirps. ++ */ ++static void FreeChirpList(struct LST_LIST *pChirpList) ++{ ++ DBC_Require(pChirpList != NULL); ++ ++ while (!LST_IsEmpty(pChirpList)) ++ MEM_Free(LST_GetHead(pChirpList)); ++ ++ LST_Delete(pChirpList); ++} ++ ++/* ++ * ======== MakeNewChirp ======== ++ * Allocate the memory for a new channel IRP. ++ */ ++static struct CHNL_IRP *MakeNewChirp(void) ++{ ++ struct CHNL_IRP *pChirp; ++ ++ pChirp = (struct CHNL_IRP *)MEM_Calloc( ++ sizeof(struct CHNL_IRP), MEM_NONPAGED); ++ if (pChirp != NULL) { ++ /* LST_InitElem only resets the list's member values. */ ++ LST_InitElem(&pChirp->link); ++ } ++ ++ return pChirp; ++} ++ ++/* ++ * ======== SearchFreeChannel ======== ++ * Search for a free channel slot in the array of channel pointers. ++ */ ++static DSP_STATUS SearchFreeChannel(struct CHNL_MGR *pChnlMgr, ++ OUT u32 *pdwChnl) ++{ ++ DSP_STATUS status = CHNL_E_OUTOFSTREAMS; ++ u32 i; ++ ++ DBC_Require(MEM_IsValidHandle(pChnlMgr, CHNL_MGRSIGNATURE)); ++ ++ for (i = 0; i < pChnlMgr->cChannels; i++) { ++ if (pChnlMgr->apChannel[i] == NULL) { ++ status = DSP_SOK; ++ *pdwChnl = i; ++ break; ++ } ++ } ++ ++ return status; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/io_sm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/io_sm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/io_sm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/io_sm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2009 @@ ++/* ++ * io_sm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== io_sm.c ======== ++ * Description: ++ * IO dispatcher for a shared memory channel driver. ++ * ++ * Public Functions: ++ * WMD_IO_Create ++ * WMD_IO_Destroy ++ * WMD_IO_OnLoaded ++ * IO_AndSetValue ++ * IO_BufSize ++ * IO_CancelChnl ++ * IO_DPC ++ * IO_ISR ++ * IO_IVAISR ++ * IO_OrSetValue ++ * IO_ReadValue ++ * IO_ReadValueLong ++ * IO_RequestChnl ++ * IO_Schedule ++ * IO_WriteValue ++ * IO_WriteValueLong ++ * ++ * Channel Invariant: ++ * There is an important invariant condition which must be maintained per ++ * channel outside of WMD_CHNL_GetIOC() and IO_Dispatch(), violation of ++ * which may cause timeouts and/or failure of the WIN32_WaitSingleObject ++ * function (SYNC_WaitOnEvent). ++ * ++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ------------------------------------ Hardware Abstraction Layer */ ++#include ++#include ++ ++/* ----------------------------------- Mini Driver */ ++#include ++#include ++#include ++#include <_tiomap.h> ++#include ++#include <_tiomap_pwr.h> ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Others */ ++#include ++#include ++#include ++#include "_cmm.h" ++ ++/* ----------------------------------- This */ ++#include ++#include "_msg_sm.h" ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define OUTPUTNOTREADY 0xffff ++#define NOTENABLED 0xffff /* channel(s) not enabled */ ++ ++#define EXTEND "_EXT_END" ++ ++#define SwapWord(x) (x) ++#define ulPageAlignSize 0x10000 /* Page Align Size */ ++ ++#define MAX_PM_REQS 32 ++ ++/* IO Manager: only one created per board: */ ++struct IO_MGR { ++ /* These four fields must be the first fields in a IO_MGR_ struct: */ ++ u32 dwSignature; /* Used for object validation */ ++ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD device context */ ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ ++ struct DEV_OBJECT *hDevObject; /* Device this board represents */ ++ ++ /* These fields initialized in WMD_IO_Create(): */ ++ struct CHNL_MGR *hChnlMgr; ++ struct SHM *pSharedMem; /* Shared Memory control */ ++ u8 *pInput; /* Address of input channel */ ++ u8 *pOutput; /* Address of output channel */ ++ struct MSG_MGR *hMsgMgr; /* Message manager */ ++ struct MSG *pMsgInputCtrl; /* Msg control for from DSP messages */ ++ struct MSG *pMsgOutputCtrl; /* Msg control for to DSP messages */ ++ u8 *pMsgInput; /* Address of input messages */ ++ u8 *pMsgOutput; /* Address of output messages */ ++ u32 uSMBufSize; /* Size of a shared memory I/O channel */ ++ bool fSharedIRQ; /* Is this IRQ shared? 
*/ ++ struct DPC_OBJECT *hDPC; /* DPC object handle */ ++ struct SYNC_CSOBJECT *hCSObj; /* Critical section object handle */ ++ u32 uWordSize; /* Size in bytes of DSP word */ ++ u16 wIntrVal; /* interrupt value */ ++ /* private extnd proc info; mmu setup */ ++ struct MGR_PROCESSOREXTINFO extProcInfo; ++ struct CMM_OBJECT *hCmmMgr; /* Shared Mem Mngr */ ++ struct work_struct io_workq; /*workqueue */ ++ u32 dQuePowerMbxVal[MAX_PM_REQS]; ++ u32 iQuePowerHead; ++ u32 iQuePowerTail; ++#ifndef DSP_TRACEBUF_DISABLED ++ u32 ulTraceBufferBegin; /* Trace message start address */ ++ u32 ulTraceBufferEnd; /* Trace message end address */ ++ u32 ulTraceBufferCurrent; /* Trace message current address */ ++ u32 ulGPPReadPointer; /* GPP Read pointer to Trace buffer */ ++ u8 *pMsg; ++ u32 ulGppVa; ++ u32 ulDspVa; ++#endif ++} ; ++ ++/* ----------------------------------- Function Prototypes */ ++static void IO_DispatchChnl(IN struct IO_MGR *pIOMgr, ++ IN OUT struct CHNL_OBJECT *pChnl, u32 iMode); ++static void IO_DispatchMsg(IN struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr); ++static void IO_DispatchPM(struct work_struct *work); ++static void NotifyChnlComplete(struct CHNL_OBJECT *pChnl, ++ struct CHNL_IRP *pChirp); ++static void InputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, ++ u32 iMode); ++static void OutputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, ++ u32 iMode); ++static void InputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr); ++static void OutputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr); ++static u32 FindReadyOutput(struct CHNL_MGR *pChnlMgr, ++ struct CHNL_OBJECT *pChnl, u32 dwMask); ++static u32 ReadData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, ++ void *pSrc, u32 uSize); ++static u32 WriteData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, ++ void *pSrc, u32 uSize); ++static struct workqueue_struct *bridge_workqueue; ++#ifndef DSP_TRACEBUF_DISABLED ++void PrintDSPDebugTrace(struct IO_MGR *hIOMgr); ++#endif ++ ++/* Bus Addr (cached kernel)*/ ++static DSP_STATUS registerSHMSegs(struct IO_MGR *hIOMgr, ++ struct COD_MANAGER *hCodMan, ++ u32 dwGPPBasePA); ++ ++#ifdef CONFIG_BRIDGE_DVFS ++/* The maximum number of OPPs that are supported */ ++extern s32 dsp_max_opps; ++/* The Vdd1 opp table information */ ++extern u32 vdd1_dsp_freq[6][4] ; ++#endif ++ ++#if GT_TRACE ++static struct GT_Mask dsp_trace_mask = { NULL, NULL }; /* GT trace variable */ ++#endif ++ ++/* ++ * ======== WMD_IO_Create ======== ++ * Create an IO manager object. ++ */ ++DSP_STATUS WMD_IO_Create(OUT struct IO_MGR **phIOMgr, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct IO_ATTRS *pMgrAttrs) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct IO_MGR *pIOMgr = NULL; ++ struct SHM *pSharedMem = NULL; ++ struct WMD_DEV_CONTEXT *hWmdContext = NULL; ++ struct CFG_HOSTRES hostRes; ++ struct CFG_DEVNODE *hDevNode; ++ struct CHNL_MGR *hChnlMgr; ++ static int ref_count; ++ u32 devType; ++ /* Check DBC requirements: */ ++ DBC_Require(phIOMgr != NULL); ++ DBC_Require(pMgrAttrs != NULL); ++ DBC_Require(pMgrAttrs->uWordSize != 0); ++ /* This for the purposes of DBC_Require: */ ++ status = DEV_GetChnlMgr(hDevObject, &hChnlMgr); ++ DBC_Require(status != DSP_EHANDLE); ++ DBC_Require(hChnlMgr != NULL); ++ DBC_Require(hChnlMgr->hIOMgr == NULL); ++ /* ++ * Message manager will be created when a file is loaded, since ++ * size of message buffer in shared memory is configurable in ++ * the base image. 
++ */ ++ DEV_GetWMDContext(hDevObject, &hWmdContext); ++ DBC_Assert(hWmdContext); ++ DEV_GetDevType(hDevObject, &devType); ++ /* ++ * DSP shared memory area will get set properly when ++ * a program is loaded. They are unknown until a COFF file is ++ * loaded. I chose the value -1 because it was less likely to be ++ * a valid address than 0. ++ */ ++ pSharedMem = (struct SHM *) -1; ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ /* Create a Single Threaded Work Queue */ ++ if (ref_count == 0) ++ bridge_workqueue = create_workqueue("bridge_work-queue"); ++ ++ if (!bridge_workqueue) ++ DBG_Trace(DBG_LEVEL1, "Workqueue creation failed!\n"); ++ ++ /* Allocate IO manager object: */ ++ MEM_AllocObject(pIOMgr, struct IO_MGR, IO_MGRSIGNATURE); ++ if (pIOMgr == NULL) { ++ status = DSP_EMEMORY; ++ goto func_cont; ++ } ++ ++ /* Intializing Work Element */ ++ if (ref_count == 0) { ++ INIT_WORK(&pIOMgr->io_workq, (void *)IO_DispatchPM); ++ ref_count = 1; ++ } else ++ PREPARE_WORK(&pIOMgr->io_workq, (void *)IO_DispatchPM); ++ ++ /* Initialize CHNL_MGR object: */ ++#ifndef DSP_TRACEBUF_DISABLED ++ pIOMgr->pMsg = NULL; ++#endif ++ pIOMgr->hChnlMgr = hChnlMgr; ++ pIOMgr->uWordSize = pMgrAttrs->uWordSize; ++ pIOMgr->pSharedMem = pSharedMem; ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_InitializeCS(&pIOMgr->hCSObj); ++ ++ if (devType == DSP_UNIT) { ++ /* Create a DPC object: */ ++ status = DPC_Create(&pIOMgr->hDPC, IO_DPC, (void *)pIOMgr); ++ if (DSP_SUCCEEDED(status)) ++ status = DEV_GetDevNode(hDevObject, &hDevNode); ++ ++ pIOMgr->iQuePowerHead = 0; ++ pIOMgr->iQuePowerTail = 0; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = CFG_GetHostResources((struct CFG_DEVNODE *) ++ DRV_GetFirstDevExtension() , &hostRes); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pIOMgr->hWmdContext = hWmdContext; ++ pIOMgr->fSharedIRQ = pMgrAttrs->fShared; ++ IO_DisableInterrupt(hWmdContext); ++ if (devType == DSP_UNIT) { ++ HW_MBOX_initSettings(hostRes.dwMboxBase); ++ /* Plug the channel ISR:. */ ++ if ((request_irq(INT_MAIL_MPU_IRQ, IO_ISR, 0, ++ "DspBridge\tmailbox", (void *)pIOMgr)) == 0) ++ status = DSP_SOK; ++ else ++ status = DSP_EFAIL; ++ } ++ if (DSP_SUCCEEDED(status)) ++ DBG_Trace(DBG_LEVEL1, "ISR_IRQ Object 0x%x \n", ++ pIOMgr); ++ else ++ status = CHNL_E_ISR; ++ } else ++ status = CHNL_E_ISR; ++func_cont: ++ if (DSP_FAILED(status)) { ++ /* Cleanup: */ ++ WMD_IO_Destroy(pIOMgr); ++ *phIOMgr = NULL; ++ } else { ++ /* Return IO manager object to caller... */ ++ hChnlMgr->hIOMgr = pIOMgr; ++ *phIOMgr = pIOMgr; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_IO_Destroy ======== ++ * Purpose: ++ * Disable interrupts, destroy the IO manager. ++ */ ++DSP_STATUS WMD_IO_Destroy(struct IO_MGR *hIOMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *hWmdContext; ++ if (MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)) { ++ /* Unplug IRQ: */ ++ /* Disable interrupts from the board: */ ++ if (DSP_SUCCEEDED(DEV_GetWMDContext(hIOMgr->hDevObject, ++ &hWmdContext))) ++ DBC_Assert(hWmdContext); ++ (void)CHNLSM_DisableInterrupt(hWmdContext); ++ destroy_workqueue(bridge_workqueue); ++ /* Linux function to uninstall ISR */ ++ free_irq(INT_MAIL_MPU_IRQ, (void *)hIOMgr); ++ (void)DPC_Destroy(hIOMgr->hDPC); ++#ifndef DSP_TRACEBUF_DISABLED ++ if (hIOMgr->pMsg) ++ MEM_Free(hIOMgr->pMsg); ++#endif ++ SYNC_DeleteCS(hIOMgr->hCSObj); /* Leak Fix. 
*/ ++ /* Free this IO manager object: */ ++ MEM_FreeObject(hIOMgr); ++ } else ++ status = DSP_EHANDLE; ++ ++ return status; ++} ++ ++/* ++ * ======== WMD_IO_OnLoaded ======== ++ * Purpose: ++ * Called when a new program is loaded to get shared memory buffer ++ * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit ++ * are in DSP address units. ++ */ ++DSP_STATUS WMD_IO_OnLoaded(struct IO_MGR *hIOMgr) ++{ ++ struct COD_MANAGER *hCodMan; ++ struct CHNL_MGR *hChnlMgr; ++ struct MSG_MGR *hMsgMgr; ++ u32 ulShmBase; ++ u32 ulShmBaseOffset; ++ u32 ulShmLimit; ++ u32 ulShmLength = -1; ++ u32 ulMemLength = -1; ++ u32 ulMsgBase; ++ u32 ulMsgLimit; ++ u32 ulMsgLength = -1; ++ u32 ulExtEnd; ++ u32 ulGppPa = 0; ++ u32 ulGppVa = 0; ++ u32 ulDspVa = 0; ++ u32 ulSegSize = 0; ++ u32 ulPadSize = 0; ++ u32 i; ++ DSP_STATUS status = DSP_SOK; ++ u32 uNumProcs = 0; ++ s32 ndx = 0; ++ /* DSP MMU setup table */ ++ struct WMDIOCTL_EXTPROC aEProc[WMDIOCTL_NUMOFMMUTLB]; ++ struct CFG_HOSTRES hostRes; ++ u32 mapAttrs; ++ u32 ulShm0End; ++ u32 ulDynExtBase; ++ u32 ulSeg1Size = 0; ++ u32 paCurr = 0; ++ u32 vaCurr = 0; ++ u32 gppVaCurr = 0; ++ u32 numBytes = 0; ++ u32 allBits = 0; ++ u32 pgSize[] = { HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB, ++ HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB }; ++ ++ status = DEV_GetCodMgr(hIOMgr->hDevObject, &hCodMan); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ hChnlMgr = hIOMgr->hChnlMgr; ++ /* The message manager is destroyed when the board is stopped. */ ++ DEV_GetMsgMgr(hIOMgr->hDevObject, &hIOMgr->hMsgMgr); ++ hMsgMgr = hIOMgr->hMsgMgr; ++ DBC_Assert(MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)); ++ DBC_Assert(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); ++ if (hIOMgr->pSharedMem) ++ hIOMgr->pSharedMem = NULL; ++ ++ /* Get start and length of channel part of shared memory */ ++ status = COD_GetSymValue(hCodMan, CHNL_SHARED_BUFFER_BASE_SYM, ++ &ulShmBase); ++ if (DSP_FAILED(status)) { ++ status = CHNL_E_NOMEMMAP; ++ goto func_cont1; ++ } ++ status = COD_GetSymValue(hCodMan, CHNL_SHARED_BUFFER_LIMIT_SYM, ++ &ulShmLimit); ++ if (DSP_FAILED(status)) { ++ status = CHNL_E_NOMEMMAP; ++ goto func_cont1; ++ } ++ if (ulShmLimit <= ulShmBase) { ++ status = CHNL_E_INVALIDMEMBASE; ++ } else { ++ /* get total length in bytes */ ++ ulShmLength = (ulShmLimit - ulShmBase + 1) * hIOMgr->uWordSize; ++ /* Calculate size of a PROCCOPY shared memory region */ ++ DBG_Trace(DBG_LEVEL7, ++ "**(proc)PROCCOPY SHMMEM SIZE: 0x%x bytes\n", ++ (ulShmLength - sizeof(struct SHM))); ++ } ++func_cont1: ++ if (DSP_SUCCEEDED(status)) { ++ /* Get start and length of message part of shared memory */ ++ status = COD_GetSymValue(hCodMan, MSG_SHARED_BUFFER_BASE_SYM, ++ &ulMsgBase); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMan, MSG_SHARED_BUFFER_LIMIT_SYM, ++ &ulMsgLimit); ++ if (DSP_SUCCEEDED(status)) { ++ if (ulMsgLimit <= ulMsgBase) { ++ status = CHNL_E_INVALIDMEMBASE; ++ } else { ++ /* Length (bytes) of messaging part of shared ++ * memory */ ++ ulMsgLength = (ulMsgLimit - ulMsgBase + 1) * ++ hIOMgr->uWordSize; ++ /* Total length (bytes) of shared memory: ++ * chnl + msg */ ++ ulMemLength = ulShmLength + ulMsgLength; ++ } ++ } else { ++ status = CHNL_E_NOMEMMAP; ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++#ifndef DSP_TRACEBUF_DISABLED ++ status = COD_GetSymValue(hCodMan, DSP_TRACESEC_END, &ulShm0End); ++ DBG_Trace(DBG_LEVEL7, "_BRIDGE_TRACE_END value = %x \n", ++ ulShm0End); ++#else ++ status = COD_GetSymValue(hCodMan, SHM0_SHARED_END_SYM, ++ &ulShm0End); ++ DBG_Trace(DBG_LEVEL7, 
"_SHM0_END = %x \n", ulShm0End); ++#endif ++ if (DSP_FAILED(status)) ++ status = CHNL_E_NOMEMMAP; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMan, DYNEXTBASE, &ulDynExtBase); ++ if (DSP_FAILED(status)) ++ status = CHNL_E_NOMEMMAP; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMan, EXTEND, &ulExtEnd); ++ if (DSP_FAILED(status)) ++ status = CHNL_E_NOMEMMAP; ++ ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Get memory reserved in host resources */ ++ (void)MGR_EnumProcessorInfo(0, ++ (struct DSP_PROCESSORINFO *)&hIOMgr->extProcInfo, ++ sizeof(struct MGR_PROCESSOREXTINFO), &uNumProcs); ++ CFG_GetHostResources(( ++ struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &hostRes); ++ /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */ ++ ndx = 0; ++ ulGppPa = hostRes.dwMemPhys[1]; ++ ulGppVa = hostRes.dwMemBase[1]; ++ /* THIS IS THE VIRTUAL UNCACHED IOREMAPPED ADDRESS !!! */ ++ /* Why can't we directly take the DSPVA from the symbols? */ ++ ulDspVa = hIOMgr->extProcInfo.tyTlb[0].ulDspVirt; ++ ulSegSize = (ulShm0End - ulDspVa) * hIOMgr->uWordSize; ++ ulSeg1Size = (ulExtEnd - ulDynExtBase) * hIOMgr->uWordSize; ++ ulSeg1Size = (ulSeg1Size + 0xFFF) & (~0xFFFUL); /* 4K align*/ ++ ulSegSize = (ulSegSize + 0xFFFF) & (~0xFFFFUL); /* 64K align*/ ++ ulPadSize = ulPageAlignSize - ((ulGppPa + ulSeg1Size) % ++ ulPageAlignSize); ++ if (ulPadSize == ulPageAlignSize) ++ ulPadSize = 0x0; ++ ++ DBG_Trace(DBG_LEVEL7, "ulGppPa %x, ulGppVa %x, ulDspVa %x, " ++ "ulShm0End %x, ulDynExtBase %x, ulExtEnd %x, " ++ "ulSegSize %x ulSeg1Size %x \n", ulGppPa, ulGppVa, ++ ulDspVa, ulShm0End, ulDynExtBase, ulExtEnd, ulSegSize, ++ ulSeg1Size); ++ ++ if ((ulSegSize + ulSeg1Size + ulPadSize) > ++ hostRes.dwMemLength[1]) { ++ DBG_Trace(DBG_LEVEL7, "ulGppPa %x, ulGppVa %x, ulDspVa " ++ "%x, ulShm0End %x, ulDynExtBase %x, ulExtEnd " ++ "%x, ulSegSize %x, ulSeg1Size %x \n", ulGppPa, ++ ulGppVa, ulDspVa, ulShm0End, ulDynExtBase, ++ ulExtEnd, ulSegSize, ulSeg1Size); ++ DBG_Trace(DBG_LEVEL7, "Insufficient SHM Reserved 0x%x. " ++ "Required 0x%x\n", hostRes.dwMemLength[1], ++ ulSegSize + ulSeg1Size + ulPadSize); ++ status = DSP_EMEMORY; ++ } ++ } ++ if (DSP_FAILED(status)) ++ goto func_cont; ++ ++ paCurr = ulGppPa; ++ vaCurr = ulDynExtBase * hIOMgr->uWordSize; ++ gppVaCurr = ulGppVa; ++ numBytes = ulSeg1Size; ++ ++ /* ++ * Try to fit into TLB entries. If not possible, push them to page ++ * tables. It is quite possible that if sections are not on ++ * bigger page boundary, we may end up making several small pages. ++ * So, push them onto page tables, if that is the case. ++ */ ++ mapAttrs = 0x00000000; ++ mapAttrs = DSP_MAPLITTLEENDIAN; ++ mapAttrs |= DSP_MAPPHYSICALADDR; ++ mapAttrs |= DSP_MAPELEMSIZE32; ++ mapAttrs |= DSP_MAPDONOTLOCK; ++ ++ while (numBytes && DSP_SUCCEEDED(status)) { ++ /* To find the max. page size with which both PA & VA are ++ * aligned */ ++ allBits = paCurr | vaCurr; ++ DBG_Trace(DBG_LEVEL1, "allBits %x, paCurr %x, vaCurr %x, " ++ "numBytes %x\n", allBits, paCurr, vaCurr, numBytes); ++ for (i = 0; i < 4; i++) { ++ if ((numBytes >= pgSize[i]) && ((allBits & ++ (pgSize[i] - 1)) == 0)) { ++ status = hIOMgr->pIntfFxns->pfnBrdMemMap ++ (hIOMgr->hWmdContext, paCurr, vaCurr, ++ pgSize[i], mapAttrs); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ paCurr += pgSize[i]; ++ vaCurr += pgSize[i]; ++ gppVaCurr += pgSize[i]; ++ numBytes -= pgSize[i]; ++ /* Don't try smaller sizes. 
Hopefully we have ++ * reached an address aligned to a bigger page ++ * size*/ ++ break; ++ } ++ } ++ } ++ paCurr += ulPadSize; ++ vaCurr += ulPadSize; ++ gppVaCurr += ulPadSize; ++ ++ /* configure the TLB entries for the next cacheable segment */ ++ numBytes = ulSegSize; ++ vaCurr = ulDspVa * hIOMgr->uWordSize; ++ allBits = 0x0; ++ while (numBytes && DSP_SUCCEEDED(status)) { ++ /* To find the max. page size with which both PA & VA are ++ * aligned*/ ++ allBits = paCurr | vaCurr; ++ DBG_Trace(DBG_LEVEL1, "allBits for Seg1 %x, paCurr %x, " ++ "vaCurr %x, numBytes %x\n", allBits, paCurr, vaCurr, ++ numBytes); ++ for (i = 0; i < 4; i++) { ++ if (!(numBytes >= pgSize[i]) || ++ !((allBits & (pgSize[i]-1)) == 0)) ++ continue; ++ if (ndx < MAX_LOCK_TLB_ENTRIES) { ++ /* This is the physical address written to ++ * DSP MMU */ ++ aEProc[ndx].ulGppPa = paCurr; ++ /* THIS IS THE VIRTUAL UNCACHED IOREMAPPED ++ * ADDRESS!!! */ ++ aEProc[ndx].ulGppVa = gppVaCurr; ++ aEProc[ndx].ulDspVa = vaCurr / hIOMgr-> ++ uWordSize; ++ aEProc[ndx].ulSize = pgSize[i]; ++ aEProc[ndx].endianism = HW_LITTLE_ENDIAN; ++ aEProc[ndx].elemSize = HW_ELEM_SIZE_16BIT; ++ aEProc[ndx].mixedMode = HW_MMU_CPUES; ++ DBG_Trace(DBG_LEVEL1, "SHM MMU TLB entry PA %lx" ++ " VA %lx DSP_VA %lx Size %lx\n", ++ aEProc[ndx].ulGppPa, ++ aEProc[ndx].ulGppVa, ++ aEProc[ndx].ulDspVa * ++ hIOMgr->uWordSize, pgSize[i]); ++ ndx++; ++ } else { ++ status = hIOMgr->pIntfFxns->pfnBrdMemMap( ++ hIOMgr->hWmdContext, paCurr, vaCurr, pgSize[i], ++ mapAttrs); ++ DBG_Trace(DBG_LEVEL1, "SHM MMU PTE entry PA %lx" ++ " VA %lx DSP_VA %lx Size %lx\n", ++ aEProc[ndx].ulGppPa, ++ aEProc[ndx].ulGppVa, ++ aEProc[ndx].ulDspVa * ++ hIOMgr->uWordSize, pgSize[i]); ++ DBC_Assert(DSP_SUCCEEDED(status)); ++ } ++ paCurr += pgSize[i]; ++ vaCurr += pgSize[i]; ++ gppVaCurr += pgSize[i]; ++ numBytes -= pgSize[i]; ++ /* Don't try smaller sizes. Hopefully we have reached ++ an address aligned to a bigger page size*/ ++ break; ++ } ++ } ++ ++ /* Copy remaining entries from CDB. All entries are 1 MB and should not ++ * conflict with SHM entries on MPU or DSP side */ ++ for (i = 3; i < 7 && ndx < WMDIOCTL_NUMOFMMUTLB && ++ DSP_SUCCEEDED(status); i++) { ++ if (hIOMgr->extProcInfo.tyTlb[i].ulGppPhys == 0) ++ continue; ++ ++ if ((hIOMgr->extProcInfo.tyTlb[i].ulGppPhys > ulGppPa - 0x100000 ++ && hIOMgr->extProcInfo.tyTlb[i].ulGppPhys <= ++ ulGppPa + ulSegSize) ++ || (hIOMgr->extProcInfo.tyTlb[i].ulDspVirt > ulDspVa - ++ 0x100000 / hIOMgr->uWordSize && hIOMgr-> ++ extProcInfo.tyTlb[i].ulDspVirt ++ <= ulDspVa + ulSegSize / hIOMgr->uWordSize)) { ++ DBG_Trace(DBG_LEVEL7, "CDB MMU entry %d conflicts with " ++ "SHM.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: " ++ "GppPa %x, DspVa %x, Bytes %x.\n", i, ++ hIOMgr->extProcInfo.tyTlb[i].ulGppPhys, ++ hIOMgr->extProcInfo.tyTlb[i].ulDspVirt, ++ ulGppPa, ulDspVa, ulSegSize); ++ status = DSP_EFAIL; ++ } else { ++ if (ndx < MAX_LOCK_TLB_ENTRIES) { ++ aEProc[ndx].ulDspVa = hIOMgr->extProcInfo. ++ tyTlb[i].ulDspVirt; ++ aEProc[ndx].ulGppPa = hIOMgr->extProcInfo. 
++ tyTlb[i].ulGppPhys; ++ aEProc[ndx].ulGppVa = 0; ++ /* Can't convert, so set to zero*/ ++ aEProc[ndx].ulSize = 0x100000; /* 1 MB*/ ++ DBG_Trace(DBG_LEVEL1, "SHM MMU entry PA %x " ++ "DSP_VA 0x%x\n", aEProc[ndx].ulGppPa, ++ aEProc[ndx].ulDspVa); ++ ndx++; ++ } else { ++ status = hIOMgr->pIntfFxns->pfnBrdMemMap ++ (hIOMgr->hWmdContext, ++ hIOMgr->extProcInfo.tyTlb[i].ulGppPhys, ++ hIOMgr->extProcInfo.tyTlb[i].ulDspVirt, ++ 0x100000, mapAttrs); ++ } ++ } ++ } ++ if (i < 7 && DSP_SUCCEEDED(status)) { ++ /* All CDB entries could not be made*/ ++ status = DSP_EFAIL; ++ } ++func_cont: ++ mapAttrs = 0x00000000; ++ mapAttrs = DSP_MAPLITTLEENDIAN; ++ mapAttrs |= DSP_MAPPHYSICALADDR; ++ mapAttrs |= DSP_MAPELEMSIZE32; ++ mapAttrs |= DSP_MAPDONOTLOCK; ++ ++ /* Map the L4 peripherals */ ++ i = 0; ++ while (L4PeripheralTable[i].physAddr && DSP_SUCCEEDED(status)) { ++ status = hIOMgr->pIntfFxns->pfnBrdMemMap ++ (hIOMgr->hWmdContext, L4PeripheralTable[i].physAddr, ++ L4PeripheralTable[i].dspVirtAddr, HW_PAGE_SIZE_4KB, ++ mapAttrs); ++ if (DSP_FAILED(status)) ++ break; ++ i++; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ for (i = ndx; i < WMDIOCTL_NUMOFMMUTLB; i++) { ++ aEProc[i].ulDspVa = 0; ++ aEProc[i].ulGppPa = 0; ++ aEProc[i].ulGppVa = 0; ++ aEProc[i].ulSize = 0; ++ } ++ /* Set the SHM physical address entry (grayed out in CDB file) ++ * to the virtual uncached ioremapped address of SHM reserved ++ * on MPU */ ++ hIOMgr->extProcInfo.tyTlb[0].ulGppPhys = (ulGppVa + ulSeg1Size + ++ ulPadSize); ++ DBG_Trace(DBG_LEVEL1, "*********extProcInfo *********%x \n", ++ hIOMgr->extProcInfo.tyTlb[0].ulGppPhys); ++ /* Need SHM Phys addr. IO supports only one DSP for now: ++ * uNumProcs=1 */ ++ if ((hIOMgr->extProcInfo.tyTlb[0].ulGppPhys == 0) || ++ (uNumProcs != 1)) { ++ status = CHNL_E_NOMEMMAP; ++ DBC_Assert(false); ++ } else { ++ DBC_Assert(aEProc[0].ulDspVa <= ulShmBase); ++ /* ulShmBase may not be at ulDspVa address */ ++ ulShmBaseOffset = (ulShmBase - aEProc[0].ulDspVa) * ++ hIOMgr->uWordSize; ++ /* WMD_BRD_Ctrl() will set dev context dsp-mmu info. In ++ * _BRD_Start() the MMU will be re-programed with MMU ++ * DSPVa-GPPPa pair info while DSP is in a known ++ * (reset) state. */ ++ DBC_Assert(hIOMgr->pIntfFxns != NULL); ++ DBC_Assert(hIOMgr->hWmdContext != NULL); ++ status = hIOMgr->pIntfFxns->pfnDevCntrl(hIOMgr-> ++ hWmdContext, WMDIOCTL_SETMMUCONFIG, aEProc); ++ ulShmBase = hIOMgr->extProcInfo.tyTlb[0].ulGppPhys; ++ DBG_Trace(DBG_LEVEL1, "extProcInfo.tyTlb[0].ulGppPhys " ++ "%x \n ", hIOMgr->extProcInfo.tyTlb[0]. ++ ulGppPhys); ++ ulShmBase += ulShmBaseOffset; ++ ulShmBase = (u32)MEM_LinearAddress((void *)ulShmBase, ++ ulMemLength); ++ DBC_Assert(ulShmBase != 0); ++ if (DSP_SUCCEEDED(status)) { ++ status = registerSHMSegs(hIOMgr, hCodMan, ++ aEProc[0].ulGppPa); ++ /* Register SM */ ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ hIOMgr->pSharedMem = (struct SHM *)ulShmBase; ++ hIOMgr->pInput = (u8 *)hIOMgr->pSharedMem + ++ sizeof(struct SHM); ++ hIOMgr->pOutput = hIOMgr->pInput + (ulShmLength - ++ sizeof(struct SHM))/2; ++ hIOMgr->uSMBufSize = hIOMgr->pOutput - hIOMgr->pInput; ++ DBG_Trace(DBG_LEVEL3, ++ "hIOMgr: pInput %p pOutput %p ulShmLength %x\n", ++ hIOMgr->pInput, hIOMgr->pOutput, ulShmLength); ++ DBG_Trace(DBG_LEVEL3, ++ "pSharedMem %p uSMBufSize %x sizeof(SHM) %x\n", ++ hIOMgr->pSharedMem, hIOMgr->uSMBufSize, ++ sizeof(struct SHM)); ++ /* Set up Shared memory addresses for messaging. 
*/ ++ hIOMgr->pMsgInputCtrl = (struct MSG *)((u8 *) ++ hIOMgr->pSharedMem + ++ ulShmLength); ++ hIOMgr->pMsgInput = (u8 *)hIOMgr->pMsgInputCtrl + ++ sizeof(struct MSG); ++ hIOMgr->pMsgOutputCtrl = (struct MSG *)((u8 *)hIOMgr-> ++ pMsgInputCtrl + ulMsgLength / 2); ++ hIOMgr->pMsgOutput = (u8 *)hIOMgr->pMsgOutputCtrl + ++ sizeof(struct MSG); ++ hMsgMgr->uMaxMsgs = ((u8 *)hIOMgr->pMsgOutputCtrl - ++ hIOMgr->pMsgInput) / ++ sizeof(struct MSG_DSPMSG); ++ DBG_Trace(DBG_LEVEL7, "IO MGR SHM details : pSharedMem 0x%x, " ++ "pInput 0x%x, pOutput 0x%x, pMsgInputCtrl 0x%x, " ++ "pMsgInput 0x%x, pMsgOutputCtrl 0x%x, pMsgOutput " ++ "0x%x \n", (u8 *)hIOMgr->pSharedMem, ++ (u8 *)hIOMgr->pInput, (u8 *)hIOMgr->pOutput, ++ (u8 *)hIOMgr->pMsgInputCtrl, ++ (u8 *)hIOMgr->pMsgInput, ++ (u8 *)hIOMgr->pMsgOutputCtrl, ++ (u8 *)hIOMgr->pMsgOutput); ++ DBG_Trace(DBG_LEVEL7, "** (proc) MAX MSGS IN SHARED MEMORY: " ++ "0x%x\n", hMsgMgr->uMaxMsgs); ++ memset((void *) hIOMgr->pSharedMem, 0, sizeof(struct SHM)); ++ } ++#ifndef DSP_TRACEBUF_DISABLED ++ if (DSP_SUCCEEDED(status)) { ++ /* Get the start address of trace buffer */ ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMan, SYS_PUTCBEG, ++ &hIOMgr->ulTraceBufferBegin); ++ if (DSP_FAILED(status)) ++ status = CHNL_E_NOMEMMAP; ++ ++ } ++ hIOMgr->ulGPPReadPointer = hIOMgr->ulTraceBufferBegin = ++ (ulGppVa + ulSeg1Size + ulPadSize) + ++ (hIOMgr->ulTraceBufferBegin - ulDspVa); ++ /* Get the end address of trace buffer */ ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMan, SYS_PUTCEND, ++ &hIOMgr->ulTraceBufferEnd); ++ if (DSP_FAILED(status)) ++ status = CHNL_E_NOMEMMAP; ++ ++ } ++ hIOMgr->ulTraceBufferEnd = (ulGppVa + ulSeg1Size + ulPadSize) + ++ (hIOMgr->ulTraceBufferEnd - ulDspVa); ++ /* Get the current address of DSP write pointer */ ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMan, ++ BRIDGE_SYS_PUTC_current, ++ &hIOMgr->ulTraceBufferCurrent); ++ if (DSP_FAILED(status)) ++ status = CHNL_E_NOMEMMAP; ++ ++ } ++ hIOMgr->ulTraceBufferCurrent = (ulGppVa + ulSeg1Size + ++ ulPadSize) + (hIOMgr-> ++ ulTraceBufferCurrent - ulDspVa); ++ /* Calculate the size of trace buffer */ ++ if (hIOMgr->pMsg) ++ MEM_Free(hIOMgr->pMsg); ++ hIOMgr->pMsg = MEM_Alloc(((hIOMgr->ulTraceBufferEnd - ++ hIOMgr->ulTraceBufferBegin) * ++ hIOMgr->uWordSize) + 2, MEM_NONPAGED); ++ if (!hIOMgr->pMsg) ++ status = DSP_EMEMORY; ++ ++ DBG_Trace(DBG_LEVEL1, "** hIOMgr->pMsg: 0x%x\n", hIOMgr->pMsg); ++ hIOMgr->ulDspVa = ulDspVa; ++ hIOMgr->ulGppVa = (ulGppVa + ulSeg1Size + ulPadSize); ++ } ++#endif ++ IO_EnableInterrupt(hIOMgr->hWmdContext); ++ return status; ++} ++ ++/* ++ * ======== IO_BufSize ======== ++ * Size of shared memory I/O channel. ++ */ ++u32 IO_BufSize(struct IO_MGR *hIOMgr) ++{ ++ DBC_Require(MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)); ++ ++ return hIOMgr->uSMBufSize; ++} ++ ++/* ++ * ======== IO_CancelChnl ======== ++ * Cancel IO on a given PCPY channel. ++ */ ++void IO_CancelChnl(struct IO_MGR *hIOMgr, u32 ulChnl) ++{ ++ struct IO_MGR *pIOMgr = (struct IO_MGR *)hIOMgr; ++ struct SHM *sm; ++ ++ DBC_Require(MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)); ++ sm = hIOMgr->pSharedMem; ++ ++ /* Inform DSP that we have no more buffers on this channel: */ ++ IO_AndValue(pIOMgr->hWmdContext, struct SHM, sm, hostFreeMask, ++ (~(1 << ulChnl))); ++ ++ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); ++} ++ ++/* ++ * ======== IO_DispatchChnl ======== ++ * Proc-copy chanl dispatch. 
++ */ ++static void IO_DispatchChnl(IN struct IO_MGR *pIOMgr, ++ IN OUT struct CHNL_OBJECT *pChnl, u32 iMode) ++{ ++ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); ++ ++ DBG_Trace(DBG_LEVEL3, "Entering IO_DispatchChnl \n"); ++ ++ /* See if there is any data available for transfer: */ ++ DBC_Assert(iMode == IO_SERVICE); ++ ++ /* Any channel will do for this mode: */ ++ InputChnl(pIOMgr, pChnl, iMode); ++ OutputChnl(pIOMgr, pChnl, iMode); ++} ++ ++/* ++ * ======== IO_DispatchMsg ======== ++ * Performs I/O dispatch on message queues. ++ */ ++static void IO_DispatchMsg(IN struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr) ++{ ++ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); ++ ++ DBG_Trace(DBG_LEVEL3, "Entering IO_DispatchMsg \n"); ++ ++ /* We are performing both input and output processing. */ ++ InputMsg(pIOMgr, hMsgMgr); ++ OutputMsg(pIOMgr, hMsgMgr); ++} ++ ++/* ++ * ======== IO_DispatchPM ======== ++ * Performs I/O dispatch on PM related messages from DSP ++ */ ++static void IO_DispatchPM(struct work_struct *work) ++{ ++ struct IO_MGR *pIOMgr = ++ container_of(work, struct IO_MGR, io_workq); ++ DSP_STATUS status; ++ u32 pArg[2]; ++ ++ /*DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE));*/ ++ ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM: Entering IO_DispatchPM : \n"); ++ ++ /* Perform Power message processing here */ ++ while (pIOMgr->iQuePowerHead != pIOMgr->iQuePowerTail) { ++ pArg[0] = *(u32 *)&(pIOMgr->dQuePowerMbxVal[pIOMgr-> ++ iQuePowerTail]); ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM - pArg[0] - 0x%x: \n", ++ pArg[0]); ++ /* Send the command to the WMD clk/pwr manager to handle */ ++ if (pArg[0] == MBX_PM_HIBERNATE_EN) { ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Hibernate " ++ "command\n"); ++ status = pIOMgr->pIntfFxns->pfnDevCntrl(pIOMgr-> ++ hWmdContext, WMDIOCTL_PWR_HIBERNATE, pArg); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : " ++ "Hibernation command failed\n"); ++ } ++ } else if (pArg[0] == MBX_PM_OPP_REQ) { ++ pArg[1] = pIOMgr->pSharedMem->oppRequest.rqstOppPt; ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Value of OPP " ++ "value =0x%x \n", pArg[1]); ++ status = pIOMgr->pIntfFxns->pfnDevCntrl(pIOMgr-> ++ hWmdContext, WMDIOCTL_CONSTRAINT_REQUEST, ++ pArg); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Failed " ++ "to set constraint = 0x%x \n", ++ pArg[1]); ++ } ++ ++ } else { ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM - clock control - " ++ "value of msg = 0x%x: \n", pArg[0]); ++ status = pIOMgr->pIntfFxns->pfnDevCntrl(pIOMgr-> ++ hWmdContext, WMDIOCTL_CLK_CTRL, pArg); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Failed " ++ "to control the DSP clk = 0x%x \n", ++ *pArg); ++ } ++ } ++ /* increment the tail count here */ ++ pIOMgr->iQuePowerTail++; ++ if (pIOMgr->iQuePowerTail >= MAX_PM_REQS) ++ pIOMgr->iQuePowerTail = 0; ++ ++ } ++ ++} ++ ++/* ++ * ======== IO_DPC ======== ++ * Deferred procedure call for shared memory channel driver ISR. Carries ++ * out the dispatch of I/O as a non-preemptible event.It can only be ++ * pre-empted by an ISR. 
++ */ ++void IO_DPC(IN OUT void *pRefData) ++{ ++ struct IO_MGR *pIOMgr = (struct IO_MGR *)pRefData; ++ struct CHNL_MGR *pChnlMgr; ++ struct MSG_MGR *pMsgMgr; ++ struct DEH_MGR *hDehMgr; ++ ++ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); ++ pChnlMgr = pIOMgr->hChnlMgr; ++ DEV_GetMsgMgr(pIOMgr->hDevObject, &pMsgMgr); ++ DEV_GetDehMgr(pIOMgr->hDevObject, &hDehMgr); ++ DBC_Require(MEM_IsValidHandle(pChnlMgr, CHNL_MGRSIGNATURE)); ++ DBG_Trace(DBG_LEVEL7, "Entering IO_DPC(0x%x)\n", pRefData); ++ /* Check value of interrupt register to ensure it is a valid error */ ++ if ((pIOMgr->wIntrVal > DEH_BASE) && (pIOMgr->wIntrVal < DEH_LIMIT)) { ++ /* notify DSP/BIOS exception */ ++ if (hDehMgr) ++ WMD_DEH_Notify(hDehMgr, DSP_SYSERROR, pIOMgr->wIntrVal); ++ ++ } ++ IO_DispatchChnl(pIOMgr, NULL, IO_SERVICE); ++#ifdef CHNL_MESSAGES ++ if (pMsgMgr) { ++ DBC_Require(MEM_IsValidHandle(pMsgMgr, MSGMGR_SIGNATURE)); ++ IO_DispatchMsg(pIOMgr, pMsgMgr); ++ } ++#endif ++#ifndef DSP_TRACEBUF_DISABLED ++ if (pIOMgr->wIntrVal & MBX_DBG_CLASS) { ++ /* notify DSP Trace message */ ++ if (pIOMgr->wIntrVal & MBX_DBG_SYSPRINTF) ++ PrintDSPDebugTrace(pIOMgr); ++ } ++#endif ++ ++#ifndef DSP_TRACEBUF_DISABLED ++ PrintDSPDebugTrace(pIOMgr); ++#endif ++} ++ ++ ++/* ++ * ======== IO_ISR ======== ++ * Main interrupt handler for the shared memory IO manager. ++ * Calls the WMD's CHNL_ISR to determine if this interrupt is ours, then ++ * schedules a DPC to dispatch I/O. ++ */ ++irqreturn_t IO_ISR(int irq, IN void *pRefData) ++{ ++ struct IO_MGR *hIOMgr = (struct IO_MGR *)pRefData; ++ bool fSchedDPC; ++ DBC_Require(irq == INT_MAIL_MPU_IRQ); ++ DBC_Require(MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)); ++ DBG_Trace(DBG_LEVEL3, "Entering IO_ISR(0x%x)\n", pRefData); ++ ++ /* Call WMD's CHNLSM_ISR() to see if interrupt is ours, and process. */ ++ if (IO_CALLISR(hIOMgr->hWmdContext, &fSchedDPC, &hIOMgr->wIntrVal)) { ++ { ++ DBG_Trace(DBG_LEVEL3, "IO_ISR %x\n", hIOMgr->wIntrVal); ++ if (hIOMgr->wIntrVal & MBX_PM_CLASS) { ++ hIOMgr->dQuePowerMbxVal[hIOMgr->iQuePowerHead] = ++ hIOMgr->wIntrVal; ++ hIOMgr->iQuePowerHead++; ++ if (hIOMgr->iQuePowerHead >= MAX_PM_REQS) ++ hIOMgr->iQuePowerHead = 0; ++ ++ queue_work(bridge_workqueue, &hIOMgr->io_workq); ++ } ++ if (hIOMgr->wIntrVal == MBX_DEH_RESET) { ++ DBG_Trace(DBG_LEVEL6, "*** DSP RESET ***\n"); ++ hIOMgr->wIntrVal = 0; ++ } else if (fSchedDPC) { ++ /* PROC-COPY defer i/o */ ++ DPC_Schedule(hIOMgr->hDPC); ++ } ++ } ++ } else ++ /* Ensure that, if WMD didn't claim it, the IRQ is shared. */ ++ DBC_Ensure(hIOMgr->fSharedIRQ); ++ return IRQ_HANDLED; ++} ++ ++/* ++ * ======== IO_RequestChnl ======== ++ * Purpose: ++ * Request chanenel I/O from the DSP. Sets flags in shared memory, then ++ * interrupts the DSP. 
++ */ ++void IO_RequestChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, ++ u32 iMode, OUT u16 *pwMbVal) ++{ ++ struct CHNL_MGR *pChnlMgr; ++ struct SHM *sm; ++ DBC_Require(pChnl != NULL); ++ DBC_Require(pwMbVal != NULL); ++ pChnlMgr = pIOMgr->hChnlMgr; ++ sm = pIOMgr->pSharedMem; ++ if (iMode == IO_INPUT) { ++ /* Assertion fires if CHNL_AddIOReq() called on a stream ++ * which was cancelled, or attached to a dead board: */ ++ DBC_Assert((pChnl->dwState == CHNL_STATEREADY) || ++ (pChnl->dwState == CHNL_STATEEOS)); ++ /* Indicate to the DSP we have a buffer available for input: */ ++ IO_OrValue(pIOMgr->hWmdContext, struct SHM, sm, hostFreeMask, ++ (1 << pChnl->uId)); ++ *pwMbVal = MBX_PCPY_CLASS; ++ } else if (iMode == IO_OUTPUT) { ++ /* This assertion fails if CHNL_AddIOReq() was called on a ++ * stream which was cancelled, or attached to a dead board: */ ++ DBC_Assert((pChnl->dwState & ~CHNL_STATEEOS) == ++ CHNL_STATEREADY); ++ /* Record the fact that we have a buffer available for ++ * output: */ ++ pChnlMgr->dwOutputMask |= (1 << pChnl->uId); ++ } else { ++ DBC_Assert(iMode); /* Shouldn't get here. */ ++ } ++} ++ ++/* ++ * ======== IO_Schedule ======== ++ * Schedule DPC for IO. ++ */ ++void IO_Schedule(struct IO_MGR *pIOMgr) ++{ ++ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); ++ ++ DPC_Schedule(pIOMgr->hDPC); ++} ++ ++/* ++ * ======== FindReadyOutput ======== ++ * Search for a host output channel which is ready to send. If this is ++ * called as a result of servicing the DPC, then implement a round ++ * robin search; otherwise, this was called by a client thread (via ++ * IO_Dispatch()), so just start searching from the current channel id. ++ */ ++static u32 FindReadyOutput(struct CHNL_MGR *pChnlMgr, ++ struct CHNL_OBJECT *pChnl, u32 dwMask) ++{ ++ u32 uRetval = OUTPUTNOTREADY; ++ u32 id, startId; ++ u32 shift; ++ ++ id = (pChnl != NULL ? pChnl->uId : (pChnlMgr->dwLastOutput + 1)); ++ id = ((id == CHNL_MAXCHANNELS) ? 0 : id); ++ DBC_Assert(id < CHNL_MAXCHANNELS); ++ if (dwMask) { ++ shift = (1 << id); ++ startId = id; ++ do { ++ if (dwMask & shift) { ++ uRetval = id; ++ if (pChnl == NULL) ++ pChnlMgr->dwLastOutput = id; ++ ++ break; ++ } ++ id = id + 1; ++ id = ((id == CHNL_MAXCHANNELS) ? 0 : id); ++ shift = (1 << id); ++ } while (id != startId); ++ } ++ DBC_Ensure((uRetval == OUTPUTNOTREADY) || (uRetval < CHNL_MAXCHANNELS)); ++ return uRetval; ++} ++ ++/* ++ * ======== InputChnl ======== ++ * Dispatch a buffer on an input channel. ++ */ ++static void InputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, ++ u32 iMode) ++{ ++ struct CHNL_MGR *pChnlMgr; ++ struct SHM *sm; ++ u32 chnlId; ++ u32 uBytes; ++ struct CHNL_IRP *pChirp = NULL; ++ u32 dwArg; ++ bool fClearChnl = false; ++ bool fNotifyClient = false; ++ ++ sm = pIOMgr->pSharedMem; ++ pChnlMgr = pIOMgr->hChnlMgr; ++ ++ DBG_Trace(DBG_LEVEL3, "> InputChnl\n"); ++ ++ /* Attempt to perform input.... */ ++ if (!IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, inputFull)) ++ goto func_end; ++ ++ uBytes = IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, inputSize) * ++ pChnlMgr->uWordSize; ++ chnlId = IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, inputId); ++ dwArg = IO_GetLong(pIOMgr->hWmdContext, struct SHM, sm, arg); ++ if (chnlId >= CHNL_MAXCHANNELS) { ++ /* Shouldn't be here: would indicate corrupted SHM. 
*/ ++ DBC_Assert(chnlId); ++ goto func_end; ++ } ++ pChnl = pChnlMgr->apChannel[chnlId]; ++ if ((pChnl != NULL) && CHNL_IsInput(pChnl->uMode)) { ++ if ((pChnl->dwState & ~CHNL_STATEEOS) == CHNL_STATEREADY) { ++ if (!pChnl->pIORequests) ++ goto func_end; ++ /* Get the I/O request, and attempt a transfer: */ ++ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl-> ++ pIORequests); ++ if (pChirp) { ++ pChnl->cIOReqs--; ++ DBC_Assert(pChnl->cIOReqs >= 0); ++ /* Ensure we don't overflow the client's ++ * buffer: */ ++ uBytes = min(uBytes, pChirp->cBytes); ++ /* Transfer buffer from DSP side: */ ++ uBytes = ReadData(pIOMgr->hWmdContext, ++ pChirp->pHostSysBuf, ++ pIOMgr->pInput, uBytes); ++ pChnl->cBytesMoved += uBytes; ++ pChirp->cBytes = uBytes; ++ pChirp->dwArg = dwArg; ++ pChirp->status = CHNL_IOCSTATCOMPLETE; ++ DBG_Trace(DBG_LEVEL7, "Input Chnl:status= 0x%x " ++ "\n", *((RMS_WORD *)(pChirp-> ++ pHostSysBuf))); ++ if (uBytes == 0) { ++ /* This assertion fails if the DSP ++ * sends EOS more than once on this ++ * channel: */ ++ DBC_Assert(!(pChnl->dwState & ++ CHNL_STATEEOS)); ++ /* Zero bytes indicates EOS. Update ++ * IOC status for this chirp, and also ++ * the channel state: */ ++ pChirp->status |= CHNL_IOCSTATEOS; ++ pChnl->dwState |= CHNL_STATEEOS; ++ /* Notify that end of stream has ++ * occurred */ ++ NTFY_Notify(pChnl->hNtfy, ++ DSP_STREAMDONE); ++ DBG_Trace(DBG_LEVEL7, "Input Chnl NTFY " ++ "chnl = 0x%x\n", pChnl); ++ } ++ /* Tell DSP if no more I/O buffers available: */ ++ if (!pChnl->pIORequests) ++ goto func_end; ++ if (LST_IsEmpty(pChnl->pIORequests)) { ++ IO_AndValue(pIOMgr->hWmdContext, ++ struct SHM, sm, hostFreeMask, ++ ~(1 << pChnl->uId)); ++ } ++ fClearChnl = true; ++ fNotifyClient = true; ++ } else { ++ /* Input full for this channel, but we have no ++ * buffers available. The channel must be ++ * "idling". Clear out the physical input ++ * channel. */ ++ fClearChnl = true; ++ } ++ } else { ++ /* Input channel cancelled: clear input channel. */ ++ fClearChnl = true; ++ } ++ } else { ++ /* DPC fired after host closed channel: clear input channel. */ ++ fClearChnl = true; ++ } ++ if (fClearChnl) { ++ /* Indicate to the DSP we have read the input: */ ++ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, inputFull, 0); ++ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); ++ } ++ if (fNotifyClient) { ++ /* Notify client with IO completion record: */ ++ NotifyChnlComplete(pChnl, pChirp); ++ } ++func_end: ++ DBG_Trace(DBG_LEVEL3, "< InputChnl\n"); ++} ++ ++/* ++ * ======== InputMsg ======== ++ * Copies messages from shared memory to the message queues. ++ */ ++static void InputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr) ++{ ++ u32 uMsgs; ++ u32 i; ++ u8 *pMsgInput; ++ struct MSG_QUEUE *hMsgQueue; ++ struct MSG_FRAME *pMsg; ++ struct MSG_DSPMSG msg; ++ struct MSG *pCtrl; ++ u32 fInputEmpty; ++ u32 addr; ++ ++ pCtrl = pIOMgr->pMsgInputCtrl; ++ /* Get the number of input messages to be read. 
*/ ++ fInputEmpty = IO_GetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, ++ bufEmpty); ++ uMsgs = IO_GetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, size); ++ if (fInputEmpty || uMsgs >= hMsgMgr->uMaxMsgs) ++ return; ++ ++ pMsgInput = pIOMgr->pMsgInput; ++ for (i = 0; i < uMsgs; i++) { ++ /* Read the next message */ ++ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->msg.dwCmd); ++ msg.msg.dwCmd = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); ++ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->msg.dwArg1); ++ msg.msg.dwArg1 = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); ++ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->msg.dwArg2); ++ msg.msg.dwArg2 = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); ++ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->dwId); ++ msg.dwId = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); ++ pMsgInput += sizeof(struct MSG_DSPMSG); ++ if (!hMsgMgr->queueList) ++ goto func_end; ++ ++ /* Determine which queue to put the message in */ ++ hMsgQueue = (struct MSG_QUEUE *)LST_First(hMsgMgr->queueList); ++ DBG_Trace(DBG_LEVEL7, "InputMsg RECVD: dwCmd=0x%x dwArg1=0x%x " ++ "dwArg2=0x%x dwId=0x%x \n", msg.msg.dwCmd, ++ msg.msg.dwArg1, msg.msg.dwArg2, msg.dwId); ++ /* Interrupt may occur before shared memory and message ++ * input locations have been set up. If all nodes were ++ * cleaned up, hMsgMgr->uMaxMsgs should be 0. */ ++ if (hMsgQueue && uMsgs > hMsgMgr->uMaxMsgs) ++ goto func_end; ++ ++ while (hMsgQueue != NULL) { ++ if (msg.dwId == hMsgQueue->dwId) { ++ /* Found it */ ++ if (msg.msg.dwCmd == RMS_EXITACK) { ++ /* The exit message does not get ++ * queued */ ++ /* Call the node exit notification */ ++ /* Node handle */ /* status */ ++ (*hMsgMgr->onExit)((HANDLE)hMsgQueue-> ++ hArg, msg.msg.dwArg1); ++ } else { ++ /* Not an exit acknowledgement, queue ++ * the message */ ++ if (!hMsgQueue->msgFreeList) ++ goto func_end; ++ pMsg = (struct MSG_FRAME *)LST_GetHead ++ (hMsgQueue->msgFreeList); ++ if (hMsgQueue->msgUsedList && pMsg) { ++ pMsg->msgData = msg; ++ LST_PutTail(hMsgQueue-> ++ msgUsedList, ++ (struct LST_ELEM *)pMsg); ++ NTFY_Notify(hMsgQueue->hNtfy, ++ DSP_NODEMESSAGEREADY); ++ SYNC_SetEvent(hMsgQueue-> ++ hSyncEvent); ++ } else { ++ /* No free frame to copy the ++ * message into */ ++ DBG_Trace(DBG_LEVEL7, "NO FREE " ++ "MSG FRAMES, DISCARDING" ++ " MESSAGE\n"); ++ } ++ } ++ break; ++ } ++ ++ if (!hMsgMgr->queueList || !hMsgQueue) ++ goto func_end; ++ hMsgQueue = (struct MSG_QUEUE *)LST_Next(hMsgMgr-> ++ queueList, (struct LST_ELEM *)hMsgQueue); ++ } ++ } ++ /* Set the post SWI flag */ ++ if (uMsgs > 0) { ++ /* Tell the DSP we've read the messages */ ++ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, bufEmpty, ++ true); ++ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, postSWI, ++ true); ++ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); ++ } ++func_end: ++ return; ++ ++} ++ ++/* ++ * ======== NotifyChnlComplete ======== ++ * Purpose: ++ * Signal the channel event, notifying the client that I/O has completed. ++ */ ++static void NotifyChnlComplete(struct CHNL_OBJECT *pChnl, ++ struct CHNL_IRP *pChirp) ++{ ++ bool fSignalEvent; ++ ++ DBC_Require(MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)); ++ DBC_Require(pChnl->hSyncEvent != NULL); ++ /* Note: we signal the channel event only if the queue of IO ++ * completions is empty. If it is not empty, the event is sure to be ++ * signalled by the only IO completion list consumer: ++ * WMD_CHNL_GetIOC(). 
*/ ++ fSignalEvent = LST_IsEmpty(pChnl->pIOCompletions); ++ /* Enqueue the IO completion info for the client: */ ++ LST_PutTail(pChnl->pIOCompletions, (struct LST_ELEM *) pChirp); ++ pChnl->cIOCs++; ++ DBC_Assert(pChnl->cIOCs <= pChnl->cChirps); ++ /* Signal the channel event (if not already set) that IO is complete: */ ++ if (fSignalEvent) ++ SYNC_SetEvent(pChnl->hSyncEvent); ++ ++ /* Notify that IO is complete */ ++ NTFY_Notify(pChnl->hNtfy, DSP_STREAMIOCOMPLETION); ++} ++ ++/* ++ * ======== OutputChnl ======== ++ * Purpose: ++ * Dispatch a buffer on an output channel. ++ */ ++static void OutputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, ++ u32 iMode) ++{ ++ struct CHNL_MGR *pChnlMgr; ++ struct SHM *sm; ++ u32 chnlId; ++ struct CHNL_IRP *pChirp; ++ u32 dwDspFMask; ++ ++ pChnlMgr = pIOMgr->hChnlMgr; ++ sm = pIOMgr->pSharedMem; ++ DBG_Trace(DBG_LEVEL3, "> OutputChnl\n"); ++ /* Attempt to perform output: */ ++ if (IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, outputFull)) ++ goto func_end; ++ ++ if (pChnl && !((pChnl->dwState & ~CHNL_STATEEOS) == CHNL_STATEREADY)) ++ goto func_end; ++ ++ /* Look to see if both a PC and DSP output channel are ready: */ ++ dwDspFMask = IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, ++ dspFreeMask); ++ chnlId = FindReadyOutput(pChnlMgr, pChnl, (pChnlMgr->dwOutputMask & ++ dwDspFMask)); ++ if (chnlId == OUTPUTNOTREADY) ++ goto func_end; ++ ++ pChnl = pChnlMgr->apChannel[chnlId]; ++ if (!pChnl || !pChnl->pIORequests) { ++ /* Shouldn't get here: */ ++ goto func_end; ++ } ++ /* Get the I/O request, and attempt a transfer: */ ++ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIORequests); ++ if (!pChirp) ++ goto func_end; ++ ++ pChnl->cIOReqs--; ++ if (pChnl->cIOReqs < 0 || !pChnl->pIORequests) ++ goto func_end; ++ ++ /* Record fact that no more I/O buffers available: */ ++ if (LST_IsEmpty(pChnl->pIORequests)) ++ pChnlMgr->dwOutputMask &= ~(1 << chnlId); ++ ++ /* Transfer buffer to DSP side: */ ++ pChirp->cBytes = WriteData(pIOMgr->hWmdContext, pIOMgr->pOutput, ++ pChirp->pHostSysBuf, min(pIOMgr->uSMBufSize, pChirp-> ++ cBytes)); ++ pChnl->cBytesMoved += pChirp->cBytes; ++ /* Write all 32 bits of arg */ ++ IO_SetLong(pIOMgr->hWmdContext, struct SHM, sm, arg, pChirp->dwArg); ++#if _CHNL_WORDSIZE == 2 ++ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputId, ++ (u16)chnlId); ++ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputSize, ++ (u16)(pChirp->cBytes + (pChnlMgr->uWordSize-1)) / ++ (u16)pChnlMgr->uWordSize); ++#else ++ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputId, chnlId); ++ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputSize, ++ (pChirp->cBytes + (pChnlMgr->uWordSize - 1)) / pChnlMgr-> ++ uWordSize); ++#endif ++ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputFull, 1); ++ /* Indicate to the DSP we have written the output: */ ++ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); ++ /* Notify client with IO completion record (keep EOS) */ ++ pChirp->status &= CHNL_IOCSTATEOS; ++ NotifyChnlComplete(pChnl, pChirp); ++ /* Notify if stream is done. */ ++ if (pChirp->status & CHNL_IOCSTATEOS) ++ NTFY_Notify(pChnl->hNtfy, DSP_STREAMDONE); ++ ++func_end: ++ DBG_Trace(DBG_LEVEL3, "< OutputChnl\n"); ++} ++/* ++ * ======== OutputMsg ======== ++ * Copies messages from the message queues to the shared memory. 
++ */ ++static void OutputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr) ++{ ++ u32 uMsgs = 0; ++ u32 i; ++ u8 *pMsgOutput; ++ struct MSG_FRAME *pMsg; ++ struct MSG *pCtrl; ++ u32 fOutputEmpty; ++ u32 val; ++ u32 addr; ++ ++ pCtrl = pIOMgr->pMsgOutputCtrl; ++ ++ /* Check if output has been cleared */ ++ fOutputEmpty = IO_GetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, ++ bufEmpty); ++ if (fOutputEmpty) { ++ uMsgs = (hMsgMgr->uMsgsPending > hMsgMgr->uMaxMsgs) ? ++ hMsgMgr->uMaxMsgs : hMsgMgr->uMsgsPending; ++ pMsgOutput = pIOMgr->pMsgOutput; ++ /* Copy uMsgs messages into shared memory */ ++ for (i = 0; i < uMsgs; i++) { ++ if (!hMsgMgr->msgUsedList) { ++ DBG_Trace(DBG_LEVEL3, "msgUsedList is NULL\n"); ++ pMsg = NULL; ++ goto func_end; ++ } else ++ pMsg = (struct MSG_FRAME *)LST_GetHead( ++ hMsgMgr->msgUsedList); ++ if (pMsg != NULL) { ++ val = (pMsg->msgData).dwId; ++ addr = (u32)&(((struct MSG_DSPMSG *) ++ pMsgOutput)->dwId); ++ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, ++ val); ++ val = (pMsg->msgData).msg.dwCmd; ++ addr = (u32)&((((struct MSG_DSPMSG *) ++ pMsgOutput)->msg).dwCmd); ++ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, ++ val); ++ val = (pMsg->msgData).msg.dwArg1; ++ addr = ++ (u32)&((((struct MSG_DSPMSG *) ++ pMsgOutput)->msg).dwArg1); ++ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, ++ val); ++ val = (pMsg->msgData).msg.dwArg2; ++ addr = ++ (u32)&((((struct MSG_DSPMSG *) ++ pMsgOutput)->msg).dwArg2); ++ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, ++ val); ++ pMsgOutput += sizeof(struct MSG_DSPMSG); ++ if (!hMsgMgr->msgFreeList) ++ goto func_end; ++ LST_PutTail(hMsgMgr->msgFreeList, ++ (struct LST_ELEM *) pMsg); ++ SYNC_SetEvent(hMsgMgr->hSyncEvent); ++ } else { ++ DBG_Trace(DBG_LEVEL3, "pMsg is NULL\n"); ++ } ++ } ++ ++ if (uMsgs > 0) { ++ hMsgMgr->uMsgsPending -= uMsgs; ++#if _CHNL_WORDSIZE == 2 ++ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, ++ size, (u16)uMsgs); ++#else ++ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, ++ size, uMsgs); ++#endif ++ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, ++ bufEmpty, false); ++ /* Set the post SWI flag */ ++ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, ++ postSWI, true); ++ /* Tell the DSP we have written the output. */ ++ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); ++ } ++ } ++func_end: ++ return; ++ ++} ++ ++/* ++ * ======== registerSHMSegs ======== ++ * purpose: ++ * Registers GPP SM segment with CMM. 
++ */ ++static DSP_STATUS registerSHMSegs(struct IO_MGR *hIOMgr, ++ struct COD_MANAGER *hCodMan, ++ u32 dwGPPBasePA) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 ulShm0_Base = 0; ++ u32 ulShm0_End = 0; ++ u32 ulShm0_RsrvdStart = 0; ++ u32 ulRsrvdSize = 0; ++ u32 ulGppPhys; ++ u32 ulDspVirt; ++ u32 ulShmSegId0 = 0; ++ u32 dwOffset, dwGPPBaseVA, ulDSPSize; ++ ++ /* Read address and size info for first SM region.*/ ++ /* Get start of 1st SM Heap region */ ++ status = COD_GetSymValue(hCodMan, SHM0_SHARED_BASE_SYM, &ulShm0_Base); ++ DBC_Assert(ulShm0_Base != 0); ++ /* Get end of 1st SM Heap region */ ++ if (DSP_SUCCEEDED(status)) { ++ /* Get start and length of message part of shared memory */ ++ status = COD_GetSymValue(hCodMan, SHM0_SHARED_END_SYM, ++ &ulShm0_End); ++ DBC_Assert(ulShm0_End != 0); ++ } ++ /* start of Gpp reserved region */ ++ if (DSP_SUCCEEDED(status)) { ++ /* Get start and length of message part of shared memory */ ++ status = COD_GetSymValue(hCodMan, SHM0_SHARED_RESERVED_BASE_SYM, ++ &ulShm0_RsrvdStart); ++ DBG_Trace(DBG_LEVEL1, "***ulShm0_RsrvdStart 0x%x \n", ++ ulShm0_RsrvdStart); ++ DBC_Assert(ulShm0_RsrvdStart != 0); ++ } ++ /* Register with CMM */ ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetCmmMgr(hIOMgr->hDevObject, &hIOMgr->hCmmMgr); ++ if (DSP_SUCCEEDED(status)) { ++ status = CMM_UnRegisterGPPSMSeg(hIOMgr->hCmmMgr, ++ CMM_ALLSEGMENTS); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, "ERROR - Unable to " ++ "Un-Register SM segments \n"); ++ } ++ } else { ++ DBG_Trace(DBG_LEVEL7, "ERROR - Unable to get CMM " ++ "Handle \n"); ++ } ++ } ++ /* Register new SM region(s) */ ++ if (DSP_SUCCEEDED(status) && (ulShm0_End - ulShm0_Base) > 0) { ++ /* calc size (bytes) of SM the GPP can alloc from */ ++ ulRsrvdSize = (ulShm0_End - ulShm0_RsrvdStart + 1) * hIOMgr-> ++ uWordSize; ++ DBC_Assert(ulRsrvdSize > 0); ++ /* calc size of SM DSP can alloc from */ ++ ulDSPSize = (ulShm0_RsrvdStart - ulShm0_Base) * hIOMgr-> ++ uWordSize; ++ DBC_Assert(ulDSPSize > 0); ++ /* First TLB entry reserved for Bridge SM use.*/ ++ ulGppPhys = hIOMgr->extProcInfo.tyTlb[0].ulGppPhys; ++ /* get size in bytes */ ++ ulDspVirt = hIOMgr->extProcInfo.tyTlb[0].ulDspVirt * hIOMgr-> ++ uWordSize; ++ /* Calc byte offset used to convert GPP phys <-> DSP byte ++ * address.*/ ++ if (dwGPPBasePA > ulDspVirt) ++ dwOffset = dwGPPBasePA - ulDspVirt; ++ else ++ dwOffset = ulDspVirt - dwGPPBasePA; ++ ++ DBC_Assert(ulShm0_RsrvdStart * hIOMgr->uWordSize >= ulDspVirt); ++ /* calc Gpp phys base of SM region */ ++ /* Linux - this is actually uncached kernel virtual address*/ ++ dwGPPBaseVA = ulGppPhys + ulShm0_RsrvdStart * hIOMgr->uWordSize ++ - ulDspVirt; ++ /* calc Gpp phys base of SM region */ ++ /* Linux - this is the physical address*/ ++ dwGPPBasePA = dwGPPBasePA + ulShm0_RsrvdStart * hIOMgr-> ++ uWordSize - ulDspVirt; ++ /* Register SM Segment 0.*/ ++ status = CMM_RegisterGPPSMSeg(hIOMgr->hCmmMgr, dwGPPBasePA, ++ ulRsrvdSize, dwOffset, (dwGPPBasePA > ulDspVirt) ? ++ CMM_ADDTODSPPA : CMM_SUBFROMDSPPA, ++ (u32)(ulShm0_Base * hIOMgr->uWordSize), ++ ulDSPSize, &ulShmSegId0, dwGPPBaseVA); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, "ERROR - Failed to register SM " ++ "Seg 0 \n"); ++ } ++ /* first SM region is segId = 1 */ ++ DBC_Assert(ulShmSegId0 == 1); ++ } ++ return status; ++} ++ ++/* ++ * ======== ReadData ======== ++ * Copies buffers from the shared memory to the host buffer. 
++ */ ++static u32 ReadData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, ++ void *pSrc, u32 uSize) ++{ ++ memcpy(pDest, pSrc, uSize); ++ return uSize; ++} ++ ++/* ++ * ======== WriteData ======== ++ * Copies buffers from the host side buffer to the shared memory. ++ */ ++static u32 WriteData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, ++ void *pSrc, u32 uSize) ++{ ++ memcpy(pDest, pSrc, uSize); ++ return uSize; ++} ++ ++/* ZCPY IO routines. */ ++void IO_IntrDSP2(IN struct IO_MGR *pIOMgr, IN u16 wMbVal) ++{ ++ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, wMbVal); ++} ++ ++/* ++ * ======== IO_SHMcontrol ======== ++ * Sets the requested SHM setting. ++ */ ++DSP_STATUS IO_SHMsetting(IN struct IO_MGR *hIOMgr, IN enum SHM_DESCTYPE desc, ++ IN void *pArgs) ++{ ++#ifdef CONFIG_BRIDGE_DVFS ++ u32 i; ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++ ++ switch (desc) { ++ case SHM_CURROPP: ++ /* Update the shared memory with requested OPP information */ ++ if (pArgs != NULL) ++ hIOMgr->pSharedMem->oppTableStruct.currOppPt = ++ *(u32 *)pArgs; ++ else ++ return DSP_EFAIL; ++ break; ++ case SHM_OPPINFO: ++ /* Update the shared memory with the voltage, frequency, ++ min and max frequency values for an OPP */ ++ for (i = 0; i <= dsp_max_opps; i++) { ++ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i].voltage = ++ vdd1_dsp_freq[i][0]; ++ DBG_Trace(DBG_LEVEL5, "OPP shared memory -voltage: " ++ "%d\n", hIOMgr->pSharedMem->oppTableStruct. ++ oppPoint[i].voltage); ++ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i]. ++ frequency = vdd1_dsp_freq[i][1]; ++ DBG_Trace(DBG_LEVEL5, "OPP shared memory -frequency: " ++ "%d\n", hIOMgr->pSharedMem->oppTableStruct. ++ oppPoint[i].frequency); ++ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i].minFreq = ++ vdd1_dsp_freq[i][2]; ++ DBG_Trace(DBG_LEVEL5, "OPP shared memory -min value: " ++ "%d\n", hIOMgr->pSharedMem->oppTableStruct. ++ oppPoint[i].minFreq); ++ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i].maxFreq = ++ vdd1_dsp_freq[i][3]; ++ DBG_Trace(DBG_LEVEL5, "OPP shared memory -max value: " ++ "%d\n", hIOMgr->pSharedMem->oppTableStruct. 
++ oppPoint[i].maxFreq); ++ } ++ hIOMgr->pSharedMem->oppTableStruct.numOppPts = dsp_max_opps; ++ DBG_Trace(DBG_LEVEL5, "OPP shared memory - max OPP number: " ++ "%d\n", hIOMgr->pSharedMem->oppTableStruct.numOppPts); ++ /* Update the current OPP number */ ++ if (pdata->dsp_get_opp) ++ i = (*pdata->dsp_get_opp)(); ++ hIOMgr->pSharedMem->oppTableStruct.currOppPt = i; ++ DBG_Trace(DBG_LEVEL7, "OPP value programmed to shared memory: " ++ "%d\n", i); ++ break; ++ case SHM_GETOPP: ++ /* Get the OPP that DSP has requested */ ++ *(u32 *)pArgs = hIOMgr->pSharedMem->oppRequest.rqstOppPt; ++ break; ++ default: ++ break; ++ } ++#endif ++ return DSP_SOK; ++} ++ ++/* ++ * ======== WMD_IO_GetProcLoad ======== ++ * Gets the Processor's Load information ++ */ ++DSP_STATUS WMD_IO_GetProcLoad(IN struct IO_MGR *hIOMgr, ++ OUT struct DSP_PROCLOADSTAT *pProcStat) ++{ ++ pProcStat->uCurrLoad = hIOMgr->pSharedMem->loadMonInfo.currDspLoad; ++ pProcStat->uPredictedLoad = hIOMgr->pSharedMem->loadMonInfo.predDspLoad; ++ pProcStat->uCurrDspFreq = hIOMgr->pSharedMem->loadMonInfo.currDspFreq; ++ pProcStat->uPredictedFreq = hIOMgr->pSharedMem->loadMonInfo.predDspFreq; ++ ++ DBG_Trace(DBG_LEVEL4, "Curr Load =%d, Pred Load = %d, Curr Freq = %d, " ++ "Pred Freq = %d\n", pProcStat->uCurrLoad, ++ pProcStat->uPredictedLoad, pProcStat->uCurrDspFreq, ++ pProcStat->uPredictedFreq); ++ return DSP_SOK; ++} ++ ++#ifndef DSP_TRACEBUF_DISABLED ++void PrintDSPDebugTrace(struct IO_MGR *hIOMgr) ++{ ++ u32 ulNewMessageLength = 0, ulGPPCurPointer; ++ ++ GT_0trace(dsp_trace_mask, GT_ENTER, "Entering PrintDSPDebugTrace\n"); ++ ++ while (true) { ++ /* Get the DSP current pointer */ ++ ulGPPCurPointer = *(u32 *) (hIOMgr->ulTraceBufferCurrent); ++ ulGPPCurPointer = hIOMgr->ulGppVa + (ulGPPCurPointer - ++ hIOMgr->ulDspVa); ++ ++ /* No new debug messages available yet */ ++ if (ulGPPCurPointer == hIOMgr->ulGPPReadPointer) ++ break; ++ ++ /* Continuous data */ ++ else if (ulGPPCurPointer > hIOMgr->ulGPPReadPointer) { ++ ulNewMessageLength = ulGPPCurPointer - hIOMgr-> ++ ulGPPReadPointer; ++ ++ memcpy(hIOMgr->pMsg, (char *)hIOMgr->ulGPPReadPointer, ++ ulNewMessageLength); ++ hIOMgr->pMsg[ulNewMessageLength] = '\0'; ++ /* Advance the GPP trace pointer to DSP current ++ * pointer */ ++ hIOMgr->ulGPPReadPointer += ulNewMessageLength; ++ /* Print the trace messages */ ++ GT_0trace(dsp_trace_mask, GT_1CLASS, hIOMgr->pMsg); ++ } ++ /* Handle trace buffer wraparound */ ++ else if (ulGPPCurPointer < hIOMgr->ulGPPReadPointer) { ++ memcpy(hIOMgr->pMsg, (char *)hIOMgr->ulGPPReadPointer, ++ hIOMgr->ulTraceBufferEnd - ++ hIOMgr->ulGPPReadPointer); ++ ulNewMessageLength = ulGPPCurPointer - ++ hIOMgr->ulTraceBufferBegin; ++ memcpy(&hIOMgr->pMsg[hIOMgr->ulTraceBufferEnd - ++ hIOMgr->ulGPPReadPointer], ++ (char *)hIOMgr->ulTraceBufferBegin, ++ ulNewMessageLength); ++ hIOMgr->pMsg[hIOMgr->ulTraceBufferEnd - ++ hIOMgr->ulGPPReadPointer + ++ ulNewMessageLength] = '\0'; ++ /* Advance the GPP trace pointer to DSP current ++ * pointer */ ++ hIOMgr->ulGPPReadPointer = hIOMgr->ulTraceBufferBegin + ++ ulNewMessageLength; ++ /* Print the trace messages */ ++ GT_0trace(dsp_trace_mask, GT_1CLASS, hIOMgr->pMsg); ++ } ++ } ++} ++#endif ++ ++/* ++ * ======== PackTraceBuffer ======== ++ * Removes extra nulls from the trace buffer returned from the DSP. ++ * Works even on buffers that already are packed (null removed); but has ++ * one bug in that case -- loses the last character (replaces with '\0'). ++ * Continues through conversion for full set of nBytes input characters. 
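++ * Null bytes are dropped and each '\n' is expanded to "\r\n"; the temporary
++ * workspace is therefore sized nBytes + ulNumWords, and the packed result is
++ * copied back and truncated to nBytes.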
++ * Parameters: ++ * lpBuf: Pointer to input/output buffer ++ * nBytes: Number of characters in the buffer ++ * ulNumWords: Number of DSP words in the buffer. Indicates potential ++ * number of extra carriage returns to generate. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Unable to allocate memory. ++ * Requires: ++ * lpBuf must be a fully allocated writable block of at least nBytes. ++ * There are no more than ulNumWords extra characters needed (the number of ++ * linefeeds minus the number of NULLS in the input buffer). ++ */ ++#if (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE ++static DSP_STATUS PackTraceBuffer(char *lpBuf, u32 nBytes, u32 ulNumWords) ++{ ++ DSP_STATUS status = DSP_SOK; ++ char *lpTmpBuf; ++ char *lpBufStart; ++ char *lpTmpStart; ++ u32 nCnt; ++ char thisChar; ++ ++ /* tmp workspace, 1 KB longer than input buf */ ++ lpTmpBuf = MEM_Calloc((nBytes + ulNumWords), MEM_PAGED); ++ if (lpTmpBuf == NULL) { ++ DBG_Trace(DBG_LEVEL7, "PackTrace buffer:OutofMemory \n"); ++ status = DSP_EMEMORY; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ lpBufStart = lpBuf; ++ lpTmpStart = lpTmpBuf; ++ for (nCnt = nBytes; nCnt > 0; nCnt--) { ++ thisChar = *lpBuf++; ++ switch (thisChar) { ++ case '\0': /* Skip null bytes */ ++ break; ++ case '\n': /* Convert \n to \r\n */ ++ /* NOTE: do not reverse order; Some OS */ ++ /* editors control doesn't understand "\n\r" */ ++ *lpTmpBuf++ = '\r'; ++ *lpTmpBuf++ = '\n'; ++ break; ++ default: /* Copy in the actual ascii byte */ ++ *lpTmpBuf++ = thisChar; ++ break; ++ } ++ } ++ *lpTmpBuf = '\0'; /* Make sure tmp buf is null terminated */ ++ /* Cut output down to input buf size */ ++ strncpy(lpBufStart, lpTmpStart, nBytes); ++ /*Make sure output is null terminated */ ++ lpBufStart[nBytes - 1] = '\0'; ++ MEM_Free(lpTmpStart); ++ } ++ ++ return status; ++} ++#endif /* (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE */ ++ ++/* ++ * ======== PrintDspTraceBuffer ======== ++ * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled). ++ * Parameters: ++ * hDehMgr: Handle to DEH manager object ++ * number of extra carriage returns to generate. ++ * Returns: ++ * DSP_SOK: Success. ++ * DSP_EMEMORY: Unable to allocate memory. ++ * Requires: ++ * hDehMgr muse be valid. Checked in WMD_DEH_Notify. 
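++ * Note: the handle actually passed in is hWmdContext (the WMD device context);
++ * the COD manager and the board-read function are looked up from its DEV object.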
++ */ ++DSP_STATUS PrintDspTraceBuffer(struct WMD_DEV_CONTEXT *hWmdContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++#if (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE ++ struct COD_MANAGER *hCodMgr; ++ u32 ulTraceEnd; ++ u32 ulTraceBegin; ++ u32 ulNumBytes = 0; ++ u32 ulNumWords = 0; ++ u32 ulWordSize = 2; ++ CONST u32 uMaxSize = 512; ++ char *pszBuf; ++ u16 *lpszBuf; ++ ++ struct WMD_DEV_CONTEXT *pWmdContext = (struct WMD_DEV_CONTEXT *) ++ hWmdContext; ++ struct WMD_DRV_INTERFACE *pIntfFxns; ++ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *) ++ pWmdContext->hDevObject; ++ ++ status = DEV_GetCodMgr(pDevObject, &hCodMgr); ++ if (DSP_FAILED(status)) ++ GT_0trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: Failed on DEV_GetCodMgr.\n"); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ ++ status = COD_GetSymValue(hCodMgr, COD_TRACEBEG, &ulTraceBegin); ++ GT_1trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: ulTraceBegin Value 0x%x\n", ++ ulTraceBegin); ++ if (DSP_FAILED(status)) ++ GT_0trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: Failed on " ++ "COD_GetSymValue.\n"); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ status = COD_GetSymValue(hCodMgr, COD_TRACEEND, &ulTraceEnd); ++ GT_1trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: ulTraceEnd Value 0x%x\n", ++ ulTraceEnd); ++ if (DSP_FAILED(status)) ++ GT_0trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: Failed on " ++ "COD_GetSymValue.\n"); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ ulNumBytes = (ulTraceEnd - ulTraceBegin) * ulWordSize; ++ /* If the chip type is 55 then the addresses will be ++ * byte addresses; convert them to word addresses. */ ++ if (ulNumBytes > uMaxSize) ++ ulNumBytes = uMaxSize; ++ ++ /* make sure the data we request fits evenly */ ++ ulNumBytes = (ulNumBytes / ulWordSize) * ulWordSize; ++ GT_1trace(dsp_trace_mask, GT_2CLASS, "PrintDspTraceBuffer: " ++ "ulNumBytes 0x%x\n", ulNumBytes); ++ ulNumWords = ulNumBytes * ulWordSize; ++ GT_1trace(dsp_trace_mask, GT_2CLASS, "PrintDspTraceBuffer: " ++ "ulNumWords 0x%x\n", ulNumWords); ++ status = DEV_GetIntfFxns(pDevObject, &pIntfFxns); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ pszBuf = MEM_Calloc(uMaxSize, MEM_NONPAGED); ++ lpszBuf = MEM_Calloc(ulNumBytes * 2, MEM_NONPAGED); ++ if (pszBuf != NULL) { ++ /* Read bytes from the DSP trace buffer... 
*/ ++ status = (*pIntfFxns->pfnBrdRead)(hWmdContext, ++ (u8 *)pszBuf, (u32)ulTraceBegin, ++ ulNumBytes, 0); ++ if (DSP_FAILED(status)) ++ GT_0trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: " ++ "Failed to Read Trace Buffer.\n"); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Pack and do newline conversion */ ++ GT_0trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: " ++ "before pack and unpack.\n"); ++ PackTraceBuffer(pszBuf, ulNumBytes, ulNumWords); ++ GT_1trace(dsp_trace_mask, GT_1CLASS, ++ "DSP Trace Buffer:\n%s\n", pszBuf); ++ } ++ MEM_Free(pszBuf); ++ MEM_Free(lpszBuf); ++ } else { ++ GT_0trace(dsp_trace_mask, GT_2CLASS, ++ "PrintDspTraceBuffer: Failed to " ++ "allocate trace buffer.\n"); ++ status = DSP_EMEMORY; ++ } ++ } ++#endif ++ return status; ++} ++ ++void IO_SM_init(void) ++{ ++ ++ GT_create(&dsp_trace_mask, "DT"); /* DSP Trace Mask */ ++ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/mmu_fault.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/mmu_fault.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,172 @@ ++/* ++ * mmu_fault.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== mmu_fault.c ======== ++ * Description: ++ * Implements DSP MMU fault handling functions. ++ * ++ *! Revision History: ++ *! ================ ++ *! 26-Dec-2004 hn: Support for IVA MMU exception. ++ *! 06-Mar-2003 sb: Print MMU fault address. Cosmetic changes. ++ *! 16-Feb-2003 vp: Fixed warning in MMU_FaultIsr ++ *! 05-Jan-2004 vp: Updated support for 24xx silicon ++ *! 19-Feb-2003 vp: Code review updates. ++ *! - Cosmetic changes. ++ *! 18-Oct-2002 sb: Ported to Linux platform. ++ *! 10-Sep-2001 kc: created. ++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Link Driver */ ++#include ++ ++/* ------------------------------------ Hardware Abstraction Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include "_deh.h" ++#include ++#include "_tiomap_mmu.h" ++#include "_tiomap.h" ++#include "mmu_fault.h" ++ ++static u32 dmmuEventMask; ++u32 faultAddr; ++ ++static bool MMU_CheckIfFault(struct WMD_DEV_CONTEXT *pDevContext); ++ ++/* ++ * ======== MMU_FaultDpc ======== ++ * Deferred procedure call to handle DSP MMU fault. 
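++ * Forwards the fault to WMD_DEH_Notify() with the DSP_MMUFAULT event; the
++ * faulting address has already been latched by MMU_FaultIsr().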
++ */ ++void MMU_FaultDpc(IN void *pRefData) ++{ ++ struct DEH_MGR *hDehMgr = (struct DEH_MGR *)pRefData; ++ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; ++ ++ DBG_Trace(DBG_LEVEL1, "MMU_FaultDpc Enter: 0x%x\n", pRefData); ++ ++ if (pDehMgr) ++ WMD_DEH_Notify(hDehMgr, DSP_MMUFAULT, 0L); ++ ++ DBG_Trace(DBG_LEVEL1, "MMU_FaultDpc Exit: 0x%x\n", pRefData); ++} ++ ++/* ++ * ======== MMU_FaultIsr ======== ++ * ISR to be triggered by a DSP MMU fault interrupt. ++ */ ++irqreturn_t MMU_FaultIsr(int irq, IN void *pRefData) ++{ ++ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)pRefData; ++ struct WMD_DEV_CONTEXT *pDevContext; ++ struct CFG_HOSTRES resources; ++ DSP_STATUS status = DSP_SOK; ++ ++ ++ DBG_Trace(DBG_LEVEL1, "Entering DEH_DspMmuIsr: 0x%x\n", pRefData); ++ DBC_Require(irq == INT_DSP_MMU_IRQ); ++ DBC_Require(MEM_IsValidHandle(pDehMgr, SIGNATURE)); ++ ++ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { ++ ++ pDevContext = (struct WMD_DEV_CONTEXT *)pDehMgr->hWmdContext; ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &resources); ++ if (DSP_FAILED(status)) ++ DBG_Trace(DBG_LEVEL7, ++ "**Failed to get Host Resources " ++ "in MMU ISR **\n"); ++ if (MMU_CheckIfFault(pDevContext)) { ++ printk(KERN_INFO "***** DSPMMU FAULT ***** IRQStatus " ++ "0x%x\n", dmmuEventMask); ++ printk(KERN_INFO "***** DSPMMU FAULT ***** faultAddr " ++ "0x%x\n", faultAddr); ++ /* Disable the MMU events, else once we clear it will ++ * start to raise INTs again */ ++ /* ++ * Schedule a DPC directly. In the future, it may be ++ * necessary to check if DSP MMU fault is intended for ++ * Bridge. ++ */ ++ DPC_Schedule(pDehMgr->hMmuFaultDpc); ++ /* Reset errInfo structure before use. */ ++ pDehMgr->errInfo.dwErrMask = DSP_MMUFAULT; ++ pDehMgr->errInfo.dwVal1 = faultAddr >> 16; ++ pDehMgr->errInfo.dwVal2 = faultAddr & 0xFFFF; ++ pDehMgr->errInfo.dwVal3 = 0L; ++ /* Disable the MMU events, else once we clear it will ++ * start to raise INTs again */ ++ HW_MMU_EventDisable(resources.dwDmmuBase, ++ HW_MMU_TRANSLATION_FAULT); ++ } else { ++ DBG_Trace(DBG_LEVEL7, ++ "***** MMU FAULT ***** faultcode 0x%x\n", ++ dmmuEventMask); ++ HW_MMU_EventDisable(resources.dwDmmuBase, ++ HW_MMU_ALL_INTERRUPTS); ++ } ++ } ++ return IRQ_HANDLED; ++} ++ ++ ++/* ++ * ======== MMU_CheckIfFault ======== ++ * Check to see if MMU Fault is valid TLB miss from DSP ++ * Note: This function is called from an ISR ++ */ ++static bool MMU_CheckIfFault(struct WMD_DEV_CONTEXT *pDevContext) ++{ ++ ++ ++ bool retVal = false; ++ DSP_STATUS status = DSP_SOK; ++ HW_STATUS hwStatus; ++ struct CFG_HOSTRES resources; ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ DBG_Trace(DBG_LEVEL7, "**Failed to get Host Resources in " ++ "MMU_CheckIfFault **\n"); ++ ++ hwStatus = HW_MMU_EventStatus(resources.dwDmmuBase, &dmmuEventMask); ++ if (dmmuEventMask == HW_MMU_TRANSLATION_FAULT) { ++ HW_MMU_FaultAddrRead(resources.dwDmmuBase, &faultAddr); ++ DBG_Trace(DBG_LEVEL1, "WMD_DEH_Notify: DSP_MMUFAULT, fault " ++ "address = 0x%x\n", faultAddr); ++ retVal = true; ++ } ++ return retVal; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/mmu_fault.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/mmu_fault.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,45 @@ ++/* ++ * mmu_fault.h ++ * 
++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== mmu_fault.h ======== ++ * Description: ++ * Defines DSP MMU fault handling functions. ++ * ++ *! Revision History: ++ *! ================ ++ *! 26-Dec-2004 hn: IVA MMU handlers. ++ *! 10-Sep-2001 kc: created. ++ */ ++ ++#ifndef MMU_FAULT_ ++#define MMU_FAULT_ ++ ++/* ++ * ======== MMU_FaultDpc ======== ++ * Deferred procedure call to handle DSP MMU fault. ++ */ ++ void MMU_FaultDpc(IN void *pRefData); ++ ++/* ++ * ======== MMU_FaultIsr ======== ++ * ISR to be triggered by a DSP MMU fault interrupt. ++ */ ++irqreturn_t MMU_FaultIsr(int irq, IN void *pRefData); ++ ++#endif /* MMU_FAULT_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/msg_sm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/msg_sm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/msg_sm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/msg_sm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,643 @@ ++/* ++ * msg_sm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== msg_sm.c ======== ++ * Description: ++ * Implements upper edge functions for WMD message module. ++ * ++ * Public Functions: ++ * WMD_MSG_Create ++ * WMD_MSG_CreateQueue ++ * WMD_MSG_Delete ++ * WMD_MSG_DeleteQueue ++ * WMD_MSG_Get ++ * WMD_MSG_Put ++ * WMD_MSG_RegisterNotify ++ * WMD_MSG_SetQueueId ++ * ++ *! Revision History: ++ *! ================= ++ *! 24-Jul-2002 jeh Release critical section in WMD_MSG_Put() before ++ *! scheduling DPC. ++ *! 09-May-2001 jeh Free MSG queue NTFY object, remove unnecessary set/ ++ *! reset of events. ++ *! 10-Jan-2001 jeh Set/Reset message manager and message queue events ++ *! correctly. ++ *! 04-Dec-2000 jeh Bug fixes. ++ *! 12-Sep-2000 jeh Created. 
++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++ ++/* ----------------------------------- Others */ ++#include ++ ++/* ----------------------------------- This */ ++#include <_msg_sm.h> ++#include ++ ++/* ----------------------------------- Defines, Data Structures, Typedefs */ ++#define MSGQ_SIGNATURE 0x5147534d /* "QGSM" */ ++ ++/* ----------------------------------- Function Prototypes */ ++static DSP_STATUS AddNewMsg(struct LST_LIST *msgList); ++static void DeleteMsgMgr(struct MSG_MGR *hMsgMgr); ++static void DeleteMsgQueue(struct MSG_QUEUE *hMsgQueue, u32 uNumToDSP); ++static void FreeMsgList(struct LST_LIST *msgList); ++ ++/* ++ * ======== WMD_MSG_Create ======== ++ * Create an object to manage message queues. Only one of these objects ++ * can exist per device object. ++ */ ++DSP_STATUS WMD_MSG_Create(OUT struct MSG_MGR **phMsgMgr, ++ struct DEV_OBJECT *hDevObject, MSG_ONEXIT msgCallback) ++{ ++ struct MSG_MGR *pMsgMgr; ++ struct IO_MGR *hIOMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(phMsgMgr != NULL); ++ DBC_Require(msgCallback != NULL); ++ DBC_Require(hDevObject != NULL); ++ DEV_GetIOMgr(hDevObject, &hIOMgr); ++ DBC_Assert(hIOMgr != NULL); ++ *phMsgMgr = NULL; ++ /* Allocate MSG manager object */ ++ MEM_AllocObject(pMsgMgr, struct MSG_MGR, MSGMGR_SIGNATURE); ++ ++ if (pMsgMgr) { ++ pMsgMgr->onExit = msgCallback; ++ pMsgMgr->hIOMgr = hIOMgr; ++ /* List of MSG_QUEUEs */ ++ pMsgMgr->queueList = LST_Create(); ++ /* Queues of message frames for messages to the DSP. Message ++ * frames will only be added to the free queue when a ++ * MSG_QUEUE object is created. */ ++ pMsgMgr->msgFreeList = LST_Create(); ++ pMsgMgr->msgUsedList = LST_Create(); ++ if (pMsgMgr->queueList == NULL || ++ pMsgMgr->msgFreeList == NULL || ++ pMsgMgr->msgUsedList == NULL) ++ status = DSP_EMEMORY; ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_InitializeDPCCS(&pMsgMgr->hSyncCS); ++ ++ /* Create an event to be used by WMD_MSG_Put() in waiting ++ * for an available free frame from the message manager. */ ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_OpenEvent(&pMsgMgr->hSyncEvent, NULL); ++ ++ if (DSP_SUCCEEDED(status)) ++ *phMsgMgr = pMsgMgr; ++ else ++ DeleteMsgMgr(pMsgMgr); ++ ++ } else { ++ status = DSP_EMEMORY; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_MSG_CreateQueue ======== ++ * Create a MSG_QUEUE for sending/receiving messages to/from a node ++ * on the DSP. 
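++ * uMaxMsgs message frames are pre-allocated onto both the manager's and the
++ * queue's free lists while the manager's critical section is held.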
++ */ ++DSP_STATUS WMD_MSG_CreateQueue(struct MSG_MGR *hMsgMgr, ++ OUT struct MSG_QUEUE **phMsgQueue, ++ u32 dwId, u32 uMaxMsgs, HANDLE hArg) ++{ ++ u32 i; ++ u32 uNumAllocated = 0; ++ struct MSG_QUEUE *pMsgQ; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); ++ DBC_Require(phMsgQueue != NULL); ++ ++ *phMsgQueue = NULL; ++ /* Allocate MSG_QUEUE object */ ++ MEM_AllocObject(pMsgQ, struct MSG_QUEUE, MSGQ_SIGNATURE); ++ if (!pMsgQ) { ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ LST_InitElem((struct LST_ELEM *) pMsgQ); ++ pMsgQ->uMaxMsgs = uMaxMsgs; ++ pMsgQ->hMsgMgr = hMsgMgr; ++ pMsgQ->hArg = hArg; /* Node handle */ ++ pMsgQ->dwId = dwId; /* Node env (not valid yet) */ ++ /* Queues of Message frames for messages from the DSP */ ++ pMsgQ->msgFreeList = LST_Create(); ++ pMsgQ->msgUsedList = LST_Create(); ++ if (pMsgQ->msgFreeList == NULL || pMsgQ->msgUsedList == NULL) ++ status = DSP_EMEMORY; ++ ++ /* Create event that will be signalled when a message from ++ * the DSP is available. */ ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_OpenEvent(&pMsgQ->hSyncEvent, NULL); ++ ++ /* Create a notification list for message ready notification. */ ++ if (DSP_SUCCEEDED(status)) ++ status = NTFY_Create(&pMsgQ->hNtfy); ++ ++ /* Create events that will be used to synchronize cleanup ++ * when the object is deleted. hSyncDone will be set to ++ * unblock threads in MSG_Put() or MSG_Get(). hSyncDoneAck ++ * will be set by the unblocked thread to signal that it ++ * is unblocked and will no longer reference the object. */ ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_OpenEvent(&pMsgQ->hSyncDone, NULL); ++ ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_OpenEvent(&pMsgQ->hSyncDoneAck, NULL); ++ ++ if (DSP_SUCCEEDED(status)) { ++ if (!hMsgMgr->msgFreeList) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ /* Enter critical section */ ++ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); ++ /* Initialize message frames and put in appropriate queues */ ++ for (i = 0; i < uMaxMsgs && DSP_SUCCEEDED(status); i++) { ++ status = AddNewMsg(hMsgMgr->msgFreeList); ++ if (DSP_SUCCEEDED(status)) { ++ uNumAllocated++; ++ status = AddNewMsg(pMsgQ->msgFreeList); ++ } ++ } ++ if (DSP_FAILED(status)) { ++ /* Stay inside CS to prevent others from taking any ++ * of the newly allocated message frames. */ ++ DeleteMsgQueue(pMsgQ, uNumAllocated); ++ } else { ++ LST_PutTail(hMsgMgr->queueList, ++ (struct LST_ELEM *)pMsgQ); ++ *phMsgQueue = pMsgQ; ++ /* Signal that free frames are now available */ ++ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) ++ SYNC_SetEvent(hMsgMgr->hSyncEvent); ++ ++ } ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ } else { ++ DeleteMsgQueue(pMsgQ, 0); ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== WMD_MSG_Delete ======== ++ * Delete a MSG manager allocated in WMD_MSG_Create(). ++ */ ++void WMD_MSG_Delete(struct MSG_MGR *hMsgMgr) ++{ ++ DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); ++ ++ DeleteMsgMgr(hMsgMgr); ++} ++ ++/* ++ * ======== WMD_MSG_DeleteQueue ======== ++ * Delete a MSG queue allocated in WMD_MSG_CreateQueue. ++ */ ++void WMD_MSG_DeleteQueue(struct MSG_QUEUE *hMsgQueue) ++{ ++ struct MSG_MGR *hMsgMgr = hMsgQueue->hMsgMgr; ++ u32 refCount; ++ ++ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); ++ hMsgQueue->fDone = true; ++ /* Unblock all threads blocked in MSG_Get() or MSG_Put(). 
*/ ++ refCount = hMsgQueue->refCount; ++ while (refCount) { ++ /* Unblock thread */ ++ SYNC_SetEvent(hMsgQueue->hSyncDone); ++ /* Wait for acknowledgement */ ++ SYNC_WaitOnEvent(hMsgQueue->hSyncDoneAck, SYNC_INFINITE); ++ refCount = hMsgQueue->refCount; ++ } ++ /* Remove message queue from hMsgMgr->queueList */ ++ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); ++ LST_RemoveElem(hMsgMgr->queueList, (struct LST_ELEM *)hMsgQueue); ++ /* Free the message queue object */ ++ DeleteMsgQueue(hMsgQueue, hMsgQueue->uMaxMsgs); ++ if (!hMsgMgr->msgFreeList) ++ goto func_cont; ++ if (LST_IsEmpty(hMsgMgr->msgFreeList)) ++ SYNC_ResetEvent(hMsgMgr->hSyncEvent); ++func_cont: ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++} ++ ++/* ++ * ======== WMD_MSG_Get ======== ++ * Get a message from a MSG queue. ++ */ ++DSP_STATUS WMD_MSG_Get(struct MSG_QUEUE *hMsgQueue, ++ struct DSP_MSG *pMsg, u32 uTimeout) ++{ ++ struct MSG_FRAME *pMsgFrame; ++ struct MSG_MGR *hMsgMgr; ++ bool fGotMsg = false; ++ struct SYNC_OBJECT *hSyncs[2]; ++ u32 uIndex; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); ++ DBC_Require(pMsg != NULL); ++ ++ hMsgMgr = hMsgQueue->hMsgMgr; ++ if (!hMsgQueue->msgUsedList) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ ++ /* Enter critical section */ ++ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); ++ /* If a message is already there, get it */ ++ if (!LST_IsEmpty(hMsgQueue->msgUsedList)) { ++ pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgQueue-> ++ msgUsedList); ++ if (pMsgFrame != NULL) { ++ *pMsg = pMsgFrame->msgData.msg; ++ LST_PutTail(hMsgQueue->msgFreeList, ++ (struct LST_ELEM *)pMsgFrame); ++ if (LST_IsEmpty(hMsgQueue->msgUsedList)) ++ SYNC_ResetEvent(hMsgQueue->hSyncEvent); ++ ++ fGotMsg = true; ++ } ++ } else { ++ if (hMsgQueue->fDone) ++ status = DSP_EFAIL; ++ else ++ hMsgQueue->refCount++; ++ ++ } ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ if (DSP_SUCCEEDED(status) && !fGotMsg) { ++ /* Wait til message is available, timeout, or done. We don't ++ * have to schedule the DPC, since the DSP will send messages ++ * when they are available. */ ++ hSyncs[0] = hMsgQueue->hSyncEvent; ++ hSyncs[1] = hMsgQueue->hSyncDone; ++ status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout, ++ &uIndex); ++ /* Enter critical section */ ++ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); ++ if (hMsgQueue->fDone) { ++ hMsgQueue->refCount--; ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ /* Signal that we're not going to access hMsgQueue ++ * anymore, so it can be deleted. */ ++ (void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck); ++ status = DSP_EFAIL; ++ } else { ++ if (DSP_SUCCEEDED(status)) { ++ DBC_Assert(!LST_IsEmpty(hMsgQueue-> ++ msgUsedList)); ++ /* Get msg from used list */ ++ pMsgFrame = (struct MSG_FRAME *) ++ LST_GetHead(hMsgQueue->msgUsedList); ++ /* Copy message into pMsg and put frame on the ++ * free list */ ++ if (pMsgFrame != NULL) { ++ *pMsg = pMsgFrame->msgData.msg; ++ LST_PutTail(hMsgQueue->msgFreeList, ++ (struct LST_ELEM *)pMsgFrame); ++ } ++ } ++ hMsgQueue->refCount--; ++ /* Reset the event if there are still queued messages */ ++ if (!LST_IsEmpty(hMsgQueue->msgUsedList)) ++ SYNC_SetEvent(hMsgQueue->hSyncEvent); ++ ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== WMD_MSG_Put ======== ++ * Put a message onto a MSG queue. 
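++ * Blocks for up to uTimeout when no free message frame is available; once a
++ * frame is queued, IO_Schedule() is called to start the actual transfer.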
++ */ ++DSP_STATUS WMD_MSG_Put(struct MSG_QUEUE *hMsgQueue, ++ IN CONST struct DSP_MSG *pMsg, u32 uTimeout) ++{ ++ struct MSG_FRAME *pMsgFrame; ++ struct MSG_MGR *hMsgMgr; ++ bool fPutMsg = false; ++ struct SYNC_OBJECT *hSyncs[2]; ++ u32 uIndex; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); ++ DBC_Require(pMsg != NULL); ++ ++ hMsgMgr = hMsgQueue->hMsgMgr; ++ ++ if (!hMsgMgr->msgFreeList) { ++ status = DSP_EHANDLE; ++ goto func_end; ++ } ++ ++ ++ (void) SYNC_EnterCS(hMsgMgr->hSyncCS); ++ ++ /* If a message frame is available, use it */ ++ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) { ++ pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgMgr-> ++ msgFreeList); ++ if (pMsgFrame != NULL) { ++ pMsgFrame->msgData.msg = *pMsg; ++ pMsgFrame->msgData.dwId = hMsgQueue->dwId; ++ LST_PutTail(hMsgMgr->msgUsedList, (struct LST_ELEM *) ++ pMsgFrame); ++ hMsgMgr->uMsgsPending++; ++ fPutMsg = true; ++ } ++ if (LST_IsEmpty(hMsgMgr->msgFreeList)) ++ SYNC_ResetEvent(hMsgMgr->hSyncEvent); ++ ++ /* Release critical section before scheduling DPC */ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ /* Schedule a DPC, to do the actual data transfer: */ ++ IO_Schedule(hMsgMgr->hIOMgr); ++ } else { ++ if (hMsgQueue->fDone) ++ status = DSP_EFAIL; ++ else ++ hMsgQueue->refCount++; ++ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ } ++ if (DSP_SUCCEEDED(status) && !fPutMsg) { ++ /* Wait til a free message frame is available, timeout, ++ * or done */ ++ hSyncs[0] = hMsgMgr->hSyncEvent; ++ hSyncs[1] = hMsgQueue->hSyncDone; ++ status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout, ++ &uIndex); ++ /* Enter critical section */ ++ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); ++ if (hMsgQueue->fDone) { ++ hMsgQueue->refCount--; ++ /* Exit critical section */ ++ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ /* Signal that we're not going to access hMsgQueue ++ * anymore, so it can be deleted. */ ++ (void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck); ++ status = DSP_EFAIL; ++ } else { ++ if (DSP_SUCCEEDED(status)) { ++ if (LST_IsEmpty(hMsgMgr->msgFreeList)) { ++ status = DSP_EPOINTER; ++ goto func_cont; ++ } ++ /* Get msg from free list */ ++ pMsgFrame = (struct MSG_FRAME *) ++ LST_GetHead(hMsgMgr->msgFreeList); ++ /* Copy message into pMsg and put frame on the ++ * used list */ ++ if (pMsgFrame != NULL) { ++ pMsgFrame->msgData.msg = *pMsg; ++ pMsgFrame->msgData.dwId = ++ hMsgQueue->dwId; ++ LST_PutTail(hMsgMgr->msgUsedList, ++ (struct LST_ELEM *) ++ pMsgFrame); ++ hMsgMgr->uMsgsPending++; ++ /* Schedule a DPC, to do the actual ++ * data transfer: */ ++ IO_Schedule(hMsgMgr->hIOMgr); ++ } ++ } ++ hMsgQueue->refCount--; ++ /* Reset event if there are still frames available */ ++ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) ++ SYNC_SetEvent(hMsgMgr->hSyncEvent); ++func_cont: ++ /* Exit critical section */ ++ (void) SYNC_LeaveCS(hMsgMgr->hSyncCS); ++ } ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== WMD_MSG_RegisterNotify ======== ++ */ ++DSP_STATUS WMD_MSG_RegisterNotify(struct MSG_QUEUE *hMsgQueue, u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification) ++{ ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); ++ DBC_Require(hNotification != NULL); ++ DBC_Require(uEventMask == DSP_NODEMESSAGEREADY || uEventMask == 0); ++ DBC_Require(uNotifyType == DSP_SIGNALEVENT); ++ ++ status = NTFY_Register(hMsgQueue->hNtfy, hNotification, uEventMask, ++ uNotifyType); ++ ++ if (status == DSP_EVALUE) { ++ /* Not registered. 
Ok, since we couldn't have known. Node ++ * notifications are split between node state change handled ++ * by NODE, and message ready handled by MSG. */ ++ status = DSP_SOK; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== WMD_MSG_SetQueueId ======== ++ */ ++void WMD_MSG_SetQueueId(struct MSG_QUEUE *hMsgQueue, u32 dwId) ++{ ++ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); ++ /* DBC_Require(dwId != 0); */ ++ ++ /* ++ * A message queue must be created when a node is allocated, ++ * so that NODE_RegisterNotify() can be called before the node ++ * is created. Since we don't know the node environment until the ++ * node is created, we need this function to set hMsgQueue->dwId ++ * to the node environment, after the node is created. ++ */ ++ hMsgQueue->dwId = dwId; ++} ++ ++/* ++ * ======== AddNewMsg ======== ++ * Must be called in message manager critical section. ++ */ ++static DSP_STATUS AddNewMsg(struct LST_LIST *msgList) ++{ ++ struct MSG_FRAME *pMsg; ++ DSP_STATUS status = DSP_SOK; ++ ++ pMsg = (struct MSG_FRAME *)MEM_Calloc(sizeof(struct MSG_FRAME), ++ MEM_PAGED); ++ if (pMsg != NULL) { ++ LST_InitElem((struct LST_ELEM *) pMsg); ++ LST_PutTail(msgList, (struct LST_ELEM *) pMsg); ++ } else { ++ status = DSP_EMEMORY; ++ } ++ ++ return status; ++} ++ ++/* ++ * ======== DeleteMsgMgr ======== ++ */ ++static void DeleteMsgMgr(struct MSG_MGR *hMsgMgr) ++{ ++ DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); ++ ++ if (hMsgMgr->queueList) { ++ if (LST_IsEmpty(hMsgMgr->queueList)) { ++ LST_Delete(hMsgMgr->queueList); ++ hMsgMgr->queueList = NULL; ++ } ++ } ++ ++ if (hMsgMgr->msgFreeList) { ++ FreeMsgList(hMsgMgr->msgFreeList); ++ hMsgMgr->msgFreeList = NULL; ++ } ++ ++ if (hMsgMgr->msgUsedList) { ++ FreeMsgList(hMsgMgr->msgUsedList); ++ hMsgMgr->msgUsedList = NULL; ++ } ++ ++ if (hMsgMgr->hSyncEvent) ++ SYNC_CloseEvent(hMsgMgr->hSyncEvent); ++ ++ if (hMsgMgr->hSyncCS) ++ SYNC_DeleteCS(hMsgMgr->hSyncCS); ++ ++ MEM_FreeObject(hMsgMgr); ++} ++ ++/* ++ * ======== DeleteMsgQueue ======== ++ */ ++static void DeleteMsgQueue(struct MSG_QUEUE *hMsgQueue, u32 uNumToDSP) ++{ ++ struct MSG_MGR *hMsgMgr; ++ struct MSG_FRAME *pMsg; ++ u32 i; ++ ++ if (!MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE) ++ || !hMsgQueue->hMsgMgr || !hMsgQueue->hMsgMgr->msgFreeList) ++ goto func_end; ++ hMsgMgr = hMsgQueue->hMsgMgr; ++ ++ ++ /* Pull off uNumToDSP message frames from Msg manager and free */ ++ for (i = 0; i < uNumToDSP; i++) { ++ ++ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) { ++ pMsg = (struct MSG_FRAME *)LST_GetHead(hMsgMgr-> ++ msgFreeList); ++ MEM_Free(pMsg); ++ } else { ++ /* Cannot free all of the message frames */ ++ break; ++ } ++ } ++ ++ if (hMsgQueue->msgFreeList) { ++ FreeMsgList(hMsgQueue->msgFreeList); ++ hMsgQueue->msgFreeList = NULL; ++ } ++ ++ if (hMsgQueue->msgUsedList) { ++ FreeMsgList(hMsgQueue->msgUsedList); ++ hMsgQueue->msgUsedList = NULL; ++ } ++ ++ ++ if (hMsgQueue->hNtfy) ++ NTFY_Delete(hMsgQueue->hNtfy); ++ ++ if (hMsgQueue->hSyncEvent) ++ SYNC_CloseEvent(hMsgQueue->hSyncEvent); ++ ++ if (hMsgQueue->hSyncDone) ++ SYNC_CloseEvent(hMsgQueue->hSyncDone); ++ ++ if (hMsgQueue->hSyncDoneAck) ++ SYNC_CloseEvent(hMsgQueue->hSyncDoneAck); ++ ++ MEM_FreeObject(hMsgQueue); ++func_end: ++ return; ++ ++} ++ ++/* ++ * ======== FreeMsgList ======== ++ */ ++static void FreeMsgList(struct LST_LIST *msgList) ++{ ++ struct MSG_FRAME *pMsg; ++ ++ if (!msgList) ++ goto func_end; ++ ++ while ((pMsg = (struct MSG_FRAME *)LST_GetHead(msgList)) != NULL) ++ MEM_Free(pMsg); ++ ++ 
DBC_Assert(LST_IsEmpty(msgList)); ++ ++ LST_Delete(msgList); ++func_end: ++ return; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_msg_sm.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_msg_sm.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_msg_sm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_msg_sm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,158 @@ ++/* ++ * _msg_sm.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _msg_sm.h ======== ++ * Description: ++ * Private header file defining MSG manager objects and defines needed ++ * by IO manager. ++ * ++ * Public Functions: ++ * None. ++ * ++ * Notes: ++ * ++ *! Revision History: ++ *! ================ ++ *! 09-May-2001 jeh Code Review cleanup. ++ *! 08-Nov-2000 jeh Created. ++ */ ++ ++#ifndef _MSG_SM_ ++#define _MSG_SM_ ++ ++#include ++#include ++ ++/* ++ * These target side symbols define the beginning and ending addresses ++ * of the section of shared memory used for messages. They are ++ * defined in the *cfg.cmd file by cdb code. ++ */ ++#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG" ++#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END" ++ ++#ifndef _CHNL_WORDSIZE ++#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 2 bytes/word */ ++#endif ++ ++/* ++ * ======== MSG ======== ++ * There is a control structure for messages to the DSP, and a control ++ * structure for messages from the DSP. The shared memory region for ++ * transferring messages is partitioned as follows: ++ * ++ * ---------------------------------------------------------- ++ * |Control | Messages from DSP | Control | Messages to DSP | ++ * ---------------------------------------------------------- ++ * ++ * MSG control structure for messages to the DSP is used in the following ++ * way: ++ * ++ * bufEmpty - This flag is set to FALSE by the GPP after it has output ++ * messages for the DSP. The DSP host driver sets it to ++ * TRUE after it has copied the messages. ++ * postSWI - Set to 1 by the GPP after it has written the messages, ++ * set the size, and set bufEmpty to FALSE. ++ * The DSP Host driver uses SWI_andn of the postSWI field ++ * when a host interrupt occurs. The host driver clears ++ * this after posting the SWI. ++ * size - Number of messages to be read by the DSP. ++ * ++ * For messages from the DSP: ++ * bufEmpty - This flag is set to FALSE by the DSP after it has output ++ * messages for the GPP. The DPC on the GPP sets it to ++ * TRUE after it has copied the messages. ++ * postSWI - Set to 1 the DPC on the GPP after copying the messages. ++ * size - Number of messages to be read by the GPP. ++ */ ++struct MSG { ++ u32 bufEmpty; /* to/from DSP buffer is empty */ ++ u32 postSWI; /* Set to "1" to post MSG SWI */ ++ u32 size; /* Number of messages to/from the DSP */ ++ u32 resvd; ++} ; ++ ++/* ++ * ======== MSG_MGR ======== ++ * The MSG_MGR maintains a list of all MSG_QUEUEs. 
Each NODE object can ++ * have MSG_QUEUE to hold all messages that come up from the corresponding ++ * node on the DSP. The MSG_MGR also has a shared queue of messages ++ * ready to go to the DSP. ++ */ ++struct MSG_MGR { ++ /* The first two fields must match those in msgobj.h */ ++ u32 dwSignature; ++ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ ++ ++ struct IO_MGR *hIOMgr; /* IO manager */ ++ struct LST_LIST *queueList; /* List of MSG_QUEUEs */ ++ struct SYNC_CSOBJECT *hSyncCS; /* For critical sections */ ++ /* Signalled when MsgFrame is available */ ++ struct SYNC_OBJECT *hSyncEvent; ++ struct LST_LIST *msgFreeList; /* Free MsgFrames ready to be filled */ ++ struct LST_LIST *msgUsedList; /* MsgFrames ready to go to DSP */ ++ u32 uMsgsPending; /* # of queued messages to go to DSP */ ++ u32 uMaxMsgs; /* Max # of msgs that fit in buffer */ ++ MSG_ONEXIT onExit; /* called when RMS_EXIT is received */ ++} ; ++ ++/* ++ * ======== MSG_QUEUE ======== ++ * Each NODE has a MSG_QUEUE for receiving messages from the ++ * corresponding node on the DSP. The MSG_QUEUE object maintains a list ++ * of messages that have been sent to the host, but not yet read (MSG_Get), ++ * and a list of free frames that can be filled when new messages arrive ++ * from the DSP. ++ * The MSG_QUEUE's hSynEvent gets posted when a message is ready. ++ */ ++struct MSG_QUEUE { ++ struct LST_ELEM listElem; ++ u32 dwSignature; ++ struct MSG_MGR *hMsgMgr; ++ u32 uMaxMsgs; /* Node message depth */ ++ u32 dwId; /* Node environment pointer */ ++ struct LST_LIST *msgFreeList; /* Free MsgFrames ready to be filled */ ++ /* Filled MsgFramess waiting to be read */ ++ struct LST_LIST *msgUsedList; ++ HANDLE hArg; /* Handle passed to mgr onExit callback */ ++ struct SYNC_OBJECT *hSyncEvent; /* Signalled when message is ready */ ++ struct SYNC_OBJECT *hSyncDone; /* For synchronizing cleanup */ ++ struct SYNC_OBJECT *hSyncDoneAck; /* For synchronizing cleanup */ ++ struct NTFY_OBJECT *hNtfy; /* For notification of message ready */ ++ bool fDone; /* TRUE <==> deleting the object */ ++ u32 refCount; /* Number of pending MSG_get/put calls */ ++}; ++ ++/* ++ * ======== MSG_DSPMSG ======== ++ */ ++struct MSG_DSPMSG { ++ struct DSP_MSG msg; ++ u32 dwId; /* Identifies the node the message goes to */ ++} ; ++ ++/* ++ * ======== MSG_FRAME ======== ++ */ ++struct MSG_FRAME { ++ struct LST_ELEM listElem; ++ struct MSG_DSPMSG msgData; ++} ; ++ ++#endif /* _MSG_SM_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,384 @@ ++/* ++ * _tiomap.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== _tiomap.h ======== ++ * Description: ++ * Definitions and types private to this WMD. 
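++ * Covers the static L4 peripheral address map (physical <-> DSP virtual), the
++ * DSP external clock identifiers, and the WMD_DEV_CONTEXT device context.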
++ * ++ */ ++ ++#ifndef _TIOMAP_ ++#define _TIOMAP_ ++ ++#include ++#include ++#include ++#include /* for WMDIOCTL_EXTPROC defn */ ++#include ++#include ++ ++struct MAP_L4PERIPHERAL { ++ u32 physAddr; ++ u32 dspVirtAddr; ++} ; ++ ++#define ARM_MAILBOX_START 0xfffcf000 ++#define ARM_MAILBOX_LENGTH 0x800 ++ ++/* New Registers in OMAP3.1 */ ++ ++#define TESTBLOCK_ID_START 0xfffed400 ++#define TESTBLOCK_ID_LENGTH 0xff ++ ++/* ID Returned by OMAP1510 */ ++#define TBC_ID_VALUE 0xB47002F ++ ++#define SPACE_LENGTH 0x2000 ++#define API_CLKM_DPLL_DMA 0xfffec000 ++#define ARM_INTERRUPT_OFFSET 0xb00 ++ ++#define BIOS_24XX ++ ++#define L4_PERIPHERAL_NULL 0x0 ++#define DSPVA_PERIPHERAL_NULL 0x0 ++ ++#define MAX_LOCK_TLB_ENTRIES 15 ++ ++#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */ ++#define DSPVA_PERIPHERAL_PRM 0x1181e000 ++#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */ ++#define DSPVA_PERIPHERAL_SCM 0x1181f000 ++#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */ ++#define DSPVA_PERIPHERAL_MMU 0x11820000 ++#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */ ++#define DSPVA_PERIPHERAL_CM 0x1181c000 ++#define L4_PERIPHERAL_PER 0x48005000 /* PER */ ++#define DSPVA_PERIPHERAL_PER 0x1181d000 ++ ++#define L4_PERIPHERAL_GPIO1 0x48310000 ++#define DSPVA_PERIPHERAL_GPIO1 0x11809000 ++#define L4_PERIPHERAL_GPIO2 0x49050000 ++#define DSPVA_PERIPHERAL_GPIO2 0x1180a000 ++#define L4_PERIPHERAL_GPIO3 0x49052000 ++#define DSPVA_PERIPHERAL_GPIO3 0x1180b000 ++#define L4_PERIPHERAL_GPIO4 0x49054000 ++#define DSPVA_PERIPHERAL_GPIO4 0x1180c000 ++#define L4_PERIPHERAL_GPIO5 0x49056000 ++#define DSPVA_PERIPHERAL_GPIO5 0x1180d000 ++ ++#define L4_PERIPHERAL_IVA2WDT 0x49030000 ++#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000 ++ ++#define L4_PERIPHERAL_DISPLAY 0x48050000 ++#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000 ++ ++#define L4_PERIPHERAL_SSI 0x48058000 ++#define DSPVA_PERIPHERAL_SSI 0x11804000 ++#define L4_PERIPHERAL_GDD 0x48059000 ++#define DSPVA_PERIPHERAL_GDD 0x11805000 ++#define L4_PERIPHERAL_SS1 0x4805a000 ++#define DSPVA_PERIPHERAL_SS1 0x11806000 ++#define L4_PERIPHERAL_SS2 0x4805b000 ++#define DSPVA_PERIPHERAL_SS2 0x11807000 ++ ++#define L4_PERIPHERAL_CAMERA 0x480BC000 ++#define DSPVA_PERIPHERAL_CAMERA 0x11819000 ++ ++#define L4_PERIPHERAL_SDMA 0x48056000 ++#define DSPVA_PERIPHERAL_SDMA 0x11810000 /*0x1181d000 conflicts with PER */ ++ ++#define L4_PERIPHERAL_UART1 0x4806a000 ++#define DSPVA_PERIPHERAL_UART1 0x11811000 ++#define L4_PERIPHERAL_UART2 0x4806c000 ++#define DSPVA_PERIPHERAL_UART2 0x11812000 ++#define L4_PERIPHERAL_UART3 0x49020000 ++#define DSPVA_PERIPHERAL_UART3 0x11813000 ++ ++#define L4_PERIPHERAL_MCBSP1 0x48074000 ++#define DSPVA_PERIPHERAL_MCBSP1 0x11814000 ++#define L4_PERIPHERAL_MCBSP2 0x49022000 ++#define DSPVA_PERIPHERAL_MCBSP2 0x11815000 ++#define L4_PERIPHERAL_MCBSP3 0x49024000 ++#define DSPVA_PERIPHERAL_MCBSP3 0x11816000 ++#define L4_PERIPHERAL_MCBSP4 0x49026000 ++#define DSPVA_PERIPHERAL_MCBSP4 0x11817000 ++#define L4_PERIPHERAL_MCBSP5 0x48096000 ++#define DSPVA_PERIPHERAL_MCBSP5 0x11818000 ++ ++#define L4_PERIPHERAL_GPTIMER5 0x49038000 ++#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000 ++#define L4_PERIPHERAL_GPTIMER6 0x4903a000 ++#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000 ++#define L4_PERIPHERAL_GPTIMER7 0x4903c000 ++#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000 ++#define L4_PERIPHERAL_GPTIMER8 0x4903e000 ++#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000 ++ ++#define L4_PERIPHERAL_SPI1 0x48098000 ++#define DSPVA_PERIPHERAL_SPI1 0x1181a000 ++#define 
L4_PERIPHERAL_SPI2 0x4809a000 ++#define DSPVA_PERIPHERAL_SPI2 0x1181b000 ++ ++#define L4_PERIPHERAL_MBOX 0x48094000 ++#define DSPVA_PERIPHERAL_MBOX 0x11808000 ++ ++#define PM_GRPSEL_BASE 0x48307000 ++#define DSPVA_GRPSEL_BASE 0x11821000 ++ ++#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000 ++#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000 ++#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000 ++#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000 ++ ++/* define a static array with L4 mappings */ ++static const struct MAP_L4PERIPHERAL L4PeripheralTable[] = { ++ {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX}, ++ {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM}, ++ {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU}, ++ {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5}, ++ {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6}, ++ {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7}, ++ {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8}, ++ {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1}, ++ {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2}, ++ {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3}, ++ {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4}, ++ {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5}, ++ {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT}, ++ {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY}, ++ {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI}, ++ {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD}, ++ {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1}, ++ {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2}, ++ {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1}, ++ {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2}, ++ {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3}, ++ {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1}, ++ {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2}, ++ {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3}, ++ {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4}, ++ {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5}, ++ {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA}, ++ {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1}, ++ {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2}, ++ {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM}, ++ {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM}, ++ {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER}, ++ {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE}, ++ {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2}, ++ {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3}, ++ {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL} ++}; ++ ++/* ++ * 15 10 0 ++ * --------------------------------- ++ * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i| ++ * --------------------------------- ++ * | (class) | (module specific) | ++ * ++ * where c -> Externel Clock Command: Clk & Autoidle Disable/Enable ++ * i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3 ++ */ ++ ++/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */ ++#define MBX_PM_CLK_IDMASK 0x7F ++ ++/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */ ++#define MBX_PM_CLK_CMDSHIFT 7 ++ ++/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */ ++#define MBX_PM_CLK_CMDMASK 7 ++ ++/* MBX_PM_MAX_RESOURCES: CORE 1 Clock resources. */ ++#define MBX_CORE1_RESOURCES 7 ++ ++/* MBX_PM_MAX_RESOURCES: CORE 2 Clock Resources. */ ++#define MBX_CORE2_RESOURCES 1 ++ ++/* MBX_PM_MAX_RESOURCES: TOTAL Clock Reosurces. 
*/ ++#define MBX_PM_MAX_RESOURCES 11 ++ ++/* Power Management Commands */ ++enum BPWR_ExtClockCmd { ++ BPWR_DisableClock = 0, ++ BPWR_EnableClock, ++ BPWR_DisableAutoIdle, ++ BPWR_EnableAutoIdle ++} ; ++ ++/* OMAP242x specific resources */ ++enum BPWR_ExtClockId { ++ BPWR_GPTimer5 = 0x10, ++ BPWR_GPTimer6, ++ BPWR_GPTimer7, ++ BPWR_GPTimer8, ++ BPWR_WDTimer3, ++ BPWR_MCBSP1, ++ BPWR_MCBSP2, ++ BPWR_MCBSP3, ++ BPWR_MCBSP4, ++ BPWR_MCBSP5, ++ BPWR_SSI = 0x20 ++} ; ++ ++static const u32 BPWR_CLKID[] = { ++ (u32) BPWR_GPTimer5, ++ (u32) BPWR_GPTimer6, ++ (u32) BPWR_GPTimer7, ++ (u32) BPWR_GPTimer8, ++ (u32) BPWR_WDTimer3, ++ (u32) BPWR_MCBSP1, ++ (u32) BPWR_MCBSP2, ++ (u32) BPWR_MCBSP3, ++ (u32) BPWR_MCBSP4, ++ (u32) BPWR_MCBSP5, ++ (u32) BPWR_SSI ++}; ++ ++struct BPWR_Clk_t { ++ u32 clkId; ++ enum SERVICES_ClkId funClk; ++ enum SERVICES_ClkId intClk; ++} ; ++ ++static const struct BPWR_Clk_t BPWR_Clks[] = { ++ {(u32) BPWR_GPTimer5, SERVICESCLK_gpt5_fck, SERVICESCLK_gpt5_ick}, ++ {(u32) BPWR_GPTimer6, SERVICESCLK_gpt6_fck, SERVICESCLK_gpt6_ick}, ++ {(u32) BPWR_GPTimer7, SERVICESCLK_gpt7_fck, SERVICESCLK_gpt7_ick}, ++ {(u32) BPWR_GPTimer8, SERVICESCLK_gpt8_fck, SERVICESCLK_gpt8_ick}, ++ {(u32) BPWR_WDTimer3, SERVICESCLK_wdt3_fck, SERVICESCLK_wdt3_ick}, ++ {(u32) BPWR_MCBSP1, SERVICESCLK_mcbsp1_fck, SERVICESCLK_mcbsp1_ick}, ++ {(u32) BPWR_MCBSP2, SERVICESCLK_mcbsp2_fck, SERVICESCLK_mcbsp2_ick}, ++ {(u32) BPWR_MCBSP3, SERVICESCLK_mcbsp3_fck, SERVICESCLK_mcbsp3_ick}, ++ {(u32) BPWR_MCBSP4, SERVICESCLK_mcbsp4_fck, SERVICESCLK_mcbsp4_ick}, ++ {(u32) BPWR_MCBSP5, SERVICESCLK_mcbsp5_fck, SERVICESCLK_mcbsp5_ick}, ++ {(u32) BPWR_SSI, SERVICESCLK_ssi_fck, SERVICESCLK_ssi_ick} ++}; ++ ++/* Interrupt Register Offsets */ ++#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */ ++#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */ ++ ++#define DSP_MAILBOX1_INT 10 ++ ++/* ++ * INTH_InterruptKind_t ++ * Identify the kind of interrupt: either FIQ/IRQ ++ */ ++enum INTH_InterruptKind_t { ++ INTH_IRQ = 0, ++ INTH_FIQ = 1 ++} ; ++ ++enum INTH_SensitiveEdge_t { ++ FALLING_EDGE_SENSITIVE = 0, ++ LOW_LEVEL_SENSITIVE = 1 ++} ; ++ ++/* ++ * Bit definition of Interrupt Level Registers ++ */ ++ ++/* Mail Box defines */ ++#define MB_ARM2DSP1_REG_OFFSET 0x00 ++ ++#define MB_ARM2DSP1B_REG_OFFSET 0x04 ++ ++#define MB_DSP2ARM1B_REG_OFFSET 0x0C ++ ++#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18 ++ ++#define MB_ARM2DSP_FLAG 0x0001 ++ ++#define MBOX_ARM2DSP HW_MBOX_ID_0 ++#define MBOX_DSP2ARM HW_MBOX_ID_1 ++#define MBOX_ARM HW_MBOX_U0_ARM ++#define MBOX_DSP HW_MBOX_U1_DSP1 ++ ++#define ENABLE true ++#define DISABLE false ++ ++#define HIGH_LEVEL true ++#define LOW_LEVEL false ++ ++/* Macro's */ ++#define REG16(A) (*(REG_UWORD16 *)(A)) ++ ++#define ClearBit(reg, mask) (reg &= ~mask) ++#define SetBit(reg, mask) (reg |= mask) ++ ++#define SetGroupBits16(reg, position, width, value) \ ++ do {\ ++ reg &= ~((0xFFFF >> (16 - (width))) << (position)) ; \ ++ reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \ ++ } while (0); ++ ++#define ClearBitIndex(reg, index) (reg &= ~(1 << (index))) ++ ++/* This mini driver's device context: */ ++struct WMD_DEV_CONTEXT { ++ struct DEV_OBJECT *hDevObject; /* Handle to WCD device object. */ ++ u32 dwDspBaseAddr; /* Arm's API to DSP virtual base addr */ ++ /* ++ * DSP External memory prog address as seen virtually by the OS on ++ * the host side. 
++ */ ++ u32 dwDspExtBaseAddr; /* See the comment above */ ++ u32 dwAPIRegBase; /* API memory mapped registers */ ++ void __iomem *dwDSPMmuBase; /* DSP MMU Mapped registers */ ++ u32 dwMailBoxBase; /* Mail box mapped registers */ ++ u32 dwAPIClkBase; /* CLK Registers */ ++ u32 dwDSPClkM2Base; /* DSP Clock Module m2 */ ++ u32 dwPublicRhea; /* Pub Rhea */ ++ u32 dwIntAddr; /* MB INTR reg */ ++ u32 dwTCEndianism; /* TC Endianism register */ ++ u32 dwTestBase; /* DSP MMU Mapped registers */ ++ u32 dwSelfLoop; /* Pointer to the selfloop */ ++ u32 dwDSPStartAdd; /* API Boot vector */ ++ u32 dwInternalSize; /* Internal memory size */ ++ ++ /* ++ * Processor specific info is set when prog loaded and read from DCD. ++ * [See WMD_BRD_Ctrl()] PROC info contains DSP-MMU TLB entries. ++ */ ++ /* DMMU TLB entries */ ++ struct WMDIOCTL_EXTPROC aTLBEntry[WMDIOCTL_NUMOFMMUTLB]; ++ u32 dwBrdState; /* Last known board state. */ ++ u32 ulIntMask; /* int mask */ ++ u16 ioBase; /* Board I/O base */ ++ u32 numTLBEntries; /* DSP MMU TLB entry counter */ ++ u32 fixedTLBEntries; /* Fixed DSPMMU TLB entry count */ ++ ++ /* TC Settings */ ++ bool tcWordSwapOn; /* Traffic Controller Word Swap */ ++ struct PgTableAttrs *pPtAttrs; ++ u32 uDspPerClks; ++} ; ++ ++ /* ++ * ======== WMD_TLB_DspVAToMpuPA ======== ++ * Given a DSP virtual address, traverse the page table and return ++ * a corresponding MPU physical address and size. ++ */ ++extern DSP_STATUS WMD_TLB_DspVAToMpuPA(struct WMD_DEV_CONTEXT *pDevContext, ++ IN u32 ulVirtAddr, ++ OUT u32 *ulPhysAddr, ++ OUT u32 *sizeTlb); ++ ++#endif /* _TIOMAP_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap_io.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap_io.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,427 @@ ++/* ++ * tiomap_io.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _tiomap_io.c ======== ++ * Description: ++ * Implementation for the io read/write routines. ++ * ++ *! Revision History ++ *! ================ ++ *! 16-Feb-2004 vp: Fixed warning in WriteDspData function. ++ *! 16-Apr-2003 vp: Added support for TC word swap ++ *! 26-Feb-2003 vp: Fixed issue with EXT_BEG and EXT_END address. ++ *! 24-Feb-2003 vp: Ported to Linux platform ++ *! 08-Oct-2002 rr: Created. 
++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++ ++/* ----------------------------------- specific to this file */ ++#include "_tiomap.h" ++#include "_tiomap_pwr.h" ++#include "tiomap_io.h" ++ ++static u32 ulExtBase; ++static u32 ulExtEnd; ++ ++static u32 ulShm0End; ++static u32 ulDynExtBase; ++static u32 ulTraceSecBeg; ++static u32 ulTraceSecEnd; ++static u32 ulShmBaseVirt; ++ ++bool bSymbolsReloaded = true; ++ ++/* ++ * ======== ReadExtDspData ======== ++ * Copies DSP external memory buffers to the host side buffers. ++ */ ++DSP_STATUS ReadExtDspData(struct WMD_DEV_CONTEXT *hDevContext, ++ OUT u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ u32 offset; ++ u32 ulTLBBaseVirt = 0; ++ u32 ulShmOffsetVirt = 0; ++ u32 dwExtProgVirtMem; ++ u32 dwBaseAddr = pDevContext->dwDspExtBaseAddr; ++ bool bTraceRead = false; ++ ++ DBG_Trace(DBG_ENTER, "ReadExtDspData," ++ "hDevContext: 0x%x\n\t\tpbHostBuf: 0x%x" ++ "\n\t\tdwDSPAddr: 0x%x\n\t\tulNumBytes: 0x%x\n\t\t" ++ "ulMemType: 0x%x\n", pDevContext, pbHostBuf, dwDSPAddr, ++ ulNumBytes, ulMemType); ++ ++ if (!ulShmBaseVirt) { ++ status = DEV_GetSymbol(pDevContext->hDevObject, ++ SHMBASENAME, &ulShmBaseVirt); ++ } ++ DBC_Assert(ulShmBaseVirt != 0); ++ ++ /* Check if it is a read of Trace section */ ++ if (!ulTraceSecBeg) { ++ status = DEV_GetSymbol(pDevContext->hDevObject, ++ DSP_TRACESEC_BEG, &ulTraceSecBeg); ++ } ++ DBC_Assert(ulTraceSecBeg != 0); ++ ++ if (DSP_SUCCEEDED(status) && !ulTraceSecEnd) { ++ status = DEV_GetSymbol(pDevContext->hDevObject, ++ DSP_TRACESEC_END, &ulTraceSecEnd); ++ } ++ DBC_Assert(ulTraceSecEnd != 0); ++ ++ if (DSP_SUCCEEDED(status)) { ++ if ((dwDSPAddr <= ulTraceSecEnd) && ++ (dwDSPAddr >= ulTraceSecBeg)) { ++ DBG_Trace(DBG_LEVEL5, "Reading from DSP Trace" ++ "section 0x%x \n", dwDSPAddr); ++ bTraceRead = true; ++ } ++ } ++ ++ /* If reading from TRACE, force remap/unmap */ ++ if ((bTraceRead) && dwBaseAddr) { ++ dwBaseAddr = 0; ++ pDevContext->dwDspExtBaseAddr = 0; ++ } ++ ++ if (!dwBaseAddr) { ++ /* Initialize ulExtBase and ulExtEnd */ ++ ulExtBase = 0; ++ ulExtEnd = 0; ++ ++ /* Get DYNEXT_BEG, EXT_BEG and EXT_END.*/ ++ if (DSP_SUCCEEDED(status) && !ulDynExtBase) { ++ status = DEV_GetSymbol(pDevContext->hDevObject, ++ DYNEXTBASE, &ulDynExtBase); ++ } ++ DBC_Assert(ulDynExtBase != 0); ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetSymbol(pDevContext->hDevObject, ++ EXTBASE, &ulExtBase); ++ } ++ DBC_Assert(ulExtBase != 0); ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = DEV_GetSymbol(pDevContext->hDevObject, ++ EXTEND, &ulExtEnd); ++ } ++ DBC_Assert(ulExtEnd != 0); ++ ++ /* Trace buffer is right after the SHM SEG0, ++ * so set the base address to SHMBASE */ ++ if (bTraceRead) { ++ ulExtBase = ulShmBaseVirt; ++ ulExtEnd = ulTraceSecEnd; ++ } ++ ++ DBC_Assert(ulExtEnd != 0); ++ DBC_Assert(ulExtEnd > ulExtBase); ++ ++ if (ulExtEnd < ulExtBase) ++ status = DSP_EFAIL; ++ ++ if (DSP_SUCCEEDED(status)) { ++ ulTLBBaseVirt = ++ pDevContext->aTLBEntry[0].ulDspVa * DSPWORDSIZE; ++ DBC_Assert(ulTLBBaseVirt <= ulShmBaseVirt); ++ dwExtProgVirtMem = pDevContext->aTLBEntry[0].ulGppVa; ++ ++ if (bTraceRead) { ++ 
DBG_Trace(DBG_LEVEL7, "ReadExtDspData: " ++ "GPP VA pointing to SHMMEMBASE 0x%x \n", ++ dwExtProgVirtMem); ++ } else { ++ ulShmOffsetVirt = ulShmBaseVirt - ulTLBBaseVirt; ++ ulShmOffsetVirt += PG_ALIGN_HIGH(ulExtEnd - ++ ulDynExtBase + 1, ++ HW_PAGE_SIZE_64KB); ++ dwExtProgVirtMem -= ulShmOffsetVirt; ++ dwExtProgVirtMem += (ulExtBase - ulDynExtBase); ++ DBG_Trace(DBG_LEVEL7, "ReadExtDspData: " ++ "GPP VA pointing to EXTMEMBASE 0x%x \n", ++ dwExtProgVirtMem); ++ pDevContext->dwDspExtBaseAddr = ++ dwExtProgVirtMem; ++ ++ /* This dwDspExtBaseAddr will get cleared only when the board is ++ * stopped. */ ++ if (!pDevContext->dwDspExtBaseAddr) { ++ status = DSP_EFAIL; ++ DBG_Trace(DBG_LEVEL7, "ReadExtDspData: " ++ "failed to Map the program memory\n"); ++ } ++ } ++ ++ dwBaseAddr = dwExtProgVirtMem; ++ } ++ } ++ ++ if (!dwBaseAddr || !ulExtBase || !ulExtEnd) { ++ DBG_Trace(DBG_LEVEL7, ++ "Symbols missing for Ext Prog reading \n"); ++ status = DSP_EFAIL; ++ } ++ ++ offset = dwDSPAddr - ulExtBase; ++ ++ if (DSP_SUCCEEDED(status)) ++ memcpy(pbHostBuf, (u8 *)dwBaseAddr+offset, ulNumBytes); ++ ++ return status; ++} ++/* ++ * ======== WriteDspData ======== ++ * purpose: ++ * Copies buffers to the DSP internal/external memory. ++ */ ++DSP_STATUS WriteDspData(struct WMD_DEV_CONTEXT *hDevContext, IN u8 *pbHostBuf, ++ u32 dwDSPAddr, u32 ulNumBytes, u32 ulMemType) ++{ ++ u32 offset; ++ u32 dwBaseAddr = hDevContext->dwDspBaseAddr; ++ struct CFG_HOSTRES resources; ++ DSP_STATUS status; ++ u32 base1, base2, base3; ++ base1 = OMAP_DSP_MEM1_SIZE; ++ base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE; ++ base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE; ++ DBG_Trace(DBG_ENTER, "Entered WriteDspData \n"); ++ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ ++ offset = dwDSPAddr - hDevContext->dwDSPStartAdd; ++ if (offset < base1) { ++ dwBaseAddr = MEM_LinearAddress(resources.dwMemBase[2], ++ resources.dwMemLength[2]); ++ } else if (offset > base1 && offset < base2+OMAP_DSP_MEM2_SIZE) { ++ dwBaseAddr = MEM_LinearAddress(resources.dwMemBase[3], ++ resources.dwMemLength[3]); ++ offset = offset - base2; ++ } else if (offset >= base2+OMAP_DSP_MEM2_SIZE && ++ offset < base3 + OMAP_DSP_MEM3_SIZE) { ++ dwBaseAddr = MEM_LinearAddress(resources.dwMemBase[4], ++ resources.dwMemLength[4]); ++ offset = offset - base3; ++ } else{ ++ status = DSP_EFAIL; ++ return status; ++ } ++ if (ulNumBytes) ++ memcpy((u8 *) (dwBaseAddr+offset), pbHostBuf, ulNumBytes); ++ else ++ *((u32 *) pbHostBuf) = dwBaseAddr+offset; ++ ++ return status; ++} ++ ++/* ++ * ======== WriteExtDspData ======== ++ * purpose: ++ * Copies buffers to the external memory. 
++ * ++ */ ++DSP_STATUS WriteExtDspData(struct WMD_DEV_CONTEXT *pDevContext, ++ IN u8 *pbHostBuf, u32 dwDSPAddr, u32 ulNumBytes, ++ u32 ulMemType, bool bDynamicLoad) ++{ ++ u32 dwBaseAddr = pDevContext->dwDspExtBaseAddr; ++ u32 dwOffset = 0; ++ u8 bTempByte1, bTempByte2; ++ u8 remainByte[4]; ++ s32 i; ++ DSP_STATUS retVal = DSP_SOK; ++ u32 dwExtProgVirtMem; ++ u32 ulTLBBaseVirt = 0; ++ u32 ulShmOffsetVirt = 0; ++ struct CFG_HOSTRES hostRes; ++ bool bTraceLoad = false; ++ bTempByte1 = 0x0; ++ bTempByte2 = 0x0; ++ ++ DBG_Trace(DBG_ENTER, "Entered WriteExtDspData dwDSPAddr 0x%x " ++ "ulNumBytes 0x%x \n", dwDSPAddr, ulNumBytes); ++ if (bSymbolsReloaded) { ++ /* Check if it is a load to Trace section */ ++ retVal = DEV_GetSymbol(pDevContext->hDevObject, ++ DSP_TRACESEC_BEG, &ulTraceSecBeg); ++ if (DSP_SUCCEEDED(retVal)) ++ retVal = DEV_GetSymbol(pDevContext->hDevObject, ++ DSP_TRACESEC_END, &ulTraceSecEnd); ++ } ++ if (DSP_SUCCEEDED(retVal)) { ++ if ((dwDSPAddr <= ulTraceSecEnd) && ++ (dwDSPAddr >= ulTraceSecBeg)) { ++ DBG_Trace(DBG_LEVEL5, "Writing to DSP Trace " ++ "section 0x%x \n", dwDSPAddr); ++ bTraceLoad = true; ++ } ++ } ++ ++ /* If dynamic, force remap/unmap */ ++ if ((bDynamicLoad || bTraceLoad) && dwBaseAddr) { ++ dwBaseAddr = 0; ++ MEM_UnmapLinearAddress((void *)pDevContext->dwDspExtBaseAddr); ++ pDevContext->dwDspExtBaseAddr = 0x0; ++ } ++ if (!dwBaseAddr) { ++ if (bSymbolsReloaded) ++ /* Get SHM_BEG EXT_BEG and EXT_END. */ ++ retVal = DEV_GetSymbol(pDevContext->hDevObject, ++ SHMBASENAME, &ulShmBaseVirt); ++ DBC_Assert(ulShmBaseVirt != 0); ++ if (bDynamicLoad) { ++ if (DSP_SUCCEEDED(retVal)) { ++ if (bSymbolsReloaded) ++ retVal = DEV_GetSymbol(pDevContext-> ++ hDevObject, DYNEXTBASE, ++ &ulExtBase); ++ } ++ DBC_Assert(ulExtBase != 0); ++ if (DSP_SUCCEEDED(retVal)) { ++ /* DR OMAPS00013235 : DLModules array may be ++ * in EXTMEM. It is expected that DYNEXTMEM and ++ * EXTMEM are contiguous, so checking for the ++ * upper bound at EXTEND should be Ok. */ ++ if (bSymbolsReloaded) ++ retVal = DEV_GetSymbol(pDevContext-> ++ hDevObject, EXTEND, &ulExtEnd); ++ } ++ } else { ++ if (bSymbolsReloaded) { ++ if (DSP_SUCCEEDED(retVal)) ++ retVal = DEV_GetSymbol(pDevContext-> ++ hDevObject, EXTBASE, ++ &ulExtBase); ++ DBC_Assert(ulExtBase != 0); ++ if (DSP_SUCCEEDED(retVal)) ++ retVal = DEV_GetSymbol(pDevContext-> ++ hDevObject, EXTEND, &ulExtEnd); ++ } ++ } ++ /* Trace buffer it right after the SHM SEG0, so set the ++ * base address to SHMBASE */ ++ if (bTraceLoad) ++ ulExtBase = ulShmBaseVirt; ++ ++ DBC_Assert(ulExtEnd != 0); ++ DBC_Assert(ulExtEnd > ulExtBase); ++ if (ulExtEnd < ulExtBase) ++ retVal = DSP_EFAIL; ++ ++ if (DSP_SUCCEEDED(retVal)) { ++ ulTLBBaseVirt = pDevContext->aTLBEntry[0].ulDspVa * ++ DSPWORDSIZE; ++ DBC_Assert(ulTLBBaseVirt <= ulShmBaseVirt); ++ ++ if (bSymbolsReloaded) { ++ if (DSP_SUCCEEDED(retVal)) { ++ retVal = DEV_GetSymbol(pDevContext-> ++ hDevObject, DSP_TRACESEC_END, ++ &ulShm0End); ++ } ++ if (DSP_SUCCEEDED(retVal)) { ++ retVal = DEV_GetSymbol(pDevContext-> ++ hDevObject, DYNEXTBASE, ++ &ulDynExtBase); ++ } ++ } ++ ulShmOffsetVirt = ulShmBaseVirt - ulTLBBaseVirt; ++ if (bTraceLoad) { ++ dwExtProgVirtMem = pDevContext->aTLBEntry[0]. 
++ ulGppVa; ++ } else { ++ CFG_GetHostResources( ++ (struct CFG_DEVNODE *) ++ DRV_GetFirstDevExtension(), &hostRes); ++ dwExtProgVirtMem = hostRes.dwMemBase[1]; ++ dwExtProgVirtMem += (ulExtBase - ulDynExtBase); ++ } ++ DBG_Trace(DBG_LEVEL7, "WriteExtDspData: GPP VA " ++ "pointing to EXTMEMBASE 0x%x \n", ++ dwExtProgVirtMem); ++ ++ pDevContext->dwDspExtBaseAddr = ++ (u32)MEM_LinearAddress((void *) ++ TO_VIRTUAL_UNCACHED(dwExtProgVirtMem), ulExtEnd ++ - ulExtBase); ++ dwBaseAddr += pDevContext->dwDspExtBaseAddr; ++ /* This dwDspExtBaseAddr will get cleared only when ++ * the board is stopped. */ ++ if (!pDevContext->dwDspExtBaseAddr) { ++ retVal = DSP_EFAIL; ++ DBG_Trace(DBG_LEVEL7, "WriteExtDspData: failed " ++ "to Map the program memory\n"); ++ } ++ } ++ } ++ if (!dwBaseAddr || !ulExtBase || !ulExtEnd) { ++ DBG_Trace(DBG_LEVEL7, "Symbols missing for Ext Prog loading\n"); ++ retVal = DSP_EFAIL; ++ } ++ if (DSP_SUCCEEDED(retVal)) { ++ for (i = 0; i < 4; i++) ++ remainByte[i] = 0x0; ++ ++ dwOffset = dwDSPAddr - ulExtBase; ++ /* Also make sure the dwDSPAddr is < ulExtEnd */ ++ if (dwDSPAddr > ulExtEnd || dwOffset > dwDSPAddr) { ++ DBG_Trace(DBG_LEVEL7, "We can not load at this address " ++ "dwDSPAddr=0x%x, ulExt/DynBase=0x%x, " ++ "ulExtEnd=0x%x\n", dwDSPAddr, ulExtBase, ++ ulExtEnd); ++ retVal = DSP_EFAIL; ++ } ++ } ++ if (DSP_SUCCEEDED(retVal)) { ++ if (ulNumBytes) ++ memcpy((u8 *) dwBaseAddr + dwOffset, pbHostBuf, ++ ulNumBytes); ++ else ++ *((u32 *) pbHostBuf) = dwBaseAddr+dwOffset; ++ } ++ /* Unmap here to force remap for other Ext loads */ ++ if ((bDynamicLoad || bTraceLoad) && pDevContext->dwDspExtBaseAddr) { ++ MEM_UnmapLinearAddress((void *) pDevContext->dwDspExtBaseAddr); ++ pDevContext->dwDspExtBaseAddr = 0x0; ++ } ++ bSymbolsReloaded = false; ++ return retVal; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap_io.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap_io.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,112 @@ ++/* ++ * tiomap_io.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _tiomap_io.h ======== ++ * Description: ++ * Definitions, types and function prototypes for the io ++ * (r/w external mem). ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Oct-2002 rr: Created. ++ */ ++ ++#ifndef _TIOMAP_IO_ ++#define _TIOMAP_IO_ ++ ++/* ++ * Symbol that defines beginning of shared memory. ++ * For OMAP (Helen) this is the DSP Virtual base address of SDRAM. ++ * This will be used to program DSP MMU to map DSP Virt to GPP phys. ++ * (see dspMmuTlbEntry()). 
++ */ ++#define SHMBASENAME "SHM_BEG" ++#define EXTBASE "EXT_BEG" ++#define EXTEND "_EXT_END" ++#define DYNEXTBASE "_DYNEXT_BEG" ++#define DYNEXTEND "_DYNEXT_END" ++#define IVAEXTMEMBASE "_IVAEXTMEM_BEG" ++#define IVAEXTMEMEND "_IVAEXTMEM_END" ++ ++ ++#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG" ++#define DSP_TRACESEC_END "_BRIDGE_TRACE_END" ++ ++#define SYS_PUTCBEG "_SYS_PUTCBEG" ++#define SYS_PUTCEND "_SYS_PUTCEND" ++#define BRIDGE_SYS_PUTC_current "_BRIDGE_SYS_PUTC_current" ++ ++ ++#define WORDSWAP_ENABLE 0x3 /* Enable word swap */ ++ ++/* ++ * ======== ReadExtDspData ======== ++ * Reads it from DSP External memory. The external memory for the DSP ++ * is configured by the combination of DSP MMU and SHM Memory manager in the CDB ++ */ ++extern DSP_STATUS ReadExtDspData(struct WMD_DEV_CONTEXT *pDevContext, ++ OUT u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType); ++ ++/* ++ * ======== WriteDspData ======== ++ */ ++extern DSP_STATUS WriteDspData(struct WMD_DEV_CONTEXT *pDevContext, ++ OUT u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType); ++ ++/* ++ * ======== WriteExtDspData ======== ++ * Writes to the DSP External memory for external program. ++ * The ext mem for progra is configured by the combination of DSP MMU and ++ * SHM Memory manager in the CDB ++ */ ++extern DSP_STATUS WriteExtDspData(struct WMD_DEV_CONTEXT *pDevContext, ++ IN u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType, ++ bool bDynamicLoad); ++ ++/* ++ * ======== WriteExt32BitDspData ======== ++ * Writes 32 bit data to the external memory ++ */ ++extern inline void WriteExt32BitDspData(IN const ++ struct WMD_DEV_CONTEXT *pDevContext, IN u32 dwDSPAddr, ++ IN u32 val) ++{ ++ *(u32 *)dwDSPAddr = ((pDevContext->tcWordSwapOn) ? (((val << 16) & ++ 0xFFFF0000) | ((val >> 16) & 0x0000FFFF)) : val); ++} ++ ++/* ++ * ======== ReadExt32BitDspData ======== ++ * Reads 32 bit data from the external memory ++ */ ++extern inline u32 ReadExt32BitDspData(IN const struct WMD_DEV_CONTEXT ++ *pDevContext, IN u32 dwDSPAddr) ++{ ++ u32 retVal; ++ retVal = *(u32 *)dwDSPAddr; ++ ++ retVal = ((pDevContext->tcWordSwapOn) ? (((retVal << 16) ++ & 0xFFFF0000) | ((retVal >> 16) & 0x0000FFFF)) : retVal); ++ return retVal; ++} ++ ++#endif /* _TIOMAP_IO_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_mmu.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap_mmu.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_mmu.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap_mmu.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,53 @@ ++/* ++ * _tiomap_mmu.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _tiomap_mmu.h ======== ++ * Description: ++ * Definitions and types for the DSP MMU modules ++ * ++ *! Revision History ++ *! ================ ++ *! 19-Apr-2004 sb: Renamed HW types. Removed dspMmuTlbEntry ++ *! 05-Jan-2004 vp: Moved the file to a platform specific folder from common. ++ *! 
21-Mar-2003 sb: Added macro definition TIHEL_LARGEPAGESIZE ++ *! 08-Oct-2002 rr: Created. ++ */ ++ ++#ifndef _TIOMAP_MMU_ ++#define _TIOMAP_MMU_ ++ ++#include "_tiomap.h" ++ ++/* ++ * ======== configureDspMmu ======== ++ * ++ * Make DSP MMu page table entries. ++ * Note: Not utilizing Coarse / Fine page tables. ++ * SECTION = 1MB, LARGE_PAGE = 64KB, SMALL_PAGE = 4KB, TINY_PAGE = 1KB. ++ * DSP Byte address 0x40_0000 is word addr 0x20_0000. ++ */ ++extern void configureDspMmu(struct WMD_DEV_CONTEXT *pDevContext, ++ u32 dataBasePhys, ++ u32 dspBaseVirt, ++ u32 sizeInBytes, ++ s32 nEntryStart, ++ enum HW_Endianism_t endianism, ++ enum HW_ElementSize_t elemSize, ++ enum HW_MMUMixedSize_t mixedSize); ++ ++#endif /* _TIOMAP_MMU_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_pwr.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap_pwr.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_pwr.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap_pwr.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,102 @@ ++/* ++ * _tiomap_pwr.h ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _tiomap_pwr.h ======== ++ * Description: ++ * Definitions and types for the DSP wake/sleep routines. ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Oct-2002 rr: Created. ++ */ ++ ++#ifndef _TIOMAP_PWR_ ++#define _TIOMAP_PWR_ ++ ++/* ++ * ======== WakeDSP ========= ++ * Wakes up the DSP from DeepSleep ++ */ ++extern DSP_STATUS WakeDSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs); ++ ++/* ++ * ======== SleepDSP ========= ++ * Places the DSP in DeepSleep. ++ */ ++extern DSP_STATUS SleepDSP(struct WMD_DEV_CONTEXT *pDevContext, ++ IN u32 dwCmd, IN void *pArgs); ++/* ++ * ========InterruptDSP======== ++ * Sends an interrupt to DSP unconditionally. ++ */ ++extern void InterruptDSP(struct WMD_DEV_CONTEXT *pDevContext, IN u16 wMbVal); ++ ++/* ++ * ======== WakeDSP ========= ++ * Wakes up the DSP from DeepSleep ++ */ ++extern DSP_STATUS DSPPeripheralClkCtrl(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs); ++/* ++ * ======== handle_hibernation_fromDSP ======== ++ * Handle Hibernation requested from DSP ++ */ ++DSP_STATUS handle_hibernation_fromDSP(struct WMD_DEV_CONTEXT *pDevContext); ++/* ++ * ======== PostScale_DSP ======== ++ * Handle Post Scale notification to DSP ++ */ ++DSP_STATUS PostScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs); ++/* ++ * ======== PreScale_DSP ======== ++ * Handle Pre Scale notification to DSP ++ */ ++DSP_STATUS PreScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs); ++/* ++ * ======== handle_constraints_set ======== ++ * Handle constraints request from DSP ++ */ ++DSP_STATUS handle_constraints_set(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs); ++/* ++ * ======== DSP_PeripheralClocks_Disable ======== ++ * This function disables all the peripheral clocks that ++ * were enabled by DSP. 
Call this function only when ++ * DSP is entering Hibernation or when DSP is in ++ * Error state ++ */ ++DSP_STATUS DSP_PeripheralClocks_Disable(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs); ++ ++/* ++ * ======== DSP_PeripheralClocks_Enable ======== ++ * This function enables all the peripheral clocks that ++ * were requested by DSP. ++ */ ++DSP_STATUS DSP_PeripheralClocks_Enable(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs); ++ ++/* ++ * ======== DSPClkWakeupEventCtrl ======== ++ * This function sets the group selction bits for while ++ * enabling/disabling. ++ */ ++void DSPClkWakeupEventCtrl(u32 ClkId, bool enable); ++ ++#endif /* _TIOMAP_PWR_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_sm.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap_sm.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_sm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap_sm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,197 @@ ++/* ++ * tiomap_sm.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "_tiomap.h" ++#include "_tiomap_pwr.h" ++ ++#define MAILBOX_FIFOSTATUS(m) (0x80 + 4 * (m)) ++ ++extern unsigned short min_active_opp; ++ ++static inline unsigned int fifo_full(void __iomem *mbox_base, int mbox_id) ++{ ++ return __raw_readl(mbox_base + MAILBOX_FIFOSTATUS(mbox_id)) & 0x1; ++} ++ ++DSP_STATUS CHNLSM_EnableInterrupt(struct WMD_DEV_CONTEXT *pDevContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 numMbxMsg; ++ u32 mbxValue; ++ struct CFG_HOSTRES resources; ++ u32 devType; ++ struct IO_MGR *hIOMgr; ++ ++ DBG_Trace(DBG_ENTER, "CHNLSM_EnableInterrupt(0x%x)\n", pDevContext); ++ ++ /* Read the messages in the mailbox until the message queue is empty */ ++ ++ CFG_GetHostResources((struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &resources); ++ DEV_GetDevType(pDevContext->hDevObject, &devType); ++ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); ++ if (devType == DSP_UNIT) { ++ HW_MBOX_NumMsgGet(resources.dwMboxBase, ++ MBOX_DSP2ARM, &numMbxMsg); ++ while (numMbxMsg != 0) { ++ HW_MBOX_MsgRead(resources.dwMboxBase, ++ MBOX_DSP2ARM, ++ &mbxValue); ++ numMbxMsg--; ++ } ++ /* clear the DSP mailbox as well...*/ ++ HW_MBOX_NumMsgGet(resources.dwMboxBase, ++ MBOX_ARM2DSP, &numMbxMsg); ++ while (numMbxMsg != 0) { ++ HW_MBOX_MsgRead(resources.dwMboxBase, ++ MBOX_ARM2DSP, &mbxValue); ++ numMbxMsg--; ++ udelay(10); ++ ++ HW_MBOX_EventAck(resources.dwMboxBase, MBOX_ARM2DSP, ++ HW_MBOX_U1_DSP1, ++ HW_MBOX_INT_NEW_MSG); ++ } ++ /* Enable the new message events on this IRQ line */ ++ HW_MBOX_EventEnable(resources.dwMboxBase, ++ MBOX_DSP2ARM, ++ MBOX_ARM, ++ HW_MBOX_INT_NEW_MSG); ++ } ++ ++ return status; ++} ++ ++DSP_STATUS CHNLSM_DisableInterrupt(struct WMD_DEV_CONTEXT *pDevContext) ++{ ++ struct CFG_HOSTRES resources; ++ ++ DBG_Trace(DBG_ENTER, "CHNLSM_DisableInterrupt(0x%x)\n", pDevContext); ++ ++ 
CFG_GetHostResources((struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &resources); ++ HW_MBOX_EventDisable(resources.dwMboxBase, MBOX_DSP2ARM, ++ MBOX_ARM, HW_MBOX_INT_NEW_MSG); ++ return DSP_SOK; ++} ++ ++DSP_STATUS CHNLSM_InterruptDSP2(struct WMD_DEV_CONTEXT *pDevContext, ++ u16 wMbVal) ++{ ++ struct CFG_HOSTRES resources; ++ DSP_STATUS status = DSP_SOK; ++ unsigned long timeout; ++ u32 temp; ++ ++ status = CFG_GetHostResources((struct CFG_DEVNODE *) ++ DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ return DSP_EFAIL; ++ ++ if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION || ++ pDevContext->dwBrdState == BRD_HIBERNATION) { ++#ifdef CONFIG_BRIDGE_DVFS ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++ /* ++ * When Smartreflex is ON, DSP requires at least OPP level 3 ++ * to operate reliably. So boost lower OPP levels to OPP3. ++ */ ++ if (pdata->dsp_set_min_opp) ++ (*pdata->dsp_set_min_opp)(min_active_opp); ++#endif ++ /* Restart the peripheral clocks */ ++ DSP_PeripheralClocks_Enable(pDevContext, NULL); ++ ++ /* ++ * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control ++ * in CM_AUTOIDLE_PLL_IVA2 register ++ */ ++ *(REG_UWORD32 *)(resources.dwCmBase + 0x34) = 0x1; ++ ++ /* ++ * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to ++ * 0.75 MHz - 1.0 MHz ++ * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode ++ */ ++ temp = *(REG_UWORD32 *)(resources.dwCmBase + 0x4); ++ temp = (temp & 0xFFFFFF08) | 0x37; ++ *(REG_UWORD32 *)(resources.dwCmBase + 0x4) = temp; ++ ++ /* ++ * This delay is needed to avoid mailbox timed out ++ * issue experienced while SmartReflex is ON. ++ * TODO: Instead of 1 ms calculate proper value. ++ */ ++ mdelay(1); ++ ++ /* Restore mailbox settings */ ++ HW_MBOX_restoreSettings(resources.dwMboxBase); ++ ++ /* Access MMU SYS CONFIG register to generate a short wakeup */ ++ temp = *(REG_UWORD32 *)(resources.dwDmmuBase + 0x10); ++ ++ pDevContext->dwBrdState = BRD_RUNNING; ++ } ++ ++ timeout = jiffies + msecs_to_jiffies(1); ++ while (fifo_full((void __iomem *) resources.dwMboxBase, 0)) { ++ if (time_after(jiffies, timeout)) { ++ pr_err("dspbridge: timed out waiting for mailbox\n"); ++ return WMD_E_TIMEOUT; ++ } ++ } ++ ++ DBG_Trace(DBG_LEVEL3, "writing %x to Mailbox\n", wMbVal); ++ HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, wMbVal); ++ return DSP_SOK; ++} ++ ++bool CHNLSM_ISR(struct WMD_DEV_CONTEXT *pDevContext, bool *pfSchedDPC, ++ u16 *pwIntrVal) ++{ ++ struct CFG_HOSTRES resources; ++ u32 numMbxMsg; ++ u32 mbxValue; ++ ++ DBG_Trace(DBG_ENTER, "CHNLSM_ISR(0x%x)\n", pDevContext); ++ ++ CFG_GetHostResources((struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ ++ HW_MBOX_NumMsgGet(resources.dwMboxBase, MBOX_DSP2ARM, &numMbxMsg); ++ ++ if (numMbxMsg > 0) { ++ HW_MBOX_MsgRead(resources.dwMboxBase, MBOX_DSP2ARM, &mbxValue); ++ ++ HW_MBOX_EventAck(resources.dwMboxBase, MBOX_DSP2ARM, ++ HW_MBOX_U0_ARM, HW_MBOX_INT_NEW_MSG); ++ ++ DBG_Trace(DBG_LEVEL3, "Read %x from Mailbox\n", mbxValue); ++ *pwIntrVal = (u16) mbxValue; ++ } ++ /* Set *pfSchedDPC to true; */ ++ *pfSchedDPC = true; ++ return true; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_util.h kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap_util.h +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_util.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/_tiomap_util.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,46 @@ ++/* ++ * _tiomap_util.h ++ * ++ * 
DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== _tiomap_util.h ======== ++ * Description: ++ * Definitions and types for the utility routines. ++ * ++ *! Revision History ++ *! ================ ++ *! 08-Oct-2002 rr: Created. ++ */ ++ ++#ifndef _TIOMAP_UTIL_ ++#define _TIOMAP_UTIL_ ++ ++/* Time out Values in uSeconds*/ ++#define TIHELEN_ACKTIMEOUT 10000 ++ ++/* Time delay for HOM->SAM transition. */ ++#define WAIT_SAM 1000000 /* in usec (1000 millisec) */ ++ ++/* ++ * ======== WaitForStart ======== ++ * Wait for the singal from DSP that it has started, or time out. ++ * The argument dwSyncAddr is set to 1 before releasing the DSP. ++ * If the DSP starts running, it will clear this location. ++ */ ++extern bool WaitForStart(struct WMD_DEV_CONTEXT *pDevContext, u32 dwSyncAddr); ++ ++#endif /* _TIOMAP_UTIL_ */ ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap3430.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap3430.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2091 @@ ++/* ++ * tiomap.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== tiomap.c ======== ++ * Processor Manager Driver for TI OMAP3430 EVM. ++ * ++ * Public Function: ++ * WMD_DRV_Entry ++ * ++ *! Revision History: ++ *! ================ ++ * 26-March-2008 HK and AL: Added WMD_DEV_WalkTbl funciton. 
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ------------------------------------ Hardware Abstraction Layer */ ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Link Driver */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Local */ ++#include "_tiomap.h" ++#include "_tiomap_pwr.h" ++#include "_tiomap_mmu.h" ++#include "_tiomap_util.h" ++#include "tiomap_io.h" ++ ++ ++/* Offset in shared mem to write to in order to synchronize start with DSP */ ++#define SHMSYNCOFFSET 4 /* GPP byte offset */ ++ ++#define BUFFERSIZE 1024 ++ ++#define MMU_SECTION_ADDR_MASK 0xFFF00000 ++#define MMU_SSECTION_ADDR_MASK 0xFF000000 ++#define MMU_LARGE_PAGE_MASK 0xFFFF0000 ++#define MMU_SMALL_PAGE_MASK 0xFFFFF000 ++#define PAGES_II_LVL_TABLE 512 ++#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT) ++ ++#define MMU_GFLUSH 0x60 ++ ++extern unsigned short min_active_opp; ++ ++/* Forward Declarations: */ ++static DSP_STATUS WMD_BRD_Monitor(struct WMD_DEV_CONTEXT *pDevContext); ++static DSP_STATUS WMD_BRD_Read(struct WMD_DEV_CONTEXT *pDevContext, ++ OUT u8 *pbHostBuf, ++ u32 dwDSPAddr, u32 ulNumBytes, u32 ulMemType); ++static DSP_STATUS WMD_BRD_Start(struct WMD_DEV_CONTEXT *pDevContext, ++ u32 dwDSPAddr); ++static DSP_STATUS WMD_BRD_Status(struct WMD_DEV_CONTEXT *pDevContext, ++ OUT BRD_STATUS *pdwState); ++static DSP_STATUS WMD_BRD_Stop(struct WMD_DEV_CONTEXT *pDevContext); ++static DSP_STATUS WMD_BRD_Write(struct WMD_DEV_CONTEXT *pDevContext, ++ IN u8 *pbHostBuf, ++ u32 dwDSPAddr, u32 ulNumBytes, u32 ulMemType); ++static DSP_STATUS WMD_BRD_SetState(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulBrdState); ++static DSP_STATUS WMD_BRD_MemCopy(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulDspDestAddr, u32 ulDspSrcAddr, ++ u32 ulNumBytes, u32 ulMemType); ++static DSP_STATUS WMD_BRD_MemWrite(struct WMD_DEV_CONTEXT *pDevContext, ++ IN u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType); ++static DSP_STATUS WMD_BRD_MemMap(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulMpuAddr, u32 ulVirtAddr, u32 ulNumBytes, ++ u32 ulMapAttr); ++static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulVirtAddr, u32 ulNumBytes); ++static DSP_STATUS WMD_DEV_Create(OUT struct WMD_DEV_CONTEXT **ppDevContext, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CFG_HOSTRES *pConfig, ++ IN CONST struct CFG_DSPRES *pDspConfig); ++static DSP_STATUS WMD_DEV_Ctrl(struct WMD_DEV_CONTEXT *pDevContext, u32 dwCmd, ++ IN OUT void *pArgs); ++static DSP_STATUS WMD_DEV_Destroy(struct WMD_DEV_CONTEXT *pDevContext); ++static u32 user_va2pa(struct mm_struct *mm, u32 address); ++static DSP_STATUS PteUpdate(struct WMD_DEV_CONTEXT *hDevContext, u32 pa, ++ u32 va, u32 size, ++ struct HW_MMUMapAttrs_t *mapAttrs); ++static DSP_STATUS PteSet(struct PgTableAttrs *pt, u32 pa, u32 va, ++ u32 size, struct HW_MMUMapAttrs_t *attrs); ++static DSP_STATUS 
MemMapVmalloc(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulMpuAddr, u32 ulVirtAddr, ++ u32 ulNumBytes, struct HW_MMUMapAttrs_t *hwAttrs); ++ ++#ifdef CONFIG_BRIDGE_DEBUG ++static void GetHWRegs(void __iomem *prm_base, void __iomem *cm_base) ++{ ++ u32 temp; ++ temp = __raw_readl((cm_base) + 0x00); ++ DBG_Trace(DBG_LEVEL6, "CM_FCLKEN_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((cm_base) + 0x10); ++ DBG_Trace(DBG_LEVEL6, "CM_ICLKEN1_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((cm_base) + 0x20); ++ DBG_Trace(DBG_LEVEL6, "CM_IDLEST_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((cm_base) + 0x48); ++ DBG_Trace(DBG_LEVEL6, "CM_CLKSTCTRL_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((cm_base) + 0x4c); ++ DBG_Trace(DBG_LEVEL6, "CM_CLKSTST_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((prm_base) + 0x50); ++ DBG_Trace(DBG_LEVEL6, "RM_RSTCTRL_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((prm_base) + 0x58); ++ DBG_Trace(DBG_LEVEL6, "RM_RSTST_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((prm_base) + 0xE0); ++ DBG_Trace(DBG_LEVEL6, "PM_PWSTCTRL_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((prm_base) + 0xE4); ++ DBG_Trace(DBG_LEVEL6, "PM_PWSTST_IVA2 = 0x%x \n", temp); ++ temp = __raw_readl((cm_base) + 0xA10); ++ DBG_Trace(DBG_LEVEL6, "CM_ICLKEN1_CORE = 0x%x \n", temp); ++} ++#else ++static inline void GetHWRegs(void __iomem *prm_base, void __iomem *cm_base) ++{ ++} ++#endif ++ ++/* ----------------------------------- Globals */ ++ ++/* Attributes of L2 page tables for DSP MMU */ ++struct PageInfo { ++ u32 numEntries; /* Number of valid PTEs in the L2 PT */ ++} ; ++ ++/* Attributes used to manage the DSP MMU page tables */ ++struct PgTableAttrs { ++ struct SYNC_CSOBJECT *hCSObj; /* Critical section object handle */ ++ ++ u32 L1BasePa; /* Physical address of the L1 PT */ ++ u32 L1BaseVa; /* Virtual address of the L1 PT */ ++ u32 L1size; /* Size of the L1 PT */ ++ u32 L1TblAllocPa; ++ /* Physical address of Allocated mem for L1 table. May not be aligned */ ++ u32 L1TblAllocVa; ++ /* Virtual address of Allocated mem for L1 table. May not be aligned */ ++ u32 L1TblAllocSz; ++ /* Size of consistent memory allocated for L1 table. ++ * May not be aligned */ ++ ++ u32 L2BasePa; /* Physical address of the L2 PT */ ++ u32 L2BaseVa; /* Virtual address of the L2 PT */ ++ u32 L2size; /* Size of the L2 PT */ ++ u32 L2TblAllocPa; ++ /* Physical address of Allocated mem for L2 table. May not be aligned */ ++ u32 L2TblAllocVa; ++ /* Virtual address of Allocated mem for L2 table. May not be aligned */ ++ u32 L2TblAllocSz; ++ /* Size of consistent memory allocated for L2 table. ++ * May not be aligned */ ++ ++ u32 L2NumPages; /* Number of allocated L2 PT */ ++ struct PageInfo *pgInfo; /* Array [L2NumPages] of L2 PT info structs */ ++} ; ++ ++/* ++ * If dsp_debug is true, do not branch to the DSP entry point and wait for DSP ++ * to boot ++ */ ++extern s32 dsp_debug; ++ ++/* ++ * This mini driver's function interface table. ++ */ ++static struct WMD_DRV_INTERFACE drvInterfaceFxns = { ++ WCD_MAJOR_VERSION, /* WCD ver. for which this mini driver is built. 
*/ ++ WCD_MINOR_VERSION, ++ WMD_DEV_Create, ++ WMD_DEV_Destroy, ++ WMD_DEV_Ctrl, ++ WMD_BRD_Monitor, ++ WMD_BRD_Start, ++ WMD_BRD_Stop, ++ WMD_BRD_Status, ++ WMD_BRD_Read, ++ WMD_BRD_Write, ++ WMD_BRD_SetState, ++ WMD_BRD_MemCopy, ++ WMD_BRD_MemWrite, ++ WMD_BRD_MemMap, ++ WMD_BRD_MemUnMap, ++ /* The following CHNL functions are provided by chnl_io.lib: */ ++ WMD_CHNL_Create, ++ WMD_CHNL_Destroy, ++ WMD_CHNL_Open, ++ WMD_CHNL_Close, ++ WMD_CHNL_AddIOReq, ++ WMD_CHNL_GetIOC, ++ WMD_CHNL_CancelIO, ++ WMD_CHNL_FlushIO, ++ WMD_CHNL_GetInfo, ++ WMD_CHNL_GetMgrInfo, ++ WMD_CHNL_Idle, ++ WMD_CHNL_RegisterNotify, ++ /* The following DEH functions are provided by tihelen_ue_deh.c */ ++ WMD_DEH_Create, ++ WMD_DEH_Destroy, ++ WMD_DEH_Notify, ++ WMD_DEH_RegisterNotify, ++ WMD_DEH_GetInfo, ++ /* The following IO functions are provided by chnl_io.lib: */ ++ WMD_IO_Create, ++ WMD_IO_Destroy, ++ WMD_IO_OnLoaded, ++ WMD_IO_GetProcLoad, ++ /* The following MSG functions are provided by chnl_io.lib: */ ++ WMD_MSG_Create, ++ WMD_MSG_CreateQueue, ++ WMD_MSG_Delete, ++ WMD_MSG_DeleteQueue, ++ WMD_MSG_Get, ++ WMD_MSG_Put, ++ WMD_MSG_RegisterNotify, ++ WMD_MSG_SetQueueId, ++}; ++ ++static inline void tlb_flush_all(const void __iomem *base) ++{ ++ __raw_writeb(__raw_readb(base + MMU_GFLUSH) | 1, base + MMU_GFLUSH); ++} ++ ++static inline void flush_all(struct WMD_DEV_CONTEXT *pDevContext) ++{ ++ if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION || ++ pDevContext->dwBrdState == BRD_HIBERNATION) ++ WakeDSP(pDevContext, NULL); ++ ++ tlb_flush_all(pDevContext->dwDSPMmuBase); ++} ++ ++static void bad_page_dump(u32 pa, struct page *pg) ++{ ++ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); ++ pr_emerg("Bad page state in process '%s'\n" ++ "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" ++ "Backtrace:\n", ++ current->comm, pg, (int)(2*sizeof(unsigned long)), ++ (unsigned long)pg->flags, pg->mapping, ++ page_mapcount(pg), page_count(pg)); ++ BUG(); ++} ++ ++/* ++ * ======== WMD_DRV_Entry ======== ++ * purpose: ++ * Mini Driver entry point. ++ */ ++void WMD_DRV_Entry(OUT struct WMD_DRV_INTERFACE **ppDrvInterface, ++ IN CONST char *pstrWMDFileName) ++{ ++ ++ DBC_Require(pstrWMDFileName != NULL); ++ DBG_Trace(DBG_ENTER, "In the WMD_DRV_Entry \n"); ++ ++ IO_SM_init(); /* Initialization of io_sm module */ ++ ++ if (strcmp(pstrWMDFileName, "UMA") == 0) ++ *ppDrvInterface = &drvInterfaceFxns; ++ else ++ DBG_Trace(DBG_LEVEL7, "WMD_DRV_Entry Unknown WMD file name"); ++ ++} ++ ++/* ++ * ======== WMD_BRD_Monitor ======== ++ * purpose: ++ * This WMD_BRD_Monitor puts DSP into a Loadable state. ++ * i.e Application can load and start the device. ++ * ++ * Preconditions: ++ * Device in 'OFF' state. 
++ */ ++static DSP_STATUS WMD_BRD_Monitor(struct WMD_DEV_CONTEXT *hDevContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ struct CFG_HOSTRES resources; ++ u32 temp; ++ enum HW_PwrState_t pwrState; ++ ++ DBG_Trace(DBG_ENTER, "Board in the monitor state \n"); ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ goto error_return; ++ ++ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); ++ HW_PWRST_IVA2RegGet(resources.dwPrmBase, &temp); ++ if ((temp & 0x03) != 0x03 || (temp & 0x03) != 0x02) { ++ /* IVA2 is not in ON state */ ++ /* Read and set PM_PWSTCTRL_IVA2 to ON */ ++ HW_PWR_IVA2PowerStateSet(resources.dwPrmBase, ++ HW_PWR_DOMAIN_DSP, ++ HW_PWR_STATE_ON); ++ /* Set the SW supervised state transition */ ++ HW_PWR_CLKCTRL_IVA2RegSet(resources.dwCmBase, HW_SW_SUP_WAKEUP); ++ /* Wait until the state has moved to ON */ ++ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, ++ &pwrState); ++ /* Disable Automatic transition */ ++ HW_PWR_CLKCTRL_IVA2RegSet(resources.dwCmBase, HW_AUTOTRANS_DIS); ++ } ++ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Monitor - Middle ****** \n"); ++ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); ++ HW_RST_UnReset(resources.dwPrmBase, HW_RST2_IVA2); ++ CLK_Enable(SERVICESCLK_iva2_ck); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* set the device state to IDLE */ ++ pDevContext->dwBrdState = BRD_IDLE; ++ } ++error_return: ++ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Monitor - End ****** \n"); ++ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); ++ return status; ++} ++ ++/* ++ * ======== WMD_BRD_Read ======== ++ * purpose: ++ * Reads buffers for DSP memory. ++ */ ++static DSP_STATUS WMD_BRD_Read(struct WMD_DEV_CONTEXT *hDevContext, ++ OUT u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ u32 offset; ++ u32 dspBaseAddr = hDevContext->dwDspBaseAddr; ++ ++ DBG_Trace(DBG_ENTER, "WMD_BRD_Read, pDevContext: 0x%x\n\t\tpbHostBuf:" ++ " 0x%x\n\t\tdwDSPAddr: 0x%x\n\t\tulNumBytes: 0x%x\n\t\t" ++ "ulMemType: 0x%x\n", pDevContext, pbHostBuf, ++ dwDSPAddr, ulNumBytes, ulMemType); ++ if (dwDSPAddr < pDevContext->dwDSPStartAdd) { ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_Read: DSP address < start address \n "); ++ status = DSP_EFAIL; ++ return status; ++ } ++ /* change here to account for the 3 bands of the DSP internal memory */ ++ if ((dwDSPAddr - pDevContext->dwDSPStartAdd) < ++ pDevContext->dwInternalSize) { ++ offset = dwDSPAddr - pDevContext->dwDSPStartAdd; ++ } else { ++ DBG_Trace(DBG_LEVEL1, ++ "**** Reading From external memory **** \n "); ++ status = ReadExtDspData(pDevContext, pbHostBuf, dwDSPAddr, ++ ulNumBytes, ulMemType); ++ return status; ++ } ++ /* copy the data from DSP memory, */ ++ memcpy(pbHostBuf, (void *)(dspBaseAddr + offset), ulNumBytes); ++ return status; ++} ++ ++/* ++ * ======== WMD_BRD_SetState ======== ++ * purpose: ++ * This routine updates the Board status. ++ */ ++static DSP_STATUS WMD_BRD_SetState(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulBrdState) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ ++ DBG_Trace(DBG_ENTER, "WMD_BRD_SetState: Board State: 0x%x \n", ++ ulBrdState); ++ pDevContext->dwBrdState = ulBrdState; ++ return status; ++} ++ ++/* ++ * ======== WMD_BRD_Start ======== ++ * purpose: ++ * Initializes DSP MMU and Starts DSP. ++ * ++ * Preconditions: ++ * a) DSP domain is 'ACTIVE'. 
++ * b) DSP_RST1 is asserted. ++ * b) DSP_RST2 is released. ++ */ ++static DSP_STATUS WMD_BRD_Start(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 dwDSPAddr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ u32 dwSyncAddr = 0; ++ u32 ulShmBase; /* Gpp Phys SM base addr(byte) */ ++ u32 ulShmBaseVirt; /* Dsp Virt SM base addr */ ++ u32 ulTLBBaseVirt; /* Base of MMU TLB entry */ ++ u32 ulShmOffsetVirt; /* offset of ulShmBaseVirt from ulTLBBaseVirt */ ++ s32 iEntryNdx; ++ s32 itmpEntryNdx = 0; /* DSP-MMU TLB entry base address */ ++ struct CFG_HOSTRES resources; ++ u32 temp; ++ u32 ulDspClkRate; ++ u32 ulDspClkAddr; ++ u32 ulBiosGpTimer; ++ u32 uClkCmd; ++ struct IO_MGR *hIOMgr; ++ u32 ulLoadMonitorTimer; ++ u32 extClkId = 0; ++ u32 tmpIndex; ++ u32 clkIdIndex = MBX_PM_MAX_RESOURCES; ++ ++ DBG_Trace(DBG_ENTER, "Entering WMD_BRD_Start:\n hDevContext: 0x%x\n\t " ++ "dwDSPAddr: 0x%x\n", hDevContext, dwDSPAddr); ++ ++ /* The device context contains all the mmu setup info from when the ++ * last dsp base image was loaded. The first entry is always ++ * SHMMEM base. */ ++ /* Get SHM_BEG - convert to byte address */ ++ (void) DEV_GetSymbol(pDevContext->hDevObject, SHMBASENAME, ++ &ulShmBaseVirt); ++ ulShmBaseVirt *= DSPWORDSIZE; ++ DBC_Assert(ulShmBaseVirt != 0); ++ /* DSP Virtual address */ ++ ulTLBBaseVirt = pDevContext->aTLBEntry[0].ulDspVa; ++ DBC_Assert(ulTLBBaseVirt <= ulShmBaseVirt); ++ ulShmOffsetVirt = ulShmBaseVirt - (ulTLBBaseVirt * DSPWORDSIZE); ++ /* Kernel logical address */ ++ ulShmBase = pDevContext->aTLBEntry[0].ulGppVa + ulShmOffsetVirt; ++ ++ DBC_Assert(ulShmBase != 0); ++ /* 2nd wd is used as sync field */ ++ dwSyncAddr = ulShmBase + SHMSYNCOFFSET; ++ /* Write a signature into the SHM base + offset; this will ++ * get cleared when the DSP program starts. 
*/ ++ if ((ulShmBaseVirt == 0) || (ulShmBase == 0)) { ++ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Start: Illegal SM base\n"); ++ status = DSP_EFAIL; ++ } else ++ *((volatile u32 *)dwSyncAddr) = 0xffffffff; ++ ++ if (DSP_SUCCEEDED(status)) { ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &resources); ++ /* Assert RST1 i.e only the RST only for DSP megacell */ ++ /* HW_RST_Reset(resources.dwPrcmBase, HW_RST1_IVA2);*/ ++ if (DSP_SUCCEEDED(status)) { ++ HW_RST_Reset(resources.dwPrmBase, HW_RST1_IVA2); ++ if (dsp_debug) { ++ /* Set the bootmode to self loop */ ++ DBG_Trace(DBG_LEVEL7, ++ "Set boot mode to self loop" ++ " for IVA2 Device\n"); ++ HW_DSPSS_BootModeSet(resources.dwSysCtrlBase, ++ HW_DSPSYSC_SELFLOOPBOOT, dwDSPAddr); ++ } else { ++ /* Set the bootmode to '0' - direct boot */ ++ DBG_Trace(DBG_LEVEL7, ++ "Set boot mode to direct" ++ " boot for IVA2 Device \n"); ++ HW_DSPSS_BootModeSet(resources.dwSysCtrlBase, ++ HW_DSPSYSC_DIRECTBOOT, dwDSPAddr); ++ } ++ } ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Reset and Unreset the RST2, so that BOOTADDR is copied to ++ * IVA2 SYSC register */ ++ HW_RST_Reset(resources.dwPrmBase, HW_RST2_IVA2); ++ udelay(100); ++ HW_RST_UnReset(resources.dwPrmBase, HW_RST2_IVA2); ++ udelay(100); ++ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Start 0 ****** \n"); ++ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); ++ /* Disbale the DSP MMU */ ++ HW_MMU_Disable(resources.dwDmmuBase); ++ /* Disable TWL */ ++ HW_MMU_TWLDisable(resources.dwDmmuBase); ++ ++ /* Only make TLB entry if both addresses are non-zero */ ++ for (iEntryNdx = 0; iEntryNdx < WMDIOCTL_NUMOFMMUTLB; ++ iEntryNdx++) { ++ if ((pDevContext->aTLBEntry[iEntryNdx].ulGppPa != 0) && ++ (pDevContext->aTLBEntry[iEntryNdx].ulDspVa != 0)) { ++ DBG_Trace(DBG_LEVEL4, "** (proc) MMU %d GppPa:" ++ " 0x%x DspVa 0x%x Size 0x%x\n", ++ itmpEntryNdx, ++ pDevContext->aTLBEntry[iEntryNdx].ulGppPa, ++ pDevContext->aTLBEntry[iEntryNdx].ulDspVa, ++ pDevContext->aTLBEntry[iEntryNdx].ulSize); ++ configureDspMmu(pDevContext, ++ pDevContext->aTLBEntry[iEntryNdx].ulGppPa, ++ pDevContext->aTLBEntry[iEntryNdx].ulDspVa * ++ DSPWORDSIZE, ++ pDevContext->aTLBEntry[iEntryNdx].ulSize, ++ itmpEntryNdx, ++ pDevContext->aTLBEntry[iEntryNdx].endianism, ++ pDevContext->aTLBEntry[iEntryNdx].elemSize, ++ pDevContext->aTLBEntry[iEntryNdx]. 
++ mixedMode); ++ itmpEntryNdx++; ++ } ++ } /* end for */ ++ } ++ ++ /* Lock the above TLB entries and get the BIOS and load monitor timer ++ * information*/ ++ if (DSP_SUCCEEDED(status)) { ++ HW_MMU_NumLockedSet(resources.dwDmmuBase, itmpEntryNdx); ++ HW_MMU_VictimNumSet(resources.dwDmmuBase, itmpEntryNdx); ++ HW_MMU_TTBSet(resources.dwDmmuBase, ++ pDevContext->pPtAttrs->L1BasePa); ++ HW_MMU_TWLEnable(resources.dwDmmuBase); ++ /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ ++ ++ ++ temp = __raw_readl((resources.dwDmmuBase) + 0x10); ++ temp = (temp & 0xFFFFFFEF) | 0x11; ++ __raw_writel(temp, (resources.dwDmmuBase) + 0x10); ++ ++ /* Let the DSP MMU run */ ++ HW_MMU_Enable(resources.dwDmmuBase); ++ ++ /* Enable the BIOS clock */ ++ (void)DEV_GetSymbol(pDevContext->hDevObject, ++ BRIDGEINIT_BIOSGPTIMER, ++ &ulBiosGpTimer); ++ DBG_Trace(DBG_LEVEL7, "BIOS GPTimer : 0x%x\n", ulBiosGpTimer); ++ (void)DEV_GetSymbol(pDevContext->hDevObject, ++ BRIDGEINIT_LOADMON_GPTIMER, ++ &ulLoadMonitorTimer); ++ DBG_Trace(DBG_LEVEL7, "Load Monitor Timer : 0x%x\n", ++ ulLoadMonitorTimer); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ if (ulLoadMonitorTimer != 0xFFFF) { ++ uClkCmd = (BPWR_DisableClock << MBX_PM_CLK_CMDSHIFT) | ++ ulLoadMonitorTimer; ++ DBG_Trace(DBG_LEVEL7, ++ "encoded LoadMonitor cmd for Disable: 0x%x\n", ++ uClkCmd); ++ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); ++ ++ extClkId = uClkCmd & MBX_PM_CLK_IDMASK; ++ for (tmpIndex = 0; tmpIndex < MBX_PM_MAX_RESOURCES; ++ tmpIndex++) { ++ if (extClkId == BPWR_CLKID[tmpIndex]) { ++ clkIdIndex = tmpIndex; ++ break; ++ } ++ } ++ ++ if (clkIdIndex < MBX_PM_MAX_RESOURCES) ++ status = CLK_Set_32KHz( ++ BPWR_Clks[clkIdIndex].funClk); ++ else ++ status = DSP_EFAIL; ++ ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, " Error while setting" ++ "LM Timer to 32KHz\n"); ++ } ++ uClkCmd = (BPWR_EnableClock << MBX_PM_CLK_CMDSHIFT) | ++ ulLoadMonitorTimer; ++ DBG_Trace(DBG_LEVEL7, ++ "encoded LoadMonitor cmd for Enable : 0x%x\n", ++ uClkCmd); ++ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); ++ ++ } else { ++ DBG_Trace(DBG_LEVEL7, ++ "Not able to get the symbol for Load " ++ "Monitor Timer\n"); ++ } ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ if (ulBiosGpTimer != 0xFFFF) { ++ uClkCmd = (BPWR_DisableClock << MBX_PM_CLK_CMDSHIFT) | ++ ulBiosGpTimer; ++ DBG_Trace(DBG_LEVEL7, "encoded BIOS GPTimer cmd for" ++ "Disable: 0x%x\n", uClkCmd); ++ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); ++ ++ extClkId = uClkCmd & MBX_PM_CLK_IDMASK; ++ ++ for (tmpIndex = 0; tmpIndex < MBX_PM_MAX_RESOURCES; ++ tmpIndex++) { ++ if (extClkId == BPWR_CLKID[tmpIndex]) { ++ clkIdIndex = tmpIndex; ++ break; ++ } ++ } ++ ++ if (clkIdIndex < MBX_PM_MAX_RESOURCES) ++ status = CLK_Set_32KHz( ++ BPWR_Clks[clkIdIndex].funClk); ++ else ++ status = DSP_EFAIL; ++ ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ " Error while setting BIOS Timer to 32KHz\n"); ++ } ++ ++ uClkCmd = (BPWR_EnableClock << MBX_PM_CLK_CMDSHIFT) | ++ ulBiosGpTimer; ++ DBG_Trace(DBG_LEVEL7, "encoded BIOS GPTimer cmd :" ++ "0x%x\n", uClkCmd); ++ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); ++ ++ } else { ++ DBG_Trace(DBG_LEVEL7, ++ "Not able to get the symbol for BIOS Timer\n"); ++ } ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Set the DSP clock rate */ ++ (void)DEV_GetSymbol(pDevContext->hDevObject, ++ "_BRIDGEINIT_DSP_FREQ", &ulDspClkAddr); ++ /*Set Autoidle Mode for IVA2 PLL */ ++ temp = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwCmBase) + 0x34)); ++ temp = (temp & 0xFFFFFFFE) | 0x1; ++ *((REG_UWORD32 *) 
((u32) (resources.dwCmBase) + 0x34)) = ++ (u32) temp; ++ DBG_Trace(DBG_LEVEL5, "WMD_BRD_Start: _BRIDGE_DSP_FREQ Addr:" ++ "0x%x \n", ulDspClkAddr); ++ if ((unsigned int *)ulDspClkAddr != NULL) { ++ /* Get the clock rate */ ++ status = CLK_GetRate(SERVICESCLK_iva2_ck, ++ &ulDspClkRate); ++ DBG_Trace(DBG_LEVEL5, ++ "WMD_BRD_Start: DSP clock rate (KHZ): 0x%x \n", ++ ulDspClkRate); ++ (void)WMD_BRD_Write(pDevContext, (u8 *)&ulDspClkRate, ++ ulDspClkAddr, sizeof(u32), 0); ++ } ++/*PM_IVA2GRPSEL_PER = 0xC0;*/ ++ temp = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ temp = (temp & 0xFFFFFF30) | 0xC0; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) = ++ (u32) temp; ++ ++/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F;*/ ++ temp = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ temp = (temp & 0xFFFFFF3F); ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) = ++ (u32) temp; ++/*CM_SLEEPDEP_PER |= 0x04;*/ ++ temp = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerBase) + 0x44)); ++ temp = (temp & 0xFFFFFFFB) | 0x04; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerBase) + 0x44)) = ++ (u32) temp; ++ ++/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions*/ ++ temp = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwCmBase) + 0x48)); ++ temp = (temp & 0xFFFFFFFC) | 0x03; ++ *((REG_UWORD32 *) ((u32) (resources.dwCmBase) + 0x48)) = ++ (u32) temp; ++ ++ /* Enable Mailbox events and also drain any pending ++ * stale messages */ ++ (void)CHNLSM_EnableInterrupt(pDevContext); ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ HW_RSTCTRL_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); ++ DBG_Trace(DBG_LEVEL7, "BRD_Start: RM_RSTCTRL_DSP = 0x%x \n", ++ temp); ++ HW_RSTST_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); ++ DBG_Trace(DBG_LEVEL7, "BRD_Start0: RM_RSTST_DSP = 0x%x \n", ++ temp); ++ ++ /* Let DSP go */ ++ DBG_Trace(DBG_LEVEL7, "Unreset, WMD_BRD_Start\n"); ++ /* Enable DSP MMU Interrupts */ ++ HW_MMU_EventEnable(resources.dwDmmuBase, ++ HW_MMU_ALL_INTERRUPTS); ++ /* release the RST1, DSP starts executing now .. 
*/ ++ HW_RST_UnReset(resources.dwPrmBase, HW_RST1_IVA2); ++ ++ HW_RSTST_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); ++ DBG_Trace(DBG_LEVEL7, "BRD_Start: RM_RSTST_DSP = 0x%x \n", ++ temp); ++ HW_RSTCTRL_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); ++ DBG_Trace(DBG_LEVEL5, "WMD_BRD_Start: CM_RSTCTRL_DSP: 0x%x \n", ++ temp); ++ DBG_Trace(DBG_LEVEL7, "Driver waiting for Sync @ 0x%x \n", ++ dwSyncAddr); ++ DBG_Trace(DBG_LEVEL7, "DSP c_int00 Address = 0x%x \n", ++ dwDSPAddr); ++ if (dsp_debug) ++ while (*((volatile u16 *)dwSyncAddr)) ++ ;; ++ } ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Wait for DSP to clear word in shared memory */ ++ /* Read the Location */ ++ if (!WaitForStart(pDevContext, dwSyncAddr)) { ++ status = WMD_E_TIMEOUT; ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_Start Failed to Synchronize\n"); ++ } ++ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); ++ if (DSP_SUCCEEDED(status)) { ++ IO_SHMsetting(hIOMgr, SHM_OPPINFO, NULL); ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_Start: OPP information initialzed\n"); ++ /* Write the synchronization bit to indicate the ++ * completion of OPP table update to DSP ++ */ ++ *((volatile u32 *)dwSyncAddr) = 0XCAFECAFE; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* update board state */ ++ pDevContext->dwBrdState = BRD_RUNNING; ++ /* (void)CHNLSM_EnableInterrupt(pDevContext);*/ ++ DBG_Trace(DBG_LEVEL7, "Device Started \n "); ++ } else { ++ pDevContext->dwBrdState = BRD_UNKNOWN; ++ DBG_Trace(DBG_LEVEL7, "Device not Started \n "); ++ } ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_BRD_Stop ======== ++ * purpose: ++ * Puts DSP in self loop. ++ * ++ * Preconditions : ++ * a) None ++ */ ++static DSP_STATUS WMD_BRD_Stop(struct WMD_DEV_CONTEXT *hDevContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ struct CFG_HOSTRES resources; ++ struct PgTableAttrs *pPtAttrs; ++ u32 dspPwrState; ++ DSP_STATUS clk_status; ++ ++ DBG_Trace(DBG_ENTER, "Entering WMD_BRD_Stop:\nhDevContext: 0x%x\n", ++ hDevContext); ++ ++ /* Disable the mail box interrupts */ ++ (void)CHNLSM_DisableInterrupt(pDevContext); ++ ++ if (pDevContext->dwBrdState == BRD_STOPPED) ++ return status; ++ ++ /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode, ++ * before turning off the clocks.. 
This is to ensure that there are no ++ * pending L3 or other transactons from IVA2 */ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &resources); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_Stop: Get Host resources failed \n"); ++ DBG_Trace(DBG_LEVEL1, "Device Stopp failed \n "); ++ return DSP_EFAIL; ++ } ++ ++ HW_PWRST_IVA2RegGet(resources.dwPrmBase, &dspPwrState); ++ if (dspPwrState != HW_PWR_STATE_OFF) { ++ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_DSPIDLE); ++ mdelay(10); ++ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); ++ udelay(50); ++ ++ clk_status = CLK_Disable(SERVICESCLK_iva2_ck); ++ if (DSP_FAILED(clk_status)) { ++ DBG_Trace(DBG_LEVEL6, ++ "\n WMD_BRD_Stop: CLK_Disable failed " ++ "for iva2_fck\n"); ++ } ++ /* IVA2 is not in OFF state */ ++ /* Set PM_PWSTCTRL_IVA2 to OFF */ ++ HW_PWR_IVA2PowerStateSet(resources.dwPrmBase, ++ HW_PWR_DOMAIN_DSP, ++ HW_PWR_STATE_OFF); ++ /* Set the SW supervised state transition for Sleep */ ++ HW_PWR_CLKCTRL_IVA2RegSet(resources.dwCmBase, HW_SW_SUP_SLEEP); ++ } else { ++ clk_status = CLK_Disable(SERVICESCLK_iva2_ck); ++ if (DSP_FAILED(clk_status)) { ++ DBG_Trace(DBG_LEVEL6, ++ "\n WMD_BRD_Stop: Else loop CLK_Disable failed" ++ " for iva2_fck\n"); ++ } ++ } ++ udelay(10); ++ /* Release the Ext Base virtual Address as the next DSP Program ++ * may have a different load address */ ++ if (pDevContext->dwDspExtBaseAddr) ++ pDevContext->dwDspExtBaseAddr = 0; ++ ++ pDevContext->dwBrdState = BRD_STOPPED; /* update board state */ ++ DBG_Trace(DBG_LEVEL1, "Device Stopped \n "); ++ /* This is a good place to clear the MMU page tables as well */ ++ if (pDevContext->pPtAttrs) { ++ pPtAttrs = pDevContext->pPtAttrs; ++ memset((u8 *) pPtAttrs->L1BaseVa, 0x00, pPtAttrs->L1size); ++ memset((u8 *) pPtAttrs->L2BaseVa, 0x00, pPtAttrs->L2size); ++ memset((u8 *) pPtAttrs->pgInfo, 0x00, ++ (pPtAttrs->L2NumPages * sizeof(struct PageInfo))); ++ } ++ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Stop - End ****** \n"); ++ HW_RST_Reset(resources.dwPrmBase, HW_RST1_IVA2); ++ HW_RST_Reset(resources.dwPrmBase, HW_RST2_IVA2); ++ ++ return status; ++} ++ ++ ++/* ++ * ======== WMD_BRD_Delete ======== ++ * purpose: ++ * Puts DSP in Low power mode ++ * ++ * Preconditions : ++ * a) None ++ */ ++static DSP_STATUS WMD_BRD_Delete(struct WMD_DEV_CONTEXT *hDevContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ struct CFG_HOSTRES resources; ++ struct PgTableAttrs *pPtAttrs; ++ DSP_STATUS clk_status; ++ ++ DBG_Trace(DBG_ENTER, "Entering WMD_BRD_Delete:\nhDevContext: 0x%x\n", ++ hDevContext); ++ ++ /* Disable the mail box interrupts */ ++ (void) CHNLSM_DisableInterrupt(pDevContext); ++ ++ if (pDevContext->dwBrdState == BRD_STOPPED) ++ return status; ++ ++ /* as per TRM, it is advised to first drive ++ * the IVA2 to 'Standby' mode, before turning off the clocks.. 
This is ++ * to ensure that there are no pending L3 or other transactons from ++ * IVA2 */ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_Stop: Get Host resources failed \n"); ++ DBG_Trace(DBG_LEVEL1, "Device Delete failed \n "); ++ return DSP_EFAIL; ++ } ++ status = SleepDSP(pDevContext, PWR_EMERGENCYDEEPSLEEP, NULL); ++ clk_status = CLK_Disable(SERVICESCLK_iva2_ck); ++ if (DSP_FAILED(clk_status)) { ++ DBG_Trace(DBG_LEVEL6, "\n WMD_BRD_Stop: CLK_Disable failed for" ++ " iva2_fck\n"); ++ } ++ /* Release the Ext Base virtual Address as the next DSP Program ++ * may have a different load address */ ++ if (pDevContext->dwDspExtBaseAddr) ++ pDevContext->dwDspExtBaseAddr = 0; ++ ++ pDevContext->dwBrdState = BRD_STOPPED; /* update board state */ ++ DBG_Trace(DBG_LEVEL1, "Device Stopped \n "); ++ /* This is a good place to clear the MMU page tables as well */ ++ if (pDevContext->pPtAttrs) { ++ pPtAttrs = pDevContext->pPtAttrs; ++ memset((u8 *)pPtAttrs->L1BaseVa, 0x00, pPtAttrs->L1size); ++ memset((u8 *)pPtAttrs->L2BaseVa, 0x00, pPtAttrs->L2size); ++ memset((u8 *)pPtAttrs->pgInfo, 0x00, ++ (pPtAttrs->L2NumPages * sizeof(struct PageInfo))); ++ } ++ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Delete - End ****** \n"); ++ HW_RST_Reset(resources.dwPrmBase, HW_RST1_IVA2); ++ HW_RST_Reset(resources.dwPrmBase, HW_RST2_IVA2); ++ ++ return status; ++} ++ ++ ++/* ++ * ======== WMD_BRD_Status ======== ++ * Returns the board status. ++ */ ++static DSP_STATUS WMD_BRD_Status(struct WMD_DEV_CONTEXT *hDevContext, ++ OUT BRD_STATUS *pdwState) ++{ ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ *pdwState = pDevContext->dwBrdState; ++ return DSP_SOK; ++} ++ ++/* ++ * ======== WMD_BRD_Write ======== ++ * Copies the buffers to DSP internal or external memory. ++ */ ++static DSP_STATUS WMD_BRD_Write(struct WMD_DEV_CONTEXT *hDevContext, ++ IN u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ ++ DBG_Trace(DBG_ENTER, "WMD_BRD_Write, pDevContext: 0x%x\n\t\t " ++ "pbHostBuf: 0x%x\n\t\tdwDSPAddr: 0x%x\n\t\tulNumBytes: " ++ "0x%x\n \t\t ulMemtype: 0x%x\n", pDevContext, pbHostBuf, ++ dwDSPAddr, ulNumBytes, ulMemType); ++ if (dwDSPAddr < pDevContext->dwDSPStartAdd) { ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_Write: DSP address < start address \n "); ++ status = DSP_EFAIL; ++ return status; ++ } ++ if ((dwDSPAddr - pDevContext->dwDSPStartAdd) < ++ pDevContext->dwInternalSize) { ++ status = WriteDspData(hDevContext, pbHostBuf, dwDSPAddr, ++ ulNumBytes, ulMemType); ++ } else { ++ status = WriteExtDspData(pDevContext, pbHostBuf, dwDSPAddr, ++ ulNumBytes, ulMemType, false); ++ } ++ ++ DBG_Trace(DBG_ENTER, "WMD_BRD_Write, memcopy : DspLogicAddr=0x%x \n", ++ pDevContext->dwDspBaseAddr); ++ return status; ++} ++ ++/* ++ * ======== WMD_DEV_Create ======== ++ * Creates a driver object. Puts DSP in self loop. 
++ */ ++static DSP_STATUS WMD_DEV_Create(OUT struct WMD_DEV_CONTEXT **ppDevContext, ++ struct DEV_OBJECT *hDevObject, ++ IN CONST struct CFG_HOSTRES *pConfig, ++ IN CONST struct CFG_DSPRES *pDspConfig) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = NULL; ++ s32 iEntryNdx; ++ s32 tcWordSwap; ++ u32 tcWordSwapSize = sizeof(tcWordSwap); ++ struct CFG_HOSTRES resources; ++ struct PgTableAttrs *pPtAttrs; ++ u32 pg_tbl_pa; ++ u32 pg_tbl_va; ++ u32 align_size; ++ ++ DBG_Trace(DBG_ENTER, "WMD_DEV_Create, ppDevContext: 0x%x\n\t\t " ++ "hDevObject: 0x%x\n\t\tpConfig: 0x%x\n\t\tpDspConfig: 0x%x\n", ++ ppDevContext, hDevObject, pConfig, pDspConfig); ++ /* Allocate and initialize a data structure to contain the mini driver ++ * state, which becomes the context for later calls into this WMD. */ ++ pDevContext = MEM_Calloc(sizeof(struct WMD_DEV_CONTEXT), MEM_NONPAGED); ++ if (!pDevContext) { ++ DBG_Trace(DBG_ENTER, "Failed to allocate mem \n"); ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_ENTER, "Failed to get host resources \n"); ++ status = DSP_EMEMORY; ++ goto func_end; ++ } ++ ++ pDevContext->dwDSPStartAdd = (u32)OMAP_GEM_BASE; ++ pDevContext->dwSelfLoop = (u32)NULL; ++ pDevContext->uDspPerClks = 0; ++ pDevContext->dwInternalSize = OMAP_DSP_SIZE; ++ /* Clear dev context MMU table entries. ++ * These get set on WMD_BRD_IOCTL() call after program loaded. */ ++ for (iEntryNdx = 0; iEntryNdx < WMDIOCTL_NUMOFMMUTLB; iEntryNdx++) { ++ pDevContext->aTLBEntry[iEntryNdx].ulGppPa = ++ pDevContext->aTLBEntry[iEntryNdx].ulDspVa = 0; ++ } ++ pDevContext->numTLBEntries = 0; ++ pDevContext->dwDspBaseAddr = (u32)MEM_LinearAddress((void *) ++ (pConfig->dwMemBase[3]), pConfig->dwMemLength[3]); ++ if (!pDevContext->dwDspBaseAddr) { ++ status = DSP_EFAIL; ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_DEV_Create: failed to Map the API memory\n"); ++ } ++ pPtAttrs = MEM_Calloc(sizeof(struct PgTableAttrs), MEM_NONPAGED); ++ if (pPtAttrs != NULL) { ++ /* Assuming that we use only DSP's memory map ++ * until 0x4000:0000 , we would need only 1024 ++ * L1 enties i.e L1 size = 4K */ ++ pPtAttrs->L1size = 0x1000; ++ align_size = pPtAttrs->L1size; ++ /* Align sizes are expected to be power of 2 */ ++ /* we like to get aligned on L1 table size */ ++ pg_tbl_va = (u32)MEM_AllocPhysMem(pPtAttrs->L1size, ++ align_size, &pg_tbl_pa); ++ ++ /* Check if the PA is aligned for us */ ++ if ((pg_tbl_pa) & (align_size-1)) { ++ /* PA not aligned to page table size , ++ * try with more allocation and align */ ++ MEM_FreePhysMem((void *)pg_tbl_va, pg_tbl_pa, ++ pPtAttrs->L1size); ++ /* we like to get aligned on L1 table size */ ++ pg_tbl_va = (u32) MEM_AllocPhysMem((pPtAttrs->L1size)*2, ++ align_size, &pg_tbl_pa); ++ /* We should be able to get aligned table now */ ++ pPtAttrs->L1TblAllocPa = pg_tbl_pa; ++ pPtAttrs->L1TblAllocVa = pg_tbl_va; ++ pPtAttrs->L1TblAllocSz = pPtAttrs->L1size * 2; ++ /* Align the PA to the next 'align' boundary */ ++ pPtAttrs->L1BasePa = ((pg_tbl_pa) + (align_size-1)) & ++ (~(align_size-1)); ++ pPtAttrs->L1BaseVa = pg_tbl_va + (pPtAttrs->L1BasePa - ++ pg_tbl_pa); ++ } else { ++ /* We got aligned PA, cool */ ++ pPtAttrs->L1TblAllocPa = pg_tbl_pa; ++ pPtAttrs->L1TblAllocVa = pg_tbl_va; ++ pPtAttrs->L1TblAllocSz = pPtAttrs->L1size; ++ pPtAttrs->L1BasePa = pg_tbl_pa; ++ pPtAttrs->L1BaseVa = pg_tbl_va; ++ } ++ if (pPtAttrs->L1BaseVa) ++ memset((u8 
*)pPtAttrs->L1BaseVa, 0x00, ++ pPtAttrs->L1size); ++ ++ /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM + ++ * L4 pages */ ++ pPtAttrs->L2NumPages = ((DMMPOOLSIZE >> 20) + 6); ++ pPtAttrs->L2size = HW_MMU_COARSE_PAGE_SIZE * ++ pPtAttrs->L2NumPages; ++ align_size = 4; /* Make it u32 aligned */ ++ /* we like to get aligned on L1 table size */ ++ pg_tbl_va = (u32)MEM_AllocPhysMem(pPtAttrs->L2size, ++ align_size, &pg_tbl_pa); ++ pPtAttrs->L2TblAllocPa = pg_tbl_pa; ++ pPtAttrs->L2TblAllocVa = pg_tbl_va; ++ pPtAttrs->L2TblAllocSz = pPtAttrs->L2size; ++ pPtAttrs->L2BasePa = pg_tbl_pa; ++ pPtAttrs->L2BaseVa = pg_tbl_va; ++ ++ if (pPtAttrs->L2BaseVa) ++ memset((u8 *)pPtAttrs->L2BaseVa, 0x00, ++ pPtAttrs->L2size); ++ ++ pPtAttrs->pgInfo = MEM_Calloc(pPtAttrs->L2NumPages * ++ sizeof(struct PageInfo), MEM_NONPAGED); ++ DBG_Trace(DBG_LEVEL1, "L1 pa %x, va %x, size %x\n L2 pa %x, va " ++ "%x, size %x\n", pPtAttrs->L1BasePa, ++ pPtAttrs->L1BaseVa, pPtAttrs->L1size, ++ pPtAttrs->L2BasePa, pPtAttrs->L2BaseVa, ++ pPtAttrs->L2size); ++ DBG_Trace(DBG_LEVEL1, "pPtAttrs %x L2 NumPages %x pgInfo %x\n", ++ pPtAttrs, pPtAttrs->L2NumPages, pPtAttrs->pgInfo); ++ } ++ if ((pPtAttrs != NULL) && (pPtAttrs->L1BaseVa != 0) && ++ (pPtAttrs->L2BaseVa != 0) && (pPtAttrs->pgInfo != NULL)) ++ pDevContext->pPtAttrs = pPtAttrs; ++ else ++ status = DSP_EMEMORY; ++ ++ if (DSP_SUCCEEDED(status)) ++ status = SYNC_InitializeCS(&pPtAttrs->hCSObj); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Set the Endianism Register */ /* Need to set this */ ++ /* Retrieve the TC u16 SWAP Option */ ++ status = REG_GetValue(NULL, CURRENTCONFIG, TCWORDSWAP, ++ (u8 *)&tcWordSwap, &tcWordSwapSize); ++ /* Save the value */ ++ pDevContext->tcWordSwapOn = tcWordSwap; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ /* Set the Clock Divisor for the DSP module */ ++ DBG_Trace(DBG_LEVEL7, "WMD_DEV_create:Reset mail box and " ++ "enable the clock \n"); ++ status = CLK_Enable(SERVICESCLK_mailbox_ick); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_DEV_create:Reset mail box and " ++ "enable the clock Fail\n"); ++ } ++ udelay(5); ++ /* 24xx-Linux MMU address is obtained from the host ++ * resources struct */ ++ pDevContext->dwDSPMmuBase = resources.dwDmmuBase; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pDevContext->hDevObject = hDevObject; ++ pDevContext->ulIntMask = 0; ++ /* Store current board state. */ ++ pDevContext->dwBrdState = BRD_STOPPED; ++ /* Return this ptr to our device state to the WCD for storage:*/ ++ *ppDevContext = pDevContext; ++ DBG_Trace(DBG_ENTER, "Device Created \n"); ++ } else { ++ if (pPtAttrs != NULL) { ++ if (pPtAttrs->hCSObj) ++ SYNC_DeleteCS(pPtAttrs->hCSObj); ++ ++ if (pPtAttrs->pgInfo) ++ MEM_Free(pPtAttrs->pgInfo); ++ ++ if (pPtAttrs->L2TblAllocVa) { ++ MEM_FreePhysMem((void *)pPtAttrs->L2TblAllocVa, ++ pPtAttrs->L2TblAllocPa, ++ pPtAttrs->L2TblAllocSz); ++ } ++ if (pPtAttrs->L1TblAllocVa) { ++ MEM_FreePhysMem((void *)pPtAttrs->L1TblAllocVa, ++ pPtAttrs->L1TblAllocPa, ++ pPtAttrs->L1TblAllocSz); ++ } ++ } ++ if (pPtAttrs) ++ MEM_Free(pPtAttrs); ++ ++ if (pDevContext) ++ MEM_Free(pDevContext); ++ ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_DEV_Create Error Device not created\n"); ++ } ++func_end: ++ return status; ++} ++ ++/* ++ * ======== WMD_DEV_Ctrl ======== ++ * Receives device specific commands. 
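The L1 page-table allocation in WMD_DEV_Create() above uses a retry-with-double-size trick to obtain a physically aligned table: if the first MEM_AllocPhysMem() result is not aligned to the table size, the block is freed, twice the size is allocated, and the base is rounded up to the next alignment boundary inside the larger block. The following standalone sketch (an illustration outside the diff, not part of the patch) shows the same arithmetic with an ordinary malloc() standing in for MEM_AllocPhysMem(), treating the pointer value as the "physical" address; alloc_aligned() and its parameter names are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for MEM_AllocPhysMem(): the "physical" address is simply the
 * numeric value of the malloc'd pointer, which is enough to demonstrate the
 * alignment arithmetic used for the L1 table. */
static void *alloc_aligned(size_t size, uintptr_t align,
                           uintptr_t *base_pa, void **raw)
{
    void *p = malloc(size);
    uintptr_t pa = (uintptr_t)p;

    if (!p)
        return NULL;
    if (pa & (align - 1)) {
        /* Misaligned: free and over-allocate so an aligned block must fit,
         * just as WMD_DEV_Create() retries with L1size * 2. */
        free(p);
        p = malloc(size * 2);
        if (!p)
            return NULL;
        pa = ((uintptr_t)p + align - 1) & ~(align - 1);  /* round up */
    }
    *raw = p;           /* original block, kept for the eventual free() */
    *base_pa = pa;
    return (void *)pa;  /* aligned base actually used for the table */
}

int main(void)
{
    uintptr_t pa;
    void *raw;
    void *table = alloc_aligned(0x1000, 0x1000, &pa, &raw);

    if (table) {
        printf("aligned base %p (raw block %p)\n", table, raw);
        free(raw);
    }
    return 0;
}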
++ */ ++static DSP_STATUS WMD_DEV_Ctrl(struct WMD_DEV_CONTEXT *pDevContext, u32 dwCmd, ++ IN OUT void *pArgs) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMDIOCTL_EXTPROC *paExtProc = (struct WMDIOCTL_EXTPROC *)pArgs; ++ s32 ndx; ++ ++ DBG_Trace(DBG_ENTER, "WMD_DEV_Ctrl, pDevContext: 0x%x\n\t\t dwCmd: " ++ "0x%x\n\t\tpArgs: 0x%x\n", pDevContext, dwCmd, pArgs); ++ switch (dwCmd) { ++ case WMDIOCTL_CHNLREAD: ++ break; ++ case WMDIOCTL_CHNLWRITE: ++ break; ++ case WMDIOCTL_SETMMUCONFIG: ++ /* store away dsp-mmu setup values for later use */ ++ for (ndx = 0; ndx < WMDIOCTL_NUMOFMMUTLB; ndx++, paExtProc++) ++ pDevContext->aTLBEntry[ndx] = *paExtProc; ++ break; ++ case WMDIOCTL_DEEPSLEEP: ++ case WMDIOCTL_EMERGENCYSLEEP: ++ /* Currently only DSP Idle is supported Need to update for ++ * later releases */ ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_DEEPSLEEP\n"); ++ status = SleepDSP(pDevContext, PWR_DEEPSLEEP, pArgs); ++ break; ++ case WMDIOCTL_WAKEUP: ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_WAKEUP\n"); ++ status = WakeDSP(pDevContext, pArgs); ++ break; ++ case WMDIOCTL_CLK_CTRL: ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_CLK_CTRL\n"); ++ status = DSP_SOK; ++ /* Looking For Baseport Fix for Clocks */ ++ status = DSPPeripheralClkCtrl(pDevContext, pArgs); ++ break; ++ case WMDIOCTL_PWR_HIBERNATE: ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_PWR_HIBERNATE\n"); ++ status = handle_hibernation_fromDSP(pDevContext); ++ break; ++ case WMDIOCTL_PRESCALE_NOTIFY: ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_PRESCALE_NOTIFY\n"); ++ status = PreScale_DSP(pDevContext, pArgs); ++ break; ++ case WMDIOCTL_POSTSCALE_NOTIFY: ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_POSTSCALE_NOTIFY\n"); ++ status = PostScale_DSP(pDevContext, pArgs); ++ break; ++ case WMDIOCTL_CONSTRAINT_REQUEST: ++ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_CONSTRAINT_REQUEST\n"); ++ status = handle_constraints_set(pDevContext, pArgs); ++ break; ++ default: ++ status = DSP_EFAIL; ++ DBG_Trace(DBG_LEVEL7, "Error in WMD_BRD_Ioctl \n"); ++ break; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_DEV_Destroy ======== ++ * Destroys the driver object. 
++ */ ++static DSP_STATUS WMD_DEV_Destroy(struct WMD_DEV_CONTEXT *hDevContext) ++{ ++ struct PgTableAttrs *pPtAttrs; ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = (struct WMD_DEV_CONTEXT *) ++ hDevContext; ++ DBG_Trace(DBG_ENTER, "Entering WMD_DEV_Destroy:n hDevContext ::0x%x\n", ++ hDevContext); ++ /* first put the device to stop state */ ++ WMD_BRD_Delete(pDevContext); ++ if (pDevContext && pDevContext->pPtAttrs) { ++ pPtAttrs = pDevContext->pPtAttrs; ++ if (pPtAttrs->hCSObj) ++ SYNC_DeleteCS(pPtAttrs->hCSObj); ++ ++ if (pPtAttrs->pgInfo) ++ MEM_Free(pPtAttrs->pgInfo); ++ ++ if (pPtAttrs->L2TblAllocVa) { ++ MEM_FreePhysMem((void *)pPtAttrs->L2TblAllocVa, ++ pPtAttrs->L2TblAllocPa, pPtAttrs-> ++ L2TblAllocSz); ++ } ++ if (pPtAttrs->L1TblAllocVa) { ++ MEM_FreePhysMem((void *)pPtAttrs->L1TblAllocVa, ++ pPtAttrs->L1TblAllocPa, pPtAttrs-> ++ L1TblAllocSz); ++ } ++ if (pPtAttrs) ++ MEM_Free(pPtAttrs); ++ ++ } ++ /* Free the driver's device context: */ ++ MEM_Free((void *) hDevContext); ++ return status; ++} ++ ++static DSP_STATUS WMD_BRD_MemCopy(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulDspDestAddr, u32 ulDspSrcAddr, ++ u32 ulNumBytes, u32 ulMemType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ u32 srcAddr = ulDspSrcAddr; ++ u32 destAddr = ulDspDestAddr; ++ u32 copyBytes = 0; ++ u32 totalBytes = ulNumBytes; ++ u8 hostBuf[BUFFERSIZE]; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ while ((totalBytes > 0) && DSP_SUCCEEDED(status)) { ++ copyBytes = totalBytes > BUFFERSIZE ? BUFFERSIZE : totalBytes; ++ /* Read from External memory */ ++ status = ReadExtDspData(hDevContext, hostBuf, srcAddr, ++ copyBytes, ulMemType); ++ if (DSP_SUCCEEDED(status)) { ++ if (destAddr < (pDevContext->dwDSPStartAdd + ++ pDevContext->dwInternalSize)) { ++ /* Write to Internal memory */ ++ status = WriteDspData(hDevContext, hostBuf, ++ destAddr, copyBytes, ulMemType); ++ } else { ++ /* Write to External memory */ ++ status = WriteExtDspData(hDevContext, hostBuf, ++ destAddr, copyBytes, ulMemType, false); ++ } ++ } ++ totalBytes -= copyBytes; ++ srcAddr += copyBytes; ++ destAddr += copyBytes; ++ } ++ return status; ++} ++ ++/* Mem Write does not halt the DSP to write unlike WMD_BRD_Write */ ++static DSP_STATUS WMD_BRD_MemWrite(struct WMD_DEV_CONTEXT *hDevContext, ++ IN u8 *pbHostBuf, u32 dwDSPAddr, ++ u32 ulNumBytes, u32 ulMemType) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ u32 ulRemainBytes = 0; ++ u32 ulBytes = 0; ++ ulRemainBytes = ulNumBytes; ++ while (ulRemainBytes > 0 && DSP_SUCCEEDED(status)) { ++ ulBytes = ++ ulRemainBytes > BUFFERSIZE ? BUFFERSIZE : ulRemainBytes; ++ if (dwDSPAddr < (pDevContext->dwDSPStartAdd + ++ pDevContext->dwInternalSize)) { ++ status = WriteDspData(hDevContext, pbHostBuf, dwDSPAddr, ++ ulBytes, ulMemType); ++ } else { ++ status = WriteExtDspData(hDevContext, pbHostBuf, ++ dwDSPAddr, ulBytes, ulMemType, true); ++ } ++ ulRemainBytes -= ulBytes; ++ dwDSPAddr += ulBytes; ++ pbHostBuf = pbHostBuf + ulBytes; ++ } ++ return status; ++} ++ ++/* ++ * ======== WMD_BRD_MemMap ======== ++ * This function maps MPU buffer to the DSP address space. It performs ++ * linear to physical address translation if required. 
It translates each ++ * page since linear addresses can be physically non-contiguous ++ * All address & size arguments are assumed to be page aligned (in proc.c) ++ * ++ * TODO: Disable MMU while updating the page tables (but that'll stall DSP) ++ */ ++static DSP_STATUS WMD_BRD_MemMap(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulMpuAddr, u32 ulVirtAddr, ++ u32 ulNumBytes, u32 ulMapAttr) ++{ ++ u32 attrs; ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ struct HW_MMUMapAttrs_t hwAttrs; ++ struct vm_area_struct *vma; ++ struct mm_struct *mm = current->mm; ++ u32 numUsrPgs = 0, nr_pages = 0; ++ u32 va = ulVirtAddr; ++ ++ DBG_Trace(DBG_ENTER, "> WMD_BRD_MemMap hDevContext %x, pa %x, va %x, " ++ "size %x, ulMapAttr %x\n", hDevContext, ulMpuAddr, ulVirtAddr, ++ ulNumBytes, ulMapAttr); ++ if (ulNumBytes == 0) ++ return DSP_EINVALIDARG; ++ ++ if (ulMapAttr != 0) { ++ attrs = ulMapAttr; ++ } else { ++ /* Assign default attributes */ ++ attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16; ++ } ++ /* Take mapping properties */ ++ if (attrs & DSP_MAPBIGENDIAN) ++ hwAttrs.endianism = HW_BIG_ENDIAN; ++ else ++ hwAttrs.endianism = HW_LITTLE_ENDIAN; ++ ++ hwAttrs.mixedSize = (enum HW_MMUMixedSize_t) ++ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2); ++ /* Ignore elementSize if mixedSize is enabled */ ++ if (hwAttrs.mixedSize == 0) { ++ if (attrs & DSP_MAPELEMSIZE8) { ++ /* Size is 8 bit */ ++ hwAttrs.elementSize = HW_ELEM_SIZE_8BIT; ++ } else if (attrs & DSP_MAPELEMSIZE16) { ++ /* Size is 16 bit */ ++ hwAttrs.elementSize = HW_ELEM_SIZE_16BIT; ++ } else if (attrs & DSP_MAPELEMSIZE32) { ++ /* Size is 32 bit */ ++ hwAttrs.elementSize = HW_ELEM_SIZE_32BIT; ++ } else if (attrs & DSP_MAPELEMSIZE64) { ++ /* Size is 64 bit */ ++ hwAttrs.elementSize = HW_ELEM_SIZE_64BIT; ++ } else { ++ /* ++ * Mixedsize isn't enabled, so size can't be ++ * zero here ++ */ ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_MemMap: MMU element size is zero\n"); ++ return DSP_EINVALIDARG; ++ } ++ } ++ if (attrs & DSP_MAPDONOTLOCK) ++ hwAttrs.donotlockmpupage = 1; ++ else ++ hwAttrs.donotlockmpupage = 0; ++ ++ if (attrs & DSP_MAPVMALLOCADDR) { ++ return MemMapVmalloc(hDevContext, ulMpuAddr, ulVirtAddr, ++ ulNumBytes, &hwAttrs); ++ } ++ /* ++ * Do OS-specific user-va to pa translation. ++ * Combine physically contiguous regions to reduce TLBs. ++ * Pass the translated pa to PteUpdate. ++ */ ++ if ((attrs & DSP_MAPPHYSICALADDR)) { ++ status = PteUpdate(pDevContext, ulMpuAddr, ulVirtAddr, ++ ulNumBytes, &hwAttrs); ++ goto func_cont; ++ } ++ ++ /* ++ * Important Note: ulMpuAddr is mapped from user application process ++ * to current process - it must lie completely within the current ++ * virtual memory address space in order to be of use to us here! ++ */ ++ down_read(&mm->mmap_sem); ++ vma = find_vma(mm, ulMpuAddr); ++ if (vma) ++ DBG_Trace(DBG_LEVEL6, "VMAfor UserBuf: ulMpuAddr=%x, " ++ "ulNumBytes=%x, vm_start=%x vm_end=%x vm_flags=%x \n", ++ ulMpuAddr, ulNumBytes, vma->vm_start, ++ vma->vm_end, vma->vm_flags); ++ ++ /* ++ * It is observed that under some circumstances, the user buffer is ++ * spread across several VMAs. 
So loop through and check if the entire ++ * user buffer is covered ++ */ ++ while ((vma) && (ulMpuAddr + ulNumBytes > vma->vm_end)) { ++ /* jump to the next VMA region */ ++ vma = find_vma(mm, vma->vm_end + 1); ++ DBG_Trace(DBG_LEVEL6, "VMAfor UserBuf ulMpuAddr=%x, " ++ "ulNumBytes=%x, vm_start=%x vm_end=%x vm_flags=%x\n", ++ ulMpuAddr, ulNumBytes, vma->vm_start, ++ vma->vm_end, vma->vm_flags); ++ } ++ if (!vma) { ++ DBG_Trace(DBG_LEVEL7, "Failed to get the VMA region for " ++ "MPU Buffer !!! \n"); ++ status = DSP_EINVALIDARG; ++ up_read(&mm->mmap_sem); ++ goto func_cont; ++ } ++ ++ numUsrPgs = PAGE_ALIGN(ulNumBytes) / PG_SIZE_4K; ++ ++ DBG_Trace(DBG_LEVEL4, "%s :numOfActualTabEntries=%d, ulNumBytes= %d\n", ++ %s, numUsrPgs, ulNumBytes); ++ ++ if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_RESERVED)) { ++ for (nr_pages = numUsrPgs; nr_pages > 0;) { ++ u32 pa; ++ ++ pa = user_va2pa(mm, ulMpuAddr); ++ if (!pa) { ++ status = DSP_EFAIL; ++ pr_err("DSPBRIDGE: VM_IO mapping physical" ++ "address is invalid\n"); ++ break; ++ } ++ ++ status = PteSet(pDevContext->pPtAttrs, pa, ++ va, HW_PAGE_SIZE_4KB, &hwAttrs); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ "WMD_BRD_MemMap: FAILED IN VM_IO" ++ "PTESET \n"); ++ break; ++ } ++ ++ va += HW_PAGE_SIZE_4KB; ++ ulMpuAddr += HW_PAGE_SIZE_4KB; ++ nr_pages--; ++ } ++ } else { ++ int write = 0; ++ ++ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) ++ write = 1; ++ ++ for (nr_pages = numUsrPgs; nr_pages > 0;) { ++ int i, ret; ++ struct page *pages[16]; /* for a reasonable batch */ ++ ++ ret = get_user_pages(current, mm, ulMpuAddr, ++ min_t(int, nr_pages, ARRAY_SIZE(pages)), ++ write, 1, pages, NULL); ++ if (ret <= 0) { ++ pr_err("DSPBRIDGE: get_user_pages FAILED," ++ "MPU addr = 0x%x," ++ "vma->vm_flags = 0x%lx," ++ "get_user_pages ErrValue = %d," ++ "Buffersize=0x%x\n", ++ ulMpuAddr, vma->vm_flags, ret, ++ ulNumBytes); ++ status = DSP_EFAIL; ++ goto fail_mapping; ++ } ++ ++ for (i = 0; i < ret; i++) { ++ struct page *page = pages[i]; ++ ++ status = PteSet(pDevContext->pPtAttrs, ++ page_to_phys(page), va, ++ HW_PAGE_SIZE_4KB, &hwAttrs); ++ if (DSP_FAILED(status)) { ++ pr_err("%s: FAILED IN PTESET\n", ++ __func__); ++ goto fail_mapping; ++ } ++ SetPageMlocked(page); ++ va += HW_PAGE_SIZE_4KB; ++ ulMpuAddr += HW_PAGE_SIZE_4KB; ++ nr_pages--; ++ } ++ } ++ } ++ ++fail_mapping: ++ up_read(&mm->mmap_sem); ++func_cont: ++ /* Don't propogate Linux or HW status to upper layers */ ++ if (DSP_SUCCEEDED(status)) { ++ status = DSP_SOK; ++ } else { ++ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap status %x\n", status); ++ /* ++ * Roll out the mapped pages incase it failed in middle of ++ * mapping ++ */ ++ if (numUsrPgs - nr_pages) { ++ WMD_BRD_MemUnMap(pDevContext, ulVirtAddr, ++ ((numUsrPgs - nr_pages) * PG_SIZE_4K)); ++ } ++ status = DSP_EFAIL; ++ } ++ /* ++ * In any case, flush the TLB ++ * This is called from here instead from PteUpdate to avoid unnecessary ++ * repetition while mapping non-contiguous physical regions of a virtual ++ * region ++ */ ++ flush_all(pDevContext); ++ DBG_Trace(DBG_ENTER, "< WMD_BRD_MemMap status %x\n", status); ++ return status; ++} ++ ++/* ++ * ======== WMD_BRD_MemUnMap ======== ++ * Invalidate the PTEs for the DSP VA block to be unmapped. 
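One detail worth noticing in WMD_BRD_MemMap() above: nr_pages counts down as pages are entered into the DSP MMU, so on a mid-mapping failure the code can unmap exactly (numUsrPgs - nr_pages) pages and leave no stale entries, and the TLB flush runs on both the success and failure paths. A minimal standalone sketch of that rollback pattern follows (outside the diff, not part of the patch); pte_set() and unmap_range() are made-up stand-ins for PteSet() and WMD_BRD_MemUnMap(), and the failure on the third page is simulated.

#include <stdbool.h>
#include <stdio.h>

/* Pretend page 2 cannot be mapped so the rollback path is exercised. */
static bool pte_set(unsigned int page_idx)
{
    printf("map page %u\n", page_idx);
    return page_idx != 2;
}

static void unmap_range(unsigned int num_pages)
{
    printf("rollback: unmap first %u page(s)\n", num_pages);
}

int main(void)
{
    unsigned int total = 5, mapped = 0;
    bool ok = true;

    for (unsigned int i = 0; i < total; i++) {
        if (!pte_set(i)) {
            ok = false;
            break;
        }
        mapped++;
    }
    if (!ok && mapped)
        unmap_range(mapped);      /* undo only what was actually mapped */
    printf("flush DSP TLB, status = %s\n", ok ? "DSP_SOK" : "DSP_EFAIL");
    return 0;
}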
++ * ++ * PTEs of a mapped memory block are contiguous in any page table ++ * So, instead of looking up the PTE address for every 4K block, ++ * we clear consecutive PTEs until we unmap all the bytes ++ */ ++static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext, ++ u32 ulVirtAddr, u32 ulNumBytes) ++{ ++ u32 L1BaseVa; ++ u32 L2BaseVa; ++ u32 L2BasePa; ++ u32 L2PageNum; ++ u32 pteVal; ++ u32 pteSize; ++ u32 pteCount; ++ u32 pteAddrL1; ++ u32 pteAddrL2 = 0; ++ u32 remBytes; ++ u32 remBytesL2; ++ u32 vaCurr; ++ struct page *pg = NULL; ++ DSP_STATUS status = DSP_SOK; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ struct PgTableAttrs *pt = pDevContext->pPtAttrs; ++ u32 temp; ++ u32 pAddr; ++ u32 numof4KPages = 0; ++ ++ DBG_Trace(DBG_ENTER, "> WMD_BRD_MemUnMap hDevContext %x, va %x, " ++ "NumBytes %x\n", hDevContext, ulVirtAddr, ulNumBytes); ++ vaCurr = ulVirtAddr; ++ remBytes = ulNumBytes; ++ remBytesL2 = 0; ++ L1BaseVa = pt->L1BaseVa; ++ pteAddrL1 = HW_MMU_PteAddrL1(L1BaseVa, vaCurr); ++ DBG_Trace(DBG_ENTER, "WMD_BRD_MemUnMap L1BaseVa %x, pteAddrL1 %x " ++ "vaCurr %x remBytes %x\n", L1BaseVa, pteAddrL1, ++ vaCurr, remBytes); ++ while (remBytes && (DSP_SUCCEEDED(status))) { ++ u32 vaCurrOrig = vaCurr; ++ /* Find whether the L1 PTE points to a valid L2 PT */ ++ pteAddrL1 = HW_MMU_PteAddrL1(L1BaseVa, vaCurr); ++ pteVal = *(u32 *)pteAddrL1; ++ pteSize = HW_MMU_PteSizeL1(pteVal); ++ ++ if (pteSize != HW_MMU_COARSE_PAGE_SIZE) ++ goto skip_coarse_page; ++ ++ /* ++ * Get the L2 PA from the L1 PTE, and find ++ * corresponding L2 VA ++ */ ++ L2BasePa = HW_MMU_PteCoarseL1(pteVal); ++ L2BaseVa = L2BasePa - pt->L2BasePa + pt->L2BaseVa; ++ L2PageNum = (L2BasePa - pt->L2BasePa) / HW_MMU_COARSE_PAGE_SIZE; ++ /* ++ * Find the L2 PTE address from which we will start ++ * clearing, the number of PTEs to be cleared on this ++ * page, and the size of VA space that needs to be ++ * cleared on this L2 page ++ */ ++ pteAddrL2 = HW_MMU_PteAddrL2(L2BaseVa, vaCurr); ++ pteCount = pteAddrL2 & (HW_MMU_COARSE_PAGE_SIZE - 1); ++ pteCount = (HW_MMU_COARSE_PAGE_SIZE - pteCount) / sizeof(u32); ++ if (remBytes < (pteCount * PG_SIZE_4K)) ++ pteCount = remBytes / PG_SIZE_4K; ++ remBytesL2 = pteCount * PG_SIZE_4K; ++ DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap L2BasePa %x, " ++ "L2BaseVa %x pteAddrL2 %x, remBytesL2 %x\n", ++ L2BasePa, L2BaseVa, pteAddrL2, remBytesL2); ++ /* ++ * Unmap the VA space on this L2 PT. A quicker way ++ * would be to clear pteCount entries starting from ++ * pteAddrL2. However, below code checks that we don't ++ * clear invalid entries or less than 64KB for a 64KB ++ * entry. Similar checking is done for L1 PTEs too ++ * below ++ */ ++ while (remBytesL2 && (DSP_SUCCEEDED(status))) { ++ pteVal = *(u32 *)pteAddrL2; ++ pteSize = HW_MMU_PteSizeL2(pteVal); ++ /* vaCurr aligned to pteSize? 
*/ ++ if (pteSize == 0 || remBytesL2 < pteSize || ++ vaCurr & (pteSize - 1)) { ++ status = DSP_EFAIL; ++ break; ++ } ++ ++ /* Collect Physical addresses from VA */ ++ pAddr = (pteVal & ~(pteSize - 1)); ++ if (pteSize == HW_PAGE_SIZE_64KB) ++ numof4KPages = 16; ++ else ++ numof4KPages = 1; ++ temp = 0; ++ while (temp++ < numof4KPages) { ++ if (!pfn_valid(__phys_to_pfn(pAddr))) { ++ pAddr += HW_PAGE_SIZE_4KB; ++ continue; ++ } ++ pg = phys_to_page(pAddr); ++ if (page_count(pg) < 1) { ++ pr_info("DSPBRIDGE: UNMAP function: " ++ "COUNT 0 FOR PA 0x%x, size = " ++ "0x%x\n", pAddr, ulNumBytes); ++ bad_page_dump(pAddr, pg); ++ } ++ ClearPageMlocked(pg); ++ SetPageDirty(pg); ++ page_cache_release(pg); ++ pAddr += HW_PAGE_SIZE_4KB; ++ } ++ if (HW_MMU_PteClear(pteAddrL2, vaCurr, pteSize) ++ == RET_FAIL) { ++ status = DSP_EFAIL; ++ goto EXIT_LOOP; ++ } ++ ++ status = DSP_SOK; ++ remBytesL2 -= pteSize; ++ vaCurr += pteSize; ++ pteAddrL2 += (pteSize >> 12) * sizeof(u32); ++ } ++ SYNC_EnterCS(pt->hCSObj); ++ if (remBytesL2 == 0) { ++ pt->pgInfo[L2PageNum].numEntries -= pteCount; ++ if (pt->pgInfo[L2PageNum].numEntries == 0) { ++ /* ++ * Clear the L1 PTE pointing to the L2 PT ++ */ ++ if (HW_MMU_PteClear(L1BaseVa, vaCurrOrig, ++ HW_MMU_COARSE_PAGE_SIZE) == RET_OK) ++ status = DSP_SOK; ++ else { ++ status = DSP_EFAIL; ++ SYNC_LeaveCS(pt->hCSObj); ++ goto EXIT_LOOP; ++ } ++ } ++ remBytes -= pteCount * PG_SIZE_4K; ++ } else ++ status = DSP_EFAIL; ++ DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap L2PageNum %x, " ++ "numEntries %x, pteCount %x, status: 0x%x\n", ++ L2PageNum, pt->pgInfo[L2PageNum].numEntries, ++ pteCount, status); ++ SYNC_LeaveCS(pt->hCSObj); ++ continue; ++skip_coarse_page: ++ /* vaCurr aligned to pteSize? */ ++ /* pteSize = 1 MB or 16 MB */ ++ if (pteSize == 0 || remBytes < pteSize || ++ vaCurr & (pteSize - 1)) { ++ status = DSP_EFAIL; ++ break; ++ } ++ ++ if (pteSize == HW_PAGE_SIZE_1MB) ++ numof4KPages = 256; ++ else ++ numof4KPages = 4096; ++ temp = 0; ++ /* Collect Physical addresses from VA */ ++ pAddr = (pteVal & ~(pteSize - 1)); ++ while (temp++ < numof4KPages) { ++ if (pfn_valid(__phys_to_pfn(pAddr))) { ++ pg = phys_to_page(pAddr); ++ if (page_count(pg) < 1) { ++ pr_info("DSPBRIDGE: UNMAP function: " ++ "COUNT 0 FOR PA 0x%x, size = " ++ "0x%x\n", pAddr, ulNumBytes); ++ bad_page_dump(pAddr, pg); ++ } ++ ClearPageMlocked(pg); ++ SetPageDirty(pg); ++ page_cache_release(pg); ++ } ++ pAddr += HW_PAGE_SIZE_4KB; ++ } ++ if (HW_MMU_PteClear(L1BaseVa, vaCurr, pteSize) == RET_OK) { ++ status = DSP_SOK; ++ remBytes -= pteSize; ++ vaCurr += pteSize; ++ } else { ++ status = DSP_EFAIL; ++ goto EXIT_LOOP; ++ } ++ } ++ /* ++ * It is better to flush the TLB here, so that any stale old entries ++ * get flushed ++ */ ++EXIT_LOOP: ++ flush_all(pDevContext); ++ DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap vaCurr %x, pteAddrL1 %x " ++ "pteAddrL2 %x\n", vaCurr, pteAddrL1, pteAddrL2); ++ DBG_Trace(DBG_ENTER, "< WMD_BRD_MemUnMap status %x remBytes %x, " ++ "remBytesL2 %x\n", status, remBytes, remBytesL2); ++ return status; ++} ++ ++/* ++ * ======== user_va2pa ======== ++ * Purpose: ++ * This function walks through the Linux page tables to convert a userland ++ * virtual address to physical address ++ */ ++static u32 user_va2pa(struct mm_struct *mm, u32 address) ++{ ++ pgd_t *pgd; ++ pmd_t *pmd; ++ pte_t *ptep, pte; ++ ++ pgd = pgd_offset(mm, address); ++ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { ++ pmd = pmd_offset(pgd, address); ++ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { ++ ptep = pte_offset_map(pmd, address); ++ if 
(ptep) { ++ pte = *ptep; ++ if (pte_present(pte)) ++ return pte & PAGE_MASK; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++ ++/* ++ * ======== PteUpdate ======== ++ * This function calculates the optimum page-aligned addresses and sizes ++ * Caller must pass page-aligned values ++ */ ++static DSP_STATUS PteUpdate(struct WMD_DEV_CONTEXT *hDevContext, u32 pa, ++ u32 va, u32 size, ++ struct HW_MMUMapAttrs_t *mapAttrs) ++{ ++ u32 i; ++ u32 allBits; ++ u32 paCurr = pa; ++ u32 vaCurr = va; ++ u32 numBytes = size; ++ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; ++ DSP_STATUS status = DSP_SOK; ++ u32 pgSize[] = { HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB, ++ HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB }; ++ DBG_Trace(DBG_ENTER, "> PteUpdate hDevContext %x, pa %x, va %x, " ++ "size %x, mapAttrs %x\n", hDevContext, pa, va, size, mapAttrs); ++ while (numBytes && DSP_SUCCEEDED(status)) { ++ /* To find the max. page size with which both PA & VA are ++ * aligned */ ++ allBits = paCurr | vaCurr; ++ DBG_Trace(DBG_LEVEL1, "allBits %x, paCurr %x, vaCurr %x, " ++ "numBytes %x ", allBits, paCurr, vaCurr, numBytes); ++ for (i = 0; i < 4; i++) { ++ if ((numBytes >= pgSize[i]) && ((allBits & ++ (pgSize[i] - 1)) == 0)) { ++ DBG_Trace(DBG_LEVEL1, "pgSize %x\n", pgSize[i]); ++ status = PteSet(pDevContext->pPtAttrs, paCurr, ++ vaCurr, pgSize[i], mapAttrs); ++ paCurr += pgSize[i]; ++ vaCurr += pgSize[i]; ++ numBytes -= pgSize[i]; ++ /* Don't try smaller sizes. Hopefully we have ++ * reached an address aligned to a bigger page ++ * size */ ++ break; ++ } ++ } ++ } ++ DBG_Trace(DBG_ENTER, "< PteUpdate status %x numBytes %x\n", status, ++ numBytes); ++ return status; ++} ++ ++/* ++ * ======== PteSet ======== ++ * This function calculates PTE address (MPU virtual) to be updated ++ * It also manages the L2 page tables ++ */ ++static DSP_STATUS PteSet(struct PgTableAttrs *pt, u32 pa, u32 va, ++ u32 size, struct HW_MMUMapAttrs_t *attrs) ++{ ++ u32 i; ++ u32 pteVal; ++ u32 pteAddrL1; ++ u32 pteSize; ++ u32 pgTblVa; /* Base address of the PT that will be updated */ ++ u32 L1BaseVa; ++ /* Compiler warns that the next three variables might be used ++ * uninitialized in this function. Doesn't seem so. Working around, ++ * anyways. */ ++ u32 L2BaseVa = 0; ++ u32 L2BasePa = 0; ++ u32 L2PageNum = 0; ++ DSP_STATUS status = DSP_SOK; ++ DBG_Trace(DBG_ENTER, "> PteSet pPgTableAttrs %x, pa %x, va %x, " ++ "size %x, attrs %x\n", pt, pa, va, size, attrs); ++ L1BaseVa = pt->L1BaseVa; ++ pgTblVa = L1BaseVa; ++ if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) { ++ /* Find whether the L1 PTE points to a valid L2 PT */ ++ pteAddrL1 = HW_MMU_PteAddrL1(L1BaseVa, va); ++ if (pteAddrL1 <= (pt->L1BaseVa + pt->L1size)) { ++ pteVal = *(u32 *)pteAddrL1; ++ pteSize = HW_MMU_PteSizeL1(pteVal); ++ } else { ++ return DSP_EFAIL; ++ } ++ SYNC_EnterCS(pt->hCSObj); ++ if (pteSize == HW_MMU_COARSE_PAGE_SIZE) { ++ /* Get the L2 PA from the L1 PTE, and find ++ * corresponding L2 VA */ ++ L2BasePa = HW_MMU_PteCoarseL1(pteVal); ++ L2BaseVa = L2BasePa - pt->L2BasePa + pt->L2BaseVa; ++ L2PageNum = (L2BasePa - pt->L2BasePa) / ++ HW_MMU_COARSE_PAGE_SIZE; ++ } else if (pteSize == 0) { ++ /* L1 PTE is invalid. Allocate a L2 PT and ++ * point the L1 PTE to it */ ++ /* Find a free L2 PT. 
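PteUpdate() above greedily picks the largest DSP MMU page size (16 MB, 1 MB, 64 KB or 4 KB) for which both the physical and the virtual address are aligned and enough bytes remain, so a large contiguous mapping consumes as few PTEs as possible. Here is a self-contained sketch of that selection loop (outside the diff, not part of the patch); best_page_size() and the sample addresses are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* OMAP3 DSP MMU page sizes, largest first, same order as pgSize[] above. */
static const uint32_t pg_size[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };

/* Largest page size such that (pa | va) is aligned to it and at least that
 * many bytes remain; returns 0 if not even 4 KB aligned. */
static uint32_t best_page_size(uint32_t pa, uint32_t va, uint32_t bytes)
{
    uint32_t all_bits = pa | va;

    for (size_t i = 0; i < sizeof(pg_size) / sizeof(pg_size[0]); i++) {
        if (bytes >= pg_size[i] && (all_bits & (pg_size[i] - 1)) == 0)
            return pg_size[i];
    }
    return 0;
}

int main(void)
{
    /* 1 MB-aligned PA/VA with 1 MB + 8 KB to map: one 1 MB entry, then
     * two 4 KB entries. */
    uint32_t pa = 0x80100000, va = 0x20100000, bytes = 0x102000;

    while (bytes) {
        uint32_t sz = best_page_size(pa, va, bytes);
        if (!sz)
            break;
        printf("map pa 0x%08x -> va 0x%08x, size 0x%x\n",
               (unsigned)pa, (unsigned)va, (unsigned)sz);
        pa += sz;
        va += sz;
        bytes -= sz;
    }
    return 0;
}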
*/ ++ for (i = 0; (i < pt->L2NumPages) && ++ (pt->pgInfo[i].numEntries != 0); i++) ++ ;; ++ if (i < pt->L2NumPages) { ++ L2PageNum = i; ++ L2BasePa = pt->L2BasePa + (L2PageNum * ++ HW_MMU_COARSE_PAGE_SIZE); ++ L2BaseVa = pt->L2BaseVa + (L2PageNum * ++ HW_MMU_COARSE_PAGE_SIZE); ++ /* Endianness attributes are ignored for ++ * HW_MMU_COARSE_PAGE_SIZE */ ++ status = HW_MMU_PteSet(L1BaseVa, L2BasePa, va, ++ HW_MMU_COARSE_PAGE_SIZE, attrs); ++ } else { ++ status = DSP_EMEMORY; ++ } ++ } else { ++ /* Found valid L1 PTE of another size. ++ * Should not overwrite it. */ ++ status = DSP_EFAIL; ++ } ++ if (DSP_SUCCEEDED(status)) { ++ pgTblVa = L2BaseVa; ++ if (size == HW_PAGE_SIZE_64KB) ++ pt->pgInfo[L2PageNum].numEntries += 16; ++ else ++ pt->pgInfo[L2PageNum].numEntries++; ++ DBG_Trace(DBG_LEVEL1, "L2 BaseVa %x, BasePa %x, " ++ "PageNum %x numEntries %x\n", L2BaseVa, ++ L2BasePa, L2PageNum, ++ pt->pgInfo[L2PageNum].numEntries); ++ } ++ SYNC_LeaveCS(pt->hCSObj); ++ } ++ if (DSP_SUCCEEDED(status)) { ++ DBG_Trace(DBG_LEVEL1, "PTE pgTblVa %x, pa %x, va %x, size %x\n", ++ pgTblVa, pa, va, size); ++ DBG_Trace(DBG_LEVEL1, "PTE endianism %x, elementSize %x, " ++ "mixedSize %x\n", attrs->endianism, ++ attrs->elementSize, attrs->mixedSize); ++ status = HW_MMU_PteSet(pgTblVa, pa, va, size, attrs); ++ } ++ DBG_Trace(DBG_ENTER, "< PteSet status %x\n", status); ++ return status; ++} ++ ++/* Memory map kernel VA -- memory allocated with vmalloc */ ++static DSP_STATUS MemMapVmalloc(struct WMD_DEV_CONTEXT *pDevContext, ++ u32 ulMpuAddr, u32 ulVirtAddr, u32 ulNumBytes, ++ struct HW_MMUMapAttrs_t *hwAttrs) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct page *pPage[1]; ++ u32 i; ++ u32 paCurr; ++ u32 paNext; ++ u32 vaCurr; ++ u32 sizeCurr; ++ u32 numPages; ++ u32 pa; ++ u32 numOf4KPages; ++ u32 temp = 0; ++ ++ DBG_Trace(DBG_ENTER, "> MemMapVmalloc hDevContext %x, pa %x, va %x, " ++ "size %x\n", pDevContext, ulMpuAddr, ulVirtAddr, ulNumBytes); ++ ++ /* ++ * Do Kernel va to pa translation. ++ * Combine physically contiguous regions to reduce TLBs. ++ * Pass the translated pa to PteUpdate. 
++ */ ++ numPages = ulNumBytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ ++ i = 0; ++ vaCurr = ulMpuAddr; ++ pPage[0] = vmalloc_to_page((void *)vaCurr); ++ paNext = page_to_phys(pPage[0]); ++ while (DSP_SUCCEEDED(status) && (i < numPages)) { ++ /* ++ * Reuse paNext from the previous iteraion to avoid ++ * an extra va2pa call ++ */ ++ paCurr = paNext; ++ sizeCurr = PAGE_SIZE; ++ /* ++ * If the next page is physically contiguous, ++ * map it with the current one by increasing ++ * the size of the region to be mapped ++ */ ++ while (++i < numPages) { ++ pPage[0] = vmalloc_to_page((void *)(vaCurr + sizeCurr)); ++ paNext = page_to_phys(pPage[0]); ++ DBG_Trace(DBG_LEVEL5, "Xlate Vmalloc VA=0x%x , " ++ "PA=0x%x \n", (vaCurr + sizeCurr), paNext); ++ if (paNext == (paCurr + sizeCurr)) ++ sizeCurr += PAGE_SIZE; ++ else ++ break; ++ ++ } ++ if (paNext == 0) { ++ status = DSP_EMEMORY; ++ break; ++ } ++ pa = paCurr; ++ numOf4KPages = sizeCurr / HW_PAGE_SIZE_4KB; ++ while (temp++ < numOf4KPages) { ++ get_page(phys_to_page(pa)); ++ pa += HW_PAGE_SIZE_4KB; ++ } ++ status = PteUpdate(pDevContext, paCurr, ulVirtAddr + ++ (vaCurr - ulMpuAddr), sizeCurr, hwAttrs); ++ vaCurr += sizeCurr; ++ } ++ /* Don't propogate Linux or HW status to upper layers */ ++ if (DSP_SUCCEEDED(status)) { ++ status = DSP_SOK; ++ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap succeeded %x\n", ++ status); ++ } else { ++ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap status %x\n", status); ++ status = DSP_EFAIL; ++ } ++ /* ++ * In any case, flush the TLB ++ * This is called from here instead from PteUpdate to avoid unnecessary ++ * repetition while mapping non-contiguous physical regions of a virtual ++ * region ++ */ ++ flush_all(pDevContext); ++ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap at end status %x\n", status); ++ return status; ++} ++ ++/* ++ * ======== configureDspMmu ======== ++ * Make DSP MMU page table entries. ++ */ ++void configureDspMmu(struct WMD_DEV_CONTEXT *pDevContext, u32 dataBasePhys, ++ u32 dspBaseVirt, u32 sizeInBytes, s32 nEntryStart, ++ enum HW_Endianism_t endianism, ++ enum HW_ElementSize_t elemSize, ++ enum HW_MMUMixedSize_t mixedSize) ++{ ++ struct CFG_HOSTRES resources; ++ struct HW_MMUMapAttrs_t mapAttrs = { endianism, elemSize, mixedSize }; ++ DSP_STATUS status = DSP_SOK; ++ ++ DBC_Require(sizeInBytes > 0); ++ DBG_Trace(DBG_LEVEL1, ++ "configureDspMmu entry %x pa %x, va %x, bytes %x ", ++ nEntryStart, dataBasePhys, dspBaseVirt, sizeInBytes); ++ ++ DBG_Trace(DBG_LEVEL1, "endianism %x, elemSize %x, mixedSize %x\n", ++ endianism, elemSize, mixedSize); ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ status = HW_MMU_TLBAdd(pDevContext->dwDSPMmuBase, dataBasePhys, ++ dspBaseVirt, sizeInBytes, nEntryStart, ++ &mapAttrs, HW_SET, HW_SET); ++} ++ ++/* ++ * ======== WaitForStart ======== ++ * Wait for the singal from DSP that it has started, or time out. 
++ */ ++bool WaitForStart(struct WMD_DEV_CONTEXT *pDevContext, u32 dwSyncAddr) ++{ ++ u16 usCount = TIHELEN_ACKTIMEOUT; ++ ++ /* Wait for response from board */ ++ while (*((volatile u16 *)dwSyncAddr) && --usCount) ++ udelay(10); ++ ++ /* If timed out: return FALSE */ ++ if (!usCount) { ++ DBG_Trace(DBG_LEVEL7, "Timed out Waiting for DSP to Start\n"); ++ return FALSE; ++ } ++ return TRUE; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430_pwr.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap3430_pwr.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430_pwr.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/tiomap3430_pwr.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,750 @@ ++/* ++ * tiomap_pwr.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2007-2008 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++/* ++ * ======== _tiomap_pwr.c ======== ++ * Description: ++ * Implementation of DSP wake/sleep routines. ++ * ++ *! Revision History ++ *! ================ ++ *! 01-Nov-2007 HK: Added Off mode(Hibernation) support and DVFS support ++ *! 05-Jan-2004 vp: Moved the file to platform specific folder and commented the ++ *! code. ++ *! 27-Mar-2003 vp: Added support for DSP boot idle mode. ++ *! 06-Dec-2002 cring: Added Palm support. ++ *! 08-Oct-2002 rr: Created. 
++ */ ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++#include ++ ++/* ------------------------------------ Hardware Abstraction Layer */ ++#include ++#include ++#include ++#include ++ ++#include ++ ++/* ----------------------------------- Mini Driver */ ++#include ++ ++/* ----------------------------------- specific to this file */ ++#include "_tiomap.h" ++#include "_tiomap_pwr.h" ++#include "_tiomap_util.h" ++#include ++#include ++ ++#ifdef CONFIG_PM ++#include ++#endif ++extern struct MAILBOX_CONTEXT mboxsetting; ++extern unsigned short enable_off_mode; ++extern unsigned short min_active_opp; ++/* ++ * ======== handle_constraints_set ======== ++ * Sets new DSP constraint ++ */ ++DSP_STATUS handle_constraints_set(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs) ++{ ++#ifdef CONFIG_BRIDGE_DVFS ++ u32 pConstraintVal; ++ DSP_STATUS status = DSP_SOK; ++ struct CFG_HOSTRES resources; ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ ++ pConstraintVal = *(((u32 *)pArgs) + 1); ++ /* Read the target value requested by DSP */ ++ DBG_Trace(DBG_LEVEL7, "handle_constraints_set:" ++ "opp requested = 0x%x\n", pConstraintVal); ++ status = HW_MBOX_saveSettings(resources.dwMboxBase); ++ ++ /* Set the new opp value */ ++ if (pdata->dsp_set_min_opp) { ++ /* ++ * When Smartreflex is ON, DSP requires at least OPP level 3 ++ * to operate reliably. So boost lower OPP levels to OPP3. ++ */ ++ if (pConstraintVal < min_active_opp) { ++ pr_debug("DSPBRIDGE: VDD1 OPP%x elevated to OPP%x\n", ++ pConstraintVal, min_active_opp); ++ (*pdata->dsp_set_min_opp)(min_active_opp); ++ } else ++ (*pdata->dsp_set_min_opp)(pConstraintVal); ++ } ++#endif /* #ifdef CONFIG_BRIDGE_DVFS */ ++ return DSP_SOK; ++} ++ ++/* ++ * ======== handle_hibernation_fromDSP ======== ++ * Handle Hibernation requested from DSP ++ */ ++DSP_STATUS handle_hibernation_fromDSP(struct WMD_DEV_CONTEXT *pDevContext) ++{ ++ DSP_STATUS status = DSP_SOK; ++#ifdef CONFIG_PM ++ u16 usCount = TIHELEN_ACKTIMEOUT; ++ struct CFG_HOSTRES resources; ++ enum HW_PwrState_t pwrState; ++#ifdef CONFIG_BRIDGE_DVFS ++ u32 opplevel; ++ struct IO_MGR *hIOMgr; ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++#endif ++ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ return status; ++ ++ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, ++ &pwrState); ++ /* Wait for DSP to move into Off state, how much time should ++ * we wait? 
*/ ++ while ((pwrState != HW_PWR_STATE_OFF) && --usCount) { ++ udelay(500); ++ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, ++ &pwrState); ++ } ++ if (usCount == 0) { ++ DBG_Trace(DBG_LEVEL7, "Timed out Waiting for DSP Off mode \n"); ++ status = WMD_E_TIMEOUT; ++ return status; ++ } else { ++ ++ /* Save mailbox settings */ ++ status = HW_MBOX_saveSettings(resources.dwMboxBase); ++ DBG_Trace(DBG_LEVEL6, "MailBoxSettings: SYSCONFIG = 0x%x\n", ++ mboxsetting.sysconfig); ++ DBG_Trace(DBG_LEVEL6, "MailBoxSettings: IRQENABLE0 = 0x%x\n", ++ mboxsetting.irqEnable0); ++ DBG_Trace(DBG_LEVEL6, "MailBoxSettings: IRQENABLE1 = 0x%x\n", ++ mboxsetting.irqEnable1); ++ /* Turn off DSP Peripheral clocks and DSP Load monitor timer */ ++ status = DSP_PeripheralClocks_Disable(pDevContext, NULL); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Update the Bridger Driver state */ ++ pDevContext->dwBrdState = BRD_DSP_HIBERNATION; ++#ifdef CONFIG_BRIDGE_DVFS ++ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); ++ if (DSP_FAILED(status)) ++ return status; ++ IO_SHMsetting(hIOMgr, SHM_GETOPP, &opplevel); ++ if (opplevel != VDD1_OPP1) { ++ DBG_Trace(DBG_LEVEL5, ++ " DSP requested OPP = %d, MPU" ++ " requesting low OPP %d instead\n", ++ opplevel, VDD1_OPP1); ++ } ++ /* ++ * Set the OPP to low level before moving to OFF ++ * mode ++ */ ++ if (pdata->dsp_set_min_opp) ++ (*pdata->dsp_set_min_opp)(VDD1_OPP1); ++ status = DSP_SOK; ++#endif /* CONFIG_BRIDGE_DVFS */ ++ } else { ++ DBG_Trace(DBG_LEVEL7, ++ "handle_hibernation_fromDSP- FAILED\n"); ++ } ++ } ++#endif ++ return status; ++} ++ ++/* ++ * ======== SleepDSP ======== ++ * Put DSP in low power consuming state. ++ */ ++DSP_STATUS SleepDSP(struct WMD_DEV_CONTEXT *pDevContext, IN u32 dwCmd, ++ IN void *pArgs) ++{ ++ DSP_STATUS status = DSP_SOK; ++#ifdef CONFIG_PM ++ struct CFG_HOSTRES resources; ++ struct DEH_MGR *hDehMgr; ++ u16 usCount = TIHELEN_ACKTIMEOUT; ++ enum HW_PwrState_t pwrState, targetPwrState; ++ ++ DBG_Trace(DBG_LEVEL7, "SleepDSP- Enter function \n"); ++ ++ /* Check if sleep code is valid */ ++ if ((dwCmd != PWR_DEEPSLEEP) && (dwCmd != PWR_EMERGENCYDEEPSLEEP)) { ++ DBG_Trace(DBG_LEVEL7, "SleepDSP- Illegal sleep command\n"); ++ return DSP_EINVALIDARG; ++ } ++ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ return status; ++ ++ switch (pDevContext->dwBrdState) { ++ case BRD_RUNNING: ++ status = HW_MBOX_saveSettings(resources.dwMboxBase); ++ if (enable_off_mode) { ++ CHNLSM_InterruptDSP2(pDevContext, ++ MBX_PM_DSPHIBERNATE); ++ DBG_Trace(DBG_LEVEL7, ++ "SleepDSP - Sent hibernate " ++ "command to DSP\n"); ++ targetPwrState = HW_PWR_STATE_OFF; ++ } else { ++ CHNLSM_InterruptDSP2(pDevContext, ++ MBX_PM_DSPRETENTION); ++ targetPwrState = HW_PWR_STATE_RET; ++ } ++ break; ++ case BRD_RETENTION: ++ status = HW_MBOX_saveSettings(resources.dwMboxBase); ++ if (enable_off_mode) { ++ CHNLSM_InterruptDSP2(pDevContext, ++ MBX_PM_DSPHIBERNATE); ++ targetPwrState = HW_PWR_STATE_OFF; ++ } else ++ return DSP_SOK; ++ break; ++ case BRD_HIBERNATION: ++ case BRD_DSP_HIBERNATION: ++ /* Already in Hibernation, so just return */ ++ DBG_Trace(DBG_LEVEL7, "SleepDSP- DSP already in " ++ "hibernation\n"); ++ return DSP_SOK; ++ case BRD_STOPPED: ++ DBG_Trace(DBG_LEVEL7, ++ "SleepDSP- Board in STOP state \n"); ++ return DSP_SALREADYASLEEP; ++ default: ++ DBG_Trace(DBG_LEVEL7, ++ "SleepDSP- Bridge in Illegal state\n"); ++ return DSP_EFAIL; ++ } ++ ++ /* Get the PRCM DSP power domain status */ ++ 
HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, ++ &pwrState); ++ ++ /* ++ * Wait for DSP to move into Standby state, how much time ++ * should we wait? ++ */ ++ while ((pwrState != targetPwrState) && --usCount) { ++ udelay(500); ++ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, ++ &pwrState); ++ } ++ ++ if (!usCount) { ++ DBG_Trace(DBG_LEVEL7, "SleepDSP: Timed out Waiting for DSP" ++ " STANDBY %x \n", pwrState); ++ DEV_GetDehMgr(pDevContext->hDevObject, &hDehMgr); ++ WMD_DEH_Notify(hDehMgr, DSP_PWRERROR, 0); ++ return WMD_E_TIMEOUT; ++ } else { ++ DBG_Trace(DBG_LEVEL7, "SleepDSP: DSP STANDBY Pwr state %x \n", ++ pwrState); ++ ++ /* Update the Bridger Driver state */ ++ if (enable_off_mode) ++ pDevContext->dwBrdState = BRD_HIBERNATION; ++ else ++ pDevContext->dwBrdState = BRD_RETENTION; ++ ++ /* Turn off DSP Peripheral clocks */ ++ status = DSP_PeripheralClocks_Disable(pDevContext, NULL); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, "SleepDSP- FAILED\n"); ++ return status; ++ } ++#ifdef CONFIG_BRIDGE_DVFS ++ else if (targetPwrState == HW_PWR_STATE_OFF) { ++ struct dspbridge_platform_data *pdata = ++ omap_dspbridge_dev->dev.platform_data; ++ /* ++ * Set the OPP to low level before moving to OFF mode ++ */ ++ if (pdata->dsp_set_min_opp) ++ (*pdata->dsp_set_min_opp)(VDD1_OPP1); ++ } ++#endif /* CONFIG_BRIDGE_DVFS */ ++ } ++#endif /* CONFIG_PM */ ++ return status; ++} ++ ++ ++/* ++ * ======== WakeDSP ======== ++ * Wake up DSP from sleep. ++ */ ++DSP_STATUS WakeDSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs) ++{ ++#ifdef CONFIG_PM ++ DSP_STATUS status = DSP_SOK; ++#ifdef CONFIG_BRIDGE_DEBUG ++ enum HW_PwrState_t pwrState; ++ struct CFG_HOSTRES resources; ++ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ return status; ++#endif /* CONFIG_BRIDGE_DEBUG */ ++ ++ /* Check the BRD/WMD state, if it is not 'SLEEP' then return failure */ ++ if (pDevContext->dwBrdState == BRD_RUNNING || ++ pDevContext->dwBrdState == BRD_STOPPED) { ++ /* The Device is in 'RET' or 'OFF' state and WMD state is not ++ * 'SLEEP', this means state inconsistency, so return */ ++ return DSP_SOK; ++ } ++ ++ /* Send a wakeup message to DSP */ ++ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_DSPWAKEUP); ++ ++#ifdef CONFIG_BRIDGE_DEBUG ++ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, ++ &pwrState); ++ DBG_Trace(DBG_LEVEL7, ++ "\nWakeDSP: Power State After sending Interrupt " ++ "to DSP %x\n", pwrState); ++#endif /* CONFIG_BRIDGE_DEBUG */ ++ ++ /* Set the device state to RUNNIG */ ++ pDevContext->dwBrdState = BRD_RUNNING; ++#endif /* CONFIG_PM */ ++ return status; ++} ++ ++/* ++ * ======== DSPPeripheralClkCtrl ======== ++ * Enable/Disable the DSP peripheral clocks as needed.. 
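Both SleepDSP() above and WaitForStart() in the previous file rely on the same bounded-polling idiom: re-read a hardware status with a short udelay() between attempts and report a timeout after TIHELEN_ACKTIMEOUT tries instead of spinning forever on a DSP that never answers. A tiny standalone sketch of the pattern (outside the diff, not part of the patch); dsp_reached_target() simulates the HW_PWR_IVA2StateGet() read and the delay is only noted in a comment.

#include <stdbool.h>
#include <stdio.h>

/* Simulated hardware: pretend the DSP reaches the target state after a
 * few polls. */
static int polls_left = 3;
static bool dsp_reached_target(void)
{
    return --polls_left <= 0;
}

/* Poll at most max_tries times; false means we gave up (timeout). */
static bool wait_for_dsp(unsigned int max_tries)
{
    while (max_tries--) {
        if (dsp_reached_target())
            return true;
        /* in the driver: udelay(500); */
    }
    return false;
}

int main(void)
{
    printf(wait_for_dsp(10) ? "DSP reached target state\n"
                            : "timed out waiting for DSP\n");
    return 0;
}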
++ */ ++DSP_STATUS DSPPeripheralClkCtrl(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs) ++{ ++ u32 extClk = 0; ++ u32 extClkId = 0; ++ u32 extClkCmd = 0; ++ u32 clkIdIndex = MBX_PM_MAX_RESOURCES; ++ u32 tmpIndex; ++ u32 dspPerClksBefore; ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_SOK; ++ ++ DBG_Trace(DBG_ENTER, "Entering DSPPeripheralClkCtrl \n"); ++ dspPerClksBefore = pDevContext->uDspPerClks; ++ DBG_Trace(DBG_ENTER, "DSPPeripheralClkCtrl : uDspPerClks = 0x%x \n", ++ dspPerClksBefore); ++ ++ extClk = (u32)*((u32 *)pArgs); ++ ++ DBG_Trace(DBG_LEVEL3, "DSPPeripheralClkCtrl : extClk+Cmd = 0x%x \n", ++ extClk); ++ ++ extClkId = extClk & MBX_PM_CLK_IDMASK; ++ ++ /* process the power message -- TODO, keep it in a separate function */ ++ for (tmpIndex = 0; tmpIndex < MBX_PM_MAX_RESOURCES; tmpIndex++) { ++ if (extClkId == BPWR_CLKID[tmpIndex]) { ++ clkIdIndex = tmpIndex; ++ break; ++ } ++ } ++ /* TODO -- Assert may be a too hard restriction here.. May be we should ++ * just return with failure when the CLK ID does not match */ ++ /* DBC_Assert(clkIdIndex < MBX_PM_MAX_RESOURCES);*/ ++ if (clkIdIndex == MBX_PM_MAX_RESOURCES) { ++ DBG_Trace(DBG_LEVEL7, ++ "DSPPeripheralClkCtrl : Could n't get clock Id for" ++ "clkid 0x%x \n", clkIdIndex); ++ /* return with a more meaningfull error code */ ++ return DSP_EFAIL; ++ } ++ extClkCmd = (extClk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK; ++ switch (extClkCmd) { ++ case BPWR_DisableClock: ++ /* Call BP to disable the needed clock */ ++ DBG_Trace(DBG_LEVEL3, ++ "DSPPeripheralClkCtrl : Disable CLK for \n"); ++ status1 = CLK_Disable(BPWR_Clks[clkIdIndex].intClk); ++ status = CLK_Disable(BPWR_Clks[clkIdIndex].funClk); ++ DSPClkWakeupEventCtrl(BPWR_Clks[clkIdIndex].clkId, false); ++ if ((DSP_SUCCEEDED(status)) && (DSP_SUCCEEDED(status1))) { ++ (pDevContext->uDspPerClks) &= ++ (~((u32) (1 << clkIdIndex))); ++ } else { ++ DBG_Trace(DBG_LEVEL7, "DSPPeripheralClkCtrl : Failed " ++ "to disable clk\n"); ++ } ++ break; ++ case BPWR_EnableClock: ++ DBG_Trace(DBG_LEVEL3, ++ "DSPPeripheralClkCtrl : Enable CLK for \n"); ++ status1 = CLK_Enable(BPWR_Clks[clkIdIndex].intClk); ++ status = CLK_Enable(BPWR_Clks[clkIdIndex].funClk); ++ DSPClkWakeupEventCtrl(BPWR_Clks[clkIdIndex].clkId, true); ++ if ((DSP_SUCCEEDED(status)) && (DSP_SUCCEEDED(status1))) { ++ (pDevContext->uDspPerClks) |= (1 << clkIdIndex); ++ } else { ++ DBG_Trace(DBG_LEVEL7, ++ "DSPPeripheralClkCtrl:Failed to Enable clk\n"); ++ } ++ break; ++ default: ++ DBG_Trace(DBG_LEVEL3, ++ "DSPPeripheralClkCtrl : Unsupported CMD \n"); ++ /* unsupported cmd */ ++ /* TODO -- provide support for AUTOIDLE Enable/Disable ++ * commands */ ++ } ++ return status; ++} ++ ++/* ++ * ========PreScale_DSP======== ++ * Sends prescale notification to DSP ++ * ++ */ ++DSP_STATUS PreScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs) ++{ ++#ifdef CONFIG_BRIDGE_DVFS ++ u32 level; ++ u32 voltage_domain; ++ ++ voltage_domain = *((u32 *)pArgs); ++ level = *((u32 *)pArgs + 1); ++ ++ DBG_Trace(DBG_LEVEL7, "PreScale_DSP: voltage_domain = %x, level = " ++ "0x%x\n", voltage_domain, level); ++ if ((pDevContext->dwBrdState == BRD_HIBERNATION) || ++ (pDevContext->dwBrdState == BRD_RETENTION) || ++ (pDevContext->dwBrdState == BRD_DSP_HIBERNATION)) { ++ DBG_Trace(DBG_LEVEL7, "PreScale_DSP: IVA in sleep. 
" ++ "No notification to DSP\n"); ++ return DSP_SOK; ++ } else if ((pDevContext->dwBrdState == BRD_RUNNING)) { ++ /* Send a prenotificatio to DSP */ ++ DBG_Trace(DBG_LEVEL7, ++ "PreScale_DSP: Sent notification to DSP\n"); ++ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_SETPOINT_PRENOTIFY); ++ return DSP_SOK; ++ } else { ++ DBG_Trace(DBG_LEVEL7, "PreScale_DSP: Failed - DSP BRD" ++ " state in wrong state"); ++ return DSP_EFAIL; ++ } ++#endif /* #ifdef CONFIG_BRIDGE_DVFS */ ++ return DSP_SOK; ++} ++ ++/* ++ * ========PostScale_DSP======== ++ * Sends postscale notification to DSP ++ * ++ */ ++DSP_STATUS PostScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs) ++{ ++#ifdef CONFIG_BRIDGE_DVFS ++ u32 level; ++ u32 voltage_domain; ++ struct IO_MGR *hIOMgr; ++ DSP_STATUS status = DSP_SOK; ++ ++ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); ++ ++ voltage_domain = *((u32 *)pArgs); ++ level = *((u32 *)pArgs + 1); ++ DBG_Trace(DBG_LEVEL7, ++ "PostScale_DSP: voltage_domain = %x, level = 0x%x\n", ++ voltage_domain, level); ++ if ((pDevContext->dwBrdState == BRD_HIBERNATION) || ++ (pDevContext->dwBrdState == BRD_RETENTION) || ++ (pDevContext->dwBrdState == BRD_DSP_HIBERNATION)) { ++ /* Update the OPP value in shared memory */ ++ IO_SHMsetting(hIOMgr, SHM_CURROPP, &level); ++ DBG_Trace(DBG_LEVEL7, ++ "PostScale_DSP: IVA in sleep. Wrote to shared " ++ "memory \n"); ++ return DSP_SOK; ++ } else if ((pDevContext->dwBrdState == BRD_RUNNING)) { ++ /* Update the OPP value in shared memory */ ++ IO_SHMsetting(hIOMgr, SHM_CURROPP, &level); ++ /* Send a post notification to DSP */ ++ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_SETPOINT_POSTNOTIFY); ++ DBG_Trace(DBG_LEVEL7, ++ "PostScale_DSP: Wrote to shared memory Sent post" ++ " notification to DSP\n"); ++ return DSP_SOK; ++ } else { ++ DBG_Trace(DBG_LEVEL7, "PostScale_DSP: Failed - DSP BRD state " ++ "in wrong state"); ++ return DSP_EFAIL; ++ } ++#endif /* #ifdef CONFIG_BRIDGE_DVFS */ ++ return DSP_SOK; ++} ++ ++/* ++ * ========DSP_PeripheralClocks_Disable======== ++ * Disables all the peripheral clocks that were requested by DSP ++ */ ++DSP_STATUS DSP_PeripheralClocks_Disable(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs) ++{ ++ ++ u32 clkIdx; ++ DSP_STATUS status = DSP_SOK; ++ ++ for (clkIdx = 0; clkIdx < MBX_PM_MAX_RESOURCES; clkIdx++) { ++ if (((pDevContext->uDspPerClks) >> clkIdx) & 0x01) { ++ /* Disables the interface clock of the peripheral */ ++ status = CLK_Disable(BPWR_Clks[clkIdx].intClk); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ "Failed to Enable the DSP Peripheral" ++ "Clk 0x%x \n", BPWR_Clks[clkIdx]); ++ } ++ /* Disables the functional clock of the periphearl */ ++ status = CLK_Disable(BPWR_Clks[clkIdx].funClk); ++ if (DSP_FAILED(status)) { ++ DBG_Trace(DBG_LEVEL7, ++ "Failed to Enable the DSP Peripheral" ++ "Clk 0x%x \n", BPWR_Clks[clkIdx]); ++ } ++ } ++ } ++ return status; ++} ++ ++/* ++ * ========DSP_PeripheralClocks_Enable======== ++ * Enables all the peripheral clocks that were requested by DSP ++ */ ++DSP_STATUS DSP_PeripheralClocks_Enable(struct WMD_DEV_CONTEXT *pDevContext, ++ IN void *pArgs) ++{ ++ u32 clkIdx; ++ DSP_STATUS int_clk_status = DSP_EFAIL, fun_clk_status = DSP_EFAIL; ++ ++ for (clkIdx = 0; clkIdx < MBX_PM_MAX_RESOURCES; clkIdx++) { ++ if (((pDevContext->uDspPerClks) >> clkIdx) & 0x01) { ++ /* Enable the interface clock of the peripheral */ ++ int_clk_status = CLK_Enable(BPWR_Clks[clkIdx].intClk); ++ /* Enable the functional clock of the periphearl */ ++ fun_clk_status = 
CLK_Enable(BPWR_Clks[clkIdx].funClk); ++ } ++ } ++ if ((int_clk_status | fun_clk_status) != DSP_SOK) ++ return DSP_EFAIL; ++ return DSP_SOK; ++} ++ ++void DSPClkWakeupEventCtrl(u32 ClkId, bool enable) ++{ ++ struct CFG_HOSTRES resources; ++ DSP_STATUS status = DSP_SOK; ++ u32 iva2_grpsel; ++ u32 mpu_grpsel; ++ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); ++ if (DSP_FAILED(status)) ++ return; ++ ++ switch (ClkId) { ++ case BPWR_GPTimer5: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_GPT5; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_GPT5; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_GPTimer6: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_GPT6; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_GPT6; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_GPTimer7: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_GPT7; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_GPT7; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_GPTimer8: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_GPT8; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_GPT8; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_MCBSP1: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwCorePmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwCorePmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_MCBSP2: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2; ++ } else { ++ 
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_MCBSP3: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_MCBSP4: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwPerPmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ case BPWR_MCBSP5: ++ iva2_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwCorePmBase) + 0xA8)); ++ mpu_grpsel = (u32) *((REG_UWORD32 *) ++ ((u32) (resources.dwCorePmBase) + 0xA4)); ++ if (enable) { ++ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5; ++ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5; ++ } else { ++ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5; ++ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5; ++ } ++ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA8)) ++ = iva2_grpsel; ++ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA4)) ++ = mpu_grpsel; ++ break; ++ } ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/ue_deh.c kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/ue_deh.c +--- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/ue_deh.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/bridge/wmd/ue_deh.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,371 @@ ++/* ++ * ue_deh.c ++ * ++ * DSP-BIOS Bridge driver support functions for TI OMAP processors. ++ * ++ * Copyright (C) 2005-2006 Texas Instruments, Inc. ++ * ++ * This package is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED ++ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. ++ */ ++ ++ ++/* ++ * ======== ue_deh.c ======== ++ * Description: ++ * Implements upper edge DSP exception handling (DEH) functions. ++ * ++ *! Revision History: ++ *! ================ ++ *! 03-Jan-2005 hn: Support for IVA DEH. ++ *! 05-Jan-2004 vp: Updated for the 24xx HW library. ++ *! 19-Feb-2003 vp: Code review updates. ++ *! - Cosmetic changes. ++ *! 18-Oct-2002 sb: Ported to Linux platform. ++ *! 10-Dec-2001 kc: Updated DSP error reporting in DEBUG mode. ++ *! 10-Sep-2001 kc: created. 
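Every case in DSPClkWakeupEventCtrl() above performs the same read-modify-write on the IVA2 and MPU GRPSEL registers (PM base + 0xA8 and + 0xA4), differing only in the base address and the OMAP3430_GRPSEL_* bit: when a clock is enabled the wake-up event is routed to IVA2, when it is disabled it is handed back to the MPU. The sketch below (outside the diff, not part of the patch) factors that pattern into one helper; struct wkup_route and its field names are illustrative and not taken from the driver. The per-clock switch in the patch keeps the code close to the register map at the cost of repetition; a table-driven form like this trades that literalness for less duplication.

#include <stdbool.h>
#include <stdint.h>

struct wkup_route {
    volatile uint32_t *iva2_grpsel;   /* PM base + 0xA8 */
    volatile uint32_t *mpu_grpsel;    /* PM base + 0xA4 */
    uint32_t bit;                     /* OMAP3430_GRPSEL_xxx */
};

/* Route one peripheral's wake-up event to IVA2 (clock enabled) or back to
 * the MPU (clock disabled) with a single read-modify-write per register. */
static void route_wakeup(const struct wkup_route *r, bool to_iva2)
{
    uint32_t iva2 = *r->iva2_grpsel;
    uint32_t mpu  = *r->mpu_grpsel;

    if (to_iva2) {
        iva2 |=  r->bit;
        mpu  &= ~r->bit;
    } else {
        mpu  |=  r->bit;
        iva2 &= ~r->bit;
    }
    *r->iva2_grpsel = iva2;
    *r->mpu_grpsel  = mpu;
}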
++ */ ++ ++/* ----------------------------------- Host OS */ ++#include ++ ++/* ----------------------------------- DSP/BIOS Bridge */ ++#include ++#include ++#include ++ ++/* ----------------------------------- Trace & Debug */ ++#include ++#include ++ ++/* ----------------------------------- OS Adaptation Layer */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ----------------------------------- Link Driver */ ++#include ++ ++/* ----------------------------------- Platform Manager */ ++#include ++#include ++ ++/* ------------------------------------ Hardware Abstraction Layer */ ++#include ++#include ++ ++/* ----------------------------------- This */ ++#include "mmu_fault.h" ++#include "_tiomap.h" ++#include "_deh.h" ++#include "_tiomap_mmu.h" ++#include "_tiomap_pwr.h" ++#include ++ ++static struct HW_MMUMapAttrs_t mapAttrs = { HW_LITTLE_ENDIAN, ++ HW_ELEM_SIZE_16BIT, ++ HW_MMU_CPUES} ; ++#define VirtToPhys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) ++ ++static u32 dummyVaAddr; ++/* ++ * ======== WMD_DEH_Create ======== ++ * Creates DEH manager object. ++ */ ++DSP_STATUS WMD_DEH_Create(OUT struct DEH_MGR **phDehMgr, ++ struct DEV_OBJECT *hDevObject) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEH_MGR *pDehMgr = NULL; ++ struct CFG_HOSTRES cfgHostRes; ++ struct CFG_DEVNODE *hDevNode; ++ struct WMD_DEV_CONTEXT *hWmdContext = NULL; ++ ++ DBG_Trace(DBG_LEVEL1, "Entering DEH_Create: 0x%x\n", phDehMgr); ++ /* Message manager will be created when a file is loaded, since ++ * size of message buffer in shared memory is configurable in ++ * the base image. */ ++ /* Get WMD context info. */ ++ DEV_GetWMDContext(hDevObject, &hWmdContext); ++ DBC_Assert(hWmdContext); ++ dummyVaAddr = 0; ++ /* Allocate IO manager object: */ ++ MEM_AllocObject(pDehMgr, struct DEH_MGR, SIGNATURE); ++ if (pDehMgr == NULL) { ++ status = DSP_EMEMORY; ++ } else { ++ /* Create an NTFY object to manage notifications */ ++ if (DSP_SUCCEEDED(status)) ++ status = NTFY_Create(&pDehMgr->hNtfy); ++ ++ /* Create a DPC object. */ ++ status = DPC_Create(&pDehMgr->hMmuFaultDpc, MMU_FaultDpc, ++ (void *)pDehMgr); ++ if (DSP_SUCCEEDED(status)) ++ status = DEV_GetDevNode(hDevObject, &hDevNode); ++ ++ if (DSP_SUCCEEDED(status)) ++ status = CFG_GetHostResources(hDevNode, &cfgHostRes); ++ ++ if (DSP_SUCCEEDED(status)) { ++ /* Fill in context structure */ ++ pDehMgr->hWmdContext = hWmdContext; ++ pDehMgr->errInfo.dwErrMask = 0L; ++ pDehMgr->errInfo.dwVal1 = 0L; ++ pDehMgr->errInfo.dwVal2 = 0L; ++ pDehMgr->errInfo.dwVal3 = 0L; ++ /* Install ISR function for DSP MMU fault */ ++ if ((request_irq(INT_DSP_MMU_IRQ, MMU_FaultIsr, 0, ++ "DspBridge\tiommu fault", (void *)pDehMgr)) == 0) ++ status = DSP_SOK; ++ else ++ status = DSP_EFAIL; ++ } ++ } ++ if (DSP_FAILED(status)) { ++ /* If create failed, cleanup */ ++ WMD_DEH_Destroy((struct DEH_MGR *)pDehMgr); ++ *phDehMgr = NULL; ++ } else { ++ *phDehMgr = (struct DEH_MGR *)pDehMgr; ++ DBG_Trace(DBG_LEVEL1, "ISR_IRQ Object 0x%x \n", ++ pDehMgr); ++ } ++ DBG_Trace(DBG_LEVEL1, "Exiting DEH_Create.\n"); ++ return status; ++} ++ ++/* ++ * ======== WMD_DEH_Destroy ======== ++ * Destroys DEH manager object. 
++ */ ++DSP_STATUS WMD_DEH_Destroy(struct DEH_MGR *hDehMgr) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; ++ ++ DBG_Trace(DBG_LEVEL1, "Entering DEH_Destroy: 0x%x\n", pDehMgr); ++ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { ++ /* Release dummy VA buffer */ ++ WMD_DEH_ReleaseDummyMem(); ++ /* If notification object exists, delete it */ ++ if (pDehMgr->hNtfy) ++ (void)NTFY_Delete(pDehMgr->hNtfy); ++ /* Disable DSP MMU fault */ ++ free_irq(INT_DSP_MMU_IRQ, pDehMgr); ++ (void)DPC_Destroy(pDehMgr->hMmuFaultDpc); ++ /* Deallocate the DEH manager object */ ++ MEM_FreeObject(pDehMgr); ++ } ++ DBG_Trace(DBG_LEVEL1, "Exiting DEH_Destroy.\n"); ++ return status; ++} ++ ++/* ++ * ======== WMD_DEH_RegisterNotify ======== ++ * Registers for DEH notifications. ++ */ ++DSP_STATUS WMD_DEH_RegisterNotify(struct DEH_MGR *hDehMgr, u32 uEventMask, ++ u32 uNotifyType, ++ struct DSP_NOTIFICATION *hNotification) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; ++ ++ DBG_Trace(DBG_LEVEL1, "Entering WMD_DEH_RegisterNotify: 0x%x\n", ++ pDehMgr); ++ ++ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { ++ status = NTFY_Register(pDehMgr->hNtfy, hNotification, ++ uEventMask, uNotifyType); ++ } ++ DBG_Trace(DBG_LEVEL1, "Exiting WMD_DEH_RegisterNotify.\n"); ++ return status; ++} ++ ++ ++/* ++ * ======== WMD_DEH_Notify ======== ++ * DEH error notification function. Informs user about the error. ++ */ ++void WMD_DEH_Notify(struct DEH_MGR *hDehMgr, u32 ulEventMask, ++ u32 dwErrInfo) ++{ ++ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; ++ struct WMD_DEV_CONTEXT *pDevContext; ++ DSP_STATUS status = DSP_SOK; ++ DSP_STATUS status1 = DSP_EFAIL; ++ u32 memPhysical = 0; ++ u32 HW_MMU_MAX_TLB_COUNT = 31; ++ extern u32 faultAddr; ++ struct CFG_HOSTRES resources; ++ HW_STATUS hwStatus; ++ ++ status = CFG_GetHostResources( ++ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), ++ &resources); ++ if (DSP_FAILED(status)) ++ DBG_Trace(DBG_LEVEL7, ++ "**Failed to get Host Resources in MMU ISR **\n"); ++ ++ DBG_Trace(DBG_LEVEL1, "Entering WMD_DEH_Notify: 0x%x, 0x%x\n", pDehMgr, ++ ulEventMask); ++ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { ++ printk(KERN_INFO "WMD_DEH_Notify: ********** DEVICE EXCEPTION " ++ "**********\n"); ++ pDevContext = (struct WMD_DEV_CONTEXT *)pDehMgr->hWmdContext; ++ ++ switch (ulEventMask) { ++ case DSP_SYSERROR: ++ /* reset errInfo structure before use */ ++ pDehMgr->errInfo.dwErrMask = DSP_SYSERROR; ++ pDehMgr->errInfo.dwVal1 = 0L; ++ pDehMgr->errInfo.dwVal2 = 0L; ++ pDehMgr->errInfo.dwVal3 = 0L; ++ pDehMgr->errInfo.dwVal1 = dwErrInfo; ++ printk(KERN_ERR "WMD_DEH_Notify: DSP_SYSERROR, errInfo " ++ "= 0x%x\n", dwErrInfo); ++ break; ++ case DSP_MMUFAULT: ++ /* MMU fault routine should have set err info ++ * structure */ ++ pDehMgr->errInfo.dwErrMask = DSP_MMUFAULT; ++ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT," ++ "errInfo = 0x%x\n", dwErrInfo); ++ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, High " ++ "Address = 0x%x\n", ++ (unsigned int)pDehMgr->errInfo.dwVal1); ++ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, Low " ++ "Address = 0x%x\n", ++ (unsigned int)pDehMgr->errInfo.dwVal2); ++ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, fault " ++ "address = 0x%x\n", (unsigned int)faultAddr); ++ dummyVaAddr = (u32)MEM_Calloc(sizeof(char) * 0x1000, ++ MEM_PAGED); ++ memPhysical = VirtToPhys(PG_ALIGN_LOW((u32)dummyVaAddr, ++ PG_SIZE_4K)); ++DBG_Trace(DBG_LEVEL6, "WMD_DEH_Notify: DSP_MMUFAULT, " ++ "mem Physical= 0x%x\n", 
memPhysical); ++ pDevContext = (struct WMD_DEV_CONTEXT *) ++ pDehMgr->hWmdContext; ++ /* Reset the dynamic mmu index to fixed count if it ++ * exceeds 31. So that the dynmmuindex is always ++ * between the range of standard/fixed entries ++ * and 31. */ ++ if (pDevContext->numTLBEntries > ++ HW_MMU_MAX_TLB_COUNT) { ++ pDevContext->numTLBEntries = pDevContext-> ++ fixedTLBEntries; ++ } ++ DBG_Trace(DBG_LEVEL6, "Adding TLB Entry %d: VA: 0x%x, " ++ "PA: 0x%x\n", pDevContext-> ++ numTLBEntries, faultAddr, memPhysical); ++ if (DSP_SUCCEEDED(status)) { ++ hwStatus = HW_MMU_TLBAdd(resources.dwDmmuBase, ++ memPhysical, faultAddr, ++ HW_PAGE_SIZE_4KB, 1, &mapAttrs, ++ HW_SET, HW_SET); ++ } ++ /* send an interrupt to DSP */ ++ HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, ++ MBX_DEH_CLASS | MBX_DEH_EMMU); ++ /* Clear MMU interrupt */ ++ HW_MMU_EventAck(resources.dwDmmuBase, ++ HW_MMU_TRANSLATION_FAULT); ++ break; ++ case DSP_PWRERROR: ++ /* reset errInfo structure before use */ ++ pDehMgr->errInfo.dwErrMask = DSP_PWRERROR; ++ pDehMgr->errInfo.dwVal1 = 0L; ++ pDehMgr->errInfo.dwVal2 = 0L; ++ pDehMgr->errInfo.dwVal3 = 0L; ++ pDehMgr->errInfo.dwVal1 = dwErrInfo; ++ printk(KERN_ERR "WMD_DEH_Notify: DSP_PWRERROR, errInfo " ++ "= 0x%x\n", dwErrInfo); ++ break; ++ default: ++ DBG_Trace(DBG_LEVEL6, ++ "WMD_DEH_Notify: Unknown Error, errInfo = " ++ "0x%x\n", dwErrInfo); ++ break; ++ } ++ ++ /* Filter subsequent notifications when an error occurs */ ++ if (pDevContext->dwBrdState != BRD_ERROR) { ++ /* Use it as a flag to send notifications the ++ * first time and error occurred, next time ++ * state will be BRD_ERROR */ ++ status1 = DSP_EFAIL; ++ } ++ ++ /* Filter subsequent notifications when an error occurs */ ++ if (pDevContext->dwBrdState != BRD_ERROR) ++ status1 = DSP_SOK; ++ ++ /* Set the Board state as ERROR */ ++ pDevContext->dwBrdState = BRD_ERROR; ++ /* Disable all the clocks that were enabled by DSP */ ++ (void)DSP_PeripheralClocks_Disable(pDevContext, NULL); ++ /* Call DSP Trace Buffer */ ++ PrintDspTraceBuffer(hDehMgr->hWmdContext); ++ ++ if (DSP_SUCCEEDED(status1)) { ++ /* Signal DSP error/exception event. */ ++ NTFY_Notify(pDehMgr->hNtfy, ulEventMask); ++ } ++ ++ } ++ DBG_Trace(DBG_LEVEL1, "Exiting WMD_DEH_Notify\n"); ++ ++} ++ ++/* ++ * ======== WMD_DEH_GetInfo ======== ++ * Retrieves error information. ++ */ ++DSP_STATUS WMD_DEH_GetInfo(struct DEH_MGR *hDehMgr, ++ struct DSP_ERRORINFO *pErrInfo) ++{ ++ DSP_STATUS status = DSP_SOK; ++ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; ++ ++ DBC_Require(pDehMgr); ++ DBC_Require(pErrInfo); ++ ++ DBG_Trace(DBG_LEVEL1, "Entering WMD_DEH_GetInfo: 0x%x\n", hDehMgr); ++ ++ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { ++ /* Copy DEH error info structure to PROC error info ++ * structure. 
*/ ++ pErrInfo->dwErrMask = pDehMgr->errInfo.dwErrMask; ++ pErrInfo->dwVal1 = pDehMgr->errInfo.dwVal1; ++ pErrInfo->dwVal2 = pDehMgr->errInfo.dwVal2; ++ pErrInfo->dwVal3 = pDehMgr->errInfo.dwVal3; ++ } ++ ++ DBG_Trace(DBG_LEVEL1, "Exiting WMD_DEH_GetInfo\n"); ++ ++ return status; ++} ++ ++ ++/* ++ * ======== WMD_DEH_ReleaseDummyMem ======== ++ * Releases memory allocated for dummy page ++ */ ++void WMD_DEH_ReleaseDummyMem(void) ++{ ++ if (dummyVaAddr) { ++ MEM_Free((void *)dummyVaAddr); ++ dummyVaAddr = 0; ++ } ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/dspgateway/dsp_mem.c kernel-2.6.28-20093908+0m5/drivers/dsp/dspgateway/dsp_mem.c +--- linux-omap-2.6.28-omap1/drivers/dsp/dspgateway/dsp_mem.c 2011-09-04 11:32:10.023211266 +0200 ++++ kernel-2.6.28-20093908+0m5/drivers/dsp/dspgateway/dsp_mem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -32,13 +32,13 @@ + #include + #include + #include ++#include + #include + #include + #include + #include + #include + #include +-#include + #include + #include + #include +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpio/gpiolib.c kernel-2.6.28-20093908+0m5/drivers/gpio/gpiolib.c +--- linux-omap-2.6.28-omap1/drivers/gpio/gpiolib.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpio/gpiolib.c 2011-09-04 11:31:05.000000000 +0200 +@@ -789,6 +789,7 @@ int gpio_request(unsigned gpio, const ch + } else { + status = -EBUSY; + module_put(chip->owner); ++ goto done; + } + + if (chip->request) { +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ati_pcigart.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ati_pcigart.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ati_pcigart.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ati_pcigart.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,199 @@ ++/** ++ * \file ati_pcigart.c ++ * ATI PCI GART support ++ * ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com ++ * ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++ ++# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ ++# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1)) ++ ++#define ATI_PCIE_WRITE 0x4 ++#define ATI_PCIE_READ 0x8 ++ ++static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart) ++{ ++ u32 page_base; ++ ++ page_base = (u32)addr & ATI_PCIGART_PAGE_MASK; ++ switch(gart_info->gart_reg_if) { ++ case DRM_ATI_GART_IGP: ++ page_base |= (upper_32_bits(addr) & 0xff) << 4; ++ page_base |= 0xc; ++ break; ++ case DRM_ATI_GART_PCIE: ++ page_base >>= 8; ++ page_base |= (upper_32_bits(addr) & 0xff) << 24; ++ page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE; ++ break; ++ default: ++ case DRM_ATI_GART_PCI: ++ break; ++ } ++ *pci_gart = cpu_to_le32(page_base); ++} ++ ++static int drm_ati_alloc_pcigart_table(struct drm_device *dev, ++ struct drm_ati_pcigart_info *gart_info) ++{ ++ gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, ++ PAGE_SIZE, ++ gart_info->table_mask); ++ if (gart_info->table_handle == NULL) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void drm_ati_free_pcigart_table(struct drm_device *dev, ++ struct drm_ati_pcigart_info *gart_info) ++{ ++ drm_pci_free(dev, gart_info->table_handle); ++ gart_info->table_handle = NULL; ++} ++ ++int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) ++{ ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long pages; ++ int i; ++ int max_pages; ++ ++ /* we need to support large memory configurations */ ++ if (!entry) { ++ DRM_ERROR("no scatter/gather memory!\n"); ++ return 0; ++ } ++ ++ if (gart_info->bus_addr) { ++ ++ max_pages = (gart_info->table_size / sizeof(u32)); ++ pages = (entry->pages <= max_pages) ++ ? entry->pages : max_pages; ++ ++ for (i = 0; i < pages; i++) { ++ if (!entry->busaddr[i]) ++ break; ++ pci_unmap_page(dev->pdev, entry->busaddr[i], ++ PAGE_SIZE, PCI_DMA_TODEVICE); ++ } ++ ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) ++ gart_info->bus_addr = 0; ++ } ++ ++ ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN ++ && gart_info->table_handle) { ++ ++ drm_ati_free_pcigart_table(dev, gart_info); ++ } ++ ++ return 1; ++} ++EXPORT_SYMBOL(drm_ati_pcigart_cleanup); ++ ++int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) ++{ ++ struct drm_sg_mem *entry = dev->sg; ++ void *address = NULL; ++ unsigned long pages; ++ u32 *pci_gart; ++ dma_addr_t bus_address = 0; ++ int i, j, ret = 0; ++ int max_pages; ++ dma_addr_t entry_addr; ++ ++ if (!entry) { ++ DRM_ERROR("no scatter/gather memory!\n"); ++ goto done; ++ } ++ ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { ++ DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); ++ ++ ret = drm_ati_alloc_pcigart_table(dev, gart_info); ++ if (ret) { ++ DRM_ERROR("cannot allocate PCI GART page!\n"); ++ goto done; ++ } ++ ++ address = gart_info->table_handle->vaddr; ++ bus_address = gart_info->table_handle->busaddr; ++ } else { ++ address = gart_info->addr; ++ bus_address = gart_info->bus_addr; ++ DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n", ++ bus_address, (unsigned long)address); ++ } ++ ++ pci_gart = (u32 *) address; ++ ++ max_pages = (gart_info->table_size / sizeof(u32)); ++ pages = (entry->pages <= max_pages) ++ ? 
entry->pages : max_pages; ++ ++ memset(pci_gart, 0, max_pages * sizeof(u32)); ++ ++ for (i = 0; i < pages; i++) { ++ /* we need to support large memory configurations */ ++ entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], ++ 0, PAGE_SIZE, PCI_DMA_TODEVICE); ++ if (entry->busaddr[i] == 0) { ++ DRM_ERROR("unable to map PCIGART pages!\n"); ++ drm_ati_pcigart_cleanup(dev, gart_info); ++ address = NULL; ++ bus_address = 0; ++ goto done; ++ } ++ ++ entry_addr = entry->busaddr[i]; ++ for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { ++ gart_insert_page_into_table(gart_info, entry_addr, pci_gart); ++ pci_gart++; ++ entry_addr += ATI_PCIGART_PAGE_SIZE; ++ } ++ } ++ ++ ret = 1; ++ ++#if defined(__i386__) || defined(__x86_64__) ++ wbinvd(); ++#else ++ mb(); ++#endif ++ ++ done: ++ gart_info->addr = address; ++ gart_info->bus_addr = bus_address; ++ return ret; ++} ++EXPORT_SYMBOL(drm_ati_pcigart_init); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_agpsupport.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_agpsupport.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_agpsupport.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_agpsupport.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,715 @@ ++/** ++ * \file drm_agpsupport.c ++ * DRM support for AGP/GART backend ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include ++ ++#if __OS_HAS_AGP ++ ++/** ++ * Get AGP information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a (output) drm_agp_info structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device has been initialized and acquired and fills in the ++ * drm_agp_info structure with the information in drm_agp_head::agp_info. 
++ */ ++int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) ++{ ++ DRM_AGP_KERN *kern; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ ++ kern = &dev->agp->agp_info; ++ info->agp_version_major = kern->version.major; ++ info->agp_version_minor = kern->version.minor; ++ info->mode = kern->mode; ++ info->aperture_base = kern->aper_base; ++ info->aperture_size = kern->aper_size * 1024 * 1024; ++ info->memory_allowed = kern->max_memory << PAGE_SHIFT; ++ info->memory_used = kern->current_memory << PAGE_SHIFT; ++ info->id_vendor = kern->device->vendor; ++ info->id_device = kern->device->device; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_info); ++ ++int drm_agp_info_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_info *info = data; ++ int err; ++ ++ err = drm_agp_info(dev, info); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/** ++ * Acquire the AGP device. ++ * ++ * \param dev DRM device that is to acquire AGP. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device hasn't been acquired before and calls ++ * \c agp_backend_acquire. ++ */ ++int drm_agp_acquire(struct drm_device * dev) ++{ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ int retcode; ++#endif ++ ++ if (!dev->agp) ++ return -ENODEV; ++ if (dev->agp->acquired) ++ return -EBUSY; ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ if ((retcode = agp_backend_acquire())) ++ return retcode; ++#else ++ if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev))) ++ return -ENODEV; ++#endif ++ ++ dev->agp->acquired = 1; ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_acquire); ++ ++/** ++ * Acquire the AGP device (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device hasn't been acquired before and calls ++ * \c agp_backend_acquire. ++ */ ++int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return drm_agp_acquire((struct drm_device *) file_priv->minor->dev); ++} ++ ++/** ++ * Release the AGP device. ++ * ++ * \param dev DRM device that is to release AGP. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device has been acquired and calls \c agp_backend_release. ++ */ ++int drm_agp_release(struct drm_device *dev) ++{ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ agp_backend_release(); ++#else ++ agp_backend_release(dev->agp->bridge); ++#endif ++ dev->agp->acquired = 0; ++ return 0; ++ ++} ++EXPORT_SYMBOL(drm_agp_release); ++ ++int drm_agp_release_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return drm_agp_release(dev); ++} ++ ++/** ++ * Enable the AGP bus. ++ * ++ * \param dev DRM device that has previously acquired AGP. ++ * \param mode Requested AGP mode. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device has been acquired but not enabled, and calls ++ * \c agp_enable. 
++ */ ++int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) ++{ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ ++ dev->agp->mode = mode.mode; ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ agp_enable(mode.mode); ++#else ++ agp_enable(dev->agp->bridge, mode.mode); ++#endif ++ dev->agp->enabled = 1; ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_enable); ++ ++int drm_agp_enable_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_mode *mode = data; ++ ++ return drm_agp_enable(dev, *mode); ++} ++ ++/** ++ * Allocate AGP memory. ++ * ++ * \param inode device inode. ++ * \param file_priv file private pointer. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_buffer structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and has been acquired, allocates the ++ * memory via alloc_agp() and creates a drm_agp_mem entry for it. ++ */ ++int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) ++{ ++ struct drm_agp_mem *entry; ++ DRM_AGP_MEM *memory; ++ unsigned long pages; ++ u32 type; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) ++ return -ENOMEM; ++ ++ memset(entry, 0, sizeof(*entry)); ++ ++ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; ++ type = (u32) request->type; ++ if (!(memory = drm_alloc_agp(dev, pages, type))) { ++ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); ++ return -ENOMEM; ++ } ++ ++ entry->handle = (unsigned long)memory->key + 1; ++ entry->memory = memory; ++ entry->bound = 0; ++ entry->pages = pages; ++ list_add(&entry->head, &dev->agp->memory); ++ ++ request->handle = entry->handle; ++ request->physical = memory->physical; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_alloc); ++ ++ ++int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_buffer *request = data; ++ ++ return drm_agp_alloc(dev, request); ++} ++ ++/** ++ * Search for the AGP memory entry associated with a handle. ++ * ++ * \param dev DRM device structure. ++ * \param handle AGP memory handle. ++ * \return pointer to the drm_agp_mem structure associated with \p handle. ++ * ++ * Walks through drm_agp_head::memory until finding a matching handle. ++ */ ++static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, ++ unsigned long handle) ++{ ++ struct drm_agp_mem *entry; ++ ++ list_for_each_entry(entry, &dev->agp->memory, head) { ++ if (entry->handle == handle) ++ return entry; ++ } ++ return NULL; ++} ++ ++/** ++ * Unbind AGP memory from the GATT (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_binding structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and acquired, looks-up the AGP memory ++ * entry and passes it to the unbind_agp() function. 
++ */ ++int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) ++{ ++ struct drm_agp_mem *entry; ++ int ret; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_agp_lookup_entry(dev, request->handle))) ++ return -EINVAL; ++ if (!entry->bound) ++ return -EINVAL; ++ ret = drm_unbind_agp(entry->memory); ++ if (ret == 0) ++ entry->bound = 0; ++ return ret; ++} ++EXPORT_SYMBOL(drm_agp_unbind); ++ ++ ++int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_binding *request = data; ++ ++ return drm_agp_unbind(dev, request); ++} ++ ++ ++/** ++ * Bind AGP memory into the GATT (ioctl) ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_binding structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and has been acquired and that no memory ++ * is currently bound into the GATT. Looks-up the AGP memory entry and passes ++ * it to bind_agp() function. ++ */ ++int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) ++{ ++ struct drm_agp_mem *entry; ++ int retcode; ++ int page; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_agp_lookup_entry(dev, request->handle))) ++ return -EINVAL; ++ if (entry->bound) ++ return -EINVAL; ++ page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE; ++ if ((retcode = drm_bind_agp(entry->memory, page))) ++ return retcode; ++ entry->bound = dev->agp->base + (page << PAGE_SHIFT); ++ DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", ++ dev->agp->base, entry->bound); ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_bind); ++ ++ ++int drm_agp_bind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_binding *request = data; ++ ++ return drm_agp_bind(dev, request); ++} ++ ++ ++/** ++ * Free AGP memory (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_buffer structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and has been acquired and looks up the ++ * AGP memory entry. If the memory it's currently bound, unbind it via ++ * unbind_agp(). Frees it via free_agp() as well as the entry itself ++ * and unlinks from the doubly linked list it's inserted in. ++ */ ++int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) ++{ ++ struct drm_agp_mem *entry; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_agp_lookup_entry(dev, request->handle))) ++ return -EINVAL; ++ if (entry->bound) ++ drm_unbind_agp(entry->memory); ++ ++ list_del(&entry->head); ++ ++ drm_free_agp(entry->memory, entry->pages); ++ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_free); ++ ++ ++ ++int drm_agp_free_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_buffer *request = data; ++ ++ return drm_agp_free(dev, request); ++} ++ ++ ++/** ++ * Initialize the AGP resources. ++ * ++ * \return pointer to a drm_agp_head structure. ++ * ++ * Gets the drm_agp_t structure which is made available by the agpgart module ++ * via the inter_module_* functions. Creates and initializes a drm_agp_head ++ * structure. 
++ */ ++struct drm_agp_head *drm_agp_init(struct drm_device *dev) ++{ ++ struct drm_agp_head *head = NULL; ++ ++ if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) ++ return NULL; ++ memset((void *)head, 0, sizeof(*head)); ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ agp_copy_info(&head->agp_info); ++#else ++ head->bridge = agp_find_bridge(dev->pdev); ++ if (!head->bridge) { ++ if (!(head->bridge = agp_backend_acquire(dev->pdev))) { ++ drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); ++ return NULL; ++ } ++ agp_copy_info(head->bridge, &head->agp_info); ++ agp_backend_release(head->bridge); ++ } else { ++ agp_copy_info(head->bridge, &head->agp_info); ++ } ++#endif ++ if (head->agp_info.chipset == NOT_SUPPORTED) { ++ drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); ++ return NULL; ++ } ++ INIT_LIST_HEAD(&head->memory); ++ head->cant_use_aperture = head->agp_info.cant_use_aperture; ++ head->page_mask = head->agp_info.page_mask; ++ head->base = head->agp_info.aper_base; ++ return head; ++} ++ ++/** Calls agp_allocate_memory() */ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type) ++{ ++ return agp_allocate_memory(pages, type); ++} ++#else ++DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, ++ size_t pages, u32 type) ++{ ++ return agp_allocate_memory(bridge, pages, type); ++} ++#endif ++ ++/** Calls agp_free_memory() */ ++int drm_agp_free_memory(DRM_AGP_MEM * handle) ++{ ++ if (!handle) ++ return 0; ++ agp_free_memory(handle); ++ return 1; ++} ++ ++/** Calls agp_bind_memory() */ ++int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start) ++{ ++ if (!handle) ++ return -EINVAL; ++ return agp_bind_memory(handle, start); ++} ++EXPORT_SYMBOL(drm_agp_bind_memory); ++ ++/** Calls agp_unbind_memory() */ ++int drm_agp_unbind_memory(DRM_AGP_MEM * handle) ++{ ++ if (!handle) ++ return -EINVAL; ++ return agp_unbind_memory(handle); ++} ++ ++/** ++ * Binds a collection of pages into AGP memory at the given offset, returning ++ * the AGP memory structure containing them. ++ * ++ * No reference is held on the pages during this time -- it is up to the ++ * caller to handle that. ++ */ ++DRM_AGP_MEM * ++drm_agp_bind_pages(struct drm_device *dev, ++ struct page **pages, ++ unsigned long num_pages, ++ uint32_t gtt_offset) ++{ ++ DRM_AGP_MEM *mem; ++ int ret, i; ++ ++ DRM_DEBUG("drm_agp_populate_ttm\n"); ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY); ++#else ++ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, ++ AGP_USER_MEMORY); ++#endif ++ if (mem == NULL) { ++ DRM_ERROR("Failed to allocate memory for %ld pages\n", ++ num_pages); ++ return NULL; ++ } ++ ++ for (i = 0; i < num_pages; i++) ++ mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); ++ mem->page_count = num_pages; ++ ++ mem->is_flushed = true; ++ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); ++ if (ret != 0) { ++ DRM_ERROR("Failed to bind AGP memory: %d\n", ret); ++ agp_free_memory(mem); ++ return NULL; ++ } ++ ++ return mem; ++} ++EXPORT_SYMBOL(drm_agp_bind_pages); ++ ++/* ++ * AGP ttm backend interface. 
++ */ ++ ++#ifndef AGP_USER_TYPES ++#define AGP_USER_TYPES (1 << 16) ++#define AGP_USER_MEMORY (AGP_USER_TYPES) ++#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) ++#endif ++#define AGP_REQUIRED_MAJOR 0 ++#define AGP_REQUIRED_MINOR 102 ++ ++static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) ++{ ++ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); ++} ++ ++ ++static int drm_agp_populate(struct drm_ttm_backend *backend, ++ unsigned long num_pages, struct page **pages, ++ struct page *dummy_read_page) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ struct page **cur_page, **last_page = pages + num_pages; ++ DRM_AGP_MEM *mem; ++ int dummy_page_count = 0; ++ ++ if (drm_alloc_memctl(num_pages * sizeof(void *))) ++ return -1; ++ ++ DRM_DEBUG("drm_agp_populate_ttm\n"); ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY); ++#else ++ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); ++#endif ++ if (!mem) { ++ drm_free_memctl(num_pages * sizeof(void *)); ++ return -1; ++ } ++ ++ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); ++ mem->page_count = 0; ++ for (cur_page = pages; cur_page < last_page; ++cur_page) { ++ struct page *page = *cur_page; ++ if (!page) { ++ page = dummy_read_page; ++ ++dummy_page_count; ++ } ++ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page)); ++ } ++ if (dummy_page_count) ++ DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count); ++ agp_be->mem = mem; ++ return 0; ++} ++ ++static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, ++ struct drm_bo_mem_reg *bo_mem) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ DRM_AGP_MEM *mem = agp_be->mem; ++ int ret; ++ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED); ++ ++ DRM_DEBUG("drm_agp_bind_ttm\n"); ++ mem->is_flushed = true; ++ mem->type = AGP_USER_MEMORY; ++ /* CACHED MAPPED implies not snooped memory */ ++ if (snooped) ++ mem->type = AGP_USER_CACHED_MEMORY; ++ ++ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); ++ if (ret) ++ DRM_ERROR("AGP Bind memory failed\n"); ++ ++ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? 
++ DRM_BE_FLAG_BOUND_CACHED : 0, ++ DRM_BE_FLAG_BOUND_CACHED); ++ return ret; ++} ++ ++static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ ++ DRM_DEBUG("drm_agp_unbind_ttm\n"); ++ if (agp_be->mem->is_bound) ++ return drm_agp_unbind_memory(agp_be->mem); ++ else ++ return 0; ++} ++ ++static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ DRM_AGP_MEM *mem = agp_be->mem; ++ ++ DRM_DEBUG("drm_agp_clear_ttm\n"); ++ if (mem) { ++ unsigned long num_pages = mem->page_count; ++ backend->func->unbind(backend); ++ agp_free_memory(mem); ++ drm_free_memctl(num_pages * sizeof(void *)); ++ } ++ agp_be->mem = NULL; ++} ++ ++static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be; ++ ++ if (backend) { ++ DRM_DEBUG("drm_agp_destroy_ttm\n"); ++ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); ++ if (agp_be) { ++ if (agp_be->mem) ++ backend->func->clear(backend); ++ drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func agp_ttm_backend = { ++ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, ++ .populate = drm_agp_populate, ++ .clear = drm_agp_clear_ttm, ++ .bind = drm_agp_bind_ttm, ++ .unbind = drm_agp_unbind_ttm, ++ .destroy = drm_agp_destroy_ttm, ++}; ++ ++struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) ++{ ++ ++ struct drm_agp_ttm_backend *agp_be; ++ struct agp_kern_info *info; ++ ++ if (!dev->agp) { ++ DRM_ERROR("AGP is not initialized.\n"); ++ return NULL; ++ } ++ info = &dev->agp->agp_info; ++ ++ if (info->version.major != AGP_REQUIRED_MAJOR || ++ info->version.minor < AGP_REQUIRED_MINOR) { ++ DRM_ERROR("Wrong agpgart version %d.%d\n" ++ "\tYou need at least version %d.%d.\n", ++ info->version.major, ++ info->version.minor, ++ AGP_REQUIRED_MAJOR, ++ AGP_REQUIRED_MINOR); ++ return NULL; ++ } ++ ++ ++ agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM); ++ if (!agp_be) ++ return NULL; ++ ++ agp_be->mem = NULL; ++ ++ agp_be->bridge = dev->agp->bridge; ++ agp_be->populated = false; ++ agp_be->backend.func = &agp_ttm_backend; ++ agp_be->backend.dev = dev; ++ ++ return &agp_be->backend; ++} ++EXPORT_SYMBOL(drm_agp_init_ttm); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25) ++void drm_agp_chipset_flush(struct drm_device *dev) ++{ ++ agp_flush_chipset(dev->agp->bridge); ++} ++EXPORT_SYMBOL(drm_agp_chipset_flush); ++#endif ++ ++#endif /* __OS_HAS_AGP */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_auth.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_auth.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_auth.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_auth.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,189 @@ ++/** ++ * \file drm_auth.c ++ * IOCTLs for authentication ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Find the file with the given magic number. ++ * ++ * \param dev DRM device. ++ * \param magic magic number. ++ * ++ * Searches in drm_device::magiclist within all files with the same hash key ++ * the one with matching magic number, while holding the drm_device::struct_mutex ++ * lock. ++ */ ++static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) ++{ ++ struct drm_file *retval = NULL; ++ struct drm_magic_entry *pt; ++ struct drm_hash_item *hash; ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { ++ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); ++ retval = pt->priv; ++ } ++ mutex_unlock(&dev->struct_mutex); ++ return retval; ++} ++ ++/** ++ * Adds a magic number. ++ * ++ * \param dev DRM device. ++ * \param priv file private data. ++ * \param magic magic number. ++ * ++ * Creates a drm_magic_entry structure and appends to the linked list ++ * associated the magic number hash key in drm_device::magiclist, while holding ++ * the drm_device::struct_mutex lock. ++ */ ++static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, ++ drm_magic_t magic) ++{ ++ struct drm_magic_entry *entry; ++ ++ DRM_DEBUG("%d\n", magic); ++ ++ entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); ++ if (!entry) ++ return -ENOMEM; ++ memset(entry, 0, sizeof(*entry)); ++ entry->priv = priv; ++ entry->hash_item.key = (unsigned long)magic; ++ mutex_lock(&dev->struct_mutex); ++ drm_ht_insert_item(&dev->magiclist, &entry->hash_item); ++ list_add_tail(&entry->head, &dev->magicfree); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Remove a magic number. ++ * ++ * \param dev DRM device. ++ * \param magic magic number. ++ * ++ * Searches and unlinks the entry in drm_device::magiclist with the magic ++ * number hash key, while holding the drm_device::struct_mutex lock. 
++ */ ++static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) ++{ ++ struct drm_magic_entry *pt; ++ struct drm_hash_item *hash; ++ ++ DRM_DEBUG("%d\n", magic); ++ ++ mutex_lock(&dev->struct_mutex); ++ if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); ++ drm_ht_remove_item(&dev->magiclist, hash); ++ list_del(&pt->head); ++ mutex_unlock(&dev->struct_mutex); ++ ++ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); ++ ++ return 0; ++} ++ ++/** ++ * Get a unique magic number (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a resulting drm_auth structure. ++ * \return zero on success, or a negative number on failure. ++ * ++ * If there is a magic number in drm_file::magic then use it, otherwise ++ * searches an unique non-zero magic number and add it associating it with \p ++ * file_priv. ++ */ ++int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ static drm_magic_t sequence = 0; ++ static DEFINE_SPINLOCK(lock); ++ struct drm_auth *auth = data; ++ ++ /* Find unique magic */ ++ if (file_priv->magic) { ++ auth->magic = file_priv->magic; ++ } else { ++ do { ++ spin_lock(&lock); ++ if (!sequence) ++ ++sequence; /* reserve 0 */ ++ auth->magic = sequence++; ++ spin_unlock(&lock); ++ } while (drm_find_file(dev, auth->magic)); ++ file_priv->magic = auth->magic; ++ drm_add_magic(dev, file_priv, auth->magic); ++ } ++ ++ DRM_DEBUG("%u\n", auth->magic); ++ ++ return 0; ++} ++ ++/** ++ * Authenticate with a magic. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_auth structure. ++ * \return zero if authentication successed, or a negative number otherwise. ++ * ++ * Checks if \p file_priv is associated with the magic number passed in \arg. ++ */ ++int drm_authmagic(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_auth *auth = data; ++ struct drm_file *file; ++ ++ DRM_DEBUG("%u\n", auth->magic); ++ if ((file = drm_find_file(dev, auth->magic))) { ++ file->authenticated = 1; ++ drm_remove_magic(dev, auth->magic); ++ return 0; ++ } ++ return -EINVAL; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bo.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bo.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bo.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bo.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2796 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++/* ++ * Locking may look a bit complicated but isn't really: ++ * ++ * The buffer usage atomic_t needs to be protected by dev->struct_mutex ++ * when there is a chance that it can be zero before or after the operation. ++ * ++ * dev->struct_mutex also protects all lists and list heads, ++ * Hash tables and hash heads. ++ * ++ * bo->mutex protects the buffer object itself excluding the usage field. ++ * bo->mutex does also protect the buffer list heads, so to manipulate those, ++ * we need both the bo->mutex and the dev->struct_mutex. ++ * ++ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal ++ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, ++ * the list traversal will, in general, need to be restarted. ++ * ++ */ ++ ++static void drm_bo_destroy_locked(struct drm_buffer_object *bo); ++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); ++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); ++static void drm_bo_unmap_virtual(struct drm_buffer_object *bo); ++ ++static inline uint64_t drm_bo_type_flags(unsigned type) ++{ ++ return (1ULL << (24 + type)); ++} ++ ++/* ++ * bo locked. dev->struct_mutex locked. ++ */ ++ ++void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo) ++{ ++ struct drm_mem_type_manager *man; ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ ++ man = &bo->dev->bm.man[bo->pinned_mem_type]; ++ list_add_tail(&bo->pinned_lru, &man->pinned); ++} ++ ++void drm_bo_add_to_lru(struct drm_buffer_object *bo) ++{ ++ struct drm_mem_type_manager *man; ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ ++ if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) ++ || bo->mem.mem_type != bo->pinned_mem_type) { ++ man = &bo->dev->bm.man[bo->mem.mem_type]; ++ list_add_tail(&bo->lru, &man->lru); ++ } else { ++ INIT_LIST_HEAD(&bo->lru); ++ } ++} ++ ++static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci) ++{ ++#ifdef DRM_ODD_MM_COMPAT ++ int ret; ++ ++ if (!bo->map_list.map) ++ return 0; ++ ++ ret = drm_bo_lock_kmm(bo); ++ if (ret) ++ return ret; ++ drm_bo_unmap_virtual(bo); ++ if (old_is_pci) ++ drm_bo_finish_unmap(bo); ++#else ++ if (!bo->map_list.map) ++ return 0; ++ ++ drm_bo_unmap_virtual(bo); ++#endif ++ return 0; ++} ++ ++static void drm_bo_vm_post_move(struct drm_buffer_object *bo) ++{ ++#ifdef DRM_ODD_MM_COMPAT ++ int ret; ++ ++ if (!bo->map_list.map) ++ return; ++ ++ ret = drm_bo_remap_bound(bo); ++ if (ret) { ++ DRM_ERROR("Failed to remap a bound buffer object.\n" ++ "\tThis might cause a sigbus later.\n"); ++ } ++ drm_bo_unlock_kmm(bo); ++#endif ++} ++ ++/* ++ * Call bo->mutex locked. 
++ */ ++ ++static int drm_bo_add_ttm(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ int ret = 0; ++ uint32_t page_flags = 0; ++ ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ bo->ttm = NULL; ++ ++ if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE) ++ page_flags |= DRM_TTM_PAGE_WRITE; ++ ++ switch (bo->type) { ++ case drm_bo_type_device: ++ case drm_bo_type_kernel: ++ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, ++ page_flags, dev->bm.dummy_read_page); ++ if (!bo->ttm) ++ ret = -ENOMEM; ++ break; ++ case drm_bo_type_user: ++ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, ++ page_flags | DRM_TTM_PAGE_USER, ++ dev->bm.dummy_read_page); ++ if (!bo->ttm) { ++ ret = -ENOMEM; ++ break; ++ } ++ ++ ret = drm_ttm_set_user(bo->ttm, current, ++ bo->buffer_start, ++ bo->num_pages); ++ ++ break; ++ default: ++ DRM_ERROR("Illegal buffer object type\n"); ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, ++ int evict, int no_wait) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); ++ int new_is_pci = drm_mem_reg_is_pci(dev, mem); ++ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; ++ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; ++ int ret = 0; ++ ++ if (old_is_pci || new_is_pci || ++ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) ++ ret = drm_bo_vm_pre_move(bo, old_is_pci); ++ if (ret) ++ return ret; ++ ++ /* ++ * Create and bind a ttm if required. ++ */ ++ ++ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ goto out_err; ++ ++ if (mem->mem_type != DRM_BO_MEM_LOCAL) { ++ ret = drm_ttm_bind(bo->ttm, mem); ++ if (ret) ++ goto out_err; ++ } ++ ++ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) { ++ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ ++ *old_mem = *mem; ++ mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, mem->flags, ++ DRM_BO_MASK_MEMTYPE); ++ goto moved; ++ } ++ ++ } ++ ++ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && ++ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) ++ ret = drm_bo_move_ttm(bo, evict, no_wait, mem); ++ else if (dev->driver->bo_driver->move) ++ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); ++ else ++ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); ++ ++ if (ret) ++ goto out_err; ++ ++moved: ++ if (old_is_pci || new_is_pci) ++ drm_bo_vm_post_move(bo); ++ ++ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ++ ret = ++ dev->driver->bo_driver->invalidate_caches(dev, ++ bo->mem.flags); ++ if (ret) ++ DRM_ERROR("Can not flush read caches\n"); ++ } ++ ++ DRM_FLAG_MASKED(bo->priv_flags, ++ (evict) ? _DRM_BO_FLAG_EVICTED : 0, ++ _DRM_BO_FLAG_EVICTED); ++ ++ if (bo->mem.mm_node) ++ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + ++ bm->man[bo->mem.mem_type].gpu_offset; ++ ++ ++ return 0; ++ ++out_err: ++ if (old_is_pci || new_is_pci) ++ drm_bo_vm_post_move(bo); ++ ++ new_man = &bm->man[bo->mem.mem_type]; ++ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ ++ return ret; ++} ++ ++/* ++ * Call bo->mutex locked. ++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. 
++ */ ++ ++static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced) ++{ ++ struct drm_fence_object *fence = bo->fence; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ return -EBUSY; ++ ++ if (fence) { ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++static int drm_bo_check_unfenced(struct drm_buffer_object *bo) ++{ ++ int ret; ++ ++ mutex_lock(&bo->mutex); ++ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++ ++/* ++ * Call bo->mutex locked. ++ * Wait until the buffer is idle. ++ */ ++ ++int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, ++ int no_wait, int check_unfenced) ++{ ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ while(unlikely(drm_bo_busy(bo, check_unfenced))) { ++ if (no_wait) ++ return -EBUSY; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) { ++ mutex_unlock(&bo->mutex); ++ wait_event(bo->event_queue, !drm_bo_check_unfenced(bo)); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ } ++ ++ if (bo->fence) { ++ struct drm_fence_object *fence; ++ uint32_t fence_type = bo->fence_type; ++ ++ drm_fence_reference_unlocked(&fence, bo->fence); ++ mutex_unlock(&bo->mutex); ++ ++ ret = drm_fence_object_wait(fence, lazy, !interruptible, ++ fence_type); ++ ++ drm_fence_usage_deref_unlocked(&fence); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ if (ret) ++ return ret; ++ } ++ ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_wait); ++ ++static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ if (bo->fence) { ++ if (bm->nice_mode) { ++ unsigned long _end = jiffies + 3 * DRM_HZ; ++ int ret; ++ do { ++ ret = drm_bo_wait(bo, 0, 0, 0, 0); ++ if (ret && allow_errors) ++ return ret; ++ ++ } while (ret && !time_after_eq(jiffies, _end)); ++ ++ if (bo->fence) { ++ bm->nice_mode = 0; ++ DRM_ERROR("Detected GPU lockup or " ++ "fence driver was taken down. " ++ "Evicting buffer.\n"); ++ } ++ } ++ if (bo->fence) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ } ++ return 0; ++} ++ ++/* ++ * Call dev->struct_mutex locked. ++ * Attempts to remove all private references to a buffer by expiring its ++ * fence object and removing from lru lists and memory managers. 
++ */ ++ ++static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ atomic_inc(&bo->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&bo->mutex); ++ ++ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ ++ if (bo->fence && drm_fence_object_signaled(bo->fence, ++ bo->fence_type)) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ ++ if (bo->fence && remove_all) ++ (void)drm_bo_expire_fence(bo, 0); ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!atomic_dec_and_test(&bo->usage)) ++ goto out; ++ ++ if (!bo->fence) { ++ list_del_init(&bo->lru); ++ if (bo->mem.mm_node) { ++ drm_mm_put_block(bo->mem.mm_node); ++ if (bo->pinned_node == bo->mem.mm_node) ++ bo->pinned_node = NULL; ++ bo->mem.mm_node = NULL; ++ } ++ list_del_init(&bo->pinned_lru); ++ if (bo->pinned_node) { ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = NULL; ++ } ++ list_del_init(&bo->ddestroy); ++ mutex_unlock(&bo->mutex); ++ drm_bo_destroy_locked(bo); ++ return; ++ } ++ ++ if (list_empty(&bo->ddestroy)) { ++ drm_fence_object_flush(bo->fence, bo->fence_type); ++ list_add_tail(&bo->ddestroy, &bm->ddestroy); ++ schedule_delayed_work(&bm->wq, ++ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); ++ } ++ ++out: ++ mutex_unlock(&bo->mutex); ++ return; ++} ++ ++/* ++ * Verify that refcount is 0 and that there are no internal references ++ * to the buffer object. Then destroy it. ++ */ ++ ++static void drm_bo_destroy_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && ++ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && ++ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { ++ if (bo->fence != NULL) { ++ DRM_ERROR("Fence was non-zero.\n"); ++ drm_bo_cleanup_refs(bo, 0); ++ return; ++ } ++ ++#ifdef DRM_ODD_MM_COMPAT ++ BUG_ON(!list_empty(&bo->vma_list)); ++ BUG_ON(!list_empty(&bo->p_mm_list)); ++#endif ++ ++ if (bo->ttm) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ ++ atomic_dec(&bm->count); ++ ++ drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); ++ ++ return; ++ } ++ ++ /* ++ * Some stuff is still trying to reference the buffer object. ++ * Get rid of those references. ++ */ ++ ++ drm_bo_cleanup_refs(bo, 0); ++ ++ return; ++} ++ ++/* ++ * Call dev->struct_mutex locked. 
++ */ ++ ++static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ struct drm_buffer_object *entry, *nentry; ++ struct list_head *list, *next; ++ ++ list_for_each_safe(list, next, &bm->ddestroy) { ++ entry = list_entry(list, struct drm_buffer_object, ddestroy); ++ ++ nentry = NULL; ++ if (next != &bm->ddestroy) { ++ nentry = list_entry(next, struct drm_buffer_object, ++ ddestroy); ++ atomic_inc(&nentry->usage); ++ } ++ ++ drm_bo_cleanup_refs(entry, remove_all); ++ ++ if (nentry) ++ atomic_dec(&nentry->usage); ++ } ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++static void drm_bo_delayed_workqueue(void *data) ++#else ++static void drm_bo_delayed_workqueue(struct work_struct *work) ++#endif ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct drm_device *dev = (struct drm_device *) data; ++ struct drm_buffer_manager *bm = &dev->bm; ++#else ++ struct drm_buffer_manager *bm = ++ container_of(work, struct drm_buffer_manager, wq.work); ++ struct drm_device *dev = container_of(bm, struct drm_device, bm); ++#endif ++ ++ DRM_DEBUG("Delayed delete Worker\n"); ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ mutex_unlock(&dev->struct_mutex); ++ return; ++ } ++ drm_bo_delayed_delete(dev, 0); ++ if (bm->initialized && !list_empty(&bm->ddestroy)) { ++ schedule_delayed_work(&bm->wq, ++ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++void drm_bo_usage_deref_locked(struct drm_buffer_object **bo) ++{ ++ struct drm_buffer_object *tmp_bo = *bo; ++ bo = NULL; ++ ++ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); ++ ++ if (atomic_dec_and_test(&tmp_bo->usage)) ++ drm_bo_destroy_locked(tmp_bo); ++} ++EXPORT_SYMBOL(drm_bo_usage_deref_locked); ++ ++static void drm_bo_base_deref_locked(struct drm_file *file_priv, ++ struct drm_user_object *uo) ++{ ++ struct drm_buffer_object *bo = ++ drm_user_object_entry(uo, struct drm_buffer_object, base); ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ ++ drm_bo_takedown_vm_locked(bo); ++ drm_bo_usage_deref_locked(&bo); ++} ++ ++void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo) ++{ ++ struct drm_buffer_object *tmp_bo = *bo; ++ struct drm_device *dev = tmp_bo->dev; ++ ++ *bo = NULL; ++ if (atomic_dec_and_test(&tmp_bo->usage)) { ++ mutex_lock(&dev->struct_mutex); ++ if (atomic_read(&tmp_bo->usage) == 0) ++ drm_bo_destroy_locked(tmp_bo); ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); ++ ++void drm_putback_buffer_objects(struct drm_device *dev) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct list_head *list = &bm->unfenced; ++ struct drm_buffer_object *entry, *next; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(entry, next, list, lru) { ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ ++ mutex_lock(&entry->mutex); ++ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); ++ mutex_lock(&dev->struct_mutex); ++ ++ list_del_init(&entry->lru); ++ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ wake_up_all(&entry->event_queue); ++ ++ /* ++ * FIXME: Might want to put back on head of list ++ * instead of tail here. ++ */ ++ ++ drm_bo_add_to_lru(entry); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_locked(&entry); ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++EXPORT_SYMBOL(drm_putback_buffer_objects); ++ ++ ++/* ++ * Note. 
The caller has to register (if applicable) ++ * and deregister fence object usage. ++ */ ++ ++int drm_fence_buffer_objects(struct drm_device *dev, ++ struct list_head *list, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence, ++ struct drm_fence_object **used_fence) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *entry; ++ uint32_t fence_type = 0; ++ uint32_t fence_class = ~0; ++ int count = 0; ++ int ret = 0; ++ struct list_head *l; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!list) ++ list = &bm->unfenced; ++ ++ if (fence) ++ fence_class = fence->fence_class; ++ ++ list_for_each_entry(entry, list, lru) { ++ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); ++ fence_type |= entry->new_fence_type; ++ if (fence_class == ~0) ++ fence_class = entry->new_fence_class; ++ else if (entry->new_fence_class != fence_class) { ++ DRM_ERROR("Unmatching fence classes on unfenced list: " ++ "%d and %d.\n", ++ fence_class, ++ entry->new_fence_class); ++ ret = -EINVAL; ++ goto out; ++ } ++ count++; ++ } ++ ++ if (!count) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (fence) { ++ if ((fence_type & fence->type) != fence_type || ++ (fence->fence_class != fence_class)) { ++ DRM_ERROR("Given fence doesn't match buffers " ++ "on unfenced list.\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ } else { ++ mutex_unlock(&dev->struct_mutex); ++ ret = drm_fence_object_create(dev, fence_class, fence_type, ++ fence_flags | DRM_FENCE_FLAG_EMIT, ++ &fence); ++ mutex_lock(&dev->struct_mutex); ++ if (ret) ++ goto out; ++ } ++ ++ count = 0; ++ l = list->next; ++ while (l != list) { ++ prefetch(l->next); ++ entry = list_entry(l, struct drm_buffer_object, lru); ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(l); ++ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ count++; ++ if (entry->fence) ++ drm_fence_usage_deref_locked(&entry->fence); ++ entry->fence = drm_fence_reference_locked(fence); ++ entry->fence_class = entry->new_fence_class; ++ entry->fence_type = entry->new_fence_type; ++ DRM_FLAG_MASKED(entry->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ wake_up_all(&entry->event_queue); ++ drm_bo_add_to_lru(entry); ++ } ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_locked(&entry); ++ l = list->next; ++ } ++ DRM_DEBUG("Fenced %d buffers\n", count); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ *used_fence = fence; ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_buffer_objects); ++ ++/* ++ * bo->mutex locked ++ */ ++ ++static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, ++ int no_wait) ++{ ++ int ret = 0; ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg evict_mem; ++ ++ /* ++ * Someone might have modified the buffer before we took the ++ * buffer mutex. 
++ */ ++ ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ if (unlikely(bo->mem.flags & ++ (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) ++ goto out_unlock; ++ if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ goto out_unlock; ++ if (unlikely(bo->mem.mem_type != mem_type)) ++ goto out_unlock; ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 0); ++ if (ret) ++ goto out_unlock; ++ ++ } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ ++ evict_mem = bo->mem; ++ evict_mem.mm_node = NULL; ++ ++ evict_mem = bo->mem; ++ evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo); ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ ret = drm_bo_mem_space(bo, &evict_mem, no_wait); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Failed to find memory space for " ++ "buffer 0x%p eviction.\n", bo); ++ goto out; ++ } ++ ++ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Buffer eviction failed\n"); ++ goto out; ++ } ++ ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, ++ _DRM_BO_FLAG_EVICTED); ++ ++out: ++ mutex_lock(&dev->struct_mutex); ++ if (evict_mem.mm_node) { ++ if (evict_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(evict_mem.mm_node); ++ evict_mem.mm_node = NULL; ++ } ++ drm_bo_add_to_lru(bo); ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++out_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++/** ++ * Repeatedly evict memory from the LRU for @mem_type until we create enough ++ * space, or we've evicted everything and there isn't enough space. ++ */ ++static int drm_bo_mem_force_space(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ uint32_t mem_type, int no_wait) ++{ ++ struct drm_mm_node *node; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *entry; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ struct list_head *lru; ++ unsigned long num_pages = mem->num_pages; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ do { ++ node = drm_mm_search_free(&man->manager, num_pages, ++ mem->page_alignment, 1); ++ if (node) ++ break; ++ ++ lru = &man->lru; ++ if (lru->next == lru) ++ break; ++ ++ entry = list_entry(lru->next, struct drm_buffer_object, lru); ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ ret = drm_bo_evict(entry, mem_type, no_wait); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_unlocked(&entry); ++ if (ret) ++ return ret; ++ mutex_lock(&dev->struct_mutex); ++ } while (1); ++ ++ if (!node) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ node = drm_mm_get_block(node, num_pages, mem->page_alignment); ++ if (unlikely(!node)) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ mem->mm_node = node; ++ mem->mem_type = mem_type; ++ return 0; ++} ++ ++static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, ++ int disallow_fixed, ++ uint32_t mem_type, ++ uint64_t mask, uint32_t *res_mask) ++{ ++ uint64_t cur_flags = drm_bo_type_flags(mem_type); ++ uint64_t flag_diff; ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed) ++ return 0; ++ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) ++ cur_flags |= DRM_BO_FLAG_CACHED; ++ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) ++ cur_flags |= DRM_BO_FLAG_MAPPABLE; ++ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) ++ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); ++ ++ if 
((cur_flags & mask & DRM_BO_MASK_MEM) == 0) ++ return 0; ++ ++ if (mem_type == DRM_BO_MEM_LOCAL) { ++ *res_mask = cur_flags; ++ return 1; ++ } ++ ++ flag_diff = (mask ^ cur_flags); ++ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED) ++ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED; ++ ++ if ((flag_diff & DRM_BO_FLAG_CACHED) && ++ (!(mask & DRM_BO_FLAG_CACHED) || ++ (mask & DRM_BO_FLAG_FORCE_CACHING))) ++ return 0; ++ ++ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ++ ((mask & DRM_BO_FLAG_MAPPABLE) || ++ (mask & DRM_BO_FLAG_FORCE_MAPPABLE))) ++ return 0; ++ ++ *res_mask = cur_flags; ++ return 1; ++} ++ ++/** ++ * Creates space for memory region @mem according to its type. ++ * ++ * This function first searches for free space in compatible memory types in ++ * the priority order defined by the driver. If free space isn't found, then ++ * drm_bo_mem_force_space is attempted in priority order to evict and find ++ * space. ++ */ ++int drm_bo_mem_space(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, int no_wait) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man; ++ ++ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; ++ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; ++ uint32_t i; ++ uint32_t mem_type = DRM_BO_MEM_LOCAL; ++ uint32_t cur_flags; ++ int type_found = 0; ++ int type_ok = 0; ++ int has_eagain = 0; ++ struct drm_mm_node *node = NULL; ++ int ret; ++ ++ mem->mm_node = NULL; ++ for (i = 0; i < num_prios; ++i) { ++ mem_type = prios[i]; ++ man = &bm->man[mem_type]; ++ ++ type_ok = drm_bo_mt_compatible(man, ++ bo->type == drm_bo_type_user, ++ mem_type, mem->proposed_flags, ++ &cur_flags); ++ ++ if (!type_ok) ++ continue; ++ ++ if (mem_type == DRM_BO_MEM_LOCAL) ++ break; ++ ++ if ((mem_type == bo->pinned_mem_type) && ++ (bo->pinned_node != NULL)) { ++ node = bo->pinned_node; ++ break; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (man->has_type && man->use_type) { ++ type_found = 1; ++ node = drm_mm_search_free(&man->manager, mem->num_pages, ++ mem->page_alignment, 1); ++ if (node) ++ node = drm_mm_get_block(node, mem->num_pages, ++ mem->page_alignment); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ if (node) ++ break; ++ } ++ ++ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { ++ mem->mm_node = node; ++ mem->mem_type = mem_type; ++ mem->flags = cur_flags; ++ return 0; ++ } ++ ++ if (!type_found) ++ return -EINVAL; ++ ++ num_prios = dev->driver->bo_driver->num_mem_busy_prio; ++ prios = dev->driver->bo_driver->mem_busy_prio; ++ ++ for (i = 0; i < num_prios; ++i) { ++ mem_type = prios[i]; ++ man = &bm->man[mem_type]; ++ ++ if (!man->has_type) ++ continue; ++ ++ if (!drm_bo_mt_compatible(man, ++ bo->type == drm_bo_type_user, ++ mem_type, ++ mem->proposed_flags, ++ &cur_flags)) ++ continue; ++ ++ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); ++ ++ if (ret == 0 && mem->mm_node) { ++ mem->flags = cur_flags; ++ return 0; ++ } ++ ++ if (ret == -EAGAIN) ++ has_eagain = 1; ++ } ++ ++ ret = (has_eagain) ? 
-EAGAIN : -ENOMEM; ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_mem_space); ++ ++/* ++ * drm_bo_propose_flags: ++ * ++ * @bo: the buffer object getting new flags ++ * ++ * @new_flags: the new set of proposed flag bits ++ * ++ * @new_mask: the mask of bits changed in new_flags ++ * ++ * Modify the proposed_flag bits in @bo ++ */ ++static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo, ++ uint64_t new_flags, uint64_t new_mask) ++{ ++ uint32_t new_access; ++ ++ /* Copy unchanging bits from existing proposed_flags */ ++ DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask); ++ ++ if (bo->type == drm_bo_type_user && ++ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) != ++ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) { ++ DRM_ERROR("User buffers require cache-coherent memory.\n"); ++ return -EINVAL; ++ } ++ ++ if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { ++ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n"); ++ return -EPERM; ++ } ++ ++ if (likely(new_mask & DRM_BO_MASK_MEM) && ++ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) && ++ !DRM_SUSER(DRM_CURPROC)) { ++ if (likely(bo->mem.flags & new_flags & new_mask & ++ DRM_BO_MASK_MEM)) ++ new_flags = (new_flags & ~DRM_BO_MASK_MEM) | ++ (bo->mem.flags & DRM_BO_MASK_MEM); ++ else { ++ DRM_ERROR("Incompatible memory type specification " ++ "for NO_EVICT buffer.\n"); ++ return -EPERM; ++ } ++ } ++ ++ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { ++ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); ++ return -EPERM; ++ } ++ ++ new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | ++ DRM_BO_FLAG_READ); ++ ++ if (new_access == 0) { ++ DRM_ERROR("Invalid buffer object rwx properties\n"); ++ return -EINVAL; ++ } ++ ++ bo->mem.proposed_flags = new_flags; ++ return 0; ++} ++ ++/* ++ * Call dev->struct_mutex locked. ++ */ ++ ++struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, ++ uint32_t handle, int check_owner) ++{ ++ struct drm_user_object *uo; ++ struct drm_buffer_object *bo; ++ ++ uo = drm_lookup_user_object(file_priv, handle); ++ ++ if (!uo || (uo->type != drm_buffer_type)) { ++ DRM_ERROR("Could not find buffer object 0x%08x\n", handle); ++ return NULL; ++ } ++ ++ if (check_owner && file_priv != uo->owner) { ++ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE)) ++ return NULL; ++ } ++ ++ bo = drm_user_object_entry(uo, struct drm_buffer_object, base); ++ atomic_inc(&bo->usage); ++ return bo; ++} ++EXPORT_SYMBOL(drm_lookup_buffer_object); ++ ++/* ++ * Call bo->mutex locked. ++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. ++ * Doesn't do any fence flushing as opposed to the drm_bo_busy function. ++ */ ++ ++static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced) ++{ ++ struct drm_fence_object *fence = bo->fence; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ return -EBUSY; ++ ++ if (fence) { ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++int drm_bo_evict_cached(struct drm_buffer_object *bo) ++{ ++ int ret = 0; ++ ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); ++ if (bo->mem.mm_node) ++ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1); ++ return ret; ++} ++ ++EXPORT_SYMBOL(drm_bo_evict_cached); ++/* ++ * Wait until a buffer is unmapped. 
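++ * While waiting, bo->mutex is dropped and re-acquired and the buffer is
++ * marked _DRM_BO_FLAG_UNLOCKED, so callers sitting in a re-validation
++ * loop (such as drm_bo_do_validate() further down) know that the object
++ * state may have changed underneath them.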
++ */ ++ ++static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait) ++{ ++ int ret = 0; ++ ++ if (likely(atomic_read(&bo->mapped)) == 0) ++ return 0; ++ ++ if (unlikely(no_wait)) ++ return -EBUSY; ++ ++ do { ++ mutex_unlock(&bo->mutex); ++ ret = wait_event_interruptible(bo->event_queue, ++ atomic_read(&bo->mapped) == 0); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ ++ if (ret == -ERESTARTSYS) ++ ret = -EAGAIN; ++ } while((ret == 0) && atomic_read(&bo->mapped) > 0); ++ ++ return ret; ++} ++ ++/* ++ * Fill in the ioctl reply argument with buffer info. ++ * Bo locked. ++ */ ++ ++void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, ++ struct drm_bo_info_rep *rep) ++{ ++ if (!rep) ++ return; ++ ++ rep->handle = bo->base.hash.key; ++ rep->flags = bo->mem.flags; ++ rep->size = bo->num_pages * PAGE_SIZE; ++ rep->offset = bo->offset; ++ ++ /* ++ * drm_bo_type_device buffers have user-visible ++ * handles which can be used to share across ++ * processes. Hand that back to the application ++ */ ++ if (bo->type == drm_bo_type_device) ++ rep->arg_handle = bo->map_list.user_token; ++ else ++ rep->arg_handle = 0; ++ ++ rep->proposed_flags = bo->mem.proposed_flags; ++ rep->buffer_start = bo->buffer_start; ++ rep->fence_flags = bo->fence_type; ++ rep->rep_flags = 0; ++ rep->page_alignment = bo->mem.page_alignment; ++ ++ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) { ++ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, ++ DRM_BO_REP_BUSY); ++ } ++} ++EXPORT_SYMBOL(drm_bo_fill_rep_arg); ++ ++/* ++ * Wait for buffer idle and register that we've mapped the buffer. ++ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, ++ * so that if the client dies, the mapping is automatically ++ * unregistered. 
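++ *
++ * This helper and drm_buffer_object_unmap() below back the
++ * drm_bo_map_ioctl() and drm_bo_unmap_ioctl() entry points further down;
++ * the expected pairing, sketched here without error handling and with
++ * rep being a struct drm_bo_info_rep, is simply:
++ *
++ *   drm_buffer_object_map(file_priv, handle, map_flags, hint, &rep);
++ *   ... CPU access through the user-space mapping ...
++ *   drm_buffer_object_unmap(file_priv, handle);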
++ */ ++ ++static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, ++ uint32_t map_flags, unsigned hint, ++ struct drm_bo_info_rep *rep) ++{ ++ struct drm_buffer_object *bo; ++ struct drm_device *dev = file_priv->minor->dev; ++ int ret = 0; ++ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ mutex_lock(&bo->mutex); ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ if (unlikely(ret)) ++ goto out; ++ ++ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) ++ drm_bo_evict_cached(bo); ++ ++ } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED)); ++ ++ atomic_inc(&bo->mapped); ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) { ++ if (atomic_dec_and_test(&bo->mapped)) ++ wake_up_all(&bo->event_queue); ++ ++ } else ++ drm_bo_fill_rep_arg(bo, rep); ++ ++ out: ++ mutex_unlock(&bo->mutex); ++ drm_bo_usage_deref_unlocked(&bo); ++ ++ return ret; ++} ++ ++static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ struct drm_ref_object *ro; ++ int ret = 0; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ if (!bo) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); ++ if (!ro) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ drm_remove_ref_object(file_priv, ro); ++ drm_bo_usage_deref_locked(&bo); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/* ++ * Call struct-sem locked. ++ */ ++ ++static void drm_buffer_user_object_unmap(struct drm_file *file_priv, ++ struct drm_user_object *uo, ++ enum drm_ref_type action) ++{ ++ struct drm_buffer_object *bo = ++ drm_user_object_entry(uo, struct drm_buffer_object, base); ++ ++ /* ++ * We DON'T want to take the bo->lock here, because we want to ++ * hold it when we wait for unmapped buffer. ++ */ ++ ++ BUG_ON(action != _DRM_REF_TYPE1); ++ ++ if (atomic_dec_and_test(&bo->mapped)) ++ wake_up_all(&bo->event_queue); ++} ++ ++/* ++ * bo->mutex locked. ++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags. ++ */ ++ ++int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, ++ int no_wait, int move_unfenced) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = 0; ++ struct drm_bo_mem_reg mem; ++ ++ BUG_ON(bo->fence != NULL); ++ ++ mem.num_pages = bo->num_pages; ++ mem.size = mem.num_pages << PAGE_SHIFT; ++ mem.proposed_flags = new_mem_flags; ++ mem.page_alignment = bo->mem.page_alignment; ++ ++ mutex_lock(&bm->evict_mutex); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ /* ++ * Determine where to move the buffer. 
++ */ ++ ret = drm_bo_mem_space(bo, &mem, no_wait); ++ if (ret) ++ goto out_unlock; ++ ++ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); ++ ++out_unlock: ++ mutex_lock(&dev->struct_mutex); ++ if (ret || !move_unfenced) { ++ if (mem.mm_node) { ++ if (mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(mem.mm_node); ++ mem.mm_node = NULL; ++ } ++ drm_bo_add_to_lru(bo); ++ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ wake_up_all(&bo->event_queue); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ } else { ++ list_add_tail(&bo->lru, &bm->unfenced); ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&bm->evict_mutex); ++ return ret; ++} ++ ++static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem) ++{ ++ uint32_t flag_diff = (mem->proposed_flags ^ mem->flags); ++ ++ if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0) ++ return 0; ++ if ((flag_diff & DRM_BO_FLAG_CACHED) && ++ (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/ ++ (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING))) ++ return 0; ++ ++ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ++ ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) || ++ (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE))) ++ return 0; ++ return 1; ++} ++ ++/** ++ * drm_buffer_object_validate: ++ * ++ * @bo: the buffer object to modify ++ * ++ * @fence_class: the new fence class covering this buffer ++ * ++ * @move_unfenced: a boolean indicating whether switching the ++ * memory space of this buffer should cause the buffer to ++ * be placed on the unfenced list. ++ * ++ * @no_wait: whether this function should return -EBUSY instead ++ * of waiting. ++ * ++ * Change buffer access parameters. This can involve moving ++ * the buffer to the correct memory type, pinning the buffer ++ * or changing the class/type of fence covering this buffer ++ * ++ * Must be called with bo locked. ++ */ ++ ++static int drm_buffer_object_validate(struct drm_buffer_object *bo, ++ uint32_t fence_class, ++ int move_unfenced, int no_wait, ++ int move_buffer) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret; ++ ++ if (move_buffer) { ++ ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait, ++ move_unfenced); ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Failed moving buffer.\n"); ++ if (ret == -ENOMEM) ++ DRM_ERROR("Out of aperture space or " ++ "DRM memory quota.\n"); ++ return ret; ++ } ++ } ++ ++ /* ++ * Pinned buffers. ++ */ ++ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { ++ bo->pinned_mem_type = bo->mem.mem_type; ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->pinned_lru); ++ drm_bo_add_to_pinned_lru(bo); ++ ++ if (bo->pinned_node != bo->mem.mm_node) { ++ if (bo->pinned_node != NULL) ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = bo->mem.mm_node; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ } else if (bo->pinned_node != NULL) { ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (bo->pinned_node != bo->mem.mm_node) ++ drm_mm_put_block(bo->pinned_node); ++ ++ list_del_init(&bo->pinned_lru); ++ bo->pinned_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ ++ } ++ ++ /* ++ * We might need to add a TTM. 
++ */ ++ ++ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ return ret; ++ } ++ /* ++ * Validation has succeeded, move the access and other ++ * non-mapping-related flag bits from the proposed flags to ++ * the active flags ++ */ ++ ++ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE); ++ ++ /* ++ * Finally, adjust lru to be sure. ++ */ ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del(&bo->lru); ++ if (move_unfenced) { ++ list_add_tail(&bo->lru, &bm->unfenced); ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, ++ _DRM_BO_FLAG_UNFENCED); ++ } else { ++ drm_bo_add_to_lru(bo); ++ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ wake_up_all(&bo->event_queue); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/* ++ * This function is called with bo->mutex locked, but may release it ++ * temporarily to wait for events. ++ */ ++ ++static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo, ++ uint64_t flags, ++ uint64_t mask, ++ uint32_t hint, ++ uint32_t fence_class, ++ int no_wait, ++ int *move_buffer) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ uint32_t ftype; ++ ++ int ret; ++ ++ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n", ++ (unsigned long long) bo->mem.proposed_flags, ++ (unsigned long long) bo->mem.flags); ++ ++ ret = drm_bo_modify_proposed_flags (bo, flags, mask); ++ if (ret) ++ return ret; ++ ++ ret = drm_bo_wait_unmapped(bo, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = driver->fence_type(bo, &fence_class, &ftype); ++ ++ if (ret) { ++ DRM_ERROR("Driver did not support given buffer permissions.\n"); ++ return ret; ++ } ++ ++ /* ++ * We're switching command submission mechanism, ++ * or cannot simply rely on the hardware serializing for us. ++ * Insert a driver-dependant barrier or wait for buffer idle. ++ */ ++ ++ if ((fence_class != bo->fence_class) || ++ ((ftype ^ bo->fence_type) & bo->fence_type)) { ++ ++ ret = -EINVAL; ++ if (driver->command_stream_barrier) { ++ ret = driver->command_stream_barrier(bo, ++ fence_class, ++ ftype, ++ no_wait); ++ } ++ if (ret && ret != -EAGAIN) ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ ++ if (ret) ++ return ret; ++ } ++ ++ bo->new_fence_class = fence_class; ++ bo->new_fence_type = ftype; ++ ++ /* ++ * Check whether we need to move buffer. ++ */ ++ ++ *move_buffer = 0; ++ if (!drm_bo_mem_compat(&bo->mem)) { ++ *move_buffer = 1; ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ } ++ ++ return ret; ++} ++ ++/** ++ * drm_bo_do_validate: ++ * ++ * @bo: the buffer object ++ * ++ * @flags: access rights, mapping parameters and cacheability. See ++ * the DRM_BO_FLAG_* values in drm.h ++ * ++ * @mask: Which flag values to change; this allows callers to modify ++ * things without knowing the current state of other flags. ++ * ++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_* ++ * values in drm.h. ++ * ++ * @fence_class: a driver-specific way of doing fences. Presumably, ++ * this would be used if the driver had more than one submission and ++ * fencing mechanism. At this point, there isn't any use of this ++ * from the user mode code. ++ * ++ * @rep: To be stuffed with the reply from validation ++ * ++ * 'validate' a buffer object. This changes where the buffer is ++ * located, along with changing access modes. 
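++ *
++ * A minimal call sketch (illustrative only; it mirrors the internal
++ * validation issued from drm_buffer_object_create() further down, which
++ * requests no flag changes and suppresses fencing):
++ *
++ *   ret = drm_bo_do_validate(bo, 0, 0,
++ *                            hint | DRM_BO_HINT_DONT_FENCE,
++ *                            0, NULL);
++ *
++ * Passing a struct drm_bo_info_rep pointer instead of NULL also returns
++ * the post-validation buffer state to the caller.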
++ */ ++ ++int drm_bo_do_validate(struct drm_buffer_object *bo, ++ uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep) ++{ ++ int ret; ++ int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0; ++ int move_buffer; ++ ++ mutex_lock(&bo->mutex); ++ ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ ret = drm_bo_prepare_for_validate(bo, flags, mask, hint, ++ fence_class, no_wait, ++ &move_buffer); ++ if (ret) ++ goto out; ++ ++ } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED)); ++ ++ ret = drm_buffer_object_validate(bo, ++ fence_class, ++ !(hint & DRM_BO_HINT_DONT_FENCE), ++ no_wait, ++ move_buffer); ++ ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++out: ++ if (rep) ++ drm_bo_fill_rep_arg(bo, rep); ++ ++ mutex_unlock(&bo->mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_do_validate); ++ ++/** ++ * drm_bo_handle_validate ++ * ++ * @file_priv: the drm file private, used to get a handle to the user context ++ * ++ * @handle: the buffer object handle ++ * ++ * @flags: access rights, mapping parameters and cacheability. See ++ * the DRM_BO_FLAG_* values in drm.h ++ * ++ * @mask: Which flag values to change; this allows callers to modify ++ * things without knowing the current state of other flags. ++ * ++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_* ++ * values in drm.h. ++ * ++ * @fence_class: a driver-specific way of doing fences. Presumably, ++ * this would be used if the driver had more than one submission and ++ * fencing mechanism. At this point, there isn't any use of this ++ * from the user mode code. ++ * ++ * @rep: To be stuffed with the reply from validation ++ * ++ * @bp_rep: To be stuffed with the buffer object pointer ++ * ++ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead ++ * of a pointer to a buffer object. Optionally return a pointer to the buffer object. ++ * This is a convenience wrapper only. ++ */ ++ ++int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, ++ uint64_t flags, uint64_t mask, ++ uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep, ++ struct drm_buffer_object **bo_rep) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ if (bo->base.owner != file_priv) ++ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); ++ ++ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep); ++ ++ if (!ret && bo_rep) ++ *bo_rep = bo; ++ else ++ drm_bo_usage_deref_unlocked(&bo); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_handle_validate); ++ ++ ++static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, ++ struct drm_bo_info_rep *rep) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ mutex_lock(&bo->mutex); ++ ++ /* ++ * FIXME: Quick busy here? 
++ */ ++ ++ drm_bo_busy(bo, 1); ++ drm_bo_fill_rep_arg(bo, rep); ++ mutex_unlock(&bo->mutex); ++ drm_bo_usage_deref_unlocked(&bo); ++ return 0; ++} ++ ++static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, ++ uint32_t hint, ++ struct drm_bo_info_rep *rep) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ mutex_lock(&bo->mutex); ++ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1); ++ if (ret) ++ goto out; ++ ++ drm_bo_fill_rep_arg(bo, rep); ++out: ++ mutex_unlock(&bo->mutex); ++ drm_bo_usage_deref_unlocked(&bo); ++ return ret; ++} ++ ++int drm_buffer_object_create(struct drm_device *dev, ++ unsigned long size, ++ enum drm_bo_type type, ++ uint64_t flags, ++ uint32_t hint, ++ uint32_t page_alignment, ++ unsigned long buffer_start, ++ struct drm_buffer_object **buf_obj) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *bo; ++ int ret = 0; ++ unsigned long num_pages; ++ ++ size += buffer_start & ~PAGE_MASK; ++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ if (num_pages == 0) { ++ DRM_ERROR("Illegal buffer object size.\n"); ++ return -EINVAL; ++ } ++ ++ bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); ++ ++ if (!bo) ++ return -ENOMEM; ++ ++ mutex_init(&bo->mutex); ++ mutex_lock(&bo->mutex); ++ ++ atomic_set(&bo->usage, 1); ++ atomic_set(&bo->mapped, 0); ++ DRM_INIT_WAITQUEUE(&bo->event_queue); ++ INIT_LIST_HEAD(&bo->lru); ++ INIT_LIST_HEAD(&bo->pinned_lru); ++ INIT_LIST_HEAD(&bo->ddestroy); ++#ifdef DRM_ODD_MM_COMPAT ++ INIT_LIST_HEAD(&bo->p_mm_list); ++ INIT_LIST_HEAD(&bo->vma_list); ++#endif ++ bo->dev = dev; ++ bo->type = type; ++ bo->num_pages = num_pages; ++ bo->mem.mem_type = DRM_BO_MEM_LOCAL; ++ bo->mem.num_pages = bo->num_pages; ++ bo->mem.mm_node = NULL; ++ bo->mem.page_alignment = page_alignment; ++ bo->buffer_start = buffer_start & PAGE_MASK; ++ bo->priv_flags = 0; ++ bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | ++ DRM_BO_FLAG_MAPPABLE); ++ bo->mem.proposed_flags = 0; ++ atomic_inc(&bm->count); ++ /* ++ * Use drm_bo_modify_proposed_flags to error-check the proposed flags ++ */ ++ ret = drm_bo_modify_proposed_flags (bo, flags, flags); ++ if (ret) ++ goto out_err; ++ ++ /* ++ * For drm_bo_type_device buffers, allocate ++ * address space from the device so that applications ++ * can mmap the buffer from there ++ */ ++ if (bo->type == drm_bo_type_device) { ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_bo_setup_vm_locked(bo); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) ++ goto out_err; ++ } ++ ++ mutex_unlock(&bo->mutex); ++ ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE, ++ 0, NULL); ++ if (ret) ++ goto out_err_unlocked; ++ ++ *buf_obj = bo; ++ return 0; ++ ++out_err: ++ mutex_unlock(&bo->mutex); ++out_err_unlocked: ++ drm_bo_usage_deref_unlocked(&bo); ++ return ret; ++} ++EXPORT_SYMBOL(drm_buffer_object_create); ++ ++ ++static int drm_bo_add_user_object(struct drm_file *file_priv, ++ struct drm_buffer_object *bo, int shareable) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_user_object(file_priv, &bo->base, shareable); ++ if (ret) ++ goto out; ++ ++ bo->base.remove = drm_bo_base_deref_locked; ++ bo->base.type = drm_buffer_type; ++ 
bo->base.ref_struct_locked = NULL; ++ bo->base.unref = drm_buffer_user_object_unmap; ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_create_arg *arg = data; ++ struct drm_bo_create_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ struct drm_buffer_object *entry; ++ enum drm_bo_type bo_type; ++ int ret = 0; ++ ++ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n", ++ (int)(req->size / 1024), req->page_alignment * 4); ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ /* ++ * If the buffer creation request comes in with a starting address, ++ * that points at the desired user pages to map. Otherwise, create ++ * a drm_bo_type_device buffer, which uses pages allocated from the kernel ++ */ ++ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device; ++ ++ /* ++ * User buffers cannot be shared ++ */ ++ if (bo_type == drm_bo_type_user) ++ req->flags &= ~DRM_BO_FLAG_SHAREABLE; ++ ++ ret = drm_buffer_object_create(file_priv->minor->dev, ++ req->size, bo_type, req->flags, ++ req->hint, req->page_alignment, ++ req->buffer_start, &entry); ++ if (ret) ++ goto out; ++ ++ ret = drm_bo_add_user_object(file_priv, entry, ++ req->flags & DRM_BO_FLAG_SHAREABLE); ++ if (ret) { ++ drm_bo_usage_deref_unlocked(&entry); ++ goto out; ++ } ++ ++ mutex_lock(&entry->mutex); ++ drm_bo_fill_rep_arg(entry, rep); ++ mutex_unlock(&entry->mutex); ++ ++out: ++ return ret; ++} ++ ++int drm_bo_setstatus_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_map_wait_idle_arg *arg = data; ++ struct drm_bo_info_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ struct drm_buffer_object *bo; ++ int ret; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, req->handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ if (bo->base.owner != file_priv) ++ req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); ++ ++ ret = drm_bo_do_validate(bo, req->flags, req->mask, ++ req->hint | DRM_BO_HINT_DONT_FENCE, ++ bo->fence_class, rep); ++ ++ drm_bo_usage_deref_unlocked(&bo); ++ ++ (void) drm_bo_read_unlock(&dev->bm.bm_lock); ++ ++ return ret; ++} ++ ++int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_map_wait_idle_arg *arg = data; ++ struct drm_bo_info_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ int ret; ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_buffer_object_map(file_priv, req->handle, req->mask, ++ req->hint, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_handle_arg *arg = data; ++ int ret; ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_buffer_object_unmap(file_priv, arg->handle); ++ return ret; ++} ++ ++ ++int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ 
struct drm_bo_reference_info_arg *arg = data; ++ struct drm_bo_handle_arg *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ struct drm_user_object *uo; ++ int ret; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_user_object_ref(file_priv, req->handle, ++ drm_buffer_type, &uo); ++ if (ret) ++ return ret; ++ ++ ret = drm_bo_handle_info(file_priv, req->handle, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_handle_arg *arg = data; ++ int ret = 0; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type); ++ return ret; ++} ++ ++int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_reference_info_arg *arg = data; ++ struct drm_bo_handle_arg *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ int ret; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_handle_info(file_priv, req->handle, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_map_wait_idle_arg *arg = data; ++ struct drm_bo_info_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ int ret; ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_handle_wait(file_priv, req->handle, ++ req->hint, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int drm_bo_leave_list(struct drm_buffer_object *bo, ++ uint32_t mem_type, ++ int free_pinned, ++ int allow_errors) ++{ ++ struct drm_device *dev = bo->dev; ++ int ret = 0; ++ ++ mutex_lock(&bo->mutex); ++ ++ ret = drm_bo_expire_fence(bo, allow_errors); ++ if (ret) ++ goto out; ++ ++ if (free_pinned) { ++ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->pinned_lru); ++ if (bo->pinned_node == bo->mem.mm_node) ++ bo->pinned_node = NULL; ++ if (bo->pinned_node != NULL) { ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = NULL; ++ } ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { ++ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " ++ "cleanup. Removing flag and evicting.\n"); ++ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; ++ bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT; ++ } ++ ++ if (bo->mem.mem_type == mem_type) ++ ret = drm_bo_evict(bo, mem_type, 0); ++ ++ if (ret) { ++ if (allow_errors) { ++ goto out; ++ } else { ++ ret = 0; ++ DRM_ERROR("Cleanup eviction failed\n"); ++ } ++ } ++ ++out: ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++ ++static struct drm_buffer_object *drm_bo_entry(struct list_head *list, ++ int pinned_list) ++{ ++ if (pinned_list) ++ return list_entry(list, struct drm_buffer_object, pinned_lru); ++ else ++ return list_entry(list, struct drm_buffer_object, lru); ++} ++ ++/* ++ * dev->struct_mutex locked. 
++ */ ++ ++static int drm_bo_force_list_clean(struct drm_device *dev, ++ struct list_head *head, ++ unsigned mem_type, ++ int free_pinned, ++ int allow_errors, ++ int pinned_list) ++{ ++ struct list_head *list, *next, *prev; ++ struct drm_buffer_object *entry, *nentry; ++ int ret; ++ int do_restart; ++ ++ /* ++ * The list traversal is a bit odd here, because an item may ++ * disappear from the list when we release the struct_mutex or ++ * when we decrease the usage count. Also we're not guaranteed ++ * to drain pinned lists, so we can't always restart. ++ */ ++ ++restart: ++ nentry = NULL; ++ list_for_each_safe(list, next, head) { ++ prev = list->prev; ++ ++ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list); ++ atomic_inc(&entry->usage); ++ if (nentry) { ++ atomic_dec(&nentry->usage); ++ nentry = NULL; ++ } ++ ++ /* ++ * Protect the next item from destruction, so we can check ++ * its list pointers later on. ++ */ ++ ++ if (next != head) { ++ nentry = drm_bo_entry(next, pinned_list); ++ atomic_inc(&nentry->usage); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ ret = drm_bo_leave_list(entry, mem_type, free_pinned, ++ allow_errors); ++ mutex_lock(&dev->struct_mutex); ++ ++ drm_bo_usage_deref_locked(&entry); ++ if (ret) ++ return ret; ++ ++ /* ++ * Has the next item disappeared from the list? ++ */ ++ ++ do_restart = ((next->prev != list) && (next->prev != prev)); ++ ++ if (nentry != NULL && do_restart) ++ drm_bo_usage_deref_locked(&nentry); ++ ++ if (do_restart) ++ goto restart; ++ } ++ return 0; ++} ++ ++int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ int ret = -EINVAL; ++ ++ if (mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", mem_type); ++ return ret; ++ } ++ ++ if (!man->has_type) { ++ DRM_ERROR("Trying to take down uninitialized " ++ "memory manager type %u\n", mem_type); ++ return ret; ++ } ++ ++ if ((man->kern_init_type) && (kern_clean == 0)) { ++ DRM_ERROR("Trying to take down kernel initialized " ++ "memory manager type %u\n", mem_type); ++ return -EPERM; ++ } ++ ++ man->use_type = 0; ++ man->has_type = 0; ++ ++ ret = 0; ++ if (mem_type > 0) { ++ BUG_ON(!list_empty(&bm->unfenced)); ++ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); ++ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); ++ ++ if (drm_mm_clean(&man->manager)) { ++ drm_mm_takedown(&man->manager); ++ } else { ++ ret = -EBUSY; ++ } ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_clean_mm); ++ ++/** ++ *Evict all buffers of a particular mem_type, but leave memory manager ++ *regions for NO_MOVE buffers intact. New buffers cannot be added at this ++ *point since we have the hardware lock. 
++ */ ++ ++static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type) ++{ ++ int ret; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ ++ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); ++ return -EINVAL; ++ } ++ ++ if (!man->has_type) { ++ DRM_ERROR("Memory type %u has not been initialized.\n", ++ mem_type); ++ return 0; ++ } ++ ++ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); ++ if (ret) ++ return ret; ++ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); ++ ++ return ret; ++} ++ ++int drm_bo_init_mm(struct drm_device *dev, unsigned type, ++ unsigned long p_offset, unsigned long p_size, ++ int kern_init) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = -EINVAL; ++ struct drm_mem_type_manager *man; ++ ++ if (type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", type); ++ return ret; ++ } ++ ++ man = &bm->man[type]; ++ if (man->has_type) { ++ DRM_ERROR("Memory manager already initialized for type %d\n", ++ type); ++ return ret; ++ } ++ ++ ret = dev->driver->bo_driver->init_mem_type(dev, type, man); ++ if (ret) ++ return ret; ++ ++ ret = 0; ++ if (type != DRM_BO_MEM_LOCAL) { ++ if (!p_size) { ++ DRM_ERROR("Zero size memory manager type %d\n", type); ++ return ret; ++ } ++ ret = drm_mm_init(&man->manager, p_offset, p_size); ++ if (ret) ++ return ret; ++ } ++ man->has_type = 1; ++ man->use_type = 1; ++ man->kern_init_type = kern_init; ++ man->size = p_size; ++ ++ INIT_LIST_HEAD(&man->lru); ++ INIT_LIST_HEAD(&man->pinned); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_init_mm); ++ ++/* ++ * This function is intended to be called on drm driver unload. ++ * If you decide to call it from lastclose, you must protect the call ++ * from a potentially racing drm_bo_driver_init in firstopen. ++ * (This may happen on X server restart). ++ */ ++ ++int drm_bo_driver_finish(struct drm_device *dev) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = 0; ++ unsigned i = DRM_BO_MEM_TYPES; ++ struct drm_mem_type_manager *man; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!bm->initialized) ++ goto out; ++ bm->initialized = 0; ++ ++ while (i--) { ++ man = &bm->man[i]; ++ if (man->has_type) { ++ man->use_type = 0; ++ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) { ++ ret = -EBUSY; ++ DRM_ERROR("DRM memory manager type %d " ++ "is not clean.\n", i); ++ } ++ man->has_type = 0; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!cancel_delayed_work(&bm->wq)) ++ flush_scheduled_work(); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_delayed_delete(dev, 1); ++ if (list_empty(&bm->ddestroy)) ++ DRM_DEBUG("Delayed destroy list was clean\n"); ++ ++ if (list_empty(&bm->man[0].lru)) ++ DRM_DEBUG("Swap list was clean\n"); ++ ++ if (list_empty(&bm->man[0].pinned)) ++ DRM_DEBUG("NO_MOVE list was clean\n"); ++ ++ if (list_empty(&bm->unfenced)) ++ DRM_DEBUG("Unfenced list was clean\n"); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ClearPageReserved(bm->dummy_read_page); ++#endif ++ __free_page(bm->dummy_read_page); ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/* ++ * This function is intended to be called on drm driver load. ++ * If you decide to call it from firstopen, you must protect the call ++ * from a potentially racing drm_bo_driver_finish in lastclose. ++ * (This may happen on X server restart). 
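++ *
++ * A rough life-cycle sketch (illustrative; where a driver hooks these
++ * calls is driver-specific and not dictated by this file):
++ *
++ *   drm_bo_driver_init(dev);                          (driver load)
++ *   drm_bo_init_mm(dev, mem_type, offset, size, 1);   (expose a memory type)
++ *   ...
++ *   drm_bo_driver_finish(dev);                        (driver unload)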
++ */ ++ ++int drm_bo_driver_init(struct drm_device *dev) ++{ ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = -EINVAL; ++ ++ bm->dummy_read_page = NULL; ++ drm_bo_init_lock(&bm->bm_lock); ++ mutex_lock(&dev->struct_mutex); ++ if (!driver) ++ goto out_unlock; ++ ++ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); ++ if (!bm->dummy_read_page) { ++ ret = -ENOMEM; ++ goto out_unlock; ++ } ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ SetPageReserved(bm->dummy_read_page); ++#endif ++ ++ /* ++ * Initialize the system memory buffer type. ++ * Other types need to be driver / IOCTL initialized. ++ */ ++ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1); ++ if (ret) ++ goto out_unlock; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev); ++#else ++ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue); ++#endif ++ bm->initialized = 1; ++ bm->nice_mode = 1; ++ atomic_set(&bm->count, 0); ++ bm->cur_pages = 0; ++ INIT_LIST_HEAD(&bm->unfenced); ++ INIT_LIST_HEAD(&bm->ddestroy); ++out_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_driver_init); ++ ++int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_init_arg *arg = data; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv); ++ if (ret) ++ return ret; ++ ++ ret = -EINVAL; ++ if (arg->magic != DRM_BO_INIT_MAGIC) { ++ DRM_ERROR("You are using an old libdrm that is not compatible with\n" ++ "\tthe kernel DRM module. Please upgrade your libdrm.\n"); ++ return -EINVAL; ++ } ++ if (arg->major != DRM_BO_INIT_MAJOR) { ++ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" ++ "\tversion don't match. Got %d, expected %d.\n", ++ arg->major, DRM_BO_INIT_MAJOR); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ DRM_ERROR("DRM memory manager was not initialized.\n"); ++ goto out; ++ } ++ if (arg->mem_type == 0) { ++ DRM_ERROR("System memory buffers already initialized.\n"); ++ goto out; ++ } ++ ret = drm_bo_init_mm(dev, arg->mem_type, ++ arg->p_offset, arg->p_size, 0); ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); ++ ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_type_arg *arg = data; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = -EINVAL; ++ if (!bm->initialized) { ++ DRM_ERROR("DRM memory manager was not initialized\n"); ++ goto out; ++ } ++ if (arg->mem_type == 0) { ++ DRM_ERROR("No takedown for System memory buffers.\n"); ++ goto out; ++ } ++ ret = 0; ++ if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) { ++ if (ret == -EINVAL) ++ DRM_ERROR("Memory manager type %d not clean. 
" ++ "Delaying takedown\n", arg->mem_type); ++ ret = 0; ++ } ++out: ++ mutex_unlock(&dev->struct_mutex); ++ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); ++ ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_type_arg *arg = data; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) { ++ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ++ ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv); ++ if (ret) ++ return ret; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_bo_lock_mm(dev, arg->mem_type); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) { ++ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int drm_mm_unlock_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_mm_type_arg *arg = data; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ++ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_info_arg *arg = data; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ struct drm_mem_type_manager *man; ++ int ret = 0; ++ int mem_type = arg->mem_type; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ if (mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", arg->mem_type); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ DRM_ERROR("DRM memory manager was not initialized\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ++ man = &bm->man[arg->mem_type]; ++ ++ arg->p_size = man->size; ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++/* ++ * buffer object vm functions. ++ */ ++ ++int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ++ if (mem->mem_type == DRM_BO_MEM_LOCAL) ++ return 0; ++ ++ if (man->flags & _DRM_FLAG_MEMTYPE_CMA) ++ return 0; ++ ++ if (mem->flags & DRM_BO_FLAG_CACHED) ++ return 0; ++ } ++ return 1; ++} ++EXPORT_SYMBOL(drm_mem_reg_is_pci); ++ ++/** ++ * \c Get the PCI offset for the buffer object memory. ++ * ++ * \param bo The buffer object. ++ * \param bus_base On return the base of the PCI region ++ * \param bus_offset On return the byte offset into the PCI region ++ * \param bus_size On return the byte size of the buffer object or zero if ++ * the buffer object memory is not accessible through a PCI region. ++ * \return Failure indication. ++ * ++ * Returns -EINVAL if the buffer object is currently not mappable. ++ * Otherwise returns zero. 
++ */ ++ ++int drm_bo_pci_offset(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ unsigned long *bus_base, ++ unsigned long *bus_offset, unsigned long *bus_size) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ ++ *bus_size = 0; ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) ++ return -EINVAL; ++ ++ if (drm_mem_reg_is_pci(dev, mem)) { ++ *bus_offset = mem->mm_node->start << PAGE_SHIFT; ++ *bus_size = mem->num_pages << PAGE_SHIFT; ++ *bus_base = man->io_offset; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \c Kill all user-space virtual mappings of this buffer object. ++ * ++ * \param bo The buffer object. ++ * ++ * Call bo->mutex locked. ++ */ ++ ++void drm_bo_unmap_virtual(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; ++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; ++ ++ if (!dev->dev_mapping) ++ return; ++ ++ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); ++} ++ ++/** ++ * drm_bo_takedown_vm_locked: ++ * ++ * @bo: the buffer object to remove any drm device mapping ++ * ++ * Remove any associated vm mapping on the drm device node that ++ * would have been created for a drm_bo_type_device buffer ++ */ ++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_map_list *list; ++ drm_local_map_t *map; ++ struct drm_device *dev = bo->dev; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ if (bo->type != drm_bo_type_device) ++ return; ++ ++ list = &bo->map_list; ++ if (list->user_token) { ++ drm_ht_remove_item(&dev->map_hash, &list->hash); ++ list->user_token = 0; ++ } ++ if (list->file_offset_node) { ++ drm_mm_put_block(list->file_offset_node); ++ list->file_offset_node = NULL; ++ } ++ ++ map = list->map; ++ if (!map) ++ return; ++ ++ drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); ++ list->map = NULL; ++ list->user_token = 0ULL; ++ drm_bo_usage_deref_locked(&bo); ++} ++ ++/** ++ * drm_bo_setup_vm_locked: ++ * ++ * @bo: the buffer to allocate address space for ++ * ++ * Allocate address space in the drm device so that applications ++ * can mmap the buffer and access the contents. This only ++ * applies to drm_bo_type_device objects as others are not ++ * placed in the drm device address space. 
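++ *
++ * The user_token chosen here is what drm_bo_fill_rep_arg() later reports
++ * as arg_handle, so a hypothetical user-space client (sketch only, with
++ * drm_fd being its open drm device node) would map the buffer roughly as:
++ *
++ *   ptr = mmap(NULL, rep.size, PROT_READ | PROT_WRITE,
++ *              MAP_SHARED, drm_fd, rep.arg_handle);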
++ */ ++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_map_list *list = &bo->map_list; ++ drm_local_map_t *map; ++ struct drm_device *dev = bo->dev; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); ++ if (!list->map) ++ return -ENOMEM; ++ ++ map = list->map; ++ map->offset = 0; ++ map->type = _DRM_TTM; ++ map->flags = _DRM_REMOVABLE; ++ map->size = bo->mem.num_pages * PAGE_SIZE; ++ atomic_inc(&bo->usage); ++ map->handle = (void *)bo; ++ ++ list->file_offset_node = drm_mm_search_free(&dev->offset_manager, ++ bo->mem.num_pages, 0, 0); ++ ++ if (unlikely(!list->file_offset_node)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->file_offset_node = drm_mm_get_block(list->file_offset_node, ++ bo->mem.num_pages, 0); ++ ++ if (unlikely(!list->file_offset_node)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->hash.key = list->file_offset_node->start; ++ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; ++ ++ return 0; ++} ++ ++int drm_bo_version_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data; ++ ++ arg->major = DRM_BO_INIT_MAJOR; ++ arg->minor = DRM_BO_INIT_MINOR; ++ arg->patchlevel = DRM_BO_INIT_PATCH; ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bo_lock.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bo_lock.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bo_lock.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bo_lock.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,189 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++/* ++ * This file implements a simple replacement for the buffer manager use ++ * of the heavyweight hardware lock. ++ * The lock is a read-write lock. 
Taking it in read mode is fast, and ++ * intended for in-kernel use only. ++ * Taking it in write mode is slow. ++ * ++ * The write mode is used only when there is a need to block all ++ * user-space processes from allocating a ++ * new memory area. ++ * Typical use in write mode is X server VT switching, and it's allowed ++ * to leave kernel space with the write lock held. If a user-space process ++ * dies while having the write-lock, it will be released during the file ++ * descriptor release. ++ * ++ * The read lock is typically placed at the start of an IOCTL- or ++ * user-space callable function that may end up allocating a memory area. ++ * This includes setstatus, super-ioctls and no_pfn; the latter may move ++ * unmappable regions to mappable. It's a bug to leave kernel space with the ++ * read lock held. ++ * ++ * Both read- and write lock taking may be interruptible for low signal-delivery ++ * latency. The locking functions will return -EAGAIN if interrupted by a ++ * signal. ++ * ++ * Locking order: The lock should be taken BEFORE any kernel mutexes ++ * or spinlocks. ++ */ ++ ++#include "drmP.h" ++ ++void drm_bo_init_lock(struct drm_bo_lock *lock) ++{ ++ DRM_INIT_WAITQUEUE(&lock->queue); ++ atomic_set(&lock->write_lock_pending, 0); ++ atomic_set(&lock->readers, 0); ++} ++ ++void drm_bo_read_unlock(struct drm_bo_lock *lock) ++{ ++ if (atomic_dec_and_test(&lock->readers)) ++ wake_up_all(&lock->queue); ++} ++EXPORT_SYMBOL(drm_bo_read_unlock); ++ ++int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible) ++{ ++ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { ++ int ret; ++ ++ if (!interruptible) { ++ wait_event(lock->queue, ++ atomic_read(&lock->write_lock_pending) == 0); ++ continue; ++ } ++ ret = wait_event_interruptible ++ (lock->queue, atomic_read(&lock->write_lock_pending) == 0); ++ if (ret) ++ return -EAGAIN; ++ } ++ ++ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { ++ int ret; ++ if (!interruptible) { ++ wait_event(lock->queue, ++ atomic_read(&lock->readers) != -1); ++ continue; ++ } ++ ret = wait_event_interruptible ++ (lock->queue, atomic_read(&lock->readers) != -1); ++ if (ret) ++ return -EAGAIN; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_read_lock); ++ ++static int __drm_bo_write_unlock(struct drm_bo_lock *lock) ++{ ++ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) ++ return -EINVAL; ++ wake_up_all(&lock->queue); ++ return 0; ++} ++ ++static void drm_bo_write_lock_remove(struct drm_file *file_priv, ++ struct drm_user_object *item) ++{ ++ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base); ++ int ret; ++ ++ ret = __drm_bo_write_unlock(lock); ++ BUG_ON(ret); ++} ++ ++int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible, ++ struct drm_file *file_priv) ++{ ++ int ret = 0; ++ struct drm_device *dev; ++ ++ atomic_inc(&lock->write_lock_pending); ++ ++ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { ++ if (!interruptible) { ++ wait_event(lock->queue, ++ atomic_read(&lock->readers) == 0); ++ continue; ++ } ++ ret = wait_event_interruptible ++ (lock->queue, atomic_read(&lock->readers) == 0); ++ ++ if (ret) { ++ atomic_dec(&lock->write_lock_pending); ++ wake_up_all(&lock->queue); ++ return -EAGAIN; ++ } ++ } ++ ++ /* ++ * Add a dummy user-object, the destructor of which will ++ * make sure the lock is released if the client dies ++ * while holding it. 
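[Editor's illustration, not part of the patch] The counter convention used by this lock can be shown in a few lines outside the kernel: "readers" counts active readers, the value -1 marks a held write lock, and "write_lock_pending" keeps new readers from starving a waiting writer. This is a minimal single-threaded sketch with C11 atomics; the wait-queue sleeping of the real drm_bo_lock is replaced by try-functions that simply fail, and every name below is invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct bo_lock {
    atomic_int write_lock_pending;
    atomic_int readers;          /* >= 0: reader count, -1: write-locked */
};

static int try_read_lock(struct bo_lock *l)
{
    int v;
    if (atomic_load(&l->write_lock_pending))
        return 0;                /* a writer is waiting, back off */
    v = atomic_load(&l->readers);
    /* add one reader unless the lock is currently write-held (-1) */
    while (v != -1) {
        if (atomic_compare_exchange_weak(&l->readers, &v, v + 1))
            return 1;
    }
    return 0;
}

static void read_unlock(struct bo_lock *l)
{
    atomic_fetch_sub(&l->readers, 1);
}

static int try_write_lock(struct bo_lock *l)
{
    int zero = 0;
    atomic_fetch_add(&l->write_lock_pending, 1);
    if (atomic_compare_exchange_strong(&l->readers, &zero, -1)) {
        atomic_fetch_sub(&l->write_lock_pending, 1);
        return 1;
    }
    atomic_fetch_sub(&l->write_lock_pending, 1);
    return 0;
}

static void write_unlock(struct bo_lock *l)
{
    atomic_store(&l->readers, 0);
}

int main(void)
{
    struct bo_lock l = { 0, 0 };
    printf("read:  %d\n", try_read_lock(&l));   /* 1 */
    printf("write: %d\n", try_write_lock(&l));  /* 0: a reader is active */
    read_unlock(&l);
    printf("write: %d\n", try_write_lock(&l));  /* 1 */
    write_unlock(&l);
    return 0;
}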
++ */ ++ ++ if (atomic_dec_and_test(&lock->write_lock_pending)) ++ wake_up_all(&lock->queue); ++ dev = file_priv->minor->dev; ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_user_object(file_priv, &lock->base, 0); ++ lock->base.remove = &drm_bo_write_lock_remove; ++ lock->base.type = drm_lock_type; ++ if (ret) ++ (void)__drm_bo_write_unlock(lock); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_ref_object *ro; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (lock->base.owner != file_priv) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE); ++ BUG_ON(!ro); ++ drm_remove_ref_object(file_priv, ro); ++ lock->base.owner = NULL; ++ ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bo_move.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bo_move.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bo_move.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bo_move.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,630 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Free the old memory node unless it's a pinned region and we ++ * have not been requested to free also pinned regions. 
++ */ ++ ++static void drm_bo_free_old_node(struct drm_buffer_object *bo) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { ++ mutex_lock(&bo->dev->struct_mutex); ++ drm_mm_put_block(old_mem->mm_node); ++ mutex_unlock(&bo->dev->struct_mutex); ++ } ++ old_mem->mm_node = NULL; ++} ++ ++int drm_bo_move_ttm(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_ttm *ttm = bo->ttm; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ int ret; ++ ++ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) { ++ if (evict) ++ drm_ttm_evict(ttm); ++ else ++ drm_ttm_unbind(ttm); ++ ++ drm_bo_free_old_node(bo); ++ DRM_FLAG_MASKED(old_mem->flags, ++ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); ++ old_mem->mem_type = DRM_BO_MEM_LOCAL; ++ save_flags = old_mem->flags; ++ } ++ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { ++ ret = drm_ttm_bind(ttm, new_mem); ++ if (ret) ++ return ret; ++ } ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_move_ttm); ++ ++/** ++ * \c Return a kernel virtual address to the buffer object PCI memory. ++ * ++ * \param bo The buffer object. ++ * \return Failure indication. ++ * ++ * Returns -EINVAL if the buffer object is currently not mappable. ++ * Returns -ENOMEM if the ioremap operation failed. ++ * Otherwise returns zero. ++ * ++ * After a successfull call, bo->iomap contains the virtual address, or NULL ++ * if the buffer object content is not accessible through PCI space. ++ * Call bo->mutex locked. ++ */ ++ ++int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem, ++ void **virtual) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long bus_base; ++ int ret; ++ void *addr; ++ ++ *virtual = NULL; ++ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); ++ if (ret || bus_size == 0) ++ return ret; ++ ++ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) ++ addr = (void *)(((u8 *) man->io_addr) + bus_offset); ++ else { ++ addr = ioremap_nocache(bus_base + bus_offset, bus_size); ++ if (!addr) ++ return -ENOMEM; ++ } ++ *virtual = addr; ++ return 0; ++} ++EXPORT_SYMBOL(drm_mem_reg_ioremap); ++ ++/** ++ * \c Unmap mapping obtained using drm_bo_ioremap ++ * ++ * \param bo The buffer object. ++ * ++ * Call bo->mutex locked. 
++ */ ++ ++void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem, ++ void *virtual) ++{ ++ struct drm_buffer_manager *bm; ++ struct drm_mem_type_manager *man; ++ ++ bm = &dev->bm; ++ man = &bm->man[mem->mem_type]; ++ ++ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) ++ iounmap(virtual); ++} ++ ++static int drm_copy_io_page(void *dst, void *src, unsigned long page) ++{ ++ uint32_t *dstP = ++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); ++ uint32_t *srcP = ++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); ++ ++ int i; ++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) ++ iowrite32(ioread32(srcP++), dstP++); ++ return 0; ++} ++ ++static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src, ++ unsigned long page) ++{ ++ struct page *d = drm_ttm_get_page(ttm, page); ++ void *dst; ++ ++ if (!d) ++ return -ENOMEM; ++ ++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); ++ dst = kmap(d); ++ if (!dst) ++ return -ENOMEM; ++ ++ memcpy_fromio(dst, src, PAGE_SIZE); ++ kunmap(d); ++ return 0; ++} ++ ++static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page) ++{ ++ struct page *s = drm_ttm_get_page(ttm, page); ++ void *src; ++ ++ if (!s) ++ return -ENOMEM; ++ ++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); ++ src = kmap(s); ++ if (!src) ++ return -ENOMEM; ++ ++ memcpy_toio(dst, src, PAGE_SIZE); ++ kunmap(s); ++ return 0; ++} ++ ++int drm_bo_move_memcpy(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_ttm *ttm = bo->ttm; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ struct drm_bo_mem_reg old_copy = *old_mem; ++ void *old_iomap; ++ void *new_iomap; ++ int ret; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ unsigned long i; ++ unsigned long page; ++ unsigned long add = 0; ++ int dir; ++ ++ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); ++ if (ret) ++ return ret; ++ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); ++ if (ret) ++ goto out; ++ ++ if (old_iomap == NULL && new_iomap == NULL) ++ goto out2; ++ if (old_iomap == NULL && ttm == NULL) ++ goto out2; ++ ++ add = 0; ++ dir = 1; ++ ++ if ((old_mem->mem_type == new_mem->mem_type) && ++ (new_mem->mm_node->start < ++ old_mem->mm_node->start + old_mem->mm_node->size)) { ++ dir = -1; ++ add = new_mem->num_pages - 1; ++ } ++ ++ for (i = 0; i < new_mem->num_pages; ++i) { ++ page = i * dir + add; ++ if (old_iomap == NULL) ++ ret = drm_copy_ttm_io_page(ttm, new_iomap, page); ++ else if (new_iomap == NULL) ++ ret = drm_copy_io_ttm_page(ttm, old_iomap, page); ++ else ++ ret = drm_copy_io_page(new_iomap, old_iomap, page); ++ if (ret) ++ goto out1; ++ } ++ mb(); ++out2: ++ drm_bo_free_old_node(bo); ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { ++ drm_ttm_unbind(ttm); ++ drm_ttm_destroy(ttm); ++ bo->ttm = NULL; ++ } ++ ++out1: ++ drm_mem_reg_iounmap(dev, new_mem, new_iomap); ++out: ++ drm_mem_reg_iounmap(dev, &old_copy, old_iomap); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_move_memcpy); ++ ++/* ++ * Transfer a buffer object's memory and LRU status to a newly ++ * created object. User-space references remains with the old ++ * object. 
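[Editor's illustration, not part of the patch] The direction handling in drm_bo_move_memcpy() above is the page-granular analogue of memmove(): when the new placement overlaps the old one inside the same aperture, copying front-to-back would overwrite source pages before they are read, so the loop runs back-to-front. A standalone sketch of that decision, using a toy 4-byte page and invented names:

#include <stdio.h>
#include <string.h>

#define PAGE 4   /* toy page size in bytes */

static void copy_pages(char *base, long dst_page, long src_page,
                       long num_pages)
{
    long dir = 1, add = 0, i, page;

    /* destination starts inside the source range: copy backwards */
    if (dst_page > src_page && dst_page < src_page + num_pages) {
        dir = -1;
        add = num_pages - 1;
    }
    for (i = 0; i < num_pages; ++i) {
        page = i * dir + add;
        memcpy(base + (dst_page + page) * PAGE,
               base + (src_page + page) * PAGE, PAGE);
    }
}

int main(void)
{
    char buf[8 * PAGE] = "AAAABBBBCCCCDDDD";
    copy_pages(buf, 1, 0, 3);   /* overlapping 3-page move */
    printf("%s\n", buf);        /* AAAAAAAABBBBCCCC */
    return 0;
}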
Call bo->mutex locked. ++ */ ++ ++int drm_buffer_object_transfer(struct drm_buffer_object *bo, ++ struct drm_buffer_object **new_obj) ++{ ++ struct drm_buffer_object *fbo; ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); ++ if (!fbo) ++ return -ENOMEM; ++ ++ *fbo = *bo; ++ mutex_init(&fbo->mutex); ++ mutex_lock(&fbo->mutex); ++ mutex_lock(&dev->struct_mutex); ++ ++ DRM_INIT_WAITQUEUE(&bo->event_queue); ++ INIT_LIST_HEAD(&fbo->ddestroy); ++ INIT_LIST_HEAD(&fbo->lru); ++ INIT_LIST_HEAD(&fbo->pinned_lru); ++#ifdef DRM_ODD_MM_COMPAT ++ INIT_LIST_HEAD(&fbo->vma_list); ++ INIT_LIST_HEAD(&fbo->p_mm_list); ++#endif ++ ++ fbo->fence = drm_fence_reference_locked(bo->fence); ++ fbo->pinned_node = NULL; ++ fbo->mem.mm_node->private = (void *)fbo; ++ atomic_set(&fbo->usage, 1); ++ atomic_inc(&bm->count); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&fbo->mutex); ++ ++ *new_obj = fbo; ++ return 0; ++} ++ ++/* ++ * Since move is underway, we need to block signals in this function. ++ * We cannot restart until it has finished. ++ */ ++ ++int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, ++ int evict, int no_wait, uint32_t fence_class, ++ uint32_t fence_type, uint32_t fence_flags, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int ret; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ struct drm_buffer_object *old_obj; ++ ++ if (bo->fence) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ ret = drm_fence_object_create(dev, fence_class, fence_type, ++ fence_flags | DRM_FENCE_FLAG_EMIT, ++ &bo->fence); ++ bo->fence_type = fence_type; ++ if (ret) ++ return ret; ++ ++#ifdef DRM_ODD_MM_COMPAT ++ /* ++ * In this mode, we don't allow pipelining a copy blit, ++ * since the buffer will be accessible from user space ++ * the moment we return and rebuild the page tables. ++ * ++ * With normal vm operation, page tables are rebuilt ++ * on demand using fault(), which waits for buffer idle. ++ */ ++ if (1) ++#else ++ if (evict || ((bo->mem.mm_node == bo->pinned_node) && ++ bo->mem.mm_node != NULL)) ++#endif ++ { ++ if (bo->fence) { ++ (void) drm_fence_object_wait(bo->fence, 0, 1, ++ bo->fence_type); ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ } ++ drm_bo_free_old_node(bo); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ } else { ++ ++ /* This should help pipeline ordinary buffer moves. ++ * ++ * Hang old buffer memory on a new buffer object, ++ * and leave it to be released when the GPU ++ * operation has completed. 
++ */ ++ ++ ret = drm_buffer_object_transfer(bo, &old_obj); ++ ++ if (ret) ++ return ret; ++ ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) ++ old_obj->ttm = NULL; ++ else ++ bo->ttm = NULL; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&old_obj->lru); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ drm_bo_add_to_lru(old_obj); ++ ++ drm_bo_usage_deref_locked(&old_obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ } ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_move_accel_cleanup); ++ ++int drm_bo_same_page(unsigned long offset, ++ unsigned long offset2) ++{ ++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); ++} ++EXPORT_SYMBOL(drm_bo_same_page); ++ ++unsigned long drm_bo_offset_end(unsigned long offset, ++ unsigned long end) ++{ ++ offset = (offset + PAGE_SIZE) & PAGE_MASK; ++ return (end < offset) ? end : offset; ++} ++EXPORT_SYMBOL(drm_bo_offset_end); ++ ++static pgprot_t drm_kernel_io_prot(uint32_t map_type) ++{ ++ pgprot_t tmp = PAGE_KERNEL; ++ ++#if defined(__i386__) || defined(__x86_64__) ++#ifdef USE_PAT_WC ++#warning using pat ++ if (drm_use_pat() && map_type == _DRM_TTM) { ++ pgprot_val(tmp) |= _PAGE_PAT; ++ return tmp; ++ } ++#endif ++ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { ++ pgprot_val(tmp) |= _PAGE_PCD; ++ pgprot_val(tmp) &= ~_PAGE_PWT; ++ } ++#elif defined(__powerpc__) ++ pgprot_val(tmp) |= _PAGE_NO_CACHE; ++ if (map_type == _DRM_REGISTERS) ++ pgprot_val(tmp) |= _PAGE_GUARDED; ++#endif ++#if defined(__ia64__) ++ if (map_type == _DRM_TTM) ++ tmp = pgprot_writecombine(tmp); ++ else ++ tmp = pgprot_noncached(tmp); ++#endif ++ return tmp; ++} ++ ++static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, ++ unsigned long bus_offset, unsigned long bus_size, ++ struct drm_bo_kmap_obj *map) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ ++ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { ++ map->bo_kmap_type = bo_map_premapped; ++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); ++ } else { ++ map->bo_kmap_type = bo_map_iomap; ++ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size); ++ } ++ return (!map->virtual) ? -ENOMEM : 0; ++} ++ ++static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, ++ unsigned long start_page, unsigned long num_pages, ++ struct drm_bo_kmap_obj *map) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ pgprot_t prot; ++ struct drm_ttm *ttm = bo->ttm; ++ struct page *d; ++ int i; ++ ++ BUG_ON(!ttm); ++ ++ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { ++ ++ /* ++ * We're mapping a single page, and the desired ++ * page protection is consistent with the bo. ++ */ ++ ++ map->bo_kmap_type = bo_map_kmap; ++ map->page = drm_ttm_get_page(ttm, start_page); ++ map->virtual = kmap(map->page); ++ } else { ++ /* ++ * Populate the part we're mapping; ++ */ ++ ++ for (i = start_page; i < start_page + num_pages; ++i) { ++ d = drm_ttm_get_page(ttm, i); ++ if (!d) ++ return -ENOMEM; ++ } ++ ++ /* ++ * We need to use vmap to get the desired page protection ++ * or to make the buffer object look contigous. ++ */ ++ ++ prot = (mem->flags & DRM_BO_FLAG_CACHED) ? 
++ PAGE_KERNEL : ++ drm_kernel_io_prot(man->drm_bus_maptype); ++ map->bo_kmap_type = bo_map_vmap; ++ map->virtual = vmap(ttm->pages + start_page, ++ num_pages, 0, prot); ++ } ++ return (!map->virtual) ? -ENOMEM : 0; ++} ++ ++/* ++ * This function is to be used for kernel mapping of buffer objects. ++ * It chooses the appropriate mapping method depending on the memory type ++ * and caching policy the buffer currently has. ++ * Mapping multiple pages or buffers that live in io memory is a bit slow and ++ * consumes vmalloc space. Be restrictive with such mappings. ++ * Mapping single pages usually returns the logical kernel address, ++ * (which is fast) ++ * BUG may use slower temporary mappings for high memory pages or ++ * uncached / write-combined pages. ++ * ++ * The function fills in a drm_bo_kmap_obj which can be used to return the ++ * kernel virtual address of the buffer. ++ * ++ * Code servicing a non-priviliged user request is only allowed to map one ++ * page at a time. We might need to implement a better scheme to stop such ++ * processes from consuming all vmalloc space. ++ */ ++ ++int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, ++ unsigned long num_pages, struct drm_bo_kmap_obj *map) ++{ ++ int ret; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ ++ map->virtual = NULL; ++ ++ if (num_pages > bo->num_pages) ++ return -EINVAL; ++ if (start_page > bo->num_pages) ++ return -EINVAL; ++#if 0 ++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) ++ return -EPERM; ++#endif ++ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, ++ &bus_offset, &bus_size); ++ ++ if (ret) ++ return ret; ++ ++ if (bus_size == 0) { ++ return drm_bo_kmap_ttm(bo, start_page, num_pages, map); ++ } else { ++ bus_offset += start_page << PAGE_SHIFT; ++ bus_size = num_pages << PAGE_SHIFT; ++ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); ++ } ++} ++EXPORT_SYMBOL(drm_bo_kmap); ++ ++void drm_bo_kunmap(struct drm_bo_kmap_obj *map) ++{ ++ if (!map->virtual) ++ return; ++ ++ switch (map->bo_kmap_type) { ++ case bo_map_iomap: ++ iounmap(map->virtual); ++ break; ++ case bo_map_vmap: ++ vunmap(map->virtual); ++ break; ++ case bo_map_kmap: ++ kunmap(map->page); ++ break; ++ case bo_map_premapped: ++ break; ++ default: ++ BUG(); ++ } ++ map->virtual = NULL; ++ map->page = NULL; ++} ++EXPORT_SYMBOL(drm_bo_kunmap); ++ ++int drm_bo_pfn_prot(struct drm_buffer_object *bo, ++ unsigned long dst_offset, ++ unsigned long *pfn, ++ pgprot_t *prot) ++{ ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_device *dev = bo->dev; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long bus_base; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ int ret; ++ ++ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, ++ &bus_size); ++ if (ret) ++ return -EINVAL; ++ ++ if (bus_size != 0) ++ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT; ++ else if (!bo->ttm) ++ return -EINVAL; ++ else ++ *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT)); ++ ++ *prot = (mem->flags & DRM_BO_FLAG_CACHED) ? 
++ PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_pfn_prot); ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bufs.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bufs.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_bufs.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_bufs.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1608 @@ ++/** ++ * \file drm_bufs.c ++ * Generic buffer template ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include "drmP.h" ++ ++unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) ++{ ++ return pci_resource_start(dev->pdev, resource); ++} ++EXPORT_SYMBOL(drm_get_resource_start); ++ ++unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) ++{ ++ return pci_resource_len(dev->pdev, resource); ++} ++EXPORT_SYMBOL(drm_get_resource_len); ++ ++struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map) ++{ ++ struct drm_map_list *entry; ++ list_for_each_entry(entry, &dev->maplist, head) { ++ if (entry->map && map->type == entry->map->type && ++ ((entry->map->offset == map->offset) || ++ (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) { ++ return entry; ++ } ++ } ++ ++ return NULL; ++} ++EXPORT_SYMBOL(drm_find_matching_map); ++ ++static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, ++ unsigned long user_token, int hashed_handle) ++{ ++ int use_hashed_handle; ++ ++#if (BITS_PER_LONG == 64) ++ use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle); ++#elif (BITS_PER_LONG == 32) ++ use_hashed_handle = hashed_handle; ++#else ++#error Unsupported long size. Neither 64 nor 32 bits. 
++#endif ++ ++ if (!use_hashed_handle) { ++ int ret; ++ hash->key = user_token >> PAGE_SHIFT; ++ ret = drm_ht_insert_item(&dev->map_hash, hash); ++ if (ret != -EINVAL) ++ return ret; ++ } ++ return drm_ht_just_insert_please(&dev->map_hash, hash, ++ user_token, 32 - PAGE_SHIFT - 3, ++ 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT); ++} ++ ++/** ++ * Ioctl to specify a range of memory that is available for mapping by a non-root process. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_map structure. ++ * \return zero on success or a negative value on error. ++ * ++ * Adjusts the memory offset to its absolute value according to the mapping ++ * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where ++ * applicable and if supported by the kernel. ++ */ ++static int drm_addmap_core(struct drm_device *dev, unsigned int offset, ++ unsigned int size, enum drm_map_type type, ++ enum drm_map_flags flags, ++ struct drm_map_list **maplist) ++{ ++ struct drm_map *map; ++ struct drm_map_list *list; ++ drm_dma_handle_t *dmah; ++ unsigned long user_token; ++ int ret; ++ ++ map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); ++ if (!map) ++ return -ENOMEM; ++ ++ map->offset = offset; ++ map->size = size; ++ map->flags = flags; ++ map->type = type; ++ ++ /* Only allow shared memory to be removable since we only keep enough ++ * book keeping information about shared memory to allow for removal ++ * when processes fork. ++ */ ++ if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", ++ map->offset, map->size, map->type); ++ if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ map->mtrr = -1; ++ map->handle = NULL; ++ ++ switch (map->type) { ++ case _DRM_REGISTERS: ++ case _DRM_FRAME_BUFFER: ++#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) ++ if (map->offset + (map->size - 1) < map->offset || ++ map->offset < virt_to_phys(high_memory)) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++#endif ++#ifdef __alpha__ ++ map->offset += dev->hose->mem_space->start; ++#endif ++ /* Some drivers preinitialize some maps, without the X Server ++ * needing to be aware of it. Therefore, we just return success ++ * when the server tries to create a duplicate map. 
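[Editor's illustration, not part of the patch] drm_map_handle() above only reuses user_token >> PAGE_SHIFT as the hash key when the token fits in 32 bits; on 64-bit kernels a token above 4 GiB falls back to a generated hashed handle. The predicate, pulled out as a hedged standalone sketch with invented names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the decision drm_map_handle() makes: a user
 * token can serve directly as a handle key only if it fits in 32 bits. */
static int wants_hashed_handle(uint64_t user_token, int caller_requested_hash,
                               int bits_per_long)
{
    if (bits_per_long == 64)
        return (user_token & 0xFFFFFFFF00000000ULL) != 0 ||
               caller_requested_hash;
    return caller_requested_hash;  /* on 32-bit the high half is never set */
}

int main(void)
{
    printf("%d\n", wants_hashed_handle(0x00000000fffff000ULL, 0, 64)); /* 0 */
    printf("%d\n", wants_hashed_handle(0x000000017ffff000ULL, 0, 64)); /* 1 */
    return 0;
}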
++ */ ++ list = drm_find_matching_map(dev, map); ++ if (list != NULL) { ++ if (list->map->size != map->size) { ++ DRM_DEBUG("Matching maps of type %d with " ++ "mismatched sizes, (%ld vs %ld)\n", ++ map->type, map->size, ++ list->map->size); ++ list->map->size = map->size; ++ } ++ ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ *maplist = list; ++ return 0; ++ } ++ ++ if (drm_core_has_MTRR(dev)) { ++ if (map->type == _DRM_FRAME_BUFFER || ++ (map->flags & _DRM_WRITE_COMBINING)) { ++ map->mtrr = mtrr_add(map->offset, map->size, ++ MTRR_TYPE_WRCOMB, 1); ++ } ++ } ++ if (map->type == _DRM_REGISTERS) { ++ map->handle = ioremap(map->offset, map->size); ++ if (!map->handle) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -ENOMEM; ++ } ++ } ++ break; ++ case _DRM_SHM: ++ list = drm_find_matching_map(dev, map); ++ if (list != NULL) { ++ if(list->map->size != map->size) { ++ DRM_DEBUG("Matching maps of type %d with " ++ "mismatched sizes, (%ld vs %ld)\n", ++ map->type, map->size, list->map->size); ++ list->map->size = map->size; ++ } ++ ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ *maplist = list; ++ return 0; ++ } ++ map->handle = vmalloc_user(map->size); ++ DRM_DEBUG("%lu %d %p\n", ++ map->size, drm_order(map->size), map->handle); ++ if (!map->handle) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -ENOMEM; ++ } ++ map->offset = (unsigned long)map->handle; ++ if (map->flags & _DRM_CONTAINS_LOCK) { ++ /* Prevent a 2nd X Server from creating a 2nd lock */ ++ if (dev->lock.hw_lock != NULL) { ++ vfree(map->handle); ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EBUSY; ++ } ++ dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ ++ } ++ break; ++ case _DRM_AGP: { ++ struct drm_agp_mem *entry; ++ int valid = 0; ++ ++ if (!drm_core_has_AGP(dev)) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++#ifdef __alpha__ ++ map->offset += dev->hose->mem_space->start; ++#endif ++ /* In some cases (i810 driver), user space may have already ++ * added the AGP base itself, because dev->agp->base previously ++ * only got set during AGP enable. So, only add the base ++ * address if the map's offset isn't already within the ++ * aperture. ++ */ ++ if (map->offset < dev->agp->base || ++ map->offset > dev->agp->base + ++ dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { ++ map->offset += dev->agp->base; ++ } ++ map->mtrr = dev->agp->agp_mtrr; /* for getmap */ ++ ++ /* This assumes the DRM is in total control of AGP space. ++ * It's not always the case as AGP can be in the control ++ * of user space (i.e. i810 driver). 
So this loop will get ++ * skipped and we double check that dev->agp->memory is ++ * actually set as well as being invalid before EPERM'ing ++ */ ++ list_for_each_entry(entry, &dev->agp->memory, head) { ++ if ((map->offset >= entry->bound) && ++ (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { ++ valid = 1; ++ break; ++ } ++ } ++ if (!list_empty(&dev->agp->memory) && !valid) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EPERM; ++ } ++ DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size); ++ break; ++ } ++ case _DRM_SCATTER_GATHER: ++ if (!dev->sg) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ map->offset += (unsigned long)dev->sg->virtual; ++ break; ++ case _DRM_CONSISTENT: ++ /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, ++ * As we're limiting the address to 2^32-1 (or less), ++ * casting it down to 32 bits is no problem, but we ++ * need to point to a 64bit variable first. */ ++ dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); ++ if (!dmah) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -ENOMEM; ++ } ++ map->handle = dmah->vaddr; ++ map->offset = (unsigned long)dmah->busaddr; ++ kfree(dmah); ++ break; ++ default: ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ ++ list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); ++ if (!list) { ++ if (map->type == _DRM_REGISTERS) ++ iounmap(map->handle); ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ memset(list, 0, sizeof(*list)); ++ list->map = map; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_add(&list->head, &dev->maplist); ++ ++ /* Assign a 32-bit handle */ ++ ++ user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle : ++ map->offset; ++ ret = drm_map_handle(dev, &list->hash, user_token, 0); ++ ++ if (ret) { ++ if (map->type == _DRM_REGISTERS) ++ iounmap(map->handle); ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ drm_free(list, sizeof(*list), DRM_MEM_MAPS); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ list->user_token = list->hash.key << PAGE_SHIFT; ++ mutex_unlock(&dev->struct_mutex); ++ ++ *maplist = list; ++ return 0; ++} ++ ++int drm_addmap(struct drm_device *dev, unsigned int offset, ++ unsigned int size, enum drm_map_type type, ++ enum drm_map_flags flags, drm_local_map_t ** map_ptr) ++{ ++ struct drm_map_list *list; ++ int rc; ++ ++ rc = drm_addmap_core(dev, offset, size, type, flags, &list); ++ if (!rc) ++ *map_ptr = list->map; ++ return rc; ++} ++ ++EXPORT_SYMBOL(drm_addmap); ++ ++int drm_addmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_map *map = data; ++ struct drm_map_list *maplist; ++ int err; ++ ++ if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) ++ return -EPERM; ++ ++ err = drm_addmap_core(dev, map->offset, map->size, map->type, ++ map->flags, &maplist); ++ ++ if (err) ++ return err; ++ ++ /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ ++ map->handle = (void *)(unsigned long)maplist->user_token; ++ return 0; ++} ++ ++/** ++ * Remove a map private from list and deallocate resources if the mapping ++ * isn't in use. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a struct drm_map structure. ++ * \return zero on success or a negative value on error. 
++ * ++ * Searches the map on drm_device::maplist, removes it from the list, see if ++ * its being used, and free any associate resource (such as MTRR's) if it's not ++ * being on use. ++ * ++ * \sa drm_addmap ++ */ ++int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) ++{ ++ struct drm_map_list *r_list = NULL, *list_t; ++ drm_dma_handle_t dmah; ++ int found = 0; ++ ++ /* Find the list entry for the map and remove it */ ++ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { ++ if (r_list->map == map) { ++ list_del(&r_list->head); ++ drm_ht_remove_key(&dev->map_hash, ++ r_list->user_token >> PAGE_SHIFT); ++ drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS); ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ return -EINVAL; ++ ++ /* List has wrapped around to the head pointer, or it's empty and we ++ * didn't find anything. ++ */ ++ ++ switch (map->type) { ++ case _DRM_REGISTERS: ++ iounmap(map->handle); ++ /* FALLTHROUGH */ ++ case _DRM_FRAME_BUFFER: ++ if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { ++ int retcode; ++ retcode = mtrr_del(map->mtrr, map->offset, map->size); ++ DRM_DEBUG("mtrr_del=%d\n", retcode); ++ } ++ break; ++ case _DRM_SHM: ++ vfree(map->handle); ++ break; ++ case _DRM_AGP: ++ case _DRM_SCATTER_GATHER: ++ break; ++ case _DRM_CONSISTENT: ++ dmah.vaddr = map->handle; ++ dmah.busaddr = map->offset; ++ dmah.size = map->size; ++ __drm_pci_free(dev, &dmah); ++ break; ++ case _DRM_TTM: ++ BUG_ON(1); ++ } ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_rmmap_locked); ++ ++int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) ++{ ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_rmmap_locked(dev, map); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_rmmap); ++ ++/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on ++ * the last close of the device, and this is necessary for cleanup when things ++ * exit uncleanly. Therefore, having userland manually remove mappings seems ++ * like a pointless exercise since they're going away anyway. ++ * ++ * One use case might be after addmap is allowed for normal users for SHM and ++ * gets used by drivers that the server doesn't need to care about. This seems ++ * unlikely. ++ */ ++int drm_rmmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_map *request = data; ++ drm_local_map_t *map = NULL; ++ struct drm_map_list *r_list; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map && ++ r_list->user_token == (unsigned long)request->handle && ++ r_list->map->flags & _DRM_REMOVABLE) { ++ map = r_list->map; ++ break; ++ } ++ } ++ ++ /* List has wrapped around to the head pointer, or its empty we didn't ++ * find anything. ++ */ ++ if (list_empty(&dev->maplist) || !map) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ /* Register and framebuffer maps are permanent */ ++ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++ } ++ ++ ret = drm_rmmap_locked(dev, map); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++/** ++ * Cleanup after an error on one of the addbufs() functions. ++ * ++ * \param dev DRM device. ++ * \param entry buffer entry where the error occurred. ++ * ++ * Frees any pages and buffers associated with the given entry. 
++ */ ++static void drm_cleanup_buf_error(struct drm_device *dev, ++ struct drm_buf_entry *entry) ++{ ++ int i; ++ ++ if (entry->seg_count) { ++ for (i = 0; i < entry->seg_count; i++) { ++ if (entry->seglist[i]) { ++ drm_pci_free(dev, entry->seglist[i]); ++ } ++ } ++ drm_free(entry->seglist, ++ entry->seg_count * ++ sizeof(*entry->seglist), DRM_MEM_SEGS); ++ ++ entry->seg_count = 0; ++ } ++ ++ if (entry->buf_count) { ++ for (i = 0; i < entry->buf_count; i++) { ++ if (entry->buflist[i].dev_private) { ++ drm_free(entry->buflist[i].dev_private, ++ entry->buflist[i].dev_priv_size, ++ DRM_MEM_BUFS); ++ } ++ } ++ drm_free(entry->buflist, ++ entry->buf_count * ++ sizeof(*entry->buflist), DRM_MEM_BUFS); ++ ++ entry->buf_count = 0; ++ } ++} ++ ++#if __OS_HAS_AGP ++/** ++ * Add AGP buffers for DMA transfers. ++ * ++ * \param dev struct drm_device to which the buffers are to be added. ++ * \param request pointer to a struct drm_buf_desc describing the request. ++ * \return zero on success or a negative number on failure. ++ * ++ * After some sanity checks creates a drm_buf structure for each buffer and ++ * reallocates the buffer list of the same size order to accommodate the new ++ * buffers. ++ */ ++int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_entry *entry; ++ struct drm_agp_mem *agp_entry; ++ struct drm_buf *buf; ++ unsigned long offset; ++ unsigned long agp_offset; ++ int count; ++ int order; ++ int size; ++ int alignment; ++ int page_order; ++ int total; ++ int byte_count; ++ int i, valid; ++ struct drm_buf **temp_buflist; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ byte_count = 0; ++ agp_offset = dev->agp->base + request->agp_start; ++ ++ DRM_DEBUG("count: %d\n", count); ++ DRM_DEBUG("order: %d\n", order); ++ DRM_DEBUG("size: %d\n", size); ++ DRM_DEBUG("agp_offset: %lx\n", agp_offset); ++ DRM_DEBUG("alignment: %d\n", alignment); ++ DRM_DEBUG("page_order: %d\n", page_order); ++ DRM_DEBUG("total: %d\n", total); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ /* Make sure buffers are located in AGP memory that we own */ ++ valid = 0; ++ list_for_each_entry(agp_entry, &dev->agp->memory, head) { ++ if ((agp_offset >= agp_entry->bound) && ++ (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { ++ valid = 1; ++ break; ++ } ++ } ++ if (!list_empty(&dev->agp->memory) && !valid) { ++ DRM_DEBUG("zone invalid\n"); ++ return -EINVAL; ++ } ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ ++ offset = 0; ++ ++ while (entry->buf_count < count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ ++ buf->offset = (dma->byte_count + offset); ++ buf->bus_address = agp_offset + offset; ++ buf->address = (void *)(agp_offset + offset); ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper amount. 
*/ ++ entry->buf_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(buf->dev_private, 0, buf->dev_priv_size); ++ ++ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); ++ ++ offset += alignment; ++ entry->buf_count++; ++ byte_count += PAGE_SIZE << page_order; ++ } ++ ++ DRM_DEBUG("byte_count: %d\n", byte_count); ++ ++ temp_buflist = drm_realloc(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), ++ (dma->buf_count + entry->buf_count) ++ * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ if (!temp_buflist) { ++ /* Free the entry because it isn't valid */ ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ dma->buflist = temp_buflist; ++ ++ for (i = 0; i < entry->buf_count; i++) { ++ dma->buflist[i + dma->buf_count] = &entry->buflist[i]; ++ } ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += byte_count >> PAGE_SHIFT; ++ dma->byte_count += byte_count; ++ ++ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ++ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ dma->flags = _DRM_DMA_USE_AGP; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++} ++EXPORT_SYMBOL(drm_addbufs_agp); ++#endif /* __OS_HAS_AGP */ ++ ++int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int count; ++ int order; ++ int size; ++ int total; ++ int page_order; ++ struct drm_buf_entry *entry; ++ drm_dma_handle_t *dmah; ++ struct drm_buf *buf; ++ int alignment; ++ unsigned long offset; ++ int i; ++ int byte_count; ++ int page_count; ++ unsigned long *temp_pagelist; ++ struct drm_buf **temp_buflist; ++ ++ if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", ++ request->count, request->size, size, order, dev->queue_count); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->seglist = drm_alloc(count * sizeof(*entry->seglist), ++ DRM_MEM_SEGS); ++ if (!entry->seglist) { ++ drm_free(entry->buflist, ++ count * sizeof(*entry->buflist), DRM_MEM_BUFS); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->seglist, 0, count * sizeof(*entry->seglist)); ++ ++ /* Keep the original pagelist until we know all the allocations ++ * have succeeded ++ */ ++ temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) ++ * sizeof(*dma->pagelist), DRM_MEM_PAGES); ++ if (!temp_pagelist) { ++ drm_free(entry->buflist, ++ count * sizeof(*entry->buflist), DRM_MEM_BUFS); ++ drm_free(entry->seglist, ++ count * sizeof(*entry->seglist), DRM_MEM_SEGS); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memcpy(temp_pagelist, ++ dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); ++ DRM_DEBUG("pagelist: %d entries\n", ++ dma->page_count + (count << page_order)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ byte_count = 0; ++ page_count = 0; ++ ++ while (entry->buf_count < count) { ++ ++ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); ++ ++ if (!dmah) { ++ /* Set count correctly so we free the proper amount. 
*/ ++ entry->buf_count = count; ++ entry->seg_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ drm_free(temp_pagelist, ++ (dma->page_count + (count << page_order)) ++ * sizeof(*dma->pagelist), DRM_MEM_PAGES); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ entry->seglist[entry->seg_count++] = dmah; ++ for (i = 0; i < (1 << page_order); i++) { ++ DRM_DEBUG("page %d @ 0x%08lx\n", ++ dma->page_count + page_count, ++ (unsigned long)dmah->vaddr + PAGE_SIZE * i); ++ temp_pagelist[dma->page_count + page_count++] ++ = (unsigned long)dmah->vaddr + PAGE_SIZE * i; ++ } ++ for (offset = 0; ++ offset + size <= total && entry->buf_count < count; ++ offset += alignment, ++entry->buf_count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ buf->offset = (dma->byte_count + byte_count + offset); ++ buf->address = (void *)(dmah->vaddr + offset); ++ buf->bus_address = dmah->busaddr + offset; ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, ++ DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper amount. */ ++ entry->buf_count = count; ++ entry->seg_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ drm_free(temp_pagelist, ++ (dma->page_count + ++ (count << page_order)) ++ * sizeof(*dma->pagelist), ++ DRM_MEM_PAGES); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(buf->dev_private, 0, buf->dev_priv_size); ++ ++ DRM_DEBUG("buffer %d @ %p\n", ++ entry->buf_count, buf->address); ++ } ++ byte_count += PAGE_SIZE << page_order; ++ } ++ ++ temp_buflist = drm_realloc(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), ++ (dma->buf_count + entry->buf_count) ++ * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ if (!temp_buflist) { ++ /* Free the entry because it isn't valid */ ++ drm_cleanup_buf_error(dev, entry); ++ drm_free(temp_pagelist, ++ (dma->page_count + (count << page_order)) ++ * sizeof(*dma->pagelist), DRM_MEM_PAGES); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ dma->buflist = temp_buflist; ++ ++ for (i = 0; i < entry->buf_count; i++) { ++ dma->buflist[i + dma->buf_count] = &entry->buflist[i]; ++ } ++ ++ /* No allocations failed, so now we can replace the orginal pagelist ++ * with the new one. 
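[Editor's illustration, not part of the patch] The size bookkeeping repeated across the addbufs_*() paths above (order, page_order, total) boils down to a ceiling log2 plus a shift. A standalone sketch, assuming a 4 KiB page purely for the printed numbers; order_of() stands in for the driver's drm_order():

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* smallest order such that (1 << order) >= size */
static int order_of(unsigned long size)
{
    int order = 0;
    while ((1UL << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long request_size = 20000;             /* bytes asked for */
    int order = order_of(request_size);             /* 15 */
    unsigned long size = 1UL << order;              /* 32768: per-buffer size */
    int page_order = order > PAGE_SHIFT ? order - PAGE_SHIFT : 0;  /* 3 */
    unsigned long total = PAGE_SIZE << page_order;  /* 32768: one allocation chunk */

    printf("order=%d size=%lu page_order=%d chunk=%lu\n",
           order, size, page_order, total);
    return 0;
}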
++ */ ++ if (dma->page_count) { ++ drm_free(dma->pagelist, ++ dma->page_count * sizeof(*dma->pagelist), ++ DRM_MEM_PAGES); ++ } ++ dma->pagelist = temp_pagelist; ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += entry->seg_count << page_order; ++ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ if (request->flags & _DRM_PCI_BUFFER_RO) ++ dma->flags = _DRM_DMA_USE_PCI_RO; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++ ++} ++EXPORT_SYMBOL(drm_addbufs_pci); ++ ++static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_entry *entry; ++ struct drm_buf *buf; ++ unsigned long offset; ++ unsigned long agp_offset; ++ int count; ++ int order; ++ int size; ++ int alignment; ++ int page_order; ++ int total; ++ int byte_count; ++ int i; ++ struct drm_buf **temp_buflist; ++ ++ if (!drm_core_check_feature(dev, DRIVER_SG)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ byte_count = 0; ++ agp_offset = request->agp_start; ++ ++ DRM_DEBUG("count: %d\n", count); ++ DRM_DEBUG("order: %d\n", order); ++ DRM_DEBUG("size: %d\n", size); ++ DRM_DEBUG("agp_offset: %lu\n", agp_offset); ++ DRM_DEBUG("alignment: %d\n", alignment); ++ DRM_DEBUG("page_order: %d\n", page_order); ++ DRM_DEBUG("total: %d\n", total); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ ++ offset = 0; ++ ++ while (entry->buf_count < count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ ++ buf->offset = (dma->byte_count + offset); ++ buf->bus_address = agp_offset + offset; ++ buf->address = (void *)(agp_offset + offset ++ + (unsigned long)dev->sg->virtual); ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper 
amount. */ ++ entry->buf_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ ++ memset(buf->dev_private, 0, buf->dev_priv_size); ++ ++ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); ++ ++ offset += alignment; ++ entry->buf_count++; ++ byte_count += PAGE_SIZE << page_order; ++ } ++ ++ DRM_DEBUG("byte_count: %d\n", byte_count); ++ ++ temp_buflist = drm_realloc(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), ++ (dma->buf_count + entry->buf_count) ++ * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ if (!temp_buflist) { ++ /* Free the entry because it isn't valid */ ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ dma->buflist = temp_buflist; ++ ++ for (i = 0; i < entry->buf_count; i++) { ++ dma->buflist[i + dma->buf_count] = &entry->buflist[i]; ++ } ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += byte_count >> PAGE_SHIFT; ++ dma->byte_count += byte_count; ++ ++ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ++ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ dma->flags = _DRM_DMA_USE_SG; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++} ++ ++int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_entry *entry; ++ struct drm_buf *buf; ++ unsigned long offset; ++ unsigned long agp_offset; ++ int count; ++ int order; ++ int size; ++ int alignment; ++ int page_order; ++ int total; ++ int byte_count; ++ int i; ++ struct drm_buf **temp_buflist; ++ ++ if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ byte_count = 0; ++ agp_offset = request->agp_start; ++ ++ DRM_DEBUG("count: %d\n", count); ++ DRM_DEBUG("order: %d\n", order); ++ DRM_DEBUG("size: %d\n", size); ++ DRM_DEBUG("agp_offset: %lu\n", agp_offset); ++ DRM_DEBUG("alignment: %d\n", alignment); ++ DRM_DEBUG("page_order: %d\n", page_order); ++ DRM_DEBUG("total: %d\n", total); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ ++ offset = 0; ++ ++ while (entry->buf_count < count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ ++ buf->offset = (dma->byte_count + offset); ++ buf->bus_address = agp_offset + offset; ++ buf->address = (void *)(agp_offset + offset); ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper amount. 
*/ ++ entry->buf_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(buf->dev_private, 0, buf->dev_priv_size); ++ ++ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); ++ ++ offset += alignment; ++ entry->buf_count++; ++ byte_count += PAGE_SIZE << page_order; ++ } ++ ++ DRM_DEBUG("byte_count: %d\n", byte_count); ++ ++ temp_buflist = drm_realloc(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), ++ (dma->buf_count + entry->buf_count) ++ * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ if (!temp_buflist) { ++ /* Free the entry because it isn't valid */ ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ dma->buflist = temp_buflist; ++ ++ for (i = 0; i < entry->buf_count; i++) { ++ dma->buflist[i + dma->buf_count] = &entry->buflist[i]; ++ } ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += byte_count >> PAGE_SHIFT; ++ dma->byte_count += byte_count; ++ ++ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ++ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ dma->flags = _DRM_DMA_USE_FB; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++} ++EXPORT_SYMBOL(drm_addbufs_fb); ++ ++ ++/** ++ * Add buffers for DMA transfers (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a struct drm_buf_desc request. ++ * \return zero on success or a negative number on failure. ++ * ++ * According with the memory type specified in drm_buf_desc::flags and the ++ * build options, it dispatches the call either to addbufs_agp(), ++ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent ++ * PCI memory respectively. ++ */ ++int drm_addbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_buf_desc *request = data; ++ int ret; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++#if __OS_HAS_AGP ++ if (request->flags & _DRM_AGP_BUFFER) ++ ret = drm_addbufs_agp(dev, request); ++ else ++#endif ++ if (request->flags & _DRM_SG_BUFFER) ++ ret = drm_addbufs_sg(dev, request); ++ else if (request->flags & _DRM_FB_BUFFER) ++ ret = drm_addbufs_fb(dev, request); ++ else ++ ret = drm_addbufs_pci(dev, request); ++ ++ return ret; ++} ++ ++/** ++ * Get information about the buffer mappings. ++ * ++ * This was originally mean for debugging purposes, or by a sophisticated ++ * client library to determine how best to use the available buffers (e.g., ++ * large buffers can be used for image transfer). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_buf_info structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Increments drm_device::buf_use while holding the drm_device::count_lock ++ * lock, preventing of allocating more buffers after this call. Information ++ * about each requested buffer is then copied into user space. 
++ */ ++int drm_infobufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_info *request = data; ++ int i; ++ int count; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ spin_lock(&dev->count_lock); ++ if (atomic_read(&dev->buf_alloc)) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ ++dev->buf_use; /* Can't allocate more after this call */ ++ spin_unlock(&dev->count_lock); ++ ++ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { ++ if (dma->bufs[i].buf_count) ++ ++count; ++ } ++ ++ DRM_DEBUG("count = %d\n", count); ++ ++ if (request->count >= count) { ++ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { ++ if (dma->bufs[i].buf_count) { ++ struct drm_buf_desc __user *to = ++ &request->list[count]; ++ struct drm_buf_entry *from = &dma->bufs[i]; ++ struct drm_freelist *list = &dma->bufs[i].freelist; ++ if (copy_to_user(&to->count, ++ &from->buf_count, ++ sizeof(from->buf_count)) || ++ copy_to_user(&to->size, ++ &from->buf_size, ++ sizeof(from->buf_size)) || ++ copy_to_user(&to->low_mark, ++ &list->low_mark, ++ sizeof(list->low_mark)) || ++ copy_to_user(&to->high_mark, ++ &list->high_mark, ++ sizeof(list->high_mark))) ++ return -EFAULT; ++ ++ DRM_DEBUG("%d %d %d %d %d\n", ++ i, ++ dma->bufs[i].buf_count, ++ dma->bufs[i].buf_size, ++ dma->bufs[i].freelist.low_mark, ++ dma->bufs[i].freelist.high_mark); ++ ++count; ++ } ++ } ++ } ++ request->count = count; ++ ++ return 0; ++} ++ ++/** ++ * Specifies a low and high water mark for buffer allocation ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg a pointer to a drm_buf_desc structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies that the size order is bounded between the admissible orders and ++ * updates the respective drm_device_dma::bufs entry low and high water mark. ++ * ++ * \note This ioctl is deprecated and mostly never used. ++ */ ++int drm_markbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_desc *request = data; ++ int order; ++ struct drm_buf_entry *entry; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ DRM_DEBUG("%d, %d, %d\n", ++ request->size, request->low_mark, request->high_mark); ++ order = drm_order(request->size); ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ entry = &dma->bufs[order]; ++ ++ if (request->low_mark < 0 || request->low_mark > entry->buf_count) ++ return -EINVAL; ++ if (request->high_mark < 0 || request->high_mark > entry->buf_count) ++ return -EINVAL; ++ ++ entry->freelist.low_mark = request->low_mark; ++ entry->freelist.high_mark = request->high_mark; ++ ++ return 0; ++} ++ ++/** ++ * Unreserve the buffers in list, previously reserved using drmDMA. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_buf_free structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls free_buffer() for each used buffer. ++ * This function is primarily used for debugging. 
++ */ ++int drm_freebufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_free *request = data; ++ int i; ++ int idx; ++ struct drm_buf *buf; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ DRM_DEBUG("%d\n", request->count); ++ for (i = 0; i < request->count; i++) { ++ if (copy_from_user(&idx, &request->list[i], sizeof(idx))) ++ return -EFAULT; ++ if (idx < 0 || idx >= dma->buf_count) { ++ DRM_ERROR("Index %d (of %d max)\n", ++ idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ buf = dma->buflist[idx]; ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("Process %d freeing buffer not owned\n", ++ current->pid); ++ return -EINVAL; ++ } ++ drm_free_buffer(dev, buf); ++ } ++ ++ return 0; ++} ++ ++/** ++ * Maps all of the DMA buffers into client-virtual space (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_buf_map structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information ++ * about each buffer into user space. For PCI buffers, it calls do_mmap() with ++ * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls ++ * drm_mmap_dma(). ++ */ ++int drm_mapbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int retcode = 0; ++ const int zero = 0; ++ unsigned long virtual; ++ unsigned long address; ++ struct drm_buf_map *request = data; ++ int i; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ spin_lock(&dev->count_lock); ++ if (atomic_read(&dev->buf_alloc)) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ dev->buf_use++; /* Can't allocate more after this call */ ++ spin_unlock(&dev->count_lock); ++ ++ if (request->count >= dma->buf_count) { ++ if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ++ || (drm_core_check_feature(dev, DRIVER_SG) ++ && (dma->flags & _DRM_DMA_USE_SG)) ++ || (drm_core_check_feature(dev, DRIVER_FB_DMA) ++ && (dma->flags & _DRM_DMA_USE_FB))) { ++ struct drm_map *map = dev->agp_buffer_map; ++ unsigned long token = dev->agp_buffer_token; ++ ++ if (!map) { ++ retcode = -EINVAL; ++ goto done; ++ } ++ down_write(¤t->mm->mmap_sem); ++ virtual = do_mmap(file_priv->filp, 0, map->size, ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED, ++ token); ++ up_write(¤t->mm->mmap_sem); ++ } else { ++ down_write(¤t->mm->mmap_sem); ++ virtual = do_mmap(file_priv->filp, 0, dma->byte_count, ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED, 0); ++ up_write(¤t->mm->mmap_sem); ++ } ++ if (virtual > -1024UL) { ++ /* Real error */ ++ retcode = (signed long)virtual; ++ goto done; ++ } ++ request->virtual = (void __user *)virtual; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ if (copy_to_user(&request->list[i].idx, ++ &dma->buflist[i]->idx, ++ sizeof(request->list[0].idx))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ if (copy_to_user(&request->list[i].total, ++ &dma->buflist[i]->total, ++ sizeof(request->list[0].total))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ if (copy_to_user(&request->list[i].used, ++ &zero, sizeof(zero))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ address = virtual + dma->buflist[i]->offset; /* *** */ ++ if (copy_to_user(&request->list[i].address, ++ &address, 
sizeof(address))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ } ++ } ++ done: ++ request->count = dma->buf_count; ++ DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); ++ ++ return retcode; ++} ++ ++/** ++ * Compute size order. Returns the exponent of the smaller power of two which ++ * is greater or equal to given number. ++ * ++ * \param size size. ++ * \return order. ++ * ++ * \todo Can be made faster. ++ */ ++int drm_order(unsigned long size) ++{ ++ int order; ++ unsigned long tmp; ++ ++ for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; ++ ++ if (size & (size - 1)) ++ ++order; ++ ++ return order; ++} ++EXPORT_SYMBOL(drm_order); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_compat.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_compat.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_compat.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_compat.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,860 @@ ++/************************************************************************** ++ * ++ * This kernel module is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ **************************************************************************/ ++/* ++ * This code provides access to unexported mm kernel features. It is necessary ++ * to use the new DRM memory manager code with kernels that don't support it ++ * directly. ++ * ++ * Authors: Thomas Hellstrom ++ * Linux kernel mm subsystem authors. ++ * (Most code taken from there). ++ */ ++ ++#include "drmP.h" ++ ++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ++/* ++ * These have bad performance in the AGP module for the indicated kernel versions. 
++ */
++
++int drm_map_page_into_agp(struct page *page)
++{
++	int i;
++	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
++	/* Caller's responsibility to call global_flush_tlb() for
++	 * performance reasons */
++	return i;
++}
++
++int drm_unmap_page_from_agp(struct page *page)
++{
++	int i;
++	i = change_page_attr(page, 1, PAGE_KERNEL);
++	/* Caller's responsibility to call global_flush_tlb() for
++	 * performance reasons */
++	return i;
++}
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++
++/*
++ * The protection map was exported in 2.6.19
++ */
++
++pgprot_t vm_get_page_prot(unsigned long vm_flags)
++{
++#ifdef MODULE
++	static pgprot_t drm_protection_map[16] = {
++		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
++		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
++	};
++
++	return drm_protection_map[vm_flags & 0x0F];
++#else
++	extern pgprot_t protection_map[];
++	return protection_map[vm_flags & 0x0F];
++#endif
++};
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * vm code for kernels below 2.6.15, in which version a major vm rewrite
++ * occurred. This implements a simple, straightforward
++ * version similar to what's going to be
++ * in kernel 2.6.19+
++ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
++ * nopfn.
++ */
++
++static struct {
++	spinlock_t lock;
++	struct page *dummy_page;
++	atomic_t present;
++} drm_np_retry =
++{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
++
++
++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
++				    struct fault_data *data);
++
++
++struct page * get_nopage_retry(void)
++{
++	if (atomic_read(&drm_np_retry.present) == 0) {
++		struct page *page = alloc_page(GFP_KERNEL);
++		if (!page)
++			return NOPAGE_OOM;
++		spin_lock(&drm_np_retry.lock);
++		drm_np_retry.dummy_page = page;
++		atomic_set(&drm_np_retry.present,1);
++		spin_unlock(&drm_np_retry.lock);
++	}
++	get_page(drm_np_retry.dummy_page);
++	return drm_np_retry.dummy_page;
++}
++
++void free_nopage_retry(void)
++{
++	if (atomic_read(&drm_np_retry.present) == 1) {
++		spin_lock(&drm_np_retry.lock);
++		__free_page(drm_np_retry.dummy_page);
++		drm_np_retry.dummy_page = NULL;
++		atomic_set(&drm_np_retry.present, 0);
++		spin_unlock(&drm_np_retry.lock);
++	}
++}
++
++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++			      unsigned long address,
++			      int *type)
++{
++	struct fault_data data;
++
++	if (type)
++		*type = VM_FAULT_MINOR;
++
++	data.address = address;
++	data.vma = vma;
++	drm_bo_vm_fault(vma, &data);
++	switch (data.type) {
++	case VM_FAULT_OOM:
++		return NOPAGE_OOM;
++	case VM_FAULT_SIGBUS:
++		return NOPAGE_SIGBUS;
++	default:
++		break;
++	}
++
++	return NOPAGE_REFAULT;
++}
++
++#endif
++
++#if !defined(DRM_FULL_MM_COMPAT) && \
++  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
++   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
++
++static int drm_pte_is_clear(struct vm_area_struct *vma,
++			    unsigned long addr)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	int ret = 1;
++	pte_t *pte;
++	pmd_t *pmd;
++	pud_t *pud;
++	pgd_t *pgd;
++
++	spin_lock(&mm->page_table_lock);
++	pgd = pgd_offset(mm, addr);
++	if (pgd_none(*pgd))
++		goto unlock;
++	pud = pud_offset(pgd, addr);
++	if (pud_none(*pud))
++		goto unlock;
++	pmd = pmd_offset(pud, addr);
++	if (pmd_none(*pmd))
++		goto unlock;
++	pte = pte_offset_map(pmd, addr);
++	if (!pte)
++		goto unlock;
++	ret = pte_none(*pte);
++	pte_unmap(pte);
++ unlock:
++	spin_unlock(&mm->page_table_lock);
++	return ret;
++}
++
++static int
vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn) ++{ ++ int ret; ++ if (!drm_pte_is_clear(vma, addr)) ++ return -EBUSY; ++ ++ ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); ++ return ret; ++} ++ ++ ++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, ++ struct fault_data *data) ++{ ++ unsigned long address = data->address; ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ ++ dev = bo->dev; ++ drm_bo_read_lock(&dev->bm.bm_lock, 0); ++ ++ mutex_lock(&bo->mutex); ++ ++ err = drm_bo_wait(bo, 0, 1, 0); ++ if (err) { ++ data->type = (err == -EAGAIN) ? ++ VM_FAULT_MINOR : VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ ++ /* ++ * If buffer happens to be in a non-mappable location, ++ * move it to a mappable. ++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ unsigned long _end = jiffies + 3*DRM_HZ; ++ uint32_t new_mask = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ ++ do { ++ err = drm_bo_move_buffer(bo, new_mask, 0, 0); ++ } while((err == -EAGAIN) && !time_after_eq(jiffies, _end)); ++ ++ if (err) { ++ DRM_ERROR("Timeout moving buffer to mappable location.\n"); ++ data->type = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ } ++ ++ if (address > vma->vm_end) { ++ data->type = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ dev = bo->dev; ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ data->type = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = (address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ data->type = VM_FAULT_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? ++ vm_get_page_prot(vma->vm_flags) : ++ drm_io_prot(_DRM_TTM, vma); ++ } ++ ++ err = vm_insert_pfn(vma, address, pfn); ++ ++ if (!err || err == -EBUSY) ++ data->type = VM_FAULT_MINOR; ++ else ++ data->type = VM_FAULT_OOM; ++out_unlock: ++ mutex_unlock(&bo->mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return NULL; ++} ++ ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \ ++ !defined(DRM_FULL_MM_COMPAT) ++ ++/** ++ */ ++ ++unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma, ++ unsigned long address) ++{ ++ struct fault_data data; ++ data.address = address; ++ ++ (void) drm_bo_vm_fault(vma, &data); ++ if (data.type == VM_FAULT_OOM) ++ return NOPFN_OOM; ++ else if (data.type == VM_FAULT_SIGBUS) ++ return NOPFN_SIGBUS; ++ ++ /* ++ * pfn already set. ++ */ ++ ++ return 0; ++} ++#endif ++ ++ ++#ifdef DRM_ODD_MM_COMPAT ++ ++/* ++ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated ++ * workaround for a single BUG statement in do_no_page in these versions. The ++ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_ ++ * vmas mapping the ttm, before dev->struct_mutex is taken. 
The way we do this is to ++ * check first take the dev->struct_mutex, and then trylock all mmap_sems. If this ++ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex, ++ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm. ++ * phew. ++ */ ++ ++typedef struct p_mm_entry { ++ struct list_head head; ++ struct mm_struct *mm; ++ atomic_t refcount; ++ int locked; ++} p_mm_entry_t; ++ ++typedef struct vma_entry { ++ struct list_head head; ++ struct vm_area_struct *vma; ++} vma_entry_t; ++ ++ ++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, ++ int *type) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ ++ mutex_lock(&bo->mutex); ++ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ ++ if (address > vma->vm_end) { ++ page = NOPAGE_SIGBUS; ++ goto out_unlock; ++ } ++ ++ dev = bo->dev; ++ ++ if (drm_mem_reg_is_pci(dev, &bo->mem)) { ++ DRM_ERROR("Invalid compat nopage.\n"); ++ page = NOPAGE_SIGBUS; ++ goto out_unlock; ++ } ++ ++ ttm = bo->ttm; ++ drm_ttm_fixup_caching(ttm); ++ page_offset = (address - vma->vm_start) >> PAGE_SHIFT; ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ page = NOPAGE_OOM; ++ goto out_unlock; ++ } ++ ++ get_page(page); ++out_unlock: ++ mutex_unlock(&bo->mutex); ++ return page; ++} ++ ++ ++ ++ ++int drm_bo_map_bound(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; ++ int ret = 0; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ ++ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, ++ &bus_offset, &bus_size); ++ BUG_ON(ret); ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; ++ unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; ++ pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ++ ret = io_remap_pfn_range(vma, vma->vm_start, pfn, ++ vma->vm_end - vma->vm_start, ++ pgprot); ++ } ++ ++ return ret; ++} ++ ++ ++int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) ++{ ++ p_mm_entry_t *entry, *n_entry; ++ vma_entry_t *v_entry; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ); ++ if (!v_entry) { ++ DRM_ERROR("Allocation of vma pointer entry failed\n"); ++ return -ENOMEM; ++ } ++ v_entry->vma = vma; ++ ++ list_add_tail(&v_entry->head, &bo->vma_list); ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ if (mm == entry->mm) { ++ atomic_inc(&entry->refcount); ++ return 0; ++ } else if ((unsigned long)mm < (unsigned long)entry->mm) ; ++ } ++ ++ n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ); ++ if (!n_entry) { ++ DRM_ERROR("Allocation of process mm pointer entry failed\n"); ++ return -ENOMEM; ++ } ++ INIT_LIST_HEAD(&n_entry->head); ++ n_entry->mm = mm; ++ n_entry->locked = 0; ++ atomic_set(&n_entry->refcount, 0); ++ list_add_tail(&n_entry->head, &entry->head); ++ ++ return 0; ++} ++ ++void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) ++{ ++ p_mm_entry_t *entry, *n; ++ vma_entry_t *v_entry, *v_n; ++ int found = 0; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) { ++ if (v_entry->vma == vma) { ++ found = 1; ++ list_del(&v_entry->head); ++ drm_ctl_free(v_entry, sizeof(*v_entry), 
DRM_MEM_BUFOBJ); ++ break; ++ } ++ } ++ BUG_ON(!found); ++ ++ list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) { ++ if (mm == entry->mm) { ++ if (atomic_add_negative(-1, &entry->refcount)) { ++ list_del(&entry->head); ++ BUG_ON(entry->locked); ++ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ); ++ } ++ return; ++ } ++ } ++ BUG_ON(1); ++} ++ ++ ++ ++int drm_bo_lock_kmm(struct drm_buffer_object * bo) ++{ ++ p_mm_entry_t *entry; ++ int lock_ok = 1; ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ BUG_ON(entry->locked); ++ if (!down_write_trylock(&entry->mm->mmap_sem)) { ++ lock_ok = 0; ++ break; ++ } ++ entry->locked = 1; ++ } ++ ++ if (lock_ok) ++ return 0; ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ if (!entry->locked) ++ break; ++ up_write(&entry->mm->mmap_sem); ++ entry->locked = 0; ++ } ++ ++ /* ++ * Possible deadlock. Try again. Our callers should handle this ++ * and restart. ++ */ ++ ++ return -EAGAIN; ++} ++ ++void drm_bo_unlock_kmm(struct drm_buffer_object * bo) ++{ ++ p_mm_entry_t *entry; ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ BUG_ON(!entry->locked); ++ up_write(&entry->mm->mmap_sem); ++ entry->locked = 0; ++ } ++} ++ ++int drm_bo_remap_bound(struct drm_buffer_object *bo) ++{ ++ vma_entry_t *v_entry; ++ int ret = 0; ++ ++ if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) { ++ list_for_each_entry(v_entry, &bo->vma_list, head) { ++ ret = drm_bo_map_bound(v_entry->vma); ++ if (ret) ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++void drm_bo_finish_unmap(struct drm_buffer_object *bo) ++{ ++ vma_entry_t *v_entry; ++ ++ list_for_each_entry(v_entry, &bo->vma_list, head) { ++ v_entry->vma->vm_flags &= ~VM_PFNMAP; ++ } ++} ++ ++#endif ++ ++#ifdef DRM_IDR_COMPAT_FN ++/* only called when idp->lock is held */ ++static void __free_layer(struct idr *idp, struct idr_layer *p) ++{ ++ p->ary[0] = idp->id_free; ++ idp->id_free = p; ++ idp->id_free_cnt++; ++} ++ ++static void free_layer(struct idr *idp, struct idr_layer *p) ++{ ++ unsigned long flags; ++ ++ /* ++ * Depends on the return element being zeroed. ++ */ ++ spin_lock_irqsave(&idp->lock, flags); ++ __free_layer(idp, p); ++ spin_unlock_irqrestore(&idp->lock, flags); ++} ++ ++/** ++ * idr_for_each - iterate through all stored pointers ++ * @idp: idr handle ++ * @fn: function to be called for each pointer ++ * @data: data passed back to callback function ++ * ++ * Iterate over the pointers registered with the given idr. The ++ * callback function will be called for each pointer currently ++ * registered, passing the id, the pointer and the data pointer passed ++ * to this function. It is not safe to modify the idr tree while in ++ * the callback, so functions such as idr_get_new and idr_remove are ++ * not allowed. ++ * ++ * We check the return of @fn each time. If it returns anything other ++ * than 0, we break out and return that value. ++ * ++* The caller must serialize idr_find() vs idr_get_new() and idr_remove(). 
++ */
++int idr_for_each(struct idr *idp,
++		 int (*fn)(int id, void *p, void *data), void *data)
++{
++	int n, id, max, error = 0;
++	struct idr_layer *p;
++	struct idr_layer *pa[MAX_LEVEL];
++	struct idr_layer **paa = &pa[0];
++
++	n = idp->layers * IDR_BITS;
++	p = idp->top;
++	max = 1 << n;
++
++	id = 0;
++	while (id < max) {
++		while (n > 0 && p) {
++			n -= IDR_BITS;
++			*paa++ = p;
++			p = p->ary[(id >> n) & IDR_MASK];
++		}
++
++		if (p) {
++			error = fn(id, (void *)p, data);
++			if (error)
++				break;
++		}
++
++		id += 1 << n;
++		while (n < fls(id)) {
++			n += IDR_BITS;
++			p = *--paa;
++		}
++	}
++
++	return error;
++}
++EXPORT_SYMBOL(idr_for_each);
++
++/**
++ * idr_remove_all - remove all ids from the given idr tree
++ * @idp: idr handle
++ *
++ * idr_destroy() only frees up unused, cached idp_layers, but this
++ * function will remove all id mappings and leave all idp_layers
++ * unused.
++ *
++ * A typical clean-up sequence for objects stored in an idr tree will
++ * use idr_for_each() to free all objects, if necessary, then
++ * idr_remove_all() to remove all ids, and idr_destroy() to free
++ * up the cached idr_layers.
++ */
++void idr_remove_all(struct idr *idp)
++{
++	int n, id, max, error = 0;
++	struct idr_layer *p;
++	struct idr_layer *pa[MAX_LEVEL];
++	struct idr_layer **paa = &pa[0];
++
++	n = idp->layers * IDR_BITS;
++	p = idp->top;
++	max = 1 << n;
++
++	id = 0;
++	while (id < max && !error) {
++		while (n > IDR_BITS && p) {
++			n -= IDR_BITS;
++			*paa++ = p;
++			p = p->ary[(id >> n) & IDR_MASK];
++		}
++
++		id += 1 << n;
++		while (n < fls(id)) {
++			if (p) {
++				memset(p, 0, sizeof *p);
++				free_layer(idp, p);
++			}
++			n += IDR_BITS;
++			p = *--paa;
++		}
++	}
++	idp->top = NULL;
++	idp->layers = 0;
++}
++EXPORT_SYMBOL(idr_remove_all);
++
++#endif /* DRM_IDR_COMPAT_FN */
++
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++/**
++ * idr_replace - replace pointer for given id
++ * @idp: idr handle
++ * @ptr: pointer you want associated with the id
++ * @id: lookup key
++ *
++ * Replace the pointer registered with an id and return the old value.
++ * A -ENOENT return indicates that @id was not found.
++ * A -EINVAL return indicates that @id was not within valid constraints.
++ *
++ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
++ */ ++void *idr_replace(struct idr *idp, void *ptr, int id) ++{ ++ int n; ++ struct idr_layer *p, *old_p; ++ ++ n = idp->layers * IDR_BITS; ++ p = idp->top; ++ ++ id &= MAX_ID_MASK; ++ ++ if (id >= (1 << n)) ++ return ERR_PTR(-EINVAL); ++ ++ n -= IDR_BITS; ++ while ((n > 0) && p) { ++ p = p->ary[(id >> n) & IDR_MASK]; ++ n -= IDR_BITS; ++ } ++ ++ n = id & IDR_MASK; ++ if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) ++ return ERR_PTR(-ENOENT); ++ ++ old_p = p->ary[n]; ++ p->ary[n] = ptr; ++ ++ return (void *)old_p; ++} ++EXPORT_SYMBOL(idr_replace); ++#endif ++ ++#if defined(DRM_KMAP_ATOMIC_PROT_PFN) ++#define drm_kmap_get_fixmap_pte(vaddr) \ ++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr)) ++ ++void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, ++ pgprot_t protection) ++{ ++ enum fixed_addresses idx; ++ unsigned long vaddr; ++ static pte_t *km_pte; ++ static int initialized = 0; ++ ++ if (unlikely(!initialized)) { ++ km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); ++ initialized = 1; ++ } ++ ++ pagefault_disable(); ++ idx = type + KM_TYPE_NR*smp_processor_id(); ++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ set_pte(km_pte-idx, pfn_pte(pfn, protection)); ++ ++ return (void*) vaddr; ++} ++ ++EXPORT_SYMBOL(kmap_atomic_prot_pfn); ++ ++#endif ++ ++#ifdef DRM_FULL_MM_COMPAT ++#ifdef DRM_NO_FAULT ++unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long ret = NOPFN_REFAULT; ++ ++ if (address > vma->vm_end) ++ return NOPFN_SIGBUS; ++ ++ dev = bo->dev; ++ err = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (err) ++ return NOPFN_REFAULT; ++ ++ err = mutex_lock_interruptible(&bo->mutex); ++ if (err) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return NOPFN_REFAULT; ++ } ++ ++ err = drm_bo_wait(bo, 0, 1, 0, 1); ++ if (err) { ++ ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ goto out_unlock; ++ } ++ ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ /* ++ * If buffer happens to be in a non-mappable location, ++ * move it to a mappable. ++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ uint32_t new_flags = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ err = drm_bo_move_buffer(bo, new_flags, 0, 0); ++ if (err) { ++ ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; ++ goto out_unlock; ++ } ++ } ++ ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ ret = NOPFN_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = (address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ ret = NOPFN_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? 
++			vm_get_page_prot(vma->vm_flags) :
++			drm_io_prot(_DRM_TTM, vma);
++	}
++
++	err = vm_insert_pfn(vma, address, pfn);
++	if (err) {
++		ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
++		goto out_unlock;
++	}
++out_unlock:
++	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++	mutex_unlock(&bo->mutex);
++	drm_bo_read_unlock(&dev->bm.bm_lock);
++	return ret;
++}
++#endif
++#endif
+diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_compat.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_compat.h
+--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_compat.h	1970-01-01 01:00:00.000000000 +0100
++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_compat.h	2011-09-04 11:31:05.000000000 +0200
+@@ -0,0 +1,380 @@
++/**
++ * \file drm_compat.h
++ * Backward compatibility definitions for Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith
++ * \author Gareth Hughes
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */ ++ ++#ifndef _DRM_COMPAT_H_ ++#define _DRM_COMPAT_H_ ++ ++#ifndef minor ++#define minor(x) MINOR((x)) ++#endif ++ ++#ifndef MODULE_LICENSE ++#define MODULE_LICENSE(x) ++#endif ++ ++#ifndef preempt_disable ++#define preempt_disable() ++#define preempt_enable() ++#endif ++ ++#ifndef pte_offset_map ++#define pte_offset_map pte_offset ++#define pte_unmap(pte) ++#endif ++ ++#ifndef module_param ++#define module_param(name, type, perm) ++#endif ++ ++/* older kernels had different irq args */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++#undef DRM_IRQ_ARGS ++#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs ++#endif ++ ++#ifndef list_for_each_safe ++#define list_for_each_safe(pos, n, head) \ ++ for (pos = (head)->next, n = pos->next; pos != (head); \ ++ pos = n, n = pos->next) ++#endif ++ ++#ifndef list_for_each_entry ++#define list_for_each_entry(pos, head, member) \ ++ for (pos = list_entry((head)->next, typeof(*pos), member), \ ++ prefetch(pos->member.next); \ ++ &pos->member != (head); \ ++ pos = list_entry(pos->member.next, typeof(*pos), member), \ ++ prefetch(pos->member.next)) ++#endif ++ ++#ifndef list_for_each_entry_safe ++#define list_for_each_entry_safe(pos, n, head, member) \ ++ for (pos = list_entry((head)->next, typeof(*pos), member), \ ++ n = list_entry(pos->member.next, typeof(*pos), member); \ ++ &pos->member != (head); \ ++ pos = n, n = list_entry(n->member.next, typeof(*n), member)) ++#endif ++ ++#ifndef __user ++#define __user ++#endif ++ ++#if !defined(__put_page) ++#define __put_page(p) atomic_dec(&(p)->count) ++#endif ++ ++#if !defined(__GFP_COMP) ++#define __GFP_COMP 0 ++#endif ++ ++#if !defined(IRQF_SHARED) ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ++static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot) ++{ ++ return remap_page_range(vma, from, ++ pfn << PAGE_SHIFT, ++ size, ++ pgprot); ++} ++ ++static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags) ++{ ++ void *addr; ++ ++ addr = kmalloc(size * nmemb, flags); ++ if (addr != NULL) ++ memset((void *)addr, 0, size * nmemb); ++ ++ return addr; ++} ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ++#define mutex_lock down ++#define mutex_unlock up ++ ++#define mutex semaphore ++ ++#define mutex_init(a) sema_init((a), 1) ++ ++#endif ++ ++#ifndef DEFINE_SPINLOCK ++#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED ++#endif ++ ++/* old architectures */ ++#ifdef __AMD64__ ++#define __x86_64__ ++#endif ++ ++/* sysfs __ATTR macro */ ++#ifndef __ATTR ++#define __ATTR(_name,_mode,_show,_store) { \ ++ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ ++ .show = _show, \ ++ .store = _store, \ ++} ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ++#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \ ++ if (tmp) memset(tmp, 0, size); \ ++ (tmp);}) ++#endif ++ ++#ifndef list_for_each_entry_safe_reverse ++#define list_for_each_entry_safe_reverse(pos, n, head, member) \ ++ for (pos = list_entry((head)->prev, typeof(*pos), member), \ ++ n = list_entry(pos->member.prev, typeof(*pos), member); \ ++ &pos->member != (head); \ ++ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) ++#endif ++ ++#include ++#include ++ ++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) ++#define DRM_ODD_MM_COMPAT ++#endif ++ ++#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,6,21)) ++#define DRM_FULL_MM_COMPAT ++#endif ++ ++ ++/* ++ * Flush relevant caches and clear a VMA structure so that page references ++ * will cause a page fault. Don't flush tlbs. ++ */ ++ ++extern void drm_clear_vma(struct vm_area_struct *vma, ++ unsigned long addr, unsigned long end); ++ ++/* ++ * Return the PTE protection map entries for the VMA flags given by ++ * flags. This is a functional interface to the kernel's protection map. ++ */ ++ ++extern pgprot_t vm_get_page_prot(unsigned long vm_flags); ++ ++#ifndef GFP_DMA32 ++#define GFP_DMA32 GFP_KERNEL ++#endif ++#ifndef __GFP_DMA32 ++#define __GFP_DMA32 GFP_KERNEL ++#endif ++ ++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ++/* ++ * These are too slow in earlier kernels. ++ */ ++ ++extern int drm_unmap_page_from_agp(struct page *page); ++extern int drm_map_page_into_agp(struct page *page); ++ ++#define map_page_into_agp drm_map_page_into_agp ++#define unmap_page_from_agp drm_unmap_page_from_agp ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++extern struct page *get_nopage_retry(void); ++extern void free_nopage_retry(void); ++ ++#define NOPAGE_REFAULT get_nopage_retry() ++#endif ++ ++ ++#ifndef DRM_FULL_MM_COMPAT ++ ++/* ++ * For now, just return a dummy page that we've allocated out of ++ * static space. The page will be put by do_nopage() since we've already ++ * filled out the pte. ++ */ ++ ++struct fault_data { ++ struct vm_area_struct *vma; ++ unsigned long address; ++ pgoff_t pgoff; ++ unsigned int flags; ++ ++ int type; ++}; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, ++ int *type); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \ ++ !defined(DRM_FULL_MM_COMPAT) ++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, ++ unsigned long address); ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */ ++#endif /* ndef DRM_FULL_MM_COMPAT */ ++ ++#ifdef DRM_ODD_MM_COMPAT ++ ++struct drm_buffer_object; ++ ++ ++/* ++ * Add a vma to the ttm vma list, and the ++ * process mm pointer to the ttm mm list. Needs the ttm mutex. ++ */ ++ ++extern int drm_bo_add_vma(struct drm_buffer_object * bo, ++ struct vm_area_struct *vma); ++/* ++ * Delete a vma and the corresponding mm pointer from the ++ * ttm lists. Needs the ttm mutex. ++ */ ++extern void drm_bo_delete_vma(struct drm_buffer_object * bo, ++ struct vm_area_struct *vma); ++ ++/* ++ * Attempts to lock all relevant mmap_sems for a ttm, while ++ * not releasing the ttm mutex. May return -EAGAIN to avoid ++ * deadlocks. In that case the caller shall release the ttm mutex, ++ * schedule() and try again. ++ */ ++ ++extern int drm_bo_lock_kmm(struct drm_buffer_object * bo); ++ ++/* ++ * Unlock all relevant mmap_sems for a ttm. ++ */ ++extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo); ++ ++/* ++ * If the ttm was bound to the aperture, this function shall be called ++ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all ++ * vmas mapping this ttm. This is needed just after unmapping the ptes of ++ * the vma, otherwise the do_nopage() function will bug :(. The function ++ * releases the mmap_sems for this ttm. ++ */ ++ ++extern void drm_bo_finish_unmap(struct drm_buffer_object *bo); ++ ++/* ++ * Remap all vmas of this ttm using io_remap_pfn_range. 
We cannot ++ * fault these pfns in, because the first one will set the vma VM_PFNMAP ++ * flag, which will make the next fault bug in do_nopage(). The function ++ * releases the mmap_sems for this ttm. ++ */ ++ ++extern int drm_bo_remap_bound(struct drm_buffer_object *bo); ++ ++ ++/* ++ * Remap a vma for a bound ttm. Call with the ttm mutex held and ++ * the relevant mmap_sem locked. ++ */ ++extern int drm_bo_map_bound(struct vm_area_struct *vma); ++ ++#endif ++ ++/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) ++#define DRM_IDR_COMPAT_FN ++#define DRM_NO_FAULT ++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, ++ unsigned long address); ++#endif ++#ifdef DRM_IDR_COMPAT_FN ++int idr_for_each(struct idr *idp, ++ int (*fn)(int id, void *p, void *data), void *data); ++void idr_remove_all(struct idr *idp); ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) ++void *idr_replace(struct idr *idp, void *ptr, int id); ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++typedef _Bool bool; ++#endif ++ ++ ++#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM)) ++#define DRM_KMAP_ATOMIC_PROT_PFN ++extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, ++ pgprot_t protection); ++#endif ++ ++#if !defined(flush_agp_mappings) ++#define flush_agp_mappings() do {} while(0) ++#endif ++ ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1) ++#endif ++ ++#ifndef VM_CAN_NONLINEAR ++#define DRM_VM_NOPAGE 1 ++#endif ++ ++#ifdef DRM_VM_NOPAGE ++ ++extern struct page *drm_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++ ++extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++ ++extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++ ++extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) ++#define drm_core_ioremap_wc drm_core_ioremap ++#endif ++ ++#ifndef OS_HAS_GEM ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) ++#define OS_HAS_GEM 1 ++#else ++#define OS_HAS_GEM 0 ++#endif ++#endif ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_context.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_context.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_context.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_context.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,472 @@ ++/** ++ * \file drm_context.c ++ * IOCTLs for generic contexts ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* ++ * ChangeLog: ++ * 2001-11-16 Torsten Duwe ++ * added context constructor/destructor hooks, ++ * needed by SiS driver's memory management. ++ */ ++ ++#include "drmP.h" ++ ++/******************************************************************/ ++/** \name Context bitmap support */ ++/*@{*/ ++ ++/** ++ * Free a handle from the context bitmap. ++ * ++ * \param dev DRM device. ++ * \param ctx_handle context handle. ++ * ++ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry ++ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex ++ * lock. ++ */ ++void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle) ++{ ++ mutex_lock(&dev->struct_mutex); ++ idr_remove(&dev->ctx_idr, ctx_handle); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * Context bitmap allocation. ++ * ++ * \param dev DRM device. ++ * \return (non-negative) context handle on success or a negative number on failure. ++ * ++ * Allocate a new idr from drm_device::ctx_idr while holding the ++ * drm_device::struct_mutex lock. ++ */ ++static int drm_ctxbitmap_next(struct drm_device *dev) ++{ ++ int new_id; ++ int ret; ++ ++again: ++ if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { ++ DRM_ERROR("Out of memory expanding drawable idr\n"); ++ return -ENOMEM; ++ } ++ mutex_lock(&dev->struct_mutex); ++ ret = idr_get_new_above(&dev->ctx_idr, NULL, ++ DRM_RESERVED_CONTEXTS, &new_id); ++ if (ret == -EAGAIN) { ++ mutex_unlock(&dev->struct_mutex); ++ goto again; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ return new_id; ++} ++ ++/** ++ * Context bitmap initialization. ++ * ++ * \param dev DRM device. ++ * ++ * Initialise the drm_device::ctx_idr ++ */ ++int drm_ctxbitmap_init(struct drm_device *dev) ++{ ++ idr_init(&dev->ctx_idr); ++ return 0; ++} ++ ++/** ++ * Context bitmap cleanup. ++ * ++ * \param dev DRM device. ++ * ++ * Free all idr members using drm_ctx_sarea_free helper function ++ * while holding the drm_device::struct_mutex lock. ++ */ ++void drm_ctxbitmap_cleanup(struct drm_device *dev) ++{ ++ mutex_lock(&dev->struct_mutex); ++ idr_remove_all(&dev->ctx_idr); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/*@}*/ ++ ++/******************************************************************/ ++/** \name Per Context SAREA Support */ ++/*@{*/ ++ ++/** ++ * Get per-context SAREA. ++ * ++ * \param inode device inode. 
++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx_priv_map structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Gets the map from drm_device::ctx_idr with the handle specified and ++ * returns its handle. ++ */ ++int drm_getsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_priv_map *request = data; ++ struct drm_map *map; ++ struct drm_map_list *_entry; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ map = idr_find(&dev->ctx_idr, request->ctx_id); ++ if (!map) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->handle = NULL; ++ list_for_each_entry(_entry, &dev->maplist, head) { ++ if (_entry->map == map) { ++ request->handle = ++ (void *)(unsigned long)_entry->user_token; ++ break; ++ } ++ } ++ if (request->handle == NULL) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/** ++ * Set per-context SAREA. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx_priv_map structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches the mapping specified in \p arg and update the entry in ++ * drm_device::ctx_idr with it. ++ */ ++int drm_setsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_priv_map *request = data; ++ struct drm_map *map = NULL; ++ struct drm_map_list *r_list = NULL; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map ++ && r_list->user_token == (unsigned long) request->handle) ++ goto found; ++ } ++ bad: ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ ++ found: ++ map = r_list->map; ++ if (!map) ++ goto bad; ++ ++ if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) ++ goto bad; ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++/******************************************************************/ ++/** \name The actual DRM context handling routines */ ++/*@{*/ ++ ++/** ++ * Switch context. ++ * ++ * \param dev DRM device. ++ * \param old old context handle. ++ * \param new new context handle. ++ * \return zero on success or a negative number on failure. ++ * ++ * Attempt to set drm_device::context_flag. ++ */ ++static int drm_context_switch(struct drm_device *dev, int old, int new) ++{ ++ if (test_and_set_bit(0, &dev->context_flag)) { ++ DRM_ERROR("Reentering -- FIXME\n"); ++ return -EBUSY; ++ } ++ ++ DRM_DEBUG("Context switch from %d to %d\n", old, new); ++ ++ if (new == dev->last_context) { ++ clear_bit(0, &dev->context_flag); ++ return 0; ++ } ++ ++ return 0; ++} ++ ++/** ++ * Complete context switch. ++ * ++ * \param dev DRM device. ++ * \param new new context handle. ++ * \return zero on success or a negative number on failure. ++ * ++ * Updates drm_device::last_context and drm_device::last_switch. Verifies the ++ * hardware lock is held, clears the drm_device::context_flag and wakes up ++ * drm_device::context_wait. ++ */ ++static int drm_context_switch_complete(struct drm_device *dev, int new) ++{ ++ dev->last_context = new; /* PRE/POST: This is the _only_ writer. 
*/ ++ dev->last_switch = jiffies; ++ ++ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { ++ DRM_ERROR("Lock isn't held after context switch\n"); ++ } ++ ++ /* If a context switch is ever initiated ++ when the kernel holds the lock, release ++ that lock here. */ ++ clear_bit(0, &dev->context_flag); ++ wake_up(&dev->context_wait); ++ ++ return 0; ++} ++ ++/** ++ * Reserve contexts. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx_res structure. ++ * \return zero on success or a negative number on failure. ++ */ ++int drm_resctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_res *res = data; ++ struct drm_ctx ctx; ++ int i; ++ ++ if (res->count >= DRM_RESERVED_CONTEXTS) { ++ memset(&ctx, 0, sizeof(ctx)); ++ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ++ ctx.handle = i; ++ if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) ++ return -EFAULT; ++ } ++ } ++ res->count = DRM_RESERVED_CONTEXTS; ++ ++ return 0; ++} ++ ++/** ++ * Add context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Get a new handle for the context and copy to userspace. ++ */ ++int drm_addctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_list *ctx_entry; ++ struct drm_ctx *ctx = data; ++ ++ ctx->handle = drm_ctxbitmap_next(dev); ++ if (ctx->handle == DRM_KERNEL_CONTEXT) { ++ /* Skip kernel's context and get a new one. */ ++ ctx->handle = drm_ctxbitmap_next(dev); ++ } ++ DRM_DEBUG("%d\n", ctx->handle); ++ if (ctx->handle == -1) { ++ DRM_DEBUG("Not enough free contexts.\n"); ++ /* Should this return -EBUSY instead? */ ++ return -ENOMEM; ++ } ++ ++ if (ctx->handle != DRM_KERNEL_CONTEXT) { ++ if (dev->driver->context_ctor) ++ if (!dev->driver->context_ctor(dev, ctx->handle)) { ++ DRM_DEBUG("Running out of ctxs or memory.\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); ++ if (!ctx_entry) { ++ DRM_DEBUG("out of memory\n"); ++ return -ENOMEM; ++ } ++ ++ INIT_LIST_HEAD(&ctx_entry->head); ++ ctx_entry->handle = ctx->handle; ++ ctx_entry->tag = file_priv; ++ ++ mutex_lock(&dev->ctxlist_mutex); ++ list_add(&ctx_entry->head, &dev->ctxlist); ++ ++dev->ctx_count; ++ mutex_unlock(&dev->ctxlist_mutex); ++ ++ return 0; ++} ++ ++int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ /* This does nothing */ ++ return 0; ++} ++ ++/** ++ * Get context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ */ ++int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ /* This is 0, because we don't handle any context flags */ ++ ctx->flags = 0; ++ ++ return 0; ++} ++ ++/** ++ * Switch context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls context_switch(). 
++ */ ++int drm_switchctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ DRM_DEBUG("%d\n", ctx->handle); ++ return drm_context_switch(dev, dev->last_context, ctx->handle); ++} ++ ++/** ++ * New context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls context_switch_complete(). ++ */ ++int drm_newctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ DRM_DEBUG("%d\n", ctx->handle); ++ drm_context_switch_complete(dev, ctx->handle); ++ ++ return 0; ++} ++ ++/** ++ * Remove context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * If not the special kernel context, calls ctxbitmap_free() to free the specified context. ++ */ ++int drm_rmctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ DRM_DEBUG("%d\n", ctx->handle); ++ if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { ++ file_priv->remove_auth_on_close = 1; ++ } ++ if (ctx->handle != DRM_KERNEL_CONTEXT) { ++ if (dev->driver->context_dtor) ++ dev->driver->context_dtor(dev, ctx->handle); ++ drm_ctxbitmap_free(dev, ctx->handle); ++ } ++ ++ mutex_lock(&dev->ctxlist_mutex); ++ if (!list_empty(&dev->ctxlist)) { ++ struct drm_ctx_list *pos, *n; ++ ++ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { ++ if (pos->handle == ctx->handle) { ++ list_del(&pos->head); ++ drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); ++ --dev->ctx_count; ++ } ++ } ++ } ++ mutex_unlock(&dev->ctxlist_mutex); ++ ++ return 0; ++} ++ ++/*@}*/ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_core.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_core.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_core.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_core.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,35 @@ ++/* ++ * Copyright 2004 Jon Smirl ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl" ++ ++#define CORE_NAME "drm" ++#define CORE_DESC "DRM shared core routines" ++#define CORE_DATE "20060810" ++ ++#define DRM_IF_MAJOR 1 ++#define DRM_IF_MINOR 3 ++ ++#define CORE_MAJOR 1 ++#define CORE_MINOR 1 ++#define CORE_PATCHLEVEL 0 +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_dma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_dma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,179 @@ ++/** ++ * \file drm_dma.c ++ * DMA IOCTL and function support ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Initialize the DMA data. ++ * ++ * \param dev DRM device. ++ * \return zero on success or a negative value on failure. ++ * ++ * Allocate and initialize a drm_device_dma structure. ++ */ ++int drm_dma_setup(struct drm_device *dev) ++{ ++ int i; ++ ++ dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER); ++ if (!dev->dma) ++ return -ENOMEM; ++ ++ memset(dev->dma, 0, sizeof(*dev->dma)); ++ ++ for (i = 0; i <= DRM_MAX_ORDER; i++) ++ memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); ++ ++ return 0; ++} ++ ++/** ++ * Cleanup the DMA resources. ++ * ++ * \param dev DRM device. ++ * ++ * Free all pages associated with DMA buffers, the buffers and pages lists, and ++ * finally the drm_device::dma structure itself. 
++ */ ++void drm_dma_takedown(struct drm_device *dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i, j; ++ ++ if (!dma) ++ return; ++ ++ /* Clear dma buffers */ ++ for (i = 0; i <= DRM_MAX_ORDER; i++) { ++ if (dma->bufs[i].seg_count) { ++ DRM_DEBUG("order %d: buf_count = %d," ++ " seg_count = %d\n", ++ i, ++ dma->bufs[i].buf_count, ++ dma->bufs[i].seg_count); ++ for (j = 0; j < dma->bufs[i].seg_count; j++) { ++ if (dma->bufs[i].seglist[j]) { ++ drm_pci_free(dev, dma->bufs[i].seglist[j]); ++ } ++ } ++ drm_free(dma->bufs[i].seglist, ++ dma->bufs[i].seg_count ++ * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS); ++ } ++ if (dma->bufs[i].buf_count) { ++ for (j = 0; j < dma->bufs[i].buf_count; j++) { ++ if (dma->bufs[i].buflist[j].dev_private) { ++ drm_free(dma->bufs[i].buflist[j]. ++ dev_private, ++ dma->bufs[i].buflist[j]. ++ dev_priv_size, DRM_MEM_BUFS); ++ } ++ } ++ drm_free(dma->bufs[i].buflist, ++ dma->bufs[i].buf_count * ++ sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS); ++ } ++ } ++ ++ if (dma->buflist) { ++ drm_free(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ } ++ ++ if (dma->pagelist) { ++ drm_free(dma->pagelist, ++ dma->page_count * sizeof(*dma->pagelist), ++ DRM_MEM_PAGES); ++ } ++ drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER); ++ dev->dma = NULL; ++} ++ ++/** ++ * Free a buffer. ++ * ++ * \param dev DRM device. ++ * \param buf buffer to free. ++ * ++ * Resets the fields of \p buf. ++ */ ++void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf) ++{ ++ if (!buf) ++ return; ++ ++ buf->waiting = 0; ++ buf->pending = 0; ++ buf->file_priv = NULL; ++ buf->used = 0; ++ ++ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) ++ && waitqueue_active(&buf->dma_wait)) { ++ wake_up_interruptible(&buf->dma_wait); ++ } ++} ++ ++/** ++ * Reclaim the buffers. ++ * ++ * \param file_priv DRM file private. ++ * ++ * Frees each buffer associated with \p file_priv not already on the hardware. ++ */ ++void drm_core_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ if (!dma) ++ return; ++ for (i = 0; i < dma->buf_count; i++) { ++ if (dma->buflist[i]->file_priv == file_priv) { ++ switch (dma->buflist[i]->list) { ++ case DRM_LIST_NONE: ++ drm_free_buffer(dev, dma->buflist[i]); ++ break; ++ case DRM_LIST_WAIT: ++ dma->buflist[i]->list = DRM_LIST_RECLAIM; ++ break; ++ default: ++ /* Buffer already on hardware. */ ++ break; ++ } ++ } ++ } ++} ++EXPORT_SYMBOL(drm_core_reclaim_buffers); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_drawable.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_drawable.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_drawable.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_drawable.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,192 @@ ++/** ++ * \file drm_drawable.c ++ * IOCTLs for drawables ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ * \author Michel Dänzer ++ */ ++ ++/* ++ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Allocate drawable ID and memory to store information about it. ++ */ ++int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ unsigned long irqflags; ++ struct drm_draw *draw = data; ++ int new_id = 0; ++ int ret; ++ ++again: ++ if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { ++ DRM_ERROR("Out of memory expanding drawable idr\n"); ++ return -ENOMEM; ++ } ++ ++ spin_lock_irqsave(&dev->drw_lock, irqflags); ++ ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); ++ if (ret == -EAGAIN) { ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ goto again; ++ } ++ ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ ++ draw->handle = new_id; ++ ++ DRM_DEBUG("%d\n", draw->handle); ++ ++ return 0; ++} ++ ++/** ++ * Free drawable ID and memory to store information about it. 
++ */ ++int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_draw *draw = data; ++ unsigned long irqflags; ++ ++ spin_lock_irqsave(&dev->drw_lock, irqflags); ++ ++ drm_free(drm_get_drawable_info(dev, draw->handle), ++ sizeof(struct drm_drawable_info), DRM_MEM_BUFS); ++ ++ idr_remove(&dev->drw_idr, draw->handle); ++ ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ DRM_DEBUG("%d\n", draw->handle); ++ return 0; ++} ++ ++int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_update_draw *update = data; ++ unsigned long irqflags; ++ struct drm_clip_rect *rects; ++ struct drm_drawable_info *info; ++ int err; ++ ++ info = idr_find(&dev->drw_idr, update->handle); ++ if (!info) { ++ info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); ++ if (!info) ++ return -ENOMEM; ++ if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { ++ DRM_ERROR("No such drawable %d\n", update->handle); ++ drm_free(info, sizeof(*info), DRM_MEM_BUFS); ++ return -EINVAL; ++ } ++ } ++ ++ switch (update->type) { ++ case DRM_DRAWABLE_CLIPRECTS: ++ if (update->num != info->num_rects) { ++ rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), ++ DRM_MEM_BUFS); ++ } else ++ rects = info->rects; ++ ++ if (update->num && !rects) { ++ DRM_ERROR("Failed to allocate cliprect memory\n"); ++ err = -ENOMEM; ++ goto error; ++ } ++ ++ if (update->num && DRM_COPY_FROM_USER(rects, ++ (struct drm_clip_rect __user *) ++ (unsigned long)update->data, ++ update->num * ++ sizeof(*rects))) { ++ DRM_ERROR("Failed to copy cliprects from userspace\n"); ++ err = -EFAULT; ++ goto error; ++ } ++ ++ spin_lock_irqsave(&dev->drw_lock, irqflags); ++ ++ if (rects != info->rects) { ++ drm_free(info->rects, info->num_rects * ++ sizeof(struct drm_clip_rect), DRM_MEM_BUFS); ++ } ++ ++ info->rects = rects; ++ info->num_rects = update->num; ++ ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ ++ DRM_DEBUG("Updated %d cliprects for drawable %d\n", ++ info->num_rects, update->handle); ++ break; ++ default: ++ DRM_ERROR("Invalid update type %d\n", update->type); ++ return -EINVAL; ++ } ++ ++ return 0; ++ ++error: ++ if (rects != info->rects) ++ drm_free(rects, update->num * sizeof(struct drm_clip_rect), ++ DRM_MEM_BUFS); ++ ++ return err; ++} ++ ++/** ++ * Caller must hold the drawable spinlock! ++ */ ++struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) ++{ ++ return idr_find(&dev->drw_idr, id); ++} ++EXPORT_SYMBOL(drm_get_drawable_info); ++ ++static int drm_drawable_free(int idr, void *p, void *data) ++{ ++ struct drm_drawable_info *info = p; ++ ++ if (info) { ++ drm_free(info->rects, info->num_rects * ++ sizeof(struct drm_clip_rect), DRM_MEM_BUFS); ++ drm_free(info, sizeof(*info), DRM_MEM_BUFS); ++ } ++ ++ return 0; ++} ++ ++void drm_drawable_free_all(struct drm_device *dev) ++{ ++ idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); ++ idr_remove_all(&dev->drw_idr); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,701 @@ ++/** ++ * \file drm_drv.c ++ * Generic driver template ++ * ++ * \author Rickard E. 
(Rik) Faith ++ * \author Gareth Hughes ++ * ++ * To use this template, you must at least define the following (samples ++ * given for the MGA driver): ++ * ++ * \code ++ * #define DRIVER_AUTHOR "VA Linux Systems, Inc." ++ * ++ * #define DRIVER_NAME "mga" ++ * #define DRIVER_DESC "Matrox G200/G400" ++ * #define DRIVER_DATE "20001127" ++ * ++ * #define drm_x mga_##x ++ * \endcode ++ */ ++ ++/* ++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++#include "drmP.h" ++#include "drm_core.h" ++ ++static void drm_cleanup(struct drm_device * dev); ++int drm_fb_loaded = 0; ++ ++static int drm_version(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++/** Ioctl table */ ++static struct drm_ioctl_desc drm_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), ++ 
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), ++ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ ++ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++#if __OS_HAS_AGP ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++#endif ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), ++ 
DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0), ++ ++#if OS_HAS_GEM ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), ++#endif ++}; ++ ++#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) ++ ++ ++/** ++ * Take down the DRM device. ++ * ++ * \param dev DRM device structure. ++ * ++ * Frees every resource in \p dev. ++ * ++ * \sa drm_device ++ */ ++int drm_lastclose(struct drm_device * dev) ++{ ++ struct drm_magic_entry *pt, *next; ++ struct drm_map_list *r_list, *list_t; ++ struct drm_vma_entry *vma, *vma_temp; ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ /* ++ * We can't do much about this function failing. ++ */ ++ ++ drm_bo_driver_finish(dev); ++ ++ if (dev->driver->lastclose) ++ dev->driver->lastclose(dev); ++ DRM_DEBUG("driver lastclose completed\n"); ++ ++ if (dev->unique) { ++ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ++ dev->unique = NULL; ++ dev->unique_len = 0; ++ } ++ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ /* Free drawable information memory */ ++ mutex_lock(&dev->struct_mutex); ++ ++ drm_drawable_free_all(dev); ++ del_timer(&dev->timer); ++ ++ if (dev->unique) { ++ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ++ dev->unique = NULL; ++ dev->unique_len = 0; ++ } ++ ++ if (dev->magicfree.next) { ++ list_for_each_entry_safe(pt, next, &dev->magicfree, head) { ++ list_del(&pt->head); ++ drm_ht_remove_item(&dev->magiclist, &pt->hash_item); ++ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); ++ } ++ drm_ht_remove(&dev->magiclist); ++ } ++ ++ ++ /* Clear AGP information */ ++ if (drm_core_has_AGP(dev) && dev->agp) { ++ struct drm_agp_mem *entry, *tempe; ++ ++ /* Remove AGP resources, but leave dev->agp ++ intact until drv_cleanup is called. 
*/ ++ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { ++ if (entry->bound) ++ drm_unbind_agp(entry->memory); ++ drm_free_agp(entry->memory, entry->pages); ++ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); ++ } ++ INIT_LIST_HEAD(&dev->agp->memory); ++ ++ if (dev->agp->acquired) ++ drm_agp_release(dev); ++ ++ dev->agp->acquired = 0; ++ dev->agp->enabled = 0; ++ } ++ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { ++ drm_sg_cleanup(dev->sg); ++ dev->sg = NULL; ++ } ++ ++ /* Clear vma list (only built for debugging) */ ++ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { ++ list_del(&vma->head); ++ drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS); ++ } ++ ++ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { ++ if (!(r_list->map->flags & _DRM_DRIVER)) { ++ drm_rmmap_locked(dev, r_list->map); ++ r_list = NULL; ++ } ++ } ++ ++ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { ++ for (i = 0; i < dev->queue_count; i++) { ++ ++ if (dev->queuelist[i]) { ++ drm_free(dev->queuelist[i], ++ sizeof(*dev->queuelist[0]), ++ DRM_MEM_QUEUES); ++ dev->queuelist[i] = NULL; ++ } ++ } ++ drm_free(dev->queuelist, ++ dev->queue_slots * sizeof(*dev->queuelist), ++ DRM_MEM_QUEUES); ++ dev->queuelist = NULL; ++ } ++ dev->queue_count = 0; ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ drm_dma_takedown(dev); ++ ++ if (dev->lock.hw_lock) { ++ dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ ++ dev->lock.file_priv = NULL; ++ wake_up_interruptible(&dev->lock.lock_queue); ++ } ++ dev->dev_mapping = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ ++ DRM_DEBUG("lastclose completed\n"); ++ return 0; ++} ++ ++void drm_cleanup_pci(struct pci_dev *pdev) ++{ ++ struct drm_device *dev = pci_get_drvdata(pdev); ++ ++ pci_set_drvdata(pdev, NULL); ++ pci_release_regions(pdev); ++ if (dev) ++ drm_cleanup(dev); ++} ++EXPORT_SYMBOL(drm_cleanup_pci); ++ ++/** ++ * Module initialization. Called via init_module at module load time, or via ++ * linux/init/main.c (this is not currently supported). ++ * ++ * \return zero on success or a negative number on failure. ++ * ++ * Initializes an array of drm_device structures, and attempts to ++ * initialize all available devices, using consecutive minors, registering the ++ * stubs and initializing the AGP device. ++ * ++ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and ++ * after the initialization for driver customization. ++ */ ++int drm_init(struct drm_driver *driver, ++ struct pci_device_id *pciidlist) ++{ ++ struct pci_dev *pdev; ++ struct pci_device_id *pid; ++ int rc, i; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) { ++ pid = &pciidlist[i]; ++ ++ pdev = NULL; ++ /* pass back in pdev to account for multiple identical cards */ ++ while ((pdev = ++ pci_get_subsys(pid->vendor, pid->device, pid->subvendor, ++ pid->subdevice, pdev))) { ++ /* Are there device class requirements? */ ++ if ((pid->class != 0) ++ && ((pdev->class & pid->class_mask) != pid->class)) { ++ continue; ++ } ++ /* is there already a driver loaded, or (short circuit saves work) */ ++ /* does something like VesaFB have control of the memory region? 
*/ ++ if ( ++#ifdef CONFIG_PCI ++ pci_dev_driver(pdev) || ++#endif ++ pci_request_regions(pdev, "DRM scan")) { ++ /* go into stealth mode */ ++ drm_fb_loaded = 1; ++ pci_dev_put(pdev); ++ break; ++ } ++ /* no fbdev or vesadev, put things back and wait for normal probe */ ++ pci_release_regions(pdev); ++ } ++ } ++ ++ if (!drm_fb_loaded) ++ return pci_register_driver(&driver->pci_driver); ++ else { ++ for (i = 0; pciidlist[i].vendor != 0; i++) { ++ pid = &pciidlist[i]; ++ ++ pdev = NULL; ++ /* pass back in pdev to account for multiple identical cards */ ++ while ((pdev = ++ pci_get_subsys(pid->vendor, pid->device, ++ pid->subvendor, pid->subdevice, ++ pdev))) { ++ /* Are there device class requirements? */ ++ if ((pid->class != 0) ++ && ((pdev->class & pid->class_mask) != pid->class)) { ++ continue; ++ } ++#ifdef CONFIG_PCI ++ /* stealth mode requires a manual probe */ ++ pci_dev_get(pdev); ++#endif ++ if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) { ++ pci_dev_put(pdev); ++ return rc; ++ } ++ } ++ } ++ DRM_INFO("Used old pci detect: framebuffer loaded\n"); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_init); ++ ++/** ++ * Called via cleanup_module() at module unload time. ++ * ++ * Cleans up all DRM device, calling drm_lastclose(). ++ * ++ * \sa drm_init ++ */ ++static void drm_cleanup(struct drm_device * dev) ++{ ++ ++ DRM_DEBUG("\n"); ++ if (!dev) { ++ DRM_ERROR("cleanup called no dev\n"); ++ return; ++ } ++ ++ drm_lastclose(dev); ++ drm_fence_manager_takedown(dev); ++ ++ if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp ++ && dev->agp->agp_mtrr >= 0) { ++ int retval; ++ retval = mtrr_del(dev->agp->agp_mtrr, ++ dev->agp->agp_info.aper_base, ++ dev->agp->agp_info.aper_size * 1024 * 1024); ++ DRM_DEBUG("mtrr_del=%d\n", retval); ++ } ++ ++ if (drm_core_has_AGP(dev) && dev->agp) { ++ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); ++ dev->agp = NULL; ++ } ++ if (dev->driver->unload) ++ dev->driver->unload(dev); ++ ++ if (!drm_fb_loaded) ++ pci_disable_device(dev->pdev); ++ ++ drm_ctxbitmap_cleanup(dev); ++ drm_ht_remove(&dev->map_hash); ++ drm_mm_takedown(&dev->offset_manager); ++ drm_ht_remove(&dev->object_hash); ++ ++ drm_put_minor(dev); ++ if (drm_put_dev(dev)) ++ DRM_ERROR("Cannot unload module\n"); ++} ++ ++int drm_minors_cleanup(int id, void *ptr, void *data) ++{ ++ struct drm_minor *minor = ptr; ++ struct drm_device *dev; ++ struct drm_driver *driver = data; ++ ++ dev = minor->dev; ++ if (minor->dev->driver != driver) ++ return 0; ++ ++ if (minor->type != DRM_MINOR_LEGACY) ++ return 0; ++ ++ if (dev) ++ pci_dev_put(dev->pdev); ++ drm_cleanup(dev); ++ return 1; ++} ++ ++void drm_exit(struct drm_driver *driver) ++{ ++ DRM_DEBUG("\n"); ++#ifdef CONFIG_PCI ++ if (drm_fb_loaded) { ++#endif ++ idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver); ++#ifdef CONFIG_PCI ++ } else ++ pci_unregister_driver(&driver->pci_driver); ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ free_nopage_retry(); ++#endif ++ DRM_INFO("Module unloaded\n"); ++} ++EXPORT_SYMBOL(drm_exit); ++ ++/** File operations structure */ ++static const struct file_operations drm_stub_fops = { ++ .owner = THIS_MODULE, ++ .open = drm_stub_open ++}; ++ ++static int __init drm_core_init(void) ++{ ++ int ret; ++ struct sysinfo si; ++ unsigned long avail_memctl_mem; ++ unsigned long max_memctl_mem; ++ ++ idr_init(&drm_minors_idr); ++ si_meminfo(&si); ++ ++ /* ++ * AGP only allows low / DMA32 memory ATM. 
++ */ ++ ++ avail_memctl_mem = si.totalram - si.totalhigh; ++ ++ /* ++ * Avoid overflows ++ */ ++ ++ max_memctl_mem = 1UL << (32 - PAGE_SHIFT); ++ max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE; ++ ++ if (avail_memctl_mem >= max_memctl_mem) ++ avail_memctl_mem = max_memctl_mem; ++ ++ drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit); ++ ++ ret = -ENOMEM; ++ ++ if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) ++ goto err_p1; ++ ++ drm_class = drm_sysfs_create(THIS_MODULE, "drm"); ++ if (IS_ERR(drm_class)) { ++ printk(KERN_ERR "DRM: Error creating drm class.\n"); ++ ret = PTR_ERR(drm_class); ++ goto err_p2; ++ } ++ ++ drm_proc_root = proc_mkdir("dri", NULL); ++ if (!drm_proc_root) { ++ DRM_ERROR("Cannot create /proc/dri\n"); ++ ret = -1; ++ goto err_p3; ++ } ++ ++ drm_mem_init(); ++ ++ DRM_INFO("Initialized %s %d.%d.%d %s\n", ++ CORE_NAME, ++ CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); ++ return 0; ++err_p3: ++ drm_sysfs_destroy(); ++err_p2: ++ unregister_chrdev(DRM_MAJOR, "drm"); ++ ++ idr_destroy(&drm_minors_idr); ++err_p1: ++ return ret; ++} ++ ++static void __exit drm_core_exit(void) ++{ ++ remove_proc_entry("dri", NULL); ++ drm_sysfs_destroy(); ++ ++ unregister_chrdev(DRM_MAJOR, "drm"); ++ ++ idr_destroy(&drm_minors_idr); ++} ++ ++module_init(drm_core_init); ++module_exit(drm_core_exit); ++ ++/** ++ * Get version information ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_version structure. ++ * \return zero on success or negative number on failure. ++ * ++ * Fills in the version information in \p arg. ++ */ ++static int drm_version(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_version *version = data; ++ int len; ++ ++ version->version_major = dev->driver->major; ++ version->version_minor = dev->driver->minor; ++ version->version_patchlevel = dev->driver->patchlevel; ++ DRM_COPY(version->name, dev->driver->name); ++ DRM_COPY(version->date, dev->driver->date); ++ DRM_COPY(version->desc, dev->driver->desc); ++ ++ return 0; ++} ++ ++/** ++ * Called whenever a process performs an ioctl on /dev/drm. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ * ++ * Looks up the ioctl function in the ::ioctls table, checking for root ++ * previleges if so required, and dispatches to the respective function. ++ * ++ * Copies data in and out according to the size and direction given in cmd, ++ * which must match the ioctl cmd known by the kernel. The kernel uses a 512 ++ * byte stack buffer to store the ioctl arguments in kernel space. Should we ++ * ever need much larger ioctl arguments, we may need to allocate memory. 
++ */ ++int drm_ioctl(struct inode *inode, struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ return drm_unlocked_ioctl(filp, cmd, arg); ++} ++EXPORT_SYMBOL(drm_ioctl); ++ ++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_ioctl_desc *ioctl; ++ drm_ioctl_t *func; ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ int retcode = -EINVAL; ++ char kdata[512]; ++ ++ atomic_inc(&dev->ioctl_count); ++ atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++ ++file_priv->ioctl_count; ++ ++ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", ++ current->pid, cmd, nr, (long)old_encode_dev(file_priv->minor->device), ++ file_priv->authenticated); ++ ++ if ((nr >= DRM_CORE_IOCTL_COUNT) && ++ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) ++ goto err_i1; ++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) ++ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) ++ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; ++ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { ++ ioctl = &drm_ioctls[nr]; ++ cmd = ioctl->cmd; ++ } else { ++ retcode = -EINVAL; ++ goto err_i1; ++ } ++#if 0 ++ /* ++ * This check is disabled, because driver private ioctl->cmd ++ * are not the ioctl commands with size and direction bits but ++ * just the indices. The DRM core ioctl->cmd are the proper ioctl ++ * commands. The drivers' ioctl tables need to be fixed. ++ */ ++ if (ioctl->cmd != cmd) { ++ retcode = -EINVAL; ++ goto err_i1; ++ } ++#endif ++ ++ func = ioctl->func; ++ /* is there a local override? */ ++ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) ++ func = dev->driver->dma_ioctl; ++ ++ if (cmd & IOC_IN) { ++ if (copy_from_user(kdata, (void __user *)arg, ++ _IOC_SIZE(cmd)) != 0) { ++ retcode = -EACCES; ++ goto err_i1; ++ } ++ } ++ ++ if (!func) { ++ DRM_DEBUG("no function\n"); ++ retcode = -EINVAL; ++ } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || ++ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || ++ ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { ++ retcode = -EACCES; ++ } else { ++ retcode = func(dev, kdata, file_priv); ++ } ++ ++ if (cmd & IOC_OUT) { ++ if (copy_to_user((void __user *)arg, kdata, ++ _IOC_SIZE(cmd)) != 0) ++ retcode = -EACCES; ++ } ++ ++err_i1: ++ atomic_dec(&dev->ioctl_count); ++ if (retcode) ++ DRM_DEBUG("ret = %d\n", retcode); ++ return retcode; ++} ++EXPORT_SYMBOL(drm_unlocked_ioctl); ++ ++drm_local_map_t *drm_getsarea(struct drm_device *dev) ++{ ++ struct drm_map_list *entry; ++ ++ list_for_each_entry(entry, &dev->maplist, head) { ++ if (entry->map && entry->map->type == _DRM_SHM && ++ (entry->map->flags & _DRM_CONTAINS_LOCK)) { ++ return entry->map; ++ } ++ } ++ return NULL; ++} ++EXPORT_SYMBOL(drm_getsarea); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_fence.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_fence.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_fence.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_fence.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,829 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++ ++/* ++ * Convenience function to be called by fence::wait methods that ++ * need polling. ++ */ ++ ++int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask, ++ unsigned long end_jiffies) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ uint32_t count = 0; ++ int ret; ++ ++ DECLARE_WAITQUEUE(entry, current); ++ add_wait_queue(&fc->fence_queue, &entry); ++ ++ ret = 0; ++ ++ for (;;) { ++ __set_current_state((interruptible) ? ++ TASK_INTERRUPTIBLE : ++ TASK_UNINTERRUPTIBLE); ++ if (drm_fence_object_signaled(fence, mask)) ++ break; ++ if (time_after_eq(jiffies, end_jiffies)) { ++ ret = -EBUSY; ++ break; ++ } ++ if (lazy) ++ schedule_timeout(1); ++ else if ((++count & 0x0F) == 0){ ++ __set_current_state(TASK_RUNNING); ++ schedule(); ++ __set_current_state((interruptible) ? ++ TASK_INTERRUPTIBLE : ++ TASK_UNINTERRUPTIBLE); ++ } ++ if (interruptible && signal_pending(current)) { ++ ret = -EAGAIN; ++ break; ++ } ++ } ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&fc->fence_queue, &entry); ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_wait_polling); ++ ++/* ++ * Typically called by the IRQ handler. ++ */ ++ ++void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence, uint32_t type, uint32_t error) ++{ ++ int wake = 0; ++ uint32_t diff; ++ uint32_t relevant_type; ++ uint32_t new_type; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct list_head *head; ++ struct drm_fence_object *fence, *next; ++ int found = 0; ++ ++ if (list_empty(&fc->ring)) ++ return; ++ ++ list_for_each_entry(fence, &fc->ring, ring) { ++ diff = (sequence - fence->sequence) & driver->sequence_mask; ++ if (diff > driver->wrap_diff) { ++ found = 1; ++ break; ++ } ++ } ++ ++ fc->waiting_types &= ~type; ++ head = (found) ? 
&fence->ring : &fc->ring; ++ ++ list_for_each_entry_safe_reverse(fence, next, head, ring) { ++ if (&fence->ring == &fc->ring) ++ break; ++ ++ if (error) { ++ fence->error = error; ++ fence->signaled_types = fence->type; ++ list_del_init(&fence->ring); ++ wake = 1; ++ break; ++ } ++ ++ if (type & DRM_FENCE_TYPE_EXE) ++ type |= fence->native_types; ++ ++ relevant_type = type & fence->type; ++ new_type = (fence->signaled_types | relevant_type) ^ ++ fence->signaled_types; ++ ++ if (new_type) { ++ fence->signaled_types |= new_type; ++ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", ++ fence->base.hash.key, fence->signaled_types); ++ ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ ++ if (new_type & fence->waiting_types) ++ wake = 1; ++ } ++ ++ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types; ++ ++ if (!(fence->type & ~fence->signaled_types)) { ++ DRM_DEBUG("Fence completely signaled 0x%08lx\n", ++ fence->base.hash.key); ++ list_del_init(&fence->ring); ++ } ++ } ++ ++ /* ++ * Reinstate lost waiting types. ++ */ ++ ++ if ((fc->waiting_types & type) != type) { ++ head = head->prev; ++ list_for_each_entry(fence, head, ring) { ++ if (&fence->ring == &fc->ring) ++ break; ++ diff = (fc->highest_waiting_sequence - fence->sequence) & ++ driver->sequence_mask; ++ if (diff > driver->wrap_diff) ++ break; ++ ++ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types; ++ } ++ } ++ ++ if (wake) ++ wake_up_all(&fc->fence_queue); ++} ++EXPORT_SYMBOL(drm_fence_handler); ++ ++static void drm_fence_unring(struct drm_device *dev, struct list_head *ring) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ unsigned long flags; ++ ++ write_lock_irqsave(&fm->lock, flags); ++ list_del_init(ring); ++ write_unlock_irqrestore(&fm->lock, flags); ++} ++ ++void drm_fence_usage_deref_locked(struct drm_fence_object **fence) ++{ ++ struct drm_fence_object *tmp_fence = *fence; ++ struct drm_device *dev = tmp_fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ *fence = NULL; ++ if (atomic_dec_and_test(&tmp_fence->usage)) { ++ drm_fence_unring(dev, &tmp_fence->ring); ++ DRM_DEBUG("Destroyed a fence object 0x%08lx\n", ++ tmp_fence->base.hash.key); ++ atomic_dec(&fm->count); ++ BUG_ON(!list_empty(&tmp_fence->base.list)); ++ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); ++ } ++} ++EXPORT_SYMBOL(drm_fence_usage_deref_locked); ++ ++void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence) ++{ ++ struct drm_fence_object *tmp_fence = *fence; ++ struct drm_device *dev = tmp_fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ *fence = NULL; ++ if (atomic_dec_and_test(&tmp_fence->usage)) { ++ mutex_lock(&dev->struct_mutex); ++ if (atomic_read(&tmp_fence->usage) == 0) { ++ drm_fence_unring(dev, &tmp_fence->ring); ++ atomic_dec(&fm->count); ++ BUG_ON(!list_empty(&tmp_fence->base.list)); ++ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++EXPORT_SYMBOL(drm_fence_usage_deref_unlocked); ++ ++struct drm_fence_object ++*drm_fence_reference_locked(struct drm_fence_object *src) ++{ ++ DRM_ASSERT_LOCKED(&src->dev->struct_mutex); ++ ++ atomic_inc(&src->usage); ++ return src; ++} ++ ++void drm_fence_reference_unlocked(struct drm_fence_object **dst, ++ struct drm_fence_object *src) ++{ ++ mutex_lock(&src->dev->struct_mutex); ++ *dst = src; ++ atomic_inc(&src->usage); ++ mutex_unlock(&src->dev->struct_mutex); ++} 
++EXPORT_SYMBOL(drm_fence_reference_unlocked); ++ ++static void drm_fence_object_destroy(struct drm_file *priv, ++ struct drm_user_object *base) ++{ ++ struct drm_fence_object *fence = ++ drm_user_object_entry(base, struct drm_fence_object, base); ++ ++ drm_fence_usage_deref_locked(&fence); ++} ++ ++int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask) ++{ ++ unsigned long flags; ++ int signaled; ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ ++ mask &= fence->type; ++ read_lock_irqsave(&fm->lock, flags); ++ signaled = (mask & fence->signaled_types) == mask; ++ read_unlock_irqrestore(&fm->lock, flags); ++ if (!signaled && driver->poll) { ++ write_lock_irqsave(&fm->lock, flags); ++ driver->poll(dev, fence->fence_class, mask); ++ signaled = (mask & fence->signaled_types) == mask; ++ write_unlock_irqrestore(&fm->lock, flags); ++ } ++ return signaled; ++} ++EXPORT_SYMBOL(drm_fence_object_signaled); ++ ++ ++int drm_fence_object_flush(struct drm_fence_object *fence, ++ uint32_t type) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ unsigned long irq_flags; ++ uint32_t saved_pending_flush; ++ uint32_t diff; ++ int call_flush; ++ ++ if (type & ~fence->type) { ++ DRM_ERROR("Flush trying to extend fence type, " ++ "0x%x, 0x%x\n", type, fence->type); ++ return -EINVAL; ++ } ++ ++ write_lock_irqsave(&fm->lock, irq_flags); ++ fence->waiting_types |= type; ++ fc->waiting_types |= fence->waiting_types; ++ diff = (fence->sequence - fc->highest_waiting_sequence) & ++ driver->sequence_mask; ++ ++ if (diff < driver->wrap_diff) ++ fc->highest_waiting_sequence = fence->sequence; ++ ++ /* ++ * fence->waiting_types has changed. Determine whether ++ * we need to initiate some kind of flush as a result of this. ++ */ ++ ++ saved_pending_flush = fc->pending_flush; ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ ++ if (driver->poll) ++ driver->poll(dev, fence->fence_class, fence->waiting_types); ++ ++ call_flush = fc->pending_flush; ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ ++ if (call_flush && driver->flush) ++ driver->flush(dev, fence->fence_class); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_flush); ++ ++/* ++ * Make sure old fence objects are signaled before their fence sequences are ++ * wrapped around and reused. 
++ */ ++ ++void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; ++ struct drm_fence_object *fence; ++ unsigned long irq_flags; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ int call_flush; ++ ++ uint32_t diff; ++ ++ write_lock_irqsave(&fm->lock, irq_flags); ++ ++ list_for_each_entry_reverse(fence, &fc->ring, ring) { ++ diff = (sequence - fence->sequence) & driver->sequence_mask; ++ if (diff <= driver->flush_diff) ++ break; ++ ++ fence->waiting_types = fence->type; ++ fc->waiting_types |= fence->type; ++ ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ } ++ ++ if (driver->poll) ++ driver->poll(dev, fence_class, fc->waiting_types); ++ ++ call_flush = fc->pending_flush; ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ ++ if (call_flush && driver->flush) ++ driver->flush(dev, fence->fence_class); ++ ++ /* ++ * FIXME: Shold we implement a wait here for really old fences? ++ */ ++ ++} ++EXPORT_SYMBOL(drm_fence_flush_old); ++ ++int drm_fence_object_wait(struct drm_fence_object *fence, ++ int lazy, int ignore_signals, uint32_t mask) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ int ret = 0; ++ unsigned long _end = 3 * DRM_HZ; ++ ++ if (mask & ~fence->type) { ++ DRM_ERROR("Wait trying to extend fence type" ++ " 0x%08x 0x%08x\n", mask, fence->type); ++ BUG(); ++ return -EINVAL; ++ } ++ ++ if (driver->wait) ++ return driver->wait(fence, lazy, !ignore_signals, mask); ++ ++ ++ drm_fence_object_flush(fence, mask); ++ if (driver->has_irq(dev, fence->fence_class, mask)) { ++ if (!ignore_signals) ++ ret = wait_event_interruptible_timeout ++ (fc->fence_queue, ++ drm_fence_object_signaled(fence, mask), ++ 3 * DRM_HZ); ++ else ++ ret = wait_event_timeout ++ (fc->fence_queue, ++ drm_fence_object_signaled(fence, mask), ++ 3 * DRM_HZ); ++ ++ if (unlikely(ret == -ERESTARTSYS)) ++ return -EAGAIN; ++ ++ if (unlikely(ret == 0)) ++ return -EBUSY; ++ ++ return 0; ++ } ++ ++ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask, ++ _end); ++} ++EXPORT_SYMBOL(drm_fence_object_wait); ++ ++ ++ ++int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags, ++ uint32_t fence_class, uint32_t type) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ unsigned long flags; ++ uint32_t sequence; ++ uint32_t native_types; ++ int ret; ++ ++ drm_fence_unring(dev, &fence->ring); ++ ret = driver->emit(dev, fence_class, fence_flags, &sequence, ++ &native_types); ++ if (ret) ++ return ret; ++ ++ write_lock_irqsave(&fm->lock, flags); ++ fence->fence_class = fence_class; ++ fence->type = type; ++ fence->waiting_types = 0; ++ fence->signaled_types = 0; ++ fence->error = 0; ++ fence->sequence = sequence; ++ fence->native_types = native_types; ++ if (list_empty(&fc->ring)) ++ fc->highest_waiting_sequence = sequence - 1; ++ list_add_tail(&fence->ring, &fc->ring); ++ fc->latest_queued_sequence = sequence; ++ write_unlock_irqrestore(&fm->lock, flags); ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_emit); ++ ++static int 
drm_fence_object_init(struct drm_device *dev, uint32_t fence_class, ++ uint32_t type, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence) ++{ ++ int ret = 0; ++ unsigned long flags; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ mutex_lock(&dev->struct_mutex); ++ atomic_set(&fence->usage, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ write_lock_irqsave(&fm->lock, flags); ++ INIT_LIST_HEAD(&fence->ring); ++ ++ /* ++ * Avoid hitting BUG() for kernel-only fence objects. ++ */ ++ ++ INIT_LIST_HEAD(&fence->base.list); ++ fence->fence_class = fence_class; ++ fence->type = type; ++ fence->signaled_types = 0; ++ fence->waiting_types = 0; ++ fence->sequence = 0; ++ fence->error = 0; ++ fence->dev = dev; ++ write_unlock_irqrestore(&fm->lock, flags); ++ if (fence_flags & DRM_FENCE_FLAG_EMIT) { ++ ret = drm_fence_object_emit(fence, fence_flags, ++ fence->fence_class, type); ++ } ++ return ret; ++} ++ ++int drm_fence_add_user_object(struct drm_file *priv, ++ struct drm_fence_object *fence, int shareable) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_user_object(priv, &fence->base, shareable); ++ if (ret) ++ goto out; ++ atomic_inc(&fence->usage); ++ fence->base.type = drm_fence_type; ++ fence->base.remove = &drm_fence_object_destroy; ++ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_add_user_object); ++ ++int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class, ++ uint32_t type, unsigned flags, ++ struct drm_fence_object **c_fence) ++{ ++ struct drm_fence_object *fence; ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); ++ if (!fence) { ++ DRM_ERROR("Out of memory creating fence object\n"); ++ return -ENOMEM; ++ } ++ ret = drm_fence_object_init(dev, fence_class, type, flags, fence); ++ if (ret) { ++ drm_fence_usage_deref_unlocked(&fence); ++ return ret; ++ } ++ *c_fence = fence; ++ atomic_inc(&fm->count); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_create); ++ ++void drm_fence_manager_init(struct drm_device *dev) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fence_class; ++ struct drm_fence_driver *fed = dev->driver->fence_driver; ++ int i; ++ unsigned long flags; ++ ++ rwlock_init(&fm->lock); ++ write_lock_irqsave(&fm->lock, flags); ++ fm->initialized = 0; ++ if (!fed) ++ goto out_unlock; ++ ++ fm->initialized = 1; ++ fm->num_classes = fed->num_classes; ++ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); ++ ++ for (i = 0; i < fm->num_classes; ++i) { ++ fence_class = &fm->fence_class[i]; ++ ++ memset(fence_class, 0, sizeof(*fence_class)); ++ INIT_LIST_HEAD(&fence_class->ring); ++ DRM_INIT_WAITQUEUE(&fence_class->fence_queue); ++ } ++ ++ atomic_set(&fm->count, 0); ++ out_unlock: ++ write_unlock_irqrestore(&fm->lock, flags); ++} ++ ++void drm_fence_fill_arg(struct drm_fence_object *fence, ++ struct drm_fence_arg *arg) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ unsigned long irq_flags; ++ ++ read_lock_irqsave(&fm->lock, irq_flags); ++ arg->handle = fence->base.hash.key; ++ arg->fence_class = fence->fence_class; ++ arg->type = fence->type; ++ arg->signaled = fence->signaled_types; ++ arg->error = fence->error; ++ arg->sequence = fence->sequence; ++ read_unlock_irqrestore(&fm->lock, irq_flags); ++} ++EXPORT_SYMBOL(drm_fence_fill_arg); ++ ++void 
drm_fence_manager_takedown(struct drm_device *dev) ++{ ++} ++ ++struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv, ++ uint32_t handle) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_user_object *uo; ++ struct drm_fence_object *fence; ++ ++ mutex_lock(&dev->struct_mutex); ++ uo = drm_lookup_user_object(priv, handle); ++ if (!uo || (uo->type != drm_fence_type)) { ++ mutex_unlock(&dev->struct_mutex); ++ return NULL; ++ } ++ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); ++ mutex_unlock(&dev->struct_mutex); ++ return fence; ++} ++ ++int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->flags & DRM_FENCE_FLAG_EMIT) ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ret = drm_fence_object_create(dev, arg->fence_class, ++ arg->type, arg->flags, &fence); ++ if (ret) ++ return ret; ++ ret = drm_fence_add_user_object(file_priv, fence, ++ arg->flags & ++ DRM_FENCE_FLAG_SHAREABLE); ++ if (ret) { ++ drm_fence_usage_deref_unlocked(&fence); ++ return ret; ++ } ++ ++ /* ++ * usage > 0. No need to lock dev->struct_mutex; ++ */ ++ ++ arg->handle = fence->base.hash.key; ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ struct drm_user_object *uo; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo); ++ if (ret) ++ return ret; ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++ ++int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type); ++} ++ ++int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ fence = 
drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ret = drm_fence_object_flush(fence, arg->type); ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++ ++int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ret = drm_fence_object_wait(fence, ++ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, ++ 0, arg->type); ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++ ++int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class, ++ arg->type); ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized\n"); ++ return -EINVAL; ++ } ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ret = drm_fence_buffer_objects(dev, NULL, arg->flags, ++ NULL, &fence); ++ if (ret) ++ return ret; ++ ++ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) { ++ ret = drm_fence_add_user_object(file_priv, fence, ++ arg->flags & ++ DRM_FENCE_FLAG_SHAREABLE); ++ if (ret) ++ return ret; ++ } ++ ++ arg->handle = fence->base.hash.key; ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_fops.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_fops.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_fops.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_fops.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,532 @@ ++/** ++ * \file drm_fops.c ++ * File operations for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Daryll Strauss ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm_sarea.h" ++#include ++ ++static int drm_open_helper(struct inode *inode, struct file *filp, ++ struct drm_device * dev); ++ ++static int drm_setup(struct drm_device * dev) ++{ ++ drm_local_map_t *map; ++ int i; ++ int ret; ++ int sareapage; ++ ++ if (dev->driver->firstopen) { ++ ret = dev->driver->firstopen(dev); ++ if (ret != 0) ++ return ret; ++ } ++ ++ dev->magicfree.next = NULL; ++ ++ /* prebuild the SAREA */ ++ sareapage = max(SAREA_MAX, PAGE_SIZE); ++ i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); ++ if (i != 0) ++ return i; ++ ++ atomic_set(&dev->ioctl_count, 0); ++ atomic_set(&dev->vma_count, 0); ++ dev->buf_use = 0; ++ atomic_set(&dev->buf_alloc, 0); ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { ++ i = drm_dma_setup(dev); ++ if (i < 0) ++ return i; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(dev->counts); i++) ++ atomic_set(&dev->counts[i], 0); ++ ++ drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER); ++ INIT_LIST_HEAD(&dev->magicfree); ++ ++ dev->sigdata.lock = NULL; ++ init_waitqueue_head(&dev->lock.lock_queue); ++ dev->queue_count = 0; ++ dev->queue_reserved = 0; ++ dev->queue_slots = 0; ++ dev->queuelist = NULL; ++ dev->context_flag = 0; ++ dev->interrupt_flag = 0; ++ dev->dma_flag = 0; ++ dev->last_context = 0; ++ dev->last_switch = 0; ++ dev->last_checked = 0; ++ init_waitqueue_head(&dev->context_wait); ++ dev->if_version = 0; ++ ++ dev->ctx_start = 0; ++ dev->lck_start = 0; ++ ++ dev->buf_async = NULL; ++ init_waitqueue_head(&dev->buf_readers); ++ init_waitqueue_head(&dev->buf_writers); ++ ++ DRM_DEBUG("\n"); ++ ++ /* ++ * The kernel's context could be created here, but is now created ++ * in drm_dma_enqueue. This is more resource-efficient for ++ * hardware that does not do DMA, but may mean that ++ * drm_select_queue fails between the time the interrupt is ++ * initialized and the time the queues are initialized. ++ */ ++ ++ return 0; ++} ++ ++/** ++ * Open file. ++ * ++ * \param inode device inode ++ * \param filp file pointer. ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches the DRM device with the same minor number, calls open_helper(), and ++ * increments the device open count. If the open count was previous at zero, ++ * i.e., it's the first that the device is open, then calls setup(). 
++ */ ++int drm_open(struct inode *inode, struct file *filp) ++{ ++ struct drm_device *dev = NULL; ++ int minor_id = iminor(inode); ++ struct drm_minor *minor; ++ int retcode = 0; ++ ++ minor = idr_find(&drm_minors_idr, minor_id); ++ if (!minor) ++ return -ENODEV; ++ ++ if (!(dev = minor->dev)) ++ return -ENODEV; ++ ++ retcode = drm_open_helper(inode, filp, dev); ++ if (!retcode) { ++ atomic_inc(&dev->counts[_DRM_STAT_OPENS]); ++ spin_lock(&dev->count_lock); ++ if (!dev->open_count++) { ++ spin_unlock(&dev->count_lock); ++ retcode = drm_setup(dev); ++ goto out; ++ } ++ spin_unlock(&dev->count_lock); ++ } ++ ++out: ++ mutex_lock(&dev->struct_mutex); ++ BUG_ON((dev->dev_mapping != NULL) && ++ (dev->dev_mapping != inode->i_mapping)); ++ if (dev->dev_mapping == NULL) ++ dev->dev_mapping = inode->i_mapping; ++ mutex_unlock(&dev->struct_mutex); ++ ++ return retcode; ++} ++EXPORT_SYMBOL(drm_open); ++ ++/** ++ * File \c open operation. ++ * ++ * \param inode device inode. ++ * \param filp file pointer. ++ * ++ * Puts the dev->fops corresponding to the device minor number into ++ * \p filp, call the \c open method, and restore the file operations. ++ */ ++int drm_stub_open(struct inode *inode, struct file *filp) ++{ ++ struct drm_device *dev = NULL; ++ struct drm_minor *minor; ++ int minor_id = iminor(inode); ++ int err = -ENODEV; ++ const struct file_operations *old_fops; ++ ++ DRM_DEBUG("\n"); ++ ++ minor = idr_find(&drm_minors_idr, minor_id); ++ if (!minor) ++ return -ENODEV; ++ ++ if (!(dev = minor->dev)) ++ return -ENODEV; ++ ++ old_fops = filp->f_op; ++ filp->f_op = fops_get(&dev->driver->fops); ++ if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) { ++ fops_put(filp->f_op); ++ filp->f_op = fops_get(old_fops); ++ } ++ fops_put(old_fops); ++ ++ return err; ++} ++ ++/** ++ * Check whether DRI will run on this CPU. ++ * ++ * \return non-zero if the DRI will run on this CPU, or zero otherwise. ++ */ ++static int drm_cpu_valid(void) ++{ ++#if defined(__i386__) ++ if (boot_cpu_data.x86 == 3) ++ return 0; /* No cmpxchg on a 386 */ ++#endif ++#if defined(__sparc__) && !defined(__sparc_v9__) ++ return 0; /* No cmpxchg before v9 sparc. */ ++#endif ++ return 1; ++} ++ ++/** ++ * Called whenever a process opens /dev/drm. ++ * ++ * \param inode device inode. ++ * \param filp file pointer. ++ * \param dev device. ++ * \return zero on success or a negative number on failure. ++ * ++ * Creates and initializes a drm_file structure for the file private data in \p ++ * filp and add it into the double linked list in \p dev. 
++ */ ++static int drm_open_helper(struct inode *inode, struct file *filp, ++ struct drm_device * dev) ++{ ++ int minor_id = iminor(inode); ++ struct drm_file *priv; ++ int ret; ++ int i, j; ++ ++ if (filp->f_flags & O_EXCL) ++ return -EBUSY; /* No exclusive opens */ ++ if (!drm_cpu_valid()) ++ return -EINVAL; ++ ++ DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor_id); ++ ++ priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); ++ if (!priv) ++ return -ENOMEM; ++ ++ memset(priv, 0, sizeof(*priv)); ++ filp->private_data = priv; ++ priv->filp = filp; ++ priv->uid = current->euid; ++ priv->pid = current->pid; ++ priv->minor = idr_find(&drm_minors_idr, minor_id); ++ priv->ioctl_count = 0; ++ /* for compatibility root is always authenticated */ ++ priv->authenticated = capable(CAP_SYS_ADMIN); ++ priv->lock_count = 0; ++ ++ INIT_LIST_HEAD(&priv->lhead); ++ INIT_LIST_HEAD(&priv->refd_objects); ++ ++ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) { ++ ret = drm_ht_create(&priv->refd_object_hash[i], ++ DRM_FILE_HASH_ORDER); ++ if (ret) ++ break; ++ } ++ ++ if (ret) { ++ for (j = 0; j < i; ++j) ++ drm_ht_remove(&priv->refd_object_hash[j]); ++ goto out_free; ++ } ++ ++ if (dev->driver->driver_features & DRIVER_GEM) ++ drm_gem_open(dev, priv); ++ ++ if (dev->driver->open) { ++ ret = dev->driver->open(dev, priv); ++ if (ret < 0) ++ goto out_free; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (list_empty(&dev->filelist)) ++ priv->master = 1; ++ ++ list_add(&priv->lhead, &dev->filelist); ++ mutex_unlock(&dev->struct_mutex); ++ ++#ifdef __alpha__ ++ /* ++ * Default the hose ++ */ ++ if (!dev->hose) { ++ struct pci_dev *pci_dev; ++ pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); ++ if (pci_dev) { ++ dev->hose = pci_dev->sysdata; ++ pci_dev_put(pci_dev); ++ } ++ if (!dev->hose) { ++ struct pci_bus *b = pci_bus_b(pci_root_buses.next); ++ if (b) ++ dev->hose = b->sysdata; ++ } ++ } ++#endif ++ ++ return 0; ++ out_free: ++ drm_free(priv, sizeof(*priv), DRM_MEM_FILES); ++ filp->private_data = NULL; ++ return ret; ++} ++ ++/** No-op. */ ++int drm_fasync(int fd, struct file *filp, int on) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ int retcode; ++ ++ DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, ++ (long)old_encode_dev(priv->minor->device)); ++ retcode = fasync_helper(fd, filp, on, &dev->buf_async); ++ if (retcode < 0) ++ return retcode; ++ return 0; ++} ++EXPORT_SYMBOL(drm_fasync); ++ ++static void drm_object_release(struct file *filp) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct list_head *head; ++ struct drm_ref_object *ref_object; ++ int i; ++ ++ /* ++ * Free leftover ref objects created by me. Note that we cannot use ++ * list_for_each() here, as the struct_mutex may be temporarily ++ * released by the remove_() functions, and thus the lists may be ++ * altered. ++ * Also, a drm_remove_ref_object() will not remove it ++ * from the list unless its refcount is 1. ++ */ ++ ++ head = &priv->refd_objects; ++ while (head->next != head) { ++ ref_object = list_entry(head->next, struct drm_ref_object, list); ++ drm_remove_ref_object(priv, ref_object); ++ head = &priv->refd_objects; ++ } ++ ++ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) ++ drm_ht_remove(&priv->refd_object_hash[i]); ++} ++ ++/** ++ * Release file. ++ * ++ * \param inode device inode ++ * \param file_priv DRM file private. ++ * \return zero on success or a negative number on failure. 
++ * ++ * If the hardware lock is held then free it, and take it again for the kernel ++ * context since it's necessary to reclaim buffers. Unlink the file private ++ * data from its list and free it. Decreases the open count and if it reaches ++ * zero calls drm_lastclose(). ++ */ ++int drm_release(struct inode *inode, struct file *filp) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ int retcode = 0; ++ ++ lock_kernel(); ++ ++ DRM_DEBUG("open_count = %d\n", dev->open_count); ++ ++ if (dev->driver->preclose) ++ dev->driver->preclose(dev, file_priv); ++ ++ /* ======================================================== ++ * Begin inline drm_release ++ */ ++ ++ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", ++ current->pid, (long)old_encode_dev(file_priv->minor->device), ++ dev->open_count); ++ ++ if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { ++ if (drm_i_have_hw_lock(dev, file_priv)) { ++ dev->driver->reclaim_buffers_locked(dev, file_priv); ++ } else { ++ unsigned long _end=jiffies + 3*DRM_HZ; ++ int locked = 0; ++ ++ drm_idlelock_take(&dev->lock); ++ ++ /* ++ * Wait for a while. ++ */ ++ ++ do{ ++ spin_lock_bh(&dev->lock.spinlock); ++ locked = dev->lock.idle_has_lock; ++ spin_unlock_bh(&dev->lock.spinlock); ++ if (locked) ++ break; ++ schedule(); ++ } while (!time_after_eq(jiffies, _end)); ++ ++ if (!locked) { ++ DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" ++ "\tdriver to use reclaim_buffers_idlelocked() instead.\n" ++ "\tI will go on reclaiming the buffers anyway.\n"); ++ } ++ ++ dev->driver->reclaim_buffers_locked(dev, file_priv); ++ drm_idlelock_release(&dev->lock); ++ } ++ } ++ ++ if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { ++ ++ drm_idlelock_take(&dev->lock); ++ dev->driver->reclaim_buffers_idlelocked(dev, file_priv); ++ drm_idlelock_release(&dev->lock); ++ ++ } ++ ++ if (drm_i_have_hw_lock(dev, file_priv)) { ++ DRM_DEBUG("File %p released, freeing lock for context %d\n", ++ filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); ++ ++ drm_lock_free(&dev->lock, ++ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); ++ } ++ ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && ++ !dev->driver->reclaim_buffers_locked) { ++ dev->driver->reclaim_buffers(dev, file_priv); ++ } ++ ++ if (dev->driver->driver_features & DRIVER_GEM) ++ drm_gem_release(dev, file_priv); ++ ++ drm_fasync(-1, filp, 0); ++ ++ mutex_lock(&dev->ctxlist_mutex); ++ ++ if (!list_empty(&dev->ctxlist)) { ++ struct drm_ctx_list *pos, *n; ++ ++ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { ++ if (pos->tag == file_priv && ++ pos->handle != DRM_KERNEL_CONTEXT) { ++ if (dev->driver->context_dtor) ++ dev->driver->context_dtor(dev, ++ pos->handle); ++ ++ drm_ctxbitmap_free(dev, pos->handle); ++ ++ list_del(&pos->head); ++ drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); ++ --dev->ctx_count; ++ } ++ } ++ } ++ mutex_unlock(&dev->ctxlist_mutex); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_object_release(filp); ++ if (file_priv->remove_auth_on_close == 1) { ++ struct drm_file *temp; ++ ++ list_for_each_entry(temp, &dev->filelist, lhead) ++ temp->authenticated = 0; ++ } ++ list_del(&file_priv->lhead); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (dev->driver->postclose) ++ dev->driver->postclose(dev, file_priv); ++ drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); ++ ++ /* ======================================================== ++ * End inline drm_release ++ */ ++ ++ 
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); ++ spin_lock(&dev->count_lock); ++ if (!--dev->open_count) { ++ if (atomic_read(&dev->ioctl_count) || dev->blocked) { ++ DRM_ERROR("Device busy: %d %d\n", ++ atomic_read(&dev->ioctl_count), dev->blocked); ++ spin_unlock(&dev->count_lock); ++ unlock_kernel(); ++ return -EBUSY; ++ } ++ spin_unlock(&dev->count_lock); ++ unlock_kernel(); ++ return drm_lastclose(dev); ++ } ++ spin_unlock(&dev->count_lock); ++ ++ unlock_kernel(); ++ ++ return retcode; ++} ++EXPORT_SYMBOL(drm_release); ++ ++/** No-op. */ ++/* This is to deal with older X servers that believe 0 means data is ++ * available which is not the correct return for a poll function. ++ * This cannot be fixed until the Xserver is fixed. Xserver will need ++ * to set a newer interface version to avoid breaking older Xservers. ++ * Without fixing the Xserver you get: "WaitForSomething(): select: errno=22" ++ * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try ++ * to return the correct response. ++ */ ++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) ++{ ++ /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */ ++ return 0; ++} ++EXPORT_SYMBOL(drm_poll); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_gem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_gem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_gem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_gem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,444 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++#include ++ ++#include "drmP.h" ++ ++#if OS_HAS_GEM ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** @file drm_gem.c ++ * ++ * This file provides some of the base ioctls and library routines for ++ * the graphics memory manager implemented by each device driver. ++ * ++ * Because various devices have different requirements in terms of ++ * synchronization and migration strategies, implementing that is left up to ++ * the driver, and all that the general API provides should be generic -- ++ * allocating objects, reading/writing data with the cpu, freeing objects. 
++ * Even there, platform-dependent optimizations for reading/writing data with ++ * the CPU mean we'll likely hook those out to driver-specific calls. However, ++ * the DRI2 implementation wants to have at least allocate/mmap be generic. ++ * ++ * The goal was to have swap-backed object allocation managed through ++ * struct file. However, file descriptors as handles to a struct file have ++ * two major failings: ++ * - Process limits prevent more than 1024 or so being used at a time by ++ * default. ++ * - Inability to allocate high fds will aggravate the X Server's select() ++ * handling, and likely that of many GL client applications as well. ++ * ++ * This led to a plan of using our own integer IDs (called handles, following ++ * DRM terminology) to mimic fds, and implement the fd syscalls we need as ++ * ioctls. The objects themselves will still include the struct file so ++ * that we can transition to fds if the required kernel infrastructure shows ++ * up at a later date, and as our interface with shmfs for memory allocation. ++ */ ++ ++/** ++ * Initialize the GEM device fields ++ */ ++ ++int ++drm_gem_init(struct drm_device *dev) ++{ ++ spin_lock_init(&dev->object_name_lock); ++ idr_init(&dev->object_name_idr); ++ atomic_set(&dev->object_count, 0); ++ atomic_set(&dev->object_memory, 0); ++ atomic_set(&dev->pin_count, 0); ++ atomic_set(&dev->pin_memory, 0); ++ atomic_set(&dev->gtt_count, 0); ++ atomic_set(&dev->gtt_memory, 0); ++ return 0; ++} ++ ++/** ++ * Allocate a GEM object of the specified size with shmfs backing store ++ */ ++struct drm_gem_object * ++drm_gem_object_alloc(struct drm_device *dev, size_t size) ++{ ++ struct drm_gem_object *obj; ++ ++ BUG_ON((size & (PAGE_SIZE - 1)) != 0); ++ ++ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); ++ ++ obj->dev = dev; ++ obj->filp = shmem_file_setup("drm mm object", size, 0); ++ if (IS_ERR(obj->filp)) { ++ kfree(obj); ++ return NULL; ++ } ++ ++ kref_init(&obj->refcount); ++ kref_init(&obj->handlecount); ++ obj->size = size; ++ if (dev->driver->gem_init_object != NULL && ++ dev->driver->gem_init_object(obj) != 0) { ++ fput(obj->filp); ++ kfree(obj); ++ return NULL; ++ } ++ atomic_inc(&dev->object_count); ++ atomic_add(obj->size, &dev->object_memory); ++ return obj; ++} ++EXPORT_SYMBOL(drm_gem_object_alloc); ++ ++/** ++ * Removes the mapping from handle to filp for this object. ++ */ ++static int ++drm_gem_handle_delete(struct drm_file *filp, int handle) ++{ ++ struct drm_device *dev; ++ struct drm_gem_object *obj; ++ ++ /* This is gross. The idr system doesn't let us try a delete and ++ * return an error code. It just spews if you fail at deleting. ++ * So, we have to grab a lock around finding the object and then ++ * doing the delete on it and dropping the refcount, or the user ++ * could race us to double-decrement the refcount and cause a ++ * use-after-free later. Given the frequency of our handle lookups, ++ * we may want to use ida for number allocation and a hash table ++ * for the pointers, anyway. ++ */ ++ spin_lock(&filp->table_lock); ++ ++ /* Check if we currently have a reference on the object */ ++ obj = idr_find(&filp->object_idr, handle); ++ if (obj == NULL) { ++ spin_unlock(&filp->table_lock); ++ return -EINVAL; ++ } ++ dev = obj->dev; ++ ++ /* Release reference and decrement refcount. 
*/ ++ idr_remove(&filp->object_idr, handle); ++ spin_unlock(&filp->table_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Create a handle for this object. This adds a handle reference ++ * to the object, which includes a regular reference count. Callers ++ * will likely want to dereference the object afterwards. ++ */ ++int ++drm_gem_handle_create(struct drm_file *file_priv, ++ struct drm_gem_object *obj, ++ int *handlep) ++{ ++ int ret; ++ ++ /* ++ * Get the user-visible handle using idr. ++ */ ++again: ++ /* ensure there is space available to allocate a handle */ ++ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) ++ return -ENOMEM; ++ ++ /* do the allocation under our spinlock */ ++ spin_lock(&file_priv->table_lock); ++ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); ++ spin_unlock(&file_priv->table_lock); ++ if (ret == -EAGAIN) ++ goto again; ++ ++ if (ret != 0) ++ return ret; ++ ++ drm_gem_object_handle_reference(obj); ++ return 0; ++} ++EXPORT_SYMBOL(drm_gem_handle_create); ++ ++/** Returns a reference to the object named by the handle. */ ++struct drm_gem_object * ++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, ++ int handle) ++{ ++ struct drm_gem_object *obj; ++ ++ spin_lock(&filp->table_lock); ++ ++ /* Check if we currently have a reference on the object */ ++ obj = idr_find(&filp->object_idr, handle); ++ if (obj == NULL) { ++ spin_unlock(&filp->table_lock); ++ return NULL; ++ } ++ ++ drm_gem_object_reference(obj); ++ ++ spin_unlock(&filp->table_lock); ++ ++ return obj; ++} ++EXPORT_SYMBOL(drm_gem_object_lookup); ++ ++/** ++ * Releases the handle to an mm object. ++ */ ++int ++drm_gem_close_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_gem_close *args = data; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ ret = drm_gem_handle_delete(file_priv, args->handle); ++ ++ return ret; ++} ++ ++/** ++ * Create a global name for an object, returning the name. ++ * ++ * Note that the name does not hold a reference; when the object ++ * is freed, the name goes away. ++ */ ++int ++drm_gem_flink_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_gem_flink *args = data; ++ struct drm_gem_object *obj; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++again: ++ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) ++ return -ENOMEM; ++ ++ spin_lock(&dev->object_name_lock); ++ if (obj->name) { ++ spin_unlock(&dev->object_name_lock); ++ return -EEXIST; ++ } ++ ret = idr_get_new_above(&dev->object_name_idr, obj, 1, ++ &obj->name); ++ spin_unlock(&dev->object_name_lock); ++ if (ret == -EAGAIN) ++ goto again; ++ ++ if (ret != 0) { ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ /* ++ * Leave the reference from the lookup around as the ++ * name table now holds one ++ */ ++ args->name = (uint64_t) obj->name; ++ ++ return 0; ++} ++ ++/** ++ * Open an object using the global name, returning a handle and the size. ++ * ++ * This handle (of course) holds a reference to the object, so the object ++ * will not go away until the handle is deleted. 
++ */ ++int ++drm_gem_open_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_gem_open *args = data; ++ struct drm_gem_object *obj; ++ int ret; ++ int handle; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ spin_lock(&dev->object_name_lock); ++ obj = idr_find(&dev->object_name_idr, (int) args->name); ++ if (obj) ++ drm_gem_object_reference(obj); ++ spin_unlock(&dev->object_name_lock); ++ if (!obj) ++ return -ENOENT; ++ ++ ret = drm_gem_handle_create(file_priv, obj, &handle); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) ++ return ret; ++ ++ args->handle = handle; ++ args->size = obj->size; ++ ++ return 0; ++} ++ ++/** ++ * Called at device open time, sets up the structure for handling refcounting ++ * of mm objects. ++ */ ++void ++drm_gem_open(struct drm_device *dev, struct drm_file *file_private) ++{ ++ idr_init(&file_private->object_idr); ++ spin_lock_init(&file_private->table_lock); ++} ++ ++/** ++ * Called at device close to release the file's ++ * handle references on objects. ++ */ ++static int ++drm_gem_object_release_handle(int id, void *ptr, void *data) ++{ ++ struct drm_gem_object *obj = ptr; ++ ++ drm_gem_object_handle_unreference(obj); ++ ++ return 0; ++} ++ ++/** ++ * Called at close time when the filp is going away. ++ * ++ * Releases any remaining references on objects by this filp. ++ */ ++void ++drm_gem_release(struct drm_device *dev, struct drm_file *file_private) ++{ ++ mutex_lock(&dev->struct_mutex); ++ idr_for_each(&file_private->object_idr, ++ &drm_gem_object_release_handle, NULL); ++ ++ idr_destroy(&file_private->object_idr); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * Called after the last reference to the object has been lost. ++ * ++ * Frees the object ++ */ ++void ++drm_gem_object_free(struct kref *kref) ++{ ++ struct drm_gem_object *obj = (struct drm_gem_object *) kref; ++ struct drm_device *dev = obj->dev; ++ ++ BUG_ON(!mutex_is_locked(&dev->struct_mutex)); ++ ++ if (dev->driver->gem_free_object != NULL) ++ dev->driver->gem_free_object(obj); ++ ++ fput(obj->filp); ++ atomic_dec(&dev->object_count); ++ atomic_sub(obj->size, &dev->object_memory); ++ kfree(obj); ++} ++EXPORT_SYMBOL(drm_gem_object_free); ++ ++/** ++ * Called after the last handle to the object has been closed ++ * ++ * Removes any name for the object. Note that this must be ++ * called before drm_gem_object_free or we'll be touching ++ * freed memory ++ */ ++void ++drm_gem_object_handle_free(struct kref *kref) ++{ ++ struct drm_gem_object *obj = container_of(kref, ++ struct drm_gem_object, ++ handlecount); ++ struct drm_device *dev = obj->dev; ++ ++ /* Remove any name for this object */ ++ spin_lock(&dev->object_name_lock); ++ if (obj->name) { ++ idr_remove(&dev->object_name_idr, obj->name); ++ spin_unlock(&dev->object_name_lock); ++ /* ++ * The object name held a reference to this object, drop ++ * that now. 
++ */ ++ drm_gem_object_unreference(obj); ++ } else ++ spin_unlock(&dev->object_name_lock); ++ ++} ++EXPORT_SYMBOL(drm_gem_object_handle_free); ++ ++#else ++ ++int drm_gem_init(struct drm_device *dev) ++{ ++ return 0; ++} ++ ++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private) ++{ ++ ++} ++ ++void ++drm_gem_release(struct drm_device *dev, struct drm_file *file_private) ++{ ++ ++} ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1145 @@ ++/** ++ * \file drm.h ++ * Header for the Direct Rendering Manager ++ * ++ * \author Rickard E. (Rik) Faith ++ * ++ * \par Acknowledgments: ++ * Dec 1999, Richard Henderson , move to generic \c cmpxchg. ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/** ++ * \mainpage ++ * ++ * The Direct Rendering Manager (DRM) is a device-independent kernel-level ++ * device driver that provides support for the XFree86 Direct Rendering ++ * Infrastructure (DRI). ++ * ++ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major ++ * ways: ++ * -# The DRM provides synchronized access to the graphics hardware via ++ * the use of an optimized two-tiered lock. ++ * -# The DRM enforces the DRI security policy for access to the graphics ++ * hardware by only allowing authenticated X11 clients access to ++ * restricted regions of memory. ++ * -# The DRM provides a generic DMA engine, complete with multiple ++ * queues and the ability to detect the need for an OpenGL context ++ * switch. ++ * -# The DRM is extensible via the use of small device-specific modules ++ * that rely extensively on the API exported by the DRM module. 
++ * ++ */ ++ ++#ifndef _DRM_H_ ++#define _DRM_H_ ++ ++#ifndef __user ++#define __user ++#endif ++#ifndef __iomem ++#define __iomem ++#endif ++ ++#ifdef __GNUC__ ++# define DEPRECATED __attribute__ ((deprecated)) ++#else ++# define DEPRECATED ++#endif ++ ++#if defined(__linux__) ++#include /* For _IO* macros */ ++#define DRM_IOCTL_NR(n) _IOC_NR(n) ++#define DRM_IOC_VOID _IOC_NONE ++#define DRM_IOC_READ _IOC_READ ++#define DRM_IOC_WRITE _IOC_WRITE ++#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE ++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) ++#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) ++#include ++#define DRM_IOCTL_NR(n) ((n) & 0xff) ++#define DRM_IOC_VOID IOC_VOID ++#define DRM_IOC_READ IOC_OUT ++#define DRM_IOC_WRITE IOC_IN ++#define DRM_IOC_READWRITE IOC_INOUT ++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) ++#endif ++ ++#ifdef __OpenBSD__ ++#define DRM_MAJOR 81 ++#endif ++#if defined(__linux__) || defined(__NetBSD__) ++#define DRM_MAJOR 226 ++#endif ++#define DRM_MAX_MINOR 15 ++ ++#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ ++#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ ++#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ ++#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ ++ ++#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ ++#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ ++#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) ++#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) ++#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) ++ ++#if defined(__linux__) ++typedef unsigned int drm_handle_t; ++#else ++#include ++typedef unsigned long drm_handle_t; /**< To mapped regions */ ++#endif ++typedef unsigned int drm_context_t; /**< GLXContext handle */ ++typedef unsigned int drm_drawable_t; ++typedef unsigned int drm_magic_t; /**< Magic for authentication */ ++ ++/** ++ * Cliprect. ++ * ++ * \warning If you change this structure, make sure you change ++ * XF86DRIClipRectRec in the server as well ++ * ++ * \note KW: Actually it's illegal to change either for ++ * backwards-compatibility reasons. ++ */ ++struct drm_clip_rect { ++ unsigned short x1; ++ unsigned short y1; ++ unsigned short x2; ++ unsigned short y2; ++}; ++ ++/** ++ * Texture region, ++ */ ++struct drm_tex_region { ++ unsigned char next; ++ unsigned char prev; ++ unsigned char in_use; ++ unsigned char padding; ++ unsigned int age; ++}; ++ ++/** ++ * Hardware lock. ++ * ++ * The lock structure is a simple cache-line aligned integer. To avoid ++ * processor bus contention on a multiprocessor system, there should not be any ++ * other data stored in the same cache line. ++ */ ++struct drm_hw_lock { ++ __volatile__ unsigned int lock; /**< lock variable */ ++ char padding[60]; /**< Pad to cache line */ ++}; ++ ++/* This is beyond ugly, and only works on GCC. However, it allows me to use ++ * drm.h in places (i.e., in the X-server) where I can't use size_t. The real ++ * fix is to use uint32_t instead of size_t, but that fix will break existing ++ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will* ++ * eventually happen, though. I chose 'unsigned long' to be the fallback type ++ * because that works on all the platforms I know about. Hopefully, the ++ * real fix will happen before that bites us. 
++ */ ++ ++#ifdef __SIZE_TYPE__ ++# define DRM_SIZE_T __SIZE_TYPE__ ++#else ++# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!" ++# define DRM_SIZE_T unsigned long ++#endif ++ ++/** ++ * DRM_IOCTL_VERSION ioctl argument type. ++ * ++ * \sa drmGetVersion(). ++ */ ++struct drm_version { ++ int version_major; /**< Major version */ ++ int version_minor; /**< Minor version */ ++ int version_patchlevel; /**< Patch level */ ++ DRM_SIZE_T name_len; /**< Length of name buffer */ ++ char __user *name; /**< Name of driver */ ++ DRM_SIZE_T date_len; /**< Length of date buffer */ ++ char __user *date; /**< User-space buffer to hold date */ ++ DRM_SIZE_T desc_len; /**< Length of desc buffer */ ++ char __user *desc; /**< User-space buffer to hold desc */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_UNIQUE ioctl argument type. ++ * ++ * \sa drmGetBusid() and drmSetBusId(). ++ */ ++struct drm_unique { ++ DRM_SIZE_T unique_len; /**< Length of unique */ ++ char __user *unique; /**< Unique name for driver instantiation */ ++}; ++ ++#undef DRM_SIZE_T ++ ++struct drm_list { ++ int count; /**< Length of user-space structures */ ++ struct drm_version __user *version; ++}; ++ ++struct drm_block { ++ int unused; ++}; ++ ++/** ++ * DRM_IOCTL_CONTROL ioctl argument type. ++ * ++ * \sa drmCtlInstHandler() and drmCtlUninstHandler(). ++ */ ++struct drm_control { ++ enum { ++ DRM_ADD_COMMAND, ++ DRM_RM_COMMAND, ++ DRM_INST_HANDLER, ++ DRM_UNINST_HANDLER ++ } func; ++ int irq; ++}; ++ ++/** ++ * Type of memory to map. ++ */ ++enum drm_map_type { ++ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ ++ _DRM_REGISTERS = 1, /**< no caching, no core dump */ ++ _DRM_SHM = 2, /**< shared, cached */ ++ _DRM_AGP = 3, /**< AGP/GART */ ++ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ ++ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ ++ _DRM_TTM = 6 ++}; ++ ++/** ++ * Memory mapping flags. ++ */ ++enum drm_map_flags { ++ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ ++ _DRM_READ_ONLY = 0x02, ++ _DRM_LOCKED = 0x04, /**< shared, cached, locked */ ++ _DRM_KERNEL = 0x08, /**< kernel requires access */ ++ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ ++ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ ++ _DRM_REMOVABLE = 0x40, /**< Removable mapping */ ++ _DRM_DRIVER = 0x80 /**< Managed by driver */ ++}; ++ ++struct drm_ctx_priv_map { ++ unsigned int ctx_id; /**< Context requesting private mapping */ ++ void *handle; /**< Handle of map */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls ++ * argument type. ++ * ++ * \sa drmAddMap(). ++ */ ++struct drm_map { ++ unsigned long offset; /**< Requested physical address (0 for SAREA)*/ ++ unsigned long size; /**< Requested physical size (bytes) */ ++ enum drm_map_type type; /**< Type of memory to map */ ++ enum drm_map_flags flags; /**< Flags */ ++ void *handle; /**< User-space: "Handle" to pass to mmap() */ ++ /**< Kernel-space: kernel-virtual address */ ++ int mtrr; /**< MTRR slot used */ ++ /* Private data */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_CLIENT ioctl argument type. ++ */ ++struct drm_client { ++ int idx; /**< Which client desired? */ ++ int auth; /**< Is client authenticated? 
*/ ++ unsigned long pid; /**< Process ID */ ++ unsigned long uid; /**< User ID */ ++ unsigned long magic; /**< Magic */ ++ unsigned long iocs; /**< Ioctl count */ ++}; ++ ++enum drm_stat_type { ++ _DRM_STAT_LOCK, ++ _DRM_STAT_OPENS, ++ _DRM_STAT_CLOSES, ++ _DRM_STAT_IOCTLS, ++ _DRM_STAT_LOCKS, ++ _DRM_STAT_UNLOCKS, ++ _DRM_STAT_VALUE, /**< Generic value */ ++ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ ++ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ ++ ++ _DRM_STAT_IRQ, /**< IRQ */ ++ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ ++ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ ++ _DRM_STAT_DMA, /**< DMA */ ++ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ ++ _DRM_STAT_MISSED /**< Missed DMA opportunity */ ++ /* Add to the *END* of the list */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_STATS ioctl argument type. ++ */ ++struct drm_stats { ++ unsigned long count; ++ struct { ++ unsigned long value; ++ enum drm_stat_type type; ++ } data[15]; ++}; ++ ++/** ++ * Hardware locking flags. ++ */ ++enum drm_lock_flags { ++ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ ++ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ ++ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ ++ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ ++ /* These *HALT* flags aren't supported yet ++ -- they will be used to support the ++ full-screen DGA-like mode. */ ++ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ ++ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ ++}; ++ ++/** ++ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. ++ * ++ * \sa drmGetLock() and drmUnlock(). ++ */ ++struct drm_lock { ++ int context; ++ enum drm_lock_flags flags; ++}; ++ ++/** ++ * DMA flags ++ * ++ * \warning ++ * These values \e must match xf86drm.h. ++ * ++ * \sa drm_dma. ++ */ ++enum drm_dma_flags { ++ /* Flags for DMA buffer dispatch */ ++ _DRM_DMA_BLOCK = 0x01, /**< ++ * Block until buffer dispatched. ++ * ++ * \note The buffer may not yet have ++ * been processed by the hardware -- ++ * getting a hardware lock with the ++ * hardware quiescent will ensure ++ * that the buffer has been ++ * processed. ++ */ ++ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ ++ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ ++ ++ /* Flags for DMA buffer request */ ++ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ ++ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ ++ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ ++}; ++ ++/** ++ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. ++ * ++ * \sa drmAddBufs(). ++ */ ++struct drm_buf_desc { ++ int count; /**< Number of buffers of this size */ ++ int size; /**< Size in bytes */ ++ int low_mark; /**< Low water mark */ ++ int high_mark; /**< High water mark */ ++ enum { ++ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ ++ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ ++ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ ++ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ ++ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ ++ } flags; ++ unsigned long agp_start; /**< ++ * Start address of where the AGP buffers are ++ * in the AGP aperture ++ */ ++}; ++ ++/** ++ * DRM_IOCTL_INFO_BUFS ioctl argument type. 
++ */ ++struct drm_buf_info { ++ int count; /**< Number of buffers described in list */ ++ struct drm_buf_desc __user *list; /**< List of buffer descriptions */ ++}; ++ ++/** ++ * DRM_IOCTL_FREE_BUFS ioctl argument type. ++ */ ++struct drm_buf_free { ++ int count; ++ int __user *list; ++}; ++ ++/** ++ * Buffer information ++ * ++ * \sa drm_buf_map. ++ */ ++struct drm_buf_pub { ++ int idx; /**< Index into the master buffer list */ ++ int total; /**< Buffer size */ ++ int used; /**< Amount of buffer in use (for DMA) */ ++ void __user *address; /**< Address of buffer */ ++}; ++ ++/** ++ * DRM_IOCTL_MAP_BUFS ioctl argument type. ++ */ ++struct drm_buf_map { ++ int count; /**< Length of the buffer list */ ++#if defined(__cplusplus) ++ void __user *c_virtual; ++#else ++ void __user *virtual; /**< Mmap'd area in user-virtual */ ++#endif ++ struct drm_buf_pub __user *list; /**< Buffer information */ ++}; ++ ++/** ++ * DRM_IOCTL_DMA ioctl argument type. ++ * ++ * Indices here refer to the offset into the buffer list in drm_buf_get. ++ * ++ * \sa drmDMA(). ++ */ ++struct drm_dma { ++ int context; /**< Context handle */ ++ int send_count; /**< Number of buffers to send */ ++ int __user *send_indices; /**< List of handles to buffers */ ++ int __user *send_sizes; /**< Lengths of data to send */ ++ enum drm_dma_flags flags; /**< Flags */ ++ int request_count; /**< Number of buffers requested */ ++ int request_size; /**< Desired size for buffers */ ++ int __user *request_indices; /**< Buffer information */ ++ int __user *request_sizes; ++ int granted_count; /**< Number of buffers granted */ ++}; ++ ++enum drm_ctx_flags { ++ _DRM_CONTEXT_PRESERVED = 0x01, ++ _DRM_CONTEXT_2DONLY = 0x02 ++}; ++ ++/** ++ * DRM_IOCTL_ADD_CTX ioctl argument type. ++ * ++ * \sa drmCreateContext() and drmDestroyContext(). ++ */ ++struct drm_ctx { ++ drm_context_t handle; ++ enum drm_ctx_flags flags; ++}; ++ ++/** ++ * DRM_IOCTL_RES_CTX ioctl argument type. ++ */ ++struct drm_ctx_res { ++ int count; ++ struct drm_ctx __user *contexts; ++}; ++ ++/** ++ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. ++ */ ++struct drm_draw { ++ drm_drawable_t handle; ++}; ++ ++/** ++ * DRM_IOCTL_UPDATE_DRAW ioctl argument type. ++ */ ++typedef enum { ++ DRM_DRAWABLE_CLIPRECTS, ++} drm_drawable_info_type_t; ++ ++struct drm_update_draw { ++ drm_drawable_t handle; ++ unsigned int type; ++ unsigned int num; ++ unsigned long long data; ++}; ++ ++/** ++ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. ++ */ ++struct drm_auth { ++ drm_magic_t magic; ++}; ++ ++/** ++ * DRM_IOCTL_IRQ_BUSID ioctl argument type. ++ * ++ * \sa drmGetInterruptFromBusID(). 
++ */ ++struct drm_irq_busid { ++ int irq; /**< IRQ number */ ++ int busnum; /**< bus number */ ++ int devnum; /**< device number */ ++ int funcnum; /**< function number */ ++}; ++ ++enum drm_vblank_seq_type { ++ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ ++ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ ++ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ ++ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ ++ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ ++ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ ++}; ++ ++#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) ++#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ ++ _DRM_VBLANK_NEXTONMISS) ++ ++struct drm_wait_vblank_request { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ unsigned long signal; ++}; ++ ++struct drm_wait_vblank_reply { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ long tval_sec; ++ long tval_usec; ++}; ++ ++/** ++ * DRM_IOCTL_WAIT_VBLANK ioctl argument type. ++ * ++ * \sa drmWaitVBlank(). ++ */ ++union drm_wait_vblank { ++ struct drm_wait_vblank_request request; ++ struct drm_wait_vblank_reply reply; ++}; ++ ++ ++#define _DRM_PRE_MODESET 1 ++#define _DRM_POST_MODESET 2 ++ ++/** ++ * DRM_IOCTL_MODESET_CTL ioctl argument type ++ * ++ * \sa drmModesetCtl(). ++ */ ++struct drm_modeset_ctl { ++ uint32_t crtc; ++ uint32_t cmd; ++}; ++ ++/** ++ * DRM_IOCTL_AGP_ENABLE ioctl argument type. ++ * ++ * \sa drmAgpEnable(). ++ */ ++struct drm_agp_mode { ++ unsigned long mode; /**< AGP mode */ ++}; ++ ++/** ++ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. ++ * ++ * \sa drmAgpAlloc() and drmAgpFree(). ++ */ ++struct drm_agp_buffer { ++ unsigned long size; /**< In bytes -- will round to page boundary */ ++ unsigned long handle; /**< Used for binding / unbinding */ ++ unsigned long type; /**< Type of memory to allocate */ ++ unsigned long physical; /**< Physical used by i810 */ ++}; ++ ++/** ++ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. ++ * ++ * \sa drmAgpBind() and drmAgpUnbind(). ++ */ ++struct drm_agp_binding { ++ unsigned long handle; /**< From drm_agp_buffer */ ++ unsigned long offset; /**< In bytes -- will round to page boundary */ ++}; ++ ++/** ++ * DRM_IOCTL_AGP_INFO ioctl argument type. ++ * ++ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), ++ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), ++ * drmAgpVendorId() and drmAgpDeviceId(). ++ */ ++struct drm_agp_info { ++ int agp_version_major; ++ int agp_version_minor; ++ unsigned long mode; ++ unsigned long aperture_base; /**< physical address */ ++ unsigned long aperture_size; /**< bytes */ ++ unsigned long memory_allowed; /**< bytes */ ++ unsigned long memory_used; ++ ++ /** \name PCI information */ ++ /*@{ */ ++ unsigned short id_vendor; ++ unsigned short id_device; ++ /*@} */ ++}; ++ ++/** ++ * DRM_IOCTL_SG_ALLOC ioctl argument type. ++ */ ++struct drm_scatter_gather { ++ unsigned long size; /**< In bytes -- will round to page boundary */ ++ unsigned long handle; /**< Used for mapping / unmapping */ ++}; ++ ++/** ++ * DRM_IOCTL_SET_VERSION ioctl argument type. 
++ */ ++struct drm_set_version { ++ int drm_di_major; ++ int drm_di_minor; ++ int drm_dd_major; ++ int drm_dd_minor; ++}; ++ ++ ++#define DRM_FENCE_FLAG_EMIT 0x00000001 ++#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 ++/** ++ * On hardware with no interrupt events for operation completion, ++ * indicates that the kernel should sleep while waiting for any blocking ++ * operation to complete rather than spinning. ++ * ++ * Has no effect otherwise. ++ */ ++#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 ++#define DRM_FENCE_FLAG_NO_USER 0x00000010 ++ ++/* Reserved for driver use */ ++#define DRM_FENCE_MASK_DRIVER 0xFF000000 ++ ++#define DRM_FENCE_TYPE_EXE 0x00000001 ++ ++struct drm_fence_arg { ++ unsigned int handle; ++ unsigned int fence_class; ++ unsigned int type; ++ unsigned int flags; ++ unsigned int signaled; ++ unsigned int error; ++ unsigned int sequence; ++ unsigned int pad64; ++ uint64_t expand_pad[2]; /*Future expansion */ ++}; ++ ++/* Buffer permissions, referring to how the GPU uses the buffers. ++ * these translate to fence types used for the buffers. ++ * Typically a texture buffer is read, A destination buffer is write and ++ * a command (batch-) buffer is exe. Can be or-ed together. ++ */ ++ ++#define DRM_BO_FLAG_READ (1ULL << 0) ++#define DRM_BO_FLAG_WRITE (1ULL << 1) ++#define DRM_BO_FLAG_EXE (1ULL << 2) ++ ++/* ++ * All of the bits related to access mode ++ */ ++#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE) ++/* ++ * Status flags. Can be read to determine the actual state of a buffer. ++ * Can also be set in the buffer mask before validation. ++ */ ++ ++/* ++ * Mask: Never evict this buffer. Not even with force. This type of buffer is only ++ * available to root and must be manually removed before buffer manager shutdown ++ * or lock. ++ * Flags: Acknowledge ++ */ ++#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) ++ ++/* ++ * Mask: Require that the buffer is placed in mappable memory when validated. ++ * If not set the buffer may or may not be in mappable memory when validated. ++ * Flags: If set, the buffer is in mappable memory. ++ */ ++#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) ++ ++/* Mask: The buffer should be shareable with other processes. ++ * Flags: The buffer is shareable with other processes. ++ */ ++#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) ++ ++/* Mask: If set, place the buffer in cache-coherent memory if available. ++ * If clear, never place the buffer in cache coherent memory if validated. ++ * Flags: The buffer is currently in cache-coherent memory. ++ */ ++#define DRM_BO_FLAG_CACHED (1ULL << 7) ++ ++/* Mask: Make sure that every time this buffer is validated, ++ * it ends up on the same location provided that the memory mask is the same. ++ * The buffer will also not be evicted when claiming space for ++ * other buffers. Basically a pinned buffer but it may be thrown out as ++ * part of buffer manager shutdown or locking. ++ * Flags: Acknowledge. ++ */ ++#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) ++ ++/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction ++ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART ++ * with unsnooped PTEs instead of snooped, by using chipset-specific cache ++ * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED, ++ * as the eviction to local memory (TTM unbind) on map is just a side effect ++ * to prevent aggressive cache prefetch from the GPU disturbing the cache ++ * management that the DRM is doing. ++ * ++ * Flags: Acknowledge. 
++ * Buffers allocated with this flag should not be used for suballocators ++ * This type may have issues on CPUs with over-aggressive caching ++ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2 ++ */ ++#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19) ++ ++ ++/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. ++ * Flags: Acknowledge. ++ */ ++#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) ++ ++/* ++ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. ++ * Flags: Acknowledge. ++ */ ++#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) ++#define DRM_BO_FLAG_TILE (1ULL << 15) ++ ++/* ++ * Memory type flags that can be or'ed together in the mask, but only ++ * one appears in flags. ++ */ ++ ++/* System memory */ ++#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) ++/* Translation table memory */ ++#define DRM_BO_FLAG_MEM_TT (1ULL << 25) ++/* Vram memory */ ++#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) ++/* Up to the driver to define. */ ++#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) ++#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) ++#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) ++#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) ++#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) ++/* We can add more of these now with a 64-bit flag type */ ++ ++/* ++ * This is a mask covering all of the memory type flags; easier to just ++ * use a single constant than a bunch of | values. It covers ++ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4 ++ */ ++#define DRM_BO_MASK_MEM 0x00000000FF000000ULL ++/* ++ * This adds all of the CPU-mapping options in with the memory ++ * type to label all bits which change how the page gets mapped ++ */ ++#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \ ++ DRM_BO_FLAG_CACHED_MAPPED | \ ++ DRM_BO_FLAG_CACHED | \ ++ DRM_BO_FLAG_MAPPABLE) ++ ++/* Driver-private flags */ ++#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL ++ ++/* ++ * Don't block on validate and map. Instead, return EBUSY. ++ */ ++#define DRM_BO_HINT_DONT_BLOCK 0x00000002 ++/* ++ * Don't place this buffer on the unfenced list. This means ++ * that the buffer will not end up having a fence associated ++ * with it as a result of this operation ++ */ ++#define DRM_BO_HINT_DONT_FENCE 0x00000004 ++/** ++ * On hardware with no interrupt events for operation completion, ++ * indicates that the kernel should sleep while waiting for any blocking ++ * operation to complete rather than spinning. ++ * ++ * Has no effect otherwise. ++ */ ++#define DRM_BO_HINT_WAIT_LAZY 0x00000008 ++/* ++ * The client has computed relocations referring to this buffer using the ++ * offset in the presumed_offset field. 
If that offset ends up matching ++ * where this buffer lands, the kernel is free to skip executing those ++ * relocations ++ */ ++#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010 ++ ++#define DRM_BO_INIT_MAGIC 0xfe769812 ++#define DRM_BO_INIT_MAJOR 1 ++#define DRM_BO_INIT_MINOR 0 ++#define DRM_BO_INIT_PATCH 0 ++ ++ ++struct drm_bo_info_req { ++ uint64_t mask; ++ uint64_t flags; ++ unsigned int handle; ++ unsigned int hint; ++ unsigned int fence_class; ++ unsigned int desired_tile_stride; ++ unsigned int tile_info; ++ unsigned int pad64; ++ uint64_t presumed_offset; ++}; ++ ++struct drm_bo_create_req { ++ uint64_t flags; ++ uint64_t size; ++ uint64_t buffer_start; ++ unsigned int hint; ++ unsigned int page_alignment; ++}; ++ ++ ++/* ++ * Reply flags ++ */ ++ ++#define DRM_BO_REP_BUSY 0x00000001 ++ ++struct drm_bo_info_rep { ++ uint64_t flags; ++ uint64_t proposed_flags; ++ uint64_t size; ++ uint64_t offset; ++ uint64_t arg_handle; ++ uint64_t buffer_start; ++ unsigned int handle; ++ unsigned int fence_flags; ++ unsigned int rep_flags; ++ unsigned int page_alignment; ++ unsigned int desired_tile_stride; ++ unsigned int hw_tile_stride; ++ unsigned int tile_info; ++ unsigned int pad64; ++ uint64_t expand_pad[4]; /*Future expansion */ ++}; ++ ++struct drm_bo_arg_rep { ++ struct drm_bo_info_rep bo_info; ++ int ret; ++ unsigned int pad64; ++}; ++ ++struct drm_bo_create_arg { ++ union { ++ struct drm_bo_create_req req; ++ struct drm_bo_info_rep rep; ++ } d; ++}; ++ ++struct drm_bo_handle_arg { ++ unsigned int handle; ++}; ++ ++struct drm_bo_reference_info_arg { ++ union { ++ struct drm_bo_handle_arg req; ++ struct drm_bo_info_rep rep; ++ } d; ++}; ++ ++struct drm_bo_map_wait_idle_arg { ++ union { ++ struct drm_bo_info_req req; ++ struct drm_bo_info_rep rep; ++ } d; ++}; ++ ++struct drm_bo_op_req { ++ enum { ++ drm_bo_validate, ++ drm_bo_fence, ++ drm_bo_ref_fence, ++ } op; ++ unsigned int arg_handle; ++ struct drm_bo_info_req bo_req; ++}; ++ ++ ++struct drm_bo_op_arg { ++ uint64_t next; ++ union { ++ struct drm_bo_op_req req; ++ struct drm_bo_arg_rep rep; ++ } d; ++ int handled; ++ unsigned int pad64; ++}; ++ ++ ++#define DRM_BO_MEM_LOCAL 0 ++#define DRM_BO_MEM_TT 1 ++#define DRM_BO_MEM_VRAM 2 ++#define DRM_BO_MEM_PRIV0 3 ++#define DRM_BO_MEM_PRIV1 4 ++#define DRM_BO_MEM_PRIV2 5 ++#define DRM_BO_MEM_PRIV3 6 ++#define DRM_BO_MEM_PRIV4 7 ++ ++#define DRM_BO_MEM_TYPES 8 /* For now. */ ++ ++#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) ++#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) ++ ++struct drm_bo_version_arg { ++ uint32_t major; ++ uint32_t minor; ++ uint32_t patchlevel; ++}; ++ ++struct drm_mm_type_arg { ++ unsigned int mem_type; ++ unsigned int lock_flags; ++}; ++ ++struct drm_mm_init_arg { ++ unsigned int magic; ++ unsigned int major; ++ unsigned int minor; ++ unsigned int mem_type; ++ uint64_t p_offset; ++ uint64_t p_size; ++}; ++ ++struct drm_mm_info_arg { ++ unsigned int mem_type; ++ uint64_t p_size; ++}; ++ ++struct drm_gem_close { ++ /** Handle of the object to be closed. 
*/ ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_gem_flink { ++ /** Handle for the object being named */ ++ uint32_t handle; ++ ++ /** Returned global name */ ++ uint32_t name; ++}; ++ ++struct drm_gem_open { ++ /** Name of object being opened */ ++ uint32_t name; ++ ++ /** Returned handle for the object */ ++ uint32_t handle; ++ ++ /** Returned size of the object */ ++ uint64_t size; ++}; ++ ++/** ++ * \name Ioctls Definitions ++ */ ++/*@{*/ ++ ++#define DRM_IOCTL_BASE 'd' ++#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) ++#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) ++#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) ++#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) ++ ++#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) ++#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) ++#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) ++#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) ++#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) ++#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) ++#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) ++#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) ++#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) ++ ++#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) ++#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) ++#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) ++ ++#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) ++#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) ++#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) ++#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) ++#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) ++#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) ++#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) ++#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) ++#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) ++#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) ++#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) ++ ++#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) ++ ++#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) ++#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) ++ ++#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) ++#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) ++#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) ++#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) ++#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) ++#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) ++#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) ++#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) ++#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) ++#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) ++#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) ++#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) ++#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) ++ ++#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) ++#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) ++#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) ++#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) ++#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) ++#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) ++#define 
DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) ++#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) ++ ++#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) ++#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) ++ ++#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) ++ ++#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) ++ ++#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) ++#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) ++#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) ++#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) ++ ++#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) ++ ++#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) ++#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) ++#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) ++#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) ++#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) ++#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) ++#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) ++#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) ++#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg) ++#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg) ++ ++/*@}*/ ++ ++/** ++ * Device specific ioctls should only be in their respective headers ++ * The device specific ioctl range is from 0x40 to 0x99. ++ * Generic IOCTLS restart at 0xA0. ++ * ++ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and ++ * drmCommandReadWrite(). 
++ */ ++#define DRM_COMMAND_BASE 0x40 ++#define DRM_COMMAND_END 0xA0 ++ ++/* typedef area */ ++#ifndef __KERNEL__ ++typedef struct drm_clip_rect drm_clip_rect_t; ++typedef struct drm_tex_region drm_tex_region_t; ++typedef struct drm_hw_lock drm_hw_lock_t; ++typedef struct drm_version drm_version_t; ++typedef struct drm_unique drm_unique_t; ++typedef struct drm_list drm_list_t; ++typedef struct drm_block drm_block_t; ++typedef struct drm_control drm_control_t; ++typedef enum drm_map_type drm_map_type_t; ++typedef enum drm_map_flags drm_map_flags_t; ++typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; ++typedef struct drm_map drm_map_t; ++typedef struct drm_client drm_client_t; ++typedef enum drm_stat_type drm_stat_type_t; ++typedef struct drm_stats drm_stats_t; ++typedef enum drm_lock_flags drm_lock_flags_t; ++typedef struct drm_lock drm_lock_t; ++typedef enum drm_dma_flags drm_dma_flags_t; ++typedef struct drm_buf_desc drm_buf_desc_t; ++typedef struct drm_buf_info drm_buf_info_t; ++typedef struct drm_buf_free drm_buf_free_t; ++typedef struct drm_buf_pub drm_buf_pub_t; ++typedef struct drm_buf_map drm_buf_map_t; ++typedef struct drm_dma drm_dma_t; ++typedef union drm_wait_vblank drm_wait_vblank_t; ++typedef struct drm_agp_mode drm_agp_mode_t; ++typedef enum drm_ctx_flags drm_ctx_flags_t; ++typedef struct drm_ctx drm_ctx_t; ++typedef struct drm_ctx_res drm_ctx_res_t; ++typedef struct drm_draw drm_draw_t; ++typedef struct drm_update_draw drm_update_draw_t; ++typedef struct drm_auth drm_auth_t; ++typedef struct drm_irq_busid drm_irq_busid_t; ++typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; ++typedef struct drm_agp_buffer drm_agp_buffer_t; ++typedef struct drm_agp_binding drm_agp_binding_t; ++typedef struct drm_agp_info drm_agp_info_t; ++typedef struct drm_scatter_gather drm_scatter_gather_t; ++typedef struct drm_set_version drm_set_version_t; ++ ++typedef struct drm_fence_arg drm_fence_arg_t; ++typedef struct drm_mm_type_arg drm_mm_type_arg_t; ++typedef struct drm_mm_init_arg drm_mm_init_arg_t; ++typedef enum drm_bo_type drm_bo_type_t; ++#endif ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_hashtab.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_hashtab.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_hashtab.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_hashtab.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,207 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Simple open hash tab implementation. ++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include "drm_hashtab.h" ++#include ++ ++int drm_ht_create(struct drm_open_hash *ht, unsigned int order) ++{ ++ unsigned int i; ++ ++ ht->size = 1 << order; ++ ht->order = order; ++ ht->fill = 0; ++ ht->table = NULL; ++ ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE); ++ if (!ht->use_vmalloc) { ++ ht->table = drm_calloc(ht->size, sizeof(*ht->table), ++ DRM_MEM_HASHTAB); ++ } ++ if (!ht->table) { ++ ht->use_vmalloc = 1; ++ ht->table = vmalloc(ht->size * sizeof(*ht->table)); ++ } ++ if (!ht->table) { ++ DRM_ERROR("Out of memory for hash table\n"); ++ return -ENOMEM; ++ } ++ for (i = 0; i < ht->size; ++i) { ++ INIT_HLIST_HEAD(&ht->table[i]); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_ht_create); ++ ++void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) ++{ ++ struct drm_hash_item *entry; ++ struct hlist_head *h_list; ++ struct hlist_node *list; ++ unsigned int hashed_key; ++ int count = 0; ++ ++ hashed_key = hash_long(key, ht->order); ++ DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); ++ h_list = &ht->table[hashed_key]; ++ hlist_for_each(list, h_list) { ++ entry = hlist_entry(list, struct drm_hash_item, head); ++ DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); ++ } ++} ++ ++static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, ++ unsigned long key) ++{ ++ struct drm_hash_item *entry; ++ struct hlist_head *h_list; ++ struct hlist_node *list; ++ unsigned int hashed_key; ++ ++ hashed_key = hash_long(key, ht->order); ++ h_list = &ht->table[hashed_key]; ++ hlist_for_each(list, h_list) { ++ entry = hlist_entry(list, struct drm_hash_item, head); ++ if (entry->key == key) ++ return list; ++ if (entry->key > key) ++ break; ++ } ++ return NULL; ++} ++ ++int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) ++{ ++ struct drm_hash_item *entry; ++ struct hlist_head *h_list; ++ struct hlist_node *list, *parent; ++ unsigned int hashed_key; ++ unsigned long key = item->key; ++ ++ hashed_key = hash_long(key, ht->order); ++ h_list = &ht->table[hashed_key]; ++ parent = NULL; ++ hlist_for_each(list, h_list) { ++ entry = hlist_entry(list, struct drm_hash_item, head); ++ if (entry->key == key) ++ return -EINVAL; ++ if (entry->key > key) ++ break; ++ parent = list; ++ } ++ if (parent) { ++ hlist_add_after(parent, &item->head); ++ } else { ++ hlist_add_head(&item->head, h_list); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_ht_insert_item); ++ ++/* ++ * Just insert an item and return any "bits" bit key that hasn't been ++ * used before. 
++ */ ++int drm_ht_just_insert_please(struct drm_open_hash *ht, ++ struct drm_hash_item *item, ++ unsigned long seed, int bits, int shift, ++ unsigned long add) ++{ ++ int ret; ++ unsigned long mask = (1 << bits) - 1; ++ unsigned long first, unshifted_key; ++ ++ unshifted_key = hash_long(seed, bits); ++ first = unshifted_key; ++ do { ++ item->key = (unshifted_key << shift) + add; ++ ret = drm_ht_insert_item(ht, item); ++ if (ret) ++ unshifted_key = (unshifted_key + 1) & mask; ++ } while (ret && (unshifted_key != first)); ++ ++ if (ret) { ++ DRM_ERROR("Available key bit space exhausted\n"); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, ++ struct drm_hash_item **item) ++{ ++ struct hlist_node *list; ++ ++ list = drm_ht_find_key(ht, key); ++ if (!list) ++ return -EINVAL; ++ ++ *item = hlist_entry(list, struct drm_hash_item, head); ++ return 0; ++} ++EXPORT_SYMBOL(drm_ht_find_item); ++ ++int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) ++{ ++ struct hlist_node *list; ++ ++ list = drm_ht_find_key(ht, key); ++ if (list) { ++ hlist_del_init(list); ++ ht->fill--; ++ return 0; ++ } ++ return -EINVAL; ++} ++ ++int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) ++{ ++ hlist_del_init(&item->head); ++ ht->fill--; ++ return 0; ++} ++EXPORT_SYMBOL(drm_ht_remove_item); ++ ++void drm_ht_remove(struct drm_open_hash *ht) ++{ ++ if (ht->table) { ++ if (ht->use_vmalloc) ++ vfree(ht->table); ++ else ++ drm_free(ht->table, ht->size * sizeof(*ht->table), ++ DRM_MEM_HASHTAB); ++ ht->table = NULL; ++ } ++} ++EXPORT_SYMBOL(drm_ht_remove); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_hashtab.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_hashtab.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_hashtab.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_hashtab.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,67 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Simple open hash tab implementation. 
++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#ifndef DRM_HASHTAB_H ++#define DRM_HASHTAB_H ++ ++#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) ++ ++struct drm_hash_item { ++ struct hlist_node head; ++ unsigned long key; ++}; ++ ++struct drm_open_hash { ++ unsigned int size; ++ unsigned int order; ++ unsigned int fill; ++ struct hlist_head *table; ++ int use_vmalloc; ++}; ++ ++ ++extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); ++extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); ++extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, ++ unsigned long seed, int bits, int shift, ++ unsigned long add); ++extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); ++ ++extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); ++extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); ++extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); ++extern void drm_ht_remove(struct drm_open_hash *ht); ++ ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_internal.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_internal.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_internal.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_internal.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,40 @@ ++/* ++ * Copyright 2007 Red Hat, Inc ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* This header file holds function prototypes and data types that are ++ * internal to the drm (not exported to user space) but shared across ++ * drivers and platforms */ ++ ++#ifndef __DRM_INTERNAL_H__ ++#define __DRM_INTERNAL_H__ ++ ++/** ++ * Drawable information. 
++ */ ++struct drm_drawable_info { ++ unsigned int num_rects; ++ struct drm_clip_rect *rects; ++}; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_ioctl.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_ioctl.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_ioctl.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_ioctl.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,351 @@ ++/** ++ * \file drm_ioctl.c ++ * IOCTL processing for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm_core.h" ++ ++#include "linux/pci.h" ++ ++/** ++ * Get the bus id. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_unique structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Copies the bus id from drm_device::unique into user space. ++ */ ++int drm_getunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_unique *u = data; ++ ++ if (u->unique_len >= dev->unique_len) { ++ if (copy_to_user(u->unique, dev->unique, dev->unique_len)) ++ return -EFAULT; ++ } ++ u->unique_len = dev->unique_len; ++ ++ return 0; ++} ++ ++/** ++ * Set the bus id. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_unique structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Copies the bus id from userspace into drm_device::unique, and verifies that ++ * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated ++ * in interface version 1.1 and will return EBUSY when setversion has requested ++ * version 1.1 or greater. 
++ */ ++int drm_setunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_unique *u = data; ++ int domain, bus, slot, func, ret; ++ ++ if (dev->unique_len || dev->unique) ++ return -EBUSY; ++ ++ if (!u->unique_len || u->unique_len > 1024) ++ return -EINVAL; ++ ++ dev->unique_len = u->unique_len; ++ dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); ++ if (!dev->unique) ++ return -ENOMEM; ++ if (copy_from_user(dev->unique, u->unique, dev->unique_len)) ++ return -EFAULT; ++ ++ dev->unique[dev->unique_len] = '\0'; ++ ++ dev->devname = ++ drm_alloc(strlen(dev->driver->pci_driver.name) + ++ strlen(dev->unique) + 2, DRM_MEM_DRIVER); ++ if (!dev->devname) ++ return -ENOMEM; ++ ++ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, ++ dev->unique); ++ ++ /* Return error if the busid submitted doesn't match the device's actual ++ * busid. ++ */ ++ ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); ++ if (ret != 3) ++ return -EINVAL; ++ domain = bus >> 8; ++ bus &= 0xff; ++ ++ if ((domain != drm_get_pci_domain(dev)) || ++ (bus != dev->pdev->bus->number) || ++ (slot != PCI_SLOT(dev->pdev->devfn)) || ++ (func != PCI_FUNC(dev->pdev->devfn))) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int drm_set_busid(struct drm_device * dev) ++{ ++ int len; ++ if (dev->unique != NULL) ++ return -EBUSY; ++ ++ dev->unique_len = 40; ++ dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); ++ if (dev->unique == NULL) ++ return -ENOMEM; ++ ++ len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", ++ drm_get_pci_domain(dev), ++ dev->pdev->bus->number, ++ PCI_SLOT(dev->pdev->devfn), ++ PCI_FUNC(dev->pdev->devfn)); ++ if (len > dev->unique_len) ++ DRM_ERROR("buffer overflow"); ++ ++ dev->devname = ++ drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + ++ 2, DRM_MEM_DRIVER); ++ if (dev->devname == NULL) ++ return -ENOMEM; ++ ++ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, ++ dev->unique); ++ ++ return 0; ++} ++ ++/** ++ * Get a mapping information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_map structure. ++ * ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches for the mapping with the specified offset and copies its information ++ * into userspace ++ */ ++int drm_getmap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_map *map = data; ++ struct drm_map_list *r_list = NULL; ++ struct list_head *list; ++ int idx; ++ int i; ++ ++ idx = map->offset; ++ ++ mutex_lock(&dev->struct_mutex); ++ if (idx < 0) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ i = 0; ++ list_for_each(list, &dev->maplist) { ++ if (i == idx) { ++ r_list = list_entry(list, struct drm_map_list, head); ++ break; ++ } ++ i++; ++ } ++ if (!r_list || !r_list->map) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ map->offset = r_list->map->offset; ++ map->size = r_list->map->size; ++ map->type = r_list->map->type; ++ map->flags = r_list->map->flags; ++ map->handle = (void *)(unsigned long) r_list->user_token; ++ map->mtrr = r_list->map->mtrr; ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Get client information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_client structure. 
++ * ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches for the client with the specified index and copies its information ++ * into userspace ++ */ ++int drm_getclient(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_client *client = data; ++ struct drm_file *pt; ++ int idx; ++ int i; ++ ++ idx = client->idx; ++ mutex_lock(&dev->struct_mutex); ++ ++ i = 0; ++ list_for_each_entry(pt, &dev->filelist, lhead) { ++ if (i++ >= idx) { ++ client->auth = pt->authenticated; ++ client->pid = pt->pid; ++ client->uid = pt->uid; ++ client->magic = pt->magic; ++ client->iocs = pt->ioctl_count; ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ return -EINVAL; ++} ++ ++/** ++ * Get statistics information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_stats structure. ++ * ++ * \return zero on success or a negative number on failure. ++ */ ++int drm_getstats(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_stats *stats = data; ++ int i; ++ ++ memset(stats, 0, sizeof(*stats)); ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ for (i = 0; i < dev->counters; i++) { ++ if (dev->types[i] == _DRM_STAT_LOCK) ++ stats->data[i].value = ++ (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); ++ else ++ stats->data[i].value = atomic_read(&dev->counts[i]); ++ stats->data[i].type = dev->types[i]; ++ } ++ ++ stats->count = dev->counters; ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Setversion ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_lock structure. ++ * \return zero on success or negative number on failure. ++ * ++ * Sets the requested interface version ++ */ ++int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_set_version *sv = data; ++ int if_version, retcode = 0; ++ ++ if (sv->drm_di_major != -1) { ++ if (sv->drm_di_major != DRM_IF_MAJOR || ++ sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { ++ retcode = -EINVAL; ++ goto done; ++ } ++ if_version = DRM_IF_VERSION(sv->drm_di_major, ++ sv->drm_di_minor); ++ dev->if_version = max(if_version, dev->if_version); ++ if (sv->drm_di_minor >= 1) { ++ /* ++ * Version 1.1 includes tying of DRM to specific device ++ */ ++ drm_set_busid(dev); ++ } ++ } ++ ++ if (sv->drm_dd_major != -1) { ++ if (sv->drm_dd_major != dev->driver->major || ++ sv->drm_dd_minor < 0 || sv->drm_dd_minor > ++ dev->driver->minor) { ++ retcode = -EINVAL; ++ goto done; ++ } ++ ++ if (dev->driver->set_version) ++ dev->driver->set_version(dev, sv); ++ } ++ ++done: ++ sv->drm_di_major = DRM_IF_MAJOR; ++ sv->drm_di_minor = DRM_IF_MINOR; ++ sv->drm_dd_major = dev->driver->major; ++ sv->drm_dd_minor = dev->driver->minor; ++ ++ return retcode; ++} ++ ++/** No-op ioctl. 
*/ ++int drm_noop(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_ioc32.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_ioc32.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_ioc32.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1073 @@ ++/** ++ * \file drm_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the DRM. ++ * ++ * \author Paul Mackerras ++ * ++ * Copyright (C) Paul Mackerras 2005. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ */ ++#include ++ ++#include "drmP.h" ++#include "drm_core.h" ++ ++#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t) ++#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t) ++#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t) ++#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t) ++#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t) ++ ++#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t) ++#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t) ++#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t) ++#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t) ++#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t) ++#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t) ++#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t) ++ ++#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t) ++ ++#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t) ++#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t) ++ ++#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t) ++#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t) ++ ++#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t) ++#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t) ++#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t) ++#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t) ++#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t) ++#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t) ++ ++#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t) ++#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t) ++ ++#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) ++ ++typedef struct drm_version_32 { ++ int version_major; /**< Major version */ ++ int version_minor; /**< Minor version */ ++ int version_patchlevel; /**< Patch level */ ++ u32 name_len; /**< Length of name buffer */ ++ u32 name; /**< Name of driver */ ++ u32 date_len; /**< Length of date buffer */ ++ u32 date; /**< User-space buffer to hold date */ ++ u32 desc_len; /**< Length of desc buffer */ ++ u32 desc; /**< User-space buffer to hold desc */ ++} drm_version32_t; ++ ++static int compat_drm_version(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_version32_t v32; ++ struct drm_version __user *version; ++ int err; ++ ++ if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) ++ return -EFAULT; ++ ++ version = compat_alloc_user_space(sizeof(*version)); ++ if (!access_ok(VERIFY_WRITE, version, sizeof(*version))) ++ return -EFAULT; ++ if (__put_user(v32.name_len, &version->name_len) ++ || __put_user((void __user *)(unsigned long)v32.name, ++ &version->name) ++ || __put_user(v32.date_len, &version->date_len) ++ || __put_user((void __user *)(unsigned long)v32.date, ++ &version->date) ++ || __put_user(v32.desc_len, &version->desc_len) ++ || __put_user((void __user *)(unsigned long)v32.desc, ++ &version->desc)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_VERSION, (unsigned long)version); ++ if (err) ++ return err; ++ ++ if (__get_user(v32.version_major, &version->version_major) ++ || __get_user(v32.version_minor, &version->version_minor) ++ || __get_user(v32.version_patchlevel, &version->version_patchlevel) ++ || __get_user(v32.name_len, &version->name_len) ++ || __get_user(v32.date_len, &version->date_len) ++ || __get_user(v32.desc_len, &version->desc_len)) ++ return -EFAULT; ++ ++ if 
(copy_to_user((void __user *)arg, &v32, sizeof(v32))) ++ return -EFAULT; ++ return 0; ++} ++ ++typedef struct drm_unique32 { ++ u32 unique_len; /**< Length of unique */ ++ u32 unique; /**< Unique name for driver instantiation */ ++} drm_unique32_t; ++ ++static int compat_drm_getunique(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_unique32_t uq32; ++ struct drm_unique __user *u; ++ int err; ++ ++ if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) ++ return -EFAULT; ++ ++ u = compat_alloc_user_space(sizeof(*u)); ++ if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) ++ return -EFAULT; ++ if (__put_user(uq32.unique_len, &u->unique_len) ++ || __put_user((void __user *)(unsigned long)uq32.unique, ++ &u->unique)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_UNIQUE, (unsigned long)u); ++ if (err) ++ return err; ++ ++ if (__get_user(uq32.unique_len, &u->unique_len)) ++ return -EFAULT; ++ if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int compat_drm_setunique(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_unique32_t uq32; ++ struct drm_unique __user *u; ++ ++ if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) ++ return -EFAULT; ++ ++ u = compat_alloc_user_space(sizeof(*u)); ++ if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) ++ return -EFAULT; ++ if (__put_user(uq32.unique_len, &u->unique_len) ++ || __put_user((void __user *)(unsigned long)uq32.unique, ++ &u->unique)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SET_UNIQUE, (unsigned long)u); ++} ++ ++typedef struct drm_map32 { ++ u32 offset; /**< Requested physical address (0 for SAREA)*/ ++ u32 size; /**< Requested physical size (bytes) */ ++ enum drm_map_type type; /**< Type of memory to map */ ++ enum drm_map_flags flags; /**< Flags */ ++ u32 handle; /**< User-space: "Handle" to pass to mmap() */ ++ int mtrr; /**< MTRR slot used */ ++} drm_map32_t; ++ ++static int compat_drm_getmap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_map32_t __user *argp = (void __user *)arg; ++ drm_map32_t m32; ++ struct drm_map __user *map; ++ int idx, err; ++ void *handle; ++ ++ if (get_user(idx, &argp->offset)) ++ return -EFAULT; ++ ++ map = compat_alloc_user_space(sizeof(*map)); ++ if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) ++ return -EFAULT; ++ if (__put_user(idx, &map->offset)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_MAP, (unsigned long)map); ++ if (err) ++ return err; ++ ++ if (__get_user(m32.offset, &map->offset) ++ || __get_user(m32.size, &map->size) ++ || __get_user(m32.type, &map->type) ++ || __get_user(m32.flags, &map->flags) ++ || __get_user(handle, &map->handle) ++ || __get_user(m32.mtrr, &map->mtrr)) ++ return -EFAULT; ++ ++ m32.handle = (unsigned long)handle; ++ if (copy_to_user(argp, &m32, sizeof(m32))) ++ return -EFAULT; ++ return 0; ++ ++} ++ ++static int compat_drm_addmap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_map32_t __user *argp = (void __user *)arg; ++ drm_map32_t m32; ++ struct drm_map __user *map; ++ int err; ++ void *handle; ++ ++ if (copy_from_user(&m32, argp, sizeof(m32))) ++ return -EFAULT; ++ ++ map = compat_alloc_user_space(sizeof(*map)); ++ if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) ++ return -EFAULT; ++ if (__put_user(m32.offset, &map->offset) ++ || __put_user(m32.size, &map->size) ++ || __put_user(m32.type, &map->type) ++ 
|| __put_user(m32.flags, &map->flags)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_ADD_MAP, (unsigned long)map); ++ if (err) ++ return err; ++ ++ if (__get_user(m32.offset, &map->offset) ++ || __get_user(m32.mtrr, &map->mtrr) ++ || __get_user(handle, &map->handle)) ++ return -EFAULT; ++ ++ m32.handle = (unsigned long)handle; ++ if (m32.handle != (unsigned long)handle && printk_ratelimit()) ++ printk(KERN_ERR "compat_drm_addmap truncated handle" ++ " %p for type %d offset %x\n", ++ handle, m32.type, m32.offset); ++ ++ if (copy_to_user(argp, &m32, sizeof(m32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int compat_drm_rmmap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_map32_t __user *argp = (void __user *)arg; ++ struct drm_map __user *map; ++ u32 handle; ++ ++ if (get_user(handle, &argp->handle)) ++ return -EFAULT; ++ ++ map = compat_alloc_user_space(sizeof(*map)); ++ if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) ++ return -EFAULT; ++ if (__put_user((void *)(unsigned long)handle, &map->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RM_MAP, (unsigned long)map); ++} ++ ++typedef struct drm_client32 { ++ int idx; /**< Which client desired? */ ++ int auth; /**< Is client authenticated? */ ++ u32 pid; /**< Process ID */ ++ u32 uid; /**< User ID */ ++ u32 magic; /**< Magic */ ++ u32 iocs; /**< Ioctl count */ ++} drm_client32_t; ++ ++static int compat_drm_getclient(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_client32_t c32; ++ drm_client32_t __user *argp = (void __user *)arg; ++ struct drm_client __user *client; ++ int idx, err; ++ ++ if (get_user(idx, &argp->idx)) ++ return -EFAULT; ++ ++ client = compat_alloc_user_space(sizeof(*client)); ++ if (!access_ok(VERIFY_WRITE, client, sizeof(*client))) ++ return -EFAULT; ++ if (__put_user(idx, &client->idx)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_CLIENT, (unsigned long)client); ++ if (err) ++ return err; ++ ++ if (__get_user(c32.auth, &client->auth) ++ || __get_user(c32.pid, &client->pid) ++ || __get_user(c32.uid, &client->uid) ++ || __get_user(c32.magic, &client->magic) ++ || __get_user(c32.iocs, &client->iocs)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &c32, sizeof(c32))) ++ return -EFAULT; ++ return 0; ++} ++ ++typedef struct drm_stats32 { ++ u32 count; ++ struct { ++ u32 value; ++ enum drm_stat_type type; ++ } data[15]; ++} drm_stats32_t; ++ ++static int compat_drm_getstats(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_stats32_t s32; ++ drm_stats32_t __user *argp = (void __user *)arg; ++ struct drm_stats __user *stats; ++ int i, err; ++ ++ stats = compat_alloc_user_space(sizeof(*stats)); ++ if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_STATS, (unsigned long)stats); ++ if (err) ++ return err; ++ ++ if (__get_user(s32.count, &stats->count)) ++ return -EFAULT; ++ for (i = 0; i < 15; ++i) ++ if (__get_user(s32.data[i].value, &stats->data[i].value) ++ || __get_user(s32.data[i].type, &stats->data[i].type)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &s32, sizeof(s32))) ++ return -EFAULT; ++ return 0; ++} ++ ++typedef struct drm_buf_desc32 { ++ int count; /**< Number of buffers of this size */ ++ int size; /**< Size in bytes */ ++ int low_mark; /**< Low water mark */ ++ int high_mark; /**< High water mark */ ++ int flags; ++ u32 
agp_start; /**< Start address in the AGP aperture */ ++} drm_buf_desc32_t; ++ ++static int compat_drm_addbufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_desc32_t __user *argp = (void __user *)arg; ++ struct drm_buf_desc __user *buf; ++ int err; ++ unsigned long agp_start; ++ ++ buf = compat_alloc_user_space(sizeof(*buf)); ++ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)) ++ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))) ++ return -EFAULT; ++ ++ if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start)) ++ || __get_user(agp_start, &argp->agp_start) ++ || __put_user(agp_start, &buf->agp_start)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_ADD_BUFS, (unsigned long)buf); ++ if (err) ++ return err; ++ ++ if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start)) ++ || __get_user(agp_start, &buf->agp_start) ++ || __put_user(agp_start, &argp->agp_start)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int compat_drm_markbufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_desc32_t b32; ++ drm_buf_desc32_t __user *argp = (void __user *)arg; ++ struct drm_buf_desc __user *buf; ++ ++ if (copy_from_user(&b32, argp, sizeof(b32))) ++ return -EFAULT; ++ ++ buf = compat_alloc_user_space(sizeof(*buf)); ++ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))) ++ return -EFAULT; ++ ++ if (__put_user(b32.size, &buf->size) ++ || __put_user(b32.low_mark, &buf->low_mark) ++ || __put_user(b32.high_mark, &buf->high_mark)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MARK_BUFS, (unsigned long)buf); ++} ++ ++typedef struct drm_buf_info32 { ++ int count; /**< Entries in list */ ++ u32 list; ++} drm_buf_info32_t; ++ ++static int compat_drm_infobufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_info32_t req32; ++ drm_buf_info32_t __user *argp = (void __user *)arg; ++ drm_buf_desc32_t __user *to; ++ struct drm_buf_info __user *request; ++ struct drm_buf_desc __user *list; ++ size_t nbytes; ++ int i, err; ++ int count, actual; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ count = req32.count; ++ to = (drm_buf_desc32_t __user *)(unsigned long)req32.list; ++ if (count < 0) ++ count = 0; ++ if (count > 0 ++ && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) ++ return -EFAULT; ++ ++ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); ++ request = compat_alloc_user_space(nbytes); ++ if (!access_ok(VERIFY_WRITE, request, nbytes)) ++ return -EFAULT; ++ list = (struct drm_buf_desc *) (request + 1); ++ ++ if (__put_user(count, &request->count) ++ || __put_user(list, &request->list)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_INFO_BUFS, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(actual, &request->count)) ++ return -EFAULT; ++ if (count >= actual) ++ for (i = 0; i < actual; ++i) ++ if (__copy_in_user(&to[i], &list[i], ++ offsetof(struct drm_buf_desc, flags))) ++ return -EFAULT; ++ ++ if (__put_user(actual, &argp->count)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_buf_pub32 { ++ int idx; /**< Index into the master buffer list */ ++ int total; /**< Buffer size */ ++ int used; /**< Amount of buffer in use (for DMA) */ ++ u32 address; /**< Address of buffer */ ++} drm_buf_pub32_t; ++ ++typedef struct drm_buf_map32 { ++ int count; /**< Length of the buffer list */ ++ u32 virtual; /**< Mmap'd area in 
user-virtual */ ++ u32 list; /**< Buffer information */ ++} drm_buf_map32_t; ++ ++static int compat_drm_mapbufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_map32_t __user *argp = (void __user *)arg; ++ drm_buf_map32_t req32; ++ drm_buf_pub32_t __user *list32; ++ struct drm_buf_map __user *request; ++ struct drm_buf_pub __user *list; ++ int i, err; ++ int count, actual; ++ size_t nbytes; ++ void __user *addr; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ count = req32.count; ++ list32 = (void __user *)(unsigned long)req32.list; ++ ++ if (count < 0) ++ return -EINVAL; ++ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); ++ request = compat_alloc_user_space(nbytes); ++ if (!access_ok(VERIFY_WRITE, request, nbytes)) ++ return -EFAULT; ++ list = (struct drm_buf_pub *) (request + 1); ++ ++ if (__put_user(count, &request->count) ++ || __put_user(list, &request->list)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MAP_BUFS, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(actual, &request->count)) ++ return -EFAULT; ++ if (count >= actual) ++ for (i = 0; i < actual; ++i) ++ if (__copy_in_user(&list32[i], &list[i], ++ offsetof(struct drm_buf_pub, address)) ++ || __get_user(addr, &list[i].address) ++ || __put_user((unsigned long)addr, ++ &list32[i].address)) ++ return -EFAULT; ++ ++ if (__put_user(actual, &argp->count) ++ || __get_user(addr, &request->virtual) ++ || __put_user((unsigned long)addr, &argp->virtual)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_buf_free32 { ++ int count; ++ u32 list; ++} drm_buf_free32_t; ++ ++static int compat_drm_freebufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_free32_t req32; ++ struct drm_buf_free __user *request; ++ drm_buf_free32_t __user *argp = (void __user *)arg; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) ++ return -EFAULT; ++ if (__put_user(req32.count, &request->count) ++ || __put_user((int __user *)(unsigned long)req32.list, ++ &request->list)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_FREE_BUFS, (unsigned long)request); ++} ++ ++typedef struct drm_ctx_priv_map32 { ++ unsigned int ctx_id; /**< Context requesting private mapping */ ++ u32 handle; /**< Handle of map */ ++} drm_ctx_priv_map32_t; ++ ++static int compat_drm_setsareactx(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_ctx_priv_map32_t req32; ++ struct drm_ctx_priv_map __user *request; ++ drm_ctx_priv_map32_t __user *argp = (void __user *)arg; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) ++ return -EFAULT; ++ if (__put_user(req32.ctx_id, &request->ctx_id) ++ || __put_user((void *)(unsigned long)req32.handle, ++ &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); ++} ++ ++static int compat_drm_getsareactx(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct drm_ctx_priv_map __user *request; ++ drm_ctx_priv_map32_t __user *argp = (void __user *)arg; ++ int err; ++ unsigned int ctx_id; ++ void *handle; ++ ++ if (!access_ok(VERIFY_WRITE, argp, 
sizeof(*argp)) ++ || __get_user(ctx_id, &argp->ctx_id)) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) ++ return -EFAULT; ++ if (__put_user(ctx_id, &request->ctx_id)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(handle, &request->handle) ++ || __put_user((unsigned long)handle, &argp->handle)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_ctx_res32 { ++ int count; ++ u32 contexts; ++} drm_ctx_res32_t; ++ ++static int compat_drm_resctx(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_ctx_res32_t __user *argp = (void __user *)arg; ++ drm_ctx_res32_t res32; ++ struct drm_ctx_res __user *res; ++ int err; ++ ++ if (copy_from_user(&res32, argp, sizeof(res32))) ++ return -EFAULT; ++ ++ res = compat_alloc_user_space(sizeof(*res)); ++ if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) ++ return -EFAULT; ++ if (__put_user(res32.count, &res->count) ++ || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, ++ &res->contexts)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RES_CTX, (unsigned long)res); ++ if (err) ++ return err; ++ ++ if (__get_user(res32.count, &res->count) ++ || __put_user(res32.count, &argp->count)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_dma32 { ++ int context; /**< Context handle */ ++ int send_count; /**< Number of buffers to send */ ++ u32 send_indices; /**< List of handles to buffers */ ++ u32 send_sizes; /**< Lengths of data to send */ ++ enum drm_dma_flags flags; /**< Flags */ ++ int request_count; /**< Number of buffers requested */ ++ int request_size; /**< Desired size for buffers */ ++ u32 request_indices; /**< Buffer information */ ++ u32 request_sizes; ++ int granted_count; /**< Number of buffers granted */ ++} drm_dma32_t; ++ ++static int compat_drm_dma(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_dma32_t d32; ++ drm_dma32_t __user *argp = (void __user *)arg; ++ struct drm_dma __user *d; ++ int err; ++ ++ if (copy_from_user(&d32, argp, sizeof(d32))) ++ return -EFAULT; ++ ++ d = compat_alloc_user_space(sizeof(*d)); ++ if (!access_ok(VERIFY_WRITE, d, sizeof(*d))) ++ return -EFAULT; ++ ++ if (__put_user(d32.context, &d->context) ++ || __put_user(d32.send_count, &d->send_count) ++ || __put_user((int __user *)(unsigned long)d32.send_indices, ++ &d->send_indices) ++ || __put_user((int __user *)(unsigned long)d32.send_sizes, ++ &d->send_sizes) ++ || __put_user(d32.flags, &d->flags) ++ || __put_user(d32.request_count, &d->request_count) ++ || __put_user((int __user *)(unsigned long)d32.request_indices, ++ &d->request_indices) ++ || __put_user((int __user *)(unsigned long)d32.request_sizes, ++ &d->request_sizes)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_DMA, (unsigned long)d); ++ if (err) ++ return err; ++ ++ if (__get_user(d32.request_size, &d->request_size) ++ || __get_user(d32.granted_count, &d->granted_count) ++ || __put_user(d32.request_size, &argp->request_size) ++ || __put_user(d32.granted_count, &argp->granted_count)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++#if __OS_HAS_AGP ++typedef struct drm_agp_mode32 { ++ u32 mode; /**< AGP mode */ ++} drm_agp_mode32_t; ++ ++static int compat_drm_agp_enable(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ 
drm_agp_mode32_t __user *argp = (void __user *)arg; ++ drm_agp_mode32_t m32; ++ struct drm_agp_mode __user *mode; ++ ++ if (get_user(m32.mode, &argp->mode)) ++ return -EFAULT; ++ ++ mode = compat_alloc_user_space(sizeof(*mode)); ++ if (put_user(m32.mode, &mode->mode)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); ++} ++ ++typedef struct drm_agp_info32 { ++ int agp_version_major; ++ int agp_version_minor; ++ u32 mode; ++ u32 aperture_base; /* physical address */ ++ u32 aperture_size; /* bytes */ ++ u32 memory_allowed; /* bytes */ ++ u32 memory_used; ++ ++ /* PCI information */ ++ unsigned short id_vendor; ++ unsigned short id_device; ++} drm_agp_info32_t; ++ ++static int compat_drm_agp_info(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_info32_t __user *argp = (void __user *)arg; ++ drm_agp_info32_t i32; ++ struct drm_agp_info __user *info; ++ int err; ++ ++ info = compat_alloc_user_space(sizeof(*info)); ++ if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_INFO, (unsigned long)info); ++ if (err) ++ return err; ++ ++ if (__get_user(i32.agp_version_major, &info->agp_version_major) ++ || __get_user(i32.agp_version_minor, &info->agp_version_minor) ++ || __get_user(i32.mode, &info->mode) ++ || __get_user(i32.aperture_base, &info->aperture_base) ++ || __get_user(i32.aperture_size, &info->aperture_size) ++ || __get_user(i32.memory_allowed, &info->memory_allowed) ++ || __get_user(i32.memory_used, &info->memory_used) ++ || __get_user(i32.id_vendor, &info->id_vendor) ++ || __get_user(i32.id_device, &info->id_device)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &i32, sizeof(i32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_agp_buffer32 { ++ u32 size; /**< In bytes -- will round to page boundary */ ++ u32 handle; /**< Used for binding / unbinding */ ++ u32 type; /**< Type of memory to allocate */ ++ u32 physical; /**< Physical used by i810 */ ++} drm_agp_buffer32_t; ++ ++static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_buffer32_t __user *argp = (void __user *)arg; ++ drm_agp_buffer32_t req32; ++ struct drm_agp_buffer __user *request; ++ int err; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.size, &request->size) ++ || __put_user(req32.type, &request->type)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_ALLOC, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(req32.handle, &request->handle) ++ || __get_user(req32.physical, &request->physical) ++ || copy_to_user(argp, &req32, sizeof(req32))) { ++ drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_FREE, (unsigned long)request); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int compat_drm_agp_free(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_buffer32_t __user *argp = (void __user *)arg; ++ struct drm_agp_buffer __user *request; ++ u32 handle; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || get_user(handle, &argp->handle) ++ || __put_user(handle, &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ 
DRM_IOCTL_AGP_FREE, (unsigned long)request); ++} ++ ++typedef struct drm_agp_binding32 { ++ u32 handle; /**< From drm_agp_buffer */ ++ u32 offset; /**< In bytes -- will round to page boundary */ ++} drm_agp_binding32_t; ++ ++static int compat_drm_agp_bind(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_binding32_t __user *argp = (void __user *)arg; ++ drm_agp_binding32_t req32; ++ struct drm_agp_binding __user *request; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.handle, &request->handle) ++ || __put_user(req32.offset, &request->offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_BIND, (unsigned long)request); ++} ++ ++static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_binding32_t __user *argp = (void __user *)arg; ++ struct drm_agp_binding __user *request; ++ u32 handle; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || get_user(handle, &argp->handle) ++ || __put_user(handle, &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_UNBIND, (unsigned long)request); ++} ++#endif /* __OS_HAS_AGP */ ++ ++typedef struct drm_scatter_gather32 { ++ u32 size; /**< In bytes -- will round to page boundary */ ++ u32 handle; /**< Used for mapping / unmapping */ ++} drm_scatter_gather32_t; ++ ++static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_scatter_gather32_t __user *argp = (void __user *)arg; ++ struct drm_scatter_gather __user *request; ++ int err; ++ unsigned long x; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) ++ || __get_user(x, &argp->size) ++ || __put_user(x, &request->size)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SG_ALLOC, (unsigned long)request); ++ if (err) ++ return err; ++ ++ /* XXX not sure about the handle conversion here... 
*/ ++ if (__get_user(x, &request->handle) ++ || __put_user(x >> PAGE_SHIFT, &argp->handle)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int compat_drm_sg_free(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_scatter_gather32_t __user *argp = (void __user *)arg; ++ struct drm_scatter_gather __user *request; ++ unsigned long x; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) ++ || __get_user(x, &argp->handle) ++ || __put_user(x << PAGE_SHIFT, &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SG_FREE, (unsigned long)request); ++} ++ ++struct drm_wait_vblank_request32 { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ u32 signal; ++}; ++ ++struct drm_wait_vblank_reply32 { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ s32 tval_sec; ++ s32 tval_usec; ++}; ++ ++typedef union drm_wait_vblank32 { ++ struct drm_wait_vblank_request32 request; ++ struct drm_wait_vblank_reply32 reply; ++} drm_wait_vblank32_t; ++ ++static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_wait_vblank32_t __user *argp = (void __user *)arg; ++ drm_wait_vblank32_t req32; ++ union drm_wait_vblank __user *request; ++ int err; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.request.type, &request->request.type) ++ || __put_user(req32.request.sequence, &request->request.sequence) ++ || __put_user(req32.request.signal, &request->request.signal)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(req32.reply.type, &request->reply.type) ++ || __get_user(req32.reply.sequence, &request->reply.sequence) ++ || __get_user(req32.reply.tval_sec, &request->reply.tval_sec) ++ || __get_user(req32.reply.tval_usec, &request->reply.tval_usec)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &req32, sizeof(req32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++drm_ioctl_compat_t *drm_compat_ioctls[] = { ++ [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats, ++ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique, ++ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap, ++ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap, ++ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx, ++ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx, ++ [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma, ++#if __OS_HAS_AGP ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = 
compat_drm_agp_info, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind, ++#endif ++ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc, ++ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free, ++ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/drm. ++ * ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn; ++ int ret; ++ ++ ++ /* Assume that ioctls without an explicit compat routine will "just ++ * work". This may not always be a good assumption, but it's better ++ * than always failing. ++ */ ++ if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls)) ++ return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ ++ fn = drm_compat_ioctls[nr]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_compat_ioctl); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,771 @@ ++/** ++ * \file drm_irq.c ++ * IRQ support ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++#include /* For task queue support */ ++ ++/** ++ * Get interrupt from bus id. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. 
++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_irq_busid structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Finds the PCI device with the specified bus id and gets its IRQ number. ++ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal ++ * to that of the device that this DRM instance attached to. ++ */ ++int drm_irq_by_busid(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_irq_busid *p = data; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return -EINVAL; ++ ++ if ((p->busnum >> 8) != drm_get_pci_domain(dev) || ++ (p->busnum & 0xff) != dev->pdev->bus->number || ++ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) ++ return -EINVAL; ++ ++ p->irq = dev->pdev->irq; ++ ++ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, ++ p->irq); ++ ++ return 0; ++} ++ ++static void vblank_disable_fn(unsigned long arg) ++{ ++ struct drm_device *dev = (struct drm_device *)arg; ++ unsigned long irqflags; ++ int i; ++ ++ if (!dev->vblank_disable_allowed) ++ return; ++ ++ for (i = 0; i < dev->num_crtcs; i++) { ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ if (atomic_read(&dev->vblank_refcount[i]) == 0 && ++ dev->vblank_enabled[i]) { ++ DRM_DEBUG("disabling vblank on crtc %d\n", i); ++ dev->last_vblank[i] = ++ dev->driver->get_vblank_counter(dev, i); ++ dev->driver->disable_vblank(dev, i); ++ dev->vblank_enabled[i] = 0; ++ } ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ } ++} ++ ++static void drm_vblank_cleanup(struct drm_device *dev) ++{ ++ /* Bail if the driver didn't call drm_vblank_init() */ ++ if (dev->num_crtcs == 0) ++ return; ++ ++ del_timer(&dev->vblank_disable_timer); ++ ++ vblank_disable_fn((unsigned long)dev); ++ ++ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, ++ DRM_MEM_DRIVER); ++ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, ++ DRM_MEM_DRIVER); ++ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * ++ dev->num_crtcs, DRM_MEM_DRIVER); ++ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * ++ dev->num_crtcs, DRM_MEM_DRIVER); ++ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * ++ dev->num_crtcs, DRM_MEM_DRIVER); ++ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, ++ DRM_MEM_DRIVER); ++ drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * ++ dev->num_crtcs, DRM_MEM_DRIVER); ++ ++ dev->num_crtcs = 0; ++} ++ ++int drm_vblank_init(struct drm_device *dev, int num_crtcs) ++{ ++ int i, ret = -ENOMEM; ++ ++ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, ++ (unsigned long)dev); ++ init_timer_deferrable(&dev->vblank_disable_timer); ++ spin_lock_init(&dev->vbl_lock); ++ atomic_set(&dev->vbl_signal_pending, 0); ++ dev->num_crtcs = num_crtcs; ++ ++ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->vbl_queue) ++ goto err; ++ ++ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->vbl_sigs) ++ goto err; ++ ++ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->_vblank_count) ++ goto err; ++ ++ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->vblank_refcount) ++ goto err; ++ ++ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), ++ DRM_MEM_DRIVER); ++ if (!dev->vblank_enabled) ++ goto err; ++ ++ dev->last_vblank = 
drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); ++ if (!dev->last_vblank) ++ goto err; ++ ++ dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), ++ DRM_MEM_DRIVER); ++ if (!dev->vblank_inmodeset) ++ goto err; ++ ++ /* Zero per-crtc vblank stuff */ ++ for (i = 0; i < num_crtcs; i++) { ++ init_waitqueue_head(&dev->vbl_queue[i]); ++ INIT_LIST_HEAD(&dev->vbl_sigs[i]); ++ atomic_set(&dev->_vblank_count[i], 0); ++ atomic_set(&dev->vblank_refcount[i], 0); ++ } ++ ++ dev->vblank_disable_allowed = 0; ++ ++ return 0; ++ ++err: ++ drm_vblank_cleanup(dev); ++ return ret; ++} ++EXPORT_SYMBOL(drm_vblank_init); ++ ++/** ++ * Install IRQ handler. ++ * ++ * \param dev DRM device. ++ * ++ * Initializes the IRQ related data. Installs the handler, calling the driver ++ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions ++ * before and after the installation. ++ */ ++int drm_irq_install(struct drm_device * dev) ++{ ++ int ret = 0; ++ unsigned long sh_flags = 0; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return -EINVAL; ++ ++ if (dev->pdev->irq == 0) ++ return -EINVAL; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ /* Driver must have been initialized */ ++ if (!dev->dev_private) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ if (dev->irq_enabled) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EBUSY; ++ } ++ dev->irq_enabled = 1; ++ mutex_unlock(&dev->struct_mutex); ++ ++ DRM_DEBUG("irq=%d\n", dev->pdev->irq); ++ ++ /* Before installing handler */ ++ dev->driver->irq_preinstall(dev); ++ ++ /* Install handler */ ++ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) ++ sh_flags = IRQF_SHARED; ++ ++ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler, ++ sh_flags, dev->devname, dev); ++ if (ret < 0) { ++ mutex_lock(&dev->struct_mutex); ++ dev->irq_enabled = 0; ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ /* Expose the device irq to device drivers that want to export it for ++ * whatever reason. ++ */ ++ dev->irq = dev->pdev->irq; ++ ++ /* After installing handler */ ++ ret = dev->driver->irq_postinstall(dev); ++ if (ret < 0) { ++ mutex_lock(&dev->struct_mutex); ++ dev->irq_enabled = 0; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_irq_install); ++ ++/** ++ * Uninstall the IRQ handler. ++ * ++ * \param dev DRM device. ++ * ++ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. ++ */ ++int drm_irq_uninstall(struct drm_device * dev) ++{ ++ int irq_enabled; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return -EINVAL; ++ ++ mutex_lock(&dev->struct_mutex); ++ irq_enabled = dev->irq_enabled; ++ dev->irq_enabled = 0; ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!irq_enabled) ++ return -EINVAL; ++ ++ DRM_DEBUG("irq=%d\n", dev->pdev->irq); ++ ++ dev->driver->irq_uninstall(dev); ++ ++ free_irq(dev->pdev->irq, dev); ++ ++ drm_vblank_cleanup(dev); ++ ++ dev->locked_tasklet_func = NULL; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_irq_uninstall); ++ ++/** ++ * IRQ control ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_control structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls irq_install() or irq_uninstall() according to \p arg. 
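As an illustrative aside, not part of the patch: the handler below is driven from userspace through DRM_IOCTL_CONTROL, roughly what libdrm's drmCtlInstHandler() issues. The wrapper name, the already-open file descriptor and the header path are assumptions for this sketch; on recent interface versions the irq value must match the device's own interrupt in any case.

#include <sys/ioctl.h>
#include <drm/drm.h>             /* header location depends on the libdrm install */

static int example_install_irq_handler(int fd, int irq)
{
        struct drm_control ctl;

        ctl.func = DRM_INST_HANDLER;
        ctl.irq  = irq;          /* checked against dev->pdev->irq by drm_control() */

        return ioctl(fd, DRM_IOCTL_CONTROL, &ctl);   /* 0 on success, -1/errno on error */
}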
++ */ ++int drm_control(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_control *ctl = data; ++ ++ /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ ++ ++ ++ switch (ctl->func) { ++ case DRM_INST_HANDLER: ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return 0; ++ if (dev->if_version < DRM_IF_VERSION(1, 2) && ++ ctl->irq != dev->pdev->irq) ++ return -EINVAL; ++ return drm_irq_install(dev); ++ case DRM_UNINST_HANDLER: ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return 0; ++ return drm_irq_uninstall(dev); ++ default: ++ return -EINVAL; ++ } ++} ++ ++/** ++ * drm_vblank_count - retrieve "cooked" vblank counter value ++ * @dev: DRM device ++ * @crtc: which counter to retrieve ++ * ++ * Fetches the "cooked" vblank count value that represents the number of ++ * vblank events since the system was booted, including lost events due to ++ * modesetting activity. ++ */ ++u32 drm_vblank_count(struct drm_device *dev, int crtc) ++{ ++ return atomic_read(&dev->_vblank_count[crtc]); ++} ++EXPORT_SYMBOL(drm_vblank_count); ++ ++/** ++ * drm_update_vblank_count - update the master vblank counter ++ * @dev: DRM device ++ * @crtc: counter to update ++ * ++ * Call back into the driver to update the appropriate vblank counter ++ * (specified by @crtc). Deal with wraparound, if it occurred, and ++ * update the last read value so we can deal with wraparound on the next ++ * call if necessary. ++ * ++ * Only necessary when going from off->on, to account for frames we ++ * didn't get an interrupt for. ++ * ++ * Note: caller must hold dev->vbl_lock since this reads & writes ++ * device vblank fields. ++ */ ++static void drm_update_vblank_count(struct drm_device *dev, int crtc) ++{ ++ u32 cur_vblank, diff; ++ ++ /* ++ * Interrupts were disabled prior to this call, so deal with counter ++ * wrap if needed. ++ * NOTE! It's possible we lost a full dev->max_vblank_count events ++ * here if the register is small or we had vblank interrupts off for ++ * a long time. ++ */ ++ cur_vblank = dev->driver->get_vblank_counter(dev, crtc); ++ diff = cur_vblank - dev->last_vblank[crtc]; ++ if (cur_vblank < dev->last_vblank[crtc]) { ++ diff += dev->max_vblank_count; ++ ++ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", ++ crtc, dev->last_vblank[crtc], cur_vblank, diff); ++ } ++ ++ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", ++ crtc, diff); ++ ++ atomic_add(diff, &dev->_vblank_count[crtc]); ++} ++ ++/** ++ * drm_vblank_get - get a reference count on vblank events ++ * @dev: DRM device ++ * @crtc: which CRTC to own ++ * ++ * Acquire a reference count on vblank events to avoid having them disabled ++ * while in use. ++ * ++ * RETURNS ++ * Zero on success, nonzero on failure. 
++ */ ++int drm_vblank_get(struct drm_device *dev, int crtc) ++{ ++ unsigned long irqflags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ /* Going from 0->1 means we have to enable interrupts again */ ++ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && ++ !dev->vblank_enabled[crtc]) { ++ ret = dev->driver->enable_vblank(dev, crtc); ++ if (ret) ++ atomic_dec(&dev->vblank_refcount[crtc]); ++ else { ++ dev->vblank_enabled[crtc] = 1; ++ drm_update_vblank_count(dev, crtc); ++ } ++ } ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_vblank_get); ++ ++/** ++ * drm_vblank_put - give up ownership of vblank events ++ * @dev: DRM device ++ * @crtc: which counter to give up ++ * ++ * Release ownership of a given vblank counter, turning off interrupts ++ * if possible. ++ */ ++void drm_vblank_put(struct drm_device *dev, int crtc) ++{ ++ /* Last user schedules interrupt disable */ ++ if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) ++ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); ++} ++EXPORT_SYMBOL(drm_vblank_put); ++ ++/** ++ * drm_modeset_ctl - handle vblank event counter changes across mode switch ++ * @DRM_IOCTL_ARGS: standard ioctl arguments ++ * ++ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET ++ * ioctls around modesetting so that any lost vblank events are accounted for. ++ * ++ * Generally the counter will reset across mode sets. If interrupts are ++ * enabled around this call, we don't have to do anything since the counter ++ * will have already been incremented. ++ */ ++int drm_modeset_ctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_modeset_ctl *modeset = data; ++ unsigned long irqflags; ++ int crtc, ret = 0; ++ ++ /* If drm_vblank_init() hasn't been called yet, just no-op */ ++ if (!dev->num_crtcs) ++ goto out; ++ ++ crtc = modeset->crtc; ++ if (crtc >= dev->num_crtcs) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* ++ * To avoid all the problems that might happen if interrupts ++ * were enabled/disabled around or between these calls, we just ++ * have the kernel take a reference on the CRTC (just once though ++ * to avoid corrupting the count if multiple, mismatch calls occur), ++ * so that interrupts remain enabled in the interim. ++ */ ++ switch (modeset->cmd) { ++ case _DRM_PRE_MODESET: ++ if (!dev->vblank_inmodeset[crtc]) { ++ dev->vblank_inmodeset[crtc] = 1; ++ drm_vblank_get(dev, crtc); ++ } ++ break; ++ case _DRM_POST_MODESET: ++ if (dev->vblank_inmodeset[crtc]) { ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ dev->vblank_disable_allowed = 1; ++ dev->vblank_inmodeset[crtc] = 0; ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ drm_vblank_put(dev, crtc); ++ } ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++out: ++ return ret; ++} ++ ++/** ++ * Wait for VBLANK. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param data user argument, pointing to a drm_wait_vblank structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the IRQ is installed. ++ * ++ * If a signal is requested checks if this task has already scheduled the same signal ++ * for the same vblank sequence number - nothing to be done in ++ * that case. If the number of tasks waiting for the interrupt exceeds 100 the ++ * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this ++ * task. 
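Hedged usage sketch, not introduced by the patch: how a caller is expected to bracket a wait with the reference-counted helpers defined above, mirroring the non-signal path of drm_wait_vblank() below. The function name and the fixed timeout are illustrative only.

static int example_wait_next_vblank(struct drm_device *dev, int crtc)
{
        u32 target;
        int ret;

        ret = drm_vblank_get(dev, crtc);        /* 0 -> 1 re-enables the interrupt */
        if (ret)
                return ret;

        target = drm_vblank_count(dev, crtc) + 1;

        /* Same wrap-safe "within 2^23" comparison used by drm_wait_vblank(). */
        DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
                    ((drm_vblank_count(dev, crtc) - target) <= (1 << 23)));

        drm_vblank_put(dev, crtc);              /* last put arms the disable timer */
        return ret;
}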
++ * ++ * If a signal is not requested, then calls vblank_wait(). ++ */ ++int drm_wait_vblank(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ union drm_wait_vblank *vblwait = data; ++ int ret = 0; ++ unsigned int flags, seq, crtc; ++ ++ if ((!dev->pdev->irq) || (!dev->irq_enabled)) ++ return -EINVAL; ++ ++ if (vblwait->request.type & ++ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { ++ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", ++ vblwait->request.type, ++ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); ++ return -EINVAL; ++ } ++ ++ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; ++ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; ++ ++ if (crtc >= dev->num_crtcs) ++ return -EINVAL; ++ ++ ret = drm_vblank_get(dev, crtc); ++ if (ret) ++ return ret; ++ seq = drm_vblank_count(dev, crtc); ++ ++ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { ++ case _DRM_VBLANK_RELATIVE: ++ vblwait->request.sequence += seq; ++ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; ++ case _DRM_VBLANK_ABSOLUTE: ++ break; ++ default: ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ if ((flags & _DRM_VBLANK_NEXTONMISS) && ++ (seq - vblwait->request.sequence) <= (1<<23)) { ++ vblwait->request.sequence = seq + 1; ++ } ++ ++ if (flags & _DRM_VBLANK_SIGNAL) { ++ unsigned long irqflags; ++ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; ++ struct drm_vbl_sig *vbl_sig; ++ ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ ++ /* Check if this task has already scheduled the same signal ++ * for the same vblank sequence number; nothing to be done in ++ * that case ++ */ ++ list_for_each_entry(vbl_sig, vbl_sigs, head) { ++ if (vbl_sig->sequence == vblwait->request.sequence ++ && vbl_sig->info.si_signo == ++ vblwait->request.signal ++ && vbl_sig->task == current) { ++ spin_unlock_irqrestore(&dev->vbl_lock, ++ irqflags); ++ vblwait->reply.sequence = seq; ++ goto done; ++ } ++ } ++ ++ if (atomic_read(&dev->vbl_signal_pending) >= 100) { ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ret = -EBUSY; ++ goto done; ++ } ++ ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ++ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), ++ DRM_MEM_DRIVER); ++ if (!vbl_sig) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ ret = drm_vblank_get(dev, crtc); ++ if (ret) { ++ drm_free(vbl_sig, sizeof(struct drm_vbl_sig), ++ DRM_MEM_DRIVER); ++ return ret; ++ } ++ ++ atomic_inc(&dev->vbl_signal_pending); ++ ++ vbl_sig->sequence = vblwait->request.sequence; ++ vbl_sig->info.si_signo = vblwait->request.signal; ++ vbl_sig->task = current; ++ ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ ++ list_add_tail(&vbl_sig->head, vbl_sigs); ++ ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ++ vblwait->reply.sequence = seq; ++ } else { ++ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, ++ ((drm_vblank_count(dev, crtc) ++ - vblwait->request.sequence) <= (1 << 23))); ++ ++ if (ret != -EINTR) { ++ struct timeval now; ++ ++ do_gettimeofday(&now); ++ ++ vblwait->reply.tval_sec = now.tv_sec; ++ vblwait->reply.tval_usec = now.tv_usec; ++ vblwait->reply.sequence = drm_vblank_count(dev, crtc); ++ } ++ } ++ ++done: ++ drm_vblank_put(dev, crtc); ++ return ret; ++} ++ ++/** ++ * Send the VBLANK signals. ++ * ++ * \param dev DRM device. ++ * \param crtc CRTC where the vblank event occurred ++ * ++ * Sends a signal for each task in drm_device::vbl_sigs and empties the list. ++ * ++ * If a signal is not requested, then calls vblank_wait(). 
++ */ ++static void drm_vbl_send_signals(struct drm_device * dev, int crtc) ++{ ++ struct drm_vbl_sig *vbl_sig, *tmp; ++ struct list_head *vbl_sigs; ++ unsigned int vbl_seq; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev->vbl_lock, flags); ++ ++ vbl_sigs = &dev->vbl_sigs[crtc]; ++ vbl_seq = drm_vblank_count(dev, crtc); ++ ++ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { ++ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { ++ vbl_sig->info.si_code = vbl_seq; ++ send_sig_info(vbl_sig->info.si_signo, ++ &vbl_sig->info, vbl_sig->task); ++ ++ list_del(&vbl_sig->head); ++ ++ drm_free(vbl_sig, sizeof(*vbl_sig), ++ DRM_MEM_DRIVER); ++ atomic_dec(&dev->vbl_signal_pending); ++ drm_vblank_put(dev, crtc); ++ } ++ } ++ ++ spin_unlock_irqrestore(&dev->vbl_lock, flags); ++} ++ ++/** ++ * drm_handle_vblank - handle a vblank event ++ * @dev: DRM device ++ * @crtc: where this event occurred ++ * ++ * Drivers should call this routine in their vblank interrupt handlers to ++ * update the vblank counter and send any signals that may be pending. ++ */ ++void drm_handle_vblank(struct drm_device *dev, int crtc) ++{ ++ atomic_inc(&dev->_vblank_count[crtc]); ++ DRM_WAKEUP(&dev->vbl_queue[crtc]); ++ drm_vbl_send_signals(dev, crtc); ++} ++EXPORT_SYMBOL(drm_handle_vblank); ++ ++/** ++ * Tasklet wrapper function. ++ * ++ * \param data DRM device in disguise. ++ * ++ * Attempts to grab the HW lock and calls the driver callback on success. On ++ * failure, leave the lock marked as contended so the callback can be called ++ * from drm_unlock(). ++ */ ++static void drm_locked_tasklet_func(unsigned long data) ++{ ++ struct drm_device *dev = (struct drm_device *)data; ++ unsigned long irqflags; ++ void (*tasklet_func)(struct drm_device *); ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ tasklet_func = dev->locked_tasklet_func; ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ ++ if (!tasklet_func || ++ !drm_lock_take(&dev->lock, ++ DRM_KERNEL_CONTEXT)) { ++ return; ++ } ++ ++ dev->lock.lock_time = jiffies; ++ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ tasklet_func = dev->locked_tasklet_func; ++ dev->locked_tasklet_func = NULL; ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ ++ if (tasklet_func != NULL) ++ tasklet_func(dev); ++ ++ drm_lock_free(&dev->lock, ++ DRM_KERNEL_CONTEXT); ++} ++ ++/** ++ * Schedule a tasklet to call back a driver hook with the HW lock held. ++ * ++ * \param dev DRM device. ++ * \param func Driver callback. ++ * ++ * This is intended for triggering actions that require the HW lock from an ++ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler ++ * completes. Note that the callback may be called from interrupt or process ++ * context, it must not make any assumptions about this. Also, the HW lock will ++ * be held with the kernel context or any client context. 
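Illustrative sketch only, not applied by this patch: the call a driver's interrupt handler is expected to make so that the counter, wait-queue and signal machinery above actually run. The handler name and the hard-coded CRTC 0 are assumptions; a real driver first reads and acknowledges its own status registers.

static irqreturn_t example_driver_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;   /* request_irq() above passes dev as dev_id */

        /* ... read/ack the hardware vblank status here ... */

        drm_handle_vblank(dev, 0);      /* bump counter, wake waiters, send signals */

        return IRQ_HANDLED;
}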
++ */ ++void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *)) ++{ ++ unsigned long irqflags; ++ static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) || ++ test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state)) ++ return; ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ ++ if (dev->locked_tasklet_func) { ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ return; ++ } ++ ++ dev->locked_tasklet_func = func; ++ ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ ++ drm_tasklet.data = (unsigned long)dev; ++ ++ tasklet_hi_schedule(&drm_tasklet); ++} ++EXPORT_SYMBOL(drm_locked_tasklet); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_lock.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_lock.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_lock.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_lock.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,389 @@ ++/** ++ * \file drm_lock.c ++ * IOCTLs for locking ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++static int drm_notifier(void *priv); ++ ++/** ++ * Lock ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_lock structure. ++ * \return zero on success or negative number on failure. ++ * ++ * Add the current task to the lock wait queue, and attempt to take to lock. 
++ */ ++int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ DECLARE_WAITQUEUE(entry, current); ++ struct drm_lock *lock = data; ++ int ret = 0; ++ ++ ++file_priv->lock_count; ++ ++ if (lock->context == DRM_KERNEL_CONTEXT) { ++ DRM_ERROR("Process %d using kernel context %d\n", ++ current->pid, lock->context); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", ++ lock->context, current->pid, ++ dev->lock.hw_lock->lock, lock->flags); ++ ++ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) ++ if (lock->context < 0) ++ return -EINVAL; ++ ++ add_wait_queue(&dev->lock.lock_queue, &entry); ++ spin_lock_bh(&dev->lock.spinlock); ++ dev->lock.user_waiters++; ++ spin_unlock_bh(&dev->lock.spinlock); ++ for (;;) { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ if (!dev->lock.hw_lock) { ++ /* Device has been unregistered */ ++ ret = -EINTR; ++ break; ++ } ++ if (drm_lock_take(&dev->lock, lock->context)) { ++ dev->lock.file_priv = file_priv; ++ dev->lock.lock_time = jiffies; ++ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ++ break; /* Got lock */ ++ } ++ ++ /* Contention */ ++ schedule(); ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ } ++ spin_lock_bh(&dev->lock.spinlock); ++ dev->lock.user_waiters--; ++ spin_unlock_bh(&dev->lock.spinlock); ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&dev->lock.lock_queue, &entry); ++ ++ DRM_DEBUG("%d %s\n", lock->context, ++ ret ? "interrupted" : "has lock"); ++ if (ret) return ret; ++ ++ /* don't set the block all signals on the master process for now ++ * really probably not the correct answer but lets us debug xkb ++ * xserver for now */ ++ if (!file_priv->master) { ++ sigemptyset(&dev->sigmask); ++ sigaddset(&dev->sigmask, SIGSTOP); ++ sigaddset(&dev->sigmask, SIGTSTP); ++ sigaddset(&dev->sigmask, SIGTTIN); ++ sigaddset(&dev->sigmask, SIGTTOU); ++ dev->sigdata.context = lock->context; ++ dev->sigdata.lock = dev->lock.hw_lock; ++ block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); ++ } ++ ++ if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) ++ dev->driver->dma_ready(dev); ++ ++ if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) ++ { ++ if (dev->driver->dma_quiescent(dev)) { ++ DRM_DEBUG("%d waiting for DMA quiescent\n", ++ lock->context); ++ return -EBUSY; ++ } ++ } ++ ++ if (dev->driver->kernel_context_switch && ++ dev->last_context != lock->context) { ++ dev->driver->kernel_context_switch(dev, dev->last_context, ++ lock->context); ++ } ++ ++ return 0; ++} ++ ++/** ++ * Unlock ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_lock structure. ++ * \return zero on success or negative number on failure. ++ * ++ * Transfer and free the lock. 
++ */ ++int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_lock *lock = data; ++ unsigned long irqflags; ++ void (*tasklet_func)(struct drm_device *); ++ ++ if (lock->context == DRM_KERNEL_CONTEXT) { ++ DRM_ERROR("Process %d using kernel context %d\n", ++ current->pid, lock->context); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ tasklet_func = dev->locked_tasklet_func; ++ dev->locked_tasklet_func = NULL; ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ if (tasklet_func != NULL) ++ tasklet_func(dev); ++ ++ atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); ++ ++ /* kernel_context_switch isn't used by any of the x86 drm ++ * modules but is required by the Sparc driver. ++ */ ++ if (dev->driver->kernel_context_switch_unlock) ++ dev->driver->kernel_context_switch_unlock(dev); ++ else { ++ if (drm_lock_free(&dev->lock,lock->context)) { ++ /* FIXME: Should really bail out here. */ ++ } ++ } ++ ++ unblock_all_signals(); ++ return 0; ++} ++ ++/** ++ * Take the heavyweight lock. ++ * ++ * \param lock lock pointer. ++ * \param context locking context. ++ * \return one if the lock is held, or zero otherwise. ++ * ++ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. ++ */ ++int drm_lock_take(struct drm_lock_data *lock_data, ++ unsigned int context) ++{ ++ unsigned int old, new, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ spin_lock_bh(&lock_data->spinlock); ++ do { ++ old = *lock; ++ if (old & _DRM_LOCK_HELD) ++ new = old | _DRM_LOCK_CONT; ++ else { ++ new = context | _DRM_LOCK_HELD | ++ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? ++ _DRM_LOCK_CONT : 0); ++ } ++ prev = cmpxchg(lock, old, new); ++ } while (prev != old); ++ spin_unlock_bh(&lock_data->spinlock); ++ ++ /* Warn on recursive locking of user contexts. */ ++ if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) { ++ if (context != DRM_KERNEL_CONTEXT) { ++ DRM_ERROR("%d holds heavyweight lock\n", ++ context); ++ } ++ return 0; ++ } ++ ++ return !_DRM_LOCK_IS_HELD(old); ++} ++ ++/** ++ * This takes a lock forcibly and hands it to context. Should ONLY be used ++ * inside *_unlock to give lock to kernel before calling *_dma_schedule. ++ * ++ * \param dev DRM device. ++ * \param lock lock pointer. ++ * \param context locking context. ++ * \return always one. ++ * ++ * Resets the lock file pointer. ++ * Marks the lock as held by the given context, via the \p cmpxchg instruction. ++ */ ++static int drm_lock_transfer(struct drm_lock_data *lock_data, ++ unsigned int context) ++{ ++ unsigned int old, new, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ lock_data->file_priv = NULL; ++ do { ++ old = *lock; ++ new = context | _DRM_LOCK_HELD; ++ prev = cmpxchg(lock, old, new); ++ } while (prev != old); ++ return 1; ++} ++ ++/** ++ * Free lock. ++ * ++ * \param dev DRM device. ++ * \param lock lock. ++ * \param context context. ++ * ++ * Resets the lock file pointer. ++ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task ++ * waiting on the lock queue. 
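A small hedged sketch, not part of the patch: how the hardware-lock word manipulated by drm_lock_take() above (and released by drm_lock_free() below) breaks down, using the flag macros defined in drm.h in this tree. The function name is made up for illustration.

static void example_decode_lock_word(unsigned int word)
{
        unsigned int ctx = _DRM_LOCKING_CONTEXT(word);  /* low bits: owning context */
        int held         = !!_DRM_LOCK_IS_HELD(word);   /* bit 31: lock is held     */
        int contended    = !!(word & _DRM_LOCK_CONT);   /* bit 30: someone waited   */

        DRM_DEBUG("lock word 0x%08x: ctx=%u held=%d contended=%d\n",
                  word, ctx, held, contended);
}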
++ */ ++int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) ++{ ++ unsigned int old, new, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ spin_lock_bh(&lock_data->spinlock); ++ if (lock_data->kernel_waiters != 0) { ++ drm_lock_transfer(lock_data, 0); ++ lock_data->idle_has_lock = 1; ++ spin_unlock_bh(&lock_data->spinlock); ++ return 1; ++ } ++ spin_unlock_bh(&lock_data->spinlock); ++ ++ do { ++ old = *lock; ++ new = _DRM_LOCKING_CONTEXT(old); ++ prev = cmpxchg(lock, old, new); ++ } while (prev != old); ++ ++ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { ++ DRM_ERROR("%d freed heavyweight lock held by %d\n", ++ context, _DRM_LOCKING_CONTEXT(old)); ++ return 1; ++ } ++ wake_up_interruptible(&lock_data->lock_queue); ++ return 0; ++} ++ ++/** ++ * If we get here, it means that the process has called DRM_IOCTL_LOCK ++ * without calling DRM_IOCTL_UNLOCK. ++ * ++ * If the lock is not held, then let the signal proceed as usual. If the lock ++ * is held, then set the contended flag and keep the signal blocked. ++ * ++ * \param priv pointer to a drm_sigdata structure. ++ * \return one if the signal should be delivered normally, or zero if the ++ * signal should be blocked. ++ */ ++static int drm_notifier(void *priv) ++{ ++ struct drm_sigdata *s = (struct drm_sigdata *) priv; ++ unsigned int old, new, prev; ++ ++ /* Allow signal delivery if lock isn't held */ ++ if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) ++ || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) ++ return 1; ++ ++ /* Otherwise, set flag to force call to ++ drmUnlock */ ++ do { ++ old = s->lock->lock; ++ new = old | _DRM_LOCK_CONT; ++ prev = cmpxchg(&s->lock->lock, old, new); ++ } while (prev != old); ++ return 0; ++} ++ ++/** ++ * This function returns immediately and takes the hw lock ++ * with the kernel context if it is free, otherwise it gets the highest priority when and if ++ * it is eventually released. ++ * ++ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held ++ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause ++ * a deadlock, which is why the "idlelock" was invented). ++ * ++ * This should be sufficient to wait for GPU idle without ++ * having to worry about starvation. 
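Usage sketch only, not added by this patch: how a driver might bracket a wait for engine idle with the idlelock defined just below. example_engine_idle() is a hypothetical hardware poll, and msleep() stands in for whatever back-off the driver prefers.

static void example_wait_for_engine_idle(struct drm_device *dev)
{
        drm_idlelock_take(&dev->lock);          /* kernel context takes the lock when free */

        while (!example_engine_idle(dev))       /* hypothetical hw status check */
                msleep(1);

        drm_idlelock_release(&dev->lock);
}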
++ */ ++ ++void drm_idlelock_take(struct drm_lock_data *lock_data) ++{ ++ int ret = 0; ++ ++ spin_lock_bh(&lock_data->spinlock); ++ lock_data->kernel_waiters++; ++ if (!lock_data->idle_has_lock) { ++ ++ spin_unlock_bh(&lock_data->spinlock); ++ ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); ++ spin_lock_bh(&lock_data->spinlock); ++ ++ if (ret == 1) ++ lock_data->idle_has_lock = 1; ++ } ++ spin_unlock_bh(&lock_data->spinlock); ++} ++EXPORT_SYMBOL(drm_idlelock_take); ++ ++void drm_idlelock_release(struct drm_lock_data *lock_data) ++{ ++ unsigned int old, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ spin_lock_bh(&lock_data->spinlock); ++ if (--lock_data->kernel_waiters == 0) { ++ if (lock_data->idle_has_lock) { ++ do { ++ old = *lock; ++ prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); ++ } while (prev != old); ++ wake_up_interruptible(&lock_data->lock_queue); ++ lock_data->idle_has_lock = 0; ++ } ++ } ++ spin_unlock_bh(&lock_data->spinlock); ++} ++EXPORT_SYMBOL(drm_idlelock_release); ++ ++int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ ++ return (file_priv->lock_count && dev->lock.hw_lock && ++ _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && ++ dev->lock.file_priv == file_priv); ++} ++ ++EXPORT_SYMBOL(drm_i_have_hw_lock); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,374 @@ ++/** ++ * \file drm_memory.c ++ * Memory management wrappers for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++#include ++#include "drmP.h" ++ ++static struct { ++ spinlock_t lock; ++ uint64_t cur_used; ++ uint64_t emer_used; ++ uint64_t low_threshold; ++ uint64_t high_threshold; ++ uint64_t emer_threshold; ++} drm_memctl = { ++ .lock = SPIN_LOCK_UNLOCKED ++}; ++ ++static inline size_t drm_size_align(size_t size) ++{ ++ size_t tmpSize = 4; ++ if (size > PAGE_SIZE) ++ return PAGE_ALIGN(size); ++ ++ while (tmpSize < size) ++ tmpSize <<= 1; ++ ++ return (size_t) tmpSize; ++} ++ ++int drm_alloc_memctl(size_t size) ++{ ++ int ret = 0; ++ unsigned long a_size = drm_size_align(size); ++ unsigned long new_used; ++ ++ spin_lock(&drm_memctl.lock); ++ new_used = drm_memctl.cur_used + a_size; ++ if (likely(new_used < drm_memctl.high_threshold)) { ++ drm_memctl.cur_used = new_used; ++ goto out; ++ } ++ ++ /* ++ * Allow small allocations from root-only processes to ++ * succeed until the emergency threshold is reached. ++ */ ++ ++ new_used += drm_memctl.emer_used; ++ if (unlikely(!DRM_SUSER(DRM_CURPROC) || ++ (a_size > 16*PAGE_SIZE) || ++ (new_used > drm_memctl.emer_threshold))) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ drm_memctl.cur_used = drm_memctl.high_threshold; ++ drm_memctl.emer_used = new_used - drm_memctl.high_threshold; ++out: ++ spin_unlock(&drm_memctl.lock); ++ return ret; ++} ++EXPORT_SYMBOL(drm_alloc_memctl); ++ ++ ++void drm_free_memctl(size_t size) ++{ ++ unsigned long a_size = drm_size_align(size); ++ ++ spin_lock(&drm_memctl.lock); ++ if (likely(a_size >= drm_memctl.emer_used)) { ++ a_size -= drm_memctl.emer_used; ++ drm_memctl.emer_used = 0; ++ } else { ++ drm_memctl.emer_used -= a_size; ++ a_size = 0; ++ } ++ drm_memctl.cur_used -= a_size; ++ spin_unlock(&drm_memctl.lock); ++} ++EXPORT_SYMBOL(drm_free_memctl); ++ ++void drm_query_memctl(uint64_t *cur_used, ++ uint64_t *emer_used, ++ uint64_t *low_threshold, ++ uint64_t *high_threshold, ++ uint64_t *emer_threshold) ++{ ++ spin_lock(&drm_memctl.lock); ++ *cur_used = drm_memctl.cur_used; ++ *emer_used = drm_memctl.emer_used; ++ *low_threshold = drm_memctl.low_threshold; ++ *high_threshold = drm_memctl.high_threshold; ++ *emer_threshold = drm_memctl.emer_threshold; ++ spin_unlock(&drm_memctl.lock); ++} ++EXPORT_SYMBOL(drm_query_memctl); ++ ++void drm_init_memctl(size_t p_low_threshold, ++ size_t p_high_threshold, ++ size_t unit_size) ++{ ++ spin_lock(&drm_memctl.lock); ++ drm_memctl.emer_used = 0; ++ drm_memctl.cur_used = 0; ++ drm_memctl.low_threshold = p_low_threshold * unit_size; ++ drm_memctl.high_threshold = p_high_threshold * unit_size; ++ drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) + ++ drm_memctl.high_threshold; ++ spin_unlock(&drm_memctl.lock); ++} ++ ++ ++#ifndef DEBUG_MEMORY ++ ++/** No-op. */ ++void drm_mem_init(void) ++{ ++} ++ ++/** ++ * Called when "/proc/dri/%dev%/mem" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param len requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ * ++ * No-op. 
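Hedged usage sketch (not part of the patch) of the accounting helpers above; drm_init_memctl() is assumed to have been called once during module setup, and it derives the emergency ceiling as the high threshold plus one sixteenth of it.

static int example_account_buffer(size_t bytes)
{
        int ret;

        ret = drm_alloc_memctl(bytes);          /* reserve against the global budget */
        if (ret)
                return ret;                     /* would cross the (emergency) ceiling */

        /* ... allocate and use the real buffer here ... */

        drm_free_memctl(bytes);                 /* hand the reservation back */
        return 0;
}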
++ */ ++int drm_mem_info(char *buf, char **start, off_t offset, ++ int len, int *eof, void *data) ++{ ++ return 0; ++} ++ ++/** Wrapper around kmalloc() */ ++void *drm_calloc(size_t nmemb, size_t size, int area) ++{ ++ return kcalloc(nmemb, size, GFP_KERNEL); ++} ++EXPORT_SYMBOL(drm_calloc); ++ ++/** Wrapper around kmalloc() and kfree() */ ++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) ++{ ++ void *pt; ++ ++ if (!(pt = kmalloc(size, GFP_KERNEL))) ++ return NULL; ++ if (oldpt && oldsize) { ++ memcpy(pt, oldpt, DRM_MIN(oldsize,size)); ++ kfree(oldpt); ++ } ++ return pt; ++} ++ ++/** ++ * Allocate pages. ++ * ++ * \param order size order. ++ * \param area memory area. (Not used.) ++ * \return page address on success, or zero on failure. ++ * ++ * Allocate and reserve free pages. ++ */ ++unsigned long drm_alloc_pages(int order, int area) ++{ ++ unsigned long address; ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ address = __get_free_pages(GFP_KERNEL, order); ++ if (!address) ++ return 0; ++ ++ /* Zero */ ++ memset((void *)address, 0, bytes); ++ ++ /* Reserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return address; ++} ++ ++/** ++ * Free pages. ++ * ++ * \param address address of the pages to free. ++ * \param order size order. ++ * \param area memory area. (Not used.) ++ * ++ * Unreserve and free pages allocated by alloc_pages(). ++ */ ++void drm_free_pages(unsigned long address, int order, int area) ++{ ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ if (!address) ++ return; ++ ++ /* Unreserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ ++ free_pages(address, order); ++} ++ ++#if __OS_HAS_AGP ++static void *agp_remap(unsigned long offset, unsigned long size, ++ struct drm_device * dev) ++{ ++ unsigned long *phys_addr_map, i, num_pages = ++ PAGE_ALIGN(size) / PAGE_SIZE; ++ struct drm_agp_mem *agpmem; ++ struct page **page_map; ++ void *addr; ++ ++ size = PAGE_ALIGN(size); ++ ++#ifdef __alpha__ ++ offset -= dev->hose->mem_space->start; ++#endif ++ ++ list_for_each_entry(agpmem, &dev->agp->memory, head) ++ if (agpmem->bound <= offset ++ && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= ++ (offset + size)) ++ break; ++ if (!agpmem) ++ return NULL; ++ ++ /* ++ * OK, we're mapping AGP space on a chipset/platform on which memory accesses by ++ * the CPU do not get remapped by the GART. We fix this by using the kernel's ++ * page-table instead (that's probably faster anyhow...). ++ */ ++ /* note: use vmalloc() because num_pages could be large... 
*/ ++ page_map = vmalloc(num_pages * sizeof(struct page *)); ++ if (!page_map) ++ return NULL; ++ ++ phys_addr_map = ++ agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; ++ for (i = 0; i < num_pages; ++i) ++ page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); ++ addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); ++ vfree(page_map); ++ ++ return addr; ++} ++ ++/** Wrapper around agp_allocate_memory() */ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) ++{ ++ return drm_agp_allocate_memory(pages, type); ++} ++#else ++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) ++{ ++ return drm_agp_allocate_memory(dev->agp->bridge, pages, type); ++} ++#endif ++ ++/** Wrapper around agp_free_memory() */ ++int drm_free_agp(DRM_AGP_MEM * handle, int pages) ++{ ++ return drm_agp_free_memory(handle) ? 0 : -EINVAL; ++} ++EXPORT_SYMBOL(drm_free_agp); ++ ++/** Wrapper around agp_bind_memory() */ ++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ++{ ++ return drm_agp_bind_memory(handle, start); ++} ++ ++/** Wrapper around agp_unbind_memory() */ ++int drm_unbind_agp(DRM_AGP_MEM * handle) ++{ ++ return drm_agp_unbind_memory(handle); ++} ++EXPORT_SYMBOL(drm_unbind_agp); ++ ++#else /* __OS_HAS_AGP*/ ++static void *agp_remap(unsigned long offset, unsigned long size, ++ struct drm_device * dev) ++{ ++ return NULL; ++} ++#endif /* agp */ ++#else ++static void *agp_remap(unsigned long offset, unsigned long size, ++ struct drm_device * dev) ++{ ++ return NULL; ++} ++#endif /* debug_memory */ ++ ++void drm_core_ioremap(struct drm_map *map, struct drm_device *dev) ++{ ++ if (drm_core_has_AGP(dev) && ++ dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) ++ map->handle = agp_remap(map->offset, map->size, dev); ++ else ++ map->handle = ioremap(map->offset, map->size); ++} ++EXPORT_SYMBOL_GPL(drm_core_ioremap); ++ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) ++void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) ++{ ++ map->handle = ioremap_wc(map->offset, map->size); ++} ++EXPORT_SYMBOL_GPL(drm_core_ioremap_wc); ++#endif ++ ++void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) ++{ ++ if (!map->handle || !map->size) ++ return; ++ ++ if (drm_core_has_AGP(dev) && ++ dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) ++ vunmap(map->handle); ++ else ++ iounmap(map->handle); ++} ++EXPORT_SYMBOL_GPL(drm_core_ioremapfree); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory_debug.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory_debug.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory_debug.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory_debug.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,403 @@ ++/** ++ * \file drm_memory_debug.c ++ * Memory management wrappers for DRM. ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++#ifdef DEBUG_MEMORY ++ ++typedef struct drm_mem_stats { ++ const char *name; ++ int succeed_count; ++ int free_count; ++ int fail_count; ++ unsigned long bytes_allocated; ++ unsigned long bytes_freed; ++} drm_mem_stats_t; ++ ++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; ++static unsigned long drm_ram_available = 0; /* In pages */ ++static unsigned long drm_ram_used = 0; ++static drm_mem_stats_t drm_mem_stats[] = { ++ [DRM_MEM_DMA] = {"dmabufs"}, ++ [DRM_MEM_SAREA] = {"sareas"}, ++ [DRM_MEM_DRIVER] = {"driver"}, ++ [DRM_MEM_MAGIC] = {"magic"}, ++ [DRM_MEM_IOCTLS] = {"ioctltab"}, ++ [DRM_MEM_MAPS] = {"maplist"}, ++ [DRM_MEM_VMAS] = {"vmalist"}, ++ [DRM_MEM_BUFS] = {"buflist"}, ++ [DRM_MEM_SEGS] = {"seglist"}, ++ [DRM_MEM_PAGES] = {"pagelist"}, ++ [DRM_MEM_FILES] = {"files"}, ++ [DRM_MEM_QUEUES] = {"queues"}, ++ [DRM_MEM_CMDS] = {"commands"}, ++ [DRM_MEM_MAPPINGS] = {"mappings"}, ++ [DRM_MEM_BUFLISTS] = {"buflists"}, ++ [DRM_MEM_AGPLISTS] = {"agplist"}, ++ [DRM_MEM_SGLISTS] = {"sglist"}, ++ [DRM_MEM_TOTALAGP] = {"totalagp"}, ++ [DRM_MEM_BOUNDAGP] = {"boundagp"}, ++ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, ++ [DRM_MEM_CTXLIST] = {"ctxlist"}, ++ [DRM_MEM_STUB] = {"stub"}, ++ {NULL, 0,} /* Last entry must be null */ ++}; ++ ++void drm_mem_init(void) ++{ ++ drm_mem_stats_t *mem; ++ struct sysinfo si; ++ ++ for (mem = drm_mem_stats; mem->name; ++mem) { ++ mem->succeed_count = 0; ++ mem->free_count = 0; ++ mem->fail_count = 0; ++ mem->bytes_allocated = 0; ++ mem->bytes_freed = 0; ++ } ++ ++ si_meminfo(&si); ++ drm_ram_available = si.totalram; ++ drm_ram_used = 0; ++} ++ ++/* drm_mem_info is called whenever a process reads /dev/drm/mem. 
*/ ++ ++static int drm__mem_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ drm_mem_stats_t *pt; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *eof = 0; ++ *start = &buf[offset]; ++ ++ DRM_PROC_PRINT(" total counts " ++ " | outstanding \n"); ++ DRM_PROC_PRINT("type alloc freed fail bytes freed" ++ " | allocs bytes\n\n"); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "system", 0, 0, 0, ++ drm_ram_available << (PAGE_SHIFT - 10)); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "locked", 0, 0, 0, drm_ram_used >> 10); ++ DRM_PROC_PRINT("\n"); ++ for (pt = drm_mem_stats; pt->name; pt++) { ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", ++ pt->name, ++ pt->succeed_count, ++ pt->free_count, ++ pt->fail_count, ++ pt->bytes_allocated, ++ pt->bytes_freed, ++ pt->succeed_count - pt->free_count, ++ (long)pt->bytes_allocated ++ - (long)pt->bytes_freed); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++int drm_mem_info(char *buf, char **start, off_t offset, ++ int len, int *eof, void *data) ++{ ++ int ret; ++ ++ spin_lock(&drm_mem_lock); ++ ret = drm__mem_info(buf, start, offset, len, eof, data); ++ spin_unlock(&drm_mem_lock); ++ return ret; ++} ++ ++void *drm_alloc(size_t size, int area) ++{ ++ void *pt; ++ ++ if (!size) { ++ DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); ++ return NULL; ++ } ++ ++ if (!(pt = kmalloc(size, GFP_KERNEL))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += size; ++ spin_unlock(&drm_mem_lock); ++ return pt; ++} ++EXPORT_SYMBOL(drm_alloc); ++ ++void *drm_calloc(size_t nmemb, size_t size, int area) ++{ ++ void *addr; ++ ++ addr = drm_alloc(nmemb * size, area); ++ if (addr != NULL) ++ memset((void *)addr, 0, size * nmemb); ++ ++ return addr; ++} ++EXPORT_SYMBOL(drm_calloc); ++ ++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) ++{ ++ void *pt; ++ ++ if (!(pt = drm_alloc(size, area))) ++ return NULL; ++ if (oldpt && oldsize) { ++ memcpy(pt, oldpt, oldsize); ++ drm_free(oldpt, oldsize, area); ++ } ++ return pt; ++} ++EXPORT_SYMBOL(drm_realloc); ++ ++void drm_free(void *pt, size_t size, int area) ++{ ++ int alloc_count; ++ int free_count; ++ ++ if (!pt) ++ DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); ++ else ++ kfree(pt); ++ spin_lock(&drm_mem_lock); ++ drm_mem_stats[area].bytes_freed += size; ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++EXPORT_SYMBOL(drm_free); ++ ++unsigned long drm_alloc_pages(int order, int area) ++{ ++ unsigned long address; ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ spin_lock(&drm_mem_lock); ++ if ((drm_ram_used >> PAGE_SHIFT) ++ > (DRM_RAM_PERCENT * drm_ram_available) / 100) { ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_unlock(&drm_mem_lock); ++ ++ address = __get_free_pages(GFP_KERNEL, order); ++ if (!address) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; 
++ drm_mem_stats[area].bytes_allocated += bytes; ++ drm_ram_used += bytes; ++ spin_unlock(&drm_mem_lock); ++ ++ /* Zero outside the lock */ ++ memset((void *)address, 0, bytes); ++ ++ /* Reserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return address; ++} ++ ++void drm_free_pages(unsigned long address, int order, int area) ++{ ++ unsigned long bytes = PAGE_SIZE << order; ++ int alloc_count; ++ int free_count; ++ unsigned long addr; ++ unsigned int sz; ++ ++ if (!address) { ++ DRM_MEM_ERROR(area, "Attempt to free address 0\n"); ++ } else { ++ /* Unreserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ free_pages(address, order); ++ } ++ ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_freed += bytes; ++ drm_ram_used -= bytes; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++ ++#if __OS_HAS_AGP ++ ++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) ++{ ++ DRM_AGP_MEM *handle; ++ ++ if (!pages) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); ++ return NULL; ++ } ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ if ((handle = drm_agp_allocate_memory(pages, type))) { ++#else ++ if ((handle = drm_agp_allocate_memory(dev->agp->bridge, pages, type))) { ++#endif ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return handle; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++} ++ ++int drm_free_agp(DRM_AGP_MEM * handle, int pages) ++{ ++ int alloc_count; ++ int free_count; ++ int retval = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Attempt to free NULL AGP handle\n"); ++ return retval; ++ } ++ ++ if (drm_agp_free_memory(handle)) { ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return 0; ++ } ++ return retval; ++} ++ ++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ++{ ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to bind NULL AGP handle\n"); ++ return retcode; ++ } ++ ++ if (!(retcode = drm_agp_bind_memory(handle, start))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++} ++ ++int drm_unbind_agp(DRM_AGP_MEM * handle) ++{ ++ int alloc_count; ++ int free_count; ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to unbind NULL AGP 
handle\n"); ++ return retcode; ++ } ++ ++ if ((retcode = drm_agp_unbind_memory(handle))) ++ return retcode; ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return retcode; ++} ++ ++#endif ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory_debug.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory_debug.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory_debug.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory_debug.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,379 @@ ++/** ++ * \file drm_memory_debug.h ++ * Memory management wrappers for DRM. ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++ ++typedef struct drm_mem_stats { ++ const char *name; ++ int succeed_count; ++ int free_count; ++ int fail_count; ++ unsigned long bytes_allocated; ++ unsigned long bytes_freed; ++} drm_mem_stats_t; ++ ++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; ++static unsigned long drm_ram_available = 0; /* In pages */ ++static unsigned long drm_ram_used = 0; ++static drm_mem_stats_t drm_mem_stats[] = ++{ ++ [DRM_MEM_DMA] = {"dmabufs"}, ++ [DRM_MEM_SAREA] = {"sareas"}, ++ [DRM_MEM_DRIVER] = {"driver"}, ++ [DRM_MEM_MAGIC] = {"magic"}, ++ [DRM_MEM_IOCTLS] = {"ioctltab"}, ++ [DRM_MEM_MAPS] = {"maplist"}, ++ [DRM_MEM_VMAS] = {"vmalist"}, ++ [DRM_MEM_BUFS] = {"buflist"}, ++ [DRM_MEM_SEGS] = {"seglist"}, ++ [DRM_MEM_PAGES] = {"pagelist"}, ++ [DRM_MEM_FILES] = {"files"}, ++ [DRM_MEM_QUEUES] = {"queues"}, ++ [DRM_MEM_CMDS] = {"commands"}, ++ [DRM_MEM_MAPPINGS] = {"mappings"}, ++ [DRM_MEM_BUFLISTS] = {"buflists"}, ++ [DRM_MEM_AGPLISTS] = {"agplist"}, ++ [DRM_MEM_SGLISTS] = {"sglist"}, ++ [DRM_MEM_TOTALAGP] = {"totalagp"}, ++ [DRM_MEM_BOUNDAGP] = {"boundagp"}, ++ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, ++ [DRM_MEM_CTXLIST] = {"ctxlist"}, ++ [DRM_MEM_STUB] = {"stub"}, ++ {NULL, 0,} /* Last entry must be null */ ++}; ++ ++void drm_mem_init (void) { ++ drm_mem_stats_t *mem; ++ struct sysinfo si; ++ ++ for (mem = drm_mem_stats; mem->name; ++mem) { ++ mem->succeed_count = 0; ++ mem->free_count = 0; ++ mem->fail_count = 0; ++ mem->bytes_allocated = 0; ++ mem->bytes_freed = 0; ++ } ++ ++ si_meminfo(&si); ++ drm_ram_available = si.totalram; ++ drm_ram_used = 0; ++} ++ ++/* drm_mem_info is called whenever a process reads /dev/drm/mem. */ ++ ++static int drm__mem_info (char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) { ++ drm_mem_stats_t *pt; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *eof = 0; ++ *start = &buf[offset]; ++ ++ DRM_PROC_PRINT(" total counts " ++ " | outstanding \n"); ++ DRM_PROC_PRINT("type alloc freed fail bytes freed" ++ " | allocs bytes\n\n"); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "system", 0, 0, 0, ++ drm_ram_available << (PAGE_SHIFT - 10)); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "locked", 0, 0, 0, drm_ram_used >> 10); ++ DRM_PROC_PRINT("\n"); ++ for (pt = drm_mem_stats; pt->name; pt++) { ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", ++ pt->name, ++ pt->succeed_count, ++ pt->free_count, ++ pt->fail_count, ++ pt->bytes_allocated, ++ pt->bytes_freed, ++ pt->succeed_count - pt->free_count, ++ (long)pt->bytes_allocated ++ - (long)pt->bytes_freed); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++int drm_mem_info (char *buf, char **start, off_t offset, ++ int len, int *eof, void *data) { ++ int ret; ++ ++ spin_lock(&drm_mem_lock); ++ ret = drm__mem_info (buf, start, offset, len, eof, data); ++ spin_unlock(&drm_mem_lock); ++ return ret; ++} ++ ++void *drm_alloc (size_t size, int area) { ++ void *pt; ++ ++ if (!size) { ++ DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); ++ return NULL; ++ } ++ ++ if (!(pt = kmalloc(size, GFP_KERNEL))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += size; ++ spin_unlock(&drm_mem_lock); ++ return pt; ++} ++ ++void *drm_calloc (size_t nmemb, size_t size, int area) { ++ void *addr; ++ 
++ addr = drm_alloc (nmemb * size, area); ++ if (addr != NULL) ++ memset((void *)addr, 0, size * nmemb); ++ ++ return addr; ++} ++ ++void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) { ++ void *pt; ++ ++ if (!(pt = drm_alloc (size, area))) ++ return NULL; ++ if (oldpt && oldsize) { ++ memcpy(pt, oldpt, oldsize); ++ drm_free (oldpt, oldsize, area); ++ } ++ return pt; ++} ++ ++void drm_free (void *pt, size_t size, int area) { ++ int alloc_count; ++ int free_count; ++ ++ if (!pt) ++ DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); ++ else ++ kfree(pt); ++ spin_lock(&drm_mem_lock); ++ drm_mem_stats[area].bytes_freed += size; ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++ ++unsigned long drm_alloc_pages (int order, int area) { ++ unsigned long address; ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ spin_lock(&drm_mem_lock); ++ if ((drm_ram_used >> PAGE_SHIFT) ++ > (DRM_RAM_PERCENT * drm_ram_available) / 100) { ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_unlock(&drm_mem_lock); ++ ++ address = __get_free_pages(GFP_KERNEL, order); ++ if (!address) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += bytes; ++ drm_ram_used += bytes; ++ spin_unlock(&drm_mem_lock); ++ ++ /* Zero outside the lock */ ++ memset((void *)address, 0, bytes); ++ ++ /* Reserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return address; ++} ++ ++void drm_free_pages (unsigned long address, int order, int area) { ++ unsigned long bytes = PAGE_SIZE << order; ++ int alloc_count; ++ int free_count; ++ unsigned long addr; ++ unsigned int sz; ++ ++ if (!address) { ++ DRM_MEM_ERROR(area, "Attempt to free address 0\n"); ++ } else { ++ /* Unreserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ free_pages(address, order); ++ } ++ ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_freed += bytes; ++ drm_ram_used -= bytes; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++ ++#if __OS_HAS_AGP ++ ++DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) { ++ DRM_AGP_MEM *handle; ++ ++ if (!pages) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); ++ return NULL; ++ } ++ ++ if ((handle = drm_agp_allocate_memory (pages, type))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return handle; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++} ++ ++int drm_free_agp (DRM_AGP_MEM * handle, int pages) { ++ int alloc_count; ++ int free_count; ++ int retval = -EINVAL; ++ ++ if (!handle) { ++ 
DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Attempt to free NULL AGP handle\n"); ++ return retval; ++ } ++ ++ if (drm_agp_free_memory (handle)) { ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return 0; ++ } ++ return retval; ++} ++ ++int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) { ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to bind NULL AGP handle\n"); ++ return retcode; ++ } ++ ++ if (!(retcode = drm_agp_bind_memory (handle, start))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++} ++ ++int drm_unbind_agp (DRM_AGP_MEM * handle) { ++ int alloc_count; ++ int free_count; ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to unbind NULL AGP handle\n"); ++ return retcode; ++ } ++ ++ if ((retcode = drm_agp_unbind_memory (handle))) ++ return retcode; ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return retcode; ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_memory.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_memory.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,61 @@ ++/** ++ * \file drm_memory.h ++ * Memory management wrappers for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include "drmP.h" ++ ++/** ++ * Cut down version of drm_memory_debug.h, which used to be called ++ * drm_memory.h. ++ */ ++ ++#if __OS_HAS_AGP ++ ++#include ++ ++#ifdef HAVE_PAGE_AGP ++#include ++#else ++# ifdef __powerpc__ ++# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) ++# else ++# define PAGE_AGP PAGE_KERNEL ++# endif ++#endif ++ ++#else /* __OS_HAS_AGP */ ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_mm.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_mm.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_mm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_mm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,298 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * ++ **************************************************************************/ ++ ++/* ++ * Generic simple memory manager implementation. Intended to be used as a base ++ * class implementation for more advanced memory managers. ++ * ++ * Note that the algorithm used is quite simple and there might be substantial ++ * performance gains if a smarter free list is implemented. Currently it is just an ++ * unordered stack of free regions. This could easily be improved if an RB-tree ++ * is used instead. At least if we expect heavy fragmentation. ++ * ++ * Aligned allocations can also see improvement. 
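The comment above is candid about the algorithm: the free list is an unordered stack of holes, allocation is a linear first-fit (or best-fit) scan, and alignment is handled by counting the padding bytes as wasted space. That search step, taken in isolation, can be written as a few lines of stand-alone C; all names below are illustrative and not part of the patch:

#include <stdio.h>

struct free_node {
        unsigned long start;    /* first byte of the hole */
        unsigned long size;     /* bytes available in the hole */
};

/* First-fit over an unordered set of holes, mirroring the logic of
 * drm_mm_search_free(): skip holes that are too small once the bytes
 * wasted on alignment padding are added to the request. */
static struct free_node *first_fit(struct free_node *holes, int n,
                                   unsigned long size, unsigned alignment)
{
        int i;

        for (i = 0; i < n; i++) {
                unsigned long wasted = 0;

                if (alignment && holes[i].start % alignment)
                        wasted = alignment - holes[i].start % alignment;

                if (holes[i].size >= size + wasted)
                        return &holes[i];
        }
        return NULL;    /* no hole fits */
}

int main(void)
{
        struct free_node holes[] = { { 3, 8 }, { 16, 64 } };
        struct free_node *hit = first_fit(holes, 2, 32, 16);

        if (hit)
                printf("allocated at %lu\n", hit->start);
        return 0;
}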
++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include ++ ++unsigned long drm_mm_tail_space(struct drm_mm *mm) ++{ ++ struct list_head *tail_node; ++ struct drm_mm_node *entry; ++ ++ tail_node = mm->ml_entry.prev; ++ entry = list_entry(tail_node, struct drm_mm_node, ml_entry); ++ if (!entry->free) ++ return 0; ++ ++ return entry->size; ++} ++ ++int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) ++{ ++ struct list_head *tail_node; ++ struct drm_mm_node *entry; ++ ++ tail_node = mm->ml_entry.prev; ++ entry = list_entry(tail_node, struct drm_mm_node, ml_entry); ++ if (!entry->free) ++ return -ENOMEM; ++ ++ if (entry->size <= size) ++ return -ENOMEM; ++ ++ entry->size -= size; ++ return 0; ++} ++ ++ ++static int drm_mm_create_tail_node(struct drm_mm *mm, ++ unsigned long start, ++ unsigned long size) ++{ ++ struct drm_mm_node *child; ++ ++ child = (struct drm_mm_node *) ++ drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); ++ if (!child) ++ return -ENOMEM; ++ ++ child->free = 1; ++ child->size = size; ++ child->start = start; ++ child->mm = mm; ++ ++ list_add_tail(&child->ml_entry, &mm->ml_entry); ++ list_add_tail(&child->fl_entry, &mm->fl_entry); ++ ++ return 0; ++} ++ ++ ++int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) ++{ ++ struct list_head *tail_node; ++ struct drm_mm_node *entry; ++ ++ tail_node = mm->ml_entry.prev; ++ entry = list_entry(tail_node, struct drm_mm_node, ml_entry); ++ if (!entry->free) { ++ return drm_mm_create_tail_node(mm, entry->start + entry->size, size); ++ } ++ entry->size += size; ++ return 0; ++} ++ ++static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, ++ unsigned long size) ++{ ++ struct drm_mm_node *child; ++ ++ child = (struct drm_mm_node *) ++ drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); ++ if (!child) ++ return NULL; ++ ++ INIT_LIST_HEAD(&child->fl_entry); ++ ++ child->free = 0; ++ child->size = size; ++ child->start = parent->start; ++ child->mm = parent->mm; ++ ++ list_add_tail(&child->ml_entry, &parent->ml_entry); ++ INIT_LIST_HEAD(&child->fl_entry); ++ ++ parent->size -= size; ++ parent->start += size; ++ return child; ++} ++ ++struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, ++ unsigned long size, unsigned alignment) ++{ ++ ++ struct drm_mm_node *align_splitoff = NULL; ++ struct drm_mm_node *child; ++ unsigned tmp = 0; ++ ++ if (alignment) ++ tmp = parent->start % alignment; ++ ++ if (tmp) { ++ align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); ++ if (!align_splitoff) ++ return NULL; ++ } ++ ++ if (parent->size == size) { ++ list_del_init(&parent->fl_entry); ++ parent->free = 0; ++ return parent; ++ } else { ++ child = drm_mm_split_at_start(parent, size); ++ } ++ ++ if (align_splitoff) ++ drm_mm_put_block(align_splitoff); ++ ++ return child; ++} ++EXPORT_SYMBOL(drm_mm_get_block); ++ ++/* ++ * Put a block. Merge with the previous and / or next block if they are free. ++ * Otherwise add to the free stack. 
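drm_mm_init(), drm_mm_search_free(), drm_mm_get_block(), drm_mm_put_block() and drm_mm_takedown() together form the whole public surface of this range allocator. A hedged sketch of the usual calling sequence follows; the aperture size, the alignment and the foo_ prefix are invented for illustration, and only the drm_mm_* calls come from the code in this file:

#include "drmP.h"

/* Illustrative only: manage a 16 MiB aperture starting at offset 0. */
static int foo_setup_aperture(struct drm_mm *mm)
{
        struct drm_mm_node *hole, *node;
        int ret;

        ret = drm_mm_init(mm, 0, 16 * 1024 * 1024);
        if (ret)
                return ret;

        /* Find a hole able to hold 4 KiB at 4 KiB alignment ... */
        hole = drm_mm_search_free(mm, 4096, 4096, 0);
        if (!hole) {
                drm_mm_takedown(mm);
                return -ENOMEM;
        }

        /* ... and carve the allocation out of it. */
        node = drm_mm_get_block(hole, 4096, 4096);
        if (!node) {
                drm_mm_takedown(mm);
                return -ENOMEM;
        }

        /* Give it back; neighbouring free nodes are merged again. */
        drm_mm_put_block(node);
        drm_mm_takedown(mm);
        return 0;
}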
++ */ ++ ++void drm_mm_put_block(struct drm_mm_node * cur) ++{ ++ ++ struct drm_mm *mm = cur->mm; ++ struct list_head *cur_head = &cur->ml_entry; ++ struct list_head *root_head = &mm->ml_entry; ++ struct drm_mm_node *prev_node = NULL; ++ struct drm_mm_node *next_node; ++ ++ int merged = 0; ++ ++ if (cur_head->prev != root_head) { ++ prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); ++ if (prev_node->free) { ++ prev_node->size += cur->size; ++ merged = 1; ++ } ++ } ++ if (cur_head->next != root_head) { ++ next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); ++ if (next_node->free) { ++ if (merged) { ++ prev_node->size += next_node->size; ++ list_del(&next_node->ml_entry); ++ list_del(&next_node->fl_entry); ++ drm_ctl_free(next_node, sizeof(*next_node), ++ DRM_MEM_MM); ++ } else { ++ next_node->size += cur->size; ++ next_node->start = cur->start; ++ merged = 1; ++ } ++ } ++ } ++ if (!merged) { ++ cur->free = 1; ++ list_add(&cur->fl_entry, &mm->fl_entry); ++ } else { ++ list_del(&cur->ml_entry); ++ drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM); ++ } ++} ++EXPORT_SYMBOL(drm_mm_put_block); ++ ++struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, ++ unsigned long size, ++ unsigned alignment, int best_match) ++{ ++ struct list_head *list; ++ const struct list_head *free_stack = &mm->fl_entry; ++ struct drm_mm_node *entry; ++ struct drm_mm_node *best; ++ unsigned long best_size; ++ unsigned wasted; ++ ++ best = NULL; ++ best_size = ~0UL; ++ ++ list_for_each(list, free_stack) { ++ entry = list_entry(list, struct drm_mm_node, fl_entry); ++ wasted = 0; ++ ++ if (entry->size < size) ++ continue; ++ ++ if (alignment) { ++ register unsigned tmp = entry->start % alignment; ++ if (tmp) ++ wasted += alignment - tmp; ++ } ++ ++ ++ if (entry->size >= size + wasted) { ++ if (!best_match) ++ return entry; ++ if (size < best_size) { ++ best = entry; ++ best_size = entry->size; ++ } ++ } ++ } ++ ++ return best; ++} ++EXPORT_SYMBOL(drm_mm_search_free); ++ ++int drm_mm_clean(struct drm_mm * mm) ++{ ++ struct list_head *head = &mm->ml_entry; ++ ++ return (head->next->next == head); ++} ++ ++int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) ++{ ++ INIT_LIST_HEAD(&mm->ml_entry); ++ INIT_LIST_HEAD(&mm->fl_entry); ++ ++ return drm_mm_create_tail_node(mm, start, size); ++} ++ ++EXPORT_SYMBOL(drm_mm_init); ++ ++void drm_mm_takedown(struct drm_mm * mm) ++{ ++ struct list_head *bnode = mm->fl_entry.next; ++ struct drm_mm_node *entry; ++ ++ entry = list_entry(bnode, struct drm_mm_node, fl_entry); ++ ++ if (entry->ml_entry.next != &mm->ml_entry || ++ entry->fl_entry.next != &mm->fl_entry) { ++ DRM_ERROR("Memory manager not clean. Delaying takedown\n"); ++ return; ++ } ++ ++ list_del(&entry->fl_entry); ++ list_del(&entry->ml_entry); ++ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM); ++} ++ ++EXPORT_SYMBOL(drm_mm_takedown); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_object.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_object.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_object.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_object.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,294 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, ++ int shareable) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ /* The refcount will be bumped to 1 when we add the ref object below. */ ++ atomic_set(&item->refcount, 0); ++ item->shareable = shareable; ++ item->owner = priv; ++ ++ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash, ++ (unsigned long)item, 31, 0, 0); ++ if (ret) ++ return ret; ++ ++ ret = drm_add_ref_object(priv, item, _DRM_REF_USE); ++ if (ret) ++ ret = drm_ht_remove_item(&dev->object_hash, &item->hash); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_add_user_object); ++ ++struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_hash_item *hash; ++ int ret; ++ struct drm_user_object *item; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ ret = drm_ht_find_item(&dev->object_hash, key, &hash); ++ if (ret) ++ return NULL; ++ ++ item = drm_hash_entry(hash, struct drm_user_object, hash); ++ ++ if (priv != item->owner) { ++ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; ++ ret = drm_ht_find_item(ht, (unsigned long)item, &hash); ++ if (ret) { ++ DRM_ERROR("Object not registered for usage\n"); ++ return NULL; ++ } ++ } ++ return item; ++} ++EXPORT_SYMBOL(drm_lookup_user_object); ++ ++static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ if (atomic_dec_and_test(&item->refcount)) { ++ ret = drm_ht_remove_item(&dev->object_hash, &item->hash); ++ BUG_ON(ret); ++ item->remove(priv, item); ++ } ++} ++ ++static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro, ++ enum drm_ref_type action) ++{ ++ int ret = 0; ++ ++ switch (action) { ++ case _DRM_REF_USE: ++ atomic_inc(&ro->refcount); ++ break; ++ default: ++ if (!ro->ref_struct_locked) { ++ break; ++ } else { ++ ro->ref_struct_locked(priv, ro, action); ++ } ++ } ++ return ret; ++} ++ ++int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object, ++ enum drm_ref_type 
ref_action) ++{ ++ int ret = 0; ++ struct drm_ref_object *item; ++ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; ++ ++ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); ++ if (!referenced_object->shareable && priv != referenced_object->owner) { ++ DRM_ERROR("Not allowed to reference this object\n"); ++ return -EINVAL; ++ } ++ ++ /* ++ * If this is not a usage reference, Check that usage has been registered ++ * first. Otherwise strange things may happen on destruction. ++ */ ++ ++ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) { ++ item = ++ drm_lookup_ref_object(priv, referenced_object, ++ _DRM_REF_USE); ++ if (!item) { ++ DRM_ERROR ++ ("Object not registered for usage by this client\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (NULL != ++ (item = ++ drm_lookup_ref_object(priv, referenced_object, ref_action))) { ++ atomic_inc(&item->refcount); ++ return drm_object_ref_action(priv, referenced_object, ++ ref_action); ++ } ++ ++ item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS); ++ if (item == NULL) { ++ DRM_ERROR("Could not allocate reference object\n"); ++ return -ENOMEM; ++ } ++ ++ atomic_set(&item->refcount, 1); ++ item->hash.key = (unsigned long)referenced_object; ++ ret = drm_ht_insert_item(ht, &item->hash); ++ item->unref_action = ref_action; ++ ++ if (ret) ++ goto out; ++ ++ list_add(&item->list, &priv->refd_objects); ++ ret = drm_object_ref_action(priv, referenced_object, ref_action); ++out: ++ return ret; ++} ++ ++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, ++ struct drm_user_object *referenced_object, ++ enum drm_ref_type ref_action) ++{ ++ struct drm_hash_item *hash; ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); ++ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], ++ (unsigned long)referenced_object, &hash); ++ if (ret) ++ return NULL; ++ ++ return drm_hash_entry(hash, struct drm_ref_object, hash); ++} ++EXPORT_SYMBOL(drm_lookup_ref_object); ++ ++static void drm_remove_other_references(struct drm_file *priv, ++ struct drm_user_object *ro) ++{ ++ int i; ++ struct drm_open_hash *ht; ++ struct drm_hash_item *hash; ++ struct drm_ref_object *item; ++ ++ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { ++ ht = &priv->refd_object_hash[i]; ++ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { ++ item = drm_hash_entry(hash, struct drm_ref_object, hash); ++ drm_remove_ref_object(priv, item); ++ } ++ } ++} ++ ++void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item) ++{ ++ int ret; ++ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; ++ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; ++ enum drm_ref_type unref_action; ++ ++ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); ++ unref_action = item->unref_action; ++ if (atomic_dec_and_test(&item->refcount)) { ++ ret = drm_ht_remove_item(ht, &item->hash); ++ BUG_ON(ret); ++ list_del_init(&item->list); ++ if (unref_action == _DRM_REF_USE) ++ drm_remove_other_references(priv, user_object); ++ drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS); ++ } ++ ++ switch (unref_action) { ++ case _DRM_REF_USE: ++ drm_deref_user_object(priv, user_object); ++ break; ++ default: ++ BUG_ON(!user_object->unref); ++ user_object->unref(priv, user_object, unref_action); ++ break; ++ } ++ ++} ++EXPORT_SYMBOL(drm_remove_ref_object); ++ ++int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, ++ enum drm_object_type type, struct drm_user_object **object) ++{ 
++ struct drm_device *dev = priv->minor->dev; ++ struct drm_user_object *uo; ++ struct drm_hash_item *hash; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash); ++ if (ret) { ++ DRM_ERROR("Could not find user object to reference.\n"); ++ goto out_err; ++ } ++ uo = drm_hash_entry(hash, struct drm_user_object, hash); ++ if (uo->type != type) { ++ ret = -EINVAL; ++ goto out_err; ++ } ++ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE); ++ if (ret) ++ goto out_err; ++ mutex_unlock(&dev->struct_mutex); ++ *object = uo; ++ return 0; ++out_err: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, ++ enum drm_object_type type) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_user_object *uo; ++ struct drm_ref_object *ro; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ uo = drm_lookup_user_object(priv, user_token); ++ if (!uo || (uo->type != type)) { ++ ret = -EINVAL; ++ goto out_err; ++ } ++ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE); ++ if (!ro) { ++ ret = -EINVAL; ++ goto out_err; ++ } ++ drm_remove_ref_object(priv, ro); ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++out_err: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_objects.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_objects.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_objects.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_objects.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,832 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#ifndef _DRM_OBJECTS_H ++#define _DRM_OBJECTS_H ++ ++struct drm_device; ++struct drm_bo_mem_reg; ++ ++/*************************************************** ++ * User space objects. 
(drm_object.c) ++ */ ++ ++#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) ++ ++enum drm_object_type { ++ drm_fence_type, ++ drm_buffer_type, ++ drm_lock_type, ++ /* ++ * Add other user space object types here. ++ */ ++ drm_driver_type0 = 256, ++ drm_driver_type1, ++ drm_driver_type2, ++ drm_driver_type3, ++ drm_driver_type4 ++}; ++ ++/* ++ * A user object is a structure that helps the drm give out user handles ++ * to kernel internal objects and to keep track of these objects so that ++ * they can be destroyed, for example when the user space process exits. ++ * Designed to be accessible using a user space 32-bit handle. ++ */ ++ ++struct drm_user_object { ++ struct drm_hash_item hash; ++ struct list_head list; ++ enum drm_object_type type; ++ atomic_t refcount; ++ int shareable; ++ struct drm_file *owner; ++ void (*ref_struct_locked) (struct drm_file *priv, ++ struct drm_user_object *obj, ++ enum drm_ref_type ref_action); ++ void (*unref) (struct drm_file *priv, struct drm_user_object *obj, ++ enum drm_ref_type unref_action); ++ void (*remove) (struct drm_file *priv, struct drm_user_object *obj); ++}; ++ ++/* ++ * A ref object is a structure which is used to ++ * keep track of references to user objects and to keep track of these ++ * references so that they can be destroyed for example when the user space ++ * process exits. Designed to be accessible using a pointer to the _user_ object. ++ */ ++ ++struct drm_ref_object { ++ struct drm_hash_item hash; ++ struct list_head list; ++ atomic_t refcount; ++ enum drm_ref_type unref_action; ++}; ++ ++/** ++ * Must be called with the struct_mutex held. ++ */ ++ ++extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, ++ int shareable); ++/** ++ * Must be called with the struct_mutex held. ++ */ ++ ++extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, ++ uint32_t key); ++ ++/* ++ * Must be called with the struct_mutex held. May temporarily release it. ++ */ ++ ++extern int drm_add_ref_object(struct drm_file *priv, ++ struct drm_user_object *referenced_object, ++ enum drm_ref_type ref_action); ++ ++/* ++ * Must be called with the struct_mutex held. ++ */ ++ ++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, ++ struct drm_user_object *referenced_object, ++ enum drm_ref_type ref_action); ++/* ++ * Must be called with the struct_mutex held. ++ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not ++ * release the struct_mutex before calling drm_remove_ref_object. ++ * This function may temporarily release the struct_mutex. ++ */ ++ ++extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item); ++extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, ++ enum drm_object_type type, ++ struct drm_user_object **object); ++extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, ++ enum drm_object_type type); ++ ++/*************************************************** ++ * Fence objects. (drm_fence.c) ++ */ ++ ++struct drm_fence_object { ++ struct drm_user_object base; ++ struct drm_device *dev; ++ atomic_t usage; ++ ++ /* ++ * The below three fields are protected by the fence manager spinlock. 
++ */ ++ ++ struct list_head ring; ++ int fence_class; ++ uint32_t native_types; ++ uint32_t type; ++ uint32_t signaled_types; ++ uint32_t sequence; ++ uint32_t waiting_types; ++ uint32_t error; ++}; ++ ++#define _DRM_FENCE_CLASSES 8 ++#define _DRM_FENCE_TYPE_EXE 0x00 ++ ++struct drm_fence_class_manager { ++ struct list_head ring; ++ uint32_t pending_flush; ++ uint32_t waiting_types; ++ wait_queue_head_t fence_queue; ++ uint32_t highest_waiting_sequence; ++ uint32_t latest_queued_sequence; ++}; ++ ++struct drm_fence_manager { ++ int initialized; ++ rwlock_t lock; ++ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES]; ++ uint32_t num_classes; ++ atomic_t count; ++}; ++ ++struct drm_fence_driver { ++ unsigned long *waiting_jiffies; ++ uint32_t num_classes; ++ uint32_t wrap_diff; ++ uint32_t flush_diff; ++ uint32_t sequence_mask; ++ ++ /* ++ * Driver implemented functions: ++ * has_irq() : 1 if the hardware can update the indicated type_flags using an ++ * irq handler. 0 if polling is required. ++ * ++ * emit() : Emit a sequence number to the command stream. ++ * Return the sequence number. ++ * ++ * flush() : Make sure the flags indicated in fc->pending_flush will eventually ++ * signal for fc->highest_received_sequence and all preceding sequences. ++ * Acknowledge by clearing the flags fc->pending_flush. ++ * ++ * poll() : Call drm_fence_handler with any new information. ++ * ++ * needed_flush() : Given the current state of the fence->type flags and previusly ++ * executed or queued flushes, return the type_flags that need flushing. ++ * ++ * wait(): Wait for the "mask" flags to signal on a given fence, performing ++ * whatever's necessary to make this happen. ++ */ ++ ++ int (*has_irq) (struct drm_device *dev, uint32_t fence_class, ++ uint32_t flags); ++ int (*emit) (struct drm_device *dev, uint32_t fence_class, ++ uint32_t flags, uint32_t *breadcrumb, ++ uint32_t *native_type); ++ void (*flush) (struct drm_device *dev, uint32_t fence_class); ++ void (*poll) (struct drm_device *dev, uint32_t fence_class, ++ uint32_t types); ++ uint32_t (*needed_flush) (struct drm_fence_object *fence); ++ int (*wait) (struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask); ++}; ++ ++extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask, ++ unsigned long end_jiffies); ++extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence, uint32_t type, ++ uint32_t error); ++extern void drm_fence_manager_init(struct drm_device *dev); ++extern void drm_fence_manager_takedown(struct drm_device *dev); ++extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence); ++extern int drm_fence_object_flush(struct drm_fence_object *fence, ++ uint32_t type); ++extern int drm_fence_object_signaled(struct drm_fence_object *fence, ++ uint32_t type); ++extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence); ++extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence); ++extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); ++extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, ++ struct drm_fence_object *src); ++extern int drm_fence_object_wait(struct drm_fence_object *fence, ++ int lazy, int ignore_signals, uint32_t mask); ++extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, ++ uint32_t fence_flags, uint32_t fence_class, ++ struct drm_fence_object 
**c_fence); ++extern int drm_fence_object_emit(struct drm_fence_object *fence, ++ uint32_t fence_flags, uint32_t class, ++ uint32_t type); ++extern void drm_fence_fill_arg(struct drm_fence_object *fence, ++ struct drm_fence_arg *arg); ++ ++extern int drm_fence_add_user_object(struct drm_file *priv, ++ struct drm_fence_object *fence, ++ int shareable); ++ ++extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++/************************************************** ++ *TTMs ++ */ ++ ++/* ++ * The ttm backend GTT interface. (In our case AGP). ++ * Any similar type of device (PCIE?) ++ * needs only to implement these functions to be usable with the TTM interface. ++ * The AGP backend implementation lives in drm_agpsupport.c ++ * basically maps these calls to available functions in agpgart. ++ * Each drm device driver gets an ++ * additional function pointer that creates these types, ++ * so that the device can choose the correct aperture. ++ * (Multiple AGP apertures, etc.) ++ * Most device drivers will let this point to the standard AGP implementation. ++ */ ++ ++#define DRM_BE_FLAG_NEEDS_FREE 0x00000001 ++#define DRM_BE_FLAG_BOUND_CACHED 0x00000002 ++ ++struct drm_ttm_backend; ++struct drm_ttm_backend_func { ++ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend); ++ int (*populate) (struct drm_ttm_backend *backend, ++ unsigned long num_pages, struct page **pages, ++ struct page *dummy_read_page); ++ void (*clear) (struct drm_ttm_backend *backend); ++ int (*bind) (struct drm_ttm_backend *backend, ++ struct drm_bo_mem_reg *bo_mem); ++ int (*unbind) (struct drm_ttm_backend *backend); ++ void (*destroy) (struct drm_ttm_backend *backend); ++}; ++ ++/** ++ * This structure associates a set of flags and methods with a drm_ttm ++ * object, and will also be subclassed by the particular backend. 
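As the comment above notes, an aperture only has to supply the six drm_ttm_backend_func callbacks to be driven by the TTM code; the AGP backend simply forwards them to agpgart. A skeleton for a hypothetical backend is sketched below. The foo_* names and the idea of a private GART table are assumptions; only the callback signatures come from this header:

#include "drmP.h"

/* Illustrative backend state; a real backend subclasses
 * struct drm_ttm_backend as described above. */
struct foo_ttm_backend {
        struct drm_ttm_backend base;
        struct page **pages;
        unsigned long num_pages;
};

static int foo_populate(struct drm_ttm_backend *backend,
                        unsigned long num_pages, struct page **pages,
                        struct page *dummy_read_page)
{
        struct foo_ttm_backend *fbe = (struct foo_ttm_backend *)backend;

        fbe->pages = pages;             /* remember the pages to map */
        fbe->num_pages = num_pages;
        return 0;
}

static void foo_clear(struct drm_ttm_backend *backend)
{
        struct foo_ttm_backend *fbe = (struct foo_ttm_backend *)backend;

        fbe->pages = NULL;
        fbe->num_pages = 0;
}

static int foo_bind(struct drm_ttm_backend *backend,
                    struct drm_bo_mem_reg *bo_mem)
{
        /* A real driver would write one aperture entry per page,
         * starting at the offset described by bo_mem->mm_node. */
        return 0;
}

static int foo_unbind(struct drm_ttm_backend *backend)
{
        /* ... and clear those entries again. */
        return 0;
}

static int foo_needs_ub_cache_adjust(struct drm_ttm_backend *backend)
{
        return 0;       /* no special cache handling for unbound ttms */
}

static void foo_destroy(struct drm_ttm_backend *backend)
{
        kfree((struct foo_ttm_backend *)backend);
}

static struct drm_ttm_backend_func foo_ttm_backend_func = {
        .needs_ub_cache_adjust  = foo_needs_ub_cache_adjust,
        .populate               = foo_populate,
        .clear                  = foo_clear,
        .bind                   = foo_bind,
        .unbind                 = foo_unbind,
        .destroy                = foo_destroy,
};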
++ * ++ * \sa #drm_agp_ttm_backend ++ */ ++struct drm_ttm_backend { ++ struct drm_device *dev; ++ uint32_t flags; ++ struct drm_ttm_backend_func *func; ++}; ++ ++struct drm_ttm { ++ struct page *dummy_read_page; ++ struct page **pages; ++ long first_himem_page; ++ long last_lomem_page; ++ uint32_t page_flags; ++ unsigned long num_pages; ++ atomic_t vma_count; ++ struct drm_device *dev; ++ int destroy; ++ uint32_t mapping_offset; ++ struct drm_ttm_backend *be; ++ unsigned long highest_lomem_entry; ++ unsigned long lowest_himem_entry; ++ enum { ++ ttm_bound, ++ ttm_evicted, ++ ttm_unbound, ++ ttm_unpopulated, ++ } state; ++ ++}; ++ ++extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, ++ uint32_t page_flags, ++ struct page *dummy_read_page); ++extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem); ++extern void drm_ttm_unbind(struct drm_ttm *ttm); ++extern void drm_ttm_evict(struct drm_ttm *ttm); ++extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); ++extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index); ++extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages); ++extern int drm_ttm_populate(struct drm_ttm *ttm); ++extern int drm_ttm_set_user(struct drm_ttm *ttm, ++ struct task_struct *tsk, ++ unsigned long start, ++ unsigned long num_pages); ++ ++/* ++ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do ++ * this which calls this function iff there are no vmas referencing it anymore. ++ * Otherwise it is called when the last vma exits. ++ */ ++ ++extern int drm_ttm_destroy(struct drm_ttm *ttm); ++ ++#define DRM_FLAG_MASKED(_old, _new, _mask) {\ ++(_old) ^= (((_old) ^ (_new)) & (_mask)); \ ++} ++ ++#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1) ++#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS) ++ ++/* ++ * Page flags. ++ */ ++ ++/* ++ * This ttm should not be cached by the CPU ++ */ ++#define DRM_TTM_PAGE_UNCACHED (1 << 0) ++/* ++ * This flat is not used at this time; I don't know what the ++ * intent was ++ */ ++#define DRM_TTM_PAGE_USED (1 << 1) ++/* ++ * This flat is not used at this time; I don't know what the ++ * intent was ++ */ ++#define DRM_TTM_PAGE_BOUND (1 << 2) ++/* ++ * This flat is not used at this time; I don't know what the ++ * intent was ++ */ ++#define DRM_TTM_PAGE_PRESENT (1 << 3) ++/* ++ * The array of page pointers was allocated with vmalloc ++ * instead of drm_calloc. ++ */ ++#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4) ++/* ++ * This ttm is mapped from user space ++ */ ++#define DRM_TTM_PAGE_USER (1 << 5) ++/* ++ * This ttm will be written to by the GPU ++ */ ++#define DRM_TTM_PAGE_WRITE (1 << 6) ++/* ++ * This ttm was mapped to the GPU, and so the contents may have ++ * been modified ++ */ ++#define DRM_TTM_PAGE_USER_DIRTY (1 << 7) ++/* ++ * This flag is not used at this time; I don't know what the ++ * intent was. ++ */ ++#define DRM_TTM_PAGE_USER_DMA (1 << 8) ++ ++/*************************************************** ++ * Buffer objects. (drm_bo.c, drm_bo_move.c) ++ */ ++ ++struct drm_bo_mem_reg { ++ struct drm_mm_node *mm_node; ++ unsigned long size; ++ unsigned long num_pages; ++ uint32_t page_alignment; ++ uint32_t mem_type; ++ /* ++ * Current buffer status flags, indicating ++ * where the buffer is located and which ++ * access modes are in effect ++ */ ++ uint64_t flags; ++ /** ++ * These are the flags proposed for ++ * a validate operation. 
If the ++ * validate succeeds, they'll get moved ++ * into the flags field ++ */ ++ uint64_t proposed_flags; ++ ++ uint32_t desired_tile_stride; ++ uint32_t hw_tile_stride; ++}; ++ ++enum drm_bo_type { ++ /* ++ * drm_bo_type_device are 'normal' drm allocations, ++ * pages are allocated from within the kernel automatically ++ * and the objects can be mmap'd from the drm device. Each ++ * drm_bo_type_device object has a unique name which can be ++ * used by other processes to share access to the underlying ++ * buffer. ++ */ ++ drm_bo_type_device, ++ /* ++ * drm_bo_type_user are buffers of pages that already exist ++ * in the process address space. They are more limited than ++ * drm_bo_type_device buffers in that they must always ++ * remain cached (as we assume the user pages are mapped cached), ++ * and they are not sharable to other processes through DRM ++ * (although, regular shared memory should still work fine). ++ */ ++ drm_bo_type_user, ++ /* ++ * drm_bo_type_kernel are buffers that exist solely for use ++ * within the kernel. The pages cannot be mapped into the ++ * process. One obvious use would be for the ring ++ * buffer where user access would not (ideally) be required. ++ */ ++ drm_bo_type_kernel, ++}; ++ ++struct drm_buffer_object { ++ struct drm_device *dev; ++ struct drm_user_object base; ++ ++ /* ++ * If there is a possibility that the usage variable is zero, ++ * then dev->struct_mutext should be locked before incrementing it. ++ */ ++ ++ atomic_t usage; ++ unsigned long buffer_start; ++ enum drm_bo_type type; ++ unsigned long offset; ++ atomic_t mapped; ++ struct drm_bo_mem_reg mem; ++ ++ struct list_head lru; ++ struct list_head ddestroy; ++ ++ uint32_t fence_type; ++ uint32_t fence_class; ++ uint32_t new_fence_type; ++ uint32_t new_fence_class; ++ struct drm_fence_object *fence; ++ uint32_t priv_flags; ++ wait_queue_head_t event_queue; ++ struct mutex mutex; ++ unsigned long num_pages; ++ ++ /* For pinned buffers */ ++ struct drm_mm_node *pinned_node; ++ uint32_t pinned_mem_type; ++ struct list_head pinned_lru; ++ ++ /* For vm */ ++ struct drm_ttm *ttm; ++ struct drm_map_list map_list; ++ uint32_t memory_type; ++ unsigned long bus_offset; ++ uint32_t vm_flags; ++ void *iomap; ++ ++#ifdef DRM_ODD_MM_COMPAT ++ /* dev->struct_mutex only protected. */ ++ struct list_head vma_list; ++ struct list_head p_mm_list; ++#endif ++ ++}; ++ ++#define _DRM_BO_FLAG_UNFENCED 0x00000001 ++#define _DRM_BO_FLAG_EVICTED 0x00000002 ++ ++/* ++ * This flag indicates that a flag called with bo->mutex held has ++ * temporarily released the buffer object mutex, (usually to wait for something). ++ * and thus any post-lock validation needs to be rerun. 
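The drm_bo_type values described above differ mainly in where the pages come from; a driver-internal object such as a ring buffer would use drm_bo_type_kernel. A hedged sketch using drm_buffer_object_create(), which is declared further down in this header, is shown below. The size is invented, and 'placement_flags' stands in for the driver's real placement mask, which this excerpt does not spell out:

#include "drmP.h"

/* Illustrative only: create a 64 KiB kernel-internal buffer object. */
static int foo_create_ring(struct drm_device *dev, uint64_t placement_flags,
                           struct drm_buffer_object **ring_bo)
{
        return drm_buffer_object_create(dev,
                                        64 * 1024,           /* size */
                                        drm_bo_type_kernel,  /* kernel-only pages */
                                        placement_flags,
                                        0,                   /* hint */
                                        0,                   /* page_alignment */
                                        0,                   /* buffer_start (unused) */
                                        ring_bo);
}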
++ */ ++ ++#define _DRM_BO_FLAG_UNLOCKED 0x00000004 ++ ++struct drm_mem_type_manager { ++ int has_type; ++ int use_type; ++ int kern_init_type; ++ struct drm_mm manager; ++ struct list_head lru; ++ struct list_head pinned; ++ uint32_t flags; ++ uint32_t drm_bus_maptype; ++ unsigned long gpu_offset; ++ unsigned long io_offset; ++ unsigned long io_size; ++ void *io_addr; ++ uint64_t size; /* size of managed area for reporting to userspace */ ++}; ++ ++struct drm_bo_lock { ++ struct drm_user_object base; ++ wait_queue_head_t queue; ++ atomic_t write_lock_pending; ++ atomic_t readers; ++}; ++ ++#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ ++#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ ++#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ ++#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap ++ before kernel access. */ ++#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ ++#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ ++ ++struct drm_buffer_manager { ++ struct drm_bo_lock bm_lock; ++ struct mutex evict_mutex; ++ int nice_mode; ++ int initialized; ++ struct drm_file *last_to_validate; ++ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; ++ struct list_head unfenced; ++ struct list_head ddestroy; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct work_struct wq; ++#else ++ struct delayed_work wq; ++#endif ++ uint32_t fence_type; ++ unsigned long cur_pages; ++ atomic_t count; ++ struct page *dummy_read_page; ++}; ++ ++struct drm_bo_driver { ++ const uint32_t *mem_type_prio; ++ const uint32_t *mem_busy_prio; ++ uint32_t num_mem_type_prio; ++ uint32_t num_mem_busy_prio; ++ struct drm_ttm_backend *(*create_ttm_backend_entry) ++ (struct drm_device *dev); ++ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, ++ uint32_t *type); ++ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags); ++ int (*init_mem_type) (struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man); ++ /* ++ * evict_flags: ++ * ++ * @bo: the buffer object to be evicted ++ * ++ * Return the bo flags for a buffer which is not mapped to the hardware. ++ * These will be placed in proposed_flags so that when the move is ++ * finished, they'll end up in bo->mem.flags ++ */ ++ uint64_t(*evict_flags) (struct drm_buffer_object *bo); ++ /* ++ * move: ++ * ++ * @bo: the buffer to move ++ * ++ * @evict: whether this motion is evicting the buffer from ++ * the graphics address space ++ * ++ * @no_wait: whether this should give up and return -EBUSY ++ * if this move would require sleeping ++ * ++ * @new_mem: the new memory region receiving the buffer ++ * ++ * Move a buffer between two memory regions. ++ */ ++ int (*move) (struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem); ++ /* ++ * ttm_cache_flush ++ */ ++ void (*ttm_cache_flush)(struct drm_ttm *ttm); ++ ++ /* ++ * command_stream_barrier ++ * ++ * @dev: The drm device. ++ * ++ * @bo: The buffer object to validate. ++ * ++ * @new_fence_class: The new fence class for the buffer object. ++ * ++ * @new_fence_type: The new fence type for the buffer object. ++ * ++ * @no_wait: whether this should give up and return -EBUSY ++ * if this operation would require sleeping ++ * ++ * Insert a command stream barrier that makes sure that the ++ * buffer is idle once the commands associated with the ++ * current validation are starting to execute. 
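Of the drm_bo_driver hooks documented above, move() is the one drivers most often keep trivial at first: a device with no usable copy engine can fall back to the CPU-copy helper drm_bo_move_memcpy(), which is declared further down in this header. A minimal sketch, with the foo_ prefix assumed:

#include "drmP.h"

static int foo_bo_move(struct drm_buffer_object *bo,
                       int evict, int no_wait,
                       struct drm_bo_mem_reg *new_mem)
{
        /* No blit engine wired up yet, so let the core copy the buffer
         * with the CPU.  A real driver would dispatch to a DMA path here
         * and use drm_bo_move_accel_cleanup() to fence the copy. */
        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}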
If an error ++ * condition is returned, or the function pointer is NULL, ++ * the drm core will force buffer idle ++ * during validation. ++ */ ++ ++ int (*command_stream_barrier) (struct drm_buffer_object *bo, ++ uint32_t new_fence_class, ++ uint32_t new_fence_type, ++ int no_wait); ++}; ++ ++/* ++ * buffer objects (drm_bo.c) ++ */ ++ ++extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_driver_finish(struct drm_device *dev); ++extern int drm_bo_driver_init(struct drm_device *dev); ++extern int drm_bo_pci_offset(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ unsigned long *bus_base, ++ unsigned long *bus_offset, ++ unsigned long *bus_size); ++extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem); ++ ++extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo); ++extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); ++extern void drm_putback_buffer_objects(struct drm_device *dev); ++extern int drm_fence_buffer_objects(struct drm_device *dev, ++ struct list_head *list, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence, ++ struct drm_fence_object **used_fence); ++extern void drm_bo_add_to_lru(struct drm_buffer_object *bo); ++extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, ++ enum drm_bo_type type, uint64_t flags, ++ uint32_t hint, uint32_t page_alignment, ++ unsigned long buffer_start, ++ struct drm_buffer_object **bo); ++extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, ++ int no_wait, int check_unfenced); ++extern int drm_bo_mem_space(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, int no_wait); ++extern int drm_bo_move_buffer(struct drm_buffer_object *bo, ++ uint64_t new_mem_flags, ++ int no_wait, int move_unfenced); ++extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean); ++extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, ++ unsigned long p_offset, unsigned long p_size, ++ int kern_init); ++extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, ++ 
uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep, ++ struct drm_buffer_object **bo_rep); ++extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, ++ uint32_t handle, ++ int check_owner); ++extern int drm_bo_do_validate(struct drm_buffer_object *bo, ++ uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep); ++extern int drm_bo_evict_cached(struct drm_buffer_object *bo); ++/* ++ * Buffer object memory move- and map helpers. ++ * drm_bo_move.c ++ */ ++ ++extern int drm_bo_move_ttm(struct drm_buffer_object *bo, ++ int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_memcpy(struct drm_buffer_object *bo, ++ int evict, ++ int no_wait, struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, ++ int evict, int no_wait, ++ uint32_t fence_class, uint32_t fence_type, ++ uint32_t fence_flags, ++ struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); ++extern unsigned long drm_bo_offset_end(unsigned long offset, ++ unsigned long end); ++ ++struct drm_bo_kmap_obj { ++ void *virtual; ++ struct page *page; ++ enum { ++ bo_map_iomap, ++ bo_map_vmap, ++ bo_map_kmap, ++ bo_map_premapped, ++ } bo_kmap_type; ++}; ++ ++static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) ++{ ++ *is_iomem = (map->bo_kmap_type == bo_map_iomap || ++ map->bo_kmap_type == bo_map_premapped); ++ return map->virtual; ++} ++extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); ++extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, ++ unsigned long num_pages, struct drm_bo_kmap_obj *map); ++extern int drm_bo_pfn_prot(struct drm_buffer_object *bo, ++ unsigned long dst_offset, ++ unsigned long *pfn, ++ pgprot_t *prot); ++extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, ++ struct drm_bo_info_rep *rep); ++ ++ ++/* ++ * drm_regman.c ++ */ ++ ++struct drm_reg { ++ struct list_head head; ++ struct drm_fence_object *fence; ++ uint32_t fence_type; ++ uint32_t new_fence_type; ++}; ++ ++struct drm_reg_manager { ++ struct list_head free; ++ struct list_head lru; ++ struct list_head unfenced; ++ ++ int (*reg_reusable)(const struct drm_reg *reg, const void *data); ++ void (*reg_destroy)(struct drm_reg *reg); ++}; ++ ++extern int drm_regs_alloc(struct drm_reg_manager *manager, ++ const void *data, ++ uint32_t fence_class, ++ uint32_t fence_type, ++ int interruptible, ++ int no_wait, ++ struct drm_reg **reg); ++ ++extern void drm_regs_fence(struct drm_reg_manager *regs, ++ struct drm_fence_object *fence); ++ ++extern void drm_regs_free(struct drm_reg_manager *manager); ++extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg); ++extern void drm_regs_init(struct drm_reg_manager *manager, ++ int (*reg_reusable)(const struct drm_reg *, ++ const void *), ++ void (*reg_destroy)(struct drm_reg *)); ++ ++/* ++ * drm_bo_lock.c ++ * Simple replacement for the hardware lock on buffer manager init and clean. 
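drm_bo_kmap()/drm_bo_kunmap() together with the drm_bmo_virtual() helper above are how kernel code gets a CPU view of a buffer regardless of whether it currently lives in system pages or in an I/O aperture. A hedged usage sketch follows; the foo_ wrapper and the choice to clear a single page are illustrative:

#include "drmP.h"

static int foo_clear_first_page(struct drm_buffer_object *bo)
{
        struct drm_bo_kmap_obj map;
        int is_iomem;
        void *virtual;
        int ret;

        ret = drm_bo_kmap(bo, 0, 1, &map);      /* map page 0 only */
        if (ret)
                return ret;

        virtual = drm_bmo_virtual(&map, &is_iomem);
        if (is_iomem)
                memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
        else
                memset(virtual, 0, PAGE_SIZE);

        drm_bo_kunmap(&map);
        return 0;
}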
++ */ ++ ++ ++extern void drm_bo_init_lock(struct drm_bo_lock *lock); ++extern void drm_bo_read_unlock(struct drm_bo_lock *lock); ++extern int drm_bo_read_lock(struct drm_bo_lock *lock, ++ int interruptible); ++extern int drm_bo_write_lock(struct drm_bo_lock *lock, ++ int interruptible, ++ struct drm_file *file_priv); ++ ++extern int drm_bo_write_unlock(struct drm_bo_lock *lock, ++ struct drm_file *file_priv); ++ ++#ifdef CONFIG_DEBUG_MUTEXES ++#define DRM_ASSERT_LOCKED(_mutex) \ ++ BUG_ON(!mutex_is_locked(_mutex) || \ ++ ((_mutex)->owner != current_thread_info())) ++#else ++#define DRM_ASSERT_LOCKED(_mutex) ++#endif ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_os_linux.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_os_linux.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_os_linux.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_os_linux.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,145 @@ ++/** ++ * \file drm_os_linux.h ++ * OS abstraction macros. ++ */ ++ ++#include /* For task queue support */ ++#include ++ ++/** Current process ID */ ++#define DRM_CURRENTPID current->pid ++#define DRM_SUSER(p) capable(CAP_SYS_ADMIN) ++#define DRM_UDELAY(d) udelay(d) ++#if LINUX_VERSION_CODE <= 0x020608 /* KERNEL_VERSION(2,6,8) */ ++#ifndef __iomem ++#define __iomem ++#endif ++/** Read a byte from a MMIO region */ ++#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) ++/** Read a word from a MMIO region */ ++#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset)) ++/** Read a dword from a MMIO region */ ++#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset)) ++/** Write a byte into a MMIO region */ ++#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset)) ++/** Write a word into a MMIO region */ ++#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) ++/** Write a dword into a MMIO region */ ++#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) ++#else ++/** Read a byte from a MMIO region */ ++#define DRM_READ8(map, offset) readb((map)->handle + (offset)) ++/** Read a word from a MMIO region */ ++#define DRM_READ16(map, offset) readw((map)->handle + (offset)) ++/** Read a dword from a MMIO region */ ++#define DRM_READ32(map, offset) readl((map)->handle + (offset)) ++/** Write a byte into a MMIO region */ ++#define DRM_WRITE8(map, offset, val) writeb(val, (map)->handle + (offset)) ++/** Write a word into a MMIO region */ ++#define DRM_WRITE16(map, offset, val) writew(val, (map)->handle + (offset)) ++/** Write a dword into a MMIO region */ ++#define DRM_WRITE32(map, offset, val) writel(val, (map)->handle + (offset)) ++#endif ++/** Read memory barrier */ ++#define DRM_READMEMORYBARRIER() rmb() ++/** Write memory barrier */ ++#define DRM_WRITEMEMORYBARRIER() wmb() ++/** Read/write memory barrier */ ++#define DRM_MEMORYBARRIER() mb() ++ ++/** IRQ handler arguments and return type and values */ ++#define DRM_IRQ_ARGS int irq, void *arg ++/** backwards compatibility with old irq return values */ ++#ifndef IRQ_HANDLED ++typedef void irqreturn_t; ++#define IRQ_HANDLED /* nothing */ ++#define IRQ_NONE /* nothing */ ++#endif ++ ++/** AGP types */ ++#if __OS_HAS_AGP ++#define DRM_AGP_MEM struct agp_memory ++#define DRM_AGP_KERN struct agp_kern_info ++#else ++/* define some dummy types for non AGP supporting 
kernels */ ++struct no_agp_kern { ++ unsigned long aper_base; ++ unsigned long aper_size; ++}; ++#define DRM_AGP_MEM int ++#define DRM_AGP_KERN struct no_agp_kern ++#endif ++ ++#if !(__OS_HAS_MTRR) ++static __inline__ int mtrr_add(unsigned long base, unsigned long size, ++ unsigned int type, char increment) ++{ ++ return -ENODEV; ++} ++ ++static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) ++{ ++ return -ENODEV; ++} ++ ++#define MTRR_TYPE_WRCOMB 1 ++#endif ++ ++/** Other copying of data to kernel space */ ++#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ ++ copy_from_user(arg1, arg2, arg3) ++/** Other copying of data from kernel space */ ++#define DRM_COPY_TO_USER(arg1, arg2, arg3) \ ++ copy_to_user(arg1, arg2, arg3) ++/* Macros for copyfrom user, but checking readability only once */ ++#define DRM_VERIFYAREA_READ( uaddr, size ) \ ++ (access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT) ++#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ ++ __copy_from_user(arg1, arg2, arg3) ++#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ ++ __copy_to_user(arg1, arg2, arg3) ++#define DRM_GET_USER_UNCHECKED(val, uaddr) \ ++ __get_user(val, uaddr) ++ ++#define DRM_HZ HZ ++ ++#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ ++do { \ ++ DECLARE_WAITQUEUE(entry, current); \ ++ unsigned long end = jiffies + (timeout); \ ++ add_wait_queue(&(queue), &entry); \ ++ \ ++ for (;;) { \ ++ __set_current_state(TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (time_after_eq(jiffies, end)) { \ ++ ret = -EBUSY; \ ++ break; \ ++ } \ ++ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \ ++ if (signal_pending(current)) { \ ++ ret = -EINTR; \ ++ break; \ ++ } \ ++ } \ ++ __set_current_state(TASK_RUNNING); \ ++ remove_wait_queue(&(queue), &entry); \ ++} while (0) ++ ++#define DRM_WAKEUP( queue ) wake_up_interruptible( queue ) ++#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) ++ ++/** Type for the OS's non-sleepable mutex lock */ ++#define DRM_SPINTYPE spinlock_t ++/** ++ * Initialize the lock for use. name is an optional string describing the ++ * lock ++ */ ++#define DRM_SPININIT(l,name) spin_lock_init(l) ++#define DRM_SPINUNINIT(l) ++#define DRM_SPINLOCK(l) spin_lock(l) ++#define DRM_SPINUNLOCK(l) spin_unlock(l) ++#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags); ++#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags); ++#define DRM_SPINLOCK_ASSERT(l) do {} while (0) +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_pci.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_pci.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_pci.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_pci.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,177 @@ ++/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */ ++/** ++ * \file drm_pci.c ++ * \brief Functions and ioctls to manage PCI memory ++ * ++ * \warning These interfaces aren't stable yet. ++ * ++ * \todo Implement the remaining ioctl's for the PCI pools. ++ * \todo The wrappers here are so thin that they would be better off inlined.. ++ * ++ * \author Jose Fonseca ++ * \author Leif Delgass ++ */ ++ ++/* ++ * Copyright 2003 Jos�Fonseca. ++ * Copyright 2003 Leif Delgass. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include "drmP.h" ++ ++/**********************************************************************/ ++/** \name PCI memory */ ++/*@{*/ ++ ++/** ++ * \brief Allocate a PCI consistent memory block, for DMA. ++ */ ++drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, ++ dma_addr_t maxaddr) ++{ ++ drm_dma_handle_t *dmah; ++ unsigned long addr; ++ size_t sz; ++#ifdef DRM_DEBUG_MEMORY ++ int area = DRM_MEM_DMA; ++ ++ spin_lock(&drm_mem_lock); ++ if ((drm_ram_used >> PAGE_SHIFT) ++ > (DRM_RAM_PERCENT * drm_ram_available) / 100) { ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_unlock(&drm_mem_lock); ++#endif ++ ++ /* pci_alloc_consistent only guarantees alignment to the smallest ++ * PAGE_SIZE order which is greater than or equal to the requested size. ++ * Return NULL here for now to make sure nobody tries for larger alignment ++ */ ++ if (align > size) ++ return NULL; ++ ++ if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) { ++ DRM_ERROR("Setting pci dma mask failed\n"); ++ return NULL; ++ } ++ ++ dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); ++ if (!dmah) ++ return NULL; ++ ++ dmah->size = size; ++ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); ++ ++#ifdef DRM_DEBUG_MEMORY ++ if (dmah->vaddr == NULL) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ kfree(dmah); ++ return NULL; ++ } ++ ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += size; ++ drm_ram_used += size; ++ spin_unlock(&drm_mem_lock); ++#else ++ if (dmah->vaddr == NULL) { ++ kfree(dmah); ++ return NULL; ++ } ++#endif ++ ++ memset(dmah->vaddr, 0, size); ++ ++ /* XXX - Is virt_to_page() legal for consistent mem? */ ++ /* Reserve */ ++ for (addr = (unsigned long)dmah->vaddr, sz = size; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return dmah; ++} ++EXPORT_SYMBOL(drm_pci_alloc); ++ ++/** ++ * \brief Free a PCI consistent memory block without freeing its descriptor. ++ * ++ * This function is for internal use in the Linux-specific DRM core code. 
++ */ ++void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah) ++{ ++ unsigned long addr; ++ size_t sz; ++#ifdef DRM_DEBUG_MEMORY ++ int area = DRM_MEM_DMA; ++ int alloc_count; ++ int free_count; ++#endif ++ ++ if (!dmah->vaddr) { ++#ifdef DRM_DEBUG_MEMORY ++ DRM_MEM_ERROR(area, "Attempt to free address 0\n"); ++#endif ++ } else { ++ /* XXX - Is virt_to_page() legal for consistent mem? */ ++ /* Unreserve */ ++ for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, ++ dmah->busaddr); ++ } ++ ++#ifdef DRM_DEBUG_MEMORY ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_freed += size; ++ drm_ram_used -= size; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++#endif ++ ++} ++ ++/** ++ * \brief Free a PCI consistent memory block ++ */ ++void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah) ++{ ++ __drm_pci_free(dev, dmah); ++ kfree(dmah); ++} ++EXPORT_SYMBOL(drm_pci_free); ++ ++/*@}*/ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_pciids.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_pciids.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_pciids.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_pciids.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,614 @@ ++/* ++ This file is auto-generated from the drm_pciids.txt in the DRM CVS ++ Please contact dri-devel@lists.sf.net to add new cards to this list ++*/ ++#define radeon_PCI_IDS \ ++ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ ++ {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ ++ {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ ++ {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ ++ {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, CHIP_R200}, \ ++ {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ ++ {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ ++ {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ ++ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ ++ {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ ++ {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ ++ {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ 
{0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ ++ {0, 0, 0} ++ ++#define r128_PCI_IDS \ ++ {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define mga_PCI_IDS \ ++ {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ ++ {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ ++ {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ ++ {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ ++ {0, 0, 0} ++ ++#define mach64_PCI_IDS \ ++ {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 
++ {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define sis_PCI_IDS \ ++ {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ ++ {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ ++ {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ ++ {0, 0, 0} ++ ++#define pvr2d_PCI_IDS \ ++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define tdfx_PCI_IDS \ ++ {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define viadrv_PCI_IDS \ ++ {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ ++ {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ ++ {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ ++ {0x1106, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ ++ {0, 0, 0} ++ ++#define i810_PCI_IDS \ ++ {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define i830_PCI_IDS \ ++ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define gamma_PCI_IDS \ ++ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define savage_PCI_IDS \ ++ {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ ++ {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ ++ {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ ++ {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ ++ {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2b, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ ++ {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ ++ {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ ++ {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ ++ {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ ++ {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ ++ {0, 0, 0} ++ ++#define ffb_PCI_IDS \ ++ {0, 0, 0} ++ ++#define i915_PCI_IDS \ ++ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x27A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x27AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x29A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2A02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2A12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x29C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x29B2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x29D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2A42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2E02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2E12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2E22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0, 0, 0} ++ ++#define imagine_PCI_IDS \ ++ {0x105d, 0x2309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128}, \ ++ {0x105d, 0x2339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128_2}, \ ++ {0x105d, 0x493d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_T2R}, \ ++ {0x105d, 0x5348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_REV4}, \ ++ {0, 0, 0} ++ ++#define nv_PCI_IDS \ ++ {0x10DE, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x002A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x002C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x0029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x002D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x00A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x0100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0112, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0171, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0172, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0173, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0174, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0175, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0176, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0178, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0179, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x017A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x017C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x017D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0189, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x01A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x01F0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0251, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0252, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0258, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0259, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x025B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0282, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x028C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0317, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, NV30}, \ ++ {0x10DE, 0x031A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0323, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0327, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0329, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0331, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0332, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0333, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x033F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0334, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0338, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0342, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0345, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x004E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10de, 0x00f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10de, 0x00f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ 
{0x10DE, 0x0142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0161, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0163, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0164, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0165, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0166, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0167, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0212, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0215, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0228, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0091, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0092, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0094, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0098, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0099, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x009C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x009D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x009E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0, 0, 0} ++ ++#define xgi_PCI_IDS \ ++ {0x18ca, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x18ca, 0x0047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drmP.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drmP.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drmP.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drmP.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1507 @@ ++/** ++ * \file drmP.h ++ * Private header for Direct Rendering Manager ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _DRM_P_H_ ++#define _DRM_P_H_ ++ ++#ifdef __KERNEL__ ++#ifdef __alpha__ ++/* add include of current.h so that "current" is defined ++ * before static inline funcs in wait.h. Doing this so we ++ * can build the DRM (part of PI DRI). 4/21/2000 S + B */ ++#include ++#endif /* __alpha__ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* For (un)lock_kernel */ ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) ++#include ++#endif ++#if defined(__alpha__) || defined(__powerpc__) ++#include /* For pte_wrprotect */ ++#endif ++#include ++#include ++#include ++#ifdef CONFIG_MTRR ++#include ++#endif ++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) ++#include ++#include ++#include ++#endif ++#include ++#include ++#include ++#include "drm.h" ++#include ++#include ++ ++#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) ++#define __OS_HAS_MTRR (defined(CONFIG_MTRR)) ++ ++#include "drm_os_linux.h" ++#include "drm_hashtab.h" ++#include "drm_internal.h" ++ ++struct drm_device; ++struct drm_file; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ++typedef unsigned long uintptr_t; ++#endif ++ ++/* If you want the memory alloc debug functionality, change define below */ ++/* #define DEBUG_MEMORY */ ++ ++/***********************************************************************/ ++/** \name DRM template customization defaults */ ++/*@{*/ ++ ++/* driver capabilities and requirements mask */ ++#define DRIVER_USE_AGP 0x1 ++#define DRIVER_REQUIRE_AGP 0x2 ++#define DRIVER_USE_MTRR 0x4 ++#define DRIVER_PCI_DMA 0x8 ++#define DRIVER_SG 0x10 ++#define DRIVER_HAVE_DMA 0x20 ++#define DRIVER_HAVE_IRQ 0x40 ++#define DRIVER_IRQ_SHARED 0x80 ++#define DRIVER_DMA_QUEUE 0x100 ++#define DRIVER_FB_DMA 0x200 ++#define DRIVER_GEM 0x400 ++ ++/*@}*/ ++ ++/***********************************************************************/ ++/** \name Begin the DRM... */ ++/*@{*/ ++ ++#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then ++ also include looping detection. */ ++ ++#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. 
*/ ++#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ ++#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ ++#define DRM_LOOPING_LIMIT 5000000 ++#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ ++#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ ++ ++#define DRM_FLAG_DEBUG 0x01 ++ ++#define DRM_MEM_DMA 0 ++#define DRM_MEM_SAREA 1 ++#define DRM_MEM_DRIVER 2 ++#define DRM_MEM_MAGIC 3 ++#define DRM_MEM_IOCTLS 4 ++#define DRM_MEM_MAPS 5 ++#define DRM_MEM_VMAS 6 ++#define DRM_MEM_BUFS 7 ++#define DRM_MEM_SEGS 8 ++#define DRM_MEM_PAGES 9 ++#define DRM_MEM_FILES 10 ++#define DRM_MEM_QUEUES 11 ++#define DRM_MEM_CMDS 12 ++#define DRM_MEM_MAPPINGS 13 ++#define DRM_MEM_BUFLISTS 14 ++#define DRM_MEM_AGPLISTS 15 ++#define DRM_MEM_TOTALAGP 16 ++#define DRM_MEM_BOUNDAGP 17 ++#define DRM_MEM_CTXBITMAP 18 ++#define DRM_MEM_STUB 19 ++#define DRM_MEM_SGLISTS 20 ++#define DRM_MEM_CTXLIST 21 ++#define DRM_MEM_MM 22 ++#define DRM_MEM_HASHTAB 23 ++#define DRM_MEM_OBJECTS 24 ++#define DRM_MEM_FENCE 25 ++#define DRM_MEM_TTM 26 ++#define DRM_MEM_BUFOBJ 27 ++ ++#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) ++#define DRM_MAP_HASH_OFFSET 0x10000000 ++#define DRM_MAP_HASH_ORDER 12 ++#define DRM_OBJECT_HASH_ORDER 12 ++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) ++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) ++/* ++ * This should be small enough to allow the use of kmalloc for hash tables ++ * instead of vmalloc. ++ */ ++ ++#define DRM_FILE_HASH_ORDER 8 ++#define DRM_MM_INIT_MAX_PAGES 256 ++ ++/*@}*/ ++ ++#include "drm_compat.h" ++ ++/***********************************************************************/ ++/** \name Macros to make printk easier */ ++/*@{*/ ++ ++/** ++ * Error output. ++ * ++ * \param fmt printf() like format string. ++ * \param arg arguments ++ */ ++#define DRM_ERROR(fmt, arg...) \ ++ printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) ++ ++/** ++ * Memory error output. ++ * ++ * \param area memory area where the error occurred. ++ * \param fmt printf() like format string. ++ * \param arg arguments ++ */ ++#define DRM_MEM_ERROR(area, fmt, arg...) \ ++ printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ ++ drm_mem_stats[area].name , ##arg) ++#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) ++ ++/** ++ * Debug output. ++ * ++ * \param fmt printf() like format string. ++ * \param arg arguments ++ */ ++#if DRM_DEBUG_CODE ++#define DRM_DEBUG(fmt, arg...) \ ++ do { \ ++ if ( drm_debug ) \ ++ printk(KERN_DEBUG \ ++ "[" DRM_NAME ":%s] " fmt , \ ++ __FUNCTION__ , ##arg); \ ++ } while (0) ++#else ++#define DRM_DEBUG(fmt, arg...) do { } while (0) ++#endif ++ ++#define DRM_PROC_LIMIT (PAGE_SIZE-80) ++ ++#define DRM_PROC_PRINT(fmt, arg...) \ ++ len += sprintf(&buf[len], fmt , ##arg); \ ++ if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; } ++ ++#define DRM_PROC_PRINT_RET(ret, fmt, arg...) 
\ ++ len += sprintf(&buf[len], fmt , ##arg); \ ++ if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } ++ ++/*@}*/ ++ ++/***********************************************************************/ ++/** \name Internal types and structures */ ++/*@{*/ ++ ++#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) ++#define DRM_MIN(a,b) min(a,b) ++#define DRM_MAX(a,b) max(a,b) ++ ++#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) ++#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) ++#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) ++ ++#define DRM_IF_VERSION(maj, min) (maj << 16 | min) ++/** ++ * Get the private SAREA mapping. ++ * ++ * \param _dev DRM device. ++ * \param _ctx context number. ++ * \param _map output mapping. ++ */ ++#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ ++ (_map) = (_dev)->context_sareas[_ctx]; \ ++} while(0) ++ ++/** ++ * Test that the hardware lock is held by the caller, returning otherwise. ++ * ++ * \param dev DRM device. ++ * \param file_priv DRM file private pointer of the caller. ++ */ ++#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ ++do { \ ++ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ ++ dev->lock.file_priv != file_priv ) { \ ++ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ ++ __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ ++ dev->lock.file_priv, file_priv ); \ ++ return -EINVAL; \ ++ } \ ++} while (0) ++ ++/** ++ * Copy and IOCTL return string to user space ++ */ ++#define DRM_COPY( name, value ) \ ++ len = strlen( value ); \ ++ if ( len > name##_len ) len = name##_len; \ ++ name##_len = strlen( value ); \ ++ if ( len && name ) { \ ++ if ( copy_to_user( name, value, len ) ) \ ++ return -EFAULT; \ ++ } ++ ++/** ++ * Ioctl function type. ++ * ++ * \param dev DRM device structure ++ * \param data pointer to kernel-space stored data, copied in and out according ++ * to ioctl description. ++ * \param file_priv DRM file private pointer. ++ */ ++typedef int drm_ioctl_t(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++#define DRM_AUTH 0x1 ++#define DRM_MASTER 0x2 ++#define DRM_ROOT_ONLY 0x4 ++ ++struct drm_ioctl_desc { ++ unsigned int cmd; ++ drm_ioctl_t *func; ++ int flags; ++}; ++/** ++ * Creates a driver or general drm_ioctl_desc array entry for the given ++ * ioctl, for use by drm_ioctl(). ++ */ ++#define DRM_IOCTL_DEF(ioctl, func, flags) \ ++ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} ++ ++struct drm_magic_entry { ++ struct list_head head; ++ struct drm_hash_item hash_item; ++ struct drm_file *priv; ++}; ++ ++struct drm_vma_entry { ++ struct list_head head; ++ struct vm_area_struct *vma; ++ pid_t pid; ++}; ++ ++/** ++ * DMA buffer. 
++ */ ++struct drm_buf { ++ int idx; /**< Index into master buflist */ ++ int total; /**< Buffer size */ ++ int order; /**< log-base-2(total) */ ++ int used; /**< Amount of buffer in use (for DMA) */ ++ unsigned long offset; /**< Byte offset (used internally) */ ++ void *address; /**< Address of buffer */ ++ unsigned long bus_address; /**< Bus address of buffer */ ++ struct drm_buf *next; /**< Kernel-only: used for free list */ ++ __volatile__ int waiting; /**< On kernel DMA queue */ ++ __volatile__ int pending; /**< On hardware DMA queue */ ++ wait_queue_head_t dma_wait; /**< Processes waiting */ ++ struct drm_file *file_priv; /**< Private of holding file descr */ ++ int context; /**< Kernel queue for this buffer */ ++ int while_locked; /**< Dispatch this buffer while locked */ ++ enum { ++ DRM_LIST_NONE = 0, ++ DRM_LIST_FREE = 1, ++ DRM_LIST_WAIT = 2, ++ DRM_LIST_PEND = 3, ++ DRM_LIST_PRIO = 4, ++ DRM_LIST_RECLAIM = 5 ++ } list; /**< Which list we're on */ ++ ++ int dev_priv_size; /**< Size of buffer private storage */ ++ void *dev_private; /**< Per-buffer private storage */ ++}; ++ ++/** bufs is one longer than it has to be */ ++struct drm_waitlist { ++ int count; /**< Number of possible buffers */ ++ struct drm_buf **bufs; /**< List of pointers to buffers */ ++ struct drm_buf **rp; /**< Read pointer */ ++ struct drm_buf **wp; /**< Write pointer */ ++ struct drm_buf **end; /**< End pointer */ ++ spinlock_t read_lock; ++ spinlock_t write_lock; ++}; ++ ++struct drm_freelist { ++ int initialized; /**< Freelist in use */ ++ atomic_t count; /**< Number of free buffers */ ++ struct drm_buf *next; /**< End pointer */ ++ ++ wait_queue_head_t waiting; /**< Processes waiting on free bufs */ ++ int low_mark; /**< Low water mark */ ++ int high_mark; /**< High water mark */ ++ atomic_t wfh; /**< If waiting for high mark */ ++ spinlock_t lock; ++}; ++ ++typedef struct drm_dma_handle { ++ dma_addr_t busaddr; ++ void *vaddr; ++ size_t size; ++} drm_dma_handle_t; ++ ++/** ++ * Buffer entry. There is one of this for each buffer size order. ++ */ ++struct drm_buf_entry { ++ int buf_size; /**< size */ ++ int buf_count; /**< number of buffers */ ++ struct drm_buf *buflist; /**< buffer list */ ++ int seg_count; ++ int page_order; ++ struct drm_dma_handle **seglist; ++ struct drm_freelist freelist; ++}; ++ ++ ++enum drm_ref_type { ++ _DRM_REF_USE = 0, ++ _DRM_REF_TYPE1, ++ _DRM_NO_REF_TYPES ++}; ++ ++ ++/** File private data */ ++struct drm_file { ++ int authenticated; ++ int master; ++ pid_t pid; ++ uid_t uid; ++ drm_magic_t magic; ++ unsigned long ioctl_count; ++ struct list_head lhead; ++ struct drm_minor *minor; ++ int remove_auth_on_close; ++ unsigned long lock_count; ++ ++ /* ++ * The user object hash table is global and resides in the ++ * drm_device structure. We protect the lists and hash tables with the ++ * device struct_mutex. A bit coarse-grained but probably the best ++ * option. ++ */ ++ ++ struct list_head refd_objects; ++ ++ /** Mapping of mm object handles to object pointers. */ ++ struct idr object_idr; ++ /** Lock for synchronization of access to object_idr. 
*/ ++ spinlock_t table_lock; ++ ++ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; ++ struct file *filp; ++ void *driver_priv; ++}; ++ ++/** Wait queue */ ++struct drm_queue { ++ atomic_t use_count; /**< Outstanding uses (+1) */ ++ atomic_t finalization; /**< Finalization in progress */ ++ atomic_t block_count; /**< Count of processes waiting */ ++ atomic_t block_read; /**< Queue blocked for reads */ ++ wait_queue_head_t read_queue; /**< Processes waiting on block_read */ ++ atomic_t block_write; /**< Queue blocked for writes */ ++ wait_queue_head_t write_queue; /**< Processes waiting on block_write */ ++#if 1 ++ atomic_t total_queued; /**< Total queued statistic */ ++ atomic_t total_flushed; /**< Total flushes statistic */ ++ atomic_t total_locks; /**< Total locks statistics */ ++#endif ++ enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ ++ struct drm_waitlist waitlist; /**< Pending buffers */ ++ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ ++}; ++ ++/** ++ * Lock data. ++ */ ++struct drm_lock_data { ++ struct drm_hw_lock *hw_lock; /**< Hardware lock */ ++ /** Private of lock holder's file (NULL=kernel) */ ++ struct drm_file *file_priv; ++ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ ++ unsigned long lock_time; /**< Time of last lock in jiffies */ ++ spinlock_t spinlock; ++ uint32_t kernel_waiters; ++ uint32_t user_waiters; ++ int idle_has_lock; ++}; ++ ++/** ++ * DMA data. ++ */ ++struct drm_device_dma { ++ ++ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ ++ int buf_count; /**< total number of buffers */ ++ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ ++ int seg_count; ++ int page_count; /**< number of pages */ ++ unsigned long *pagelist; /**< page list */ ++ unsigned long byte_count; ++ enum { ++ _DRM_DMA_USE_AGP = 0x01, ++ _DRM_DMA_USE_SG = 0x02, ++ _DRM_DMA_USE_FB = 0x04, ++ _DRM_DMA_USE_PCI_RO = 0x08 ++ } flags; ++ ++}; ++ ++/** ++ * AGP memory entry. Stored as a doubly linked list. ++ */ ++struct drm_agp_mem { ++ unsigned long handle; /**< handle */ ++ DRM_AGP_MEM *memory; ++ unsigned long bound; /**< address */ ++ int pages; ++ struct list_head head; ++}; ++ ++/** ++ * AGP data. ++ * ++ * \sa drm_agp_init() and drm_device::agp. ++ */ ++struct drm_agp_head { ++ DRM_AGP_KERN agp_info; /**< AGP device information */ ++ struct list_head memory; ++ unsigned long mode; /**< AGP mode */ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) ++ struct agp_bridge_data *bridge; ++#endif ++ int enabled; /**< whether the AGP bus as been enabled */ ++ int acquired; /**< whether the AGP device has been acquired */ ++ unsigned long base; ++ int agp_mtrr; ++ int cant_use_aperture; ++ unsigned long page_mask; ++}; ++ ++/** ++ * Scatter-gather memory. 
++ */ ++struct drm_sg_mem { ++ unsigned long handle; ++ void *virtual; ++ int pages; ++ struct page **pagelist; ++ dma_addr_t *busaddr; ++}; ++ ++struct drm_sigdata { ++ int context; ++ struct drm_hw_lock *lock; ++}; ++ ++ ++/* ++ * Generic memory manager structs ++ */ ++ ++struct drm_mm_node { ++ struct list_head fl_entry; ++ struct list_head ml_entry; ++ int free; ++ unsigned long start; ++ unsigned long size; ++ struct drm_mm *mm; ++ void *private; ++}; ++ ++struct drm_mm { ++ struct list_head fl_entry; ++ struct list_head ml_entry; ++}; ++ ++ ++/** ++ * Mappings list ++ */ ++struct drm_map_list { ++ struct list_head head; /**< list head */ ++ struct drm_hash_item hash; ++ struct drm_map *map; /**< mapping */ ++ uint64_t user_token; ++ struct drm_mm_node *file_offset_node; ++}; ++ ++typedef struct drm_map drm_local_map_t; ++ ++/** ++ * Context handle list ++ */ ++struct drm_ctx_list { ++ struct list_head head; /**< list head */ ++ drm_context_t handle; /**< context handle */ ++ struct drm_file *tag; /**< associated fd private data */ ++}; ++ ++struct drm_vbl_sig { ++ struct list_head head; ++ unsigned int sequence; ++ struct siginfo info; ++ struct task_struct *task; ++}; ++ ++/* location of GART table */ ++#define DRM_ATI_GART_MAIN 1 ++#define DRM_ATI_GART_FB 2 ++ ++#define DRM_ATI_GART_PCI 1 ++#define DRM_ATI_GART_PCIE 2 ++#define DRM_ATI_GART_IGP 3 ++ ++struct drm_ati_pcigart_info { ++ int gart_table_location; ++ int gart_reg_if; ++ void *addr; ++ dma_addr_t bus_addr; ++ dma_addr_t table_mask; ++ dma_addr_t member_mask; ++ struct drm_dma_handle *table_handle; ++ drm_local_map_t mapping; ++ int table_size; ++}; ++ ++/** ++ * This structure defines the drm_mm memory object, which will be used by the ++ * DRM for its buffer objects. ++ */ ++struct drm_gem_object { ++ /** Reference count of this object */ ++ struct kref refcount; ++ ++ /** Handle count of this object. Each handle also holds a reference */ ++ struct kref handlecount; ++ ++ /** Related drm device */ ++ struct drm_device *dev; ++ ++ /** File representing the shmem storage */ ++ struct file *filp; ++ ++ /** ++ * Size of the object, in bytes. Immutable over the object's ++ * lifetime. ++ */ ++ size_t size; ++ ++ /** ++ * Global name for this object, starts at 1. 0 means unnamed. ++ * Access is covered by the object_name_lock in the related drm_device ++ */ ++ int name; ++ ++ /** ++ * Memory domains. These monitor which caches contain read/write data ++ * related to the object. When transitioning from one set of domains ++ * to another, the driver is called to ensure that caches are suitably ++ * flushed and invalidated ++ */ ++ uint32_t read_domains; ++ uint32_t write_domain; ++ ++ /** ++ * While validating an exec operation, the ++ * new read/write domain values are computed here. ++ * They will be transferred to the above values ++ * at the point that any cache flushing occurs ++ */ ++ uint32_t pending_read_domains; ++ uint32_t pending_write_domain; ++ ++ void *driver_private; ++}; ++ ++#include "drm_objects.h" ++ ++/** ++ * DRM driver structure. This structure represent the common code for ++ * a family of cards. 
There will one drm_device for each card present ++ * in this family ++ */ ++ ++struct drm_driver { ++ int (*load) (struct drm_device *, unsigned long flags); ++ int (*firstopen) (struct drm_device *); ++ int (*open) (struct drm_device *, struct drm_file *); ++ void (*preclose) (struct drm_device *, struct drm_file *file_priv); ++ void (*postclose) (struct drm_device *, struct drm_file *); ++ void (*lastclose) (struct drm_device *); ++ int (*unload) (struct drm_device *); ++ int (*suspend) (struct drm_device *, pm_message_t state); ++ int (*resume) (struct drm_device *); ++ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); ++ void (*dma_ready) (struct drm_device *); ++ int (*dma_quiescent) (struct drm_device *); ++ int (*context_ctor) (struct drm_device *dev, int context); ++ int (*context_dtor) (struct drm_device *dev, int context); ++ int (*kernel_context_switch) (struct drm_device *dev, int old, ++ int new); ++ void (*kernel_context_switch_unlock) (struct drm_device * dev); ++ /** ++ * get_vblank_counter - get raw hardware vblank counter ++ * @dev: DRM device ++ * @crtc: counter to fetch ++ * ++ * Driver callback for fetching a raw hardware vblank counter ++ * for @crtc. If a device doesn't have a hardware counter, the ++ * driver can simply return the value of drm_vblank_count and ++ * make the enable_vblank() and disable_vblank() hooks into no-ops, ++ * leaving interrupts enabled at all times. ++ * ++ * Wraparound handling and loss of events due to modesetting is dealt ++ * with in the DRM core code. ++ * ++ * RETURNS ++ * Raw vblank counter value. ++ */ ++ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); ++ ++ /** ++ * enable_vblank - enable vblank interrupt events ++ * @dev: DRM device ++ * @crtc: which irq to enable ++ * ++ * Enable vblank interrupts for @crtc. If the device doesn't have ++ * a hardware vblank counter, this routine should be a no-op, since ++ * interrupts will have to stay on to keep the count accurate. ++ * ++ * RETURNS ++ * Zero on success, appropriate errno if the given @crtc's vblank ++ * interrupt cannot be enabled. ++ */ ++ int (*enable_vblank) (struct drm_device *dev, int crtc); ++ ++ /** ++ * disable_vblank - disable vblank interrupt events ++ * @dev: DRM device ++ * @crtc: which irq to enable ++ * ++ * Disable vblank interrupts for @crtc. If the device doesn't have ++ * a hardware vblank counter, this routine should be a no-op, since ++ * interrupts will have to stay on to keep the count accurate. ++ */ ++ void (*disable_vblank) (struct drm_device *dev, int crtc); ++ int (*dri_library_name) (struct drm_device *dev, char * buf); ++ ++ /** ++ * Called by \c drm_device_is_agp. Typically used to determine if a ++ * card is really attached to AGP or not. ++ * ++ * \param dev DRM device handle ++ * ++ * \returns ++ * One of three values is returned depending on whether or not the ++ * card is absolutely \b not AGP (return of 0), absolutely \b is AGP ++ * (return of 1), or may or may not be AGP (return of 2). 
++ */ ++ int (*device_is_agp) (struct drm_device *dev); ++ ++/* these have to be filled in */ ++ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); ++ void (*irq_preinstall) (struct drm_device *dev); ++ int (*irq_postinstall) (struct drm_device *dev); ++ void (*irq_uninstall) (struct drm_device *dev); ++ void (*reclaim_buffers) (struct drm_device *dev, ++ struct drm_file *file_priv); ++ void (*reclaim_buffers_locked) (struct drm_device *dev, ++ struct drm_file *file_priv); ++ void (*reclaim_buffers_idlelocked) (struct drm_device *dev, ++ struct drm_file *file_priv); ++ unsigned long (*get_map_ofs) (struct drm_map *map); ++ unsigned long (*get_reg_ofs) (struct drm_device *dev); ++ void (*set_version) (struct drm_device *dev, ++ struct drm_set_version *sv); ++ ++ int (*proc_init)(struct drm_minor *minor); ++ void (*proc_cleanup)(struct drm_minor *minor); ++ ++ /** ++ * Driver-specific constructor for drm_gem_objects, to set up ++ * obj->driver_private. ++ * ++ * Returns 0 on success. ++ */ ++ int (*gem_init_object) (struct drm_gem_object *obj); ++ void (*gem_free_object) (struct drm_gem_object *obj); ++ ++ struct drm_fence_driver *fence_driver; ++ struct drm_bo_driver *bo_driver; ++ ++ int major; ++ int minor; ++ int patchlevel; ++ char *name; ++ char *desc; ++ char *date; ++ ++/* variables */ ++ u32 driver_features; ++ int dev_priv_size; ++ struct drm_ioctl_desc *ioctls; ++ int num_ioctls; ++ struct file_operations fops; ++ struct pci_driver pci_driver; ++}; ++ ++#define DRM_MINOR_UNASSIGNED 0 ++#define DRM_MINOR_LEGACY 1 ++ ++/** ++ * DRM minor structure. This structure represents a drm minor number. ++ */ ++struct drm_minor { ++ int index; /**< Minor device number */ ++ int type; /**< Control or render */ ++ dev_t device; /**< Device number for mknod */ ++ struct device kdev; /**< Linux device */ ++ struct drm_device *dev; ++ struct proc_dir_entry *dev_root; /**< proc directory entry */ ++ struct class_device *dev_class; ++}; ++ ++ ++/** ++ * DRM device structure. This structure represent a complete card that ++ * may contain multiple heads. ++ */ ++struct drm_device { ++ char *unique; /**< Unique identifier: e.g., busid */ ++ int unique_len; /**< Length of unique field */ ++ char *devname; /**< For /proc/interrupts */ ++ int if_version; /**< Highest interface version set */ ++ ++ int blocked; /**< Blocked due to VC switch? 
*/ ++ ++ /** \name Locks */ ++ /*@{ */ ++ spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ ++ struct mutex struct_mutex; /**< For others */ ++ /*@} */ ++ ++ /** \name Usage Counters */ ++ /*@{ */ ++ int open_count; /**< Outstanding files open */ ++ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ ++ atomic_t vma_count; /**< Outstanding vma areas open */ ++ int buf_use; /**< Buffers in use -- cannot alloc */ ++ atomic_t buf_alloc; /**< Buffer allocation in progress */ ++ /*@} */ ++ ++ /** \name Performance counters */ ++ /*@{ */ ++ unsigned long counters; ++ enum drm_stat_type types[15]; ++ atomic_t counts[15]; ++ /*@} */ ++ ++ /** \name Authentication */ ++ /*@{ */ ++ struct list_head filelist; ++ struct drm_open_hash magiclist; ++ struct list_head magicfree; ++ /*@} */ ++ ++ /** \name Memory management */ ++ /*@{ */ ++ struct list_head maplist; /**< Linked list of regions */ ++ int map_count; /**< Number of mappable regions */ ++ struct drm_open_hash map_hash; /**< User token hash table for maps */ ++ struct drm_mm offset_manager; /**< User token manager */ ++ struct drm_open_hash object_hash; /**< User token hash table for objects */ ++ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ ++ struct page *ttm_dummy_page; ++ ++ /** \name Context handle management */ ++ /*@{ */ ++ struct list_head ctxlist; /**< Linked list of context handles */ ++ int ctx_count; /**< Number of context handles */ ++ struct mutex ctxlist_mutex; /**< For ctxlist */ ++ ++ struct idr ctx_idr; ++ ++ struct list_head vmalist; /**< List of vmas (for debugging) */ ++ struct drm_lock_data lock; /**< Information on hardware lock */ ++ /*@} */ ++ ++ /** \name DMA queues (contexts) */ ++ /*@{ */ ++ int queue_count; /**< Number of active DMA queues */ ++ int queue_reserved; /**< Number of reserved DMA queues */ ++ int queue_slots; /**< Actual length of queuelist */ ++ struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ ++ struct drm_device_dma *dma; /**< Optional pointer for DMA support */ ++ /*@} */ ++ ++ /** \name Context support */ ++ /*@{ */ ++ int irq; /**< Interrupt used by board */ ++ int irq_enabled; /**< True if irq handler is enabled */ ++ __volatile__ long context_flag; /**< Context swapping flag */ ++ __volatile__ long interrupt_flag; /**< Interruption handler flag */ ++ __volatile__ long dma_flag; /**< DMA dispatch flag */ ++ struct timer_list timer; /**< Timer for delaying ctx switch */ ++ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ ++ int last_checked; /**< Last context checked for DMA */ ++ int last_context; /**< Last current context */ ++ unsigned long last_switch; /**< jiffies at last context switch */ ++ /*@} */ ++ ++ struct work_struct work; ++ ++ /** \name VBLANK IRQ support */ ++ /*@{ */ ++ ++ /* ++ * At load time, disabling the vblank interrupt won't be allowed since ++ * old clients may not call the modeset ioctl and therefore misbehave. ++ * Once the modeset ioctl *has* been called though, we can safely ++ * disable them when unused. 
++ */ ++ int vblank_disable_allowed; ++ ++ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ ++ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ ++ spinlock_t vbl_lock; ++ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */ ++ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/ ++ atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ ++ u32 *last_vblank; /* protected by dev->vbl_lock, used */ ++ /* for wraparound handling */ ++ int *vblank_enabled; /* so we don't call enable more than ++ once per disable */ ++ int *vblank_inmodeset; /* Display driver is setting mode */ ++ struct timer_list vblank_disable_timer; ++ ++ u32 max_vblank_count; /**< size of vblank counter register */ ++ spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ ++ void (*locked_tasklet_func)(struct drm_device *dev); ++ ++ /*@} */ ++ cycles_t ctx_start; ++ cycles_t lck_start; ++ ++ struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ ++ wait_queue_head_t buf_readers; /**< Processes waiting to read */ ++ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ ++ ++ struct drm_agp_head *agp; /**< AGP data */ ++ ++ struct pci_dev *pdev; /**< PCI device structure */ ++ int pci_vendor; /**< PCI vendor id */ ++ int pci_device; /**< PCI device id */ ++#ifdef __alpha__ ++ struct pci_controller *hose; ++#endif ++ int num_crtcs; /**< Number of CRTCs on this device */ ++ struct drm_sg_mem *sg; /**< Scatter gather memory */ ++ void *dev_private; /**< device private data */ ++ struct drm_sigdata sigdata; /**< For block_all_signals */ ++ sigset_t sigmask; ++ ++ struct drm_driver *driver; ++ drm_local_map_t *agp_buffer_map; ++ unsigned int agp_buffer_token; ++ struct drm_minor *primary; /**< render type primary screen head */ ++ ++ struct drm_fence_manager fm; ++ struct drm_buffer_manager bm; ++ ++ /** \name Drawable information */ ++ /*@{ */ ++ spinlock_t drw_lock; ++ struct idr drw_idr; ++ /*@} */ ++ ++ /** \name GEM information */ ++ /*@{ */ ++ spinlock_t object_name_lock; ++ struct idr object_name_idr; ++ atomic_t object_count; ++ atomic_t object_memory; ++ atomic_t pin_count; ++ atomic_t pin_memory; ++ atomic_t gtt_count; ++ atomic_t gtt_memory; ++ uint32_t gtt_total; ++ uint32_t invalidate_domains; /* domains pending invalidation */ ++ uint32_t flush_domains; /* domains pending flush */ ++ /*@} */ ++}; ++ ++#if __OS_HAS_AGP ++struct drm_agp_ttm_backend { ++ struct drm_ttm_backend backend; ++ DRM_AGP_MEM *mem; ++ struct agp_bridge_data *bridge; ++ int populated; ++}; ++#endif ++ ++ ++static __inline__ int drm_core_check_feature(struct drm_device *dev, ++ int feature) ++{ ++ return ((dev->driver->driver_features & feature) ? 
1 : 0); ++} ++ ++#ifdef __alpha__ ++#define drm_get_pci_domain(dev) dev->hose->index ++#else ++#define drm_get_pci_domain(dev) 0 ++#endif ++ ++#if __OS_HAS_AGP ++static inline int drm_core_has_AGP(struct drm_device *dev) ++{ ++ return drm_core_check_feature(dev, DRIVER_USE_AGP); ++} ++#else ++#define drm_core_has_AGP(dev) (0) ++#endif ++ ++#if __OS_HAS_MTRR ++static inline int drm_core_has_MTRR(struct drm_device *dev) ++{ ++ return drm_core_check_feature(dev, DRIVER_USE_MTRR); ++} ++ ++#define DRM_MTRR_WC MTRR_TYPE_WRCOMB ++ ++static inline int drm_mtrr_add(unsigned long offset, unsigned long size, ++ unsigned int flags) ++{ ++ return mtrr_add(offset, size, flags, 1); ++} ++ ++static inline int drm_mtrr_del(int handle, unsigned long offset, ++ unsigned long size, unsigned int flags) ++{ ++ return mtrr_del(handle, offset, size); ++} ++ ++#else ++static inline int drm_mtrr_add(unsigned long offset, unsigned long size, ++ unsigned int flags) ++{ ++ return -ENODEV; ++} ++ ++static inline int drm_mtrr_del(int handle, unsigned long offset, ++ unsigned long size, unsigned int flags) ++{ ++ return -ENODEV; ++} ++ ++#define drm_core_has_MTRR(dev) (0) ++#define DRM_MTRR_WC 0 ++#endif ++ ++ ++/******************************************************************/ ++/** \name Internal function definitions */ ++/*@{*/ ++ ++ /* Driver support (drm_drv.h) */ ++extern int drm_fb_loaded; ++extern int drm_init(struct drm_driver *driver, ++ struct pci_device_id *pciidlist); ++extern void drm_exit(struct drm_driver *driver); ++extern void drm_cleanup_pci(struct pci_dev *pdev); ++extern int drm_ioctl(struct inode *inode, struct file *filp, ++ unsigned int cmd, unsigned long arg); ++extern long drm_unlocked_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg); ++extern long drm_compat_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg); ++ ++extern int drm_lastclose(struct drm_device *dev); ++ ++ /* Device support (drm_fops.h) */ ++extern int drm_open(struct inode *inode, struct file *filp); ++extern int drm_stub_open(struct inode *inode, struct file *filp); ++extern int drm_fasync(int fd, struct file *filp, int on); ++extern int drm_release(struct inode *inode, struct file *filp); ++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); ++ ++ /* Mapping support (drm_vm.h) */ ++extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); ++extern unsigned long drm_core_get_map_ofs(struct drm_map * map); ++extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); ++extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma); ++ ++ /* Memory management support (drm_memory.h) */ ++#include "drm_memory.h" ++extern void drm_mem_init(void); ++extern int drm_mem_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++extern void *drm_calloc(size_t nmemb, size_t size, int area); ++extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); ++extern unsigned long drm_alloc_pages(int order, int area); ++extern void drm_free_pages(unsigned long address, int order, int area); ++extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); ++extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); ++extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); ++extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, ++ struct page **pages, ++ unsigned long num_pages, ++ uint32_t gtt_offset); ++extern int drm_unbind_agp(DRM_AGP_MEM * handle); ++ ++extern void 
drm_free_memctl(size_t size); ++extern int drm_alloc_memctl(size_t size); ++extern void drm_query_memctl(uint64_t *cur_used, ++ uint64_t *emer_used, ++ uint64_t *low_threshold, ++ uint64_t *high_threshold, ++ uint64_t *emer_threshold); ++extern void drm_init_memctl(size_t low_threshold, ++ size_t high_threshold, ++ size_t unit_size); ++ ++ /* Misc. IOCTL support (drm_ioctl.h) */ ++extern int drm_irq_by_busid(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_setunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getmap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getclient(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getstats(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_setversion(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_noop(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* Context IOCTL support (drm_context.h) */ ++extern int drm_resctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_addctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_modctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_switchctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_newctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_rmctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern int drm_ctxbitmap_init(struct drm_device *dev); ++extern void drm_ctxbitmap_cleanup(struct drm_device *dev); ++extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); ++ ++extern int drm_setsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* Drawable IOCTL support (drm_drawable.h) */ ++extern int drm_adddraw(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_rmdraw(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_update_drawable_info(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, ++ drm_drawable_t id); ++extern void drm_drawable_free_all(struct drm_device *dev); ++ ++ /* Authentication IOCTL support (drm_auth.h) */ ++extern int drm_getmagic(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_authmagic(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* Locking IOCTL support (drm_lock.h) */ ++extern int drm_lock(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_unlock(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); ++extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); ++extern void drm_idlelock_take(struct drm_lock_data *lock_data); ++extern void drm_idlelock_release(struct drm_lock_data *lock_data); ++ ++/* ++ * These are exported to drivers so 
that they can implement fencing using ++ * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. ++ */ ++ ++extern int drm_i_have_hw_lock(struct drm_device *dev, ++ struct drm_file *file_priv); ++ ++ /* Buffer management support (drm_bufs.h) */ ++extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); ++extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); ++extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request); ++extern int drm_addmap(struct drm_device *dev, unsigned int offset, ++ unsigned int size, enum drm_map_type type, ++ enum drm_map_flags flags, drm_local_map_t ** map_ptr); ++extern int drm_addmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); ++extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); ++extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_addbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_infobufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_markbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_freebufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_mapbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_order(unsigned long size); ++extern unsigned long drm_get_resource_start(struct drm_device *dev, ++ unsigned int resource); ++extern unsigned long drm_get_resource_len(struct drm_device *dev, ++ unsigned int resource); ++extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, ++ drm_local_map_t *map); ++ ++ ++ /* DMA support (drm_dma.h) */ ++extern int drm_dma_setup(struct drm_device *dev); ++extern void drm_dma_takedown(struct drm_device *dev); ++extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); ++extern void drm_core_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *filp); ++ ++ /* IRQ support (drm_irq.h) */ ++extern int drm_control(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); ++extern int drm_irq_install(struct drm_device *dev); ++extern int drm_irq_uninstall(struct drm_device *dev); ++extern void drm_driver_irq_preinstall(struct drm_device *dev); ++extern void drm_driver_irq_postinstall(struct drm_device *dev); ++extern void drm_driver_irq_uninstall(struct drm_device *dev); ++ ++extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); ++extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); ++extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq); ++extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); ++extern u32 drm_vblank_count(struct drm_device *dev, int crtc); ++extern void drm_handle_vblank(struct drm_device *dev, int crtc); ++extern int drm_vblank_get(struct drm_device *dev, int crtc); ++extern void drm_vblank_put(struct drm_device *dev, int crtc); ++ ++ /* Modesetting support */ ++extern int drm_modeset_ctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* AGP/GART support (drm_agpsupport.h) */ ++extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); ++extern int drm_agp_acquire(struct drm_device *dev); ++extern int 
drm_agp_acquire_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_release(struct drm_device *dev); ++extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); ++extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); ++extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); ++extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); ++extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); ++extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); ++extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); ++#else ++extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); ++#endif ++extern int drm_agp_free_memory(DRM_AGP_MEM * handle); ++extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); ++extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); ++extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); ++extern void drm_agp_chipset_flush(struct drm_device *dev); ++ /* Stub support (drm_stub.h) */ ++extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ++ struct drm_driver *driver); ++extern int drm_put_dev(struct drm_device *dev); ++extern int drm_put_minor(struct drm_device *dev); ++extern unsigned int drm_debug; /* 1 to enable debug output */ ++ ++extern struct class *drm_class; ++extern struct proc_dir_entry *drm_proc_root; ++ ++extern struct idr drm_minors_idr; ++ ++extern drm_local_map_t *drm_getsarea(struct drm_device *dev); ++ ++ /* Proc support (drm_proc.h) */ ++int drm_proc_init(struct drm_minor *minor, int minor_id, ++ struct proc_dir_entry *root); ++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); ++ ++ /* Scatter Gather Support (drm_scatter.h) */ ++extern void drm_sg_cleanup(struct drm_sg_mem * entry); ++extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); ++extern int drm_sg_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* ATI PCIGART support (ati_pcigart.h) */ ++extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); ++extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); ++ ++extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, ++ size_t align, dma_addr_t maxaddr); ++extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); ++extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); ++ ++ /* sysfs support 
(drm_sysfs.c) */ ++struct drm_sysfs_class; ++extern struct class *drm_sysfs_create(struct module *owner, char *name); ++extern void drm_sysfs_destroy(void); ++extern int drm_sysfs_device_add(struct drm_minor *minor); ++extern void drm_sysfs_device_remove(struct drm_minor *minor); ++ ++/* ++ * Basic memory manager support (drm_mm.c) ++ */ ++ ++extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, ++ unsigned alignment); ++extern void drm_mm_put_block(struct drm_mm_node *cur); ++extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, ++ unsigned alignment, int best_match); ++extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); ++extern void drm_mm_takedown(struct drm_mm *mm); ++extern int drm_mm_clean(struct drm_mm *mm); ++extern unsigned long drm_mm_tail_space(struct drm_mm *mm); ++extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); ++extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); ++ ++static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) ++{ ++ return block->mm; ++} ++ ++/* Graphics Execution Manager library functions (drm_gem.c) */ ++int ++drm_gem_init (struct drm_device *dev); ++ ++void ++drm_gem_object_free (struct kref *kref); ++ ++struct drm_gem_object * ++drm_gem_object_alloc(struct drm_device *dev, size_t size); ++ ++void ++drm_gem_object_handle_free (struct kref *kref); ++ ++static inline void drm_gem_object_reference(struct drm_gem_object *obj) ++{ ++ kref_get(&obj->refcount); ++} ++ ++static inline void drm_gem_object_unreference(struct drm_gem_object *obj) ++{ ++ if (obj == NULL) ++ return; ++ ++ kref_put (&obj->refcount, drm_gem_object_free); ++} ++ ++int ++drm_gem_handle_create(struct drm_file *file_priv, ++ struct drm_gem_object *obj, ++ int *handlep); ++ ++static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj) ++{ ++ drm_gem_object_reference (obj); ++ kref_get(&obj->handlecount); ++} ++ ++static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj) ++{ ++ if (obj == NULL) ++ return; ++ ++ /* ++ * Must bump handle count first as this may be the last ++ * ref, in which case the object would disappear before we ++ * checked for a name ++ */ ++ kref_put (&obj->handlecount, drm_gem_object_handle_free); ++ drm_gem_object_unreference (obj); ++} ++ ++struct drm_gem_object * ++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, ++ int handle); ++int drm_gem_close_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int drm_gem_flink_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int drm_gem_open_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); ++void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); ++ ++extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); ++extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); ++extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); ++ ++static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, ++ unsigned int token) ++{ ++ struct drm_map_list *_entry; ++ list_for_each_entry(_entry, &dev->maplist, head) ++ if (_entry->user_token == token) ++ return _entry->map; ++ return NULL; ++} ++ ++static __inline__ int drm_device_is_agp(struct drm_device *dev) ++{ 
++ if ( dev->driver->device_is_agp != NULL ) { ++ int err = (*dev->driver->device_is_agp)(dev); ++ ++ if (err != 2) { ++ return err; ++ } ++ } ++ ++ return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); ++} ++ ++static __inline__ int drm_device_is_pcie(struct drm_device *dev) ++{ ++ return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); ++} ++ ++static __inline__ void drm_core_dropmap(struct drm_map *map) ++{ ++} ++ ++#ifndef DEBUG_MEMORY ++/** Wrapper around kmalloc() */ ++static __inline__ void *drm_alloc(size_t size, int area) ++{ ++ return kmalloc(size, GFP_KERNEL); ++} ++ ++/** Wrapper around kfree() */ ++static __inline__ void drm_free(void *pt, size_t size, int area) ++{ ++ kfree(pt); ++} ++#else ++extern void *drm_alloc(size_t size, int area); ++extern void drm_free(void *pt, size_t size, int area); ++#endif ++ ++/* ++ * Accounting variants of standard calls. ++ */ ++ ++static inline void *drm_ctl_alloc(size_t size, int area) ++{ ++ void *ret; ++ if (drm_alloc_memctl(size)) ++ return NULL; ++ ret = drm_alloc(size, area); ++ if (!ret) ++ drm_free_memctl(size); ++ return ret; ++} ++ ++static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area) ++{ ++ void *ret; ++ ++ if (drm_alloc_memctl(nmemb*size)) ++ return NULL; ++ ret = drm_calloc(nmemb, size, area); ++ if (!ret) ++ drm_free_memctl(nmemb*size); ++ return ret; ++} ++ ++static inline void drm_ctl_free(void *pt, size_t size, int area) ++{ ++ drm_free(pt, size, area); ++ drm_free_memctl(size); ++} ++ ++/*@}*/ ++ ++#endif /* __KERNEL__ */ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_proc.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_proc.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_proc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_proc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,743 @@ ++/** ++ * \file drm_proc.c ++ * /proc support for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ * ++ * \par Acknowledgements: ++ * Matthew J Sottek sent in a patch to fix ++ * the problem with the proc files not outputting all their information. ++ */ ++ ++/* ++ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++static int drm_name_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_vm_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_clients_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_queues_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_bufs_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_objects_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_gem_name_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++static int drm_gem_object_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++#if DRM_DEBUG_CODE ++static int drm_vma_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++#endif ++ ++/** ++ * Proc file list. ++ */ ++static struct drm_proc_list { ++ const char *name; /**< file name */ ++ int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ ++} drm_proc_list[] = { ++ {"name", drm_name_info}, ++ {"mem", drm_mem_info}, ++ {"vm", drm_vm_info}, ++ {"clients", drm_clients_info}, ++ {"queues", drm_queues_info}, ++ {"bufs", drm_bufs_info}, ++ {"objects", drm_objects_info}, ++ {"gem_names", drm_gem_name_info}, ++ {"gem_objects", drm_gem_object_info}, ++#if DRM_DEBUG_CODE ++ {"vma", drm_vma_info}, ++#endif ++}; ++ ++#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) ++ ++/** ++ * Initialize the DRI proc filesystem for a device. ++ * ++ * \param dev DRM device. ++ * \param minor device minor number. ++ * \param root DRI proc dir entry. ++ * \param dev_root resulting DRI device proc dir entry. ++ * \return root entry pointer on success, or NULL on failure. ++ * ++ * Create the DRI proc root entry "/proc/dri", the device proc root entry ++ * "/proc/dri/%minor%/", and each entry in proc_list as ++ * "/proc/dri/%minor%/%name%". ++ */ ++int drm_proc_init(struct drm_minor *minor, int minor_id, ++ struct proc_dir_entry *root) ++{ ++ struct proc_dir_entry *ent; ++ int i, j; ++ char name[64]; ++ ++ sprintf(name, "%d", minor_id); ++ minor->dev_root = proc_mkdir(name, root); ++ if (!minor->dev_root) { ++ DRM_ERROR("Cannot create /proc/dri/%s\n", name); ++ return -1; ++ } ++ ++ for (i = 0; i < DRM_PROC_ENTRIES; i++) { ++ ent = create_proc_entry(drm_proc_list[i].name, ++ S_IFREG | S_IRUGO, minor->dev_root); ++ if (!ent) { ++ DRM_ERROR("Cannot create /proc/dri/%s/%s\n", ++ name, drm_proc_list[i].name); ++ for (j = 0; j < i; j++) ++ remove_proc_entry(drm_proc_list[i].name, ++ minor->dev_root); ++ remove_proc_entry(name, root); ++ minor->dev_root = NULL; ++ return -1; ++ } ++ ent->read_proc = drm_proc_list[i].f; ++ ent->data = minor; ++ } ++ return 0; ++} ++ ++/** ++ * Cleanup the proc filesystem resources. ++ * ++ * \param minor device minor number. ++ * \param root DRI proc dir entry. ++ * \param dev_root DRI device proc dir entry. ++ * \return always zero. ++ * ++ * Remove all proc entries created by proc_init(). 
++ */ ++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) ++{ ++ int i; ++ char name[64]; ++ ++ if (!root || !minor->dev_root) ++ return 0; ++ ++ for (i = 0; i < DRM_PROC_ENTRIES; i++) ++ remove_proc_entry(drm_proc_list[i].name, minor->dev_root); ++ sprintf(name, "%d", minor->index); ++ remove_proc_entry(name, root); ++ ++ return 0; ++} ++ ++/** ++ * Called when "/proc/dri/.../name" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ * ++ * Prints the device name together with the bus id if available. ++ */ ++static int drm_name_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ if (dev->unique) { ++ DRM_PROC_PRINT("%s %s %s\n", ++ dev->driver->pci_driver.name, ++ pci_name(dev->pdev), dev->unique); ++ } else { ++ DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, ++ pci_name(dev->pdev)); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Called when "/proc/dri/.../vm" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ * ++ * Prints information about all mappings in drm_device::maplist. ++ */ ++static int drm__vm_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_map *map; ++ struct drm_map_list *r_list; ++ ++ /* Hardcoded from _DRM_FRAME_BUFFER, ++ _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, ++ _DRM_SCATTER_GATHER, and _DRM_CONSISTENT. */ ++ const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; ++ const char *type; ++ int i; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("slot offset size type flags " ++ "address mtrr\n\n"); ++ i = 0; ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ map = r_list->map; ++ if (!map) ++ continue; ++ if (map->type < 0 || map->type > 5) ++ type = "??"; ++ else ++ type = types[map->type]; ++ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", ++ i, ++ map->offset, ++ map->size, type, map->flags, ++ (unsigned long) r_list->user_token); ++ ++ if (map->mtrr < 0) { ++ DRM_PROC_PRINT("none\n"); ++ } else { ++ DRM_PROC_PRINT("%4d\n", map->mtrr); ++ } ++ i++; ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock. 
++ */ ++static int drm_vm_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__vm_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../queues" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ */ ++static int drm__queues_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ int i; ++ struct drm_queue *q; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT(" ctx/flags use fin" ++ " blk/rw/rwf wait flushed queued" ++ " locks\n\n"); ++ for (i = 0; i < dev->queue_count; i++) { ++ q = dev->queuelist[i]; ++ atomic_inc(&q->use_count); ++ DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), ++ "%5d/0x%03x %5d %5d" ++ " %5d/%c%c/%c%c%c %5Zd\n", ++ i, ++ q->flags, ++ atomic_read(&q->use_count), ++ atomic_read(&q->finalization), ++ atomic_read(&q->block_count), ++ atomic_read(&q->block_read) ? 'r' : '-', ++ atomic_read(&q->block_write) ? 'w' : '-', ++ waitqueue_active(&q->read_queue) ? 'r' : '-', ++ waitqueue_active(&q-> ++ write_queue) ? 'w' : '-', ++ waitqueue_active(&q-> ++ flush_queue) ? 'f' : '-', ++ DRM_BUFCOUNT(&q->waitlist)); ++ atomic_dec(&q->use_count); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_queues_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__queues_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../bufs" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. 
++ */ ++static int drm__bufs_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ if (!dma || offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); ++ for (i = 0; i <= DRM_MAX_ORDER; i++) { ++ if (dma->bufs[i].buf_count) ++ DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", ++ i, ++ dma->bufs[i].buf_size, ++ dma->bufs[i].buf_count, ++ atomic_read(&dma->bufs[i] ++ .freelist.count), ++ dma->bufs[i].seg_count, ++ dma->bufs[i].seg_count ++ * (1 << dma->bufs[i].page_order), ++ (dma->bufs[i].seg_count ++ * (1 << dma->bufs[i].page_order)) ++ * PAGE_SIZE / 1024); ++ } ++ DRM_PROC_PRINT("\n"); ++ for (i = 0; i < dma->buf_count; i++) { ++ if (i && !(i % 32)) ++ DRM_PROC_PRINT("\n"); ++ DRM_PROC_PRINT(" %d", dma->buflist[i]->list); ++ } ++ DRM_PROC_PRINT("\n"); ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_bufs_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__bufs_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../objects" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. 
++ */ ++static int drm__objects_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_fence_manager *fm = &dev->fm; ++ uint64_t used_mem; ++ uint64_t used_emer; ++ uint64_t low_mem; ++ uint64_t high_mem; ++ uint64_t emer_mem; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("Object accounting:\n\n"); ++ if (fm->initialized) { ++ DRM_PROC_PRINT("Number of active fence objects: %d.\n", ++ atomic_read(&fm->count)); ++ } else { ++ DRM_PROC_PRINT("Fence objects are not supported by this driver\n"); ++ } ++ ++ if (bm->initialized) { ++ DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n", ++ atomic_read(&bm->count)); ++ } ++ DRM_PROC_PRINT("Memory accounting:\n\n"); ++ if (bm->initialized) { ++ DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages); ++ } else { ++ DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n"); ++ } ++ ++ drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem); ++ ++ if (used_mem > 16*PAGE_SIZE) { ++ DRM_PROC_PRINT("Used object memory is %lu pages.\n", ++ (unsigned long) (used_mem >> PAGE_SHIFT)); ++ } else { ++ DRM_PROC_PRINT("Used object memory is %lu bytes.\n", ++ (unsigned long) used_mem); ++ } ++ if (used_emer > 16*PAGE_SIZE) { ++ DRM_PROC_PRINT("Used emergency memory is %lu pages.\n", ++ (unsigned long) (used_emer >> PAGE_SHIFT)); ++ } else { ++ DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n", ++ (unsigned long) used_emer); ++ } ++ DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n", ++ (unsigned long) (low_mem >> PAGE_SHIFT)); ++ DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n", ++ (unsigned long) (high_mem >> PAGE_SHIFT)); ++ DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n", ++ (unsigned long) (emer_mem >> PAGE_SHIFT)); ++ ++ DRM_PROC_PRINT("\n"); ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_objects_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__objects_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../clients" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. 
++ */ ++static int drm__clients_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_file *priv; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); ++ list_for_each_entry(priv, &dev->filelist, lhead) { ++ DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", ++ priv->authenticated ? 'y' : 'n', ++ priv->minor->index, ++ priv->pid, ++ priv->uid, priv->magic, priv->ioctl_count); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_clients_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__clients_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++struct drm_gem_name_info_data { ++ int len; ++ char *buf; ++ int eof; ++}; ++ ++static int drm_gem_one_name_info(int id, void *ptr, void *data) ++{ ++ struct drm_gem_object *obj = ptr; ++ struct drm_gem_name_info_data *nid = data; ++ ++ DRM_INFO("name %d size %d\n", obj->name, obj->size); ++ if (nid->eof) ++ return 0; ++ ++ nid->len += sprintf(&nid->buf[nid->len], ++ "%6d%9d%8d%9d\n", ++ obj->name, obj->size, ++ atomic_read(&obj->handlecount.refcount), ++ atomic_read(&obj->refcount.refcount)); ++ if (nid->len > DRM_PROC_LIMIT) { ++ nid->eof = 1; ++ return 0; ++ } ++ return 0; ++} ++ ++static int drm_gem_name_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ struct drm_gem_name_info_data nid; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ nid.len = sprintf(buf, " name size handles refcount\n"); ++ nid.buf = buf; ++ nid.eof = 0; ++ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ if (nid.len > request + offset) ++ return request; ++ *eof = 1; ++ return nid.len - offset; ++} ++ ++static int drm_gem_object_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); ++ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); ++ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); ++ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); ++ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); ++ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++#if DRM_DEBUG_CODE ++ ++static int drm__vma_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_vma_entry *pt; ++ struct 
vm_area_struct *vma; ++#if defined(__i386__) ++ unsigned int pgprot; ++#endif ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", ++ atomic_read(&dev->vma_count), ++ high_memory, virt_to_phys(high_memory)); ++ list_for_each_entry(pt, &dev->vmalist, head) { ++ if (!(vma = pt->vma)) ++ continue; ++ DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", ++ pt->pid, ++ vma->vm_start, ++ vma->vm_end, ++ vma->vm_flags & VM_READ ? 'r' : '-', ++ vma->vm_flags & VM_WRITE ? 'w' : '-', ++ vma->vm_flags & VM_EXEC ? 'x' : '-', ++ vma->vm_flags & VM_MAYSHARE ? 's' : 'p', ++ vma->vm_flags & VM_LOCKED ? 'l' : '-', ++ vma->vm_flags & VM_IO ? 'i' : '-', ++ vma->vm_pgoff); ++ ++#if defined(__i386__) ++ pgprot = pgprot_val(vma->vm_page_prot); ++ DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", ++ pgprot & _PAGE_PRESENT ? 'p' : '-', ++ pgprot & _PAGE_RW ? 'w' : 'r', ++ pgprot & _PAGE_USER ? 'u' : 's', ++ pgprot & _PAGE_PWT ? 't' : 'b', ++ pgprot & _PAGE_PCD ? 'u' : 'c', ++ pgprot & _PAGE_ACCESSED ? 'a' : '-', ++ pgprot & _PAGE_DIRTY ? 'd' : '-', ++ pgprot & _PAGE_PSE ? 'm' : 'k', ++ pgprot & _PAGE_GLOBAL ? 'g' : 'l'); ++#endif ++ DRM_PROC_PRINT("\n"); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int drm_vma_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__vma_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_regman.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_regman.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_regman.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_regman.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,200 @@ ++/************************************************************************** ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
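All of the /proc read handlers above follow the same legacy read_proc contract: they print into buf through DRM_PROC_PRINT (the helper macro defined earlier in the same file, which appends to buf and advances the local len), and the closing three lines hand back at most request bytes per call so the core keeps re-reading until *eof is set. A stripped-down skeleton of that contract, purely for orientation; the handler name is hypothetical and nothing like it is added by the patch itself:

        static int drm__example_info(char *buf, char **start, off_t offset,
                                     int request, int *eof, void *data)
        {
                struct drm_minor *minor = (struct drm_minor *) data;
                int len = 0;

                /* Nothing is reported beyond the one-page DRM_PROC_LIMIT budget. */
                if (offset > DRM_PROC_LIMIT) {
                        *eof = 1;
                        return 0;
                }

                *start = &buf[offset];
                *eof = 0;

                DRM_PROC_PRINT("minor %d: example statistic goes here\n", minor->index);

                /* More data than requested: return a partial chunk, the caller re-reads. */
                if (len > request + offset)
                        return request;
                *eof = 1;
                return len - offset;
        }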
++ * ++ **************************************************************************/ ++/* ++ * An allocate-fence manager implementation intended for sets of base-registers ++ * or tiling-registers. ++ */ ++ ++#include "drmP.h" ++ ++/* ++ * Allocate a compatible register and put it on the unfenced list. ++ */ ++ ++int drm_regs_alloc(struct drm_reg_manager *manager, ++ const void *data, ++ uint32_t fence_class, ++ uint32_t fence_type, ++ int interruptible, int no_wait, struct drm_reg **reg) ++{ ++ struct drm_reg *entry, *next_entry; ++ int ret; ++ ++ *reg = NULL; ++ ++ /* ++ * Search the unfenced list. ++ */ ++ ++ list_for_each_entry(entry, &manager->unfenced, head) { ++ if (manager->reg_reusable(entry, data)) { ++ entry->new_fence_type |= fence_type; ++ goto out; ++ } ++ } ++ ++ /* ++ * Search the lru list. ++ */ ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { ++ struct drm_fence_object *fence = entry->fence; ++ if (fence->fence_class == fence_class && ++ (entry->fence_type & fence_type) == entry->fence_type && ++ manager->reg_reusable(entry, data)) { ++ list_del(&entry->head); ++ entry->new_fence_type = fence_type; ++ list_add_tail(&entry->head, &manager->unfenced); ++ goto out; ++ } ++ } ++ ++ /* ++ * Search the free list. ++ */ ++ ++ list_for_each_entry(entry, &manager->free, head) { ++ list_del(&entry->head); ++ entry->new_fence_type = fence_type; ++ list_add_tail(&entry->head, &manager->unfenced); ++ goto out; ++ } ++ ++ if (no_wait) ++ return -EBUSY; ++ ++ /* ++ * Go back to the lru list and try to expire fences. ++ */ ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { ++ BUG_ON(!entry->fence); ++ ret = drm_fence_object_wait(entry->fence, 0, !interruptible, ++ entry->fence_type); ++ if (ret) ++ return ret; ++ ++ drm_fence_usage_deref_unlocked(&entry->fence); ++ list_del(&entry->head); ++ entry->new_fence_type = fence_type; ++ list_add_tail(&entry->head, &manager->unfenced); ++ goto out; ++ } ++ ++ /* ++ * Oops. All registers are used up :(. ++ */ ++ ++ return -EBUSY; ++out: ++ *reg = entry; ++ return 0; ++} ++EXPORT_SYMBOL(drm_regs_alloc); ++ ++void drm_regs_fence(struct drm_reg_manager *manager, ++ struct drm_fence_object *fence) ++{ ++ struct drm_reg *entry; ++ struct drm_reg *next_entry; ++ ++ if (!fence) { ++ ++ /* ++ * Old fence (if any) is still valid. ++ * Put back on free and lru lists. ++ */ ++ ++ list_for_each_entry_safe_reverse(entry, next_entry, ++ &manager->unfenced, head) { ++ list_del(&entry->head); ++ list_add(&entry->head, (entry->fence) ? ++ &manager->lru : &manager->free); ++ } ++ } else { ++ ++ /* ++ * Fence with a new fence and put on lru list. 
++ */ ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->unfenced, ++ head) { ++ list_del(&entry->head); ++ if (entry->fence) ++ drm_fence_usage_deref_unlocked(&entry->fence); ++ drm_fence_reference_unlocked(&entry->fence, fence); ++ ++ entry->fence_type = entry->new_fence_type; ++ BUG_ON((entry->fence_type & fence->type) != ++ entry->fence_type); ++ ++ list_add_tail(&entry->head, &manager->lru); ++ } ++ } ++} ++EXPORT_SYMBOL(drm_regs_fence); ++ ++void drm_regs_free(struct drm_reg_manager *manager) ++{ ++ struct drm_reg *entry; ++ struct drm_reg *next_entry; ++ ++ drm_regs_fence(manager, NULL); ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->free, head) { ++ list_del(&entry->head); ++ manager->reg_destroy(entry); ++ } ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { ++ ++ (void)drm_fence_object_wait(entry->fence, 1, 1, ++ entry->fence_type); ++ list_del(&entry->head); ++ drm_fence_usage_deref_unlocked(&entry->fence); ++ manager->reg_destroy(entry); ++ } ++} ++EXPORT_SYMBOL(drm_regs_free); ++ ++void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg) ++{ ++ reg->fence = NULL; ++ list_add_tail(®->head, &manager->free); ++} ++EXPORT_SYMBOL(drm_regs_add); ++ ++void drm_regs_init(struct drm_reg_manager *manager, ++ int (*reg_reusable) (const struct drm_reg *, const void *), ++ void (*reg_destroy) (struct drm_reg *)) ++{ ++ INIT_LIST_HEAD(&manager->free); ++ INIT_LIST_HEAD(&manager->lru); ++ INIT_LIST_HEAD(&manager->unfenced); ++ manager->reg_reusable = reg_reusable; ++ manager->reg_destroy = reg_destroy; ++} ++EXPORT_SYMBOL(drm_regs_init); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sarea.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sarea.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sarea.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sarea.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,84 @@ ++/** ++ * \file drm_sarea.h ++ * \brief SAREA definitions ++ * ++ * \author Michel D�zer ++ */ ++ ++/* ++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
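To make the intended use of the register manager above concrete, here is a minimal sketch of how a driver could wrap it; the my_* structure and helpers and the pitch-matching reuse rule are invented for illustration, and the fence class/type arguments to drm_regs_alloc() would come from the driver's own fencing setup.

        #include <linux/slab.h>
        #include "drmP.h"

        struct my_tile_reg {
                struct drm_reg base;            /* must stay the first member */
                unsigned long pitch;            /* value currently programmed */
        };

        static int my_reg_reusable(const struct drm_reg *reg, const void *data)
        {
                /* base is the first member, so the cast back is safe */
                const struct my_tile_reg *tr = (const struct my_tile_reg *) reg;

                /* Reusable when it already holds the pitch the new request needs. */
                return tr->pitch == *(const unsigned long *) data;
        }

        static void my_reg_destroy(struct drm_reg *reg)
        {
                kfree(container_of(reg, struct my_tile_reg, base));
        }

        static void my_regman_setup(struct drm_reg_manager *manager, int nregs)
        {
                int i;

                drm_regs_init(manager, my_reg_reusable, my_reg_destroy);

                /* Hand every hardware register to the manager's free list. */
                for (i = 0; i < nregs; i++) {
                        struct my_tile_reg *tr = kzalloc(sizeof(*tr), GFP_KERNEL);

                        if (tr)
                                drm_regs_add(manager, &tr->base);
                }
        }

Per submission the driver then calls drm_regs_alloc(manager, &pitch, fence_class, fence_type, interruptible, no_wait, &reg), programs the register it got back, and once the commands are queued moves the whole unfenced list onto the LRU with drm_regs_fence(manager, fence).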
++ */ ++ ++#ifndef _DRM_SAREA_H_ ++#define _DRM_SAREA_H_ ++ ++#include "drm.h" ++ ++/* SAREA area needs to be at least a page */ ++#if defined(__alpha__) ++#define SAREA_MAX 0x2000 ++#elif defined(__ia64__) ++#define SAREA_MAX 0x10000 /* 64kB */ ++#else ++/* Intel 830M driver needs at least 8k SAREA */ ++#define SAREA_MAX 0x2000UL ++#endif ++ ++/** Maximum number of drawables in the SAREA */ ++#define SAREA_MAX_DRAWABLES 256 ++ ++#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000 ++ ++/** SAREA drawable */ ++struct drm_sarea_drawable { ++ unsigned int stamp; ++ unsigned int flags; ++}; ++ ++/** SAREA frame */ ++struct drm_sarea_frame { ++ unsigned int x; ++ unsigned int y; ++ unsigned int width; ++ unsigned int height; ++ unsigned int fullscreen; ++}; ++ ++/** SAREA */ ++struct drm_sarea { ++ /** first thing is always the DRM locking structure */ ++ struct drm_hw_lock lock; ++ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */ ++ struct drm_hw_lock drawable_lock; ++ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ ++ struct drm_sarea_frame frame; /**< frame */ ++ drm_context_t dummy_context; ++}; ++ ++#ifndef __KERNEL__ ++typedef struct drm_sarea_drawable drm_sarea_drawable_t; ++typedef struct drm_sarea_frame drm_sarea_frame_t; ++typedef struct drm_sarea drm_sarea_t; ++#endif ++ ++#endif /* _DRM_SAREA_H_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_scatter.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_scatter.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_scatter.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_scatter.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,228 @@ ++/** ++ * \file drm_scatter.c ++ * IOCTLs to manage scatter/gather memory ++ * ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com ++ * ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
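The per-architecture SAREA_MAX values in drm_sarea.h above exist because the shared area is handed out as one mapping of at least a page; a driver that appends private state to struct drm_sarea can sanity-check the combined layout at build time. A small sketch with a made-up private structure (BUILD_BUG_ON comes from linux/kernel.h):

        #include "drmP.h"
        #include "drm_sarea.h"

        /* Hypothetical driver-private state living directly after the core SAREA. */
        struct my_sarea {
                struct drm_sarea base;
                unsigned int last_dispatch;     /* example bookkeeping field */
        };

        static inline void my_sarea_layout_check(void)
        {
                /* The whole shared area must still fit inside the SAREA_MAX budget. */
                BUILD_BUG_ON(sizeof(struct my_sarea) > SAREA_MAX);
        }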
++ */ ++ ++#include ++#include "drmP.h" ++ ++#define DEBUG_SCATTER 0 ++ ++static inline void *drm_vmalloc_dma(unsigned long size) ++{ ++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) ++ return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE); ++#else ++ return vmalloc_32(size); ++#endif ++} ++ ++void drm_sg_cleanup(struct drm_sg_mem *entry) ++{ ++ struct page *page; ++ int i; ++ ++ for (i = 0; i < entry->pages; i++) { ++ page = entry->pagelist[i]; ++ if (page) ++ ClearPageReserved(page); ++ } ++ ++ vfree(entry->virtual); ++ ++ drm_free(entry->busaddr, ++ entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); ++ drm_free(entry->pagelist, ++ entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); ++ drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); ++} ++EXPORT_SYMBOL(drm_sg_cleanup); ++ ++#ifdef _LP64 ++# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) ++#else ++# define ScatterHandle(x) (unsigned int)(x) ++#endif ++ ++int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) ++{ ++ struct drm_sg_mem *entry; ++ unsigned long pages, i, j; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!drm_core_check_feature(dev, DRIVER_SG)) ++ return -EINVAL; ++ ++ if (dev->sg) ++ return -EINVAL; ++ ++ entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); ++ if (!entry) ++ return -ENOMEM; ++ ++ memset(entry, 0, sizeof(*entry)); ++ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; ++ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); ++ ++ entry->pages = pages; ++ entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), ++ DRM_MEM_PAGES); ++ if (!entry->pagelist) { ++ drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); ++ return -ENOMEM; ++ } ++ ++ memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); ++ ++ entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr), ++ DRM_MEM_PAGES); ++ if (!entry->busaddr) { ++ drm_free(entry->pagelist, ++ entry->pages * sizeof(*entry->pagelist), ++ DRM_MEM_PAGES); ++ drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); ++ return -ENOMEM; ++ } ++ memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr)); ++ ++ entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); ++ if (!entry->virtual) { ++ drm_free(entry->busaddr, ++ entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); ++ drm_free(entry->pagelist, ++ entry->pages * sizeof(*entry->pagelist), ++ DRM_MEM_PAGES); ++ drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); ++ return -ENOMEM; ++ } ++ ++ /* This also forces the mapping of COW pages, so our page list ++ * will be valid. Please don't remove it... ++ */ ++ memset(entry->virtual, 0, pages << PAGE_SHIFT); ++ ++ entry->handle = ScatterHandle((unsigned long)entry->virtual); ++ ++ DRM_DEBUG("handle = %08lx\n", entry->handle); ++ DRM_DEBUG("virtual = %p\n", entry->virtual); ++ ++ for (i = (unsigned long)entry->virtual, j = 0; j < pages; ++ i += PAGE_SIZE, j++) { ++ entry->pagelist[j] = vmalloc_to_page((void *)i); ++ if (!entry->pagelist[j]) ++ goto failed; ++ SetPageReserved(entry->pagelist[j]); ++ } ++ ++ request->handle = entry->handle; ++ ++ dev->sg = entry; ++ ++#if DEBUG_SCATTER ++ /* Verify that each page points to its virtual address, and vice ++ * versa. 
++ */ ++ { ++ int error = 0; ++ ++ for (i = 0; i < pages; i++) { ++ unsigned long *tmp; ++ ++ tmp = page_address(entry->pagelist[i]); ++ for (j = 0; ++ j < PAGE_SIZE / sizeof(unsigned long); ++ j++, tmp++) { ++ *tmp = 0xcafebabe; ++ } ++ tmp = (unsigned long *)((u8 *) entry->virtual + ++ (PAGE_SIZE * i)); ++ for (j = 0; ++ j < PAGE_SIZE / sizeof(unsigned long); ++ j++, tmp++) { ++ if (*tmp != 0xcafebabe && error == 0) { ++ error = 1; ++ DRM_ERROR("Scatter allocation error, " ++ "pagelist does not match " ++ "virtual mapping\n"); ++ } ++ } ++ tmp = page_address(entry->pagelist[i]); ++ for (j = 0; ++ j < PAGE_SIZE / sizeof(unsigned long); ++ j++, tmp++) { ++ *tmp = 0; ++ } ++ } ++ if (error == 0) ++ DRM_ERROR("Scatter allocation matches pagelist\n"); ++ } ++#endif ++ ++ return 0; ++ ++ failed: ++ drm_sg_cleanup(entry); ++ return -ENOMEM; ++ ++} ++EXPORT_SYMBOL(drm_sg_alloc); ++ ++int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_scatter_gather *request = data; ++ ++ return drm_sg_alloc(dev, request); ++ ++} ++ ++int drm_sg_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_scatter_gather *request = data; ++ struct drm_sg_mem *entry; ++ ++ if (!drm_core_check_feature(dev, DRIVER_SG)) ++ return -EINVAL; ++ ++ entry = dev->sg; ++ dev->sg = NULL; ++ ++ if (!entry || entry->handle != request->handle) ++ return -EINVAL; ++ ++ DRM_DEBUG("virtual = %p\n", entry->virtual); ++ ++ drm_sg_cleanup(entry); ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sman.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sman.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sman.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sman.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,353 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Simple memory manager interface that keeps track on allocate regions on a ++ * per "owner" basis. All regions associated with an "owner" can be released ++ * with a simple call. Typically if the "owner" exists. 
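As a usage illustration for the scatter/gather helpers above, an in-kernel caller could reserve an area like this; the function name and the 4 MiB size are arbitrary, and the driver has to advertise DRIVER_SG for drm_sg_alloc() to accept the request.

        #include "drmP.h"

        static int my_setup_sg_area(struct drm_device *dev)
        {
                struct drm_scatter_gather request;
                int ret;

                memset(&request, 0, sizeof(request));
                request.size = 4 * 1024 * 1024; /* rounded up to whole pages by drm_sg_alloc() */

                ret = drm_sg_alloc(dev, &request);
                if (ret)
                        return ret;

                /*
                 * dev->sg now describes the vmalloc'ed area with its pages marked
                 * reserved; request.handle carries the ScatterHandle() fold of the
                 * virtual address, which identifies the area in later ioctls.
                 */
                DRM_DEBUG("sg handle 0x%08lx\n", request.handle);
                return 0;
        }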
The owner is any ++ * "unsigned long" identifier. Can typically be a pointer to a file private ++ * struct or a context identifier. ++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#include "drm_sman.h" ++ ++struct drm_owner_item { ++ struct drm_hash_item owner_hash; ++ struct list_head sman_list; ++ struct list_head mem_blocks; ++}; ++ ++void drm_sman_takedown(struct drm_sman * sman) ++{ ++ drm_ht_remove(&sman->user_hash_tab); ++ drm_ht_remove(&sman->owner_hash_tab); ++ if (sman->mm) ++ drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm), ++ DRM_MEM_MM); ++} ++ ++EXPORT_SYMBOL(drm_sman_takedown); ++ ++int ++drm_sman_init(struct drm_sman * sman, unsigned int num_managers, ++ unsigned int user_order, unsigned int owner_order) ++{ ++ int ret = 0; ++ ++ sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), ++ DRM_MEM_MM); ++ if (!sman->mm) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ sman->num_managers = num_managers; ++ INIT_LIST_HEAD(&sman->owner_items); ++ ret = drm_ht_create(&sman->owner_hash_tab, owner_order); ++ if (ret) ++ goto out1; ++ ret = drm_ht_create(&sman->user_hash_tab, user_order); ++ if (!ret) ++ goto out; ++ ++ drm_ht_remove(&sman->owner_hash_tab); ++out1: ++ drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM); ++out: ++ return ret; ++} ++ ++EXPORT_SYMBOL(drm_sman_init); ++ ++static void *drm_sman_mm_allocate(void *private, unsigned long size, ++ unsigned alignment) ++{ ++ struct drm_mm *mm = (struct drm_mm *) private; ++ struct drm_mm_node *tmp; ++ ++ tmp = drm_mm_search_free(mm, size, alignment, 1); ++ if (!tmp) { ++ return NULL; ++ } ++ tmp = drm_mm_get_block(tmp, size, alignment); ++ return tmp; ++} ++ ++static void drm_sman_mm_free(void *private, void *ref) ++{ ++ struct drm_mm_node *node = (struct drm_mm_node *) ref; ++ ++ drm_mm_put_block(node); ++} ++ ++static void drm_sman_mm_destroy(void *private) ++{ ++ struct drm_mm *mm = (struct drm_mm *) private; ++ drm_mm_takedown(mm); ++ drm_free(mm, sizeof(*mm), DRM_MEM_MM); ++} ++ ++static unsigned long drm_sman_mm_offset(void *private, void *ref) ++{ ++ struct drm_mm_node *node = (struct drm_mm_node *) ref; ++ return node->start; ++} ++ ++int ++drm_sman_set_range(struct drm_sman * sman, unsigned int manager, ++ unsigned long start, unsigned long size) ++{ ++ struct drm_sman_mm *sman_mm; ++ struct drm_mm *mm; ++ int ret; ++ ++ BUG_ON(manager >= sman->num_managers); ++ ++ sman_mm = &sman->mm[manager]; ++ mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM); ++ if (!mm) { ++ return -ENOMEM; ++ } ++ sman_mm->private = mm; ++ ret = drm_mm_init(mm, start, size); ++ ++ if (ret) { ++ drm_free(mm, sizeof(*mm), DRM_MEM_MM); ++ return ret; ++ } ++ ++ sman_mm->allocate = drm_sman_mm_allocate; ++ sman_mm->free = drm_sman_mm_free; ++ sman_mm->destroy = drm_sman_mm_destroy; ++ sman_mm->offset = drm_sman_mm_offset; ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(drm_sman_set_range); ++ ++int ++drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, ++ struct drm_sman_mm * allocator) ++{ ++ BUG_ON(manager >= sman->num_managers); ++ sman->mm[manager] = *allocator; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_sman_set_manager); ++ ++static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, ++ unsigned long owner) ++{ ++ int ret; ++ struct drm_hash_item *owner_hash_item; ++ struct drm_owner_item *owner_item; ++ ++ ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); ++ if (!ret) { ++ return drm_hash_entry(owner_hash_item, struct drm_owner_item, ++ owner_hash); ++ } ++ ++ 
owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM); ++ if (!owner_item) ++ goto out; ++ ++ INIT_LIST_HEAD(&owner_item->mem_blocks); ++ owner_item->owner_hash.key = owner; ++ if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash)) ++ goto out1; ++ ++ list_add_tail(&owner_item->sman_list, &sman->owner_items); ++ return owner_item; ++ ++out1: ++ drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); ++out: ++ return NULL; ++} ++ ++struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, ++ unsigned long size, unsigned alignment, ++ unsigned long owner) ++{ ++ void *tmp; ++ struct drm_sman_mm *sman_mm; ++ struct drm_owner_item *owner_item; ++ struct drm_memblock_item *memblock; ++ ++ BUG_ON(manager >= sman->num_managers); ++ ++ sman_mm = &sman->mm[manager]; ++ tmp = sman_mm->allocate(sman_mm->private, size, alignment); ++ ++ if (!tmp) { ++ return NULL; ++ } ++ ++ memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM); ++ ++ if (!memblock) ++ goto out; ++ ++ memblock->mm_info = tmp; ++ memblock->mm = sman_mm; ++ memblock->sman = sman; ++ ++ if (drm_ht_just_insert_please ++ (&sman->user_hash_tab, &memblock->user_hash, ++ (unsigned long)memblock, 32, 0, 0)) ++ goto out1; ++ ++ owner_item = drm_sman_get_owner_item(sman, owner); ++ if (!owner_item) ++ goto out2; ++ ++ list_add_tail(&memblock->owner_list, &owner_item->mem_blocks); ++ ++ return memblock; ++ ++out2: ++ drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash); ++out1: ++ drm_free(memblock, sizeof(*memblock), DRM_MEM_MM); ++out: ++ sman_mm->free(sman_mm->private, tmp); ++ ++ return NULL; ++} ++ ++EXPORT_SYMBOL(drm_sman_alloc); ++ ++static void drm_sman_free(struct drm_memblock_item *item) ++{ ++ struct drm_sman *sman = item->sman; ++ ++ list_del(&item->owner_list); ++ drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); ++ item->mm->free(item->mm->private, item->mm_info); ++ drm_free(item, sizeof(*item), DRM_MEM_MM); ++} ++ ++int drm_sman_free_key(struct drm_sman *sman, unsigned int key) ++{ ++ struct drm_hash_item *hash_item; ++ struct drm_memblock_item *memblock_item; ++ ++ if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) ++ return -EINVAL; ++ ++ memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, ++ user_hash); ++ drm_sman_free(memblock_item); ++ return 0; ++} ++ ++EXPORT_SYMBOL(drm_sman_free_key); ++ ++static void drm_sman_remove_owner(struct drm_sman *sman, ++ struct drm_owner_item *owner_item) ++{ ++ list_del(&owner_item->sman_list); ++ drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); ++ drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); ++} ++ ++int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) ++{ ++ ++ struct drm_hash_item *hash_item; ++ struct drm_owner_item *owner_item; ++ ++ if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { ++ return -1; ++ } ++ ++ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); ++ if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { ++ drm_sman_remove_owner(sman, owner_item); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(drm_sman_owner_clean); ++ ++static void drm_sman_do_owner_cleanup(struct drm_sman *sman, ++ struct drm_owner_item *owner_item) ++{ ++ struct drm_memblock_item *entry, *next; ++ ++ list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, ++ owner_list) { ++ drm_sman_free(entry); ++ } ++ drm_sman_remove_owner(sman, owner_item); ++} ++ ++void drm_sman_owner_cleanup(struct drm_sman *sman, 
unsigned long owner) ++{ ++ ++ struct drm_hash_item *hash_item; ++ struct drm_owner_item *owner_item; ++ ++ if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { ++ ++ return; ++ } ++ ++ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); ++ drm_sman_do_owner_cleanup(sman, owner_item); ++} ++ ++EXPORT_SYMBOL(drm_sman_owner_cleanup); ++ ++void drm_sman_cleanup(struct drm_sman *sman) ++{ ++ struct drm_owner_item *entry, *next; ++ unsigned int i; ++ struct drm_sman_mm *sman_mm; ++ ++ list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { ++ drm_sman_do_owner_cleanup(sman, entry); ++ } ++ if (sman->mm) { ++ for (i = 0; i < sman->num_managers; ++i) { ++ sman_mm = &sman->mm[i]; ++ if (sman_mm->private) { ++ sman_mm->destroy(sman_mm->private); ++ sman_mm->private = NULL; ++ } ++ } ++ } ++} ++ ++EXPORT_SYMBOL(drm_sman_cleanup); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sman.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sman.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sman.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sman.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,176 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Simple memory MANager interface that keeps track on allocate regions on a ++ * per "owner" basis. All regions associated with an "owner" can be released ++ * with a simple call. Typically if the "owner" exists. The owner is any ++ * "unsigned long" identifier. Can typically be a pointer to a file private ++ * struct or a context identifier. ++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#ifndef DRM_SMAN_H ++#define DRM_SMAN_H ++ ++#include "drmP.h" ++#include "drm_hashtab.h" ++ ++/* ++ * A class that is an abstration of a simple memory allocator. ++ * The sman implementation provides a default such allocator ++ * using the drm_mm.c implementation. But the user can replace it. ++ * See the SiS implementation, which may use the SiS FB kernel module ++ * for memory management. ++ */ ++ ++struct drm_sman_mm { ++ /* private info. 
If allocated, needs to be destroyed by the destroy ++ function */ ++ void *private; ++ ++ /* Allocate a memory block with given size and alignment. ++ Return an opaque reference to the memory block */ ++ ++ void *(*allocate) (void *private, unsigned long size, ++ unsigned alignment); ++ ++ /* Free a memory block. "ref" is the opaque reference that we got from ++ the "alloc" function */ ++ ++ void (*free) (void *private, void *ref); ++ ++ /* Free all resources associated with this allocator */ ++ ++ void (*destroy) (void *private); ++ ++ /* Return a memory offset from the opaque reference returned from the ++ "alloc" function */ ++ ++ unsigned long (*offset) (void *private, void *ref); ++}; ++ ++struct drm_memblock_item { ++ struct list_head owner_list; ++ struct drm_hash_item user_hash; ++ void *mm_info; ++ struct drm_sman_mm *mm; ++ struct drm_sman *sman; ++}; ++ ++struct drm_sman { ++ struct drm_sman_mm *mm; ++ int num_managers; ++ struct drm_open_hash owner_hash_tab; ++ struct drm_open_hash user_hash_tab; ++ struct list_head owner_items; ++}; ++ ++/* ++ * Take down a memory manager. This function should only be called after a ++ * successful init and after a call to drm_sman_cleanup. ++ */ ++ ++extern void drm_sman_takedown(struct drm_sman * sman); ++ ++/* ++ * Allocate structures for a manager. ++ * num_managers are the number of memory pools to manage. (VRAM, AGP, ....) ++ * user_order is the log2 of the number of buckets in the user hash table. ++ * set this to approximately log2 of the max number of memory regions ++ * that will be allocated for _all_ pools together. ++ * owner_order is the log2 of the number of buckets in the owner hash table. ++ * set this to approximately log2 of ++ * the number of client file connections that will ++ * be using the manager. ++ * ++ */ ++ ++extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, ++ unsigned int user_order, unsigned int owner_order); ++ ++/* ++ * Initialize a drm_mm.c allocator. Should be called only once for each ++ * manager unless a customized allogator is used. ++ */ ++ ++extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, ++ unsigned long start, unsigned long size); ++ ++/* ++ * Initialize a customized allocator for one of the managers. ++ * (See the SiS module). The object pointed to by "allocator" is copied, ++ * so it can be destroyed after this call. ++ */ ++ ++extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, ++ struct drm_sman_mm * allocator); ++ ++/* ++ * Allocate a memory block. Aligment is not implemented yet. ++ */ ++ ++extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, ++ unsigned int manager, ++ unsigned long size, ++ unsigned alignment, ++ unsigned long owner); ++/* ++ * Free a memory block identified by its user hash key. ++ */ ++ ++extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); ++ ++/* ++ * returns 1 iff there are no stale memory blocks associated with this owner. ++ * Typically called to determine if we need to idle the hardware and call ++ * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all ++ * resources associated with owner. ++ */ ++ ++extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); ++ ++/* ++ * Frees all stale memory blocks associated with this owner. Note that this ++ * requires that the hardware is finished with all blocks, so the graphics engine ++ * should be idled before this call is made. 
This function also frees ++ * any resources associated with "owner" and should be called when owner ++ * is not going to be referenced anymore. ++ */ ++ ++extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); ++ ++/* ++ * Frees all stale memory blocks associated with the memory manager. ++ * See idling above. ++ */ ++ ++extern void drm_sman_cleanup(struct drm_sman * sman); ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_stub.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_stub.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_stub.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_stub.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,400 @@ ++/** ++ * \file drm_stub.c ++ * Stub support ++ * ++ * \author Rickard E. (Rik) Faith ++ */ ++ ++/* ++ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org ++ * ++ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
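Since drm_sman.h above documents the whole interface, a minimal usage sketch may help; the pool count, hash-table orders and helper names are invented here, and the owner cookie is simply the file-private pointer, as the comments suggest (the SiS driver uses the interface in roughly this way).

        #include "drm_sman.h"

        #define MY_POOL_VRAM    0

        static int my_memory_setup(struct drm_sman *sman,
                                   unsigned long vram_start, unsigned long vram_size)
        {
                int ret;

                /* one pool; hash tables sized for ~2^12 blocks and ~2^8 owners */
                ret = drm_sman_init(sman, 1, 12, 8);
                if (ret)
                        return ret;

                /* back the pool with the default drm_mm allocator */
                ret = drm_sman_set_range(sman, MY_POOL_VRAM, vram_start, vram_size);
                if (ret) {
                        drm_sman_cleanup(sman);
                        drm_sman_takedown(sman);
                }
                return ret;
        }

        static struct drm_memblock_item *my_vram_alloc(struct drm_sman *sman,
                                                       struct drm_file *file_priv,
                                                       unsigned long size)
        {
                /* the file-private pointer doubles as the "owner" cookie */
                return drm_sman_alloc(sman, MY_POOL_VRAM, size, 0,
                                      (unsigned long) file_priv);
        }

        static void my_release_client(struct drm_sman *sman, struct drm_file *file_priv)
        {
                /* idle the hardware first, then drop everything this client owned */
                drm_sman_owner_cleanup(sman, (unsigned long) file_priv);
        }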
++ */ ++ ++#include ++#include ++ ++#include "drmP.h" ++#include "drm_core.h" ++ ++unsigned int drm_debug = 0; /* 1 to enable debug output */ ++EXPORT_SYMBOL(drm_debug); ++ ++MODULE_AUTHOR(CORE_AUTHOR); ++MODULE_DESCRIPTION(CORE_DESC); ++MODULE_LICENSE("GPL and additional rights"); ++MODULE_PARM_DESC(debug, "Enable debug output"); ++ ++module_param_named(debug, drm_debug, int, 0600); ++ ++struct idr drm_minors_idr; ++ ++struct class *drm_class; ++struct proc_dir_entry *drm_proc_root; ++ ++static int drm_minor_get_id(struct drm_device *dev, int type) ++{ ++ int new_id; ++ int ret; ++ int base = 0, limit = 63; ++ ++again: ++ if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) { ++ DRM_ERROR("Out of memory expanding drawable idr\n"); ++ return -ENOMEM; ++ } ++ mutex_lock(&dev->struct_mutex); ++ ret = idr_get_new_above(&drm_minors_idr, NULL, ++ base, &new_id); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret == -EAGAIN) { ++ goto again; ++ } else if (ret) { ++ return ret; ++ } ++ ++ if (new_id >= limit) { ++ idr_remove(&drm_minors_idr, new_id); ++ return -EINVAL; ++ } ++ return new_id; ++} ++ ++static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, ++ const struct pci_device_id *ent, ++ struct drm_driver *driver) ++{ ++ int retcode; ++ ++ INIT_LIST_HEAD(&dev->filelist); ++ INIT_LIST_HEAD(&dev->ctxlist); ++ INIT_LIST_HEAD(&dev->vmalist); ++ INIT_LIST_HEAD(&dev->maplist); ++ ++ spin_lock_init(&dev->count_lock); ++ spin_lock_init(&dev->drw_lock); ++ spin_lock_init(&dev->tasklet_lock); ++ spin_lock_init(&dev->lock.spinlock); ++ init_timer(&dev->timer); ++ mutex_init(&dev->struct_mutex); ++ mutex_init(&dev->ctxlist_mutex); ++ mutex_init(&dev->bm.evict_mutex); ++ ++ idr_init(&dev->drw_idr); ++ ++ dev->pdev = pdev; ++ ++ if (pdev) { ++ dev->pci_device = pdev->device; ++ dev->pci_vendor = pdev->vendor; ++ ++#ifdef __alpha__ ++ dev->hose = pdev->sysdata; ++#endif ++ ++ dev->irq = pdev->irq; ++ } ++ ++ dev->irq_enabled = 0; ++ ++ if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) { ++ return -ENOMEM; ++ } ++ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, ++ DRM_FILE_PAGE_OFFSET_SIZE)) { ++ drm_ht_remove(&dev->map_hash); ++ return -ENOMEM; ++ } ++ ++ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) { ++ drm_ht_remove(&dev->map_hash); ++ drm_mm_takedown(&dev->offset_manager); ++ return -ENOMEM; ++ } ++ ++ /* the DRM has 6 counters */ ++ dev->counters = 6; ++ dev->types[0] = _DRM_STAT_LOCK; ++ dev->types[1] = _DRM_STAT_OPENS; ++ dev->types[2] = _DRM_STAT_CLOSES; ++ dev->types[3] = _DRM_STAT_IOCTLS; ++ dev->types[4] = _DRM_STAT_LOCKS; ++ dev->types[5] = _DRM_STAT_UNLOCKS; ++ ++ dev->driver = driver; ++ ++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) ++ if (drm_core_has_AGP(dev)) { ++ if (drm_device_is_agp(dev)) ++ dev->agp = drm_agp_init(dev); ++ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) ++ && (dev->agp == NULL)) { ++ DRM_ERROR("Cannot initialize the agpgart module.\n"); ++ retcode = -EINVAL; ++ goto error_out_unreg; ++ } ++ ++ if (drm_core_has_MTRR(dev)) { ++ if (dev->agp) ++ dev->agp->agp_mtrr = ++ mtrr_add(dev->agp->agp_info.aper_base, ++ dev->agp->agp_info.aper_size * ++ 1024 * 1024, MTRR_TYPE_WRCOMB, 1); ++ } ++ } ++#endif ++ ++ retcode = drm_ctxbitmap_init(dev); ++ if (retcode) { ++ DRM_ERROR("Cannot allocate memory for context bitmap.\n"); ++ goto error_out_unreg; ++ } ++ ++ if (driver->driver_features & DRIVER_GEM) { ++ retcode = drm_gem_init (dev); ++ if (retcode) { ++ DRM_ERROR("Cannot initialize graphics execution manager 
(GEM)\n"); ++ goto error_out_unreg; ++ } ++ } ++ ++ drm_fence_manager_init(dev); ++ ++ return 0; ++ ++error_out_unreg: ++ drm_lastclose(dev); ++ return retcode; ++} ++ ++/** ++ * Get a secondary minor number. ++ * ++ * \param dev device data structure ++ * \param sec-minor structure to hold the assigned minor ++ * \return negative number on failure. ++ * ++ * Search an empty entry and initialize it to the given parameters, and ++ * create the proc init entry via proc_init(). This routines assigns ++ * minor numbers to secondary heads of multi-headed cards ++ */ ++static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) ++{ ++ struct drm_minor *new_minor; ++ int ret; ++ int minor_id; ++ ++ DRM_DEBUG("\n"); ++ ++ minor_id = drm_minor_get_id(dev, type); ++ if (minor_id < 0) ++ return minor_id; ++ ++ new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL); ++ if (!new_minor) { ++ ret = -ENOMEM; ++ goto err_idr; ++ } ++ ++ new_minor->type = type; ++ new_minor->device = MKDEV(DRM_MAJOR, minor_id); ++ new_minor->dev = dev; ++ new_minor->index = minor_id; ++ ++ idr_replace(&drm_minors_idr, new_minor, minor_id); ++ ++ if (type == DRM_MINOR_LEGACY) { ++ ret = drm_proc_init(new_minor, minor_id, drm_proc_root); ++ if (ret) { ++ DRM_ERROR("DRM: Failed to initialize /proc/dri.\n"); ++ goto err_mem; ++ } ++ if (dev->driver->proc_init) { ++ ret = dev->driver->proc_init(new_minor); ++ if (ret) { ++ DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n"); ++ goto err_mem; ++ } ++ } ++ } else ++ new_minor->dev_root = NULL; ++ ++ ret = drm_sysfs_device_add(new_minor); ++ if (ret) { ++ printk(KERN_ERR ++ "DRM: Error sysfs_device_add.\n"); ++ goto err_g2; ++ } ++ *minor = new_minor; ++ ++ DRM_DEBUG("new minor assigned %d\n", minor_id); ++ return 0; ++ ++ ++err_g2: ++ if (new_minor->type == DRM_MINOR_LEGACY) { ++ if (dev->driver->proc_cleanup) ++ dev->driver->proc_cleanup(new_minor); ++ drm_proc_cleanup(new_minor, drm_proc_root); ++ } ++err_mem: ++ kfree(new_minor); ++err_idr: ++ idr_remove(&drm_minors_idr, minor_id); ++ *minor = NULL; ++ return ret; ++} ++ ++/** ++ * Register. ++ * ++ * \param pdev - PCI device structure ++ * \param ent entry from the PCI ID table with device type flags ++ * \return zero on success or a negative number on failure. ++ * ++ * Attempt to gets inter module "drm" information. If we are first ++ * then register the character device and inter module information. ++ * Try and register, if we fail to register, backout previous work. ++ */ ++int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ++ struct drm_driver *driver) ++{ ++ struct drm_device *dev; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); ++ if (!dev) ++ return -ENOMEM; ++ ++#ifdef CONFIG_PCI ++ if (!drm_fb_loaded) { ++ pci_set_drvdata(pdev, dev); ++ ret = pci_request_regions(pdev, driver->pci_driver.name); ++ if (ret) ++ goto err_g1; ++ } ++ ++ ret = pci_enable_device(pdev); ++ if (ret) ++ goto err_g2; ++ pci_set_master(pdev); ++#endif ++ ++ if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { ++ printk(KERN_ERR "DRM: fill_in_dev failed\n"); ++ goto err_g3; ++ } ++ ++ /* only add the control node on a modesetting platform */ ++ if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) ++ goto err_g3; ++ ++ if (dev->driver->load) ++ if ((ret = dev->driver->load(dev, ent ? 
ent->driver_data : 0))) ++ goto err_g4; ++ ++ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", ++ driver->name, driver->major, driver->minor, driver->patchlevel, ++ driver->date, dev->primary->index); ++ ++ return 0; ++err_g4: ++ drm_put_minor(dev); ++err_g3: ++#ifdef CONFIG_PCI ++ if (!drm_fb_loaded) ++ pci_disable_device(pdev); ++err_g2: ++ if (!drm_fb_loaded) ++ pci_release_regions(pdev); ++err_g1: ++ if (!drm_fb_loaded) ++ pci_set_drvdata(pdev, NULL); ++#endif ++ ++ drm_free(dev, sizeof(*dev), DRM_MEM_STUB); ++ printk(KERN_ERR "DRM: drm_get_dev failed.\n"); ++ return ret; ++} ++EXPORT_SYMBOL(drm_get_dev); ++ ++ ++/** ++ * Put a device minor number. ++ * ++ * \param dev device data structure ++ * \return always zero ++ * ++ * Cleans up the proc resources. If it is the last minor then release the foreign ++ * "drm" data, otherwise unregisters the "drm" data, frees the dev list and ++ * unregisters the character device. ++ */ ++int drm_put_dev(struct drm_device * dev) ++{ ++ DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); ++ ++ if (dev->unique) { ++ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ++ dev->unique = NULL; ++ dev->unique_len = 0; ++ } ++ if (dev->devname) { ++ drm_free(dev->devname, strlen(dev->devname) + 1, ++ DRM_MEM_DRIVER); ++ dev->devname = NULL; ++ } ++ drm_free(dev, sizeof(*dev), DRM_MEM_STUB); ++ return 0; ++} ++ ++/** ++ * Put a secondary minor number. ++ * ++ * \param sec_minor - structure to be released ++ * \return always zero ++ * ++ * Cleans up the proc resources. Not legal for this to be the ++ * last minor released. ++ * ++ */ ++int drm_put_minor(struct drm_device *dev) ++{ ++ struct drm_minor **minor_p = &dev->primary; ++ struct drm_minor *minor = *minor_p; ++ DRM_DEBUG("release secondary minor %d\n", minor->index); ++ ++ if (minor->type == DRM_MINOR_LEGACY) { ++ if (dev->driver->proc_cleanup) ++ dev->driver->proc_cleanup(minor); ++ drm_proc_cleanup(minor, drm_proc_root); ++ } ++ drm_sysfs_device_remove(minor); ++ ++ idr_remove(&drm_minors_idr, minor->index); ++ ++ kfree(minor); ++ *minor_p = NULL; ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sysfs.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sysfs.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_sysfs.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_sysfs.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,212 @@ ++ ++/* ++ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support ++ * extra sysfs attribute from DRM. Normal drm_sysfs_class ++ * does not allow adding attributes. ++ * ++ * Copyright (c) 2004 Jon Smirl ++ * Copyright (c) 2003-2004 Greg Kroah-Hartman ++ * Copyright (c) 2003-2004 IBM Corp. ++ * ++ * This file is released under the GPLv2 ++ * ++ */ ++ ++#include ++#include ++#include ++ ++#include "drm_core.h" ++#include "drmP.h" ++ ++#define to_drm_minor(d) container_of(d, struct drm_minor, kdev) ++ ++/** ++ * drm_sysfs_suspend - DRM class suspend hook ++ * @dev: Linux device to suspend ++ * @state: power state to enter ++ * ++ * Just figures out what the actual struct drm_device associated with ++ * @dev is and calls its suspend hook, if present. 
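For context, drm_get_dev() above is what ends up running from a driver's PCI probe path; a hypothetical driver wires it up roughly like this, with the driver structure assumed to be filled in elsewhere:

        #include "drmP.h"

        static struct drm_driver my_driver;     /* .load, .fops, .pci_driver set up elsewhere */

        /* ent points at the matching entry of the driver's PCI ID table */
        static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        {
                /* allocates the drm_device, registers the minor, then calls my_driver.load() */
                return drm_get_dev(pdev, ent, &my_driver);
        }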
++ */ ++static int drm_sysfs_suspend(struct device *dev, pm_message_t state) ++{ ++ struct drm_minor *drm_minor = to_drm_minor(dev); ++ struct drm_device *drm_dev = drm_minor->dev; ++ ++ printk(KERN_ERR "%s\n", __FUNCTION__); ++ ++ if (drm_dev->driver->suspend) ++ return drm_dev->driver->suspend(drm_dev, state); ++ ++ return 0; ++} ++ ++/** ++ * drm_sysfs_resume - DRM class resume hook ++ * @dev: Linux device to resume ++ * ++ * Just figures out what the actual struct drm_device associated with ++ * @dev is and calls its resume hook, if present. ++ */ ++static int drm_sysfs_resume(struct device *dev) ++{ ++ struct drm_minor *drm_minor = to_drm_minor(dev); ++ struct drm_device *drm_dev = drm_minor->dev; ++ ++ if (drm_dev->driver->resume) ++ return drm_dev->driver->resume(drm_dev); ++ ++ return 0; ++} ++ ++/* Display the version of drm_core. This doesn't work right in current design */ ++static ssize_t version_show(struct class *dev, char *buf) ++{ ++ return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, ++ CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); ++} ++ ++static CLASS_ATTR(version, S_IRUGO, version_show, NULL); ++ ++/** ++ * drm_sysfs_create - create a struct drm_sysfs_class structure ++ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class ++ * @name: pointer to a string for the name of this class. ++ * ++ * This is used to create DRM class pointer that can then be used ++ * in calls to drm_sysfs_device_add(). ++ * ++ * Note, the pointer created here is to be destroyed when finished by making a ++ * call to drm_sysfs_destroy(). ++ */ ++struct class *drm_sysfs_create(struct module *owner, char *name) ++{ ++ struct class *class; ++ int err; ++ ++ class = class_create(owner, name); ++ if (IS_ERR(class)) { ++ err = PTR_ERR(class); ++ goto err_out; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) ++ class->suspend = drm_sysfs_suspend; ++ class->resume = drm_sysfs_resume; ++#endif ++ ++ err = class_create_file(class, &class_attr_version); ++ if (err) ++ goto err_out_class; ++ ++ return class; ++ ++err_out_class: ++ class_destroy(class); ++err_out: ++ return ERR_PTR(err); ++} ++ ++/** ++ * drm_sysfs_destroy - destroys DRM class ++ * ++ * Destroy the DRM device class. ++ */ ++void drm_sysfs_destroy(void) ++{ ++ if ((drm_class == NULL) || (IS_ERR(drm_class))) ++ return; ++ class_remove_file(drm_class, &class_attr_version); ++ class_destroy(drm_class); ++} ++ ++static ssize_t show_dri(struct device *device, struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_minor *drm_minor = to_drm_minor(device); ++ struct drm_device *drm_dev = drm_minor->dev; ++ if (drm_dev->driver->dri_library_name) ++ return drm_dev->driver->dri_library_name(drm_dev, buf); ++ return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name); ++} ++ ++static struct device_attribute device_attrs[] = { ++ __ATTR(dri_library_name, S_IRUGO, show_dri, NULL), ++}; ++ ++/** ++ * drm_sysfs_device_release - do nothing ++ * @dev: Linux device ++ * ++ * Normally, this would free the DRM device associated with @dev, along ++ * with cleaning up any other stuff. But we do that in the DRM core, so ++ * this function can just return and hope that the core does its job. ++ */ ++static void drm_sysfs_device_release(struct device *dev) ++{ ++ return; ++} ++ ++/** ++ * drm_sysfs_device_add - adds a class device to sysfs for a character driver ++ * @dev: DRM device to be added ++ * @head: DRM head in question ++ * ++ * Add a DRM device to the DRM's device model class. 
We use @dev's PCI device ++ * as the parent for the Linux device, and make sure it has a file containing ++ * the driver we're using (for userspace compatibility). ++ */ ++int drm_sysfs_device_add(struct drm_minor *minor) ++{ ++ int err; ++ int i, j; ++ char *minor_str; ++ ++ minor->kdev.parent = minor->dev->pdev ? &minor->dev->pdev->dev : NULL; ++ minor->kdev.class = drm_class; ++ minor->kdev.release = drm_sysfs_device_release; ++ minor->kdev.devt = minor->device; ++ minor_str = "card%d"; ++ ++ snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index); ++ ++ err = device_register(&minor->kdev); ++ if (err) { ++ DRM_ERROR("device add failed: %d\n", err); ++ goto err_out; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { ++ err = device_create_file(&minor->kdev, &device_attrs[i]); ++ if (err) ++ goto err_out_files; ++ } ++ ++ return 0; ++ ++err_out_files: ++ if (i > 0) ++ for (j = 0; j < i; j++) ++ device_remove_file(&minor->kdev, &device_attrs[j]); ++ device_unregister(&minor->kdev); ++err_out: ++ ++ return err; ++} ++ ++/** ++ * drm_sysfs_device_remove - remove DRM device ++ * @dev: DRM device to remove ++ * ++ * This call unregisters and cleans up a class device that was created with a ++ * call to drm_sysfs_device_add() ++ */ ++void drm_sysfs_device_remove(struct drm_minor *minor) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) ++ device_remove_file(&minor->kdev, &device_attrs[i]); ++ device_unregister(&minor->kdev); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_ttm.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_ttm.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_ttm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_ttm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,524 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) ++static void drm_clflush_page(struct page *page) ++{ ++ uint8_t *page_virtual; ++ unsigned int i; ++ ++ if (unlikely(page == NULL)) ++ return; ++ ++ page_virtual = kmap_atomic(page, KM_USER0); ++ ++ for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) ++ clflush(page_virtual + i); ++ ++ kunmap_atomic(page_virtual, KM_USER0); ++} ++ ++static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages) ++{ ++ unsigned long i; ++ ++ mb(); ++ for (i=0; i < num_pages; ++i) ++ drm_clflush_page(*pages++); ++ mb(); ++} ++#endif ++ ++static void drm_ttm_ipi_handler(void *null) ++{ ++#ifdef CONFIG_AGP ++ flush_agp_cache(); ++#endif ++} ++ ++void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages) ++{ ++ ++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) ++ if (cpu_has_clflush) { ++ drm_ttm_cache_flush_clflush(pages, num_pages); ++ return; ++ } ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1)) ++#else ++ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) ++#endif ++ DRM_ERROR("Timed out waiting for drm cache flush.\n"); ++} ++EXPORT_SYMBOL(drm_ttm_cache_flush); ++ ++/** ++ * Allocates storage for pointers to the pages that back the ttm. ++ * ++ * Uses kmalloc if possible. Otherwise falls back to vmalloc. ++ */ ++static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm) ++{ ++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ++ ttm->pages = NULL; ++ ++ if (drm_alloc_memctl(size)) ++ return; ++ ++ if (size <= PAGE_SIZE) ++ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); ++ ++ if (!ttm->pages) { ++ ttm->pages = vmalloc_user(size); ++ if (ttm->pages) ++ ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC; ++ } ++ if (!ttm->pages) ++ drm_free_memctl(size); ++} ++ ++static void drm_ttm_free_page_directory(struct drm_ttm *ttm) ++{ ++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ++ ++ if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) { ++ vfree(ttm->pages); ++ ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC; ++ } else { ++ drm_free(ttm->pages, size, DRM_MEM_TTM); ++ } ++ drm_free_memctl(size); ++ ttm->pages = NULL; ++} ++ ++static struct page *drm_ttm_alloc_page(void) ++{ ++ struct page *page; ++ ++ if (drm_alloc_memctl(PAGE_SIZE)) ++ return NULL; ++ ++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ if (!page) { ++ drm_free_memctl(PAGE_SIZE); ++ return NULL; ++ } ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ SetPageReserved(page); ++#endif ++ return page; ++} ++ ++/* ++ * Change caching policy for the linear kernel map ++ * for range of pages in a ttm. 
++ */ ++ ++static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached) ++{ ++ int i; ++ struct page **cur_page; ++ int do_tlbflush = 0; ++ ++ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) ++ return 0; ++ ++ if (noncached) ++ drm_ttm_cache_flush(ttm->pages, ttm->num_pages); ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ cur_page = ttm->pages + i; ++ if (*cur_page) { ++ if (!PageHighMem(*cur_page)) { ++#ifdef CONFIG_AGP ++ if (noncached) { ++ map_page_into_agp(*cur_page); ++ } else { ++ unmap_page_from_agp(*cur_page); ++ } ++#endif ++ do_tlbflush = 1; ++ } ++ } ++ } ++#ifdef CONFIG_AGP ++ if (do_tlbflush) ++ flush_agp_mappings(); ++#endif ++ ++ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED); ++ ++ return 0; ++} ++ ++ ++static void drm_ttm_free_user_pages(struct drm_ttm *ttm) ++{ ++ int write; ++ int dirty; ++ struct page *page; ++ int i; ++ ++ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER)); ++ write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0); ++ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0); ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ page = ttm->pages[i]; ++ if (page == NULL) ++ continue; ++ ++ if (page == ttm->dummy_read_page) { ++ BUG_ON(write); ++ continue; ++ } ++ ++ if (write && dirty && !PageReserved(page)) ++ set_page_dirty_lock(page); ++ ++ ttm->pages[i] = NULL; ++ put_page(page); ++ } ++} ++ ++static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) ++{ ++ int i; ++ struct drm_buffer_manager *bm = &ttm->dev->bm; ++ struct page **cur_page; ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ cur_page = ttm->pages + i; ++ if (*cur_page) { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ClearPageReserved(*cur_page); ++#endif ++ if (page_count(*cur_page) != 1) ++ DRM_ERROR("Erroneous page count. Leaking pages.\n"); ++ if (page_mapped(*cur_page)) ++ DRM_ERROR("Erroneous map count. Leaking page mappings.\n"); ++ __free_page(*cur_page); ++ drm_free_memctl(PAGE_SIZE); ++ --bm->cur_pages; ++ } ++ } ++} ++ ++/* ++ * Free all resources associated with a ttm. ++ */ ++ ++int drm_ttm_destroy(struct drm_ttm *ttm) ++{ ++ struct drm_ttm_backend *be; ++ ++ if (!ttm) ++ return 0; ++ ++ be = ttm->be; ++ if (be) { ++ be->func->destroy(be); ++ ttm->be = NULL; ++ } ++ ++ if (ttm->pages) { ++ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) ++ drm_ttm_set_caching(ttm, 0); ++ ++ if (ttm->page_flags & DRM_TTM_PAGE_USER) ++ drm_ttm_free_user_pages(ttm); ++ else ++ drm_ttm_free_alloced_pages(ttm); ++ ++ drm_ttm_free_page_directory(ttm); ++ } ++ ++ drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); ++ return 0; ++} ++ ++struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index) ++{ ++ struct page *p; ++ struct drm_buffer_manager *bm = &ttm->dev->bm; ++ ++ while(NULL == (p = ttm->pages[index])) { ++ p = drm_ttm_alloc_page(); ++ if (!p) ++ return NULL; ++ ++ if (PageHighMem(p)) ++ ttm->pages[--ttm->first_himem_page] = p; ++ else ++ ttm->pages[++ttm->last_lomem_page] = p; ++ ++ ++bm->cur_pages; ++ } ++ return p; ++} ++EXPORT_SYMBOL(drm_ttm_get_page); ++ ++/** ++ * drm_ttm_set_user: ++ * ++ * @ttm: the ttm to map pages to. This must always be ++ * a freshly created ttm. ++ * ++ * @tsk: a pointer to the address space from which to map ++ * pages. ++ * ++ * @write: a boolean indicating that write access is desired ++ * ++ * start: the starting address ++ * ++ * Map a range of user addresses to a new ttm object. This ++ * provides access to user memory from the graphics device. 
++ */ ++int drm_ttm_set_user(struct drm_ttm *ttm, ++ struct task_struct *tsk, ++ unsigned long start, ++ unsigned long num_pages) ++{ ++ struct mm_struct *mm = tsk->mm; ++ int ret; ++ int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0; ++ ++ BUG_ON(num_pages != ttm->num_pages); ++ BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0); ++ ++ down_read(&mm->mmap_sem); ++ ret = get_user_pages(tsk, mm, start, num_pages, ++ write, 0, ttm->pages, NULL); ++ up_read(&mm->mmap_sem); ++ ++ if (ret != num_pages && write) { ++ drm_ttm_free_user_pages(ttm); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/** ++ * drm_ttm_populate: ++ * ++ * @ttm: the object to allocate pages for ++ * ++ * Allocate pages for all unset page entries, then ++ * call the backend to create the hardware mappings ++ */ ++int drm_ttm_populate(struct drm_ttm *ttm) ++{ ++ struct page *page; ++ unsigned long i; ++ struct drm_ttm_backend *be; ++ ++ if (ttm->state != ttm_unpopulated) ++ return 0; ++ ++ be = ttm->be; ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ page = drm_ttm_get_page(ttm, i); ++ if (!page) ++ return -ENOMEM; ++ } ++ ++ be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page); ++ ttm->state = ttm_unbound; ++ return 0; ++} ++ ++/** ++ * drm_ttm_create: ++ * ++ * @dev: the drm_device ++ * ++ * @size: The size (in bytes) of the desired object ++ * ++ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h. ++ * ++ * Allocate and initialize a ttm, leaving it unpopulated at this time ++ */ ++ ++struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, ++ uint32_t page_flags, struct page *dummy_read_page) ++{ ++ struct drm_bo_driver *bo_driver = dev->driver->bo_driver; ++ struct drm_ttm *ttm; ++ ++ if (!bo_driver) ++ return NULL; ++ ++ ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM); ++ if (!ttm) ++ return NULL; ++ ++ ttm->dev = dev; ++ atomic_set(&ttm->vma_count, 0); ++ ++ ttm->destroy = 0; ++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ ttm->first_himem_page = ttm->num_pages; ++ ttm->last_lomem_page = -1; ++ ++ ttm->page_flags = page_flags; ++ ++ ttm->dummy_read_page = dummy_read_page; ++ ++ /* ++ * Account also for AGP module memory usage. ++ */ ++ ++ drm_ttm_alloc_page_directory(ttm); ++ if (!ttm->pages) { ++ drm_ttm_destroy(ttm); ++ DRM_ERROR("Failed allocating page table\n"); ++ return NULL; ++ } ++ ttm->be = bo_driver->create_ttm_backend_entry(dev); ++ if (!ttm->be) { ++ drm_ttm_destroy(ttm); ++ DRM_ERROR("Failed creating ttm backend entry\n"); ++ return NULL; ++ } ++ ttm->state = ttm_unpopulated; ++ return ttm; ++} ++ ++/** ++ * drm_ttm_evict: ++ * ++ * @ttm: the object to be unbound from the aperture. ++ * ++ * Transition a ttm from bound to evicted, where it ++ * isn't present in the aperture, but various caches may ++ * not be consistent. ++ */ ++void drm_ttm_evict(struct drm_ttm *ttm) ++{ ++ struct drm_ttm_backend *be = ttm->be; ++ int ret; ++ ++ if (ttm->state == ttm_bound) { ++ ret = be->func->unbind(be); ++ BUG_ON(ret); ++ } ++ ++ ttm->state = ttm_evicted; ++} ++ ++/** ++ * drm_ttm_fixup_caching: ++ * ++ * @ttm: the object to set unbound ++ * ++ * XXX this function is misnamed. Transition a ttm from evicted to ++ * unbound, flushing caches as appropriate. 
++ */ ++void drm_ttm_fixup_caching(struct drm_ttm *ttm) ++{ ++ ++ if (ttm->state == ttm_evicted) { ++ struct drm_ttm_backend *be = ttm->be; ++ if (be->func->needs_ub_cache_adjust(be)) ++ drm_ttm_set_caching(ttm, 0); ++ ttm->state = ttm_unbound; ++ } ++} ++ ++/** ++ * drm_ttm_unbind: ++ * ++ * @ttm: the object to unbind from the graphics device ++ * ++ * Unbind an object from the aperture. This removes the mappings ++ * from the graphics device and flushes caches if necessary. ++ */ ++void drm_ttm_unbind(struct drm_ttm *ttm) ++{ ++ if (ttm->state == ttm_bound) ++ drm_ttm_evict(ttm); ++ ++ drm_ttm_fixup_caching(ttm); ++} ++ ++/** ++ * drm_ttm_bind: ++ * ++ * @ttm: the ttm object to bind to the graphics device ++ * ++ * @bo_mem: the aperture memory region which will hold the object ++ * ++ * Bind a ttm object to the aperture. This ensures that the necessary ++ * pages are allocated, flushes CPU caches as needed and marks the ++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been ++ * modified by the GPU ++ */ ++int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem) ++{ ++ struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver; ++ int ret = 0; ++ struct drm_ttm_backend *be; ++ ++ if (!ttm) ++ return -EINVAL; ++ if (ttm->state == ttm_bound) ++ return 0; ++ ++ be = ttm->be; ++ ++ ret = drm_ttm_populate(ttm); ++ if (ret) ++ return ret; ++ ++ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) ++ drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); ++ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) && ++ bo_driver->ttm_cache_flush) ++ bo_driver->ttm_cache_flush(ttm); ++ ++ ret = be->func->bind(be, bo_mem); ++ if (ret) { ++ ttm->state = ttm_evicted; ++ DRM_ERROR("Couldn't bind backend.\n"); ++ return ret; ++ } ++ ++ ttm->state = ttm_bound; ++ if (ttm->page_flags & DRM_TTM_PAGE_USER) ++ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY; ++ return 0; ++} ++EXPORT_SYMBOL(drm_ttm_bind); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_vm.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_vm.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_vm.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_vm.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,890 @@ ++/** ++ * \file drm_vm.c ++ * Memory mapping for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++#if defined(__ia64__) ++#include ++#endif ++ ++static void drm_vm_open(struct vm_area_struct *vma); ++static void drm_vm_close(struct vm_area_struct *vma); ++static int drm_bo_mmap_locked(struct vm_area_struct *vma, ++ struct file *filp, ++ drm_local_map_t *map); ++ ++ ++pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) ++{ ++ pgprot_t tmp = vm_get_page_prot(vma->vm_flags); ++ ++#if defined(__i386__) || defined(__x86_64__) ++ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { ++ pgprot_val(tmp) |= _PAGE_PCD; ++ pgprot_val(tmp) &= ~_PAGE_PWT; ++ } ++#elif defined(__powerpc__) ++ pgprot_val(tmp) |= _PAGE_NO_CACHE; ++ if (map_type == _DRM_REGISTERS) ++ pgprot_val(tmp) |= _PAGE_GUARDED; ++#elif defined(__ia64__) ++ if (efi_range_is_wc(vma->vm_start, vma->vm_end - ++ vma->vm_start)) ++ tmp = pgprot_writecombine(tmp); ++ else ++ tmp = pgprot_noncached(tmp); ++#elif defined(__sparc__) ++ tmp = pgprot_noncached(tmp); ++#endif ++ return tmp; ++} ++ ++static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) ++{ ++ pgprot_t tmp = vm_get_page_prot(vma->vm_flags); ++ ++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) ++ tmp |= _PAGE_NO_CACHE; ++#endif ++ return tmp; ++} ++ ++#ifndef DRM_VM_NOPAGE ++/** ++ * \c fault method for AGP virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Find the right map and if it's AGP memory find the real physical page to ++ * map, get the page, increment the use count and return it. ++ */ ++#if __OS_HAS_AGP ++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_map *map = NULL; ++ struct drm_map_list *r_list; ++ struct drm_hash_item *hash; ++ ++ /* ++ * Find the right map ++ */ ++ if (!drm_core_has_AGP(dev)) ++ goto vm_fault_error; ++ ++ if (!dev->agp || !dev->agp->cant_use_aperture) ++ goto vm_fault_error; ++ ++ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) ++ goto vm_fault_error; ++ ++ r_list = drm_hash_entry(hash, struct drm_map_list, hash); ++ map = r_list->map; ++ ++ if (map && map->type == _DRM_AGP) { ++ /* ++ * Using vm_pgoff as a selector forces us to use this unusual ++ * addressing scheme. 
++ */ ++ unsigned long offset = (unsigned long)vmf->virtual_address - ++ vma->vm_start; ++ unsigned long baddr = map->offset + offset; ++ struct drm_agp_mem *agpmem; ++ struct page *page; ++ ++#ifdef __alpha__ ++ /* ++ * Adjust to a bus-relative address ++ */ ++ baddr -= dev->hose->mem_space->start; ++#endif ++ ++ /* ++ * It's AGP memory - find the real physical page to map ++ */ ++ list_for_each_entry(agpmem, &dev->agp->memory, head) { ++ if (agpmem->bound <= baddr && ++ agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) ++ break; ++ } ++ ++ if (!agpmem) ++ goto vm_fault_error; ++ ++ /* ++ * Get the page, inc the use count, and return it ++ */ ++ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; ++ page = virt_to_page(__va(agpmem->memory->memory[offset])); ++ get_page(page); ++ vmf->page = page; ++ ++ DRM_DEBUG ++ ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", ++ baddr, __va(agpmem->memory->memory[offset]), offset, ++ page_count(page)); ++ return 0; ++ } ++vm_fault_error: ++ return VM_FAULT_SIGBUS; /* Disallow mremap */ ++} ++#else /* __OS_HAS_AGP */ ++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ return VM_FAULT_SIGBUS; ++} ++#endif /* __OS_HAS_AGP */ ++ ++/** ++ * \c nopage method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Get the mapping, find the real physical page to map, get the page, and ++ * return it. ++ */ ++static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ unsigned long offset; ++ unsigned long i; ++ struct page *page; ++ ++ if (!map) ++ return VM_FAULT_SIGBUS; /* Nothing allocated */ ++ ++ offset = (unsigned long)vmf->virtual_address - vma->vm_start; ++ i = (unsigned long)map->handle + offset; ++ page = vmalloc_to_page((void *)i); ++ if (!page) ++ return VM_FAULT_SIGBUS; ++ get_page(page); ++ vmf->page = page; ++ ++ DRM_DEBUG("shm_fault 0x%lx\n", offset); ++ return 0; ++} ++#endif ++ ++/** ++ * \c close method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * ++ * Deletes map information if we are the last ++ * person to close a mapping and it's not in the global maplist. ++ */ ++static void drm_vm_shm_close(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_vma_entry *pt, *temp; ++ struct drm_map *map; ++ struct drm_map_list *r_list; ++ int found_maps = 0; ++ ++ DRM_DEBUG("0x%08lx,0x%08lx\n", ++ vma->vm_start, vma->vm_end - vma->vm_start); ++ atomic_dec(&dev->vma_count); ++ ++ map = vma->vm_private_data; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { ++ if (pt->vma->vm_private_data == map) ++ found_maps++; ++ if (pt->vma == vma) { ++ list_del(&pt->head); ++ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); ++ } ++ } ++ /* We were the only map that was found */ ++ if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { ++ /* Check to see if we are in the maplist, if we are not, then ++ * we delete this mappings information. 
++ */ ++ found_maps = 0; ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map == map) ++ found_maps++; ++ } ++ ++ if (!found_maps) { ++ drm_dma_handle_t dmah; ++ ++ switch (map->type) { ++ case _DRM_REGISTERS: ++ case _DRM_FRAME_BUFFER: ++ if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { ++ int retcode; ++ retcode = mtrr_del(map->mtrr, ++ map->offset, ++ map->size); ++ DRM_DEBUG("mtrr_del = %d\n", retcode); ++ } ++ iounmap(map->handle); ++ break; ++ case _DRM_SHM: ++ vfree(map->handle); ++ break; ++ case _DRM_AGP: ++ case _DRM_SCATTER_GATHER: ++ break; ++ case _DRM_CONSISTENT: ++ dmah.vaddr = map->handle; ++ dmah.busaddr = map->offset; ++ dmah.size = map->size; ++ __drm_pci_free(dev, &dmah); ++ break; ++ case _DRM_TTM: ++ BUG_ON(1); ++ break; ++ } ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++#ifndef DRM_VM_NOPAGE ++/** ++ * \c fault method for DMA virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist. ++ */ ++static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_device_dma *dma = dev->dma; ++ unsigned long offset; ++ unsigned long page_nr; ++ struct page *page; ++ ++ if (!dma) ++ return VM_FAULT_SIGBUS; /* Error */ ++ if (!dma->pagelist) ++ return VM_FAULT_SIGBUS; /* Nothing allocated */ ++ ++ offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ ++ page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ ++ page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); ++ ++ get_page(page); ++ vmf->page = page; ++ ++ DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr); ++ return 0; ++} ++ ++/** ++ * \c fault method for scatter-gather virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
++ */ ++static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long offset; ++ unsigned long map_offset; ++ unsigned long page_offset; ++ struct page *page; ++ ++ if (!entry) ++ return VM_FAULT_SIGBUS; /* Error */ ++ if (!entry->pagelist) ++ return VM_FAULT_SIGBUS; /* Nothing allocated */ ++ ++ offset = (unsigned long)vmf->virtual_address - vma->vm_start; ++ map_offset = map->offset - (unsigned long)dev->sg->virtual; ++ page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); ++ page = entry->pagelist[page_offset]; ++ get_page(page); ++ vmf->page = page; ++ ++ return 0; ++} ++#endif ++ ++/** AGP virtual memory operations */ ++static struct vm_operations_struct drm_vm_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_nopage, ++#else ++ .fault = drm_do_vm_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_close, ++}; ++ ++/** Shared virtual memory operations */ ++static struct vm_operations_struct drm_vm_shm_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_shm_nopage, ++#else ++ .fault = drm_do_vm_shm_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_shm_close, ++}; ++ ++/** DMA virtual memory operations */ ++static struct vm_operations_struct drm_vm_dma_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_dma_nopage, ++#else ++ .fault = drm_do_vm_dma_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_close, ++}; ++ ++/** Scatter-gather virtual memory operations */ ++static struct vm_operations_struct drm_vm_sg_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_sg_nopage, ++#else ++ .fault = drm_do_vm_sg_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_close, ++}; ++ ++/** ++ * \c open method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * ++ * Create a new drm_vma_entry structure as the \p vma private data entry and ++ * add it to drm_device::vmalist. ++ */ ++static void drm_vm_open_locked(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_vma_entry *vma_entry; ++ ++ DRM_DEBUG("0x%08lx,0x%08lx\n", ++ vma->vm_start, vma->vm_end - vma->vm_start); ++ atomic_inc(&dev->vma_count); ++ ++ vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); ++ if (vma_entry) { ++ vma_entry->vma = vma; ++ vma_entry->pid = current->pid; ++ list_add(&vma_entry->head, &dev->vmalist); ++ } ++} ++ ++static void drm_vm_open(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_vm_open_locked(vma); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * \c close method for all virtual memory types. ++ * ++ * \param vma virtual memory area. ++ * ++ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and ++ * free it. 
++ */ ++static void drm_vm_close(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_vma_entry *pt, *temp; ++ ++ DRM_DEBUG("0x%08lx,0x%08lx\n", ++ vma->vm_start, vma->vm_end - vma->vm_start); ++ atomic_dec(&dev->vma_count); ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { ++ if (pt->vma == vma) { ++ list_del(&pt->head); ++ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); ++ break; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++ ++/** ++ * mmap DMA memory. ++ * ++ * \param file_priv DRM file private. ++ * \param vma virtual memory area. ++ * \return zero on success or a negative number on failure. ++ * ++ * Sets the virtual memory area operations structure to vm_dma_ops, the file ++ * pointer, and calls vm_open(). ++ */ ++static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev; ++ struct drm_device_dma *dma; ++ unsigned long length = vma->vm_end - vma->vm_start; ++ ++ dev = priv->minor->dev; ++ dma = dev->dma; ++ DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", ++ vma->vm_start, vma->vm_end, vma->vm_pgoff); ++ ++ /* Length must match exact page count */ ++ if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { ++ return -EINVAL; ++ } ++ ++ if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) { ++ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); ++#if defined(__i386__) || defined(__x86_64__) ++ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; ++#else ++ /* Ye gads this is ugly. With more thought ++ we could move this up higher and use ++ `protection_map' instead. */ ++ vma->vm_page_prot = ++ __pgprot(pte_val ++ (pte_wrprotect ++ (__pte(pgprot_val(vma->vm_page_prot))))); ++#endif ++ } ++ ++ vma->vm_ops = &drm_vm_dma_ops; ++ vma->vm_flags |= VM_RESERVED; /* Don't swap */ ++ ++ vma->vm_file = filp; /* Needed for drm_vm_open() */ ++ drm_vm_open_locked(vma); ++ return 0; ++} ++ ++unsigned long drm_core_get_map_ofs(struct drm_map * map) ++{ ++ return map->offset; ++} ++EXPORT_SYMBOL(drm_core_get_map_ofs); ++ ++unsigned long drm_core_get_reg_ofs(struct drm_device *dev) ++{ ++#ifdef __alpha__ ++ return dev->hose->dense_mem_base - dev->hose->mem_space->start; ++#else ++ return 0; ++#endif ++} ++EXPORT_SYMBOL(drm_core_get_reg_ofs); ++ ++/** ++ * mmap DMA memory. ++ * ++ * \param file_priv DRM file private. ++ * \param vma virtual memory area. ++ * \return zero on success or a negative number on failure. ++ * ++ * If the virtual memory area has no offset associated with it then it's a DMA ++ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, ++ * checks that the restricted flag is not set, sets the virtual memory operations ++ * according to the mapping type and remaps the pages. Finally sets the file ++ * pointer and calls vm_open(). ++ */ ++static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_map *map = NULL; ++ unsigned long offset = 0; ++ struct drm_hash_item *hash; ++ ++ DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", ++ vma->vm_start, vma->vm_end, vma->vm_pgoff); ++ ++ if (!priv->authenticated) ++ return -EACCES; ++ ++ /* We check for "dma". On Apple's UniNorth, it's valid to have ++ * the AGP mapped at physical address 0 ++ * --BenH. 
++ */ ++ ++ if (!vma->vm_pgoff ++#if __OS_HAS_AGP ++ && (!dev->agp ++ || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) ++#endif ++ ) ++ return drm_mmap_dma(filp, vma); ++ ++ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { ++ DRM_ERROR("Could not find map\n"); ++ return -EINVAL; ++ } ++ ++ map = drm_hash_entry(hash, struct drm_map_list, hash)->map; ++ if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) ++ return -EPERM; ++ ++ /* Check for valid size. */ ++ if (map->size < vma->vm_end - vma->vm_start) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { ++ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); ++#if defined(__i386__) || defined(__x86_64__) ++ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; ++#else ++ /* Ye gads this is ugly. With more thought ++ we could move this up higher and use ++ `protection_map' instead. */ ++ vma->vm_page_prot = ++ __pgprot(pte_val ++ (pte_wrprotect ++ (__pte(pgprot_val(vma->vm_page_prot))))); ++#endif ++ } ++ ++ switch (map->type) { ++ case _DRM_AGP: ++ if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) { ++ /* ++ * On some platforms we can't talk to bus dma address from the CPU, so for ++ * memory of type DRM_AGP, we'll deal with sorting out the real physical ++ * pages and mappings in nopage() ++ */ ++#if defined(__powerpc__) ++ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; ++#endif ++ vma->vm_ops = &drm_vm_ops; ++ break; ++ } ++ /* fall through to _DRM_FRAME_BUFFER... */ ++ case _DRM_FRAME_BUFFER: ++ case _DRM_REGISTERS: ++ offset = dev->driver->get_reg_ofs(dev); ++ vma->vm_flags |= VM_IO; /* not in core dump */ ++ vma->vm_page_prot = drm_io_prot(map->type, vma); ++ if (io_remap_pfn_range(vma, vma->vm_start, ++ (map->offset + offset) >> PAGE_SHIFT, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot)) ++ return -EAGAIN; ++ DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," ++ " offset = 0x%lx\n", ++ map->type, ++ vma->vm_start, vma->vm_end, map->offset + offset); ++ vma->vm_ops = &drm_vm_ops; ++ break; ++ case _DRM_CONSISTENT: ++ /* Consistent memory is really like shared memory. But ++ * it's allocated in a different way, so avoid nopage */ ++ if (remap_pfn_range(vma, vma->vm_start, ++ page_to_pfn(virt_to_page(map->handle)), ++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) ++ return -EAGAIN; ++ vma->vm_page_prot = drm_dma_prot(map->type, vma); ++ /* fall through to _DRM_SHM */ ++ case _DRM_SHM: ++ vma->vm_ops = &drm_vm_shm_ops; ++ vma->vm_private_data = (void *)map; ++ /* Don't let this area swap. Change when ++ DRM_KERNEL advisory is supported. */ ++ vma->vm_flags |= VM_RESERVED; ++ break; ++ case _DRM_SCATTER_GATHER: ++ vma->vm_ops = &drm_vm_sg_ops; ++ vma->vm_private_data = (void *)map; ++ vma->vm_flags |= VM_RESERVED; ++ vma->vm_page_prot = drm_dma_prot(map->type, vma); ++ break; ++ case _DRM_TTM: ++ return drm_bo_mmap_locked(vma, filp, map); ++ default: ++ return -EINVAL; /* This should never happen. */ ++ } ++ vma->vm_flags |= VM_RESERVED; /* Don't swap */ ++ ++ vma->vm_file = filp; /* Needed for drm_vm_open() */ ++ drm_vm_open_locked(vma); ++ return 0; ++} ++ ++int drm_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_mmap_locked(filp, vma); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_mmap); ++ ++/** ++ * buffer object vm functions. 
++ */ ++ ++/** ++ * \c Pagefault method for buffer objects. ++ * ++ * \param vma Virtual memory area. ++ * \param vmf vm fault data ++ * \return Error or VM_FAULT_NOPAGE:. The pfn is manually inserted. ++ * ++ * It's important that pfns are inserted while holding the bo->mutex lock. ++ * otherwise we might race with unmap_mapping_range() which is always ++ * called with the bo->mutex lock held. ++ * ++ * We're modifying the page attribute bits of the vma->vm_page_prot field, ++ * without holding the mmap_sem in write mode. Only in read mode. ++ * These bits are not used by the mm subsystem code, and we consider them ++ * protected by the bo->mutex lock. ++ */ ++ ++#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT) ++static int drm_bo_vm_fault(struct vm_area_struct *vma, ++ struct vm_fault *vmf) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long ret = VM_FAULT_NOPAGE; ++ ++ dev = bo->dev; ++ err = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (err) ++ return VM_FAULT_NOPAGE; ++ ++ err = mutex_lock_interruptible(&bo->mutex); ++ if (err) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return VM_FAULT_NOPAGE; ++ } ++ ++ err = drm_bo_wait(bo, 0, 1, 0, 1); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ goto out_unlock; ++ } ++ ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ /* ++ * If buffer happens to be in a non-mappable location, ++ * move it to a mappable. ++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ uint32_t new_flags = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ err = drm_bo_move_buffer(bo, new_flags, 0, 0); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; ++ goto out_unlock; ++ } ++ } ++ ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ ret = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ ret = VM_FAULT_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? ++ vm_get_page_prot(vma->vm_flags) : ++ drm_io_prot(_DRM_TTM, vma); ++ } ++ ++ err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE; ++ goto out_unlock; ++ } ++out_unlock: ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ mutex_unlock(&bo->mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return ret; ++} ++#endif ++ ++static void drm_bo_vm_open_locked(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ ++ drm_vm_open_locked(vma); ++ atomic_inc(&bo->usage); ++#ifdef DRM_ODD_MM_COMPAT ++ drm_bo_add_vma(bo, vma); ++#endif ++} ++ ++/** ++ * \c vma open method for buffer objects. 
++ * ++ * \param vma virtual memory area. ++ */ ++ ++static void drm_bo_vm_open(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ struct drm_device *dev = bo->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_vm_open_locked(vma); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * \c vma close method for buffer objects. ++ * ++ * \param vma virtual memory area. ++ */ ++ ++static void drm_bo_vm_close(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ struct drm_device *dev = bo->dev; ++ ++ drm_vm_close(vma); ++ if (bo) { ++ mutex_lock(&dev->struct_mutex); ++#ifdef DRM_ODD_MM_COMPAT ++ drm_bo_delete_vma(bo, vma); ++#endif ++ drm_bo_usage_deref_locked((struct drm_buffer_object **) ++ &vma->vm_private_data); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return; ++} ++ ++static struct vm_operations_struct drm_bo_vm_ops = { ++#ifdef DRM_FULL_MM_COMPAT ++#ifdef DRM_NO_FAULT ++ .nopfn = drm_bo_vm_nopfn, ++#else ++ .fault = drm_bo_vm_fault, ++#endif ++#else ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) ++ .nopfn = drm_bo_vm_nopfn, ++#else ++ .nopage = drm_bo_vm_nopage, ++#endif ++#endif ++ .open = drm_bo_vm_open, ++ .close = drm_bo_vm_close, ++}; ++ ++/** ++ * mmap buffer object memory. ++ * ++ * \param vma virtual memory area. ++ * \param file_priv DRM file private. ++ * \param map The buffer object drm map. ++ * \return zero on success or a negative number on failure. ++ */ ++ ++int drm_bo_mmap_locked(struct vm_area_struct *vma, ++ struct file *filp, ++ drm_local_map_t *map) ++{ ++ vma->vm_ops = &drm_bo_vm_ops; ++ vma->vm_private_data = map->handle; ++ vma->vm_file = filp; ++ vma->vm_flags |= VM_RESERVED | VM_IO; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) ++ vma->vm_flags |= VM_PFNMAP; ++#endif ++ drm_bo_vm_open_locked(vma); ++#ifdef DRM_ODD_MM_COMPAT ++ drm_bo_map_bound(vma); ++#endif ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,267 @@ ++/** ++ * \file drm_vm.c ++ * Memory mapping for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++#ifdef DRM_VM_NOPAGE ++/** ++ * \c nopage method for AGP virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Find the right map and if it's AGP memory find the real physical page to ++ * map, get the page, increment the use count and return it. ++ */ ++#if __OS_HAS_AGP ++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_map *map = NULL; ++ struct drm_map_list *r_list; ++ struct drm_hash_item *hash; ++ ++ /* ++ * Find the right map ++ */ ++ if (!drm_core_has_AGP(dev)) ++ goto vm_nopage_error; ++ ++ if (!dev->agp || !dev->agp->cant_use_aperture) ++ goto vm_nopage_error; ++ ++ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) ++ goto vm_nopage_error; ++ ++ r_list = drm_hash_entry(hash, struct drm_map_list, hash); ++ map = r_list->map; ++ ++ if (map && map->type == _DRM_AGP) { ++ unsigned long offset = address - vma->vm_start; ++ unsigned long baddr = map->offset + offset; ++ struct drm_agp_mem *agpmem; ++ struct page *page; ++ ++#ifdef __alpha__ ++ /* ++ * Adjust to a bus-relative address ++ */ ++ baddr -= dev->hose->mem_space->start; ++#endif ++ ++ /* ++ * It's AGP memory - find the real physical page to map ++ */ ++ list_for_each_entry(agpmem, &dev->agp->memory, head) { ++ if (agpmem->bound <= baddr && ++ agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) ++ break; ++ } ++ ++ if (!agpmem) ++ goto vm_nopage_error; ++ ++ /* ++ * Get the page, inc the use count, and return it ++ */ ++ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; ++ page = virt_to_page(__va(agpmem->memory->memory[offset])); ++ get_page(page); ++ ++#if 0 ++ /* page_count() not defined everywhere */ ++ DRM_DEBUG ++ ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", ++ baddr, __va(agpmem->memory->memory[offset]), offset, ++ page_count(page)); ++#endif ++ ++ return page; ++ } ++ vm_nopage_error: ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++} ++#else /* __OS_HAS_AGP */ ++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ return NOPAGE_SIGBUS; ++} ++#endif /* __OS_HAS_AGP */ ++ ++/** ++ * \c nopage method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Get the mapping, find the real physical page to map, get the page, and ++ * return it. 
++ */ ++static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ unsigned long offset; ++ unsigned long i; ++ struct page *page; ++ ++ if (address > vma->vm_end) ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++ if (!map) ++ return NOPAGE_SIGBUS; /* Nothing allocated */ ++ ++ offset = address - vma->vm_start; ++ i = (unsigned long)map->handle + offset; ++ page = vmalloc_to_page((void *)i); ++ if (!page) ++ return NOPAGE_SIGBUS; ++ get_page(page); ++ ++ DRM_DEBUG("0x%lx\n", address); ++ return page; ++} ++ ++/** ++ * \c nopage method for DMA virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist. ++ */ ++static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_device_dma *dma = dev->dma; ++ unsigned long offset; ++ unsigned long page_nr; ++ struct page *page; ++ ++ if (!dma) ++ return NOPAGE_SIGBUS; /* Error */ ++ if (address > vma->vm_end) ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++ if (!dma->pagelist) ++ return NOPAGE_SIGBUS; /* Nothing allocated */ ++ ++ offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ ++ page_nr = offset >> PAGE_SHIFT; ++ page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); ++ ++ get_page(page); ++ ++ DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr); ++ return page; ++} ++ ++/** ++ * \c nopage method for scatter-gather virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
++ */ ++static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long offset; ++ unsigned long map_offset; ++ unsigned long page_offset; ++ struct page *page; ++ ++ DRM_DEBUG("\n"); ++ if (!entry) ++ return NOPAGE_SIGBUS; /* Error */ ++ if (address > vma->vm_end) ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++ if (!entry->pagelist) ++ return NOPAGE_SIGBUS; /* Nothing allocated */ ++ ++ offset = address - vma->vm_start; ++ map_offset = map->offset - (unsigned long)dev->sg->virtual; ++ page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); ++ page = entry->pagelist[page_offset]; ++ get_page(page); ++ ++ return page; ++} ++ ++ ++struct page *drm_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_nopage(vma, address); ++} ++ ++struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_shm_nopage(vma, address); ++} ++ ++struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_dma_nopage(vma, address); ++} ++ ++struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_sg_nopage(vma, address); ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ffb_context.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ffb_context.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ffb_context.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ffb_context.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,582 @@ ++/* $Id$ ++ * ffb_context.c: Creator/Creator3D DRI/DRM context switching. ++ * ++ * Copyright (C) 2000 David S. Miller (davem@redhat.com) ++ * ++ * Almost entirely stolen from tdfx_context.c, see there ++ * for authors. ++ */ ++ ++#include ++#include ++ ++#include "drmP.h" ++#include "ffb_drv.h" ++ ++static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) { ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ int i; ++ ++ for (i = 0; i < FFB_MAX_CTXS; i++) { ++ if (fpriv->hw_state[i] == NULL) ++ break; ++ } ++ if (i == FFB_MAX_CTXS) ++ return -1; ++ ++ fpriv->hw_state[i] = kmalloc(sizeof(struct ffb_hw_context), GFP_KERNEL); ++ if (fpriv->hw_state[i] == NULL) ++ return -1; ++ ++ fpriv->hw_state[i]->is_2d_only = is_2d_only; ++ ++ /* Plus one because 0 is the special DRM_KERNEL_CONTEXT. */ ++ return i + 1; ++} ++ ++static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx) ++{ ++ ffb_fbcPtr ffb = fpriv->regs; ++ struct ffb_hw_context *ctx; ++ int i; ++ ++ ctx = fpriv->hw_state[idx - 1]; ++ if (idx == 0 || ctx == NULL) ++ return; ++ ++ if (ctx->is_2d_only) { ++ /* 2D applications only care about certain pieces ++ * of state. 
++ */ ++ ctx->drawop = upa_readl(&ffb->drawop); ++ ctx->ppc = upa_readl(&ffb->ppc); ++ ctx->wid = upa_readl(&ffb->wid); ++ ctx->fg = upa_readl(&ffb->fg); ++ ctx->bg = upa_readl(&ffb->bg); ++ ctx->xclip = upa_readl(&ffb->xclip); ++ ctx->fbc = upa_readl(&ffb->fbc); ++ ctx->rop = upa_readl(&ffb->rop); ++ ctx->cmp = upa_readl(&ffb->cmp); ++ ctx->matchab = upa_readl(&ffb->matchab); ++ ctx->magnab = upa_readl(&ffb->magnab); ++ ctx->pmask = upa_readl(&ffb->pmask); ++ ctx->xpmask = upa_readl(&ffb->xpmask); ++ ctx->lpat = upa_readl(&ffb->lpat); ++ ctx->fontxy = upa_readl(&ffb->fontxy); ++ ctx->fontw = upa_readl(&ffb->fontw); ++ ctx->fontinc = upa_readl(&ffb->fontinc); ++ ++ /* stencil/stencilctl only exists on FFB2+ and later ++ * due to the introduction of 3DRAM-III. ++ */ ++ if (fpriv->ffb_type == ffb2_vertical_plus || ++ fpriv->ffb_type == ffb2_horizontal_plus) { ++ ctx->stencil = upa_readl(&ffb->stencil); ++ ctx->stencilctl = upa_readl(&ffb->stencilctl); ++ } ++ ++ for (i = 0; i < 32; i++) ++ ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]); ++ ctx->ucsr = upa_readl(&ffb->ucsr); ++ return; ++ } ++ ++ /* Fetch drawop. */ ++ ctx->drawop = upa_readl(&ffb->drawop); ++ ++ /* If we were saving the vertex registers, this is where ++ * we would do it. We would save 32 32-bit words starting ++ * at ffb->suvtx. ++ */ ++ ++ /* Capture rendering attributes. */ ++ ++ ctx->ppc = upa_readl(&ffb->ppc); /* Pixel Processor Control */ ++ ctx->wid = upa_readl(&ffb->wid); /* Current WID */ ++ ctx->fg = upa_readl(&ffb->fg); /* Constant FG color */ ++ ctx->bg = upa_readl(&ffb->bg); /* Constant BG color */ ++ ctx->consty = upa_readl(&ffb->consty); /* Constant Y */ ++ ctx->constz = upa_readl(&ffb->constz); /* Constant Z */ ++ ctx->xclip = upa_readl(&ffb->xclip); /* X plane clip */ ++ ctx->dcss = upa_readl(&ffb->dcss); /* Depth Cue Scale Slope */ ++ ctx->vclipmin = upa_readl(&ffb->vclipmin); /* Primary XY clip, minimum */ ++ ctx->vclipmax = upa_readl(&ffb->vclipmax); /* Primary XY clip, maximum */ ++ ctx->vclipzmin = upa_readl(&ffb->vclipzmin); /* Primary Z clip, minimum */ ++ ctx->vclipzmax = upa_readl(&ffb->vclipzmax); /* Primary Z clip, maximum */ ++ ctx->dcsf = upa_readl(&ffb->dcsf); /* Depth Cue Scale Front Bound */ ++ ctx->dcsb = upa_readl(&ffb->dcsb); /* Depth Cue Scale Back Bound */ ++ ctx->dczf = upa_readl(&ffb->dczf); /* Depth Cue Scale Z Front */ ++ ctx->dczb = upa_readl(&ffb->dczb); /* Depth Cue Scale Z Back */ ++ ctx->blendc = upa_readl(&ffb->blendc); /* Alpha Blend Control */ ++ ctx->blendc1 = upa_readl(&ffb->blendc1); /* Alpha Blend Color 1 */ ++ ctx->blendc2 = upa_readl(&ffb->blendc2); /* Alpha Blend Color 2 */ ++ ctx->fbc = upa_readl(&ffb->fbc); /* Frame Buffer Control */ ++ ctx->rop = upa_readl(&ffb->rop); /* Raster Operation */ ++ ctx->cmp = upa_readl(&ffb->cmp); /* Compare Controls */ ++ ctx->matchab = upa_readl(&ffb->matchab); /* Buffer A/B Match Ops */ ++ ctx->matchc = upa_readl(&ffb->matchc); /* Buffer C Match Ops */ ++ ctx->magnab = upa_readl(&ffb->magnab); /* Buffer A/B Magnitude Ops */ ++ ctx->magnc = upa_readl(&ffb->magnc); /* Buffer C Magnitude Ops */ ++ ctx->pmask = upa_readl(&ffb->pmask); /* RGB Plane Mask */ ++ ctx->xpmask = upa_readl(&ffb->xpmask); /* X Plane Mask */ ++ ctx->ypmask = upa_readl(&ffb->ypmask); /* Y Plane Mask */ ++ ctx->zpmask = upa_readl(&ffb->zpmask); /* Z Plane Mask */ ++ ++ /* Auxiliary Clips. 
*/ ++ ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min); ++ ctx->auxclip0max = upa_readl(&ffb->auxclip[0].max); ++ ctx->auxclip1min = upa_readl(&ffb->auxclip[1].min); ++ ctx->auxclip1max = upa_readl(&ffb->auxclip[1].max); ++ ctx->auxclip2min = upa_readl(&ffb->auxclip[2].min); ++ ctx->auxclip2max = upa_readl(&ffb->auxclip[2].max); ++ ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min); ++ ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max); ++ ++ ctx->lpat = upa_readl(&ffb->lpat); /* Line Pattern */ ++ ctx->fontxy = upa_readl(&ffb->fontxy); /* XY Font Coordinate */ ++ ctx->fontw = upa_readl(&ffb->fontw); /* Font Width */ ++ ctx->fontinc = upa_readl(&ffb->fontinc); /* Font X/Y Increment */ ++ ++ /* These registers/features only exist on FFB2 and later chips. */ ++ if (fpriv->ffb_type >= ffb2_prototype) { ++ ctx->dcss1 = upa_readl(&ffb->dcss1); /* Depth Cue Scale Slope 1 */ ++ ctx->dcss2 = upa_readl(&ffb->dcss2); /* Depth Cue Scale Slope 2 */ ++ ctx->dcss2 = upa_readl(&ffb->dcss3); /* Depth Cue Scale Slope 3 */ ++ ctx->dcs2 = upa_readl(&ffb->dcs2); /* Depth Cue Scale 2 */ ++ ctx->dcs3 = upa_readl(&ffb->dcs3); /* Depth Cue Scale 3 */ ++ ctx->dcs4 = upa_readl(&ffb->dcs4); /* Depth Cue Scale 4 */ ++ ctx->dcd2 = upa_readl(&ffb->dcd2); /* Depth Cue Depth 2 */ ++ ctx->dcd3 = upa_readl(&ffb->dcd3); /* Depth Cue Depth 3 */ ++ ctx->dcd4 = upa_readl(&ffb->dcd4); /* Depth Cue Depth 4 */ ++ ++ /* And stencil/stencilctl only exists on FFB2+ and later ++ * due to the introduction of 3DRAM-III. ++ */ ++ if (fpriv->ffb_type == ffb2_vertical_plus || ++ fpriv->ffb_type == ffb2_horizontal_plus) { ++ ctx->stencil = upa_readl(&ffb->stencil); ++ ctx->stencilctl = upa_readl(&ffb->stencilctl); ++ } ++ } ++ ++ /* Save the 32x32 area pattern. */ ++ for (i = 0; i < 32; i++) ++ ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]); ++ ++ /* Finally, stash away the User Constol/Status Register. */ ++ ctx->ucsr = upa_readl(&ffb->ucsr); ++} ++ ++static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx) ++{ ++ ffb_fbcPtr ffb = fpriv->regs; ++ struct ffb_hw_context *ctx; ++ int i; ++ ++ ctx = fpriv->hw_state[idx - 1]; ++ if (idx == 0 || ctx == NULL) ++ return; ++ ++ if (ctx->is_2d_only) { ++ /* 2D applications only care about certain pieces ++ * of state. ++ */ ++ upa_writel(ctx->drawop, &ffb->drawop); ++ ++ /* If we were restoring the vertex registers, this is where ++ * we would do it. We would restore 32 32-bit words starting ++ * at ffb->suvtx. ++ */ ++ ++ upa_writel(ctx->ppc, &ffb->ppc); ++ upa_writel(ctx->wid, &ffb->wid); ++ upa_writel(ctx->fg, &ffb->fg); ++ upa_writel(ctx->bg, &ffb->bg); ++ upa_writel(ctx->xclip, &ffb->xclip); ++ upa_writel(ctx->fbc, &ffb->fbc); ++ upa_writel(ctx->rop, &ffb->rop); ++ upa_writel(ctx->cmp, &ffb->cmp); ++ upa_writel(ctx->matchab, &ffb->matchab); ++ upa_writel(ctx->magnab, &ffb->magnab); ++ upa_writel(ctx->pmask, &ffb->pmask); ++ upa_writel(ctx->xpmask, &ffb->xpmask); ++ upa_writel(ctx->lpat, &ffb->lpat); ++ upa_writel(ctx->fontxy, &ffb->fontxy); ++ upa_writel(ctx->fontw, &ffb->fontw); ++ upa_writel(ctx->fontinc, &ffb->fontinc); ++ ++ /* stencil/stencilctl only exists on FFB2+ and later ++ * due to the introduction of 3DRAM-III. 
++ */ ++ if (fpriv->ffb_type == ffb2_vertical_plus || ++ fpriv->ffb_type == ffb2_horizontal_plus) { ++ upa_writel(ctx->stencil, &ffb->stencil); ++ upa_writel(ctx->stencilctl, &ffb->stencilctl); ++ upa_writel(0x80000000, &ffb->fbc); ++ upa_writel((ctx->stencilctl | 0x80000), ++ &ffb->rawstencilctl); ++ upa_writel(ctx->fbc, &ffb->fbc); ++ } ++ ++ for (i = 0; i < 32; i++) ++ upa_writel(ctx->area_pattern[i], &ffb->pattern[i]); ++ upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr); ++ return; ++ } ++ ++ /* Restore drawop. */ ++ upa_writel(ctx->drawop, &ffb->drawop); ++ ++ /* If we were restoring the vertex registers, this is where ++ * we would do it. We would restore 32 32-bit words starting ++ * at ffb->suvtx. ++ */ ++ ++ /* Restore rendering attributes. */ ++ ++ upa_writel(ctx->ppc, &ffb->ppc); /* Pixel Processor Control */ ++ upa_writel(ctx->wid, &ffb->wid); /* Current WID */ ++ upa_writel(ctx->fg, &ffb->fg); /* Constant FG color */ ++ upa_writel(ctx->bg, &ffb->bg); /* Constant BG color */ ++ upa_writel(ctx->consty, &ffb->consty); /* Constant Y */ ++ upa_writel(ctx->constz, &ffb->constz); /* Constant Z */ ++ upa_writel(ctx->xclip, &ffb->xclip); /* X plane clip */ ++ upa_writel(ctx->dcss, &ffb->dcss); /* Depth Cue Scale Slope */ ++ upa_writel(ctx->vclipmin, &ffb->vclipmin); /* Primary XY clip, minimum */ ++ upa_writel(ctx->vclipmax, &ffb->vclipmax); /* Primary XY clip, maximum */ ++ upa_writel(ctx->vclipzmin, &ffb->vclipzmin); /* Primary Z clip, minimum */ ++ upa_writel(ctx->vclipzmax, &ffb->vclipzmax); /* Primary Z clip, maximum */ ++ upa_writel(ctx->dcsf, &ffb->dcsf); /* Depth Cue Scale Front Bound */ ++ upa_writel(ctx->dcsb, &ffb->dcsb); /* Depth Cue Scale Back Bound */ ++ upa_writel(ctx->dczf, &ffb->dczf); /* Depth Cue Scale Z Front */ ++ upa_writel(ctx->dczb, &ffb->dczb); /* Depth Cue Scale Z Back */ ++ upa_writel(ctx->blendc, &ffb->blendc); /* Alpha Blend Control */ ++ upa_writel(ctx->blendc1, &ffb->blendc1); /* Alpha Blend Color 1 */ ++ upa_writel(ctx->blendc2, &ffb->blendc2); /* Alpha Blend Color 2 */ ++ upa_writel(ctx->fbc, &ffb->fbc); /* Frame Buffer Control */ ++ upa_writel(ctx->rop, &ffb->rop); /* Raster Operation */ ++ upa_writel(ctx->cmp, &ffb->cmp); /* Compare Controls */ ++ upa_writel(ctx->matchab, &ffb->matchab); /* Buffer A/B Match Ops */ ++ upa_writel(ctx->matchc, &ffb->matchc); /* Buffer C Match Ops */ ++ upa_writel(ctx->magnab, &ffb->magnab); /* Buffer A/B Magnitude Ops */ ++ upa_writel(ctx->magnc, &ffb->magnc); /* Buffer C Magnitude Ops */ ++ upa_writel(ctx->pmask, &ffb->pmask); /* RGB Plane Mask */ ++ upa_writel(ctx->xpmask, &ffb->xpmask); /* X Plane Mask */ ++ upa_writel(ctx->ypmask, &ffb->ypmask); /* Y Plane Mask */ ++ upa_writel(ctx->zpmask, &ffb->zpmask); /* Z Plane Mask */ ++ ++ /* Auxiliary Clips. */ ++ upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min); ++ upa_writel(ctx->auxclip0max, &ffb->auxclip[0].max); ++ upa_writel(ctx->auxclip1min, &ffb->auxclip[1].min); ++ upa_writel(ctx->auxclip1max, &ffb->auxclip[1].max); ++ upa_writel(ctx->auxclip2min, &ffb->auxclip[2].min); ++ upa_writel(ctx->auxclip2max, &ffb->auxclip[2].max); ++ upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min); ++ upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max); ++ ++ upa_writel(ctx->lpat, &ffb->lpat); /* Line Pattern */ ++ upa_writel(ctx->fontxy, &ffb->fontxy); /* XY Font Coordinate */ ++ upa_writel(ctx->fontw, &ffb->fontw); /* Font Width */ ++ upa_writel(ctx->fontinc, &ffb->fontinc); /* Font X/Y Increment */ ++ ++ /* These registers/features only exist on FFB2 and later chips. 
*/ ++ if (fpriv->ffb_type >= ffb2_prototype) { ++ upa_writel(ctx->dcss1, &ffb->dcss1); /* Depth Cue Scale Slope 1 */ ++ upa_writel(ctx->dcss2, &ffb->dcss2); /* Depth Cue Scale Slope 2 */ ++ upa_writel(ctx->dcss3, &ffb->dcss2); /* Depth Cue Scale Slope 3 */ ++ upa_writel(ctx->dcs2, &ffb->dcs2); /* Depth Cue Scale 2 */ ++ upa_writel(ctx->dcs3, &ffb->dcs3); /* Depth Cue Scale 3 */ ++ upa_writel(ctx->dcs4, &ffb->dcs4); /* Depth Cue Scale 4 */ ++ upa_writel(ctx->dcd2, &ffb->dcd2); /* Depth Cue Depth 2 */ ++ upa_writel(ctx->dcd3, &ffb->dcd3); /* Depth Cue Depth 3 */ ++ upa_writel(ctx->dcd4, &ffb->dcd4); /* Depth Cue Depth 4 */ ++ ++ /* And stencil/stencilctl only exists on FFB2+ and later ++ * due to the introduction of 3DRAM-III. ++ */ ++ if (fpriv->ffb_type == ffb2_vertical_plus || ++ fpriv->ffb_type == ffb2_horizontal_plus) { ++ /* Unfortunately, there is a hardware bug on ++ * the FFB2+ chips which prevents a normal write ++ * to the stencil control register from working ++ * as it should. ++ * ++ * The state controlled by the FFB stencilctl register ++ * really gets transferred to the per-buffer instances ++ * of the stencilctl register in the 3DRAM chips. ++ * ++ * The bug is that FFB does not update buffer C correctly, ++ * so we have to do it by hand for them. ++ */ ++ ++ /* This will update buffers A and B. */ ++ upa_writel(ctx->stencil, &ffb->stencil); ++ upa_writel(ctx->stencilctl, &ffb->stencilctl); ++ ++ /* Force FFB to use buffer C 3dram regs. */ ++ upa_writel(0x80000000, &ffb->fbc); ++ upa_writel((ctx->stencilctl | 0x80000), ++ &ffb->rawstencilctl); ++ ++ /* Now restore the correct FBC controls. */ ++ upa_writel(ctx->fbc, &ffb->fbc); ++ } ++ } ++ ++ /* Restore the 32x32 area pattern. */ ++ for (i = 0; i < 32; i++) ++ upa_writel(ctx->area_pattern[i], &ffb->pattern[i]); ++ ++ /* Finally, stash away the User Constol/Status Register. ++ * The only state we really preserve here is the picking ++ * control. 
++ */ ++ upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr); ++} ++ ++#define FFB_UCSR_FB_BUSY 0x01000000 ++#define FFB_UCSR_RP_BUSY 0x02000000 ++#define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY) ++ ++static void FFBWait(ffb_fbcPtr ffb) ++{ ++ int limit = 100000; ++ ++ do { ++ u32 regval = upa_readl(&ffb->ucsr); ++ ++ if ((regval & FFB_UCSR_ALL_BUSY) == 0) ++ break; ++ } while (--limit); ++} ++ ++int ffb_context_switch(struct drm_device * dev, int old, int new) { ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ ++#if DRM_DMA_HISTOGRAM ++ dev->ctx_start = get_cycles(); ++#endif ++ ++ DRM_DEBUG("Context switch from %d to %d\n", old, new); ++ ++ if (new == dev->last_context || dev->last_context == 0) { ++ dev->last_context = new; ++ return 0; ++ } ++ ++ FFBWait(fpriv->regs); ++ ffb_save_context(fpriv, old); ++ ffb_restore_context(fpriv, old, new); ++ FFBWait(fpriv->regs); ++ ++ dev->last_context = new; ++ ++ return 0; ++} ++ ++int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_ctx_res_t res; ++ drm_ctx_t ctx; ++ int i; ++ ++ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); ++ if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res))) ++ return -EFAULT; ++ if (res.count >= DRM_RESERVED_CONTEXTS) { ++ memset(&ctx, 0, sizeof(ctx)); ++ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ++ ctx.handle = i; ++ if (copy_to_user(&res.contexts[i], &i, sizeof(i))) ++ return -EFAULT; ++ } ++ } ++ res.count = DRM_RESERVED_CONTEXTS; ++ if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res))) ++ return -EFAULT; ++ return 0; ++} ++ ++int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ drm_ctx_t ctx; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ idx = ffb_alloc_queue(dev, (ctx.flags & _DRM_CONTEXT_2DONLY)); ++ if (idx < 0) ++ return -ENFILE; ++ ++ DRM_DEBUG("%d\n", ctx.handle); ++ ctx.handle = idx; ++ if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx))) ++ return -EFAULT; ++ return 0; ++} ++ ++int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ struct ffb_hw_context *hwctx; ++ drm_ctx_t ctx; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ ++ idx = ctx.handle; ++ if (idx <= 0 || idx >= FFB_MAX_CTXS) ++ return -EINVAL; ++ ++ hwctx = fpriv->hw_state[idx - 1]; ++ if (hwctx == NULL) ++ return -EINVAL; ++ ++ if ((ctx.flags & _DRM_CONTEXT_2DONLY) == 0) ++ hwctx->is_2d_only = 0; ++ else ++ hwctx->is_2d_only = 1; ++ ++ return 0; ++} ++ ++int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ struct ffb_hw_context *hwctx; ++ drm_ctx_t ctx; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ ++ idx = ctx.handle; ++ if (idx <= 0 || idx >= FFB_MAX_CTXS) ++ return -EINVAL; ++ ++ hwctx = fpriv->hw_state[idx - 1]; ++ if (hwctx == NULL) ++ return -EINVAL; ++ ++ if (hwctx->is_2d_only != 0) ++ ctx.flags = _DRM_CONTEXT_2DONLY; ++ else ++ ctx.flags = 0; ++ ++ 
if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ drm_ctx_t ctx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ DRM_DEBUG("%d\n", ctx.handle); ++ return ffb_context_switch(dev, dev->last_context, ctx.handle); ++} ++ ++int ffb_newctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_ctx_t ctx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ DRM_DEBUG("%d\n", ctx.handle); ++ ++ return 0; ++} ++ ++int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_ctx_t ctx; ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ DRM_DEBUG("%d\n", ctx.handle); ++ ++ idx = ctx.handle - 1; ++ if (idx < 0 || idx >= FFB_MAX_CTXS) ++ return -EINVAL; ++ ++ if (fpriv->hw_state[idx] != NULL) { ++ kfree(fpriv->hw_state[idx]); ++ fpriv->hw_state[idx] = NULL; ++ } ++ return 0; ++} ++ ++static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev) ++{ ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); ++ int idx; ++ ++ idx = context - 1; ++ if (fpriv && ++ context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) { ++ kfree(fpriv->hw_state[idx]); ++ fpriv->hw_state[idx] = NULL; ++ } ++} ++ ++static void ffb_driver_lastclose(struct drm_device * dev) ++{ ++ if (dev->dev_private) ++ kfree(dev->dev_private); ++} ++ ++static void ffb_driver_unload(struct drm_device * dev) ++{ ++ if (ffb_position != NULL) ++ kfree(ffb_position); ++} ++ ++static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev) ++{ ++ dev->lock.filp = 0; ++ { ++ __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; ++ unsigned int old, new, prev, ctx; ++ ++ ctx = lock.context; ++ do { ++ old = *plock; ++ new = ctx; ++ prev = cmpxchg(plock, old, new); ++ } while (prev != old); ++ } ++ wake_up_interruptible(&dev->lock.lock_queue); ++} ++ ++unsigned long ffb_driver_get_map_ofs(drm_map_t * map) ++{ ++ return (map->offset & 0xffffffff); ++} ++ ++unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev) ++{ ++ ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private; ++ ++ if (ffb_priv) ++ return ffb_priv->card_phys_base; ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ffb_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ffb_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ffb_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ffb_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,329 @@ ++/* $Id$ ++ * ffb_drv.c: Creator/Creator3D direct rendering driver. ++ * ++ * Copyright (C) 2000 David S. Miller (davem@redhat.com) ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "drmP.h" ++#include "ffb_drv.h" ++ ++#define DRIVER_AUTHOR "David S. 
Miller" ++ ++#define DRIVER_NAME "ffb" ++#define DRIVER_DESC "Creator/Creator3D" ++#define DRIVER_DATE "20000517" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 1 ++ ++typedef struct _ffb_position_t { ++ int node; ++ int root; ++} ffb_position_t; ++ ++static ffb_position_t *ffb_position; ++ ++static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance) ++{ ++ volatile unsigned char *strap_bits; ++ unsigned char val; ++ ++ strap_bits = (volatile unsigned char *) ++ (ffb_priv->card_phys_base + 0x00200000UL); ++ ++ /* Don't ask, you have to read the value twice for whatever ++ * reason to get correct contents. ++ */ ++ val = upa_readb(strap_bits); ++ val = upa_readb(strap_bits); ++ switch (val & 0x78) { ++ case (0x0 << 5) | (0x0 << 3): ++ ffb_priv->ffb_type = ffb1_prototype; ++ printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance); ++ break; ++ case (0x0 << 5) | (0x1 << 3): ++ ffb_priv->ffb_type = ffb1_standard; ++ printk("ffb%d: Detected FFB1\n", instance); ++ break; ++ case (0x0 << 5) | (0x3 << 3): ++ ffb_priv->ffb_type = ffb1_speedsort; ++ printk("ffb%d: Detected FFB1-SpeedSort\n", instance); ++ break; ++ case (0x1 << 5) | (0x0 << 3): ++ ffb_priv->ffb_type = ffb2_prototype; ++ printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance); ++ break; ++ case (0x1 << 5) | (0x1 << 3): ++ ffb_priv->ffb_type = ffb2_vertical; ++ printk("ffb%d: Detected FFB2/vertical\n", instance); ++ break; ++ case (0x1 << 5) | (0x2 << 3): ++ ffb_priv->ffb_type = ffb2_vertical_plus; ++ printk("ffb%d: Detected FFB2+/vertical\n", instance); ++ break; ++ case (0x2 << 5) | (0x0 << 3): ++ ffb_priv->ffb_type = ffb2_horizontal; ++ printk("ffb%d: Detected FFB2/horizontal\n", instance); ++ break; ++ case (0x2 << 5) | (0x2 << 3): ++ ffb_priv->ffb_type = ffb2_horizontal; ++ printk("ffb%d: Detected FFB2+/horizontal\n", instance); ++ break; ++ default: ++ ffb_priv->ffb_type = ffb2_vertical; ++ printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val); ++ break; ++ }; ++} ++ ++static void ffb_apply_upa_parent_ranges(int parent, ++ struct linux_prom64_registers *regs) ++{ ++ struct linux_prom64_ranges ranges[PROMREG_MAX]; ++ char name[128]; ++ int len, i; ++ ++ prom_getproperty(parent, "name", name, sizeof(name)); ++ if (strcmp(name, "upa") != 0) ++ return; ++ ++ len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges)); ++ if (len <= 0) ++ return; ++ ++ len /= sizeof(struct linux_prom64_ranges); ++ for (i = 0; i < len; i++) { ++ struct linux_prom64_ranges *rng = &ranges[i]; ++ u64 phys_addr = regs->phys_addr; ++ ++ if (phys_addr >= rng->ot_child_base && ++ phys_addr < (rng->ot_child_base + rng->or_size)) { ++ regs->phys_addr -= rng->ot_child_base; ++ regs->phys_addr += rng->ot_parent_base; ++ return; ++ } ++ } ++ ++ return; ++} ++ ++static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node, ++ int instance) ++{ ++ struct linux_prom64_registers regs[2*PROMREG_MAX]; ++ ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *)dev->dev_private; ++ int i; ++ ++ ffb_priv->prom_node = prom_node; ++ if (prom_getproperty(ffb_priv->prom_node, "reg", ++ (void *)regs, sizeof(regs)) <= 0) { ++ return -EINVAL; ++ } ++ ffb_apply_upa_parent_ranges(parent_node, ®s[0]); ++ ffb_priv->card_phys_base = regs[0].phys_addr; ++ ffb_priv->regs = (ffb_fbcPtr) ++ (regs[0].phys_addr + 0x00600000UL); ++ get_ffb_type(ffb_priv, instance); ++ for (i = 0; i < FFB_MAX_CTXS; i++) ++ ffb_priv->hw_state[i] = NULL; ++ ++ return 0; ++} ++ ++static int __init 
ffb_count_siblings(int root) ++{ ++ int node, child, count = 0; ++ ++ child = prom_getchild(root); ++ for (node = prom_searchsiblings(child, "SUNW,ffb"); node; ++ node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) ++ count++; ++ ++ return count; ++} ++ ++static int __init ffb_scan_siblings(int root, int instance) ++{ ++ int node, child; ++ ++ child = prom_getchild(root); ++ for (node = prom_searchsiblings(child, "SUNW,ffb"); node; ++ node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) { ++ ffb_position[instance].node = node; ++ ffb_position[instance].root = root; ++ instance++; ++ } ++ ++ return instance; ++} ++ ++static drm_map_t *ffb_find_map(struct file *filp, unsigned long off) ++{ ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev; ++ drm_map_list_t *r_list; ++ struct list_head *list; ++ drm_map_t *map; ++ ++ if (!priv || (dev = priv->dev) == NULL) ++ return NULL; ++ ++ list_for_each(list, &dev->maplist->head) { ++ unsigned long uoff; ++ ++ r_list = (drm_map_list_t *)list; ++ map = r_list->map; ++ if (!map) ++ continue; ++ uoff = (map->offset & 0xffffffff); ++ if (uoff == off) ++ return map; ++ } ++ ++ return NULL; ++} ++ ++unsigned long ffb_get_unmapped_area(struct file *filp, ++ unsigned long hint, ++ unsigned long len, ++ unsigned long pgoff, ++ unsigned long flags) ++{ ++ drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT); ++ unsigned long addr = -ENOMEM; ++ ++ if (!map) ++ return get_unmapped_area(NULL, hint, len, pgoff, flags); ++ ++ if (map->type == _DRM_FRAME_BUFFER || ++ map->type == _DRM_REGISTERS) { ++#ifdef HAVE_ARCH_FB_UNMAPPED_AREA ++ addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags); ++#else ++ addr = get_unmapped_area(NULL, hint, len, pgoff, flags); ++#endif ++ } else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) { ++ unsigned long slack = SHMLBA - PAGE_SIZE; ++ ++ addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags); ++ if (!(addr & ~PAGE_MASK)) { ++ unsigned long kvirt = (unsigned long) map->handle; ++ ++ if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) { ++ unsigned long koff, aoff; ++ ++ koff = kvirt & (SHMLBA - 1); ++ aoff = addr & (SHMLBA - 1); ++ if (koff < aoff) ++ koff += SHMLBA; ++ ++ addr += (koff - aoff); ++ } ++ } ++ } else { ++ addr = get_unmapped_area(NULL, hint, len, pgoff, flags); ++ } ++ ++ return addr; ++} ++ ++/* This functions must be here since it references drm_numdevs) ++ * which drm_drv.h declares. ++ */ ++static int ffb_driver_firstopen(struct drm_device *dev) ++{ ++ ffb_dev_priv_t *ffb_priv; ++ struct drm_device *temp_dev; ++ int ret = 0; ++ int i; ++ ++ /* Check for the case where no device was found. 
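++	 * ffb_position[] is filled in by ffb_scan_siblings() while walking the
++	 * PROM tree for SUNW,ffb nodes; a NULL table here means no Creator
++	 * hardware was found at module load time.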
++	 */
++	if (ffb_position == NULL)
++		return -ENODEV;
++
++	/* Find our instance number by finding our device in dev structure */
++	for (i = 0; i < drm_numdevs; i++) {
++		temp_dev = &(drm_device[i]);
++		if (temp_dev == dev)
++			break;
++	}
++
++	if (i == drm_numdevs)
++		return -ENODEV;
++
++	ffb_priv = kmalloc(sizeof(ffb_dev_priv_t), GFP_KERNEL);
++	if (!ffb_priv)
++		return -ENOMEM;
++	memset(ffb_priv, 0, sizeof(*ffb_priv));
++	dev->dev_private = ffb_priv;
++
++	ret = ffb_init_one(dev,
++			   ffb_position[i].node,
++			   ffb_position[i].root,
++			   i);
++	return ret;
++}
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++	ffb_PCI_IDS
++};
++
++static struct drm_driver ffb_driver = {
++	.release = ffb_driver_reclaim_buffers_locked,
++	.firstopen = ffb_driver_firstopen,
++	.lastclose = ffb_driver_lastclose,
++	.unload = ffb_driver_unload,
++	.kernel_context_switch = ffb_context_switch,
++	.kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock,
++	.get_map_ofs = ffb_driver_get_map_ofs,
++	.get_reg_ofs = ffb_driver_get_reg_ofs,
++	.reclaim_buffers = drm_core_reclaim_buffers,
++	.fops = {
++		.owner = THIS_MODULE,
++		.open = drm_open,
++		.release = drm_release,
++		.ioctl = drm_ioctl,
++		.mmap = drm_mmap,
++		.fasync = drm_fasync,
++		.poll = drm_poll,
++		.get_unmapped_area = ffb_get_unmapped_area,
++	},
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_probe(pdev, ent, &ffb_driver);
++}
++
++static struct pci_driver pci_driver = {
++	.name = DRIVER_NAME,
++	.id_table = pciidlist,
++	.probe = probe,
++	.remove = __devexit_p(drm_cleanup_pci),
++};
++
++static int __init ffb_init(void)
++{
++	return drm_init(&pci_driver, pciidlist, &ffb_driver);
++}
++
++static void __exit ffb_exit(void)
++{
++	drm_exit(&pci_driver);
++}
++
++module_init(ffb_init);
++module_exit(ffb_exit);
++
++MODULE_AUTHOR( DRIVER_AUTHOR );
++MODULE_DESCRIPTION( DRIVER_DESC );
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ffb_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ffb_drv.h
+--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/ffb_drv.h	1970-01-01 01:00:00.000000000 +0100
++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/ffb_drv.h	2011-09-04 11:31:05.000000000 +0200
+@@ -0,0 +1,284 @@
++/* $Id$
++ * ffb_drv.h: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++/* Auxilliary clips. */
++typedef struct {
++	volatile unsigned int min;
++	volatile unsigned int max;
++} ffb_auxclip, *ffb_auxclipPtr;
++
++/* FFB register set. */
++typedef struct _ffb_fbc {
++	/* Next vertex registers, on the right we list which drawops
++	 * use said register and the logical name the register has in
++	 * that context.
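++	 * The 0xNN values in the comments below are each register's byte
++	 * offset from the start of this structure.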
++ */ /* DESCRIPTION DRAWOP(NAME) */ ++/*0x00*/unsigned int pad1[3]; /* Reserved */ ++/*0x0c*/volatile unsigned int alpha; /* ALPHA Transparency */ ++/*0x10*/volatile unsigned int red; /* RED */ ++/*0x14*/volatile unsigned int green; /* GREEN */ ++/*0x18*/volatile unsigned int blue; /* BLUE */ ++/*0x1c*/volatile unsigned int z; /* DEPTH */ ++/*0x20*/volatile unsigned int y; /* Y triangle(DOYF) */ ++ /* aadot(DYF) */ ++ /* ddline(DYF) */ ++ /* aaline(DYF) */ ++/*0x24*/volatile unsigned int x; /* X triangle(DOXF) */ ++ /* aadot(DXF) */ ++ /* ddline(DXF) */ ++ /* aaline(DXF) */ ++/*0x28*/unsigned int pad2[2]; /* Reserved */ ++/*0x30*/volatile unsigned int ryf; /* Y (alias to DOYF) ddline(RYF) */ ++ /* aaline(RYF) */ ++ /* triangle(RYF) */ ++/*0x34*/volatile unsigned int rxf; /* X ddline(RXF) */ ++ /* aaline(RXF) */ ++ /* triangle(RXF) */ ++/*0x38*/unsigned int pad3[2]; /* Reserved */ ++/*0x40*/volatile unsigned int dmyf; /* Y (alias to DOYF) triangle(DMYF) */ ++/*0x44*/volatile unsigned int dmxf; /* X triangle(DMXF) */ ++/*0x48*/unsigned int pad4[2]; /* Reserved */ ++/*0x50*/volatile unsigned int ebyi; /* Y (alias to RYI) polygon(EBYI) */ ++/*0x54*/volatile unsigned int ebxi; /* X polygon(EBXI) */ ++/*0x58*/unsigned int pad5[2]; /* Reserved */ ++/*0x60*/volatile unsigned int by; /* Y brline(RYI) */ ++ /* fastfill(OP) */ ++ /* polygon(YI) */ ++ /* rectangle(YI) */ ++ /* bcopy(SRCY) */ ++ /* vscroll(SRCY) */ ++/*0x64*/volatile unsigned int bx; /* X brline(RXI) */ ++ /* polygon(XI) */ ++ /* rectangle(XI) */ ++ /* bcopy(SRCX) */ ++ /* vscroll(SRCX) */ ++ /* fastfill(GO) */ ++/*0x68*/volatile unsigned int dy; /* destination Y fastfill(DSTY) */ ++ /* bcopy(DSRY) */ ++ /* vscroll(DSRY) */ ++/*0x6c*/volatile unsigned int dx; /* destination X fastfill(DSTX) */ ++ /* bcopy(DSTX) */ ++ /* vscroll(DSTX) */ ++/*0x70*/volatile unsigned int bh; /* Y (alias to RYI) brline(DYI) */ ++ /* dot(DYI) */ ++ /* polygon(ETYI) */ ++ /* Height fastfill(H) */ ++ /* bcopy(H) */ ++ /* vscroll(H) */ ++ /* Y count fastfill(NY) */ ++/*0x74*/volatile unsigned int bw; /* X dot(DXI) */ ++ /* brline(DXI) */ ++ /* polygon(ETXI) */ ++ /* fastfill(W) */ ++ /* bcopy(W) */ ++ /* vscroll(W) */ ++ /* fastfill(NX) */ ++/*0x78*/unsigned int pad6[2]; /* Reserved */ ++/*0x80*/unsigned int pad7[32]; /* Reserved */ ++ ++ /* Setup Unit's vertex state register */ ++/*100*/ volatile unsigned int suvtx; ++/*104*/ unsigned int pad8[63]; /* Reserved */ ++ ++ /* Frame Buffer Control Registers */ ++/*200*/ volatile unsigned int ppc; /* Pixel Processor Control */ ++/*204*/ volatile unsigned int wid; /* Current WID */ ++/*208*/ volatile unsigned int fg; /* FG data */ ++/*20c*/ volatile unsigned int bg; /* BG data */ ++/*210*/ volatile unsigned int consty; /* Constant Y */ ++/*214*/ volatile unsigned int constz; /* Constant Z */ ++/*218*/ volatile unsigned int xclip; /* X Clip */ ++/*21c*/ volatile unsigned int dcss; /* Depth Cue Scale Slope */ ++/*220*/ volatile unsigned int vclipmin; /* Viewclip XY Min Bounds */ ++/*224*/ volatile unsigned int vclipmax; /* Viewclip XY Max Bounds */ ++/*228*/ volatile unsigned int vclipzmin; /* Viewclip Z Min Bounds */ ++/*22c*/ volatile unsigned int vclipzmax; /* Viewclip Z Max Bounds */ ++/*230*/ volatile unsigned int dcsf; /* Depth Cue Scale Front Bound */ ++/*234*/ volatile unsigned int dcsb; /* Depth Cue Scale Back Bound */ ++/*238*/ volatile unsigned int dczf; /* Depth Cue Z Front */ ++/*23c*/ volatile unsigned int dczb; /* Depth Cue Z Back */ ++/*240*/ unsigned int pad9; /* Reserved */ ++/*244*/ volatile 
unsigned int blendc; /* Alpha Blend Control */ ++/*248*/ volatile unsigned int blendc1; /* Alpha Blend Color 1 */ ++/*24c*/ volatile unsigned int blendc2; /* Alpha Blend Color 2 */ ++/*250*/ volatile unsigned int fbramitc; /* FB RAM Interleave Test Control */ ++/*254*/ volatile unsigned int fbc; /* Frame Buffer Control */ ++/*258*/ volatile unsigned int rop; /* Raster OPeration */ ++/*25c*/ volatile unsigned int cmp; /* Frame Buffer Compare */ ++/*260*/ volatile unsigned int matchab; /* Buffer AB Match Mask */ ++/*264*/ volatile unsigned int matchc; /* Buffer C(YZ) Match Mask */ ++/*268*/ volatile unsigned int magnab; /* Buffer AB Magnitude Mask */ ++/*26c*/ volatile unsigned int magnc; /* Buffer C(YZ) Magnitude Mask */ ++/*270*/ volatile unsigned int fbcfg0; /* Frame Buffer Config 0 */ ++/*274*/ volatile unsigned int fbcfg1; /* Frame Buffer Config 1 */ ++/*278*/ volatile unsigned int fbcfg2; /* Frame Buffer Config 2 */ ++/*27c*/ volatile unsigned int fbcfg3; /* Frame Buffer Config 3 */ ++/*280*/ volatile unsigned int ppcfg; /* Pixel Processor Config */ ++/*284*/ volatile unsigned int pick; /* Picking Control */ ++/*288*/ volatile unsigned int fillmode; /* FillMode */ ++/*28c*/ volatile unsigned int fbramwac; /* FB RAM Write Address Control */ ++/*290*/ volatile unsigned int pmask; /* RGB PlaneMask */ ++/*294*/ volatile unsigned int xpmask; /* X PlaneMask */ ++/*298*/ volatile unsigned int ypmask; /* Y PlaneMask */ ++/*29c*/ volatile unsigned int zpmask; /* Z PlaneMask */ ++/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */ ++ ++ /* New 3dRAM III support regs */ ++/*2c0*/ volatile unsigned int rawblend2; ++/*2c4*/ volatile unsigned int rawpreblend; ++/*2c8*/ volatile unsigned int rawstencil; ++/*2cc*/ volatile unsigned int rawstencilctl; ++/*2d0*/ volatile unsigned int threedram1; ++/*2d4*/ volatile unsigned int threedram2; ++/*2d8*/ volatile unsigned int passin; ++/*2dc*/ volatile unsigned int rawclrdepth; ++/*2e0*/ volatile unsigned int rawpmask; ++/*2e4*/ volatile unsigned int rawcsrc; ++/*2e8*/ volatile unsigned int rawmatch; ++/*2ec*/ volatile unsigned int rawmagn; ++/*2f0*/ volatile unsigned int rawropblend; ++/*2f4*/ volatile unsigned int rawcmp; ++/*2f8*/ volatile unsigned int rawwac; ++/*2fc*/ volatile unsigned int fbramid; ++ ++/*300*/ volatile unsigned int drawop; /* Draw OPeration */ ++/*304*/ unsigned int pad10[2]; /* Reserved */ ++/*30c*/ volatile unsigned int lpat; /* Line Pattern control */ ++/*310*/ unsigned int pad11; /* Reserved */ ++/*314*/ volatile unsigned int fontxy; /* XY Font coordinate */ ++/*318*/ volatile unsigned int fontw; /* Font Width */ ++/*31c*/ volatile unsigned int fontinc; /* Font Increment */ ++/*320*/ volatile unsigned int font; /* Font bits */ ++/*324*/ unsigned int pad12[3]; /* Reserved */ ++/*330*/ volatile unsigned int blend2; ++/*334*/ volatile unsigned int preblend; ++/*338*/ volatile unsigned int stencil; ++/*33c*/ volatile unsigned int stencilctl; ++ ++/*340*/ unsigned int pad13[4]; /* Reserved */ ++/*350*/ volatile unsigned int dcss1; /* Depth Cue Scale Slope 1 */ ++/*354*/ volatile unsigned int dcss2; /* Depth Cue Scale Slope 2 */ ++/*358*/ volatile unsigned int dcss3; /* Depth Cue Scale Slope 3 */ ++/*35c*/ volatile unsigned int widpmask; ++/*360*/ volatile unsigned int dcs2; ++/*364*/ volatile unsigned int dcs3; ++/*368*/ volatile unsigned int dcs4; ++/*36c*/ unsigned int pad14; /* Reserved */ ++/*370*/ volatile unsigned int dcd2; ++/*374*/ volatile unsigned int dcd3; ++/*378*/ volatile unsigned int dcd4; ++/*37c*/ unsigned int 
pad15; /* Reserved */ ++/*380*/ volatile unsigned int pattern[32]; /* area Pattern */ ++/*400*/ unsigned int pad16[8]; /* Reserved */ ++/*420*/ volatile unsigned int reset; /* chip RESET */ ++/*424*/ unsigned int pad17[247]; /* Reserved */ ++/*800*/ volatile unsigned int devid; /* Device ID */ ++/*804*/ unsigned int pad18[63]; /* Reserved */ ++/*900*/ volatile unsigned int ucsr; /* User Control & Status Register */ ++/*904*/ unsigned int pad19[31]; /* Reserved */ ++/*980*/ volatile unsigned int mer; /* Mode Enable Register */ ++/*984*/ unsigned int pad20[1439]; /* Reserved */ ++} ffb_fbc, *ffb_fbcPtr; ++ ++struct ffb_hw_context { ++ int is_2d_only; ++ ++ unsigned int ppc; ++ unsigned int wid; ++ unsigned int fg; ++ unsigned int bg; ++ unsigned int consty; ++ unsigned int constz; ++ unsigned int xclip; ++ unsigned int dcss; ++ unsigned int vclipmin; ++ unsigned int vclipmax; ++ unsigned int vclipzmin; ++ unsigned int vclipzmax; ++ unsigned int dcsf; ++ unsigned int dcsb; ++ unsigned int dczf; ++ unsigned int dczb; ++ unsigned int blendc; ++ unsigned int blendc1; ++ unsigned int blendc2; ++ unsigned int fbc; ++ unsigned int rop; ++ unsigned int cmp; ++ unsigned int matchab; ++ unsigned int matchc; ++ unsigned int magnab; ++ unsigned int magnc; ++ unsigned int pmask; ++ unsigned int xpmask; ++ unsigned int ypmask; ++ unsigned int zpmask; ++ unsigned int auxclip0min; ++ unsigned int auxclip0max; ++ unsigned int auxclip1min; ++ unsigned int auxclip1max; ++ unsigned int auxclip2min; ++ unsigned int auxclip2max; ++ unsigned int auxclip3min; ++ unsigned int auxclip3max; ++ unsigned int drawop; ++ unsigned int lpat; ++ unsigned int fontxy; ++ unsigned int fontw; ++ unsigned int fontinc; ++ unsigned int area_pattern[32]; ++ unsigned int ucsr; ++ unsigned int stencil; ++ unsigned int stencilctl; ++ unsigned int dcss1; ++ unsigned int dcss2; ++ unsigned int dcss3; ++ unsigned int dcs2; ++ unsigned int dcs3; ++ unsigned int dcs4; ++ unsigned int dcd2; ++ unsigned int dcd3; ++ unsigned int dcd4; ++ unsigned int mer; ++}; ++ ++#define FFB_MAX_CTXS 32 ++ ++enum ffb_chip_type { ++ ffb1_prototype = 0, /* Early pre-FCS FFB */ ++ ffb1_standard, /* First FCS FFB, 100Mhz UPA, 66MHz gclk */ ++ ffb1_speedsort, /* Second FCS FFB, 100Mhz UPA, 75MHz gclk */ ++ ffb2_prototype, /* Early pre-FCS vertical FFB2 */ ++ ffb2_vertical, /* First FCS FFB2/vertical, 100Mhz UPA, 100MHZ gclk, ++ 75(SingleBuffer)/83(DoubleBuffer) MHz fclk */ ++ ffb2_vertical_plus, /* Second FCS FFB2/vertical, same timings */ ++ ffb2_horizontal, /* First FCS FFB2/horizontal, same timings as FFB2/vert */ ++ ffb2_horizontal_plus, /* Second FCS FFB2/horizontal, same timings */ ++ afb_m3, /* FCS Elite3D, 3 float chips */ ++ afb_m6 /* FCS Elite3D, 6 float chips */ ++}; ++ ++typedef struct ffb_dev_priv { ++ /* Misc software state. */ ++ int prom_node; ++ enum ffb_chip_type ffb_type; ++ u64 card_phys_base; ++ struct miscdevice miscdev; ++ ++ /* Controller registers. */ ++ ffb_fbcPtr regs; ++ ++ /* Context table. 
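++	 * One slot per software context handle (handle - 1 indexes the
++	 * array); the switch code saves the live FBC registers into the
++	 * outgoing context's entry and restores the incoming one's.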
++	 */
++	struct ffb_hw_context *hw_state[FFB_MAX_CTXS];
++} ffb_dev_priv_t;
++
++extern unsigned long ffb_get_unmapped_area(struct file *filp,
++					   unsigned long hint,
++					   unsigned long len,
++					   unsigned long pgoff,
++					   unsigned long flags);
++extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map);
++extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev);
+diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/imagine_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/imagine_drv.c
+--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/imagine_drv.c	1970-01-01 01:00:00.000000000 +0100
++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/imagine_drv.c	2011-09-04 11:31:05.000000000 +0200
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2005 Adam Jackson.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */ ++ ++/* derived from tdfx_drv.c */ ++ ++#include "drmP.h" ++#include "imagine_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct drm_driver driver; ++ ++static struct pci_device_id pciidlist[] = { ++ imagine_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_MTRR, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int __init imagine_init(void) ++{ ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit imagine_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(imagine_init); ++module_exit(imagine_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i810_dma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i810_dma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i810_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i810_dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1301 @@ ++/* i810_dma.c -- DMA support for the i810 -*- linux-c -*- ++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Rickard E. 
(Rik) Faith ++ * Jeff Hartmann ++ * Keith Whitwell ++ * ++ */ ++ ++#include /* For task queue support */ ++#include ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i810_drm.h" ++#include "i810_drv.h" ++ ++#define I810_BUF_FREE 2 ++#define I810_BUF_CLIENT 1 ++#define I810_BUF_HARDWARE 0 ++ ++#define I810_BUF_UNMAPPED 0 ++#define I810_BUF_MAPPED 1 ++ ++static inline void i810_print_status_page(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ u32 *temp = dev_priv->hw_status_page; ++ int i; ++ ++ DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]); ++ DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]); ++ DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]); ++ DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]); ++ DRM_DEBUG("hw_status: Last Render: %x\n", temp[4]); ++ DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]); ++ for (i = 6; i < dma->buf_count + 6; i++) { ++ DRM_DEBUG("buffer status idx : %d used: %d\n", i - 6, temp[i]); ++ } ++} ++ ++static struct drm_buf *i810_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ int used; ++ ++ /* Linear search might not be the best solution */ ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ /* In use is already a pointer */ ++ used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, ++ I810_BUF_CLIENT); ++ if (used == I810_BUF_FREE) { ++ return buf; ++ } ++ } ++ return NULL; ++} ++ ++/* This should only be called if the buffer is not sent to the hardware ++ * yet, the hardware updates in use for us once its on the ring buffer. ++ */ ++ ++static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ int used; ++ ++ /* In use is already a pointer */ ++ used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); ++ if (used != I810_BUF_CLIENT) { ++ DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev; ++ drm_i810_private_t *dev_priv; ++ struct drm_buf *buf; ++ drm_i810_buf_priv_t *buf_priv; ++ ++ lock_kernel(); ++ dev = priv->minor->dev; ++ dev_priv = dev->dev_private; ++ buf = dev_priv->mmap_buffer; ++ buf_priv = buf->dev_private; ++ ++ vma->vm_flags |= (VM_IO | VM_DONTCOPY); ++ vma->vm_file = filp; ++ ++ buf_priv->currently_mapped = I810_BUF_MAPPED; ++ unlock_kernel(); ++ ++ if (io_remap_pfn_range(vma, vma->vm_start, ++ vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) ++ return -EAGAIN; ++ return 0; ++} ++ ++static const struct file_operations i810_buffer_fops = { ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = i810_mmap_buffers, ++ .fasync = drm_fasync, ++}; ++ ++static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ const struct file_operations *old_fops; ++ int retcode = 0; ++ ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) ++ return -EINVAL; ++ ++ down_write(¤t->mm->mmap_sem); ++ old_fops = file_priv->filp->f_op; ++ file_priv->filp->f_op = &i810_buffer_fops; ++ dev_priv->mmap_buffer = 
buf; ++ buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total, ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED, buf->bus_address); ++ dev_priv->mmap_buffer = NULL; ++ file_priv->filp->f_op = old_fops; ++ if (IS_ERR(buf_priv->virtual)) { ++ /* Real error */ ++ DRM_ERROR("mmap error\n"); ++ retcode = PTR_ERR(buf_priv->virtual); ++ buf_priv->virtual = NULL; ++ } ++ up_write(¤t->mm->mmap_sem); ++ ++ return retcode; ++} ++ ++static int i810_unmap_buffer(struct drm_buf * buf) ++{ ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ int retcode = 0; ++ ++ if (buf_priv->currently_mapped != I810_BUF_MAPPED) ++ return -EINVAL; ++ ++ down_write(¤t->mm->mmap_sem); ++ retcode = do_munmap(current->mm, ++ (unsigned long)buf_priv->virtual, ++ (size_t) buf->total); ++ up_write(¤t->mm->mmap_sem); ++ ++ buf_priv->currently_mapped = I810_BUF_UNMAPPED; ++ buf_priv->virtual = NULL; ++ ++ return retcode; ++} ++ ++static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, ++ struct drm_file *file_priv) ++{ ++ struct drm_buf *buf; ++ drm_i810_buf_priv_t *buf_priv; ++ int retcode = 0; ++ ++ buf = i810_freelist_get(dev); ++ if (!buf) { ++ retcode = -ENOMEM; ++ DRM_DEBUG("retcode=%d\n", retcode); ++ return retcode; ++ } ++ ++ retcode = i810_map_buffer(buf, file_priv); ++ if (retcode) { ++ i810_freelist_put(dev, buf); ++ DRM_ERROR("mapbuf failed, retcode %d\n", retcode); ++ return retcode; ++ } ++ buf->file_priv = file_priv; ++ buf_priv = buf->dev_private; ++ d->granted = 1; ++ d->request_idx = buf->idx; ++ d->request_size = buf->total; ++ d->virtual = buf_priv->virtual; ++ ++ return retcode; ++} ++ ++static int i810_dma_cleanup(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. 
++ */ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ int i; ++ drm_i810_private_t *dev_priv = ++ (drm_i810_private_t *) dev->dev_private; ++ ++ if (dev_priv->ring.virtual_start) { ++ drm_core_ioremapfree(&dev_priv->ring.map, dev); ++ } ++ if (dev_priv->hw_status_page) { ++ pci_free_consistent(dev->pdev, PAGE_SIZE, ++ dev_priv->hw_status_page, ++ dev_priv->dma_status_page); ++ /* Need to rewrite hardware status page */ ++ I810_WRITE(0x02080, 0x1ffff000); ++ } ++ drm_free(dev->dev_private, sizeof(drm_i810_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ if (buf_priv->kernel_virtual && buf->total) ++ drm_core_ioremapfree(&buf_priv->map, dev); ++ } ++ } ++ return 0; ++} ++ ++static int i810_wait_ring(struct drm_device * dev, int n) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_ring_buffer_t *ring = &(dev_priv->ring); ++ int iters = 0; ++ unsigned long end; ++ unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ++ ++ end = jiffies + (HZ * 3); ++ while (ring->space < n) { ++ ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++ ++ if (ring->head != last_head) { ++ end = jiffies + (HZ * 3); ++ last_head = ring->head; ++ } ++ ++ iters++; ++ if (time_before(end, jiffies)) { ++ DRM_ERROR("space: %d wanted %d\n", ring->space, n); ++ DRM_ERROR("lockup\n"); ++ goto out_wait_ring; ++ } ++ udelay(1); ++ } ++ ++ out_wait_ring: ++ return iters; ++} ++ ++static void i810_kernel_lost_context(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_ring_buffer_t *ring = &(dev_priv->ring); ++ ++ ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ++ ring->tail = I810_READ(LP_RING + RING_TAIL); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++} ++ ++static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int my_idx = 24; ++ u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); ++ int i; ++ ++ if (dma->buf_count > 1019) { ++ /* Not enough space in the status page for the freelist */ ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ buf_priv->in_use = hw_status++; ++ buf_priv->my_use_idx = my_idx; ++ my_idx += 4; ++ ++ *buf_priv->in_use = I810_BUF_FREE; ++ ++ buf_priv->map.offset = buf->bus_address; ++ buf_priv->map.size = buf->total; ++ buf_priv->map.type = _DRM_AGP; ++ buf_priv->map.flags = 0; ++ buf_priv->map.mtrr = 0; ++ ++ drm_core_ioremap(&buf_priv->map, dev); ++ buf_priv->kernel_virtual = buf_priv->map.handle; ++ ++ } ++ return 0; ++} ++ ++static int i810_dma_initialize(struct drm_device * dev, ++ drm_i810_private_t * dev_priv, ++ drm_i810_init_t * init) ++{ ++ struct drm_map_list *r_list; ++ memset(dev_priv, 0, sizeof(drm_i810_private_t)); ++ ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map && ++ r_list->map->type == _DRM_SHM && ++ r_list->map->flags & _DRM_CONTAINS_LOCK) { ++ dev_priv->sarea_map = r_list->map; ++ break; ++ } ++ } ++ if (!dev_priv->sarea_map) { ++ dev->dev_private = (void *)dev_priv; 
++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not find sarea!\n"); ++ return -EINVAL; ++ } ++ dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); ++ if (!dev_priv->mmio_map) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not find mmio map!\n"); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not find dma buffer map!\n"); ++ return -EINVAL; ++ } ++ ++ dev_priv->sarea_priv = (drm_i810_sarea_t *) ++ ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset); ++ ++ dev_priv->ring.Start = init->ring_start; ++ dev_priv->ring.End = init->ring_end; ++ dev_priv->ring.Size = init->ring_size; ++ ++ dev_priv->ring.map.offset = dev->agp->base + init->ring_start; ++ dev_priv->ring.map.size = init->ring_size; ++ dev_priv->ring.map.type = _DRM_AGP; ++ dev_priv->ring.map.flags = 0; ++ dev_priv->ring.map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->ring.map, dev); ++ ++ if (dev_priv->ring.map.handle == NULL) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not ioremap virtual address for" ++ " ring buffer\n"); ++ return -ENOMEM; ++ } ++ ++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ ++ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; ++ ++ dev_priv->w = init->w; ++ dev_priv->h = init->h; ++ dev_priv->pitch = init->pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->front_offset = init->front_offset; ++ ++ dev_priv->overlay_offset = init->overlay_offset; ++ dev_priv->overlay_physical = init->overlay_physical; ++ ++ dev_priv->front_di1 = init->front_offset | init->pitch_bits; ++ dev_priv->back_di1 = init->back_offset | init->pitch_bits; ++ dev_priv->zi1 = init->depth_offset | init->pitch_bits; ++ ++ /* Program Hardware Status Page */ ++ dev_priv->hw_status_page = ++ pci_alloc_consistent(dev->pdev, PAGE_SIZE, ++ &dev_priv->dma_status_page); ++ if (!dev_priv->hw_status_page) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("Can not allocate hardware status page\n"); ++ return -ENOMEM; ++ } ++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); ++ ++ I810_WRITE(0x02080, dev_priv->dma_status_page); ++ DRM_DEBUG("Enabled hardware status page\n"); ++ ++ /* Now we need to init our freelist */ ++ if (i810_freelist_init(dev, dev_priv) != 0) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("Not enough space in the status page for" ++ " the freelist\n"); ++ return -ENOMEM; ++ } ++ dev->dev_private = (void *)dev_priv; ++ ++ return 0; ++} ++ ++static int i810_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv; ++ drm_i810_init_t *init = data; ++ int retcode = 0; ++ ++ switch (init->func) { ++ case I810_INIT_DMA_1_4: ++ DRM_INFO("Using v1.4 init.\n"); ++ dev_priv = drm_alloc(sizeof(drm_i810_private_t), ++ DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ retcode = i810_dma_initialize(dev, dev_priv, init); ++ break; ++ ++ case I810_CLEANUP_DMA: ++ DRM_INFO("DMA Cleanup\n"); ++ retcode = i810_dma_cleanup(dev); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return retcode; ++} ++ ++/* Most efficient way to verify state for the i810 is as it is ++ * 
emitted. Non-conformant state is silently dropped. ++ * ++ * Use 'volatile' & local var tmp to force the emitted values to be ++ * identical to the verified ones. ++ */ ++static void i810EmitContextVerified(struct drm_device * dev, ++ volatile unsigned int *code) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ int i, j = 0; ++ unsigned int tmp; ++ RING_LOCALS; ++ ++ BEGIN_LP_RING(I810_CTX_SETUP_SIZE); ++ ++ OUT_RING(GFX_OP_COLOR_FACTOR); ++ OUT_RING(code[I810_CTXREG_CF1]); ++ ++ OUT_RING(GFX_OP_STIPPLE); ++ OUT_RING(code[I810_CTXREG_ST1]); ++ ++ for (i = 4; i < I810_CTX_SETUP_SIZE; i++) { ++ tmp = code[i]; ++ ++ if ((tmp & (7 << 29)) == (3 << 29) && ++ (tmp & (0x1f << 24)) < (0x1d << 24)) { ++ OUT_RING(tmp); ++ j++; ++ } else ++ printk("constext state dropped!!!\n"); ++ } ++ ++ if (j & 1) ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++} ++ ++static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ int i, j = 0; ++ unsigned int tmp; ++ RING_LOCALS; ++ ++ BEGIN_LP_RING(I810_TEX_SETUP_SIZE); ++ ++ OUT_RING(GFX_OP_MAP_INFO); ++ OUT_RING(code[I810_TEXREG_MI1]); ++ OUT_RING(code[I810_TEXREG_MI2]); ++ OUT_RING(code[I810_TEXREG_MI3]); ++ ++ for (i = 4; i < I810_TEX_SETUP_SIZE; i++) { ++ tmp = code[i]; ++ ++ if ((tmp & (7 << 29)) == (3 << 29) && ++ (tmp & (0x1f << 24)) < (0x1d << 24)) { ++ OUT_RING(tmp); ++ j++; ++ } else ++ printk("texture state dropped!!!\n"); ++ } ++ ++ if (j & 1) ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++} ++ ++/* Need to do some additional checking when setting the dest buffer. ++ */ ++static void i810EmitDestVerified(struct drm_device * dev, ++ volatile unsigned int *code) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ unsigned int tmp; ++ RING_LOCALS; ++ ++ BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2); ++ ++ tmp = code[I810_DESTREG_DI1]; ++ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { ++ OUT_RING(CMD_OP_DESTBUFFER_INFO); ++ OUT_RING(tmp); ++ } else ++ DRM_DEBUG("bad di1 %x (allow %x or %x)\n", ++ tmp, dev_priv->front_di1, dev_priv->back_di1); ++ ++ /* invarient: ++ */ ++ OUT_RING(CMD_OP_Z_BUFFER_INFO); ++ OUT_RING(dev_priv->zi1); ++ ++ OUT_RING(GFX_OP_DESTBUFFER_VARS); ++ OUT_RING(code[I810_DESTREG_DV1]); ++ ++ OUT_RING(GFX_OP_DRAWRECT_INFO); ++ OUT_RING(code[I810_DESTREG_DR1]); ++ OUT_RING(code[I810_DESTREG_DR2]); ++ OUT_RING(code[I810_DESTREG_DR3]); ++ OUT_RING(code[I810_DESTREG_DR4]); ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++} ++ ++static void i810EmitState(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ ++ DRM_DEBUG("%x\n", dirty); ++ ++ if (dirty & I810_UPLOAD_BUFFERS) { ++ i810EmitDestVerified(dev, sarea_priv->BufferState); ++ sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; ++ } ++ ++ if (dirty & I810_UPLOAD_CTX) { ++ i810EmitContextVerified(dev, sarea_priv->ContextState); ++ sarea_priv->dirty &= ~I810_UPLOAD_CTX; ++ } ++ ++ if (dirty & I810_UPLOAD_TEX0) { ++ i810EmitTexVerified(dev, sarea_priv->TexState[0]); ++ sarea_priv->dirty &= ~I810_UPLOAD_TEX0; ++ } ++ ++ if (dirty & I810_UPLOAD_TEX1) { ++ i810EmitTexVerified(dev, sarea_priv->TexState[1]); ++ sarea_priv->dirty &= ~I810_UPLOAD_TEX1; ++ } ++} ++ ++/* need to verify ++ */ ++static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, ++ unsigned int clear_color, ++ unsigned int clear_zval) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ 
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int pitch = dev_priv->pitch; ++ int cpp = 2; ++ int i; ++ RING_LOCALS; ++ ++ if (dev_priv->current_page == 1) { ++ unsigned int tmp = flags; ++ ++ flags &= ~(I810_FRONT | I810_BACK); ++ if (tmp & I810_FRONT) ++ flags |= I810_BACK; ++ if (tmp & I810_BACK) ++ flags |= I810_FRONT; ++ } ++ ++ i810_kernel_lost_context(dev); ++ ++ if (nbox > I810_NR_SAREA_CLIPRECTS) ++ nbox = I810_NR_SAREA_CLIPRECTS; ++ ++ for (i = 0; i < nbox; i++, pbox++) { ++ unsigned int x = pbox->x1; ++ unsigned int y = pbox->y1; ++ unsigned int width = (pbox->x2 - x) * cpp; ++ unsigned int height = pbox->y2 - y; ++ unsigned int start = y * pitch + x * cpp; ++ ++ if (pbox->x1 > pbox->x2 || ++ pbox->y1 > pbox->y2 || ++ pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) ++ continue; ++ ++ if (flags & I810_FRONT) { ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); ++ OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); ++ OUT_RING((height << 16) | width); ++ OUT_RING(start); ++ OUT_RING(clear_color); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ ++ if (flags & I810_BACK) { ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); ++ OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); ++ OUT_RING((height << 16) | width); ++ OUT_RING(dev_priv->back_offset + start); ++ OUT_RING(clear_color); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ ++ if (flags & I810_DEPTH) { ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); ++ OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); ++ OUT_RING((height << 16) | width); ++ OUT_RING(dev_priv->depth_offset + start); ++ OUT_RING(clear_zval); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ } ++} ++ ++static void i810_dma_dispatch_swap(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int pitch = dev_priv->pitch; ++ int cpp = 2; ++ int i; ++ RING_LOCALS; ++ ++ DRM_DEBUG("swapbuffers\n"); ++ ++ i810_kernel_lost_context(dev); ++ ++ if (nbox > I810_NR_SAREA_CLIPRECTS) ++ nbox = I810_NR_SAREA_CLIPRECTS; ++ ++ for (i = 0; i < nbox; i++, pbox++) { ++ unsigned int w = pbox->x2 - pbox->x1; ++ unsigned int h = pbox->y2 - pbox->y1; ++ unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch; ++ unsigned int start = dst; ++ ++ if (pbox->x1 > pbox->x2 || ++ pbox->y1 > pbox->y2 || ++ pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) ++ continue; ++ ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4); ++ OUT_RING(pitch | (0xCC << 16)); ++ OUT_RING((h << 16) | (w * cpp)); ++ if (dev_priv->current_page == 0) ++ OUT_RING(dev_priv->front_offset + start); ++ else ++ OUT_RING(dev_priv->back_offset + start); ++ OUT_RING(pitch); ++ if (dev_priv->current_page == 0) ++ OUT_RING(dev_priv->back_offset + start); ++ else ++ OUT_RING(dev_priv->front_offset + start); ++ ADVANCE_LP_RING(); ++ } ++} ++ ++static void i810_dma_dispatch_vertex(struct drm_device * dev, ++ struct drm_buf * buf, int discard, int used) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_clip_rect *box = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ unsigned long address = (unsigned long)buf->bus_address; ++ 
unsigned long start = address - dev->agp->base; ++ int i = 0; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ if (nbox > I810_NR_SAREA_CLIPRECTS) ++ nbox = I810_NR_SAREA_CLIPRECTS; ++ ++ if (used > 4 * 1024) ++ used = 0; ++ ++ if (sarea_priv->dirty) ++ i810EmitState(dev); ++ ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) { ++ unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); ++ ++ *(u32 *) buf_priv->kernel_virtual = ++ ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2))); ++ ++ if (used & 4) { ++ *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0; ++ used += 4; ++ } ++ ++ i810_unmap_buffer(buf); ++ } ++ ++ if (used) { ++ do { ++ if (i < nbox) { ++ BEGIN_LP_RING(4); ++ OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR | ++ SC_ENABLE); ++ OUT_RING(GFX_OP_SCISSOR_INFO); ++ OUT_RING(box[i].x1 | (box[i].y1 << 16)); ++ OUT_RING((box[i].x2 - ++ 1) | ((box[i].y2 - 1) << 16)); ++ ADVANCE_LP_RING(); ++ } ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(CMD_OP_BATCH_BUFFER); ++ OUT_RING(start | BB1_PROTECTED); ++ OUT_RING(start + used - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ } while (++i < nbox); ++ } ++ ++ if (discard) { ++ dev_priv->counter++; ++ ++ (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, ++ I810_BUF_HARDWARE); ++ ++ BEGIN_LP_RING(8); ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(20); ++ OUT_RING(dev_priv->counter); ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(buf_priv->my_use_idx); ++ OUT_RING(I810_BUF_FREE); ++ OUT_RING(CMD_REPORT_HEAD); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++} ++ ++static void i810_dma_dispatch_flip(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ int pitch = dev_priv->pitch; ++ RING_LOCALS; ++ ++ DRM_DEBUG("page=%d pfCurrentPage=%d\n", ++ dev_priv->current_page, ++ dev_priv->sarea_priv->pf_current_page); ++ ++ i810_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2); ++ /* On i815 at least ASYNC is buggy */ ++ /* pitch<<5 is from 11.2.8 p158, ++ its the pitch / 8 then left shifted 8, ++ so (pitch >> 3) << 8 */ ++ OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ ); ++ if (dev_priv->current_page == 0) { ++ OUT_RING(dev_priv->back_offset); ++ dev_priv->current_page = 1; ++ } else { ++ OUT_RING(dev_priv->front_offset); ++ dev_priv->current_page = 0; ++ } ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. 
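++	 * A rough user-space sketch (hypothetical, not part of this driver;
++	 * "sarea" stands for the mapped drm_i810_sarea_t):
++	 *
++	 *	int expected = !sarea->pf_current_page;
++	 *	ioctl(fd, DRM_IOCTL_I810_FLIP, 0);
++	 *	while (sarea->pf_current_page != expected)
++	 *		usleep(1000);
++	 *
++	 * i.e. the client only issues its next swap once the flip it asked
++	 * for is visible in the shared sarea.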
++ */ ++ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; ++ ++} ++ ++static void i810_dma_quiescent(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); ++ OUT_RING(CMD_REPORT_HEAD); ++ OUT_RING(0); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ i810_wait_ring(dev, dev_priv->ring.Size - 8); ++} ++ ++static int i810_flush_queue(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ int i, ret = 0; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(CMD_REPORT_HEAD); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ i810_wait_ring(dev, dev_priv->ring.Size - 8); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, ++ I810_BUF_FREE); ++ ++ if (used == I810_BUF_HARDWARE) ++ DRM_DEBUG("reclaimed from HARDWARE\n"); ++ if (used == I810_BUF_CLIENT) ++ DRM_DEBUG("still on client\n"); ++ } ++ ++ return ret; ++} ++ ++/* Must be called with the lock held */ ++static void i810_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ if (!dma) ++ return; ++ if (!dev->dev_private) ++ return; ++ if (!dma->buflist) ++ return; ++ ++ i810_flush_queue(dev); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ if (buf->file_priv == file_priv && buf_priv) { ++ int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, ++ I810_BUF_FREE); ++ ++ if (used == I810_BUF_CLIENT) ++ DRM_DEBUG("reclaimed from client\n"); ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) ++ buf_priv->currently_mapped = I810_BUF_UNMAPPED; ++ } ++ } ++} ++ ++static int i810_flush_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ i810_flush_queue(dev); ++ return 0; ++} ++ ++static int i810_dma_vertex(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ drm_i810_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_DEBUG("idx %d used %d discard %d\n", ++ vertex->idx, vertex->used, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx > dma->buf_count) ++ return -EINVAL; ++ ++ i810_dma_dispatch_vertex(dev, ++ dma->buflist[vertex->idx], ++ vertex->discard, vertex->used); ++ ++ atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ sarea_priv->last_enqueue = dev_priv->counter - 1; ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ ++ return 0; ++} ++ ++static int i810_clear_bufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_clear_t *clear = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* GH: Someone's doing nasty things... 
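++	 * (e.g. calling the clear ioctl before DMA init has been run);
++	 * bail out rather than let the dispatch code run without a
++	 * dev_private.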
*/ ++ if (!dev->dev_private) { ++ return -EINVAL; ++ } ++ ++ i810_dma_dispatch_clear(dev, clear->flags, ++ clear->clear_color, clear->clear_depth); ++ return 0; ++} ++ ++static int i810_swap_bufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ i810_dma_dispatch_swap(dev); ++ return 0; ++} ++ ++static int i810_getage(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ return 0; ++} ++ ++static int i810_getbuf(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ int retcode = 0; ++ drm_i810_dma_t *d = data; ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ d->granted = 0; ++ ++ retcode = i810_dma_get_buffer(dev, d, file_priv); ++ ++ DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", ++ current->pid, retcode, d->granted); ++ ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ ++ return retcode; ++} ++ ++static int i810_copybuf(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ /* Never copy - 2.4.x doesn't need it */ ++ return 0; ++} ++ ++static int i810_docopy(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ /* Never copy - 2.4.x doesn't need it */ ++ return 0; ++} ++ ++static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, ++ unsigned int last_render) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned long address = (unsigned long)buf->bus_address; ++ unsigned long start = address - dev->agp->base; ++ int u; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE); ++ if (u != I810_BUF_CLIENT) { ++ DRM_DEBUG("MC found buffer that isn't mine!\n"); ++ } ++ ++ if (used > 4 * 1024) ++ used = 0; ++ ++ sarea_priv->dirty = 0x7f; ++ ++ DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used); ++ ++ dev_priv->counter++; ++ DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); ++ DRM_DEBUG("start : %lx\n", start); ++ DRM_DEBUG("used : %d\n", used); ++ DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); ++ ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) { ++ if (used & 4) { ++ *(u32 *) ((char *) buf_priv->virtual + used) = 0; ++ used += 4; ++ } ++ ++ i810_unmap_buffer(buf); ++ } ++ BEGIN_LP_RING(4); ++ OUT_RING(CMD_OP_BATCH_BUFFER); ++ OUT_RING(start | BB1_PROTECTED); ++ OUT_RING(start + used - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ BEGIN_LP_RING(8); ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(buf_priv->my_use_idx); ++ OUT_RING(I810_BUF_FREE); ++ OUT_RING(0); ++ ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(16); ++ OUT_RING(last_render); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++} ++ ++static int i810_dma_mc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = 
dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ drm_i810_mc_t *mc = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (mc->idx >= dma->buf_count || mc->idx < 0) ++ return -EINVAL; ++ ++ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, ++ mc->last_render); ++ ++ atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ sarea_priv->last_enqueue = dev_priv->counter - 1; ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ ++ return 0; ++} ++ ++static int i810_rstatus(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ ++ return (int)(((u32 *) (dev_priv->hw_status_page))[4]); ++} ++ ++static int i810_ov0_info(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ drm_i810_overlay_t *ov = data; ++ ++ ov->offset = dev_priv->overlay_offset; ++ ov->physical = dev_priv->overlay_physical; ++ ++ return 0; ++} ++ ++static int i810_fstatus(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ return I810_READ(0x30008); ++} ++ ++static int i810_ov0_flip(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ //Tell the overlay to update ++ I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); ++ ++ return 0; ++} ++ ++/* Not sure why this isn't set all the time: ++ */ ++static void i810_do_init_pageflip(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ dev_priv->page_flipping = 1; ++ dev_priv->current_page = 0; ++ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; ++} ++ ++static int i810_do_cleanup_pageflip(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ if (dev_priv->current_page != 0) ++ i810_dma_dispatch_flip(dev); ++ ++ dev_priv->page_flipping = 0; ++ return 0; ++} ++ ++static int i810_flip_bufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv->page_flipping) ++ i810_do_init_pageflip(dev); ++ ++ i810_dma_dispatch_flip(dev); ++ return 0; ++} ++ ++int i810_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ /* i810 has 4 more counters */ ++ dev->counters += 4; ++ dev->types[6] = _DRM_STAT_IRQ; ++ dev->types[7] = _DRM_STAT_PRIMARY; ++ dev->types[8] = _DRM_STAT_SECONDARY; ++ dev->types[9] = _DRM_STAT_DMA; ++ ++ return 0; ++} ++ ++void i810_driver_lastclose(struct drm_device * dev) ++{ ++ i810_dma_cleanup(dev); ++} ++ ++void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) ++{ ++ if (dev->dev_private) { ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ if (dev_priv->page_flipping) { ++ i810_do_cleanup_pageflip(dev); ++ } ++ } ++} ++ ++void i810_driver_reclaim_buffers_locked(struct drm_device * dev, ++ struct drm_file *file_priv) ++{ ++ i810_reclaim_buffers(dev, file_priv); ++} ++ ++int i810_driver_dma_quiescent(struct drm_device * dev) ++{ ++ i810_dma_quiescent(dev); ++ return 0; ++} 
++ ++struct drm_ioctl_desc i810_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH) ++}; ++ ++int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); ++ ++/** ++ * Determine if the device really is AGP or not. ++ * ++ * All Intel graphics chipsets are treated as AGP, even if they are really ++ * PCI-e. ++ * ++ * \param dev The device to be tested. ++ * ++ * \returns ++ * A value of 1 is always retured to indictate every i810 is AGP. ++ */ ++int i810_driver_device_is_agp(struct drm_device * dev) ++{ ++ return 1; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i810_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i810_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i810_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i810_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,263 @@ ++#ifndef _I810_DRM_H_ ++#define _I810_DRM_H_ ++ ++/* WARNING: These defines must be the same as what the Xserver uses. ++ * if you change them, you must change the defines in the Xserver. 
++ */ ++ ++#ifndef _I810_DEFINES_ ++#define _I810_DEFINES_ ++ ++#define I810_DMA_BUF_ORDER 12 ++#define I810_DMA_BUF_SZ (1< ++ * Jeff Hartmann ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i810_drm.h" ++#include "i810_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ i810_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ ++ DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, ++ .dev_priv_size = sizeof(drm_i810_buf_priv_t), ++ .load = i810_driver_load, ++ .lastclose = i810_driver_lastclose, ++ .preclose = i810_driver_preclose, ++ .device_is_agp = i810_driver_device_is_agp, ++ .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, ++ .dma_quiescent = i810_driver_dma_quiescent, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = i810_ioctls, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init i810_init(void) ++{ ++ driver.num_ioctls = i810_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit i810_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(i810_init); ++module_exit(i810_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i810_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i810_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i810_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i810_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,242 @@ ++/* i810_drv.h -- Private header for the Matrox g200/g400 driver -*- linux-c -*- ++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Rickard E. (Rik) Faith ++ * Jeff Hartmann ++ * ++ */ ++ ++#ifndef _I810_DRV_H_ ++#define _I810_DRV_H_ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "VA Linux Systems Inc." ++ ++#define DRIVER_NAME "i810" ++#define DRIVER_DESC "Intel i810" ++#define DRIVER_DATE "20030605" ++ ++/* Interface history ++ * ++ * 1.1 - XFree86 4.1 ++ * 1.2 - XvMC interfaces ++ * - XFree86 4.2 ++ * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) ++ * - Remove requirement for interrupt (leave stubs again) ++ * 1.3 - Add page flipping. ++ * 1.4 - fix DRM interface ++ */ ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 4 ++#define DRIVER_PATCHLEVEL 0 ++ ++typedef struct drm_i810_buf_priv { ++ u32 *in_use; ++ int my_use_idx; ++ int currently_mapped; ++ void *virtual; ++ void *kernel_virtual; ++ drm_local_map_t map; ++} drm_i810_buf_priv_t; ++ ++typedef struct _drm_i810_ring_buffer { ++ int tail_mask; ++ unsigned long Start; ++ unsigned long End; ++ unsigned long Size; ++ u8 *virtual_start; ++ int head; ++ int tail; ++ int space; ++ drm_local_map_t map; ++} drm_i810_ring_buffer_t; ++ ++typedef struct drm_i810_private { ++ struct drm_map *sarea_map; ++ struct drm_map *mmio_map; ++ ++ drm_i810_sarea_t *sarea_priv; ++ drm_i810_ring_buffer_t ring; ++ ++ void *hw_status_page; ++ unsigned long counter; ++ ++ dma_addr_t dma_status_page; ++ ++ struct drm_buf *mmap_buffer; ++ ++ u32 front_di1, back_di1, zi1; ++ ++ int back_offset; ++ int depth_offset; ++ int overlay_offset; ++ int overlay_physical; ++ int w, h; ++ int pitch; ++ int back_pitch; ++ int depth_pitch; ++ ++ int do_boxes; ++ int dma_used; ++ ++ int current_page; ++ int page_flipping; ++ ++ wait_queue_head_t irq_queue; ++ atomic_t irq_received; ++ atomic_t irq_emitted; ++ ++ int front_offset; ++} drm_i810_private_t; ++ ++ /* i810_dma.c */ ++extern int i810_driver_dma_quiescent(struct drm_device * dev); ++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern int i810_driver_load(struct drm_device *, unsigned long flags); ++extern void i810_driver_lastclose(struct drm_device * dev); ++extern void i810_driver_preclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern int i810_driver_device_is_agp(struct drm_device * dev); ++ ++extern struct drm_ioctl_desc i810_ioctls[]; ++extern int i810_max_ioctl; ++ ++#define I810_BASE(reg) ((unsigned long) \ ++ dev_priv->mmio_map->handle) ++#define I810_ADDR(reg) (I810_BASE(reg) + reg) ++#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) ++#define I810_READ(reg) I810_DEREF(reg) ++#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) ++#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) ++#define I810_READ16(reg) I810_DEREF16(reg) ++#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) ++ ++#define I810_VERBOSE 0 ++#define RING_LOCALS unsigned int outring, ringmask; \ ++ volatile char *virt; ++ ++#define BEGIN_LP_RING(n) do { \ ++ if (I810_VERBOSE) \ ++ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ ++ if (dev_priv->ring.space < n*4) \ ++ i810_wait_ring(dev, n*4); \ ++ 
dev_priv->ring.space -= n*4; \ ++ outring = dev_priv->ring.tail; \ ++ ringmask = dev_priv->ring.tail_mask; \ ++ virt = dev_priv->ring.virtual_start; \ ++} while (0) ++ ++#define ADVANCE_LP_RING() do { \ ++ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ ++ dev_priv->ring.tail = outring; \ ++ I810_WRITE(LP_RING + RING_TAIL, outring); \ ++} while(0) ++ ++#define OUT_RING(n) do { \ ++ if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ ++ *(volatile unsigned int *)(virt + outring) = n; \ ++ outring += 4; \ ++ outring &= ringmask; \ ++} while (0) ++ ++#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) ++#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) ++#define CMD_REPORT_HEAD (7<<23) ++#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) ++#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) ++ ++#define INST_PARSER_CLIENT 0x00000000 ++#define INST_OP_FLUSH 0x02000000 ++#define INST_FLUSH_MAP_CACHE 0x00000001 ++ ++#define BB1_START_ADDR_MASK (~0x7) ++#define BB1_PROTECTED (1<<0) ++#define BB1_UNPROTECTED (0<<0) ++#define BB2_END_ADDR_MASK (~0x7) ++ ++#define I810REG_HWSTAM 0x02098 ++#define I810REG_INT_IDENTITY_R 0x020a4 ++#define I810REG_INT_MASK_R 0x020a8 ++#define I810REG_INT_ENABLE_R 0x020a0 ++ ++#define LP_RING 0x2030 ++#define HP_RING 0x2040 ++#define RING_TAIL 0x00 ++#define TAIL_ADDR 0x000FFFF8 ++#define RING_HEAD 0x04 ++#define HEAD_WRAP_COUNT 0xFFE00000 ++#define HEAD_WRAP_ONE 0x00200000 ++#define HEAD_ADDR 0x001FFFFC ++#define RING_START 0x08 ++#define START_ADDR 0x00FFFFF8 ++#define RING_LEN 0x0C ++#define RING_NR_PAGES 0x000FF000 ++#define RING_REPORT_MASK 0x00000006 ++#define RING_REPORT_64K 0x00000002 ++#define RING_REPORT_128K 0x00000004 ++#define RING_NO_REPORT 0x00000000 ++#define RING_VALID_MASK 0x00000001 ++#define RING_VALID 0x00000001 ++#define RING_INVALID 0x00000000 ++ ++#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) ++#define SC_UPDATE_SCISSOR (0x1<<1) ++#define SC_ENABLE_MASK (0x1<<0) ++#define SC_ENABLE (0x1<<0) ++ ++#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) ++#define SCI_YMIN_MASK (0xffff<<16) ++#define SCI_XMIN_MASK (0xffff<<0) ++#define SCI_YMAX_MASK (0xffff<<16) ++#define SCI_XMAX_MASK (0xffff<<0) ++ ++#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) ++#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) ++#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2) ++#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) ++#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) ++#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24)) ++ ++#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) ++#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) ++#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23)) ++#define CMD_OP_WAIT_FOR_EVENT ((0x0<<29)|(0x03<<23)) ++ ++#define BR00_BITBLT_CLIENT 0x40000000 ++#define BR00_OP_COLOR_BLT 0x10000000 ++#define BR00_OP_SRC_COPY_BLT 0x10C00000 ++#define BR13_SOLID_PATTERN 0x80000000 ++ ++#define WAIT_FOR_PLANE_A_SCANLINES (1<<1) ++#define WAIT_FOR_PLANE_A_FLIP (1<<2) ++#define WAIT_FOR_VBLANK (1<<3) ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_buffer.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_buffer.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_buffer.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_buffer.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,303 @@ ++/************************************************************************** ++ 
* ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev) ++{ ++ return drm_agp_init_ttm(dev); ++} ++ ++int i915_fence_type(struct drm_buffer_object *bo, ++ uint32_t *fclass, ++ uint32_t *type) ++{ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) ++ *type = 3; ++ else ++ *type = 1; ++ return 0; ++} ++ ++int i915_invalidate_caches(struct drm_device *dev, uint64_t flags) ++{ ++ /* ++ * FIXME: Only emit once per batchbuffer submission. 
++ */ ++ ++ uint32_t flush_cmd = MI_NO_WRITE_FLUSH; ++ ++ if (flags & DRM_BO_FLAG_READ) ++ flush_cmd |= MI_READ_FLUSH; ++ if (flags & DRM_BO_FLAG_EXE) ++ flush_cmd |= MI_EXE_FLUSH; ++ ++ return i915_emit_mi_flush(dev, flush_cmd); ++} ++ ++int i915_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man) ++{ ++ switch (type) { ++ case DRM_BO_MEM_LOCAL: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CACHED; ++ man->drm_bus_maptype = 0; ++ man->gpu_offset = 0; ++ break; ++ case DRM_BO_MEM_TT: ++ if (!(drm_core_has_AGP(dev) && dev->agp)) { ++ DRM_ERROR("AGP is not enabled for memory type %u\n", ++ (unsigned)type); ++ return -EINVAL; ++ } ++ man->io_offset = dev->agp->agp_info.aper_base; ++ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; ++ man->io_addr = NULL; ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ man->gpu_offset = 0; ++ break; ++ case DRM_BO_MEM_PRIV0: ++ if (!(drm_core_has_AGP(dev) && dev->agp)) { ++ DRM_ERROR("AGP is not enabled for memory type %u\n", ++ (unsigned)type); ++ return -EINVAL; ++ } ++ man->io_offset = dev->agp->agp_info.aper_base; ++ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; ++ man->io_addr = NULL; ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ man->gpu_offset = 0; ++ break; ++ default: ++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/* ++ * i915_evict_flags: ++ * ++ * @bo: the buffer object to be evicted ++ * ++ * Return the bo flags for a buffer which is not mapped to the hardware. ++ * These will be placed in proposed_flags so that when the move is ++ * finished, they'll end up in bo->mem.flags ++ */ ++uint64_t i915_evict_flags(struct drm_buffer_object *bo) ++{ ++ switch (bo->mem.mem_type) { ++ case DRM_BO_MEM_LOCAL: ++ case DRM_BO_MEM_TT: ++ return DRM_BO_FLAG_MEM_LOCAL; ++ default: ++ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; ++ } ++} ++ ++#if 0 /* See comment below */ ++ ++static void i915_emit_copy_blit(struct drm_device * dev, ++ uint32_t src_offset, ++ uint32_t dst_offset, ++ uint32_t pages, int direction) ++{ ++ uint32_t cur_pages; ++ uint32_t stride = PAGE_SIZE; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ if (!dev_priv) ++ return; ++ ++ i915_kernel_lost_context(dev); ++ while (pages > 0) { ++ cur_pages = pages; ++ if (cur_pages > 2048) ++ cur_pages = 2048; ++ pages -= cur_pages; ++ ++ BEGIN_LP_RING(6); ++ OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | ++ XY_SRC_COPY_BLT_WRITE_RGB); ++ OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) | ++ (1 << 25) | (direction ? 
(1 << 30) : 0)); ++ OUT_RING((cur_pages << 16) | PAGE_SIZE); ++ OUT_RING(dst_offset); ++ OUT_RING(stride & 0xffff); ++ OUT_RING(src_offset); ++ ADVANCE_LP_RING(); ++ } ++ return; ++} ++ ++static int i915_move_blit(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int dir = 0; ++ ++ if ((old_mem->mem_type == new_mem->mem_type) && ++ (new_mem->mm_node->start < ++ old_mem->mm_node->start + old_mem->mm_node->size)) { ++ dir = 1; ++ } ++ ++ i915_emit_copy_blit(bo->dev, ++ old_mem->mm_node->start << PAGE_SHIFT, ++ new_mem->mm_node->start << PAGE_SHIFT, ++ new_mem->num_pages, dir); ++ ++ i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH); ++ ++ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0, ++ DRM_FENCE_TYPE_EXE | ++ DRM_I915_FENCE_TYPE_RW, ++ DRM_I915_FENCE_FLAG_FLUSHED, new_mem); ++} ++ ++/* ++ * Flip destination ttm into cached-coherent AGP, ++ * then blit and subsequently move out again. ++ */ ++ ++static int i915_move_flip(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ int ret; ++ ++ tmp_mem = *new_mem; ++ tmp_mem.mm_node = NULL; ++ tmp_mem.mask = DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING; ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = drm_bind_ttm(bo->ttm, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = i915_move_blit(bo, 1, no_wait, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return ret; ++} ++ ++#endif ++ ++/* ++ * Disable i915_move_flip for now, since we can't guarantee that the hardware ++ * lock is held here. To re-enable we need to make sure either ++ * a) The X server is using DRM to submit commands to the ring, or ++ * b) DRM can use the HP ring for these blits. This means i915 needs to ++ * implement a new ring submission mechanism and fence class. 
++ */ ++int i915_move(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/ ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } else { ++ if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/ ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ return 0; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) ++static inline void clflush(volatile void *__p) ++{ ++ asm volatile("clflush %0" : "+m" (*(char __force *)__p)); ++} ++#endif ++ ++static inline void drm_cache_flush_addr(void *virt) ++{ ++ int i; ++ ++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) ++ clflush(virt+i); ++} ++ ++static inline void drm_cache_flush_page(struct page *p) ++{ ++ drm_cache_flush_addr(page_address(p)); ++} ++ ++void i915_flush_ttm(struct drm_ttm *ttm) ++{ ++ int i; ++ ++ if (!ttm) ++ return; ++ ++ DRM_MEMORYBARRIER(); ++ ++#ifdef CONFIG_X86_32 ++ /* Hopefully nobody has built an x86-64 processor without clflush */ ++ if (!cpu_has_clflush) { ++ wbinvd(); ++ DRM_MEMORYBARRIER(); ++ return; ++ } ++#endif ++ ++ for (i = ttm->num_pages - 1; i >= 0; i--) ++ drm_cache_flush_page(drm_ttm_get_page(ttm, i)); ++ ++ DRM_MEMORYBARRIER(); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_compat.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_compat.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_compat.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_compat.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,215 @@ ++#include "drmP.h" ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++ ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 ++#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980 ++#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 ++#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 ++#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 ++#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 ++#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC ++#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 ++#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 ++#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 ++ ++#define I915_IFPADDR 0x60 ++#define I965_IFPADDR 0x70 ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ++#define upper_32_bits(_val) (((u64)(_val)) >> 32) ++#endif ++ ++static struct _i9xx_private_compat { ++ void __iomem *flush_page; ++ int resource_valid; ++ struct resource ifp_resource; ++} i9xx_private; ++ ++static struct _i8xx_private_compat { ++ void *flush_page; ++ struct page *page; ++} i8xx_private; ++ ++static void ++intel_compat_align_resource(void *data, struct resource *res, ++ resource_size_t size, resource_size_t align) ++{ ++ return; ++} ++ ++ ++static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev) ++{ ++ int ret; ++ ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE, ++ PAGE_SIZE, PCIBIOS_MIN_MEM, 0, ++ intel_compat_align_resource, pdev); ++ if (ret != 0) ++ return ret; ++ ++ return 0; ++} ++ ++static void intel_i915_setup_chipset_flush(struct pci_dev *pdev) ++{ ++ int ret; ++ u32 temp; ++ ++ pci_read_config_dword(pdev, I915_IFPADDR, &temp); ++ if (!(temp & 0x1)) { ++ intel_alloc_chipset_flush_resource(pdev); ++ 
i9xx_private.resource_valid = 1; ++ pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); ++ } else { ++ temp &= ~1; ++ ++ i9xx_private.resource_valid = 1; ++ i9xx_private.ifp_resource.start = temp; ++ i9xx_private.ifp_resource.end = temp + PAGE_SIZE; ++ ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource); ++ if (ret) { ++ i9xx_private.resource_valid = 0; ++ printk("Failed inserting resource into tree\n"); ++ } ++ } ++} ++ ++static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev) ++{ ++ u32 temp_hi, temp_lo; ++ int ret; ++ ++ pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi); ++ pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo); ++ ++ if (!(temp_lo & 0x1)) { ++ ++ intel_alloc_chipset_flush_resource(pdev); ++ ++ i9xx_private.resource_valid = 1; ++ pci_write_config_dword(pdev, I965_IFPADDR + 4, ++ upper_32_bits(i9xx_private.ifp_resource.start)); ++ pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); ++ } else { ++ u64 l64; ++ ++ temp_lo &= ~0x1; ++ l64 = ((u64)temp_hi << 32) | temp_lo; ++ ++ i9xx_private.resource_valid = 1; ++ i9xx_private.ifp_resource.start = l64; ++ i9xx_private.ifp_resource.end = l64 + PAGE_SIZE; ++ ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource); ++ if (ret) { ++ i9xx_private.resource_valid = 0; ++ printk("Failed inserting resource into tree\n"); ++ } ++ } ++} ++ ++static void intel_i8xx_fini_flush(struct drm_device *dev) ++{ ++ kunmap(i8xx_private.page); ++ i8xx_private.flush_page = NULL; ++ unmap_page_from_agp(i8xx_private.page); ++ flush_agp_mappings(); ++ ++ __free_page(i8xx_private.page); ++} ++ ++static void intel_i8xx_setup_flush(struct drm_device *dev) ++{ ++ ++ i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ if (!i8xx_private.page) { ++ return; ++ } ++ ++ /* make page uncached */ ++ map_page_into_agp(i8xx_private.page); ++ flush_agp_mappings(); ++ ++ i8xx_private.flush_page = kmap(i8xx_private.page); ++ if (!i8xx_private.flush_page) ++ intel_i8xx_fini_flush(dev); ++} ++ ++ ++static void intel_i8xx_flush_page(struct drm_device *dev) ++{ ++ unsigned int *pg = i8xx_private.flush_page; ++ int i; ++ ++ /* HAI NUT CAN I HAZ HAMMER?? 
*/ ++ for (i = 0; i < 256; i++) ++ *(pg + i) = i; ++ ++ DRM_MEMORYBARRIER(); ++} ++ ++static void intel_i9xx_setup_flush(struct drm_device *dev) ++{ ++ struct pci_dev *agp_dev = dev->agp->agp_info.device; ++ ++ i9xx_private.ifp_resource.name = "GMCH IFPBAR"; ++ i9xx_private.ifp_resource.flags = IORESOURCE_MEM; ++ ++ /* Setup chipset flush for 915 */ ++ if (IS_I965G(dev) || IS_G33(dev)) { ++ intel_i965_g33_setup_chipset_flush(agp_dev); ++ } else { ++ intel_i915_setup_chipset_flush(agp_dev); ++ } ++ ++ if (i9xx_private.ifp_resource.start) { ++ i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE); ++ if (!i9xx_private.flush_page) ++ printk("unable to ioremap flush page - no chipset flushing"); ++ } ++} ++ ++static void intel_i9xx_fini_flush(struct drm_device *dev) ++{ ++ iounmap(i9xx_private.flush_page); ++ if (i9xx_private.resource_valid) ++ release_resource(&i9xx_private.ifp_resource); ++ i9xx_private.resource_valid = 0; ++} ++ ++static void intel_i9xx_flush_page(struct drm_device *dev) ++{ ++ if (i9xx_private.flush_page) ++ writel(1, i9xx_private.flush_page); ++} ++ ++void intel_init_chipset_flush_compat(struct drm_device *dev) ++{ ++ /* not flush on i8xx */ ++ if (IS_I9XX(dev)) ++ intel_i9xx_setup_flush(dev); ++ else ++ intel_i8xx_setup_flush(dev); ++ ++} ++ ++void intel_fini_chipset_flush_compat(struct drm_device *dev) ++{ ++ /* not flush on i8xx */ ++ if (IS_I9XX(dev)) ++ intel_i9xx_fini_flush(dev); ++ else ++ intel_i8xx_fini_flush(dev); ++} ++ ++void drm_agp_chipset_flush(struct drm_device *dev) ++{ ++ if (IS_I9XX(dev)) ++ intel_i9xx_flush_page(dev); ++ else ++ intel_i8xx_flush_page(dev); ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_dma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_dma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1276 @@ ++/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- ++ */ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/* Really want an OS-independent resettable timer. 
Would like to have ++ * this loop run for (eg) 3 sec, but have the timer reset every time ++ * the head pointer changes, so that EBUSY only happens if the ring ++ * actually stalls for (eg) 3 seconds. ++ */ ++int i915_wait_ring(struct drm_device * dev, int n, const char *caller) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; ++ u32 last_acthd = I915_READ(acthd_reg); ++ u32 acthd; ++ int i; ++ ++ for (i = 0; i < 100000; i++) { ++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ acthd = I915_READ(acthd_reg); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++ if (ring->space >= n) ++ return 0; ++ ++ if (ring->head != last_head) ++ i = 0; ++ ++ if (acthd != last_acthd) ++ i = 0; ++ ++ last_head = ring->head; ++ last_acthd = acthd; ++ DRM_UDELAY(10 * 1000); ++ } ++ ++ return -EBUSY; ++} ++ ++int i915_init_hardware_status(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_dma_handle_t *dmah; ++ ++ /* Program Hardware Status Page */ ++#ifdef __FreeBSD__ ++ DRM_UNLOCK(); ++#endif ++ dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); ++#ifdef __FreeBSD__ ++ DRM_LOCK(); ++#endif ++ if (!dmah) { ++ DRM_ERROR("Can not allocate hardware status page\n"); ++ return -ENOMEM; ++ } ++ ++ dev_priv->status_page_dmah = dmah; ++ dev_priv->hw_status_page = dmah->vaddr; ++ dev_priv->dma_status_page = dmah->busaddr; ++ ++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ ++ I915_WRITE(0x02080, dev_priv->dma_status_page); ++ DRM_DEBUG("Enabled hardware status page\n"); ++ return 0; ++} ++ ++void i915_free_hardware_status(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ if (dev_priv->status_page_dmah) { ++ drm_pci_free(dev, dev_priv->status_page_dmah); ++ dev_priv->status_page_dmah = NULL; ++ /* Need to rewrite hardware status page */ ++ I915_WRITE(0x02080, 0x1ffff000); ++ } ++ ++ if (dev_priv->status_gfx_addr) { ++ dev_priv->status_gfx_addr = 0; ++ drm_core_ioremapfree(&dev_priv->hws_map, dev); ++ I915_WRITE(0x02080, 0x1ffff000); ++ } ++} ++ ++#if I915_RING_VALIDATE ++/** ++ * Validate the cached ring tail value ++ * ++ * If the X server writes to the ring and DRM doesn't ++ * reload the head and tail pointers, it will end up writing ++ * data to the wrong place in the ring, causing havoc. ++ */ ++void i915_ring_validate(struct drm_device *dev, const char *func, int line) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ u32 tail = I915_READ(PRB0_TAIL) & HEAD_ADDR; ++ u32 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ ++ if (tail != ring->tail) { ++ DRM_ERROR("%s:%d head sw %x, hw %x. 
tail sw %x hw %x\n", ++ func, line, ++ ring->head, head, ring->tail, tail); ++#ifdef __linux__ ++ BUG_ON(1); ++#endif ++ } ++} ++#endif ++ ++void i915_kernel_lost_context(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ ++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++} ++ ++static int i915_dma_cleanup(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev_priv->ring.virtual_start) { ++ drm_core_ioremapfree(&dev_priv->ring.map, dev); ++ dev_priv->ring.virtual_start = 0; ++ dev_priv->ring.map.handle = 0; ++ dev_priv->ring.map.size = 0; ++ } ++ ++ if (I915_NEED_GFX_HWS(dev)) ++ i915_free_hardware_status(dev); ++ ++ return 0; ++} ++ ++#if defined(I915_HAVE_BUFFER) ++#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16) ++#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff) ++#define DRI2_SAREA_BLOCK_NEXT(p) \ ++ ((void *) ((unsigned char *) (p) + \ ++ DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p))) ++ ++#define DRI2_SAREA_BLOCK_END 0x0000 ++#define DRI2_SAREA_BLOCK_LOCK 0x0001 ++#define DRI2_SAREA_BLOCK_EVENT_BUFFER 0x0002 ++ ++static int ++setup_dri2_sarea(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_i915_init_t * init) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ unsigned int *p, *end, *next; ++ ++ mutex_lock(&dev->struct_mutex); ++ dev_priv->sarea_bo = ++ drm_lookup_buffer_object(file_priv, ++ init->sarea_handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!dev_priv->sarea_bo) { ++ DRM_ERROR("did not find sarea bo\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_kmap(dev_priv->sarea_bo, 0, ++ dev_priv->sarea_bo->num_pages, ++ &dev_priv->sarea_kmap); ++ if (ret) { ++ DRM_ERROR("could not map sarea bo\n"); ++ return ret; ++ } ++ ++ p = dev_priv->sarea_kmap.virtual; ++ end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT); ++ while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) { ++ switch (DRI2_SAREA_BLOCK_TYPE(*p)) { ++ case DRI2_SAREA_BLOCK_LOCK: ++ dev->lock.hw_lock = (void *) (p + 1); ++ dev->sigdata.lock = dev->lock.hw_lock; ++ break; ++ } ++ next = DRI2_SAREA_BLOCK_NEXT(p); ++ if (next <= p || end < next) { ++ DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n", ++ next, p, end); ++ return -EINVAL; ++ } ++ p = next; ++ } ++ ++ return 0; ++} ++#endif ++ ++static int i915_initialize(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_i915_init_t * init) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++#if defined(I915_HAVE_BUFFER) ++ int ret; ++#endif ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ i915_dma_cleanup(dev); ++ return -EINVAL; ++ } ++ ++#ifdef I915_HAVE_BUFFER ++ dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS; ++#endif ++ ++ if (init->sarea_priv_offset) ++ dev_priv->sarea_priv = (drm_i915_sarea_t *) ++ ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ else { ++ /* No sarea_priv for you! 
*/ ++ dev_priv->sarea_priv = NULL; ++ } ++ ++ if (init->ring_size != 0) { ++ dev_priv->ring.Size = init->ring_size; ++ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; ++ ++ dev_priv->ring.map.offset = init->ring_start; ++ dev_priv->ring.map.size = init->ring_size; ++ dev_priv->ring.map.type = 0; ++ dev_priv->ring.map.flags = 0; ++ dev_priv->ring.map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->ring.map, dev); ++ ++ if (dev_priv->ring.map.handle == NULL) { ++ i915_dma_cleanup(dev); ++ DRM_ERROR("can not ioremap virtual address for" ++ " ring buffer\n"); ++ return -ENOMEM; ++ } ++ ++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ } ++ ++ dev_priv->cpp = init->cpp; ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->pf_current_page = 0; ++ ++ /* We are using separate values as placeholders for mechanisms for ++ * private backbuffer/depthbuffer usage. ++ */ ++ ++ /* Allow hardware batchbuffers unless told otherwise. ++ */ ++ dev_priv->allow_batchbuffer = 1; ++ ++ /* Enable vblank on pipe A for older X servers ++ */ ++ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; ++ ++#ifdef I915_HAVE_BUFFER ++ mutex_init(&dev_priv->cmdbuf_mutex); ++#endif ++#if defined(I915_HAVE_BUFFER) ++ if (init->func == I915_INIT_DMA2) { ++ ret = setup_dri2_sarea(dev, file_priv, init); ++ if (ret) { ++ i915_dma_cleanup(dev); ++ DRM_ERROR("could not set up dri2 sarea\n"); ++ return ret; ++ } ++ } ++#endif ++ ++ return 0; ++} ++ ++static int i915_dma_resume(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!dev_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->ring.map.handle == NULL) { ++ DRM_ERROR("can not ioremap virtual address for" ++ " ring buffer\n"); ++ return -ENOMEM; ++ } ++ ++ /* Program Hardware Status Page */ ++ if (!dev_priv->hw_status_page) { ++ DRM_ERROR("Can not find hardware status page\n"); ++ return -EINVAL; ++ } ++ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); ++ ++ if (dev_priv->status_gfx_addr != 0) ++ I915_WRITE(0x02080, dev_priv->status_gfx_addr); ++ else ++ I915_WRITE(0x02080, dev_priv->dma_status_page); ++ DRM_DEBUG("Enabled hardware status page\n"); ++ ++ return 0; ++} ++ ++static int i915_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_init_t *init = data; ++ int retcode = 0; ++ ++ switch (init->func) { ++ case I915_INIT_DMA: ++ case I915_INIT_DMA2: ++ retcode = i915_initialize(dev, file_priv, init); ++ break; ++ case I915_CLEANUP_DMA: ++ retcode = i915_dma_cleanup(dev); ++ break; ++ case I915_RESUME_DMA: ++ retcode = i915_dma_resume(dev); ++ break; ++ default: ++ retcode = -EINVAL; ++ break; ++ } ++ ++ return retcode; ++} ++ ++/* Implement basically the same security restrictions as hardware does ++ * for MI_BATCH_NON_SECURE. These can be made stricter at any time. ++ * ++ * Most of the calculations below involve calculating the size of a ++ * particular instruction. It's important to get the size right as ++ * that tells us where the next instruction to check is. Any illegal ++ * instruction detected will be given a size of zero, which is a ++ * signal to abort the rest of the buffer. 
++ */ ++static int do_validate_cmd(int cmd) ++{ ++ switch (((cmd >> 29) & 0x7)) { ++ case 0x0: ++ switch ((cmd >> 23) & 0x3f) { ++ case 0x0: ++ return 1; /* MI_NOOP */ ++ case 0x4: ++ return 1; /* MI_FLUSH */ ++ default: ++ return 0; /* disallow everything else */ ++ } ++ break; ++ case 0x1: ++ return 0; /* reserved */ ++ case 0x2: ++ return (cmd & 0xff) + 2; /* 2d commands */ ++ case 0x3: ++ if (((cmd >> 24) & 0x1f) <= 0x18) ++ return 1; ++ ++ switch ((cmd >> 24) & 0x1f) { ++ case 0x1c: ++ return 1; ++ case 0x1d: ++ switch ((cmd >> 16) & 0xff) { ++ case 0x3: ++ return (cmd & 0x1f) + 2; ++ case 0x4: ++ return (cmd & 0xf) + 2; ++ default: ++ return (cmd & 0xffff) + 2; ++ } ++ case 0x1e: ++ if (cmd & (1 << 23)) ++ return (cmd & 0xffff) + 1; ++ else ++ return 1; ++ case 0x1f: ++ if ((cmd & (1 << 23)) == 0) /* inline vertices */ ++ return (cmd & 0x1ffff) + 2; ++ else if (cmd & (1 << 17)) /* indirect random */ ++ if ((cmd & 0xffff) == 0) ++ return 0; /* unknown length, too hard */ ++ else ++ return (((cmd & 0xffff) + 1) / 2) + 1; ++ else ++ return 2; /* indirect sequential */ ++ default: ++ return 0; ++ } ++ default: ++ return 0; ++ } ++ ++ return 0; ++} ++ ++static int validate_cmd(int cmd) ++{ ++ int ret = do_validate_cmd(cmd); ++ ++/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ ++ ++ return ret; ++} ++ ++static int i915_emit_cmds(struct drm_device *dev, int __user *buffer, ++ int dwords) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int i; ++ RING_LOCALS; ++ ++ if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) ++ return -EINVAL; ++ ++ BEGIN_LP_RING((dwords+1)&~1); ++ ++ for (i = 0; i < dwords;) { ++ int cmd, sz; ++ ++ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) ++ return -EINVAL; ++ ++ if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) ++ return -EINVAL; ++ ++ OUT_RING(cmd); ++ ++ while (++i, --sz) { ++ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], ++ sizeof(cmd))) { ++ return -EINVAL; ++ } ++ OUT_RING(cmd); ++ } ++ } ++ ++ if (dwords & 1) ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++ ++ return 0; ++} ++ ++int i915_emit_box(struct drm_device * dev, ++ struct drm_clip_rect __user * boxes, ++ int i, int DR1, int DR4) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect box; ++ RING_LOCALS; ++ ++ if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { ++ return -EFAULT; ++ } ++ ++ if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { ++ DRM_ERROR("Bad box %d,%d..%d,%d\n", ++ box.x1, box.y1, box.x2, box.y2); ++ return -EINVAL; ++ } ++ ++ if (IS_I965G(dev)) { ++ BEGIN_LP_RING(4); ++ OUT_RING(GFX_OP_DRAWRECT_INFO_I965); ++ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); ++ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); ++ OUT_RING(DR4); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(6); ++ OUT_RING(GFX_OP_DRAWRECT_INFO); ++ OUT_RING(DR1); ++ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); ++ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); ++ OUT_RING(DR4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ ++ return 0; ++} ++ ++/* XXX: Emitting the counter should really be moved to part of the IRQ ++ * emit. 
For now, do it in both places: ++ */ ++ ++void i915_emit_breadcrumb(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ if (++dev_priv->counter > BREADCRUMB_MASK) { ++ dev_priv->counter = 1; ++ DRM_DEBUG("Breadcrumb counter wrapped around\n"); ++ } ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_enqueue = dev_priv->counter; ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(dev_priv->counter); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++} ++ ++ ++int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t flush_cmd = MI_FLUSH; ++ RING_LOCALS; ++ ++ flush_cmd |= flush; ++ ++ i915_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(flush_cmd); ++ OUT_RING(0); ++ OUT_RING(0); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ return 0; ++} ++ ++ ++static int i915_dispatch_cmdbuffer(struct drm_device * dev, ++ drm_i915_cmdbuffer_t * cmd) ++{ ++#ifdef I915_HAVE_FENCE ++ drm_i915_private_t *dev_priv = dev->dev_private; ++#endif ++ int nbox = cmd->num_cliprects; ++ int i = 0, count, ret; ++ ++ if (cmd->sz & 0x3) { ++ DRM_ERROR("alignment\n"); ++ return -EINVAL; ++ } ++ ++ i915_kernel_lost_context(dev); ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ ret = i915_emit_box(dev, cmd->cliprects, i, ++ cmd->DR1, cmd->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); ++ if (ret) ++ return ret; ++ } ++ ++ i915_emit_breadcrumb(dev); ++#ifdef I915_HAVE_FENCE ++ if (unlikely((dev_priv->counter & 0xFF) == 0)) ++ drm_fence_flush_old(dev, 0, dev_priv->counter); ++#endif ++ return 0; ++} ++ ++int i915_dispatch_batchbuffer(struct drm_device * dev, ++ drm_i915_batchbuffer_t * batch) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect __user *boxes = batch->cliprects; ++ int nbox = batch->num_cliprects; ++ int i = 0, count; ++ RING_LOCALS; ++ ++ if ((batch->start | batch->used) & 0x7) { ++ DRM_ERROR("alignment\n"); ++ return -EINVAL; ++ } ++ ++ i915_kernel_lost_context(dev); ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ int ret = i915_emit_box(dev, boxes, i, ++ batch->DR1, batch->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ if (IS_I830(dev) || IS_845G(dev)) { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_BATCH_BUFFER); ++ OUT_RING(batch->start | MI_BATCH_NON_SECURE); ++ OUT_RING(batch->start + batch->used - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(2); ++ if (IS_I965G(dev)) { ++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); ++ OUT_RING(batch->start); ++ } else { ++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); ++ OUT_RING(batch->start | MI_BATCH_NON_SECURE); ++ } ++ ADVANCE_LP_RING(); ++ } ++ } ++ ++ i915_emit_breadcrumb(dev); ++#ifdef I915_HAVE_FENCE ++ if (unlikely((dev_priv->counter & 0xFF) == 0)) ++ drm_fence_flush_old(dev, 0, dev_priv->counter); ++#endif ++ return 0; ++} ++ ++static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 num_pages, current_page, next_page, dspbase; ++ int shift = 2 * plane, x, y; ++ RING_LOCALS; ++ ++ /* Calculate display base offset */ ++ num_pages = dev_priv->sarea_priv->third_handle ? 
3 : 2; ++ current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3; ++ next_page = (current_page + 1) % num_pages; ++ ++ switch (next_page) { ++ default: ++ case 0: ++ dspbase = dev_priv->sarea_priv->front_offset; ++ break; ++ case 1: ++ dspbase = dev_priv->sarea_priv->back_offset; ++ break; ++ case 2: ++ dspbase = dev_priv->sarea_priv->third_offset; ++ break; ++ } ++ ++ if (plane == 0) { ++ x = dev_priv->sarea_priv->planeA_x; ++ y = dev_priv->sarea_priv->planeA_y; ++ } else { ++ x = dev_priv->sarea_priv->planeB_x; ++ y = dev_priv->sarea_priv->planeB_y; ++ } ++ ++ dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp; ++ ++ DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page, ++ dspbase); ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(sync ? 0 : ++ (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP : ++ MI_WAIT_FOR_PLANE_A_FLIP))); ++ OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) | ++ (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A)); ++ OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp); ++ OUT_RING(dspbase); ++ ADVANCE_LP_RING(); ++ ++ dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift); ++ dev_priv->sarea_priv->pf_current_page |= next_page << shift; ++} ++ ++void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n", ++ planes, dev_priv->sarea_priv->pf_current_page); ++ ++ i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH); ++ ++ for (i = 0; i < 2; i++) ++ if (planes & (1 << i)) ++ i915_do_dispatch_flip(dev, i, sync); ++ ++ i915_emit_breadcrumb(dev); ++#ifdef I915_HAVE_FENCE ++ if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0))) ++ drm_fence_flush_old(dev, 0, dev_priv->counter); ++#endif ++} ++ ++int i915_quiescent(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ i915_kernel_lost_context(dev); ++ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); ++ if (ret) ++ { ++ i915_kernel_lost_context (dev); ++ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n", ++ dev_priv->ring.head, ++ dev_priv->ring.tail, ++ dev_priv->ring.space); ++ } ++ return ret; ++} ++ ++static int i915_flush_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return i915_quiescent(dev); ++} ++ ++static int i915_batchbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) ++ dev_priv->sarea_priv; ++ drm_i915_batchbuffer_t *batch = data; ++ int ret; ++ ++ if (!dev_priv->allow_batchbuffer) { ++ DRM_ERROR("Batchbuffer ioctl disabled\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", ++ batch->start, batch->used, batch->num_cliprects); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, ++ batch->num_cliprects * ++ sizeof(struct drm_clip_rect))) ++ return -EFAULT; ++ ++ ret = i915_dispatch_batchbuffer(dev, batch); ++ ++ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ return ret; ++} ++ ++static int i915_cmdbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) ++ 
dev_priv->sarea_priv; ++ drm_i915_cmdbuffer_t *cmdbuf = data; ++ int ret; ++ ++ DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", ++ cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (cmdbuf->num_cliprects && ++ DRM_VERIFYAREA_READ(cmdbuf->cliprects, ++ cmdbuf->num_cliprects * ++ sizeof(struct drm_clip_rect))) { ++ DRM_ERROR("Fault accessing cliprects\n"); ++ return -EFAULT; ++ } ++ ++ ret = i915_dispatch_cmdbuffer(dev, cmdbuf); ++ if (ret) { ++ DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); ++ return ret; ++ } ++ ++ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ return 0; ++} ++ ++#if defined(DRM_DEBUG_CODE) ++#define DRM_DEBUG_RELOCATION (drm_debug != 0) ++#else ++#define DRM_DEBUG_RELOCATION 0 ++#endif ++ ++static int i915_do_cleanup_pageflip(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0, planes = 0; i < 2; i++) ++ if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) { ++ dev_priv->sarea_priv->pf_current_page = ++ (dev_priv->sarea_priv->pf_current_page & ++ ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i)); ++ ++ planes |= 1 << i; ++ } ++ ++ if (planes) ++ i915_dispatch_flip(dev, planes, 0); ++ ++ return 0; ++} ++ ++static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_i915_flip_t *param = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* This is really planes */ ++ if (param->pipes & ~0x3) { ++ DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n", ++ param->pipes); ++ return -EINVAL; ++ } ++ ++ i915_dispatch_flip(dev, param->pipes, 0); ++ ++ return 0; ++} ++ ++ ++static int i915_getparam(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (param->param) { ++ case I915_PARAM_IRQ_ACTIVE: ++ value = dev->irq_enabled ? 1 : 0; ++ break; ++ case I915_PARAM_ALLOW_BATCHBUFFER: ++ value = dev_priv->allow_batchbuffer ? 
1 : 0; ++ break; ++ case I915_PARAM_LAST_DISPATCH: ++ value = READ_BREADCRUMB(dev_priv); ++ break; ++ case I915_PARAM_CHIPSET_ID: ++ value = dev->pci_device; ++ break; ++ case I915_PARAM_HAS_GEM: ++ value = 1; ++ break; ++ default: ++ DRM_ERROR("Unknown parameter %d\n", param->param); ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("DRM_COPY_TO_USER failed\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int i915_setparam(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_setparam_t *param = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (param->param) { ++ case I915_SETPARAM_USE_MI_BATCHBUFFER_START: ++ break; ++ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: ++ dev_priv->tex_lru_log_granularity = param->value; ++ break; ++ case I915_SETPARAM_ALLOW_BATCHBUFFER: ++ dev_priv->allow_batchbuffer = param->value; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %d\n", param->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++drm_i915_mmio_entry_t mmio_table[] = { ++ [MMIO_REGS_PS_DEPTH_COUNT] = { ++ I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE, ++ 0x2350, ++ 8 ++ } ++}; ++ ++static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t); ++ ++static int i915_mmio(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ uint32_t buf[8]; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mmio_entry_t *e; ++ drm_i915_mmio_t *mmio = data; ++ void __iomem *base; ++ int i; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (mmio->reg >= mmio_table_size) ++ return -EINVAL; ++ ++ e = &mmio_table[mmio->reg]; ++ base = (u8 *) dev_priv->mmio_map->handle + e->offset; ++ ++ switch (mmio->read_write) { ++ case I915_MMIO_READ: ++ if (!(e->flag & I915_MMIO_MAY_READ)) ++ return -EINVAL; ++ for (i = 0; i < e->size / 4; i++) ++ buf[i] = I915_READ(e->offset + i * 4); ++ if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) { ++ DRM_ERROR("DRM_COPY_TO_USER failed\n"); ++ return -EFAULT; ++ } ++ break; ++ ++ case I915_MMIO_WRITE: ++ if (!(e->flag & I915_MMIO_MAY_WRITE)) ++ return -EINVAL; ++ if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) { ++ DRM_ERROR("DRM_COPY_TO_USER failed\n"); ++ return -EFAULT; ++ } ++ for (i = 0; i < e->size / 4; i++) ++ I915_WRITE(e->offset + i * 4, buf[i]); ++ break; ++ } ++ return 0; ++} ++ ++static int i915_set_status_page(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_hws_addr_t *hws = data; ++ ++ if (!I915_NEED_GFX_HWS(dev)) ++ return -EINVAL; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); ++ ++ dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); ++ ++ dev_priv->hws_map.offset = dev->agp->base + hws->addr; ++ dev_priv->hws_map.size = 4*1024; ++ dev_priv->hws_map.type = 0; ++ dev_priv->hws_map.flags = 0; ++ dev_priv->hws_map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->hws_map, dev); ++ if (dev_priv->hws_map.handle == NULL) { ++ i915_dma_cleanup(dev); ++ dev_priv->status_gfx_addr = 0; ++ DRM_ERROR("can not ioremap virtual address for" ++ " G33 hw status page\n"); ++ return -ENOMEM; ++ } ++ dev_priv->hw_status_page = dev_priv->hws_map.handle; ++ ++ memset(dev_priv->hw_status_page, 
0, PAGE_SIZE); ++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); ++ DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n", ++ dev_priv->status_gfx_addr); ++ DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); ++ return 0; ++} ++ ++int i915_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ struct drm_i915_private *dev_priv; ++ unsigned long base, size; ++ int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; ++ ++ /* i915 has 4 more counters */ ++ dev->counters += 4; ++ dev->types[6] = _DRM_STAT_IRQ; ++ dev->types[7] = _DRM_STAT_PRIMARY; ++ dev->types[8] = _DRM_STAT_SECONDARY; ++ dev->types[9] = _DRM_STAT_DMA; ++ ++ dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_i915_private_t)); ++ ++ dev->dev_private = (void *)dev_priv; ++ dev_priv->dev = dev; ++ ++ /* Add register map (needed for suspend/resume) */ ++ base = drm_get_resource_start(dev, mmio_bar); ++ size = drm_get_resource_len(dev, mmio_bar); ++ ++ ret = drm_addmap(dev, base, size, _DRM_REGISTERS, ++ _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map); ++#ifdef I915_HAVE_GEM ++ i915_gem_load(dev); ++#endif ++ DRM_SPININIT(&dev_priv->swaps_lock, "swap"); ++ DRM_SPININIT(&dev_priv->user_irq_lock, "userirq"); ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++ intel_init_chipset_flush_compat(dev); ++#endif ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_init(dev); ++#endif ++#endif ++ ++ /* Init HWS */ ++ if (!I915_NEED_GFX_HWS(dev)) { ++ ret = i915_init_hardware_status(dev); ++ if(ret) ++ return ret; ++ } ++ ++ return ret; ++} ++ ++int i915_driver_unload(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ i915_free_hardware_status(dev); ++ ++ drm_rmmap(dev, dev_priv->mmio_map); ++ ++ DRM_SPINUNINIT(&dev_priv->swaps_lock); ++ DRM_SPINUNINIT(&dev_priv->user_irq_lock); ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_free(dev); ++#endif ++#endif ++ ++ drm_free(dev->dev_private, sizeof(drm_i915_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++ intel_fini_chipset_flush_compat(dev); ++#endif ++#endif ++ return 0; ++} ++ ++void i915_driver_lastclose(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ /* agp off can use this to get called before dev_priv */ ++ if (!dev_priv) ++ return; ++ ++#ifdef I915_HAVE_BUFFER ++ if (dev_priv->val_bufs) { ++ vfree(dev_priv->val_bufs); ++ dev_priv->val_bufs = NULL; ++ } ++#endif ++#ifdef I915_HAVE_GEM ++ i915_gem_lastclose(dev); ++#endif ++ if (drm_getsarea(dev) && dev_priv->sarea_priv) ++ i915_do_cleanup_pageflip(dev); ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv = NULL; ++ if (dev_priv->agp_heap) ++ i915_mem_takedown(&(dev_priv->agp_heap)); ++#if defined(I915_HAVE_BUFFER) ++ if (dev_priv->sarea_kmap.virtual) { ++ drm_bo_kunmap(&dev_priv->sarea_kmap); ++ dev_priv->sarea_kmap.virtual = NULL; ++ dev->lock.hw_lock = NULL; ++ dev->sigdata.lock = NULL; ++ } ++ ++ if (dev_priv->sarea_bo) { ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_usage_deref_locked(&dev_priv->sarea_bo); ++ mutex_unlock(&dev->struct_mutex); ++ dev_priv->sarea_bo = NULL; ++ } ++#endif ++ i915_dma_cleanup(dev); ++} ++ ++int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_i915_file_private *i915_file_priv; ++ ++ DRM_DEBUG("\n"); ++ i915_file_priv = (struct 
drm_i915_file_private *) ++ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); ++ ++ if (!i915_file_priv) ++ return -ENOMEM; ++ ++ file_priv->driver_priv = i915_file_priv; ++ ++ i915_file_priv->mm.last_gem_seqno = 0; ++ i915_file_priv->mm.last_gem_throttle_seqno = 0; ++ ++ return 0; ++} ++ ++void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ i915_mem_release(dev, file_priv, dev_priv->agp_heap); ++} ++ ++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; ++ ++ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); ++} ++ ++struct drm_ioctl_desc i915_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), ++ DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), ++ DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), ++ DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), ++#ifdef I915_HAVE_BUFFER ++ DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH), ++#endif ++#ifdef I915_HAVE_GEM ++ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), ++#endif ++}; ++ ++int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); ++ ++/** ++ * Determine if the device really is AGP or not. ++ * ++ * All Intel graphics chipsets are treated as AGP, even if they are really ++ * PCI-e. 
++ *
++ * \param dev The device to be tested.
++ *
++ * \returns
++ * A value of 1 is always returned to indicate every i9x5 is AGP.
++ */
++int i915_driver_device_is_agp(struct drm_device * dev)
++{
++	return 1;
++}
++
++int i915_driver_firstopen(struct drm_device *dev)
++{
++#ifdef I915_HAVE_BUFFER
++	drm_bo_driver_init(dev);
++#endif
++	return 0;
++}
+diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_drm.h
+--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_drm.h	1970-01-01 01:00:00.000000000 +0100
++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_drm.h	2011-09-04 11:31:05.000000000 +0200
+@@ -0,0 +1,719 @@
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _I915_DRM_H_
++#define _I915_DRM_H_
++
++/* Please note that modifications to all structs defined here are
++ * subject to backwards-compatibility constraints.
++ */
++
++#include "drm.h"
++
++/* Each region is a minimum of 16k, and there are at most 255 of them.
++ */
++#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
++				  * of chars for next/prev indices */
++#define I915_LOG_MIN_TEX_REGION_SIZE 14
++
++typedef struct _drm_i915_init {
++	enum {
++		I915_INIT_DMA = 0x01,
++		I915_CLEANUP_DMA = 0x02,
++		I915_RESUME_DMA = 0x03,
++
++		/* Since this struct isn't versioned, just use a new
++		 * 'func' code to indicate the presence of dri2 sarea
++		 * info.
*/ ++ I915_INIT_DMA2 = 0x04 ++ } func; ++ unsigned int mmio_offset; ++ int sarea_priv_offset; ++ unsigned int ring_start; ++ unsigned int ring_end; ++ unsigned int ring_size; ++ unsigned int front_offset; ++ unsigned int back_offset; ++ unsigned int depth_offset; ++ unsigned int w; ++ unsigned int h; ++ unsigned int pitch; ++ unsigned int pitch_bits; ++ unsigned int back_pitch; ++ unsigned int depth_pitch; ++ unsigned int cpp; ++ unsigned int chipset; ++ unsigned int sarea_handle; ++} drm_i915_init_t; ++ ++typedef struct drm_i915_sarea { ++ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; ++ int last_upload; /* last time texture was uploaded */ ++ int last_enqueue; /* last time a buffer was enqueued */ ++ int last_dispatch; /* age of the most recently dispatched buffer */ ++ int ctxOwner; /* last context to upload state */ ++ int texAge; ++ int pf_enabled; /* is pageflipping allowed? */ ++ int pf_active; ++ int pf_current_page; /* which buffer is being displayed? */ ++ int perf_boxes; /* performance boxes to be displayed */ ++ int width, height; /* screen size in pixels */ ++ ++ drm_handle_t front_handle; ++ int front_offset; ++ int front_size; ++ ++ drm_handle_t back_handle; ++ int back_offset; ++ int back_size; ++ ++ drm_handle_t depth_handle; ++ int depth_offset; ++ int depth_size; ++ ++ drm_handle_t tex_handle; ++ int tex_offset; ++ int tex_size; ++ int log_tex_granularity; ++ int pitch; ++ int rotation; /* 0, 90, 180 or 270 */ ++ int rotated_offset; ++ int rotated_size; ++ int rotated_pitch; ++ int virtualX, virtualY; ++ ++ unsigned int front_tiled; ++ unsigned int back_tiled; ++ unsigned int depth_tiled; ++ unsigned int rotated_tiled; ++ unsigned int rotated2_tiled; ++ ++ int planeA_x; ++ int planeA_y; ++ int planeA_w; ++ int planeA_h; ++ int planeB_x; ++ int planeB_y; ++ int planeB_w; ++ int planeB_h; ++ ++ /* Triple buffering */ ++ drm_handle_t third_handle; ++ int third_offset; ++ int third_size; ++ unsigned int third_tiled; ++ ++ /* buffer object handles for the static buffers. May change ++ * over the lifetime of the client, though it doesn't in our current ++ * implementation. ++ */ ++ unsigned int front_bo_handle; ++ unsigned int back_bo_handle; ++ unsigned int third_bo_handle; ++ unsigned int depth_bo_handle; ++} drm_i915_sarea_t; ++ ++/* Driver specific fence types and classes. ++ */ ++ ++/* The only fence class we support */ ++#define DRM_I915_FENCE_CLASS_ACCEL 0 ++/* Fence type that guarantees read-write flush */ ++#define DRM_I915_FENCE_TYPE_RW 2 ++/* MI_FLUSH programmed just before the fence */ ++#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000 ++ ++/* Flags for perf_boxes ++ */ ++#define I915_BOX_RING_EMPTY 0x1 ++#define I915_BOX_FLIP 0x2 ++#define I915_BOX_WAIT 0x4 ++#define I915_BOX_TEXTURE_LOAD 0x8 ++#define I915_BOX_LOST_CONTEXT 0x10 ++ ++/* I915 specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
++ */ ++#define DRM_I915_INIT 0x00 ++#define DRM_I915_FLUSH 0x01 ++#define DRM_I915_FLIP 0x02 ++#define DRM_I915_BATCHBUFFER 0x03 ++#define DRM_I915_IRQ_EMIT 0x04 ++#define DRM_I915_IRQ_WAIT 0x05 ++#define DRM_I915_GETPARAM 0x06 ++#define DRM_I915_SETPARAM 0x07 ++#define DRM_I915_ALLOC 0x08 ++#define DRM_I915_FREE 0x09 ++#define DRM_I915_INIT_HEAP 0x0a ++#define DRM_I915_CMDBUFFER 0x0b ++#define DRM_I915_DESTROY_HEAP 0x0c ++#define DRM_I915_SET_VBLANK_PIPE 0x0d ++#define DRM_I915_GET_VBLANK_PIPE 0x0e ++#define DRM_I915_VBLANK_SWAP 0x0f ++#define DRM_I915_MMIO 0x10 ++#define DRM_I915_HWS_ADDR 0x11 ++#define DRM_I915_EXECBUFFER 0x12 ++#define DRM_I915_GEM_INIT 0x13 ++#define DRM_I915_GEM_EXECBUFFER 0x14 ++#define DRM_I915_GEM_PIN 0x15 ++#define DRM_I915_GEM_UNPIN 0x16 ++#define DRM_I915_GEM_BUSY 0x17 ++#define DRM_I915_GEM_THROTTLE 0x18 ++#define DRM_I915_GEM_ENTERVT 0x19 ++#define DRM_I915_GEM_LEAVEVT 0x1a ++#define DRM_I915_GEM_CREATE 0x1b ++#define DRM_I915_GEM_PREAD 0x1c ++#define DRM_I915_GEM_PWRITE 0x1d ++#define DRM_I915_GEM_MMAP 0x1e ++#define DRM_I915_GEM_SET_DOMAIN 0x1f ++#define DRM_I915_GEM_SW_FINISH 0x20 ++#define DRM_I915_GEM_SET_TILING 0x21 ++#define DRM_I915_GEM_GET_TILING 0x22 ++ ++#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) ++#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ++#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t) ++#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) ++#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) ++#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) ++#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) ++#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) ++#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) ++#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) ++#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) ++#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) ++#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) ++#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) ++#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) ++#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) ++#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio) ++#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer) ++#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) ++#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) ++#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) ++#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) ++#define DRM_IOCTL_I915_GEM_BUSY 
DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) ++#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) ++#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) ++#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) ++#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) ++#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) ++#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) ++#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) ++#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) ++#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) ++#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) ++#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) ++ ++/* Asynchronous page flipping: ++ */ ++typedef struct drm_i915_flip { ++ /* ++ * This is really talking about planes, and we could rename it ++ * except for the fact that some of the duplicated i915_drm.h files ++ * out there check for HAVE_I915_FLIP and so might pick up this ++ * version. ++ */ ++ int pipes; ++} drm_i915_flip_t; ++ ++/* Allow drivers to submit batchbuffers directly to hardware, relying ++ * on the security mechanisms provided by hardware. ++ */ ++typedef struct drm_i915_batchbuffer { ++ int start; /* agp offset */ ++ int used; /* nr bytes in use */ ++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ ++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ ++ int num_cliprects; /* mulitpass with multiple cliprects? */ ++ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ ++} drm_i915_batchbuffer_t; ++ ++/* As above, but pass a pointer to userspace buffer which can be ++ * validated by the kernel prior to sending to hardware. ++ */ ++typedef struct _drm_i915_cmdbuffer { ++ char __user *buf; /* pointer to userspace command buffer */ ++ int sz; /* nr bytes in buf */ ++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ ++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ ++ int num_cliprects; /* mulitpass with multiple cliprects? 
*/ ++ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ ++} drm_i915_cmdbuffer_t; ++ ++/* Userspace can request & wait on irq's: ++ */ ++typedef struct drm_i915_irq_emit { ++ int __user *irq_seq; ++} drm_i915_irq_emit_t; ++ ++typedef struct drm_i915_irq_wait { ++ int irq_seq; ++} drm_i915_irq_wait_t; ++ ++/* Ioctl to query kernel params: ++ */ ++#define I915_PARAM_IRQ_ACTIVE 1 ++#define I915_PARAM_ALLOW_BATCHBUFFER 2 ++#define I915_PARAM_LAST_DISPATCH 3 ++#define I915_PARAM_CHIPSET_ID 4 ++#define I915_PARAM_HAS_GEM 5 ++ ++typedef struct drm_i915_getparam { ++ int param; ++ int __user *value; ++} drm_i915_getparam_t; ++ ++/* Ioctl to set kernel params: ++ */ ++#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 ++#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 ++#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 ++ ++typedef struct drm_i915_setparam { ++ int param; ++ int value; ++} drm_i915_setparam_t; ++ ++/* A memory manager for regions of shared memory: ++ */ ++#define I915_MEM_REGION_AGP 1 ++ ++typedef struct drm_i915_mem_alloc { ++ int region; ++ int alignment; ++ int size; ++ int __user *region_offset; /* offset from start of fb or agp */ ++} drm_i915_mem_alloc_t; ++ ++typedef struct drm_i915_mem_free { ++ int region; ++ int region_offset; ++} drm_i915_mem_free_t; ++ ++typedef struct drm_i915_mem_init_heap { ++ int region; ++ int size; ++ int start; ++} drm_i915_mem_init_heap_t; ++ ++/* Allow memory manager to be torn down and re-initialized (eg on ++ * rotate): ++ */ ++typedef struct drm_i915_mem_destroy_heap { ++ int region; ++} drm_i915_mem_destroy_heap_t; ++ ++/* Allow X server to configure which pipes to monitor for vblank signals ++ */ ++#define DRM_I915_VBLANK_PIPE_A 1 ++#define DRM_I915_VBLANK_PIPE_B 2 ++ ++typedef struct drm_i915_vblank_pipe { ++ int pipe; ++} drm_i915_vblank_pipe_t; ++ ++/* Schedule buffer swap at given vertical blank: ++ */ ++typedef struct drm_i915_vblank_swap { ++ drm_drawable_t drawable; ++ enum drm_vblank_seq_type seqtype; ++ unsigned int sequence; ++} drm_i915_vblank_swap_t; ++ ++#define I915_MMIO_READ 0 ++#define I915_MMIO_WRITE 1 ++ ++#define I915_MMIO_MAY_READ 0x1 ++#define I915_MMIO_MAY_WRITE 0x2 ++ ++#define MMIO_REGS_IA_PRIMATIVES_COUNT 0 ++#define MMIO_REGS_IA_VERTICES_COUNT 1 ++#define MMIO_REGS_VS_INVOCATION_COUNT 2 ++#define MMIO_REGS_GS_PRIMITIVES_COUNT 3 ++#define MMIO_REGS_GS_INVOCATION_COUNT 4 ++#define MMIO_REGS_CL_PRIMITIVES_COUNT 5 ++#define MMIO_REGS_CL_INVOCATION_COUNT 6 ++#define MMIO_REGS_PS_INVOCATION_COUNT 7 ++#define MMIO_REGS_PS_DEPTH_COUNT 8 ++ ++typedef struct drm_i915_mmio_entry { ++ unsigned int flag; ++ unsigned int offset; ++ unsigned int size; ++} drm_i915_mmio_entry_t; ++ ++typedef struct drm_i915_mmio { ++ unsigned int read_write:1; ++ unsigned int reg:31; ++ void __user *data; ++} drm_i915_mmio_t; ++ ++typedef struct drm_i915_hws_addr { ++ uint64_t addr; ++} drm_i915_hws_addr_t; ++ ++/* ++ * Relocation header is 4 uint32_ts ++ * 0 - 32 bit reloc count ++ * 1 - 32-bit relocation type ++ * 2-3 - 64-bit user buffer handle ptr for another list of relocs. ++ */ ++#define I915_RELOC_HEADER 4 ++ ++/* ++ * type 0 relocation has 4-uint32_t stride ++ * 0 - offset into buffer ++ * 1 - delta to add in ++ * 2 - buffer handle ++ * 3 - reserved (for optimisations later). ++ */ ++/* ++ * type 1 relocation has 4-uint32_t stride. ++ * Hangs off the first item in the op list. ++ * Performed after all valiations are done. ++ * Try to group relocs into the same relocatee together for ++ * performance reasons. 
++ * 0 - offset into buffer ++ * 1 - delta to add in ++ * 2 - buffer index in op list. ++ * 3 - relocatee index in op list. ++ */ ++#define I915_RELOC_TYPE_0 0 ++#define I915_RELOC0_STRIDE 4 ++#define I915_RELOC_TYPE_1 1 ++#define I915_RELOC1_STRIDE 4 ++ ++ ++struct drm_i915_op_arg { ++ uint64_t next; ++ uint64_t reloc_ptr; ++ int handled; ++ unsigned int pad64; ++ union { ++ struct drm_bo_op_req req; ++ struct drm_bo_arg_rep rep; ++ } d; ++ ++}; ++ ++struct drm_i915_execbuffer { ++ uint64_t ops_list; ++ uint32_t num_buffers; ++ struct drm_i915_batchbuffer batch; ++ drm_context_t context; /* for lockless use in the future */ ++ struct drm_fence_arg fence_arg; ++}; ++ ++struct drm_i915_gem_init { ++ /** ++ * Beginning offset in the GTT to be managed by the DRM memory ++ * manager. ++ */ ++ uint64_t gtt_start; ++ /** ++ * Ending offset in the GTT to be managed by the DRM memory ++ * manager. ++ */ ++ uint64_t gtt_end; ++}; ++ ++struct drm_i915_gem_create { ++ /** ++ * Requested size for the object. ++ * ++ * The (page-aligned) allocated size for the object will be returned. ++ */ ++ uint64_t size; ++ /** ++ * Returned handle for the object. ++ * ++ * Object handles are nonzero. ++ */ ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_i915_gem_pread { ++ /** Handle for the object being read. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset into the object to read from */ ++ uint64_t offset; ++ /** Length of data to read */ ++ uint64_t size; ++ /** Pointer to write the data into. */ ++ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_i915_gem_pwrite { ++ /** Handle for the object being written to. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset into the object to write to */ ++ uint64_t offset; ++ /** Length of data to write */ ++ uint64_t size; ++ /** Pointer to read the data from. */ ++ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_i915_gem_mmap { ++ /** Handle for the object being mapped. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset in the object to map. */ ++ uint64_t offset; ++ /** ++ * Length of data to map. ++ * ++ * The value will be page-aligned. ++ */ ++ uint64_t size; ++ /** Returned pointer the data was mapped at */ ++ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_i915_gem_set_domain { ++ /** Handle for the object */ ++ uint32_t handle; ++ ++ /** New read domains */ ++ uint32_t read_domains; ++ ++ /** New write domain */ ++ uint32_t write_domain; ++}; ++ ++struct drm_i915_gem_sw_finish { ++ /** Handle for the object */ ++ uint32_t handle; ++}; ++ ++struct drm_i915_gem_relocation_entry { ++ /** ++ * Handle of the buffer being pointed to by this relocation entry. ++ * ++ * It's appealing to make this be an index into the mm_validate_entry ++ * list to refer to the buffer, but this allows the driver to create ++ * a relocation list for state buffers and not re-write it per ++ * exec using the buffer. ++ */ ++ uint32_t target_handle; ++ ++ /** ++ * Value to be added to the offset of the target buffer to make up ++ * the relocation entry. ++ */ ++ uint32_t delta; ++ ++ /** Offset in the buffer the relocation entry will be written into */ ++ uint64_t offset; ++ ++ /** ++ * Offset value of the target buffer that the relocation entry was last ++ * written as. ++ * ++ * If the buffer has the same offset as last time, we can skip syncing ++ * and writing the relocation. 
This value is written back out by ++ * the execbuffer ioctl when the relocation is written. ++ */ ++ uint64_t presumed_offset; ++ ++ /** ++ * Target memory domains read by this operation. ++ */ ++ uint32_t read_domains; ++ ++ /** ++ * Target memory domains written by this operation. ++ * ++ * Note that only one domain may be written by the whole ++ * execbuffer operation, so that where there are conflicts, ++ * the application will get -EINVAL back. ++ */ ++ uint32_t write_domain; ++}; ++ ++/** @{ ++ * Intel memory domains ++ * ++ * Most of these just align with the various caches in ++ * the system and are used to flush and invalidate as ++ * objects end up cached in different domains. ++ */ ++/** CPU cache */ ++#define I915_GEM_DOMAIN_CPU 0x00000001 ++/** Render cache, used by 2D and 3D drawing */ ++#define I915_GEM_DOMAIN_RENDER 0x00000002 ++/** Sampler cache, used by texture engine */ ++#define I915_GEM_DOMAIN_SAMPLER 0x00000004 ++/** Command queue, used to load batch buffers */ ++#define I915_GEM_DOMAIN_COMMAND 0x00000008 ++/** Instruction cache, used by shader programs */ ++#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 ++/** Vertex address cache */ ++#define I915_GEM_DOMAIN_VERTEX 0x00000020 ++/** GTT domain - aperture and scanout */ ++#define I915_GEM_DOMAIN_GTT 0x00000040 ++/** @} */ ++ ++struct drm_i915_gem_exec_object { ++ /** ++ * User's handle for a buffer to be bound into the GTT for this ++ * operation. ++ */ ++ uint32_t handle; ++ ++ /** Number of relocations to be performed on this buffer */ ++ uint32_t relocation_count; ++ /** ++ * Pointer to array of struct drm_i915_gem_relocation_entry containing ++ * the relocations to be performed in this buffer. ++ */ ++ uint64_t relocs_ptr; ++ ++ /** Required alignment in graphics aperture */ ++ uint64_t alignment; ++ ++ /** ++ * Returned value of the updated offset of the object, for future ++ * presumed_offset writes. ++ */ ++ uint64_t offset; ++}; ++ ++struct drm_i915_gem_execbuffer { ++ /** ++ * List of buffers to be validated with their relocations to be ++ * performend on them. ++ * ++ * This is a pointer to an array of struct drm_i915_gem_validate_entry. ++ * ++ * These buffers must be listed in an order such that all relocations ++ * a buffer is performing refer to buffers that have already appeared ++ * in the validate list. ++ */ ++ uint64_t buffers_ptr; ++ uint32_t buffer_count; ++ ++ /** Offset in the batchbuffer to start execution from. */ ++ uint32_t batch_start_offset; ++ /** Bytes used in batchbuffer from batch_start_offset */ ++ uint32_t batch_len; ++ uint32_t DR1; ++ uint32_t DR4; ++ uint32_t num_cliprects; ++ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */ ++}; ++ ++struct drm_i915_gem_pin { ++ /** Handle of the buffer to be pinned. */ ++ uint32_t handle; ++ uint32_t pad; ++ ++ /** alignment required within the aperture */ ++ uint64_t alignment; ++ ++ /** Returned GTT offset of the buffer. */ ++ uint64_t offset; ++}; ++ ++struct drm_i915_gem_unpin { ++ /** Handle of the buffer to be unpinned. 
*/ ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_i915_gem_busy { ++ /** Handle of the buffer to check for busy */ ++ uint32_t handle; ++ ++ /** Return busy status (1 if busy, 0 if idle) */ ++ uint32_t busy; ++}; ++ ++#define I915_TILING_NONE 0 ++#define I915_TILING_X 1 ++#define I915_TILING_Y 2 ++ ++#define I915_BIT_6_SWIZZLE_NONE 0 ++#define I915_BIT_6_SWIZZLE_9 1 ++#define I915_BIT_6_SWIZZLE_9_10 2 ++#define I915_BIT_6_SWIZZLE_9_11 3 ++#define I915_BIT_6_SWIZZLE_9_10_11 4 ++/* Not seen by userland */ ++#define I915_BIT_6_SWIZZLE_UNKNOWN 5 ++ ++struct drm_i915_gem_set_tiling { ++ /** Handle of the buffer to have its tiling state updated */ ++ uint32_t handle; ++ ++ /** ++ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, ++ * I915_TILING_Y). ++ * ++ * This value is to be set on request, and will be updated by the ++ * kernel on successful return with the actual chosen tiling layout. ++ * ++ * The tiling mode may be demoted to I915_TILING_NONE when the system ++ * has bit 6 swizzling that can't be managed correctly by GEM. ++ * ++ * Buffer contents become undefined when changing tiling_mode. ++ */ ++ uint32_t tiling_mode; ++ ++ /** ++ * Stride in bytes for the object when in I915_TILING_X or ++ * I915_TILING_Y. ++ */ ++ uint32_t stride; ++ ++ /** ++ * Returned address bit 6 swizzling required for CPU access through ++ * mmap mapping. ++ */ ++ uint32_t swizzle_mode; ++}; ++ ++struct drm_i915_gem_get_tiling { ++ /** Handle of the buffer to get tiling state for. */ ++ uint32_t handle; ++ ++ /** ++ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, ++ * I915_TILING_Y). ++ */ ++ uint32_t tiling_mode; ++ ++ /** ++ * Returned address bit 6 swizzling required for CPU access through ++ * mmap mapping. ++ */ ++ uint32_t swizzle_mode; ++}; ++ ++#endif /* _I915_DRM_H_ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,222 @@ ++/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- ++ */ ++/* ++ * ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ i915_PCI_IDS ++}; ++ ++#ifdef I915_HAVE_FENCE ++extern struct drm_fence_driver i915_fence_driver; ++#endif ++ ++#ifdef I915_HAVE_BUFFER ++ ++static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; ++static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; ++ ++static struct drm_bo_driver i915_bo_driver = { ++ .mem_type_prio = i915_mem_prios, ++ .mem_busy_prio = i915_busy_prios, ++ .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), ++ .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t), ++ .create_ttm_backend_entry = i915_create_ttm_backend_entry, ++ .fence_type = i915_fence_type, ++ .invalidate_caches = i915_invalidate_caches, ++ .init_mem_type = i915_init_mem_type, ++ .evict_flags = i915_evict_flags, ++ .move = i915_move, ++ .ttm_cache_flush = i915_flush_ttm, ++ .command_stream_barrier = NULL, ++}; ++#endif ++ ++static int i915_suspend(struct drm_device *dev, pm_message_t state) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (!dev || !dev_priv) { ++ printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); ++ printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); ++ return -ENODEV; ++ } ++ ++ if (state.event == PM_EVENT_PRETHAW) ++ return 0; ++ ++ pci_save_state(dev->pdev); ++ ++ i915_save_state(dev); ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_free(dev); ++#endif ++ ++ if (state.event == PM_EVENT_SUSPEND) { ++ /* Shut down the device */ ++ pci_disable_device(dev->pdev); ++ pci_set_power_state(dev->pdev, PCI_D3hot); ++ } ++ ++ return 0; ++} ++ ++static int i915_resume(struct drm_device *dev) ++{ ++ pci_set_power_state(dev->pdev, PCI_D0); ++ pci_restore_state(dev->pdev); ++ if (pci_enable_device(dev->pdev)) ++ return -1; ++ pci_set_master(dev->pdev); ++ ++ i915_restore_state(dev); ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_init(dev); ++#endif ++ ++ return 0; ++} ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static void remove(struct pci_dev *pdev); ++ ++static struct drm_driver driver = { ++ /* don't use mtrr's here, the Xserver or user space app should ++ * deal with them for intel hardware. 
++ */ ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, ++ .load = i915_driver_load, ++ .unload = i915_driver_unload, ++ .firstopen = i915_driver_firstopen, ++ .open = i915_driver_open, ++ .lastclose = i915_driver_lastclose, ++ .preclose = i915_driver_preclose, ++ .postclose = i915_driver_postclose, ++ .suspend = i915_suspend, ++ .resume = i915_resume, ++ .device_is_agp = i915_driver_device_is_agp, ++ .get_vblank_counter = i915_get_vblank_counter, ++ .enable_vblank = i915_enable_vblank, ++ .disable_vblank = i915_disable_vblank, ++ .irq_preinstall = i915_driver_irq_preinstall, ++ .irq_postinstall = i915_driver_irq_postinstall, ++ .irq_uninstall = i915_driver_irq_uninstall, ++ .irq_handler = i915_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .proc_init = i915_gem_proc_init, ++ .proc_cleanup = i915_gem_proc_cleanup, ++ .ioctls = i915_ioctls, ++ .gem_init_object = i915_gem_init_object, ++ .gem_free_object = i915_gem_free_object, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = i915_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = remove, ++ }, ++#ifdef I915_HAVE_FENCE ++ .fence_driver = &i915_fence_driver, ++#endif ++#ifdef I915_HAVE_BUFFER ++ .bo_driver = &i915_bo_driver, ++#endif ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ int ret; ++ ++ /* On the 945G/GM, the chipset reports the MSI capability on the ++ * integrated graphics even though the support isn't actually there ++ * according to the published specs. It doesn't appear to function ++ * correctly in testing on 945G. ++ * This may be a side effect of MSI having been made available for PEG ++ * and the registers being closely associated. ++ */ ++ if (pdev->device != 0x2772 && pdev->device != 0x27A2) ++ (void )pci_enable_msi(pdev); ++ ++ ret = drm_get_dev(pdev, ent, &driver); ++ if (ret && pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ return ret; ++} ++static void remove(struct pci_dev *pdev) ++{ ++ if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ drm_cleanup_pci(pdev); ++} ++ ++static int __init i915_init(void) ++{ ++ driver.num_ioctls = i915_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit i915_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(i915_init); ++module_exit(i915_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2123 @@ ++/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- ++ */ ++/* ++ * ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 
++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#ifndef _I915_DRV_H_ ++#define _I915_DRV_H_ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Tungsten Graphics, Inc." ++ ++#define DRIVER_NAME "i915" ++#define DRIVER_DESC "Intel Graphics" ++#define DRIVER_DATE "20080730" ++ ++#if defined(__linux__) ++#define I915_HAVE_FENCE ++#define I915_HAVE_BUFFER ++#define I915_HAVE_GEM ++#endif ++ ++/* Interface history: ++ * ++ * 1.1: Original. ++ * 1.2: Add Power Management ++ * 1.3: Add vblank support ++ * 1.4: Fix cmdbuffer path, add heap destroy ++ * 1.5: Add vblank pipe configuration ++ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank ++ * - Support vertical blank on secondary display pipe ++ * 1.8: New ioctl for ARB_Occlusion_Query ++ * 1.9: Usable page flipping and triple buffering ++ * 1.10: Plane/pipe disentangling ++ * 1.11: TTM superioctl ++ * 1.12: TTM relocation optimization ++ */ ++#define DRIVER_MAJOR 1 ++#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER) ++#define DRIVER_MINOR 13 ++#else ++#define DRIVER_MINOR 6 ++#endif ++#define DRIVER_PATCHLEVEL 0 ++ ++enum pipe { ++ PIPE_A = 0, ++ PIPE_B, ++}; ++ ++#ifdef I915_HAVE_BUFFER ++#define I915_MAX_VALIDATE_BUFFERS 4096 ++struct drm_i915_validate_buffer; ++#endif ++ ++#define WATCH_COHERENCY 0 ++#define WATCH_BUF 0 ++#define WATCH_EXEC 0 ++#define WATCH_LRU 0 ++#define WATCH_RELOC 0 ++#define WATCH_INACTIVE 0 ++#define WATCH_PWRITE 0 ++ ++typedef struct _drm_i915_ring_buffer { ++ int tail_mask; ++ unsigned long Size; ++ u8 *virtual_start; ++ int head; ++ int tail; ++ int space; ++ drm_local_map_t map; ++ struct drm_gem_object *ring_obj; ++} drm_i915_ring_buffer_t; ++ ++struct mem_block { ++ struct mem_block *next; ++ struct mem_block *prev; ++ int start; ++ int size; ++ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ ++}; ++ ++typedef struct _drm_i915_vbl_swap { ++ struct list_head head; ++ drm_drawable_t drw_id; ++ unsigned int plane; ++ unsigned int sequence; ++ int flip; ++} drm_i915_vbl_swap_t; ++ ++#ifdef __linux__ ++struct opregion_header; ++struct opregion_acpi; ++struct opregion_swsci; ++struct opregion_asle; ++ ++struct intel_opregion { ++ struct opregion_header *header; ++ struct opregion_acpi *acpi; ++ struct opregion_swsci *swsci; ++ struct opregion_asle *asle; ++ ++ int enabled; ++}; ++#endif ++ ++typedef struct drm_i915_private { 
++ struct drm_device *dev; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio_map; ++ ++ drm_i915_sarea_t *sarea_priv; ++ drm_i915_ring_buffer_t ring; ++ ++ drm_dma_handle_t *status_page_dmah; ++ void *hw_status_page; ++ dma_addr_t dma_status_page; ++ uint32_t counter; ++ unsigned int status_gfx_addr; ++ drm_local_map_t hws_map; ++ struct drm_gem_object *hws_obj; ++ ++ unsigned int cpp; ++ ++ wait_queue_head_t irq_queue; ++ atomic_t irq_received; ++ ++ int tex_lru_log_granularity; ++ int allow_batchbuffer; ++ struct mem_block *agp_heap; ++ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; ++ int vblank_pipe; ++ DRM_SPINTYPE user_irq_lock; ++ int user_irq_refcount; ++ int fence_irq_on; ++ uint32_t irq_mask_reg; ++ int irq_enabled; ++ ++#ifdef I915_HAVE_FENCE ++ uint32_t flush_sequence; ++ uint32_t flush_flags; ++ uint32_t flush_pending; ++ uint32_t saved_flush_status; ++#endif ++#ifdef I915_HAVE_BUFFER ++ void *agp_iomap; ++ unsigned int max_validate_buffers; ++ struct mutex cmdbuf_mutex; ++ struct drm_i915_validate_buffer *val_bufs; ++#endif ++ ++ DRM_SPINTYPE swaps_lock; ++ drm_i915_vbl_swap_t vbl_swaps; ++ unsigned int swaps_pending; ++#if defined(I915_HAVE_BUFFER) ++ /* DRI2 sarea */ ++ struct drm_buffer_object *sarea_bo; ++ struct drm_bo_kmap_obj sarea_kmap; ++#endif ++ ++#ifdef __linux__ ++ struct intel_opregion opregion; ++#endif ++ ++ /* Register state */ ++ u8 saveLBB; ++ u32 saveDSPACNTR; ++ u32 saveDSPBCNTR; ++ u32 saveDSPARB; ++ u32 savePIPEACONF; ++ u32 savePIPEBCONF; ++ u32 savePIPEASRC; ++ u32 savePIPEBSRC; ++ u32 saveFPA0; ++ u32 saveFPA1; ++ u32 saveDPLL_A; ++ u32 saveDPLL_A_MD; ++ u32 saveHTOTAL_A; ++ u32 saveHBLANK_A; ++ u32 saveHSYNC_A; ++ u32 saveVTOTAL_A; ++ u32 saveVBLANK_A; ++ u32 saveVSYNC_A; ++ u32 saveBCLRPAT_A; ++ u32 savePIPEASTAT; ++ u32 saveDSPASTRIDE; ++ u32 saveDSPASIZE; ++ u32 saveDSPAPOS; ++ u32 saveDSPAADDR; ++ u32 saveDSPASURF; ++ u32 saveDSPATILEOFF; ++ u32 savePFIT_PGM_RATIOS; ++ u32 saveBLC_PWM_CTL; ++ u32 saveBLC_PWM_CTL2; ++ u32 saveFPB0; ++ u32 saveFPB1; ++ u32 saveDPLL_B; ++ u32 saveDPLL_B_MD; ++ u32 saveHTOTAL_B; ++ u32 saveHBLANK_B; ++ u32 saveHSYNC_B; ++ u32 saveVTOTAL_B; ++ u32 saveVBLANK_B; ++ u32 saveVSYNC_B; ++ u32 saveBCLRPAT_B; ++ u32 savePIPEBSTAT; ++ u32 saveDSPBSTRIDE; ++ u32 saveDSPBSIZE; ++ u32 saveDSPBPOS; ++ u32 saveDSPBADDR; ++ u32 saveDSPBSURF; ++ u32 saveDSPBTILEOFF; ++ u32 saveVGA0; ++ u32 saveVGA1; ++ u32 saveVGA_PD; ++ u32 saveVGACNTRL; ++ u32 saveADPA; ++ u32 saveLVDS; ++ u32 savePP_ON_DELAYS; ++ u32 savePP_OFF_DELAYS; ++ u32 saveDVOA; ++ u32 saveDVOB; ++ u32 saveDVOC; ++ u32 savePP_ON; ++ u32 savePP_OFF; ++ u32 savePP_CONTROL; ++ u32 savePP_DIVISOR; ++ u32 savePFIT_CONTROL; ++ u32 save_palette_a[256]; ++ u32 save_palette_b[256]; ++ u32 saveFBC_CFB_BASE; ++ u32 saveFBC_LL_BASE; ++ u32 saveFBC_CONTROL; ++ u32 saveFBC_CONTROL2; ++ u32 saveIER; ++ u32 saveIIR; ++ u32 saveIMR; ++ u32 saveCACHE_MODE_0; ++ u32 saveD_STATE; ++ u32 saveCG_2D_DIS; ++ u32 saveMI_ARB_STATE; ++ u32 saveSWF0[16]; ++ u32 saveSWF1[16]; ++ u32 saveSWF2[3]; ++ u8 saveMSR; ++ u8 saveSR[8]; ++ u8 saveGR[25]; ++ u8 saveAR_INDEX; ++ u8 saveAR[21]; ++ u8 saveDACMASK; ++ u8 saveDACDATA[256*3]; /* 256 3-byte colors */ ++ u8 saveCR[37]; ++ ++ struct { ++#ifdef __linux__ ++ struct drm_mm gtt_space; ++#endif ++ /** ++ * List of objects currently involved in rendering from the ++ * ringbuffer. ++ * ++ * A reference is held on the buffer while on this list. 
++ */ ++ struct list_head active_list; ++ ++ /** ++ * List of objects which are not in the ringbuffer but which ++ * still have a write_domain which needs to be flushed before ++ * unbinding. ++ * ++ * A reference is held on the buffer while on this list. ++ */ ++ struct list_head flushing_list; ++ ++ /** ++ * LRU list of objects which are not in the ringbuffer and ++ * are ready to unbind, but are still in the GTT. ++ * ++ * A reference is not held on the buffer while on this list, ++ * as merely being GTT-bound shouldn't prevent its being ++ * freed, and we'll pull it off the list in the free path. ++ */ ++ struct list_head inactive_list; ++ ++ /** ++ * List of breadcrumbs associated with GPU requests currently ++ * outstanding. ++ */ ++ struct list_head request_list; ++#ifdef __linux__ ++ /** ++ * We leave the user IRQ off as much as possible, ++ * but this means that requests will finish and never ++ * be retired once the system goes idle. Set a timer to ++ * fire periodically while the ring is running. When it ++ * fires, go retire requests. ++ */ ++ struct delayed_work retire_work; ++#endif ++ uint32_t next_gem_seqno; ++ ++ /** ++ * Waiting sequence number, if any ++ */ ++ uint32_t waiting_gem_seqno; ++ ++ /** ++ * Last seq seen at irq time ++ */ ++ uint32_t irq_gem_seqno; ++ ++ /** ++ * Flag if the X Server, and thus DRM, is not currently in ++ * control of the device. ++ * ++ * This is set between LeaveVT and EnterVT. It needs to be ++ * replaced with a semaphore. It also needs to be ++ * transitioned away from for kernel modesetting. ++ */ ++ int suspended; ++ ++ /** ++ * Flag if the hardware appears to be wedged. ++ * ++ * This is set when attempts to idle the device timeout. ++ * It prevents command submission from occuring and makes ++ * every pending request fail ++ */ ++ int wedged; ++ ++ /** Bit 6 swizzling required for X tiling */ ++ uint32_t bit_6_swizzle_x; ++ /** Bit 6 swizzling required for Y tiling */ ++ uint32_t bit_6_swizzle_y; ++ } mm; ++} drm_i915_private_t; ++ ++struct drm_i915_file_private { ++ struct { ++ uint32_t last_gem_seqno; ++ uint32_t last_gem_throttle_seqno; ++ } mm; ++}; ++ ++enum intel_chip_family { ++ CHIP_I8XX = 0x01, ++ CHIP_I9XX = 0x02, ++ CHIP_I915 = 0x04, ++ CHIP_I965 = 0x08, ++}; ++ ++/** driver private structure attached to each drm_gem_object */ ++struct drm_i915_gem_object { ++ struct drm_gem_object *obj; ++ ++ /** Current space allocated to this object in the GTT, if any. */ ++ struct drm_mm_node *gtt_space; ++ ++ /** This object's place on the active/flushing/inactive lists */ ++ struct list_head list; ++ ++ /** ++ * This is set if the object is on the active or flushing lists ++ * (has pending rendering), and is not set if it's on inactive (ready ++ * to be unbound). ++ */ ++ int active; ++ ++ /** ++ * This is set if the object has been written to since last bound ++ * to the GTT ++ */ ++ int dirty; ++ ++ /** AGP memory structure for our GTT binding. */ ++ DRM_AGP_MEM *agp_mem; ++ ++ struct page **page_list; ++ ++ /** ++ * Current offset of the object in GTT space. ++ * ++ * This is the same as gtt_space->start ++ */ ++ uint32_t gtt_offset; ++ ++ /** Boolean whether this object has a valid gtt offset. */ ++ int gtt_bound; ++ ++ /** How many users have pinned this object in GTT space */ ++ int pin_count; ++ ++ /** Breadcrumb of last rendering to the buffer. */ ++ uint32_t last_rendering_seqno; ++ ++ /** Current tiling mode for the object. 
*/ ++ uint32_t tiling_mode; ++ ++ /** ++ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when ++ * GEM_DOMAIN_CPU is not in the object's read domain. ++ */ ++ uint8_t *page_cpu_valid; ++}; ++ ++/** ++ * Request queue structure. ++ * ++ * The request queue allows us to note sequence numbers that have been emitted ++ * and may be associated with active buffers to be retired. ++ * ++ * By keeping this list, we can avoid having to do questionable ++ * sequence-number comparisons on buffer last_rendering_seqnos, and associate ++ * an emission time with seqnos for tracking how far ahead of the GPU we are. ++ */ ++struct drm_i915_gem_request { ++ /** GEM sequence number associated with this request. */ ++ uint32_t seqno; ++ ++ /** Time at which this request was emitted, in jiffies. */ ++ unsigned long emitted_jiffies; ++ ++ /** Cache domains that were flushed at the start of the request. */ ++ uint32_t flush_domains; ++ ++ struct list_head list; ++}; ++ ++extern struct drm_ioctl_desc i915_ioctls[]; ++extern int i915_max_ioctl; ++ ++ /* i915_dma.c */ ++extern void i915_kernel_lost_context(struct drm_device * dev); ++extern int i915_driver_load(struct drm_device *, unsigned long flags); ++extern int i915_driver_unload(struct drm_device *); ++extern void i915_driver_lastclose(struct drm_device * dev); ++extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); ++extern void i915_driver_preclose(struct drm_device *dev, ++ struct drm_file *file_priv); ++extern void i915_driver_postclose(struct drm_device *dev, ++ struct drm_file *file_priv); ++extern int i915_driver_device_is_agp(struct drm_device * dev); ++extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++extern void i915_emit_breadcrumb(struct drm_device *dev); ++extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync); ++extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); ++extern int i915_driver_firstopen(struct drm_device *dev); ++extern int i915_dispatch_batchbuffer(struct drm_device * dev, ++ drm_i915_batchbuffer_t * batch); ++extern int i915_quiescent(struct drm_device *dev); ++extern int i915_init_hardware_status(struct drm_device *dev); ++extern void i915_free_hardware_status(struct drm_device *dev); ++ ++int i915_emit_box(struct drm_device * dev, ++ struct drm_clip_rect __user * boxes, ++ int i, int DR1, int DR4); ++ ++/* i915_irq.c */ ++extern int i915_irq_emit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_irq_wait(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); ++extern void i915_driver_irq_preinstall(struct drm_device * dev); ++extern int i915_driver_irq_postinstall(struct drm_device * dev); ++extern void i915_driver_irq_uninstall(struct drm_device * dev); ++extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_emit_irq(struct drm_device * dev); ++extern int i915_wait_irq(struct drm_device * dev, int irq_nr); ++extern int i915_enable_vblank(struct drm_device *dev, int crtc); ++extern void i915_disable_vblank(struct drm_device *dev, int crtc); ++extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int i915_vblank_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern void 
i915_user_irq_on(drm_i915_private_t *dev_priv); ++extern void i915_user_irq_off(drm_i915_private_t *dev_priv); ++ ++/* i915_mem.c */ ++extern int i915_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_mem_init_heap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern void i915_mem_takedown(struct mem_block **heap); ++extern void i915_mem_release(struct drm_device * dev, ++ struct drm_file *file_priv, ++ struct mem_block *heap); ++ ++/* i915_suspend.c */ ++extern int i915_save_state(struct drm_device *dev); ++extern int i915_restore_state(struct drm_device *dev); ++ ++#ifdef I915_HAVE_FENCE ++/* i915_fence.c */ ++extern void i915_fence_handler(struct drm_device *dev); ++extern void i915_invalidate_reported_sequence(struct drm_device *dev); ++ ++#endif ++ ++#ifdef I915_HAVE_BUFFER ++/* i915_buffer.c */ ++extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev); ++extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass, ++ uint32_t *type); ++extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); ++extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man); ++extern uint64_t i915_evict_flags(struct drm_buffer_object *bo); ++extern int i915_move(struct drm_buffer_object *bo, int evict, ++ int no_wait, struct drm_bo_mem_reg *new_mem); ++void i915_flush_ttm(struct drm_ttm *ttm); ++/* i915_execbuf.c */ ++int i915_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++/* i915_gem.c */ ++int i915_gem_init_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_pread_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_pin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_busy_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_set_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_get_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++void i915_gem_load(struct drm_device *dev); ++int i915_gem_proc_init(struct drm_minor *minor); ++void i915_gem_proc_cleanup(struct drm_minor *minor); ++int i915_gem_init_object(struct drm_gem_object *obj); 
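/*
 * [Editor's note: illustrative sketch only, not part of this patch.]
 * The drm_i915_gem_request list documented above is retired in emission
 * order: the driver reads the last sequence number the GPU wrote to the
 * hardware status page and releases every request whose seqno has been
 * passed.  The helper below only illustrates that idea using names
 * declared in this header; example_retire_requests() is a hypothetical
 * name, and the real i915_gem_retire_requests() also moves buffers off
 * the active/flushing lists, which is omitted here.
 */
static inline void example_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t completed = i915_get_gem_seqno(dev);	/* status-page counter */

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request =
			list_entry(dev_priv->mm.request_list.next,
				   struct drm_i915_gem_request, list);

		/* Requests sit on the list in emission order, so stop at
		 * the first one the GPU has not reached yet (wrap-safe
		 * comparison). */
		if ((int32_t)(completed - request->seqno) < 0)
			break;

		list_del(&request->list);
		drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
	}
}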
++void i915_gem_free_object(struct drm_gem_object *obj); ++int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); ++void i915_gem_object_unpin(struct drm_gem_object *obj); ++void i915_gem_lastclose(struct drm_device *dev); ++uint32_t i915_get_gem_seqno(struct drm_device *dev); ++void i915_gem_retire_requests(struct drm_device *dev); ++void i915_gem_retire_work_handler(struct work_struct *work); ++void i915_gem_clflush_object(struct drm_gem_object *obj); ++#endif ++ ++/* i915_gem_tiling.c */ ++void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); ++ ++/* i915_gem_debug.c */ ++#if WATCH_INACTIVE ++void i915_verify_inactive(struct drm_device *dev, char *file, int line); ++#else ++#define i915_verify_inactive(dev,file,line) ++#endif ++void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); ++void i915_gem_dump_object(struct drm_gem_object *obj, int len, ++ const char *where, uint32_t mark); ++void i915_dump_lru(struct drm_device *dev, const char *where); ++ ++#ifdef __linux__ ++/* i915_opregion.c */ ++extern int intel_opregion_init(struct drm_device *dev); ++extern void intel_opregion_free(struct drm_device *dev); ++extern void opregion_asle_intr(struct drm_device *dev); ++extern void opregion_enable_asle(struct drm_device *dev); ++#endif ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++extern void intel_init_chipset_flush_compat(struct drm_device *dev); ++extern void intel_fini_chipset_flush_compat(struct drm_device *dev); ++#endif ++#endif ++ ++#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) ++#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) ++#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) ++#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) ++#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg)) ++#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val)) ++ ++#if defined(__FreeBSD__) ++typedef boolean_t bool; ++#endif ++ ++#define I915_VERBOSE 0 ++#define I915_RING_VALIDATE 0 ++ ++#define PRIMARY_RINGBUFFER_SIZE (128*1024) ++ ++#define RING_LOCALS unsigned int outring, ringmask, outcount; \ ++ volatile char *virt; ++ ++#if I915_RING_VALIDATE ++void i915_ring_validate(struct drm_device *dev, const char *func, int line); ++#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__) ++#else ++#define I915_RING_DO_VALIDATE(dev) ++#endif ++ ++#define BEGIN_LP_RING(n) do { \ ++ if (I915_VERBOSE) \ ++ DRM_DEBUG("BEGIN_LP_RING(%d)\n", \ ++ (n)); \ ++ I915_RING_DO_VALIDATE(dev); \ ++ if (dev_priv->ring.space < (n)*4) \ ++ i915_wait_ring(dev, (n)*4, __FUNCTION__); \ ++ outcount = 0; \ ++ outring = dev_priv->ring.tail; \ ++ ringmask = dev_priv->ring.tail_mask; \ ++ virt = dev_priv->ring.virtual_start; \ ++} while (0) ++ ++#define OUT_RING(n) do { \ ++ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ ++ *(volatile unsigned int *)(virt + outring) = (n); \ ++ outcount++; \ ++ outring += 4; \ ++ outring &= ringmask; \ ++} while (0) ++ ++#define ADVANCE_LP_RING() do { \ ++ if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ ++ I915_RING_DO_VALIDATE(dev); \ ++ dev_priv->ring.tail = outring; \ ++ dev_priv->ring.space -= outcount * 4; \ ++ I915_WRITE(PRB0_TAIL, outring); \ ++} while(0) ++ ++extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); ++ ++#define BREADCRUMB_BITS 31 ++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) ++ ++#define READ_BREADCRUMB(dev_priv) 
(((volatile u32*)(dev_priv->hw_status_page))[5]) ++/** ++ * Reads a dword out of the status page, which is written to from the command ++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or ++ * MI_STORE_DATA_IMM. ++ * ++ * The following dwords have a reserved meaning: ++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes. ++ * 4: ring 0 head pointer ++ * 5: ring 1 head pointer (915-class) ++ * 6: ring 2 head pointer (915-class) ++ * ++ * The area from dword 0x10 to 0x3ff is available for driver usage. ++ */ ++#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) ++#define I915_GEM_HWS_INDEX 0x10 ++ ++/* MCH MMIO space */ ++/** 915-945 and GM965 MCH register controlling DRAM channel access */ ++#define DCC 0x200 ++#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) ++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) ++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) ++#define DCC_ADDRESSING_MODE_MASK (3 << 0) ++#define DCC_CHANNEL_XOR_DISABLE (1 << 10) ++ ++/** 965 MCH register controlling DRAM channel configuration */ ++#define CHDECMISC 0x111 ++#define CHDECMISC_FLEXMEMORY (1 << 1) ++ ++/* ++ * The Bridge device's PCI config space has information about the ++ * fb aperture size and the amount of pre-reserved memory. ++ */ ++#define INTEL_GMCH_CTRL 0x52 ++#define INTEL_GMCH_ENABLED 0x4 ++#define INTEL_GMCH_MEM_MASK 0x1 ++#define INTEL_GMCH_MEM_64M 0x1 ++#define INTEL_GMCH_MEM_128M 0 ++ ++#define INTEL_855_GMCH_GMS_MASK (0x7 << 4) ++#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) ++ ++#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) ++#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) ++ ++/* PCI config space */ ++ ++#define HPLLCC 0xc0 /* 855 only */ ++#define GC_CLOCK_CONTROL_MASK (3 << 0) ++#define GC_CLOCK_133_200 (0 << 0) ++#define GC_CLOCK_100_200 (1 << 0) ++#define GC_CLOCK_100_133 (2 << 0) ++#define GC_CLOCK_166_250 (3 << 0) ++#define GCFGC 0xf0 /* 915+ only */ ++#define GC_LOW_FREQUENCY_ENABLE (1 << 7) ++#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) ++#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) ++#define GC_DISPLAY_CLOCK_MASK (7 << 4) ++#define LBB 0xf4 ++ ++/* VGA stuff */ ++ ++#define VGA_ST01_MDA 0x3ba ++#define VGA_ST01_CGA 0x3da ++ ++#define VGA_MSR_WRITE 0x3c2 ++#define VGA_MSR_READ 0x3cc ++#define VGA_MSR_MEM_EN (1<<1) ++#define VGA_MSR_CGA_MODE (1<<0) ++ ++#define VGA_SR_INDEX 0x3c4 ++#define VGA_SR_DATA 0x3c5 ++ ++#define VGA_AR_INDEX 0x3c0 ++#define VGA_AR_VID_EN (1<<5) ++#define VGA_AR_DATA_WRITE 0x3c0 ++#define VGA_AR_DATA_READ 0x3c1 ++ ++#define VGA_GR_INDEX 0x3ce ++#define VGA_GR_DATA 0x3cf ++/* GR05 */ ++#define VGA_GR_MEM_READ_MODE_SHIFT 3 ++#define VGA_GR_MEM_READ_MODE_PLANE 1 ++/* GR06 */ ++#define VGA_GR_MEM_MODE_MASK 0xc ++#define VGA_GR_MEM_MODE_SHIFT 2 ++#define VGA_GR_MEM_A0000_AFFFF 0 ++#define VGA_GR_MEM_A0000_BFFFF 1 ++#define VGA_GR_MEM_B0000_B7FFF 2 ++#define VGA_GR_MEM_B0000_BFFFF 3 ++ ++#define VGA_DACMASK 0x3c6 ++#define VGA_DACRX 0x3c7 ++#define VGA_DACWX 0x3c8 ++#define VGA_DACDATA 0x3c9 ++ ++#define VGA_CR_INDEX_MDA 0x3b4 ++#define VGA_CR_DATA_MDA 0x3b5 ++#define VGA_CR_INDEX_CGA 0x3d4 ++#define VGA_CR_DATA_CGA 0x3d5 ++ ++/* ++ * Memory interface instructions used by the kernel ++ */ ++#define MI_INSTR(opcode, 
flags) (((opcode) << 23) | (flags)) ++ ++#define MI_NOOP MI_INSTR(0, 0) ++#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) ++#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) ++#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) ++#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) ++#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) ++#define MI_FLUSH MI_INSTR(0x04, 0) ++#define MI_READ_FLUSH (1 << 0) ++#define MI_EXE_FLUSH (1 << 1) ++#define MI_NO_WRITE_FLUSH (1 << 2) ++#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ ++#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ ++#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) ++#define MI_REPORT_HEAD MI_INSTR(0x07, 0) ++#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) ++#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) ++#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ ++#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) ++#define MI_STORE_DWORD_INDEX_SHIFT 2 ++#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) ++#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) ++#define MI_BATCH_NON_SECURE (1) ++#define MI_BATCH_NON_SECURE_I965 (1<<8) ++#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) ++ ++/* ++ * 3D instructions used by the kernel ++ */ ++#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) ++ ++#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) ++#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) ++#define SC_UPDATE_SCISSOR (0x1<<1) ++#define SC_ENABLE_MASK (0x1<<0) ++#define SC_ENABLE (0x1<<0) ++#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) ++#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) ++#define SCI_YMIN_MASK (0xffff<<16) ++#define SCI_XMIN_MASK (0xffff<<0) ++#define SCI_YMAX_MASK (0xffff<<16) ++#define SCI_XMAX_MASK (0xffff<<0) ++#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) ++#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) ++#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) ++#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) ++#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) ++#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) ++#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) ++#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) ++#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) ++#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) ++#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) ++#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) ++#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) ++#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) ++#define BLT_DEPTH_8 (0<<24) ++#define BLT_DEPTH_16_565 (1<<24) ++#define BLT_DEPTH_16_1555 (2<<24) ++#define BLT_DEPTH_32 (3<<24) ++#define BLT_ROP_GXCOPY (0xcc<<16) ++#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ ++#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ ++#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) ++#define ASYNC_FLIP (1<<22) ++#define DISPLAY_PLANE_A (0<<20) ++#define DISPLAY_PLANE_B (1<<20) ++ ++/* ++ * Instruction and interrupt control regs ++ */ ++ ++#define PRB0_TAIL 0x02030 ++#define PRB0_HEAD 0x02034 ++#define PRB0_START 0x02038 ++#define PRB0_CTL 0x0203c ++#define TAIL_ADDR 0x001FFFF8 ++#define HEAD_WRAP_COUNT 0xFFE00000 ++#define HEAD_WRAP_ONE 0x00200000 ++#define HEAD_ADDR 0x001FFFFC ++#define RING_NR_PAGES 0x001FF000 ++#define RING_REPORT_MASK 0x00000006 ++#define RING_REPORT_64K 0x00000002 ++#define RING_REPORT_128K 0x00000004 ++#define RING_NO_REPORT 0x00000000 ++#define RING_VALID_MASK 0x00000001 
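To show how the MI_* opcodes above combine with the BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros defined earlier, here is a minimal sketch of storing a sequence number into the driver-owned status page dword and requesting a user interrupt; the helper name is hypothetical and the sequence is only an approximation of what the GEM request code does:

static void example_emit_seqno(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(4);
	/* Write seqno into status-page dword I915_GEM_HWS_INDEX ... */
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);
	/* ... and raise a user interrupt once the store has executed. */
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	/* The CPU can read it back with READ_HWSP(dev_priv, I915_GEM_HWS_INDEX). */
}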
++#define RING_VALID 0x00000001 ++#define RING_INVALID 0x00000000 ++#define PRB1_TAIL 0x02040 /* 915+ only */ ++#define PRB1_HEAD 0x02044 /* 915+ only */ ++#define PRB1_START 0x02048 /* 915+ only */ ++#define PRB1_CTL 0x0204c /* 915+ only */ ++#define ACTHD_I965 0x02074 ++#define HWS_PGA 0x02080 ++#define HWS_ADDRESS_MASK 0xfffff000 ++#define HWS_START_ADDRESS_SHIFT 4 ++#define IPEIR 0x02088 ++#define NOPID 0x02094 ++#define HWSTAM 0x02098 ++#define SCPD0 0x0209c /* 915+ only */ ++#define IER 0x020a0 ++#define IIR 0x020a4 ++#define IMR 0x020a8 ++#define ISR 0x020ac ++#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) ++#define I915_DISPLAY_PORT_INTERRUPT (1<<17) ++#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) ++#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) ++#define I915_HWB_OOM_INTERRUPT (1<<13) ++#define I915_SYNC_STATUS_INTERRUPT (1<<12) ++#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) ++#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) ++#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) ++#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) ++#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) ++#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) ++#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) ++#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) ++#define I915_DEBUG_INTERRUPT (1<<2) ++#define I915_USER_INTERRUPT (1<<1) ++#define I915_ASLE_INTERRUPT (1<<0) ++#define EIR 0x020b0 ++#define EMR 0x020b4 ++#define ESR 0x020b8 ++#define INSTPM 0x020c0 ++#define ACTHD 0x020c8 ++#define FW_BLC 0x020d8 ++#define FW_BLC_SELF 0x020e0 /* 915+ only */ ++#define MI_ARB_STATE 0x020e4 /* 915+ only */ ++#define CACHE_MODE_0 0x02120 /* 915+ only */ ++#define CM0_MASK_SHIFT 16 ++#define CM0_IZ_OPT_DISABLE (1<<6) ++#define CM0_ZR_OPT_DISABLE (1<<5) ++#define CM0_DEPTH_EVICT_DISABLE (1<<4) ++#define CM0_COLOR_EVICT_DISABLE (1<<3) ++#define CM0_DEPTH_WRITE_DISABLE (1<<1) ++#define CM0_RC_OP_FLUSH_DISABLE (1<<0) ++#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ ++ ++/* ++ * Framebuffer compression (915+ only) ++ */ ++ ++#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ ++#define FBC_LL_BASE 0x03204 /* 4k page aligned */ ++#define FBC_CONTROL 0x03208 ++#define FBC_CTL_EN (1<<31) ++#define FBC_CTL_PERIODIC (1<<30) ++#define FBC_CTL_INTERVAL_SHIFT (16) ++#define FBC_CTL_UNCOMPRESSIBLE (1<<14) ++#define FBC_CTL_STRIDE_SHIFT (5) ++#define FBC_CTL_FENCENO (1<<0) ++#define FBC_COMMAND 0x0320c ++#define FBC_CMD_COMPRESS (1<<0) ++#define FBC_STATUS 0x03210 ++#define FBC_STAT_COMPRESSING (1<<31) ++#define FBC_STAT_COMPRESSED (1<<30) ++#define FBC_STAT_MODIFIED (1<<29) ++#define FBC_STAT_CURRENT_LINE (1<<0) ++#define FBC_CONTROL2 0x03214 ++#define FBC_CTL_FENCE_DBL (0<<4) ++#define FBC_CTL_IDLE_IMM (0<<2) ++#define FBC_CTL_IDLE_FULL (1<<2) ++#define FBC_CTL_IDLE_LINE (2<<2) ++#define FBC_CTL_IDLE_DEBUG (3<<2) ++#define FBC_CTL_CPU_FENCE (1<<1) ++#define FBC_CTL_PLANEA (0<<0) ++#define FBC_CTL_PLANEB (1<<0) ++#define FBC_FENCE_OFF 0x0321b ++ ++#define FBC_LL_SIZE (1536) ++ ++/* ++ * GPIO regs ++ */ ++#define GPIOA 0x5010 ++#define GPIOB 0x5014 ++#define GPIOC 0x5018 ++#define GPIOD 0x501c ++#define GPIOE 0x5020 ++#define GPIOF 0x5024 ++#define GPIOG 0x5028 ++#define GPIOH 0x502c ++# define GPIO_CLOCK_DIR_MASK (1 << 0) ++# define GPIO_CLOCK_DIR_IN (0 << 1) ++# define GPIO_CLOCK_DIR_OUT (1 << 1) ++# define GPIO_CLOCK_VAL_MASK (1 << 2) ++# define GPIO_CLOCK_VAL_OUT (1 << 3) ++# define GPIO_CLOCK_VAL_IN (1 << 4) ++# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) ++# 
define GPIO_DATA_DIR_MASK (1 << 8) ++# define GPIO_DATA_DIR_IN (0 << 9) ++# define GPIO_DATA_DIR_OUT (1 << 9) ++# define GPIO_DATA_VAL_MASK (1 << 10) ++# define GPIO_DATA_VAL_OUT (1 << 11) ++# define GPIO_DATA_VAL_IN (1 << 12) ++# define GPIO_DATA_PULLUP_DISABLE (1 << 13) ++ ++/* ++ * Clock control & power management ++ */ ++ ++#define VGA0 0x6000 ++#define VGA1 0x6004 ++#define VGA_PD 0x6010 ++#define VGA0_PD_P2_DIV_4 (1 << 7) ++#define VGA0_PD_P1_DIV_2 (1 << 5) ++#define VGA0_PD_P1_SHIFT 0 ++#define VGA0_PD_P1_MASK (0x1f << 0) ++#define VGA1_PD_P2_DIV_4 (1 << 15) ++#define VGA1_PD_P1_DIV_2 (1 << 13) ++#define VGA1_PD_P1_SHIFT 8 ++#define VGA1_PD_P1_MASK (0x1f << 8) ++#define DPLL_A 0x06014 ++#define DPLL_B 0x06018 ++#define DPLL_VCO_ENABLE (1 << 31) ++#define DPLL_DVO_HIGH_SPEED (1 << 30) ++#define DPLL_SYNCLOCK_ENABLE (1 << 29) ++#define DPLL_VGA_MODE_DIS (1 << 28) ++#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ ++#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ ++#define DPLL_MODE_MASK (3 << 26) ++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ ++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ ++#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ ++#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ ++#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ ++#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ ++ ++#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) ++#define I915_CRC_ERROR_ENABLE (1UL<<29) ++#define I915_CRC_DONE_ENABLE (1UL<<28) ++#define I915_GMBUS_EVENT_ENABLE (1UL<<27) ++#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) ++#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) ++#define I915_DPST_EVENT_ENABLE (1UL<<23) ++#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) ++#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) ++#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) ++#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ ++#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) ++#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) ++#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) ++#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) ++#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) ++#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) ++#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) ++#define I915_DPST_EVENT_STATUS (1UL<<7) ++#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) ++#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) ++#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) ++#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ ++#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) ++#define I915_OVERLAY_UPDATED_STATUS (1UL<<0) ++ ++#define SRX_INDEX 0x3c4 ++#define SRX_DATA 0x3c5 ++#define SR01 1 ++#define SR01_SCREEN_OFF (1<<5) ++ ++#define PPCR 0x61204 ++#define PPCR_ON (1<<0) ++ ++#define DVOB 0x61140 ++#define DVOB_ON (1<<31) ++#define DVOC 0x61160 ++#define DVOC_ON (1<<31) ++#define LVDS 0x61180 ++#define LVDS_ON (1<<31) ++ ++#define ADPA 0x61100 ++#define ADPA_DPMS_MASK (~(3<<10)) ++#define ADPA_DPMS_ON (0<<10) ++#define ADPA_DPMS_SUSPEND (1<<10) ++#define ADPA_DPMS_STANDBY (2<<10) ++#define ADPA_DPMS_OFF (3<<10) ++ ++#define RING_TAIL 0x00 ++#define TAIL_ADDR 0x001FFFF8 ++#define RING_HEAD 0x04 ++#define HEAD_WRAP_COUNT 0xFFE00000 ++#define HEAD_WRAP_ONE 0x00200000 ++#define HEAD_ADDR 0x001FFFFC ++#define RING_START 0x08 ++#define START_ADDR 0xFFFFF000 ++#define RING_LEN 0x0C ++#define RING_NR_PAGES 0x001FF000 ++#define RING_REPORT_MASK 0x00000006 ++#define RING_REPORT_64K 0x00000002 ++#define RING_REPORT_128K 0x00000004 
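Display registers such as ADPA above are typically updated with a read-modify-write through the I915_READ()/I915_WRITE() accessors defined earlier in this header. A small sketch under that assumption (hypothetical helper, not part of the patch), updating the two-bit DPMS field of the VGA DAC control register:

static void example_set_adpa_dpms(struct drm_i915_private *dev_priv, u32 dpms)
{
	u32 adpa = I915_READ(ADPA);

	/* ADPA_DPMS_MASK is already the inverted mask, ~(3 << 10). */
	adpa &= ADPA_DPMS_MASK;
	adpa |= dpms;		/* ADPA_DPMS_ON, _SUSPEND, _STANDBY or _OFF */
	I915_WRITE(ADPA, adpa);
}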
++#define RING_NO_REPORT 0x00000000 ++#define RING_VALID_MASK 0x00000001 ++#define RING_VALID 0x00000001 ++#define RING_INVALID 0x00000000 ++ ++/* Scratch pad debug 0 reg: ++ */ ++#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 ++/* ++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within ++ * this field (only one bit may be set). ++ */ ++#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 ++#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 ++/* i830, required in DVO non-gang */ ++#define PLL_P2_DIVIDE_BY_4 (1 << 23) ++#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ ++#define PLL_REF_INPUT_DREFCLK (0 << 13) ++#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ ++#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ ++#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) ++#define PLL_REF_INPUT_MASK (3 << 13) ++#define PLL_LOAD_PULSE_PHASE_SHIFT 9 ++/* ++ * Parallel to Serial Load Pulse phase selection. ++ * Selects the phase for the 10X DPLL clock for the PCIe ++ * digital display port. The range is 4 to 13; 10 or more ++ * is just a flip delay. The default is 6 ++ */ ++#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) ++#define DISPLAY_RATE_SELECT_FPA1 (1 << 8) ++/* ++ * SDVO multiplier for 945G/GM. Not used on 965. ++ */ ++#define SDVO_MULTIPLIER_MASK 0x000000ff ++#define SDVO_MULTIPLIER_SHIFT_HIRES 4 ++#define SDVO_MULTIPLIER_SHIFT_VGA 0 ++#define DPLL_A_MD 0x0601c /* 965+ only */ ++/* ++ * UDI pixel divider, controlling how many pixels are stuffed into a packet. ++ * ++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO. ++ */ ++#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 ++#define DPLL_MD_UDI_DIVIDER_SHIFT 24 ++/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ ++#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 ++#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 ++/* ++ * SDVO/UDI pixel multiplier. ++ * ++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus ++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate ++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing ++ * dummy bytes in the datastream at an increased clock rate, with both sides of ++ * the link knowing how many bytes are fill. ++ * ++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock ++ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be ++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and ++ * through an SDVO command. ++ * ++ * This register field has values of multiplication factor minus 1, with ++ * a maximum multiplier of 5 for SDVO. ++ */ ++#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 ++#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 ++/* ++ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. ++ * This best be set to the default value (3) or the CRT won't work. No, ++ * I don't entirely understand what this does... 
++ */ ++#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f ++#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 ++#define DPLL_B_MD 0x06020 /* 965+ only */ ++#define FPA0 0x06040 ++#define FPA1 0x06044 ++#define FPB0 0x06048 ++#define FPB1 0x0604c ++#define FP_N_DIV_MASK 0x003f0000 ++#define FP_N_DIV_SHIFT 16 ++#define FP_M1_DIV_MASK 0x00003f00 ++#define FP_M1_DIV_SHIFT 8 ++#define FP_M2_DIV_MASK 0x0000003f ++#define FP_M2_DIV_SHIFT 0 ++#define DPLL_TEST 0x606c ++#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) ++#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) ++#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) ++#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) ++#define DPLLB_TEST_N_BYPASS (1 << 19) ++#define DPLLB_TEST_M_BYPASS (1 << 18) ++#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) ++#define DPLLA_TEST_N_BYPASS (1 << 3) ++#define DPLLA_TEST_M_BYPASS (1 << 2) ++#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) ++#define D_STATE 0x6104 ++#define CG_2D_DIS 0x6200 ++#define CG_3D_DIS 0x6204 ++ ++/* ++ * Palette regs ++ */ ++ ++#define PALETTE_A 0x0a000 ++#define PALETTE_B 0x0a800 ++ ++/* ++ * Overlay regs ++ */ ++ ++#define OVADD 0x30000 ++#define DOVSTA 0x30008 ++#define OC_BUF (0x3<<20) ++#define OGAMC5 0x30010 ++#define OGAMC4 0x30014 ++#define OGAMC3 0x30018 ++#define OGAMC2 0x3001c ++#define OGAMC1 0x30020 ++#define OGAMC0 0x30024 ++ ++/* ++ * Display engine regs ++ */ ++ ++/* Pipe A timing regs */ ++#define HTOTAL_A 0x60000 ++#define HBLANK_A 0x60004 ++#define HSYNC_A 0x60008 ++#define VTOTAL_A 0x6000c ++#define VBLANK_A 0x60010 ++#define VSYNC_A 0x60014 ++#define PIPEASRC 0x6001c ++#define BCLRPAT_A 0x60020 ++ ++/* Pipe B timing regs */ ++#define HTOTAL_B 0x61000 ++#define HBLANK_B 0x61004 ++#define HSYNC_B 0x61008 ++#define VTOTAL_B 0x6100c ++#define VBLANK_B 0x61010 ++#define VSYNC_B 0x61014 ++#define PIPEBSRC 0x6101c ++#define BCLRPAT_B 0x61020 ++ ++/* VGA port control */ ++#define ADPA 0x61100 ++#define ADPA_DAC_ENABLE (1<<31) ++#define ADPA_DAC_DISABLE 0 ++#define ADPA_PIPE_SELECT_MASK (1<<30) ++#define ADPA_PIPE_A_SELECT 0 ++#define ADPA_PIPE_B_SELECT (1<<30) ++#define ADPA_USE_VGA_HVPOLARITY (1<<15) ++#define ADPA_SETS_HVPOLARITY 0 ++#define ADPA_VSYNC_CNTL_DISABLE (1<<11) ++#define ADPA_VSYNC_CNTL_ENABLE 0 ++#define ADPA_HSYNC_CNTL_DISABLE (1<<10) ++#define ADPA_HSYNC_CNTL_ENABLE 0 ++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) ++#define ADPA_VSYNC_ACTIVE_LOW 0 ++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) ++#define ADPA_HSYNC_ACTIVE_LOW 0 ++#define ADPA_DPMS_MASK (~(3<<10)) ++#define ADPA_DPMS_ON (0<<10) ++#define ADPA_DPMS_SUSPEND (1<<10) ++#define ADPA_DPMS_STANDBY (2<<10) ++#define ADPA_DPMS_OFF (3<<10) ++ ++/* Hotplug control (945+ only) */ ++#define PORT_HOTPLUG_EN 0x61110 ++#define SDVOB_HOTPLUG_INT_EN (1 << 26) ++#define SDVOC_HOTPLUG_INT_EN (1 << 25) ++#define TV_HOTPLUG_INT_EN (1 << 18) ++#define CRT_HOTPLUG_INT_EN (1 << 9) ++#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) ++ ++#define PORT_HOTPLUG_STAT 0x61114 ++#define CRT_HOTPLUG_INT_STATUS (1 << 11) ++#define TV_HOTPLUG_INT_STATUS (1 << 10) ++#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) ++#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) ++#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) ++#define CRT_HOTPLUG_MONITOR_NONE (0 << 8) ++#define SDVOC_HOTPLUG_INT_STATUS (1 << 7) ++#define SDVOB_HOTPLUG_INT_STATUS (1 << 6) ++ ++/* SDVO port control */ ++#define SDVOB 0x61140 ++#define SDVOC 0x61160 ++#define SDVO_ENABLE (1 << 31) ++#define SDVO_PIPE_B_SELECT (1 << 30) ++#define SDVO_STALL_SELECT (1 << 29) ++#define SDVO_INTERRUPT_ENABLE (1 << 26) ++/** ++ * 915G/GM SDVO pixel multiplier. 
++ * ++ * Programmed value is multiplier - 1, up to 5x. ++ * ++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK ++ */ ++#define SDVO_PORT_MULTIPLY_MASK (7 << 23) ++#define SDVO_PORT_MULTIPLY_SHIFT 23 ++#define SDVO_PHASE_SELECT_MASK (15 << 19) ++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) ++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) ++#define SDVOC_GANG_MODE (1 << 16) ++#define SDVO_BORDER_ENABLE (1 << 7) ++#define SDVOB_PCIE_CONCURRENCY (1 << 3) ++#define SDVO_DETECTED (1 << 2) ++/* Bits to be preserved when writing */ ++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) ++#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) ++ ++/* DVO port control */ ++#define DVOA 0x61120 ++#define DVOB 0x61140 ++#define DVOC 0x61160 ++#define DVO_ENABLE (1 << 31) ++#define DVO_PIPE_B_SELECT (1 << 30) ++#define DVO_PIPE_STALL_UNUSED (0 << 28) ++#define DVO_PIPE_STALL (1 << 28) ++#define DVO_PIPE_STALL_TV (2 << 28) ++#define DVO_PIPE_STALL_MASK (3 << 28) ++#define DVO_USE_VGA_SYNC (1 << 15) ++#define DVO_DATA_ORDER_I740 (0 << 14) ++#define DVO_DATA_ORDER_FP (1 << 14) ++#define DVO_VSYNC_DISABLE (1 << 11) ++#define DVO_HSYNC_DISABLE (1 << 10) ++#define DVO_VSYNC_TRISTATE (1 << 9) ++#define DVO_HSYNC_TRISTATE (1 << 8) ++#define DVO_BORDER_ENABLE (1 << 7) ++#define DVO_DATA_ORDER_GBRG (1 << 6) ++#define DVO_DATA_ORDER_RGGB (0 << 6) ++#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) ++#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) ++#define DVO_VSYNC_ACTIVE_HIGH (1 << 4) ++#define DVO_HSYNC_ACTIVE_HIGH (1 << 3) ++#define DVO_BLANK_ACTIVE_HIGH (1 << 2) ++#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ ++#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ ++#define DVO_PRESERVE_MASK (0x7<<24) ++#define DVOA_SRCDIM 0x61124 ++#define DVOB_SRCDIM 0x61144 ++#define DVOC_SRCDIM 0x61164 ++#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 ++#define DVO_SRCDIM_VERTICAL_SHIFT 0 ++ ++/* LVDS port control */ ++#define LVDS 0x61180 ++/* ++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as ++ * the DPLL semantics change when the LVDS is assigned to that pipe. ++ */ ++#define LVDS_PORT_EN (1 << 31) ++/* Selects pipe B for LVDS data. Must be set on pre-965. */ ++#define LVDS_PIPEB_SELECT (1 << 30) ++/* ++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per ++ * pixel. ++ */ ++#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) ++#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) ++#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) ++/* ++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit ++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be ++ * on. ++ */ ++#define LVDS_A3_POWER_MASK (3 << 6) ++#define LVDS_A3_POWER_DOWN (0 << 6) ++#define LVDS_A3_POWER_UP (3 << 6) ++/* ++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP ++ * is set. ++ */ ++#define LVDS_CLKB_POWER_MASK (3 << 4) ++#define LVDS_CLKB_POWER_DOWN (0 << 4) ++#define LVDS_CLKB_POWER_UP (3 << 4) ++/* ++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 ++ * setting for whether we are in dual-channel mode. The B3 pair will ++ * additionally only be powered up when LVDS_A3_POWER_UP is set. 
++ */ ++#define LVDS_B0B3_POWER_MASK (3 << 2) ++#define LVDS_B0B3_POWER_DOWN (0 << 2) ++#define LVDS_B0B3_POWER_UP (3 << 2) ++ ++/* Panel power sequencing */ ++#define PP_STATUS 0x61200 ++#define PP_ON (1 << 31) ++/* ++ * Indicates that all dependencies of the panel are on: ++ * ++ * - PLL enabled ++ * - pipe enabled ++ * - LVDS/DVOB/DVOC on ++ */ ++#define PP_READY (1 << 30) ++#define PP_SEQUENCE_NONE (0 << 28) ++#define PP_SEQUENCE_ON (1 << 28) ++#define PP_SEQUENCE_OFF (2 << 28) ++#define PP_SEQUENCE_MASK 0x30000000 ++#define PP_CONTROL 0x61204 ++#define POWER_TARGET_ON (1 << 0) ++#define PP_ON_DELAYS 0x61208 ++#define PP_OFF_DELAYS 0x6120c ++#define PP_DIVISOR 0x61210 ++ ++/* Panel fitting */ ++#define PFIT_CONTROL 0x61230 ++#define PFIT_ENABLE (1 << 31) ++#define PFIT_PIPE_MASK (3 << 29) ++#define PFIT_PIPE_SHIFT 29 ++#define VERT_INTERP_DISABLE (0 << 10) ++#define VERT_INTERP_BILINEAR (1 << 10) ++#define VERT_INTERP_MASK (3 << 10) ++#define VERT_AUTO_SCALE (1 << 9) ++#define HORIZ_INTERP_DISABLE (0 << 6) ++#define HORIZ_INTERP_BILINEAR (1 << 6) ++#define HORIZ_INTERP_MASK (3 << 6) ++#define HORIZ_AUTO_SCALE (1 << 5) ++#define PANEL_8TO6_DITHER_ENABLE (1 << 3) ++#define PFIT_PGM_RATIOS 0x61234 ++#define PFIT_VERT_SCALE_MASK 0xfff00000 ++#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 ++#define PFIT_AUTO_RATIOS 0x61238 ++ ++/* Backlight control */ ++#define BLC_PWM_CTL 0x61254 ++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) ++#define BLC_PWM_CTL2 0x61250 /* 965+ only */ ++/* ++ * This is the most significant 15 bits of the number of backlight cycles in a ++ * complete cycle of the modulated backlight control. ++ * ++ * The actual value is this field multiplied by two. ++ */ ++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) ++#define BLM_LEGACY_MODE (1 << 16) ++/* ++ * This is the number of cycles out of the backlight modulation cycle for which ++ * the backlight is on. ++ * ++ * This field must be no greater than the number of cycles in the complete ++ * backlight modulation cycle. ++ */ ++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) ++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) ++ ++/* TV port control */ ++#define TV_CTL 0x68000 ++/** Enables the TV encoder */ ++# define TV_ENC_ENABLE (1 << 31) ++/** Sources the TV encoder input from pipe B instead of A. */ ++# define TV_ENC_PIPEB_SELECT (1 << 30) ++/** Outputs composite video (DAC A only) */ ++# define TV_ENC_OUTPUT_COMPOSITE (0 << 28) ++/** Outputs SVideo video (DAC B/C) */ ++# define TV_ENC_OUTPUT_SVIDEO (1 << 28) ++/** Outputs Component video (DAC A/B/C) */ ++# define TV_ENC_OUTPUT_COMPONENT (2 << 28) ++/** Outputs Composite and SVideo (DAC A/B/C) */ ++# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) ++# define TV_TRILEVEL_SYNC (1 << 21) ++/** Enables slow sync generation (945GM only) */ ++# define TV_SLOW_SYNC (1 << 20) ++/** Selects 4x oversampling for 480i and 576p */ ++# define TV_OVERSAMPLE_4X (0 << 18) ++/** Selects 2x oversampling for 720p and 1080i */ ++# define TV_OVERSAMPLE_2X (1 << 18) ++/** Selects no oversampling for 1080p */ ++# define TV_OVERSAMPLE_NONE (2 << 18) ++/** Selects 8x oversampling */ ++# define TV_OVERSAMPLE_8X (3 << 18) ++/** Selects progressive mode rather than interlaced */ ++# define TV_PROGRESSIVE (1 << 17) ++/** Sets the colorburst to PAL mode. Required for non-M PAL modes. 
*/ ++# define TV_PAL_BURST (1 << 16) ++/** Field for setting delay of Y compared to C */ ++# define TV_YC_SKEW_MASK (7 << 12) ++/** Enables a fix for 480p/576p standard definition modes on the 915GM only */ ++# define TV_ENC_SDP_FIX (1 << 11) ++/** ++ * Enables a fix for the 915GM only. ++ * ++ * Not sure what it does. ++ */ ++# define TV_ENC_C0_FIX (1 << 10) ++/** Bits that must be preserved by software */ ++# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) ++# define TV_FUSE_STATE_MASK (3 << 4) ++/** Read-only state that reports all features enabled */ ++# define TV_FUSE_STATE_ENABLED (0 << 4) ++/** Read-only state that reports that Macrovision is disabled in hardware*/ ++# define TV_FUSE_STATE_NO_MACROVISION (1 << 4) ++/** Read-only state that reports that TV-out is disabled in hardware. */ ++# define TV_FUSE_STATE_DISABLED (2 << 4) ++/** Normal operation */ ++# define TV_TEST_MODE_NORMAL (0 << 0) ++/** Encoder test pattern 1 - combo pattern */ ++# define TV_TEST_MODE_PATTERN_1 (1 << 0) ++/** Encoder test pattern 2 - full screen vertical 75% color bars */ ++# define TV_TEST_MODE_PATTERN_2 (2 << 0) ++/** Encoder test pattern 3 - full screen horizontal 75% color bars */ ++# define TV_TEST_MODE_PATTERN_3 (3 << 0) ++/** Encoder test pattern 4 - random noise */ ++# define TV_TEST_MODE_PATTERN_4 (4 << 0) ++/** Encoder test pattern 5 - linear color ramps */ ++# define TV_TEST_MODE_PATTERN_5 (5 << 0) ++/** ++ * This test mode forces the DACs to 50% of full output. ++ * ++ * This is used for load detection in combination with TVDAC_SENSE_MASK ++ */ ++# define TV_TEST_MODE_MONITOR_DETECT (7 << 0) ++# define TV_TEST_MODE_MASK (7 << 0) ++ ++#define TV_DAC 0x68004 ++/** ++ * Reports that DAC state change logic has reported change (RO). ++ * ++ * This gets cleared when TV_DAC_STATE_EN is cleared ++*/ ++# define TVDAC_STATE_CHG (1 << 31) ++# define TVDAC_SENSE_MASK (7 << 28) ++/** Reports that DAC A voltage is above the detect threshold */ ++# define TVDAC_A_SENSE (1 << 30) ++/** Reports that DAC B voltage is above the detect threshold */ ++# define TVDAC_B_SENSE (1 << 29) ++/** Reports that DAC C voltage is above the detect threshold */ ++# define TVDAC_C_SENSE (1 << 28) ++/** ++ * Enables DAC state detection logic, for load-based TV detection. ++ * ++ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set ++ * to off, for load detection to work. ++ */ ++# define TVDAC_STATE_CHG_EN (1 << 27) ++/** Sets the DAC A sense value to high */ ++# define TVDAC_A_SENSE_CTL (1 << 26) ++/** Sets the DAC B sense value to high */ ++# define TVDAC_B_SENSE_CTL (1 << 25) ++/** Sets the DAC C sense value to high */ ++# define TVDAC_C_SENSE_CTL (1 << 24) ++/** Overrides the ENC_ENABLE and DAC voltage levels */ ++# define DAC_CTL_OVERRIDE (1 << 7) ++/** Sets the slew rate. Must be preserved in software */ ++# define ENC_TVDAC_SLEW_FAST (1 << 6) ++# define DAC_A_1_3_V (0 << 4) ++# define DAC_A_1_1_V (1 << 4) ++# define DAC_A_0_7_V (2 << 4) ++# define DAC_A_OFF (3 << 4) ++# define DAC_B_1_3_V (0 << 2) ++# define DAC_B_1_1_V (1 << 2) ++# define DAC_B_0_7_V (2 << 2) ++# define DAC_B_OFF (3 << 2) ++# define DAC_C_1_3_V (0 << 0) ++# define DAC_C_1_1_V (1 << 0) ++# define DAC_C_0_7_V (2 << 0) ++# define DAC_C_OFF (3 << 0) ++ ++/** ++ * CSC coefficients are stored in a floating point format with 9 bits of ++ * mantissa and 2 or 3 bits of exponent. 
The exponent is represented as 2**-n, ++ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with ++ * -1 (0x3) being the only legal negative value. ++ */ ++#define TV_CSC_Y 0x68010 ++# define TV_RY_MASK 0x07ff0000 ++# define TV_RY_SHIFT 16 ++# define TV_GY_MASK 0x00000fff ++# define TV_GY_SHIFT 0 ++ ++#define TV_CSC_Y2 0x68014 ++# define TV_BY_MASK 0x07ff0000 ++# define TV_BY_SHIFT 16 ++/** ++ * Y attenuation for component video. ++ * ++ * Stored in 1.9 fixed point. ++ */ ++# define TV_AY_MASK 0x000003ff ++# define TV_AY_SHIFT 0 ++ ++#define TV_CSC_U 0x68018 ++# define TV_RU_MASK 0x07ff0000 ++# define TV_RU_SHIFT 16 ++# define TV_GU_MASK 0x000007ff ++# define TV_GU_SHIFT 0 ++ ++#define TV_CSC_U2 0x6801c ++# define TV_BU_MASK 0x07ff0000 ++# define TV_BU_SHIFT 16 ++/** ++ * U attenuation for component video. ++ * ++ * Stored in 1.9 fixed point. ++ */ ++# define TV_AU_MASK 0x000003ff ++# define TV_AU_SHIFT 0 ++ ++#define TV_CSC_V 0x68020 ++# define TV_RV_MASK 0x0fff0000 ++# define TV_RV_SHIFT 16 ++# define TV_GV_MASK 0x000007ff ++# define TV_GV_SHIFT 0 ++ ++#define TV_CSC_V2 0x68024 ++# define TV_BV_MASK 0x07ff0000 ++# define TV_BV_SHIFT 16 ++/** ++ * V attenuation for component video. ++ * ++ * Stored in 1.9 fixed point. ++ */ ++# define TV_AV_MASK 0x000007ff ++# define TV_AV_SHIFT 0 ++ ++#define TV_CLR_KNOBS 0x68028 ++/** 2s-complement brightness adjustment */ ++# define TV_BRIGHTNESS_MASK 0xff000000 ++# define TV_BRIGHTNESS_SHIFT 24 ++/** Contrast adjustment, as a 2.6 unsigned floating point number */ ++# define TV_CONTRAST_MASK 0x00ff0000 ++# define TV_CONTRAST_SHIFT 16 ++/** Saturation adjustment, as a 2.6 unsigned floating point number */ ++# define TV_SATURATION_MASK 0x0000ff00 ++# define TV_SATURATION_SHIFT 8 ++/** Hue adjustment, as an integer phase angle in degrees */ ++# define TV_HUE_MASK 0x000000ff ++# define TV_HUE_SHIFT 0 ++ ++#define TV_CLR_LEVEL 0x6802c ++/** Controls the DAC level for black */ ++# define TV_BLACK_LEVEL_MASK 0x01ff0000 ++# define TV_BLACK_LEVEL_SHIFT 16 ++/** Controls the DAC level for blanking */ ++# define TV_BLANK_LEVEL_MASK 0x000001ff ++# define TV_BLANK_LEVEL_SHIFT 0 ++ ++#define TV_H_CTL_1 0x68030 ++/** Number of pixels in the hsync. */ ++# define TV_HSYNC_END_MASK 0x1fff0000 ++# define TV_HSYNC_END_SHIFT 16 ++/** Total number of pixels minus one in the line (display and blanking). */ ++# define TV_HTOTAL_MASK 0x00001fff ++# define TV_HTOTAL_SHIFT 0 ++ ++#define TV_H_CTL_2 0x68034 ++/** Enables the colorburst (needed for non-component color) */ ++# define TV_BURST_ENA (1 << 31) ++/** Offset of the colorburst from the start of hsync, in pixels minus one. 
*/ ++# define TV_HBURST_START_SHIFT 16 ++# define TV_HBURST_START_MASK 0x1fff0000 ++/** Length of the colorburst */ ++# define TV_HBURST_LEN_SHIFT 0 ++# define TV_HBURST_LEN_MASK 0x0001fff ++ ++#define TV_H_CTL_3 0x68038 ++/** End of hblank, measured in pixels minus one from start of hsync */ ++# define TV_HBLANK_END_SHIFT 16 ++# define TV_HBLANK_END_MASK 0x1fff0000 ++/** Start of hblank, measured in pixels minus one from start of hsync */ ++# define TV_HBLANK_START_SHIFT 0 ++# define TV_HBLANK_START_MASK 0x0001fff ++ ++#define TV_V_CTL_1 0x6803c ++/** XXX */ ++# define TV_NBR_END_SHIFT 16 ++# define TV_NBR_END_MASK 0x07ff0000 ++/** XXX */ ++# define TV_VI_END_F1_SHIFT 8 ++# define TV_VI_END_F1_MASK 0x00003f00 ++/** XXX */ ++# define TV_VI_END_F2_SHIFT 0 ++# define TV_VI_END_F2_MASK 0x0000003f ++ ++#define TV_V_CTL_2 0x68040 ++/** Length of vsync, in half lines */ ++# define TV_VSYNC_LEN_MASK 0x07ff0000 ++# define TV_VSYNC_LEN_SHIFT 16 ++/** Offset of the start of vsync in field 1, measured in one less than the ++ * number of half lines. ++ */ ++# define TV_VSYNC_START_F1_MASK 0x00007f00 ++# define TV_VSYNC_START_F1_SHIFT 8 ++/** ++ * Offset of the start of vsync in field 2, measured in one less than the ++ * number of half lines. ++ */ ++# define TV_VSYNC_START_F2_MASK 0x0000007f ++# define TV_VSYNC_START_F2_SHIFT 0 ++ ++#define TV_V_CTL_3 0x68044 ++/** Enables generation of the equalization signal */ ++# define TV_EQUAL_ENA (1 << 31) ++/** Length of vsync, in half lines */ ++# define TV_VEQ_LEN_MASK 0x007f0000 ++# define TV_VEQ_LEN_SHIFT 16 ++/** Offset of the start of equalization in field 1, measured in one less than ++ * the number of half lines. ++ */ ++# define TV_VEQ_START_F1_MASK 0x0007f00 ++# define TV_VEQ_START_F1_SHIFT 8 ++/** ++ * Offset of the start of equalization in field 2, measured in one less than ++ * the number of half lines. ++ */ ++# define TV_VEQ_START_F2_MASK 0x000007f ++# define TV_VEQ_START_F2_SHIFT 0 ++ ++#define TV_V_CTL_4 0x68048 ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. ++ */ ++# define TV_VBURST_START_F1_MASK 0x003f0000 ++# define TV_VBURST_START_F1_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F1_MASK 0x000000ff ++# define TV_VBURST_END_F1_SHIFT 0 ++ ++#define TV_V_CTL_5 0x6804c ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. ++ */ ++# define TV_VBURST_START_F2_MASK 0x003f0000 ++# define TV_VBURST_START_F2_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F2_MASK 0x000000ff ++# define TV_VBURST_END_F2_SHIFT 0 ++ ++#define TV_V_CTL_6 0x68050 ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. ++ */ ++# define TV_VBURST_START_F3_MASK 0x003f0000 ++# define TV_VBURST_START_F3_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F3_MASK 0x000000ff ++# define TV_VBURST_END_F3_SHIFT 0 ++ ++#define TV_V_CTL_7 0x68054 ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. 
++ */ ++# define TV_VBURST_START_F4_MASK 0x003f0000 ++# define TV_VBURST_START_F4_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F4_MASK 0x000000ff ++# define TV_VBURST_END_F4_SHIFT 0 ++ ++#define TV_SC_CTL_1 0x68060 ++/** Turns on the first subcarrier phase generation DDA */ ++# define TV_SC_DDA1_EN (1 << 31) ++/** Turns on the first subcarrier phase generation DDA */ ++# define TV_SC_DDA2_EN (1 << 30) ++/** Turns on the first subcarrier phase generation DDA */ ++# define TV_SC_DDA3_EN (1 << 29) ++/** Sets the subcarrier DDA to reset frequency every other field */ ++# define TV_SC_RESET_EVERY_2 (0 << 24) ++/** Sets the subcarrier DDA to reset frequency every fourth field */ ++# define TV_SC_RESET_EVERY_4 (1 << 24) ++/** Sets the subcarrier DDA to reset frequency every eighth field */ ++# define TV_SC_RESET_EVERY_8 (2 << 24) ++/** Sets the subcarrier DDA to never reset the frequency */ ++# define TV_SC_RESET_NEVER (3 << 24) ++/** Sets the peak amplitude of the colorburst.*/ ++# define TV_BURST_LEVEL_MASK 0x00ff0000 ++# define TV_BURST_LEVEL_SHIFT 16 ++/** Sets the increment of the first subcarrier phase generation DDA */ ++# define TV_SCDDA1_INC_MASK 0x00000fff ++# define TV_SCDDA1_INC_SHIFT 0 ++ ++#define TV_SC_CTL_2 0x68064 ++/** Sets the rollover for the second subcarrier phase generation DDA */ ++# define TV_SCDDA2_SIZE_MASK 0x7fff0000 ++# define TV_SCDDA2_SIZE_SHIFT 16 ++/** Sets the increent of the second subcarrier phase generation DDA */ ++# define TV_SCDDA2_INC_MASK 0x00007fff ++# define TV_SCDDA2_INC_SHIFT 0 ++ ++#define TV_SC_CTL_3 0x68068 ++/** Sets the rollover for the third subcarrier phase generation DDA */ ++# define TV_SCDDA3_SIZE_MASK 0x7fff0000 ++# define TV_SCDDA3_SIZE_SHIFT 16 ++/** Sets the increent of the third subcarrier phase generation DDA */ ++# define TV_SCDDA3_INC_MASK 0x00007fff ++# define TV_SCDDA3_INC_SHIFT 0 ++ ++#define TV_WIN_POS 0x68070 ++/** X coordinate of the display from the start of horizontal active */ ++# define TV_XPOS_MASK 0x1fff0000 ++# define TV_XPOS_SHIFT 16 ++/** Y coordinate of the display from the start of vertical active (NBR) */ ++# define TV_YPOS_MASK 0x00000fff ++# define TV_YPOS_SHIFT 0 ++ ++#define TV_WIN_SIZE 0x68074 ++/** Horizontal size of the display window, measured in pixels*/ ++# define TV_XSIZE_MASK 0x1fff0000 ++# define TV_XSIZE_SHIFT 16 ++/** ++ * Vertical size of the display window, measured in pixels. ++ * ++ * Must be even for interlaced modes. ++ */ ++# define TV_YSIZE_MASK 0x00000fff ++# define TV_YSIZE_SHIFT 0 ++ ++#define TV_FILTER_CTL_1 0x68080 ++/** ++ * Enables automatic scaling calculation. ++ * ++ * If set, the rest of the registers are ignored, and the calculated values can ++ * be read back from the register. ++ */ ++# define TV_AUTO_SCALE (1 << 31) ++/** ++ * Disables the vertical filter. ++ * ++ * This is required on modes more than 1024 pixels wide */ ++# define TV_V_FILTER_BYPASS (1 << 29) ++/** Enables adaptive vertical filtering */ ++# define TV_VADAPT (1 << 28) ++# define TV_VADAPT_MODE_MASK (3 << 26) ++/** Selects the least adaptive vertical filtering mode */ ++# define TV_VADAPT_MODE_LEAST (0 << 26) ++/** Selects the moderately adaptive vertical filtering mode */ ++# define TV_VADAPT_MODE_MODERATE (1 << 26) ++/** Selects the most adaptive vertical filtering mode */ ++# define TV_VADAPT_MODE_MOST (3 << 26) ++/** ++ * Sets the horizontal scaling factor. 
++ * ++ * This should be the fractional part of the horizontal scaling factor divided ++ * by the oversampling rate. TV_HSCALE should be less than 1, and set to: ++ * ++ * (src width - 1) / ((oversample * dest width) - 1) ++ */ ++# define TV_HSCALE_FRAC_MASK 0x00003fff ++# define TV_HSCALE_FRAC_SHIFT 0 ++ ++#define TV_FILTER_CTL_2 0x68084 ++/** ++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) ++ */ ++# define TV_VSCALE_INT_MASK 0x00038000 ++# define TV_VSCALE_INT_SHIFT 15 ++/** ++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * \sa TV_VSCALE_INT_MASK ++ */ ++# define TV_VSCALE_FRAC_MASK 0x00007fff ++# define TV_VSCALE_FRAC_SHIFT 0 ++ ++#define TV_FILTER_CTL_3 0x68088 ++/** ++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) ++ * ++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. ++ */ ++# define TV_VSCALE_IP_INT_MASK 0x00038000 ++# define TV_VSCALE_IP_INT_SHIFT 15 ++/** ++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. ++ * ++ * \sa TV_VSCALE_IP_INT_MASK ++ */ ++# define TV_VSCALE_IP_FRAC_MASK 0x00007fff ++# define TV_VSCALE_IP_FRAC_SHIFT 0 ++ ++#define TV_CC_CONTROL 0x68090 ++# define TV_CC_ENABLE (1 << 31) ++/** ++ * Specifies which field to send the CC data in. ++ * ++ * CC data is usually sent in field 0. ++ */ ++# define TV_CC_FID_MASK (1 << 27) ++# define TV_CC_FID_SHIFT 27 ++/** Sets the horizontal position of the CC data. Usually 135. */ ++# define TV_CC_HOFF_MASK 0x03ff0000 ++# define TV_CC_HOFF_SHIFT 16 ++/** Sets the vertical position of the CC data. Usually 21 */ ++# define TV_CC_LINE_MASK 0x0000003f ++# define TV_CC_LINE_SHIFT 0 ++ ++#define TV_CC_DATA 0x68094 ++# define TV_CC_RDY (1 << 31) ++/** Second word of CC data to be transmitted. */ ++# define TV_CC_DATA_2_MASK 0x007f0000 ++# define TV_CC_DATA_2_SHIFT 16 ++/** First word of CC data to be transmitted. 
*/ ++# define TV_CC_DATA_1_MASK 0x0000007f ++# define TV_CC_DATA_1_SHIFT 0 ++ ++#define TV_H_LUMA_0 0x68100 ++#define TV_H_LUMA_59 0x681ec ++#define TV_H_CHROMA_0 0x68200 ++#define TV_H_CHROMA_59 0x682ec ++#define TV_V_LUMA_0 0x68300 ++#define TV_V_LUMA_42 0x683a8 ++#define TV_V_CHROMA_0 0x68400 ++#define TV_V_CHROMA_42 0x684a8 ++ ++/* Display & cursor control */ ++ ++/* Pipe A */ ++#define PIPEADSL 0x70000 ++#define PIPEACONF 0x70008 ++#define PIPEACONF_ENABLE (1<<31) ++#define PIPEACONF_DISABLE 0 ++#define PIPEACONF_DOUBLE_WIDE (1<<30) ++#define I965_PIPECONF_ACTIVE (1<<30) ++#define PIPEACONF_SINGLE_WIDE 0 ++#define PIPEACONF_PIPE_UNLOCKED 0 ++#define PIPEACONF_PIPE_LOCKED (1<<25) ++#define PIPEACONF_PALETTE 0 ++#define PIPEACONF_GAMMA (1<<24) ++#define PIPECONF_FORCE_BORDER (1<<25) ++#define PIPECONF_PROGRESSIVE (0 << 21) ++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) ++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) ++#define PIPEASTAT 0x70024 ++#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) ++#define PIPE_CRC_ERROR_ENABLE (1UL<<29) ++#define PIPE_CRC_DONE_ENABLE (1UL<<28) ++#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) ++#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) ++#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) ++#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) ++#define PIPE_DPST_EVENT_ENABLE (1UL<<23) ++#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) ++#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) ++#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) ++#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ ++#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ ++#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) ++#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) ++#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) ++#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) ++#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) ++#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) ++#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) ++#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) ++#define PIPE_DPST_EVENT_STATUS (1UL<<7) ++#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) ++#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) ++#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) ++#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ ++#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ ++#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) ++#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) ++ ++#define DSPARB 0x70030 ++#define DSPARB_CSTART_MASK (0x7f << 7) ++#define DSPARB_CSTART_SHIFT 7 ++#define DSPARB_BSTART_MASK (0x7f) ++#define DSPARB_BSTART_SHIFT 0 ++/* ++ * The two pipe frame counter registers are not synchronized, so ++ * reading a stable value is somewhat tricky. 
The following code ++ * should work: ++ * ++ * do { ++ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> ++ * PIPE_FRAME_HIGH_SHIFT; ++ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> ++ * PIPE_FRAME_LOW_SHIFT); ++ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> ++ * PIPE_FRAME_HIGH_SHIFT); ++ * } while (high1 != high2); ++ * frame = (high1 << 8) | low1; ++ */ ++#define PIPEAFRAMEHIGH 0x70040 ++#define PIPE_FRAME_HIGH_MASK 0x0000ffff ++#define PIPE_FRAME_HIGH_SHIFT 0 ++#define PIPEAFRAMEPIXEL 0x70044 ++#define PIPE_FRAME_LOW_MASK 0xff000000 ++#define PIPE_FRAME_LOW_SHIFT 24 ++#define PIPE_PIXEL_MASK 0x00ffffff ++#define PIPE_PIXEL_SHIFT 0 ++ ++/* Cursor A & B regs */ ++#define CURACNTR 0x70080 ++#define CURSOR_MODE_DISABLE 0x00 ++#define CURSOR_MODE_64_32B_AX 0x07 ++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) ++#define MCURSOR_GAMMA_ENABLE (1 << 26) ++#define CURABASE 0x70084 ++#define CURAPOS 0x70088 ++#define CURSOR_POS_MASK 0x007FF ++#define CURSOR_POS_SIGN 0x8000 ++#define CURSOR_X_SHIFT 0 ++#define CURSOR_Y_SHIFT 16 ++#define CURBCNTR 0x700c0 ++#define CURBBASE 0x700c4 ++#define CURBPOS 0x700c8 ++ ++/* Display A control */ ++#define DSPACNTR 0x70180 ++#define DISPLAY_PLANE_ENABLE (1<<31) ++#define DISPLAY_PLANE_DISABLE 0 ++#define DISPPLANE_GAMMA_ENABLE (1<<30) ++#define DISPPLANE_GAMMA_DISABLE 0 ++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) ++#define DISPPLANE_8BPP (0x2<<26) ++#define DISPPLANE_15_16BPP (0x4<<26) ++#define DISPPLANE_16BPP (0x5<<26) ++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) ++#define DISPPLANE_32BPP (0x7<<26) ++#define DISPPLANE_STEREO_ENABLE (1<<25) ++#define DISPPLANE_STEREO_DISABLE 0 ++#define DISPPLANE_SEL_PIPE_MASK (1<<24) ++#define DISPPLANE_SEL_PIPE_A 0 ++#define DISPPLANE_SEL_PIPE_B (1<<24) ++#define DISPPLANE_SRC_KEY_ENABLE (1<<22) ++#define DISPPLANE_SRC_KEY_DISABLE 0 ++#define DISPPLANE_LINE_DOUBLE (1<<20) ++#define DISPPLANE_NO_LINE_DOUBLE 0 ++#define DISPPLANE_STEREO_POLARITY_FIRST 0 ++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) ++#define DSPAADDR 0x70184 ++#define DSPASTRIDE 0x70188 ++#define DSPAPOS 0x7018C /* reserved */ ++#define DSPASIZE 0x70190 ++#define DSPASURF 0x7019C /* 965+ only */ ++#define DSPATILEOFF 0x701A4 /* 965+ only */ ++ ++/* VBIOS flags */ ++#define SWF00 0x71410 ++#define SWF01 0x71414 ++#define SWF02 0x71418 ++#define SWF03 0x7141c ++#define SWF04 0x71420 ++#define SWF05 0x71424 ++#define SWF06 0x71428 ++#define SWF10 0x70410 ++#define SWF11 0x70414 ++#define SWF14 0x71420 ++#define SWF30 0x72414 ++#define SWF31 0x72418 ++#define SWF32 0x7241c ++ ++/* Pipe B */ ++#define PIPEBDSL 0x71000 ++#define PIPEBCONF 0x71008 ++#define PIPEBSTAT 0x71024 ++#define PIPEBFRAMEHIGH 0x71040 ++#define PIPEBFRAMEPIXEL 0x71044 ++ ++/* Display B control */ ++#define DSPBCNTR 0x71180 ++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) ++#define DISPPLANE_ALPHA_TRANS_DISABLE 0 ++#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 ++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) ++#define DSPBADDR 0x71184 ++#define DSPBSTRIDE 0x71188 ++#define DSPBPOS 0x7118C ++#define DSPBSIZE 0x71190 ++#define DSPBSURF 0x7119C ++#define DSPBTILEOFF 0x711A4 ++ ++/* VBIOS regs */ ++#define VGACNTRL 0x71400 ++# define VGA_DISP_DISABLE (1 << 31) ++# define VGA_2X_MODE (1 << 30) ++# define VGA_PIPE_B_SELECT (1 << 29) ++ ++/* Chipset type macros */ ++ ++#define IS_I830(dev) ((dev)->pci_device == 0x3577) ++#define IS_845G(dev) ((dev)->pci_device == 0x2562) ++#define IS_I85X(dev) ((dev)->pci_device == 0x3582) ++#define IS_I855(dev) 
((dev)->pci_device == 0x3582) ++#define IS_I865G(dev) ((dev)->pci_device == 0x2572) ++ ++#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) ++#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) ++#define IS_I945G(dev) ((dev)->pci_device == 0x2772) ++#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ ++ (dev)->pci_device == 0x27AE) ++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ ++ (dev)->pci_device == 0x2982 || \ ++ (dev)->pci_device == 0x2992 || \ ++ (dev)->pci_device == 0x29A2 || \ ++ (dev)->pci_device == 0x2A02 || \ ++ (dev)->pci_device == 0x2A12 || \ ++ (dev)->pci_device == 0x2A42 || \ ++ (dev)->pci_device == 0x2E02 || \ ++ (dev)->pci_device == 0x2E12 || \ ++ (dev)->pci_device == 0x2E22) ++ ++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) ++ ++#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) ++ ++#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ ++ (dev)->pci_device == 0x2E12 || \ ++ (dev)->pci_device == 0x2E22) ++ ++#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ ++ (dev)->pci_device == 0x29B2 || \ ++ (dev)->pci_device == 0x29D2) ++ ++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ ++ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) ++ ++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ ++ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) ++ ++#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_execbuf.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_execbuf.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_execbuf.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_execbuf.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,917 @@ ++/* ++ * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Thomas Hellstrom ++ * Dave Airlie ++ * Keith Packard ++ * ... ? 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#if DRM_DEBUG_CODE ++#define DRM_DEBUG_RELOCATION (drm_debug != 0) ++#else ++#define DRM_DEBUG_RELOCATION 0 ++#endif ++ ++enum i915_buf_idle { ++ I915_RELOC_UNCHECKED, ++ I915_RELOC_IDLE, ++ I915_RELOC_BUSY ++}; ++ ++struct i915_relocatee_info { ++ struct drm_buffer_object *buf; ++ unsigned long offset; ++ uint32_t *data_page; ++ unsigned page_offset; ++ struct drm_bo_kmap_obj kmap; ++ int is_iomem; ++ int dst; ++ int idle; ++ int performed_ring_relocs; ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ unsigned long pfn; ++ pgprot_t pg_prot; ++#endif ++}; ++ ++struct drm_i915_validate_buffer { ++ struct drm_buffer_object *buffer; ++ int presumed_offset_correct; ++ void __user *data; ++ int ret; ++ enum i915_buf_idle idle; ++}; ++ ++/* ++ * I'd like to use MI_STORE_DATA_IMM here, but I can't make ++ * it work. Seems like GART writes are broken with that ++ * instruction. Also I'm not sure that MI_FLUSH will ++ * act as a memory barrier for that instruction. It will ++ * for this single dword 2D blit. ++ */ ++ ++static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset, ++ uint32_t value) ++{ ++ struct drm_i915_private *dev_priv = ++ (struct drm_i915_private *)dev->dev_private; ++ ++ RING_LOCALS; ++ i915_kernel_lost_context(dev); ++ BEGIN_LP_RING(6); ++ OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3)); ++ OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40)); ++ OUT_RING((0x1 << 16) | (0x4)); ++ OUT_RING(offset); ++ OUT_RING(value); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++} ++ ++static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer ++ *buffers, unsigned num_buffers) ++{ ++ while (num_buffers--) ++ drm_bo_usage_deref_locked(&buffers[num_buffers].buffer); ++} ++ ++int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, ++ struct drm_i915_validate_buffer *buffers, ++ struct i915_relocatee_info *relocatee, uint32_t * reloc) ++{ ++ unsigned index; ++ unsigned long new_cmd_offset; ++ u32 val; ++ int ret, i; ++ int buf_index = -1; ++ ++ /* ++ * FIXME: O(relocs * buffers) complexity. 
++ */ ++ ++ for (i = 0; i <= num_buffers; i++) ++ if (buffers[i].buffer) ++ if (reloc[2] == buffers[i].buffer->base.hash.key) ++ buf_index = i; ++ ++ if (buf_index == -1) { ++ DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); ++ return -EINVAL; ++ } ++ ++ /* ++ * Short-circuit relocations that were correctly ++ * guessed by the client ++ */ ++ if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION) ++ return 0; ++ ++ new_cmd_offset = reloc[0]; ++ if (!relocatee->data_page || ++ !drm_bo_same_page(relocatee->offset, new_cmd_offset)) { ++ struct drm_bo_mem_reg *mem = &relocatee->buf->mem; ++ ++ drm_bo_kunmap(&relocatee->kmap); ++ relocatee->data_page = NULL; ++ relocatee->offset = new_cmd_offset; ++ ++ if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) { ++ ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0); ++ if (ret) ++ return ret; ++ relocatee->idle = I915_RELOC_IDLE; ++ } ++ ++ if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) && ++ (mem->flags & DRM_BO_FLAG_CACHED_MAPPED))) ++ drm_bo_evict_cached(relocatee->buf); ++ ++ ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT, ++ 1, &relocatee->kmap); ++ if (ret) { ++ DRM_ERROR ++ ("Could not map command buffer to apply relocs\n %08lx", ++ new_cmd_offset); ++ return ret; ++ } ++ relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, ++ &relocatee->is_iomem); ++ relocatee->page_offset = (relocatee->offset & PAGE_MASK); ++ } ++ ++ val = buffers[buf_index].buffer->offset; ++ index = (reloc[0] - relocatee->page_offset) >> 2; ++ ++ /* add in validate */ ++ val = val + reloc[1]; ++ ++ if (DRM_DEBUG_RELOCATION) { ++ if (buffers[buf_index].presumed_offset_correct && ++ relocatee->data_page[index] != val) { ++ DRM_DEBUG ++ ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n", ++ reloc[0], reloc[1], buf_index, ++ relocatee->data_page[index], val); ++ } ++ } ++ ++ if (relocatee->is_iomem) ++ iowrite32(val, relocatee->data_page + index); ++ else ++ relocatee->data_page[index] = val; ++ return 0; ++} ++ ++int i915_process_relocs(struct drm_file *file_priv, ++ uint32_t buf_handle, ++ uint32_t __user ** reloc_user_ptr, ++ struct i915_relocatee_info *relocatee, ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t num_buffers) ++{ ++ int ret, reloc_stride; ++ uint32_t cur_offset; ++ uint32_t reloc_count; ++ uint32_t reloc_type; ++ uint32_t reloc_buf_size; ++ uint32_t *reloc_buf = NULL; ++ int i; ++ ++ /* do a copy from user from the user ptr */ ++ ret = get_user(reloc_count, *reloc_user_ptr); ++ if (ret) { ++ DRM_ERROR("Could not map relocation buffer.\n"); ++ goto out; ++ } ++ ++ ret = get_user(reloc_type, (*reloc_user_ptr) + 1); ++ if (ret) { ++ DRM_ERROR("Could not map relocation buffer.\n"); ++ goto out; ++ } ++ ++ if (reloc_type != 0) { ++ DRM_ERROR("Unsupported relocation type requested\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ reloc_buf_size = ++ (I915_RELOC_HEADER + ++ (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t); ++ reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); ++ if (!reloc_buf) { ++ DRM_ERROR("Out of memory for reloc buffer\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ /* get next relocate buffer handle */ ++ *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2]; ++ ++ reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ ++ ++ DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, ++ *reloc_user_ptr); 
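++
++	/*
++	 * Apply every entry just copied in: each relocation occupies
++	 * I915_RELOC0_STRIDE dwords after the I915_RELOC_HEADER dwords of
++	 * header, so the loop below indexes reloc_buf in dword units and
++	 * hands each entry to i915_apply_reloc().
++	 */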
++ ++ for (i = 0; i < reloc_count; i++) { ++ cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE); ++ ++ ret = i915_apply_reloc(file_priv, num_buffers, buffers, ++ relocatee, reloc_buf + cur_offset); ++ if (ret) ++ goto out; ++ } ++ ++ out: ++ if (reloc_buf) ++ kfree(reloc_buf); ++ ++ if (relocatee->data_page) { ++ drm_bo_kunmap(&relocatee->kmap); ++ relocatee->data_page = NULL; ++ } ++ ++ return ret; ++} ++ ++static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, ++ uint32_t __user * reloc_user_ptr, ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t buf_count) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct i915_relocatee_info relocatee; ++ int ret = 0; ++ int b; ++ ++ /* ++ * Short circuit relocations when all previous ++ * buffers offsets were correctly guessed by ++ * the client ++ */ ++ if (!DRM_DEBUG_RELOCATION) { ++ for (b = 0; b < buf_count; b++) ++ if (!buffers[b].presumed_offset_correct) ++ break; ++ ++ if (b == buf_count) ++ return 0; ++ } ++ ++ memset(&relocatee, 0, sizeof(relocatee)); ++ relocatee.idle = I915_RELOC_UNCHECKED; ++ ++ mutex_lock(&dev->struct_mutex); ++ relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ if (!relocatee.buf) { ++ DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); ++ ret = -EINVAL; ++ goto out_err; ++ } ++ ++ mutex_lock(&relocatee.buf->mutex); ++ while (reloc_user_ptr) { ++ ret = ++ i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, ++ &relocatee, buffers, buf_count); ++ if (ret) { ++ DRM_ERROR("process relocs failed\n"); ++ goto out_err1; ++ } ++ } ++ ++ out_err1: ++ mutex_unlock(&relocatee.buf->mutex); ++ drm_bo_usage_deref_unlocked(&relocatee.buf); ++ out_err: ++ return ret; ++} ++ ++static void i915_clear_relocatee(struct i915_relocatee_info *relocatee) ++{ ++ if (relocatee->data_page) { ++#ifndef DRM_KMAP_ATOMIC_PROT_PFN ++ drm_bo_kunmap(&relocatee->kmap); ++#else ++ kunmap_atomic(relocatee->data_page, KM_USER0); ++#endif ++ relocatee->data_page = NULL; ++ } ++ relocatee->buf = NULL; ++ relocatee->dst = ~0; ++} ++ ++static int i915_update_relocatee(struct i915_relocatee_info *relocatee, ++ struct drm_i915_validate_buffer *buffers, ++ unsigned int dst, unsigned long dst_offset) ++{ ++ int ret; ++ ++ if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) { ++ i915_clear_relocatee(relocatee); ++ relocatee->dst = dst; ++ relocatee->buf = buffers[dst].buffer; ++ relocatee->idle = buffers[dst].idle; ++ ++ /* ++ * Check for buffer idle. If the buffer is busy, revert to ++ * ring relocations. 
++ */ ++ ++ if (relocatee->idle == I915_RELOC_UNCHECKED) { ++ preempt_enable(); ++ mutex_lock(&relocatee->buf->mutex); ++ ++ ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0); ++ if (ret == 0) ++ relocatee->idle = I915_RELOC_IDLE; ++ else { ++ relocatee->idle = I915_RELOC_BUSY; ++ relocatee->performed_ring_relocs = 1; ++ } ++ mutex_unlock(&relocatee->buf->mutex); ++ preempt_disable(); ++ buffers[dst].idle = relocatee->idle; ++ } ++ } ++ ++ if (relocatee->idle == I915_RELOC_BUSY) ++ return 0; ++ ++ if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) { ++ DRM_ERROR("Relocation destination out of bounds.\n"); ++ return -EINVAL; ++ } ++ if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) || ++ NULL == relocatee->data_page)) { ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ if (NULL != relocatee->data_page) { ++ kunmap_atomic(relocatee->data_page, KM_USER0); ++ relocatee->data_page = NULL; ++ } ++ ret = drm_bo_pfn_prot(relocatee->buf, dst_offset, ++ &relocatee->pfn, &relocatee->pg_prot); ++ if (ret) { ++ DRM_ERROR("Can't map relocation destination.\n"); ++ return -EINVAL; ++ } ++ relocatee->data_page = ++ kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0, ++ relocatee->pg_prot); ++#else ++ if (NULL != relocatee->data_page) { ++ drm_bo_kunmap(&relocatee->kmap); ++ relocatee->data_page = NULL; ++ } ++ ++ ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT, ++ 1, &relocatee->kmap); ++ if (ret) { ++ DRM_ERROR("Can't map relocation destination.\n"); ++ return ret; ++ } ++ ++ relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, ++ &relocatee->is_iomem); ++#endif ++ relocatee->page_offset = dst_offset & PAGE_MASK; ++ } ++ return 0; ++} ++ ++static int i915_apply_post_reloc(uint32_t reloc[], ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t num_buffers, ++ struct i915_relocatee_info *relocatee) ++{ ++ uint32_t reloc_buffer = reloc[2]; ++ uint32_t dst_buffer = reloc[3]; ++ uint32_t val; ++ uint32_t index; ++ int ret; ++ ++ if (likely(buffers[reloc_buffer].presumed_offset_correct)) ++ return 0; ++ if (unlikely(reloc_buffer >= num_buffers)) { ++ DRM_ERROR("Invalid reloc buffer index.\n"); ++ return -EINVAL; ++ } ++ if (unlikely(dst_buffer >= num_buffers)) { ++ DRM_ERROR("Invalid dest buffer index.\n"); ++ return -EINVAL; ++ } ++ ++ ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]); ++ if (unlikely(ret)) ++ return ret; ++ ++ val = buffers[reloc_buffer].buffer->offset; ++ index = (reloc[0] - relocatee->page_offset) >> 2; ++ val = val + reloc[1]; ++ ++ if (relocatee->idle == I915_RELOC_BUSY) { ++ i915_emit_ring_reloc(relocatee->buf->dev, ++ relocatee->buf->offset + reloc[0], val); ++ return 0; ++ } ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ relocatee->data_page[index] = val; ++#else ++ if (likely(relocatee->is_iomem)) ++ iowrite32(val, relocatee->data_page + index); ++ else ++ relocatee->data_page[index] = val; ++#endif ++ ++ return 0; ++} ++ ++static int i915_post_relocs(struct drm_file *file_priv, ++ uint32_t __user * new_reloc_ptr, ++ struct drm_i915_validate_buffer *buffers, ++ unsigned int num_buffers) ++{ ++ uint32_t *reloc; ++ uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); ++ uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t); ++ struct i915_relocatee_info relocatee; ++ uint32_t reloc_type; ++ uint32_t num_relocs; ++ uint32_t count; ++ int ret = 0; ++ int i; ++ int short_circuit = 1; ++ uint32_t __user *reloc_ptr; ++ uint64_t new_reloc_data; ++ uint32_t reloc_buf_size; ++ uint32_t *reloc_buf; ++ ++ for (i = 0; i < num_buffers; ++i) { 
++ if (unlikely(!buffers[i].presumed_offset_correct)) { ++ short_circuit = 0; ++ break; ++ } ++ } ++ ++ if (likely(short_circuit)) ++ return 0; ++ ++ memset(&relocatee, 0, sizeof(relocatee)); ++ ++ while (new_reloc_ptr) { ++ reloc_ptr = new_reloc_ptr; ++ ++ ret = get_user(num_relocs, reloc_ptr); ++ if (unlikely(ret)) ++ goto out; ++ if (unlikely(!access_ok(VERIFY_READ, reloc_ptr, ++ header_size + ++ num_relocs * reloc_stride))) ++ return -EFAULT; ++ ++ ret = __get_user(reloc_type, reloc_ptr + 1); ++ if (unlikely(ret)) ++ goto out; ++ ++ if (unlikely(reloc_type != 1)) { ++ DRM_ERROR("Unsupported relocation type requested.\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ret = __get_user(new_reloc_data, reloc_ptr + 2); ++ new_reloc_ptr = (uint32_t __user *) (unsigned long) ++ new_reloc_data; ++ ++ reloc_ptr += I915_RELOC_HEADER; ++ ++ if (num_relocs == 0) ++ goto out; ++ ++ reloc_buf_size = ++ (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t); ++ reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); ++ if (!reloc_buf) { ++ DRM_ERROR("Out of memory for reloc buffer\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ reloc = reloc_buf; ++ preempt_disable(); ++ for (count = 0; count < num_relocs; ++count) { ++ ret = i915_apply_post_reloc(reloc, buffers, ++ num_buffers, &relocatee); ++ if (unlikely(ret)) { ++ preempt_enable(); ++ goto out; ++ } ++ reloc += I915_RELOC0_STRIDE; ++ } ++ preempt_enable(); ++ ++ if (reloc_buf) { ++ kfree(reloc_buf); ++ reloc_buf = NULL; ++ } ++ i915_clear_relocatee(&relocatee); ++ } ++ ++ out: ++ /* ++ * Flush ring relocs so the command parser will pick them up. ++ */ ++ ++ if (relocatee.performed_ring_relocs) ++ (void)i915_emit_mi_flush(file_priv->minor->dev, 0); ++ ++ i915_clear_relocatee(&relocatee); ++ if (reloc_buf) { ++ kfree(reloc_buf); ++ reloc_buf = NULL; ++ } ++ ++ return ret; ++} ++ ++static int i915_check_presumed(struct drm_i915_op_arg *arg, ++ struct drm_buffer_object *bo, ++ uint32_t __user * data, int *presumed_ok) ++{ ++ struct drm_bo_op_req *req = &arg->d.req; ++ uint32_t hint_offset; ++ uint32_t hint = req->bo_req.hint; ++ ++ *presumed_ok = 0; ++ ++ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET)) ++ return 0; ++ if (bo->offset == req->bo_req.presumed_offset) { ++ *presumed_ok = 1; ++ return 0; ++ } ++ ++ /* ++ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in ++ * the user-space IOCTL argument list, since the buffer has moved, ++ * we're about to apply relocations and we might subsequently ++ * hit an -EAGAIN. In that case the argument list will be reused by ++ * user-space, but the presumed offset is no longer valid. ++ * ++ * Needless to say, this is a bit ugly. 
++ */ ++ ++ hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg; ++ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET; ++ return __put_user(hint, data + hint_offset); ++} ++ ++/* ++ * Validate, add fence and relocate a block of bos from a userspace list ++ */ ++int i915_validate_buffer_list(struct drm_file *file_priv, ++ unsigned int fence_class, uint64_t data, ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t * num_buffers, ++ uint32_t __user ** post_relocs) ++{ ++ struct drm_i915_op_arg arg; ++ struct drm_bo_op_req *req = &arg.d.req; ++ int ret = 0; ++ unsigned buf_count = 0; ++ uint32_t buf_handle; ++ uint32_t __user *reloc_user_ptr; ++ struct drm_i915_validate_buffer *item = buffers; ++ *post_relocs = NULL; ++ ++ do { ++ if (buf_count >= *num_buffers) { ++ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers); ++ ret = -EINVAL; ++ goto out_err; ++ } ++ item = buffers + buf_count; ++ item->buffer = NULL; ++ item->presumed_offset_correct = 0; ++ item->idle = I915_RELOC_UNCHECKED; ++ ++ if (copy_from_user ++ (&arg, (void __user *)(unsigned long)data, sizeof(arg))) { ++ ret = -EFAULT; ++ goto out_err; ++ } ++ ++ ret = 0; ++ if (req->op != drm_bo_validate) { ++ DRM_ERROR ++ ("Buffer object operation wasn't \"validate\".\n"); ++ ret = -EINVAL; ++ goto out_err; ++ } ++ item->ret = 0; ++ item->data = (void __user *)(unsigned long)data; ++ ++ buf_handle = req->bo_req.handle; ++ reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr; ++ ++ /* ++ * Switch mode to post-validation relocations? ++ */ ++ ++ if (unlikely((buf_count == 0) && (*post_relocs == NULL) && ++ (reloc_user_ptr != NULL))) { ++ uint32_t reloc_type; ++ ++ ret = get_user(reloc_type, reloc_user_ptr + 1); ++ if (ret) ++ goto out_err; ++ ++ if (reloc_type == 1) ++ *post_relocs = reloc_user_ptr; ++ ++ } ++ ++ if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) { ++ ret = ++ i915_exec_reloc(file_priv, buf_handle, ++ reloc_user_ptr, buffers, buf_count); ++ if (ret) ++ goto out_err; ++ DRM_MEMORYBARRIER(); ++ } ++ ++ ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, ++ req->bo_req.flags, ++ req->bo_req.mask, req->bo_req.hint, ++ req->bo_req.fence_class, ++ NULL, &item->buffer); ++ if (ret) { ++ DRM_ERROR("error on handle validate %d\n", ret); ++ goto out_err; ++ } ++ ++ buf_count++; ++ ++ ret = i915_check_presumed(&arg, item->buffer, ++ (uint32_t __user *) ++ (unsigned long)data, ++ &item->presumed_offset_correct); ++ if (ret) ++ goto out_err; ++ ++ data = arg.next; ++ } while (data != 0); ++ out_err: ++ *num_buffers = buf_count; ++ item->ret = (ret != -EAGAIN) ? ret : 0; ++ return ret; ++} ++ ++/* ++ * Remove all buffers from the unfenced list. ++ * If the execbuffer operation was aborted, for example due to a signal, ++ * this also make sure that buffers retain their original state and ++ * fence pointers. ++ * Copy back buffer information to user-space unless we were interrupted ++ * by a signal. In which case the IOCTL must be rerun. 
++ */ ++ ++static int i915_handle_copyback(struct drm_device *dev, ++ struct drm_i915_validate_buffer *buffers, ++ unsigned int num_buffers, int ret) ++{ ++ int err = ret; ++ int i; ++ struct drm_i915_op_arg arg; ++ struct drm_buffer_object *bo; ++ ++ if (ret) ++ drm_putback_buffer_objects(dev); ++ ++ if (ret != -EAGAIN) { ++ for (i = 0; i < num_buffers; ++i) { ++ arg.handled = 1; ++ arg.d.rep.ret = buffers->ret; ++ bo = buffers->buffer; ++ mutex_lock(&bo->mutex); ++ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info); ++ mutex_unlock(&bo->mutex); ++ if (__copy_to_user(buffers->data, &arg, sizeof(arg))) ++ err = -EFAULT; ++ buffers++; ++ } ++ } ++ ++ return err; ++} ++ ++/* ++ * Create a fence object, and if that fails, pretend that everything is ++ * OK and just idle the GPU. ++ */ ++ ++void i915_fence_or_sync(struct drm_file *file_priv, ++ uint32_t fence_flags, ++ struct drm_fence_arg *fence_arg, ++ struct drm_fence_object **fence_p) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ int ret; ++ struct drm_fence_object *fence; ++ ++ ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence); ++ ++ if (ret) { ++ ++ /* ++ * Fence creation failed. ++ * Fall back to synchronous operation and idle the engine. ++ */ ++ ++ (void)i915_emit_mi_flush(dev, MI_READ_FLUSH); ++ (void)i915_quiescent(dev); ++ ++ if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) { ++ ++ /* ++ * Communicate to user-space that ++ * fence creation has failed and that ++ * the engine is idle. ++ */ ++ ++ fence_arg->handle = ~0; ++ fence_arg->error = ret; ++ } ++ drm_putback_buffer_objects(dev); ++ if (fence_p) ++ *fence_p = NULL; ++ return; ++ } ++ ++ if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) { ++ ++ ret = drm_fence_add_user_object(file_priv, fence, ++ fence_flags & ++ DRM_FENCE_FLAG_SHAREABLE); ++ if (!ret) ++ drm_fence_fill_arg(fence, fence_arg); ++ else { ++ /* ++ * Fence user object creation failed. ++ * We must idle the engine here as well, as user- ++ * space expects a fence object to wait on. Since we ++ * have a fence object we wait for it to signal ++ * to indicate engine "sufficiently" idle. ++ */ ++ ++ (void)drm_fence_object_wait(fence, 0, 1, fence->type); ++ drm_fence_usage_deref_unlocked(&fence); ++ fence_arg->handle = ~0; ++ fence_arg->error = ret; ++ } ++ } ++ ++ if (fence_p) ++ *fence_p = fence; ++ else if (fence) ++ drm_fence_usage_deref_unlocked(&fence); ++} ++ ++int i915_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) ++ dev_priv->sarea_priv; ++ struct drm_i915_execbuffer *exec_buf = data; ++ struct drm_i915_batchbuffer *batch = &exec_buf->batch; ++ struct drm_fence_arg *fence_arg = &exec_buf->fence_arg; ++ int num_buffers; ++ int ret; ++ uint32_t __user *post_relocs; ++ ++ if (!dev_priv->allow_batchbuffer) { ++ DRM_ERROR("Batchbuffer ioctl disabled\n"); ++ return -EINVAL; ++ } ++ ++ if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, ++ batch->num_cliprects * ++ sizeof(struct ++ drm_clip_rect))) ++ return -EFAULT; ++ ++ if (exec_buf->num_buffers > dev_priv->max_validate_buffers) ++ return -EINVAL; ++ ++ ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (ret) ++ return ret; ++ ++ /* ++ * The cmdbuf_mutex makes sure the validate-submit-fence ++ * operation is atomic. 
++ */ ++ ++ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); ++ if (ret) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return -EAGAIN; ++ } ++ ++ num_buffers = exec_buf->num_buffers; ++ ++ if (!dev_priv->val_bufs) { ++ dev_priv->val_bufs = ++ vmalloc(sizeof(struct drm_i915_validate_buffer) * ++ dev_priv->max_validate_buffers); ++ } ++ if (!dev_priv->val_bufs) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ mutex_unlock(&dev_priv->cmdbuf_mutex); ++ return -ENOMEM; ++ } ++ ++ /* validate buffer list + fixup relocations */ ++ ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list, ++ dev_priv->val_bufs, &num_buffers, ++ &post_relocs); ++ if (ret) ++ goto out_err0; ++ ++ if (post_relocs) { ++ ret = i915_post_relocs(file_priv, post_relocs, ++ dev_priv->val_bufs, num_buffers); ++ if (ret) ++ goto out_err0; ++ } ++ ++ /* make sure all previous memory operations have passed */ ++ DRM_MEMORYBARRIER(); ++ ++ if (!post_relocs) { ++ drm_agp_chipset_flush(dev); ++ batch->start = ++ dev_priv->val_bufs[num_buffers - 1].buffer->offset; ++ } else { ++ batch->start += dev_priv->val_bufs[0].buffer->offset; ++ } ++ ++ DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n", ++ batch->start, batch->used, batch->num_cliprects); ++ ++ ret = i915_dispatch_batchbuffer(dev, batch); ++ if (ret) ++ goto out_err0; ++ if (sarea_priv) ++ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL); ++ ++ out_err0: ++ ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret); ++ mutex_lock(&dev->struct_mutex); ++ i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&dev_priv->cmdbuf_mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_fence.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_fence.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_fence.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_fence.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,273 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/* ++ * Initiate a sync flush if it's not already pending. ++ */ ++ ++static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv, ++ struct drm_fence_class_manager *fc) ++{ ++ if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) && ++ !dev_priv->flush_pending) { ++ dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv); ++ dev_priv->flush_flags = fc->pending_flush; ++ dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0); ++ I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); ++ dev_priv->flush_pending = 1; ++ fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW; ++ } ++} ++ ++static inline void i915_report_rwflush(struct drm_device *dev, ++ struct drm_i915_private *dev_priv) ++{ ++ if (unlikely(dev_priv->flush_pending)) { ++ ++ uint32_t flush_flags; ++ uint32_t i_status; ++ uint32_t flush_sequence; ++ ++ i_status = READ_HWSP(dev_priv, 0); ++ if ((i_status & (1 << 12)) != ++ (dev_priv->saved_flush_status & (1 << 12))) { ++ flush_flags = dev_priv->flush_flags; ++ flush_sequence = dev_priv->flush_sequence; ++ dev_priv->flush_pending = 0; ++ drm_fence_handler(dev, 0, flush_sequence, ++ flush_flags, 0); ++ } ++ } ++} ++ ++static void i915_fence_flush(struct drm_device *dev, ++ uint32_t fence_class) ++{ ++ struct drm_i915_private *dev_priv = ++ (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ unsigned long irq_flags; ++ ++ if (unlikely(!dev_priv)) ++ return; ++ ++ write_lock_irqsave(&fm->lock, irq_flags); ++ i915_initiate_rwflush(dev_priv, fc); ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++} ++ ++ ++static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class, ++ uint32_t waiting_types) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ uint32_t sequence; ++ ++ if (unlikely(!dev_priv)) ++ return; ++ ++ /* ++ * First, report any executed sync flush: ++ */ ++ ++ i915_report_rwflush(dev, dev_priv); ++ ++ /* ++ * Report A new breadcrumb, and adjust IRQs. ++ */ ++ ++ if (waiting_types & DRM_FENCE_TYPE_EXE) { ++ ++ sequence = READ_BREADCRUMB(dev_priv); ++ drm_fence_handler(dev, 0, sequence, ++ DRM_FENCE_TYPE_EXE, 0); ++ ++ if (dev_priv->fence_irq_on && ++ !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) { ++ i915_user_irq_off(dev_priv); ++ dev_priv->fence_irq_on = 0; ++ } else if (!dev_priv->fence_irq_on && ++ (fc->waiting_types & DRM_FENCE_TYPE_EXE)) { ++ i915_user_irq_on(dev_priv); ++ dev_priv->fence_irq_on = 1; ++ } ++ } ++ ++ /* ++ * There may be new RW flushes pending. Start them. ++ */ ++ ++ i915_initiate_rwflush(dev_priv, fc); ++ ++ /* ++ * And possibly, but unlikely, they finish immediately. 
++ */ ++ ++ i915_report_rwflush(dev, dev_priv); ++ ++} ++ ++static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, ++ uint32_t flags, uint32_t *sequence, ++ uint32_t *native_type) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ if (unlikely(!dev_priv)) ++ return -EINVAL; ++ ++ i915_emit_irq(dev); ++ *sequence = (uint32_t) dev_priv->counter; ++ *native_type = DRM_FENCE_TYPE_EXE; ++ if (flags & DRM_I915_FENCE_FLAG_FLUSHED) ++ *native_type |= DRM_I915_FENCE_TYPE_RW; ++ ++ return 0; ++} ++ ++void i915_fence_handler(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ ++ write_lock(&fm->lock); ++ if (likely(dev_priv->fence_irq_on)) ++ i915_fence_poll(dev, 0, fc->waiting_types); ++ write_unlock(&fm->lock); ++} ++ ++/* ++ * We need a separate wait function since we need to poll for ++ * sync flushes. ++ */ ++ ++static int i915_fence_wait(struct drm_fence_object *fence, ++ int lazy, int interruptible, uint32_t mask) ++{ ++ struct drm_device *dev = fence->dev; ++ drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ int ret; ++ unsigned long _end = jiffies + 3 * DRM_HZ; ++ ++ drm_fence_object_flush(fence, mask); ++ if (likely(interruptible)) ++ ret = wait_event_interruptible_timeout ++ (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), ++ 3 * DRM_HZ); ++ else ++ ret = wait_event_timeout ++ (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), ++ 3 * DRM_HZ); ++ ++ if (unlikely(ret == -ERESTARTSYS)) ++ return -EAGAIN; ++ ++ if (unlikely(ret == 0)) ++ return -EBUSY; ++ ++ if (likely(mask == DRM_FENCE_TYPE_EXE || ++ drm_fence_object_signaled(fence, mask))) ++ return 0; ++ ++ /* ++ * Remove this code snippet when fixed. HWSTAM doesn't let ++ * flush info through... ++ */ ++ ++ if (unlikely(dev_priv && !dev_priv->irq_enabled)) { ++ unsigned long irq_flags; ++ ++ DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n"); ++ msleep(100); ++ dev_priv->flush_pending = 0; ++ write_lock_irqsave(&fm->lock, irq_flags); ++ drm_fence_handler(dev, fence->fence_class, ++ fence->sequence, fence->type, 0); ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ } ++ ++ /* ++ * Poll for sync flush completion. 
++ */ ++ ++ return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end); ++} ++ ++static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence) ++{ ++ uint32_t flush_flags = fence->waiting_types & ++ ~(DRM_FENCE_TYPE_EXE | fence->signaled_types); ++ ++ if (likely(flush_flags == 0 || ++ ((flush_flags & ~fence->native_types) == 0) || ++ (fence->signaled_types != DRM_FENCE_TYPE_EXE))) ++ return 0; ++ else { ++ struct drm_device *dev = fence->dev; ++ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ ++ if (unlikely(!dev_priv)) ++ return 0; ++ ++ if (dev_priv->flush_pending) { ++ uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & ++ driver->sequence_mask; ++ ++ if (diff < driver->wrap_diff) ++ return 0; ++ } ++ } ++ return flush_flags; ++} ++ ++struct drm_fence_driver i915_fence_driver = { ++ .num_classes = 1, ++ .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), ++ .flush_diff = (1U << (BREADCRUMB_BITS - 2)), ++ .sequence_mask = BREADCRUMB_MASK, ++ .has_irq = NULL, ++ .emit = i915_fence_emit_sequence, ++ .flush = i915_fence_flush, ++ .poll = i915_fence_poll, ++ .needed_flush = i915_fence_needed_flush, ++ .wait = i915_fence_wait, ++}; +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2502 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_compat.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++#include ++ ++static int ++i915_gem_object_set_domain(struct drm_gem_object *obj, ++ uint32_t read_domains, ++ uint32_t write_domain); ++static int ++i915_gem_object_set_domain_range(struct drm_gem_object *obj, ++ uint64_t offset, ++ uint64_t size, ++ uint32_t read_domains, ++ uint32_t write_domain); ++int ++i915_gem_set_domain(struct drm_gem_object *obj, ++ struct drm_file *file_priv, ++ uint32_t read_domains, ++ uint32_t write_domain); ++static int i915_gem_object_get_page_list(struct drm_gem_object *obj); ++static void i915_gem_object_free_page_list(struct drm_gem_object *obj); ++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); ++ ++int ++i915_gem_init_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_init *args = data; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (args->gtt_start >= args->gtt_end || ++ (args->gtt_start & (PAGE_SIZE - 1)) != 0 || ++ (args->gtt_end & (PAGE_SIZE - 1)) != 0) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, ++ args->gtt_end - args->gtt_start); ++ ++ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++ ++/** ++ * Creates a new mm object and returns a handle to it. ++ */ ++int ++i915_gem_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_create *args = data; ++ struct drm_gem_object *obj; ++ int handle, ret; ++ ++ args->size = roundup(args->size, PAGE_SIZE); ++ ++ /* Allocate the new object */ ++ obj = drm_gem_object_alloc(dev, args->size); ++ if (obj == NULL) ++ return -ENOMEM; ++ ++ ret = drm_gem_handle_create(file_priv, obj, &handle); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (ret) ++ return ret; ++ ++ args->handle = handle; ++ ++ return 0; ++} ++ ++/** ++ * Reads data from the object referenced by handle. ++ * ++ * On error, the contents of *data are undefined. ++ */ ++int ++i915_gem_pread_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pread *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ssize_t read; ++ loff_t offset; ++ int ret; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ obj_priv = obj->driver_private; ++ ++ /* Bounds check source. ++ * ++ * XXX: This could use review for overflow issues... 
++ */ ++ if (args->offset > obj->size || args->size > obj->size || ++ args->offset + args->size > obj->size) { ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, ++ I915_GEM_DOMAIN_CPU, 0); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ offset = args->offset; ++ ++ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, ++ args->size, &offset); ++ if (read != args->size) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ if (read < 0) ++ return read; ++ else ++ return -EINVAL; ++ } ++ ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++#include "drm_compat.h" ++ ++static int ++i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, ++ struct drm_i915_gem_pwrite *args, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ssize_t remain; ++ loff_t offset; ++ char __user *user_data; ++ char *vaddr; ++ int i, o, l; ++ int ret = 0; ++ unsigned long pfn; ++ unsigned long unwritten; ++ ++ user_data = (char __user *) (uintptr_t) args->data_ptr; ++ remain = args->size; ++ if (!access_ok(VERIFY_READ, user_data, remain)) ++ return -EFAULT; ++ ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = i915_gem_object_pin(obj, 0); ++ if (ret) { ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ret = i915_gem_set_domain(obj, file_priv, ++ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); ++ if (ret) ++ goto fail; ++ ++ obj_priv = obj->driver_private; ++ offset = obj_priv->gtt_offset + args->offset; ++ obj_priv->dirty = 1; ++ ++ while (remain > 0) { ++ /* Operation in this page ++ * ++ * i = page number ++ * o = offset within page ++ * l = bytes to copy ++ */ ++ i = offset >> PAGE_SHIFT; ++ o = offset & (PAGE_SIZE-1); ++ l = remain; ++ if ((o + l) > PAGE_SIZE) ++ l = PAGE_SIZE - o; ++ ++ pfn = (dev->agp->base >> PAGE_SHIFT) + i; ++ ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels ++ */ ++ vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0, ++ __pgprot(__PAGE_KERNEL)); ++#if WATCH_PWRITE ++ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", ++ i, o, l, pfn, vaddr); ++#endif ++ unwritten = __copy_from_user_inatomic_nocache(vaddr + o, ++ user_data, l); ++ kunmap_atomic(vaddr, KM_USER0); ++ ++ if (unwritten) ++#endif ++ { ++ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); ++#if WATCH_PWRITE ++ DRM_INFO("pwrite slow i %d o %d l %d " ++ "pfn %ld vaddr %p\n", ++ i, o, l, pfn, vaddr); ++#endif ++ if (vaddr == NULL) { ++ ret = -EFAULT; ++ goto fail; ++ } ++ unwritten = __copy_from_user(vaddr + o, user_data, l); ++#if WATCH_PWRITE ++ DRM_INFO("unwritten %ld\n", unwritten); ++#endif ++ iounmap(vaddr); ++ if (unwritten) { ++ ret = -EFAULT; ++ goto fail; ++ } ++ } ++ ++ remain -= l; ++ user_data += l; ++ offset += l; ++ } ++#if WATCH_PWRITE && 1 ++ i915_gem_clflush_object(obj); ++ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); ++ i915_gem_clflush_object(obj); ++#endif ++ ++fail: ++ i915_gem_object_unpin(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++int ++i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, ++ struct drm_i915_gem_pwrite *args, ++ struct drm_file *file_priv) ++{ ++ int ret; ++ loff_t offset; ++ ssize_t written; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ ret = i915_gem_set_domain(obj, 
file_priv, ++ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); ++ if (ret) { ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ offset = args->offset; ++ ++ written = vfs_write(obj->filp, ++ (char __user *)(uintptr_t) args->data_ptr, ++ args->size, &offset); ++ if (written != args->size) { ++ mutex_unlock(&dev->struct_mutex); ++ if (written < 0) ++ return written; ++ else ++ return -EINVAL; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Writes data to the object referenced by handle. ++ * ++ * On error, the contents of the buffer that were to be modified are undefined. ++ */ ++int ++i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pwrite *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret = 0; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ obj_priv = obj->driver_private; ++ ++ /* Bounds check destination. ++ * ++ * XXX: This could use review for overflow issues... ++ */ ++ if (args->offset > obj->size || args->size > obj->size || ++ args->offset + args->size > obj->size) { ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ ++ /* We can only do the GTT pwrite on untiled buffers, as otherwise ++ * it would end up going through the fenced access, and we'll get ++ * different detiling behavior between reading and writing. ++ * pread/pwrite currently are reading and writing from the CPU ++ * perspective, requiring manual detiling by the client. ++ */ ++ if (obj_priv->tiling_mode == I915_TILING_NONE && ++ dev->gtt_total != 0) ++ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); ++ else ++ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); ++ ++#if WATCH_PWRITE ++ if (ret) ++ DRM_INFO("pwrite failed %d\n", ret); ++#endif ++ ++ drm_gem_object_unreference(obj); ++ ++ return ret; ++} ++ ++/** ++ * Called when user space prepares to use an object ++ */ ++int ++i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_set_domain *args = data; ++ struct drm_gem_object *obj; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ ++ mutex_lock(&dev->struct_mutex); ++#if WATCH_BUF ++ DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", ++ obj, obj->size, args->read_domains, args->write_domain); ++#endif ++ ret = i915_gem_set_domain(obj, file_priv, ++ args->read_domains, args->write_domain); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when user space has done writes to this buffer ++ */ ++int ++i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_sw_finish *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret = 0; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ mutex_lock(&dev->struct_mutex); ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ ++#if WATCH_BUF ++ DRM_INFO("%s: sw_finish %d (%p %d)\n", ++ __func__, args->handle, obj, obj->size); ++#endif ++ obj_priv = obj->driver_private; ++ ++ /* Pinned buffers may be scanout, so flush the cache */ ++ if ((obj->write_domain & 
I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { ++ i915_gem_clflush_object(obj); ++ drm_agp_chipset_flush(dev); ++ } ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Maps the contents of an object, returning the address it is mapped ++ * into. ++ * ++ * While the mapping holds a reference on the contents of the object, it doesn't ++ * imply a ref on the object itself. ++ */ ++int ++i915_gem_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_mmap *args = data; ++ struct drm_gem_object *obj; ++ loff_t offset; ++ unsigned long addr; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ ++ offset = args->offset; ++ ++ down_write(¤t->mm->mmap_sem); ++ addr = do_mmap(obj->filp, 0, args->size, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ args->offset); ++ up_write(¤t->mm->mmap_sem); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ if (IS_ERR((void *)addr)) ++ return addr; ++ ++ args->addr_ptr = (uint64_t) addr; ++ ++ return 0; ++} ++ ++static void ++i915_gem_object_free_page_list(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int page_count = obj->size / PAGE_SIZE; ++ int i; ++ ++ if (obj_priv->page_list == NULL) ++ return; ++ ++ ++ for (i = 0; i < page_count; i++) ++ if (obj_priv->page_list[i] != NULL) { ++ if (obj_priv->dirty) ++ set_page_dirty(obj_priv->page_list[i]); ++ mark_page_accessed(obj_priv->page_list[i]); ++ page_cache_release(obj_priv->page_list[i]); ++ } ++ obj_priv->dirty = 0; ++ ++ drm_free(obj_priv->page_list, ++ page_count * sizeof(struct page *), ++ DRM_MEM_DRIVER); ++ obj_priv->page_list = NULL; ++} ++ ++static void ++i915_gem_object_move_to_active(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ /* Add a reference if we're newly entering the active list. */ ++ if (!obj_priv->active) { ++ drm_gem_object_reference(obj); ++ obj_priv->active = 1; ++ } ++ /* Move from whatever list we were on to the tail of execution. */ ++ list_move_tail(&obj_priv->list, ++ &dev_priv->mm.active_list); ++} ++ ++ ++static void ++i915_gem_object_move_to_inactive(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ if (obj_priv->pin_count != 0) ++ list_del_init(&obj_priv->list); ++ else ++ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); ++ ++ if (obj_priv->active) { ++ obj_priv->active = 0; ++ drm_gem_object_unreference(obj); ++ } ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++} ++ ++/** ++ * Creates a new sequence number, emitting a write of it to the status page ++ * plus an interrupt, which will trigger i915_user_interrupt_handler. ++ * ++ * Must be called with struct_lock held. ++ * ++ * Returned sequence numbers are nonzero on success. 
++ */ ++static uint32_t ++i915_add_request(struct drm_device *dev, uint32_t flush_domains) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_request *request; ++ uint32_t seqno; ++ int was_empty; ++ RING_LOCALS; ++ ++ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); ++ if (request == NULL) ++ return 0; ++ ++ /* Grab the seqno we're going to make this request be, and bump the ++ * next (skipping 0 so it can be the reserved no-seqno value). ++ */ ++ seqno = dev_priv->mm.next_gem_seqno; ++ dev_priv->mm.next_gem_seqno++; ++ if (dev_priv->mm.next_gem_seqno == 0) ++ dev_priv->mm.next_gem_seqno++; ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(seqno); ++ ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ ++ DRM_DEBUG("%d\n", seqno); ++ ++ request->seqno = seqno; ++ request->emitted_jiffies = jiffies; ++ request->flush_domains = flush_domains; ++ was_empty = list_empty(&dev_priv->mm.request_list); ++ list_add_tail(&request->list, &dev_priv->mm.request_list); ++ ++ if (was_empty) ++ schedule_delayed_work(&dev_priv->mm.retire_work, HZ); ++ return seqno; ++} ++ ++/** ++ * Command execution barrier ++ * ++ * Ensures that all commands in the ring are finished ++ * before signalling the CPU ++ */ ++uint32_t ++i915_retire_commands(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; ++ uint32_t flush_domains = 0; ++ RING_LOCALS; ++ ++ /* The sampler always gets flushed on i965 (sigh) */ ++ if (IS_I965G(dev)) ++ flush_domains |= I915_GEM_DOMAIN_SAMPLER; ++ BEGIN_LP_RING(2); ++ OUT_RING(cmd); ++ OUT_RING(0); /* noop */ ++ ADVANCE_LP_RING(); ++ return flush_domains; ++} ++ ++/** ++ * Moves buffers associated only with the given active seqno from the active ++ * to inactive list, potentially freeing them. ++ */ ++static void ++i915_gem_retire_request(struct drm_device *dev, ++ struct drm_i915_gem_request *request) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ if (request->flush_domains != 0) { ++ struct drm_i915_gem_object *obj_priv, *next; ++ ++ /* First clear any buffers that were only waiting for a flush ++ * matching the one just retired. ++ */ ++ ++ list_for_each_entry_safe(obj_priv, next, ++ &dev_priv->mm.flushing_list, list) { ++ struct drm_gem_object *obj = obj_priv->obj; ++ ++ if (obj->write_domain & request->flush_domains) { ++ obj->write_domain = 0; ++ i915_gem_object_move_to_inactive(obj); ++ } ++ } ++ ++ } ++ ++ /* Move any buffers on the active list that are no longer referenced ++ * by the ringbuffer to the flushing/inactive lists as appropriate. ++ */ ++ while (!list_empty(&dev_priv->mm.active_list)) { ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj_priv = list_first_entry(&dev_priv->mm.active_list, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ ++ /* If the seqno being retired doesn't match the oldest in the ++ * list, then the oldest in the list must still be newer than ++ * this seqno. 
++ */ ++ if (obj_priv->last_rendering_seqno != request->seqno) ++ return; ++#if WATCH_LRU ++ DRM_INFO("%s: retire %d moves to inactive list %p\n", ++ __func__, request->seqno, obj); ++#endif ++ ++ /* If this request flushes the write domain, ++ * clear the write domain from the object now ++ */ ++ if (request->flush_domains & obj->write_domain) ++ obj->write_domain = 0; ++ ++ if (obj->write_domain != 0) { ++ list_move_tail(&obj_priv->list, ++ &dev_priv->mm.flushing_list); ++ } else { ++ i915_gem_object_move_to_inactive(obj); ++ } ++ } ++} ++ ++/** ++ * Returns true if seq1 is later than seq2. ++ */ ++static int ++i915_seqno_passed(uint32_t seq1, uint32_t seq2) ++{ ++ return (int32_t)(seq1 - seq2) >= 0; ++} ++ ++uint32_t ++i915_get_gem_seqno(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); ++} ++ ++/** ++ * This function clears the request list as sequence numbers are passed. ++ */ ++void ++i915_gem_retire_requests(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t seqno; ++ ++ seqno = i915_get_gem_seqno(dev); ++ ++ while (!list_empty(&dev_priv->mm.request_list)) { ++ struct drm_i915_gem_request *request; ++ uint32_t retiring_seqno; ++ ++ request = list_first_entry(&dev_priv->mm.request_list, ++ struct drm_i915_gem_request, ++ list); ++ retiring_seqno = request->seqno; ++ ++ if (i915_seqno_passed(seqno, retiring_seqno) || ++ dev_priv->mm.wedged) { ++ i915_gem_retire_request(dev, request); ++ ++ list_del(&request->list); ++ drm_free(request, sizeof(*request), DRM_MEM_DRIVER); ++ } else ++ break; ++ } ++} ++ ++void ++i915_gem_retire_work_handler(struct work_struct *work) ++{ ++ drm_i915_private_t *dev_priv; ++ struct drm_device *dev; ++ ++ dev_priv = container_of(work, drm_i915_private_t, ++ mm.retire_work.work); ++ dev = dev_priv->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ i915_gem_retire_requests(dev); ++ if (!list_empty(&dev_priv->mm.request_list)) ++ schedule_delayed_work(&dev_priv->mm.retire_work, HZ); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * Waits for a sequence number to be signaled, and cleans up the ++ * request and object lists appropriately for that event. ++ */ ++int ++i915_wait_request(struct drm_device *dev, uint32_t seqno) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret = 0; ++ ++ BUG_ON(seqno == 0); ++ ++ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { ++ dev_priv->mm.waiting_gem_seqno = seqno; ++ i915_user_irq_on(dev_priv); ++ ret = wait_event_interruptible(dev_priv->irq_queue, ++ i915_seqno_passed(i915_get_gem_seqno(dev), ++ seqno) || ++ dev_priv->mm.wedged); ++ i915_user_irq_off(dev_priv); ++ dev_priv->mm.waiting_gem_seqno = 0; ++ } ++ if (dev_priv->mm.wedged) ++ ret = -EIO; ++ ++ if (ret && ret != -ERESTARTSYS) ++ DRM_ERROR("%s returns %d (awaiting %d at %d)\n", ++ __func__, ret, seqno, i915_get_gem_seqno(dev)); ++ ++ /* Directly dispatch request retiring. While we have the work queue ++ * to handle this, the waiter on a request often wants an associated ++ * buffer to have made it to the inactive list, and we would need ++ * a separate wait queue to handle that. 
++ */ ++ if (ret == 0) ++ i915_gem_retire_requests(dev); ++ ++ return ret; ++} ++ ++static void ++i915_gem_flush(struct drm_device *dev, ++ uint32_t invalidate_domains, ++ uint32_t flush_domains) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t cmd; ++ RING_LOCALS; ++ ++#if WATCH_EXEC ++ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, ++ invalidate_domains, flush_domains); ++#endif ++ ++ if (flush_domains & I915_GEM_DOMAIN_CPU) ++ drm_agp_chipset_flush(dev); ++ ++ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)) { ++ /* ++ * read/write caches: ++ * ++ * I915_GEM_DOMAIN_RENDER is always invalidated, but is ++ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is ++ * also flushed at 2d versus 3d pipeline switches. ++ * ++ * read-only caches: ++ * ++ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if ++ * MI_READ_FLUSH is set, and is always flushed on 965. ++ * ++ * I915_GEM_DOMAIN_COMMAND may not exist? ++ * ++ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is ++ * invalidated when MI_EXE_FLUSH is set. ++ * ++ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is ++ * invalidated with every MI_FLUSH. ++ * ++ * TLBs: ++ * ++ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND ++ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and ++ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER ++ * are flushed at any MI_FLUSH. ++ */ ++ ++ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; ++ if ((invalidate_domains|flush_domains) & ++ I915_GEM_DOMAIN_RENDER) ++ cmd &= ~MI_NO_WRITE_FLUSH; ++ if (!IS_I965G(dev)) { ++ /* ++ * On the 965, the sampler cache always gets flushed ++ * and this bit is reserved. ++ */ ++ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) ++ cmd |= MI_READ_FLUSH; ++ } ++ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) ++ cmd |= MI_EXE_FLUSH; ++ ++#if WATCH_EXEC ++ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); ++#endif ++ BEGIN_LP_RING(2); ++ OUT_RING(cmd); ++ OUT_RING(0); /* noop */ ++ ADVANCE_LP_RING(); ++ } ++} ++ ++/** ++ * Ensures that all rendering to the object has completed and the object is ++ * safe to unbind from the GTT or access from the CPU. ++ */ ++static int ++i915_gem_object_wait_rendering(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int ret; ++ uint32_t write_domain; ++ ++ /* If there are writes queued to the buffer, flush and ++ * create a new seqno to wait for. ++ */ ++ write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT); ++ if (write_domain) { ++#if WATCH_BUF ++ DRM_INFO("%s: flushing object %p from write domain %08x\n", ++ __func__, obj, write_domain); ++#endif ++ i915_gem_flush(dev, 0, write_domain); ++ ++ i915_gem_object_move_to_active(obj); ++ obj_priv->last_rendering_seqno = i915_add_request(dev, ++ write_domain); ++ BUG_ON(obj_priv->last_rendering_seqno == 0); ++#if WATCH_LRU ++ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); ++#endif ++ } ++ ++ /* If there is rendering queued on the buffer being evicted, wait for ++ * it. ++ */ ++ if (obj_priv->active) { ++#if WATCH_BUF ++ DRM_INFO("%s: object %p wait for seqno %08x\n", ++ __func__, obj, obj_priv->last_rendering_seqno); ++#endif ++ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); ++ if (ret != 0) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++/** ++ * Unbinds an object from the GTT aperture. 
++ */ ++static int ++i915_gem_object_unbind(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int ret = 0; ++ ++#if WATCH_BUF ++ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); ++ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); ++#endif ++ if (obj_priv->gtt_space == NULL) ++ return 0; ++ ++ if (obj_priv->pin_count != 0) { ++ DRM_ERROR("Attempting to unbind pinned buffer\n"); ++ return -EINVAL; ++ } ++ ++ /* Wait for any rendering to complete ++ */ ++ ret = i915_gem_object_wait_rendering(obj); ++ if (ret) { ++ DRM_ERROR("wait_rendering failed: %d\n", ret); ++ return ret; ++ } ++ ++ /* Move the object to the CPU domain to ensure that ++ * any possible CPU writes while it's not in the GTT ++ * are flushed when we go to remap it. This will ++ * also ensure that all pending GPU writes are finished ++ * before we unbind. ++ */ ++ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, ++ I915_GEM_DOMAIN_CPU); ++ if (ret) { ++ DRM_ERROR("set_domain failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (obj_priv->agp_mem != NULL) { ++ drm_unbind_agp(obj_priv->agp_mem); ++ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); ++ obj_priv->agp_mem = NULL; ++ } ++ ++ BUG_ON(obj_priv->active); ++ ++ i915_gem_object_free_page_list(obj); ++ ++ if (obj_priv->gtt_space) { ++ atomic_dec(&dev->gtt_count); ++ atomic_sub(obj->size, &dev->gtt_memory); ++ ++ drm_mm_put_block(obj_priv->gtt_space); ++ obj_priv->gtt_space = NULL; ++ } ++ ++ /* Remove ourselves from the LRU list if present. */ ++ if (!list_empty(&obj_priv->list)) ++ list_del_init(&obj_priv->list); ++ ++ return 0; ++} ++ ++static int ++i915_gem_evict_something(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret = 0; ++ ++ for (;;) { ++ /* If there's an inactive buffer available now, grab it ++ * and be done. ++ */ ++ if (!list_empty(&dev_priv->mm.inactive_list)) { ++ obj_priv = list_first_entry(&dev_priv->mm.inactive_list, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ BUG_ON(obj_priv->pin_count != 0); ++#if WATCH_LRU ++ DRM_INFO("%s: evicting %p\n", __func__, obj); ++#endif ++ BUG_ON(obj_priv->active); ++ ++ /* Wait on the rendering and unbind the buffer. */ ++ ret = i915_gem_object_unbind(obj); ++ break; ++ } ++ ++ /* If we didn't get anything, but the ring is still processing ++ * things, wait for one of those things to finish and hopefully ++ * leave us a buffer to evict. ++ */ ++ if (!list_empty(&dev_priv->mm.request_list)) { ++ struct drm_i915_gem_request *request; ++ ++ request = list_first_entry(&dev_priv->mm.request_list, ++ struct drm_i915_gem_request, ++ list); ++ ++ ret = i915_wait_request(dev, request->seqno); ++ if (ret) ++ break; ++ ++ /* if waiting caused an object to become inactive, ++ * then loop around and wait for it. Otherwise, we ++ * assume that waiting freed and unbound something, ++ * so there should now be some space in the GTT ++ */ ++ if (!list_empty(&dev_priv->mm.inactive_list)) ++ continue; ++ break; ++ } ++ ++ /* If we didn't have anything on the request list but there ++ * are buffers awaiting a flush, emit one and try again. ++ * When we wait on it, those buffers waiting for that flush ++ * will get moved to inactive. 
++ */ ++ if (!list_empty(&dev_priv->mm.flushing_list)) { ++ obj_priv = list_first_entry(&dev_priv->mm.flushing_list, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ ++ i915_gem_flush(dev, ++ obj->write_domain, ++ obj->write_domain); ++ i915_add_request(dev, obj->write_domain); ++ ++ obj = NULL; ++ continue; ++ } ++ ++ DRM_ERROR("inactive empty %d request empty %d " ++ "flushing empty %d\n", ++ list_empty(&dev_priv->mm.inactive_list), ++ list_empty(&dev_priv->mm.request_list), ++ list_empty(&dev_priv->mm.flushing_list)); ++ /* If we didn't do any of the above, there's nothing to be done ++ * and we just can't fit it in. ++ */ ++ return -ENOMEM; ++ } ++ return ret; ++} ++ ++static int ++i915_gem_object_get_page_list(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int page_count, i; ++ struct address_space *mapping; ++ struct inode *inode; ++ struct page *page; ++ int ret; ++ ++ if (obj_priv->page_list) ++ return 0; ++ ++ /* Get the list of pages out of our struct file. They'll be pinned ++ * at this point until we release them. ++ */ ++ page_count = obj->size / PAGE_SIZE; ++ BUG_ON(obj_priv->page_list != NULL); ++ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), ++ DRM_MEM_DRIVER); ++ if (obj_priv->page_list == NULL) { ++ DRM_ERROR("Faled to allocate page list\n"); ++ return -ENOMEM; ++ } ++ ++ inode = obj->filp->f_path.dentry->d_inode; ++ mapping = inode->i_mapping; ++ for (i = 0; i < page_count; i++) { ++ page = read_mapping_page(mapping, i, NULL); ++ if (IS_ERR(page)) { ++ ret = PTR_ERR(page); ++ DRM_ERROR("read_mapping_page failed: %d\n", ret); ++ i915_gem_object_free_page_list(obj); ++ return ret; ++ } ++ obj_priv->page_list[i] = page; ++ } ++ return 0; ++} ++ ++/** ++ * Finds free space in the GTT aperture and binds the object there. ++ */ ++static int ++i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_mm_node *free_space; ++ int page_count, ret; ++ ++ if (alignment == 0) ++ alignment = PAGE_SIZE; ++ if (alignment & (PAGE_SIZE - 1)) { ++ DRM_ERROR("Invalid object alignment requested %u\n", alignment); ++ return -EINVAL; ++ } ++ ++ search_free: ++ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, ++ obj->size, alignment, 0); ++ if (free_space != NULL) { ++ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, ++ alignment); ++ if (obj_priv->gtt_space != NULL) { ++ obj_priv->gtt_space->private = obj; ++ obj_priv->gtt_offset = obj_priv->gtt_space->start; ++ } ++ } ++ if (obj_priv->gtt_space == NULL) { ++ /* If the gtt is empty and we're still having trouble ++ * fitting our object in, we're out of memory. 
++ */ ++#if WATCH_LRU ++ DRM_INFO("%s: GTT full, evicting something\n", __func__); ++#endif ++ if (list_empty(&dev_priv->mm.inactive_list) && ++ list_empty(&dev_priv->mm.flushing_list) && ++ list_empty(&dev_priv->mm.active_list)) { ++ DRM_ERROR("GTT full, but LRU list empty\n"); ++ return -ENOMEM; ++ } ++ ++ ret = i915_gem_evict_something(dev); ++ if (ret != 0) { ++ DRM_ERROR("Failed to evict a buffer %d\n", ret); ++ return ret; ++ } ++ goto search_free; ++ } ++ ++#if WATCH_BUF ++ DRM_INFO("Binding object of size %d at 0x%08x\n", ++ obj->size, obj_priv->gtt_offset); ++#endif ++ ret = i915_gem_object_get_page_list(obj); ++ if (ret) { ++ drm_mm_put_block(obj_priv->gtt_space); ++ obj_priv->gtt_space = NULL; ++ return ret; ++ } ++ ++ page_count = obj->size / PAGE_SIZE; ++ /* Create an AGP memory structure pointing at our pages, and bind it ++ * into the GTT. ++ */ ++ obj_priv->agp_mem = drm_agp_bind_pages(dev, ++ obj_priv->page_list, ++ page_count, ++ obj_priv->gtt_offset); ++ if (obj_priv->agp_mem == NULL) { ++ i915_gem_object_free_page_list(obj); ++ drm_mm_put_block(obj_priv->gtt_space); ++ obj_priv->gtt_space = NULL; ++ return -ENOMEM; ++ } ++ atomic_inc(&dev->gtt_count); ++ atomic_add(obj->size, &dev->gtt_memory); ++ ++ /* Assert that the object is not currently in any GPU domain. As it ++ * wasn't in the GTT, there shouldn't be any way it could have been in ++ * a GPU cache ++ */ ++ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); ++ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); ++ ++ return 0; ++} ++ ++void ++i915_gem_clflush_object(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ /* If we don't have a page list set up, then we're not pinned ++ * to GPU, and we can ignore the cache flush because it'll happen ++ * again at bind time. ++ */ ++ if (obj_priv->page_list == NULL) ++ return; ++ ++ drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE); ++} ++ ++/* ++ * Set the next domain for the specified object. This ++ * may not actually perform the necessary flushing/invaliding though, ++ * as that may want to be batched with other set_domain operations ++ * ++ * This is (we hope) the only really tricky part of gem. The goal ++ * is fairly simple -- track which caches hold bits of the object ++ * and make sure they remain coherent. A few concrete examples may ++ * help to explain how it works. For shorthand, we use the notation ++ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the ++ * a pair of read and write domain masks. ++ * ++ * Case 1: the batch buffer ++ * ++ * 1. Allocated ++ * 2. Written by CPU ++ * 3. Mapped to GTT ++ * 4. Read by GPU ++ * 5. Unmapped from GTT ++ * 6. Freed ++ * ++ * Let's take these a step at a time ++ * ++ * 1. Allocated ++ * Pages allocated from the kernel may still have ++ * cache contents, so we set them to (CPU, CPU) always. ++ * 2. Written by CPU (using pwrite) ++ * The pwrite function calls set_domain (CPU, CPU) and ++ * this function does nothing (as nothing changes) ++ * 3. Mapped by GTT ++ * This function asserts that the object is not ++ * currently in any GPU-based read or write domains ++ * 4. Read by GPU ++ * i915_gem_execbuffer calls set_domain (COMMAND, 0). ++ * As write_domain is zero, this function adds in the ++ * current read domains (CPU+COMMAND, 0). ++ * flush_domains is set to CPU. 
++ * invalidate_domains is set to COMMAND ++ * clflush is run to get data out of the CPU caches ++ * then i915_dev_set_domain calls i915_gem_flush to ++ * emit an MI_FLUSH and drm_agp_chipset_flush ++ * 5. Unmapped from GTT ++ * i915_gem_object_unbind calls set_domain (CPU, CPU) ++ * flush_domains and invalidate_domains end up both zero ++ * so no flushing/invalidating happens ++ * 6. Freed ++ * yay, done ++ * ++ * Case 2: The shared render buffer ++ * ++ * 1. Allocated ++ * 2. Mapped to GTT ++ * 3. Read/written by GPU ++ * 4. set_domain to (CPU,CPU) ++ * 5. Read/written by CPU ++ * 6. Read/written by GPU ++ * ++ * 1. Allocated ++ * Same as last example, (CPU, CPU) ++ * 2. Mapped to GTT ++ * Nothing changes (assertions find that it is not in the GPU) ++ * 3. Read/written by GPU ++ * execbuffer calls set_domain (RENDER, RENDER) ++ * flush_domains gets CPU ++ * invalidate_domains gets GPU ++ * clflush (obj) ++ * MI_FLUSH and drm_agp_chipset_flush ++ * 4. set_domain (CPU, CPU) ++ * flush_domains gets GPU ++ * invalidate_domains gets CPU ++ * wait_rendering (obj) to make sure all drawing is complete. ++ * This will include an MI_FLUSH to get the data from GPU ++ * to memory ++ * clflush (obj) to invalidate the CPU cache ++ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) ++ * 5. Read/written by CPU ++ * cache lines are loaded and dirtied ++ * 6. Read written by GPU ++ * Same as last GPU access ++ * ++ * Case 3: The constant buffer ++ * ++ * 1. Allocated ++ * 2. Written by CPU ++ * 3. Read by GPU ++ * 4. Updated (written) by CPU again ++ * 5. Read by GPU ++ * ++ * 1. Allocated ++ * (CPU, CPU) ++ * 2. Written by CPU ++ * (CPU, CPU) ++ * 3. Read by GPU ++ * (CPU+RENDER, 0) ++ * flush_domains = CPU ++ * invalidate_domains = RENDER ++ * clflush (obj) ++ * MI_FLUSH ++ * drm_agp_chipset_flush ++ * 4. Updated (written) by CPU again ++ * (CPU, CPU) ++ * flush_domains = 0 (no previous write domain) ++ * invalidate_domains = 0 (no new read domains) ++ * 5. Read by GPU ++ * (CPU+RENDER, 0) ++ * flush_domains = CPU ++ * invalidate_domains = RENDER ++ * clflush (obj) ++ * MI_FLUSH ++ * drm_agp_chipset_flush ++ */ ++static int ++i915_gem_object_set_domain(struct drm_gem_object *obj, ++ uint32_t read_domains, ++ uint32_t write_domain) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ uint32_t invalidate_domains = 0; ++ uint32_t flush_domains = 0; ++ int ret; ++ ++#if WATCH_BUF ++ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", ++ __func__, obj, ++ obj->read_domains, read_domains, ++ obj->write_domain, write_domain); ++#endif ++ /* ++ * If the object isn't moving to a new write domain, ++ * let the object stay in multiple read domains ++ */ ++ if (write_domain == 0) ++ read_domains |= obj->read_domains; ++ else ++ obj_priv->dirty = 1; ++ ++ /* ++ * Flush the current write domain if ++ * the new read domains don't match. Invalidate ++ * any read domains which differ from the old ++ * write domain ++ */ ++ if (obj->write_domain && obj->write_domain != read_domains) { ++ flush_domains |= obj->write_domain; ++ invalidate_domains |= read_domains & ~obj->write_domain; ++ } ++ /* ++ * Invalidate any read caches which may have ++ * stale data. That is, any new read domains. 
++ */ ++ invalidate_domains |= read_domains & ~obj->read_domains; ++ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { ++#if WATCH_BUF ++ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", ++ __func__, flush_domains, invalidate_domains); ++#endif ++ /* ++ * If we're invaliding the CPU cache and flushing a GPU cache, ++ * then pause for rendering so that the GPU caches will be ++ * flushed before the cpu cache is invalidated ++ */ ++ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && ++ (flush_domains & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT))) { ++ ret = i915_gem_object_wait_rendering(obj); ++ if (ret) ++ return ret; ++ } ++ i915_gem_clflush_object(obj); ++ } ++ ++ if ((write_domain | flush_domains) != 0) ++ obj->write_domain = write_domain; ++ ++ /* If we're invalidating the CPU domain, clear the per-page CPU ++ * domain list as well. ++ */ ++ if (obj_priv->page_cpu_valid != NULL && ++ (obj->read_domains & I915_GEM_DOMAIN_CPU) && ++ ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) { ++ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); ++ } ++ obj->read_domains = read_domains; ++ ++ dev->invalidate_domains |= invalidate_domains; ++ dev->flush_domains |= flush_domains; ++#if WATCH_BUF ++ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", ++ __func__, ++ obj->read_domains, obj->write_domain, ++ dev->invalidate_domains, dev->flush_domains); ++#endif ++ return 0; ++} ++ ++/** ++ * Set the read/write domain on a range of the object. ++ * ++ * Currently only implemented for CPU reads, otherwise drops to normal ++ * i915_gem_object_set_domain(). ++ */ ++static int ++i915_gem_object_set_domain_range(struct drm_gem_object *obj, ++ uint64_t offset, ++ uint64_t size, ++ uint32_t read_domains, ++ uint32_t write_domain) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int ret, i; ++ ++ if (obj->read_domains & I915_GEM_DOMAIN_CPU) ++ return 0; ++ ++ if (read_domains != I915_GEM_DOMAIN_CPU || ++ write_domain != 0) ++ return i915_gem_object_set_domain(obj, ++ read_domains, write_domain); ++ ++ /* Wait on any GPU rendering to the object to be flushed. */ ++ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { ++ ret = i915_gem_object_wait_rendering(obj); ++ if (ret) ++ return ret; ++ } ++ ++ if (obj_priv->page_cpu_valid == NULL) { ++ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, ++ DRM_MEM_DRIVER); ++ } ++ ++ /* Flush the cache on any pages that are still invalid from the CPU's ++ * perspective. ++ */ ++ for (i = offset / PAGE_SIZE; i < (offset + size - 1) / PAGE_SIZE; i++) { ++ if (obj_priv->page_cpu_valid[i]) ++ continue; ++ ++ drm_ttm_cache_flush(obj_priv->page_list + i, 1); ++ ++ obj_priv->page_cpu_valid[i] = 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * Once all of the objects have been set in the proper domain, ++ * perform the necessary flush and invalidate operations. ++ * ++ * Returns the write domains flushed, for use in flush tracking. 
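/*
 * Editorial sketch, not part of the patch: the flush/invalidate
 * bookkeeping in i915_gem_object_set_domain() above is easier to see
 * with the masking pulled out on its own.  compute_domain_flush() is a
 * hypothetical name used only for illustration.
 */
static void compute_domain_flush(uint32_t old_read, uint32_t old_write,
				 uint32_t new_read, uint32_t new_write,
				 uint32_t *flush, uint32_t *invalidate)
{
	*flush = 0;
	*invalidate = 0;

	if (new_write == 0)
		new_read |= old_read;		/* keep existing readers */

	if (old_write && old_write != new_read) {
		*flush |= old_write;		/* old writer's cache must be flushed */
		*invalidate |= new_read & ~old_write;
	}
	*invalidate |= new_read & ~old_read;	/* brand-new readers start out stale */
}
/*
 * Worked example (Case 1 from the comment above): a batch buffer written
 * by the CPU and then read as COMMAND goes from (CPU, CPU) to
 * (CPU+COMMAND, 0), giving flush = CPU and invalidate = COMMAND.
 */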
++ */ ++static uint32_t ++i915_gem_dev_set_domain(struct drm_device *dev) ++{ ++ uint32_t flush_domains = dev->flush_domains; ++ ++ /* ++ * Now that all the buffers are synced to the proper domains, ++ * flush and invalidate the collected domains ++ */ ++ if (dev->invalidate_domains | dev->flush_domains) { ++#if WATCH_EXEC ++ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", ++ __func__, ++ dev->invalidate_domains, ++ dev->flush_domains); ++#endif ++ i915_gem_flush(dev, ++ dev->invalidate_domains, ++ dev->flush_domains); ++ dev->invalidate_domains = 0; ++ dev->flush_domains = 0; ++ } ++ ++ return flush_domains; ++} ++ ++/** ++ * Pin an object to the GTT and evaluate the relocations landing in it. ++ */ ++static int ++i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, ++ struct drm_file *file_priv, ++ struct drm_i915_gem_exec_object *entry) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_relocation_entry reloc; ++ struct drm_i915_gem_relocation_entry __user *relocs; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int i, ret; ++ uint32_t last_reloc_offset = -1; ++ void *reloc_page = NULL; ++ ++ /* Choose the GTT offset for our buffer and put it there. */ ++ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); ++ if (ret) ++ return ret; ++ ++ entry->offset = obj_priv->gtt_offset; ++ ++ relocs = (struct drm_i915_gem_relocation_entry __user *) ++ (uintptr_t) entry->relocs_ptr; ++ /* Apply the relocations, using the GTT aperture to avoid cache ++ * flushing requirements. ++ */ ++ for (i = 0; i < entry->relocation_count; i++) { ++ struct drm_gem_object *target_obj; ++ struct drm_i915_gem_object *target_obj_priv; ++ uint32_t reloc_val, reloc_offset, *reloc_entry; ++ int ret; ++ ++ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); ++ if (ret != 0) { ++ i915_gem_object_unpin(obj); ++ return ret; ++ } ++ ++ target_obj = drm_gem_object_lookup(obj->dev, file_priv, ++ reloc.target_handle); ++ if (target_obj == NULL) { ++ i915_gem_object_unpin(obj); ++ return -EBADF; ++ } ++ target_obj_priv = target_obj->driver_private; ++ ++ /* The target buffer should have appeared before us in the ++ * exec_object list, so it should have a GTT space bound by now. 
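/*
 * Editorial sketch, not part of the patch: the relocation loop in
 * i915_gem_object_pin_and_relocate() (continued below) consumes entries
 * that user space fills in roughly like this.  Field names follow
 * struct drm_i915_gem_relocation_entry from i915_drm.h;
 * target_bo_handle, dword_index and last_known_offset are hypothetical
 * caller-side variables.
 */
struct drm_i915_gem_relocation_entry reloc_example = {
	.target_handle   = target_bo_handle,	/* GEM handle being pointed at */
	.delta           = 0,			/* byte offset inside the target */
	.offset          = dword_index * 4,	/* dword in this buffer to patch */
	.presumed_offset = last_known_offset,	/* kernel skips the write if unchanged */
	.read_domains    = I915_GEM_DOMAIN_RENDER,
	.write_domain    = 0,
};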
++ */ ++ if (target_obj_priv->gtt_space == NULL) { ++ DRM_ERROR("No GTT space found for object %d\n", ++ reloc.target_handle); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ ++ if (reloc.offset > obj->size - 4) { ++ DRM_ERROR("Relocation beyond object bounds: " ++ "obj %p target %d offset %d size %d.\n", ++ obj, reloc.target_handle, ++ (int) reloc.offset, (int) obj->size); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ if (reloc.offset & 3) { ++ DRM_ERROR("Relocation not 4-byte aligned: " ++ "obj %p target %d offset %d.\n", ++ obj, reloc.target_handle, ++ (int) reloc.offset); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ ++ if (reloc.write_domain && target_obj->pending_write_domain && ++ reloc.write_domain != target_obj->pending_write_domain) { ++ DRM_ERROR("Write domain conflict: " ++ "obj %p target %d offset %d " ++ "new %08x old %08x\n", ++ obj, reloc.target_handle, ++ (int) reloc.offset, ++ reloc.write_domain, ++ target_obj->pending_write_domain); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ ++#if WATCH_RELOC ++ DRM_INFO("%s: obj %p offset %08x target %d " ++ "read %08x write %08x gtt %08x " ++ "presumed %08x delta %08x\n", ++ __func__, ++ obj, ++ (int) reloc.offset, ++ (int) reloc.target_handle, ++ (int) reloc.read_domains, ++ (int) reloc.write_domain, ++ (int) target_obj_priv->gtt_offset, ++ (int) reloc.presumed_offset, ++ reloc.delta); ++#endif ++ ++ target_obj->pending_read_domains |= reloc.read_domains; ++ target_obj->pending_write_domain |= reloc.write_domain; ++ ++ /* If the relocation already has the right value in it, no ++ * more work needs to be done. ++ */ ++ if (target_obj_priv->gtt_offset == reloc.presumed_offset) { ++ drm_gem_object_unreference(target_obj); ++ continue; ++ } ++ ++ /* Now that we're going to actually write some data in, ++ * make sure that any rendering using this buffer's contents ++ * is completed. ++ */ ++ i915_gem_object_wait_rendering(obj); ++ ++ /* As we're writing through the gtt, flush ++ * any CPU writes before we write the relocations ++ */ ++ if (obj->write_domain & I915_GEM_DOMAIN_CPU) { ++ i915_gem_clflush_object(obj); ++ drm_agp_chipset_flush(dev); ++ obj->write_domain = 0; ++ } ++ ++ /* Map the page containing the relocation we're going to ++ * perform. ++ */ ++ reloc_offset = obj_priv->gtt_offset + reloc.offset; ++ if (reloc_page == NULL || ++ (last_reloc_offset & ~(PAGE_SIZE - 1)) != ++ (reloc_offset & ~(PAGE_SIZE - 1))) { ++ if (reloc_page != NULL) ++ iounmap(reloc_page); ++ ++ reloc_page = ioremap(dev->agp->base + ++ (reloc_offset & ~(PAGE_SIZE - 1)), ++ PAGE_SIZE); ++ last_reloc_offset = reloc_offset; ++ if (reloc_page == NULL) { ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -ENOMEM; ++ } ++ } ++ ++ reloc_entry = (uint32_t *)((char *)reloc_page + ++ (reloc_offset & (PAGE_SIZE - 1))); ++ reloc_val = target_obj_priv->gtt_offset + reloc.delta; ++ ++#if WATCH_BUF ++ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", ++ obj, (unsigned int) reloc.offset, ++ readl(reloc_entry), reloc_val); ++#endif ++ writel(reloc_val, reloc_entry); ++ ++ /* Write the updated presumed offset for this entry back out ++ * to the user. 
++ */ ++ reloc.presumed_offset = target_obj_priv->gtt_offset; ++ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); ++ if (ret != 0) { ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return ret; ++ } ++ ++ drm_gem_object_unreference(target_obj); ++ } ++ ++ if (reloc_page != NULL) ++ iounmap(reloc_page); ++ ++#if WATCH_BUF ++ if (0) ++ i915_gem_dump_object(obj, 128, __func__, ~0); ++#endif ++ return 0; ++} ++ ++/** Dispatch a batchbuffer to the ring ++ */ ++static int ++i915_dispatch_gem_execbuffer(struct drm_device *dev, ++ struct drm_i915_gem_execbuffer *exec, ++ uint64_t exec_offset) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) ++ (uintptr_t) exec->cliprects_ptr; ++ int nbox = exec->num_cliprects; ++ int i = 0, count; ++ uint32_t exec_start, exec_len; ++ RING_LOCALS; ++ ++ exec_start = (uint32_t) exec_offset + exec->batch_start_offset; ++ exec_len = (uint32_t) exec->batch_len; ++ ++ if ((exec_start | exec_len) & 0x7) { ++ DRM_ERROR("alignment\n"); ++ return -EINVAL; ++ } ++ ++ if (!exec_start) ++ return -EINVAL; ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ int ret = i915_emit_box(dev, boxes, i, ++ exec->DR1, exec->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ if (IS_I830(dev) || IS_845G(dev)) { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_BATCH_BUFFER); ++ OUT_RING(exec_start | MI_BATCH_NON_SECURE); ++ OUT_RING(exec_start + exec_len - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(2); ++ if (IS_I965G(dev)) { ++ OUT_RING(MI_BATCH_BUFFER_START | ++ (2 << 6) | ++ MI_BATCH_NON_SECURE_I965); ++ OUT_RING(exec_start); ++ } else { ++ OUT_RING(MI_BATCH_BUFFER_START | ++ (2 << 6)); ++ OUT_RING(exec_start | MI_BATCH_NON_SECURE); ++ } ++ ADVANCE_LP_RING(); ++ } ++ } ++ ++ /* XXX breadcrumb */ ++ return 0; ++} ++ ++/* Throttle our rendering by waiting until the ring has completed our requests ++ * emitted over 20 msec ago. ++ * ++ * This should get us reasonable parallelism between CPU and GPU but also ++ * relatively low latency when blocking on a particular request to finish. 
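/*
 * Editorial sketch, not part of the patch: a minimal user-space call into
 * the execbuffer path implemented by i915_gem_execbuffer() below,
 * assuming the classic DRM_IOCTL_I915_GEM_EXECBUFFER interface from
 * i915_drm.h.  fd, exec_objects, nr_exec_objects and batch_bytes are
 * hypothetical caller-side values; the batch buffer must be the last
 * entry in exec_objects[], and start/length must be 8-byte aligned per
 * the check in i915_dispatch_gem_execbuffer() above.
 */
struct drm_i915_gem_execbuffer execbuf_example = {
	.buffers_ptr        = (uint64_t)(uintptr_t)exec_objects,
	.buffer_count       = nr_exec_objects,
	.batch_start_offset = 0,
	.batch_len          = batch_bytes,
	.num_cliprects      = 0,
	.cliprects_ptr      = 0,
};
int execbuf_ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf_example);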
++ */ ++static int ++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; ++ int ret = 0; ++ uint32_t seqno; ++ ++ mutex_lock(&dev->struct_mutex); ++ seqno = i915_file_priv->mm.last_gem_throttle_seqno; ++ i915_file_priv->mm.last_gem_throttle_seqno = ++ i915_file_priv->mm.last_gem_seqno; ++ if (seqno) ++ ret = i915_wait_request(dev, seqno); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int ++i915_gem_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; ++ struct drm_i915_gem_execbuffer *args = data; ++ struct drm_i915_gem_exec_object *exec_list = NULL; ++ struct drm_gem_object **object_list = NULL; ++ struct drm_gem_object *batch_obj; ++ int ret, i, pinned = 0; ++ uint64_t exec_offset; ++ uint32_t seqno, flush_domains; ++ ++#if WATCH_EXEC ++ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", ++ (int) args->buffers_ptr, args->buffer_count, args->batch_len); ++#endif ++ ++ /* Copy in the exec list from userland */ ++ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, ++ DRM_MEM_DRIVER); ++ object_list = drm_calloc(sizeof(*object_list), args->buffer_count, ++ DRM_MEM_DRIVER); ++ if (exec_list == NULL || object_list == NULL) { ++ DRM_ERROR("Failed to allocate exec or object list " ++ "for %d buffers\n", ++ args->buffer_count); ++ ret = -ENOMEM; ++ goto pre_mutex_err; ++ } ++ ret = copy_from_user(exec_list, ++ (struct drm_i915_relocation_entry __user *) ++ (uintptr_t) args->buffers_ptr, ++ sizeof(*exec_list) * args->buffer_count); ++ if (ret != 0) { ++ DRM_ERROR("copy %d exec entries failed %d\n", ++ args->buffer_count, ret); ++ goto pre_mutex_err; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ if (dev_priv->mm.wedged) { ++ DRM_ERROR("Execbuf while wedged\n"); ++ mutex_unlock(&dev->struct_mutex); ++ return -EIO; ++ } ++ ++ if (dev_priv->mm.suspended) { ++ DRM_ERROR("Execbuf while VT-switched.\n"); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBUSY; ++ } ++ ++ /* Zero the gloabl flush/invalidate flags. 
These ++ * will be modified as each object is bound to the ++ * gtt ++ */ ++ dev->invalidate_domains = 0; ++ dev->flush_domains = 0; ++ ++ /* Look up object handles and perform the relocations */ ++ for (i = 0; i < args->buffer_count; i++) { ++ object_list[i] = drm_gem_object_lookup(dev, file_priv, ++ exec_list[i].handle); ++ if (object_list[i] == NULL) { ++ DRM_ERROR("Invalid object handle %d at index %d\n", ++ exec_list[i].handle, i); ++ ret = -EBADF; ++ goto err; ++ } ++ ++ object_list[i]->pending_read_domains = 0; ++ object_list[i]->pending_write_domain = 0; ++ ret = i915_gem_object_pin_and_relocate(object_list[i], ++ file_priv, ++ &exec_list[i]); ++ if (ret) { ++ DRM_ERROR("object bind and relocate failed %d\n", ret); ++ goto err; ++ } ++ pinned = i + 1; ++ } ++ ++ /* Set the pending read domains for the batch buffer to COMMAND */ ++ batch_obj = object_list[args->buffer_count-1]; ++ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; ++ batch_obj->pending_write_domain = 0; ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ for (i = 0; i < args->buffer_count; i++) { ++ struct drm_gem_object *obj = object_list[i]; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ if (obj_priv->gtt_space == NULL) { ++ /* We evicted the buffer in the process of validating ++ * our set of buffers in. We could try to recover by ++ * kicking them everything out and trying again from ++ * the start. ++ */ ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ /* make sure all previous memory operations have passed */ ++ ret = i915_gem_object_set_domain(obj, ++ obj->pending_read_domains, ++ obj->pending_write_domain); ++ if (ret) ++ goto err; ++ } ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ /* Flush/invalidate caches and chipset buffer */ ++ flush_domains = i915_gem_dev_set_domain(dev); ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++#if WATCH_COHERENCY ++ for (i = 0; i < args->buffer_count; i++) { ++ i915_gem_object_check_coherency(object_list[i], ++ exec_list[i].handle); ++ } ++#endif ++ ++ exec_offset = exec_list[args->buffer_count - 1].offset; ++ ++#if WATCH_EXEC ++ i915_gem_dump_object(object_list[args->buffer_count - 1], ++ args->batch_len, ++ __func__, ++ ~0); ++#endif ++ ++ /* Exec the batchbuffer */ ++ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); ++ if (ret) { ++ DRM_ERROR("dispatch failed %d\n", ret); ++ goto err; ++ } ++ ++ /* ++ * Ensure that the commands in the batch buffer are ++ * finished before the interrupt fires ++ */ ++ flush_domains |= i915_retire_commands(dev); ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ /* ++ * Get a seqno representing the execution of the current buffer, ++ * which we can wait on. We would like to mitigate these interrupts, ++ * likely by only creating seqnos occasionally (so that we have ++ * *some* interrupts representing completion of buffers that we can ++ * wait on when trying to clear up gtt space). 
++ */ ++ seqno = i915_add_request(dev, flush_domains); ++ BUG_ON(seqno == 0); ++ i915_file_priv->mm.last_gem_seqno = seqno; ++ for (i = 0; i < args->buffer_count; i++) { ++ struct drm_gem_object *obj = object_list[i]; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ i915_gem_object_move_to_active(obj); ++ obj_priv->last_rendering_seqno = seqno; ++#if WATCH_LRU ++ DRM_INFO("%s: move to exec list %p\n", __func__, obj); ++#endif ++ } ++#if WATCH_LRU ++ i915_dump_lru(dev, __func__); ++#endif ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ /* Copy the new buffer offsets back to the user's exec list. */ ++ ret = copy_to_user((struct drm_i915_relocation_entry __user *) ++ (uintptr_t) args->buffers_ptr, ++ exec_list, ++ sizeof(*exec_list) * args->buffer_count); ++ if (ret) ++ DRM_ERROR("failed to copy %d exec entries " ++ "back to user (%d)\n", ++ args->buffer_count, ret); ++err: ++ if (object_list != NULL) { ++ for (i = 0; i < pinned; i++) ++ i915_gem_object_unpin(object_list[i]); ++ ++ for (i = 0; i < args->buffer_count; i++) ++ drm_gem_object_unreference(object_list[i]); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++pre_mutex_err: ++ drm_free(object_list, sizeof(*object_list) * args->buffer_count, ++ DRM_MEM_DRIVER); ++ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, ++ DRM_MEM_DRIVER); ++ ++ return ret; ++} ++ ++int ++i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int ret; ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ if (obj_priv->gtt_space == NULL) { ++ ret = i915_gem_object_bind_to_gtt(obj, alignment); ++ if (ret != 0) { ++ DRM_ERROR("Failure to bind: %d", ret); ++ return ret; ++ } ++ } ++ obj_priv->pin_count++; ++ ++ /* If the object is not active and not pending a flush, ++ * remove it from the inactive list ++ */ ++ if (obj_priv->pin_count == 1) { ++ atomic_inc(&dev->pin_count); ++ atomic_add(obj->size, &dev->pin_memory); ++ if (!obj_priv->active && ++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)) == 0 && ++ !list_empty(&obj_priv->list)) ++ list_del_init(&obj_priv->list); ++ } ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ return 0; ++} ++ ++void ++i915_gem_object_unpin(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ obj_priv->pin_count--; ++ BUG_ON(obj_priv->pin_count < 0); ++ BUG_ON(obj_priv->gtt_space == NULL); ++ ++ /* If the object is no longer pinned, and is ++ * neither active nor being flushed, then stick it on ++ * the inactive list ++ */ ++ if (obj_priv->pin_count == 0) { ++ if (!obj_priv->active && ++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)) == 0) ++ list_move_tail(&obj_priv->list, ++ &dev_priv->mm.inactive_list); ++ atomic_dec(&dev->pin_count); ++ atomic_sub(obj->size, &dev->pin_memory); ++ } ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++} ++ ++int ++i915_gem_pin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pin *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", 
++ args->handle); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ obj_priv = obj->driver_private; ++ ++ ret = i915_gem_object_pin(obj, args->alignment); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ /* XXX - flush the CPU caches for pinned objects ++ * as the X server doesn't manage domains yet ++ */ ++ if (obj->write_domain & I915_GEM_DOMAIN_CPU) { ++ i915_gem_clflush_object(obj); ++ drm_agp_chipset_flush(dev); ++ obj->write_domain = 0; ++ } ++ args->offset = obj_priv->gtt_offset; ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++int ++i915_gem_unpin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pin *args = data; ++ struct drm_gem_object *obj; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", ++ args->handle); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ ++ i915_gem_object_unpin(obj); ++ ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} ++ ++int ++i915_gem_busy_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_busy *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ mutex_lock(&dev->struct_mutex); ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", ++ args->handle); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ ++ obj_priv = obj->driver_private; ++ args->busy = obj_priv->active; ++ ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} ++ ++int ++i915_gem_throttle_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return i915_gem_ring_throttle(dev, file_priv); ++} ++ ++int i915_gem_init_object(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); ++ if (obj_priv == NULL) ++ return -ENOMEM; ++ ++ /* ++ * We've just allocated pages from the kernel, ++ * so they've just been written by the CPU with ++ * zeros. They'll need to be clflushed before we ++ * use them with the GPU. 
++ */ ++ obj->write_domain = I915_GEM_DOMAIN_CPU; ++ obj->read_domains = I915_GEM_DOMAIN_CPU; ++ ++ obj->driver_private = obj_priv; ++ obj_priv->obj = obj; ++ INIT_LIST_HEAD(&obj_priv->list); ++ return 0; ++} ++ ++void i915_gem_free_object(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ while (obj_priv->pin_count > 0) ++ i915_gem_object_unpin(obj); ++ ++ i915_gem_object_unbind(obj); ++ ++ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); ++ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); ++} ++ ++int ++i915_gem_set_domain(struct drm_gem_object *obj, ++ struct drm_file *file_priv, ++ uint32_t read_domains, ++ uint32_t write_domain) ++{ ++ struct drm_device *dev = obj->dev; ++ int ret; ++ uint32_t flush_domains; ++ ++ BUG_ON(!mutex_is_locked(&dev->struct_mutex)); ++ ++ ret = i915_gem_object_set_domain(obj, read_domains, write_domain); ++ if (ret) ++ return ret; ++ flush_domains = i915_gem_dev_set_domain(obj->dev); ++ ++ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) ++ (void) i915_add_request(dev, flush_domains); ++ ++ return 0; ++} ++ ++/** Unbinds all objects that are on the given buffer list. */ ++static int ++i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) ++{ ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ while (!list_empty(head)) { ++ obj_priv = list_first_entry(head, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ ++ if (obj_priv->pin_count != 0) { ++ DRM_ERROR("Pinned object in unbind list\n"); ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ ret = i915_gem_object_unbind(obj); ++ if (ret != 0) { ++ DRM_ERROR("Error unbinding object in LeaveVT: %d\n", ++ ret); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++static int ++i915_gem_idle(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t seqno, cur_seqno, last_seqno; ++ int stuck; ++ ++ if (dev_priv->mm.suspended) ++ return 0; ++ ++ /* Hack! Don't let anybody do execbuf while we don't control the chip. ++ * We need to replace this with a semaphore, or something. ++ */ ++ dev_priv->mm.suspended = 1; ++ ++ i915_kernel_lost_context(dev); ++ ++ /* Flush the GPU along with all non-CPU write domains ++ */ ++ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), ++ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); ++ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)); ++ ++ if (seqno == 0) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ dev_priv->mm.waiting_gem_seqno = seqno; ++ last_seqno = 0; ++ stuck = 0; ++ for (;;) { ++ cur_seqno = i915_get_gem_seqno(dev); ++ if (i915_seqno_passed(cur_seqno, seqno)) ++ break; ++ if (last_seqno == cur_seqno) { ++ if (stuck++ > 100) { ++ DRM_ERROR("hardware wedged\n"); ++ dev_priv->mm.wedged = 1; ++ DRM_WAKEUP(&dev_priv->irq_queue); ++ break; ++ } ++ } ++ msleep(10); ++ last_seqno = cur_seqno; ++ } ++ dev_priv->mm.waiting_gem_seqno = 0; ++ ++ i915_gem_retire_requests(dev); ++ ++ /* Active and flushing should now be empty as we've ++ * waited for a sequence higher than any pending execbuffer ++ */ ++ BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ++ ++ /* Request should now be empty as we've also waited ++ * for the last request in the list ++ */ ++ BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ ++ /* Move all buffers out of the GTT. 
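/*
 * Editorial sketch, not part of the patch: the wait loop in
 * i915_gem_idle() above depends on a wrap-safe sequence-number
 * comparison.  This is the usual shape of such a test (the real
 * i915_seqno_passed() is defined elsewhere in the driver); signed
 * subtraction keeps the result correct when the 32-bit counter wraps.
 */
static inline int seqno_passed_example(uint32_t current_seqno, uint32_t wanted_seqno)
{
	return (int32_t)(current_seqno - wanted_seqno) >= 0;
}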
*/ ++ i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); ++ ++ BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ return 0; ++} ++ ++static int ++i915_gem_init_hws(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ /* If we need a physical address for the status page, it's already ++ * initialized at driver load time. ++ */ ++ if (!I915_NEED_GFX_HWS(dev)) ++ return 0; ++ ++ obj = drm_gem_object_alloc(dev, 4096); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate status page\n"); ++ return -ENOMEM; ++ } ++ obj_priv = obj->driver_private; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ return ret; ++ } ++ ++ dev_priv->status_gfx_addr = obj_priv->gtt_offset; ++ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset; ++ dev_priv->hws_map.size = 4096; ++ dev_priv->hws_map.type = 0; ++ dev_priv->hws_map.flags = 0; ++ dev_priv->hws_map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->hws_map, dev); ++ if (dev_priv->hws_map.handle == NULL) { ++ DRM_ERROR("Failed to map status page.\n"); ++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ dev_priv->hws_obj = obj; ++ dev_priv->hw_status_page = dev_priv->hws_map.handle; ++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); ++ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); ++ ++ return 0; ++} ++ ++static int ++i915_gem_init_ringbuffer(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ ret = i915_gem_init_hws(dev); ++ if (ret != 0) ++ return ret; ++ ++ obj = drm_gem_object_alloc(dev, 128 * 1024); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate ringbuffer\n"); ++ return -ENOMEM; ++ } ++ obj_priv = obj->driver_private; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ return ret; ++ } ++ ++ /* Set up the kernel mapping for the ring. */ ++ dev_priv->ring.Size = obj->size; ++ dev_priv->ring.tail_mask = obj->size - 1; ++ ++ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; ++ dev_priv->ring.map.size = obj->size; ++ dev_priv->ring.map.type = 0; ++ dev_priv->ring.map.flags = 0; ++ dev_priv->ring.map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->ring.map, dev); ++ if (dev_priv->ring.map.handle == NULL) { ++ DRM_ERROR("Failed to map ringbuffer.\n"); ++ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ dev_priv->ring.ring_obj = obj; ++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ ++ /* Stop the ring if it's running. */ ++ I915_WRITE(PRB0_CTL, 0); ++ I915_WRITE(PRB0_HEAD, 0); ++ I915_WRITE(PRB0_TAIL, 0); ++ I915_WRITE(PRB0_START, 0); ++ ++ /* Initialize the ring. 
*/ ++ I915_WRITE(PRB0_START, obj_priv->gtt_offset); ++ I915_WRITE(PRB0_CTL, ++ ((obj->size - 4096) & RING_NR_PAGES) | ++ RING_NO_REPORT | ++ RING_VALID); ++ ++ /* Update our cache of the ring state */ ++ i915_kernel_lost_context(dev); ++ ++ return 0; ++} ++ ++static void ++i915_gem_cleanup_ringbuffer(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->ring.ring_obj == NULL) ++ return; ++ ++ drm_core_ioremapfree(&dev_priv->ring.map, dev); ++ ++ i915_gem_object_unpin(dev_priv->ring.ring_obj); ++ drm_gem_object_unreference(dev_priv->ring.ring_obj); ++ dev_priv->ring.ring_obj = NULL; ++ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); ++ ++ if (dev_priv->hws_obj != NULL) { ++ i915_gem_object_unpin(dev_priv->hws_obj); ++ drm_gem_object_unreference(dev_priv->hws_obj); ++ dev_priv->hws_obj = NULL; ++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); ++ ++ /* Write high address into HWS_PGA when disabling. */ ++ I915_WRITE(HWS_PGA, 0x1ffff000); ++ } ++} ++ ++int ++i915_gem_entervt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (dev_priv->mm.wedged) { ++ DRM_ERROR("Reenabling wedged hardware, good luck\n"); ++ dev_priv->mm.wedged = 0; ++ } ++ ++ ret = i915_gem_init_ringbuffer(dev); ++ if (ret != 0) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ dev_priv->mm.suspended = 0; ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} ++ ++int ++i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = i915_gem_idle(dev); ++ if (ret == 0) ++ i915_gem_cleanup_ringbuffer(dev); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++void ++i915_gem_lastclose(struct drm_device *dev) ++{ ++ int ret; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (dev_priv->ring.ring_obj != NULL) { ++ ret = i915_gem_idle(dev); ++ if (ret) ++ DRM_ERROR("failed to idle hardware: %d\n", ret); ++ ++ i915_gem_cleanup_ringbuffer(dev); ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++void i915_gem_load(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ INIT_LIST_HEAD(&dev_priv->mm.active_list); ++ INIT_LIST_HEAD(&dev_priv->mm.flushing_list); ++ INIT_LIST_HEAD(&dev_priv->mm.inactive_list); ++ INIT_LIST_HEAD(&dev_priv->mm.request_list); ++ INIT_DELAYED_WORK(&dev_priv->mm.retire_work, ++ i915_gem_retire_work_handler); ++ dev_priv->mm.next_gem_seqno = 1; ++ ++ i915_gem_detect_bit_6_swizzle(dev); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem_debug.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem_debug.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem_debug.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem_debug.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,202 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, 
copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Packard ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_compat.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#if WATCH_INACTIVE ++void ++i915_verify_inactive(struct drm_device *dev, char *file, int line) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { ++ obj = obj_priv->obj; ++ if (obj_priv->pin_count || obj_priv->active || ++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT))) ++ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", ++ obj, ++ obj_priv->pin_count, obj_priv->active, ++ obj->write_domain, file, line); ++ } ++} ++#endif /* WATCH_INACTIVE */ ++ ++ ++#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE ++static void ++i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, ++ uint32_t bias, uint32_t mark) ++{ ++ uint32_t *mem = kmap_atomic(page, KM_USER0); ++ int i; ++ for (i = start; i < end; i += 4) ++ DRM_INFO("%08x: %08x%s\n", ++ (int) (bias + i), mem[i / 4], ++ (bias + i == mark) ? 
" ********" : ""); ++ kunmap_atomic(mem, KM_USER0); ++ /* give syslog time to catch up */ ++ msleep(1); ++} ++ ++void ++i915_gem_dump_object(struct drm_gem_object *obj, int len, ++ const char *where, uint32_t mark) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int page; ++ ++ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); ++ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { ++ int page_len, chunk, chunk_len; ++ ++ page_len = len - page * PAGE_SIZE; ++ if (page_len > PAGE_SIZE) ++ page_len = PAGE_SIZE; ++ ++ for (chunk = 0; chunk < page_len; chunk += 128) { ++ chunk_len = page_len - chunk; ++ if (chunk_len > 128) ++ chunk_len = 128; ++ i915_gem_dump_page(obj_priv->page_list[page], ++ chunk, chunk + chunk_len, ++ obj_priv->gtt_offset + ++ page * PAGE_SIZE, ++ mark); ++ } ++ } ++} ++#endif ++ ++#if WATCH_LRU ++void ++i915_dump_lru(struct drm_device *dev, const char *where) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ ++ DRM_INFO("active list %s {\n", where); ++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, ++ list) ++ { ++ DRM_INFO(" %p: %08x\n", obj_priv, ++ obj_priv->last_rendering_seqno); ++ } ++ DRM_INFO("}\n"); ++ DRM_INFO("flushing list %s {\n", where); ++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, ++ list) ++ { ++ DRM_INFO(" %p: %08x\n", obj_priv, ++ obj_priv->last_rendering_seqno); ++ } ++ DRM_INFO("}\n"); ++ DRM_INFO("inactive %s {\n", where); ++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { ++ DRM_INFO(" %p: %08x\n", obj_priv, ++ obj_priv->last_rendering_seqno); ++ } ++ DRM_INFO("}\n"); ++} ++#endif ++ ++ ++#if WATCH_COHERENCY ++void ++i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int page; ++ uint32_t *gtt_mapping; ++ uint32_t *backing_map = NULL; ++ int bad_count = 0; ++ ++ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", ++ __func__, obj, obj_priv->gtt_offset, handle, ++ obj->size / 1024); ++ ++ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, ++ obj->size); ++ if (gtt_mapping == NULL) { ++ DRM_ERROR("failed to map GTT space\n"); ++ return; ++ } ++ ++ for (page = 0; page < obj->size / PAGE_SIZE; page++) { ++ int i; ++ ++ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); ++ ++ if (backing_map == NULL) { ++ DRM_ERROR("failed to map backing page\n"); ++ goto out; ++ } ++ ++ for (i = 0; i < PAGE_SIZE / 4; i++) { ++ uint32_t cpuval = backing_map[i]; ++ uint32_t gttval = readl(gtt_mapping + ++ page * 1024 + i); ++ ++ if (cpuval != gttval) { ++ DRM_INFO("incoherent CPU vs GPU at 0x%08x: " ++ "0x%08x vs 0x%08x\n", ++ (int)(obj_priv->gtt_offset + ++ page * PAGE_SIZE + i * 4), ++ cpuval, gttval); ++ if (bad_count++ >= 8) { ++ DRM_INFO("...\n"); ++ goto out; ++ } ++ } ++ } ++ kunmap_atomic(backing_map, KM_USER0); ++ backing_map = NULL; ++ } ++ ++ out: ++ if (backing_map != NULL) ++ kunmap_atomic(backing_map, KM_USER0); ++ iounmap(gtt_mapping); ++ ++ /* give syslog time to catch up */ ++ msleep(1); ++ ++ /* Directly flush the object, since we just loaded values with the CPU ++ * from the backing pages and we don't want to disturb the cache ++ * management that we're trying to observe. 
++ */ ++ ++ i915_gem_clflush_object(obj); ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem_proc.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem_proc.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem_proc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem_proc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,293 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * Keith Packard ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_compat.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++static int i915_gem_active_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Active:\n"); ++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, ++ list) ++ { ++ struct drm_gem_object *obj = obj_priv->obj; ++ if (obj->name) { ++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", ++ obj, obj->name, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } else { ++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", ++ obj, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_flushing_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Flushing:\n"); ++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, ++ list) ++ { ++ struct drm_gem_object *obj = obj_priv->obj; ++ if (obj->name) { ++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", ++ obj, obj->name, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } 
else { ++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_inactive_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Inactive:\n"); ++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, ++ list) ++ { ++ struct drm_gem_object *obj = obj_priv->obj; ++ if (obj->name) { ++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", ++ obj, obj->name, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } else { ++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_request_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_request *gem_request; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Request:\n"); ++ list_for_each_entry(gem_request, &dev_priv->mm.request_list, ++ list) ++ { ++ DRM_PROC_PRINT(" %d @ %d %08x\n", ++ gem_request->seqno, ++ (int) (jiffies - gem_request->emitted_jiffies), ++ gem_request->flush_domains); ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_seqno_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); ++ DRM_PROC_PRINT("Waiter sequence: %d\n", ++ dev_priv->mm.waiting_gem_seqno); ++ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++ ++static int i915_interrupt_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Interrupt enable: %08x\n", ++ I915_READ(IER)); ++ DRM_PROC_PRINT("Interrupt identity: %08x\n", ++ I915_READ(IIR)); ++ DRM_PROC_PRINT("Interrupt mask: %08x\n", ++ I915_READ(IMR)); ++ DRM_PROC_PRINT("Pipe A stat: %08x\n", ++ I915_READ(PIPEASTAT)); ++ DRM_PROC_PRINT("Pipe B stat: %08x\n", ++ I915_READ(PIPEBSTAT)); ++ DRM_PROC_PRINT("Interrupts received: %d\n", ++ atomic_read(&dev_priv->irq_received)); ++ DRM_PROC_PRINT("Current 
sequence: %d\n", ++ i915_get_gem_seqno(dev)); ++ DRM_PROC_PRINT("Waiter sequence: %d\n", ++ dev_priv->mm.waiting_gem_seqno); ++ DRM_PROC_PRINT("IRQ sequence: %d\n", ++ dev_priv->mm.irq_gem_seqno); ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static struct drm_proc_list { ++ /** file name */ ++ const char *name; ++ /** proc callback*/ ++ int (*f) (char *, char **, off_t, int, int *, void *); ++} i915_gem_proc_list[] = { ++ {"i915_gem_active", i915_gem_active_info}, ++ {"i915_gem_flushing", i915_gem_flushing_info}, ++ {"i915_gem_inactive", i915_gem_inactive_info}, ++ {"i915_gem_request", i915_gem_request_info}, ++ {"i915_gem_seqno", i915_gem_seqno_info}, ++ {"i915_gem_interrupt", i915_interrupt_info}, ++}; ++ ++#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) ++ ++int i915_gem_proc_init(struct drm_minor *minor) ++{ ++ struct proc_dir_entry *ent; ++ int i, j; ++ ++ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { ++ ent = create_proc_entry(i915_gem_proc_list[i].name, ++ S_IFREG | S_IRUGO, minor->dev_root); ++ if (!ent) { ++ DRM_ERROR("Cannot create /proc/dri/.../%s\n", ++ i915_gem_proc_list[i].name); ++ for (j = 0; j < i; j++) ++ remove_proc_entry(i915_gem_proc_list[i].name, ++ minor->dev_root); ++ return -1; ++ } ++ ent->read_proc = i915_gem_proc_list[i].f; ++ ent->data = minor; ++ } ++ return 0; ++} ++ ++void i915_gem_proc_cleanup(struct drm_minor *minor) ++{ ++ int i; ++ ++ if (!minor->dev_root) ++ return; ++ ++ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) ++ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem_tiling.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem_tiling.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_gem_tiling.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_gem_tiling.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,309 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/** @file i915_gem_tiling.c ++ * ++ * Support for managing tiling state of buffer objects. 
++ * ++ * The idea behind tiling is to increase cache hit rates by rearranging ++ * pixel data so that a group of pixel accesses are in the same cacheline. ++ * Performance improvement from doing this on the back/depth buffer are on ++ * the order of 30%. ++ * ++ * Intel architectures make this somewhat more complicated, though, by ++ * adjustments made to addressing of data when the memory is in interleaved ++ * mode (matched pairs of DIMMS) to improve memory bandwidth. ++ * For interleaved memory, the CPU sends every sequential 64 bytes ++ * to an alternate memory channel so it can get the bandwidth from both. ++ * ++ * The GPU also rearranges its accesses for increased bandwidth to interleaved ++ * memory, and it matches what the CPU does for non-tiled. However, when tiled ++ * it does it a little differently, since one walks addresses not just in the ++ * X direction but also Y. So, along with alternating channels when bit ++ * 6 of the address flips, it also alternates when other bits flip -- Bits 9 ++ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) ++ * are common to both the 915 and 965-class hardware. ++ * ++ * The CPU also sometimes XORs in higher bits as well, to improve ++ * bandwidth doing strided access like we do so frequently in graphics. This ++ * is called "Channel XOR Randomization" in the MCH documentation. The result ++ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address ++ * decode. ++ * ++ * All of this bit 6 XORing has an effect on our memory management, ++ * as we need to make sure that the 3d driver can correctly address object ++ * contents. ++ * ++ * If we don't have interleaved memory, all tiling is safe and no swizzling is ++ * required. ++ * ++ * When bit 17 is XORed in, we simply refuse to tile at all. Bit ++ * 17 is not just a page offset, so as we page an objet out and back in, ++ * individual pages in it will have different bit 17 addresses, resulting in ++ * each 64 bytes being swapped with its neighbor! ++ * ++ * Otherwise, if interleaved, we have to tell the 3d driver what the address ++ * swizzling it needs to do is, since it's writing with the CPU to the pages ++ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the ++ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling ++ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order ++ * to match what the GPU expects. ++ */ ++ ++/** ++ * Detects bit 6 swizzling of address lookup between IGD access and CPU ++ * access through main memory. 
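/*
 * Editorial sketch, not part of the patch: what a reported swizzle mode
 * means for CPU access to a tiled object.  For I915_BIT_6_SWIZZLE_9_10,
 * bit 6 of every byte address is XORed with bits 9 and 10 before the
 * access reaches memory, so a CPU-side copy has to pre-swizzle its
 * offsets the same way.
 */
static inline uint32_t swizzle_bit6_9_10_example(uint32_t offset)
{
	/* bring bits 9 and 10 down to bit position 6 and XOR them in */
	return offset ^ (((offset >> 3) ^ (offset >> 4)) & (1u << 6));
}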
++ */ ++void ++i915_gem_detect_bit_6_swizzle(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct pci_dev *bridge; ++ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; ++ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; ++ int mchbar_offset; ++ char __iomem *mchbar; ++ int ret; ++ ++ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); ++ if (bridge == NULL) { ++ DRM_ERROR("Couldn't get bridge device\n"); ++ return; ++ } ++ ++ ret = pci_enable_device(bridge); ++ if (ret != 0) { ++ DRM_ERROR("pci_enable_device failed: %d\n", ret); ++ return; ++ } ++ ++ if (IS_I965G(dev)) ++ mchbar_offset = 0x48; ++ else ++ mchbar_offset = 0x44; ++ ++ /* Use resource 2 for our BAR that's stashed in a nonstandard location, ++ * since the bridge would only ever use standard BARs 0-1 (though it ++ * doesn't anyway) ++ */ ++ ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]); ++ if (ret != 0) { ++ DRM_ERROR("pci_read_base failed: %d\n", ret); ++ return; ++ } ++ ++ mchbar = ioremap(pci_resource_start(bridge, 2), ++ pci_resource_len(bridge, 2)); ++ if (mchbar == NULL) { ++ DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n"); ++ return; ++ } ++ ++ if (IS_I965G(dev) && !IS_I965GM(dev)) { ++ uint32_t chdecmisc; ++ ++ /* On the 965, channel interleave appears to be determined by ++ * the flex bit. If flex is set, then the ranks (sides of a ++ * DIMM) of memory will be "stacked" (physical addresses walk ++ * through one rank then move on to the next, flipping channels ++ * or not depending on rank configuration). The GPU in this ++ * case does exactly the same addressing as the CPU. ++ * ++ * Unlike the 945, channel randomization based does not ++ * appear to be available. ++ * ++ * XXX: While the G965 doesn't appear to do any interleaving ++ * when the DIMMs are not exactly matched, the G4x chipsets ++ * might be for "L-shaped" configurations, and will need to be ++ * detected. ++ * ++ * L-shaped configuration: ++ * ++ * +-----+ ++ * | | ++ * |DIMM2| <-- non-interleaved ++ * +-----+ ++ * +-----+ +-----+ ++ * | | | | ++ * |DIMM0| |DIMM1| <-- interleaved area ++ * +-----+ +-----+ ++ */ ++ chdecmisc = readb(mchbar + CHDECMISC); ++ ++ if (chdecmisc == 0xff) { ++ DRM_ERROR("Couldn't read from MCHBAR. " ++ "Disabling tiling.\n"); ++ } else if (chdecmisc & CHDECMISC_FLEXMEMORY) { ++ swizzle_x = I915_BIT_6_SWIZZLE_NONE; ++ swizzle_y = I915_BIT_6_SWIZZLE_NONE; ++ } else { ++ swizzle_x = I915_BIT_6_SWIZZLE_9_10; ++ swizzle_y = I915_BIT_6_SWIZZLE_9; ++ } ++ } else if (IS_I9XX(dev)) { ++ uint32_t dcc; ++ ++ /* On 915-945 and GM965, channel interleave by the CPU is ++ * determined by DCC. The CPU will alternate based on bit 6 ++ * in interleaved mode, and the GPU will then also alternate ++ * on bit 6, 9, and 10 for X, but the CPU may also optionally ++ * alternate based on bit 17 (XOR not disabled and XOR ++ * bit == 17). 
++ */ ++ dcc = readl(mchbar + DCC); ++ switch (dcc & DCC_ADDRESSING_MODE_MASK) { ++ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: ++ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: ++ swizzle_x = I915_BIT_6_SWIZZLE_NONE; ++ swizzle_y = I915_BIT_6_SWIZZLE_NONE; ++ break; ++ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: ++ if (IS_I915G(dev) || IS_I915GM(dev) || ++ dcc & DCC_CHANNEL_XOR_DISABLE) { ++ swizzle_x = I915_BIT_6_SWIZZLE_9_10; ++ swizzle_y = I915_BIT_6_SWIZZLE_9; ++ } else if (IS_I965GM(dev)) { ++ /* GM965 only does bit 11-based channel ++ * randomization ++ */ ++ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; ++ swizzle_y = I915_BIT_6_SWIZZLE_9_11; ++ } else { ++ /* Bit 17 or perhaps other swizzling */ ++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; ++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; ++ } ++ break; ++ } ++ if (dcc == 0xffffffff) { ++ DRM_ERROR("Couldn't read from MCHBAR. " ++ "Disabling tiling.\n"); ++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; ++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; ++ } ++ } else { ++ /* As far as we know, the 865 doesn't have these bit 6 ++ * swizzling issues. ++ */ ++ swizzle_x = I915_BIT_6_SWIZZLE_NONE; ++ swizzle_y = I915_BIT_6_SWIZZLE_NONE; ++ } ++ ++ iounmap(mchbar); ++ ++ dev_priv->mm.bit_6_swizzle_x = swizzle_x; ++ dev_priv->mm.bit_6_swizzle_y = swizzle_y; ++} ++ ++/** ++ * Sets the tiling mode of an object, returning the required swizzling of ++ * bit 6 of addresses in the object. ++ */ ++int ++i915_gem_set_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_set_tiling *args = data; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ obj_priv = obj->driver_private; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (args->tiling_mode == I915_TILING_NONE) { ++ obj_priv->tiling_mode = I915_TILING_NONE; ++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; ++ } else { ++ if (args->tiling_mode == I915_TILING_X) ++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; ++ else ++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; ++ /* If we can't handle the swizzling, make it untiled. */ ++ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { ++ args->tiling_mode = I915_TILING_NONE; ++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; ++ } ++ } ++ obj_priv->tiling_mode = args->tiling_mode; ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ drm_gem_object_unreference(obj); ++ ++ return 0; ++} ++ ++/** ++ * Returns the current tiling mode and required bit 6 swizzling for the object. 
++ */ ++int ++i915_gem_get_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_get_tiling *args = data; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ obj_priv = obj->driver_private; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ args->tiling_mode = obj_priv->tiling_mode; ++ switch (obj_priv->tiling_mode) { ++ case I915_TILING_X: ++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; ++ break; ++ case I915_TILING_Y: ++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; ++ break; ++ case I915_TILING_NONE: ++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; ++ break; ++ default: ++ DRM_ERROR("unknown tiling mode\n"); ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ drm_gem_object_unreference(obj); ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_ioc32.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_ioc32.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_ioc32.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,284 @@ ++/** ++ * \file i915_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the i915 DRM. ++ * ++ * \author Alan Hourihane ++ * ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Alan Hourihane 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++typedef struct _drm_i915_batchbuffer32 { ++ int start; /* agp offset */ ++ int used; /* nr bytes in use */ ++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ ++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ ++ int num_cliprects; /* mulitpass with multiple cliprects? 
*/ ++ u32 cliprects; /* pointer to userspace cliprects */ ++} drm_i915_batchbuffer32_t; ++ ++static int compat_i915_batchbuffer(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_batchbuffer32_t batchbuffer32; ++ drm_i915_batchbuffer_t __user *batchbuffer; ++ ++ if (copy_from_user ++ (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32))) ++ return -EFAULT; ++ ++ batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer)); ++ if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer)) ++ || __put_user(batchbuffer32.start, &batchbuffer->start) ++ || __put_user(batchbuffer32.used, &batchbuffer->used) ++ || __put_user(batchbuffer32.DR1, &batchbuffer->DR1) ++ || __put_user(batchbuffer32.DR4, &batchbuffer->DR4) ++ || __put_user(batchbuffer32.num_cliprects, ++ &batchbuffer->num_cliprects) ++ || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects, ++ &batchbuffer->cliprects)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_BATCHBUFFER, ++ (unsigned long) batchbuffer); ++} ++ ++typedef struct _drm_i915_cmdbuffer32 { ++ u32 buf; /* pointer to userspace command buffer */ ++ int sz; /* nr bytes in buf */ ++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ ++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ ++ int num_cliprects; /* mulitpass with multiple cliprects? */ ++ u32 cliprects; /* pointer to userspace cliprects */ ++} drm_i915_cmdbuffer32_t; ++ ++static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_cmdbuffer32_t cmdbuffer32; ++ drm_i915_cmdbuffer_t __user *cmdbuffer; ++ ++ if (copy_from_user ++ (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32))) ++ return -EFAULT; ++ ++ cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer)); ++ if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer)) ++ || __put_user((int __user *)(unsigned long)cmdbuffer32.buf, ++ &cmdbuffer->buf) ++ || __put_user(cmdbuffer32.sz, &cmdbuffer->sz) ++ || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1) ++ || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4) ++ || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects) ++ || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects, ++ &cmdbuffer->cliprects)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer); ++} ++ ++typedef struct drm_i915_irq_emit32 { ++ u32 irq_seq; ++} drm_i915_irq_emit32_t; ++ ++static int compat_i915_irq_emit(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_irq_emit32_t req32; ++ drm_i915_irq_emit_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user((int __user *)(unsigned long)req32.irq_seq, ++ &request->irq_seq)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request); ++} ++typedef struct drm_i915_getparam32 { ++ int param; ++ u32 value; ++} drm_i915_getparam32_t; ++ ++static int compat_i915_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_getparam32_t req32; ++ drm_i915_getparam_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) 
++ || __put_user(req32.param, &request->param) ++ || __put_user((void __user *)(unsigned long)req32.value, ++ &request->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_GETPARAM, (unsigned long) request); ++} ++ ++typedef struct drm_i915_mem_alloc32 { ++ int region; ++ int alignment; ++ int size; ++ u32 region_offset; /* offset from start of fb or agp */ ++} drm_i915_mem_alloc32_t; ++ ++static int compat_i915_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_mem_alloc32_t req32; ++ drm_i915_mem_alloc_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.region, &request->region) ++ || __put_user(req32.alignment, &request->alignment) ++ || __put_user(req32.size, &request->size) ++ || __put_user((void __user *)(unsigned long)req32.region_offset, ++ &request->region_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_ALLOC, (unsigned long) request); ++} ++ ++typedef struct drm_i915_execbuffer32 { ++ uint64_t ops_list; ++ uint32_t num_buffers; ++ struct _drm_i915_batchbuffer32 batch; ++ drm_context_t context; ++ struct drm_fence_arg fence_arg; ++} drm_i915_execbuffer32_t; ++ ++static int compat_i915_execbuffer(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_execbuffer32_t req32; ++ struct drm_i915_execbuffer __user *request; ++ int err; ++ ++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.ops_list, &request->ops_list) ++ || __put_user(req32.num_buffers, &request->num_buffers) ++ || __put_user(req32.context, &request->context) ++ || __copy_to_user(&request->fence_arg, &req32.fence_arg, ++ sizeof(req32.fence_arg)) ++ || __put_user(req32.batch.start, &request->batch.start) ++ || __put_user(req32.batch.used, &request->batch.used) ++ || __put_user(req32.batch.DR1, &request->batch.DR1) ++ || __put_user(req32.batch.DR4, &request->batch.DR4) ++ || __put_user(req32.batch.num_cliprects, ++ &request->batch.num_cliprects) ++ || __put_user((int __user *)(unsigned long)req32.batch.cliprects, ++ &request->batch.cliprects)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request); ++ ++ if (err) ++ return err; ++ ++ if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle) ++ || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class) ++ || __get_user(req32.fence_arg.type, &request->fence_arg.type) ++ || __get_user(req32.fence_arg.flags, &request->fence_arg.flags) ++ || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled) ++ || __get_user(req32.fence_arg.error, &request->fence_arg.error) ++ || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence)) ++ return -EFAULT; ++ ++ if (copy_to_user((void __user *)arg, &req32, sizeof(req32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++ ++drm_ioctl_compat_t *i915_compat_ioctls[] = { ++ [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, ++ [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, ++ [DRM_I915_GETPARAM] = compat_i915_getparam, ++ [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, ++ [DRM_I915_ALLOC] = compat_i915_alloc, ++#ifdef 
I915_HAVE_BUFFER ++ [DRM_I915_EXECBUFFER] = compat_i915_execbuffer, ++#endif ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) ++ fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1005 @@ ++/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- ++ */ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#define MAX_NOPID ((u32)~0) ++ ++/* ++ * These are the interrupts used by the driver ++ */ ++#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ ++ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ ++ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) ++ ++static inline void ++i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask) ++{ ++ if ((dev_priv->irq_mask_reg & mask) != 0) { ++ dev_priv->irq_mask_reg &= ~mask; ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ (void) I915_READ(IMR); ++ } ++} ++ ++static inline void ++i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask) ++{ ++ if ((dev_priv->irq_mask_reg & mask) != mask) { ++ dev_priv->irq_mask_reg |= mask; ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ (void) I915_READ(IMR); ++ } ++} ++ ++/** ++ * i915_get_pipe - return the the pipe associated with a given plane ++ * @dev: DRM device ++ * @plane: plane to look for ++ * ++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number ++ * rather than a pipe number, since they may not always be equal. This routine ++ * maps the given @plane back to a pipe number. ++ */ ++static int ++i915_get_pipe(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ u32 dspcntr; ++ ++ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); ++ ++ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; ++} ++ ++/** ++ * i915_get_plane - return the the plane associated with a given pipe ++ * @dev: DRM device ++ * @pipe: pipe to look for ++ * ++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number ++ * rather than a plane number, since they may not always be equal. This routine ++ * maps the given @pipe back to a plane number. ++ */ ++static int ++i915_get_plane(struct drm_device *dev, int pipe) ++{ ++ if (i915_get_pipe(dev, 0) == pipe) ++ return 0; ++ return 1; ++} ++ ++/** ++ * i915_pipe_enabled - check if a pipe is enabled ++ * @dev: DRM device ++ * @pipe: pipe to check ++ * ++ * Reading certain registers when the pipe is disabled can hang the chip. ++ * Use this routine to make sure the PLL is running and the pipe is active ++ * before reading such registers if unsure. ++ */ ++static int ++i915_pipe_enabled(struct drm_device *dev, int pipe) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; ++ ++ if (I915_READ(pipeconf) & PIPEACONF_ENABLE) ++ return 1; ++ ++ return 0; ++} ++ ++/** ++ * Emit a synchronous flip. ++ * ++ * This function must be called with the drawable spinlock held. ++ */ ++static void ++i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, ++ int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u16 x1, y1, x2, y2; ++ int pf_planes = 1 << plane; ++ ++ DRM_SPINLOCK_ASSERT(&dev->drw_lock); ++ ++ /* If the window is visible on the other plane, we have to flip on that ++ * plane as well. 
++ */ ++ if (plane == 1) { ++ x1 = sarea_priv->planeA_x; ++ y1 = sarea_priv->planeA_y; ++ x2 = x1 + sarea_priv->planeA_w; ++ y2 = y1 + sarea_priv->planeA_h; ++ } else { ++ x1 = sarea_priv->planeB_x; ++ y1 = sarea_priv->planeB_y; ++ x2 = x1 + sarea_priv->planeB_w; ++ y2 = y1 + sarea_priv->planeB_h; ++ } ++ ++ if (x2 > 0 && y2 > 0) { ++ int i, num_rects = drw->num_rects; ++ struct drm_clip_rect *rect = drw->rects; ++ ++ for (i = 0; i < num_rects; i++) ++ if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || ++ rect[i].x2 <= x1 || rect[i].y2 <= y1)) { ++ pf_planes = 0x3; ++ ++ break; ++ } ++ } ++ ++ i915_dispatch_flip(dev, pf_planes, 1); ++} ++ ++/** ++ * Emit blits for scheduled buffer swaps. ++ * ++ * This function will be called with the HW lock held. ++ */ ++static void i915_vblank_tasklet(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct list_head *list, *tmp, hits, *hit; ++ int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; ++ unsigned counter[2]; ++ struct drm_drawable_info *drw; ++ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u32 cpp = dev_priv->cpp, offsets[3]; ++ u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | ++ XY_SRC_COPY_BLT_WRITE_ALPHA | ++ XY_SRC_COPY_BLT_WRITE_RGB) ++ : XY_SRC_COPY_BLT_CMD; ++ u32 src_pitch = sarea_priv->pitch * cpp; ++ u32 dst_pitch = sarea_priv->pitch * cpp; ++ /* COPY rop (0xcc), map cpp to magic color depth constants */ ++ u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); ++ RING_LOCALS; ++ ++ if (IS_I965G(dev) && sarea_priv->front_tiled) { ++ cmd |= XY_SRC_COPY_BLT_DST_TILED; ++ dst_pitch >>= 2; ++ } ++ if (IS_I965G(dev) && sarea_priv->back_tiled) { ++ cmd |= XY_SRC_COPY_BLT_SRC_TILED; ++ src_pitch >>= 2; ++ } ++ ++ counter[0] = drm_vblank_count(dev, 0); ++ counter[1] = drm_vblank_count(dev, 1); ++ ++ DRM_DEBUG("\n"); ++ ++ INIT_LIST_HEAD(&hits); ++ ++ nhits = nrects = 0; ++ ++ /* No irqsave/restore necessary. This tasklet may be run in an ++ * interrupt context or normal context, but we don't have to worry ++ * about getting interrupted by something acquiring the lock, because ++ * we are the interrupt context thing that acquires the lock. 
++ */ ++ DRM_SPINLOCK(&dev_priv->swaps_lock); ++ ++ /* Find buffer swaps scheduled for this vertical blank */ ++ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { ++ drm_i915_vbl_swap_t *vbl_swap = ++ list_entry(list, drm_i915_vbl_swap_t, head); ++ int pipe = i915_get_pipe(dev, vbl_swap->plane); ++ ++ if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) ++ continue; ++ ++ list_del(list); ++ dev_priv->swaps_pending--; ++ drm_vblank_put(dev, pipe); ++ ++ DRM_SPINUNLOCK(&dev_priv->swaps_lock); ++ DRM_SPINLOCK(&dev->drw_lock); ++ ++ drw = drm_get_drawable_info(dev, vbl_swap->drw_id); ++ ++ if (!drw) { ++ DRM_SPINUNLOCK(&dev->drw_lock); ++ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); ++ DRM_SPINLOCK(&dev_priv->swaps_lock); ++ continue; ++ } ++ ++ list_for_each(hit, &hits) { ++ drm_i915_vbl_swap_t *swap_cmp = ++ list_entry(hit, drm_i915_vbl_swap_t, head); ++ struct drm_drawable_info *drw_cmp = ++ drm_get_drawable_info(dev, swap_cmp->drw_id); ++ ++ if (drw_cmp && ++ drw_cmp->rects[0].y1 > drw->rects[0].y1) { ++ list_add_tail(list, hit); ++ break; ++ } ++ } ++ ++ DRM_SPINUNLOCK(&dev->drw_lock); ++ ++ /* List of hits was empty, or we reached the end of it */ ++ if (hit == &hits) ++ list_add_tail(list, hits.prev); ++ ++ nhits++; ++ ++ DRM_SPINLOCK(&dev_priv->swaps_lock); ++ } ++ ++ DRM_SPINUNLOCK(&dev_priv->swaps_lock); ++ ++ if (nhits == 0) { ++ return; ++ } ++ ++ i915_kernel_lost_context(dev); ++ ++ upper[0] = upper[1] = 0; ++ slice[0] = max(sarea_priv->planeA_h / nhits, 1); ++ slice[1] = max(sarea_priv->planeB_h / nhits, 1); ++ lower[0] = sarea_priv->planeA_y + slice[0]; ++ lower[1] = sarea_priv->planeB_y + slice[0]; ++ ++ offsets[0] = sarea_priv->front_offset; ++ offsets[1] = sarea_priv->back_offset; ++ offsets[2] = sarea_priv->third_offset; ++ num_pages = sarea_priv->third_handle ? 3 : 2; ++ ++ DRM_SPINLOCK(&dev->drw_lock); ++ ++ /* Emit blits for buffer swaps, partitioning both outputs into as many ++ * slices as there are buffer swaps scheduled in order to avoid tearing ++ * (based on the assumption that a single buffer swap would always ++ * complete before scanout starts). 
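To make the slicing concrete (illustrative numbers only, not from the patch): with three swaps pending on a 480-line plane whose planeA_y is 0, slice[0] becomes 160, so the passes copy rows [0,160), [160,320) and, because the final pass forces lower to the full height, [320,480). A standalone sketch of the same band arithmetic as the loop below:

/* Illustration only: the band boundaries the tasklet loop walks through. */
int nhits = 3, plane_y = 0, plane_h = 480, height = 480, i;
int slice = plane_h / nhits > 0 ? plane_h / nhits : 1;	/* max(..., 1) */
int upper = 0, lower = plane_y + slice;

for (i = 0; i++ < nhits; upper = lower, lower += slice) {
	if (i == nhits)
		lower = height;
	/* pass i blits rows [upper, lower) of every drawable it overlaps:
	 * [0,160), [160,320), [320,480) in this example */
}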
++ */ ++ for (i = 0; i++ < nhits; ++ upper[0] = lower[0], lower[0] += slice[0], ++ upper[1] = lower[1], lower[1] += slice[1]) { ++ int init_drawrect = 1; ++ ++ if (i == nhits) ++ lower[0] = lower[1] = sarea_priv->height; ++ ++ list_for_each(hit, &hits) { ++ drm_i915_vbl_swap_t *swap_hit = ++ list_entry(hit, drm_i915_vbl_swap_t, head); ++ struct drm_clip_rect *rect; ++ int num_rects, plane, front, back; ++ unsigned short top, bottom; ++ ++ drw = drm_get_drawable_info(dev, swap_hit->drw_id); ++ ++ if (!drw) ++ continue; ++ ++ plane = swap_hit->plane; ++ ++ if (swap_hit->flip) { ++ i915_dispatch_vsync_flip(dev, drw, plane); ++ continue; ++ } ++ ++ if (init_drawrect) { ++ int width = sarea_priv->width; ++ int height = sarea_priv->height; ++ if (IS_I965G(dev)) { ++ BEGIN_LP_RING(4); ++ ++ OUT_RING(GFX_OP_DRAWRECT_INFO_I965); ++ OUT_RING(0); ++ OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(6); ++ ++ OUT_RING(GFX_OP_DRAWRECT_INFO); ++ OUT_RING(0); ++ OUT_RING(0); ++ OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); ++ OUT_RING(0); ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++ } ++ ++ sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; ++ ++ init_drawrect = 0; ++ } ++ ++ rect = drw->rects; ++ top = upper[plane]; ++ bottom = lower[plane]; ++ ++ front = (dev_priv->sarea_priv->pf_current_page >> ++ (2 * plane)) & 0x3; ++ back = (front + 1) % num_pages; ++ ++ for (num_rects = drw->num_rects; num_rects--; rect++) { ++ int y1 = max(rect->y1, top); ++ int y2 = min(rect->y2, bottom); ++ ++ if (y1 >= y2) ++ continue; ++ ++ BEGIN_LP_RING(8); ++ ++ OUT_RING(cmd); ++ OUT_RING(ropcpp | dst_pitch); ++ OUT_RING((y1 << 16) | rect->x1); ++ OUT_RING((y2 << 16) | rect->x2); ++ OUT_RING(offsets[front]); ++ OUT_RING((y1 << 16) | rect->x1); ++ OUT_RING(src_pitch); ++ OUT_RING(offsets[back]); ++ ++ ADVANCE_LP_RING(); ++ } ++ } ++ } ++ ++ DRM_SPINUNLOCK(&dev->drw_lock); ++ ++ list_for_each_safe(hit, tmp, &hits) { ++ drm_i915_vbl_swap_t *swap_hit = ++ list_entry(hit, drm_i915_vbl_swap_t, head); ++ ++ list_del(hit); ++ ++ drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); ++ } ++} ++ ++u32 i915_get_vblank_counter(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ unsigned long high_frame; ++ unsigned long low_frame; ++ u32 high1, high2, low, count; ++ int pipe; ++ ++ pipe = i915_get_pipe(dev, plane); ++ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; ++ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; ++ ++ if (!i915_pipe_enabled(dev, pipe)) { ++ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); ++ return 0; ++ } ++ ++ /* ++ * High & low register fields aren't synchronized, so make sure ++ * we get a low value that's stable across two reads of the high ++ * register. 
++ */ ++ do { ++ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> ++ PIPE_FRAME_HIGH_SHIFT); ++ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> ++ PIPE_FRAME_LOW_SHIFT); ++ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> ++ PIPE_FRAME_HIGH_SHIFT); ++ } while (high1 != high2); ++ ++ count = (high1 << 8) | low; ++ ++ return count; ++} ++ ++irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ u32 iir; ++ u32 pipea_stats = 0, pipeb_stats = 0; ++ int vblank = 0; ++#ifdef __linux__ ++ if (dev->pdev->msi_enabled) ++ I915_WRITE(IMR, ~0); ++#endif ++ iir = I915_READ(IIR); ++#if 0 ++ DRM_DEBUG("flag=%08x\n", iir); ++#endif ++ atomic_inc(&dev_priv->irq_received); ++ if (iir == 0) { ++#ifdef __linux__ ++ if (dev->pdev->msi_enabled) { ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ (void) I915_READ(IMR); ++ } ++#endif ++ return IRQ_NONE; ++ } ++ ++ /* ++ * Clear the PIPE(A|B)STAT regs before the IIR otherwise ++ * we may get extra interrupts. ++ */ ++ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { ++ pipea_stats = I915_READ(PIPEASTAT); ++ if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| ++ PIPE_VBLANK_INTERRUPT_STATUS)) ++ { ++ vblank++; ++ drm_handle_vblank(dev, i915_get_plane(dev, 0)); ++ } ++ I915_WRITE(PIPEASTAT, pipea_stats); ++ } ++ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { ++ pipeb_stats = I915_READ(PIPEBSTAT); ++ /* Ack the event */ ++ I915_WRITE(PIPEBSTAT, pipeb_stats); ++ ++ /* The vblank interrupt gets enabled even if we didn't ask for ++ it, so make sure it's shut down again */ ++ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) ++ pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE); ++ ++ if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| ++ PIPE_VBLANK_INTERRUPT_STATUS)) ++ { ++ vblank++; ++ drm_handle_vblank(dev, i915_get_plane(dev, 1)); ++ } ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE) ++ opregion_asle_intr(dev); ++#endif ++#endif ++ I915_WRITE(PIPEBSTAT, pipeb_stats); ++ } ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ if (iir & I915_ASLE_INTERRUPT) ++ opregion_asle_intr(dev); ++#endif ++#endif ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ ++ I915_WRITE(IIR, iir); ++#ifdef __linux__ ++ if (dev->pdev->msi_enabled) ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++#endif ++ (void) I915_READ(IIR); /* Flush posted writes */ ++ ++ if (iir & I915_USER_INTERRUPT) { ++#ifdef I915_HAVE_GEM ++ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); ++#endif ++ DRM_WAKEUP(&dev_priv->irq_queue); ++#ifdef I915_HAVE_FENCE ++ i915_fence_handler(dev); ++#endif ++ } ++ ++ if (vblank) { ++ if (dev_priv->swaps_pending > 0) ++ drm_locked_tasklet(dev, i915_vblank_tasklet); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++int i915_emit_irq(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ i915_kernel_lost_context(dev); ++ ++ DRM_DEBUG("\n"); ++ ++ i915_emit_breadcrumb(dev); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(0); ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ ++ return dev_priv->counter; ++} ++ ++void i915_user_irq_on(drm_i915_private_t *dev_priv) ++{ ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++ if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)) ++ i915_enable_irq(dev_priv, I915_USER_INTERRUPT); ++ 
DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++} ++ ++void i915_user_irq_off(drm_i915_private_t *dev_priv) ++{ ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++#ifdef __linux__ ++ BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0); ++#endif ++ if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) ++ i915_disable_irq(dev_priv, I915_USER_INTERRUPT); ++ DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++} ++ ++ ++int i915_wait_irq(struct drm_device * dev, int irq_nr) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int ret = 0; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, ++ READ_BREADCRUMB(dev_priv)); ++ ++ if (READ_BREADCRUMB(dev_priv) >= irq_nr) { ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_dispatch = ++ READ_BREADCRUMB(dev_priv); ++ return 0; ++ } ++ ++ i915_user_irq_on(dev_priv); ++ DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, ++ READ_BREADCRUMB(dev_priv) >= irq_nr); ++ i915_user_irq_off(dev_priv); ++ ++ if (ret == -EBUSY) { ++ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", ++ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); ++ } ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_dispatch = ++ READ_BREADCRUMB(dev_priv); ++ return ret; ++} ++ ++/* Needs the lock as it touches the ring. ++ */ ++int i915_irq_emit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_irq_emit_t *emit = data; ++ int result; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ result = i915_emit_irq(dev); ++ ++ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++/* Doesn't need the hardware lock. 
++ */ ++int i915_irq_wait(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_irq_wait_t *irqwait = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ return i915_wait_irq(dev, irqwait->irq_seq); ++} ++ ++int i915_enable_vblank(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int pipe = i915_get_pipe(dev, plane); ++ u32 pipestat_reg = 0; ++ u32 mask_reg = 0; ++ u32 pipestat; ++ ++ switch (pipe) { ++ case 0: ++ pipestat_reg = PIPEASTAT; ++ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; ++ break; ++ case 1: ++ pipestat_reg = PIPEBSTAT; ++ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; ++ break; ++ default: ++ DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", ++ pipe); ++ break; ++ } ++ ++ if (pipestat_reg) ++ { ++ pipestat = I915_READ (pipestat_reg); ++ /* ++ * Older chips didn't have the start vblank interrupt, ++ * but ++ */ ++ if (IS_I965G (dev)) ++ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; ++ else ++ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; ++ /* ++ * Clear any pending status ++ */ ++ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | ++ PIPE_VBLANK_INTERRUPT_STATUS); ++ I915_WRITE(pipestat_reg, pipestat); ++ } ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++ i915_enable_irq(dev_priv, mask_reg); ++ DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++ ++ return 0; ++} ++ ++void i915_disable_vblank(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int pipe = i915_get_pipe(dev, plane); ++ u32 pipestat_reg = 0; ++ u32 mask_reg = 0; ++ u32 pipestat; ++ ++ switch (pipe) { ++ case 0: ++ pipestat_reg = PIPEASTAT; ++ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; ++ break; ++ case 1: ++ pipestat_reg = PIPEBSTAT; ++ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; ++ break; ++ default: ++ DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", ++ pipe); ++ break; ++ } ++ ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++ i915_disable_irq(dev_priv, mask_reg); ++ DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++ ++ if (pipestat_reg) ++ { ++ pipestat = I915_READ (pipestat_reg); ++ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | ++ PIPE_VBLANK_INTERRUPT_ENABLE); ++ /* ++ * Clear any pending status ++ */ ++ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | ++ PIPE_VBLANK_INTERRUPT_STATUS); ++ I915_WRITE(pipestat_reg, pipestat); ++ (void) I915_READ(pipestat_reg); ++ } ++} ++ ++static void i915_enable_interrupt (struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ ++ dev_priv->irq_mask_reg = ~0; ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); ++ (void) I915_READ (IER); ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ opregion_enable_asle(dev); ++#endif ++#endif ++ ++ dev_priv->irq_enabled = 1; ++} ++ ++/* Set the vblank monitor pipe ++ */ ++int i915_vblank_pipe_set(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int i915_vblank_pipe_get(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_vblank_pipe_t *pipe = data; ++ ++ if 
(!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; ++ ++ return 0; ++} ++ ++/** ++ * Schedule buffer swap at given vertical blank. ++ */ ++int i915_vblank_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_vblank_swap_t *swap = data; ++ drm_i915_vbl_swap_t *vbl_swap; ++ unsigned int pipe, seqtype, curseq, plane; ++ unsigned long irqflags; ++ struct list_head *list; ++ int ret; ++ ++ if (!dev_priv) { ++ DRM_ERROR("%s called with no initialization\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) { ++ DRM_DEBUG("Rotation not supported\n"); ++ return -EINVAL; ++ } ++ ++ if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | ++ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS | ++ _DRM_VBLANK_FLIP)) { ++ DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); ++ return -EINVAL; ++ } ++ ++ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; ++ pipe = i915_get_pipe(dev, plane); ++ ++ seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); ++ ++ if (!(dev_priv->vblank_pipe & (1 << pipe))) { ++ DRM_ERROR("Invalid pipe %d\n", pipe); ++ return -EINVAL; ++ } ++ ++ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); ++ ++ /* It makes no sense to schedule a swap for a drawable that doesn't have ++ * valid information at this point. E.g. this could mean that the X ++ * server is too old to push drawable information to the DRM, in which ++ * case all such swaps would become ineffective. ++ */ ++ if (!drm_get_drawable_info(dev, swap->drawable)) { ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); ++ DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); ++ return -EINVAL; ++ } ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); ++ ++ /* ++ * We take the ref here and put it when the swap actually completes ++ * in the tasklet. 
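One detail worth spelling out before the sequence checks below (the helper here is an editorial sketch, not patch code): the hardware frame counter only has 24 significant bits (max_vblank_count is set to 0xffffff in i915_driver_irq_postinstall), so both this function and the tasklet decide whether a target sequence has already been reached with an unsigned subtraction compared against half the range, 1 << 23.

/* Illustration only: mirrors the (curseq - sequence) <= (1<<23) tests. */
static int vblank_seq_reached(unsigned int curseq, unsigned int target)
{
	/* If the target is still in the future, the unsigned difference
	 * wraps around to a huge value; a small difference means the
	 * target has been reached or passed.
	 */
	return (curseq - target) <= (1U << 23);
}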
++ */ ++ ret = drm_vblank_get(dev, pipe); ++ if (ret) ++ return ret; ++ curseq = drm_vblank_count(dev, pipe); ++ ++ if (seqtype == _DRM_VBLANK_RELATIVE) ++ swap->sequence += curseq; ++ ++ if ((curseq - swap->sequence) <= (1<<23)) { ++ if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { ++ swap->sequence = curseq + 1; ++ } else { ++ DRM_DEBUG("Missed target sequence\n"); ++ drm_vblank_put(dev, pipe); ++ return -EINVAL; ++ } ++ } ++ ++ if (swap->seqtype & _DRM_VBLANK_FLIP) { ++ swap->sequence--; ++ ++ if ((curseq - swap->sequence) <= (1<<23)) { ++ struct drm_drawable_info *drw; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); ++ ++ drw = drm_get_drawable_info(dev, swap->drawable); ++ ++ if (!drw) { ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, ++ irqflags); ++ DRM_DEBUG("Invalid drawable ID %d\n", ++ swap->drawable); ++ drm_vblank_put(dev, pipe); ++ return -EINVAL; ++ } ++ ++ i915_dispatch_vsync_flip(dev, drw, plane); ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); ++ ++ drm_vblank_put(dev, pipe); ++ return 0; ++ } ++ } ++ ++ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); ++ ++ list_for_each(list, &dev_priv->vbl_swaps.head) { ++ vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); ++ ++ if (vbl_swap->drw_id == swap->drawable && ++ vbl_swap->plane == plane && ++ vbl_swap->sequence == swap->sequence) { ++ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); ++ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); ++ DRM_DEBUG("Already scheduled\n"); ++ return 0; ++ } ++ } ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); ++ ++ if (dev_priv->swaps_pending >= 100) { ++ DRM_DEBUG("Too many swaps queued\n"); ++ drm_vblank_put(dev, pipe); ++ return -EBUSY; ++ } ++ ++ vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); ++ ++ if (!vbl_swap) { ++ DRM_ERROR("Failed to allocate memory to queue swap\n"); ++ drm_vblank_put(dev, pipe); ++ return -ENOMEM; ++ } ++ ++ DRM_DEBUG("\n"); ++ ++ vbl_swap->drw_id = swap->drawable; ++ vbl_swap->plane = plane; ++ vbl_swap->sequence = swap->sequence; ++ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); ++ ++ if (vbl_swap->flip) ++ swap->sequence++; ++ ++ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); ++ ++ list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); ++ dev_priv->swaps_pending++; ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); ++ ++ return 0; ++} ++ ++/* drm_dma.h hooks ++*/ ++void i915_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ ++ I915_WRITE16(HWSTAM, 0xeffe); ++ I915_WRITE16(IMR, 0x0); ++ I915_WRITE16(IER, 0x0); ++} ++ ++int i915_driver_irq_postinstall(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int ret, num_pipes = 2; ++ ++ INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); ++ dev_priv->swaps_pending = 0; ++ ++ dev_priv->user_irq_refcount = 0; ++ dev_priv->irq_mask_reg = ~0; ++ ++ ret = drm_vblank_init(dev, num_pipes); ++ if (ret) ++ return ret; ++ ++ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; ++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ ++ ++ i915_enable_interrupt(dev); ++ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); ++ ++ /* ++ * Initialize the hardware status page IRQ location. 
++ */ ++ ++ I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); ++ return 0; ++} ++ ++void i915_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ u32 temp; ++ ++ if (!dev_priv) ++ return; ++ ++ dev_priv->vblank_pipe = 0; ++ ++ dev_priv->irq_enabled = 0; ++ I915_WRITE(HWSTAM, 0xffffffff); ++ I915_WRITE(IMR, 0xffffffff); ++ I915_WRITE(IER, 0x0); ++ ++ temp = I915_READ(PIPEASTAT); ++ I915_WRITE(PIPEASTAT, temp); ++ temp = I915_READ(PIPEBSTAT); ++ I915_WRITE(PIPEBSTAT, temp); ++ temp = I915_READ(IIR); ++ I915_WRITE(IIR, temp); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_mem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_mem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_mem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,386 @@ ++/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- ++ */ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/* This memory manager is integrated into the global/local lru ++ * mechanisms used by the clients. Specifically, it operates by ++ * setting the 'in_use' fields of the global LRU to indicate whether ++ * this region is privately allocated to a client. ++ * ++ * This does require the client to actually respect that field. ++ * ++ * Currently no effort is made to allocate 'private' memory in any ++ * clever way - the LRU information isn't used to determine which ++ * block to allocate, and the ring is drained prior to allocations -- ++ * in other words allocation is expensive. 
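A quick worked example of the LRU marking this comment describes; the numbers are hypothetical and chosen only to show the index arithmetic used by mark_block() below.

/* Illustration only: which texList regions a block covers. */
unsigned shift = 16;					/* 64 KiB LRU granularity */
unsigned start = 0x30000 >> shift;			/* region 3 */
unsigned end   = (0x30000 + 0x28000 - 1) >> shift;	/* region 5 */
/* Regions 3..5 get in_use set, their age bumped to the new texAge,
 * and are relinked at the head of the shared LRU list.
 */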
++ */ ++static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_tex_region *list; ++ unsigned shift, nr; ++ unsigned start; ++ unsigned end; ++ unsigned i; ++ int age; ++ ++ shift = dev_priv->tex_lru_log_granularity; ++ nr = I915_NR_TEX_REGIONS; ++ ++ start = p->start >> shift; ++ end = (p->start + p->size - 1) >> shift; ++ ++ age = ++sarea_priv->texAge; ++ list = sarea_priv->texList; ++ ++ /* Mark the regions with the new flag and update their age. Move ++ * them to head of list to preserve LRU semantics. ++ */ ++ for (i = start; i <= end; i++) { ++ list[i].in_use = in_use; ++ list[i].age = age; ++ ++ /* remove_from_list(i) ++ */ ++ list[(unsigned)list[i].next].prev = list[i].prev; ++ list[(unsigned)list[i].prev].next = list[i].next; ++ ++ /* insert_at_head(list, i) ++ */ ++ list[i].prev = nr; ++ list[i].next = list[nr].next; ++ list[(unsigned)list[nr].next].prev = i; ++ list[nr].next = i; ++ } ++} ++ ++/* Very simple allocator for agp memory, working on a static range ++ * already mapped into each client's address space. ++ */ ++ ++static struct mem_block *split_block(struct mem_block *p, int start, int size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++ out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++static struct mem_block *alloc_block(struct mem_block *heap, int size, ++ int align2, struct drm_file *file_priv) ++{ ++ struct mem_block *p; ++ int mask = (1 << align2) - 1; ++ ++ for (p = heap->next; p != heap; p = p->next) { ++ int start = (p->start + mask) & ~mask; ++ if (p->file_priv == NULL && start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ ++ return NULL; ++} ++ ++static struct mem_block *find_block(struct mem_block *heap, int start) ++{ ++ struct mem_block *p; ++ ++ for (p = heap->next; p != heap; p = p->next) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++static void free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ if (p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++ ++ if (p->prev->file_priv == NULL) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++} ++ ++/* Initialize. 
How to check for an uninitialized heap? ++ */ ++static int init_heap(struct mem_block **heap, int start, int size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* Free all blocks associated with the releasing file. ++ */ ++void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, ++ struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ for (p = heap->next; p != heap; p = p->next) { ++ if (p->file_priv == file_priv) { ++ p->file_priv = NULL; ++ mark_block(dev, p, 0); ++ } ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ for (p = heap->next; p != heap; p = p->next) { ++ while (p->file_priv == NULL && p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++ } ++} ++ ++/* Shutdown. ++ */ ++void i915_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS); ++ *heap = NULL; ++} ++ ++static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region) ++{ ++ switch (region) { ++ case I915_MEM_REGION_AGP: ++ return &dev_priv->agp_heap; ++ default: ++ return NULL; ++ } ++} ++ ++/* IOCTL HANDLERS */ ++ ++int i915_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_alloc_t *alloc = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, alloc->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ /* Make things easier on ourselves: all allocations at least ++ * 4k aligned. 
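Worth noting for readers of i915_mem_alloc() (editorial aside, hypothetical numbers): the alignment field is a log2 value, so the clamp to 12 just below enforces 1 << 12 = 4096-byte alignment, and alloc_block() rounds each candidate start up with a mask derived from it.

/* Illustration only: the rounding alloc_block() applies for align2 == 12. */
int align2 = 12;
int mask   = (1 << align2) - 1;		/* 0x0fff */
int start  = (0x1234 + mask) & ~mask;	/* rounds up to 0x2000 */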
++ */ ++ if (alloc->alignment < 12) ++ alloc->alignment = 12; ++ ++ block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); ++ ++ if (!block) ++ return -ENOMEM; ++ ++ mark_block(dev, block, 1); ++ ++ if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, ++ sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++int i915_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_free_t *memfree = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, memfree->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ block = find_block(*heap, memfree->region_offset); ++ if (!block) ++ return -EFAULT; ++ ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ mark_block(dev, block, 0); ++ free_block(block); ++ return 0; ++} ++ ++int i915_mem_init_heap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_init_heap_t *initheap = data; ++ struct mem_block **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, initheap->region); ++ if (!heap) ++ return -EFAULT; ++ ++ if (*heap) { ++ DRM_ERROR("heap already initialized?"); ++ return -EFAULT; ++ } ++ ++ return init_heap(heap, initheap->start, initheap->size); ++} ++ ++int i915_mem_destroy_heap( struct drm_device *dev, void *data, ++ struct drm_file *file_priv ) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_destroy_heap_t *destroyheap = data; ++ struct mem_block **heap; ++ ++ if ( !dev_priv ) { ++ DRM_ERROR( "called with no initialization\n" ); ++ return -EINVAL; ++ } ++ ++ heap = get_heap( dev_priv, destroyheap->region ); ++ if (!heap) { ++ DRM_ERROR("get_heap failed"); ++ return -EFAULT; ++ } ++ ++ if (!*heap) { ++ DRM_ERROR("heap not initialized?"); ++ return -EFAULT; ++ } ++ ++ i915_mem_takedown( heap ); ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_opregion.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_opregion.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_opregion.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_opregion.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,389 @@ ++/* ++ * ++ * Copyright 2008 Intel Corporation ++ * Copyright 2008 Red Hat ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. 
IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ * SOFTWARE. ++ * ++ */ ++ ++#include ++ ++#include "drmP.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++#define PCI_ASLE 0xe4 ++#define PCI_ASLS 0xfc ++ ++#define OPREGION_SZ (8*1024) ++#define OPREGION_HEADER_OFFSET 0 ++#define OPREGION_ACPI_OFFSET 0x100 ++#define OPREGION_SWSCI_OFFSET 0x200 ++#define OPREGION_ASLE_OFFSET 0x300 ++#define OPREGION_VBT_OFFSET 0x1000 ++ ++#define OPREGION_SIGNATURE "IntelGraphicsMem" ++#define MBOX_ACPI (1<<0) ++#define MBOX_SWSCI (1<<1) ++#define MBOX_ASLE (1<<2) ++ ++/* _DOD id definitions */ ++#define OUTPUT_CONNECTOR_MSK 0xf000 ++#define OUTPUT_CONNECTOR_OFFSET 12 ++ ++#define OUTPUT_PORT_MSK 0x00f0 ++#define OUTPUT_PORT_OFFSET 4 ++ #define OUTPUT_PORT_ANALOG 0 ++ #define OUTPUT_PORT_LVDS 1 ++ #define OUTPUT_PORT_SDVOB 2 ++ #define OUTPUT_PORT_SDVOC 3 ++ #define OUTPUT_PORT_TV 4 ++ ++#define OUTPUT_DISPLAY_MSK 0x0f00 ++#define OUTPUT_DISPLAY_OFFSET 8 ++ #define OUTPUT_DISPLAY_OTHER 0 ++ #define OUTPUT_DISPLAY_VGA 1 ++ #define OUTPUT_DISPLAY_TV 2 ++ #define OUTPUT_DISPLAY_DIGI 3 ++ #define OUTPUT_DISPLAY_FLAT_PANEL 4 ++ ++/* predefined id for integrated LVDS and VGA connector */ ++#define OUTPUT_INT_LVDS 0x00000110 ++#define OUTPUT_INT_VGA 0x80000100 ++ ++struct opregion_header { ++ u8 signature[16]; ++ u32 size; ++ u32 opregion_ver; ++ u8 bios_ver[32]; ++ u8 vbios_ver[16]; ++ u8 driver_ver[16]; ++ u32 mboxes; ++ u8 reserved[164]; ++} __attribute__((packed)); ++ ++/* OpRegion mailbox #1: public ACPI methods */ ++struct opregion_acpi { ++ u32 drdy; /* driver readiness */ ++ u32 csts; /* notification status */ ++ u32 cevt; /* current event */ ++ u8 rsvd1[20]; ++ u32 didl[8]; /* supported display devices ID list */ ++ u32 cpdl[8]; /* currently presented display list */ ++ u32 cadl[8]; /* currently active display list */ ++ u32 nadl[8]; /* next active devices list */ ++ u32 aslp; /* ASL sleep time-out */ ++ u32 tidx; /* toggle table index */ ++ u32 chpd; /* current hotplug enable indicator */ ++ u32 clid; /* current lid state*/ ++ u32 cdck; /* current docking state */ ++ u32 sxsw; /* Sx state resume */ ++ u32 evts; /* ASL supported events */ ++ u32 cnot; /* current OS notification */ ++ u32 nrdy; /* driver status */ ++ u8 rsvd2[60]; ++} __attribute__((packed)); ++ ++/* OpRegion mailbox #2: SWSCI */ ++struct opregion_swsci { ++ u32 scic; /* SWSCI command|status|data */ ++ u32 parm; /* command parameters */ ++ u32 dslp; /* driver sleep time-out */ ++ u8 rsvd[244]; ++} __attribute__((packed)); ++ ++/* OpRegion mailbox #3: ASLE */ ++struct opregion_asle { ++ u32 ardy; /* driver readiness */ ++ u32 aslc; /* ASLE interrupt command */ ++ u32 tche; /* technology enabled indicator */ ++ u32 alsi; /* current ALS illuminance reading */ ++ u32 bclp; /* backlight brightness to set */ ++ u32 pfit; /* panel fitting state */ ++ u32 cblv; /* current brightness level */ ++ u16 bclm[20]; /* backlight level duty cycle mapping table */ ++ u32 cpfm; /* current panel fitting mode */ ++ u32 epfm; /* enabled panel fitting modes */ ++ u8 plut[74]; /* panel LUT and identifier */ ++ u32 pfmb; /* PWM freq and min brightness */ ++ u8 rsvd[102]; ++} __attribute__((packed)); ++ ++/* ASLE irq request bits */ ++#define ASLE_SET_ALS_ILLUM (1 << 0) ++#define ASLE_SET_BACKLIGHT (1 
<< 1) ++#define ASLE_SET_PFIT (1 << 2) ++#define ASLE_SET_PWM_FREQ (1 << 3) ++#define ASLE_REQ_MSK 0xf ++ ++/* response bits of ASLE irq request */ ++#define ASLE_ALS_ILLUM_FAIL (2<<10) ++#define ASLE_BACKLIGHT_FAIL (2<<12) ++#define ASLE_PFIT_FAIL (2<<14) ++#define ASLE_PWM_FREQ_FAIL (2<<16) ++ ++/* ASLE backlight brightness to set */ ++#define ASLE_BCLP_VALID (1<<31) ++#define ASLE_BCLP_MSK (~(1<<31)) ++ ++/* ASLE panel fitting request */ ++#define ASLE_PFIT_VALID (1<<31) ++#define ASLE_PFIT_CENTER (1<<0) ++#define ASLE_PFIT_STRETCH_TEXT (1<<1) ++#define ASLE_PFIT_STRETCH_GFX (1<<2) ++ ++/* PWM frequency and minimum brightness */ ++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff) ++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) ++#define ASLE_PFMB_PWM_MASK (0x7ffffe00) ++#define ASLE_PFMB_PWM_VALID (1<<31) ++ ++#define ASLE_CBLV_VALID (1<<31) ++ ++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct opregion_asle *asle = dev_priv->opregion.asle; ++ u32 blc_pwm_ctl; ++ ++ if (!(bclp & ASLE_BCLP_VALID)) ++ return ASLE_BACKLIGHT_FAIL; ++ ++ bclp &= ASLE_BCLP_MSK; ++ if (bclp < 0 || bclp > 255) ++ return ASLE_BACKLIGHT_FAIL; ++ ++ blc_pwm_ctl = I915_READ(BLC_PWM_CTL); ++ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; ++ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1)); ++ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; ++ ++ return 0; ++} ++ ++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) ++{ ++ return 0; ++} ++ ++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ if (pfmb & ASLE_PFMB_PWM_VALID) { ++ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); ++ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; ++ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; ++ pwm = pwm >> 9; ++ // FIXME - what do we do with the PWM? 
++ } ++ return 0; ++} ++ ++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) ++{ ++ if (!(pfit & ASLE_PFIT_VALID)) ++ return ASLE_PFIT_FAIL; ++ return 0; ++} ++ ++void opregion_asle_intr(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct opregion_asle *asle = dev_priv->opregion.asle; ++ u32 asle_stat = 0; ++ u32 asle_req; ++ ++ if (!asle) ++ return; ++ ++ asle_req = asle->aslc & ASLE_REQ_MSK; ++ ++ if (!asle_req) { ++ DRM_DEBUG("non asle set request??\n"); ++ return; ++ } ++ ++ if (asle_req & ASLE_SET_ALS_ILLUM) ++ asle_stat |= asle_set_als_illum(dev, asle->alsi); ++ ++ if (asle_req & ASLE_SET_BACKLIGHT) ++ asle_stat |= asle_set_backlight(dev, asle->bclp); ++ ++ if (asle_req & ASLE_SET_PFIT) ++ asle_stat |= asle_set_pfit(dev, asle->pfit); ++ ++ if (asle_req & ASLE_SET_PWM_FREQ) ++ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); ++ ++ asle->aslc = asle_stat; ++} ++ ++#define ASLE_ALS_EN (1<<0) ++#define ASLE_BLC_EN (1<<1) ++#define ASLE_PFIT_EN (1<<2) ++#define ASLE_PFMB_EN (1<<3) ++ ++void opregion_enable_asle(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct opregion_asle *asle = dev_priv->opregion.asle; ++ ++ if (asle) { ++ if (IS_MOBILE(dev)) { ++ u32 pipeb_stats = I915_READ(PIPEBSTAT); ++ /* Some hardware uses the legacy backlight controller ++ to signal interrupts, so we need to set up pipe B ++ to generate an IRQ on writes */ ++ pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE; ++ I915_WRITE(PIPEBSTAT, pipeb_stats); ++ ++ dev_priv->irq_mask_reg &= ++ ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; ++ } ++ ++ dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT; ++ ++ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ++ ASLE_PFMB_EN; ++ asle->ardy = 1; ++ } ++} ++ ++#define ACPI_EV_DISPLAY_SWITCH (1<<0) ++#define ACPI_EV_LID (1<<1) ++#define ACPI_EV_DOCK (1<<2) ++ ++static struct intel_opregion *system_opregion; ++ ++int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, ++ void *data) ++{ ++ /* The only video events relevant to opregion are 0x80. These indicate ++ either a docking event, lid switch or display switch request. In ++ Linux, these are handled by the dock, button and video drivers. 
++ We might want to fix the video driver to be opregion-aware in ++ future, but right now we just indicate to the firmware that the ++ request has been handled */ ++ ++ struct opregion_acpi *acpi; ++ ++ if (!system_opregion) ++ return NOTIFY_DONE; ++ ++ acpi = system_opregion->acpi; ++ acpi->csts = 0; ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block intel_opregion_notifier = { ++ .notifier_call = intel_opregion_video_event, ++}; ++ ++int intel_opregion_init(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_opregion *opregion = &dev_priv->opregion; ++ void *base; ++ u32 asls, mboxes; ++ int err = 0; ++ ++ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); ++ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); ++ if (asls == 0) { ++ DRM_DEBUG("ACPI OpRegion not supported!\n"); ++ return -ENOTSUPP; ++ } ++ ++ base = ioremap(asls, OPREGION_SZ); ++ if (!base) ++ return -ENOMEM; ++ ++ opregion->header = base; ++ if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { ++ DRM_DEBUG("opregion signature mismatch\n"); ++ err = -EINVAL; ++ goto err_out; ++ } ++ ++ mboxes = opregion->header->mboxes; ++ if (mboxes & MBOX_ACPI) { ++ DRM_DEBUG("Public ACPI methods supported\n"); ++ opregion->acpi = base + OPREGION_ACPI_OFFSET; ++ } else { ++ DRM_DEBUG("Public ACPI methods not supported\n"); ++ err = -ENOTSUPP; ++ goto err_out; ++ } ++ opregion->enabled = 1; ++ ++ if (mboxes & MBOX_SWSCI) { ++ DRM_DEBUG("SWSCI supported\n"); ++ opregion->swsci = base + OPREGION_SWSCI_OFFSET; ++ } ++ if (mboxes & MBOX_ASLE) { ++ DRM_DEBUG("ASLE supported\n"); ++ opregion->asle = base + OPREGION_ASLE_OFFSET; ++ } ++ ++ /* Notify BIOS we are ready to handle ACPI video ext notifs. ++ * Right now, all the events are handled by the ACPI video module. ++ * We don't actually need to do anything with them. */ ++ opregion->acpi->csts = 0; ++ opregion->acpi->drdy = 1; ++ ++ system_opregion = opregion; ++ register_acpi_notifier(&intel_opregion_notifier); ++ ++ return 0; ++ ++err_out: ++ iounmap(opregion->header); ++ opregion->header = NULL; ++ return err; ++} ++ ++void intel_opregion_free(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_opregion *opregion = &dev_priv->opregion; ++ ++ if (!opregion->enabled) ++ return; ++ ++ opregion->acpi->drdy = 0; ++ ++ system_opregion = NULL; ++ unregister_acpi_notifier(&intel_opregion_notifier); ++ ++ /* just clear all opregion memory pointers now */ ++ iounmap(opregion->header); ++ opregion->header = NULL; ++ opregion->acpi = NULL; ++ opregion->swsci = NULL; ++ opregion->asle = NULL; ++ ++ opregion->enabled = 0; ++} ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_suspend.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_suspend.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/i915_suspend.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/i915_suspend.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,520 @@ ++/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- ++ */ ++/* ++ * ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (pipe == PIPE_A) ++ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); ++ else ++ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); ++} ++ ++static void i915_save_palette(struct drm_device *dev, enum pipe pipe) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); ++ u32 *array; ++ int i; ++ ++ if (!i915_pipe_enabled(dev, pipe)) ++ return; ++ ++ if (pipe == PIPE_A) ++ array = dev_priv->save_palette_a; ++ else ++ array = dev_priv->save_palette_b; ++ ++ for(i = 0; i < 256; i++) ++ array[i] = I915_READ(reg + (i << 2)); ++} ++ ++static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long reg = (pipe == PIPE_A ? 
PALETTE_A : PALETTE_B); ++ u32 *array; ++ int i; ++ ++ if (!i915_pipe_enabled(dev, pipe)) ++ return; ++ ++ if (pipe == PIPE_A) ++ array = dev_priv->save_palette_a; ++ else ++ array = dev_priv->save_palette_b; ++ ++ for(i = 0; i < 256; i++) ++ I915_WRITE(reg + (i << 2), array[i]); ++} ++ ++static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_WRITE8(index_port, reg); ++ return I915_READ8(data_port); ++} ++ ++static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_READ8(st01); ++ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); ++ return I915_READ8(VGA_AR_DATA_READ); ++} ++ ++static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_READ8(st01); ++ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); ++ I915_WRITE8(VGA_AR_DATA_WRITE, val); ++} ++ ++static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_WRITE8(index_port, reg); ++ I915_WRITE8(data_port, val); ++} ++ ++static void i915_save_vga(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ u16 cr_index, cr_data, st01; ++ ++ /* VGA color palette registers */ ++ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); ++ /* DACCRX automatically increments during read */ ++ I915_WRITE8(VGA_DACRX, 0); ++ /* Read 3 bytes of color data from each index */ ++ for (i = 0; i < 256 * 3; i++) ++ dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA); ++ ++ /* MSR bits */ ++ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); ++ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { ++ cr_index = VGA_CR_INDEX_CGA; ++ cr_data = VGA_CR_DATA_CGA; ++ st01 = VGA_ST01_CGA; ++ } else { ++ cr_index = VGA_CR_INDEX_MDA; ++ cr_data = VGA_CR_DATA_MDA; ++ st01 = VGA_ST01_MDA; ++ } ++ ++ /* CRT controller regs */ ++ i915_write_indexed(dev, cr_index, cr_data, 0x11, ++ i915_read_indexed(dev, cr_index, cr_data, 0x11) & ++ (~0x80)); ++ for (i = 0; i <= 0x24; i++) ++ dev_priv->saveCR[i] = ++ i915_read_indexed(dev, cr_index, cr_data, i); ++ /* Make sure we don't turn off CR group 0 writes */ ++ dev_priv->saveCR[0x11] &= ~0x80; ++ ++ /* Attribute controller registers */ ++ I915_READ8(st01); ++ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); ++ for (i = 0; i <= 0x14; i++) ++ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); ++ I915_READ8(st01); ++ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); ++ I915_READ8(st01); ++ ++ /* Graphics controller registers */ ++ for (i = 0; i < 9; i++) ++ dev_priv->saveGR[i] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); ++ ++ dev_priv->saveGR[0x10] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); ++ dev_priv->saveGR[0x11] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); ++ dev_priv->saveGR[0x18] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); ++ ++ /* Sequencer registers */ ++ for (i = 0; i < 8; i++) ++ dev_priv->saveSR[i] = ++ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); ++} ++ ++static void i915_restore_vga(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ u16 cr_index, cr_data, st01; ++ ++ /* MSR bits */ ++ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); ++ if (dev_priv->saveMSR 
& VGA_MSR_CGA_MODE) { ++ cr_index = VGA_CR_INDEX_CGA; ++ cr_data = VGA_CR_DATA_CGA; ++ st01 = VGA_ST01_CGA; ++ } else { ++ cr_index = VGA_CR_INDEX_MDA; ++ cr_data = VGA_CR_DATA_MDA; ++ st01 = VGA_ST01_MDA; ++ } ++ ++ /* Sequencer registers, don't write SR07 */ ++ for (i = 0; i < 7; i++) ++ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, ++ dev_priv->saveSR[i]); ++ ++ /* CRT controller regs */ ++ /* Enable CR group 0 writes */ ++ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); ++ for (i = 0; i <= 0x24; i++) ++ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); ++ ++ /* Graphics controller regs */ ++ for (i = 0; i < 9; i++) ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, ++ dev_priv->saveGR[i]); ++ ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, ++ dev_priv->saveGR[0x10]); ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, ++ dev_priv->saveGR[0x11]); ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, ++ dev_priv->saveGR[0x18]); ++ ++ /* Attribute controller registers */ ++ I915_READ8(st01); /* switch back to index mode */ ++ for (i = 0; i <= 0x14; i++) ++ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); ++ I915_READ8(st01); /* switch back to index mode */ ++ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); ++ I915_READ8(st01); ++ ++ /* VGA color palette registers */ ++ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); ++ /* DACCRX automatically increments during read */ ++ I915_WRITE8(VGA_DACWX, 0); ++ /* Read 3 bytes of color data from each index */ ++ for (i = 0; i < 256 * 3; i++) ++ I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]); ++ ++} ++ ++int i915_save_state(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ ++#if defined(__FreeBSD__) ++ dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1); ++#else ++ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); ++#endif ++ ++ /* Display arbitration control */ ++ dev_priv->saveDSPARB = I915_READ(DSPARB); ++ ++ /* Pipe & plane A info */ ++ dev_priv->savePIPEACONF = I915_READ(PIPEACONF); ++ dev_priv->savePIPEASRC = I915_READ(PIPEASRC); ++ dev_priv->saveFPA0 = I915_READ(FPA0); ++ dev_priv->saveFPA1 = I915_READ(FPA1); ++ dev_priv->saveDPLL_A = I915_READ(DPLL_A); ++ if (IS_I965G(dev)) ++ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); ++ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); ++ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); ++ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); ++ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); ++ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); ++ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); ++ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); ++ ++ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); ++ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); ++ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); ++ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); ++ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); ++ if (IS_I965G(dev)) { ++ dev_priv->saveDSPASURF = I915_READ(DSPASURF); ++ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); ++ } ++ i915_save_palette(dev, PIPE_A); ++ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); ++ ++ /* Pipe & plane B info */ ++ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); ++ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); ++ dev_priv->saveFPB0 = I915_READ(FPB0); ++ dev_priv->saveFPB1 = I915_READ(FPB1); ++ dev_priv->saveDPLL_B = I915_READ(DPLL_B); ++ if (IS_I965G(dev)) ++ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); ++ dev_priv->saveHTOTAL_B = 
I915_READ(HTOTAL_B); ++ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); ++ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); ++ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); ++ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); ++ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); ++ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); ++ ++ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); ++ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); ++ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); ++ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); ++ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); ++ if (IS_I965GM(dev) || IS_GM45(dev)) { ++ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); ++ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); ++ } ++ i915_save_palette(dev, PIPE_B); ++ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); ++ ++ /* CRT state */ ++ dev_priv->saveADPA = I915_READ(ADPA); ++ ++ /* LVDS state */ ++ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); ++ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); ++ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); ++ if (IS_I965G(dev)) ++ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); ++ if (IS_MOBILE(dev) && !IS_I830(dev)) ++ dev_priv->saveLVDS = I915_READ(LVDS); ++ if (!IS_I830(dev) && !IS_845G(dev)) ++ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); ++ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); ++ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); ++ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); ++ ++ /* FIXME: save TV & SDVO state */ ++ ++ /* FBC state */ ++ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); ++ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); ++ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); ++ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); ++ ++ /* Interrupt state */ ++ dev_priv->saveIIR = I915_READ(IIR); ++ dev_priv->saveIER = I915_READ(IER); ++ dev_priv->saveIMR = I915_READ(IMR); ++ ++ /* VGA state */ ++ dev_priv->saveVGA0 = I915_READ(VGA0); ++ dev_priv->saveVGA1 = I915_READ(VGA1); ++ dev_priv->saveVGA_PD = I915_READ(VGA_PD); ++ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); ++ ++ /* Clock gating state */ ++ dev_priv->saveD_STATE = I915_READ(D_STATE); ++ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS); ++ ++ /* Cache mode state */ ++ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); ++ ++ /* Memory Arbitration state */ ++ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); ++ ++ /* Scratch space */ ++ for (i = 0; i < 16; i++) { ++ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); ++ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); ++ } ++ for (i = 0; i < 3; i++) ++ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); ++ ++ i915_save_vga(dev); ++ ++ return 0; ++} ++ ++int i915_restore_state(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ ++#if defined(__FreeBSD__) ++ pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1); ++#else ++ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); ++#endif ++ ++ I915_WRITE(DSPARB, dev_priv->saveDSPARB); ++ ++ /* Pipe & plane A info */ ++ /* Prime the clock */ ++ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { ++ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & ++ ~DPLL_VCO_ENABLE); ++ DRM_UDELAY(150); ++ } ++ I915_WRITE(FPA0, dev_priv->saveFPA0); ++ I915_WRITE(FPA1, dev_priv->saveFPA1); ++ /* Actually enable it */ ++ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); ++ DRM_UDELAY(150); ++ if (IS_I965G(dev)) ++ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); ++ DRM_UDELAY(150); ++ ++ /* Restore mode */ ++ 
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); ++ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); ++ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); ++ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); ++ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); ++ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); ++ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); ++ ++ /* Restore plane info */ ++ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); ++ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); ++ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); ++ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); ++ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); ++ if (IS_I965G(dev)) { ++ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); ++ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); ++ } ++ ++ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); ++ ++ i915_restore_palette(dev, PIPE_A); ++ /* Enable the plane */ ++ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); ++ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); ++ ++ /* Pipe & plane B info */ ++ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { ++ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & ++ ~DPLL_VCO_ENABLE); ++ DRM_UDELAY(150); ++ } ++ I915_WRITE(FPB0, dev_priv->saveFPB0); ++ I915_WRITE(FPB1, dev_priv->saveFPB1); ++ /* Actually enable it */ ++ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); ++ DRM_UDELAY(150); ++ if (IS_I965G(dev)) ++ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); ++ DRM_UDELAY(150); ++ ++ /* Restore mode */ ++ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); ++ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); ++ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); ++ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); ++ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); ++ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); ++ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); ++ ++ /* Restore plane info */ ++ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); ++ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); ++ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); ++ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); ++ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); ++ if (IS_I965G(dev)) { ++ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); ++ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); ++ } ++ ++ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); ++ ++ i915_restore_palette(dev, PIPE_B); ++ /* Enable the plane */ ++ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); ++ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); ++ ++ /* CRT state */ ++ I915_WRITE(ADPA, dev_priv->saveADPA); ++ ++ /* LVDS state */ ++ if (IS_I965G(dev)) ++ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); ++ if (IS_MOBILE(dev) && !IS_I830(dev)) ++ I915_WRITE(LVDS, dev_priv->saveLVDS); ++ if (!IS_I830(dev) && !IS_845G(dev)) ++ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); ++ ++ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); ++ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); ++ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); ++ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); ++ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); ++ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); ++ ++ /* FIXME: restore TV & SDVO state */ ++ ++ /* FBC info */ ++ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); ++ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); ++ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); ++ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); ++ ++ /* VGA state */ ++ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); ++ I915_WRITE(VGA0, dev_priv->saveVGA0); ++ I915_WRITE(VGA1, dev_priv->saveVGA1); ++ I915_WRITE(VGA_PD, 
dev_priv->saveVGA_PD); ++ DRM_UDELAY(150); ++ ++ /* Clock gating state */ ++ I915_WRITE (D_STATE, dev_priv->saveD_STATE); ++ I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS); ++ ++ /* Cache mode state */ ++ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); ++ ++ /* Memory arbitration state */ ++ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); ++ ++ for (i = 0; i < 16; i++) { ++ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); ++ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); ++ } ++ for (i = 0; i < 3; i++) ++ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); ++ ++ i915_restore_vga(dev); ++ ++ return 0; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/Kconfig kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/Kconfig +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/Kconfig 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,130 @@ ++# ++# DRM device configuration from Tungsten Graphics ++# ++# This driver provides support for the ++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. ++# ++# The driver is the Tungsten alternative of the original DRM driver. ++# ++ ++menuconfig DRM_TUNGSTEN ++ tristate "Direct Rendering Manager (Tungsten - XFree86 4.1.0 and higher DRI support)" ++ help ++ Kernel-level support for the Direct Rendering Infrastructure (DRI) ++ introduced in XFree86 4.0. If you say Y here, you need to select ++ the module that's right for your graphics card from the list below. ++ These modules provide support for synchronization, security, and ++ DMA transfers. Please see for more ++ details. You should also select and configure AGP ++ (/dev/agpgart) support. ++ ++config DRM_TUNGSTEN_PVR2D ++ tristate "PVR2D kernel helper" ++ depends on DRM_TUNGSTEN && PVR ++ help ++ Choose this option if you want to give DRI access to your card ++ handled by the Imagination PowerVR framework. If M is selected, ++ the module will be called pvr2d. ++ ++if DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG ++ ++config DRM_TUNGSTEN_TDFX ++ tristate "3dfx Banshee/Voodoo3+" ++ help ++ Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), ++ graphics card. If M is selected, the module will be called tdfx. ++ ++config DRM_TUNGSTEN_R128 ++ tristate "ATI Rage 128" ++ help ++ Choose this option if you have an ATI Rage 128 graphics card. If M ++ is selected, the module will be called r128. AGP support for ++ this card is strongly suggested (unless you have a PCI version). ++ ++config DRM_TUNGSTEN_RADEON ++ tristate "ATI Radeon" ++ help ++ Choose this option if you have an ATI Radeon graphics card. There ++ are both PCI and AGP versions. You don't need to choose this to ++ run the Radeon in plain VGA mode. ++ ++ If M is selected, the module will be called radeon. ++ ++config DRM_TUNGSTEN_I810 ++ tristate "Intel I810" ++ depends on AGP && AGP_INTEL ++ help ++ Choose this option if you have an Intel I810 graphics card. If M is ++ selected, the module will be called i810. AGP support is required ++ for this driver to work. ++ ++config DRM_TUNGSTEN_I915 ++ tristate "i915 driver" ++ depends on AGP && AGP_INTEL ++ help ++ Choose this option if you have a system that has Intel 830M, 845G, ++ 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the ++ module will be called i915. AGP support is required for this driver ++ to work. 
This driver is used by the Intel driver in X.org 6.8 and ++ XFree86 4.4 and above. If unsure, build this and i830 as modules and ++ the X server will load the correct one. ++ ++config DRM_TUNGSTEN_MGA ++ tristate "Matrox g200/g400" ++ help ++ Choose this option if you have a Matrox G200, G400 or G450 graphics ++ card. If M is selected, the module will be called mga. AGP ++ support is required for this driver to work. ++ ++config DRM_TUNGSTEN_SIS ++ tristate "SiS video cards" ++ depends on AGP ++ help ++ Choose this option if you have a SiS 630 or compatible video ++ chipset. If M is selected the module will be called sis. AGP ++ support is required for this driver to work. ++ ++config DRM_TUNGSTEN_VIA ++ tristate "Via unichrome video cards" ++ help ++ Choose this option if you have a Via unichrome or compatible video ++ chipset. If M is selected the module will be called via. ++ ++config DRM_TUNGSTEN_SAVAGE ++ tristate "Savage video cards" ++ help ++ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister ++ chipset. If M is selected the module will be called savage. ++ ++config DRM_TUNGSTEN_FFB ++ tristate "Creator/Creator3D direct rendering" ++ help ++ Choose this option to include the Creator/Creator3D direct rendering ++ driver. If M is selected the module will be called ffb. ++ ++config DRM_TUNGSTEN_MACH64 ++ tristate "MACH64 Rage Pro video card" ++ help ++ Choose this option if you have a Mach64 Rage Pro chipset. ++ If M is selected the module will be called mach64. ++ ++config DRM_TUNGSTEN_NV ++ tristate "Nvidia video card (NV driver)" ++ help ++ Choose this option if you have a Nvidia chipset and want to use the ++ original nv driver. If M is selected the module will be called nv. ++ ++config DRM_TUNGSTEN_NOUVEAU ++ tristate "Nvidia video card (Nouveau driver)" ++ help ++ Choose this option if you have a Nvidia chipset and want to use the ++ nouveau driver. If M is selected the module will be called nouveau. ++ ++config DRM_TUNGSTEN_XGI ++ tristate "XGI video card" ++ help ++ Choose this option if you have a XGI chipset. If M is selected the ++ module will be called xgi. ++ ++endif # DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_dma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_dma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1778 @@ ++/* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */ ++/** ++ * \file mach64_dma.c ++ * DMA support for mach64 (Rage Pro) driver ++ * ++ * \author Gareth Hughes ++ * \author Frank C. Earl ++ * \author Leif Delgass ++ * \author José Fonseca ++ */ ++ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002 Frank C. Earl ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++/*******************************************************************/ ++/** \name Engine, FIFO control */ ++/*@{*/ ++ ++/** ++ * Waits for free entries in the FIFO. ++ * ++ * \note Most writes to Mach64 registers are automatically routed through ++ * command FIFO which is 16 entry deep. Prior to writing to any draw engine ++ * register one has to ensure that enough FIFO entries are available by calling ++ * this function. Failure to do so may cause the engine to lock. ++ * ++ * \param dev_priv pointer to device private data structure. ++ * \param entries number of free entries in the FIFO to wait for. ++ * ++ * \returns zero on success, or -EBUSY if the timeout (specificed by ++ * drm_mach64_private::usec_timeout) occurs. ++ */ ++int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries) ++{ ++ int slots = 0, i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK); ++ if (slots <= (0x8000 >> entries)) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++ DRM_INFO("failed! slots=%d entries=%d\n", slots, entries); ++ return -EBUSY; ++} ++ ++/** ++ * Wait for the draw engine to be idle. ++ */ ++int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv) ++{ ++ int i, ret; ++ ++ ret = mach64_do_wait_for_fifo(dev_priv, 16); ++ if (ret < 0) ++ return ret; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++ DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT)); ++ mach64_dump_ring_info(dev_priv); ++ return -EBUSY; ++} ++ ++/** ++ * Wait for free entries in the ring buffer. ++ * ++ * The Mach64 bus master can be configured to act as a virtual FIFO, using a ++ * circular buffer (commonly referred as "ring buffer" in other drivers) with ++ * pointers to engine commands. This allows the CPU to do other things while ++ * the graphics engine is busy, i.e., DMA mode. ++ * ++ * This function should be called before writing new entries to the ring ++ * buffer. ++ * ++ * \param dev_priv pointer to device private data structure. ++ * \param n number of free entries in the ring buffer to wait for. 
++ * ++ * \returns zero on success, or -EBUSY if the timeout (specificed by ++ * drm_mach64_private_t::usec_timeout) occurs. ++ * ++ * \sa mach64_dump_ring_info() ++ */ ++int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ mach64_update_ring_snapshot(dev_priv); ++ if (ring->space >= n) { ++ if (i > 0) ++ DRM_DEBUG("%d usecs\n", i); ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++ /* FIXME: This is being ignored... */ ++ DRM_ERROR("failed!\n"); ++ mach64_dump_ring_info(dev_priv); ++ return -EBUSY; ++} ++ ++/** ++ * Wait until all DMA requests have been processed... ++ * ++ * \sa mach64_wait_ring() ++ */ ++static int mach64_ring_idle(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ u32 head; ++ int i; ++ ++ head = ring->head; ++ i = 0; ++ while (i < dev_priv->usec_timeout) { ++ mach64_update_ring_snapshot(dev_priv); ++ if (ring->head == ring->tail && ++ !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) { ++ if (i > 0) ++ DRM_DEBUG("%d usecs\n", i); ++ return 0; ++ } ++ if (ring->head == head) { ++ ++i; ++ } else { ++ head = ring->head; ++ i = 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++ DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT)); ++ mach64_dump_ring_info(dev_priv); ++ return -EBUSY; ++} ++ ++/** ++ * Reset the the ring buffer descriptors. ++ * ++ * \sa mach64_do_engine_reset() ++ */ ++static void mach64_ring_reset(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ ++ mach64_do_release_used_buffers(dev_priv); ++ ring->head_addr = ring->start_addr; ++ ring->head = ring->tail = 0; ++ ring->space = ring->size; ++ ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ dev_priv->ring_running = 0; ++} ++ ++/** ++ * Ensure the all the queued commands will be processed. ++ */ ++int mach64_do_dma_flush(drm_mach64_private_t *dev_priv) ++{ ++ /* FIXME: It's not necessary to wait for idle when flushing ++ * we just need to ensure the ring will be completely processed ++ * in finite time without another ioctl ++ */ ++ return mach64_ring_idle(dev_priv); ++} ++ ++/** ++ * Stop all DMA activity. ++ */ ++int mach64_do_dma_idle(drm_mach64_private_t *dev_priv) ++{ ++ int ret; ++ ++ /* wait for completion */ ++ if ((ret = mach64_ring_idle(dev_priv)) < 0) { ++ DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n", ++ MACH64_READ(MACH64_BM_GUI_TABLE), ++ dev_priv->ring.tail); ++ return ret; ++ } ++ ++ mach64_ring_stop(dev_priv); ++ ++ /* clean up after pass */ ++ mach64_do_release_used_buffers(dev_priv); ++ return 0; ++} ++ ++/** ++ * Reset the engine. This will stop the DMA if it is running. ++ */ ++int mach64_do_engine_reset(drm_mach64_private_t *dev_priv) ++{ ++ u32 tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ /* Kill off any outstanding DMA transfers. ++ */ ++ tmp = MACH64_READ(MACH64_BUS_CNTL); ++ MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS); ++ ++ /* Reset the GUI engine (high to low transition). 
++ */ ++ tmp = MACH64_READ(MACH64_GEN_TEST_CNTL); ++ MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE); ++ /* Enable the GUI engine ++ */ ++ tmp = MACH64_READ(MACH64_GEN_TEST_CNTL); ++ MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE); ++ ++ /* ensure engine is not locked up by clearing any FIFO or HOST errors ++ */ ++ tmp = MACH64_READ(MACH64_BUS_CNTL); ++ MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000); ++ ++ /* Once GUI engine is restored, disable bus mastering */ ++ MACH64_WRITE(MACH64_SRC_CNTL, 0); ++ ++ /* Reset descriptor ring */ ++ mach64_ring_reset(dev_priv); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name Debugging output */ ++/*@{*/ ++ ++/** ++ * Dump engine registers values. ++ */ ++void mach64_dump_engine_info(drm_mach64_private_t *dev_priv) ++{ ++ DRM_INFO("\n"); ++ if (!dev_priv->is_pci) { ++ DRM_INFO(" AGP_BASE = 0x%08x\n", ++ MACH64_READ(MACH64_AGP_BASE)); ++ DRM_INFO(" AGP_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_AGP_CNTL)); ++ } ++ DRM_INFO(" ALPHA_TST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_ALPHA_TST_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" BM_COMMAND = 0x%08x\n", ++ MACH64_READ(MACH64_BM_COMMAND)); ++ DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n", ++ MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET)); ++ DRM_INFO(" BM_GUI_TABLE = 0x%08x\n", ++ MACH64_READ(MACH64_BM_GUI_TABLE)); ++ DRM_INFO(" BM_STATUS = 0x%08x\n", ++ MACH64_READ(MACH64_BM_STATUS)); ++ DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n", ++ MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR)); ++ DRM_INFO(" BM_SYSTEM_TABLE = 0x%08x\n", ++ MACH64_READ(MACH64_BM_SYSTEM_TABLE)); ++ DRM_INFO(" BUS_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_BUS_CNTL)); ++ DRM_INFO("\n"); ++ /* DRM_INFO( " CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */ ++ DRM_INFO(" CLR_CMP_CLR = 0x%08x\n", ++ MACH64_READ(MACH64_CLR_CMP_CLR)); ++ DRM_INFO(" CLR_CMP_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_CLR_CMP_CNTL)); ++ /* DRM_INFO( " CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */ ++ DRM_INFO(" CONFIG_CHIP_ID = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_CHIP_ID)); ++ DRM_INFO(" CONFIG_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_CNTL)); ++ DRM_INFO(" CONFIG_STAT0 = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_STAT0)); ++ DRM_INFO(" CONFIG_STAT1 = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_STAT1)); ++ DRM_INFO(" CONFIG_STAT2 = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_STAT2)); ++ DRM_INFO(" CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG)); ++ DRM_INFO(" CUSTOM_MACRO_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_CUSTOM_MACRO_CNTL)); ++ DRM_INFO("\n"); ++ /* DRM_INFO( " DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */ ++ /* DRM_INFO( " DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */ ++ DRM_INFO(" DP_BKGD_CLR = 0x%08x\n", ++ MACH64_READ(MACH64_DP_BKGD_CLR)); ++ DRM_INFO(" DP_FRGD_CLR = 0x%08x\n", ++ MACH64_READ(MACH64_DP_FRGD_CLR)); ++ DRM_INFO(" DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX)); ++ DRM_INFO(" DP_PIX_WIDTH = 0x%08x\n", ++ MACH64_READ(MACH64_DP_PIX_WIDTH)); ++ DRM_INFO(" DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC)); ++ DRM_INFO(" DP_WRITE_MASK = 0x%08x\n", ++ MACH64_READ(MACH64_DP_WRITE_MASK)); ++ DRM_INFO(" DSP_CONFIG = 0x%08x\n", ++ MACH64_READ(MACH64_DSP_CONFIG)); ++ DRM_INFO(" DSP_ON_OFF = 0x%08x\n", ++ MACH64_READ(MACH64_DSP_ON_OFF)); ++ DRM_INFO(" DST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_DST_CNTL)); ++ DRM_INFO(" DST_OFF_PITCH = 0x%08x\n", ++ MACH64_READ(MACH64_DST_OFF_PITCH)); ++ DRM_INFO("\n"); ++ /* DRM_INFO( 
" EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */ ++ DRM_INFO(" EXT_MEM_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_EXT_MEM_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" FIFO_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_FIFO_STAT)); ++ DRM_INFO("\n"); ++ DRM_INFO(" GEN_TEST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_GEN_TEST_CNTL)); ++ /* DRM_INFO( " GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */ ++ DRM_INFO(" GUI_CMDFIFO_DATA = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_CMDFIFO_DATA)); ++ DRM_INFO(" GUI_CMDFIFO_DEBUG = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG)); ++ DRM_INFO(" GUI_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_CNTL)); ++ DRM_INFO(" GUI_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_STAT)); ++ DRM_INFO(" GUI_TRAJ_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_TRAJ_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" HOST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_HOST_CNTL)); ++ DRM_INFO(" HW_DEBUG = 0x%08x\n", ++ MACH64_READ(MACH64_HW_DEBUG)); ++ DRM_INFO("\n"); ++ DRM_INFO(" MEM_ADDR_CONFIG = 0x%08x\n", ++ MACH64_READ(MACH64_MEM_ADDR_CONFIG)); ++ DRM_INFO(" MEM_BUF_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_MEM_BUF_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" PAT_REG0 = 0x%08x\n", ++ MACH64_READ(MACH64_PAT_REG0)); ++ DRM_INFO(" PAT_REG1 = 0x%08x\n", ++ MACH64_READ(MACH64_PAT_REG1)); ++ DRM_INFO("\n"); ++ DRM_INFO(" SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT)); ++ DRM_INFO(" SC_RIGHT = 0x%08x\n", ++ MACH64_READ(MACH64_SC_RIGHT)); ++ DRM_INFO(" SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP)); ++ DRM_INFO(" SC_BOTTOM = 0x%08x\n", ++ MACH64_READ(MACH64_SC_BOTTOM)); ++ DRM_INFO("\n"); ++ DRM_INFO(" SCALE_3D_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SCALE_3D_CNTL)); ++ DRM_INFO(" SCRATCH_REG0 = 0x%08x\n", ++ MACH64_READ(MACH64_SCRATCH_REG0)); ++ DRM_INFO(" SCRATCH_REG1 = 0x%08x\n", ++ MACH64_READ(MACH64_SCRATCH_REG1)); ++ DRM_INFO(" SETUP_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SETUP_CNTL)); ++ DRM_INFO(" SRC_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SRC_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" TEX_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_TEX_CNTL)); ++ DRM_INFO(" TEX_SIZE_PITCH = 0x%08x\n", ++ MACH64_READ(MACH64_TEX_SIZE_PITCH)); ++ DRM_INFO(" TIMER_CONFIG = 0x%08x\n", ++ MACH64_READ(MACH64_TIMER_CONFIG)); ++ DRM_INFO("\n"); ++ DRM_INFO(" Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL)); ++ DRM_INFO(" Z_OFF_PITCH = 0x%08x\n", ++ MACH64_READ(MACH64_Z_OFF_PITCH)); ++ DRM_INFO("\n"); ++} ++ ++#define MACH64_DUMP_CONTEXT 3 ++ ++/** ++ * Used by mach64_dump_ring_info() to dump the contents of the current buffer ++ * pointed by the ring head. 
++ */ ++static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv, ++ struct drm_buf *buf) ++{ ++ u32 addr = GETBUFADDR(buf); ++ u32 used = buf->used >> 2; ++ u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR); ++ u32 *p = GETBUFPTR(buf); ++ int skipped = 0; ++ ++ DRM_INFO("buffer contents:\n"); ++ ++ while (used) { ++ u32 reg, count; ++ ++ reg = le32_to_cpu(*p++); ++ if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 || ++ (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 && ++ addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) || ++ addr >= ++ GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) { ++ DRM_INFO("%08x: 0x%08x\n", addr, reg); ++ } ++ addr += 4; ++ used--; ++ ++ count = (reg >> 16) + 1; ++ reg = reg & 0xffff; ++ reg = MMSELECT(reg); ++ while (count && used) { ++ if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 || ++ (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 && ++ addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) || ++ addr >= ++ GETBUFADDR(buf) + buf->used - ++ MACH64_DUMP_CONTEXT * 4) { ++ DRM_INFO("%08x: 0x%04x = 0x%08x\n", addr, ++ reg, le32_to_cpu(*p)); ++ skipped = 0; ++ } else { ++ if (!skipped) { ++ DRM_INFO(" ...\n"); ++ skipped = 1; ++ } ++ } ++ p++; ++ addr += 4; ++ used--; ++ ++ reg += 4; ++ count--; ++ } ++ } ++ ++ DRM_INFO("\n"); ++} ++ ++/** ++ * Dump the ring state and contents, including the contents of the buffer being ++ * processed by the graphics engine. ++ */ ++void mach64_dump_ring_info(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ int i, skipped; ++ ++ DRM_INFO("\n"); ++ ++ DRM_INFO("ring contents:\n"); ++ DRM_INFO(" head_addr: 0x%08x head: %u tail: %u\n\n", ++ ring->head_addr, ring->head, ring->tail); ++ ++ skipped = 0; ++ for (i = 0; i < ring->size / sizeof(u32); i += 4) { ++ if (i <= MACH64_DUMP_CONTEXT * 4 || ++ i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 || ++ (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 && ++ i <= ring->tail + MACH64_DUMP_CONTEXT * 4) || ++ (i >= ring->head - MACH64_DUMP_CONTEXT * 4 && ++ i <= ring->head + MACH64_DUMP_CONTEXT * 4)) { ++ DRM_INFO(" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x%s%s\n", ++ (u32)(ring->start_addr + i * sizeof(u32)), ++ le32_to_cpu(((u32 *) ring->start)[i + 0]), ++ le32_to_cpu(((u32 *) ring->start)[i + 1]), ++ le32_to_cpu(((u32 *) ring->start)[i + 2]), ++ le32_to_cpu(((u32 *) ring->start)[i + 3]), ++ i == ring->head ? " (head)" : "", ++ i == ring->tail ? 
" (tail)" : ""); ++ skipped = 0; ++ } else { ++ if (!skipped) { ++ DRM_INFO(" ...\n"); ++ skipped = 1; ++ } ++ } ++ } ++ ++ DRM_INFO("\n"); ++ ++ if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) { ++ struct list_head *ptr; ++ u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]); ++ ++ list_for_each(ptr, &dev_priv->pending) { ++ drm_mach64_freelist_t *entry = ++ list_entry(ptr, drm_mach64_freelist_t, list); ++ struct drm_buf *buf = entry->buf; ++ ++ u32 buf_addr = GETBUFADDR(buf); ++ ++ if (buf_addr <= addr && addr < buf_addr + buf->used) ++ mach64_dump_buf_info(dev_priv, buf); ++ } ++ } ++ ++ DRM_INFO("\n"); ++ DRM_INFO(" BM_GUI_TABLE = 0x%08x\n", ++ MACH64_READ(MACH64_BM_GUI_TABLE)); ++ DRM_INFO("\n"); ++ DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n", ++ MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET)); ++ DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n", ++ MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR)); ++ DRM_INFO(" BM_COMMAND = 0x%08x\n", ++ MACH64_READ(MACH64_BM_COMMAND)); ++ DRM_INFO("\n"); ++ DRM_INFO(" BM_STATUS = 0x%08x\n", ++ MACH64_READ(MACH64_BM_STATUS)); ++ DRM_INFO(" BUS_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_BUS_CNTL)); ++ DRM_INFO(" FIFO_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_FIFO_STAT)); ++ DRM_INFO(" GUI_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_STAT)); ++ DRM_INFO(" SRC_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SRC_CNTL)); ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA descriptor ring macros */ ++/*@{*/ ++ ++/** ++ * Add the end mark to the ring's new tail position. ++ * ++ * The bus master engine will keep processing the DMA buffers listed in the ring ++ * until it finds this mark, making it stop. ++ * ++ * \sa mach64_clear_dma_eol ++ */ ++static __inline__ void mach64_set_dma_eol(volatile u32 *addr) ++{ ++#if defined(__i386__) ++ int nr = 31; ++ ++ /* Taken from include/asm-i386/bitops.h linux header */ ++ __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr) ++ :"Ir"(nr)); ++#elif defined(__powerpc__) ++ u32 old; ++ u32 mask = cpu_to_le32(MACH64_DMA_EOL); ++ ++ /* Taken from the include/asm-ppc/bitops.h linux header */ ++ __asm__ __volatile__("\n\ ++1: lwarx %0,0,%3 \n\ ++ or %0,%0,%2 \n\ ++ stwcx. %0,0,%3 \n\ ++ bne- 1b":"=&r"(old), "=m"(*addr) ++ :"r"(mask), "r"(addr), "m"(*addr) ++ :"cc"); ++#elif defined(__alpha__) ++ u32 temp; ++ u32 mask = MACH64_DMA_EOL; ++ ++ /* Taken from the include/asm-alpha/bitops.h linux header */ ++ __asm__ __volatile__("1: ldl_l %0,%3\n" ++ " bis %0,%2,%0\n" ++ " stl_c %0,%1\n" ++ " beq %0,2f\n" ++ ".subsection 2\n" ++ "2: br 1b\n" ++ ".previous":"=&r"(temp), "=m"(*addr) ++ :"Ir"(mask), "m"(*addr)); ++#else ++ u32 mask = cpu_to_le32(MACH64_DMA_EOL); ++ ++ *addr |= mask; ++#endif ++} ++ ++/** ++ * Remove the end mark from the ring's old tail position. ++ * ++ * It should be called after calling mach64_set_dma_eol to mark the ring's new ++ * tail position. ++ * ++ * We update the end marks while the bus master engine is in operation. Since ++ * the bus master engine may potentially be reading from the same position ++ * that we write, we must change atomically to avoid having intermediary bad ++ * data. 
++ */ ++static __inline__ void mach64_clear_dma_eol(volatile u32 *addr) ++{ ++#if defined(__i386__) ++ int nr = 31; ++ ++ /* Taken from include/asm-i386/bitops.h linux header */ ++ __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr) ++ :"Ir"(nr)); ++#elif defined(__powerpc__) ++ u32 old; ++ u32 mask = cpu_to_le32(MACH64_DMA_EOL); ++ ++ /* Taken from the include/asm-ppc/bitops.h linux header */ ++ __asm__ __volatile__("\n\ ++1: lwarx %0,0,%3 \n\ ++ andc %0,%0,%2 \n\ ++ stwcx. %0,0,%3 \n\ ++ bne- 1b":"=&r"(old), "=m"(*addr) ++ :"r"(mask), "r"(addr), "m"(*addr) ++ :"cc"); ++#elif defined(__alpha__) ++ u32 temp; ++ u32 mask = ~MACH64_DMA_EOL; ++ ++ /* Taken from the include/asm-alpha/bitops.h linux header */ ++ __asm__ __volatile__("1: ldl_l %0,%3\n" ++ " and %0,%2,%0\n" ++ " stl_c %0,%1\n" ++ " beq %0,2f\n" ++ ".subsection 2\n" ++ "2: br 1b\n" ++ ".previous":"=&r"(temp), "=m"(*addr) ++ :"Ir"(mask), "m"(*addr)); ++#else ++ u32 mask = cpu_to_le32(~MACH64_DMA_EOL); ++ ++ *addr &= mask; ++#endif ++} ++ ++#define RING_LOCALS \ ++ int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring ++ ++#define RING_WRITE_OFS _ring_write ++ ++#define BEGIN_RING(n) \ ++ do { \ ++ if (MACH64_VERBOSE) { \ ++ DRM_INFO( "BEGIN_RING( %d ) \n", \ ++ (n) ); \ ++ } \ ++ if (dev_priv->ring.space <= (n) * sizeof(u32)) { \ ++ int ret; \ ++ if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \ ++ DRM_ERROR( "wait_ring failed, resetting engine\n"); \ ++ mach64_dump_engine_info( dev_priv ); \ ++ mach64_do_engine_reset( dev_priv ); \ ++ return ret; \ ++ } \ ++ } \ ++ dev_priv->ring.space -= (n) * sizeof(u32); \ ++ _ring = (u32 *) dev_priv->ring.start; \ ++ _ring_tail = _ring_write = dev_priv->ring.tail; \ ++ _ring_mask = dev_priv->ring.tail_mask; \ ++ } while (0) ++ ++#define OUT_RING( x ) \ ++do { \ ++ if (MACH64_VERBOSE) { \ ++ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ ++ (unsigned int)(x), _ring_write ); \ ++ } \ ++ _ring[_ring_write++] = cpu_to_le32( x ); \ ++ _ring_write &= _ring_mask; \ ++} while (0) ++ ++#define ADVANCE_RING() \ ++do { \ ++ if (MACH64_VERBOSE) { \ ++ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ ++ _ring_write, _ring_tail ); \ ++ } \ ++ DRM_MEMORYBARRIER(); \ ++ mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] ); \ ++ DRM_MEMORYBARRIER(); \ ++ dev_priv->ring.tail = _ring_write; \ ++ mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \ ++} while (0) ++ ++/** ++ * Queue a DMA buffer of registers writes into the ring buffer. ++ */ ++int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *entry) ++{ ++ int bytes, pages, remainder; ++ u32 address, page; ++ int i; ++ struct drm_buf *buf = entry->buf; ++ RING_LOCALS; ++ ++ bytes = buf->used; ++ address = GETBUFADDR( buf ); ++ pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; ++ ++ BEGIN_RING( pages * 4 ); ++ ++ for ( i = 0 ; i < pages-1 ; i++ ) { ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); ++ OUT_RING( page ); ++ OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); ++ OUT_RING( 0 ); ++ } ++ ++ /* generate the final descriptor for any remaining commands in this buffer */ ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ remainder = bytes - i * MACH64_DMA_CHUNKSIZE; ++ ++ /* Save dword offset of last descriptor for this buffer. 
++ * This is needed to check for completion of the buffer in freelist_get ++ */ ++ entry->ring_ofs = RING_WRITE_OFS; ++ ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); ++ OUT_RING( page ); ++ OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); ++ OUT_RING( 0 ); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/** ++ * Queue DMA buffer controlling host data tranfers (e.g., blit). ++ * ++ * Almost identical to mach64_add_buf_to_ring. ++ */ ++int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *entry) ++{ ++ int bytes, pages, remainder; ++ u32 address, page; ++ int i; ++ struct drm_buf *buf = entry->buf; ++ RING_LOCALS; ++ ++ bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET; ++ pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; ++ address = GETBUFADDR( buf ); ++ ++ BEGIN_RING( 4 + pages * 4 ); ++ ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); ++ OUT_RING( address ); ++ OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET ); ++ OUT_RING( 0 ); ++ address += MACH64_HOSTDATA_BLIT_OFFSET; ++ ++ for ( i = 0 ; i < pages-1 ; i++ ) { ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); ++ OUT_RING( page ); ++ OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); ++ OUT_RING( 0 ); ++ } ++ ++ /* generate the final descriptor for any remaining commands in this buffer */ ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ remainder = bytes - i * MACH64_DMA_CHUNKSIZE; ++ ++ /* Save dword offset of last descriptor for this buffer. ++ * This is needed to check for completion of the buffer in freelist_get ++ */ ++ entry->ring_ofs = RING_WRITE_OFS; ++ ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); ++ OUT_RING( page ); ++ OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); ++ OUT_RING( 0 ); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA test and initialization */ ++/*@{*/ ++ ++/** ++ * Perform a simple DMA operation using the pattern registers to test whether ++ * DMA works. ++ * ++ * \return zero if successful. ++ * ++ * \note This function was the testbed for many experiences regarding Mach64 ++ * DMA operation. It is left here since it so tricky to get DMA operating ++ * properly in some architectures and hardware. ++ */ ++static int mach64_bm_dma_test(struct drm_device * dev) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_dma_handle_t *cpu_addr_dmah; ++ u32 data_addr; ++ u32 *table, *data; ++ u32 expected[2]; ++ u32 src_cntl, pat_reg0, pat_reg1; ++ int i, count, failed; ++ ++ DRM_DEBUG("\n"); ++ ++ table = (u32 *) dev_priv->ring.start; ++ ++ /* FIXME: get a dma buffer from the freelist here */ ++ DRM_DEBUG("Allocating data memory ...\n"); ++#ifdef __FreeBSD__ ++ DRM_UNLOCK(); ++#endif ++ cpu_addr_dmah = ++ drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful); ++#ifdef __FreeBSD__ ++ DRM_LOCK(); ++#endif ++ if (!cpu_addr_dmah) { ++ DRM_INFO("data-memory allocation failed!\n"); ++ return -ENOMEM; ++ } else { ++ data = (u32 *) cpu_addr_dmah->vaddr; ++ data_addr = (u32) cpu_addr_dmah->busaddr; ++ } ++ ++ /* Save the X server's value for SRC_CNTL and restore it ++ * in case our test fails. 
This prevents the X server ++ * from disabling it's cache for this register ++ */ ++ src_cntl = MACH64_READ(MACH64_SRC_CNTL); ++ pat_reg0 = MACH64_READ(MACH64_PAT_REG0); ++ pat_reg1 = MACH64_READ(MACH64_PAT_REG1); ++ ++ mach64_do_wait_for_fifo(dev_priv, 3); ++ ++ MACH64_WRITE(MACH64_SRC_CNTL, 0); ++ MACH64_WRITE(MACH64_PAT_REG0, 0x11111111); ++ MACH64_WRITE(MACH64_PAT_REG1, 0x11111111); ++ ++ mach64_do_wait_for_idle(dev_priv); ++ ++ for (i = 0; i < 2; i++) { ++ u32 reg; ++ reg = MACH64_READ((MACH64_PAT_REG0 + i * 4)); ++ DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg); ++ if (reg != 0x11111111) { ++ DRM_INFO("Error initializing test registers\n"); ++ DRM_INFO("resetting engine ...\n"); ++ mach64_do_engine_reset(dev_priv); ++ DRM_INFO("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ return -EIO; ++ } ++ } ++ ++ /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */ ++ count = 0; ++ ++ data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16)); ++ data[count++] = expected[0] = 0x22222222; ++ data[count++] = expected[1] = 0xaaaaaaaa; ++ ++ while (count < 1020) { ++ data[count++] = ++ cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16)); ++ data[count++] = 0x22222222; ++ data[count++] = 0xaaaaaaaa; ++ } ++ data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16)); ++ data[count++] = 0; ++ ++ DRM_DEBUG("Preparing table ...\n"); ++ table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR + ++ MACH64_APERTURE_OFFSET); ++ table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr); ++ table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32) ++ | MACH64_DMA_HOLD_OFFSET ++ | MACH64_DMA_EOL); ++ table[MACH64_DMA_RESERVED] = 0; ++ ++ DRM_DEBUG("table[0] = 0x%08x\n", table[0]); ++ DRM_DEBUG("table[1] = 0x%08x\n", table[1]); ++ DRM_DEBUG("table[2] = 0x%08x\n", table[2]); ++ DRM_DEBUG("table[3] = 0x%08x\n", table[3]); ++ ++ for (i = 0; i < 6; i++) { ++ DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]); ++ } ++ DRM_DEBUG(" ...\n"); ++ for (i = count - 5; i < count; i++) { ++ DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]); ++ } ++ ++ DRM_MEMORYBARRIER(); ++ ++ DRM_DEBUG("waiting for idle...\n"); ++ if ((i = mach64_do_wait_for_idle(dev_priv))) { ++ DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i); ++ DRM_INFO("resetting engine ...\n"); ++ mach64_do_engine_reset(dev_priv); ++ mach64_do_wait_for_fifo(dev_priv, 3); ++ MACH64_WRITE(MACH64_SRC_CNTL, src_cntl); ++ MACH64_WRITE(MACH64_PAT_REG0, pat_reg0); ++ MACH64_WRITE(MACH64_PAT_REG1, pat_reg1); ++ DRM_INFO("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ return i; ++ } ++ DRM_DEBUG("waiting for idle...done\n"); ++ ++ DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL)); ++ DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL)); ++ DRM_DEBUG("\n"); ++ DRM_DEBUG("data bus addr = 0x%08x\n", data_addr); ++ DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr); ++ ++ DRM_DEBUG("starting DMA transfer...\n"); ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ MACH64_WRITE(MACH64_SRC_CNTL, ++ MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC | ++ MACH64_SRC_BM_OP_SYSTEM_TO_REG); ++ ++ /* Kick off the transfer */ ++ DRM_DEBUG("starting DMA transfer... 
done.\n"); ++ MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0); ++ ++ DRM_DEBUG("waiting for idle...\n"); ++ ++ if ((i = mach64_do_wait_for_idle(dev_priv))) { ++ /* engine locked up, dump register state and reset */ ++ DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i); ++ mach64_dump_engine_info(dev_priv); ++ DRM_INFO("resetting engine ...\n"); ++ mach64_do_engine_reset(dev_priv); ++ mach64_do_wait_for_fifo(dev_priv, 3); ++ MACH64_WRITE(MACH64_SRC_CNTL, src_cntl); ++ MACH64_WRITE(MACH64_PAT_REG0, pat_reg0); ++ MACH64_WRITE(MACH64_PAT_REG1, pat_reg1); ++ DRM_INFO("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ return i; ++ } ++ ++ DRM_DEBUG("waiting for idle...done\n"); ++ ++ /* restore SRC_CNTL */ ++ mach64_do_wait_for_fifo(dev_priv, 1); ++ MACH64_WRITE(MACH64_SRC_CNTL, src_cntl); ++ ++ failed = 0; ++ ++ /* Check register values to see if the GUI master operation succeeded */ ++ for (i = 0; i < 2; i++) { ++ u32 reg; ++ reg = MACH64_READ((MACH64_PAT_REG0 + i * 4)); ++ DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg); ++ if (reg != expected[i]) { ++ failed = -1; ++ } ++ } ++ ++ /* restore pattern registers */ ++ mach64_do_wait_for_fifo(dev_priv, 2); ++ MACH64_WRITE(MACH64_PAT_REG0, pat_reg0); ++ MACH64_WRITE(MACH64_PAT_REG1, pat_reg1); ++ ++ DRM_DEBUG("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ DRM_DEBUG("returning ...\n"); ++ ++ return failed; ++} ++ ++/** ++ * Called during the DMA initialization ioctl to initialize all the necessary ++ * software and hardware state for DMA operation. ++ */ ++static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) ++{ ++ drm_mach64_private_t *dev_priv; ++ u32 tmp; ++ int i, ret; ++ ++ DRM_DEBUG("\n"); ++ ++ dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_mach64_private_t)); ++ ++ dev_priv->is_pci = init->is_pci; ++ ++ dev_priv->fb_bpp = init->fb_bpp; ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ dev_priv->depth_bpp = init->depth_bpp; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) | ++ (dev_priv->front_offset >> 3)); ++ dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) | ++ (dev_priv->back_offset >> 3)); ++ dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) | ++ (dev_priv->depth_offset >> 3)); ++ ++ dev_priv->usec_timeout = 1000000; ++ ++ /* Set up the freelist, placeholder list and pending list */ ++ INIT_LIST_HEAD(&dev_priv->free_list); ++ INIT_LIST_HEAD(&dev_priv->placeholders); ++ INIT_LIST_HEAD(&dev_priv->pending); ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ dev_priv->fb = drm_core_findmap(dev, init->fb_offset); ++ if (!dev_priv->fb) { ++ DRM_ERROR("can not find frame buffer map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); ++ if (!dev_priv->mmio) { ++ DRM_ERROR("can not find mmio map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ ++ 
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset); ++ if (!dev_priv->ring_map) { ++ DRM_ERROR("can not find ring map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->sarea_priv = (drm_mach64_sarea_t *) ++ ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); ++ ++ if (!dev_priv->is_pci) { ++ drm_core_ioremap(dev_priv->ring_map, dev); ++ if (!dev_priv->ring_map->handle) { ++ DRM_ERROR("can not ioremap virtual address for" ++ " descriptor ring\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -ENOMEM; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = ++ drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("can not find dma buffer map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ /* there might be a nicer way to do this - ++ dev isn't passed all the way though the mach64 - DA */ ++ dev_priv->dev_buffers = dev->agp_buffer_map; ++ ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev->agp_buffer_map->handle) { ++ DRM_ERROR("can not ioremap virtual address for" ++ " dma buffer\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -ENOMEM; ++ } ++ dev_priv->agp_textures = ++ drm_core_findmap(dev, init->agp_textures_offset); ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("can not find agp texture region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ } ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ dev_priv->driver_mode = init->dma_mode; ++ ++ /* changing the FIFO size from the default causes problems with DMA */ ++ tmp = MACH64_READ(MACH64_GUI_CNTL); ++ if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) { ++ DRM_INFO("Setting FIFO size to 128 entries\n"); ++ /* FIFO must be empty to change the FIFO depth */ ++ if ((ret = mach64_do_wait_for_idle(dev_priv))) { ++ DRM_ERROR ++ ("wait for idle failed before changing FIFO depth!\n"); ++ mach64_do_cleanup_dma(dev); ++ return ret; ++ } ++ MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK) ++ | MACH64_CMDFIFO_SIZE_128)); ++ /* need to read GUI_STAT for proper sync according to docs */ ++ if ((ret = mach64_do_wait_for_idle(dev_priv))) { ++ DRM_ERROR ++ ("wait for idle failed when changing FIFO depth!\n"); ++ mach64_do_cleanup_dma(dev); ++ return ret; ++ } ++ } ++ ++ dev_priv->ring.size = 0x4000; /* 16KB */ ++ dev_priv->ring.start = dev_priv->ring_map->handle; ++ dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset; ++ ++ memset(dev_priv->ring.start, 0, dev_priv->ring.size); ++ DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n", ++ dev_priv->ring.start, dev_priv->ring.start_addr); ++ ++ ret = 0; ++ if (dev_priv->driver_mode != MACH64_MODE_MMIO) { ++ ++ /* enable block 1 registers and bus mastering */ ++ MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL) ++ | MACH64_BUS_EXT_REG_EN) ++ & ~MACH64_BUS_MASTER_DIS)); ++ ++ /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */ ++ DRM_DEBUG("Starting DMA test...\n"); ++ if ((ret = mach64_bm_dma_test(dev))) { ++ dev_priv->driver_mode = MACH64_MODE_MMIO; ++ } ++ } ++ ++ switch (dev_priv->driver_mode) { ++ case MACH64_MODE_MMIO: ++ MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL) ++ | MACH64_BUS_EXT_REG_EN ++ | MACH64_BUS_MASTER_DIS)); ++ if (init->dma_mode == MACH64_MODE_MMIO) ++ DRM_INFO("Forcing 
pseudo-DMA mode\n"); ++ else ++ DRM_INFO ++ ("DMA test failed (ret=%d), using pseudo-DMA mode\n", ++ ret); ++ break; ++ case MACH64_MODE_DMA_SYNC: ++ DRM_INFO("DMA test succeeded, using synchronous DMA mode\n"); ++ break; ++ case MACH64_MODE_DMA_ASYNC: ++ default: ++ DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n"); ++ } ++ ++ dev_priv->ring_running = 0; ++ ++ /* setup offsets for physical address of table start and end */ ++ dev_priv->ring.head_addr = dev_priv->ring.start_addr; ++ dev_priv->ring.head = dev_priv->ring.tail = 0; ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ dev_priv->ring.space = dev_priv->ring.size; ++ ++ /* setup physical address and size of descriptor table */ ++ mach64_do_wait_for_fifo(dev_priv, 1); ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ (dev_priv->ring. ++ head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB)); ++ ++ /* init frame counter */ ++ dev_priv->sarea_priv->frames_queued = 0; ++ for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) { ++ dev_priv->frame_ofs[i] = ~0; /* All ones indicates placeholder */ ++ } ++ ++ /* Allocate the DMA buffer freelist */ ++ if ((ret = mach64_init_freelist(dev))) { ++ DRM_ERROR("Freelist allocation failed\n"); ++ mach64_do_cleanup_dma(dev); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++/*******************************************************************/ ++/** MMIO Pseudo-DMA (intended primarily for debugging, not performance) ++ */ ++ ++int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ volatile u32 *ring_read; ++ struct list_head *ptr; ++ drm_mach64_freelist_t *entry; ++ struct drm_buf *buf = NULL; ++ u32 *buf_ptr; ++ u32 used, reg, target; ++ int fifo, count, found, ret, no_idle_wait; ++ ++ fifo = count = reg = no_idle_wait = 0; ++ target = MACH64_BM_ADDR; ++ ++ if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) { ++ DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n"); ++ mach64_dump_engine_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return ret; ++ } ++ ++ ring_read = (u32 *) ring->start; ++ ++ while (ring->tail != ring->head) { ++ u32 buf_addr, new_target, offset; ++ u32 bytes, remaining, head, eol; ++ ++ head = ring->head; ++ ++ new_target = ++ le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET; ++ buf_addr = le32_to_cpu(ring_read[head++]); ++ eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL; ++ bytes = le32_to_cpu(ring_read[head++]) ++ & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL); ++ head++; ++ head &= ring->tail_mask; ++ ++ /* can't wait for idle between a blit setup descriptor ++ * and a HOSTDATA descriptor or the engine will lock ++ */ ++ if (new_target == MACH64_BM_HOSTDATA ++ && target == MACH64_BM_ADDR) ++ no_idle_wait = 1; ++ ++ target = new_target; ++ ++ found = 0; ++ offset = 0; ++ list_for_each(ptr, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ buf = entry->buf; ++ offset = buf_addr - GETBUFADDR(buf); ++ if (offset >= 0 && offset < MACH64_BUFFER_SIZE) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found || buf == NULL) { ++ DRM_ERROR ++ ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n", ++ head, ring->tail, buf_addr, (eol ? "eol" : "")); ++ mach64_dump_ring_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return -EINVAL; ++ } ++ ++ /* Hand feed the buffer to the card via MMIO, waiting for the fifo ++ * every 16 writes ++ */ ++ DRM_DEBUG("target: (0x%08x) %s\n", target, ++ (target == ++ MACH64_BM_HOSTDATA ? 
"BM_HOSTDATA" : "BM_ADDR")); ++ DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes, ++ buf->used); ++ ++ remaining = (buf->used - offset) >> 2; /* dwords remaining in buffer */ ++ used = bytes >> 2; /* dwords in buffer for this descriptor */ ++ buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset); ++ ++ while (used) { ++ ++ if (count == 0) { ++ if (target == MACH64_BM_HOSTDATA) { ++ reg = DMAREG(MACH64_HOST_DATA0); ++ count = ++ (remaining > 16) ? 16 : remaining; ++ fifo = 0; ++ } else { ++ reg = le32_to_cpu(*buf_ptr++); ++ used--; ++ count = (reg >> 16) + 1; ++ } ++ ++ reg = reg & 0xffff; ++ reg = MMSELECT(reg); ++ } ++ while (count && used) { ++ if (!fifo) { ++ if (no_idle_wait) { ++ if ((ret = ++ mach64_do_wait_for_fifo ++ (dev_priv, 16)) < 0) { ++ no_idle_wait = 0; ++ return ret; ++ } ++ } else { ++ if ((ret = ++ mach64_do_wait_for_idle ++ (dev_priv)) < 0) { ++ return ret; ++ } ++ } ++ fifo = 16; ++ } ++ --fifo; ++ MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++)); ++ used--; ++ remaining--; ++ ++ reg += 4; ++ count--; ++ } ++ } ++ ring->head = head; ++ ring->head_addr = ring->start_addr + (ring->head * sizeof(u32)); ++ ring->space += (4 * sizeof(u32)); ++ } ++ ++ if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) { ++ return ret; ++ } ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ DRM_DEBUG("completed\n"); ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA cleanup */ ++/*@{*/ ++ ++int mach64_do_cleanup_dma(struct drm_device * dev) ++{ ++ DRM_DEBUG("\n"); ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. 
++ */ ++ if (dev->irq) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv->is_pci) { ++ if (dev_priv->ring_map) ++ drm_core_ioremapfree(dev_priv->ring_map, dev); ++ ++ if (dev->agp_buffer_map) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ dev->agp_buffer_map = NULL; ++ } ++ } ++ ++ mach64_destroy_freelist(dev); ++ ++ drm_free(dev_priv, sizeof(drm_mach64_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ } ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name IOCTL handlers */ ++/*@{*/ ++ ++int mach64_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_init_t *init = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case DRM_MACH64_INIT_DMA: ++ return mach64_do_dma_init(dev, init); ++ case DRM_MACH64_CLEANUP_DMA: ++ return mach64_do_cleanup_dma(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++int mach64_dma_idle(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mach64_do_dma_idle(dev_priv); ++} ++ ++int mach64_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mach64_do_dma_flush(dev_priv); ++} ++ ++int mach64_engine_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mach64_do_engine_reset(dev_priv); ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name Freelist management */ ++/*@{*/ ++ ++int mach64_init_freelist(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_freelist_t *entry; ++ struct list_head *ptr; ++ int i; ++ ++ DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ if ((entry = ++ (drm_mach64_freelist_t *) ++ drm_alloc(sizeof(drm_mach64_freelist_t), ++ DRM_MEM_BUFLISTS)) == NULL) ++ return -ENOMEM; ++ memset(entry, 0, sizeof(drm_mach64_freelist_t)); ++ entry->buf = dma->buflist[i]; ++ ptr = &entry->list; ++ list_add_tail(ptr, &dev_priv->free_list); ++ } ++ ++ return 0; ++} ++ ++void mach64_destroy_freelist(struct drm_device * dev) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_freelist_t *entry; ++ struct list_head *ptr; ++ struct list_head *tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ list_for_each_safe(ptr, tmp, &dev_priv->pending) { ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS); ++ } ++ list_for_each_safe(ptr, tmp, &dev_priv->placeholders) { ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS); ++ } ++ ++ list_for_each_safe(ptr, tmp, &dev_priv->free_list) { ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS); ++ } ++} ++ ++/* IMPORTANT: This function should only be called when the engine is idle or locked up, ++ * 
as it assumes all buffers in the pending list have been completed by the hardware. ++ */ ++int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv) ++{ ++ struct list_head *ptr; ++ struct list_head *tmp; ++ drm_mach64_freelist_t *entry; ++ int i; ++ ++ if (list_empty(&dev_priv->pending)) ++ return 0; ++ ++ /* Iterate the pending list and move all buffers into the freelist... */ ++ i = 0; ++ list_for_each_safe(ptr, tmp, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ if (entry->discard) { ++ entry->buf->pending = 0; ++ list_del(ptr); ++ list_add_tail(ptr, &dev_priv->free_list); ++ i++; ++ } ++ } ++ ++ DRM_DEBUG("released %d buffers from pending list\n", i); ++ ++ return 0; ++} ++ ++static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ struct list_head *ptr; ++ struct list_head *tmp; ++ drm_mach64_freelist_t *entry; ++ u32 head, tail, ofs; ++ ++ mach64_ring_tick(dev_priv, ring); ++ head = ring->head; ++ tail = ring->tail; ++ ++ if (head == tail) { ++#if MACH64_EXTRA_CHECKING ++ if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) { ++ DRM_ERROR("Empty ring with non-idle engine!\n"); ++ mach64_dump_ring_info(dev_priv); ++ return -1; ++ } ++#endif ++ /* last pass is complete, so release everything */ ++ mach64_do_release_used_buffers(dev_priv); ++ DRM_DEBUG("idle engine, freed all buffers.\n"); ++ if (list_empty(&dev_priv->free_list)) { ++ DRM_ERROR("Freelist empty with idle engine\n"); ++ return -1; ++ } ++ return 0; ++ } ++ /* Look for a completed buffer and bail out of the loop ++ * as soon as we find one -- don't waste time trying ++ * to free extra bufs here, leave that to do_release_used_buffers ++ */ ++ list_for_each_safe(ptr, tmp, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ ofs = entry->ring_ofs; ++ if (entry->discard && ++ ((head < tail && (ofs < head || ofs >= tail)) || ++ (head > tail && (ofs < head && ofs >= tail)))) { ++#if MACH64_EXTRA_CHECKING ++ int i; ++ ++ for (i = head; i != tail; i = (i + 4) & ring->tail_mask) ++ { ++ u32 o1 = le32_to_cpu(((u32 *) ring-> ++ start)[i + 1]); ++ u32 o2 = GETBUFADDR(entry->buf); ++ ++ if (o1 == o2) { ++ DRM_ERROR ++ ("Attempting to free used buffer: " ++ "i=%d buf=0x%08x\n", ++ i, o1); ++ mach64_dump_ring_info(dev_priv); ++ return -1; ++ } ++ } ++#endif ++ /* found a processed buffer */ ++ entry->buf->pending = 0; ++ list_del(ptr); ++ list_add_tail(ptr, &dev_priv->free_list); ++ DRM_DEBUG ++ ("freed processed buffer (head=%d tail=%d " ++ "buf ring ofs=%d).\n", ++ head, tail, ofs); ++ return 0; ++ } ++ } ++ ++ return 1; ++} ++ ++struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ drm_mach64_freelist_t *entry; ++ struct list_head *ptr; ++ int t; ++ ++ if (list_empty(&dev_priv->free_list)) { ++ if (list_empty(&dev_priv->pending)) { ++ DRM_ERROR ++ ("Couldn't get buffer - pending and free lists empty\n"); ++ t = 0; ++ list_for_each(ptr, &dev_priv->placeholders) { ++ t++; ++ } ++ DRM_INFO("Placeholders: %d\n", t); ++ return NULL; ++ } ++ ++ for (t = 0; t < dev_priv->usec_timeout; t++) { ++ int ret; ++ ++ ret = mach64_do_reclaim_completed(dev_priv); ++ if (ret == 0) ++ goto _freelist_entry_found; ++ if (ret < 0) ++ return NULL; ++ ++ DRM_UDELAY(1); ++ } ++ mach64_dump_ring_info(dev_priv); ++ DRM_ERROR ++ ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n", ++ ring->head_addr, ring->head, 
ring->tail); ++ return NULL; ++ } ++ ++ _freelist_entry_found: ++ ptr = dev_priv->free_list.next; ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ entry->buf->used = 0; ++ list_add_tail(ptr, &dev_priv->placeholders); ++ return entry->buf; ++} ++ ++int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf) ++{ ++ struct list_head *ptr; ++ drm_mach64_freelist_t *entry; ++ ++#if MACH64_EXTRA_CHECKING ++ list_for_each(ptr, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ if (copy_buf == entry->buf) { ++ DRM_ERROR("Trying to release a pending buf\n"); ++ return -EFAULT; ++ } ++ } ++#endif ++ ptr = dev_priv->placeholders.next; ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ copy_buf->pending = 0; ++ copy_buf->used = 0; ++ entry->buf = copy_buf; ++ entry->discard = 1; ++ list_del(ptr); ++ list_add_tail(ptr, &dev_priv->free_list); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA buffer request and submission IOCTL handler */ ++/*@{*/ ++ ++static int mach64_dma_get_buffers(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_dma * d) ++{ ++ int i; ++ struct drm_buf *buf; ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = mach64_freelist_get(dev_priv); ++#if MACH64_EXTRA_CHECKING ++ if (!buf) ++ return -EFAULT; ++#else ++ if (!buf) ++ return -EAGAIN; ++#endif ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, ++ sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, ++ sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int mach64_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_dma *d = data; ++ int ret = 0; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ ret = -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = mach64_dma_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} ++ ++void mach64_driver_lastclose(struct drm_device * dev) ++{ ++ mach64_do_cleanup_dma(dev); ++} ++ ++/*@}*/ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,256 @@ ++/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*- ++ * Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002 Frank C. Earl ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Frank C. Earl ++ * Leif Delgass ++ */ ++ ++#ifndef __MACH64_DRM_H__ ++#define __MACH64_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (mach64_sarea.h) ++ */ ++#ifndef __MACH64_SAREA_DEFINES__ ++#define __MACH64_SAREA_DEFINES__ ++ ++/* What needs to be changed for the current vertex buffer? ++ * GH: We're going to be pedantic about this. We want the card to do as ++ * little as possible, so let's avoid having it fetch a whole bunch of ++ * register values that don't change all that often, if at all. ++ */ ++#define MACH64_UPLOAD_DST_OFF_PITCH 0x0001 ++#define MACH64_UPLOAD_Z_OFF_PITCH 0x0002 ++#define MACH64_UPLOAD_Z_ALPHA_CNTL 0x0004 ++#define MACH64_UPLOAD_SCALE_3D_CNTL 0x0008 ++#define MACH64_UPLOAD_DP_FOG_CLR 0x0010 ++#define MACH64_UPLOAD_DP_WRITE_MASK 0x0020 ++#define MACH64_UPLOAD_DP_PIX_WIDTH 0x0040 ++#define MACH64_UPLOAD_SETUP_CNTL 0x0080 ++#define MACH64_UPLOAD_MISC 0x0100 ++#define MACH64_UPLOAD_TEXTURE 0x0200 ++#define MACH64_UPLOAD_TEX0IMAGE 0x0400 ++#define MACH64_UPLOAD_TEX1IMAGE 0x0800 ++#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */ ++#define MACH64_UPLOAD_CONTEXT 0x00ff ++#define MACH64_UPLOAD_ALL 0x1fff ++ ++/* DMA buffer size ++ */ ++#define MACH64_BUFFER_SIZE 16384 ++ ++/* Max number of swaps allowed on the ring ++ * before the client must wait ++ */ ++#define MACH64_MAX_QUEUED_FRAMES 3U ++ ++/* Byte offsets for host blit buffer data ++ */ ++#define MACH64_HOSTDATA_BLIT_OFFSET 104 ++ ++/* Keep these small for testing. 
++ */ ++#define MACH64_NR_SAREA_CLIPRECTS 8 ++ ++#define MACH64_CARD_HEAP 0 ++#define MACH64_AGP_HEAP 1 ++#define MACH64_NR_TEX_HEAPS 2 ++#define MACH64_NR_TEX_REGIONS 64 ++#define MACH64_LOG_TEX_GRANULARITY 16 ++ ++#define MACH64_TEX_MAXLEVELS 1 ++ ++#define MACH64_NR_CONTEXT_REGS 15 ++#define MACH64_NR_TEXTURE_REGS 4 ++ ++#endif /* __MACH64_SAREA_DEFINES__ */ ++ ++typedef struct { ++ unsigned int dst_off_pitch; ++ ++ unsigned int z_off_pitch; ++ unsigned int z_cntl; ++ unsigned int alpha_tst_cntl; ++ ++ unsigned int scale_3d_cntl; ++ ++ unsigned int sc_left_right; ++ unsigned int sc_top_bottom; ++ ++ unsigned int dp_fog_clr; ++ unsigned int dp_write_mask; ++ unsigned int dp_pix_width; ++ unsigned int dp_mix; ++ unsigned int dp_src; ++ ++ unsigned int clr_cmp_cntl; ++ unsigned int gui_traj_cntl; ++ ++ unsigned int setup_cntl; ++ ++ unsigned int tex_size_pitch; ++ unsigned int tex_cntl; ++ unsigned int secondary_tex_off; ++ unsigned int tex_offset; ++} drm_mach64_context_regs_t; ++ ++typedef struct drm_mach64_sarea { ++ /* The channel for communication of state information to the kernel ++ * on firing a vertex dma buffer. ++ */ ++ drm_mach64_context_regs_t context_state; ++ unsigned int dirty; ++ unsigned int vertsize; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Counters for client-side throttling of rendering clients. ++ */ ++ unsigned int frames_queued; ++ ++ /* Texture memory LRU. ++ */ ++ struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS + ++ 1]; ++ unsigned int tex_age[MACH64_NR_TEX_HEAPS]; ++ int ctx_owner; ++} drm_mach64_sarea_t; ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (mach64_common.h) ++ */ ++ ++/* Mach64 specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
++ */ ++ ++#define DRM_MACH64_INIT 0x00 ++#define DRM_MACH64_IDLE 0x01 ++#define DRM_MACH64_RESET 0x02 ++#define DRM_MACH64_SWAP 0x03 ++#define DRM_MACH64_CLEAR 0x04 ++#define DRM_MACH64_VERTEX 0x05 ++#define DRM_MACH64_BLIT 0x06 ++#define DRM_MACH64_FLUSH 0x07 ++#define DRM_MACH64_GETPARAM 0x08 ++ ++#define DRM_IOCTL_MACH64_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t) ++#define DRM_IOCTL_MACH64_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_IDLE ) ++#define DRM_IOCTL_MACH64_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_RESET ) ++#define DRM_IOCTL_MACH64_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_SWAP ) ++#define DRM_IOCTL_MACH64_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t) ++#define DRM_IOCTL_MACH64_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t) ++#define DRM_IOCTL_MACH64_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t) ++#define DRM_IOCTL_MACH64_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH ) ++#define DRM_IOCTL_MACH64_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t) ++ ++/* Buffer flags for clears ++ */ ++#define MACH64_FRONT 0x1 ++#define MACH64_BACK 0x2 ++#define MACH64_DEPTH 0x4 ++ ++/* Primitive types for vertex buffers ++ */ ++#define MACH64_PRIM_POINTS 0x00000000 ++#define MACH64_PRIM_LINES 0x00000001 ++#define MACH64_PRIM_LINE_LOOP 0x00000002 ++#define MACH64_PRIM_LINE_STRIP 0x00000003 ++#define MACH64_PRIM_TRIANGLES 0x00000004 ++#define MACH64_PRIM_TRIANGLE_STRIP 0x00000005 ++#define MACH64_PRIM_TRIANGLE_FAN 0x00000006 ++#define MACH64_PRIM_QUADS 0x00000007 ++#define MACH64_PRIM_QUAD_STRIP 0x00000008 ++#define MACH64_PRIM_POLYGON 0x00000009 ++ ++typedef enum _drm_mach64_dma_mode_t { ++ MACH64_MODE_DMA_ASYNC, ++ MACH64_MODE_DMA_SYNC, ++ MACH64_MODE_MMIO ++} drm_mach64_dma_mode_t; ++ ++typedef struct drm_mach64_init { ++ enum { ++ DRM_MACH64_INIT_DMA = 0x01, ++ DRM_MACH64_CLEANUP_DMA = 0x02 ++ } func; ++ ++ unsigned long sarea_priv_offset; ++ int is_pci; ++ drm_mach64_dma_mode_t dma_mode; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ unsigned long fb_offset; ++ unsigned long mmio_offset; ++ unsigned long ring_offset; ++ unsigned long buffers_offset; ++ unsigned long agp_textures_offset; ++} drm_mach64_init_t; ++ ++typedef struct drm_mach64_clear { ++ unsigned int flags; ++ int x, y, w, h; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++} drm_mach64_clear_t; ++ ++typedef struct drm_mach64_vertex { ++ int prim; ++ void *buf; /* Address of vertex buffer */ ++ unsigned long used; /* Number of bytes in buffer */ ++ int discard; /* Client finished with buffer? 
*/ ++} drm_mach64_vertex_t; ++ ++typedef struct drm_mach64_blit { ++ void *buf; ++ int pitch; ++ int offset; ++ int format; ++ unsigned short x, y; ++ unsigned short width, height; ++} drm_mach64_blit_t; ++ ++typedef struct drm_mach64_getparam { ++ enum { ++ MACH64_PARAM_FRAMES_QUEUED = 0x01, ++ MACH64_PARAM_IRQ_NR = 0x02 ++ } param; ++ void *value; ++} drm_mach64_getparam_t; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,105 @@ ++/* mach64_drv.c -- mach64 (Rage Pro) driver -*- linux-c -*- ++ * Created: Fri Nov 24 18:34:32 2000 by gareth@valinux.com ++ * ++ * Copyright 2000 Gareth Hughes ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * GARETH HUGHES BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Gareth Hughes ++ * Leif Delgass ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ mach64_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA ++ | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .lastclose = mach64_driver_lastclose, ++ .get_vblank_counter = mach64_get_vblank_counter, ++ .enable_vblank = mach64_enable_vblank, ++ .disable_vblank = mach64_disable_vblank, ++ .irq_preinstall = mach64_driver_irq_preinstall, ++ .irq_postinstall = mach64_driver_irq_postinstall, ++ .irq_uninstall = mach64_driver_irq_uninstall, ++ .irq_handler = mach64_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = mach64_ioctls, ++ .dma_ioctl = mach64_dma_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init mach64_init(void) ++{ ++ driver.num_ioctls = mach64_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit mach64_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(mach64_init); ++module_exit(mach64_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,859 @@ ++/* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*- ++ * Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002 Frank C. Earl ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Frank C. Earl ++ * Leif Delgass ++ * José Fonseca ++ */ ++ ++#ifndef __MACH64_DRV_H__ ++#define __MACH64_DRV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca" ++ ++#define DRIVER_NAME "mach64" ++#define DRIVER_DESC "DRM module for the ATI Rage Pro" ++#define DRIVER_DATE "20060718" ++ ++#define DRIVER_MAJOR 2 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 0 ++ ++/* FIXME: remove these when not needed */ ++/* Development driver options */ ++#define MACH64_EXTRA_CHECKING 0 /* Extra sanity checks for DMA/freelist management */ ++#define MACH64_VERBOSE 0 /* Verbose debugging output */ ++ ++typedef struct drm_mach64_freelist { ++ struct list_head list; /* List pointers for free_list, placeholders, or pending list */ ++ struct drm_buf *buf; /* Pointer to the buffer */ ++ int discard; /* This flag is set when we're done (re)using a buffer */ ++ u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */ ++} drm_mach64_freelist_t; ++ ++typedef struct drm_mach64_descriptor_ring { ++ void *start; /* write pointer (cpu address) to start of descriptor ring */ ++ u32 start_addr; /* bus address of beginning of descriptor ring */ ++ int size; /* size of ring in bytes */ ++ ++ u32 head_addr; /* bus address of descriptor ring head */ ++ u32 head; /* dword offset of descriptor ring head */ ++ u32 tail; /* dword offset of descriptor ring tail */ ++ u32 tail_mask; /* mask used to wrap ring */ ++ int space; /* number of free bytes in ring */ ++} drm_mach64_descriptor_ring_t; ++ ++typedef struct drm_mach64_private { ++ drm_mach64_sarea_t *sarea_priv; ++ ++ int is_pci; ++ drm_mach64_dma_mode_t driver_mode; /* Async DMA, sync DMA, or MMIO */ ++ ++ int usec_timeout; /* Timeout for the wait functions */ ++ ++ drm_mach64_descriptor_ring_t ring; /* DMA descriptor table (ring buffer) */ ++ int ring_running; /* Is bus mastering is enabled */ ++ ++ struct list_head free_list; /* Free-list head */ ++ struct list_head placeholders; /* Placeholder list for buffers held by clients */ ++ struct list_head pending; /* Buffers pending completion */ ++ ++ u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */ ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ atomic_t vbl_received; /**< Number of vblanks received. 
*/ ++ ++ u32 front_offset_pitch; ++ u32 back_offset_pitch; ++ u32 depth_offset_pitch; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *fb; ++ drm_local_map_t *mmio; ++ drm_local_map_t *ring_map; ++ drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */ ++ drm_local_map_t *agp_textures; ++} drm_mach64_private_t; ++ ++extern struct drm_ioctl_desc mach64_ioctls[]; ++extern int mach64_max_ioctl; ++ ++ /* mach64_dma.c */ ++extern int mach64_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_idle(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_engine_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern void mach64_driver_lastclose(struct drm_device * dev); ++ ++extern int mach64_init_freelist(struct drm_device * dev); ++extern void mach64_destroy_freelist(struct drm_device * dev); ++extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv); ++extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, ++ struct drm_buf * copy_buf); ++ ++extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, ++ int entries); ++extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv); ++extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n); ++extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv); ++extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv); ++extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv); ++extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv); ++extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv); ++ ++extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *_entry); ++extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *_entry); ++ ++extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv); ++extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv); ++extern int mach64_do_cleanup_dma(struct drm_device * dev); ++ ++ /* mach64_state.c */ ++extern int mach64_dma_clear(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_vertex(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_blit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_get_param(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int mach64_enable_vblank(struct drm_device *dev, int crtc); ++extern void mach64_disable_vblank(struct drm_device *dev, int crtc); ++extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS); ++extern void mach64_driver_irq_preinstall(struct drm_device *dev); ++extern int mach64_driver_irq_postinstall(struct drm_device *dev); ++extern void mach64_driver_irq_uninstall(struct drm_device *dev); ++ ++/* ================================================================ ++ * Registers ++ */ ++ ++#define MACH64_AGP_BASE 0x0148 ++#define MACH64_AGP_CNTL 0x014c ++#define MACH64_ALPHA_TST_CNTL 0x0550 ++ ++#define MACH64_DSP_CONFIG 0x0420 
++#define MACH64_DSP_ON_OFF 0x0424 ++#define MACH64_EXT_MEM_CNTL 0x04ac ++#define MACH64_GEN_TEST_CNTL 0x04d0 ++#define MACH64_HW_DEBUG 0x047c ++#define MACH64_MEM_ADDR_CONFIG 0x0434 ++#define MACH64_MEM_BUF_CNTL 0x042c ++#define MACH64_MEM_CNTL 0x04b0 ++ ++#define MACH64_BM_ADDR 0x0648 ++#define MACH64_BM_COMMAND 0x0188 ++#define MACH64_BM_DATA 0x0648 ++#define MACH64_BM_FRAME_BUF_OFFSET 0x0180 ++#define MACH64_BM_GUI_TABLE 0x01b8 ++#define MACH64_BM_GUI_TABLE_CMD 0x064c ++# define MACH64_CIRCULAR_BUF_SIZE_16KB (0 << 0) ++# define MACH64_CIRCULAR_BUF_SIZE_32KB (1 << 0) ++# define MACH64_CIRCULAR_BUF_SIZE_64KB (2 << 0) ++# define MACH64_CIRCULAR_BUF_SIZE_128KB (3 << 0) ++# define MACH64_LAST_DESCRIPTOR (1 << 31) ++#define MACH64_BM_HOSTDATA 0x0644 ++#define MACH64_BM_STATUS 0x018c ++#define MACH64_BM_SYSTEM_MEM_ADDR 0x0184 ++#define MACH64_BM_SYSTEM_TABLE 0x01bc ++#define MACH64_BUS_CNTL 0x04a0 ++# define MACH64_BUS_MSTR_RESET (1 << 1) ++# define MACH64_BUS_APER_REG_DIS (1 << 4) ++# define MACH64_BUS_FLUSH_BUF (1 << 2) ++# define MACH64_BUS_MASTER_DIS (1 << 6) ++# define MACH64_BUS_EXT_REG_EN (1 << 27) ++ ++#define MACH64_CLR_CMP_CLR 0x0700 ++#define MACH64_CLR_CMP_CNTL 0x0708 ++#define MACH64_CLR_CMP_MASK 0x0704 ++#define MACH64_CONFIG_CHIP_ID 0x04e0 ++#define MACH64_CONFIG_CNTL 0x04dc ++#define MACH64_CONFIG_STAT0 0x04e4 ++#define MACH64_CONFIG_STAT1 0x0494 ++#define MACH64_CONFIG_STAT2 0x0498 ++#define MACH64_CONTEXT_LOAD_CNTL 0x072c ++#define MACH64_CONTEXT_MASK 0x0720 ++#define MACH64_COMPOSITE_SHADOW_ID 0x0798 ++#define MACH64_CRC_SIG 0x04e8 ++#define MACH64_CUSTOM_MACRO_CNTL 0x04d4 ++ ++#define MACH64_DP_BKGD_CLR 0x06c0 ++#define MACH64_DP_FOG_CLR 0x06c4 ++#define MACH64_DP_FGRD_BKGD_CLR 0x06e0 ++#define MACH64_DP_FRGD_CLR 0x06c4 ++#define MACH64_DP_FGRD_CLR_MIX 0x06dc ++ ++#define MACH64_DP_MIX 0x06d4 ++# define BKGD_MIX_NOT_D (0 << 0) ++# define BKGD_MIX_ZERO (1 << 0) ++# define BKGD_MIX_ONE (2 << 0) ++# define MACH64_BKGD_MIX_D (3 << 0) ++# define BKGD_MIX_NOT_S (4 << 0) ++# define BKGD_MIX_D_XOR_S (5 << 0) ++# define BKGD_MIX_NOT_D_XOR_S (6 << 0) ++# define MACH64_BKGD_MIX_S (7 << 0) ++# define BKGD_MIX_NOT_D_OR_NOT_S (8 << 0) ++# define BKGD_MIX_D_OR_NOT_S (9 << 0) ++# define BKGD_MIX_NOT_D_OR_S (10 << 0) ++# define BKGD_MIX_D_OR_S (11 << 0) ++# define BKGD_MIX_D_AND_S (12 << 0) ++# define BKGD_MIX_NOT_D_AND_S (13 << 0) ++# define BKGD_MIX_D_AND_NOT_S (14 << 0) ++# define BKGD_MIX_NOT_D_AND_NOT_S (15 << 0) ++# define BKGD_MIX_D_PLUS_S_DIV2 (23 << 0) ++# define FRGD_MIX_NOT_D (0 << 16) ++# define FRGD_MIX_ZERO (1 << 16) ++# define FRGD_MIX_ONE (2 << 16) ++# define FRGD_MIX_D (3 << 16) ++# define FRGD_MIX_NOT_S (4 << 16) ++# define FRGD_MIX_D_XOR_S (5 << 16) ++# define FRGD_MIX_NOT_D_XOR_S (6 << 16) ++# define MACH64_FRGD_MIX_S (7 << 16) ++# define FRGD_MIX_NOT_D_OR_NOT_S (8 << 16) ++# define FRGD_MIX_D_OR_NOT_S (9 << 16) ++# define FRGD_MIX_NOT_D_OR_S (10 << 16) ++# define FRGD_MIX_D_OR_S (11 << 16) ++# define FRGD_MIX_D_AND_S (12 << 16) ++# define FRGD_MIX_NOT_D_AND_S (13 << 16) ++# define FRGD_MIX_D_AND_NOT_S (14 << 16) ++# define FRGD_MIX_NOT_D_AND_NOT_S (15 << 16) ++# define FRGD_MIX_D_PLUS_S_DIV2 (23 << 16) ++ ++#define MACH64_DP_PIX_WIDTH 0x06d0 ++# define MACH64_HOST_TRIPLE_ENABLE (1 << 13) ++# define MACH64_BYTE_ORDER_MSB_TO_LSB (0 << 24) ++# define MACH64_BYTE_ORDER_LSB_TO_MSB (1 << 24) ++ ++#define MACH64_DP_SRC 0x06d8 ++# define MACH64_BKGD_SRC_BKGD_CLR (0 << 0) ++# define MACH64_BKGD_SRC_FRGD_CLR (1 << 0) ++# define MACH64_BKGD_SRC_HOST (2 << 0) ++# define 
MACH64_BKGD_SRC_BLIT (3 << 0) ++# define MACH64_BKGD_SRC_PATTERN (4 << 0) ++# define MACH64_BKGD_SRC_3D (5 << 0) ++# define MACH64_FRGD_SRC_BKGD_CLR (0 << 8) ++# define MACH64_FRGD_SRC_FRGD_CLR (1 << 8) ++# define MACH64_FRGD_SRC_HOST (2 << 8) ++# define MACH64_FRGD_SRC_BLIT (3 << 8) ++# define MACH64_FRGD_SRC_PATTERN (4 << 8) ++# define MACH64_FRGD_SRC_3D (5 << 8) ++# define MACH64_MONO_SRC_ONE (0 << 16) ++# define MACH64_MONO_SRC_PATTERN (1 << 16) ++# define MACH64_MONO_SRC_HOST (2 << 16) ++# define MACH64_MONO_SRC_BLIT (3 << 16) ++ ++#define MACH64_DP_WRITE_MASK 0x06c8 ++ ++#define MACH64_DST_CNTL 0x0530 ++# define MACH64_DST_X_RIGHT_TO_LEFT (0 << 0) ++# define MACH64_DST_X_LEFT_TO_RIGHT (1 << 0) ++# define MACH64_DST_Y_BOTTOM_TO_TOP (0 << 1) ++# define MACH64_DST_Y_TOP_TO_BOTTOM (1 << 1) ++# define MACH64_DST_X_MAJOR (0 << 2) ++# define MACH64_DST_Y_MAJOR (1 << 2) ++# define MACH64_DST_X_TILE (1 << 3) ++# define MACH64_DST_Y_TILE (1 << 4) ++# define MACH64_DST_LAST_PEL (1 << 5) ++# define MACH64_DST_POLYGON_ENABLE (1 << 6) ++# define MACH64_DST_24_ROTATION_ENABLE (1 << 7) ++ ++#define MACH64_DST_HEIGHT_WIDTH 0x0518 ++#define MACH64_DST_OFF_PITCH 0x0500 ++#define MACH64_DST_WIDTH_HEIGHT 0x06ec ++#define MACH64_DST_X_Y 0x06e8 ++#define MACH64_DST_Y_X 0x050c ++ ++#define MACH64_FIFO_STAT 0x0710 ++# define MACH64_FIFO_SLOT_MASK 0x0000ffff ++# define MACH64_FIFO_ERR (1 << 31) ++ ++#define MACH64_GEN_TEST_CNTL 0x04d0 ++# define MACH64_GUI_ENGINE_ENABLE (1 << 8) ++#define MACH64_GUI_CMDFIFO_DEBUG 0x0170 ++#define MACH64_GUI_CMDFIFO_DATA 0x0174 ++#define MACH64_GUI_CNTL 0x0178 ++# define MACH64_CMDFIFO_SIZE_MASK 0x00000003ul ++# define MACH64_CMDFIFO_SIZE_192 0x00000000ul ++# define MACH64_CMDFIFO_SIZE_128 0x00000001ul ++# define MACH64_CMDFIFO_SIZE_64 0x00000002ul ++#define MACH64_GUI_STAT 0x0738 ++# define MACH64_GUI_ACTIVE (1 << 0) ++#define MACH64_GUI_TRAJ_CNTL 0x0730 ++ ++#define MACH64_HOST_CNTL 0x0640 ++#define MACH64_HOST_DATA0 0x0600 ++ ++#define MACH64_ONE_OVER_AREA 0x029c ++#define MACH64_ONE_OVER_AREA_UC 0x0300 ++ ++#define MACH64_PAT_REG0 0x0680 ++#define MACH64_PAT_REG1 0x0684 ++ ++#define MACH64_SC_LEFT 0x06a0 ++#define MACH64_SC_RIGHT 0x06a4 ++#define MACH64_SC_LEFT_RIGHT 0x06a8 ++#define MACH64_SC_TOP 0x06ac ++#define MACH64_SC_BOTTOM 0x06b0 ++#define MACH64_SC_TOP_BOTTOM 0x06b4 ++ ++#define MACH64_SCALE_3D_CNTL 0x05fc ++#define MACH64_SCRATCH_REG0 0x0480 ++#define MACH64_SCRATCH_REG1 0x0484 ++#define MACH64_SECONDARY_TEX_OFF 0x0778 ++#define MACH64_SETUP_CNTL 0x0304 ++#define MACH64_SRC_CNTL 0x05b4 ++# define MACH64_SRC_BM_ENABLE (1 << 8) ++# define MACH64_SRC_BM_SYNC (1 << 9) ++# define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM (0 << 10) ++# define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME (1 << 10) ++# define MACH64_SRC_BM_OP_REG_TO_SYSTEM (2 << 10) ++# define MACH64_SRC_BM_OP_SYSTEM_TO_REG (3 << 10) ++#define MACH64_SRC_HEIGHT1 0x0594 ++#define MACH64_SRC_HEIGHT2 0x05ac ++#define MACH64_SRC_HEIGHT1_WIDTH1 0x0598 ++#define MACH64_SRC_HEIGHT2_WIDTH2 0x05b0 ++#define MACH64_SRC_OFF_PITCH 0x0580 ++#define MACH64_SRC_WIDTH1 0x0590 ++#define MACH64_SRC_Y_X 0x058c ++ ++#define MACH64_TEX_0_OFF 0x05c0 ++#define MACH64_TEX_CNTL 0x0774 ++#define MACH64_TEX_SIZE_PITCH 0x0770 ++#define MACH64_TIMER_CONFIG 0x0428 ++ ++#define MACH64_VERTEX_1_ARGB 0x0254 ++#define MACH64_VERTEX_1_S 0x0240 ++#define MACH64_VERTEX_1_SECONDARY_S 0x0328 ++#define MACH64_VERTEX_1_SECONDARY_T 0x032c ++#define MACH64_VERTEX_1_SECONDARY_W 0x0330 ++#define MACH64_VERTEX_1_SPEC_ARGB 0x024c ++#define MACH64_VERTEX_1_T 0x0244 
++#define MACH64_VERTEX_1_W 0x0248 ++#define MACH64_VERTEX_1_X_Y 0x0258 ++#define MACH64_VERTEX_1_Z 0x0250 ++#define MACH64_VERTEX_2_ARGB 0x0274 ++#define MACH64_VERTEX_2_S 0x0260 ++#define MACH64_VERTEX_2_SECONDARY_S 0x0334 ++#define MACH64_VERTEX_2_SECONDARY_T 0x0338 ++#define MACH64_VERTEX_2_SECONDARY_W 0x033c ++#define MACH64_VERTEX_2_SPEC_ARGB 0x026c ++#define MACH64_VERTEX_2_T 0x0264 ++#define MACH64_VERTEX_2_W 0x0268 ++#define MACH64_VERTEX_2_X_Y 0x0278 ++#define MACH64_VERTEX_2_Z 0x0270 ++#define MACH64_VERTEX_3_ARGB 0x0294 ++#define MACH64_VERTEX_3_S 0x0280 ++#define MACH64_VERTEX_3_SECONDARY_S 0x02a0 ++#define MACH64_VERTEX_3_SECONDARY_T 0x02a4 ++#define MACH64_VERTEX_3_SECONDARY_W 0x02a8 ++#define MACH64_VERTEX_3_SPEC_ARGB 0x028c ++#define MACH64_VERTEX_3_T 0x0284 ++#define MACH64_VERTEX_3_W 0x0288 ++#define MACH64_VERTEX_3_X_Y 0x0298 ++#define MACH64_VERTEX_3_Z 0x0290 ++ ++#define MACH64_Z_CNTL 0x054c ++#define MACH64_Z_OFF_PITCH 0x0548 ++ ++#define MACH64_CRTC_VLINE_CRNT_VLINE 0x0410 ++# define MACH64_CRTC_VLINE_MASK 0x000007ff ++# define MACH64_CRTC_CRNT_VLINE_MASK 0x07ff0000 ++#define MACH64_CRTC_OFF_PITCH 0x0414 ++#define MACH64_CRTC_INT_CNTL 0x0418 ++# define MACH64_CRTC_VBLANK (1 << 0) ++# define MACH64_CRTC_VBLANK_INT_EN (1 << 1) ++# define MACH64_CRTC_VBLANK_INT (1 << 2) ++# define MACH64_CRTC_VLINE_INT_EN (1 << 3) ++# define MACH64_CRTC_VLINE_INT (1 << 4) ++# define MACH64_CRTC_VLINE_SYNC (1 << 5) /* 0=even, 1=odd */ ++# define MACH64_CRTC_FRAME (1 << 6) /* 0=even, 1=odd */ ++# define MACH64_CRTC_SNAPSHOT_INT_EN (1 << 7) ++# define MACH64_CRTC_SNAPSHOT_INT (1 << 8) ++# define MACH64_CRTC_I2C_INT_EN (1 << 9) ++# define MACH64_CRTC_I2C_INT (1 << 10) ++# define MACH64_CRTC2_VBLANK (1 << 11) /* LT Pro */ ++# define MACH64_CRTC2_VBLANK_INT_EN (1 << 12) /* LT Pro */ ++# define MACH64_CRTC2_VBLANK_INT (1 << 13) /* LT Pro */ ++# define MACH64_CRTC2_VLINE_INT_EN (1 << 14) /* LT Pro */ ++# define MACH64_CRTC2_VLINE_INT (1 << 15) /* LT Pro */ ++# define MACH64_CRTC_CAPBUF0_INT_EN (1 << 16) ++# define MACH64_CRTC_CAPBUF0_INT (1 << 17) ++# define MACH64_CRTC_CAPBUF1_INT_EN (1 << 18) ++# define MACH64_CRTC_CAPBUF1_INT (1 << 19) ++# define MACH64_CRTC_OVERLAY_EOF_INT_EN (1 << 20) ++# define MACH64_CRTC_OVERLAY_EOF_INT (1 << 21) ++# define MACH64_CRTC_ONESHOT_CAP_INT_EN (1 << 22) ++# define MACH64_CRTC_ONESHOT_CAP_INT (1 << 23) ++# define MACH64_CRTC_BUSMASTER_EOL_INT_EN (1 << 24) ++# define MACH64_CRTC_BUSMASTER_EOL_INT (1 << 25) ++# define MACH64_CRTC_GP_INT_EN (1 << 26) ++# define MACH64_CRTC_GP_INT (1 << 27) ++# define MACH64_CRTC2_VLINE_SYNC (1 << 28) /* LT Pro */ /* 0=even, 1=odd */ ++# define MACH64_CRTC_SNAPSHOT2_INT_EN (1 << 29) /* LT Pro */ ++# define MACH64_CRTC_SNAPSHOT2_INT (1 << 30) /* LT Pro */ ++# define MACH64_CRTC_VBLANK2_INT (1 << 31) ++# define MACH64_CRTC_INT_ENS \ ++ ( \ ++ MACH64_CRTC_VBLANK_INT_EN | \ ++ MACH64_CRTC_VLINE_INT_EN | \ ++ MACH64_CRTC_SNAPSHOT_INT_EN | \ ++ MACH64_CRTC_I2C_INT_EN | \ ++ MACH64_CRTC2_VBLANK_INT_EN | \ ++ MACH64_CRTC2_VLINE_INT_EN | \ ++ MACH64_CRTC_CAPBUF0_INT_EN | \ ++ MACH64_CRTC_CAPBUF1_INT_EN | \ ++ MACH64_CRTC_OVERLAY_EOF_INT_EN | \ ++ MACH64_CRTC_ONESHOT_CAP_INT_EN | \ ++ MACH64_CRTC_BUSMASTER_EOL_INT_EN | \ ++ MACH64_CRTC_GP_INT_EN | \ ++ MACH64_CRTC_SNAPSHOT2_INT_EN | \ ++ 0 \ ++ ) ++# define MACH64_CRTC_INT_ACKS \ ++ ( \ ++ MACH64_CRTC_VBLANK_INT | \ ++ MACH64_CRTC_VLINE_INT | \ ++ MACH64_CRTC_SNAPSHOT_INT | \ ++ MACH64_CRTC_I2C_INT | \ ++ MACH64_CRTC2_VBLANK_INT | \ ++ MACH64_CRTC2_VLINE_INT | \ ++ MACH64_CRTC_CAPBUF0_INT 
| \ ++ MACH64_CRTC_CAPBUF1_INT | \ ++ MACH64_CRTC_OVERLAY_EOF_INT | \ ++ MACH64_CRTC_ONESHOT_CAP_INT | \ ++ MACH64_CRTC_BUSMASTER_EOL_INT | \ ++ MACH64_CRTC_GP_INT | \ ++ MACH64_CRTC_SNAPSHOT2_INT | \ ++ MACH64_CRTC_VBLANK2_INT | \ ++ 0 \ ++ ) ++ ++#define MACH64_DATATYPE_CI8 2 ++#define MACH64_DATATYPE_ARGB1555 3 ++#define MACH64_DATATYPE_RGB565 4 ++#define MACH64_DATATYPE_ARGB8888 6 ++#define MACH64_DATATYPE_RGB332 7 ++#define MACH64_DATATYPE_Y8 8 ++#define MACH64_DATATYPE_RGB8 9 ++#define MACH64_DATATYPE_VYUY422 11 ++#define MACH64_DATATYPE_YVYU422 12 ++#define MACH64_DATATYPE_AYUV444 14 ++#define MACH64_DATATYPE_ARGB4444 15 ++ ++#define MACH64_READ(reg) DRM_READ32(dev_priv->mmio, (reg) ) ++#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) ) ++ ++#define DWMREG0 0x0400 ++#define DWMREG0_END 0x07ff ++#define DWMREG1 0x0000 ++#define DWMREG1_END 0x03ff ++ ++#define ISREG0(r) (((r) >= DWMREG0) && ((r) <= DWMREG0_END)) ++#define DMAREG0(r) (((r) - DWMREG0) >> 2) ++#define DMAREG1(r) ((((r) - DWMREG1) >> 2 ) | 0x0100) ++#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r)) ++ ++#define MMREG0 0x0000 ++#define MMREG0_END 0x00ff ++ ++#define ISMMREG0(r) (((r) >= MMREG0) && ((r) <= MMREG0_END)) ++#define MMSELECT0(r) (((r) << 2) + DWMREG0) ++#define MMSELECT1(r) (((((r) & 0xff) << 2) + DWMREG1)) ++#define MMSELECT(r) (ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r)) ++ ++/* ================================================================ ++ * DMA constants ++ */ ++ ++/* DMA descriptor field indices: ++ * The descriptor fields are loaded into the read-only ++ * BM_* system bus master registers during a bus-master operation ++ */ ++#define MACH64_DMA_FRAME_BUF_OFFSET 0 /* BM_FRAME_BUF_OFFSET */ ++#define MACH64_DMA_SYS_MEM_ADDR 1 /* BM_SYSTEM_MEM_ADDR */ ++#define MACH64_DMA_COMMAND 2 /* BM_COMMAND */ ++#define MACH64_DMA_RESERVED 3 /* BM_STATUS */ ++ ++/* BM_COMMAND descriptor field flags */ ++#define MACH64_DMA_HOLD_OFFSET (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */ ++#define MACH64_DMA_EOL (1<<31) /* End of descriptor list flag */ ++ ++#define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */ ++#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */ ++ ++/* ================================================================ ++ * Ring operations ++ * ++ * Since the Mach64 bus master engine requires polling, these functions end ++ * up being called frequently, hence being inline. 
++ */ ++ ++static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ ring->head_addr, ring->head, ring->tail, ring->space); ++ ++ if (mach64_do_wait_for_idle(dev_priv) < 0) { ++ mach64_do_engine_reset(dev_priv); ++ } ++ ++ if (dev_priv->driver_mode != MACH64_MODE_MMIO) { ++ /* enable bus mastering and block 1 registers */ ++ MACH64_WRITE(MACH64_BUS_CNTL, ++ (MACH64_READ(MACH64_BUS_CNTL) & ++ ~MACH64_BUS_MASTER_DIS) ++ | MACH64_BUS_EXT_REG_EN); ++ mach64_do_wait_for_idle(dev_priv); ++ } ++ ++ /* reset descriptor table ring head */ ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ dev_priv->ring_running = 1; ++} ++ ++static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv, ++ drm_mach64_descriptor_ring_t * ring) ++{ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ ring->head_addr, ring->head, ring->tail, ring->space); ++ ++ /* reset descriptor table ring head */ ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ if (dev_priv->driver_mode == MACH64_MODE_MMIO) { ++ mach64_do_dispatch_pseudo_dma(dev_priv); ++ } else { ++ /* enable GUI bus mastering, and sync the bus master to the GUI */ ++ MACH64_WRITE(MACH64_SRC_CNTL, ++ MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC | ++ MACH64_SRC_BM_OP_SYSTEM_TO_REG); ++ ++ /* kick off the transfer */ ++ MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0); ++ if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) { ++ if ((mach64_do_wait_for_idle(dev_priv)) < 0) { ++ DRM_ERROR("idle failed, resetting engine\n"); ++ mach64_dump_engine_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return; ++ } ++ mach64_do_release_used_buffers(dev_priv); ++ } ++ } ++} ++ ++/** ++ * Poll the ring head and make sure the bus master is alive. ++ * ++ * Mach64's bus master engine will stop if there are no more entries to process. ++ * This function polls the engine for the last processed entry and calls ++ * mach64_ring_resume if there is an unprocessed entry. ++ * ++ * Note also that, since we update the ring tail while the bus master engine is ++ * in operation, it is possible that the last tail update was too late to be ++ * processed, and the bus master engine stops at the previous tail position. ++ * Therefore it is important to call this function frequently. 
++ */ ++static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv, ++ drm_mach64_descriptor_ring_t * ring) ++{ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ ring->head_addr, ring->head, ring->tail, ring->space); ++ ++ if (!dev_priv->ring_running) { ++ mach64_ring_start(dev_priv); ++ ++ if (ring->head != ring->tail) { ++ mach64_ring_resume(dev_priv, ring); ++ } ++ } else { ++ /* GUI_ACTIVE must be read before BM_GUI_TABLE to ++ * correctly determine the ring head ++ */ ++ int gui_active = ++ MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE; ++ ++ ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0; ++ ++ if (gui_active) { ++ /* If not idle, BM_GUI_TABLE points one descriptor ++ * past the current head ++ */ ++ if (ring->head_addr == ring->start_addr) { ++ ring->head_addr += ring->size; ++ } ++ ring->head_addr -= 4 * sizeof(u32); ++ } ++ ++ if (ring->head_addr < ring->start_addr || ++ ring->head_addr >= ring->start_addr + ring->size) { ++ DRM_ERROR("bad ring head address: 0x%08x\n", ++ ring->head_addr); ++ mach64_dump_ring_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return; ++ } ++ ++ ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32); ++ ++ if (!gui_active && ring->head != ring->tail) { ++ mach64_ring_resume(dev_priv, ring); ++ } ++ } ++} ++ ++static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv) ++{ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ dev_priv->ring.head_addr, dev_priv->ring.head, ++ dev_priv->ring.tail, dev_priv->ring.space); ++ ++ /* restore previous SRC_CNTL to disable busmastering */ ++ mach64_do_wait_for_fifo(dev_priv, 1); ++ MACH64_WRITE(MACH64_SRC_CNTL, 0); ++ ++ /* disable busmastering but keep the block 1 registers enabled */ ++ mach64_do_wait_for_idle(dev_priv); ++ MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL) ++ | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN); ++ ++ dev_priv->ring_running = 0; ++} ++ ++static __inline__ void ++mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ ++ DRM_DEBUG("\n"); ++ ++ mach64_ring_tick(dev_priv, ring); ++ ++ ring->space = (ring->head - ring->tail) * sizeof(u32); ++ if (ring->space <= 0) { ++ ring->space += ring->size; ++ } ++} ++ ++/* ================================================================ ++ * DMA macros ++ * ++ * Mach64's ring buffer doesn't take register writes directly. These ++ * have to be written indirectly in DMA buffers. These macros simplify ++ * the task of setting up a buffer, writing commands to it, and ++ * queuing the buffer in the ring. ++ */ ++ ++#define DMALOCALS \ ++ drm_mach64_freelist_t *_entry = NULL; \ ++ struct drm_buf *_buf = NULL; \ ++ u32 *_buf_wptr; int _outcount ++ ++#define GETBUFPTR( __buf ) \ ++((dev_priv->is_pci) ? 
\ ++ ((u32 *)(__buf)->address) : \ ++ ((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset))) ++ ++#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address) ++ ++#define GETRINGOFFSET() (_entry->ring_ofs) ++ ++static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t * ++ dev_priv, ++ drm_mach64_freelist_t ** ++ entry, struct drm_buf * buf) ++{ ++ struct list_head *ptr; ++#if MACH64_EXTRA_CHECKING ++ if (list_empty(&dev_priv->pending)) { ++ DRM_ERROR("Empty pending list in \n"); ++ return -EINVAL; ++ } ++#endif ++ ptr = dev_priv->pending.prev; ++ *entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ while ((*entry)->buf != buf) { ++ if (ptr == &dev_priv->pending) { ++ return -EFAULT; ++ } ++ ptr = ptr->prev; ++ *entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ } ++ return 0; ++} ++ ++#define DMASETPTR( _p ) \ ++do { \ ++ _buf = (_p); \ ++ _outcount = 0; \ ++ _buf_wptr = GETBUFPTR( _buf ); \ ++} while(0) ++ ++/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */ ++#define DMAGETPTR( file_priv, dev_priv, n ) \ ++do { \ ++ if ( MACH64_VERBOSE ) { \ ++ DRM_INFO( "DMAGETPTR( %d )\n", (n) ); \ ++ } \ ++ _buf = mach64_freelist_get( dev_priv ); \ ++ if (_buf == NULL) { \ ++ DRM_ERROR("couldn't get buffer in DMAGETPTR\n"); \ ++ return -EAGAIN; \ ++ } \ ++ if (_buf->pending) { \ ++ DRM_ERROR("pending buf in DMAGETPTR\n"); \ ++ return -EFAULT; \ ++ } \ ++ _buf->file_priv = file_priv; \ ++ _outcount = 0; \ ++ \ ++ _buf_wptr = GETBUFPTR( _buf ); \ ++} while (0) ++ ++#define DMAOUTREG( reg, val ) \ ++do { \ ++ if ( MACH64_VERBOSE ) { \ ++ DRM_INFO( " DMAOUTREG( 0x%x = 0x%08x )\n", \ ++ reg, val ); \ ++ } \ ++ _buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg)); \ ++ _buf_wptr[_outcount++] = cpu_to_le32((val)); \ ++ _buf->used += 8; \ ++} while (0) ++ ++#define DMAADVANCE( dev_priv, _discard ) \ ++ do { \ ++ struct list_head *ptr; \ ++ int ret; \ ++ \ ++ if ( MACH64_VERBOSE ) { \ ++ DRM_INFO( "DMAADVANCE() in \n" ); \ ++ } \ ++ \ ++ if (_buf->used <= 0) { \ ++ DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \ ++ _buf->idx ); \ ++ return -EFAULT; \ ++ } \ ++ if (_buf->pending) { \ ++ /* This is a resued buffer, so we need to find it in the pending list */ \ ++ if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \ ++ DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \ ++ return ret; \ ++ } \ ++ if (_entry->discard) { \ ++ DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \ ++ return -EFAULT; \ ++ } \ ++ } else { \ ++ if (list_empty(&dev_priv->placeholders)) { \ ++ DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \ ++ return -EFAULT; \ ++ } \ ++ ptr = dev_priv->placeholders.next; \ ++ list_del(ptr); \ ++ _entry = list_entry(ptr, drm_mach64_freelist_t, list); \ ++ _buf->pending = 1; \ ++ _entry->buf = _buf; \ ++ list_add_tail(ptr, &dev_priv->pending); \ ++ } \ ++ _entry->discard = (_discard); \ ++ if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \ ++ return ret; \ ++ } while (0) ++ ++#define DMADISCARDBUF() \ ++ do { \ ++ if (_entry == NULL) { \ ++ int ret; \ ++ if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \ ++ DRM_ERROR( "couldn't find pending buf %d\n", \ ++ _buf->idx ); \ ++ return ret; \ ++ } \ ++ } \ ++ _entry->discard = 1; \ ++ } while(0) ++ ++#define DMAADVANCEHOSTDATA( dev_priv ) \ ++ do { \ ++ struct list_head *ptr; \ ++ int ret; \ ++ \ ++ if ( MACH64_VERBOSE ) { \ ++ DRM_INFO( "DMAADVANCEHOSTDATA() in \n" ); \ ++ } \ ++ \ ++ if 
(_buf->used <= 0) { \ ++ DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \ ++ return -EFAULT; \ ++ } \ ++ if (list_empty(&dev_priv->placeholders)) { \ ++ DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \ ++ return -EFAULT; \ ++ } \ ++ \ ++ ptr = dev_priv->placeholders.next; \ ++ list_del(ptr); \ ++ _entry = list_entry(ptr, drm_mach64_freelist_t, list); \ ++ _entry->buf = _buf; \ ++ _entry->buf->pending = 1; \ ++ list_add_tail(ptr, &dev_priv->pending); \ ++ _entry->discard = 1; \ ++ if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \ ++ return ret; \ ++ } while (0) ++ ++#endif /* __MACH64_DRV_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,159 @@ ++/* mach64_irq.c -- IRQ handling for ATI Mach64 -*- linux-c -*- ++ * Created: Tue Feb 25, 2003 by Leif Delgass, based on radeon_irq.c/r128_irq.c ++ */ ++/*- ++ * Copyright (C) The Weather Channel, Inc. 2002. ++ * Copyright 2003 Leif Delgass ++ * All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Whitwell ++ * Eric Anholt ++ * Leif Delgass ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = arg; ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ int status; ++ ++ status = MACH64_READ(MACH64_CRTC_INT_CNTL); ++ ++ /* VBLANK interrupt */ ++ if (status & MACH64_CRTC_VBLANK_INT) { ++ /* Mask off all interrupt ack bits before setting the ack bit, since ++ * there may be other handlers outside the DRM. ++ * ++ * NOTE: On mach64, you need to keep the enable bits set when doing ++ * the ack, despite what the docs say about not acking and enabling ++ * in a single write. 
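++ * Writing back (status & ~MACH64_CRTC_INT_ACKS) | MACH64_CRTC_VBLANK_INT
++ * keeps the enable bits as read and acknowledges only the VBLANK
++ * interrupt, leaving any other pending ack bits untouched.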
++ */ ++ MACH64_WRITE(MACH64_CRTC_INT_CNTL, ++ (status & ~MACH64_CRTC_INT_ACKS) ++ | MACH64_CRTC_VBLANK_INT); ++ ++ atomic_inc(&dev_priv->vbl_received); ++ drm_handle_vblank(dev, 0); ++ return IRQ_HANDLED; ++ } ++ return IRQ_NONE; ++} ++ ++u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc) ++{ ++ const drm_mach64_private_t *const dev_priv = dev->dev_private; ++ ++ if (crtc != 0) ++ return 0; ++ ++ return atomic_read(&dev_priv->vbl_received); ++} ++ ++int mach64_enable_vblank(struct drm_device * dev, int crtc) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL); ++ ++ if (crtc != 0) { ++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", ++ crtc); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("before enable vblank CRTC_INT_CTNL: 0x%08x\n", status); ++ ++ /* Turn on VBLANK interrupt */ ++ MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL) ++ | MACH64_CRTC_VBLANK_INT_EN); ++ ++ return 0; ++} ++ ++void mach64_disable_vblank(struct drm_device * dev, int crtc) ++{ ++ if (crtc != 0) { ++ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", ++ crtc); ++ return; ++ } ++ ++ /* ++ * FIXME: implement proper interrupt disable by using the vblank ++ * counter register (if available). ++ */ ++} ++ ++static void mach64_disable_vblank_local(struct drm_device * dev, int crtc) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL); ++ ++ if (crtc != 0) { ++ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", ++ crtc); ++ return; ++ } ++ ++ DRM_DEBUG("before disable vblank CRTC_INT_CTNL: 0x%08x\n", status); ++ ++ /* Disable and clear VBLANK interrupt */ ++ MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN) ++ | MACH64_CRTC_VBLANK_INT); ++} ++ ++void mach64_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL); ++ ++ DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status); ++ ++ mach64_disable_vblank_local(dev, 0); ++} ++ ++int mach64_driver_irq_postinstall(struct drm_device * dev) ++{ ++ return drm_vblank_init(dev, 1); ++} ++ ++void mach64_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ if (!dev_priv) ++ return; ++ ++ mach64_disable_vblank_local(dev, 0); ++ ++ DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n", ++ MACH64_READ(MACH64_CRTC_INT_CNTL)); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_state.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_state.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mach64_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mach64_state.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,910 @@ ++/* mach64_state.c -- State support for mach64 (Rage Pro) driver -*- linux-c -*- ++ * Created: Sun Dec 03 19:20:26 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Leif Delgass ++ * José Fonseca ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++/* Interface history: ++ * ++ * 1.0 - Initial mach64 DRM ++ * ++ */ ++struct drm_ioctl_desc mach64_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH), ++}; ++ ++int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls); ++ ++/* ================================================================ ++ * DMA hardware state programming functions ++ */ ++ ++static void mach64_print_dirty(const char *msg, unsigned int flags) ++{ ++ DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n", ++ msg, ++ flags, ++ (flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " : ++ "", ++ (flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "", ++ (flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " : ++ "", (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "", ++ (flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " : ++ "", ++ (flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "", ++ (flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "", ++ (flags & MACH64_UPLOAD_MISC) ? "misc, " : "", ++ (flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "", ++ (flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "", ++ (flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "", ++ (flags & MACH64_UPLOAD_CLIPRECTS) ? 
"cliprects, " : ""); ++} ++ ++/* Mach64 doesn't have hardware cliprects, just one hardware scissor, ++ * so the GL scissor is intersected with each cliprect here ++ */ ++/* This function returns 0 on success, 1 for no intersection, and ++ * negative for an error ++ */ ++static int mach64_emit_cliprect(struct drm_file *file_priv, ++ drm_mach64_private_t * dev_priv, ++ struct drm_clip_rect * box) ++{ ++ u32 sc_left_right, sc_top_bottom; ++ struct drm_clip_rect scissor; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mach64_context_regs_t *regs = &sarea_priv->context_state; ++ DMALOCALS; ++ ++ DRM_DEBUG("box=%p\n", box); ++ ++ /* Get GL scissor */ ++ /* FIXME: store scissor in SAREA as a cliprect instead of in ++ * hardware format, or do intersection client-side ++ */ ++ scissor.x1 = regs->sc_left_right & 0xffff; ++ scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16; ++ scissor.y1 = regs->sc_top_bottom & 0xffff; ++ scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16; ++ ++ /* Intersect GL scissor with cliprect */ ++ if (box->x1 > scissor.x1) ++ scissor.x1 = box->x1; ++ if (box->y1 > scissor.y1) ++ scissor.y1 = box->y1; ++ if (box->x2 < scissor.x2) ++ scissor.x2 = box->x2; ++ if (box->y2 < scissor.y2) ++ scissor.y2 = box->y2; ++ /* positive return means skip */ ++ if (scissor.x1 >= scissor.x2) ++ return 1; ++ if (scissor.y1 >= scissor.y2) ++ return 1; ++ ++ DMAGETPTR(file_priv, dev_priv, 2); /* returns on failure to get buffer */ ++ ++ sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16)); ++ sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16)); ++ ++ DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right); ++ DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom); ++ ++ DMAADVANCE(dev_priv, 1); ++ ++ return 0; ++} ++ ++static __inline__ int mach64_emit_state(struct drm_file *file_priv, ++ drm_mach64_private_t * dev_priv) ++{ ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mach64_context_regs_t *regs = &sarea_priv->context_state; ++ unsigned int dirty = sarea_priv->dirty; ++ u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2); ++ DMALOCALS; ++ ++ if (MACH64_VERBOSE) { ++ mach64_print_dirty(__FUNCTION__, dirty); ++ } else { ++ DRM_DEBUG("dirty=0x%08x\n", dirty); ++ } ++ ++ DMAGETPTR(file_priv, dev_priv, 17); /* returns on failure to get buffer */ ++ ++ if (dirty & MACH64_UPLOAD_MISC) { ++ DMAOUTREG(MACH64_DP_MIX, regs->dp_mix); ++ DMAOUTREG(MACH64_DP_SRC, regs->dp_src); ++ DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl); ++ DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_MISC; ++ } ++ ++ if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) { ++ DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH; ++ } ++ if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) { ++ DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH; ++ } ++ if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) { ++ DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl); ++ DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL; ++ } ++ if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) { ++ DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL; ++ } ++ if (dirty & MACH64_UPLOAD_DP_FOG_CLR) { ++ DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR; ++ } ++ if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) { ++ DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask); ++ 
sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK; ++ } ++ if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) { ++ DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH; ++ } ++ if (dirty & MACH64_UPLOAD_SETUP_CNTL) { ++ DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL; ++ } ++ ++ if (dirty & MACH64_UPLOAD_TEXTURE) { ++ DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch); ++ DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl); ++ DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off); ++ DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset); ++ sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE; ++ } ++ ++ DMAADVANCE(dev_priv, 1); ++ ++ sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS; ++ ++ return 0; ++ ++} ++ ++/* ================================================================ ++ * DMA command dispatch functions ++ */ ++ ++static int mach64_dma_dispatch_clear(struct drm_device * dev, ++ struct drm_file *file_priv, ++ unsigned int flags, ++ int cx, int cy, int cw, int ch, ++ unsigned int clear_color, ++ unsigned int clear_depth) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mach64_context_regs_t *ctx = &sarea_priv->context_state; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ u32 fb_bpp, depth_bpp; ++ int i; ++ DMALOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (dev_priv->fb_bpp) { ++ case 16: ++ fb_bpp = MACH64_DATATYPE_RGB565; ++ break; ++ case 32: ++ fb_bpp = MACH64_DATATYPE_ARGB8888; ++ break; ++ default: ++ return -EINVAL; ++ } ++ switch (dev_priv->depth_bpp) { ++ case 16: ++ depth_bpp = MACH64_DATATYPE_RGB565; ++ break; ++ case 24: ++ case 32: ++ depth_bpp = MACH64_DATATYPE_ARGB8888; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (!nbox) ++ return 0; ++ ++ DMAGETPTR(file_priv, dev_priv, nbox * 31); /* returns on failure to get buffer */ ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n", ++ pbox[i].x1, pbox[i].y1, ++ pbox[i].x2, pbox[i].y2, flags); ++ ++ if (flags & (MACH64_FRONT | MACH64_BACK)) { ++ /* Setup for color buffer clears ++ */ ++ ++ DMAOUTREG(MACH64_Z_CNTL, 0); ++ DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); ++ ++ DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right); ++ DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom); ++ ++ DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); ++ DMAOUTREG(MACH64_GUI_TRAJ_CNTL, ++ (MACH64_DST_X_LEFT_TO_RIGHT | ++ MACH64_DST_Y_TOP_TO_BOTTOM)); ++ ++ DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) | ++ (fb_bpp << 4) | ++ (fb_bpp << 8) | ++ (fb_bpp << 16) | ++ (fb_bpp << 28))); ++ ++ DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color); ++ DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask); ++ DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | ++ MACH64_FRGD_MIX_S)); ++ DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR | ++ MACH64_FRGD_SRC_FRGD_CLR | ++ MACH64_MONO_SRC_ONE)); ++ ++ } ++ ++ if (flags & MACH64_FRONT) { ++ ++ DMAOUTREG(MACH64_DST_OFF_PITCH, ++ dev_priv->front_offset_pitch); ++ DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x); ++ DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); ++ ++ } ++ ++ if (flags & MACH64_BACK) { ++ ++ DMAOUTREG(MACH64_DST_OFF_PITCH, ++ dev_priv->back_offset_pitch); ++ DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x); ++ DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); ++ ++ } ++ ++ if (flags & MACH64_DEPTH) { ++ /* Setup 
for depth buffer clear ++ */ ++ DMAOUTREG(MACH64_Z_CNTL, 0); ++ DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); ++ ++ DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right); ++ DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom); ++ ++ DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); ++ DMAOUTREG(MACH64_GUI_TRAJ_CNTL, ++ (MACH64_DST_X_LEFT_TO_RIGHT | ++ MACH64_DST_Y_TOP_TO_BOTTOM)); ++ ++ DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) | ++ (depth_bpp << 4) | ++ (depth_bpp << 8) | ++ (depth_bpp << 16) | ++ (depth_bpp << 28))); ++ ++ DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth); ++ DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); ++ DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | ++ MACH64_FRGD_MIX_S)); ++ DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR | ++ MACH64_FRGD_SRC_FRGD_CLR | ++ MACH64_MONO_SRC_ONE)); ++ ++ DMAOUTREG(MACH64_DST_OFF_PITCH, ++ dev_priv->depth_offset_pitch); ++ DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x); ++ DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); ++ } ++ } ++ ++ DMAADVANCE(dev_priv, 1); ++ ++ return 0; ++} ++ ++static int mach64_dma_dispatch_swap(struct drm_device * dev, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ u32 fb_bpp; ++ int i; ++ DMALOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (dev_priv->fb_bpp) { ++ case 16: ++ fb_bpp = MACH64_DATATYPE_RGB565; ++ break; ++ case 32: ++ default: ++ fb_bpp = MACH64_DATATYPE_ARGB8888; ++ break; ++ } ++ ++ if (!nbox) ++ return 0; ++ ++ DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */ ++ ++ DMAOUTREG(MACH64_Z_CNTL, 0); ++ DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); ++ ++ DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16)); /* no scissor */ ++ DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16)); ++ ++ DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); ++ DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT | ++ MACH64_DST_Y_TOP_TO_BOTTOM)); ++ ++ DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) | ++ (fb_bpp << 4) | ++ (fb_bpp << 8) | ++ (fb_bpp << 16) | (fb_bpp << 28))); ++ ++ DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); ++ DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S)); ++ DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR | ++ MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE)); ++ ++ DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch); ++ DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch); ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", ++ pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2); ++ ++ DMAOUTREG(MACH64_SRC_WIDTH1, w); ++ DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y); ++ DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y); ++ DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); ++ ++ } ++ ++ DMAADVANCE(dev_priv, 1); ++ ++ if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) { ++ for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) { ++ dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1]; ++ } ++ dev_priv->frame_ofs[i] = GETRINGOFFSET(); ++ ++ dev_priv->sarea_priv->frames_queued++; ++ } ++ ++ return 0; ++} ++ ++static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int i, start; ++ u32 head, tail, ofs; ++ ++ DRM_DEBUG("\n"); ++ ++ if (sarea_priv->frames_queued == 0) ++ 
return 0; ++ ++ tail = ring->tail; ++ mach64_ring_tick(dev_priv, ring); ++ head = ring->head; ++ ++ start = (MACH64_MAX_QUEUED_FRAMES - ++ DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued)); ++ ++ if (head == tail) { ++ sarea_priv->frames_queued = 0; ++ for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) { ++ dev_priv->frame_ofs[i] = ~0; ++ } ++ return 0; ++ } ++ ++ for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) { ++ ofs = dev_priv->frame_ofs[i]; ++ DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs); ++ if (ofs == ~0 || ++ (head < tail && (ofs < head || ofs >= tail)) || ++ (head > tail && (ofs < head && ofs >= tail))) { ++ sarea_priv->frames_queued = ++ (MACH64_MAX_QUEUED_FRAMES - 1) - i; ++ dev_priv->frame_ofs[i] = ~0; ++ } ++ } ++ ++ return sarea_priv->frames_queued; ++} ++ ++/* Copy and verify a client submited buffer. ++ * FIXME: Make an assembly optimized version ++ */ ++static __inline__ int copy_from_user_vertex(u32 *to, ++ const u32 __user *ufrom, ++ unsigned long bytes) ++{ ++ unsigned long n = bytes; /* dwords remaining in buffer */ ++ u32 *from, *orig_from; ++ ++ from = drm_alloc(bytes, DRM_MEM_DRIVER); ++ if (from == NULL) ++ return -ENOMEM; ++ ++ if (DRM_COPY_FROM_USER(from, ufrom, bytes)) { ++ drm_free(from, bytes, DRM_MEM_DRIVER); ++ return -EFAULT; ++ } ++ orig_from = from; /* we'll be modifying the "from" ptr, so save it */ ++ ++ n >>= 2; ++ ++ while (n > 1) { ++ u32 data, reg, count; ++ ++ data = *from++; ++ ++ n--; ++ ++ reg = le32_to_cpu(data); ++ count = (reg >> 16) + 1; ++ if (count <= n) { ++ n -= count; ++ reg &= 0xffff; ++ ++ /* This is an exact match of Mach64's Setup Engine registers, ++ * excluding SETUP_CNTL (1_C1). ++ */ ++ if ((reg >= 0x0190 && reg < 0x01c1) || ++ (reg >= 0x01ca && reg <= 0x01cf)) { ++ *to++ = data; ++ memcpy(to, from, count << 2); ++ from += count; ++ to += count; ++ } else { ++ DRM_ERROR("Got bad command: 0x%04x\n", reg); ++ drm_free(orig_from, bytes, DRM_MEM_DRIVER); ++ return -EACCES; ++ } ++ } else { ++ DRM_ERROR ++ ("Got bad command count(=%u) dwords remaining=%lu\n", ++ count, n); ++ drm_free(orig_from, bytes, DRM_MEM_DRIVER); ++ return -EINVAL; ++ } ++ } ++ ++ drm_free(orig_from, bytes, DRM_MEM_DRIVER); ++ if (n == 0) ++ return 0; ++ else { ++ DRM_ERROR("Bad buf->used(=%lu)\n", bytes); ++ return -EINVAL; ++ } ++} ++ ++static int mach64_dma_dispatch_vertex(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_mach64_vertex_t * vertex) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_buf *copy_buf; ++ void *buf = vertex->buf; ++ unsigned long used = vertex->used; ++ int ret = 0; ++ int i = 0; ++ int done = 0; ++ int verify_ret = 0; ++ DMALOCALS; ++ ++ DRM_DEBUG("buf=%p used=%lu nbox=%d\n", ++ buf, used, sarea_priv->nbox); ++ ++ if (!used) ++ goto _vertex_done; ++ ++ copy_buf = mach64_freelist_get(dev_priv); ++ if (copy_buf == NULL) { ++ DRM_ERROR("couldn't get buffer\n"); ++ return -EAGAIN; ++ } ++ ++ /* Mach64's vertex data is actually register writes. To avoid security ++ * compromises these register writes have to be verified and copied from ++ * user space into a private DMA buffer. 
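++ * copy_from_user_vertex() does that verification: it walks the buffer
++ * as (register, dword count) command packets and only accepts writes to
++ * the Setup Engine register ranges, so a client cannot smuggle arbitrary
++ * register programming in through vertex data.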
++ */ ++ verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used); ++ ++ if (verify_ret != 0) { ++ mach64_freelist_put(dev_priv, copy_buf); ++ goto _vertex_done; ++ } ++ ++ copy_buf->used = used; ++ ++ DMASETPTR(copy_buf); ++ ++ if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) { ++ ret = mach64_emit_state(file_priv, dev_priv); ++ if (ret < 0) ++ return ret; ++ } ++ ++ do { ++ /* Emit the next cliprect */ ++ if (i < sarea_priv->nbox) { ++ ret = mach64_emit_cliprect(file_priv, dev_priv, ++ &sarea_priv->boxes[i]); ++ if (ret < 0) { ++ /* failed to get buffer */ ++ return ret; ++ } else if (ret != 0) { ++ /* null intersection with scissor */ ++ continue; ++ } ++ } ++ if ((i >= sarea_priv->nbox - 1)) ++ done = 1; ++ ++ /* Add the buffer to the DMA queue */ ++ DMAADVANCE(dev_priv, done); ++ ++ } while (++i < sarea_priv->nbox); ++ ++ if (!done) { ++ if (copy_buf->pending) { ++ DMADISCARDBUF(); ++ } else { ++ /* This buffer wasn't used (no cliprects), so place it ++ * back on the free list ++ */ ++ mach64_freelist_put(dev_priv, copy_buf); ++ } ++ } ++ ++_vertex_done: ++ sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS; ++ sarea_priv->nbox = 0; ++ ++ return verify_ret; ++} ++ ++static __inline__ int copy_from_user_blit(u32 *to, ++ const u32 __user *ufrom, ++ unsigned long bytes) ++{ ++ to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET); ++ ++ if (DRM_COPY_FROM_USER(to, ufrom, bytes)) { ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int mach64_dma_dispatch_blit(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_mach64_blit_t * blit) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ int dword_shift, dwords; ++ unsigned long used; ++ struct drm_buf *copy_buf; ++ int verify_ret = 0; ++ DMALOCALS; ++ ++ /* The compiler won't optimize away a division by a variable, ++ * even if the only legal values are powers of two. Thus, we'll ++ * use a shift instead. ++ */ ++ switch (blit->format) { ++ case MACH64_DATATYPE_ARGB8888: ++ dword_shift = 0; ++ break; ++ case MACH64_DATATYPE_ARGB1555: ++ case MACH64_DATATYPE_RGB565: ++ case MACH64_DATATYPE_VYUY422: ++ case MACH64_DATATYPE_YVYU422: ++ case MACH64_DATATYPE_ARGB4444: ++ dword_shift = 1; ++ break; ++ case MACH64_DATATYPE_CI8: ++ case MACH64_DATATYPE_RGB8: ++ dword_shift = 2; ++ break; ++ default: ++ DRM_ERROR("invalid blit format %d\n", blit->format); ++ return -EINVAL; ++ } ++ ++ /* Set buf->used to the bytes of blit data based on the blit dimensions ++ * and verify the size. When the setup is emitted to the buffer with ++ * the DMA* macros below, buf->used is incremented to include the bytes ++ * used for setup as well as the blit data. ++ */ ++ dwords = (blit->width * blit->height) >> dword_shift; ++ used = dwords << 2; ++ if (used <= 0 || ++ used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) { ++ DRM_ERROR("Invalid blit size: %lu bytes\n", used); ++ return -EINVAL; ++ } ++ ++ copy_buf = mach64_freelist_get(dev_priv); ++ if (copy_buf == NULL) { ++ DRM_ERROR("couldn't get buffer\n"); ++ return -EAGAIN; ++ } ++ ++ /* Copy the blit data from userspace. ++ * ++ * XXX: This is overkill. The most efficient solution would be having ++ * two sets of buffers (one set private for vertex data, the other set ++ * client-writable for blits). However that would bring more complexity ++ * and would break backward compatability. The solution currently ++ * implemented is keeping all buffers private, allowing to secure the ++ * driver, without increasing complexity at the expense of some speed ++ * transfering data. 
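++ * Note that, unlike vertex buffers, the blit payload needs no per-dword
++ * verification: it is consumed as raw BM_HOSTDATA, and every setup
++ * register in front of it is emitted by the kernel below.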
++ */ ++ verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used); ++ ++ if (verify_ret != 0) { ++ mach64_freelist_put(dev_priv, copy_buf); ++ goto _blit_done; ++ } ++ ++ copy_buf->used = used; ++ ++ /* FIXME: Use a last buffer flag and reduce the state emitted for subsequent, ++ * continuation buffers? ++ */ ++ ++ /* Blit via BM_HOSTDATA (gui-master) - like HOST_DATA[0-15], but doesn't require ++ * a register command every 16 dwords. State setup is added at the start of the ++ * buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET ++ */ ++ DMASETPTR(copy_buf); ++ ++ DMAOUTREG(MACH64_Z_CNTL, 0); ++ DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); ++ ++ DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16)); /* no scissor */ ++ DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16)); ++ ++ DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); /* disable */ ++ DMAOUTREG(MACH64_GUI_TRAJ_CNTL, ++ MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM); ++ ++ DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0) /* dst pix width */ ++ |(blit->format << 4) /* composite pix width */ ++ |(blit->format << 8) /* src pix width */ ++ |(blit->format << 16) /* host data pix width */ ++ |(blit->format << 28) /* scaler/3D pix width */ ++ ); ++ ++ DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); /* enable all planes */ ++ DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S); ++ DMAOUTREG(MACH64_DP_SRC, ++ MACH64_BKGD_SRC_BKGD_CLR ++ | MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE); ++ ++ DMAOUTREG(MACH64_DST_OFF_PITCH, ++ (blit->pitch << 22) | (blit->offset >> 3)); ++ DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x); ++ DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width); ++ ++ DRM_DEBUG("%lu bytes\n", used); ++ ++ /* Add the buffer to the queue */ ++ DMAADVANCEHOSTDATA(dev_priv); ++ ++_blit_done: ++ return verify_ret; ++} ++ ++/* ================================================================ ++ * IOCTL functions ++ */ ++ ++int mach64_dma_clear(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mach64_clear_t *clear = data; ++ int ret; ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; ++ ++ ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags, ++ clear->x, clear->y, clear->w, clear->h, ++ clear->clear_color, ++ clear->clear_depth); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC); ++ return ret; ++} ++ ++int mach64_dma_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int ret; ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; ++ ++ ret = mach64_dma_dispatch_swap(dev, file_priv); ++ ++ /* Make sure we restore the 3D state next time. 
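++ * The clear overwrote Z_CNTL, SCALE_3D_CNTL and the DP_* registers, so
++ * the context and misc state are flagged dirty for the next state emit.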
++ */ ++ sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC); ++ return ret; ++} ++ ++int mach64_dma_vertex(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mach64_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n", ++ DRM_CURRENTPID, ++ vertex->buf, vertex->used, vertex->discard); ++ ++ if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) { ++ DRM_ERROR("buffer prim %d\n", vertex->prim); ++ return -EINVAL; ++ } ++ ++ if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) { ++ DRM_ERROR("Invalid vertex buffer size: %lu bytes\n", ++ vertex->used); ++ return -EINVAL; ++ } ++ ++ if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; ++ ++ return mach64_dma_dispatch_vertex(dev, file_priv, vertex); ++} ++ ++int mach64_dma_blit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mach64_blit_t *blit = data; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ ret = mach64_dma_dispatch_blit(dev, file_priv, blit); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | ++ MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS); ++ ++ return ret; ++} ++ ++int mach64_get_param(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_getparam_t *param = data; ++ int value; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (param->param) { ++ case MACH64_PARAM_FRAMES_QUEUED: ++ /* Needs lock since it calls mach64_ring_tick() */ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ value = mach64_do_get_frames_queued(dev_priv); ++ break; ++ case MACH64_PARAM_IRQ_NR: ++ value = dev->irq; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/Makefile kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/Makefile +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/Makefile 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,76 @@ ++# ++# Makefile for the drm device driver. This driver provides support for the ++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. ++# ++# Based on David Woodhouse's mtd build. 
++# ++# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $ ++# ++ ++EXTRA_CFLAGS += -D__linux__ ++ ++drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ ++ drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ ++ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ ++ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ ++ drm_memory_debug.o ati_pcigart.o drm_sman.o \ ++ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ ++ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \ ++ drm_regman.o drm_vm_nopage_compat.o drm_gem.o ++pvr2d-objs := pvr2d_drv.o ++tdfx-objs := tdfx_drv.o ++r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o ++mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o ++i810-objs := i810_drv.o i810_dma.o ++i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ ++ i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \ ++ i915_opregion.o \ ++ i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o ++nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ ++ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ ++ nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \ ++ nv04_timer.o \ ++ nv04_mc.o nv40_mc.o nv50_mc.o \ ++ nv04_fb.o nv10_fb.o nv40_fb.o \ ++ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ ++ nv04_graph.o nv10_graph.o nv20_graph.o \ ++ nv40_graph.o nv50_graph.o \ ++ nv04_instmem.o nv50_instmem.o ++radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o ++sis-objs := sis_drv.o sis_mm.o ++ffb-objs := ffb_drv.o ffb_context.o ++savage-objs := savage_drv.o savage_bci.o savage_state.o ++via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ ++ via_video.o via_dmablit.o via_fence.o via_buffer.o ++mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o ++nv-objs := nv_drv.o ++xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \ ++ xgi_fence.o ++ ++ifeq ($(CONFIG_COMPAT),y) ++drm-objs += drm_ioc32.o ++radeon-objs += radeon_ioc32.o ++mga-objs += mga_ioc32.o ++r128-objs += r128_ioc32.o ++i915-objs += i915_ioc32.o ++nouveau-objs += nouveau_ioc32.o ++xgi-objs += xgi_ioc32.o ++endif ++ ++obj-m += drm.o ++obj-$(CONFIG_DRM_TUNGSTEN_PVR2D) += pvr2d.o ++obj-$(CONFIG_DRM_TUNGSTEN_TDFX) += tdfx.o ++obj-$(CONFIG_DRM_TUNGSTEN_R128) += r128.o ++obj-$(CONFIG_DRM_TUNGSTEN_RADEON) += radeon.o ++obj-$(CONFIG_DRM_TUNGSTEN_MGA) += mga.o ++obj-$(CONFIG_DRM_TUNGSTEN_I810) += i810.o ++obj-$(CONFIG_DRM_TUNGSTEN_I915) += i915.o ++obj-$(CONFIG_DRM_TUNGSTEN_SIS) += sis.o ++obj-$(CONFIG_DRM_TUNGSTEN_FFB) += ffb.o ++obj-$(CONFIG_DRM_TUNGSTEN_SAVAGE) += savage.o ++obj-$(CONFIG_DRM_TUNGSTEN_VIA) += via.o ++obj-$(CONFIG_DRM_TUNGSTEN_MACH64) += mach64.o ++obj-$(CONFIG_DRM_TUNGSTEN_NV) += nv.o ++obj-$(CONFIG_DRM_TUNGSTEN_NOUVEAU) += nouveau.o ++obj-$(CONFIG_DRM_TUNGSTEN_XGI) += xgi.o ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_dma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_dma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1161 @@ ++/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*- ++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com ++ */ ++/* Copyright 1999 Precision Insight, Inc., Cedar 
Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++/** ++ * \file mga_dma.c ++ * DMA support for MGA G200 / G400. ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Jeff Hartmann ++ * \author Keith Whitwell ++ * \author Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++ ++#define MGA_DEFAULT_USEC_TIMEOUT 10000 ++#define MGA_FREELIST_DEBUG 0 ++ ++#define MINIMAL_CLEANUP 0 ++#define FULL_CLEANUP 1 ++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup); ++ ++/* ================================================================ ++ * Engine control ++ */ ++ ++int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) ++{ ++ u32 status = 0; ++ int i; ++ DRM_DEBUG("\n"); ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; ++ if (status == MGA_ENDPRDMASTS) { ++ MGA_WRITE8(MGA_CRTC_INDEX, 0); ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if MGA_DMA_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x\n", status); ++#endif ++ return -EBUSY; ++} ++ ++static int mga_do_dma_reset(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_primary_buffer_t *primary = &dev_priv->prim; ++ ++ DRM_DEBUG("\n"); ++ ++ /* The primary DMA stream should look like new right about now. ++ */ ++ primary->tail = 0; ++ primary->space = primary->size; ++ primary->last_flush = 0; ++ ++ sarea_priv->last_wrap = 0; ++ ++ /* FIXME: Reset counters, buffer ages etc... ++ */ ++ ++ /* FIXME: What else do we need to reinitialize? WARP stuff? 
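++ * For now only the primary stream bookkeeping (tail, space, last_flush)
++ * and the SAREA wrap counter are rewound.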
++ */ ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * Primary DMA stream ++ */ ++ ++void mga_do_dma_flush(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_primary_buffer_t *primary = &dev_priv->prim; ++ u32 head, tail; ++ u32 status = 0; ++ int i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ /* We need to wait so that we can do an safe flush */ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; ++ if (status == MGA_ENDPRDMASTS) ++ break; ++ DRM_UDELAY(1); ++ } ++ ++ if (primary->tail == primary->last_flush) { ++ DRM_DEBUG(" bailing out...\n"); ++ return; ++ } ++ ++ tail = primary->tail + dev_priv->primary->offset; ++ ++ /* We need to pad the stream between flushes, as the card ++ * actually (partially?) reads the first of these commands. ++ * See page 4-16 in the G400 manual, middle of the page or so. ++ */ ++ BEGIN_DMA(1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++ ++ primary->last_flush = primary->tail; ++ ++ head = MGA_READ(MGA_PRIMADDRESS); ++ ++ if (head <= tail) { ++ primary->space = primary->size - primary->tail; ++ } else { ++ primary->space = head - tail; ++ } ++ ++ DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset); ++ DRM_DEBUG(" tail = 0x%06lx\n", tail - dev_priv->primary->offset); ++ DRM_DEBUG(" space = 0x%06x\n", primary->space); ++ ++ mga_flush_write_combine(); ++ MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); ++ ++ DRM_DEBUG("done.\n"); ++} ++ ++void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_primary_buffer_t *primary = &dev_priv->prim; ++ u32 head, tail; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_DMA_WRAP(); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++ ++ tail = primary->tail + dev_priv->primary->offset; ++ ++ primary->tail = 0; ++ primary->last_flush = 0; ++ primary->last_wrap++; ++ ++ head = MGA_READ(MGA_PRIMADDRESS); ++ ++ if (head == dev_priv->primary->offset) { ++ primary->space = primary->size; ++ } else { ++ primary->space = head - dev_priv->primary->offset; ++ } ++ ++ DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset); ++ DRM_DEBUG(" tail = 0x%06x\n", primary->tail); ++ DRM_DEBUG(" wrap = %d\n", primary->last_wrap); ++ DRM_DEBUG(" space = 0x%06x\n", primary->space); ++ ++ mga_flush_write_combine(); ++ MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); ++ ++ set_bit(0, &primary->wrapped); ++ DRM_DEBUG("done.\n"); ++} ++ ++void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_primary_buffer_t *primary = &dev_priv->prim; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u32 head = dev_priv->primary->offset; ++ DRM_DEBUG("\n"); ++ ++ sarea_priv->last_wrap++; ++ DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap); ++ ++ mga_flush_write_combine(); ++ MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL); ++ ++ clear_bit(0, &primary->wrapped); ++ DRM_DEBUG("done.\n"); ++} ++ ++/* ================================================================ ++ * Freelist management ++ */ ++ ++#define MGA_BUFFER_USED ~0 ++#define MGA_BUFFER_FREE 0 ++ ++#if MGA_FREELIST_DEBUG ++static void mga_freelist_print(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_freelist_t *entry; ++ ++ DRM_INFO("\n"); ++ DRM_INFO("current dispatch: last=0x%x done=0x%x\n", ++ 
dev_priv->sarea_priv->last_dispatch, ++ (unsigned int)(MGA_READ(MGA_PRIMADDRESS) - ++ dev_priv->primary->offset)); ++ DRM_INFO("current freelist:\n"); ++ ++ for (entry = dev_priv->head->next; entry; entry = entry->next) { ++ DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n", ++ entry, entry->buf->idx, entry->age.head, ++ entry->age.head - dev_priv->primary->offset); ++ } ++ DRM_INFO("\n"); ++} ++#endif ++ ++static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_freelist_t *entry; ++ int i; ++ DRM_DEBUG("count=%d\n", dma->buf_count); ++ ++ dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); ++ if (dev_priv->head == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); ++ SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ ++ entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); ++ if (entry == NULL) ++ return -ENOMEM; ++ ++ memset(entry, 0, sizeof(drm_mga_freelist_t)); ++ ++ entry->next = dev_priv->head->next; ++ entry->prev = dev_priv->head; ++ SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); ++ entry->buf = buf; ++ ++ if (dev_priv->head->next != NULL) ++ dev_priv->head->next->prev = entry; ++ if (entry->next == NULL) ++ dev_priv->tail = entry; ++ ++ buf_priv->list_entry = entry; ++ buf_priv->discard = 0; ++ buf_priv->dispatched = 0; ++ ++ dev_priv->head->next = entry; ++ } ++ ++ return 0; ++} ++ ++static void mga_freelist_cleanup(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_freelist_t *entry; ++ drm_mga_freelist_t *next; ++ DRM_DEBUG("\n"); ++ ++ entry = dev_priv->head; ++ while (entry) { ++ next = entry->next; ++ drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); ++ entry = next; ++ } ++ ++ dev_priv->head = dev_priv->tail = NULL; ++} ++ ++#if 0 ++/* FIXME: Still needed? ++ */ ++static void mga_freelist_reset(struct drm_device * dev) ++{ ++ drm_device_dma_t *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ int i; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0); ++ } ++} ++#endif ++ ++static struct drm_buf *mga_freelist_get(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_freelist_t *next; ++ drm_mga_freelist_t *prev; ++ drm_mga_freelist_t *tail = dev_priv->tail; ++ u32 head, wrap; ++ DRM_DEBUG("\n"); ++ ++ head = MGA_READ(MGA_PRIMADDRESS); ++ wrap = dev_priv->sarea_priv->last_wrap; ++ ++ DRM_DEBUG(" tail=0x%06lx %d\n", ++ tail->age.head ? 
++ tail->age.head - dev_priv->primary->offset : 0, ++ tail->age.wrap); ++ DRM_DEBUG(" head=0x%06lx %d\n", ++ head - dev_priv->primary->offset, wrap); ++ ++ if (TEST_AGE(&tail->age, head, wrap)) { ++ prev = dev_priv->tail->prev; ++ next = dev_priv->tail; ++ prev->next = NULL; ++ next->prev = next->next = NULL; ++ dev_priv->tail = prev; ++ SET_AGE(&next->age, MGA_BUFFER_USED, 0); ++ return next->buf; ++ } ++ ++ DRM_DEBUG("returning NULL!\n"); ++ return NULL; ++} ++ ++int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_buf_priv_t *buf_priv = buf->dev_private; ++ drm_mga_freelist_t *head, *entry, *prev; ++ ++ DRM_DEBUG("age=0x%06lx wrap=%d\n", ++ buf_priv->list_entry->age.head - ++ dev_priv->primary->offset, buf_priv->list_entry->age.wrap); ++ ++ entry = buf_priv->list_entry; ++ head = dev_priv->head; ++ ++ if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) { ++ SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); ++ prev = dev_priv->tail; ++ prev->next = entry; ++ entry->prev = prev; ++ entry->next = NULL; ++ } else { ++ prev = head->next; ++ head->next = entry; ++ prev->prev = entry; ++ entry->prev = head; ++ entry->next = prev; ++ } ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * DMA initialization, cleanup ++ */ ++ ++int mga_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ drm_mga_private_t *dev_priv; ++ ++ dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); ++ if (!dev_priv) ++ return -ENOMEM; ++ ++ dev->dev_private = (void *)dev_priv; ++ memset(dev_priv, 0, sizeof(drm_mga_private_t)); ++ ++ dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; ++ dev_priv->chipset = flags; ++ ++ dev_priv->mmio_base = drm_get_resource_start(dev, 1); ++ dev_priv->mmio_size = drm_get_resource_len(dev, 1); ++ ++ dev->counters += 3; ++ dev->types[6] = _DRM_STAT_IRQ; ++ dev->types[7] = _DRM_STAT_PRIMARY; ++ dev->types[8] = _DRM_STAT_SECONDARY; ++ ++ return 0; ++} ++ ++/** ++ * Bootstrap the driver for AGP DMA. ++ * ++ * \todo ++ * Investigate whether there is any benifit to storing the WARP microcode in ++ * AGP memory. If not, the microcode may as well always be put in PCI ++ * memory. ++ * ++ * \todo ++ * This routine needs to set dma_bs->agp_mode to the mode actually configured ++ * in the hardware. Looking just at the Linux AGP driver code, I don't see ++ * an easy way to determine this. ++ * ++ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap ++ */ ++static int mga_do_agp_dma_bootstrap(struct drm_device *dev, ++ drm_mga_dma_bootstrap_t * dma_bs) ++{ ++ drm_mga_private_t *const dev_priv = ++ (drm_mga_private_t *)dev->dev_private; ++ unsigned int warp_size = mga_warp_microcode_size(dev_priv); ++ int err; ++ unsigned offset; ++ const unsigned secondary_size = dma_bs->secondary_bin_count ++ * dma_bs->secondary_bin_size; ++ const unsigned agp_size = (dma_bs->agp_size << 20); ++ struct drm_buf_desc req; ++ struct drm_agp_mode mode; ++ struct drm_agp_info info; ++ struct drm_agp_buffer agp_req; ++ struct drm_agp_binding bind_req; ++ ++ /* Acquire AGP. 
*/ ++ err = drm_agp_acquire(dev); ++ if (err) { ++ DRM_ERROR("Unable to acquire AGP: %d\n", err); ++ return err; ++ } ++ ++ err = drm_agp_info(dev, &info); ++ if (err) { ++ DRM_ERROR("Unable to get AGP info: %d\n", err); ++ return err; ++ } ++ ++ mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode; ++ err = drm_agp_enable(dev, mode); ++ if (err) { ++ DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); ++ return err; ++ } ++ ++ /* In addition to the usual AGP mode configuration, the G200 AGP cards ++ * need to have the AGP mode "manually" set. ++ */ ++ ++ if (dev_priv->chipset == MGA_CARD_TYPE_G200) { ++ if (mode.mode & 0x02) { ++ MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); ++ } else { ++ MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); ++ } ++ } ++ ++ /* Allocate and bind AGP memory. */ ++ agp_req.size = agp_size; ++ agp_req.type = 0; ++ err = drm_agp_alloc(dev, &agp_req); ++ if (err) { ++ dev_priv->agp_size = 0; ++ DRM_ERROR("Unable to allocate %uMB AGP memory\n", ++ dma_bs->agp_size); ++ return err; ++ } ++ ++ dev_priv->agp_size = agp_size; ++ dev_priv->agp_handle = agp_req.handle; ++ ++ bind_req.handle = agp_req.handle; ++ bind_req.offset = 0; ++ err = drm_agp_bind( dev, &bind_req ); ++ if (err) { ++ DRM_ERROR("Unable to bind AGP memory: %d\n", err); ++ return err; ++ } ++ ++ /* Make drm_addbufs happy by not trying to create a mapping for less ++ * than a page. ++ */ ++ if (warp_size < PAGE_SIZE) ++ warp_size = PAGE_SIZE; ++ ++ offset = 0; ++ err = drm_addmap(dev, offset, warp_size, ++ _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); ++ if (err) { ++ DRM_ERROR("Unable to map WARP microcode: %d\n", err); ++ return err; ++ } ++ ++ offset += warp_size; ++ err = drm_addmap(dev, offset, dma_bs->primary_size, ++ _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary); ++ if (err) { ++ DRM_ERROR("Unable to map primary DMA region: %d\n", err); ++ return err; ++ } ++ ++ offset += dma_bs->primary_size; ++ err = drm_addmap(dev, offset, secondary_size, ++ _DRM_AGP, 0, & dev->agp_buffer_map); ++ if (err) { ++ DRM_ERROR("Unable to map secondary DMA region: %d\n", err); ++ return err; ++ } ++ ++ (void)memset( &req, 0, sizeof(req) ); ++ req.count = dma_bs->secondary_bin_count; ++ req.size = dma_bs->secondary_bin_size; ++ req.flags = _DRM_AGP_BUFFER; ++ req.agp_start = offset; ++ ++ err = drm_addbufs_agp(dev, &req); ++ if (err) { ++ DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); ++ return err; ++ } ++ ++#ifdef __linux__ ++ { ++ struct drm_map_list *_entry; ++ unsigned long agp_token = 0; ++ ++ list_for_each_entry(_entry, &dev->maplist, head) { ++ if (_entry->map == dev->agp_buffer_map) ++ agp_token = _entry->user_token; ++ } ++ if (!agp_token) ++ return -EFAULT; ++ ++ dev->agp_buffer_token = agp_token; ++ } ++#endif ++ ++ offset += secondary_size; ++ err = drm_addmap(dev, offset, agp_size - offset, ++ _DRM_AGP, 0, & dev_priv->agp_textures); ++ if (err) { ++ DRM_ERROR("Unable to map AGP texture region: %d\n", err); ++ return err; ++ } ++ ++ drm_core_ioremap(dev_priv->warp, dev); ++ drm_core_ioremap(dev_priv->primary, dev); ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ ++ if (!dev_priv->warp->handle || ++ !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { ++ DRM_ERROR("failed to ioremap agp regions! 
(%p, %p, %p)\n", ++ dev_priv->warp->handle, dev_priv->primary->handle, ++ dev->agp_buffer_map->handle); ++ return -ENOMEM; ++ } ++ ++ dev_priv->dma_access = MGA_PAGPXFER; ++ dev_priv->wagp_enable = MGA_WAGP_ENABLE; ++ ++ DRM_INFO("Initialized card for AGP DMA.\n"); ++ return 0; ++} ++ ++/** ++ * Bootstrap the driver for PCI DMA. ++ * ++ * \todo ++ * The algorithm for decreasing the size of the primary DMA buffer could be ++ * better. The size should be rounded up to the nearest page size, then ++ * decrease the request size by a single page each pass through the loop. ++ * ++ * \todo ++ * Determine whether the maximum address passed to drm_pci_alloc is correct. ++ * The same goes for drm_addbufs_pci. ++ * ++ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap ++ */ ++static int mga_do_pci_dma_bootstrap(struct drm_device * dev, ++ drm_mga_dma_bootstrap_t * dma_bs) ++{ ++ drm_mga_private_t *const dev_priv = ++ (drm_mga_private_t *) dev->dev_private; ++ unsigned int warp_size = mga_warp_microcode_size(dev_priv); ++ unsigned int primary_size; ++ unsigned int bin_count; ++ int err; ++ struct drm_buf_desc req; ++ ++ ++ if (dev->dma == NULL) { ++ DRM_ERROR("dev->dma is NULL\n"); ++ return -EFAULT; ++ } ++ ++ /* Make drm_addbufs happy by not trying to create a mapping for less ++ * than a page. ++ */ ++ if (warp_size < PAGE_SIZE) ++ warp_size = PAGE_SIZE; ++ ++ /* The proper alignment is 0x100 for this mapping */ ++ err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, ++ _DRM_READ_ONLY, &dev_priv->warp); ++ if (err != 0) { ++ DRM_ERROR("Unable to create mapping for WARP microcode: %d\n", ++ err); ++ return err; ++ } ++ ++ /* Other than the bottom two bits being used to encode other ++ * information, there don't appear to be any restrictions on the ++ * alignment of the primary or secondary DMA buffers. 
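++ * The loop below therefore just halves the requested primary size until
++ * a _DRM_CONSISTENT mapping succeeds, and the secondary bin count is
++ * walked down the same way if drm_addbufs_pci() fails.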
++ */ ++ ++ for (primary_size = dma_bs->primary_size; primary_size != 0; ++ primary_size >>= 1 ) { ++ /* The proper alignment for this mapping is 0x04 */ ++ err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, ++ _DRM_READ_ONLY, &dev_priv->primary); ++ if (!err) ++ break; ++ } ++ ++ if (err != 0) { ++ DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); ++ return -ENOMEM; ++ } ++ ++ if (dev_priv->primary->size != dma_bs->primary_size) { ++ DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", ++ dma_bs->primary_size, ++ (unsigned)dev_priv->primary->size); ++ dma_bs->primary_size = dev_priv->primary->size; ++ } ++ ++ for (bin_count = dma_bs->secondary_bin_count; bin_count > 0; ++ bin_count-- ) { ++ (void)memset(&req, 0, sizeof(req)); ++ req.count = bin_count; ++ req.size = dma_bs->secondary_bin_size; ++ ++ err = drm_addbufs_pci(dev, &req); ++ if (!err) { ++ break; ++ } ++ } ++ ++ if (bin_count == 0) { ++ DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); ++ return err; ++ } ++ ++ if (bin_count != dma_bs->secondary_bin_count) { ++ DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u " ++ "to %u.\n", dma_bs->secondary_bin_count, bin_count); ++ ++ dma_bs->secondary_bin_count = bin_count; ++ } ++ ++ dev_priv->dma_access = 0; ++ dev_priv->wagp_enable = 0; ++ ++ dma_bs->agp_mode = 0; ++ ++ DRM_INFO("Initialized card for PCI DMA.\n"); ++ return 0; ++} ++ ++ ++static int mga_do_dma_bootstrap(struct drm_device *dev, ++ drm_mga_dma_bootstrap_t *dma_bs) ++{ ++ const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); ++ int err; ++ drm_mga_private_t *const dev_priv = ++ (drm_mga_private_t *) dev->dev_private; ++ ++ ++ dev_priv->used_new_dma_init = 1; ++ ++ /* The first steps are the same for both PCI and AGP based DMA. Map ++ * the cards MMIO registers and map a status page. ++ */ ++ err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, ++ _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio); ++ if (err) { ++ DRM_ERROR("Unable to map MMIO region: %d\n", err); ++ return err; ++ } ++ ++ ++ err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, ++ _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, ++ & dev_priv->status); ++ if (err) { ++ DRM_ERROR("Unable to map status region: %d\n", err); ++ return err; ++ } ++ ++ ++ /* The DMA initialization procedure is slightly different for PCI and ++ * AGP cards. AGP cards just allocate a large block of AGP memory and ++ * carve off portions of it for internal uses. The remaining memory ++ * is returned to user-mode to be used for AGP textures. ++ */ ++ ++ if (is_agp) { ++ err = mga_do_agp_dma_bootstrap(dev, dma_bs); ++ } ++ ++ /* If we attempted to initialize the card for AGP DMA but failed, ++ * clean-up any mess that may have been created. ++ */ ++ ++ if (err) { ++ mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); ++ } ++ ++ ++ /* Not only do we want to try and initialized PCI cards for PCI DMA, ++ * but we also try to initialized AGP cards that could not be ++ * initialized for AGP DMA. This covers the case where we have an AGP ++ * card in a system with an unsupported AGP chipset. In that case the ++ * card will be detected as AGP, but we won't be able to allocate any ++ * AGP memory, etc. 
++ */ ++ ++ if (!is_agp || err) { ++ err = mga_do_pci_dma_bootstrap(dev, dma_bs); ++ } ++ ++ ++ return err; ++} ++ ++int mga_dma_bootstrap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mga_dma_bootstrap_t *bootstrap = data; ++ int err; ++ static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; ++ const drm_mga_private_t *const dev_priv = ++ (drm_mga_private_t *) dev->dev_private; ++ ++ ++ err = mga_do_dma_bootstrap(dev, bootstrap); ++ if (err) { ++ mga_do_cleanup_dma(dev, FULL_CLEANUP); ++ return err; ++ } ++ ++ if (dev_priv->agp_textures != NULL) { ++ bootstrap->texture_handle = dev_priv->agp_textures->offset; ++ bootstrap->texture_size = dev_priv->agp_textures->size; ++ } else { ++ bootstrap->texture_handle = 0; ++ bootstrap->texture_size = 0; ++ } ++ ++ bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07]; ++ ++ return 0; ++} ++ ++ ++static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) ++{ ++ drm_mga_private_t *dev_priv; ++ int ret; ++ DRM_DEBUG("\n"); ++ ++ ++ dev_priv = dev->dev_private; ++ ++ if (init->sgram) { ++ dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; ++ } else { ++ dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; ++ } ++ dev_priv->maccess = init->maccess; ++ ++ dev_priv->fb_cpp = init->fb_cpp; ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ dev_priv->depth_cpp = init->depth_cpp; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ /* FIXME: Need to support AGP textures... ++ */ ++ dev_priv->texture_offset = init->texture_offset[0]; ++ dev_priv->texture_size = init->texture_size[0]; ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("failed to find sarea!\n"); ++ return -EINVAL; ++ } ++ ++ if (!dev_priv->used_new_dma_init) { ++ ++ dev_priv->dma_access = MGA_PAGPXFER; ++ dev_priv->wagp_enable = MGA_WAGP_ENABLE; ++ ++ dev_priv->status = drm_core_findmap(dev, init->status_offset); ++ if (!dev_priv->status) { ++ DRM_ERROR("failed to find status page!\n"); ++ return -EINVAL; ++ } ++ dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); ++ if (!dev_priv->mmio) { ++ DRM_ERROR("failed to find mmio region!\n"); ++ return -EINVAL; ++ } ++ dev_priv->warp = drm_core_findmap(dev, init->warp_offset); ++ if (!dev_priv->warp) { ++ DRM_ERROR("failed to find warp microcode region!\n"); ++ return -EINVAL; ++ } ++ dev_priv->primary = drm_core_findmap(dev, init->primary_offset); ++ if (!dev_priv->primary) { ++ DRM_ERROR("failed to find primary dma region!\n"); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = ++ drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("failed to find dma buffer region!\n"); ++ return -EINVAL; ++ } ++ ++ drm_core_ioremap(dev_priv->warp, dev); ++ drm_core_ioremap(dev_priv->primary, dev); ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++ if (!dev_priv->warp->handle || ++ !dev_priv->primary->handle || ++ ((dev_priv->dma_access != 0) && ++ ((dev->agp_buffer_map == NULL) || ++ (dev->agp_buffer_map->handle == NULL)))) { ++ DRM_ERROR("failed to ioremap agp regions!\n"); ++ return -ENOMEM; ++ } ++ ++ ret = mga_warp_install_microcode(dev_priv); ++ if (ret != 0) { ++ 
DRM_ERROR("failed to install WARP ucode: %d!\n", ret); ++ return ret; ++ } ++ ++ ret = mga_warp_init(dev_priv); ++ if (ret != 0) { ++ DRM_ERROR("failed to init WARP engine: %d!\n", ret); ++ return ret; ++ } ++ ++ dev_priv->prim.status = (u32 *) dev_priv->status->handle; ++ ++ mga_do_wait_for_idle(dev_priv); ++ ++ /* Init the primary DMA registers. ++ */ ++ MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL); ++ ++ dev_priv->prim.start = (u8 *) dev_priv->primary->handle; ++ dev_priv->prim.end = ((u8 *) dev_priv->primary->handle ++ + dev_priv->primary->size); ++ dev_priv->prim.size = dev_priv->primary->size; ++ ++ dev_priv->prim.tail = 0; ++ dev_priv->prim.space = dev_priv->prim.size; ++ dev_priv->prim.wrapped = 0; ++ ++ dev_priv->prim.last_flush = 0; ++ dev_priv->prim.last_wrap = 0; ++ ++ dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE; ++ ++ dev_priv->prim.status[0] = dev_priv->primary->offset; ++ dev_priv->prim.status[1] = 0; ++ ++ dev_priv->sarea_priv->last_wrap = 0; ++ dev_priv->sarea_priv->last_frame.head = 0; ++ dev_priv->sarea_priv->last_frame.wrap = 0; ++ ++ if (mga_freelist_init(dev, dev_priv) < 0) { ++ DRM_ERROR("could not initialize freelist\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) ++{ ++ int err = 0; ++ DRM_DEBUG("\n"); ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ ++ if ((dev_priv->warp != NULL) ++ && (dev_priv->warp->type != _DRM_CONSISTENT)) ++ drm_core_ioremapfree(dev_priv->warp, dev); ++ ++ if ((dev_priv->primary != NULL) ++ && (dev_priv->primary->type != _DRM_CONSISTENT)) ++ drm_core_ioremapfree(dev_priv->primary, dev); ++ ++ if (dev->agp_buffer_map != NULL) ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ ++ if (dev_priv->used_new_dma_init) { ++ if (dev_priv->agp_handle != 0) { ++ struct drm_agp_binding unbind_req; ++ struct drm_agp_buffer free_req; ++ ++ unbind_req.handle = dev_priv->agp_handle; ++ drm_agp_unbind(dev, &unbind_req); ++ ++ free_req.handle = dev_priv->agp_handle; ++ drm_agp_free(dev, &free_req); ++ ++ dev_priv->agp_textures = NULL; ++ dev_priv->agp_size = 0; ++ dev_priv->agp_handle = 0; ++ } ++ ++ if ((dev->agp != NULL) && dev->agp->acquired) { ++ err = drm_agp_release(dev); ++ } ++ } ++ ++ dev_priv->warp = NULL; ++ dev_priv->primary = NULL; ++ dev_priv->sarea = NULL; ++ dev_priv->sarea_priv = NULL; ++ dev->agp_buffer_map = NULL; ++ ++ if (full_cleanup) { ++ dev_priv->mmio = NULL; ++ dev_priv->status = NULL; ++ dev_priv->used_new_dma_init = 0; ++ } ++ ++ memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); ++ dev_priv->warp_pipe = 0; ++ memset(dev_priv->warp_pipe_phys, 0, ++ sizeof(dev_priv->warp_pipe_phys)); ++ ++ if (dev_priv->head != NULL) { ++ mga_freelist_cleanup(dev); ++ } ++ } ++ ++ return err; ++} ++ ++int mga_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mga_init_t *init = data; ++ int err; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case MGA_INIT_DMA: ++ err = mga_do_init_dma(dev, init); ++ if (err) { ++ (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); ++ } ++ return err; ++ case MGA_CLEANUP_DMA: ++ return mga_do_cleanup_dma(dev, FULL_CLEANUP); ++ } ++ ++ return -EINVAL; ++} ++ ++/* 
================================================================ ++ * Primary DMA stream management ++ */ ++ ++int mga_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ struct drm_lock *lock = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_DEBUG("%s%s%s\n", ++ (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "", ++ (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", ++ (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); ++ ++ WRAP_WAIT_WITH_RETURN(dev_priv); ++ ++ if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { ++ mga_do_dma_flush(dev_priv); ++ } ++ ++ if (lock->flags & _DRM_LOCK_QUIESCENT) { ++#if MGA_DMA_DEBUG ++ int ret = mga_do_wait_for_idle(dev_priv); ++ if (ret < 0) ++ DRM_INFO("-EBUSY\n"); ++ return ret; ++#else ++ return mga_do_wait_for_idle(dev_priv); ++#endif ++ } else { ++ return 0; ++ } ++} ++ ++int mga_dma_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mga_do_dma_reset(dev_priv); ++} ++ ++/* ================================================================ ++ * DMA buffer management ++ */ ++ ++static int mga_dma_get_buffers(struct drm_device * dev, ++ struct drm_file *file_priv, struct drm_dma * d) ++{ ++ struct drm_buf *buf; ++ int i; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = mga_freelist_get(dev); ++ if (!buf) ++ return -EAGAIN; ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], ++ &buf->idx, sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], ++ &buf->total, sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int mga_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ struct drm_dma *d = data; ++ int ret = 0; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = mga_dma_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} ++ ++/** ++ * Called just before the module is unloaded. ++ */ ++int mga_driver_unload(struct drm_device * dev) ++{ ++ drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ ++ return 0; ++} ++ ++/** ++ * Called when the last opener of the device is closed. 
++ */ ++void mga_driver_lastclose(struct drm_device * dev) ++{ ++ mga_do_cleanup_dma(dev, FULL_CLEANUP); ++} ++ ++int mga_driver_dma_quiescent(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ return mga_do_wait_for_idle(dev_priv); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,425 @@ ++/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*- ++ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Jeff Hartmann ++ * Keith Whitwell ++ * ++ * Rewritten by: ++ * Gareth Hughes ++ */ ++ ++#ifndef __MGA_DRM_H__ ++#define __MGA_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (mga_sarea.h) ++ */ ++ ++#ifndef __MGA_SAREA_DEFINES__ ++#define __MGA_SAREA_DEFINES__ ++ ++/* WARP pipe flags ++ */ ++#define MGA_F 0x1 /* fog */ ++#define MGA_A 0x2 /* alpha */ ++#define MGA_S 0x4 /* specular */ ++#define MGA_T2 0x8 /* multitexture */ ++ ++#define MGA_WARP_TGZ 0 ++#define MGA_WARP_TGZF (MGA_F) ++#define MGA_WARP_TGZA (MGA_A) ++#define MGA_WARP_TGZAF (MGA_F|MGA_A) ++#define MGA_WARP_TGZS (MGA_S) ++#define MGA_WARP_TGZSF (MGA_S|MGA_F) ++#define MGA_WARP_TGZSA (MGA_S|MGA_A) ++#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A) ++#define MGA_WARP_T2GZ (MGA_T2) ++#define MGA_WARP_T2GZF (MGA_T2|MGA_F) ++#define MGA_WARP_T2GZA (MGA_T2|MGA_A) ++#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F) ++#define MGA_WARP_T2GZS (MGA_T2|MGA_S) ++#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F) ++#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A) ++#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A) ++ ++#define MGA_MAX_G200_PIPES 8 /* no multitex */ ++#define MGA_MAX_G400_PIPES 16 ++#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES ++#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */ ++ ++#define MGA_CARD_TYPE_G200 1 ++#define MGA_CARD_TYPE_G400 2 ++#define MGA_CARD_TYPE_G450 3 /* not currently used */ ++#define MGA_CARD_TYPE_G550 4 ++ ++#define MGA_FRONT 0x1 ++#define MGA_BACK 0x2 ++#define MGA_DEPTH 0x4 ++ ++/* What needs to be changed for the current vertex dma buffer? ++ */ ++#define MGA_UPLOAD_CONTEXT 0x1 ++#define MGA_UPLOAD_TEX0 0x2 ++#define MGA_UPLOAD_TEX1 0x4 ++#define MGA_UPLOAD_PIPE 0x8 ++#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */ ++#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */ ++#define MGA_UPLOAD_2D 0x40 ++#define MGA_WAIT_AGE 0x80 /* handled client-side */ ++#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */ ++#if 0 ++#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock ++ quiescent */ ++#endif ++ ++/* 32 buffers of 64k each, total 2 meg. ++ */ ++#define MGA_BUFFER_SIZE (1 << 16) ++#define MGA_NUM_BUFFERS 128 ++ ++/* Keep these small for testing. ++ */ ++#define MGA_NR_SAREA_CLIPRECTS 8 ++ ++/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 ++ * regions, subject to a minimum region size of (1<<16) == 64k. ++ * ++ * Clients may subdivide regions internally, but when sharing between ++ * clients, the region size is the minimum granularity. 
++ */ ++ ++#define MGA_CARD_HEAP 0 ++#define MGA_AGP_HEAP 1 ++#define MGA_NR_TEX_HEAPS 2 ++#define MGA_NR_TEX_REGIONS 16 ++#define MGA_LOG_MIN_TEX_REGION_SIZE 16 ++ ++#define DRM_MGA_IDLE_RETRY 2048 ++ ++#endif /* __MGA_SAREA_DEFINES__ */ ++ ++/* Setup registers for 3D context ++ */ ++typedef struct { ++ unsigned int dstorg; ++ unsigned int maccess; ++ unsigned int plnwt; ++ unsigned int dwgctl; ++ unsigned int alphactrl; ++ unsigned int fogcolor; ++ unsigned int wflag; ++ unsigned int tdualstage0; ++ unsigned int tdualstage1; ++ unsigned int fcol; ++ unsigned int stencil; ++ unsigned int stencilctl; ++} drm_mga_context_regs_t; ++ ++/* Setup registers for 2D, X server ++ */ ++typedef struct { ++ unsigned int pitch; ++} drm_mga_server_regs_t; ++ ++/* Setup registers for each texture unit ++ */ ++typedef struct { ++ unsigned int texctl; ++ unsigned int texctl2; ++ unsigned int texfilter; ++ unsigned int texbordercol; ++ unsigned int texorg; ++ unsigned int texwidth; ++ unsigned int texheight; ++ unsigned int texorg1; ++ unsigned int texorg2; ++ unsigned int texorg3; ++ unsigned int texorg4; ++} drm_mga_texture_regs_t; ++ ++/* General aging mechanism ++ */ ++typedef struct { ++ unsigned int head; /* Position of head pointer */ ++ unsigned int wrap; /* Primary DMA wrap count */ ++} drm_mga_age_t; ++ ++typedef struct _drm_mga_sarea { ++ /* The channel for communication of state information to the kernel ++ * on firing a vertex dma buffer. ++ */ ++ drm_mga_context_regs_t context_state; ++ drm_mga_server_regs_t server_state; ++ drm_mga_texture_regs_t tex_state[2]; ++ unsigned int warp_pipe; ++ unsigned int dirty; ++ unsigned int vertsize; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Information about the most recently used 3d drawable. The ++ * client fills in the req_* fields, the server fills in the ++ * exported_ fields and puts the cliprects into boxes, above. ++ * ++ * The client clears the exported_drawable field before ++ * clobbering the boxes data. ++ */ ++ unsigned int req_drawable; /* the X drawable id */ ++ unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */ ++ ++ unsigned int exported_drawable; ++ unsigned int exported_index; ++ unsigned int exported_stamp; ++ unsigned int exported_buffers; ++ unsigned int exported_nfront; ++ unsigned int exported_nback; ++ int exported_back_x, exported_front_x, exported_w; ++ int exported_back_y, exported_front_y, exported_h; ++ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS]; ++ ++ /* Counters for aging textures and for client-side throttling. ++ */ ++ unsigned int status[4]; ++ unsigned int last_wrap; ++ ++ drm_mga_age_t last_frame; ++ unsigned int last_enqueue; /* last time a buffer was enqueued */ ++ unsigned int last_dispatch; /* age of the most recently dispatched buffer */ ++ unsigned int last_quiescent; /* */ ++ ++ /* LRU lists for texture memory in agp space and on the card. ++ */ ++ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1]; ++ unsigned int texAge[MGA_NR_TEX_HEAPS]; ++ ++ /* Mechanism to validate card state. ++ */ ++ int ctxOwner; ++} drm_mga_sarea_t; ++ ++ ++/* MGA specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
++ */ ++#define DRM_MGA_INIT 0x00 ++#define DRM_MGA_FLUSH 0x01 ++#define DRM_MGA_RESET 0x02 ++#define DRM_MGA_SWAP 0x03 ++#define DRM_MGA_CLEAR 0x04 ++#define DRM_MGA_VERTEX 0x05 ++#define DRM_MGA_INDICES 0x06 ++#define DRM_MGA_ILOAD 0x07 ++#define DRM_MGA_BLIT 0x08 ++#define DRM_MGA_GETPARAM 0x09 ++ ++/* 3.2: ++ * ioctls for operating on fences. ++ */ ++#define DRM_MGA_SET_FENCE 0x0a ++#define DRM_MGA_WAIT_FENCE 0x0b ++#define DRM_MGA_DMA_BOOTSTRAP 0x0c ++ ++ ++#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) ++#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) ++#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) ++#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) ++#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) ++#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t) ++#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t) ++#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) ++#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) ++#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) ++#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t) ++#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t) ++#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t) ++ ++typedef struct _drm_mga_warp_index { ++ int installed; ++ unsigned long phys_addr; ++ int size; ++} drm_mga_warp_index_t; ++ ++typedef struct drm_mga_init { ++ enum { ++ MGA_INIT_DMA = 0x01, ++ MGA_CLEANUP_DMA = 0x02 ++ } func; ++ ++ unsigned long sarea_priv_offset; ++ ++ int chipset; ++ int sgram; ++ ++ unsigned int maccess; ++ ++ unsigned int fb_cpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ ++ unsigned int depth_cpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ unsigned int texture_offset[MGA_NR_TEX_HEAPS]; ++ unsigned int texture_size[MGA_NR_TEX_HEAPS]; ++ ++ unsigned long fb_offset; ++ unsigned long mmio_offset; ++ unsigned long status_offset; ++ unsigned long warp_offset; ++ unsigned long primary_offset; ++ unsigned long buffers_offset; ++} drm_mga_init_t; ++ ++ ++typedef struct drm_mga_dma_bootstrap { ++ /** ++ * \name AGP texture region ++ * ++ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will ++ * be filled in with the actual AGP texture settings. ++ * ++ * \warning ++ * If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode ++ * is zero, it means that PCI memory (most likely through the use of ++ * an IOMMU) is being used for "AGP" textures. ++ */ ++ /*@{*/ ++ unsigned long texture_handle; /**< Handle used to map AGP textures. */ ++ uint32_t texture_size; /**< Size of the AGP texture region. */ ++ /*@}*/ ++ ++ ++ /** ++ * Requested size of the primary DMA region. ++ * ++ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be ++ * filled in with the actual AGP mode. If AGP was not available ++ */ ++ uint32_t primary_size; ++ ++ ++ /** ++ * Requested number of secondary DMA buffers. ++ * ++ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be ++ * filled in with the actual number of secondary DMA buffers ++ * allocated. 
Particularly when PCI DMA is used, this may be ++ * (subtantially) less than the number requested. ++ */ ++ uint32_t secondary_bin_count; ++ ++ ++ /** ++ * Requested size of each secondary DMA buffer. ++ * ++ * While the kernel \b is free to reduce ++ * dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed ++ * to reduce dma_mga_dma_bootstrap::secondary_bin_size. ++ */ ++ uint32_t secondary_bin_size; ++ ++ ++ /** ++ * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X, ++ * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is ++ * zero, it means that PCI DMA should be used, even if AGP is ++ * possible. ++ * ++ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be ++ * filled in with the actual AGP mode. If AGP was not available ++ * (i.e., PCI DMA was used), this value will be zero. ++ */ ++ uint32_t agp_mode; ++ ++ ++ /** ++ * Desired AGP GART size, measured in megabytes. ++ */ ++ uint8_t agp_size; ++} drm_mga_dma_bootstrap_t; ++ ++typedef struct drm_mga_clear { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; ++} drm_mga_clear_t; ++ ++typedef struct drm_mga_vertex { ++ int idx; /* buffer to queue */ ++ int used; /* bytes in use */ ++ int discard; /* client finished with buffer? */ ++} drm_mga_vertex_t; ++ ++typedef struct drm_mga_indices { ++ int idx; /* buffer to queue */ ++ unsigned int start; ++ unsigned int end; ++ int discard; /* client finished with buffer? */ ++} drm_mga_indices_t; ++ ++typedef struct drm_mga_iload { ++ int idx; ++ unsigned int dstorg; ++ unsigned int length; ++} drm_mga_iload_t; ++ ++typedef struct _drm_mga_blit { ++ unsigned int planemask; ++ unsigned int srcorg; ++ unsigned int dstorg; ++ int src_pitch, dst_pitch; ++ int delta_sx, delta_sy; ++ int delta_dx, delta_dy; ++ int height, ydir; /* flip image vertically */ ++ int source_pitch, dest_pitch; ++} drm_mga_blit_t; ++ ++/* 3.1: An ioctl to get parameters that aren't available to the 3d ++ * client any other way. ++ */ ++#define MGA_PARAM_IRQ_NR 1 ++ ++/* 3.2: Query the actual card type. The DDX only distinguishes between ++ * G200 chips and non-G200 chips, which it calls G400. It turns out that ++ * there are some very sublte differences between the G4x0 chips and the G550 ++ * chips. Using this parameter query, a client-side driver can detect the ++ * difference between a G4x0 and a G550. ++ */ ++#define MGA_PARAM_CARD_TYPE 2 ++ ++typedef struct drm_mga_getparam { ++ int param; ++ void __user *value; ++} drm_mga_getparam_t; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,152 @@ ++/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*- ++ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. (Rik) Faith ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++ ++#include "drm_pciids.h" ++ ++static int mga_driver_device_is_agp(struct drm_device * dev); ++ ++static struct pci_device_id pciidlist[] = { ++ mga_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | ++ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .dev_priv_size = sizeof (drm_mga_buf_priv_t), ++ .load = mga_driver_load, ++ .unload = mga_driver_unload, ++ .lastclose = mga_driver_lastclose, ++ .dma_quiescent = mga_driver_dma_quiescent, ++ .device_is_agp = mga_driver_device_is_agp, ++ .get_vblank_counter = mga_get_vblank_counter, ++ .enable_vblank = mga_enable_vblank, ++ .disable_vblank = mga_disable_vblank, ++ .irq_preinstall = mga_driver_irq_preinstall, ++ .irq_postinstall = mga_driver_irq_postinstall, ++ .irq_uninstall = mga_driver_irq_uninstall, ++ .irq_handler = mga_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = mga_ioctls, ++ .dma_ioctl = mga_dma_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = mga_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init mga_init(void) ++{ ++ driver.num_ioctls = mga_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit mga_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(mga_init); ++module_exit(mga_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and 
additional rights"); ++ ++/** ++ * Determine if the device really is AGP or not. ++ * ++ * In addition to the usual tests performed by \c drm_device_is_agp, this ++ * function detects PCI G450 cards that appear to the system exactly like ++ * AGP G450 cards. ++ * ++ * \param dev The device to be tested. ++ * ++ * \returns ++ * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. ++ */ ++static int mga_driver_device_is_agp(struct drm_device * dev) ++{ ++ const struct pci_dev * const pdev = dev->pdev; ++ ++ ++ /* There are PCI versions of the G450. These cards have the ++ * same PCI ID as the AGP G450, but have an additional PCI-to-PCI ++ * bridge chip. We detect these cards, which are not currently ++ * supported by this driver, by looking at the device ID of the ++ * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the ++ * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the ++ * device. ++ */ ++ ++ if ((pdev->device == 0x0525) && pdev->bus->self ++ && (pdev->bus->self->vendor == 0x3388) ++ && (pdev->bus->self->device == 0x0021)) { ++ return 0; ++ } ++ ++ return 2; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,691 @@ ++/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*- ++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#ifndef __MGA_DRV_H__ ++#define __MGA_DRV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
++ ++#define DRIVER_NAME "mga" ++#define DRIVER_DESC "Matrox G200/G400" ++#define DRIVER_DATE "20060319" ++ ++#define DRIVER_MAJOR 3 ++#define DRIVER_MINOR 2 ++#define DRIVER_PATCHLEVEL 2 ++ ++typedef struct drm_mga_primary_buffer { ++ u8 *start; ++ u8 *end; ++ int size; ++ ++ u32 tail; ++ int space; ++ volatile long wrapped; ++ ++ volatile u32 *status; ++ ++ u32 last_flush; ++ u32 last_wrap; ++ ++ u32 high_mark; ++} drm_mga_primary_buffer_t; ++ ++typedef struct drm_mga_freelist { ++ struct drm_mga_freelist *next; ++ struct drm_mga_freelist *prev; ++ drm_mga_age_t age; ++ struct drm_buf *buf; ++} drm_mga_freelist_t; ++ ++typedef struct { ++ drm_mga_freelist_t *list_entry; ++ int discard; ++ int dispatched; ++} drm_mga_buf_priv_t; ++ ++typedef struct drm_mga_private { ++ drm_mga_primary_buffer_t prim; ++ drm_mga_sarea_t *sarea_priv; ++ ++ drm_mga_freelist_t *head; ++ drm_mga_freelist_t *tail; ++ ++ unsigned int warp_pipe; ++ unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES]; ++ ++ int chipset; ++ int usec_timeout; ++ ++ /** ++ * If set, the new DMA initialization sequence was used. This is ++ * primarilly used to select how the driver should uninitialized its ++ * internal DMA structures. ++ */ ++ int used_new_dma_init; ++ ++ /** ++ * If AGP memory is used for DMA buffers, this will be the value ++ * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer). ++ */ ++ u32 dma_access; ++ ++ /** ++ * If AGP memory is used for DMA buffers, this will be the value ++ * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI ++ * transfer). ++ */ ++ u32 wagp_enable; ++ ++ /** ++ * \name MMIO region parameters. ++ * ++ * \sa drm_mga_private_t::mmio ++ */ ++ /*@{*/ ++ u32 mmio_base; /**< Bus address of base of MMIO. */ ++ u32 mmio_size; /**< Size of the MMIO region. */ ++ /*@}*/ ++ ++ u32 clear_cmd; ++ u32 maccess; ++ ++ atomic_t vbl_received; /**< Number of vblanks received. 
*/ ++ wait_queue_head_t fence_queue; ++ atomic_t last_fence_retired; ++ u32 next_fence_to_post; ++ ++ unsigned int fb_cpp; ++ unsigned int front_offset; ++ unsigned int front_pitch; ++ unsigned int back_offset; ++ unsigned int back_pitch; ++ ++ unsigned int depth_cpp; ++ unsigned int depth_offset; ++ unsigned int depth_pitch; ++ ++ unsigned int texture_offset; ++ unsigned int texture_size; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio; ++ drm_local_map_t *status; ++ drm_local_map_t *warp; ++ drm_local_map_t *primary; ++ drm_local_map_t *agp_textures; ++ ++ unsigned long agp_handle; ++ unsigned int agp_size; ++} drm_mga_private_t; ++ ++extern struct drm_ioctl_desc mga_ioctls[]; ++extern int mga_max_ioctl; ++ ++ /* mga_dma.c */ ++extern int mga_dma_bootstrap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_driver_load(struct drm_device *dev, unsigned long flags); ++extern int mga_driver_unload(struct drm_device * dev); ++extern void mga_driver_lastclose(struct drm_device * dev); ++extern int mga_driver_dma_quiescent(struct drm_device * dev); ++ ++extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); ++ ++extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); ++extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); ++extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); ++ ++extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); ++ ++ /* mga_warp.c */ ++extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); ++extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); ++extern int mga_warp_init(drm_mga_private_t * dev_priv); ++ ++ /* mga_irq.c */ ++extern int mga_enable_vblank(struct drm_device *dev, int crtc); ++extern void mga_disable_vblank(struct drm_device *dev, int crtc); ++extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); ++extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); ++extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); ++extern void mga_driver_irq_preinstall(struct drm_device * dev); ++extern int mga_driver_irq_postinstall(struct drm_device * dev); ++extern void mga_driver_irq_uninstall(struct drm_device * dev); ++extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() ++ ++#if defined(__linux__) && defined(__alpha__) ++#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle)) ++#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg) ++ ++#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg ) ++#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg ) ++ ++#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg))) ++#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg))) ++#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0) ++#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0) ++ ++static inline u32 _MGA_READ(u32 * addr) ++{ ++ 
DRM_MEMORYBARRIER(); ++ return *(volatile u32 *)addr; ++} ++#else ++#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg)) ++#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg)) ++#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val)) ++#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val)) ++#endif ++ ++#define DWGREG0 0x1c00 ++#define DWGREG0_END 0x1dff ++#define DWGREG1 0x2c00 ++#define DWGREG1_END 0x2dff ++ ++#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END) ++#define DMAREG0(r) (u8)((r - DWGREG0) >> 2) ++#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80) ++#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r)) ++ ++/* ================================================================ ++ * Helper macross... ++ */ ++ ++#define MGA_EMIT_STATE( dev_priv, dirty ) \ ++do { \ ++ if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \ ++ if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \ ++ mga_g400_emit_state( dev_priv ); \ ++ } else { \ ++ mga_g200_emit_state( dev_priv ); \ ++ } \ ++ } \ ++} while (0) ++ ++#define WRAP_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ ++ if ( mga_is_idle( dev_priv ) ) { \ ++ mga_do_dma_wrap_end( dev_priv ); \ ++ } else if ( dev_priv->prim.space < \ ++ dev_priv->prim.high_mark ) { \ ++ if ( MGA_DMA_DEBUG ) \ ++ DRM_INFO( "wrap...\n"); \ ++ return -EBUSY; \ ++ } \ ++ } \ ++} while (0) ++ ++#define WRAP_WAIT_WITH_RETURN( dev_priv ) \ ++do { \ ++ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ ++ if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ ++ if ( MGA_DMA_DEBUG ) \ ++ DRM_INFO( "wrap...\n"); \ ++ return -EBUSY; \ ++ } \ ++ mga_do_dma_wrap_end( dev_priv ); \ ++ } \ ++} while (0) ++ ++/* ================================================================ ++ * Primary DMA command stream ++ */ ++ ++#define MGA_VERBOSE 0 ++ ++#define DMA_LOCALS unsigned int write; volatile u8 *prim; ++ ++#define DMA_BLOCK_SIZE (5 * sizeof(u32)) ++ ++#define BEGIN_DMA( n ) \ ++do { \ ++ if ( MGA_VERBOSE ) { \ ++ DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \ ++ DRM_INFO( " space=0x%x req=0x%Zx\n", \ ++ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \ ++ } \ ++ prim = dev_priv->prim.start; \ ++ write = dev_priv->prim.tail; \ ++} while (0) ++ ++#define BEGIN_DMA_WRAP() \ ++do { \ ++ if ( MGA_VERBOSE ) { \ ++ DRM_INFO( "BEGIN_DMA()\n" ); \ ++ DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ ++ } \ ++ prim = dev_priv->prim.start; \ ++ write = dev_priv->prim.tail; \ ++} while (0) ++ ++#define ADVANCE_DMA() \ ++do { \ ++ dev_priv->prim.tail = write; \ ++ if ( MGA_VERBOSE ) { \ ++ DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \ ++ write, dev_priv->prim.space ); \ ++ } \ ++} while (0) ++ ++#define FLUSH_DMA() \ ++do { \ ++ if ( 0 ) { \ ++ DRM_INFO( "\n" ); \ ++ DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ ++ dev_priv->prim.tail, \ ++ MGA_READ( MGA_PRIMADDRESS ) - \ ++ dev_priv->primary->offset ); \ ++ } \ ++ if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \ ++ if ( dev_priv->prim.space < \ ++ dev_priv->prim.high_mark ) { \ ++ mga_do_dma_wrap_start( dev_priv ); \ ++ } else { \ ++ mga_do_dma_flush( dev_priv ); \ ++ } \ ++ } \ ++} while (0) ++ ++/* Never use this, always use DMA_BLOCK(...) for primary DMA output. 
++ */ ++#define DMA_WRITE( offset, val ) \ ++do { \ ++ if ( MGA_VERBOSE ) { \ ++ DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ ++ (u32)(val), write + (offset) * sizeof(u32) ); \ ++ } \ ++ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ ++} while (0) ++ ++#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \ ++do { \ ++ DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \ ++ (DMAREG( reg1 ) << 8) | \ ++ (DMAREG( reg2 ) << 16) | \ ++ (DMAREG( reg3 ) << 24)) ); \ ++ DMA_WRITE( 1, val0 ); \ ++ DMA_WRITE( 2, val1 ); \ ++ DMA_WRITE( 3, val2 ); \ ++ DMA_WRITE( 4, val3 ); \ ++ write += DMA_BLOCK_SIZE; \ ++} while (0) ++ ++/* Buffer aging via primary DMA stream head pointer. ++ */ ++ ++#define SET_AGE( age, h, w ) \ ++do { \ ++ (age)->head = h; \ ++ (age)->wrap = w; \ ++} while (0) ++ ++#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \ ++ ( (age)->wrap == w && \ ++ (age)->head < h ) ) ++ ++#define AGE_BUFFER( buf_priv ) \ ++do { \ ++ drm_mga_freelist_t *entry = (buf_priv)->list_entry; \ ++ if ( (buf_priv)->dispatched ) { \ ++ entry->age.head = (dev_priv->prim.tail + \ ++ dev_priv->primary->offset); \ ++ entry->age.wrap = dev_priv->sarea_priv->last_wrap; \ ++ } else { \ ++ entry->age.head = 0; \ ++ entry->age.wrap = 0; \ ++ } \ ++} while (0) ++ ++#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \ ++ MGA_DWGENGSTS | \ ++ MGA_ENDPRDMASTS) ++#define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \ ++ MGA_ENDPRDMASTS) ++ ++#define MGA_DMA_DEBUG 0 ++ ++/* A reduced set of the mga registers. ++ */ ++#define MGA_CRTC_INDEX 0x1fd4 ++#define MGA_CRTC_DATA 0x1fd5 ++ ++/* CRTC11 */ ++#define MGA_VINTCLR (1 << 4) ++#define MGA_VINTEN (1 << 5) ++ ++#define MGA_ALPHACTRL 0x2c7c ++#define MGA_AR0 0x1c60 ++#define MGA_AR1 0x1c64 ++#define MGA_AR2 0x1c68 ++#define MGA_AR3 0x1c6c ++#define MGA_AR4 0x1c70 ++#define MGA_AR5 0x1c74 ++#define MGA_AR6 0x1c78 ++ ++#define MGA_CXBNDRY 0x1c80 ++#define MGA_CXLEFT 0x1ca0 ++#define MGA_CXRIGHT 0x1ca4 ++ ++#define MGA_DMAPAD 0x1c54 ++#define MGA_DSTORG 0x2cb8 ++#define MGA_DWGCTL 0x1c00 ++# define MGA_OPCOD_MASK (15 << 0) ++# define MGA_OPCOD_TRAP (4 << 0) ++# define MGA_OPCOD_TEXTURE_TRAP (6 << 0) ++# define MGA_OPCOD_BITBLT (8 << 0) ++# define MGA_OPCOD_ILOAD (9 << 0) ++# define MGA_ATYPE_MASK (7 << 4) ++# define MGA_ATYPE_RPL (0 << 4) ++# define MGA_ATYPE_RSTR (1 << 4) ++# define MGA_ATYPE_ZI (3 << 4) ++# define MGA_ATYPE_BLK (4 << 4) ++# define MGA_ATYPE_I (7 << 4) ++# define MGA_LINEAR (1 << 7) ++# define MGA_ZMODE_MASK (7 << 8) ++# define MGA_ZMODE_NOZCMP (0 << 8) ++# define MGA_ZMODE_ZE (2 << 8) ++# define MGA_ZMODE_ZNE (3 << 8) ++# define MGA_ZMODE_ZLT (4 << 8) ++# define MGA_ZMODE_ZLTE (5 << 8) ++# define MGA_ZMODE_ZGT (6 << 8) ++# define MGA_ZMODE_ZGTE (7 << 8) ++# define MGA_SOLID (1 << 11) ++# define MGA_ARZERO (1 << 12) ++# define MGA_SGNZERO (1 << 13) ++# define MGA_SHIFTZERO (1 << 14) ++# define MGA_BOP_MASK (15 << 16) ++# define MGA_BOP_ZERO (0 << 16) ++# define MGA_BOP_DST (10 << 16) ++# define MGA_BOP_SRC (12 << 16) ++# define MGA_BOP_ONE (15 << 16) ++# define MGA_TRANS_SHIFT 20 ++# define MGA_TRANS_MASK (15 << 20) ++# define MGA_BLTMOD_MASK (15 << 25) ++# define MGA_BLTMOD_BMONOLEF (0 << 25) ++# define MGA_BLTMOD_BMONOWF (4 << 25) ++# define MGA_BLTMOD_PLAN (1 << 25) ++# define MGA_BLTMOD_BFCOL (2 << 25) ++# define MGA_BLTMOD_BU32BGR (3 << 25) ++# define MGA_BLTMOD_BU32RGB (7 << 25) ++# define MGA_BLTMOD_BU24BGR (11 << 25) ++# define MGA_BLTMOD_BU24RGB (15 << 25) ++# define MGA_PATTERN (1 << 29) ++# define MGA_TRANSC (1 << 30) ++# define 
MGA_CLIPDIS (1 << 31) ++#define MGA_DWGSYNC 0x2c4c ++ ++#define MGA_FCOL 0x1c24 ++#define MGA_FIFOSTATUS 0x1e10 ++#define MGA_FOGCOL 0x1cf4 ++#define MGA_FXBNDRY 0x1c84 ++#define MGA_FXLEFT 0x1ca8 ++#define MGA_FXRIGHT 0x1cac ++ ++#define MGA_ICLEAR 0x1e18 ++# define MGA_SOFTRAPICLR (1 << 0) ++# define MGA_VLINEICLR (1 << 5) ++#define MGA_IEN 0x1e1c ++# define MGA_SOFTRAPIEN (1 << 0) ++# define MGA_VLINEIEN (1 << 5) ++ ++#define MGA_LEN 0x1c5c ++ ++#define MGA_MACCESS 0x1c04 ++ ++#define MGA_PITCH 0x1c8c ++#define MGA_PLNWT 0x1c1c ++#define MGA_PRIMADDRESS 0x1e58 ++# define MGA_DMA_GENERAL (0 << 0) ++# define MGA_DMA_BLIT (1 << 0) ++# define MGA_DMA_VECTOR (2 << 0) ++# define MGA_DMA_VERTEX (3 << 0) ++#define MGA_PRIMEND 0x1e5c ++# define MGA_PRIMNOSTART (1 << 0) ++# define MGA_PAGPXFER (1 << 1) ++#define MGA_PRIMPTR 0x1e50 ++# define MGA_PRIMPTREN0 (1 << 0) ++# define MGA_PRIMPTREN1 (1 << 1) ++ ++#define MGA_RST 0x1e40 ++# define MGA_SOFTRESET (1 << 0) ++# define MGA_SOFTEXTRST (1 << 1) ++ ++#define MGA_SECADDRESS 0x2c40 ++#define MGA_SECEND 0x2c44 ++#define MGA_SETUPADDRESS 0x2cd0 ++#define MGA_SETUPEND 0x2cd4 ++#define MGA_SGN 0x1c58 ++#define MGA_SOFTRAP 0x2c48 ++#define MGA_SRCORG 0x2cb4 ++# define MGA_SRMMAP_MASK (1 << 0) ++# define MGA_SRCMAP_FB (0 << 0) ++# define MGA_SRCMAP_SYSMEM (1 << 0) ++# define MGA_SRCACC_MASK (1 << 1) ++# define MGA_SRCACC_PCI (0 << 1) ++# define MGA_SRCACC_AGP (1 << 1) ++#define MGA_STATUS 0x1e14 ++# define MGA_SOFTRAPEN (1 << 0) ++# define MGA_VSYNCPEN (1 << 4) ++# define MGA_VLINEPEN (1 << 5) ++# define MGA_DWGENGSTS (1 << 16) ++# define MGA_ENDPRDMASTS (1 << 17) ++#define MGA_STENCIL 0x2cc8 ++#define MGA_STENCILCTL 0x2ccc ++ ++#define MGA_TDUALSTAGE0 0x2cf8 ++#define MGA_TDUALSTAGE1 0x2cfc ++#define MGA_TEXBORDERCOL 0x2c5c ++#define MGA_TEXCTL 0x2c30 ++#define MGA_TEXCTL2 0x2c3c ++# define MGA_DUALTEX (1 << 7) ++# define MGA_G400_TC2_MAGIC (1 << 15) ++# define MGA_MAP1_ENABLE (1 << 31) ++#define MGA_TEXFILTER 0x2c58 ++#define MGA_TEXHEIGHT 0x2c2c ++#define MGA_TEXORG 0x2c24 ++# define MGA_TEXORGMAP_MASK (1 << 0) ++# define MGA_TEXORGMAP_FB (0 << 0) ++# define MGA_TEXORGMAP_SYSMEM (1 << 0) ++# define MGA_TEXORGACC_MASK (1 << 1) ++# define MGA_TEXORGACC_PCI (0 << 1) ++# define MGA_TEXORGACC_AGP (1 << 1) ++#define MGA_TEXORG1 0x2ca4 ++#define MGA_TEXORG2 0x2ca8 ++#define MGA_TEXORG3 0x2cac ++#define MGA_TEXORG4 0x2cb0 ++#define MGA_TEXTRANS 0x2c34 ++#define MGA_TEXTRANSHIGH 0x2c38 ++#define MGA_TEXWIDTH 0x2c28 ++ ++#define MGA_WACCEPTSEQ 0x1dd4 ++#define MGA_WCODEADDR 0x1e6c ++#define MGA_WFLAG 0x1dc4 ++#define MGA_WFLAG1 0x1de0 ++#define MGA_WFLAGNB 0x1e64 ++#define MGA_WFLAGNB1 0x1e08 ++#define MGA_WGETMSB 0x1dc8 ++#define MGA_WIADDR 0x1dc0 ++#define MGA_WIADDR2 0x1dd8 ++# define MGA_WMODE_SUSPEND (0 << 0) ++# define MGA_WMODE_RESUME (1 << 0) ++# define MGA_WMODE_JUMP (2 << 0) ++# define MGA_WMODE_START (3 << 0) ++# define MGA_WAGP_ENABLE (1 << 2) ++#define MGA_WMISC 0x1e70 ++# define MGA_WUCODECACHE_ENABLE (1 << 0) ++# define MGA_WMASTER_ENABLE (1 << 1) ++# define MGA_WCACHEFLUSH_ENABLE (1 << 3) ++#define MGA_WVRTXSZ 0x1dcc ++ ++#define MGA_YBOT 0x1c9c ++#define MGA_YDST 0x1c90 ++#define MGA_YDSTLEN 0x1c88 ++#define MGA_YDSTORG 0x1c94 ++#define MGA_YTOP 0x1c98 ++ ++#define MGA_ZORG 0x1c0c ++ ++/* This finishes the current batch of commands ++ */ ++#define MGA_EXEC 0x0100 ++ ++/* AGP PLL encoding (for G200 only). 
++ */ ++#define MGA_AGP_PLL 0x1e4c ++# define MGA_AGP2XPLL_DISABLE (0 << 0) ++# define MGA_AGP2XPLL_ENABLE (1 << 0) ++ ++/* Warp registers ++ */ ++#define MGA_WR0 0x2d00 ++#define MGA_WR1 0x2d04 ++#define MGA_WR2 0x2d08 ++#define MGA_WR3 0x2d0c ++#define MGA_WR4 0x2d10 ++#define MGA_WR5 0x2d14 ++#define MGA_WR6 0x2d18 ++#define MGA_WR7 0x2d1c ++#define MGA_WR8 0x2d20 ++#define MGA_WR9 0x2d24 ++#define MGA_WR10 0x2d28 ++#define MGA_WR11 0x2d2c ++#define MGA_WR12 0x2d30 ++#define MGA_WR13 0x2d34 ++#define MGA_WR14 0x2d38 ++#define MGA_WR15 0x2d3c ++#define MGA_WR16 0x2d40 ++#define MGA_WR17 0x2d44 ++#define MGA_WR18 0x2d48 ++#define MGA_WR19 0x2d4c ++#define MGA_WR20 0x2d50 ++#define MGA_WR21 0x2d54 ++#define MGA_WR22 0x2d58 ++#define MGA_WR23 0x2d5c ++#define MGA_WR24 0x2d60 ++#define MGA_WR25 0x2d64 ++#define MGA_WR26 0x2d68 ++#define MGA_WR27 0x2d6c ++#define MGA_WR28 0x2d70 ++#define MGA_WR29 0x2d74 ++#define MGA_WR30 0x2d78 ++#define MGA_WR31 0x2d7c ++#define MGA_WR32 0x2d80 ++#define MGA_WR33 0x2d84 ++#define MGA_WR34 0x2d88 ++#define MGA_WR35 0x2d8c ++#define MGA_WR36 0x2d90 ++#define MGA_WR37 0x2d94 ++#define MGA_WR38 0x2d98 ++#define MGA_WR39 0x2d9c ++#define MGA_WR40 0x2da0 ++#define MGA_WR41 0x2da4 ++#define MGA_WR42 0x2da8 ++#define MGA_WR43 0x2dac ++#define MGA_WR44 0x2db0 ++#define MGA_WR45 0x2db4 ++#define MGA_WR46 0x2db8 ++#define MGA_WR47 0x2dbc ++#define MGA_WR48 0x2dc0 ++#define MGA_WR49 0x2dc4 ++#define MGA_WR50 0x2dc8 ++#define MGA_WR51 0x2dcc ++#define MGA_WR52 0x2dd0 ++#define MGA_WR53 0x2dd4 ++#define MGA_WR54 0x2dd8 ++#define MGA_WR55 0x2ddc ++#define MGA_WR56 0x2de0 ++#define MGA_WR57 0x2de4 ++#define MGA_WR58 0x2de8 ++#define MGA_WR59 0x2dec ++#define MGA_WR60 0x2df0 ++#define MGA_WR61 0x2df4 ++#define MGA_WR62 0x2df8 ++#define MGA_WR63 0x2dfc ++# define MGA_G400_WR_MAGIC (1 << 6) ++# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */ ++ ++#define MGA_ILOAD_ALIGN 64 ++#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1) ++ ++#define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \ ++ MGA_ATYPE_I | \ ++ MGA_ZMODE_NOZCMP | \ ++ MGA_ARZERO | \ ++ MGA_SGNZERO | \ ++ MGA_BOP_SRC | \ ++ (15 << MGA_TRANS_SHIFT)) ++ ++#define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \ ++ MGA_ZMODE_NOZCMP | \ ++ MGA_SOLID | \ ++ MGA_ARZERO | \ ++ MGA_SGNZERO | \ ++ MGA_SHIFTZERO | \ ++ MGA_BOP_SRC | \ ++ (0 << MGA_TRANS_SHIFT) | \ ++ MGA_BLTMOD_BMONOLEF | \ ++ MGA_TRANSC | \ ++ MGA_CLIPDIS) ++ ++#define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \ ++ MGA_ATYPE_RPL | \ ++ MGA_SGNZERO | \ ++ MGA_SHIFTZERO | \ ++ MGA_BOP_SRC | \ ++ (0 << MGA_TRANS_SHIFT) | \ ++ MGA_BLTMOD_BFCOL | \ ++ MGA_CLIPDIS) ++ ++/* Simple idle test. ++ */ ++static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv) ++{ ++ u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; ++ return (status == MGA_ENDPRDMASTS); ++} ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_ioc32.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_ioc32.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_ioc32.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,234 @@ ++ ++/** ++ * \file mga_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the MGA DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++ ++typedef struct drm32_mga_init { ++ int func; ++ u32 sarea_priv_offset; ++ int chipset; ++ int sgram; ++ unsigned int maccess; ++ unsigned int fb_cpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_cpp; ++ unsigned int depth_offset, depth_pitch; ++ unsigned int texture_offset[MGA_NR_TEX_HEAPS]; ++ unsigned int texture_size[MGA_NR_TEX_HEAPS]; ++ u32 fb_offset; ++ u32 mmio_offset; ++ u32 status_offset; ++ u32 warp_offset; ++ u32 primary_offset; ++ u32 buffers_offset; ++} drm_mga_init32_t; ++ ++static int compat_mga_init(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_mga_init32_t init32; ++ drm_mga_init_t __user *init; ++ int err = 0, i; ++ ++ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) ++ return -EFAULT; ++ ++ init = compat_alloc_user_space(sizeof(*init)); ++ if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) ++ || __put_user(init32.func, &init->func) ++ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) ++ || __put_user(init32.chipset, &init->chipset) ++ || __put_user(init32.sgram, &init->sgram) ++ || __put_user(init32.maccess, &init->maccess) ++ || __put_user(init32.fb_cpp, &init->fb_cpp) ++ || __put_user(init32.front_offset, &init->front_offset) ++ || __put_user(init32.front_pitch, &init->front_pitch) ++ || __put_user(init32.back_offset, &init->back_offset) ++ || __put_user(init32.back_pitch, &init->back_pitch) ++ || __put_user(init32.depth_cpp, &init->depth_cpp) ++ || __put_user(init32.depth_offset, &init->depth_offset) ++ || __put_user(init32.depth_pitch, &init->depth_pitch) ++ || __put_user(init32.fb_offset, &init->fb_offset) ++ || __put_user(init32.mmio_offset, &init->mmio_offset) ++ || __put_user(init32.status_offset, &init->status_offset) ++ || __put_user(init32.warp_offset, &init->warp_offset) ++ || __put_user(init32.primary_offset, &init->primary_offset) ++ || __put_user(init32.buffers_offset, &init->buffers_offset)) ++ return -EFAULT; ++ ++ for (i=0; itexture_offset[i]); ++ err |= __put_user(init32.texture_size[i], &init->texture_size[i]); ++ } ++ if (err) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MGA_INIT, (unsigned long) init); ++} ++ ++ ++typedef struct drm_mga_getparam32 { ++ int param; ++ u32 value; ++} drm_mga_getparam32_t; ++ 
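For orientation, a minimal user-space sketch of the getparam path that the 32-bit wrapper below has to translate; this is an illustration only, not part of the patch. It assumes a libdrm-style copy of mga_drm.h (where "__user" expands to nothing and drm.h is pulled in for DRM_COMMAND_BASE), an open /dev/dri/card0 node, and that the driver answers MGA_PARAM_CARD_TYPE with one of the MGA_CARD_TYPE_* values defined in the header above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "mga_drm.h"	/* assumed user-space copy of the header above */

	int main(void)
	{
		int card_type = 0;
		drm_mga_getparam_t gp = {
			.param = MGA_PARAM_CARD_TYPE,
			.value = &card_type,	/* kernel writes the result here */
		};
		int fd = open("/dev/dri/card0", O_RDWR);	/* path is an assumption */

		if (fd < 0 || ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) != 0) {
			perror("DRM_MGA_GETPARAM");
			return 1;
		}
		printf("MGA card type: %d\n", card_type);
		close(fd);
		return 0;
	}

When a 32-bit client like this runs on a 64-bit kernel, the pointer-carrying drm_mga_getparam_t no longer matches the kernel's layout, which is exactly the mismatch compat_mga_getparam() below repacks before forwarding the ioctl.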
++ ++static int compat_mga_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_mga_getparam32_t getparam32; ++ drm_mga_getparam_t __user *getparam; ++ ++ if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) ++ return -EFAULT; ++ ++ getparam = compat_alloc_user_space(sizeof(*getparam)); ++ if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) ++ || __put_user(getparam32.param, &getparam->param) ++ || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); ++} ++ ++typedef struct drm_mga_drm_bootstrap32 { ++ u32 texture_handle; ++ u32 texture_size; ++ u32 primary_size; ++ u32 secondary_bin_count; ++ u32 secondary_bin_size; ++ u32 agp_mode; ++ u8 agp_size; ++} drm_mga_dma_bootstrap32_t; ++ ++static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_mga_dma_bootstrap32_t dma_bootstrap32; ++ drm_mga_dma_bootstrap_t __user *dma_bootstrap; ++ int err; ++ ++ if (copy_from_user(&dma_bootstrap32, (void __user *)arg, ++ sizeof(dma_bootstrap32))) ++ return -EFAULT; ++ ++ dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap)); ++ if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap)) ++ || __put_user(dma_bootstrap32.texture_handle, ++ &dma_bootstrap->texture_handle) ++ || __put_user(dma_bootstrap32.texture_size, ++ &dma_bootstrap->texture_size) ++ || __put_user(dma_bootstrap32.primary_size, ++ &dma_bootstrap->primary_size) ++ || __put_user(dma_bootstrap32.secondary_bin_count, ++ &dma_bootstrap->secondary_bin_count) ++ || __put_user(dma_bootstrap32.secondary_bin_size, ++ &dma_bootstrap->secondary_bin_size) ++ || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) ++ || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MGA_DMA_BOOTSTRAP, ++ (unsigned long)dma_bootstrap); ++ if (err) ++ return err; ++ ++ if (__get_user(dma_bootstrap32.texture_handle, ++ &dma_bootstrap->texture_handle) ++ || __get_user(dma_bootstrap32.texture_size, ++ &dma_bootstrap->texture_size) ++ || __get_user(dma_bootstrap32.primary_size, ++ &dma_bootstrap->primary_size) ++ || __get_user(dma_bootstrap32.secondary_bin_count, ++ &dma_bootstrap->secondary_bin_count) ++ || __get_user(dma_bootstrap32.secondary_bin_size, ++ &dma_bootstrap->secondary_bin_size) ++ || __get_user(dma_bootstrap32.agp_mode, ++ &dma_bootstrap->agp_mode) ++ || __get_user(dma_bootstrap32.agp_size, ++ &dma_bootstrap->agp_size)) ++ return -EFAULT; ++ ++ if (copy_to_user((void __user *)arg, &dma_bootstrap32, ++ sizeof(dma_bootstrap32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++drm_ioctl_compat_t *mga_compat_ioctls[] = { ++ [DRM_MGA_INIT] = compat_mga_init, ++ [DRM_MGA_GETPARAM] = compat_mga_getparam, ++ [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. 
++ */ ++long mga_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) ++ fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,182 @@ ++/* mga_irq.c -- IRQ handling for radeon -*- linux-c -*- ++ */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Keith Whitwell ++ * Eric Anholt ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++ ++u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) ++{ ++ const drm_mga_private_t *const dev_priv = ++ (drm_mga_private_t *) dev->dev_private; ++ ++ if (crtc != 0) { ++ return 0; ++ } ++ ++ ++ return atomic_read(&dev_priv->vbl_received); ++} ++ ++ ++irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ int status; ++ int handled = 0; ++ ++ status = MGA_READ(MGA_STATUS); ++ ++ /* VBLANK interrupt */ ++ if (status & MGA_VLINEPEN) { ++ MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); ++ atomic_inc(&dev_priv->vbl_received); ++ drm_handle_vblank(dev, 0); ++ handled = 1; ++ } ++ ++ /* SOFTRAP interrupt */ ++ if (status & MGA_SOFTRAPEN) { ++ const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); ++ const u32 prim_end = MGA_READ(MGA_PRIMEND); ++ ++ ++ MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); ++ ++ /* In addition to clearing the interrupt-pending bit, we ++ * have to write to MGA_PRIMEND to re-start the DMA operation. ++ */ ++ if ((prim_start & ~0x03) != (prim_end & ~0x03)) { ++ MGA_WRITE(MGA_PRIMEND, prim_end); ++ } ++ ++ atomic_inc(&dev_priv->last_fence_retired); ++ DRM_WAKEUP(&dev_priv->fence_queue); ++ handled = 1; ++ } ++ ++ if (handled) ++ return IRQ_HANDLED; ++ return IRQ_NONE; ++} ++ ++int mga_enable_vblank(struct drm_device *dev, int crtc) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ ++ if (crtc != 0) { ++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", ++ crtc); ++ return 0; ++ } ++ ++ MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); ++ return 0; ++} ++ ++ ++void mga_disable_vblank(struct drm_device *dev, int crtc) ++{ ++ if (crtc != 0) { ++ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", ++ crtc); ++ } ++ ++ /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have ++ * a nice hardware counter that tracks the number of refreshes when ++ * the interrupt is disabled, and the kernel doesn't know the refresh ++ * rate to calculate an estimate. ++ */ ++ /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ ++} ++ ++int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ unsigned int cur_fence; ++ int ret = 0; ++ ++ /* Assume that the user has missed the current sequence number ++ * by about a day rather than she wants to wait for years ++ * using fences. ++ */ ++ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, ++ (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) ++ - *sequence) <= (1 << 23))); ++ ++ *sequence = cur_fence; ++ ++ return ret; ++} ++ ++void mga_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ ++ /* Disable *all* interrupts */ ++ MGA_WRITE(MGA_IEN, 0); ++ /* Clear bits if they're already high */ ++ MGA_WRITE(MGA_ICLEAR, ~0); ++} ++ ++int mga_driver_irq_postinstall(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ int ret; ++ ++ ret = drm_vblank_init(dev, 1); ++ if (ret) ++ return ret; ++ ++ DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); ++ ++ /* Turn on soft trap interrupt. Vertical blank interrupts are enabled ++ * in mga_enable_vblank. 
++ */ ++ MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN); ++ return 0; ++} ++ ++void mga_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; ++ if (!dev_priv) ++ return; ++ ++ /* Disable *all* interrupts */ ++ MGA_WRITE(MGA_IEN, 0); ++ ++ dev->irq_enabled = 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_state.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_state.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_state.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1139 @@ ++/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*- ++ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com ++ */ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Jeff Hartmann ++ * Keith Whitwell ++ * ++ * Rewritten by: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++ ++/* ================================================================ ++ * DMA hardware state programming functions ++ */ ++ ++static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, ++ struct drm_clip_rect * box) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ unsigned int pitch = dev_priv->front_pitch; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(2); ++ ++ /* Force reset of DWGCTL on G400 (eliminates clip disable bit). 
++ */ ++ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { ++ DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl, ++ MGA_LEN + MGA_EXEC, 0x80000000, ++ MGA_DWGCTL, ctx->dwgctl, ++ MGA_LEN + MGA_EXEC, 0x80000000); ++ } ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, ++ MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(3); ++ ++ DMA_BLOCK(MGA_DSTORG, ctx->dstorg, ++ MGA_MACCESS, ctx->maccess, ++ MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); ++ ++ DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, ++ MGA_FOGCOL, ctx->fogcolor, ++ MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset); ++ ++ DMA_BLOCK(MGA_FCOL, ctx->fcol, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(4); ++ ++ DMA_BLOCK(MGA_DSTORG, ctx->dstorg, ++ MGA_MACCESS, ctx->maccess, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, ++ MGA_FOGCOL, ctx->fogcolor, ++ MGA_WFLAG, ctx->wflag, ++ MGA_ZORG, dev_priv->depth_offset); ++ ++ DMA_BLOCK(MGA_WFLAG1, ctx->wflag, ++ MGA_TDUALSTAGE0, ctx->tdualstage0, ++ MGA_TDUALSTAGE1, ctx->tdualstage1, ++ MGA_FCOL, ctx->fcol); ++ ++ DMA_BLOCK(MGA_STENCIL, ctx->stencil, ++ MGA_STENCILCTL, ctx->stencilctl, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(4); ++ ++ DMA_BLOCK(MGA_TEXCTL2, tex->texctl2, ++ MGA_TEXCTL, tex->texctl, ++ MGA_TEXFILTER, tex->texfilter, ++ MGA_TEXBORDERCOL, tex->texbordercol); ++ ++ DMA_BLOCK(MGA_TEXORG, tex->texorg, ++ MGA_TEXORG1, tex->texorg1, ++ MGA_TEXORG2, tex->texorg2, ++ MGA_TEXORG3, tex->texorg3); ++ ++ DMA_BLOCK(MGA_TEXORG4, tex->texorg4, ++ MGA_TEXWIDTH, tex->texwidth, ++ MGA_TEXHEIGHT, tex->texheight, ++ MGA_WR24, tex->texwidth); ++ ++ DMA_BLOCK(MGA_WR34, tex->texheight, ++ MGA_TEXTRANS, 0x0000ffff, ++ MGA_TEXTRANSHIGH, 0x0000ffff, ++ MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; ++ DMA_LOCALS; ++ ++/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ ++/* tex->texctl, tex->texctl2); */ ++ ++ BEGIN_DMA(6); ++ ++ DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC, ++ MGA_TEXCTL, tex->texctl, ++ MGA_TEXFILTER, tex->texfilter, ++ MGA_TEXBORDERCOL, tex->texbordercol); ++ ++ DMA_BLOCK(MGA_TEXORG, tex->texorg, ++ MGA_TEXORG1, tex->texorg1, ++ MGA_TEXORG2, tex->texorg2, ++ MGA_TEXORG3, tex->texorg3); ++ ++ DMA_BLOCK(MGA_TEXORG4, tex->texorg4, ++ MGA_TEXWIDTH, tex->texwidth, ++ MGA_TEXHEIGHT, tex->texheight, ++ MGA_WR49, 0x00000000); ++ ++ DMA_BLOCK(MGA_WR57, 0x00000000, ++ MGA_WR53, 0x00000000, ++ MGA_WR61, 0x00000000, ++ MGA_WR52, MGA_G400_WR_MAGIC); ++ ++ DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC, ++ 
MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC, ++ MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC, ++ MGA_DMAPAD, 0x00000000); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_TEXTRANS, 0x0000ffff, ++ MGA_TEXTRANSHIGH, 0x0000ffff); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1]; ++ DMA_LOCALS; ++ ++/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */ ++/* tex->texctl, tex->texctl2); */ ++ ++ BEGIN_DMA(5); ++ ++ DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 | ++ MGA_MAP1_ENABLE | ++ MGA_G400_TC2_MAGIC), ++ MGA_TEXCTL, tex->texctl, ++ MGA_TEXFILTER, tex->texfilter, ++ MGA_TEXBORDERCOL, tex->texbordercol); ++ ++ DMA_BLOCK(MGA_TEXORG, tex->texorg, ++ MGA_TEXORG1, tex->texorg1, ++ MGA_TEXORG2, tex->texorg2, ++ MGA_TEXORG3, tex->texorg3); ++ ++ DMA_BLOCK(MGA_TEXORG4, tex->texorg4, ++ MGA_TEXWIDTH, tex->texwidth, ++ MGA_TEXHEIGHT, tex->texheight, ++ MGA_WR49, 0x00000000); ++ ++ DMA_BLOCK(MGA_WR57, 0x00000000, ++ MGA_WR53, 0x00000000, ++ MGA_WR61, 0x00000000, ++ MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC); ++ ++ DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC, ++ MGA_TEXTRANS, 0x0000ffff, ++ MGA_TEXTRANSHIGH, 0x0000ffff, ++ MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int pipe = sarea_priv->warp_pipe; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(3); ++ ++ DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND, ++ MGA_WVRTXSZ, 0x00000007, ++ MGA_WFLAG, 0x00000000, ++ MGA_WR24, 0x00000000); ++ ++ DMA_BLOCK(MGA_WR25, 0x00000100, ++ MGA_WR34, 0x00000000, ++ MGA_WR42, 0x0000ffff, ++ MGA_WR60, 0x0000ffff); ++ ++ /* Padding required to to hardware bug. 
++ */ ++ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, ++ MGA_DMAPAD, 0xffffffff, ++ MGA_DMAPAD, 0xffffffff, ++ MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | ++ MGA_WMODE_START | dev_priv->wagp_enable)); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int pipe = sarea_priv->warp_pipe; ++ DMA_LOCALS; ++ ++/* printk("mga_g400_emit_pipe %x\n", pipe); */ ++ ++ BEGIN_DMA(10); ++ ++ DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000); ++ ++ if (pipe & MGA_T2) { ++ DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000); ++ ++ DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000, ++ MGA_WACCEPTSEQ, 0x00000000, ++ MGA_WACCEPTSEQ, 0x00000000, ++ MGA_WACCEPTSEQ, 0x1e000000); ++ } else { ++ if (dev_priv->warp_pipe & MGA_T2) { ++ /* Flush the WARP pipe */ ++ DMA_BLOCK(MGA_YDST, 0x00000000, ++ MGA_FXLEFT, 0x00000000, ++ MGA_FXRIGHT, 0x00000001, ++ MGA_DWGCTL, MGA_DWGCTL_FLUSH); ++ ++ DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001, ++ MGA_DWGSYNC, 0x00007000, ++ MGA_TEXCTL2, MGA_G400_TC2_MAGIC, ++ MGA_LEN + MGA_EXEC, 0x00000000); ++ ++ DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX | ++ MGA_G400_TC2_MAGIC), ++ MGA_LEN + MGA_EXEC, 0x00000000, ++ MGA_TEXCTL2, MGA_G400_TC2_MAGIC, ++ MGA_DMAPAD, 0x00000000); ++ } ++ ++ DMA_BLOCK(MGA_WVRTXSZ, 0x00001807, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000); ++ ++ DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000, ++ MGA_WACCEPTSEQ, 0x00000000, ++ MGA_WACCEPTSEQ, 0x00000000, ++ MGA_WACCEPTSEQ, 0x18000000); ++ } ++ ++ DMA_BLOCK(MGA_WFLAG, 0x00000000, ++ MGA_WFLAG1, 0x00000000, ++ MGA_WR56, MGA_G400_WR56_MAGIC, ++ MGA_DMAPAD, 0x00000000); ++ ++ DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0 */ ++ MGA_WR57, 0x00000000, /* tex0 */ ++ MGA_WR53, 0x00000000, /* tex1 */ ++ MGA_WR61, 0x00000000); /* tex1 */ ++ ++ DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC, /* tex0 width */ ++ MGA_WR62, MGA_G400_WR_MAGIC, /* tex0 height */ ++ MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */ ++ MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */ ++ ++ /* Padding required to to hardware bug */ ++ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, ++ MGA_DMAPAD, 0xffffffff, ++ MGA_DMAPAD, 0xffffffff, ++ MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | ++ MGA_WMODE_START | dev_priv->wagp_enable)); ++ ++ ADVANCE_DMA(); ++} ++ ++static void mga_g200_emit_state(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ ++ if (sarea_priv->warp_pipe != dev_priv->warp_pipe) { ++ mga_g200_emit_pipe(dev_priv); ++ dev_priv->warp_pipe = sarea_priv->warp_pipe; ++ } ++ ++ if (dirty & MGA_UPLOAD_CONTEXT) { ++ mga_g200_emit_context(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT; ++ } ++ ++ if (dirty & MGA_UPLOAD_TEX0) { ++ mga_g200_emit_tex0(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; ++ } ++} ++ ++static void mga_g400_emit_state(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ int multitex = sarea_priv->warp_pipe & MGA_T2; ++ ++ if (sarea_priv->warp_pipe != dev_priv->warp_pipe) { ++ mga_g400_emit_pipe(dev_priv); ++ dev_priv->warp_pipe = sarea_priv->warp_pipe; ++ } ++ ++ if (dirty & MGA_UPLOAD_CONTEXT) { ++ mga_g400_emit_context(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT; ++ } ++ ++ if (dirty & MGA_UPLOAD_TEX0) { ++ 
mga_g400_emit_tex0(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; ++ } ++ ++ if ((dirty & MGA_UPLOAD_TEX1) && multitex) { ++ mga_g400_emit_tex1(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_TEX1; ++ } ++} ++ ++/* ================================================================ ++ * SAREA state verification ++ */ ++ ++/* Disallow all write destinations except the front and backbuffer. ++ */ ++static int mga_verify_context(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ ++ if (ctx->dstorg != dev_priv->front_offset && ++ ctx->dstorg != dev_priv->back_offset) { ++ DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n", ++ ctx->dstorg, dev_priv->front_offset, ++ dev_priv->back_offset); ++ ctx->dstorg = 0; ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* Disallow texture reads from PCI space. ++ */ ++static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit]; ++ unsigned int org; ++ ++ org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK); ++ ++ if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { ++ DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); ++ tex->texorg = 0; ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int mga_verify_state(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ int ret = 0; ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ if (dirty & MGA_UPLOAD_CONTEXT) ++ ret |= mga_verify_context(dev_priv); ++ ++ if (dirty & MGA_UPLOAD_TEX0) ++ ret |= mga_verify_tex(dev_priv, 0); ++ ++ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { ++ if (dirty & MGA_UPLOAD_TEX1) ++ ret |= mga_verify_tex(dev_priv, 1); ++ ++ if (dirty & MGA_UPLOAD_PIPE) ++ ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES); ++ } else { ++ if (dirty & MGA_UPLOAD_PIPE) ++ ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES); ++ } ++ ++ return (ret == 0); ++} ++ ++static int mga_verify_iload(drm_mga_private_t * dev_priv, ++ unsigned int dstorg, unsigned int length) ++{ ++ if (dstorg < dev_priv->texture_offset || ++ dstorg + length > (dev_priv->texture_offset + ++ dev_priv->texture_size)) { ++ DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); ++ return -EINVAL; ++ } ++ ++ if (length & MGA_ILOAD_MASK) { ++ DRM_ERROR("*** bad iload length: 0x%x\n", ++ length & MGA_ILOAD_MASK); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int mga_verify_blit(drm_mga_private_t * dev_priv, ++ unsigned int srcorg, unsigned int dstorg) ++{ ++ if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || ++ (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { ++ DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/* ================================================================ ++ * ++ */ ++ ++static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ int i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_DMA(1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ 
MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ ADVANCE_DMA(); ++ ++ for (i = 0; i < nbox; i++) { ++ struct drm_clip_rect *box = &pbox[i]; ++ u32 height = box->y2 - box->y1; ++ ++ DRM_DEBUG(" from=%d,%d to=%d,%d\n", ++ box->x1, box->y1, box->x2, box->y2); ++ ++ if (clear->flags & MGA_FRONT) { ++ BEGIN_DMA(2); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, clear->color_mask, ++ MGA_YDSTLEN, (box->y1 << 16) | height, ++ MGA_FXBNDRY, (box->x2 << 16) | box->x1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_FCOL, clear->clear_color, ++ MGA_DSTORG, dev_priv->front_offset, ++ MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ++ ++ ADVANCE_DMA(); ++ } ++ ++ if (clear->flags & MGA_BACK) { ++ BEGIN_DMA(2); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, clear->color_mask, ++ MGA_YDSTLEN, (box->y1 << 16) | height, ++ MGA_FXBNDRY, (box->x2 << 16) | box->x1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_FCOL, clear->clear_color, ++ MGA_DSTORG, dev_priv->back_offset, ++ MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ++ ++ ADVANCE_DMA(); ++ } ++ ++ if (clear->flags & MGA_DEPTH) { ++ BEGIN_DMA(2); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, clear->depth_mask, ++ MGA_YDSTLEN, (box->y1 << 16) | height, ++ MGA_FXBNDRY, (box->x2 << 16) | box->x1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_FCOL, clear->clear_depth, ++ MGA_DSTORG, dev_priv->depth_offset, ++ MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ++ ++ ADVANCE_DMA(); ++ } ++ ++ } ++ ++ BEGIN_DMA(1); ++ ++ /* Force reset of DWGCTL */ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ ADVANCE_DMA(); ++ ++ FLUSH_DMA(); ++} ++ ++static void mga_dma_dispatch_swap(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ int i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ sarea_priv->last_frame.head = dev_priv->prim.tail; ++ sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; ++ ++ BEGIN_DMA(4 + nbox); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset, ++ MGA_MACCESS, dev_priv->maccess, ++ MGA_SRCORG, dev_priv->back_offset, ++ MGA_AR5, dev_priv->front_pitch); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, 0xffffffff, ++ MGA_DWGCTL, MGA_DWGCTL_COPY); ++ ++ for (i = 0; i < nbox; i++) { ++ struct drm_clip_rect *box = &pbox[i]; ++ u32 height = box->y2 - box->y1; ++ u32 start = box->y1 * dev_priv->front_pitch; ++ ++ DRM_DEBUG(" from=%d,%d to=%d,%d\n", ++ box->x1, box->y1, box->x2, box->y2); ++ ++ DMA_BLOCK(MGA_AR0, start + box->x2 - 1, ++ MGA_AR3, start + box->x1, ++ MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1, ++ MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height); ++ } ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_SRCORG, dev_priv->front_offset, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ ADVANCE_DMA(); ++ ++ FLUSH_DMA(); ++ ++ DRM_DEBUG("... 
done.\n"); ++} ++ ++static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_buf_priv_t *buf_priv = buf->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u32 address = (u32) buf->bus_address; ++ u32 length = (u32) buf->used; ++ int i = 0; ++ DMA_LOCALS; ++ DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); ++ ++ if (buf->used) { ++ buf_priv->dispatched = 1; ++ ++ MGA_EMIT_STATE(dev_priv, sarea_priv->dirty); ++ ++ do { ++ if (i < sarea_priv->nbox) { ++ mga_emit_clip_rect(dev_priv, ++ &sarea_priv->boxes[i]); ++ } ++ ++ BEGIN_DMA(1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_SECADDRESS, (address | ++ MGA_DMA_VERTEX), ++ MGA_SECEND, ((address + length) | ++ dev_priv->dma_access)); ++ ++ ADVANCE_DMA(); ++ } while (++i < sarea_priv->nbox); ++ } ++ ++ if (buf_priv->discard) { ++ AGE_BUFFER(buf_priv); ++ buf->pending = 0; ++ buf->used = 0; ++ buf_priv->dispatched = 0; ++ ++ mga_freelist_put(dev, buf); ++ } ++ ++ FLUSH_DMA(); ++} ++ ++static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf, ++ unsigned int start, unsigned int end) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_buf_priv_t *buf_priv = buf->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u32 address = (u32) buf->bus_address; ++ int i = 0; ++ DMA_LOCALS; ++ DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end); ++ ++ if (start != end) { ++ buf_priv->dispatched = 1; ++ ++ MGA_EMIT_STATE(dev_priv, sarea_priv->dirty); ++ ++ do { ++ if (i < sarea_priv->nbox) { ++ mga_emit_clip_rect(dev_priv, ++ &sarea_priv->boxes[i]); ++ } ++ ++ BEGIN_DMA(1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_SETUPADDRESS, address + start, ++ MGA_SETUPEND, ((address + end) | ++ dev_priv->dma_access)); ++ ++ ADVANCE_DMA(); ++ } while (++i < sarea_priv->nbox); ++ } ++ ++ if (buf_priv->discard) { ++ AGE_BUFFER(buf_priv); ++ buf->pending = 0; ++ buf->used = 0; ++ buf_priv->dispatched = 0; ++ ++ mga_freelist_put(dev, buf); ++ } ++ ++ FLUSH_DMA(); ++} ++ ++/* This copies a 64 byte aligned agp region to the frambuffer with a ++ * standard blit, the ioctl needs to do checking. 
++ */ ++static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, ++ unsigned int dstorg, unsigned int length) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_buf_priv_t *buf_priv = buf->dev_private; ++ drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; ++ u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; ++ u32 y2; ++ DMA_LOCALS; ++ DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); ++ ++ y2 = length / 64; ++ ++ BEGIN_DMA(5); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ DMA_BLOCK(MGA_DSTORG, dstorg, ++ MGA_MACCESS, 0x00000000, ++ MGA_SRCORG, srcorg, ++ MGA_AR5, 64); ++ ++ DMA_BLOCK(MGA_PITCH, 64, ++ MGA_PLNWT, 0xffffffff, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGCTL, MGA_DWGCTL_COPY); ++ ++ DMA_BLOCK(MGA_AR0, 63, ++ MGA_AR3, 0, ++ MGA_FXBNDRY, (63 << 16) | 0, ++ MGA_YDSTLEN + MGA_EXEC, y2); ++ ++ DMA_BLOCK(MGA_PLNWT, ctx->plnwt, ++ MGA_SRCORG, dev_priv->front_offset, ++ MGA_PITCH, dev_priv->front_pitch, ++ MGA_DWGSYNC, 0x00007000); ++ ++ ADVANCE_DMA(); ++ ++ AGE_BUFFER(buf_priv); ++ ++ buf->pending = 0; ++ buf->used = 0; ++ buf_priv->dispatched = 0; ++ ++ mga_freelist_put(dev, buf); ++ ++ FLUSH_DMA(); ++} ++ ++static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ u32 scandir = 0, i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_DMA(4 + nbox); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY, ++ MGA_PLNWT, blit->planemask, ++ MGA_SRCORG, blit->srcorg, ++ MGA_DSTORG, blit->dstorg); ++ ++ DMA_BLOCK(MGA_SGN, scandir, ++ MGA_MACCESS, dev_priv->maccess, ++ MGA_AR5, blit->ydir * blit->src_pitch, ++ MGA_PITCH, blit->dst_pitch); ++ ++ for (i = 0; i < nbox; i++) { ++ int srcx = pbox[i].x1 + blit->delta_sx; ++ int srcy = pbox[i].y1 + blit->delta_sy; ++ int dstx = pbox[i].x1 + blit->delta_dx; ++ int dsty = pbox[i].y1 + blit->delta_dy; ++ int h = pbox[i].y2 - pbox[i].y1; ++ int w = pbox[i].x2 - pbox[i].x1 - 1; ++ int start; ++ ++ if (blit->ydir == -1) { ++ srcy = blit->height - srcy - 1; ++ } ++ ++ start = srcy * blit->src_pitch + srcx; ++ ++ DMA_BLOCK(MGA_AR0, start + w, ++ MGA_AR3, start, ++ MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff), ++ MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h); ++ } ++ ++ /* Do something to flush AGP? ++ */ ++ ++ /* Force reset of DWGCTL */ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_PITCH, dev_priv->front_pitch, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ ADVANCE_DMA(); ++} ++ ++/* ================================================================ ++ * ++ */ ++ ++static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_clear_t *clear = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_clear(dev, clear); ++ ++ /* Make sure we restore the 3D state next time. 
++ */ ++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; ++ ++ return 0; ++} ++ ++static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_swap(dev); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; ++ ++ return 0; ++} ++ ++static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (vertex->idx < 0 || vertex->idx > dma->buf_count) ++ return -EINVAL; ++ buf = dma->buflist[vertex->idx]; ++ buf_priv = buf->dev_private; ++ ++ buf->used = vertex->used; ++ buf_priv->discard = vertex->discard; ++ ++ if (!mga_verify_state(dev_priv)) { ++ if (vertex->discard) { ++ if (buf_priv->dispatched == 1) ++ AGE_BUFFER(buf_priv); ++ buf_priv->dispatched = 0; ++ mga_freelist_put(dev, buf); ++ } ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_vertex(dev, buf); ++ ++ return 0; ++} ++ ++static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_indices_t *indices = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (indices->idx < 0 || indices->idx > dma->buf_count) ++ return -EINVAL; ++ ++ buf = dma->buflist[indices->idx]; ++ buf_priv = buf->dev_private; ++ ++ buf_priv->discard = indices->discard; ++ ++ if (!mga_verify_state(dev_priv)) { ++ if (indices->discard) { ++ if (buf_priv->dispatched == 1) ++ AGE_BUFFER(buf_priv); ++ buf_priv->dispatched = 0; ++ mga_freelist_put(dev, buf); ++ } ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); ++ ++ return 0; ++} ++ ++static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_iload_t *iload = data; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++#if 0 ++ if (mga_do_wait_for_idle(dev_priv) < 0) { ++ if (MGA_DMA_DEBUG) ++ DRM_INFO("-EBUSY\n"); ++ return -EBUSY; ++ } ++#endif ++ if (iload->idx < 0 || iload->idx > dma->buf_count) ++ return -EINVAL; ++ ++ buf = dma->buflist[iload->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { ++ mga_freelist_put(dev, buf); ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); ++ ++ /* Make sure we restore the 3D state next time. 
++ */ ++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; ++ ++ return 0; ++} ++ ++static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_blit_t *blit = data; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg)) ++ return -EINVAL; ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_blit(dev, blit); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; ++ ++ return 0; ++} ++ ++static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ switch (param->param) { ++ case MGA_PARAM_IRQ_NR: ++ value = dev->irq; ++ break; ++ case MGA_PARAM_CARD_TYPE: ++ value = dev_priv->chipset; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ u32 *fence = data; ++ DMA_LOCALS; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ /* I would normal do this assignment in the declaration of fence, ++ * but dev_priv may be NULL. 
++ */ ++ ++ *fence = dev_priv->next_fence_to_post; ++ dev_priv->next_fence_to_post++; ++ ++ BEGIN_DMA(1); ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_SOFTRAP, 0x00000000); ++ ADVANCE_DMA(); ++ ++ return 0; ++} ++ ++static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ u32 *fence = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ mga_driver_fence_wait(dev, fence); ++ ++ return 0; ++} ++ ++struct drm_ioctl_desc mga_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++}; ++ ++int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_ucode.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_ucode.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_ucode.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_ucode.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,11645 @@ ++/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*- ++ * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com ++ * ++ * Copyright 1999 Matrox Graphics Inc. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included ++ * in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE ++ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Kernel-based WARP engine management: ++ * Gareth Hughes ++ */ ++ ++/* ++ * WARP pipes are named according to the functions they perform, where: ++ * ++ * - T stands for computation of texture stage 0 ++ * - T2 stands for computation of both texture stage 0 and texture stage 1 ++ * - G stands for computation of triangle intensity (Gouraud interpolation) ++ * - Z stands for computation of Z buffer interpolation ++ * - S stands for computation of specular highlight ++ * - A stands for computation of the alpha channel ++ * - F stands for computation of vertex fog interpolation ++ */ ++ ++static unsigned char warp_g200_tgz[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x72, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x60, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 
0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 
0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x03, 0x80, 0x0A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x57, 0x39, 0x20, 0xE9, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x2B, 0x32, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x16, 0x28, 0x20, 0xE9, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x2B, 0x20, 0xE9, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x85, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x84, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x82, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x7F, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgza[] = { ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x7D, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, 
++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x6B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, 
++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x44, 0x4C, 0xB6, ++ 0x25, 0x44, 0x54, 0xB6, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x07, 0xC0, 0x44, 0xC6, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1F, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x3F, 0x3D, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x07, 0x20, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0x26, 0x1F, 0xDF, ++ 0x9D, 0x1F, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x9E, 0x3F, 0x4F, 0xE9, ++ ++ 0x07, 0x07, 0x1F, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x9C, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x7A, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 
0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x79, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x77, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x74, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzaf[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x83, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x6F, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 
0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 
0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x2D, 0x44, 0x4C, 0xB6, ++ 0x25, 0x44, 0x54, 0xB6, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x07, 0xC0, 0x44, 0xC6, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x2D, 0x20, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x1F, 0x62, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x07, 0x20, ++ ++ 0x3F, 0x3D, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x35, 0x17, 0x4F, 0xE9, ++ ++ 0x1F, 0x26, 0x1F, 0xDF, ++ 0x9D, 0x1F, 0x4F, 0xE9, ++ ++ 0x9E, 0x3F, 0x4F, 0xE9, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x07, 0x07, 0x1F, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x9C, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x74, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x73, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 
0xE0, ++ ++ 0x71, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6E, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzf[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x7F, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x6B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, 
++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ 
++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0x35, 0x17, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x78, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x77, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x75, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x72, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzs[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 
0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x8B, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x77, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 
0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 
0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x8F, 0x20, ++ ++ 0xA5, 0x37, 0x4F, 0xE9, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0xA3, 0x80, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x6C, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6B, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x69, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzsa[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 
0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x8F, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x7B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, 
++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 
0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x0D, 0x44, 0x4C, 0xB6, ++ 0x05, 0x44, 0x54, 0xB6, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x0F, 0x20, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0xA5, 0x37, 0x4F, 0xE9, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2F, 0xC0, 0x44, 0xC6, ++ 0xA3, 0x80, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0x9D, 0x17, 0x4F, 0xE9, ++ ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x9E, 0x37, 0x4F, 0xE9, ++ 0x2F, 0x17, 0x2F, 0xAF, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x9C, 0x80, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x68, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x67, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x65, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x62, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzsaf[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 
0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x94, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x80, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 
0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 
0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0x2D, 0x44, 0x4C, 0xB6, ++ 0x25, 0x44, 0x54, 0xB6, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x0F, 0x20, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x07, 0xC0, 0x44, 0xC6, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0xA5, 0x37, 0x4F, 0xE9, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x3E, 0x3D, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x07, 0x20, ++ ++ 0x2F, 0x20, ++ 0x00, 0xE0, ++ 0xA3, 0x0F, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ ++ 0x1E, 0x26, 0x1E, 0xDF, ++ 0x9D, 0x1E, 0x4F, 0xE9, ++ ++ 0x35, 0x17, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x07, 0x07, 0x1E, 0xAF, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x9E, 0x3E, 0x4F, 0xE9, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x9C, 0x80, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x63, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x62, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x60, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x5D, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzsf[] = { ++ ++ 0x00, 
0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x8F, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x7B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 
0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 
0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x0F, 0x20, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0xA5, 0x37, 0x4F, 0xE9, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2F, 0x20, ++ 0x00, 0xE0, ++ 0xA3, 0x80, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0x35, 0x17, 0x4F, 0xE9, ++ ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x68, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x67, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x65, 0xFF, 0x20, 0xEA, ++ 0x00, 
0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x62, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g400_t2gz[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x78, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x69, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 
0x32, 0x32, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x25, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2A, 0x44, 0x54, 0xB4, ++ 0x1A, 0x44, 0x64, 0xB4, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x9F, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 
0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xBE, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x7D, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gza[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x7C, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x6D, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 
0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x29, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0F, 0xCF, 0x74, 0xC6, ++ 0x3D, 0xCF, 0x74, 0xC2, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x0F, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x54, 0xB4, ++ 0x02, 0x44, 0x64, 0xB4, ++ ++ 0x2A, 0x44, 0x54, 0xB6, ++ 0x1A, 0x44, 0x64, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x9B, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 
0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xBA, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x79, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gzaf[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x81, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 
0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x72, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x2E, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x0F, 0xCF, 0x74, 0xC6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x0F, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x54, 0xB4, ++ 0x02, 0x44, 0x64, 0xB4, ++ ++ 0x2A, 0x44, 0x54, 0xB6, ++ 0x1A, 0x44, 0x64, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x3D, 0xCF, 0x75, 0xC6, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x45, 0x55, 0xB6, ++ 0x02, 0x45, 0x65, 0xB6, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x3D, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, 
++ 0x35, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x96, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xB5, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x74, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gzf[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, 
++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x7D, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x6E, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0F, 0xCF, 0x75, 0xC6, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x28, 0x80, 0x3A, 
0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x31, 0x0F, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x54, 0xB4, ++ 0x02, 0x44, 0x64, 0xB4, ++ ++ 0x2A, 0x45, 0x55, 0xB6, ++ 0x1A, 0x45, 0x65, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x9A, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 
0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xBB, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x78, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gzs[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x85, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x76, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 
0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x0F, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x31, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0F, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB4, ++ 0x1A, 0x44, 0x64, 0xB4, ++ ++ 0x0A, 0x45, 0x55, 0xB0, ++ 0x02, 0x45, 0x65, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x55, 0xB2, ++ 0x1A, 0x45, 0x65, 0xB2, ++ ++ 0x0A, 0x45, 0x55, 0xB4, ++ 0x02, 0x45, 0x65, 0xB4, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA7, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 
0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x92, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xB2, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x70, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gzsa[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x8A, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 
0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x7B, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x0F, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x36, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0F, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB4, ++ 0x1A, 0x44, 0x64, 0xB4, ++ ++ 0x0A, 0x45, 0x55, 0xB0, ++ 0x02, 0x45, 0x65, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x55, 0xB2, ++ 0x1A, 0x45, 0x65, 0xB2, ++ ++ 0x0A, 0x45, 0x55, 0xB4, ++ 0x02, 0x45, 0x65, 0xB4, ++ ++ 0x0F, 0xCF, 0x74, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 
0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB6, ++ 0x1A, 0x44, 0x64, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x8D, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ 
++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xAD, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x6B, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gzsaf[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x8E, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x7F, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 
0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x0F, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x3A, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0F, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB4, ++ 0x1A, 0x44, 0x64, 0xB4, ++ ++ 0x0A, 0x45, 0x55, 0xB0, ++ 0x02, 0x45, 0x65, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x55, 0xB2, ++ 0x1A, 0x45, 0x65, 0xB2, ++ ++ 0x0A, 0x45, 0x55, 0xB4, ++ 0x02, 0x45, 0x65, 0xB4, ++ ++ 0x0F, 0xCF, 0x74, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB6, ++ 0x1A, 0x44, 0x64, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x45, 0x55, 0xB6, ++ 0x02, 0x45, 0x65, 0xB6, ++ ++ 0x3D, 0xCF, 0x75, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x3D, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 
0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x89, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xA9, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x67, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_t2gzsf[] = { ++ ++ 0x00, 0x8A, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0A, 0x40, 0x50, 0xBF, ++ 0x2A, 0x40, 0x60, 0xBF, ++ ++ 0x32, 0x41, 0x51, 0xBF, ++ 0x3A, 0x41, 0x61, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xD3, 0x6B, ++ 0x00, 0x8A, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x23, 0x9F, ++ 0x00, 0xE0, ++ 0x51, 0x04, ++ ++ 0x90, 0xE2, ++ 0x61, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x51, 0x41, 0xE0, 0xEC, ++ 0x39, 0x67, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x63, 0xA0, 0xE8, ++ ++ 0x61, 0x41, 0xE0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x8A, 0x80, 0x15, 0xEA, ++ 0x10, 0x04, ++ 0x20, 0x04, ++ ++ 0x61, 0x51, 0xE0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 
0x52, 0xBF, ++ 0x0F, 0x52, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x62, 0xBF, ++ 0x1E, 0x51, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x0E, 0x61, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x50, 0xBD, ++ 0x22, 0x40, 0x60, 0xBD, ++ ++ 0x12, 0x41, 0x51, 0xBD, ++ 0x3A, 0x41, 0x61, 0xBD, ++ ++ 0xBF, 0x2F, 0x0E, 0xBD, ++ 0x97, 0xE2, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x35, 0x48, 0xB1, 0xE8, ++ 0x3D, 0x59, 0xB1, 0xE8, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x56, 0x31, 0x56, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0x31, 0x66, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x57, 0x39, 0x57, 0xBF, ++ 0x67, 0x39, 0x67, 0xBF, ++ ++ 0x7B, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x35, 0x00, ++ 0x3D, 0x00, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0x8D, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x75, 0xF8, 0xEC, ++ 0x35, 0x20, ++ 0x3D, 0x20, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x53, 0x53, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x0E, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x48, 0x35, 0x48, 0xBF, ++ 0x58, 0x35, 0x58, 0xBF, ++ ++ 0x68, 0x35, 0x68, 0xBF, ++ 0x49, 0x3D, 0x49, 0xBF, ++ ++ 0x59, 0x3D, 0x59, 0xBF, ++ 0x69, 0x3D, 0x69, 0xBF, ++ ++ 0x63, 0x63, 0x2D, 0xDF, ++ 0x4D, 0x7D, 0xF8, 0xEC, ++ ++ 0x59, 0xE3, ++ 0x00, 0xE0, ++ 0xB8, 0x38, 0x33, 0xBF, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x18, 0x3A, 0x41, 0xE9, ++ ++ 0x3F, 0x53, 0xA0, 0xE8, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x63, 0xA0, 0xE8, ++ ++ 0x50, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x50, 0x3C, 0xE9, ++ ++ 0x1F, 0x0F, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x59, 0x78, 0xF8, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x56, 0x3F, 0x56, 0xDF, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x66, 0x3D, 0x66, 0xDF, ++ ++ 0x1D, 0x32, 0x41, 0xE9, ++ 0x67, 0x3D, 0x67, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3F, 0x57, 0xDF, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x59, 0x3F, 0x59, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x69, 0x3D, 0x69, 0xDF, ++ ++ 0x48, 0x37, 0x48, 0xDF, ++ 0x58, 0x3F, 0x58, 0xDF, ++ ++ 0x68, 0x3D, 0x68, 0xDF, ++ 0x49, 0x37, 0x49, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x0F, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x54, 0xB0, ++ 0x02, 0x44, 0x64, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB2, ++ 0x1A, 0x44, 0x64, 0xB2, ++ ++ 0x36, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0F, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x54, 0xB4, ++ 0x1A, 0x44, 0x64, 0xB4, ++ ++ 0x0A, 0x45, 0x55, 0xB0, ++ 0x02, 0x45, 0x65, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 
0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x55, 0xB2, ++ 0x1A, 0x45, 0x65, 0xB2, ++ ++ 0x0A, 0x45, 0x55, 0xB4, ++ 0x02, 0x45, 0x65, 0xB4, ++ ++ 0x0F, 0xCF, 0x75, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x31, 0x0F, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x55, 0xB6, ++ 0x1A, 0x45, 0x65, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x56, 0xBF, ++ 0x1A, 0x46, 0x66, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x57, 0xBF, ++ 0x02, 0x47, 0x67, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x53, 0xBF, ++ 0x1A, 0x43, 0x63, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x48, 0x58, 0xBF, ++ 0x02, 0x48, 0x68, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x2A, 0x49, 0x59, 0xBF, ++ 0x1A, 0x49, 0x69, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x82, 0x30, 0x57, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x83, 0x38, 0x57, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x84, 0x31, 0x5E, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x85, 0x39, 0x5E, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x87, 0x77, 0x57, 0xE9, ++ 0x8B, 0x3E, 0xBF, 0xEA, ++ ++ 0x80, 0x30, 0x57, 0xE9, ++ 0x81, 0x38, 0x57, 0xE9, ++ ++ 0x82, 0x31, 0x57, 0xE9, ++ 0x86, 0x78, 0x57, 0xE9, ++ ++ 0x83, 0x39, 0x57, 0xE9, ++ 0x87, 0x79, 0x57, 0xE9, ++ ++ 0x30, 0x1F, 0x5F, 0xE9, ++ 0x8A, 0x34, 0x20, 0xE9, ++ ++ 0x8B, 0x3C, 0x20, 0xE9, ++ 0x37, 0x50, 0x60, 0xBD, ++ ++ 0x57, 0x0D, 0x20, 0xE9, ++ 0x35, 0x51, 0x61, 0xBD, ++ ++ 0x2B, 0x50, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x0E, 0x77, ++ ++ 0x24, 0x51, 0x20, 0xE9, ++ 0x8D, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x0E, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x0B, 0x46, 0xA0, 0xE8, ++ 0x1B, 0x56, 0xA0, 0xE8, ++ ++ 0x2B, 0x66, 0xA0, 0xE8, ++ 0x0C, 0x47, 0xA0, 0xE8, ++ ++ 0x1C, 0x57, 0xA0, 0xE8, ++ 0x2C, 0x67, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x57, 0x80, 0x57, 0xCF, ++ ++ 0x66, 0x33, 0x66, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x67, 0x3B, 0x67, 0xCF, ++ ++ 0x0B, 0x48, 0xA0, 0xE8, ++ 0x1B, 0x58, 0xA0, 0xE8, ++ ++ 0x2B, 0x68, 0xA0, 0xE8, ++ 0x0C, 0x49, 0xA0, 0xE8, ++ ++ 0x1C, 0x59, 0xA0, 0xE8, ++ 0x2C, 0x69, 0xA0, 0xE8, ++ ++ 0x0B, 0x00, ++ 0x1B, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x00, ++ 0x1C, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x65, ++ 
0x1B, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0C, 0x65, ++ 0x1C, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x0B, 0x1B, 0x60, 0xEC, ++ 0x34, 0xD7, 0x34, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x0C, 0x1C, 0x60, 0xEC, ++ ++ 0x3C, 0xD7, 0x3C, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x0B, 0x2B, 0xDE, 0xE8, ++ 0x1B, 0x80, 0xDE, 0xE8, ++ ++ 0x34, 0x80, 0x34, 0xBD, ++ 0x3C, 0x80, 0x3C, 0xBD, ++ ++ 0x33, 0xD7, 0x0B, 0xBD, ++ 0x3B, 0xD7, 0x1B, 0xBD, ++ ++ 0x48, 0x80, 0x48, 0xCF, ++ 0x59, 0x80, 0x59, 0xCF, ++ ++ 0x68, 0x33, 0x68, 0xCF, ++ 0x49, 0x3B, 0x49, 0xCF, ++ ++ 0xAD, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x58, 0x33, 0x58, 0xCF, ++ 0x69, 0x3B, 0x69, 0xCF, ++ ++ 0x6B, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgz[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x58, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x4A, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 
0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x1D, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xAF, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xD6, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x9D, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgza[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 
0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x5C, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x4E, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x27, 0xCF, 0x74, 0xC6, ++ 0x3D, 0xCF, 0x74, 0xC2, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x20, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB4, ++ 0x02, 0x44, 0x54, 0xB4, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 
0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xAB, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xD3, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x99, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzaf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x61, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 
0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x53, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x26, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x27, 0xCF, 0x74, 0xC6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB4, ++ 0x02, 0x44, 0x54, 0xB4, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x3D, 0xCF, 0x75, 0xC6, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x45, 0x4D, 0xB6, ++ 0x02, 0x45, 0x55, 0xB6, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x3D, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x38, 
0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xA6, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xCD, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x94, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x5D, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x4F, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ 
++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x27, 0xCF, 0x75, 0xC6, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x20, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x31, 0x27, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB4, ++ 0x02, 0x44, 0x54, 0xB4, ++ ++ 0x2A, 0x45, 0x4D, 0xB6, ++ 0x1A, 0x45, 0x55, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xAA, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 
0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xD3, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x98, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzs[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x65, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x57, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, 
++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x29, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA7, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xA2, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 
0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xCA, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x90, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzsa[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x6A, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x5C, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 
0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x2E, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x27, 0xCF, 0x74, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0x9D, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 
0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xC5, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x8B, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzsaf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x6E, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x60, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 
0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x32, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x27, 0xCF, 0x74, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x45, 0x4D, 0xB6, ++ 0x02, 0x45, 0x55, 0xB6, ++ ++ 0x3D, 0xCF, 0x75, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x3D, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0x99, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 
0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xC1, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x87, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzsf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x6A, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x5C, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ 
++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x2E, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x27, 0xCF, 0x75, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x31, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB6, ++ 0x1A, 0x45, 0x55, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0x9D, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 
0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xC5, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x8B, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_warp.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_warp.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/mga_warp.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/mga_warp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,198 @@ ++/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- ++ * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++#include "mga_ucode.h" ++ ++#define MGA_WARP_CODE_ALIGN 256 /* in bytes */ ++ ++#define WARP_UCODE_SIZE( which ) \ ++ ((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN) ++ ++#define WARP_UCODE_INSTALL( which, where ) \ ++do { \ ++ DRM_DEBUG( " pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase );\ ++ dev_priv->warp_pipe_phys[where] = pcbase; \ ++ memcpy( vcbase, which, sizeof(which) ); \ ++ pcbase += WARP_UCODE_SIZE( which ); \ ++ vcbase += WARP_UCODE_SIZE( which ); \ ++} while (0) ++ ++static const unsigned int mga_warp_g400_microcode_size = ++ (WARP_UCODE_SIZE(warp_g400_tgz) + ++ WARP_UCODE_SIZE(warp_g400_tgza) + ++ WARP_UCODE_SIZE(warp_g400_tgzaf) + ++ WARP_UCODE_SIZE(warp_g400_tgzf) + ++ WARP_UCODE_SIZE(warp_g400_tgzs) + ++ WARP_UCODE_SIZE(warp_g400_tgzsa) + ++ WARP_UCODE_SIZE(warp_g400_tgzsaf) + ++ WARP_UCODE_SIZE(warp_g400_tgzsf) + ++ WARP_UCODE_SIZE(warp_g400_t2gz) + ++ WARP_UCODE_SIZE(warp_g400_t2gza) + ++ WARP_UCODE_SIZE(warp_g400_t2gzaf) + ++ WARP_UCODE_SIZE(warp_g400_t2gzf) + ++ WARP_UCODE_SIZE(warp_g400_t2gzs) + ++ WARP_UCODE_SIZE(warp_g400_t2gzsa) + ++ WARP_UCODE_SIZE(warp_g400_t2gzsaf) + ++ WARP_UCODE_SIZE(warp_g400_t2gzsf)); ++ ++static const unsigned int mga_warp_g200_microcode_size = ++ (WARP_UCODE_SIZE(warp_g200_tgz) + ++ WARP_UCODE_SIZE(warp_g200_tgza) + ++ WARP_UCODE_SIZE(warp_g200_tgzaf) + ++ WARP_UCODE_SIZE(warp_g200_tgzf) + ++ WARP_UCODE_SIZE(warp_g200_tgzs) + ++ WARP_UCODE_SIZE(warp_g200_tgzsa) + ++ WARP_UCODE_SIZE(warp_g200_tgzsaf) + ++ WARP_UCODE_SIZE(warp_g200_tgzsf)); ++ ++ ++unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv) ++{ ++ switch (dev_priv->chipset) { ++ case MGA_CARD_TYPE_G400: ++ case MGA_CARD_TYPE_G550: ++ return PAGE_ALIGN(mga_warp_g400_microcode_size); ++ case MGA_CARD_TYPE_G200: ++ return PAGE_ALIGN(mga_warp_g200_microcode_size); ++ default: ++ DRM_ERROR("Unknown chipset value: 0x%x\n", dev_priv->chipset); ++ return 0; ++ } ++} ++ ++static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv) ++{ ++ unsigned char *vcbase = dev_priv->warp->handle; ++ unsigned long pcbase = dev_priv->warp->offset; ++ ++ memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); ++ ++ WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ); ++ WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF); ++ WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA); ++ WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF); ++ WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS); ++ WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF); ++ WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA); ++ WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF); ++ ++ WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ); ++ WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF); ++ WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA); ++ WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF); ++ WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS); ++ WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF); ++ WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA); ++ WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF); ++ ++ return 0; ++} ++ ++static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv) ++{ ++ unsigned char *vcbase = dev_priv->warp->handle; ++ unsigned long pcbase = dev_priv->warp->offset; ++ ++ memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); ++ ++ WARP_UCODE_INSTALL(warp_g200_tgz, 
MGA_WARP_TGZ); ++ WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF); ++ WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA); ++ WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF); ++ WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS); ++ WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF); ++ WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA); ++ WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF); ++ ++ return 0; ++} ++ ++int mga_warp_install_microcode(drm_mga_private_t * dev_priv) ++{ ++ const unsigned int size = mga_warp_microcode_size(dev_priv); ++ ++ DRM_DEBUG("MGA ucode size = %d bytes\n", size); ++ if (size > dev_priv->warp->size) { ++ DRM_ERROR("microcode too large! (%u > %lu)\n", ++ size, dev_priv->warp->size); ++ return -ENOMEM; ++ } ++ ++ switch (dev_priv->chipset) { ++ case MGA_CARD_TYPE_G400: ++ case MGA_CARD_TYPE_G550: ++ return mga_warp_install_g400_microcode(dev_priv); ++ case MGA_CARD_TYPE_G200: ++ return mga_warp_install_g200_microcode(dev_priv); ++ default: ++ return -EINVAL; ++ } ++} ++ ++#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) ++ ++int mga_warp_init(drm_mga_private_t * dev_priv) ++{ ++ u32 wmisc; ++ ++ /* FIXME: Get rid of these damned magic numbers... ++ */ ++ switch (dev_priv->chipset) { ++ case MGA_CARD_TYPE_G400: ++ case MGA_CARD_TYPE_G550: ++ MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); ++ MGA_WRITE(MGA_WGETMSB, 0x00000E00); ++ MGA_WRITE(MGA_WVRTXSZ, 0x00001807); ++ MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); ++ break; ++ case MGA_CARD_TYPE_G200: ++ MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND); ++ MGA_WRITE(MGA_WGETMSB, 0x1606); ++ MGA_WRITE(MGA_WVRTXSZ, 7); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | ++ MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE)); ++ wmisc = MGA_READ(MGA_WMISC); ++ if (wmisc != WMISC_EXPECTED) { ++ DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", ++ wmisc, WMISC_EXPECTED); ++ return -EINVAL; ++ } ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_bo.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_bo.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_bo.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_bo.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,296 @@ ++/* ++ * Copyright 2007 Dave Airlied ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++/* ++ * Authors: Dave Airlied ++ * Ben Skeggs ++ * Jeremy Kolb ++ */ ++ ++#include "drmP.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++static struct drm_ttm_backend * ++nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ switch (dev_priv->gart_info.type) { ++ case NOUVEAU_GART_AGP: ++ return drm_agp_init_ttm(dev); ++ case NOUVEAU_GART_SGDMA: ++ return nouveau_sgdma_init_ttm(dev); ++ default: ++ DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); ++ break; ++ } ++ ++ return NULL; ++} ++ ++static int ++nouveau_bo_fence_type(struct drm_buffer_object *bo, ++ uint32_t *fclass, uint32_t *type) ++{ ++ /* When we get called, *fclass is set to the requested fence class */ ++ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) ++ *type = 3; ++ else ++ *type = 1; ++ return 0; ++ ++} ++ ++static int ++nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) ++{ ++ /* We'll do this from user space. */ ++ return 0; ++} ++ ++static int ++nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ switch (type) { ++ case DRM_BO_MEM_LOCAL: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CACHED; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_VRAM: ++ man->flags = _DRM_FLAG_MEMTYPE_FIXED | ++ _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_NEEDS_IOREMAP; ++ man->io_addr = NULL; ++ man->drm_bus_maptype = _DRM_FRAME_BUFFER; ++ man->io_offset = drm_get_resource_start(dev, 1); ++ man->io_size = drm_get_resource_len(dev, 1); ++ if (man->io_size > nouveau_mem_fb_amount(dev)) ++ man->io_size = nouveau_mem_fb_amount(dev); ++ break; ++ case DRM_BO_MEM_PRIV0: ++ /* Unmappable VRAM */ ++ man->flags = _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_TT: ++ switch (dev_priv->gart_info.type) { ++ case NOUVEAU_GART_AGP: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | ++ _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ break; ++ case NOUVEAU_GART_SGDMA: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | ++ _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = _DRM_SCATTER_GATHER; ++ break; ++ default: ++ DRM_ERROR("Unknown GART type: %d\n", ++ dev_priv->gart_info.type); ++ return -EINVAL; ++ } ++ ++ man->io_offset = dev_priv->gart_info.aper_base; ++ man->io_size = dev_priv->gart_info.aper_size; ++ man->io_addr = NULL; ++ break; ++ default: ++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static uint64_t ++nouveau_bo_evict_flags(struct drm_buffer_object *bo) ++{ ++ switch (bo->mem.mem_type) { ++ case DRM_BO_MEM_LOCAL: ++ case DRM_BO_MEM_TT: ++ return DRM_BO_FLAG_MEM_LOCAL; ++ default: ++ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; ++ } ++ return 0; ++} ++ ++ ++/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access ++ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. 
++ */ ++static int ++nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint32_t srch, dsth, page_count; ++ ++ /* Can happen during init/takedown */ ++ if (!dchan->chan) ++ return -EINVAL; ++ ++ srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; ++ dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; ++ if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { ++ dchan->m2mf_dma_source = srch; ++ dchan->m2mf_dma_destin = dsth; ++ ++ BEGIN_RING(NvSubM2MF, ++ NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); ++ OUT_RING (dchan->m2mf_dma_source); ++ OUT_RING (dchan->m2mf_dma_destin); ++ } ++ ++ page_count = new_mem->num_pages; ++ while (page_count) { ++ int line_count = (page_count > 2047) ? 2047 : page_count; ++ ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); ++ OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); ++ OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); ++ OUT_RING (PAGE_SIZE); /* src_pitch */ ++ OUT_RING (PAGE_SIZE); /* dst_pitch */ ++ OUT_RING (PAGE_SIZE); /* line_length */ ++ OUT_RING (line_count); ++ OUT_RING ((1<<8)|(1<<0)); ++ OUT_RING (0); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); ++ OUT_RING (0); ++ ++ page_count -= line_count; ++ } ++ ++ return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, ++ DRM_FENCE_TYPE_EXE, 0, new_mem); ++} ++ ++/* Flip pages into the GART and move if we can. */ ++static int ++nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ int ret; ++ ++ tmp_mem = *new_mem; ++ tmp_mem.mm_node = NULL; ++ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_CACHED | ++ DRM_BO_FLAG_FORCE_CACHING); ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = drm_ttm_bind(bo->ttm, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); ++ ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ return ret; ++} ++ ++static int ++nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ else ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ else { ++ if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_bo_flush_ttm(struct drm_ttm *ttm) ++{ ++} ++ ++static uint32_t nouveau_mem_prios[] = { ++ DRM_BO_MEM_PRIV0, ++ DRM_BO_MEM_VRAM, 
++ DRM_BO_MEM_TT, ++ DRM_BO_MEM_LOCAL ++}; ++static uint32_t nouveau_busy_prios[] = { ++ DRM_BO_MEM_TT, ++ DRM_BO_MEM_PRIV0, ++ DRM_BO_MEM_VRAM, ++ DRM_BO_MEM_LOCAL ++}; ++ ++struct drm_bo_driver nouveau_bo_driver = { ++ .mem_type_prio = nouveau_mem_prios, ++ .mem_busy_prio = nouveau_busy_prios, ++ .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), ++ .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), ++ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, ++ .fence_type = nouveau_bo_fence_type, ++ .invalidate_caches = nouveau_bo_invalidate_caches, ++ .init_mem_type = nouveau_bo_init_mem_type, ++ .evict_flags = nouveau_bo_evict_flags, ++ .move = nouveau_bo_move, ++ .ttm_cache_flush= nouveau_bo_flush_ttm, ++ .command_stream_barrier = NULL ++}; +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_dma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_dma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_dma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,172 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++int ++nouveau_dma_channel_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ struct mem_block *pushbuf; ++ int grclass, ret, i; ++ ++ DRM_DEBUG("\n"); ++ ++ pushbuf = nouveau_mem_alloc(dev, 0, 0x8000, ++ NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED, ++ (struct drm_file *)-2); ++ if (!pushbuf) { ++ DRM_ERROR("Failed to allocate DMA push buffer\n"); ++ return -ENOMEM; ++ } ++ ++ /* Allocate channel */ ++ ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2, ++ pushbuf, NvDmaFB, NvDmaTT); ++ if (ret) { ++ DRM_ERROR("Error allocating GPU channel: %d\n", ret); ++ return ret; ++ } ++ DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id); ++ ++ /* Map push buffer */ ++ drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev); ++ if (!dchan->chan->pushbuf_mem->map->handle) { ++ DRM_ERROR("Failed to ioremap push buffer\n"); ++ return -EINVAL; ++ } ++ dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle; ++ ++ /* Initialise DMA vars */ ++ dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2; ++ dchan->put = dchan->chan->pushbuf_base >> 2; ++ dchan->cur = dchan->put; ++ dchan->free = dchan->max - dchan->cur; ++ ++ /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ++ dchan->free -= NOUVEAU_DMA_SKIPS; ++ dchan->push_free = NOUVEAU_DMA_SKIPS; ++ for (i=0; i < NOUVEAU_DMA_SKIPS; i++) ++ OUT_RING(0); ++ ++ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */ ++ if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1, ++ &dchan->notify0_offset))) { ++ DRM_ERROR("Error allocating NvNotify0: %d\n", ret); ++ return ret; ++ } ++ ++ /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ ++ if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT; ++ else grclass = NV50_MEMORY_TO_MEMORY_FORMAT; ++ if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) { ++ DRM_ERROR("Error creating NvM2MF: %d\n", ret); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF, ++ gpuobj, NULL))) { ++ DRM_ERROR("Error referencing NvM2MF: %d\n", ret); ++ return ret; ++ } ++ dchan->m2mf_dma_source = NvDmaFB; ++ dchan->m2mf_dma_destin = NvDmaFB; ++ ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1); ++ OUT_RING (NvM2MF); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1); ++ OUT_RING (NvNotify0); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); ++ OUT_RING (dchan->m2mf_dma_source); ++ OUT_RING (dchan->m2mf_dma_destin); ++ FIRE_RING(); ++ ++ return 0; ++} ++ ++void ++nouveau_dma_channel_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ ++ DRM_DEBUG("\n"); ++ ++ if (dchan->chan) { ++ nouveau_fifo_free(dchan->chan); ++ dchan->chan = NULL; ++ } ++} ++ ++#define READ_GET() ((NV_READ(dchan->chan->get) - \ ++ dchan->chan->pushbuf_base) >> 2) ++#define WRITE_PUT(val) do { \ ++ NV_WRITE(dchan->chan->put, \ ++ ((val) << 2) + dchan->chan->pushbuf_base); \ ++} while(0) ++ ++int ++nouveau_dma_wait(struct drm_device *dev, int size) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ uint32_t get; ++ ++ while (dchan->free < size) { ++ get = READ_GET(); ++ ++ if (dchan->put >= get) { ++ dchan->free = 
dchan->max - dchan->cur; ++ ++ if (dchan->free < size) { ++ dchan->push_free = 1; ++ OUT_RING(0x20000000|dchan->chan->pushbuf_base); ++ if (get <= NOUVEAU_DMA_SKIPS) { ++ /*corner case - will be idle*/ ++ if (dchan->put <= NOUVEAU_DMA_SKIPS) ++ WRITE_PUT(NOUVEAU_DMA_SKIPS + 1); ++ ++ do { ++ get = READ_GET(); ++ } while (get <= NOUVEAU_DMA_SKIPS); ++ } ++ ++ WRITE_PUT(NOUVEAU_DMA_SKIPS); ++ dchan->cur = dchan->put = NOUVEAU_DMA_SKIPS; ++ dchan->free = get - (NOUVEAU_DMA_SKIPS + 1); ++ } ++ } else { ++ dchan->free = get - dchan->cur - 1; ++ } ++ } ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_dma.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_dma.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_dma.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_dma.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,96 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#ifndef __NOUVEAU_DMA_H__ ++#define __NOUVEAU_DMA_H__ ++ ++typedef enum { ++ NvSubM2MF = 0, ++} nouveau_subchannel_id_t; ++ ++typedef enum { ++ NvM2MF = 0x80039001, ++ NvDmaFB = 0x8003d001, ++ NvDmaTT = 0x8003d002, ++ NvNotify0 = 0x8003d003 ++} nouveau_object_handle_t; ++ ++#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184 ++#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c ++ ++#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c ++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c ++ ++#define BEGIN_RING(subc, mthd, cnt) do { \ ++ int push_size = (cnt) + 1; \ ++ if (dchan->push_free) { \ ++ DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \ ++ break; \ ++ } \ ++ if (dchan->free < push_size) { \ ++ if (nouveau_dma_wait(dev, push_size)) { \ ++ DRM_ERROR("FIFO timeout\n"); \ ++ break; \ ++ } \ ++ } \ ++ dchan->free -= push_size; \ ++ dchan->push_free = push_size; \ ++ OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \ ++} while(0) ++ ++#define OUT_RING(data) do { \ ++ if (dchan->push_free == 0) { \ ++ DRM_ERROR("no space left in packet\n"); \ ++ break; \ ++ } \ ++ dchan->pushbuf[dchan->cur++] = (data); \ ++ dchan->push_free--; \ ++} while(0) ++ ++#define FIRE_RING() do { \ ++ if (dchan->push_free) { \ ++ DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \ ++ break; \ ++ } \ ++ if (dchan->cur != dchan->put) { \ ++ DRM_MEMORYBARRIER(); \ ++ dchan->put = dchan->cur; \ ++ NV_WRITE(dchan->chan->put, dchan->put << 2); \ ++ } \ ++} while(0) ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,184 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __NOUVEAU_DRM_H__ ++#define __NOUVEAU_DRM_H__ ++ ++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11 ++ ++struct drm_nouveau_channel_alloc { ++ uint32_t fb_ctxdma_handle; ++ uint32_t tt_ctxdma_handle; ++ ++ int channel; ++ uint32_t put_base; ++ /* FIFO control regs */ ++ drm_handle_t ctrl; ++ int ctrl_size; ++ /* DMA command buffer */ ++ drm_handle_t cmdbuf; ++ int cmdbuf_size; ++ /* Notifier memory */ ++ drm_handle_t notifier; ++ int notifier_size; ++}; ++ ++struct drm_nouveau_channel_free { ++ int channel; ++}; ++ ++struct drm_nouveau_grobj_alloc { ++ int channel; ++ uint32_t handle; ++ int class; ++}; ++ ++#define NOUVEAU_MEM_ACCESS_RO 1 ++#define NOUVEAU_MEM_ACCESS_WO 2 ++#define NOUVEAU_MEM_ACCESS_RW 3 ++struct drm_nouveau_notifierobj_alloc { ++ int channel; ++ uint32_t handle; ++ int count; ++ ++ uint32_t offset; ++}; ++ ++struct drm_nouveau_gpuobj_free { ++ int channel; ++ uint32_t handle; ++}; ++ ++/* This is needed to avoid a race condition. ++ * Otherwise you may be writing in the fetch area. ++ * Is this large enough, as it's only 32 bytes, and the maximum fetch size is 256 bytes? ++ */ ++#define NOUVEAU_DMA_SKIPS 8 ++ ++#define NOUVEAU_MEM_FB 0x00000001 ++#define NOUVEAU_MEM_AGP 0x00000002 ++#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 ++#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008 ++#define NOUVEAU_MEM_PCI 0x00000010 ++#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020 ++#define NOUVEAU_MEM_PINNED 0x00000040 ++#define NOUVEAU_MEM_USER_BACKED 0x00000080 ++#define NOUVEAU_MEM_MAPPED 0x00000100 ++#define NOUVEAU_MEM_TILE 0x00000200 ++#define NOUVEAU_MEM_TILE_ZETA 0x00000400 ++#define NOUVEAU_MEM_INSTANCE 0x01000000 /* internal */ ++#define NOUVEAU_MEM_NOTIFIER 0x02000000 /* internal */ ++#define NOUVEAU_MEM_NOVM 0x04000000 /* internal */ ++#define NOUVEAU_MEM_USER 0x08000000 /* internal */ ++#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \ ++ NOUVEAU_MEM_NOTIFIER | \ ++ NOUVEAU_MEM_NOVM | \ ++ NOUVEAU_MEM_USER) ++ ++struct drm_nouveau_mem_alloc { ++ int flags; ++ int alignment; ++ uint64_t size; // in bytes ++ uint64_t offset; ++ drm_handle_t map_handle; ++}; ++ ++struct drm_nouveau_mem_free { ++ uint64_t offset; ++ int flags; ++}; ++ ++struct drm_nouveau_mem_tile { ++ uint64_t offset; ++ uint64_t delta; ++ uint64_t size; ++ int flags; ++}; ++ ++/* FIXME : maybe unify {GET,SET}PARAMs */ ++#define NOUVEAU_GETPARAM_PCI_VENDOR 3 ++#define NOUVEAU_GETPARAM_PCI_DEVICE 4 ++#define NOUVEAU_GETPARAM_BUS_TYPE 5 ++#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 ++#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 ++#define NOUVEAU_GETPARAM_FB_SIZE 8 ++#define NOUVEAU_GETPARAM_AGP_SIZE 9 ++#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 ++#define NOUVEAU_GETPARAM_CHIPSET_ID 11 ++struct drm_nouveau_getparam { ++ uint64_t param; ++ uint64_t value; ++}; ++ ++#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1 ++#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2 ++struct drm_nouveau_setparam { ++ uint64_t param; ++ uint64_t value; ++}; ++ ++enum nouveau_card_type { ++ NV_UNKNOWN =0, ++ NV_04 =4, ++ NV_05 =5, ++ NV_10 =10, ++ NV_11 =11, ++ NV_17 =17, ++ NV_20 =20, ++ NV_30 =30, ++ NV_40 =40, ++ NV_44 =44, ++ NV_50 =50, ++ NV_LAST =0xffff, ++}; ++ ++enum nouveau_bus_type { ++ NV_AGP =0, ++ NV_PCI =1, ++ NV_PCIE =2, ++}; ++ ++#define 
NOUVEAU_MAX_SAREA_CLIPRECTS 16 ++ ++struct drm_nouveau_sarea { ++ /* the cliprects */ ++ struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++}; ++ ++#define DRM_NOUVEAU_CARD_INIT 0x00 ++#define DRM_NOUVEAU_GETPARAM 0x01 ++#define DRM_NOUVEAU_SETPARAM 0x02 ++#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03 ++#define DRM_NOUVEAU_CHANNEL_FREE 0x04 ++#define DRM_NOUVEAU_GROBJ_ALLOC 0x05 ++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06 ++#define DRM_NOUVEAU_GPUOBJ_FREE 0x07 ++#define DRM_NOUVEAU_MEM_ALLOC 0x08 ++#define DRM_NOUVEAU_MEM_FREE 0x09 ++#define DRM_NOUVEAU_MEM_TILE 0x0a ++#define DRM_NOUVEAU_SUSPEND 0x0b ++#define DRM_NOUVEAU_RESUME 0x0c ++ ++#endif /* __NOUVEAU_DRM_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,120 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), ++ .class = PCI_BASE_CLASS_DISPLAY << 16, ++ .class_mask = 0xff << 16, ++ }, ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), ++ .class = PCI_BASE_CLASS_DISPLAY << 16, ++ .class_mask = 0xff << 16, ++ } ++}; ++ ++extern struct drm_ioctl_desc nouveau_ioctls[]; ++extern int nouveau_max_ioctl; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .load = nouveau_load, ++ .firstopen = nouveau_firstopen, ++ .lastclose = nouveau_lastclose, ++ .unload = nouveau_unload, ++ .preclose = nouveau_preclose, ++ .irq_preinstall = nouveau_irq_preinstall, ++ .irq_postinstall = nouveau_irq_postinstall, ++ .irq_uninstall = nouveau_irq_uninstall, ++ .irq_handler = nouveau_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = nouveau_ioctls, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = nouveau_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .bo_driver = &nouveau_bo_driver, ++ .fence_driver = &nouveau_fence_driver, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++#ifdef GIT_REVISION ++ .date = GIT_REVISION, ++#else ++ .date = DRIVER_DATE, ++#endif ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init nouveau_init(void) ++{ ++ driver.num_ioctls = nouveau_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit nouveau_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(nouveau_init); ++module_exit(nouveau_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,621 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __NOUVEAU_DRV_H__ ++#define __NOUVEAU_DRV_H__ ++ ++#define DRIVER_AUTHOR "Stephane Marchesin" ++#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" ++ ++#define DRIVER_NAME "nouveau" ++#define DRIVER_DESC "nVidia Riva/TNT/GeForce" ++#define DRIVER_DATE "20060213" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 11 ++ ++#define NOUVEAU_FAMILY 0x0000FFFF ++#define NOUVEAU_FLAGS 0xFFFF0000 ++ ++#include "nouveau_drm.h" ++#include "nouveau_reg.h" ++ ++struct mem_block { ++ struct mem_block *next; ++ struct mem_block *prev; ++ uint64_t start; ++ uint64_t size; ++ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ ++ int flags; ++ drm_local_map_t *map; ++ drm_handle_t map_handle; ++}; ++ ++enum nouveau_flags { ++ NV_NFORCE =0x10000000, ++ NV_NFORCE2 =0x20000000 ++}; ++ ++#define NVOBJ_ENGINE_SW 0 ++#define NVOBJ_ENGINE_GR 1 ++#define NVOBJ_ENGINE_INT 0xdeadbeef ++ ++#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) ++#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) ++#define NVOBJ_FLAG_ZERO_FREE (1 << 2) ++#define NVOBJ_FLAG_FAKE (1 << 3) ++struct nouveau_gpuobj { ++ struct list_head list; ++ ++ int im_channel; ++ struct mem_block *im_pramin; ++ struct mem_block *im_backing; ++ int im_bound; ++ ++ uint32_t flags; ++ int refcount; ++ ++ uint32_t engine; ++ uint32_t class; ++ ++ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *); ++ void *priv; ++}; ++ ++struct nouveau_gpuobj_ref { ++ struct list_head list; ++ ++ struct nouveau_gpuobj *gpuobj; ++ uint32_t instance; ++ ++ int channel; ++ int handle; ++}; ++ ++struct nouveau_channel ++{ ++ struct drm_device *dev; ++ int id; ++ ++ /* owner of this fifo */ ++ struct drm_file *file_priv; ++ /* mapping of the fifo itself */ ++ drm_local_map_t *map; ++ /* mapping of the regs controling the fifo */ ++ drm_local_map_t *regs; ++ ++ /* Fencing */ ++ uint32_t next_sequence; ++ ++ /* DMA push buffer */ ++ struct nouveau_gpuobj_ref *pushbuf; ++ struct mem_block *pushbuf_mem; ++ uint32_t pushbuf_base; ++ ++ /* FIFO user control regs */ ++ uint32_t user, user_size; ++ uint32_t put; ++ uint32_t get; ++ uint32_t ref_cnt; ++ ++ /* Notifier memory */ ++ struct mem_block *notifier_block; ++ struct mem_block *notifier_heap; ++ drm_local_map_t *notifier_map; ++ ++ /* PFIFO context */ ++ struct nouveau_gpuobj_ref *ramfc; ++ ++ /* PGRAPH context */ ++ /* XXX may be merge 2 
pointers as private data ??? */ ++ struct nouveau_gpuobj_ref *ramin_grctx; ++ void *pgraph_ctx; ++ ++ /* NV50 VM */ ++ struct nouveau_gpuobj *vm_pd; ++ struct nouveau_gpuobj_ref *vm_gart_pt; ++ struct nouveau_gpuobj_ref *vm_vram_pt; ++ ++ /* Objects */ ++ struct nouveau_gpuobj_ref *ramin; /* Private instmem */ ++ struct mem_block *ramin_heap; /* Private PRAMIN heap */ ++ struct nouveau_gpuobj_ref *ramht; /* Hash table */ ++ struct list_head ramht_refs; /* Objects referenced by RAMHT */ ++}; ++ ++struct nouveau_drm_channel { ++ struct nouveau_channel *chan; ++ ++ /* DMA state */ ++ int max, put, cur, free; ++ int push_free; ++ volatile uint32_t *pushbuf; ++ ++ /* Notifiers */ ++ uint32_t notify0_offset; ++ ++ /* Buffer moves */ ++ uint32_t m2mf_dma_source; ++ uint32_t m2mf_dma_destin; ++}; ++ ++struct nouveau_config { ++ struct { ++ int location; ++ int size; ++ } cmdbuf; ++}; ++ ++struct nouveau_instmem_engine { ++ void *priv; ++ ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++ ++ int (*populate)(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++ void (*clear)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*bind)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); ++}; ++ ++struct nouveau_mc_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++}; ++ ++struct nouveau_timer_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++ uint64_t (*read)(struct drm_device *dev); ++}; ++ ++struct nouveau_fb_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++}; ++ ++struct nouveau_fifo_engine { ++ void *priv; ++ ++ int channels; ++ ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ ++ int (*channel_id)(struct drm_device *); ++ ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ int (*load_context)(struct nouveau_channel *); ++ int (*save_context)(struct nouveau_channel *); ++}; ++ ++struct nouveau_pgraph_engine { ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ int (*load_context)(struct nouveau_channel *); ++ int (*save_context)(struct nouveau_channel *); ++}; ++ ++struct nouveau_engine { ++ struct nouveau_instmem_engine instmem; ++ struct nouveau_mc_engine mc; ++ struct nouveau_timer_engine timer; ++ struct nouveau_fb_engine fb; ++ struct nouveau_pgraph_engine graph; ++ struct nouveau_fifo_engine fifo; ++}; ++ ++#define NOUVEAU_MAX_CHANNEL_NR 128 ++struct drm_nouveau_private { ++ enum { ++ NOUVEAU_CARD_INIT_DOWN, ++ NOUVEAU_CARD_INIT_DONE, ++ NOUVEAU_CARD_INIT_FAILED ++ } init_state; ++ ++ int ttm; ++ ++ /* the card type, takes NV_* as values */ ++ int card_type; ++ /* exact chipset, derived from NV_PMC_BOOT_0 */ ++ int chipset; ++ int flags; ++ ++ drm_local_map_t *mmio; ++ drm_local_map_t *fb; ++ drm_local_map_t *ramin; /* NV40 onwards */ ++ ++ int fifo_alloc_count; ++ struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; ++ ++ struct nouveau_engine Engine; ++ struct nouveau_drm_channel channel; ++ ++ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ ++ struct nouveau_gpuobj *ramht; ++ uint32_t ramin_rsvd_vram; ++ uint32_t ramht_offset; ++ uint32_t ramht_size; ++ uint32_t ramht_bits; ++ uint32_t ramfc_offset; ++ 
uint32_t ramfc_size; ++ uint32_t ramro_offset; ++ uint32_t ramro_size; ++ ++ /* base physical adresses */ ++ uint64_t fb_phys; ++ uint64_t fb_available_size; ++ ++ struct { ++ enum { ++ NOUVEAU_GART_NONE = 0, ++ NOUVEAU_GART_AGP, ++ NOUVEAU_GART_SGDMA ++ } type; ++ uint64_t aper_base; ++ uint64_t aper_size; ++ ++ struct nouveau_gpuobj *sg_ctxdma; ++ struct page *sg_dummy_page; ++ dma_addr_t sg_dummy_bus; ++ ++ /* nottm hack */ ++ struct drm_ttm_backend *sg_be; ++ unsigned long sg_handle; ++ } gart_info; ++ ++ /* G8x global VRAM page table */ ++ struct nouveau_gpuobj *vm_vram_pt; ++ ++ /* the mtrr covering the FB */ ++ int fb_mtrr; ++ ++ struct mem_block *agp_heap; ++ struct mem_block *fb_heap; ++ struct mem_block *fb_nomap_heap; ++ struct mem_block *ramin_heap; ++ struct mem_block *pci_heap; ++ ++ /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ ++ uint32_t ctx_table_size; ++ struct nouveau_gpuobj_ref *ctx_table; ++ ++ struct nouveau_config config; ++ ++ struct list_head gpuobj_list; ++ ++ struct nouveau_suspend_resume { ++ uint32_t fifo_mode; ++ uint32_t graph_ctx_control; ++ uint32_t graph_state; ++ uint32_t *ramin_copy; ++ uint64_t ramin_size; ++ } susres; ++}; ++ ++#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ ++ struct drm_nouveau_private *nv = dev->dev_private; \ ++ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ ++ DRM_ERROR("called without init\n"); \ ++ return -EINVAL; \ ++ } \ ++} while(0) ++ ++#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \ ++ struct drm_nouveau_private *nv = dev->dev_private; \ ++ if (!nouveau_fifo_owner(dev, (cl), (id))) { \ ++ DRM_ERROR("pid %d doesn't own channel %d\n", \ ++ DRM_CURRENTPID, (id)); \ ++ return -EPERM; \ ++ } \ ++ (ch) = nv->fifos[(id)]; \ ++} while(0) ++ ++/* nouveau_state.c */ ++extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); ++extern int nouveau_load(struct drm_device *, unsigned long flags); ++extern int nouveau_firstopen(struct drm_device *); ++extern void nouveau_lastclose(struct drm_device *); ++extern int nouveau_unload(struct drm_device *); ++extern int nouveau_ioctl_getparam(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_setparam(struct drm_device *, void *data, ++ struct drm_file *); ++extern void nouveau_wait_for_idle(struct drm_device *); ++extern int nouveau_card_init(struct drm_device *); ++extern int nouveau_ioctl_card_init(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_suspend(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_resume(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_mem.c */ ++extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, ++ uint64_t size); ++extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, ++ uint64_t size, int align2, ++ struct drm_file *, int tail); ++extern void nouveau_mem_takedown(struct mem_block **heap); ++extern void nouveau_mem_free_block(struct mem_block *); ++extern uint64_t nouveau_mem_fb_amount(struct drm_device *); ++extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); ++extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_mem_free(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_mem_tile(struct drm_device *, void *data, ++ struct drm_file *); ++extern struct mem_block* nouveau_mem_alloc(struct drm_device *, ++ int alignment, uint64_t 
size, ++ int flags, struct drm_file *); ++extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*); ++extern int nouveau_mem_init(struct drm_device *); ++extern int nouveau_mem_init_ttm(struct drm_device *); ++extern void nouveau_mem_close(struct drm_device *); ++ ++/* nouveau_notifier.c */ ++extern int nouveau_notifier_init_channel(struct nouveau_channel *); ++extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); ++extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, ++ int cout, uint32_t *offset); ++extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_fifo.c */ ++extern int nouveau_fifo_init(struct drm_device *); ++extern int nouveau_fifo_ctx_size(struct drm_device *); ++extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *); ++extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, ++ int channel); ++extern int nouveau_fifo_alloc(struct drm_device *dev, ++ struct nouveau_channel **chan, ++ struct drm_file *file_priv, ++ struct mem_block *pushbuf, ++ uint32_t fb_ctxdma, uint32_t tt_ctxdma); ++extern void nouveau_fifo_free(struct nouveau_channel *); ++extern int nouveau_channel_idle(struct nouveau_channel *chan); ++ ++/* nouveau_object.c */ ++extern int nouveau_gpuobj_early_init(struct drm_device *); ++extern int nouveau_gpuobj_init(struct drm_device *); ++extern void nouveau_gpuobj_takedown(struct drm_device *); ++extern void nouveau_gpuobj_late_takedown(struct drm_device *); ++extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, ++ uint32_t vram_h, uint32_t tt_h); ++extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); ++extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, ++ int size, int align, uint32_t flags, ++ struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, ++ uint32_t handle, struct nouveau_gpuobj *, ++ struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_ref_del(struct drm_device *, ++ struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, ++ struct nouveau_gpuobj_ref **ref_ret); ++extern int nouveau_gpuobj_new_ref(struct drm_device *, ++ struct nouveau_channel *alloc_chan, ++ struct nouveau_channel *ref_chan, ++ uint32_t handle, int size, int align, ++ uint32_t flags, struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_new_fake(struct drm_device *, ++ uint32_t p_offset, uint32_t b_offset, ++ uint32_t size, uint32_t flags, ++ struct nouveau_gpuobj **, ++ struct nouveau_gpuobj_ref**); ++extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, ++ uint64_t offset, uint64_t size, int access, ++ int target, struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, ++ uint64_t offset, uint64_t size, ++ int access, struct nouveau_gpuobj **, ++ uint32_t *o_ret); ++extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, ++ struct nouveau_gpuobj **); ++extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_irq.c */ ++extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); ++extern 
void nouveau_irq_preinstall(struct drm_device *); ++extern int nouveau_irq_postinstall(struct drm_device *); ++extern void nouveau_irq_uninstall(struct drm_device *); ++ ++/* nouveau_sgdma.c */ ++extern int nouveau_sgdma_init(struct drm_device *); ++extern void nouveau_sgdma_takedown(struct drm_device *); ++extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, ++ uint32_t *page); ++extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); ++extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); ++extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); ++ ++/* nouveau_dma.c */ ++extern int nouveau_dma_channel_init(struct drm_device *); ++extern void nouveau_dma_channel_takedown(struct drm_device *); ++extern int nouveau_dma_wait(struct drm_device *, int size); ++ ++/* nv04_fb.c */ ++extern int nv04_fb_init(struct drm_device *); ++extern void nv04_fb_takedown(struct drm_device *); ++ ++/* nv10_fb.c */ ++extern int nv10_fb_init(struct drm_device *); ++extern void nv10_fb_takedown(struct drm_device *); ++ ++/* nv40_fb.c */ ++extern int nv40_fb_init(struct drm_device *); ++extern void nv40_fb_takedown(struct drm_device *); ++ ++/* nv04_fifo.c */ ++extern int nv04_fifo_channel_id(struct drm_device *); ++extern int nv04_fifo_create_context(struct nouveau_channel *); ++extern void nv04_fifo_destroy_context(struct nouveau_channel *); ++extern int nv04_fifo_load_context(struct nouveau_channel *); ++extern int nv04_fifo_save_context(struct nouveau_channel *); ++ ++/* nv10_fifo.c */ ++extern int nv10_fifo_channel_id(struct drm_device *); ++extern int nv10_fifo_create_context(struct nouveau_channel *); ++extern void nv10_fifo_destroy_context(struct nouveau_channel *); ++extern int nv10_fifo_load_context(struct nouveau_channel *); ++extern int nv10_fifo_save_context(struct nouveau_channel *); ++ ++/* nv40_fifo.c */ ++extern int nv40_fifo_init(struct drm_device *); ++extern int nv40_fifo_create_context(struct nouveau_channel *); ++extern void nv40_fifo_destroy_context(struct nouveau_channel *); ++extern int nv40_fifo_load_context(struct nouveau_channel *); ++extern int nv40_fifo_save_context(struct nouveau_channel *); ++ ++/* nv50_fifo.c */ ++extern int nv50_fifo_init(struct drm_device *); ++extern void nv50_fifo_takedown(struct drm_device *); ++extern int nv50_fifo_channel_id(struct drm_device *); ++extern int nv50_fifo_create_context(struct nouveau_channel *); ++extern void nv50_fifo_destroy_context(struct nouveau_channel *); ++extern int nv50_fifo_load_context(struct nouveau_channel *); ++extern int nv50_fifo_save_context(struct nouveau_channel *); ++ ++/* nv04_graph.c */ ++extern void nouveau_nv04_context_switch(struct drm_device *); ++extern int nv04_graph_init(struct drm_device *); ++extern void nv04_graph_takedown(struct drm_device *); ++extern int nv04_graph_create_context(struct nouveau_channel *); ++extern void nv04_graph_destroy_context(struct nouveau_channel *); ++extern int nv04_graph_load_context(struct nouveau_channel *); ++extern int nv04_graph_save_context(struct nouveau_channel *); ++ ++/* nv10_graph.c */ ++extern void nouveau_nv10_context_switch(struct drm_device *); ++extern int nv10_graph_init(struct drm_device *); ++extern void nv10_graph_takedown(struct drm_device *); ++extern int nv10_graph_create_context(struct nouveau_channel *); ++extern void nv10_graph_destroy_context(struct nouveau_channel *); ++extern int nv10_graph_load_context(struct nouveau_channel *); ++extern int nv10_graph_save_context(struct nouveau_channel *); 
++ ++/* nv20_graph.c */ ++extern int nv20_graph_create_context(struct nouveau_channel *); ++extern void nv20_graph_destroy_context(struct nouveau_channel *); ++extern int nv20_graph_load_context(struct nouveau_channel *); ++extern int nv20_graph_save_context(struct nouveau_channel *); ++extern int nv20_graph_init(struct drm_device *); ++extern void nv20_graph_takedown(struct drm_device *); ++extern int nv30_graph_init(struct drm_device *); ++ ++/* nv40_graph.c */ ++extern int nv40_graph_init(struct drm_device *); ++extern void nv40_graph_takedown(struct drm_device *); ++extern int nv40_graph_create_context(struct nouveau_channel *); ++extern void nv40_graph_destroy_context(struct nouveau_channel *); ++extern int nv40_graph_load_context(struct nouveau_channel *); ++extern int nv40_graph_save_context(struct nouveau_channel *); ++ ++/* nv50_graph.c */ ++extern int nv50_graph_init(struct drm_device *); ++extern void nv50_graph_takedown(struct drm_device *); ++extern int nv50_graph_create_context(struct nouveau_channel *); ++extern void nv50_graph_destroy_context(struct nouveau_channel *); ++extern int nv50_graph_load_context(struct nouveau_channel *); ++extern int nv50_graph_save_context(struct nouveau_channel *); ++ ++/* nv04_instmem.c */ ++extern int nv04_instmem_init(struct drm_device *); ++extern void nv04_instmem_takedown(struct drm_device *); ++extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++ ++/* nv50_instmem.c */ ++extern int nv50_instmem_init(struct drm_device *); ++extern void nv50_instmem_takedown(struct drm_device *); ++extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++ ++/* nv04_mc.c */ ++extern int nv04_mc_init(struct drm_device *); ++extern void nv04_mc_takedown(struct drm_device *); ++ ++/* nv40_mc.c */ ++extern int nv40_mc_init(struct drm_device *); ++extern void nv40_mc_takedown(struct drm_device *); ++ ++/* nv50_mc.c */ ++extern int nv50_mc_init(struct drm_device *); ++extern void nv50_mc_takedown(struct drm_device *); ++ ++/* nv04_timer.c */ ++extern int nv04_timer_init(struct drm_device *); ++extern uint64_t nv04_timer_read(struct drm_device *); ++extern void nv04_timer_takedown(struct drm_device *); ++ ++extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg); ++ ++/* nouveau_buffer.c */ ++extern struct drm_bo_driver nouveau_bo_driver; ++ ++/* nouveau_fence.c */ ++extern struct drm_fence_driver nouveau_fence_driver; ++extern void nouveau_fence_handler(struct drm_device *dev, int channel); ++ ++#if defined(__powerpc__) ++#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) ++#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) ) ++#else ++#define NV_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#endif ++ ++/* PRAMIN access */ ++#if defined(__powerpc__) ++#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o)) 
++#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v)) ++#else ++#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o)) ++#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v)) ++#endif ++ ++#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2)) ++#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v)) ++ ++#endif /* __NOUVEAU_DRV_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_fence.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_fence.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_fence.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_fence.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,119 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++static int ++nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); ++ ++ /* DRM's channel always uses IRQs to signal fences */ ++ if (class == dev_priv->channel.chan->id) ++ return 1; ++ ++ /* Other channels don't use IRQs at all yet */ ++ return 0; ++} ++ ++static int ++nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags, ++ uint32_t *breadcrumb, uint32_t *native_type) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[class]; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ ++ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); ++ ++ /* We can't emit fences on client channels, update sequence number ++ * and userspace will emit the fence ++ */ ++ *breadcrumb = ++chan->next_sequence; ++ *native_type = DRM_FENCE_TYPE_EXE; ++ if (chan != dchan->chan) { ++ DRM_DEBUG("user fence 0x%08x\n", *breadcrumb); ++ return 0; ++ } ++ ++ DRM_DEBUG("emit 0x%08x\n", *breadcrumb); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1); ++ OUT_RING (*breadcrumb); ++ BEGIN_RING(NvSubM2MF, 0x0150, 1); ++ OUT_RING (0); ++ FIRE_RING (); ++ ++ return 0; ++} ++ ++static void ++nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; ++ struct nouveau_channel *chan = dev_priv->fifos[class]; ++ ++ DRM_DEBUG("class=%d\n", class); ++ DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types); ++ ++ if (waiting_types & DRM_FENCE_TYPE_EXE) { ++ uint32_t sequence = NV_READ(chan->ref_cnt); ++ ++ DRM_DEBUG("got 0x%08x\n", sequence); ++ drm_fence_handler(dev, class, sequence, waiting_types, 0); ++ } ++} ++ ++void ++nouveau_fence_handler(struct drm_device *dev, int channel) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[channel]; ++ ++ DRM_DEBUG("class=%d\n", channel); ++ ++ write_lock(&fm->lock); ++ nouveau_fence_poll(dev, channel, fc->waiting_types); ++ write_unlock(&fm->lock); ++} ++ ++struct drm_fence_driver nouveau_fence_driver = { ++ .num_classes = 8, ++ .wrap_diff = (1 << 30), ++ .flush_diff = (1 << 29), ++ .sequence_mask = 0xffffffffU, ++ .has_irq = nouveau_fence_has_irq, ++ .emit = nouveau_fence_emit, ++ .flush = NULL, ++ .poll = nouveau_fence_poll, ++ .needed_flush = NULL, ++ .wait = NULL ++}; +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_fifo.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_fifo.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_fifo.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,601 @@ ++/* ++ * Copyright 2005-2006 Stephane Marchesin ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++ ++/* returns the size of fifo context */ ++int nouveau_fifo_ctx_size(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ ++ if (dev_priv->card_type >= NV_40) ++ return 128; ++ else if (dev_priv->card_type >= NV_17) ++ return 64; ++ else ++ return 32; ++} ++ ++/*********************************** ++ * functions doing the actual work ++ ***********************************/ ++ ++static int nouveau_fifo_instmem_configure(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PFIFO_RAMHT, ++ (0x03 << 24) /* search 128 */ | ++ ((dev_priv->ramht_bits - 9) << 16) | ++ (dev_priv->ramht_offset >> 8) ++ ); ++ ++ NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); ++ ++ switch(dev_priv->card_type) ++ { ++ case NV_40: ++ switch (dev_priv->chipset) { ++ case 0x47: ++ case 0x49: ++ case 0x4b: ++ NV_WRITE(0x2230, 1); ++ break; ++ default: ++ break; ++ } ++ NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); ++ break; ++ case NV_44: ++ NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | ++ (2 << 16)); ++ break; ++ case NV_30: ++ case NV_20: ++ case NV_17: ++ NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) | ++ (1 << 16) /* 64 Bytes entry*/); ++ /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ ++ break; ++ case NV_11: ++ case NV_10: ++ case NV_04: ++ NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); ++ break; ++ } ++ ++ return 0; ++} ++ ++int nouveau_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PFIFO); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PFIFO); ++ ++ /* Enable PFIFO error reporting */ ++ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); ++ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); ++ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ ++ ret = nouveau_fifo_instmem_configure(dev); ++ if (ret) { ++ DRM_ERROR("Failed to configure instance memory\n"); ++ return ret; ++ } ++ ++ /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */ ++ ++ DRM_DEBUG("Setting defaults for remaining PFIFO regs\n"); ++ ++ /* All channels into PIO mode */ ++ NV_WRITE(NV04_PFIFO_MODE, 
0x00000000); ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ /* Channel 0 active, PIO mode */ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000); ++ /* PUT and GET to 0 */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000); ++ /* No cmdbuf object */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000); ++ NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0x00000000); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); ++ ++ /* FIXME on NV04 */ ++ if (dev_priv->card_type >= NV_10) { ++ NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0); ++ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ ); ++ if (dev_priv->card_type >= NV_40) ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001); ++ else ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000); ++ } else { ++ NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0); ++ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ ); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000); ++ } ++ ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ return 0; ++} ++ ++static int ++nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct mem_block *pb = chan->pushbuf_mem; ++ struct nouveau_gpuobj *pushbuf = NULL; ++ int ret; ++ ++ if (pb->flags & NOUVEAU_MEM_AGP) { ++ ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ &pushbuf, ++ &chan->pushbuf_base); ++ } else ++ if (pb->flags & NOUVEAU_MEM_PCI) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_PCI_NONLINEAR, ++ &pushbuf); ++ chan->pushbuf_base = 0; ++ } else if (dev_priv->card_type != NV_04) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_VIDMEM, &pushbuf); ++ chan->pushbuf_base = 0; ++ } else { ++ /* NV04 cmdbuf hack, from original ddx.. not sure of it's ++ * exact reason for existing :) PCI access to cmdbuf in ++ * VRAM. 
++ */ ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start + ++ drm_get_resource_start(dev, 1), ++ pb->size, NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_PCI, &pushbuf); ++ chan->pushbuf_base = 0; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, ++ &chan->pushbuf))) { ++ DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); ++ if (pushbuf != dev_priv->gart_info.sg_ctxdma) ++ nouveau_gpuobj_del(dev, &pushbuf); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct mem_block * ++nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_config *config = &dev_priv->config; ++ struct mem_block *pb; ++ int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); ++ ++ /* Defaults for unconfigured values */ ++ if (!config->cmdbuf.location) ++ config->cmdbuf.location = NOUVEAU_MEM_FB; ++ if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size) ++ config->cmdbuf.size = pb_min_size; ++ ++ pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, ++ config->cmdbuf.location | NOUVEAU_MEM_MAPPED, ++ (struct drm_file *)-2); ++ if (!pb) ++ DRM_ERROR("Couldn't allocate DMA push buffer.\n"); ++ ++ return pb; ++} ++ ++/* allocates and initializes a fifo for user space consumption */ ++int ++nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ++ struct drm_file *file_priv, struct mem_block *pushbuf, ++ uint32_t vram_handle, uint32_t tt_handle) ++{ ++ int ret; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_channel *chan; ++ int channel; ++ ++ /* ++ * Alright, here is the full story ++ * Nvidia cards have multiple hw fifo contexts (praise them for that, ++ * no complicated crash-prone context switches) ++ * We allocate a new context for each app and let it write to it directly ++ * (woo, full userspace command submission !) ++ * When there are no more contexts, you lost ++ */ ++ for (channel = 0; channel < engine->fifo.channels; channel++) { ++ if (dev_priv->fifos[channel] == NULL) ++ break; ++ } ++ ++ /* no more fifos. you lost. 
*/ ++ if (channel == engine->fifo.channels) ++ return -EINVAL; ++ ++ dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel), ++ DRM_MEM_DRIVER); ++ if (!dev_priv->fifos[channel]) ++ return -ENOMEM; ++ dev_priv->fifo_alloc_count++; ++ chan = dev_priv->fifos[channel]; ++ chan->dev = dev; ++ chan->id = channel; ++ chan->file_priv = file_priv; ++ chan->pushbuf_mem = pushbuf; ++ ++ DRM_INFO("Allocating FIFO number %d\n", channel); ++ ++ /* Locate channel's user control regs */ ++ if (dev_priv->card_type < NV_40) { ++ chan->user = NV03_USER(channel); ++ chan->user_size = NV03_USER_SIZE; ++ chan->put = NV03_USER_DMA_PUT(channel); ++ chan->get = NV03_USER_DMA_GET(channel); ++ chan->ref_cnt = NV03_USER_REF_CNT(channel); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ chan->user = NV40_USER(channel); ++ chan->user_size = NV40_USER_SIZE; ++ chan->put = NV40_USER_DMA_PUT(channel); ++ chan->get = NV40_USER_DMA_GET(channel); ++ chan->ref_cnt = NV40_USER_REF_CNT(channel); ++ } else { ++ chan->user = NV50_USER(channel); ++ chan->user_size = NV50_USER_SIZE; ++ chan->put = NV50_USER_DMA_PUT(channel); ++ chan->get = NV50_USER_DMA_GET(channel); ++ chan->ref_cnt = NV50_USER_REF_CNT(channel); ++ } ++ ++ /* Allocate space for per-channel fixed notifier memory */ ++ ret = nouveau_notifier_init_channel(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Setup channel's default objects */ ++ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Create a dma object for the push buffer */ ++ ret = nouveau_fifo_pushbuf_ctxdma_init(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ nouveau_wait_for_idle(dev); ++ ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ /* Create a graphics context for new channel */ ++ ret = engine->graph.create_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Construct inital RAMFC for new channel */ ++ ret = engine->fifo.create_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* setup channel's default get/put values ++ * XXX: quite possibly extremely pointless.. ++ */ ++ NV_WRITE(chan->get, chan->pushbuf_base); ++ NV_WRITE(chan->put, chan->pushbuf_base); ++ ++ /* If this is the first channel, setup PFIFO ourselves. For any ++ * other case, the GPU will handle this when it switches contexts. 
++ */ ++ if (dev_priv->fifo_alloc_count == 1) { ++ ret = engine->fifo.load_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ ret = engine->graph.load_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ } ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 1); ++ ++ DRM_INFO("%s: initialised FIFO %d\n", __func__, channel); ++ *chan_ret = chan; ++ return 0; ++} ++ ++int ++nouveau_channel_idle(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t caches; ++ int idle; ++ ++ caches = NV_READ(NV03_PFIFO_CACHES); ++ NV_WRITE(NV03_PFIFO_CACHES, caches & ~1); ++ ++ if (engine->fifo.channel_id(dev) != chan->id) { ++ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; ++ ++ if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1)) ++ idle = 0; ++ else ++ idle = 1; ++ } else { ++ idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) == ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHES, caches); ++ return idle; ++} ++ ++/* stops a fifo */ ++void nouveau_fifo_free(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint64_t t_start; ++ ++ DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id); ++ ++ /* Give the channel a chance to idle, wait 2s (hopefully) */ ++ t_start = engine->timer.read(dev); ++ while (!nouveau_channel_idle(chan)) { ++ if (engine->timer.read(dev) - t_start > 2000000000ULL) { ++ DRM_ERROR("Failed to idle channel %d before destroy." ++ "Prepare for strangeness..\n", chan->id); ++ break; ++ } ++ } ++ ++ /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched ++ * from CACHE1 too? 
++ */ ++ ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ // FIXME XXX needs more code ++ ++ engine->fifo.destroy_context(chan); ++ ++ /* Cleanup PGRAPH state */ ++ engine->graph.destroy_context(chan); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ ++ /* Deallocate push buffer */ ++ nouveau_gpuobj_ref_del(dev, &chan->pushbuf); ++ if (chan->pushbuf_mem) { ++ nouveau_mem_free(dev, chan->pushbuf_mem); ++ chan->pushbuf_mem = NULL; ++ } ++ ++ /* Destroy objects belonging to the channel */ ++ nouveau_gpuobj_channel_takedown(chan); ++ ++ nouveau_notifier_takedown_channel(chan); ++ ++ dev_priv->fifos[chan->id] = NULL; ++ dev_priv->fifo_alloc_count--; ++ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); ++} ++ ++/* cleanups all the fifos from file_priv */ ++void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ DRM_DEBUG("clearing FIFO enables from file_priv\n"); ++ for(i = 0; i < engine->fifo.channels; i++) { ++ struct nouveau_channel *chan = dev_priv->fifos[i]; ++ ++ if (chan && chan->file_priv == file_priv) ++ nouveau_fifo_free(chan); ++ } ++} ++ ++int ++nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv, ++ int channel) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ if (channel >= engine->fifo.channels) ++ return 0; ++ if (dev_priv->fifos[channel] == NULL) ++ return 0; ++ return (dev_priv->fifos[channel]->file_priv == file_priv); ++} ++ ++/*********************************** ++ * ioctls wrapping the functions ++ ***********************************/ ++ ++static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_channel_alloc *init = data; ++ struct drm_map_list *entry; ++ struct nouveau_channel *chan; ++ struct mem_block *pushbuf; ++ int res; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) ++ return -EINVAL; ++ ++ pushbuf = nouveau_fifo_user_pushbuf_alloc(dev); ++ if (!pushbuf) ++ return -ENOMEM; ++ ++ res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf, ++ init->fb_ctxdma_handle, ++ init->tt_ctxdma_handle); ++ if (res) ++ return res; ++ init->channel = chan->id; ++ init->put_base = chan->pushbuf_base; ++ ++ /* make the fifo available to user space */ ++ /* first, the fifo control regs */ ++ init->ctrl = dev_priv->mmio->offset + chan->user; ++ init->ctrl_size = chan->user_size; ++ res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS, ++ 0, &chan->regs); ++ if (res != 0) ++ return res; ++ ++ entry = drm_find_matching_map(dev, chan->regs); ++ if (!entry) ++ return -EINVAL; ++ init->ctrl = entry->user_token; ++ ++ /* pass back FIFO map info to the caller */ ++ init->cmdbuf = chan->pushbuf_mem->map_handle; ++ init->cmdbuf_size = chan->pushbuf_mem->size; ++ ++ /* and the notifier block */ ++ init->notifier = 
chan->notifier_block->map_handle; ++ init->notifier_size = chan->notifier_block->size; ++ ++ return 0; ++} ++ ++static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_channel_free *cfree = data; ++ struct nouveau_channel *chan; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); ++ ++ nouveau_fifo_free(chan); ++ return 0; ++} ++ ++/*********************************** ++ * finally, the ioctl table ++ ***********************************/ ++ ++struct drm_ioctl_desc nouveau_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH), ++}; ++ ++int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_ioc32.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_ioc32.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_ioc32.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,72 @@ ++/** ++ * \file mga_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the MGA DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ */ ++ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++ ++#include "nouveau_drm.h" ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++#if 0 ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) ++ fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; ++#endif ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,568 @@ ++/* ++ * Copyright (C) 2006 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++/* ++ * Authors: ++ * Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_reg.h" ++#include "nouveau_swmthd.h" ++ ++void ++nouveau_irq_preinstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master disable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++} ++ ++int ++nouveau_irq_postinstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master enable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); ++ ++ return 0; ++} ++ ++void ++nouveau_irq_uninstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master disable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++} ++ ++static void ++nouveau_fifo_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t status, reassign; ++ ++ reassign = NV_READ(NV03_PFIFO_CACHES) & 1; ++ while ((status = NV_READ(NV03_PFIFO_INTR_0))) { ++ uint32_t chid, get; ++ ++ NV_WRITE(NV03_PFIFO_CACHES, 0); ++ ++ chid = engine->fifo.channel_id(dev); ++ get = NV_READ(NV03_PFIFO_CACHE1_GET); ++ ++ if (status & NV_PFIFO_INTR_CACHE_ERROR) { ++ uint32_t mthd, data; ++ int ptr; ++ ++ ptr = get >> 2; ++ if (dev_priv->card_type < NV_40) { ++ mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr)); ++ data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr)); ++ } else { ++ mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr)); ++ data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr)); ++ } ++ ++ DRM_INFO("PFIFO_CACHE_ERROR - " ++ "Ch %d/%d Mthd 0x%04x Data 0x%08x\n", ++ chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1); ++ ++ status &= ~NV_PFIFO_INTR_CACHE_ERROR; ++ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); ++ } ++ ++ if (status & NV_PFIFO_INTR_DMA_PUSHER) { ++ DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid); ++ ++ status &= ~NV_PFIFO_INTR_DMA_PUSHER; ++ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); ++ if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get) ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PFIFO_INTR_0, status); ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHES, reassign); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); ++} ++ ++struct nouveau_bitfield_names { ++ uint32_t mask; ++ const char * name; ++}; ++ ++static struct nouveau_bitfield_names nouveau_nstatus_names[] = ++{ ++ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } ++}; ++ ++static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] = ++{ ++ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } ++}; ++ ++static struct nouveau_bitfield_names nouveau_nsource_names[] = ++{ ++ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, ++ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" 
}, ++ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, ++ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, ++ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, ++ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, ++ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, ++ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, ++ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, ++}; ++ ++static void ++nouveau_print_bitfield_names(uint32_t value, ++ const struct nouveau_bitfield_names *namelist, ++ const int namelist_len) ++{ ++ int i; ++ for(i=0; idev_private; ++ uint32_t inst; ++ int i; ++ ++ if (dev_priv->card_type < NV_40) ++ return dev_priv->Engine.fifo.channels; ++ else ++ if (dev_priv->card_type < NV_50) ++ inst = (NV_READ(0x40032c) & 0xfffff) << 4; ++ else ++ inst = NV_READ(0x40032c) & 0xfffff; ++ ++ for (i = 0; i < dev_priv->Engine.fifo.channels; i++) { ++ struct nouveau_channel *chan = dev_priv->fifos[i]; ++ ++ if (!chan || !chan->ramin_grctx) ++ continue; ++ ++ if (dev_priv->card_type < NV_50) { ++ if (inst == chan->ramin_grctx->instance) ++ break; ++ } else { ++ if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0)) ++ break; ++ } ++ } ++ ++ return i; ++} ++ ++static int ++nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int channel; ++ ++ if (dev_priv->card_type < NV_10) ++ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; ++ else ++ if (dev_priv->card_type < NV_40) ++ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; ++ else ++ channel = nouveau_graph_chid_from_grctx(dev); ++ ++ if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { ++ DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel); ++ return -EINVAL; ++ } ++ ++ *channel_ret = channel; ++ return 0; ++} ++ ++struct nouveau_pgraph_trap { ++ int channel; ++ int class; ++ int subc, mthd, size; ++ uint32_t data, data2; ++ uint32_t nsource, nstatus; ++}; ++ ++static void ++nouveau_graph_trap_info(struct drm_device *dev, ++ struct nouveau_pgraph_trap *trap) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t address; ++ ++ trap->nsource = trap->nstatus = 0; ++ if (dev_priv->card_type < NV_50) { ++ trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE); ++ trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS); ++ } ++ ++ if (nouveau_graph_trapped_channel(dev, &trap->channel)) ++ trap->channel = -1; ++ address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR); ++ ++ trap->mthd = address & 0x1FFC; ++ trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA); ++ if (dev_priv->card_type < NV_10) { ++ trap->subc = (address >> 13) & 0x7; ++ } else { ++ trap->subc = (address >> 16) & 0x7; ++ trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH); ++ } ++ ++ if (dev_priv->card_type < NV_10) { ++ trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF; ++ } else if (dev_priv->card_type < NV_40) { ++ 
trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF; ++ } else if (dev_priv->card_type < NV_50) { ++ trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF; ++ } else { ++ trap->class = NV_READ(0x400814); ++ } ++} ++ ++static void ++nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, ++ struct nouveau_pgraph_trap *trap) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t nsource = trap->nsource, nstatus = trap->nstatus; ++ ++ DRM_INFO("%s - nSource:", id); ++ nouveau_print_bitfield_names(nsource, nouveau_nsource_names, ++ ARRAY_SIZE(nouveau_nsource_names)); ++ printk(", nStatus:"); ++ if (dev_priv->card_type < NV_10) ++ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, ++ ARRAY_SIZE(nouveau_nstatus_names)); ++ else ++ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10, ++ ARRAY_SIZE(nouveau_nstatus_names_nv10)); ++ printk("\n"); ++ ++ DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n", ++ id, trap->channel, trap->subc, trap->class, trap->mthd, ++ trap->data2, trap->data); ++} ++ ++static inline void ++nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) ++{ ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; ++ ++ nouveau_graph_trap_info(dev, &trap); ++ ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ /* NV4 (nvidia TNT 1) reports software methods with ++ * PGRAPH NOTIFY ILLEGAL_MTHD ++ */ ++ DRM_DEBUG("Got NV04 software method method %x for class %#x\n", ++ trap.mthd, trap.class); ++ ++ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { ++ DRM_ERROR("Unable to execute NV04 software method %x " ++ "for object class %x. Please report.\n", ++ trap.mthd, trap.class); ++ unhandled = 1; ++ } ++ } else { ++ unhandled = 1; ++ } ++ ++ if (unhandled) ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); ++} ++ ++static inline void ++nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) ++{ ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; ++ ++ nouveau_graph_trap_info(dev, &trap); ++ trap.nsource = nsource; ++ ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (trap.channel >= 0 && trap.mthd == 0x0150) { ++ nouveau_fence_handler(dev, trap.channel); ++ } else ++ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { ++ unhandled = 1; ++ } ++ } else { ++ unhandled = 1; ++ } ++ ++ if (unhandled) ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); ++} ++ ++static inline void ++nouveau_pgraph_intr_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t chid; ++ ++ chid = engine->fifo.channel_id(dev); ++ DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid); ++ ++ switch(dev_priv->card_type) { ++ case NV_04: ++ case NV_05: ++ nouveau_nv04_context_switch(dev); ++ break; ++ case NV_10: ++ case NV_11: ++ case NV_17: ++ nouveau_nv10_context_switch(dev); ++ break; ++ default: ++ DRM_ERROR("Context switch not implemented\n"); ++ break; ++ } ++} ++ ++static void ++nouveau_pgraph_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ while ((status = NV_READ(NV03_PGRAPH_INTR))) { ++ uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE); ++ ++ if (status & NV_PGRAPH_INTR_NOTIFY) { ++ nouveau_pgraph_intr_notify(dev, nsource); ++ ++ status &= ~NV_PGRAPH_INTR_NOTIFY; ++ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); ++ } ++ ++ if (status & 
NV_PGRAPH_INTR_ERROR) { ++ nouveau_pgraph_intr_error(dev, nsource); ++ ++ status &= ~NV_PGRAPH_INTR_ERROR; ++ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); ++ } ++ ++ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { ++ nouveau_pgraph_intr_context_switch(dev); ++ ++ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ NV_WRITE(NV03_PGRAPH_INTR, ++ NV_PGRAPH_INTR_CONTEXT_SWITCH); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PGRAPH_INTR, status); ++ } ++ ++ if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0) ++ NV_WRITE(NV04_PGRAPH_FIFO, 1); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++} ++ ++static void ++nv50_pgraph_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ status = NV_READ(NV03_PGRAPH_INTR); ++ ++ if (status & 0x00000020) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); ++ ++ status &= ~0x00000020; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00000020); ++ } ++ ++ if (status & 0x00100000) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_DATA_ERROR); ++ ++ status &= ~0x00100000; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00100000); ++ } ++ ++ if (status & 0x00200000) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); ++ ++ status &= ~0x00200000; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00200000); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PGRAPH_INTR, status); ++ } ++ ++ { ++ const int isb = (1 << 16) | (1 << 0); ++ ++ if ((NV_READ(0x400500) & isb) != isb) ++ NV_WRITE(0x400500, NV_READ(0x400500) | isb); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++} ++ ++static void ++nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (crtc&1) { ++ NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); ++ } ++ ++ if (crtc&2) { ++ NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); ++ } ++} ++ ++static void ++nouveau_nv50_display_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR); ++ ++ DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val); ++ ++ NV_WRITE(NV50_DISPLAY_SUPERVISOR, val); ++} ++ ++static void ++nouveau_nv50_i2c_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER)); ++ ++ /* This seems to be the way to acknowledge an interrupt. 
*/ ++ NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF); ++} ++ ++irqreturn_t ++nouveau_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device*)arg; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ status = NV_READ(NV03_PMC_INTR_0); ++ if (!status) ++ return IRQ_NONE; ++ ++ if (status & NV_PMC_INTR_0_PFIFO_PENDING) { ++ nouveau_fifo_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_PFIFO_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { ++ if (dev_priv->card_type >= NV_50) ++ nv50_pgraph_irq_handler(dev); ++ else ++ nouveau_pgraph_irq_handler(dev); ++ ++ status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_CRTCn_PENDING) { ++ nouveau_crtc_irq_handler(dev, (status>>24)&3); ++ status &= ~NV_PMC_INTR_0_CRTCn_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) { ++ nouveau_nv50_display_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) { ++ nouveau_nv50_i2c_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING; ++ } ++ ++ if (status) ++ DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status); ++ ++ return IRQ_HANDLED; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_mem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_mem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_mem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,872 @@ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * Copyright 2005 Stephane Marchesin ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Keith Whitwell ++ */ ++ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "nouveau_drv.h" ++ ++static struct mem_block * ++split_block(struct mem_block *p, uint64_t start, uint64_t size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++struct mem_block * ++nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, ++ int align2, struct drm_file *file_priv, int tail) ++{ ++ struct mem_block *p; ++ uint64_t mask = (1 << align2) - 1; ++ ++ if (!heap) ++ return NULL; ++ ++ if (tail) { ++ list_for_each_prev(p, heap) { ++ uint64_t start = ((p->start + p->size) - size) & ~mask; ++ ++ if (p->file_priv == 0 && start >= p->start && ++ start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ } else { ++ list_for_each(p, heap) { ++ uint64_t start = (p->start + mask) & ~mask; ++ ++ if (p->file_priv == 0 && ++ start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ } ++ ++ return NULL; ++} ++ ++static struct mem_block *find_block(struct mem_block *heap, uint64_t start) ++{ ++ struct mem_block *p; ++ ++ list_for_each(p, heap) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++void nouveau_mem_free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ if (p->next->file_priv == 0) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFS); ++ } ++ ++ if (p->prev->file_priv == 0) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFS); ++ } ++} ++ ++/* Initialize. How to check for an uninitialized heap? 
++ */ ++int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, ++ uint64_t size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* ++ * Free all blocks associated with the releasing file_priv ++ */ ++void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ list_for_each(p, heap) { ++ if (p->file_priv == file_priv) ++ p->file_priv = NULL; ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ list_for_each(p, heap) { ++ while ((p->file_priv == 0) && (p->next->file_priv == 0) && ++ (p->next!=heap)) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ } ++} ++ ++/* ++ * Cleanup everything ++ */ ++void nouveau_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); ++ *heap = NULL; ++} ++ ++void nouveau_mem_close(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_mem_takedown(&dev_priv->agp_heap); ++ nouveau_mem_takedown(&dev_priv->fb_heap); ++ if (dev_priv->pci_heap) ++ nouveau_mem_takedown(&dev_priv->pci_heap); ++} ++ ++/*XXX won't work on BSD because of pci_read_config_dword */ ++static uint32_t ++nouveau_mem_fb_amount_igp(struct drm_device *dev) ++{ ++#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct pci_dev *bridge; ++ uint32_t mem; ++ ++ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1)); ++ if (!bridge) { ++ DRM_ERROR("no bridge device\n"); ++ return 0; ++ } ++ ++ if (dev_priv->flags&NV_NFORCE) { ++ pci_read_config_dword(bridge, 0x7C, &mem); ++ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; ++ } else ++ if(dev_priv->flags&NV_NFORCE2) { ++ pci_read_config_dword(bridge, 0x84, &mem); ++ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; ++ } ++ ++ DRM_ERROR("impossible!\n"); ++#else ++ DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n"); ++#endif ++ ++ return 0; ++} ++ ++/* returns the amount of FB ram in bytes */ ++uint64_t nouveau_mem_fb_amount(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ switch(dev_priv->card_type) ++ { ++ case NV_04: ++ case NV_05: ++ if (NV_READ(NV03_BOOT_0) & 0x00000100) { ++ return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; ++ } else ++ switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) ++ { ++ case NV04_BOOT_0_RAM_AMOUNT_32MB: ++ return 32*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_16MB: ++ return 16*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_8MB: ++ return 8*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_4MB: ++ return 4*1024*1024; ++ } ++ break; ++ case 
NV_10: ++ case NV_11: ++ case NV_17: ++ case NV_20: ++ case NV_30: ++ case NV_40: ++ case NV_44: ++ case NV_50: ++ default: ++ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { ++ return nouveau_mem_fb_amount_igp(dev); ++ } else { ++ uint64_t mem; ++ ++ mem = (NV_READ(NV04_FIFO_DATA) & ++ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> ++ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; ++ return mem*1024*1024; ++ } ++ break; ++ } ++ ++ DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n"); ++ return 0; ++} ++ ++static void nouveau_mem_reset_agp(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; ++ ++ saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1); ++ saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19); ++ ++ /* clear busmaster bit */ ++ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); ++ /* clear SBA and AGP bits */ ++ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); ++ ++ /* power cycle pgraph, if enabled */ ++ pmc_enable = NV_READ(NV03_PMC_ENABLE); ++ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { ++ NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ } ++ ++ /* and restore (gives effect of resetting AGP) */ ++ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19); ++ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1); ++} ++ ++static int ++nouveau_mem_init_agp(struct drm_device *dev, int ttm) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_agp_info info; ++ struct drm_agp_mode mode; ++ int ret; ++ ++ nouveau_mem_reset_agp(dev); ++ ++ ret = drm_agp_acquire(dev); ++ if (ret) { ++ DRM_ERROR("Unable to acquire AGP: %d\n", ret); ++ return ret; ++ } ++ ++ ret = drm_agp_info(dev, &info); ++ if (ret) { ++ DRM_ERROR("Unable to get AGP info: %d\n", ret); ++ return ret; ++ } ++ ++ /* see agp.h for the AGPSTAT_* modes available */ ++ mode.mode = info.mode; ++ ret = drm_agp_enable(dev, mode); ++ if (ret) { ++ DRM_ERROR("Unable to enable AGP: %d\n", ret); ++ return ret; ++ } ++ ++ if (!ttm) { ++ struct drm_agp_buffer agp_req; ++ struct drm_agp_binding bind_req; ++ ++ agp_req.size = info.aperture_size; ++ agp_req.type = 0; ++ ret = drm_agp_alloc(dev, &agp_req); ++ if (ret) { ++ DRM_ERROR("Unable to alloc AGP: %d\n", ret); ++ return ret; ++ } ++ ++ bind_req.handle = agp_req.handle; ++ bind_req.offset = 0; ++ ret = drm_agp_bind(dev, &bind_req); ++ if (ret) { ++ DRM_ERROR("Unable to bind AGP: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ dev_priv->gart_info.type = NOUVEAU_GART_AGP; ++ dev_priv->gart_info.aper_base = info.aperture_base; ++ dev_priv->gart_info.aper_size = info.aperture_size; ++ return 0; ++} ++ ++#define HACK_OLD_MM ++int ++nouveau_mem_init_ttm(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t vram_size, bar1_size; ++ int ret; ++ ++ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; ++ dev_priv->fb_phys = drm_get_resource_start(dev,1); ++ dev_priv->gart_info.type = NOUVEAU_GART_NONE; ++ ++ drm_bo_driver_init(dev); ++ ++ /* non-mappable vram */ ++ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); ++ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; ++ vram_size = dev_priv->fb_available_size >> PAGE_SHIFT; ++ bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT; ++ if (bar1_size < vram_size) { ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, ++ bar1_size, vram_size - bar1_size, 1))) { 
++ DRM_ERROR("Failed PRIV0 mm init: %d\n", ret); ++ return ret; ++ } ++ vram_size = bar1_size; ++ } ++ ++ /* mappable vram */ ++#ifdef HACK_OLD_MM ++ vram_size /= 4; ++#endif ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) { ++ DRM_ERROR("Failed VRAM mm init: %d\n", ret); ++ return ret; ++ } ++ ++ /* GART */ ++#if !defined(__powerpc__) && !defined(__ia64__) ++ if (drm_device_is_agp(dev) && dev->agp) { ++ if ((ret = nouveau_mem_init_agp(dev, 1))) ++ DRM_ERROR("Error initialising AGP: %d\n", ret); ++ } ++#endif ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { ++ if ((ret = nouveau_sgdma_init(dev))) ++ DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret); ++ } ++ ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, ++ dev_priv->gart_info.aper_size >> ++ PAGE_SHIFT, 1))) { ++ DRM_ERROR("Failed TT mm init: %d\n", ret); ++ return ret; ++ } ++ ++#ifdef HACK_OLD_MM ++ vram_size <<= PAGE_SHIFT; ++ DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10); ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3)) ++ return -ENOMEM; ++#endif ++ ++ return 0; ++} ++ ++int nouveau_mem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_size; ++ int ret = 0; ++ ++ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; ++ dev_priv->fb_phys = 0; ++ dev_priv->gart_info.type = NOUVEAU_GART_NONE; ++ ++ /* setup a mtrr over the FB */ ++ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), ++ nouveau_mem_fb_amount(dev), ++ DRM_MTRR_WC); ++ ++ /* Init FB */ ++ dev_priv->fb_phys=drm_get_resource_start(dev,1); ++ fb_size = nouveau_mem_fb_amount(dev); ++ /* On G80, limit VRAM to 512MiB temporarily due to limits in how ++ * we handle VRAM page tables. ++ */ ++ if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024)) ++ fb_size = (512 * 1024 * 1024); ++ /* On at least NV40, RAMIN is actually at the end of vram. ++ * We don't want to allocate this... */ ++ if (dev_priv->card_type >= NV_40) ++ fb_size -= dev_priv->ramin_rsvd_vram; ++ dev_priv->fb_available_size = fb_size; ++ DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); ++ ++ if (fb_size>256*1024*1024) { ++ /* On cards with > 256Mb, you can't map everything. 
++ * So we create a second FB heap for that type of memory */ ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, ++ 0, 256*1024*1024)) ++ return -ENOMEM; ++ if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, ++ 256*1024*1024, fb_size-256*1024*1024)) ++ return -ENOMEM; ++ } else { ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) ++ return -ENOMEM; ++ dev_priv->fb_nomap_heap=NULL; ++ } ++ ++#if !defined(__powerpc__) && !defined(__ia64__) ++ /* Init AGP / NV50 PCIEGART */ ++ if (drm_device_is_agp(dev) && dev->agp) { ++ if ((ret = nouveau_mem_init_agp(dev, 0))) ++ DRM_ERROR("Error initialising AGP: %d\n", ret); ++ } ++#endif ++ ++ /*Note: this is *not* just NV50 code, but only used on NV50 for now */ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && ++ dev_priv->card_type >= NV_50) { ++ ret = nouveau_sgdma_init(dev); ++ if (!ret) { ++ ret = nouveau_sgdma_nottm_hack_init(dev); ++ if (ret) ++ nouveau_sgdma_takedown(dev); ++ } ++ ++ if (ret) ++ DRM_ERROR("Error initialising SG DMA: %d\n", ret); ++ } ++ ++ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ++ if (nouveau_mem_init_heap(&dev_priv->agp_heap, ++ 0, dev_priv->gart_info.aper_size)) { ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { ++ nouveau_sgdma_nottm_hack_takedown(dev); ++ nouveau_sgdma_takedown(dev); ++ } ++ } ++ } ++ ++ /* NV04-NV40 PCIEGART */ ++ if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { ++ struct drm_scatter_gather sgreq; ++ ++ DRM_DEBUG("Allocating sg memory for PCI DMA\n"); ++ sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone ++ ++ if (drm_sg_alloc(dev, &sgreq)) { ++ DRM_ERROR("Unable to allocate %ldMB of scatter-gather" ++ " pages for PCI DMA!",sgreq.size>>20); ++ } else { ++ if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, ++ dev->sg->pages * PAGE_SIZE)) { ++ DRM_ERROR("Unable to initialize pci_heap!"); ++ } ++ } ++ } ++ ++ /* G8x: Allocate shared page table to map real VRAM pages into */ ++ if (dev_priv->card_type >= NV_50) { ++ unsigned size = ((512 * 1024 * 1024) / 65536) * 8; ++ ++ ret = nouveau_gpuobj_new(dev, NULL, size, 0, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ALLOW_NO_REFS, ++ &dev_priv->vm_vram_pt); ++ if (ret) { ++ DRM_ERROR("Error creating VRAM page table: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++struct mem_block * ++nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, ++ int flags, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct mem_block *block; ++ int type, tail = !(flags & NOUVEAU_MEM_USER); ++ ++ /* ++ * Make things easier on ourselves: all allocations are page-aligned. ++ * We need that to map allocated regions into the user space ++ */ ++ if (alignment < PAGE_SHIFT) ++ alignment = PAGE_SHIFT; ++ ++ /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB ++ * page size in the GPU VM. ++ */ ++ if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) { ++ size = (size + 65535) & ~65535; ++ if (alignment < 16) ++ alignment = 16; ++ } ++ ++ /* ++ * Warn about 0 sized allocations, but let it go through. 
It'll return 1 page ++ */ ++ if (size == 0) ++ DRM_INFO("warning : 0 byte allocation\n"); ++ ++ /* ++ * Keep alloc size a multiple of the page size to keep drm_addmap() happy ++ */ ++ if (size & (~PAGE_MASK)) ++ size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; ++ ++ ++#define NOUVEAU_MEM_ALLOC_AGP {\ ++ type=NOUVEAU_MEM_AGP;\ ++ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ ++ alignment, file_priv, tail); \ ++ if (block) goto alloc_ok;\ ++ } ++ ++#define NOUVEAU_MEM_ALLOC_PCI {\ ++ type = NOUVEAU_MEM_PCI;\ ++ block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \ ++ alignment, file_priv, tail); \ ++ if ( block ) goto alloc_ok;\ ++ } ++ ++#define NOUVEAU_MEM_ALLOC_FB {\ ++ type=NOUVEAU_MEM_FB;\ ++ if (!(flags&NOUVEAU_MEM_MAPPED)) {\ ++ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ ++ size, alignment, \ ++ file_priv, tail); \ ++ if (block) goto alloc_ok;\ ++ }\ ++ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ ++ alignment, file_priv, tail);\ ++ if (block) goto alloc_ok;\ ++ } ++ ++ ++ if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB ++ if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP ++ if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI ++ if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB ++ if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP ++ if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI ++ ++ ++ return NULL; ++ ++alloc_ok: ++ block->flags=type; ++ ++ /* On G8x, map memory into VM */ ++ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && ++ !(flags & NOUVEAU_MEM_NOVM)) { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start; ++ unsigned count = block->size / 65536; ++ unsigned tile = 0; ++ ++ if (!pt) { ++ DRM_ERROR("vm alloc without vm pt\n"); ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ ++ /* The tiling stuff is *not* what NVIDIA does - but both the ++ * 2D and 3D engines seem happy with this simpler method. ++ * Should look into why NVIDIA do what they do at some point. 
++ */ ++ if (flags & NOUVEAU_MEM_TILE) { ++ if (flags & NOUVEAU_MEM_TILE_ZETA) ++ tile = 0x00002800; ++ else ++ tile = 0x00007000; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ ++ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); ++ offset += 65536; ++ } ++ } else { ++ block->flags |= NOUVEAU_MEM_NOVM; ++ } ++ ++ if (flags&NOUVEAU_MEM_MAPPED) ++ { ++ struct drm_map_list *entry; ++ int ret = 0; ++ block->flags|=NOUVEAU_MEM_MAPPED; ++ ++ if (type == NOUVEAU_MEM_AGP) { ++ if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_AGP, 0, &block->map); ++ else ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_SCATTER_GATHER, 0, &block->map); ++ } ++ else if (type == NOUVEAU_MEM_FB) ++ ret = drm_addmap(dev, block->start + dev_priv->fb_phys, ++ block->size, _DRM_FRAME_BUFFER, ++ 0, &block->map); ++ else if (type == NOUVEAU_MEM_PCI) ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_SCATTER_GATHER, 0, &block->map); ++ ++ if (ret) { ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ ++ entry = drm_find_matching_map(dev, block->map); ++ if (!entry) { ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ block->map_handle = entry->user_token; ++ } ++ ++ DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags); ++ return block; ++} ++ ++void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags); ++ ++ if (block->flags&NOUVEAU_MEM_MAPPED) ++ drm_rmmap(dev, block->map); ++ ++ /* G8x: Remove pages from vm */ ++ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && ++ !(block->flags & NOUVEAU_MEM_NOVM)) { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start; ++ unsigned count = block->size / 65536; ++ ++ if (!pt) { ++ DRM_ERROR("vm free without vm pt\n"); ++ goto out_free; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ INSTANCE_WR(pt, (pte * 2) + 0, 0); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0); ++ offset += 65536; ++ } ++ } ++ ++out_free: ++ nouveau_mem_free_block(block); ++} ++ ++/* ++ * Ioctls ++ */ ++ ++int ++nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_alloc *alloc = data; ++ struct mem_block *block; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (alloc->flags & NOUVEAU_MEM_INTERNAL) ++ return -EINVAL; ++ ++ block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size, ++ alloc->flags | NOUVEAU_MEM_USER, file_priv); ++ if (!block) ++ return -ENOMEM; ++ alloc->map_handle=block->map_handle; ++ alloc->offset=block->start; ++ alloc->flags=block->flags; ++ ++ if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB) ++ alloc->offset += 512*1024*1024; ++ ++ return 0; ++} ++ ++int ++nouveau_ioctl_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_free *memfree = data; ++ struct mem_block *block; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB) ++ memfree->offset -= 512*1024*1024; ++ ++ block=NULL; ++ if (memfree->flags & NOUVEAU_MEM_FB) ++ block = 
find_block(dev_priv->fb_heap, memfree->offset); ++ else if (memfree->flags & NOUVEAU_MEM_AGP) ++ block = find_block(dev_priv->agp_heap, memfree->offset); ++ else if (memfree->flags & NOUVEAU_MEM_PCI) ++ block = find_block(dev_priv->pci_heap, memfree->offset); ++ if (!block) ++ return -EFAULT; ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ nouveau_mem_free(dev, block); ++ return 0; ++} ++ ++int ++nouveau_ioctl_mem_tile(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_tile *memtile = data; ++ struct mem_block *block = NULL; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (dev_priv->card_type < NV_50) ++ return -EINVAL; ++ ++ if (memtile->flags & NOUVEAU_MEM_FB) { ++ memtile->offset -= 512*1024*1024; ++ block = find_block(dev_priv->fb_heap, memtile->offset); ++ } ++ ++ if (!block) ++ return -EINVAL; ++ ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start + memtile->delta; ++ unsigned count = memtile->size / 65536; ++ unsigned tile = 0; ++ ++ if (memtile->flags & NOUVEAU_MEM_TILE) { ++ if (memtile->flags & NOUVEAU_MEM_TILE_ZETA) ++ tile = 0x00002800; ++ else ++ tile = 0x00007000; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ ++ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); ++ offset += 65536; ++ } ++ } ++ ++ return 0; ++} ++ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_notifier.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_notifier.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_notifier.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_notifier.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,165 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++int ++nouveau_notifier_init_channel(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ int flags, ret; ++ ++ flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED | ++ NOUVEAU_MEM_FB_ACCEPTABLE); ++ ++ chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, ++ (struct drm_file *)-2); ++ if (!chan->notifier_block) ++ return -ENOMEM; ++ DRM_DEBUG("Allocated notifier block in 0x%08x\n", ++ chan->notifier_block->flags); ++ ++ ret = nouveau_mem_init_heap(&chan->notifier_heap, ++ 0, chan->notifier_block->size); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ++nouveau_notifier_takedown_channel(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ ++ if (chan->notifier_block) { ++ nouveau_mem_free(dev, chan->notifier_block); ++ chan->notifier_block = NULL; ++ } ++ ++ nouveau_mem_takedown(&chan->notifier_heap); ++} ++ ++static void ++nouveau_notifier_gpuobj_dtor(struct drm_device *dev, ++ struct nouveau_gpuobj *gpuobj) ++{ ++ DRM_DEBUG("\n"); ++ ++ if (gpuobj->priv) ++ nouveau_mem_free_block(gpuobj->priv); ++} ++ ++int ++nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, ++ int count, uint32_t *b_offset) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *nobj = NULL; ++ struct mem_block *mem; ++ uint32_t offset; ++ int target, ret; ++ ++ if (!chan->notifier_heap) { ++ DRM_ERROR("Channel %d doesn't have a notifier heap!\n", ++ chan->id); ++ return -EINVAL; ++ } ++ ++ mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0, ++ (struct drm_file *)-2, 0); ++ if (!mem) { ++ DRM_ERROR("Channel %d notifier block full\n", chan->id); ++ return -ENOMEM; ++ } ++ mem->flags = NOUVEAU_MEM_NOTIFIER; ++ ++ offset = chan->notifier_block->start; ++ if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { ++ target = NV_DMA_TARGET_VIDMEM; ++ } else ++ if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && ++ dev_priv->card_type < NV_50) { ++ ret = nouveau_sgdma_get_page(dev, offset, &offset); ++ if (ret) ++ return ret; ++ target = NV_DMA_TARGET_PCI; ++ } else { ++ target = NV_DMA_TARGET_AGP; ++ } ++ } else ++ if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) { ++ target = NV_DMA_TARGET_PCI_NONLINEAR; ++ } else { ++ DRM_ERROR("Bad DMA target, flags 0x%08x!\n", ++ chan->notifier_block->flags); ++ return -EINVAL; ++ } ++ offset += mem->start; ++ ++ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ offset, mem->size, ++ NV_DMA_ACCESS_RW, target, &nobj))) { ++ nouveau_mem_free_block(mem); ++ DRM_ERROR("Error creating notifier ctxdma: %d\n", ret); ++ return ret; ++ } ++ nobj->dtor = nouveau_notifier_gpuobj_dtor; ++ nobj->priv = mem; ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) { ++ nouveau_gpuobj_del(dev, &nobj); ++ nouveau_mem_free_block(mem); ++ DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ *b_offset = mem->start; ++ return 0; ++} ++ ++int ++nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_notifierobj_alloc *na = data; ++ struct nouveau_channel *chan; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); ++ ++ ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset); ++ if (ret) ++ 
return ret; ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_object.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_object.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_object.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_object.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1178 @@ ++/* ++ * Copyright (C) 2006 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++/* NVidia uses context objects to drive drawing operations. ++ ++ Context objects can be selected into 8 subchannels in the FIFO, ++ and then used via DMA command buffers. ++ ++ A context object is referenced by a user defined handle (CARD32). The HW ++ looks up graphics objects in a hash table in the instance RAM. ++ ++ An entry in the hash table consists of 2 CARD32. The first CARD32 contains ++ the handle, the second one a bitfield, that contains the address of the ++ object in instance RAM. ++ ++ The format of the second CARD32 seems to be: ++ ++ NV4 to NV30: ++ ++ 15: 0 instance_addr >> 4 ++ 17:16 engine (here uses 1 = graphics) ++ 28:24 channel id (here uses 0) ++ 31 valid (use 1) ++ ++ NV40: ++ ++ 15: 0 instance_addr >> 4 (maybe 19-0) ++ 21:20 engine (here uses 1 = graphics) ++ I'm unsure about the other bits, but using 0 seems to work. 
++ ++ The key into the hash table depends on the object handle and channel id and ++ is given as: ++*/ ++static uint32_t ++nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ uint32_t hash = 0; ++ int i; ++ ++ DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle); ++ ++ for (i=32;i>0;i-=dev_priv->ramht_bits) { ++ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); ++ handle >>= dev_priv->ramht_bits; ++ } ++ if (dev_priv->card_type < NV_50) ++ hash ^= channel << (dev_priv->ramht_bits - 4); ++ hash <<= 3; ++ ++ DRM_DEBUG("hash=0x%08x\n", hash); ++ return hash; ++} ++ ++static int ++nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, ++ uint32_t offset) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4); ++ ++ if (dev_priv->card_type < NV_40) ++ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); ++ return (ctx != 0); ++} ++ ++static int ++nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; ++ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; ++ struct nouveau_gpuobj *gpuobj = ref->gpuobj; ++ uint32_t ctx, co, ho; ++ ++ if (!ramht) { ++ DRM_ERROR("No hash table!\n"); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->card_type < NV_40) { ++ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | ++ (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | ++ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ ctx = (ref->instance >> 4) | ++ (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | ++ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } else { ++ ctx = (ref->instance >> 4) | ++ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } ++ ++ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); ++ do { ++ if (!nouveau_ramht_entry_valid(dev, ramht, co)) { ++ DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", ++ ref->channel, co, ref->handle, ctx); ++ INSTANCE_WR(ramht, (co + 0)/4, ref->handle); ++ INSTANCE_WR(ramht, (co + 4)/4, ctx); ++ ++ list_add_tail(&ref->list, &chan->ramht_refs); ++ return 0; ++ } ++ DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", ++ ref->channel, co, INSTANCE_RD(ramht, co/4)); ++ ++ co += 8; ++ if (co >= dev_priv->ramht_size) { ++ DRM_INFO("no space left after collision\n"); ++ co = 0; ++ /* exit as it seems to cause crash with nouveau_demo and ++ * 0xdead0001 object */ ++ break; ++ } ++ } while (co != ho); ++ ++ DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); ++ return -ENOMEM; ++} ++ ++static void ++nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; ++ struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; ++ uint32_t co, ho; ++ ++ if (!ramht) { ++ DRM_ERROR("No hash table!\n"); ++ return; ++ } ++ ++ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); ++ do { ++ if (nouveau_ramht_entry_valid(dev, ramht, co) && ++ (ref->handle == INSTANCE_RD(ramht, (co/4)))) { ++ DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", ++ ref->channel, co, ref->handle, ++ INSTANCE_RD(ramht, (co + 4))); ++ INSTANCE_WR(ramht, (co + 0)/4, 0x00000000); ++ INSTANCE_WR(ramht, (co + 4)/4, 0x00000000); ++ ++ list_del(&ref->list); ++ return; ++ } ++ ++ co += 8; ++ if (co >= dev_priv->ramht_size) ++ co = 0; ++ } while (co != ho); ++ ++ DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n", ++ ref->channel, ref->handle); ++} ++ ++int ++nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, ++ int size, int align, uint32_t flags, ++ struct nouveau_gpuobj **gpuobj_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_gpuobj *gpuobj; ++ struct mem_block *pramin = NULL; ++ int ret; ++ ++ DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", ++ chan ? chan->id : -1, size, align, flags); ++ ++ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) ++ return -EINVAL; ++ ++ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); ++ if (!gpuobj) ++ return -ENOMEM; ++ DRM_DEBUG("gpuobj %p\n", gpuobj); ++ gpuobj->flags = flags; ++ gpuobj->im_channel = chan ? chan->id : -1; ++ ++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); ++ ++ /* Choose between global instmem heap, and per-channel private ++ * instmem heap. On ramin_heap) { ++ DRM_DEBUG("private heap\n"); ++ pramin = chan->ramin_heap; ++ } else ++ if (dev_priv->card_type < NV_50) { ++ DRM_DEBUG("global heap fallback\n"); ++ pramin = dev_priv->ramin_heap; ++ } ++ } else { ++ DRM_DEBUG("global heap\n"); ++ pramin = dev_priv->ramin_heap; ++ } ++ ++ if (!pramin) { ++ DRM_ERROR("No PRAMIN heap!\n"); ++ return -EINVAL; ++ } ++ ++ if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return ret; ++ } ++ ++ /* Allocate a chunk of the PRAMIN aperture */ ++ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, ++ drm_order(align), ++ (struct drm_file *)-2, 0); ++ if (!gpuobj->im_pramin) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return -ENOMEM; ++ } ++ gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE; ++ ++ if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return ret; ++ } ++ ++ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { ++ int i; ++ ++ for (i = 0; i < gpuobj->im_pramin->size; i += 4) ++ INSTANCE_WR(gpuobj, i/4, 0); ++ } ++ ++ *gpuobj_ret = gpuobj; ++ return 0; ++} ++ ++int ++nouveau_gpuobj_early_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ INIT_LIST_HEAD(&dev_priv->gpuobj_list); ++ ++ return 0; ++} ++ ++int ++nouveau_gpuobj_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ if (dev_priv->card_type < NV_50) { ++ if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ++ ~0, dev_priv->ramht_size, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ALLOW_NO_REFS, ++ &dev_priv->ramht, NULL))) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_gpuobj_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ 
++ nouveau_gpuobj_del(dev, &dev_priv->ramht); ++} ++ ++void ++nouveau_gpuobj_late_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ struct list_head *entry, *tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { ++ gpuobj = list_entry(entry, struct nouveau_gpuobj, list); ++ ++ DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n", ++ gpuobj, gpuobj->refcount); ++ gpuobj->refcount = 0; ++ nouveau_gpuobj_del(dev, &gpuobj); ++ } ++} ++ ++int ++nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_gpuobj *gpuobj; ++ ++ DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); ++ ++ if (!dev_priv || !pgpuobj || !(*pgpuobj)) ++ return -EINVAL; ++ gpuobj = *pgpuobj; ++ ++ if (gpuobj->refcount != 0) { ++ DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount); ++ return -EINVAL; ++ } ++ ++ if (gpuobj->dtor) ++ gpuobj->dtor(dev, gpuobj); ++ ++ if (gpuobj->im_backing) { ++ if (gpuobj->flags & NVOBJ_FLAG_FAKE) ++ drm_free(gpuobj->im_backing, ++ sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER); ++ else ++ engine->instmem.clear(dev, gpuobj); ++ } ++ ++ if (gpuobj->im_pramin) { ++ if (gpuobj->flags & NVOBJ_FLAG_FAKE) ++ drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin), ++ DRM_MEM_DRIVER); ++ else ++ nouveau_mem_free_block(gpuobj->im_pramin); ++ } ++ ++ list_del(&gpuobj->list); ++ ++ *pgpuobj = NULL; ++ drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER); ++ return 0; ++} ++ ++static int ++nouveau_gpuobj_instance_get(struct drm_device *dev, ++ struct nouveau_channel *chan, ++ struct nouveau_gpuobj *gpuobj, uint32_t *inst) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *cpramin; ++ ++ /* card_type < NV_50) { ++ *inst = gpuobj->im_pramin->start; ++ return 0; ++ } ++ ++ if (chan && gpuobj->im_channel != chan->id) { ++ DRM_ERROR("Channel mismatch: obj %d, ref %d\n", ++ gpuobj->im_channel, chan->id); ++ return -EINVAL; ++ } ++ ++ /* NV50 channel-local instance */ ++ if (chan > 0) { ++ cpramin = chan->ramin->gpuobj; ++ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; ++ return 0; ++ } ++ ++ /* NV50 global (VRAM) instance */ ++ if (gpuobj->im_channel < 0) { ++ /* ...from global heap */ ++ if (!gpuobj->im_backing) { ++ DRM_ERROR("AII, no VRAM backing gpuobj\n"); ++ return -EINVAL; ++ } ++ *inst = gpuobj->im_backing->start; ++ return 0; ++ } else { ++ /* ...from local heap */ ++ cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj; ++ *inst = cpramin->im_backing->start + ++ (gpuobj->im_pramin->start - cpramin->im_pramin->start); ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++int ++nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, ++ uint32_t handle, struct nouveau_gpuobj *gpuobj, ++ struct nouveau_gpuobj_ref **ref_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj_ref *ref; ++ uint32_t instance; ++ int ret; ++ ++ DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", ++ chan ? 
chan->id : -1, handle, gpuobj); ++ ++ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) ++ return -EINVAL; ++ ++ if (!chan && !ref_ret) ++ return -EINVAL; ++ ++ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); ++ if (ret) ++ return ret; ++ ++ ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER); ++ if (!ref) ++ return -ENOMEM; ++ ref->gpuobj = gpuobj; ++ ref->channel = chan ? chan->id : -1; ++ ref->instance = instance; ++ ++ if (!ref_ret) { ++ ref->handle = handle; ++ ++ ret = nouveau_ramht_insert(dev, ref); ++ if (ret) { ++ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER); ++ return ret; ++ } ++ } else { ++ ref->handle = ~0; ++ *ref_ret = ref; ++ } ++ ++ ref->gpuobj->refcount++; ++ return 0; ++} ++ ++int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) ++{ ++ struct nouveau_gpuobj_ref *ref; ++ ++ DRM_DEBUG("ref %p\n", pref ? *pref : NULL); ++ ++ if (!dev || !pref || *pref == NULL) ++ return -EINVAL; ++ ref = *pref; ++ ++ if (ref->handle != ~0) ++ nouveau_ramht_remove(dev, ref); ++ ++ if (ref->gpuobj) { ++ ref->gpuobj->refcount--; ++ ++ if (ref->gpuobj->refcount == 0) { ++ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) ++ nouveau_gpuobj_del(dev, &ref->gpuobj); ++ } ++ } ++ ++ *pref = NULL; ++ drm_free(ref, sizeof(ref), DRM_MEM_DRIVER); ++ return 0; ++} ++ ++int ++nouveau_gpuobj_new_ref(struct drm_device *dev, ++ struct nouveau_channel *oc, struct nouveau_channel *rc, ++ uint32_t handle, int size, int align, uint32_t flags, ++ struct nouveau_gpuobj_ref **ref) ++{ ++ struct nouveau_gpuobj *gpuobj = NULL; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj))) ++ return ret; ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, ++ struct nouveau_gpuobj_ref **ref_ret) ++{ ++ struct nouveau_gpuobj_ref *ref; ++ struct list_head *entry, *tmp; ++ ++ list_for_each_safe(entry, tmp, &chan->ramht_refs) { ++ ref = list_entry(entry, struct nouveau_gpuobj_ref, list); ++ ++ if (ref->handle == handle) { ++ if (ref_ret) ++ *ref_ret = ref; ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++int ++nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, ++ uint32_t b_offset, uint32_t size, ++ uint32_t flags, struct nouveau_gpuobj **pgpuobj, ++ struct nouveau_gpuobj_ref **pref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ int i; ++ ++ DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", ++ p_offset, b_offset, size, flags); ++ ++ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); ++ if (!gpuobj) ++ return -ENOMEM; ++ DRM_DEBUG("gpuobj %p\n", gpuobj); ++ gpuobj->im_channel = -1; ++ gpuobj->flags = flags | NVOBJ_FLAG_FAKE; ++ ++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); ++ ++ if (p_offset != ~0) { ++ gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), ++ DRM_MEM_DRIVER); ++ if (!gpuobj->im_pramin) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return -ENOMEM; ++ } ++ gpuobj->im_pramin->start = p_offset; ++ gpuobj->im_pramin->size = size; ++ } ++ ++ if (b_offset != ~0) { ++ gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block), ++ DRM_MEM_DRIVER); ++ if (!gpuobj->im_backing) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return -ENOMEM; ++ } ++ gpuobj->im_backing->start = b_offset; ++ gpuobj->im_backing->size = size; ++ } ++ ++ if 
(gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { ++ for (i = 0; i < gpuobj->im_pramin->size; i += 4) ++ INSTANCE_WR(gpuobj, i/4, 0); ++ } ++ ++ if (pref) { ++ if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return i; ++ } ++ } ++ ++ if (pgpuobj) ++ *pgpuobj = gpuobj; ++ return 0; ++} ++ ++ ++static int ++nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /*XXX: dodgy hack for now */ ++ if (dev_priv->card_type >= NV_50) ++ return 24; ++ if (dev_priv->card_type >= NV_40) ++ return 32; ++ return 16; ++} ++ ++/* ++ DMA objects are used to reference a piece of memory in the ++ framebuffer, PCI or AGP address space. Each object is 16 bytes big ++ and looks as follows: ++ ++ entry[0] ++ 11:0 class (seems like I can always use 0 here) ++ 12 page table present? ++ 13 page entry linear? ++ 15:14 access: 0 rw, 1 ro, 2 wo ++ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP ++ 31:20 dma adjust (bits 0-11 of the address) ++ entry[1] ++ dma limit (size of transfer) ++ entry[X] ++ 1 0 readonly, 1 readwrite ++ 31:12 dma frame address of the page (bits 12-31 of the address) ++ entry[N] ++ page table terminator, same value as the first pte, as does nvidia ++ rivatv uses 0xffffffff ++ ++ Non linear page tables need a list of frame addresses afterwards, ++ the rivatv project has some info on this. ++ ++ The method below creates a DMA object in instance RAM and returns a handle ++ to it that can be used to set up context objects. ++*/ ++int ++nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, ++ uint64_t offset, uint64_t size, int access, ++ int target, struct nouveau_gpuobj **gpuobj) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ uint32_t is_scatter_gather = 0; ++ ++ /* Total number of pages covered by the request. ++ */ ++ const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE; ++ ++ ++ DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", ++ chan->id, class, offset, size); ++ DRM_DEBUG("access=%d target=%d\n", access, target); ++ ++ switch (target) { ++ case NV_DMA_TARGET_AGP: ++ offset += dev_priv->gart_info.aper_base; ++ break; ++ case NV_DMA_TARGET_PCI_NONLINEAR: ++ /*assume the "offset" is a virtual memory address*/ ++ is_scatter_gather = 1; ++ /*put back the right value*/ ++ target = NV_DMA_TARGET_PCI; ++ break; ++ default: ++ break; ++ } ++ ++ ret = nouveau_gpuobj_new(dev, chan, ++ is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), ++ 16, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ gpuobj); ++ if (ret) { ++ DRM_ERROR("Error creating gpuobj: %d\n", ret); ++ return ret; ++ } ++ ++ if (dev_priv->card_type < NV_50) { ++ uint32_t frame, adjust, pte_flags = 0; ++ adjust = offset & 0x00000fff; ++ if (access != NV_DMA_ACCESS_RO) ++ pte_flags |= (1<<1); ++ ++ if ( ! 
is_scatter_gather ) ++ { ++ frame = offset & ~0x00000fff; ++ ++ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | ++ (adjust << 20) | ++ (access << 14) | ++ (target << 16) | ++ class)); ++ INSTANCE_WR(*gpuobj, 1, size - 1); ++ INSTANCE_WR(*gpuobj, 2, frame | pte_flags); ++ INSTANCE_WR(*gpuobj, 3, frame | pte_flags); ++ } ++ else ++ { ++ /* Intial page entry in the scatter-gather area that ++ * corresponds to the base offset ++ */ ++ unsigned int idx = offset / PAGE_SIZE; ++ ++ uint32_t instance_offset; ++ unsigned int i; ++ ++ if ((idx + page_count) > dev->sg->pages) { ++ DRM_ERROR("Requested page range exceedes " ++ "allocated scatter-gather range!"); ++ return -E2BIG; ++ } ++ ++ DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size); ++ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) | ++ (adjust << 20) | ++ (access << 14) | ++ (target << 16) | ++ class)); ++ INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1); ++ ++ ++ /*write starting at the third dword*/ ++ instance_offset = 2; ++ ++ /*for each PAGE, get its bus address, fill in the page table entry, and advance*/ ++ for (i = 0; i < page_count; i++) { ++ if (dev->sg->busaddr[idx] == 0) { ++ dev->sg->busaddr[idx] = ++ pci_map_page(dev->pdev, ++ dev->sg->pagelist[idx], ++ 0, ++ PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ /* Not a 100% sure this is the right kdev in all cases. */ ++ if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) { ++#else ++ if (dma_mapping_error(dev->sg->busaddr[idx])) { ++#endif ++ return -ENOMEM; ++ } ++ } ++ ++ frame = (uint32_t) dev->sg->busaddr[idx]; ++ INSTANCE_WR(*gpuobj, instance_offset, ++ frame | pte_flags); ++ ++ idx++; ++ instance_offset ++; ++ } ++ } ++ } else { ++ uint32_t flags0, flags5; ++ ++ if (target == NV_DMA_TARGET_VIDMEM) { ++ flags0 = 0x00190000; ++ flags5 = 0x00010000; ++ } else { ++ flags0 = 0x7fc00000; ++ flags5 = 0x00080000; ++ } ++ ++ INSTANCE_WR(*gpuobj, 0, flags0 | class); ++ INSTANCE_WR(*gpuobj, 1, offset + size - 1); ++ INSTANCE_WR(*gpuobj, 2, offset); ++ INSTANCE_WR(*gpuobj, 5, flags5); ++ } ++ ++ (*gpuobj)->engine = NVOBJ_ENGINE_SW; ++ (*gpuobj)->class = class; ++ return 0; ++} ++ ++int ++nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, ++ uint64_t offset, uint64_t size, int access, ++ struct nouveau_gpuobj **gpuobj, ++ uint32_t *o_ret) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || ++ (dev_priv->card_type >= NV_50 && ++ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ offset, size, access, ++ NV_DMA_TARGET_AGP, gpuobj); ++ if (o_ret) ++ *o_ret = 0; ++ } else ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { ++ *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ if (offset & ~0xffffffffULL) { ++ DRM_ERROR("obj offset exceeds 32-bits\n"); ++ return -EINVAL; ++ } ++ if (o_ret) ++ *o_ret = (uint32_t)offset; ++ ret = (*gpuobj != NULL) ? 0 : -EINVAL; ++ } else { ++ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); ++ return -EINVAL; ++ } ++ ++ return ret; ++} ++ ++/* Context objects in the instance RAM have the following structure. ++ * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. 
++ ++ NV4 - NV30: ++ ++ entry[0] ++ 11:0 class ++ 12 chroma key enable ++ 13 user clip enable ++ 14 swizzle enable ++ 17:15 patch config: ++ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre ++ 18 synchronize enable ++ 19 endian: 1 big, 0 little ++ 21:20 dither mode ++ 23 single step enable ++ 24 patch status: 0 invalid, 1 valid ++ 25 context_surface 0: 1 valid ++ 26 context surface 1: 1 valid ++ 27 context pattern: 1 valid ++ 28 context rop: 1 valid ++ 29,30 context beta, beta4 ++ entry[1] ++ 7:0 mono format ++ 15:8 color format ++ 31:16 notify instance address ++ entry[2] ++ 15:0 dma 0 instance address ++ 31:16 dma 1 instance address ++ entry[3] ++ dma method traps ++ ++ NV40: ++ No idea what the exact format is. Here's what can be deducted: ++ ++ entry[0]: ++ 11:0 class (maybe uses more bits here?) ++ 17 user clip enable ++ 21:19 patch config ++ 25 patch status valid ? ++ entry[1]: ++ 15:0 DMA notifier (maybe 20:0) ++ entry[2]: ++ 15:0 DMA 0 instance (maybe 20:0) ++ 24 big endian ++ entry[3]: ++ 15:0 DMA 1 instance (maybe 20:0) ++ entry[4]: ++ entry[5]: ++ set to 0? ++*/ ++int ++nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, ++ struct nouveau_gpuobj **gpuobj) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class); ++ ++ ret = nouveau_gpuobj_new(dev, chan, ++ nouveau_gpuobj_class_instmem_size(dev, class), ++ 16, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ gpuobj); ++ if (ret) { ++ DRM_ERROR("Error creating gpuobj: %d\n", ret); ++ return ret; ++ } ++ ++ if (dev_priv->card_type >= NV_50) { ++ INSTANCE_WR(*gpuobj, 0, class); ++ INSTANCE_WR(*gpuobj, 5, 0x00010000); ++ } else { ++ switch (class) { ++ case NV_CLASS_NULL: ++ INSTANCE_WR(*gpuobj, 0, 0x00001030); ++ INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF); ++ break; ++ default: ++ if (dev_priv->card_type >= NV_40) { ++ INSTANCE_WR(*gpuobj, 0, class); ++#ifdef __BIG_ENDIAN ++ INSTANCE_WR(*gpuobj, 2, 0x01000000); ++#endif ++ } else { ++#ifdef __BIG_ENDIAN ++ INSTANCE_WR(*gpuobj, 0, class | 0x00080000); ++#else ++ INSTANCE_WR(*gpuobj, 0, class); ++#endif ++ } ++ } ++ } ++ ++ (*gpuobj)->engine = NVOBJ_ENGINE_GR; ++ (*gpuobj)->class = class; ++ return 0; ++} ++ ++static int ++nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *pramin = NULL; ++ int size, base, ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ /* Base amount for object storage (4KiB enough?) 
*/ ++ size = 0x1000; ++ base = 0; ++ ++ /* PGRAPH context */ ++ ++ if (dev_priv->card_type == NV_50) { ++ /* Various fixed table thingos */ ++ size += 0x1400; /* mostly unknown stuff */ ++ size += 0x4000; /* vm pd */ ++ base = 0x6000; ++ /* RAMHT, not sure about setting size yet, 32KiB to be safe */ ++ size += 0x8000; ++ /* RAMFC */ ++ size += 0x1000; ++ /* PGRAPH context */ ++ size += 0x70000; ++ } ++ ++ DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", ++ chan->id, size, base); ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, ++ &chan->ramin); ++ if (ret) { ++ DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret); ++ return ret; ++ } ++ pramin = chan->ramin->gpuobj; ++ ++ ret = nouveau_mem_init_heap(&chan->ramin_heap, ++ pramin->im_pramin->start + base, size); ++ if (ret) { ++ DRM_ERROR("Error creating PRAMIN heap: %d\n", ret); ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_gpuobj_channel_init(struct nouveau_channel *chan, ++ uint32_t vram_h, uint32_t tt_h) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *vram = NULL, *tt = NULL; ++ int ret, i; ++ ++ INIT_LIST_HEAD(&chan->ramht_refs); ++ ++ DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); ++ ++ /* Reserve a block of PRAMIN for the channel ++ *XXX: maybe on card_type == NV_50) { ++ ret = nouveau_gpuobj_channel_init_pramin(chan); ++ if (ret) ++ return ret; ++ } ++ ++ /* NV50 VM ++ * - Allocate per-channel page-directory ++ * - Point offset 0-512MiB at shared PCIEGART table ++ * - Point offset 512-1024MiB at shared VRAM table ++ */ ++ if (dev_priv->card_type >= NV_50) { ++ uint32_t vm_offset; ++ ++ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; ++ vm_offset += chan->ramin->gpuobj->im_pramin->start; ++ if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, ++ 0, &chan->vm_pd, NULL))) ++ return ret; ++ for (i=0; i<0x4000; i+=8) { ++ INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000); ++ INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, ++ dev_priv->gart_info.sg_ctxdma, ++ &chan->vm_gart_pt))) ++ return ret; ++ INSTANCE_WR(chan->vm_pd, (0+0)/4, ++ chan->vm_gart_pt->instance | 0x03); ++ INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000); ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, ++ dev_priv->vm_vram_pt, ++ &chan->vm_vram_pt))) ++ return ret; ++ INSTANCE_WR(chan->vm_pd, (8+0)/4, ++ chan->vm_vram_pt->instance | 0x61); ++ INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000); ++ } ++ ++ /* RAMHT */ ++ if (dev_priv->card_type < NV_50) { ++ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, ++ &chan->ramht); ++ if (ret) ++ return ret; ++ } else { ++ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, ++ 0x8000, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramht); ++ if (ret) ++ return ret; ++ } ++ ++ /* VRAM ctxdma */ ++ if (dev_priv->card_type >= NV_50) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, 0x100000000ULL, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_AGP, &vram); ++ if (ret) { ++ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ } else ++ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, dev_priv->fb_available_size, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_VIDMEM, &vram))) { ++ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) { ++ DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ /* TT memory ctxdma */ ++ if (dev_priv->card_type >= NV_50) { ++ tt = vram; ++ } else ++ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ++ ret = nouveau_gpuobj_gart_dma_new(chan, 0, ++ dev_priv->gart_info.aper_size, ++ NV_DMA_ACCESS_RW, &tt, NULL); ++ } else ++ if (dev_priv->pci_heap) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, dev->sg->pages * PAGE_SIZE, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_PCI_NONLINEAR, &tt); ++ } else { ++ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); ++ ret = -EINVAL; ++ } ++ ++ if (ret) { ++ DRM_ERROR("Error creating TT ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); ++ if (ret) { ++ DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct list_head *entry, *tmp; ++ struct nouveau_gpuobj_ref *ref; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ list_for_each_safe(entry, tmp, &chan->ramht_refs) { ++ ref = list_entry(entry, struct nouveau_gpuobj_ref, list); ++ ++ nouveau_gpuobj_ref_del(dev, &ref); ++ } ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramht); ++ ++ nouveau_gpuobj_del(dev, &chan->vm_pd); ++ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); ++ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt); ++ ++ if (chan->ramin_heap) ++ nouveau_mem_takedown(&chan->ramin_heap); ++ if (chan->ramin) ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ ++} ++ ++int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct nouveau_channel *chan; ++ struct drm_nouveau_grobj_alloc *init = 
data; ++ struct nouveau_gpuobj *gr = NULL; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); ++ ++ //FIXME: check args, only allow trusted objects to be created ++ ++ if (init->handle == ~0) ++ return -EINVAL; ++ ++ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) ++ return -EEXIST; ++ ++ ret = nouveau_gpuobj_gr_new(chan, init->class, &gr); ++ if (ret) { ++ DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", ++ ret, init->channel, init->handle); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) { ++ DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", ++ ret, init->channel, init->handle); ++ nouveau_gpuobj_del(dev, &gr); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_gpuobj_free *objfree = data; ++ struct nouveau_gpuobj_ref *ref; ++ struct nouveau_channel *chan; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); ++ ++ if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref))) ++ return ret; ++ nouveau_gpuobj_ref_del(dev, &ref); ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_reg.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_reg.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_reg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_reg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,593 @@ ++ ++ ++#define NV03_BOOT_0 0x00100000 ++# define NV03_BOOT_0_RAM_AMOUNT 0x00000003 ++# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000 ++# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001 ++# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002 ++# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003 ++# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000 ++# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001 ++# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002 ++# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003 ++ ++#define NV04_FIFO_DATA 0x0010020c ++# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 ++# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 ++ ++#define NV_RAMIN 0x00700000 ++ ++#define NV_RAMHT_HANDLE_OFFSET 0 ++#define NV_RAMHT_CONTEXT_OFFSET 4 ++# define NV_RAMHT_CONTEXT_VALID (1<<31) ++# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24 ++# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16 ++# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0 ++# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1 ++# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0 ++# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23 ++# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 ++# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 ++ ++/* DMA object defines */ ++#define NV_DMA_ACCESS_RW 0 ++#define NV_DMA_ACCESS_RO 1 ++#define NV_DMA_ACCESS_WO 2 ++#define NV_DMA_TARGET_VIDMEM 0 ++#define NV_DMA_TARGET_PCI 2 ++#define NV_DMA_TARGET_AGP 3 ++/*The following is not a real value used by nvidia cards, it's changed by nouveau_object_dma_create*/ ++#define NV_DMA_TARGET_PCI_NONLINEAR 8 ++ ++/* Some object classes we care about in the drm */ ++#define NV_CLASS_DMA_FROM_MEMORY 0x00000002 ++#define NV_CLASS_DMA_TO_MEMORY 0x00000003 ++#define NV_CLASS_NULL 0x00000030 ++#define NV_CLASS_DMA_IN_MEMORY 0x0000003D ++ ++#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE)) ++#define NV03_USER__SIZE 16 ++#define NV10_USER__SIZE 32 ++#define NV03_USER_SIZE 
0x00010000 ++#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE)) ++#define NV03_USER_DMA_PUT__SIZE 16 ++#define NV10_USER_DMA_PUT__SIZE 32 ++#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE)) ++#define NV03_USER_DMA_GET__SIZE 16 ++#define NV10_USER_DMA_GET__SIZE 32 ++#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE)) ++#define NV03_USER_REF_CNT__SIZE 16 ++#define NV10_USER_REF_CNT__SIZE 32 ++ ++#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE)) ++#define NV40_USER_SIZE 0x00001000 ++#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE)) ++#define NV40_USER_DMA_PUT__SIZE 32 ++#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE)) ++#define NV40_USER_DMA_GET__SIZE 32 ++#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE)) ++#define NV40_USER_REF_CNT__SIZE 32 ++ ++#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE)) ++#define NV50_USER_SIZE 0x00002000 ++#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE)) ++#define NV50_USER_DMA_PUT__SIZE 128 ++#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE)) ++#define NV50_USER_DMA_GET__SIZE 128 ++/*XXX: I don't think this actually exists.. */ ++#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE)) ++#define NV50_USER_REF_CNT__SIZE 128 ++ ++#define NV03_FIFO_SIZE 0x8000UL ++ ++#define NV03_PMC_BOOT_0 0x00000000 ++#define NV03_PMC_BOOT_1 0x00000004 ++#define NV03_PMC_INTR_0 0x00000100 ++# define NV_PMC_INTR_0_PFIFO_PENDING (1<< 8) ++# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12) ++# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21) ++# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24) ++# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25) ++# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26) ++# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24) ++#define NV03_PMC_INTR_EN_0 0x00000140 ++# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<< 0) ++#define NV03_PMC_ENABLE 0x00000200 ++# define NV_PMC_ENABLE_PFIFO (1<< 8) ++# define NV_PMC_ENABLE_PGRAPH (1<<12) ++/* Disabling the below bit breaks newer (G7X only?) mobile chipsets, ++ * the card will hang early on in the X init process. ++ */ ++# define NV_PMC_ENABLE_UNK13 (1<<13) ++#define NV40_PMC_1700 0x00001700 ++#define NV40_PMC_1704 0x00001704 ++#define NV40_PMC_1708 0x00001708 ++#define NV40_PMC_170C 0x0000170C ++ ++/* probably PMC ? 
*/ ++#define NV50_PUNK_BAR0_PRAMIN 0x00001700 ++#define NV50_PUNK_BAR_CFG_BASE 0x00001704 ++#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) ++#define NV50_PUNK_BAR1_CTXDMA 0x00001708 ++#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) ++#define NV50_PUNK_BAR3_CTXDMA 0x0000170C ++#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) ++#define NV50_PUNK_UNK1710 0x00001710 ++ ++#define NV04_PBUS_PCI_NV_1 0x00001804 ++#define NV04_PBUS_PCI_NV_19 0x0000184C ++ ++#define NV04_PTIMER_INTR_0 0x00009100 ++#define NV04_PTIMER_INTR_EN_0 0x00009140 ++#define NV04_PTIMER_NUMERATOR 0x00009200 ++#define NV04_PTIMER_DENOMINATOR 0x00009210 ++#define NV04_PTIMER_TIME_0 0x00009400 ++#define NV04_PTIMER_TIME_1 0x00009410 ++#define NV04_PTIMER_ALARM_0 0x00009420 ++ ++#define NV50_I2C_CONTROLLER 0x0000E054 ++ ++#define NV04_PFB_CFG0 0x00100200 ++#define NV04_PFB_CFG1 0x00100204 ++#define NV40_PFB_020C 0x0010020C ++#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) ++#define NV10_PFB_TILE__SIZE 8 ++#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) ++#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) ++#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16)) ++#define NV10_PFB_CLOSE_PAGE2 0x0010033C ++#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) ++#define NV40_PFB_TILE__SIZE_0 12 ++#define NV40_PFB_TILE__SIZE_1 15 ++#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) ++#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16)) ++#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16)) ++#define NV40_PFB_UNK_800 0x00100800 ++ ++#define NV04_PGRAPH_DEBUG_0 0x00400080 ++#define NV04_PGRAPH_DEBUG_1 0x00400084 ++#define NV04_PGRAPH_DEBUG_2 0x00400088 ++#define NV04_PGRAPH_DEBUG_3 0x0040008c ++#define NV10_PGRAPH_DEBUG_4 0x00400090 ++#define NV03_PGRAPH_INTR 0x00400100 ++#define NV03_PGRAPH_NSTATUS 0x00400104 ++# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) ++# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) ++# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) ++# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) ++# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) ++# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) ++# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) ++# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) ++#define NV03_PGRAPH_NSOURCE 0x00400108 ++# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0) ++# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1) ++# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 2) ++# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3) ++# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4) ++# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5) ++# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6) ++# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7) ++# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8) ++# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9) ++# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) ++# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) ++# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) ++# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) ++# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) ++# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) ++# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) ++# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) ++# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) ++#define NV03_PGRAPH_INTR_EN 0x00400140 ++#define NV40_PGRAPH_INTR_EN 0x0040013C ++# define NV_PGRAPH_INTR_NOTIFY (1<< 0) ++# define NV_PGRAPH_INTR_MISSING_HW (1<< 4) ++# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) ++# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) ++# define NV_PGRAPH_INTR_ERROR (1<<20) ++#define 
NV10_PGRAPH_CTX_CONTROL 0x00400144 ++#define NV10_PGRAPH_CTX_USER 0x00400148 ++#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C ++#define NV10_PGRAPH_CTX_SWITCH2 0x00400150 ++#define NV10_PGRAPH_CTX_SWITCH3 0x00400154 ++#define NV10_PGRAPH_CTX_SWITCH4 0x00400158 ++#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C ++#define NV04_PGRAPH_CTX_SWITCH1 0x00400160 ++#define NV10_PGRAPH_CTX_CACHE1 0x00400160 ++#define NV04_PGRAPH_CTX_SWITCH2 0x00400164 ++#define NV04_PGRAPH_CTX_SWITCH3 0x00400168 ++#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C ++#define NV04_PGRAPH_CTX_CONTROL 0x00400170 ++#define NV04_PGRAPH_CTX_USER 0x00400174 ++#define NV04_PGRAPH_CTX_CACHE1 0x00400180 ++#define NV10_PGRAPH_CTX_CACHE2 0x00400180 ++#define NV03_PGRAPH_CTX_CONTROL 0x00400190 ++#define NV03_PGRAPH_CTX_USER 0x00400194 ++#define NV04_PGRAPH_CTX_CACHE2 0x004001A0 ++#define NV10_PGRAPH_CTX_CACHE3 0x004001A0 ++#define NV04_PGRAPH_CTX_CACHE3 0x004001C0 ++#define NV10_PGRAPH_CTX_CACHE4 0x004001C0 ++#define NV04_PGRAPH_CTX_CACHE4 0x004001E0 ++#define NV10_PGRAPH_CTX_CACHE5 0x004001E0 ++#define NV40_PGRAPH_CTXCTL_0304 0x00400304 ++#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff ++#define NV40_PGRAPH_CTXCTL_0310 0x00400310 ++#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 ++#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 ++#define NV40_PGRAPH_CTXCTL_030C 0x0040030c ++#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 ++#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 ++#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c ++#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 ++#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF ++#define NV03_PGRAPH_ABS_X_RAM 0x00400400 ++#define NV03_PGRAPH_ABS_Y_RAM 0x00400480 ++#define NV03_PGRAPH_X_MISC 0x00400500 ++#define NV03_PGRAPH_Y_MISC 0x00400504 ++#define NV04_PGRAPH_VALID1 0x00400508 ++#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C ++#define NV04_PGRAPH_MISC24_0 0x00400510 ++#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514 ++#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518 ++#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C ++#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520 ++#define NV03_PGRAPH_CLIPX_0 0x00400524 ++#define NV03_PGRAPH_CLIPX_1 0x00400528 ++#define NV03_PGRAPH_CLIPY_0 0x0040052C ++#define NV03_PGRAPH_CLIPY_1 0x00400530 ++#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534 ++#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538 ++#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C ++#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540 ++#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544 ++#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548 ++#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 ++#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 ++#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 ++#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C ++#define NV04_PGRAPH_MISC24_1 0x00400570 ++#define NV04_PGRAPH_MISC24_2 0x00400574 ++#define NV04_PGRAPH_VALID2 0x00400578 ++#define NV04_PGRAPH_PASSTHRU_0 0x0040057C ++#define NV04_PGRAPH_PASSTHRU_1 0x00400580 ++#define NV04_PGRAPH_PASSTHRU_2 0x00400584 ++#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588 ++#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C ++#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590 ++#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594 ++#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598 ++#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C ++#define NV04_PGRAPH_FORMAT_0 0x004005A8 ++#define 
NV04_PGRAPH_FORMAT_1 0x004005AC ++#define NV04_PGRAPH_FILTER_0 0x004005B0 ++#define NV04_PGRAPH_FILTER_1 0x004005B4 ++#define NV03_PGRAPH_MONO_COLOR0 0x00400600 ++#define NV04_PGRAPH_ROP3 0x00400604 ++#define NV04_PGRAPH_BETA_AND 0x00400608 ++#define NV04_PGRAPH_BETA_PREMULT 0x0040060C ++#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610 ++#define NV04_PGRAPH_FORMATS 0x00400618 ++#define NV10_PGRAPH_DEBUG_2 0x00400620 ++#define NV04_PGRAPH_BOFFSET0 0x00400640 ++#define NV04_PGRAPH_BOFFSET1 0x00400644 ++#define NV04_PGRAPH_BOFFSET2 0x00400648 ++#define NV04_PGRAPH_BOFFSET3 0x0040064C ++#define NV04_PGRAPH_BOFFSET4 0x00400650 ++#define NV04_PGRAPH_BOFFSET5 0x00400654 ++#define NV04_PGRAPH_BBASE0 0x00400658 ++#define NV04_PGRAPH_BBASE1 0x0040065C ++#define NV04_PGRAPH_BBASE2 0x00400660 ++#define NV04_PGRAPH_BBASE3 0x00400664 ++#define NV04_PGRAPH_BBASE4 0x00400668 ++#define NV04_PGRAPH_BBASE5 0x0040066C ++#define NV04_PGRAPH_BPITCH0 0x00400670 ++#define NV04_PGRAPH_BPITCH1 0x00400674 ++#define NV04_PGRAPH_BPITCH2 0x00400678 ++#define NV04_PGRAPH_BPITCH3 0x0040067C ++#define NV04_PGRAPH_BPITCH4 0x00400680 ++#define NV04_PGRAPH_BLIMIT0 0x00400684 ++#define NV04_PGRAPH_BLIMIT1 0x00400688 ++#define NV04_PGRAPH_BLIMIT2 0x0040068C ++#define NV04_PGRAPH_BLIMIT3 0x00400690 ++#define NV04_PGRAPH_BLIMIT4 0x00400694 ++#define NV04_PGRAPH_BLIMIT5 0x00400698 ++#define NV04_PGRAPH_BSWIZZLE2 0x0040069C ++#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 ++#define NV03_PGRAPH_STATUS 0x004006B0 ++#define NV04_PGRAPH_STATUS 0x00400700 ++#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 ++#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 ++#define NV04_PGRAPH_SURFACE 0x0040070C ++#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C ++#define NV04_PGRAPH_STATE 0x00400710 ++#define NV10_PGRAPH_SURFACE 0x00400710 ++#define NV04_PGRAPH_NOTIFY 0x00400714 ++#define NV10_PGRAPH_STATE 0x00400714 ++#define NV10_PGRAPH_NOTIFY 0x00400718 ++ ++#define NV04_PGRAPH_FIFO 0x00400720 ++ ++#define NV04_PGRAPH_BPIXEL 0x00400724 ++#define NV10_PGRAPH_RDI_INDEX 0x00400750 ++#define NV04_PGRAPH_FFINTFC_ST2 0x00400754 ++#define NV10_PGRAPH_RDI_DATA 0x00400754 ++#define NV04_PGRAPH_DMA_PITCH 0x00400760 ++#define NV10_PGRAPH_FFINTFC_ST2 0x00400764 ++#define NV04_PGRAPH_DVD_COLORFMT 0x00400764 ++#define NV04_PGRAPH_SCALED_FORMAT 0x00400768 ++#define NV10_PGRAPH_DMA_PITCH 0x00400770 ++#define NV10_PGRAPH_DVD_COLORFMT 0x00400774 ++#define NV10_PGRAPH_SCALED_FORMAT 0x00400778 ++#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 ++#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 ++#define NV04_PGRAPH_PATT_COLOR0 0x00400800 ++#define NV04_PGRAPH_PATT_COLOR1 0x00400804 ++#define NV04_PGRAPH_PATTERN 0x00400808 ++#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810 ++#define NV04_PGRAPH_CHROMA 0x00400814 ++#define NV04_PGRAPH_CONTROL0 0x00400818 ++#define NV04_PGRAPH_CONTROL1 0x0040081C ++#define NV04_PGRAPH_CONTROL2 0x00400820 ++#define NV04_PGRAPH_BLEND 0x00400824 ++#define NV04_PGRAPH_STORED_FMT 0x00400830 ++#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 ++#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) ++#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) ++#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) ++#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) ++#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) ++#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) ++#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 
++#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) ++#define NV04_PGRAPH_U_RAM 0x00400D00 ++#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) ++#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) ++#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) ++#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) ++#define NV04_PGRAPH_V_RAM 0x00400D40 ++#define NV04_PGRAPH_W_RAM 0x00400D80 ++#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 ++#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 ++#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 ++#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C ++#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 ++#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 ++#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 ++#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C ++#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 ++#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 ++#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 ++#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C ++#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 ++#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 ++#define NV10_PGRAPH_XFMODE0 0x00400F40 ++#define NV10_PGRAPH_XFMODE1 0x00400F44 ++#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48 ++#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C ++#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50 ++#define NV10_PGRAPH_PIPE_DATA 0x00400F54 ++#define NV04_PGRAPH_DMA_START_0 0x00401000 ++#define NV04_PGRAPH_DMA_START_1 0x00401004 ++#define NV04_PGRAPH_DMA_LENGTH 0x00401008 ++#define NV04_PGRAPH_DMA_MISC 0x0040100C ++#define NV04_PGRAPH_DMA_DATA_0 0x00401020 ++#define NV04_PGRAPH_DMA_DATA_1 0x00401024 ++#define NV04_PGRAPH_DMA_RM 0x00401030 ++#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040 ++#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044 ++#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048 ++#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C ++#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050 ++#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054 ++#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058 ++#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C ++#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060 ++#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080 ++#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084 ++#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088 ++#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C ++#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090 ++#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094 ++#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 ++#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C ++#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 ++#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) ++#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) ++#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) ++#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) ++ ++ ++/* It's a guess that this works on NV03. 
Confirmed on NV04, though */ ++#define NV04_PFIFO_DELAY_0 0x00002040 ++#define NV04_PFIFO_DMA_TIMESLICE 0x00002044 ++#define NV04_PFIFO_NEXT_CHANNEL 0x00002050 ++#define NV03_PFIFO_INTR_0 0x00002100 ++#define NV03_PFIFO_INTR_EN_0 0x00002140 ++# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0) ++# define NV_PFIFO_INTR_RUNOUT (1<< 4) ++# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8) ++# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) ++# define NV_PFIFO_INTR_DMA_PT (1<<16) ++# define NV_PFIFO_INTR_SEMAPHORE (1<<20) ++# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) ++#define NV03_PFIFO_RAMHT 0x00002210 ++#define NV03_PFIFO_RAMFC 0x00002214 ++#define NV03_PFIFO_RAMRO 0x00002218 ++#define NV40_PFIFO_RAMFC 0x00002220 ++#define NV03_PFIFO_CACHES 0x00002500 ++#define NV04_PFIFO_MODE 0x00002504 ++#define NV04_PFIFO_DMA 0x00002508 ++#define NV04_PFIFO_SIZE 0x0000250c ++#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) ++#define NV50_PFIFO_CTX_TABLE__SIZE 128 ++#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) ++#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) ++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF ++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF ++#define NV03_PFIFO_CACHE0_PUSH0 0x00003000 ++#define NV03_PFIFO_CACHE0_PULL0 0x00003040 ++#define NV04_PFIFO_CACHE0_PULL0 0x00003050 ++#define NV04_PFIFO_CACHE0_PULL1 0x00003054 ++#define NV03_PFIFO_CACHE1_PUSH0 0x00003200 ++#define NV03_PFIFO_CACHE1_PUSH1 0x00003204 ++#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8) ++#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16) ++#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f ++#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f ++#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f ++#define NV03_PFIFO_CACHE1_PUT 0x00003210 ++#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220 ++#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0 ++# define 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000 ++# define NV_PFIFO_CACHE1_ENDIAN 0x80000000 ++# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF ++# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000 ++#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228 ++#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c ++#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230 ++#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240 ++#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244 ++#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248 ++#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C ++#define NV03_PFIFO_CACHE1_PULL0 0x00003240 ++#define NV04_PFIFO_CACHE1_PULL0 0x00003250 ++#define NV03_PFIFO_CACHE1_PULL1 0x00003250 ++#define NV04_PFIFO_CACHE1_PULL1 0x00003254 ++#define NV04_PFIFO_CACHE1_HASH 0x00003258 ++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260 ++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264 ++#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268 ++#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C ++#define NV03_PFIFO_CACHE1_GET 0x00003270 ++#define NV04_PFIFO_CACHE1_ENGINE 0x00003280 ++#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 ++#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 ++#define NV40_PFIFO_UNK32E4 0x000032E4 ++#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) ++#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8)) ++#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8)) ++#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8)) ++ ++#define NV_CRTC0_INTSTAT 0x00600100 ++#define NV_CRTC0_INTEN 0x00600140 ++#define NV_CRTC1_INTSTAT 0x00602100 ++#define NV_CRTC1_INTEN 0x00602140 ++# define NV_CRTC_INTR_VBLANK (1<<0) ++ ++/* This name is a partial guess. */ ++#define NV50_DISPLAY_SUPERVISOR 0x00610024 ++ ++/* Fifo commands. 
These are not regs, neither masks */ ++#define NV03_FIFO_CMD_JUMP 0x20000000 ++#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc ++#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) ++ ++/* RAMFC offsets */ ++#define NV04_RAMFC_DMA_PUT 0x00 ++#define NV04_RAMFC_DMA_GET 0x04 ++#define NV04_RAMFC_DMA_INSTANCE 0x08 ++#define NV04_RAMFC_DMA_STATE 0x0C ++#define NV04_RAMFC_DMA_FETCH 0x10 ++#define NV04_RAMFC_ENGINE 0x14 ++#define NV04_RAMFC_PULL1_ENGINE 0x18 ++ ++#define NV10_RAMFC_DMA_PUT 0x00 ++#define NV10_RAMFC_DMA_GET 0x04 ++#define NV10_RAMFC_REF_CNT 0x08 ++#define NV10_RAMFC_DMA_INSTANCE 0x0C ++#define NV10_RAMFC_DMA_STATE 0x10 ++#define NV10_RAMFC_DMA_FETCH 0x14 ++#define NV10_RAMFC_ENGINE 0x18 ++#define NV10_RAMFC_PULL1_ENGINE 0x1C ++#define NV10_RAMFC_ACQUIRE_VALUE 0x20 ++#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24 ++#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28 ++#define NV10_RAMFC_SEMAPHORE 0x2C ++#define NV10_RAMFC_DMA_SUBROUTINE 0x30 ++ ++#define NV40_RAMFC_DMA_PUT 0x00 ++#define NV40_RAMFC_DMA_GET 0x04 ++#define NV40_RAMFC_REF_CNT 0x08 ++#define NV40_RAMFC_DMA_INSTANCE 0x0C ++#define NV40_RAMFC_DMA_DCOUNT /* ? */ 0x10 ++#define NV40_RAMFC_DMA_STATE 0x14 ++#define NV40_RAMFC_DMA_FETCH 0x18 ++#define NV40_RAMFC_ENGINE 0x1C ++#define NV40_RAMFC_PULL1_ENGINE 0x20 ++#define NV40_RAMFC_ACQUIRE_VALUE 0x24 ++#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28 ++#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C ++#define NV40_RAMFC_SEMAPHORE 0x30 ++#define NV40_RAMFC_DMA_SUBROUTINE 0x34 ++#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38 ++#define NV40_RAMFC_DMA_TIMESLICE 0x3C ++#define NV40_RAMFC_UNK_40 0x40 ++#define NV40_RAMFC_UNK_44 0x44 ++#define NV40_RAMFC_UNK_48 0x48 ++#define NV40_RAMFC_UNK_4C 0x4C ++#define NV40_RAMFC_UNK_50 0x50 +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_sgdma.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_sgdma.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_sgdma.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_sgdma.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,349 @@ ++#include "drmP.h" ++#include "nouveau_drv.h" ++ ++#define NV_CTXDMA_PAGE_SHIFT 12 ++#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) ++#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) ++ ++struct nouveau_sgdma_be { ++ struct drm_ttm_backend backend; ++ struct drm_device *dev; ++ ++ int pages; ++ int pages_populated; ++ dma_addr_t *pagelist; ++ int is_bound; ++ ++ unsigned int pte_start; ++}; ++ ++static int ++nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) ++{ ++ return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); ++} ++ ++static int ++nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, ++ struct page **pages, struct page *dummy_read_page) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ int p, d, o; ++ ++ DRM_DEBUG("num_pages = %ld\n", num_pages); ++ ++ if (nvbe->pagelist) ++ return -EINVAL; ++ nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; ++ nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), ++ DRM_MEM_PAGES); ++ ++ nvbe->pages_populated = d = 0; ++ for (p = 0; p < num_pages; p++) { ++ for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { ++ struct page *page = pages[p]; ++ if (!page) ++ page = dummy_read_page; ++ nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, ++ page, o, ++ NV_CTXDMA_PAGE_SIZE, ++ PCI_DMA_BIDIRECTIONAL); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) { ++#else ++ if (pci_dma_mapping_error(nvbe->pagelist[d])) { ++#endif ++ be->func->clear(be); ++ DRM_ERROR("pci_map_page failed\n"); ++ return -EINVAL; ++ } ++ nvbe->pages_populated = ++d; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_sgdma_clear(struct drm_ttm_backend *be) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ int d; ++ ++ DRM_DEBUG("\n"); ++ ++ if (nvbe && nvbe->pagelist) { ++ if (nvbe->is_bound) ++ be->func->unbind(be); ++ ++ for (d = 0; d < nvbe->pages_populated; d++) { ++ pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], ++ NV_CTXDMA_PAGE_SIZE, ++ PCI_DMA_BIDIRECTIONAL); ++ } ++ drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), ++ DRM_MEM_PAGES); ++ } ++} ++ ++static int ++nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ uint64_t offset = (mem->mm_node->start << PAGE_SHIFT); ++ uint32_t i; ++ ++ DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start, ++ offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1); ++ ++ if (offset & NV_CTXDMA_PAGE_MASK) ++ return -EINVAL; ++ nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); ++ if (dev_priv->card_type < NV_50) ++ nvbe->pte_start += 2; /* skip ctxdma header */ ++ ++ for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { ++ uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; ++ ++ if (pteval & NV_CTXDMA_PAGE_MASK) { ++ DRM_ERROR("Bad pteval 0x%llx\n", pteval); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->card_type < NV_50) { ++ INSTANCE_WR(gpuobj, i, pteval | 3); ++ } else { ++ INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); ++ INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000); ++ } ++ } ++ ++ nvbe->is_bound = 1; ++ return 0; ++} ++ ++static int ++nouveau_sgdma_unbind(struct drm_ttm_backend *be) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ if (nvbe->is_bound) { ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ unsigned int pte; ++ ++ pte = nvbe->pte_start; ++ while (pte < (nvbe->pte_start + nvbe->pages)) { ++ uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; ++ ++ if (dev_priv->card_type < NV_50) { ++ INSTANCE_WR(gpuobj, pte, pteval | 3); ++ } else { ++ INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21); ++ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000); ++ } ++ ++ pte++; ++ } ++ ++ nvbe->is_bound = 0; ++ } ++ 
++ return 0; ++} ++ ++static void ++nouveau_sgdma_destroy(struct drm_ttm_backend *be) ++{ ++ DRM_DEBUG("\n"); ++ if (be) { ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ if (nvbe) { ++ if (nvbe->pagelist) ++ be->func->clear(be); ++ drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func nouveau_sgdma_backend = { ++ .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, ++ .populate = nouveau_sgdma_populate, ++ .clear = nouveau_sgdma_clear, ++ .bind = nouveau_sgdma_bind, ++ .unbind = nouveau_sgdma_unbind, ++ .destroy = nouveau_sgdma_destroy ++}; ++ ++struct drm_ttm_backend * ++nouveau_sgdma_init_ttm(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_sgdma_be *nvbe; ++ ++ if (!dev_priv->gart_info.sg_ctxdma) ++ return NULL; ++ ++ nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); ++ if (!nvbe) ++ return NULL; ++ ++ nvbe->dev = dev; ++ ++ nvbe->backend.func = &nouveau_sgdma_backend; ++ ++ return &nvbe->backend; ++} ++ ++int ++nouveau_sgdma_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ uint32_t aper_size, obj_size; ++ int i, ret; ++ ++ if (dev_priv->card_type < NV_50) { ++ aper_size = (64 * 1024 * 1024); ++ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; ++ obj_size += 8; /* ctxdma header */ ++ } else { ++ /* 1 entire VM page table */ ++ aper_size = (512 * 1024 * 1024); ++ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; ++ } ++ ++ if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, ++ NVOBJ_FLAG_ALLOW_NO_REFS | ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { ++ DRM_ERROR("Error creating sgdma object: %d\n", ret); ++ return ret; ++ } ++ ++ dev_priv->gart_info.sg_dummy_page = ++ alloc_page(GFP_KERNEL|__GFP_DMA32); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ set_page_locked(dev_priv->gart_info.sg_dummy_page); ++#else ++ SetPageLocked(dev_priv->gart_info.sg_dummy_page); ++#endif ++ dev_priv->gart_info.sg_dummy_bus = ++ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, ++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ ++ if (dev_priv->card_type < NV_50) { ++ /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and ++ * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE ++ * on those cards? 
*/ ++ INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | ++ (1 << 12) /* PT present */ | ++ (0 << 13) /* PT *not* linear */ | ++ (NV_DMA_ACCESS_RW << 14) | ++ (NV_DMA_TARGET_PCI << 16)); ++ INSTANCE_WR(gpuobj, 1, aper_size - 1); ++ for (i=2; i<2+(aper_size>>12); i++) { ++ INSTANCE_WR(gpuobj, i, ++ dev_priv->gart_info.sg_dummy_bus | 3); ++ } ++ } else { ++ for (i=0; i<obj_size; i+=8) { ++ INSTANCE_WR(gpuobj, (i+0)/4, ++ dev_priv->gart_info.sg_dummy_bus | 0x21); ++ INSTANCE_WR(gpuobj, (i+4)/4, 0); ++ } ++ } ++ ++ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; ++ dev_priv->gart_info.aper_base = 0; ++ dev_priv->gart_info.aper_size = aper_size; ++ dev_priv->gart_info.sg_ctxdma = gpuobj; ++ return 0; ++} ++ ++void ++nouveau_sgdma_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->gart_info.sg_dummy_page) { ++ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, ++ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ unlock_page(dev_priv->gart_info.sg_dummy_page); ++ __free_page(dev_priv->gart_info.sg_dummy_page); ++ dev_priv->gart_info.sg_dummy_page = NULL; ++ dev_priv->gart_info.sg_dummy_bus = 0; ++ } ++ ++ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); ++} ++ ++int ++nouveau_sgdma_nottm_hack_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_ttm_backend *be; ++ struct drm_scatter_gather sgreq; ++ struct drm_mm_node mm_node; ++ struct drm_bo_mem_reg mem; ++ int ret; ++ ++ dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); ++ if (!dev_priv->gart_info.sg_be) ++ return -ENOMEM; ++ be = dev_priv->gart_info.sg_be; ++ ++ /* Hack the aperture size down to the amount of system memory ++ * we're going to bind into it. ++ */ ++ if (dev_priv->gart_info.aper_size > 32*1024*1024) ++ dev_priv->gart_info.aper_size = 32*1024*1024; ++ ++ sgreq.size = dev_priv->gart_info.aper_size; ++ if ((ret = drm_sg_alloc(dev, &sgreq))) { ++ DRM_ERROR("drm_sg_alloc failed: %d\n", ret); ++ return ret; ++ } ++ dev_priv->gart_info.sg_handle = sgreq.handle; ++ ++ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) { ++ DRM_ERROR("failed populate: %d\n", ret); ++ return ret; ++ } ++ ++ mm_node.start = 0; ++ mem.mm_node = &mm_node; ++ ++ if ((ret = be->func->bind(be, &mem))) { ++ DRM_ERROR("failed bind: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) ++{ ++} ++ ++int ++nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ int pte; ++ ++ pte = (offset >> NV_CTXDMA_PAGE_SHIFT); ++ if (dev_priv->card_type < NV_50) { ++ *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; ++ return 0; ++ } ++ ++ DRM_ERROR("Unimplemented on NV50\n"); ++ return -EINVAL; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_state.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_state.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_state.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,871 @@ ++/* ++ * Copyright 2005 Stephane Marchesin ++ * Copyright 2008 Stuart Bennett ++ * All Rights Reserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++static int nouveau_init_card_mappings(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ /* resource 0 is mmio regs */ ++ /* resource 1 is linear FB */ ++ /* resource 2 is RAMIN (mmio regs + 0x1000000) */ ++ /* resource 6 is bios */ ++ ++ /* map the mmio regs */ ++ ret = drm_addmap(dev, drm_get_resource_start(dev, 0), ++ drm_get_resource_len(dev, 0), ++ _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); ++ if (ret) { ++ DRM_ERROR("Unable to initialize the mmio mapping (%d). 
" ++ "Please report your setup to " DRIVER_EMAIL "\n", ++ ret); ++ return -EINVAL; ++ } ++ DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset); ++ ++ /* map larger RAMIN aperture on NV40 cards */ ++ dev_priv->ramin = NULL; ++ if (dev_priv->card_type >= NV_40) { ++ int ramin_resource = 2; ++ if (drm_get_resource_len(dev, ramin_resource) == 0) ++ ramin_resource = 3; ++ ++ ret = drm_addmap(dev, ++ drm_get_resource_start(dev, ramin_resource), ++ drm_get_resource_len(dev, ramin_resource), ++ _DRM_REGISTERS, _DRM_READ_ONLY, ++ &dev_priv->ramin); ++ if (ret) { ++ DRM_ERROR("Failed to init RAMIN mapping, " ++ "limited instance memory available\n"); ++ dev_priv->ramin = NULL; ++ } ++ } ++ ++ /* On older cards (or if the above failed), create a map covering ++ * the BAR0 PRAMIN aperture */ ++ if (!dev_priv->ramin) { ++ ret = drm_addmap(dev, ++ drm_get_resource_start(dev, 0) + NV_RAMIN, ++ (1*1024*1024), ++ _DRM_REGISTERS, _DRM_READ_ONLY, ++ &dev_priv->ramin); ++ if (ret) { ++ DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int nouveau_stub_init(struct drm_device *dev) { return 0; } ++static void nouveau_stub_takedown(struct drm_device *dev) {} ++ ++static int nouveau_init_engine_ptrs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ switch (dev_priv->chipset & 0xf0) { ++ case 0x00: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv04_fb_init; ++ engine->fb.takedown = nv04_fb_takedown; ++ engine->graph.init = nv04_graph_init; ++ engine->graph.takedown = nv04_graph_takedown; ++ engine->graph.create_context = nv04_graph_create_context; ++ engine->graph.destroy_context = nv04_graph_destroy_context; ++ engine->graph.load_context = nv04_graph_load_context; ++ engine->graph.save_context = nv04_graph_save_context; ++ engine->fifo.channels = 16; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv04_fifo_channel_id; ++ engine->fifo.create_context = nv04_fifo_create_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; ++ engine->fifo.load_context = nv04_fifo_load_context; ++ engine->fifo.save_context = nv04_fifo_save_context; ++ break; ++ case 0x10: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv10_graph_init; ++ engine->graph.takedown = nv10_graph_takedown; ++ engine->graph.create_context = nv10_graph_create_context; ++ 
engine->graph.destroy_context = nv10_graph_destroy_context; ++ engine->graph.load_context = nv10_graph_load_context; ++ engine->graph.save_context = nv10_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x20: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv20_graph_init; ++ engine->graph.takedown = nv20_graph_takedown; ++ engine->graph.create_context = nv20_graph_create_context; ++ engine->graph.destroy_context = nv20_graph_destroy_context; ++ engine->graph.load_context = nv20_graph_load_context; ++ engine->graph.save_context = nv20_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x30: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv30_graph_init; ++ engine->graph.takedown = nv20_graph_takedown; ++ engine->graph.create_context = nv20_graph_create_context; ++ engine->graph.destroy_context = nv20_graph_destroy_context; ++ engine->graph.load_context = nv20_graph_load_context; ++ engine->graph.save_context = nv20_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x40: ++ case 0x60: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = 
nv04_instmem_unbind; ++ engine->mc.init = nv40_mc_init; ++ engine->mc.takedown = nv40_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv40_fb_init; ++ engine->fb.takedown = nv40_fb_takedown; ++ engine->graph.init = nv40_graph_init; ++ engine->graph.takedown = nv40_graph_takedown; ++ engine->graph.create_context = nv40_graph_create_context; ++ engine->graph.destroy_context = nv40_graph_destroy_context; ++ engine->graph.load_context = nv40_graph_load_context; ++ engine->graph.save_context = nv40_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nv40_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv40_fifo_create_context; ++ engine->fifo.destroy_context = nv40_fifo_destroy_context; ++ engine->fifo.load_context = nv40_fifo_load_context; ++ engine->fifo.save_context = nv40_fifo_save_context; ++ break; ++ case 0x50: ++ case 0x80: /* gotta love NVIDIA's consistency.. */ ++ case 0x90: ++ case 0xA0: ++ engine->instmem.init = nv50_instmem_init; ++ engine->instmem.takedown= nv50_instmem_takedown; ++ engine->instmem.populate = nv50_instmem_populate; ++ engine->instmem.clear = nv50_instmem_clear; ++ engine->instmem.bind = nv50_instmem_bind; ++ engine->instmem.unbind = nv50_instmem_unbind; ++ engine->mc.init = nv50_mc_init; ++ engine->mc.takedown = nv50_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nouveau_stub_init; ++ engine->fb.takedown = nouveau_stub_takedown; ++ engine->graph.init = nv50_graph_init; ++ engine->graph.takedown = nv50_graph_takedown; ++ engine->graph.create_context = nv50_graph_create_context; ++ engine->graph.destroy_context = nv50_graph_destroy_context; ++ engine->graph.load_context = nv50_graph_load_context; ++ engine->graph.save_context = nv50_graph_save_context; ++ engine->fifo.channels = 128; ++ engine->fifo.init = nv50_fifo_init; ++ engine->fifo.takedown = nv50_fifo_takedown; ++ engine->fifo.channel_id = nv50_fifo_channel_id; ++ engine->fifo.create_context = nv50_fifo_create_context; ++ engine->fifo.destroy_context = nv50_fifo_destroy_context; ++ engine->fifo.load_context = nv50_fifo_load_context; ++ engine->fifo.save_context = nv50_fifo_save_context; ++ break; ++ default: ++ DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_card_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine; ++ int ret; ++ ++ DRM_DEBUG("prev state = %d\n", dev_priv->init_state); ++ ++ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) ++ return 0; ++ dev_priv->ttm = 0; ++ ++ /* Determine exact chipset we're running on */ ++ if (dev_priv->card_type < NV_10) ++ dev_priv->chipset = dev_priv->card_type; ++ else ++ dev_priv->chipset = ++ (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20; ++ ++ /* Initialise internal driver API hooks */ ++ ret = nouveau_init_engine_ptrs(dev); ++ if (ret) return ret; ++ engine = &dev_priv->Engine; ++ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; ++ ++ ret = nouveau_gpuobj_early_init(dev); ++ if (ret) return ret; ++ ++ /* Initialise instance memory, must happen before mem_init so we ++ * know exactly how much VRAM we're able to use for "normal" ++ * purposes. 
++ */ ++ ret = engine->instmem.init(dev); ++ if (ret) return ret; ++ ++ /* Setup the memory manager */ ++ if (dev_priv->ttm) { ++ ret = nouveau_mem_init_ttm(dev); ++ if (ret) return ret; ++ } else { ++ ret = nouveau_mem_init(dev); ++ if (ret) return ret; ++ } ++ ++ ret = nouveau_gpuobj_init(dev); ++ if (ret) return ret; ++ ++ /* Parse BIOS tables / Run init tables? */ ++ ++ /* PMC */ ++ ret = engine->mc.init(dev); ++ if (ret) return ret; ++ ++ /* PTIMER */ ++ ret = engine->timer.init(dev); ++ if (ret) return ret; ++ ++ /* PFB */ ++ ret = engine->fb.init(dev); ++ if (ret) return ret; ++ ++ /* PGRAPH */ ++ ret = engine->graph.init(dev); ++ if (ret) return ret; ++ ++ /* PFIFO */ ++ ret = engine->fifo.init(dev); ++ if (ret) return ret; ++ ++ /* this call irq_preinstall, register irq handler and ++ * call irq_postinstall ++ */ ++ ret = drm_irq_install(dev); ++ if (ret) return ret; ++ ++ /* what about PVIDEO/PCRTC/PRAMDAC etc? */ ++ ++ ret = nouveau_dma_channel_init(dev); ++ if (ret) return ret; ++ ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; ++ return 0; ++} ++ ++static void nouveau_card_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ DRM_DEBUG("prev state = %d\n", dev_priv->init_state); ++ ++ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { ++ nouveau_dma_channel_takedown(dev); ++ ++ engine->fifo.takedown(dev); ++ engine->graph.takedown(dev); ++ engine->fb.takedown(dev); ++ engine->timer.takedown(dev); ++ engine->mc.takedown(dev); ++ ++ nouveau_sgdma_nottm_hack_takedown(dev); ++ nouveau_sgdma_takedown(dev); ++ ++ nouveau_gpuobj_takedown(dev); ++ nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt); ++ ++ nouveau_mem_close(dev); ++ engine->instmem.takedown(dev); ++ ++ drm_irq_uninstall(dev); ++ ++ nouveau_gpuobj_late_takedown(dev); ++ ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; ++ } ++} ++ ++/* here a client dies, release the stuff that was allocated for its ++ * file_priv */ ++void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_fifo_cleanup(dev, file_priv); ++ nouveau_mem_release(file_priv,dev_priv->fb_heap); ++ nouveau_mem_release(file_priv,dev_priv->agp_heap); ++ nouveau_mem_release(file_priv,dev_priv->pci_heap); ++} ++ ++/* first module load, setup the mmio/fb mapping */ ++int nouveau_firstopen(struct drm_device *dev) ++{ ++#if defined(__powerpc__) ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct device_node *dn; ++#endif ++ int ret; ++ /* Map any PCI resources we need on the card */ ++ ret = nouveau_init_card_mappings(dev); ++ if (ret) return ret; ++ ++#if defined(__powerpc__) ++ /* Put the card in BE mode if it's not */ ++ if (NV_READ(NV03_PMC_BOOT_1)) ++ NV_WRITE(NV03_PMC_BOOT_1,0x00000001); ++ ++ DRM_MEMORYBARRIER(); ++#endif ++ ++#if defined(__linux__) && defined(__powerpc__) ++ /* if we have an OF card, copy vbios to RAMIN */ ++ dn = pci_device_to_OF_node(dev->pdev); ++ if (dn) ++ { ++ int size; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) ++ const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size); ++#else ++ const uint32_t *bios = get_property(dn, "NVDA,BMP", &size); ++#endif ++ if (bios) ++ { ++ int i; ++ for(i=0;iflags = flags & NOUVEAU_FLAGS; ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; ++ ++ DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); ++ ++ /* Time to determine the 
card architecture */ ++ regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8); ++ if (!regs) { ++ DRM_ERROR("Could not ioremap to determine register\n"); ++ return -ENOMEM; ++ } ++ ++ reg0 = readl(regs+NV03_PMC_BOOT_0); ++ reg1 = readl(regs+NV03_PMC_BOOT_1); ++#if defined(__powerpc__) ++ if (reg1) ++ reg0=___swab32(reg0); ++#endif ++ ++ /* We're dealing with >=NV10 */ ++ if ((reg0 & 0x0f000000) > 0 ) { ++ /* Bit 27-20 contain the architecture in hex */ ++ architecture = (reg0 & 0xff00000) >> 20; ++ /* NV04 or NV05 */ ++ } else if ((reg0 & 0xff00fff0) == 0x20004000) { ++ architecture = 0x04; ++ } ++ ++ iounmap(regs); ++ ++ if (architecture >= 0x80) { ++ dev_priv->card_type = NV_50; ++ } else if (architecture >= 0x60) { ++ /* FIXME we need to figure out who's who for NV6x */ ++ dev_priv->card_type = NV_44; ++ } else if (architecture >= 0x50) { ++ dev_priv->card_type = NV_50; ++ } else if (architecture >= 0x40) { ++ uint8_t subarch = architecture & 0xf; ++ /* Selection criteria borrowed from NV40EXA */ ++ if (NV40_CHIPSET_MASK & (1 << subarch)) { ++ dev_priv->card_type = NV_40; ++ } else if (NV44_CHIPSET_MASK & (1 << subarch)) { ++ dev_priv->card_type = NV_44; ++ } else { ++ dev_priv->card_type = NV_UNKNOWN; ++ } ++ } else if (architecture >= 0x30) { ++ dev_priv->card_type = NV_30; ++ } else if (architecture >= 0x20) { ++ dev_priv->card_type = NV_20; ++ } else if (architecture >= 0x17) { ++ dev_priv->card_type = NV_17; ++ } else if (architecture >= 0x11) { ++ dev_priv->card_type = NV_11; ++ } else if (architecture >= 0x10) { ++ dev_priv->card_type = NV_10; ++ } else if (architecture >= 0x04) { ++ dev_priv->card_type = NV_04; ++ } else { ++ dev_priv->card_type = NV_UNKNOWN; ++ } ++ ++ DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0); ++ ++ if (dev_priv->card_type == NV_UNKNOWN) { ++ return -EINVAL; ++ } ++ ++ /* Special flags */ ++ if (dev->pci_device == 0x01a0) { ++ dev_priv->flags |= NV_NFORCE; ++ } else if (dev->pci_device == 0x01f0) { ++ dev_priv->flags |= NV_NFORCE2; ++ } ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ return 0; ++} ++ ++void nouveau_lastclose(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* In the case of an error dev_priv may not be be allocated yet */ ++ if (dev_priv && dev_priv->card_type) { ++ nouveau_card_takedown(dev); ++ ++ if(dev_priv->fb_mtrr>0) ++ { ++ drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); ++ dev_priv->fb_mtrr=0; ++ } ++ } ++} ++ ++int nouveau_unload(struct drm_device *dev) ++{ ++ drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ return 0; ++} ++ ++int ++nouveau_ioctl_card_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return nouveau_card_init(dev); ++} ++ ++int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_getparam *getparam = data; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ switch (getparam->param) { ++ case NOUVEAU_GETPARAM_CHIPSET_ID: ++ getparam->value = dev_priv->chipset; ++ break; ++ case NOUVEAU_GETPARAM_PCI_VENDOR: ++ getparam->value=dev->pci_vendor; ++ break; ++ case NOUVEAU_GETPARAM_PCI_DEVICE: ++ getparam->value=dev->pci_device; ++ break; ++ case NOUVEAU_GETPARAM_BUS_TYPE: ++ if (drm_device_is_agp(dev)) ++ getparam->value=NV_AGP; ++ else if (drm_device_is_pcie(dev)) ++ 
getparam->value=NV_PCIE; ++ else ++ getparam->value=NV_PCI; ++ break; ++ case NOUVEAU_GETPARAM_FB_PHYSICAL: ++ getparam->value=dev_priv->fb_phys; ++ break; ++ case NOUVEAU_GETPARAM_AGP_PHYSICAL: ++ getparam->value=dev_priv->gart_info.aper_base; ++ break; ++ case NOUVEAU_GETPARAM_PCI_PHYSICAL: ++ if ( dev -> sg ) ++ getparam->value=(unsigned long)dev->sg->virtual; ++ else ++ { ++ DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); ++ return -EINVAL; ++ } ++ break; ++ case NOUVEAU_GETPARAM_FB_SIZE: ++ getparam->value=dev_priv->fb_available_size; ++ break; ++ case NOUVEAU_GETPARAM_AGP_SIZE: ++ getparam->value=dev_priv->gart_info.aper_size; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %lld\n", getparam->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_setparam *setparam = data; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ switch (setparam->param) { ++ case NOUVEAU_SETPARAM_CMDBUF_LOCATION: ++ switch (setparam->value) { ++ case NOUVEAU_MEM_AGP: ++ case NOUVEAU_MEM_FB: ++ case NOUVEAU_MEM_PCI: ++ case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE: ++ break; ++ default: ++ DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n", ++ setparam->value); ++ return -EINVAL; ++ } ++ dev_priv->config.cmdbuf.location = setparam->value; ++ break; ++ case NOUVEAU_SETPARAM_CMDBUF_SIZE: ++ dev_priv->config.cmdbuf.size = setparam->value; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %lld\n", setparam->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* waits for idle */ ++void nouveau_wait_for_idle(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ switch(dev_priv->card_type) { ++ case NV_50: ++ break; ++ default: { ++ /* This stuff is more or less a copy of what is seen ++ * in nv28 kmmio dump. ++ */ ++ uint64_t started = dev_priv->Engine.timer.read(dev); ++ uint64_t stopped = started; ++ uint32_t status; ++ do { ++ uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); ++ (void)pmc_e; ++ status = NV_READ(NV04_PGRAPH_STATUS); ++ if (!status) ++ break; ++ stopped = dev_priv->Engine.timer.read(dev); ++ /* It'll never wrap anyway... 
*/ ++ } while (stopped - started < 1000000000ULL); ++ if (status) ++ DRM_ERROR("timed out with status 0x%08x\n", ++ status); ++ } ++ } ++} ++ ++static int nouveau_suspend(struct drm_device *dev) ++{ ++ struct mem_block *p; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_suspend_resume *susres = &dev_priv->susres; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER); ++ susres->ramin_size = 0; ++ list_for_each(p, dev_priv->ramin_heap) ++ if (p->file_priv && (p->start + p->size) > susres->ramin_size) ++ susres->ramin_size = p->start + p->size; ++ if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) { ++ DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < engine->fifo.channels; i++) { ++ uint64_t t_start = engine->timer.read(dev); ++ ++ if (dev_priv->fifos[i] == NULL) ++ continue; ++ ++ /* Give the channel a chance to idle, wait 2s (hopefully) */ ++ while (!nouveau_channel_idle(dev_priv->fifos[i])) ++ if (engine->timer.read(dev) - t_start > 2000000000ULL) { ++ DRM_ERROR("Failed to idle channel %d before" ++ "suspend.", dev_priv->fifos[i]->id); ++ return -EBUSY; ++ } ++ } ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV04_PGRAPH_FIFO, 0); ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ susres->fifo_mode = NV_READ(NV04_PFIFO_MODE); ++ ++ if (dev_priv->card_type >= NV_10) { ++ susres->graph_state = NV_READ(NV10_PGRAPH_STATE); ++ susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL); ++ } else { ++ susres->graph_state = NV_READ(NV04_PGRAPH_STATE); ++ susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL); ++ } ++ ++ engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]); ++ engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]); ++ nouveau_wait_for_idle(dev); ++ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ susres->ramin_copy[i] = NV_RI32(i << 2); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO, 1); ++ ++ return 0; ++} ++ ++static int nouveau_resume(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_suspend_resume *susres = &dev_priv->susres; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ if (!susres->ramin_copy) ++ return -EINVAL; ++ ++ DRM_DEBUG("Doing resume\n"); ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { ++ struct drm_agp_info info; ++ struct drm_agp_mode mode; ++ ++ /* agp bridge drivers don't re-enable agp on resume. lame. 
*/ ++ if ((i = drm_agp_info(dev, &info))) { ++ DRM_ERROR("Unable to get AGP info: %d\n", i); ++ return i; ++ } ++ mode.mode = info.mode; ++ if ((i = drm_agp_enable(dev, mode))) { ++ DRM_ERROR("Unable to enable AGP: %d\n", i); ++ return i; ++ } ++ } ++ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ NV_WI32(i << 2, susres->ramin_copy[i]); ++ ++ engine->mc.init(dev); ++ engine->timer.init(dev); ++ engine->fb.init(dev); ++ engine->graph.init(dev); ++ engine->fifo.init(dev); ++ ++ NV_WRITE(NV04_PGRAPH_FIFO, 0); ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ /* PMC power cycling PFIFO in init clobbers some of the stuff stored in ++ * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful ++ */ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ NV_WI32(i << 2, susres->ramin_copy[i]); ++ ++ engine->fifo.load_context(dev_priv->fifos[0]); ++ NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode); ++ ++ engine->graph.load_context(dev_priv->fifos[0]); ++ nouveau_wait_for_idle(dev); ++ ++ if (dev_priv->card_type >= NV_10) { ++ NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state); ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control); ++ } else { ++ NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control); ++ } ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO, 0x1); ++ ++ if (dev->irq_enabled) ++ nouveau_irq_postinstall(dev); ++ ++ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER); ++ susres->ramin_copy = NULL; ++ susres->ramin_size = 0; ++ ++ return 0; ++} ++ ++int nouveau_ioctl_suspend(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ return nouveau_suspend(dev); ++} ++ ++int nouveau_ioctl_resume(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ return nouveau_resume(dev); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_swmthd.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_swmthd.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_swmthd.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_swmthd.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,191 @@ ++/* ++ * Copyright (C) 2007 Arthur Huillet. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Arthur Huillet ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_reg.h" ++ ++/*TODO: add a "card_type" attribute*/ ++typedef struct{ ++ uint32_t oclass; /* object class for this software method */ ++ uint32_t mthd; /* method number */ ++ void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */ ++ } nouveau_software_method_t; ++ ++ ++ /* This function handles the NV04 setcontext software methods. ++One function for all because they are very similar.*/ ++static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF; ++ uint32_t value_to_set = 0, bit_to_set = 0; ++ ++ switch ( oclass ) { ++ case 0x4a: ++ switch ( mthd ) { ++ case 0x188 : ++ case 0x18c : ++ bit_to_set = 0; ++ break; ++ case 0x198 : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ default : ; ++ }; ++ break; ++ case 0x5c: ++ switch ( mthd ) { ++ case 0x184: ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x188: ++ case 0x18c: ++ bit_to_set = 0; ++ break; ++ case 0x198: ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x5f: ++ switch ( mthd ) { ++ case 0x184 : ++ bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/ ++ break; ++ case 0x188 : ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x18c : ++ case 0x190 : ++ bit_to_set = 0; ++ break; ++ case 0x19c : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x61: ++ switch ( mthd ) { ++ case 0x188 : ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x18c : ++ case 0x190 : ++ bit_to_set = 0; ++ break; ++ case 0x19c : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x77: ++ switch ( mthd ) { ++ case 0x198 : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x304 : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG ++ break; ++ }; ++ break; ++ default :; ++ }; ++ ++ value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set; ++ ++ /*RAMIN*/ ++ nouveau_wait_for_idle(dev); ++ NV_WRITE(0x00700000 | inst_loc << 4, value_to_set); ++ ++ /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/ ++ NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, 
value_to_set); ++ ++ /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/ ++ NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set); ++} ++ ++ nouveau_software_method_t nouveau_sw_methods[] = { ++ /*NV04 context software methods*/ ++ { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x190, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x77, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x77, 0x304, nouveau_NV04_setcontext_sw_method }, ++ /*terminator*/ ++ { 0x0, 0x0, NULL, }, ++ }; ++ ++ int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) { ++ int i = 0; ++ while ( nouveau_sw_methods[ i ] . method_code != NULL ) ++ { ++ if ( nouveau_sw_methods[ i ] . oclass == oclass && nouveau_sw_methods[ i ] . mthd == method ) ++ { ++ nouveau_sw_methods[ i ] . method_code(dev, oclass, method); ++ return 0; ++ } ++ i ++; ++ } ++ ++ return 1; ++ } +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_swmthd.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_swmthd.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nouveau_swmthd.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nouveau_swmthd.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,33 @@ ++/* ++ * Copyright (C) 2007 Arthur Huillet. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Arthur Huillet ++ */ ++ ++int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,94 @@ ++/* nv_drv.c -- nv driver -*- linux-c -*- ++ * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * Copyright 2005 Lars Knoll ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. 
(Rik) Faith ++ * Daryll Strauss ++ * Gareth Hughes ++ * Lars Knoll ++ */ ++ ++#include "drmP.h" ++#include "nv_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ nv_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_MTRR | DRIVER_USE_AGP, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init nv_init(void) ++{ ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit nv_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(nv_init); ++module_exit(nv_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,52 @@ ++/* nv_drv.h -- NV DRM template customization -*- linux-c -*- ++ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com ++ * ++ * Copyright 2005 Lars Knoll ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Lars Knoll ++ */ ++ ++#ifndef __NV_H__ ++#define __NV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Lars Knoll" ++ ++#define DRIVER_NAME "nv" ++#define DRIVER_DESC "NV" ++#define DRIVER_DATE "20051006" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 1 ++ ++#define NV04 04 ++#define NV10 10 ++#define NV20 20 ++#define NV30 30 ++#define NV40 40 ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_fb.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_fb.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_fb.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_fb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,23 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows ++ * nvidia reading PFB_CFG_0, then writing back its original value. ++ * (which was 0x701114 in this case) ++ */ ++ NV_WRITE(NV04_PFB_CFG0, 0x1114); ++ ++ return 0; ++} ++ ++void ++nv04_fb_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_fifo.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_fifo.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_fifo.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,138 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \ ++ NV04_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV04_RAMFC_##offset/4) ++#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) ++#define NV04_RAMFC__SIZE 32 ++ ++int ++nv04_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv04_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, ++ NV04_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Setup initial state */ ++ RAMFC_WR(DMA_PUT, chan->pushbuf_base); ++ RAMFC_WR(DMA_GET, chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0)); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id)); ++ return 0; ++} ++ ++void ++nv04_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id)); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv04_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); ++ ++ tmp = RAMFC_RD(DMA_INSTANCE); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH)); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE)); ++ ++ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv04_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; ++ tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE); ++ RAMFC_WR(DMA_INSTANCE, tmp); ++ ++ RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); ++ RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_graph.c 
kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_graph.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_graph.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,516 @@ ++/* ++ * Copyright 2007 Stephane Marchesin ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++ ++static uint32_t nv04_graph_ctx_regs [] = { ++ NV04_PGRAPH_CTX_SWITCH1, ++ NV04_PGRAPH_CTX_SWITCH2, ++ NV04_PGRAPH_CTX_SWITCH3, ++ NV04_PGRAPH_CTX_SWITCH4, ++ NV04_PGRAPH_CTX_CACHE1, ++ NV04_PGRAPH_CTX_CACHE2, ++ NV04_PGRAPH_CTX_CACHE3, ++ NV04_PGRAPH_CTX_CACHE4, ++ 0x00400184, ++ 0x004001a4, ++ 0x004001c4, ++ 0x004001e4, ++ 0x00400188, ++ 0x004001a8, ++ 0x004001c8, ++ 0x004001e8, ++ 0x0040018c, ++ 0x004001ac, ++ 0x004001cc, ++ 0x004001ec, ++ 0x00400190, ++ 0x004001b0, ++ 0x004001d0, ++ 0x004001f0, ++ 0x00400194, ++ 0x004001b4, ++ 0x004001d4, ++ 0x004001f4, ++ 0x00400198, ++ 0x004001b8, ++ 0x004001d8, ++ 0x004001f8, ++ 0x0040019c, ++ 0x004001bc, ++ 0x004001dc, ++ 0x004001fc, ++ 0x00400174, ++ NV04_PGRAPH_DMA_START_0, ++ NV04_PGRAPH_DMA_START_1, ++ NV04_PGRAPH_DMA_LENGTH, ++ NV04_PGRAPH_DMA_MISC, ++ NV04_PGRAPH_DMA_PITCH, ++ NV04_PGRAPH_BOFFSET0, ++ NV04_PGRAPH_BBASE0, ++ NV04_PGRAPH_BLIMIT0, ++ NV04_PGRAPH_BOFFSET1, ++ NV04_PGRAPH_BBASE1, ++ NV04_PGRAPH_BLIMIT1, ++ NV04_PGRAPH_BOFFSET2, ++ NV04_PGRAPH_BBASE2, ++ NV04_PGRAPH_BLIMIT2, ++ NV04_PGRAPH_BOFFSET3, ++ NV04_PGRAPH_BBASE3, ++ NV04_PGRAPH_BLIMIT3, ++ NV04_PGRAPH_BOFFSET4, ++ NV04_PGRAPH_BBASE4, ++ NV04_PGRAPH_BLIMIT4, ++ NV04_PGRAPH_BOFFSET5, ++ NV04_PGRAPH_BBASE5, ++ NV04_PGRAPH_BLIMIT5, ++ NV04_PGRAPH_BPITCH0, ++ NV04_PGRAPH_BPITCH1, ++ NV04_PGRAPH_BPITCH2, ++ NV04_PGRAPH_BPITCH3, ++ NV04_PGRAPH_BPITCH4, ++ NV04_PGRAPH_SURFACE, ++ NV04_PGRAPH_STATE, ++ NV04_PGRAPH_BSWIZZLE2, ++ NV04_PGRAPH_BSWIZZLE5, ++ NV04_PGRAPH_BPIXEL, ++ NV04_PGRAPH_NOTIFY, ++ NV04_PGRAPH_PATT_COLOR0, ++ NV04_PGRAPH_PATT_COLOR1, ++ NV04_PGRAPH_PATT_COLORRAM+0x00, ++ NV04_PGRAPH_PATT_COLORRAM+0x01, ++ NV04_PGRAPH_PATT_COLORRAM+0x02, ++ NV04_PGRAPH_PATT_COLORRAM+0x03, ++ NV04_PGRAPH_PATT_COLORRAM+0x04, ++ NV04_PGRAPH_PATT_COLORRAM+0x05, ++ NV04_PGRAPH_PATT_COLORRAM+0x06, ++ NV04_PGRAPH_PATT_COLORRAM+0x07, ++ NV04_PGRAPH_PATT_COLORRAM+0x08, ++ 
NV04_PGRAPH_PATT_COLORRAM+0x09, ++ NV04_PGRAPH_PATT_COLORRAM+0x0A, ++ NV04_PGRAPH_PATT_COLORRAM+0x0B, ++ NV04_PGRAPH_PATT_COLORRAM+0x0C, ++ NV04_PGRAPH_PATT_COLORRAM+0x0D, ++ NV04_PGRAPH_PATT_COLORRAM+0x0E, ++ NV04_PGRAPH_PATT_COLORRAM+0x0F, ++ NV04_PGRAPH_PATT_COLORRAM+0x10, ++ NV04_PGRAPH_PATT_COLORRAM+0x11, ++ NV04_PGRAPH_PATT_COLORRAM+0x12, ++ NV04_PGRAPH_PATT_COLORRAM+0x13, ++ NV04_PGRAPH_PATT_COLORRAM+0x14, ++ NV04_PGRAPH_PATT_COLORRAM+0x15, ++ NV04_PGRAPH_PATT_COLORRAM+0x16, ++ NV04_PGRAPH_PATT_COLORRAM+0x17, ++ NV04_PGRAPH_PATT_COLORRAM+0x18, ++ NV04_PGRAPH_PATT_COLORRAM+0x19, ++ NV04_PGRAPH_PATT_COLORRAM+0x1A, ++ NV04_PGRAPH_PATT_COLORRAM+0x1B, ++ NV04_PGRAPH_PATT_COLORRAM+0x1C, ++ NV04_PGRAPH_PATT_COLORRAM+0x1D, ++ NV04_PGRAPH_PATT_COLORRAM+0x1E, ++ NV04_PGRAPH_PATT_COLORRAM+0x1F, ++ NV04_PGRAPH_PATT_COLORRAM+0x20, ++ NV04_PGRAPH_PATT_COLORRAM+0x21, ++ NV04_PGRAPH_PATT_COLORRAM+0x22, ++ NV04_PGRAPH_PATT_COLORRAM+0x23, ++ NV04_PGRAPH_PATT_COLORRAM+0x24, ++ NV04_PGRAPH_PATT_COLORRAM+0x25, ++ NV04_PGRAPH_PATT_COLORRAM+0x26, ++ NV04_PGRAPH_PATT_COLORRAM+0x27, ++ NV04_PGRAPH_PATT_COLORRAM+0x28, ++ NV04_PGRAPH_PATT_COLORRAM+0x29, ++ NV04_PGRAPH_PATT_COLORRAM+0x2A, ++ NV04_PGRAPH_PATT_COLORRAM+0x2B, ++ NV04_PGRAPH_PATT_COLORRAM+0x2C, ++ NV04_PGRAPH_PATT_COLORRAM+0x2D, ++ NV04_PGRAPH_PATT_COLORRAM+0x2E, ++ NV04_PGRAPH_PATT_COLORRAM+0x2F, ++ NV04_PGRAPH_PATT_COLORRAM+0x30, ++ NV04_PGRAPH_PATT_COLORRAM+0x31, ++ NV04_PGRAPH_PATT_COLORRAM+0x32, ++ NV04_PGRAPH_PATT_COLORRAM+0x33, ++ NV04_PGRAPH_PATT_COLORRAM+0x34, ++ NV04_PGRAPH_PATT_COLORRAM+0x35, ++ NV04_PGRAPH_PATT_COLORRAM+0x36, ++ NV04_PGRAPH_PATT_COLORRAM+0x37, ++ NV04_PGRAPH_PATT_COLORRAM+0x38, ++ NV04_PGRAPH_PATT_COLORRAM+0x39, ++ NV04_PGRAPH_PATT_COLORRAM+0x3A, ++ NV04_PGRAPH_PATT_COLORRAM+0x3B, ++ NV04_PGRAPH_PATT_COLORRAM+0x3C, ++ NV04_PGRAPH_PATT_COLORRAM+0x3D, ++ NV04_PGRAPH_PATT_COLORRAM+0x3E, ++ NV04_PGRAPH_PATT_COLORRAM+0x3F, ++ NV04_PGRAPH_PATTERN, ++ 0x0040080c, ++ NV04_PGRAPH_PATTERN_SHAPE, ++ 0x00400600, ++ NV04_PGRAPH_ROP3, ++ NV04_PGRAPH_CHROMA, ++ NV04_PGRAPH_BETA_AND, ++ NV04_PGRAPH_BETA_PREMULT, ++ NV04_PGRAPH_CONTROL0, ++ NV04_PGRAPH_CONTROL1, ++ NV04_PGRAPH_CONTROL2, ++ NV04_PGRAPH_BLEND, ++ NV04_PGRAPH_STORED_FMT, ++ NV04_PGRAPH_SOURCE_COLOR, ++ 0x00400560, ++ 0x00400568, ++ 0x00400564, ++ 0x0040056c, ++ 0x00400400, ++ 0x00400480, ++ 0x00400404, ++ 0x00400484, ++ 0x00400408, ++ 0x00400488, ++ 0x0040040c, ++ 0x0040048c, ++ 0x00400410, ++ 0x00400490, ++ 0x00400414, ++ 0x00400494, ++ 0x00400418, ++ 0x00400498, ++ 0x0040041c, ++ 0x0040049c, ++ 0x00400420, ++ 0x004004a0, ++ 0x00400424, ++ 0x004004a4, ++ 0x00400428, ++ 0x004004a8, ++ 0x0040042c, ++ 0x004004ac, ++ 0x00400430, ++ 0x004004b0, ++ 0x00400434, ++ 0x004004b4, ++ 0x00400438, ++ 0x004004b8, ++ 0x0040043c, ++ 0x004004bc, ++ 0x00400440, ++ 0x004004c0, ++ 0x00400444, ++ 0x004004c4, ++ 0x00400448, ++ 0x004004c8, ++ 0x0040044c, ++ 0x004004cc, ++ 0x00400450, ++ 0x004004d0, ++ 0x00400454, ++ 0x004004d4, ++ 0x00400458, ++ 0x004004d8, ++ 0x0040045c, ++ 0x004004dc, ++ 0x00400460, ++ 0x004004e0, ++ 0x00400464, ++ 0x004004e4, ++ 0x00400468, ++ 0x004004e8, ++ 0x0040046c, ++ 0x004004ec, ++ 0x00400470, ++ 0x004004f0, ++ 0x00400474, ++ 0x004004f4, ++ 0x00400478, ++ 0x004004f8, ++ 0x0040047c, ++ 0x004004fc, ++ 0x0040053c, ++ 0x00400544, ++ 0x00400540, ++ 0x00400548, ++ 0x00400560, ++ 0x00400568, ++ 0x00400564, ++ 0x0040056c, ++ 0x00400534, ++ 0x00400538, ++ 0x00400514, ++ 0x00400518, ++ 0x0040051c, ++ 0x00400520, ++ 0x00400524, ++ 0x00400528, ++ 0x0040052c, ++ 0x00400530, ++ 
0x00400d00, ++ 0x00400d40, ++ 0x00400d80, ++ 0x00400d04, ++ 0x00400d44, ++ 0x00400d84, ++ 0x00400d08, ++ 0x00400d48, ++ 0x00400d88, ++ 0x00400d0c, ++ 0x00400d4c, ++ 0x00400d8c, ++ 0x00400d10, ++ 0x00400d50, ++ 0x00400d90, ++ 0x00400d14, ++ 0x00400d54, ++ 0x00400d94, ++ 0x00400d18, ++ 0x00400d58, ++ 0x00400d98, ++ 0x00400d1c, ++ 0x00400d5c, ++ 0x00400d9c, ++ 0x00400d20, ++ 0x00400d60, ++ 0x00400da0, ++ 0x00400d24, ++ 0x00400d64, ++ 0x00400da4, ++ 0x00400d28, ++ 0x00400d68, ++ 0x00400da8, ++ 0x00400d2c, ++ 0x00400d6c, ++ 0x00400dac, ++ 0x00400d30, ++ 0x00400d70, ++ 0x00400db0, ++ 0x00400d34, ++ 0x00400d74, ++ 0x00400db4, ++ 0x00400d38, ++ 0x00400d78, ++ 0x00400db8, ++ 0x00400d3c, ++ 0x00400d7c, ++ 0x00400dbc, ++ 0x00400590, ++ 0x00400594, ++ 0x00400598, ++ 0x0040059c, ++ 0x004005a8, ++ 0x004005ac, ++ 0x004005b0, ++ 0x004005b4, ++ 0x004005c0, ++ 0x004005c4, ++ 0x004005c8, ++ 0x004005cc, ++ 0x004005d0, ++ 0x004005d4, ++ 0x004005d8, ++ 0x004005dc, ++ 0x004005e0, ++ NV04_PGRAPH_PASSTHRU_0, ++ NV04_PGRAPH_PASSTHRU_1, ++ NV04_PGRAPH_PASSTHRU_2, ++ NV04_PGRAPH_DVD_COLORFMT, ++ NV04_PGRAPH_SCALED_FORMAT, ++ NV04_PGRAPH_MISC24_0, ++ NV04_PGRAPH_MISC24_1, ++ NV04_PGRAPH_MISC24_2, ++ 0x00400500, ++ 0x00400504, ++ NV04_PGRAPH_VALID1, ++ NV04_PGRAPH_VALID2 ++ ++ ++}; ++ ++struct graph_state { ++ int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])]; ++}; ++ ++void nouveau_nv04_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_channel *next, *last; ++ int chid; ++ ++ if (!dev) { ++ DRM_DEBUG("Invalid drm_device\n"); ++ return; ++ } ++ dev_priv = dev->dev_private; ++ if (!dev_priv) { ++ DRM_DEBUG("Invalid drm_nouveau_private\n"); ++ return; ++ } ++ if (!dev_priv->fifos) { ++ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); ++ return; ++ } ++ ++ chid = engine->fifo.channel_id(dev); ++ next = dev_priv->fifos[chid]; ++ ++ if (!next) { ++ DRM_DEBUG("Invalid next channel\n"); ++ return; ++ } ++ ++ chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1); ++ last = dev_priv->fifos[chid]; ++ ++ if (!last) { ++ DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", ++ next->id); ++ } else { ++ DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", ++ last->id, next->id); ++ } ++ ++/* NV_WRITE(NV03_PFIFO_CACHES, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ ++ if (last) ++ nv04_graph_save_context(last); ++ ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000); ++ NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24)); ++ ++ nouveau_wait_for_idle(dev); ++ ++ nv04_graph_load_context(next); ++ ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24); ++ NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF); ++ ++/* NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x1); ++} ++ ++int nv04_graph_create_context(struct nouveau_channel *chan) { ++ struct graph_state* pgraph_ctx; ++ DRM_DEBUG("nv04_graph_context_create %d\n", chan->id); ++ ++ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), ++ DRM_MEM_DRIVER); ++ ++ if (pgraph_ctx == NULL) ++ return -ENOMEM; ++ ++ 
//dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; ++ pgraph_ctx->nv04[0] = 0x0001ffff; ++ /* is it really needed ??? */ ++ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); ++ //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); ++ ++ return 0; ++} ++ ++void nv04_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ ++ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); ++ chan->pgraph_ctx = NULL; ++} ++ ++int nv04_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); ++ ++ return 0; ++} ++ ++int nv04_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]); ++ ++ return 0; ++} ++ ++int nv04_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ /* Enable PGRAPH interrupts */ ++ NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_VALID1, 0); ++ NV_WRITE(NV04_PGRAPH_VALID2, 0); ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000); ++ /*1231C000 blob, 001 haiku*/ ++ //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100); ++ /*0x72111100 blob , 01 haiku*/ ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071); ++ /*haiku same*/ ++ ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); ++ /*haiku and blob 10d4*/ ++ ++ NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ /* These don't belong here, they're part of a per-channel context */ ++ NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void nv04_graph_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_instmem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_instmem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_instmem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_instmem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,159 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++static void ++nv04_instmem_determine_amount(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ /* Figure out how much instance memory we need */ ++ if (dev_priv->card_type >= NV_40) { ++ /* We'll want more instance memory than this on some NV4x cards. 
++ * There's a 16MB aperture to play with that maps onto the end ++ * of vram. For now, only reserve a small piece until we know ++ * more about what each chipset requires. ++ */ ++ dev_priv->ramin_rsvd_vram = (1*1024* 1024); ++ } else { ++ /*XXX: what *are* the limits on <NV40 cards? ++ */ ++ dev_priv->ramin_rsvd_vram = (512*1024); ++ } ++ DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10); ++ ++ /* Clear all of it, except the BIOS image that's in the first 64KiB */ ++ for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4) ++ NV_WI32(i, 0x00000000); ++} ++ ++static void ++nv04_instmem_configure_fixed_tables(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ /* FIFO hash table (RAMHT) ++ * use 4k hash table at RAMIN+0x10000 ++ * TODO: extend the hash table ++ */ ++ dev_priv->ramht_offset = 0x10000; ++ dev_priv->ramht_bits = 9; ++ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); ++ DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, ++ dev_priv->ramht_size); ++ ++ /* FIFO runout table (RAMRO) - 512k at 0x11200 */ ++ dev_priv->ramro_offset = 0x11200; ++ dev_priv->ramro_size = 512; ++ DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, ++ dev_priv->ramro_size); ++ ++ /* FIFO context table (RAMFC) ++ * NV40 : Not sure exactly how to position RAMFC on some cards, ++ * 0x30002 seems to position it at RAMIN+0x20000 on these ++ * cards. RAMFC is 4kb (32 fifos, 128byte entries). ++ * Others: Position RAMFC at RAMIN+0x11400 ++ */ ++ switch(dev_priv->card_type) ++ { ++ case NV_40: ++ case NV_44: ++ dev_priv->ramfc_offset = 0x20000; ++ dev_priv->ramfc_size = engine->fifo.channels * ++ nouveau_fifo_ctx_size(dev); ++ break; ++ case NV_30: ++ case NV_20: ++ case NV_17: ++ case NV_11: ++ case NV_10: ++ case NV_04: ++ default: ++ dev_priv->ramfc_offset = 0x11400; ++ dev_priv->ramfc_size = engine->fifo.channels * ++ nouveau_fifo_ctx_size(dev); ++ break; ++ } ++ DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, ++ dev_priv->ramfc_size); ++} ++ ++int nv04_instmem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t offset; ++ int ret = 0; ++ ++ nv04_instmem_determine_amount(dev); ++ nv04_instmem_configure_fixed_tables(dev); ++ ++ /* Create a heap to manage RAMIN allocations, we don't allocate ++ * the space that was reserved for RAMHT/FC/RO. ++ */ ++ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; ++ ++ /* On my NV4E, there's *something* clobbering the 16KiB just after ++ * where we setup these fixed tables. No idea what it is just yet, ++ * so reserve this space on all NV4X cards for now. 
++ */ ++ if (dev_priv->card_type >= NV_40) ++ offset += 16*1024; ++ ++ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, ++ offset, dev_priv->ramin_rsvd_vram - offset); ++ if (ret) { ++ dev_priv->ramin_heap = NULL; ++ DRM_ERROR("Failed to init RAMIN heap\n"); ++ } ++ ++ return ret; ++} ++ ++void ++nv04_instmem_takedown(struct drm_device *dev) ++{ ++} ++ ++int ++nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) ++{ ++ if (gpuobj->im_backing) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++void ++nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (gpuobj && gpuobj->im_backing) { ++ if (gpuobj->im_bound) ++ dev_priv->Engine.instmem.unbind(dev, gpuobj); ++ gpuobj->im_backing = NULL; ++ } ++} ++ ++int ++nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ if (!gpuobj->im_pramin || gpuobj->im_bound) ++ return -EINVAL; ++ ++ gpuobj->im_bound = 1; ++ return 0; ++} ++ ++int ++nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ if (gpuobj->im_bound == 0) ++ return -EINVAL; ++ ++ gpuobj->im_bound = 0; ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_mc.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_mc.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_mc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_mc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,22 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Power up everything, resetting each individual unit will ++ * be done later if needed. ++ */ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void ++nv04_mc_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_timer.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_timer.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv04_timer.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv04_timer.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,53 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_timer_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000); ++ NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF); ++ ++ /* Just use the pre-existing values when possible for now; these regs ++ * are not written in nv (driver writer missed a /4 on the address), and ++ * writing 8 and 3 to the correct regs breaks the timings on the LVDS ++ * hardware sequencing microcode. ++ * A correct solution (involving calculations with the GPU PLL) can ++ * be done when kernel modesetting lands ++ */ ++ if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) { ++ NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008); ++ NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003); ++ } ++ ++ return 0; ++} ++ ++uint64_t ++nv04_timer_read(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t low; ++ /* From kmmio dumps on nv28 this looks like how the blob does this. ++ * It reads the high dword twice, before and after. 
++ * The only explanation seems to be that the 64-bit timer counter ++ * advances between high and low dword reads and may corrupt the ++ * result. Not confirmed. ++ */ ++ uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1); ++ uint32_t high1; ++ do { ++ high1 = high2; ++ low = NV_READ(NV04_PTIMER_TIME_0); ++ high2 = NV_READ(NV04_PTIMER_TIME_1); ++ } while(high1 != high2); ++ return (((uint64_t)high2) << 32) | (uint64_t)low; ++} ++ ++void ++nv04_timer_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv10_fb.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv10_fb.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv10_fb.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv10_fb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,25 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv10_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_bar_size; ++ int i; ++ ++ fb_bar_size = drm_get_resource_len(dev, 0) - 1; ++ for (i=0; iramfc->gpuobj, \ ++ NV10_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV10_RAMFC_##offset/4) ++#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) ++#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) ++ ++int ++nv10_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV10_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv10_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, ++ NV10_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Fill entries that are seen filled in dumps of nvidia driver just ++ * after channel's is put into DMA mode ++ */ ++ RAMFC_WR(DMA_PUT , chan->pushbuf_base); ++ RAMFC_WR(DMA_GET , chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id)); ++ return 0; ++} ++ ++void ++nv10_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id)); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv10_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); ++ ++ tmp = RAMFC_RD(DMA_INSTANCE); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16); ++ 
NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH)); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); ++ ++ if (dev_priv->chipset >= 0x17) { ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE, ++ RAMFC_RD(ACQUIRE_VALUE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, ++ RAMFC_RD(ACQUIRE_TIMESTAMP)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, ++ RAMFC_RD(ACQUIRE_TIMEOUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE, ++ RAMFC_RD(SEMAPHORE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE, ++ RAMFC_RD(DMA_SUBROUTINE)); ++ } ++ ++ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv10_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; ++ tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); ++ RAMFC_WR(DMA_INSTANCE , tmp); ++ ++ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); ++ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ ++ if (dev_priv->chipset >= 0x17) { ++ RAMFC_WR(ACQUIRE_VALUE, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); ++ RAMFC_WR(ACQUIRE_TIMESTAMP, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); ++ RAMFC_WR(ACQUIRE_TIMEOUT, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); ++ RAMFC_WR(SEMAPHORE, ++ NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); ++ RAMFC_WR(DMA_SUBROUTINE, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ } ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv10_graph.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv10_graph.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv10_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv10_graph.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,914 @@ ++/* ++ * Copyright 2007 Matthieu CASTET ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++ ++#define NV10_FIFO_NUMBER 32 ++ ++struct pipe_state { ++ uint32_t pipe_0x0000[0x040/4]; ++ uint32_t pipe_0x0040[0x010/4]; ++ uint32_t pipe_0x0200[0x0c0/4]; ++ uint32_t pipe_0x4400[0x080/4]; ++ uint32_t pipe_0x6400[0x3b0/4]; ++ uint32_t pipe_0x6800[0x2f0/4]; ++ uint32_t pipe_0x6c00[0x030/4]; ++ uint32_t pipe_0x7000[0x130/4]; ++ uint32_t pipe_0x7400[0x0c0/4]; ++ uint32_t pipe_0x7800[0x0c0/4]; ++}; ++ ++static int nv10_graph_ctx_regs [] = { ++NV10_PGRAPH_CTX_SWITCH1, ++NV10_PGRAPH_CTX_SWITCH2, ++NV10_PGRAPH_CTX_SWITCH3, ++NV10_PGRAPH_CTX_SWITCH4, ++NV10_PGRAPH_CTX_SWITCH5, ++NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */ ++NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */ ++NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */ ++NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */ ++NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */ ++0x00400164, ++0x00400184, ++0x004001a4, ++0x004001c4, ++0x004001e4, ++0x00400168, ++0x00400188, ++0x004001a8, ++0x004001c8, ++0x004001e8, ++0x0040016c, ++0x0040018c, ++0x004001ac, ++0x004001cc, ++0x004001ec, ++0x00400170, ++0x00400190, ++0x004001b0, ++0x004001d0, ++0x004001f0, ++0x00400174, ++0x00400194, ++0x004001b4, ++0x004001d4, ++0x004001f4, ++0x00400178, ++0x00400198, ++0x004001b8, ++0x004001d8, ++0x004001f8, ++0x0040017c, ++0x0040019c, ++0x004001bc, ++0x004001dc, ++0x004001fc, ++NV10_PGRAPH_CTX_USER, ++NV04_PGRAPH_DMA_START_0, ++NV04_PGRAPH_DMA_START_1, ++NV04_PGRAPH_DMA_LENGTH, ++NV04_PGRAPH_DMA_MISC, ++NV10_PGRAPH_DMA_PITCH, ++NV04_PGRAPH_BOFFSET0, ++NV04_PGRAPH_BBASE0, ++NV04_PGRAPH_BLIMIT0, ++NV04_PGRAPH_BOFFSET1, ++NV04_PGRAPH_BBASE1, ++NV04_PGRAPH_BLIMIT1, ++NV04_PGRAPH_BOFFSET2, ++NV04_PGRAPH_BBASE2, ++NV04_PGRAPH_BLIMIT2, ++NV04_PGRAPH_BOFFSET3, ++NV04_PGRAPH_BBASE3, ++NV04_PGRAPH_BLIMIT3, ++NV04_PGRAPH_BOFFSET4, ++NV04_PGRAPH_BBASE4, ++NV04_PGRAPH_BLIMIT4, ++NV04_PGRAPH_BOFFSET5, ++NV04_PGRAPH_BBASE5, ++NV04_PGRAPH_BLIMIT5, ++NV04_PGRAPH_BPITCH0, ++NV04_PGRAPH_BPITCH1, ++NV04_PGRAPH_BPITCH2, ++NV04_PGRAPH_BPITCH3, ++NV04_PGRAPH_BPITCH4, ++NV10_PGRAPH_SURFACE, ++NV10_PGRAPH_STATE, ++NV04_PGRAPH_BSWIZZLE2, ++NV04_PGRAPH_BSWIZZLE5, ++NV04_PGRAPH_BPIXEL, ++NV10_PGRAPH_NOTIFY, ++NV04_PGRAPH_PATT_COLOR0, ++NV04_PGRAPH_PATT_COLOR1, ++NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */ ++0x00400904, ++0x00400908, ++0x0040090c, ++0x00400910, ++0x00400914, ++0x00400918, ++0x0040091c, ++0x00400920, ++0x00400924, ++0x00400928, ++0x0040092c, ++0x00400930, ++0x00400934, ++0x00400938, ++0x0040093c, ++0x00400940, ++0x00400944, ++0x00400948, ++0x0040094c, ++0x00400950, ++0x00400954, ++0x00400958, ++0x0040095c, ++0x00400960, ++0x00400964, ++0x00400968, ++0x0040096c, ++0x00400970, ++0x00400974, ++0x00400978, ++0x0040097c, ++0x00400980, ++0x00400984, ++0x00400988, ++0x0040098c, ++0x00400990, ++0x00400994, ++0x00400998, ++0x0040099c, ++0x004009a0, ++0x004009a4, ++0x004009a8, ++0x004009ac, ++0x004009b0, ++0x004009b4, ++0x004009b8, ++0x004009bc, ++0x004009c0, ++0x004009c4, ++0x004009c8, ++0x004009cc, ++0x004009d0, ++0x004009d4, ++0x004009d8, ++0x004009dc, ++0x004009e0, ++0x004009e4, ++0x004009e8, 
++0x004009ec, ++0x004009f0, ++0x004009f4, ++0x004009f8, ++0x004009fc, ++NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */ ++0x0040080c, ++NV04_PGRAPH_PATTERN_SHAPE, ++NV03_PGRAPH_MONO_COLOR0, ++NV04_PGRAPH_ROP3, ++NV04_PGRAPH_CHROMA, ++NV04_PGRAPH_BETA_AND, ++NV04_PGRAPH_BETA_PREMULT, ++0x00400e70, ++0x00400e74, ++0x00400e78, ++0x00400e7c, ++0x00400e80, ++0x00400e84, ++0x00400e88, ++0x00400e8c, ++0x00400ea0, ++0x00400ea4, ++0x00400ea8, ++0x00400e90, ++0x00400e94, ++0x00400e98, ++0x00400e9c, ++NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00 to 0x400f1c */ ++NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20 to 0x400f3c */ ++0x00400f04, ++0x00400f24, ++0x00400f08, ++0x00400f28, ++0x00400f0c, ++0x00400f2c, ++0x00400f10, ++0x00400f30, ++0x00400f14, ++0x00400f34, ++0x00400f18, ++0x00400f38, ++0x00400f1c, ++0x00400f3c, ++NV10_PGRAPH_XFMODE0, ++NV10_PGRAPH_XFMODE1, ++NV10_PGRAPH_GLOBALSTATE0, ++NV10_PGRAPH_GLOBALSTATE1, ++NV04_PGRAPH_STORED_FMT, ++NV04_PGRAPH_SOURCE_COLOR, ++NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */ ++NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */ ++0x00400404, ++0x00400484, ++0x00400408, ++0x00400488, ++0x0040040c, ++0x0040048c, ++0x00400410, ++0x00400490, ++0x00400414, ++0x00400494, ++0x00400418, ++0x00400498, ++0x0040041c, ++0x0040049c, ++0x00400420, ++0x004004a0, ++0x00400424, ++0x004004a4, ++0x00400428, ++0x004004a8, ++0x0040042c, ++0x004004ac, ++0x00400430, ++0x004004b0, ++0x00400434, ++0x004004b4, ++0x00400438, ++0x004004b8, ++0x0040043c, ++0x004004bc, ++0x00400440, ++0x004004c0, ++0x00400444, ++0x004004c4, ++0x00400448, ++0x004004c8, ++0x0040044c, ++0x004004cc, ++0x00400450, ++0x004004d0, ++0x00400454, ++0x004004d4, ++0x00400458, ++0x004004d8, ++0x0040045c, ++0x004004dc, ++0x00400460, ++0x004004e0, ++0x00400464, ++0x004004e4, ++0x00400468, ++0x004004e8, ++0x0040046c, ++0x004004ec, ++0x00400470, ++0x004004f0, ++0x00400474, ++0x004004f4, ++0x00400478, ++0x004004f8, ++0x0040047c, ++0x004004fc, ++NV03_PGRAPH_ABS_UCLIP_XMIN, ++NV03_PGRAPH_ABS_UCLIP_XMAX, ++NV03_PGRAPH_ABS_UCLIP_YMIN, ++NV03_PGRAPH_ABS_UCLIP_YMAX, ++0x00400550, ++0x00400558, ++0x00400554, ++0x0040055c, ++NV03_PGRAPH_ABS_UCLIPA_XMIN, ++NV03_PGRAPH_ABS_UCLIPA_XMAX, ++NV03_PGRAPH_ABS_UCLIPA_YMIN, ++NV03_PGRAPH_ABS_UCLIPA_YMAX, ++NV03_PGRAPH_ABS_ICLIP_XMAX, ++NV03_PGRAPH_ABS_ICLIP_YMAX, ++NV03_PGRAPH_XY_LOGIC_MISC0, ++NV03_PGRAPH_XY_LOGIC_MISC1, ++NV03_PGRAPH_XY_LOGIC_MISC2, ++NV03_PGRAPH_XY_LOGIC_MISC3, ++NV03_PGRAPH_CLIPX_0, ++NV03_PGRAPH_CLIPX_1, ++NV03_PGRAPH_CLIPY_0, ++NV03_PGRAPH_CLIPY_1, ++NV10_PGRAPH_COMBINER0_IN_ALPHA, ++NV10_PGRAPH_COMBINER1_IN_ALPHA, ++NV10_PGRAPH_COMBINER0_IN_RGB, ++NV10_PGRAPH_COMBINER1_IN_RGB, ++NV10_PGRAPH_COMBINER_COLOR0, ++NV10_PGRAPH_COMBINER_COLOR1, ++NV10_PGRAPH_COMBINER0_OUT_ALPHA, ++NV10_PGRAPH_COMBINER1_OUT_ALPHA, ++NV10_PGRAPH_COMBINER0_OUT_RGB, ++NV10_PGRAPH_COMBINER1_OUT_RGB, ++NV10_PGRAPH_COMBINER_FINAL0, ++NV10_PGRAPH_COMBINER_FINAL1, ++0x00400e00, ++0x00400e04, ++0x00400e08, ++0x00400e0c, ++0x00400e10, ++0x00400e14, ++0x00400e18, ++0x00400e1c, ++0x00400e20, ++0x00400e24, ++0x00400e28, ++0x00400e2c, ++0x00400e30, ++0x00400e34, ++0x00400e38, ++0x00400e3c, ++NV04_PGRAPH_PASSTHRU_0, ++NV04_PGRAPH_PASSTHRU_1, ++NV04_PGRAPH_PASSTHRU_2, ++NV10_PGRAPH_DIMX_TEXTURE, ++NV10_PGRAPH_WDIMX_TEXTURE, ++NV10_PGRAPH_DVD_COLORFMT, ++NV10_PGRAPH_SCALED_FORMAT, ++NV04_PGRAPH_MISC24_0, ++NV04_PGRAPH_MISC24_1, ++NV04_PGRAPH_MISC24_2, ++NV03_PGRAPH_X_MISC, ++NV03_PGRAPH_Y_MISC, ++NV04_PGRAPH_VALID1, 
++NV04_PGRAPH_VALID2, ++}; ++ ++static int nv17_graph_ctx_regs [] = { ++NV10_PGRAPH_DEBUG_4, ++0x004006b0, ++0x00400eac, ++0x00400eb0, ++0x00400eb4, ++0x00400eb8, ++0x00400ebc, ++0x00400ec0, ++0x00400ec4, ++0x00400ec8, ++0x00400ecc, ++0x00400ed0, ++0x00400ed4, ++0x00400ed8, ++0x00400edc, ++0x00400ee0, ++0x00400a00, ++0x00400a04, ++}; ++ ++struct graph_state { ++ int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])]; ++ int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])]; ++ struct pipe_state pipe_state; ++}; ++ ++static void nv10_graph_save_pipe(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ int i; ++#define PIPE_SAVE(addr) \ ++ do { \ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ ++ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ ++ fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \ ++ } while (0) ++ ++ PIPE_SAVE(0x4400); ++ PIPE_SAVE(0x0200); ++ PIPE_SAVE(0x6400); ++ PIPE_SAVE(0x6800); ++ PIPE_SAVE(0x6c00); ++ PIPE_SAVE(0x7000); ++ PIPE_SAVE(0x7400); ++ PIPE_SAVE(0x7800); ++ PIPE_SAVE(0x0040); ++ PIPE_SAVE(0x0000); ++ ++#undef PIPE_SAVE ++} ++ ++static void nv10_graph_load_pipe(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ int i; ++ uint32_t xfmode0, xfmode1; ++#define PIPE_RESTORE(addr) \ ++ do { \ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ ++ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ ++ } while (0) ++ ++ ++ nouveau_wait_for_idle(dev); ++ /* XXX check haiku comments */ ++ xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0); ++ xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1); ++ NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000); ++ NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); ++ for (i = 0; i < 4; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); ++ for (i = 0; i < 4; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); ++ for (i = 0; i < 3; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); ++ for (i = 0; i < 3; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008); ++ ++ ++ PIPE_RESTORE(0x0200); ++ nouveau_wait_for_idle(dev); ++ ++ /* restore XFMODE */ ++ NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0); ++ NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1); ++ PIPE_RESTORE(0x6400); ++ PIPE_RESTORE(0x6800); ++ PIPE_RESTORE(0x6c00); ++ PIPE_RESTORE(0x7000); ++ PIPE_RESTORE(0x7400); ++ PIPE_RESTORE(0x7800); ++ PIPE_RESTORE(0x4400); ++ PIPE_RESTORE(0x0000); ++ PIPE_RESTORE(0x0040); ++ nouveau_wait_for_idle(dev); ++ ++#undef PIPE_RESTORE ++} ++ ++static void nv10_graph_create_pipe(struct nouveau_channel *chan) { ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ uint32_t *fifo_pipe_state_addr; ++ int i; ++#define PIPE_INIT(addr) \ ++ do { \ ++ fifo_pipe_state_addr = 
fifo_pipe_state->pipe_##addr; \ ++ } while (0) ++#define PIPE_INIT_END(addr) \ ++ do { \ ++ if (fifo_pipe_state_addr != \ ++ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \ ++ DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \ ++ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \ ++ } while (0) ++#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value ++ ++ PIPE_INIT(0x0200); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0200); ++ ++ PIPE_INIT(0x6400); ++ for (i = 0; i < 211; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f000000); ++ NV_WRITE_PIPE_INIT(0x3f000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ PIPE_INIT_END(0x6400); ++ ++ PIPE_INIT(0x6800); ++ for (i = 0; i < 162; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ for (i = 0; i < 25; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x6800); ++ ++ PIPE_INIT(0x6c00); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0xbf800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x6c00); ++ ++ PIPE_INIT(0x7000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ 
NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ for (i = 0; i < 35; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7000); ++ ++ PIPE_INIT(0x7400); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7400); ++ ++ PIPE_INIT(0x7800); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7800); ++ ++ PIPE_INIT(0x4400); ++ for (i = 0; i < 32; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x4400); ++ ++ PIPE_INIT(0x0000); ++ for (i = 0; i < 16; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0000); ++ ++ PIPE_INIT(0x0040); ++ for (i = 0; i < 4; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0040); ++ ++#undef PIPE_INIT ++#undef PIPE_INIT_END ++#undef NV_WRITE_PIPE_INIT ++} ++ ++static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) ++{ ++ int i; ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) { ++ if (nv10_graph_ctx_regs[i] == reg) ++ return i; ++ } ++ DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg); ++ return -1; ++} ++ ++static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) ++{ ++ int i; ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) { ++ if (nv17_graph_ctx_regs[i] == reg) ++ return i; ++ } ++ DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg); ++ return -1; ++} ++ ++int nv10_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]); ++ if (dev_priv->chipset>=0x17) { ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]); ++ } ++ ++ nv10_graph_load_pipe(chan); ++ ++ return 0; ++} ++ ++int nv10_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]); ++ if (dev_priv->chipset>=0x17) { ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]); ++ } ++ ++ nv10_graph_save_pipe(chan); ++ ++ return 0; ++} ++ ++void nouveau_nv10_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv; ++ struct nouveau_engine *engine; ++ struct nouveau_channel *next, *last; ++ int chid; ++ ++ if (!dev) { ++ DRM_DEBUG("Invalid drm_device\n"); ++ return; ++ } ++ dev_priv = dev->dev_private; ++ if (!dev_priv) { ++ DRM_DEBUG("Invalid drm_nouveau_private\n"); ++ return; ++ } ++ if (!dev_priv->fifos) { ++ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); ++ return; ++ } ++ engine = &dev_priv->Engine; ++ ++ chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & ++ (engine->fifo.channels - 1); ++ next = dev_priv->fifos[chid]; ++ ++ if (!next) { ++ DRM_ERROR("Invalid next channel\n"); ++ return; ++ } ++ ++ chid = 
(NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & ++ (engine->fifo.channels - 1); ++ last = dev_priv->fifos[chid]; ++ ++ if (!last) { ++ DRM_INFO("WARNING: Invalid last channel, switch to %x\n", ++ next->id); ++ } else { ++ DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", ++ last->id, next->id); ++ } ++ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ if (last) { ++ nouveau_wait_for_idle(dev); ++ nv10_graph_save_context(last); ++ } ++ ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); ++ ++ nouveau_wait_for_idle(dev); ++ ++ nv10_graph_load_context(next); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO,0x1); ++} ++ ++#define NV_WRITE_CTX(reg, val) do { \ ++ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ ++ if (offset > 0) \ ++ pgraph_ctx->nv10[offset] = val; \ ++ } while (0) ++ ++#define NV17_WRITE_CTX(reg, val) do { \ ++ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \ ++ if (offset > 0) \ ++ pgraph_ctx->nv17[offset] = val; \ ++ } while (0) ++ ++int nv10_graph_create_context(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx; ++ ++ DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); ++ ++ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), ++ DRM_MEM_DRIVER); ++ ++ if (pgraph_ctx == NULL) ++ return -ENOMEM; ++ ++ /* mmio trace suggest that should be done in ddx with methods/objects */ ++#if 0 ++ uint32_t tmp, vramsz; ++ /* per channel init from ddx */ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ /*XXX the original ddx code, does this in 2 steps : ++ * tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ * NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ * tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ * NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ */ ++ tmp |= 0x00020100; ++ NV_WRITE_CTX(NV10_PGRAPH_SURFACE, tmp); ++ ++ vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE_CTX(NV04_PGRAPH_BOFFSET0, 0); ++ NV_WRITE_CTX(NV04_PGRAPH_BOFFSET1, 0); ++ NV_WRITE_CTX(NV04_PGRAPH_BLIMIT0 , vramsz); ++ NV_WRITE_CTX(NV04_PGRAPH_BLIMIT1 , vramsz); ++ ++ NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); ++ NV_WRITE_CTX(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); ++ ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++#endif ++ ++ NV_WRITE_CTX(0x00400e88, 0x08000000); ++ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); ++ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); ++ NV_WRITE_CTX(0x00400e10, 0x00001000); ++ NV_WRITE_CTX(0x00400e14, 0x00001000); ++ NV_WRITE_CTX(0x00400e30, 0x00080008); ++ NV_WRITE_CTX(0x00400e34, 0x00080008); ++ if (dev_priv->chipset>=0x17) { ++ /* is it really needed ??? 
*/ ++ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4)); ++ NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0)); ++ NV17_WRITE_CTX(0x00400eac, 0x0fff0000); ++ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); ++ NV17_WRITE_CTX(0x00400ec0, 0x00000080); ++ NV17_WRITE_CTX(0x00400ed0, 0x00000080); ++ } ++ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24); ++ ++ nv10_graph_create_pipe(chan); ++ return 0; ++} ++ ++void nv10_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int chid; ++ ++ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); ++ chan->pgraph_ctx = NULL; ++ ++ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1); ++ ++ /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ???? ++ */ ++#if 0 ++ /* does this avoid a potential context switch while we are written graph ++ * reg, or we should mask graph interrupt ??? ++ */ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ if (chid == chan->id) { ++ DRM_INFO("cleanning a channel with graph in current context\n"); ++ nouveau_wait_for_idle(dev); ++ DRM_INFO("reseting current graph context\n"); ++ /* can't be call here because of dynamic mem alloc */ ++ //nv10_graph_create_context(chan); ++ nv10_graph_load_context(chan); ++ } ++ NV_WRITE(NV04_PGRAPH_FIFO, 0x1); ++#else ++ if (chid == chan->id) { ++ DRM_INFO("cleanning a channel with graph in current context\n"); ++ } ++#endif ++} ++ ++int nv10_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); ++ //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */ ++ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 | ++ (1<<29) | ++ (1<<31)); ++ if (dev_priv->chipset>=0x17) { ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000); ++ NV_WRITE(0x004006b0, 0x40000020); ++ } ++ else ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); ++ ++ /* copy tile info from PFB */ ++ for (i=0; idev_private; ++ int i; ++/* ++write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements: +++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 +++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740c1c: 00000101 00000000 
00000000 00000000 00000000 00000111 00000000 00000000 +++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000 +++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303 +++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000 +++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000 +++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 +++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000 +++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... ++*/ ++ INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101); ++ INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111); ++ INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); ++ for (i = 0; i < 16; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff); ++ INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); ++ INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); ++ INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); ++ INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000); ++ INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); ++ ++/* ++... +++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++... +++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... 
++*/ ++ for (i = 0; i < 0x880; i += 0x10) { ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9); ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c); ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b); ++ } ++ ++/* ++write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements: +++0x00742fbc: 3f800000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x281c/4), 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements: +++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 +++0x0074301c: 00000000 bf800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000); ++ INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000); ++ INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000); ++ INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000); ++ INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000); ++ INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000); ++ INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000); ++ ++/* ++write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements: +++0x00742fcc: 00000000 3f800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000); ++ ++/* ++write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements: +++0x0074302c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements: +++0x00743c9c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements: +++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000); ++ ++/* ++write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements: +++0x00743c6c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements: +++0x00743ccc: 00000000 000003f8 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8); ++ ++/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */ ++ INSTANCE_WR(ctx, 0x3540/4, 0x002fe000); ++ ++/* ++write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements: +++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c ++*/ ++ for (i = 0; i < 8; ++i) ++ INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c); ++} ++ ++static void nv2a_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x33c/4, 0xffff0000); ++ for(i = 0x3a0; i< 0x3a8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x47c/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x4a8/4, 0x44400000); ++ for(i = 0x4d4; i< 0x4e4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x4f4; i< 0x504; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080000); ++ for(i = 0x50c; i< 0x51c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x51c; i< 0x52c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x000105b8); ++ for(i = 0x52c; i< 0x53c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for(i = 0x55c; i< 0x59c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x5fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x604/4, 0x00004000); ++ INSTANCE_WR(ctx, 0x610/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x618/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x61c/4, 0x00010000); ++ ++ for (i=0x1a9c; i <= 0x22fc/4; i += 32) { ++ INSTANCE_WR(ctx, i/4 , 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ ++ 
INSTANCE_WR(ctx, 0x269c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26dc/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x26ec/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x2700/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x3024/4, 0x000fe000); ++ INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8); ++ INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000); ++ for(i = 0x341c; i< 0x343c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x001c527c); ++} ++ ++static void nv25_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++/* ++write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements: +++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 +++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 +++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000 +++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000 +++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303 +++0x00740c3c: 00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000 +++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8 ++ +++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000 +++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000 +++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000 +++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d1c: 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101); ++ INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111); ++ INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080); ++ INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001); ++ INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000); ++ INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000); ++ INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000); 
++ INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008); ++ for (i=0; i<16; ++i) ++ INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff); ++ ++/* ++write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: +++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0 +++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000 +++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000 +++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... +++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++... +++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000 +++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080); ++ INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000); ++ INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040); ++ INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080); ++ INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0); ++ INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001); ++ INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000); ++ INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001); ++ INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000); ++ INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000); ++ for (i=0; i < 0x880/4; i+=4) { ++ INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9); ++ INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c); ++ INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b); ++ } ++ ++/* ++write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements: +++0x00742e24: 3f800000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2704/4), 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements: +++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 +++0x00742e84: 00000000 bf800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000); ++ INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000); ++ INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000); ++ INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000); ++ INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000); ++ INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000); ++ INSTANCE_WR(ctx, (0x2744/4)+9, 
0xbf800000); ++ ++/* ++write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 (0x4) elements: +++0x00742e34: 00000000 3f800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements: +++0x00742e94: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements: +++0x00743804: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements: +++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000); ++ ++/* ++write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements: +++0x007437d4: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements: +++0x00743824: 00000000 000003f8 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8); ++ ++/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */ ++ INSTANCE_WR(ctx, 0x3468/4, 0x002fe000); ++ ++/* ++write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements: +++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c ++*/ ++ for (i=0; i<8; ++i) ++ INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c); ++} ++ ++static void nv30_31_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x410/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x428/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x444/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x448/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x44c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x460/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x48c/4, 0xffff0000); ++ for(i = 0x4e0; i< 0x4e8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4ec/4, 0x00011100); ++ for(i = 0x508; i< 0x548; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x58c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x590/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x594/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x598/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000); ++ for(i = 0x600; i< 0x640; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x640; i< 0x680; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6c0; i< 0x700; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x700; i< 0x740; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x740; i< 0x780; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x85c/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x860/4, 0x00010000); ++ for(i = 0x864; i< 0x874; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x1f18; i<= 0x3088 ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x30b8; i< 0x30c8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x344c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3808/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x381c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3848/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x384c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3850/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x3858/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x385c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3864/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x386c/4, 
0xbf800000); ++} ++ ++static void nv34_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x40c/4, 0x01000101); ++ INSTANCE_WR(ctx, 0x420/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x440/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x444/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x448/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45c/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x480/4, 0xffff0000); ++ for(i = 0x4d4; i< 0x4dc; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4e0/4, 0x00011100); ++ for(i = 0x4fc; i< 0x53c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x57c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x580/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x584/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x588/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000); ++ for(i = 0x5f0; i< 0x630; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x630; i< 0x670; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6b0; i< 0x6f0; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x6f0; i< 0x730; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x730; i< 0x770; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x850/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x854/4, 0x00010000); ++ for(i = 0x858; i< 0x868; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x15ac; i<= 0x271c ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x274c; i< 0x275c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2edc/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x2eec/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000); ++} ++ ++static void nv35_36_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x40c/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x420/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x440/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x444/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x448/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45c/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x488/4, 0xffff0000); ++ for(i = 0x4dc; i< 0x4e4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4e8/4, 0x00011100); ++ for(i = 0x504; i< 0x544; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x588/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x58c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x590/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x594/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000); ++ for(i = 0x604; i< 0x644; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x644; i< 0x684; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6c4; i< 0x704; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x704; i< 0x744; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x744; i< 0x784; i += 4) ++ 
INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x860/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x864/4, 0x00010000); ++ for(i = 0x868; i< 0x878; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x1f1c; i<= 0x308c ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x30bc; i< 0x30cc; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x3450/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x380c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3820/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x384c/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x3850/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3854/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x385c/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x3860/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3868/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x3870/4, 0xbf800000); ++} ++ ++int nv20_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); ++ unsigned int ctx_size; ++ unsigned int idoffs = 0x28/4; ++ int ret; ++ ++ switch (dev_priv->chipset) { ++ case 0x20: ++ ctx_size = NV20_GRCTX_SIZE; ++ ctx_init = nv20_graph_context_init; ++ idoffs = 0; ++ break; ++ case 0x25: ++ case 0x28: ++ ctx_size = NV25_GRCTX_SIZE; ++ ctx_init = nv25_graph_context_init; ++ break; ++ case 0x2a: ++ ctx_size = NV2A_GRCTX_SIZE; ++ ctx_init = nv2a_graph_context_init; ++ idoffs = 0; ++ break; ++ case 0x30: ++ case 0x31: ++ ctx_size = NV30_31_GRCTX_SIZE; ++ ctx_init = nv30_31_graph_context_init; ++ break; ++ case 0x34: ++ ctx_size = NV34_GRCTX_SIZE; ++ ctx_init = nv34_graph_context_init; ++ break; ++ case 0x35: ++ case 0x36: ++ ctx_size = NV35_36_GRCTX_SIZE; ++ ctx_init = nv35_36_graph_context_init; ++ break; ++ default: ++ ctx_size = 0; ++ ctx_init = nv35_36_graph_context_init; ++ DRM_ERROR("Please contact the devs if you want your NV%x" ++ " card to work\n", dev_priv->chipset); ++ return -ENOSYS; ++ break; ++ } ++ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramin_grctx))) ++ return ret; ++ ++ /* Initialise default context values */ ++ ctx_init(dev, chan->ramin_grctx->gpuobj); ++ ++ /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1); ++ /* CTX_USER */ ++ ++ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, ++ chan->ramin_grctx->instance >> 4); ++ ++ return 0; ++} ++ ++void nv20_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (chan->ramin_grctx) ++ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); ++ ++ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); ++} ++ ++int nv20_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, ++ NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ ++ nouveau_wait_for_idle(dev); ++ return 0; ++} ++ ++int nv20_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private 
*dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, ++ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); ++ ++ nouveau_wait_for_idle(dev); ++ return 0; ++} ++ ++static void nv20_graph_rdi(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i, writecount = 32; ++ uint32_t rdi_index = 0x2c80000; ++ ++ if (dev_priv->chipset == 0x20) { ++ rdi_index = 0x3d0000; ++ writecount = 15; ++ } ++ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index); ++ for (i = 0; i < writecount; i++) ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); ++ ++ nouveau_wait_for_idle(dev); ++} ++ ++int nv20_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = ++ (struct drm_nouveau_private *)dev->dev_private; ++ uint32_t tmp, vramsz; ++ int ret, i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ if (!dev_priv->ctx_table) { ++ /* Create Context Pointer Table */ ++ dev_priv->ctx_table_size = 32 * 4; ++ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, ++ dev_priv->ctx_table_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &dev_priv->ctx_table))) ++ return ret; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, ++ dev_priv->ctx_table->instance >> 4); ++ ++ nv20_graph_rdi(dev); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); ++ NV_WRITE(0x40009C , 0x00000040); ++ ++ if (dev_priv->chipset >= 0x25) { ++ NV_WRITE(0x400890, 0x00080000); ++ NV_WRITE(0x400610, 0x304B1FB6); ++ NV_WRITE(0x400B80, 0x18B82880); ++ NV_WRITE(0x400B84, 0x44000000); ++ NV_WRITE(0x400098, 0x40000080); ++ NV_WRITE(0x400B88, 0x000000ff); ++ } else { ++ NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */ ++ NV_WRITE(0x400094, 0x00000005); ++ NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */ ++ NV_WRITE(0x400B84, 0x24000000); ++ NV_WRITE(0x400098, 0x00000040); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); ++ } ++ ++ /* copy tile info from PFB */ ++ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { ++ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); ++ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i))); ++ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); ++ /* which is NV40_PGRAPH_TSIZE0(i) ?? */ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i))); ++ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); ++ /* which is NV40_PGRAPH_TILE0(i) ?? 
*/ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i))); ++ } ++ for (i = 0; i < 8; i++) { ++ NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4)); ++ } ++ NV_WRITE(0x4009a0, NV_READ(0x100324)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324)); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ /* begin RAM config */ ++ vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz-1); ++ NV_WRITE(0x400868, vramsz-1); ++ ++ /* interesting.. the below overwrites some of the tile setup above.. */ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++ ++ return 0; ++} ++ ++void nv20_graph_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); ++} ++ ++int nv30_graph_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++// uint32_t vramsz, tmp; ++ int ret, i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ if (!dev_priv->ctx_table) { ++ /* Create Context Pointer Table */ ++ dev_priv->ctx_table_size = 32 * 4; ++ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, ++ dev_priv->ctx_table_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &dev_priv->ctx_table))) ++ return ret; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, ++ dev_priv->ctx_table->instance >> 4); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); ++ NV_WRITE(0x400890, 0x01b463ff); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475); ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); ++ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); ++ NV_WRITE(0x400B80, 0x1003d888); ++ NV_WRITE(0x400B84, 0x0c000000); ++ NV_WRITE(0x400098, 0x00000000); ++ NV_WRITE(0x40009C, 0x0005ad00); ++ NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2 ++ NV_WRITE(0x4000a0, 0x00000000); ++ NV_WRITE(0x4000a4, 0x00000008); ++ NV_WRITE(0x4008a8, 0xb784a400); ++ NV_WRITE(0x400ba0, 0x002f8685); ++ NV_WRITE(0x400ba4, 0x00231f3f); ++ NV_WRITE(0x4008a4, 0x40000020); ++ ++ if (dev_priv->chipset == 0x34) { ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008); ++ 
NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002); ++ } ++ ++ NV_WRITE(0x4000c0, 0x00000016); ++ ++ /* copy tile info from PFB */ ++ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { ++ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); ++ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ ++ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); ++ /* which is NV40_PGRAPH_TSIZE0(i) ?? */ ++ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); ++ /* which is NV40_PGRAPH_TILE0(i) ?? */ ++ } ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(0x0040075c , 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ /* begin RAM config */ ++// vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ if (dev_priv->chipset != 0x34) { ++ NV_WRITE(0x400750, 0x00EA0000); ++ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x400750, 0x00EA0004); ++ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1)); ++ } ++ ++#if 0 ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz-1); ++ NV_WRITE(0x400868, vramsz-1); ++ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ /* per-context state, doesn't belong here */ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++#endif ++ ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv40_fb.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv40_fb.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv40_fb.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv40_fb.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,62 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv40_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_bar_size, tmp; ++ int num_tiles; ++ int i; ++ ++ /* This is strictly a NV4x register (don't know about NV5x). */ ++ /* The blob sets these to all kinds of values, and they mess up our setup. */ ++ /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */ ++ /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */ ++ /* Any idea what this is? 
*/ ++ NV_WRITE(NV40_PFB_UNK_800, 0x1); ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ case 0x45: ++ tmp = NV_READ(NV10_PFB_CLOSE_PAGE2); ++ NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15)); ++ num_tiles = NV10_PFB_TILE__SIZE; ++ break; ++ case 0x46: /* G72 */ ++ case 0x47: /* G70 */ ++ case 0x49: /* G71 */ ++ case 0x4b: /* G73 */ ++ case 0x4c: /* C51 (G7X version) */ ++ num_tiles = NV40_PFB_TILE__SIZE_1; ++ break; ++ default: ++ num_tiles = NV40_PFB_TILE__SIZE_0; ++ break; ++ } ++ ++ fb_bar_size = drm_get_resource_len(dev, 0) - 1; ++ switch (dev_priv->chipset) { ++ case 0x40: ++ for (i=0; iramfc->gpuobj, \ ++ NV40_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV40_RAMFC_##offset/4) ++#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE)) ++#define NV40_RAMFC__SIZE 128 ++ ++int ++nv40_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, ++ NV40_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Fill entries that are seen filled in dumps of nvidia driver just ++ * after channel's is put into DMA mode ++ */ ++ RAMFC_WR(DMA_PUT , chan->pushbuf_base); ++ RAMFC_WR(DMA_GET , chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0x30000000 /* no idea.. */); ++ RAMFC_WR(DMA_SUBROUTINE, 0); ++ RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4); ++ RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<id)); ++ return 0; ++} ++ ++void ++nv40_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); ++ ++ if (chan->ramfc) ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv40_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp, tmp2; ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); ++ ++ /* No idea what 0x2058 is.. 
*/ ++ tmp = RAMFC_RD(DMA_FETCH); ++ tmp2 = NV_READ(0x2058) & 0xFFF; ++ tmp2 |= (tmp & 0x30000000); ++ NV_WRITE(0x2058, tmp2); ++ tmp &= ~0x30000000; ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); ++ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE)); ++ NV_WRITE(0x32e4, RAMFC_RD(UNK_40)); ++ /* NVIDIA does this next line twice... */ ++ NV_WRITE(0x32e8, RAMFC_RD(UNK_44)); ++ NV_WRITE(0x2088, RAMFC_RD(UNK_4C)); ++ NV_WRITE(0x3300, RAMFC_RD(UNK_50)); ++ ++ /* not sure what part is PUT, and which is GET.. never seen a non-zero ++ * value appear in a mmio-trace yet.. ++ */ ++#if 0 ++ tmp = NV_READ(UNK_84); ++ NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???); ++ NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???); ++#endif ++ ++ /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ ++ tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; ++ tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF; ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); ++ ++ /* Set channel active, and in DMA mode */ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ /* Reset DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv40_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); ++ RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); ++ RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT)); ++ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH); ++ tmp |= NV_READ(0x2058) & 0x30000000; ++ RAMFC_WR(DMA_FETCH , tmp); ++ ++ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); ++ tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); ++ RAMFC_WR(ACQUIRE_TIMESTAMP, tmp); ++ RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); ++ RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); ++ ++ /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something ++ * more involved depending on the value of 0x3228? ++ */ ++ RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ ++ RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); ++ ++ /* No idea what the below is for exactly, ripped from a mmio-trace */ ++ RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); ++ ++ /* NVIDIA do this next line twice.. bug? */ ++ RAMFC_WR(UNK_44 , NV_READ(0x32e8)); ++ RAMFC_WR(UNK_4C , NV_READ(0x2088)); ++ RAMFC_WR(UNK_50 , NV_READ(0x3300)); ++ ++#if 0 /* no real idea which is PUT/GET in UNK_48.. 
*/ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_GET); ++ tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16); ++ RAMFC_WR(UNK_48 , tmp); ++#endif ++ ++ return 0; ++} ++ ++int ++nv40_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_fifo_init(dev))) ++ return ret; ++ ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff); ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv40_graph.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv40_graph.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv40_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv40_graph.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,2193 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++/*TODO: deciper what each offset in the context represents. The below ++ * contexts are taken from dumps just after the 3D object is ++ * created. 
++ */ ++static void ++nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ /* Always has the "instance address" of itself at offset 0 */ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ /* unknown */ ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00180/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00184/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00188/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0018c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004d0/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00504/4, 0x00011100); ++ for (i=0x00520; i<=0x0055c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x00594/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x005b4/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00610/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00618/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00628/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00); ++ /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */ ++ /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */ ++ for (i=0x006C0; i<=0x006fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */ ++ for (i=0x00700; i<=0x0073c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */ ++ /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */ ++ for (i=0x00780; i<=0x007bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */ ++ for (i=0x007c0; i<=0x007fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */ ++ for (i=0x00800; 
i<=0x0083c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */ ++ /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */ ++ for (i=0x00880; i<=0x008bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ /* unknown */ ++ for (i=0x00910; i<=0x0091c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00920; i<=0x0092c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00940; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00960; i<=0x0096c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00980/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x009b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00); ++ INSTANCE_WR(ctx, 0x009d4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x80800001); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c00/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c04/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c08/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c44/4, 0x00000001); ++ for (i=0x03008; i<=0x03080; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x05288; i<=0x08570; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x08628; i<=0x08e18; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0bd28; i<=0x0f010; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0f0c8; i<=0x0f8b8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x127c8; i<=0x15ab0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x15b68; i<=0x16358; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x19268; i<=0x1c550; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x1c608; i<=0x1cdf8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x1fd08; i<=0x22ff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x230a8; i<=0x23898; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x267a8; i<=0x29a90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x29b48; i<=0x2a338; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); ++ for (i = 0x00000178; i <= 0x00000180; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); ++ for (i = 0x00000194; i <= 0x000001b0; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); ++ for (i = 0x00000350; i <= 0x0000035c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000); 
++ INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000430/4, 0x00011100); ++ for (i = 0x0000044c; i <= 0x00000488; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00000538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00); ++ for (i = 0x000005dc; i <= 0x00000618; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i = 0x0000061c; i <= 0x00000658; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i = 0x0000069c; i <= 0x000006d8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i = 0x000006dc; i <= 0x00000718; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i = 0x0000071c; i <= 0x00000758; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i = 0x0000079c; i <= 0x000007d8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i = 0x0000082c; i <= 0x00000838; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i = 0x0000083c; i <= 0x00000848; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i = 0x0000085c; i <= 0x00000868; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i = 0x0000087c; i <= 0x00000888; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff); ++ for (i = 0x00000ad4; i <= 0x00000ae4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001); ++ for (i = 0x00002ee8; i <= 0x00002f60; i += 8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x00005168; i <= 0x00007358; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00007368; i <= 0x00007758; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x0000a068; i <= 0x0000c258; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x0000c268; i <= 0x0000c658; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x0000ef68; i <= 0x00011158; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00011168; i <= 0x00011558; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x00013e68; i <= 0x00016058; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 
0x00016068; i <= 0x00016458; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++}; ++ ++static void ++nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00194/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00198/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a4/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a8/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001ac/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00); ++ for (i=0x005dc; i<=0x00618; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x0061c; i<=0x00658; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x0069c; i<=0x006d8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006dc; i<=0x00718; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x0071c; i<=0x00758; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x0079c; i<=0x007d8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x0082c; i<=0x00838; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x0083c; i<=0x00848; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x0085c; i<=0x00868; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x0087c; i<=0x00888; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x0089c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3); 
++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); ++ for (i=0x02ec0; i<=0x02f38; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x04c80; i<=0x06e70; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x06e80; i<=0x07270; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x096c0; i<=0x0b8b0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0b8c0; i<=0x0bcb0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0e100; i<=0x102f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x10300; i<=0x106f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++}; ++ ++static void ++nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0004c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00138/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00144/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00184/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0018c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00190/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00194/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00198/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001a4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0036c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00370/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00374/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00378/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003a4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x003b8/4, 0x00003010); ++ INSTANCE_WR(ctx, 0x003dc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e8/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00400/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00404/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00408/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00410/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00414/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00418/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004b0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x004d0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x004ec/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00500/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00504/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00508/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0050c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00510/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00514/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00518/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00520/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00524/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00528/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00530/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00534/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00538/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0053c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00011100); ++ for (i=0x00578; i<0x005b4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x00608/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00658/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00664/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00674/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x006c8/4, 0x00ffff00); ++ for (i=0x0070c; i<=0x00748; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x0074c; i<=0x00788; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x007cc; i<=0x00808; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x0080c; i<=0x00848; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x0084c; i<=0x00888; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x008cc; i<=0x00908; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x0095c; i<=0x00968; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x0096c; i<=0x00978; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x0098c; i<=0x00998; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009ac; i<=0x009b8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a00/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x00a28/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00a60/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00aec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b30/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b38/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bec/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000); ++ for (i=0x017f8; i<=0x01870; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x035b8; i<=0x057a8; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ 
for (i=0x057b8; i<=0x05ba8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x07f38; i<=0x0a128; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0a138; i<=0x0a528; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0c8b8; i<=0x0eaa8; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0eab8; i<=0x0eea8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++/* This may only work on 7800 AGP cards, will include a warning */ ++static void ++nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00000178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); ++ for (i=0x00000194; i<=0x000001b0; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00000350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); ++ for (i=0x000003c0; i<=0x000003fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00000454/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00000458/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00000474/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000490/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000); ++ for (i=0x000004a4; i<=0x000004e0; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000500/4, 0x00011100); ++ for (i=0x0000051c; i<=0x00000558; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00000590/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00000608/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00); ++ for (i=0x000006b0; i<=0x000006ec; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x000006f0; i<=0x0000072c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00000770; i<=0x000007ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x000007b0; i<=0x000007ec; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x000007f0; i<=0x0000082c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00000870; i<=0x000008ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 
0x00100008); ++ INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000910/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x00000914/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x00000918/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202); ++ for (i=0x00000930; i<=0x0000095c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00000970/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00); ++ INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001); ++ for (i=0x00000b10; i<=0x00000b8c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff); ++ for (i=0x00000bdc; i<=0x00000bf8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000); ++ for (i=0x00003000; i<=0x00003078; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00004dc0; i<=0x00006fb0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x00006fc0; i<=0x000073b0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00009800; i<=0x0000b9f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0000ba00; i<=0x00010430; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00010440; i<=0x00010830; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00012c80; i<=0x00014e70; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x00014e80; i<=0x00015270; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x000176c0; i<=0x000198b0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x000198c0; i<=0x00019cb0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0001c100; i<=0x0001e2f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0001e300; i<=0x0001e6f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00218/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00220/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00234/4, 0x80000000); ++ INSTANCE_WR(ctx, 
0x00238/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00240/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00244/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00248/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00250/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00474/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00488/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00514/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00530/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00544/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00548/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00554/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00558/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00560/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00564/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00568/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00570/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00574/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00578/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00580/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); ++ 
INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); ++ for (i=0x00750; i<=0x0078c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00790; i<=0x007cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00810; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x00850; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00890; i<=0x008cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00910; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x009a0; i<=0x009ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x009b0; i<=0x009bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x009d0; i<=0x009dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009f0; i<=0x009fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd4/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); ++ for(i=0x030a0; i<=0x03118; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x098a0; i<=0x0ba90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x0baa0; i<=0x0be90; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x0e2e0; i<=0x0fff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x10008; i<=0x104d0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x104e0; i<=0x108d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x12d20; i<=0x14f10; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x14f20; i<=0x15310; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x17760; i<=0x19950; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x19960; i<=0x19d50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x1c1a0; i<=0x1e390; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x1e3a0; i<=0x1e790; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x20be0; i<=0x22dd0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x22de0; i<=0x231d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00003010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 
0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); ++ for (i=0x005d8; i<=0x00614; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00618; i<=0x00654; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00698; i<=0x006d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006d8; i<=0x00714; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00718; i<=0x00754; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00798; i<=0x007d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00828; i<=0x00834; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00838; i<=0x00844; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00858; i<=0x00864; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00878; i<=0x00884; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00898/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); ++ for (i=0x016c0; i<=0x01738; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03840; i<=0x05670; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05680; i<=0x05a70; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x07e00; i<=0x09ff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0a000; i<=0x0a3f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0c780; i<=0x0e970; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0e980; i<=0x0ed70; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00218/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00220/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00234/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00238/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00240/4, 0x80000000); ++ 
INSTANCE_WR(ctx, 0x00244/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00248/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00250/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00474/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00488/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00514/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00530/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00544/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00548/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00554/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00558/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00560/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00564/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00568/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00570/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00574/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00578/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00580/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x006a8/4, 
0x00000098); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); ++ for (i=0x00750; i<=0x0078c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00790; i<=0x007cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00810; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x00850; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00890; i<=0x008cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00910; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x009a0; i<=0x009ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x009b0; i<=0x009bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x009d0; i<=0x009dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009f0; i<=0x009fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); ++ for(i=0x030a0; 
i<=0x03118; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x098a0; i<=0x0ba90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x0baa0; i<=0x0be90; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x0e2e0; i<=0x0fff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x10008; i<=0x104d0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x104e0; i<=0x108d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x12d20; i<=0x14f10; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x14f20; i<=0x15310; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x17760; i<=0x19950; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x19960; i<=0x19d50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f4/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x0041c/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x0042c/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00434/4, 0x00011100); ++ for (i=0x00450; i<0x0048c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00498/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c4/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004cc/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00530/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00534/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00538/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x0053c/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x0054c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00550/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00554/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00564/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00ffff00); ++ for (i=0x005e0; i<=0x0061c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00620; i<=0x0065c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x006a0; i<=0x006dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ 
for (i=0x006e0; i<=0x0071c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00720; i<=0x0075c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x007a0; i<=0x007dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00830; i<=0x0083c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00840; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00860; i<=0x0086c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00880; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x008a0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x008d8/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008ec/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008f0/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008fc/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00934/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a0c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a74/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a80/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00a9c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ad8/4, 0x00000001); ++ for (i=0x016a0; i<0x01718; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03460; i<0x05650; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05660; i<0x05a50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 
0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); ++ for (i=0x005d8; i<=0x00614; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00618; i<=0x00654; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00698; i<=0x006d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006d8; i<=0x00714; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00718; i<=0x00754; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00798; i<=0x007d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00828; i<=0x00834; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00838; i<=0x00844; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00858; i<=0x00864; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00878; i<=0x00884; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00898/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00a94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001); ++ for (i=0x01668; i<=0x016e0; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03428; i<=0x05618; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05628; i<=0x05a18; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++int ++nv40_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); ++ int ret; ++ ++ /* These functions populate the graphics context with a whole heap ++ * of default state. All these functions are very similar, with ++ * a minimal amount of chipset-specific changes. However, as we're ++ * currently dependant on the context programs used by the NVIDIA ++ * binary driver these functions must match the layout expected by ++ * them. Hopefully at some point this will all change. 
++	 */
++	switch (dev_priv->chipset) {
++	case 0x40:
++		ctx_init = nv40_graph_context_init;
++		break;
++	case 0x41:
++	case 0x42:
++		ctx_init = nv41_graph_context_init;
++		break;
++	case 0x43:
++		ctx_init = nv43_graph_context_init;
++		break;
++	case 0x46:
++		ctx_init = nv46_graph_context_init;
++		break;
++	case 0x47:
++		ctx_init = nv47_graph_context_init;
++		break;
++	case 0x49:
++		ctx_init = nv49_graph_context_init;
++		break;
++	case 0x44:
++	case 0x4a:
++		ctx_init = nv4a_graph_context_init;
++		break;
++	case 0x4b:
++		ctx_init = nv4b_graph_context_init;
++		break;
++	case 0x4c:
++	case 0x67:
++		ctx_init = nv4c_graph_context_init;
++		break;
++	case 0x4e:
++		ctx_init = nv4e_graph_context_init;
++		break;
++	default:
++		ctx_init = nv40_graph_context_init;
++		break;
++	}
++
++	/* Allocate a 175KiB block of PRAMIN to store the context. This
++	 * is massive overkill for a lot of chipsets, but it should be safe
++	 * until we're able to implement this properly (will happen at more
++	 * or less the same time we're able to write our own context programs).
++	 */
++	if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
++					  NVOBJ_FLAG_ZERO_ALLOC,
++					  &chan->ramin_grctx)))
++		return ret;
++
++	/* Initialise default context values */
++	ctx_init(dev, chan->ramin_grctx->gpuobj);
++
++	return 0;
++}
++
++void
++nv40_graph_destroy_context(struct nouveau_channel *chan)
++{
++	nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
++}
++
++static int
++nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t old_cp, tv = 1000, tmp;
++	int i;
++
++	old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
++	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++
++	tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310);
++	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
++		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
++	NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
++
++	tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304);
++	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
++	NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
++
++	nouveau_wait_for_idle(dev);
++
++	for (i = 0; i < tv; i++) {
++		if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
++			break;
++	}
++
++	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
++
++	if (i == tv) {
++		uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
++		DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
++		DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
++			  ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
++			  ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
++		DRM_ERROR("0x40030C = 0x%08x\n",
++			  NV_READ(NV40_PGRAPH_CTXCTL_030C));
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
++/* Save current context (from PGRAPH) into the channel's context */
++int
++nv40_graph_save_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	uint32_t inst;
++
++	if (!chan->ramin_grctx)
++		return -EINVAL;
++	inst = chan->ramin_grctx->instance >> 4;
++
++	return nv40_graph_transfer_context(dev, inst, 1);
++}
++
++/* Restore the context for a specific channel into PGRAPH */
++int
++nv40_graph_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t inst;
++	int ret;
++
++	if (!chan->ramin_grctx)
++		return -EINVAL;
++	inst = chan->ramin_grctx->instance >> 4;
++
++	ret = nv40_graph_transfer_context(dev, inst, 0);
++	if (ret)
++		return ret;
++
++	/* 0x40032C, no idea of its exact function.
Could simply be a ++ * record of the currently active PGRAPH context. It's currently ++ * unknown as to what bit 24 does. The nv ddx has it set, so we will ++ * set it here too. ++ */ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, ++ (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) | ++ NV40_PGRAPH_CTXCTL_CUR_LOADED); ++ /* 0x32E0 records the instance address of the active FIFO's PGRAPH ++ * context. If at any time this doesn't match 0x40032C, you will ++ * recieve PGRAPH_INTR_CONTEXT_SWITCH ++ */ ++ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); ++ return 0; ++} ++ ++/* These blocks of "magic numbers" are actually a microcode that the GPU uses ++ * to control how graphics contexts get saved and restored between PRAMIN ++ * and PGRAPH during a context switch. We're currently using values seen ++ * in mmio-traces of the binary driver. ++ */ ++static uint32_t nv40_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406, ++ 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216, ++ 0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100, ++ 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, ++ 0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6, ++ 0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b, ++ 0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a, ++ 0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, ++ 0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008, ++ 0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901, ++ 0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038, ++ 0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684, ++ 0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68, ++ 0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006, ++ 0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284, ++ 0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++static uint32_t nv41_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, ++ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 
0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800, ++ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, ++ 0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, ++ 0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, ++ 0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480, ++ 0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a, ++ 0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv43_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, ++ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, ++ 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, ++ 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, ++ 0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350, ++ 0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006, ++ 0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, ++ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409988, 
0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++static uint32_t nv44_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06, ++ 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, ++ 0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b, ++ 0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d, ++ 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6, ++ 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158, ++ 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, ++ 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, ++ 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f, ++ 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec, ++ 0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a, ++ 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691, ++ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc, ++ 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, ++ 0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901, ++ 0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, ++ 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, ++ 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8, ++ 0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001, ++ 0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029, ++ 0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a, ++ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000, ++ 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007, ++ 0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv46_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, ++ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a, ++ 0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691, ++ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 
0x001146c6, 0x00200022, ++ 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, ++ 0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910, ++ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, 0x00108a14, ++ 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80, ++ 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, ++ 0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, ++ 0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320, ++ 0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081, ++ 0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv47_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606, ++ 0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12, ++ 0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a, ++ 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, ++ 0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901, ++ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19, ++ 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00, ++ 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000, ++ 0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a, ++ 0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006, ++ 0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318, ++ 0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, ++ 0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f, ++ 0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880, ++ 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, ++ 0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006, ++ 0x0060000e, ~0 ++}; ++ ++//this is used for nv49 and nv4b ++static uint32_t nv49_4b_ctx_prog[] ={ ++ 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, ++ 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000, ++ 0x001040c5, 0x00400f26, 
0x00401068, 0x0060000d, 0x0070008f, 0x0070000e, ++ 0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000, ++ 0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a, ++ 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210, ++ 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280, ++ 0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, ++ 0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118, ++ 0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, ++ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, ++ 0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800, ++ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, ++ 0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, ++ 0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, ++ 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a, ++ 0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88, ++ 0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f, ++ 0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280, ++ 0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68, ++ 0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e, ++ 0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, ++ 0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e, ++ 0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60, ++ 0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e, ++ 0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005, ++ 0x00700006, 0x0060000e, ~0 ++}; ++ ++ ++static uint32_t nv4a_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, ++ 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, ++ 0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a, ++ 0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100, ++ 0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 
0x0020007a, 0x0060000a, ++ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004, ++ 0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, ++ 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88, ++ 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000, ++ 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv4c_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406, ++ 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6, ++ 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, ++ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, ++ 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, ++ 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, ++ 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a, ++ 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080, ++ 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, ++ 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, ++ 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306, ++ 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv4e_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, ++ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, ++ 0x0060000a, 
0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06, ++ 0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, ++ 0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06, ++ 0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006, ++ 0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, ++ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++/* ++ * G70 0x47 ++ * G71 0x49 ++ * NV45 0x48 ++ * G72[M] 0x46 ++ * G73 0x4b ++ * C51_G7X 0x4c ++ * C51 0x4e ++ */ ++int ++nv40_graph_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = ++ (struct drm_nouveau_private *)dev->dev_private; ++ uint32_t *ctx_prog; ++ uint32_t vramsz, tmp; ++ int i, j; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ctx_prog = nv40_ctx_prog; break; ++ case 0x41: ++ case 0x42: ctx_prog = nv41_ctx_prog; break; ++ case 0x43: ctx_prog = nv43_ctx_prog; break; ++ case 0x44: ctx_prog = nv44_ctx_prog; break; ++ case 0x46: ctx_prog = nv46_ctx_prog; break; ++ case 0x47: ctx_prog = nv47_ctx_prog; break; ++ case 0x49: ctx_prog = nv49_4b_ctx_prog; break; ++ case 0x4a: ctx_prog = nv4a_ctx_prog; break; ++ case 0x4b: ctx_prog = nv49_4b_ctx_prog; break; ++ case 0x4c: ++ case 0x67: ctx_prog = nv4c_ctx_prog; break; ++ case 0x4e: ctx_prog = nv4e_ctx_prog; break; ++ default: ++ DRM_ERROR("Context program for 0x%02x unavailable\n", ++ dev_priv->chipset); ++ ctx_prog = NULL; ++ break; ++ } ++ ++ /* Load the context program onto the card */ ++ if (ctx_prog) { ++ DRM_DEBUG("Loading context program\n"); ++ i = 0; ++ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ while (ctx_prog[i] != ~0) { ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]); ++ i++; ++ } ++ } ++ ++ /* No context present currently */ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xe0de8055); ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); ++ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ j = NV_READ(0x1540) & 0xff; 
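++	/* Editor's note, best guess only: 0x1540 appears to hold a
++	 * unit-enable bitmask. The loop below computes the index of its
++	 * lowest set bit and writes that index to 0x405000; neither
++	 * register is documented in this patch, so treat this reading
++	 * as an assumption rather than established fact.
++	 */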
++ if (j) { ++ for (i=0; !(j&1); j>>=1, i++); ++ NV_WRITE(0x405000, i); ++ } ++ ++ if (dev_priv->chipset == 0x40) { ++ NV_WRITE(0x4009b0, 0x83280fff); ++ NV_WRITE(0x4009b4, 0x000000a0); ++ } else { ++ NV_WRITE(0x400820, 0x83280eff); ++ NV_WRITE(0x400824, 0x000000a0); ++ } ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ case 0x45: ++ NV_WRITE(0x4009b8, 0x0078e366); ++ NV_WRITE(0x4009bc, 0x0000014c); ++ break; ++ case 0x41: ++ case 0x42: /* pciid also 0x00Cx */ ++// case 0x0120: //XXX (pciid) ++ NV_WRITE(0x400828, 0x007596ff); ++ NV_WRITE(0x40082c, 0x00000108); ++ break; ++ case 0x43: ++ NV_WRITE(0x400828, 0x0072cb77); ++ NV_WRITE(0x40082c, 0x00000108); ++ break; ++ case 0x44: ++ case 0x46: /* G72 */ ++ case 0x4a: ++ case 0x4c: /* G7x-based C51 */ ++ case 0x4e: ++ NV_WRITE(0x400860, 0); ++ NV_WRITE(0x400864, 0); ++ break; ++ case 0x47: /* G70 */ ++ case 0x49: /* G71 */ ++ case 0x4b: /* G73 */ ++ NV_WRITE(0x400828, 0x07830610); ++ NV_WRITE(0x40082c, 0x0000016A); ++ break; ++ default: ++ break; ++ } ++ ++ NV_WRITE(0x400b38, 0x2ffff800); ++ NV_WRITE(0x400b3c, 0x00006000); ++ ++ /* copy tile info from PFB */ ++ switch (dev_priv->chipset) { ++ case 0x40: /* vanilla NV40 */ ++ for (i=0; ichipset) { ++ case 0x40: ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x4069A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4069A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz); ++ NV_WRITE(0x400868, vramsz); ++ break; ++ default: ++ switch (dev_priv->chipset) { ++ case 0x46: ++ case 0x47: ++ case 0x49: ++ case 0x4b: ++ NV_WRITE(0x400DF0, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x400DF4, NV_READ(NV04_PFB_CFG1)); ++ break; ++ default: ++ NV_WRITE(0x4009F0, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009F4, NV_READ(NV04_PFB_CFG1)); ++ break; ++ } ++ NV_WRITE(0x4069F0, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4069F4, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x400840, 0); ++ NV_WRITE(0x400844, 0); ++ NV_WRITE(0x4008A0, vramsz); ++ NV_WRITE(0x4008A4, vramsz); ++ break; ++ } ++ ++ /* per-context state, doesn't belong here */ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++ ++ return 0; ++} ++ ++void nv40_graph_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv40_mc.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv40_mc.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv40_mc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv40_mc.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,38 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv40_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ /* Power up everything, resetting each individual unit will ++ * be done later if needed. 
++ */ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ switch (dev_priv->chipset) { ++ case 0x44: ++ case 0x46: /* G72 */ ++ case 0x4e: ++ case 0x4c: /* C51_G7X */ ++ tmp = NV_READ(NV40_PFB_020C); ++ NV_WRITE(NV40_PMC_1700, tmp); ++ NV_WRITE(NV40_PMC_1704, 0); ++ NV_WRITE(NV40_PMC_1708, 0); ++ NV_WRITE(NV40_PMC_170C, tmp); ++ break; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv40_mc_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_fifo.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_fifo.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_fifo.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,343 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++struct nv50_fifo_priv { ++ struct nouveau_gpuobj_ref *thingo[2]; ++ int cur_thingo; ++}; ++ ++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) ++ ++static void ++nv50_fifo_init_thingo(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; ++ struct nouveau_gpuobj_ref *cur; ++ int i, nr; ++ ++ DRM_DEBUG("\n"); ++ ++ cur = priv->thingo[priv->cur_thingo]; ++ priv->cur_thingo = !priv->cur_thingo; ++ ++ /* We never schedule channel 0 or 127 */ ++ for (i = 1, nr = 0; i < 127; i++) { ++ if (dev_priv->fifos[i]) { ++ INSTANCE_WR(cur->gpuobj, nr++, i); ++ } ++ } ++ NV_WRITE(0x32f4, cur->instance >> 12); ++ NV_WRITE(0x32ec, nr); ++ NV_WRITE(0x2500, 0x101); ++} ++ ++static int ++nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[channel]; ++ uint32_t inst; ++ ++ DRM_DEBUG("ch%d\n", channel); ++ ++ if (!chan->ramfc) ++ return -EINVAL; ++ ++ if (IS_G80) inst = chan->ramfc->instance >> 12; ++ else inst = chan->ramfc->instance >> 8; ++ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), ++ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); ++ ++ if (!nt) nv50_fifo_init_thingo(dev); ++ return 0; ++} ++ ++static void ++nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ DRM_DEBUG("ch%d, nt=%d\n", channel, nt); ++ ++ if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; ++ else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; ++ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst); ++ ++ if (!nt) nv50_fifo_init_thingo(dev); ++} ++ ++static void ++nv50_fifo_init_reset(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e); ++} ++ ++static void ++nv50_fifo_init_intr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); ++ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); ++} ++ ++static void ++nv50_fifo_init_context_table(struct drm_device *dev) ++{ ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) ++ nv50_fifo_channel_disable(dev, i, 1); ++ nv50_fifo_init_thingo(dev); ++} ++ ++static void ++nv50_fifo_init_regs__nv(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x250c, 0x6f3cfc34); ++} ++ ++static void ++nv50_fifo_init_regs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x2500, 0); ++ NV_WRITE(0x3250, 0); ++ NV_WRITE(0x3220, 0); ++ NV_WRITE(0x3204, 0); ++ NV_WRITE(0x3210, 0); ++ NV_WRITE(0x3270, 0); ++ ++ /* Enable dummy channels setup by nv50_instmem.c */ ++ nv50_fifo_channel_enable(dev, 0, 1); ++ nv50_fifo_channel_enable(dev, 127, 1); ++} ++ ++int ++nv50_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); ++ if (!priv) ++ return 
-ENOMEM; ++ dev_priv->Engine.fifo.priv = priv; ++ ++ nv50_fifo_init_reset(dev); ++ nv50_fifo_init_intr(dev); ++ ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); ++ if (ret) { ++ DRM_ERROR("error creating thingo0: %d\n", ret); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); ++ if (ret) { ++ DRM_ERROR("error creating thingo1: %d\n", ret); ++ return ret; ++ } ++ ++ nv50_fifo_init_context_table(dev); ++ nv50_fifo_init_regs__nv(dev); ++ nv50_fifo_init_regs(dev); ++ ++ return 0; ++} ++ ++void ++nv50_fifo_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!priv) ++ return; ++ ++ nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); ++ nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); ++ ++ dev_priv->Engine.fifo.priv = NULL; ++ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); ++} ++ ++int ++nv50_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV50_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv50_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramfc = NULL; ++ int ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ if (IS_G80) { ++ uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; ++ uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start; ++ ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset, ++ 0x100, NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &ramfc, ++ &chan->ramfc); ++ if (ret) ++ return ret; ++ } else { ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ &chan->ramfc); ++ if (ret) ++ return ret; ++ ramfc = chan->ramfc->gpuobj; ++ } ++ ++ INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4); ++ INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); ++ INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? 
*/ ++ INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff); ++ INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff); ++ INSTANCE_WR(ramfc, 0x10/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x08/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x40/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0); ++ INSTANCE_WR(ramfc, 0x54/4, 0x000f0000); ++ INSTANCE_WR(ramfc, 0x7c/4, 0x30000001); ++ INSTANCE_WR(ramfc, 0x78/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); ++ ++ if (!IS_G80) { ++ INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id); ++ INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance); ++ ++ INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */ ++ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); ++ } ++ ++ ret = nv50_fifo_channel_enable(dev, chan->id, 0); ++ if (ret) { ++ DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret); ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ nv50_fifo_channel_disable(dev, chan->id, 0); ++ ++ /* Dummy channel, also used on ch 127 */ ++ if (chan->id == 0) ++ nv50_fifo_channel_disable(dev, 127, 0); ++ ++ if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id) ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv50_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ /*XXX: incomplete, only touches the regs that NV does */ ++ ++ NV_WRITE(0x3244, 0); ++ NV_WRITE(0x3240, 0); ++ ++ NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4)); ++ NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4)); ++ NV_WRITE(0x3254, 1); ++ NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4)); ++ ++ if (!IS_G80) { ++ NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4)); ++ NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4)); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); ++ return 0; ++} ++ ++int ++nv50_fifo_save_context(struct nouveau_channel *chan) ++{ ++ DRM_DEBUG("ch%d\n", chan->id); ++ DRM_ERROR("stub!\n"); ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_graph.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_graph.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_graph.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,8286 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) ++ ++static void ++nv50_graph_init_reset(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e); ++} ++ ++static void ++nv50_graph_init_intr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff); ++ NV_WRITE(0x400138, 0xffffffff); ++ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff); ++} ++ ++static void ++nv50_graph_init_regs__nv(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x400804, 0xc0000000); ++ NV_WRITE(0x406800, 0xc0000000); ++ NV_WRITE(0x400c04, 0xc0000000); ++ NV_WRITE(0x401804, 0xc0000000); ++ NV_WRITE(0x405018, 0xc0000000); ++ NV_WRITE(0x402000, 0xc0000000); ++ ++ NV_WRITE(0x400108, 0xffffffff); ++ ++ NV_WRITE(0x400824, 0x00004000); ++ NV_WRITE(0x400500, 0x00010001); ++} ++ ++static void ++nv50_graph_init_regs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */); ++} ++ ++static uint32_t nv50_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x00417e4d, 0x00401e44, 0x00401e05, 0x00401e0d, 0x00415a06, ++ 0x00600005, 0x004015c5, 0x00600011, 0x00401c0b, 0x0090ffff, 0x0091ffff, ++ 0x00200020, 0x00600008, 0x0050004c, 0x00600009, 0x00415a45, 0x0041754d, ++ 0x0070009d, 0x004022cf, 0x0070009f, 0x0050009f, 0x00401fc0, 0x00200080, ++ 0x00600008, 0x00401f4f, 0x00401fc0, 0x004025cc, 0x00700081, 0x00200000, ++ 0x00600006, 0x00700000, 0x00111bfc, 0x00700080, 0x00700083, 0x00200047, ++ 0x00600006, 0x0011020a, 0x002005c0, 0x00600007, 0x00300000, 0x00c000ff, ++ 0x00c800ff, 0x00416507, 0x00202627, 0x008000ff, 0x00403c8c, 0x005000cb, ++ 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, 0x00170202, 0x0011020a, ++ 0x00200032, 0x0010020d, 0x001b0242, 0x00120302, 0x00140402, 0x00180500, ++ 0x00130509, 0x00150550, 0x00110605, 0x001e0607, 0x00110700, 0x00110900, ++ 0x00110902, 0x00110a00, 0x00160b02, 0x00110b28, 0x00140b2b, 0x00110c01, ++ 0x00111400, 0x00111405, 0x00111407, 0x00111409, 0x0011140b, 0x002000ea, ++ 0x00101500, 0x0040640f, 0x0040644b, 0x00213700, 0x00600007, 0x00200440, ++ 0x008800ff, 0x0070008f, 0x0040648c, 0x005000cb, 0x00000000, 0x001118f8, ++ 0x0020002b, 0x00101a05, 0x00131c00, 0x00111c04, 0x00141c20, 0x00111c25, ++ 0x00131c40, 0x00111c44, 0x00141c60, 0x00111c65, 0x00131c80, 0x00111c84, ++ 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00111cc4, 0x00141ce0, 0x00111ce5, ++ 0x00131d00, 0x00111d04, 
0x00141d20, 0x00111d25, 0x00131d40, 0x00111d44, ++ 0x00141d60, 0x00111d65, 0x00131f00, 0x00191f40, 0x00409ee0, 0x00200217, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, ++ 0x00122100, 0x00122103, 0x00162200, 0x0040960f, 0x0040964b, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040968c, 0x005000cb, ++ 0x00000000, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380, ++ 0x0011238b, 0x00192394, 0x0040b0e1, 0x00200285, 0x00600006, 0x00200044, ++ 0x00102480, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, 0x00122503, ++ 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, 0x00122780, ++ 0x0011278b, 0x00192794, 0x0040cce2, 0x002002f3, 0x00600006, 0x00200044, ++ 0x00102880, 0x001128c6, 0x001528c9, 0x0040c00f, 0x0040c04b, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040c08c, 0x005000cb, ++ 0x00000000, 0x001928d0, 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, ++ 0x00112a80, 0x00112b00, 0x00112b02, 0x00122b80, 0x00112b8b, 0x00192b94, ++ 0x0040dee3, 0x00200361, 0x00600006, 0x00200044, 0x00102c80, 0x00112cc6, ++ 0x00152cc9, 0x00192cd0, 0x00122d00, 0x00122d03, 0x00162e00, 0x00122e07, ++ 0x00112e80, 0x00112f00, 0x00112f02, 0x00122f80, 0x00112f8b, 0x00192f94, ++ 0x0040fae4, 0x002003cf, 0x00600006, 0x00200044, 0x00103080, 0x0040ec0f, ++ 0x0040ec4b, 0x00213700, 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, ++ 0x0040ec8c, 0x005000cb, 0x00000000, 0x001130c6, 0x001530c9, 0x001930d0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338b, 0x00193394, 0x00410ce5, 0x0020043d, ++ 0x00600006, 0x00200044, 0x00103480, 0x001134c6, 0x001534c9, 0x001934d0, ++ 0x00123500, 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, ++ 0x00113702, 0x00123780, 0x0011378b, 0x00193794, 0x004128e6, 0x002004ab, ++ 0x00600006, 0x00200044, 0x00103880, 0x00411a0f, 0x00411a4b, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x00411a8c, 0x005000cb, ++ 0x00000000, 0x001138c6, 0x001538c9, 0x001938d0, 0x00123900, 0x00123903, ++ 0x00163a00, 0x00123a07, 0x00113a80, 0x00113b00, 0x00113b02, 0x00123b80, ++ 0x00113b8b, 0x00193b94, 0x00413ae7, 0x00200519, 0x00600006, 0x00200044, ++ 0x00103c80, 0x00113cc6, 0x00153cc9, 0x00193cd0, 0x00123d00, 0x00123d03, ++ 0x00163e00, 0x00123e07, 0x00113e80, 0x00113f00, 0x00113f02, 0x00123f80, ++ 0x00113f8b, 0x00193f94, 0x00000000, 0x0041410f, 0x005000cb, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x005000cb, 0x00414487, 0x0060000a, ++ 0x00000000, 0x00415300, 0x007000a0, 0x00700080, 0x002005c0, 0x00600007, ++ 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, 0x00200000, ++ 0x00600006, 0x00111bfe, 0x0041754d, 0x00700000, 0x00200000, 0x00600006, ++ 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, 0x00600004, ++ 0x0050004a, 0x00415f88, 0x0060000b, 0x00200000, 0x00600006, 0x00700000, ++ 0x0041750b, 0x00111bfd, 0x00402e4d, 0x00202627, 0x008000fd, 0x005000cb, ++ 0x00c00002, 0x002005c0, 0x00600007, 0x0020015f, 0x00800002, 0x005000cb, ++ 0x00c01802, 0x002024c8, 0x00800002, 0x005000cb, 0x00403a4d, 0x0060000b, ++ 0x0041734d, 0x00700001, 0x00700003, 0x00417906, 0x00417a05, 0x0060000d, ++ 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, 0x0070001c, ++ 0x0060000c, ~0 ++}; ++ ++static uint32_t nv84_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 
0x00413e06, ++ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801, ++ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, ++ 0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d, ++ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, ++ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007, ++ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007, ++ 0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff, ++ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, ++ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, ++ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c, ++ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, ++ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, ++ 0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4, ++ 0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, ++ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, ++ 0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1, ++ 0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c, ++ 0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, ++ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, ++ 0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb, ++ 0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0, ++ 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00, ++ 0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3, ++ 0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c, ++ 0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00, ++ 0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02, ++ 0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389, ++ 0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5, ++ 0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c, ++ 0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500, ++ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702, ++ 0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f, ++ 0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb, ++ 0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080, ++ 0x00200480, 
0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, ++ 0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000, ++ 0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916, ++ 0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160, ++ 0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, ++ 0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003, ++ 0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, ++ 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0 ++}; ++ ++static uint32_t nv86_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906, ++ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801, ++ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, ++ 0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d, ++ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, ++ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007, ++ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007, ++ 0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff, ++ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, ++ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, ++ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, ++ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c, ++ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, ++ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, ++ 0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044, ++ 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103, ++ 0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380, ++ 0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb, ++ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387, ++ 0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280, ++ 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000, ++ 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, ++ 0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006, ++ 0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd, ++ 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002, ++ 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d, ++ 0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905, ++ 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, ++ 0x0060000c, ~0 
++}; ++ ++static uint32_t nv92_ctx_voodoo[] = { ++ 0x0070008E, 0x0070009C, 0x00200020, 0x00600008, 0x0050004C, 0x00400E89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00C000FF, 0x00200000, 0x008000FF, ++ 0x00700009, 0x0041924D, 0x00402944, 0x00402905, 0x0040290D, 0x00416E06, ++ 0x00600005, 0x004015C5, 0x00600011, 0x0040270B, 0x004021C5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004A, 0x00219600, 0x00600007, 0x00C02701, ++ 0x0020002E, 0x00800001, 0x005000CB, 0x0090FFFF, 0x0091FFFF, 0x00200020, ++ 0x00600008, 0x0050004C, 0x00600009, 0x00416E45, 0x0041894D, 0x0070009D, ++ 0x00402DCF, 0x0070009F, 0x0050009F, 0x00402AC0, 0x00200080, 0x00600008, ++ 0x00402A4F, 0x00402AC0, 0x004030CC, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111BFC, 0x00700083, 0x00300000, 0x00219600, 0x00600007, ++ 0x00C00A01, 0x0020001E, 0x00800001, 0x005000CB, 0x00C000FF, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020A, 0x00200540, 0x00600007, ++ 0x00300000, 0x00C000FF, 0x00C800FF, 0x00417907, 0x00202DD2, 0x008000FF, ++ 0x0040508C, 0x005000CB, 0x00A0023F, 0x00200040, 0x00600006, 0x0070000F, ++ 0x00170202, 0x0011020A, 0x00200032, 0x0010020D, 0x001C0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000F, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110A00, 0x00160B02, ++ 0x00120B28, 0x00140B2B, 0x00110C01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140B, 0x002000CB, 0x00101500, 0x0040790F, 0x0040794B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040798C, ++ 0x005000CB, 0x00000000, 0x00141A05, 0x00131A0C, 0x00131C00, 0x00121C04, ++ 0x00141C20, 0x00111C25, 0x00131C40, 0x00121C44, 0x00141C60, 0x00111C65, ++ 0x00131C80, 0x00121C84, 0x00141CA0, 0x00111CA5, 0x00131CC0, 0x00121CC4, ++ 0x00141CE0, 0x00111CE5, 0x00131F00, 0x00191F40, 0x0040A1E0, 0x002001C9, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120C6, 0x001520C9, 0x001920D0, ++ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, ++ 0x00112302, 0x00122380, 0x0011238B, 0x00112394, 0x0011239C, 0x0040BEE1, ++ 0x00200230, 0x00600006, 0x00200044, 0x00102480, 0x0040AF0F, 0x0040AF4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040AF8C, ++ 0x005000CB, 0x00000000, 0x001124C6, 0x001524C9, 0x001924D0, 0x00122500, ++ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, ++ 0x00122780, 0x0011278B, 0x00112794, 0x0011279C, 0x0040D1E2, 0x00200297, ++ 0x00600006, 0x00200044, 0x00102880, 0x001128C6, 0x001528C9, 0x001928D0, ++ 0x00122900, 0x00122903, 0x00162A00, 0x00122A07, 0x00112A80, 0x00112B00, ++ 0x00112B02, 0x00122B80, 0x00112B8B, 0x00112B94, 0x00112B9C, 0x0040EEE3, ++ 0x002002FE, 0x00600006, 0x00200044, 0x00102C80, 0x0040DF0F, 0x0040DF4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040DF8C, ++ 0x005000CB, 0x00000000, 0x00112CC6, 0x00152CC9, 0x00192CD0, 0x00122D00, ++ 0x00122D03, 0x00162E00, 0x00122E07, 0x00112E80, 0x00112F00, 0x00112F02, ++ 0x00122F80, 0x00112F8B, 0x00112F94, 0x00112F9C, 0x004101E4, 0x00200365, ++ 0x00600006, 0x00200044, 0x00103080, 0x001130C6, 0x001530C9, 0x001930D0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338B, 0x00113394, 0x0011339C, 0x00411EE5, ++ 0x002003CC, 0x00600006, 0x00200044, 0x00103480, 0x00410F0F, 0x00410F4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00410F8C, ++ 0x005000CB, 0x00000000, 0x001134C6, 0x001534C9, 0x001934D0, 0x00123500, ++ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 
0x00113700, 0x00113702, ++ 0x00123780, 0x0011378B, 0x00113794, 0x0011379C, 0x004131E6, 0x00200433, ++ 0x00600006, 0x00200044, 0x00103880, 0x001138C6, 0x001538C9, 0x001938D0, ++ 0x00123900, 0x00123903, 0x00163A00, 0x00123A07, 0x00113A80, 0x00113B00, ++ 0x00113B02, 0x00123B80, 0x00113B8B, 0x00113B94, 0x00113B9C, 0x00414EE7, ++ 0x0020049A, 0x00600006, 0x00200044, 0x00103C80, 0x00413F0F, 0x00413F4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00413F8C, ++ 0x005000CB, 0x00000000, 0x00113CC6, 0x00153CC9, 0x00193CD0, 0x00123D00, ++ 0x00123D03, 0x00163E00, 0x00123E07, 0x00113E80, 0x00113F00, 0x00113F02, ++ 0x00123F80, 0x00113F8B, 0x00113F94, 0x00113F9C, 0x00000000, 0x0041550F, ++ 0x005000CB, 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x005000CB, ++ 0x00415887, 0x0060000A, 0x00000000, 0x00416700, 0x007000A0, 0x00700080, ++ 0x00200540, 0x00600007, 0x00200004, 0x00C000FF, 0x008000FF, 0x005000CB, ++ 0x00700000, 0x00200000, 0x00600006, 0x00111BFE, 0x0041894D, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111BFE, 0x00700080, 0x0070001D, 0x0040114D, ++ 0x00700081, 0x00600004, 0x0050004A, 0x00417388, 0x0060000B, 0x00200000, ++ 0x00600006, 0x00700000, 0x0041890B, 0x00111BFD, 0x0040424D, 0x00202DD2, ++ 0x008000FD, 0x005000CB, 0x00C00002, 0x00200540, 0x00600007, 0x00200160, ++ 0x00800002, 0x005000CB, 0x00C01802, 0x00202C72, 0x00800002, 0x005000CB, ++ 0x00404E4D, 0x0060000B, 0x0041874D, 0x00700001, 0x00700003, 0x00418D06, ++ 0x00418E05, 0x0060000D, 0x00700005, 0x0070000D, 0x00700006, 0x0070000B, ++ 0x0070000E, 0x0070001C, 0x0060000C, ~0 ++}; ++ ++static uint32_t nvaa_ctx_voodoo[] = { ++ 0x0070009c, 0x00300000, 0x0044f109, 0x00402d09, 0x0040e551, 0x00400a44, ++ 0x00400a05, 0x00400a0d, 0x0070008e, 0x0040124d, 0x0070009d, 0x0045004d, ++ 0x00700097, 0x00450121, 0x004446a1, 0x0044764d, 0x0044824d, 0x0070001d, ++ 0x00401806, 0x00600005, 0x00444445, 0x0044308b, 0x00401845, 0x0040234d, ++ 0x00700081, 0x00401ccf, 0x0070009f, 0x0050009f, 0x0044dc4d, 0x00700017, ++ 0x0040230b, 0x00447d4d, 0x00450221, 0x004456a1, 0x007000a0, 0x00700001, ++ 0x00700003, 0x00402706, 0x00402805, 0x0060000d, 0x00700005, 0x0070000d, ++ 0x00700006, 0x00700002, 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ++ 0x00000000, 0x0090ffff, 0x0091ffff, 0x0044d44d, 0x00600009, 0x0048004d, ++ 0x00700096, 0x00403acf, 0x0070009f, 0x0050009f, 0x0040e551, 0x004036c0, ++ 0x00200080, 0x00600008, 0x0040364f, 0x004036c0, 0x00403ecc, 0x00403651, ++ 0x00700016, 0x0048004d, 0x00600011, 0x0048004d, 0x0044364d, 0x0070008e, ++ 0x00700081, 0x0044704d, 0x00447d4d, 0x00700083, 0x00300000, 0x00212740, ++ 0x00600007, 0x00c00b01, 0x00200022, 0x00800001, 0x005000cb, 0x00c000ff, ++ 0x00445e4d, 0x0048004d, 0x0044ce08, 0x0044734d, 0x00448b4d, 0x00445e4d, ++ 0x0044e24d, 0x0044764d, 0x0044824d, 0x0048004d, 0x00700083, 0x0045034d, ++ 0x00a0023f, 0x00200040, 0x00600006, 0x0044fc4d, 0x00448d4d, 0x002001d0, ++ 0x0044b860, 0x00200280, 0x0038ffff, 0x0044cc4d, 0x00300000, 0x005000cb, ++ 0x00451c4d, 0x005000cb, 0x0044d007, 0x0048004d, 0x0044794d, 0x00111bfc, ++ 0x0048004d, 0x0044794d, 0x00111bfd, 0x0048004d, 0x0044794d, 0x00111bfe, ++ 0x0048004d, 0x00200000, 0x00700000, 0x00600006, 0x0048004d, 0x00200001, ++ 0x00600006, 0x0044fc4d, 0x0011020a, 0x0048004d, 0x00300000, 0x00c3ffff, ++ 0x00200000, 0x00600007, 0x00700000, 0x00200008, 0x008000ff, 0x005000cb, ++ 0x0048004d, 0x00000000, 0x0048004d, 0x00000000, 0x00170202, 0x00200032, ++ 0x0010020d, 0x001e0242, 0x001102c0, 0x00120302, 0x00150402, 0x00180500, ++ 0x00130509, 0x00150550, 0x00110605, 0x00200013, 0x00100607, 
0x00110700, ++ 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, 0x00120b28, 0x00140b2b, ++ 0x00110c01, 0x00110d01, 0x00111400, 0x00111405, 0x00111407, 0x00111409, ++ 0x0011140b, 0x002000d4, 0x00101500, 0x00141a05, 0x00131a0c, 0x00131c00, ++ 0x00131c04, 0x00141c20, 0x00131c25, 0x00131f00, 0x00131f04, 0x00111f08, ++ 0x00111f0b, 0x00200015, 0x00101f40, 0x0048004d, 0x00600006, 0x00451c4d, ++ 0x00112020, 0x00112022, 0x00200085, 0x00102040, 0x001120c8, 0x001420ca, ++ 0x001b20cf, 0x00122100, 0x00122103, 0x00162140, 0x00122147, 0x00122153, ++ 0x001121a0, 0x001221c0, 0x001121cb, 0x001121d4, 0x001521d8, 0x0048004d, ++ 0x00000000, 0x0048004d, 0x0060000b, 0x0048004d, 0x0060000a, 0x0048004d, ++ 0x0060000b, 0x0040d24d, 0x00200020, 0x00600008, 0x0050004c, 0x0048004d, ++ 0x002003e8, 0x00600008, 0x0050004c, 0x0048004d, 0x00600004, 0x0050004a, ++ 0x0048004d, 0x00c000ff, 0x00c800ff, 0x0048004d, 0x00c000ff, 0x00c800ff, ++ 0x0048004d, 0x00700016, 0x0070008e, 0x00700082, 0x00500041, 0x0044d84d, ++ 0x00700095, 0x005000d1, 0x00600016, 0x00500052, 0x00700002, 0x00700015, ++ 0x0040284d, 0x0070008e, 0x0044d44d, 0x00200000, 0x00600007, 0x00300000, ++ 0x00c000ff, 0x00200000, 0x008000ff, 0x00700009, 0x0070000e, 0x0048004d, ++ 0x00700080, 0x00480017, 0x00700000, 0x0048004d, 0x0048004d, 0x0048004d, ++ 0x0048004d, 0x0070008e, 0x0044d44d, 0x00700083, 0x0044df4d, 0x00450c4d, ++ 0x0070000f, 0x00410b8c, 0x005000cb, 0x0048004d, 0x00200280, 0x00600007, ++ 0x00452307, 0x00451187, 0x0048004d, 0x00000000, 0x00202070, 0x0044fc4d, ++ 0x008000ff, 0x0048004d, 0x00210600, 0x00600007, 0x00200428, 0x0044fc4d, ++ 0x008800ff, 0x0048004d, 0x0048000f, 0x0048004b, 0x0045164d, 0x0070008f, ++ 0x0048008c, 0x005000cb, 0x0048004d, 0x00202070, 0x0044fc4d, 0x008000fd, ++ 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200161, 0x0044fc4d, ++ 0x00800002, 0x005000cb, 0x00c00002, 0x00201f0e, 0x0044fc4d, 0x00800002, ++ 0x005000cb, 0x0048004d, ~0 ++}; ++ ++static int ++nv50_graph_init_ctxctl(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t *voodoo = NULL; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (dev_priv->chipset) { ++ case 0x50: ++ voodoo = nv50_ctx_voodoo; ++ break; ++ case 0x84: ++ voodoo = nv84_ctx_voodoo; ++ break; ++ case 0x86: ++ voodoo = nv86_ctx_voodoo; ++ break; ++ case 0x92: ++ voodoo = nv92_ctx_voodoo; ++ break; ++ case 0xaa: ++ voodoo = nvaa_ctx_voodoo; ++ break; ++ default: ++ DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); ++ return -EINVAL; ++ } ++ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ while (*voodoo != ~0) { ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo); ++ voodoo++; ++ } ++ ++ NV_WRITE(0x400320, 4); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); ++ ++ return 0; ++} ++ ++int ++nv50_graph_init(struct drm_device *dev) ++{ ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ nv50_graph_init_reset(dev); ++ nv50_graph_init_intr(dev); ++ nv50_graph_init_regs__nv(dev); ++ nv50_graph_init_regs(dev); ++ ++ ret = nv50_graph_init_ctxctl(dev); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ++nv50_graph_takedown(struct drm_device *dev) ++{ ++ DRM_DEBUG("\n"); ++} ++ ++static void ++nv50_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030); ++ INSTANCE_WR(ctx, 0x00120/4, 0xff400040); ++ INSTANCE_WR(ctx, 0x00124/4, 0xfff00080); ++ INSTANCE_WR(ctx, 
0x00128/4, 0xfff70090); ++ INSTANCE_WR(ctx, 0x0012c/4, 0xffe806a8); ++ INSTANCE_WR(ctx, 0x001d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x001d8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00214/4, 0x0000fe0c); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00254/4, 0x0001fd87); ++ INSTANCE_WR(ctx, 0x00268/4, 0x00001018); ++ INSTANCE_WR(ctx, 0x0026c/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002a4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x002a8/4, 0x0001005f); ++ INSTANCE_WR(ctx, 0x002b0/4, 0x00000600); ++ INSTANCE_WR(ctx, 0x002b4/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x002c8/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x002e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x002e8/4, 0x00300080); ++ INSTANCE_WR(ctx, 0x002ec/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00308/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0030c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00318/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0031c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00334/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00338/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0033c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00350/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00354/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00360/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x0000000a); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00420/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00438/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00450/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004a8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004c8/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x004cc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000006); ++ 
INSTANCE_WR(ctx, 0x004f8/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00558/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005b4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005b8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00600/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00628/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x00630/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x00638/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00648/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0065c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00660/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00668/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00678/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00680/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00688/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00690/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00698/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x006a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x006ac/4, 0x00000f80); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x00730/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x00754/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x00758/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00760/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00760/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00760/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x00760/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x00778/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x0077c/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00784/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00784/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00784/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x00784/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x0079c/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x007a0/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x007a8/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x007c0/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x007c4/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x007cc/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x007e4/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x007e8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x007f0/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x00808/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x0080c/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00814/4, 
0x00001000); ++ INSTANCE_WR(ctx, 0x00814/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00814/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x00814/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x0082c/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x00834/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x00840/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x00844/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x0085c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00860/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00864/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00874/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00878/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x0089c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x008a4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x008ac/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x008b4/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x008b8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x008f8/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x0091c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00924/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00934/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00938/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00960/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x0096c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00984/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00984/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00984/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x00984/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x009e4/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x009e8/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x009f4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00a14/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00a1c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00a2c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00a30/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00a64/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00a6c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00a70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00a94/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00a9c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ab0/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00ad4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00adc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00ae4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00aec/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00b18/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x00b24/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x00030201); ++ INSTANCE_WR(ctx, 
0x00b64/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00b64/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00b64/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00b64/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00b64/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b64/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b64/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00b9c/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00ba0/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00be4/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00c14/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00c24/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00c28/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00c4c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00c50/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00c64/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00c68/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00ca4/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00ca8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00cd0/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00cf4/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00cf4/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00cf4/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x00cf4/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00d54/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00d58/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00d84/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d88/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00d8c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d9c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00da0/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00dc4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00dcc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00dd4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00ddc/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00de0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00e04/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e08/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00e0c/4, 0x01800000); ++ 
INSTANCE_WR(ctx, 0x00e1c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00e20/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00e44/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00e4c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00e54/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00e5c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00e60/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00e88/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x00e94/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00ed4/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00f0c/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00f10/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00f1c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00f3c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00f40/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00f44/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00f54/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00f58/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00f7c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00f84/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00f8c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00f94/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00f98/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00fbc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fc0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00fc4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fd4/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00fd8/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x00ffc/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01004/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0100c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01014/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x01018/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01040/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x0104c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x01064/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x01064/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x01064/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x01064/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x0108c/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x010ac/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x010ac/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x010ac/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x010ac/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x010ac/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x010c4/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x010c8/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x010d4/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x010d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x010d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x010d4/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x010d4/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x010d4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x010d4/4, 
0x0000000f); ++ INSTANCE_WR(ctx, 0x010f4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x010f8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x010fc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0110c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x01110/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x01134/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x0113c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01144/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0114c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x01150/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01174/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01178/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x0117c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0118c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x01190/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x011b4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x011bc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x011c4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x011cc/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x011d0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x011f8/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x01204/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0121c/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x0121c/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x0121c/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x0121c/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x01244/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01244/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x01244/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x01244/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x01244/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01244/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01244/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x01264/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x01264/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01264/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x01264/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x01264/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x0127c/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x01280/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0128c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x012ac/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x012b0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x012b4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x012c4/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x012c8/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x012ec/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x012f4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x012fc/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01304/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x01308/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0132c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01330/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x01334/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01344/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x01348/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x0136c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01374/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0137c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01384/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x01388/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x013b0/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x013bc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x013d4/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x013d4/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x013d4/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x013d4/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x013fc/4, 0x00008000); ++ INSTANCE_WR(ctx, 
0x0141c/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x0141c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0141c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x0141c/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x0141c/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x01434/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x01438/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x01444/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x01444/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01444/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01444/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x01444/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x01444/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x01444/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x01464/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01468/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x0146c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0147c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x01480/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x014a4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x014ac/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x014b4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x014bc/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x014c0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x014e4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x014e8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x014ec/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x014fc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x01500/4, 0x000c0000); ++ INSTANCE_WR(ctx, 0x01524/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x0152c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01534/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0153c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x01540/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01568/4, 0x00007070); ++ INSTANCE_WR(ctx, 0x01574/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0158c/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x0158c/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x0158c/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x0158c/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x015b4/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x015d4/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x015d4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x015d4/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x015d4/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x015d4/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x015ec/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x015f0/4, 0x00007fff); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x000001ff); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x015fc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x02b40/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x02b60/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02b80/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02ba0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x02bc0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x02be0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c60/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02c80/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x02ca0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x02cc0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c5e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0c600/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x44f80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x44fa0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x44fc0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x45000/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x45040/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x45060/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45080/4, 0x00080c14); ++ 
INSTANCE_WR(ctx, 0x450e0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x45100/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x45160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4c9a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4cc80/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4ce00/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x4ce20/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x4ce60/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4cee0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4cf20/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x4d080/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4d0a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x4d0c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x4d1e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4d260/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4d480/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4d4a0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x4d4c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4d4e0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4d500/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4d520/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4d940/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4d960/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4d980/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4d9a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4d9c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4d9e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4da00/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4da20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4da40/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4da60/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4da80/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4daa0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4dac0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4dae0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4db00/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4db20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x4db40/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x4db80/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01784/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x01824/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x01a04/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x01bc4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01be4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c24/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c44/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x01c84/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x01e24/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x042e4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x04324/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e84/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x15524/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x15764/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15784/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x157c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x157e4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15804/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15824/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x15864/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x15924/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15964/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15984/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x159a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x159c4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x159e4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x15ac4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15b04/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15b24/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15b44/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15be4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15c24/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15c44/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x15cc4/4, 0x04444480); ++ INSTANCE_WR(ctx, 0x16444/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x164e4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x16544/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x16584/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x165a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x165c4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x165e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16604/4, 
0x00000004); ++ INSTANCE_WR(ctx, 0x16624/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x185a4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x185c4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x18664/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x187e4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x18804/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x16708/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x16768/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x16948/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x16a28/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16a48/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x16aa8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16d08/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x16de8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x16ee8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x16f08/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17108/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x171a8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x171c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x171e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x17268/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x17288/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x17508/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17528/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17548/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17568/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17588/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x175a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x175c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x175e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17608/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17628/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17648/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17668/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17688/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x176a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x176c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x176e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17708/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x17be8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x17c08/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x17c68/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17ca8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17cc8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17ce8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17d08/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x18108/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x18128/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x18608/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x18648/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18668/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18688/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x186a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x186c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x186e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18728/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x18768/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x188a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x188c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x188e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18908/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18ec8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18ee8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18f28/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18fa8/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18fc8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18fe8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19028/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19048/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x19088/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x190a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x190c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19108/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x19188/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x191a8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x19288/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x192a8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x199c8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19a28/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1a148/4, 0x00000088); ++ INSTANCE_WR(ctx, 
0x1a168/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x1a1c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a4a8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a508/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1a588/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a5a8/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1aa68/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x1aaa8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1aae8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1ab08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1ab48/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1aba8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1abe8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1ac08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1ac48/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1ac68/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1ac88/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x1acc8/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x25528/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x25548/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x25588/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x255a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x255c8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x25608/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x25648/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x256c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x256e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25708/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25728/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25748/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25768/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25788/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x257a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x257c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x257e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25808/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25828/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25848/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25868/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25888/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x258a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x25d48/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x25d68/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x25dc8/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x0180c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0184c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x019ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01a0c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01a6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01b4c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01c6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01c8c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01ccc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01f4c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0216c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0218c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x021ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x021cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x021ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0220c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0222c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0224c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0226c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0228c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x022ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x022cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x022ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0230c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0232c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0234c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0268c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x026cc/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x027ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0282c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x029cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02acc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x02bcc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02cac/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x02ccc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02cec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02d0c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02d2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02d6c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x02dac/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0306c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0308c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x030ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x030cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x030ec/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0310c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0312c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x031ac/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x031cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e4c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03e8c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x0402c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0404c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x040ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0418c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x042ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x042cc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0430c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0458c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x047ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x047cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x047ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0480c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0482c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0484c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0486c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0488c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x048ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x048cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x048ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0490c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0492c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0494c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0496c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0498c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x04ccc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x04d0c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x04dec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0500c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0510c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0520c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x052ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0530c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0532c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0534c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0536c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x053ac/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x053ec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x056ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x056cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x056ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0570c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0572c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0574c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0576c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x057ec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0580c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0648c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x064cc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x0666c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0668c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x066ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x067cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x068ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0690c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0694c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x06bcc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x06dec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06e0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06e2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06e4c/4, 
0x3f800000); ++ INSTANCE_WR(ctx, 0x06e6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06e8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06eac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06ecc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06eec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06f0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06f2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06f4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06f6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06f8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06fac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x06fcc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0730c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0734c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x0742c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0746c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x074ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0764c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0774c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0784c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x078ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0790c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0792c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0794c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0796c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0798c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x079ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x079ec/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x07a2c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x07cec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07d0c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x07d2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07d4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07d6c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x07d8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07dac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07e2c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x07e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x08acc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x08b0c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x08cac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x08ccc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x08d2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x08e0c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x08f2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x08f4c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x08f8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0920c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0942c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0944c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0946c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0948c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x094ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x094cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x094ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0950c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0952c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0954c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0956c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0958c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x095ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x095cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x095ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0960c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0994c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0998c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x09a6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09aac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09aec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09c8c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x09d8c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x09e8c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x09f2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09f4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09f6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09f8c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x09fac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x09fcc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x09fec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0a02c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 
0x0a06c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0a32c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0a34c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0a36c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0a38c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0a3ac/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0a3cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0a3ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0a46c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0a48c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b10c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0b14c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x0b2ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b30c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0b36c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b44c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0b56c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b58c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0b5cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b84c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0ba6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0ba8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0baac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bacc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0baec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bb0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bb2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bb4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bb6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bb8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bbac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bbcc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bbec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bc0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bc2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bc4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0bf8c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0bfcc/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x0c0ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c0ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c12c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c2cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0c3cc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0c4cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0c56c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c58c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c5ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c5cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0c5ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c60c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0c62c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c66c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x0c6ac/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0c96c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c98c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0c9ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c9cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0c9ec/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0ca0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0ca2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0caac/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0cacc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0d74c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0d78c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x0d92c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0d94c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0d9ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0da8c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0dbac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0dbcc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0dc0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0de8c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0e0ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e0cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e0ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e10c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e12c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e14c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e16c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e18c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e1ac/4, 0x3f800000); ++ 
INSTANCE_WR(ctx, 0x0e1cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e1ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e20c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e22c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e24c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e26c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e28c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0e5cc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0e60c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x0e6ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0e72c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0e76c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0e90c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0ea0c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0eb0c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0ebac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0ebcc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0ebec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0ec0c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0ec2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0ec4c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0ec6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0ecac/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x0ecec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0efac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0efcc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0efec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0f00c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0f02c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0f04c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0f06c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0f0ec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0f10c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01730/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x019f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a10/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a30/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x01ad0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b90/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x01bb0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02050/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02070/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x02090/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x020b0/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x020d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x020f0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x02110/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x02250/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x166f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16710/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x16950/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x16ad0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16af0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b10/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b30/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b50/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16c70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16cf0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16db0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f90/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16fd0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ff0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x17010/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17050/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17150/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x171b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x17230/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17250/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17290/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172b0/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172d0/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x17430/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17450/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x17470/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x17490/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174d0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x174f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17530/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17550/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17570/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17590/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17610/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17630/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17730/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17750/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x17850/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x178b0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x178d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17910/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x179d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17a70/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17b70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17bf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17c10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17cd0/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x17d10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17d50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x182b0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182d0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182f0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18310/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18330/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18350/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18370/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18390/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x183b0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x184b0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x184d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x184f0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18510/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18530/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18550/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18570/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18590/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x185f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18610/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18630/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18650/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18670/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18690/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x186f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x187f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x18810/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18830/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18870/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x188d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x188f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18930/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x189d0/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x18a50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18a70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18bb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18c50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18cb0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18cd0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18cf0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18d70/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18e70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18e90/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x19190/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x19210/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x19270/4, 0x00000004); ++ INSTANCE_WR(ctx, 
0x192b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x192d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19350/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19410/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19470/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x194b0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x194d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x194f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x19510/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x19530/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x19730/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19750/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x197b0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x197d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19830/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x19950/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19990/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x199b0/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x199d0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x199f0/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x19a10/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x19a50/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19a90/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d90/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x19e30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e90/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19eb0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19ed0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19ef0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x19f10/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3d0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1a3f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a410/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x1a430/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a450/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x1a470/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a510/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a530/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a5b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea70/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2ecb0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2ee30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ee50/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee70/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee90/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eeb0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2efd0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f050/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f110/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f330/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f350/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f4b0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f510/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f590/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f5f0/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f610/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f630/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7d0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f7f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f810/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f830/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f850/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f890/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8f0/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x2f910/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f930/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f950/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f970/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f990/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa90/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2fab0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2fbb0/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x2fc10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fc30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2fd30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fdd0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fed0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2ff50/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2ff70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ffb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fff0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30030/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x30070/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x300b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30610/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30630/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30650/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30670/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30690/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306b0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306d0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306f0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30710/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30810/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30830/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30850/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30870/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30890/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308f0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30910/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30930/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30950/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30970/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30990/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a10/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30a50/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x30b50/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30b70/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30b90/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30bd0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30c30/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x30c50/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30c90/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x30d30/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x30db0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30dd0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30f10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30fb0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30ff0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31010/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x31030/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x31050/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x310d0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x311d0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x311f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x314f0/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x31570/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x315d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x31610/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31630/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x316b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31730/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d0/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x31810/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31830/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31850/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31870/4, 
0x00010100); ++ INSTANCE_WR(ctx, 0x31890/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x31a90/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ab0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ad0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b10/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31b30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b90/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31cb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31cf0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31d10/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31d30/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31d50/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x31d70/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x31db0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x01734/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x019f4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a14/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a34/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x01ad4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b34/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b54/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b74/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b94/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x01bb4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02054/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02074/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x02094/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x020b4/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x020d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x020f4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x02114/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021b4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x02254/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x166f4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16714/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x16954/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x16ad4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16af4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b14/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b34/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b54/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16c74/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16cf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16db4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f54/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f74/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f94/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fb4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16fd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ff4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x17014/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17054/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17154/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x171b4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x17234/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17254/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17294/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172b4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172d4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x17434/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17474/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x17494/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x174f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17534/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17554/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17574/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17594/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17614/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17634/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17734/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17754/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x17854/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x178b4/4, 0x00000011); ++ INSTANCE_WR(ctx, 
0x178d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17914/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x179d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17a74/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17b74/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17bf4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17c14/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c54/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c94/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17cd4/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x17d14/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17d54/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x182b4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182d4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182f4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18314/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18334/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18354/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18374/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18394/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x183b4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x184b4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x184d4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x184f4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18514/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18534/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18554/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18574/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18594/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185b4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185d4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x185f4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18614/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18634/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18654/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18674/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18694/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186b4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x186f4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x187f4/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x18814/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18834/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18874/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x188d4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x188f4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18934/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x189d4/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x18a54/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18a74/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18bb4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18c54/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c94/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18cb4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18cd4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18cf4/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18d74/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18e74/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18e94/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x19194/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x19214/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x19274/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x192b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x192d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19354/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19414/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19474/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x194b4/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x194d4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x194f4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x19514/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x19534/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x19734/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19754/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19774/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x197b4/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x197d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19834/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x19954/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19994/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x199b4/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x199d4/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x199f4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x19a14/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x19a54/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19a94/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d54/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d74/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d94/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x19e34/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e94/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19eb4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19ed4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19ef4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x19f14/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3b4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3d4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1a3f4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a414/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x1a434/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a454/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x1a474/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a514/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a534/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a5b4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea54/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea74/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2ecb4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2ee34/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ee54/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee74/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee94/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eeb4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2efd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f054/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f114/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2f4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f314/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f334/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f354/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f374/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3b4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f4b4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f514/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f594/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f5f4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f614/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f634/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f794/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f7f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f814/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f834/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f854/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f894/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f934/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f954/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f974/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f994/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa94/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2fab4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2fbb4/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x2fc14/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fc34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc74/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2fd34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fdd4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fed4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2ff54/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2ff74/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ffb4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fff4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30034/4, 
0x000003ff); ++ INSTANCE_WR(ctx, 0x30074/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x300b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30614/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30634/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30654/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30674/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30694/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306b4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306d4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306f4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30714/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30814/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30834/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30854/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30874/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30894/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308b4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308d4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308f4/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30914/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30934/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30954/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30974/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30994/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309b4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309d4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309f4/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a14/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30a54/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x30b54/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30b74/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30b94/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30bd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30c34/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x30c54/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30c94/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x30d34/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x30db4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30dd4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30f14/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30fb4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30ff4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31014/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x31034/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x31054/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x310d4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x311d4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x311f4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x314f4/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x31574/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x315d4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x31614/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31634/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x316b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31734/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31774/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d4/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x31814/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31834/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31854/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31874/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x31894/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x31a94/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ab4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ad4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b14/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31b34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b94/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31cb4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31cf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31d14/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31d34/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31d54/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x31d74/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x31db4/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x01738/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x019f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a18/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a38/4, 0x00608080); ++ INSTANCE_WR(ctx, 
0x01ad8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b38/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b78/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b98/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x01bb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02058/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02078/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x02098/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x020b8/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x020d8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x020f8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x02118/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021d8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x02258/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x166f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16718/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x16958/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x16ad8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16af8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b18/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b38/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b58/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16c78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16cf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16db8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f58/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f98/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16fd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ff8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x17018/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17058/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17158/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x171b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x17238/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17258/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17298/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172b8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172d8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x17438/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17458/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17478/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x17498/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174d8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x174f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17538/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17558/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17578/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17598/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17618/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17638/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17738/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17758/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x17858/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x178b8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x178d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17918/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x179d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17a78/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17b78/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17bf8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17c18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c58/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17cd8/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x17d18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17d58/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x182b8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182d8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182f8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18318/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18338/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18358/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x18378/4, 0x00000008); ++ 
INSTANCE_WR(ctx, 0x18398/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x183b8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x184b8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x184d8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x184f8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18518/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18538/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18558/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18578/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18598/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185b8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185d8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x185f8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18618/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18638/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18658/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18678/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18698/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186b8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x186f8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x187f8/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x18818/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18838/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18878/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x188d8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x188f8/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18938/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x189d8/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x18a58/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18a78/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18bb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18c58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18cb8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18cd8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18cf8/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18d78/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18e78/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18e98/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x19198/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x19218/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x19278/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x192b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x192d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19358/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19418/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19478/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x194b8/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x194d8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x194f8/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x19518/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x19538/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x19738/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19758/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x197b8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x197d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19838/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x19958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19998/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x199b8/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x199d8/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x199f8/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x19a18/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x19a58/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19a98/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d78/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d98/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x19e38/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e98/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19eb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19ed8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19ef8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x19f18/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3d8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1a3f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a418/4, 
0x03020100); ++ INSTANCE_WR(ctx, 0x1a438/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a458/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x1a478/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a518/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a538/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a5b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea78/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2ecb8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2ee38/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ee58/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee78/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eeb8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2efd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f058/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f118/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2f8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f318/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f338/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f358/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f378/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3b8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f4b8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f518/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f598/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f5f8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f618/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f638/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f798/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7d8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f7f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f818/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f838/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f858/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f898/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f918/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f938/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f978/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f998/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa98/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2fab8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2fbb8/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x2fc18/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fc38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc78/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2fd38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fdd8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fed8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2ff58/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2ff78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ffb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fff8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30038/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x30078/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x300b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30618/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30638/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30658/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30678/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30698/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306b8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306d8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306f8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30718/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30818/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30838/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30858/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30878/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30898/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308b8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308d8/4, 0x00000400); ++ INSTANCE_WR(ctx, 
0x308f8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30918/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30938/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30958/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30978/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30998/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309b8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309d8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309f8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a18/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30a58/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x30b58/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30b78/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30b98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30bd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30c38/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x30c58/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30c98/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x30d38/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x30db8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30dd8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30f18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30fb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30ff8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31018/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x31038/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x31058/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x310d8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x311d8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x311f8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x314f8/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x31578/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x315d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x31618/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31638/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x316b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31738/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d8/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x31818/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31838/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31858/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31878/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x31898/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x31a98/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ab8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ad8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b18/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31b38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b98/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31cb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31cf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31d18/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31d38/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31d58/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x31d78/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x31db8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0173c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x019fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a3c/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x01adc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b3c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b7c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b9c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x01bbc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0205c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0207c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0209c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x020bc/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x020dc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x020fc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0211c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021dc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x0225c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x166fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1671c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1695c/4, 0x0000000f); ++ 
INSTANCE_WR(ctx, 0x16adc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16afc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b1c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b3c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b5c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16c7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16cfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16dbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f5c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f9c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16fdc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ffc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1701c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1705c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1715c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x171bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1723c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1725c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1729c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172bc/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172dc/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x1743c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1745c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1747c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1749c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174dc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x174fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1753c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1755c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1757c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1759c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1761c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1763c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1773c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x1775c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1785c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x178bc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x178dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1791c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x179dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17a7c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17b7c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17bfc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17c1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c5c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17cdc/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x17d1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17d5c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x182bc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182dc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182fc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1831c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1833c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1835c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1837c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1839c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x183bc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x184bc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x184dc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x184fc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1851c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1853c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1855c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1857c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1859c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185bc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185dc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x185fc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1861c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1863c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1865c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1867c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1869c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186bc/4, 
0x00000300); ++ INSTANCE_WR(ctx, 0x186dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x186fc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x187fc/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x1881c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1883c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1887c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x188dc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x188fc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1893c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x189dc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x18a5c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18a7c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18bbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18c5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18cbc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18cdc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18cfc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18d7c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18e7c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18e9c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1919c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x1921c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1927c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x192bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x192dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1935c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1941c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1947c/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x194bc/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x194dc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x194fc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1951c/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x1953c/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x1973c/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1975c/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1977c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x197bc/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x197dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1983c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1995c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1999c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x199bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x199dc/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x199fc/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x19a1c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x19a5c/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19a9c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d7c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d9c/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x19e3c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e9c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19ebc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19edc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19efc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x19f1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3dc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1a3fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a41c/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x1a43c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a45c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x1a47c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a51c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a53c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a5bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2ecbc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2ee3c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ee5c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee7c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee9c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eebc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2efdc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f05c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f11c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x2f2dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2fc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f31c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f33c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f35c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f37c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3bc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f4bc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f51c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f59c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f5fc/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f61c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f63c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f79c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7dc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f7fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f81c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f83c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f85c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f89c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f91c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f93c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f95c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f97c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f99c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa9c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2fabc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2fbbc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x2fc1c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fc3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc7c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2fd3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fddc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fedc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2ff5c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2ff7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ffbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fffc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3003c/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x3007c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x300bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3061c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3063c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3065c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3067c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3069c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306bc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306dc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306fc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3071c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x3081c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x3083c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3085c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3087c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3089c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308bc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308dc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308fc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3091c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3093c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3095c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3097c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3099c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309bc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309dc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309fc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a1c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30a5c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x30b5c/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30b7c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30b9c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30bdc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30c3c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x30c5c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30c9c/4, 0x00000003); ++ 
INSTANCE_WR(ctx, 0x30d3c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x30dbc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30ddc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30f1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30fbc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30ffc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3101c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3103c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3105c/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x310dc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x311dc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x311fc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x314fc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x3157c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x315dc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x3161c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3163c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x316bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3173c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3177c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317dc/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x3181c/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x3183c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x3185c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x3187c/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x3189c/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x31a9c/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31abc/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31adc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b1c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31b3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b9c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31cbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31cfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31d1c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31d3c/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31d5c/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x31d7c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x31dbc/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x4dc00/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4dc40/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc60/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc80/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dca0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd00/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd60/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd80/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dda0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dde0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4de00/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df80/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dfa0/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfc0/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfe0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e040/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e0a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0c0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e120/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e140/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e2a0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e380/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3a0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3e0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e400/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e420/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e440/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e460/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e4a0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e560/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e580/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5e0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e700/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e7a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8e0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e900/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e920/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e940/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e960/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x4e980/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55e00/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc24/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc44/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc64/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc84/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dce4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd44/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd64/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd84/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddc4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4dde4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df64/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df84/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfa4/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfc4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e024/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e084/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0a4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e104/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e124/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e284/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e364/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e384/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3a4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e404/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e424/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e484/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e544/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e564/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5c4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6e4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e784/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8c4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e904/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e924/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e944/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e964/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e984/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55de4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e24/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc28/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc48/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc68/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc88/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dce8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd48/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd68/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd88/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4dde8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df68/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df88/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfa8/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfc8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e028/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e088/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0a8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0c8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e108/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e128/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e288/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e368/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e388/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3a8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3e8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e408/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e428/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e448/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x4e488/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e548/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e568/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5a8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5c8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6e8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e788/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8c8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8e8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e908/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e928/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e948/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e968/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e988/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55de8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e28/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc2c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc4c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc6c/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc8c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd4c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd6c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddcc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddec/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df6c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df8c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfac/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfcc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e02c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e08c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0ac/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e10c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e12c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e28c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e36c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e38c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3ac/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3cc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e40c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e42c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e44c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e48c/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e54c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e56c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5ac/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6ec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e78c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e90c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e92c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e94c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e96c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e98c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9cc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55dec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc30/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc50/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc70/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc90/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcf0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd50/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd70/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddf0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df90/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfb0/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfd0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e090/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0b0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0d0/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x4e110/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e130/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e290/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e370/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e390/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e410/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e430/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e450/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e490/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e550/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e570/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5b0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5d0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e790/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8d0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e910/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e930/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e950/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e970/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e990/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55df0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc34/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc54/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc74/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc94/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd54/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd74/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd94/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddd4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddf4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df74/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df94/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfb4/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfd4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e034/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e094/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0b4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e114/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e134/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e294/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e374/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e394/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3b4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3d4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e414/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e434/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e494/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e554/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e574/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5b4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5d4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6f4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e794/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e934/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e954/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e974/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e994/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9d4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55df4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc38/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc58/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc78/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc98/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd58/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd78/4, 
0x00001000); ++ INSTANCE_WR(ctx, 0x4dd98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddd8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddf8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df78/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df98/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfb8/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfd8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e038/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e098/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0b8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e118/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e138/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e298/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e378/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e398/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3b8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3d8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e418/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e438/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e458/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e498/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e558/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e578/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5b8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5d8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6f8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e798/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8d8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e918/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e938/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e978/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e998/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55df8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc3c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc5c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc7c/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc9c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd5c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd7c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dddc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddfc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df7c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df9c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfbc/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfdc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e03c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e09c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0bc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e11c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e13c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e29c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e37c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e39c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3bc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3dc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e41c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e43c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e45c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e49c/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e55c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e57c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5bc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5dc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6fc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e79c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8dc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e91c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e93c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e95c/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x4e97c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e99c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9dc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55dfc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00130/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00858/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00760/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00774/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00784/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00798/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x007bc/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x007e0/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00804/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00814/4, 0x00000000); ++ INSTANCE_WR(ctx, 0x00828/4, 0x00000000); ++} ++ ++static void ++nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030); ++ INSTANCE_WR(ctx, 0x00130/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x001d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x001d8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00218/4, 0x0000fe0c); ++ INSTANCE_WR(ctx, 0x0022c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00258/4, 0x00000187); ++ INSTANCE_WR(ctx, 0x0026c/4, 0x00001018); ++ INSTANCE_WR(ctx, 0x00270/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002ac/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x002b0/4, 0x044d00df); ++ INSTANCE_WR(ctx, 0x002b8/4, 0x00000600); ++ INSTANCE_WR(ctx, 0x002d0/4, 0x01000000); ++ INSTANCE_WR(ctx, 0x002d4/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002dc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x002f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x002f8/4, 0x000e0080); ++ INSTANCE_WR(ctx, 0x002fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00318/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0031c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00328/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0032c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00344/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00348/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00360/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00364/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0036c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00378/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0037c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00380/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00384/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0038c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00390/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00394/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00398/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x003a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003a8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003c0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003c8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003dc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00404/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00408/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00420/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0042c/4, 0x00000014); ++ 
INSTANCE_WR(ctx, 0x00434/4, 0x00000029); ++ INSTANCE_WR(ctx, 0x00438/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x00440/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00444/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00448/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0045c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x004e4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004ec/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004f0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00500/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00504/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00508/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x0050c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00520/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00530/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00534/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00560/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x00564/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00570/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00588/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0058c/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x00590/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00614/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0061c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00624/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00630/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0063c/4, 0x00000f80); ++ INSTANCE_WR(ctx, 0x00684/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x007f0080); ++ ++ INSTANCE_WR(ctx, 0x006e4/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x006e8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x006f0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x006f8/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x006fc/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00700/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x0070c/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00710/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00718/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0071c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00720/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00724/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00728/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x00734/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00738/4, 0x89058001); ++ INSTANCE_WR(ctx, 
0x00740/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00744/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00748/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x0074c/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00750/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x0075c/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00760/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00768/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0076c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00770/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00774/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00778/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x00784/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x0078c/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x00798/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x0079c/4, 0x00000022); ++ ++ INSTANCE_WR(ctx, 0x007b4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007b8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x007bc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x007d0/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x007f4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x007fc/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00804/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0080c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00810/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00834/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00838/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x0083c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0084c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00850/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00874/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x0087c/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00884/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0088c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00890/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x008b8/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x008c4/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00904/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00908/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x0090c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00910/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00918/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00924/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00928/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00930/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00934/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x0093c/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00940/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00950/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00954/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00958/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00968/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0096c/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00990/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00998/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x009a8/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x009d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x009d4/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x009d8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x009e8/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x009ec/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00a20/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00a28/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00a2c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00a60/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00a78/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00a7c/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00a80/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00a84/4, 
0x00030201); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00aa8/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00ab0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00ac4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00ac8/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00acc/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00ad0/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00ad8/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00adc/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00aec/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00af4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b04/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b08/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00b34/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00b44/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00b48/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00b6c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00b74/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b88/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00c18/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00c20/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00c3c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00c40/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00c44/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00c48/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00c4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c50/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00c5c/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00c64/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00c68/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00c6c/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00c74/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00c78/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00c88/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ca0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ca4/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00cc8/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00cd0/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00ce0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00ce4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00d08/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d0c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00d10/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d20/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00d24/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00d48/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00d50/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00d58/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00d60/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00d8c/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00d98/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00db0/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00db4/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00db8/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00dbc/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00dd8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00ddc/4, 0x0d0c0b0a); ++ 
INSTANCE_WR(ctx, 0x00de0/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00de4/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00de8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00dec/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00df8/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00dfc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00e00/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00e04/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00e08/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00e10/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00e14/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00e24/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e28/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00e2c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e3c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00e40/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00e64/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00e6c/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00e74/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00e7c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00e80/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00ea4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ea8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ebc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ec0/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00ee4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00eec/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00efc/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00f00/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00f28/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00f34/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00f4c/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00f50/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00f54/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00f58/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00f74/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00f78/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00f7c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00f80/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00f84/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00f88/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00f94/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00f98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00f9c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00fa0/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00fa4/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00fac/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00fb0/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00fc0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fc4/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00fc8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fd8/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00fdc/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x01000/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01008/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x01010/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01018/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x0101c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x01040/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01044/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x01048/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01058/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0105c/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x01080/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01088/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x01090/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01098/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x0109c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x010c4/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x010d0/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x010e8/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x010ec/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x010f0/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x010f4/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x01110/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01114/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x01118/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x0111c/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x01120/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x01124/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01130/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x01134/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01138/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x0113c/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x01140/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x01148/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x0114c/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x01230/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01284/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0130c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01324/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x0134c/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x014ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x014f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01504/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0150c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01510/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01530/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x0156c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x015d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01630/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0164c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01650/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01670/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01690/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x016c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x016e4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01724/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01744/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0176c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01784/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0178c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x017cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01924/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x01a4c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01b30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b50/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b90/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x01bb0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01bd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01c70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01c90/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01cac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01ccc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01cec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d10/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01d2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dcc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0218c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x021cc/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x022ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x022ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0232c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x024cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x025cc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x026cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x027ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0280c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0282c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0284c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0286c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x028ac/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x028ec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x02bac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02bcc/4, 0x00000002); ++ 
INSTANCE_WR(ctx, 0x02bec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c2c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02c4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02cec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02d0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0398c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x039cc/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x03b6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03b8c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x03bec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03ccc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03dec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e04/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x03e0c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x03e44/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x040cc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x042ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0430c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0432c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0434c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0436c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0438c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0440c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0442c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0444c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0446c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0448c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x044ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x044cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0480c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0484c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x0492c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0496c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x049a4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x049ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04b4c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04c4c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x04d4c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e8c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04eac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04ecc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04eec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04f2c/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x04f6c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0522c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0524c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0526c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0528c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ac/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x052cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0536c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0538c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x083a0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x083c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x083e0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x08400/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08420/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08440/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x084a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x084c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x084e0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08500/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08520/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x11e40/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x11e60/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15044/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x152e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15304/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15324/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15344/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x15384/4, 
0x00000008); ++ INSTANCE_WR(ctx, 0x15444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15484/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154e4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x15504/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x155e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15624/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15664/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15704/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15744/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15764/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x157e4/4, 0x04444480); ++ INSTANCE_WR(ctx, 0x15f64/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x16004/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x16064/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x160a4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x160c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x160e4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x16104/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16124/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16144/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x161b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x161c8/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x161d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x16228/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x16408/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x16410/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x164e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16508/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x16568/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16590/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x165b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x165d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x165f0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16610/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16730/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x167b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x167c8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x16870/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x168a8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x169a8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x169c8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16a10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16a70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ab0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16ad0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16b10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16bc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16c10/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x16c68/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16c70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16c88/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x16ca8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x16cf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16d10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16d28/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x16d48/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x16d50/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16d70/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16d90/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16de8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ef0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f30/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16f50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f90/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ff0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17008/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17010/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17028/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17048/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17050/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x17068/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17070/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17088/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17090/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17108/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17128/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17148/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17168/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17188/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17208/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x17210/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x17310/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x17370/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17390/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17410/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x174d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17570/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17670/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x176e8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x176f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17708/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x17710/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17750/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17768/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x177a8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x177c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x177d0/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x177e8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17808/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17810/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17828/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x17850/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17bc4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17be4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17c28/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x17c48/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x17c84/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17c88/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x17db0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17dd0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17df0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e04/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17e10/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e24/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17e30/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e50/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e70/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e90/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17eb0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17fb0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17fd0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x17ff0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18010/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18030/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18050/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18070/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18090/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x180b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x180d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x180f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18110/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18130/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18150/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18168/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x18170/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18190/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x181a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x181c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181d0/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x181e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x18208/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18228/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18248/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18288/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x182c8/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x182f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x18310/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18330/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x183d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x183f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18408/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18428/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18430/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x18448/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18468/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x184d0/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x18550/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18570/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x186b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18750/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x187b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x187d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x187f0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18870/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18970/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18990/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x18aa8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18b08/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18b48/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18b68/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18b88/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18bc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18be8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c28/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18c90/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x18cc8/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18ce8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18d08/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x18d10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18d28/4, 0x0000007f); ++ INSTANCE_WR(ctx, 0x18d68/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18d70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18d88/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18db0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18dc8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18dd0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18de8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18e08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18e48/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18e50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18ec8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18ee8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18ef0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18f30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18fb0/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x18fc8/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x18fe8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18ff0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x19010/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x19030/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x19050/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x19070/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x192d0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x192f0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19350/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x19370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193d0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x194f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19530/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19550/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19570/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19590/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x195b0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x195f0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19630/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19708/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x19768/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x198f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19910/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19930/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x199d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19a90/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e88/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x19ea8/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x19f08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f50/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19f70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f90/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x19fb0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x19fd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a070/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a090/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a110/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a1e8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a248/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1a2c8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a2e8/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1a808/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x1a848/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a888/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a8a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a8e8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a948/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1a988/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a9a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a9e8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1aa08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1aa28/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x1aa68/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x2d2c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d2e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d328/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x2d348/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2d3a8/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x2d3e8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x2d468/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d488/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d508/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d528/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d548/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d568/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d588/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d608/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d628/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d648/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2dae8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x2db08/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x2db68/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x2e5b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2e5d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2e810/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2e990/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2e9b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2e9d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2e9f0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ea10/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eb30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ebb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ec70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2ee70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2eeb0/4, 0x00000002); ++ INSTANCE_WR(ctx, 
0x2eed0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ef10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f010/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f070/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f0f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f110/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f150/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f170/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f190/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f330/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f350/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f390/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f410/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f430/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f450/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f490/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f610/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2f710/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x2f770/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f810/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f970/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2faf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fb10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fb50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fb90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fbd0/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x2fc10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x301b0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x301d0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x301f0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30210/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30230/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30250/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30270/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30290/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x302b0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x303b0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x303d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x303f0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30410/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30430/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30450/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30470/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30490/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x304b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x304d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x304f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30510/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30530/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30550/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30570/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30590/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x305b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x305d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x305f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x306f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30710/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30730/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x307d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x307f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30830/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x308d0/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x30950/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30970/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30ab0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30b50/4, 0x00000004); ++ 
INSTANCE_WR(ctx, 0x30b90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30bb0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30bd0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30bf0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x30c70/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30d70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30d90/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x31090/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x31110/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x31170/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x311b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x311d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31250/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x312f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31330/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x313b0/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x313f0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31410/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31430/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31450/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x31470/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x316d0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x316f0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31710/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31750/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x318f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31930/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31950/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31970/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31990/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x319b0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x319f0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x4a7e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4a800/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4a820/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a840/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x4a880/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a8c0/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x4a8e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4a900/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x4a960/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a980/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x4a9e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52500/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x526a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x526c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52700/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x52780/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x527c0/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x52920/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x52940/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52960/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52a80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x52b00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x52d40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52d60/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x52d80/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52da0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52dc0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52de0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53200/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53220/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53240/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53260/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53280/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53300/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53320/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53340/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53360/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53380/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53400/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x53460/4, 
0x00000003); ++ INSTANCE_WR(ctx, 0x53500/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53524/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53540/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53544/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53560/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53564/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53580/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53584/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x535a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x535e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53600/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53660/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53684/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x536a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x536a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x536c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53824/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53840/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53844/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53860/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53864/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53880/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53884/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x538a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x538e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53900/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53944/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53960/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53984/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x539a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x539a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x539c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53b04/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53b20/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53be4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c00/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c04/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c20/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c24/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c40/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c44/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c64/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53c80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53c84/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53ca0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53ca4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53cc0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53cc4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53ce0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53d04/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x53d20/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x53dc4/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53de0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53de4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x53e00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x53e24/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53e40/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53e44/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x53e60/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x53f64/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x53f80/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x54004/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54020/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54144/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x54160/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x54164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54180/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54184/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x541c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x541c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54200/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54204/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54244/4, 0x00000004); ++ INSTANCE_WR(ctx, 
0x54260/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x5b6a4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x5b6c0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x5b6e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x5b700/4, 0x00000001); ++} ++ ++static void ++nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x10C/4, 0x30); ++ INSTANCE_WR(ctx, 0x1D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1D8/4, 0x1000); ++ INSTANCE_WR(ctx, 0x218/4, 0xFE0C); ++ INSTANCE_WR(ctx, 0x22C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x258/4, 0x187); ++ INSTANCE_WR(ctx, 0x26C/4, 0x1018); ++ INSTANCE_WR(ctx, 0x270/4, 0xFF); ++ INSTANCE_WR(ctx, 0x2AC/4, 0x4); ++ INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF); ++ INSTANCE_WR(ctx, 0x2B8/4, 0x600); ++ INSTANCE_WR(ctx, 0x2D0/4, 0x1000000); ++ INSTANCE_WR(ctx, 0x2D4/4, 0xFF); ++ INSTANCE_WR(ctx, 0x2DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x2F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F8/4, 0x80); ++ INSTANCE_WR(ctx, 0x2FC/4, 0x4); ++ INSTANCE_WR(ctx, 0x318/4, 0x2); ++ INSTANCE_WR(ctx, 0x31C/4, 0x1); ++ INSTANCE_WR(ctx, 0x328/4, 0x1); ++ INSTANCE_WR(ctx, 0x32C/4, 0x100); ++ INSTANCE_WR(ctx, 0x344/4, 0x2); ++ INSTANCE_WR(ctx, 0x348/4, 0x1); ++ INSTANCE_WR(ctx, 0x34C/4, 0x1); ++ INSTANCE_WR(ctx, 0x35C/4, 0x1); ++ INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x364/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x36C/4, 0x1); ++ INSTANCE_WR(ctx, 0x370/4, 0x1); ++ INSTANCE_WR(ctx, 0x378/4, 0x1); ++ INSTANCE_WR(ctx, 0x37C/4, 0x1); ++ INSTANCE_WR(ctx, 0x380/4, 0x1); ++ INSTANCE_WR(ctx, 0x384/4, 0x4); ++ INSTANCE_WR(ctx, 0x388/4, 0x1); ++ INSTANCE_WR(ctx, 0x38C/4, 0x1); ++ INSTANCE_WR(ctx, 0x390/4, 0x1); ++ INSTANCE_WR(ctx, 0x394/4, 0x7); ++ INSTANCE_WR(ctx, 0x398/4, 0x1); ++ INSTANCE_WR(ctx, 0x39C/4, 0x7); ++ INSTANCE_WR(ctx, 0x3A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x3A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x3A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3C0/4, 0x100); ++ INSTANCE_WR(ctx, 0x3C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3D4/4, 0x100); ++ INSTANCE_WR(ctx, 0x3D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3DC/4, 0x100); ++ INSTANCE_WR(ctx, 0x3E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x3F0/4, 0x100); ++ INSTANCE_WR(ctx, 0x404/4, 0x4); ++ INSTANCE_WR(ctx, 0x408/4, 0x70); ++ INSTANCE_WR(ctx, 0x40C/4, 0x80); ++ INSTANCE_WR(ctx, 0x420/4, 0xC); ++ INSTANCE_WR(ctx, 0x428/4, 0x8); ++ INSTANCE_WR(ctx, 0x42C/4, 0x14); ++ INSTANCE_WR(ctx, 0x434/4, 0x29); ++ INSTANCE_WR(ctx, 0x438/4, 0x27); ++ INSTANCE_WR(ctx, 0x43C/4, 0x26); ++ INSTANCE_WR(ctx, 0x440/4, 0x8); ++ INSTANCE_WR(ctx, 0x444/4, 0x4); ++ INSTANCE_WR(ctx, 0x448/4, 0x27); ++ INSTANCE_WR(ctx, 0x454/4, 0x1); ++ INSTANCE_WR(ctx, 0x458/4, 0x2); ++ INSTANCE_WR(ctx, 0x45C/4, 0x3); ++ INSTANCE_WR(ctx, 0x460/4, 0x4); ++ INSTANCE_WR(ctx, 0x464/4, 0x5); ++ INSTANCE_WR(ctx, 0x468/4, 0x6); ++ INSTANCE_WR(ctx, 0x46C/4, 0x7); ++ INSTANCE_WR(ctx, 0x470/4, 0x1); ++ INSTANCE_WR(ctx, 0x4B4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x4E4/4, 0x80); ++ INSTANCE_WR(ctx, 0x4E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x4EC/4, 0x4); ++ INSTANCE_WR(ctx, 0x4F0/4, 0x3); ++ INSTANCE_WR(ctx, 0x4F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x500/4, 0x12); ++ INSTANCE_WR(ctx, 0x504/4, 0x10); ++ INSTANCE_WR(ctx, 0x508/4, 0xC); ++ INSTANCE_WR(ctx, 0x50C/4, 0x1); ++ INSTANCE_WR(ctx, 0x51C/4, 0x4); ++ INSTANCE_WR(ctx, 0x520/4, 0x2); ++ INSTANCE_WR(ctx, 0x524/4, 0x4); ++ INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x534/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x55C/4, 0x4); ++ INSTANCE_WR(ctx, 0x560/4, 0x14); ++ INSTANCE_WR(ctx, 
0x564/4, 0x1); ++ INSTANCE_WR(ctx, 0x570/4, 0x2); ++ INSTANCE_WR(ctx, 0x57C/4, 0x1); ++ INSTANCE_WR(ctx, 0x584/4, 0x2); ++ INSTANCE_WR(ctx, 0x588/4, 0x1000); ++ INSTANCE_WR(ctx, 0x58C/4, 0xE00); ++ INSTANCE_WR(ctx, 0x590/4, 0x1000); ++ INSTANCE_WR(ctx, 0x594/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x59C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BC/4, 0x200); ++ INSTANCE_WR(ctx, 0x5C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C8/4, 0x70); ++ INSTANCE_WR(ctx, 0x5CC/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC/4, 0x70); ++ INSTANCE_WR(ctx, 0x5E0/4, 0x80); ++ INSTANCE_WR(ctx, 0x5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5F4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x60C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x614/4, 0x2); ++ INSTANCE_WR(ctx, 0x61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x624/4, 0x1); ++ INSTANCE_WR(ctx, 0x62C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x630/4, 0xCF); ++ INSTANCE_WR(ctx, 0x634/4, 0x1); ++ INSTANCE_WR(ctx, 0x63C/4, 0xF80); ++ INSTANCE_WR(ctx, 0x684/4, 0x7F0080); ++ INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080); ++ INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6E8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6F0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6F4/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x700/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x710/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x718/4, 0x1000); ++ INSTANCE_WR(ctx, 0x71C/4, 0x1F); ++ INSTANCE_WR(ctx, 0x720/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x724/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x728/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x734/4, 0x10040); ++ INSTANCE_WR(ctx, 0x73C/4, 0x22); ++ INSTANCE_WR(ctx, 0x748/4, 0x10040); ++ INSTANCE_WR(ctx, 0x74C/4, 0x22); ++ INSTANCE_WR(ctx, 0x764/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x768/4, 0x160000); ++ INSTANCE_WR(ctx, 0x76C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x780/4, 0x8C0000); ++ INSTANCE_WR(ctx, 0x7A4/4, 0x10401); ++ INSTANCE_WR(ctx, 0x7AC/4, 0x78); ++ INSTANCE_WR(ctx, 0x7B4/4, 0xBF); ++ INSTANCE_WR(ctx, 0x7BC/4, 0x1210); ++ INSTANCE_WR(ctx, 0x7C0/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x7E4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7E8/4, 0x160000); ++ INSTANCE_WR(ctx, 0x7EC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x800/4, 0x8C0000); ++ INSTANCE_WR(ctx, 0x824/4, 0x10401); ++ INSTANCE_WR(ctx, 0x82C/4, 0x78); ++ INSTANCE_WR(ctx, 0x834/4, 0xBF); ++ INSTANCE_WR(ctx, 0x83C/4, 0x1210); ++ INSTANCE_WR(ctx, 0x840/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x868/4, 0x27070); ++ INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x88C/4, 0x120407); ++ INSTANCE_WR(ctx, 0x890/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x894/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x898/4, 0x30201); ++ INSTANCE_WR(ctx, 0x8B4/4, 0x40); ++ INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x8BC/4, 0x141210); ++ INSTANCE_WR(ctx, 0x8C0/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x8C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x8C8/4, 0x3); ++ INSTANCE_WR(ctx, 0x8D4/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x8D8/4, 0x100); ++ INSTANCE_WR(ctx, 0x8DC/4, 0x3800); ++ INSTANCE_WR(ctx, 0x8E0/4, 0x404040); ++ INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x8EC/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x7BA0/4, 0x21); ++ INSTANCE_WR(ctx, 0x7BC0/4, 0x1); ++ INSTANCE_WR(ctx, 0x7BE0/4, 0x2); ++ INSTANCE_WR(ctx, 0x7C00/4, 0x100); ++ 
INSTANCE_WR(ctx, 0x7C20/4, 0x100); ++ INSTANCE_WR(ctx, 0x7C40/4, 0x1); ++ INSTANCE_WR(ctx, 0x7CA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x7CC0/4, 0x2); ++ INSTANCE_WR(ctx, 0x7CE0/4, 0x100); ++ INSTANCE_WR(ctx, 0x7D00/4, 0x100); ++ INSTANCE_WR(ctx, 0x7D20/4, 0x1); ++ INSTANCE_WR(ctx, 0x11640/4, 0x4); ++ INSTANCE_WR(ctx, 0x11660/4, 0x4); ++ INSTANCE_WR(ctx, 0x49FE0/4, 0x4); ++ INSTANCE_WR(ctx, 0x4A000/4, 0x4); ++ INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A040/4, 0x3); ++ INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x4A0E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x4A100/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A180/4, 0x27); ++ INSTANCE_WR(ctx, 0x4A1E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x51A20/4, 0x1); ++ INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x51F00/4, 0x80); ++ INSTANCE_WR(ctx, 0x51F80/4, 0x80); ++ INSTANCE_WR(ctx, 0x51FC0/4, 0x3F); ++ INSTANCE_WR(ctx, 0x52120/4, 0x2); ++ INSTANCE_WR(ctx, 0x52140/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x52160/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x52280/4, 0x4); ++ INSTANCE_WR(ctx, 0x52300/4, 0x4); ++ INSTANCE_WR(ctx, 0x52540/4, 0x1); ++ INSTANCE_WR(ctx, 0x52560/4, 0x1001); ++ INSTANCE_WR(ctx, 0x52580/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52C00/4, 0x10); ++ INSTANCE_WR(ctx, 0x52C60/4, 0x3); ++ INSTANCE_WR(ctx, 0xA84/4, 0xF); ++ INSTANCE_WR(ctx, 0xB24/4, 0x20); ++ INSTANCE_WR(ctx, 0xD04/4, 0x1A); ++ INSTANCE_WR(ctx, 0xEC4/4, 0x4); ++ INSTANCE_WR(ctx, 0xEE4/4, 0x4); ++ INSTANCE_WR(ctx, 0xF24/4, 0x4); ++ INSTANCE_WR(ctx, 0xF44/4, 0x8); ++ INSTANCE_WR(ctx, 0xF84/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x1124/4, 0xF); ++ INSTANCE_WR(ctx, 0x3604/4, 0xF); ++ INSTANCE_WR(ctx, 0x3644/4, 0x1); ++ INSTANCE_WR(ctx, 0x41A4/4, 0xF); ++ INSTANCE_WR(ctx, 0x14844/4, 0xF); ++ INSTANCE_WR(ctx, 0x14AE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14B04/4, 0x100); ++ INSTANCE_WR(ctx, 0x14B24/4, 0x100); ++ INSTANCE_WR(ctx, 0x14B44/4, 0x11); ++ INSTANCE_WR(ctx, 0x14B84/4, 0x8); ++ INSTANCE_WR(ctx, 0x14C44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14C84/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CA4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CC4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CE4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x14D04/4, 0x2); ++ INSTANCE_WR(ctx, 0x14DE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E24/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E64/4, 0x1); ++ INSTANCE_WR(ctx, 0x14F04/4, 0x4); ++ INSTANCE_WR(ctx, 0x14F44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14F64/4, 0x15); ++ INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480); ++ INSTANCE_WR(ctx, 0x15764/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x15804/4, 0x100); ++ INSTANCE_WR(ctx, 0x15864/4, 
0x10001); ++ INSTANCE_WR(ctx, 0x158A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x158C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x158E4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x15904/4, 0x1); ++ INSTANCE_WR(ctx, 0x15924/4, 0x4); ++ INSTANCE_WR(ctx, 0x15944/4, 0x2); ++ INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15C68/4, 0x4); ++ INSTANCE_WR(ctx, 0x15C88/4, 0x1A); ++ INSTANCE_WR(ctx, 0x15CE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x16028/4, 0xF); ++ INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16148/4, 0x11); ++ INSTANCE_WR(ctx, 0x16348/4, 0x4); ++ INSTANCE_WR(ctx, 0x163E8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16408/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x16428/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x164A8/4, 0x5); ++ INSTANCE_WR(ctx, 0x164C8/4, 0x52); ++ INSTANCE_WR(ctx, 0x16568/4, 0x1); ++ INSTANCE_WR(ctx, 0x16788/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16808/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16828/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16848/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16868/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16888/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16908/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16928/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16948/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16968/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16988/4, 0x10); ++ INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x16E88/4, 0x5); ++ INSTANCE_WR(ctx, 0x16EE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16FA8/4, 0x3); ++ INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x173C8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x17408/4, 0x3); ++ INSTANCE_WR(ctx, 0x178E8/4, 0x102); ++ INSTANCE_WR(ctx, 0x17928/4, 0x4); ++ INSTANCE_WR(ctx, 0x17948/4, 0x4); ++ INSTANCE_WR(ctx, 0x17968/4, 0x4); ++ INSTANCE_WR(ctx, 0x17988/4, 0x4); ++ INSTANCE_WR(ctx, 0x179A8/4, 0x4); ++ INSTANCE_WR(ctx, 0x179C8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17A08/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17A48/4, 0x102); ++ INSTANCE_WR(ctx, 0x17B88/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BA8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18228/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18288/4, 0x804); ++ INSTANCE_WR(ctx, 0x182C8/4, 0x4); ++ INSTANCE_WR(ctx, 0x182E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18308/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18348/4, 0x4); ++ INSTANCE_WR(ctx, 0x18368/4, 0x4); ++ INSTANCE_WR(ctx, 0x183A8/4, 0x10); ++ INSTANCE_WR(ctx, 0x18448/4, 0x804); ++ INSTANCE_WR(ctx, 0x18468/4, 0x1); ++ INSTANCE_WR(ctx, 0x18488/4, 0x1A); ++ INSTANCE_WR(ctx, 0x184A8/4, 0x7F); ++ INSTANCE_WR(ctx, 0x184E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x18508/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18548/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18568/4, 0x4); ++ INSTANCE_WR(ctx, 0x18588/4, 0x4); ++ INSTANCE_WR(ctx, 0x185C8/4, 0x10); ++ INSTANCE_WR(ctx, 0x18648/4, 0x1); ++ INSTANCE_WR(ctx, 0x18668/4, 
0x8100C12); ++ INSTANCE_WR(ctx, 0x18748/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x18768/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18E88/4, 0x1); ++ INSTANCE_WR(ctx, 0x18EE8/4, 0x10); ++ INSTANCE_WR(ctx, 0x19608/4, 0x88); ++ INSTANCE_WR(ctx, 0x19628/4, 0x88); ++ INSTANCE_WR(ctx, 0x19688/4, 0x4); ++ INSTANCE_WR(ctx, 0x19968/4, 0x26); ++ INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x19A48/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19A68/4, 0x10); ++ INSTANCE_WR(ctx, 0x19F88/4, 0x52); ++ INSTANCE_WR(ctx, 0x19FC8/4, 0x26); ++ INSTANCE_WR(ctx, 0x1A008/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A028/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A068/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1A108/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A128/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A168/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A188/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x24A48/4, 0x4); ++ INSTANCE_WR(ctx, 0x24A68/4, 0x4); ++ INSTANCE_WR(ctx, 0x24AA8/4, 0x80); ++ INSTANCE_WR(ctx, 0x24AC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x24AE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x24B28/4, 0x27); ++ INSTANCE_WR(ctx, 0x24B68/4, 0x26); ++ INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C08/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C28/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C48/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C68/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C88/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D08/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D28/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D48/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D68/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D88/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0xB0C/4, 0x2); ++ INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0xCEC/4, 0x1); ++ INSTANCE_WR(ctx, 0xD0C/4, 0x10); ++ INSTANCE_WR(ctx, 0xD6C/4, 0x1); ++ INSTANCE_WR(ctx, 0xE0C/4, 0x4); ++ INSTANCE_WR(ctx, 0xE2C/4, 0x400); ++ INSTANCE_WR(ctx, 0xE4C/4, 0x300); ++ INSTANCE_WR(ctx, 0xE6C/4, 0x1001); ++ INSTANCE_WR(ctx, 0xE8C/4, 0x15); ++ INSTANCE_WR(ctx, 0xF4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x106C/4, 0x1); ++ INSTANCE_WR(ctx, 0x108C/4, 0x10); ++ INSTANCE_WR(ctx, 0x10CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x134C/4, 0x10); ++ INSTANCE_WR(ctx, 0x156C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x158C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x160C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x162C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x164C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x166C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x170C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x172C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x1A8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x1ACC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x1BAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1BEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1DCC/4, 0x11); ++ INSTANCE_WR(ctx, 0x1ECC/4, 0xF); ++ INSTANCE_WR(ctx, 0x1FCC/4, 0x11); ++ INSTANCE_WR(ctx, 0x20AC/4, 0x1); ++ INSTANCE_WR(ctx, 
0x20CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x20EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x210C/4, 0x2); ++ INSTANCE_WR(ctx, 0x212C/4, 0x1); ++ INSTANCE_WR(ctx, 0x214C/4, 0x2); ++ INSTANCE_WR(ctx, 0x216C/4, 0x1); ++ INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x24AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x24CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x24EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x250C/4, 0x1); ++ INSTANCE_WR(ctx, 0x252C/4, 0x2); ++ INSTANCE_WR(ctx, 0x254C/4, 0x1); ++ INSTANCE_WR(ctx, 0x256C/4, 0x1); ++ INSTANCE_WR(ctx, 0x25EC/4, 0x11); ++ INSTANCE_WR(ctx, 0x260C/4, 0x1); ++ INSTANCE_WR(ctx, 0x328C/4, 0x2); ++ INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x346C/4, 0x1); ++ INSTANCE_WR(ctx, 0x348C/4, 0x10); ++ INSTANCE_WR(ctx, 0x34EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x358C/4, 0x4); ++ INSTANCE_WR(ctx, 0x35AC/4, 0x400); ++ INSTANCE_WR(ctx, 0x35CC/4, 0x300); ++ INSTANCE_WR(ctx, 0x35EC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x360C/4, 0x15); ++ INSTANCE_WR(ctx, 0x36CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x37EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x380C/4, 0x10); ++ INSTANCE_WR(ctx, 0x384C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3ACC/4, 0x10); ++ INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x420C/4, 0x10); ++ INSTANCE_WR(ctx, 0x424C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x432C/4, 0x1); ++ INSTANCE_WR(ctx, 0x436C/4, 0x1); ++ INSTANCE_WR(ctx, 0x43AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x454C/4, 0x11); ++ INSTANCE_WR(ctx, 0x464C/4, 0xF); ++ INSTANCE_WR(ctx, 0x474C/4, 0x11); ++ INSTANCE_WR(ctx, 0x482C/4, 0x1); ++ INSTANCE_WR(ctx, 0x484C/4, 0x1); ++ INSTANCE_WR(ctx, 0x486C/4, 0x1); ++ INSTANCE_WR(ctx, 0x488C/4, 0x2); ++ INSTANCE_WR(ctx, 0x48AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x48CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x48EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x492C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x4C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4C4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x4C6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4C8C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CAC/4, 0x2); ++ INSTANCE_WR(ctx, 0x4CCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x4D6C/4, 0x11); ++ INSTANCE_WR(ctx, 0x4D8C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA30/4, 0x4); ++ INSTANCE_WR(ctx, 0xCF0/4, 0x4); ++ INSTANCE_WR(ctx, 0xD10/4, 0x4); ++ INSTANCE_WR(ctx, 0xD30/4, 0x608080); ++ INSTANCE_WR(ctx, 0xDD0/4, 0x4); ++ INSTANCE_WR(ctx, 0xE30/4, 0x4); ++ INSTANCE_WR(ctx, 0xE50/4, 0x4); ++ INSTANCE_WR(ctx, 0xE70/4, 0x80); ++ INSTANCE_WR(ctx, 0xE90/4, 0x1E00); ++ INSTANCE_WR(ctx, 0xEB0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1350/4, 0x4); ++ INSTANCE_WR(ctx, 0x1370/4, 0x80); ++ INSTANCE_WR(ctx, 0x1390/4, 0x4); ++ INSTANCE_WR(ctx, 0x13B0/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x13D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x13F0/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x1410/4, 0x4); ++ INSTANCE_WR(ctx, 0x14B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x14D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1550/4, 0x4); ++ INSTANCE_WR(ctx, 
0x159F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x15A10/4, 0x3); ++ INSTANCE_WR(ctx, 0x15C50/4, 0xF); ++ INSTANCE_WR(ctx, 0x15DD0/4, 0x4); ++ INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15F70/4, 0x1); ++ INSTANCE_WR(ctx, 0x15FF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x160B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16250/4, 0x1); ++ INSTANCE_WR(ctx, 0x16270/4, 0x1); ++ INSTANCE_WR(ctx, 0x16290/4, 0x2); ++ INSTANCE_WR(ctx, 0x162B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x162D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x162F0/4, 0x2); ++ INSTANCE_WR(ctx, 0x16310/4, 0x1); ++ INSTANCE_WR(ctx, 0x16350/4, 0x11); ++ INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x164B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x16530/4, 0x11); ++ INSTANCE_WR(ctx, 0x16550/4, 0x1); ++ INSTANCE_WR(ctx, 0x16590/4, 0xCF); ++ INSTANCE_WR(ctx, 0x165B0/4, 0xCF); ++ INSTANCE_WR(ctx, 0x165D0/4, 0xCF); ++ INSTANCE_WR(ctx, 0x16730/4, 0x1); ++ INSTANCE_WR(ctx, 0x16750/4, 0x1); ++ INSTANCE_WR(ctx, 0x16770/4, 0x2); ++ INSTANCE_WR(ctx, 0x16790/4, 0x1); ++ INSTANCE_WR(ctx, 0x167B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x167D0/4, 0x2); ++ INSTANCE_WR(ctx, 0x167F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16830/4, 0x1); ++ INSTANCE_WR(ctx, 0x16850/4, 0x1); ++ INSTANCE_WR(ctx, 0x16870/4, 0x1); ++ INSTANCE_WR(ctx, 0x16890/4, 0x1); ++ INSTANCE_WR(ctx, 0x168B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x168D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x168F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16910/4, 0x1); ++ INSTANCE_WR(ctx, 0x16930/4, 0x11); ++ INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16A50/4, 0xF); ++ INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x16BB0/4, 0x11); ++ INSTANCE_WR(ctx, 0x16BD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16C50/4, 0x4); ++ INSTANCE_WR(ctx, 0x16D10/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB0/4, 0x11); ++ INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F30/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F50/4, 0x1); ++ INSTANCE_WR(ctx, 0x16F90/4, 0x1); ++ INSTANCE_WR(ctx, 0x16FD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17010/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17050/4, 0x1); ++ INSTANCE_WR(ctx, 0x17090/4, 0x1); ++ INSTANCE_WR(ctx, 0x175F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x17610/4, 0x8); ++ INSTANCE_WR(ctx, 0x17630/4, 0x8); ++ INSTANCE_WR(ctx, 0x17650/4, 0x8); ++ INSTANCE_WR(ctx, 0x17670/4, 0x8); ++ INSTANCE_WR(ctx, 0x17690/4, 0x8); ++ INSTANCE_WR(ctx, 0x176B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x176D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x176F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17810/4, 0x400); ++ INSTANCE_WR(ctx, 0x17830/4, 0x400); ++ INSTANCE_WR(ctx, 0x17850/4, 0x400); ++ INSTANCE_WR(ctx, 0x17870/4, 0x400); ++ INSTANCE_WR(ctx, 0x17890/4, 0x400); ++ INSTANCE_WR(ctx, 0x178B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x178D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x178F0/4, 0x400); ++ INSTANCE_WR(ctx, 0x17910/4, 0x300); ++ INSTANCE_WR(ctx, 0x17930/4, 0x300); ++ INSTANCE_WR(ctx, 0x17950/4, 0x300); ++ INSTANCE_WR(ctx, 0x17970/4, 0x300); ++ INSTANCE_WR(ctx, 0x17990/4, 0x300); ++ INSTANCE_WR(ctx, 0x179B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x179D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x179F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x17A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A30/4, 0xF); ++ INSTANCE_WR(ctx, 0x17B30/4, 0x20); ++ INSTANCE_WR(ctx, 0x17B50/4, 0x11); ++ INSTANCE_WR(ctx, 0x17B70/4, 0x100); ++ INSTANCE_WR(ctx, 0x17BB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17C10/4, 0x40); ++ INSTANCE_WR(ctx, 0x17C30/4, 0x100); ++ INSTANCE_WR(ctx, 0x17C70/4, 0x3); ++ INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67); 
++ INSTANCE_WR(ctx, 0x17D90/4, 0x2); ++ INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17EF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17F90/4, 0x4); ++ INSTANCE_WR(ctx, 0x17FD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17FF0/4, 0x400); ++ INSTANCE_WR(ctx, 0x18010/4, 0x300); ++ INSTANCE_WR(ctx, 0x18030/4, 0x1001); ++ INSTANCE_WR(ctx, 0x180B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x181D0/4, 0xF); ++ INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18550/4, 0x11); ++ INSTANCE_WR(ctx, 0x185B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x185F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x18610/4, 0x1); ++ INSTANCE_WR(ctx, 0x18690/4, 0x1); ++ INSTANCE_WR(ctx, 0x18730/4, 0x1); ++ INSTANCE_WR(ctx, 0x18770/4, 0x1); ++ INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x18830/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x18850/4, 0x40); ++ INSTANCE_WR(ctx, 0x18870/4, 0x100); ++ INSTANCE_WR(ctx, 0x18890/4, 0x10100); ++ INSTANCE_WR(ctx, 0x188B0/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18B50/4, 0x1); ++ INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x18BB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x18D30/4, 0x1); ++ INSTANCE_WR(ctx, 0x18D70/4, 0x1); ++ INSTANCE_WR(ctx, 0x18D90/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x18E30/4, 0x1A); ++} ++ ++static void ++nv92_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x10C/4, 0x30); ++ INSTANCE_WR(ctx, 0x1D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1D8/4, 0x1000); ++ INSTANCE_WR(ctx, 0x218/4, 0xFE0C); ++ INSTANCE_WR(ctx, 0x22C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x258/4, 0x187); ++ INSTANCE_WR(ctx, 0x26C/4, 0x1018); ++ INSTANCE_WR(ctx, 0x270/4, 0xFF); ++ INSTANCE_WR(ctx, 0x2AC/4, 0x4); ++ INSTANCE_WR(ctx, 0x2B0/4, 0x42500DF); ++ INSTANCE_WR(ctx, 0x2B8/4, 0x600); ++ INSTANCE_WR(ctx, 0x2D0/4, 0x1000000); ++ INSTANCE_WR(ctx, 0x2D4/4, 0xFF); ++ INSTANCE_WR(ctx, 0x2DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x2F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F8/4, 0x80); ++ INSTANCE_WR(ctx, 0x2FC/4, 0x4); ++ INSTANCE_WR(ctx, 0x318/4, 0x2); ++ INSTANCE_WR(ctx, 0x31C/4, 0x1); ++ INSTANCE_WR(ctx, 0x328/4, 0x1); ++ INSTANCE_WR(ctx, 0x32C/4, 0x100); ++ INSTANCE_WR(ctx, 0x344/4, 0x2); ++ INSTANCE_WR(ctx, 0x348/4, 0x1); ++ INSTANCE_WR(ctx, 0x34C/4, 0x1); ++ INSTANCE_WR(ctx, 0x35C/4, 0x1); ++ INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x364/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x36C/4, 0x1); ++ INSTANCE_WR(ctx, 0x370/4, 0x1); ++ INSTANCE_WR(ctx, 0x378/4, 0x1); ++ INSTANCE_WR(ctx, 0x37C/4, 0x1); ++ INSTANCE_WR(ctx, 0x380/4, 0x1); ++ INSTANCE_WR(ctx, 0x384/4, 0x4); ++ INSTANCE_WR(ctx, 0x388/4, 0x1); ++ INSTANCE_WR(ctx, 0x38C/4, 0x1); ++ INSTANCE_WR(ctx, 0x390/4, 0x1); ++ INSTANCE_WR(ctx, 0x394/4, 0x7); ++ INSTANCE_WR(ctx, 0x398/4, 0x1); ++ INSTANCE_WR(ctx, 0x39C/4, 0x7); ++ INSTANCE_WR(ctx, 0x3A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x3A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x3A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3C0/4, 0x100); ++ INSTANCE_WR(ctx, 0x3C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3D4/4, 0x100); ++ INSTANCE_WR(ctx, 0x3D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3DC/4, 0x100); ++ INSTANCE_WR(ctx, 0x3E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x3F0/4, 0x100); ++ INSTANCE_WR(ctx, 
0x404/4, 0x4); ++ INSTANCE_WR(ctx, 0x408/4, 0x70); ++ INSTANCE_WR(ctx, 0x40C/4, 0x80); ++ INSTANCE_WR(ctx, 0x420/4, 0xC); ++ INSTANCE_WR(ctx, 0x428/4, 0x8); ++ INSTANCE_WR(ctx, 0x42C/4, 0x14); ++ INSTANCE_WR(ctx, 0x434/4, 0x29); ++ INSTANCE_WR(ctx, 0x438/4, 0x27); ++ INSTANCE_WR(ctx, 0x43C/4, 0x26); ++ INSTANCE_WR(ctx, 0x440/4, 0x8); ++ INSTANCE_WR(ctx, 0x444/4, 0x4); ++ INSTANCE_WR(ctx, 0x448/4, 0x27); ++ INSTANCE_WR(ctx, 0x454/4, 0x1); ++ INSTANCE_WR(ctx, 0x458/4, 0x2); ++ INSTANCE_WR(ctx, 0x45C/4, 0x3); ++ INSTANCE_WR(ctx, 0x460/4, 0x4); ++ INSTANCE_WR(ctx, 0x464/4, 0x5); ++ INSTANCE_WR(ctx, 0x468/4, 0x6); ++ INSTANCE_WR(ctx, 0x46C/4, 0x7); ++ INSTANCE_WR(ctx, 0x470/4, 0x1); ++ INSTANCE_WR(ctx, 0x4B4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x4E4/4, 0x80); ++ INSTANCE_WR(ctx, 0x4E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x4EC/4, 0x4); ++ INSTANCE_WR(ctx, 0x4F0/4, 0x3); ++ INSTANCE_WR(ctx, 0x4F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x500/4, 0x12); ++ INSTANCE_WR(ctx, 0x504/4, 0x10); ++ INSTANCE_WR(ctx, 0x508/4, 0xC); ++ INSTANCE_WR(ctx, 0x50C/4, 0x1); ++ INSTANCE_WR(ctx, 0x51C/4, 0x4); ++ INSTANCE_WR(ctx, 0x520/4, 0x2); ++ INSTANCE_WR(ctx, 0x524/4, 0x4); ++ INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x534/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x55C/4, 0x4); ++ INSTANCE_WR(ctx, 0x560/4, 0x14); ++ INSTANCE_WR(ctx, 0x564/4, 0x1); ++ INSTANCE_WR(ctx, 0x570/4, 0x2); ++ INSTANCE_WR(ctx, 0x57C/4, 0x1); ++ INSTANCE_WR(ctx, 0x584/4, 0x2); ++ INSTANCE_WR(ctx, 0x588/4, 0x1000); ++ INSTANCE_WR(ctx, 0x58C/4, 0xE00); ++ INSTANCE_WR(ctx, 0x590/4, 0x1000); ++ INSTANCE_WR(ctx, 0x594/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x59C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BC/4, 0x200); ++ INSTANCE_WR(ctx, 0x5C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C8/4, 0x70); ++ INSTANCE_WR(ctx, 0x5CC/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC/4, 0x70); ++ INSTANCE_WR(ctx, 0x5E0/4, 0x80); ++ INSTANCE_WR(ctx, 0x5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5F4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x60C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x614/4, 0x2); ++ INSTANCE_WR(ctx, 0x61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x624/4, 0x1); ++ INSTANCE_WR(ctx, 0x62C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x630/4, 0xCF); ++ INSTANCE_WR(ctx, 0x634/4, 0x1); ++ INSTANCE_WR(ctx, 0x63C/4, 0x1F80); ++ INSTANCE_WR(ctx, 0x654/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x658/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x660/4, 0x1000); ++ INSTANCE_WR(ctx, 0x664/4, 0x1F); ++ INSTANCE_WR(ctx, 0x668/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x66C/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x670/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x67C/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x680/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x688/4, 0x1000); ++ INSTANCE_WR(ctx, 0x68C/4, 0x1F); ++ INSTANCE_WR(ctx, 0x690/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x694/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x698/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x6A4/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6A8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6B0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6B4/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6B8/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6BC/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x6C0/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x6CC/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6D0/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6D8/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6DC/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6E0/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6E4/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x6E8/4, 0xB7892080); ++ 
INSTANCE_WR(ctx, 0x6F4/4, 0x390040); ++ INSTANCE_WR(ctx, 0x6FC/4, 0x22); ++ INSTANCE_WR(ctx, 0x708/4, 0x390040); ++ INSTANCE_WR(ctx, 0x70C/4, 0x22); ++ INSTANCE_WR(ctx, 0x724/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x728/4, 0x160000); ++ INSTANCE_WR(ctx, 0x72C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x73C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x740/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x764/4, 0x10401); ++ INSTANCE_WR(ctx, 0x76C/4, 0x78); ++ INSTANCE_WR(ctx, 0x774/4, 0xBF); ++ INSTANCE_WR(ctx, 0x77C/4, 0x1210); ++ INSTANCE_WR(ctx, 0x780/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x7A4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7A8/4, 0x160000); ++ INSTANCE_WR(ctx, 0x7AC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7BC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x7C0/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x7E4/4, 0x10401); ++ INSTANCE_WR(ctx, 0x7EC/4, 0x78); ++ INSTANCE_WR(ctx, 0x7F4/4, 0xBF); ++ INSTANCE_WR(ctx, 0x7FC/4, 0x1210); ++ INSTANCE_WR(ctx, 0x800/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x828/4, 0x27070); ++ INSTANCE_WR(ctx, 0x834/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x84C/4, 0x120407); ++ INSTANCE_WR(ctx, 0x850/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x854/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x858/4, 0x30201); ++ INSTANCE_WR(ctx, 0x874/4, 0x40); ++ INSTANCE_WR(ctx, 0x878/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x87C/4, 0x141210); ++ INSTANCE_WR(ctx, 0x880/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x884/4, 0x1); ++ INSTANCE_WR(ctx, 0x888/4, 0x3); ++ INSTANCE_WR(ctx, 0x894/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x898/4, 0x100); ++ INSTANCE_WR(ctx, 0x89C/4, 0x3800); ++ INSTANCE_WR(ctx, 0x8A0/4, 0x404040); ++ INSTANCE_WR(ctx, 0x8A4/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x8AC/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x8B0/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x8C0/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x8C4/4, 0x160000); ++ INSTANCE_WR(ctx, 0x8C8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x8D8/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x8DC/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x900/4, 0x10401); ++ INSTANCE_WR(ctx, 0x908/4, 0x78); ++ INSTANCE_WR(ctx, 0x910/4, 0xBF); ++ INSTANCE_WR(ctx, 0x918/4, 0x1210); ++ INSTANCE_WR(ctx, 0x91C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x940/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x944/4, 0x160000); ++ INSTANCE_WR(ctx, 0x948/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x958/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x95C/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x980/4, 0x10401); ++ INSTANCE_WR(ctx, 0x988/4, 0x78); ++ INSTANCE_WR(ctx, 0x990/4, 0xBF); ++ INSTANCE_WR(ctx, 0x998/4, 0x1210); ++ INSTANCE_WR(ctx, 0x99C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x9C4/4, 0x27070); ++ INSTANCE_WR(ctx, 0x9D0/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x9E8/4, 0x120407); ++ INSTANCE_WR(ctx, 0x9EC/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x9F0/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x9F4/4, 0x30201); ++ INSTANCE_WR(ctx, 0xA10/4, 0x40); ++ INSTANCE_WR(ctx, 0xA14/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xA18/4, 0x141210); ++ INSTANCE_WR(ctx, 0xA1C/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xA20/4, 0x1); ++ INSTANCE_WR(ctx, 0xA24/4, 0x3); ++ INSTANCE_WR(ctx, 0xA30/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xA34/4, 0x100); ++ INSTANCE_WR(ctx, 0xA38/4, 0x3800); ++ INSTANCE_WR(ctx, 0xA3C/4, 0x404040); ++ INSTANCE_WR(ctx, 0xA40/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xA48/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xA4C/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xA5C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xA60/4, 0x160000); ++ INSTANCE_WR(ctx, 0xA64/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xA74/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xA78/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xA9C/4, 0x10401); ++ INSTANCE_WR(ctx, 0xAA4/4, 0x78); ++ INSTANCE_WR(ctx, 0xAAC/4, 0xBF); ++ INSTANCE_WR(ctx, 0xAB4/4, 0x1210); ++ 
INSTANCE_WR(ctx, 0xAB8/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xADC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xAE0/4, 0x160000); ++ INSTANCE_WR(ctx, 0xAE4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xAF4/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xAF8/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xB1C/4, 0x10401); ++ INSTANCE_WR(ctx, 0xB24/4, 0x78); ++ INSTANCE_WR(ctx, 0xB2C/4, 0xBF); ++ INSTANCE_WR(ctx, 0xB34/4, 0x1210); ++ INSTANCE_WR(ctx, 0xB38/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xB60/4, 0x27070); ++ INSTANCE_WR(ctx, 0xB6C/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0xB84/4, 0x120407); ++ INSTANCE_WR(ctx, 0xB88/4, 0x5091507); ++ INSTANCE_WR(ctx, 0xB8C/4, 0x5010202); ++ INSTANCE_WR(ctx, 0xB90/4, 0x30201); ++ INSTANCE_WR(ctx, 0xBAC/4, 0x40); ++ INSTANCE_WR(ctx, 0xBB0/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xBB4/4, 0x141210); ++ INSTANCE_WR(ctx, 0xBB8/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xBBC/4, 0x1); ++ INSTANCE_WR(ctx, 0xBC0/4, 0x3); ++ INSTANCE_WR(ctx, 0xBCC/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xBD0/4, 0x100); ++ INSTANCE_WR(ctx, 0xBD4/4, 0x3800); ++ INSTANCE_WR(ctx, 0xBD8/4, 0x404040); ++ INSTANCE_WR(ctx, 0xBDC/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xBE4/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xBE8/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xBF8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xBFC/4, 0x160000); ++ INSTANCE_WR(ctx, 0xC00/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xC10/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xC14/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xC38/4, 0x10401); ++ INSTANCE_WR(ctx, 0xC40/4, 0x78); ++ INSTANCE_WR(ctx, 0xC48/4, 0xBF); ++ INSTANCE_WR(ctx, 0xC50/4, 0x1210); ++ INSTANCE_WR(ctx, 0xC54/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xC78/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xC7C/4, 0x160000); ++ INSTANCE_WR(ctx, 0xC80/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xC90/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xC94/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xCB8/4, 0x10401); ++ INSTANCE_WR(ctx, 0xCC0/4, 0x78); ++ INSTANCE_WR(ctx, 0xCC8/4, 0xBF); ++ INSTANCE_WR(ctx, 0xCD0/4, 0x1210); ++ INSTANCE_WR(ctx, 0xCD4/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xCFC/4, 0x27070); ++ INSTANCE_WR(ctx, 0xD08/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0xD20/4, 0x120407); ++ INSTANCE_WR(ctx, 0xD24/4, 0x5091507); ++ INSTANCE_WR(ctx, 0xD28/4, 0x5010202); ++ INSTANCE_WR(ctx, 0xD2C/4, 0x30201); ++ INSTANCE_WR(ctx, 0xD48/4, 0x40); ++ INSTANCE_WR(ctx, 0xD4C/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xD50/4, 0x141210); ++ INSTANCE_WR(ctx, 0xD54/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xD58/4, 0x1); ++ INSTANCE_WR(ctx, 0xD5C/4, 0x3); ++ INSTANCE_WR(ctx, 0xD68/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xD6C/4, 0x100); ++ INSTANCE_WR(ctx, 0xD70/4, 0x3800); ++ INSTANCE_WR(ctx, 0xD74/4, 0x404040); ++ INSTANCE_WR(ctx, 0xD78/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xD80/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xD84/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xD94/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xD98/4, 0x160000); ++ INSTANCE_WR(ctx, 0xD9C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xDAC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xDB0/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xDD4/4, 0x10401); ++ INSTANCE_WR(ctx, 0xDDC/4, 0x78); ++ INSTANCE_WR(ctx, 0xDE4/4, 0xBF); ++ INSTANCE_WR(ctx, 0xDEC/4, 0x1210); ++ INSTANCE_WR(ctx, 0xDF0/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xE14/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xE18/4, 0x160000); ++ INSTANCE_WR(ctx, 0xE1C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xE2C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xE30/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xE54/4, 0x10401); ++ INSTANCE_WR(ctx, 0xE5C/4, 0x78); ++ INSTANCE_WR(ctx, 0xE64/4, 0xBF); ++ INSTANCE_WR(ctx, 0xE6C/4, 0x1210); ++ INSTANCE_WR(ctx, 0xE70/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xE98/4, 0x27070); ++ INSTANCE_WR(ctx, 0xEA4/4, 
0x3FFFFFF); ++ INSTANCE_WR(ctx, 0xEBC/4, 0x120407); ++ INSTANCE_WR(ctx, 0xEC0/4, 0x5091507); ++ INSTANCE_WR(ctx, 0xEC4/4, 0x5010202); ++ INSTANCE_WR(ctx, 0xEC8/4, 0x30201); ++ INSTANCE_WR(ctx, 0xEE4/4, 0x40); ++ INSTANCE_WR(ctx, 0xEE8/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xEEC/4, 0x141210); ++ INSTANCE_WR(ctx, 0xEF0/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xEF4/4, 0x1); ++ INSTANCE_WR(ctx, 0xEF8/4, 0x3); ++ INSTANCE_WR(ctx, 0xF04/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xF08/4, 0x100); ++ INSTANCE_WR(ctx, 0xF0C/4, 0x3800); ++ INSTANCE_WR(ctx, 0xF10/4, 0x404040); ++ INSTANCE_WR(ctx, 0xF14/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xF1C/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xF20/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xF30/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xF34/4, 0x160000); ++ INSTANCE_WR(ctx, 0xF38/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xF48/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xF4C/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xF70/4, 0x10401); ++ INSTANCE_WR(ctx, 0xF78/4, 0x78); ++ INSTANCE_WR(ctx, 0xF80/4, 0xBF); ++ INSTANCE_WR(ctx, 0xF88/4, 0x1210); ++ INSTANCE_WR(ctx, 0xF8C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xFB0/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xFB4/4, 0x160000); ++ INSTANCE_WR(ctx, 0xFB8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xFC8/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xFCC/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xFF0/4, 0x10401); ++ INSTANCE_WR(ctx, 0xFF8/4, 0x78); ++ INSTANCE_WR(ctx, 0x1000/4, 0xBF); ++ INSTANCE_WR(ctx, 0x1008/4, 0x1210); ++ INSTANCE_WR(ctx, 0x100C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x1034/4, 0x27070); ++ INSTANCE_WR(ctx, 0x1040/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x1058/4, 0x120407); ++ INSTANCE_WR(ctx, 0x105C/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x1060/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x1064/4, 0x30201); ++ INSTANCE_WR(ctx, 0x1080/4, 0x40); ++ INSTANCE_WR(ctx, 0x1084/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x1088/4, 0x141210); ++ INSTANCE_WR(ctx, 0x108C/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x1090/4, 0x1); ++ INSTANCE_WR(ctx, 0x1094/4, 0x3); ++ INSTANCE_WR(ctx, 0x10A0/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x10A4/4, 0x100); ++ INSTANCE_WR(ctx, 0x10A8/4, 0x3800); ++ INSTANCE_WR(ctx, 0x10AC/4, 0x404040); ++ INSTANCE_WR(ctx, 0x10B0/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x10B8/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x10BC/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x10CC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x10D0/4, 0x160000); ++ INSTANCE_WR(ctx, 0x10D4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x10E4/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x10E8/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x110C/4, 0x10401); ++ INSTANCE_WR(ctx, 0x1114/4, 0x78); ++ INSTANCE_WR(ctx, 0x111C/4, 0xBF); ++ INSTANCE_WR(ctx, 0x1124/4, 0x1210); ++ INSTANCE_WR(ctx, 0x1128/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x114C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1150/4, 0x160000); ++ INSTANCE_WR(ctx, 0x1154/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1164/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x1168/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x118C/4, 0x10401); ++ INSTANCE_WR(ctx, 0x1194/4, 0x78); ++ INSTANCE_WR(ctx, 0x119C/4, 0xBF); ++ INSTANCE_WR(ctx, 0x11A4/4, 0x1210); ++ INSTANCE_WR(ctx, 0x11A8/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x11D0/4, 0x27070); ++ INSTANCE_WR(ctx, 0x11DC/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x11F4/4, 0x120407); ++ INSTANCE_WR(ctx, 0x11F8/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x11FC/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x1200/4, 0x30201); ++ INSTANCE_WR(ctx, 0x121C/4, 0x40); ++ INSTANCE_WR(ctx, 0x1220/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x1224/4, 0x141210); ++ INSTANCE_WR(ctx, 0x1228/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x122C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1230/4, 0x3); ++ INSTANCE_WR(ctx, 0x123C/4, 0x39E00); ++ 
INSTANCE_WR(ctx, 0x1240/4, 0x100); ++ INSTANCE_WR(ctx, 0x1244/4, 0x3800); ++ INSTANCE_WR(ctx, 0x1248/4, 0x404040); ++ INSTANCE_WR(ctx, 0x124C/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x1254/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x1258/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x1268/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x126C/4, 0x160000); ++ INSTANCE_WR(ctx, 0x1270/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1280/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x1284/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x12A8/4, 0x10401); ++ INSTANCE_WR(ctx, 0x12B0/4, 0x78); ++ INSTANCE_WR(ctx, 0x12B8/4, 0xBF); ++ INSTANCE_WR(ctx, 0x12C0/4, 0x1210); ++ INSTANCE_WR(ctx, 0x12C4/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x12E8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x12EC/4, 0x160000); ++ INSTANCE_WR(ctx, 0x12F0/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1300/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x1304/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x1328/4, 0x10401); ++ INSTANCE_WR(ctx, 0x1330/4, 0x78); ++ INSTANCE_WR(ctx, 0x1338/4, 0xBF); ++ INSTANCE_WR(ctx, 0x1340/4, 0x1210); ++ INSTANCE_WR(ctx, 0x1344/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x136C/4, 0x27070); ++ INSTANCE_WR(ctx, 0x1378/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x1390/4, 0x120407); ++ INSTANCE_WR(ctx, 0x1394/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x1398/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x139C/4, 0x30201); ++ INSTANCE_WR(ctx, 0x13B8/4, 0x40); ++ INSTANCE_WR(ctx, 0x13BC/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x13C0/4, 0x141210); ++ INSTANCE_WR(ctx, 0x13C4/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x13C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x13CC/4, 0x3); ++ INSTANCE_WR(ctx, 0x13D8/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x13DC/4, 0x100); ++ INSTANCE_WR(ctx, 0x13E0/4, 0x3800); ++ INSTANCE_WR(ctx, 0x13E4/4, 0x404040); ++ INSTANCE_WR(ctx, 0x13E8/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x13F0/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x13F4/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x8620/4, 0x21); ++ INSTANCE_WR(ctx, 0x8640/4, 0x1); ++ INSTANCE_WR(ctx, 0x8660/4, 0x2); ++ INSTANCE_WR(ctx, 0x8680/4, 0x100); ++ INSTANCE_WR(ctx, 0x86A0/4, 0x100); ++ INSTANCE_WR(ctx, 0x86C0/4, 0x1); ++ INSTANCE_WR(ctx, 0x8720/4, 0x1); ++ INSTANCE_WR(ctx, 0x8740/4, 0x2); ++ INSTANCE_WR(ctx, 0x8760/4, 0x100); ++ INSTANCE_WR(ctx, 0x8780/4, 0x100); ++ INSTANCE_WR(ctx, 0x87A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x1B8C0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1B8E0/4, 0x4); ++ INSTANCE_WR(ctx, 0x54260/4, 0x4); ++ INSTANCE_WR(ctx, 0x54280/4, 0x4); ++ INSTANCE_WR(ctx, 0x542A0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x542C0/4, 0x3); ++ INSTANCE_WR(ctx, 0x54300/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x54340/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x54360/4, 0x1); ++ INSTANCE_WR(ctx, 0x54380/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x543E0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x54400/4, 0x27); ++ INSTANCE_WR(ctx, 0x54460/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BCA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BF80/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5C120/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C140/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C180/4, 0x80); ++ INSTANCE_WR(ctx, 0x5C200/4, 0x80); ++ INSTANCE_WR(ctx, 0x5C240/4, 0x3F); ++ INSTANCE_WR(ctx, 0x5C3A0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5C3C0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C3E0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C500/4, 0x4); ++ INSTANCE_WR(ctx, 0x5C580/4, 0x4); ++ INSTANCE_WR(ctx, 0x5C7C0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C7E0/4, 0x1001); ++ INSTANCE_WR(ctx, 0x5C800/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5C820/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5C840/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5C860/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5CC80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CCA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CCC0/4, 
0x3F800000); ++ INSTANCE_WR(ctx, 0x5CCE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CDA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CDC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CDE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE80/4, 0x10); ++ INSTANCE_WR(ctx, 0x5CEE0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1584/4, 0xF); ++ INSTANCE_WR(ctx, 0x1624/4, 0x20); ++ INSTANCE_WR(ctx, 0x1804/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19C4/4, 0x4); ++ INSTANCE_WR(ctx, 0x19E4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A24/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A44/4, 0x8); ++ INSTANCE_WR(ctx, 0x1A84/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x1C24/4, 0xF); ++ INSTANCE_WR(ctx, 0x4104/4, 0xF); ++ INSTANCE_WR(ctx, 0x4144/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CA4/4, 0xF); ++ INSTANCE_WR(ctx, 0x15344/4, 0xF); ++ INSTANCE_WR(ctx, 0x155E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x15604/4, 0x100); ++ INSTANCE_WR(ctx, 0x15624/4, 0x100); ++ INSTANCE_WR(ctx, 0x15644/4, 0x11); ++ INSTANCE_WR(ctx, 0x15684/4, 0x8); ++ INSTANCE_WR(ctx, 0x15744/4, 0x1); ++ INSTANCE_WR(ctx, 0x15784/4, 0x1); ++ INSTANCE_WR(ctx, 0x157A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x157C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x157E4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x15804/4, 0x2); ++ INSTANCE_WR(ctx, 0x158E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x15924/4, 0x1); ++ INSTANCE_WR(ctx, 0x15944/4, 0x1); ++ INSTANCE_WR(ctx, 0x15964/4, 0x1); ++ INSTANCE_WR(ctx, 0x15A04/4, 0x4); ++ INSTANCE_WR(ctx, 0x15A44/4, 0x1); ++ INSTANCE_WR(ctx, 0x15A64/4, 0x15); ++ INSTANCE_WR(ctx, 0x15AE4/4, 0x4444480); ++ INSTANCE_WR(ctx, 0x16264/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x16304/4, 0x100); ++ INSTANCE_WR(ctx, 0x16364/4, 0x10001); ++ INSTANCE_WR(ctx, 0x163A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x163C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x163E4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x16404/4, 0x1); ++ INSTANCE_WR(ctx, 0x16424/4, 0x4); ++ INSTANCE_WR(ctx, 0x16444/4, 0x2); ++ INSTANCE_WR(ctx, 0x183C4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x183E4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18484/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18604/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18624/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16508/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x16568/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x16748/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16828/4, 0x4); ++ INSTANCE_WR(ctx, 0x16848/4, 0x1A); ++ INSTANCE_WR(ctx, 0x168A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B08/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x16BE8/4, 0xF); ++ INSTANCE_WR(ctx, 0x16CE8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16D08/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F08/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FA8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16FC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x16FE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x17068/4, 0x5); ++ INSTANCE_WR(ctx, 0x17088/4, 0x52); ++ INSTANCE_WR(ctx, 0x17128/4, 0x1); ++ INSTANCE_WR(ctx, 0x17348/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17368/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17388/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x173A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x173C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x173E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17408/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17428/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17448/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17468/4, 0x3F800000); ++ INSTANCE_WR(ctx, 
0x17488/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17508/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17528/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17548/4, 0x10); ++ INSTANCE_WR(ctx, 0x17A28/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x17A48/4, 0x5); ++ INSTANCE_WR(ctx, 0x17AA8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AE8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B08/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B28/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B48/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B68/4, 0x3); ++ INSTANCE_WR(ctx, 0x17F68/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x17F88/4, 0x1A); ++ INSTANCE_WR(ctx, 0x17FC8/4, 0x3); ++ INSTANCE_WR(ctx, 0x184A8/4, 0x102); ++ INSTANCE_WR(ctx, 0x184E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18508/4, 0x4); ++ INSTANCE_WR(ctx, 0x18528/4, 0x4); ++ INSTANCE_WR(ctx, 0x18548/4, 0x4); ++ INSTANCE_WR(ctx, 0x18568/4, 0x4); ++ INSTANCE_WR(ctx, 0x18588/4, 0x4); ++ INSTANCE_WR(ctx, 0x185C8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x18608/4, 0x102); ++ INSTANCE_WR(ctx, 0x18748/4, 0x4); ++ INSTANCE_WR(ctx, 0x18768/4, 0x4); ++ INSTANCE_WR(ctx, 0x18788/4, 0x4); ++ INSTANCE_WR(ctx, 0x187A8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18DE8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18E48/4, 0x804); ++ INSTANCE_WR(ctx, 0x18E88/4, 0x4); ++ INSTANCE_WR(ctx, 0x18EA8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18EC8/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18F08/4, 0x4); ++ INSTANCE_WR(ctx, 0x18F28/4, 0x4); ++ INSTANCE_WR(ctx, 0x18F68/4, 0x10); ++ INSTANCE_WR(ctx, 0x19008/4, 0x804); ++ INSTANCE_WR(ctx, 0x19028/4, 0x1); ++ INSTANCE_WR(ctx, 0x19048/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19068/4, 0x7F); ++ INSTANCE_WR(ctx, 0x190A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x190C8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x19108/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x19128/4, 0x4); ++ INSTANCE_WR(ctx, 0x19148/4, 0x4); ++ INSTANCE_WR(ctx, 0x19188/4, 0x10); ++ INSTANCE_WR(ctx, 0x19208/4, 0x1); ++ INSTANCE_WR(ctx, 0x19228/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x19308/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x19328/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x19A48/4, 0x1); ++ INSTANCE_WR(ctx, 0x19AA8/4, 0x10); ++ INSTANCE_WR(ctx, 0x1A1C8/4, 0x88); ++ INSTANCE_WR(ctx, 0x1A1E8/4, 0x88); ++ INSTANCE_WR(ctx, 0x1A248/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A528/4, 0x26); ++ INSTANCE_WR(ctx, 0x1A588/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x1A608/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1A628/4, 0x10); ++ INSTANCE_WR(ctx, 0x1AB48/4, 0x52); ++ INSTANCE_WR(ctx, 0x1AB88/4, 0x26); ++ INSTANCE_WR(ctx, 0x1ABC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1ABE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1AC28/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1AC88/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1ACC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1ACE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1AD28/4, 0x80); ++ INSTANCE_WR(ctx, 0x1AD48/4, 0x4); ++ INSTANCE_WR(ctx, 0x1AD68/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x1ADA8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2D608/4, 0x4); ++ INSTANCE_WR(ctx, 0x2D628/4, 0x4); ++ INSTANCE_WR(ctx, 0x2D668/4, 0x80); ++ INSTANCE_WR(ctx, 0x2D688/4, 0x4); ++ INSTANCE_WR(ctx, 0x2D6A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2D6E8/4, 0x27); ++ INSTANCE_WR(ctx, 0x2D728/4, 0x26); ++ INSTANCE_WR(ctx, 0x2D7A8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D7C8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D7E8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D808/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D828/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D848/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D868/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D888/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D8A8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D8C8/4, 0x4000000); ++ 
INSTANCE_WR(ctx, 0x2D8E8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D908/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D928/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D948/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D968/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D988/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2DE28/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x2DE48/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x2DEA8/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x160C/4, 0x2); ++ INSTANCE_WR(ctx, 0x164C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x180C/4, 0x10); ++ INSTANCE_WR(ctx, 0x186C/4, 0x1); ++ INSTANCE_WR(ctx, 0x190C/4, 0x4); ++ INSTANCE_WR(ctx, 0x192C/4, 0x400); ++ INSTANCE_WR(ctx, 0x194C/4, 0x300); ++ INSTANCE_WR(ctx, 0x196C/4, 0x1001); ++ INSTANCE_WR(ctx, 0x198C/4, 0x15); ++ INSTANCE_WR(ctx, 0x1A4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x1B6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1B8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x1BCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1E4C/4, 0x10); ++ INSTANCE_WR(ctx, 0x206C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x208C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x20AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x20CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x20EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x210C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x212C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x214C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x216C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x218C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x21AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x21CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x21EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x220C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x222C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x224C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x258C/4, 0x10); ++ INSTANCE_WR(ctx, 0x25CC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x26AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x26EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x272C/4, 0x1); ++ INSTANCE_WR(ctx, 0x28CC/4, 0x11); ++ INSTANCE_WR(ctx, 0x29CC/4, 0xF); ++ INSTANCE_WR(ctx, 0x2ACC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2BAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2BCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2BEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2C0C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2C4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2C6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2CAC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2CEC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FCC/4, 0x2); ++ INSTANCE_WR(ctx, 0x2FEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x300C/4, 0x1); ++ INSTANCE_WR(ctx, 0x302C/4, 0x2); ++ INSTANCE_WR(ctx, 0x304C/4, 0x1); ++ INSTANCE_WR(ctx, 0x306C/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EC/4, 0x11); ++ INSTANCE_WR(ctx, 0x310C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3D8C/4, 0x2); ++ INSTANCE_WR(ctx, 0x3DCC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x3F6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3F8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x3FEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x408C/4, 0x4); ++ INSTANCE_WR(ctx, 0x40AC/4, 0x400); ++ INSTANCE_WR(ctx, 0x40CC/4, 0x300); ++ INSTANCE_WR(ctx, 0x40EC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x410C/4, 0x15); ++ INSTANCE_WR(ctx, 0x41CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x42EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x430C/4, 0x10); ++ INSTANCE_WR(ctx, 0x434C/4, 0x1); ++ INSTANCE_WR(ctx, 0x45CC/4, 0x10); ++ INSTANCE_WR(ctx, 0x47EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x480C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x482C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x484C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x486C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x488C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x48AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x48CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x48EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x490C/4, 
0x3F800000); ++ INSTANCE_WR(ctx, 0x492C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x494C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x496C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x498C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x49AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x49CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x4D0C/4, 0x10); ++ INSTANCE_WR(ctx, 0x4D4C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x4E2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4E6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4EAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x504C/4, 0x11); ++ INSTANCE_WR(ctx, 0x514C/4, 0xF); ++ INSTANCE_WR(ctx, 0x524C/4, 0x11); ++ INSTANCE_WR(ctx, 0x532C/4, 0x1); ++ INSTANCE_WR(ctx, 0x534C/4, 0x1); ++ INSTANCE_WR(ctx, 0x536C/4, 0x1); ++ INSTANCE_WR(ctx, 0x538C/4, 0x2); ++ INSTANCE_WR(ctx, 0x53AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x53CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x53EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x542C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x546C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x572C/4, 0x1); ++ INSTANCE_WR(ctx, 0x574C/4, 0x2); ++ INSTANCE_WR(ctx, 0x576C/4, 0x1); ++ INSTANCE_WR(ctx, 0x578C/4, 0x1); ++ INSTANCE_WR(ctx, 0x57AC/4, 0x2); ++ INSTANCE_WR(ctx, 0x57CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x57EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x586C/4, 0x11); ++ INSTANCE_WR(ctx, 0x588C/4, 0x1); ++ INSTANCE_WR(ctx, 0x650C/4, 0x2); ++ INSTANCE_WR(ctx, 0x654C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x66EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x670C/4, 0x10); ++ INSTANCE_WR(ctx, 0x676C/4, 0x1); ++ INSTANCE_WR(ctx, 0x680C/4, 0x4); ++ INSTANCE_WR(ctx, 0x682C/4, 0x400); ++ INSTANCE_WR(ctx, 0x684C/4, 0x300); ++ INSTANCE_WR(ctx, 0x686C/4, 0x1001); ++ INSTANCE_WR(ctx, 0x688C/4, 0x15); ++ INSTANCE_WR(ctx, 0x694C/4, 0x2); ++ INSTANCE_WR(ctx, 0x6A6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x6A8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x6ACC/4, 0x1); ++ INSTANCE_WR(ctx, 0x6D4C/4, 0x10); ++ INSTANCE_WR(ctx, 0x6F6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6F8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6FAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6FCC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6FEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x700C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x702C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x704C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x706C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x708C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x70AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x70CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x70EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x710C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x712C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x714C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x748C/4, 0x10); ++ INSTANCE_WR(ctx, 0x74CC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x75AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x75EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x762C/4, 0x1); ++ INSTANCE_WR(ctx, 0x77CC/4, 0x11); ++ INSTANCE_WR(ctx, 0x78CC/4, 0xF); ++ INSTANCE_WR(ctx, 0x79CC/4, 0x11); ++ INSTANCE_WR(ctx, 0x7AAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7ACC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7AEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7B0C/4, 0x2); ++ INSTANCE_WR(ctx, 0x7B2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7B4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x7B6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7BAC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x7BEC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x7EAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7ECC/4, 0x2); ++ INSTANCE_WR(ctx, 0x7EEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7F0C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7F2C/4, 0x2); ++ INSTANCE_WR(ctx, 0x7F4C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7F6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7FEC/4, 0x11); ++ INSTANCE_WR(ctx, 0x800C/4, 0x1); ++ INSTANCE_WR(ctx, 0x8C8C/4, 0x2); ++ INSTANCE_WR(ctx, 0x8CCC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x8E6C/4, 0x1); ++ INSTANCE_WR(ctx, 
0x8E8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x8EEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x8F8C/4, 0x4); ++ INSTANCE_WR(ctx, 0x8FAC/4, 0x400); ++ INSTANCE_WR(ctx, 0x8FCC/4, 0x300); ++ INSTANCE_WR(ctx, 0x8FEC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x900C/4, 0x15); ++ INSTANCE_WR(ctx, 0x90CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x91EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x920C/4, 0x10); ++ INSTANCE_WR(ctx, 0x924C/4, 0x1); ++ INSTANCE_WR(ctx, 0x94CC/4, 0x10); ++ INSTANCE_WR(ctx, 0x96EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x970C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x972C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x974C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x976C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x978C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x97AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x97CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x97EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x980C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x982C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x984C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x986C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x988C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x98AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x98CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x9C0C/4, 0x10); ++ INSTANCE_WR(ctx, 0x9C4C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x9D2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x9D6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x9DAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x9F4C/4, 0x11); ++ INSTANCE_WR(ctx, 0xA04C/4, 0xF); ++ INSTANCE_WR(ctx, 0xA14C/4, 0x11); ++ INSTANCE_WR(ctx, 0xA22C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA24C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA26C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA28C/4, 0x2); ++ INSTANCE_WR(ctx, 0xA2AC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA2CC/4, 0x2); ++ INSTANCE_WR(ctx, 0xA2EC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA32C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0xA36C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0xA62C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA64C/4, 0x2); ++ INSTANCE_WR(ctx, 0xA66C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA68C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA6AC/4, 0x2); ++ INSTANCE_WR(ctx, 0xA6CC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA6EC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA76C/4, 0x11); ++ INSTANCE_WR(ctx, 0xA78C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1530/4, 0x4); ++ INSTANCE_WR(ctx, 0x17F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1810/4, 0x4); ++ INSTANCE_WR(ctx, 0x1830/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18D0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1930/4, 0x4); ++ INSTANCE_WR(ctx, 0x1950/4, 0x4); ++ INSTANCE_WR(ctx, 0x1970/4, 0x80); ++ INSTANCE_WR(ctx, 0x1990/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E30/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E50/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E70/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E90/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EB0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1ED0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F70/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F90/4, 0x3); ++ INSTANCE_WR(ctx, 0x2010/4, 0x4); ++ INSTANCE_WR(ctx, 0x164B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x164D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x16710/4, 0xF); ++ INSTANCE_WR(ctx, 0x16890/4, 0x4); ++ INSTANCE_WR(ctx, 0x168B0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168D0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168F0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16910/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A30/4, 0x1); ++ INSTANCE_WR(ctx, 0x16AB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B70/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D10/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D30/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D50/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D70/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D90/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB0/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E10/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F10/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F70/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FF0/4, 
0x11); ++ INSTANCE_WR(ctx, 0x17010/4, 0x1); ++ INSTANCE_WR(ctx, 0x17050/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17070/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17090/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17210/4, 0x1); ++ INSTANCE_WR(ctx, 0x17230/4, 0x2); ++ INSTANCE_WR(ctx, 0x17250/4, 0x1); ++ INSTANCE_WR(ctx, 0x17270/4, 0x1); ++ INSTANCE_WR(ctx, 0x17290/4, 0x2); ++ INSTANCE_WR(ctx, 0x172B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x172F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17310/4, 0x1); ++ INSTANCE_WR(ctx, 0x17330/4, 0x1); ++ INSTANCE_WR(ctx, 0x17350/4, 0x1); ++ INSTANCE_WR(ctx, 0x17370/4, 0x1); ++ INSTANCE_WR(ctx, 0x17390/4, 0x1); ++ INSTANCE_WR(ctx, 0x173B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x173D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x173F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x174F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17510/4, 0xF); ++ INSTANCE_WR(ctx, 0x17610/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17670/4, 0x11); ++ INSTANCE_WR(ctx, 0x17690/4, 0x1); ++ INSTANCE_WR(ctx, 0x17710/4, 0x4); ++ INSTANCE_WR(ctx, 0x177D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17870/4, 0x11); ++ INSTANCE_WR(ctx, 0x17970/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A50/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A90/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AD0/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B50/4, 0x1); ++ INSTANCE_WR(ctx, 0x180B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x180D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x180F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x18110/4, 0x8); ++ INSTANCE_WR(ctx, 0x18130/4, 0x8); ++ INSTANCE_WR(ctx, 0x18150/4, 0x8); ++ INSTANCE_WR(ctx, 0x18170/4, 0x8); ++ INSTANCE_WR(ctx, 0x18190/4, 0x8); ++ INSTANCE_WR(ctx, 0x181B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x182B0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x182F0/4, 0x400); ++ INSTANCE_WR(ctx, 0x18310/4, 0x400); ++ INSTANCE_WR(ctx, 0x18330/4, 0x400); ++ INSTANCE_WR(ctx, 0x18350/4, 0x400); ++ INSTANCE_WR(ctx, 0x18370/4, 0x400); ++ INSTANCE_WR(ctx, 0x18390/4, 0x400); ++ INSTANCE_WR(ctx, 0x183B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x183D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x183F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x18410/4, 0x300); ++ INSTANCE_WR(ctx, 0x18430/4, 0x300); ++ INSTANCE_WR(ctx, 0x18450/4, 0x300); ++ INSTANCE_WR(ctx, 0x18470/4, 0x300); ++ INSTANCE_WR(ctx, 0x18490/4, 0x300); ++ INSTANCE_WR(ctx, 0x184B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x184D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x184F0/4, 0xF); ++ INSTANCE_WR(ctx, 0x185F0/4, 0x20); ++ INSTANCE_WR(ctx, 0x18610/4, 0x11); ++ INSTANCE_WR(ctx, 0x18630/4, 0x100); ++ INSTANCE_WR(ctx, 0x18670/4, 0x1); ++ INSTANCE_WR(ctx, 0x186D0/4, 0x40); ++ INSTANCE_WR(ctx, 0x186F0/4, 0x100); ++ INSTANCE_WR(ctx, 0x18730/4, 0x3); ++ INSTANCE_WR(ctx, 0x187D0/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18850/4, 0x2); ++ INSTANCE_WR(ctx, 0x18870/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A50/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A90/4, 0x1); ++ INSTANCE_WR(ctx, 0x18AB0/4, 0x400); ++ INSTANCE_WR(ctx, 0x18AD0/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AF0/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B70/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C90/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F90/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x19010/4, 0x11); ++ INSTANCE_WR(ctx, 0x19070/4, 0x4); ++ INSTANCE_WR(ctx, 0x190B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x190D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x19150/4, 0x1); ++ INSTANCE_WR(ctx, 0x191F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x19230/4, 0x1); ++ INSTANCE_WR(ctx, 0x192B0/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192F0/4, 
0x4085C000); ++ INSTANCE_WR(ctx, 0x19310/4, 0x40); ++ INSTANCE_WR(ctx, 0x19330/4, 0x100); ++ INSTANCE_WR(ctx, 0x19350/4, 0x10100); ++ INSTANCE_WR(ctx, 0x19370/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195D0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195F0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x19610/4, 0x1); ++ INSTANCE_WR(ctx, 0x19650/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x19670/4, 0x1); ++ INSTANCE_WR(ctx, 0x196D0/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x19830/4, 0x1); ++ INSTANCE_WR(ctx, 0x19850/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19870/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19890/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198B0/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198F0/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19930/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BF0/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C10/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C30/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CD0/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D30/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D50/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D70/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D90/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A230/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A250/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A270/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A290/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2B0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2D0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A370/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A390/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A410/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB10/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC90/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECB0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECD0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECF0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED10/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE30/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF70/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F110/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F130/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F150/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F170/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F190/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1B0/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F210/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F310/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F370/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F410/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F450/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F470/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F490/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F610/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F630/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F650/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F670/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F690/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F710/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F730/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F750/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F770/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F790/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F910/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA10/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA70/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA90/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB10/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC70/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDF0/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE10/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE50/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE90/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FED0/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF10/4, 0x1); ++ 
INSTANCE_WR(ctx, 0x2FF50/4, 0x1); ++ INSTANCE_WR(ctx, 0x304B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x304D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x304F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x30510/4, 0x8); ++ INSTANCE_WR(ctx, 0x30530/4, 0x8); ++ INSTANCE_WR(ctx, 0x30550/4, 0x8); ++ INSTANCE_WR(ctx, 0x30570/4, 0x8); ++ INSTANCE_WR(ctx, 0x30590/4, 0x8); ++ INSTANCE_WR(ctx, 0x305B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x306B0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x306F0/4, 0x400); ++ INSTANCE_WR(ctx, 0x30710/4, 0x400); ++ INSTANCE_WR(ctx, 0x30730/4, 0x400); ++ INSTANCE_WR(ctx, 0x30750/4, 0x400); ++ INSTANCE_WR(ctx, 0x30770/4, 0x400); ++ INSTANCE_WR(ctx, 0x30790/4, 0x400); ++ INSTANCE_WR(ctx, 0x307B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x307D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x307F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x30810/4, 0x300); ++ INSTANCE_WR(ctx, 0x30830/4, 0x300); ++ INSTANCE_WR(ctx, 0x30850/4, 0x300); ++ INSTANCE_WR(ctx, 0x30870/4, 0x300); ++ INSTANCE_WR(ctx, 0x30890/4, 0x300); ++ INSTANCE_WR(ctx, 0x308B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x308D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x308F0/4, 0xF); ++ INSTANCE_WR(ctx, 0x309F0/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A10/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A30/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A70/4, 0x1); ++ INSTANCE_WR(ctx, 0x30AD0/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AF0/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B30/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BD0/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C50/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E50/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E90/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EB0/4, 0x400); ++ INSTANCE_WR(ctx, 0x30ED0/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EF0/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F70/4, 0x11); ++ INSTANCE_WR(ctx, 0x31070/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x31090/4, 0xF); ++ INSTANCE_WR(ctx, 0x31390/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x31410/4, 0x11); ++ INSTANCE_WR(ctx, 0x31470/4, 0x4); ++ INSTANCE_WR(ctx, 0x314B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x314D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x31550/4, 0x1); ++ INSTANCE_WR(ctx, 0x315F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x31630/4, 0x1); ++ INSTANCE_WR(ctx, 0x316B0/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316F0/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x31710/4, 0x40); ++ INSTANCE_WR(ctx, 0x31730/4, 0x100); ++ INSTANCE_WR(ctx, 0x31750/4, 0x10100); ++ INSTANCE_WR(ctx, 0x31770/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319D0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319F0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A50/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A70/4, 0x1); ++ INSTANCE_WR(ctx, 0x31AD0/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C30/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C50/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C70/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C90/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CB0/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CF0/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1534/4, 0x4); ++ INSTANCE_WR(ctx, 0x17F4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1814/4, 0x4); ++ INSTANCE_WR(ctx, 0x1834/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18D4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1934/4, 0x4); ++ INSTANCE_WR(ctx, 0x1954/4, 0x4); ++ INSTANCE_WR(ctx, 0x1974/4, 0x80); ++ INSTANCE_WR(ctx, 0x1994/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E34/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E54/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E74/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E94/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EB4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1ED4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F74/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F94/4, 0x3); ++ 
INSTANCE_WR(ctx, 0x2014/4, 0x4); ++ INSTANCE_WR(ctx, 0x164B4/4, 0x4); ++ INSTANCE_WR(ctx, 0x164D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x16714/4, 0xF); ++ INSTANCE_WR(ctx, 0x16894/4, 0x4); ++ INSTANCE_WR(ctx, 0x168B4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168D4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168F4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16914/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A34/4, 0x1); ++ INSTANCE_WR(ctx, 0x16AB4/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B74/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D14/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D34/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D54/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D74/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D94/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB4/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DD4/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E14/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F14/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F74/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FF4/4, 0x11); ++ INSTANCE_WR(ctx, 0x17014/4, 0x1); ++ INSTANCE_WR(ctx, 0x17054/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17074/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17094/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x17214/4, 0x1); ++ INSTANCE_WR(ctx, 0x17234/4, 0x2); ++ INSTANCE_WR(ctx, 0x17254/4, 0x1); ++ INSTANCE_WR(ctx, 0x17274/4, 0x1); ++ INSTANCE_WR(ctx, 0x17294/4, 0x2); ++ INSTANCE_WR(ctx, 0x172B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x172F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x17314/4, 0x1); ++ INSTANCE_WR(ctx, 0x17334/4, 0x1); ++ INSTANCE_WR(ctx, 0x17354/4, 0x1); ++ INSTANCE_WR(ctx, 0x17374/4, 0x1); ++ INSTANCE_WR(ctx, 0x17394/4, 0x1); ++ INSTANCE_WR(ctx, 0x173B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x173D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x173F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x174F4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17514/4, 0xF); ++ INSTANCE_WR(ctx, 0x17614/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17674/4, 0x11); ++ INSTANCE_WR(ctx, 0x17694/4, 0x1); ++ INSTANCE_WR(ctx, 0x17714/4, 0x4); ++ INSTANCE_WR(ctx, 0x177D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x17874/4, 0x11); ++ INSTANCE_WR(ctx, 0x17974/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A14/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A54/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A94/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AD4/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B14/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B54/4, 0x1); ++ INSTANCE_WR(ctx, 0x180B4/4, 0x8); ++ INSTANCE_WR(ctx, 0x180D4/4, 0x8); ++ INSTANCE_WR(ctx, 0x180F4/4, 0x8); ++ INSTANCE_WR(ctx, 0x18114/4, 0x8); ++ INSTANCE_WR(ctx, 0x18134/4, 0x8); ++ INSTANCE_WR(ctx, 0x18154/4, 0x8); ++ INSTANCE_WR(ctx, 0x18174/4, 0x8); ++ INSTANCE_WR(ctx, 0x18194/4, 0x8); ++ INSTANCE_WR(ctx, 0x181B4/4, 0x11); ++ INSTANCE_WR(ctx, 0x182B4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182D4/4, 0x400); ++ INSTANCE_WR(ctx, 0x182F4/4, 0x400); ++ INSTANCE_WR(ctx, 0x18314/4, 0x400); ++ INSTANCE_WR(ctx, 0x18334/4, 0x400); ++ INSTANCE_WR(ctx, 0x18354/4, 0x400); ++ INSTANCE_WR(ctx, 0x18374/4, 0x400); ++ INSTANCE_WR(ctx, 0x18394/4, 0x400); ++ INSTANCE_WR(ctx, 0x183B4/4, 0x400); ++ INSTANCE_WR(ctx, 0x183D4/4, 0x300); ++ INSTANCE_WR(ctx, 0x183F4/4, 0x300); ++ INSTANCE_WR(ctx, 0x18414/4, 0x300); ++ INSTANCE_WR(ctx, 0x18434/4, 0x300); ++ INSTANCE_WR(ctx, 0x18454/4, 0x300); ++ INSTANCE_WR(ctx, 0x18474/4, 0x300); ++ INSTANCE_WR(ctx, 0x18494/4, 0x300); ++ INSTANCE_WR(ctx, 0x184B4/4, 0x300); ++ INSTANCE_WR(ctx, 0x184D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x184F4/4, 0xF); ++ INSTANCE_WR(ctx, 0x185F4/4, 0x20); ++ INSTANCE_WR(ctx, 0x18614/4, 0x11); ++ INSTANCE_WR(ctx, 0x18634/4, 0x100); ++ INSTANCE_WR(ctx, 0x18674/4, 0x1); ++ INSTANCE_WR(ctx, 0x186D4/4, 0x40); ++ INSTANCE_WR(ctx, 0x186F4/4, 0x100); ++ INSTANCE_WR(ctx, 
0x18734/4, 0x3); ++ INSTANCE_WR(ctx, 0x187D4/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18854/4, 0x2); ++ INSTANCE_WR(ctx, 0x18874/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A54/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A94/4, 0x1); ++ INSTANCE_WR(ctx, 0x18AB4/4, 0x400); ++ INSTANCE_WR(ctx, 0x18AD4/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AF4/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B74/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C94/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F94/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x19014/4, 0x11); ++ INSTANCE_WR(ctx, 0x19074/4, 0x4); ++ INSTANCE_WR(ctx, 0x190B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x190D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x19154/4, 0x1); ++ INSTANCE_WR(ctx, 0x191F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x19234/4, 0x1); ++ INSTANCE_WR(ctx, 0x192B4/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192F4/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x19314/4, 0x40); ++ INSTANCE_WR(ctx, 0x19334/4, 0x100); ++ INSTANCE_WR(ctx, 0x19354/4, 0x10100); ++ INSTANCE_WR(ctx, 0x19374/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195D4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195F4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x19614/4, 0x1); ++ INSTANCE_WR(ctx, 0x19654/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x19674/4, 0x1); ++ INSTANCE_WR(ctx, 0x196D4/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x19834/4, 0x1); ++ INSTANCE_WR(ctx, 0x19854/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19874/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19894/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198B4/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198F4/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19934/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BF4/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C14/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C34/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CD4/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D34/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D54/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D74/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D94/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A234/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A254/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A274/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A294/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2B4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2D4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A374/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A394/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A414/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8B4/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB14/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC94/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECB4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECD4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECF4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED14/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE34/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEB4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF74/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F114/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F134/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F154/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F174/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F194/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1B4/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F214/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F314/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F374/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F414/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F454/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F474/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F494/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F614/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F634/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F654/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F674/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F694/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6F4/4, 0x1); ++ INSTANCE_WR(ctx, 
0x2F714/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F734/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F754/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F774/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F794/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8F4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F914/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA14/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA74/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA94/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB14/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBD4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC74/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDF4/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE14/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE54/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE94/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FED4/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF14/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FF54/4, 0x1); ++ INSTANCE_WR(ctx, 0x304B4/4, 0x8); ++ INSTANCE_WR(ctx, 0x304D4/4, 0x8); ++ INSTANCE_WR(ctx, 0x304F4/4, 0x8); ++ INSTANCE_WR(ctx, 0x30514/4, 0x8); ++ INSTANCE_WR(ctx, 0x30534/4, 0x8); ++ INSTANCE_WR(ctx, 0x30554/4, 0x8); ++ INSTANCE_WR(ctx, 0x30574/4, 0x8); ++ INSTANCE_WR(ctx, 0x30594/4, 0x8); ++ INSTANCE_WR(ctx, 0x305B4/4, 0x11); ++ INSTANCE_WR(ctx, 0x306B4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306D4/4, 0x400); ++ INSTANCE_WR(ctx, 0x306F4/4, 0x400); ++ INSTANCE_WR(ctx, 0x30714/4, 0x400); ++ INSTANCE_WR(ctx, 0x30734/4, 0x400); ++ INSTANCE_WR(ctx, 0x30754/4, 0x400); ++ INSTANCE_WR(ctx, 0x30774/4, 0x400); ++ INSTANCE_WR(ctx, 0x30794/4, 0x400); ++ INSTANCE_WR(ctx, 0x307B4/4, 0x400); ++ INSTANCE_WR(ctx, 0x307D4/4, 0x300); ++ INSTANCE_WR(ctx, 0x307F4/4, 0x300); ++ INSTANCE_WR(ctx, 0x30814/4, 0x300); ++ INSTANCE_WR(ctx, 0x30834/4, 0x300); ++ INSTANCE_WR(ctx, 0x30854/4, 0x300); ++ INSTANCE_WR(ctx, 0x30874/4, 0x300); ++ INSTANCE_WR(ctx, 0x30894/4, 0x300); ++ INSTANCE_WR(ctx, 0x308B4/4, 0x300); ++ INSTANCE_WR(ctx, 0x308D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x308F4/4, 0xF); ++ INSTANCE_WR(ctx, 0x309F4/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A14/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A34/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A74/4, 0x1); ++ INSTANCE_WR(ctx, 0x30AD4/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AF4/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B34/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BD4/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C54/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DB4/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E54/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E94/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EB4/4, 0x400); ++ INSTANCE_WR(ctx, 0x30ED4/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EF4/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F74/4, 0x11); ++ INSTANCE_WR(ctx, 0x31074/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x31094/4, 0xF); ++ INSTANCE_WR(ctx, 0x31394/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x31414/4, 0x11); ++ INSTANCE_WR(ctx, 0x31474/4, 0x4); ++ INSTANCE_WR(ctx, 0x314B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x314D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x31554/4, 0x1); ++ INSTANCE_WR(ctx, 0x315F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x31634/4, 0x1); ++ INSTANCE_WR(ctx, 0x316B4/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316F4/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x31714/4, 0x40); ++ INSTANCE_WR(ctx, 0x31734/4, 0x100); ++ INSTANCE_WR(ctx, 0x31754/4, 0x10100); ++ INSTANCE_WR(ctx, 0x31774/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319D4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319F4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A14/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A54/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A74/4, 0x1); ++ INSTANCE_WR(ctx, 0x31AD4/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BF4/4, 0x1); ++ INSTANCE_WR(ctx, 
0x31C34/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C54/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C74/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C94/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CB4/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CF4/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1538/4, 0x4); ++ INSTANCE_WR(ctx, 0x17F8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1818/4, 0x4); ++ INSTANCE_WR(ctx, 0x1838/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18D8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1938/4, 0x4); ++ INSTANCE_WR(ctx, 0x1958/4, 0x4); ++ INSTANCE_WR(ctx, 0x1978/4, 0x80); ++ INSTANCE_WR(ctx, 0x1998/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E38/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E58/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E78/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E98/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EB8/4, 0x3); ++ INSTANCE_WR(ctx, 0x1ED8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F78/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F98/4, 0x3); ++ INSTANCE_WR(ctx, 0x2018/4, 0x4); ++ INSTANCE_WR(ctx, 0x164B8/4, 0x4); ++ INSTANCE_WR(ctx, 0x164D8/4, 0x3); ++ INSTANCE_WR(ctx, 0x16718/4, 0xF); ++ INSTANCE_WR(ctx, 0x16898/4, 0x4); ++ INSTANCE_WR(ctx, 0x168B8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168D8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168F8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16918/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A38/4, 0x1); ++ INSTANCE_WR(ctx, 0x16AB8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B78/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D18/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D38/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D58/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D78/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D98/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DD8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E18/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F18/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F78/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FF8/4, 0x11); ++ INSTANCE_WR(ctx, 0x17018/4, 0x1); ++ INSTANCE_WR(ctx, 0x17058/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17078/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17098/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17218/4, 0x1); ++ INSTANCE_WR(ctx, 0x17238/4, 0x2); ++ INSTANCE_WR(ctx, 0x17258/4, 0x1); ++ INSTANCE_WR(ctx, 0x17278/4, 0x1); ++ INSTANCE_WR(ctx, 0x17298/4, 0x2); ++ INSTANCE_WR(ctx, 0x172B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x172F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17318/4, 0x1); ++ INSTANCE_WR(ctx, 0x17338/4, 0x1); ++ INSTANCE_WR(ctx, 0x17358/4, 0x1); ++ INSTANCE_WR(ctx, 0x17378/4, 0x1); ++ INSTANCE_WR(ctx, 0x17398/4, 0x1); ++ INSTANCE_WR(ctx, 0x173B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x173D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x173F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x174F8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17518/4, 0xF); ++ INSTANCE_WR(ctx, 0x17618/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17678/4, 0x11); ++ INSTANCE_WR(ctx, 0x17698/4, 0x1); ++ INSTANCE_WR(ctx, 0x17718/4, 0x4); ++ INSTANCE_WR(ctx, 0x177D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17878/4, 0x11); ++ INSTANCE_WR(ctx, 0x17978/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A18/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A58/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A98/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AD8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B18/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B58/4, 0x1); ++ INSTANCE_WR(ctx, 0x180B8/4, 0x8); ++ INSTANCE_WR(ctx, 0x180D8/4, 0x8); ++ INSTANCE_WR(ctx, 0x180F8/4, 0x8); ++ INSTANCE_WR(ctx, 0x18118/4, 0x8); ++ INSTANCE_WR(ctx, 0x18138/4, 0x8); ++ INSTANCE_WR(ctx, 0x18158/4, 0x8); ++ INSTANCE_WR(ctx, 0x18178/4, 0x8); ++ INSTANCE_WR(ctx, 0x18198/4, 0x8); ++ INSTANCE_WR(ctx, 0x181B8/4, 0x11); ++ INSTANCE_WR(ctx, 0x182B8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182D8/4, 0x400); ++ INSTANCE_WR(ctx, 0x182F8/4, 0x400); ++ 
INSTANCE_WR(ctx, 0x18318/4, 0x400); ++ INSTANCE_WR(ctx, 0x18338/4, 0x400); ++ INSTANCE_WR(ctx, 0x18358/4, 0x400); ++ INSTANCE_WR(ctx, 0x18378/4, 0x400); ++ INSTANCE_WR(ctx, 0x18398/4, 0x400); ++ INSTANCE_WR(ctx, 0x183B8/4, 0x400); ++ INSTANCE_WR(ctx, 0x183D8/4, 0x300); ++ INSTANCE_WR(ctx, 0x183F8/4, 0x300); ++ INSTANCE_WR(ctx, 0x18418/4, 0x300); ++ INSTANCE_WR(ctx, 0x18438/4, 0x300); ++ INSTANCE_WR(ctx, 0x18458/4, 0x300); ++ INSTANCE_WR(ctx, 0x18478/4, 0x300); ++ INSTANCE_WR(ctx, 0x18498/4, 0x300); ++ INSTANCE_WR(ctx, 0x184B8/4, 0x300); ++ INSTANCE_WR(ctx, 0x184D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x184F8/4, 0xF); ++ INSTANCE_WR(ctx, 0x185F8/4, 0x20); ++ INSTANCE_WR(ctx, 0x18618/4, 0x11); ++ INSTANCE_WR(ctx, 0x18638/4, 0x100); ++ INSTANCE_WR(ctx, 0x18678/4, 0x1); ++ INSTANCE_WR(ctx, 0x186D8/4, 0x40); ++ INSTANCE_WR(ctx, 0x186F8/4, 0x100); ++ INSTANCE_WR(ctx, 0x18738/4, 0x3); ++ INSTANCE_WR(ctx, 0x187D8/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18858/4, 0x2); ++ INSTANCE_WR(ctx, 0x18878/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A58/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A98/4, 0x1); ++ INSTANCE_WR(ctx, 0x18AB8/4, 0x400); ++ INSTANCE_WR(ctx, 0x18AD8/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AF8/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B78/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C98/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F98/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x19018/4, 0x11); ++ INSTANCE_WR(ctx, 0x19078/4, 0x4); ++ INSTANCE_WR(ctx, 0x190B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x190D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x19158/4, 0x1); ++ INSTANCE_WR(ctx, 0x191F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x19238/4, 0x1); ++ INSTANCE_WR(ctx, 0x192B8/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192F8/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x19318/4, 0x40); ++ INSTANCE_WR(ctx, 0x19338/4, 0x100); ++ INSTANCE_WR(ctx, 0x19358/4, 0x10100); ++ INSTANCE_WR(ctx, 0x19378/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195D8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195F8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x19618/4, 0x1); ++ INSTANCE_WR(ctx, 0x19658/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x19678/4, 0x1); ++ INSTANCE_WR(ctx, 0x196D8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x19838/4, 0x1); ++ INSTANCE_WR(ctx, 0x19858/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19878/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19898/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198B8/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198F8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19938/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BF8/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C18/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C38/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CD8/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D38/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D58/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D78/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D98/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A238/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A258/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A278/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A298/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2B8/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2D8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A378/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A398/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A418/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8B8/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8D8/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB18/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC98/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECB8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECD8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECF8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED18/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE38/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEB8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF78/4, 0x1); ++ INSTANCE_WR(ctx, 
0x2F118/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F138/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F158/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F178/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F198/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1B8/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F218/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F318/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F378/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F418/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F458/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F478/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F498/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F618/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F638/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F658/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F678/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F698/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F718/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F738/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F758/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F778/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F798/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8F8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F918/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA18/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA78/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA98/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB18/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBD8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC78/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDF8/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE18/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE58/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE98/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FED8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF18/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FF58/4, 0x1); ++ INSTANCE_WR(ctx, 0x304B8/4, 0x8); ++ INSTANCE_WR(ctx, 0x304D8/4, 0x8); ++ INSTANCE_WR(ctx, 0x304F8/4, 0x8); ++ INSTANCE_WR(ctx, 0x30518/4, 0x8); ++ INSTANCE_WR(ctx, 0x30538/4, 0x8); ++ INSTANCE_WR(ctx, 0x30558/4, 0x8); ++ INSTANCE_WR(ctx, 0x30578/4, 0x8); ++ INSTANCE_WR(ctx, 0x30598/4, 0x8); ++ INSTANCE_WR(ctx, 0x305B8/4, 0x11); ++ INSTANCE_WR(ctx, 0x306B8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306D8/4, 0x400); ++ INSTANCE_WR(ctx, 0x306F8/4, 0x400); ++ INSTANCE_WR(ctx, 0x30718/4, 0x400); ++ INSTANCE_WR(ctx, 0x30738/4, 0x400); ++ INSTANCE_WR(ctx, 0x30758/4, 0x400); ++ INSTANCE_WR(ctx, 0x30778/4, 0x400); ++ INSTANCE_WR(ctx, 0x30798/4, 0x400); ++ INSTANCE_WR(ctx, 0x307B8/4, 0x400); ++ INSTANCE_WR(ctx, 0x307D8/4, 0x300); ++ INSTANCE_WR(ctx, 0x307F8/4, 0x300); ++ INSTANCE_WR(ctx, 0x30818/4, 0x300); ++ INSTANCE_WR(ctx, 0x30838/4, 0x300); ++ INSTANCE_WR(ctx, 0x30858/4, 0x300); ++ INSTANCE_WR(ctx, 0x30878/4, 0x300); ++ INSTANCE_WR(ctx, 0x30898/4, 0x300); ++ INSTANCE_WR(ctx, 0x308B8/4, 0x300); ++ INSTANCE_WR(ctx, 0x308D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x308F8/4, 0xF); ++ INSTANCE_WR(ctx, 0x309F8/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A18/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A38/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A78/4, 0x1); ++ INSTANCE_WR(ctx, 0x30AD8/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AF8/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B38/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BD8/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C58/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DB8/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E58/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E98/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EB8/4, 0x400); ++ INSTANCE_WR(ctx, 0x30ED8/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EF8/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F78/4, 0x11); ++ INSTANCE_WR(ctx, 0x31078/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 
0x31098/4, 0xF); ++ INSTANCE_WR(ctx, 0x31398/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x31418/4, 0x11); ++ INSTANCE_WR(ctx, 0x31478/4, 0x4); ++ INSTANCE_WR(ctx, 0x314B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x314D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x31558/4, 0x1); ++ INSTANCE_WR(ctx, 0x315F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x31638/4, 0x1); ++ INSTANCE_WR(ctx, 0x316B8/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316F8/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x31718/4, 0x40); ++ INSTANCE_WR(ctx, 0x31738/4, 0x100); ++ INSTANCE_WR(ctx, 0x31758/4, 0x10100); ++ INSTANCE_WR(ctx, 0x31778/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319D8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319F8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A18/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A58/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A78/4, 0x1); ++ INSTANCE_WR(ctx, 0x31AD8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BF8/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C38/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C58/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C78/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C98/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CB8/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CF8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x153C/4, 0x4); ++ INSTANCE_WR(ctx, 0x17FC/4, 0x4); ++ INSTANCE_WR(ctx, 0x181C/4, 0x4); ++ INSTANCE_WR(ctx, 0x183C/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18DC/4, 0x4); ++ INSTANCE_WR(ctx, 0x193C/4, 0x4); ++ INSTANCE_WR(ctx, 0x195C/4, 0x4); ++ INSTANCE_WR(ctx, 0x197C/4, 0x80); ++ INSTANCE_WR(ctx, 0x199C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E3C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E5C/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E7C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E9C/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EBC/4, 0x3); ++ INSTANCE_WR(ctx, 0x1EDC/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F7C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F9C/4, 0x3); ++ INSTANCE_WR(ctx, 0x201C/4, 0x4); ++ INSTANCE_WR(ctx, 0x164BC/4, 0x4); ++ INSTANCE_WR(ctx, 0x164DC/4, 0x3); ++ INSTANCE_WR(ctx, 0x1671C/4, 0xF); ++ INSTANCE_WR(ctx, 0x1689C/4, 0x4); ++ INSTANCE_WR(ctx, 0x168BC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168DC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168FC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x1691C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16ABC/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D5C/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DBC/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DDC/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E1C/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F1C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F7C/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FFC/4, 0x11); ++ INSTANCE_WR(ctx, 0x1701C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1705C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x1707C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x1709C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1721C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1723C/4, 0x2); ++ INSTANCE_WR(ctx, 0x1725C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1727C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1729C/4, 0x2); ++ INSTANCE_WR(ctx, 0x172BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x172FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1731C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1733C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1735C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1737C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1739C/4, 0x1); ++ INSTANCE_WR(ctx, 0x173BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x173DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x173FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x174FC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x1751C/4, 0xF); ++ INSTANCE_WR(ctx, 0x1761C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x1767C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1769C/4, 0x1); ++ 
INSTANCE_WR(ctx, 0x1771C/4, 0x4); ++ INSTANCE_WR(ctx, 0x177DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1787C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1797C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17ADC/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x180BC/4, 0x8); ++ INSTANCE_WR(ctx, 0x180DC/4, 0x8); ++ INSTANCE_WR(ctx, 0x180FC/4, 0x8); ++ INSTANCE_WR(ctx, 0x1811C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1813C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1815C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1817C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1819C/4, 0x8); ++ INSTANCE_WR(ctx, 0x181BC/4, 0x11); ++ INSTANCE_WR(ctx, 0x182BC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x182FC/4, 0x400); ++ INSTANCE_WR(ctx, 0x1831C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1833C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1835C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1837C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1839C/4, 0x400); ++ INSTANCE_WR(ctx, 0x183BC/4, 0x400); ++ INSTANCE_WR(ctx, 0x183DC/4, 0x300); ++ INSTANCE_WR(ctx, 0x183FC/4, 0x300); ++ INSTANCE_WR(ctx, 0x1841C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1843C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1845C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1847C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1849C/4, 0x300); ++ INSTANCE_WR(ctx, 0x184BC/4, 0x300); ++ INSTANCE_WR(ctx, 0x184DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x184FC/4, 0xF); ++ INSTANCE_WR(ctx, 0x185FC/4, 0x20); ++ INSTANCE_WR(ctx, 0x1861C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1863C/4, 0x100); ++ INSTANCE_WR(ctx, 0x1867C/4, 0x1); ++ INSTANCE_WR(ctx, 0x186DC/4, 0x40); ++ INSTANCE_WR(ctx, 0x186FC/4, 0x100); ++ INSTANCE_WR(ctx, 0x1873C/4, 0x3); ++ INSTANCE_WR(ctx, 0x187DC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x1885C/4, 0x2); ++ INSTANCE_WR(ctx, 0x1887C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x18ABC/4, 0x400); ++ INSTANCE_WR(ctx, 0x18ADC/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AFC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C9C/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F9C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x1901C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1907C/4, 0x4); ++ INSTANCE_WR(ctx, 0x190BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x190DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1915C/4, 0x1); ++ INSTANCE_WR(ctx, 0x191FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1923C/4, 0x1); ++ INSTANCE_WR(ctx, 0x192BC/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192FC/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x1931C/4, 0x40); ++ INSTANCE_WR(ctx, 0x1933C/4, 0x100); ++ INSTANCE_WR(ctx, 0x1935C/4, 0x10100); ++ INSTANCE_WR(ctx, 0x1937C/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195DC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195FC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x1961C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1965C/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1967C/4, 0x1); ++ INSTANCE_WR(ctx, 0x196DC/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1983C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1985C/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x1987C/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x1989C/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198BC/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198FC/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1993C/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BFC/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C1C/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C3C/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CDC/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D3C/4, 0x4); ++ INSTANCE_WR(ctx, 
0x19D5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D7C/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D9C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A23C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A25C/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A27C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A29C/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2BC/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2DC/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A37C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A39C/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A41C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8BC/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8DC/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB1C/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC9C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECBC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECDC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECFC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED1C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEBC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F11C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F13C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F15C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F17C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F19C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1BC/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F21C/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F31C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F37C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F41C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F45C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F47C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F49C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F63C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F65C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F67C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F69C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F71C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F73C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F75C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F77C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F79C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8FC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F91C/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA1C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB1C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBDC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDFC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FEDC/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FF5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x304BC/4, 0x8); ++ INSTANCE_WR(ctx, 0x304DC/4, 0x8); ++ INSTANCE_WR(ctx, 0x304FC/4, 0x8); ++ INSTANCE_WR(ctx, 0x3051C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3053C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3055C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3057C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3059C/4, 0x8); ++ INSTANCE_WR(ctx, 0x305BC/4, 0x11); ++ INSTANCE_WR(ctx, 0x306BC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x306FC/4, 0x400); ++ INSTANCE_WR(ctx, 0x3071C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3073C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3075C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3077C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3079C/4, 0x400); ++ INSTANCE_WR(ctx, 0x307BC/4, 0x400); ++ INSTANCE_WR(ctx, 0x307DC/4, 0x300); ++ INSTANCE_WR(ctx, 0x307FC/4, 0x300); ++ INSTANCE_WR(ctx, 0x3081C/4, 0x300); ++ INSTANCE_WR(ctx, 0x3083C/4, 0x300); ++ INSTANCE_WR(ctx, 0x3085C/4, 0x300); ++ INSTANCE_WR(ctx, 0x3087C/4, 0x300); ++ 
INSTANCE_WR(ctx, 0x3089C/4, 0x300); ++ INSTANCE_WR(ctx, 0x308BC/4, 0x300); ++ INSTANCE_WR(ctx, 0x308DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x308FC/4, 0xF); ++ INSTANCE_WR(ctx, 0x309FC/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A1C/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A3C/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x30ADC/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AFC/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B3C/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BDC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C5C/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DBC/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EBC/4, 0x400); ++ INSTANCE_WR(ctx, 0x30EDC/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EFC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x3107C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x3109C/4, 0xF); ++ INSTANCE_WR(ctx, 0x3139C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x3141C/4, 0x11); ++ INSTANCE_WR(ctx, 0x3147C/4, 0x4); ++ INSTANCE_WR(ctx, 0x314BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x314DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3155C/4, 0x1); ++ INSTANCE_WR(ctx, 0x315FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3163C/4, 0x1); ++ INSTANCE_WR(ctx, 0x316BC/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316FC/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x3171C/4, 0x40); ++ INSTANCE_WR(ctx, 0x3173C/4, 0x100); ++ INSTANCE_WR(ctx, 0x3175C/4, 0x10100); ++ INSTANCE_WR(ctx, 0x3177C/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319DC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319FC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A5C/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x31ADC/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BFC/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C5C/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C7C/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C9C/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CBC/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CFC/4, 0x1A); ++ INSTANCE_WR(ctx, 0x5D000/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D040/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D060/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D080/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D0A0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D100/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D160/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D1A0/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1C0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D340/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D360/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D380/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D3A0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D400/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D460/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D4A0/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4C0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D620/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D700/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D720/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D740/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D760/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D780/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D7A0/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7C0/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D820/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8E0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D900/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D940/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D960/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA80/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB20/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC60/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC80/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCC0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCE0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD00/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD20/4, 0x1); ++ INSTANCE_WR(ctx, 
0x5DD60/4, 0x4); ++ INSTANCE_WR(ctx, 0x651C0/4, 0x11); ++ INSTANCE_WR(ctx, 0x65200/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D024/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D044/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D064/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D084/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D144/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D184/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1A4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D324/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D344/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D364/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D384/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D444/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D484/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4A4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D604/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6E4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D704/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D724/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D744/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D764/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D784/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D804/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8C4/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8E4/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D924/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D944/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA64/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB04/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC44/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC64/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC84/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCA4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCC4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD04/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD44/4, 0x4); ++ INSTANCE_WR(ctx, 0x651A4/4, 0x11); ++ INSTANCE_WR(ctx, 0x651E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D028/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D048/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D068/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D088/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D148/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D188/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1A8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D328/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D348/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D368/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D388/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D448/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D488/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4A8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D608/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6E8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D708/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D728/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D748/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D768/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D788/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7A8/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D808/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8C8/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D928/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D948/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA68/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB08/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC48/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC68/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC88/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCA8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCC8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD08/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD48/4, 0x4); ++ INSTANCE_WR(ctx, 0x651A8/4, 0x11); ++ INSTANCE_WR(ctx, 0x651E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D02C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D04C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D06C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D08C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0EC/4, 
0x1); ++ INSTANCE_WR(ctx, 0x5D14C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D18C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1AC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D32C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D34C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D36C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D38C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D44C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D48C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4AC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D60C/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6EC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D70C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D72C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D74C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D76C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D78C/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7AC/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D80C/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8CC/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8EC/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D92C/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D94C/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA6C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB0C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC8C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCAC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD0C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD4C/4, 0x4); ++ INSTANCE_WR(ctx, 0x651AC/4, 0x11); ++ INSTANCE_WR(ctx, 0x651EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D030/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D050/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D070/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D090/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D150/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D190/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1B0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D330/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D350/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D370/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D390/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D450/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D490/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4B0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D610/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6F0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D710/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D730/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D750/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D770/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D790/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7B0/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D810/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8D0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D930/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D950/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB10/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC50/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC70/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC90/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCB0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD10/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD50/4, 0x4); ++ INSTANCE_WR(ctx, 0x651B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x651F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D034/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D054/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D074/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D094/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D154/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D194/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1B4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D334/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D354/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D374/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D394/4, 0x1000); ++ 
INSTANCE_WR(ctx, 0x5D3F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D454/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D494/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4B4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D614/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6F4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D714/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D734/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D754/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D774/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D794/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7B4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D814/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8D4/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8F4/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D934/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D954/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB14/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC54/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC74/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC94/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCB4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCD4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCF4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD14/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD54/4, 0x4); ++ INSTANCE_WR(ctx, 0x651B4/4, 0x11); ++ INSTANCE_WR(ctx, 0x651F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D038/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D058/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D078/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D098/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D158/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D198/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1B8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D338/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D358/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D378/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D398/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D458/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D498/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4B8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D618/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6F8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D718/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D738/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D758/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D778/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D798/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7B8/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D818/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8D8/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8F8/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D938/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D958/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB18/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC58/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC78/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC98/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCB8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCD8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCF8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD18/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD58/4, 0x4); ++ INSTANCE_WR(ctx, 0x651B8/4, 0x11); ++ INSTANCE_WR(ctx, 0x651F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D03C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D05C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D07C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D09C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D15C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D19C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1BC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D33C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D35C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D37C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D39C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D45C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D49C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4BC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D61C/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6FC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D71C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 
0x5D73C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D75C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D77C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D79C/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7BC/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D81C/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8DC/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8FC/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D93C/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D95C/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB1C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC5C/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCBC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCDC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCFC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x651BC/4, 0x11); ++ INSTANCE_WR(ctx, 0x651FC/4, 0x1); ++} ++ ++static void ++nvaa_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x001d4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00220/4, 0x0000fe0c); ++ INSTANCE_WR(ctx, 0x00238/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00264/4, 0x00000187); ++ INSTANCE_WR(ctx, 0x00278/4, 0x00001018); ++ INSTANCE_WR(ctx, 0x0027c/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x002cc/4, 0x042500df); ++ INSTANCE_WR(ctx, 0x002d4/4, 0x00000600); ++ INSTANCE_WR(ctx, 0x002ec/4, 0x01000000); ++ INSTANCE_WR(ctx, 0x002f0/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002f8/4, 0x00000800); ++ INSTANCE_WR(ctx, 0x00310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00310/4, 0x000e0080); ++ INSTANCE_WR(ctx, 0x00310/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00338/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0033c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00350/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0036c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00380/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00384/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x00390/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00394/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00400/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00408/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00414/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0042c/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00444/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x0044c/4, 0x00000008); ++ 
INSTANCE_WR(ctx, 0x00450/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000029); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004d8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00524/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00540/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00544/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00548/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00558/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00588/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0058c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x00604/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x000000f0); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x00618/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0061c/4, 0x000000f0); ++ INSTANCE_WR(ctx, 0x00620/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x00628/4, 0x00000009); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00638/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00640/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00650/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00658/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00660/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00668/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00670/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00674/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00678/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00680/4, 0x00001f80); ++ INSTANCE_WR(ctx, 0x00698/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x006a4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x006a8/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x006b0/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x006b4/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x006cc/4, 0x003d0040); ++ INSTANCE_WR(ctx, 0x006d4/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x003d0040); ++ INSTANCE_WR(ctx, 0x006f8/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x00740/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00748/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0074c/4, 
0x00160000); ++ INSTANCE_WR(ctx, 0x00750/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00760/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00764/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00788/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00790/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00798/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x007a0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x007a4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x007b0/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x007c8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x007d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007e0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x007e4/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00808/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00810/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00818/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00820/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00824/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00830/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x00848/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0084c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00850/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00860/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00864/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00888/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00890/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00898/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x008a0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x008a4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x008b0/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x008c8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00908/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00910/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00918/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00920/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00924/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00930/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x0094c/4, 0x01127070); ++ INSTANCE_WR(ctx, 0x0095c/4, 0x07ffffff); ++ INSTANCE_WR(ctx, 0x00978/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00978/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00978/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x00978/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x003fe006); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x003fe000); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x0cf7f007); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x02bf7fff); ++ INSTANCE_WR(ctx, 0x07ba0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x07bc0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07be0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x07c00/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07c20/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07c40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07ca0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07cc0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x07ce0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07d00/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07d20/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1a7c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a7e0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a800/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a820/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a840/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a860/4, 0x04000000); ++ INSTANCE_WR(ctx, 
0x1a880/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a8a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a8c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a8e0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a900/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a920/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a940/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a960/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a980/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a9a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1ae40/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1ae60/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1aec0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x1aee0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x1af80/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x1b020/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1b080/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x1b0c0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x1b0e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1b100/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x1b120/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1b140/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1b160/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1be20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1bf00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1bf20/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1bf80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1c1e0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1c2c0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1c3c0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x1c3e0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1c5e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1c640/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1c6a0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1c6c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1c6e0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1c760/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x1c780/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x1c820/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1ca40/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1ca60/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1ca80/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1caa0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cac0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cae0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb00/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb40/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb60/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb80/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cba0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cbc0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cbe0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cc00/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cc20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cc40/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1d120/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x1d140/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x1d1a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1d1e0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d200/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d220/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d240/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d260/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1d2e0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1d300/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1d340/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1dae0/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x1db20/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1db40/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1db60/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1db80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dca0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dcc0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dd00/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x1dd40/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x1de80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dea0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dec0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dee0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000004); ++ 
INSTANCE_WR(ctx, 0x00a24/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00a64/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00a84/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x00ae4/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0b344/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0b364/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0b3a4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0b3c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0b3e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b424/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x0b464/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x010c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x010e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39a68/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39a88/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39aa8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x39ac8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x39b08/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x39b48/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x39b68/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x39b88/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39ba8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39bc8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x39c28/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x39c48/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x39ca8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x414e8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x417c8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00a2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00acc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00b6c/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x00d6c/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x00f2c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00f4c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00f8c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00fac/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00fec/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0118c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0362c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0366c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x041cc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1484c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x15950/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x159b0/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x00a34/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00c74/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00e14/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00e54/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x00ff4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01014/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01074/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01114/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01134/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x01154/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x01174/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x01194/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x01254/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01374/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01394/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x013d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01654/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01874/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01894/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x018b4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x018d4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x018f4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01914/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01934/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01954/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01974/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01994/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x019b4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x019d4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x019f4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01a14/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01a34/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01a54/4, 
0x3f800000); ++ INSTANCE_WR(ctx, 0x01d94/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01dd4/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x01eb4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01ef4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01f34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01f94/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x02114/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02214/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x02314/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x023f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02414/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02434/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02454/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02474/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02494/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x024b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x024f4/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x02534/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x028b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x028d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x028f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02934/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02954/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02974/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02a14/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02a34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x00b78/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00b98/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00d58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00f98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00fb8/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00fd8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ff8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x01018/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x01038/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x01458/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01478/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01498/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x014b8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x014d8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x014f8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01518/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01538/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01558/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01578/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01598/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x015b8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x015d8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x015f8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01618/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01638/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01658/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x016b8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01878/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x01898/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x018d8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01958/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01a38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01a58/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01a78/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01a98/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x01ad8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x01b98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01bd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01bf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01c18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01c38/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x01c58/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01d38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01d78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01d98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01db8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01e58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01e98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01eb8/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x01f38/4, 0x04444480); ++ INSTANCE_WR(ctx, 
0x02698/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x026d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02758/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x02798/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x027b8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x027d8/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x027f8/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x02818/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x02b58/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x02cd8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02cf8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02d18/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02d38/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02d58/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02e78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02ef8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02fb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03018/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03178/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03198/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x031b8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x031d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x031f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03218/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03238/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03278/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03378/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x033d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x03458/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x034b8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x034d8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x034f8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x03658/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03678/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03698/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x036b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x036d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x036f8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03718/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03758/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03798/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x037b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x037d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x037f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03818/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03838/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03858/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03958/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x03978/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x03a78/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x03ad8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03af8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03b78/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x03c38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03cd8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03dd8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x03e58/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03e78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03eb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03ef8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03f38/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x03f78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03fb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04518/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04538/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04558/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04578/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04598/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x045b8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x045d8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x045f8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04618/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04718/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x04738/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04758/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04778/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04798/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x047b8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x047d8/4, 0x00000400); ++ 
INSTANCE_WR(ctx, 0x047f8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04818/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04838/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04858/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04878/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04898/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x048b8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x048d8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x048f8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04918/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04938/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04958/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x04a58/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x04a78/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04a98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x04ad8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04b38/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x04b58/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x04b98/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x04c38/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x04cb8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04cd8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x04e18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04eb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x04ef8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04f18/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04f38/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04f58/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x04fd8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x050d8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x050f8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x053f8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x05418/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x05498/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x054f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x05538/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05558/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x055d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05678/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x05718/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x05758/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x057d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05938/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05978/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05998/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x059b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x059d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x059f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05a18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05a38/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x05b38/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x05b58/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x05c58/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x05c78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05df8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x05e18/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x05e38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05e78/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x05e98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05ef8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x06018/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x06058/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x06078/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x06098/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x060b8/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x060d8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x06118/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x06158/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x063f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06418/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06438/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x064d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06538/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06558/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06578/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x06598/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x065b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06a58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06a78/4, 
0x00000080); ++ INSTANCE_WR(ctx, 0x06a98/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06ab8/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x06ad8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x06af8/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x06b18/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06bb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06bd8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x06c58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0aef8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0af18/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x00b1c/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x00b5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00b9c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00c3c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x00cfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x0000007f); ++ INSTANCE_WR(ctx, 0x00d7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00d9c/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x00ddc/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00dfc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00e1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00e5c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00edc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00efc/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00fdc/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x00ffc/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x0171c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0177c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01e9c/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x01ebc/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x01f1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021fc/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x0225c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x022dc/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x022fc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0281c/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x0285c/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x0289c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x028bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x028fc/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0295c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x41800/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x41840/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x41860/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x41880/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x418a0/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x418c0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x41920/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x41940/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x41960/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x419c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x41a00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x41a20/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x41ba0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x41be0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x41ca0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41cc0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41ce0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41d00/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41d20/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x41d40/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x41d60/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x41d80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x41dc0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x41e80/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x41ea0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x41ee0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x41f00/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x42020/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x420c0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x42200/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x42220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x42240/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x42260/4, 0x00000002); ++ INSTANCE_WR(ctx, 
0x42280/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x422a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x422c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x42300/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x49700/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x49740/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0012c/4, 0x00000002); ++} ++ ++int ++nv50_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int grctx_size = 0x70000, hdr; ++ int ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); ++ if (ret) ++ return ret; ++ ++ hdr = IS_G80 ? 0x200 : 0x20; ++ INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002); ++ INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + ++ grctx_size - 1); ++ INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); ++ INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0); ++ INSTANCE_WR(ramin, (hdr + 0x10)/4, 0); ++ INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); ++ ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4, ++ chan->ramin->instance >> 12); ++ if (dev_priv->chipset == 0xaa) ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00004/4, 0x00000002); ++ else ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002); ++ ++ switch (dev_priv->chipset) { ++ case 0x50: ++ nv50_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x84: ++ nv84_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x86: ++ nv86_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x92: ++ nv92_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0xaa: ++ nvaa_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ default: ++ /* This is complete crack, it accidently used to make at ++ * least some G8x cards work partially somehow, though there's ++ * no good reason why - and it stopped working as the rest ++ * of the code got off the drugs.. ++ */ ++ ret = engine->graph.load_context(chan); ++ if (ret) { ++ DRM_ERROR("Error hacking up context: %d\n", ret); ++ return ret; ++ } ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i, hdr; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ hdr = IS_G80 ? 0x200 : 0x20; ++ for (i=hdr; i<hdr+24; i+=4) ++ INSTANCE_WR(chan->ramin->gpuobj, i/4, 0); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); ++} ++ ++static int ++nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t old_cp, tv = 20000; ++ int i; ++ ++ DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save); ++ ++ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(0x400824, NV_READ(0x400824) | ++ (save ?
NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : ++ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD)); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); ++ ++ for (i = 0; i < tv; i++) { ++ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) ++ break; ++ } ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); ++ ++ if (i == tv) { ++ DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); ++ DRM_ERROR("0x40030C = 0x%08x\n", ++ NV_READ(NV40_PGRAPH_CTXCTL_030C)); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++int ++nv50_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst = chan->ramin->instance >> 12; ++ int ret; (void)ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++#if 0 ++ if ((ret = nv50_graph_transfer_context(dev, inst, 0))) ++ return ret; ++#endif ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(0x400320, 4); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31)); ++ ++ return 0; ++} ++ ++int ++nv50_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ uint32_t inst = chan->ramin->instance >> 12; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ return nv50_graph_transfer_context(dev, inst, 1); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_instmem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_instmem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_instmem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_instmem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,324 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++typedef struct { ++ uint32_t save1700[5]; /* 0x1700->0x1710 */ ++ ++ struct nouveau_gpuobj_ref *pramin_pt; ++ struct nouveau_gpuobj_ref *pramin_bar; ++} nv50_instmem_priv; ++ ++#define NV50_INSTMEM_PAGE_SHIFT 12 ++#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) ++#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) ++ ++/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN ++ */ ++#define BAR0_WI32(g,o,v) do { \ ++ uint32_t offset; \ ++ if ((g)->im_backing) { \ ++ offset = (g)->im_backing->start; \ ++ } else { \ ++ offset = chan->ramin->gpuobj->im_backing->start; \ ++ offset += (g)->im_pramin->start; \ ++ } \ ++ offset += (o); \ ++ NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \ ++} while(0) ++ ++int ++nv50_instmem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan; ++ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; ++ nv50_instmem_priv *priv; ++ int ret, i; ++ uint32_t v; ++ ++ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); ++ if (!priv) ++ return -ENOMEM; ++ dev_priv->Engine.instmem.priv = priv; ++ ++ /* Save state, will restore at takedown. */ ++ for (i = 0x1700; i <= 0x1710; i+=4) ++ priv->save1700[(i-0x1700)/4] = NV_READ(i); ++ ++ /* Reserve the last MiB of VRAM, we should probably try to avoid ++ * setting up the below tables over the top of the VBIOS image at ++ * some point. ++ */ ++ dev_priv->ramin_rsvd_vram = 1 << 20; ++ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; ++ c_size = 128 << 10; ++ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; ++ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; ++ c_base = c_vmpd + 0x4000; ++ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size); ++ ++ DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset); ++ DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); ++ DRM_DEBUG(" Aperture size: %d MiB\n", ++ (uint32_t)dev_priv->ramin->size >> 20); ++ DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10); ++ ++ NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); ++ ++ /* Create a fake channel, and use it as our "dummy" channels 0/127. ++ * The main reason for creating a channel is so we can use the gpuobj ++ * code. However, it's probably worth noting that NVIDIA also setup ++ * their channels 0/127 with the same values they configure here. ++ * So, there may be some other reason for doing this. ++ * ++ * Have to create the entire channel manually, as the real channel ++ * creation code assumes we have PRAMIN access, and we don't until ++ * we're done here. 
++ */ ++ chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER); ++ if (!chan) ++ return -ENOMEM; ++ chan->id = 0; ++ chan->dev = dev; ++ chan->file_priv = (struct drm_file *)-2; ++ dev_priv->fifos[0] = dev_priv->fifos[127] = chan; ++ ++ /* Channel's PRAMIN object + heap */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0, ++ NULL, &chan->ramin))) ++ return ret; ++ ++ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) ++ return -ENOMEM; ++ ++ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, ++ 0x4000, 0, NULL, &chan->ramfc))) ++ return ret; ++ ++ for (i = 0; i < c_vmpd; i += 4) ++ BAR0_WI32(chan->ramin->gpuobj, i, 0); ++ ++ /* VM page directory */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, ++ 0x4000, 0, &chan->vm_pd, NULL))) ++ return ret; ++ for (i = 0; i < 0x4000; i += 8) { ++ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); ++ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); ++ } ++ ++ /* PRAMIN page table, cheat and map into VM at 0x0000000000. ++ * We map the entire fake channel into the start of the PRAMIN BAR ++ */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, ++ 0, &priv->pramin_pt))) ++ return ret; ++ ++ for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) { ++ if (v < (c_offset + c_size)) ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); ++ else ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); ++ } ++ ++ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); ++ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); ++ ++ /* DMA object for PRAMIN BAR */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, ++ &priv->pramin_bar))) ++ return ret; ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); ++ ++ /* Poke the relevant regs, and pray it works :) */ ++ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); ++ NV_WRITE(NV50_PUNK_UNK1710, 0); ++ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | ++ NV50_PUNK_BAR_CFG_BASE_VALID); ++ NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0); ++ NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | ++ NV50_PUNK_BAR3_CTXDMA_VALID); ++ ++ /* Assume that praying isn't enough, check that we can re-read the ++ * entire fake channel back from the PRAMIN BAR */ ++ for (i = 0; i < c_size; i+=4) { ++ if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) { ++ DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i); ++ return -EINVAL; ++ } ++ } ++ ++ /* Global PRAMIN heap */ ++ if (nouveau_mem_init_heap(&dev_priv->ramin_heap, ++ c_size, dev_priv->ramin->size - c_size)) { ++ dev_priv->ramin_heap = NULL; ++ DRM_ERROR("Failed to init RAMIN heap\n"); ++ } ++ ++ /*XXX: incorrect, but needed to make hash func "work" */ ++ dev_priv->ramht_offset = 0x10000; ++ dev_priv->ramht_bits = 9; ++ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); ++ return 0; ++} ++ ++void ++nv50_instmem_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ struct nouveau_channel *chan = dev_priv->fifos[0]; ++ int i; 
++ ++ DRM_DEBUG("\n"); ++ ++ if (!priv) ++ return; ++ ++ /* Restore state from before init */ ++ for (i = 0x1700; i <= 0x1710; i+=4) ++ NV_WRITE(i, priv->save1700[(i-0x1700)/4]); ++ ++ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); ++ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); ++ ++ /* Destroy dummy channel */ ++ if (chan) { ++ nouveau_gpuobj_del(dev, &chan->vm_pd); ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ nouveau_mem_takedown(&chan->ramin_heap); ++ ++ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; ++ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); ++ } ++ ++ dev_priv->Engine.instmem.priv = NULL; ++ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); ++} ++ ++int ++nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) ++{ ++ if (gpuobj->im_backing) ++ return -EINVAL; ++ ++ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); ++ if (*sz == 0) ++ return -EINVAL; ++ ++ gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE, ++ *sz, NOUVEAU_MEM_FB | ++ NOUVEAU_MEM_NOVM, ++ (struct drm_file *)-2); ++ if (!gpuobj->im_backing) { ++ DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (gpuobj && gpuobj->im_backing) { ++ if (gpuobj->im_bound) ++ dev_priv->Engine.instmem.unbind(dev, gpuobj); ++ nouveau_mem_free(dev, gpuobj->im_backing); ++ gpuobj->im_backing = NULL; ++ } ++} ++ ++int ++nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ uint32_t pte, pte_end, vram; ++ ++ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) ++ return -EINVAL; ++ ++ DRM_DEBUG("st=0x%0llx sz=0x%0llx\n", ++ gpuobj->im_pramin->start, gpuobj->im_pramin->size); ++ ++ pte = (gpuobj->im_pramin->start >> 12) << 3; ++ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; ++ vram = gpuobj->im_backing->start; ++ ++ DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", ++ gpuobj->im_pramin->start, pte, pte_end); ++ DRM_DEBUG("first vram page: 0x%llx\n", ++ gpuobj->im_backing->start); ++ ++ while (pte < pte_end) { ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); ++ ++ pte += 8; ++ vram += NV50_INSTMEM_PAGE_SIZE; ++ } ++ ++ gpuobj->im_bound = 1; ++ return 0; ++} ++ ++int ++nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ uint32_t pte, pte_end; ++ ++ if (gpuobj->im_bound == 0) ++ return -EINVAL; ++ ++ pte = (gpuobj->im_pramin->start >> 12) << 3; ++ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; ++ while (pte < pte_end) { ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); ++ pte += 8; ++ } ++ ++ gpuobj->im_bound = 0; ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_mc.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_mc.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/nv50_mc.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/nv50_mc.c 
2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,43 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++int ++nv50_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void nv50_mc_takedown(struct drm_device *dev) ++{ ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/pvr2d_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/pvr2d_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/pvr2d_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/pvr2d_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,45 @@ ++/* pvr2d_drm.h -- Public header for the PVR2D helper module -*- linux-c -*- ++ * ++ * Copyright (C) 2008 Nokia Corporation. All rights reserved. ++ */ ++ ++#ifndef __PVR2D_DRM_H__ ++#define __PVR2D_DRM_H__ ++ ++ ++/* This wouldn't work with 64 bit userland */ ++struct drm_pvr2d_buf_lock { ++ uint32_t virt; ++ uint32_t length; ++ uint32_t phys_array; ++ uint32_t handle; ++}; ++ ++struct drm_pvr2d_buf_release { ++ uint32_t handle; ++}; ++ ++enum drm_pvr2d_cflush_type { ++ DRM_PVR2D_CFLUSH_FROM_GPU = 1, ++ DRM_PVR2D_CFLUSH_TO_GPU = 2 ++}; ++ ++struct drm_pvr2d_cflush { ++ enum drm_pvr2d_cflush_type type; ++ uint32_t virt; ++ uint32_t length; ++}; ++ ++#define DRM_PVR2D_BUF_LOCK 0x0 ++#define DRM_PVR2D_BUF_RELEASE 0x1 ++#define DRM_PVR2D_CFLUSH 0x2 ++ ++#define DRM_IOCTL_PVR2D_BUF_LOCK DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR2D_BUF_LOCK, \ ++ struct drm_pvr2d_buf_lock) ++#define DRM_IOCTL_PVR2D_BUF_RELEASE DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_BUF_RELEASE, \ ++ struct drm_pvr2d_buf_release) ++#define DRM_IOCTL_PVR2D_CFLUSH DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_CFLUSH, \ ++ struct drm_pvr2d_cflush) ++ ++ ++#endif /* __PVR2D_DRM_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/pvr2d_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/pvr2d_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/pvr2d_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/pvr2d_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,548 @@ ++/* ++ * Copyright (C) 2008 Nokia Corporation. All rights reserved. 
++ */ ++ ++ ++#include "drmP.h" ++#include "drm_pciids.h" ++ ++#include "pvr2d_drm.h" ++#include "pvr2d_drv.h" ++ ++#define PVR2D_SHMEM_HASH_ORDER 12 ++ ++struct pvr2d_dev { ++ rwlock_t hash_lock; ++ struct drm_open_hash shmem_hash; ++}; ++ ++struct pvr2d_buf { ++ struct pvr2d_dev *dev_priv; ++ struct drm_hash_item hash; ++ struct page **pages; ++ struct kref kref; ++ uint32_t num_pages; ++}; ++ ++/* ++ * This pvr2d_ref object is needed strictly because ++ * idr_for_each doesn't exist in 2.6.22. With kernels ++ * supporting this function, we can use it to traverse ++ * the file list of buffers at file release. ++ */ ++ ++struct pvr2d_ref{ ++ struct list_head head; ++ struct pvr2d_buf *buf; ++}; ++ ++struct pvr2d_file { ++ spinlock_t lock; ++ struct list_head ref_list; ++ struct idr buf_idr; ++}; ++ ++static inline struct pvr2d_dev *pvr2d_dp(struct drm_device *dev) ++{ ++ return (struct pvr2d_dev *) dev->dev_private; ++} ++ ++static inline struct pvr2d_file *pvr2d_fp(struct drm_file *file_priv) ++{ ++ return (struct pvr2d_file *) file_priv->driver_priv; ++} ++ ++ ++static void ++pvr2d_free_buf(struct pvr2d_buf *buf) ++{ ++ uint32_t i; ++ ++ for (i=0; i<buf->num_pages; ++i) { ++ struct page *page = buf->pages[i]; ++ ++ if (!PageReserved(page)) ++ set_page_dirty_lock(page); ++ ++ put_page(page); ++ } ++ ++ kfree(buf->pages); ++ kfree(buf); ++} ++ ++static void ++pvr2d_release_buf(struct kref *kref) ++{ ++ struct pvr2d_buf *buf = ++ container_of(kref, struct pvr2d_buf, kref); ++ ++ struct pvr2d_dev *dev_priv = buf->dev_priv; ++ ++ drm_ht_remove_item(&dev_priv->shmem_hash, &buf->hash); ++ write_unlock(&dev_priv->hash_lock); ++ pvr2d_free_buf(buf); ++ write_lock(&dev_priv->hash_lock); ++} ++ ++static struct pvr2d_buf * ++pvr2d_alloc_buf(struct pvr2d_dev *dev_priv, uint32_t num_pages) ++{ ++ struct pvr2d_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL); ++ ++ if (unlikely(!buf)) ++ return NULL; ++ ++ buf->pages = kmalloc(num_pages * sizeof(*buf->pages), GFP_KERNEL); ++ if (unlikely(!buf->pages)) ++ goto out_err0; ++ ++ buf->dev_priv = dev_priv; ++ buf->num_pages = num_pages; ++ ++ ++ DRM_DEBUG("pvr2d_alloc_buf successfully completed.\n"); ++ return buf; ++ ++out_err0: ++ kfree(buf); ++ ++ return NULL; ++} ++ ++ ++static struct pvr2d_buf* ++pvr2d_lookup_buf(struct pvr2d_dev *dev_priv, struct page *first_phys) ++{ ++ struct drm_hash_item *hash; ++ struct pvr2d_buf *buf = NULL; ++ int ret; ++ ++ read_lock(&dev_priv->hash_lock); ++ ret = drm_ht_find_item(&dev_priv->shmem_hash, ++ (unsigned long)first_phys, ++ &hash); ++ ++ if (likely(ret == 0)) { ++ buf = drm_hash_entry(hash, struct pvr2d_buf, hash); ++ kref_get(&buf->kref); ++ } ++ read_unlock(&dev_priv->hash_lock); ++ ++ if (buf != NULL) { ++ DRM_DEBUG("pvr2d_lookup_buf found already used buffer.\n"); ++ } ++ ++ return buf; ++} ++ ++ ++static int ++pvr2d_buf_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_pvr2d_buf_lock *bl = data; ++ uint32_t i; ++ unsigned nr_pages = ((bl->virt & ~PAGE_MASK) + bl->length + PAGE_SIZE - ++ 1) / PAGE_SIZE; ++ struct page *first_page; ++ struct pvr2d_buf *buf = NULL; ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv); ++ struct pvr2d_ref *ref; ++ int ret; ++ ++ ++ /* ++ * Obtain a global hash key for the pvr2d buffer structure. ++ * We use the address of the struct page of the first ++ * page.
++ */ ++ ++ down_read(¤t->mm->mmap_sem); ++ ret = get_user_pages(current, current->mm, bl->virt & PAGE_MASK, ++ 1, WRITE, 0, &first_page, NULL); ++ up_read(¤t->mm->mmap_sem); ++ ++ if (unlikely(ret < 1)) { ++ DRM_ERROR("Failed getting first page: %d\n", ret); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Look up buffer already in the hash table, or create ++ * and insert a new one. ++ */ ++ ++ while(buf == NULL) { ++ buf = pvr2d_lookup_buf(dev_priv, first_page); ++ ++ if (likely(buf != NULL)) ++ break; ++ ++ /* ++ if (!capable(CAP_SYS_ADMIN)) { ++ ret = -EPERM; ++ goto out_put; ++ } ++ */ ++ ++ buf = pvr2d_alloc_buf(dev_priv, nr_pages); ++ if (unlikely(buf == NULL)) { ++ DRM_ERROR("Failed allocating pvr2d buffer.\n"); ++ ret = -ENOMEM; ++ goto out_put; ++ } ++ ++ down_read(¤t->mm->mmap_sem); ++ ret = get_user_pages(current, current->mm, bl->virt & PAGE_MASK, ++ nr_pages, WRITE, 0, buf->pages, NULL); ++ up_read(¤t->mm->mmap_sem); ++ ++ if (unlikely(ret < nr_pages)) { ++ DRM_ERROR("Failed getting user pages.\n"); ++ buf->num_pages = ret; ++ ret = -ENOMEM; ++ pvr2d_free_buf(buf); ++ goto out_put; ++ } ++ ++ kref_init(&buf->kref); ++ buf->hash.key = (unsigned long) first_page; ++ ++ write_lock(&dev_priv->hash_lock); ++ ret = drm_ht_insert_item(&dev_priv->shmem_hash, &buf->hash); ++ write_unlock(&dev_priv->hash_lock); ++ ++ if (unlikely(ret == -EINVAL)) { ++ ++ /* ++ * Somebody raced us and already ++ * inserted this buffer. ++ * Very unlikely, but retry anyway. ++ */ ++ ++ pvr2d_free_buf(buf); ++ buf = NULL; ++ } ++ } ++ ++ /* ++ * Create a reference object that is used for unreferencing ++ * either by user action or when the drm file is closed. ++ */ ++ ++ ref = kmalloc(sizeof(*ref), GFP_KERNEL); ++ if (unlikely(ref == NULL)) ++ goto out_err0; ++ ++ ref->buf = buf; ++ do { ++ if (idr_pre_get(&pvr2d_fpriv->buf_idr, GFP_KERNEL) == 0) { ++ ret = -ENOMEM; ++ DRM_ERROR("Failed idr_pre_get\n"); ++ goto out_err1; ++ } ++ ++ spin_lock( &pvr2d_fpriv->lock ); ++ ret = idr_get_new( &pvr2d_fpriv->buf_idr, ref, &bl->handle); ++ ++ if (likely(ret == 0)) ++ list_add_tail(&ref->head, &pvr2d_fpriv->ref_list); ++ ++ spin_unlock( &pvr2d_fpriv->lock ); ++ ++ } while (unlikely(ret == -EAGAIN)); ++ ++ if (unlikely(ret != 0)) ++ goto out_err1; ++ ++ ++ /* ++ * Copy info to user-space. 
++ */ ++ ++ DRM_DEBUG("Locking range of %u bytes at virtual 0x%08x, physical array at 0x%08x\n", ++ bl->length, bl->virt, bl->phys_array); ++ ++ for (i = 0; i < nr_pages; i++) { ++ uint32_t physical = (uint32_t)page_to_pfn(buf->pages[i]) << PAGE_SHIFT; ++ DRM_DEBUG("Virtual 0x%08lx => Physical 0x%08x\n", ++ bl->virt + i * PAGE_SIZE, physical); ++ ++ if (DRM_COPY_TO_USER((void*)(bl->phys_array + ++ i * sizeof(uint32_t)), ++ &physical, sizeof(uint32_t))) { ++ ret = -EFAULT; ++ goto out_err2; ++ } ++ ++ } ++ ++#ifdef CONFIG_X86 ++ /* XXX: Quick'n'dirty hack to avoid corruption on Poulsbo, remove when ++ * there's a better solution ++ */ ++ wbinvd(); ++#endif ++ ++ DRM_DEBUG("pvr2d_buf_lock returning handle 0x%08x\n", ++ bl->handle); ++ ++out_put: ++ put_page(first_page); ++ return ret; ++ ++out_err2: ++ spin_lock( &pvr2d_fpriv->lock ); ++ list_del(&ref->head); ++ idr_remove( &pvr2d_fpriv->buf_idr, bl->handle); ++ spin_unlock( &pvr2d_fpriv->lock ); ++out_err1: ++ kfree(ref); ++out_err0: ++ write_lock(&dev_priv->hash_lock); ++ kref_put(&buf->kref, &pvr2d_release_buf); ++ write_unlock(&dev_priv->hash_lock); ++ put_page(first_page); ++ return ret; ++} ++ ++ ++static int ++pvr2d_buf_release(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ struct drm_pvr2d_buf_release *br = data; ++ struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv); ++ struct pvr2d_buf *buf; ++ struct pvr2d_ref *ref; ++ ++ DRM_DEBUG("pvr2d_buf_release releasing 0x%08x\n", ++ br->handle); ++ ++ spin_lock( &pvr2d_fpriv->lock ); ++ ref = idr_find( &pvr2d_fpriv->buf_idr, br->handle); ++ ++ if (unlikely(ref == NULL)) { ++ spin_unlock( &pvr2d_fpriv->lock ); ++ DRM_ERROR("Could not find pvr2d buf to unref.\n"); ++ return -EINVAL; ++ } ++ (void) idr_remove( &pvr2d_fpriv->buf_idr, br->handle); ++ list_del(&ref->head); ++ spin_unlock( &pvr2d_fpriv->lock ); ++ ++ buf = ref->buf; ++ kfree(ref); ++ ++ write_lock(&dev_priv->hash_lock); ++ kref_put(&buf->kref, &pvr2d_release_buf); ++ write_unlock(&dev_priv->hash_lock); ++ ++ return 0; ++} ++ ++static int ++pvr2d_cflush(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_pvr2d_cflush *cf = data; ++ ++ switch (cf->type) { ++ case DRM_PVR2D_CFLUSH_FROM_GPU: ++ DRM_DEBUG("DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n", ++ cf->virt, cf->length); ++#ifdef CONFIG_ARM ++ dmac_inv_range((const void*)cf->virt, ++ (const void*)(cf->virt + cf->length)); ++#endif ++ return 0; ++ case DRM_PVR2D_CFLUSH_TO_GPU: ++ DRM_DEBUG("DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n", ++ cf->virt, cf->length); ++#ifdef CONFIG_ARM ++ dmac_clean_range((const void*)cf->virt, ++ (const void*)(cf->virt + cf->length)); ++#endif ++ return 0; ++ default: ++ DRM_ERROR("Invalid cflush type 0x%x\n", cf->type); ++ return -EINVAL; ++ } ++} ++ ++static int ++pvr2d_open(struct inode *inode, struct file *filp) ++{ ++ int ret; ++ struct pvr2d_file *pvr2d_fpriv; ++ struct drm_file *file_priv; ++ ++ pvr2d_fpriv = kmalloc(sizeof(*pvr2d_fpriv), GFP_KERNEL); ++ if (unlikely(pvr2d_fpriv == NULL)) ++ return -ENOMEM; ++ ++ spin_lock_init(&pvr2d_fpriv->lock); ++ INIT_LIST_HEAD(&pvr2d_fpriv->ref_list); ++ idr_init(&pvr2d_fpriv->buf_idr); ++ ++ ret = drm_open(inode, filp); ++ ++ if (unlikely(ret != 0)) { ++ idr_destroy(&pvr2d_fpriv->buf_idr); ++ kfree(pvr2d_fpriv); ++ return ret; ++ } ++ ++ file_priv = filp->private_data; ++ file_priv->driver_priv = pvr2d_fpriv; ++ ++ DRM_DEBUG("pvr2d_open completed successfully.\n"); ++ return 0; ++}; ++ ++ 
++static int ++pvr2d_release(struct inode *inode, struct file *filp) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv); ++ struct pvr2d_buf *buf; ++ struct pvr2d_ref *ref, *next; ++ ++ /* ++ * At this point we're the only user of the list, so ++ * it should be safe to release the file lock whenever we want to. ++ */ ++ ++ spin_lock(&pvr2d_fpriv->lock); ++ ++ list_for_each_entry_safe(ref, next, &pvr2d_fpriv->ref_list, ++ head) { ++ list_del(&ref->head); ++ buf = ref->buf; ++ kfree(ref); ++ spin_unlock(&pvr2d_fpriv->lock); ++ write_lock(&dev_priv->hash_lock); ++ kref_put(&buf->kref, &pvr2d_release_buf); ++ write_unlock(&dev_priv->hash_lock); ++ spin_lock(&pvr2d_fpriv->lock); ++ } ++ ++ idr_remove_all(&pvr2d_fpriv->buf_idr); ++ idr_destroy(&pvr2d_fpriv->buf_idr); ++ spin_unlock(&pvr2d_fpriv->lock); ++ ++ kfree(pvr2d_fpriv); ++ ++ DRM_DEBUG("pvr2d_release calling drm_release.\n"); ++ return drm_release(inode, filp); ++} ++ ++static int pvr2d_load(struct drm_device *dev, unsigned long chipset) ++{ ++ struct pvr2d_dev *dev_priv; ++ int ret; ++ ++ dev_priv = kmalloc(sizeof(*dev_priv), GFP_KERNEL); ++ if (unlikely(dev_priv == NULL)) ++ return -ENOMEM; ++ ++ rwlock_init(&dev_priv->hash_lock); ++ ret = drm_ht_create(&dev_priv->shmem_hash, ++ PVR2D_SHMEM_HASH_ORDER); ++ ++ if (unlikely(ret != 0)) ++ goto out_err0; ++ ++ dev->dev_private = dev_priv; ++ ++ DRM_DEBUG("pvr2d_load completed successfully.\n"); ++ return 0; ++out_err0: ++ kfree(dev_priv); ++ return ret; ++} ++ ++ ++static int pvr2d_unload(struct drm_device *dev) ++{ ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ ++ drm_ht_remove(&dev_priv->shmem_hash); ++ kfree(dev_priv); ++ DRM_DEBUG("pvr2d_unload completed successfully.\n"); ++ return 0; ++} ++ ++static struct pci_device_id pciidlist[] = { ++ pvr2d_PCI_IDS ++}; ++ ++struct drm_ioctl_desc pvr2d_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_PVR2D_BUF_LOCK, pvr2d_buf_lock, 0), ++ DRM_IOCTL_DEF(DRM_PVR2D_BUF_RELEASE, pvr2d_buf_release, 0), ++ DRM_IOCTL_DEF(DRM_PVR2D_CFLUSH, pvr2d_cflush, 0) ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_MTRR, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = pvr2d_ioctls, ++ .num_ioctls = DRM_ARRAY_SIZE(pvr2d_ioctls), ++ .load = pvr2d_load, ++ .unload = pvr2d_unload, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = pvr2d_open, ++ .release = pvr2d_release, ++ .unlocked_ioctl = drm_unlocked_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init pvr2d_init(void) ++{ ++#ifdef CONFIG_PCI ++ return drm_init(&driver, pciidlist); ++#else ++ return drm_get_dev(NULL, NULL, &driver); ++#endif ++} ++ ++static void __exit pvr2d_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(pvr2d_init); ++module_exit(pvr2d_exit); ++ 
++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/pvr2d_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/pvr2d_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/pvr2d_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/pvr2d_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,23 @@ ++/* -*- linux-c -*- */ ++ ++/* ++ * Copyright (C) 2008 Nokia Corporation. All rights reserved. ++ */ ++ ++#ifndef __PVR2D_H__ ++#define __PVR2D_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Tungsten Graphics Inc." ++ ++#define DRIVER_NAME "pvr2d" ++#define DRIVER_DESC "PVR2D kernel helper" ++#define DRIVER_DATE "20080811" ++ ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 0 ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_cp.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_cp.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_cp.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_cp.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1771 @@ ++/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ ++/* ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * Copyright 2007 Advanced Micro Devices, Inc. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Kevin E. 
Martin ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++#include "r300_reg.h" ++ ++#include "radeon_microcode.h" ++#define RADEON_FIFO_DEBUG 0 ++ ++static int radeon_do_cleanup_cp(struct drm_device * dev); ++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); ++ ++static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ u32 ret; ++ RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); ++ ret = RADEON_READ(R520_MC_IND_DATA); ++ RADEON_WRITE(R520_MC_IND_INDEX, 0); ++ return ret; ++} ++ ++static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ u32 ret; ++ RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); ++ ret = RADEON_READ(RS480_NB_MC_DATA); ++ RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); ++ return ret; ++} ++ ++static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ u32 ret; ++ RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); ++ ret = RADEON_READ(RS690_MC_DATA); ++ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); ++ return ret; ++} ++ ++static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ return RS690_READ_MCIND(dev_priv, addr); ++ else ++ return RS480_READ_MCIND(dev_priv, addr); ++} ++ ++u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) ++{ ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ++ return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); ++ else ++ return RADEON_READ(RADEON_MC_FB_LOCATION); ++} ++ ++static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ++ R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); ++ else ++ RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); ++} ++ ++static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ++ R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); ++ else ++ RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); ++} ++ ++static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) ++{ ++ u32 agp_base_hi = upper_32_bits(agp_base); ++ u32 agp_base_lo = agp_base & 0xffffffff; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { ++ R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); ++ R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { ++ RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); ++ RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { ++ R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); ++ R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); ++ } else 
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { ++ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); ++ RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); ++ } else { ++ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) ++ RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); ++ } ++} ++ ++static int RADEON_READ_PLL(struct drm_device * dev, int addr) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); ++ return RADEON_READ(RADEON_CLOCK_CNTL_DATA); ++} ++ ++static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) ++{ ++ RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); ++ return RADEON_READ(RADEON_PCIE_DATA); ++} ++ ++#if RADEON_FIFO_DEBUG ++static void radeon_status(drm_radeon_private_t * dev_priv) ++{ ++ printk("%s:\n", __FUNCTION__); ++ printk("RBBM_STATUS = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); ++ printk("CP_RB_RTPR = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); ++ printk("CP_RB_WTPR = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); ++ printk("AIC_CNTL = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); ++ printk("AIC_STAT = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_AIC_STAT)); ++ printk("AIC_PT_BASE = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); ++ printk("TLB_ADDR = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); ++ printk("TLB_DATA = 0x%08x\n", ++ (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); ++} ++#endif ++ ++/* ================================================================ ++ * Engine, FIFO control ++ */ ++ ++static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) ++{ ++ u32 tmp; ++ int i; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { ++ tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); ++ tmp |= RADEON_RB3D_DC_FLUSH_ALL; ++ RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) ++ & RADEON_RB3D_DC_BUSY)) { ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ } else { ++ /* don't flush or purge cache here or lockup */ ++ return 0; ++ } ++ ++#if RADEON_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++ radeon_status(dev_priv); ++#endif ++ return -EBUSY; ++} ++ ++static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) ++{ ++ int i; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ int slots = (RADEON_READ(RADEON_RBBM_STATUS) ++ & RADEON_RBBM_FIFOCNT_MASK); ++ if (slots >= entries) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n", ++ RADEON_READ(RADEON_RBBM_STATUS), ++ RADEON_READ(R300_VAP_CNTL_STATUS)); ++ ++#if RADEON_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++ radeon_status(dev_priv); ++#endif ++ return -EBUSY; ++} ++ ++static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) ++{ ++ int i, ret; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ ret = radeon_do_wait_for_fifo(dev_priv, 64); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(RADEON_READ(RADEON_RBBM_STATUS) ++ & RADEON_RBBM_ACTIVE)) { ++ radeon_do_pixcache_flush(dev_priv); ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n", ++ 
RADEON_READ(RADEON_RBBM_STATUS), ++ RADEON_READ(R300_VAP_CNTL_STATUS)); ++ ++#if RADEON_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++ radeon_status(dev_priv); ++#endif ++ return -EBUSY; ++} ++ ++static void radeon_init_pipes(drm_radeon_private_t * dev_priv) ++{ ++ uint32_t gb_tile_config, gb_pipe_sel = 0; ++ ++ /* RS4xx/RS6xx/R4xx/R5xx */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { ++ gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); ++ dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; ++ } else { ++ /* R3xx */ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { ++ dev_priv->num_gb_pipes = 2; ++ } else { ++ /* R3Vxx */ ++ dev_priv->num_gb_pipes = 1; ++ } ++ } ++ DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); ++ ++ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); ++ ++ switch(dev_priv->num_gb_pipes) { ++ case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; ++ case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; ++ case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; ++ default: ++ case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; ++ } ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ++ RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); ++ RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); ++ } ++ RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); ++ radeon_do_wait_for_idle(dev_priv); ++ RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); ++ RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | ++ R300_DC_AUTOFLUSH_ENABLE | ++ R300_DC_DC_DISABLE_IGNORE_PE)); ++ ++ ++} ++ ++/* ================================================================ ++ * CP control, initialization ++ */ ++ ++/* Load the microcode for the CP */ ++static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv) ++{ ++ int i; ++ DRM_DEBUG("\n"); ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); ++ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { ++ DRM_INFO("Loading R100 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R100_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R100_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { ++ DRM_INFO("Loading R200 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R200_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R200_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { ++ DRM_INFO("Loading R300 Microcode\n"); ++ for (i = 0; i < 256; 
i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R300_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R300_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { ++ DRM_INFO("Loading R400 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R420_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R420_cp_microcode[i][0]); ++ } ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { ++ DRM_INFO("Loading RS690 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ RS690_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ RS690_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { ++ DRM_INFO("Loading R500 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R520_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R520_cp_microcode[i][0]); ++ } ++ } ++} ++ ++/* Flush any pending commands to the CP. This should only be used just ++ * prior to a wait for idle, as it informs the engine that the command ++ * stream is ending. ++ */ ++static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) ++{ ++ DRM_DEBUG("\n"); ++#if 0 ++ u32 tmp; ++ ++ tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); ++ RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); ++#endif ++} ++ ++/* Wait for the CP to go idle. ++ */ ++int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) ++{ ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(6); ++ ++ RADEON_PURGE_CACHE(); ++ RADEON_PURGE_ZCACHE(); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ return radeon_do_wait_for_idle(dev_priv); ++} ++ ++/* Start the Command Processor. ++ */ ++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) ++{ ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); ++ ++ dev_priv->cp_running = 1; ++ ++ BEGIN_RING(8); ++ /* isync can only be written through cp on r5xx write it here */ ++ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); ++ OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | ++ RADEON_ISYNC_ANY3D_IDLE2D | ++ RADEON_ISYNC_WAIT_IDLEGUI | ++ RADEON_ISYNC_CPSCRATCH_IDLEGUI); ++ RADEON_PURGE_CACHE(); ++ RADEON_PURGE_ZCACHE(); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; ++} ++ ++/* Reset the Command Processor. This will not flush any pending ++ * commands, so you must wait for the CP command stream to complete ++ * before calling this routine. ++ */ ++static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) ++{ ++ u32 cur_read_ptr; ++ DRM_DEBUG("\n"); ++ ++ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); ++ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); ++ SET_RING_HEAD(dev_priv, cur_read_ptr); ++ dev_priv->ring.tail = cur_read_ptr; ++} ++ ++/* Stop the Command Processor. This will not flush any pending ++ * commands, so you must flush the command stream and wait for the CP ++ * to go idle before calling this routine. 
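An aside on the microcode loading loops above: every chip family takes the same path — the ME RAM address register is reset to zero once, then a 256-entry table is streamed through the DATAH/DATAL registers, high word first; only the table differs per family. A minimal sketch of that pattern, separate from the patch, with a placeholder write helper and register arguments rather than the driver's RADEON_WRITE() macro:

    #include <stdint.h>

    typedef void (*mmio_write_fn)(uint32_t reg, uint32_t val);

    /* Stream a 256-entry microcode table into CP ME RAM: set the address
     * register to 0 once, then write the high and low word of each entry.
     * Register offsets and the write helper are caller-supplied placeholders. */
    static void upload_cp_microcode(mmio_write_fn wr,
                                    uint32_t addr_reg,
                                    uint32_t datah_reg,
                                    uint32_t datal_reg,
                                    const uint32_t ucode[256][2])
    {
            int i;

            wr(addr_reg, 0);
            for (i = 0; i < 256; i++) {
                    wr(datah_reg, ucode[i][1]);   /* high word first, as above */
                    wr(datal_reg, ucode[i][0]);
            }
    }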
++ */ ++static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); ++ ++ dev_priv->cp_running = 0; ++} ++ ++/* Reset the engine. This will stop the CP if it is running. ++ */ ++static int radeon_do_engine_reset(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; ++ DRM_DEBUG("\n"); ++ ++ radeon_do_pixcache_flush(dev_priv); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { ++ /* may need something similar for newer chips */ ++ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); ++ mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); ++ ++ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | ++ RADEON_FORCEON_MCLKA | ++ RADEON_FORCEON_MCLKB | ++ RADEON_FORCEON_YCLKA | ++ RADEON_FORCEON_YCLKB | ++ RADEON_FORCEON_MC | ++ RADEON_FORCEON_AIC)); ++ } ++ ++ rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); ++ ++ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | ++ RADEON_SOFT_RESET_CP | ++ RADEON_SOFT_RESET_HI | ++ RADEON_SOFT_RESET_SE | ++ RADEON_SOFT_RESET_RE | ++ RADEON_SOFT_RESET_PP | ++ RADEON_SOFT_RESET_E2 | ++ RADEON_SOFT_RESET_RB)); ++ RADEON_READ(RADEON_RBBM_SOFT_RESET); ++ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & ++ ~(RADEON_SOFT_RESET_CP | ++ RADEON_SOFT_RESET_HI | ++ RADEON_SOFT_RESET_SE | ++ RADEON_SOFT_RESET_RE | ++ RADEON_SOFT_RESET_PP | ++ RADEON_SOFT_RESET_E2 | ++ RADEON_SOFT_RESET_RB))); ++ RADEON_READ(RADEON_RBBM_SOFT_RESET); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { ++ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); ++ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); ++ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); ++ } ++ ++ /* setup the raster pipes */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) ++ radeon_init_pipes(dev_priv); ++ ++ /* Reset the CP ring */ ++ radeon_do_cp_reset(dev_priv); ++ ++ /* The CP is no longer running after an engine reset */ ++ dev_priv->cp_running = 0; ++ ++ /* Reset any pending vertex, indirect buffers */ ++ radeon_freelist_reset(dev); ++ ++ return 0; ++} ++ ++static void radeon_cp_init_ring_buffer(struct drm_device * dev, ++ drm_radeon_private_t * dev_priv) ++{ ++ u32 ring_start, cur_read_ptr; ++ u32 tmp; ++ ++ /* Initialize the memory controller. With new memory map, the fb location ++ * is not changed, it should have been properly initialized already. 
Part ++ * of the problem is that the code below is bogus, assuming the GART is ++ * always appended to the fb which is not necessarily the case ++ */ ++ if (!dev_priv->new_memmap) ++ radeon_write_fb_location(dev_priv, ++ ((dev_priv->gart_vm_start - 1) & 0xffff0000) ++ | (dev_priv->fb_location >> 16)); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ radeon_write_agp_base(dev_priv, dev->agp->base); ++ ++ radeon_write_agp_location(dev_priv, ++ (((dev_priv->gart_vm_start - 1 + ++ dev_priv->gart_size) & 0xffff0000) | ++ (dev_priv->gart_vm_start >> 16))); ++ ++ ring_start = (dev_priv->cp_ring->offset ++ - dev->agp->base ++ + dev_priv->gart_vm_start); ++ } else ++#endif ++ ring_start = (dev_priv->cp_ring->offset ++ - (unsigned long)dev->sg->virtual ++ + dev_priv->gart_vm_start); ++ ++ RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); ++ ++ /* Set the write pointer delay */ ++ RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); ++ ++ /* Initialize the ring buffer's read and write pointers */ ++ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); ++ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); ++ SET_RING_HEAD(dev_priv, cur_read_ptr); ++ dev_priv->ring.tail = cur_read_ptr; ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, ++ dev_priv->ring_rptr->offset ++ - dev->agp->base + dev_priv->gart_vm_start); ++ } else ++#endif ++ { ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long tmp_ofs, page_ofs; ++ ++ tmp_ofs = dev_priv->ring_rptr->offset - ++ (unsigned long)dev->sg->virtual; ++ page_ofs = tmp_ofs >> PAGE_SHIFT; ++ ++ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); ++ DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n", ++ (unsigned long)entry->busaddr[page_ofs], ++ entry->handle + tmp_ofs); ++ } ++ ++ /* Set ring buffer size */ ++#ifdef __BIG_ENDIAN ++ RADEON_WRITE(RADEON_CP_RB_CNTL, ++ RADEON_BUF_SWAP_32BIT | ++ (dev_priv->ring.fetch_size_l2ow << 18) | ++ (dev_priv->ring.rptr_update_l2qw << 8) | ++ dev_priv->ring.size_l2qw); ++#else ++ RADEON_WRITE(RADEON_CP_RB_CNTL, ++ (dev_priv->ring.fetch_size_l2ow << 18) | ++ (dev_priv->ring.rptr_update_l2qw << 8) | ++ dev_priv->ring.size_l2qw); ++#endif ++ ++ /* Initialize the scratch register pointer. This will cause ++ * the scratch register values to be written out to memory ++ * whenever they are updated. 
++ * ++ * We simply put this behind the ring read pointer, this works ++ * with PCI GART as well as (whatever kind of) AGP GART ++ */ ++ RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) ++ + RADEON_SCRATCH_REG_OFFSET); ++ ++ dev_priv->scratch = ((__volatile__ u32 *) ++ dev_priv->ring_rptr->handle + ++ (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); ++ ++ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); ++ ++ /* Turn on bus mastering */ ++ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; ++ RADEON_WRITE(RADEON_BUS_CNTL, tmp); ++ ++ dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; ++ RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ++ ++ dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; ++ RADEON_WRITE(RADEON_LAST_DISPATCH_REG, ++ dev_priv->sarea_priv->last_dispatch); ++ ++ dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; ++ RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ /* Sync everything up */ ++ RADEON_WRITE(RADEON_ISYNC_CNTL, ++ (RADEON_ISYNC_ANY2D_IDLE3D | ++ RADEON_ISYNC_ANY3D_IDLE2D | ++ RADEON_ISYNC_WAIT_IDLEGUI | ++ RADEON_ISYNC_CPSCRATCH_IDLEGUI)); ++ ++} ++ ++static void radeon_test_writeback(drm_radeon_private_t * dev_priv) ++{ ++ u32 tmp; ++ ++ /* Writeback doesn't seem to work everywhere, test it here and possibly ++ * enable it if it appears to work ++ */ ++ DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); ++ RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); ++ ++ for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { ++ if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == ++ 0xdeadbeef) ++ break; ++ DRM_UDELAY(1); ++ } ++ ++ if (tmp < dev_priv->usec_timeout) { ++ dev_priv->writeback_works = 1; ++ DRM_INFO("writeback test succeeded in %d usecs\n", tmp); ++ } else { ++ dev_priv->writeback_works = 0; ++ DRM_INFO("writeback test failed\n"); ++ } ++ if (radeon_no_wb == 1) { ++ dev_priv->writeback_works = 0; ++ DRM_INFO("writeback forced off\n"); ++ } ++ ++ if (!dev_priv->writeback_works) { ++ /* Disable writeback to avoid unnecessary bus master transfers */ ++ RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); ++ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); ++ } ++} ++ ++/* Enable or disable IGP GART on the chip */ ++static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) ++{ ++ u32 temp; ++ ++ if (on) { ++ DRM_DEBUG("programming igp gart %08X %08lX %08X\n", ++ dev_priv->gart_vm_start, ++ (long)dev_priv->gart_info.bus_addr, ++ dev_priv->gart_size); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | ++ RS690_BLOCK_GFX_D3_EN)); ++ else ++ IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); ++ ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | ++ RS480_VA_SIZE_32MB)); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); ++ IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | ++ RS480_TLB_ENABLE | ++ RS480_GTW_LAC_EN | ++ RS480_1LEVEL_GART)); ++ ++ temp = dev_priv->gart_info.bus_addr & 0xfffff000; ++ temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; ++ IGP_WRITE_MCIND(RS480_GART_BASE, temp); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); ++ IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | ++ RS480_REQ_TYPE_SNOOP_DIS)); ++ ++ 
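A note on radeon_test_writeback() a little further up: it seeds the host-visible read-pointer area with zero, asks the chip to write 0xdeadbeef through a scratch register, then polls the host copy for up to usec_timeout microseconds; only if the value shows up is scratch writeback left enabled. A stripped-down sketch of that poll, using a stand-in delay callback instead of DRM_UDELAY():

    #include <stdint.h>

    /* Poll a host-memory status word until it holds the expected value or the
     * microsecond budget runs out.  Returns 1 on success, 0 on timeout. */
    static int poll_writeback(volatile uint32_t *slot, uint32_t expected,
                              int timeout_us, void (*udelay)(int us))
    {
            int t;

            for (t = 0; t < timeout_us; t++) {
                    if (*slot == expected)
                            return 1;
                    udelay(1);
            }
            return 0;
    }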
radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); ++ ++ dev_priv->gart_size = 32*1024*1024; ++ temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & ++ 0xffff0000) | (dev_priv->gart_vm_start >> 16)); ++ ++ radeon_write_agp_location(dev_priv, temp); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | ++ RS480_VA_SIZE_32MB)); ++ ++ do { ++ temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) ++ break; ++ DRM_UDELAY(1); ++ } while(1); ++ ++ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, ++ RS480_GART_CACHE_INVALIDATE); ++ ++ do { ++ temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) ++ break; ++ DRM_UDELAY(1); ++ } while(1); ++ ++ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); ++ } else { ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); ++ } ++} ++ ++static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) ++{ ++ u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); ++ if (on) { ++ ++ DRM_DEBUG("programming pcie %08X %08lX %08X\n", ++ dev_priv->gart_vm_start, ++ (long)dev_priv->gart_info.bus_addr, ++ dev_priv->gart_size); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, ++ dev_priv->gart_vm_start); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, ++ dev_priv->gart_info.bus_addr); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, ++ dev_priv->gart_vm_start); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, ++ dev_priv->gart_vm_start + ++ dev_priv->gart_size - 1); ++ ++ radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ ++ ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, ++ RADEON_PCIE_TX_GART_EN); ++ } else { ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, ++ tmp & ~RADEON_PCIE_TX_GART_EN); ++ } ++} ++ ++/* Enable or disable PCI GART on the chip */ ++static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) ++{ ++ u32 tmp; ++ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ++ (dev_priv->flags & RADEON_IS_IGPGART)) { ++ radeon_set_igpgart(dev_priv, on); ++ return; ++ } ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ radeon_set_pciegart(dev_priv, on); ++ return; ++ } ++ ++ tmp = RADEON_READ(RADEON_AIC_CNTL); ++ ++ if (on) { ++ RADEON_WRITE(RADEON_AIC_CNTL, ++ tmp | RADEON_PCIGART_TRANSLATE_EN); ++ ++ /* set PCI GART page-table base address ++ */ ++ RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); ++ ++ /* set address range for PCI address translate ++ */ ++ RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); ++ RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start ++ + dev_priv->gart_size - 1); ++ ++ /* Turn off AGP aperture -- is this required for PCI GART? 
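One detail worth calling out in the GART programming above: both the PCIE path (TX_GART_START_LO / TX_GART_END_LO) and the plain PCI path (AIC_LO_ADDR / AIC_HI_ADDR) are written with the first byte of the aperture and with gart_vm_start + gart_size - 1, i.e. the last covered byte rather than one past it. A trivial sketch of that arithmetic, independent of the driver (the example address is made up):

    #include <stdint.h>

    struct aperture_range {
            uint32_t lo;   /* first byte translated by the GART          */
            uint32_t hi;   /* last byte translated (note the "- 1")      */
    };

    static struct aperture_range gart_aperture(uint32_t vm_start, uint32_t size)
    {
            struct aperture_range r = { vm_start, vm_start + size - 1u };
            return r;
    }

    /* e.g. a 32 MB aperture starting at 0xE0000000 ends at 0xE1FFFFFF */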
++ */ ++ radeon_write_agp_location(dev_priv, 0xffffffc0); ++ RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ ++ } else { ++ RADEON_WRITE(RADEON_AIC_CNTL, ++ tmp & ~RADEON_PCIGART_TRANSLATE_EN); ++ } ++} ++ ++static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ /* if we require new memory map but we don't have it fail */ ++ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { ++ DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) ++ { ++ DRM_DEBUG("Forcing AGP card to PCI mode\n"); ++ dev_priv->flags &= ~RADEON_IS_AGP; ++ } ++ else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) ++ && !init->is_pci) ++ { ++ DRM_DEBUG("Restoring AGP flag\n"); ++ dev_priv->flags |= RADEON_IS_AGP; ++ } ++ ++ if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { ++ DRM_ERROR("PCI GART memory not allocated!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->usec_timeout = init->usec_timeout; ++ if (dev_priv->usec_timeout < 1 || ++ dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { ++ DRM_DEBUG("TIMEOUT problem!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ /* Enable vblank on CRTC1 for older X servers ++ */ ++ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; ++ ++ dev_priv->do_boxes = 0; ++ dev_priv->cp_mode = init->cp_mode; ++ ++ /* We don't support anything other than bus-mastering ring mode, ++ * but the ring can be in either AGP or PCI space for the ring ++ * read pointer. ++ */ ++ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && ++ (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { ++ DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ switch (init->fb_bpp) { ++ case 16: ++ dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; ++ break; ++ case 32: ++ default: ++ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; ++ break; ++ } ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ switch (init->depth_bpp) { ++ case 16: ++ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; ++ break; ++ case 32: ++ default: ++ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; ++ break; ++ } ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ /* Hardware state for depth clears. Remove this if/when we no ++ * longer clear the depth buffer with a 3D rectangle. Hard-code ++ * all values to prevent unwanted 3D state from slipping through ++ * and screwing with the clear operation. ++ */ ++ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | ++ (dev_priv->color_fmt << 10) | ++ (dev_priv->chip_family < CHIP_R200 ? 
RADEON_ZBLOCK16 : 0)); ++ ++ dev_priv->depth_clear.rb3d_zstencilcntl = ++ (dev_priv->depth_fmt | ++ RADEON_Z_TEST_ALWAYS | ++ RADEON_STENCIL_TEST_ALWAYS | ++ RADEON_STENCIL_S_FAIL_REPLACE | ++ RADEON_STENCIL_ZPASS_REPLACE | ++ RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE); ++ ++ dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | ++ RADEON_BFACE_SOLID | ++ RADEON_FFACE_SOLID | ++ RADEON_FLAT_SHADE_VTX_LAST | ++ RADEON_DIFFUSE_SHADE_FLAT | ++ RADEON_ALPHA_SHADE_FLAT | ++ RADEON_SPECULAR_SHADE_FLAT | ++ RADEON_FOG_SHADE_FLAT | ++ RADEON_VTX_PIX_CENTER_OGL | ++ RADEON_ROUND_MODE_TRUNC | ++ RADEON_ROUND_PREC_8TH_PIX); ++ ++ ++ dev_priv->ring_offset = init->ring_offset; ++ dev_priv->ring_rptr_offset = init->ring_rptr_offset; ++ dev_priv->buffers_offset = init->buffers_offset; ++ dev_priv->gart_textures_offset = init->gart_textures_offset; ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("could not find sarea!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); ++ if (!dev_priv->cp_ring) { ++ DRM_ERROR("could not find cp ring region!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); ++ if (!dev_priv->ring_rptr) { ++ DRM_ERROR("could not find ring read pointer!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("could not find dma buffer region!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ if (init->gart_textures_offset) { ++ dev_priv->gart_textures = ++ drm_core_findmap(dev, init->gart_textures_offset); ++ if (!dev_priv->gart_textures) { ++ DRM_ERROR("could not find GART texture region!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ drm_core_ioremap(dev_priv->cp_ring, dev); ++ drm_core_ioremap(dev_priv->ring_rptr, dev); ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev_priv->cp_ring->handle || ++ !dev_priv->ring_rptr->handle || ++ !dev->agp_buffer_map->handle) { ++ DRM_ERROR("could not find ioremap agp regions!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ } else ++#endif ++ { ++ dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset; ++ dev_priv->ring_rptr->handle = ++ (void *)dev_priv->ring_rptr->offset; ++ dev->agp_buffer_map->handle = ++ (void *)dev->agp_buffer_map->offset; ++ ++ DRM_DEBUG("dev_priv->cp_ring->handle %p\n", ++ dev_priv->cp_ring->handle); ++ DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", ++ dev_priv->ring_rptr->handle); ++ DRM_DEBUG("dev->agp_buffer_map->handle %p\n", ++ dev->agp_buffer_map->handle); ++ } ++ ++ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; ++ dev_priv->fb_size = ++ ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) ++ - dev_priv->fb_location; ++ ++ dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | ++ ((dev_priv->front_offset ++ + dev_priv->fb_location) >> 10)); ++ ++ dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | ++ ((dev_priv->back_offset ++ + dev_priv->fb_location) >> 10)); ++ ++ dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | ++ 
((dev_priv->depth_offset ++ + dev_priv->fb_location) >> 10)); ++ ++ dev_priv->gart_size = init->gart_size; ++ ++ /* New let's set the memory map ... */ ++ if (dev_priv->new_memmap) { ++ u32 base = 0; ++ ++ DRM_INFO("Setting GART location based on new memory map\n"); ++ ++ /* If using AGP, try to locate the AGP aperture at the same ++ * location in the card and on the bus, though we have to ++ * align it down. ++ */ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ base = dev->agp->base; ++ /* Check if valid */ ++ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && ++ base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { ++ DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", ++ dev->agp->base); ++ base = 0; ++ } ++ } ++#endif ++ /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ ++ if (base == 0) { ++ base = dev_priv->fb_location + dev_priv->fb_size; ++ if (base < dev_priv->fb_location || ++ ((base + dev_priv->gart_size) & 0xfffffffful) < base) ++ base = dev_priv->fb_location ++ - dev_priv->gart_size; ++ } ++ dev_priv->gart_vm_start = base & 0xffc00000u; ++ if (dev_priv->gart_vm_start != base) ++ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", ++ base, dev_priv->gart_vm_start); ++ } else { ++ DRM_INFO("Setting GART location based on old memory map\n"); ++ dev_priv->gart_vm_start = dev_priv->fb_location + ++ RADEON_READ(RADEON_CONFIG_APER_SIZE); ++ } ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) ++ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset ++ - dev->agp->base ++ + dev_priv->gart_vm_start); ++ else ++#endif ++ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset ++ - (unsigned long)dev->sg->virtual ++ + dev_priv->gart_vm_start); ++ ++ DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); ++ DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start); ++ DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n", ++ dev_priv->gart_buffers_offset); ++ ++ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle; ++ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle ++ + init->ring_size / sizeof(u32)); ++ dev_priv->ring.size = init->ring_size; ++ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); ++ ++ dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; ++ dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); ++ ++ dev_priv->ring.fetch_size = /* init->fetch_size */ 32; ++ dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); ++ ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ ++ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ /* Turn off PCI GART */ ++ radeon_set_pcigart(dev_priv, 0); ++ } else ++#endif ++ { ++ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); ++ /* if we have an offset set from userspace */ ++ if (dev_priv->pcigart_offset_set) { ++ dev_priv->gart_info.bus_addr = ++ dev_priv->pcigart_offset + dev_priv->fb_location; ++ dev_priv->gart_info.mapping.offset = ++ dev_priv->pcigart_offset + dev_priv->fb_aper_offset; ++ dev_priv->gart_info.mapping.size = ++ dev_priv->gart_info.table_size; ++ ++ drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); ++ dev_priv->gart_info.addr = ++ dev_priv->gart_info.mapping.handle; ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; ++ else ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ dev_priv->gart_info.gart_table_location = ++ 
DRM_ATI_GART_FB; ++ ++ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", ++ dev_priv->gart_info.addr, ++ dev_priv->pcigart_offset); ++ } else { ++ if (dev_priv->flags & RADEON_IS_IGPGART) ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; ++ else ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ dev_priv->gart_info.gart_table_location = ++ DRM_ATI_GART_MAIN; ++ dev_priv->gart_info.addr = NULL; ++ dev_priv->gart_info.bus_addr = 0; ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ DRM_ERROR ++ ("Cannot use PCI Express without GART in FB memory\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ } ++ ++ if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { ++ DRM_ERROR("failed to init PCI GART!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -ENOMEM; ++ } ++ ++ /* Turn on PCI GART */ ++ radeon_set_pcigart(dev_priv, 1); ++ } ++ ++ /* Start with assuming that writeback doesn't work */ ++ dev_priv->writeback_works = 0; ++ ++ radeon_cp_load_microcode(dev_priv); ++ radeon_cp_init_ring_buffer(dev, dev_priv); ++ ++ dev_priv->last_buf = 0; ++ ++ radeon_do_engine_reset(dev); ++ radeon_test_writeback(dev_priv); ++ ++ return 0; ++} ++ ++static int radeon_do_cleanup_cp(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ if (dev_priv->cp_ring != NULL) { ++ drm_core_ioremapfree(dev_priv->cp_ring, dev); ++ dev_priv->cp_ring = NULL; ++ } ++ if (dev_priv->ring_rptr != NULL) { ++ drm_core_ioremapfree(dev_priv->ring_rptr, dev); ++ dev_priv->ring_rptr = NULL; ++ } ++ if (dev->agp_buffer_map != NULL) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ dev->agp_buffer_map = NULL; ++ } ++ } else ++#endif ++ { ++ ++ if (dev_priv->gart_info.bus_addr) { ++ /* Turn off PCI GART */ ++ radeon_set_pcigart(dev_priv, 0); ++ if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) ++ DRM_ERROR("failed to cleanup PCI GART!\n"); ++ } ++ ++ if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) ++ { ++ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); ++ dev_priv->gart_info.addr = 0; ++ } ++ } ++ /* only clear to the start of flags */ ++ memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); ++ ++ return 0; ++} ++ ++/* This code will reinit the Radeon CP hardware after a resume from disc. ++ * AFAIK, it would be very difficult to pickle the state at suspend time, so ++ * here we make sure that all Radeon hardware initialisation is re-done without ++ * affecting running applications. ++ * ++ * Charl P. 
Botha ++ */ ++static int radeon_do_resume_cp(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv) { ++ DRM_ERROR("Called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("Starting radeon_do_resume_cp()\n"); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ /* Turn off PCI GART */ ++ radeon_set_pcigart(dev_priv, 0); ++ } else ++#endif ++ { ++ /* Turn on PCI GART */ ++ radeon_set_pcigart(dev_priv, 1); ++ } ++ ++ radeon_cp_load_microcode(dev_priv); ++ radeon_cp_init_ring_buffer(dev, dev_priv); ++ ++ radeon_do_engine_reset(dev); ++ radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); ++ ++ DRM_DEBUG("radeon_do_resume_cp() complete\n"); ++ ++ return 0; ++} ++ ++int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_init_t *init = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (init->func == RADEON_INIT_R300_CP) ++ r300_init_reg_flags(dev); ++ ++ switch (init->func) { ++ case RADEON_INIT_CP: ++ case RADEON_INIT_R200_CP: ++ case RADEON_INIT_R300_CP: ++ return radeon_do_init_cp(dev, init); ++ case RADEON_CLEANUP_CP: ++ return radeon_do_cleanup_cp(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (dev_priv->cp_running) { ++ DRM_DEBUG("while CP running\n"); ++ return 0; ++ } ++ if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) { ++ DRM_DEBUG("called with bogus CP mode (%d)\n", ++ dev_priv->cp_mode); ++ return 0; ++ } ++ ++ radeon_do_cp_start(dev_priv); ++ ++ return 0; ++} ++ ++/* Stop the CP. The engine must have been idled before calling this ++ * routine. ++ */ ++int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_cp_stop_t *stop = data; ++ int ret; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv->cp_running) ++ return 0; ++ ++ /* Flush any pending CP commands. This ensures any outstanding ++ * commands are exectuted by the engine before we turn it off. ++ */ ++ if (stop->flush) { ++ radeon_do_cp_flush(dev_priv); ++ } ++ ++ /* If we fail to make the engine go idle, we return an error ++ * code so that the DRM ioctl wrapper can try again. ++ */ ++ if (stop->idle) { ++ ret = radeon_do_cp_idle(dev_priv); ++ if (ret) ++ return ret; ++ } ++ ++ /* Finally, we can turn off the CP. If the engine isn't idle, ++ * we will get some dropped triangles as they won't be fully ++ * rendered before the CP is shut down. 
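The teardown ordering in radeon_cp_stop() above is worth spelling out: an optional flush (only a hint that the command stream is ending), an optional idle wait whose failure is returned so the ioctl wrapper can retry, and only then the actual CP disable followed by an engine reset. A compact sketch of that ordering with placeholder helpers (not the driver's own functions):

    /* Placeholders standing in for radeon_do_cp_flush(), radeon_do_cp_idle(),
     * radeon_do_cp_stop() and radeon_do_engine_reset(). */
    void cp_flush(void);
    int  cp_wait_idle(void);   /* may return -EBUSY on timeout */
    void cp_disable(void);
    void engine_reset(void);

    static int stop_cp(int want_flush, int want_idle)
    {
            if (want_flush)
                    cp_flush();

            if (want_idle) {
                    int ret = cp_wait_idle();
                    if (ret)
                            return ret;   /* caller (ioctl wrapper) may retry */
            }

            cp_disable();
            engine_reset();
            return 0;
    }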
++ */ ++ radeon_do_cp_stop(dev_priv); ++ ++ /* Reset the engine */ ++ radeon_do_engine_reset(dev); ++ ++ return 0; ++} ++ ++void radeon_do_release(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i, ret; ++ ++ if (dev_priv) { ++ if (dev_priv->cp_running) { ++ /* Stop the cp */ ++ while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { ++ DRM_DEBUG("radeon_do_cp_idle %d\n", ret); ++#ifdef __linux__ ++ schedule(); ++#else ++#if defined(__FreeBSD__) && __FreeBSD_version > 500000 ++ mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel", ++ 1); ++#else ++ tsleep(&ret, PZERO, "rdnrel", 1); ++#endif ++#endif ++ } ++ radeon_do_cp_stop(dev_priv); ++ radeon_do_engine_reset(dev); ++ } ++ ++ /* Disable *all* interrupts */ ++ if (dev_priv->mmio) /* remove this after permanent addmaps */ ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ ++ if (dev_priv->mmio) { /* remove all surfaces */ ++ for (i = 0; i < RADEON_MAX_SURFACES; i++) { ++ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0); ++ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + ++ 16 * i, 0); ++ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + ++ 16 * i, 0); ++ } ++ } ++ ++ /* Free memory heap structures */ ++ radeon_mem_takedown(&(dev_priv->gart_heap)); ++ radeon_mem_takedown(&(dev_priv->fb_heap)); ++ ++ /* deallocate kernel resources */ ++ radeon_do_cleanup_cp(dev); ++ } ++} ++ ++/* Just reset the CP ring. Called as part of an X Server engine reset. ++ */ ++int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_DEBUG("called before init done\n"); ++ return -EINVAL; ++ } ++ ++ radeon_do_cp_reset(dev_priv); ++ ++ /* The CP is no longer running after an engine reset */ ++ dev_priv->cp_running = 0; ++ ++ return 0; ++} ++ ++int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return radeon_do_cp_idle(dev_priv); ++} ++ ++/* Added by Charl P. Botha to call radeon_do_resume_cp(). ++ */ ++int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ ++ return radeon_do_resume_cp(dev); ++} ++ ++int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return radeon_do_engine_reset(dev); ++} ++ ++/* ================================================================ ++ * Fullscreen mode ++ */ ++ ++/* KW: Deprecated to say the least: ++ */ ++int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ return 0; ++} ++ ++/* ================================================================ ++ * Freelist management ++ */ ++ ++/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through ++ * bufs until freelist code is used. Note this hides a problem with ++ * the scratch register * (used to keep track of last buffer ++ * completed) being written to before * the last buffer has actually ++ * completed rendering. ++ * ++ * KW: It's also a good way to find free buffers quickly. ++ * ++ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't ++ * sleep. However, bugs in older versions of radeon_accel.c mean that ++ * we essentially have to do this, else old clients will break. 
++ * ++ * However, it does leave open a potential deadlock where all the ++ * buffers are held by other clients, which can't release them because ++ * they can't get the lock. ++ */ ++ ++struct drm_buf *radeon_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv; ++ struct drm_buf *buf; ++ int i, t; ++ int start; ++ ++ if (++dev_priv->last_buf >= dma->buf_count) ++ dev_priv->last_buf = 0; ++ ++ start = dev_priv->last_buf; ++ ++ for (t = 0; t < dev_priv->usec_timeout; t++) { ++ u32 done_age = GET_SCRATCH(1); ++ DRM_DEBUG("done_age = %d\n", done_age); ++ for (i = start; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->file_priv == NULL || (buf->pending && ++ buf_priv->age <= ++ done_age)) { ++ dev_priv->stats.requested_bufs++; ++ buf->pending = 0; ++ return buf; ++ } ++ start = 0; ++ } ++ ++ if (t) { ++ DRM_UDELAY(1); ++ dev_priv->stats.freelist_loops++; ++ } ++ } ++ ++ DRM_DEBUG("returning NULL!\n"); ++ return NULL; ++} ++ ++#if 0 ++struct drm_buf *radeon_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv; ++ struct drm_buf *buf; ++ int i, t; ++ int start; ++ u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)); ++ ++ if (++dev_priv->last_buf >= dma->buf_count) ++ dev_priv->last_buf = 0; ++ ++ start = dev_priv->last_buf; ++ dev_priv->stats.freelist_loops++; ++ ++ for (t = 0; t < 2; t++) { ++ for (i = start; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->file_priv == 0 || (buf->pending && ++ buf_priv->age <= ++ done_age)) { ++ dev_priv->stats.requested_bufs++; ++ buf->pending = 0; ++ return buf; ++ } ++ } ++ start = 0; ++ } ++ ++ return NULL; ++} ++#endif ++ ++void radeon_freelist_reset(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ dev_priv->last_buf = 0; ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ buf_priv->age = 0; ++ } ++} ++ ++/* ================================================================ ++ * CP command submission ++ */ ++ ++int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) ++{ ++ drm_radeon_ring_buffer_t *ring = &dev_priv->ring; ++ int i; ++ u32 last_head = GET_RING_HEAD(dev_priv); ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ u32 head = GET_RING_HEAD(dev_priv); ++ ++ ring->space = (head - ring->tail) * sizeof(u32); ++ if (ring->space <= 0) ++ ring->space += ring->size; ++ if (ring->space > n) ++ return 0; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ if (head != last_head) ++ i = 0; ++ last_head = head; ++ ++ DRM_UDELAY(1); ++ } ++ ++ /* FIXME: This return value is ignored in the BEGIN_RING macro! 
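The free-space computation in radeon_wait_ring() just above is the classic circular-buffer calculation: head and tail are dword indices, so when the raw difference goes non-positive one full ring size is added back, and the driver then insists on strictly more space than requested, so the ring is never allowed to fill completely. The arithmetic in isolation:

    #include <stdint.h>

    /* Free space, in bytes, of a ring whose read (head) and write (tail)
     * positions are dword indices into a buffer of ring_bytes bytes. */
    static int ring_free_bytes(int head_dw, int tail_dw, int ring_bytes)
    {
            int space = (head_dw - tail_dw) * 4;

            if (space <= 0)          /* tail is at or past head linearly: wrap */
                    space += ring_bytes;
            return space;
    }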
*/ ++#if RADEON_FIFO_DEBUG ++ radeon_status(dev_priv); ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++static int radeon_cp_get_buffers(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_dma * d) ++{ ++ int i; ++ struct drm_buf *buf; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = radeon_freelist_get(dev); ++ if (!buf) ++ return -EBUSY; /* NOTE: broken client */ ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, ++ sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, ++ sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int ret = 0; ++ struct drm_dma *d = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ return -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = radeon_cp_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} ++ ++int radeon_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ drm_radeon_private_t *dev_priv; ++ int ret = 0; ++ ++ dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_radeon_private_t)); ++ dev->dev_private = (void *)dev_priv; ++ dev_priv->flags = flags; ++ ++ switch (flags & RADEON_FAMILY_MASK) { ++ case CHIP_R100: ++ case CHIP_RV200: ++ case CHIP_R200: ++ case CHIP_R300: ++ case CHIP_R350: ++ case CHIP_R420: ++ case CHIP_RV410: ++ case CHIP_RV515: ++ case CHIP_R520: ++ case CHIP_RV570: ++ case CHIP_R580: ++ dev_priv->flags |= RADEON_HAS_HIERZ; ++ break; ++ default: ++ /* all other chips have no hierarchical z buffer */ ++ break; ++ } ++ ++ dev_priv->chip_family = flags & RADEON_FAMILY_MASK; ++ if (drm_device_is_agp(dev)) ++ dev_priv->flags |= RADEON_IS_AGP; ++ else if (drm_device_is_pcie(dev)) ++ dev_priv->flags |= RADEON_IS_PCIE; ++ else ++ dev_priv->flags |= RADEON_IS_PCI; ++ ++ DRM_DEBUG("%s card detected\n", ++ ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); ++ return ret; ++} ++ ++/* Create mappings for registers and framebuffer so userland doesn't necessarily ++ * have to find them. 
++ */ ++int radeon_driver_firstopen(struct drm_device *dev) ++{ ++ int ret; ++ drm_local_map_t *map; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; ++ ++ ret = drm_addmap(dev, drm_get_resource_start(dev, 2), ++ drm_get_resource_len(dev, 2), _DRM_REGISTERS, ++ _DRM_READ_ONLY, &dev_priv->mmio); ++ if (ret != 0) ++ return ret; ++ ++ dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ++ ret = drm_addmap(dev, dev_priv->fb_aper_offset, ++ drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, ++ _DRM_WRITE_COMBINING, &map); ++ if (ret != 0) ++ return ret; ++ ++ return 0; ++} ++ ++int radeon_driver_unload(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); ++ ++ dev->dev_private = NULL; ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,750 @@ ++/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- ++ * ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Kevin E. Martin ++ * Gareth Hughes ++ * Keith Whitwell ++ */ ++ ++#ifndef __RADEON_DRM_H__ ++#define __RADEON_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the X server file (radeon_sarea.h) ++ */ ++#ifndef __RADEON_SAREA_DEFINES__ ++#define __RADEON_SAREA_DEFINES__ ++ ++/* Old style state flags, required for sarea interface (1.1 and 1.2 ++ * clears) and 1.2 drm_vertex2 ioctl. 
++ */ ++#define RADEON_UPLOAD_CONTEXT 0x00000001 ++#define RADEON_UPLOAD_VERTFMT 0x00000002 ++#define RADEON_UPLOAD_LINE 0x00000004 ++#define RADEON_UPLOAD_BUMPMAP 0x00000008 ++#define RADEON_UPLOAD_MASKS 0x00000010 ++#define RADEON_UPLOAD_VIEWPORT 0x00000020 ++#define RADEON_UPLOAD_SETUP 0x00000040 ++#define RADEON_UPLOAD_TCL 0x00000080 ++#define RADEON_UPLOAD_MISC 0x00000100 ++#define RADEON_UPLOAD_TEX0 0x00000200 ++#define RADEON_UPLOAD_TEX1 0x00000400 ++#define RADEON_UPLOAD_TEX2 0x00000800 ++#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 ++#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 ++#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 ++#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ ++#define RADEON_REQUIRE_QUIESCENCE 0x00010000 ++#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ ++#define RADEON_UPLOAD_ALL 0x003effff ++#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff ++ ++/* New style per-packet identifiers for use in cmd_buffer ioctl with ++ * the RADEON_EMIT_PACKET command. Comments relate new packets to old ++ * state bits and the packet size: ++ */ ++#define RADEON_EMIT_PP_MISC 0 /* context/7 */ ++#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ ++#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ ++#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ ++#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ ++#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ ++#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ ++#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ ++#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ ++#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ ++#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ ++#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ ++#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ ++#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ ++#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ ++#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ ++#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ ++#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ ++#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ ++#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ ++#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ ++#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ ++#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ ++#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ ++#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ ++#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ ++#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ ++#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ ++#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ ++#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ ++#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ ++#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ ++#define R200_EMIT_VAP_CTL 32 /* vap/1 */ ++#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ ++#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ ++#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ ++#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ ++#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ ++#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ ++#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ ++#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ ++#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ ++#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ ++#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ ++#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ ++#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ ++#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ ++#define 
R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ ++#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ ++#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ ++#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ ++#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ ++#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ ++#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ ++#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ ++#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ ++#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ ++#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ ++#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ ++#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ ++#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ ++#define R200_EMIT_PP_CUBIC_FACES_0 61 ++#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 ++#define R200_EMIT_PP_CUBIC_FACES_1 63 ++#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 ++#define R200_EMIT_PP_CUBIC_FACES_2 65 ++#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 ++#define R200_EMIT_PP_CUBIC_FACES_3 67 ++#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 ++#define R200_EMIT_PP_CUBIC_FACES_4 69 ++#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 ++#define R200_EMIT_PP_CUBIC_FACES_5 71 ++#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 ++#define RADEON_EMIT_PP_TEX_SIZE_0 73 ++#define RADEON_EMIT_PP_TEX_SIZE_1 74 ++#define RADEON_EMIT_PP_TEX_SIZE_2 75 ++#define R200_EMIT_RB3D_BLENDCOLOR 76 ++#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 ++#define RADEON_EMIT_PP_CUBIC_FACES_0 78 ++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 ++#define RADEON_EMIT_PP_CUBIC_FACES_1 80 ++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 ++#define RADEON_EMIT_PP_CUBIC_FACES_2 82 ++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 ++#define R200_EMIT_PP_TRI_PERF_CNTL 84 ++#define R200_EMIT_PP_AFS_0 85 ++#define R200_EMIT_PP_AFS_1 86 ++#define R200_EMIT_ATF_TFACTOR 87 ++#define R200_EMIT_PP_TXCTLALL_0 88 ++#define R200_EMIT_PP_TXCTLALL_1 89 ++#define R200_EMIT_PP_TXCTLALL_2 90 ++#define R200_EMIT_PP_TXCTLALL_3 91 ++#define R200_EMIT_PP_TXCTLALL_4 92 ++#define R200_EMIT_PP_TXCTLALL_5 93 ++#define R200_EMIT_VAP_PVS_CNTL 94 ++#define RADEON_MAX_STATE_PACKETS 95 ++ ++/* Commands understood by cmd_buffer ioctl. 
More can be added but ++ * obviously these can't be removed or changed: ++ */ ++#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ ++#define RADEON_CMD_SCALARS 2 /* emit scalar data */ ++#define RADEON_CMD_VECTORS 3 /* emit vector data */ ++#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ ++#define RADEON_CMD_PACKET3 5 /* emit hw packet */ ++#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ ++#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ ++#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: ++ * doesn't make the cpu wait, just ++ * the graphics hardware */ ++#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ ++ ++typedef union { ++ int i; ++ struct { ++ unsigned char cmd_type, pad0, pad1, pad2; ++ } header; ++ struct { ++ unsigned char cmd_type, packet_id, pad0, pad1; ++ } packet; ++ struct { ++ unsigned char cmd_type, offset, stride, count; ++ } scalars; ++ struct { ++ unsigned char cmd_type, offset, stride, count; ++ } vectors; ++ struct { ++ unsigned char cmd_type, addr_lo, addr_hi, count; ++ } veclinear; ++ struct { ++ unsigned char cmd_type, buf_idx, pad0, pad1; ++ } dma; ++ struct { ++ unsigned char cmd_type, flags, pad0, pad1; ++ } wait; ++} drm_radeon_cmd_header_t; ++ ++#define RADEON_WAIT_2D 0x1 ++#define RADEON_WAIT_3D 0x2 ++ ++/* Allowed parameters for R300_CMD_PACKET3 ++ */ ++#define R300_CMD_PACKET3_CLEAR 0 ++#define R300_CMD_PACKET3_RAW 1 ++ ++/* Commands understood by cmd_buffer ioctl for R300. ++ * The interface has not been stabilized, so some of these may be removed ++ * and eventually reordered before stabilization. ++ */ ++#define R300_CMD_PACKET0 1 ++#define R300_CMD_VPU 2 /* emit vertex program upload */ ++#define R300_CMD_PACKET3 3 /* emit a packet3 */ ++#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ ++#define R300_CMD_CP_DELAY 5 ++#define R300_CMD_DMA_DISCARD 6 ++#define R300_CMD_WAIT 7 ++# define R300_WAIT_2D 0x1 ++# define R300_WAIT_3D 0x2 ++/* these two defines are DOING IT WRONG - however ++ * we have userspace which relies on using these. 
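For orientation, the drm_radeon_cmd_header_t union defined above is how the cmd_buffer ioctl stream is parsed: every command begins with a one-byte cmd_type, and the remaining three header bytes are reinterpreted per command. A trimmed illustration of that decode (the union is cut down to two command shapes; the numeric cases mirror RADEON_CMD_PACKET and RADEON_CMD_DMA_DISCARD above):

    typedef union {
            int i;
            struct { unsigned char cmd_type, pad0, pad1, pad2; } header;
            struct { unsigned char cmd_type, packet_id, pad0, pad1; } packet;
            struct { unsigned char cmd_type, buf_idx, pad0, pad1; } dma;
    } cmd_header_t;                  /* trimmed copy of the union above */

    /* Return the per-command payload byte selected by cmd_type, or -1 for
     * commands this sketch does not handle. */
    static int decode_header(cmd_header_t h)
    {
            switch (h.header.cmd_type) {
            case 1:                  /* RADEON_CMD_PACKET: register packet id */
                    return h.packet.packet_id;
            case 4:                  /* RADEON_CMD_DMA_DISCARD: buffer index  */
                    return h.dma.buf_idx;
            default:
                    return -1;
            }
    }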
++ * The wait interface is backwards compat new ++ * code should use the NEW_WAIT defines below ++ * THESE ARE NOT BIT FIELDS ++ */ ++# define R300_WAIT_2D_CLEAN 0x3 ++# define R300_WAIT_3D_CLEAN 0x4 ++ ++# define R300_NEW_WAIT_2D_3D 0x3 ++# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 ++# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 ++# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 ++ ++#define R300_CMD_SCRATCH 8 ++#define R300_CMD_R500FP 9 ++ ++typedef union { ++ unsigned int u; ++ struct { ++ unsigned char cmd_type, pad0, pad1, pad2; ++ } header; ++ struct { ++ unsigned char cmd_type, count, reglo, reghi; ++ } packet0; ++ struct { ++ unsigned char cmd_type, count, adrlo, adrhi; ++ } vpu; ++ struct { ++ unsigned char cmd_type, packet, pad0, pad1; ++ } packet3; ++ struct { ++ unsigned char cmd_type, packet; ++ unsigned short count; /* amount of packet2 to emit */ ++ } delay; ++ struct { ++ unsigned char cmd_type, buf_idx, pad0, pad1; ++ } dma; ++ struct { ++ unsigned char cmd_type, flags, pad0, pad1; ++ } wait; ++ struct { ++ unsigned char cmd_type, reg, n_bufs, flags; ++ } scratch; ++ struct { ++ unsigned char cmd_type, count, adrlo, adrhi_flags; ++ } r500fp; ++} drm_r300_cmd_header_t; ++ ++#define RADEON_FRONT 0x1 ++#define RADEON_BACK 0x2 ++#define RADEON_DEPTH 0x4 ++#define RADEON_STENCIL 0x8 ++#define RADEON_CLEAR_FASTZ 0x80000000 ++#define RADEON_USE_HIERZ 0x40000000 ++#define RADEON_USE_COMP_ZBUF 0x20000000 ++ ++#define R500FP_CONSTANT_TYPE (1 << 1) ++#define R500FP_CONSTANT_CLAMP (1 << 2) ++ ++/* Primitive types ++ */ ++#define RADEON_POINTS 0x1 ++#define RADEON_LINES 0x2 ++#define RADEON_LINE_STRIP 0x3 ++#define RADEON_TRIANGLES 0x4 ++#define RADEON_TRIANGLE_FAN 0x5 ++#define RADEON_TRIANGLE_STRIP 0x6 ++ ++/* Vertex/indirect buffer size ++ */ ++#define RADEON_BUFFER_SIZE 65536 ++ ++/* Byte offsets for indirect buffer data ++ */ ++#define RADEON_INDEX_PRIM_OFFSET 20 ++ ++#define RADEON_SCRATCH_REG_OFFSET 32 ++ ++#define RADEON_NR_SAREA_CLIPRECTS 12 ++ ++/* There are 2 heaps (local/GART). Each region within a heap is a ++ * minimum of 64k, and there are at most 64 of them per heap. ++ */ ++#define RADEON_LOCAL_TEX_HEAP 0 ++#define RADEON_GART_TEX_HEAP 1 ++#define RADEON_NR_TEX_HEAPS 2 ++#define RADEON_NR_TEX_REGIONS 64 ++#define RADEON_LOG_TEX_GRANULARITY 16 ++ ++#define RADEON_MAX_TEXTURE_LEVELS 12 ++#define RADEON_MAX_TEXTURE_UNITS 3 ++ ++#define RADEON_MAX_SURFACES 8 ++ ++/* Blits have strict offset rules. All blit offset must be aligned on ++ * a 1K-byte boundary. 
++ */ ++#define RADEON_OFFSET_SHIFT 10 ++#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT) ++#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1) ++ ++#endif /* __RADEON_SAREA_DEFINES__ */ ++ ++typedef struct { ++ unsigned int red; ++ unsigned int green; ++ unsigned int blue; ++ unsigned int alpha; ++} radeon_color_regs_t; ++ ++typedef struct { ++ /* Context state */ ++ unsigned int pp_misc; /* 0x1c14 */ ++ unsigned int pp_fog_color; ++ unsigned int re_solid_color; ++ unsigned int rb3d_blendcntl; ++ unsigned int rb3d_depthoffset; ++ unsigned int rb3d_depthpitch; ++ unsigned int rb3d_zstencilcntl; ++ ++ unsigned int pp_cntl; /* 0x1c38 */ ++ unsigned int rb3d_cntl; ++ unsigned int rb3d_coloroffset; ++ unsigned int re_width_height; ++ unsigned int rb3d_colorpitch; ++ unsigned int se_cntl; ++ ++ /* Vertex format state */ ++ unsigned int se_coord_fmt; /* 0x1c50 */ ++ ++ /* Line state */ ++ unsigned int re_line_pattern; /* 0x1cd0 */ ++ unsigned int re_line_state; ++ ++ unsigned int se_line_width; /* 0x1db8 */ ++ ++ /* Bumpmap state */ ++ unsigned int pp_lum_matrix; /* 0x1d00 */ ++ ++ unsigned int pp_rot_matrix_0; /* 0x1d58 */ ++ unsigned int pp_rot_matrix_1; ++ ++ /* Mask state */ ++ unsigned int rb3d_stencilrefmask; /* 0x1d7c */ ++ unsigned int rb3d_ropcntl; ++ unsigned int rb3d_planemask; ++ ++ /* Viewport state */ ++ unsigned int se_vport_xscale; /* 0x1d98 */ ++ unsigned int se_vport_xoffset; ++ unsigned int se_vport_yscale; ++ unsigned int se_vport_yoffset; ++ unsigned int se_vport_zscale; ++ unsigned int se_vport_zoffset; ++ ++ /* Setup state */ ++ unsigned int se_cntl_status; /* 0x2140 */ ++ ++ /* Misc state */ ++ unsigned int re_top_left; /* 0x26c0 */ ++ unsigned int re_misc; ++} drm_radeon_context_regs_t; ++ ++typedef struct { ++ /* Zbias state */ ++ unsigned int se_zbias_factor; /* 0x1dac */ ++ unsigned int se_zbias_constant; ++} drm_radeon_context2_regs_t; ++ ++/* Setup registers for each texture unit ++ */ ++typedef struct { ++ unsigned int pp_txfilter; ++ unsigned int pp_txformat; ++ unsigned int pp_txoffset; ++ unsigned int pp_txcblend; ++ unsigned int pp_txablend; ++ unsigned int pp_tfactor; ++ unsigned int pp_border_color; ++} drm_radeon_texture_regs_t; ++ ++typedef struct { ++ unsigned int start; ++ unsigned int finish; ++ unsigned int prim:8; ++ unsigned int stateidx:8; ++ unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ ++ unsigned int vc_format; /* vertex format */ ++} drm_radeon_prim_t; ++ ++typedef struct { ++ drm_radeon_context_regs_t context; ++ drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; ++ drm_radeon_context2_regs_t context2; ++ unsigned int dirty; ++} drm_radeon_state_t; ++ ++typedef struct { ++ /* The channel for communication of state information to the ++ * kernel on firing a vertex buffer with either of the ++ * obsoleted vertex/index ioctls. ++ */ ++ drm_radeon_context_regs_t context_state; ++ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; ++ unsigned int dirty; ++ unsigned int vertsize; ++ unsigned int vc_format; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Counters for client-side throttling of rendering clients. 
++ */ ++ unsigned int last_frame; ++ unsigned int last_dispatch; ++ unsigned int last_clear; ++ ++ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + ++ 1]; ++ unsigned int tex_age[RADEON_NR_TEX_HEAPS]; ++ int ctx_owner; ++ int pfState; /* number of 3d windows (0,1,2ormore) */ ++ int pfCurrentPage; /* which buffer is being displayed? */ ++ int crtc2_base; /* CRTC2 frame offset */ ++ int tiling_enabled; /* set by drm, read by 2d + 3d clients */ ++} drm_radeon_sarea_t; ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (xf86drmRadeon.h) ++ * ++ * KW: actually it's illegal to change any of this (backwards compatibility). ++ */ ++ ++/* Radeon specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. ++ */ ++#define DRM_RADEON_CP_INIT 0x00 ++#define DRM_RADEON_CP_START 0x01 ++#define DRM_RADEON_CP_STOP 0x02 ++#define DRM_RADEON_CP_RESET 0x03 ++#define DRM_RADEON_CP_IDLE 0x04 ++#define DRM_RADEON_RESET 0x05 ++#define DRM_RADEON_FULLSCREEN 0x06 ++#define DRM_RADEON_SWAP 0x07 ++#define DRM_RADEON_CLEAR 0x08 ++#define DRM_RADEON_VERTEX 0x09 ++#define DRM_RADEON_INDICES 0x0A ++#define DRM_RADEON_NOT_USED ++#define DRM_RADEON_STIPPLE 0x0C ++#define DRM_RADEON_INDIRECT 0x0D ++#define DRM_RADEON_TEXTURE 0x0E ++#define DRM_RADEON_VERTEX2 0x0F ++#define DRM_RADEON_CMDBUF 0x10 ++#define DRM_RADEON_GETPARAM 0x11 ++#define DRM_RADEON_FLIP 0x12 ++#define DRM_RADEON_ALLOC 0x13 ++#define DRM_RADEON_FREE 0x14 ++#define DRM_RADEON_INIT_HEAP 0x15 ++#define DRM_RADEON_IRQ_EMIT 0x16 ++#define DRM_RADEON_IRQ_WAIT 0x17 ++#define DRM_RADEON_CP_RESUME 0x18 ++#define DRM_RADEON_SETPARAM 0x19 ++#define DRM_RADEON_SURF_ALLOC 0x1a ++#define DRM_RADEON_SURF_FREE 0x1b ++ ++#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) ++#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) ++#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t) ++#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET) ++#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE) ++#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET) ++#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t) ++#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP) ++#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t) ++#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t) ++#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t) ++#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t) ++#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t) ++#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t) ++#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t) ++#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t) ++#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t) ++#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP) ++#define DRM_IOCTL_RADEON_ALLOC 
DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t) ++#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t) ++#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t) ++#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t) ++#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t) ++#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME) ++#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t) ++#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t) ++#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t) ++ ++typedef struct drm_radeon_init { ++ enum { ++ RADEON_INIT_CP = 0x01, ++ RADEON_CLEANUP_CP = 0x02, ++ RADEON_INIT_R200_CP = 0x03, ++ RADEON_INIT_R300_CP = 0x04 ++ } func; ++ unsigned long sarea_priv_offset; ++ int is_pci; /* for overriding only */ ++ int cp_mode; ++ int gart_size; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ unsigned long fb_offset DEPRECATED; /* deprecated, driver asks hardware */ ++ unsigned long mmio_offset DEPRECATED; /* deprecated, driver asks hardware */ ++ unsigned long ring_offset; ++ unsigned long ring_rptr_offset; ++ unsigned long buffers_offset; ++ unsigned long gart_textures_offset; ++} drm_radeon_init_t; ++ ++typedef struct drm_radeon_cp_stop { ++ int flush; ++ int idle; ++} drm_radeon_cp_stop_t; ++ ++typedef struct drm_radeon_fullscreen { ++ enum { ++ RADEON_INIT_FULLSCREEN = 0x01, ++ RADEON_CLEANUP_FULLSCREEN = 0x02 ++ } func; ++} drm_radeon_fullscreen_t; ++ ++#define CLEAR_X1 0 ++#define CLEAR_Y1 1 ++#define CLEAR_X2 2 ++#define CLEAR_Y2 3 ++#define CLEAR_DEPTH 4 ++ ++typedef union drm_radeon_clear_rect { ++ float f[5]; ++ unsigned int ui[5]; ++} drm_radeon_clear_rect_t; ++ ++typedef struct drm_radeon_clear { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; /* misnamed field: should be stencil */ ++ drm_radeon_clear_rect_t __user *depth_boxes; ++} drm_radeon_clear_t; ++ ++typedef struct drm_radeon_vertex { ++ int prim; ++ int idx; /* Index of vertex buffer */ ++ int count; /* Number of vertices in buffer */ ++ int discard; /* Client finished with buffer? */ ++} drm_radeon_vertex_t; ++ ++typedef struct drm_radeon_indices { ++ int prim; ++ int idx; ++ int start; ++ int end; ++ int discard; /* Client finished with buffer? */ ++} drm_radeon_indices_t; ++ ++/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices ++ * - allows multiple primitives and state changes in a single ioctl ++ * - supports driver change to emit native primitives ++ */ ++typedef struct drm_radeon_vertex2 { ++ int idx; /* Index of vertex buffer */ ++ int discard; /* Client finished with buffer? 
*/ ++ int nr_states; ++ drm_radeon_state_t __user *state; ++ int nr_prims; ++ drm_radeon_prim_t __user *prim; ++} drm_radeon_vertex2_t; ++ ++/* v1.3 - obsoletes drm_radeon_vertex2 ++ * - allows arbitarily large cliprect list ++ * - allows updating of tcl packet, vector and scalar state ++ * - allows memory-efficient description of state updates ++ * - allows state to be emitted without a primitive ++ * (for clears, ctx switches) ++ * - allows more than one dma buffer to be referenced per ioctl ++ * - supports tcl driver ++ * - may be extended in future versions with new cmd types, packets ++ */ ++typedef struct drm_radeon_cmd_buffer { ++ int bufsz; ++ char __user *buf; ++ int nbox; ++ struct drm_clip_rect __user *boxes; ++} drm_radeon_cmd_buffer_t; ++ ++typedef struct drm_radeon_tex_image { ++ unsigned int x, y; /* Blit coordinates */ ++ unsigned int width, height; ++ const void __user *data; ++} drm_radeon_tex_image_t; ++ ++typedef struct drm_radeon_texture { ++ unsigned int offset; ++ int pitch; ++ int format; ++ int width; /* Texture image coordinates */ ++ int height; ++ drm_radeon_tex_image_t __user *image; ++} drm_radeon_texture_t; ++ ++typedef struct drm_radeon_stipple { ++ unsigned int __user *mask; ++} drm_radeon_stipple_t; ++ ++typedef struct drm_radeon_indirect { ++ int idx; ++ int start; ++ int end; ++ int discard; ++} drm_radeon_indirect_t; ++ ++/* enum for card type parameters */ ++#define RADEON_CARD_PCI 0 ++#define RADEON_CARD_AGP 1 ++#define RADEON_CARD_PCIE 2 ++ ++/* 1.3: An ioctl to get parameters that aren't available to the 3d ++ * client any other way. ++ */ ++#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */ ++#define RADEON_PARAM_LAST_FRAME 2 ++#define RADEON_PARAM_LAST_DISPATCH 3 ++#define RADEON_PARAM_LAST_CLEAR 4 ++/* Added with DRM version 1.6. */ ++#define RADEON_PARAM_IRQ_NR 5 ++#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */ ++/* Added with DRM version 1.8. 
*/ ++#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */ ++#define RADEON_PARAM_STATUS_HANDLE 8 ++#define RADEON_PARAM_SAREA_HANDLE 9 ++#define RADEON_PARAM_GART_TEX_HANDLE 10 ++#define RADEON_PARAM_SCRATCH_OFFSET 11 ++#define RADEON_PARAM_CARD_TYPE 12 ++#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ ++#define RADEON_PARAM_FB_LOCATION 14 /* FB location */ ++#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */ ++ ++typedef struct drm_radeon_getparam { ++ int param; ++ void __user *value; ++} drm_radeon_getparam_t; ++ ++/* 1.6: Set up a memory manager for regions of shared memory: ++ */ ++#define RADEON_MEM_REGION_GART 1 ++#define RADEON_MEM_REGION_FB 2 ++ ++typedef struct drm_radeon_mem_alloc { ++ int region; ++ int alignment; ++ int size; ++ int __user *region_offset; /* offset from start of fb or GART */ ++} drm_radeon_mem_alloc_t; ++ ++typedef struct drm_radeon_mem_free { ++ int region; ++ int region_offset; ++} drm_radeon_mem_free_t; ++ ++typedef struct drm_radeon_mem_init_heap { ++ int region; ++ int size; ++ int start; ++} drm_radeon_mem_init_heap_t; ++ ++/* 1.6: Userspace can request & wait on irq's: ++ */ ++typedef struct drm_radeon_irq_emit { ++ int __user *irq_seq; ++} drm_radeon_irq_emit_t; ++ ++typedef struct drm_radeon_irq_wait { ++ int irq_seq; ++} drm_radeon_irq_wait_t; ++ ++/* 1.10: Clients tell the DRM where they think the framebuffer is located in ++ * the card's address space, via a new generic ioctl to set parameters ++ */ ++ ++typedef struct drm_radeon_setparam { ++ unsigned int param; ++ int64_t value; ++} drm_radeon_setparam_t; ++ ++#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */ ++#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */ ++#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */ ++ ++#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ ++#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */ ++#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */ ++/* 1.14: Clients can allocate/free a surface ++ */ ++typedef struct drm_radeon_surface_alloc { ++ unsigned int address; ++ unsigned int size; ++ unsigned int flags; ++} drm_radeon_surface_alloc_t; ++ ++typedef struct drm_radeon_surface_free { ++ unsigned int address; ++} drm_radeon_surface_free_t; ++ ++#define DRM_RADEON_VBLANK_CRTC1 1 ++#define DRM_RADEON_VBLANK_CRTC2 2 ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,157 @@ ++/** ++ * \file radeon_drv.c ++ * ATI Radeon driver ++ * ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "drm_pciids.h" ++ ++int radeon_no_wb; ++ ++MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); ++module_param_named(no_wb, radeon_no_wb, int, 0444); ++ ++static int dri_library_name(struct drm_device * dev, char * buf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int family = dev_priv->flags & RADEON_FAMILY_MASK; ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", ++ (family < CHIP_R200) ? "radeon" : ++ ((family < CHIP_R300) ? "r200" : ++ "r300")); ++} ++ ++static int radeon_suspend(struct drm_device *dev, pm_message_t state) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ /* Disable *all* interrupts */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ return 0; ++} ++ ++static int radeon_resume(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ /* Restore interrupt registers */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); ++ return 0; ++} ++ ++static struct pci_device_id pciidlist[] = { ++ radeon_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, ++ .dev_priv_size = sizeof(drm_radeon_buf_priv_t), ++ .load = radeon_driver_load, ++ .firstopen = radeon_driver_firstopen, ++ .open = radeon_driver_open, ++ .preclose = radeon_driver_preclose, ++ .postclose = radeon_driver_postclose, ++ .lastclose = radeon_driver_lastclose, ++ .unload = radeon_driver_unload, ++ .suspend = radeon_suspend, ++ .resume = radeon_resume, ++ .get_vblank_counter = radeon_get_vblank_counter, ++ .enable_vblank = radeon_enable_vblank, ++ .disable_vblank = radeon_disable_vblank, ++ .dri_library_name = dri_library_name, ++ .irq_preinstall = radeon_driver_irq_preinstall, ++ .irq_postinstall = radeon_driver_irq_postinstall, ++ .irq_uninstall = radeon_driver_irq_uninstall, ++ .irq_handler = radeon_driver_irq_handler, ++ .reclaim_buffers = 
drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = radeon_ioctls, ++ .dma_ioctl = radeon_cp_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = radeon_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init radeon_init(void) ++{ ++ driver.num_ioctls = radeon_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit radeon_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(radeon_init); ++module_exit(radeon_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1443 @@ ++/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*- ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Kevin E. Martin ++ * Gareth Hughes ++ */ ++ ++#ifndef __RADEON_DRV_H__ ++#define __RADEON_DRV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." ++ ++#define DRIVER_NAME "radeon" ++#define DRIVER_DESC "ATI Radeon" ++#define DRIVER_DATE "20080613" ++ ++/* Interface history: ++ * ++ * 1.1 - ?? 
++ * 1.2 - Add vertex2 ioctl (keith) ++ * - Add stencil capability to clear ioctl (gareth, keith) ++ * - Increase MAX_TEXTURE_LEVELS (brian) ++ * 1.3 - Add cmdbuf ioctl (keith) ++ * - Add support for new radeon packets (keith) ++ * - Add getparam ioctl (keith) ++ * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). ++ * 1.4 - Add scratch registers to get_param ioctl. ++ * 1.5 - Add r200 packets to cmdbuf ioctl ++ * - Add r200 function to init ioctl ++ * - Add 'scalar2' instruction to cmdbuf ++ * 1.6 - Add static GART memory manager ++ * Add irq handler (won't be turned on unless X server knows to) ++ * Add irq ioctls and irq_active getparam. ++ * Add wait command for cmdbuf ioctl ++ * Add GART offset query for getparam ++ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] ++ * and R200_PP_CUBIC_OFFSET_F1_[0..5]. ++ * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and ++ * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) ++ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith) ++ * Add 'GET' queries for starting additional clients on different VT's. ++ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl. ++ * Add texture rectangle support for r100. ++ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which ++ * clients use to tell the DRM where they think the framebuffer is ++ * located in the card's address space ++ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color ++ * and GL_EXT_blend_[func|equation]_separate on r200 ++ * 1.12- Add R300 CP microcode support - this just loads the CP on r300 ++ * (No 3D support yet - just microcode loading). ++ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters ++ * - Add hyperz support, add hyperz flags to clear ioctl. ++ * 1.14- Add support for color tiling ++ * - Add R100/R200 surface allocation/free support ++ * 1.15- Add support for texture micro tiling ++ * - Add support for r100 cube maps ++ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear ++ * texture filtering on r200 ++ * 1.17- Add initial support for R300 (3D). 
++ * 1.18- Add support for GL_ATI_fragment_shader, new packets ++ * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces ++ * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR ++ * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) ++ * 1.19- Add support for gart table in FB memory and PCIE r300 ++ * 1.20- Add support for r300 texrect ++ * 1.21- Add support for card type getparam ++ * 1.22- Add support for texture cache flushes (R300_TX_CNTL) ++ * 1.23- Add new radeon memory map work from benh ++ * 1.24- Add general-purpose packet for manipulating scratch registers (r300) ++ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL, ++ * new packet type) ++ * 1.26- Add support for variable size PCI(E) gart aperture ++ * 1.27- Add support for IGP GART ++ * 1.28- Add support for VBL on CRTC2 ++ * 1.29- R500 3D cmd buffer support ++ */ ++ ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 29 ++#define DRIVER_PATCHLEVEL 0 ++ ++/* ++ * Radeon chip families ++ */ ++enum radeon_family { ++ CHIP_R100, ++ CHIP_RV100, ++ CHIP_RS100, ++ CHIP_RV200, ++ CHIP_RS200, ++ CHIP_R200, ++ CHIP_RV250, ++ CHIP_RS300, ++ CHIP_RV280, ++ CHIP_R300, ++ CHIP_R350, ++ CHIP_RV350, ++ CHIP_RV380, ++ CHIP_R420, ++ CHIP_RV410, ++ CHIP_RS400, ++ CHIP_RS480, ++ CHIP_RS690, ++ CHIP_RV515, ++ CHIP_R520, ++ CHIP_RV530, ++ CHIP_RV560, ++ CHIP_RV570, ++ CHIP_R580, ++ CHIP_LAST, ++}; ++ ++/* ++ * Chip flags ++ */ ++enum radeon_chip_flags { ++ RADEON_FAMILY_MASK = 0x0000ffffUL, ++ RADEON_FLAGS_MASK = 0xffff0000UL, ++ RADEON_IS_MOBILITY = 0x00010000UL, ++ RADEON_IS_IGP = 0x00020000UL, ++ RADEON_SINGLE_CRTC = 0x00040000UL, ++ RADEON_IS_AGP = 0x00080000UL, ++ RADEON_HAS_HIERZ = 0x00100000UL, ++ RADEON_IS_PCIE = 0x00200000UL, ++ RADEON_NEW_MEMMAP = 0x00400000UL, ++ RADEON_IS_PCI = 0x00800000UL, ++ RADEON_IS_IGPGART = 0x01000000UL, ++}; ++ ++#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? 
\ ++ DRM_READ32( (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR)) ++#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) ) ++ ++typedef struct drm_radeon_freelist { ++ unsigned int age; ++ struct drm_buf *buf; ++ struct drm_radeon_freelist *next; ++ struct drm_radeon_freelist *prev; ++} drm_radeon_freelist_t; ++ ++typedef struct drm_radeon_ring_buffer { ++ u32 *start; ++ u32 *end; ++ int size; /* Double Words */ ++ int size_l2qw; /* log2 Quad Words */ ++ ++ int rptr_update; /* Double Words */ ++ int rptr_update_l2qw; /* log2 Quad Words */ ++ ++ int fetch_size; /* Double Words */ ++ int fetch_size_l2ow; /* log2 Oct Words */ ++ ++ u32 tail; ++ u32 tail_mask; ++ int space; ++ ++ int high_mark; ++} drm_radeon_ring_buffer_t; ++ ++typedef struct drm_radeon_depth_clear_t { ++ u32 rb3d_cntl; ++ u32 rb3d_zstencilcntl; ++ u32 se_cntl; ++} drm_radeon_depth_clear_t; ++ ++struct drm_radeon_driver_file_fields { ++ int64_t radeon_fb_delta; ++}; ++ ++struct mem_block { ++ struct mem_block *next; ++ struct mem_block *prev; ++ int start; ++ int size; ++ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ ++}; ++ ++struct radeon_surface { ++ int refcount; ++ u32 lower; ++ u32 upper; ++ u32 flags; ++}; ++ ++struct radeon_virt_surface { ++ int surface_index; ++ u32 lower; ++ u32 upper; ++ u32 flags; ++ struct drm_file *file_priv; ++}; ++ ++#define RADEON_FLUSH_EMITED (1 < 0) ++#define RADEON_PURGE_EMITED (1 < 1) ++ ++typedef struct drm_radeon_private { ++ ++ drm_radeon_ring_buffer_t ring; ++ drm_radeon_sarea_t *sarea_priv; ++ ++ u32 fb_location; ++ u32 fb_size; ++ int new_memmap; ++ ++ int gart_size; ++ u32 gart_vm_start; ++ unsigned long gart_buffers_offset; ++ ++ int cp_mode; ++ int cp_running; ++ ++ drm_radeon_freelist_t *head; ++ drm_radeon_freelist_t *tail; ++ int last_buf; ++ volatile u32 *scratch; ++ int writeback_works; ++ ++ int usec_timeout; ++ ++ struct { ++ u32 boxes; ++ int freelist_timeouts; ++ int freelist_loops; ++ int requested_bufs; ++ int last_frame_reads; ++ int last_clear_reads; ++ int clears; ++ int texture_uploads; ++ } stats; ++ ++ int do_boxes; ++ int page_flipping; ++ ++ u32 color_fmt; ++ unsigned int front_offset; ++ unsigned int front_pitch; ++ unsigned int back_offset; ++ unsigned int back_pitch; ++ ++ u32 depth_fmt; ++ unsigned int depth_offset; ++ unsigned int depth_pitch; ++ ++ u32 front_pitch_offset; ++ u32 back_pitch_offset; ++ u32 depth_pitch_offset; ++ ++ drm_radeon_depth_clear_t depth_clear; ++ ++ unsigned long ring_offset; ++ unsigned long ring_rptr_offset; ++ unsigned long buffers_offset; ++ unsigned long gart_textures_offset; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio; ++ drm_local_map_t *cp_ring; ++ drm_local_map_t *ring_rptr; ++ drm_local_map_t *gart_textures; ++ ++ struct mem_block *gart_heap; ++ struct mem_block *fb_heap; ++ ++ /* SW interrupt */ ++ wait_queue_head_t swi_queue; ++ atomic_t swi_emitted; ++ int vblank_crtc; ++ uint32_t irq_enable_reg; ++ int irq_enabled; ++ uint32_t r500_disp_irq_reg; ++ ++ struct radeon_surface surfaces[RADEON_MAX_SURFACES]; ++ struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES]; ++ ++ unsigned long pcigart_offset; ++ unsigned int pcigart_offset_set; ++ struct drm_ati_pcigart_info gart_info; ++ ++ u32 scratch_ages[5]; ++ ++ unsigned int crtc_last_cnt; ++ unsigned int crtc2_last_cnt; ++ ++ /* starting from here on, data is preserved accross an open */ ++ uint32_t flags; /* see radeon_chip_flags */ ++ unsigned long fb_aper_offset; ++ ++ int 
num_gb_pipes; ++ int track_flush; ++ uint32_t chip_family; /* extract from flags */ ++} drm_radeon_private_t; ++ ++typedef struct drm_radeon_buf_priv { ++ u32 age; ++} drm_radeon_buf_priv_t; ++ ++typedef struct drm_radeon_kcmd_buffer { ++ int bufsz; ++ char *buf; ++ int nbox; ++ struct drm_clip_rect __user *boxes; ++} drm_radeon_kcmd_buffer_t; ++ ++extern int radeon_no_wb; ++extern struct drm_ioctl_desc radeon_ioctls[]; ++extern int radeon_max_ioctl; ++ ++/* Check whether the given hardware address is inside the framebuffer or the ++ * GART area. ++ */ ++static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv, ++ u64 off) ++{ ++ u32 fb_start = dev_priv->fb_location; ++ u32 fb_end = fb_start + dev_priv->fb_size - 1; ++ u32 gart_start = dev_priv->gart_vm_start; ++ u32 gart_end = gart_start + dev_priv->gart_size - 1; ++ ++ return ((off >= fb_start && off <= fb_end) || ++ (off >= gart_start && off <= gart_end)); ++} ++ ++ /* radeon_cp.c */ ++extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv); ++ ++extern void radeon_freelist_reset(struct drm_device * dev); ++extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); ++ ++extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); ++ ++extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv); ++ ++extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern void radeon_mem_takedown(struct mem_block **heap); ++extern void radeon_mem_release(struct drm_file *file_priv, ++ struct mem_block *heap); ++ ++ /* radeon_irq.c */ ++extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state); ++extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); ++ ++extern void radeon_do_release(struct drm_device * dev); ++extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int radeon_enable_vblank(struct drm_device *dev, int crtc); ++extern void radeon_disable_vblank(struct drm_device *dev, int crtc); ++extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); ++extern void radeon_driver_irq_preinstall(struct drm_device * dev); ++extern int radeon_driver_irq_postinstall(struct drm_device * dev); ++extern void radeon_driver_irq_uninstall(struct drm_device * dev); ++extern int radeon_vblank_crtc_get(struct drm_device *dev); ++extern int radeon_vblank_crtc_set(struct 
drm_device *dev, int64_t value); ++ ++extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); ++extern int radeon_driver_unload(struct drm_device *dev); ++extern int radeon_driver_firstopen(struct drm_device *dev); ++extern void radeon_driver_preclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern void radeon_driver_postclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern void radeon_driver_lastclose(struct drm_device * dev); ++extern int radeon_driver_open(struct drm_device * dev, ++ struct drm_file * file_priv); ++extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++/* r300_cmdbuf.c */ ++extern void r300_init_reg_flags(struct drm_device *dev); ++ ++extern int r300_do_cp_cmdbuf(struct drm_device *dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf); ++ ++/* Flags for stats.boxes ++ */ ++#define RADEON_BOX_DMA_IDLE 0x1 ++#define RADEON_BOX_RING_FULL 0x2 ++#define RADEON_BOX_FLIP 0x4 ++#define RADEON_BOX_WAIT_IDLE 0x8 ++#define RADEON_BOX_TEXTURE_LOAD 0x10 ++ ++/* Register definitions, register access macros and drmAddMap constants ++ * for Radeon kernel driver. ++ */ ++#define RADEON_AGP_COMMAND 0x0f60 ++#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ ++# define RADEON_AGP_ENABLE (1<<8) ++#define RADEON_AUX_SCISSOR_CNTL 0x26f0 ++# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) ++# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) ++# define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26) ++# define RADEON_SCISSOR_0_ENABLE (1 << 28) ++# define RADEON_SCISSOR_1_ENABLE (1 << 29) ++# define RADEON_SCISSOR_2_ENABLE (1 << 30) ++ ++#define RADEON_BUS_CNTL 0x0030 ++# define RADEON_BUS_MASTER_DIS (1 << 6) ++ ++#define RADEON_CLOCK_CNTL_DATA 0x000c ++# define RADEON_PLL_WR_EN (1 << 7) ++#define RADEON_CLOCK_CNTL_INDEX 0x0008 ++#define RADEON_CONFIG_APER_SIZE 0x0108 ++#define RADEON_CONFIG_MEMSIZE 0x00f8 ++#define RADEON_CRTC_OFFSET 0x0224 ++#define RADEON_CRTC_OFFSET_CNTL 0x0228 ++# define RADEON_CRTC_TILE_EN (1 << 15) ++# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) ++#define RADEON_CRTC2_OFFSET 0x0324 ++#define RADEON_CRTC2_OFFSET_CNTL 0x0328 ++ ++#define RADEON_PCIE_INDEX 0x0030 ++#define RADEON_PCIE_DATA 0x0034 ++#define RADEON_PCIE_TX_GART_CNTL 0x10 ++# define RADEON_PCIE_TX_GART_EN (1 << 0) ++# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1) ++# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1) ++# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1) ++# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3) ++# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3) ++# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5) ++# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8) ++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11 ++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 ++#define RADEON_PCIE_TX_GART_BASE 0x13 ++#define RADEON_PCIE_TX_GART_START_LO 0x14 ++#define RADEON_PCIE_TX_GART_START_HI 0x15 ++#define RADEON_PCIE_TX_GART_END_LO 0x16 ++#define RADEON_PCIE_TX_GART_END_HI 0x17 ++ ++#define RS480_NB_MC_INDEX 0x168 ++# define RS480_NB_MC_IND_WR_EN (1 << 8) ++#define RS480_NB_MC_DATA 0x16c ++ ++#define RS690_MC_INDEX 0x78 ++# define RS690_MC_INDEX_MASK 0x1ff ++# define RS690_MC_INDEX_WR_EN (1 << 9) ++# define RS690_MC_INDEX_WR_ACK 0x7f ++#define RS690_MC_DATA 0x7c ++ ++/* MC indirect registers */ ++#define RS480_MC_MISC_CNTL 0x18 ++# define RS480_DISABLE_GTW (1 << 1) ++/* switch between MCIND GART and MM GART registers. 
0 = mmgart, 1 = mcind gart */ ++# define RS480_GART_INDEX_REG_EN (1 << 12) ++# define RS690_BLOCK_GFX_D3_EN (1 << 14) ++#define RS480_K8_FB_LOCATION 0x1e ++#define RS480_GART_FEATURE_ID 0x2b ++# define RS480_HANG_EN (1 << 11) ++# define RS480_TLB_ENABLE (1 << 18) ++# define RS480_P2P_ENABLE (1 << 19) ++# define RS480_GTW_LAC_EN (1 << 25) ++# define RS480_2LEVEL_GART (0 << 30) ++# define RS480_1LEVEL_GART (1 << 30) ++# define RS480_PDC_EN (1 << 31) ++#define RS480_GART_BASE 0x2c ++#define RS480_GART_CACHE_CNTRL 0x2e ++# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */ ++#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38 ++# define RS480_GART_EN (1 << 0) ++# define RS480_VA_SIZE_32MB (0 << 1) ++# define RS480_VA_SIZE_64MB (1 << 1) ++# define RS480_VA_SIZE_128MB (2 << 1) ++# define RS480_VA_SIZE_256MB (3 << 1) ++# define RS480_VA_SIZE_512MB (4 << 1) ++# define RS480_VA_SIZE_1GB (5 << 1) ++# define RS480_VA_SIZE_2GB (6 << 1) ++#define RS480_AGP_MODE_CNTL 0x39 ++# define RS480_POST_GART_Q_SIZE (1 << 18) ++# define RS480_NONGART_SNOOP (1 << 19) ++# define RS480_AGP_RD_BUF_SIZE (1 << 20) ++# define RS480_REQ_TYPE_SNOOP_SHIFT 22 ++# define RS480_REQ_TYPE_SNOOP_MASK 0x3 ++# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24) ++#define RS480_MC_MISC_UMA_CNTL 0x5f ++#define RS480_MC_MCLK_CNTL 0x7a ++#define RS480_MC_UMA_DUALCH_CNTL 0x86 ++ ++#define RS690_MC_FB_LOCATION 0x100 ++#define RS690_MC_AGP_LOCATION 0x101 ++#define RS690_MC_AGP_BASE 0x102 ++#define RS690_MC_AGP_BASE_2 0x103 ++ ++#define R520_MC_IND_INDEX 0x70 ++#define R520_MC_IND_WR_EN (1 << 24) ++#define R520_MC_IND_DATA 0x74 ++ ++#define RV515_MC_FB_LOCATION 0x01 ++#define RV515_MC_AGP_LOCATION 0x02 ++#define RV515_MC_AGP_BASE 0x03 ++#define RV515_MC_AGP_BASE_2 0x04 ++ ++#define R520_MC_FB_LOCATION 0x04 ++#define R520_MC_AGP_LOCATION 0x05 ++#define R520_MC_AGP_BASE 0x06 ++#define R520_MC_AGP_BASE_2 0x07 ++ ++#define RADEON_MPP_TB_CONFIG 0x01c0 ++#define RADEON_MEM_CNTL 0x0140 ++#define RADEON_MEM_SDRAM_MODE_REG 0x0158 ++#define RADEON_AGP_BASE_2 0x015c /* r200+ only */ ++#define RS480_AGP_BASE_2 0x0164 ++#define RADEON_AGP_BASE 0x0170 ++ ++/* pipe config regs */ ++#define R400_GB_PIPE_SELECT 0x402c ++#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ ++#define R500_SU_REG_DEST 0x42c8 ++#define R300_GB_TILE_CONFIG 0x4018 ++# define R300_ENABLE_TILING (1 << 0) ++# define R300_PIPE_COUNT_RV350 (0 << 1) ++# define R300_PIPE_COUNT_R300 (3 << 1) ++# define R300_PIPE_COUNT_R420_3P (6 << 1) ++# define R300_PIPE_COUNT_R420 (7 << 1) ++# define R300_TILE_SIZE_8 (0 << 4) ++# define R300_TILE_SIZE_16 (1 << 4) ++# define R300_TILE_SIZE_32 (2 << 4) ++# define R300_SUBPIXEL_1_12 (0 << 16) ++# define R300_SUBPIXEL_1_16 (1 << 16) ++#define R300_DST_PIPE_CONFIG 0x170c ++# define R300_PIPE_AUTO_CONFIG (1 << 31) ++#define R300_RB2D_DSTCACHE_MODE 0x3428 ++# define R300_DC_AUTOFLUSH_ENABLE (1 << 8) ++# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17) ++ ++#define RADEON_RB3D_COLOROFFSET 0x1c40 ++#define RADEON_RB3D_COLORPITCH 0x1c48 ++ ++#define RADEON_SRC_X_Y 0x1590 ++ ++#define RADEON_DP_GUI_MASTER_CNTL 0x146c ++# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) ++# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) ++# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) ++# define RADEON_GMC_BRUSH_NONE (15 << 4) ++# define RADEON_GMC_DST_16BPP (4 << 8) ++# define RADEON_GMC_DST_24BPP (5 << 8) ++# define RADEON_GMC_DST_32BPP (6 << 8) ++# define RADEON_GMC_DST_DATATYPE_SHIFT 8 ++# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) ++# define RADEON_DP_SRC_SOURCE_MEMORY (2 
<< 24) ++# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) ++# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) ++# define RADEON_GMC_WR_MSK_DIS (1 << 30) ++# define RADEON_ROP3_S 0x00cc0000 ++# define RADEON_ROP3_P 0x00f00000 ++#define RADEON_DP_WRITE_MASK 0x16cc ++#define RADEON_SRC_PITCH_OFFSET 0x1428 ++#define RADEON_DST_PITCH_OFFSET 0x142c ++#define RADEON_DST_PITCH_OFFSET_C 0x1c80 ++# define RADEON_DST_TILE_LINEAR (0 << 30) ++# define RADEON_DST_TILE_MACRO (1 << 30) ++# define RADEON_DST_TILE_MICRO (2 << 30) ++# define RADEON_DST_TILE_BOTH (3 << 30) ++ ++#define RADEON_SCRATCH_REG0 0x15e0 ++#define RADEON_SCRATCH_REG1 0x15e4 ++#define RADEON_SCRATCH_REG2 0x15e8 ++#define RADEON_SCRATCH_REG3 0x15ec ++#define RADEON_SCRATCH_REG4 0x15f0 ++#define RADEON_SCRATCH_REG5 0x15f4 ++#define RADEON_SCRATCH_UMSK 0x0770 ++#define RADEON_SCRATCH_ADDR 0x0774 ++ ++#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x)) ++ ++#define GET_SCRATCH( x ) (dev_priv->writeback_works \ ++ ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \ ++ : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) ++ ++#define RADEON_CRTC_CRNT_FRAME 0x0214 ++#define RADEON_CRTC2_CRNT_FRAME 0x0314 ++ ++#define RADEON_CRTC_STATUS 0x005c ++#define RADEON_CRTC2_STATUS 0x03fc ++ ++#define RADEON_GEN_INT_CNTL 0x0040 ++# define RADEON_CRTC_VBLANK_MASK (1 << 0) ++# define RADEON_CRTC2_VBLANK_MASK (1 << 9) ++# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19) ++# define RADEON_SW_INT_ENABLE (1 << 25) ++ ++#define RADEON_GEN_INT_STATUS 0x0044 ++# define RADEON_CRTC_VBLANK_STAT (1 << 0) ++# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) ++# define RADEON_CRTC2_VBLANK_STAT (1 << 9) ++# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) ++# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) ++# define RADEON_SW_INT_TEST (1 << 25) ++# define RADEON_SW_INT_TEST_ACK (1 << 25) ++# define RADEON_SW_INT_FIRE (1 << 26) ++# define R500_DISPLAY_INT_STATUS (1 << 0) ++ ++ ++#define RADEON_HOST_PATH_CNTL 0x0130 ++# define RADEON_HDP_SOFT_RESET (1 << 26) ++# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) ++# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28) ++ ++#define RADEON_ISYNC_CNTL 0x1724 ++# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) ++# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1) ++# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2) ++# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3) ++# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) ++# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) ++ ++#define RADEON_RBBM_GUICNTL 0x172c ++# define RADEON_HOST_DATA_SWAP_NONE (0 << 0) ++# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) ++# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) ++# define RADEON_HOST_DATA_SWAP_HDW (3 << 0) ++ ++#define RADEON_MC_AGP_LOCATION 0x014c ++#define RADEON_MC_FB_LOCATION 0x0148 ++#define RADEON_MCLK_CNTL 0x0012 ++# define RADEON_FORCEON_MCLKA (1 << 16) ++# define RADEON_FORCEON_MCLKB (1 << 17) ++# define RADEON_FORCEON_YCLKA (1 << 18) ++# define RADEON_FORCEON_YCLKB (1 << 19) ++# define RADEON_FORCEON_MC (1 << 20) ++# define RADEON_FORCEON_AIC (1 << 21) ++ ++#define RADEON_PP_BORDER_COLOR_0 0x1d40 ++#define RADEON_PP_BORDER_COLOR_1 0x1d44 ++#define RADEON_PP_BORDER_COLOR_2 0x1d48 ++#define RADEON_PP_CNTL 0x1c38 ++# define RADEON_SCISSOR_ENABLE (1 << 1) ++#define RADEON_PP_LUM_MATRIX 0x1d00 ++#define RADEON_PP_MISC 0x1c14 ++#define RADEON_PP_ROT_MATRIX_0 0x1d58 ++#define RADEON_PP_TXFILTER_0 0x1c54 ++#define RADEON_PP_TXOFFSET_0 0x1c5c ++#define RADEON_PP_TXFILTER_1 0x1c6c ++#define RADEON_PP_TXFILTER_2 0x1c84 ++ ++#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use 
R300_DSTCACHE_CTLSTAT */ ++#define R300_DSTCACHE_CTLSTAT 0x1714 ++# define R300_RB2D_DC_FLUSH (3 << 0) ++# define R300_RB2D_DC_FREE (3 << 2) ++# define R300_RB2D_DC_FLUSH_ALL 0xf ++# define R300_RB2D_DC_BUSY (1 << 31) ++#define RADEON_RB3D_CNTL 0x1c3c ++# define RADEON_ALPHA_BLEND_ENABLE (1 << 0) ++# define RADEON_PLANE_MASK_ENABLE (1 << 1) ++# define RADEON_DITHER_ENABLE (1 << 2) ++# define RADEON_ROUND_ENABLE (1 << 3) ++# define RADEON_SCALE_DITHER_ENABLE (1 << 4) ++# define RADEON_DITHER_INIT (1 << 5) ++# define RADEON_ROP_ENABLE (1 << 6) ++# define RADEON_STENCIL_ENABLE (1 << 7) ++# define RADEON_Z_ENABLE (1 << 8) ++# define RADEON_ZBLOCK16 (1 << 15) ++#define RADEON_RB3D_DEPTHOFFSET 0x1c24 ++#define RADEON_RB3D_DEPTHCLEARVALUE 0x3230 ++#define RADEON_RB3D_DEPTHPITCH 0x1c28 ++#define RADEON_RB3D_PLANEMASK 0x1d84 ++#define RADEON_RB3D_STENCILREFMASK 0x1d7c ++#define RADEON_RB3D_ZCACHE_MODE 0x3250 ++#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 ++# define RADEON_RB3D_ZC_FLUSH (1 << 0) ++# define RADEON_RB3D_ZC_FREE (1 << 2) ++# define RADEON_RB3D_ZC_FLUSH_ALL 0x5 ++# define RADEON_RB3D_ZC_BUSY (1 << 31) ++#define R300_ZB_ZCACHE_CTLSTAT 0x4f18 ++# define R300_ZC_FLUSH (1 << 0) ++# define R300_ZC_FREE (1 << 1) ++# define R300_ZC_BUSY (1 << 31) ++#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c ++# define RADEON_RB3D_DC_FLUSH (3 << 0) ++# define RADEON_RB3D_DC_FREE (3 << 2) ++# define RADEON_RB3D_DC_FLUSH_ALL 0xf ++# define RADEON_RB3D_DC_BUSY (1 << 31) ++#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c ++# define R300_RB3D_DC_FLUSH (2 << 0) ++# define R300_RB3D_DC_FREE (2 << 2) ++# define R300_RB3D_DC_FINISH (1 << 4) ++#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c ++# define RADEON_Z_TEST_MASK (7 << 4) ++# define RADEON_Z_TEST_ALWAYS (7 << 4) ++# define RADEON_Z_HIERARCHY_ENABLE (1 << 8) ++# define RADEON_STENCIL_TEST_ALWAYS (7 << 12) ++# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) ++# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) ++# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) ++# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) ++# define RADEON_FORCE_Z_DIRTY (1 << 29) ++# define RADEON_Z_WRITE_ENABLE (1 << 30) ++# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) ++#define RADEON_RBBM_SOFT_RESET 0x00f0 ++# define RADEON_SOFT_RESET_CP (1 << 0) ++# define RADEON_SOFT_RESET_HI (1 << 1) ++# define RADEON_SOFT_RESET_SE (1 << 2) ++# define RADEON_SOFT_RESET_RE (1 << 3) ++# define RADEON_SOFT_RESET_PP (1 << 4) ++# define RADEON_SOFT_RESET_E2 (1 << 5) ++# define RADEON_SOFT_RESET_RB (1 << 6) ++# define RADEON_SOFT_RESET_HDP (1 << 7) ++/* ++ * 6:0 Available slots in the FIFO ++ * 8 Host Interface active ++ * 9 CP request active ++ * 10 FIFO request active ++ * 11 Host Interface retry active ++ * 12 CP retry active ++ * 13 FIFO retry active ++ * 14 FIFO pipeline busy ++ * 15 Event engine busy ++ * 16 CP command stream busy ++ * 17 2D engine busy ++ * 18 2D portion of render backend busy ++ * 20 3D setup engine busy ++ * 26 GA engine busy ++ * 27 CBA 2D engine busy ++ * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or ++ * command stream queue not empty or Ring Buffer not empty ++ */ ++#define RADEON_RBBM_STATUS 0x0e40 ++/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. 
*/ ++/* #define RADEON_RBBM_STATUS 0x1740 */ ++/* bits 6:0 are dword slots available in the cmd fifo */ ++# define RADEON_RBBM_FIFOCNT_MASK 0x007f ++# define RADEON_HIRQ_ON_RBB (1 << 8) ++# define RADEON_CPRQ_ON_RBB (1 << 9) ++# define RADEON_CFRQ_ON_RBB (1 << 10) ++# define RADEON_HIRQ_IN_RTBUF (1 << 11) ++# define RADEON_CPRQ_IN_RTBUF (1 << 12) ++# define RADEON_CFRQ_IN_RTBUF (1 << 13) ++# define RADEON_PIPE_BUSY (1 << 14) ++# define RADEON_ENG_EV_BUSY (1 << 15) ++# define RADEON_CP_CMDSTRM_BUSY (1 << 16) ++# define RADEON_E2_BUSY (1 << 17) ++# define RADEON_RB2D_BUSY (1 << 18) ++# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */ ++# define RADEON_VAP_BUSY (1 << 20) ++# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */ ++# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */ ++# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */ ++# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */ ++# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */ ++# define RADEON_GA_BUSY (1 << 26) ++# define RADEON_CBA2D_BUSY (1 << 27) ++# define RADEON_RBBM_ACTIVE (1 << 31) ++#define RADEON_RE_LINE_PATTERN 0x1cd0 ++#define RADEON_RE_MISC 0x26c4 ++#define RADEON_RE_TOP_LEFT 0x26c0 ++#define RADEON_RE_WIDTH_HEIGHT 0x1c44 ++#define RADEON_RE_STIPPLE_ADDR 0x1cc8 ++#define RADEON_RE_STIPPLE_DATA 0x1ccc ++ ++#define RADEON_SCISSOR_TL_0 0x1cd8 ++#define RADEON_SCISSOR_BR_0 0x1cdc ++#define RADEON_SCISSOR_TL_1 0x1ce0 ++#define RADEON_SCISSOR_BR_1 0x1ce4 ++#define RADEON_SCISSOR_TL_2 0x1ce8 ++#define RADEON_SCISSOR_BR_2 0x1cec ++#define RADEON_SE_COORD_FMT 0x1c50 ++#define RADEON_SE_CNTL 0x1c4c ++# define RADEON_FFACE_CULL_CW (0 << 0) ++# define RADEON_BFACE_SOLID (3 << 1) ++# define RADEON_FFACE_SOLID (3 << 3) ++# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) ++# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) ++# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) ++# define RADEON_ALPHA_SHADE_FLAT (1 << 10) ++# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) ++# define RADEON_SPECULAR_SHADE_FLAT (1 << 12) ++# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) ++# define RADEON_FOG_SHADE_FLAT (1 << 14) ++# define RADEON_FOG_SHADE_GOURAUD (2 << 14) ++# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) ++# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) ++# define RADEON_VTX_PIX_CENTER_OGL (1 << 27) ++# define RADEON_ROUND_MODE_TRUNC (0 << 28) ++# define RADEON_ROUND_PREC_8TH_PIX (1 << 30) ++#define RADEON_SE_CNTL_STATUS 0x2140 ++#define RADEON_SE_LINE_WIDTH 0x1db8 ++#define RADEON_SE_VPORT_XSCALE 0x1d98 ++#define RADEON_SE_ZBIAS_FACTOR 0x1db0 ++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 ++#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 ++#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 ++# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 ++# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 ++#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 ++#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 ++# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 ++#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C ++#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 ++#define RADEON_SURFACE_ACCESS_CLR 0x0bfc ++#define RADEON_SURFACE_CNTL 0x0b00 ++# define RADEON_SURF_TRANSLATION_DIS (1 << 8) ++# define RADEON_NONSURF_AP0_SWP_MASK (3 << 20) ++# define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20) ++# define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20) ++# define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20) ++# define RADEON_NONSURF_AP1_SWP_MASK (3 << 22) ++# define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22) ++# define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22) ++# define 
RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22) ++#define RADEON_SURFACE0_INFO 0x0b0c ++# define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0) ++# define RADEON_SURF_TILE_MODE_MASK (3 << 16) ++# define RADEON_SURF_TILE_MODE_MACRO (0 << 16) ++# define RADEON_SURF_TILE_MODE_MICRO (1 << 16) ++# define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16) ++# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16) ++#define RADEON_SURFACE0_LOWER_BOUND 0x0b04 ++#define RADEON_SURFACE0_UPPER_BOUND 0x0b08 ++# define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0) ++#define RADEON_SURFACE1_INFO 0x0b1c ++#define RADEON_SURFACE1_LOWER_BOUND 0x0b14 ++#define RADEON_SURFACE1_UPPER_BOUND 0x0b18 ++#define RADEON_SURFACE2_INFO 0x0b2c ++#define RADEON_SURFACE2_LOWER_BOUND 0x0b24 ++#define RADEON_SURFACE2_UPPER_BOUND 0x0b28 ++#define RADEON_SURFACE3_INFO 0x0b3c ++#define RADEON_SURFACE3_LOWER_BOUND 0x0b34 ++#define RADEON_SURFACE3_UPPER_BOUND 0x0b38 ++#define RADEON_SURFACE4_INFO 0x0b4c ++#define RADEON_SURFACE4_LOWER_BOUND 0x0b44 ++#define RADEON_SURFACE4_UPPER_BOUND 0x0b48 ++#define RADEON_SURFACE5_INFO 0x0b5c ++#define RADEON_SURFACE5_LOWER_BOUND 0x0b54 ++#define RADEON_SURFACE5_UPPER_BOUND 0x0b58 ++#define RADEON_SURFACE6_INFO 0x0b6c ++#define RADEON_SURFACE6_LOWER_BOUND 0x0b64 ++#define RADEON_SURFACE6_UPPER_BOUND 0x0b68 ++#define RADEON_SURFACE7_INFO 0x0b7c ++#define RADEON_SURFACE7_LOWER_BOUND 0x0b74 ++#define RADEON_SURFACE7_UPPER_BOUND 0x0b78 ++#define RADEON_SW_SEMAPHORE 0x013c ++ ++#define RADEON_WAIT_UNTIL 0x1720 ++# define RADEON_WAIT_CRTC_PFLIP (1 << 0) ++# define RADEON_WAIT_2D_IDLE (1 << 14) ++# define RADEON_WAIT_3D_IDLE (1 << 15) ++# define RADEON_WAIT_2D_IDLECLEAN (1 << 16) ++# define RADEON_WAIT_3D_IDLECLEAN (1 << 17) ++# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) ++ ++#define RADEON_RB3D_ZMASKOFFSET 0x3234 ++#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c ++# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) ++# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) ++ ++/* CP registers */ ++#define RADEON_CP_ME_RAM_ADDR 0x07d4 ++#define RADEON_CP_ME_RAM_RADDR 0x07d8 ++#define RADEON_CP_ME_RAM_DATAH 0x07dc ++#define RADEON_CP_ME_RAM_DATAL 0x07e0 ++ ++#define RADEON_CP_RB_BASE 0x0700 ++#define RADEON_CP_RB_CNTL 0x0704 ++# define RADEON_BUF_SWAP_32BIT (2 << 16) ++# define RADEON_RB_NO_UPDATE (1 << 27) ++#define RADEON_CP_RB_RPTR_ADDR 0x070c ++#define RADEON_CP_RB_RPTR 0x0710 ++#define RADEON_CP_RB_WPTR 0x0714 ++ ++#define RADEON_CP_RB_WPTR_DELAY 0x0718 ++# define RADEON_PRE_WRITE_TIMER_SHIFT 0 ++# define RADEON_PRE_WRITE_LIMIT_SHIFT 23 ++ ++#define RADEON_CP_IB_BASE 0x0738 ++ ++#define RADEON_CP_CSQ_CNTL 0x0740 ++# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) ++# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) ++# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28) ++# define RADEON_CSQ_PRIBM_INDDIS (2 << 28) ++# define RADEON_CSQ_PRIPIO_INDBM (3 << 28) ++# define RADEON_CSQ_PRIBM_INDBM (4 << 28) ++# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) ++ ++#define RADEON_AIC_CNTL 0x01d0 ++# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) ++#define RADEON_AIC_STAT 0x01d4 ++#define RADEON_AIC_PT_BASE 0x01d8 ++#define RADEON_AIC_LO_ADDR 0x01dc ++#define RADEON_AIC_HI_ADDR 0x01e0 ++#define RADEON_AIC_TLB_ADDR 0x01e4 ++#define RADEON_AIC_TLB_DATA 0x01e8 ++ ++/* CP command packets */ ++#define RADEON_CP_PACKET0 0x00000000 ++# define RADEON_ONE_REG_WR (1 << 15) ++#define RADEON_CP_PACKET1 0x40000000 ++#define RADEON_CP_PACKET2 0x80000000 ++#define RADEON_CP_PACKET3 0xC0000000 ++# define RADEON_CP_NOP 0x00001000 ++# define RADEON_CP_NEXT_CHAR 0x00001900 ++# define 
RADEON_CP_PLY_NEXTSCAN 0x00001D00 ++# define RADEON_CP_SET_SCISSORS 0x00001E00 ++ /* GEN_INDX_PRIM is unsupported starting with R300 */ ++# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 ++# define RADEON_WAIT_FOR_IDLE 0x00002600 ++# define RADEON_3D_DRAW_VBUF 0x00002800 ++# define RADEON_3D_DRAW_IMMD 0x00002900 ++# define RADEON_3D_DRAW_INDX 0x00002A00 ++# define RADEON_CP_LOAD_PALETTE 0x00002C00 ++# define RADEON_3D_LOAD_VBPNTR 0x00002F00 ++# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 ++# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 ++# define RADEON_3D_CLEAR_ZMASK 0x00003200 ++# define RADEON_CP_INDX_BUFFER 0x00003300 ++# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400 ++# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500 ++# define RADEON_CP_3D_DRAW_INDX_2 0x00003600 ++# define RADEON_3D_CLEAR_HIZ 0x00003700 ++# define RADEON_CP_3D_CLEAR_CMASK 0x00003802 ++# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 ++# define RADEON_CNTL_PAINT_MULTI 0x00009A00 ++# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 ++# define RADEON_CNTL_SET_SCISSORS 0xC0001E00 ++ ++#define RADEON_CP_PACKET_MASK 0xC0000000 ++#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 ++#define RADEON_CP_PACKET0_REG_MASK 0x000007ff ++#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff ++#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 ++ ++#define RADEON_VTX_Z_PRESENT (1 << 31) ++#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) ++ ++#define RADEON_PRIM_TYPE_NONE (0 << 0) ++#define RADEON_PRIM_TYPE_POINT (1 << 0) ++#define RADEON_PRIM_TYPE_LINE (2 << 0) ++#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0) ++#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0) ++#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0) ++#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0) ++#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0) ++#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) ++#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) ++#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) ++#define RADEON_PRIM_TYPE_MASK 0xf ++#define RADEON_PRIM_WALK_IND (1 << 4) ++#define RADEON_PRIM_WALK_LIST (2 << 4) ++#define RADEON_PRIM_WALK_RING (3 << 4) ++#define RADEON_COLOR_ORDER_BGRA (0 << 6) ++#define RADEON_COLOR_ORDER_RGBA (1 << 6) ++#define RADEON_MAOS_ENABLE (1 << 7) ++#define RADEON_VTX_FMT_R128_MODE (0 << 8) ++#define RADEON_VTX_FMT_RADEON_MODE (1 << 8) ++#define RADEON_NUM_VERTICES_SHIFT 16 ++ ++#define RADEON_COLOR_FORMAT_CI8 2 ++#define RADEON_COLOR_FORMAT_ARGB1555 3 ++#define RADEON_COLOR_FORMAT_RGB565 4 ++#define RADEON_COLOR_FORMAT_ARGB8888 6 ++#define RADEON_COLOR_FORMAT_RGB332 7 ++#define RADEON_COLOR_FORMAT_RGB8 9 ++#define RADEON_COLOR_FORMAT_ARGB4444 15 ++ ++#define RADEON_TXFORMAT_I8 0 ++#define RADEON_TXFORMAT_AI88 1 ++#define RADEON_TXFORMAT_RGB332 2 ++#define RADEON_TXFORMAT_ARGB1555 3 ++#define RADEON_TXFORMAT_RGB565 4 ++#define RADEON_TXFORMAT_ARGB4444 5 ++#define RADEON_TXFORMAT_ARGB8888 6 ++#define RADEON_TXFORMAT_RGBA8888 7 ++#define RADEON_TXFORMAT_Y8 8 ++#define RADEON_TXFORMAT_VYUY422 10 ++#define RADEON_TXFORMAT_YVYU422 11 ++#define RADEON_TXFORMAT_DXT1 12 ++#define RADEON_TXFORMAT_DXT23 14 ++#define RADEON_TXFORMAT_DXT45 15 ++ ++#define R200_PP_TXCBLEND_0 0x2f00 ++#define R200_PP_TXCBLEND_1 0x2f10 ++#define R200_PP_TXCBLEND_2 0x2f20 ++#define R200_PP_TXCBLEND_3 0x2f30 ++#define R200_PP_TXCBLEND_4 0x2f40 ++#define R200_PP_TXCBLEND_5 0x2f50 ++#define R200_PP_TXCBLEND_6 0x2f60 ++#define R200_PP_TXCBLEND_7 0x2f70 ++#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 ++#define R200_PP_TFACTOR_0 0x2ee0 ++#define R200_SE_VTX_FMT_0 0x2088 ++#define R200_SE_VAP_CNTL 0x2080 ++#define R200_SE_TCL_MATRIX_SEL_0 
0x2230 ++#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 ++#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 ++#define R200_PP_TXFILTER_5 0x2ca0 ++#define R200_PP_TXFILTER_4 0x2c80 ++#define R200_PP_TXFILTER_3 0x2c60 ++#define R200_PP_TXFILTER_2 0x2c40 ++#define R200_PP_TXFILTER_1 0x2c20 ++#define R200_PP_TXFILTER_0 0x2c00 ++#define R200_PP_TXOFFSET_5 0x2d78 ++#define R200_PP_TXOFFSET_4 0x2d60 ++#define R200_PP_TXOFFSET_3 0x2d48 ++#define R200_PP_TXOFFSET_2 0x2d30 ++#define R200_PP_TXOFFSET_1 0x2d18 ++#define R200_PP_TXOFFSET_0 0x2d00 ++ ++#define R200_PP_CUBIC_FACES_0 0x2c18 ++#define R200_PP_CUBIC_FACES_1 0x2c38 ++#define R200_PP_CUBIC_FACES_2 0x2c58 ++#define R200_PP_CUBIC_FACES_3 0x2c78 ++#define R200_PP_CUBIC_FACES_4 0x2c98 ++#define R200_PP_CUBIC_FACES_5 0x2cb8 ++#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 ++#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 ++#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c ++#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 ++#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 ++#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c ++#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 ++#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 ++#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 ++#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c ++#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 ++#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 ++#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c ++#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 ++#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 ++#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c ++#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 ++#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 ++#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 ++#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c ++#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 ++#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 ++#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c ++#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 ++#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 ++#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c ++#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 ++#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 ++#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 ++#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c ++ ++#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 ++#define R200_SE_VTE_CNTL 0x20b0 ++#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 ++#define R200_PP_TAM_DEBUG3 0x2d9c ++#define R200_PP_CNTL_X 0x2cc4 ++#define R200_SE_VAP_CNTL_STATUS 0x2140 ++#define R200_RE_SCISSOR_TL_0 0x1cd8 ++#define R200_RE_SCISSOR_TL_1 0x1ce0 ++#define R200_RE_SCISSOR_TL_2 0x1ce8 ++#define R200_RB3D_DEPTHXY_OFFSET 0x1d60 ++#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 ++#define R200_SE_VTX_STATE_CNTL 0x2180 ++#define R200_RE_POINTSIZE 0x2648 ++#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 ++ ++#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */ ++#define RADEON_PP_TEX_SIZE_1 0x1d0c ++#define RADEON_PP_TEX_SIZE_2 0x1d14 ++ ++#define RADEON_PP_CUBIC_FACES_0 0x1d24 ++#define RADEON_PP_CUBIC_FACES_1 0x1d28 ++#define RADEON_PP_CUBIC_FACES_2 0x1d2c ++#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */ ++#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00 ++#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14 ++ ++#define RADEON_SE_TCL_STATE_FLUSH 0x2284 ++ ++#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 ++#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 ++#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 ++#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 ++#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 ++#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 ++#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 ++#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b ++#define R200_3D_DRAW_IMMD_2 0xC0003500 ++#define 
R200_SE_VTX_FMT_1 0x208c ++#define R200_RE_CNTL 0x1c50 ++ ++#define R200_RB3D_BLENDCOLOR 0x3218 ++ ++#define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4 ++ ++#define R200_PP_TRI_PERF 0x2cf8 ++ ++#define R200_PP_AFS_0 0x2f80 ++#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ ++ ++#define R200_VAP_PVS_CNTL_1 0x22D0 ++ ++/* MPEG settings from VHA code */ ++#define RADEON_VHA_SETTO16_1 0x2694 ++#define RADEON_VHA_SETTO16_2 0x2680 ++#define RADEON_VHA_SETTO0_1 0x1840 ++#define RADEON_VHA_FB_OFFSET 0x19e4 ++#define RADEON_VHA_SETTO1AND70S 0x19d8 ++#define RADEON_VHA_DST_PITCH 0x1408 ++ ++// set as reference header ++#define RADEON_VHA_BACKFRAME0_OFF_Y 0x1840 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y 0x1844 ++#define RADEON_VHA_BACKFRAME0_OFF_U 0x1848 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U 0x184c ++#define RADOEN_VHA_BACKFRAME0_OFF_V 0x1850 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V 0x1854 ++#define RADEON_VHA_FORWFRAME0_OFF_Y 0x1858 ++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y 0x185c ++#define RADEON_VHA_FORWFRAME0_OFF_U 0x1860 ++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U 0x1864 ++#define RADEON_VHA_FORWFRAME0_OFF_V 0x1868 ++#define RADEON_VHA_FORWFRAME0_OFF_PITCH_V 0x1880 ++#define RADEON_VHA_BACKFRAME0_OFF_Y_2 0x1884 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2 0x1888 ++#define RADEON_VHA_BACKFRAME0_OFF_U_2 0x188c ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2 0x1890 ++#define RADEON_VHA_BACKFRAME0_OFF_V_2 0x1894 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2 0x1898 ++ ++#define R500_D1CRTC_STATUS 0x609c ++#define R500_D2CRTC_STATUS 0x689c ++#define R500_CRTC_V_BLANK (1<<0) ++ ++#define R500_D1CRTC_FRAME_COUNT 0x60a4 ++#define R500_D2CRTC_FRAME_COUNT 0x68a4 ++ ++#define R500_D1MODE_V_COUNTER 0x6530 ++#define R500_D2MODE_V_COUNTER 0x6d30 ++ ++#define R500_D1MODE_VBLANK_STATUS 0x6534 ++#define R500_D2MODE_VBLANK_STATUS 0x6d34 ++#define R500_VBLANK_OCCURED (1<<0) ++#define R500_VBLANK_ACK (1<<4) ++#define R500_VBLANK_STAT (1<<12) ++#define R500_VBLANK_INT (1<<16) ++ ++#define R500_DxMODE_INT_MASK 0x6540 ++#define R500_D1MODE_INT_MASK (1<<0) ++#define R500_D2MODE_INT_MASK (1<<8) ++ ++#define R500_DISP_INTERRUPT_STATUS 0x7edc ++#define R500_D1_VBLANK_INTERRUPT (1 << 4) ++#define R500_D2_VBLANK_INTERRUPT (1 << 5) ++ ++/* Constants */ ++#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ ++ ++#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 ++#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 ++#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 ++#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3 ++#define RADEON_LAST_DISPATCH 1 ++ ++#define RADEON_MAX_VB_AGE 0x7fffffff ++#define RADEON_MAX_VB_VERTS (0xffff) ++ ++#define RADEON_RING_HIGH_MARK 128 ++ ++#define RADEON_PCIGART_TABLE_SIZE (32*1024) ++ ++#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) ++#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) ++ ++#define RADEON_WRITE_PLL( addr, val ) \ ++do { \ ++ RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX, \ ++ ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \ ++ RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \ ++} while (0) ++ ++#define RADEON_WRITE_PCIE( addr, val ) \ ++do { \ ++ RADEON_WRITE8( RADEON_PCIE_INDEX, \ ++ ((addr) & 0xff)); \ ++ RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \ ++} while (0) ++ ++#define R500_WRITE_MCIND( addr, val ) \ ++do { \ ++ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \ ++ RADEON_WRITE(R520_MC_IND_DATA, 
(val)); \ ++ RADEON_WRITE(R520_MC_IND_INDEX, 0); \ ++} while (0) ++ ++#define RS480_WRITE_MCIND( addr, val ) \ ++do { \ ++ RADEON_WRITE( RS480_NB_MC_INDEX, \ ++ ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \ ++ RADEON_WRITE( RS480_NB_MC_DATA, (val) ); \ ++ RADEON_WRITE( RS480_NB_MC_INDEX, 0xff ); \ ++} while (0) ++ ++#define RS690_WRITE_MCIND( addr, val ) \ ++do { \ ++ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \ ++ RADEON_WRITE(RS690_MC_DATA, val); \ ++ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \ ++} while (0) ++ ++#define IGP_WRITE_MCIND( addr, val ) \ ++do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \ ++ RS690_WRITE_MCIND( addr, val ); \ ++ else \ ++ RS480_WRITE_MCIND( addr, val ); \ ++} while (0) ++ ++#define CP_PACKET0( reg, n ) \ ++ (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2)) ++#define CP_PACKET0_TABLE( reg, n ) \ ++ (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2)) ++#define CP_PACKET1( reg0, reg1 ) \ ++ (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2)) ++#define CP_PACKET2() \ ++ (RADEON_CP_PACKET2) ++#define CP_PACKET3( pkt, n ) \ ++ (RADEON_CP_PACKET3 | (pkt) | ((n) << 16)) ++ ++/* ================================================================ ++ * Engine control helper macros ++ */ ++ ++#define RADEON_WAIT_UNTIL_2D_IDLE() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ ++ RADEON_WAIT_HOST_IDLECLEAN) ); \ ++} while (0) ++ ++#define RADEON_WAIT_UNTIL_3D_IDLE() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \ ++ RADEON_WAIT_HOST_IDLECLEAN) ); \ ++} while (0) ++ ++#define RADEON_WAIT_UNTIL_IDLE() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ ++ RADEON_WAIT_3D_IDLECLEAN | \ ++ RADEON_WAIT_HOST_IDLECLEAN) ); \ ++} while (0) ++ ++#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \ ++} while (0) ++ ++#define RADEON_FLUSH_CACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(RADEON_RB3D_DC_FLUSH); \ ++ } else { \ ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(R300_RB3D_DC_FLUSH); \ ++ } \ ++} while (0) ++ ++#define RADEON_PURGE_CACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \ ++ } else { \ ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \ ++ } \ ++} while (0) ++ ++#define RADEON_FLUSH_ZCACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ ++ OUT_RING( RADEON_RB3D_ZC_FLUSH ); \ ++ } else { \ ++ OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) ); \ ++ OUT_RING( R300_ZC_FLUSH ); \ ++ } \ ++} while (0) ++ ++#define RADEON_PURGE_ZCACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ ++ OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \ ++ } else { \ ++ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ ++ OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \ ++ } \ ++} while (0) ++ ++/* 
================================================================ ++ * Misc helper macros ++ */ ++ ++/* Perfbox functionality only. ++ */ ++#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ ++ u32 head = GET_RING_HEAD( dev_priv ); \ ++ if (head == dev_priv->ring.tail) \ ++ dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ ++ } \ ++} while (0) ++ ++#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ ++ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ ++ int __ret = radeon_do_cp_idle( dev_priv ); \ ++ if ( __ret ) return __ret; \ ++ sarea_priv->last_dispatch = 0; \ ++ radeon_freelist_reset( dev ); \ ++ } \ ++} while (0) ++ ++#define RADEON_DISPATCH_AGE( age ) do { \ ++ OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \ ++ OUT_RING( age ); \ ++} while (0) ++ ++#define RADEON_FRAME_AGE( age ) do { \ ++ OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \ ++ OUT_RING( age ); \ ++} while (0) ++ ++#define RADEON_CLEAR_AGE( age ) do { \ ++ OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \ ++ OUT_RING( age ); \ ++} while (0) ++ ++/* ================================================================ ++ * Ring control ++ */ ++ ++#define RADEON_VERBOSE 0 ++ ++#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; ++ ++#define BEGIN_RING( n ) do { \ ++ if ( RADEON_VERBOSE ) { \ ++ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ ++ } \ ++ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ ++ COMMIT_RING(); \ ++ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ ++ } \ ++ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ++ ring = dev_priv->ring.start; \ ++ write = dev_priv->ring.tail; \ ++ mask = dev_priv->ring.tail_mask; \ ++} while (0) ++ ++#define ADVANCE_RING() do { \ ++ if ( RADEON_VERBOSE ) { \ ++ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ ++ write, dev_priv->ring.tail ); \ ++ } \ ++ if (((dev_priv->ring.tail + _nr) & mask) != write) { \ ++ DRM_ERROR( \ ++ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ++ ((dev_priv->ring.tail + _nr) & mask), \ ++ write, __LINE__); \ ++ } else \ ++ dev_priv->ring.tail = write; \ ++} while (0) ++ ++#define COMMIT_RING() do { \ ++ /* Flush writes to ring */ \ ++ DRM_MEMORYBARRIER(); \ ++ GET_RING_HEAD( dev_priv ); \ ++ RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ ++ /* read from PCI bus to ensure correct posting */ \ ++ RADEON_READ( RADEON_CP_RB_RPTR ); \ ++} while (0) ++ ++#define OUT_RING( x ) do { \ ++ if ( RADEON_VERBOSE ) { \ ++ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ ++ (unsigned int)(x), write ); \ ++ } \ ++ ring[write++] = (x); \ ++ write &= mask; \ ++} while (0) ++ ++#define OUT_RING_REG( reg, val ) do { \ ++ OUT_RING( CP_PACKET0( reg, 0 ) ); \ ++ OUT_RING( val ); \ ++} while (0) ++ ++#define OUT_RING_TABLE( tab, sz ) do { \ ++ int _size = (sz); \ ++ int *_tab = (int *)(tab); \ ++ \ ++ if (write + _size > mask) { \ ++ int _i = (mask+1) - write; \ ++ _size -= _i; \ ++ while (_i > 0) { \ ++ *(int *)(ring + write) = *_tab++; \ ++ write++; \ ++ _i--; \ ++ } \ ++ write = 0; \ ++ _tab += _i; \ ++ } \ ++ while (_size > 0) { \ ++ *(ring + write) = *_tab++; \ ++ write++; \ ++ _size--; \ ++ } \ ++ write &= mask; \ ++} while (0) ++ ++#endif /* __RADEON_DRV_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_ioc32.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_ioc32.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_ioc32.c 
1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_ioc32.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,424 @@ ++/** ++ * \file radeon_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the Radeon DRM. ++ * ++ * \author Paul Mackerras ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++typedef struct drm_radeon_init32 { ++ int func; ++ u32 sarea_priv_offset; ++ int is_pci; ++ int cp_mode; ++ int gart_size; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ u32 fb_offset; ++ u32 mmio_offset; ++ u32 ring_offset; ++ u32 ring_rptr_offset; ++ u32 buffers_offset; ++ u32 gart_textures_offset; ++} drm_radeon_init32_t; ++ ++static int compat_radeon_cp_init(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_init32_t init32; ++ drm_radeon_init_t __user *init; ++ ++ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) ++ return -EFAULT; ++ ++ init = compat_alloc_user_space(sizeof(*init)); ++ if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) ++ || __put_user(init32.func, &init->func) ++ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) ++ || __put_user(init32.is_pci, &init->is_pci) ++ || __put_user(init32.cp_mode, &init->cp_mode) ++ || __put_user(init32.gart_size, &init->gart_size) ++ || __put_user(init32.ring_size, &init->ring_size) ++ || __put_user(init32.usec_timeout, &init->usec_timeout) ++ || __put_user(init32.fb_bpp, &init->fb_bpp) ++ || __put_user(init32.front_offset, &init->front_offset) ++ || __put_user(init32.front_pitch, &init->front_pitch) ++ || __put_user(init32.back_offset, &init->back_offset) ++ || __put_user(init32.back_pitch, &init->back_pitch) ++ || __put_user(init32.depth_bpp, &init->depth_bpp) ++ || __put_user(init32.depth_offset, &init->depth_offset) ++ || __put_user(init32.depth_pitch, &init->depth_pitch) ++ || __put_user(init32.fb_offset, &init->fb_offset) ++ || __put_user(init32.mmio_offset, &init->mmio_offset) ++ || __put_user(init32.ring_offset, &init->ring_offset) ++ || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) ++ || 
__put_user(init32.buffers_offset, &init->buffers_offset) ++ || __put_user(init32.gart_textures_offset, ++ &init->gart_textures_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init); ++} ++ ++typedef struct drm_radeon_clear32 { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; /* misnamed field: should be stencil */ ++ u32 depth_boxes; ++} drm_radeon_clear32_t; ++ ++static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_clear32_t clr32; ++ drm_radeon_clear_t __user *clr; ++ ++ if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32))) ++ return -EFAULT; ++ ++ clr = compat_alloc_user_space(sizeof(*clr)); ++ if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr)) ++ || __put_user(clr32.flags, &clr->flags) ++ || __put_user(clr32.clear_color, &clr->clear_color) ++ || __put_user(clr32.clear_depth, &clr->clear_depth) ++ || __put_user(clr32.color_mask, &clr->color_mask) ++ || __put_user(clr32.depth_mask, &clr->depth_mask) ++ || __put_user((void __user *)(unsigned long)clr32.depth_boxes, ++ &clr->depth_boxes)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr); ++} ++ ++typedef struct drm_radeon_stipple32 { ++ u32 mask; ++} drm_radeon_stipple32_t; ++ ++static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_stipple32_t __user *argp = (void __user *)arg; ++ drm_radeon_stipple_t __user *request; ++ u32 mask; ++ ++ if (get_user(mask, &argp->mask)) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user((unsigned int __user *)(unsigned long) mask, ++ &request->mask)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_tex_image32 { ++ unsigned int x, y; /* Blit coordinates */ ++ unsigned int width, height; ++ u32 data; ++} drm_radeon_tex_image32_t; ++ ++typedef struct drm_radeon_texture32 { ++ unsigned int offset; ++ int pitch; ++ int format; ++ int width; /* Texture image coordinates */ ++ int height; ++ u32 image; ++} drm_radeon_texture32_t; ++ ++static int compat_radeon_cp_texture(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_texture32_t req32; ++ drm_radeon_texture_t __user *request; ++ drm_radeon_tex_image32_t img32; ++ drm_radeon_tex_image_t __user *image; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ if (req32.image == 0) ++ return -EINVAL; ++ if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image, ++ sizeof(img32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request) + sizeof(*image)); ++ if (!access_ok(VERIFY_WRITE, request, ++ sizeof(*request) + sizeof(*image))) ++ return -EFAULT; ++ image = (drm_radeon_tex_image_t __user *) (request + 1); ++ ++ if (__put_user(req32.offset, &request->offset) ++ || __put_user(req32.pitch, &request->pitch) ++ || __put_user(req32.format, &request->format) ++ || __put_user(req32.width, &request->width) ++ || __put_user(req32.height, &request->height) ++ || __put_user(image, &request->image) ++ || __put_user(img32.x, &image->x) ++ || __put_user(img32.y, &image->y) ++ || __put_user(img32.width, &image->width) 
++ || __put_user(img32.height, &image->height) ++ || __put_user((const void __user *)(unsigned long)img32.data, ++ &image->data)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_vertex2_32 { ++ int idx; /* Index of vertex buffer */ ++ int discard; /* Client finished with buffer? */ ++ int nr_states; ++ u32 state; ++ int nr_prims; ++ u32 prim; ++} drm_radeon_vertex2_32_t; ++ ++static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_vertex2_32_t req32; ++ drm_radeon_vertex2_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.idx, &request->idx) ++ || __put_user(req32.discard, &request->discard) ++ || __put_user(req32.nr_states, &request->nr_states) ++ || __put_user((void __user *)(unsigned long)req32.state, ++ &request->state) ++ || __put_user(req32.nr_prims, &request->nr_prims) ++ || __put_user((void __user *)(unsigned long)req32.prim, ++ &request->prim)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_cmd_buffer32 { ++ int bufsz; ++ u32 buf; ++ int nbox; ++ u32 boxes; ++} drm_radeon_cmd_buffer32_t; ++ ++static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_cmd_buffer32_t req32; ++ drm_radeon_cmd_buffer_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.bufsz, &request->bufsz) ++ || __put_user((void __user *)(unsigned long)req32.buf, ++ &request->buf) ++ || __put_user(req32.nbox, &request->nbox) ++ || __put_user((void __user *)(unsigned long)req32.boxes, ++ &request->boxes)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_getparam32 { ++ int param; ++ u32 value; ++} drm_radeon_getparam32_t; ++ ++static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_getparam32_t req32; ++ drm_radeon_getparam_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.param, &request->param) ++ || __put_user((void __user *)(unsigned long)req32.value, ++ &request->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_mem_alloc32 { ++ int region; ++ int alignment; ++ int size; ++ u32 region_offset; /* offset from start of fb or GART */ ++} drm_radeon_mem_alloc32_t; ++ ++static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_mem_alloc32_t req32; ++ drm_radeon_mem_alloc_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if 
(!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.region, &request->region) ++ || __put_user(req32.alignment, &request->alignment) ++ || __put_user(req32.size, &request->size) ++ || __put_user((int __user *)(unsigned long)req32.region_offset, ++ &request->region_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_ALLOC, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_irq_emit32 { ++ u32 irq_seq; ++} drm_radeon_irq_emit32_t; ++ ++static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_irq_emit32_t req32; ++ drm_radeon_irq_emit_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user((int __user *)(unsigned long)req32.irq_seq, ++ &request->irq_seq)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request); ++} ++ ++/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ ++#if defined (CONFIG_X86_64) || defined(CONFIG_IA64) ++typedef struct drm_radeon_setparam32 { ++ int param; ++ u64 value; ++} __attribute__((packed)) drm_radeon_setparam32_t; ++ ++static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_setparam32_t req32; ++ drm_radeon_setparam_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.param, &request->param) ++ || __put_user((void __user *)(unsigned long)req32.value, ++ &request->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); ++} ++#else ++#define compat_radeon_cp_setparam NULL ++#endif /* X86_64 || IA64 */ ++ ++drm_ioctl_compat_t *radeon_compat_ioctls[] = { ++ [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, ++ [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, ++ [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, ++ [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture, ++ [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2, ++ [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf, ++ [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam, ++ [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam, ++ [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc, ++ [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. 
++ */ ++long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) ++ fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,390 @@ ++/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Keith Whitwell ++ * Michel Dänzer ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (state) ++ dev_priv->irq_enable_reg |= mask; ++ else ++ dev_priv->irq_enable_reg &= ~mask; ++ ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); ++} ++ ++static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (state) ++ dev_priv->r500_disp_irq_reg |= mask; ++ else ++ dev_priv->r500_disp_irq_reg &= ~mask; ++ ++ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); ++} ++ ++int radeon_enable_vblank(struct drm_device *dev, int crtc) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ switch (crtc) { ++ case 0: ++ r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1); ++ break; ++ case 1: ++ r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1); ++ break; ++ default: ++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", ++ crtc); ++ return EINVAL; ++ } ++ } else { ++ switch (crtc) { ++ case 0: ++ radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1); ++ break; ++ case 1: ++ radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1); ++ break; ++ default: ++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", ++ crtc); ++ return EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++void radeon_disable_vblank(struct drm_device *dev, int crtc) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ switch (crtc) { ++ case 0: ++ r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0); ++ break; ++ case 1: ++ r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0); ++ break; ++ default: ++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", ++ crtc); ++ break; ++ } ++ } else { ++ switch (crtc) { ++ case 0: ++ radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0); ++ break; ++ case 1: ++ radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0); ++ break; ++ default: ++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", ++ crtc); ++ break; ++ } ++ } ++} ++ ++static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, u32 *r500_disp_int) ++{ ++ u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS); ++ u32 irq_mask = RADEON_SW_INT_TEST; ++ ++ *r500_disp_int = 0; ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ /* vbl interrupts in a different place */ ++ ++ if (irqs & R500_DISPLAY_INT_STATUS) { ++ /* if a display interrupt */ ++ u32 disp_irq; ++ ++ disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS); ++ ++ *r500_disp_int = disp_irq; ++ if (disp_irq & R500_D1_VBLANK_INTERRUPT) { ++ RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK); ++ } ++ if (disp_irq & R500_D2_VBLANK_INTERRUPT) { ++ RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK); ++ } ++ } ++ irq_mask |= R500_DISPLAY_INT_STATUS; ++ } else ++ irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT; ++ ++ irqs &= irq_mask; ++ ++ if (irqs) ++ RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); ++ ++ return irqs; ++} ++ ++/* Interrupts - Used for device synchronization and flushing in the ++ * following circumstances: ++ * ++ * - Exclusive FB access with hw idle: ++ * - Wait for GUI Idle (?) interrupt, then do normal flush. 
++ * ++ * - Frame throttling, NV_fence: ++ * - Drop marker irq's into command stream ahead of time. ++ * - Wait on irq's with lock *not held* ++ * - Check each for termination condition ++ * ++ * - Internally in cp_getbuffer, etc: ++ * - as above, but wait with lock held??? ++ * ++ * NOTE: These functions are misleadingly named -- the irq's aren't ++ * tied to dma at all, this is just a hangover from dri prehistory. ++ */ ++ ++irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ u32 stat; ++ u32 r500_disp_int; ++ ++ /* Only consider the bits we're interested in - others could be used ++ * outside the DRM ++ */ ++ stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int); ++ if (!stat) ++ return IRQ_NONE; ++ ++ stat &= dev_priv->irq_enable_reg; ++ ++ /* SW interrupt */ ++ if (stat & RADEON_SW_INT_TEST) ++ DRM_WAKEUP(&dev_priv->swi_queue); ++ ++ /* VBLANK interrupt */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ if (r500_disp_int & R500_D1_VBLANK_INTERRUPT) ++ drm_handle_vblank(dev, 0); ++ if (r500_disp_int & R500_D2_VBLANK_INTERRUPT) ++ drm_handle_vblank(dev, 1); ++ } else { ++ if (stat & RADEON_CRTC_VBLANK_STAT) ++ drm_handle_vblank(dev, 0); ++ if (stat & RADEON_CRTC2_VBLANK_STAT) ++ drm_handle_vblank(dev, 1); ++ } ++ return IRQ_HANDLED; ++} ++ ++static int radeon_emit_irq(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ unsigned int ret; ++ RING_LOCALS; ++ ++ atomic_inc(&dev_priv->swi_emitted); ++ ret = atomic_read(&dev_priv->swi_emitted); ++ ++ BEGIN_RING(4); ++ OUT_RING_REG(RADEON_LAST_SWI_REG, ret); ++ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ return ret; ++} ++ ++static int radeon_wait_irq(struct drm_device * dev, int swi_nr) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ int ret = 0; ++ ++ if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr) ++ return 0; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ, ++ RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr); ++ ++ return ret; ++} ++ ++u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (crtc < 0 || crtc > 1) { ++ DRM_ERROR("Invalid crtc %d\n", crtc); ++ return -EINVAL; ++ } ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ if (crtc == 0) ++ return RADEON_READ(R500_D1CRTC_FRAME_COUNT); ++ else ++ return RADEON_READ(R500_D2CRTC_FRAME_COUNT); ++ } else { ++ if (crtc == 0) ++ return RADEON_READ(RADEON_CRTC_CRNT_FRAME); ++ else ++ return RADEON_READ(RADEON_CRTC2_CRNT_FRAME); ++ } ++} ++ ++/* Needs the lock as it touches the ring. ++ */ ++int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_irq_emit_t *emit = data; ++ int result; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ result = radeon_emit_irq(dev); ++ ++ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++/* Doesn't need the hardware lock. 
++ */ ++int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_irq_wait_t *irqwait = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ return radeon_wait_irq(dev, irqwait->irq_seq); ++} ++ ++/* drm_dma.h hooks ++*/ ++void radeon_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ u32 dummy; ++ ++ /* Disable *all* interrupts */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ ++ /* Clear bits if they're already high */ ++ radeon_acknowledge_irqs(dev_priv, &dummy); ++} ++ ++int radeon_driver_irq_postinstall(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ int ret; ++ ++ atomic_set(&dev_priv->swi_emitted, 0); ++ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); ++ ++ ret = drm_vblank_init(dev, 2); ++ if (ret) ++ return ret; ++ ++ dev->max_vblank_count = 0x001fffff; ++ ++ radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); ++ ++ return 0; ++} ++ ++void radeon_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ if (!dev_priv) ++ return; ++ ++ dev_priv->irq_enabled = 0; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ /* Disable *all* interrupts */ ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++} ++ ++ ++int radeon_vblank_crtc_get(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; ++ ++ return dev_priv->vblank_crtc; ++} ++ ++int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) ++{ ++ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; ++ if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { ++ DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); ++ return -EINVAL; ++ } ++ dev_priv->vblank_crtc = (unsigned int)value; ++ return 0; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_mem.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_mem.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_mem.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,302 @@ ++/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Whitwell ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++/* Very simple allocator for GART memory, working on a static range ++ * already mapped into each client's address space. ++ */ ++ ++static struct mem_block *split_block(struct mem_block *p, int start, int size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++ out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++static struct mem_block *alloc_block(struct mem_block *heap, int size, ++ int align2, struct drm_file *file_priv) ++{ ++ struct mem_block *p; ++ int mask = (1 << align2) - 1; ++ ++ list_for_each(p, heap) { ++ int start = (p->start + mask) & ~mask; ++ if (p->file_priv == NULL && start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ ++ return NULL; ++} ++ ++static struct mem_block *find_block(struct mem_block *heap, int start) ++{ ++ struct mem_block *p; ++ ++ list_for_each(p, heap) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++static void free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. 
++ */ ++ if (p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFS); ++ } ++ ++ if (p->prev->file_priv == NULL) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFS); ++ } ++} ++ ++/* Initialize. How to check for an uninitialized heap? ++ */ ++static int init_heap(struct mem_block **heap, int start, int size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* Free all blocks associated with the releasing file. ++ */ ++void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ list_for_each(p, heap) { ++ if (p->file_priv == file_priv) ++ p->file_priv = NULL; ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ list_for_each(p, heap) { ++ while (p->file_priv == NULL && p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ } ++} ++ ++/* Shutdown. ++ */ ++void radeon_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); ++ *heap = NULL; ++} ++ ++/* IOCTL HANDLERS */ ++ ++static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region) ++{ ++ switch (region) { ++ case RADEON_MEM_REGION_GART: ++ return &dev_priv->gart_heap; ++ case RADEON_MEM_REGION_FB: ++ return &dev_priv->fb_heap; ++ default: ++ return NULL; ++ } ++} ++ ++int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_mem_alloc_t *alloc = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, alloc->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ /* Make things easier on ourselves: all allocations at least ++ * 4k aligned. 
++ */ ++ if (alloc->alignment < 12) ++ alloc->alignment = 12; ++ ++ block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); ++ ++ if (!block) ++ return -ENOMEM; ++ ++ if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, ++ sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_mem_free_t *memfree = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, memfree->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ block = find_block(*heap, memfree->region_offset); ++ if (!block) ++ return -EFAULT; ++ ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ free_block(block); ++ return 0; ++} ++ ++int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_mem_init_heap_t *initheap = data; ++ struct mem_block **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, initheap->region); ++ if (!heap) ++ return -EFAULT; ++ ++ if (*heap) { ++ DRM_ERROR("heap already initialized?"); ++ return -EFAULT; ++ } ++ ++ return init_heap(heap, initheap->start, initheap->size); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_microcode.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_microcode.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_microcode.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_microcode.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1844 @@ ++/* ++ * Copyright 2007 Advanced Micro Devices, Inc. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#ifndef RADEON_MICROCODE_H ++#define RADEON_MICROCODE_H ++ ++/* production radeon ucode r1xx-r6xx */ ++static const u32 R100_cp_microcode[][2]={ ++ { 0x21007000, 0000000000 }, ++ { 0x20007000, 0000000000 }, ++ { 0x000000b4, 0x00000004 }, ++ { 0x000000b8, 0x00000004 }, ++ { 0x6f5b4d4c, 0000000000 }, ++ { 0x4c4c427f, 0000000000 }, ++ { 0x5b568a92, 0000000000 }, ++ { 0x4ca09c6d, 0000000000 }, ++ { 0xad4c4c4c, 0000000000 }, ++ { 0x4ce1af3d, 0000000000 }, ++ { 0xd8afafaf, 0000000000 }, ++ { 0xd64c4cdc, 0000000000 }, ++ { 0x4cd10d10, 0000000000 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x362f242d, 0000000000 }, ++ { 0x00000012, 0x00000004 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x362f282d, 0000000000 }, ++ { 0x000380e7, 0x00000002 }, ++ { 0x04002c97, 0x00000002 }, ++ { 0x000f0001, 0x00000016 }, ++ { 0x333a3730, 0000000000 }, ++ { 0x000077ef, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000021, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000021, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000021, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00000017, 0x00000004 }, ++ { 0x0003802b, 0x00000002 }, ++ { 0x040067e0, 0x00000002 }, ++ { 0x00000017, 0x00000004 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x000037e1, 0x00000002 }, ++ { 0x040067e1, 0x00000006 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x000077e1, 0x00000002 }, ++ { 0x000077e1, 0x00000006 }, ++ { 0xffffffff, 0000000000 }, ++ { 0x10000000, 0000000000 }, ++ { 0x0003802b, 0x00000002 }, ++ { 0x040067e0, 0x00000006 }, ++ { 0x00007675, 0x00000002 }, ++ { 0x00007676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x00000030, 0x00000018 }, ++ { 0x00000030, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x01605000, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00098000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x64c0603e, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00080000, 0x00000016 }, ++ { 0000000000, 0000000000 }, ++ { 0x0400251d, 0x00000002 }, ++ { 0x00007580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x04002580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x00000049, 0x00000004 }, ++ { 0x00005000, 0000000000 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x00019000, 0x00000002 }, ++ { 0x00011055, 0x00000014 }, ++ { 0x00000055, 0x00000012 }, ++ { 0x0400250f, 0x00000002 }, ++ { 0x0000504f, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00007565, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x00000058, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x01e655b4, 0x00000002 }, ++ { 0x4401b0e4, 0x00000002 }, ++ { 0x01c110e4, 0x00000002 }, ++ { 0x26667066, 0x00000018 }, ++ { 0x040c2565, 0x00000002 }, ++ { 0x00000066, 0x00000018 }, ++ { 0x04002564, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x0000005d, 0x00000004 }, ++ { 0x00401069, 0x00000008 }, ++ { 0x00101000, 0x00000002 }, ++ { 0x000d80ff, 0x00000002 }, ++ { 0x0080006c, 0x00000008 }, ++ { 0x000f9000, 0x00000002 }, ++ { 0x000e00ff, 0x00000002 }, ++ { 0000000000, 0x00000006 }, ++ { 
0x0000008f, 0x00000018 }, ++ { 0x0000005b, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00009000, 0x00000002 }, ++ { 0x00041000, 0x00000002 }, ++ { 0x0c00350e, 0x00000002 }, ++ { 0x00049000, 0x00000002 }, ++ { 0x00051000, 0x00000002 }, ++ { 0x01e785f8, 0x00000002 }, ++ { 0x00200000, 0x00000002 }, ++ { 0x0060007e, 0x0000000c }, ++ { 0x00007563, 0x00000002 }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x20007073, 0x00000004 }, ++ { 0x00005073, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00007577, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x0000750f, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00600083, 0x0000000c }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x000075f8, 0x00000002 }, ++ { 0x00000083, 0x00000004 }, ++ { 0x000a750e, 0x00000002 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x0020750f, 0x00000002 }, ++ { 0x00600086, 0x00000004 }, ++ { 0x00007570, 0x00000002 }, ++ { 0x00007571, 0x00000002 }, ++ { 0x00007572, 0x00000006 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00007568, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000095, 0x0000000c }, ++ { 0x00058000, 0x00000002 }, ++ { 0x0c607562, 0x00000002 }, ++ { 0x00000097, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00600096, 0x00000004 }, ++ { 0x400070e5, 0000000000 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x000380e5, 0x00000002 }, ++ { 0x000000a8, 0x0000001c }, ++ { 0x000650aa, 0x00000018 }, ++ { 0x040025bb, 0x00000002 }, ++ { 0x000610ab, 0x00000018 }, ++ { 0x040075bc, 0000000000 }, ++ { 0x000075bb, 0x00000002 }, ++ { 0x000075bc, 0000000000 }, ++ { 0x00090000, 0x00000006 }, ++ { 0x00090000, 0x00000002 }, ++ { 0x000d8002, 0x00000006 }, ++ { 0x00007832, 0x00000002 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x000380e7, 0x00000002 }, ++ { 0x04002c97, 0x00000002 }, ++ { 0x00007820, 0x00000002 }, ++ { 0x00007821, 0x00000002 }, ++ { 0x00007800, 0000000000 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20077000, 0x00000002 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20007000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x0120751b, 0x00000002 }, ++ { 0x8040750a, 0x00000002 }, ++ { 0x8040750b, 0x00000002 }, ++ { 0x00110000, 0x00000002 }, ++ { 0x000380e5, 0x00000002 }, ++ { 0x000000c6, 0x0000001c }, ++ { 0x000610ab, 0x00000018 }, ++ { 0x844075bd, 0x00000002 }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x840075bb, 0x00000002 }, ++ { 0x000610ab, 0x00000018 }, ++ { 0x844075bc, 0x00000002 }, ++ { 0x000000c9, 0x00000004 }, ++ { 0x804075bd, 0x00000002 }, ++ { 0x800075bb, 0x00000002 }, ++ { 0x804075bc, 0x00000002 }, ++ { 0x00108000, 0x00000002 }, ++ { 0x01400000, 0x00000002 }, ++ { 0x006000cd, 0x0000000c }, ++ { 0x20c07000, 0x00000020 }, ++ { 0x000000cf, 0x00000012 }, ++ { 0x00800000, 0x00000006 }, ++ { 0x0080751d, 0x00000006 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000775c, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00661000, 0x00000002 }, ++ { 0x0460275d, 0x00000020 }, ++ { 0x00004000, 0000000000 }, ++ { 0x01e00830, 0x00000002 }, ++ { 0x21007000, 0000000000 }, ++ { 0x6464614d, 0000000000 }, ++ { 0x69687420, 0000000000 }, ++ { 0x00000073, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x000380d0, 0x00000002 }, ++ { 0x040025e0, 0x00000002 }, ++ { 0x000075e1, 
0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000380e0, 0x00000002 }, ++ { 0x04002394, 0x00000002 }, ++ { 0x00005000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x00000008, 0000000000 }, ++ { 0x00000004, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R200_cp_microcode[][2]={ ++ { 0x21007000, 0000000000 }, ++ { 0x20007000, 0000000000 }, ++ { 0x000000bf, 0x00000004 }, ++ { 0x000000c3, 0x00000004 }, ++ { 0x7a685e5d, 0000000000 }, ++ { 0x5d5d5588, 0000000000 }, ++ { 0x68659197, 0000000000 }, ++ { 0x5da19f78, 0000000000 }, ++ { 0x5d5d5d5d, 0000000000 }, ++ { 0x5dee5d50, 0000000000 }, ++ { 0xf2acacac, 0000000000 }, ++ { 0xe75df9e9, 0000000000 }, ++ { 0xb1dd0e11, 0000000000 }, ++ { 0xe2afafaf, 0000000000 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x452f232d, 0000000000 }, ++ { 0x00000013, 0x00000004 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x452f272d, 0000000000 }, ++ { 0x000f0001, 0x00000016 }, ++ { 0x3e4d4a37, 0000000000 }, ++ { 0x000077ef, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000020, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000020, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000020, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00000016, 0x00000004 }, ++ { 0x0003802a, 0x00000002 }, ++ { 0x040067e0, 0x00000002 }, ++ { 0x00000016, 0x00000004 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x000037e1, 0x00000002 }, ++ { 0x040067e1, 0x00000006 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x000077e1, 0x00000002 }, ++ { 0x000077e1, 0x00000006 }, ++ { 0xffffffff, 0000000000 }, ++ { 0x10000000, 0000000000 }, ++ { 0x07f007f0, 0000000000 }, ++ { 0x0003802a, 0x00000002 }, ++ { 0x040067e0, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007675, 0x00000002 }, ++ { 0x00007676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802b, 0x00000002 }, ++ { 0x04002676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x00000037, 0x00000018 }, ++ { 0x00000037, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x01605000, 
0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00098000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x64c06051, 0x00000004 }, ++ { 0x00080000, 0x00000016 }, ++ { 0000000000, 0000000000 }, ++ { 0x0400251d, 0x00000002 }, ++ { 0x00007580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x04002580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x0000005a, 0x00000004 }, ++ { 0x00005000, 0000000000 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x00019000, 0x00000002 }, ++ { 0x00011064, 0x00000014 }, ++ { 0x00000064, 0x00000012 }, ++ { 0x0400250f, 0x00000002 }, ++ { 0x0000505e, 0x00000004 }, ++ { 0x00007565, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x00000065, 0x00000004 }, ++ { 0x01e655b4, 0x00000002 }, ++ { 0x4401b0f0, 0x00000002 }, ++ { 0x01c110f0, 0x00000002 }, ++ { 0x26667071, 0x00000018 }, ++ { 0x040c2565, 0x00000002 }, ++ { 0x00000071, 0x00000018 }, ++ { 0x04002564, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x00000068, 0x00000004 }, ++ { 0x00401074, 0x00000008 }, ++ { 0x00101000, 0x00000002 }, ++ { 0x000d80ff, 0x00000002 }, ++ { 0x00800077, 0x00000008 }, ++ { 0x000f9000, 0x00000002 }, ++ { 0x000e00ff, 0x00000002 }, ++ { 0000000000, 0x00000006 }, ++ { 0x00000094, 0x00000018 }, ++ { 0x00000068, 0x00000004 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00009000, 0x00000002 }, ++ { 0x00041000, 0x00000002 }, ++ { 0x0c00350e, 0x00000002 }, ++ { 0x00049000, 0x00000002 }, ++ { 0x00051000, 0x00000002 }, ++ { 0x01e785f8, 0x00000002 }, ++ { 0x00200000, 0x00000002 }, ++ { 0x00600087, 0x0000000c }, ++ { 0x00007563, 0x00000002 }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x2000707c, 0x00000004 }, ++ { 0x0000507c, 0x00000004 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00007577, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x0000750f, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x0060008a, 0x0000000c }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x000075f8, 0x00000002 }, ++ { 0x0000008a, 0x00000004 }, ++ { 0x000a750e, 0x00000002 }, ++ { 0x0020750f, 0x00000002 }, ++ { 0x0060008d, 0x00000004 }, ++ { 0x00007570, 0x00000002 }, ++ { 0x00007571, 0x00000002 }, ++ { 0x00007572, 0x00000006 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00007568, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000098, 0x0000000c }, ++ { 0x00058000, 0x00000002 }, ++ { 0x0c607562, 0x00000002 }, ++ { 0x0000009a, 0x00000004 }, ++ { 0x00600099, 0x00000004 }, ++ { 0x400070f1, 0000000000 }, ++ { 0x000380f1, 0x00000002 }, ++ { 0x000000a7, 0x0000001c }, ++ { 0x000650a9, 0x00000018 }, ++ { 0x040025bb, 0x00000002 }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x040075bc, 0000000000 }, ++ { 0x000075bb, 0x00000002 }, ++ { 0x000075bc, 0000000000 }, ++ { 0x00090000, 0x00000006 }, ++ { 0x00090000, 0x00000002 }, ++ { 0x000d8002, 0x00000006 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00007821, 0x00000002 }, ++ { 0x00007800, 0000000000 }, ++ { 0x00007821, 0x00000002 }, ++ { 0x00007800, 0000000000 }, ++ { 0x01665000, 0x00000002 }, ++ { 0x000a0000, 0x00000002 }, ++ { 0x000671cc, 0x00000002 }, ++ { 0x0286f1cd, 0x00000002 }, ++ { 0x000000b7, 0x00000010 }, ++ { 0x21007000, 0000000000 }, ++ { 0x000000be, 0x0000001c }, ++ { 0x00065000, 0x00000002 }, ++ { 0x000a0000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x000b0000, 0x00000002 }, ++ { 0x38067000, 0x00000002 }, ++ { 0x000a00ba, 0x00000004 }, ++ { 0x20007000, 0000000000 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20077000, 0x00000002 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20007000, 0000000000 }, ++ { 
0x00061000, 0x00000002 }, ++ { 0x0120751b, 0x00000002 }, ++ { 0x8040750a, 0x00000002 }, ++ { 0x8040750b, 0x00000002 }, ++ { 0x00110000, 0x00000002 }, ++ { 0x000380f1, 0x00000002 }, ++ { 0x000000d1, 0x0000001c }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x844075bd, 0x00000002 }, ++ { 0x000610a9, 0x00000018 }, ++ { 0x840075bb, 0x00000002 }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x844075bc, 0x00000002 }, ++ { 0x000000d4, 0x00000004 }, ++ { 0x804075bd, 0x00000002 }, ++ { 0x800075bb, 0x00000002 }, ++ { 0x804075bc, 0x00000002 }, ++ { 0x00108000, 0x00000002 }, ++ { 0x01400000, 0x00000002 }, ++ { 0x006000d8, 0x0000000c }, ++ { 0x20c07000, 0x00000020 }, ++ { 0x000000da, 0x00000012 }, ++ { 0x00800000, 0x00000006 }, ++ { 0x0080751d, 0x00000006 }, ++ { 0x000025bb, 0x00000002 }, ++ { 0x000040d4, 0x00000004 }, ++ { 0x0000775c, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00661000, 0x00000002 }, ++ { 0x0460275d, 0x00000020 }, ++ { 0x00004000, 0000000000 }, ++ { 0x00007999, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00661000, 0x00000002 }, ++ { 0x0460299b, 0x00000020 }, ++ { 0x00004000, 0000000000 }, ++ { 0x01e00830, 0x00000002 }, ++ { 0x21007000, 0000000000 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00038056, 0x00000002 }, ++ { 0x040025e0, 0x00000002 }, ++ { 0x000075e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000380ed, 0x00000002 }, ++ { 0x04007394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000078c4, 0x00000002 }, ++ { 0x000078c5, 0x00000002 }, ++ { 0x000078c6, 0x00000002 }, ++ { 0x00007924, 0x00000002 }, ++ { 0x00007925, 0x00000002 }, ++ { 0x00007926, 0x00000002 }, ++ { 0x000000f2, 0x00000004 }, ++ { 0x00007924, 0x00000002 }, ++ { 0x00007925, 0x00000002 }, ++ { 0x00007926, 0x00000002 }, ++ { 0x000000f9, 0x00000004 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R300_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000000ae, 0x00000008 }, ++ { 0x000000b2, 0x00000008 }, ++ { 0x67554b4a, 0000000000 }, ++ { 0x4a4a4475, 0000000000 }, ++ { 0x55527d83, 0000000000 }, ++ { 0x4a8c8b65, 0000000000 }, ++ { 0x4aef4af6, 0000000000 }, ++ { 0x4ae14a4a, 0000000000 }, ++ { 0xe4979797, 0000000000 }, ++ { 0xdb4aebdd, 0000000000 }, ++ { 0x9ccc4a4a, 0000000000 }, ++ { 0xd1989898, 0000000000 }, ++ { 0x4a0f9ad6, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 
0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000080, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00012000, 0x00000004 }, ++ { 0x00082000, 0x00000004 }, ++ { 0x1800650e, 0x00000004 }, ++ { 0x00092000, 0x00000004 }, ++ { 0x000a2000, 0x00000004 }, ++ { 0x000f0000, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x00000074, 0x00000018 }, ++ { 0x0000e563, 0x00000004 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0000a069, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000077, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000077, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0007a, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000084, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000086, 0x00000008 }, ++ { 0x00c00085, 0x00000008 }, ++ { 0x000700e3, 0x00000004 }, ++ { 0x00000092, 0x00000038 }, ++ { 0x000ca094, 0x00000030 }, ++ { 0x080045bb, 0x00000004 }, ++ { 0x000c2095, 0x00000030 }, ++ { 0x0800e5bc, 0000000000 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x00120000, 0x0000000c }, ++ { 0x00120000, 0x00000004 }, ++ { 0x001b0002, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x000000a4, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x000000a1, 0x00000008 }, ++ { 0x000000a6, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x000000ad, 
0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x001400a9, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700e3, 0x00000004 }, ++ { 0x000000c0, 0x00000038 }, ++ { 0x000c2095, 0x00000030 }, ++ { 0x0880e5bd, 0x00000005 }, ++ { 0x000c2094, 0x00000030 }, ++ { 0x0800e5bb, 0x00000005 }, ++ { 0x000c2095, 0x00000030 }, ++ { 0x0880e5bc, 0x00000005 }, ++ { 0x000000c3, 0x00000008 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000c7, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000c9, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080c3, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700e0, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000e4, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000eb, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000f3, 0x00000034 }, ++ { 0x000000f0, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R420_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x00000099, 0x00000008 }, ++ { 0x0000009d, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0xd9d3dff6, 0000000000 }, ++ { 0x4ac54a4a, 0000000000 }, ++ { 0xc8828282, 0000000000 }, ++ { 0xbf4acfc1, 0000000000 }, ++ { 0x87b04a4a, 0000000000 }, ++ { 0xb5838383, 0000000000 }, ++ { 0x4a0f85ba, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 
0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x00000080, 0x00000038 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 
0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x0000008f, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x0000008c, 0x00000008 }, ++ { 0x00000091, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x00000098, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x00140094, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x000000a4, 0x00000038 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000ab, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000ad, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080a7, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700c4, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000c8, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000cf, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000d7, 0x00000034 }, ++ { 0x000000d4, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0x0000e1cc, 0x00000004 }, ++ { 0x0500e1cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000de, 0x00000034 }, ++ { 0x000000da, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x0019e1cc, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x0500a000, 0x00000004 }, ++ { 0x080041cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 
0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 RS600_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000000a0, 0x00000008 }, ++ { 0x000000a4, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0x4ae74af6, 0000000000 }, ++ { 0x4ad34a4a, 0000000000 }, ++ { 0xd6898989, 0000000000 }, ++ { 0xcd4addcf, 0000000000 }, ++ { 0x8ebe4ae2, 0000000000 }, ++ { 0xc38a8a8a, 0000000000 }, ++ { 0x4a0f8cc8, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 
0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700d5, 0x00000004 }, ++ { 0x00000084, 0x00000038 }, ++ { 0x000ca086, 0x00000030 }, ++ { 0x080045bb, 0x00000004 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0800e5bc, 0000000000 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x00120000, 0x0000000c }, ++ { 0x00120000, 0x00000004 }, ++ { 0x001b0002, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x00000096, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x00000093, 0x00000008 }, ++ { 0x00000098, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000009f, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x0014009b, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700d5, 0x00000004 }, ++ { 0x000000b2, 0x00000038 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bd, 0x00000005 }, ++ { 0x000c2086, 0x00000030 }, ++ { 0x0800e5bb, 0x00000005 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bc, 0x00000005 }, ++ { 0x000000b5, 0x00000008 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000b9, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000bb, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080b5, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700d2, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000d6, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 
0x00000004 }, ++ { 0x000000dd, 0x00000008 }, ++ { 0x00e00116, 0000000000 }, ++ { 0x000700e1, 0x00000004 }, ++ { 0x0800401c, 0x00000004 }, ++ { 0x200050e7, 0x00000004 }, ++ { 0x0000e01d, 0x00000004 }, ++ { 0x000000e4, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000eb, 0x00000034 }, ++ { 0x000000e8, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 RS690_cp_microcode[][2]={ ++ { 0x000000dd, 0x00000008 }, ++ { 0x000000df, 0x00000008 }, ++ { 0x000000a0, 0x00000008 }, ++ { 0x000000a4, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0x4ad74af6, 0000000000 }, ++ { 0x4ac94a4a, 0000000000 }, ++ { 0xcc898989, 0000000000 }, ++ { 0xc34ad3c5, 0000000000 }, ++ { 0x8e4a4a4a, 0000000000 }, ++ { 0x4a8a8a8a, 0000000000 }, ++ { 0x4a0f8c4a, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 
0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700cb, 0x00000004 }, ++ { 0x00000084, 0x00000038 }, ++ { 0x000ca086, 0x00000030 }, ++ { 0x080045bb, 0x00000004 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0800e5bc, 0000000000 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x00120000, 0x0000000c }, ++ { 0x00120000, 0x00000004 }, ++ { 0x001b0002, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x00000096, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x00000093, 0x00000008 }, ++ { 0x00000098, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000009f, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x0014009b, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x00100000, 0x0000002c }, ++ { 0x00004000, 0000000000 }, ++ { 0x080045c8, 0x00000004 }, ++ { 0x00240005, 0x00000004 }, ++ { 0x08004d0b, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700cb, 0x00000004 }, ++ { 0x000000b7, 0x00000038 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bd, 0x00000005 }, ++ { 0x000c2086, 0x00000030 }, ++ { 0x0800e5bb, 0x00000005 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bc, 0x00000005 }, ++ { 0x000000ba, 0x00000008 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000be, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000c0, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080ba, 
0x00000008 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700c8, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000cc, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000d3, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000db, 0x00000034 }, ++ { 0x000000d8, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0x000000e1, 0x00000030 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x000000e1, 0x00000030 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x0025001b, 0x00000004 }, ++ { 0x00230000, 0x00000004 }, ++ { 0x00250005, 0x00000004 }, ++ { 0x000000e6, 0x00000034 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00244000, 0x00000004 }, ++ { 0x080045c8, 0x00000004 }, ++ { 0x00240005, 0x00000004 }, ++ { 0x08004d0b, 0x0000000c }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R520_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x00000099, 0x00000008 }, ++ { 0x0000009d, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0xe0dae6f6, 0000000000 }, ++ { 0x4ac54a4a, 0000000000 }, ++ { 0xc8828282, 0000000000 }, ++ { 0xbf4acfc1, 0000000000 }, ++ { 0x87b04ad5, 0000000000 }, ++ { 0xb5838383, 0000000000 }, ++ { 0x4a0f85ba, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 
0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x00000080, 0x00000038 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x0000008f, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x0000008c, 0x00000008 }, ++ { 0x00000091, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x00000098, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x00140094, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x000000a4, 0x00000038 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 
0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000ab, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000ad, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080a7, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700c4, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000c8, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000cf, 0x00000008 }, ++ { 0xdeadbeef, 0000000000 }, ++ { 0x00000116, 0000000000 }, ++ { 0x000700d3, 0x00000004 }, ++ { 0x080050e7, 0x00000004 }, ++ { 0x000700d4, 0x00000004 }, ++ { 0x0800401c, 0x00000004 }, ++ { 0x0000e01d, 0000000000 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000de, 0x00000034 }, ++ { 0x000000db, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0x0000e1cc, 0x00000004 }, ++ { 0x0500e1cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000e5, 0x00000034 }, ++ { 0x000000e1, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x0019e1cc, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x0500a000, 0x00000004 }, ++ { 0x080041cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_state.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_state.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/radeon_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/radeon_state.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,3263 @@ ++/* radeon_state.c -- State support for Radeon -*- linux-c -*- */ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Kevin E. Martin ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++/* ================================================================ ++ * Helper functions for client state checking and fixup ++ */ ++ ++static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * ++ dev_priv, ++ struct drm_file *file_priv, ++ u32 * offset) ++{ ++ u64 off = *offset; ++ u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1; ++ struct drm_radeon_driver_file_fields *radeon_priv; ++ ++ /* Hrm ... the story of the offset ... So this function converts ++ * the various ideas of what userland clients might have for an ++ * offset in the card address space into an offset into the card ++ * address space :) So with a sane client, it should just keep ++ * the value intact and just do some boundary checking. However, ++ * not all clients are sane. Some older clients pass us 0 based ++ * offsets relative to the start of the framebuffer and some may ++ * assume the AGP aperture it appended to the framebuffer, so we ++ * try to detect those cases and fix them up. ++ * ++ * Note: It might be a good idea here to make sure the offset lands ++ * in some "allowed" area to protect things like the PCIE GART... ++ */ ++ ++ /* First, the best case, the offset already lands in either the ++ * framebuffer or the GART mapped space ++ */ ++ if (radeon_check_offset(dev_priv, off)) ++ return 0; ++ ++ /* Ok, that didn't happen... 
now check if we have a zero based ++ * offset that fits in the framebuffer + gart space, apply the ++ * magic offset we get from SETPARAM or calculated from fb_location ++ */ ++ if (off < (dev_priv->fb_size + dev_priv->gart_size)) { ++ radeon_priv = file_priv->driver_priv; ++ off += radeon_priv->radeon_fb_delta; ++ } ++ ++ /* Finally, assume we aimed at a GART offset if beyond the fb */ ++ if (off > fb_end) ++ off = off - fb_end - 1 + dev_priv->gart_vm_start; ++ ++ /* Now recheck and fail if out of bounds */ ++ if (radeon_check_offset(dev_priv, off)) { ++ DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off); ++ *offset = off; ++ return 0; ++ } ++ return -EINVAL; ++} ++ ++static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * ++ dev_priv, ++ struct drm_file *file_priv, ++ int id, u32 *data) ++{ ++ switch (id) { ++ ++ case RADEON_EMIT_PP_MISC: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { ++ DRM_ERROR("Invalid depth buffer offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_EMIT_PP_CNTL: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { ++ DRM_ERROR("Invalid colour buffer offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case R200_EMIT_PP_TXOFFSET_0: ++ case R200_EMIT_PP_TXOFFSET_1: ++ case R200_EMIT_PP_TXOFFSET_2: ++ case R200_EMIT_PP_TXOFFSET_3: ++ case R200_EMIT_PP_TXOFFSET_4: ++ case R200_EMIT_PP_TXOFFSET_5: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[0])) { ++ DRM_ERROR("Invalid R200 texture offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_EMIT_PP_TXFILTER_0: ++ case RADEON_EMIT_PP_TXFILTER_1: ++ case RADEON_EMIT_PP_TXFILTER_2: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { ++ DRM_ERROR("Invalid R100 texture offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case R200_EMIT_PP_CUBIC_OFFSETS_0: ++ case R200_EMIT_PP_CUBIC_OFFSETS_1: ++ case R200_EMIT_PP_CUBIC_OFFSETS_2: ++ case R200_EMIT_PP_CUBIC_OFFSETS_3: ++ case R200_EMIT_PP_CUBIC_OFFSETS_4: ++ case R200_EMIT_PP_CUBIC_OFFSETS_5:{ ++ int i; ++ for (i = 0; i < 5; i++) { ++ if (radeon_check_and_fixup_offset(dev_priv, ++ file_priv, ++ &data[i])) { ++ DRM_ERROR ++ ("Invalid R200 cubic texture offset\n"); ++ return -EINVAL; ++ } ++ } ++ break; ++ } ++ ++ case RADEON_EMIT_PP_CUBIC_OFFSETS_T0: ++ case RADEON_EMIT_PP_CUBIC_OFFSETS_T1: ++ case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ ++ int i; ++ for (i = 0; i < 5; i++) { ++ if (radeon_check_and_fixup_offset(dev_priv, ++ file_priv, ++ &data[i])) { ++ DRM_ERROR ++ ("Invalid R100 cubic texture offset\n"); ++ return -EINVAL; ++ } ++ } ++ } ++ break; ++ ++ case R200_EMIT_VAP_CTL: { ++ RING_LOCALS; ++ BEGIN_RING(2); ++ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ++ ADVANCE_RING(); ++ } ++ break; ++ ++ case RADEON_EMIT_RB3D_COLORPITCH: ++ case RADEON_EMIT_RE_LINE_PATTERN: ++ case RADEON_EMIT_SE_LINE_WIDTH: ++ case RADEON_EMIT_PP_LUM_MATRIX: ++ case RADEON_EMIT_PP_ROT_MATRIX_0: ++ case RADEON_EMIT_RB3D_STENCILREFMASK: ++ case RADEON_EMIT_SE_VPORT_XSCALE: ++ case RADEON_EMIT_SE_CNTL: ++ case RADEON_EMIT_SE_CNTL_STATUS: ++ case RADEON_EMIT_RE_MISC: ++ case RADEON_EMIT_PP_BORDER_COLOR_0: ++ case RADEON_EMIT_PP_BORDER_COLOR_1: ++ case RADEON_EMIT_PP_BORDER_COLOR_2: ++ case RADEON_EMIT_SE_ZBIAS_FACTOR: ++ case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT: ++ case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED: ++ case 
R200_EMIT_PP_TXCBLEND_0: ++ case R200_EMIT_PP_TXCBLEND_1: ++ case R200_EMIT_PP_TXCBLEND_2: ++ case R200_EMIT_PP_TXCBLEND_3: ++ case R200_EMIT_PP_TXCBLEND_4: ++ case R200_EMIT_PP_TXCBLEND_5: ++ case R200_EMIT_PP_TXCBLEND_6: ++ case R200_EMIT_PP_TXCBLEND_7: ++ case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: ++ case R200_EMIT_TFACTOR_0: ++ case R200_EMIT_VTX_FMT_0: ++ case R200_EMIT_MATRIX_SELECT_0: ++ case R200_EMIT_TEX_PROC_CTL_2: ++ case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: ++ case R200_EMIT_PP_TXFILTER_0: ++ case R200_EMIT_PP_TXFILTER_1: ++ case R200_EMIT_PP_TXFILTER_2: ++ case R200_EMIT_PP_TXFILTER_3: ++ case R200_EMIT_PP_TXFILTER_4: ++ case R200_EMIT_PP_TXFILTER_5: ++ case R200_EMIT_VTE_CNTL: ++ case R200_EMIT_OUTPUT_VTX_COMP_SEL: ++ case R200_EMIT_PP_TAM_DEBUG3: ++ case R200_EMIT_PP_CNTL_X: ++ case R200_EMIT_RB3D_DEPTHXY_OFFSET: ++ case R200_EMIT_RE_AUX_SCISSOR_CNTL: ++ case R200_EMIT_RE_SCISSOR_TL_0: ++ case R200_EMIT_RE_SCISSOR_TL_1: ++ case R200_EMIT_RE_SCISSOR_TL_2: ++ case R200_EMIT_SE_VAP_CNTL_STATUS: ++ case R200_EMIT_SE_VTX_STATE_CNTL: ++ case R200_EMIT_RE_POINTSIZE: ++ case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0: ++ case R200_EMIT_PP_CUBIC_FACES_0: ++ case R200_EMIT_PP_CUBIC_FACES_1: ++ case R200_EMIT_PP_CUBIC_FACES_2: ++ case R200_EMIT_PP_CUBIC_FACES_3: ++ case R200_EMIT_PP_CUBIC_FACES_4: ++ case R200_EMIT_PP_CUBIC_FACES_5: ++ case RADEON_EMIT_PP_TEX_SIZE_0: ++ case RADEON_EMIT_PP_TEX_SIZE_1: ++ case RADEON_EMIT_PP_TEX_SIZE_2: ++ case R200_EMIT_RB3D_BLENDCOLOR: ++ case R200_EMIT_TCL_POINT_SPRITE_CNTL: ++ case RADEON_EMIT_PP_CUBIC_FACES_0: ++ case RADEON_EMIT_PP_CUBIC_FACES_1: ++ case RADEON_EMIT_PP_CUBIC_FACES_2: ++ case R200_EMIT_PP_TRI_PERF_CNTL: ++ case R200_EMIT_PP_AFS_0: ++ case R200_EMIT_PP_AFS_1: ++ case R200_EMIT_ATF_TFACTOR: ++ case R200_EMIT_PP_TXCTLALL_0: ++ case R200_EMIT_PP_TXCTLALL_1: ++ case R200_EMIT_PP_TXCTLALL_2: ++ case R200_EMIT_PP_TXCTLALL_3: ++ case R200_EMIT_PP_TXCTLALL_4: ++ case R200_EMIT_PP_TXCTLALL_5: ++ case R200_EMIT_VAP_PVS_CNTL: ++ /* These packets don't contain memory offsets */ ++ break; ++ ++ default: ++ DRM_ERROR("Unknown state packet ID %d\n", id); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * ++ dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t * ++ cmdbuf, ++ unsigned int *cmdsz) ++{ ++ u32 *cmd = (u32 *) cmdbuf->buf; ++ u32 offset, narrays; ++ int count, i, k; ++ ++ *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); ++ ++ if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { ++ DRM_ERROR("Not a type 3 packet\n"); ++ return -EINVAL; ++ } ++ ++ if (4 * *cmdsz > cmdbuf->bufsz) { ++ DRM_ERROR("Packet size larger than size of data provided\n"); ++ return -EINVAL; ++ } ++ ++ switch(cmd[0] & 0xff00) { ++ /* XXX Are there old drivers needing other packets? */ ++ ++ case RADEON_3D_DRAW_IMMD: ++ case RADEON_3D_DRAW_VBUF: ++ case RADEON_3D_DRAW_INDX: ++ case RADEON_WAIT_FOR_IDLE: ++ case RADEON_CP_NOP: ++ case RADEON_3D_CLEAR_ZMASK: ++/* case RADEON_CP_NEXT_CHAR: ++ case RADEON_CP_PLY_NEXTSCAN: ++ case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? 
*/ ++ /* these packets are safe */ ++ break; ++ ++ case RADEON_CP_3D_DRAW_IMMD_2: ++ case RADEON_CP_3D_DRAW_VBUF_2: ++ case RADEON_CP_3D_DRAW_INDX_2: ++ case RADEON_3D_CLEAR_HIZ: ++ /* safe but r200 only */ ++ if ((dev_priv->chip_family < CHIP_R200) || ++ (dev_priv->chip_family > CHIP_RV280)) { ++ DRM_ERROR("Invalid 3d packet for non r200-class chip\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_3D_LOAD_VBPNTR: ++ count = (cmd[0] >> 16) & 0x3fff; ++ ++ if (count > 18) { /* 12 arrays max */ ++ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", ++ count); ++ return -EINVAL; ++ } ++ ++ /* carefully check packet contents */ ++ narrays = cmd[1] & ~0xc000; ++ k = 0; ++ i = 2; ++ while ((k < narrays) && (i < (count + 2))) { ++ i++; /* skip attribute field */ ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &cmd[i])) { ++ DRM_ERROR ++ ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ if (k == narrays) ++ break; ++ /* have one more to process, they come in pairs */ ++ if (radeon_check_and_fixup_offset(dev_priv, ++ file_priv, &cmd[i])) ++ { ++ DRM_ERROR ++ ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ } ++ /* do the counts match what we expect ? */ ++ if ((k != narrays) || (i != (count + 2))) { ++ DRM_ERROR ++ ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", ++ k, i, narrays, count + 1); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_3D_RNDR_GEN_INDX_PRIM: ++ if (dev_priv->chip_family > CHIP_RS200) { ++ DRM_ERROR("Invalid 3d packet for non-r100-class chip\n"); ++ return -EINVAL; ++ } ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { ++ DRM_ERROR("Invalid rndr_gen_indx offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_CP_INDX_BUFFER: ++ /* safe but r200 only */ ++ if ((dev_priv->chip_family < CHIP_R200) || ++ (dev_priv->chip_family > CHIP_RV280)) { ++ DRM_ERROR("Invalid 3d packet for non-r200-class chip\n"); ++ return -EINVAL; ++ } ++ if ((cmd[1] & 0x8000ffff) != 0x80000810) { ++ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); ++ return -EINVAL; ++ } ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { ++ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_CNTL_HOSTDATA_BLT: ++ case RADEON_CNTL_PAINT_MULTI: ++ case RADEON_CNTL_BITBLT_MULTI: ++ /* MSB of opcode: next DWORD GUI_CNTL */ ++ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL ++ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[2] << 10; ++ if (radeon_check_and_fixup_offset ++ (dev_priv, file_priv, &offset)) { ++ DRM_ERROR("Invalid first packet offset\n"); ++ return -EINVAL; ++ } ++ cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; ++ } ++ ++ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && ++ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[3] << 10; ++ if (radeon_check_and_fixup_offset ++ (dev_priv, file_priv, &offset)) { ++ DRM_ERROR("Invalid second packet offset\n"); ++ return -EINVAL; ++ } ++ cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; ++ } ++ break; ++ ++ default: ++ DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * CP hardware state programming functions ++ */ ++ ++static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, ++ struct drm_clip_rect * box) ++{ ++ 
RING_LOCALS; ++ ++ DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n", ++ box->x1, box->y1, box->x2, box->y2); ++ ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0)); ++ OUT_RING((box->y1 << 16) | box->x1); ++ OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0)); ++ OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1)); ++ ADVANCE_RING(); ++} ++ ++/* Emit 1.1 state ++ */ ++static int radeon_emit_state(drm_radeon_private_t * dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_context_regs_t * ctx, ++ drm_radeon_texture_regs_t * tex, ++ unsigned int dirty) ++{ ++ RING_LOCALS; ++ DRM_DEBUG("dirty=0x%08x\n", dirty); ++ ++ if (dirty & RADEON_UPLOAD_CONTEXT) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &ctx->rb3d_depthoffset)) { ++ DRM_ERROR("Invalid depth buffer offset\n"); ++ return -EINVAL; ++ } ++ ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &ctx->rb3d_coloroffset)) { ++ DRM_ERROR("Invalid depth buffer offset\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(14); ++ OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6)); ++ OUT_RING(ctx->pp_misc); ++ OUT_RING(ctx->pp_fog_color); ++ OUT_RING(ctx->re_solid_color); ++ OUT_RING(ctx->rb3d_blendcntl); ++ OUT_RING(ctx->rb3d_depthoffset); ++ OUT_RING(ctx->rb3d_depthpitch); ++ OUT_RING(ctx->rb3d_zstencilcntl); ++ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2)); ++ OUT_RING(ctx->pp_cntl); ++ OUT_RING(ctx->rb3d_cntl); ++ OUT_RING(ctx->rb3d_coloroffset); ++ OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0)); ++ OUT_RING(ctx->rb3d_colorpitch); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_VERTFMT) { ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0)); ++ OUT_RING(ctx->se_coord_fmt); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_LINE) { ++ BEGIN_RING(5); ++ OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1)); ++ OUT_RING(ctx->re_line_pattern); ++ OUT_RING(ctx->re_line_state); ++ OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0)); ++ OUT_RING(ctx->se_line_width); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_BUMPMAP) { ++ BEGIN_RING(5); ++ OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0)); ++ OUT_RING(ctx->pp_lum_matrix); ++ OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1)); ++ OUT_RING(ctx->pp_rot_matrix_0); ++ OUT_RING(ctx->pp_rot_matrix_1); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_MASKS) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2)); ++ OUT_RING(ctx->rb3d_stencilrefmask); ++ OUT_RING(ctx->rb3d_ropcntl); ++ OUT_RING(ctx->rb3d_planemask); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_VIEWPORT) { ++ BEGIN_RING(7); ++ OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5)); ++ OUT_RING(ctx->se_vport_xscale); ++ OUT_RING(ctx->se_vport_xoffset); ++ OUT_RING(ctx->se_vport_yscale); ++ OUT_RING(ctx->se_vport_yoffset); ++ OUT_RING(ctx->se_vport_zscale); ++ OUT_RING(ctx->se_vport_zoffset); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_SETUP) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0)); ++ OUT_RING(ctx->se_cntl); ++ OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0)); ++ OUT_RING(ctx->se_cntl_status); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_MISC) { ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0)); ++ OUT_RING(ctx->re_misc); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_TEX0) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &tex[0].pp_txoffset)) { ++ DRM_ERROR("Invalid texture offset for unit 0\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5)); ++ 
OUT_RING(tex[0].pp_txfilter); ++ OUT_RING(tex[0].pp_txformat); ++ OUT_RING(tex[0].pp_txoffset); ++ OUT_RING(tex[0].pp_txcblend); ++ OUT_RING(tex[0].pp_txablend); ++ OUT_RING(tex[0].pp_tfactor); ++ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0)); ++ OUT_RING(tex[0].pp_border_color); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_TEX1) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &tex[1].pp_txoffset)) { ++ DRM_ERROR("Invalid texture offset for unit 1\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5)); ++ OUT_RING(tex[1].pp_txfilter); ++ OUT_RING(tex[1].pp_txformat); ++ OUT_RING(tex[1].pp_txoffset); ++ OUT_RING(tex[1].pp_txcblend); ++ OUT_RING(tex[1].pp_txablend); ++ OUT_RING(tex[1].pp_tfactor); ++ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0)); ++ OUT_RING(tex[1].pp_border_color); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_TEX2) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &tex[2].pp_txoffset)) { ++ DRM_ERROR("Invalid texture offset for unit 2\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5)); ++ OUT_RING(tex[2].pp_txfilter); ++ OUT_RING(tex[2].pp_txformat); ++ OUT_RING(tex[2].pp_txoffset); ++ OUT_RING(tex[2].pp_txcblend); ++ OUT_RING(tex[2].pp_txablend); ++ OUT_RING(tex[2].pp_tfactor); ++ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0)); ++ OUT_RING(tex[2].pp_border_color); ++ ADVANCE_RING(); ++ } ++ ++ return 0; ++} ++ ++/* Emit 1.2 state ++ */ ++static int radeon_emit_state2(drm_radeon_private_t * dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_state_t * state) ++{ ++ RING_LOCALS; ++ ++ if (state->dirty & RADEON_UPLOAD_ZBIAS) { ++ BEGIN_RING(3); ++ OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1)); ++ OUT_RING(state->context2.se_zbias_factor); ++ OUT_RING(state->context2.se_zbias_constant); ++ ADVANCE_RING(); ++ } ++ ++ return radeon_emit_state(dev_priv, file_priv, &state->context, ++ state->tex, state->dirty); ++} ++ ++/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in ++ * 1.3 cmdbuffers allow all previous state to be updated as well as ++ * the tcl scalar and vector areas. 
++ */ ++static struct { ++ int start; ++ int len; ++ const char *name; ++} packet[RADEON_MAX_STATE_PACKETS] = { ++ {RADEON_PP_MISC, 7, "RADEON_PP_MISC"}, ++ {RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"}, ++ {RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"}, ++ {RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"}, ++ {RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"}, ++ {RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"}, ++ {RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"}, ++ {RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"}, ++ {RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"}, ++ {RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"}, ++ {RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"}, ++ {RADEON_RE_MISC, 1, "RADEON_RE_MISC"}, ++ {RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"}, ++ {RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"}, ++ {RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"}, ++ {RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"}, ++ {RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"}, ++ {RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"}, ++ {RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"}, ++ {RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"}, ++ {RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17, ++ "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"}, ++ {R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"}, ++ {R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"}, ++ {R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"}, ++ {R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"}, ++ {R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"}, ++ {R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"}, ++ {R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"}, ++ {R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"}, ++ {R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"}, ++ {R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"}, ++ {R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"}, ++ {R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"}, ++ {R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"}, ++ {R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"}, ++ {R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"}, ++ {R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"}, ++ {R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"}, ++ {R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"}, ++ {R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"}, ++ {R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"}, ++ {R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"}, ++ {R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"}, ++ {R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"}, ++ {R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"}, ++ {R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"}, ++ {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, ++ {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, ++ {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, ++ {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, ++ "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"}, ++ {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, ++ {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, ++ {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, ++ {R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"}, ++ {R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"}, ++ {R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"}, ++ {R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"}, ++ {R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"}, ++ {R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"}, ++ {R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"}, ++ {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, ++ "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, ++ {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */ ++ 
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */ ++ {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, ++ {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, ++ {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, ++ {R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"}, ++ {R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"}, ++ {R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"}, ++ {R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"}, ++ {R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"}, ++ {R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"}, ++ {R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"}, ++ {RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"}, ++ {RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"}, ++ {RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"}, ++ {R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"}, ++ {R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"}, ++ {RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"}, ++ {RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"}, ++ {RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"}, ++ {RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"}, ++ {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"}, ++ {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"}, ++ {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"}, ++ {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */ ++ {R200_PP_AFS_1, 32, "R200_PP_AFS_1"}, ++ {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"}, ++ {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"}, ++ {R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"}, ++ {R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"}, ++ {R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"}, ++ {R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"}, ++ {R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"}, ++ {R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"}, ++}; ++ ++/* ================================================================ ++ * Performance monitoring functions ++ */ ++ ++static void radeon_clear_box(drm_radeon_private_t * dev_priv, ++ int x, int y, int w, int h, int r, int g, int b) ++{ ++ u32 color; ++ RING_LOCALS; ++ ++ x += dev_priv->sarea_priv->boxes[0].x1; ++ y += dev_priv->sarea_priv->boxes[0].y1; ++ ++ switch (dev_priv->color_fmt) { ++ case RADEON_COLOR_FORMAT_RGB565: ++ color = (((r & 0xf8) << 8) | ++ ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); ++ break; ++ case RADEON_COLOR_FORMAT_ARGB8888: ++ default: ++ color = (((0xff) << 24) | (r << 16) | (g << 8) | b); ++ break; ++ } ++ ++ BEGIN_RING(4); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0)); ++ OUT_RING(0xffffffff); ++ ADVANCE_RING(); ++ ++ BEGIN_RING(6); ++ ++ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); ++ ++ if (dev_priv->sarea_priv->pfCurrentPage == 1) { ++ OUT_RING(dev_priv->front_pitch_offset); ++ } else { ++ OUT_RING(dev_priv->back_pitch_offset); ++ } ++ ++ OUT_RING(color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) ++{ ++ /* Collapse various things into a wait flag -- trying to ++ * guess if userspase slept -- better just to have them tell us. 
++ */ ++ if (dev_priv->stats.last_frame_reads > 1 || ++ dev_priv->stats.last_clear_reads > dev_priv->stats.clears) { ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ } ++ ++ if (dev_priv->stats.freelist_loops) { ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ } ++ ++ /* Purple box for page flipping ++ */ ++ if (dev_priv->stats.boxes & RADEON_BOX_FLIP) ++ radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255); ++ ++ /* Red box if we have to wait for idle at any point ++ */ ++ if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) ++ radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0); ++ ++ /* Blue box: lost context? ++ */ ++ ++ /* Yellow box for texture swaps ++ */ ++ if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) ++ radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0); ++ ++ /* Green box if hardware never idles (as far as we can tell) ++ */ ++ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) ++ radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); ++ ++ /* Draw bars indicating number of buffers allocated ++ * (not a great measure, easily confused) ++ */ ++ if (dev_priv->stats.requested_bufs) { ++ if (dev_priv->stats.requested_bufs > 100) ++ dev_priv->stats.requested_bufs = 100; ++ ++ radeon_clear_box(dev_priv, 4, 16, ++ dev_priv->stats.requested_bufs, 4, ++ 196, 128, 128); ++ } ++ ++ memset(&dev_priv->stats, 0, sizeof(dev_priv->stats)); ++ ++} ++ ++/* ================================================================ ++ * CP command dispatch functions ++ */ ++ ++static void radeon_cp_dispatch_clear(struct drm_device * dev, ++ drm_radeon_clear_t * clear, ++ drm_radeon_clear_rect_t * depth_boxes) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ unsigned int flags = clear->flags; ++ u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("flags = 0x%x\n", flags); ++ ++ dev_priv->stats.clears++; ++ ++ if (dev_priv->sarea_priv->pfCurrentPage == 1) { ++ unsigned int tmp = flags; ++ ++ flags &= ~(RADEON_FRONT | RADEON_BACK); ++ if (tmp & RADEON_FRONT) ++ flags |= RADEON_BACK; ++ if (tmp & RADEON_BACK) ++ flags |= RADEON_FRONT; ++ } ++ ++ if (flags & (RADEON_FRONT | RADEON_BACK)) { ++ ++ BEGIN_RING(4); ++ ++ /* Ensure the 3D stream is idle before doing a ++ * 2D fill to clear the front or back buffer. ++ */ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ++ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0)); ++ OUT_RING(clear->color_mask); ++ ++ ADVANCE_RING(); ++ ++ /* Make sure we restore the 3D state next time. 
++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n", ++ x, y, w, h, flags); ++ ++ if (flags & RADEON_FRONT) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CP_PACKET3 ++ (RADEON_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv-> ++ color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_P | ++ RADEON_GMC_CLR_CMP_CNTL_DIS); ++ ++ OUT_RING(dev_priv->front_pitch_offset); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & RADEON_BACK) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CP_PACKET3 ++ (RADEON_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv-> ++ color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_P | ++ RADEON_GMC_CLR_CMP_CNTL_DIS); ++ ++ OUT_RING(dev_priv->back_pitch_offset); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ } ++ } ++ ++ /* hyper z clear */ ++ /* no docs available, based on reverse engeneering by Stephane Marchesin */ ++ if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) ++ && (flags & RADEON_CLEAR_FASTZ)) { ++ ++ int i; ++ int depthpixperline = ++ dev_priv->depth_fmt == ++ RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch / ++ 2) : (dev_priv-> ++ depth_pitch / 4); ++ ++ u32 clearmask; ++ ++ u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth | ++ ((clear->depth_mask & 0xff) << 24); ++ ++ /* Make sure we restore the 3D state next time. ++ * we haven't touched any "normal" state - still need this? ++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ if ((dev_priv->flags & RADEON_HAS_HIERZ) ++ && (flags & RADEON_USE_HIERZ)) { ++ /* FIXME : reverse engineer that for Rx00 cards */ ++ /* FIXME : the mask supposedly contains low-res z values. So can't set ++ just to the max (0xff? or actually 0x3fff?), need to take z clear ++ value into account? */ ++ /* pattern seems to work for r100, though get slight ++ rendering errors with glxgears. If hierz is not enabled for r100, ++ only 4 bits which indicate clear (15,16,31,32, all zero) matter, the ++ other ones are ignored, and the same clear mask can be used. That's ++ very different behaviour than R200 which needs different clear mask ++ and different number of tiles to clear if hierz is enabled or not !?! ++ */ ++ clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f; ++ } else { ++ /* clear mask : chooses the clearing pattern. ++ rv250: could be used to clear only parts of macrotiles ++ (but that would get really complicated...)? ++ bit 0 and 1 (either or both of them ?!?!) are used to ++ not clear tile (or maybe one of the bits indicates if the tile is ++ compressed or not), bit 2 and 3 to not clear tile 1,...,. ++ Pattern is as follows: ++ | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29| ++ bits ------------------------------------------------- ++ | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31| ++ rv100: clearmask covers 2x8 4x1 tiles, but one clear still ++ covers 256 pixels ?!? ++ */ ++ clearmask = 0x0; ++ } ++ ++ BEGIN_RING(8); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE, ++ tempRB3D_DEPTHCLEARVALUE); ++ /* what offset is this exactly ? 
*/ ++ OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0); ++ /* need ctlstat, otherwise get some strange black flickering */ ++ OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT, ++ RADEON_RB3D_ZC_FLUSH_ALL); ++ ADVANCE_RING(); ++ ++ for (i = 0; i < nbox; i++) { ++ int tileoffset, nrtilesx, nrtilesy, j; ++ /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */ ++ if ((dev_priv->flags & RADEON_HAS_HIERZ) ++ && (dev_priv->chip_family < CHIP_R200)) { ++ /* FIXME : figure this out for r200 (when hierz is enabled). Or ++ maybe r200 actually doesn't need to put the low-res z value into ++ the tile cache like r100, but just needs to clear the hi-level z-buffer? ++ Works for R100, both with hierz and without. ++ R100 seems to operate on 2x1 8x8 tiles, but... ++ odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially ++ problematic with resolutions which are not 64 pix aligned? */ ++ tileoffset = ++ ((pbox[i].y1 >> 3) * depthpixperline + ++ pbox[i].x1) >> 6; ++ nrtilesx = ++ ((pbox[i].x2 & ~63) - ++ (pbox[i].x1 & ~63)) >> 4; ++ nrtilesy = ++ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3); ++ for (j = 0; j <= nrtilesy; j++) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3 ++ (RADEON_3D_CLEAR_ZMASK, 2)); ++ /* first tile */ ++ OUT_RING(tileoffset * 8); ++ /* the number of tiles to clear */ ++ OUT_RING(nrtilesx + 4); ++ /* clear mask : chooses the clearing pattern. */ ++ OUT_RING(clearmask); ++ ADVANCE_RING(); ++ tileoffset += depthpixperline >> 6; ++ } ++ } else if ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280)) { ++ /* works for rv250. */ ++ /* find first macro tile (8x2 4x4 z-pixels on rv250) */ ++ tileoffset = ++ ((pbox[i].y1 >> 3) * depthpixperline + ++ pbox[i].x1) >> 5; ++ nrtilesx = ++ (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5); ++ nrtilesy = ++ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3); ++ for (j = 0; j <= nrtilesy; j++) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3 ++ (RADEON_3D_CLEAR_ZMASK, 2)); ++ /* first tile */ ++ /* judging by the first tile offset needed, could possibly ++ directly address/clear 4x4 tiles instead of 8x2 * 4x4 ++ macro tiles, though would still need clear mask for ++ right/bottom if truely 4x4 granularity is desired ? */ ++ OUT_RING(tileoffset * 16); ++ /* the number of tiles to clear */ ++ OUT_RING(nrtilesx + 1); ++ /* clear mask : chooses the clearing pattern. */ ++ OUT_RING(clearmask); ++ ADVANCE_RING(); ++ tileoffset += depthpixperline >> 5; ++ } ++ } else { /* rv 100 */ ++ /* rv100 might not need 64 pix alignment, who knows */ ++ /* offsets are, hmm, weird */ ++ tileoffset = ++ ((pbox[i].y1 >> 4) * depthpixperline + ++ pbox[i].x1) >> 6; ++ nrtilesx = ++ ((pbox[i].x2 & ~63) - ++ (pbox[i].x1 & ~63)) >> 4; ++ nrtilesy = ++ (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4); ++ for (j = 0; j <= nrtilesy; j++) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3 ++ (RADEON_3D_CLEAR_ZMASK, 2)); ++ OUT_RING(tileoffset * 128); ++ /* the number of tiles to clear */ ++ OUT_RING(nrtilesx + 4); ++ /* clear mask : chooses the clearing pattern. */ ++ OUT_RING(clearmask); ++ ADVANCE_RING(); ++ tileoffset += depthpixperline >> 6; ++ } ++ } ++ } ++ ++ /* TODO don't always clear all hi-level z tiles */ ++ if ((dev_priv->flags & RADEON_HAS_HIERZ) ++ && ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280)) ++ && (flags & RADEON_USE_HIERZ)) ++ /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */ ++ /* FIXME : the mask supposedly contains low-res z values. So can't set ++ just to the max (0xff? 
or actually 0x3fff?), need to take z clear ++ value into account? */ ++ { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2)); ++ OUT_RING(0x0); /* First tile */ ++ OUT_RING(0x3cc0); ++ OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f); ++ ADVANCE_RING(); ++ } ++ } ++ ++ /* We have to clear the depth and/or stencil buffers by ++ * rendering a quad into just those buffers. Thus, we have to ++ * make sure the 3D engine is configured correctly. ++ */ ++ else if ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280) && ++ (flags & (RADEON_DEPTH | RADEON_STENCIL))) { ++ ++ int tempPP_CNTL; ++ int tempRE_CNTL; ++ int tempRB3D_CNTL; ++ int tempRB3D_ZSTENCILCNTL; ++ int tempRB3D_STENCILREFMASK; ++ int tempRB3D_PLANEMASK; ++ int tempSE_CNTL; ++ int tempSE_VTE_CNTL; ++ int tempSE_VTX_FMT_0; ++ int tempSE_VTX_FMT_1; ++ int tempSE_VAP_CNTL; ++ int tempRE_AUX_SCISSOR_CNTL; ++ ++ tempPP_CNTL = 0; ++ tempRE_CNTL = 0; ++ ++ tempRB3D_CNTL = depth_clear->rb3d_cntl; ++ ++ tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; ++ tempRB3D_STENCILREFMASK = 0x0; ++ ++ tempSE_CNTL = depth_clear->se_cntl; ++ ++ /* Disable TCL */ ++ ++ tempSE_VAP_CNTL = ( /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */ ++ (0x9 << ++ SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT)); ++ ++ tempRB3D_PLANEMASK = 0x0; ++ ++ tempRE_AUX_SCISSOR_CNTL = 0x0; ++ ++ tempSE_VTE_CNTL = ++ SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK; ++ ++ /* Vertex format (X, Y, Z, W) */ ++ tempSE_VTX_FMT_0 = ++ SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK | ++ SE_VTX_FMT_0__VTX_W0_PRESENT_MASK; ++ tempSE_VTX_FMT_1 = 0x0; ++ ++ /* ++ * Depth buffer specific enables ++ */ ++ if (flags & RADEON_DEPTH) { ++ /* Enable depth buffer */ ++ tempRB3D_CNTL |= RADEON_Z_ENABLE; ++ } else { ++ /* Disable depth buffer */ ++ tempRB3D_CNTL &= ~RADEON_Z_ENABLE; ++ } ++ ++ /* ++ * Stencil buffer specific enables ++ */ ++ if (flags & RADEON_STENCIL) { ++ tempRB3D_CNTL |= RADEON_STENCIL_ENABLE; ++ tempRB3D_STENCILREFMASK = clear->depth_mask; ++ } else { ++ tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE; ++ tempRB3D_STENCILREFMASK = 0x00000000; ++ } ++ ++ if (flags & RADEON_USE_COMP_ZBUF) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE | ++ RADEON_Z_DECOMPRESSION_ENABLE; ++ } ++ if (flags & RADEON_USE_HIERZ) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE; ++ } ++ ++ BEGIN_RING(26); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ++ OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL); ++ OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL); ++ OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL); ++ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL); ++ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, ++ tempRB3D_STENCILREFMASK); ++ OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK); ++ OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL); ++ OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL); ++ OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0); ++ OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1); ++ OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL); ++ OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL); ++ ADVANCE_RING(); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ for (i = 0; i < nbox; i++) { ++ ++ /* Funny that this should be required -- ++ * sets top-left? 
++ */ ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ ++ BEGIN_RING(14); ++ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12)); ++ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST | ++ RADEON_PRIM_WALK_RING | ++ (3 << RADEON_NUM_VERTICES_SHIFT))); ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x3f800000); ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x3f800000); ++ OUT_RING(depth_boxes[i].ui[CLEAR_X2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x3f800000); ++ ADVANCE_RING(); ++ } ++ } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) { ++ ++ int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; ++ ++ rb3d_cntl = depth_clear->rb3d_cntl; ++ ++ if (flags & RADEON_DEPTH) { ++ rb3d_cntl |= RADEON_Z_ENABLE; ++ } else { ++ rb3d_cntl &= ~RADEON_Z_ENABLE; ++ } ++ ++ if (flags & RADEON_STENCIL) { ++ rb3d_cntl |= RADEON_STENCIL_ENABLE; ++ rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */ ++ } else { ++ rb3d_cntl &= ~RADEON_STENCIL_ENABLE; ++ rb3d_stencilrefmask = 0x00000000; ++ } ++ ++ if (flags & RADEON_USE_COMP_ZBUF) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE | ++ RADEON_Z_DECOMPRESSION_ENABLE; ++ } ++ if (flags & RADEON_USE_HIERZ) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE; ++ } ++ ++ BEGIN_RING(13); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ++ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1)); ++ OUT_RING(0x00000000); ++ OUT_RING(rb3d_cntl); ++ ++ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL); ++ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask); ++ OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000); ++ OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl); ++ ADVANCE_RING(); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ for (i = 0; i < nbox; i++) { ++ ++ /* Funny that this should be required -- ++ * sets top-left? ++ */ ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ ++ BEGIN_RING(15); ++ ++ OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13)); ++ OUT_RING(RADEON_VTX_Z_PRESENT | ++ RADEON_VTX_PKCOLOR_PRESENT); ++ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST | ++ RADEON_PRIM_WALK_RING | ++ RADEON_MAOS_ENABLE | ++ RADEON_VTX_FMT_RADEON_MODE | ++ (3 << RADEON_NUM_VERTICES_SHIFT))); ++ ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x0); ++ ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x0); ++ ++ OUT_RING(depth_boxes[i].ui[CLEAR_X2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x0); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ /* Increment the clear counter. The client-side 3D driver must ++ * wait on this value before performing the clear ioctl. We ++ * need this because the card's so damned fast... 
++ */ ++ dev_priv->sarea_priv->last_clear++; ++ ++ BEGIN_RING(4); ++ ++ RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_cp_dispatch_swap(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ /* Do some trivial performance monitoring... ++ */ ++ if (dev_priv->do_boxes) ++ radeon_cp_performance_boxes(dev_priv); ++ ++ /* Wait for the 3D stream to idle before dispatching the bitblt. ++ * This will prevent data corruption between the two streams. ++ */ ++ BEGIN_RING(2); ++ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ++ ADVANCE_RING(); ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h); ++ ++ BEGIN_RING(9); ++ ++ OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0)); ++ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | ++ RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_NONE | ++ (dev_priv->color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_S | ++ RADEON_DP_SRC_SOURCE_MEMORY | ++ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); ++ ++ /* Make this work even if front & back are flipped: ++ */ ++ OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); ++ if (dev_priv->sarea_priv->pfCurrentPage == 0) { ++ OUT_RING(dev_priv->back_pitch_offset); ++ OUT_RING(dev_priv->front_pitch_offset); ++ } else { ++ OUT_RING(dev_priv->front_pitch_offset); ++ OUT_RING(dev_priv->back_pitch_offset); ++ } ++ ++ OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2)); ++ OUT_RING((x << 16) | y); ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. ++ */ ++ dev_priv->sarea_priv->last_frame++; ++ ++ BEGIN_RING(4); ++ ++ RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_cp_dispatch_flip(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; ++ int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) ++ ? dev_priv->front_offset : dev_priv->back_offset; ++ RING_LOCALS; ++ DRM_DEBUG("pfCurrentPage=%d\n", ++ dev_priv->sarea_priv->pfCurrentPage); ++ ++ /* Do some trivial performance monitoring... ++ */ ++ if (dev_priv->do_boxes) { ++ dev_priv->stats.boxes |= RADEON_BOX_FLIP; ++ radeon_cp_performance_boxes(dev_priv); ++ } ++ ++ /* Update the frame offsets for both CRTCs ++ */ ++ BEGIN_RING(6); ++ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING_REG(RADEON_CRTC_OFFSET, ++ ((sarea->frame.y * dev_priv->front_pitch + ++ sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) ++ + offset); ++ OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base ++ + offset); ++ ++ ADVANCE_RING(); ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. 
++ */ ++ dev_priv->sarea_priv->last_frame++; ++ dev_priv->sarea_priv->pfCurrentPage = ++ 1 - dev_priv->sarea_priv->pfCurrentPage; ++ ++ BEGIN_RING(2); ++ ++ RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); ++ ++ ADVANCE_RING(); ++} ++ ++static int bad_prim_vertex_nr(int primitive, int nr) ++{ ++ switch (primitive & RADEON_PRIM_TYPE_MASK) { ++ case RADEON_PRIM_TYPE_NONE: ++ case RADEON_PRIM_TYPE_POINT: ++ return nr < 1; ++ case RADEON_PRIM_TYPE_LINE: ++ return (nr & 1) || nr == 0; ++ case RADEON_PRIM_TYPE_LINE_STRIP: ++ return nr < 2; ++ case RADEON_PRIM_TYPE_TRI_LIST: ++ case RADEON_PRIM_TYPE_3VRT_POINT_LIST: ++ case RADEON_PRIM_TYPE_3VRT_LINE_LIST: ++ case RADEON_PRIM_TYPE_RECT_LIST: ++ return nr % 3 || nr == 0; ++ case RADEON_PRIM_TYPE_TRI_FAN: ++ case RADEON_PRIM_TYPE_TRI_STRIP: ++ return nr < 3; ++ default: ++ return 1; ++ } ++} ++ ++typedef struct { ++ unsigned int start; ++ unsigned int finish; ++ unsigned int prim; ++ unsigned int numverts; ++ unsigned int offset; ++ unsigned int vc_format; ++} drm_radeon_tcl_prim_t; ++ ++static void radeon_cp_dispatch_vertex(struct drm_device * dev, ++ struct drm_buf * buf, ++ drm_radeon_tcl_prim_t * prim) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; ++ int numverts = (int)prim->numverts; ++ int nbox = sarea_priv->nbox; ++ int i = 0; ++ RING_LOCALS; ++ ++ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", ++ prim->prim, ++ prim->vc_format, prim->start, prim->finish, prim->numverts); ++ ++ if (bad_prim_vertex_nr(prim->prim, prim->numverts)) { ++ DRM_ERROR("bad prim %x numverts %d\n", ++ prim->prim, prim->numverts); ++ return; ++ } ++ ++ do { ++ /* Emit the next cliprect */ ++ if (i < nbox) { ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ } ++ ++ /* Emit the vertex buffer rendering commands */ ++ BEGIN_RING(5); ++ ++ OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3)); ++ OUT_RING(offset); ++ OUT_RING(numverts); ++ OUT_RING(prim->vc_format); ++ OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST | ++ RADEON_COLOR_ORDER_RGBA | ++ RADEON_VTX_FMT_RADEON_MODE | ++ (numverts << RADEON_NUM_VERTICES_SHIFT)); ++ ++ ADVANCE_RING(); ++ ++ i++; ++ } while (i < nbox); ++} ++ ++static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ RING_LOCALS; ++ ++ buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ RADEON_DISPATCH_AGE(buf_priv->age); ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ buf->used = 0; ++} ++ ++static void radeon_cp_dispatch_indirect(struct drm_device * dev, ++ struct drm_buf * buf, int start, int end) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); ++ ++ if (start != end) { ++ int offset = (dev_priv->gart_buffers_offset ++ + buf->offset + start); ++ int dwords = (end - start + 3) / sizeof(u32); ++ ++ /* Indirect buffer data must be an even number of ++ * dwords, so if we've been given an odd number we must ++ * pad the data with a Type-2 CP packet. 
++ */ ++ if (dwords & 1) { ++ u32 *data = (u32 *) ++ ((char *)dev->agp_buffer_map->handle ++ + buf->offset + start); ++ data[dwords++] = RADEON_CP_PACKET2; ++ } ++ ++ /* Fire off the indirect buffer */ ++ BEGIN_RING(3); ++ ++ OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1)); ++ OUT_RING(offset); ++ OUT_RING(dwords); ++ ++ ADVANCE_RING(); ++ } ++} ++ ++static void radeon_cp_dispatch_indices(struct drm_device * dev, ++ struct drm_buf * elt_buf, ++ drm_radeon_tcl_prim_t * prim) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int offset = dev_priv->gart_buffers_offset + prim->offset; ++ u32 *data; ++ int dwords; ++ int i = 0; ++ int start = prim->start + RADEON_INDEX_PRIM_OFFSET; ++ int count = (prim->finish - start) / sizeof(u16); ++ int nbox = sarea_priv->nbox; ++ ++ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", ++ prim->prim, ++ prim->vc_format, ++ prim->start, prim->finish, prim->offset, prim->numverts); ++ ++ if (bad_prim_vertex_nr(prim->prim, count)) { ++ DRM_ERROR("bad prim %x count %d\n", prim->prim, count); ++ return; ++ } ++ ++ if (start >= prim->finish || (prim->start & 0x7)) { ++ DRM_ERROR("buffer prim %d\n", prim->prim); ++ return; ++ } ++ ++ dwords = (prim->finish - prim->start + 3) / sizeof(u32); ++ ++ data = (u32 *) ((char *)dev->agp_buffer_map->handle + ++ elt_buf->offset + prim->start); ++ ++ data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2); ++ data[1] = offset; ++ data[2] = prim->numverts; ++ data[3] = prim->vc_format; ++ data[4] = (prim->prim | ++ RADEON_PRIM_WALK_IND | ++ RADEON_COLOR_ORDER_RGBA | ++ RADEON_VTX_FMT_RADEON_MODE | ++ (count << RADEON_NUM_VERTICES_SHIFT)); ++ ++ do { ++ if (i < nbox) ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ ++ radeon_cp_dispatch_indirect(dev, elt_buf, ++ prim->start, prim->finish); ++ ++ i++; ++ } while (i < nbox); ++ ++} ++ ++#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE ++ ++static int radeon_cp_dispatch_texture(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_radeon_texture_t * tex, ++ drm_radeon_tex_image_t * image) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_buf *buf; ++ u32 format; ++ u32 *buffer; ++ const u8 __user *data; ++ int size, dwords, tex_width, blit_width, spitch; ++ u32 height; ++ int i; ++ u32 texpitch, microtile; ++ u32 offset, byte_offset; ++ RING_LOCALS; ++ ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) { ++ DRM_ERROR("Invalid destination offset\n"); ++ return -EINVAL; ++ } ++ ++ dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; ++ ++ /* Flush the pixel cache. This ensures no pixel data gets mixed ++ * up with the texture data from the host data blit, otherwise ++ * part of the texture image may be corrupted. ++ */ ++ BEGIN_RING(4); ++ RADEON_FLUSH_CACHE(); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ADVANCE_RING(); ++ ++ /* The compiler won't optimize away a division by a variable, ++ * even if the only legal values are powers of two. Thus, we'll ++ * use a shift instead. 
++ */ ++ switch (tex->format) { ++ case RADEON_TXFORMAT_ARGB8888: ++ case RADEON_TXFORMAT_RGBA8888: ++ format = RADEON_COLOR_FORMAT_ARGB8888; ++ tex_width = tex->width * 4; ++ blit_width = image->width * 4; ++ break; ++ case RADEON_TXFORMAT_AI88: ++ case RADEON_TXFORMAT_ARGB1555: ++ case RADEON_TXFORMAT_RGB565: ++ case RADEON_TXFORMAT_ARGB4444: ++ case RADEON_TXFORMAT_VYUY422: ++ case RADEON_TXFORMAT_YVYU422: ++ format = RADEON_COLOR_FORMAT_RGB565; ++ tex_width = tex->width * 2; ++ blit_width = image->width * 2; ++ break; ++ case RADEON_TXFORMAT_I8: ++ case RADEON_TXFORMAT_RGB332: ++ format = RADEON_COLOR_FORMAT_CI8; ++ tex_width = tex->width * 1; ++ blit_width = image->width * 1; ++ break; ++ default: ++ DRM_ERROR("invalid texture format %d\n", tex->format); ++ return -EINVAL; ++ } ++ spitch = blit_width >> 6; ++ if (spitch == 0 && image->height > 1) ++ return -EINVAL; ++ ++ texpitch = tex->pitch; ++ if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { ++ microtile = 1; ++ if (tex_width < 64) { ++ texpitch &= ~(RADEON_DST_TILE_MICRO >> 22); ++ /* we got tiled coordinates, untile them */ ++ image->x *= 2; ++ } ++ } else ++ microtile = 0; ++ ++ /* this might fail for zero-sized uploads - are those illegal? */ ++ if (!radeon_check_offset(dev_priv, tex->offset + image->height * ++ blit_width - 1)) { ++ DRM_ERROR("Invalid final destination offset\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width); ++ ++ do { ++ DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", ++ tex->offset >> 10, tex->pitch, tex->format, ++ image->x, image->y, image->width, image->height); ++ ++ /* Make a copy of some parameters in case we have to ++ * update them for a multi-pass texture blit. ++ */ ++ height = image->height; ++ data = (const u8 __user *)image->data; ++ ++ size = height * blit_width; ++ ++ if (size > RADEON_MAX_TEXTURE_SIZE) { ++ height = RADEON_MAX_TEXTURE_SIZE / blit_width; ++ size = height * blit_width; ++ } else if (size < 4 && size > 0) { ++ size = 4; ++ } else if (size == 0) { ++ return 0; ++ } ++ ++ buf = radeon_freelist_get(dev); ++ if (0 && !buf) { ++ radeon_do_cp_idle(dev_priv); ++ buf = radeon_freelist_get(dev); ++ } ++ if (!buf) { ++ DRM_DEBUG("EAGAIN\n"); ++ if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) ++ return -EFAULT; ++ return -EAGAIN; ++ } ++ ++ /* Dispatch the indirect buffer. ++ */ ++ buffer = ++ (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); ++ dwords = size / 4; ++ ++#define RADEON_COPY_MT(_buf, _data, _width) \ ++ do { \ ++ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ ++ DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ ++ return -EFAULT; \ ++ } \ ++ } while(0) ++ ++ if (microtile) { ++ /* texture micro tiling in use, minimum texture width is thus 16 bytes. ++ however, we cannot use blitter directly for texture width < 64 bytes, ++ since minimum tex pitch is 64 bytes and we need this to match ++ the texture width, otherwise the blitter will tile it wrong. ++ Thus, tiling manually in this case. Additionally, need to special ++ case tex height = 1, since our actual image will have height 2 ++ and we need to ensure we don't read beyond the texture size ++ from user space. 
*/ ++ if (tex->height == 1) { ++ if (tex_width >= 64 || tex_width <= 16) { ++ RADEON_COPY_MT(buffer, data, ++ (int)(tex_width * sizeof(u32))); ++ } else if (tex_width == 32) { ++ RADEON_COPY_MT(buffer, data, 16); ++ RADEON_COPY_MT(buffer + 8, ++ data + 16, 16); ++ } ++ } else if (tex_width >= 64 || tex_width == 16) { ++ RADEON_COPY_MT(buffer, data, ++ (int)(dwords * sizeof(u32))); ++ } else if (tex_width < 16) { ++ for (i = 0; i < tex->height; i++) { ++ RADEON_COPY_MT(buffer, data, tex_width); ++ buffer += 4; ++ data += tex_width; ++ } ++ } else if (tex_width == 32) { ++ /* TODO: make sure this works when not fitting in one buffer ++ (i.e. 32bytes x 2048...) */ ++ for (i = 0; i < tex->height; i += 2) { ++ RADEON_COPY_MT(buffer, data, 16); ++ data += 16; ++ RADEON_COPY_MT(buffer + 8, data, 16); ++ data += 16; ++ RADEON_COPY_MT(buffer + 4, data, 16); ++ data += 16; ++ RADEON_COPY_MT(buffer + 12, data, 16); ++ data += 16; ++ buffer += 16; ++ } ++ } ++ } else { ++ if (tex_width >= 32) { ++ /* Texture image width is larger than the minimum, so we ++ * can upload it directly. ++ */ ++ RADEON_COPY_MT(buffer, data, ++ (int)(dwords * sizeof(u32))); ++ } else { ++ /* Texture image width is less than the minimum, so we ++ * need to pad out each image scanline to the minimum ++ * width. ++ */ ++ for (i = 0; i < tex->height; i++) { ++ RADEON_COPY_MT(buffer, data, tex_width); ++ buffer += 8; ++ data += tex_width; ++ } ++ } ++ } ++ ++#undef RADEON_COPY_MT ++ byte_offset = (image->y & ~2047) * blit_width; ++ buf->file_priv = file_priv; ++ buf->used = size; ++ offset = dev_priv->gart_buffers_offset + buf->offset; ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | ++ RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_NONE | ++ (format << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_S | ++ RADEON_DP_SRC_SOURCE_MEMORY | ++ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); ++ OUT_RING((spitch << 22) | (offset >> 10)); ++ OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10))); ++ OUT_RING(0); ++ OUT_RING((image->x << 16) | (image->y % 2048)); ++ OUT_RING((image->width << 16) | height); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ radeon_cp_discard_buffer(dev, buf); ++ ++ /* Update the input parameters for next time */ ++ image->y += height; ++ image->height -= height; ++ image->data = (const u8 __user *)image->data + size; ++ } while (image->height > 0); ++ ++ /* Flush the pixel cache after the blit completes. This ensures ++ * the texture data is written out to memory before rendering ++ * continues. 
++ */ ++ BEGIN_RING(4); ++ RADEON_FLUSH_CACHE(); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ return 0; ++} ++ ++static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(35); ++ ++ OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0)); ++ OUT_RING(0x00000000); ++ ++ OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31)); ++ for (i = 0; i < 32; i++) { ++ OUT_RING(stipple[i]); ++ } ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_apply_surface_regs(int surf_index, ++ drm_radeon_private_t *dev_priv) ++{ ++ if (!dev_priv->mmio) ++ return; ++ ++ radeon_do_cp_idle(dev_priv); ++ ++ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index, ++ dev_priv->surfaces[surf_index].flags); ++ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index, ++ dev_priv->surfaces[surf_index].lower); ++ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index, ++ dev_priv->surfaces[surf_index].upper); ++} ++ ++/* Allocates a virtual surface ++ * doesn't always allocate a real surface, will stretch an existing ++ * surface when possible. ++ * ++ * Note that refcount can be at most 2, since during a free refcount=3 ++ * might mean we have to allocate a new surface which might not always ++ * be available. ++ * For example : we allocate three contigous surfaces ABC. If B is ++ * freed, we suddenly need two surfaces to store A and C, which might ++ * not always be available. ++ */ ++static int alloc_surface(drm_radeon_surface_alloc_t *new, ++ drm_radeon_private_t *dev_priv, ++ struct drm_file *file_priv) ++{ ++ struct radeon_virt_surface *s; ++ int i; ++ int virt_surface_index; ++ uint32_t new_upper, new_lower; ++ ++ new_lower = new->address; ++ new_upper = new_lower + new->size - 1; ++ ++ /* sanity check */ ++ if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) || ++ ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != ++ RADEON_SURF_ADDRESS_FIXED_MASK) ++ || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0)) ++ return -1; ++ ++ /* make sure there is no overlap with existing surfaces */ ++ for (i = 0; i < RADEON_MAX_SURFACES; i++) { ++ if ((dev_priv->surfaces[i].refcount != 0) && ++ (((new_lower >= dev_priv->surfaces[i].lower) && ++ (new_lower < dev_priv->surfaces[i].upper)) || ++ ((new_lower < dev_priv->surfaces[i].lower) && ++ (new_upper > dev_priv->surfaces[i].lower)))) { ++ return -1; ++ } ++ } ++ ++ /* find a virtual surface */ ++ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) ++ if (dev_priv->virt_surfaces[i].file_priv == 0) ++ break; ++ if (i == 2 * RADEON_MAX_SURFACES) { ++ return -1; ++ } ++ virt_surface_index = i; ++ ++ /* try to reuse an existing surface */ ++ for (i = 0; i < RADEON_MAX_SURFACES; i++) { ++ /* extend before */ ++ if ((dev_priv->surfaces[i].refcount == 1) && ++ (new->flags == dev_priv->surfaces[i].flags) && ++ (new_upper + 1 == dev_priv->surfaces[i].lower)) { ++ s = &(dev_priv->virt_surfaces[virt_surface_index]); ++ s->surface_index = i; ++ s->lower = new_lower; ++ s->upper = new_upper; ++ s->flags = new->flags; ++ s->file_priv = file_priv; ++ dev_priv->surfaces[i].refcount++; ++ dev_priv->surfaces[i].lower = s->lower; ++ radeon_apply_surface_regs(s->surface_index, dev_priv); ++ return virt_surface_index; ++ } ++ ++ /* extend after */ ++ if ((dev_priv->surfaces[i].refcount == 1) && ++ (new->flags == dev_priv->surfaces[i].flags) && ++ (new_lower == dev_priv->surfaces[i].upper + 1)) { ++ s = 
&(dev_priv->virt_surfaces[virt_surface_index]); ++ s->surface_index = i; ++ s->lower = new_lower; ++ s->upper = new_upper; ++ s->flags = new->flags; ++ s->file_priv = file_priv; ++ dev_priv->surfaces[i].refcount++; ++ dev_priv->surfaces[i].upper = s->upper; ++ radeon_apply_surface_regs(s->surface_index, dev_priv); ++ return virt_surface_index; ++ } ++ } ++ ++ /* okay, we need a new one */ ++ for (i = 0; i < RADEON_MAX_SURFACES; i++) { ++ if (dev_priv->surfaces[i].refcount == 0) { ++ s = &(dev_priv->virt_surfaces[virt_surface_index]); ++ s->surface_index = i; ++ s->lower = new_lower; ++ s->upper = new_upper; ++ s->flags = new->flags; ++ s->file_priv = file_priv; ++ dev_priv->surfaces[i].refcount = 1; ++ dev_priv->surfaces[i].lower = s->lower; ++ dev_priv->surfaces[i].upper = s->upper; ++ dev_priv->surfaces[i].flags = s->flags; ++ radeon_apply_surface_regs(s->surface_index, dev_priv); ++ return virt_surface_index; ++ } ++ } ++ ++ /* we didn't find anything */ ++ return -1; ++} ++ ++static int free_surface(struct drm_file *file_priv, ++ drm_radeon_private_t * dev_priv, ++ int lower) ++{ ++ struct radeon_virt_surface *s; ++ int i; ++ /* find the virtual surface */ ++ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { ++ s = &(dev_priv->virt_surfaces[i]); ++ if (s->file_priv) { ++ if ((lower == s->lower) && (file_priv == s->file_priv)) ++ { ++ if (dev_priv->surfaces[s->surface_index]. ++ lower == s->lower) ++ dev_priv->surfaces[s->surface_index]. ++ lower = s->upper; ++ ++ if (dev_priv->surfaces[s->surface_index]. ++ upper == s->upper) ++ dev_priv->surfaces[s->surface_index]. ++ upper = s->lower; ++ ++ dev_priv->surfaces[s->surface_index].refcount--; ++ if (dev_priv->surfaces[s->surface_index]. ++ refcount == 0) ++ dev_priv->surfaces[s->surface_index]. ++ flags = 0; ++ s->file_priv = NULL; ++ radeon_apply_surface_regs(s->surface_index, ++ dev_priv); ++ return 0; ++ } ++ } ++ } ++ return 1; ++} ++ ++static void radeon_surfaces_release(struct drm_file *file_priv, ++ drm_radeon_private_t * dev_priv) ++{ ++ int i; ++ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { ++ if (dev_priv->virt_surfaces[i].file_priv == file_priv) ++ free_surface(file_priv, dev_priv, ++ dev_priv->virt_surfaces[i].lower); ++ } ++} ++ ++/* ================================================================ ++ * IOCTL functions ++ */ ++static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_surface_alloc_t *alloc = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (alloc_surface(alloc, dev_priv, file_priv) == -1) ++ return -EINVAL; ++ else ++ return 0; ++} ++ ++static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_surface_free_t *memfree = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (free_surface(file_priv, dev_priv, memfree->address)) ++ return -EINVAL; ++ else ++ return 0; ++} ++ ++static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_radeon_clear_t *clear = data; ++ drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ 
RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; ++ ++ if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, ++ sarea_priv->nbox * sizeof(depth_boxes[0]))) ++ return -EFAULT; ++ ++ radeon_cp_dispatch_clear(dev, clear, depth_boxes); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++/* Not sure why this isn't set all the time: ++ */ ++static int radeon_do_init_pageflip(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(6); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0)); ++ OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) | ++ RADEON_CRTC_OFFSET_FLIP_CNTL); ++ OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0)); ++ OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) | ++ RADEON_CRTC_OFFSET_FLIP_CNTL); ++ ADVANCE_RING(); ++ ++ dev_priv->page_flipping = 1; ++ ++ if (dev_priv->sarea_priv->pfCurrentPage != 1) ++ dev_priv->sarea_priv->pfCurrentPage = 0; ++ ++ return 0; ++} ++ ++/* Swapping and flipping are different operations, need different ioctls. ++ * They can & should be intermixed to support multiple 3d windows. ++ */ ++static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (!dev_priv->page_flipping) ++ radeon_do_init_pageflip(dev); ++ ++ radeon_cp_dispatch_flip(dev); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; ++ ++ radeon_cp_dispatch_swap(dev); ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_vertex_t *vertex = data; ++ drm_radeon_tcl_prim_t prim; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv = dev_priv->sarea_priv; ++ ++ DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", ++ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ vertex->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { ++ DRM_ERROR("buffer prim %d\n", vertex->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[vertex->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", vertex->idx); ++ return -EINVAL; ++ } ++ ++ /* Build up a prim_t record: ++ */ ++ 
if (vertex->count) { ++ buf->used = vertex->count; /* not used? */ ++ ++ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { ++ if (radeon_emit_state(dev_priv, file_priv, ++ &sarea_priv->context_state, ++ sarea_priv->tex_state, ++ sarea_priv->dirty)) { ++ DRM_ERROR("radeon_emit_state failed\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | ++ RADEON_UPLOAD_TEX1IMAGES | ++ RADEON_UPLOAD_TEX2IMAGES | ++ RADEON_REQUIRE_QUIESCENCE); ++ } ++ ++ prim.start = 0; ++ prim.finish = vertex->count; /* unused */ ++ prim.prim = vertex->prim; ++ prim.numverts = vertex->count; ++ prim.vc_format = dev_priv->sarea_priv->vc_format; ++ ++ radeon_cp_dispatch_vertex(dev, buf, &prim); ++ } ++ ++ if (vertex->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_indices_t *elts = data; ++ drm_radeon_tcl_prim_t prim; ++ int count; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ sarea_priv = dev_priv->sarea_priv; ++ ++ DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", ++ DRM_CURRENTPID, elts->idx, elts->start, elts->end, ++ elts->discard); ++ ++ if (elts->idx < 0 || elts->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ elts->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { ++ DRM_ERROR("buffer prim %d\n", elts->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[elts->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", elts->idx); ++ return -EINVAL; ++ } ++ ++ count = (elts->end - elts->start) / sizeof(u16); ++ elts->start -= RADEON_INDEX_PRIM_OFFSET; ++ ++ if (elts->start & 0x7) { ++ DRM_ERROR("misaligned buffer 0x%x\n", elts->start); ++ return -EINVAL; ++ } ++ if (elts->start < buf->used) { ++ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); ++ return -EINVAL; ++ } ++ ++ buf->used = elts->end; ++ ++ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { ++ if (radeon_emit_state(dev_priv, file_priv, ++ &sarea_priv->context_state, ++ sarea_priv->tex_state, ++ sarea_priv->dirty)) { ++ DRM_ERROR("radeon_emit_state failed\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | ++ RADEON_UPLOAD_TEX1IMAGES | ++ RADEON_UPLOAD_TEX2IMAGES | ++ RADEON_REQUIRE_QUIESCENCE); ++ } ++ ++ /* Build up a prim_t record: ++ */ ++ prim.start = elts->start; ++ prim.finish = elts->end; ++ prim.prim = elts->prim; ++ prim.offset = 0; /* offset from start of dma buffers */ ++ prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ ++ prim.vc_format = dev_priv->sarea_priv->vc_format; ++ ++ radeon_cp_dispatch_indices(dev, buf, &prim); ++ if (elts->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_texture_t 
*tex = data; ++ drm_radeon_tex_image_t image; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (tex->image == NULL) { ++ DRM_ERROR("null texture image!\n"); ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_FROM_USER(&image, ++ (drm_radeon_tex_image_t __user *) tex->image, ++ sizeof(image))) ++ return -EFAULT; ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image); ++ ++ return ret; ++} ++ ++static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_stipple_t *stipple = data; ++ u32 mask[32]; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) ++ return -EFAULT; ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ radeon_cp_dispatch_stipple(dev, mask); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_indirect_t *indirect = data; ++ RING_LOCALS; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", ++ indirect->idx, indirect->start, indirect->end, ++ indirect->discard); ++ ++ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ indirect->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ buf = dma->buflist[indirect->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", indirect->idx); ++ return -EINVAL; ++ } ++ ++ if (indirect->start < buf->used) { ++ DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", ++ indirect->start, buf->used); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf->used = indirect->end; ++ ++ /* Wait for the 3D stream to idle before the indirect buffer ++ * containing 2D acceleration commands is processed. ++ */ ++ BEGIN_RING(2); ++ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ++ ADVANCE_RING(); ++ ++ /* Dispatch the indirect buffer full of commands from the ++ * X server. This is insecure and is thus only available to ++ * privileged clients. 
++ */ ++ radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); ++ if (indirect->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_vertex2_t *vertex = data; ++ int i; ++ unsigned char laststate; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv = dev_priv->sarea_priv; ++ ++ DRM_DEBUG("pid=%d index=%d discard=%d\n", ++ DRM_CURRENTPID, vertex->idx, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ vertex->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[vertex->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", vertex->idx); ++ return -EINVAL; ++ } ++ ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) ++ return -EINVAL; ++ ++ for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) { ++ drm_radeon_prim_t prim; ++ drm_radeon_tcl_prim_t tclprim; ++ ++ if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim))) ++ return -EFAULT; ++ ++ if (prim.stateidx != laststate) { ++ drm_radeon_state_t state; ++ ++ if (DRM_COPY_FROM_USER(&state, ++ &vertex->state[prim.stateidx], ++ sizeof(state))) ++ return -EFAULT; ++ ++ if (radeon_emit_state2(dev_priv, file_priv, &state)) { ++ DRM_ERROR("radeon_emit_state2 failed\n"); ++ return -EINVAL; ++ } ++ ++ laststate = prim.stateidx; ++ } ++ ++ tclprim.start = prim.start; ++ tclprim.finish = prim.finish; ++ tclprim.prim = prim.prim; ++ tclprim.vc_format = prim.vc_format; ++ ++ if (prim.prim & RADEON_PRIM_WALK_IND) { ++ tclprim.offset = prim.numverts * 64; ++ tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ ++ ++ radeon_cp_dispatch_indices(dev, buf, &tclprim); ++ } else { ++ tclprim.numverts = prim.numverts; ++ tclprim.offset = 0; /* not used */ ++ ++ radeon_cp_dispatch_vertex(dev, buf, &tclprim); ++ } ++ ++ if (sarea_priv->nbox == 1) ++ sarea_priv->nbox = 0; ++ } ++ ++ if (vertex->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_emit_packets(drm_radeon_private_t * dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int id = (int)header.packet.packet_id; ++ int sz, reg; ++ int *data = (int *)cmdbuf->buf; ++ RING_LOCALS; ++ ++ if (id >= RADEON_MAX_STATE_PACKETS) ++ return -EINVAL; ++ ++ sz = packet[id].len; ++ reg = packet[id].start; ++ ++ if (sz * sizeof(int) > cmdbuf->bufsz) { ++ DRM_ERROR("Packet size provided larger than data provided\n"); ++ return -EINVAL; ++ } ++ ++ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { ++ DRM_ERROR("Packet verification failed\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(sz + 1); ++ OUT_RING(CP_PACKET0(reg, (sz - 1))); ++ OUT_RING_TABLE(data, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static __inline__ 
int radeon_emit_scalars(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.scalars.count; ++ int start = header.scalars.offset; ++ int stride = header.scalars.stride; ++ RING_LOCALS; ++ ++ BEGIN_RING(3 + sz); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); ++ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++/* God this is ugly ++ */ ++static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.scalars.count; ++ int start = ((unsigned int)header.scalars.offset) + 0x100; ++ int stride = header.scalars.stride; ++ RING_LOCALS; ++ ++ BEGIN_RING(3 + sz); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); ++ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.vectors.count; ++ int start = header.vectors.offset; ++ int stride = header.vectors.stride; ++ RING_LOCALS; ++ ++ BEGIN_RING(5 + sz); ++ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); ++ OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.veclinear.count * 4; ++ int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8); ++ RING_LOCALS; ++ ++ if (!sz) ++ return 0; ++ if (sz * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ BEGIN_RING(5 + sz); ++ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); ++ OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static int radeon_emit_packet3(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ unsigned int cmdsz; ++ int ret; ++ RING_LOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv, ++ cmdbuf, &cmdsz))) { ++ DRM_ERROR("Packet verification failed\n"); ++ return ret; ++ } ++ ++ BEGIN_RING(cmdsz); ++ OUT_RING_TABLE(cmdbuf->buf, cmdsz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += cmdsz * 4; ++ cmdbuf->bufsz -= cmdsz * 4; ++ return 0; ++} ++ ++static int radeon_emit_packet3_cliprect(struct drm_device *dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ int 
orig_nbox) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect box; ++ unsigned int cmdsz; ++ int ret; ++ struct drm_clip_rect __user *boxes = cmdbuf->boxes; ++ int i = 0; ++ RING_LOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv, ++ cmdbuf, &cmdsz))) { ++ DRM_ERROR("Packet verification failed\n"); ++ return ret; ++ } ++ ++ if (!orig_nbox) ++ goto out; ++ ++ do { ++ if (i < cmdbuf->nbox) { ++ if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) ++ return -EFAULT; ++ /* FIXME The second and subsequent times round ++ * this loop, send a WAIT_UNTIL_3D_IDLE before ++ * calling emit_clip_rect(). This fixes a ++ * lockup on fast machines when sending ++ * several cliprects with a cmdbuf, as when ++ * waving a 2D window over a 3D ++ * window. Something in the commands from user ++ * space seems to hang the card when they're ++ * sent several times in a row. That would be ++ * the correct place to fix it but this works ++ * around it until I can figure that out - Tim ++ * Smith */ ++ if (i) { ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++ } ++ radeon_emit_clip_rect(dev_priv, &box); ++ } ++ ++ BEGIN_RING(cmdsz); ++ OUT_RING_TABLE(cmdbuf->buf, cmdsz); ++ ADVANCE_RING(); ++ ++ } while (++i < cmdbuf->nbox); ++ if (cmdbuf->nbox == 1) ++ cmdbuf->nbox = 0; ++ ++ out: ++ cmdbuf->buf += cmdsz * 4; ++ cmdbuf->bufsz -= cmdsz * 4; ++ return 0; ++} ++ ++static int radeon_emit_wait(struct drm_device * dev, int flags) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ DRM_DEBUG("%x\n", flags); ++ switch (flags) { ++ case RADEON_WAIT_2D: ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ break; ++ case RADEON_WAIT_3D: ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++ break; ++ case RADEON_WAIT_2D | RADEON_WAIT_3D: ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ADVANCE_RING(); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf = NULL; ++ int idx; ++ drm_radeon_kcmd_buffer_t *cmdbuf = data; ++ drm_radeon_cmd_header_t header; ++ int orig_nbox, orig_bufsz; ++ char *kbuf = NULL; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) { ++ return -EINVAL; ++ } ++ ++ /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid ++ * races between checking values and using those values in other code, ++ * and simply to avoid a lot of function calls to copy in data. 
++ */ ++ orig_bufsz = cmdbuf->bufsz; ++ if (orig_bufsz != 0) { ++ kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER); ++ if (kbuf == NULL) ++ return -ENOMEM; ++ if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, ++ cmdbuf->bufsz)) { ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ return -EFAULT; ++ } ++ cmdbuf->buf = kbuf; ++ } ++ ++ orig_nbox = cmdbuf->nbox; ++ ++ if (dev_priv->chip_family >= CHIP_R300) { ++ int temp; ++ temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); ++ ++ if (orig_bufsz != 0) ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ ++ return temp; ++ } ++ ++ /* microcode_version != r300 */ ++ while (cmdbuf->bufsz >= sizeof(header)) { ++ ++ header.i = *(int *)cmdbuf->buf; ++ cmdbuf->buf += sizeof(header); ++ cmdbuf->bufsz -= sizeof(header); ++ ++ switch (header.header.cmd_type) { ++ case RADEON_CMD_PACKET: ++ DRM_DEBUG("RADEON_CMD_PACKET\n"); ++ if (radeon_emit_packets ++ (dev_priv, file_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_packets failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_SCALARS: ++ DRM_DEBUG("RADEON_CMD_SCALARS\n"); ++ if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_scalars failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_VECTORS: ++ DRM_DEBUG("RADEON_CMD_VECTORS\n"); ++ if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_vectors failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_DMA_DISCARD: ++ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); ++ idx = header.dma.buf_idx; ++ if (idx < 0 || idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ idx, dma->buf_count - 1); ++ goto err; ++ } ++ ++ buf = dma->buflist[idx]; ++ if (buf->file_priv != file_priv || buf->pending) { ++ DRM_ERROR("bad buffer %p %p %d\n", ++ buf->file_priv, file_priv, ++ buf->pending); ++ goto err; ++ } ++ ++ radeon_cp_discard_buffer(dev, buf); ++ break; ++ ++ case RADEON_CMD_PACKET3: ++ DRM_DEBUG("RADEON_CMD_PACKET3\n"); ++ if (radeon_emit_packet3(dev, file_priv, cmdbuf)) { ++ DRM_ERROR("radeon_emit_packet3 failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_PACKET3_CLIP: ++ DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); ++ if (radeon_emit_packet3_cliprect ++ (dev, file_priv, cmdbuf, orig_nbox)) { ++ DRM_ERROR("radeon_emit_packet3_clip failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_SCALARS2: ++ DRM_DEBUG("RADEON_CMD_SCALARS2\n"); ++ if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_scalars2 failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_WAIT: ++ DRM_DEBUG("RADEON_CMD_WAIT\n"); ++ if (radeon_emit_wait(dev, header.wait.flags)) { ++ DRM_ERROR("radeon_emit_wait failed\n"); ++ goto err; ++ } ++ break; ++ case RADEON_CMD_VECLINEAR: ++ DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); ++ if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_veclinear failed\n"); ++ goto err; ++ } ++ break; ++ ++ default: ++ DRM_ERROR("bad cmd_type %d at %p\n", ++ header.header.cmd_type, ++ cmdbuf->buf - sizeof(header)); ++ goto err; ++ } ++ } ++ ++ if (orig_bufsz != 0) ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ ++ DRM_DEBUG("DONE\n"); ++ COMMIT_RING(); ++ return 0; ++ ++ err: ++ if (orig_bufsz != 0) ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ return -EINVAL; ++} ++ ++static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ 
DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ switch (param->param) { ++ case RADEON_PARAM_GART_BUFFER_OFFSET: ++ value = dev_priv->gart_buffers_offset; ++ break; ++ case RADEON_PARAM_LAST_FRAME: ++ dev_priv->stats.last_frame_reads++; ++ value = GET_SCRATCH(0); ++ break; ++ case RADEON_PARAM_LAST_DISPATCH: ++ value = GET_SCRATCH(1); ++ break; ++ case RADEON_PARAM_LAST_CLEAR: ++ dev_priv->stats.last_clear_reads++; ++ value = GET_SCRATCH(2); ++ break; ++ case RADEON_PARAM_IRQ_NR: ++ value = dev->irq; ++ break; ++ case RADEON_PARAM_GART_BASE: ++ value = dev_priv->gart_vm_start; ++ break; ++ case RADEON_PARAM_REGISTER_HANDLE: ++ value = dev_priv->mmio->offset; ++ break; ++ case RADEON_PARAM_STATUS_HANDLE: ++ value = dev_priv->ring_rptr_offset; ++ break; ++#ifndef __LP64__ ++ /* ++ * This ioctl() doesn't work on 64-bit platforms because hw_lock is a ++ * pointer which can't fit into an int-sized variable. According to ++ * Michel Dänzer, the ioctl() is only used on embedded platforms, so ++ * not supporting it shouldn't be a problem. If the same functionality ++ * is needed on 64-bit platforms, a new ioctl() would have to be added, ++ * so backwards-compatibility for the embedded platforms can be ++ * maintained. --davidm 4-Feb-2004. ++ */ ++ case RADEON_PARAM_SAREA_HANDLE: ++ /* The lock is the first dword in the sarea. */ ++ value = (long)dev->lock.hw_lock; ++ break; ++#endif ++ case RADEON_PARAM_GART_TEX_HANDLE: ++ value = dev_priv->gart_textures_offset; ++ break; ++ case RADEON_PARAM_SCRATCH_OFFSET: ++ if (!dev_priv->writeback_works) ++ return -EINVAL; ++ value = RADEON_SCRATCH_REG_OFFSET; ++ break; ++ ++ case RADEON_PARAM_CARD_TYPE: ++ if (dev_priv->flags & RADEON_IS_PCIE) ++ value = RADEON_CARD_PCIE; ++ else if (dev_priv->flags & RADEON_IS_AGP) ++ value = RADEON_CARD_AGP; ++ else ++ value = RADEON_CARD_PCI; ++ break; ++ case RADEON_PARAM_VBLANK_CRTC: ++ value = radeon_vblank_crtc_get(dev); ++ break; ++ case RADEON_PARAM_FB_LOCATION: ++ value = radeon_read_fb_location(dev_priv); ++ break; ++ case RADEON_PARAM_NUM_GB_PIPES: ++ value = dev_priv->num_gb_pipes; ++ break; ++ default: ++ DRM_DEBUG( "Invalid parameter %d\n", param->param ); ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_setparam_t *sp = data; ++ struct drm_radeon_driver_file_fields *radeon_priv; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (sp->param) { ++ case RADEON_SETPARAM_FB_LOCATION: ++ radeon_priv = file_priv->driver_priv; ++ radeon_priv->radeon_fb_delta = dev_priv->fb_location - ++ sp->value; ++ break; ++ case RADEON_SETPARAM_SWITCH_TILING: ++ if (sp->value == 0) { ++ DRM_DEBUG("color tiling disabled\n"); ++ dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; ++ dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->tiling_enabled = 0; ++ } else if (sp->value == 1) { ++ DRM_DEBUG("color tiling enabled\n"); ++ dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; ++ dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->tiling_enabled = 1; ++ } ++ break; ++ case 
RADEON_SETPARAM_PCIGART_LOCATION: ++ dev_priv->pcigart_offset = sp->value; ++ dev_priv->pcigart_offset_set = 1; ++ break; ++ case RADEON_SETPARAM_NEW_MEMMAP: ++ dev_priv->new_memmap = sp->value; ++ break; ++ case RADEON_SETPARAM_PCIGART_TABLE_SIZE: ++ dev_priv->gart_info.table_size = sp->value; ++ if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) ++ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; ++ break; ++ case RADEON_SETPARAM_VBLANK_CRTC: ++ return radeon_vblank_crtc_set(dev, sp->value); ++ break; ++ default: ++ DRM_DEBUG("Invalid parameter %d\n", sp->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* When a client dies: ++ * - Check for and clean up flipped page state ++ * - Free any alloced GART memory. ++ * - Free any alloced radeon surfaces. ++ * ++ * DRM infrastructure takes care of reclaiming dma buffers. ++ */ ++void radeon_driver_preclose(struct drm_device *dev, ++ struct drm_file *file_priv) ++{ ++ if (dev->dev_private) { ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ dev_priv->page_flipping = 0; ++ radeon_mem_release(file_priv, dev_priv->gart_heap); ++ radeon_mem_release(file_priv, dev_priv->fb_heap); ++ radeon_surfaces_release(file_priv, dev_priv); ++ } ++} ++ ++void radeon_driver_lastclose(struct drm_device *dev) ++{ ++ if (dev->dev_private) { ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->sarea_priv && ++ dev_priv->sarea_priv->pfCurrentPage != 0) ++ radeon_cp_dispatch_flip(dev); ++ } ++ ++ radeon_do_release(dev); ++} ++ ++int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_driver_file_fields *radeon_priv; ++ ++ DRM_DEBUG("\n"); ++ radeon_priv = ++ (struct drm_radeon_driver_file_fields *) ++ drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); ++ ++ if (!radeon_priv) ++ return -ENOMEM; ++ ++ file_priv->driver_priv = radeon_priv; ++ ++ if (dev_priv) ++ radeon_priv->radeon_fb_delta = dev_priv->fb_location; ++ else ++ radeon_priv->radeon_fb_delta = 0; ++ return 0; ++} ++ ++void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_radeon_driver_file_fields *radeon_priv = ++ file_priv->driver_priv; ++ ++ drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); ++} ++ ++struct drm_ioctl_desc radeon_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), ++ 
DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH) ++}; ++ ++int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_cce.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_cce.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_cce.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_cce.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,933 @@ ++/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- ++ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com ++ */ ++/* ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++#define R128_FIFO_DEBUG 0 ++ ++/* CCE microcode (from ATI) */ ++static u32 r128_cce_microcode[] = { ++ 0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0, ++ 1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0, ++ 599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1, ++ 11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11, ++ 262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28, ++ 1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9, ++ 30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656, ++ 1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1, ++ 15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071, ++ 12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2, ++ 46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1, ++ 459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1, ++ 18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1, ++ 15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2, ++ 268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1, ++ 15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82, ++ 1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729, ++ 3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008, ++ 1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0, ++ 15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1, ++ 180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1, ++ 114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0, ++ 33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370, ++ 1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1, ++ 14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793, ++ 1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1, ++ 198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1, ++ 114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1, ++ 1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1, ++ 1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894, ++ 16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14, ++ 174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1, ++ 33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1, ++ 33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1, ++ 409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ++}; ++ ++static int R128_READ_PLL(struct drm_device * dev, int addr) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ ++ R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f); ++ return R128_READ(R128_CLOCK_CNTL_DATA); ++} ++ ++#if R128_FIFO_DEBUG ++static void r128_status(drm_r128_private_t * dev_priv) ++{ ++ printk("GUI_STAT = 0x%08x\n", ++ (unsigned int)R128_READ(R128_GUI_STAT)); ++ printk("PM4_STAT = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_STAT)); ++ printk("PM4_BUFFER_DL_WPTR = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR)); ++ printk("PM4_BUFFER_DL_RPTR = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR)); ++ 
printk("PM4_MICRO_CNTL = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_MICRO_CNTL)); ++ printk("PM4_BUFFER_CNTL = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL)); ++} ++#endif ++ ++/* ================================================================ ++ * Engine, FIFO control ++ */ ++ ++static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) ++{ ++ u32 tmp; ++ int i; ++ ++ tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL; ++ R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) { ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) ++{ ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK; ++ if (slots >= entries) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) ++{ ++ int i, ret; ++ ++ ret = r128_do_wait_for_fifo(dev_priv, 64); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) { ++ r128_do_pixcache_flush(dev_priv); ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++/* ================================================================ ++ * CCE control, initialization ++ */ ++ ++/* Load the microcode for the CCE */ ++static void r128_cce_load_microcode(drm_r128_private_t * dev_priv) ++{ ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ r128_do_wait_for_idle(dev_priv); ++ ++ R128_WRITE(R128_PM4_MICROCODE_ADDR, 0); ++ for (i = 0; i < 256; i++) { ++ R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]); ++ R128_WRITE(R128_PM4_MICROCODE_DATAL, ++ r128_cce_microcode[i * 2 + 1]); ++ } ++} ++ ++/* Flush any pending commands to the CCE. This should only be used just ++ * prior to a wait for idle, as it informs the engine that the command ++ * stream is ending. ++ */ ++static void r128_do_cce_flush(drm_r128_private_t * dev_priv) ++{ ++ u32 tmp; ++ ++ tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE; ++ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp); ++} ++ ++/* Wait for the CCE to go idle. ++ */ ++int r128_do_cce_idle(drm_r128_private_t * dev_priv) ++{ ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) { ++ int pm4stat = R128_READ(R128_PM4_STAT); ++ if (((pm4stat & R128_PM4_FIFOCNT_MASK) >= ++ dev_priv->cce_fifo_size) && ++ !(pm4stat & (R128_PM4_BUSY | ++ R128_PM4_GUI_ACTIVE))) { ++ return r128_do_pixcache_flush(dev_priv); ++ } ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++ r128_status(dev_priv); ++#endif ++ return -EBUSY; ++} ++ ++/* Start the Concurrent Command Engine. ++ */ ++static void r128_do_cce_start(drm_r128_private_t * dev_priv) ++{ ++ r128_do_wait_for_idle(dev_priv); ++ ++ R128_WRITE(R128_PM4_BUFFER_CNTL, ++ dev_priv->cce_mode | dev_priv->ring.size_l2qw ++ | R128_PM4_BUFFER_CNTL_NOUPDATE); ++ R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */ ++ R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN); ++ ++ dev_priv->cce_running = 1; ++} ++ ++/* Reset the Concurrent Command Engine. 
This will not flush any pending ++ * commands, so you must wait for the CCE command stream to complete ++ * before calling this routine. ++ */ ++static void r128_do_cce_reset(drm_r128_private_t * dev_priv) ++{ ++ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); ++ R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); ++ dev_priv->ring.tail = 0; ++} ++ ++/* Stop the Concurrent Command Engine. This will not flush any pending ++ * commands, so you must flush the command stream and wait for the CCE ++ * to go idle before calling this routine. ++ */ ++static void r128_do_cce_stop(drm_r128_private_t * dev_priv) ++{ ++ R128_WRITE(R128_PM4_MICRO_CNTL, 0); ++ R128_WRITE(R128_PM4_BUFFER_CNTL, ++ R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE); ++ ++ dev_priv->cce_running = 0; ++} ++ ++/* Reset the engine. This will stop the CCE if it is running. ++ */ ++static int r128_do_engine_reset(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; ++ ++ r128_do_pixcache_flush(dev_priv); ++ ++ clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX); ++ mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL); ++ ++ R128_WRITE_PLL(R128_MCLK_CNTL, ++ mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP); ++ ++ gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL); ++ ++ /* Taken from the sample code - do not change */ ++ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI); ++ R128_READ(R128_GEN_RESET_CNTL); ++ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI); ++ R128_READ(R128_GEN_RESET_CNTL); ++ ++ R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl); ++ R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index); ++ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl); ++ ++ /* Reset the CCE ring */ ++ r128_do_cce_reset(dev_priv); ++ ++ /* The CCE is no longer running after an engine reset */ ++ dev_priv->cce_running = 0; ++ ++ /* Reset any pending vertex, indirect buffers */ ++ r128_freelist_reset(dev); ++ ++ return 0; ++} ++ ++static void r128_cce_init_ring_buffer(struct drm_device * dev, ++ drm_r128_private_t * dev_priv) ++{ ++ u32 ring_start; ++ u32 tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ /* The manual (p. 2) says this address is in "VM space". This ++ * means it's an offset from the start of AGP space. ++ */ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) ++ ring_start = dev_priv->cce_ring->offset - dev->agp->base; ++ else ++#endif ++ ring_start = dev_priv->cce_ring->offset - ++ (unsigned long)dev->sg->virtual; ++ ++ R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET); ++ ++ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); ++ R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); ++ ++ /* Set watermark control */ ++ R128_WRITE(R128_PM4_BUFFER_WM_CNTL, ++ ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT) ++ | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT) ++ | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT) ++ | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT)); ++ ++ /* Force read. Why? Because it's in the examples... 
*/ ++ R128_READ(R128_PM4_BUFFER_ADDR); ++ ++ /* Turn on bus mastering */ ++ tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS; ++ R128_WRITE(R128_BUS_CNTL, tmp); ++} ++ ++static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) ++{ ++ drm_r128_private_t *dev_priv; ++ ++ DRM_DEBUG("\n"); ++ ++ dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_r128_private_t)); ++ ++ dev_priv->is_pci = init->is_pci; ++ ++ if (dev_priv->is_pci && !dev->sg) { ++ DRM_ERROR("PCI GART memory not allocated!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->usec_timeout = init->usec_timeout; ++ if (dev_priv->usec_timeout < 1 || ++ dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) { ++ DRM_DEBUG("TIMEOUT problem!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->cce_mode = init->cce_mode; ++ ++ /* GH: Simple idle check. ++ */ ++ atomic_set(&dev_priv->idle_count, 0); ++ ++ /* We don't support anything other than bus-mastering ring mode, ++ * but the ring can be in either AGP or PCI space for the ring ++ * read pointer. ++ */ ++ if ((init->cce_mode != R128_PM4_192BM) && ++ (init->cce_mode != R128_PM4_128BM_64INDBM) && ++ (init->cce_mode != R128_PM4_64BM_128INDBM) && ++ (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) { ++ DRM_DEBUG("Bad cce_mode!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ switch (init->cce_mode) { ++ case R128_PM4_NONPM4: ++ dev_priv->cce_fifo_size = 0; ++ break; ++ case R128_PM4_192PIO: ++ case R128_PM4_192BM: ++ dev_priv->cce_fifo_size = 192; ++ break; ++ case R128_PM4_128PIO_64INDBM: ++ case R128_PM4_128BM_64INDBM: ++ dev_priv->cce_fifo_size = 128; ++ break; ++ case R128_PM4_64PIO_128INDBM: ++ case R128_PM4_64BM_128INDBM: ++ case R128_PM4_64PIO_64VCBM_64INDBM: ++ case R128_PM4_64BM_64VCBM_64INDBM: ++ case R128_PM4_64PIO_64VCPIO_64INDPIO: ++ dev_priv->cce_fifo_size = 64; ++ break; ++ } ++ ++ switch (init->fb_bpp) { ++ case 16: ++ dev_priv->color_fmt = R128_DATATYPE_RGB565; ++ break; ++ case 32: ++ default: ++ dev_priv->color_fmt = R128_DATATYPE_ARGB8888; ++ break; ++ } ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ switch (init->depth_bpp) { ++ case 16: ++ dev_priv->depth_fmt = R128_DATATYPE_RGB565; ++ break; ++ case 24: ++ case 32: ++ default: ++ dev_priv->depth_fmt = R128_DATATYPE_ARGB8888; ++ break; ++ } ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ dev_priv->span_offset = init->span_offset; ++ ++ dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) | ++ (dev_priv->front_offset >> 5)); ++ dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) | ++ (dev_priv->back_offset >> 5)); ++ dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | ++ (dev_priv->depth_offset >> 5) | ++ R128_DST_TILE); ++ dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | ++ (dev_priv->span_offset >> 5)); ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("could not find sarea!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 
++ if (!dev_priv->mmio) { ++ DRM_ERROR("could not find mmio region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); ++ if (!dev_priv->cce_ring) { ++ DRM_ERROR("could not find cce ring region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); ++ if (!dev_priv->ring_rptr) { ++ DRM_ERROR("could not find ring read pointer!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("could not find dma buffer region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ if (!dev_priv->is_pci) { ++ dev_priv->agp_textures = ++ drm_core_findmap(dev, init->agp_textures_offset); ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("could not find agp texture region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) { ++ drm_core_ioremap(dev_priv->cce_ring, dev); ++ drm_core_ioremap(dev_priv->ring_rptr, dev); ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev_priv->cce_ring->handle || ++ !dev_priv->ring_rptr->handle || ++ !dev->agp_buffer_map->handle) { ++ DRM_ERROR("Could not ioremap agp regions!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -ENOMEM; ++ } ++ } else ++#endif ++ { ++ dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset; ++ dev_priv->ring_rptr->handle = ++ (void *)dev_priv->ring_rptr->offset; ++ dev->agp_buffer_map->handle = ++ (void *)dev->agp_buffer_map->offset; ++ } ++ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) ++ dev_priv->cce_buffers_offset = dev->agp->base; ++ else ++#endif ++ dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual; ++ ++ dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle; ++ dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle ++ + init->ring_size / sizeof(u32)); ++ dev_priv->ring.size = init->ring_size; ++ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); ++ ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ ++ dev_priv->ring.high_mark = 128; ++ ++ dev_priv->sarea_priv->last_frame = 0; ++ R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ++ ++ dev_priv->sarea_priv->last_dispatch = 0; ++ R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->is_pci) { ++#endif ++ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); ++ dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; ++ dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE; ++ dev_priv->gart_info.addr = NULL; ++ dev_priv->gart_info.bus_addr = 0; ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { ++ DRM_ERROR("failed to init PCI GART!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -ENOMEM; ++ } ++ R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); ++#if __OS_HAS_AGP ++ } ++#endif ++ ++ r128_cce_init_ring_buffer(dev, dev_priv); 
++ r128_cce_load_microcode(dev_priv); ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ r128_do_engine_reset(dev); ++ ++ return 0; ++} ++ ++int r128_do_cleanup_cce(struct drm_device * dev) ++{ ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) { ++ if (dev_priv->cce_ring != NULL) ++ drm_core_ioremapfree(dev_priv->cce_ring, dev); ++ if (dev_priv->ring_rptr != NULL) ++ drm_core_ioremapfree(dev_priv->ring_rptr, dev); ++ if (dev->agp_buffer_map != NULL) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ dev->agp_buffer_map = NULL; ++ } ++ } else ++#endif ++ { ++ if (dev_priv->gart_info.bus_addr) ++ if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) ++ DRM_ERROR("failed to cleanup PCI GART!\n"); ++ } ++ ++ drm_free(dev->dev_private, sizeof(drm_r128_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ } ++ ++ return 0; ++} ++ ++int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_init_t *init = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case R128_INIT_CCE: ++ return r128_do_init_cce(dev, init); ++ case R128_CLEANUP_CCE: ++ return r128_do_cleanup_cce(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { ++ DRM_DEBUG("while CCE running\n"); ++ return 0; ++ } ++ ++ r128_do_cce_start(dev_priv); ++ ++ return 0; ++} ++ ++/* Stop the CCE. The engine must have been idled before calling this ++ * routine. ++ */ ++int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_cce_stop_t *stop = data; ++ int ret; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Flush any pending CCE commands. This ensures any outstanding ++ * commands are exectuted by the engine before we turn it off. ++ */ ++ if (stop->flush) { ++ r128_do_cce_flush(dev_priv); ++ } ++ ++ /* If we fail to make the engine go idle, we return an error ++ * code so that the DRM ioctl wrapper can try again. ++ */ ++ if (stop->idle) { ++ ret = r128_do_cce_idle(dev_priv); ++ if (ret) ++ return ret; ++ } ++ ++ /* Finally, we can turn off the CCE. If the engine isn't idle, ++ * we will get some dropped triangles as they won't be fully ++ * rendered before the CCE is shut down. ++ */ ++ r128_do_cce_stop(dev_priv); ++ ++ /* Reset the engine */ ++ r128_do_engine_reset(dev); ++ ++ return 0; ++} ++ ++/* Just reset the CCE ring. Called as part of an X Server engine reset. 
++ */ ++int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_DEBUG("called before init done\n"); ++ return -EINVAL; ++ } ++ ++ r128_do_cce_reset(dev_priv); ++ ++ /* The CCE is no longer running after an engine reset */ ++ dev_priv->cce_running = 0; ++ ++ return 0; ++} ++ ++int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (dev_priv->cce_running) { ++ r128_do_cce_flush(dev_priv); ++ } ++ ++ return r128_do_cce_idle(dev_priv); ++} ++ ++int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return r128_do_engine_reset(dev); ++} ++ ++int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ return -EINVAL; ++} ++ ++/* ================================================================ ++ * Freelist management ++ */ ++#define R128_BUFFER_USED 0xffffffff ++#define R128_BUFFER_FREE 0 ++ ++#if 0 ++static int r128_freelist_init(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_freelist_t *entry; ++ int i; ++ ++ dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); ++ if (dev_priv->head == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); ++ dev_priv->head->age = R128_BUFFER_USED; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ ++ entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); ++ if (!entry) ++ return -ENOMEM; ++ ++ entry->age = R128_BUFFER_FREE; ++ entry->buf = buf; ++ entry->prev = dev_priv->head; ++ entry->next = dev_priv->head->next; ++ if (!entry->next) ++ dev_priv->tail = entry; ++ ++ buf_priv->discard = 0; ++ buf_priv->dispatched = 0; ++ buf_priv->list_entry = entry; ++ ++ dev_priv->head->next = entry; ++ ++ if (dev_priv->head->next) ++ dev_priv->head->next->prev = entry; ++ } ++ ++ return 0; ++ ++} ++#endif ++ ++static struct drm_buf *r128_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv; ++ struct drm_buf *buf; ++ int i, t; ++ ++ /* FIXME: Optimize -- use freelist code */ ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->file_priv == 0) ++ return buf; ++ } ++ ++ for (t = 0; t < dev_priv->usec_timeout; t++) { ++ u32 done_age = R128_READ(R128_LAST_DISPATCH_REG); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->pending && buf_priv->age <= done_age) { ++ /* The buffer has been processed, so it ++ * can now be used. 
++ */ ++ buf->pending = 0; ++ return buf; ++ } ++ } ++ DRM_UDELAY(1); ++ } ++ ++ DRM_DEBUG("returning NULL!\n"); ++ return NULL; ++} ++ ++void r128_freelist_reset(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ buf_priv->age = 0; ++ } ++} ++ ++/* ================================================================ ++ * CCE command submission ++ */ ++ ++int r128_wait_ring(drm_r128_private_t * dev_priv, int n) ++{ ++ drm_r128_ring_buffer_t *ring = &dev_priv->ring; ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ r128_update_ring_snapshot(dev_priv); ++ if (ring->space >= n) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++ /* FIXME: This is being ignored... */ ++ DRM_ERROR("failed!\n"); ++ return -EBUSY; ++} ++ ++static int r128_cce_get_buffers(struct drm_device * dev, ++ struct drm_file *file_priv, ++ struct drm_dma * d) ++{ ++ int i; ++ struct drm_buf *buf; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = r128_freelist_get(dev); ++ if (!buf) ++ return -EAGAIN; ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, ++ sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, ++ sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int ret = 0; ++ struct drm_dma *d = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ return -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = r128_cce_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,326 @@ ++/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*- ++ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com ++ */ ++/* ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Kevin E. Martin ++ */ ++ ++#ifndef __R128_DRM_H__ ++#define __R128_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the X server file (r128_sarea.h) ++ */ ++#ifndef __R128_SAREA_DEFINES__ ++#define __R128_SAREA_DEFINES__ ++ ++/* What needs to be changed for the current vertex buffer? ++ */ ++#define R128_UPLOAD_CONTEXT 0x001 ++#define R128_UPLOAD_SETUP 0x002 ++#define R128_UPLOAD_TEX0 0x004 ++#define R128_UPLOAD_TEX1 0x008 ++#define R128_UPLOAD_TEX0IMAGES 0x010 ++#define R128_UPLOAD_TEX1IMAGES 0x020 ++#define R128_UPLOAD_CORE 0x040 ++#define R128_UPLOAD_MASKS 0x080 ++#define R128_UPLOAD_WINDOW 0x100 ++#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */ ++#define R128_REQUIRE_QUIESCENCE 0x400 ++#define R128_UPLOAD_ALL 0x7ff ++ ++#define R128_FRONT 0x1 ++#define R128_BACK 0x2 ++#define R128_DEPTH 0x4 ++ ++/* Primitive types ++ */ ++#define R128_POINTS 0x1 ++#define R128_LINES 0x2 ++#define R128_LINE_STRIP 0x3 ++#define R128_TRIANGLES 0x4 ++#define R128_TRIANGLE_FAN 0x5 ++#define R128_TRIANGLE_STRIP 0x6 ++ ++/* Vertex/indirect buffer size ++ */ ++#define R128_BUFFER_SIZE 16384 ++ ++/* Byte offsets for indirect buffer data ++ */ ++#define R128_INDEX_PRIM_OFFSET 20 ++#define R128_HOSTDATA_BLIT_OFFSET 32 ++ ++/* Keep these small for testing. ++ */ ++#define R128_NR_SAREA_CLIPRECTS 12 ++ ++/* There are 2 heaps (local/AGP). Each region within a heap is a ++ * minimum of 64k, and there are at most 64 of them per heap. 
++ */ ++#define R128_LOCAL_TEX_HEAP 0 ++#define R128_AGP_TEX_HEAP 1 ++#define R128_NR_TEX_HEAPS 2 ++#define R128_NR_TEX_REGIONS 64 ++#define R128_LOG_TEX_GRANULARITY 16 ++ ++#define R128_NR_CONTEXT_REGS 12 ++ ++#define R128_MAX_TEXTURE_LEVELS 11 ++#define R128_MAX_TEXTURE_UNITS 2 ++ ++#endif /* __R128_SAREA_DEFINES__ */ ++ ++typedef struct { ++ /* Context state - can be written in one large chunk */ ++ unsigned int dst_pitch_offset_c; ++ unsigned int dp_gui_master_cntl_c; ++ unsigned int sc_top_left_c; ++ unsigned int sc_bottom_right_c; ++ unsigned int z_offset_c; ++ unsigned int z_pitch_c; ++ unsigned int z_sten_cntl_c; ++ unsigned int tex_cntl_c; ++ unsigned int misc_3d_state_cntl_reg; ++ unsigned int texture_clr_cmp_clr_c; ++ unsigned int texture_clr_cmp_msk_c; ++ unsigned int fog_color_c; ++ ++ /* Texture state */ ++ unsigned int tex_size_pitch_c; ++ unsigned int constant_color_c; ++ ++ /* Setup state */ ++ unsigned int pm4_vc_fpu_setup; ++ unsigned int setup_cntl; ++ ++ /* Mask state */ ++ unsigned int dp_write_mask; ++ unsigned int sten_ref_mask_c; ++ unsigned int plane_3d_mask_c; ++ ++ /* Window state */ ++ unsigned int window_xy_offset; ++ ++ /* Core state */ ++ unsigned int scale_3d_cntl; ++} drm_r128_context_regs_t; ++ ++/* Setup registers for each texture unit ++ */ ++typedef struct { ++ unsigned int tex_cntl; ++ unsigned int tex_combine_cntl; ++ unsigned int tex_size_pitch; ++ unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS]; ++ unsigned int tex_border_color; ++} drm_r128_texture_regs_t; ++ ++typedef struct drm_r128_sarea { ++ /* The channel for communication of state information to the kernel ++ * on firing a vertex buffer. ++ */ ++ drm_r128_context_regs_t context_state; ++ drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS]; ++ unsigned int dirty; ++ unsigned int vertsize; ++ unsigned int vc_format; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Counters for client-side throttling of rendering clients. ++ */ ++ unsigned int last_frame; ++ unsigned int last_dispatch; ++ ++ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1]; ++ unsigned int tex_age[R128_NR_TEX_HEAPS]; ++ int ctx_owner; ++ int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */ ++ int pfCurrentPage; /* which buffer is being displayed? */ ++} drm_r128_sarea_t; ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (xf86drmR128.h) ++ */ ++ ++/* Rage 128 specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
++ */ ++#define DRM_R128_INIT 0x00 ++#define DRM_R128_CCE_START 0x01 ++#define DRM_R128_CCE_STOP 0x02 ++#define DRM_R128_CCE_RESET 0x03 ++#define DRM_R128_CCE_IDLE 0x04 ++/* 0x05 not used */ ++#define DRM_R128_RESET 0x06 ++#define DRM_R128_SWAP 0x07 ++#define DRM_R128_CLEAR 0x08 ++#define DRM_R128_VERTEX 0x09 ++#define DRM_R128_INDICES 0x0a ++#define DRM_R128_BLIT 0x0b ++#define DRM_R128_DEPTH 0x0c ++#define DRM_R128_STIPPLE 0x0d ++/* 0x0e not used */ ++#define DRM_R128_INDIRECT 0x0f ++#define DRM_R128_FULLSCREEN 0x10 ++#define DRM_R128_CLEAR2 0x11 ++#define DRM_R128_GETPARAM 0x12 ++#define DRM_R128_FLIP 0x13 ++ ++#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t) ++#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START) ++#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t) ++#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET) ++#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE) ++/* 0x05 not used */ ++#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET) ++#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP) ++#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t) ++#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t) ++#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t) ++#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t) ++#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t) ++#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t) ++/* 0x0e not used */ ++#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) ++#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) ++#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) ++#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) ++#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) ++ ++typedef struct drm_r128_init { ++ enum { ++ R128_INIT_CCE = 0x01, ++ R128_CLEANUP_CCE = 0x02 ++ } func; ++ unsigned long sarea_priv_offset; ++ int is_pci; ++ int cce_mode; ++ int cce_secure; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ unsigned int span_offset; ++ ++ unsigned long fb_offset; ++ unsigned long mmio_offset; ++ unsigned long ring_offset; ++ unsigned long ring_rptr_offset; ++ unsigned long buffers_offset; ++ unsigned long agp_textures_offset; ++} drm_r128_init_t; ++ ++typedef struct drm_r128_cce_stop { ++ int flush; ++ int idle; ++} drm_r128_cce_stop_t; ++ ++typedef struct drm_r128_clear { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; ++} drm_r128_clear_t; ++ ++typedef struct drm_r128_vertex { ++ int prim; ++ int idx; /* Index of vertex buffer */ ++ int count; /* Number of vertices in buffer */ ++ int discard; /* Client finished with buffer? 
*/ ++} drm_r128_vertex_t; ++ ++typedef struct drm_r128_indices { ++ int prim; ++ int idx; ++ int start; ++ int end; ++ int discard; /* Client finished with buffer? */ ++} drm_r128_indices_t; ++ ++typedef struct drm_r128_blit { ++ int idx; ++ int pitch; ++ int offset; ++ int format; ++ unsigned short x, y; ++ unsigned short width, height; ++} drm_r128_blit_t; ++ ++typedef struct drm_r128_depth { ++ enum { ++ R128_WRITE_SPAN = 0x01, ++ R128_WRITE_PIXELS = 0x02, ++ R128_READ_SPAN = 0x03, ++ R128_READ_PIXELS = 0x04 ++ } func; ++ int n; ++ int __user *x; ++ int __user *y; ++ unsigned int __user *buffer; ++ unsigned char __user *mask; ++} drm_r128_depth_t; ++ ++typedef struct drm_r128_stipple { ++ unsigned int __user *mask; ++} drm_r128_stipple_t; ++ ++typedef struct drm_r128_indirect { ++ int idx; ++ int start; ++ int end; ++ int discard; ++} drm_r128_indirect_t; ++ ++typedef struct drm_r128_fullscreen { ++ enum { ++ R128_INIT_FULLSCREEN = 0x01, ++ R128_CLEANUP_FULLSCREEN = 0x02 ++ } func; ++} drm_r128_fullscreen_t; ++ ++/* 2.3: An ioctl to get parameters that aren't available to the 3d ++ * client any other way. ++ */ ++#define R128_PARAM_IRQ_NR 1 ++ ++typedef struct drm_r128_getparam { ++ int param; ++ void __user *value; ++} drm_r128_getparam_t; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,113 @@ ++/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- ++ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. 
(Rik) Faith ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ r128_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .dev_priv_size = sizeof(drm_r128_buf_priv_t), ++ .preclose = r128_driver_preclose, ++ .lastclose = r128_driver_lastclose, ++ .get_vblank_counter = r128_get_vblank_counter, ++ .enable_vblank = r128_enable_vblank, ++ .disable_vblank = r128_disable_vblank, ++ .irq_preinstall = r128_driver_irq_preinstall, ++ .irq_postinstall = r128_driver_irq_postinstall, ++ .irq_uninstall = r128_driver_irq_uninstall, ++ .irq_handler = r128_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = r128_ioctls, ++ .dma_ioctl = r128_cce_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = r128_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init r128_init(void) ++{ ++ driver.num_ioctls = r128_max_ioctl; ++ ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit r128_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(r128_init); ++module_exit(r128_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,525 @@ ++/* r128_drv.h -- Private header for r128 driver -*- linux-c -*- ++ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com ++ */ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rickard E. (Rik) Faith
++ * Kevin E. Martin
++ * Gareth Hughes
++ * Michel Dänzer
++ */
++
++#ifndef __R128_DRV_H__
++#define __R128_DRV_H__
++
++/* General customization:
++ */
++#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
++
++#define DRIVER_NAME "r128"
++#define DRIVER_DESC "ATI Rage 128"
++#define DRIVER_DATE "20030725"
++
++/* Interface history:
++ *
++ * ?? - ??
++ * 2.4 - Add support for ycbcr textures (no new ioctls)
++ * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
++ */
++#define DRIVER_MAJOR 2
++#define DRIVER_MINOR 5
++#define DRIVER_PATCHLEVEL 0
++
++#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
++
++typedef struct drm_r128_freelist {
++ unsigned int age;
++ struct drm_buf *buf;
++ struct drm_r128_freelist *next;
++ struct drm_r128_freelist *prev;
++} drm_r128_freelist_t;
++
++typedef struct drm_r128_ring_buffer {
++ u32 *start;
++ u32 *end;
++ int size;
++ int size_l2qw;
++
++ u32 tail;
++ u32 tail_mask;
++ int space;
++
++ int high_mark;
++} drm_r128_ring_buffer_t;
++
++typedef struct drm_r128_private {
++ drm_r128_ring_buffer_t ring;
++ drm_r128_sarea_t *sarea_priv;
++
++ int cce_mode;
++ int cce_fifo_size;
++ int cce_running;
++
++ drm_r128_freelist_t *head;
++ drm_r128_freelist_t *tail;
++
++ int usec_timeout;
++ int is_pci;
++ unsigned long cce_buffers_offset;
++
++ atomic_t idle_count;
++
++ int page_flipping;
++ int current_page;
++ u32 crtc_offset;
++ u32 crtc_offset_cntl;
++
++ atomic_t vbl_received;
++
++ u32 color_fmt;
++ unsigned int front_offset;
++ unsigned int front_pitch;
++ unsigned int back_offset;
++ unsigned int back_pitch;
++
++ u32 depth_fmt;
++ unsigned int depth_offset;
++ unsigned int depth_pitch;
++ unsigned int span_offset;
++
++ u32 front_pitch_offset_c;
++ u32 back_pitch_offset_c;
++ u32 depth_pitch_offset_c;
++ u32 span_pitch_offset_c;
++
++ drm_local_map_t *sarea;
++ drm_local_map_t *mmio;
++ drm_local_map_t *cce_ring;
++ drm_local_map_t *ring_rptr;
++ drm_local_map_t *agp_textures;
++ struct drm_ati_pcigart_info gart_info;
++} drm_r128_private_t;
++
++typedef struct drm_r128_buf_priv {
++ u32 age;
++ int prim;
++ int discard;
++ int dispatched;
++ drm_r128_freelist_t *list_entry;
++} drm_r128_buf_priv_t;
++
++extern struct drm_ioctl_desc r128_ioctls[];
++extern int r128_max_ioctl;
++
++ /* r128_cce.c */
++extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_buffers(struct
drm_device *dev, void *data, struct drm_file *file_priv); ++ ++extern void r128_freelist_reset(struct drm_device * dev); ++ ++extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); ++ ++extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); ++extern int r128_do_cleanup_cce(struct drm_device * dev); ++ ++extern int r128_enable_vblank(struct drm_device *dev, int crtc); ++extern void r128_disable_vblank(struct drm_device *dev, int crtc); ++extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); ++extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); ++extern void r128_driver_irq_preinstall(struct drm_device * dev); ++extern int r128_driver_irq_postinstall(struct drm_device * dev); ++extern void r128_driver_irq_uninstall(struct drm_device * dev); ++extern void r128_driver_lastclose(struct drm_device * dev); ++extern void r128_driver_preclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++ ++extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++/* Register definitions, register access macros and drmAddMap constants ++ * for Rage 128 kernel driver. ++ */ ++ ++#define R128_AUX_SC_CNTL 0x1660 ++# define R128_AUX1_SC_EN (1 << 0) ++# define R128_AUX1_SC_MODE_OR (0 << 1) ++# define R128_AUX1_SC_MODE_NAND (1 << 1) ++# define R128_AUX2_SC_EN (1 << 2) ++# define R128_AUX2_SC_MODE_OR (0 << 3) ++# define R128_AUX2_SC_MODE_NAND (1 << 3) ++# define R128_AUX3_SC_EN (1 << 4) ++# define R128_AUX3_SC_MODE_OR (0 << 5) ++# define R128_AUX3_SC_MODE_NAND (1 << 5) ++#define R128_AUX1_SC_LEFT 0x1664 ++#define R128_AUX1_SC_RIGHT 0x1668 ++#define R128_AUX1_SC_TOP 0x166c ++#define R128_AUX1_SC_BOTTOM 0x1670 ++#define R128_AUX2_SC_LEFT 0x1674 ++#define R128_AUX2_SC_RIGHT 0x1678 ++#define R128_AUX2_SC_TOP 0x167c ++#define R128_AUX2_SC_BOTTOM 0x1680 ++#define R128_AUX3_SC_LEFT 0x1684 ++#define R128_AUX3_SC_RIGHT 0x1688 ++#define R128_AUX3_SC_TOP 0x168c ++#define R128_AUX3_SC_BOTTOM 0x1690 ++ ++#define R128_BRUSH_DATA0 0x1480 ++#define R128_BUS_CNTL 0x0030 ++# define R128_BUS_MASTER_DIS (1 << 6) ++ ++#define R128_CLOCK_CNTL_INDEX 0x0008 ++#define R128_CLOCK_CNTL_DATA 0x000c ++# define R128_PLL_WR_EN (1 << 7) ++#define R128_CONSTANT_COLOR_C 0x1d34 ++#define R128_CRTC_OFFSET 0x0224 ++#define R128_CRTC_OFFSET_CNTL 0x0228 ++# define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16) ++ ++#define R128_DP_GUI_MASTER_CNTL 0x146c ++# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) ++# define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) ++# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4) ++# define R128_GMC_BRUSH_NONE (15 << 4) ++# define R128_GMC_DST_16BPP (4 << 8) ++# define R128_GMC_DST_24BPP (5 << 8) ++# define R128_GMC_DST_32BPP (6 << 8) ++# define R128_GMC_DST_DATATYPE_SHIFT 8 ++# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12) ++# define R128_DP_SRC_SOURCE_MEMORY (2 << 24) ++# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24) ++# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28) ++# define R128_GMC_AUX_CLIP_DIS (1 << 29) ++# define R128_GMC_WR_MSK_DIS (1 << 30) ++# define R128_ROP3_S 0x00cc0000 ++# define R128_ROP3_P 0x00f00000 ++#define R128_DP_WRITE_MASK 0x16cc ++#define R128_DST_PITCH_OFFSET_C 0x1c80 ++# define R128_DST_TILE (1 << 31) ++ ++#define R128_GEN_INT_CNTL 0x0040 ++# define R128_CRTC_VBLANK_INT_EN (1 << 0) ++#define R128_GEN_INT_STATUS 0x0044 ++# define R128_CRTC_VBLANK_INT (1 << 0) ++# define R128_CRTC_VBLANK_INT_AK (1 << 0) ++#define R128_GEN_RESET_CNTL 0x00f0 ++# define R128_SOFT_RESET_GUI (1 << 0) ++ ++#define R128_GUI_SCRATCH_REG0 0x15e0 ++#define 
R128_GUI_SCRATCH_REG1 0x15e4 ++#define R128_GUI_SCRATCH_REG2 0x15e8 ++#define R128_GUI_SCRATCH_REG3 0x15ec ++#define R128_GUI_SCRATCH_REG4 0x15f0 ++#define R128_GUI_SCRATCH_REG5 0x15f4 ++ ++#define R128_GUI_STAT 0x1740 ++# define R128_GUI_FIFOCNT_MASK 0x0fff ++# define R128_GUI_ACTIVE (1 << 31) ++ ++#define R128_MCLK_CNTL 0x000f ++# define R128_FORCE_GCP (1 << 16) ++# define R128_FORCE_PIPE3D_CP (1 << 17) ++# define R128_FORCE_RCP (1 << 18) ++ ++#define R128_PC_GUI_CTLSTAT 0x1748 ++#define R128_PC_NGUI_CTLSTAT 0x0184 ++# define R128_PC_FLUSH_GUI (3 << 0) ++# define R128_PC_RI_GUI (1 << 2) ++# define R128_PC_FLUSH_ALL 0x00ff ++# define R128_PC_BUSY (1 << 31) ++ ++#define R128_PCI_GART_PAGE 0x017c ++#define R128_PRIM_TEX_CNTL_C 0x1cb0 ++ ++#define R128_SCALE_3D_CNTL 0x1a00 ++#define R128_SEC_TEX_CNTL_C 0x1d00 ++#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c ++#define R128_SETUP_CNTL 0x1bc4 ++#define R128_STEN_REF_MASK_C 0x1d40 ++ ++#define R128_TEX_CNTL_C 0x1c9c ++# define R128_TEX_CACHE_FLUSH (1 << 23) ++ ++#define R128_WAIT_UNTIL 0x1720 ++# define R128_EVENT_CRTC_OFFSET (1 << 0) ++#define R128_WINDOW_XY_OFFSET 0x1bcc ++ ++/* CCE registers ++ */ ++#define R128_PM4_BUFFER_OFFSET 0x0700 ++#define R128_PM4_BUFFER_CNTL 0x0704 ++# define R128_PM4_MASK (15 << 28) ++# define R128_PM4_NONPM4 (0 << 28) ++# define R128_PM4_192PIO (1 << 28) ++# define R128_PM4_192BM (2 << 28) ++# define R128_PM4_128PIO_64INDBM (3 << 28) ++# define R128_PM4_128BM_64INDBM (4 << 28) ++# define R128_PM4_64PIO_128INDBM (5 << 28) ++# define R128_PM4_64BM_128INDBM (6 << 28) ++# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) ++# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) ++# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) ++# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27) ++ ++#define R128_PM4_BUFFER_WM_CNTL 0x0708 ++# define R128_WMA_SHIFT 0 ++# define R128_WMB_SHIFT 8 ++# define R128_WMC_SHIFT 16 ++# define R128_WB_WM_SHIFT 24 ++ ++#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c ++#define R128_PM4_BUFFER_DL_RPTR 0x0710 ++#define R128_PM4_BUFFER_DL_WPTR 0x0714 ++# define R128_PM4_BUFFER_DL_DONE (1 << 31) ++ ++#define R128_PM4_VC_FPU_SETUP 0x071c ++ ++#define R128_PM4_IW_INDOFF 0x0738 ++#define R128_PM4_IW_INDSIZE 0x073c ++ ++#define R128_PM4_STAT 0x07b8 ++# define R128_PM4_FIFOCNT_MASK 0x0fff ++# define R128_PM4_BUSY (1 << 16) ++# define R128_PM4_GUI_ACTIVE (1 << 31) ++ ++#define R128_PM4_MICROCODE_ADDR 0x07d4 ++#define R128_PM4_MICROCODE_RADDR 0x07d8 ++#define R128_PM4_MICROCODE_DATAH 0x07dc ++#define R128_PM4_MICROCODE_DATAL 0x07e0 ++ ++#define R128_PM4_BUFFER_ADDR 0x07f0 ++#define R128_PM4_MICRO_CNTL 0x07fc ++# define R128_PM4_MICRO_FREERUN (1 << 30) ++ ++#define R128_PM4_FIFO_DATA_EVEN 0x1000 ++#define R128_PM4_FIFO_DATA_ODD 0x1004 ++ ++/* CCE command packets ++ */ ++#define R128_CCE_PACKET0 0x00000000 ++#define R128_CCE_PACKET1 0x40000000 ++#define R128_CCE_PACKET2 0x80000000 ++#define R128_CCE_PACKET3 0xC0000000 ++# define R128_CNTL_HOSTDATA_BLT 0x00009400 ++# define R128_CNTL_PAINT_MULTI 0x00009A00 ++# define R128_CNTL_BITBLT_MULTI 0x00009B00 ++# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300 ++ ++#define R128_CCE_PACKET_MASK 0xC0000000 ++#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000 ++#define R128_CCE_PACKET0_REG_MASK 0x000007ff ++#define R128_CCE_PACKET1_REG0_MASK 0x000007ff ++#define R128_CCE_PACKET1_REG1_MASK 0x003ff800 ++ ++#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002 ++#define 
R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007 ++#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010 ++#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020 ++#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030 ++#define R128_CCE_VC_CNTL_NUM_SHIFT 16 ++ ++#define R128_DATATYPE_VQ 0 ++#define R128_DATATYPE_CI4 1 ++#define R128_DATATYPE_CI8 2 ++#define R128_DATATYPE_ARGB1555 3 ++#define R128_DATATYPE_RGB565 4 ++#define R128_DATATYPE_RGB888 5 ++#define R128_DATATYPE_ARGB8888 6 ++#define R128_DATATYPE_RGB332 7 ++#define R128_DATATYPE_Y8 8 ++#define R128_DATATYPE_RGB8 9 ++#define R128_DATATYPE_CI16 10 ++#define R128_DATATYPE_YVYU422 11 ++#define R128_DATATYPE_VYUY422 12 ++#define R128_DATATYPE_AYUV444 14 ++#define R128_DATATYPE_ARGB4444 15 ++ ++/* Constants */ ++#define R128_AGP_OFFSET 0x02000000 ++ ++#define R128_WATERMARK_L 16 ++#define R128_WATERMARK_M 8 ++#define R128_WATERMARK_N 8 ++#define R128_WATERMARK_K 128 ++ ++#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */ ++ ++#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0 ++#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1 ++#define R128_MAX_VB_AGE 0x7fffffff ++#define R128_MAX_VB_VERTS (0xffff) ++ ++#define R128_RING_HIGH_MARK 128 ++ ++#define R128_PERFORMANCE_BOXES 0 ++ ++#define R128_PCIGART_TABLE_SIZE 32768 ++ ++#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) ++#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) ++ ++#define R128_WRITE_PLL(addr,val) \ ++do { \ ++ R128_WRITE8(R128_CLOCK_CNTL_INDEX, \ ++ ((addr) & 0x1f) | R128_PLL_WR_EN); \ ++ R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \ ++} while (0) ++ ++#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \ ++ ((n) << 16) | ((reg) >> 2)) ++#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \ ++ (((reg1) >> 2) << 11) | ((reg0) >> 2)) ++#define CCE_PACKET2() (R128_CCE_PACKET2) ++#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \ ++ (pkt) | ((n) << 16)) ++ ++static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_ring_buffer_t *ring = &dev_priv->ring; ++ ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); ++ if (ring->space <= 0) ++ ring->space += ring->size; ++} ++ ++/* ================================================================ ++ * Misc helper macros ++ */ ++ ++#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ ++ if ( ring->space < ring->high_mark ) { \ ++ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ ++ r128_update_ring_snapshot( dev_priv ); \ ++ if ( ring->space >= ring->high_mark ) \ ++ goto __ring_space_done; \ ++ DRM_UDELAY(1); \ ++ } \ ++ DRM_ERROR( "ring space check failed!\n" ); \ ++ return -EBUSY; \ ++ } \ ++ __ring_space_done: \ ++ ; \ ++} while (0) ++ ++#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \ ++ if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \ ++ int __ret = r128_do_cce_idle( dev_priv ); \ ++ if ( __ret ) return __ret; \ ++ sarea_priv->last_dispatch = 0; \ ++ r128_freelist_reset( dev ); \ ++ } \ ++} while (0) ++ ++#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \ ++ OUT_RING( 
CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( R128_EVENT_CRTC_OFFSET ); \ ++} while (0) ++ ++/* ================================================================ ++ * Ring control ++ */ ++ ++#define R128_VERBOSE 0 ++ ++#define RING_LOCALS \ ++ int write, _nr; unsigned int tail_mask; volatile u32 *ring; ++ ++#define BEGIN_RING( n ) do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ ++ } \ ++ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ ++ COMMIT_RING(); \ ++ r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ ++ } \ ++ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ++ ring = dev_priv->ring.start; \ ++ write = dev_priv->ring.tail; \ ++ tail_mask = dev_priv->ring.tail_mask; \ ++} while (0) ++ ++/* You can set this to zero if you want. If the card locks up, you'll ++ * need to keep this set. It works around a bug in early revs of the ++ * Rage 128 chipset, where the CCE would read 32 dwords past the end of ++ * the ring buffer before wrapping around. ++ */ ++#define R128_BROKEN_CCE 1 ++ ++#define ADVANCE_RING() do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ ++ write, dev_priv->ring.tail ); \ ++ } \ ++ if ( R128_BROKEN_CCE && write < 32 ) { \ ++ memcpy( dev_priv->ring.end, \ ++ dev_priv->ring.start, \ ++ write * sizeof(u32) ); \ ++ } \ ++ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \ ++ DRM_ERROR( \ ++ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ++ ((dev_priv->ring.tail + _nr) & tail_mask), \ ++ write, __LINE__); \ ++ } else \ ++ dev_priv->ring.tail = write; \ ++} while (0) ++ ++#define COMMIT_RING() do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \ ++ dev_priv->ring.tail ); \ ++ } \ ++ DRM_MEMORYBARRIER(); \ ++ R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \ ++ R128_READ( R128_PM4_BUFFER_DL_WPTR ); \ ++} while (0) ++ ++#define OUT_RING( x ) do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ ++ (unsigned int)(x), write ); \ ++ } \ ++ ring[write++] = cpu_to_le32( x ); \ ++ write &= tail_mask; \ ++} while (0) ++ ++#endif /* __R128_DRV_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_ioc32.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_ioc32.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_ioc32.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,222 @@ ++/** ++ * \file r128_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the R128 DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++ ++typedef struct drm_r128_init32 { ++ int func; ++ unsigned int sarea_priv_offset; ++ int is_pci; ++ int cce_mode; ++ int cce_secure; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ unsigned int span_offset; ++ ++ unsigned int fb_offset; ++ unsigned int mmio_offset; ++ unsigned int ring_offset; ++ unsigned int ring_rptr_offset; ++ unsigned int buffers_offset; ++ unsigned int agp_textures_offset; ++} drm_r128_init32_t; ++ ++static int compat_r128_init(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_init32_t init32; ++ drm_r128_init_t __user *init; ++ ++ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) ++ return -EFAULT; ++ ++ init = compat_alloc_user_space(sizeof(*init)); ++ if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) ++ || __put_user(init32.func, &init->func) ++ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) ++ || __put_user(init32.is_pci, &init->is_pci) ++ || __put_user(init32.cce_mode, &init->cce_mode) ++ || __put_user(init32.cce_secure, &init->cce_secure) ++ || __put_user(init32.ring_size, &init->ring_size) ++ || __put_user(init32.usec_timeout, &init->usec_timeout) ++ || __put_user(init32.fb_bpp, &init->fb_bpp) ++ || __put_user(init32.front_offset, &init->front_offset) ++ || __put_user(init32.front_pitch, &init->front_pitch) ++ || __put_user(init32.back_offset, &init->back_offset) ++ || __put_user(init32.back_pitch, &init->back_pitch) ++ || __put_user(init32.depth_bpp, &init->depth_bpp) ++ || __put_user(init32.depth_offset, &init->depth_offset) ++ || __put_user(init32.depth_pitch, &init->depth_pitch) ++ || __put_user(init32.span_offset, &init->span_offset) ++ || __put_user(init32.fb_offset, &init->fb_offset) ++ || __put_user(init32.mmio_offset, &init->mmio_offset) ++ || __put_user(init32.ring_offset, &init->ring_offset) ++ || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) ++ || __put_user(init32.buffers_offset, &init->buffers_offset) ++ || __put_user(init32.agp_textures_offset, ++ &init->agp_textures_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_INIT, (unsigned long)init); ++} ++ ++ ++typedef struct drm_r128_depth32 { ++ int func; ++ int n; ++ u32 x; ++ u32 y; ++ u32 buffer; ++ u32 mask; ++} drm_r128_depth32_t; ++ ++static int compat_r128_depth(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_depth32_t depth32; ++ drm_r128_depth_t __user *depth; ++ ++ if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32))) ++ return -EFAULT; ++ ++ depth = compat_alloc_user_space(sizeof(*depth)); ++ if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth)) ++ || __put_user(depth32.func, &depth->func) ++ || __put_user(depth32.n, &depth->n) ++ || __put_user((int __user *)(unsigned long)depth32.x, &depth->x) ++ || 
__put_user((int __user *)(unsigned long)depth32.y, &depth->y) ++ || __put_user((unsigned int __user *)(unsigned long)depth32.buffer, ++ &depth->buffer) ++ || __put_user((unsigned char __user *)(unsigned long)depth32.mask, ++ &depth->mask)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_DEPTH, (unsigned long)depth); ++ ++} ++ ++typedef struct drm_r128_stipple32 { ++ u32 mask; ++} drm_r128_stipple32_t; ++ ++static int compat_r128_stipple(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_stipple32_t stipple32; ++ drm_r128_stipple_t __user *stipple; ++ ++ if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32))) ++ return -EFAULT; ++ ++ stipple = compat_alloc_user_space(sizeof(*stipple)); ++ if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple)) ++ || __put_user((unsigned int __user *)(unsigned long)stipple32.mask, ++ &stipple->mask)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); ++} ++ ++typedef struct drm_r128_getparam32 { ++ int param; ++ u32 value; ++} drm_r128_getparam32_t; ++ ++static int compat_r128_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_getparam32_t getparam32; ++ drm_r128_getparam_t __user *getparam; ++ ++ if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) ++ return -EFAULT; ++ ++ getparam = compat_alloc_user_space(sizeof(*getparam)); ++ if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) ++ || __put_user(getparam32.param, &getparam->param) ++ || __put_user((void __user *)(unsigned long)getparam32.value, ++ &getparam->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); ++} ++ ++drm_ioctl_compat_t *r128_compat_ioctls[] = { ++ [DRM_R128_INIT] = compat_r128_init, ++ [DRM_R128_DEPTH] = compat_r128_depth, ++ [DRM_R128_STIPPLE] = compat_r128_stipple, ++ [DRM_R128_GETPARAM] = compat_r128_getparam, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) ++ fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_irq.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_irq.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_irq.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,116 @@ ++/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. 
++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Whitwell ++ * Eric Anholt ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) ++{ ++ const drm_r128_private_t *dev_priv = dev->dev_private; ++ ++ if (crtc != 0) ++ return 0; ++ ++ return atomic_read(&dev_priv->vbl_received); ++} ++ ++irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; ++ int status; ++ ++ status = R128_READ(R128_GEN_INT_STATUS); ++ ++ /* VBLANK interrupt */ ++ if (status & R128_CRTC_VBLANK_INT) { ++ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); ++ atomic_inc(&dev_priv->vbl_received); ++ drm_handle_vblank(dev, 0); ++ return IRQ_HANDLED; ++ } ++ return IRQ_NONE; ++} ++ ++int r128_enable_vblank(struct drm_device *dev, int crtc) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ ++ if (crtc != 0) { ++ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); ++ return -EINVAL; ++ } ++ ++ R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); ++ return 0; ++} ++ ++void r128_disable_vblank(struct drm_device *dev, int crtc) ++{ ++ if (crtc != 0) ++ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); ++ ++ /* ++ * FIXME: implement proper interrupt disable by using the vblank ++ * counter register (if available) ++ * ++ * R128_WRITE(R128_GEN_INT_CNTL, ++ * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN); ++ */ ++} ++ ++void r128_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; ++ ++ /* Disable *all* interrupts */ ++ R128_WRITE(R128_GEN_INT_CNTL, 0); ++ /* Clear vblank bit if it's already high */ ++ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); ++} ++ ++int r128_driver_irq_postinstall(struct drm_device * dev) ++{ ++ return drm_vblank_init(dev, 1); ++} ++ ++void r128_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; ++ if (!dev_priv) ++ return; ++ ++ /* Disable *all* interrupts */ ++ R128_WRITE(R128_GEN_INT_CNTL, 0); ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_state.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_state.c +--- 
linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r128_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r128_state.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1681 @@ ++/* r128_state.c -- State support for r128 -*- linux-c -*- ++ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++/* ================================================================ ++ * CCE hardware state programming functions ++ */ ++ ++static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, ++ struct drm_clip_rect * boxes, int count) ++{ ++ u32 aux_sc_cntl = 0x00000000; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING((count < 3 ? 
count : 3) * 5 + 2); ++ ++ if (count >= 1) { ++ OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3)); ++ OUT_RING(boxes[0].x1); ++ OUT_RING(boxes[0].x2 - 1); ++ OUT_RING(boxes[0].y1); ++ OUT_RING(boxes[0].y2 - 1); ++ ++ aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR); ++ } ++ if (count >= 2) { ++ OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3)); ++ OUT_RING(boxes[1].x1); ++ OUT_RING(boxes[1].x2 - 1); ++ OUT_RING(boxes[1].y1); ++ OUT_RING(boxes[1].y2 - 1); ++ ++ aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR); ++ } ++ if (count >= 3) { ++ OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3)); ++ OUT_RING(boxes[2].x1); ++ OUT_RING(boxes[2].x2 - 1); ++ OUT_RING(boxes[2].y1); ++ OUT_RING(boxes[2].y2 - 1); ++ ++ aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR); ++ } ++ ++ OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0)); ++ OUT_RING(aux_sc_cntl); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0)); ++ OUT_RING(ctx->scale_3d_cntl); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(13); ++ ++ OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11)); ++ OUT_RING(ctx->dst_pitch_offset_c); ++ OUT_RING(ctx->dp_gui_master_cntl_c); ++ OUT_RING(ctx->sc_top_left_c); ++ OUT_RING(ctx->sc_bottom_right_c); ++ OUT_RING(ctx->z_offset_c); ++ OUT_RING(ctx->z_pitch_c); ++ OUT_RING(ctx->z_sten_cntl_c); ++ OUT_RING(ctx->tex_cntl_c); ++ OUT_RING(ctx->misc_3d_state_cntl_reg); ++ OUT_RING(ctx->texture_clr_cmp_clr_c); ++ OUT_RING(ctx->texture_clr_cmp_msk_c); ++ OUT_RING(ctx->fog_color_c); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(3); ++ ++ OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP)); ++ OUT_RING(ctx->setup_cntl); ++ OUT_RING(ctx->pm4_vc_fpu_setup); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(5); ++ ++ OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0)); ++ OUT_RING(ctx->dp_write_mask); ++ ++ OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1)); ++ OUT_RING(ctx->sten_ref_mask_c); ++ OUT_RING(ctx->plane_3d_mask_c); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0)); ++ OUT_RING(ctx->window_xy_offset); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0]; ++ int i; ++ 
RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS); ++ ++ OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C, ++ 2 + R128_MAX_TEXTURE_LEVELS)); ++ OUT_RING(tex->tex_cntl); ++ OUT_RING(tex->tex_combine_cntl); ++ OUT_RING(ctx->tex_size_pitch_c); ++ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { ++ OUT_RING(tex->tex_offset[i]); ++ } ++ ++ OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1)); ++ OUT_RING(ctx->constant_color_c); ++ OUT_RING(tex->tex_border_color); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1]; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS); ++ ++ OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS)); ++ OUT_RING(tex->tex_cntl); ++ OUT_RING(tex->tex_combine_cntl); ++ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { ++ OUT_RING(tex->tex_offset[i]); ++ } ++ ++ OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0)); ++ OUT_RING(tex->tex_border_color); ++ ++ ADVANCE_RING(); ++} ++ ++static void r128_emit_state(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ ++ DRM_DEBUG("dirty=0x%08x\n", dirty); ++ ++ if (dirty & R128_UPLOAD_CORE) { ++ r128_emit_core(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_CORE; ++ } ++ ++ if (dirty & R128_UPLOAD_CONTEXT) { ++ r128_emit_context(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT; ++ } ++ ++ if (dirty & R128_UPLOAD_SETUP) { ++ r128_emit_setup(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_SETUP; ++ } ++ ++ if (dirty & R128_UPLOAD_MASKS) { ++ r128_emit_masks(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_MASKS; ++ } ++ ++ if (dirty & R128_UPLOAD_WINDOW) { ++ r128_emit_window(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_WINDOW; ++ } ++ ++ if (dirty & R128_UPLOAD_TEX0) { ++ r128_emit_tex0(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_TEX0; ++ } ++ ++ if (dirty & R128_UPLOAD_TEX1) { ++ r128_emit_tex1(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_TEX1; ++ } ++ ++ /* Turn off the texture cache flushing */ ++ sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH; ++ ++ sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE; ++} ++ ++#if R128_PERFORMANCE_BOXES ++/* ================================================================ ++ * Performance monitoring functions ++ */ ++ ++static void r128_clear_box(drm_r128_private_t * dev_priv, ++ int x, int y, int w, int h, int r, int g, int b) ++{ ++ u32 pitch, offset; ++ u32 fb_bpp, color; ++ RING_LOCALS; ++ ++ switch (dev_priv->fb_bpp) { ++ case 16: ++ fb_bpp = R128_GMC_DST_16BPP; ++ color = (((r & 0xf8) << 8) | ++ ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); ++ break; ++ case 24: ++ fb_bpp = R128_GMC_DST_24BPP; ++ color = ((r << 16) | (g << 8) | b); ++ break; ++ case 32: ++ fb_bpp = R128_GMC_DST_32BPP; ++ color = (((0xff) << 24) | (r << 16) | (g << 8) | b); ++ break; ++ default: ++ return; ++ } ++ ++ offset = dev_priv->back_offset; ++ pitch = dev_priv->back_pitch >> 3; ++ ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ fb_bpp | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS); ++ ++ OUT_RING((pitch << 21) | (offset >> 5)); ++ OUT_RING(color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ 
ADVANCE_RING(); ++} ++ ++static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv) ++{ ++ if (atomic_read(&dev_priv->idle_count) == 0) { ++ r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); ++ } else { ++ atomic_set(&dev_priv->idle_count, 0); ++ } ++} ++ ++#endif ++ ++/* ================================================================ ++ * CCE command dispatch functions ++ */ ++ ++static void r128_print_dirty(const char *msg, unsigned int flags) ++{ ++ DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n", ++ msg, ++ flags, ++ (flags & R128_UPLOAD_CORE) ? "core, " : "", ++ (flags & R128_UPLOAD_CONTEXT) ? "context, " : "", ++ (flags & R128_UPLOAD_SETUP) ? "setup, " : "", ++ (flags & R128_UPLOAD_TEX0) ? "tex0, " : "", ++ (flags & R128_UPLOAD_TEX1) ? "tex1, " : "", ++ (flags & R128_UPLOAD_MASKS) ? "masks, " : "", ++ (flags & R128_UPLOAD_WINDOW) ? "window, " : "", ++ (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "", ++ (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : ""); ++} ++ ++static void r128_cce_dispatch_clear(struct drm_device * dev, ++ drm_r128_clear_t * clear) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ unsigned int flags = clear->flags; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ if (dev_priv->page_flipping && dev_priv->current_page == 1) { ++ unsigned int tmp = flags; ++ ++ flags &= ~(R128_FRONT | R128_BACK); ++ if (tmp & R128_FRONT) ++ flags |= R128_BACK; ++ if (tmp & R128_BACK) ++ flags |= R128_FRONT; ++ } ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n", ++ pbox[i].x1, pbox[i].y1, pbox[i].x2, ++ pbox[i].y2, flags); ++ ++ if (flags & (R128_FRONT | R128_BACK)) { ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0)); ++ OUT_RING(clear->color_mask); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & R128_FRONT) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->color_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS); ++ ++ OUT_RING(dev_priv->front_pitch_offset_c); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & R128_BACK) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->color_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS); ++ ++ OUT_RING(dev_priv->back_pitch_offset_c); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & R128_DEPTH) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(clear->clear_depth); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ } ++} ++ ++static void 
r128_cce_dispatch_swap(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++#if R128_PERFORMANCE_BOXES ++ /* Do some trivial performance monitoring... ++ */ ++ r128_cce_performance_boxes(dev_priv); ++#endif ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ BEGIN_RING(7); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | ++ R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (dev_priv->color_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_MEMORY | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS); ++ ++ /* Make this work even if front & back are flipped: ++ */ ++ if (dev_priv->current_page == 0) { ++ OUT_RING(dev_priv->back_pitch_offset_c); ++ OUT_RING(dev_priv->front_pitch_offset_c); ++ } else { ++ OUT_RING(dev_priv->front_pitch_offset_c); ++ OUT_RING(dev_priv->back_pitch_offset_c); ++ } ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. ++ */ ++ dev_priv->sarea_priv->last_frame++; ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0)); ++ OUT_RING(dev_priv->sarea_priv->last_frame); ++ ++ ADVANCE_RING(); ++} ++ ++static void r128_cce_dispatch_flip(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ DRM_DEBUG("page=%d pfCurrentPage=%d\n", ++ dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage); ++ ++#if R128_PERFORMANCE_BOXES ++ /* Do some trivial performance monitoring... ++ */ ++ r128_cce_performance_boxes(dev_priv); ++#endif ++ ++ BEGIN_RING(4); ++ ++ R128_WAIT_UNTIL_PAGE_FLIPPED(); ++ OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0)); ++ ++ if (dev_priv->current_page == 0) { ++ OUT_RING(dev_priv->back_offset); ++ } else { ++ OUT_RING(dev_priv->front_offset); ++ } ++ ++ ADVANCE_RING(); ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. 
++ */ ++ dev_priv->sarea_priv->last_frame++; ++ dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page = ++ 1 - dev_priv->current_page; ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0)); ++ OUT_RING(dev_priv->sarea_priv->last_frame); ++ ++ ADVANCE_RING(); ++} ++ ++static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int format = sarea_priv->vc_format; ++ int offset = buf->bus_address; ++ int size = buf->used; ++ int prim = buf_priv->prim; ++ int i = 0; ++ RING_LOCALS; ++ DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox); ++ ++ if (0) ++ r128_print_dirty("dispatch_vertex", sarea_priv->dirty); ++ ++ if (buf->used) { ++ buf_priv->dispatched = 1; ++ ++ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { ++ r128_emit_state(dev_priv); ++ } ++ ++ do { ++ /* Emit the next set of up to three cliprects */ ++ if (i < sarea_priv->nbox) { ++ r128_emit_clip_rects(dev_priv, ++ &sarea_priv->boxes[i], ++ sarea_priv->nbox - i); ++ } ++ ++ /* Emit the vertex buffer rendering commands */ ++ BEGIN_RING(5); ++ ++ OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3)); ++ OUT_RING(offset); ++ OUT_RING(size); ++ OUT_RING(format); ++ OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST | ++ (size << R128_CCE_VC_CNTL_NUM_SHIFT)); ++ ++ ADVANCE_RING(); ++ ++ i += 3; ++ } while (i < sarea_priv->nbox); ++ } ++ ++ if (buf_priv->discard) { ++ buf_priv->age = dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); ++ OUT_RING(buf_priv->age); ++ ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ buf->used = 0; ++ /* FIXME: Check dispatched field */ ++ buf_priv->dispatched = 0; ++ } ++ ++ dev_priv->sarea_priv->last_dispatch++; ++ ++ sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; ++ sarea_priv->nbox = 0; ++} ++ ++static void r128_cce_dispatch_indirect(struct drm_device * dev, ++ struct drm_buf * buf, int start, int end) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ RING_LOCALS; ++ DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); ++ ++ if (start != end) { ++ int offset = buf->bus_address + start; ++ int dwords = (end - start + 3) / sizeof(u32); ++ ++ /* Indirect buffer data must be an even number of ++ * dwords, so if we've been given an odd number we must ++ * pad the data with a Type-2 CCE packet. 
++ */ ++ if (dwords & 1) { ++ u32 *data = (u32 *) ++ ((char *)dev->agp_buffer_map->handle ++ + buf->offset + start); ++ data[dwords++] = cpu_to_le32(R128_CCE_PACKET2); ++ } ++ ++ buf_priv->dispatched = 1; ++ ++ /* Fire off the indirect buffer */ ++ BEGIN_RING(3); ++ ++ OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1)); ++ OUT_RING(offset); ++ OUT_RING(dwords); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (buf_priv->discard) { ++ buf_priv->age = dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the indirect buffer age */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); ++ OUT_RING(buf_priv->age); ++ ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ buf->used = 0; ++ /* FIXME: Check dispatched field */ ++ buf_priv->dispatched = 0; ++ } ++ ++ dev_priv->sarea_priv->last_dispatch++; ++} ++ ++static void r128_cce_dispatch_indices(struct drm_device * dev, ++ struct drm_buf * buf, ++ int start, int end, int count) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int format = sarea_priv->vc_format; ++ int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset; ++ int prim = buf_priv->prim; ++ u32 *data; ++ int dwords; ++ int i = 0; ++ RING_LOCALS; ++ DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count); ++ ++ if (0) ++ r128_print_dirty("dispatch_indices", sarea_priv->dirty); ++ ++ if (start != end) { ++ buf_priv->dispatched = 1; ++ ++ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { ++ r128_emit_state(dev_priv); ++ } ++ ++ dwords = (end - start + 3) / sizeof(u32); ++ ++ data = (u32 *) ((char *)dev->agp_buffer_map->handle ++ + buf->offset + start); ++ ++ data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, ++ dwords - 2)); ++ ++ data[1] = cpu_to_le32(offset); ++ data[2] = cpu_to_le32(R128_MAX_VB_VERTS); ++ data[3] = cpu_to_le32(format); ++ data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND | ++ (count << 16))); ++ ++ if (count & 0x1) { ++#ifdef __LITTLE_ENDIAN ++ data[dwords - 1] &= 0x0000ffff; ++#else ++ data[dwords - 1] &= 0xffff0000; ++#endif ++ } ++ ++ do { ++ /* Emit the next set of up to three cliprects */ ++ if (i < sarea_priv->nbox) { ++ r128_emit_clip_rects(dev_priv, ++ &sarea_priv->boxes[i], ++ sarea_priv->nbox - i); ++ } ++ ++ r128_cce_dispatch_indirect(dev, buf, start, end); ++ ++ i += 3; ++ } while (i < sarea_priv->nbox); ++ } ++ ++ if (buf_priv->discard) { ++ buf_priv->age = dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); ++ OUT_RING(buf_priv->age); ++ ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ /* FIXME: Check dispatched field */ ++ buf_priv->dispatched = 0; ++ } ++ ++ dev_priv->sarea_priv->last_dispatch++; ++ ++ sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; ++ sarea_priv->nbox = 0; ++} ++ ++static int r128_cce_dispatch_blit(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_r128_blit_t * blit) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ u32 *data; ++ int dword_shift, dwords; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ /* The compiler won't optimize away a division by a variable, ++ * even if the only legal values are powers of two. Thus, we'll ++ * use a shift instead. 
++ */ ++ switch (blit->format) { ++ case R128_DATATYPE_ARGB8888: ++ dword_shift = 0; ++ break; ++ case R128_DATATYPE_ARGB1555: ++ case R128_DATATYPE_RGB565: ++ case R128_DATATYPE_ARGB4444: ++ case R128_DATATYPE_YVYU422: ++ case R128_DATATYPE_VYUY422: ++ dword_shift = 1; ++ break; ++ case R128_DATATYPE_CI8: ++ case R128_DATATYPE_RGB8: ++ dword_shift = 2; ++ break; ++ default: ++ DRM_ERROR("invalid blit format %d\n", blit->format); ++ return -EINVAL; ++ } ++ ++ /* Flush the pixel cache, and mark the contents as Read Invalid. ++ * This ensures no pixel data gets mixed up with the texture ++ * data from the host data blit, otherwise part of the texture ++ * image may be corrupted. ++ */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0)); ++ OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI); ++ ++ ADVANCE_RING(); ++ ++ /* Dispatch the indirect buffer. ++ */ ++ buf = dma->buflist[blit->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", blit->idx); ++ return -EINVAL; ++ } ++ ++ buf_priv->discard = 1; ++ ++ dwords = (blit->width * blit->height) >> dword_shift; ++ ++ data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); ++ ++ data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6)); ++ data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (blit->format << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_HOST_DATA | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS)); ++ ++ data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5)); ++ data[3] = cpu_to_le32(0xffffffff); ++ data[4] = cpu_to_le32(0xffffffff); ++ data[5] = cpu_to_le32((blit->y << 16) | blit->x); ++ data[6] = cpu_to_le32((blit->height << 16) | blit->width); ++ data[7] = cpu_to_le32(dwords); ++ ++ buf->used = (dwords + 8) * sizeof(u32); ++ ++ r128_cce_dispatch_indirect(dev, buf, 0, buf->used); ++ ++ /* Flush the pixel cache after the blit completes. This ensures ++ * the texture data is written out to memory before rendering ++ * continues. ++ */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0)); ++ OUT_RING(R128_PC_FLUSH_GUI); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * Tiled depth buffer management ++ * ++ * FIXME: These should all set the destination write mask for when we ++ * have hardware stencil support. 
++ */ ++ ++static int r128_cce_dispatch_write_span(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, x, y; ++ u32 *buffer; ++ u8 *mask; ++ int i, buffer_size, mask_size; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { ++ return -EFAULT; ++ } ++ ++ buffer_size = depth->n * sizeof(u32); ++ buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); ++ if (buffer == NULL) ++ return -ENOMEM; ++ if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ mask_size = depth->n * sizeof(u8); ++ if (depth->mask) { ++ mask = drm_alloc(mask_size, DRM_MEM_BUFS); ++ if (mask == NULL) { ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < count; i++, x++) { ++ if (mask[i]) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ } else { ++ for (i = 0; i < count; i++, x++) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ ++ return 0; ++} ++ ++static int r128_cce_dispatch_write_pixels(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, *x, *y; ++ u32 *buffer; ++ u8 *mask; ++ int i, xbuf_size, ybuf_size, buffer_size, mask_size; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ xbuf_size = count * sizeof(*x); ++ ybuf_size = count * sizeof(*y); ++ x = drm_alloc(xbuf_size, DRM_MEM_BUFS); ++ if (x == NULL) { ++ return -ENOMEM; ++ } ++ y = drm_alloc(ybuf_size, DRM_MEM_BUFS); ++ if (y == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ buffer_size = depth->n * sizeof(u32); ++ buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); ++ if (buffer == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(buffer, 
depth->buffer, buffer_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ if (depth->mask) { ++ mask_size = depth->n * sizeof(u8); ++ mask = drm_alloc(mask_size, DRM_MEM_BUFS); ++ if (mask == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (mask[i]) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x[i] << 16) | y[i]); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ } else { ++ for (i = 0; i < count; i++) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x[i] << 16) | y[i]); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ ++ return 0; ++} ++ ++static int r128_cce_dispatch_read_span(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, x, y; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { ++ return -EFAULT; ++ } ++ ++ BEGIN_RING(7); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | ++ R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_MEMORY | ++ R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(dev_priv->span_pitch_offset_c); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((0 << 16) | 0); ++ OUT_RING((count << 16) | 1); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++static int r128_cce_dispatch_read_pixels(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, *x, *y; ++ int i, xbuf_size, ybuf_size; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ if (count > dev_priv->depth_pitch) { ++ count = dev_priv->depth_pitch; ++ } ++ ++ xbuf_size = count * sizeof(*x); ++ ybuf_size = count * sizeof(*y); ++ x = drm_alloc(xbuf_size, DRM_MEM_BUFS); ++ if (x == NULL) { ++ return -ENOMEM; ++ } ++ y = 
drm_alloc(ybuf_size, DRM_MEM_BUFS); ++ if (y == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < count; i++) { ++ BEGIN_RING(7); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | ++ R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_MEMORY | ++ R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(dev_priv->span_pitch_offset_c); ++ ++ OUT_RING((x[i] << 16) | y[i]); ++ OUT_RING((i << 16) | 0); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * Polygon stipple ++ */ ++ ++static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(33); ++ ++ OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31)); ++ for (i = 0; i < 32; i++) { ++ OUT_RING(stipple[i]); ++ } ++ ++ ADVANCE_RING(); ++} ++ ++/* ================================================================ ++ * IOCTL functions ++ */ ++ ++static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_clear_t *clear = data; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; ++ ++ r128_cce_dispatch_clear(dev, clear); ++ COMMIT_RING(); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS; ++ ++ return 0; ++} ++ ++static int r128_do_init_pageflip(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET); ++ dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL); ++ ++ R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset); ++ R128_WRITE(R128_CRTC_OFFSET_CNTL, ++ dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL); ++ ++ dev_priv->page_flipping = 1; ++ dev_priv->current_page = 0; ++ dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page; ++ ++ return 0; ++} ++ ++static int r128_do_cleanup_pageflip(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset); ++ R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl); ++ ++ if (dev_priv->current_page != 0) { ++ r128_cce_dispatch_flip(dev); ++ COMMIT_RING(); ++ } ++ ++ dev_priv->page_flipping = 0; ++ return 0; ++} ++ ++/* Swapping and flipping are different operations, need different ioctls. ++ * They can & should be intermixed to support multiple 3d windows. 
++ */ ++ ++static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (!dev_priv->page_flipping) ++ r128_do_init_pageflip(dev); ++ ++ r128_cce_dispatch_flip(dev); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; ++ ++ r128_cce_dispatch_swap(dev); ++ dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT | ++ R128_UPLOAD_MASKS); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", ++ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ vertex->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (vertex->prim < 0 || ++ vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { ++ DRM_ERROR("buffer prim %d\n", vertex->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[vertex->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", vertex->idx); ++ return -EINVAL; ++ } ++ ++ buf->used = vertex->count; ++ buf_priv->prim = vertex->prim; ++ buf_priv->discard = vertex->discard; ++ ++ r128_cce_dispatch_vertex(dev, buf); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_indices_t *elts = data; ++ int count; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, ++ elts->idx, elts->start, elts->end, elts->discard); ++ ++ if (elts->idx < 0 || elts->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ elts->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (elts->prim < 0 || ++ elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { ++ DRM_ERROR("buffer prim %d\n", elts->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[elts->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d 
using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", elts->idx); ++ return -EINVAL; ++ } ++ ++ count = (elts->end - elts->start) / sizeof(u16); ++ elts->start -= R128_INDEX_PRIM_OFFSET; ++ ++ if (elts->start & 0x7) { ++ DRM_ERROR("misaligned buffer 0x%x\n", elts->start); ++ return -EINVAL; ++ } ++ if (elts->start < buf->used) { ++ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); ++ return -EINVAL; ++ } ++ ++ buf->used = elts->end; ++ buf_priv->prim = elts->prim; ++ buf_priv->discard = elts->discard; ++ ++ r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_blit_t *blit = data; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx); ++ ++ if (blit->idx < 0 || blit->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ blit->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ ret = r128_cce_dispatch_blit(dev, file_priv, blit); ++ ++ COMMIT_RING(); ++ return ret; ++} ++ ++static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_depth_t *depth = data; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ ret = -EINVAL; ++ switch (depth->func) { ++ case R128_WRITE_SPAN: ++ ret = r128_cce_dispatch_write_span(dev, depth); ++ break; ++ case R128_WRITE_PIXELS: ++ ret = r128_cce_dispatch_write_pixels(dev, depth); ++ break; ++ case R128_READ_SPAN: ++ ret = r128_cce_dispatch_read_span(dev, depth); ++ break; ++ case R128_READ_PIXELS: ++ ret = r128_cce_dispatch_read_pixels(dev, depth); ++ break; ++ } ++ ++ COMMIT_RING(); ++ return ret; ++} ++ ++static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_stipple_t *stipple = data; ++ u32 mask[32]; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) ++ return -EFAULT; ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ r128_cce_dispatch_stipple(dev, mask); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_indirect_t *indirect = data; ++#if 0 ++ RING_LOCALS; ++#endif ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", ++ indirect->idx, indirect->start, indirect->end, ++ indirect->discard); ++ ++ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ indirect->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ buf = dma->buflist[indirect->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ 
DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", indirect->idx); ++ return -EINVAL; ++ } ++ ++ if (indirect->start < buf->used) { ++ DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", ++ indirect->start, buf->used); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf->used = indirect->end; ++ buf_priv->discard = indirect->discard; ++ ++#if 0 ++ /* Wait for the 3D stream to idle before the indirect buffer ++ * containing 2D acceleration commands is processed. ++ */ ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++#endif ++ ++ /* Dispatch the indirect buffer full of commands from the ++ * X server. This is insecure and is thus only available to ++ * privileged clients. ++ */ ++ r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ switch (param->param) { ++ case R128_PARAM_IRQ_NR: ++ value = dev->irq; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) ++{ ++ if (dev->dev_private) { ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ if (dev_priv->page_flipping) { ++ r128_do_cleanup_pageflip(dev); ++ } ++ } ++} ++ ++void r128_driver_lastclose(struct drm_device * dev) ++{ ++ r128_do_cleanup_cce(dev); ++} ++ ++struct drm_ioctl_desc r128_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), ++}; ++ ++int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r300_cmdbuf.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r300_cmdbuf.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r300_cmdbuf.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r300_cmdbuf.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 
+1,1198 @@ ++/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*- ++ * ++ * Copyright (C) The Weather Channel, Inc. 2002. ++ * Copyright (C) 2004 Nicolai Haehnle. ++ * All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Nicolai Haehnle ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++#include "r300_reg.h" ++ ++#define R300_SIMULTANEOUS_CLIPRECTS 4 ++ ++/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects ++ */ ++static const int r300_cliprect_cntl[4] = { ++ 0xAAAA, ++ 0xEEEE, ++ 0xFEFE, ++ 0xFFFE ++}; ++ ++/** ++ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command ++ * buffer, starting with index n. 
++ */ ++static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, int n) ++{ ++ struct drm_clip_rect box; ++ int nr; ++ int i; ++ RING_LOCALS; ++ ++ nr = cmdbuf->nbox - n; ++ if (nr > R300_SIMULTANEOUS_CLIPRECTS) ++ nr = R300_SIMULTANEOUS_CLIPRECTS; ++ ++ DRM_DEBUG("%i cliprects\n", nr); ++ ++ if (nr) { ++ BEGIN_RING(6 + nr * 2); ++ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); ++ ++ for (i = 0; i < nr; ++i) { ++ if (DRM_COPY_FROM_USER_UNCHECKED ++ (&box, &cmdbuf->boxes[n + i], sizeof(box))) { ++ DRM_ERROR("copy cliprect faulted\n"); ++ return -EFAULT; ++ } ++ ++ box.x2--; /* Hardware expects inclusive bottom-right corner */ ++ box.y2--; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ++ box.x1 = (box.x1) & ++ R300_CLIPRECT_MASK; ++ box.y1 = (box.y1) & ++ R300_CLIPRECT_MASK; ++ box.x2 = (box.x2) & ++ R300_CLIPRECT_MASK; ++ box.y2 = (box.y2) & ++ R300_CLIPRECT_MASK; ++ } else { ++ box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ } ++ ++ OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) | ++ (box.y1 << R300_CLIPRECT_Y_SHIFT)); ++ OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) | ++ (box.y2 << R300_CLIPRECT_Y_SHIFT)); ++ ++ } ++ ++ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]); ++ ++ /* TODO/SECURITY: Force scissors to a safe value, otherwise the ++ * client might be able to trample over memory. ++ * The impact should be very limited, but I'd rather be safe than ++ * sorry. ++ */ ++ OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1)); ++ OUT_RING(0); ++ OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK); ++ ADVANCE_RING(); ++ } else { ++ /* Why we allow zero cliprect rendering: ++ * There are some commands in a command buffer that must be submitted ++ * even when there are no cliprects, e.g. DMA buffer discard ++ * or state setting (though state setting could be avoided by ++ * simulating a loss of context). ++ * ++ * Now since the cmdbuf interface is so chaotic right now (and is ++ * bound to remain that way for a bit until things settle down), ++ * it is basically impossible to filter out the commands that are ++ * necessary and those that aren't. ++ * ++ * So I choose the safe way and don't do any filtering at all; ++ * instead, I simply set up the engine so that all rendering ++ * can't produce any fragments. 
++ */ ++ BEGIN_RING(2); ++ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0); ++ ADVANCE_RING(); ++ } ++ ++ /* flus cache and wait idle clean after cliprect change */ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(R300_RB3D_DC_FLUSH); ++ ADVANCE_RING(); ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ ADVANCE_RING(); ++ /* set flush flag */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED; ++ ++ return 0; ++} ++ ++static u8 r300_reg_flags[0x10000 >> 2]; ++ ++void r300_init_reg_flags(struct drm_device *dev) ++{ ++ int i; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ memset(r300_reg_flags, 0, 0x10000 >> 2); ++#define ADD_RANGE_MARK(reg, count,mark) \ ++ for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\ ++ r300_reg_flags[i]|=(mark); ++ ++#define MARK_SAFE 1 ++#define MARK_CHECK_OFFSET 2 ++ ++#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE) ++ ++ /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */ ++ ADD_RANGE(R300_SE_VPORT_XSCALE, 6); ++ ADD_RANGE(R300_VAP_CNTL, 1); ++ ADD_RANGE(R300_SE_VTE_CNTL, 2); ++ ADD_RANGE(0x2134, 2); ++ ADD_RANGE(R300_VAP_CNTL_STATUS, 1); ++ ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2); ++ ADD_RANGE(0x21DC, 1); ++ ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); ++ ADD_RANGE(R300_VAP_CLIP_X_0, 4); ++ ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1); ++ ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); ++ ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); ++ ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); ++ ADD_RANGE(R300_GB_ENABLE, 1); ++ ADD_RANGE(R300_GB_MSPOS0, 5); ++ ADD_RANGE(R300_TX_INVALTAGS, 1); ++ ADD_RANGE(R300_TX_ENABLE, 1); ++ ADD_RANGE(0x4200, 4); ++ ADD_RANGE(0x4214, 1); ++ ADD_RANGE(R300_RE_POINTSIZE, 1); ++ ADD_RANGE(0x4230, 3); ++ ADD_RANGE(R300_RE_LINE_CNT, 1); ++ ADD_RANGE(R300_RE_UNK4238, 1); ++ ADD_RANGE(0x4260, 3); ++ ADD_RANGE(R300_RE_SHADE, 4); ++ ADD_RANGE(R300_RE_POLYGON_MODE, 5); ++ ADD_RANGE(R300_RE_ZBIAS_CNTL, 1); ++ ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4); ++ ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1); ++ ADD_RANGE(R300_RE_CULL_CNTL, 1); ++ ADD_RANGE(0x42C0, 2); ++ ADD_RANGE(R300_RS_CNTL_0, 2); ++ ++ ADD_RANGE(R300_SC_HYPERZ, 2); ++ ADD_RANGE(0x43E8, 1); ++ ++ ADD_RANGE(0x46A4, 5); ++ ++ ADD_RANGE(R300_RE_FOG_STATE, 1); ++ ADD_RANGE(R300_FOG_COLOR_R, 3); ++ ADD_RANGE(R300_PP_ALPHA_TEST, 2); ++ ADD_RANGE(0x4BD8, 1); ++ ADD_RANGE(R300_PFS_PARAM_0_X, 64); ++ ADD_RANGE(0x4E00, 1); ++ ADD_RANGE(R300_RB3D_CBLEND, 2); ++ ADD_RANGE(R300_RB3D_COLORMASK, 1); ++ ADD_RANGE(R300_RB3D_BLEND_COLOR, 3); ++ ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */ ++ ADD_RANGE(R300_RB3D_COLORPITCH0, 1); ++ ADD_RANGE(0x4E50, 9); ++ ADD_RANGE(0x4E88, 1); ++ ADD_RANGE(0x4EA0, 2); ++ ADD_RANGE(R300_ZB_CNTL, 3); ++ ADD_RANGE(R300_ZB_FORMAT, 4); ++ ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */ ++ ADD_RANGE(R300_ZB_DEPTHPITCH, 1); ++ ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1); ++ ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13); ++ ++ ADD_RANGE(R300_TX_FILTER_0, 16); ++ ADD_RANGE(R300_TX_FILTER1_0, 16); ++ ADD_RANGE(R300_TX_SIZE_0, 16); ++ ADD_RANGE(R300_TX_FORMAT_0, 16); ++ ADD_RANGE(R300_TX_PITCH_0, 16); ++ /* Texture offset is dangerous and needs more checking */ ++ ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); ++ ADD_RANGE(R300_TX_CHROMA_KEY_0, 16); ++ ADD_RANGE(R300_TX_BORDER_COLOR_0, 16); ++ ++ /* Sporadic registers used as primitives are emitted */ ++ ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1); ++ ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1); ++ 
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8); ++ ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ++ ADD_RANGE(R500_VAP_INDEX_OFFSET, 1); ++ ADD_RANGE(R500_US_CONFIG, 2); ++ ADD_RANGE(R500_US_CODE_ADDR, 3); ++ ADD_RANGE(R500_US_FC_CTRL, 1); ++ ADD_RANGE(R500_RS_IP_0, 16); ++ ADD_RANGE(R500_RS_INST_0, 16); ++ ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2); ++ ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2); ++ ADD_RANGE(R500_ZB_FIFO_SIZE, 2); ++ } else { ++ ADD_RANGE(R300_PFS_CNTL_0, 3); ++ ADD_RANGE(R300_PFS_NODE_0, 4); ++ ADD_RANGE(R300_PFS_TEXI_0, 64); ++ ADD_RANGE(R300_PFS_INSTR0_0, 64); ++ ADD_RANGE(R300_PFS_INSTR1_0, 64); ++ ADD_RANGE(R300_PFS_INSTR2_0, 64); ++ ADD_RANGE(R300_PFS_INSTR3_0, 64); ++ ADD_RANGE(R300_RS_INTERP_0, 8); ++ ADD_RANGE(R300_RS_ROUTE_0, 8); ++ ++ } ++} ++ ++static __inline__ int r300_check_range(unsigned reg, int count) ++{ ++ int i; ++ if (reg & ~0xffff) ++ return -1; ++ for (i = (reg >> 2); i < (reg >> 2) + count; i++) ++ if (r300_reg_flags[i] != MARK_SAFE) ++ return 1; ++ return 0; ++} ++ ++static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * ++ dev_priv, ++ drm_radeon_kcmd_buffer_t ++ * cmdbuf, ++ drm_r300_cmd_header_t ++ header) ++{ ++ int reg; ++ int sz; ++ int i; ++ int values[64]; ++ RING_LOCALS; ++ ++ sz = header.packet0.count; ++ reg = (header.packet0.reghi << 8) | header.packet0.reglo; ++ ++ if ((sz > 64) || (sz < 0)) { ++ DRM_ERROR ++ ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", ++ reg, sz); ++ return -EINVAL; ++ } ++ for (i = 0; i < sz; i++) { ++ values[i] = ((int *)cmdbuf->buf)[i]; ++ switch (r300_reg_flags[(reg >> 2) + i]) { ++ case MARK_SAFE: ++ break; ++ case MARK_CHECK_OFFSET: ++ if (!radeon_check_offset(dev_priv, (u32) values[i])) { ++ DRM_ERROR ++ ("Offset failed range check (reg=%04x sz=%d)\n", ++ reg, sz); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("Register %04x failed check as flag=%02x\n", ++ reg + i * 4, r300_reg_flags[(reg >> 2) + i]); ++ return -EINVAL; ++ } ++ } ++ ++ BEGIN_RING(1 + sz); ++ OUT_RING(CP_PACKET0(reg, sz - 1)); ++ OUT_RING_TABLE(values, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * 4; ++ cmdbuf->bufsz -= sz * 4; ++ ++ return 0; ++} ++ ++/** ++ * Emits a packet0 setting arbitrary registers. ++ * Called by r300_do_cp_cmdbuf. ++ * ++ * Note that checks are performed on contents and addresses of the registers ++ */ ++static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int reg; ++ int sz; ++ RING_LOCALS; ++ ++ sz = header.packet0.count; ++ reg = (header.packet0.reghi << 8) | header.packet0.reglo; ++ ++ DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz); ++ if (!sz) ++ return 0; ++ ++ if (sz * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ if (reg + sz * 4 >= 0x10000) { ++ DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, ++ sz); ++ return -EINVAL; ++ } ++ ++ if (r300_check_range(reg, sz)) { ++ /* go and check everything */ ++ return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, ++ header); ++ } ++ /* the rest of the data is safe to emit, whatever the values the user passed */ ++ ++ BEGIN_RING(1 + sz); ++ OUT_RING(CP_PACKET0(reg, sz - 1)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * 4; ++ cmdbuf->bufsz -= sz * 4; ++ ++ return 0; ++} ++ ++/** ++ * Uploads user-supplied vertex program instructions or parameters onto ++ * the graphics card. 
++ * Called by r300_do_cp_cmdbuf. ++ */ ++static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int sz; ++ int addr; ++ RING_LOCALS; ++ ++ sz = header.vpu.count; ++ addr = (header.vpu.adrhi << 8) | header.vpu.adrlo; ++ ++ if (!sz) ++ return 0; ++ if (sz * 16 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ /* VAP is very sensitive so we purge cache before we program it ++ * and we also flush its state before & after */ ++ BEGIN_RING(6); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(R300_RB3D_DC_FLUSH); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0)); ++ OUT_RING(0); ++ ADVANCE_RING(); ++ /* set flush flag */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED; ++ ++ BEGIN_RING(3 + sz * 4); ++ OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); ++ OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); ++ ADVANCE_RING(); ++ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0)); ++ OUT_RING(0); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * 16; ++ cmdbuf->bufsz -= sz * 16; ++ ++ return 0; ++} ++ ++/** ++ * Emit a clear packet from userspace. ++ * Called by r300_emit_packet3. ++ */ ++static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ RING_LOCALS; ++ ++ if (8 * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ BEGIN_RING(10); ++ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); ++ OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | ++ (1 << R300_PRIM_NUM_VERTICES_SHIFT)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, 8); ++ ADVANCE_RING(); ++ ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(R300_RB3D_DC_FLUSH); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ ADVANCE_RING(); ++ /* set flush flag */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED; ++ ++ cmdbuf->buf += 8 * 4; ++ cmdbuf->bufsz -= 8 * 4; ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ u32 header) ++{ ++ int count, i, k; ++#define MAX_ARRAY_PACKET 64 ++ u32 payload[MAX_ARRAY_PACKET]; ++ u32 narrays; ++ RING_LOCALS; ++ ++ count = (header >> 16) & 0x3fff; ++ ++ if ((count + 1) > MAX_ARRAY_PACKET) { ++ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", ++ count); ++ return -EINVAL; ++ } ++ memset(payload, 0, MAX_ARRAY_PACKET * 4); ++ memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); ++ ++ /* carefully check packet contents */ ++ ++ narrays = payload[0]; ++ k = 0; ++ i = 1; ++ while ((k < narrays) && (i < (count + 1))) { ++ i++; /* skip attribute field */ ++ if (!radeon_check_offset(dev_priv, payload[i])) { ++ DRM_ERROR ++ ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ if (k == narrays) ++ break; ++ /* have one more to process, they come in pairs */ ++ if (!radeon_check_offset(dev_priv, payload[i])) { ++ DRM_ERROR ++ ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ } ++ /* do the counts match what we expect ? 
*/ ++ if ((k != narrays) || (i != (count + 1))) { ++ DRM_ERROR ++ ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", ++ k, i, narrays, count + 1); ++ return -EINVAL; ++ } ++ ++ /* all clear, output packet */ ++ ++ BEGIN_RING(count + 2); ++ OUT_RING(header); ++ OUT_RING_TABLE(payload, count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count + 2) * 4; ++ cmdbuf->bufsz -= (count + 2) * 4; ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ u32 *cmd = (u32 *) cmdbuf->buf; ++ int count, ret; ++ RING_LOCALS; ++ ++ count=(cmd[0]>>16) & 0x3fff; ++ ++ if (cmd[0] & 0x8000) { ++ u32 offset; ++ ++ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL ++ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[2] << 10; ++ ret = !radeon_check_offset(dev_priv, offset); ++ if (ret) { ++ DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); ++ return -EINVAL; ++ } ++ } ++ ++ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && ++ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[3] << 10; ++ ret = !radeon_check_offset(dev_priv, offset); ++ if (ret) { ++ DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); ++ return -EINVAL; ++ } ++ ++ } ++ } ++ ++ BEGIN_RING(count+2); ++ OUT_RING(cmd[0]); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count+2)*4; ++ cmdbuf->bufsz -= (count+2)*4; ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ u32 *cmd; ++ int count; ++ int expected_count; ++ RING_LOCALS; ++ ++ cmd = (u32 *) cmdbuf->buf; ++ count = (cmd[0]>>16) & 0x3fff; ++ expected_count = cmd[1] >> 16; ++ if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) ++ expected_count = (expected_count+1)/2; ++ ++ if (count && count != expected_count) { ++ DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n", ++ count, expected_count); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(count+2); ++ OUT_RING(cmd[0]); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count+2)*4; ++ cmdbuf->bufsz -= (count+2)*4; ++ ++ if (!count) { ++ drm_r300_cmd_header_t header; ++ ++ if (cmdbuf->bufsz < 4*4 + sizeof(header)) { ++ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); ++ return -EINVAL; ++ } ++ ++ header.u = *(unsigned int *)cmdbuf->buf; ++ ++ cmdbuf->buf += sizeof(header); ++ cmdbuf->bufsz -= sizeof(header); ++ cmd = (u32 *) cmdbuf->buf; ++ ++ if (header.header.cmd_type != R300_CMD_PACKET3 || ++ header.packet3.packet != R300_CMD_PACKET3_RAW || ++ cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { ++ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); ++ return -EINVAL; ++ } ++ ++ if ((cmd[1] & 0x8000ffff) != 0x80000810) { ++ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); ++ return -EINVAL; ++ } ++ if (!radeon_check_offset(dev_priv, cmd[2])) { ++ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); ++ return -EINVAL; ++ } ++ if (cmd[3] != expected_count) { ++ DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", ++ cmd[3], expected_count); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(4); ++ OUT_RING(cmd[0]); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += 4*4; ++ cmdbuf->bufsz -= 4*4; ++ } ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t 
*cmdbuf) ++{ ++ u32 header; ++ int count; ++ RING_LOCALS; ++ ++ if (4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ /* Fixme !! This simply emits a packet without much checking. ++ We need to be smarter. */ ++ ++ /* obtain first word - actual packet3 header */ ++ header = *(u32 *) cmdbuf->buf; ++ ++ /* Is it packet 3 ? */ ++ if ((header >> 30) != 0x3) { ++ DRM_ERROR("Not a packet3 header (0x%08x)\n", header); ++ return -EINVAL; ++ } ++ ++ count = (header >> 16) & 0x3fff; ++ ++ /* Check again now that we know how much data to expect */ ++ if ((count + 2) * 4 > cmdbuf->bufsz) { ++ DRM_ERROR ++ ("Expected packet3 of length %d but have only %d bytes left\n", ++ (count + 2) * 4, cmdbuf->bufsz); ++ return -EINVAL; ++ } ++ ++ /* Is it a packet type we know about ? */ ++ switch (header & 0xff00) { ++ case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ ++ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); ++ ++ case RADEON_CNTL_BITBLT_MULTI: ++ return r300_emit_bitblt_multi(dev_priv, cmdbuf); ++ ++ case RADEON_CP_INDX_BUFFER: ++ DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n"); ++ return -EINVAL; ++ case RADEON_CP_3D_DRAW_IMMD_2: ++ /* triggers drawing using in-packet vertex data */ ++ case RADEON_CP_3D_DRAW_VBUF_2: ++ /* triggers drawing of vertex buffers setup elsewhere */ ++ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | ++ RADEON_PURGE_EMITED); ++ break; ++ case RADEON_CP_3D_DRAW_INDX_2: ++ /* triggers drawing using indices to vertex buffer */ ++ /* whenever we send vertex we clear flush & purge */ ++ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | ++ RADEON_PURGE_EMITED); ++ return r300_emit_draw_indx_2(dev_priv, cmdbuf); ++ case RADEON_WAIT_FOR_IDLE: ++ case RADEON_CP_NOP: ++ /* these packets are safe */ ++ break; ++ default: ++ DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(count + 2); ++ OUT_RING(header); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count + 2) * 4; ++ cmdbuf->bufsz -= (count + 2) * 4; ++ ++ return 0; ++} ++ ++/** ++ * Emit a rendering packet3 from userspace. ++ * Called by r300_do_cp_cmdbuf. ++ */ ++static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int n; ++ int ret; ++ char *orig_buf = cmdbuf->buf; ++ int orig_bufsz = cmdbuf->bufsz; ++ ++ /* This is a do-while-loop so that we run the interior at least once, ++ * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. 
++ */ ++ n = 0; ++ do { ++ if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) { ++ ret = r300_emit_cliprects(dev_priv, cmdbuf, n); ++ if (ret) ++ return ret; ++ ++ cmdbuf->buf = orig_buf; ++ cmdbuf->bufsz = orig_bufsz; ++ } ++ ++ switch (header.packet3.packet) { ++ case R300_CMD_PACKET3_CLEAR: ++ DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n"); ++ ret = r300_emit_clear(dev_priv, cmdbuf); ++ if (ret) { ++ DRM_ERROR("r300_emit_clear failed\n"); ++ return ret; ++ } ++ break; ++ ++ case R300_CMD_PACKET3_RAW: ++ DRM_DEBUG("R300_CMD_PACKET3_RAW\n"); ++ ret = r300_emit_raw_packet3(dev_priv, cmdbuf); ++ if (ret) { ++ DRM_ERROR("r300_emit_raw_packet3 failed\n"); ++ return ret; ++ } ++ break; ++ ++ default: ++ DRM_ERROR("bad packet3 type %i at %p\n", ++ header.packet3.packet, ++ cmdbuf->buf - sizeof(header)); ++ return -EINVAL; ++ } ++ ++ n += R300_SIMULTANEOUS_CLIPRECTS; ++ } while (n < cmdbuf->nbox); ++ ++ return 0; ++} ++ ++/* Some of the R300 chips seem to be extremely touchy about the two registers ++ * that are configured in r300_pacify. ++ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace ++ * sends a command buffer that contains only state setting commands and a ++ * vertex program/parameter upload sequence, this will eventually lead to a ++ * lockup, unless the sequence is bracketed by calls to r300_pacify. ++ * So we should take great care to *always* call r300_pacify before ++ * *anything* 3D related, and again afterwards. This is what the ++ * call bracket in r300_do_cp_cmdbuf is for. ++ */ ++ ++/** ++ * Emit the sequence to pacify R300. ++ */ ++static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) ++{ ++ uint32_t cache_z, cache_3d, cache_2d; ++ RING_LOCALS; ++ ++ cache_z = R300_ZC_FLUSH; ++ cache_2d = R300_RB2D_DC_FLUSH; ++ cache_3d = R300_RB3D_DC_FLUSH; ++ if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) { ++ /* we can purge, primitive where draw since last purge */ ++ cache_z |= R300_ZC_FREE; ++ cache_2d |= R300_RB2D_DC_FREE; ++ cache_3d |= R300_RB3D_DC_FREE; ++ } ++ ++ /* flush & purge zbuffer */ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); ++ OUT_RING(cache_z); ++ ADVANCE_RING(); ++ /* flush & purge 3d */ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(cache_3d); ++ ADVANCE_RING(); ++ /* flush & purge texture */ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0)); ++ OUT_RING(0); ++ ADVANCE_RING(); ++ /* FIXME: is this one really needed ? */ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0)); ++ OUT_RING(0); ++ ADVANCE_RING(); ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ ADVANCE_RING(); ++ /* flush & purge 2d through E2 as RB2D will trigger lockup */ ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(cache_2d); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_2D_IDLECLEAN | ++ RADEON_WAIT_HOST_IDLECLEAN); ++ ADVANCE_RING(); ++ /* set flush & purge flags */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; ++} ++ ++/** ++ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state. ++ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must ++ * be careful about how this function is called. 
++ */ ++static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ ++ buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; ++ buf->pending = 1; ++ buf->used = 0; ++} ++ ++static void r300_cmd_wait(drm_radeon_private_t * dev_priv, ++ drm_r300_cmd_header_t header) ++{ ++ u32 wait_until; ++ RING_LOCALS; ++ ++ if (!header.wait.flags) ++ return; ++ ++ wait_until = 0; ++ ++ switch(header.wait.flags) { ++ case R300_WAIT_2D: ++ wait_until = RADEON_WAIT_2D_IDLE; ++ break; ++ case R300_WAIT_3D: ++ wait_until = RADEON_WAIT_3D_IDLE; ++ break; ++ case R300_NEW_WAIT_2D_3D: ++ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE; ++ break; ++ case R300_NEW_WAIT_2D_2D_CLEAN: ++ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN; ++ break; ++ case R300_NEW_WAIT_3D_3D_CLEAN: ++ wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN; ++ break; ++ case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN: ++ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN; ++ wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN; ++ break; ++ default: ++ return; ++ } ++ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(wait_until); ++ ADVANCE_RING(); ++} ++ ++static int r300_scratch(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ u32 *ref_age_base; ++ u32 i, buf_idx, h_pending; ++ RING_LOCALS; ++ ++ if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) { ++ return -EINVAL; ++ } ++ ++ if (header.scratch.reg >= 5) { ++ return -EINVAL; ++ } ++ ++ dev_priv->scratch_ages[header.scratch.reg] ++; ++ ++ ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf); ++ ++ cmdbuf->buf += sizeof(uint64_t); ++ cmdbuf->bufsz -= sizeof(uint64_t); ++ ++ for (i=0; i < header.scratch.n_bufs; i++) { ++ buf_idx = *(u32 *)cmdbuf->buf; ++ buf_idx *= 2; /* 8 bytes per buf */ ++ ++ if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { ++ return -EINVAL; ++ } ++ ++ if (h_pending == 0) { ++ return -EINVAL; ++ } ++ ++ h_pending--; ++ ++ if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { ++ return -EINVAL; ++ } ++ ++ cmdbuf->buf += sizeof(buf_idx); ++ cmdbuf->bufsz -= sizeof(buf_idx); ++ } ++ ++ BEGIN_RING(2); ++ OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) ); ++ OUT_RING( dev_priv->scratch_ages[header.scratch.reg] ); ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/** ++ * Uploads user-supplied vertex program instructions or parameters onto ++ * the graphics card. ++ * Called by r300_do_cp_cmdbuf. ++ */ ++static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int sz; ++ int addr; ++ int type; ++ int clamp; ++ int stride; ++ RING_LOCALS; ++ ++ sz = header.r500fp.count; ++ /* address is 9 bits 0 - 8, bit 1 of flags is part of address */ ++ addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo; ++ ++ type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE); ++ clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); ++ ++ addr |= (type << 16); ++ addr |= (clamp << 17); ++ ++ stride = type ? 
4 : 6; ++ ++ DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); ++ if (!sz) ++ return 0; ++ if (sz * stride * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ BEGIN_RING(3 + sz * stride); ++ OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); ++ OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); ++ ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * stride * 4; ++ cmdbuf->bufsz -= sz * stride * 4; ++ ++ return 0; ++} ++ ++ ++/** ++ * Parses and validates a user-supplied command buffer and emits appropriate ++ * commands on the DMA ring buffer. ++ * Called by the ioctl handler function radeon_cp_cmdbuf. ++ */ ++int r300_do_cp_cmdbuf(struct drm_device *dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf = NULL; ++ int emit_dispatch_age = 0; ++ int ret = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ /* pacify */ ++ r300_pacify(dev_priv); ++ ++ if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) { ++ ret = r300_emit_cliprects(dev_priv, cmdbuf, 0); ++ if (ret) ++ goto cleanup; ++ } ++ ++ while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { ++ int idx; ++ drm_r300_cmd_header_t header; ++ ++ header.u = *(unsigned int *)cmdbuf->buf; ++ ++ cmdbuf->buf += sizeof(header); ++ cmdbuf->bufsz -= sizeof(header); ++ ++ switch (header.header.cmd_type) { ++ case R300_CMD_PACKET0: ++ ret = r300_emit_packet0(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_packet0 failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_VPU: ++ DRM_DEBUG("R300_CMD_VPU\n"); ++ ret = r300_emit_vpu(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_vpu failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_PACKET3: ++ DRM_DEBUG("R300_CMD_PACKET3\n"); ++ ret = r300_emit_packet3(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_packet3 failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_END3D: ++ DRM_DEBUG("R300_CMD_END3D\n"); ++ /* TODO: ++ Ideally userspace driver should not need to issue this call, ++ i.e. the drm driver should issue it automatically and prevent ++ lockups. ++ ++ In practice, we do not understand why this call is needed and what ++ it does (except for some vague guesses that it has to do with cache ++ coherence) and so the user space driver does it. ++ ++ Once we are sure which uses prevent lockups the code could be moved ++ into the kernel and the userspace driver will not ++ need to use this command. 
++ ++ Note that issuing this command does not hurt anything ++ except, possibly, performance */ ++ r300_pacify(dev_priv); ++ break; ++ ++ case R300_CMD_CP_DELAY: ++ /* simple enough, we can do it here */ ++ DRM_DEBUG("R300_CMD_CP_DELAY\n"); ++ { ++ int i; ++ RING_LOCALS; ++ ++ BEGIN_RING(header.delay.count); ++ for (i = 0; i < header.delay.count; i++) ++ OUT_RING(RADEON_CP_PACKET2); ++ ADVANCE_RING(); ++ } ++ break; ++ ++ case R300_CMD_DMA_DISCARD: ++ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); ++ idx = header.dma.buf_idx; ++ if (idx < 0 || idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ idx, dma->buf_count - 1); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ ++ buf = dma->buflist[idx]; ++ if (buf->file_priv != file_priv || buf->pending) { ++ DRM_ERROR("bad buffer %p %p %d\n", ++ buf->file_priv, file_priv, ++ buf->pending); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ ++ emit_dispatch_age = 1; ++ r300_discard_buffer(dev, buf); ++ break; ++ ++ case R300_CMD_WAIT: ++ DRM_DEBUG("R300_CMD_WAIT\n"); ++ r300_cmd_wait(dev_priv, header); ++ break; ++ ++ case R300_CMD_SCRATCH: ++ DRM_DEBUG("R300_CMD_SCRATCH\n"); ++ ret = r300_scratch(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_scratch failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_R500FP: ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) { ++ DRM_ERROR("Calling r500 command on r300 card\n"); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ DRM_DEBUG("R300_CMD_R500FP\n"); ++ ret = r300_emit_r500fp(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_r500fp failed\n"); ++ goto cleanup; ++ } ++ break; ++ default: ++ DRM_ERROR("bad cmd_type %i at %p\n", ++ header.header.cmd_type, ++ cmdbuf->buf - sizeof(header)); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ } ++ ++ DRM_DEBUG("END\n"); ++ ++ cleanup: ++ r300_pacify(dev_priv); ++ ++ /* We emit the vertex buffer age here, outside the pacifier "brackets" ++ * for two reasons: ++ * (1) This may coalesce multiple age emissions into a single one and ++ * (2) more importantly, some chips lock up hard when scratch registers ++ * are written inside the pacifier bracket. ++ */ ++ if (emit_dispatch_age) { ++ RING_LOCALS; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch); ++ ADVANCE_RING(); ++ } ++ ++ COMMIT_RING(); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r300_reg.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r300_reg.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/r300_reg.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/r300_reg.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1778 @@ ++/************************************************************************** ++ ++Copyright (C) 2004-2005 Nicolai Haehnle et al. ++ ++Permission is hereby granted, free of charge, to any person obtaining a ++copy of this software and associated documentation files (the "Software"), ++to deal in the Software without restriction, including without limitation ++on the rights to use, copy, modify, merge, publish, distribute, sub ++license, and/or sell copies of the Software, and to permit persons to whom ++the Software is furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice (including the next ++paragraph) shall be included in all copies or substantial portions of the ++Software. 
++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, ++DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++USE OR OTHER DEALINGS IN THE SOFTWARE. ++ ++**************************************************************************/ ++ ++/* *INDENT-OFF* */ ++ ++#ifndef _R300_REG_H ++#define _R300_REG_H ++ ++#define R300_MC_INIT_MISC_LAT_TIMER 0x180 ++# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 ++# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4 ++# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8 ++# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12 ++# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16 ++# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20 ++# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24 ++# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28 ++ ++ ++#define R300_MC_INIT_GFX_LAT_TIMER 0x154 ++# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0 ++# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4 ++# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8 ++# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12 ++# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16 ++# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20 ++# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24 ++# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28 ++ ++/* ++ * This file contains registers and constants for the R300. They have been ++ * found mostly by examining command buffers captured using glxtest, as well ++ * as by extrapolating some known registers and constants from the R200. ++ * I am fairly certain that they are correct unless stated otherwise ++ * in comments. ++ */ ++ ++#define R300_SE_VPORT_XSCALE 0x1D98 ++#define R300_SE_VPORT_XOFFSET 0x1D9C ++#define R300_SE_VPORT_YSCALE 0x1DA0 ++#define R300_SE_VPORT_YOFFSET 0x1DA4 ++#define R300_SE_VPORT_ZSCALE 0x1DA8 ++#define R300_SE_VPORT_ZOFFSET 0x1DAC ++ ++ ++/* ++ * Vertex Array Processing (VAP) Control ++ * Stolen from r200 code from Christoph Brill (It's a guess!) ++ */ ++#define R300_VAP_CNTL 0x2080 ++ ++/* This register is written directly and also starts data section ++ * in many 3d CP_PACKET3's ++ */ ++#define R300_VAP_VF_CNTL 0x2084 ++# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0 ++# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0) ++# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0) ++# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0) ++# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0) ++# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0) ++# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0) ++# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0) ++# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0) ++# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0) ++# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0) ++# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0) ++ ++# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4 ++ /* State based - direct writes to registers trigger vertex ++ generation */ ++# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4) ++# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4) ++# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4) ++# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4) ++ ++ /* I don't think I saw these three used.. 
*/ ++# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6 ++# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9 ++# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10 ++ ++ /* index size - when not set the indices are assumed to be 16 bit */ ++# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11) ++ /* number of vertices */ ++# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16 ++ ++/* BEGIN: Wild guesses */ ++#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090 ++# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0) ++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1) ++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */ ++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */ ++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */ ++# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */ ++ ++#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094 ++ /* each of the following is 3 bits wide, specifies number ++ of components */ ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 ++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 ++/* END: Wild guesses */ ++ ++#define R300_SE_VTE_CNTL 0x20b0 ++# define R300_VPORT_X_SCALE_ENA 0x00000001 ++# define R300_VPORT_X_OFFSET_ENA 0x00000002 ++# define R300_VPORT_Y_SCALE_ENA 0x00000004 ++# define R300_VPORT_Y_OFFSET_ENA 0x00000008 ++# define R300_VPORT_Z_SCALE_ENA 0x00000010 ++# define R300_VPORT_Z_OFFSET_ENA 0x00000020 ++# define R300_VTX_XY_FMT 0x00000100 ++# define R300_VTX_Z_FMT 0x00000200 ++# define R300_VTX_W0_FMT 0x00000400 ++# define R300_VTX_W0_NORMALIZE 0x00000800 ++# define R300_VTX_ST_DENORMALIZED 0x00001000 ++ ++/* BEGIN: Vertex data assembly - lots of uncertainties */ ++ ++/* gap */ ++ ++#define R300_VAP_CNTL_STATUS 0x2140 ++# define R300_VC_NO_SWAP (0 << 0) ++# define R300_VC_16BIT_SWAP (1 << 0) ++# define R300_VC_32BIT_SWAP (2 << 0) ++# define R300_VAP_TCL_BYPASS (1 << 8) ++ ++/* gap */ ++ ++/* Where do we get our vertex data? ++ * ++ * Vertex data either comes either from immediate mode registers or from ++ * vertex arrays. ++ * There appears to be no mixed mode (though we can force the pitch of ++ * vertex arrays to 0, effectively reusing the same element over and over ++ * again). ++ * ++ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure ++ * if these registers influence vertex array processing. ++ * ++ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3. ++ * ++ * In both cases, vertex attributes are then passed through INPUT_ROUTE. ++ * ++ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data ++ * into the vertex processor's input registers. ++ * The first word routes the first input, the second word the second, etc. ++ * The corresponding input is routed into the register with the given index. ++ * The list is ended by a word with INPUT_ROUTE_END set. ++ * ++ * Always set COMPONENTS_4 in immediate mode. 
++ */ ++ ++#define R300_VAP_INPUT_ROUTE_0_0 0x2150 ++# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0) ++# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0) ++# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0) ++# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0) ++# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */ ++# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8 ++# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */ ++# define R300_VAP_INPUT_ROUTE_END (1 << 13) ++# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */ ++# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */ ++# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */ ++# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */ ++#define R300_VAP_INPUT_ROUTE_0_1 0x2154 ++#define R300_VAP_INPUT_ROUTE_0_2 0x2158 ++#define R300_VAP_INPUT_ROUTE_0_3 0x215C ++#define R300_VAP_INPUT_ROUTE_0_4 0x2160 ++#define R300_VAP_INPUT_ROUTE_0_5 0x2164 ++#define R300_VAP_INPUT_ROUTE_0_6 0x2168 ++#define R300_VAP_INPUT_ROUTE_0_7 0x216C ++ ++/* gap */ ++ ++/* Notes: ++ * - always set up to produce at least two attributes: ++ * if vertex program uses only position, fglrx will set normal, too ++ * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal. ++ */ ++#define R300_VAP_INPUT_CNTL_0 0x2180 ++# define R300_INPUT_CNTL_0_COLOR 0x00000001 ++#define R300_VAP_INPUT_CNTL_1 0x2184 ++# define R300_INPUT_CNTL_POS 0x00000001 ++# define R300_INPUT_CNTL_NORMAL 0x00000002 ++# define R300_INPUT_CNTL_COLOR 0x00000004 ++# define R300_INPUT_CNTL_TC0 0x00000400 ++# define R300_INPUT_CNTL_TC1 0x00000800 ++# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */ ++# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */ ++# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */ ++# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */ ++# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */ ++# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */ ++ ++/* gap */ ++ ++/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0 ++ * are set to a swizzling bit pattern, other words are 0. ++ * ++ * In immediate mode, the pattern is always set to xyzw. In vertex array ++ * mode, the swizzling pattern is e.g. used to set zw components in texture ++ * coordinates with only tweo components. ++ */ ++#define R300_VAP_INPUT_ROUTE_1_0 0x21E0 ++# define R300_INPUT_ROUTE_SELECT_X 0 ++# define R300_INPUT_ROUTE_SELECT_Y 1 ++# define R300_INPUT_ROUTE_SELECT_Z 2 ++# define R300_INPUT_ROUTE_SELECT_W 3 ++# define R300_INPUT_ROUTE_SELECT_ZERO 4 ++# define R300_INPUT_ROUTE_SELECT_ONE 5 ++# define R300_INPUT_ROUTE_SELECT_MASK 7 ++# define R300_INPUT_ROUTE_X_SHIFT 0 ++# define R300_INPUT_ROUTE_Y_SHIFT 3 ++# define R300_INPUT_ROUTE_Z_SHIFT 6 ++# define R300_INPUT_ROUTE_W_SHIFT 9 ++# define R300_INPUT_ROUTE_ENABLE (15 << 12) ++#define R300_VAP_INPUT_ROUTE_1_1 0x21E4 ++#define R300_VAP_INPUT_ROUTE_1_2 0x21E8 ++#define R300_VAP_INPUT_ROUTE_1_3 0x21EC ++#define R300_VAP_INPUT_ROUTE_1_4 0x21F0 ++#define R300_VAP_INPUT_ROUTE_1_5 0x21F4 ++#define R300_VAP_INPUT_ROUTE_1_6 0x21F8 ++#define R300_VAP_INPUT_ROUTE_1_7 0x21FC ++ ++/* END: Vertex data assembly */ ++ ++/* gap */ ++ ++/* BEGIN: Upload vertex program and data */ ++ ++/* ++ * The programmable vertex shader unit has a memory bank of unknown size ++ * that can be written to in 16 byte units by writing the address into ++ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs). ++ * ++ * Pointers into the memory bank are always in multiples of 16 bytes. 
++ * ++ * The memory bank is divided into areas with fixed meaning. ++ * ++ * Starting at address UPLOAD_PROGRAM: Vertex program instructions. ++ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB), ++ * whereas the difference between known addresses suggests size 512. ++ * ++ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters. ++ * Native reported limits and the VPI layout suggest size 256, whereas ++ * difference between known addresses suggests size 512. ++ * ++ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the ++ * floating point pointsize. The exact purpose of this state is uncertain, ++ * as there is also the R300_RE_POINTSIZE register. ++ * ++ * Multiple vertex programs and parameter sets can be loaded at once, ++ * which could explain the size discrepancy. ++ */ ++#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200 ++# define R300_PVS_UPLOAD_PROGRAM 0x00000000 ++# define R300_PVS_UPLOAD_PARAMETERS 0x00000200 ++# define R300_PVS_UPLOAD_POINTSIZE 0x00000406 ++ ++/* gap */ ++ ++#define R300_VAP_PVS_UPLOAD_DATA 0x2208 ++ ++/* END: Upload vertex program and data */ ++ ++/* gap */ ++ ++/* I do not know the purpose of this register. However, I do know that ++ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL ++ * for normal rendering. ++ */ ++#define R300_VAP_UNKNOWN_221C 0x221C ++# define R300_221C_NORMAL 0x00000000 ++# define R300_221C_CLEAR 0x0001C000 ++ ++/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first ++ * plane is per-pixel and the second plane is per-vertex. ++ * ++ * This was determined by experimentation alone but I believe it is correct. ++ * ++ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest. ++ */ ++#define R300_VAP_CLIP_X_0 0x2220 ++#define R300_VAP_CLIP_X_1 0x2224 ++#define R300_VAP_CLIP_Y_0 0x2228 ++#define R300_VAP_CLIP_Y_1 0x2230 ++ ++/* gap */ ++ ++/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between ++ * rendering commands and overwriting vertex program parameters. ++ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and ++ * avoids bugs caused by still running shaders reading bad data from memory. ++ */ ++#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284 ++ ++/* Absolutely no clue what this register is about. */ ++#define R300_VAP_UNKNOWN_2288 0x2288 ++# define R300_2288_R300 0x00750000 /* -- nh */ ++# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */ ++ ++/* gap */ ++ ++/* Addresses are relative to the vertex program instruction area of the ++ * memory bank. PROGRAM_END points to the last instruction of the active ++ * program ++ * ++ * The meaning of the two UNKNOWN fields is obviously not known. However, ++ * experiments so far have shown that both *must* point to an instruction ++ * inside the vertex program, otherwise the GPU locks up. ++ * ++ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and ++ * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to ++ * position takes place. ++ * ++ * Most likely this is used to ignore rest of the program in cases ++ * where group of verts arent visible. For some reason this "section" ++ * is sometimes accepted other instruction that have no relationship with ++ * position calculations. ++ */ ++#define R300_VAP_PVS_CNTL_1 0x22D0 ++# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 ++# define R300_PVS_CNTL_1_POS_END_SHIFT 10 ++# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 ++/* Addresses are relative the the vertex program parameters area. 
*/ ++#define R300_VAP_PVS_CNTL_2 0x22D4 ++# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 ++# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 ++#define R300_VAP_PVS_CNTL_3 0x22D8 ++# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10 ++# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0 ++ ++/* The entire range from 0x2300 to 0x2AC inclusive seems to be used for ++ * immediate vertices ++ */ ++#define R300_VAP_VTX_COLOR_R 0x2464 ++#define R300_VAP_VTX_COLOR_G 0x2468 ++#define R300_VAP_VTX_COLOR_B 0x246C ++#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */ ++#define R300_VAP_VTX_POS_0_Y_1 0x2494 ++#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */ ++#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */ ++#define R300_VAP_VTX_POS_0_Y_2 0x24A4 ++#define R300_VAP_VTX_POS_0_Z_2 0x24A8 ++/* write 0 to indicate end of packet? */ ++#define R300_VAP_VTX_END_OF_PKT 0x24AC ++ ++/* gap */ ++ ++/* These are values from r300_reg/r300_reg.h - they are known to be correct ++ * and are here so we can use one register file instead of several ++ * - Vladimir ++ */ ++#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000 ++# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0) ++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1) ++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2) ++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3) ++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4) ++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5) ++# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16) ++ ++#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004 ++ /* each of the following is 3 bits wide, specifies number ++ of components */ ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 ++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 ++ ++/* UNK30 seems to enables point to quad transformation on textures ++ * (or something closely related to that). ++ * This bit is rather fatal at the time being due to lackings at pixel ++ * shader side ++ */ ++#define R300_GB_ENABLE 0x4008 ++# define R300_GB_POINT_STUFF_ENABLE (1<<0) ++# define R300_GB_LINE_STUFF_ENABLE (1<<1) ++# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2) ++# define R300_GB_STENCIL_AUTO_ENABLE (1<<4) ++# define R300_GB_UNK31 (1<<31) ++ /* each of the following is 2 bits wide */ ++#define R300_GB_TEX_REPLICATE 0 ++#define R300_GB_TEX_ST 1 ++#define R300_GB_TEX_STR 2 ++# define R300_GB_TEX0_SOURCE_SHIFT 16 ++# define R300_GB_TEX1_SOURCE_SHIFT 18 ++# define R300_GB_TEX2_SOURCE_SHIFT 20 ++# define R300_GB_TEX3_SOURCE_SHIFT 22 ++# define R300_GB_TEX4_SOURCE_SHIFT 24 ++# define R300_GB_TEX5_SOURCE_SHIFT 26 ++# define R300_GB_TEX6_SOURCE_SHIFT 28 ++# define R300_GB_TEX7_SOURCE_SHIFT 30 ++ ++/* MSPOS - positions for multisample antialiasing (?) 
*/ ++#define R300_GB_MSPOS0 0x4010 ++ /* shifts - each of the fields is 4 bits */ ++# define R300_GB_MSPOS0__MS_X0_SHIFT 0 ++# define R300_GB_MSPOS0__MS_Y0_SHIFT 4 ++# define R300_GB_MSPOS0__MS_X1_SHIFT 8 ++# define R300_GB_MSPOS0__MS_Y1_SHIFT 12 ++# define R300_GB_MSPOS0__MS_X2_SHIFT 16 ++# define R300_GB_MSPOS0__MS_Y2_SHIFT 20 ++# define R300_GB_MSPOS0__MSBD0_Y 24 ++# define R300_GB_MSPOS0__MSBD0_X 28 ++ ++#define R300_GB_MSPOS1 0x4014 ++# define R300_GB_MSPOS1__MS_X3_SHIFT 0 ++# define R300_GB_MSPOS1__MS_Y3_SHIFT 4 ++# define R300_GB_MSPOS1__MS_X4_SHIFT 8 ++# define R300_GB_MSPOS1__MS_Y4_SHIFT 12 ++# define R300_GB_MSPOS1__MS_X5_SHIFT 16 ++# define R300_GB_MSPOS1__MS_Y5_SHIFT 20 ++# define R300_GB_MSPOS1__MSBD1 24 ++ ++ ++#define R300_GB_TILE_CONFIG 0x4018 ++# define R300_GB_TILE_ENABLE (1<<0) ++# define R300_GB_TILE_PIPE_COUNT_RV300 0 ++# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1) ++# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1) ++# define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1) ++# define R300_GB_TILE_SIZE_8 0 ++# define R300_GB_TILE_SIZE_16 (1<<4) ++# define R300_GB_TILE_SIZE_32 (2<<4) ++# define R300_GB_SUPER_SIZE_1 (0<<6) ++# define R300_GB_SUPER_SIZE_2 (1<<6) ++# define R300_GB_SUPER_SIZE_4 (2<<6) ++# define R300_GB_SUPER_SIZE_8 (3<<6) ++# define R300_GB_SUPER_SIZE_16 (4<<6) ++# define R300_GB_SUPER_SIZE_32 (5<<6) ++# define R300_GB_SUPER_SIZE_64 (6<<6) ++# define R300_GB_SUPER_SIZE_128 (7<<6) ++# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */ ++# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */ ++# define R300_GB_SUPER_TILE_A 0 ++# define R300_GB_SUPER_TILE_B (1<<15) ++# define R300_GB_SUBPIXEL_1_12 0 ++# define R300_GB_SUBPIXEL_1_16 (1<<16) ++ ++#define R300_GB_FIFO_SIZE 0x4024 ++ /* each of the following is 2 bits wide */ ++#define R300_GB_FIFO_SIZE_32 0 ++#define R300_GB_FIFO_SIZE_64 1 ++#define R300_GB_FIFO_SIZE_128 2 ++#define R300_GB_FIFO_SIZE_256 3 ++# define R300_SC_IFIFO_SIZE_SHIFT 0 ++# define R300_SC_TZFIFO_SIZE_SHIFT 2 ++# define R300_SC_BFIFO_SIZE_SHIFT 4 ++ ++# define R300_US_OFIFO_SIZE_SHIFT 12 ++# define R300_US_WFIFO_SIZE_SHIFT 14 ++ /* the following use the same constants as above, but meaning is ++ is times 2 (i.e. instead of 32 words it means 64 */ ++# define R300_RS_TFIFO_SIZE_SHIFT 6 ++# define R300_RS_CFIFO_SIZE_SHIFT 8 ++# define R300_US_RAM_SIZE_SHIFT 10 ++ /* watermarks, 3 bits wide */ ++# define R300_RS_HIGHWATER_COL_SHIFT 16 ++# define R300_RS_HIGHWATER_TEX_SHIFT 19 ++# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */ ++# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24 ++ ++#define R300_GB_SELECT 0x401C ++# define R300_GB_FOG_SELECT_C0A 0 ++# define R300_GB_FOG_SELECT_C1A 1 ++# define R300_GB_FOG_SELECT_C2A 2 ++# define R300_GB_FOG_SELECT_C3A 3 ++# define R300_GB_FOG_SELECT_1_1_W 4 ++# define R300_GB_FOG_SELECT_Z 5 ++# define R300_GB_DEPTH_SELECT_Z 0 ++# define R300_GB_DEPTH_SELECT_1_1_W (1<<3) ++# define R300_GB_W_SELECT_1_W 0 ++# define R300_GB_W_SELECT_1 (1<<4) ++ ++#define R300_GB_AA_CONFIG 0x4020 ++# define R300_AA_DISABLE 0x00 ++# define R300_AA_ENABLE 0x01 ++# define R300_AA_SUBSAMPLES_2 0 ++# define R300_AA_SUBSAMPLES_3 (1<<1) ++# define R300_AA_SUBSAMPLES_4 (2<<1) ++# define R300_AA_SUBSAMPLES_6 (3<<1) ++ ++/* gap */ ++ ++/* Zero to flush caches. */ ++#define R300_TX_INVALTAGS 0x4100 ++#define R300_TX_FLUSH 0x0 ++ ++/* The upper enable bits are guessed, based on fglrx reported limits. 
*/ ++#define R300_TX_ENABLE 0x4104 ++# define R300_TX_ENABLE_0 (1 << 0) ++# define R300_TX_ENABLE_1 (1 << 1) ++# define R300_TX_ENABLE_2 (1 << 2) ++# define R300_TX_ENABLE_3 (1 << 3) ++# define R300_TX_ENABLE_4 (1 << 4) ++# define R300_TX_ENABLE_5 (1 << 5) ++# define R300_TX_ENABLE_6 (1 << 6) ++# define R300_TX_ENABLE_7 (1 << 7) ++# define R300_TX_ENABLE_8 (1 << 8) ++# define R300_TX_ENABLE_9 (1 << 9) ++# define R300_TX_ENABLE_10 (1 << 10) ++# define R300_TX_ENABLE_11 (1 << 11) ++# define R300_TX_ENABLE_12 (1 << 12) ++# define R300_TX_ENABLE_13 (1 << 13) ++# define R300_TX_ENABLE_14 (1 << 14) ++# define R300_TX_ENABLE_15 (1 << 15) ++ ++/* The pointsize is given in multiples of 6. The pointsize can be ++ * enormous: Clear() renders a single point that fills the entire ++ * framebuffer. ++ */ ++#define R300_RE_POINTSIZE 0x421C ++# define R300_POINTSIZE_Y_SHIFT 0 ++# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */ ++# define R300_POINTSIZE_X_SHIFT 16 ++# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */ ++# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6) ++ ++/* The line width is given in multiples of 6. ++ * In default mode lines are classified as vertical lines. ++ * HO: horizontal ++ * VE: vertical or horizontal ++ * HO & VE: no classification ++ */ ++#define R300_RE_LINE_CNT 0x4234 ++# define R300_LINESIZE_SHIFT 0 ++# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */ ++# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6) ++# define R300_LINE_CNT_HO (1 << 16) ++# define R300_LINE_CNT_VE (1 << 17) ++ ++/* Some sort of scale or clamp value for texcoordless textures. */ ++#define R300_RE_UNK4238 0x4238 ++ ++/* Something shade related */ ++#define R300_RE_SHADE 0x4274 ++ ++#define R300_RE_SHADE_MODEL 0x4278 ++# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa ++# define R300_RE_SHADE_MODEL_FLAT 0x39595 ++ ++/* Dangerous */ ++#define R300_RE_POLYGON_MODE 0x4288 ++# define R300_PM_ENABLED (1 << 0) ++# define R300_PM_FRONT_POINT (0 << 0) ++# define R300_PM_BACK_POINT (0 << 0) ++# define R300_PM_FRONT_LINE (1 << 4) ++# define R300_PM_FRONT_FILL (1 << 5) ++# define R300_PM_BACK_LINE (1 << 7) ++# define R300_PM_BACK_FILL (1 << 8) ++ ++/* Fog parameters */ ++#define R300_RE_FOG_SCALE 0x4294 ++#define R300_RE_FOG_START 0x4298 ++ ++/* Not sure why there are duplicate of factor and constant values. ++ * My best guess so far is that there are seperate zbiases for test and write. ++ * Ordering might be wrong. ++ * Some of the tests indicate that fgl has a fallback implementation of zbias ++ * via pixel shaders. ++ */ ++#define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */ ++#define R300_RE_ZBIAS_T_FACTOR 0x42A4 ++#define R300_RE_ZBIAS_T_CONSTANT 0x42A8 ++#define R300_RE_ZBIAS_W_FACTOR 0x42AC ++#define R300_RE_ZBIAS_W_CONSTANT 0x42B0 ++ ++/* This register needs to be set to (1<<1) for RV350 to correctly ++ * perform depth test (see --vb-triangles in r300_demo) ++ * Don't know about other chips. - Vladimir ++ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on. ++ * My guess is that there are two bits for each zbias primitive ++ * (FILL, LINE, POINT). ++ * One to enable depth test and one for depth write. ++ * Yet this doesnt explain why depth writes work ... 
++ */ ++#define R300_RE_OCCLUSION_CNTL 0x42B4 ++# define R300_OCCLUSION_ON (1<<1) ++ ++#define R300_RE_CULL_CNTL 0x42B8 ++# define R300_CULL_FRONT (1 << 0) ++# define R300_CULL_BACK (1 << 1) ++# define R300_FRONT_FACE_CCW (0 << 2) ++# define R300_FRONT_FACE_CW (1 << 2) ++ ++ ++/* BEGIN: Rasterization / Interpolators - many guesses */ ++ ++/* 0_UNKNOWN_18 has always been set except for clear operations. ++ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends ++ * on the vertex program, *not* the fragment program) ++ */ ++#define R300_RS_CNTL_0 0x4300 ++# define R300_RS_CNTL_TC_CNT_SHIFT 2 ++# define R300_RS_CNTL_TC_CNT_MASK (7 << 2) ++ /* number of color interpolators used */ ++# define R300_RS_CNTL_CI_CNT_SHIFT 7 ++# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18) ++ /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n ++ register. */ ++#define R300_RS_CNTL_1 0x4304 ++ ++/* gap */ ++ ++/* Only used for texture coordinates. ++ * Use the source field to route texture coordinate input from the ++ * vertex program to the desired interpolator. Note that the source ++ * field is relative to the outputs the vertex program *actually* ++ * writes. If a vertex program only writes texcoord[1], this will ++ * be source index 0. ++ * Set INTERP_USED on all interpolators that produce data used by ++ * the fragment program. INTERP_USED looks like a swizzling mask, ++ * but I haven't seen it used that way. ++ * ++ * Note: The _UNKNOWN constants are always set in their respective ++ * register. I don't know if this is necessary. ++ */ ++#define R300_RS_INTERP_0 0x4310 ++#define R300_RS_INTERP_1 0x4314 ++# define R300_RS_INTERP_1_UNKNOWN 0x40 ++#define R300_RS_INTERP_2 0x4318 ++# define R300_RS_INTERP_2_UNKNOWN 0x80 ++#define R300_RS_INTERP_3 0x431C ++# define R300_RS_INTERP_3_UNKNOWN 0xC0 ++#define R300_RS_INTERP_4 0x4320 ++#define R300_RS_INTERP_5 0x4324 ++#define R300_RS_INTERP_6 0x4328 ++#define R300_RS_INTERP_7 0x432C ++# define R300_RS_INTERP_SRC_SHIFT 2 ++# define R300_RS_INTERP_SRC_MASK (7 << 2) ++# define R300_RS_INTERP_USED 0x00D10000 ++ ++/* These DWORDs control how vertex data is routed into fragment program ++ * registers, after interpolators. ++ */ ++#define R300_RS_ROUTE_0 0x4330 ++#define R300_RS_ROUTE_1 0x4334 ++#define R300_RS_ROUTE_2 0x4338 ++#define R300_RS_ROUTE_3 0x433C /* GUESS */ ++#define R300_RS_ROUTE_4 0x4340 /* GUESS */ ++#define R300_RS_ROUTE_5 0x4344 /* GUESS */ ++#define R300_RS_ROUTE_6 0x4348 /* GUESS */ ++#define R300_RS_ROUTE_7 0x434C /* GUESS */ ++# define R300_RS_ROUTE_SOURCE_INTERP_0 0 ++# define R300_RS_ROUTE_SOURCE_INTERP_1 1 ++# define R300_RS_ROUTE_SOURCE_INTERP_2 2 ++# define R300_RS_ROUTE_SOURCE_INTERP_3 3 ++# define R300_RS_ROUTE_SOURCE_INTERP_4 4 ++# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */ ++# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */ ++# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */ ++# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */ ++# define R300_RS_ROUTE_DEST_SHIFT 6 ++# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */ ++ ++/* Special handling for color: When the fragment program uses color, ++ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the ++ * color register index. ++ * ++ * Apperently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any ++ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state. ++ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly ++ * correct or not. - Oliver. 
++ */ ++# define R300_RS_ROUTE_0_COLOR (1 << 14) ++# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17 ++# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */ ++/* As above, but for secondary color */ ++# define R300_RS_ROUTE_1_COLOR1 (1 << 14) ++# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17 ++# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17) ++# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11) ++/* END: Rasterization / Interpolators - many guesses */ ++ ++/* Hierarchical Z Enable */ ++#define R300_SC_HYPERZ 0x43a4 ++# define R300_SC_HYPERZ_DISABLE (0 << 0) ++# define R300_SC_HYPERZ_ENABLE (1 << 0) ++# define R300_SC_HYPERZ_MIN (0 << 1) ++# define R300_SC_HYPERZ_MAX (1 << 1) ++# define R300_SC_HYPERZ_ADJ_256 (0 << 2) ++# define R300_SC_HYPERZ_ADJ_128 (1 << 2) ++# define R300_SC_HYPERZ_ADJ_64 (2 << 2) ++# define R300_SC_HYPERZ_ADJ_32 (3 << 2) ++# define R300_SC_HYPERZ_ADJ_16 (4 << 2) ++# define R300_SC_HYPERZ_ADJ_8 (5 << 2) ++# define R300_SC_HYPERZ_ADJ_4 (6 << 2) ++# define R300_SC_HYPERZ_ADJ_2 (7 << 2) ++# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5) ++# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5) ++# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6) ++# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6) ++ ++#define R300_SC_EDGERULE 0x43a8 ++ ++/* BEGIN: Scissors and cliprects */ ++ ++/* There are four clipping rectangles. Their corner coordinates are inclusive. ++ * Every pixel is assigned a number from 0 and 15 by setting bits 0-3 depending ++ * on whether the pixel is inside cliprects 0-3, respectively. For example, ++ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned ++ * the number 3 (binary 0011). ++ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set, ++ * the pixel is rasterized. ++ * ++ * In addition to this, there is a scissors rectangle. Only pixels inside the ++ * scissors rectangle are drawn. (coordinates are inclusive) ++ * ++ * For some reason, the top-left corner of the framebuffer is at (1440, 1440) ++ * for the purpose of clipping and scissors. 
++ */ ++#define R300_RE_CLIPRECT_TL_0 0x43B0 ++#define R300_RE_CLIPRECT_BR_0 0x43B4 ++#define R300_RE_CLIPRECT_TL_1 0x43B8 ++#define R300_RE_CLIPRECT_BR_1 0x43BC ++#define R300_RE_CLIPRECT_TL_2 0x43C0 ++#define R300_RE_CLIPRECT_BR_2 0x43C4 ++#define R300_RE_CLIPRECT_TL_3 0x43C8 ++#define R300_RE_CLIPRECT_BR_3 0x43CC ++# define R300_CLIPRECT_OFFSET 1440 ++# define R300_CLIPRECT_MASK 0x1FFF ++# define R300_CLIPRECT_X_SHIFT 0 ++# define R300_CLIPRECT_X_MASK (0x1FFF << 0) ++# define R300_CLIPRECT_Y_SHIFT 13 ++# define R300_CLIPRECT_Y_MASK (0x1FFF << 13) ++#define R300_RE_CLIPRECT_CNTL 0x43D0 ++# define R300_CLIP_OUT (1 << 0) ++# define R300_CLIP_0 (1 << 1) ++# define R300_CLIP_1 (1 << 2) ++# define R300_CLIP_10 (1 << 3) ++# define R300_CLIP_2 (1 << 4) ++# define R300_CLIP_20 (1 << 5) ++# define R300_CLIP_21 (1 << 6) ++# define R300_CLIP_210 (1 << 7) ++# define R300_CLIP_3 (1 << 8) ++# define R300_CLIP_30 (1 << 9) ++# define R300_CLIP_31 (1 << 10) ++# define R300_CLIP_310 (1 << 11) ++# define R300_CLIP_32 (1 << 12) ++# define R300_CLIP_320 (1 << 13) ++# define R300_CLIP_321 (1 << 14) ++# define R300_CLIP_3210 (1 << 15) ++ ++/* gap */ ++ ++#define R300_RE_SCISSORS_TL 0x43E0 ++#define R300_RE_SCISSORS_BR 0x43E4 ++# define R300_SCISSORS_OFFSET 1440 ++# define R300_SCISSORS_X_SHIFT 0 ++# define R300_SCISSORS_X_MASK (0x1FFF << 0) ++# define R300_SCISSORS_Y_SHIFT 13 ++# define R300_SCISSORS_Y_MASK (0x1FFF << 13) ++/* END: Scissors and cliprects */ ++ ++/* BEGIN: Texture specification */ ++ ++/* ++ * The texture specification dwords are grouped by meaning and not by texture ++ * unit. This means that e.g. the offset for texture image unit N is found in ++ * register TX_OFFSET_0 + (4*N) ++ */ ++#define R300_TX_FILTER_0 0x4400 ++# define R300_TX_REPEAT 0 ++# define R300_TX_MIRRORED 1 ++# define R300_TX_CLAMP 4 ++# define R300_TX_CLAMP_TO_EDGE 2 ++# define R300_TX_CLAMP_TO_BORDER 6 ++# define R300_TX_WRAP_S_SHIFT 0 ++# define R300_TX_WRAP_S_MASK (7 << 0) ++# define R300_TX_WRAP_T_SHIFT 3 ++# define R300_TX_WRAP_T_MASK (7 << 3) ++# define R300_TX_WRAP_Q_SHIFT 6 ++# define R300_TX_WRAP_Q_MASK (7 << 6) ++# define R300_TX_MAG_FILTER_NEAREST (1 << 9) ++# define R300_TX_MAG_FILTER_LINEAR (2 << 9) ++# define R300_TX_MAG_FILTER_MASK (3 << 9) ++# define R300_TX_MIN_FILTER_NEAREST (1 << 11) ++# define R300_TX_MIN_FILTER_LINEAR (2 << 11) ++# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11) ++# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11) ++# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11) ++# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11) ++ ++/* NOTE: NEAREST doesnt seem to exist. 
++ * Im not seting MAG_FILTER_MASK and (3 << 11) on for all ++ * anisotropy modes because that would void selected mag filter ++ */ ++# define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13) ++# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13) ++# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13) ++# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13) ++# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) ) ++# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21) ++# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21) ++# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21) ++# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21) ++# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21) ++# define R300_TX_MAX_ANISO_MASK (14 << 21) ++ ++#define R300_TX_FILTER1_0 0x4440 ++# define R300_CHROMA_KEY_MODE_DISABLE 0 ++# define R300_CHROMA_KEY_FORCE 1 ++# define R300_CHROMA_KEY_BLEND 2 ++# define R300_MC_ROUND_NORMAL (0<<2) ++# define R300_MC_ROUND_MPEG4 (1<<2) ++# define R300_LOD_BIAS_MASK 0x1fff ++# define R300_EDGE_ANISO_EDGE_DIAG (0<<13) ++# define R300_EDGE_ANISO_EDGE_ONLY (1<<13) ++# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14) ++# define R300_MC_COORD_TRUNCATE_MPEG (1<<14) ++# define R300_TX_TRI_PERF_0_8 (0<<15) ++# define R300_TX_TRI_PERF_1_8 (1<<15) ++# define R300_TX_TRI_PERF_1_4 (2<<15) ++# define R300_TX_TRI_PERF_3_8 (3<<15) ++# define R300_ANISO_THRESHOLD_MASK (7<<17) ++ ++#define R300_TX_SIZE_0 0x4480 ++# define R300_TX_WIDTHMASK_SHIFT 0 ++# define R300_TX_WIDTHMASK_MASK (2047 << 0) ++# define R300_TX_HEIGHTMASK_SHIFT 11 ++# define R300_TX_HEIGHTMASK_MASK (2047 << 11) ++# define R300_TX_UNK23 (1 << 23) ++# define R300_TX_MAX_MIP_LEVEL_SHIFT 26 ++# define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26) ++# define R300_TX_SIZE_PROJECTED (1<<30) ++# define R300_TX_SIZE_TXPITCH_EN (1<<31) ++#define R300_TX_FORMAT_0 0x44C0 ++ /* The interpretation of the format word by Wladimir van der Laan */ ++ /* The X, Y, Z and W refer to the layout of the components. ++ They are given meanings as R, G, B and Alpha by the swizzle ++ specification */ ++# define R300_TX_FORMAT_X8 0x0 ++# define R300_TX_FORMAT_X16 0x1 ++# define R300_TX_FORMAT_Y4X4 0x2 ++# define R300_TX_FORMAT_Y8X8 0x3 ++# define R300_TX_FORMAT_Y16X16 0x4 ++# define R300_TX_FORMAT_Z3Y3X2 0x5 ++# define R300_TX_FORMAT_Z5Y6X5 0x6 ++# define R300_TX_FORMAT_Z6Y5X5 0x7 ++# define R300_TX_FORMAT_Z11Y11X10 0x8 ++# define R300_TX_FORMAT_Z10Y11X11 0x9 ++# define R300_TX_FORMAT_W4Z4Y4X4 0xA ++# define R300_TX_FORMAT_W1Z5Y5X5 0xB ++# define R300_TX_FORMAT_W8Z8Y8X8 0xC ++# define R300_TX_FORMAT_W2Z10Y10X10 0xD ++# define R300_TX_FORMAT_W16Z16Y16X16 0xE ++# define R300_TX_FORMAT_DXT1 0xF ++# define R300_TX_FORMAT_DXT3 0x10 ++# define R300_TX_FORMAT_DXT5 0x11 ++# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */ ++# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ ++# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ ++# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ ++ /* 0x16 - some 16 bit green format.. ?? 
*/ ++# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ ++# define R300_TX_FORMAT_CUBIC_MAP (1 << 26) ++ ++ /* gap */ ++ /* Floating point formats */ ++ /* Note - hardware supports both 16 and 32 bit floating point */ ++# define R300_TX_FORMAT_FL_I16 0x18 ++# define R300_TX_FORMAT_FL_I16A16 0x19 ++# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A ++# define R300_TX_FORMAT_FL_I32 0x1B ++# define R300_TX_FORMAT_FL_I32A32 0x1C ++# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D ++ /* alpha modes, convenience mostly */ ++ /* if you have alpha, pick constant appropriate to the ++ number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ ++# define R300_TX_FORMAT_ALPHA_1CH 0x000 ++# define R300_TX_FORMAT_ALPHA_2CH 0x200 ++# define R300_TX_FORMAT_ALPHA_4CH 0x600 ++# define R300_TX_FORMAT_ALPHA_NONE 0xA00 ++ /* Swizzling */ ++ /* constants */ ++# define R300_TX_FORMAT_X 0 ++# define R300_TX_FORMAT_Y 1 ++# define R300_TX_FORMAT_Z 2 ++# define R300_TX_FORMAT_W 3 ++# define R300_TX_FORMAT_ZERO 4 ++# define R300_TX_FORMAT_ONE 5 ++ /* 2.0*Z, everything above 1.0 is set to 0.0 */ ++# define R300_TX_FORMAT_CUT_Z 6 ++ /* 2.0*W, everything above 1.0 is set to 0.0 */ ++# define R300_TX_FORMAT_CUT_W 7 ++ ++# define R300_TX_FORMAT_B_SHIFT 18 ++# define R300_TX_FORMAT_G_SHIFT 15 ++# define R300_TX_FORMAT_R_SHIFT 12 ++# define R300_TX_FORMAT_A_SHIFT 9 ++ /* Convenience macro to take care of layout and swizzling */ ++# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \ ++ ((R300_TX_FORMAT_##B)< 0.5, return ARG0, else return ARG1 ++ * - CMP: If ARG2 < 0, return ARG1, else return ARG0 ++ * - FLR: use FRC+MAD ++ * - XPD: use MAD+MAD ++ * - SGE, SLT: use MAD+CMP ++ * - RSQ: use ABS modifier for argument ++ * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation ++ * (e.g. RCP) into color register ++ * - apparently, there's no quick DST operation ++ * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2" ++ * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0" ++ * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1" ++ * ++ * Operand selection ++ * First stage selects three sources from the available registers and ++ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha). ++ * fglrx sorts the three source fields: Registers before constants, ++ * lower indices before higher indices; I do not know whether this is ++ * necessary. ++ * ++ * fglrx fills unused sources with "read constant 0" ++ * According to specs, you cannot select more than two different constants. ++ * ++ * Second stage selects the operands from the sources. This is defined in ++ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants ++ * zero and one. ++ * Swizzling and negation happens in this stage, as well. ++ * ++ * Important: Color and alpha seem to be mostly separate, i.e. their sources ++ * selection appears to be fully independent (the register storage is probably ++ * physically split into a color and an alpha section). ++ * However (because of the apparent physical split), there is some interaction ++ * WRT swizzling. If, for example, you want to load an R component into an ++ * Alpha operand, this R component is taken from a *color* source, not from ++ * an alpha source. The corresponding register doesn't even have to appear in ++ * the alpha sources list. (I hope this all makes sense to you) ++ * ++ * Destination selection ++ * The destination register index is in FPI1 (color) and FPI3 (alpha) ++ * together with enable bits. 
++ * There are separate enable bits for writing into temporary registers ++ * (DSTC_REG_* /DSTA_REG) and and program output registers (DSTC_OUTPUT_* ++ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the ++ * same index must be used for both). ++ * ++ * Note: There is a special form for LRP ++ * - Argument order is the same as in ARB_fragment_program. ++ * - Operation is MAD ++ * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP ++ * - Set FPI0/FPI2_SPECIAL_LRP ++ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD ++ */ ++#define R300_PFS_INSTR1_0 0x46C0 ++# define R300_FPI1_SRC0C_SHIFT 0 ++# define R300_FPI1_SRC0C_MASK (31 << 0) ++# define R300_FPI1_SRC0C_CONST (1 << 5) ++# define R300_FPI1_SRC1C_SHIFT 6 ++# define R300_FPI1_SRC1C_MASK (31 << 6) ++# define R300_FPI1_SRC1C_CONST (1 << 11) ++# define R300_FPI1_SRC2C_SHIFT 12 ++# define R300_FPI1_SRC2C_MASK (31 << 12) ++# define R300_FPI1_SRC2C_CONST (1 << 17) ++# define R300_FPI1_SRC_MASK 0x0003ffff ++# define R300_FPI1_DSTC_SHIFT 18 ++# define R300_FPI1_DSTC_MASK (31 << 18) ++# define R300_FPI1_DSTC_REG_MASK_SHIFT 23 ++# define R300_FPI1_DSTC_REG_X (1 << 23) ++# define R300_FPI1_DSTC_REG_Y (1 << 24) ++# define R300_FPI1_DSTC_REG_Z (1 << 25) ++# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26 ++# define R300_FPI1_DSTC_OUTPUT_X (1 << 26) ++# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27) ++# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28) ++ ++#define R300_PFS_INSTR3_0 0x47C0 ++# define R300_FPI3_SRC0A_SHIFT 0 ++# define R300_FPI3_SRC0A_MASK (31 << 0) ++# define R300_FPI3_SRC0A_CONST (1 << 5) ++# define R300_FPI3_SRC1A_SHIFT 6 ++# define R300_FPI3_SRC1A_MASK (31 << 6) ++# define R300_FPI3_SRC1A_CONST (1 << 11) ++# define R300_FPI3_SRC2A_SHIFT 12 ++# define R300_FPI3_SRC2A_MASK (31 << 12) ++# define R300_FPI3_SRC2A_CONST (1 << 17) ++# define R300_FPI3_SRC_MASK 0x0003ffff ++# define R300_FPI3_DSTA_SHIFT 18 ++# define R300_FPI3_DSTA_MASK (31 << 18) ++# define R300_FPI3_DSTA_REG (1 << 23) ++# define R300_FPI3_DSTA_OUTPUT (1 << 24) ++# define R300_FPI3_DSTA_DEPTH (1 << 27) ++ ++#define R300_PFS_INSTR0_0 0x48C0 ++# define R300_FPI0_ARGC_SRC0C_XYZ 0 ++# define R300_FPI0_ARGC_SRC0C_XXX 1 ++# define R300_FPI0_ARGC_SRC0C_YYY 2 ++# define R300_FPI0_ARGC_SRC0C_ZZZ 3 ++# define R300_FPI0_ARGC_SRC1C_XYZ 4 ++# define R300_FPI0_ARGC_SRC1C_XXX 5 ++# define R300_FPI0_ARGC_SRC1C_YYY 6 ++# define R300_FPI0_ARGC_SRC1C_ZZZ 7 ++# define R300_FPI0_ARGC_SRC2C_XYZ 8 ++# define R300_FPI0_ARGC_SRC2C_XXX 9 ++# define R300_FPI0_ARGC_SRC2C_YYY 10 ++# define R300_FPI0_ARGC_SRC2C_ZZZ 11 ++# define R300_FPI0_ARGC_SRC0A 12 ++# define R300_FPI0_ARGC_SRC1A 13 ++# define R300_FPI0_ARGC_SRC2A 14 ++# define R300_FPI0_ARGC_SRC1C_LRP 15 ++# define R300_FPI0_ARGC_ZERO 20 ++# define R300_FPI0_ARGC_ONE 21 ++ /* GUESS */ ++# define R300_FPI0_ARGC_HALF 22 ++# define R300_FPI0_ARGC_SRC0C_YZX 23 ++# define R300_FPI0_ARGC_SRC1C_YZX 24 ++# define R300_FPI0_ARGC_SRC2C_YZX 25 ++# define R300_FPI0_ARGC_SRC0C_ZXY 26 ++# define R300_FPI0_ARGC_SRC1C_ZXY 27 ++# define R300_FPI0_ARGC_SRC2C_ZXY 28 ++# define R300_FPI0_ARGC_SRC0CA_WZY 29 ++# define R300_FPI0_ARGC_SRC1CA_WZY 30 ++# define R300_FPI0_ARGC_SRC2CA_WZY 31 ++ ++# define R300_FPI0_ARG0C_SHIFT 0 ++# define R300_FPI0_ARG0C_MASK (31 << 0) ++# define R300_FPI0_ARG0C_NEG (1 << 5) ++# define R300_FPI0_ARG0C_ABS (1 << 6) ++# define R300_FPI0_ARG1C_SHIFT 7 ++# define R300_FPI0_ARG1C_MASK (31 << 7) ++# define R300_FPI0_ARG1C_NEG (1 << 12) ++# define R300_FPI0_ARG1C_ABS (1 << 13) ++# define R300_FPI0_ARG2C_SHIFT 14 ++# define 
R300_FPI0_ARG2C_MASK (31 << 14) ++# define R300_FPI0_ARG2C_NEG (1 << 19) ++# define R300_FPI0_ARG2C_ABS (1 << 20) ++# define R300_FPI0_SPECIAL_LRP (1 << 21) ++# define R300_FPI0_OUTC_MAD (0 << 23) ++# define R300_FPI0_OUTC_DP3 (1 << 23) ++# define R300_FPI0_OUTC_DP4 (2 << 23) ++# define R300_FPI0_OUTC_MIN (4 << 23) ++# define R300_FPI0_OUTC_MAX (5 << 23) ++# define R300_FPI0_OUTC_CMPH (7 << 23) ++# define R300_FPI0_OUTC_CMP (8 << 23) ++# define R300_FPI0_OUTC_FRC (9 << 23) ++# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23) ++# define R300_FPI0_OUTC_SAT (1 << 30) ++# define R300_FPI0_INSERT_NOP (1 << 31) ++ ++#define R300_PFS_INSTR2_0 0x49C0 ++# define R300_FPI2_ARGA_SRC0C_X 0 ++# define R300_FPI2_ARGA_SRC0C_Y 1 ++# define R300_FPI2_ARGA_SRC0C_Z 2 ++# define R300_FPI2_ARGA_SRC1C_X 3 ++# define R300_FPI2_ARGA_SRC1C_Y 4 ++# define R300_FPI2_ARGA_SRC1C_Z 5 ++# define R300_FPI2_ARGA_SRC2C_X 6 ++# define R300_FPI2_ARGA_SRC2C_Y 7 ++# define R300_FPI2_ARGA_SRC2C_Z 8 ++# define R300_FPI2_ARGA_SRC0A 9 ++# define R300_FPI2_ARGA_SRC1A 10 ++# define R300_FPI2_ARGA_SRC2A 11 ++# define R300_FPI2_ARGA_SRC1A_LRP 15 ++# define R300_FPI2_ARGA_ZERO 16 ++# define R300_FPI2_ARGA_ONE 17 ++ /* GUESS */ ++# define R300_FPI2_ARGA_HALF 18 ++# define R300_FPI2_ARG0A_SHIFT 0 ++# define R300_FPI2_ARG0A_MASK (31 << 0) ++# define R300_FPI2_ARG0A_NEG (1 << 5) ++ /* GUESS */ ++# define R300_FPI2_ARG0A_ABS (1 << 6) ++# define R300_FPI2_ARG1A_SHIFT 7 ++# define R300_FPI2_ARG1A_MASK (31 << 7) ++# define R300_FPI2_ARG1A_NEG (1 << 12) ++ /* GUESS */ ++# define R300_FPI2_ARG1A_ABS (1 << 13) ++# define R300_FPI2_ARG2A_SHIFT 14 ++# define R300_FPI2_ARG2A_MASK (31 << 14) ++# define R300_FPI2_ARG2A_NEG (1 << 19) ++ /* GUESS */ ++# define R300_FPI2_ARG2A_ABS (1 << 20) ++# define R300_FPI2_SPECIAL_LRP (1 << 21) ++# define R300_FPI2_OUTA_MAD (0 << 23) ++# define R300_FPI2_OUTA_DP4 (1 << 23) ++# define R300_FPI2_OUTA_MIN (2 << 23) ++# define R300_FPI2_OUTA_MAX (3 << 23) ++# define R300_FPI2_OUTA_CMP (6 << 23) ++# define R300_FPI2_OUTA_FRC (7 << 23) ++# define R300_FPI2_OUTA_EX2 (8 << 23) ++# define R300_FPI2_OUTA_LG2 (9 << 23) ++# define R300_FPI2_OUTA_RCP (10 << 23) ++# define R300_FPI2_OUTA_RSQ (11 << 23) ++# define R300_FPI2_OUTA_SAT (1 << 30) ++# define R300_FPI2_UNKNOWN_31 (1 << 31) ++/* END: Fragment program instruction set */ ++ ++/* Fog state and color */ ++#define R300_RE_FOG_STATE 0x4BC0 ++# define R300_FOG_ENABLE (1 << 0) ++# define R300_FOG_MODE_LINEAR (0 << 1) ++# define R300_FOG_MODE_EXP (1 << 1) ++# define R300_FOG_MODE_EXP2 (2 << 1) ++# define R300_FOG_MODE_MASK (3 << 1) ++#define R300_FOG_COLOR_R 0x4BC8 ++#define R300_FOG_COLOR_G 0x4BCC ++#define R300_FOG_COLOR_B 0x4BD0 ++ ++#define R300_PP_ALPHA_TEST 0x4BD4 ++# define R300_REF_ALPHA_MASK 0x000000ff ++# define R300_ALPHA_TEST_FAIL (0 << 8) ++# define R300_ALPHA_TEST_LESS (1 << 8) ++# define R300_ALPHA_TEST_LEQUAL (3 << 8) ++# define R300_ALPHA_TEST_EQUAL (2 << 8) ++# define R300_ALPHA_TEST_GEQUAL (6 << 8) ++# define R300_ALPHA_TEST_GREATER (4 << 8) ++# define R300_ALPHA_TEST_NEQUAL (5 << 8) ++# define R300_ALPHA_TEST_PASS (7 << 8) ++# define R300_ALPHA_TEST_OP_MASK (7 << 8) ++# define R300_ALPHA_TEST_ENABLE (1 << 11) ++ ++/* gap */ ++ ++/* Fragment program parameters in 7.16 floating point */ ++#define R300_PFS_PARAM_0_X 0x4C00 ++#define R300_PFS_PARAM_0_Y 0x4C04 ++#define R300_PFS_PARAM_0_Z 0x4C08 ++#define R300_PFS_PARAM_0_W 0x4C0C ++/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */ ++#define R300_PFS_PARAM_31_X 0x4DF0 ++#define R300_PFS_PARAM_31_Y 
0x4DF4 ++#define R300_PFS_PARAM_31_Z 0x4DF8 ++#define R300_PFS_PARAM_31_W 0x4DFC ++ ++/* Notes: ++ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in ++ * the application ++ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND ++ * are set to the same ++ * function (both registers are always set up completely in any case) ++ * - Most blend flags are simply copied from R200 and not tested yet ++ */ ++#define R300_RB3D_CBLEND 0x4E04 ++#define R300_RB3D_ABLEND 0x4E08 ++/* the following only appear in CBLEND */ ++# define R300_BLEND_ENABLE (1 << 0) ++# define R300_BLEND_UNKNOWN (3 << 1) ++# define R300_BLEND_NO_SEPARATE (1 << 3) ++/* the following are shared between CBLEND and ABLEND */ ++# define R300_FCN_MASK (3 << 12) ++# define R300_COMB_FCN_ADD_CLAMP (0 << 12) ++# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12) ++# define R300_COMB_FCN_SUB_CLAMP (2 << 12) ++# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12) ++# define R300_COMB_FCN_MIN (4 << 12) ++# define R300_COMB_FCN_MAX (5 << 12) ++# define R300_COMB_FCN_RSUB_CLAMP (6 << 12) ++# define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12) ++# define R300_BLEND_GL_ZERO (32) ++# define R300_BLEND_GL_ONE (33) ++# define R300_BLEND_GL_SRC_COLOR (34) ++# define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35) ++# define R300_BLEND_GL_DST_COLOR (36) ++# define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37) ++# define R300_BLEND_GL_SRC_ALPHA (38) ++# define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39) ++# define R300_BLEND_GL_DST_ALPHA (40) ++# define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41) ++# define R300_BLEND_GL_SRC_ALPHA_SATURATE (42) ++# define R300_BLEND_GL_CONST_COLOR (43) ++# define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44) ++# define R300_BLEND_GL_CONST_ALPHA (45) ++# define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46) ++# define R300_BLEND_MASK (63) ++# define R300_SRC_BLEND_SHIFT (16) ++# define R300_DST_BLEND_SHIFT (24) ++#define R300_RB3D_BLEND_COLOR 0x4E10 ++#define R300_RB3D_COLORMASK 0x4E0C ++# define R300_COLORMASK0_B (1<<0) ++# define R300_COLORMASK0_G (1<<1) ++# define R300_COLORMASK0_R (1<<2) ++# define R300_COLORMASK0_A (1<<3) ++ ++/* gap */ ++ ++#define R300_RB3D_COLOROFFSET0 0x4E28 ++# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ ++#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ ++#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ ++#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */ ++ ++/* gap */ ++ ++/* Bit 16: Larger tiles ++ * Bit 17: 4x2 tiles ++ * Bit 18: Extremely weird tile like, but some pixels duplicated? ++ */ ++#define R300_RB3D_COLORPITCH0 0x4E38 ++# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ ++# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ ++# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ ++# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ ++# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ ++# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ ++# define R300_COLOR_FORMAT_RGB565 (2 << 22) ++# define R300_COLOR_FORMAT_ARGB8888 (3 << 22) ++#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */ ++#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ ++#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ ++ ++#define R300_RB3D_AARESOLVE_CTL 0x4E88 ++/* gap */ ++ ++/* Guess by Vladimir. ++ * Set to 0A before 3D operations, set to 02 afterwards. 
++ */ ++/*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/ ++# define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002 ++# define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A ++ ++/* gap */ ++/* There seems to be no "write only" setting, so use Z-test = ALWAYS ++ * for this. ++ * Bit (1<<8) is the "test" bit. so plain write is 6 - vd ++ */ ++#define R300_ZB_CNTL 0x4F00 ++# define R300_STENCIL_ENABLE (1 << 0) ++# define R300_Z_ENABLE (1 << 1) ++# define R300_Z_WRITE_ENABLE (1 << 2) ++# define R300_Z_SIGNED_COMPARE (1 << 3) ++# define R300_STENCIL_FRONT_BACK (1 << 4) ++ ++#define R300_ZB_ZSTENCILCNTL 0x4f04 ++ /* functions */ ++# define R300_ZS_NEVER 0 ++# define R300_ZS_LESS 1 ++# define R300_ZS_LEQUAL 2 ++# define R300_ZS_EQUAL 3 ++# define R300_ZS_GEQUAL 4 ++# define R300_ZS_GREATER 5 ++# define R300_ZS_NOTEQUAL 6 ++# define R300_ZS_ALWAYS 7 ++# define R300_ZS_MASK 7 ++ /* operations */ ++# define R300_ZS_KEEP 0 ++# define R300_ZS_ZERO 1 ++# define R300_ZS_REPLACE 2 ++# define R300_ZS_INCR 3 ++# define R300_ZS_DECR 4 ++# define R300_ZS_INVERT 5 ++# define R300_ZS_INCR_WRAP 6 ++# define R300_ZS_DECR_WRAP 7 ++# define R300_Z_FUNC_SHIFT 0 ++ /* front and back refer to operations done for front ++ and back faces, i.e. separate stencil function support */ ++# define R300_S_FRONT_FUNC_SHIFT 3 ++# define R300_S_FRONT_SFAIL_OP_SHIFT 6 ++# define R300_S_FRONT_ZPASS_OP_SHIFT 9 ++# define R300_S_FRONT_ZFAIL_OP_SHIFT 12 ++# define R300_S_BACK_FUNC_SHIFT 15 ++# define R300_S_BACK_SFAIL_OP_SHIFT 18 ++# define R300_S_BACK_ZPASS_OP_SHIFT 21 ++# define R300_S_BACK_ZFAIL_OP_SHIFT 24 ++ ++#define R300_ZB_STENCILREFMASK 0x4f08 ++# define R300_STENCILREF_SHIFT 0 ++# define R300_STENCILREF_MASK 0x000000ff ++# define R300_STENCILMASK_SHIFT 8 ++# define R300_STENCILMASK_MASK 0x0000ff00 ++# define R300_STENCILWRITEMASK_SHIFT 16 ++# define R300_STENCILWRITEMASK_MASK 0x00ff0000 ++ ++/* gap */ ++ ++#define R300_ZB_FORMAT 0x4f10 ++# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0) ++# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0) ++# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0) ++/* reserved up to (15 << 0) */ ++# define R300_INVERT_13E3_LEADING_ONES (0 << 4) ++# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4) ++ ++#define R300_ZB_ZTOP 0x4F14 ++# define R300_ZTOP_DISABLE (0 << 0) ++# define R300_ZTOP_ENABLE (1 << 0) ++ ++/* gap */ ++ ++#define R300_ZB_ZCACHE_CTLSTAT 0x4f18 ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31) ++ ++#define R300_ZB_BW_CNTL 0x4f1c ++# define R300_HIZ_DISABLE (0 << 0) ++# define R300_HIZ_ENABLE (1 << 0) ++# define R300_HIZ_MIN (0 << 1) ++# define R300_HIZ_MAX (1 << 1) ++# define R300_FAST_FILL_DISABLE (0 << 2) ++# define R300_FAST_FILL_ENABLE (1 << 2) ++# define R300_RD_COMP_DISABLE (0 << 3) ++# define R300_RD_COMP_ENABLE (1 << 3) ++# define R300_WR_COMP_DISABLE (0 << 4) ++# define R300_WR_COMP_ENABLE (1 << 4) ++# define R300_ZB_CB_CLEAR_RMW (0 << 5) ++# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5) ++# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6) ++# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6) ++ ++# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7) ++# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7) ++# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8) ++# define 
R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8) ++ ++# define R500_BMASK_ENABLE (0 << 10) ++# define R500_BMASK_DISABLE (1 << 10) ++# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11) ++# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11) ++# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12) ++# define R500_HIZ_FP_EXP_BITS_1 (1 << 12) ++# define R500_HIZ_FP_EXP_BITS_2 (2 << 12) ++# define R500_HIZ_FP_EXP_BITS_3 (3 << 12) ++# define R500_HIZ_FP_EXP_BITS_4 (4 << 12) ++# define R500_HIZ_FP_EXP_BITS_5 (5 << 12) ++# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15) ++# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15) ++# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16) ++# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16) ++# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17) ++# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17) ++# define R500_PEQ_PACKING_DISABLE (0 << 18) ++# define R500_PEQ_PACKING_ENABLE (1 << 18) ++# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18) ++# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18) ++ ++ ++/* gap */ ++ ++/* Z Buffer Address Offset. ++ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles. ++ */ ++#define R300_ZB_DEPTHOFFSET 0x4f20 ++ ++/* Z Buffer Pitch and Endian Control */ ++#define R300_ZB_DEPTHPITCH 0x4f24 ++# define R300_DEPTHPITCH_MASK 0x00003FFC ++# define R300_DEPTHMACROTILE_DISABLE (0 << 16) ++# define R300_DEPTHMACROTILE_ENABLE (1 << 16) ++# define R300_DEPTHMICROTILE_LINEAR (0 << 17) ++# define R300_DEPTHMICROTILE_TILED (1 << 17) ++# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17) ++# define R300_DEPTHENDIAN_NO_SWAP (0 << 18) ++# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18) ++# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18) ++# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18) ++ ++/* Z Buffer Clear Value */ ++#define R300_ZB_DEPTHCLEARVALUE 0x4f28 ++ ++#define R300_ZB_ZMASK_OFFSET 0x4f30 ++#define R300_ZB_ZMASK_PITCH 0x4f34 ++#define R300_ZB_ZMASK_WRINDEX 0x4f38 ++#define R300_ZB_ZMASK_DWORD 0x4f3c ++#define R300_ZB_ZMASK_RDINDEX 0x4f40 ++ ++/* Hierarchical Z Memory Offset */ ++#define R300_ZB_HIZ_OFFSET 0x4f44 ++ ++/* Hierarchical Z Write Index */ ++#define R300_ZB_HIZ_WRINDEX 0x4f48 ++ ++/* Hierarchical Z Data */ ++#define R300_ZB_HIZ_DWORD 0x4f4c ++ ++/* Hierarchical Z Read Index */ ++#define R300_ZB_HIZ_RDINDEX 0x4f50 ++ ++/* Hierarchical Z Pitch */ ++#define R300_ZB_HIZ_PITCH 0x4f54 ++ ++/* Z Buffer Z Pass Counter Data */ ++#define R300_ZB_ZPASS_DATA 0x4f58 ++ ++/* Z Buffer Z Pass Counter Address */ ++#define R300_ZB_ZPASS_ADDR 0x4f5c ++ ++/* Depth buffer X and Y coordinate offset */ ++#define R300_ZB_DEPTHXY_OFFSET 0x4f60 ++# define R300_DEPTHX_OFFSET_SHIFT 1 ++# define R300_DEPTHX_OFFSET_MASK 0x000007FE ++# define R300_DEPTHY_OFFSET_SHIFT 17 ++# define R300_DEPTHY_OFFSET_MASK 0x07FE0000 ++ ++/* Sets the fifo sizes */ ++#define R500_ZB_FIFO_SIZE 0x4fd0 ++# define R500_OP_FIFO_SIZE_FULL (0 << 0) ++# define R500_OP_FIFO_SIZE_HALF (1 << 0) ++# define R500_OP_FIFO_SIZE_QUATER (2 << 0) ++# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0) ++ ++/* Stencil Reference Value and Mask for backfacing quads */ ++/* R300_ZB_STENCILREFMASK handles front face */ ++#define R500_ZB_STENCILREFMASK_BF 0x4fd4 ++# define R500_STENCILREF_SHIFT 0 ++# define R500_STENCILREF_MASK 0x000000ff ++# define R500_STENCILMASK_SHIFT 8 ++# define R500_STENCILMASK_MASK 0x0000ff00 ++# define R500_STENCILWRITEMASK_SHIFT 16 ++# define R500_STENCILWRITEMASK_MASK 0x00ff0000 ++ ++/* BEGIN: Vertex program instruction set */ ++ ++/* Every instruction is four dwords 
long: ++ * DWORD 0: output and opcode ++ * DWORD 1: first argument ++ * DWORD 2: second argument ++ * DWORD 3: third argument ++ * ++ * Notes: ++ * - ABS r, a is implemented as MAX r, a, -a ++ * - MOV is implemented as ADD to zero ++ * - XPD is implemented as MUL + MAD ++ * - FLR is implemented as FRC + ADD ++ * - apparently, fglrx tries to schedule instructions so that there is at ++ * least one instruction between the write to a temporary and the first ++ * read from said temporary; however, violations of this scheduling are ++ * allowed ++ * - register indices seem to be unrelated with OpenGL aliasing to ++ * conventional state ++ * - only one attribute and one parameter can be loaded at a time; however, ++ * the same attribute/parameter can be used for more than one argument ++ * - the second software argument for POW is the third hardware argument ++ * (no idea why) ++ * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2 ++ * ++ * There is some magic surrounding LIT: ++ * The single argument is replicated across all three inputs, but swizzled: ++ * First argument: xyzy ++ * Second argument: xyzx ++ * Third argument: xyzw ++ * Whenever the result is used later in the fragment program, fglrx forces ++ * x and w to be 1.0 in the input selection; I don't know whether this is ++ * strictly necessary ++ */ ++#define R300_VPI_OUT_OP_DOT (1 << 0) ++#define R300_VPI_OUT_OP_MUL (2 << 0) ++#define R300_VPI_OUT_OP_ADD (3 << 0) ++#define R300_VPI_OUT_OP_MAD (4 << 0) ++#define R300_VPI_OUT_OP_DST (5 << 0) ++#define R300_VPI_OUT_OP_FRC (6 << 0) ++#define R300_VPI_OUT_OP_MAX (7 << 0) ++#define R300_VPI_OUT_OP_MIN (8 << 0) ++#define R300_VPI_OUT_OP_SGE (9 << 0) ++#define R300_VPI_OUT_OP_SLT (10 << 0) ++ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */ ++#define R300_VPI_OUT_OP_UNK12 (12 << 0) ++#define R300_VPI_OUT_OP_ARL (13 << 0) ++#define R300_VPI_OUT_OP_EXP (65 << 0) ++#define R300_VPI_OUT_OP_LOG (66 << 0) ++ /* Used in fog computations, scalar(scalar) */ ++#define R300_VPI_OUT_OP_UNK67 (67 << 0) ++#define R300_VPI_OUT_OP_LIT (68 << 0) ++#define R300_VPI_OUT_OP_POW (69 << 0) ++#define R300_VPI_OUT_OP_RCP (70 << 0) ++#define R300_VPI_OUT_OP_RSQ (72 << 0) ++ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */ ++#define R300_VPI_OUT_OP_UNK73 (73 << 0) ++#define R300_VPI_OUT_OP_EX2 (75 << 0) ++#define R300_VPI_OUT_OP_LG2 (76 << 0) ++#define R300_VPI_OUT_OP_MAD_2 (128 << 0) ++ /* all temps, vector(scalar, vector, vector) */ ++#define R300_VPI_OUT_OP_UNK129 (129 << 0) ++ ++#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8) ++#define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8) ++#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8) ++#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8) ++ ++#define R300_VPI_OUT_REG_INDEX_SHIFT 13 ++ /* GUESS based on fglrx native limits */ ++#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) ++ ++#define R300_VPI_OUT_WRITE_X (1 << 20) ++#define R300_VPI_OUT_WRITE_Y (1 << 21) ++#define R300_VPI_OUT_WRITE_Z (1 << 22) ++#define R300_VPI_OUT_WRITE_W (1 << 23) ++ ++#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0) ++#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0) ++#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0) ++#define R300_VPI_IN_REG_CLASS_NONE (9 << 0) ++#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) ++ ++#define R300_VPI_IN_REG_INDEX_SHIFT 5 ++ /* GUESS based on fglrx native limits */ ++#define R300_VPI_IN_REG_INDEX_MASK (255 << 5) ++ ++/* The R300 can select components from the input register arbitrarily. 
++ * Use the following constants, shifted by the component shift you ++ * want to select ++ */ ++#define R300_VPI_IN_SELECT_X 0 ++#define R300_VPI_IN_SELECT_Y 1 ++#define R300_VPI_IN_SELECT_Z 2 ++#define R300_VPI_IN_SELECT_W 3 ++#define R300_VPI_IN_SELECT_ZERO 4 ++#define R300_VPI_IN_SELECT_ONE 5 ++#define R300_VPI_IN_SELECT_MASK 7 ++ ++#define R300_VPI_IN_X_SHIFT 13 ++#define R300_VPI_IN_Y_SHIFT 16 ++#define R300_VPI_IN_Z_SHIFT 19 ++#define R300_VPI_IN_W_SHIFT 22 ++ ++#define R300_VPI_IN_NEG_X (1 << 25) ++#define R300_VPI_IN_NEG_Y (1 << 26) ++#define R300_VPI_IN_NEG_Z (1 << 27) ++#define R300_VPI_IN_NEG_W (1 << 28) ++/* END: Vertex program instruction set */ ++ ++/* BEGIN: Packet 3 commands */ ++ ++/* A primitive emission dword. */ ++#define R300_PRIM_TYPE_NONE (0 << 0) ++#define R300_PRIM_TYPE_POINT (1 << 0) ++#define R300_PRIM_TYPE_LINE (2 << 0) ++#define R300_PRIM_TYPE_LINE_STRIP (3 << 0) ++#define R300_PRIM_TYPE_TRI_LIST (4 << 0) ++#define R300_PRIM_TYPE_TRI_FAN (5 << 0) ++#define R300_PRIM_TYPE_TRI_STRIP (6 << 0) ++#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0) ++#define R300_PRIM_TYPE_RECT_LIST (8 << 0) ++#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) ++#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) ++ /* GUESS (based on r200) */ ++#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) ++#define R300_PRIM_TYPE_LINE_LOOP (12 << 0) ++#define R300_PRIM_TYPE_QUADS (13 << 0) ++#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0) ++#define R300_PRIM_TYPE_POLYGON (15 << 0) ++#define R300_PRIM_TYPE_MASK 0xF ++#define R300_PRIM_WALK_IND (1 << 4) ++#define R300_PRIM_WALK_LIST (2 << 4) ++#define R300_PRIM_WALK_RING (3 << 4) ++#define R300_PRIM_WALK_MASK (3 << 4) ++ /* GUESS (based on r200) */ ++#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) ++#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) ++#define R300_PRIM_NUM_VERTICES_SHIFT 16 ++#define R300_PRIM_NUM_VERTICES_MASK 0xffff ++ ++/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR. ++ * Two parameter dwords: ++ * 0. The first parameter appears to be always 0 ++ * 1. The second parameter is a standard primitive emission dword. ++ */ ++#define R300_PACKET3_3D_DRAW_VBUF 0x00002800 ++ ++/* Specify the full set of vertex arrays as (address, stride). ++ * The first parameter is the number of vertex arrays specified. ++ * The rest of the command is a variable length list of blocks, where ++ * each block is three dwords long and specifies two arrays. ++ * The first dword of a block is split into two words, the lower significant ++ * word refers to the first array, the more significant word to the second ++ * array in the block. ++ * The low byte of each word contains the size of an array entry in dwords, ++ * the high byte contains the stride of the array. ++ * The second dword of a block contains the pointer to the first array, ++ * the third dword of a block contains the pointer to the second array. ++ * Note that if the total number of arrays is odd, the third dword of ++ * the last block is omitted. 
++ */ ++#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00 ++ ++#define R300_PACKET3_INDX_BUFFER 0x00003300 ++# define R300_EB_UNK1_SHIFT 24 ++# define R300_EB_UNK1 (0x80<<24) ++# define R300_EB_UNK2 0x0810 ++#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400 ++#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 ++ ++/* END: Packet 3 commands */ ++ ++ ++/* Color formats for 2d packets ++ */ ++#define R300_CP_COLOR_FORMAT_CI8 2 ++#define R300_CP_COLOR_FORMAT_ARGB1555 3 ++#define R300_CP_COLOR_FORMAT_RGB565 4 ++#define R300_CP_COLOR_FORMAT_ARGB8888 6 ++#define R300_CP_COLOR_FORMAT_RGB332 7 ++#define R300_CP_COLOR_FORMAT_RGB8 9 ++#define R300_CP_COLOR_FORMAT_ARGB4444 15 ++ ++/* ++ * CP type-3 packets ++ */ ++#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00 ++ ++#define R500_VAP_INDEX_OFFSET 0x208c ++ ++#define R500_GA_US_VECTOR_INDEX 0x4250 ++#define R500_GA_US_VECTOR_DATA 0x4254 ++ ++#define R500_RS_IP_0 0x4074 ++#define R500_RS_INST_0 0x4320 ++ ++#define R500_US_CONFIG 0x4600 ++ ++#define R500_US_FC_CTRL 0x4624 ++#define R500_US_CODE_ADDR 0x4630 ++ ++#define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0 ++#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8 ++ ++#endif /* _R300_REG_H */ ++ ++/* *INDENT-ON* */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_bci.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_bci.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_bci.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_bci.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1092 @@ ++/* savage_bci.c -- BCI support for Savage ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++#include "drmP.h" ++#include "savage_drm.h" ++#include "savage_drv.h" ++ ++/* Need a long timeout for shadow status updates can take a while ++ * and so can waiting for events when the queue is full. 
*/ ++#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ ++#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ ++#define SAVAGE_FREELIST_DEBUG 0 ++ ++static int savage_do_cleanup_bci(struct drm_device *dev); ++ ++static int ++savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) ++{ ++ uint32_t mask = dev_priv->status_used_mask; ++ uint32_t threshold = dev_priv->bci_threshold_hi; ++ uint32_t status; ++ int i; ++ ++#if SAVAGE_BCI_DEBUG ++ if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold) ++ DRM_ERROR("Trying to emit %d words " ++ "(more than guaranteed space in COB)\n", n); ++#endif ++ ++ for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { ++ DRM_MEMORYBARRIER(); ++ status = dev_priv->status_ptr[0]; ++ if ((status & mask) < threshold) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); ++#endif ++ return -EBUSY; ++} ++ ++static int ++savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n) ++{ ++ uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; ++ uint32_t status; ++ int i; ++ ++ for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { ++ status = SAVAGE_READ(SAVAGE_STATUS_WORD0); ++ if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x\n", status); ++#endif ++ return -EBUSY; ++} ++ ++static int ++savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n) ++{ ++ uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; ++ uint32_t status; ++ int i; ++ ++ for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { ++ status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0); ++ if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x\n", status); ++#endif ++ return -EBUSY; ++} ++ ++/* ++ * Waiting for events. ++ * ++ * The BIOSresets the event tag to 0 on mode changes. Therefore we ++ * never emit 0 to the event tag. If we find a 0 event tag we know the ++ * BIOS stomped on it and return success assuming that the BIOS waited ++ * for engine idle. ++ * ++ * Note: if the Xserver uses the event tag it has to follow the same ++ * rule. Otherwise there may be glitches every 2^16 events. 
++ */ ++static int ++savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e) ++{ ++ uint32_t status; ++ int i; ++ ++ for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { ++ DRM_MEMORYBARRIER(); ++ status = dev_priv->status_ptr[1]; ++ if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || ++ (status & 0xffff) == 0) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); ++#endif ++ ++ return -EBUSY; ++} ++ ++static int ++savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e) ++{ ++ uint32_t status; ++ int i; ++ ++ for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { ++ status = SAVAGE_READ(SAVAGE_STATUS_WORD1); ++ if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || ++ (status & 0xffff) == 0) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); ++#endif ++ ++ return -EBUSY; ++} ++ ++uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, ++ unsigned int flags) ++{ ++ uint16_t count; ++ BCI_LOCALS; ++ ++ if (dev_priv->status_ptr) { ++ /* coordinate with Xserver */ ++ count = dev_priv->status_ptr[1023]; ++ if (count < dev_priv->event_counter) ++ dev_priv->event_wrap++; ++ } else { ++ count = dev_priv->event_counter; ++ } ++ count = (count + 1) & 0xffff; ++ if (count == 0) { ++ count++; /* See the comment above savage_wait_event_*. */ ++ dev_priv->event_wrap++; ++ } ++ dev_priv->event_counter = count; ++ if (dev_priv->status_ptr) ++ dev_priv->status_ptr[1023] = (uint32_t)count; ++ ++ if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { ++ unsigned int wait_cmd = BCI_CMD_WAIT; ++ if ((flags & SAVAGE_WAIT_2D)) ++ wait_cmd |= BCI_CMD_WAIT_2D; ++ if ((flags & SAVAGE_WAIT_3D)) ++ wait_cmd |= BCI_CMD_WAIT_3D; ++ BEGIN_BCI(2); ++ BCI_WRITE(wait_cmd); ++ } else { ++ BEGIN_BCI(1); ++ } ++ BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count); ++ ++ return count; ++} ++ ++/* ++ * Freelist management ++ */ ++static int savage_freelist_init(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_savage_buf_priv_t *entry; ++ int i; ++ DRM_DEBUG("count=%d\n", dma->buf_count); ++ ++ dev_priv->head.next = &dev_priv->tail; ++ dev_priv->head.prev = NULL; ++ dev_priv->head.buf = NULL; ++ ++ dev_priv->tail.next = NULL; ++ dev_priv->tail.prev = &dev_priv->head; ++ dev_priv->tail.buf = NULL; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ entry = buf->dev_private; ++ ++ SET_AGE(&entry->age, 0, 0); ++ entry->buf = buf; ++ ++ entry->next = dev_priv->head.next; ++ entry->prev = &dev_priv->head; ++ dev_priv->head.next->prev = entry; ++ dev_priv->head.next = entry; ++ } ++ ++ return 0; ++} ++ ++static struct drm_buf *savage_freelist_get(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_buf_priv_t *tail = dev_priv->tail.prev; ++ uint16_t event; ++ unsigned int wrap; ++ DRM_DEBUG("\n"); ++ ++ UPDATE_EVENT_COUNTER(); ++ if (dev_priv->status_ptr) ++ event = dev_priv->status_ptr[1] & 0xffff; ++ else ++ event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; ++ wrap = dev_priv->event_wrap; ++ if (event > dev_priv->event_counter) ++ wrap--; /* hardware hasn't passed the last wrap yet */ ++ ++ DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); ++ DRM_DEBUG(" head=0x%04x %d\n", event, wrap); ++ ++ if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || 
event == 0)) { ++ drm_savage_buf_priv_t *next = tail->next; ++ drm_savage_buf_priv_t *prev = tail->prev; ++ prev->next = next; ++ next->prev = prev; ++ tail->next = tail->prev = NULL; ++ return tail->buf; ++ } ++ ++ DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); ++ return NULL; ++} ++ ++void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; ++ ++ DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); ++ ++ if (entry->next != NULL || entry->prev != NULL) { ++ DRM_ERROR("entry already on freelist.\n"); ++ return; ++ } ++ ++ prev = &dev_priv->head; ++ next = prev->next; ++ prev->next = entry; ++ next->prev = entry; ++ entry->prev = prev; ++ entry->next = next; ++} ++ ++/* ++ * Command DMA ++ */ ++static int savage_dma_init(drm_savage_private_t *dev_priv) ++{ ++ unsigned int i; ++ ++ dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / ++ (SAVAGE_DMA_PAGE_SIZE*4); ++ dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * ++ dev_priv->nr_dma_pages, DRM_MEM_DRIVER); ++ if (dev_priv->dma_pages == NULL) ++ return -ENOMEM; ++ ++ for (i = 0; i < dev_priv->nr_dma_pages; ++i) { ++ SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); ++ dev_priv->dma_pages[i].used = 0; ++ dev_priv->dma_pages[i].flushed = 0; ++ } ++ SET_AGE(&dev_priv->last_dma_age, 0, 0); ++ ++ dev_priv->first_dma_page = 0; ++ dev_priv->current_dma_page = 0; ++ ++ return 0; ++} ++ ++void savage_dma_reset(drm_savage_private_t *dev_priv) ++{ ++ uint16_t event; ++ unsigned int wrap, i; ++ event = savage_bci_emit_event(dev_priv, 0); ++ wrap = dev_priv->event_wrap; ++ for (i = 0; i < dev_priv->nr_dma_pages; ++i) { ++ SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); ++ dev_priv->dma_pages[i].used = 0; ++ dev_priv->dma_pages[i].flushed = 0; ++ } ++ SET_AGE(&dev_priv->last_dma_age, event, wrap); ++ dev_priv->first_dma_page = dev_priv->current_dma_page = 0; ++} ++ ++void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page) ++{ ++ uint16_t event; ++ unsigned int wrap; ++ ++ /* Faked DMA buffer pages don't age. 
*/ ++ if (dev_priv->cmd_dma == &dev_priv->fake_dma) ++ return; ++ ++ UPDATE_EVENT_COUNTER(); ++ if (dev_priv->status_ptr) ++ event = dev_priv->status_ptr[1] & 0xffff; ++ else ++ event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; ++ wrap = dev_priv->event_wrap; ++ if (event > dev_priv->event_counter) ++ wrap--; /* hardware hasn't passed the last wrap yet */ ++ ++ if (dev_priv->dma_pages[page].age.wrap > wrap || ++ (dev_priv->dma_pages[page].age.wrap == wrap && ++ dev_priv->dma_pages[page].age.event > event)) { ++ if (dev_priv->wait_evnt(dev_priv, ++ dev_priv->dma_pages[page].age.event) ++ < 0) ++ DRM_ERROR("wait_evnt failed!\n"); ++ } ++} ++ ++uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) ++{ ++ unsigned int cur = dev_priv->current_dma_page; ++ unsigned int rest = SAVAGE_DMA_PAGE_SIZE - ++ dev_priv->dma_pages[cur].used; ++ unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / ++ SAVAGE_DMA_PAGE_SIZE; ++ uint32_t *dma_ptr; ++ unsigned int i; ++ ++ DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", ++ cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); ++ ++ if (cur + nr_pages < dev_priv->nr_dma_pages) { ++ dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + ++ cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; ++ if (n < rest) ++ rest = n; ++ dev_priv->dma_pages[cur].used += rest; ++ n -= rest; ++ cur++; ++ } else { ++ dev_priv->dma_flush(dev_priv); ++ nr_pages = ++ (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; ++ for (i = cur; i < dev_priv->nr_dma_pages; ++i) { ++ dev_priv->dma_pages[i].age = dev_priv->last_dma_age; ++ dev_priv->dma_pages[i].used = 0; ++ dev_priv->dma_pages[i].flushed = 0; ++ } ++ dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle; ++ dev_priv->first_dma_page = cur = 0; ++ } ++ for (i = cur; nr_pages > 0; ++i, --nr_pages) { ++#if SAVAGE_DMA_DEBUG ++ if (dev_priv->dma_pages[i].used) { ++ DRM_ERROR("unflushed page %u: used=%u\n", ++ i, dev_priv->dma_pages[i].used); ++ } ++#endif ++ if (n > SAVAGE_DMA_PAGE_SIZE) ++ dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE; ++ else ++ dev_priv->dma_pages[i].used = n; ++ n -= SAVAGE_DMA_PAGE_SIZE; ++ } ++ dev_priv->current_dma_page = --i; ++ ++ DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", ++ i, dev_priv->dma_pages[i].used, n); ++ ++ savage_dma_wait(dev_priv, dev_priv->current_dma_page); ++ ++ return dma_ptr; ++} ++ ++static void savage_dma_flush(drm_savage_private_t *dev_priv) ++{ ++ unsigned int first = dev_priv->first_dma_page; ++ unsigned int cur = dev_priv->current_dma_page; ++ uint16_t event; ++ unsigned int wrap, pad, align, len, i; ++ unsigned long phys_addr; ++ BCI_LOCALS; ++ ++ if (first == cur && ++ dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) ++ return; ++ ++ /* pad length to multiples of 2 entries ++ * align start of next DMA block to multiles of 8 entries */ ++ pad = -dev_priv->dma_pages[cur].used & 1; ++ align = -(dev_priv->dma_pages[cur].used + pad) & 7; ++ ++ DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " ++ "pad=%u, align=%u\n", ++ first, cur, dev_priv->dma_pages[first].flushed, ++ dev_priv->dma_pages[cur].used, pad, align); ++ ++ /* pad with noops */ ++ if (pad) { ++ uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + ++ cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; ++ dev_priv->dma_pages[cur].used += pad; ++ while (pad != 0) { ++ *dma_ptr++ = BCI_CMD_WAIT; ++ pad--; ++ } ++ } ++ ++ DRM_MEMORYBARRIER(); ++ ++ /* do flush ... 
*/ ++ phys_addr = dev_priv->cmd_dma->offset + ++ (first * SAVAGE_DMA_PAGE_SIZE + ++ dev_priv->dma_pages[first].flushed) * 4; ++ len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + ++ dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; ++ ++ DRM_DEBUG("phys_addr=%lx, len=%u\n", ++ phys_addr | dev_priv->dma_type, len); ++ ++ BEGIN_BCI(3); ++ BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); ++ BCI_WRITE(phys_addr | dev_priv->dma_type); ++ BCI_DMA(len); ++ ++ /* fix alignment of the start of the next block */ ++ dev_priv->dma_pages[cur].used += align; ++ ++ /* age DMA pages */ ++ event = savage_bci_emit_event(dev_priv, 0); ++ wrap = dev_priv->event_wrap; ++ for (i = first; i < cur; ++i) { ++ SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); ++ dev_priv->dma_pages[i].used = 0; ++ dev_priv->dma_pages[i].flushed = 0; ++ } ++ /* age the current page only when it's full */ ++ if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { ++ SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); ++ dev_priv->dma_pages[cur].used = 0; ++ dev_priv->dma_pages[cur].flushed = 0; ++ /* advance to next page */ ++ cur++; ++ if (cur == dev_priv->nr_dma_pages) ++ cur = 0; ++ dev_priv->first_dma_page = dev_priv->current_dma_page = cur; ++ } else { ++ dev_priv->first_dma_page = cur; ++ dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; ++ } ++ SET_AGE(&dev_priv->last_dma_age, event, wrap); ++ ++ DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, ++ dev_priv->dma_pages[cur].used, ++ dev_priv->dma_pages[cur].flushed); ++} ++ ++static void savage_fake_dma_flush(drm_savage_private_t *dev_priv) ++{ ++ unsigned int i, j; ++ BCI_LOCALS; ++ ++ if (dev_priv->first_dma_page == dev_priv->current_dma_page && ++ dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) ++ return; ++ ++ DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", ++ dev_priv->first_dma_page, dev_priv->current_dma_page, ++ dev_priv->dma_pages[dev_priv->current_dma_page].used); ++ ++ for (i = dev_priv->first_dma_page; ++ i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; ++ ++i) { ++ uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + ++ i * SAVAGE_DMA_PAGE_SIZE; ++#if SAVAGE_DMA_DEBUG ++ /* Sanity check: all pages except the last one must be full. */ ++ if (i < dev_priv->current_dma_page && ++ dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) { ++ DRM_ERROR("partial DMA page %u: used=%u", ++ i, dev_priv->dma_pages[i].used); ++ } ++#endif ++ BEGIN_BCI(dev_priv->dma_pages[i].used); ++ for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { ++ BCI_WRITE(dma_ptr[j]); ++ } ++ dev_priv->dma_pages[i].used = 0; ++ } ++ ++ /* reset to first page */ ++ dev_priv->first_dma_page = dev_priv->current_dma_page = 0; ++} ++ ++int savage_driver_load(struct drm_device *dev, unsigned long chipset) ++{ ++ drm_savage_private_t *dev_priv; ++ ++ dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_savage_private_t)); ++ dev->dev_private = (void *)dev_priv; ++ ++ dev_priv->chipset = (enum savage_family)chipset; ++ ++ return 0; ++} ++ ++/* ++ * Initalize mappings. On Savage4 and SavageIX the alignment ++ * and size of the aperture is not suitable for automatic MTRR setup ++ * in drm_addmap. Therefore we add them manually before the maps are ++ * initialized, and tear them down on last close. 
++ */ ++int savage_driver_firstopen(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ unsigned long mmio_base, fb_base, fb_size, aperture_base; ++ /* fb_rsrc and aper_rsrc aren't really used currently, but still exist ++ * in case we decide we need information on the BAR for BSD in the ++ * future. ++ */ ++ unsigned int fb_rsrc, aper_rsrc; ++ int ret = 0; ++ ++ dev_priv->mtrr[0].handle = -1; ++ dev_priv->mtrr[1].handle = -1; ++ dev_priv->mtrr[2].handle = -1; ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ fb_rsrc = 0; ++ fb_base = drm_get_resource_start(dev, 0); ++ fb_size = SAVAGE_FB_SIZE_S3; ++ mmio_base = fb_base + SAVAGE_FB_SIZE_S3; ++ aper_rsrc = 0; ++ aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; ++ /* this should always be true */ ++ if (drm_get_resource_len(dev, 0) == 0x08000000) { ++ /* Don't make MMIO write-cobining! We need 3 ++ * MTRRs. */ ++ dev_priv->mtrr[0].base = fb_base; ++ dev_priv->mtrr[0].size = 0x01000000; ++ dev_priv->mtrr[0].handle = ++ drm_mtrr_add(dev_priv->mtrr[0].base, ++ dev_priv->mtrr[0].size, DRM_MTRR_WC); ++ dev_priv->mtrr[1].base = fb_base + 0x02000000; ++ dev_priv->mtrr[1].size = 0x02000000; ++ dev_priv->mtrr[1].handle = ++ drm_mtrr_add(dev_priv->mtrr[1].base, ++ dev_priv->mtrr[1].size, DRM_MTRR_WC); ++ dev_priv->mtrr[2].base = fb_base + 0x04000000; ++ dev_priv->mtrr[2].size = 0x04000000; ++ dev_priv->mtrr[2].handle = ++ drm_mtrr_add(dev_priv->mtrr[2].base, ++ dev_priv->mtrr[2].size, DRM_MTRR_WC); ++ } else { ++ DRM_ERROR("strange pci_resource_len %08lx\n", ++ drm_get_resource_len(dev, 0)); ++ } ++ } else if (dev_priv->chipset != S3_SUPERSAVAGE && ++ dev_priv->chipset != S3_SAVAGE2000) { ++ mmio_base = drm_get_resource_start(dev, 0); ++ fb_rsrc = 1; ++ fb_base = drm_get_resource_start(dev, 1); ++ fb_size = SAVAGE_FB_SIZE_S4; ++ aper_rsrc = 1; ++ aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; ++ /* this should always be true */ ++ if (drm_get_resource_len(dev, 1) == 0x08000000) { ++ /* Can use one MTRR to cover both fb and ++ * aperture. */ ++ dev_priv->mtrr[0].base = fb_base; ++ dev_priv->mtrr[0].size = 0x08000000; ++ dev_priv->mtrr[0].handle = ++ drm_mtrr_add(dev_priv->mtrr[0].base, ++ dev_priv->mtrr[0].size, DRM_MTRR_WC); ++ } else { ++ DRM_ERROR("strange pci_resource_len %08lx\n", ++ drm_get_resource_len(dev, 1)); ++ } ++ } else { ++ mmio_base = drm_get_resource_start(dev, 0); ++ fb_rsrc = 1; ++ fb_base = drm_get_resource_start(dev, 1); ++ fb_size = drm_get_resource_len(dev, 1); ++ aper_rsrc = 2; ++ aperture_base = drm_get_resource_start(dev, 2); ++ /* Automatic MTRR setup will do the right thing. */ ++ } ++ ++ ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, ++ _DRM_READ_ONLY, &dev_priv->mmio); ++ if (ret) ++ return ret; ++ ++ ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, ++ _DRM_WRITE_COMBINING, &dev_priv->fb); ++ if (ret) ++ return ret; ++ ++ ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, ++ _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, ++ &dev_priv->aperture); ++ if (ret) ++ return ret; ++ ++ return ret; ++} ++ ++/* ++ * Delete MTRRs and free device-private data. 
++ */ ++void savage_driver_lastclose(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ for (i = 0; i < 3; ++i) ++ if (dev_priv->mtrr[i].handle >= 0) ++ drm_mtrr_del(dev_priv->mtrr[i].handle, ++ dev_priv->mtrr[i].base, ++ dev_priv->mtrr[i].size, DRM_MTRR_WC); ++} ++ ++int savage_driver_unload(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ ++ drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); ++ ++ return 0; ++} ++ ++static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ ++ if (init->fb_bpp != 16 && init->fb_bpp != 32) { ++ DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); ++ return -EINVAL; ++ } ++ if (init->depth_bpp != 16 && init->depth_bpp != 32) { ++ DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); ++ return -EINVAL; ++ } ++ if (init->dma_type != SAVAGE_DMA_AGP && ++ init->dma_type != SAVAGE_DMA_PCI) { ++ DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); ++ return -EINVAL; ++ } ++ ++ dev_priv->cob_size = init->cob_size; ++ dev_priv->bci_threshold_lo = init->bci_threshold_lo; ++ dev_priv->bci_threshold_hi = init->bci_threshold_hi; ++ dev_priv->dma_type = init->dma_type; ++ ++ dev_priv->fb_bpp = init->fb_bpp; ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ dev_priv->depth_bpp = init->depth_bpp; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ dev_priv->texture_offset = init->texture_offset; ++ dev_priv->texture_size = init->texture_size; ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("could not find sarea!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ if (init->status_offset != 0) { ++ dev_priv->status = drm_core_findmap(dev, init->status_offset); ++ if (!dev_priv->status) { ++ DRM_ERROR("could not find shadow status region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ } else { ++ dev_priv->status = NULL; ++ } ++ if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, ++ init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("could not find DMA buffer region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("failed to ioremap DMA buffer region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ } ++ if (init->agp_textures_offset) { ++ dev_priv->agp_textures = ++ drm_core_findmap(dev, init->agp_textures_offset); ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("could not find agp texture region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ } else { ++ dev_priv->agp_textures = NULL; ++ } ++ ++ if (init->cmd_dma_offset) { ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ DRM_ERROR("command DMA not supported on " ++ "Savage3D/MX/IX.\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ if (dev->dma && dev->dma->buflist) { ++ DRM_ERROR("command and vertex DMA not supported " ++ "at the same time.\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); ++ if (!dev_priv->cmd_dma) { ++ DRM_ERROR("could 
not find command DMA region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ if (dev_priv->dma_type == SAVAGE_DMA_AGP) { ++ if (dev_priv->cmd_dma->type != _DRM_AGP) { ++ DRM_ERROR("AGP command DMA region is not a " ++ "_DRM_AGP map!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ drm_core_ioremap(dev_priv->cmd_dma, dev); ++ if (!dev_priv->cmd_dma->handle) { ++ DRM_ERROR("failed to ioremap command " ++ "DMA region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { ++ DRM_ERROR("PCI command DMA region is not a " ++ "_DRM_CONSISTENT map!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ } else { ++ dev_priv->cmd_dma = NULL; ++ } ++ ++ dev_priv->dma_flush = savage_dma_flush; ++ if (!dev_priv->cmd_dma) { ++ DRM_DEBUG("falling back to faked command DMA.\n"); ++ dev_priv->fake_dma.offset = 0; ++ dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; ++ dev_priv->fake_dma.type = _DRM_SHM; ++ dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE, ++ DRM_MEM_DRIVER); ++ if (!dev_priv->fake_dma.handle) { ++ DRM_ERROR("could not allocate faked DMA buffer!\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ dev_priv->cmd_dma = &dev_priv->fake_dma; ++ dev_priv->dma_flush = savage_fake_dma_flush; ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++ /* setup bitmap descriptors */ ++ { ++ unsigned int color_tile_format; ++ unsigned int depth_tile_format; ++ unsigned int front_stride, back_stride, depth_stride; ++ if (dev_priv->chipset <= S3_SAVAGE4) { ++ color_tile_format = dev_priv->fb_bpp == 16 ? ++ SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; ++ depth_tile_format = dev_priv->depth_bpp == 16 ? 
++ SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; ++ } else { ++ color_tile_format = SAVAGE_BD_TILE_DEST; ++ depth_tile_format = SAVAGE_BD_TILE_DEST; ++ } ++ front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); ++ back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); ++ depth_stride = ++ dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); ++ ++ dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | ++ (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | ++ (color_tile_format << SAVAGE_BD_TILE_SHIFT); ++ ++ dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE | ++ (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | ++ (color_tile_format << SAVAGE_BD_TILE_SHIFT); ++ ++ dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | ++ (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | ++ (depth_tile_format << SAVAGE_BD_TILE_SHIFT); ++ } ++ ++ /* setup status and bci ptr */ ++ dev_priv->event_counter = 0; ++ dev_priv->event_wrap = 0; ++ dev_priv->bci_ptr = (volatile uint32_t *) ++ ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; ++ } else { ++ dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4; ++ } ++ if (dev_priv->status != NULL) { ++ dev_priv->status_ptr = ++ (volatile uint32_t *)dev_priv->status->handle; ++ dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; ++ dev_priv->wait_evnt = savage_bci_wait_event_shadow; ++ dev_priv->status_ptr[1023] = dev_priv->event_counter; ++ } else { ++ dev_priv->status_ptr = NULL; ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ dev_priv->wait_fifo = savage_bci_wait_fifo_s3d; ++ } else { ++ dev_priv->wait_fifo = savage_bci_wait_fifo_s4; ++ } ++ dev_priv->wait_evnt = savage_bci_wait_event_reg; ++ } ++ ++ /* cliprect functions */ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) ++ dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d; ++ else ++ dev_priv->emit_clip_rect = savage_emit_clip_rect_s4; ++ ++ if (savage_freelist_init(dev) < 0) { ++ DRM_ERROR("could not initialize freelist\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ ++ if (savage_dma_init(dev_priv) < 0) { ++ DRM_ERROR("could not initialize command DMA\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int savage_do_cleanup_bci(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->cmd_dma == &dev_priv->fake_dma) { ++ if (dev_priv->fake_dma.handle) ++ drm_free(dev_priv->fake_dma.handle, ++ SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER); ++ } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && ++ dev_priv->cmd_dma->type == _DRM_AGP && ++ dev_priv->dma_type == SAVAGE_DMA_AGP) ++ drm_core_ioremapfree(dev_priv->cmd_dma, dev); ++ ++ if (dev_priv->dma_type == SAVAGE_DMA_AGP && ++ dev->agp_buffer_map && dev->agp_buffer_map->handle) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ /* make sure the next instance (which may be running ++ * in PCI mode) doesn't try to use an old ++ * agp_buffer_map. 
*/ ++ dev->agp_buffer_map = NULL; ++ } ++ ++ if (dev_priv->dma_pages) ++ drm_free(dev_priv->dma_pages, ++ sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages, ++ DRM_MEM_DRIVER); ++ ++ return 0; ++} ++ ++static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_init_t *init = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case SAVAGE_INIT_BCI: ++ return savage_do_init_bci(dev, init); ++ case SAVAGE_CLEANUP_BCI: ++ return savage_do_cleanup_bci(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_event_emit_t *event = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ event->count = savage_bci_emit_event(dev_priv, event->flags); ++ event->count |= dev_priv->event_wrap << 16; ++ ++ return 0; ++} ++ ++static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_event_wait_t *event = data; ++ unsigned int event_e, hw_e; ++ unsigned int event_w, hw_w; ++ ++ DRM_DEBUG("\n"); ++ ++ UPDATE_EVENT_COUNTER(); ++ if (dev_priv->status_ptr) ++ hw_e = dev_priv->status_ptr[1] & 0xffff; ++ else ++ hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; ++ hw_w = dev_priv->event_wrap; ++ if (hw_e > dev_priv->event_counter) ++ hw_w--; /* hardware hasn't passed the last wrap yet */ ++ ++ event_e = event->count & 0xffff; ++ event_w = event->count >> 16; ++ ++ /* Don't need to wait if ++ * - event counter wrapped since the event was emitted or ++ * - the hardware has advanced up to or over the event to wait for. ++ */ ++ if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) ++ return 0; ++ else ++ return dev_priv->wait_evnt(dev_priv, event_e); ++} ++ ++/* ++ * DMA buffer management ++ */ ++ ++static int savage_bci_get_buffers(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_dma *d) ++{ ++ struct drm_buf *buf; ++ int i; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = savage_freelist_get(dev); ++ if (!buf) ++ return -EAGAIN; ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], ++ &buf->idx, sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], ++ &buf->total, sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_dma *d = data; ++ int ret = 0; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. 
++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ return -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = savage_bci_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} ++ ++void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ if (!dma) ++ return; ++ if (!dev_priv) ++ return; ++ if (!dma->buflist) ++ return; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_savage_buf_priv_t *buf_priv = buf->dev_private; ++ ++ if (buf->file_priv == file_priv && buf_priv && ++ buf_priv->next == NULL && buf_priv->prev == NULL) { ++ uint16_t event; ++ DRM_DEBUG("reclaimed from client\n"); ++ event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); ++ SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); ++ savage_freelist_put(dev, buf); ++ } ++ } ++ ++ drm_core_reclaim_buffers(dev, file_priv); ++} ++ ++struct drm_ioctl_desc savage_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), ++}; ++ ++int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,209 @@ ++/* savage_drm.h -- Public header for the savage driver ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __SAVAGE_DRM_H__ ++#define __SAVAGE_DRM_H__ ++ ++#ifndef __SAVAGE_SAREA_DEFINES__ ++#define __SAVAGE_SAREA_DEFINES__ ++ ++/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 ++ * regions, subject to a minimum region size of (1<<16) == 64k. 
++ * ++ * Clients may subdivide regions internally, but when sharing between ++ * clients, the region size is the minimum granularity. ++ */ ++ ++#define SAVAGE_CARD_HEAP 0 ++#define SAVAGE_AGP_HEAP 1 ++#define SAVAGE_NR_TEX_HEAPS 2 ++#define SAVAGE_NR_TEX_REGIONS 16 ++#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16 ++ ++#endif /* __SAVAGE_SAREA_DEFINES__ */ ++ ++typedef struct _drm_savage_sarea { ++ /* LRU lists for texture memory in agp space and on the card. ++ */ ++ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; ++ unsigned int texAge[SAVAGE_NR_TEX_HEAPS]; ++ ++ /* Mechanism to validate card state. ++ */ ++ int ctxOwner; ++} drm_savage_sarea_t, *drm_savage_sarea_ptr; ++ ++/* Savage-specific ioctls ++ */ ++#define DRM_SAVAGE_BCI_INIT 0x00 ++#define DRM_SAVAGE_BCI_CMDBUF 0x01 ++#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 ++#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 ++ ++#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) ++#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) ++#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) ++#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) ++ ++#define SAVAGE_DMA_PCI 1 ++#define SAVAGE_DMA_AGP 3 ++typedef struct drm_savage_init { ++ enum { ++ SAVAGE_INIT_BCI = 1, ++ SAVAGE_CLEANUP_BCI = 2 ++ } func; ++ unsigned int sarea_priv_offset; ++ ++ /* some parameters */ ++ unsigned int cob_size; ++ unsigned int bci_threshold_lo, bci_threshold_hi; ++ unsigned int dma_type; ++ ++ /* frame buffer layout */ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ /* local textures */ ++ unsigned int texture_offset; ++ unsigned int texture_size; ++ ++ /* physical locations of non-permanent maps */ ++ unsigned long status_offset; ++ unsigned long buffers_offset; ++ unsigned long agp_textures_offset; ++ unsigned long cmd_dma_offset; ++} drm_savage_init_t; ++ ++typedef union drm_savage_cmd_header drm_savage_cmd_header_t; ++typedef struct drm_savage_cmdbuf { ++ /* command buffer in client's address space */ ++ drm_savage_cmd_header_t __user *cmd_addr; ++ unsigned int size; /* size of the command buffer in 64bit units */ ++ ++ unsigned int dma_idx; /* DMA buffer index to use */ ++ int discard; /* discard DMA buffer when done */ ++ /* vertex buffer in client's address space */ ++ unsigned int __user *vb_addr; ++ unsigned int vb_size; /* size of client vertex buffer in bytes */ ++ unsigned int vb_stride; /* stride of vertices in 32bit words */ ++ /* boxes in client's address space */ ++ struct drm_clip_rect __user *box_addr; ++ unsigned int nbox; /* number of clipping boxes */ ++} drm_savage_cmdbuf_t; ++ ++#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */ ++#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */ ++#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */ ++typedef struct drm_savage_event { ++ unsigned int count; ++ unsigned int flags; ++} drm_savage_event_emit_t, drm_savage_event_wait_t; ++ ++/* Commands for the cmdbuf ioctl ++ */ ++#define SAVAGE_CMD_STATE 0 /* a range of state registers */ ++#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */ ++#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */ 
++#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */ ++#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices client vertex buffer */ ++#define SAVAGE_CMD_CLEAR 5 /* clear buffers */ ++#define SAVAGE_CMD_SWAP 6 /* swap buffers */ ++ ++/* Primitive types ++*/ ++#define SAVAGE_PRIM_TRILIST 0 /* triangle list */ ++#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */ ++#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */ ++#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat ++ * shading on s3d */ ++ ++/* Skip flags (vertex format) ++ */ ++#define SAVAGE_SKIP_Z 0x01 ++#define SAVAGE_SKIP_W 0x02 ++#define SAVAGE_SKIP_C0 0x04 ++#define SAVAGE_SKIP_C1 0x08 ++#define SAVAGE_SKIP_S0 0x10 ++#define SAVAGE_SKIP_T0 0x20 ++#define SAVAGE_SKIP_ST0 0x30 ++#define SAVAGE_SKIP_S1 0x40 ++#define SAVAGE_SKIP_T1 0x80 ++#define SAVAGE_SKIP_ST1 0xc0 ++#define SAVAGE_SKIP_ALL_S3D 0x3f ++#define SAVAGE_SKIP_ALL_S4 0xff ++ ++/* Buffer names for clear command ++ */ ++#define SAVAGE_FRONT 0x1 ++#define SAVAGE_BACK 0x2 ++#define SAVAGE_DEPTH 0x4 ++ ++/* 64-bit command header ++ */ ++union drm_savage_cmd_header { ++ struct { ++ unsigned char cmd; /* command */ ++ unsigned char pad0; ++ unsigned short pad1; ++ unsigned short pad2; ++ unsigned short pad3; ++ } cmd; /* generic */ ++ struct { ++ unsigned char cmd; ++ unsigned char global; /* need idle engine? */ ++ unsigned short count; /* number of consecutive registers */ ++ unsigned short start; /* first register */ ++ unsigned short pad3; ++ } state; /* SAVAGE_CMD_STATE */ ++ struct { ++ unsigned char cmd; ++ unsigned char prim; /* primitive type */ ++ unsigned short skip; /* vertex format (skip flags) */ ++ unsigned short count; /* number of vertices */ ++ unsigned short start; /* first vertex in DMA/vertex buffer */ ++ } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */ ++ struct { ++ unsigned char cmd; ++ unsigned char prim; ++ unsigned short skip; ++ unsigned short count; /* number of indices that follow */ ++ unsigned short pad3; ++ } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */ ++ struct { ++ unsigned char cmd; ++ unsigned char pad0; ++ unsigned short pad1; ++ unsigned int flags; ++ } clear0; /* SAVAGE_CMD_CLEAR */ ++ struct { ++ unsigned int mask; ++ unsigned int value; ++ } clear1; /* SAVAGE_CMD_CLEAR data */ ++}; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,96 @@ ++/* savage_drv.c -- Savage driver for Linux ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "savage_drm.h" ++#include "savage_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ savage_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | ++ DRIVER_HAVE_DMA | DRIVER_PCI_DMA, ++ .dev_priv_size = sizeof(drm_savage_buf_priv_t), ++ .load = savage_driver_load, ++ .firstopen = savage_driver_firstopen, ++ .lastclose = savage_driver_lastclose, ++ .unload = savage_driver_unload, ++ .reclaim_buffers = savage_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = savage_ioctls, ++ .dma_ioctl = savage_bci_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init savage_init(void) ++{ ++ driver.num_ioctls = savage_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit savage_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(savage_init); ++module_exit(savage_exit); ++ ++MODULE_AUTHOR( DRIVER_AUTHOR ); ++MODULE_DESCRIPTION( DRIVER_DESC ); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,575 @@ ++/* savage_drv.h -- Private header for the savage driver */ ++/* ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __SAVAGE_DRV_H__ ++#define __SAVAGE_DRV_H__ ++ ++#define DRIVER_AUTHOR "Felix Kuehling" ++ ++#define DRIVER_NAME "savage" ++#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]" ++#define DRIVER_DATE "20050313" ++ ++#define DRIVER_MAJOR 2 ++#define DRIVER_MINOR 4 ++#define DRIVER_PATCHLEVEL 1 ++/* Interface history: ++ * ++ * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy ++ * 2.0 The first real DRM ++ * 2.1 Scissors registers managed by the DRM, 3D operations clipped by ++ * cliprects of the cmdbuf ioctl ++ * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX ++ * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits ++ * wide and thus very long lived (unlikely to ever wrap). The size ++ * in the struct was 32 bits before, but only 16 bits were used ++ * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is ++ * actually used ++ */ ++ ++typedef struct drm_savage_age { ++ uint16_t event; ++ unsigned int wrap; ++} drm_savage_age_t; ++ ++typedef struct drm_savage_buf_priv { ++ struct drm_savage_buf_priv *next; ++ struct drm_savage_buf_priv *prev; ++ drm_savage_age_t age; ++ struct drm_buf *buf; ++} drm_savage_buf_priv_t; ++ ++typedef struct drm_savage_dma_page { ++ drm_savage_age_t age; ++ unsigned int used, flushed; ++} drm_savage_dma_page_t; ++#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */ ++/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command ++ * size of 16kbytes or 4k entries. Minimum requirement would be ++ * 10kbytes for 255 40-byte vertices in one drawing command. */ ++#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4) ++ ++/* interesting bits of hardware state that are saved in dev_priv */ ++typedef union { ++ struct drm_savage_common_state { ++ uint32_t vbaddr; ++ } common; ++ struct { ++ unsigned char pad[sizeof(struct drm_savage_common_state)]; ++ uint32_t texctrl, texaddr; ++ uint32_t scstart, new_scstart; ++ uint32_t scend, new_scend; ++ } s3d; ++ struct { ++ unsigned char pad[sizeof(struct drm_savage_common_state)]; ++ uint32_t texdescr, texaddr0, texaddr1; ++ uint32_t drawctrl0, new_drawctrl0; ++ uint32_t drawctrl1, new_drawctrl1; ++ } s4; ++} drm_savage_state_t; ++ ++/* these chip tags should match the ones in the 2D driver in savage_regs.h. 
*/ ++enum savage_family { ++ S3_UNKNOWN = 0, ++ S3_SAVAGE3D, ++ S3_SAVAGE_MX, ++ S3_SAVAGE4, ++ S3_PROSAVAGE, ++ S3_TWISTER, ++ S3_PROSAVAGEDDR, ++ S3_SUPERSAVAGE, ++ S3_SAVAGE2000, ++ S3_LAST ++}; ++ ++extern struct drm_ioctl_desc savage_ioctls[]; ++extern int savage_max_ioctl; ++ ++#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) ++ ++#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \ ++ || (chip==S3_PROSAVAGE) \ ++ || (chip==S3_TWISTER) \ ++ || (chip==S3_PROSAVAGEDDR)) ++ ++#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) ++ ++#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) ++ ++#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \ ++ ||(chip==S3_PROSAVAGEDDR)) ++ ++/* flags */ ++#define SAVAGE_IS_AGP 1 ++ ++typedef struct drm_savage_private { ++ drm_savage_sarea_t *sarea_priv; ++ ++ drm_savage_buf_priv_t head, tail; ++ ++ /* who am I? */ ++ enum savage_family chipset; ++ ++ unsigned int cob_size; ++ unsigned int bci_threshold_lo, bci_threshold_hi; ++ unsigned int dma_type; ++ ++ /* frame buffer layout */ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ /* bitmap descriptors for swap and clear */ ++ unsigned int front_bd, back_bd, depth_bd; ++ ++ /* local textures */ ++ unsigned int texture_offset; ++ unsigned int texture_size; ++ ++ /* memory regions in physical memory */ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio; ++ drm_local_map_t *fb; ++ drm_local_map_t *aperture; ++ drm_local_map_t *status; ++ drm_local_map_t *agp_textures; ++ drm_local_map_t *cmd_dma; ++ drm_local_map_t fake_dma; ++ ++ struct { ++ int handle; ++ unsigned long base, size; ++ } mtrr[3]; ++ ++ /* BCI and status-related stuff */ ++ volatile uint32_t *status_ptr, *bci_ptr; ++ uint32_t status_used_mask; ++ uint16_t event_counter; ++ unsigned int event_wrap; ++ ++ /* Savage4 command DMA */ ++ drm_savage_dma_page_t *dma_pages; ++ unsigned int nr_dma_pages, first_dma_page, current_dma_page; ++ drm_savage_age_t last_dma_age; ++ ++ /* saved hw state for global/local check on S3D */ ++ uint32_t hw_draw_ctrl, hw_zbuf_ctrl; ++ /* and for scissors (global, so don't emit if not changed) */ ++ uint32_t hw_scissors_start, hw_scissors_end; ++ ++ drm_savage_state_t state; ++ ++ /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */ ++ unsigned int waiting; ++ ++ /* config/hardware-dependent function pointers */ ++ int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n); ++ int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e); ++ /* Err, there is a macro wait_event in include/linux/wait.h. ++ * Avoid unwanted macro expansion. 
*/ ++ void (*emit_clip_rect)(struct drm_savage_private *dev_priv, ++ const struct drm_clip_rect *pbox); ++ void (*dma_flush)(struct drm_savage_private *dev_priv); ++} drm_savage_private_t; ++ ++/* ioctls */ ++extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); ++ ++/* BCI functions */ ++extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, ++ unsigned int flags); ++extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf); ++extern void savage_dma_reset(drm_savage_private_t *dev_priv); ++extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); ++extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, ++ unsigned int n); ++extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); ++extern int savage_driver_firstopen(struct drm_device *dev); ++extern void savage_driver_lastclose(struct drm_device *dev); ++extern int savage_driver_unload(struct drm_device *dev); ++extern void savage_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *file_priv); ++ ++/* state functions */ ++extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox); ++extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox); ++ ++#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ ++#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ ++#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */ ++#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */ ++#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */ ++ ++#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region ++ * inside the MMIO region */ ++#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip ++ * BCI FIFO */ ++ ++/* ++ * MMIO registers ++ */ ++#define SAVAGE_STATUS_WORD0 0x48C00 ++#define SAVAGE_STATUS_WORD1 0x48C04 ++#define SAVAGE_ALT_STATUS_WORD0 0x48C60 ++ ++#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff ++#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff ++ ++/* Copied from savage_bci.h in the 2D driver with some renaming. 
*/ ++ ++/* Bitmap descriptors */ ++#define SAVAGE_BD_STRIDE_SHIFT 0 ++#define SAVAGE_BD_BPP_SHIFT 16 ++#define SAVAGE_BD_TILE_SHIFT 24 ++#define SAVAGE_BD_BW_DISABLE (1<<28) ++/* common: */ ++#define SAVAGE_BD_TILE_LINEAR 0 ++/* savage4, MX, IX, 3D */ ++#define SAVAGE_BD_TILE_16BPP 2 ++#define SAVAGE_BD_TILE_32BPP 3 ++/* twister, prosavage, DDR, supersavage, 2000 */ ++#define SAVAGE_BD_TILE_DEST 1 ++#define SAVAGE_BD_TILE_TEXTURE 2 ++/* GBD - BCI enable */ ++/* savage4, MX, IX, 3D */ ++#define SAVAGE_GBD_BCI_ENABLE 8 ++/* twister, prosavage, DDR, supersavage, 2000 */ ++#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0 ++ ++#define SAVAGE_GBD_BIG_ENDIAN 4 ++#define SAVAGE_GBD_LITTLE_ENDIAN 0 ++#define SAVAGE_GBD_64 1 ++ ++/* Global Bitmap Descriptor */ ++#define SAVAGE_BCI_GLB_BD_LOW 0x8168 ++#define SAVAGE_BCI_GLB_BD_HIGH 0x816C ++ ++/* ++ * BCI registers ++ */ ++/* Savage4/Twister/ProSavage 3D registers */ ++#define SAVAGE_DRAWLOCALCTRL_S4 0x1e ++#define SAVAGE_TEXPALADDR_S4 0x1f ++#define SAVAGE_TEXCTRL0_S4 0x20 ++#define SAVAGE_TEXCTRL1_S4 0x21 ++#define SAVAGE_TEXADDR0_S4 0x22 ++#define SAVAGE_TEXADDR1_S4 0x23 ++#define SAVAGE_TEXBLEND0_S4 0x24 ++#define SAVAGE_TEXBLEND1_S4 0x25 ++#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */ ++#define SAVAGE_TEXDESCR_S4 0x27 ++#define SAVAGE_FOGTABLE_S4 0x28 ++#define SAVAGE_FOGCTRL_S4 0x30 ++#define SAVAGE_STENCILCTRL_S4 0x31 ++#define SAVAGE_ZBUFCTRL_S4 0x32 ++#define SAVAGE_ZBUFOFF_S4 0x33 ++#define SAVAGE_DESTCTRL_S4 0x34 ++#define SAVAGE_DRAWCTRL0_S4 0x35 ++#define SAVAGE_DRAWCTRL1_S4 0x36 ++#define SAVAGE_ZWATERMARK_S4 0x37 ++#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38 ++#define SAVAGE_TEXBLENDCOLOR_S4 0x39 ++/* Savage3D/MX/IX 3D registers */ ++#define SAVAGE_TEXPALADDR_S3D 0x18 ++#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */ ++#define SAVAGE_TEXADDR_S3D 0x1A ++#define SAVAGE_TEXDESCR_S3D 0x1B ++#define SAVAGE_TEXCTRL_S3D 0x1C ++#define SAVAGE_FOGTABLE_S3D 0x20 ++#define SAVAGE_FOGCTRL_S3D 0x30 ++#define SAVAGE_DRAWCTRL_S3D 0x31 ++#define SAVAGE_ZBUFCTRL_S3D 0x32 ++#define SAVAGE_ZBUFOFF_S3D 0x33 ++#define SAVAGE_DESTCTRL_S3D 0x34 ++#define SAVAGE_SCSTART_S3D 0x35 ++#define SAVAGE_SCEND_S3D 0x36 ++#define SAVAGE_ZWATERMARK_S3D 0x37 ++#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38 ++/* common stuff */ ++#define SAVAGE_VERTBUFADDR 0x3e ++#define SAVAGE_BITPLANEWTMASK 0xd7 ++#define SAVAGE_DMABUFADDR 0x51 ++ ++/* texture enable bits (needed for tex addr checking) */ ++#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */ ++#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */ ++#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */ ++ ++/* Global fields in Savage4/Twister/ProSavage 3D registers: ++ * ++ * All texture registers and DrawLocalCtrl are local. All other ++ * registers are global. */ ++ ++/* Global fields in Savage3D/MX/IX 3D registers: ++ * ++ * All texture registers are local. DrawCtrl and ZBufCtrl are ++ * partially local. All other registers are global. 
++ * ++ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal ++ * ZBufCtrl global fields: zCmpFunc, zBufEn ++ */ ++#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c ++#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027 ++ ++/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d) ++ */ ++#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff ++#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff ++ ++/* ++ * BCI commands ++ */ ++#define BCI_CMD_NOP 0x40000000 ++#define BCI_CMD_RECT 0x48000000 ++#define BCI_CMD_RECT_XP 0x01000000 ++#define BCI_CMD_RECT_YP 0x02000000 ++#define BCI_CMD_SCANLINE 0x50000000 ++#define BCI_CMD_LINE 0x5C000000 ++#define BCI_CMD_LINE_LAST_PIXEL 0x58000000 ++#define BCI_CMD_BYTE_TEXT 0x63000000 ++#define BCI_CMD_NT_BYTE_TEXT 0x67000000 ++#define BCI_CMD_BIT_TEXT 0x6C000000 ++#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF) ++#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16)) ++#define BCI_CMD_SEND_COLOR 0x00008000 ++ ++#define BCI_CMD_CLIP_NONE 0x00000000 ++#define BCI_CMD_CLIP_CURRENT 0x00002000 ++#define BCI_CMD_CLIP_LR 0x00004000 ++#define BCI_CMD_CLIP_NEW 0x00006000 ++ ++#define BCI_CMD_DEST_GBD 0x00000000 ++#define BCI_CMD_DEST_PBD 0x00000800 ++#define BCI_CMD_DEST_PBD_NEW 0x00000C00 ++#define BCI_CMD_DEST_SBD 0x00001000 ++#define BCI_CMD_DEST_SBD_NEW 0x00001400 ++ ++#define BCI_CMD_SRC_TRANSPARENT 0x00000200 ++#define BCI_CMD_SRC_SOLID 0x00000000 ++#define BCI_CMD_SRC_GBD 0x00000020 ++#define BCI_CMD_SRC_COLOR 0x00000040 ++#define BCI_CMD_SRC_MONO 0x00000060 ++#define BCI_CMD_SRC_PBD_COLOR 0x00000080 ++#define BCI_CMD_SRC_PBD_MONO 0x000000A0 ++#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0 ++#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0 ++#define BCI_CMD_SRC_SBD_COLOR 0x00000100 ++#define BCI_CMD_SRC_SBD_MONO 0x00000120 ++#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140 ++#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160 ++ ++#define BCI_CMD_PAT_TRANSPARENT 0x00000010 ++#define BCI_CMD_PAT_NONE 0x00000000 ++#define BCI_CMD_PAT_COLOR 0x00000002 ++#define BCI_CMD_PAT_MONO 0x00000003 ++#define BCI_CMD_PAT_PBD_COLOR 0x00000004 ++#define BCI_CMD_PAT_PBD_MONO 0x00000005 ++#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006 ++#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007 ++#define BCI_CMD_PAT_SBD_COLOR 0x00000008 ++#define BCI_CMD_PAT_SBD_MONO 0x00000009 ++#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A ++#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B ++ ++#define BCI_BD_BW_DISABLE 0x10000000 ++#define BCI_BD_TILE_MASK 0x03000000 ++#define BCI_BD_TILE_NONE 0x00000000 ++#define BCI_BD_TILE_16 0x02000000 ++#define BCI_BD_TILE_32 0x03000000 ++#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF) ++#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16)) ++#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF) ++#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF)) ++ ++#define BCI_CMD_SET_REGISTER 0x96000000 ++ ++#define BCI_CMD_WAIT 0xC0000000 ++#define BCI_CMD_WAIT_3D 0x00010000 ++#define BCI_CMD_WAIT_2D 0x00020000 ++ ++#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000 ++ ++#define BCI_CMD_DRAW_PRIM 0x80000000 ++#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000 ++#define BCI_CMD_DRAW_CONT 0x01000000 ++#define BCI_CMD_DRAW_TRILIST 0x00000000 ++#define BCI_CMD_DRAW_TRISTRIP 0x02000000 ++#define BCI_CMD_DRAW_TRIFAN 0x04000000 ++#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff ++#define BCI_CMD_DRAW_NO_Z 0x00000001 ++#define BCI_CMD_DRAW_NO_W 0x00000002 ++#define BCI_CMD_DRAW_NO_CD 0x00000004 ++#define BCI_CMD_DRAW_NO_CS 0x00000008 ++#define BCI_CMD_DRAW_NO_U0 0x00000010 ++#define 
BCI_CMD_DRAW_NO_V0 0x00000020 ++#define BCI_CMD_DRAW_NO_UV0 0x00000030 ++#define BCI_CMD_DRAW_NO_U1 0x00000040 ++#define BCI_CMD_DRAW_NO_V1 0x00000080 ++#define BCI_CMD_DRAW_NO_UV1 0x000000c0 ++ ++#define BCI_CMD_DMA 0xa8000000 ++ ++#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF) ++#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF) ++#define BCI_X_W(x, y) ((((w) << 16) | (x)) & 0x0FFF0FFF) ++#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF) ++#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF) ++#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF) ++ ++#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF)) ++#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF)) ++#define BCI_LINE_MISC(maj, ym, xp, yp, err) \ ++ (((maj) & 0x1FFF) | \ ++ ((ym) ? 1<<13 : 0) | \ ++ ((xp) ? 1<<14 : 0) | \ ++ ((yp) ? 1<<15 : 0) | \ ++ ((err) << 16)) ++ ++/* ++ * common commands ++ */ ++#define BCI_SET_REGISTERS( first, n ) \ ++ BCI_WRITE(BCI_CMD_SET_REGISTER | \ ++ ((uint32_t)(n) & 0xff) << 16 | \ ++ ((uint32_t)(first) & 0xffff)) ++#define DMA_SET_REGISTERS( first, n ) \ ++ DMA_WRITE(BCI_CMD_SET_REGISTER | \ ++ ((uint32_t)(n) & 0xff) << 16 | \ ++ ((uint32_t)(first) & 0xffff)) ++ ++#define BCI_DRAW_PRIMITIVE(n, type, skip) \ ++ BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \ ++ ((n) << 16)) ++#define DMA_DRAW_PRIMITIVE(n, type, skip) \ ++ DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \ ++ ((n) << 16)) ++ ++#define BCI_DRAW_INDICES_S3D(n, type, i0) \ ++ BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \ ++ ((n) << 16) | (i0)) ++ ++#define BCI_DRAW_INDICES_S4(n, type, skip) \ ++ BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \ ++ (skip) | ((n) << 16)) ++ ++#define BCI_DMA(n) \ ++ BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1)) ++ ++/* ++ * access to MMIO ++ */ ++#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) ) ++ ++/* ++ * access to the burst command interface (BCI) ++ */ ++#define SAVAGE_BCI_DEBUG 1 ++ ++#define BCI_LOCALS volatile uint32_t *bci_ptr; ++ ++#define BEGIN_BCI( n ) do { \ ++ dev_priv->wait_fifo(dev_priv, (n)); \ ++ bci_ptr = dev_priv->bci_ptr; \ ++} while(0) ++ ++#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) ++ ++/* ++ * command DMA support ++ */ ++#define SAVAGE_DMA_DEBUG 1 ++ ++#define DMA_LOCALS uint32_t *dma_ptr; ++ ++#define BEGIN_DMA( n ) do { \ ++ unsigned int cur = dev_priv->current_dma_page; \ ++ unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \ ++ dev_priv->dma_pages[cur].used; \ ++ if ((n) > rest) { \ ++ dma_ptr = savage_dma_alloc(dev_priv, (n)); \ ++ } else { /* fast path for small allocations */ \ ++ dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \ ++ cur * SAVAGE_DMA_PAGE_SIZE + \ ++ dev_priv->dma_pages[cur].used; \ ++ if (dev_priv->dma_pages[cur].used == 0) \ ++ savage_dma_wait(dev_priv, cur); \ ++ dev_priv->dma_pages[cur].used += (n); \ ++ } \ ++} while(0) ++ ++#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) ++ ++#define DMA_COPY(src, n) do { \ ++ memcpy(dma_ptr, (src), (n)*4); \ ++ dma_ptr += n; \ ++} while(0) ++ ++#if SAVAGE_DMA_DEBUG ++#define DMA_COMMIT() do { \ ++ unsigned int cur = dev_priv->current_dma_page; \ ++ uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \ ++ cur * SAVAGE_DMA_PAGE_SIZE + \ ++ dev_priv->dma_pages[cur].used; \ ++ if (dma_ptr != expected) { \ ++ DRM_ERROR("DMA allocation and use don't match: " \ ++ "%p != %p\n", expected, dma_ptr); \ ++ savage_dma_reset(dev_priv); \ ++ } \ ++} while(0) ++#else 
++#define DMA_COMMIT() do {/* nothing */} while(0) ++#endif ++ ++#define DMA_FLUSH() dev_priv->dma_flush(dev_priv) ++ ++/* Buffer aging via event tag ++ */ ++ ++#define UPDATE_EVENT_COUNTER( ) do { \ ++ if (dev_priv->status_ptr) { \ ++ uint16_t count; \ ++ /* coordinate with Xserver */ \ ++ count = dev_priv->status_ptr[1023]; \ ++ if (count < dev_priv->event_counter) \ ++ dev_priv->event_wrap++; \ ++ dev_priv->event_counter = count; \ ++ } \ ++} while(0) ++ ++#define SET_AGE( age, e, w ) do { \ ++ (age)->event = e; \ ++ (age)->wrap = w; \ ++} while(0) ++ ++#define TEST_AGE( age, e, w ) \ ++ ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) ) ++ ++#endif /* __SAVAGE_DRV_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_state.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_state.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/savage_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/savage_state.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,1165 @@ ++/* savage_state.c -- State and drawing support for Savage ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++#include "drmP.h" ++#include "savage_drm.h" ++#include "savage_drv.h" ++ ++void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox) ++{ ++ uint32_t scstart = dev_priv->state.s3d.new_scstart; ++ uint32_t scend = dev_priv->state.s3d.new_scend; ++ scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | ++ ((uint32_t)pbox->x1 & 0x000007ff) | ++ (((uint32_t)pbox->y1 << 16) & 0x07ff0000); ++ scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | ++ (((uint32_t)pbox->x2 - 1) & 0x000007ff) | ++ ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000); ++ if (scstart != dev_priv->state.s3d.scstart || ++ scend != dev_priv->state.s3d.scend) { ++ DMA_LOCALS; ++ BEGIN_DMA(4); ++ DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); ++ DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); ++ DMA_WRITE(scstart); ++ DMA_WRITE(scend); ++ dev_priv->state.s3d.scstart = scstart; ++ dev_priv->state.s3d.scend = scend; ++ dev_priv->waiting = 1; ++ DMA_COMMIT(); ++ } ++} ++ ++void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox) ++{ ++ uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; ++ uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; ++ drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | ++ ((uint32_t)pbox->x1 & 0x000007ff) | ++ (((uint32_t)pbox->y1 << 12) & 0x00fff000); ++ drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | ++ (((uint32_t)pbox->x2 - 1) & 0x000007ff) | ++ ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000); ++ if (drawctrl0 != dev_priv->state.s4.drawctrl0 || ++ drawctrl1 != dev_priv->state.s4.drawctrl1) { ++ DMA_LOCALS; ++ BEGIN_DMA(4); ++ DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); ++ DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); ++ DMA_WRITE(drawctrl0); ++ DMA_WRITE(drawctrl1); ++ dev_priv->state.s4.drawctrl0 = drawctrl0; ++ dev_priv->state.s4.drawctrl1 = drawctrl1; ++ dev_priv->waiting = 1; ++ DMA_COMMIT(); ++ } ++} ++ ++static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, ++ uint32_t addr) ++{ ++ if ((addr & 6) != 2) { /* reserved bits */ ++ DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); ++ return -EINVAL; ++ } ++ if (!(addr & 1)) { /* local */ ++ addr &= ~7; ++ if (addr < dev_priv->texture_offset || ++ addr >= dev_priv->texture_offset + dev_priv->texture_size) { ++ DRM_ERROR ++ ("bad texAddr%d %08x (local addr out of range)\n", ++ unit, addr); ++ return -EINVAL; ++ } ++ } else { /* AGP */ ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", ++ unit, addr); ++ return -EINVAL; ++ } ++ addr &= ~7; ++ if (addr < dev_priv->agp_textures->offset || ++ addr >= (dev_priv->agp_textures->offset + ++ dev_priv->agp_textures->size)) { ++ DRM_ERROR ++ ("bad texAddr%d %08x (AGP addr out of range)\n", ++ unit, addr); ++ return -EINVAL; ++ } ++ } ++ return 0; ++} ++ ++#define SAVE_STATE(reg,where) \ ++ if(start <= reg && start + count > reg) \ ++ dev_priv->state.where = regs[reg - start] ++#define SAVE_STATE_MASK(reg,where,mask) do { \ ++ if(start <= reg && start + count > reg) { \ ++ uint32_t tmp; \ ++ tmp = regs[reg - start]; \ ++ dev_priv->state.where = (tmp & (mask)) | \ ++ (dev_priv->state.where & ~(mask)); \ ++ } \ ++} while (0) ++static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, ++ unsigned int start, unsigned int count, ++ const uint32_t *regs) ++{ ++ if (start < SAVAGE_TEXPALADDR_S3D || ++ start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { ++ DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", ++ start, start + count - 1); ++ return -EINVAL; ++ } ++ ++ 
SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, ++ ~SAVAGE_SCISSOR_MASK_S3D); ++ SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend, ++ ~SAVAGE_SCISSOR_MASK_S3D); ++ ++ /* if any texture regs were changed ... */ ++ if (start <= SAVAGE_TEXCTRL_S3D && ++ start + count > SAVAGE_TEXPALADDR_S3D) { ++ /* ... check texture state */ ++ SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); ++ SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); ++ if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) ++ return savage_verify_texaddr(dev_priv, 0, ++ dev_priv->state.s3d.texaddr); ++ } ++ ++ return 0; ++} ++ ++static int savage_verify_state_s4(drm_savage_private_t *dev_priv, ++ unsigned int start, unsigned int count, ++ const uint32_t *regs) ++{ ++ int ret = 0; ++ ++ if (start < SAVAGE_DRAWLOCALCTRL_S4 || ++ start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { ++ DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", ++ start, start + count - 1); ++ return -EINVAL; ++ } ++ ++ SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, ++ ~SAVAGE_SCISSOR_MASK_S4); ++ SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1, ++ ~SAVAGE_SCISSOR_MASK_S4); ++ ++ /* if any texture regs were changed ... */ ++ if (start <= SAVAGE_TEXDESCR_S4 && ++ start + count > SAVAGE_TEXPALADDR_S4) { ++ /* ... check texture state */ ++ SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); ++ SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); ++ SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); ++ if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) ++ ret |= savage_verify_texaddr(dev_priv, 0, ++ dev_priv->state.s4.texaddr0); ++ if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) ++ ret |= savage_verify_texaddr(dev_priv, 1, ++ dev_priv->state.s4.texaddr1); ++ } ++ ++ return ret; ++} ++#undef SAVE_STATE ++#undef SAVE_STATE_MASK ++ ++static int savage_dispatch_state(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint32_t *regs) ++{ ++ unsigned int count = cmd_header->state.count; ++ unsigned int start = cmd_header->state.start; ++ unsigned int count2 = 0; ++ unsigned int bci_size; ++ int ret; ++ DMA_LOCALS; ++ ++ if (!count) ++ return 0; ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ ret = savage_verify_state_s3d(dev_priv, start, count, regs); ++ if (ret != 0) ++ return ret; ++ /* scissor regs are emitted in savage_dispatch_draw */ ++ if (start < SAVAGE_SCSTART_S3D) { ++ if (start + count > SAVAGE_SCEND_S3D + 1) ++ count2 = count - (SAVAGE_SCEND_S3D + 1 - start); ++ if (start + count > SAVAGE_SCSTART_S3D) ++ count = SAVAGE_SCSTART_S3D - start; ++ } else if (start <= SAVAGE_SCEND_S3D) { ++ if (start + count > SAVAGE_SCEND_S3D + 1) { ++ count -= SAVAGE_SCEND_S3D + 1 - start; ++ start = SAVAGE_SCEND_S3D + 1; ++ } else ++ return 0; ++ } ++ } else { ++ ret = savage_verify_state_s4(dev_priv, start, count, regs); ++ if (ret != 0) ++ return ret; ++ /* scissor regs are emitted in savage_dispatch_draw */ ++ if (start < SAVAGE_DRAWCTRL0_S4) { ++ if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) ++ count2 = count - ++ (SAVAGE_DRAWCTRL1_S4 + 1 - start); ++ if (start + count > SAVAGE_DRAWCTRL0_S4) ++ count = SAVAGE_DRAWCTRL0_S4 - start; ++ } else if (start <= SAVAGE_DRAWCTRL1_S4) { ++ if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) { ++ count -= SAVAGE_DRAWCTRL1_S4 + 1 - start; ++ start = SAVAGE_DRAWCTRL1_S4 + 1; ++ } else ++ return 0; ++ } ++ } ++ ++ bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255; ++ ++ if (cmd_header->state.global) { ++ BEGIN_DMA(bci_size + 1); ++ DMA_WRITE(BCI_CMD_WAIT | 
BCI_CMD_WAIT_3D); ++ dev_priv->waiting = 1; ++ } else { ++ BEGIN_DMA(bci_size); ++ } ++ ++ do { ++ while (count > 0) { ++ unsigned int n = count < 255 ? count : 255; ++ DMA_SET_REGISTERS(start, n); ++ DMA_COPY(regs, n); ++ count -= n; ++ start += n; ++ regs += n; ++ } ++ start += 2; ++ regs += 2; ++ count = count2; ++ count2 = 0; ++ } while (count); ++ ++ DMA_COMMIT(); ++ ++ return 0; ++} ++ ++static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const struct drm_buf *dmabuf) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->prim.prim; ++ unsigned int skip = cmd_header->prim.skip; ++ unsigned int n = cmd_header->prim.count; ++ unsigned int start = cmd_header->prim.start; ++ unsigned int i; ++ BCI_LOCALS; ++ ++ if (!dmabuf) { ++ DRM_ERROR("called without dma buffers!\n"); ++ return -EINVAL; ++ } ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of vertices %u in TRILIST\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of vertices %u in TRIFAN/STRIP\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip != 0) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ } else { ++ unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - ++ (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - ++ (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); ++ if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ if (reorder) { ++ DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (start + n > dmabuf->total / 32) { ++ DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", ++ start, start + n - 1, dmabuf->total / 32); ++ return -EINVAL; ++ } ++ ++ /* Vertex DMA doesn't work with command DMA at the same time, ++ * so we use BCI_... to submit commands here. Flush buffered ++ * faked DMA first. */ ++ DMA_FLUSH(); ++ ++ if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { ++ BEGIN_BCI(2); ++ BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); ++ BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); ++ dev_priv->state.common.vbaddr = dmabuf->bus_address; ++ } ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { ++ /* Workaround for what looks like a hardware bug. If a ++ * WAIT_3D_IDLE was emitted some time before the ++ * indexed drawing command then the engine will lock ++ * up. There are two known workarounds: ++ * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ ++ BEGIN_BCI(63); ++ for (i = 0; i < 63; ++i) ++ BCI_WRITE(BCI_CMD_WAIT); ++ dev_priv->waiting = 0; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 indices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 255 : n; ++ if (reorder) { ++ /* Need to reorder indices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. 
*/ ++ int reorder[3] = { -1, -1, -1 }; ++ reorder[start % 3] = 2; ++ ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, start + 2); ++ ++ for (i = start + 1; i + 1 < start + count; i += 2) ++ BCI_WRITE((i + reorder[i % 3]) | ++ ((i + 1 + ++ reorder[(i + 1) % 3]) << 16)); ++ if (i < start + count) ++ BCI_WRITE(i + reorder[i % 3]); ++ } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, start); ++ ++ for (i = start + 1; i + 1 < start + count; i += 2) ++ BCI_WRITE(i | ((i + 1) << 16)); ++ if (i < start + count) ++ BCI_WRITE(i); ++ } else { ++ BEGIN_BCI((count + 2 + 1) / 2); ++ BCI_DRAW_INDICES_S4(count, prim, skip); ++ ++ for (i = start; i + 1 < start + count; i += 2) ++ BCI_WRITE(i | ((i + 1) << 16)); ++ if (i < start + count) ++ BCI_WRITE(i); ++ } ++ ++ start += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint32_t *vtxbuf, unsigned int vb_size, ++ unsigned int vb_stride) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->prim.prim; ++ unsigned int skip = cmd_header->prim.skip; ++ unsigned int n = cmd_header->prim.count; ++ unsigned int start = cmd_header->prim.start; ++ unsigned int vtx_size; ++ unsigned int i; ++ DMA_LOCALS; ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of vertices %u in TRILIST\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of vertices %u in TRIFAN/STRIP\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip > SAVAGE_SKIP_ALL_S3D) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 8; /* full vertex */ ++ } else { ++ if (skip > SAVAGE_SKIP_ALL_S4) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 10; /* full vertex */ ++ } ++ ++ vtx_size -= (skip & 1) + (skip >> 1 & 1) + ++ (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + ++ (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); ++ ++ if (vtx_size > vb_stride) { ++ DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", ++ vtx_size, vb_stride); ++ return -EINVAL; ++ } ++ ++ if (start + n > vb_size / (vb_stride * 4)) { ++ DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", ++ start, start + n - 1, vb_size / (vb_stride * 4)); ++ return -EINVAL; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 vertices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 255 : n; ++ if (reorder) { ++ /* Need to reorder vertices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. 
*/ ++ int reorder[3] = { -1, -1, -1 }; ++ reorder[start % 3] = 2; ++ ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ for (i = start; i < start + count; ++i) { ++ unsigned int j = i + reorder[i % 3]; ++ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); ++ } ++ ++ DMA_COMMIT(); ++ } else { ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ if (vb_stride == vtx_size) { ++ DMA_COPY(&vtxbuf[vb_stride * start], ++ vtx_size * count); ++ } else { ++ for (i = start; i < start + count; ++i) { ++ DMA_COPY(&vtxbuf[vb_stride * i], ++ vtx_size); ++ } ++ } ++ ++ DMA_COMMIT(); ++ } ++ ++ start += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint16_t *idx, ++ const struct drm_buf *dmabuf) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->idx.prim; ++ unsigned int skip = cmd_header->idx.skip; ++ unsigned int n = cmd_header->idx.count; ++ unsigned int i; ++ BCI_LOCALS; ++ ++ if (!dmabuf) { ++ DRM_ERROR("called without dma buffers!\n"); ++ return -EINVAL; ++ } ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of indices %u in TRILIST\n", n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of indices %u in TRIFAN/STRIP\n", n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip != 0) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ } else { ++ unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - ++ (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - ++ (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); ++ if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ if (reorder) { ++ DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); ++ return -EINVAL; ++ } ++ } ++ ++ /* Vertex DMA doesn't work with command DMA at the same time, ++ * so we use BCI_... to submit commands here. Flush buffered ++ * faked DMA first. */ ++ DMA_FLUSH(); ++ ++ if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { ++ BEGIN_BCI(2); ++ BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); ++ BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); ++ dev_priv->state.common.vbaddr = dmabuf->bus_address; ++ } ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { ++ /* Workaround for what looks like a hardware bug. If a ++ * WAIT_3D_IDLE was emitted some time before the ++ * indexed drawing command then the engine will lock ++ * up. There are two known workarounds: ++ * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ ++ BEGIN_BCI(63); ++ for (i = 0; i < 63; ++i) ++ BCI_WRITE(BCI_CMD_WAIT); ++ dev_priv->waiting = 0; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 indices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 
255 : n; ++ ++ /* check indices */ ++ for (i = 0; i < count; ++i) { ++ if (idx[i] > dmabuf->total / 32) { ++ DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ++ i, idx[i], dmabuf->total / 32); ++ return -EINVAL; ++ } ++ } ++ ++ if (reorder) { ++ /* Need to reorder indices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. */ ++ int reorder[3] = { 2, -1, -1 }; ++ ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, idx[2]); ++ ++ for (i = 1; i + 1 < count; i += 2) ++ BCI_WRITE(idx[i + reorder[i % 3]] | ++ (idx[i + 1 + ++ reorder[(i + 1) % 3]] << 16)); ++ if (i < count) ++ BCI_WRITE(idx[i + reorder[i % 3]]); ++ } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, idx[0]); ++ ++ for (i = 1; i + 1 < count; i += 2) ++ BCI_WRITE(idx[i] | (idx[i + 1] << 16)); ++ if (i < count) ++ BCI_WRITE(idx[i]); ++ } else { ++ BEGIN_BCI((count + 2 + 1) / 2); ++ BCI_DRAW_INDICES_S4(count, prim, skip); ++ ++ for (i = 0; i + 1 < count; i += 2) ++ BCI_WRITE(idx[i] | (idx[i + 1] << 16)); ++ if (i < count) ++ BCI_WRITE(idx[i]); ++ } ++ ++ idx += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint16_t *idx, ++ const uint32_t *vtxbuf, ++ unsigned int vb_size, unsigned int vb_stride) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->idx.prim; ++ unsigned int skip = cmd_header->idx.skip; ++ unsigned int n = cmd_header->idx.count; ++ unsigned int vtx_size; ++ unsigned int i; ++ DMA_LOCALS; ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of indices %u in TRILIST\n", n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of indices %u in TRIFAN/STRIP\n", n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip > SAVAGE_SKIP_ALL_S3D) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 8; /* full vertex */ ++ } else { ++ if (skip > SAVAGE_SKIP_ALL_S4) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 10; /* full vertex */ ++ } ++ ++ vtx_size -= (skip & 1) + (skip >> 1 & 1) + ++ (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + ++ (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); ++ ++ if (vtx_size > vb_stride) { ++ DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", ++ vtx_size, vb_stride); ++ return -EINVAL; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 vertices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 255 : n; ++ ++ /* Check indices */ ++ for (i = 0; i < count; ++i) { ++ if (idx[i] > vb_size / (vb_stride * 4)) { ++ DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ++ i, idx[i], vb_size / (vb_stride * 4)); ++ return -EINVAL; ++ } ++ } ++ ++ if (reorder) { ++ /* Need to reorder vertices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. 
*/ ++ int reorder[3] = { 2, -1, -1 }; ++ ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ for (i = 0; i < count; ++i) { ++ unsigned int j = idx[i + reorder[i % 3]]; ++ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); ++ } ++ ++ DMA_COMMIT(); ++ } else { ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ for (i = 0; i < count; ++i) { ++ unsigned int j = idx[i]; ++ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); ++ } ++ ++ DMA_COMMIT(); ++ } ++ ++ idx += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_clear(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const drm_savage_cmd_header_t *data, ++ unsigned int nbox, ++ const struct drm_clip_rect *boxes) ++{ ++ unsigned int flags = cmd_header->clear0.flags; ++ unsigned int clear_cmd; ++ unsigned int i, nbufs; ++ DMA_LOCALS; ++ ++ if (nbox == 0) ++ return 0; ++ ++ clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | ++ BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; ++ BCI_CMD_SET_ROP(clear_cmd,0xCC); ++ ++ nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + ++ ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0); ++ if (nbufs == 0) ++ return 0; ++ ++ if (data->clear1.mask != 0xffffffff) { ++ /* set mask */ ++ BEGIN_DMA(2); ++ DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ++ DMA_WRITE(data->clear1.mask); ++ DMA_COMMIT(); ++ } ++ for (i = 0; i < nbox; ++i) { ++ unsigned int x, y, w, h; ++ unsigned int buf; ++ ++ x = boxes[i].x1, y = boxes[i].y1; ++ w = boxes[i].x2 - boxes[i].x1; ++ h = boxes[i].y2 - boxes[i].y1; ++ BEGIN_DMA(nbufs * 6); ++ for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { ++ if (!(flags & buf)) ++ continue; ++ DMA_WRITE(clear_cmd); ++ switch (buf) { ++ case SAVAGE_FRONT: ++ DMA_WRITE(dev_priv->front_offset); ++ DMA_WRITE(dev_priv->front_bd); ++ break; ++ case SAVAGE_BACK: ++ DMA_WRITE(dev_priv->back_offset); ++ DMA_WRITE(dev_priv->back_bd); ++ break; ++ case SAVAGE_DEPTH: ++ DMA_WRITE(dev_priv->depth_offset); ++ DMA_WRITE(dev_priv->depth_bd); ++ break; ++ } ++ DMA_WRITE(data->clear1.value); ++ DMA_WRITE(BCI_X_Y(x, y)); ++ DMA_WRITE(BCI_W_H(w, h)); ++ } ++ DMA_COMMIT(); ++ } ++ if (data->clear1.mask != 0xffffffff) { ++ /* reset mask */ ++ BEGIN_DMA(2); ++ DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ++ DMA_WRITE(0xffffffff); ++ DMA_COMMIT(); ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_swap(drm_savage_private_t *dev_priv, ++ unsigned int nbox, const struct drm_clip_rect *boxes) ++{ ++ unsigned int swap_cmd; ++ unsigned int i; ++ DMA_LOCALS; ++ ++ if (nbox == 0) ++ return 0; ++ ++ swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | ++ BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; ++ BCI_CMD_SET_ROP(swap_cmd,0xCC); ++ ++ for (i = 0; i < nbox; ++i) { ++ BEGIN_DMA(6); ++ DMA_WRITE(swap_cmd); ++ DMA_WRITE(dev_priv->back_offset); ++ DMA_WRITE(dev_priv->back_bd); ++ DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); ++ DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); ++ DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, ++ boxes[i].y2 - boxes[i].y1)); ++ DMA_COMMIT(); ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_draw(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *start, ++ const drm_savage_cmd_header_t *end, ++ const struct drm_buf *dmabuf, ++ const unsigned int *vtxbuf, ++ unsigned int vb_size, unsigned int vb_stride, ++ unsigned int nbox, ++ const struct drm_clip_rect *boxes) ++{ ++ unsigned int i, j; ++ int ret; ++ ++ for (i = 
0; i < nbox; ++i) { ++ const drm_savage_cmd_header_t *cmdbuf; ++ dev_priv->emit_clip_rect(dev_priv, &boxes[i]); ++ ++ cmdbuf = start; ++ while (cmdbuf < end) { ++ drm_savage_cmd_header_t cmd_header; ++ cmd_header = *cmdbuf; ++ cmdbuf++; ++ switch (cmd_header.cmd.cmd) { ++ case SAVAGE_CMD_DMA_PRIM: ++ ret = savage_dispatch_dma_prim( ++ dev_priv, &cmd_header, dmabuf); ++ break; ++ case SAVAGE_CMD_VB_PRIM: ++ ret = savage_dispatch_vb_prim( ++ dev_priv, &cmd_header, ++ vtxbuf, vb_size, vb_stride); ++ break; ++ case SAVAGE_CMD_DMA_IDX: ++ j = (cmd_header.idx.count + 3) / 4; ++ /* j was check in savage_bci_cmdbuf */ ++ ret = savage_dispatch_dma_idx(dev_priv, ++ &cmd_header, (const uint16_t *)cmdbuf, ++ dmabuf); ++ cmdbuf += j; ++ break; ++ case SAVAGE_CMD_VB_IDX: ++ j = (cmd_header.idx.count + 3) / 4; ++ /* j was check in savage_bci_cmdbuf */ ++ ret = savage_dispatch_vb_idx(dev_priv, ++ &cmd_header, (const uint16_t *)cmdbuf, ++ (const uint32_t *)vtxbuf, vb_size, ++ vb_stride); ++ cmdbuf += j; ++ break; ++ default: ++ /* What's the best return code? EFAULT? */ ++ DRM_ERROR("IMPLEMENTATION ERROR: " ++ "non-drawing-command %d\n", ++ cmd_header.cmd.cmd); ++ return -EINVAL; ++ } ++ ++ if (ret != 0) ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *dmabuf; ++ drm_savage_cmdbuf_t *cmdbuf = data; ++ drm_savage_cmd_header_t *kcmd_addr = NULL; ++ drm_savage_cmd_header_t *first_draw_cmd; ++ unsigned int *kvb_addr = NULL; ++ struct drm_clip_rect *kbox_addr = NULL; ++ unsigned int i, j; ++ int ret = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (dma && dma->buflist) { ++ if (cmdbuf->dma_idx > dma->buf_count) { ++ DRM_ERROR ++ ("vertex buffer index %u out of range (0-%u)\n", ++ cmdbuf->dma_idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ dmabuf = dma->buflist[cmdbuf->dma_idx]; ++ } else { ++ dmabuf = NULL; ++ } ++ ++ /* Copy the user buffers into kernel temporary areas. This hasn't been ++ * a performance loss compared to VERIFYAREA_READ/ ++ * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct ++ * for locking on FreeBSD. ++ */ ++ if (cmdbuf->size) { ++ kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER); ++ if (kcmd_addr == NULL) ++ return -ENOMEM; ++ ++ if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, ++ cmdbuf->size * 8)) ++ { ++ drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); ++ return -EFAULT; ++ } ++ cmdbuf->cmd_addr = kcmd_addr; ++ } ++ if (cmdbuf->vb_size) { ++ kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER); ++ if (kvb_addr == NULL) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr, ++ cmdbuf->vb_size)) { ++ ret = -EFAULT; ++ goto done; ++ } ++ cmdbuf->vb_addr = kvb_addr; ++ } ++ if (cmdbuf->nbox) { ++ kbox_addr = drm_alloc(cmdbuf->nbox * ++ sizeof(struct drm_clip_rect), ++ DRM_MEM_DRIVER); ++ if (kbox_addr == NULL) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, ++ cmdbuf->nbox * ++ sizeof(struct drm_clip_rect))) { ++ ret = -EFAULT; ++ goto done; ++ } ++ cmdbuf->box_addr = kbox_addr; ++ } ++ ++ /* Make sure writes to DMA buffers are finished before sending ++ * DMA commands to the graphics hardware. */ ++ DRM_MEMORYBARRIER(); ++ ++ /* Coming from user space. Don't know if the Xserver has ++ * emitted wait commands. 
Assuming the worst. */ ++ dev_priv->waiting = 1; ++ ++ i = 0; ++ first_draw_cmd = NULL; ++ while (i < cmdbuf->size) { ++ drm_savage_cmd_header_t cmd_header; ++ cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr; ++ cmdbuf->cmd_addr++; ++ i++; ++ ++ /* Group drawing commands with same state to minimize ++ * iterations over clip rects. */ ++ j = 0; ++ switch (cmd_header.cmd.cmd) { ++ case SAVAGE_CMD_DMA_IDX: ++ case SAVAGE_CMD_VB_IDX: ++ j = (cmd_header.idx.count + 3) / 4; ++ if (i + j > cmdbuf->size) { ++ DRM_ERROR("indexed drawing command extends " ++ "beyond end of command buffer\n"); ++ DMA_FLUSH(); ++ return -EINVAL; ++ } ++ /* fall through */ ++ case SAVAGE_CMD_DMA_PRIM: ++ case SAVAGE_CMD_VB_PRIM: ++ if (!first_draw_cmd) ++ first_draw_cmd = cmdbuf->cmd_addr - 1; ++ cmdbuf->cmd_addr += j; ++ i += j; ++ break; ++ default: ++ if (first_draw_cmd) { ++ ret = savage_dispatch_draw( ++ dev_priv, first_draw_cmd, ++ cmdbuf->cmd_addr - 1, ++ dmabuf, cmdbuf->vb_addr, ++ cmdbuf->vb_size, ++ cmdbuf->vb_stride, ++ cmdbuf->nbox, cmdbuf->box_addr); ++ if (ret != 0) ++ return ret; ++ first_draw_cmd = NULL; ++ } ++ } ++ if (first_draw_cmd) ++ continue; ++ ++ switch (cmd_header.cmd.cmd) { ++ case SAVAGE_CMD_STATE: ++ j = (cmd_header.state.count + 1) / 2; ++ if (i + j > cmdbuf->size) { ++ DRM_ERROR("command SAVAGE_CMD_STATE extends " ++ "beyond end of command buffer\n"); ++ DMA_FLUSH(); ++ ret = -EINVAL; ++ goto done; ++ } ++ ret = savage_dispatch_state(dev_priv, &cmd_header, ++ (const uint32_t *)cmdbuf->cmd_addr); ++ cmdbuf->cmd_addr += j; ++ i += j; ++ break; ++ case SAVAGE_CMD_CLEAR: ++ if (i + 1 > cmdbuf->size) { ++ DRM_ERROR("command SAVAGE_CMD_CLEAR extends " ++ "beyond end of command buffer\n"); ++ DMA_FLUSH(); ++ ret = -EINVAL; ++ goto done; ++ } ++ ret = savage_dispatch_clear(dev_priv, &cmd_header, ++ cmdbuf->cmd_addr, ++ cmdbuf->nbox, ++ cmdbuf->box_addr); ++ cmdbuf->cmd_addr++; ++ i++; ++ break; ++ case SAVAGE_CMD_SWAP: ++ ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox, ++ cmdbuf->box_addr); ++ break; ++ default: ++ DRM_ERROR("invalid command 0x%x\n", ++ cmd_header.cmd.cmd); ++ DMA_FLUSH(); ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ if (ret != 0) { ++ DMA_FLUSH(); ++ goto done; ++ } ++ } ++ ++ if (first_draw_cmd) { ++ ret = savage_dispatch_draw( ++ dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, ++ cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, ++ cmdbuf->nbox, cmdbuf->box_addr); ++ if (ret != 0) { ++ DMA_FLUSH(); ++ goto done; ++ } ++ } ++ ++ DMA_FLUSH(); ++ ++ if (dmabuf && cmdbuf->discard) { ++ drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; ++ uint16_t event; ++ event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); ++ SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); ++ savage_freelist_put(dev, dmabuf); ++ } ++ ++done: ++ /* If we didn't need to allocate them, these'll be NULL */ ++ drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); ++ drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); ++ drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), ++ DRM_MEM_DRIVER); ++ ++ return ret; ++} +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_drm.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_drm.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_drm.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,67 @@ ++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */ ++/* ++ * Copyright 2005 Eric Anholt ++ * All 
Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ * SOFTWARE. ++ * ++ */ ++ ++#ifndef __SIS_DRM_H__ ++#define __SIS_DRM_H__ ++ ++/* SiS specific ioctls */ ++#define NOT_USED_0_3 ++#define DRM_SIS_FB_ALLOC 0x04 ++#define DRM_SIS_FB_FREE 0x05 ++#define NOT_USED_6_12 ++#define DRM_SIS_AGP_INIT 0x13 ++#define DRM_SIS_AGP_ALLOC 0x14 ++#define DRM_SIS_AGP_FREE 0x15 ++#define DRM_SIS_FB_INIT 0x16 ++ ++#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t) ++#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t) ++/* ++#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t) ++#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49) ++#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50) ++*/ ++ ++typedef struct { ++ int context; ++ unsigned int offset; ++ unsigned int size; ++ unsigned long free; ++} drm_sis_mem_t; ++ ++typedef struct { ++ unsigned int offset, size; ++} drm_sis_agp_t; ++ ++typedef struct { ++ unsigned int offset, size; ++} drm_sis_fb_t; ++ ++#endif /* __SIS_DRM_H__ */ +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_drv.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_drv.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_drv.c 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,127 @@ ++/* sis.c -- sis driver -*- linux-c -*- ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "sis_drm.h" ++#include "sis_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ sis_PCI_IDS ++}; ++ ++ ++static int sis_driver_load(struct drm_device *dev, unsigned long chipset) ++{ ++ drm_sis_private_t *dev_priv; ++ int ret; ++ ++ dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ dev->dev_private = (void *)dev_priv; ++ dev_priv->chipset = chipset; ++ ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); ++ if (ret) { ++ drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER); ++ } ++ ++ return ret; ++} ++ ++static int sis_driver_unload(struct drm_device *dev) ++{ ++ drm_sis_private_t *dev_priv = dev->dev_private; ++ ++ drm_sman_takedown(&dev_priv->sman); ++ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); ++ ++ return 0; ++} ++ ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, ++ .load = sis_driver_load, ++ .unload = sis_driver_unload, ++ .context_dtor = NULL, ++ .dma_quiescent = sis_idle, ++ .reclaim_buffers = NULL, ++ .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked, ++ .lastclose = sis_lastclose, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = sis_ioctls, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init sis_init(void) ++{ ++ driver.num_ioctls = sis_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit sis_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(sis_init); ++module_exit(sis_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurp 
linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_drv.h kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_drv.h +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_drv.h 2011-09-04 11:31:05.000000000 +0200 +@@ -0,0 +1,90 @@ ++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#ifndef _SIS_DRV_H_ ++#define _SIS_DRV_H_ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "SIS, Tungsten Graphics" ++#define DRIVER_NAME "sis" ++#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8" ++#define DRIVER_DATE "20070626" ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 3 ++#define DRIVER_PATCHLEVEL 0 ++ ++enum sis_family { ++ SIS_OTHER = 0, ++ SIS_CHIP_315 = 1, ++}; ++ ++#if defined(__linux__) ++#define SIS_HAVE_CORE_MM ++#endif ++ ++#ifdef SIS_HAVE_CORE_MM ++#include "drm_sman.h" ++ ++#define SIS_BASE (dev_priv->mmio) ++#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); ++#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val); ++ ++typedef struct drm_sis_private { ++ drm_local_map_t *mmio; ++ unsigned int idle_fault; ++ struct drm_sman sman; ++ unsigned int chipset; ++ int vram_initialized; ++ int agp_initialized; ++ unsigned long vram_offset; ++ unsigned long agp_offset; ++} drm_sis_private_t; ++ ++extern int sis_idle(struct drm_device *dev); ++extern void sis_reclaim_buffers_locked(struct drm_device *dev, ++ struct drm_file *file_priv); ++extern void sis_lastclose(struct drm_device *dev); ++ ++#else ++#include "sis_ds.h" ++ ++typedef struct drm_sis_private { ++ memHeap_t *AGPHeap; ++ memHeap_t *FBHeap; ++} drm_sis_private_t; ++ ++extern int sis_init_context(struct drm_device * dev, int context); ++extern int sis_final_context(struct drm_device * dev, int context); ++ ++#endif ++ ++extern struct drm_ioctl_desc sis_ioctls[]; ++extern int sis_max_ioctl; ++ ++#endif +diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_mm.c kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_mm.c +--- linux-omap-2.6.28-omap1/drivers/gpu/drm-tungsten/sis_mm.c 1970-01-01 01:00:00.000000000 +0100 ++++ 
kernel-2.6.28-20093908+0m5/drivers/gpu/drm-tungsten/sis_mm.c 2011-09-04 11:31:05.000000000 +0200
+@@ -0,0 +1,332 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++
++/*
++ * Authors:
++ * Thomas Hellström
++ */
++
++#include "drmP.h"
++#include "sis_drm.h"
++#include "sis_drv.h"
++
++#if defined(__linux__)
++#include
-
-+
-+ Powersave support
-+!Pinclude/net/mac80211.h Powersave support
-+
-+
-+ Beacon filter support
-+!Pinclude/net/mac80211.h Beacon filter support
-+!Finclude/net/mac80211.h ieee80211_beacon_loss
-+
-+
-
- Multiple queues and QoS support
- TBD
-diff -Nurp linux-omap-2.6.28-omap1/Documentation/filesystems/ubifs.txt linux-omap-2.6.28-nokia1/Documentation/filesystems/ubifs.txt
---- linux-omap-2.6.28-omap1/Documentation/filesystems/ubifs.txt 2011-06-22 13:14:12.863067823 +0200
-+++ linux-omap-2.6.28-nokia1/Documentation/filesystems/ubifs.txt 2011-06-22 13:19:32.073063285 +0200
-@@ -79,13 +79,6 @@ Mount options
-
- (*) == default.
-
--norm_unmount (*) commit on unmount; the journal is committed
-- when the file-system is unmounted so that the
-- next mount does not have to replay the journal
-- and it becomes very fast;
--fast_unmount do not commit on unmount; this option makes
-- unmount faster, but the next mount slower
-- because of the need to replay the journal.
- bulk_read read more in one go to take advantage of flash
- media that read faster sequentially
- no_bulk_read (*) do not bulk-read
-@@ -95,6 +88,9 @@ no_chk_data_crc skip checking of CRCs o
- of this option is that corruption of the contents
- of a file can go unnoticed.
- chk_data_crc (*) do not skip checking CRCs on data nodes
-+compr=none override default compressor and set it to "none"
-+compr=lzo override default compressor and set it to "lzo"
-+compr=zlib override default compressor and set it to "zlib"
-
-
- Quick usage instructions
-diff -Nurp linux-omap-2.6.28-omap1/Documentation/filesystems/vfat.txt linux-omap-2.6.28-nokia1/Documentation/filesystems/vfat.txt
---- linux-omap-2.6.28-omap1/Documentation/filesystems/vfat.txt 2011-06-22 13:14:12.863067823 +0200
-+++ linux-omap-2.6.28-nokia1/Documentation/filesystems/vfat.txt 2011-06-22 13:19:32.073063285 +0200
-@@ -132,6 +132,11 @@ rodir -- FAT has the ATTR_RO (read
- If you want to use ATTR_RO as read-only flag even for
- the directory, set this option.
-
-+errors=panic|continue|remount-ro
-+ -- specify FAT behavior on critical errors: panic, continue
-+ without doing anything, or remount the partition in
-+ read-only mode (default behavior).
-+
- : 0,1,yes,no,true,false
-
- TODO
-diff -Nurp linux-omap-2.6.28-omap1/Documentation/tidspbridge/README linux-omap-2.6.28-nokia1/Documentation/tidspbridge/README
---- linux-omap-2.6.28-omap1/Documentation/tidspbridge/README 1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/Documentation/tidspbridge/README 2011-06-22 13:19:32.073063285 +0200
-@@ -0,0 +1,70 @@
-+ Linux DSP/BIOS Bridge release
-+
-+DSP/BIOS Bridge overview
-+========================
-+
-+DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more
-+attached DSPs. The GPP is considered the master or "host" processor, and the
-+attached DSPs are processing resources that can be utilized by applications
-+and drivers running on the GPP.
-+
-+The abstraction that DSP/BIOS Bridge supplies is a direct link between a GPP
-+program and a DSP task. This communication link is partitioned into two
-+types of sub-links: messaging (short, fixed-length packets) and data
-+streaming (multiple, large buffers). Each sub-link operates independently,
-+and features in-order delivery of data, meaning that messages are delivered
-+in the order they were submitted to the message link, and stream buffers are
-+delivered in the order they were submitted to the stream link.
-+
-+In addition, a GPP client can specify what inputs and outputs a DSP task
-+uses. DSP tasks typically use message objects for passing control and status
-+information and stream objects for efficient streaming of real-time data.
-+
-+GPP Software Architecture
-+=========================
-+
-+A GPP application communicates with its associated DSP task running on the
-+DSP subsystem using the DSP/BIOS Bridge API. For example, a GPP audio
-+application can use the API to pass messages to a DSP task that is managing
-+data flowing from analog-to-digital converters (ADCs) to digital-to-analog
-+converters (DACs).
-+
-+From the perspective of the GPP OS, the DSP is treated as just another
-+peripheral device. Most high level GPP OS typically support a device driver
-+model, whereby applications can safely access and share a hardware peripheral
-+through standard driver interfaces. Therefore, to allow multiple GPP
-+applications to share access to the DSP, the GPP side of DSP/BIOS Bridge
-+implements a device driver for the DSP.
-+
-+Since driver interfaces are not always standard across GPP OS, and to provide
-+some level of interoperability of application code using DSP/BIOS Bridge
-+between GPP OS, DSP/BIOS Bridge provides a standard library of APIs which
-+wrap calls into the device driver.
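The compr= (UBIFS) and errors= (VFAT) options documented in the two filesystem hunks above are ordinary mount options, so no further kernel-side change is needed to exercise them. Below is a minimal user-space sketch only for illustration; the UBI volume name, block device and mount points are hypothetical examples and are not defined anywhere in this patch.

/* Minimal sketch: exercising the compr= (ubifs) and errors= (vfat)
 * mount options described in the documentation hunks above.
 * Volume, device and mount-point names are hypothetical; run as root.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* UBIFS: override the default compressor with LZO for this mount. */
	if (mount("ubi0:rootfs", "/mnt/ubifs", "ubifs", 0, "compr=lzo") != 0)
		perror("mount ubifs");

	/* VFAT: remount the partition read-only on a critical FS error. */
	if (mount("/dev/mmcblk0p1", "/mnt/vfat", "vfat", 0, "errors=remount-ro") != 0)
		perror("mount vfat");

	return 0;
}

From a shell, the equivalent invocations would be "mount -t ubifs -o compr=lzo ubi0:rootfs /mnt/ubifs" and "mount -t vfat -o errors=remount-ro /dev/mmcblk0p1 /mnt/vfat".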
So, rather than calling GPP OS specific -+driver interfaces, applications (and even other device drivers) can use the -+standard API library directly. -+ -+DSP Software Architecture -+========================= -+ -+For DSP/BIOS, DSP/BIOS Bridge adds a device-independent streaming I/O (STRM) -+interface, a messaging interface (NODE), and a Resource Manager (RM) Server. -+The RM Server runs as a task of DSP/BIOS and is subservient to commands -+and queries from the GPP. It executes commands to start and stop DSP signal -+processing nodes in response to GPP programs making requests through the -+(GPP-side) API. -+ -+DSP tasks started by the RM Server are similar to any other DSP task with two -+important differences: they must follow a specific task model consisting of -+three C-callable functions (node create, execute, and delete), with specific -+sets of arguments, and they have a pre-defined task environment established -+by the RM Server. -+ -+Tasks started by the RM Server communicate using the STRM and NODE interfaces -+and act as servers for their corresponding GPP clients, performing signal -+processing functions as requested by messages sent by their GPP client. -+Typically, a DSP task moves data from source devices to sink devices using -+device independent I/O streams, performing application-specific processing -+and transformations on the data while it is moved. For example, an audio -+task might perform audio decompression (ADPCM, MPEG, CELP) on data received -+from a GPP audio driver and then send the decompressed linear samples to a -+digital-to-analog converter. -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/bcm203x.c linux-omap-2.6.28-nokia1/drivers/bluetooth/bcm203x.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/bcm203x.c 2011-06-22 13:14:17.273067761 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/bcm203x.c 2011-06-22 13:19:32.513063279 +0200 -@@ -37,11 +37,6 @@ - - #include - --#ifndef CONFIG_BT_HCIBCM203X_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "1.2" - - static struct usb_device_id bcm203x_table[] = { -@@ -199,7 +194,7 @@ static int bcm203x_probe(struct usb_inte - return -EIO; - } - -- BT_DBG("minidrv data %p size %d", firmware->data, firmware->size); -+ BT_DBG("minidrv data %p size %zu", firmware->data, firmware->size); - - size = max_t(uint, firmware->size, 4096); - -@@ -227,7 +222,7 @@ static int bcm203x_probe(struct usb_inte - return -EIO; - } - -- BT_DBG("firmware data %p size %d", firmware->data, firmware->size); -+ BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); - - data->fw_data = kmalloc(firmware->size, GFP_KERNEL); - if (!data->fw_data) { -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/bfusb.c linux-omap-2.6.28-nokia1/drivers/bluetooth/bfusb.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/bfusb.c 2011-06-22 13:14:17.273067761 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/bfusb.c 2011-06-22 13:19:32.513063279 +0200 -@@ -38,11 +38,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCIBFUSB_DEBUG --#undef BT_DBG --#define BT_DBG(D...) 
--#endif -- - #define VERSION "1.2" - - static struct usb_driver bfusb_driver; -@@ -221,7 +216,7 @@ static int bfusb_rx_submit(struct bfusb_ - struct sk_buff *skb; - int err, pipe, size = HCI_MAX_FRAME_SIZE + 32; - -- BT_DBG("bfusb %p urb %p", bfusb, urb); -+ BT_DBG("bfusb %p urb %p", data, urb); - - if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC))) - return -ENOMEM; -@@ -262,8 +257,7 @@ static inline int bfusb_recv_block(struc - - if (hdr & 0x10) { - BT_ERR("%s error in block", data->hdev->name); -- if (data->reassembly) -- kfree_skb(data->reassembly); -+ kfree_skb(data->reassembly); - data->reassembly = NULL; - return -EIO; - } -@@ -354,7 +348,7 @@ static void bfusb_rx_complete(struct urb - int count = urb->actual_length; - int err, hdr, len; - -- BT_DBG("bfusb %p urb %p skb %p len %d", bfusb, urb, skb, skb->len); -+ BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); - - read_lock(&data->lock); - -@@ -691,7 +685,7 @@ static int bfusb_probe(struct usb_interf - goto error; - } - -- BT_DBG("firmware data %p size %d", firmware->data, firmware->size); -+ BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); - - if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) { - BT_ERR("Firmware loading failed"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/bpa10x.c linux-omap-2.6.28-nokia1/drivers/bluetooth/bpa10x.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/bpa10x.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/bpa10x.c 2011-06-22 13:19:32.513063279 +0200 -@@ -35,11 +35,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCIBPA10X_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "0.10" - - static struct usb_device_id bpa10x_table[] = { -@@ -489,6 +484,8 @@ static int bpa10x_probe(struct usb_inter - - hdev->owner = THIS_MODULE; - -+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); -+ - err = hci_register_dev(hdev); - if (err < 0) { - hci_free_dev(hdev); -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/btsdio.c linux-omap-2.6.28-nokia1/drivers/bluetooth/btsdio.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/btsdio.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/btsdio.c 2011-06-22 13:19:32.523063279 +0200 -@@ -37,11 +37,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCIBTSDIO_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "0.1" - - static const struct sdio_device_id btsdio_table[] = { -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/btusb.c linux-omap-2.6.28-nokia1/drivers/bluetooth/btusb.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/btusb.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/btusb.c 2011-06-22 13:19:32.523063279 +0200 -@@ -35,31 +35,25 @@ - #include - #include - --//#define CONFIG_BT_HCIBTUSB_DEBUG --#ifndef CONFIG_BT_HCIBTUSB_DEBUG --#undef BT_DBG --#define BT_DBG(D...) 
--#endif -- --#define VERSION "0.3" -+#define VERSION "0.4" - - static int ignore_dga; - static int ignore_csr; - static int ignore_sniffer; - static int disable_scofix; - static int force_scofix; --static int reset; -+ -+static int reset = 1; - - static struct usb_driver btusb_driver; - - #define BTUSB_IGNORE 0x01 --#define BTUSB_RESET 0x02 --#define BTUSB_DIGIANSWER 0x04 --#define BTUSB_CSR 0x08 --#define BTUSB_SNIFFER 0x10 --#define BTUSB_BCM92035 0x20 --#define BTUSB_BROKEN_ISOC 0x40 --#define BTUSB_WRONG_SCO_MTU 0x80 -+#define BTUSB_DIGIANSWER 0x02 -+#define BTUSB_CSR 0x04 -+#define BTUSB_SNIFFER 0x08 -+#define BTUSB_BCM92035 0x10 -+#define BTUSB_BROKEN_ISOC 0x20 -+#define BTUSB_WRONG_SCO_MTU 0x40 - - static struct usb_device_id btusb_table[] = { - /* Generic Bluetooth USB device */ -@@ -79,7 +73,7 @@ static struct usb_device_id btusb_table[ - { USB_DEVICE(0x0bdb, 0x1002) }, - - /* Canyon CN-BTU1 with HID interfaces */ -- { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_RESET }, -+ { USB_DEVICE(0x0c10, 0x0000) }, - - { } /* Terminating entry */ - }; -@@ -94,52 +88,36 @@ static struct usb_device_id blacklist_ta - { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, - - /* Broadcom BCM2035 */ -- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, - - /* Broadcom BCM2045 */ -- { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- -- /* Broadcom BCM2046 */ -- { USB_DEVICE(0x0a5c, 0x2146), .driver_info = BTUSB_RESET }, -- { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, -- -- /* Apple MacBook Pro with Broadcom chip */ -- { USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET }, -+ { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* IBM/Lenovo ThinkPad with Broadcom chip */ -- { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- -- /* Targus ACB10US */ -- { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET }, -- { USB_DEVICE(0x0a5c, 0x2154), .driver_info = BTUSB_RESET }, -- -- /* ANYCOM Bluetooth USB-200 and USB-250 */ -- { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET }, -+ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* HP laptop with Broadcom chip */ -- { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* Dell laptop with Broadcom chip */ -- { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* Dell Wireless 370 */ -- { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* Dell Wireless 410 */ -- { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- -- /* Microsoft Wireless 
Transceiver for Bluetooth 2.0 */ -- { USB_DEVICE(0x045e, 0x009c), .driver_info = BTUSB_RESET }, -+ { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* Kensington Bluetooth USB adapter */ -- { USB_DEVICE(0x047d, 0x105d), .driver_info = BTUSB_RESET }, -- { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, - -- /* ISSC Bluetooth Adapter v3.1 */ -- { USB_DEVICE(0x1131, 0x1001), .driver_info = BTUSB_RESET }, -+ /* Belkin F8T012 and F8T013 devices */ -+ { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, -+ { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, - - /* RTX Telecom based adapters with buggy SCO support */ - { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, -@@ -148,13 +126,6 @@ static struct usb_device_id blacklist_ta - /* CONWISE Technology based adapters with buggy SCO support */ - { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC }, - -- /* Belkin F8T012 and F8T013 devices */ -- { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, -- -- /* Belkin F8T016 device */ -- { USB_DEVICE(0x050d, 0x016a), .driver_info = BTUSB_RESET }, -- - /* Digianswer devices */ - { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, - { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, -@@ -197,6 +168,8 @@ struct btusb_data { - struct usb_endpoint_descriptor *isoc_tx_ep; - struct usb_endpoint_descriptor *isoc_rx_ep; - -+ __u8 cmdreq_type; -+ - int isoc_altsetting; - }; - -@@ -589,7 +562,7 @@ static int btusb_send_frame(struct sk_bu - return -ENOMEM; - } - -- dr->bRequestType = USB_TYPE_CLASS; -+ dr->bRequestType = data->cmdreq_type; - dr->bRequest = 0; - dr->wIndex = 0; - dr->wValue = 0; -@@ -828,6 +801,8 @@ static int btusb_probe(struct usb_interf - return -ENODEV; - } - -+ data->cmdreq_type = USB_TYPE_CLASS; -+ - data->udev = interface_to_usbdev(intf); - data->intf = intf; - -@@ -862,11 +837,11 @@ static int btusb_probe(struct usb_interf - - hdev->owner = THIS_MODULE; - -- /* interface numbers are hardcoded in the spec */ -+ /* Interface numbers are hardcoded in the specification */ - data->isoc = usb_ifnum_to_if(data->udev, 1); - -- if (reset || id->driver_info & BTUSB_RESET) -- set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); -+ if (!reset) -+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); - - if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { - if (!disable_scofix) -@@ -876,9 +851,23 @@ static int btusb_probe(struct usb_interf - if (id->driver_info & BTUSB_BROKEN_ISOC) - data->isoc = NULL; - -+ if (id->driver_info & BTUSB_DIGIANSWER) { -+ data->cmdreq_type = USB_TYPE_VENDOR; -+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); -+ } -+ -+ if (id->driver_info & BTUSB_CSR) { -+ struct usb_device *udev = data->udev; -+ -+ /* Old firmware would otherwise execute USB reset */ -+ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) -+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); -+ } -+ - if (id->driver_info & BTUSB_SNIFFER) { - struct usb_device *udev = data->udev; - -+ /* New sniffer firmware has crippled HCI interface */ - if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) - set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); - -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_bcsp.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_bcsp.c ---- 
linux-omap-2.6.28-omap1/drivers/bluetooth/hci_bcsp.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_bcsp.c 2011-06-22 13:19:32.523063279 +0200 -@@ -47,11 +47,6 @@ - - #include "hci_uart.h" - --#ifndef CONFIG_BT_HCIUART_DEBUG --#undef BT_DBG --#define BT_DBG( A... ) --#endif -- - #define VERSION "0.3" - - static int txcrc = 1; -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4.c 2011-06-22 13:19:32.523063279 +0200 -@@ -46,11 +46,6 @@ - - #include "hci_uart.h" - --#ifndef CONFIG_BT_HCIUART_DEBUG --#undef BT_DBG --#define BT_DBG( A... ) --#endif -- - #define VERSION "1.2" - - struct h4_struct { -@@ -107,8 +102,7 @@ static int h4_close(struct hci_uart *hu) - - skb_queue_purge(&h4->txq); - -- if (h4->rx_skb) -- kfree_skb(h4->rx_skb); -+ kfree_skb(h4->rx_skb); - - hu->priv = NULL; - kfree(h4); -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/core.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/core.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/core.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/core.c 2011-06-22 13:19:32.523063279 +0200 -@@ -1,7 +1,7 @@ - /* - * This file is part of hci_h4p bluetooth driver - * -- * Copyright (C) 2005, 2006 Nokia Corporation. -+ * Copyright (C) 2005-2008 Nokia Corporation. - * - * Contact: Ville Tervo - * -@@ -22,7 +22,6 @@ - */ - - #include -- - #include - #include - #include -@@ -30,14 +29,14 @@ - #include - #include - #include --#include - #include - #include - #include -+#include - #include -+#include - - #include --#include - #include - #include - -@@ -47,8 +46,6 @@ - - #include "hci_h4p.h" - --#define PM_TIMEOUT 200 -- - /* This should be used in function that cannot release clocks */ - static void hci_h4p_set_clk(struct hci_h4p_info *info, int *clock, int enable) - { -@@ -58,21 +55,23 @@ static void hci_h4p_set_clk(struct hci_h - if (enable && !*clock) { - NBT_DBG_POWER("Enabling %p\n", clock); - clk_enable(info->uart_fclk); --#ifdef CONFIG_ARCH_OMAP2 -- if (cpu_is_omap24xx()) { -+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) -+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) - clk_enable(info->uart_iclk); -- omap2_block_sleep(); -- } - #endif -+ if (atomic_read(&info->clk_users) == 0) -+ hci_h4p_restore_regs(info); -+ atomic_inc(&info->clk_users); - } -+ - if (!enable && *clock) { - NBT_DBG_POWER("Disabling %p\n", clock); -+ if (atomic_dec_and_test(&info->clk_users)) -+ hci_h4p_store_regs(info); - clk_disable(info->uart_fclk); --#ifdef CONFIG_ARCH_OMAP2 -- if (cpu_is_omap24xx()) { -+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) -+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) - clk_disable(info->uart_iclk); -- omap2_allow_sleep(); -- } - #endif - } - -@@ -80,48 +79,70 @@ static void hci_h4p_set_clk(struct hci_h - spin_unlock_irqrestore(&info->clocks_lock, flags); - } - -+static void hci_h4p_lazy_clock_release(unsigned long data) -+{ -+ struct hci_h4p_info *info = (struct hci_h4p_info *)data; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&info->lock, flags); -+ if (!info->tx_enabled) -+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0); -+ spin_unlock_irqrestore(&info->lock, flags); -+} -+ - /* Power management functions */ --static void hci_h4p_disable_tx(struct hci_h4p_info *info) -+void 
hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable) - { -- NBT_DBG_POWER("\n"); -+ u8 v; - -- if (!info->pm_enabled) -- return; -+ v = hci_h4p_inb(info, UART_OMAP_SYSC); -+ v &= ~(UART_OMAP_SYSC_IDLEMASK); -+ -+ if (enable) -+ v |= UART_OMAP_SYSC_SMART_IDLE; -+ else -+ v |= UART_OMAP_SYSC_NO_IDLE; - -- mod_timer(&info->tx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); -+ hci_h4p_outb(info, UART_OMAP_SYSC, v); - } - --static void hci_h4p_enable_tx(struct hci_h4p_info *info) -+static void hci_h4p_disable_tx(struct hci_h4p_info *info) - { - NBT_DBG_POWER("\n"); - - if (!info->pm_enabled) - return; - -- del_timer_sync(&info->tx_pm_timer); -- if (info->tx_pm_enabled) { -- info->tx_pm_enabled = 0; -- hci_h4p_set_clk(info, &info->tx_clocks_en, 1); -- gpio_set_value(info->bt_wakeup_gpio, 1); -- } -+ /* Re-enable smart-idle */ -+ hci_h4p_smart_idle(info, 1); -+ -+ gpio_set_value(info->bt_wakeup_gpio, 0); -+ mod_timer(&info->lazy_release, jiffies + msecs_to_jiffies(100)); -+ info->tx_enabled = 0; - } - --static void hci_h4p_tx_pm_timer(unsigned long data) -+void hci_h4p_enable_tx(struct hci_h4p_info *info) - { -- struct hci_h4p_info *info; -- -+ unsigned long flags; - NBT_DBG_POWER("\n"); - -- info = (struct hci_h4p_info *)data; -+ if (!info->pm_enabled) -+ return; - -- if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { -- gpio_set_value(info->bt_wakeup_gpio, 0); -- hci_h4p_set_clk(info, &info->tx_clocks_en, 0); -- info->tx_pm_enabled = 1; -- } -- else { -- mod_timer(&info->tx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); -- } -+ spin_lock_irqsave(&info->lock, flags); -+ del_timer(&info->lazy_release); -+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1); -+ info->tx_enabled = 1; -+ gpio_set_value(info->bt_wakeup_gpio, 1); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ /* -+ * Disable smart-idle as UART TX interrupts -+ * are not wake-up capable -+ */ -+ hci_h4p_smart_idle(info, 0); -+ -+ spin_unlock_irqrestore(&info->lock, flags); - } - - static void hci_h4p_disable_rx(struct hci_h4p_info *info) -@@ -129,49 +150,39 @@ static void hci_h4p_disable_rx(struct hc - if (!info->pm_enabled) - return; - -- mod_timer(&info->rx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); -+ info->rx_enabled = 0; -+ -+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) -+ return; -+ -+ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) -+ return; -+ -+ __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); -+ info->autorts = 0; -+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0); - } - - static void hci_h4p_enable_rx(struct hci_h4p_info *info) - { -- unsigned long flags; -- - if (!info->pm_enabled) - return; - -- del_timer_sync(&info->rx_pm_timer); -- spin_lock_irqsave(&info->lock, flags); -- if (info->rx_pm_enabled) { -- hci_h4p_set_clk(info, &info->rx_clocks_en, 1); -- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_RDI); -- __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); -- info->rx_pm_enabled = 0; -- } -- spin_unlock_irqrestore(&info->lock, flags); --} -+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1); -+ info->rx_enabled = 1; - --static void hci_h4p_rx_pm_timer(unsigned long data) --{ -- unsigned long flags; -- struct hci_h4p_info *info = (struct hci_h4p_info *)data; -+ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) -+ return; - -- spin_lock_irqsave(&info->lock, flags); -- if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_DR)) { -- __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); -- hci_h4p_set_rts(info, 0); -- hci_h4p_outb(info, UART_IER, 
hci_h4p_inb(info, UART_IER) & ~UART_IER_RDI); -- hci_h4p_set_clk(info, &info->rx_clocks_en, 0); -- info->rx_pm_enabled = 1; -- } -- else { -- mod_timer(&info->rx_pm_timer, jiffies + msecs_to_jiffies(PM_TIMEOUT)); -- } -- spin_unlock_irqrestore(&info->lock, flags); -+ __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); -+ info->autorts = 1; - } - - /* Negotiation functions */ - int hci_h4p_send_alive_packet(struct hci_h4p_info *info) - { -+ unsigned long flags; -+ - NBT_DBG("Sending alive packet\n"); - - if (!info->alive_cmd_skb) -@@ -181,7 +192,10 @@ int hci_h4p_send_alive_packet(struct hci - info->alive_cmd_skb = skb_get(info->alive_cmd_skb); - - skb_queue_tail(&info->txq, info->alive_cmd_skb); -- tasklet_schedule(&info->tx_task); -+ spin_lock_irqsave(&info->lock, flags); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ spin_unlock_irqrestore(&info->lock, flags); - - NBT_DBG("Alive packet sent\n"); - -@@ -191,67 +205,75 @@ int hci_h4p_send_alive_packet(struct hci - static void hci_h4p_alive_packet(struct hci_h4p_info *info, struct sk_buff *skb) - { - NBT_DBG("Received alive packet\n"); -- if (skb->data[1] == 0xCC) { -- complete(&info->init_completion); -+ if (skb->data[1] != 0xCC) { -+ dev_err(info->dev, "Could not negotiate hci_h4p settings\n"); -+ info->init_error = -EINVAL; - } - -+ complete(&info->init_completion); - kfree_skb(skb); - } - --static int hci_h4p_send_negotiation(struct hci_h4p_info *info, struct sk_buff *skb) -+static int hci_h4p_send_negotiation(struct hci_h4p_info *info, -+ struct sk_buff *skb) - { -+ unsigned long flags; -+ int err; - NBT_DBG("Sending negotiation..\n"); - - hci_h4p_change_speed(info, INIT_SPEED); - -+ hci_h4p_set_rts(info, 1); - info->init_error = 0; - init_completion(&info->init_completion); - skb_queue_tail(&info->txq, skb); -- tasklet_schedule(&info->tx_task); -+ spin_lock_irqsave(&info->lock, flags); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ spin_unlock_irqrestore(&info->lock, flags); - - if (!wait_for_completion_interruptible_timeout(&info->init_completion, - msecs_to_jiffies(1000))) - return -ETIMEDOUT; - -- NBT_DBG("Negotiation sent\n"); -- return info->init_error; --} -+ if (info->init_error < 0) -+ return info->init_error; - --static void hci_h4p_negotiation_packet(struct hci_h4p_info *info, -- struct sk_buff *skb) --{ -- int err = 0; -+ /* Change to operational settings */ -+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); -+ hci_h4p_set_rts(info, 0); -+ hci_h4p_change_speed(info, MAX_BAUD_RATE); - -- if (skb->data[1] == 0x20) { -- /* Change to operational settings */ -- hci_h4p_set_rts(info, 0); -+ err = hci_h4p_wait_for_cts(info, 1, 100); -+ if (err < 0) -+ return err; - -- err = hci_h4p_wait_for_cts(info, 0, 100); -- if (err < 0) -- goto neg_ret; -+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); -+ init_completion(&info->init_completion); -+ err = hci_h4p_send_alive_packet(info); - -- hci_h4p_change_speed(info, MAX_BAUD_RATE); -+ if (err < 0) -+ return err; - -- err = hci_h4p_wait_for_cts(info, 1, 100); -- if (err < 0) -- goto neg_ret; -+ if (!wait_for_completion_interruptible_timeout(&info->init_completion, -+ msecs_to_jiffies(1000))) -+ return -ETIMEDOUT; - -- hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS); -+ if (info->init_error < 0) -+ return info->init_error; - -- err = hci_h4p_send_alive_packet(info); -- if (err < 0) -- goto neg_ret; -- } else { -+ NBT_DBG("Negotiation succesful\n"); -+ return 0; -+} -+ -+static void 
hci_h4p_negotiation_packet(struct hci_h4p_info *info, -+ struct sk_buff *skb) -+{ -+ if (skb->data[1] != 0x20) { - dev_err(info->dev, "Could not negotiate hci_h4p settings\n"); -- err = -EINVAL; -- goto neg_ret; -+ info->init_error = -EINVAL; - } - -- kfree_skb(skb); -- return; -- --neg_ret: -- info->init_error = err; - complete(&info->init_completion); - kfree_skb(skb); - } -@@ -272,11 +294,14 @@ static int hci_h4p_get_hdr_len(struct hc - retval = HCI_SCO_HDR_SIZE; - break; - case H4_NEG_PKT: -- retval = 11; -+ retval = 13; - break; - case H4_ALIVE_PKT: - retval = 3; - break; -+ case H4_RADIO_PKT: -+ retval = H4_RADIO_HDR_SIZE; -+ break; - default: - dev_err(info->dev, "Unknown H4 packet type 0x%.2x\n", pkt_type); - retval = -1; -@@ -293,6 +318,7 @@ static unsigned int hci_h4p_get_data_len - struct hci_event_hdr *evt_hdr; - struct hci_acl_hdr *acl_hdr; - struct hci_sco_hdr *sco_hdr; -+ struct hci_h4p_radio_hdr *radio_hdr; - - switch (bt_cb(skb)->pkt_type) { - case H4_EVT_PKT: -@@ -307,9 +333,11 @@ static unsigned int hci_h4p_get_data_len - sco_hdr = (struct hci_sco_hdr *)skb->data; - retval = sco_hdr->dlen; - break; -- case H4_NEG_PKT: -- retval = 0; -+ case H4_RADIO_PKT: -+ radio_hdr = (struct hci_h4p_radio_hdr *)skb->data; -+ retval = radio_hdr->dlen; - break; -+ case H4_NEG_PKT: - case H4_ALIVE_PKT: - retval = 0; - break; -@@ -331,10 +359,72 @@ static inline void hci_h4p_recv_frame(st - } - } - -+static inline void hci_h4p_handle_byte(struct hci_h4p_info *info, u8 byte) -+{ -+ switch (info->rx_state) { -+ case WAIT_FOR_PKT_TYPE: -+ bt_cb(info->rx_skb)->pkt_type = byte; -+ info->rx_count = hci_h4p_get_hdr_len(info, byte); -+ if (info->rx_count < 0) { -+ info->hdev->stat.err_rx++; -+ kfree_skb(info->rx_skb); -+ info->rx_skb = NULL; -+ } else { -+ info->rx_state = WAIT_FOR_HEADER; -+ } -+ break; -+ case WAIT_FOR_HEADER: -+ info->rx_count--; -+ *skb_put(info->rx_skb, 1) = byte; -+ if (info->rx_count == 0) { -+ info->rx_count = hci_h4p_get_data_len(info, -+ info->rx_skb); -+ if (info->rx_count > skb_tailroom(info->rx_skb)) { -+ dev_err(info->dev, "Too long frame.\n"); -+ info->garbage_bytes = info->rx_count - -+ skb_tailroom(info->rx_skb); -+ kfree_skb(info->rx_skb); -+ info->rx_skb = NULL; -+ break; -+ } -+ info->rx_state = WAIT_FOR_DATA; -+ -+ if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) { -+ hci_h4p_negotiation_packet(info, info->rx_skb); -+ info->rx_skb = NULL; -+ info->rx_state = WAIT_FOR_PKT_TYPE; -+ return; -+ } -+ if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) { -+ hci_h4p_alive_packet(info, info->rx_skb); -+ info->rx_skb = NULL; -+ info->rx_state = WAIT_FOR_PKT_TYPE; -+ return; -+ } -+ } -+ break; -+ case WAIT_FOR_DATA: -+ info->rx_count--; -+ *skb_put(info->rx_skb, 1) = byte; -+ break; -+ default: -+ WARN_ON(1); -+ break; -+ } -+ -+ if (info->rx_count == 0) { -+ /* H4+ devices should allways send word aligned -+ * packets */ -+ if (!(info->rx_skb->len % 2)) -+ info->garbage_bytes++; -+ hci_h4p_recv_frame(info, info->rx_skb); -+ info->rx_skb = NULL; -+ } -+} -+ - static void hci_h4p_rx_tasklet(unsigned long data) - { - u8 byte; -- unsigned long flags; - struct hci_h4p_info *info = (struct hci_h4p_info *)data; - - NBT_DBG("tasklet woke up\n"); -@@ -347,80 +437,33 @@ static void hci_h4p_rx_tasklet(unsigned - continue; - } - if (info->rx_skb == NULL) { -- info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC | GFP_DMA); -+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, -+ GFP_ATOMIC | GFP_DMA); - if (!info->rx_skb) { -- dev_err(info->dev, "Can't allocate memory for new 
packet\n"); -- goto finish_task; -+ dev_err(info->dev, -+ "No memory for new packet\n"); -+ goto finish_rx; - } - info->rx_state = WAIT_FOR_PKT_TYPE; - info->rx_skb->dev = (void *)info->hdev; - } - info->hdev->stat.byte_rx++; - NBT_DBG_TRANSFER_NF("0x%.2x ", byte); -- switch (info->rx_state) { -- case WAIT_FOR_PKT_TYPE: -- bt_cb(info->rx_skb)->pkt_type = byte; -- info->rx_count = hci_h4p_get_hdr_len(info, byte); -- if (info->rx_count < 0) { -- info->hdev->stat.err_rx++; -- kfree_skb(info->rx_skb); -- info->rx_skb = NULL; -- } else { -- info->rx_state = WAIT_FOR_HEADER; -- } -- break; -- case WAIT_FOR_HEADER: -- info->rx_count--; -- *skb_put(info->rx_skb, 1) = byte; -- if (info->rx_count == 0) { -- info->rx_count = hci_h4p_get_data_len(info, info->rx_skb); -- if (info->rx_count > skb_tailroom(info->rx_skb)) { -- dev_err(info->dev, "Frame is %ld bytes too long.\n", -- info->rx_count - skb_tailroom(info->rx_skb)); -- kfree_skb(info->rx_skb); -- info->rx_skb = NULL; -- info->garbage_bytes = info->rx_count - skb_tailroom(info->rx_skb); -- break; -- } -- info->rx_state = WAIT_FOR_DATA; -- -- if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) { -- hci_h4p_negotiation_packet(info, info->rx_skb); -- info->rx_skb = NULL; -- info->rx_state = WAIT_FOR_PKT_TYPE; -- goto finish_task; -- } -- if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) { -- hci_h4p_alive_packet(info, info->rx_skb); -- info->rx_skb = NULL; -- info->rx_state = WAIT_FOR_PKT_TYPE; -- goto finish_task; -- } -- } -- break; -- case WAIT_FOR_DATA: -- info->rx_count--; -- *skb_put(info->rx_skb, 1) = byte; -- if (info->rx_count == 0) { -- /* H4+ devices should allways send word aligned packets */ -- if (!(info->rx_skb->len % 2)) { -- info->garbage_bytes++; -- } -- hci_h4p_recv_frame(info, info->rx_skb); -- info->rx_skb = NULL; -- } -- break; -- default: -- WARN_ON(1); -- break; -- } -+ hci_h4p_handle_byte(info, byte); - } - --finish_task: -- spin_lock_irqsave(&info->lock, flags); -- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_RDI); -- spin_unlock_irqrestore(&info->lock, flags); -+ if (!info->rx_enabled) { -+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT && -+ info->autorts) { -+ __hci_h4p_set_auto_ctsrts(info, 0 , UART_EFR_RTS); -+ info->autorts = 0; -+ } -+ /* Flush posted write to avoid spurious interrupts */ -+ hci_h4p_inb(info, UART_OMAP_SCR); -+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0); -+ } - -+finish_rx: - NBT_DBG_TRANSFER_NF("\n"); - NBT_DBG("rx_ended\n"); - } -@@ -428,19 +471,48 @@ finish_task: - static void hci_h4p_tx_tasklet(unsigned long data) - { - unsigned int sent = 0; -- unsigned long flags; - struct sk_buff *skb; - struct hci_h4p_info *info = (struct hci_h4p_info *)data; - - NBT_DBG("tasklet woke up\n"); - NBT_DBG_TRANSFER("tx_tasklet woke up\n data "); - -+ if (info->autorts != info->rx_enabled) { -+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { -+ if (info->autorts && !info->rx_enabled) { -+ __hci_h4p_set_auto_ctsrts(info, 0, -+ UART_EFR_RTS); -+ info->autorts = 0; -+ } -+ if (!info->autorts && info->rx_enabled) { -+ __hci_h4p_set_auto_ctsrts(info, 1, -+ UART_EFR_RTS); -+ info->autorts = 1; -+ } -+ } else { -+ hci_h4p_outb(info, UART_OMAP_SCR, -+ hci_h4p_inb(info, UART_OMAP_SCR) | -+ UART_OMAP_SCR_EMPTY_THR); -+ goto finish_tx; -+ } -+ } -+ - skb = skb_dequeue(&info->txq); - if (!skb) { - /* No data in buffer */ - NBT_DBG("skb ready\n"); -- hci_h4p_disable_tx(info); -- return; -+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) { -+ hci_h4p_outb(info, UART_IER, -+ hci_h4p_inb(info, 
UART_IER) & -+ ~UART_IER_THRI); -+ hci_h4p_inb(info, UART_OMAP_SCR); -+ hci_h4p_disable_tx(info); -+ return; -+ } else -+ hci_h4p_outb(info, UART_OMAP_SCR, -+ hci_h4p_inb(info, UART_OMAP_SCR) | -+ UART_OMAP_SCR_EMPTY_THR); -+ goto finish_tx; - } - - /* Copy data to tx fifo */ -@@ -460,9 +532,15 @@ static void hci_h4p_tx_tasklet(unsigned - skb_queue_head(&info->txq, skb); - } - -- spin_lock_irqsave(&info->lock, flags); -- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | UART_IER_THRI); -- spin_unlock_irqrestore(&info->lock, flags); -+ hci_h4p_outb(info, UART_OMAP_SCR, hci_h4p_inb(info, UART_OMAP_SCR) & -+ ~UART_OMAP_SCR_EMPTY_THR); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ -+finish_tx: -+ /* Flush posted write to avoid spurious interrupts */ -+ hci_h4p_inb(info, UART_OMAP_SCR); -+ - } - - static irqreturn_t hci_h4p_interrupt(int irq, void *data) -@@ -470,13 +548,11 @@ static irqreturn_t hci_h4p_interrupt(int - struct hci_h4p_info *info = (struct hci_h4p_info *)data; - u8 iir, msr; - int ret; -- unsigned long flags; - - ret = IRQ_NONE; - - iir = hci_h4p_inb(info, UART_IIR); - if (iir & UART_IIR_NO_INT) { -- dev_err(info->dev, "Interrupt but no reason irq 0x%.2x\n", iir); - return IRQ_HANDLED; - } - -@@ -495,18 +571,12 @@ static irqreturn_t hci_h4p_interrupt(int - } - - if (iir == UART_IIR_RDI) { -- spin_lock_irqsave(&info->lock, flags); -- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) & ~UART_IER_RDI); -- spin_unlock_irqrestore(&info->lock, flags); -- tasklet_schedule(&info->rx_task); -+ hci_h4p_rx_tasklet((unsigned long)data); - ret = IRQ_HANDLED; - } - - if (iir == UART_IIR_THRI) { -- spin_lock_irqsave(&info->lock, flags); -- hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) & ~UART_IER_THRI); -- spin_unlock_irqrestore(&info->lock, flags); -- tasklet_schedule(&info->tx_task); -+ hci_h4p_tx_tasklet((unsigned long)data); - ret = IRQ_HANDLED; - } - -@@ -529,6 +599,11 @@ static irqreturn_t hci_h4p_wakeup_interr - - should_wakeup = gpio_get_value(info->host_wakeup_gpio); - NBT_DBG_POWER("gpio interrupt %d\n", should_wakeup); -+ -+ /* Check if wee have missed some interrupts */ -+ if (info->rx_enabled == should_wakeup) -+ return IRQ_HANDLED; -+ - if (should_wakeup) { - hci_h4p_enable_rx(info); - } else { -@@ -542,16 +617,20 @@ static int hci_h4p_reset(struct hci_h4p_ - { - int err; - -+ err = hci_h4p_reset_uart(info); -+ if (err < 0) { -+ dev_err(info->dev, "Uart reset failed\n"); -+ return err; -+ } - hci_h4p_init_uart(info); - hci_h4p_set_rts(info, 0); - - gpio_set_value(info->reset_gpio, 0); -- msleep(100); - gpio_set_value(info->bt_wakeup_gpio, 1); -+ msleep(10); - gpio_set_value(info->reset_gpio, 1); -- msleep(100); - -- err = hci_h4p_wait_for_cts(info, 1, 10); -+ err = hci_h4p_wait_for_cts(info, 1, 100); - if (err < 0) { - dev_err(info->dev, "No cts from bt chip\n"); - return err; -@@ -579,6 +658,7 @@ static int hci_h4p_hci_open(struct hci_d - int err; - struct sk_buff *neg_cmd_skb; - struct sk_buff_head fw_queue; -+ unsigned long flags; - - info = hdev->driver_data; - -@@ -602,38 +682,45 @@ static int hci_h4p_hci_open(struct hci_d - goto err_clean; - } - -- hci_h4p_set_clk(info, &info->tx_clocks_en, 1); -- hci_h4p_set_clk(info, &info->rx_clocks_en, 1); -- -- tasklet_enable(&info->tx_task); -- tasklet_enable(&info->rx_task); -+ info->rx_enabled = 1; - info->rx_state = WAIT_FOR_PKT_TYPE; - info->rx_count = 0; - info->garbage_bytes = 0; - info->rx_skb = NULL; - info->pm_enabled = 0; - init_completion(&info->fw_completion); 
-+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1); -+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1); - - err = hci_h4p_reset(info); - if (err < 0) - goto err_clean; - -+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS); -+ info->autorts = 1; - err = hci_h4p_send_negotiation(info, neg_cmd_skb); - neg_cmd_skb = NULL; - if (err < 0) - goto err_clean; - -+ - err = hci_h4p_send_fw(info, &fw_queue); - if (err < 0) { - dev_err(info->dev, "Sending firmware failed.\n"); - goto err_clean; - } - -+ info->pm_enabled = 1; -+ -+ spin_lock_irqsave(&info->lock, flags); -+ info->rx_enabled = gpio_get_value(info->host_wakeup_gpio); -+ hci_h4p_set_clk(info, &info->rx_clocks_en, info->rx_enabled); -+ spin_unlock_irqrestore(&info->lock, flags); -+ -+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0); -+ - kfree_skb(info->alive_cmd_skb); - info->alive_cmd_skb = NULL; -- info->pm_enabled = 1; -- info->tx_pm_enabled = 1; -- info->rx_pm_enabled = 0; - set_bit(HCI_RUNNING, &hdev->flags); - - NBT_DBG("hci up and running\n"); -@@ -641,9 +728,8 @@ static int hci_h4p_hci_open(struct hci_d - - err_clean: - hci_h4p_hci_flush(hdev); -- tasklet_disable(&info->tx_task); -- tasklet_disable(&info->rx_task); - hci_h4p_reset_uart(info); -+ del_timer_sync(&info->lazy_release); - hci_h4p_set_clk(info, &info->tx_clocks_en, 0); - hci_h4p_set_clk(info, &info->rx_clocks_en, 0); - gpio_set_value(info->reset_gpio, 0); -@@ -666,13 +752,10 @@ static int hci_h4p_hci_close(struct hci_ - return 0; - - hci_h4p_hci_flush(hdev); -- del_timer_sync(&info->tx_pm_timer); -- del_timer_sync(&info->rx_pm_timer); -- tasklet_disable(&info->tx_task); -- tasklet_disable(&info->rx_task); - hci_h4p_set_clk(info, &info->tx_clocks_en, 1); - hci_h4p_set_clk(info, &info->rx_clocks_en, 1); - hci_h4p_reset_uart(info); -+ del_timer_sync(&info->lazy_release); - hci_h4p_set_clk(info, &info->tx_clocks_en, 0); - hci_h4p_set_clk(info, &info->rx_clocks_en, 0); - gpio_set_value(info->reset_gpio, 0); -@@ -723,18 +806,20 @@ static int hci_h4p_hci_send_frame(struct - /* We should allways send word aligned data to h4+ devices */ - if (skb->len % 2) { - err = skb_pad(skb, 1); -+ if (!err) -+ *skb_put(skb, 1) = 0x00; - } - if (err) - return err; - -- hci_h4p_enable_tx(info); - skb_queue_tail(&info->txq, skb); -- tasklet_schedule(&info->tx_task); -+ hci_h4p_enable_tx(info); - - return 0; - } - --static int hci_h4p_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) -+static int hci_h4p_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, -+ unsigned long arg) - { - return -ENOIOCTLCMD; - } -@@ -761,11 +846,12 @@ static int hci_h4p_register_hdev(struct - hdev->send = hci_h4p_hci_send_frame; - hdev->destruct = hci_h4p_hci_destruct; - hdev->ioctl = hci_h4p_hci_ioctl; -+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); - - hdev->owner = THIS_MODULE; - - if (hci_register_dev(hdev) < 0) { -- dev_err(info->dev, "hci_h4p: Can't register HCI device %s.\n", hdev->name); -+ dev_err(info->dev, "hci_register failed %s.\n", hdev->name); - return -ENODEV; - } - -@@ -785,28 +871,19 @@ static int hci_h4p_probe(struct platform - - info->dev = &pdev->dev; - info->pm_enabled = 0; -- info->tx_pm_enabled = 0; -- info->rx_pm_enabled = 0; -+ info->tx_enabled = 1; -+ info->rx_enabled = 1; - info->garbage_bytes = 0; - info->tx_clocks_en = 0; - info->rx_clocks_en = 0; -- tasklet_init(&info->tx_task, hci_h4p_tx_tasklet, (unsigned long)info); -- tasklet_init(&info->rx_task, hci_h4p_rx_tasklet, (unsigned long)info); -- /* hci_h4p_hci_open assumes that tasklet is disabled in startup */ -- 
tasklet_disable(&info->tx_task); -- tasklet_disable(&info->rx_task); -+ irq = 0; - spin_lock_init(&info->lock); - spin_lock_init(&info->clocks_lock); - skb_queue_head_init(&info->txq); -- init_timer(&info->tx_pm_timer); -- info->tx_pm_timer.function = hci_h4p_tx_pm_timer; -- info->tx_pm_timer.data = (unsigned long)info; -- init_timer(&info->rx_pm_timer); -- info->rx_pm_timer.function = hci_h4p_rx_pm_timer; -- info->rx_pm_timer.data = (unsigned long)info; - - if (pdev->dev.platform_data == NULL) { - dev_err(&pdev->dev, "Could not get Bluetooth config data\n"); -+ kfree(info); - return -ENODATA; - } - -@@ -823,33 +900,30 @@ static int hci_h4p_probe(struct platform - NBT_DBG("Uart: %d\n", bt_config->bt_uart); - NBT_DBG("sysclk: %d\n", info->bt_sysclk); - -- err = gpio_request(info->reset_gpio, "BT reset"); -+ err = gpio_request(info->reset_gpio, "bt_reset"); - if (err < 0) { - dev_err(&pdev->dev, "Cannot get GPIO line %d\n", - info->reset_gpio); -- kfree(info); -- goto cleanup; -+ goto cleanup_setup; - } - -- err = gpio_request(info->bt_wakeup_gpio, "BT wakeup"); -+ err = gpio_request(info->bt_wakeup_gpio, "bt_wakeup"); - if (err < 0) - { - dev_err(info->dev, "Cannot get GPIO line 0x%d", - info->bt_wakeup_gpio); - gpio_free(info->reset_gpio); -- kfree(info); -- goto cleanup; -+ goto cleanup_setup; - } - -- err = gpio_request(info->host_wakeup_gpio, "BT host wakeup"); -+ err = gpio_request(info->host_wakeup_gpio, "host_wakeup"); - if (err < 0) - { - dev_err(info->dev, "Cannot get GPIO line %d", - info->host_wakeup_gpio); - gpio_free(info->reset_gpio); - gpio_free(info->bt_wakeup_gpio); -- kfree(info); -- goto cleanup; -+ goto cleanup_setup; - } - - gpio_direction_output(info->reset_gpio, 0); -@@ -866,10 +940,7 @@ static int hci_h4p_probe(struct platform - info->uart_iclk = clk_get(NULL, "uart1_ick"); - info->uart_fclk = clk_get(NULL, "uart1_fck"); - } -- /* FIXME: Use platform_get_resource for the port */ -- info->uart_base = ioremap(OMAP_UART1_BASE, 0x16); -- if (!info->uart_base) -- goto cleanup; -+ info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART1_BASE); - break; - case 2: - if (cpu_is_omap16xx()) { -@@ -880,10 +951,7 @@ static int hci_h4p_probe(struct platform - info->uart_iclk = clk_get(NULL, "uart2_ick"); - info->uart_fclk = clk_get(NULL, "uart2_fck"); - } -- /* FIXME: Use platform_get_resource for the port */ -- info->uart_base = ioremap(OMAP_UART2_BASE, 0x16); -- if (!info->uart_base) -- goto cleanup; -+ info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART2_BASE); - break; - case 3: - if (cpu_is_omap16xx()) { -@@ -894,10 +962,7 @@ static int hci_h4p_probe(struct platform - info->uart_iclk = clk_get(NULL, "uart3_ick"); - info->uart_fclk = clk_get(NULL, "uart3_fck"); - } -- /* FIXME: Use platform_get_resource for the port */ -- info->uart_base = ioremap(OMAP_UART3_BASE, 0x16); -- if (!info->uart_base) -- goto cleanup; -+ info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART3_BASE); - break; - default: - dev_err(info->dev, "No uart defined\n"); -@@ -905,71 +970,83 @@ static int hci_h4p_probe(struct platform - } - - info->irq = irq; -- err = request_irq(irq, hci_h4p_interrupt, 0, "hci_h4p", (void *)info); -+ err = request_irq(irq, hci_h4p_interrupt, IRQF_DISABLED, "hci_h4p", -+ info); - if (err < 0) { - dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n", irq); - goto cleanup; - } - - err = request_irq(gpio_to_irq(info->host_wakeup_gpio), -- hci_h4p_wakeup_interrupt, -- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, -- "hci_h4p_wkup", (void *)info); -+ hci_h4p_wakeup_interrupt, IRQF_TRIGGER_FALLING | -+ 
IRQF_TRIGGER_RISING | IRQF_DISABLED, -+ "hci_h4p_wkup", info); - if (err < 0) { - dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n", - gpio_to_irq(info->host_wakeup_gpio)); -- free_irq(irq, (void *)info); -+ free_irq(irq, info); -+ goto cleanup; -+ } -+ -+ err = set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1); -+ if (err < 0) { -+ dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n", -+ gpio_to_irq(info->host_wakeup_gpio)); -+ free_irq(irq, info); -+ free_irq(gpio_to_irq(info->host_wakeup_gpio), info); - goto cleanup; - } - -+ init_timer_deferrable(&info->lazy_release); -+ info->lazy_release.function = hci_h4p_lazy_clock_release; -+ info->lazy_release.data = (unsigned long)info; - hci_h4p_set_clk(info, &info->tx_clocks_en, 1); -- hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_CTS | UART_EFR_RTS); -- err = hci_h4p_init_uart(info); -+ err = hci_h4p_reset_uart(info); - if (err < 0) - goto cleanup_irq; -+ hci_h4p_init_uart(info); -+ hci_h4p_set_rts(info, 0); - err = hci_h4p_reset(info); -+ hci_h4p_reset_uart(info); - if (err < 0) - goto cleanup_irq; -- err = hci_h4p_wait_for_cts(info, 1, 10); -- if (err < 0) -- goto cleanup_irq; -+ gpio_set_value(info->reset_gpio, 0); - hci_h4p_set_clk(info, &info->tx_clocks_en, 0); - - platform_set_drvdata(pdev, info); -- err = hci_h4p_sysfs_create_files(info->dev); -- if (err < 0) -- goto cleanup_irq; - - if (hci_h4p_register_hdev(info) < 0) { - dev_err(info->dev, "failed to register hci_h4p hci device\n"); - goto cleanup_irq; - } -- gpio_set_value(info->reset_gpio, 0); - - return 0; - - cleanup_irq: - free_irq(irq, (void *)info); -- free_irq(gpio_to_irq(info->host_wakeup_gpio), (void *)info); -+ free_irq(gpio_to_irq(info->host_wakeup_gpio), info); - cleanup: - gpio_set_value(info->reset_gpio, 0); - gpio_free(info->reset_gpio); - gpio_free(info->bt_wakeup_gpio); - gpio_free(info->host_wakeup_gpio); -- kfree(info); - -+cleanup_setup: -+ -+ kfree(info); - return err; - - } - --static int hci_h4p_remove(struct platform_device *dev) -+static int hci_h4p_remove(struct platform_device *pdev) - { - struct hci_h4p_info *info; - -- info = platform_get_drvdata(dev); -+ info = platform_get_drvdata(pdev); - - hci_h4p_hci_close(info->hdev); -- free_irq(gpio_to_irq(info->host_wakeup_gpio), (void *) info); -+ free_irq(gpio_to_irq(info->host_wakeup_gpio), info); -+ hci_unregister_dev(info->hdev); - hci_free_dev(info->hdev); - gpio_free(info->reset_gpio); - gpio_free(info->bt_wakeup_gpio); -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-bcm.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw-bcm.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-bcm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw-bcm.c 2011-06-22 13:19:32.523063279 +0200 -@@ -0,0 +1,161 @@ -+/* -+ * This file is part of hci_h4p bluetooth driver -+ * -+ * Copyright (C) 2005-2008 Nokia Corporation. -+ * -+ * Contact: Ville Tervo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "hci_h4p.h" -+ -+static struct sk_buff_head *fw_q; -+ -+static int inject_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb) -+{ -+ unsigned int offset; -+ int i; -+ struct omap_bluetooth_config *config; -+ -+ config = info->dev->platform_data; -+ -+ if (!config) -+ return -ENODEV; -+ -+ if (skb->len < 10) { -+ dev_info(info->dev, "Valid bluetooth address not found.\n"); -+ return -ENODATA; -+ } -+ -+ offset = 4; -+ skb->data[offset + 5] = config->bd_addr[0]; -+ skb->data[offset + 4] = config->bd_addr[1]; -+ skb->data[offset + 3] = config->bd_addr[2]; -+ skb->data[offset + 2] = config->bd_addr[3]; -+ skb->data[offset + 1] = config->bd_addr[4]; -+ skb->data[offset + 0] = config->bd_addr[5]; -+ -+ for (i = 0; i < 6; i++) { -+ if (config->bd_addr[i] != 0x00) -+ break; -+ } -+ -+ if (i > 5) { -+ dev_info(info->dev, "Valid bluetooth address not found.\n"); -+ return -ENODEV; -+ } -+ -+ return 0; -+} -+ -+void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb) -+{ -+ struct sk_buff *fw_skb; -+ int err; -+ unsigned long flags; -+ -+ if (skb->data[5] != 0x00) { -+ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n", -+ skb->data[5]); -+ info->fw_error = -EPROTO; -+ } -+ -+ kfree_skb(skb); -+ -+ fw_skb = skb_dequeue(fw_q); -+ if (fw_skb == NULL || info->fw_error) { -+ complete(&info->fw_completion); -+ return; -+ } -+ -+ if (fw_skb->data[1] == 0x01 && fw_skb->data[2] == 0xfc) { -+ NBT_DBG_FW("Injecting bluetooth address\n"); -+ err = inject_bdaddr(info, fw_skb); -+ if (err < 0) { -+ kfree_skb(fw_skb); -+ info->fw_error = err; -+ complete(&info->fw_completion); -+ return; -+ } -+ } -+ -+ skb_queue_tail(&info->txq, fw_skb); -+ spin_lock_irqsave(&info->lock, flags); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ spin_unlock_irqrestore(&info->lock, flags); -+} -+ -+ -+int hci_h4p_bcm_send_fw(struct hci_h4p_info *info, -+ struct sk_buff_head *fw_queue) -+{ -+ struct sk_buff *skb; -+ unsigned long flags, time; -+ -+ info->fw_error = 0; -+ -+ NBT_DBG_FW("Sending firmware\n"); -+ -+ time = jiffies; -+ -+ fw_q = fw_queue; -+ skb = skb_dequeue(fw_queue); -+ if (!skb) -+ return -ENODATA; -+ -+ NBT_DBG_FW("Sending commands\n"); -+ -+ /* -+ * Disable smart-idle as UART TX interrupts -+ * are not wake-up capable -+ */ -+ hci_h4p_smart_idle(info, 0); -+ -+ /* Check if this is bd_address packet */ -+ init_completion(&info->fw_completion); -+ skb_queue_tail(&info->txq, skb); -+ spin_lock_irqsave(&info->lock, flags); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ spin_unlock_irqrestore(&info->lock, flags); -+ -+ if (!wait_for_completion_timeout(&info->fw_completion, -+ msecs_to_jiffies(2000))) { -+ dev_err(info->dev, "No reply to fw command\n"); -+ return -ETIMEDOUT; -+ } -+ -+ if (info->fw_error) { -+ dev_err(info->dev, "FW error\n"); -+ return -EPROTO; -+ } -+ -+ NBT_DBG_FW("Firmware sent in %d msecs\n", -+ jiffies_to_msecs(jiffies-time)); -+ -+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS); -+ hci_h4p_set_rts(info, 0); -+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); -+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS); -+ -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw.c 
linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw.c 2011-06-22 13:19:32.523063279 +0200 -@@ -46,6 +46,9 @@ static int hci_h4p_open_firmware(struct - case BT_CHIP_CSR: - err = request_firmware(fw_entry, "bc4fw.bin", info->dev); - break; -+ case BT_CHIP_BCM: -+ err = request_firmware(fw_entry, "bcmfw.bin", info->dev); -+ break; - default: - dev_err(info->dev, "Invalid chip type\n"); - *fw_entry = NULL; -@@ -72,12 +75,18 @@ static int hci_h4p_read_fw_cmd(struct hc - return 0; - } - -+ if (fw_pos + 2 > fw_entry->size) { -+ dev_err(info->dev, "Corrupted firmware image 1\n"); -+ return -EMSGSIZE; -+ } -+ - cmd_len = fw_entry->data[fw_pos++]; -- if (!cmd_len) -+ cmd_len += fw_entry->data[fw_pos++] << 8; -+ if (cmd_len == 0) - return 0; - - if (fw_pos + cmd_len > fw_entry->size) { -- dev_err(info->dev, "Corrupted firmware image\n"); -+ dev_err(info->dev, "Corrupted firmware image 2\n"); - return -EMSGSIZE; - } - -@@ -126,6 +135,9 @@ int hci_h4p_send_fw(struct hci_h4p_info - case BT_CHIP_TI: - err = hci_h4p_brf6150_send_fw(info, fw_queue); - break; -+ case BT_CHIP_BCM: -+ err = hci_h4p_bcm_send_fw(info, fw_queue); -+ break; - default: - dev_err(info->dev, "Don't know how to send firmware\n"); - err = -EINVAL; -@@ -143,6 +155,9 @@ void hci_h4p_parse_fw_event(struct hci_h - case BT_CHIP_TI: - hci_h4p_brf6150_parse_fw_event(info, skb); - break; -+ case BT_CHIP_BCM: -+ hci_h4p_bcm_parse_fw_event(info, skb); -+ break; - default: - dev_err(info->dev, "Don't know how to parse fw event\n"); - info->fw_error = -EINVAL; -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-csr.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw-csr.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-csr.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw-csr.c 2011-06-22 13:19:32.523063279 +0200 -@@ -1,7 +1,7 @@ - /* - * This file is part of hci_h4p bluetooth driver - * -- * Copyright (C) 2005, 2006 Nokia Corporation. -+ * Copyright (C) 2005-2008 Nokia Corporation. 
- * - * Contact: Ville Tervo - * -@@ -50,6 +50,8 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i - struct sk_buff *skb; - unsigned int offset; - int retries, count, i; -+ unsigned long flags; -+ struct omap_bluetooth_config *config; - - info->fw_error = 0; - -@@ -59,21 +61,27 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i - if (!skb) - return -ENOMSG; - -+ config = info->dev->platform_data; -+ if (!config) { -+ kfree_skb(skb); -+ return -ENODEV; -+ } -+ - /* Check if this is bd_address packet */ - if (skb->data[15] == 0x01 && skb->data[16] == 0x00) { - offset = 21; - skb->data[offset + 1] = 0x00; - skb->data[offset + 5] = 0x00; -- skb->data[offset + 7] = info->bdaddr[0]; -- skb->data[offset + 6] = info->bdaddr[1]; -- skb->data[offset + 4] = info->bdaddr[2]; -- skb->data[offset + 0] = info->bdaddr[3]; -- skb->data[offset + 3] = info->bdaddr[4]; -- skb->data[offset + 2] = info->bdaddr[5]; -+ skb->data[offset + 7] = config->bd_addr[0]; -+ skb->data[offset + 6] = config->bd_addr[1]; -+ skb->data[offset + 4] = config->bd_addr[2]; -+ skb->data[offset + 0] = config->bd_addr[3]; -+ skb->data[offset + 3] = config->bd_addr[4]; -+ skb->data[offset + 2] = config->bd_addr[5]; - } - - for (i = 0; i < 6; i++) { -- if (info->bdaddr[i] != 0x00) -+ if (config->bd_addr[i] != 0x00) - break; - } - -@@ -87,7 +95,10 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i - NBT_DBG_FW("Sending firmware command %d\n", count); - init_completion(&info->fw_completion); - skb_queue_tail(&info->txq, skb); -- tasklet_schedule(&info->tx_task); -+ spin_lock_irqsave(&info->lock, flags); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ spin_unlock_irqrestore(&info->lock, flags); - - skb = skb_dequeue(fw_queue); - if (!skb) -@@ -120,7 +131,7 @@ int hci_h4p_bc4_send_fw(struct hci_h4p_i - hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE); - - if (hci_h4p_wait_for_cts(info, 1, 100)) { -- dev_err(info->dev, "cts didn't go down after final speed change\n"); -+ dev_err(info->dev, "cts didn't deassert after final speed\n"); - return -ETIMEDOUT; - } - -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-ti.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw-ti.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/fw-ti.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/fw-ti.c 2011-06-22 13:19:32.523063279 +0200 -@@ -1,7 +1,7 @@ - /* - * This file is part of hci_h4p bluetooth driver - * -- * Copyright (C) 2005, 2006 Nokia Corporation. -+ * Copyright (C) 2005-2008 Nokia Corporation. 
- * - * Contact: Ville Tervo - * -@@ -22,6 +22,7 @@ - */ - - #include -+#include - - #include "hci_h4p.h" - -@@ -55,10 +56,12 @@ ret: - complete(&info->fw_completion); - } - --int hci_h4p_brf6150_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue) -+int hci_h4p_brf6150_send_fw(struct hci_h4p_info *info, -+ struct sk_buff_head *fw_queue) - { - struct sk_buff *skb; - int err = 0; -+ unsigned long flags; - - info->fw_error = 0; - -@@ -72,7 +75,10 @@ int hci_h4p_brf6150_send_fw(struct hci_h - - init_completion(&info->fw_completion); - skb_queue_tail(&info->txq, skb); -- tasklet_schedule(&info->tx_task); -+ spin_lock_irqsave(&info->lock, flags); -+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) | -+ UART_IER_THRI); -+ spin_unlock_irqrestore(&info->lock, flags); - - if (!wait_for_completion_timeout(&info->fw_completion, HZ)) { - dev_err(info->dev, "Timeout while sending brf6150 fw\n"); -@@ -80,7 +86,8 @@ int hci_h4p_brf6150_send_fw(struct hci_h - } - - if (info->fw_error) { -- dev_err(info->dev, "There was fw_error while sending bfr6150 fw\n"); -+ dev_err(info->dev, -+ "fw_error while sending bfr6150 fw\n"); - return -EPROTO; - } - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/hci_h4p.h linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/hci_h4p.h ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/hci_h4p.h 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/hci_h4p.h 2011-06-22 13:19:32.523063279 +0200 -@@ -1,7 +1,7 @@ - /* - * This file is part of hci_h4p bluetooth driver - * -- * Copyright (C) 2005, 2006 Nokia Corporation. -+ * Copyright (C) 2005-2008 Nokia Corporation. - * - * Contact: Ville Tervo - * -@@ -37,6 +37,13 @@ - #define UART_OMAP_SSR_WAKEUP 0x02 - #define UART_OMAP_SSR_TXFULL 0x01 - -+#define UART_OMAP_SYSC_IDLEMODE 0x03 -+#define UART_OMAP_SYSC_IDLEMASK (3 << UART_OMAP_SYSC_IDLEMODE) -+ -+#define UART_OMAP_SYSC_FORCE_IDLE (0 << UART_OMAP_SYSC_IDLEMODE) -+#define UART_OMAP_SYSC_NO_IDLE (1 << UART_OMAP_SYSC_IDLEMODE) -+#define UART_OMAP_SYSC_SMART_IDLE (2 << UART_OMAP_SYSC_IDLEMODE) -+ - #if 0 - #define NBT_DBG(fmt, arg...) 
printk("%s: " fmt "" , __FUNCTION__ , ## arg) - #else -@@ -74,6 +81,7 @@ - #endif - - struct hci_h4p_info { -+ struct timer_list lazy_release; - struct hci_dev *hdev; - spinlock_t lock; - -@@ -81,14 +89,12 @@ struct hci_h4p_info { - unsigned long uart_phys_base; - int irq; - struct device *dev; -- u8 bdaddr[6]; - u8 chip_type; - u8 bt_wakeup_gpio; - u8 host_wakeup_gpio; - u8 reset_gpio; - u8 bt_sysclk; - -- - struct sk_buff_head fw_queue; - struct sk_buff *alive_cmd_skb; - struct completion init_completion; -@@ -97,27 +103,35 @@ struct hci_h4p_info { - int init_error; - - struct sk_buff_head txq; -- struct tasklet_struct tx_task; - - struct sk_buff *rx_skb; - long rx_count; - unsigned long rx_state; - unsigned long garbage_bytes; -- struct tasklet_struct rx_task; - - int pm_enabled; -- int tx_pm_enabled; -- int rx_pm_enabled; -- struct timer_list tx_pm_timer; -- struct timer_list rx_pm_timer; -+ int tx_enabled; -+ int autorts; -+ int rx_enabled; - - int tx_clocks_en; - int rx_clocks_en; - spinlock_t clocks_lock; - struct clk *uart_iclk; - struct clk *uart_fclk; -+ atomic_t clk_users; -+ u16 dll; -+ u16 dlh; -+ u16 ier; -+ u16 mdr1; -+ u16 efr; - }; - -+struct hci_h4p_radio_hdr { -+ __u8 evt; -+ __u8 dlen; -+} __attribute__ ((packed)); -+ - #define MAX_BAUD_RATE 921600 - #define BC4_MAX_BAUD_RATE 3692300 - #define UART_CLOCK 48000000 -@@ -127,6 +141,7 @@ struct hci_h4p_info { - #define INIT_SPEED 120000 - - #define H4_TYPE_SIZE 1 -+#define H4_RADIO_HDR_SIZE 2 - - /* H4+ packet types */ - #define H4_CMD_PKT 0x01 -@@ -135,6 +150,7 @@ struct hci_h4p_info { - #define H4_EVT_PKT 0x04 - #define H4_NEG_PKT 0x06 - #define H4_ALIVE_PKT 0x07 -+#define H4_RADIO_PKT 0x08 - - /* TX states */ - #define WAIT_FOR_PKT_TYPE 1 -@@ -154,6 +170,11 @@ struct hci_bc4_set_bdaddr { - - int hci_h4p_send_alive_packet(struct hci_h4p_info *info); - -+void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, -+ struct sk_buff *skb); -+int hci_h4p_bcm_send_fw(struct hci_h4p_info *info, -+ struct sk_buff_head *fw_queue); -+ - void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, - struct sk_buff *skb); - int hci_h4p_bc4_send_fw(struct hci_h4p_info *info, -@@ -169,6 +190,7 @@ int hci_h4p_send_fw(struct hci_h4p_info - void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb); - - int hci_h4p_sysfs_create_files(struct device *dev); -+void hci_h4p_sysfs_remove_files(struct device *dev); - - void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val); - u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset); -@@ -178,6 +200,10 @@ void __hci_h4p_set_auto_ctsrts(struct hc - void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which); - void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed); - int hci_h4p_reset_uart(struct hci_h4p_info *info); --int hci_h4p_init_uart(struct hci_h4p_info *info); -+void hci_h4p_init_uart(struct hci_h4p_info *info); -+void hci_h4p_enable_tx(struct hci_h4p_info *info); -+void hci_h4p_store_regs(struct hci_h4p_info *info); -+void hci_h4p_restore_regs(struct hci_h4p_info *info); -+void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable); - - #endif /* __DRIVERS_BLUETOOTH_HCI_H4P_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/Makefile linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/Makefile ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/Makefile 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/Makefile 2011-06-22 13:19:32.523063279 +0200 -@@ -4,4 
+4,4 @@ - - obj-$(CONFIG_BT_HCIH4P) += hci_h4p.o - --hci_h4p-objs := core.o fw.o uart.o sysfs.o fw-ti.o fw-csr.o -+hci_h4p-objs := core.o fw.o uart.o fw-ti.o fw-csr.o fw-bcm.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/sysfs.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/sysfs.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/sysfs.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/sysfs.c 1970-01-01 01:00:00.000000000 +0100 -@@ -1,74 +0,0 @@ --/* -- * This file is part of hci_h4p bluetooth driver -- * -- * Copyright (C) 2005, 2006 Nokia Corporation. -- * -- * Contact: Ville Tervo -- * -- * This program is free software; you can redistribute it and/or -- * modify it under the terms of the GNU General Public License -- * version 2 as published by the Free Software Foundation. -- * -- * This program is distributed in the hope that it will be useful, but -- * WITHOUT ANY WARRANTY; without even the implied warranty of -- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- * General Public License for more details. -- * -- * You should have received a copy of the GNU General Public License -- * along with this program; if not, write to the Free Software -- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -- * 02110-1301 USA -- * -- */ -- --#include --#include --#include --#include -- --#include "hci_h4p.h" -- --#ifdef CONFIG_SYSFS -- --static ssize_t hci_h4p_store_bdaddr(struct device *dev, struct device_attribute *attr, -- const char *buf, size_t count) --{ -- struct hci_h4p_info *info = (struct hci_h4p_info*)dev_get_drvdata(dev); -- unsigned int bdaddr[6]; -- int ret, i; -- -- ret = sscanf(buf, "%2x:%2x:%2x:%2x:%2x:%2x\n", -- &bdaddr[0], &bdaddr[1], &bdaddr[2], -- &bdaddr[3], &bdaddr[4], &bdaddr[5]); -- -- if (ret != 6) { -- return -EINVAL; -- } -- -- for (i = 0; i < 6; i++) -- info->bdaddr[i] = bdaddr[i] & 0xff; -- -- return count; --} -- --static ssize_t hci_h4p_show_bdaddr(struct device *dev, struct device_attribute *attr, -- char *buf) --{ -- struct hci_h4p_info *info = (struct hci_h4p_info*)dev_get_drvdata(dev); -- -- return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", -- info->bdaddr[0], -- info->bdaddr[1], -- info->bdaddr[2], -- info->bdaddr[3], -- info->bdaddr[4], -- info->bdaddr[5]); --} -- --static DEVICE_ATTR(bdaddr, S_IRUGO | S_IWUSR, hci_h4p_show_bdaddr, hci_h4p_store_bdaddr); --int hci_h4p_sysfs_create_files(struct device *dev) --{ -- return device_create_file(dev, &dev_attr_bdaddr); --} -- --#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/uart.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/uart.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_h4p/uart.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_h4p/uart.c 2011-06-22 13:19:32.523063279 +0200 -@@ -31,12 +31,12 @@ - - inline void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val) - { -- outb(val, info->uart_base + (offset << 2)); -+ __raw_writeb(val, info->uart_base + (offset << 2)); - } - - inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset) - { -- return inb(info->uart_base + (offset << 2)); -+ return __raw_readb(info->uart_base + (offset << 2)); - } - - void hci_h4p_set_rts(struct hci_h4p_info *info, int active) -@@ -54,14 +54,11 @@ void hci_h4p_set_rts(struct hci_h4p_info - int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, - int timeout_ms) - { -- int okay; - unsigned long timeout; -+ int state; - 
-- okay = 0; - timeout = jiffies + msecs_to_jiffies(timeout_ms); - for (;;) { -- int state; -- - state = hci_h4p_inb(info, UART_MSR) & UART_MSR_CTS; - if (active) { - if (state) -@@ -72,6 +69,7 @@ int hci_h4p_wait_for_cts(struct hci_h4p_ - } - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; -+ msleep(1); - } - } - -@@ -140,25 +138,60 @@ int hci_h4p_reset_uart(struct hci_h4p_in - return 0; - } - --int hci_h4p_init_uart(struct hci_h4p_info *info) -+ -+void hci_h4p_store_regs(struct hci_h4p_info *info) -+{ -+ u16 lcr = 0; -+ -+ lcr = hci_h4p_inb(info, UART_LCR); -+ hci_h4p_outb(info, UART_LCR, 0xBF); -+ info->dll = hci_h4p_inb(info, UART_DLL); -+ info->dlh = hci_h4p_inb(info, UART_DLM); -+ info->efr = hci_h4p_inb(info, UART_EFR); -+ hci_h4p_outb(info, UART_LCR, lcr); -+ info->mdr1 = hci_h4p_inb(info, UART_OMAP_MDR1); -+ info->ier = hci_h4p_inb(info, UART_IER); -+} -+ -+void hci_h4p_restore_regs(struct hci_h4p_info *info) - { -- int err; -+ u16 lcr = 0; -+ -+ hci_h4p_init_uart(info); - -- err = hci_h4p_reset_uart(info); -- if (err < 0) -- return err; -+ hci_h4p_outb(info, UART_OMAP_MDR1, 7); -+ lcr = hci_h4p_inb(info, UART_LCR); -+ hci_h4p_outb(info, UART_LCR, 0xBF); -+ hci_h4p_outb(info, UART_DLL, info->dll); /* Set speed */ -+ hci_h4p_outb(info, UART_DLM, info->dlh); -+ hci_h4p_outb(info, UART_EFR, info->efr); -+ hci_h4p_outb(info, UART_LCR, lcr); -+ hci_h4p_outb(info, UART_OMAP_MDR1, info->mdr1); -+ hci_h4p_outb(info, UART_IER, info->ier); -+} -+ -+void hci_h4p_init_uart(struct hci_h4p_info *info) -+{ -+ u8 mcr, efr; - - /* Enable and setup FIFO */ -- hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8); -- hci_h4p_outb(info, UART_OMAP_MDR1, 0x00); /* Make sure UART mode is enabled */ -- hci_h4p_outb(info, UART_OMAP_SCR, 0x80); -+ hci_h4p_outb(info, UART_OMAP_MDR1, 0x00); -+ -+ hci_h4p_outb(info, UART_LCR, 0xbf); -+ efr = hci_h4p_inb(info, UART_EFR); - hci_h4p_outb(info, UART_EFR, UART_EFR_ECB); -+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); -+ mcr = hci_h4p_inb(info, UART_MCR); - hci_h4p_outb(info, UART_MCR, UART_MCR_TCRTLR); -- hci_h4p_outb(info, UART_TI752_TLR, 0x1f); -- hci_h4p_outb(info, UART_TI752_TCR, 0xef); - hci_h4p_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | -- UART_FCR_CLEAR_XMIT | UART_FCR_R_TRIG_00); -+ UART_FCR_CLEAR_XMIT | (3 << 6) | (0 << 4)); -+ hci_h4p_outb(info, UART_LCR, 0xbf); -+ hci_h4p_outb(info, UART_TI752_TLR, 0xed); -+ hci_h4p_outb(info, UART_TI752_TCR, 0xef); -+ hci_h4p_outb(info, UART_EFR, efr); -+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); -+ hci_h4p_outb(info, UART_MCR, 0x00); -+ hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8); - hci_h4p_outb(info, UART_IER, UART_IER_RDI); -- -- return 0; -+ hci_h4p_outb(info, UART_OMAP_SYSC, (1 << 0) | (1 << 2) | (2 << 3)); - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ldisc.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_ldisc.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ldisc.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_ldisc.c 2011-06-22 13:19:32.523063279 +0200 -@@ -46,11 +46,6 @@ - - #include "hci_uart.h" - --#ifndef CONFIG_BT_HCIUART_DEBUG --#undef BT_DBG --#define BT_DBG( A... 
) --#endif -- - #define VERSION "2.2" - - static int reset = 0; -@@ -399,8 +394,8 @@ static int hci_uart_register_dev(struct - - hdev->owner = THIS_MODULE; - -- if (reset) -- set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); -+ if (!reset) -+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); - - if (hci_register_dev(hdev) < 0) { - BT_ERR("Can't register HCI device"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ll.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_ll.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_ll.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_ll.c 2011-06-22 13:19:32.523063279 +0200 -@@ -163,8 +163,7 @@ static int ll_close(struct hci_uart *hu) - skb_queue_purge(&ll->tx_wait_q); - skb_queue_purge(&ll->txq); - -- if (ll->rx_skb) -- kfree_skb(ll->rx_skb); -+ kfree_skb(ll->rx_skb); - - hu->priv = NULL; - -diff -Nurp linux-omap-2.6.28-omap1/drivers/bluetooth/hci_vhci.c linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_vhci.c ---- linux-omap-2.6.28-omap1/drivers/bluetooth/hci_vhci.c 2011-06-22 13:14:17.283067762 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/bluetooth/hci_vhci.c 2011-06-22 13:19:32.523063279 +0200 -@@ -40,11 +40,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCIVHCI_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "1.2" - - static int minor = MISC_DYNAMIC_MINOR; -diff -Nurp linux-omap-2.6.28-omap1/drivers/cpufreq/cpufreq_ondemand.c linux-omap-2.6.28-nokia1/drivers/cpufreq/cpufreq_ondemand.c ---- linux-omap-2.6.28-omap1/drivers/cpufreq/cpufreq_ondemand.c 2011-06-22 13:14:17.483067758 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/cpufreq/cpufreq_ondemand.c 2011-06-22 13:19:32.523063279 +0200 -@@ -21,6 +21,8 @@ - #include - #include - #include -+#include -+#include - - /* - * dbs is used in this file as a shortform for demandbased switching -@@ -540,6 +542,87 @@ static inline void dbs_timer_exit(struct - cancel_delayed_work(&dbs_info->work); - } - -+static void dbs_refresh_callback(struct work_struct *unused) -+{ -+ struct cpufreq_policy *policy; -+ struct cpu_dbs_info_s *this_dbs_info; -+ -+ this_dbs_info = &per_cpu(cpu_dbs_info, 0); -+ policy = this_dbs_info->cur_policy; -+ -+ __cpufreq_driver_target(policy, policy->max, -+ CPUFREQ_RELATION_L); -+ this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0, -+ &this_dbs_info->prev_cpu_wall); -+} -+ -+static DECLARE_WORK(dbs_refresh_work, dbs_refresh_callback); -+ -+static void dbs_input_event(struct input_handle *handle, unsigned int type, -+ unsigned int code, int value) -+{ -+ struct cpufreq_policy *policy; -+ struct cpu_dbs_info_s *this_dbs_info; -+ -+ this_dbs_info = &per_cpu(cpu_dbs_info, 0); -+ policy = this_dbs_info->cur_policy; -+ -+ if (policy->cur < policy->max) { -+ policy->cur = policy->max; -+ schedule_work(&dbs_refresh_work); -+ } -+} -+ -+static int dbs_input_connect(struct input_handler *handler, -+ struct input_dev *dev, const struct input_device_id *id) -+{ -+ struct input_handle *handle; -+ int error; -+ -+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); -+ if (!handle) -+ return -ENOMEM; -+ -+ handle->dev = dev; -+ handle->handler = handler; -+ handle->name = "cpufreq"; -+ -+ error = input_register_handle(handle); -+ if (error) -+ goto err1; -+ -+ error = input_open_device(handle); -+ if (error) -+ goto err2; -+ -+ return 0; -+err1: -+ input_unregister_handle(handle); -+err2: -+ kfree(handle); -+ return error; -+} -+ -+static void dbs_input_disconnect(struct input_handle *handle) -+{ -+ input_close_device(handle); -+ 
input_unregister_handle(handle); -+ kfree(handle); -+} -+ -+static const struct input_device_id dbs_ids[] = { -+ { .driver_info = 1 }, -+ { }, -+}; -+ -+static struct input_handler dbs_input_handler = { -+ .event = dbs_input_event, -+ .connect = dbs_input_connect, -+ .disconnect = dbs_input_disconnect, -+ .name = "cpufreq_ond", -+ .id_table = dbs_ids, -+}; -+ - static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) - { -@@ -597,7 +680,7 @@ static int cpufreq_governor_dbs(struct c - dbs_tuners_ins.sampling_rate = def_sampling_rate; - } - dbs_timer_init(this_dbs_info); -- -+ rc = input_register_handler(&dbs_input_handler); - mutex_unlock(&dbs_mutex); - break; - -@@ -606,6 +689,7 @@ static int cpufreq_governor_dbs(struct c - dbs_timer_exit(this_dbs_info); - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; -+ input_unregister_handler(&dbs_input_handler); - mutex_unlock(&dbs_mutex); - - break; -diff -Nurp linux-omap-2.6.28-omap1/drivers/cpuidle/governors/menu.c linux-omap-2.6.28-nokia1/drivers/cpuidle/governors/menu.c ---- linux-omap-2.6.28-omap1/drivers/cpuidle/governors/menu.c 2011-06-22 13:14:17.483067758 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/cpuidle/governors/menu.c 2011-06-22 13:19:32.533063279 +0200 -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #define BREAK_FUZZ 4 /* 4 us */ - -@@ -36,6 +37,8 @@ static int menu_select(struct cpuidle_de - struct menu_device *data = &__get_cpu_var(menu_devices); - int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); - int i; -+ int device_not_idle; -+ struct timespec t; - - /* Special case when user has set very strict latency requirement */ - if (unlikely(latency_req == 0)) { -@@ -44,8 +47,11 @@ static int menu_select(struct cpuidle_de - } - - /* determine the expected residency time */ -+ t = ktime_to_timespec(tick_nohz_get_sleep_length()); - data->expected_us = -- (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; -+ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC; -+ -+ device_not_idle = !pm_check_idle(); - - /* find the deepest idle state that satisfies our constraints */ - for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { -@@ -53,7 +59,8 @@ static int menu_select(struct cpuidle_de - - if (s->target_residency > data->expected_us) - break; -- if (s->target_residency > data->predicted_us) -+ if (device_not_idle && -+ s->target_residency > data->predicted_us) - break; - if (s->exit_latency > latency_req) - break; -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/cload.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/cload.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/cload.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/cload.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,1851 @@ -+/* -+ * cload.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#include "header.h" -+ -+#include "module_list.h" -+#define LINKER_MODULES_HEADER ("_" MODULES_HEADER) -+ -+/* -+ * we use the fact that DOFF section records are shaped just like -+ * LDR_SECTION_INFO to reduce our section storage usage. This macro marks -+ * the places where that assumption is made -+ */ -+#define DOFFSEC_IS_LDRSEC(pdoffsec) ((struct LDR_SECTION_INFO *)(pdoffsec)) -+ -+/* -+ * forward references -+ */ -+static void dload_symbols(struct dload_state *dlthis); -+static void dload_data(struct dload_state *dlthis); -+static void allocate_sections(struct dload_state *dlthis); -+static void string_table_free(struct dload_state *dlthis); -+static void symbol_table_free(struct dload_state *dlthis); -+static void section_table_free(struct dload_state *dlthis); -+static void init_module_handle(struct dload_state *dlthis); -+#if BITS_PER_AU > BITS_PER_BYTE -+static char *unpack_name(struct dload_state *dlthis, u32 soffset); -+#endif -+ -+static const char CINITNAME[] = { ".cinit" }; -+static const char LOADER_DLLVIEW_ROOT[] = { "?DLModules?" }; -+ -+/* -+ * Error strings -+ */ -+static const char E_READSTRM[] = { "Error reading %s from input stream" }; -+static const char E_ALLOC[] = { "Syms->Allocate( %d ) failed" }; -+static const char E_TGTALLOC[] = -+ { "Target memory allocate failed, section %s size " FMT_UI32 }; -+static const char E_INITFAIL[] = { "%s to target address " FMT_UI32 " failed" }; -+static const char E_DLVWRITE[] = { "Write to DLLview list failed" }; -+static const char E_ICONNECT[] = { "Connect call to init interface failed" }; -+static const char E_CHECKSUM[] = { "Checksum failed on %s" }; -+ -+/************************************************************************* -+ * Procedure dload_error -+ * -+ * Parameters: -+ * errtxt description of the error, printf style -+ * ... additional information -+ * -+ * Effect: -+ * Reports or records the error as appropriate. -+ ************************************************************************/ -+void dload_error(struct dload_state *dlthis, const char *errtxt, ...) -+{ -+ va_list args; -+ -+ va_start(args, errtxt); -+ dlthis->mysym->Error_Report(dlthis->mysym, errtxt, args); -+ va_end(args); -+ dlthis->dload_errcount += 1; -+ -+} /* dload_error */ -+ -+#define DL_ERROR(zza, zzb) dload_error(dlthis, zza, zzb) -+ -+/************************************************************************* -+ * Procedure dload_syms_error -+ * -+ * Parameters: -+ * errtxt description of the error, printf style -+ * ... additional information -+ * -+ * Effect: -+ * Reports or records the error as appropriate. -+ ************************************************************************/ -+void dload_syms_error(struct Dynamic_Loader_Sym *syms, const char *errtxt, ...) -+{ -+ va_list args; -+ -+ va_start(args, errtxt); -+ syms->Error_Report(syms, errtxt, args); -+ va_end(args); -+} -+ -+/************************************************************************* -+ * Procedure Dynamic_Load_Module -+ * -+ * Parameters: -+ * module The input stream that supplies the module image -+ * syms Host-side symbol table and malloc/free functions -+ * alloc Target-side memory allocation -+ * init Target-side memory initialization -+ * options Option flags DLOAD_* -+ * mhandle A module handle for use with Dynamic_Unload -+ * -+ * Effect: -+ * The module image is read using *module. Target storage for the new -+ * image is -+ * obtained from *alloc. Symbols defined and referenced by the module are -+ * managed using *syms. 
The image is then relocated and references -+ * resolved as necessary, and the resulting executable bits are placed -+ * into target memory using *init. -+ * -+ * Returns: -+ * On a successful load, a module handle is placed in *mhandle, -+ * and zero is returned. On error, the number of errors detected is -+ * returned. Individual errors are reported during the load process -+ * using syms->Error_Report(). -+ ***********************************************************************/ -+int Dynamic_Load_Module(struct Dynamic_Loader_Stream *module, -+ struct Dynamic_Loader_Sym *syms , -+ struct Dynamic_Loader_Allocate *alloc, -+ struct Dynamic_Loader_Initialize *init, -+ unsigned options, DLOAD_mhandle *mhandle) -+{ -+ register unsigned *dp, sz; -+ struct dload_state dl_state; /* internal state for this call */ -+ -+ /* blast our internal state */ -+ dp = (unsigned *)&dl_state; -+ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1) -+ *dp++ = 0; -+ -+ /* Enable _only_ BSS initialization if enabled by user */ -+ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS) -+ dl_state.myoptions = DLOAD_INITBSS; -+ -+ /* Check that mandatory arguments are present */ -+ if (!module || !syms) { -+ dload_error(&dl_state, "Required parameter is NULL"); -+ } else { -+ dl_state.strm = module; -+ dl_state.mysym = syms; -+ dload_headers(&dl_state); -+ if (!dl_state.dload_errcount) -+ dload_strings(&dl_state, false); -+ if (!dl_state.dload_errcount) -+ dload_sections(&dl_state); -+ -+ if (init && !dl_state.dload_errcount) { -+ if (init->connect(init)) { -+ dl_state.myio = init; -+ dl_state.myalloc = alloc; -+ /* do now, before reducing symbols */ -+ allocate_sections(&dl_state); -+ } else -+ dload_error(&dl_state, E_ICONNECT); -+ } -+ -+ if (!dl_state.dload_errcount) { -+ /* fix up entry point address */ -+ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1; -+ if (sref < dl_state.allocated_secn_count) -+ dl_state.dfile_hdr.df_entrypt += -+ dl_state.ldr_sections[sref].run_addr; -+ -+ dload_symbols(&dl_state); -+ } -+ -+ if (init && !dl_state.dload_errcount) -+ dload_data(&dl_state); -+ -+ init_module_handle(&dl_state); -+ -+ /* dl_state.myio is init or 0 at this point. */ -+ if (dl_state.myio) { -+ if ((!dl_state.dload_errcount) && -+ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) && -+ (!init->execute(init, -+ dl_state.dfile_hdr.df_entrypt))) -+ dload_error(&dl_state, -+ "Init->Execute Failed"); -+ init->release(init); -+ } -+ -+ symbol_table_free(&dl_state); -+ section_table_free(&dl_state); -+ string_table_free(&dl_state); -+ -+ if (dl_state.dload_errcount) { -+ Dynamic_Unload_Module(dl_state.myhandle, syms, alloc, -+ init); -+ dl_state.myhandle = NULL; -+ } -+ } -+ -+ if (mhandle) -+ *mhandle = dl_state.myhandle; /* give back the handle */ -+ -+ return dl_state.dload_errcount; -+} /* DLOAD_File */ -+ -+/************************************************************************* -+ * Procedure Dynamic_Open_Module -+ * -+ * Parameters: -+ * module The input stream that supplies the module image -+ * syms Host-side symbol table and malloc/free functions -+ * alloc Target-side memory allocation -+ * init Target-side memory initialization -+ * options Option flags DLOAD_* -+ * mhandle A module handle for use with Dynamic_Unload -+ * -+ * Effect: -+ * The module image is read using *module. Target storage for the new -+ * image is -+ * obtained from *alloc. Symbols defined and referenced by the module are -+ * managed using *syms. 
The image is then relocated and references -+ * resolved as necessary, and the resulting executable bits are placed -+ * into target memory using *init. -+ * -+ * Returns: -+ * On a successful load, a module handle is placed in *mhandle, -+ * and zero is returned. On error, the number of errors detected is -+ * returned. Individual errors are reported during the load process -+ * using syms->Error_Report(). -+ ***********************************************************************/ -+int -+Dynamic_Open_Module(struct Dynamic_Loader_Stream *module, -+ struct Dynamic_Loader_Sym *syms, -+ struct Dynamic_Loader_Allocate *alloc, -+ struct Dynamic_Loader_Initialize *init, -+ unsigned options, DLOAD_mhandle *mhandle) -+{ -+ register unsigned *dp, sz; -+ struct dload_state dl_state; /* internal state for this call */ -+ -+ /* blast our internal state */ -+ dp = (unsigned *)&dl_state; -+ for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1) -+ *dp++ = 0; -+ -+ /* Enable _only_ BSS initialization if enabled by user */ -+ if ((options & DLOAD_INITBSS) == DLOAD_INITBSS) -+ dl_state.myoptions = DLOAD_INITBSS; -+ -+ /* Check that mandatory arguments are present */ -+ if (!module || !syms) { -+ dload_error(&dl_state, "Required parameter is NULL"); -+ } else { -+ dl_state.strm = module; -+ dl_state.mysym = syms; -+ dload_headers(&dl_state); -+ if (!dl_state.dload_errcount) -+ dload_strings(&dl_state, false); -+ if (!dl_state.dload_errcount) -+ dload_sections(&dl_state); -+ -+ if (init && !dl_state.dload_errcount) { -+ if (init->connect(init)) { -+ dl_state.myio = init; -+ dl_state.myalloc = alloc; -+ /* do now, before reducing symbols */ -+ allocate_sections(&dl_state); -+ } else -+ dload_error(&dl_state, E_ICONNECT); -+ } -+ -+ if (!dl_state.dload_errcount) { -+ /* fix up entry point address */ -+ unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1; -+ if (sref < dl_state.allocated_secn_count) -+ dl_state.dfile_hdr.df_entrypt += -+ dl_state.ldr_sections[sref].run_addr; -+ -+ dload_symbols(&dl_state); -+ } -+ -+ init_module_handle(&dl_state); -+ -+ /* dl_state.myio is either 0 or init at this point. */ -+ if (dl_state.myio) { -+ if ((!dl_state.dload_errcount) && -+ (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) && -+ (!init->execute(init, -+ dl_state.dfile_hdr.df_entrypt))) -+ dload_error(&dl_state, -+ "Init->Execute Failed"); -+ init->release(init); -+ } -+ -+ symbol_table_free(&dl_state); -+ section_table_free(&dl_state); -+ string_table_free(&dl_state); -+ -+ if (dl_state.dload_errcount) { -+ Dynamic_Unload_Module(dl_state.myhandle, syms, alloc, -+ init); -+ dl_state.myhandle = NULL; -+ } -+ } -+ -+ if (mhandle) -+ *mhandle = dl_state.myhandle; /* give back the handle */ -+ -+ return dl_state.dload_errcount; -+} /* DLOAD_File */ -+ -+/************************************************************************* -+ * Procedure dload_headers -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Loads the DOFF header and verify record. Deals with any byte-order -+ * issues and checks them for validity. -+ ************************************************************************/ -+#define COMBINED_HEADER_SIZE (sizeof(struct doff_filehdr_t)+ \ -+ sizeof(struct doff_verify_rec_t)) -+ -+void dload_headers(struct dload_state *dlthis) -+{ -+ u32 map; -+ -+ /* Read the header and the verify record as one. 
If we don't get it -+ all, we're done */ -+ if (dlthis->strm->read_buffer(dlthis->strm, &dlthis->dfile_hdr, -+ COMBINED_HEADER_SIZE) != COMBINED_HEADER_SIZE) { -+ DL_ERROR(E_READSTRM, "File Headers"); -+ return; -+ } -+ /* -+ * Verify that we have the byte order of the file correct. -+ * If not, must fix it before we can continue -+ */ -+ map = REORDER_MAP(dlthis->dfile_hdr.df_byte_reshuffle); -+ if (map != REORDER_MAP(BYTE_RESHUFFLE_VALUE)) { -+ /* input is either byte-shuffled or bad */ -+ if ((map & 0xFCFCFCFC) == 0) { /* no obviously bogus bits */ -+ dload_reorder(&dlthis->dfile_hdr, COMBINED_HEADER_SIZE, -+ map); -+ } -+ if (dlthis->dfile_hdr.df_byte_reshuffle != -+ BYTE_RESHUFFLE_VALUE) { -+ /* didn't fix the problem, the byte swap map is bad */ -+ dload_error(dlthis, -+ "Bad byte swap map " FMT_UI32 " in header", -+ dlthis->dfile_hdr.df_byte_reshuffle); -+ return; -+ } -+ dlthis->reorder_map = map; /* keep map for future use */ -+ } -+ -+ /* -+ * Verify checksum of header and verify record -+ */ -+ if (~dload_checksum(&dlthis->dfile_hdr, -+ sizeof(struct doff_filehdr_t)) || -+ ~dload_checksum(&dlthis->verify, -+ sizeof(struct doff_verify_rec_t))) { -+ DL_ERROR(E_CHECKSUM, "header or verify record"); -+ return; -+ } -+#if HOST_ENDIANNESS -+ dlthis->dfile_hdr.df_byte_reshuffle = map; /* put back for later */ -+#endif -+ -+ /* Check for valid target ID */ -+ if ((dlthis->dfile_hdr.df_target_id != TARGET_ID) && -+ -(dlthis->dfile_hdr.df_target_id != TMS470_ID)) { -+ dload_error(dlthis, "Bad target ID 0x%x and TARGET_ID 0x%x", -+ dlthis->dfile_hdr.df_target_id, TARGET_ID); -+ return; -+ } -+ /* Check for valid file format */ -+ if ((dlthis->dfile_hdr.df_doff_version != DOFF0)) { -+ dload_error(dlthis, "Bad DOFF version 0x%x", -+ dlthis->dfile_hdr.df_doff_version); -+ return; -+ } -+ -+ /* -+ * Apply reasonableness checks to count fields -+ */ -+ if (dlthis->dfile_hdr.df_strtab_size > MAX_REASONABLE_STRINGTAB) { -+ dload_error(dlthis, "Excessive string table size " FMT_UI32, -+ dlthis->dfile_hdr.df_strtab_size); -+ return; -+ } -+ if (dlthis->dfile_hdr.df_no_scns > MAX_REASONABLE_SECTIONS) { -+ dload_error(dlthis, "Excessive section count 0x%x", -+ dlthis->dfile_hdr.df_no_scns); -+ return; -+ } -+#ifndef TARGET_ENDIANNESS -+ /* -+ * Check that endianness does not disagree with explicit specification -+ */ -+ if ((dlthis->dfile_hdr.df_flags >> ALIGN_COFF_ENDIANNESS) & -+ dlthis->myoptions & ENDIANNESS_MASK) { -+ dload_error(dlthis, -+ "Input endianness disagrees with specified option"); -+ return; -+ } -+ dlthis->big_e_target = dlthis->dfile_hdr.df_flags & DF_BIG; -+#endif -+ -+} /* dload_headers */ -+ -+/* COFF Section Processing -+ * -+ * COFF sections are read in and retained intact. Each record is embedded -+ * in a new structure that records the updated load and -+ * run addresses of the section */ -+ -+static const char SECN_ERRID[] = { "section" }; -+ -+/************************************************************************* -+ * Procedure dload_sections -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Loads the section records into an internal table. 
-+ ************************************************************************/ -+void -+dload_sections(struct dload_state *dlthis) -+{ -+ s16 siz; -+ struct doff_scnhdr_t *shp; -+ unsigned nsecs = dlthis->dfile_hdr.df_no_scns; -+ -+ /* allocate space for the DOFF section records */ -+ siz = nsecs * sizeof(struct doff_scnhdr_t); -+ shp = (struct doff_scnhdr_t *)dlthis->mysym->Allocate(dlthis->mysym, -+ siz); -+ if (!shp) { /* not enough storage */ -+ DL_ERROR(E_ALLOC, siz); -+ return; -+ } -+ dlthis->sect_hdrs = shp; -+ -+ /* read in the section records */ -+ if (dlthis->strm->read_buffer(dlthis->strm, shp, siz) != siz) { -+ DL_ERROR(E_READSTRM, SECN_ERRID); -+ return; -+ } -+ -+ /* if we need to fix up byte order, do it now */ -+ if (dlthis->reorder_map) -+ dload_reorder(shp, siz, dlthis->reorder_map); -+ -+ /* check for validity */ -+ if (~dload_checksum(dlthis->sect_hdrs, siz) != -+ dlthis->verify.dv_scn_rec_checksum) { -+ DL_ERROR(E_CHECKSUM, SECN_ERRID); -+ return; -+ } -+ -+} /* dload_sections */ -+ -+/***************************************************************************** -+ * Procedure allocate_sections -+ * -+ * Parameters: -+ * alloc target memory allocator class -+ * -+ * Effect: -+ * Assigns new (target) addresses for sections -+ *****************************************************************************/ -+static void allocate_sections(struct dload_state *dlthis) -+{ -+ u16 curr_sect, nsecs, siz; -+ struct doff_scnhdr_t *shp; -+ struct LDR_SECTION_INFO *asecs; -+ struct my_handle *hndl; -+ nsecs = dlthis->dfile_hdr.df_no_scns; -+ if (!nsecs) -+ return; -+ if ((dlthis->myalloc == NULL) && -+ (dlthis->dfile_hdr.df_target_scns > 0)) { -+ DL_ERROR("Arg 3 (alloc) required but NULL", 0); -+ return; -+ } -+ /* allocate space for the module handle, which we will -+ * keep for unload purposes */ -+ siz = dlthis->dfile_hdr.df_target_scns * -+ sizeof(struct LDR_SECTION_INFO) + MY_HANDLE_SIZE; -+ hndl = (struct my_handle *)dlthis->mysym->Allocate(dlthis->mysym, siz); -+ if (!hndl) { /* not enough storage */ -+ DL_ERROR(E_ALLOC, siz); -+ return; -+ } -+ /* initialize the handle header */ -+ hndl->dm.hnext = hndl->dm.hprev = hndl; /* circular list */ -+ hndl->dm.hroot = NULL; -+ hndl->dm.dbthis = 0; -+ dlthis->myhandle = hndl; /* save away for return */ -+ /* pointer to the section list of allocated sections */ -+ dlthis->ldr_sections = asecs = hndl->secns; -+ /* * Insert names into all sections, make copies of -+ the sections we allocate */ -+ shp = dlthis->sect_hdrs; -+ for (curr_sect = 0; curr_sect < nsecs; curr_sect++) { -+ u32 soffset = shp->ds_offset; -+#if BITS_PER_AU <= BITS_PER_BYTE -+ /* attempt to insert the name of this section */ -+ if (soffset < dlthis->dfile_hdr.df_strtab_size) -+ DOFFSEC_IS_LDRSEC(shp)->name = dlthis->str_head + -+ soffset; -+ else { -+ dload_error(dlthis, "Bad name offset in section %d", -+ curr_sect); -+ DOFFSEC_IS_LDRSEC(shp)->name = NULL; -+ } -+#endif -+ /* allocate target storage for sections that require it */ -+ if (DS_NEEDS_ALLOCATION(shp)) { -+ *asecs = *DOFFSEC_IS_LDRSEC(shp); -+ asecs->context = 0; /* zero the context field */ -+#if BITS_PER_AU > BITS_PER_BYTE -+ asecs->name = unpack_name(dlthis, soffset); -+ dlthis->debug_string_size = soffset + dlthis->temp_len; -+#else -+ dlthis->debug_string_size = soffset; -+#endif -+ if (dlthis->myalloc != NULL) { -+ if (!dlthis->myalloc->Allocate(dlthis->myalloc, asecs, -+ DS_ALIGNMENT(asecs->type))) { -+ dload_error(dlthis, E_TGTALLOC, asecs->name, -+ asecs->size); -+ return; -+ } -+ } -+ /* keep 
address deltas in original section table */ -+ shp->ds_vaddr = asecs->load_addr - shp->ds_vaddr; -+ shp->ds_paddr = asecs->run_addr - shp->ds_paddr; -+ dlthis->allocated_secn_count += 1; -+ } /* allocate target storage */ -+ shp += 1; -+ asecs += 1; -+ } -+#if BITS_PER_AU <= BITS_PER_BYTE -+ dlthis->debug_string_size += -+ strlen(dlthis->str_head + dlthis->debug_string_size) + 1; -+#endif -+} /* allocate sections */ -+ -+/************************************************************************* -+ * Procedure section_table_free -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Frees any state used by the symbol table. -+ * -+ * WARNING: -+ * This routine is not allowed to declare errors! -+ ************************************************************************/ -+static void section_table_free(struct dload_state *dlthis) -+{ -+ struct doff_scnhdr_t *shp; -+ -+ shp = dlthis->sect_hdrs; -+ if (shp) -+ dlthis->mysym->Deallocate(dlthis->mysym, shp); -+ -+} /* section_table_free */ -+ -+/************************************************************************* -+ * Procedure dload_strings -+ * -+ * Parameters: -+ * sec_names_only If true only read in the "section names" -+ * portion of the string table -+ * -+ * Effect: -+ * Loads the DOFF string table into memory. DOFF keeps all strings in a -+ * big unsorted array. We just read that array into memory in bulk. -+ ************************************************************************/ -+static const char S_STRINGTBL[] = { "string table" }; -+void dload_strings(struct dload_state *dlthis, boolean sec_names_only) -+{ -+ u32 ssiz; -+ char *strbuf; -+ -+ if (sec_names_only) { -+ ssiz = BYTE_TO_HOST(DOFF_ALIGN -+ (dlthis->dfile_hdr.df_scn_name_size)); -+ } else { -+ ssiz = BYTE_TO_HOST(DOFF_ALIGN -+ (dlthis->dfile_hdr.df_strtab_size)); -+ } -+ if (ssiz == 0) -+ return; -+ -+ /* get some memory for the string table */ -+#if BITS_PER_AU > BITS_PER_BYTE -+ strbuf = (char *)dlthis->mysym->Allocate(dlthis->mysym, ssiz + -+ dlthis->dfile_hdr.df_max_str_len); -+#else -+ strbuf = (char *)dlthis->mysym->Allocate(dlthis->mysym, ssiz); -+#endif -+ if (strbuf == NULL) { -+ DL_ERROR(E_ALLOC, ssiz); -+ return; -+ } -+ dlthis->str_head = strbuf; -+#if BITS_PER_AU > BITS_PER_BYTE -+ dlthis->str_temp = strbuf + ssiz; -+#endif -+ /* read in the strings and verify them */ -+ if ((unsigned)(dlthis->strm->read_buffer(dlthis->strm, strbuf, -+ ssiz)) != ssiz) { -+ DL_ERROR(E_READSTRM, S_STRINGTBL); -+ } -+ /* if we need to fix up byte order, do it now */ -+#ifndef _BIG_ENDIAN -+ if (dlthis->reorder_map) -+ dload_reorder(strbuf, ssiz, dlthis->reorder_map); -+ -+ if ((!sec_names_only) && (~dload_checksum(strbuf, ssiz) != -+ dlthis->verify.dv_str_tab_checksum)) { -+ DL_ERROR(E_CHECKSUM, S_STRINGTBL); -+ } -+#else -+ if (dlthis->dfile_hdr.df_byte_reshuffle != -+ HOST_BYTE_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) { -+ /* put strings in big-endian order, not in PC order */ -+ dload_reorder(strbuf, ssiz, HOST_BYTE_ORDER(dlthis->dfile_hdr. -+ df_byte_reshuffle)); -+ } -+ if ((!sec_names_only) && (~dload_reverse_checksum(strbuf, ssiz) != -+ dlthis->verify.dv_str_tab_checksum)) { -+ DL_ERROR(E_CHECKSUM, S_STRINGTBL); -+ } -+#endif -+} /* dload_strings */ -+ -+/************************************************************************* -+ * Procedure string_table_free -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Frees any state used by the string table. -+ * -+ * WARNING: -+ * This routine is not allowed to declare errors! 
-+ *************************************************************************/ -+static void string_table_free(struct dload_state *dlthis) -+{ -+ if (dlthis->str_head) -+ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->str_head); -+ -+} /* string_table_free */ -+ -+/* -+ * Symbol Table Maintenance Functions -+ * -+ * COFF symbols are read by dload_symbols(), which is called after -+ * sections have been allocated. Symbols which might be used in -+ * relocation (ie, not debug info) are retained in an internal temporary -+ * compressed table (type Local_Symbol). A particular symbol is recovered -+ * by index by calling dload_find_symbol(). dload_find_symbol -+ * reconstructs a more explicit representation (type SLOTVEC) which is -+ * used by reloc.c -+ */ -+/* real size of debug header */ -+#define DBG_HDR_SIZE (sizeof(struct dll_module) - sizeof(struct dll_sect)) -+ -+static const char SYM_ERRID[] = { "symbol" }; -+ -+/************************************************************************** -+ * Procedure dload_symbols -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Reads in symbols and retains ones that might be needed for relocation -+ * purposes. -+ ************************************************************************/ -+/* size of symbol buffer no bigger than target data buffer, to limit stack -+ * usage*/ -+#define MY_SYM_BUF_SIZ (BYTE_TO_HOST(IMAGE_PACKET_SIZE)/\ -+ sizeof(struct doff_syment_t)) -+ -+static void dload_symbols(struct dload_state *dlthis) -+{ -+ u32 s_count, siz, dsiz, symbols_left; -+ u32 checks; -+ struct Local_Symbol *sp; -+ struct dynload_symbol *symp; -+ struct dynload_symbol *newsym; -+ -+ s_count = dlthis->dfile_hdr.df_no_syms; -+ if (s_count == 0) -+ return; -+ -+ /* We keep a local symbol table for all of the symbols in the input. -+ * This table contains only section & value info, as we do not have -+ * to do any name processing for locals. We reuse this storage -+ * as a temporary for .dllview record construction. -+ * Allocate storage for the whole table.*/ -+ siz = s_count * sizeof(struct Local_Symbol); -+ dsiz = DBG_HDR_SIZE + -+ (sizeof(struct dll_sect) * dlthis->allocated_secn_count) + -+ BYTE_TO_HOST_ROUND(dlthis->debug_string_size + 1); -+ if (dsiz > siz) -+ siz = dsiz; /* larger of symbols and .dllview temp */ -+ sp = (struct Local_Symbol *)dlthis->mysym->Allocate(dlthis->mysym, siz); -+ if (!sp) { -+ DL_ERROR(E_ALLOC, siz); -+ return; -+ } -+ dlthis->local_symtab = sp; -+ /* Read the symbols in the input, store them in the table, and post any -+ * globals to the global symbol table. In the process, externals -+ become defined from the global symbol table */ -+ checks = dlthis->verify.dv_sym_tab_checksum; -+ symbols_left = s_count; -+ do { /* read all symbols */ -+ char *sname; -+ u32 val; -+ s32 delta; -+ struct doff_syment_t *input_sym; -+ unsigned syms_in_buf; -+ struct doff_syment_t my_sym_buf[MY_SYM_BUF_SIZ]; -+ input_sym = my_sym_buf; -+ syms_in_buf = symbols_left > MY_SYM_BUF_SIZ ? 
-+ MY_SYM_BUF_SIZ : symbols_left; -+ siz = syms_in_buf * sizeof(struct doff_syment_t); -+ if (dlthis->strm->read_buffer(dlthis->strm, input_sym, siz) != -+ siz) { -+ DL_ERROR(E_READSTRM, SYM_ERRID); -+ return; -+ } -+ if (dlthis->reorder_map) -+ dload_reorder(input_sym, siz, dlthis->reorder_map); -+ -+ checks += dload_checksum(input_sym, siz); -+ do { /* process symbols in buffer */ -+ symbols_left -= 1; -+ /* attempt to derive the name of this symbol */ -+ sname = NULL; -+ if (input_sym->dn_offset > 0) { -+#if BITS_PER_AU <= BITS_PER_BYTE -+ if ((u32) input_sym->dn_offset < -+ dlthis->dfile_hdr.df_strtab_size) -+ sname = dlthis->str_head + -+ BYTE_TO_HOST(input_sym->dn_offset); -+ else -+ dload_error(dlthis, -+ "Bad name offset in symbol %d", -+ symbols_left); -+#else -+ sname = unpack_name(dlthis, -+ input_sym->dn_offset); -+#endif -+ } -+ val = input_sym->dn_value; -+ delta = 0; -+ sp->sclass = input_sym->dn_sclass; -+ sp->secnn = input_sym->dn_scnum; -+ /* if this is an undefined symbol, -+ * define it (or fail) now */ -+ if (sp->secnn == DN_UNDEF) { -+ /* pointless for static undefined */ -+ if (input_sym->dn_sclass != DN_EXT) -+ goto loop_cont; -+ -+ /* try to define symbol from previously -+ * loaded images */ -+ symp = dlthis->mysym->Find_Matching_Symbol -+ (dlthis->mysym, sname); -+ if (!symp) { -+ DL_ERROR -+ ("Undefined external symbol %s", -+ sname); -+ goto loop_cont; -+ } -+ val = delta = symp->value; -+ goto loop_cont; -+ } -+ /* symbol defined by this module */ -+ if (sp->secnn > 0) { /* symbol references a section */ -+ if ((unsigned)sp->secnn <= -+ dlthis->allocated_secn_count) { -+ /* section was allocated */ -+ struct doff_scnhdr_t *srefp = -+ &dlthis->sect_hdrs -+ [sp->secnn - 1]; -+ -+ if (input_sym->dn_sclass == -+ DN_STATLAB || -+ input_sym->dn_sclass == DN_EXTLAB){ -+ /* load */ -+ delta = srefp->ds_vaddr; -+ } else { -+ /* run */ -+ delta = srefp->ds_paddr; -+ } -+ val += delta; -+ } -+ goto loop_itr; -+ } -+ /* This symbol is an absolute symbol */ -+ if (sp->secnn == DN_ABS && ((sp->sclass == DN_EXT) || -+ (sp->sclass == DN_EXTLAB))) { -+ symp = dlthis->mysym->Find_Matching_Symbol -+ (dlthis->mysym, sname); -+ if (!symp) -+ goto loop_itr; -+ /* This absolute symbol is already defined. */ -+ if (symp->value == input_sym->dn_value) { -+ /* If symbol values are equal, continue -+ * but don't add to the global symbol -+ * table */ -+ sp->value = val; -+ sp->delta = delta; -+ sp += 1; -+ input_sym += 1; -+ continue; -+ } else { -+ /* If symbol values are not equal, -+ * return with redefinition error */ -+ DL_ERROR("Absolute symbol %s is " -+ "defined multiple times with " -+ "different values", sname); -+ return; -+ } -+ } -+loop_itr: -+ /* if this is a global symbol, post it to the -+ * global table */ -+ if (input_sym->dn_sclass == DN_EXT || -+ input_sym->dn_sclass == DN_EXTLAB) { -+ /* Keep this global symbol for subsequent -+ * modules. 
Don't complain on error, to allow -+ * symbol API to suppress global symbols */ -+ if (!sname) -+ goto loop_cont; -+ -+ newsym = dlthis->mysym->Add_To_Symbol_Table -+ (dlthis->mysym, sname, -+ (unsigned)dlthis->myhandle); -+ if (newsym) -+ newsym->value = val; -+ -+ } /* global */ -+loop_cont: -+ sp->value = val; -+ sp->delta = delta; -+ sp += 1; -+ input_sym += 1; -+ } while ((syms_in_buf -= 1) > 0); /* process sym in buffer */ -+ } while (symbols_left > 0); /* read all symbols */ -+ if (~checks) -+ dload_error(dlthis, "Checksum of symbols failed"); -+ -+} /* dload_symbols */ -+ -+/***************************************************************************** -+ * Procedure symbol_table_free -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Frees any state used by the symbol table. -+ * -+ * WARNING: -+ * This routine is not allowed to declare errors! -+ *****************************************************************************/ -+static void symbol_table_free(struct dload_state *dlthis) -+{ -+ if (dlthis->local_symtab) { -+ if (dlthis->dload_errcount) { /* blow off our symbols */ -+ dlthis->mysym->Purge_Symbol_Table(dlthis->mysym, -+ (unsigned)dlthis->myhandle); -+ } -+ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->local_symtab); -+ } -+} /* symbol_table_free */ -+ -+/* .cinit Processing -+ * -+ * The dynamic loader does .cinit interpretation. cload_cinit() -+ * acts as a special write-to-target function, in that it takes relocated -+ * data from the normal data flow, and interprets it as .cinit actions. -+ * Because the normal data flow does not necessarily process the whole -+ * .cinit section in one buffer, cload_cinit() must be prepared to -+ * interpret the data piecemeal. A state machine is used for this -+ * purpose. -+ */ -+ -+/* The following are only for use by reloc.c and things it calls */ -+static const struct LDR_SECTION_INFO CINIT_INFO_INIT = { CINITNAME, 0, 0, -+ (LDR_ADDR) -1, 0, DLOAD_BSS, 0 }; -+ -+/************************************************************************* -+ * Procedure cload_cinit -+ * -+ * Parameters: -+ * ipacket Pointer to data packet to be loaded -+ * -+ * Effect: -+ * Interprets the data in the buffer as .cinit data, and performs the -+ * appropriate initializations. -+ ************************************************************************/ -+static void cload_cinit(struct dload_state *dlthis, -+ struct image_packet_t *ipacket) -+{ -+#if TDATA_TO_HOST(CINIT_COUNT)*BITS_PER_AU > 16 -+ s32 init_count, left; -+#else -+ s16 init_count, left; -+#endif -+ unsigned char *pktp = ipacket->i_bits; -+ unsigned char *pktend = pktp + -+ BYTE_TO_HOST_ROUND(ipacket->i_packet_size); -+ int temp; -+ LDR_ADDR atmp; -+ struct LDR_SECTION_INFO cinit_info; -+ -+ /* PROCESS ALL THE INITIALIZATION RECORDS IN THE BUFFER. */ -+ while (true) { -+ left = pktend - pktp; -+ switch (dlthis->cinit_state) { -+ case CI_count: /* count field */ -+ if (left < TDATA_TO_HOST(CINIT_COUNT)) -+ goto loopexit; -+ temp = dload_unpack(dlthis, (TgtAU_t *)pktp, -+ CINIT_COUNT * TDATA_AU_BITS, 0, -+ ROP_SGN); -+ pktp += TDATA_TO_HOST(CINIT_COUNT); -+ /* negative signifies BSS table, zero means done */ -+ if (temp <= 0) { -+ dlthis->cinit_state = CI_done; -+ break; -+ } -+ dlthis->cinit_count = temp; -+ dlthis->cinit_state = CI_address; -+ break; -+#if CINIT_ALIGN < CINIT_ADDRESS -+ case CI_partaddress: -+ pktp -= TDATA_TO_HOST(CINIT_ALIGN); -+ /* back up pointer into space courtesy of caller */ -+ *(uint16_t *)pktp = dlthis->cinit_addr; -+ /* stuff in saved bits !! FALL THRU !! 
*/ -+#endif -+ case CI_address: /* Address field for a copy packet */ -+ if (left < TDATA_TO_HOST(CINIT_ADDRESS)) { -+#if CINIT_ALIGN < CINIT_ADDRESS -+ if (left == TDATA_TO_HOST(CINIT_ALIGN)) { -+ /* address broken into halves */ -+ dlthis->cinit_addr = *(uint16_t *)pktp; -+ /* remember 1st half */ -+ dlthis->cinit_state = CI_partaddress; -+ left = 0; -+ } -+#endif -+ goto loopexit; -+ } -+ atmp = dload_unpack(dlthis, (TgtAU_t *)pktp, -+ CINIT_ADDRESS * TDATA_AU_BITS, 0, -+ ROP_UNS); -+ pktp += TDATA_TO_HOST(CINIT_ADDRESS); -+#if CINIT_PAGE_BITS > 0 -+ dlthis->cinit_page = atmp & -+ ((1 << CINIT_PAGE_BITS) - 1); -+ atmp >>= CINIT_PAGE_BITS; -+#else -+ dlthis->cinit_page = CINIT_DEFAULT_PAGE; -+#endif -+ dlthis->cinit_addr = atmp; -+ dlthis->cinit_state = CI_copy; -+ break; -+ case CI_copy: /* copy bits to the target */ -+ init_count = HOST_TO_TDATA(left); -+ if (init_count > dlthis->cinit_count) -+ init_count = dlthis->cinit_count; -+ if (init_count == 0) -+ goto loopexit; /* get more bits */ -+ cinit_info = CINIT_INFO_INIT; -+ cinit_info.page = dlthis->cinit_page; -+ if (!dlthis->myio->writemem(dlthis->myio, pktp, -+ TDATA_TO_TADDR(dlthis->cinit_addr), -+ &cinit_info, -+ TDATA_TO_HOST(init_count))) { -+ dload_error(dlthis, E_INITFAIL, "write", -+ dlthis->cinit_addr); -+ } -+ dlthis->cinit_count -= init_count; -+ if (dlthis->cinit_count <= 0) { -+ dlthis->cinit_state = CI_count; -+ init_count = (init_count + CINIT_ALIGN - 1) & -+ -CINIT_ALIGN; -+ /* align to next init */ -+ } -+ pktp += TDATA_TO_HOST(init_count); -+ dlthis->cinit_addr += init_count; -+ break; -+ case CI_done: /* no more .cinit to do */ -+ return; -+ } /* switch (cinit_state) */ -+ } /* while */ -+ -+loopexit: -+ if (left > 0) { -+ dload_error(dlthis, "%d bytes left over in cinit packet", left); -+ dlthis->cinit_state = CI_done; /* left over bytes are bad */ -+ } -+} /* cload_cinit */ -+ -+/* Functions to interface to reloc.c -+ * -+ * reloc.c is the relocation module borrowed from the linker, with -+ * minimal (we hope) changes for our purposes. cload_sect_data() invokes -+ * this module on a section to relocate and load the image data for that -+ * section. The actual read and write actions are supplied by the global -+ * routines below. -+ */ -+ -+/************************************************************************ -+ * Procedure relocate_packet -+ * -+ * Parameters: -+ * ipacket Pointer to an image packet to relocate -+ * -+ * Effect: -+ * Performs the required relocations on the packet. Returns a checksum -+ * of the relocation operations. -+ ************************************************************************/ -+#define MY_RELOC_BUF_SIZ 8 -+/* careful! exists at the same time as the image buffer*/ -+static int relocate_packet(struct dload_state *dlthis, -+ struct image_packet_t *ipacket, u32 *checks) -+{ -+ u32 rnum; -+ -+ rnum = ipacket->i_num_relocs; -+ do { /* all relocs */ -+ unsigned rinbuf; -+ int siz; -+ struct reloc_record_t *rp, rrec[MY_RELOC_BUF_SIZ]; -+ rp = rrec; -+ rinbuf = rnum > MY_RELOC_BUF_SIZ ? 
MY_RELOC_BUF_SIZ : rnum; -+ siz = rinbuf * sizeof(struct reloc_record_t); -+ if (dlthis->strm->read_buffer(dlthis->strm, rp, siz) != siz) { -+ DL_ERROR(E_READSTRM, "relocation"); -+ return 0; -+ } -+ /* reorder the bytes if need be */ -+ if (dlthis->reorder_map) -+ dload_reorder(rp, siz, dlthis->reorder_map); -+ -+ *checks += dload_checksum(rp, siz); -+ do { -+ /* perform the relocation operation */ -+ dload_relocate(dlthis, (TgtAU_t *) ipacket->i_bits, rp); -+ rp += 1; -+ rnum -= 1; -+ } while ((rinbuf -= 1) > 0); -+ } while (rnum > 0); /* all relocs */ -+ return 1; -+} /* dload_read_reloc */ -+ -+#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32)) -+ -+/* VERY dangerous */ -+static const char IMAGEPAK[] = { "image packet" }; -+ -+/************************************************************************* -+ * Procedure dload_data -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Read image data from input file, relocate it, and download it to the -+ * target. -+ ************************************************************************/ -+static void dload_data(struct dload_state *dlthis) -+{ -+ u16 curr_sect; -+ struct doff_scnhdr_t *sptr = dlthis->sect_hdrs; -+ struct LDR_SECTION_INFO *lptr = dlthis->ldr_sections; -+#ifdef OPT_ZERO_COPY_LOADER -+ boolean bZeroCopy = false; -+#endif -+ u8 *pDest; -+ -+ struct { -+ struct image_packet_t ipacket; -+ u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)]; -+ } ibuf; -+ -+ /* Indicates whether CINIT processing has occurred */ -+ boolean cinit_processed = false; -+ -+ /* Loop through the sections and load them one at a time. -+ */ -+ for (curr_sect = 0; curr_sect < dlthis->dfile_hdr.df_no_scns; -+ curr_sect += 1) { -+ if (DS_NEEDS_DOWNLOAD(sptr)) { -+ s32 nip; -+ LDR_ADDR image_offset = 0; -+ /* set relocation info for this section */ -+ if (curr_sect < dlthis->allocated_secn_count) -+ dlthis->delta_runaddr = sptr->ds_paddr; -+ else { -+ lptr = DOFFSEC_IS_LDRSEC(sptr); -+ dlthis->delta_runaddr = 0; -+ } -+ dlthis->image_secn = lptr; -+#if BITS_PER_AU > BITS_PER_BYTE -+ lptr->name = unpack_name(dlthis, sptr->ds_offset); -+#endif -+ nip = sptr->ds_nipacks; -+ while ((nip -= 1) >= 0) { /* process packets */ -+ -+ s32 ipsize; -+ u32 checks; -+ /* get the fixed header bits */ -+ if (dlthis->strm->read_buffer(dlthis->strm, -+ &ibuf.ipacket, IPH_SIZE) != IPH_SIZE) { -+ DL_ERROR(E_READSTRM, IMAGEPAK); -+ return; -+ } -+ /* reorder the header if need be */ -+ if (dlthis->reorder_map) { -+ dload_reorder(&ibuf.ipacket, IPH_SIZE, -+ dlthis->reorder_map); -+ } -+ /* now read the rest of the packet */ -+ ipsize = -+ BYTE_TO_HOST(DOFF_ALIGN -+ (ibuf.ipacket.i_packet_size)); -+ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) { -+ DL_ERROR("Bad image packet size %d", -+ ipsize); -+ return; -+ } -+ pDest = ibuf.bufr; -+#ifdef OPT_ZERO_COPY_LOADER -+ bZeroCopy = false; -+ if (DLOAD_SECT_TYPE(sptr) != DLOAD_CINIT) { -+ dlthis->myio->writemem(dlthis->myio, -+ &pDest, lptr->load_addr + -+ image_offset, lptr, 0); -+ bZeroCopy = (pDest != ibuf.bufr); -+ } -+#endif -+ /* End of determination */ -+ -+ if (dlthis->strm->read_buffer(dlthis->strm, -+ ibuf.bufr, ipsize) != ipsize) { -+ DL_ERROR(E_READSTRM, IMAGEPAK); -+ return; -+ } -+ ibuf.ipacket.i_bits = pDest; -+ -+ /* reorder the bytes if need be */ -+#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16) -+ if (dlthis->reorder_map) { -+ dload_reorder(pDest, ipsize, -+ dlthis->reorder_map); -+ } -+ checks = dload_checksum(pDest, ipsize); -+#else -+ if (dlthis->dfile_hdr.df_byte_reshuffle != -+ TARGET_ORDER(REORDER_MAP -+ 
(BYTE_RESHUFFLE_VALUE))) { -+ /* put image bytes in big-endian order, -+ * not PC order */ -+ dload_reorder(pDest, ipsize, -+ TARGET_ORDER -+ (dlthis->dfile_hdr.df_byte_reshuffle)); -+ } -+#if TARGET_AU_BITS > 8 -+ checks = dload_reverse_checksum_16(pDest, -+ ipsize); -+#else -+ checks = dload_reverse_checksum(pDest, -+ ipsize); -+#endif -+#endif -+ -+ checks += dload_checksum(&ibuf.ipacket, -+ IPH_SIZE); -+ /* relocate the image bits as needed */ -+ if (ibuf.ipacket.i_num_relocs) { -+ dlthis->image_offset = image_offset; -+ if (!relocate_packet(dlthis, -+ &ibuf.ipacket, &checks)) -+ return; /* serious error */ -+ } -+ if (~checks) -+ DL_ERROR(E_CHECKSUM, IMAGEPAK); -+ /* stuff the result into target memory */ -+ if (DLOAD_SECT_TYPE(sptr) == DLOAD_CINIT) { -+ cload_cinit(dlthis, &ibuf.ipacket); -+ cinit_processed = true; -+ } else { -+#ifdef OPT_ZERO_COPY_LOADER -+ if (!bZeroCopy) { -+#endif -+ -+ if (!dlthis->myio->writemem -+ (dlthis->myio, ibuf.bufr, -+ lptr->load_addr + image_offset, lptr, -+ BYTE_TO_HOST -+ (ibuf.ipacket.i_packet_size))) { -+ DL_ERROR( -+ "Write to " FMT_UI32 " failed", -+ lptr->load_addr + image_offset); -+ } -+#ifdef OPT_ZERO_COPY_LOADER -+ } -+#endif -+ -+ } -+ image_offset += -+ BYTE_TO_TADDR(ibuf.ipacket.i_packet_size); -+ } /* process packets */ -+ /* if this is a BSS section, we may want to fill it */ -+ if (DLOAD_SECT_TYPE(sptr) != DLOAD_BSS) -+ goto loop_cont; -+ -+ if (!(dlthis->myoptions & DLOAD_INITBSS)) -+ goto loop_cont; -+ -+ if (cinit_processed) { -+ /* Don't clear BSS after load-time -+ * initialization */ -+ DL_ERROR -+ ("Zero-initialization at " FMT_UI32 " after " -+ "load-time initialization!", lptr->load_addr); -+ goto loop_cont; -+ } -+ /* fill the .bss area */ -+ dlthis->myio->fillmem(dlthis->myio, -+ TADDR_TO_HOST(lptr->load_addr), -+ lptr, TADDR_TO_HOST(lptr->size), -+ dload_fill_bss); -+ goto loop_cont; -+ } /* if DS_DOWNLOAD_MASK */ -+ /* If not loading, but BSS, zero initialize */ -+ if (DLOAD_SECT_TYPE(sptr) != DLOAD_BSS) -+ goto loop_cont; -+ -+ if (!(dlthis->myoptions & DLOAD_INITBSS)) -+ goto loop_cont; -+ -+ if (curr_sect >= dlthis->allocated_secn_count) -+ lptr = DOFFSEC_IS_LDRSEC(sptr); -+ -+ if (cinit_processed) { -+ /*Don't clear BSS after load-time initialization */ -+ DL_ERROR( -+ "Zero-initialization at " FMT_UI32 " attempted after " -+ "load-time initialization!", lptr->load_addr); -+ goto loop_cont; -+ } -+ /* fill the .bss area */ -+ dlthis->myio->fillmem(dlthis->myio, -+ TADDR_TO_HOST(lptr->load_addr), lptr, -+ TADDR_TO_HOST(lptr->size), dload_fill_bss); -+loop_cont: -+ sptr += 1; -+ lptr += 1; -+ } /* load sections */ -+} /* dload_data */ -+ -+/************************************************************************* -+ * Procedure dload_reorder -+ * -+ * Parameters: -+ * data 32-bit aligned pointer to data to be byte-swapped -+ * dsiz size of the data to be reordered in sizeof() units. -+ * map 32-bit map defining how to reorder the data. Value -+ * must be REORDER_MAP() of some permutation -+ * of 0x00 01 02 03 -+ * -+ * Effect: -+ * Re-arranges the bytes in each word according to the map specified. 
-+ * -+ ************************************************************************/ -+/* mask for byte shift count */ -+#define SHIFT_COUNT_MASK (3 << LOG_BITS_PER_BYTE) -+ -+void dload_reorder(void *data, int dsiz, unsigned int map) -+{ -+ register u32 tmp, tmap, datv; -+ u32 *dp = (u32 *)data; -+ -+ map <<= LOG_BITS_PER_BYTE; /* align map with SHIFT_COUNT_MASK */ -+ do { -+ tmp = 0; -+ datv = *dp; -+ tmap = map; -+ do { -+ tmp |= (datv & BYTE_MASK) << (tmap & SHIFT_COUNT_MASK); -+ tmap >>= BITS_PER_BYTE; -+ } while (datv >>= BITS_PER_BYTE); -+ *dp++ = tmp; -+ } while ((dsiz -= sizeof(u32)) > 0); -+} /* dload_reorder */ -+ -+/************************************************************************* -+ * Procedure dload_checksum -+ * -+ * Parameters: -+ * data 32-bit aligned pointer to data to be checksummed -+ * siz size of the data to be checksummed in sizeof() units. -+ * -+ * Effect: -+ * Returns a checksum of the specified block -+ * -+ ************************************************************************/ -+u32 dload_checksum(void *data, unsigned siz) -+{ -+ u32 sum; -+ u32 *dp; -+ int left; -+ -+ sum = 0; -+ dp = (u32 *)data; -+ for (left = siz; left > 0; left -= sizeof(u32)) -+ sum += *dp++; -+ return sum; -+} /* dload_checksum */ -+ -+#if HOST_ENDIANNESS -+/************************************************************************* -+ * Procedure dload_reverse_checksum -+ * -+ * Parameters: -+ * data 32-bit aligned pointer to data to be checksummed -+ * siz size of the data to be checksummed in sizeof() units. -+ * -+ * Effect: -+ * Returns a checksum of the specified block, which is assumed to be bytes -+ * in big-endian order. -+ * -+ * Notes: -+ * In a big-endian host, things like the string table are stored as bytes -+ * in host order. But dllcreate always checksums in little-endian order. -+ * It is most efficient to just handle the difference a word at a time. -+ * -+ ***********************************************************************/ -+u32 dload_reverse_checksum(void *data, unsigned siz) -+{ -+ u32 sum, temp; -+ u32 *dp; -+ int left; -+ -+ sum = 0; -+ dp = (u32 *)data; -+ -+ for (left = siz; left > 0; left -= sizeof(u32)) { -+ temp = *dp++; -+ sum += temp << BITS_PER_BYTE * 3; -+ sum += temp >> BITS_PER_BYTE * 3; -+ sum += (temp >> BITS_PER_BYTE) & (BYTE_MASK << BITS_PER_BYTE); -+ sum += (temp & (BYTE_MASK << BITS_PER_BYTE)) << BITS_PER_BYTE; -+ } -+ -+ return sum; -+} /* dload_reverse_checksum */ -+ -+#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32) -+u32 dload_reverse_checksum_16(void *data, unsigned siz) -+{ -+ uint_fast32_t sum, temp; -+ u32 *dp; -+ int left; -+ -+ sum = 0; -+ dp = (u32 *)data; -+ -+ for (left = siz; left > 0; left -= sizeof(u32)) { -+ temp = *dp++; -+ sum += temp << BITS_PER_BYTE * 2; -+ sum += temp >> BITS_PER_BYTE * 2; -+ } -+ -+ return sum; -+} /* dload_reverse_checksum_16 */ -+#endif -+#endif -+ -+/************************************************************************* -+ * Procedure swap_words -+ * -+ * Parameters: -+ * data 32-bit aligned pointer to data to be swapped -+ * siz size of the data to be swapped. 
-+ * bitmap Bit map of how to swap each 32-bit word; 1 => 2 shorts, -+ * 0 => 1 long -+ * -+ * Effect: -+ * Swaps the specified data according to the specified map -+ * -+ ************************************************************************/ -+static void swap_words(void *data, unsigned siz, unsigned bitmap) -+{ -+ register int i; -+#if TARGET_AU_BITS < 16 -+ register u16 *sp; -+#endif -+ register u32 *lp; -+ -+ siz /= sizeof(u16); -+ -+#if TARGET_AU_BITS < 16 -+ /* pass 1: do all the bytes */ -+ i = siz; -+ sp = (u16 *) data; -+ do { -+ register u16 tmp; -+ tmp = *sp; -+ *sp++ = SWAP16BY8(tmp); -+ } while ((i -= 1) > 0); -+#endif -+ -+#if TARGET_AU_BITS < 32 -+ /* pass 2: fixup the 32-bit words */ -+ i = siz >> 1; -+ lp = (u32 *) data; -+ do { -+ if ((bitmap & 1) == 0) { -+ register u32 tmp; -+ tmp = *lp; -+ *lp = SWAP32BY16(tmp); -+ } -+ lp += 1; -+ bitmap >>= 1; -+ } while ((i -= 1) > 0); -+#endif -+} /* swap_words */ -+ -+/************************************************************************* -+ * Procedure copy_tgt_strings -+ * -+ * Parameters: -+ * dstp Destination address. Assumed to be 32-bit aligned -+ * srcp Source address. Assumed to be 32-bit aligned -+ * charcount Number of characters to copy. -+ * -+ * Effect: -+ * Copies strings from the source (which is in usual .dof file order on -+ * the loading processor) to the destination buffer (which should be in proper -+ * target addressable unit order). Makes sure the last string in the -+ * buffer is NULL terminated (for safety). -+ * Returns the first unused destination address. -+ ************************************************************************/ -+static char *copy_tgt_strings(void *dstp, void *srcp, unsigned charcount) -+{ -+ register TgtAU_t *src = (TgtAU_t *)srcp; -+ register TgtAU_t *dst = (TgtAU_t *)dstp; -+ register int cnt = charcount; -+ do { -+#if TARGET_AU_BITS <= BITS_PER_AU -+ /* byte-swapping issues may exist for strings on target */ -+ *dst++ = *src++; -+#elif TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN) -+ register TgtAU_t tmp; -+ tmp = *src++; -+ *dst++ = SWAP16BY8(tmp); /* right for TARGET_AU_BITS == 16 */ -+#else -+ *dst++ = *src++; -+#endif -+ } while ((cnt -= (sizeof(TgtAU_t) * BITS_PER_AU / BITS_PER_BYTE)) > 0); -+ /*apply force to make sure that the string table has null terminator */ -+#if (BITS_PER_AU == BITS_PER_BYTE) && (TARGET_AU_BITS == BITS_PER_BYTE) -+ dst[-1] = 0; -+#elif TARGET_BIG_ENDIAN -+ dst[-1] &= ~BYTE_MASK; /* big-endian */ -+#else -+ dst[-1] &= (1 << (BITS_PER_AU - BITS_PER_BYTE)) - 1; /* little endian */ -+#endif -+ return (char *)dst; -+} /* copy_tgt_strings */ -+ -+/************************************************************************* -+ * Procedure init_module_handle -+ * -+ * Parameters: -+ * none -+ * -+ * Effect: -+ * Initializes the module handle we use to enable unloading, and installs -+ * the debug information required by the target. -+ * -+ * Notes: -+ * The handle returned from Dynamic_Load_Module needs to encapsulate all the -+ * allocations done for the module, and enable them plus the modules symbols to -+ * be deallocated. 
-+ * -+ ************************************************************************/ -+#ifndef _BIG_ENDIAN -+static const struct LDR_SECTION_INFO DLLVIEW_INFO_INIT = { ".dllview", 0, 0, -+ (LDR_ADDR) -1, DBG_LIST_PAGE, DLOAD_DATA, 0 }; -+#else -+static const struct LDR_SECTION_INFO DLLVIEW_INFO_INIT = { ".dllview", 0, 0, -+ (LDR_ADDR) -1, DLOAD_DATA, DBG_LIST_PAGE, 0 }; -+#endif -+static void init_module_handle(struct dload_state *dlthis) -+{ -+ struct my_handle *hndl; -+ u16 curr_sect; -+ struct LDR_SECTION_INFO *asecs; -+ struct dll_module *dbmod; -+ struct dll_sect *dbsec; -+ struct dbg_mirror_root *mlist; -+ register char *cp; -+ struct modules_header mhdr; -+ struct LDR_SECTION_INFO dllview_info; -+ struct dynload_symbol *debug_mirror_sym; -+ hndl = dlthis->myhandle; -+ if (!hndl) -+ return; /* must be errors detected, so forget it */ -+ hndl->secn_count = dlthis->allocated_secn_count << 1; -+#ifndef TARGET_ENDIANNESS -+ if (dlthis->big_e_target) -+ hndl->secn_count += 1; /* flag for big-endian */ -+#endif -+ if (dlthis->dload_errcount) -+ return; /* abandon if errors detected */ -+ /* Locate the symbol that names the header for the CCS debug list -+ of modules. If not found, we just don't generate the debug record. -+ If found, we create our modules list. We make sure to create the -+ LOADER_DLLVIEW_ROOT even if there is no relocation info to record, -+ just to try to put both symbols in the same symbol table and -+ module.*/ -+ debug_mirror_sym = dlthis->mysym->Find_Matching_Symbol(dlthis->mysym, -+ LOADER_DLLVIEW_ROOT); -+ if (!debug_mirror_sym) { -+ struct dynload_symbol *dlmodsym; -+ struct dbg_mirror_root *mlst; -+ -+ /* our root symbol is not yet present; -+ check if we have DLModules defined */ -+ dlmodsym = dlthis->mysym->Find_Matching_Symbol(dlthis->mysym, -+ LINKER_MODULES_HEADER); -+ if (!dlmodsym) -+ return; /* no DLModules list so no debug info */ -+ /* if we have DLModules defined, construct our header */ -+ mlst = (struct dbg_mirror_root *) -+ dlthis->mysym->Allocate(dlthis->mysym, -+ sizeof(struct dbg_mirror_root)); -+ if (!mlst) { -+ DL_ERROR(E_ALLOC, sizeof(struct dbg_mirror_root)); -+ return; -+ } -+ mlst->hnext = NULL; -+ mlst->changes = 0; -+ mlst->refcount = 0; -+ mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value); -+ /* add our root symbol */ -+ debug_mirror_sym = dlthis->mysym->Add_To_Symbol_Table -+ (dlthis->mysym, LOADER_DLLVIEW_ROOT, -+ (unsigned)dlthis->myhandle); -+ if (!debug_mirror_sym) { -+ /* failed, recover memory */ -+ dlthis->mysym->Deallocate(dlthis->mysym, mlst); -+ return; -+ } -+ debug_mirror_sym->value = (u32)mlst; -+ } -+ /* First create the DLLview record and stuff it into the buffer. -+ Then write it to the DSP. 
Record pertinent locations in our hndl, -+ and add it to the per-processor list of handles with debug info.*/ -+#ifndef DEBUG_HEADER_IN_LOADER -+ mlist = (struct dbg_mirror_root *)debug_mirror_sym->value; -+ if (!mlist) -+ return; -+#else -+ mlist = (struct dbg_mirror_root *)&debug_list_header; -+#endif -+ hndl->dm.hroot = mlist; /* set pointer to root into our handle */ -+ if (!dlthis->allocated_secn_count) -+ return; /* no load addresses to be recorded */ -+ /* reuse temporary symbol storage */ -+ dbmod = (struct dll_module *) dlthis->local_symtab; -+ /* Create the DLLview record in the memory we retain for our handle*/ -+ dbmod->num_sects = dlthis->allocated_secn_count; -+ dbmod->timestamp = dlthis->verify.dv_timdat; -+ dbmod->version = INIT_VERSION; -+ dbmod->verification = VERIFICATION; -+ asecs = dlthis->ldr_sections; -+ dbsec = dbmod->sects; -+ for (curr_sect = dlthis->allocated_secn_count; -+ curr_sect > 0; curr_sect -= 1) { -+ dbsec->sect_load_adr = asecs->load_addr; -+ dbsec->sect_run_adr = asecs->run_addr; -+ dbsec += 1; -+ asecs += 1; -+ } -+ /* now cram in the names */ -+ cp = copy_tgt_strings(dbsec, dlthis->str_head, -+ dlthis->debug_string_size); -+ -+ /* round off the size of the debug record, and remember same */ -+ hndl->dm.dbsiz = HOST_TO_TDATA_ROUND(cp - (char *)dbmod); -+ *cp = 0; /* strictly to make our test harness happy */ -+ dllview_info = DLLVIEW_INFO_INIT; -+ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz); -+ /* Initialize memory context to default heap */ -+ dllview_info.context = 0; -+ hndl->dm.context = 0; -+ /* fill in next pointer and size */ -+ if (mlist->hnext) { -+ dbmod->next_module = TADDR_TO_TDATA(mlist->hnext->dm.dbthis); -+ dbmod->next_module_size = mlist->hnext->dm.dbsiz; -+ } else { -+ dbmod->next_module_size = 0; -+ dbmod->next_module = 0; -+ } -+ /* allocate memory for on-DSP DLLview debug record */ -+ if (!dlthis->myalloc) -+ return; -+ if (!dlthis->myalloc->Allocate(dlthis->myalloc, &dllview_info, -+ HOST_TO_TADDR(sizeof(u32)))) { -+ return; -+ } -+ /* Store load address of .dllview section */ -+ hndl->dm.dbthis = dllview_info.load_addr; -+ /* Store memory context (segid) in which .dllview section -+ * was allocated */ -+ hndl->dm.context = dllview_info.context; -+ mlist->refcount += 1; -+ /* swap bytes in the entire debug record, but not the string table */ -+ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) { -+ swap_words(dbmod, (char *)dbsec - (char *)dbmod, -+ DLL_MODULE_BITMAP); -+ } -+ /* Update the DLLview list on the DSP write new record */ -+ if (!dlthis->myio->writemem(dlthis->myio, dbmod, -+ dllview_info.load_addr, &dllview_info, -+ TADDR_TO_HOST(dllview_info.size))) { -+ return; -+ } -+ /* write new header */ -+ mhdr.first_module_size = hndl->dm.dbsiz; -+ mhdr.first_module = TADDR_TO_TDATA(dllview_info.load_addr); -+ /* swap bytes in the module header, if needed */ -+ if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) { -+ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), -+ MODULES_HEADER_BITMAP); -+ } -+ dllview_info = DLLVIEW_INFO_INIT; -+ if (!dlthis->myio->writemem(dlthis->myio, &mhdr, mlist->dbthis, -+ &dllview_info, sizeof(struct modules_header) - -+ sizeof(u16))) { -+ return; -+ } -+ /* Add the module handle to this processor's list -+ of handles with debug info */ -+ hndl->dm.hnext = mlist->hnext; -+ if (hndl->dm.hnext) -+ hndl->dm.hnext->dm.hprev = hndl; -+ hndl->dm.hprev = (struct my_handle *) mlist; -+ mlist->hnext = hndl; /* insert after root*/ -+} /* init_module_handle */ -+ 
-+/************************************************************************* -+ * Procedure Dynamic_Unload_Module -+ * -+ * Parameters: -+ * mhandle A module handle from Dynamic_Load_Module -+ * syms Host-side symbol table and malloc/free functions -+ * alloc Target-side memory allocation -+ * -+ * Effect: -+ * The module specified by mhandle is unloaded. Unloading causes all -+ * target memory to be deallocated, all symbols defined by the module to -+ * be purged, and any host-side storage used by the dynamic loader for -+ * this module to be released. -+ * -+ * Returns: -+ * Zero for success. On error, the number of errors detected is returned. -+ * Individual errors are reported using syms->Error_Report(). -+ ************************************************************************/ -+int Dynamic_Unload_Module(DLOAD_mhandle mhandle, -+ struct Dynamic_Loader_Sym *syms, -+ struct Dynamic_Loader_Allocate *alloc, -+ struct Dynamic_Loader_Initialize *init) -+{ -+ s16 curr_sect; -+ struct LDR_SECTION_INFO *asecs; -+ struct my_handle *hndl; -+ struct dbg_mirror_root *root; -+ unsigned errcount = 0; -+ struct LDR_SECTION_INFO dllview_info = DLLVIEW_INFO_INIT; -+ struct modules_header mhdr; -+ -+ hndl = (struct my_handle *)mhandle; -+ if (!hndl) -+ return 0; /* if handle is null, nothing to do */ -+ /* Clear out the module symbols -+ * Note that if this is the module that defined MODULES_HEADER -+ (the head of the target debug list) -+ * then this operation will blow away that symbol. -+ It will therefore be impossible for subsequent -+ * operations to add entries to this un-referenceable list.*/ -+ if (!syms) -+ return 1; -+ syms->Purge_Symbol_Table(syms, (unsigned) hndl); -+ /* Deallocate target memory for sections */ -+ asecs = hndl->secns; -+ if (alloc) -+ for (curr_sect = (hndl->secn_count >> 1); curr_sect > 0; -+ curr_sect -= 1) { -+ asecs->name = NULL; -+ alloc->Deallocate(alloc, asecs++); -+ } -+ root = hndl->dm.hroot; -+ if (!root) { -+ /* there is a debug list containing this module */ -+ goto func_end; -+ } -+ if (!hndl->dm.dbthis) { /* target-side dllview record exists */ -+ goto loop_end; -+ } -+ /* Retrieve memory context in which .dllview was allocated */ -+ dllview_info.context = hndl->dm.context; -+ if (hndl->dm.hprev == hndl) -+ goto exitunltgt; -+ -+ /* target-side dllview record is in list */ -+ /* dequeue this record from our GPP-side mirror list */ -+ hndl->dm.hprev->dm.hnext = hndl->dm.hnext; -+ if (hndl->dm.hnext) -+ hndl->dm.hnext->dm.hprev = hndl->dm.hprev; -+ /* Update next_module of previous entry in target list -+ * We are using mhdr here as a surrogate for either a -+ struct modules_header or a dll_module */ -+ if (hndl->dm.hnext) { -+ mhdr.first_module = TADDR_TO_TDATA(hndl->dm.hnext->dm.dbthis); -+ mhdr.first_module_size = hndl->dm.hnext->dm.dbsiz; -+ } else { -+ mhdr.first_module = 0; -+ mhdr.first_module_size = 0; -+ } -+ if (!init) -+ goto exitunltgt; -+ -+ if (!init->connect(init)) { -+ dload_syms_error(syms, E_ICONNECT); -+ errcount += 1; -+ goto exitunltgt; -+ } -+ /* swap bytes in the module header, if needed */ -+ if (TARGET_ENDIANNESS_DIFFERS(hndl->secn_count & 0x1)) { -+ swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), -+ MODULES_HEADER_BITMAP); -+ } -+ if (!init->writemem(init, &mhdr, hndl->dm.hprev->dm.dbthis, -+ &dllview_info, sizeof(struct modules_header) - -+ sizeof(mhdr.update_flag))) { -+ dload_syms_error(syms, E_DLVWRITE); -+ errcount += 1; -+ } -+ /* update change counter */ -+ root->changes += 1; -+ if (!init->writemem(init, 
&(root->changes), -+ root->dbthis + HOST_TO_TADDR -+ (sizeof(mhdr.first_module) + -+ sizeof(mhdr.first_module_size)), -+ &dllview_info, -+ sizeof(mhdr.update_flag))) { -+ dload_syms_error(syms, E_DLVWRITE); -+ errcount += 1; -+ } -+ init->release(init); -+exitunltgt: -+ /* release target storage */ -+ dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz); -+ dllview_info.load_addr = hndl->dm.dbthis; -+ if (alloc) -+ alloc->Deallocate(alloc, &dllview_info); -+ root->refcount -= 1; -+ /* target-side dllview record exists */ -+loop_end: -+#ifndef DEBUG_HEADER_IN_LOADER -+ if (root->refcount <= 0) { -+ /* if all references gone, blow off the header */ -+ /* our root symbol may be gone due to the Purge above, -+ but if not, do not destroy the root */ -+ if (syms->Find_Matching_Symbol -+ (syms, LOADER_DLLVIEW_ROOT) == NULL) -+ syms->Deallocate(syms, root); -+ } -+#endif -+func_end: -+ /* there is a debug list containing this module */ -+ syms->Deallocate(syms, mhandle); /* release our storage */ -+ return errcount; -+} /* Dynamic_Unload_Module */ -+ -+#if BITS_PER_AU > BITS_PER_BYTE -+/************************************************************************* -+ * Procedure unpack_name -+ * -+ * Parameters: -+ * soffset Byte offset into the string table -+ * -+ * Effect: -+ * Returns a pointer to the string specified by the offset supplied, or -+ * NULL for error. -+ * -+ ************************************************************************/ -+static char *unpack_name(struct dload_state *dlthis, u32 soffset) -+{ -+ u8 tmp, *src; -+ char *dst; -+ -+ if (soffset >= dlthis->dfile_hdr.df_strtab_size) { -+ dload_error(dlthis, "Bad string table offset " FMT_UI32, -+ soffset); -+ return NULL; -+ } -+ src = (uint_least8_t *)dlthis->str_head + -+ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)); -+ dst = dlthis->str_temp; -+ if (soffset & 1) -+ *dst++ = *src++; /* only 1 character in first word */ -+ do { -+ tmp = *src++; -+ *dst = (tmp >> BITS_PER_BYTE); -+ if (!(*dst++)) -+ break; -+ } while ((*dst++ = tmp & BYTE_MASK)); -+ dlthis->temp_len = dst - dlthis->str_temp; -+ /* squirrel away length including terminating null */ -+ return dlthis->str_temp; -+} /* unpack_name */ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dlclasses_hdr.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/dlclasses_hdr.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dlclasses_hdr.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/dlclasses_hdr.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,41 @@ -+/* -+ * dlclasses_hdr.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+ -+ -+#ifndef _DLCLASSES_HDR_H -+#define _DLCLASSES_HDR_H -+ -+/***************************************************************************** -+ ***************************************************************************** -+ * -+ * DLCLASSES_HDR.H -+ * -+ * Sample classes in support of the dynamic loader -+ * -+ * These are just concrete derivations of the virtual ones in dynamic_loader.h -+ * with a few additional interfaces for init, etc. -+ ***************************************************************************** -+ *****************************************************************************/ -+ -+#include -+ -+#include "DLstream.h" -+#include "DLsymtab.h" -+#include "DLalloc.h" -+#include "DLinit.h" -+ -+#endif /* _DLCLASSES_HDR_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dload_internal.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/dload_internal.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/dload_internal.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/dload_internal.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,237 @@ -+/* -+ * dload_internal.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+ -+#ifndef __DLOAD_INTERNAL__ -+#define __DLOAD_INTERNAL__ -+ -+#include -+ -+/* -+ * Internal state definitions for the dynamic loader -+ */ -+ -+#define TRUE 1 -+#define FALSE 0 -+typedef int boolean; -+ -+ -+/* type used for relocation intermediate results */ -+typedef s32 RVALUE; -+ -+/* unsigned version of same; must have at least as many bits */ -+typedef u32 URVALUE; -+ -+/* -+ * Dynamic loader configuration constants -+ */ -+/* error issued if input has more sections than this limit */ -+#define REASONABLE_SECTION_LIMIT 100 -+ -+/* (Addressable unit) value used to clear BSS section */ -+#define dload_fill_bss 0 -+ -+/* -+ * Reorder maps explained (?) -+ * -+ * The doff file format defines a 32-bit pattern used to determine the -+ * byte order of an image being read. That value is -+ * BYTE_RESHUFFLE_VALUE == 0x00010203 -+ * For purposes of the reorder routine, we would rather have the all-is-OK -+ * for 32-bits pattern be 0x03020100. This first macro makes the -+ * translation from doff file header value to MAP value: */ -+#define REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303) -+/* This translation is made in dload_headers. Thereafter, the all-is-OK -+ * value for the maps stored in dlthis is REORDER_MAP(BYTE_RESHUFFLE_VALUE). -+ * But sadly, not all bits of the doff file are 32-bit integers. -+ * The notable exceptions are strings and image bits. -+ * Strings obey host byte order: */ -+#if defined(_BIG_ENDIAN) -+#define HOST_BYTE_ORDER(cookedmap) ((cookedmap) ^ 0x3030303) -+#else -+#define HOST_BYTE_ORDER(cookedmap) (cookedmap) -+#endif -+/* Target bits consist of target AUs (could be bytes, or 16-bits, -+ * or 32-bits) stored as an array in host order. 
A target order -+ * map is defined by: */ -+#if !defined(_BIG_ENDIAN) || TARGET_AU_BITS > 16 -+#define TARGET_ORDER(cookedmap) (cookedmap) -+#elif TARGET_AU_BITS > 8 -+#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x2020202) -+#else -+#define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x3030303) -+#endif -+ -+/* forward declaration for handle returned by dynamic loader */ -+struct my_handle; -+ -+/* -+ * a list of module handles, which mirrors the debug list on the target -+ */ -+struct dbg_mirror_root { -+ /* must be same as dbg_mirror_list; __DLModules address on target */ -+ u32 dbthis; -+ struct my_handle *hnext; /* must be same as dbg_mirror_list */ -+ u16 changes; /* change counter */ -+ u16 refcount; /* number of modules referencing this root */ -+} ; -+ -+struct dbg_mirror_list { -+ u32 dbthis; -+ struct my_handle *hnext, *hprev; -+ struct dbg_mirror_root *hroot; -+ u16 dbsiz; -+ u32 context; /* Save context for .dllview memory allocation */ -+} ; -+ -+#define VARIABLE_SIZE 1 -+/* -+ * the structure we actually return as an opaque module handle -+ */ -+struct my_handle { -+ struct dbg_mirror_list dm; /* !!! must be first !!! */ -+ /* sections following << 1, LSB is set for big-endian target */ -+ u16 secn_count; -+ struct LDR_SECTION_INFO secns[VARIABLE_SIZE]; -+} ; -+#define MY_HANDLE_SIZE (sizeof(struct my_handle) -\ -+ sizeof(struct LDR_SECTION_INFO)) -+/* real size of my_handle */ -+ -+/* -+ * reduced symbol structure used for symbols during relocation -+ */ -+struct Local_Symbol { -+ s32 value; /* Relocated symbol value */ -+ s32 delta; /* Original value in input file */ -+ s16 secnn; /* section number */ -+ s16 sclass; /* symbol class */ -+} ; -+ -+/* -+ * States of the .cinit state machine -+ */ -+enum cinit_mode { -+ CI_count = 0, /* expecting a count */ -+ CI_address, /* expecting an address */ -+#if CINIT_ALIGN < CINIT_ADDRESS /* handle case of partial address field */ -+ CI_partaddress, /* have only part of the address */ -+#endif -+ CI_copy, /* in the middle of copying data */ -+ CI_done /* end of .cinit table */ -+}; -+ -+/* -+ * The internal state of the dynamic loader, which is passed around as -+ * an object -+ */ -+struct dload_state { -+ struct Dynamic_Loader_Stream *strm; /* The module input stream */ -+ struct Dynamic_Loader_Sym *mysym; /* Symbols for this session */ -+ struct Dynamic_Loader_Allocate *myalloc; /* target memory allocator */ -+ struct Dynamic_Loader_Initialize *myio; /* target memory initializer */ -+ unsigned myoptions; /* Options parameter Dynamic_Load_Module */ -+ -+ char *str_head; /* Pointer to string table */ -+#if BITS_PER_AU > BITS_PER_BYTE -+ char *str_temp; /* Pointer to temporary buffer for strings */ -+ /* big enough to hold longest string */ -+ unsigned temp_len; /* length of last temporary string */ -+ char *xstrings; /* Pointer to buffer for expanded */ -+ /* strings for sec names */ -+#endif -+ /* Total size of strings for DLLView section names */ -+ unsigned debug_string_size; -+ /* Pointer to parallel section info for allocated sections only */ -+ struct doff_scnhdr_t *sect_hdrs; /* Pointer to section table */ -+ struct LDR_SECTION_INFO *ldr_sections; -+#if TMS32060 -+ /* The address of the start of the .bss section */ -+ LDR_ADDR bss_run_base; -+#endif -+ struct Local_Symbol *local_symtab; /* Relocation symbol table */ -+ -+ /* pointer to DL section info for the section being relocated */ -+ struct LDR_SECTION_INFO *image_secn; -+ /* change in run address for current section during relocation */ -+ LDR_ADDR delta_runaddr; -+ 
LDR_ADDR image_offset; /* offset of current packet in section */ -+ enum cinit_mode cinit_state; /* current state of cload_cinit() */ -+ int cinit_count; /* the current count */ -+ LDR_ADDR cinit_addr; /* the current address */ -+ s16 cinit_page; /* the current page */ -+ /* Handle to be returned by Dynamic_Load_Module */ -+ struct my_handle *myhandle; -+ unsigned dload_errcount; /* Total # of errors reported so far */ -+ /* Number of target sections that require allocation and relocation */ -+ unsigned allocated_secn_count; -+#ifndef TARGET_ENDIANNESS -+ boolean big_e_target; /* Target data in big-endian format */ -+#endif -+ /* map for reordering bytes, 0 if not needed */ -+ u32 reorder_map; -+ struct doff_filehdr_t dfile_hdr; /* DOFF file header structure */ -+ struct doff_verify_rec_t verify; /* Verify record */ -+ -+ int relstkidx; /* index into relocation value stack */ -+ /* relocation value stack used in relexp.c */ -+ RVALUE relstk[STATIC_EXPR_STK_SIZE]; -+ -+} ; -+ -+#ifdef TARGET_ENDIANNESS -+#define TARGET_BIG_ENDIAN TARGET_ENDIANNESS -+#else -+#define TARGET_BIG_ENDIAN (dlthis->big_e_target) -+#endif -+ -+/* -+ * Exports from cload.c to rest of the world -+ */ -+extern void dload_error(struct dload_state *dlthis, const char *errtxt, ...); -+extern void dload_syms_error(struct Dynamic_Loader_Sym *syms, -+ const char *errtxt, ...); -+extern void dload_headers(struct dload_state *dlthis); -+extern void dload_strings(struct dload_state *dlthis, boolean sec_names_only); -+extern void dload_sections(struct dload_state *dlthis); -+extern void dload_reorder(void *data, int dsiz, u32 map); -+extern u32 dload_checksum(void *data, unsigned siz); -+ -+#if HOST_ENDIANNESS -+extern uint32_t dload_reverse_checksum(void *data, unsigned siz); -+#if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32) -+extern uint32_t dload_reverse_checksum_16(void *data, unsigned siz); -+#endif -+#endif -+ -+#define is_data_scn(zzz) (DLOAD_SECTION_TYPE((zzz)->type) != DLOAD_TEXT) -+#define is_data_scn_num(zzz) \ -+ (DLOAD_SECT_TYPE(&dlthis->sect_hdrs[(zzz)-1]) != DLOAD_TEXT) -+ -+/* -+ * exported by reloc.c -+ */ -+extern void dload_relocate(struct dload_state *dlthis, TgtAU_t *data, -+ struct reloc_record_t *rp); -+ -+extern RVALUE dload_unpack(struct dload_state *dlthis, TgtAU_t *data, -+ int fieldsz, int offset, unsigned sgn); -+ -+extern int dload_repack(struct dload_state *dlthis, RVALUE val, TgtAU_t *data, -+ int fieldsz, int offset, unsigned sgn); -+ -+#endif /* __DLOAD_INTERNAL__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/doff.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/doff.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/doff.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/doff.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,347 @@ -+/* -+ * doff.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+ -+/*****************************************************************************/ -+/* DOFF.H - Structures & definitions used for dynamically */ -+/* loaded modules file format. This format is a reformatted */ -+/* version of COFF.(see coff.h for details) It optimizes the */ -+/* layout for the dynamic loader. */ -+/* */ -+/* .dof files, when viewed as a sequence of 32-bit integers, look the same */ -+/* on big-endian and little-endian machines. */ -+/*****************************************************************************/ -+#ifndef _DOFF_H -+#define _DOFF_H -+ -+#ifndef UINT32_C -+#define UINT32_C(zzz) ((u32)zzz) -+#endif -+ -+#define BYTE_RESHUFFLE_VALUE UINT32_C(0x00010203) -+ -+/* DOFF file header containing fields categorizing the remainder of the file */ -+struct doff_filehdr_t { -+ -+ /* string table size, including filename, in bytes */ -+ u32 df_strtab_size; -+ -+ /* entry point if one exists */ -+ u32 df_entrypt; -+ -+ /* identifies byte ordering of file; -+ * always set to BYTE_RESHUFFLE_VALUE */ -+ u32 df_byte_reshuffle; -+ -+ /* Size of the string table up to and including the last section name */ -+ /* Size includes the name of the COFF file also */ -+ u32 df_scn_name_size; -+ -+#ifndef _BIG_ENDIAN -+ /* number of symbols */ -+ u16 df_no_syms; -+ -+ /* length in bytes of the longest string, including terminating NULL */ -+ /* excludes the name of the file */ -+ u16 df_max_str_len; -+ -+ /* total number of sections including no-load ones */ -+ u16 df_no_scns; -+ -+ /* number of sections containing target code allocated or downloaded */ -+ u16 df_target_scns; -+ -+ /* unique id for dll file format & version */ -+ u16 df_doff_version; -+ -+ /* identifies ISA */ -+ u16 df_target_id; -+ -+ /* useful file flags */ -+ u16 df_flags; -+ -+ /* section reference for entry point, N_UNDEF for none, */ -+ /* N_ABS for absolute address */ -+ s16 df_entry_secn; -+#else -+ /* length of the longest string, including terminating NULL */ -+ u16 df_max_str_len; -+ -+ /* number of symbols */ -+ u16 df_no_syms; -+ -+ /* number of sections containing target code allocated or downloaded */ -+ u16 df_target_scns; -+ -+ /* total number of sections including no-load ones */ -+ u16 df_no_scns; -+ -+ /* identifies ISA */ -+ u16 df_target_id; -+ -+ /* unique id for dll file format & version */ -+ u16 df_doff_version; -+ -+ /* section reference for entry point, N_UNDEF for none, */ -+ /* N_ABS for absolute address */ -+ s16 df_entry_secn; -+ -+ /* useful file flags */ -+ u16 df_flags; -+#endif -+ /* checksum for file header record */ -+ u32 df_checksum; -+ -+} ; -+ -+/* flags in the df_flags field */ -+#define DF_LITTLE 0x100 -+#define DF_BIG 0x200 -+#define DF_BYTE_ORDER (DF_LITTLE | DF_BIG) -+ -+/* Supported processors */ -+#define TMS470_ID 0x97 -+#define LEAD_ID 0x98 -+#define TMS32060_ID 0x99 -+#define LEAD3_ID 0x9c -+ -+/* Primary processor for loading */ -+#if TMS32060 -+#define TARGET_ID TMS32060_ID -+#endif -+ -+/* Verification record containing values used to test integrity of the bits */ -+struct doff_verify_rec_t { -+ -+ /* time and date stamp */ -+ u32 dv_timdat; -+ -+ /* checksum for all section records */ -+ u32 dv_scn_rec_checksum; -+ -+ /* checksum for string table */ -+ u32 dv_str_tab_checksum; -+ -+ /* checksum for symbol table */ -+ u32 dv_sym_tab_checksum; -+ -+ /* checksum for verification record */ -+ u32 dv_verify_rec_checksum; -+ -+} ; -+ -+/* String table is an array of null-terminated strings. The first entry is -+ * the filename, which is added by DLLcreate. 
No new structure definitions -+ * are required. -+ */ -+ -+/* Section Records including information on the corresponding image packets */ -+/* -+ * !!WARNING!! -+ * -+ * This structure is expected to match in form LDR_SECTION_INFO in -+ * dynamic_loader.h -+ */ -+ -+struct doff_scnhdr_t { -+ -+ s32 ds_offset; /* offset into string table of name */ -+ s32 ds_paddr; /* RUN address, in target AU */ -+ s32 ds_vaddr; /* LOAD address, in target AU */ -+ s32 ds_size; /* section size, in target AU */ -+#ifndef _BIG_ENDIAN -+ u16 ds_page; /* memory page id */ -+ u16 ds_flags; /* section flags */ -+#else -+ u16 ds_flags; /* section flags */ -+ u16 ds_page; /* memory page id */ -+#endif -+ u32 ds_first_pkt_offset; -+ /* Absolute byte offset into the file */ -+ /* where the first image record resides */ -+ -+ s32 ds_nipacks; /* number of image packets */ -+ -+}; -+ -+/* Symbol table entry */ -+struct doff_syment_t { -+ -+ s32 dn_offset; /* offset into string table of name */ -+ s32 dn_value; /* value of symbol */ -+#ifndef _BIG_ENDIAN -+ s16 dn_scnum; /* section number */ -+ s16 dn_sclass; /* storage class */ -+#else -+ s16 dn_sclass; /* storage class */ -+ s16 dn_scnum; /* section number, 1-based */ -+#endif -+ -+} ; -+ -+/* special values for dn_scnum */ -+#define DN_UNDEF 0 /* undefined symbol */ -+#define DN_ABS (-1) /* value of symbol is absolute */ -+/* special values for dn_sclass */ -+#define DN_EXT 2 -+#define DN_STATLAB 20 -+#define DN_EXTLAB 21 -+ -+/* Default value of image bits in packet */ -+/* Configurable by user on the command line */ -+#define IMAGE_PACKET_SIZE 1024 -+ -+/* An image packet contains a chunk of data from a section along with */ -+/* information necessary for its processing. */ -+struct image_packet_t { -+ -+ s32 i_num_relocs; /* number of relocations for */ -+ /* this packet */ -+ -+ s32 i_packet_size; /* number of bytes in array */ -+ /* "bits" occupied by */ -+ /* valid data. Could be */ -+ /* < IMAGE_PACKET_SIZE to */ -+ /* prevent splitting a */ -+ /* relocation across packets. */ -+ /* Last packet of a section */ -+ /* will most likely contain */ -+ /* < IMAGE_PACKET_SIZE bytes */ -+ /* of valid data */ -+ -+ s32 i_checksum; /* Checksum for image packet */ -+ /* and the corresponding */ -+ /* relocation records */ -+ -+ u8 *i_bits; /* Actual data in section */ -+ -+}; -+ -+/* The relocation structure definition matches the COFF version. Offsets */ -+/* however are relative to the image packet base not the section base. 
*/ -+struct reloc_record_t { -+ -+ s32 r_vaddr; -+ -+ /* expressed in target AUs */ -+ -+ union { -+ struct { -+#ifndef _BIG_ENDIAN -+ u8 _offset; /* bit offset of rel fld */ -+ u8 _fieldsz; /* size of rel fld */ -+ u8 _wordsz; /* # bytes containing rel fld */ -+ u8 _dum1; -+ u16 _dum2; -+ u16 _type; -+#else -+ unsigned _dum1:8; -+ unsigned _wordsz:8; /* # bytes containing rel fld */ -+ unsigned _fieldsz:8; /* size of rel fld */ -+ unsigned _offset:8; /* bit offset of rel fld */ -+ u16 _type; -+ u16 _dum2; -+#endif -+ } _r_field; -+ -+ struct { -+ u32 _spc; /* image packet relative PC */ -+#ifndef _BIG_ENDIAN -+ u16 _dum; -+ u16 _type; /* relocation type */ -+#else -+ u16 _type; /* relocation type */ -+ u16 _dum; -+#endif -+ } _r_spc; -+ -+ struct { -+ u32 _uval; /* constant value */ -+#ifndef _BIG_ENDIAN -+ u16 _dum; -+ u16 _type; /* relocation type */ -+#else -+ u16 _type; /* relocation type */ -+ u16 _dum; -+#endif -+ } _r_uval; -+ -+ struct { -+ s32 _symndx; /* 32-bit sym tbl index */ -+#ifndef _BIG_ENDIAN -+ u16 _disp; /* extra addr encode data */ -+ u16 _type; /* relocation type */ -+#else -+ u16 _type; /* relocation type */ -+ u16 _disp; /* extra addr encode data */ -+#endif -+ } _r_sym; -+ } _u_reloc; -+ -+} ; -+ -+/* abbreviations for convenience */ -+#ifndef r_type -+#define r_type _u_reloc._r_sym._type -+#define r_uval _u_reloc._r_uval._uval -+#define r_symndx _u_reloc._r_sym._symndx -+#define r_offset _u_reloc._r_field._offset -+#define r_fieldsz _u_reloc._r_field._fieldsz -+#define r_wordsz _u_reloc._r_field._wordsz -+#define r_disp _u_reloc._r_sym._disp -+#endif -+ -+/*****************************************************************************/ -+/* */ -+/* Important DOFF macros used for file processing */ -+/* */ -+/*****************************************************************************/ -+ -+/* DOFF Versions */ -+#define DOFF0 0 -+ -+/* Return the address/size >= to addr that is at a 32-bit boundary */ -+/* This assumes that a byte is 8 bits */ -+#define DOFF_ALIGN(addr) (((addr) + 3) & ~UINT32_C(3)) -+ -+/*****************************************************************************/ -+/* */ -+/* The DOFF section header flags field is laid out as follows: */ -+/* */ -+/* Bits 0-3 : Section Type */ -+/* Bit 4 : Set when section requires target memory to be allocated by DL */ -+/* Bit 5 : Set when section requires downloading */ -+/* Bits 8-11: Alignment, same as COFF */ -+/* */ -+/*****************************************************************************/ -+ -+/* Enum for DOFF section types (bits 0-3 of flag): See dynamic_loader.h */ -+ -+/* Macros to help processing of sections */ -+#define DLOAD_SECT_TYPE(s_hdr) ((s_hdr)->ds_flags & 0xF) -+ -+/* DS_ALLOCATE indicates whether a section needs space on the target */ -+#define DS_ALLOCATE_MASK 0x10 -+#define DS_NEEDS_ALLOCATION(s_hdr) ((s_hdr)->ds_flags & DS_ALLOCATE_MASK) -+ -+/* DS_DOWNLOAD indicates that the loader needs to copy bits */ -+#define DS_DOWNLOAD_MASK 0x20 -+#define DS_NEEDS_DOWNLOAD(s_hdr) ((s_hdr)->ds_flags & DS_DOWNLOAD_MASK) -+ -+/* Section alignment requirement in AUs */ -+#define DS_ALIGNMENT(ds_flags) (1 << (((ds_flags) >> 8) & 0xF)) -+ -+#endif /* _DOFF_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/getsection.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/getsection.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/getsection.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/getsection.c 2011-06-22 
13:19:32.533063279 +0200 -@@ -0,0 +1,412 @@ -+/* -+ * getsection.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+ -+#include -+#include "header.h" -+ -+/* -+ * Error strings -+ */ -+static const char E_READSTRM[] = { "Error reading %s from input stream" }; -+static const char E_SEEK[] = { "Set file position to %d failed" }; -+static const char E_ISIZ[] = { "Bad image packet size %d" }; -+static const char E_CHECKSUM[] = { "Checksum failed on %s" }; -+static const char E_RELOC[] = { "DLOAD_GetSection unable to read" -+ "sections containing relocation entries"}; -+#if BITS_PER_AU > BITS_PER_BYTE -+static const char E_ALLOC[] = { "Syms->Allocate( %d ) failed" }; -+static const char E_STBL[] = { "Bad string table offset " FMT_UI32 }; -+#endif -+ -+/* -+ * we use the fact that DOFF section records are shaped just like -+ * LDR_SECTION_INFO to reduce our section storage usage. These macros -+ * marks the places where that assumption is made -+ */ -+#define DOFFSEC_IS_LDRSEC(pdoffsec) ((struct LDR_SECTION_INFO *)(pdoffsec)) -+#define LDRSEC_IS_DOFFSEC(ldrsec) ((struct doff_scnhdr_t *)(ldrsec)) -+ -+/***************************************************************/ -+/********************* SUPPORT FUNCTIONS ***********************/ -+/***************************************************************/ -+ -+#if BITS_PER_AU > BITS_PER_BYTE -+/************************************************************************** -+ * Procedure unpack_sec_name -+ * -+ * Parameters: -+ * dlthis Handle from DLOAD_module_open for this module -+ * soffset Byte offset into the string table -+ * dst Place to store the expanded string -+ * -+ * Effect: -+ * Stores a string from the string table into the destination, expanding -+ * it in the process. Returns a pointer just past the end of the stored -+ * string on success, or NULL on failure. -+ * -+ *************************************************************************/ -+static char *unpack_sec_name(struct dload_state *dlthis, -+ u32 soffset, char *dst) -+{ -+ u8 tmp, *src; -+ -+ if (soffset >= dlthis->dfile_hdr.df_scn_name_size) { -+ dload_error(dlthis, E_STBL, soffset); -+ return NULL; -+ } -+ src = (u8 *)dlthis->str_head + -+ (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)); -+ if (soffset & 1) -+ *dst++ = *src++; /* only 1 character in first word */ -+ do { -+ tmp = *src++; -+ *dst = (tmp >> BITS_PER_BYTE) -+ if (!(*dst++)) -+ break; -+ } while ((*dst++ = tmp & BYTE_MASK)); -+ -+ return dst; -+} -+ -+/************************************************************************** -+ * Procedure expand_sec_names -+ * -+ * Parameters: -+ * dlthis Handle from DLOAD_module_open for this module -+ * -+ * Effect: -+ * Allocates a buffer, unpacks and copies strings from string table into it. -+ * Stores a pointer to the buffer into a state variable. 
-+ **************************************************************************/ -+static void expand_sec_names(struct dload_state *dlthis) -+{ -+ char *xstrings, *curr, *next; -+ u32 xsize; -+ u16 sec; -+ struct LDR_SECTION_INFO *shp; -+ /* assume worst-case size requirement */ -+ xsize = dlthis->dfile_hdr.df_max_str_len * dlthis->dfile_hdr.df_no_scns; -+ xstrings = (char *)dlthis->mysym->Allocate(dlthis->mysym, xsize); -+ if (xstrings == NULL) { -+ dload_error(dlthis, E_ALLOC, xsize); -+ return; -+ } -+ dlthis->xstrings = xstrings; -+ /* For each sec, copy and expand its name */ -+ curr = xstrings; -+ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { -+ shp = DOFFSEC_IS_LDRSEC(&dlthis->sect_hdrs[sec]); -+ next = unpack_sec_name(dlthis, *(u32 *) &shp->name, curr); -+ if (next == NULL) -+ break; /* error */ -+ shp->name = curr; -+ curr = next; -+ } -+} -+ -+#endif -+ -+/***************************************************************/ -+/********************* EXPORTED FUNCTIONS **********************/ -+/***************************************************************/ -+ -+/************************************************************************** -+ * Procedure DLOAD_module_open -+ * -+ * Parameters: -+ * module The input stream that supplies the module image -+ * syms Host-side malloc/free and error reporting functions. -+ * Other methods are unused. -+ * -+ * Effect: -+ * Reads header information from a dynamic loader module using the -+ specified -+ * stream object, and returns a handle for the module information. This -+ * handle may be used in subsequent query calls to obtain information -+ * contained in the module. -+ * -+ * Returns: -+ * NULL if an error is encountered, otherwise a module handle for use -+ * in subsequent operations. 
-+ **************************************************************************/ -+DLOAD_module_info DLOAD_module_open(struct Dynamic_Loader_Stream *module, -+ struct Dynamic_Loader_Sym *syms) -+{ -+ struct dload_state *dlthis; /* internal state for this call */ -+ unsigned *dp, sz; -+ u32 sec_start; -+#if BITS_PER_AU <= BITS_PER_BYTE -+ u16 sec; -+#endif -+ -+ /* Check that mandatory arguments are present */ -+ if (!module || !syms) { -+ if (syms != NULL) -+ dload_syms_error(syms, "Required parameter is NULL"); -+ -+ return NULL; -+ } -+ -+ dlthis = (struct dload_state *) -+ syms->Allocate(syms, sizeof(struct dload_state)); -+ if (!dlthis) { -+ /* not enough storage */ -+ dload_syms_error(syms, "Can't allocate module info"); -+ return NULL; -+ } -+ -+ /* clear our internal state */ -+ dp = (unsigned *)dlthis; -+ for (sz = sizeof(struct dload_state) / sizeof(unsigned); -+ sz > 0; sz -= 1) -+ *dp++ = 0; -+ -+ dlthis->strm = module; -+ dlthis->mysym = syms; -+ -+ /* read in the doff image and store in our state variable */ -+ dload_headers(dlthis); -+ -+ if (!dlthis->dload_errcount) -+ dload_strings(dlthis, true); -+ -+ /* skip ahead past the unread portion of the string table */ -+ sec_start = sizeof(struct doff_filehdr_t) + -+ sizeof(struct doff_verify_rec_t) + -+ BYTE_TO_HOST(DOFF_ALIGN(dlthis->dfile_hdr.df_strtab_size)); -+ -+ if (dlthis->strm->set_file_posn(dlthis->strm, sec_start) != 0) { -+ dload_error(dlthis, E_SEEK, sec_start); -+ return NULL; -+ } -+ -+ if (!dlthis->dload_errcount) -+ dload_sections(dlthis); -+ -+ if (dlthis->dload_errcount) { -+ DLOAD_module_close(dlthis); /* errors, blow off our state */ -+ dlthis = NULL; -+ return NULL; -+ } -+#if BITS_PER_AU > BITS_PER_BYTE -+ /* Expand all section names from the string table into the */ -+ /* state variable, and convert section names from a relative */ -+ /* string table offset to a pointers to the expanded string. */ -+ expand_sec_names(dlthis); -+#else -+ /* Convert section names from a relative string table offset */ -+ /* to a pointer into the string table. */ -+ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { -+ struct LDR_SECTION_INFO *shp = -+ DOFFSEC_IS_LDRSEC(&dlthis->sect_hdrs[sec]); -+ shp->name = dlthis->str_head + *(u32 *)&shp->name; -+ } -+#endif -+ -+ return dlthis; -+} -+ -+/*************************************************************************** -+ * Procedure DLOAD_GetSectionInfo -+ * -+ * Parameters: -+ * minfo Handle from DLOAD_module_open for this module -+ * sectionName Pointer to the string name of the section desired -+ * sectionInfo Address of a section info structure pointer to be -+ * initialized -+ * -+ * Effect: -+ * Finds the specified section in the module information, and initializes -+ * the provided struct LDR_SECTION_INFO pointer. 
-+ * -+ * Returns: -+ * true for success, false for section not found -+ **************************************************************************/ -+int DLOAD_GetSectionInfo(DLOAD_module_info minfo, const char *sectionName, -+ const struct LDR_SECTION_INFO **const sectionInfo) -+{ -+ struct dload_state *dlthis; -+ struct LDR_SECTION_INFO *shp; -+ u16 sec; -+ -+ dlthis = (struct dload_state *)minfo; -+ if (!dlthis) -+ return false; -+ -+ for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { -+ shp = DOFFSEC_IS_LDRSEC(&dlthis->sect_hdrs[sec]); -+ if (strcmp(sectionName, shp->name) == 0) { -+ *sectionInfo = shp; -+ return true; -+ } -+ } -+ -+ return false; -+} -+ -+#define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32)) -+#define REVERSE_REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303) -+ -+/************************************************************************** -+ * Procedure DLOAD_GetSection -+ * -+ * Parameters: -+ * minfo Handle from DLOAD_module_open for this module -+ * sectionInfo Pointer to a section info structure for the desired -+ * section -+ * sectionData Buffer to contain the section initialized data -+ * -+ * Effect: -+ * Copies the initialized data for the specified section into the -+ * supplied buffer. -+ * -+ * Returns: -+ * true for success, false for section not found -+ **************************************************************************/ -+int DLOAD_GetSection(DLOAD_module_info minfo, -+ const struct LDR_SECTION_INFO *sectionInfo, void *sectionData) -+{ -+ struct dload_state *dlthis; -+ u32 pos; -+ struct doff_scnhdr_t *sptr = NULL; -+ s32 nip; -+ struct image_packet_t ipacket; -+ s32 ipsize; -+ u32 checks; -+ s8 *dest = (s8 *)sectionData; -+ -+ dlthis = (struct dload_state *)minfo; -+ if (!dlthis) -+ return false; -+ sptr = LDRSEC_IS_DOFFSEC(sectionInfo); -+ if (sptr == NULL) -+ return false; -+ -+ /* skip ahead to the start of the first packet */ -+ pos = BYTE_TO_HOST(DOFF_ALIGN((u32) sptr->ds_first_pkt_offset)); -+ if (dlthis->strm->set_file_posn(dlthis->strm, pos) != 0) { -+ dload_error(dlthis, E_SEEK, pos); -+ return false; -+ } -+ -+ nip = sptr->ds_nipacks; -+ while ((nip -= 1) >= 0) { /* for each packet */ -+ /* get the fixed header bits */ -+ if (dlthis->strm-> -+ read_buffer(dlthis->strm, &ipacket, IPH_SIZE) != IPH_SIZE) { -+ dload_error(dlthis, E_READSTRM, "image packet"); -+ return false; -+ } -+ /* reorder the header if need be */ -+ if (dlthis->reorder_map) -+ dload_reorder(&ipacket, IPH_SIZE, dlthis->reorder_map); -+ -+ /* Now read the packet image bits. Note: round the size up to -+ * the next multiple of 4 bytes; this is what checksum -+ * routines want. */ -+ ipsize = BYTE_TO_HOST(DOFF_ALIGN(ipacket.i_packet_size)); -+ if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) { -+ dload_error(dlthis, E_ISIZ, ipsize); -+ return false; -+ } -+ if (dlthis->strm->read_buffer -+ (dlthis->strm, dest, ipsize) != ipsize) { -+ dload_error(dlthis, E_READSTRM, "image packet"); -+ return false; -+ } -+ /* reorder the bytes if need be */ -+#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16) -+ if (dlthis->reorder_map) -+ dload_reorder(dest, ipsize, dlthis->reorder_map); -+ -+ checks = dload_checksum(dest, ipsize); -+#else -+ if (dlthis->dfile_hdr.df_byte_reshuffle != -+ TARGET_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) { -+ /* put image bytes in big-endian order, not PC order */ -+ dload_reorder(dest, ipsize, -+ TARGET_ORDER(dlthis->dfile_hdr. 
-+ df_byte_reshuffle)); -+ } -+#if TARGET_AU_BITS > 8 -+ checks = dload_reverse_checksum_16(dest, ipsize); -+#else -+ checks = dload_reverse_checksum(dest, ipsize); -+#endif -+#endif -+ checks += dload_checksum(&ipacket, IPH_SIZE); -+ -+ /* NYI: unable to handle relocation entries here. Reloc -+ * entries referring to fields that span the packet boundaries -+ * may result in packets of sizes that are not multiple of -+ * 4 bytes. Our checksum implementation works on 32-bit words -+ * only. */ -+ if (ipacket.i_num_relocs != 0) { -+ dload_error(dlthis, E_RELOC, ipsize); -+ return false; -+ } -+ -+ if (~checks) { -+ dload_error(dlthis, E_CHECKSUM, "image packet"); -+ return false; -+ } -+ -+ /*Advance destination ptr by the size of the just-read packet*/ -+ dest += ipsize; -+ } -+ -+ return true; -+} -+ -+/*************************************************************************** -+ * Procedure DLOAD_module_close -+ * -+ * Parameters: -+ * minfo Handle from DLOAD_module_open for this module -+ * -+ * Effect: -+ * Releases any storage associated with the module handle. On return, -+ * the module handle is invalid. -+ * -+ * Returns: -+ * Zero for success. On error, the number of errors detected is returned. -+ * Individual errors are reported using syms->Error_Report(), where syms was -+ * an argument to DLOAD_module_open -+ **************************************************************************/ -+void DLOAD_module_close(DLOAD_module_info minfo) -+{ -+ struct dload_state *dlthis; -+ -+ dlthis = (struct dload_state *)minfo; -+ if (!dlthis) -+ return; -+ -+ if (dlthis->str_head) -+ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->str_head); -+ -+ if (dlthis->sect_hdrs) -+ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->sect_hdrs); -+ -+#if BITS_PER_AU > BITS_PER_BYTE -+ if (dlthis->xstrings) -+ dlthis->mysym->Deallocate(dlthis->mysym, dlthis->xstrings); -+ -+#endif -+ -+ dlthis->mysym->Deallocate(dlthis->mysym, dlthis); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/header.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/header.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/header.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/header.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,59 @@ -+/* -+ * header.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+ -+#define TRUE 1 -+#define FALSE 0 -+#ifndef NULL -+#define NULL 0 -+#endif -+ -+#include -+#define DL_STRCMP strcmp -+ -+/* maximum parenthesis nesting in relocation stack expressions */ -+#define STATIC_EXPR_STK_SIZE 10 -+ -+#include -+typedef unsigned int uint_least32_t; -+typedef unsigned short int uint_least16_t; -+ -+#include "doff.h" -+#include -+#include "params.h" -+#include "dload_internal.h" -+#include "reloc_table.h" -+ -+/* -+ * Plausibility limits -+ * -+ * These limits are imposed upon the input DOFF file as a check for validity. -+ * They are hard limits, in that the load will fail if they are exceeded. 
-+ * The numbers selected are arbitrary, in that the loader implementation does -+ * not require these limits. -+ */ -+ -+/* maximum number of bytes in string table */ -+#define MAX_REASONABLE_STRINGTAB (0x100000) -+/* maximum number of code,data,etc. sections */ -+#define MAX_REASONABLE_SECTIONS (200) -+/* maximum number of linker symbols */ -+#define MAX_REASONABLE_SYMBOLS (100000) -+ -+/* shift count to align F_BIG with DLOAD_LITTLE */ -+#define ALIGN_COFF_ENDIANNESS 7 -+#define ENDIANNESS_MASK (DF_BYTE_ORDER >> ALIGN_COFF_ENDIANNESS) -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/module_list.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/module_list.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/module_list.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/module_list.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,161 @@ -+/* -+ * dspbridge/mpu_driver/src/dynload/module_list.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2008 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/*============================================================================ -+ Filename: module_list.h -+ -+ Copyright (C) 2002 Texas Instruments Incorporated -+ -+ -+ This C header file gives the layout of the data structure created by the -+ dynamic loader to describe the set of modules loaded into the DSP. -+ -+ Linked List Structure: -+ ---------------------- -+ The data structure defined here is a singly-linked list. The list -+ represents the set of modules which are currently loaded in the DSP memory. -+ The first entry in the list is a header record which contains a flag -+ representing the state of the list. The rest of the entries in the list -+ are module records. -+ -+ Global symbol _DLModules designates the first record in the list (i.e. the -+ header record). This symbol must be defined in any program that wishes to -+ use DLLview plug-in. -+ -+ String Representation: -+ ---------------------- -+ The string names of the module and its sections are stored in a block of -+ memory which follows the module record itself. The strings are ordered: -+ module name first, followed by section names in order from the first -+ section to the last. String names are tightly packed arrays of 8-bit -+ characters (two characters per 16-bit word on the C55x). Strings are -+ zero-byte-terminated. -+ -+ Creating and updating the list: -+------------------------------- -+ Upon loading a new module into the DSP memory the dynamic loader inserts a -+new module record as the first module record in the list. The fields of -+ this module record are initialized to reflect the properties of the module. -+ The dynamic loader does NOT increment the flag/counter in the list's header -+ record. -+ -+ Upon unloading a module from the DSP memory the dynamic loader removes the -+module's record from this list. The dynamic loader also increments the -+ flag/counter in the list's header record to indicate that the list has been -+ changed. 
-+ -+============================================================================*/ -+ -+#ifndef _MODULE_LIST_H_ -+#define _MODULE_LIST_H_ -+ -+#include -+ -+/* Global pointer to the modules_header structure*/ -+#define MODULES_HEADER "_DLModules" -+#define MODULES_HEADER_NO_UNDERSCORE "DLModules" -+ -+/* Initial version number*/ -+#define INIT_VERSION 1 -+ -+/* Verification number -- to be recorded in each module record */ -+#define VERIFICATION 0x79 -+ -+/* forward declarations */ -+struct dll_module; -+struct dll_sect; -+ -+/* the first entry in the list is the modules_header record; -+ * its address is contained in the global _DLModules pointer */ -+struct modules_header { -+ -+ /* Address of the first dll_module record in the list or NULL. -+ Note: for C55x this is a word address (C55x data is word-addressable)*/ -+ u32 first_module; -+ -+ /* Combined storage size (in target addressable units) of the -+ * dll_module record which follows this header record, or zero -+ * if the list is empty. This size includes the module's string table. -+ * Note: for C55x the unit is a 16-bit word */ -+ u16 first_module_size; -+ -+ /* Counter is incremented whenever a module record is removed from -+ * the list */ -+ u16 update_flag; -+ -+} ; -+ -+/* for each 32-bits in above structure, a bitmap, LSB first, whose bits are: -+ * 0 => a 32-bit value, 1 => 2 16-bit values */ -+#define MODULES_HEADER_BITMAP 0x2 /* swapping bitmap for type modules_header */ -+ -+/* information recorded about each section in a module */ -+struct dll_sect { -+ -+ /* Load-time address of the section. -+ * Note: for C55x this is a byte address for program sections, and -+ * a word address for data sections. C55x program memory is -+ * byte-addressable, while data memory is word-addressable. */ -+ u32 sect_load_adr; -+ -+ /* Run-time address of the section. -+ * Note 1: for C55x this is a byte address for program sections, and -+ * a word address for data sections. -+ * Note 2: for C55x two most significant bits of this field indicate -+ * the section type: '00' for a code section, '11' for a data section -+ * (C55 addresses are really only 24-bits wide). */ -+ u32 sect_run_adr; -+ -+} ; -+ -+/* the rest of the entries in the list are module records */ -+struct dll_module { -+ -+ /* Address of the next dll_module record in the list, or 0 if this is -+ * the last record in the list. -+ * Note: for C55x this is a word address (C55x data is -+ * word-addressable) */ -+ u32 next_module; -+ -+ /* Combined storage size (in target addressable units) of the -+ * dll_module record which follows this one, or zero if this is the -+ * last record in the list. This size includes the module's string -+ * table. -+ * Note: for C55x the unit is a 16-bit word. 
*/ -+ u16 next_module_size; -+ -+ /* version number of the tooling; set to INIT_VERSION for Phase 1 */ -+ u16 version; -+ -+ /* the verification word; set to VERIFICATION */ -+ u16 verification; -+ -+ /* Number of sections in the sects array */ -+ u16 num_sects; -+ -+ /* Module's "unique" id; copy of the timestamp from the host -+ * COFF file */ -+ u32 timestamp; -+ -+ /* Array of num_sects elements of the module's section records */ -+ struct dll_sect sects[1]; -+} ; -+ -+/* for each 32 bits in above structure, a bitmap, LSB first, whose bits are: -+ * 0 => a 32-bit value, 1 => 2 16-bit values */ -+#define DLL_MODULE_BITMAP 0x6 /* swapping bitmap for type dll_module */ -+ -+#endif /* _MODULE_LIST_H_ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/params.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/params.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/params.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/params.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,231 @@ -+/* -+ * params.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+ -+/****************************************************************************** -+ * -+ * This file defines host and target properties for all machines -+ * supported by the dynamic loader. To be tedious... -+ * -+ * host == the machine on which the dynamic loader runs -+ * target == the machine that the dynamic loader is loading -+ * -+ * Host and target may or may not be the same, depending upon the particular -+ * use. -+ *****************************************************************************/ -+ -+/****************************************************************************** -+ * -+ * Host Properties -+ * -+ *****************************************************************************/ -+ -+#define BITS_PER_BYTE 8 /* bits in the standard PC/SUN byte */ -+#define LOG_BITS_PER_BYTE 3 /* log base 2 of same */ -+#define BYTE_MASK ((1U<> 16)) -+#define SWAP16BY8(zz) (((zz) << 8) | ((zz) >> 8)) -+ -+/* !! don't be tempted to insert type definitions here; use !! 
*/ -+ -+/****************************************************************************** -+ * -+ * Target Properties -+ * -+ *****************************************************************************/ -+ -+ -+/*--------------------------------------------------------------------------*/ -+/* TMS320C6x Target Specific Parameters (byte-addressable) */ -+/*--------------------------------------------------------------------------*/ -+#if TMS32060 -+#define MEMORG 0x0L /* Size of configured memory */ -+#define MEMSIZE 0x0L /* (full address space) */ -+ -+#define CINIT_ALIGN 8 /* alignment of cinit record in TDATA AUs */ -+#define CINIT_COUNT 4 /* width of count field in TDATA AUs */ -+#define CINIT_ADDRESS 4 /* width of address field in TDATA AUs */ -+#define CINIT_PAGE_BITS 0 /* Number of LSBs of address that -+ * are page number */ -+ -+#define LENIENT_SIGNED_RELEXPS 0 /* DOES SIGNED ALLOW MAX UNSIGNED */ -+ -+#undef TARGET_ENDIANNESS /* may be big or little endian */ -+ -+/* align a target address to a word boundary */ -+#define TARGET_WORD_ALIGN(zz) (((zz) + 0x3) & -0x4) -+#endif -+ -+ -+/*-------------------------------------------------------------------------- -+ * -+ * DEFAULT SETTINGS and DERIVED PROPERTIES -+ * -+ * This section establishes defaults for values not specified above -+ *--------------------------------------------------------------------------*/ -+#ifndef TARGET_AU_BITS -+#define TARGET_AU_BITS 8 /* width of the target addressable unit */ -+#define LOG_TARGET_AU_BITS 3 /* log2 of same */ -+#endif -+ -+#ifndef CINIT_DEFAULT_PAGE -+#define CINIT_DEFAULT_PAGE 0 /* default .cinit page number */ -+#endif -+ -+#ifndef DATA_RUN2LOAD -+#define DATA_RUN2LOAD(zz) (zz) /* translate data run address to load address */ -+#endif -+ -+#ifndef DBG_LIST_PAGE -+#define DBG_LIST_PAGE 0 /* page number for .dllview section */ -+#endif -+ -+#ifndef TARGET_WORD_ALIGN -+/* align a target address to a word boundary */ -+#define TARGET_WORD_ALIGN(zz) (zz) -+#endif -+ -+#ifndef TDATA_TO_TADDR -+#define TDATA_TO_TADDR(zz) (zz) /* target data address to target AU address */ -+#define TADDR_TO_TDATA(zz) (zz) /* target AU address to target data address */ -+#define TDATA_AU_BITS TARGET_AU_BITS /* bits per data AU */ -+#define LOG_TDATA_AU_BITS LOG_TARGET_AU_BITS -+#endif -+ -+/* -+ * -+ * Useful properties and conversions derived from the above -+ * -+ */ -+ -+/* -+ * Conversions between host and target addresses -+ */ -+#if LOG_BITS_PER_AU == LOG_TARGET_AU_BITS -+/* translate target addressable unit to host address */ -+#define TADDR_TO_HOST(x) (x) -+/* translate host address to target addressable unit */ -+#define HOST_TO_TADDR(x) (x) -+#elif LOG_BITS_PER_AU > LOG_TARGET_AU_BITS -+#define TADDR_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS)) -+#define HOST_TO_TADDR(x) ((x) << (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS)) -+#else -+#define TADDR_TO_HOST(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU)) -+#define HOST_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU)) -+#endif -+ -+#if LOG_BITS_PER_AU == LOG_TDATA_AU_BITS -+/* translate target addressable unit to host address */ -+#define TDATA_TO_HOST(x) (x) -+/* translate host address to target addressable unit */ -+#define HOST_TO_TDATA(x) (x) -+/* translate host address to target addressable unit, round up */ -+#define HOST_TO_TDATA_ROUND(x) (x) -+/* byte offset to host offset, rounded up for TDATA size */ -+#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x) -+#elif LOG_BITS_PER_AU > LOG_TDATA_AU_BITS -+#define TDATA_TO_HOST(x) 
((x) >> (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) -+#define HOST_TO_TDATA(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) -+#define HOST_TO_TDATA_ROUND(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) -+#define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x) -+#else -+#define TDATA_TO_HOST(x) ((x) << (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) -+#define HOST_TO_TDATA(x) ((x) >> (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) -+#define HOST_TO_TDATA_ROUND(x) (((x) +\ -+ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))-1) >>\ -+ (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) -+#define BYTE_TO_HOST_TDATA_ROUND(x) (BYTE_TO_HOST((x) +\ -+ (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_BYTE))-1) &\ -+ -(TDATA_AU_BITS/BITS_PER_AU)) -+#endif -+ -+/* -+ * Input in DOFF format is always expresed in bytes, regardless of loading host -+ * so we wind up converting from bytes to target and host units even when the -+ * host is not a byte machine. -+ */ -+#if LOG_BITS_PER_AU == LOG_BITS_PER_BYTE -+#define BYTE_TO_HOST(x) (x) -+#define BYTE_TO_HOST_ROUND(x) (x) -+#define HOST_TO_BYTE(x) (x) -+#elif LOG_BITS_PER_AU >= LOG_BITS_PER_BYTE -+#define BYTE_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) -+#define BYTE_TO_HOST_ROUND(x) ((x + (BITS_PER_AU/BITS_PER_BYTE-1)) >>\ -+ (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) -+#define HOST_TO_BYTE(x) ((x) << (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) -+#else -+/* lets not try to deal with sub-8-bit byte machines */ -+#endif -+ -+#if LOG_TARGET_AU_BITS == LOG_BITS_PER_BYTE -+/* translate target addressable unit to byte address */ -+#define TADDR_TO_BYTE(x) (x) -+/* translate byte address to target addressable unit */ -+#define BYTE_TO_TADDR(x) (x) -+#elif LOG_TARGET_AU_BITS > LOG_BITS_PER_BYTE -+#define TADDR_TO_BYTE(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE)) -+#define BYTE_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE)) -+#else -+/* lets not try to deal with sub-8-bit byte machines */ -+#endif -+ -+#ifdef _BIG_ENDIAN -+#define HOST_ENDIANNESS 1 -+#else -+#define HOST_ENDIANNESS 0 -+#endif -+ -+#ifdef TARGET_ENDIANNESS -+#define TARGET_ENDIANNESS_DIFFERS(rtend) (HOST_ENDIANNESS^TARGET_ENDIANNESS) -+#elif HOST_ENDIANNESS -+#define TARGET_ENDIANNESS_DIFFERS(rtend) (!(rtend)) -+#else -+#define TARGET_ENDIANNESS_DIFFERS(rtend) (rtend) -+#endif -+ -+/* the unit in which we process target image data */ -+#if TARGET_AU_BITS <= 8 -+typedef u8 TgtAU_t; -+#elif TARGET_AU_BITS <= 16 -+typedef u16 TgtAU_t; -+#else -+typedef u32 TgtAU_t; -+#endif -+ -+/* size of that unit */ -+#if TARGET_AU_BITS < BITS_PER_AU -+#define TGTAU_BITS BITS_PER_AU -+#define LOG_TGTAU_BITS LOG_BITS_PER_AU -+#else -+#define TGTAU_BITS TARGET_AU_BITS -+#define LOG_TGTAU_BITS LOG_TARGET_AU_BITS -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/reloc.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/reloc.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,425 @@ -+/* -+ * reloc.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include "header.h" -+ -+#if TMS32060 -+/* the magic symbol for the start of BSS */ -+static const char BSSSYMBOL[] = {".bss"}; -+#endif -+ -+#if TMS32060 -+#include "reloc_table_c6000.c" -+#endif -+ -+#if TMS32060 -+/* From coff.h - ignore these relocation operations */ -+#define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */ -+#define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */ -+#define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */ -+#endif -+ -+/************************************************************************** -+ * Procedure dload_unpack -+ * -+ * Parameters: -+ * data pointer to storage unit containing lowest host address of -+ * image data -+ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(RVALUE)*BITS_PER_AU -+ * offset Offset from LSB, 0 <= offset < BITS_PER_AU -+ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) -+ * -+ * Effect: -+ * Extracts the specified field and returns it. -+ **************************************************************************/ -+RVALUE dload_unpack(struct dload_state *dlthis, TgtAU_t *data, int fieldsz, -+ int offset, unsigned sgn) -+{ -+ register RVALUE objval; -+ register int shift, direction; -+ register TgtAU_t *dp = data; -+ -+ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value*/ -+ /* * collect up enough bits to contain the desired field */ -+ if (TARGET_BIG_ENDIAN) { -+ dp += (fieldsz + offset) >> LOG_TGTAU_BITS; -+ direction = -1; -+ } else -+ direction = 1; -+ objval = *dp >> offset; -+ shift = TGTAU_BITS - offset; -+ while (shift <= fieldsz) { -+ dp += direction; -+ objval += (RVALUE)*dp << shift; -+ shift += TGTAU_BITS; -+ } -+ -+ /* * sign or zero extend the value appropriately */ -+ if (sgn == ROP_UNS) -+ objval &= (2 << fieldsz) - 1; -+ else { -+ shift = sizeof(RVALUE) * BITS_PER_AU-1 - fieldsz; -+ objval = (objval << shift) >> shift; -+ } -+ -+ return objval; -+ -+} /* dload_unpack */ -+ -+ -+/************************************************************************** -+ * Procedure dload_repack -+ * -+ * Parameters: -+ * val Value to insert -+ * data Pointer to storage unit containing lowest host address of -+ * image data -+ * fieldsz Size of bit field, 0 < fieldsz <= sizeof(RVALUE)*BITS_PER_AU -+ * offset Offset from LSB, 0 <= offset < BITS_PER_AU -+ * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) -+ * -+ * Effect: -+ * Stuffs the specified value in the specified field. Returns 0 for -+ * success -+ * or 1 if the value will not fit in the specified field according to the -+ * specified signedness rule. 
-+ **************************************************************************/ -+static const unsigned char ovf_limit[] = {1, 2, 2}; -+int dload_repack(struct dload_state *dlthis, RVALUE val, TgtAU_t *data, -+ int fieldsz, int offset, unsigned sgn) -+{ -+ register URVALUE objval, mask; -+ register int shift, direction; -+ register TgtAU_t *dp = data; -+ -+ -+ fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */ -+ /* clip the bits */ -+ mask = ((UINT32_C(2) << fieldsz) - 1); -+ objval = (val & mask); -+ /* * store the bits through the specified mask */ -+ if (TARGET_BIG_ENDIAN) { -+ dp += (fieldsz + offset) >> LOG_TGTAU_BITS; -+ direction = -1; -+ } else -+ direction = 1; -+ -+ /* insert LSBs */ -+ *dp = (*dp & ~(mask << offset)) + (objval << offset); -+ shift = TGTAU_BITS-offset; -+ /* align mask and objval with AU boundary */ -+ objval >>= shift; -+ mask >>= shift; -+ -+ while (mask) { -+ dp += direction; -+ *dp = (*dp & ~mask) + objval; -+ objval >>= TGTAU_BITS; -+ mask >>= TGTAU_BITS; -+ } -+ -+ /* -+ * check for overflow -+ */ -+ if (sgn) { -+ unsigned tmp = (val >> fieldsz) + (sgn & 0x1); -+ if (tmp > ovf_limit[sgn-1]) -+ return 1; -+ } -+ return 0; -+ -+} /* dload_repack */ -+ -+/* lookup table for the scaling amount in a C6x instruction */ -+#if TMS32060 -+#define SCALE_BITS 4 /* there are 4 bits in the scale field */ -+#define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */ -+static const u8 C60_Scale[SCALE_MASK+1] = { -+ 1, 0, 0, 0, 1, 1, 2, 2 -+}; -+#endif -+ -+/************************************************************************** -+ * Procedure dload_relocate -+ * -+ * Parameters: -+ * data Pointer to base of image data -+ * rp Pointer to relocation operation -+ * -+ * Effect: -+ * Performs the specified relocation operation -+ **************************************************************************/ -+void dload_relocate(struct dload_state *dlthis, TgtAU_t *data, -+ struct reloc_record_t *rp) -+{ -+ RVALUE val = 0; -+ RVALUE reloc_amt = 0; -+ unsigned int fieldsz = 0; -+ unsigned int offset = 0; -+ unsigned int reloc_info = 0; -+ unsigned int reloc_action = 0; -+ register int rx = 0; -+ RVALUE *stackp = NULL; -+ int top; -+ struct Local_Symbol *svp = NULL; -+#ifdef RFV_SCALE -+ unsigned int scale = 0; -+#endif -+ -+ rx = HASH_FUNC(rp->r_type); -+ while (rop_map1[rx] != rp->r_type) { -+ rx = HASH_L(rop_map2[rx]); -+ if (rx < 0) { -+#if TMS32060 -+ switch (rp->r_type) { -+ case R_C60ALIGN: -+ case R_C60NOCMP: -+ case R_C60FPHEAD: -+ /* Ignore these reloc types and return */ -+ break; -+ default: -+ /* Unknown reloc type, print error and return */ -+ dload_error(dlthis, "Bad coff operator 0x%x", rp->r_type); -+ } -+#else -+ dload_error(dlthis, "Bad coff operator 0x%x", rp->r_type); -+#endif -+ return; -+ } -+ } -+ rx = HASH_I(rop_map2[rx]); -+ if ((rx < (sizeof(rop_action)/sizeof(uint_least16_t))) -+ && (rx < (sizeof(rop_info)/sizeof(uint_least16_t))) && (rx > 0)) { -+ reloc_action = rop_action[rx]; reloc_info = rop_info[rx]; -+ } else { -+ dload_error(dlthis, "Buffer Overflow - Array Index Out of Bounds"); -+ } -+ -+ /* Compute the relocation amount for the referenced symbol, if any */ -+ reloc_amt = rp->r_uval; -+ if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */ -+ if ((u32)rp->r_symndx < dlthis->dfile_hdr.df_no_syms) { -+ /* real symbol reference */ -+ svp = &dlthis->local_symtab[rp->r_symndx]; -+ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? 
-+ svp->delta : svp->value; -+ } -+ /* reloc references current section */ -+ else if (rp->r_symndx == -1) -+ reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? -+ dlthis->delta_runaddr : dlthis->image_secn->run_addr; -+ } /* relocation uses a symbol reference */ -+ /* Handle stack adjustment */ -+ val = 0; -+ top = RFV_STK(reloc_info); -+ if (top) { -+ top += dlthis->relstkidx - RSTK_UOP; -+ if (top >= STATIC_EXPR_STK_SIZE) { -+ dload_error(dlthis, -+ "Expression stack overflow in %s at offset " -+ FMT_UI32, dlthis->image_secn->name, -+ rp->r_vaddr + dlthis->image_offset); -+ return; -+ } -+ val = dlthis->relstk[dlthis->relstkidx]; -+ dlthis->relstkidx = top; -+ stackp = &dlthis->relstk[top]; -+ } -+ /* Derive field position and size, if we need them */ -+ if (reloc_info & ROP_RW) { /* read or write action in our future */ -+ fieldsz = RFV_WIDTH(reloc_action); -+ if (fieldsz) { /* field info from table */ -+ offset = RFV_POSN(reloc_action); -+ if (TARGET_BIG_ENDIAN) -+ /* make sure r_vaddr is the lowest target -+ * address containing bits */ -+ rp->r_vaddr += RFV_BIGOFF(reloc_info); -+ } else { /* field info from relocation op */ -+ fieldsz = rp->r_fieldsz; offset = rp->r_offset; -+ if (TARGET_BIG_ENDIAN) -+ /* make sure r_vaddr is the lowest target -+ address containing bits */ -+ rp->r_vaddr += (rp->r_wordsz - offset - fieldsz) -+ >> LOG_TARGET_AU_BITS; -+ } -+ data = (TgtAU_t *)((char *)data + TADDR_TO_HOST(rp->r_vaddr)); -+ /* compute lowest host location of referenced data */ -+#if BITS_PER_AU > TARGET_AU_BITS -+ /* conversion from target address to host address may lose -+ address bits; add loss to offset */ -+ if (TARGET_BIG_ENDIAN) { -+ offset += -((rp->r_vaddr << LOG_TARGET_AU_BITS) + -+ offset + fieldsz) & -+ (BITS_PER_AU-TARGET_AU_BITS); -+ } else { -+ offset += (rp->r_vaddr << LOG_TARGET_AU_BITS) & -+ (BITS_PER_AU-1); -+ } -+#endif -+#ifdef RFV_SCALE -+ scale = RFV_SCALE(reloc_info); -+#endif -+ } -+ /* read the object value from the current image, if so ordered */ -+ if (reloc_info & ROP_R) { /* relocation reads current image value */ -+ val = dload_unpack(dlthis, data, fieldsz, offset, -+ RFV_SIGN(reloc_info)); -+#ifdef RFV_SCALE -+ val <<= scale; -+#endif -+ } -+ /* perform the necessary arithmetic */ -+ switch (RFV_ACTION(reloc_action)) { /* relocation actions */ -+ case RACT_VAL: -+ break; -+ case RACT_ASGN: -+ val = reloc_amt; -+ break; -+ case RACT_ADD: -+ val += reloc_amt; -+ break; -+ case RACT_PCR: -+ /*----------------------------------------------------------- -+ * Handle special cases of jumping from absolute sections -+ * (special reloc type) or to absolute destination -+ * (symndx == -1). In either case, set the appropriate -+ * relocation amount to 0. 
-+ *-----------------------------------------------------------*/ -+ if (rp->r_symndx == -1) -+ reloc_amt = 0; -+ val += reloc_amt - dlthis->delta_runaddr; -+ break; -+ case RACT_ADDISP: -+ val += rp->r_disp + reloc_amt; -+ break; -+ case RACT_ASGPC: -+ val = dlthis->image_secn->run_addr + reloc_amt; -+ break; -+ case RACT_PLUS: -+ if (stackp != NULL) -+ val += *stackp; -+ break; -+ case RACT_SUB: -+ if (stackp != NULL) -+ val = *stackp - val; -+ break; -+ case RACT_NEG: -+ val = -val; -+ break; -+ case RACT_MPY: -+ if (stackp != NULL) -+ val *= *stackp; -+ break; -+ case RACT_DIV: -+ if (stackp != NULL) -+ val = *stackp / val; -+ break; -+ case RACT_MOD: -+ if (stackp != NULL) -+ val = *stackp % val; -+ break; -+ case RACT_SR: -+ if (val >= sizeof(RVALUE) * BITS_PER_AU) -+ val = 0; -+ else if (stackp != NULL) -+ val = (URVALUE)*stackp >> val; -+ break; -+ case RACT_ASR: -+ if (val >= sizeof(RVALUE)*BITS_PER_AU) -+ val = sizeof(RVALUE)*BITS_PER_AU - 1; -+ else if (stackp != NULL) -+ val = *stackp >> val; -+ break; -+ case RACT_SL: -+ if (val >= sizeof(RVALUE)*BITS_PER_AU) -+ val = 0; -+ else if (stackp != NULL) -+ val = *stackp << val; -+ break; -+ case RACT_AND: -+ if (stackp != NULL) -+ val &= *stackp; -+ break; -+ case RACT_OR: -+ if (stackp != NULL) -+ val |= *stackp; -+ break; -+ case RACT_XOR: -+ if (stackp != NULL) -+ val ^= *stackp; -+ break; -+ case RACT_NOT: -+ val = ~val; -+ break; -+#if TMS32060 -+ case RACT_C6SECT: -+ /* actually needed address of secn containing symbol */ -+ if (svp != NULL) { -+ if (rp->r_symndx >= 0) -+ if (svp->secnn > 0) -+ reloc_amt = dlthis->ldr_sections -+ [svp->secnn-1].run_addr; -+ } -+ /* !!! FALL THRU !!! */ -+ case RACT_C6BASE: -+ if (dlthis->bss_run_base == 0) { -+ struct dynload_symbol *symp; -+ symp = dlthis->mysym->Find_Matching_Symbol -+ (dlthis->mysym, BSSSYMBOL); -+ /* lookup value of global BSS base */ -+ if (symp) -+ dlthis->bss_run_base = symp->value; -+ else -+ dload_error(dlthis, -+ "Global BSS base referenced in %s offset"\ -+ FMT_UI32 " but not defined", -+ dlthis->image_secn->name, -+ rp->r_vaddr + dlthis->image_offset); -+ } -+ reloc_amt -= dlthis->bss_run_base; -+ /* !!! FALL THRU !!! 
*/ -+ case RACT_C6DSPL: -+ /* scale factor determined by 3 LSBs of field */ -+ scale = C60_Scale[val & SCALE_MASK]; -+ offset += SCALE_BITS; -+ fieldsz -= SCALE_BITS; -+ val >>= SCALE_BITS; /* ignore the scale field hereafter */ -+ val <<= scale; -+ val += reloc_amt; /* do the usual relocation */ -+ if (((1 << scale)-1) & val) -+ dload_error(dlthis, -+ "Unaligned reference in %s offset " FMT_UI32, -+ dlthis->image_secn->name, -+ rp->r_vaddr + dlthis->image_offset); -+ break; -+#endif -+ } /* relocation actions */ -+ /* * Put back result as required */ -+ if (reloc_info & ROP_W) { /* relocation writes image value */ -+#ifdef RFV_SCALE -+ val >>= scale; -+#endif -+ if (dload_repack(dlthis, val, data, fieldsz, offset, -+ RFV_SIGN(reloc_info))) { -+ dload_error(dlthis, "Relocation value " FMT_UI32 -+ " overflows %d bits in %s offset " FMT_UI32, val, -+ fieldsz, dlthis->image_secn->name, -+ dlthis->image_offset + rp->r_vaddr); -+ } -+ } else if (top) -+ *stackp = val; -+} /* reloc_value */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table_c6000.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/reloc_table_c6000.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table_c6000.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/reloc_table_c6000.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,258 @@ -+/* -+ * reloc_table_c6000.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+ -+/* Tables generated for c6000 */ -+ -+#define HASH_FUNC(zz) (((((zz) + 1) * UINT32_C(1845)) >> 11) & 63) -+#define HASH_L(zz) ((zz) >> 8) -+#define HASH_I(zz) ((zz) & 0xFF) -+ -+static const u16 rop_map1[] = { -+ 0, -+ 1, -+ 2, -+ 20, -+ 4, -+ 5, -+ 6, -+ 15, -+ 80, -+ 81, -+ 82, -+ 83, -+ 84, -+ 85, -+ 86, -+ 87, -+ 17, -+ 18, -+ 19, -+ 21, -+ 16, -+ 16394, -+ 16404, -+ 65535, -+ 65535, -+ 65535, -+ 65535, -+ 65535, -+ 65535, -+ 32, -+ 65535, -+ 65535, -+ 65535, -+ 65535, -+ 65535, -+ 65535, -+ 40, -+ 112, -+ 113, -+ 65535, -+ 16384, -+ 16385, -+ 16386, -+ 16387, -+ 16388, -+ 16389, -+ 16390, -+ 16391, -+ 16392, -+ 16393, -+ 16395, -+ 16396, -+ 16397, -+ 16398, -+ 16399, -+ 16400, -+ 16401, -+ 16402, -+ 16403, -+ 16405, -+ 16406, -+ 65535, -+ 65535, -+ 65535 -+}; -+ -+static const s16 rop_map2[] = { -+ -256, -+ -255, -+ -254, -+ -245, -+ -253, -+ -252, -+ -251, -+ -250, -+ -241, -+ -240, -+ -239, -+ -238, -+ -237, -+ -236, -+ 1813, -+ 5142, -+ -248, -+ -247, -+ 778, -+ -244, -+ -249, -+ -221, -+ -211, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -243, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -242, -+ -233, -+ -232, -+ -1, -+ -231, -+ -230, -+ -229, -+ -228, -+ -227, -+ -226, -+ -225, -+ -224, -+ -223, -+ 5410, -+ -220, -+ -219, -+ -218, -+ -217, -+ -216, -+ -215, -+ -214, -+ -213, -+ 5676, -+ -210, -+ -209, -+ -1, -+ -1, -+ -1 -+}; -+ -+static const u16 rop_action[] = { -+ 2560, -+ 2304, -+ 2304, -+ 2432, -+ 2432, -+ 2560, -+ 2176, -+ 2304, -+ 2560, -+ 3200, -+ 3328, -+ 3584, -+ 3456, -+ 2304, -+ 4208, -+ 20788, -+ 21812, -+ 3415, -+ 3245, -+ 2311, -+ 4359, -+ 19764, -+ 2311, -+ 3191, -+ 3280, -+ 6656, -+ 7680, -+ 8704, -+ 9728, -+ 10752, -+ 11776, -+ 12800, -+ 13824, -+ 14848, -+ 15872, -+ 16896, -+ 17920, -+ 18944, -+ 0, -+ 0, -+ 0, -+ 0, -+ 1536, -+ 1536, -+ 1536, -+ 5632, -+ 512, -+ 0 -+}; -+ -+static const u16 rop_info[] = { -+ 0, -+ 35, -+ 35, -+ 35, -+ 35, -+ 35, -+ 35, -+ 35, -+ 35, -+ 39, -+ 39, -+ 39, -+ 39, -+ 35, -+ 34, -+ 283, -+ 299, -+ 4135, -+ 4391, -+ 291, -+ 33059, -+ 283, -+ 295, -+ 4647, -+ 4135, -+ 64, -+ 64, -+ 128, -+ 64, -+ 64, -+ 64, -+ 64, -+ 64, -+ 64, -+ 64, -+ 64, -+ 64, -+ 128, -+ 201, -+ 197, -+ 74, -+ 70, -+ 208, -+ 196, -+ 200, -+ 192, -+ 192, -+ 66 -+}; -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/reloc_table.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/dynload/reloc_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/dynload/reloc_table.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,102 @@ -+/* -+ * reloc_table.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+ -+#ifndef __RELOC_TABLE_H__ -+#define __RELOC_TABLE_H__ -+/* -+ * Table of relocation operator properties -+ */ -+#include -+ -+/* How does this relocation operation access the program image? 
*/ -+#define ROP_N 0 /* does not access image */ -+#define ROP_R 1 /* read from image */ -+#define ROP_W 2 /* write to image */ -+#define ROP_RW 3 /* read from and write to image */ -+ -+/* For program image access, what are the overflow rules for the bit field? */ -+/* Beware! Procedure repack depends on this encoding */ -+#define ROP_ANY 0 /* no overflow ever, just truncate the value */ -+#define ROP_SGN 1 /* signed field */ -+#define ROP_UNS 2 /* unsigned field */ -+#define ROP_MAX 3 /* allow maximum range of either signed or unsigned */ -+ -+/* How does the relocation operation use the symbol reference */ -+#define ROP_IGN 0 /* no symbol is referenced */ -+#define ROP_LIT 0 /* use rp->r_uval literal field */ -+#define ROP_SYM 1 /* symbol value is used in relocation */ -+#define ROP_SYMD 2 /* delta value vs last link is used */ -+ -+/* How does the reloc op use the stack? */ -+#define RSTK_N 0 /* Does not use */ -+#define RSTK_POP 1 /* Does a POP */ -+#define RSTK_UOP 2 /* Unary op, stack position unaffected */ -+#define RSTK_PSH 3 /* Does a push */ -+ -+/* -+ * Computational actions performed by the dynamic loader -+ */ -+enum Dload_Actions { -+ RACT_VAL, /* don't alter the current val (from stack or mem fetch) */ -+ RACT_ASGN, /* set value to reference amount (from symbol reference) */ -+ RACT_ADD, /* add reference to value */ -+ RACT_PCR, /* add reference minus PC delta to value */ -+ RACT_ADDISP, /* add reference plus r_disp */ -+ RACT_ASGPC, /* set value to section address plus reference */ -+ -+ RACT_PLUS, /* stack + */ -+ RACT_SUB, /* stack - */ -+ RACT_NEG, /* stack unary - */ -+ -+ RACT_MPY, /* stack * */ -+ RACT_DIV, /* stack / */ -+ RACT_MOD, /* stack % */ -+ -+ RACT_SR, /* stack unsigned >> */ -+ RACT_ASR, /* stack signed >> */ -+ RACT_SL, /* stack << */ -+ RACT_AND, /* stack & */ -+ RACT_OR, /* stack | */ -+ RACT_XOR, /* stack ^ */ -+ RACT_NOT, /* stack ~ */ -+ RACT_C6SECT, /* for C60 R_SECT op */ -+ RACT_C6BASE, /* for C60 R_BASE op */ -+ RACT_C6DSPL, /* for C60 scaled 15-bit displacement */ -+ RACT_PCR23T /* for ARM Thumb long branch */ -+}; -+ -+/* -+ * macros used to extract values -+ */ -+#define RFV_POSN(aaa) ((aaa) & 0xF) -+#define RFV_WIDTH(aaa) (((aaa) >> 4) & 0x3F) -+#define RFV_ACTION(aaa) ((aaa) >> 10) -+ -+#define RFV_SIGN(iii) (((iii) >> 2) & 0x3) -+#define RFV_SYM(iii) (((iii) >> 4) & 0x3) -+#define RFV_STK(iii) (((iii) >> 6) & 0x3) -+#define RFV_ACCS(iii) ((iii) & 0x3) -+ -+#if (TMS32060) -+#define RFV_SCALE(iii) ((iii) >> 11) -+#define RFV_BIGOFF(iii) (((iii) >> 8) & 0x7) -+#else -+#define RFV_BIGOFF(iii) ((iii) >> 8) -+#endif -+ -+#endif /* __RELOC_TABLE_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gb.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gb.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gb.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gb.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,182 @@ -+/* -+ * gb.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+ -+/* -+ * ======== gb.c ======== -+ * Description: Generic bitmap operations. -+ * -+ *! Revision History -+ *! ================ -+ *! 24-Feb-2003 vp Code review updates. -+ *! 17-Dec-2002 map Fixed GB_minset(), GB_empty(), and GB_full(), -+ *! to ensure only 'len' bits are considered in the map -+ *! 18-Oct-2002 sb Ported to Linux platform. -+ *! 06-Dec-2001 jeh Fixed bug in GB_minclear(). -+ *! -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+/* ----------------------------------- This */ -+#include -+#include -+ -+typedef GB_BitNum GB_WordNum; -+ -+struct GB_TMap { -+ GB_BitNum len; -+ GB_WordNum wcnt; -+ u32 *words; -+}; -+ -+/* -+ * ======== GB_clear ======== -+ * purpose: -+ * Clears a bit in the bit map. -+ */ -+ -+void GB_clear(struct GB_TMap *map, GB_BitNum bitn) -+{ -+ u32 mask; -+ -+ mask = 1L << (bitn % BITS_PER_LONG); -+ map->words[bitn / BITS_PER_LONG] &= ~mask; -+} -+ -+/* -+ * ======== GB_create ======== -+ * purpose: -+ * Creates a bit map. -+ */ -+ -+struct GB_TMap *GB_create(GB_BitNum len) -+{ -+ struct GB_TMap *map; -+ GB_WordNum i; -+ map = (struct GB_TMap *)GS_alloc(sizeof(struct GB_TMap)); -+ if (map != NULL) { -+ map->len = len; -+ map->wcnt = len / BITS_PER_LONG + 1; -+ map->words = (u32 *)GS_alloc(map->wcnt * sizeof(u32)); -+ if (map->words != NULL) { -+ for (i = 0; i < map->wcnt; i++) -+ map->words[i] = 0L; -+ -+ } else { -+ GS_frees(map, sizeof(struct GB_TMap)); -+ map = NULL; -+ } -+ } -+ -+ return map; -+} -+ -+/* -+ * ======== GB_delete ======== -+ * purpose: -+ * Frees a bit map. -+ */ -+ -+void GB_delete(struct GB_TMap *map) -+{ -+ GS_frees(map->words, map->wcnt * sizeof(u32)); -+ GS_frees(map, sizeof(struct GB_TMap)); -+} -+ -+/* -+ * ======== GB_findandset ======== -+ * purpose: -+ * Finds a free bit and sets it. -+ */ -+GB_BitNum GB_findandset(struct GB_TMap *map) -+{ -+ GB_BitNum bitn; -+ -+ bitn = GB_minclear(map); -+ -+ if (bitn != GB_NOBITS) -+ GB_set(map, bitn); -+ -+ return bitn; -+} -+ -+/* -+ * ======== GB_minclear ======== -+ * purpose: -+ * returns the location of the first unset bit in the bit map. -+ */ -+GB_BitNum GB_minclear(struct GB_TMap *map) -+{ -+ GB_BitNum bit_location = 0; -+ GB_BitNum bitAcc = 0; -+ GB_WordNum i; -+ GB_BitNum bit; -+ u32 *word; -+ -+ for (word = map->words, i = 0; i < map->wcnt; word++, i++) { -+ if (~*word) { -+ for (bit = 0; bit < BITS_PER_LONG; bit++, bitAcc++) { -+ if (bitAcc == map->len) -+ return GB_NOBITS; -+ -+ if (~*word & (1L << bit)) { -+ bit_location = i * BITS_PER_LONG + bit; -+ return bit_location; -+ } -+ -+ } -+ } else { -+ bitAcc += BITS_PER_LONG; -+ } -+ } -+ -+ return GB_NOBITS; -+} -+ -+/* -+ * ======== GB_set ======== -+ * purpose: -+ * Sets a bit in the bit map. -+ */ -+ -+void GB_set(struct GB_TMap *map, GB_BitNum bitn) -+{ -+ u32 mask; -+ -+ mask = 1L << (bitn % BITS_PER_LONG); -+ map->words[bitn / BITS_PER_LONG] |= mask; -+} -+ -+/* -+ * ======== GB_test ======== -+ * purpose: -+ * Returns true if the bit is set in the specified location. -+ */ -+ -+bool GB_test(struct GB_TMap *map, GB_BitNum bitn) -+{ -+ bool state; -+ u32 mask; -+ u32 word; -+ -+ mask = 1L << (bitn % BITS_PER_LONG); -+ word = map->words[bitn / BITS_PER_LONG]; -+ state = word & mask ? 
TRUE : FALSE; -+ -+ return state; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gh.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gh.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gh.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gh.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,191 @@ -+/* -+ * gh.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== gh.c ======== -+ */ -+ -+#include -+ -+#include -+ -+#include -+ -+#include -+ -+struct Elem { -+ struct Elem *next; -+ u8 data[1]; -+}; -+ -+struct GH_THashTab { -+ u16 maxBucket; -+ u16 valSize; -+ struct Elem **buckets; -+ u16(*hash) (void *, u16); -+ bool(*match) (void *, void *); -+ void(*delete) (void *); -+}; -+ -+static void Nop(void *p); -+static s32 curInit; -+static void myfree(void *ptr, s32 size); -+ -+/* -+ * ======== GH_create ======== -+ */ -+ -+struct GH_THashTab *GH_create(u16 maxBucket, u16 valSize, -+ u16(*hash)(void *, u16), bool(*match)(void *, void *), -+ void(*delete)(void *)) -+{ -+ struct GH_THashTab *hashTab; -+ u16 i; -+ hashTab = (struct GH_THashTab *)GS_alloc(sizeof(struct GH_THashTab)); -+ if (hashTab == NULL) -+ return NULL; -+ hashTab->maxBucket = maxBucket; -+ hashTab->valSize = valSize; -+ hashTab->hash = hash; -+ hashTab->match = match; -+ hashTab->delete = delete == NULL ? 
Nop : delete; -+ -+ hashTab->buckets = (struct Elem **) -+ GS_alloc(sizeof(struct Elem *) * maxBucket); -+ if (hashTab->buckets == NULL) { -+ GH_delete(hashTab); -+ return NULL; -+ } -+ -+ for (i = 0; i < maxBucket; i++) -+ hashTab->buckets[i] = NULL; -+ -+ return hashTab; -+} -+ -+/* -+ * ======== GH_delete ======== -+ */ -+void GH_delete(struct GH_THashTab *hashTab) -+{ -+ struct Elem *elem, *next; -+ u16 i; -+ -+ if (hashTab != NULL) { -+ if (hashTab->buckets != NULL) { -+ for (i = 0; i < hashTab->maxBucket; i++) { -+ for (elem = hashTab->buckets[i]; elem != NULL; -+ elem = next) { -+ next = elem->next; -+ (*hashTab->delete) (elem->data); -+ myfree(elem, sizeof(struct Elem) - 1 + -+ hashTab->valSize); -+ } -+ } -+ -+ myfree(hashTab->buckets, sizeof(struct Elem *) -+ * hashTab->maxBucket); -+ } -+ -+ myfree(hashTab, sizeof(struct GH_THashTab)); -+ } -+} -+ -+/* -+ * ======== GH_exit ======== -+ */ -+ -+void GH_exit(void) -+{ -+ if (curInit-- == 1) -+ GS_exit(); -+ -+} -+ -+/* -+ * ======== GH_find ======== -+ */ -+ -+void *GH_find(struct GH_THashTab *hashTab, void *key) -+{ -+ struct Elem *elem; -+ -+ elem = hashTab->buckets[(*hashTab->hash)(key, hashTab->maxBucket)]; -+ -+ for (; elem; elem = elem->next) { -+ if ((*hashTab->match)(key, elem->data)) -+ return elem->data; -+ } -+ -+ return NULL; -+} -+ -+/* -+ * ======== GH_init ======== -+ */ -+ -+void GH_init(void) -+{ -+ if (curInit++ == 0) -+ GS_init(); -+} -+ -+/* -+ * ======== GH_insert ======== -+ */ -+ -+void *GH_insert(struct GH_THashTab *hashTab, void *key, void *value) -+{ -+ struct Elem *elem; -+ u16 i; -+ char *src, *dst; -+ -+ elem = (struct Elem *)GS_alloc(sizeof(struct Elem) - 1 + -+ hashTab->valSize); -+ if (elem != NULL) { -+ -+ dst = (char *)elem->data; -+ src = (char *)value; -+ for (i = 0; i < hashTab->valSize; i++) -+ *dst++ = *src++; -+ -+ i = (*hashTab->hash)(key, hashTab->maxBucket); -+ elem->next = hashTab->buckets[i]; -+ hashTab->buckets[i] = elem; -+ -+ return elem->data; -+ } -+ -+ return NULL; -+} -+ -+/* -+ * ======== Nop ======== -+ */ -+/* ARGSUSED */ -+static void Nop(void *p) -+{ -+ p = p; /* stifle compiler warning */ -+} -+ -+/* -+ * ======== myfree ======== -+ */ -+static void myfree(void *ptr, s32 size) -+{ -+ GS_free(ptr); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gs.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gs.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gs.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,106 @@ -+/* -+ * gs.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== gs.c ======== -+ * Description: -+ * General storage memory allocator services. -+ * -+ *! Revision History -+ *! ================ -+ *! 29-Sep-1999 ag: Un-commented MEM_Init in GS_init(). -+ *! 14-May-1997 mg: Modified to use new GS API for GS_free() and GS_frees(). -+ *! 06-Nov-1996 gp: Re-commented MEM_Init in GS_init(). GS needs GS_Exit(). -+ *! 
21-Oct-1996 db: Un-commented MEM_Init in GS_init(). -+ *! 21-May-1996 mg: Created from original stdlib implementation. -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Globals */ -+static u32 cumsize; -+ -+/* -+ * ======== GS_alloc ======== -+ * purpose: -+ * Allocates memory of the specified size. -+ */ -+void *GS_alloc(u32 size) -+{ -+ void *p; -+ -+ p = MEM_Calloc(size, MEM_PAGED); -+ if (p == NULL) -+ return NULL; -+ cumsize += size; -+ return p; -+} -+ -+/* -+ * ======== GS_exit ======== -+ * purpose: -+ * Discontinue the usage of the GS module. -+ */ -+void GS_exit(void) -+{ -+ MEM_Exit(); -+} -+ -+/* -+ * ======== GS_free ======== -+ * purpose: -+ * Frees the memory. -+ */ -+void GS_free(void *ptr) -+{ -+ MEM_Free(ptr); -+ /* ack! no size info */ -+ /* cumsize -= size; */ -+} -+ -+/* -+ * ======== GS_frees ======== -+ * purpose: -+ * Frees the memory. -+ */ -+void GS_frees(void *ptr, u32 size) -+{ -+ MEM_Free(ptr); -+ cumsize -= size; -+} -+ -+/* -+ * ======== GS_init ======== -+ * purpose: -+ * Initializes the GS module. -+ */ -+void GS_init(void) -+{ -+ static bool curInit; -+ -+ if (curInit == false) { -+ curInit = MEM_Init(); /* which can't fail currently. */ -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gt.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gt.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/gt.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/gt.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,348 @@ -+/* -+ * gt.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== gt.c ======== -+ * Description: This module implements the trace mechanism for bridge. -+ * -+ *! Revision History -+ *! ================ -+ *! 16-May-1997 dr Changed GT_Config member names to conform to coding -+ *! standards. -+ *! 23-Apr-1997 ge Check for GT->TIDFXN for NULL before calling it. -+ *! 03-Jan-1997 ge Changed GT_Config structure member names to eliminate -+ *! preprocessor confusion with other macros. 
-+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+#define GT_WILD '*' -+ -+#define GT_CLEAR '=' -+#define GT_ON '+' -+#define GT_OFF '-' -+ -+enum GT_State { -+ GT_SEP, -+ GT_FIRST, -+ GT_SECOND, -+ GT_OP, -+ GT_DIGITS -+} ; -+ -+#ifdef CONFIG_BRIDGE_DEBUG -+static char *GT_1format = "%s - %d: "; -+static char *GT_2format = "%s - %d(%d): "; -+#endif /* CONFIG_BRIDGE_DEBUG */ -+ -+static unsigned char *GT_tMask[GT_BOUND]; -+ -+static bool curInit; -+static char *separator; -+static unsigned char tabMem[GT_BOUND][sizeof(unsigned char) * GT_BOUND]; -+ -+static void error(char *string); -+static void setMask(s16 index1, s16 index2, char op, unsigned char mask); -+ -+/* -+ * ======== _GT_create ======== -+ * purpose: -+ * Creates GT mask. -+ */ -+void _GT_create(struct GT_Mask *mask, char *modName) -+{ -+ mask->modName = modName; -+ mask->flags = &(GT_tMask[modName[0] - 'A'][modName[1] - 'A']); -+} -+ -+/* -+ * ======== GT_init ======== -+ * purpose: -+ * Initializes GT module. -+ */ -+#ifdef GT_init -+#undef GT_init -+#endif -+void GT_init(void) -+{ -+ register unsigned char index1; -+ register unsigned char index2; -+ -+ if (!curInit) { -+ curInit = true; -+ -+ separator = " ,;/"; -+ -+ for (index1 = 0; index1 < GT_BOUND; index1++) { -+ GT_tMask[index1] = tabMem[index1]; -+ for (index2 = 0; index2 < GT_BOUND; index2++) { -+ /* no tracing */ -+ GT_tMask[index1][index2] = 0x00; -+ } -+ } -+ } -+} -+ -+/* -+ * ======== _GT_set ======== -+ * purpose: -+ * Sets the trace string format. -+ */ -+ -+void _GT_set(char *str) -+{ -+ enum GT_State state; -+ char *sep; -+ s16 index1 = GT_BOUND; /* indicates all values */ -+ s16 index2 = GT_BOUND; /* indicates all values */ -+ char op = GT_CLEAR; -+ bool maskValid; -+ s16 digit; -+ register unsigned char mask = 0x0; /* no tracing */ -+ -+ if (str == NULL) -+ return; -+ -+ maskValid = false; -+ state = GT_SEP; -+ while (*str != '\0') { -+ switch ((s32) state) { -+ case (s32) GT_SEP: -+ maskValid = false; -+ sep = separator; -+ while (*sep != '\0') { -+ if (*str == *sep) { -+ str++; -+ break; -+ } else { -+ sep++; -+ } -+ } -+ if (*sep == '\0') -+ state = GT_FIRST; -+ -+ break; -+ case (s32) GT_FIRST: -+ if (*str == GT_WILD) { -+ /* indicates all values */ -+ index1 = GT_BOUND; -+ /* indicates all values */ -+ index2 = GT_BOUND; -+ state = GT_OP; -+ } else { -+ if (*str >= 'a') -+ index1 = (s16) (*str - 'a'); -+ else -+ index1 = (s16) (*str - 'A'); -+ if ((index1 >= 0) && (index1 < GT_BOUND)) -+ state = GT_SECOND; -+ else -+ state = GT_SEP; -+ } -+ str++; -+ break; -+ case (s32) GT_SECOND: -+ if (*str == GT_WILD) { -+ index2 = GT_BOUND; /* indicates all values */ -+ state = GT_OP; -+ str++; -+ } else { -+ if (*str >= 'a') -+ index2 = (s16) (*str - 'a'); -+ else -+ index2 = (s16) (*str - 'A'); -+ if ((index2 >= 0) && (index2 < GT_BOUND)) { -+ state = GT_OP; -+ str++; -+ } else { -+ state = GT_SEP; -+ } -+ } -+ break; -+ case (s32) GT_OP: -+ op = *str; -+ mask = 0x0; /* no tracing */ -+ switch (op) { -+ case (s32) GT_CLEAR: -+ maskValid = true; -+ case (s32) GT_ON: -+ case (s32) GT_OFF: -+ state = GT_DIGITS; -+ str++; -+ break; -+ default: -+ state = GT_SEP; -+ break; -+ } -+ break; -+ case (s32) GT_DIGITS: -+ digit = (s16) (*str - '0'); -+ if ((digit >= 0) && (digit <= 7)) { -+ mask |= (0x01 << digit); -+ maskValid = true; -+ str++; -+ } else { -+ if (maskValid == true) { -+ setMask(index1, index2, op, mask); -+ maskValid = false; -+ } -+ state = GT_SEP; -+ } -+ 
break; -+ default: -+ error("illegal trace mask"); -+ break; -+ } -+ } -+ -+ if (maskValid) -+ setMask(index1, index2, op, mask); -+} -+ -+/* -+ * ======== _GT_trace ======== -+ * purpose: -+ * Prints the input string onto standard output -+ */ -+ -+s32 _GT_trace(struct GT_Mask *mask, char *format, ...) -+{ -+ s32 arg1, arg2, arg3, arg4, arg5, arg6; -+ va_list va; -+ -+ va_start(va, format); -+ -+ arg1 = va_arg(va, s32); -+ arg2 = va_arg(va, s32); -+ arg3 = va_arg(va, s32); -+ arg4 = va_arg(va, s32); -+ arg5 = va_arg(va, s32); -+ arg6 = va_arg(va, s32); -+ -+ va_end(va); -+#ifdef DEBUG -+ if (GT->PIDFXN == NULL) { -+ printk(GT_1format, mask->modName, GT->TIDFXN ? -+ (*GT->TIDFXN)() : 0); -+ } else { -+ printk(GT_2format, mask->modName, (*GT->PIDFXN)(), -+ GT->TIDFXN ? (*GT->TIDFXN)() : 0); -+ } -+#endif -+ printk(format, arg1, arg2, arg3, arg4, arg5, arg6); -+ -+ return 0; -+} -+ -+/* -+ * ======== error ======== -+ * purpose: -+ * Prints errors onto the standard output. -+ */ -+static void error(char *string) -+{ -+ printk("GT: %s", string); -+} -+ -+/* -+ * ======== setmask ======== -+ * purpose: -+ * Sets mask for the GT module. -+ */ -+ -+static void setMask(s16 index1, s16 index2, char op, u8 mask) -+{ -+ register s16 index; -+ -+ if (index1 < GT_BOUND) { -+ if (index2 < GT_BOUND) { -+ switch (op) { -+ case (s32) GT_CLEAR: -+ GT_tMask[index1][index2] = mask; -+ break; -+ case (s32) GT_ON: -+ GT_tMask[index1][index2] |= mask; -+ break; -+ case (s32) GT_OFF: -+ GT_tMask[index1][index2] &= ~mask; -+ break; -+ default: -+ error("illegal trace mask"); -+ break; -+ } -+ } else { -+ for (index2--; index2 >= 0; index2--) { -+ switch (op) { -+ case (s32) GT_CLEAR: -+ GT_tMask[index1][index2] = mask; -+ break; -+ case (s32) GT_ON: -+ GT_tMask[index1][index2] |= mask; -+ break; -+ case (s32) GT_OFF: -+ GT_tMask[index1][index2] &= ~mask; -+ break; -+ default: -+ error("illegal trace mask"); -+ break; -+ } -+ } -+ } -+ } else { -+ for (index1--; index1 >= 0; index1--) { -+ if (index2 < GT_BOUND) { -+ switch (op) { -+ case (s32) GT_CLEAR: -+ GT_tMask[index1][index2] = mask; -+ break; -+ case (s32) GT_ON: -+ GT_tMask[index1][index2] |= mask; -+ break; -+ case (s32) GT_OFF: -+ GT_tMask[index1][index2] &= ~mask; -+ break; -+ default: -+ error("illegal trace mask"); -+ break; -+ } -+ } else { -+ index = GT_BOUND; -+ for (index--; index >= 0; index--) { -+ switch (op) { -+ case (s32) GT_CLEAR: -+ GT_tMask[index1][index] = mask; -+ break; -+ case (s32) GT_ON: -+ GT_tMask[index1][index] |= mask; -+ break; -+ case (s32) GT_OFF: -+ GT_tMask[index1][index] &= -+ ~mask; -+ break; -+ default: -+ error("illegal trace mask"); -+ break; -+ } -+ } -+ } -+ } -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/_gt_para.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/_gt_para.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/_gt_para.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/_gt_para.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,107 @@ -+/* -+ * _gt_para.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _gt_para.c ======== -+ * Description: -+ * Configuration parameters for GT. This file is separated from -+ * gt.c so that GT_assert() can reference the error function without -+ * forcing the linker to include all the code for GT_set(), GT_init(), -+ * etc. into a fully bound image. Thus, GT_assert() can be retained in -+ * a program for which GT_?trace() has been compiled out. -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 vp: Code Review Updates. -+ *! 18-Oct-2002 sb: Ported to Linux platform. -+ *! 03-Jul-2001 rr: Removed kfuncs.h because of build errors. -+ *! 07-Dec-1999 ag: Fxn error now causes a WinCE DebugBreak; -+ *! 30-Aug-1999 ag: Now uses GP_printf for printf and error. -+ *! -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+ -+/* ----------------------------------- Function Prototypes */ -+static void error(char *msg, ...); -+static s32 GT_nop(void); -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+ -+struct GT_Config _GT_params = { -+ (Fxn) printk, /* printf */ -+ (Fxn) NULL, /* procid */ -+ (Fxn) GT_nop, /* taskid */ -+ (Fxn) error, /* error */ -+}; -+ -+/* ----------------------------------- Globals */ -+struct GT_Config *GT = &_GT_params; -+ -+/* -+ * ======== GT_nop ======== -+ */ -+static s32 GT_nop(void) -+{ -+ return 0; -+} -+ -+/* -+ * ======== error ======== -+ * purpose: -+ * Prints error onto the standard output. -+ */ -+static void error(char *fmt, ...) -+{ -+ s32 arg1, arg2, arg3, arg4, arg5, arg6; -+ -+ va_list va; -+ -+ va_start(va, fmt); -+ -+ arg1 = va_arg(va, s32); -+ arg2 = va_arg(va, s32); -+ arg3 = va_arg(va, s32); -+ arg4 = va_arg(va, s32); -+ arg5 = va_arg(va, s32); -+ arg6 = va_arg(va, s32); -+ -+ va_end(va); -+ -+ printk("ERROR: "); -+ printk(fmt, arg1, arg2, arg3, arg4, arg5, arg6); -+ -+#if defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT) -+ if (in_interrupt()) { -+ printk(KERN_INFO "Not stopping after error since ISR/DPC " -+ "are disabled\n"); -+ } else { -+ set_current_state(TASK_INTERRUPTIBLE); -+ flush_signals(current); -+ schedule(); -+ flush_signals(current); -+ printk(KERN_INFO "Signaled in error function\n"); -+ } -+#endif -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/uuidutil.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/uuidutil.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/gen/uuidutil.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/gen/uuidutil.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,238 @@ -+/* -+ * uuidutil.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+ -+/* -+ * ======== uuidutil.c ======== -+ * Description: -+ * This file contains the implementation of UUID helper functions. -+ * -+ *! Revision History -+ *! ================ -+ *! 23-Feb-2003 vp: Code review updates. -+ *! 18-Oct-2003 vp: Ported to Linux platform. -+ *! 31-Aug-2000 rr: UUID_UuidFromString bug fixed. -+ *! 29-Aug-2000 rr: Modified UUID_UuidFromString. -+ *! 09-Nov-2000 kc: Modified UUID_UuidFromString to simplify implementation. -+ *! 30-Oct-2000 kc: Modified UUID utility module function prefix. -+ *! 10-Aug-2000 kc: Created. -+ *! -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* -+ * ======== UUID_UuidToString ======== -+ * Purpose: -+ * Converts a struct DSP_UUID to a string. -+ * Note: snprintf format specifier is: -+ * %[flags] [width] [.precision] [{h | l | I64 | L}]type -+ */ -+void UUID_UuidToString(IN struct DSP_UUID *pUuid, OUT char *pszUuid, -+ IN s32 size) -+{ -+ s32 i; /* return result from snprintf. */ -+ -+ DBC_Require(pUuid && pszUuid); -+ -+ i = snprintf(pszUuid, size, -+ "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X", -+ pUuid->ulData1, pUuid->usData2, pUuid->usData3, -+ pUuid->ucData4, pUuid->ucData5, pUuid->ucData6[0], -+ pUuid->ucData6[1], pUuid->ucData6[2], pUuid->ucData6[3], -+ pUuid->ucData6[4], pUuid->ucData6[5]); -+ -+ DBC_Ensure(i != -1); -+} -+ -+/* -+ * ======== htoi ======== -+ * Purpose: -+ * Converts a hex value to a decimal integer. -+ */ -+ -+static int htoi(char c) -+{ -+ switch (c) { -+ case '0': -+ return 0; -+ case '1': -+ return 1; -+ case '2': -+ return 2; -+ case '3': -+ return 3; -+ case '4': -+ return 4; -+ case '5': -+ return 5; -+ case '6': -+ return 6; -+ case '7': -+ return 7; -+ case '8': -+ return 8; -+ case '9': -+ return 9; -+ case 'A': -+ return 10; -+ case 'B': -+ return 11; -+ case 'C': -+ return 12; -+ case 'D': -+ return 13; -+ case 'E': -+ return 14; -+ case 'F': -+ return 15; -+ case 'a': -+ return 10; -+ case 'b': -+ return 11; -+ case 'c': -+ return 12; -+ case 'd': -+ return 13; -+ case 'e': -+ return 14; -+ case 'f': -+ return 15; -+ } -+ return 0; -+} -+ -+/* -+ * ======== UUID_UuidFromString ======== -+ * Purpose: -+ * Converts a string to a struct DSP_UUID. 
-+ */ -+void UUID_UuidFromString(IN char *pszUuid, OUT struct DSP_UUID *pUuid) -+{ -+ char c; -+ s32 i, j; -+ s32 result; -+ char *temp = pszUuid; -+ -+ result = 0; -+ for (i = 0; i < 8; i++) { -+ /* Get first character in string */ -+ c = *temp; -+ -+ /* Increase the results by new value */ -+ result *= 16; -+ result += htoi(c); -+ -+ /* Go to next character in string */ -+ temp++; -+ } -+ pUuid->ulData1 = result; -+ -+ /* Step over underscore */ -+ temp++; -+ -+ result = 0; -+ for (i = 0; i < 4; i++) { -+ /* Get first character in string */ -+ c = *temp; -+ -+ /* Increase the results by new value */ -+ result *= 16; -+ result += htoi(c); -+ -+ /* Go to next character in string */ -+ temp++; -+ } -+ pUuid->usData2 = (u16)result; -+ -+ /* Step over underscore */ -+ temp++; -+ -+ result = 0; -+ for (i = 0; i < 4; i++) { -+ /* Get first character in string */ -+ c = *temp; -+ -+ /* Increase the results by new value */ -+ result *= 16; -+ result += htoi(c); -+ -+ /* Go to next character in string */ -+ temp++; -+ } -+ pUuid->usData3 = (u16)result; -+ -+ /* Step over underscore */ -+ temp++; -+ -+ result = 0; -+ for (i = 0; i < 2; i++) { -+ /* Get first character in string */ -+ c = *temp; -+ -+ /* Increase the results by new value */ -+ result *= 16; -+ result += htoi(c); -+ -+ /* Go to next character in string */ -+ temp++; -+ } -+ pUuid->ucData4 = (u8)result; -+ -+ result = 0; -+ for (i = 0; i < 2; i++) { -+ /* Get first character in string */ -+ c = *temp; -+ -+ /* Increase the results by new value */ -+ result *= 16; -+ result += htoi(c); -+ -+ /* Go to next character in string */ -+ temp++; -+ } -+ pUuid->ucData5 = (u8)result; -+ -+ /* Step over underscore */ -+ temp++; -+ -+ for (j = 0; j < 6; j++) { -+ result = 0; -+ for (i = 0; i < 2; i++) { -+ /* Get first character in string */ -+ c = *temp; -+ -+ /* Increase the results by new value */ -+ result *= 16; -+ result += htoi(c); -+ -+ /* Go to next character in string */ -+ temp++; -+ } -+ pUuid->ucData6[j] = (u8)result; -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/EasiGlobal.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/EasiGlobal.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/EasiGlobal.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/EasiGlobal.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,42 @@ -+/* -+ * EasiGlobal.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef __EASIGLOBAL_H -+#define __EASIGLOBAL_H -+#include -+ -+/* -+ * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE -+ * -+ * DESCRIPTION: Defines used to describe register types for EASI-checker tests. -+ */ -+ -+#define READ_ONLY 1 -+#define WRITE_ONLY 2 -+#define READ_WRITE 3 -+ -+/* -+ * MACRO: _DEBUG_LEVEL_1_EASI -+ * -+ * DESCRIPTION: A MACRO which can be used to indicate that a particular beach -+ * register access function was called. -+ * -+ * NOTE: We currently dont use this functionality. 
-+ */ -+#define _DEBUG_LEVEL_1_EASI(easiNum) ((void)0) -+ -+#endif /* __EASIGLOBAL_H */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/GlobalTypes.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/GlobalTypes.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/GlobalTypes.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/GlobalTypes.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,325 @@ -+/* -+ * GlobalTypes.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== GlobalTypes.h ======== -+ * Description: -+ * Global HW definitions -+ * -+ *! Revision History: -+ *! ================ -+ *! 16 Feb 2003 sb: Initial version -+ */ -+#ifndef __GLOBALTYPES_H -+#define __GLOBALTYPES_H -+ -+/* -+ * Definition: TRUE, FALSE -+ * -+ * DESCRIPTION: Boolean Definitions -+ */ -+#ifndef TRUE -+#define FALSE 0 -+#define TRUE (!(FALSE)) -+#endif -+ -+/* -+ * Definition: NULL -+ * -+ * DESCRIPTION: Invalid pointer -+ */ -+#ifndef NULL -+#define NULL (void *)0 -+#endif -+ -+/* -+ * Definition: RET_CODE_BASE -+ * -+ * DESCRIPTION: Base value for return code offsets -+ */ -+#define RET_CODE_BASE 0 -+ -+/* -+ * Definition: *BIT_OFFSET -+ * -+ * DESCRIPTION: offset in bytes from start of 32-bit word. -+ */ -+#define LOWER_16BIT_OFFSET 0 -+#define UPPER_16BIT_OFFSET 2 -+ -+#define LOWER_8BIT_OFFSET 0 -+#define LOWER_MIDDLE_8BIT_OFFSET 1 -+#define UPPER_MIDDLE_8BIT_OFFSET 2 -+#define UPPER_8BIT_OFFSET 3 -+ -+#define LOWER_8BIT_OF16_OFFSET 0 -+#define UPPER_8BIT_OF16_OFFSET 1 -+ -+/* -+ * Definition: *BIT_SHIFT -+ * -+ * DESCRIPTION: offset in bits from start of 32-bit word. -+ */ -+#define LOWER_16BIT_SHIFT 0 -+#define UPPER_16BIT_SHIFT 16 -+ -+#define LOWER_8BIT_SHIFT 0 -+#define LOWER_MIDDLE_8BIT_SHIFT 8 -+#define UPPER_MIDDLE_8BIT_SHIFT 16 -+#define UPPER_8BIT_SHIFT 24 -+ -+#define LOWER_8BIT_OF16_SHIFT 0 -+#define UPPER_8BIT_OF16_SHIFT 8 -+ -+ -+/* -+ * Definition: LOWER_16BIT_MASK -+ * -+ * DESCRIPTION: 16 bit mask used for inclusion of lower 16 bits i.e. mask out -+ * the upper 16 bits -+ */ -+#define LOWER_16BIT_MASK 0x0000FFFF -+ -+ -+/* -+ * Definition: LOWER_8BIT_MASK -+ * -+ * DESCRIPTION: 8 bit masks used for inclusion of 8 bits i.e. 
mask out -+ * the upper 16 bits -+ */ -+#define LOWER_8BIT_MASK 0x000000FF -+ -+/* -+ * Definition: RETURN_32BITS_FROM_16LOWER_AND_16UPPER(lower16Bits, upper16Bits) -+ * -+ * DESCRIPTION: Returns a 32 bit value given a 16 bit lower value and a 16 -+ * bit upper value -+ */ -+#define RETURN_32BITS_FROM_16LOWER_AND_16UPPER(lower16Bits,upper16Bits)\ -+ (((((u32)lower16Bits) & LOWER_16BIT_MASK)) | \ -+ (((((u32)upper16Bits) & LOWER_16BIT_MASK) << UPPER_16BIT_SHIFT))) -+ -+/* -+ * Definition: RETURN_16BITS_FROM_8LOWER_AND_8UPPER(lower16Bits, upper16Bits) -+ * -+ * DESCRIPTION: Returns a 16 bit value given a 8 bit lower value and a 8 -+ * bit upper value -+ */ -+#define RETURN_16BITS_FROM_8LOWER_AND_8UPPER(lower8Bits,upper8Bits)\ -+ (((((u32)lower8Bits) & LOWER_8BIT_MASK)) | \ -+ (((((u32)upper8Bits) & LOWER_8BIT_MASK) << UPPER_8BIT_OF16_SHIFT))) -+ -+/* -+ * Definition: RETURN_32BITS_FROM_4_8BIT_VALUES(lower8Bits, lowerMiddle8Bits, -+ * lowerUpper8Bits, upper8Bits) -+ * -+ * DESCRIPTION: Returns a 32 bit value given four 8 bit values -+ */ -+#define RETURN_32BITS_FROM_4_8BIT_VALUES(lower8Bits, lowerMiddle8Bits,\ -+ lowerUpper8Bits, upper8Bits)\ -+ (((((u32)lower8Bits) & LOWER_8BIT_MASK)) | \ -+ (((((u32)lowerMiddle8Bits) & LOWER_8BIT_MASK) <<\ -+ LOWER_MIDDLE_8BIT_SHIFT)) | \ -+ (((((u32)lowerUpper8Bits) & LOWER_8BIT_MASK) <<\ -+ UPPER_MIDDLE_8BIT_SHIFT)) | \ -+ (((((u32)upper8Bits) & LOWER_8BIT_MASK) <<\ -+ UPPER_8BIT_SHIFT))) -+ -+/* -+ * Definition: READ_LOWER_16BITS_OF_32(value32bits) -+ * -+ * DESCRIPTION: Returns a 16 lower bits of 32bit value -+ */ -+#define READ_LOWER_16BITS_OF_32(value32bits)\ -+ ((u16)((u32)(value32bits) & LOWER_16BIT_MASK)) -+ -+/* -+ * Definition: READ_UPPER_16BITS_OF_32(value32bits) -+ * -+ * DESCRIPTION: Returns a 16 lower bits of 32bit value -+ */ -+#define READ_UPPER_16BITS_OF_32(value32bits)\ -+ (((u16)((u32)(value32bits) >> UPPER_16BIT_SHIFT)) &\ -+ LOWER_16BIT_MASK) -+ -+ -+/* -+ * Definition: READ_LOWER_8BITS_OF_32(value32bits) -+ * -+ * DESCRIPTION: Returns a 8 lower bits of 32bit value -+ */ -+#define READ_LOWER_8BITS_OF_32(value32bits)\ -+ ((u8)((u32)(value32bits) & LOWER_8BIT_MASK)) -+ -+/* -+ * Definition: READ_LOWER_MIDDLE_8BITS_OF_32(value32bits) -+ * -+ * DESCRIPTION: Returns a 8 lower middle bits of 32bit value -+ */ -+#define READ_LOWER_MIDDLE_8BITS_OF_32(value32bits)\ -+ (((u8)((u32)(value32bits) >> LOWER_MIDDLE_8BIT_SHIFT)) &\ -+ LOWER_8BIT_MASK) -+ -+/* -+ * Definition: READ_LOWER_MIDDLE_8BITS_OF_32(value32bits) -+ * -+ * DESCRIPTION: Returns a 8 lower middle bits of 32bit value -+ */ -+#define READ_UPPER_MIDDLE_8BITS_OF_32(value32bits)\ -+ (((u8)((u32)(value32bits) >> LOWER_MIDDLE_8BIT_SHIFT)) &\ -+ LOWER_8BIT_MASK) -+ -+/* -+ * Definition: READ_UPPER_8BITS_OF_32(value32bits) -+ * -+ * DESCRIPTION: Returns a 8 upper bits of 32bit value -+ */ -+#define READ_UPPER_8BITS_OF_32(value32bits)\ -+ (((u8)((u32)(value32bits) >> UPPER_8BIT_SHIFT)) & LOWER_8BIT_MASK) -+ -+ -+/* -+ * Definition: READ_LOWER_8BITS_OF_16(value16bits) -+ * -+ * DESCRIPTION: Returns a 8 lower bits of 16bit value -+ */ -+#define READ_LOWER_8BITS_OF_16(value16bits)\ -+ ((u8)((u16)(value16bits) & LOWER_8BIT_MASK)) -+ -+/* -+ * Definition: READ_UPPER_8BITS_OF_16(value32bits) -+ * -+ * DESCRIPTION: Returns a 8 upper bits of 16bit value -+ */ -+#define READ_UPPER_8BITS_OF_16(value16bits)\ -+ (((u8)((u32)(value16bits) >> UPPER_8BIT_SHIFT)) & LOWER_8BIT_MASK) -+ -+ -+ -+/* UWORD16: 16 bit tpyes */ -+ -+ -+/* REG_UWORD8, REG_WORD8: 8 bit register types */ -+typedef volatile unsigned 
char REG_UWORD8; -+typedef volatile signed char REG_WORD8; -+ -+/* REG_UWORD16, REG_WORD16: 16 bit register types */ -+#ifndef OMAPBRIDGE_TYPES -+typedef volatile unsigned short REG_UWORD16; -+#endif -+typedef volatile short REG_WORD16; -+ -+/* REG_UWORD32, REG_WORD32: 32 bit register types */ -+typedef volatile unsigned long REG_UWORD32; -+ -+/* FLOAT -+ * -+ * Type to be used for floating point calculation. Note that floating point -+ * calculation is very CPU expensive, and you should only use if you -+ * absolutely need this. */ -+ -+ -+/* boolean_t: Boolean Type True, False */ -+/* ReturnCode_t: Return codes to be returned by all library functions */ -+typedef enum ReturnCode_label { -+ RET_OK = 0, -+ RET_FAIL = -1, -+ RET_BAD_NULL_PARAM = -2, -+ RET_PARAM_OUT_OF_RANGE = -3, -+ RET_INVALID_ID = -4, -+ RET_EMPTY = -5, -+ RET_FULL = -6, -+ RET_TIMEOUT = -7, -+ RET_INVALID_OPERATION = -8, -+ -+ /* Add new error codes at end of above list */ -+ -+ RET_NUM_RET_CODES /* this should ALWAYS be LAST entry */ -+} ReturnCode_t, *pReturnCode_t; -+ -+/* MACRO: RD_MEM_8, WR_MEM_8 -+ * -+ * DESCRIPTION: 32 bit memory access macros -+ */ -+#define RD_MEM_8(addr) ((u8)(*((u8 *)(addr)))) -+#define WR_MEM_8(addr, data) (*((u8 *)(addr)) = (u8)(data)) -+ -+/* MACRO: RD_MEM_8_VOLATILE, WR_MEM_8_VOLATILE -+ * -+ * DESCRIPTION: 8 bit register access macros -+ */ -+#define RD_MEM_8_VOLATILE(addr) ((u8)(*((REG_UWORD8 *)(addr)))) -+#define WR_MEM_8_VOLATILE(addr, data) (*((REG_UWORD8 *)(addr)) = (u8)(data)) -+ -+ -+/* -+ * MACRO: RD_MEM_16, WR_MEM_16 -+ * -+ * DESCRIPTION: 16 bit memory access macros -+ */ -+#define RD_MEM_16(addr) ((u16)(*((u16 *)(addr)))) -+#define WR_MEM_16(addr, data) (*((u16 *)(addr)) = (u16)(data)) -+ -+/* -+ * MACRO: RD_MEM_16_VOLATILE, WR_MEM_16_VOLATILE -+ * -+ * DESCRIPTION: 16 bit register access macros -+ */ -+#define RD_MEM_16_VOLATILE(addr) ((u16)(*((REG_UWORD16 *)(addr)))) -+#define WR_MEM_16_VOLATILE(addr, data) (*((REG_UWORD16 *)(addr)) =\ -+ (u16)(data)) -+ -+/* -+ * MACRO: RD_MEM_32, WR_MEM_32 -+ * -+ * DESCRIPTION: 32 bit memory access macros -+ */ -+#define RD_MEM_32(addr) ((u32)(*((u32 *)(addr)))) -+#define WR_MEM_32(addr, data) (*((u32 *)(addr)) = (u32)(data)) -+ -+/* -+ * MACRO: RD_MEM_32_VOLATILE, WR_MEM_32_VOLATILE -+ * -+ * DESCRIPTION: 32 bit register access macros -+ */ -+#define RD_MEM_32_VOLATILE(addr) ((u32)(*((REG_UWORD32 *)(addr)))) -+#define WR_MEM_32_VOLATILE(addr, data) (*((REG_UWORD32 *)(addr)) =\ -+ (u32)(data)) -+ -+/* Not sure if this all belongs here */ -+ -+#define CHECK_RETURN_VALUE(actualValue, expectedValue, returnCodeIfMismatch,\ -+ spyCodeIfMisMatch) -+#define CHECK_RETURN_VALUE_RET(actualValue, expectedValue, returnCodeIfMismatch) -+#define CHECK_RETURN_VALUE_RES(actualValue, expectedValue, spyCodeIfMisMatch) -+#define CHECK_RETURN_VALUE_RET_VOID(actualValue, expectedValue,\ -+ spyCodeIfMisMatch) -+ -+#define CHECK_INPUT_PARAM(actualValue, invalidValue, returnCodeIfMismatch,\ -+ spyCodeIfMisMatch) -+#define CHECK_INPUT_PARAM_NO_SPY(actualValue, invalidValue,\ -+ returnCodeIfMismatch) -+#define CHECK_INPUT_RANGE(actualValue, minValidValue, maxValidValue,\ -+ returnCodeIfMismatch, spyCodeIfMisMatch) -+#define CHECK_INPUT_RANGE_NO_SPY(actualValue, minValidValue, maxValidValue,\ -+ returnCodeIfMismatch) -+#define CHECK_INPUT_RANGE_MIN0(actualValue, maxValidValue,\ -+ returnCodeIfMismatch, spyCodeIfMisMatch) -+#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actualValue, maxValidValue,\ -+ returnCodeIfMismatch) -+ -+#endif /* __GLOBALTYPES_H */ -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_defs.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_defs.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_defs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_defs.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,73 @@ -+/* -+ * hw_defs.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== hw_defs.h ======== -+ * Description: -+ * Global HW definitions -+ * -+ *! Revision History: -+ *! ================ -+ *! 19 Apr 2004 sb: Added generic page size, endianness and element size defns -+ *! 16 Feb 2003 sb: Initial version -+ */ -+#ifndef __HW_DEFS_H -+#define __HW_DEFS_H -+ -+#include -+ -+/* Page size */ -+#define HW_PAGE_SIZE_4KB 0x1000 -+#define HW_PAGE_SIZE_64KB 0x10000 -+#define HW_PAGE_SIZE_1MB 0x100000 -+#define HW_PAGE_SIZE_16MB 0x1000000 -+ -+/* HW_STATUS: return type for HW API */ -+typedef long HW_STATUS; -+ -+/* HW_SetClear_t: Enumerated Type used to set and clear any bit */ -+enum HW_SetClear_t { -+ HW_CLEAR, -+ HW_SET -+} ; -+ -+/* HW_Endianism_t: Enumerated Type used to specify the endianism -+ * Do NOT change these values. They are used as bit fields. */ -+enum HW_Endianism_t { -+ HW_LITTLE_ENDIAN, -+ HW_BIG_ENDIAN -+ -+} ; -+ -+/* HW_ElementSize_t: Enumerated Type used to specify the element size -+ * Do NOT change these values. They are used as bit fields. */ -+enum HW_ElementSize_t { -+ HW_ELEM_SIZE_8BIT, -+ HW_ELEM_SIZE_16BIT, -+ HW_ELEM_SIZE_32BIT, -+ HW_ELEM_SIZE_64BIT -+ -+} ; -+ -+/* HW_IdleMode_t: Enumerated Type used to specify Idle modes */ -+ enum HW_IdleMode_t { -+ HW_FORCE_IDLE, -+ HW_NO_IDLE, -+ HW_SMART_IDLE -+ } ; -+ -+#endif /* __HW_DEFS_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_dspssC64P.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_dspssC64P.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,56 @@ -+/* -+ * hw_dspss64P.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== hw_dspss64P.c ======== -+ * Description: -+ * API definitions to configure DSP Subsystem modules like IPI -+ * -+ *! Revision History: -+ *! ================ -+ *! 19 Apr 2004 sb: Implemented HW_DSPSS_IPIEndianismSet -+ *! 
16 Feb 2003 sb: Initial version -+ */ -+ -+/* PROJECT SPECIFIC INCLUDE FILES */ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* HW FUNCTIONS */ -+HW_STATUS HW_DSPSS_BootModeSet(const void __iomem *baseAddress, -+ enum HW_DSPSYSC_BootMode_t bootMode, -+ const u32 bootAddress) -+{ -+ HW_STATUS status = RET_OK; -+ u32 offset = SYSC_IVA2BOOTMOD_OFFSET; -+ u32 alignedBootAddr; -+ -+ /* if Boot mode it DIRECT BOOT, check that the bootAddress is -+ * aligned to atleast 1K :: TODO */ -+ __raw_writel(bootMode, (baseAddress) + offset); -+ -+ offset = SYSC_IVA2BOOTADDR_OFFSET; -+ -+ alignedBootAddr = bootAddress & SYSC_IVA2BOOTADDR_MASK; -+ -+ __raw_writel(alignedBootAddr, (baseAddress) + offset); -+ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_dspssC64P.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_dspssC64P.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_dspssC64P.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,48 @@ -+/* -+ * hw_dspssC64P.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== hw_dspss.h ======== -+ * Description: -+ * DSP Subsystem API declarations -+ * -+ *! Revision History: -+ *! ================ -+ *! 19-Apr-2004 sb: Removed redundant argument from HW_DSPSS_IPIEndianismSet -+ *! Moved endianness and element size to generic hw_defs.h -+ *! 16 Feb 2003 sb: Initial version -+ */ -+ -+#ifndef __HW_DSPSS_H -+#define __HW_DSPSS_H -+#include -+ -+ enum HW_DSPSYSC_BootMode_t { -+ HW_DSPSYSC_DIRECTBOOT = 0x0, -+ HW_DSPSYSC_IDLEBOOT = 0x1, -+ HW_DSPSYSC_SELFLOOPBOOT = 0x2, -+ HW_DSPSYSC_USRBOOTSTRAP = 0x3, -+ HW_DSPSYSC_DEFAULTRESTORE = 0x4 -+ } ; -+ -+#define HW_DSP_IDLEBOOT_ADDR 0x007E0000 -+ -+ extern HW_STATUS HW_DSPSS_BootModeSet(const void __iomem *baseAddress, -+ enum HW_DSPSYSC_BootMode_t bootMode, -+ const u32 bootAddress); -+ -+#endif /* __HW_DSPSS_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mbox.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mbox.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,247 @@ -+/* -+ * hw_mbox.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== hw_mbox.c ======== -+ * Description: -+ * Mailbox messaging & configuration API definitions -+ * -+ *! 
Revision History: -+ *! ================ -+ *! 16 Feb 2003 sb: Initial version -+ */ -+ -+#include -+#include "MLBRegAcM.h" -+#include -+#include -+ -+/* width in bits of MBOX Id */ -+#define HW_MBOX_ID_WIDTH 2 -+ -+struct MAILBOX_CONTEXT mboxsetting = { -+ .sysconfig = 2 << 3 | 1, /* SMART/AUTO-IDLE */ -+}; -+ -+/* Saves the mailbox context */ -+HW_STATUS HW_MBOX_saveSettings(void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ -+ mboxsetting.sysconfig = MLBMAILBOX_SYSCONFIGReadRegister32(baseAddress); -+ /* Get current enable status */ -+ mboxsetting.irqEnable0 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 -+ (baseAddress, HW_MBOX_U0_ARM); -+ mboxsetting.irqEnable1 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 -+ (baseAddress, HW_MBOX_U1_DSP1); -+ return status; -+} -+ -+/* Restores the mailbox context */ -+HW_STATUS HW_MBOX_restoreSettings(void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ /* Restor IRQ enable status */ -+ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, HW_MBOX_U0_ARM, -+ mboxsetting.irqEnable0); -+ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, HW_MBOX_U1_DSP1, -+ mboxsetting.irqEnable1); -+ /* Restore Sysconfig register */ -+ MLBMAILBOX_SYSCONFIGWriteRegister32(baseAddress, mboxsetting.sysconfig); -+ return status; -+} -+ -+/* Reads a u32 from the sub module message box Specified. if there are no -+ * messages in the mailbox then and error is returned. */ -+HW_STATUS HW_MBOX_MsgRead(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, u32 *const pReadValue) -+{ -+ HW_STATUS status = RET_OK; -+ -+ /* Check input parameters */ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_PARAM(pReadValue, NULL, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* Read 32-bit message in mail box */ -+ *pReadValue = MLBMAILBOX_MESSAGE___0_15ReadRegister32(baseAddress, -+ (u32)mailBoxId); -+ -+ return status; -+} -+ -+/* Writes a u32 from the sub module message box Specified. */ -+HW_STATUS HW_MBOX_MsgWrite(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, const u32 writeValue) -+{ -+ HW_STATUS status = RET_OK; -+ -+ /* Check input parameters */ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* Write 32-bit value to mailbox */ -+ MLBMAILBOX_MESSAGE___0_15WriteRegister32(baseAddress, (u32)mailBoxId, -+ (u32)writeValue); -+ -+ return status; -+} -+ -+/* Gets number of messages in a specified mailbox. */ -+HW_STATUS HW_MBOX_NumMsgGet(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, u32 *const pNumMsg) -+{ -+ HW_STATUS status = RET_OK; -+ -+ /* Check input parameters */ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_PARAM(pNumMsg, NULL, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ -+ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* Get number of messages available for MailBox */ -+ *pNumMsg = MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(baseAddress, -+ (u32)mailBoxId); -+ -+ return status; -+} -+ -+/* Enables the specified IRQ. 
*/ -+HW_STATUS HW_MBOX_EventEnable(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ const HW_MBOX_UserId_t userId, -+ const u32 events) -+{ -+ HW_STATUS status = RET_OK; -+ u32 irqEnableReg; -+ -+ /* Check input parameters */ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(enableIrq, HW_MBOX_INT_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(userId, HW_MBOX_USER_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* Get current enable status */ -+ irqEnableReg = MLBMAILBOX_IRQENABLE___0_3ReadRegister32(baseAddress, -+ (u32)userId); -+ -+ /* update enable value */ -+ irqEnableReg |= ((u32)(events)) << (((u32)(mailBoxId)) * -+ HW_MBOX_ID_WIDTH); -+ -+ /* write new enable status */ -+ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, (u32)userId, -+ (u32)irqEnableReg); -+ -+ mboxsetting.sysconfig = MLBMAILBOX_SYSCONFIGReadRegister32(baseAddress); -+ /* Get current enable status */ -+ mboxsetting.irqEnable0 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 -+ (baseAddress, HW_MBOX_U0_ARM); -+ mboxsetting.irqEnable1 = MLBMAILBOX_IRQENABLE___0_3ReadRegister32 -+ (baseAddress, HW_MBOX_U1_DSP1); -+ return status; -+} -+ -+/* Disables the specified IRQ. */ -+HW_STATUS HW_MBOX_EventDisable(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ const HW_MBOX_UserId_t userId, -+ const u32 events) -+{ -+ HW_STATUS status = RET_OK; -+ u32 irqDisableReg; -+ -+ /* Check input parameters */ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(disableIrq, HW_MBOX_INT_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(userId, HW_MBOX_USER_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* Get current enable status */ -+ irqDisableReg = MLBMAILBOX_IRQENABLE___0_3ReadRegister32(baseAddress, -+ (u32)userId); -+ -+ /* update enable value */ -+ irqDisableReg &= ~(((u32)(events)) << (((u32)(mailBoxId)) * -+ HW_MBOX_ID_WIDTH)); -+ -+ /* write new enable status */ -+ MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, (u32)userId, -+ (u32)irqDisableReg); -+ -+ return status; -+} -+ -+/* Sets the status of the specified IRQ. 
*/ -+HW_STATUS HW_MBOX_EventAck(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, const HW_MBOX_UserId_t userId, -+ const u32 event) -+{ -+ HW_STATUS status = RET_OK; -+ u32 irqStatusReg; -+ -+ /* Check input parameters */ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, RES_MBOX_BASE + -+ RES_INVALID_INPUT_PARAM); -+ -+ CHECK_INPUT_RANGE_MIN0(irqStatus, HW_MBOX_INT_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(mailBoxId, HW_MBOX_ID_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(userId, HW_MBOX_USER_MAX, RET_INVALID_ID, -+ RES_MBOX_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* calculate status to write */ -+ irqStatusReg = ((u32)event) << (((u32)(mailBoxId)) * -+ HW_MBOX_ID_WIDTH); -+ -+ /* clear Irq Status for specified mailbox/User Id */ -+ MLBMAILBOX_IRQSTATUS___0_3WriteRegister32(baseAddress, (u32)userId, -+ (u32)irqStatusReg); -+ -+ /* -+ * FIXME: Replace all this custom register access with standard -+ * __raw_read/write(). -+ * -+ * FIXME: Replace all interrupt handlers with standard linux style -+ * interrupt handlers. -+ * -+ * FIXME: Replace direct access to PRCM registers with omap standard -+ * PRCM register access. -+ * -+ * Flush posted write for the irq status to avoid spurious interrupts. -+ */ -+ MLBMAILBOX_IRQSTATUS___0_3ReadRegister32(baseAddress, (u32)userId); -+ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mbox.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mbox.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mbox.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,328 @@ -+/* -+ * hw_mbox.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== hw_mbox.h ======== -+ * Description: -+ * HW Mailbox API and types definitions -+ * -+ *! Revision History: -+ *! ================ -+ *! 16 Feb 2003 sb: Initial version -+ */ -+#ifndef __MBOX_H -+#define __MBOX_H -+ -+/* Bitmasks for Mailbox interrupt sources */ -+#define HW_MBOX_INT_NEW_MSG 0x1 -+#define HW_MBOX_INT_NOT_FULL 0x2 -+#define HW_MBOX_INT_ALL 0x3 -+ -+/* Maximum number of messages that mailbox can hald at a time. 
*/ -+#define HW_MBOX_MAX_NUM_MESSAGES 4 -+ -+/* HW_MBOX_Id_t: Enumerated Type used to specify Mailbox Sub Module Id Number */ -+typedef enum HW_MBOX_Id_label { -+ HW_MBOX_ID_0, -+ HW_MBOX_ID_1, -+ HW_MBOX_ID_2, -+ HW_MBOX_ID_3, -+ HW_MBOX_ID_4, -+ HW_MBOX_ID_5 -+ -+} HW_MBOX_Id_t, *pHW_MBOX_Id_t; -+ -+/* HW_MBOX_UserId_t: Enumerated Type used to specify Mail box User Id */ -+typedef enum HW_MBOX_UserId_label { -+ HW_MBOX_U0_ARM, -+ HW_MBOX_U1_DSP1, -+ HW_MBOX_U2_DSP2, -+ HW_MBOX_U3_ARM -+ -+} HW_MBOX_UserId_t, *pHW_MBOX_UserId_t; -+ -+/* Mailbox context settings */ -+struct MAILBOX_CONTEXT { -+ u32 sysconfig; -+ u32 irqEnable0; -+ u32 irqEnable1; -+}; -+ -+/* -+* FUNCTION : HW_MBOX_MsgRead -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of Mailbox module -+* -+* Identifier : mailBoxId -+* Type : const HW_MBOX_Id_t -+* Description : Mail Box Sub module Id to read -+* -+* OUTPUTS: -+* -+* Identifier : pReadValue -+* Type : u32 *const -+* Description : Value read from MailBox -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM Address/ptr Paramater was set to 0/NULL -+* RET_INVALID_ID Invalid Id used -+* RET_EMPTY Mailbox empty -+* -+* PURPOSE: : this function reads a u32 from the sub module message -+* box Specified. if there are no messages in the mailbox -+* then and error is returned. -+*/ -+extern HW_STATUS HW_MBOX_MsgRead(const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ u32 *const pReadValue); -+ -+/* -+* FUNCTION : HW_MBOX_MsgWrite -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of Mailbox module -+* -+* Identifier : mailBoxId -+* Type : const HW_MBOX_Id_t -+* Description : Mail Box Sub module Id to write -+* -+* Identifier : writeValue -+* Type : const u32 -+* Description : Value to write to MailBox -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL -+* RET_INVALID_ID Invalid Id used -+* -+* PURPOSE: : this function writes a u32 from the sub module message -+* box Specified. -+*/ -+extern HW_STATUS HW_MBOX_MsgWrite( -+ const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ const u32 writeValue -+ ); -+ -+/* -+* FUNCTION : HW_MBOX_NumMsgGet -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of Mailbox module -+* -+* Identifier : mailBoxId -+* Type : const HW_MBOX_Id_t -+* Description : Mail Box Sub module Id to get num messages -+* -+* OUTPUTS: -+* -+* Identifier : pNumMsg -+* Type : u32 *const -+* Description : Number of messages in mailbox -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL -+* RET_INVALID_ID Inavlid ID input at parameter -+* -+* PURPOSE: : this function gets number of messages in a specified mailbox. 
-+*/ -+extern HW_STATUS HW_MBOX_NumMsgGet( -+ const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ u32 *const pNumMsg -+ ); -+ -+/* -+* FUNCTION : HW_MBOX_EventEnable -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL -+* -+* Identifier : mailBoxId -+* Type : const HW_MBOX_Id_t -+* Description : Mail Box Sub module Id to enable -+* -+* Identifier : userId -+* Type : const HW_MBOX_UserId_t -+* Description : Mail box User Id to enable -+* -+* Identifier : enableIrq -+* Type : const u32 -+* Description : Irq value to enable -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM A Pointer Paramater was set to NULL -+* RET_INVALID_ID Invalid Id used -+* -+* PURPOSE: : this function enables the specified IRQ. -+*/ -+extern HW_STATUS HW_MBOX_EventEnable( -+ const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ const HW_MBOX_UserId_t userId, -+ const u32 events -+ ); -+ -+/* -+* FUNCTION : HW_MBOX_EventDisable -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL -+* -+* Identifier : mailBoxId -+* Type : const HW_MBOX_Id_t -+* Description : Mail Box Sub module Id to disable -+* -+* Identifier : userId -+* Type : const HW_MBOX_UserId_t -+* Description : Mail box User Id to disable -+* -+* Identifier : enableIrq -+* Type : const u32 -+* Description : Irq value to disable -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM A Pointer Paramater was set to NULL -+* RET_INVALID_ID Invalid Id used -+* -+* PURPOSE: : this function disables the specified IRQ. -+*/ -+extern HW_STATUS HW_MBOX_EventDisable( -+ const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ const HW_MBOX_UserId_t userId, -+ const u32 events -+ ); -+ -+/* -+* FUNCTION : HW_MBOX_EventAck -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of Mailbox module -+* -+* Identifier : mailBoxId -+* Type : const HW_MBOX_Id_t -+* Description : Mail Box Sub module Id to set -+* -+* Identifier : userId -+* Type : const HW_MBOX_UserId_t -+* Description : Mail box User Id to set -+* -+* Identifier : irqStatus -+* Type : const u32 -+* Description : The value to write IRQ status -+* -+* OUTPUTS: -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM Address Paramater was set to 0 -+* RET_INVALID_ID Invalid Id used -+* -+* PURPOSE: : this function sets the status of the specified IRQ. 
-+*/ -+extern HW_STATUS HW_MBOX_EventAck( -+ const void __iomem *baseAddress, -+ const HW_MBOX_Id_t mailBoxId, -+ const HW_MBOX_UserId_t userId, -+ const u32 event -+ ); -+ -+/* -+* FUNCTION : HW_MBOX_saveSettings -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of Mailbox module -+* -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL -+* RET_INVALID_ID Invalid Id used -+* RET_EMPTY Mailbox empty -+* -+* PURPOSE: : this function saves the context of mailbox -+*/ -+extern HW_STATUS HW_MBOX_saveSettings(void __iomem *baseAddres); -+ -+/* -+* FUNCTION : HW_MBOX_restoreSettings -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of Mailbox module -+* -+* -+* RETURNS: -+* -+* Type : ReturnCode_t -+* Description : RET_OK No errors occured -+* RET_BAD_NULL_PARAM Address/pointer Paramater was set to 0/NULL -+* RET_INVALID_ID Invalid Id used -+* RET_EMPTY Mailbox empty -+* -+* PURPOSE: : this function restores the context of mailbox -+*/ -+extern HW_STATUS HW_MBOX_restoreSettings(void __iomem *baseAddres); -+ -+static inline void HW_MBOX_initSettings(void __iomem *baseAddres) -+{ -+ HW_MBOX_restoreSettings(baseAddres); -+} -+ -+#endif /* __MBOX_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mmu.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mmu.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,599 @@ -+/* -+ * hw_mmu.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== hw_mmu.c ======== -+ * Description: -+ * API definitions to setup MMU TLB and PTE -+ * -+ *! Revision History: -+ *! ================ -+ *! 19-Apr-2004 sb TLBAdd and TLBFlush input the page size in bytes instead -+ of an enum. TLBAdd inputs mapping attributes struct instead -+ of individual arguments. -+ Removed MMU.h and other cosmetic updates. -+ *! 08-Mar-2004 sb Added the Page Table Management APIs -+ *! 
16 Feb 2003 sb: Initial version -+ */ -+ -+#include -+#include -+#include "MMURegAcM.h" -+#include -+#include -+#include -+ -+#define MMU_BASE_VAL_MASK 0xFC00 -+#define MMU_PAGE_MAX 3 -+#define MMU_ELEMENTSIZE_MAX 3 -+#define MMU_ADDR_MASK 0xFFFFF000 -+#define MMU_TTB_MASK 0xFFFFC000 -+#define MMU_SECTION_ADDR_MASK 0xFFF00000 -+#define MMU_SSECTION_ADDR_MASK 0xFF000000 -+#define MMU_PAGE_TABLE_MASK 0xFFFFFC00 -+#define MMU_LARGE_PAGE_MASK 0xFFFF0000 -+#define MMU_SMALL_PAGE_MASK 0xFFFFF000 -+ -+#define MMU_LOAD_TLB 0x00000001 -+ -+/* HW_MMUPageSize_t: Enumerated Type used to specify the MMU Page Size(SLSS) */ -+enum HW_MMUPageSize_t { -+ HW_MMU_SECTION, -+ HW_MMU_LARGE_PAGE, -+ HW_MMU_SMALL_PAGE, -+ HW_MMU_SUPERSECTION -+} ; -+ -+/* -+* FUNCTION : MMU_FlushEntry -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of MMU module -+* -+* RETURNS: -+* -+* Type : HW_STATUS -+* Description : RET_OK -- No errors occured -+* RET_BAD_NULL_PARAM -- A Pointer -+* Paramater was set to NULL -+* -+* PURPOSE: : Flush the TLB entry pointed by the -+* lock counter register -+* even if this entry is set protected -+* -+* METHOD: : Check the Input parameter and Flush a -+* single entry in the TLB. -+*/ -+static HW_STATUS MMU_FlushEntry(const void __iomem *baseAddress); -+ -+/* -+* FUNCTION : MMU_SetCAMEntry -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* TypE : const u32 -+* Description : Base Address of instance of MMU module -+* -+* Identifier : pageSize -+* TypE : const u32 -+* Description : It indicates the page size -+* -+* Identifier : preservedBit -+* Type : const u32 -+* Description : It indicates the TLB entry is preserved entry -+* or not -+* -+* Identifier : validBit -+* Type : const u32 -+* Description : It indicates the TLB entry is valid entry or not -+* -+* -+* Identifier : virtualAddrTag -+* Type : const u32 -+* Description : virtual Address -+* -+* RETURNS: -+* -+* Type : HW_STATUS -+* Description : RET_OK -- No errors occured -+* RET_BAD_NULL_PARAM -- A Pointer Paramater -+* was set to NULL -+* RET_PARAM_OUT_OF_RANGE -- Input Parameter out -+* of Range -+* -+* PURPOSE: : Set MMU_CAM reg -+* -+* METHOD: : Check the Input parameters and set the CAM entry. -+*/ -+static HW_STATUS MMU_SetCAMEntry(const void __iomem *baseAddress, -+ const u32 pageSize, -+ const u32 preservedBit, -+ const u32 validBit, -+ const u32 virtualAddrTag); -+ -+/* -+* FUNCTION : MMU_SetRAMEntry -+* -+* INPUTS: -+* -+* Identifier : baseAddress -+* Type : const u32 -+* Description : Base Address of instance of MMU module -+* -+* Identifier : physicalAddr -+* Type : const u32 -+* Description : Physical Address to which the corresponding -+* virtual Address shouldpoint -+* -+* Identifier : endianism -+* Type : HW_Endianism_t -+* Description : endianism for the given page -+* -+* Identifier : elementSize -+* Type : HW_ElementSize_t -+* Description : The element size ( 8,16, 32 or 64 bit) -+* -+* Identifier : mixedSize -+* Type : HW_MMUMixedSize_t -+* Description : Element Size to follow CPU or TLB -+* -+* RETURNS: -+* -+* Type : HW_STATUS -+* Description : RET_OK -- No errors occured -+* RET_BAD_NULL_PARAM -- A Pointer Paramater -+* was set to NULL -+* RET_PARAM_OUT_OF_RANGE -- Input Parameter -+* out of Range -+* -+* PURPOSE: : Set MMU_CAM reg -+* -+* METHOD: : Check the Input parameters and set the RAM entry. 
-+*/ -+static HW_STATUS MMU_SetRAMEntry(const void __iomem *baseAddress, -+ const u32 physicalAddr, -+ enum HW_Endianism_t endianism, -+ enum HW_ElementSize_t elementSize, -+ enum HW_MMUMixedSize_t mixedSize); -+ -+/* HW FUNCTIONS */ -+ -+HW_STATUS HW_MMU_Enable(const void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_CNTLMMUEnableWrite32(baseAddress, HW_SET); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_Disable(const void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_CNTLMMUEnableWrite32(baseAddress, HW_CLEAR); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_NumLockedSet(const void __iomem *baseAddress, -+ u32 numLockedEntries) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_LOCKBaseValueWrite32(baseAddress, numLockedEntries); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_VictimNumSet(const void __iomem *baseAddress, -+ u32 victimEntryNum) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_LOCKCurrentVictimWrite32(baseAddress, victimEntryNum); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_EventAck(const void __iomem *baseAddress, u32 irqMask) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_IRQSTATUSWriteRegister32(baseAddress, irqMask); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_EventDisable(const void __iomem *baseAddress, -+ u32 irqMask) -+{ -+ HW_STATUS status = RET_OK; -+ u32 irqReg; -+ -+ irqReg = MMUMMU_IRQENABLEReadRegister32(baseAddress); -+ -+ MMUMMU_IRQENABLEWriteRegister32(baseAddress, irqReg & ~irqMask); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_EventEnable(const void __iomem *baseAddress, u32 irqMask) -+{ -+ HW_STATUS status = RET_OK; -+ u32 irqReg; -+ -+ irqReg = MMUMMU_IRQENABLEReadRegister32(baseAddress); -+ -+ MMUMMU_IRQENABLEWriteRegister32(baseAddress, irqReg | irqMask); -+ -+ return status; -+} -+ -+ -+HW_STATUS HW_MMU_EventStatus(const void __iomem *baseAddress, u32 *irqMask) -+{ -+ HW_STATUS status = RET_OK; -+ -+ *irqMask = MMUMMU_IRQSTATUSReadRegister32(baseAddress); -+ -+ return status; -+} -+ -+ -+HW_STATUS HW_MMU_FaultAddrRead(const void __iomem *baseAddress, u32 *addr) -+{ -+ HW_STATUS status = RET_OK; -+ -+ /*Check the input Parameters*/ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* read values from register */ -+ *addr = MMUMMU_FAULT_ADReadRegister32(baseAddress); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_TTBSet(const void __iomem *baseAddress, u32 TTBPhysAddr) -+{ -+ HW_STATUS status = RET_OK; -+ u32 loadTTB; -+ -+ /*Check the input Parameters*/ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ -+ loadTTB = TTBPhysAddr & ~0x7FUL; -+ /* write values to register */ -+ MMUMMU_TTBWriteRegister32(baseAddress, loadTTB); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_TWLEnable(const void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_CNTLTWLEnableWrite32(baseAddress, HW_SET); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_TWLDisable(const void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ -+ MMUMMU_CNTLTWLEnableWrite32(baseAddress, HW_CLEAR); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_TLBFlush(const void __iomem *baseAddress, u32 virtualAddr, -+ u32 pageSize) -+{ -+ HW_STATUS status = RET_OK; -+ u32 virtualAddrTag; -+ enum HW_MMUPageSize_t pgSizeBits; -+ -+ switch (pageSize) { -+ case HW_PAGE_SIZE_4KB: -+ pgSizeBits = HW_MMU_SMALL_PAGE; -+ break; -+ -+ case HW_PAGE_SIZE_64KB: -+ pgSizeBits = HW_MMU_LARGE_PAGE; -+ break; -+ -+ case HW_PAGE_SIZE_1MB: -+ 
pgSizeBits = HW_MMU_SECTION; -+ break; -+ -+ case HW_PAGE_SIZE_16MB: -+ pgSizeBits = HW_MMU_SUPERSECTION; -+ break; -+ -+ default: -+ return RET_FAIL; -+ } -+ -+ /* Generate the 20-bit tag from virtual address */ -+ virtualAddrTag = ((virtualAddr & MMU_ADDR_MASK) >> 12); -+ -+ MMU_SetCAMEntry(baseAddress, pgSizeBits, 0, 0, virtualAddrTag); -+ -+ MMU_FlushEntry(baseAddress); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_TLBAdd(const void __iomem *baseAddress, -+ u32 physicalAddr, -+ u32 virtualAddr, -+ u32 pageSize, -+ u32 entryNum, -+ struct HW_MMUMapAttrs_t *mapAttrs, -+ enum HW_SetClear_t preservedBit, -+ enum HW_SetClear_t validBit) -+{ -+ HW_STATUS status = RET_OK; -+ u32 lockReg; -+ u32 virtualAddrTag; -+ enum HW_MMUPageSize_t mmuPgSize; -+ -+ /*Check the input Parameters*/ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(pageSize, MMU_PAGE_MAX, RET_PARAM_OUT_OF_RANGE, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(mapAttrs->elementSize, MMU_ELEMENTSIZE_MAX, -+ RET_PARAM_OUT_OF_RANGE, RES_MMU_BASE + -+ RES_INVALID_INPUT_PARAM); -+ -+ switch (pageSize) { -+ case HW_PAGE_SIZE_4KB: -+ mmuPgSize = HW_MMU_SMALL_PAGE; -+ break; -+ -+ case HW_PAGE_SIZE_64KB: -+ mmuPgSize = HW_MMU_LARGE_PAGE; -+ break; -+ -+ case HW_PAGE_SIZE_1MB: -+ mmuPgSize = HW_MMU_SECTION; -+ break; -+ -+ case HW_PAGE_SIZE_16MB: -+ mmuPgSize = HW_MMU_SUPERSECTION; -+ break; -+ -+ default: -+ return RET_FAIL; -+ } -+ -+ lockReg = MMUMMU_LOCKReadRegister32(baseAddress); -+ -+ /* Generate the 20-bit tag from virtual address */ -+ virtualAddrTag = ((virtualAddr & MMU_ADDR_MASK) >> 12); -+ -+ /* Write the fields in the CAM Entry Register */ -+ MMU_SetCAMEntry(baseAddress, mmuPgSize, preservedBit, validBit, -+ virtualAddrTag); -+ -+ /* Write the different fields of the RAM Entry Register */ -+ /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit)*/ -+ MMU_SetRAMEntry(baseAddress, physicalAddr, mapAttrs->endianism, -+ mapAttrs->elementSize, mapAttrs->mixedSize); -+ -+ /* Update the MMU Lock Register */ -+ /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1)*/ -+ MMUMMU_LOCKCurrentVictimWrite32(baseAddress, entryNum); -+ -+ /* Enable loading of an entry in TLB by writing 1 -+ into LD_TLB_REG register */ -+ MMUMMU_LD_TLBWriteRegister32(baseAddress, MMU_LOAD_TLB); -+ -+ -+ MMUMMU_LOCKWriteRegister32(baseAddress, lockReg); -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_PteSet(const u32 pgTblVa, -+ u32 physicalAddr, -+ u32 virtualAddr, -+ u32 pageSize, -+ struct HW_MMUMapAttrs_t *mapAttrs) -+{ -+ HW_STATUS status = RET_OK; -+ u32 pteAddr, pteVal; -+ s32 numEntries = 1; -+ -+ switch (pageSize) { -+ case HW_PAGE_SIZE_4KB: -+ pteAddr = HW_MMU_PteAddrL2(pgTblVa, -+ virtualAddr & MMU_SMALL_PAGE_MASK); -+ pteVal = ((physicalAddr & MMU_SMALL_PAGE_MASK) | -+ (mapAttrs->endianism << 9) | -+ (mapAttrs->elementSize << 4) | -+ (mapAttrs->mixedSize << 11) | 2 -+ ); -+ break; -+ -+ case HW_PAGE_SIZE_64KB: -+ numEntries = 16; -+ pteAddr = HW_MMU_PteAddrL2(pgTblVa, -+ virtualAddr & MMU_LARGE_PAGE_MASK); -+ pteVal = ((physicalAddr & MMU_LARGE_PAGE_MASK) | -+ (mapAttrs->endianism << 9) | -+ (mapAttrs->elementSize << 4) | -+ (mapAttrs->mixedSize << 11) | 1 -+ ); -+ break; -+ -+ case HW_PAGE_SIZE_1MB: -+ pteAddr = HW_MMU_PteAddrL1(pgTblVa, -+ virtualAddr & MMU_SECTION_ADDR_MASK); -+ pteVal = ((((physicalAddr & MMU_SECTION_ADDR_MASK) | -+ (mapAttrs->endianism << 15) | -+ (mapAttrs->elementSize << 10) | -+ 
(mapAttrs->mixedSize << 17)) & -+ ~0x40000) | 0x2 -+ ); -+ break; -+ -+ case HW_PAGE_SIZE_16MB: -+ numEntries = 16; -+ pteAddr = HW_MMU_PteAddrL1(pgTblVa, -+ virtualAddr & MMU_SSECTION_ADDR_MASK); -+ pteVal = (((physicalAddr & MMU_SSECTION_ADDR_MASK) | -+ (mapAttrs->endianism << 15) | -+ (mapAttrs->elementSize << 10) | -+ (mapAttrs->mixedSize << 17) -+ ) | 0x40000 | 0x2 -+ ); -+ break; -+ -+ case HW_MMU_COARSE_PAGE_SIZE: -+ pteAddr = HW_MMU_PteAddrL1(pgTblVa, -+ virtualAddr & MMU_SECTION_ADDR_MASK); -+ pteVal = (physicalAddr & MMU_PAGE_TABLE_MASK) | 1; -+ break; -+ -+ default: -+ return RET_FAIL; -+ } -+ -+ while (--numEntries >= 0) -+ ((u32 *)pteAddr)[numEntries] = pteVal; -+ -+ return status; -+} -+ -+HW_STATUS HW_MMU_PteClear(const u32 pgTblVa, -+ u32 virtualAddr, -+ u32 pgSize) -+{ -+ HW_STATUS status = RET_OK; -+ u32 pteAddr; -+ s32 numEntries = 1; -+ -+ switch (pgSize) { -+ case HW_PAGE_SIZE_4KB: -+ pteAddr = HW_MMU_PteAddrL2(pgTblVa, -+ virtualAddr & MMU_SMALL_PAGE_MASK); -+ break; -+ -+ case HW_PAGE_SIZE_64KB: -+ numEntries = 16; -+ pteAddr = HW_MMU_PteAddrL2(pgTblVa, -+ virtualAddr & MMU_LARGE_PAGE_MASK); -+ break; -+ -+ case HW_PAGE_SIZE_1MB: -+ case HW_MMU_COARSE_PAGE_SIZE: -+ pteAddr = HW_MMU_PteAddrL1(pgTblVa, -+ virtualAddr & MMU_SECTION_ADDR_MASK); -+ break; -+ -+ case HW_PAGE_SIZE_16MB: -+ numEntries = 16; -+ pteAddr = HW_MMU_PteAddrL1(pgTblVa, -+ virtualAddr & MMU_SSECTION_ADDR_MASK); -+ break; -+ -+ default: -+ return RET_FAIL; -+ } -+ -+ while (--numEntries >= 0) -+ ((u32 *)pteAddr)[numEntries] = 0; -+ -+ return status; -+} -+ -+/* MMU_FlushEntry */ -+static HW_STATUS MMU_FlushEntry(const void __iomem *baseAddress) -+{ -+ HW_STATUS status = RET_OK; -+ u32 flushEntryData = 0x1; -+ -+ /*Check the input Parameters*/ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ -+ /* write values to register */ -+ MMUMMU_FLUSH_ENTRYWriteRegister32(baseAddress, flushEntryData); -+ -+ return status; -+} -+ -+/* MMU_SetCAMEntry */ -+static HW_STATUS MMU_SetCAMEntry(const void __iomem *baseAddress, -+ const u32 pageSize, -+ const u32 preservedBit, -+ const u32 validBit, -+ const u32 virtualAddrTag) -+{ -+ HW_STATUS status = RET_OK; -+ u32 mmuCamReg; -+ -+ /*Check the input Parameters*/ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ -+ mmuCamReg = (virtualAddrTag << 12); -+ mmuCamReg = (mmuCamReg) | (pageSize) | (validBit << 2) | -+ (preservedBit << 3) ; -+ -+ /* write values to register */ -+ MMUMMU_CAMWriteRegister32(baseAddress, mmuCamReg); -+ -+ return status; -+} -+ -+/* MMU_SetRAMEntry */ -+static HW_STATUS MMU_SetRAMEntry(const void __iomem *baseAddress, -+ const u32 physicalAddr, -+ enum HW_Endianism_t endianism, -+ enum HW_ElementSize_t elementSize, -+ enum HW_MMUMixedSize_t mixedSize) -+{ -+ HW_STATUS status = RET_OK; -+ u32 mmuRamReg; -+ -+ /*Check the input Parameters*/ -+ CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM, -+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM); -+ CHECK_INPUT_RANGE_MIN0(elementSize, MMU_ELEMENTSIZE_MAX, -+ RET_PARAM_OUT_OF_RANGE, RES_MMU_BASE + -+ RES_INVALID_INPUT_PARAM); -+ -+ -+ mmuRamReg = (physicalAddr & MMU_ADDR_MASK); -+ mmuRamReg = (mmuRamReg) | ((endianism << 9) | (elementSize << 7) | -+ (mixedSize << 6)); -+ -+ /* write values to register */ -+ MMUMMU_RAMWriteRegister32(baseAddress, mmuRamReg); -+ -+ return status; -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.h 
linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mmu.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_mmu.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_mmu.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,177 @@ -+/* -+ * hw_mmu.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== hw_mmu.h ======== -+ * Description: -+ * MMU types and API declarations -+ * -+ *! Revision History: -+ *! ================ -+ *! 19-Apr-2004 sb Moved & renamed endianness, page size, element size -+ TLBAdd takes in MMUMapAttrs instead of separate arguments -+ *! 08-Mar-2004 sb Added the Page Table management APIs -+ *! 16 Feb 2003 sb: Initial version -+ */ -+#ifndef __HW_MMU_H -+#define __HW_MMU_H -+ -+#include -+ -+/* Bitmasks for interrupt sources */ -+#define HW_MMU_TRANSLATION_FAULT 0x2 -+#define HW_MMU_ALL_INTERRUPTS 0x1F -+ -+#define HW_MMU_COARSE_PAGE_SIZE 0x400 -+ -+/* HW_MMUMixedSize_t: Enumerated Type used to specify whether to follow -+ CPU/TLB Element size */ -+enum HW_MMUMixedSize_t { -+ HW_MMU_TLBES, -+ HW_MMU_CPUES -+ -+} ; -+ -+/* HW_MMUMapAttrs_t: Struct containing MMU mapping attributes */ -+struct HW_MMUMapAttrs_t { -+ enum HW_Endianism_t endianism; -+ enum HW_ElementSize_t elementSize; -+ enum HW_MMUMixedSize_t mixedSize; -+ bool donotlockmpupage; -+} ; -+ -+extern HW_STATUS HW_MMU_Enable(const void __iomem *baseAddress); -+ -+extern HW_STATUS HW_MMU_Disable(const void __iomem *baseAddress); -+ -+extern HW_STATUS HW_MMU_NumLockedSet(const void __iomem *baseAddress, -+ u32 numLockedEntries); -+ -+extern HW_STATUS HW_MMU_VictimNumSet(const void __iomem *baseAddress, -+ u32 victimEntryNum); -+ -+/* For MMU faults */ -+extern HW_STATUS HW_MMU_EventAck(const void __iomem *baseAddress, -+ u32 irqMask); -+ -+extern HW_STATUS HW_MMU_EventDisable(const void __iomem *baseAddress, -+ u32 irqMask); -+ -+extern HW_STATUS HW_MMU_EventEnable(const void __iomem *baseAddress, -+ u32 irqMask); -+ -+extern HW_STATUS HW_MMU_EventStatus(const void __iomem *baseAddress, -+ u32 *irqMask); -+ -+extern HW_STATUS HW_MMU_FaultAddrRead(const void __iomem *baseAddress, -+ u32 *addr); -+ -+/* Set the TT base address */ -+extern HW_STATUS HW_MMU_TTBSet(const void __iomem *baseAddress, -+ u32 TTBPhysAddr); -+ -+extern HW_STATUS HW_MMU_TWLEnable(const void __iomem *baseAddress); -+ -+extern HW_STATUS HW_MMU_TWLDisable(const void __iomem *baseAddress); -+ -+extern HW_STATUS HW_MMU_TLBFlush(const void __iomem *baseAddress, -+ u32 virtualAddr, -+ u32 pageSize); -+ -+extern HW_STATUS HW_MMU_TLBAdd(const void __iomem *baseAddress, -+ u32 physicalAddr, -+ u32 virtualAddr, -+ u32 pageSize, -+ u32 entryNum, -+ struct HW_MMUMapAttrs_t *mapAttrs, -+ enum HW_SetClear_t preservedBit, -+ enum HW_SetClear_t validBit); -+ -+ -+/* For PTEs */ -+extern HW_STATUS HW_MMU_PteSet(const u32 pgTblVa, -+ u32 physicalAddr, -+ u32 virtualAddr, -+ u32 pageSize, -+ struct HW_MMUMapAttrs_t *mapAttrs); -+ -+extern HW_STATUS HW_MMU_PteClear(const u32 pgTblVa, -+ u32 pgSize, -+ u32 
virtualAddr); -+ -+static inline u32 HW_MMU_PteAddrL1(u32 L1_base, u32 va) -+{ -+ u32 pteAddr; -+ u32 VA_31_to_20; -+ -+ VA_31_to_20 = va >> (20 - 2); /* Left-shift by 2 here itself */ -+ VA_31_to_20 &= 0xFFFFFFFCUL; -+ pteAddr = L1_base + VA_31_to_20; -+ -+ return pteAddr; -+} -+ -+static inline u32 HW_MMU_PteAddrL2(u32 L2_base, u32 va) -+{ -+ u32 pteAddr; -+ -+ pteAddr = (L2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC); -+ -+ return pteAddr; -+} -+ -+static inline u32 HW_MMU_PteCoarseL1(u32 pteVal) -+{ -+ u32 pteCoarse; -+ -+ pteCoarse = pteVal & 0xFFFFFC00; -+ -+ return pteCoarse; -+} -+ -+static inline u32 HW_MMU_PteSizeL1(u32 pteVal) -+{ -+ u32 pteSize = 0; -+ -+ if ((pteVal & 0x3) == 0x1) { -+ /* Points to L2 PT */ -+ pteSize = HW_MMU_COARSE_PAGE_SIZE; -+ } -+ -+ if ((pteVal & 0x3) == 0x2) { -+ if (pteVal & (1 << 18)) -+ pteSize = HW_PAGE_SIZE_16MB; -+ else -+ pteSize = HW_PAGE_SIZE_1MB; -+ } -+ -+ return pteSize; -+} -+ -+static inline u32 HW_MMU_PteSizeL2(u32 pteVal) -+{ -+ u32 pteSize = 0; -+ -+ if (pteVal & 0x2) -+ pteSize = HW_PAGE_SIZE_4KB; -+ else if (pteVal & 0x1) -+ pteSize = HW_PAGE_SIZE_64KB; -+ -+ return pteSize; -+} -+ -+#endif /* __HW_MMU_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_prcm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_prcm.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,167 @@ -+/* -+ * hw_prcm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== hw_prcm.c ======== -+ * Description: -+ * API definitions to configure PRCM (Power, Reset & Clocks Manager) -+ * -+ *! Revision History: -+ *! ================ -+ *! 
16 Feb 2003 sb: Initial version -+ */ -+ -+#include -+#include "PRCMRegAcM.h" -+#include -+#include -+ -+static HW_STATUS HW_RST_WriteVal(const void __iomem *baseAddress, -+ enum HW_RstModule_t r, -+ enum HW_SetClear_t val); -+ -+HW_STATUS HW_RST_Reset(const void __iomem *baseAddress, enum HW_RstModule_t r) -+{ -+ return HW_RST_WriteVal(baseAddress, r, HW_SET); -+} -+ -+HW_STATUS HW_RST_UnReset(const void __iomem *baseAddress, enum HW_RstModule_t r) -+{ -+ return HW_RST_WriteVal(baseAddress, r, HW_CLEAR); -+} -+ -+static HW_STATUS HW_RST_WriteVal(const void __iomem *baseAddress, -+ enum HW_RstModule_t r, -+ enum HW_SetClear_t val) -+{ -+ HW_STATUS status = RET_OK; -+ -+ switch (r) { -+ case HW_RST1_IVA2: -+ PRM_RSTCTRL_IVA2RST1_DSPWrite32(baseAddress, val); -+ break; -+ case HW_RST2_IVA2: -+ PRM_RSTCTRL_IVA2RST2_DSPWrite32(baseAddress, val); -+ break; -+ case HW_RST3_IVA2: -+ PRM_RSTCTRL_IVA2RST3_DSPWrite32(baseAddress, val); -+ break; -+ default: -+ status = RET_FAIL; -+ break; -+ } -+ return status; -+} -+ -+HW_STATUS HW_PWR_IVA2StateGet(const void __iomem *baseAddress, -+ enum HW_PwrModule_t p, enum HW_PwrState_t *value) -+{ -+ HW_STATUS status = RET_OK; -+ u32 temp; -+ -+ switch (p) { -+ case HW_PWR_DOMAIN_DSP: -+ /* wait until Transition is complete */ -+ do { -+ /* mdelay(1); */ -+ temp = PRCMPM_PWSTST_IVA2InTransitionRead32 -+ (baseAddress); -+ -+ } while (temp); -+ -+ temp = PRCMPM_PWSTST_IVA2ReadRegister32(baseAddress); -+ *value = PRCMPM_PWSTST_IVA2PowerStateStGet32(temp); -+ break; -+ -+ default: -+ status = RET_FAIL; -+ break; -+ } -+ return status; -+} -+ -+HW_STATUS HW_PWRST_IVA2RegGet(const void __iomem *baseAddress, u32 *value) -+{ -+ HW_STATUS status = RET_OK; -+ -+ *value = PRCMPM_PWSTST_IVA2ReadRegister32(baseAddress); -+ -+ return status; -+} -+ -+ -+HW_STATUS HW_PWR_IVA2PowerStateSet(const void __iomem *baseAddress, -+ enum HW_PwrModule_t p, -+ enum HW_PwrState_t value) -+{ -+ HW_STATUS status = RET_OK; -+ -+ switch (p) { -+ case HW_PWR_DOMAIN_DSP: -+ switch (value) { -+ case HW_PWR_STATE_ON: -+ PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32(baseAddress); -+ break; -+ case HW_PWR_STATE_RET: -+ PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32(baseAddress); -+ break; -+ case HW_PWR_STATE_OFF: -+ PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32(baseAddress); -+ break; -+ default: -+ status = RET_FAIL; -+ break; -+ } -+ break; -+ -+ default: -+ status = RET_FAIL; -+ break; -+ } -+ -+ return status; -+} -+ -+HW_STATUS HW_PWR_CLKCTRL_IVA2RegSet(const void __iomem *baseAddress, -+ enum HW_TransitionState_t val) -+{ -+ HW_STATUS status = RET_OK; -+ -+ PRCMCM_CLKSTCTRL_IVA2WriteRegister32(baseAddress, val); -+ -+ return status; -+ -+} -+ -+HW_STATUS HW_RSTST_RegGet(const void __iomem *baseAddress, -+ enum HW_RstModule_t m, u32 *value) -+{ -+ HW_STATUS status = RET_OK; -+ -+ *value = PRCMRM_RSTST_DSPReadRegister32(baseAddress); -+ -+ return status; -+} -+ -+HW_STATUS HW_RSTCTRL_RegGet(const void __iomem *baseAddress, -+ enum HW_RstModule_t m, u32 *value) -+{ -+ HW_STATUS status = RET_OK; -+ -+ *value = PRCMRM_RSTCTRL_DSPReadRegister32(baseAddress); -+ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_prcm.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/hw_prcm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/hw_prcm.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,169 @@ -+/* -+ * hw_prcm.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
-+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== hw_prcm.h ======== -+ * Description: -+ * PRCM types and API declarations -+ * -+ *! Revision History: -+ *! ================ -+ *! 16 Feb 2003 sb: Initial version -+ */ -+ -+#ifndef __HW_PRCM_H -+#define __HW_PRCM_H -+ -+/* HW_ClkModule: Enumerated Type used to specify the clock domain */ -+ -+enum HW_ClkModule_t { -+/* DSP Domain */ -+ HW_CLK_DSP_CPU, -+ HW_CLK_DSP_IPI_MMU, -+ HW_CLK_IVA_ARM, -+ HW_CLK_IVA_COP, /* IVA Coprocessor */ -+ -+/* Core Domain */ -+ HW_CLK_FN_WDT4, /* Functional Clock */ -+ HW_CLK_FN_WDT3, -+ HW_CLK_FN_UART2, -+ HW_CLK_FN_UART1, -+ HW_CLK_GPT5, -+ HW_CLK_GPT6, -+ HW_CLK_GPT7, -+ HW_CLK_GPT8, -+ -+ HW_CLK_IF_WDT4, /* Interface Clock */ -+ HW_CLK_IF_WDT3, -+ HW_CLK_IF_UART2, -+ HW_CLK_IF_UART1, -+ HW_CLK_IF_MBOX -+ -+} ; -+ -+enum HW_ClkSubsys_t { -+ HW_CLK_DSPSS, -+ HW_CLK_IVASS -+} ; -+ -+/* HW_GPtimers: General purpose timers */ -+enum HW_GPtimer_t { -+ HW_GPT5 = 5, -+ HW_GPT6 = 6, -+ HW_GPT7 = 7, -+ HW_GPT8 = 8 -+} ; -+ -+ -+/* GP timers Input clock type: General purpose timers */ -+enum HW_Clocktype_t { -+ HW_CLK_32KHz = 0, -+ HW_CLK_SYS = 1, -+ HW_CLK_EXT = 2 -+} ; -+ -+/* HW_ClkDiv: Clock divisors */ -+enum HW_ClkDiv_t { -+ HW_CLK_DIV_1 = 0x1, -+ HW_CLK_DIV_2 = 0x2, -+ HW_CLK_DIV_3 = 0x3, -+ HW_CLK_DIV_4 = 0x4, -+ HW_CLK_DIV_6 = 0x6, -+ HW_CLK_DIV_8 = 0x8, -+ HW_CLK_DIV_12 = 0xC -+} ; -+ -+/* HW_RstModule: Enumerated Type used to specify the module to be reset */ -+enum HW_RstModule_t { -+ HW_RST1_IVA2, /* Reset the DSP */ -+ HW_RST2_IVA2, /* Reset MMU and LEON HWa */ -+ HW_RST3_IVA2 /* Reset LEON sequencer */ -+} ; -+ -+/* HW_PwrModule: Enumerated Type used to specify the power domain */ -+enum HW_PwrModule_t { -+/* Domains */ -+ HW_PWR_DOMAIN_CORE, -+ HW_PWR_DOMAIN_MPU, -+ HW_PWR_DOMAIN_WAKEUP, -+ HW_PWR_DOMAIN_DSP, -+ -+/* Sub-domains */ -+ HW_PWR_DSP_IPI, /* IPI = Intrusive Port Interface */ -+ HW_PWR_IVA_ISP /* ISP = Intrusive Slave Port */ -+} ; -+ -+enum HW_PwrState_t { -+ HW_PWR_STATE_OFF, -+ HW_PWR_STATE_RET, -+ HW_PWR_STATE_INACT, -+ HW_PWR_STATE_ON = 3 -+} ; -+ -+enum HW_ForceState_t { -+ HW_FORCE_OFF, -+ HW_FORCE_ON -+} ; -+ -+enum HW_IdleState_t { -+ HW_ACTIVE, -+ HW_STANDBY -+ -+} ; -+ -+enum HW_TransitionState_t { -+ HW_AUTOTRANS_DIS, -+ HW_SW_SUP_SLEEP, -+ HW_SW_SUP_WAKEUP, -+ HW_AUTOTRANS_EN -+} ; -+ -+ -+extern HW_STATUS HW_RST_Reset(const void __iomem *baseAddress, -+ enum HW_RstModule_t r); -+ -+extern HW_STATUS HW_RST_UnReset(const void __iomem *baseAddress, -+ enum HW_RstModule_t r); -+ -+extern HW_STATUS HW_RSTCTRL_RegGet(const void __iomem *baseAddress, -+ enum HW_RstModule_t p, -+ u32 *value); -+extern HW_STATUS HW_RSTST_RegGet(const void __iomem *baseAddress, -+ enum HW_RstModule_t p, u32 *value); -+ -+extern HW_STATUS HW_PWR_PowerStateSet(const u32 baseAddress, -+ enum HW_PwrModule_t p, -+ enum HW_PwrState_t value); -+ -+extern HW_STATUS HW_CLK_SetInputClock(const u32 baseAddress, -+ enum HW_GPtimer_t gpt, -+ enum HW_Clocktype_t c); -+ -+extern HW_STATUS HW_PWR_IVA2StateGet(const void __iomem *baseAddress, -+ enum HW_PwrModule_t p, -+ enum 
HW_PwrState_t *value); -+ -+extern HW_STATUS HW_PWRST_IVA2RegGet(const void __iomem *baseAddress, -+ u32 *value); -+ -+extern HW_STATUS HW_PWR_IVA2PowerStateSet(const void __iomem *baseAddress, -+ enum HW_PwrModule_t p, -+ enum HW_PwrState_t value); -+ -+extern HW_STATUS HW_PWR_CLKCTRL_IVA2RegSet(const void __iomem *baseAddress, -+ enum HW_TransitionState_t val); -+ -+#endif /* __HW_PRCM_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IPIAccInt.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/IPIAccInt.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IPIAccInt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/IPIAccInt.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,41 @@ -+/* -+ * IPIAccInt.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef _IPI_ACC_INT_H -+#define _IPI_ACC_INT_H -+ -+/* Bitfield mask and offset declarations */ -+#define SYSC_IVA2BOOTMOD_OFFSET 0x404 -+#define SYSC_IVA2BOOTADDR_OFFSET 0x400 -+#define SYSC_IVA2BOOTADDR_MASK 0xfffffc00 -+ -+ -+/* The following represent the enumerated values for each bitfield */ -+ -+enum IPIIPI_SYSCONFIGAutoIdleE { -+ IPIIPI_SYSCONFIGAutoIdleclkfree = 0x0000, -+ IPIIPI_SYSCONFIGAutoIdleautoclkgate = 0x0001 -+} ; -+ -+enum IPIIPI_ENTRYElemSizeValueE { -+ IPIIPI_ENTRYElemSizeValueElemSz8b = 0x0000, -+ IPIIPI_ENTRYElemSizeValueElemSz16b = 0x0001, -+ IPIIPI_ENTRYElemSizeValueElemSz32b = 0x0002, -+ IPIIPI_ENTRYElemSizeValueReserved = 0x0003 -+} ; -+ -+#endif /* _IPI_ACC_INT_H */ -+/* EOF */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IVA2RegAcM.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/IVA2RegAcM.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/IVA2RegAcM.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/IVA2RegAcM.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,28 @@ -+/* -+ * IVA1RegAcM.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+ -+ -+#ifndef _IVA2_REG_ACM_H -+#define _IVA2_REG_ACM_H -+ -+#include -+#include -+ -+#define SYSC_IVA2BOOTMOD_OFFSET 0x404 -+#define SYSC_IVA2BOOTADDR_OFFSET 0x400 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBAccInt.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MLBAccInt.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBAccInt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MLBAccInt.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,132 @@ -+/* -+ * MLBAccInt.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+#ifndef _MLB_ACC_INT_H -+#define _MLB_ACC_INT_H -+ -+/* Mappings of level 1 EASI function numbers to function names */ -+ -+#define EASIL1_MLBMAILBOX_SYSCONFIGReadRegister32 (MLB_BASE_EASIL1 + 3) -+#define EASIL1_MLBMAILBOX_SYSCONFIGWriteRegister32 (MLB_BASE_EASIL1 + 4) -+#define EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeRead32 (MLB_BASE_EASIL1 + 7) -+#define EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeWrite32 (MLB_BASE_EASIL1 + 17) -+#define EASIL1_MLBMAILBOX_SYSCONFIGSoftResetWrite32 (MLB_BASE_EASIL1 + 29) -+#define EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleRead32 \ -+ (MLB_BASE_EASIL1 + 33) -+#define EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleWrite32 (MLB_BASE_EASIL1 + 39) -+#define EASIL1_MLBMAILBOX_SYSSTATUSResetDoneRead32 (MLB_BASE_EASIL1 + 44) -+#define EASIL1_MLBMAILBOX_MESSAGE___0_15ReadRegister32 \ -+ (MLB_BASE_EASIL1 + 50) -+#define EASIL1_MLBMAILBOX_MESSAGE___0_15WriteRegister32 \ -+ (MLB_BASE_EASIL1 + 51) -+#define EASIL1_MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32 \ -+ (MLB_BASE_EASIL1 + 56) -+#define EASIL1_MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32 \ -+ (MLB_BASE_EASIL1 + 57) -+#define EASIL1_MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32 \ -+ (MLB_BASE_EASIL1 + 60) -+#define EASIL1_MLBMAILBOX_IRQSTATUS___0_3ReadRegister32 \ -+ (MLB_BASE_EASIL1 + 62) -+#define EASIL1_MLBMAILBOX_IRQSTATUS___0_3WriteRegister32 \ -+ (MLB_BASE_EASIL1 + 63) -+#define EASIL1_MLBMAILBOX_IRQENABLE___0_3ReadRegister32 \ -+ (MLB_BASE_EASIL1 + 192) -+#define EASIL1_MLBMAILBOX_IRQENABLE___0_3WriteRegister32 \ -+ (MLB_BASE_EASIL1 + 193) -+ -+/* Register set MAILBOX_MESSAGE___REGSET_0_15 address offset, bank address -+ * increment and number of banks */ -+ -+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET (u32)(0x0040) -+#define MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP (u32)(0x0004) -+ -+/* Register offset address definitions relative to register set -+ * MAILBOX_MESSAGE___REGSET_0_15 */ -+ -+#define MLB_MAILBOX_MESSAGE___0_15_OFFSET (u32)(0x0) -+ -+ -+/* Register set MAILBOX_FIFOSTATUS___REGSET_0_15 address offset, bank address -+ * increment and number of banks */ -+ -+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET (u32)(0x0080) -+#define MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP (u32)(0x0004) -+ -+/* Register offset address definitions relative to register set -+ * MAILBOX_FIFOSTATUS___REGSET_0_15 */ -+ -+#define MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET (u32)(0x0) -+ -+ -+/* Register set MAILBOX_MSGSTATUS___REGSET_0_15 address offset, bank 
address -+ * increment and number of banks */ -+ -+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET (u32)(0x00c0) -+#define MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP (u32)(0x0004) -+ -+/* Register offset address definitions relative to register set -+ * MAILBOX_MSGSTATUS___REGSET_0_15 */ -+ -+#define MLB_MAILBOX_MSGSTATUS___0_15_OFFSET (u32)(0x0) -+ -+ -+/* Register set MAILBOX_IRQSTATUS___REGSET_0_3 address offset, bank address -+ * increment and number of banks */ -+ -+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET (u32)(0x0100) -+#define MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP (u32)(0x0008) -+ -+/* Register offset address definitions relative to register set -+ * MAILBOX_IRQSTATUS___REGSET_0_3 */ -+ -+#define MLB_MAILBOX_IRQSTATUS___0_3_OFFSET (u32)(0x0) -+ -+ -+/* Register set MAILBOX_IRQENABLE___REGSET_0_3 address offset, bank address -+ * increment and number of banks */ -+ -+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET (u32)(0x0104) -+#define MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP (u32)(0x0008) -+ -+/* Register offset address definitions relative to register set -+ * MAILBOX_IRQENABLE___REGSET_0_3 */ -+ -+#define MLB_MAILBOX_IRQENABLE___0_3_OFFSET (u32)(0x0) -+ -+ -+/* Register offset address definitions */ -+ -+#define MLB_MAILBOX_SYSCONFIG_OFFSET (u32)(0x10) -+#define MLB_MAILBOX_SYSSTATUS_OFFSET (u32)(0x14) -+ -+ -+/* Bitfield mask and offset declarations */ -+ -+#define MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK (u32)(0x18) -+#define MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET (u32)(3) -+#define MLB_MAILBOX_SYSCONFIG_SoftReset_MASK (u32)(0x2) -+#define MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET (u32)(1) -+#define MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK (u32)(0x1) -+#define MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET (u32)(0) -+#define MLB_MAILBOX_SYSSTATUS_ResetDone_MASK (u32)(0x1) -+#define MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET (u32)(0) -+#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK (u32)(0x1) -+#define MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET (u32)(0) -+#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK (u32)(0x7f) -+#define MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET (u32)(0) -+ -+#endif /* _MLB_ACC_INT_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBRegAcM.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MLBRegAcM.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MLBRegAcM.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MLBRegAcM.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,201 @@ -+/* -+ * MLBRegAcM.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#ifndef _MLB_REG_ACM_H -+#define _MLB_REG_ACM_H -+ -+#include -+#include -+#include -+#include "MLBAccInt.h" -+ -+#if defined(USE_LEVEL_1_MACROS) -+ -+#define MLBMAILBOX_SYSCONFIGReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGReadRegister32),\ -+ __raw_readl(((baseAddress))+ \ -+ MLB_MAILBOX_SYSCONFIG_OFFSET)) -+ -+ -+#define MLBMAILBOX_SYSCONFIGWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGWriteRegister32);\ -+ __raw_writel(newValue, ((baseAddress))+offset);\ -+} -+ -+ -+#define MLBMAILBOX_SYSCONFIGSIdleModeRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\ -+ MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK) >>\ -+ MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET)) -+ -+ -+#define MLBMAILBOX_SYSCONFIGSIdleModeWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ -+ register u32 data = __raw_readl(((u32)(baseAddress)) +\ -+ offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSIdleModeWrite32);\ -+ data &= ~(MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK);\ -+ newValue <<= MLB_MAILBOX_SYSCONFIG_SIdleMode_OFFSET;\ -+ newValue &= MLB_MAILBOX_SYSCONFIG_SIdleMode_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define MLBMAILBOX_SYSCONFIGSoftResetWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGSoftResetWrite32);\ -+ data &= ~(MLB_MAILBOX_SYSCONFIG_SoftReset_MASK);\ -+ newValue <<= MLB_MAILBOX_SYSCONFIG_SoftReset_OFFSET;\ -+ newValue &= MLB_MAILBOX_SYSCONFIG_SoftReset_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define MLBMAILBOX_SYSCONFIGAutoIdleRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (MLB_MAILBOX_SYSCONFIG_OFFSET)))) &\ -+ MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK) >>\ -+ MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET)) -+ -+ -+#define MLBMAILBOX_SYSCONFIGAutoIdleWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_SYSCONFIG_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSCONFIGAutoIdleWrite32);\ -+ data &= ~(MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK);\ -+ newValue <<= MLB_MAILBOX_SYSCONFIG_AutoIdle_OFFSET;\ -+ newValue &= MLB_MAILBOX_SYSCONFIG_AutoIdle_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define MLBMAILBOX_SYSSTATUSResetDoneRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_SYSSTATUSResetDoneRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (MLB_MAILBOX_SYSSTATUS_OFFSET)))) &\ -+ MLB_MAILBOX_SYSSTATUS_ResetDone_MASK) >>\ -+ MLB_MAILBOX_SYSSTATUS_ResetDone_OFFSET)) -+ -+ -+#define MLBMAILBOX_MESSAGE___0_15ReadRegister32(baseAddress, bank)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_MESSAGE___0_15ReadRegister32),\ -+ __raw_readl(((baseAddress))+\ -+ (MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\ -+ MLB_MAILBOX_MESSAGE___0_15_OFFSET+(\ -+ 
(bank)*MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP)))) -+ -+ -+#define MLBMAILBOX_MESSAGE___0_15WriteRegister32(baseAddress, bank, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_MESSAGE___REGSET_0_15_OFFSET +\ -+ MLB_MAILBOX_MESSAGE___0_15_OFFSET +\ -+ ((bank)*MLB_MAILBOX_MESSAGE___REGSET_0_15_STEP);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_MESSAGE___0_15WriteRegister32);\ -+ __raw_writel(newValue, ((baseAddress))+offset);\ -+} -+ -+ -+#define MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32(baseAddress, bank)\ -+ (_DEBUG_LEVEL_1_EASI(\ -+ EASIL1_MLBMAILBOX_FIFOSTATUS___0_15ReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+\ -+ (MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\ -+ MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+\ -+ ((bank)*MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP)))) -+ -+ -+#define MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32(baseAddress, bank)\ -+ (_DEBUG_LEVEL_1_EASI(\ -+ EASIL1_MLBMAILBOX_FIFOSTATUS___0_15FifoFullMBmRead32),\ -+ (((__raw_readl(((baseAddress))+\ -+ (MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_OFFSET +\ -+ MLB_MAILBOX_FIFOSTATUS___0_15_OFFSET+\ -+ ((bank)*MLB_MAILBOX_FIFOSTATUS___REGSET_0_15_STEP)))) &\ -+ MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_MASK) >>\ -+ MLB_MAILBOX_FIFOSTATUS___0_15_FifoFullMBm_OFFSET)) -+ -+ -+#define MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32(baseAddress, bank)\ -+ (_DEBUG_LEVEL_1_EASI(\ -+ EASIL1_MLBMAILBOX_MSGSTATUS___0_15NbOfMsgMBmRead32),\ -+ (((__raw_readl(((baseAddress))+\ -+ (MLB_MAILBOX_MSGSTATUS___REGSET_0_15_OFFSET +\ -+ MLB_MAILBOX_MSGSTATUS___0_15_OFFSET+\ -+ ((bank)*MLB_MAILBOX_MSGSTATUS___REGSET_0_15_STEP)))) &\ -+ MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_MASK) >>\ -+ MLB_MAILBOX_MSGSTATUS___0_15_NbOfMsgMBm_OFFSET)) -+ -+ -+#define MLBMAILBOX_IRQSTATUS___0_3ReadRegister32(baseAddress, bank)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQSTATUS___0_3ReadRegister32),\ -+ __raw_readl(((baseAddress))+\ -+ (MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\ -+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET+\ -+ ((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP)))) -+ -+ -+#define MLBMAILBOX_IRQSTATUS___0_3WriteRegister32(baseAddress, bank, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_IRQSTATUS___REGSET_0_3_OFFSET +\ -+ MLB_MAILBOX_IRQSTATUS___0_3_OFFSET +\ -+ ((bank)*MLB_MAILBOX_IRQSTATUS___REGSET_0_3_STEP);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQSTATUS___0_3WriteRegister32);\ -+ __raw_writel(newValue, ((baseAddress))+offset);\ -+} -+ -+ -+#define MLBMAILBOX_IRQENABLE___0_3ReadRegister32(baseAddress, bank)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQENABLE___0_3ReadRegister32),\ -+ __raw_readl(((baseAddress))+\ -+ (MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\ -+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET+\ -+ ((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP)))) -+ -+ -+#define MLBMAILBOX_IRQENABLE___0_3WriteRegister32(baseAddress, bank, value)\ -+{\ -+ const u32 offset = MLB_MAILBOX_IRQENABLE___REGSET_0_3_OFFSET +\ -+ MLB_MAILBOX_IRQENABLE___0_3_OFFSET +\ -+ ((bank)*MLB_MAILBOX_IRQENABLE___REGSET_0_3_STEP);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MLBMAILBOX_IRQENABLE___0_3WriteRegister32);\ -+ __raw_writel(newValue, ((baseAddress))+offset);\ -+} -+ -+ -+#endif /* USE_LEVEL_1_MACROS */ -+ -+#endif /* _MLB_REG_ACM_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMUAccInt.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MMUAccInt.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMUAccInt.h 1970-01-01 01:00:00.000000000 +0100 
-+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MMUAccInt.h 2011-06-22 13:19:32.533063279 +0200
-@@ -0,0 +1,76 @@
-+/*
-+ * MMUAccInt.h
-+ *
-+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
-+ *
-+ * Copyright (C) 2007 Texas Instruments, Inc.
-+ *
-+ * This package is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
-+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
-+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
-+ */
-+
-+#ifndef _MMU_ACC_INT_H
-+#define _MMU_ACC_INT_H
-+
-+/* Mappings of level 1 EASI function numbers to function names */
-+
-+#define EASIL1_MMUMMU_SYSCONFIGReadRegister32 (MMU_BASE_EASIL1 + 3)
-+#define EASIL1_MMUMMU_SYSCONFIGIdleModeWrite32 (MMU_BASE_EASIL1 + 17)
-+#define EASIL1_MMUMMU_SYSCONFIGAutoIdleWrite32 (MMU_BASE_EASIL1 + 39)
-+#define EASIL1_MMUMMU_IRQSTATUSWriteRegister32 (MMU_BASE_EASIL1 + 51)
-+#define EASIL1_MMUMMU_IRQENABLEReadRegister32 (MMU_BASE_EASIL1 + 102)
-+#define EASIL1_MMUMMU_IRQENABLEWriteRegister32 (MMU_BASE_EASIL1 + 103)
-+#define EASIL1_MMUMMU_WALKING_STTWLRunningRead32 (MMU_BASE_EASIL1 + 156)
-+#define EASIL1_MMUMMU_CNTLTWLEnableRead32 (MMU_BASE_EASIL1 + 174)
-+#define EASIL1_MMUMMU_CNTLTWLEnableWrite32 (MMU_BASE_EASIL1 + 180)
-+#define EASIL1_MMUMMU_CNTLMMUEnableWrite32 (MMU_BASE_EASIL1 + 190)
-+#define EASIL1_MMUMMU_FAULT_ADReadRegister32 (MMU_BASE_EASIL1 + 194)
-+#define EASIL1_MMUMMU_TTBWriteRegister32 (MMU_BASE_EASIL1 + 198)
-+#define EASIL1_MMUMMU_LOCKReadRegister32 (MMU_BASE_EASIL1 + 203)
-+#define EASIL1_MMUMMU_LOCKWriteRegister32 (MMU_BASE_EASIL1 + 204)
-+#define EASIL1_MMUMMU_LOCKBaseValueRead32 (MMU_BASE_EASIL1 + 205)
-+#define EASIL1_MMUMMU_LOCKCurrentVictimRead32 (MMU_BASE_EASIL1 + 209)
-+#define EASIL1_MMUMMU_LOCKCurrentVictimWrite32 (MMU_BASE_EASIL1 + 211)
-+#define EASIL1_MMUMMU_LOCKCurrentVictimSet32 (MMU_BASE_EASIL1 + 212)
-+#define EASIL1_MMUMMU_LD_TLBReadRegister32 (MMU_BASE_EASIL1 + 213)
-+#define EASIL1_MMUMMU_LD_TLBWriteRegister32 (MMU_BASE_EASIL1 + 214)
-+#define EASIL1_MMUMMU_CAMWriteRegister32 (MMU_BASE_EASIL1 + 226)
-+#define EASIL1_MMUMMU_RAMWriteRegister32 (MMU_BASE_EASIL1 + 268)
-+#define EASIL1_MMUMMU_FLUSH_ENTRYWriteRegister32 (MMU_BASE_EASIL1 + 322)
-+
-+/* Register offset address definitions */
-+#define MMU_MMU_SYSCONFIG_OFFSET 0x10
-+#define MMU_MMU_IRQSTATUS_OFFSET 0x18
-+#define MMU_MMU_IRQENABLE_OFFSET 0x1c
-+#define MMU_MMU_WALKING_ST_OFFSET 0x40
-+#define MMU_MMU_CNTL_OFFSET 0x44
-+#define MMU_MMU_FAULT_AD_OFFSET 0x48
-+#define MMU_MMU_TTB_OFFSET 0x4c
-+#define MMU_MMU_LOCK_OFFSET 0x50
-+#define MMU_MMU_LD_TLB_OFFSET 0x54
-+#define MMU_MMU_CAM_OFFSET 0x58
-+#define MMU_MMU_RAM_OFFSET 0x5c
-+#define MMU_MMU_GFLUSH_OFFSET 0x60
-+#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
-+/* Bitfield mask and offset declarations */
-+#define MMU_MMU_SYSCONFIG_IdleMode_MASK 0x18
-+#define MMU_MMU_SYSCONFIG_IdleMode_OFFSET 3
-+#define MMU_MMU_SYSCONFIG_AutoIdle_MASK 0x1
-+#define MMU_MMU_SYSCONFIG_AutoIdle_OFFSET 0
-+#define MMU_MMU_WALKING_ST_TWLRunning_MASK 0x1
-+#define MMU_MMU_WALKING_ST_TWLRunning_OFFSET 0
-+#define MMU_MMU_CNTL_TWLEnable_MASK 0x4
-+#define MMU_MMU_CNTL_TWLEnable_OFFSET 2
-+#define MMU_MMU_CNTL_MMUEnable_MASK 0x2
-+#define MMU_MMU_CNTL_MMUEnable_OFFSET 1
-+#define MMU_MMU_LOCK_BaseValue_MASK 0xfc00
-+#define MMU_MMU_LOCK_BaseValue_OFFSET 10
-+#define MMU_MMU_LOCK_CurrentVictim_MASK 0x3f0 -+#define MMU_MMU_LOCK_CurrentVictim_OFFSET 4 -+ -+#endif /* _MMU_ACC_INT_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMURegAcM.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MMURegAcM.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/MMURegAcM.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/MMURegAcM.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,253 @@ -+/* -+ * MMURegAcM.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+#ifndef _MMU_REG_ACM_H -+#define _MMU_REG_ACM_H -+ -+#include -+#include -+#include -+ -+#include "MMUAccInt.h" -+ -+#if defined(USE_LEVEL_1_MACROS) -+ -+ -+#define MMUMMU_SYSCONFIGReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_SYSCONFIGReadRegister32),\ -+ __raw_readl((baseAddress)+MMU_MMU_SYSCONFIG_OFFSET)) -+ -+ -+#define MMUMMU_SYSCONFIGIdleModeWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_SYSCONFIGIdleModeWrite32);\ -+ data &= ~(MMU_MMU_SYSCONFIG_IdleMode_MASK);\ -+ newValue <<= MMU_MMU_SYSCONFIG_IdleMode_OFFSET;\ -+ newValue &= MMU_MMU_SYSCONFIG_IdleMode_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, baseAddress+offset);\ -+} -+ -+ -+#define MMUMMU_SYSCONFIGAutoIdleWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_SYSCONFIGAutoIdleWrite32);\ -+ data &= ~(MMU_MMU_SYSCONFIG_AutoIdle_MASK);\ -+ newValue <<= MMU_MMU_SYSCONFIG_AutoIdle_OFFSET;\ -+ newValue &= MMU_MMU_SYSCONFIG_AutoIdle_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, baseAddress+offset);\ -+} -+ -+ -+#define MMUMMU_IRQSTATUSReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQSTATUSReadRegister32),\ -+ __raw_readl((baseAddress)+MMU_MMU_IRQSTATUS_OFFSET)) -+ -+ -+#define MMUMMU_IRQSTATUSWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQSTATUSWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_IRQENABLEReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQENABLEReadRegister32),\ -+ __raw_readl((baseAddress)+MMU_MMU_IRQENABLE_OFFSET)) -+ -+ -+#define MMUMMU_IRQENABLEWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_IRQENABLEWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_WALKING_STTWLRunningRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_WALKING_STTWLRunningRead32),\ -+ (((__raw_readl(((baseAddress)+(MMU_MMU_WALKING_ST_OFFSET))))\ -+ & MMU_MMU_WALKING_ST_TWLRunning_MASK) 
>>\ -+ MMU_MMU_WALKING_ST_TWLRunning_OFFSET)) -+ -+ -+#define MMUMMU_CNTLTWLEnableRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CNTLTWLEnableRead32),\ -+ (((__raw_readl(((baseAddress)+(MMU_MMU_CNTL_OFFSET)))) &\ -+ MMU_MMU_CNTL_TWLEnable_MASK) >>\ -+ MMU_MMU_CNTL_TWLEnable_OFFSET)) -+ -+ -+#define MMUMMU_CNTLTWLEnableWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_CNTL_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CNTLTWLEnableWrite32);\ -+ data &= ~(MMU_MMU_CNTL_TWLEnable_MASK);\ -+ newValue <<= MMU_MMU_CNTL_TWLEnable_OFFSET;\ -+ newValue &= MMU_MMU_CNTL_TWLEnable_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, baseAddress+offset);\ -+} -+ -+ -+#define MMUMMU_CNTLMMUEnableWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_CNTL_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CNTLMMUEnableWrite32);\ -+ data &= ~(MMU_MMU_CNTL_MMUEnable_MASK);\ -+ newValue <<= MMU_MMU_CNTL_MMUEnable_OFFSET;\ -+ newValue &= MMU_MMU_CNTL_MMUEnable_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, baseAddress+offset);\ -+} -+ -+ -+#define MMUMMU_FAULT_ADReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_FAULT_ADReadRegister32),\ -+ __raw_readl((baseAddress)+MMU_MMU_FAULT_AD_OFFSET)) -+ -+ -+#define MMUMMU_TTBWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_TTB_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_TTBWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_LOCKReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKReadRegister32),\ -+ __raw_readl((baseAddress)+MMU_MMU_LOCK_OFFSET)) -+ -+ -+#define MMUMMU_LOCKWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_LOCK_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_LOCKBaseValueRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKBaseValueRead32),\ -+ (((__raw_readl(((baseAddress)+(MMU_MMU_LOCK_OFFSET)))) &\ -+ MMU_MMU_LOCK_BaseValue_MASK) >>\ -+ MMU_MMU_LOCK_BaseValue_OFFSET)) -+ -+ -+#define MMUMMU_LOCKBaseValueWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_LOCK_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKBaseValueWrite32);\ -+ data &= ~(MMU_MMU_LOCK_BaseValue_MASK);\ -+ newValue <<= MMU_MMU_LOCK_BaseValue_OFFSET;\ -+ newValue &= MMU_MMU_LOCK_BaseValue_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, baseAddress+offset);\ -+} -+ -+ -+#define MMUMMU_LOCKCurrentVictimRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKCurrentVictimRead32),\ -+ (((__raw_readl(((baseAddress)+(MMU_MMU_LOCK_OFFSET)))) &\ -+ MMU_MMU_LOCK_CurrentVictim_MASK) >>\ -+ MMU_MMU_LOCK_CurrentVictim_OFFSET)) -+ -+ -+#define MMUMMU_LOCKCurrentVictimWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_LOCK_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKCurrentVictimWrite32);\ -+ data &= ~(MMU_MMU_LOCK_CurrentVictim_MASK);\ -+ newValue <<= MMU_MMU_LOCK_CurrentVictim_OFFSET;\ -+ newValue &= 
MMU_MMU_LOCK_CurrentVictim_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, baseAddress+offset);\ -+} -+ -+ -+#define MMUMMU_LOCKCurrentVictimSet32(var, value)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LOCKCurrentVictimSet32),\ -+ (((var) & ~(MMU_MMU_LOCK_CurrentVictim_MASK)) |\ -+ (((value) << MMU_MMU_LOCK_CurrentVictim_OFFSET) &\ -+ MMU_MMU_LOCK_CurrentVictim_MASK))) -+ -+ -+#define MMUMMU_LD_TLBReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LD_TLBReadRegister32),\ -+ __raw_readl((baseAddress)+MMU_MMU_LD_TLB_OFFSET)) -+ -+ -+#define MMUMMU_LD_TLBWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_LD_TLB_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_LD_TLBWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_CAMWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_CAM_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_CAMWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_RAMWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_RAM_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_RAMWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define MMUMMU_FLUSH_ENTRYWriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\ -+ register u32 newValue = (value);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_MMUMMU_FLUSH_ENTRYWriteRegister32);\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#endif /* USE_LEVEL_1_MACROS */ -+ -+#endif /* _MMU_REG_ACM_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMAccInt.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/PRCMAccInt.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMAccInt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/PRCMAccInt.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,300 @@ -+/* -+ * PRCMAccInt.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#ifndef _PRCM_ACC_INT_H -+#define _PRCM_ACC_INT_H -+ -+/* Mappings of level 1 EASI function numbers to function names */ -+ -+#define EASIL1_PRCMPRCM_CLKCFG_CTRLValid_configWriteClk_valid32 \ -+ (PRCM_BASE_EASIL1 + 349) -+#define EASIL1_PRCMCM_FCLKEN1_COREReadRegister32 (PRCM_BASE_EASIL1 + 743) -+#define EASIL1_PRCMCM_FCLKEN1_COREEN_GPT8Write32 (PRCM_BASE_EASIL1 + 951) -+#define EASIL1_PRCMCM_FCLKEN1_COREEN_GPT7Write32 (PRCM_BASE_EASIL1 + 961) -+#define EASIL1_PRCMCM_ICLKEN1_COREReadRegister32 \ -+ (PRCM_BASE_EASIL1 + 1087) -+#define EASIL1_PRCMCM_ICLKEN1_COREEN_MAILBOXESWrite32 \ -+ (PRCM_BASE_EASIL1 + 1105) -+#define EASIL1_PRCMCM_ICLKEN1_COREEN_GPT8Write32 \ -+ (PRCM_BASE_EASIL1 + 1305) -+#define EASIL1_PRCMCM_ICLKEN1_COREEN_GPT7Write32 \ -+ (PRCM_BASE_EASIL1 + 1315) -+#define EASIL1_PRCMCM_CLKSEL1_CORECLKSEL_L3ReadIssel132 \ -+ (PRCM_BASE_EASIL1 + 2261) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8Write32k32 \ -+ (PRCM_BASE_EASIL1 + 2364) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteSys32 \ -+ (PRCM_BASE_EASIL1 + 2365) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteExt32 \ -+ (PRCM_BASE_EASIL1 + 2366) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7Write32k32 \ -+ (PRCM_BASE_EASIL1 + 2380) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteSys32 \ -+ (PRCM_BASE_EASIL1 + 2381) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteExt32 \ -+ (PRCM_BASE_EASIL1 + 2382) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteSys32 \ -+ (PRCM_BASE_EASIL1 + 2397) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteExt32 \ -+ (PRCM_BASE_EASIL1 + 2398) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteSys32 \ -+ (PRCM_BASE_EASIL1 + 2413) -+#define EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteExt32 \ -+ (PRCM_BASE_EASIL1 + 2414) -+#define EASIL1_PRCMCM_CLKSEL1_PLLAPLLs_ClkinRead32 \ -+ (PRCM_BASE_EASIL1 + 3747) -+#define EASIL1_PRCMCM_FCLKEN_DSPEN_DSPWrite32 (PRCM_BASE_EASIL1 + 3834) -+#define EASIL1_PRCMCM_ICLKEN_DSPEN_DSP_IPIWrite32 \ -+ (PRCM_BASE_EASIL1 + 3846) -+#define EASIL1_PRCMCM_IDLEST_DSPReadRegister32 (PRCM_BASE_EASIL1 + 3850) -+#define EASIL1_PRCMCM_IDLEST_DSPST_IPIRead32 (PRCM_BASE_EASIL1 + 3857) -+#define EASIL1_PRCMCM_IDLEST_DSPST_DSPRead32 (PRCM_BASE_EASIL1 + 3863) -+#define EASIL1_PRCMCM_AUTOIDLE_DSPAUTO_DSP_IPIWrite32 \ -+ (PRCM_BASE_EASIL1 + 3877) -+#define EASIL1_PRCMCM_CLKSEL_DSPSYNC_DSPWrite32 (PRCM_BASE_EASIL1 + 3927) -+#define EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSP_IFWrite32 \ -+ (PRCM_BASE_EASIL1 + 3941) -+#define EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSPWrite32 \ -+ (PRCM_BASE_EASIL1 + 3965) -+#define EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPRead32 \ -+ (PRCM_BASE_EASIL1 + 3987) -+#define EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPWrite32 \ -+ (PRCM_BASE_EASIL1 + 3993) -+#define EASIL1_PRCMRM_RSTCTRL_DSPReadRegister32 (PRCM_BASE_EASIL1 + 3997) -+#define EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32 \ -+ (PRCM_BASE_EASIL1 + 4025) -+#define EASIL1_PRCMRM_RSTST_DSPReadRegister32 (PRCM_BASE_EASIL1 + 4029) -+#define EASIL1_PRCMRM_RSTST_DSPWriteRegister32 (PRCM_BASE_EASIL1 + 4030) -+#define EASIL1_PRCMPM_PWSTCTRL_DSPForceStateWrite32 \ -+ (PRCM_BASE_EASIL1 + 4165) -+#define EASIL1_PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32 \ -+ (PRCM_BASE_EASIL1 + 4193) -+#define EASIL1_PRCMPM_PWSTST_DSPReadRegister32 (PRCM_BASE_EASIL1 + 4197) -+#define EASIL1_PRCMPM_PWSTST_DSPInTransitionRead32 \ -+ (PRCM_BASE_EASIL1 + 4198) -+#define EASIL1_PRCMPM_PWSTST_DSPPowerStateStGet32 \ -+ (PRCM_BASE_EASIL1 + 4235) -+#define EASIL1_CM_FCLKEN_PER_GPT5WriteRegister32 \ -+ (PRCM_BASE_EASIL1 + 4368) 
-+#define EASIL1_CM_ICLKEN_PER_GPT5WriteRegister32 \ -+ (PRCM_BASE_EASIL1 + 4370) -+#define EASIL1_CM_CLKSEL_PER_GPT5Write32k32 (PRCM_BASE_EASIL1 + 4372) -+#define EASIL1_CM_CLKSEL_PER_GPT6Write32k32 (PRCM_BASE_EASIL1 + 4373) -+#define EASIL1_PRCMCM_CLKSTCTRL_IVA2WriteRegister32 \ -+ (PRCM_BASE_EASIL1 + 4374) -+#define EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32 \ -+ (PRCM_BASE_EASIL1 + 4375) -+#define EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32 \ -+ (PRCM_BASE_EASIL1 + 4376) -+#define EASIL1_PRCMPM_PWSTST_IVA2InTransitionRead32 \ -+ (PRCM_BASE_EASIL1 + 4377) -+#define EASIL1_PRCMPM_PWSTST_IVA2PowerStateStGet32 \ -+ (PRCM_BASE_EASIL1 + 4378) -+#define EASIL1_PRCMPM_PWSTST_IVA2ReadRegister32 (PRCM_BASE_EASIL1 + 4379) -+ -+/* Register offset address definitions */ -+ -+#define PRCM_PRCM_CLKCFG_CTRL_OFFSET (u32)(0x80) -+#define PRCM_CM_FCLKEN1_CORE_OFFSET (u32)(0x200) -+#define PRCM_CM_ICLKEN1_CORE_OFFSET (u32)(0x210) -+#define PRCM_CM_CLKSEL2_CORE_OFFSET (u32)(0x244) -+#define PRCM_CM_CLKSEL1_PLL_OFFSET (u32)(0x540) -+#define PRCM_CM_ICLKEN_DSP_OFFSET (u32)(0x810) -+#define PRCM_CM_IDLEST_DSP_OFFSET (u32)(0x820) -+#define PRCM_CM_AUTOIDLE_DSP_OFFSET (u32)(0x830) -+#define PRCM_CM_CLKSEL_DSP_OFFSET (u32)(0x840) -+#define PRCM_CM_CLKSTCTRL_DSP_OFFSET (u32)(0x848) -+#define PRCM_RM_RSTCTRL_DSP_OFFSET (u32)(0x050) -+#define PRCM_RM_RSTST_DSP_OFFSET (u32)(0x058) -+#define PRCM_PM_PWSTCTRL_DSP_OFFSET (u32)(0x8e0) -+#define PRCM_PM_PWSTST_DSP_OFFSET (u32)(0x8e4) -+#define PRCM_PM_PWSTST_IVA2_OFFSET (u32)(0xE4) -+#define PRCM_PM_PWSTCTRL_IVA2_OFFSET (u32)(0xE0) -+#define PRCM_CM_CLKSTCTRL_IVA2_OFFSET (u32)(0x48) -+#define CM_CLKSEL_PER_OFFSET (u32)(0x40) -+ -+/* Bitfield mask and offset declarations */ -+ -+#define PRCM_PRCM_CLKCFG_CTRL_Valid_config_MASK (u32)(0x1) -+#define PRCM_PRCM_CLKCFG_CTRL_Valid_config_OFFSET (u32)(0) -+ -+#define PRCM_CM_FCLKEN1_CORE_EN_GPT8_MASK (u32)(0x400) -+#define PRCM_CM_FCLKEN1_CORE_EN_GPT8_OFFSET (u32)(10) -+ -+#define PRCM_CM_FCLKEN1_CORE_EN_GPT7_MASK (u32)(0x200) -+#define PRCM_CM_FCLKEN1_CORE_EN_GPT7_OFFSET (u32)(9) -+ -+#define PRCM_CM_ICLKEN1_CORE_EN_GPT8_MASK (u32)(0x400) -+#define PRCM_CM_ICLKEN1_CORE_EN_GPT8_OFFSET (u32)(10) -+ -+#define PRCM_CM_ICLKEN1_CORE_EN_GPT7_MASK (u32)(0x200) -+#define PRCM_CM_ICLKEN1_CORE_EN_GPT7_OFFSET (u32)(9) -+ -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK (u32)(0xc000) -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET (u32)(14) -+ -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK (u32)(0x3000) -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET (u32)(12) -+ -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_MASK (u32)(0xc00) -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_OFFSET (u32)(10) -+ -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_MASK (u32)(0x300) -+#define PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_OFFSET (u32)(8) -+ -+#define PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_MASK (u32)(0x3800000) -+#define PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_OFFSET (u32)(23) -+ -+#define PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_MASK (u32)(0x2) -+#define PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_OFFSET (u32)(1) -+ -+#define PRCM_CM_IDLEST_DSP_ST_IPI_MASK (u32)(0x2) -+#define PRCM_CM_IDLEST_DSP_ST_IPI_OFFSET (u32)(1) -+ -+#define PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_MASK (u32)(0x2) -+#define PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_OFFSET (u32)(1) -+ -+#define PRCM_CM_CLKSEL_DSP_SYNC_DSP_MASK (u32)(0x80) -+#define PRCM_CM_CLKSEL_DSP_SYNC_DSP_OFFSET (u32)(7) -+ -+#define PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_MASK (u32)(0x60) -+#define PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_OFFSET (u32)(5) -+ -+#define 
PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_MASK (u32)(0x1f) -+#define PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_OFFSET (u32)(0) -+ -+#define PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK (u32)(0x1) -+#define PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_OFFSET (u32)(0) -+ -+#define PRCM_PM_PWSTCTRL_DSP_ForceState_MASK (u32)(0x40000) -+#define PRCM_PM_PWSTCTRL_DSP_ForceState_OFFSET (u32)(18) -+ -+#define PRCM_PM_PWSTCTRL_DSP_PowerState_MASK (u32)(0x3) -+#define PRCM_PM_PWSTCTRL_DSP_PowerState_OFFSET (u32)(0) -+ -+#define PRCM_PM_PWSTCTRL_IVA2_PowerState_MASK (u32)(0x3) -+#define PRCM_PM_PWSTCTRL_IVA2_PowerState_OFFSET (u32)(0) -+ -+#define PRCM_PM_PWSTST_DSP_InTransition_MASK (u32)(0x100000) -+#define PRCM_PM_PWSTST_DSP_InTransition_OFFSET (u32)(20) -+ -+#define PRCM_PM_PWSTST_IVA2_InTransition_MASK (u32)(0x100000) -+#define PRCM_PM_PWSTST_IVA2_InTransition_OFFSET (u32)(20) -+ -+#define PRCM_PM_PWSTST_DSP_PowerStateSt_MASK (u32)(0x3) -+#define PRCM_PM_PWSTST_DSP_PowerStateSt_OFFSET (u32)(0) -+ -+#define PRCM_PM_PWSTST_IVA2_PowerStateSt_MASK (u32)(0x3) -+#define PRCM_PM_PWSTST_IVA2_PowerStateSt_OFFSET (u32)(0) -+ -+#define CM_FCLKEN_PER_OFFSET (u32)(0x0) -+#define CM_FCLKEN_PER_GPT5_OFFSET (u32)(6) -+#define CM_FCLKEN_PER_GPT5_MASK (u32)(0x40) -+ -+#define CM_FCLKEN_PER_GPT6_OFFSET (u32)(7) -+#define CM_FCLKEN_PER_GPT6_MASK (u32)(0x80) -+ -+#define CM_ICLKEN_PER_OFFSET (u32)(0x10) -+#define CM_ICLKEN_PER_GPT5_OFFSET (u32)(6) -+#define CM_ICLKEN_PER_GPT5_MASK (u32)(0x40) -+ -+#define CM_ICLKEN_PER_GPT6_OFFSET (u32)(7) -+#define CM_ICLKEN_PER_GPT6_MASK (u32)(0x80) -+ -+#define CM_CLKSEL_PER_GPT5_OFFSET (u32)(3) -+#define CM_CLKSEL_PER_GPT5_MASK (u32)(0x8) -+ -+#define CM_CLKSEL_PER_GPT6_OFFSET (u32)(4) -+#define CM_CLKSEL_PER_GPT6_MASK (u32)(0x10) -+ -+ -+#define CM_FCLKEN_IVA2_OFFSET (u32)(0x0) -+#define CM_FCLKEN_IVA2_EN_MASK (u32)(0x1) -+#define CM_FCLKEN_IVA2_EN_OFFSET (u32)(0x0) -+ -+#define CM_IDLEST_IVA2_OFFSET (u32)(0x20) -+#define CM_IDLEST_IVA2_ST_IVA2_MASK (u32) (0x01) -+#define CM_IDLEST_IVA2_ST_IVA2_OFFSET (u32) (0x00) -+ -+#define CM_FCLKEN1_CORE_OFFSET (u32)(0xA00) -+ -+#define CM_ICLKEN1_CORE_OFFSET (u32)(0xA10) -+#define CM_ICLKEN1_CORE_EN_MAILBOXES_MASK (u32)(0x00000080) /* bit 7 */ -+#define CM_ICLKEN1_CORE_EN_MAILBOXES_OFFSET (u32)(7) -+ -+#define CM_CLKSTCTRL_IVA2_OFFSET (u32)(0x0) -+#define CM_CLKSTCTRL_IVA2_MASK (u32)(0x3) -+ -+ -+#define PRM_RSTCTRL_IVA2_OFFSET (u32)(0x50) -+#define PRM_RSTCTRL_IVA2_RST1_MASK (u32)(0x1) -+#define PRM_RSTCTRL_IVA2_RST1_OFFSET (u32)(0x0) -+#define PRM_RSTCTRL_IVA2_RST2_MASK (u32)(0x2) -+#define PRM_RSTCTRL_IVA2_RST2_OFFSET (u32)(0x1) -+#define PRM_RSTCTRL_IVA2_RST3_MASK (u32)(0x4) -+#define PRM_RSTCTRL_IVA2_RST3_OFFSET (u32)(0x2) -+ -+ -+/* The following represent the enumerated values for each bitfield */ -+ -+enum PRCMPRCM_CLKCFG_CTRLValid_configE { -+ PRCMPRCM_CLKCFG_CTRLValid_configUpdated = 0x0000, -+ PRCMPRCM_CLKCFG_CTRLValid_configClk_valid = 0x0001 -+} ; -+ -+enum PRCMCM_CLKSEL2_CORECLKSEL_GPT8E { -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT832k = 0x0000, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT8Sys = 0x0001, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT8Ext = 0x0002, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT8Reserved = 0x0003 -+} ; -+ -+enum PRCMCM_CLKSEL2_CORECLKSEL_GPT7E { -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT732k = 0x0000, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT7Sys = 0x0001, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT7Ext = 0x0002, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT7Reserved = 0x0003 -+} ; -+ -+enum PRCMCM_CLKSEL2_CORECLKSEL_GPT6E { -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT632k = 0x0000, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT6Sys = 0x0001, -+ 
PRCMCM_CLKSEL2_CORECLKSEL_GPT6Ext = 0x0002, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT6Reserved = 0x0003 -+} ; -+ -+enum PRCMCM_CLKSEL2_CORECLKSEL_GPT5E { -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT532k = 0x0000, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT5Sys = 0x0001, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT5Ext = 0x0002, -+ PRCMCM_CLKSEL2_CORECLKSEL_GPT5Reserved = 0x0003 -+} ; -+ -+enum PRCMPM_PWSTCTRL_DSPPowerStateE { -+ PRCMPM_PWSTCTRL_DSPPowerStateON = 0x0000, -+ PRCMPM_PWSTCTRL_DSPPowerStateRET = 0x0001, -+ PRCMPM_PWSTCTRL_DSPPowerStateReserved = 0x0002, -+ PRCMPM_PWSTCTRL_DSPPowerStateOFF = 0x0003 -+} ; -+ -+enum PRCMPM_PWSTCTRL_IVA2PowerStateE { -+ PRCMPM_PWSTCTRL_IVA2PowerStateON = 0x0003, -+ PRCMPM_PWSTCTRL_IVA2PowerStateRET = 0x0001, -+ PRCMPM_PWSTCTRL_IVA2PowerStateReserved = 0x0002, -+ PRCMPM_PWSTCTRL_IVA2PowerStateOFF = 0x0000 -+} ; -+ -+#endif /* _PRCM_ACC_INT_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMRegAcM.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/PRCMRegAcM.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/hw/PRCMRegAcM.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/hw/PRCMRegAcM.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,670 @@ -+/* -+ * PRCMRegAcM.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef _PRCM_REG_ACM_H -+#define _PRCM_REG_ACM_H -+ -+#include -+#include -+ -+#include -+ -+#include "PRCMAccInt.h" -+ -+#if defined(USE_LEVEL_1_MACROS) -+ -+#define PRCMPRCM_CLKCFG_CTRLValid_configWriteClk_valid32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_PRCM_CLKCFG_CTRL_OFFSET;\ -+ const u32 newValue = \ -+ (u32)PRCMPRCM_CLKCFG_CTRLValid_configClk_valid <<\ -+ PRCM_PRCM_CLKCFG_CTRL_Valid_config_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(\ -+ EASIL1_PRCMPRCM_CLKCFG_CTRLValid_configWriteClk_valid32);\ -+ data &= ~(PRCM_PRCM_CLKCFG_CTRL_Valid_config_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define CM_FCLKEN_PERReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+CM_FCLKEN_PER_OFFSET)) -+ -+ -+#define CM_ICLKEN_PERReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+CM_ICLKEN_PER_OFFSET)) -+ -+ -+#define CM_FCLKEN_PER_GPT5WriteRegister32(baseAddress,value)\ -+{\ -+ const u32 offset = CM_FCLKEN_PER_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_CM_FCLKEN_PER_GPT5WriteRegister32);\ -+ data &= ~(CM_FCLKEN_PER_GPT5_MASK);\ -+ newValue <<= CM_FCLKEN_PER_GPT5_OFFSET;\ -+ newValue &= CM_FCLKEN_PER_GPT5_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ -+} -+ -+ -+#define CM_FCLKEN_PER_GPT6WriteRegister32(baseAddress,value)\ -+{\ -+ const u32 offset = CM_FCLKEN_PER_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ 
register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_CM_FCLKEN_PER_GPT5WriteRegister32);\ -+ data &= ~(CM_FCLKEN_PER_GPT6_MASK);\ -+ newValue <<= CM_FCLKEN_PER_GPT6_OFFSET;\ -+ newValue &= CM_FCLKEN_PER_GPT6_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ -+} -+ -+ -+#define CM_ICLKEN_PER_GPT5WriteRegister32(baseAddress,value)\ -+{\ -+ const u32 offset = CM_ICLKEN_PER_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_CM_ICLKEN_PER_GPT5WriteRegister32);\ -+ data &= ~(CM_ICLKEN_PER_GPT5_MASK);\ -+ newValue <<= CM_ICLKEN_PER_GPT5_OFFSET;\ -+ newValue &= CM_ICLKEN_PER_GPT5_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ -+} -+ -+ -+#define CM_ICLKEN_PER_GPT6WriteRegister32(baseAddress,value)\ -+{\ -+ const u32 offset = CM_ICLKEN_PER_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_CM_ICLKEN_PER_GPT5WriteRegister32);\ -+ data &= ~(CM_ICLKEN_PER_GPT6_MASK);\ -+ newValue <<= CM_ICLKEN_PER_GPT6_OFFSET;\ -+ newValue &= CM_ICLKEN_PER_GPT6_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ -+} -+ -+ -+#define CM_FCLKEN1_COREReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+CM_FCLKEN1_CORE_OFFSET)) -+ -+ -+#define PRCMCM_FCLKEN1_COREEN_GPT8Write32(baseAddress,value)\ -+{\ -+ const u32 offset = PRCM_CM_FCLKEN1_CORE_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREEN_GPT8Write32);\ -+ data &= ~(PRCM_CM_FCLKEN1_CORE_EN_GPT8_MASK);\ -+ newValue <<= PRCM_CM_FCLKEN1_CORE_EN_GPT8_OFFSET;\ -+ newValue &= PRCM_CM_FCLKEN1_CORE_EN_GPT8_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_FCLKEN1_COREEN_GPT7Write32(baseAddress,value)\ -+{\ -+ const u32 offset = PRCM_CM_FCLKEN1_CORE_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN1_COREEN_GPT7Write32);\ -+ data &= ~(PRCM_CM_FCLKEN1_CORE_EN_GPT7_MASK);\ -+ newValue <<= PRCM_CM_FCLKEN1_CORE_EN_GPT7_OFFSET;\ -+ newValue &= PRCM_CM_FCLKEN1_CORE_EN_GPT7_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define CM_ICLKEN1_COREReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+CM_ICLKEN1_CORE_OFFSET)) -+ -+ -+#define CM_ICLKEN1_COREEN_MAILBOXESWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = CM_ICLKEN1_CORE_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREEN_MAILBOXESWrite32);\ -+ data &= ~(CM_ICLKEN1_CORE_EN_MAILBOXES_MASK);\ -+ newValue <<= CM_ICLKEN1_CORE_EN_MAILBOXES_OFFSET;\ -+ newValue &= CM_ICLKEN1_CORE_EN_MAILBOXES_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_ICLKEN1_COREEN_GPT8Write32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_ICLKEN1_CORE_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ 
register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREEN_GPT8Write32);\ -+ data &= ~(PRCM_CM_ICLKEN1_CORE_EN_GPT8_MASK);\ -+ newValue <<= PRCM_CM_ICLKEN1_CORE_EN_GPT8_OFFSET;\ -+ newValue &= PRCM_CM_ICLKEN1_CORE_EN_GPT8_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_ICLKEN1_COREEN_GPT7Write32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_ICLKEN1_CORE_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN1_COREEN_GPT7Write32);\ -+ data &= ~(PRCM_CM_ICLKEN1_CORE_EN_GPT7_MASK);\ -+ newValue <<= PRCM_CM_ICLKEN1_CORE_EN_GPT7_OFFSET;\ -+ newValue &= PRCM_CM_ICLKEN1_CORE_EN_GPT7_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT8Write32k32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT832k <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8Write32k32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteSys32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT8Sys <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteSys32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteExt32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT8Ext <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT8WriteExt32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT8_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT7Write32k32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT732k <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7Write32k32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteSys32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT7Sys <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteSys32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteExt32(baseAddress)\ -+{\ -+ const u32 offset = 
PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT7Ext <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT7WriteExt32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT7_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteSys32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT6Sys <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteSys32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteExt32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT6Ext <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT6WriteExt32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT6_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define CM_CLKSEL_PER_GPT5Write32k32(baseAddress)\ -+{\ -+ const u32 offset = CM_CLKSEL_PER_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT532k <<\ -+ CM_CLKSEL_PER_GPT5_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_CM_CLKSEL_PER_GPT5Write32k32);\ -+ data &= ~(CM_CLKSEL_PER_GPT5_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define CM_CLKSEL_PER_GPT6Write32k32(baseAddress)\ -+{\ -+ const u32 offset = CM_CLKSEL_PER_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT532k <<\ -+ CM_CLKSEL_PER_GPT6_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_CM_CLKSEL_PER_GPT6Write32k32);\ -+ data &= ~(CM_CLKSEL_PER_GPT6_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteSys32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT5Sys <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteSys32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteExt32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL2_CORE_OFFSET;\ -+ const u32 newValue = (u32)PRCMCM_CLKSEL2_CORECLKSEL_GPT5Ext <<\ -+ PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_OFFSET;\ -+ register u32 data = __raw_readl((u32)(baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL2_CORECLKSEL_GPT5WriteExt32);\ -+ data &= ~(PRCM_CM_CLKSEL2_CORE_CLKSEL_GPT5_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL1_PLLAPLLs_ClkinRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL1_PLLAPLLs_ClkinRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ 
(PRCM_CM_CLKSEL1_PLL_OFFSET)))) &\ -+ PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_MASK) >>\ -+ PRCM_CM_CLKSEL1_PLL_APLLs_Clkin_OFFSET)) -+ -+ -+#define CM_FCLKEN_IVA2EN_DSPWrite32(baseAddress,value)\ -+{\ -+ const u32 offset = CM_FCLKEN_IVA2_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_FCLKEN_DSPEN_DSPWrite32);\ -+ data &= ~(CM_FCLKEN_IVA2_EN_MASK);\ -+ newValue <<= CM_FCLKEN_IVA2_EN_OFFSET;\ -+ newValue &= CM_FCLKEN_IVA2_EN_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_ICLKEN_DSPEN_DSP_IPIWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_ICLKEN_DSP_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_ICLKEN_DSPEN_DSP_IPIWrite32);\ -+ data &= ~(PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_MASK);\ -+ newValue <<= PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_OFFSET;\ -+ newValue &= PRCM_CM_ICLKEN_DSP_EN_DSP_IPI_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_IDLEST_DSPReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_IDLEST_DSPReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+PRCM_CM_IDLEST_DSP_OFFSET)) -+ -+ -+#define PRCMCM_IDLEST_DSPST_IPIRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_IDLEST_DSPST_IPIRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (PRCM_CM_IDLEST_DSP_OFFSET)))) &\ -+ PRCM_CM_IDLEST_DSP_ST_IPI_MASK) >>\ -+ PRCM_CM_IDLEST_DSP_ST_IPI_OFFSET)) -+ -+ -+#define PRM_IDLEST_IVA2ST_IVA2Read32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_IDLEST_DSPST_DSPRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (CM_IDLEST_IVA2_OFFSET)))) &\ -+ CM_IDLEST_IVA2_ST_IVA2_MASK) >>\ -+ CM_IDLEST_IVA2_ST_IVA2_OFFSET)) -+ -+ -+#define PRCMCM_AUTOIDLE_DSPAUTO_DSP_IPIWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_AUTOIDLE_DSP_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_AUTOIDLE_DSPAUTO_DSP_IPIWrite32);\ -+ data &= ~(PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_MASK);\ -+ newValue <<= PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_OFFSET;\ -+ newValue &= PRCM_CM_AUTOIDLE_DSP_AUTO_DSP_IPI_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL_DSPSYNC_DSPWrite32(baseAddress,value)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL_DSP_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL_DSPSYNC_DSPWrite32);\ -+ data &= ~(PRCM_CM_CLKSEL_DSP_SYNC_DSP_MASK);\ -+ newValue <<= PRCM_CM_CLKSEL_DSP_SYNC_DSP_OFFSET;\ -+ newValue &= PRCM_CM_CLKSEL_DSP_SYNC_DSP_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL_DSPCLKSEL_DSP_IFWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL_DSP_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSP_IFWrite32);\ -+ data &= ~(PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_MASK);\ -+ newValue <<= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_OFFSET;\ -+ newValue &= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_IF_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, 
(u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSEL_DSPCLKSEL_DSPWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSEL_DSP_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSEL_DSPCLKSEL_DSPWrite32);\ -+ data &= ~(PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_MASK);\ -+ newValue <<= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_OFFSET;\ -+ newValue &= PRCM_CM_CLKSEL_DSP_CLKSEL_DSP_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSTCTRL_IVA2WriteRegister32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSTCTRL_IVA2_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSTCTRL_IVA2WriteRegister32);\ -+ data &= ~(CM_CLKSTCTRL_IVA2_MASK);\ -+ newValue <<= CM_CLKSTCTRL_IVA2_OFFSET;\ -+ newValue &= CM_CLKSTCTRL_IVA2_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define PRCMCM_CLKSTCTRL_DSPAutostate_DSPRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (PRCM_CM_CLKSTCTRL_DSP_OFFSET)))) &\ -+ PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK) >>\ -+ PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_OFFSET)) -+ -+ -+#define PRCMCM_CLKSTCTRL_DSPAutostate_DSPWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_CM_CLKSTCTRL_DSP_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMCM_CLKSTCTRL_DSPAutostate_DSPWrite32);\ -+ data &= ~(PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK);\ -+ newValue <<= PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_OFFSET;\ -+ newValue &= PRCM_CM_CLKSTCTRL_DSP_Autostate_DSP_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMRM_RSTCTRL_DSPReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPReadRegister32),\ -+ __raw_readl(((baseAddress))+PRCM_RM_RSTCTRL_DSP_OFFSET)) -+ -+ -+#define PRM_RSTCTRL_IVA2RST1_DSPWrite32(baseAddress,value)\ -+{\ -+ const u32 offset = PRM_RSTCTRL_IVA2_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32);\ -+ data &= ~(PRM_RSTCTRL_IVA2_RST1_MASK);\ -+ newValue <<= PRM_RSTCTRL_IVA2_RST1_OFFSET;\ -+ newValue &= PRM_RSTCTRL_IVA2_RST1_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define PRM_RSTCTRL_IVA2RST2_DSPWrite32(baseAddress,value)\ -+{\ -+ const u32 offset = PRM_RSTCTRL_IVA2_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32);\ -+ data &= ~(PRM_RSTCTRL_IVA2_RST2_MASK);\ -+ newValue <<= PRM_RSTCTRL_IVA2_RST2_OFFSET;\ -+ newValue &= PRM_RSTCTRL_IVA2_RST2_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define PRM_RSTCTRL_IVA2RST3_DSPWrite32(baseAddress,value)\ -+{\ -+ const u32 offset = PRM_RSTCTRL_IVA2_OFFSET;\ -+ register u32 data =\ -+ __raw_readl(((baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTCTRL_DSPRST1_DSPWrite32);\ -+ data &= ~(PRM_RSTCTRL_IVA2_RST3_MASK);\ -+ newValue 
<<= PRM_RSTCTRL_IVA2_RST3_OFFSET;\ -+ newValue &= PRM_RSTCTRL_IVA2_RST3_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (baseAddress)+offset);\ -+} -+ -+ -+#define PRCMRM_RSTST_DSPReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTST_DSPReadRegister32),\ -+ __raw_readl(((baseAddress))+PRCM_RM_RSTST_DSP_OFFSET)) -+ -+ -+#define PRCMRM_RSTST_DSPWriteRegister32(baseAddress,value)\ -+{\ -+ const u32 offset = PRCM_RM_RSTST_DSP_OFFSET;\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMRM_RSTST_DSPWriteRegister32);\ -+ __raw_writel(newValue, ((u32)(baseAddress))+offset);\ -+} -+ -+ -+#define PRCMPM_PWSTCTRL_DSPForceStateWrite32(baseAddress, value)\ -+{\ -+ const u32 offset = PRCM_PM_PWSTCTRL_DSP_OFFSET;\ -+ register u32 data = \ -+ __raw_readl(((u32)(baseAddress))+offset);\ -+ register u32 newValue = ((u32)(value));\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_DSPForceStateWrite32);\ -+ data &= ~(PRCM_PM_PWSTCTRL_DSP_ForceState_MASK);\ -+ newValue <<= PRCM_PM_PWSTCTRL_DSP_ForceState_OFFSET;\ -+ newValue &= PRCM_PM_PWSTCTRL_DSP_ForceState_MASK;\ -+ newValue |= data;\ -+ __raw_writel(newValue, (u32)(baseAddress)+offset);\ -+} -+ -+ -+#define PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_PM_PWSTCTRL_IVA2_OFFSET;\ -+ const u32 newValue = (u32)PRCMPM_PWSTCTRL_IVA2PowerStateON <<\ -+ PRCM_PM_PWSTCTRL_IVA2_PowerState_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteON32);\ -+ data &= ~(PRCM_PM_PWSTCTRL_IVA2_PowerState_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (baseAddress)+offset);\ -+} -+ -+ -+#define PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_PM_PWSTCTRL_IVA2_OFFSET;\ -+ const u32 newValue = (u32)PRCMPM_PWSTCTRL_IVA2PowerStateOFF <<\ -+ PRCM_PM_PWSTCTRL_IVA2_PowerState_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_IVA2PowerStateWriteOFF32);\ -+ data &= ~(PRCM_PM_PWSTCTRL_IVA2_PowerState_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (baseAddress)+offset);\ -+} -+ -+ -+#define PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32(baseAddress)\ -+{\ -+ const u32 offset = PRCM_PM_PWSTCTRL_DSP_OFFSET;\ -+ const u32 newValue = (u32)PRCMPM_PWSTCTRL_DSPPowerStateRET <<\ -+ PRCM_PM_PWSTCTRL_DSP_PowerState_OFFSET;\ -+ register u32 data = __raw_readl((baseAddress)+offset);\ -+ _DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTCTRL_DSPPowerStateWriteRET32);\ -+ data &= ~(PRCM_PM_PWSTCTRL_DSP_PowerState_MASK);\ -+ data |= newValue;\ -+ __raw_writel(data, (baseAddress)+offset);\ -+} -+ -+ -+#define PRCMPM_PWSTST_DSPReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_DSPReadRegister32),\ -+ __raw_readl(((u32)(baseAddress))+PRCM_PM_PWSTST_DSP_OFFSET)) -+ -+ -+#define PRCMPM_PWSTST_IVA2ReadRegister32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_IVA2ReadRegister32),\ -+ __raw_readl((baseAddress) + PRCM_PM_PWSTST_IVA2_OFFSET)) -+ -+ -+#define PRCMPM_PWSTST_DSPInTransitionRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_DSPInTransitionRead32),\ -+ (((__raw_readl((((u32)(baseAddress))+\ -+ (PRCM_PM_PWSTST_DSP_OFFSET)))) &\ -+ PRCM_PM_PWSTST_DSP_InTransition_MASK) >>\ -+ PRCM_PM_PWSTST_DSP_InTransition_OFFSET)) -+ -+ -+#define PRCMPM_PWSTST_IVA2InTransitionRead32(baseAddress)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_IVA2InTransitionRead32),\ -+ (((__raw_readl((((baseAddress))+\ -+ 
(PRCM_PM_PWSTST_IVA2_OFFSET)))) &\ -+ PRCM_PM_PWSTST_IVA2_InTransition_MASK) >>\ -+ PRCM_PM_PWSTST_IVA2_InTransition_OFFSET)) -+ -+ -+#define PRCMPM_PWSTST_DSPPowerStateStGet32(var)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_DSPPowerStateStGet32),\ -+ (u32)((((u32)(var)) & PRCM_PM_PWSTST_DSP_PowerStateSt_MASK) >>\ -+ PRCM_PM_PWSTST_DSP_PowerStateSt_OFFSET)) -+ -+ -+#define PRCMPM_PWSTST_IVA2PowerStateStGet32(var)\ -+ (_DEBUG_LEVEL_1_EASI(EASIL1_PRCMPM_PWSTST_IVA2PowerStateStGet32),\ -+ (u32)((((u32)(var)) & PRCM_PM_PWSTST_IVA2_PowerStateSt_MASK) >>\ -+ PRCM_PM_PWSTST_IVA2_PowerStateSt_OFFSET)) -+ -+ -+#endif /* USE_LEVEL_1_MACROS */ -+ -+#endif /* _PRCM_REG_ACM_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kbuild linux-omap-2.6.28-nokia1/drivers/dsp/bridge/Kbuild ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kbuild 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/Kbuild 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,39 @@ -+obj-$(CONFIG_MPU_BRIDGE) += bridgedriver.o -+ -+libgen = gen/gb.o gen/gt.o gen/gs.o gen/gh.o gen/_gt_para.o gen/uuidutil.o -+libservices = services/csl.o services/mem.o services/list.o services/dpc.o \ -+ services/kfile.o services/sync.o \ -+ services/clk.o services/cfg.o services/reg.o \ -+ services/regsup.o services/ntfy.o \ -+ services/dbg.o services/services.o -+libwmd = wmd/chnl_sm.o wmd/msg_sm.o wmd/io_sm.o wmd/tiomap3430.o \ -+ wmd/tiomap3430_pwr.o wmd/tiomap_sm.o wmd/tiomap_io.o \ -+ wmd/mmu_fault.o wmd/ue_deh.o -+libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/wcd.o \ -+ pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o -+librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ -+ rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ -+ rmgr/nldr.o rmgr/drv_interface.o -+libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o -+libhw = hw/hw_prcm.o hw/hw_dspssC64P.o hw/hw_mmu.o hw/hw_mbox.o -+ -+bridgedriver-objs = $(libgen) $(libservices) $(libwmd) $(libpmgr) $(librmgr) \ -+ $(libdload) $(libhw) -+ -+# Debug -+ifeq ($(CONFIG_BRIDGE_DEBUG),y) -+ccflags-y += -DGT_TRACE -DDEBUG -+endif -+ -+#Machine dependent -+ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ -+ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \ -+ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS -+ -+#Header files -+ccflags-y += -Idrivers/dsp/bridge/services -+ccflags-y += -Idrivers/dsp/bridge/wmd -+ccflags-y += -Idrivers/dsp/bridge/pmgr -+ccflags-y += -Idrivers/dsp/bridge/rmgr -+ccflags-y += -Idrivers/dsp/bridge/hw -+ccflags-y += -Iarch/arm -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kconfig linux-omap-2.6.28-nokia1/drivers/dsp/bridge/Kconfig ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/Kconfig 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,36 @@ -+# -+# DSP Bridge Driver Support -+# -+ -+menuconfig MPU_BRIDGE -+ tristate "DSP Bridge driver" -+ default n -+ help -+ DSP/BIOS Bridge is designed for platforms that contain a GPP and -+ one or more attached DSPs. The GPP is considered the master or -+ "host" processor, and the attached DSPs are processing resources -+ that can be utilized by applications and drivers running on the GPP. 
-+ -+config BRIDGE_DVFS -+ bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)" -+ depends on MPU_BRIDGE && OMAP_PM_SRF -+ default n -+ help -+ DVFS allows DSP Bridge to initiate the operating point change to -+ scale the chip voltage and frequency in order to match the -+ performance and power consumption to the current processing -+ requirements. -+ -+config BRIDGE_MEMPOOL_SIZE -+ hex "Physical memory pool size (Byte)" -+ depends on MPU_BRIDGE -+ default 0x600000 -+ help -+ Allocate specified size of memory at booting time to avoid allocation -+ failure under heavy memory fragmentation after some use time. -+ -+config BRIDGE_DEBUG -+ bool "DSP Bridge Debug Support" -+ depends on MPU_BRIDGE -+ help -+ Say Y to enable Bridge debugging capabilities -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cmm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/cmm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cmm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/cmm.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,1291 @@ -+/* -+ * cmm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== cmm.c ======== -+ * Purpose: -+ * The Communication(Shared) Memory Management(CMM) module provides -+ * shared memory management services for DSP/BIOS Bridge data streaming -+ * and messaging. -+ * -+ * Multiple shared memory segments can be registered with CMM. -+ * Each registered SM segment is represented by a SM "allocator" that -+ * describes a block of physically contiguous shared memory used for -+ * future allocations by CMM. -+ * -+ * Memory is coelesced back to the appropriate heap when a buffer is -+ * freed. -+ * -+ * Public Functions: -+ * CMM_CallocBuf -+ * CMM_Create -+ * CMM_Destroy -+ * CMM_Exit -+ * CMM_FreeBuf -+ * CMM_GetHandle -+ * CMM_GetInfo -+ * CMM_Init -+ * CMM_RegisterGPPSMSeg -+ * CMM_UnRegisterGPPSMSeg -+ * -+ * The CMM_Xlator[xxx] routines below are used by Node and Stream -+ * to perform SM address translation to the client process address space. -+ * A "translator" object is created by a node/stream for each SM seg used. -+ * -+ * Translator Routines: -+ * CMM_XlatorAllocBuf -+ * CMM_XlatorCreate -+ * CMM_XlatorDelete -+ * CMM_XlatorFreeBuf -+ * CMM_XlatorInfo -+ * CMM_XlatorTranslate -+ * -+ * Private Functions: -+ * AddToFreeList -+ * GetAllocator -+ * GetFreeBlock -+ * GetNode -+ * GetSlot -+ * UnRegisterGPPSMSeg -+ * -+ * Notes: -+ * Va: Virtual address. -+ * Pa: Physical or kernel system address. -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 16-Feb-2002 ag Code review cleanup. -+ *! PreOMAP address translation no longner supported. -+ *! 30-Jan-2002 ag Updates to CMM_XlatorTranslate() per TII, ANSI C++ -+ *! warnings. -+ *! 27-Jan-2002 ag Removed unused CMM_[Alloc][Free]Desc() & #ifdef USELOOKUP, -+ *! & unused VALIDATECMM and VaPaConvert(). -+ *! Removed bFastXlate from CMM_XLATOR. Always fast lookup. -+ *! 
03-Jan-2002 ag Clear SM in CMM_AllocBuf(). Renamed to CMM_CallocBuf(). -+ *! 13-Nov-2001 ag Now delete pNodeFreeListHead and nodes in CMM_Destroy(). -+ *! 28-Aug-2001 ag CMM_GetHandle() returns CMM Mgr hndle given HPROCESSOR. -+ *! Removed unused CMM_[Un]RegisterDSPSMSeg() & -+ * CMM_[Un}ReserveVirtSpace fxns. Some cleanup. -+ *! 12-Aug-2001 ag Exposed CMM_UnRegisterGPP[DSP]SMSeg. -+ *! 13-Feb-2001 kc DSP/BIOS Bridge name update. -+ *! 21-Dec-2000 rr GetFreeBlock checks for pAllocator. -+ *! 09-Dec-2000 ag Added GPPPA2DSPPA, DSPPA2GPPPA macros. -+ *! 05-Dec-2000 ag CMM_XlatorDelete() optionally frees SM bufs and descriptors. -+ *! 30-Oct-2000 ag Buf size bug fixed in CMM_AllocBuf() causing leak. -+ *! Revamped XlatorTranslate() routine. -+ *! 10-Oct-2000 ag Added CMM_Xlator[xxx] functions. -+ *! 02-Aug-2000 ag Created. -+ *! -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+/* Object signatures */ -+#define CMMSIGNATURE 0x004d4d43 /* "CMM" (in reverse) */ -+#define SMEMSIGNATURE 0x4D454D53 /* "SMEM" SM space */ -+#define CMMXLATESIGNATURE 0x584d4d43 /* "CMMX" CMM Xlator */ -+ -+#define NEXT_PA(pNode) (pNode->dwPA + pNode->ulSize) -+ -+/* Other bus/platform translations */ -+#define DSPPA2GPPPA(base, x, y) ((x)+(y)) -+#define GPPPA2DSPPA(base, x, y) ((x)-(y)) -+ -+/* -+ * Allocators define a block of contiguous memory used for future allocations. -+ * -+ * sma - shared memory allocator. -+ * vma - virtual memory allocator.(not used). -+ */ -+struct CMM_ALLOCATOR { /* sma */ -+ u32 dwSignature; /* SMA allocator signature SMEMSIGNATURE */ -+ unsigned int dwSmBase; /* Start of physical SM block */ -+ u32 ulSmSize; /* Size of SM block in bytes */ -+ unsigned int dwVmBase; /* Start of VM block. (Dev driver -+ * context for 'sma') */ -+ u32 dwDSPPhysAddrOffset; /* DSP PA to GPP PA offset for this -+ * SM space */ -+ /* CMM_ADDTO[SUBFROM]DSPPA, _POMAPEMIF2DSPBUS */ -+ enum CMM_CNVTTYPE cFactor; -+ unsigned int dwDSPBase; /* DSP virt base byte address */ -+ u32 ulDSPSize; /* DSP seg size in bytes */ -+ struct CMM_OBJECT *hCmmMgr; /* back ref to parent mgr */ -+ struct LST_LIST *pFreeListHead; /* node list of available memory */ -+ struct LST_LIST *pInUseListHead; /* node list of memory in use */ -+} ; -+ -+struct CMM_XLATOR { /* Pa<->Va translator object */ -+ u32 dwSignature; /* "CMMX" */ -+ struct CMM_OBJECT *hCmmMgr; /* CMM object this translator associated */ -+ /* -+ * Client process virtual base address that corresponds to phys SM -+ * base address for translator's ulSegId. -+ * Only 1 segment ID currently supported. -+ */ -+ unsigned int dwVirtBase; /* virtual base address */ -+ u32 ulVirtSize; /* size of virt space in bytes */ -+ u32 ulSegId; /* Segment Id */ -+} ; -+ -+/* CMM Mgr */ -+struct CMM_OBJECT { -+ u32 dwSignature; /* Used for object validation */ -+ /* -+ * Cmm Lock is used to serialize access mem manager for multi-threads. 
-+ */ -+ struct SYNC_CSOBJECT *hCmmLock; /* Lock to access cmm mgr */ -+ struct LST_LIST *pNodeFreeListHead; /* Free list of memory nodes */ -+ u32 ulMinBlockSize; /* Min SM block; default 16 bytes */ -+ u32 dwPageSize; /* Memory Page size (1k/4k) */ -+ /* GPP SM segment ptrs */ -+ struct CMM_ALLOCATOR *paGPPSMSegTab[CMM_MAXGPPSEGS]; -+} ; -+ -+/* Default CMM Mgr attributes */ -+static struct CMM_MGRATTRS CMM_DFLTMGRATTRS = { -+ 16 /* ulMinBlockSize, min block size(bytes) allocated by cmm mgr */ -+}; -+ -+/* Default allocation attributes */ -+static struct CMM_ATTRS CMM_DFLTALCTATTRS = { -+ 1 /* ulSegId, default segment Id for allocator */ -+}; -+ -+/* Address translator default attrs */ -+static struct CMM_XLATORATTRS CMM_DFLTXLATORATTRS = { -+ 1, /* ulSegId, does not have to match CMM_DFLTALCTATTRS ulSegId */ -+ 0, /* dwDSPBufs */ -+ 0, /* dwDSPBufSize */ -+ NULL, /* pVmBase */ -+ 0, /* dwVmSize */ -+}; -+ -+/* SM node representing a block of memory. */ -+struct CMM_MNODE { -+ struct LST_ELEM link; /* must be 1st element */ -+ u32 dwPA; /* Phys addr */ -+ u32 dwVA; /* Virtual address in device process context */ -+ u32 ulSize; /* SM block size in bytes */ -+ u32 hClientProc; /* Process that allocated this mem block */ -+} ; -+ -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask CMM_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static u32 cRefs; /* module reference count */ -+ -+/* ----------------------------------- Function Prototypes */ -+static void AddToFreeList(struct CMM_ALLOCATOR *pAllocator, -+ struct CMM_MNODE *pNode); -+static struct CMM_ALLOCATOR *GetAllocator(struct CMM_OBJECT *pCmmMgr, -+ u32 ulSegId); -+static struct CMM_MNODE *GetFreeBlock(struct CMM_ALLOCATOR *pAllocator, -+ u32 uSize); -+static struct CMM_MNODE *GetNode(struct CMM_OBJECT *pCmmMgr, u32 dwPA, -+ u32 dwVA, u32 ulSize); -+/* get available slot for new allocator */ -+static s32 GetSlot(struct CMM_OBJECT *hCmmMgr); -+static void UnRegisterGPPSMSeg(struct CMM_ALLOCATOR *pSMA); -+ -+/* -+ * ======== CMM_CallocBuf ======== -+ * Purpose: -+ * Allocate a SM buffer, zero contents, and return the physical address -+ * and optional driver context virtual address(ppBufVA). -+ * -+ * The freelist is sorted in increasing size order. Get the first -+ * block that satifies the request and sort the remaining back on -+ * the freelist; if large enough. The kept block is placed on the -+ * inUseList. 
-+ */ -+void *CMM_CallocBuf(struct CMM_OBJECT *hCmmMgr, u32 uSize, -+ struct CMM_ATTRS *pAttrs, OUT void **ppBufVA) -+{ -+ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; -+ void *pBufPA = NULL; -+ struct CMM_MNODE *pNode = NULL; -+ struct CMM_MNODE *pNewNode = NULL; -+ struct CMM_ALLOCATOR *pAllocator = NULL; -+ u32 uDeltaSize; -+ u8 *pByte = NULL; -+ s32 cnt; -+ -+ if (pAttrs == NULL) -+ pAttrs = &CMM_DFLTALCTATTRS; -+ -+ if (ppBufVA != NULL) -+ *ppBufVA = NULL; -+ -+ if ((MEM_IsValidHandle(pCmmMgr, CMMSIGNATURE)) && (uSize != 0)) { -+ if (pAttrs->ulSegId > 0) { -+ /* SegId > 0 is SM */ -+ /* get the allocator object for this segment id */ -+ pAllocator = GetAllocator(pCmmMgr, pAttrs->ulSegId); -+ /* keep block size a multiple of ulMinBlockSize */ -+ uSize = ((uSize - 1) & ~(pCmmMgr->ulMinBlockSize - 1)) -+ + pCmmMgr->ulMinBlockSize; -+ SYNC_EnterCS(pCmmMgr->hCmmLock); -+ pNode = GetFreeBlock(pAllocator, uSize); -+ } -+ if (pNode) { -+ uDeltaSize = (pNode->ulSize - uSize); -+ if (uDeltaSize >= pCmmMgr->ulMinBlockSize) { -+ /* create a new block with the leftovers and -+ * add to freelist */ -+ pNewNode = GetNode(pCmmMgr, pNode->dwPA + uSize, -+ pNode->dwVA + uSize, -+ (u32)uDeltaSize); -+ /* leftovers go free */ -+ AddToFreeList(pAllocator, pNewNode); -+ /* adjust our node's size */ -+ pNode->ulSize = uSize; -+ } -+ /* Tag node with client process requesting allocation -+ * We'll need to free up a process's alloc'd SM if the -+ * client process goes away. -+ */ -+ /* Return TGID instead of process handle */ -+ pNode->hClientProc = current->tgid; -+ -+ /* put our node on InUse list */ -+ LST_PutTail(pAllocator->pInUseListHead, -+ (struct LST_ELEM *)pNode); -+ pBufPA = (void *)pNode->dwPA; /* physical address */ -+ /* clear mem */ -+ pByte = (u8 *)pNode->dwVA; -+ for (cnt = 0; cnt < (s32) uSize; cnt++, pByte++) -+ *pByte = 0; -+ -+ if (ppBufVA != NULL) { -+ /* Virtual address */ -+ *ppBufVA = (void *)pNode->dwVA; -+ } -+ } -+ GT_3trace(CMM_debugMask, GT_3CLASS, -+ "CMM_CallocBuf dwPA %x, dwVA %x uSize" -+ "%x\n", pNode->dwPA, pNode->dwVA, uSize); -+ SYNC_LeaveCS(pCmmMgr->hCmmLock); -+ } -+ return pBufPA; -+} -+ -+/* -+ * ======== CMM_Create ======== -+ * Purpose: -+ * Create a communication memory manager object. 
-+ */ -+DSP_STATUS CMM_Create(OUT struct CMM_OBJECT **phCmmMgr, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct CMM_MGRATTRS *pMgrAttrs) -+{ -+ struct CMM_OBJECT *pCmmObject = NULL; -+ DSP_STATUS status = DSP_SOK; -+ struct UTIL_SYSINFO sysInfo; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phCmmMgr != NULL); -+ -+ GT_3trace(CMM_debugMask, GT_ENTER, -+ "CMM_Create: phCmmMgr: 0x%x\thDevObject: " -+ "0x%x\tpMgrAttrs: 0x%x\n", phCmmMgr, hDevObject, pMgrAttrs); -+ *phCmmMgr = NULL; -+ /* create, zero, and tag a cmm mgr object */ -+ MEM_AllocObject(pCmmObject, struct CMM_OBJECT, CMMSIGNATURE); -+ if (pCmmObject != NULL) { -+ if (pMgrAttrs == NULL) -+ pMgrAttrs = &CMM_DFLTMGRATTRS; /* set defaults */ -+ -+ /* 4 bytes minimum */ -+ DBC_Assert(pMgrAttrs->ulMinBlockSize >= 4); -+ /* save away smallest block allocation for this cmm mgr */ -+ pCmmObject->ulMinBlockSize = pMgrAttrs->ulMinBlockSize; -+ /* save away the systems memory page size */ -+ sysInfo.dwPageSize = PAGE_SIZE; -+ sysInfo.dwAllocationGranularity = PAGE_SIZE; -+ sysInfo.dwNumberOfProcessors = 1; -+ if (DSP_SUCCEEDED(status)) { -+ GT_1trace(CMM_debugMask, GT_5CLASS, -+ "CMM_Create: Got system page size" -+ "= 0x%x\t\n", sysInfo.dwPageSize); -+ pCmmObject->dwPageSize = sysInfo.dwPageSize; -+ } else { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_Create: failed to get system" -+ "page size\n"); -+ pCmmObject->dwPageSize = 0; -+ status = DSP_EFAIL; -+ } -+ /* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by -+ * MEM_AllocObject */ -+ if (DSP_SUCCEEDED(status)) { -+ /* create node free list */ -+ pCmmObject->pNodeFreeListHead = LST_Create(); -+ if (pCmmObject->pNodeFreeListHead == NULL) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_Create: LST_Create() " -+ "failed \n"); -+ status = DSP_EMEMORY; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_InitializeCS(&pCmmObject->hCmmLock); -+ -+ if (DSP_SUCCEEDED(status)) -+ *phCmmMgr = pCmmObject; -+ else -+ CMM_Destroy(pCmmObject, true); -+ -+ } else { -+ GT_0trace(CMM_debugMask, GT_6CLASS, -+ "CMM_Create: Object Allocation " -+ "Failure(CMM Object)\n"); -+ status = DSP_EMEMORY; -+ } -+ return status; -+} -+ -+/* -+ * ======== CMM_Destroy ======== -+ * Purpose: -+ * Release the communication memory manager resources. 
-+ */ -+DSP_STATUS CMM_Destroy(struct CMM_OBJECT *hCmmMgr, bool bForce) -+{ -+ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; -+ struct CMM_INFO tempInfo; -+ DSP_STATUS status = DSP_SOK; -+ s32 nSlot; -+ struct CMM_MNODE *pNode; -+ -+ DBC_Require(cRefs > 0); -+ if (!MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { -+ status = DSP_EHANDLE; -+ return status; -+ } -+ SYNC_EnterCS(pCmmMgr->hCmmLock); -+ /* If not force then fail if outstanding allocations exist */ -+ if (!bForce) { -+ /* Check for outstanding memory allocations */ -+ status = CMM_GetInfo(hCmmMgr, &tempInfo); -+ if (DSP_SUCCEEDED(status)) { -+ if (tempInfo.ulTotalInUseCnt > 0) { -+ /* outstanding allocations */ -+ status = DSP_EFAIL; -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* UnRegister SM allocator */ -+ for (nSlot = 0; nSlot < CMM_MAXGPPSEGS; nSlot++) { -+ if (pCmmMgr->paGPPSMSegTab[nSlot] != NULL) { -+ UnRegisterGPPSMSeg(pCmmMgr-> -+ paGPPSMSegTab[nSlot]); -+ /* Set slot to NULL for future reuse */ -+ pCmmMgr->paGPPSMSegTab[nSlot] = NULL; -+ } -+ } -+ } -+ if (pCmmMgr->pNodeFreeListHead != NULL) { -+ /* Free the free nodes */ -+ while (!LST_IsEmpty(pCmmMgr->pNodeFreeListHead)) { -+ /* (struct LST_ELEM*) pNode = -+ * LST_GetHead(pCmmMgr->pNodeFreeListHead);*/ -+ pNode = (struct CMM_MNODE *)LST_GetHead(pCmmMgr-> -+ pNodeFreeListHead); -+ MEM_Free(pNode); -+ } -+ /* delete NodeFreeList list */ -+ LST_Delete(pCmmMgr->pNodeFreeListHead); -+ } -+ SYNC_LeaveCS(pCmmMgr->hCmmLock); -+ if (DSP_SUCCEEDED(status)) { -+ /* delete CS & cmm mgr object */ -+ SYNC_DeleteCS(pCmmMgr->hCmmLock); -+ MEM_FreeObject(pCmmMgr); -+ } -+ return status; -+} -+ -+/* -+ * ======== CMM_Exit ======== -+ * Purpose: -+ * Discontinue usage of module; free resources when reference count -+ * reaches 0. -+ */ -+void CMM_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(CMM_debugMask, GT_ENTER, -+ "exiting CMM_Exit,ref count:0x%x\n", cRefs); -+} -+ -+/* -+ * ======== CMM_FreeBuf ======== -+ * Purpose: -+ * Free the given buffer. -+ */ -+DSP_STATUS CMM_FreeBuf(struct CMM_OBJECT *hCmmMgr, void *pBufPA, u32 ulSegId) -+{ -+ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; -+ DSP_STATUS status = DSP_EPOINTER; -+ struct CMM_MNODE *pCurNode = NULL; -+ struct CMM_ALLOCATOR *pAllocator = NULL; -+ struct CMM_ATTRS *pAttrs; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pBufPA != NULL); -+ GT_1trace(CMM_debugMask, GT_ENTER, "CMM_FreeBuf pBufPA %x\n", pBufPA); -+ if (ulSegId == 0) { -+ pAttrs = &CMM_DFLTALCTATTRS; -+ ulSegId = pAttrs->ulSegId; -+ } -+ if (!(MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) || !(ulSegId > 0)) { -+ status = DSP_EHANDLE; -+ return status; -+ } -+ /* get the allocator for this segment id */ -+ pAllocator = GetAllocator(pCmmMgr, ulSegId); -+ if (pAllocator != NULL) { -+ SYNC_EnterCS(pCmmMgr->hCmmLock); -+ pCurNode = (struct CMM_MNODE *)LST_First(pAllocator-> -+ pInUseListHead); -+ while (pCurNode) { -+ if ((u32)pBufPA == pCurNode->dwPA) { -+ /* Found it */ -+ LST_RemoveElem(pAllocator->pInUseListHead, -+ (struct LST_ELEM *)pCurNode); -+ /* back to freelist */ -+ AddToFreeList(pAllocator, pCurNode); -+ status = DSP_SOK; /* all right! */ -+ break; -+ } -+ /* next node. */ -+ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> -+ pInUseListHead, (struct LST_ELEM *)pCurNode); -+ } -+ SYNC_LeaveCS(pCmmMgr->hCmmLock); -+ } -+ return status; -+} -+ -+/* -+ * ======== CMM_GetHandle ======== -+ * Purpose: -+ * Return the communication memory manager object for this device. 
-+ * This is typically called from the client process. -+ */ -+DSP_STATUS CMM_GetHandle(DSP_HPROCESSOR hProcessor, -+ OUT struct CMM_OBJECT **phCmmMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phCmmMgr != NULL); -+ if (hProcessor != NULL) -+ status = PROC_GetDevObject(hProcessor, &hDevObject); -+ else -+ hDevObject = DEV_GetFirst(); /* default */ -+ -+ if (DSP_SUCCEEDED(status)) -+ status = DEV_GetCmmMgr(hDevObject, phCmmMgr); -+ -+ return status; -+} -+ -+/* -+ * ======== CMM_GetInfo ======== -+ * Purpose: -+ * Return the current memory utilization information. -+ */ -+DSP_STATUS CMM_GetInfo(struct CMM_OBJECT *hCmmMgr, -+ OUT struct CMM_INFO *pCmmInfo) -+{ -+ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; -+ u32 ulSeg; -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_ALLOCATOR *pAltr; -+ struct CMM_MNODE *pCurNode = NULL; -+ -+ DBC_Require(pCmmInfo != NULL); -+ -+ if (!MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { -+ status = DSP_EHANDLE; -+ return status; -+ } -+ SYNC_EnterCS(pCmmMgr->hCmmLock); -+ pCmmInfo->ulNumGPPSMSegs = 0; /* # of SM segments */ -+ pCmmInfo->ulTotalInUseCnt = 0; /* Total # of outstanding alloc */ -+ pCmmInfo->ulMinBlockSize = pCmmMgr->ulMinBlockSize; /* min block size */ -+ /* check SM memory segments */ -+ for (ulSeg = 1; ulSeg <= CMM_MAXGPPSEGS; ulSeg++) { -+ /* get the allocator object for this segment id */ -+ pAltr = GetAllocator(pCmmMgr, ulSeg); -+ if (pAltr != NULL) { -+ pCmmInfo->ulNumGPPSMSegs++; -+ pCmmInfo->segInfo[ulSeg - 1].dwSegBasePa = -+ pAltr->dwSmBase - pAltr->ulDSPSize; -+ pCmmInfo->segInfo[ulSeg - 1].ulTotalSegSize = -+ pAltr->ulDSPSize + pAltr->ulSmSize; -+ pCmmInfo->segInfo[ulSeg - 1].dwGPPBasePA = -+ pAltr->dwSmBase; -+ pCmmInfo->segInfo[ulSeg - 1].ulGPPSize = -+ pAltr->ulSmSize; -+ pCmmInfo->segInfo[ulSeg - 1].dwDSPBaseVA = -+ pAltr->dwDSPBase; -+ pCmmInfo->segInfo[ulSeg - 1].ulDSPSize = -+ pAltr->ulDSPSize; -+ pCmmInfo->segInfo[ulSeg - 1].dwSegBaseVa = -+ pAltr->dwVmBase - pAltr->ulDSPSize; -+ pCmmInfo->segInfo[ulSeg - 1].ulInUseCnt = 0; -+ pCurNode = (struct CMM_MNODE *)LST_First(pAltr-> -+ pInUseListHead); -+ /* Count inUse blocks */ -+ while (pCurNode) { -+ pCmmInfo->ulTotalInUseCnt++; -+ pCmmInfo->segInfo[ulSeg - 1].ulInUseCnt++; -+ /* next node. */ -+ pCurNode = (struct CMM_MNODE *)LST_Next(pAltr-> -+ pInUseListHead, -+ (struct LST_ELEM *)pCurNode); -+ } -+ } -+ } /* end for */ -+ SYNC_LeaveCS(pCmmMgr->hCmmLock); -+ return status; -+} -+ -+/* -+ * ======== CMM_Init ======== -+ * Purpose: -+ * Initializes private state of CMM module. -+ */ -+bool CMM_Init(void) -+{ -+ bool fRetval = true; -+ -+ DBC_Require(cRefs >= 0); -+ if (cRefs == 0) { -+ /* Set the Trace mask */ -+ /* "CM" for Comm Memory manager */ -+ GT_create(&CMM_debugMask, "CM"); -+ } -+ if (fRetval) -+ cRefs++; -+ -+ GT_1trace(CMM_debugMask, GT_ENTER, -+ "Entered CMM_Init,ref count:0x%x\n", cRefs); -+ -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -+ -+/* -+ * ======== CMM_RegisterGPPSMSeg ======== -+ * Purpose: -+ * Register a block of SM with the CMM to be used for later GPP SM -+ * allocations. 
-+ */ -+DSP_STATUS CMM_RegisterGPPSMSeg(struct CMM_OBJECT *hCmmMgr, u32 dwGPPBasePA, -+ u32 ulSize, u32 dwDSPAddrOffset, -+ enum CMM_CNVTTYPE cFactor, u32 dwDSPBase, -+ u32 ulDSPSize, u32 *pulSegId, -+ u32 dwGPPBaseVA) -+{ -+ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; -+ struct CMM_ALLOCATOR *pSMA = NULL; -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_MNODE *pNewNode; -+ s32 nSlot; -+ -+ DBC_Require(ulSize > 0); -+ DBC_Require(pulSegId != NULL); -+ DBC_Require(dwGPPBasePA != 0); -+ DBC_Require(dwGPPBaseVA != 0); -+ DBC_Require((cFactor <= CMM_ADDTODSPPA) && -+ (cFactor >= CMM_SUBFROMDSPPA)); -+ GT_6trace(CMM_debugMask, GT_ENTER, -+ "CMM_RegisterGPPSMSeg dwGPPBasePA %x " -+ "ulSize %x dwDSPAddrOffset %x dwDSPBase %x ulDSPSize %x " -+ "dwGPPBaseVA %x\n", dwGPPBasePA, ulSize, dwDSPAddrOffset, -+ dwDSPBase, ulDSPSize, dwGPPBaseVA); -+ if (!MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { -+ status = DSP_EHANDLE; -+ return status; -+ } -+ /* make sure we have room for another allocator */ -+ SYNC_EnterCS(pCmmMgr->hCmmLock); -+ nSlot = GetSlot(pCmmMgr); -+ if (nSlot < 0) { -+ /* get a slot number */ -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ /* Check if input ulSize is big enough to alloc at least one block */ -+ if (DSP_SUCCEEDED(status)) { -+ if (ulSize < pCmmMgr->ulMinBlockSize) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_RegisterGPPSMSeg: " -+ "ulSize too small\n"); -+ status = DSP_EINVALIDARG; -+ goto func_end; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* create, zero, and tag an SM allocator object */ -+ MEM_AllocObject(pSMA, struct CMM_ALLOCATOR, SMEMSIGNATURE); -+ } -+ if (pSMA != NULL) { -+ pSMA->hCmmMgr = hCmmMgr; /* ref to parent */ -+ pSMA->dwSmBase = dwGPPBasePA; /* SM Base phys */ -+ pSMA->ulSmSize = ulSize; /* SM segment size in bytes */ -+ pSMA->dwVmBase = dwGPPBaseVA; -+ pSMA->dwDSPPhysAddrOffset = dwDSPAddrOffset; -+ pSMA->cFactor = cFactor; -+ pSMA->dwDSPBase = dwDSPBase; -+ pSMA->ulDSPSize = ulDSPSize; -+ if (pSMA->dwVmBase == 0) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_RegisterGPPSMSeg: Error" -+ "MEM_LinearAddress()\n"); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* return the actual segment identifier */ -+ *pulSegId = (u32) nSlot + 1; -+ /* create memory free list */ -+ pSMA->pFreeListHead = LST_Create(); -+ if (pSMA->pFreeListHead == NULL) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_RegisterGPPSMSeg: " -+ "Out Of Memory \n"); -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* create memory in-use list */ -+ pSMA->pInUseListHead = LST_Create(); -+ if (pSMA->pInUseListHead == NULL) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_RegisterGPPSMSeg: " -+ "LST_Create failed\n"); -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Get a mem node for this hunk-o-memory */ -+ pNewNode = GetNode(pCmmMgr, dwGPPBasePA, -+ pSMA->dwVmBase, ulSize); -+ /* Place node on the SM allocator's free list */ -+ if (pNewNode) { -+ LST_PutTail(pSMA->pFreeListHead, -+ (struct LST_ELEM *)pNewNode); -+ } else { -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ } -+ if (DSP_FAILED(status)) { -+ /* Cleanup allocator */ -+ UnRegisterGPPSMSeg(pSMA); -+ } -+ } else { -+ GT_0trace(CMM_debugMask, GT_6CLASS, -+ "CMM_RegisterGPPSMSeg: SMA Object " -+ "Allocation Failure\n"); -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ /* make entry */ -+ if (DSP_SUCCEEDED(status)) -+ pCmmMgr->paGPPSMSegTab[nSlot] = pSMA; -+ -+func_end: -+ 
SYNC_LeaveCS(pCmmMgr->hCmmLock); -+ return status; -+} -+ -+/* -+ * ======== CMM_UnRegisterGPPSMSeg ======== -+ * Purpose: -+ * UnRegister GPP SM segments with the CMM. -+ */ -+DSP_STATUS CMM_UnRegisterGPPSMSeg(struct CMM_OBJECT *hCmmMgr, u32 ulSegId) -+{ -+ struct CMM_OBJECT *pCmmMgr = (struct CMM_OBJECT *)hCmmMgr; -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_ALLOCATOR *pSMA; -+ u32 ulId = ulSegId; -+ -+ DBC_Require(ulSegId > 0); -+ if (MEM_IsValidHandle(hCmmMgr, CMMSIGNATURE)) { -+ if (ulSegId == CMM_ALLSEGMENTS) -+ ulId = 1; -+ -+ if ((ulId > 0) && (ulId <= CMM_MAXGPPSEGS)) { -+ while (ulId <= CMM_MAXGPPSEGS) { -+ SYNC_EnterCS(pCmmMgr->hCmmLock); -+ /* slot = segId-1 */ -+ pSMA = pCmmMgr->paGPPSMSegTab[ulId - 1]; -+ if (pSMA != NULL) { -+ UnRegisterGPPSMSeg(pSMA); -+ /* Set alctr ptr to NULL for future -+ * reuse */ -+ pCmmMgr->paGPPSMSegTab[ulId - 1] = NULL; -+ } else if (ulSegId != CMM_ALLSEGMENTS) { -+ status = DSP_EFAIL; -+ } -+ SYNC_LeaveCS(pCmmMgr->hCmmLock); -+ if (ulSegId != CMM_ALLSEGMENTS) -+ break; -+ -+ ulId++; -+ } /* end while */ -+ } else { -+ status = DSP_EINVALIDARG; -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_UnRegisterGPPSMSeg: Bad " -+ "segment Id\n"); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ } -+ return status; -+} -+ -+/* -+ * ======== UnRegisterGPPSMSeg ======== -+ * Purpose: -+ * UnRegister the SM allocator by freeing all its resources and -+ * nulling cmm mgr table entry. -+ * Note: -+ * This routine is always called within cmm lock crit sect. -+ */ -+static void UnRegisterGPPSMSeg(struct CMM_ALLOCATOR *pSMA) -+{ -+ struct CMM_MNODE *pCurNode = NULL; -+ struct CMM_MNODE *pNextNode = NULL; -+ -+ DBC_Require(pSMA != NULL); -+ if (pSMA->pFreeListHead != NULL) { -+ /* free nodes on free list */ -+ pCurNode = (struct CMM_MNODE *)LST_First(pSMA->pFreeListHead); -+ while (pCurNode) { -+ pNextNode = (struct CMM_MNODE *)LST_Next(pSMA-> -+ pFreeListHead, -+ (struct LST_ELEM *)pCurNode); -+ LST_RemoveElem(pSMA->pFreeListHead, -+ (struct LST_ELEM *)pCurNode); -+ MEM_Free((void *) pCurNode); -+ /* next node. */ -+ pCurNode = pNextNode; -+ } -+ LST_Delete(pSMA->pFreeListHead); /* delete freelist */ -+ /* free nodes on InUse list */ -+ pCurNode = (struct CMM_MNODE *)LST_First(pSMA->pInUseListHead); -+ while (pCurNode) { -+ pNextNode = (struct CMM_MNODE *)LST_Next(pSMA-> -+ pInUseListHead, -+ (struct LST_ELEM *)pCurNode); -+ LST_RemoveElem(pSMA->pInUseListHead, -+ (struct LST_ELEM *)pCurNode); -+ MEM_Free((void *) pCurNode); -+ /* next node. */ -+ pCurNode = pNextNode; -+ } -+ LST_Delete(pSMA->pInUseListHead); /* delete InUse list */ -+ } -+ if ((void *) pSMA->dwVmBase != NULL) -+ MEM_UnmapLinearAddress((void *) pSMA->dwVmBase); -+ -+ /* Free allocator itself */ -+ MEM_FreeObject(pSMA); -+} -+ -+/* -+ * ======== GetSlot ======== -+ * Purpose: -+ * An available slot # is returned. Returns negative on failure. -+ */ -+static s32 GetSlot(struct CMM_OBJECT *pCmmMgr) -+{ -+ s32 nSlot = -1; /* neg on failure */ -+ DBC_Require(pCmmMgr != NULL); -+ /* get first available slot in cmm mgr SMSegTab[] */ -+ for (nSlot = 0; nSlot < CMM_MAXGPPSEGS; nSlot++) { -+ if (pCmmMgr->paGPPSMSegTab[nSlot] == NULL) -+ break; -+ -+ } -+ if (nSlot == CMM_MAXGPPSEGS) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_RegisterGPPSMSeg: Allocator " -+ "entry failure, max exceeded\n"); -+ nSlot = -1; /* failed */ -+ } -+ return nSlot; -+} -+ -+/* -+ * ======== GetNode ======== -+ * Purpose: -+ * Get a memory node from freelist or create a new one. 
-+ */ -+static struct CMM_MNODE *GetNode(struct CMM_OBJECT *pCmmMgr, u32 dwPA, -+ u32 dwVA, u32 ulSize) -+{ -+ struct CMM_MNODE *pNode = NULL; -+ -+ DBC_Require(pCmmMgr != NULL); -+ DBC_Require(dwPA != 0); -+ DBC_Require(dwVA != 0); -+ DBC_Require(ulSize != 0); -+ /* Check cmm mgr's node freelist */ -+ if (LST_IsEmpty(pCmmMgr->pNodeFreeListHead)) { -+ pNode = (struct CMM_MNODE *)MEM_Calloc(sizeof(struct CMM_MNODE), -+ MEM_PAGED); -+ } else { -+ /* surely a valid element */ -+ /* (struct LST_ELEM*) pNode = LST_GetHead(pCmmMgr-> -+ * pNodeFreeListHead);*/ -+ pNode = (struct CMM_MNODE *)LST_GetHead(pCmmMgr-> -+ pNodeFreeListHead); -+ } -+ if (pNode == NULL) { -+ GT_0trace(CMM_debugMask, GT_7CLASS, "GetNode: Out Of Memory\n"); -+ } else { -+ LST_InitElem((struct LST_ELEM *) pNode); /* set self */ -+ pNode->dwPA = dwPA; /* Physical addr of start of block */ -+ pNode->dwVA = dwVA; /* Virtual " " */ -+ pNode->ulSize = ulSize; /* Size of block */ -+ } -+ return pNode; -+} -+ -+/* -+ * ======== DeleteNode ======== -+ * Purpose: -+ * Put a memory node on the cmm nodelist for later use. -+ * Doesn't actually delete the node. Heap thrashing friendly. -+ */ -+static void DeleteNode(struct CMM_OBJECT *pCmmMgr, struct CMM_MNODE *pNode) -+{ -+ DBC_Require(pNode != NULL); -+ LST_InitElem((struct LST_ELEM *) pNode); /* init .self ptr */ -+ LST_PutTail(pCmmMgr->pNodeFreeListHead, (struct LST_ELEM *) pNode); -+} -+ -+/* -+ * ====== GetFreeBlock ======== -+ * Purpose: -+ * Scan the free block list and return the first block that satisfies -+ * the size. -+ */ -+static struct CMM_MNODE *GetFreeBlock(struct CMM_ALLOCATOR *pAllocator, -+ u32 uSize) -+{ -+ if (pAllocator) { -+ struct CMM_MNODE *pCurNode = (struct CMM_MNODE *) -+ LST_First(pAllocator->pFreeListHead); -+ while (pCurNode) { -+ if (uSize <= (u32) pCurNode->ulSize) { -+ LST_RemoveElem(pAllocator->pFreeListHead, -+ (struct LST_ELEM *)pCurNode); -+ return pCurNode; -+ } -+ /* next node. */ -+ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> -+ pFreeListHead, (struct LST_ELEM *)pCurNode); -+ } -+ } -+ return NULL; -+} -+ -+/* -+ * ======== AddToFreeList ======== -+ * Purpose: -+ * Coelesce node into the freelist in ascending size order. -+ */ -+static void AddToFreeList(struct CMM_ALLOCATOR *pAllocator, -+ struct CMM_MNODE *pNode) -+{ -+ struct CMM_MNODE *pNodePrev = NULL; -+ struct CMM_MNODE *pNodeNext = NULL; -+ struct CMM_MNODE *pCurNode; -+ u32 dwThisPA; -+ u32 dwNextPA; -+ -+ DBC_Require(pNode != NULL); -+ DBC_Require(pAllocator != NULL); -+ dwThisPA = pNode->dwPA; -+ dwNextPA = NEXT_PA(pNode); -+ pCurNode = (struct CMM_MNODE *)LST_First(pAllocator->pFreeListHead); -+ while (pCurNode) { -+ if (dwThisPA == NEXT_PA(pCurNode)) { -+ /* found the block ahead of this one */ -+ pNodePrev = pCurNode; -+ } else if (dwNextPA == pCurNode->dwPA) { -+ pNodeNext = pCurNode; -+ } -+ if ((pNodePrev == NULL) || (pNodeNext == NULL)) { -+ /* next node. 
*/ -+ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> -+ pFreeListHead, (struct LST_ELEM *)pCurNode); -+ } else { -+ /* got 'em */ -+ break; -+ } -+ } /* while */ -+ if (pNodePrev != NULL) { -+ /* combine with previous block */ -+ LST_RemoveElem(pAllocator->pFreeListHead, -+ (struct LST_ELEM *)pNodePrev); -+ /* grow node to hold both */ -+ pNode->ulSize += pNodePrev->ulSize; -+ pNode->dwPA = pNodePrev->dwPA; -+ pNode->dwVA = pNodePrev->dwVA; -+ /* place node on mgr nodeFreeList */ -+ DeleteNode((struct CMM_OBJECT *)pAllocator->hCmmMgr, pNodePrev); -+ } -+ if (pNodeNext != NULL) { -+ /* combine with next block */ -+ LST_RemoveElem(pAllocator->pFreeListHead, -+ (struct LST_ELEM *)pNodeNext); -+ /* grow da node */ -+ pNode->ulSize += pNodeNext->ulSize; -+ /* place node on mgr nodeFreeList */ -+ DeleteNode((struct CMM_OBJECT *)pAllocator->hCmmMgr, pNodeNext); -+ } -+ /* Now, let's add to freelist in increasing size order */ -+ pCurNode = (struct CMM_MNODE *)LST_First(pAllocator->pFreeListHead); -+ while (pCurNode) { -+ if (pNode->ulSize <= pCurNode->ulSize) -+ break; -+ -+ /* next node. */ -+ pCurNode = (struct CMM_MNODE *)LST_Next(pAllocator-> -+ pFreeListHead, (struct LST_ELEM *)pCurNode); -+ } -+ /* if pCurNode is NULL then add our pNode to the end of the freelist */ -+ if (pCurNode == NULL) { -+ LST_PutTail(pAllocator->pFreeListHead, -+ (struct LST_ELEM *)pNode); -+ } else { -+ /* insert our node before the current traversed node */ -+ LST_InsertBefore(pAllocator->pFreeListHead, -+ (struct LST_ELEM *)pNode, -+ (struct LST_ELEM *)pCurNode); -+ } -+} -+ -+/* -+ * ======== GetAllocator ======== -+ * Purpose: -+ * Return the allocator for the given SM Segid. -+ * SegIds: 1,2,3..max. -+ */ -+static struct CMM_ALLOCATOR *GetAllocator(struct CMM_OBJECT *pCmmMgr, -+ u32 ulSegId) -+{ -+ struct CMM_ALLOCATOR *pAllocator = NULL; -+ -+ DBC_Require(pCmmMgr != NULL); -+ DBC_Require((ulSegId > 0) && (ulSegId <= CMM_MAXGPPSEGS)); -+ pAllocator = pCmmMgr->paGPPSMSegTab[ulSegId - 1]; -+ if (pAllocator != NULL) { -+ /* make sure it's for real */ -+ if (!MEM_IsValidHandle(pAllocator, SMEMSIGNATURE)) { -+ pAllocator = NULL; -+ DBC_Assert(false); -+ } -+ } -+ return pAllocator; -+} -+ -+/* -+ * ======== CMM_XlatorCreate ======== -+ * Purpose: -+ * Create an address translator object. -+ */ -+DSP_STATUS CMM_XlatorCreate(OUT struct CMM_XLATOROBJECT **phXlator, -+ struct CMM_OBJECT *hCmmMgr, -+ struct CMM_XLATORATTRS *pXlatorAttrs) -+{ -+ struct CMM_XLATOR *pXlatorObject = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phXlator != NULL); -+ DBC_Require(hCmmMgr != NULL); -+ GT_3trace(CMM_debugMask, GT_ENTER, -+ "CMM_XlatorCreate: phXlator: 0x%x\t" -+ "phCmmMgr: 0x%x\tpXlAttrs: 0x%x\n", phXlator, -+ hCmmMgr, pXlatorAttrs); -+ *phXlator = NULL; -+ if (pXlatorAttrs == NULL) -+ pXlatorAttrs = &CMM_DFLTXLATORATTRS; /* set defaults */ -+ -+ MEM_AllocObject(pXlatorObject, struct CMM_XLATOR, CMMXLATESIGNATURE); -+ if (pXlatorObject != NULL) { -+ pXlatorObject->hCmmMgr = hCmmMgr; /* ref back to CMM */ -+ pXlatorObject->ulSegId = pXlatorAttrs->ulSegId; /* SM segId */ -+ } else { -+ GT_0trace(CMM_debugMask, GT_6CLASS, -+ "CMM_XlatorCreate: Object Allocation" -+ "Failure(CMM Xlator)\n"); -+ status = DSP_EMEMORY; -+ } -+ if (DSP_SUCCEEDED(status)) -+ *phXlator = (struct CMM_XLATOROBJECT *) pXlatorObject; -+ -+ return status; -+} -+ -+/* -+ * ======== CMM_XlatorDelete ======== -+ * Purpose: -+ * Free the Xlator resources. -+ * VM gets freed later. 
-+ */ -+DSP_STATUS CMM_XlatorDelete(struct CMM_XLATOROBJECT *hXlator, bool bForce) -+{ -+ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ -+ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { -+ MEM_FreeObject(pXlator); -+ } else { -+ status = DSP_EHANDLE; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== CMM_XlatorAllocBuf ======== -+ */ -+void *CMM_XlatorAllocBuf(struct CMM_XLATOROBJECT *hXlator, void *pVaBuf, -+ u32 uPaSize) -+{ -+ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; -+ void *pBuf = NULL; -+ struct CMM_ATTRS attrs; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hXlator != NULL); -+ DBC_Require(pXlator->hCmmMgr != NULL); -+ DBC_Require(pVaBuf != NULL); -+ DBC_Require(uPaSize > 0); -+ DBC_Require(pXlator->ulSegId > 0); -+ -+ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { -+ attrs.ulSegId = pXlator->ulSegId; -+ *(volatile u32 *)pVaBuf = 0; -+ /* Alloc SM */ -+ pBuf = CMM_CallocBuf(pXlator->hCmmMgr, uPaSize, &attrs, NULL); -+ if (pBuf) { -+ /* convert to translator(node/strm) process Virtual -+ * address */ -+ *(volatile u32 **)pVaBuf = -+ (u32 *)CMM_XlatorTranslate(hXlator, -+ pBuf, CMM_PA2VA); -+ } -+ } -+ return pBuf; -+} -+ -+/* -+ * ======== CMM_XlatorFreeBuf ======== -+ * Purpose: -+ * Free the given SM buffer and descriptor. -+ * Does not free virtual memory. -+ */ -+DSP_STATUS CMM_XlatorFreeBuf(struct CMM_XLATOROBJECT *hXlator, void *pBufVa) -+{ -+ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; -+ DSP_STATUS status = DSP_EFAIL; -+ void *pBufPa = NULL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pBufVa != NULL); -+ DBC_Require(pXlator->ulSegId > 0); -+ -+ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { -+ /* convert Va to Pa so we can free it. */ -+ pBufPa = CMM_XlatorTranslate(hXlator, pBufVa, CMM_VA2PA); -+ if (pBufPa) { -+ status = CMM_FreeBuf(pXlator->hCmmMgr, pBufPa, -+ pXlator->ulSegId); -+ if (DSP_FAILED(status)) { -+ /* Uh oh, this shouldn't happen. Descriptor -+ * gone! */ -+ GT_2trace(CMM_debugMask, GT_7CLASS, -+ "Cannot free DMA/ZCPY buffer" -+ "not allocated by MPU. PA %x, VA %x\n", -+ pBufPa, pBufVa); -+ DBC_Assert(false); /* CMM is leaking mem! */ -+ } -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== CMM_XlatorInfo ======== -+ * Purpose: -+ * Set/Get translator info. 
-+ */ -+DSP_STATUS CMM_XlatorInfo(struct CMM_XLATOROBJECT *hXlator, IN OUT u8 **pAddr, -+ u32 ulSize, u32 uSegId, bool bSetInfo) -+{ -+ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pAddr != NULL); -+ DBC_Require((uSegId > 0) && (uSegId <= CMM_MAXGPPSEGS)); -+ -+ if (MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) { -+ if (bSetInfo) { -+ /* set translators virtual address range */ -+ pXlator->dwVirtBase = (u32)*pAddr; -+ pXlator->ulVirtSize = ulSize; -+ GT_2trace(CMM_debugMask, GT_3CLASS, -+ "pXlator->dwVirtBase %x, " -+ "ulVirtSize %x\n", pXlator->dwVirtBase, -+ pXlator->ulVirtSize); -+ } else { /* return virt base address */ -+ *pAddr = (u8 *)pXlator->dwVirtBase; -+ } -+ } else { -+ status = DSP_EHANDLE; -+ } -+ return status; -+} -+ -+/* -+ * ======== CMM_XlatorTranslate ======== -+ */ -+void *CMM_XlatorTranslate(struct CMM_XLATOROBJECT *hXlator, void *pAddr, -+ enum CMM_XLATETYPE xType) -+{ -+ u32 dwAddrXlate = 0; -+ struct CMM_XLATOR *pXlator = (struct CMM_XLATOR *)hXlator; -+ struct CMM_OBJECT *pCmmMgr = NULL; -+ struct CMM_ALLOCATOR *pAlctr = NULL; -+ u32 dwOffset = 0; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pAddr != NULL); -+ DBC_Require((xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA)); -+ -+ if (!MEM_IsValidHandle(pXlator, CMMXLATESIGNATURE)) -+ goto loop_cont; -+ -+ pCmmMgr = (struct CMM_OBJECT *)pXlator->hCmmMgr; -+ /* get this translator's default SM allocator */ -+ DBC_Assert(pXlator->ulSegId > 0); -+ pAlctr = pCmmMgr->paGPPSMSegTab[pXlator->ulSegId - 1]; -+ if (!MEM_IsValidHandle(pAlctr, SMEMSIGNATURE)) -+ goto loop_cont; -+ -+ if ((xType == CMM_VA2DSPPA) || (xType == CMM_VA2PA) || -+ (xType == CMM_PA2VA)) { -+ if (xType == CMM_PA2VA) { -+ /* Gpp Va = Va Base + offset */ -+ dwOffset = (u8 *)pAddr - (u8 *)(pAlctr->dwSmBase - -+ pAlctr->ulDSPSize); -+ dwAddrXlate = pXlator->dwVirtBase + dwOffset; -+ /* Check if translated Va base is in range */ -+ if ((dwAddrXlate < pXlator->dwVirtBase) || -+ (dwAddrXlate >= -+ (pXlator->dwVirtBase + pXlator->ulVirtSize))) { -+ dwAddrXlate = 0; /* bad address */ -+ GT_0trace(CMM_debugMask, GT_7CLASS, -+ "CMM_XlatorTranslate: " -+ "Virt addr out of range\n"); -+ } -+ } else { -+ /* Gpp PA = Gpp Base + offset */ -+ dwOffset = (u8 *)pAddr - (u8 *)pXlator->dwVirtBase; -+ dwAddrXlate = pAlctr->dwSmBase - pAlctr->ulDSPSize + -+ dwOffset; -+ } -+ } else { -+ dwAddrXlate = (u32)pAddr; -+ } -+ /*Now convert address to proper target physical address if needed*/ -+ if ((xType == CMM_VA2DSPPA) || (xType == CMM_PA2DSPPA)) { -+ /* Got Gpp Pa now, convert to DSP Pa */ -+ dwAddrXlate = GPPPA2DSPPA((pAlctr->dwSmBase - pAlctr-> -+ ulDSPSize), dwAddrXlate, -+ pAlctr->dwDSPPhysAddrOffset * -+ pAlctr->cFactor); -+ } else if (xType == CMM_DSPPA2PA) { -+ /* Got DSP Pa, convert to GPP Pa */ -+ dwAddrXlate = DSPPA2GPPPA(pAlctr->dwSmBase - pAlctr->ulDSPSize, -+ dwAddrXlate, -+ pAlctr->dwDSPPhysAddrOffset * -+ pAlctr->cFactor); -+ } -+loop_cont: -+ if (!dwAddrXlate) { -+ GT_2trace(CMM_debugMask, GT_7CLASS, -+ "CMM_XlatorTranslate: Can't translate" -+ " address: 0x%x xType %x\n", pAddr, xType); -+ } else { -+ GT_3trace(CMM_debugMask, GT_3CLASS, -+ "CMM_XlatorTranslate: pAddr %x, xType" -+ " %x, dwAddrXlate %x\n", pAddr, xType, dwAddrXlate); -+ } -+ return (void *)dwAddrXlate; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cod.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/cod.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/cod.c 1970-01-01 
01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/cod.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,684 @@ -+/* -+ * cod.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== cod.c ======== -+ * This module implements DSP code management for the DSP/BIOS Bridge -+ * environment. It is mostly a thin wrapper. -+ * -+ * This module provides an interface for loading both static and -+ * dynamic code objects onto DSP systems. -+ * -+ *! Revision History -+ *! ================ -+ *! 08-Apr-2003 map: Consolidated DBL to DBLL loader name -+ *! 24-Feb-2003 swa: PMGR Code review comments incorporated. -+ *! 18-Apr-2002 jeh: Added DBL function tables. -+ *! 20-Nov-2001 jeh: Removed call to ZL_loadArgs function. -+ *! 19-Oct-2001 jeh: Access DBL as a static library. Added COD_GetBaseLib, -+ *! COD_GetLoader, removed COD_LoadSection, COD_UnloadSection. -+ *! 07-Sep-2001 jeh: Added COD_LoadSection(), COD_UnloadSection(). -+ *! 07-Aug-2001 rr: hMgr->baseLib is updated after zlopen in COD_LoadBase. -+ *! 18-Apr-2001 jeh: Check for fLoaded flag before ZL_unload, to allow -+ *! COD_OpenBase to be used. -+ *! 11-Jan-2001 jeh: Added COD_OpenBase (not used yet, since there is an -+ *! occasional crash). -+ *! 02-Aug-2000 kc: Added COD_ReadSection to COD module. Incorporates use -+ *! of ZL_readSect (new function in ZL module). -+ *! 28-Feb-2000 rr: New GT Usage Implementation -+ *! 08-Dec-1999 ag: Removed x86 specific __asm int 3. -+ *! 02-Oct-1999 ag: Added #ifdef DEBUGINT3COD for debug. -+ *! 20-Sep-1999 ag: Removed call to GT_set(). -+ *! 04-Jun-1997 cr: Added validation of argc/argv pair in COD_LoadBase, as it -+ *! is a requirement to ZL_loadArgs. -+ *! 31-May-1997 cr: Changed COD_LoadBase argc value from u32 to int, added -+ *! DSP_ENOTIMPL return value to COD_Create when attrs != NULL. -+ *! 29-May-1997 cr: Added debugging support. -+ *! 24-Oct-1996 gp: Added COD_GetSection(). -+ *! 18-Jun-1996 gp: Updated GetSymValue() to check for lib; updated E_ codes. -+ *! 12-Jun-1996 gp: Imported CSL_ services for strcpyn(); Added ref counting. -+ *! 20-May-1996 mg: Adapted for new MEM and LDR modules. -+ *! 08-May-1996 mg: Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+/* Include appropriate loader header file */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* magic number for handle validation */ -+#define MAGIC 0xc001beef -+ -+/* macro to validate COD manager handles */ -+#define IsValid(h) ((h) != NULL && (h)->ulMagic == MAGIC) -+ -+/* -+ * ======== COD_MANAGER ======== -+ */ -+struct COD_MANAGER { -+ struct DBLL_TarObj *target; -+ struct DBLL_LibraryObj *baseLib; -+ bool fLoaded; /* Base library loaded? */ -+ u32 ulEntry; -+ struct LDR_MODULE *hDll; -+ struct DBLL_Fxns fxns; -+ struct DBLL_Attrs attrs; -+ char szZLFile[COD_MAXPATHLENGTH]; -+ u32 ulMagic; -+} ; -+ -+/* -+ * ======== COD_LIBRARYOBJ ======== -+ */ -+struct COD_LIBRARYOBJ { -+ struct DBLL_LibraryObj *dbllLib; -+ struct COD_MANAGER *hCodMgr; -+} ; -+ -+static u32 cRefs = 0L; -+ -+#if GT_TRACE -+static struct GT_Mask COD_debugMask = { NULL, NULL }; -+#endif -+ -+static struct DBLL_Fxns dbllFxns = { -+ (DBLL_CloseFxn) DBLL_close, -+ (DBLL_CreateFxn) DBLL_create, -+ (DBLL_DeleteFxn) DBLL_delete, -+ (DBLL_ExitFxn) DBLL_exit, -+ (DBLL_GetAttrsFxn) DBLL_getAttrs, -+ (DBLL_GetAddrFxn) DBLL_getAddr, -+ (DBLL_GetCAddrFxn) DBLL_getCAddr, -+ (DBLL_GetSectFxn) DBLL_getSect, -+ (DBLL_InitFxn) DBLL_init, -+ (DBLL_LoadFxn) DBLL_load, -+ (DBLL_LoadSectFxn) DBLL_loadSect, -+ (DBLL_OpenFxn) DBLL_open, -+ (DBLL_ReadSectFxn) DBLL_readSect, -+ (DBLL_SetAttrsFxn) DBLL_setAttrs, -+ (DBLL_UnloadFxn) DBLL_unload, -+ (DBLL_UnloadSectFxn) DBLL_unloadSect, -+}; -+ -+static bool NoOp(void); -+ -+/* -+ * ======== COD_Close ======== -+ */ -+void COD_Close(struct COD_LIBRARYOBJ *lib) -+{ -+ struct COD_MANAGER *hMgr; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(lib != NULL); -+ DBC_Require(IsValid(((struct COD_LIBRARYOBJ *)lib)->hCodMgr)); -+ -+ hMgr = lib->hCodMgr; -+ hMgr->fxns.closeFxn(lib->dbllLib); -+ -+ MEM_Free(lib); -+} -+ -+/* -+ * ======== COD_Create ======== -+ * Purpose: -+ * Create an object to manage code on a DSP system. -+ * This object can be used to load an initial program image with -+ * arguments that can later be expanded with -+ * dynamically loaded object files. 
-+ * -+ */ -+DSP_STATUS COD_Create(OUT struct COD_MANAGER **phMgr, char *pstrDummyFile, -+ IN OPTIONAL CONST struct COD_ATTRS *attrs) -+{ -+ struct COD_MANAGER *hMgrNew; -+ struct DBLL_Attrs zlAttrs; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phMgr != NULL); -+ -+ GT_3trace(COD_debugMask, GT_ENTER, -+ "Entered COD_Create, Args: \t\nphMgr: " -+ "0x%x\t\npstrDummyFile: 0x%x\t\nattr: 0x%x\n", -+ phMgr, pstrDummyFile, attrs); -+ /* assume failure */ -+ *phMgr = NULL; -+ -+ /* we don't support non-default attrs yet */ -+ if (attrs != NULL) -+ return DSP_ENOTIMPL; -+ -+ hMgrNew = MEM_Calloc(sizeof(struct COD_MANAGER), MEM_NONPAGED); -+ if (hMgrNew == NULL) { -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "COD_Create: Out Of Memory\n"); -+ return DSP_EMEMORY; -+ } -+ -+ hMgrNew->ulMagic = MAGIC; -+ -+ /* Set up loader functions */ -+ hMgrNew->fxns = dbllFxns; -+ -+ /* initialize the ZL module */ -+ hMgrNew->fxns.initFxn(); -+ -+ zlAttrs.alloc = (DBLL_AllocFxn)NoOp; -+ zlAttrs.free = (DBLL_FreeFxn)NoOp; -+ zlAttrs.fread = (DBLL_ReadFxn)KFILE_Read; -+ zlAttrs.fseek = (DBLL_SeekFxn)KFILE_Seek; -+ zlAttrs.ftell = (DBLL_TellFxn)KFILE_Tell; -+ zlAttrs.fclose = (DBLL_FCloseFxn)KFILE_Close; -+ zlAttrs.fopen = (DBLL_FOpenFxn)KFILE_Open; -+ zlAttrs.symLookup = NULL; -+ zlAttrs.baseImage = true; -+ zlAttrs.logWrite = NULL; -+ zlAttrs.logWriteHandle = NULL; -+ zlAttrs.write = NULL; -+ zlAttrs.rmmHandle = NULL; -+ zlAttrs.wHandle = NULL; -+ zlAttrs.symHandle = NULL; -+ zlAttrs.symArg = NULL; -+ -+ hMgrNew->attrs = zlAttrs; -+ -+ status = hMgrNew->fxns.createFxn(&hMgrNew->target, &zlAttrs); -+ -+ if (DSP_FAILED(status)) { -+ COD_Delete(hMgrNew); -+ GT_1trace(COD_debugMask, GT_7CLASS, -+ "COD_Create:ZL Create Failed: 0x%x\n", status); -+ return COD_E_ZLCREATEFAILED; -+ } -+ -+ /* return the new manager */ -+ *phMgr = hMgrNew; -+ GT_1trace(COD_debugMask, GT_1CLASS, -+ "COD_Create: Success CodMgr: 0x%x\n", *phMgr); -+ return DSP_SOK; -+} -+ -+/* -+ * ======== COD_Delete ======== -+ * Purpose: -+ * Delete a code manager object. -+ */ -+void COD_Delete(struct COD_MANAGER *hMgr) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hMgr)); -+ -+ GT_1trace(COD_debugMask, GT_ENTER, "COD_Delete:hMgr 0x%x\n", hMgr); -+ if (hMgr->baseLib) { -+ if (hMgr->fLoaded) -+ hMgr->fxns.unloadFxn(hMgr->baseLib, &hMgr->attrs); -+ -+ hMgr->fxns.closeFxn(hMgr->baseLib); -+ } -+ if (hMgr->target) { -+ hMgr->fxns.deleteFxn(hMgr->target); -+ hMgr->fxns.exitFxn(); -+ } -+ hMgr->ulMagic = ~MAGIC; -+ MEM_Free(hMgr); -+} -+ -+/* -+ * ======== COD_Exit ======== -+ * Purpose: -+ * Discontinue usage of the COD module. -+ * -+ */ -+void COD_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(COD_debugMask, GT_ENTER, -+ "Entered COD_Exit, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== COD_GetBaseLib ======== -+ * Purpose: -+ * Get handle to the base image DBL library. 
-+ */ -+DSP_STATUS COD_GetBaseLib(struct COD_MANAGER *hManager, -+ struct DBLL_LibraryObj **plib) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hManager)); -+ DBC_Require(plib != NULL); -+ -+ *plib = (struct DBLL_LibraryObj *) hManager->baseLib; -+ -+ return status; -+} -+ -+/* -+ * ======== COD_GetBaseName ======== -+ */ -+DSP_STATUS COD_GetBaseName(struct COD_MANAGER *hManager, char *pszName, -+ u32 uSize) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hManager)); -+ DBC_Require(pszName != NULL); -+ -+ if (uSize <= COD_MAXPATHLENGTH) -+ strncpy(pszName, hManager->szZLFile, uSize); -+ else -+ status = DSP_EFAIL; -+ -+ return status; -+} -+ -+/* -+ * ======== COD_GetEntry ======== -+ * Purpose: -+ * Retrieve the entry point of a loaded DSP program image -+ * -+ */ -+DSP_STATUS COD_GetEntry(struct COD_MANAGER *hManager, u32 *pulEntry) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hManager)); -+ DBC_Require(pulEntry != NULL); -+ -+ *pulEntry = hManager->ulEntry; -+ -+ GT_1trace(COD_debugMask, GT_ENTER, "COD_GetEntry:ulEntr 0x%x\n", -+ *pulEntry); -+ -+ return DSP_SOK; -+} -+ -+/* -+ * ======== COD_GetLoader ======== -+ * Purpose: -+ * Get handle to the DBLL loader. -+ */ -+DSP_STATUS COD_GetLoader(struct COD_MANAGER *hManager, -+ struct DBLL_TarObj **phLoader) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hManager)); -+ DBC_Require(phLoader != NULL); -+ -+ *phLoader = (struct DBLL_TarObj *)hManager->target; -+ -+ return status; -+} -+ -+/* -+ * ======== COD_GetSection ======== -+ * Purpose: -+ * Retrieve the starting address and length of a section in the COFF file -+ * given the section name. -+ */ -+DSP_STATUS COD_GetSection(struct COD_LIBRARYOBJ *lib, IN char *pstrSect, -+ OUT u32 *puAddr, OUT u32 *puLen) -+{ -+ struct COD_MANAGER *hManager; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(lib != NULL); -+ DBC_Require(IsValid(lib->hCodMgr)); -+ DBC_Require(pstrSect != NULL); -+ DBC_Require(puAddr != NULL); -+ DBC_Require(puLen != NULL); -+ -+ GT_4trace(COD_debugMask, GT_ENTER, -+ "Entered COD_GetSection Args \t\n lib: " -+ "0x%x\t\npstrsect: 0x%x\t\npuAddr: 0x%x\t\npuLen: 0x%x\n", -+ lib, pstrSect, puAddr, puLen); -+ *puAddr = 0; -+ *puLen = 0; -+ if (lib != NULL) { -+ hManager = lib->hCodMgr; -+ status = hManager->fxns.getSectFxn(lib->dbllLib, pstrSect, -+ puAddr, puLen); -+ if (DSP_FAILED(status)) { -+ GT_1trace(COD_debugMask, GT_7CLASS, -+ "COD_GetSection: Section %s not" -+ "found\n", pstrSect); -+ } -+ } else { -+ status = COD_E_NOSYMBOLSLOADED; -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "COD_GetSection:No Symbols loaded\n"); -+ } -+ -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((*puAddr == 0) && (*puLen == 0))); -+ -+ return status; -+} -+ -+/* -+ * ======== COD_GetSymValue ======== -+ * Purpose: -+ * Retrieve the value for the specified symbol. The symbol is first -+ * searched for literally and then, if not found, searched for as a -+ * C symbol. 
-+ * -+ */ -+DSP_STATUS COD_GetSymValue(struct COD_MANAGER *hMgr, char *pstrSym, -+ u32 *pulValue) -+{ -+ struct DBLL_Symbol *pSym; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hMgr)); -+ DBC_Require(pstrSym != NULL); -+ DBC_Require(pulValue != NULL); -+ -+ GT_3trace(COD_debugMask, GT_ENTER, "Entered COD_GetSymValue Args \t\n" -+ "hMgr: 0x%x\t\npstrSym: 0x%x\t\npulValue: 0x%x\n", -+ hMgr, pstrSym, pulValue); -+ if (hMgr->baseLib) { -+ if (!hMgr->fxns.getAddrFxn(hMgr->baseLib, pstrSym, &pSym)) { -+ if (!hMgr->fxns.getCAddrFxn(hMgr->baseLib, pstrSym, -+ &pSym)) { -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "COD_GetSymValue: " -+ "Symbols not found\n"); -+ return COD_E_SYMBOLNOTFOUND; -+ } -+ } -+ } else { -+ GT_0trace(COD_debugMask, GT_7CLASS, "COD_GetSymValue: " -+ "No Symbols loaded\n"); -+ return COD_E_NOSYMBOLSLOADED; -+ } -+ -+ *pulValue = pSym->value; -+ -+ return DSP_SOK; -+} -+ -+/* -+ * ======== COD_Init ======== -+ * Purpose: -+ * Initialize the COD module's private state. -+ * -+ */ -+bool COD_Init(void) -+{ -+ bool fRetVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!COD_debugMask.flags); -+ GT_create(&COD_debugMask, "CO"); -+ } -+ -+ if (fRetVal) -+ cRefs++; -+ -+ -+ GT_1trace(COD_debugMask, GT_1CLASS, -+ "Entered COD_Init, ref count: 0x%x\n", cRefs); -+ DBC_Ensure((fRetVal && cRefs > 0) || (!fRetVal && cRefs >= 0)); -+ return fRetVal; -+} -+ -+/* -+ * ======== COD_LoadBase ======== -+ * Purpose: -+ * Load the initial program image, optionally with command-line arguments, -+ * on the DSP system managed by the supplied handle. The program to be -+ * loaded must be the first element of the args array and must be a fully -+ * qualified pathname. -+ * Details: -+ * if nArgc doesn't match the number of arguments in the aArgs array, the -+ * aArgs array is searched for a NULL terminating entry, and argc is -+ * recalculated to reflect this. In this way, we can support NULL -+ * terminating aArgs arrays, if nArgc is very large. -+ */ -+DSP_STATUS COD_LoadBase(struct COD_MANAGER *hMgr, u32 nArgc, char *aArgs[], -+ COD_WRITEFXN pfnWrite, void *pArb, char *envp[]) -+{ -+ DBLL_Flags flags; -+ struct DBLL_Attrs saveAttrs; -+ struct DBLL_Attrs newAttrs; -+ DSP_STATUS status; -+ u32 i; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hMgr)); -+ DBC_Require(nArgc > 0); -+ DBC_Require(aArgs != NULL); -+ DBC_Require(aArgs[0] != NULL); -+ DBC_Require(pfnWrite != NULL); -+ DBC_Require(hMgr->baseLib != NULL); -+ -+ GT_6trace(COD_debugMask, GT_ENTER, -+ "Entered COD_LoadBase, hMgr: 0x%x\n \t" -+ "nArgc: 0x%x\n\taArgs: 0x%x\n\tpfnWrite: 0x%x\n\tpArb:" -+ " 0x%x\n \tenvp: 0x%x\n", hMgr, nArgc, aArgs, pfnWrite, -+ pArb, envp); -+ /* -+ * Make sure every argv[] stated in argc has a value, or change argc to -+ * reflect true number in NULL terminated argv array. 
-+ */ -+ for (i = 0; i < nArgc; i++) { -+ if (aArgs[i] == NULL) { -+ nArgc = i; -+ break; -+ } -+ } -+ -+ /* set the write function for this operation */ -+ hMgr->fxns.getAttrsFxn(hMgr->target, &saveAttrs); -+ -+ newAttrs = saveAttrs; -+ newAttrs.write = (DBLL_WriteFxn)pfnWrite; -+ newAttrs.wHandle = pArb; -+ newAttrs.alloc = (DBLL_AllocFxn)NoOp; -+ newAttrs.free = (DBLL_FreeFxn)NoOp; -+ newAttrs.logWrite = NULL; -+ newAttrs.logWriteHandle = NULL; -+ -+ /* Load the image */ -+ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; -+ status = hMgr->fxns.loadFxn(hMgr->baseLib, flags, &newAttrs, -+ &hMgr->ulEntry); -+ if (DSP_FAILED(status)) { -+ hMgr->fxns.closeFxn(hMgr->baseLib); -+ GT_1trace(COD_debugMask, GT_7CLASS, -+ "COD_LoadBase: COD Load failed: " -+ "0x%x\n", status); -+ } -+ if (DSP_SUCCEEDED(status)) -+ hMgr->fLoaded = true; -+ else -+ hMgr->baseLib = NULL; -+ -+ return status; -+} -+ -+/* -+ * ======== COD_Open ======== -+ * Open library for reading sections. -+ */ -+DSP_STATUS COD_Open(struct COD_MANAGER *hMgr, IN char *pszCoffPath, -+ COD_FLAGS flags, struct COD_LIBRARYOBJ **pLib) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct COD_LIBRARYOBJ *lib = NULL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hMgr)); -+ DBC_Require(pszCoffPath != NULL); -+ DBC_Require(flags == COD_NOLOAD || flags == COD_SYMB); -+ DBC_Require(pLib != NULL); -+ -+ GT_4trace(COD_debugMask, GT_ENTER, "Entered COD_Open, hMgr: 0x%x\n\t " -+ "pszCoffPath: 0x%x\tflags: 0x%x\tlib: 0x%x\n", hMgr, -+ pszCoffPath, flags, pLib); -+ -+ *pLib = NULL; -+ -+ lib = MEM_Calloc(sizeof(struct COD_LIBRARYOBJ), MEM_NONPAGED); -+ if (lib == NULL) { -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "COD_Open: Out Of Memory\n"); -+ status = DSP_EMEMORY; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ lib->hCodMgr = hMgr; -+ status = hMgr->fxns.openFxn(hMgr->target, pszCoffPath, flags, -+ &lib->dbllLib); -+ if (DSP_FAILED(status)) { -+ GT_1trace(COD_debugMask, GT_7CLASS, -+ "COD_Open failed: 0x%x\n", status); -+ } else { -+ *pLib = lib; -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== COD_OpenBase ======== -+ * Purpose: -+ * Open base image for reading sections. -+ */ -+DSP_STATUS COD_OpenBase(struct COD_MANAGER *hMgr, IN char *pszCoffPath, -+ DBLL_Flags flags) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DBLL_LibraryObj *lib; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValid(hMgr)); -+ DBC_Require(pszCoffPath != NULL); -+ -+ GT_2trace(COD_debugMask, GT_ENTER, -+ "Entered COD_OpenBase, hMgr: 0x%x\n\t" -+ "pszCoffPath: 0x%x\n", hMgr, pszCoffPath); -+ -+ /* if we previously opened a base image, close it now */ -+ if (hMgr->baseLib) { -+ if (hMgr->fLoaded) { -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "Base Image is already loaded. 
" -+ "Unloading it...\n"); -+ hMgr->fxns.unloadFxn(hMgr->baseLib, &hMgr->attrs); -+ hMgr->fLoaded = false; -+ } -+ hMgr->fxns.closeFxn(hMgr->baseLib); -+ hMgr->baseLib = NULL; -+ } else { -+ GT_0trace(COD_debugMask, GT_1CLASS, -+ "COD_OpenBase: Opening the base image ...\n"); -+ } -+ status = hMgr->fxns.openFxn(hMgr->target, pszCoffPath, flags, &lib); -+ if (DSP_FAILED(status)) { -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "COD_OpenBase: COD Open failed\n"); -+ } else { -+ /* hang onto the library for subsequent sym table usage */ -+ hMgr->baseLib = lib; -+ strncpy(hMgr->szZLFile, pszCoffPath, COD_MAXPATHLENGTH - 1); -+ hMgr->szZLFile[COD_MAXPATHLENGTH - 1] = '\0'; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== COD_ReadSection ======== -+ * Purpose: -+ * Retrieve the content of a code section given the section name. -+ */ -+DSP_STATUS COD_ReadSection(struct COD_LIBRARYOBJ *lib, IN char *pstrSect, -+ OUT char *pstrContent, IN u32 cContentSize) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(lib != NULL); -+ DBC_Require(IsValid(lib->hCodMgr)); -+ DBC_Require(pstrSect != NULL); -+ DBC_Require(pstrContent != NULL); -+ -+ GT_4trace(COD_debugMask, GT_ENTER, "Entered COD_ReadSection Args: 0x%x," -+ " 0x%x, 0x%x, 0x%x\n", lib, pstrSect, pstrContent, -+ cContentSize); -+ -+ if (lib != NULL) { -+ status = lib->hCodMgr->fxns.readSectFxn(lib->dbllLib, pstrSect, -+ pstrContent, -+ cContentSize); -+ if (DSP_FAILED(status)) { -+ GT_1trace(COD_debugMask, GT_7CLASS, -+ "COD_ReadSection failed: 0x%lx\n", status); -+ } -+ } else { -+ status = COD_E_NOSYMBOLSLOADED; -+ GT_0trace(COD_debugMask, GT_7CLASS, -+ "COD_ReadSection: No Symbols loaded\n"); -+ } -+ return status; -+} -+ -+/* -+ * ======== NoOp ======== -+ * Purpose: -+ * No Operation. -+ * -+ */ -+static bool NoOp(void) -+{ -+ return true; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbl.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dbl.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dbl.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,1385 @@ -+/* -+ * dbl.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== dbl.c ======== -+ * Dynamic BOF Loader library. Contains functions related to -+ * loading and unloading symbols/code/data on DSP. -+ * Also contains other support functions. -+ * -+ *! Revision History -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 24-May-2002 jeh Free DCD sects in DBL_close(). -+ *! 19-Mar-2002 jeh Changes made to match dynamic loader (dbll.c): Pass -+ *! DBL_Library to DBL_getAddr() instead of DBL_Target, -+ *! eliminate scope param, use DBL_Symbol. Pass attrs to -+ *! DBL_load(), DBL_unload(). -+ *! 20-Nov-2001 jeh Removed DBL_loadArgs(). -+ *! 07-Sep-2001 jeh Added overlay support. -+ *! 31-Jul-2001 jeh Include windows.h. -+ *! 06-Jun-2001 jeh Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+#define DBL_TARGSIGNATURE 0x544c4244 /* "TLBD" */ -+#define DBL_LIBSIGNATURE 0x4c4c4244 /* "LLBD" */ -+ -+#define C54TARG 0 -+#define C55TARG 1 -+#define NUMTARGS 2 -+ -+#define C54MAGIC 0x98 /* Magic number for TI C54 COF */ -+#define C55MAGIC 0x9c /* Magic number for LEAD3 (C55) COF */ -+ -+/* Three task phases */ -+#define CREATEPHASE 0 -+#define DELETEPHASE 1 -+#define EXECUTEPHASE 2 -+#define NONE 3 /* For overlay section with phase not specified */ -+ -+/* Default load buffer size */ -+#define LOADBUFSIZE 0x800 -+ -+#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \ -+ (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF)) -+ -+#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF)) -+ -+/* -+ * Macros for accessing the following types of overlay data within a -+ * structure of type OvlyData: -+ * - Overlay data not associated with a particular phase -+ * - Create phase overlay data -+ * - Delete phase overlay data -+ * - Execute phase overlay data -+ */ -+#define numOtherSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numOtherSects) -+#define numCreateSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numCreateSects) -+#define numDeleteSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numDeleteSects) -+#define numExecuteSects(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numExecuteSects) -+#define otherOffset(pOvlyData) 0 -+#define createOffset(pOvlyData) ((pOvlyData)->hdr.dbofHdr.numOtherSects) -+#define deleteOffset(pOvlyData) (createOffset(pOvlyData) + \ -+ (pOvlyData->hdr.dbofHdr.numCreateSects)) -+#define executeOffset(pOvlyData) (deleteOffset(pOvlyData) + \ -+ (pOvlyData->hdr.dbofHdr.numDeleteSects)) -+/* -+ * ======== OvlyHdr ======== -+ */ -+struct OvlyHdr { -+ struct DBOF_OvlySectHdr dbofHdr; -+ char *pName; /* Name of overlay section */ -+ u16 createRef; /* Reference count for create phase */ -+ u16 deleteRef; /* Reference count for delete phase */ -+ u16 executeRef; /* Execute phase ref count */ -+ u16 otherRef; /* Unspecified phase ref count */ -+} ; -+ -+/* -+ * ======== OvlyData ======== -+ */ -+struct OvlyData { -+ struct OvlyHdr hdr; -+ struct DBOF_OvlySectData data[1]; -+} ; -+ -+/* -+ * ======== Symbol ======== -+ */ -+struct Symbol { -+ struct DBL_Symbol sym; -+ char *pSymName; -+}; -+ -+/* -+ * ======== DCDSect ======== -+ */ -+struct DCDSect { -+ struct DBOF_DCDSectHdr sectHdr; -+ char *pData; -+} ; -+ -+/* -+ * ======== DBL_TargetObj ======== -+ */ -+struct DBL_TargetObj { -+ u32 dwSignature; /* For object validation */ -+ struct DBL_Attrs dblAttrs; /* file read, write, etc. functions */ -+ char *pBuf; /* Load buffer */ -+}; -+ -+/* -+ * ======== TargetInfo ======== -+ */ -+struct TargetInfo { -+ u16 dspType; /* eg, C54TARG, C55TARG */ -+ u32 magic; /* COFF magic number, identifies target type */ -+ u16 wordSize; /* Size of a DSP word */ -+ u16 mauSize; /* Size of minimum addressable unit */ -+ u16 charSize; /* For C55x, mausize = 1, but charsize = 2 */ -+} ; -+ -+/* -+ * ======== DBL_LibraryObj ======== -+ * Represents a library loaded on a target. 
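/*
 * Illustrative aside, not part of the kernel patch: SWAPLONG()/SWAPWORD() above
 * byte-reverse 32-bit and 16-bit values; readHeader() later compares the DBOF
 * magic both ways to detect a byte-swapped file.  The overlay offset macros
 * just index consecutive groups of section descriptors laid out as
 * [other | create | delete | execute].  A standalone check of both ideas
 * (macros copied from the code above; the counts are made-up example values):
 */
#include <assert.h>

#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \
		(((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF))
#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF))

int main(void)
{
	/* C55 COFF magic 0x9c as it would appear with the bytes reversed */
	assert(SWAPLONG(0x0000009cUL) == 0x9c000000UL);
	assert(SWAPWORD(0x1234) == 0x3412);

	/* overlay descriptor layout: other sections first, then the phases */
	unsigned numOther = 2, numCreate = 3, numDelete = 1;
	unsigned otherOff   = 0;
	unsigned createOff  = numOther;			/* 2 */
	unsigned deleteOff  = createOff + numCreate;	/* 5 */
	unsigned executeOff = deleteOff + numDelete;	/* 6 */

	assert(otherOff == 0 && createOff == 2 && deleteOff == 5 && executeOff == 6);
	return 0;
}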
-+ */ -+struct DBL_LibraryObj { -+ u32 dwSignature; /* For object validation */ -+ struct DBL_TargetObj *pTarget; /* Target for this library */ -+ struct KFILE_FileObj *file; /* DBOF file handle */ -+ bool byteSwapped; /* Are bytes swapped? */ -+ struct DBOF_FileHdr fileHdr; /* Header of DBOF file */ -+ u16 nSymbols; /* Number of DSP/Bridge symbols */ -+ struct Symbol *symbols; /* Table of DSP/Bridge symbols */ -+ u16 nDCDSects; /* Number of DCD sections */ -+ u16 nOvlySects; /* Number of overlay nodes */ -+ struct DCDSect *dcdSects; /* DCD section data */ -+ struct OvlyData **ppOvlyData; /* Array of overlay section data */ -+ struct TargetInfo *pTargetInfo; /* Entry in targetTab[] below */ -+} ; -+ -+#if GT_TRACE -+static struct GT_Mask DBL_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static u32 cRefs; /* module reference count */ -+ -+static u32 magicTab[NUMTARGS] = { C54MAGIC, C55MAGIC }; -+ -+static struct TargetInfo targetTab[] = { -+ /* targ magic wordsize mausize charsize */ -+ {C54TARG, C54MAGIC, 2, 2, 2}, /* C54 */ -+ {C55TARG, C55MAGIC, 2, 1, 2}, /* C55 */ -+}; -+ -+static void freeSects(struct DBL_TargetObj *dbl, struct OvlyData *pOvlyData, -+ s32 offset, s32 nSects); -+static DSP_STATUS loadSect(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib); -+static DSP_STATUS readDCDSects(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib); -+static DSP_STATUS readHeader(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib); -+static DSP_STATUS readOvlySects(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib); -+static DSP_STATUS readSymbols(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib); -+ -+/* -+ * ======== DBL_close ======== -+ * Purpose: -+ * Close library opened with DBL_open. -+ */ -+void DBL_close(struct DBL_LibraryObj *lib) -+{ -+ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; -+ u16 i; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); -+ -+ GT_1trace(DBL_debugMask, GT_ENTER, "DBL_close: lib: 0x%x\n", lib); -+ -+ /* Free symbols */ -+ if (pdblLib->symbols) { -+ for (i = 0; i < pdblLib->nSymbols; i++) { -+ if (pdblLib->symbols[i].pSymName) -+ MEM_Free(pdblLib->symbols[i].pSymName); -+ -+ } -+ MEM_Free(pdblLib->symbols); -+ } -+ -+ /* Free DCD sects */ -+ if (pdblLib->dcdSects) { -+ for (i = 0; i < pdblLib->nDCDSects; i++) { -+ if (pdblLib->dcdSects[i].pData) -+ MEM_Free(pdblLib->dcdSects[i].pData); -+ -+ } -+ MEM_Free(pdblLib->dcdSects); -+ } -+ -+ /* Free overlay sects */ -+ if (pdblLib->ppOvlyData) { -+ for (i = 0; i < pdblLib->nOvlySects; i++) { -+ if (pdblLib->ppOvlyData[i]) { -+ if (pdblLib->ppOvlyData[i]->hdr.pName) { -+ MEM_Free(pdblLib->ppOvlyData[i]-> -+ hdr.pName); -+ } -+ MEM_Free(pdblLib->ppOvlyData[i]); -+ } -+ } -+ MEM_Free(pdblLib->ppOvlyData); -+ } -+ -+ /* Close the file */ -+ if (pdblLib->file) -+ (*pdblLib->pTarget->dblAttrs.fclose) (pdblLib->file); -+ -+ -+ MEM_FreeObject(pdblLib); -+} -+ -+/* -+ * ======== DBL_create ======== -+ * Purpose: -+ * Create a target object by specifying the alloc, free, and -+ * write functions for the target. 
-+ */ -+DSP_STATUS DBL_create(struct DBL_TargetObj **pTarget, struct DBL_Attrs *pAttrs) -+{ -+ struct DBL_TargetObj *pdblTarget = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pAttrs != NULL); -+ DBC_Require(pTarget != NULL); -+ -+ GT_2trace(DBL_debugMask, GT_ENTER, -+ "DBL_create: pTarget: 0x%x pAttrs: 0x%x\n", -+ pTarget, pAttrs); -+ /* Allocate DBL target object */ -+ MEM_AllocObject(pdblTarget, struct DBL_TargetObj, DBL_TARGSIGNATURE); -+ if (pdblTarget == NULL) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "DBL_create: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } else { -+ pdblTarget->dblAttrs = *pAttrs; -+ /* Allocate buffer for loading target */ -+ pdblTarget->pBuf = MEM_Calloc(LOADBUFSIZE, MEM_PAGED); -+ if (pdblTarget->pBuf == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ *pTarget = pdblTarget; -+ } else { -+ *pTarget = NULL; -+ if (pdblTarget) -+ DBL_delete(pdblTarget); -+ -+ } -+ DBC_Ensure(DSP_SUCCEEDED(status) && -+ ((MEM_IsValidHandle((*pTarget), DBL_TARGSIGNATURE)) || -+ (DSP_FAILED(status) && *pTarget == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DBL_delete ======== -+ * Purpose: -+ * Delete target object and free resources for any loaded libraries. -+ */ -+void DBL_delete(struct DBL_TargetObj *target) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); -+ -+ GT_1trace(DBL_debugMask, GT_ENTER, -+ "DBL_delete: target: 0x%x\n", target); -+ -+ if (target->pBuf) -+ MEM_Free(target->pBuf); -+ -+ MEM_FreeObject(target); -+} -+ -+/* -+ * ======== DBL_exit ======== -+ * Purpose -+ * Discontinue usage of DBL module. -+ */ -+void DBL_exit() -+{ -+ DBC_Require(cRefs > 0); -+ cRefs--; -+ GT_1trace(DBL_debugMask, GT_5CLASS, -+ "DBL_exit() ref count: 0x%x\n", cRefs); -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== DBL_getAddr ======== -+ * Purpose: -+ * Get address of name in the specified library. -+ */ -+bool DBL_getAddr(struct DBL_LibraryObj *lib, char *name, -+ struct DBL_Symbol **ppSym) -+{ -+ bool retVal = false; -+ struct Symbol *symbol; -+ u16 i; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); -+ DBC_Require(name != NULL); -+ DBC_Require(ppSym != NULL); -+ -+ GT_3trace(DBL_debugMask, GT_ENTER, -+ "DBL_getAddr: libt: 0x%x name: %s pAddr: " -+ "0x%x\n", lib, name, ppSym); -+ for (i = 0; i < lib->nSymbols; i++) { -+ symbol = &lib->symbols[i]; -+ if (CSL_Strcmp(name, symbol->pSymName) == 0) { -+ /* Found it */ -+ *ppSym = &lib->symbols[i].sym; -+ retVal = true; -+ break; -+ } -+ } -+ return retVal; -+} -+ -+/* -+ * ======== DBL_getAttrs ======== -+ * Purpose: -+ * Retrieve the attributes of the target. -+ */ -+void DBL_getAttrs(struct DBL_TargetObj *target, struct DBL_Attrs *pAttrs) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); -+ DBC_Require(pAttrs != NULL); -+ GT_2trace(DBL_debugMask, GT_ENTER, "DBL_getAttrs: target: 0x%x pAttrs: " -+ "0x%x\n", target, pAttrs); -+ *pAttrs = target->dblAttrs; -+} -+ -+/* -+ * ======== DBL_getCAddr ======== -+ * Purpose: -+ * Get address of "C" name in the specified library. 
-+ */ -+bool DBL_getCAddr(struct DBL_LibraryObj *lib, char *name, -+ struct DBL_Symbol **ppSym) -+{ -+ bool retVal = false; -+ struct Symbol *symbol; -+ u16 i; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); -+ DBC_Require(name != NULL); -+ DBC_Require(ppSym != NULL); -+ -+ GT_3trace(DBL_debugMask, GT_ENTER, -+ "DBL_getCAddr: target: 0x%x name:%s pAddr:" -+ " 0x%x\n", lib, name, ppSym); -+ for (i = 0; i < lib->nSymbols; i++) { -+ symbol = &lib->symbols[i]; -+ if ((CSL_Strcmp(name, symbol->pSymName) == 0) || -+ (CSL_Strcmp(name, symbol->pSymName + 1) == 0 && -+ symbol->pSymName[0] == '_')) { -+ /* Found it */ -+ *ppSym = &lib->symbols[i].sym; -+ retVal = true; -+ break; -+ } -+ } -+ return retVal; -+} -+ -+/* -+ * ======== DBL_getEntry ======== -+ * Purpose: -+ * Get program entry point. -+ * -+ */ -+bool DBL_getEntry(struct DBL_LibraryObj *lib, u32 *pEntry) -+{ -+ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); -+ DBC_Require(pEntry != NULL); -+ -+ GT_2trace(DBL_debugMask, GT_ENTER, -+ "DBL_getEntry: lib: 0x%x pEntry: 0x%x\n", lib, pEntry); -+ *pEntry = pdblLib->fileHdr.entry; -+ -+ return true; -+} -+ -+/* -+ * ======== DBL_getSect ======== -+ * Purpose: -+ * Get address and size of a named section. -+ */ -+DSP_STATUS DBL_getSect(struct DBL_LibraryObj *lib, char *name, u32 *pAddr, -+ u32 *pSize) -+{ -+ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; -+ u16 i; -+ DSP_STATUS status = DSP_ENOSECT; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(name != NULL); -+ DBC_Require(pAddr != NULL); -+ DBC_Require(pSize != NULL); -+ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); -+ -+ GT_4trace(DBL_debugMask, GT_ENTER, -+ "DBL_getSect: lib: 0x%x name: %s pAddr:" -+ " 0x%x pSize: 0x%x\n", lib, name, pAddr, pSize); -+ -+ /* -+ * Check for DCD and overlay sections. Overlay loader uses DBL_getSect -+ * to determine whether or not a node has overlay sections. -+ * DCD section names begin with '.' -+ */ -+ if (name[0] == '.') { -+ /* Get DCD section size (address is 0, since it's a NOLOAD). */ -+ for (i = 0; i < pdblLib->nDCDSects; i++) { -+ if (CSL_Strcmp(pdblLib->dcdSects[i].sectHdr.name, -+ name) == 0) { -+ *pAddr = 0; -+ *pSize = pdblLib->dcdSects[i].sectHdr.size * -+ pdblLib->pTargetInfo->mauSize; -+ status = DSP_SOK; -+ break; -+ } -+ } -+ } else { -+ /* Check for overlay section */ -+ for (i = 0; i < pdblLib->nOvlySects; i++) { -+ if (CSL_Strcmp(pdblLib->ppOvlyData[i]->hdr.pName, -+ name) == 0) { -+ /* Address and size are meaningless */ -+ *pAddr = 0; -+ *pSize = 0; -+ status = DSP_SOK; -+ break; -+ } -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DBL_init ======== -+ * Purpose: -+ * Initialize DBL module. -+ */ -+bool DBL_init(void) -+{ -+ bool retVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!DBL_debugMask.flags); -+ GT_create(&DBL_debugMask, "BL"); /* "BL" for dBL */ -+ -+ } -+ -+ if (retVal) -+ cRefs++; -+ -+ -+ GT_1trace(DBL_debugMask, GT_5CLASS, "DBL_init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((retVal && (cRefs > 0)) || (!retVal && (cRefs >= 0))); -+ -+ return retVal; -+} -+ -+/* -+ * ======== DBL_load ======== -+ * Purpose: -+ * Add symbols/code/data defined in file to that already present -+ * on the target. 
-+ */ -+DSP_STATUS DBL_load(struct DBL_LibraryObj *lib, DBL_Flags flags, -+ struct DBL_Attrs *attrs, u32 *pEntry) -+{ -+ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; -+ struct DBL_TargetObj *dbl; -+ u16 i; -+ u16 nSects; -+ DSP_STATUS status = DSP_EFAIL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); -+ DBC_Require(pEntry != NULL); -+ DBC_Require(attrs != NULL); -+ -+ GT_4trace(DBL_debugMask, GT_ENTER, "DBL_load: lib: 0x%x flags: " -+ "0x%x attrs: 0x%x pEntry: 0x%x\n", lib, flags, attrs, pEntry); -+ -+ dbl = pdblLib->pTarget; -+ *pEntry = pdblLib->fileHdr.entry; -+ nSects = pdblLib->fileHdr.numSects; -+ dbl->dblAttrs = *attrs; -+ -+ for (i = 0; i < nSects; i++) { -+ /* Load the section at the current file offset */ -+ status = loadSect(dbl, lib); -+ if (DSP_FAILED(status)) -+ break; -+ -+ } -+ -+ /* Done with file, we can close it */ -+ if (pdblLib->file) { -+ (*pdblLib->pTarget->dblAttrs.fclose) (pdblLib->file); -+ pdblLib->file = NULL; -+ } -+ return status; -+} -+ -+/* -+ * ======== DBL_loadSect ======== -+ * Purpose: -+ * Load a named section from an library (for overlay support). -+ */ -+DSP_STATUS DBL_loadSect(struct DBL_LibraryObj *lib, char *sectName, -+ struct DBL_Attrs *attrs) -+{ -+ struct DBL_TargetObj *dbl; -+ s32 i; -+ s32 phase; -+ s32 offset = -1; -+ s32 nSects = -1; -+ s32 allocdSects = 0; -+ u32 loadAddr; -+ u32 runAddr; -+ u32 size; -+ u32 space; -+ u32 ulBytes; -+ u16 mauSize; -+ u16 wordSize; -+ u16 *phaseRef = NULL; -+ u16 *otherRef = NULL; -+ char *name = NULL; -+ struct OvlyData *pOvlyData; -+ DSP_STATUS status = DSP_ENOSECT; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); -+ DBC_Require(sectName != NULL); -+ DBC_Require(attrs != NULL); -+ DBC_Require(attrs->write != NULL); -+ GT_3trace(DBL_debugMask, GT_ENTER, -+ "DBL_loadSect: lib: 0x%x sectName: %s " -+ "attrs: 0x%x\n", lib, sectName, attrs); -+ dbl = lib->pTarget; -+ mauSize = lib->pTargetInfo->mauSize; -+ wordSize = lib->pTargetInfo->wordSize; -+ /* Check for match of sect name in overlay table */ -+ for (i = 0; i < lib->nOvlySects; i++) { -+ name = lib->ppOvlyData[i]->hdr.pName; -+ if (!CSL_Strncmp(name, sectName, CSL_Strlen(name))) { -+ /* Match found */ -+ status = DSP_SOK; -+ break; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ DBC_Assert(i < lib->nOvlySects); -+ pOvlyData = lib->ppOvlyData[i]; -+ /* -+ * If node overlay, phase will be encoded in name. If not node -+ * overlay, set phase to NONE. -+ */ -+ phase = (CSL_Strcmp(name, sectName)) ? -+ CSL_Atoi(sectName + CSL_Strlen(sectName) - 1) : NONE; -+ /* Get reference count of node phase to be loaded, offset into -+ * overlay data array, and number of sections to overlay. */ -+ switch (phase) { -+ case NONE: -+ /* Not a node overlay */ -+ phaseRef = &pOvlyData->hdr.otherRef; -+ nSects = numOtherSects(pOvlyData); -+ offset = otherOffset(pOvlyData); -+ break; -+ case CREATEPHASE: -+ phaseRef = &pOvlyData->hdr.createRef; -+ otherRef = &pOvlyData->hdr.otherRef; -+ if (*otherRef) { -+ /* The overlay sections where node phase was -+ * not specified, have already been loaded. */ -+ nSects = numCreateSects(pOvlyData); -+ offset = createOffset(pOvlyData); -+ } else { -+ /* Overlay sections where node phase was not -+ * specified get loaded at create time, along -+ * with create sects. 
*/ -+ nSects = numCreateSects(pOvlyData) + -+ numOtherSects(pOvlyData); -+ offset = otherOffset(pOvlyData); -+ } -+ break; -+ case DELETEPHASE: -+ phaseRef = &pOvlyData->hdr.deleteRef; -+ nSects = numDeleteSects(pOvlyData); -+ offset = deleteOffset(pOvlyData); -+ break; -+ case EXECUTEPHASE: -+ phaseRef = &pOvlyData->hdr.executeRef; -+ nSects = numExecuteSects(pOvlyData); -+ offset = executeOffset(pOvlyData); -+ break; -+ default: -+ /* ERROR */ -+ DBC_Assert(false); -+ break; -+ } -+ /* Do overlay if reference count is 0 */ -+ if (!(*phaseRef)) { -+ /* "Allocate" all sections */ -+ for (i = 0; i < nSects; i++) { -+ runAddr = pOvlyData->data[offset + i].runAddr; -+ size = pOvlyData->data[offset + i].size; -+ space = pOvlyData->data[offset + i].page; -+ status = (dbl->dblAttrs.alloc)(dbl->dblAttrs. -+ rmmHandle, space, size, 0, -+ &runAddr, true); -+ if (DSP_FAILED(status)) -+ break; -+ -+ allocdSects++; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Load sections */ -+ for (i = 0; i < nSects; i++) { -+ loadAddr = pOvlyData->data[offset + i]. -+ loadAddr; -+ runAddr = pOvlyData->data[offset + i]. -+ runAddr; -+ size = pOvlyData->data[offset + i]. -+ size; -+ space = pOvlyData->data[offset + i]. -+ page; -+ /* Convert to word address, call -+ * write function */ -+ loadAddr /= (wordSize / mauSize); -+ runAddr /= (wordSize / mauSize); -+ ulBytes = size * mauSize; -+ if ((*attrs->write)(attrs->wHandle, -+ runAddr, (void *)loadAddr, ulBytes, -+ space) != ulBytes) { -+ GT_0trace(DBL_debugMask, -+ GT_6CLASS, -+ "DBL_loadSect: write" -+ " failed\n"); -+ status = DSP_EFWRITE; -+ break; -+ } -+ } -+ } -+ /* Free sections on failure */ -+ if (DSP_FAILED(status)) -+ freeSects(dbl, pOvlyData, offset, allocdSects); -+ -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Increment reference counts */ -+ if (otherRef) -+ *otherRef = *otherRef + 1; -+ -+ *phaseRef = *phaseRef + 1; -+ } -+ return status; -+} -+ -+/* -+ * ======== DBL_open ======== -+ * Purpose: -+ * DBL_open() returns a library handle that can be used to -+ * load/unload the symbols/code/data via DBL_load()/DBL_unload(). 
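/*
 * Illustrative aside, not part of the kernel patch: DBL_loadSect() above
 * recovers the node phase from the requested section name.  If the name equals
 * the overlay entry exactly, the section is not phase specific (NONE);
 * otherwise the trailing character encodes the phase (0 = create, 1 = delete,
 * 2 = execute).  A plain-C sketch of that decision, using libc routines in
 * place of the CSL_* helpers:
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

enum { CREATEPHASE = 0, DELETEPHASE = 1, EXECUTEPHASE = 2, NONE = 3 };

static int overlay_phase(const char *ovly_name, const char *sect_name)
{
	if (strcmp(ovly_name, sect_name) == 0)
		return NONE;		/* not a node overlay */

	/* last character of the section name carries the phase digit */
	return atoi(sect_name + strlen(sect_name) - 1);
}

int main(void)
{
	assert(overlay_phase("mynode", "mynode") == NONE);
	assert(overlay_phase("mynode", "mynode0") == CREATEPHASE);
	assert(overlay_phase("mynode", "mynode2") == EXECUTEPHASE);
	return 0;
}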
-+ */ -+DSP_STATUS DBL_open(struct DBL_TargetObj *target, char *file, DBL_Flags flags, -+ struct DBL_LibraryObj **pLib) -+{ -+ struct DBL_LibraryObj *pdblLib = NULL; -+ u16 nSymbols; -+ u16 nDCDSects; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); -+ DBC_Require(target->dblAttrs.fopen != NULL); -+ DBC_Require(file != NULL); -+ DBC_Require(pLib != NULL); -+ -+ GT_3trace(DBL_debugMask, GT_ENTER, "DBL_open: target: 0x%x file: %s " -+ "pLib: 0x%x\n", target, file, pLib); -+ /* Allocate DBL library object */ -+ MEM_AllocObject(pdblLib, struct DBL_LibraryObj, DBL_LIBSIGNATURE); -+ if (pdblLib == NULL) -+ status = DSP_EMEMORY; -+ -+ /* Open the file */ -+ if (DSP_SUCCEEDED(status)) { -+ pdblLib->pTarget = target; -+ pdblLib->file = (*target->dblAttrs.fopen)(file, "rb"); -+ if (pdblLib->file == NULL) -+ status = DSP_EFOPEN; -+ -+ } -+ /* Read file header */ -+ if (DSP_SUCCEEDED(status)) { -+ status = readHeader(target, pdblLib); -+ if (DSP_FAILED(status)) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "DBL_open(): Failed to read file header\n"); -+ } -+ } -+ /* Allocate symbol table */ -+ if (DSP_SUCCEEDED(status)) { -+ nSymbols = pdblLib->nSymbols = pdblLib->fileHdr.numSymbols; -+ pdblLib->symbols = MEM_Calloc(nSymbols * sizeof(struct Symbol), -+ MEM_PAGED); -+ if (pdblLib->symbols == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ /* Read all the symbols */ -+ if (DSP_SUCCEEDED(status)) { -+ status = readSymbols(target, pdblLib); -+ if (DSP_FAILED(status)) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "DBL_open(): Failed to read symbols\n"); -+ } -+ } -+ /* Allocate DCD sect table */ -+ if (DSP_SUCCEEDED(status)) { -+ nDCDSects = pdblLib->nDCDSects = pdblLib->fileHdr.numDCDSects; -+ pdblLib->dcdSects = MEM_Calloc(nDCDSects * -+ sizeof(struct DCDSect), MEM_PAGED); -+ if (pdblLib->dcdSects == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ /* Read DCD sections */ -+ if (DSP_SUCCEEDED(status)) { -+ status = readDCDSects(target, pdblLib); -+ if (DSP_FAILED(status)) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "DBL_open(): Failed to read DCD sections\n"); -+ } -+ } -+ /* Read overlay sections */ -+ if (DSP_SUCCEEDED(status)) { -+ status = readOvlySects(target, pdblLib); -+ if (DSP_FAILED(status)) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "DBL_open(): Failed to read " -+ "overlay sections\n"); -+ } -+ } -+ if (DSP_FAILED(status)) { -+ *pLib = NULL; -+ if (pdblLib != NULL) -+ DBL_close((struct DBL_LibraryObj *) pdblLib); -+ -+ } else { -+ *pLib = pdblLib; -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && -+ (MEM_IsValidHandle((*pLib), DBL_LIBSIGNATURE))) || -+ (DSP_FAILED(status) && *pLib == NULL)); -+ return status; -+} -+ -+/* -+ * ======== DBL_readSect ======== -+ * Purpose: -+ * Read COFF section into a character buffer. -+ */ -+DSP_STATUS DBL_readSect(struct DBL_LibraryObj *lib, char *name, char *pContent, -+ u32 size) -+{ -+ struct DBL_LibraryObj *pdblLib = (struct DBL_LibraryObj *)lib; -+ u16 i; -+ u32 mauSize; -+ u32 max; -+ DSP_STATUS status = DSP_ENOSECT; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pdblLib, DBL_LIBSIGNATURE)); -+ DBC_Require(name != NULL); -+ DBC_Require(pContent != NULL); -+ DBC_Require(size != 0); -+ GT_4trace(DBL_debugMask, GT_ENTER, "DBL_readSect: lib: 0x%x name: %s " -+ "pContent: 0x%x size: 0x%x\n", lib, name, pContent, size); -+ -+ mauSize = pdblLib->pTargetInfo->mauSize; -+ -+ /* Attempt to find match with DCD section names. 
*/ -+ for (i = 0; i < pdblLib->nDCDSects; i++) { -+ if (CSL_Strcmp(pdblLib->dcdSects[i].sectHdr.name, name) == 0) { -+ /* Match found */ -+ max = pdblLib->dcdSects[i].sectHdr.size * mauSize; -+ max = (max > size) ? size : max; -+ memcpy(pContent, pdblLib->dcdSects[i].pData, max); -+ status = DSP_SOK; -+ break; -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DBL_setAttrs ======== -+ * Purpose: -+ * Set the attributes of the target. -+ */ -+void DBL_setAttrs(struct DBL_TargetObj *target, struct DBL_Attrs *pAttrs) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(target, DBL_TARGSIGNATURE)); -+ DBC_Require(pAttrs != NULL); -+ -+ GT_2trace(DBL_debugMask, GT_ENTER, "DBL_setAttrs: target: 0x%x pAttrs: " -+ "0x%x\n", target, pAttrs); -+ -+ target->dblAttrs = *pAttrs; -+} -+ -+/* -+ * ======== DBL_unload ======== -+ * Purpose: -+ * Remove the symbols/code/data corresponding to the library lib. -+ */ -+void DBL_unload(struct DBL_LibraryObj *lib, struct DBL_Attrs *attrs) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); -+ -+ GT_1trace(DBL_debugMask, GT_ENTER, "DBL_unload: lib: 0x%x\n", lib); -+ -+ /* Nothing to do for static loading */ -+} -+ -+/* -+ * ======== DBL_unloadSect ======== -+ * Purpose: -+ * Unload a named section from an library (for overlay support). -+ */ -+DSP_STATUS DBL_unloadSect(struct DBL_LibraryObj *lib, char *sectName, -+ struct DBL_Attrs *attrs) -+{ -+ struct DBL_TargetObj *dbl; -+ s32 i; -+ s32 phase; -+ s32 offset = -1; -+ s32 nSects = -1; -+ u16 *phaseRef = NULL; -+ u16 *otherRef = NULL; -+ char *pName = NULL; -+ struct OvlyData *pOvlyData; -+ DSP_STATUS status = DSP_ENOSECT; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(lib, DBL_LIBSIGNATURE)); -+ DBC_Require(sectName != NULL); -+ -+ GT_2trace(DBL_debugMask, GT_ENTER, -+ "DBL_unloadSect: lib: 0x%x sectName: %s\n", lib, sectName); -+ dbl = lib->pTarget; -+ /* Check for match of sect name in overlay table */ -+ for (i = 0; i < lib->nOvlySects; i++) { -+ pName = lib->ppOvlyData[i]->hdr.pName; -+ if (!CSL_Strncmp(pName, sectName, CSL_Strlen(pName))) { -+ /* Match found */ -+ status = DSP_SOK; -+ break; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ DBC_Assert(i < lib->nOvlySects); -+ pOvlyData = lib->ppOvlyData[i]; -+ /* If node overlay, phase will be encoded in name. */ -+ phase = (CSL_Strcmp(pName, sectName)) ? 
-+ CSL_Atoi(sectName + CSL_Strlen(sectName) - 1) : NONE; -+ switch (phase) { -+ case NONE: -+ nSects = numOtherSects(pOvlyData); -+ phaseRef = &pOvlyData->hdr.otherRef; -+ offset = otherOffset(pOvlyData); -+ break; -+ case CREATEPHASE: -+ nSects = numCreateSects(pOvlyData); -+ offset = createOffset(pOvlyData); -+ phaseRef = &pOvlyData->hdr.createRef; -+ break; -+ case DELETEPHASE: -+ nSects = numDeleteSects(pOvlyData); -+ offset = deleteOffset(pOvlyData); -+ phaseRef = &pOvlyData->hdr.deleteRef; -+ otherRef = &pOvlyData->hdr.otherRef; -+ break; -+ case EXECUTEPHASE: -+ nSects = numExecuteSects(pOvlyData); -+ offset = executeOffset(pOvlyData); -+ phaseRef = &pOvlyData->hdr.executeRef; -+ break; -+ default: -+ /* ERROR */ -+ DBC_Assert(false); -+ break; -+ } -+ if (*phaseRef) { -+ *phaseRef = *phaseRef - 1; -+ if (*phaseRef == 0) { -+ /* Unload overlay sections for phase */ -+ freeSects(dbl, pOvlyData, offset, nSects); -+ } -+ if (phase == DELETEPHASE) { -+ DBC_Assert(*otherRef); -+ *otherRef = *otherRef - 1; -+ if (*otherRef == 0) { -+ /* Unload other overlay sections */ -+ nSects = numOtherSects(pOvlyData); -+ offset = otherOffset(pOvlyData); -+ freeSects(dbl, pOvlyData, offset, -+ nSects); -+ } -+ } -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== freeSects ======== -+ * Purpose: -+ * Free section -+ */ -+static void freeSects(struct DBL_TargetObj *dbl, struct OvlyData *pOvlyData, -+ s32 offset, s32 nSects) -+{ -+ u32 runAddr; -+ u32 size; -+ u32 space; -+ s32 i; -+ -+ for (i = 0; i < nSects; i++) { -+ runAddr = pOvlyData->data[offset + i].runAddr; -+ size = pOvlyData->data[offset + i].size; -+ space = pOvlyData->data[offset + i].page; -+ if (!(dbl->dblAttrs.free) -+ (dbl->dblAttrs.rmmHandle, space, runAddr, size, true)) { -+ /* -+ * Free function will not fail for overlay, unless -+ * address passed in is bad. 
-+ */ -+ DBC_Assert(false); -+ } -+ } -+} -+ -+/* -+ * ======== loadSect ======== -+ * Purpose: -+ * Load section to target -+ */ -+static DSP_STATUS loadSect(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib) -+{ -+ struct DBOF_SectHdr sectHdr; -+ char *pBuf; -+ struct KFILE_FileObj *file; -+ u32 space; -+ u32 addr; -+ u32 total; -+ u32 nWords = 0; -+ u32 nBytes = 0; -+ u16 mauSize; -+ u32 bufSize; -+ DSP_STATUS status = DSP_SOK; -+ -+ file = pdblLib->file; -+ mauSize = pdblLib->pTargetInfo->mauSize; -+ bufSize = LOADBUFSIZE / mauSize; -+ pBuf = dbl->pBuf; -+ -+ /* Read the section header */ -+ if ((*dbl->dblAttrs.fread)(§Hdr, sizeof(struct DBOF_SectHdr), -+ 1, file) != 1) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read DCD sect header\n"); -+ status = DSP_EFREAD; -+ } else { -+ if (pdblLib->byteSwapped) { -+ sectHdr.size = SWAPLONG(sectHdr.size); -+ sectHdr.addr = SWAPLONG(sectHdr.addr); -+ sectHdr.page = SWAPWORD(sectHdr.page); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ addr = sectHdr.addr; -+ space = sectHdr.page; -+ for (total = sectHdr.size; total > 0; total -= nWords) { -+ nWords = min(total, bufSize); -+ nBytes = nWords * mauSize; -+ /* Read section data */ -+ if ((*dbl->dblAttrs.fread)(pBuf, nBytes, 1, -+ file) != 1) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read DCD sect header\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ /* Write section to target */ -+ if (!(*dbl->dblAttrs.write)(dbl->dblAttrs.wHandle, -+ addr, pBuf, nBytes, space)) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to write section data\n"); -+ status = DSP_EFWRITE; -+ break; -+ } -+ addr += nWords; -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== readDCDSects ======== -+ * Purpose: -+ * Read DCD sections. -+ */ -+static DSP_STATUS readDCDSects(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib) -+{ -+ struct DBOF_DCDSectHdr *pSectHdr; -+ struct DCDSect *pSect; -+ struct KFILE_FileObj *file; -+ u16 nSects; -+ u16 i; -+ u16 mauSize; -+ DSP_STATUS status = DSP_SOK; -+ -+ file = pdblLib->file; -+ mauSize = pdblLib->pTargetInfo->mauSize; -+ nSects = pdblLib->fileHdr.numDCDSects; -+ for (i = 0; i < nSects; i++) { -+ pSect = &pdblLib->dcdSects[i]; -+ pSectHdr = &pdblLib->dcdSects[i].sectHdr; -+ /* Read sect header */ -+ if ((*dbl->dblAttrs.fread)(pSectHdr, -+ sizeof(struct DBOF_DCDSectHdr), 1, file) != 1) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read DCD sect header\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ if (pdblLib->byteSwapped) -+ pSectHdr->size = SWAPLONG(pSectHdr->size); -+ -+ pSect->pData = (char *)MEM_Calloc(pSectHdr->size * -+ mauSize, MEM_PAGED); -+ if (pSect->pData == NULL) { -+ GT_2trace(DBL_debugMask, GT_6CLASS, -+ "Memory allocation for sect %s " -+ "data failed: Size: 0x%lx\n", pSectHdr->name, -+ pSectHdr->size); -+ status = DSP_EMEMORY; -+ break; -+ } -+ /* Read DCD sect data */ -+ if ((*dbl->dblAttrs.fread)(pSect->pData, mauSize, -+ pSectHdr->size, file) != pSectHdr->size) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read DCD sect data\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== readHeader ======== -+ * Purpose: -+ * Read Header. 
-+ */ -+static DSP_STATUS readHeader(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib) -+{ -+ struct KFILE_FileObj *file; -+ s32 i; -+ struct DBOF_FileHdr *pHdr; -+ u32 swapMagic; -+ DSP_STATUS status = DSP_SOK; -+ -+ pdblLib->byteSwapped = false; -+ file = pdblLib->file; -+ pHdr = &pdblLib->fileHdr; -+ if ((*dbl->dblAttrs.fread)(pHdr, sizeof(struct DBOF_FileHdr), 1, -+ file) != 1) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "readHeader: Failed to read file header\n"); -+ status = DSP_EFREAD; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Determine if byte swapped */ -+ for (i = 0; i < NUMTARGS; i++) { -+ swapMagic = SWAPLONG(pHdr->magic); -+ if (pHdr->magic == magicTab[i] || swapMagic == -+ magicTab[i]) { -+ if (swapMagic == magicTab[i]) { -+ pdblLib->byteSwapped = true; -+ pHdr->magic = SWAPLONG(pHdr->magic); -+ pHdr->entry = SWAPLONG(pHdr->entry); -+ pHdr->symOffset = SWAPLONG(pHdr-> -+ symOffset); -+ pHdr->dcdSectOffset = SWAPLONG(pHdr-> -+ dcdSectOffset); -+ pHdr->loadSectOffset = SWAPLONG(pHdr-> -+ loadSectOffset); -+ pHdr->ovlySectOffset = SWAPLONG(pHdr-> -+ ovlySectOffset); -+ pHdr->numSymbols = SWAPWORD(pHdr-> -+ numSymbols); -+ pHdr->numDCDSects = SWAPWORD(pHdr-> -+ numDCDSects); -+ pHdr->numSects = SWAPWORD(pHdr-> -+ numSects); -+ pHdr->numOvlySects = SWAPWORD(pHdr-> -+ numOvlySects); -+ } -+ break; -+ } -+ } -+ if (i == NUMTARGS) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "readHeader: Failed to determine" -+ " target type\n"); -+ status = DSP_ECORRUPTFILE; -+ } else { -+ pdblLib->pTargetInfo = &targetTab[i]; -+ GT_1trace(DBL_debugMask, GT_ENTER, -+ "COF type: 0x%lx\n", pHdr->magic); -+ GT_1trace(DBL_debugMask, GT_ENTER, -+ "Entry point:0x%lx\n", pHdr->entry); -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== readOvlySects ======== -+ * Purpose: -+ * Read Overlay Sections -+ */ -+static DSP_STATUS readOvlySects(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib) -+{ -+ struct DBOF_OvlySectHdr hdr; -+ struct DBOF_OvlySectData *pData; -+ struct OvlyData *pOvlyData; -+ char *pName; -+ struct KFILE_FileObj *file; -+ u16 i, j; -+ u16 nSects; -+ u16 n; -+ DSP_STATUS status = DSP_SOK; -+ -+ pdblLib->nOvlySects = nSects = pdblLib->fileHdr.numOvlySects; -+ file = pdblLib->file; -+ if (nSects > 0) { -+ pdblLib->ppOvlyData = MEM_Calloc(nSects * sizeof(OvlyData *), -+ MEM_PAGED); -+ if (pdblLib->ppOvlyData == NULL) { -+ GT_0trace(DBL_debugMask, GT_7CLASS, -+ "Failed to allocatate overlay " -+ "data memory\n"); -+ status = DSP_EMEMORY; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Read overlay data for each node */ -+ for (i = 0; i < nSects; i++) { -+ /* Read overlay section header */ -+ if ((*dbl->dblAttrs.fread)(&hdr, -+ sizeof(struct DBOF_OvlySectHdr), 1, file) != 1) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read overlay sect" -+ " header\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ if (pdblLib->byteSwapped) { -+ hdr.nameLen = SWAPWORD(hdr.nameLen); -+ hdr.numCreateSects = -+ SWAPWORD(hdr.numCreateSects); -+ hdr.numDeleteSects = -+ SWAPWORD(hdr.numDeleteSects); -+ hdr.numExecuteSects = -+ SWAPWORD(hdr.numExecuteSects); -+ hdr.numOtherSects = -+ SWAPWORD(hdr.numOtherSects); -+ hdr.resvd = SWAPWORD(hdr.resvd); -+ } -+ n = hdr.numCreateSects + hdr.numDeleteSects + -+ hdr.numExecuteSects + hdr.numOtherSects; -+ -+ /* Allocate memory for node's overlay data */ -+ pOvlyData = (struct OvlyData *)MEM_Calloc -+ (sizeof(struct OvlyHdr) + -+ n * sizeof(struct DBOF_OvlySectData), -+ MEM_PAGED); -+ if (pOvlyData == NULL) { -+ GT_0trace(DBL_debugMask, 
GT_7CLASS, -+ "Failed to allocatate ovlyay" -+ " data memory\n"); -+ status = DSP_EMEMORY; -+ break; -+ } -+ pOvlyData->hdr.dbofHdr = hdr; -+ pdblLib->ppOvlyData[i] = pOvlyData; -+ /* Allocate memory for section name */ -+ pName = (char *)MEM_Calloc(hdr.nameLen + 1, MEM_PAGED); -+ if (pName == NULL) { -+ GT_0trace(DBL_debugMask, GT_7CLASS, -+ "Failed to allocatate ovlyay" -+ " section name\n"); -+ status = DSP_EMEMORY; -+ break; -+ } -+ pOvlyData->hdr.pName = pName; -+ /* Read the overlay section name */ -+ if ((*dbl->dblAttrs.fread)(pName, sizeof(char), -+ hdr.nameLen, file) != hdr.nameLen) { -+ GT_0trace(DBL_debugMask, GT_7CLASS, -+ "readOvlySects: Unable to " -+ "read overlay name.\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ /* Read the overlay section data */ -+ pData = pOvlyData->data; -+ if ((*dbl->dblAttrs.fread)(pData, -+ sizeof(struct DBOF_OvlySectData), n, file) != n) { -+ GT_0trace(DBL_debugMask, GT_7CLASS, -+ "readOvlySects: Unable to " -+ "read overlay data.\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ /* Swap overlay data, if necessary */ -+ if (pdblLib->byteSwapped) { -+ for (j = 0; j < n; j++) { -+ pData[j].loadAddr = -+ SWAPLONG(pData[j].loadAddr); -+ pData[j].runAddr = -+ SWAPLONG(pData[j].runAddr); -+ pData[j].size = -+ SWAPLONG(pData[j].size); -+ pData[j].page = -+ SWAPWORD(pData[j].page); -+ } -+ } -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== readSymbols ======== -+ * Purpose: -+ * Read Symbols -+ */ -+static DSP_STATUS readSymbols(struct DBL_TargetObj *dbl, -+ struct DBL_LibraryObj *pdblLib) -+{ -+ struct DBOF_SymbolHdr symHdr; -+ struct KFILE_FileObj *file; -+ u16 i; -+ u16 nSymbols; -+ u16 len; -+ char *pName = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ file = pdblLib->file; -+ -+ nSymbols = pdblLib->fileHdr.numSymbols; -+ -+ for (i = 0; i < nSymbols; i++) { -+ /* Read symbol value */ -+ if ((*dbl->dblAttrs.fread)(&symHdr, -+ sizeof(struct DBOF_SymbolHdr), 1, file) != 1) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read symbol value\n"); -+ status = DSP_EFREAD; -+ break; -+ } -+ if (pdblLib->byteSwapped) { -+ symHdr.nameLen = SWAPWORD(symHdr.nameLen); -+ symHdr.value = SWAPLONG(symHdr.value); -+ } -+ /* Allocate buffer for symbol name */ -+ len = symHdr.nameLen; -+ pName = (char *)MEM_Calloc(len + 1, MEM_PAGED); -+ if (pName == NULL) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ break; -+ } -+ pdblLib->symbols[i].pSymName = pName; -+ pdblLib->symbols[i].sym.value = symHdr.value; -+ /* Read symbol name */ -+ if ((*dbl->dblAttrs.fread) (pName, sizeof(char), len, file) != -+ len) { -+ GT_0trace(DBL_debugMask, GT_6CLASS, -+ "Failed to read symbol value\n"); -+ status = DSP_EFREAD; -+ break; -+ } else { -+ pName[len] = '\0'; -+ GT_2trace(DBL_debugMask, GT_ENTER, -+ "Symbol: %s Value: 0x%lx\n", -+ pName, symHdr.value); -+ } -+ } -+ return status; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbll.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dbll.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dbll.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dbll.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,1572 @@ -+/* -+ * dbll.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
-+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== dbll.c ======== -+ * -+ *! Revision History -+ *! ================ -+ *! 25-Apr-2030 map: Fixed symbol redefinition bug + unload and return error -+ *! 08-Apr-2003 map: Consolidated DBL with DBLL loader name -+ *! 24-Mar-2003 map: Updated findSymbol to support dllview update -+ *! 23-Jan-2003 map: Updated rmmAlloc to support memory granularity -+ *! 21-Nov-2002 map: Combine fopen and DLOAD_module_open to increase -+ *! performance on start. -+ *! 04-Oct-2002 map: Integrated new TIP dynamic loader w/ DOF api. -+ *! 27-Sep-2002 map: Changed handle passed to RemoteFree, instead of -+ *! RMM_free; added GT_trace to rmmDealloc -+ *! 20-Sep-2002 map: Updated from Code Review -+ *! 08-Aug-2002 jeh: Updated to support overlays. -+ *! 25-Jun-2002 jeh: Pass RMM_Addr object to alloc function in rmmAlloc(). -+ *! 20-Mar-2002 jeh: Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* Dynamic loader library interface */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+#define DBLL_TARGSIGNATURE 0x544c4c44 /* "TLLD" */ -+#define DBLL_LIBSIGNATURE 0x4c4c4c44 /* "LLLD" */ -+ -+/* Number of buckets for symbol hash table */ -+#define MAXBUCKETS 211 -+ -+/* Max buffer length */ -+#define MAXEXPR 128 -+ -+#ifndef UINT32_C -+#define UINT32_C(zzz) ((uint32_t)zzz) -+#endif -+#define DOFF_ALIGN(x) (((x) + 3) & ~UINT32_C(3)) -+ -+/* -+ * ======== struct DBLL_TarObj* ======== -+ * A target may have one or more libraries of symbols/code/data loaded -+ * onto it, where a library is simply the symbols/code/data contained -+ * in a DOFF file. -+ */ -+/* -+ * ======== DBLL_TarObj ======== -+ */ -+struct DBLL_TarObj { -+ u32 dwSignature; /* For object validation */ -+ struct DBLL_Attrs attrs; -+ struct DBLL_LibraryObj *head; /* List of all opened libraries */ -+} ; -+ -+/* -+ * The following 4 typedefs are "super classes" of the dynamic loader -+ * library types used in dynamic loader functions (dynamic_loader.h). 
-+ */ -+/* -+ * ======== DBLLStream ======== -+ * Contains Dynamic_Loader_Stream -+ */ -+struct DBLLStream { -+ struct Dynamic_Loader_Stream dlStream; -+ struct DBLL_LibraryObj *lib; -+} ; -+ -+/* -+ * ======== DBLLSymbol ======== -+ */ -+struct DBLLSymbol { -+ struct Dynamic_Loader_Sym dlSymbol; -+ struct DBLL_LibraryObj *lib; -+} ; -+ -+/* -+ * ======== DBLLAlloc ======== -+ */ -+ struct DBLLAlloc { -+ struct Dynamic_Loader_Allocate dlAlloc; -+ struct DBLL_LibraryObj *lib; -+} ; -+ -+/* -+ * ======== DBLLInit ======== -+ */ -+struct DBLLInit { -+ struct Dynamic_Loader_Initialize dlInit; -+ struct DBLL_LibraryObj *lib; -+}; -+ -+/* -+ * ======== DBLL_Library ======== -+ * A library handle is returned by DBLL_Open() and is passed to DBLL_load() -+ * to load symbols/code/data, and to DBLL_unload(), to remove the -+ * symbols/code/data loaded by DBLL_load(). -+ */ -+ -+/* -+ * ======== DBLL_LibraryObj ======== -+ */ -+ struct DBLL_LibraryObj { -+ u32 dwSignature; /* For object validation */ -+ struct DBLL_LibraryObj *next; /* Next library in target's list */ -+ struct DBLL_LibraryObj *prev; /* Previous in the list */ -+ struct DBLL_TarObj *pTarget; /* target for this library */ -+ -+ /* Objects needed by dynamic loader */ -+ struct DBLLStream stream; -+ struct DBLLSymbol symbol; -+ struct DBLLAlloc allocate; -+ struct DBLLInit init; -+ DLOAD_mhandle mHandle; -+ -+ char *fileName; /* COFF file name */ -+ void *fp; /* Opaque file handle */ -+ u32 entry; /* Entry point */ -+ DLOAD_mhandle desc; /* desc of DOFF file loaded */ -+ u32 openRef; /* Number of times opened */ -+ u32 loadRef; /* Number of times loaded */ -+ struct GH_THashTab *symTab; /* Hash table of symbols */ -+ u32 ulPos; -+} ; -+ -+/* -+ * ======== Symbol ======== -+ */ -+struct Symbol { -+ struct DBLL_Symbol value; -+ char *name; -+} ; -+extern bool bSymbolsReloaded; -+ -+static void dofClose(struct DBLL_LibraryObj *zlLib); -+static DSP_STATUS dofOpen(struct DBLL_LibraryObj *zlLib); -+static s32 NoOp(struct Dynamic_Loader_Initialize *thisptr, void *bufr, -+ LDR_ADDR locn, struct LDR_SECTION_INFO *info, unsigned bytsiz); -+ -+/* -+ * Functions called by dynamic loader -+ * -+ */ -+/* Dynamic_Loader_Stream */ -+static int readBuffer(struct Dynamic_Loader_Stream *this, void *buffer, -+ unsigned bufsize); -+static int setFilePosn(struct Dynamic_Loader_Stream *this, unsigned int pos); -+/* Dynamic_Loader_Sym */ -+static struct dynload_symbol *findSymbol(struct Dynamic_Loader_Sym *this, -+ const char *name); -+static struct dynload_symbol *addToSymbolTable(struct Dynamic_Loader_Sym *this, -+ const char *name, -+ unsigned moduleId); -+static struct dynload_symbol *findInSymbolTable(struct Dynamic_Loader_Sym *this, -+ const char *name, -+ unsigned moduleid); -+static void purgeSymbolTable(struct Dynamic_Loader_Sym *this, -+ unsigned moduleId); -+static void *allocate(struct Dynamic_Loader_Sym *this, unsigned memsize); -+static void deallocate(struct Dynamic_Loader_Sym *this, void *memPtr); -+static void errorReport(struct Dynamic_Loader_Sym *this, const char *errstr, -+ va_list args); -+/* Dynamic_Loader_Allocate */ -+static int rmmAlloc(struct Dynamic_Loader_Allocate *this, -+ struct LDR_SECTION_INFO *info, unsigned align); -+static void rmmDealloc(struct Dynamic_Loader_Allocate *this, -+ struct LDR_SECTION_INFO *info); -+ -+/* Dynamic_Loader_Initialize */ -+static int connect(struct Dynamic_Loader_Initialize *this); -+static int readMem(struct Dynamic_Loader_Initialize *this, void *buf, -+ LDR_ADDR addr, struct LDR_SECTION_INFO *info, 
-+ unsigned nbytes); -+static int writeMem(struct Dynamic_Loader_Initialize *this, void *buf, -+ LDR_ADDR addr, struct LDR_SECTION_INFO *info, -+ unsigned nbytes); -+static int fillMem(struct Dynamic_Loader_Initialize *this, LDR_ADDR addr, -+ struct LDR_SECTION_INFO *info, unsigned nbytes, -+ unsigned val); -+static int execute(struct Dynamic_Loader_Initialize *this, LDR_ADDR start); -+static void release(struct Dynamic_Loader_Initialize *this); -+ -+/* symbol table hash functions */ -+static u16 nameHash(void *name, u16 maxBucket); -+static bool nameMatch(void *name, void *sp); -+static void symDelete(void *sp); -+ -+#if GT_TRACE -+static struct GT_Mask DBLL_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static u32 cRefs; /* module reference count */ -+ -+/* Symbol Redefinition */ -+static int bRedefinedSymbol; -+static int bGblSearch = 1; -+ -+/* -+ * ======== DBLL_close ======== -+ */ -+void DBLL_close(struct DBLL_LibraryObj *zlLib) -+{ -+ struct DBLL_TarObj *zlTarget; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ DBC_Require(zlLib->openRef > 0); -+ zlTarget = zlLib->pTarget; -+ GT_1trace(DBLL_debugMask, GT_ENTER, "DBLL_close: lib: 0x%x\n", zlLib); -+ zlLib->openRef--; -+ if (zlLib->openRef == 0) { -+ /* Remove library from list */ -+ if (zlTarget->head == zlLib) -+ zlTarget->head = zlLib->next; -+ -+ if (zlLib->prev) -+ (zlLib->prev)->next = zlLib->next; -+ -+ if (zlLib->next) -+ (zlLib->next)->prev = zlLib->prev; -+ -+ /* Free DOF resources */ -+ dofClose(zlLib); -+ if (zlLib->fileName) -+ MEM_Free(zlLib->fileName); -+ -+ /* remove symbols from symbol table */ -+ if (zlLib->symTab) -+ GH_delete(zlLib->symTab); -+ -+ /* remove the library object itself */ -+ MEM_FreeObject(zlLib); -+ zlLib = NULL; -+ } -+} -+ -+/* -+ * ======== DBLL_create ======== -+ */ -+DSP_STATUS DBLL_create(struct DBLL_TarObj **pTarget, struct DBLL_Attrs *pAttrs) -+{ -+ struct DBLL_TarObj *pzlTarget; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pAttrs != NULL); -+ DBC_Require(pTarget != NULL); -+ -+ GT_2trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_create: pTarget: 0x%x pAttrs: " -+ "0x%x\n", pTarget, pAttrs); -+ /* Allocate DBL target object */ -+ MEM_AllocObject(pzlTarget, struct DBLL_TarObj, DBLL_TARGSIGNATURE); -+ if (pTarget != NULL) { -+ if (pzlTarget == NULL) { -+ GT_0trace(DBLL_debugMask, GT_6CLASS, -+ "DBLL_create: Memory allocation" -+ " failed\n"); -+ *pTarget = NULL; -+ status = DSP_EMEMORY; -+ } else { -+ pzlTarget->attrs = *pAttrs; -+ *pTarget = (struct DBLL_TarObj *)pzlTarget; -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle(((struct DBLL_TarObj *)(*pTarget)), -+ DBLL_TARGSIGNATURE)) || (DSP_FAILED(status) && -+ *pTarget == NULL)); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DBLL_delete ======== -+ */ -+void DBLL_delete(struct DBLL_TarObj *target) -+{ -+ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); -+ -+ GT_1trace(DBLL_debugMask, GT_ENTER, "DBLL_delete: target: 0x%x\n", -+ target); -+ -+ if (zlTarget != NULL) -+ MEM_FreeObject(zlTarget); -+ -+} -+ -+/* -+ * ======== DBLL_exit ======== -+ * Discontinue usage of DBL module. 
-+ */ -+void DBLL_exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(DBLL_debugMask, GT_5CLASS, "DBLL_exit() ref count: 0x%x\n", -+ cRefs); -+ -+ if (cRefs == 0) { -+ MEM_Exit(); -+ CSL_Exit(); -+ GH_exit(); -+#if GT_TRACE -+ DBLL_debugMask.flags = NULL; -+#endif -+ } -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== DBLL_getAddr ======== -+ * Get address of name in the specified library. -+ */ -+bool DBLL_getAddr(struct DBLL_LibraryObj *zlLib, char *name, -+ struct DBLL_Symbol **ppSym) -+{ -+ struct Symbol *sym; -+ bool status = false; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ DBC_Require(name != NULL); -+ DBC_Require(ppSym != NULL); -+ DBC_Require(zlLib->symTab != NULL); -+ -+ GT_3trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_getAddr: lib: 0x%x name: %s pAddr:" -+ " 0x%x\n", zlLib, name, ppSym); -+ sym = (struct Symbol *)GH_find(zlLib->symTab, name); -+ if (sym != NULL) { -+ *ppSym = &sym->value; -+ status = true; -+ } -+ return status; -+} -+ -+/* -+ * ======== DBLL_getAttrs ======== -+ * Retrieve the attributes of the target. -+ */ -+void DBLL_getAttrs(struct DBLL_TarObj *target, struct DBLL_Attrs *pAttrs) -+{ -+ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); -+ DBC_Require(pAttrs != NULL); -+ -+ if ((pAttrs != NULL) && (zlTarget != NULL)) -+ *pAttrs = zlTarget->attrs; -+ -+} -+ -+/* -+ * ======== DBLL_getCAddr ======== -+ * Get address of a "C" name in the specified library. -+ */ -+bool DBLL_getCAddr(struct DBLL_LibraryObj *zlLib, char *name, -+ struct DBLL_Symbol **ppSym) -+{ -+ struct Symbol *sym; -+ char cname[MAXEXPR + 1]; -+ bool status = false; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ DBC_Require(ppSym != NULL); -+ DBC_Require(zlLib->symTab != NULL); -+ DBC_Require(name != NULL); -+ -+ cname[0] = '_'; -+ -+ strncpy(cname + 1, name, sizeof(cname) - 2); -+ cname[MAXEXPR] = '\0'; /* insure '\0' string termination */ -+ -+ /* Check for C name, if not found */ -+ sym = (struct Symbol *)GH_find(zlLib->symTab, cname); -+ -+ if (sym != NULL) { -+ *ppSym = &sym->value; -+ status = true; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DBLL_getSect ======== -+ * Get the base address and size (in bytes) of a COFF section. -+ */ -+DSP_STATUS DBLL_getSect(struct DBLL_LibraryObj *lib, char *name, u32 *pAddr, -+ u32 *pSize) -+{ -+ u32 uByteSize; -+ bool fOpenedDoff = false; -+ const struct LDR_SECTION_INFO *sect = NULL; -+ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(name != NULL); -+ DBC_Require(pAddr != NULL); -+ DBC_Require(pSize != NULL); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ -+ GT_4trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_getSect: lib: 0x%x name: %s pAddr:" -+ " 0x%x pSize: 0x%x\n", lib, name, pAddr, pSize); -+ /* If DOFF file is not open, we open it. 
*/ -+ if (zlLib != NULL) { -+ if (zlLib->fp == NULL) { -+ status = dofOpen(zlLib); -+ if (DSP_SUCCEEDED(status)) -+ fOpenedDoff = true; -+ -+ } else { -+ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, -+ zlLib->ulPos, SEEK_SET); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ uByteSize = 1; -+ if (DLOAD_GetSectionInfo(zlLib->desc, name, §)) { -+ *pAddr = sect->load_addr; -+ *pSize = sect->size * uByteSize; -+ /* Make sure size is even for good swap */ -+ if (*pSize % 2) -+ (*pSize)++; -+ -+ /* Align size */ -+ *pSize = DOFF_ALIGN(*pSize); -+ } else { -+ status = DSP_ENOSECT; -+ } -+ } -+ if (fOpenedDoff) { -+ dofClose(zlLib); -+ fOpenedDoff = false; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DBLL_init ======== -+ */ -+bool DBLL_init(void) -+{ -+ bool retVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!DBLL_debugMask.flags); -+ GT_create(&DBLL_debugMask, "DL"); /* "DL" for dbDL */ -+ GH_init(); -+ CSL_Init(); -+ retVal = MEM_Init(); -+ if (!retVal) -+ MEM_Exit(); -+ -+ } -+ -+ if (retVal) -+ cRefs++; -+ -+ -+ GT_1trace(DBLL_debugMask, GT_5CLASS, "DBLL_init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((retVal && (cRefs > 0)) || (!retVal && (cRefs >= 0))); -+ -+ return retVal; -+} -+ -+/* -+ * ======== DBLL_load ======== -+ */ -+DSP_STATUS DBLL_load(struct DBLL_LibraryObj *lib, DBLL_Flags flags, -+ struct DBLL_Attrs *attrs, u32 *pEntry) -+{ -+ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; -+ struct DBLL_TarObj *dbzl; -+ bool gotSymbols = true; -+ s32 err; -+ DSP_STATUS status = DSP_SOK; -+ bool fOpenedDoff = false; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ DBC_Require(pEntry != NULL); -+ DBC_Require(attrs != NULL); -+ -+ GT_4trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_load: lib: 0x%x flags: 0x%x pEntry:" -+ " 0x%x\n", lib, flags, attrs, pEntry); -+ /* -+ * Load if not already loaded. -+ */ -+ if (zlLib->loadRef == 0 || !(flags & DBLL_DYNAMIC)) { -+ dbzl = zlLib->pTarget; -+ dbzl->attrs = *attrs; -+ /* Create a hash table for symbols if not already created */ -+ if (zlLib->symTab == NULL) { -+ gotSymbols = false; -+ zlLib->symTab = GH_create(MAXBUCKETS, -+ sizeof(struct Symbol), -+ nameHash, -+ nameMatch, symDelete); -+ if (zlLib->symTab == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ /* -+ * Set up objects needed by the dynamic loader -+ */ -+ /* Stream */ -+ zlLib->stream.dlStream.read_buffer = readBuffer; -+ zlLib->stream.dlStream.set_file_posn = setFilePosn; -+ zlLib->stream.lib = zlLib; -+ /* Symbol */ -+ zlLib->symbol.dlSymbol.Find_Matching_Symbol = findSymbol; -+ if (gotSymbols) { -+ zlLib->symbol.dlSymbol.Add_To_Symbol_Table = -+ findInSymbolTable; -+ } else { -+ zlLib->symbol.dlSymbol.Add_To_Symbol_Table = -+ addToSymbolTable; -+ } -+ zlLib->symbol.dlSymbol.Purge_Symbol_Table = purgeSymbolTable; -+ zlLib->symbol.dlSymbol.Allocate = allocate; -+ zlLib->symbol.dlSymbol.Deallocate = deallocate; -+ zlLib->symbol.dlSymbol.Error_Report = errorReport; -+ zlLib->symbol.lib = zlLib; -+ /* Allocate */ -+ zlLib->allocate.dlAlloc.Allocate = rmmAlloc; -+ zlLib->allocate.dlAlloc.Deallocate = rmmDealloc; -+ zlLib->allocate.lib = zlLib; -+ /* Init */ -+ zlLib->init.dlInit.connect = connect; -+ zlLib->init.dlInit.readmem = readMem; -+ zlLib->init.dlInit.writemem = writeMem; -+ zlLib->init.dlInit.fillmem = fillMem; -+ zlLib->init.dlInit.execute = execute; -+ zlLib->init.dlInit.release = release; -+ zlLib->init.lib = zlLib; -+ /* If COFF file is not open, we open it. 
*/ -+ if (zlLib->fp == NULL) { -+ status = dofOpen(zlLib); -+ if (DSP_SUCCEEDED(status)) -+ fOpenedDoff = true; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ zlLib->ulPos = (*(zlLib->pTarget->attrs.ftell)) -+ (zlLib->fp); -+ /* Reset file cursor */ -+ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long)0, -+ SEEK_SET); -+ bSymbolsReloaded = true; -+ /* The 5th argument, DLOAD_INITBSS, tells the DLL -+ * module to zero-init all BSS sections. In general, -+ * this is not necessary and also increases load time. -+ * We may want to make this configurable by the user */ -+ err = Dynamic_Load_Module(&zlLib->stream.dlStream, -+ &zlLib->symbol.dlSymbol, &zlLib->allocate.dlAlloc, -+ &zlLib->init.dlInit, DLOAD_INITBSS, -+ &zlLib->mHandle); -+ -+ if (err != 0) { -+ GT_1trace(DBLL_debugMask, GT_6CLASS, -+ "DBLL_load: " -+ "Dynamic_Load_Module failed: 0x%lx\n", -+ err); -+ status = DSP_EDYNLOAD; -+ } else if (bRedefinedSymbol) { -+ zlLib->loadRef++; -+ DBLL_unload(zlLib, (struct DBLL_Attrs *) attrs); -+ bRedefinedSymbol = false; -+ status = DSP_EDYNLOAD; -+ } else { -+ *pEntry = zlLib->entry; -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) -+ zlLib->loadRef++; -+ -+ /* Clean up DOFF resources */ -+ if (fOpenedDoff) -+ dofClose(zlLib); -+ -+ DBC_Ensure(DSP_FAILED(status) || zlLib->loadRef > 0); -+ return status; -+} -+ -+/* -+ * ======== DBLL_loadSect ======== -+ * Not supported for COFF. -+ */ -+DSP_STATUS DBLL_loadSect(struct DBLL_LibraryObj *zlLib, char *sectName, -+ struct DBLL_Attrs *attrs) -+{ -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ -+ return DSP_ENOTIMPL; -+} -+ -+/* -+ * ======== DBLL_open ======== -+ */ -+DSP_STATUS DBLL_open(struct DBLL_TarObj *target, char *file, DBLL_Flags flags, -+ struct DBLL_LibraryObj **pLib) -+{ -+ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; -+ struct DBLL_LibraryObj *zlLib = NULL; -+ s32 err; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); -+ DBC_Require(zlTarget->attrs.fopen != NULL); -+ DBC_Require(file != NULL); -+ DBC_Require(pLib != NULL); -+ -+ GT_3trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_open: target: 0x%x file: %s pLib:" -+ " 0x%x\n", target, file, pLib); -+ zlLib = zlTarget->head; -+ while (zlLib != NULL) { -+ if (strcmp(zlLib->fileName, file) == 0) { -+ /* Library is already opened */ -+ zlLib->openRef++; -+ break; -+ } -+ zlLib = zlLib->next; -+ } -+ if (zlLib == NULL) { -+ /* Allocate DBL library object */ -+ MEM_AllocObject(zlLib, struct DBLL_LibraryObj, -+ DBLL_LIBSIGNATURE); -+ if (zlLib == NULL) { -+ GT_0trace(DBLL_debugMask, GT_6CLASS, -+ "DBLL_open: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } else { -+ zlLib->ulPos = 0; -+ /* Increment ref count to allow close on failure -+ * later on */ -+ zlLib->openRef++; -+ zlLib->pTarget = zlTarget; -+ /* Keep a copy of the file name */ -+ zlLib->fileName = MEM_Calloc(strlen(file) + 1, -+ MEM_PAGED); -+ if (zlLib->fileName == NULL) { -+ GT_0trace(DBLL_debugMask, GT_6CLASS, -+ "DBLL_open: Memory " -+ "allocation failed\n"); -+ status = DSP_EMEMORY; -+ } else { -+ strncpy(zlLib->fileName, file, -+ strlen(file) + 1); -+ } -+ zlLib->symTab = NULL; -+ } -+ } -+ /* -+ * Set up objects needed by the dynamic loader -+ */ -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ /* Stream */ -+ zlLib->stream.dlStream.read_buffer = readBuffer; -+ zlLib->stream.dlStream.set_file_posn = setFilePosn; -+ zlLib->stream.lib = zlLib; -+ /* Symbol */ -+ zlLib->symbol.dlSymbol.Add_To_Symbol_Table = 
addToSymbolTable; -+ zlLib->symbol.dlSymbol.Find_Matching_Symbol = findSymbol; -+ zlLib->symbol.dlSymbol.Purge_Symbol_Table = purgeSymbolTable; -+ zlLib->symbol.dlSymbol.Allocate = allocate; -+ zlLib->symbol.dlSymbol.Deallocate = deallocate; -+ zlLib->symbol.dlSymbol.Error_Report = errorReport; -+ zlLib->symbol.lib = zlLib; -+ /* Allocate */ -+ zlLib->allocate.dlAlloc.Allocate = rmmAlloc; -+ zlLib->allocate.dlAlloc.Deallocate = rmmDealloc; -+ zlLib->allocate.lib = zlLib; -+ /* Init */ -+ zlLib->init.dlInit.connect = connect; -+ zlLib->init.dlInit.readmem = readMem; -+ zlLib->init.dlInit.writemem = writeMem; -+ zlLib->init.dlInit.fillmem = fillMem; -+ zlLib->init.dlInit.execute = execute; -+ zlLib->init.dlInit.release = release; -+ zlLib->init.lib = zlLib; -+ if (DSP_SUCCEEDED(status) && zlLib->fp == NULL) -+ status = dofOpen(zlLib); -+ -+ zlLib->ulPos = (*(zlLib->pTarget->attrs.ftell)) (zlLib->fp); -+ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long) 0, SEEK_SET); -+ /* Create a hash table for symbols if flag is set */ -+ if (zlLib->symTab != NULL || !(flags & DBLL_SYMB)) -+ goto func_cont; -+ -+ zlLib->symTab = GH_create(MAXBUCKETS, sizeof(struct Symbol), nameHash, -+ nameMatch, symDelete); -+ if (zlLib->symTab == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ /* Do a fake load to get symbols - set write function to NoOp */ -+ zlLib->init.dlInit.writemem = NoOp; -+ err = Dynamic_Open_Module(&zlLib->stream.dlStream, -+ &zlLib->symbol.dlSymbol, -+ &zlLib->allocate.dlAlloc, -+ &zlLib->init.dlInit, 0, -+ &zlLib->mHandle); -+ if (err != 0) { -+ GT_1trace(DBLL_debugMask, GT_6CLASS, "DBLL_open: " -+ "Dynamic_Load_Module failed: 0x%lx\n", err); -+ status = DSP_EDYNLOAD; -+ } else { -+ /* Now that we have the symbol table, we can unload */ -+ err = Dynamic_Unload_Module(zlLib->mHandle, -+ &zlLib->symbol.dlSymbol, -+ &zlLib->allocate.dlAlloc, -+ &zlLib->init.dlInit); -+ if (err != 0) { -+ GT_1trace(DBLL_debugMask, GT_6CLASS, -+ "DBLL_open: " -+ "Dynamic_Unload_Module failed: 0x%lx\n", -+ err); -+ status = DSP_EDYNLOAD; -+ } -+ zlLib->mHandle = NULL; -+ } -+ } -+func_cont: -+ if (DSP_SUCCEEDED(status)) { -+ if (zlLib->openRef == 1) { -+ /* First time opened - insert in list */ -+ if (zlTarget->head) -+ (zlTarget->head)->prev = zlLib; -+ -+ zlLib->prev = NULL; -+ zlLib->next = zlTarget->head; -+ zlTarget->head = zlLib; -+ } -+ *pLib = (struct DBLL_LibraryObj *)zlLib; -+ } else { -+ *pLib = NULL; -+ if (zlLib != NULL) -+ DBLL_close((struct DBLL_LibraryObj *)zlLib); -+ -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && (zlLib->openRef > 0) && -+ MEM_IsValidHandle(((struct DBLL_LibraryObj *)(*pLib)), -+ DBLL_LIBSIGNATURE)) || (DSP_FAILED(status) && *pLib == NULL)); -+ return status; -+} -+ -+/* -+ * ======== DBLL_readSect ======== -+ * Get the content of a COFF section. -+ */ -+DSP_STATUS DBLL_readSect(struct DBLL_LibraryObj *lib, char *name, -+ char *pContent, u32 size) -+{ -+ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; -+ bool fOpenedDoff = false; -+ u32 uByteSize; /* size of bytes */ -+ u32 ulSectSize; /* size of section */ -+ const struct LDR_SECTION_INFO *sect = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ DBC_Require(name != NULL); -+ DBC_Require(pContent != NULL); -+ DBC_Require(size != 0); -+ -+ GT_4trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_readSect: lib: 0x%x name: %s " -+ "pContent: 0x%x size: 0x%x\n", lib, name, pContent, size); -+ /* If DOFF file is not open, we open it. 
*/ -+ if (zlLib != NULL) { -+ if (zlLib->fp == NULL) { -+ status = dofOpen(zlLib); -+ if (DSP_SUCCEEDED(status)) -+ fOpenedDoff = true; -+ -+ } else { -+ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, -+ zlLib->ulPos, SEEK_SET); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ } -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ uByteSize = 1; -+ if (!DLOAD_GetSectionInfo(zlLib->desc, name, §)) { -+ status = DSP_ENOSECT; -+ goto func_cont; -+ } -+ /* -+ * Ensure the supplied buffer size is sufficient to store -+ * the section content to be read. -+ */ -+ ulSectSize = sect->size * uByteSize; -+ /* Make sure size is even for good swap */ -+ if (ulSectSize % 2) -+ ulSectSize++; -+ -+ /* Align size */ -+ ulSectSize = DOFF_ALIGN(ulSectSize); -+ if (ulSectSize > size) { -+ status = DSP_EFAIL; -+ } else { -+ if (!DLOAD_GetSection(zlLib->desc, sect, pContent)) -+ status = DSP_EFREAD; -+ -+ } -+func_cont: -+ if (fOpenedDoff) { -+ dofClose(zlLib); -+ fOpenedDoff = false; -+ } -+ return status; -+} -+ -+/* -+ * ======== DBLL_setAttrs ======== -+ * Set the attributes of the target. -+ */ -+void DBLL_setAttrs(struct DBLL_TarObj *target, struct DBLL_Attrs *pAttrs) -+{ -+ struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); -+ DBC_Require(pAttrs != NULL); -+ GT_2trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_setAttrs: target: 0x%x pAttrs: " -+ "0x%x\n", target, pAttrs); -+ if ((pAttrs != NULL) && (zlTarget != NULL)) -+ zlTarget->attrs = *pAttrs; -+ -+} -+ -+/* -+ * ======== DBLL_unload ======== -+ */ -+void DBLL_unload(struct DBLL_LibraryObj *lib, struct DBLL_Attrs *attrs) -+{ -+ struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; -+ s32 err = 0; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); -+ DBC_Require(zlLib->loadRef > 0); -+ GT_1trace(DBLL_debugMask, GT_ENTER, "DBLL_unload: lib: 0x%x\n", lib); -+ zlLib->loadRef--; -+ /* Unload only if reference count is 0 */ -+ if (zlLib->loadRef != 0) -+ goto func_end; -+ -+ zlLib->pTarget->attrs = *attrs; -+ if (zlLib != NULL) { -+ if (zlLib->mHandle) { -+ err = Dynamic_Unload_Module(zlLib->mHandle, -+ &zlLib->symbol.dlSymbol, -+ &zlLib->allocate.dlAlloc, &zlLib->init.dlInit); -+ if (err != 0) { -+ GT_1trace(DBLL_debugMask, GT_5CLASS, -+ "Dynamic_Unload_Module " -+ "failed: 0x%x\n", err); -+ } -+ } -+ /* remove symbols from symbol table */ -+ if (zlLib->symTab != NULL) { -+ GH_delete(zlLib->symTab); -+ zlLib->symTab = NULL; -+ } -+ /* delete DOFF desc since it holds *lots* of host OS -+ * resources */ -+ dofClose(zlLib); -+ } -+func_end: -+ DBC_Ensure(zlLib->loadRef >= 0); -+} -+ -+/* -+ * ======== DBLL_unloadSect ======== -+ * Not supported for COFF. 
-+ */ -+DSP_STATUS DBLL_unloadSect(struct DBLL_LibraryObj *lib, char *sectName, -+ struct DBLL_Attrs *attrs) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(sectName != NULL); -+ GT_2trace(DBLL_debugMask, GT_ENTER, -+ "DBLL_unloadSect: lib: 0x%x sectName: " -+ "%s\n", lib, sectName); -+ return DSP_ENOTIMPL; -+} -+ -+/* -+ * ======== dofClose ======== -+ */ -+static void dofClose(struct DBLL_LibraryObj *zlLib) -+{ -+ if (zlLib->desc) { -+ DLOAD_module_close(zlLib->desc); -+ zlLib->desc = NULL; -+ } -+ /* close file */ -+ if (zlLib->fp) { -+ (zlLib->pTarget->attrs.fclose) (zlLib->fp); -+ zlLib->fp = NULL; -+ } -+} -+ -+/* -+ * ======== dofOpen ======== -+ */ -+static DSP_STATUS dofOpen(struct DBLL_LibraryObj *zlLib) -+{ -+ void *open = *(zlLib->pTarget->attrs.fopen); -+ DSP_STATUS status = DSP_SOK; -+ -+ /* First open the file for the dynamic loader, then open COF */ -+ zlLib->fp = (void *)((DBLL_FOpenFxn)(open))(zlLib->fileName, "rb"); -+ -+ /* Open DOFF module */ -+ if (zlLib->fp && zlLib->desc == NULL) { -+ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long)0, SEEK_SET); -+ zlLib->desc = DLOAD_module_open(&zlLib->stream.dlStream, -+ &zlLib->symbol.dlSymbol); -+ if (zlLib->desc == NULL) { -+ (zlLib->pTarget->attrs.fclose)(zlLib->fp); -+ zlLib->fp = NULL; -+ status = DSP_EFOPEN; -+ } -+ } else { -+ status = DSP_EFOPEN; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== nameHash ======== -+ */ -+static u16 nameHash(void *key, u16 maxBucket) -+{ -+ u16 ret; -+ u16 hash; -+ char *name = (char *)key; -+ -+ DBC_Require(name != NULL); -+ -+ hash = 0; -+ -+ while (*name) { -+ hash <<= 1; -+ hash ^= *name++; -+ } -+ -+ ret = hash % maxBucket; -+ -+ return ret; -+} -+ -+/* -+ * ======== nameMatch ======== -+ */ -+static bool nameMatch(void *key, void *value) -+{ -+ DBC_Require(key != NULL); -+ DBC_Require(value != NULL); -+ -+ if ((key != NULL) && (value != NULL)) { -+ if (strcmp((char *)key, ((struct Symbol *)value)->name) == 0) -+ return true; -+ } -+ return false; -+} -+ -+/* -+ * ======== NoOp ======== -+ */ -+static int NoOp(struct Dynamic_Loader_Initialize *thisptr, void *bufr, -+ LDR_ADDR locn, struct LDR_SECTION_INFO *info, unsigned bytsize) -+{ -+ return 1; -+} -+ -+/* -+ * ======== symDelete ======== -+ */ -+static void symDelete(void *value) -+{ -+ struct Symbol *sp = (struct Symbol *)value; -+ -+ MEM_Free(sp->name); -+} -+ -+/* -+ * Dynamic Loader Functions -+ */ -+ -+/* Dynamic_Loader_Stream */ -+/* -+ * ======== readBuffer ======== -+ */ -+static int readBuffer(struct Dynamic_Loader_Stream *this, void *buffer, -+ unsigned bufsize) -+{ -+ struct DBLLStream *pStream = (struct DBLLStream *)this; -+ struct DBLL_LibraryObj *lib; -+ int bytesRead = 0; -+ -+ DBC_Require(this != NULL); -+ lib = pStream->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ if (lib != NULL) { -+ bytesRead = (*(lib->pTarget->attrs.fread))(buffer, 1, bufsize, -+ lib->fp); -+ } -+ return bytesRead; -+} -+ -+/* -+ * ======== setFilePosn ======== -+ */ -+static int setFilePosn(struct Dynamic_Loader_Stream *this, unsigned int pos) -+{ -+ struct DBLLStream *pStream = (struct DBLLStream *)this; -+ struct DBLL_LibraryObj *lib; -+ int status = 0; /* Success */ -+ -+ DBC_Require(this != NULL); -+ lib = pStream->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ if (lib != NULL) { -+ status = (*(lib->pTarget->attrs.fseek))(lib->fp, (long)pos, -+ SEEK_SET); -+ } -+ -+ return status; -+} -+ -+/* Dynamic_Loader_Sym */ -+ -+/* -+ * ======== findSymbol ======== -+ */ -+static struct 
dynload_symbol *findSymbol(struct Dynamic_Loader_Sym *this, -+ const char *name) -+{ -+ struct dynload_symbol *retSym; -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ struct DBLL_Symbol *pSym = NULL; -+ bool status = false; /* Symbol not found yet */ -+ -+ DBC_Require(this != NULL); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ if (lib != NULL) { -+ if (lib->pTarget->attrs.symLookup) { -+ /* Check current lib + base lib + dep lib + -+ * persistent lib */ -+ status = (*(lib->pTarget->attrs.symLookup)) -+ (lib->pTarget->attrs.symHandle, -+ lib->pTarget->attrs.symArg, -+ lib->pTarget->attrs.rmmHandle, name, &pSym); -+ } else { -+ /* Just check current lib for symbol */ -+ status = DBLL_getAddr((struct DBLL_LibraryObj *)lib, -+ (char *)name, &pSym); -+ if (!status) { -+ status = -+ DBLL_getCAddr((struct DBLL_LibraryObj *)lib, -+ (char *)name, &pSym); -+ } -+ } -+ } -+ -+ if (!status && bGblSearch) { -+ GT_1trace(DBLL_debugMask, GT_6CLASS, -+ "findSymbol: Symbol not found: %s\n", name); -+ } -+ -+ DBC_Assert((status && (pSym != NULL)) || (!status && (pSym == NULL))); -+ -+ retSym = (struct dynload_symbol *)pSym; -+ return retSym; -+} -+ -+/* -+ * ======== findInSymbolTable ======== -+ */ -+static struct dynload_symbol *findInSymbolTable(struct Dynamic_Loader_Sym *this, -+ const char *name, -+ unsigned moduleid) -+{ -+ struct dynload_symbol *retSym; -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ struct Symbol *sym; -+ -+ DBC_Require(this != NULL); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ DBC_Require(lib->symTab != NULL); -+ -+ sym = (struct Symbol *)GH_find(lib->symTab, (char *) name); -+ -+ retSym = (struct dynload_symbol *)&sym->value; -+ return retSym; -+} -+ -+/* -+ * ======== addToSymbolTable ======== -+ */ -+static struct dynload_symbol *addToSymbolTable(struct Dynamic_Loader_Sym *this, -+ const char *name, -+ unsigned moduleId) -+{ -+ struct Symbol *symPtr = NULL; -+ struct Symbol symbol; -+ struct dynload_symbol *pSym = NULL; -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ struct dynload_symbol *retVal; -+ -+ DBC_Require(this != NULL); -+ DBC_Require(name); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ /* Check to see if symbol is already defined in symbol table */ -+ if (!(lib->pTarget->attrs.baseImage)) { -+ bGblSearch = false; -+ pSym = findSymbol(this, name); -+ bGblSearch = true; -+ if (pSym) { -+ bRedefinedSymbol = true; -+ GT_1trace(DBLL_debugMask, GT_6CLASS, -+ "Symbol already defined in " -+ "symbol table: %s\n", name); -+ return NULL; -+ } -+ } -+ /* Allocate string to copy symbol name */ -+ symbol.name = (char *)MEM_Calloc(strlen((char *const)name) + 1, -+ MEM_PAGED); -+ if (symbol.name == NULL) -+ return NULL; -+ -+ if (symbol.name != NULL) { -+ /* Just copy name (value will be filled in by dynamic loader) */ -+ strncpy(symbol.name, (char *const)name, -+ strlen((char *const)name) + 1); -+ -+ /* Add symbol to symbol table */ -+ symPtr = (struct Symbol *)GH_insert(lib->symTab, (void *)name, -+ (void *)&symbol); -+ if (symPtr == NULL) -+ MEM_Free(symbol.name); -+ -+ } -+ if (symPtr != NULL) -+ retVal = (struct dynload_symbol *)&symPtr->value; -+ else -+ retVal = NULL; -+ -+ return retVal; -+} -+ -+/* -+ * ======== purgeSymbolTable ======== -+ */ -+static void purgeSymbolTable(struct Dynamic_Loader_Sym *this, unsigned 
moduleId) -+{ -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ -+ DBC_Require(this != NULL); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ /* May not need to do anything */ -+} -+ -+/* -+ * ======== allocate ======== -+ */ -+static void *allocate(struct Dynamic_Loader_Sym *this, unsigned memsize) -+{ -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ void *buf; -+ -+ DBC_Require(this != NULL); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ buf = MEM_Calloc(memsize, MEM_PAGED); -+ -+ return buf; -+} -+ -+/* -+ * ======== deallocate ======== -+ */ -+static void deallocate(struct Dynamic_Loader_Sym *this, void *memPtr) -+{ -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ -+ DBC_Require(this != NULL); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ MEM_Free(memPtr); -+} -+ -+/* -+ * ======== errorReport ======== -+ */ -+static void errorReport(struct Dynamic_Loader_Sym *this, const char *errstr, -+ va_list args) -+{ -+ struct DBLLSymbol *pSymbol = (struct DBLLSymbol *)this; -+ struct DBLL_LibraryObj *lib; -+ char tempBuf[MAXEXPR]; -+ -+ DBC_Require(this != NULL); -+ lib = pSymbol->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ vsnprintf((char *)tempBuf, MAXEXPR, (char *)errstr, args); -+ GT_1trace(DBLL_debugMask, GT_5CLASS, "%s\n", tempBuf); -+} -+ -+/* Dynamic_Loader_Allocate */ -+ -+/* -+ * ======== rmmAlloc ======== -+ */ -+static int rmmAlloc(struct Dynamic_Loader_Allocate *this, -+ struct LDR_SECTION_INFO *info, unsigned align) -+{ -+ struct DBLLAlloc *pAlloc = (struct DBLLAlloc *)this; -+ struct DBLL_LibraryObj *lib; -+ DSP_STATUS status = DSP_SOK; -+ u32 memType; -+ struct RMM_Addr rmmAddr; -+ s32 retVal = TRUE; -+ unsigned stype = DLOAD_SECTION_TYPE(info->type); -+ char *pToken = NULL; -+ char *szSecLastToken = NULL; -+ char *szLastToken = NULL; -+ char *szSectName = NULL; -+ char *pszCur; -+ s32 tokenLen = 0; -+ s32 segId = -1; -+ s32 req = -1; -+ s32 count = 0; -+ u32 allocSize = 0; -+ -+ DBC_Require(this != NULL); -+ lib = pAlloc->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ memType = (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == DLOAD_BSS) ? 
-+ DBLL_BSS : DBLL_DATA; -+ -+ /* Attempt to extract the segment ID and requirement information from -+ the name of the section */ -+ DBC_Require(info->name); -+ tokenLen = strlen((char *)(info->name)) + 1; -+ -+ szSectName = MEM_Calloc(tokenLen, MEM_PAGED); -+ szLastToken = MEM_Calloc(tokenLen, MEM_PAGED); -+ szSecLastToken = MEM_Calloc(tokenLen, MEM_PAGED); -+ -+ if (szSectName == NULL || szSecLastToken == NULL || -+ szLastToken == NULL) { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ strncpy(szSectName, (char *)(info->name), tokenLen); -+ pszCur = szSectName; -+ while ((pToken = strsep(&pszCur, ":")) && *pToken != '\0') { -+ strncpy(szSecLastToken, szLastToken, strlen(szLastToken) + 1); -+ strncpy(szLastToken, pToken, strlen(pToken) + 1); -+ pToken = strsep(&pszCur, ":"); -+ count++; /* optimizes processing*/ -+ } -+ /* If pToken is 0 or 1, and szSecLastToken is DYN_DARAM or DYN_SARAM, -+ or DYN_EXTERNAL, then mem granularity information is present -+ within the section name - only process if there are at least three -+ tokens within the section name (just a minor optimization)*/ -+ if (count >= 3) -+ strict_strtol(szLastToken, 10, (long *)&req); -+ -+ if ((req == 0) || (req == 1)) { -+ if (strcmp(szSecLastToken, "DYN_DARAM") == 0) { -+ segId = 0; -+ } else { -+ if (strcmp(szSecLastToken, "DYN_SARAM") == 0) { -+ segId = 1; -+ } else { -+ if (strcmp(szSecLastToken, -+ "DYN_EXTERNAL") == 0) { -+ segId = 2; -+ } -+ } -+ } -+ if (segId != -1) { -+ GT_2trace(DBLL_debugMask, GT_5CLASS, -+ "Extracted values for memory" -+ " granularity req [%d] segId [%d]\n", -+ req, segId); -+ } -+ } -+ MEM_Free(szSectName); -+ szSectName = NULL; -+ MEM_Free(szLastToken); -+ szLastToken = NULL; -+ MEM_Free(szSecLastToken); -+ szSecLastToken = NULL; -+func_cont: -+ if (memType == DBLL_CODE) -+ allocSize = info->size + GEM_L1P_PREFETCH_SIZE; -+ else -+ allocSize = info->size; -+ /* TODO - ideally, we can pass the alignment requirement also -+ * from here */ -+ if (lib != NULL) { -+ status = (lib->pTarget->attrs.alloc)(lib->pTarget-> -+ attrs.rmmHandle, memType, allocSize, align, -+ (u32 *)&rmmAddr, segId, req, FALSE); -+ } -+ if (DSP_FAILED(status)) { -+ retVal = false; -+ } else { -+ /* RMM gives word address. Need to convert to byte address */ -+ info->load_addr = rmmAddr.addr * DSPWORDSIZE; -+ info->run_addr = info->load_addr; -+ info->context = (u32)rmmAddr.segid; -+ GT_3trace(DBLL_debugMask, GT_5CLASS, -+ "Remote alloc: %s base = 0x%lx len" -+ "= 0x%lx\n", info->name, info->load_addr / DSPWORDSIZE, -+ info->size / DSPWORDSIZE); -+ } -+ return retVal; -+} -+ -+/* -+ * ======== rmmDealloc ======== -+ */ -+static void rmmDealloc(struct Dynamic_Loader_Allocate *this, -+ struct LDR_SECTION_INFO *info) -+{ -+ struct DBLLAlloc *pAlloc = (struct DBLLAlloc *)this; -+ struct DBLL_LibraryObj *lib; -+ u32 segid; -+ DSP_STATUS status = DSP_SOK; -+ unsigned stype = DLOAD_SECTION_TYPE(info->type); -+ u32 memType; -+ u32 freeSize = 0; -+ -+ memType = (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == DLOAD_BSS) ? 
-+ DBLL_BSS : DBLL_DATA; -+ DBC_Require(this != NULL); -+ lib = pAlloc->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ /* segid was set by alloc function */ -+ segid = (u32)info->context; -+ if (memType == DBLL_CODE) -+ freeSize = info->size + GEM_L1P_PREFETCH_SIZE; -+ else -+ freeSize = info->size; -+ if (lib != NULL) { -+ status = (lib->pTarget->attrs.free)(lib->pTarget-> -+ attrs.symHandle, segid, info->load_addr / DSPWORDSIZE, -+ freeSize, false); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ GT_2trace(DBLL_debugMask, GT_5CLASS, -+ "Remote dealloc: base = 0x%lx len =" -+ "0x%lx\n", info->load_addr / DSPWORDSIZE, -+ freeSize / DSPWORDSIZE); -+ } -+} -+ -+/* Dynamic_Loader_Initialize */ -+/* -+ * ======== connect ======== -+ */ -+static int connect(struct Dynamic_Loader_Initialize *this) -+{ -+ return true; -+} -+ -+/* -+ * ======== readMem ======== -+ * This function does not need to be implemented. -+ */ -+static int readMem(struct Dynamic_Loader_Initialize *this, void *buf, -+ LDR_ADDR addr, struct LDR_SECTION_INFO *info, -+ unsigned nbytes) -+{ -+ struct DBLLInit *pInit = (struct DBLLInit *)this; -+ struct DBLL_LibraryObj *lib; -+ int bytesRead = 0; -+ -+ DBC_Require(this != NULL); -+ lib = pInit->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ /* Need WMD_BRD_Read function */ -+ return bytesRead; -+} -+ -+/* -+ * ======== writeMem ======== -+ */ -+static int writeMem(struct Dynamic_Loader_Initialize *this, void *buf, -+ LDR_ADDR addr, struct LDR_SECTION_INFO *info, -+ unsigned nBytes) -+{ -+ struct DBLLInit *pInit = (struct DBLLInit *)this; -+ struct DBLL_LibraryObj *lib; -+ struct DBLL_TarObj *pTarget; -+ struct DBLL_SectInfo sectInfo; -+ u32 memType; -+ bool retVal = true; -+ -+ DBC_Require(this != NULL); -+ lib = pInit->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ -+ memType = (DLOAD_SECTION_TYPE(info->type) == DLOAD_TEXT) ? DBLL_CODE : -+ DBLL_DATA; -+ if ((lib != NULL) && -+ ((pTarget = lib->pTarget) != NULL) && -+ (pTarget->attrs.write != NULL)) { -+ retVal = (*pTarget->attrs.write)(pTarget->attrs.wHandle, -+ addr, buf, nBytes, memType); -+ -+ if (pTarget->attrs.logWrite) { -+ sectInfo.name = info->name; -+ sectInfo.runAddr = info->run_addr; -+ sectInfo.loadAddr = info->load_addr; -+ sectInfo.size = info->size; -+ sectInfo.type = memType; -+ /* Pass the information about what we've written to -+ * another module */ -+ (*pTarget->attrs.logWrite)( -+ pTarget->attrs.logWriteHandle, -+ §Info, addr, nBytes); -+ } -+ } -+ return retVal; -+} -+ -+/* -+ * ======== fillMem ======== -+ * Fill nBytes of memory at a given address with a given value by -+ * writing from a buffer containing the given value. Write in -+ * sets of MAXEXPR (128) bytes to avoid large stack buffer issues. -+ */ -+static int fillMem(struct Dynamic_Loader_Initialize *this, LDR_ADDR addr, -+ struct LDR_SECTION_INFO *info, unsigned nBytes, -+ unsigned val) -+{ -+ bool retVal = true; -+ char *pBuf; -+ struct DBLL_LibraryObj *lib; -+ struct DBLLInit *pInit = (struct DBLLInit *)this; -+ -+ DBC_Require(this != NULL); -+ lib = pInit->lib; -+ pBuf = NULL; -+ /* Pass the NULL pointer to writeMem to get the start address of Shared -+ memory. 
This is a trick to just get the start address, there is no -+ writing taking place with this Writemem -+ */ -+ if ((lib->pTarget->attrs.write) != (DBLL_WriteFxn)NoOp) -+ writeMem(this, &pBuf, addr, info, 0); -+ if (pBuf) -+ memset(pBuf, val, nBytes); -+ -+ return retVal; -+} -+ -+/* -+ * ======== execute ======== -+ */ -+static int execute(struct Dynamic_Loader_Initialize *this, LDR_ADDR start) -+{ -+ struct DBLLInit *pInit = (struct DBLLInit *)this; -+ struct DBLL_LibraryObj *lib; -+ bool retVal = true; -+ -+ DBC_Require(this != NULL); -+ lib = pInit->lib; -+ DBC_Require(MEM_IsValidHandle(lib, DBLL_LIBSIGNATURE)); -+ /* Save entry point */ -+ if (lib != NULL) -+ lib->entry = (u32)start; -+ -+ return retVal; -+} -+ -+/* -+ * ======== release ======== -+ */ -+static void release(struct Dynamic_Loader_Initialize *this) -+{ -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dev.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dev.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dev.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dev.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,1476 @@ -+/* -+ * dev.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== dev.c ======== -+ * Description: -+ * Implementation of 'Bridge Mini-driver device operations. -+ * -+ * Public Functions: -+ * DEV_BrdWriteFxn -+ * DEV_CreateDevice -+ * DEV_Create2 -+ * DEV_Destroy2 -+ * DEV_DestroyDevice -+ * DEV_GetChnlMgr -+ * DEV_GetCmmMgr -+ * DEV_GetCodMgr -+ * DEV_GetDehMgr -+ * DEV_GetDevNode -+ * DEV_GetDSPWordSize -+ * DEV_GetFirst -+ * DEV_GetIntfFxns -+ * DEV_GetIOMgr -+ * DEV_GetNext -+ * DEV_GetNodeManager -+ * DEV_GetSymbol -+ * DEV_GetWMDContext -+ * DEV_Exit -+ * DEV_Init -+ * DEV_InsertProcObject -+ * DEV_IsLocked -+ * DEV_NotifyClient -+ * DEV_RegisterNotify -+ * DEV_ReleaseCodMgr -+ * DEV_RemoveDevice -+ * DEV_RemoveProcObject -+ * DEV_SetChnlMgr -+ * DEV_SetMsgMgr -+ * DEV_SetLockOwner -+ * DEV_StartDevice -+ * -+ * Private Functions: -+ * FxnNotImplemented -+ * InitCodMgr -+ * InsertDevObject -+ * IsValidHandle -+ * RemoveDevObject -+ * StoreInterfaceFxns -+ * -+ *! Revision History: -+ *! ================ -+ *! 03-Jan-2005 hn Support for IVA DEH -+ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature -+ *! 09-Feb-2004 vp Updated to support IVA. -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 29-Nov-2001 jeh Check for DSP_ENOTIMPL status of DEH create function. -+ *! 05-Nov-2001 kc Added support for DEH module. -+ *! 05-Aug-2001 ag Shared memory registration moved to WMD_IO_OnLoaded(). -+ *! 11-Jul-2001 jeh Moved MGR_Create() from DSP_Init() to DEV_StartDevice(). -+ *! 11-Apr-2001 rr: Removed CMM_RegisterGPPSMSeg. -+ *! 02-Apr-2001 rr: CHNL_Create failure is printed out. -+ *! 15-Jan-2001 jeh Removed call to IO_OnLoaded() from DEV_Create2(). -+ *! 13-Feb-2001 kc: DSP/BIOS Bridge name update. -+ *! 15-Dec-2000 rr: Dev_Create2 returns error if NODE_CreateMgr fails. -+ *! 
05-Dec-2000 jeh Moved IO_OnLoaded() to PROC_Load. Added DEV_SetMsgMgr. -+ *! 05-Dev-2000 ag SM Heap for messaging registered via CMM_RegisterGPPSMSeg(). -+ *! SM heap base and size currently taken from registry. -+ *! 29-Nov-2000 rr: Incorporated code review changes. -+ *! 17-Nov-2000 jeh Added calls to get IO manager (IO_Create), IO_OnLoaded(). -+ *! 06-Oct-2000 rr: DEV_Destroy2 and DEV_Create2 added. -+ *! 02-Oct-2000 rr: DEV_GetNodeManager added. -+ *! 11-Aug-2000 ag: Added DEV_GetCmmMgr(), CMM_Init() & CMM_Exit(). -+ *! Removed & , added -+ *! 10-Aug-2000 rr: DEV_InsertProcObject/RemoveProcObject added. -+ *! DEV_Cleanup calls PROC_Detach if it is a matching process. -+ *! 27-Jul-2000 rr: DEV is in new directoy DEV and produces devlib.lib -+ *! 17-Jul-2000 rr: DRV Object holds the list of Dev Objects. DEV gets -+ *! the List and Next devices through DRV. -+ *! DEV object has a back pointer to DRV Object. -+ *! 06-Jun-2000 jeh Added DEV_GetSymbol(). -+ *! 09-May-2000 rr: dwMemBase has index for multiple windows need. -+ *! 28-Feb-2000 rr: New GT Usage implemented. -+ *! 03-Feb-2000 rr: GT and Module init/exit Changes.(Done up front from -+ *! SERVICES) -+ *! 31-Jan-2000 rr: Comments changed after code review. -+ *! 21-Jan-2000 rr: windows.h, tchar.h, HMODULE removed. FreeLibrary replaced -+ *! with LDR_FreeModule -+ *! 17-Jan-2000 rr: CFG_Get/SetPrivateDword renamed to CFG_Get/SetDevObject. -+ *! StoreInterfaceFxns stores the new fxn WMD_BRD_SETSTATE. -+ *! 20-Nov-1999 ag: Actual uSMLength = total - monitor offset. -+ *! 12-Nov-1999 rr: bIRQ and IRQAttrib taken from the struct CFG_HOSTRES. -+ *! dMemBase is added with offset for monitor taken from -+ *! registry. -+ *! 31-Oct-1999 ag: Added CHNL support. -+ *! 10-Sep-1999 rr: GT Enabled. DEV_Create will Load the Mini Driver and will -+ *! find its fxn table. Right now lot of things are hardcoded -+ *! as the REG is not ready. -+ *! 10-Jun-1996 rr: Created from WSX -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+#include /* WCD version info. */ -+ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+ -+#define SIGNATURE 0x5f564544 /* "DEV_" (in reverse) */ -+#define MAKEVERSION(major, minor) (major * 10 + minor) -+#define WCDVERSION MAKEVERSION(WCD_MAJOR_VERSION, WCD_MINOR_VERSION) -+ -+/* The WMD device object: */ -+struct DEV_OBJECT { -+ /* LST requires "link" to be first field! */ -+ struct LST_ELEM link; /* Link to next DEV_OBJECT. */ -+ u32 devType; /* Device Type */ -+ u32 dwSignature; /* Used for object validation. */ -+ struct CFG_DEVNODE *hDevNode; /* Platform specific device id */ -+ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD Context Handle */ -+ struct WMD_DRV_INTERFACE intfFxns; /* Function interface to WMD. */ -+ struct BRD_OBJECT *lockOwner; /* Client with exclusive access. 
*/ -+ struct COD_MANAGER *hCodMgr; /* Code manager handle. */ -+ struct CHNL_MGR *hChnlMgr; /* Channel manager. */ -+ struct DEH_MGR *hDehMgr; /* DEH manager. */ -+ struct MSG_MGR *hMsgMgr; /* Message manager. */ -+ struct IO_MGR *hIOMgr; /* IO manager (CHNL, MSG) */ -+ struct CMM_OBJECT *hCmmMgr; /* SM memory manager. */ -+ struct DMM_OBJECT *hDmmMgr; /* Dynamic memory manager. */ -+ struct LDR_MODULE *hModule; /* WMD Module handle. */ -+ u32 uWordSize; /* DSP word size: quick access. */ -+ struct DRV_OBJECT *hDrvObject; /* Driver Object */ -+ struct LST_LIST *procList; /* List of Proceeosr attached to -+ * this device */ -+ struct NODE_MGR *hNodeMgr; -+} ; -+ -+/* ----------------------------------- Globals */ -+static u32 cRefs; /* Module reference count */ -+#if GT_TRACE -+static struct GT_Mask debugMask = { NULL, NULL }; /* For debugging */ -+#endif -+ -+/* ----------------------------------- Function Prototypes */ -+static DSP_STATUS FxnNotImplemented(int arg, ...); -+static DSP_STATUS InitCodMgr(struct DEV_OBJECT *pDevObject); -+static bool IsValidHandle(struct DEV_OBJECT *hObj); -+static void StoreInterfaceFxns(struct WMD_DRV_INTERFACE *pDrvFxns, -+ OUT struct WMD_DRV_INTERFACE *pIntfFxns); -+/* -+ * ======== DEV_BrdWriteFxn ======== -+ * Purpose: -+ * Exported function to be used as the COD write function. This function -+ * is passed a handle to a DEV_hObject, then calls the -+ * device's WMD_BRD_Write() function. -+ */ -+u32 DEV_BrdWriteFxn(void *pArb, u32 ulDspAddr, void *pHostBuf, -+ u32 ulNumBytes, u32 nMemSpace) -+{ -+ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)pArb; -+ u32 ulWritten = 0; -+ DSP_STATUS status; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pHostBuf != NULL); /* Required of BrdWrite(). */ -+ GT_5trace(debugMask, GT_ENTER, -+ "Entered DEV_BrdWriteFxn, pArb: 0x%x\n\t\t" -+ "ulDspAddr: 0x%x\n\t\tpHostBuf: 0x%x\n \t\tulNumBytes: 0x%x\n" -+ "\t\tnMemSpace: 0x%x\n", pArb, ulDspAddr, pHostBuf, -+ ulNumBytes, nMemSpace); -+ if (IsValidHandle(pDevObject)) { -+ /* Require of BrdWrite() */ -+ DBC_Assert(pDevObject->hWmdContext != NULL); -+ status = (*pDevObject->intfFxns.pfnBrdWrite)(pDevObject-> -+ hWmdContext, pHostBuf, ulDspAddr, ulNumBytes, -+ nMemSpace); -+ /* Special case of getting the address only */ -+ if (ulNumBytes == 0) -+ ulNumBytes = 1; -+ if (DSP_SUCCEEDED(status)) -+ ulWritten = ulNumBytes; -+ -+ } -+ GT_1trace(debugMask, GT_ENTER, "Exit DEV_BrdWriteFxn ulWritten: 0x%x\n", -+ ulWritten); -+ return ulWritten; -+} -+ -+/* -+ * ======== DEV_CreateDevice ======== -+ * Purpose: -+ * Called by the operating system to load the PM Mini Driver for a -+ * PM board (device). 
-+ */ -+DSP_STATUS DEV_CreateDevice(OUT struct DEV_OBJECT **phDevObject, -+ IN CONST char *pstrWMDFileName, -+ IN CONST struct CFG_HOSTRES *pHostConfig, -+ IN CONST struct CFG_DSPRES *pDspConfig, -+ struct CFG_DEVNODE *hDevNode) -+{ -+ struct LDR_MODULE *hModule = NULL; -+ struct WMD_DRV_INTERFACE *pDrvFxns = NULL; -+ struct DEV_OBJECT *pDevObject = NULL; -+ struct CHNL_MGRATTRS mgrAttrs; -+ struct IO_ATTRS ioMgrAttrs; -+ u32 uNumWindows; -+ struct DRV_OBJECT *hDrvObject = NULL; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDevObject != NULL); -+ DBC_Require(pstrWMDFileName != NULL); -+ DBC_Require(pHostConfig != NULL); -+ DBC_Require(pDspConfig != NULL); -+ -+ GT_5trace(debugMask, GT_ENTER, -+ "Entered DEV_CreateDevice, phDevObject: 0x%x\n" -+ "\t\tpstrWMDFileName: 0x%x\n\t\tpHostConfig:0x%x\n\t\t" -+ "pDspConfig: 0x%x\n\t\tnhDevNode: 0x%x\n", phDevObject, -+ pstrWMDFileName, pHostConfig, pDspConfig, hDevNode); -+ /* Get the WMD interface functions*/ -+ WMD_DRV_Entry(&pDrvFxns, pstrWMDFileName); -+ if (DSP_FAILED(CFG_GetObject((u32 *) &hDrvObject, REG_DRV_OBJECT))) { -+ /* don't propogate CFG errors from this PROC function */ -+ GT_0trace(debugMask, GT_7CLASS, -+ "Failed to get the DRV Object \n"); -+ status = DSP_EFAIL; -+ } -+ /* Create the device object, and pass a handle to the WMD for -+ * storage. */ -+ if (DSP_SUCCEEDED(status)) { -+ DBC_Assert(pDrvFxns); -+ MEM_AllocObject(pDevObject, struct DEV_OBJECT, SIGNATURE); -+ if (pDevObject) { -+ /* Fill out the rest of the Dev Object structure: */ -+ pDevObject->hDevNode = hDevNode; -+ pDevObject->hModule = hModule; -+ pDevObject->hCodMgr = NULL; -+ pDevObject->hChnlMgr = NULL; -+ pDevObject->hDehMgr = NULL; -+ pDevObject->lockOwner = NULL; -+ pDevObject->uWordSize = pDspConfig->uWordSize; -+ pDevObject->hDrvObject = hDrvObject; -+ pDevObject->devType = DSP_UNIT; -+ /* Store this WMD's interface functions, based on its -+ * version. */ -+ StoreInterfaceFxns(pDrvFxns, &pDevObject->intfFxns); -+ /* Call WMD_DEV_CREATE() to get the WMD's device -+ * context handle. 
*/ -+ status = (pDevObject->intfFxns.pfnDevCreate) -+ (&pDevObject->hWmdContext, pDevObject, -+ pHostConfig, pDspConfig); -+ /* Assert WMD_DEV_Create()'s ensure clause: */ -+ DBC_Assert(DSP_FAILED(status) || (pDevObject-> -+ hWmdContext != NULL)); -+ } else { -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_Create: Out Of Memory"); -+ status = DSP_EMEMORY; -+ } -+ } -+ /* Attempt to create the COD manager for this device: */ -+ if (DSP_SUCCEEDED(status)) -+ status = InitCodMgr(pDevObject); -+ -+ /* Attempt to create the channel manager for this device: */ -+ if (DSP_SUCCEEDED(status)) { -+ mgrAttrs.cChannels = CHNL_MAXCHANNELS; -+ ioMgrAttrs.bIRQ = pHostConfig->bIRQRegisters; -+ ioMgrAttrs.fShared = (pHostConfig->bIRQAttrib & CFG_IRQSHARED); -+ ioMgrAttrs.uWordSize = pDspConfig->uWordSize; -+ mgrAttrs.uWordSize = pDspConfig->uWordSize; -+ uNumWindows = pHostConfig->wNumMemWindows; -+ if (uNumWindows) { -+ /* Assume last memory window is for CHNL */ -+ ioMgrAttrs.dwSMBase = pHostConfig->dwMemBase[1] + -+ pHostConfig->dwOffsetForMonitor; -+ ioMgrAttrs.uSMLength = pHostConfig->dwMemLength[1] - -+ pHostConfig->dwOffsetForMonitor; -+ } else { -+ ioMgrAttrs.dwSMBase = 0; -+ ioMgrAttrs.uSMLength = 0; -+ GT_0trace(debugMask, GT_7CLASS, -+ "**There is no memory reserved for " -+ "shared structures**\n"); -+ } -+ status = CHNL_Create(&pDevObject->hChnlMgr, pDevObject, -+ &mgrAttrs); -+ if (status == DSP_ENOTIMPL) { -+ /* It's OK for a device not to have a channel -+ * manager: */ -+ status = DSP_SOK; -+ } -+ /* Create CMM mgr even if Msg Mgr not impl. */ -+ status = CMM_Create(&pDevObject->hCmmMgr, -+ (struct DEV_OBJECT *)pDevObject, NULL); -+ if (DSP_FAILED(status)) { -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_Create: Failed to Create SM " -+ "Manager\n"); -+ } -+ /* Only create IO manager if we have a channel manager */ -+ if (DSP_SUCCEEDED(status) && pDevObject->hChnlMgr) { -+ status = IO_Create(&pDevObject->hIOMgr, pDevObject, -+ &ioMgrAttrs); -+ } -+ /* Only create DEH manager if we have an IO manager */ -+ if (DSP_SUCCEEDED(status)) { -+ /* Instantiate the DEH module */ -+ status = (*pDevObject->intfFxns.pfnDehCreate) -+ (&pDevObject->hDehMgr, pDevObject); -+ } -+ /* Create DMM mgr . */ -+ status = DMM_Create(&pDevObject->hDmmMgr, -+ (struct DEV_OBJECT *)pDevObject, NULL); -+ if (DSP_FAILED(status)) { -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_Create: Failed to Create DMM " -+ "Manager\n"); -+ } -+ } -+ /* Add the new DEV_Object to the global list: */ -+ if (DSP_SUCCEEDED(status)) { -+ LST_InitElem(&pDevObject->link); -+ status = DRV_InsertDevObject(hDrvObject, pDevObject); -+ } -+ /* Create the Processor List */ -+ if (DSP_SUCCEEDED(status)) { -+ pDevObject->procList = LST_Create(); -+ if (!(pDevObject->procList)) { -+ status = DSP_EFAIL; -+ GT_0trace(debugMask, GT_7CLASS, "DEV_Create: " -+ "Failed to Create Proc List"); -+ } -+ } -+ /* If all went well, return a handle to the dev object; -+ * else, cleanup and return NULL in the OUT parameter. 
*/ -+ if (DSP_SUCCEEDED(status)) { -+ *phDevObject = pDevObject; -+ GT_1trace(debugMask, GT_1CLASS, -+ "DEV_CreateDevice Succeeded \nDevObject " -+ "0x%x\n", pDevObject); -+ } else { -+ if (pDevObject && pDevObject->procList) -+ LST_Delete(pDevObject->procList); -+ -+ if (pDevObject && pDevObject->hCodMgr) -+ COD_Delete(pDevObject->hCodMgr); -+ -+ if (pDevObject && pDevObject->hDmmMgr) -+ DMM_Destroy(pDevObject->hDmmMgr); -+ -+ if (pDevObject) -+ MEM_FreeObject(pDevObject); -+ -+ *phDevObject = NULL; -+ GT_0trace(debugMask, GT_7CLASS, "DEV_CreateDevice Failed\n"); -+ } -+ GT_1trace(debugMask, GT_1CLASS, "Exiting DEV_Create: DevObject 0x%x\n", -+ *phDevObject); -+ DBC_Ensure((DSP_SUCCEEDED(status) && IsValidHandle(*phDevObject)) || -+ (DSP_FAILED(status) && !*phDevObject)); -+ return status; -+} -+ -+/* -+ * ======== DEV_Create2 ======== -+ * Purpose: -+ * After successful loading of the image from WCD_InitComplete2 -+ * (PROC Auto_Start) or PROC_Load this fxn is called. This creates -+ * the Node Manager and updates the DEV Object. -+ */ -+DSP_STATUS DEV_Create2(struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValidHandle(hDevObject)); -+ -+ GT_1trace(debugMask, GT_ENTER, -+ "Entered DEV_Create2, hDevObject: 0x%x\n", hDevObject); -+ /* There can be only one Node Manager per DEV object */ -+ DBC_Assert(!pDevObject->hNodeMgr); -+ status = NODE_CreateMgr(&pDevObject->hNodeMgr, hDevObject); -+ if (DSP_FAILED(status)) { -+ GT_1trace(debugMask, GT_7CLASS, -+ "DEV_Create2: NODE_CreateMgr failed, " -+ "0x%x!\n", status); -+ pDevObject->hNodeMgr = NULL; -+ GT_0trace(debugMask, GT_7CLASS, "DEV_Create2: Failed!!\n"); -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && pDevObject->hNodeMgr != NULL) -+ || (DSP_FAILED(status) && pDevObject->hNodeMgr == NULL)); -+ GT_2trace(debugMask, GT_ENTER, -+ "Exiting DEV_Create2, hNodeMgr: 0x%x, status:" -+ " 0x%x\n", pDevObject->hNodeMgr, status); -+ return status; -+} -+ -+/* -+ * ======== DEV_Destroy2 ======== -+ * Purpose: -+ * Destroys the Node manager for this device. -+ */ -+DSP_STATUS DEV_Destroy2(struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValidHandle(hDevObject)); -+ -+ GT_1trace(debugMask, GT_ENTER, -+ "Entered DEV_Destroy2, hDevObject: 0x%x\n", -+ hDevObject); -+ if (pDevObject->hNodeMgr) { -+ if (DSP_FAILED(NODE_DeleteMgr(pDevObject->hNodeMgr))) -+ status = DSP_EFAIL; -+ else -+ pDevObject->hNodeMgr = NULL; -+ -+ } -+ if (DSP_FAILED(status)) -+ GT_0trace(debugMask, GT_7CLASS, "DEV_Destroy2 failed!!\n"); -+ -+ DBC_Ensure((DSP_SUCCEEDED(status) && pDevObject->hNodeMgr == NULL) || -+ DSP_FAILED(status)); -+ GT_2trace(debugMask, GT_ENTER, -+ "Exiting DEV_Destroy2, hNodeMgr: 0x%x, status" -+ " = 0x%x\n", pDevObject->hNodeMgr, status); -+ return status; -+} -+ -+/* -+ * ======== DEV_DestroyDevice ======== -+ * Purpose: -+ * Destroys the channel manager for this device, if any, calls -+ * WMD_DEV_Destroy(), and then attempts to unload the WMD module. 
-+ */ -+DSP_STATUS DEV_DestroyDevice(struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(debugMask, GT_ENTER, "Entered DEV_DestroyDevice, hDevObject: " -+ "0x%x\n", hDevObject); -+ if (IsValidHandle(hDevObject)) { -+ if (pDevObject->hCodMgr) -+ COD_Delete(pDevObject->hCodMgr); -+ -+ if (pDevObject->hNodeMgr) -+ NODE_DeleteMgr(pDevObject->hNodeMgr); -+ -+ /* Free the io, channel, and message managers for this board: */ -+ if (pDevObject->hIOMgr) { -+ IO_Destroy(pDevObject->hIOMgr); -+ pDevObject->hIOMgr = NULL; -+ } -+ if (pDevObject->hChnlMgr) { -+ CHNL_Destroy(pDevObject->hChnlMgr); -+ pDevObject->hChnlMgr = NULL; -+ } -+ if (pDevObject->hMsgMgr) -+ MSG_Delete(pDevObject->hMsgMgr); -+ -+ if (pDevObject->hDehMgr) { -+ /* Uninitialize DEH module. */ -+ (*pDevObject->intfFxns.pfnDehDestroy) -+ (pDevObject->hDehMgr); -+ } -+ if (pDevObject->hCmmMgr) -+ CMM_Destroy(pDevObject->hCmmMgr, true); -+ -+ if (pDevObject->hDmmMgr) -+ DMM_Destroy(pDevObject->hDmmMgr); -+ -+ /* Call the driver's WMD_DEV_Destroy() function: */ -+ /* Require of DevDestroy */ -+ DBC_Assert(pDevObject->hWmdContext != NULL); -+ status = (*pDevObject->intfFxns.pfnDevDestroy) -+ (pDevObject->hWmdContext); -+ if (DSP_SUCCEEDED(status)) { -+ if (pDevObject->procList) -+ LST_Delete(pDevObject->procList); -+ -+ /* Remove this DEV_Object from the global list: */ -+ DRV_RemoveDevObject(pDevObject->hDrvObject, pDevObject); -+ /* Free The library * LDR_FreeModule -+ * (pDevObject->hModule);*/ -+ /* Free this dev object: */ -+ MEM_FreeObject(pDevObject); -+ } -+ } else { -+ GT_0trace(debugMask, GT_7CLASS, "DEV_Destroy: Invlaid handle"); -+ status = DSP_EHANDLE; -+ } -+ GT_1trace(debugMask, GT_ENTER, "Exit DEV_destroy: status 0x%x\n", -+ status); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetChnlMgr ======== -+ * Purpose: -+ * Retrieve the handle to the channel manager handle created for this -+ * device. -+ */ -+DSP_STATUS DEV_GetChnlMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct CHNL_MGR **phMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phMgr != NULL); -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetChnlMgr, hDevObject: 0x%x\n\t" -+ "\tphMgr: 0x%x\n", hDevObject, phMgr); -+ if (IsValidHandle(hDevObject)) { -+ *phMgr = pDevObject->hChnlMgr; -+ } else { -+ *phMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetChnlMgr: Invalid handle"); -+ } -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetChnlMgr: status 0x%x\t\n hMgr: " -+ "0x%x\n", status, *phMgr); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phMgr != NULL) && -+ (*phMgr == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetCmmMgr ======== -+ * Purpose: -+ * Retrieve the handle to the shared memory manager created for this -+ * device. 
-+ */ -+DSP_STATUS DEV_GetCmmMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct CMM_OBJECT **phMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phMgr != NULL); -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetCmmMgr, hDevObject: 0x%x\n\t" -+ "\tphMgr: 0x%x\n", hDevObject, phMgr); -+ if (IsValidHandle(hDevObject)) { -+ *phMgr = pDevObject->hCmmMgr; -+ } else { -+ *phMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetCmmMgr: Invalid handle"); -+ } -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetCmmMgr: status 0x%x\t\nhMgr: " -+ "0x%x\n", status, *phMgr); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phMgr != NULL) && -+ (*phMgr == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetDmmMgr ======== -+ * Purpose: -+ * Retrieve the handle to the dynamic memory manager created for this -+ * device. -+ */ -+DSP_STATUS DEV_GetDmmMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct DMM_OBJECT **phMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phMgr != NULL); -+ -+ GT_2trace(debugMask, GT_ENTER, "Entered DEV_GetDmmMgr, hDevObject: " -+ "0x%x\n\t\tphMgr: 0x%x\n", hDevObject, phMgr); -+ if (IsValidHandle(hDevObject)) { -+ *phMgr = pDevObject->hDmmMgr; -+ } else { -+ *phMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetDmmMgr: Invalid handle"); -+ } -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetDmmMgr: status 0x%x\t\n hMgr: " -+ "0x%x\n", status, *phMgr); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phMgr != NULL) && -+ (*phMgr == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetCodMgr ======== -+ * Purpose: -+ * Retrieve the COD manager create for this device. -+ */ -+DSP_STATUS DEV_GetCodMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct COD_MANAGER **phCodMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phCodMgr != NULL); -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetCodMgr, hDevObject: 0x%x\n\t\t" -+ "phCodMgr: 0x%x\n", hDevObject, phCodMgr); -+ if (IsValidHandle(hDevObject)) { -+ *phCodMgr = pDevObject->hCodMgr; -+ } else { -+ *phCodMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_1trace(debugMask, GT_7CLASS, -+ "DEV_GetCodMgr, invalid handle: 0x%x\n", -+ hDevObject); -+ } -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetCodMgr: status 0x%x\t\n hCodMgr:" -+ " 0x%x\n", status, *phCodMgr); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phCodMgr != NULL) && -+ (*phCodMgr == NULL))); -+ return status; -+} -+ -+/* -+ * ========= DEV_GetDehMgr ======== -+ */ -+DSP_STATUS DEV_GetDehMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct DEH_MGR **phDehMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDehMgr != NULL); -+ DBC_Require(MEM_IsValidHandle(hDevObject, SIGNATURE)); -+ if (IsValidHandle(hDevObject)) { -+ *phDehMgr = hDevObject->hDehMgr; -+ } else { -+ *phDehMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetDehMgr: Invalid handle"); -+ } -+ return status; -+} -+ -+/* -+ * ======== DEV_GetDevNode ======== -+ * Purpose: -+ * Retrieve the platform specific device ID for this device. 
-+ */ -+DSP_STATUS DEV_GetDevNode(struct DEV_OBJECT *hDevObject, -+ OUT struct CFG_DEVNODE **phDevNode) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDevNode != NULL); -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetDevNode, hDevObject: 0x%x\n\t" -+ "\tphDevNode: 0x%x\n", hDevObject, phDevNode); -+ if (IsValidHandle(hDevObject)) { -+ *phDevNode = pDevObject->hDevNode; -+ } else { -+ *phDevNode = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetDevNode: Invalid handle"); -+ } -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetDevNode: status 0x%x\t\nhDevNode:" -+ "0x%x\n", status, *phDevNode); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phDevNode != NULL) && -+ (*phDevNode == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetFirst ======== -+ * Purpose: -+ * Retrieve the first Device Object handle from an internal linked list -+ * DEV_OBJECTs maintained by DEV. -+ */ -+struct DEV_OBJECT *DEV_GetFirst(void) -+{ -+ struct DEV_OBJECT *pDevObject = NULL; -+ -+ pDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); -+ -+ DBC_Ensure((pDevObject == NULL) || IsValidHandle(pDevObject)); -+ -+ return pDevObject; -+} -+ -+/* -+ * ======== DEV_GetIntfFxns ======== -+ * Purpose: -+ * Retrieve the WMD interface function structure for the loaded WMD. -+ * ppIntfFxns != NULL. -+ */ -+DSP_STATUS DEV_GetIntfFxns(struct DEV_OBJECT *hDevObject, -+ OUT struct WMD_DRV_INTERFACE **ppIntfFxns) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(ppIntfFxns != NULL); -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetIntfFxns, hDevObject: 0x%x\n\t" -+ "\tppIntfFxns: 0x%x\n", hDevObject, ppIntfFxns); -+ if (IsValidHandle(hDevObject)) { -+ *ppIntfFxns = &pDevObject->intfFxns; -+ } else { -+ *ppIntfFxns = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetIntDxns: Invalid handle"); -+ } -+ GT_2trace(debugMask, GT_ENTER, "Exit DEV_GetIntFxns: status 0x%x\t\n" -+ "ppIntFxns: 0x%x\n", status, *ppIntfFxns); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((ppIntfFxns != NULL) && -+ (*ppIntfFxns == NULL))); -+ return status; -+} -+ -+/* -+ * ========= DEV_GetIOMgr ======== -+ */ -+DSP_STATUS DEV_GetIOMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct IO_MGR **phIOMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phIOMgr != NULL); -+ DBC_Require(MEM_IsValidHandle(hDevObject, SIGNATURE)); -+ -+ if (IsValidHandle(hDevObject)) { -+ *phIOMgr = hDevObject->hIOMgr; -+ } else { -+ *phIOMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, "DEV_GetIOMgr: Invalid handle"); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DEV_GetNext ======== -+ * Purpose: -+ * Retrieve the next Device Object handle from an internal linked list -+ * of DEV_OBJECTs maintained by DEV, after having previously called -+ * DEV_GetFirst() and zero or more DEV_GetNext -+ */ -+struct DEV_OBJECT *DEV_GetNext(struct DEV_OBJECT *hDevObject) -+{ -+ struct DEV_OBJECT *pNextDevObject = NULL; -+ -+ if (IsValidHandle(hDevObject)) { -+ pNextDevObject = (struct DEV_OBJECT *) -+ DRV_GetNextDevObject((u32)hDevObject); -+ } -+ DBC_Ensure((pNextDevObject == NULL) || IsValidHandle(pNextDevObject)); -+ return pNextDevObject; -+} -+ -+/* -+ * ========= DEV_GetMsgMgr ======== -+ */ -+void DEV_GetMsgMgr(struct DEV_OBJECT *hDevObject, -+ OUT struct MSG_MGR **phMsgMgr) -+{ -+ DBC_Require(cRefs > 0); -+ 
DBC_Require(phMsgMgr != NULL); -+ DBC_Require(MEM_IsValidHandle(hDevObject, SIGNATURE)); -+ -+ *phMsgMgr = hDevObject->hMsgMgr; -+} -+ -+/* -+ * ======== DEV_GetNodeManager ======== -+ * Purpose: -+ * Retrieve the Node Manager Handle -+ */ -+DSP_STATUS DEV_GetNodeManager(struct DEV_OBJECT *hDevObject, -+ OUT struct NODE_MGR **phNodeMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phNodeMgr != NULL); -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetNodeManager, hDevObject: 0x%x" -+ "\n\t\tphNodeMgr: 0x%x\n", hDevObject, phNodeMgr); -+ if (IsValidHandle(hDevObject)) { -+ *phNodeMgr = pDevObject->hNodeMgr; -+ } else { -+ *phNodeMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_1trace(debugMask, GT_7CLASS, -+ "DEV_GetNodeManager, invalid handle: 0x" -+ "%x\n", hDevObject); -+ } -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetNodeManager: status 0x%x\t\nhMgr:" -+ " 0x%x\n", status, *phNodeMgr); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phNodeMgr != NULL) && -+ (*phNodeMgr == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetSymbol ======== -+ */ -+DSP_STATUS DEV_GetSymbol(struct DEV_OBJECT *hDevObject, -+ IN CONST char *pstrSym, OUT u32 *pulValue) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct COD_MANAGER *hCodMgr; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pstrSym != NULL && pulValue != NULL); -+ -+ GT_3trace(debugMask, GT_ENTER, -+ "Entered DEV_GetSymbol, hDevObject: 0x%x\n\t\t" -+ "pstrSym: 0x%x\n\t\tpulValue: 0x%x\n", hDevObject, pstrSym, -+ pulValue); -+ if (IsValidHandle(hDevObject)) { -+ status = DEV_GetCodMgr(hDevObject, &hCodMgr); -+ if (DSP_SUCCEEDED(status)) { -+ DBC_Assert(hCodMgr != NULL); -+ status = COD_GetSymValue(hCodMgr, (char *)pstrSym, -+ pulValue); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetSymbol: Invalid handle"); -+ } -+ GT_2trace(debugMask, GT_ENTER, "Exit DEV_GetSymbol: status 0x%x\t\n" -+ "hWmdContext: 0x%x\n", status, *pulValue); -+ return status; -+} -+ -+/* -+ * ======== DEV_GetWMDContext ======== -+ * Purpose: -+ * Retrieve the WMD Context handle, as returned by the WMD_Create fxn. -+ */ -+DSP_STATUS DEV_GetWMDContext(struct DEV_OBJECT *hDevObject, -+ OUT struct WMD_DEV_CONTEXT **phWmdContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phWmdContext != NULL); -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_GetWMDContext, hDevObject: 0x%x\n" -+ "\t\tphWmdContext: 0x%x\n", hDevObject, phWmdContext); -+ if (IsValidHandle(hDevObject)) { -+ *phWmdContext = pDevObject->hWmdContext; -+ } else { -+ *phWmdContext = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_GetWMDContext: Invalid handle"); -+ } -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Exit DEV_GetWMDContext: status 0x%x\t\n" -+ "hWmdContext: 0x%x\n", status, *phWmdContext); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phWmdContext != NULL) && -+ (*phWmdContext == NULL))); -+ return status; -+} -+ -+/* -+ * ======== DEV_Exit ======== -+ * Purpose: -+ * Decrement reference count, and free resources when reference count is -+ * 0. 
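-+ * Notes:
-+ *    Balances a prior DEV_Init() call; once the last reference is
-+ *    dropped, the CMM and DMM submodules are shut down as well (see
-+ *    the function body below).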
-+ */ -+void DEV_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ if (cRefs == 0) { -+ CMM_Exit(); -+ DMM_Exit(); -+ } -+ -+ GT_1trace(debugMask, GT_5CLASS, "Entered DEV_Exit, ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== DEV_Init ======== -+ * Purpose: -+ * Initialize DEV's private state, keeping a reference count on each call. -+ */ -+bool DEV_Init(void) -+{ -+ bool fCmm, fDmm, fRetval = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ /* Set the Trace mask */ -+ DBC_Assert(!debugMask.flags); -+ GT_create(&debugMask, "DV"); /* "DV" for DeVice */ -+ fCmm = CMM_Init(); -+ fDmm = DMM_Init(); -+ -+ fRetval = fCmm && fDmm; -+ -+ if (!fRetval) { -+ if (fCmm) -+ CMM_Exit(); -+ -+ -+ if (fDmm) -+ DMM_Exit(); -+ -+ } -+ } -+ -+ if (fRetval) -+ cRefs++; -+ -+ -+ GT_1trace(debugMask, GT_5CLASS, "Entered DEV_Init, ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -+ -+/* -+ * ======== DEV_NotifyClients ======== -+ * Purpose: -+ * Notify all clients of this device of a change in device status. -+ */ -+DSP_STATUS DEV_NotifyClients(struct DEV_OBJECT *hDevObject, u32 ulStatus) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ DSP_HPROCESSOR hProcObject; -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_NotifyClients, hDevObject: 0x%x\n" -+ "\t\tulStatus: 0x%x\n", hDevObject, ulStatus); -+ for (hProcObject = (DSP_HPROCESSOR)LST_First(pDevObject->procList); -+ hProcObject != NULL; -+ hProcObject = (DSP_HPROCESSOR)LST_Next(pDevObject->procList, -+ (struct LST_ELEM *)hProcObject)) -+ PROC_NotifyClients(hProcObject, (u32) ulStatus); -+ -+ return status; -+} -+ -+/* -+ * ======== DEV_RemoveDevice ======== -+ */ -+DSP_STATUS DEV_RemoveDevice(struct CFG_DEVNODE *hDevNode) -+{ -+ struct DEV_OBJECT *hDevObject; /* handle to device object */ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject; -+ -+ GT_1trace(debugMask, GT_ENTER, -+ "Entered DEV_RemoveDevice, hDevNode: 0x%x\n", hDevNode); -+ /* Retrieve the device object handle originaly stored with -+ * the DevNode: */ -+ status = CFG_GetDevObject(hDevNode, (u32 *)&hDevObject); -+ if (DSP_SUCCEEDED(status)) { -+ /* Remove the Processor List */ -+ pDevObject = (struct DEV_OBJECT *)hDevObject; -+ /* Destroy the device object. */ -+ status = DEV_DestroyDevice(hDevObject); -+ if (DSP_SUCCEEDED(status)) { -+ /* Null out the handle stored with the DevNode. */ -+ GT_0trace(debugMask, GT_1CLASS, -+ "DEV_RemoveDevice, success"); -+ } -+ } -+ GT_1trace(debugMask, GT_ENTER, "Exit DEV_RemoveDevice, status: 0x%x\n", -+ status); -+ return status; -+} -+ -+/* -+ * ======== DEV_SetChnlMgr ======== -+ * Purpose: -+ * Set the channel manager for this device. -+ */ -+DSP_STATUS DEV_SetChnlMgr(struct DEV_OBJECT *hDevObject, struct CHNL_MGR *hMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = hDevObject; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_SetChnlMgr, hDevObject: 0x%x\n\t" -+ "\thMgr:0x%x\n", hDevObject, hMgr); -+ if (IsValidHandle(hDevObject)) { -+ pDevObject->hChnlMgr = hMgr; -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(debugMask, GT_7CLASS, -+ "DEV_SetChnlMgr, Invalid handle\n"); -+ } -+ DBC_Ensure(DSP_FAILED(status) || (pDevObject->hChnlMgr == hMgr)); -+ return status; -+} -+ -+/* -+ * ======== DEV_SetMsgMgr ======== -+ * Purpose: -+ * Set the message manager for this device. 
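-+ * Note:
-+ *    Counterpart of DEV_GetMsgMgr(); stores hMgr in the device object.
-+ *    hDevObject must be a valid handle (enforced below).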
-+ */ -+void DEV_SetMsgMgr(struct DEV_OBJECT *hDevObject, struct MSG_MGR *hMgr) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValidHandle(hDevObject)); -+ GT_2trace(debugMask, GT_ENTER, -+ "Entered DEV_SetMsgMgr, hDevObject: 0x%x\n\t\t" -+ "hMgr: 0x%x\n", hDevObject, hMgr); -+ hDevObject->hMsgMgr = hMgr; -+} -+ -+/* -+ * ======== DEV_StartDevice ======== -+ * Purpose: -+ * Initializes the new device with the BRIDGE environment. -+ */ -+DSP_STATUS DEV_StartDevice(struct CFG_DEVNODE *hDevNode) -+{ -+ struct DEV_OBJECT *hDevObject = NULL; /* handle to 'Bridge Device */ -+ struct CFG_HOSTRES hostRes; /* resources struct. */ -+ struct CFG_DSPRES dspRes; /* DSP resources struct */ -+ char szWMDFileName[CFG_MAXSEARCHPATHLEN] = "UMA"; /* wmd filename */ -+ DSP_STATUS status; -+ struct MGR_OBJECT *hMgrObject = NULL; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(debugMask, GT_ENTER, -+ "Entered DEV_StartDevice, hDevObject: 0x%x\n", hDevNode); -+ status = CFG_GetHostResources(hDevNode, &hostRes); -+ if (DSP_SUCCEEDED(status)) { -+ /* Get DSP resources of device from Registry: */ -+ status = CFG_GetDSPResources(hDevNode, &dspRes); -+ if (DSP_FAILED(status)) { -+ GT_1trace(debugMask, GT_7CLASS, -+ "Failed to get WMD DSP resources" -+ " from registry: 0x%x ", status); -+ } -+ } else { -+ GT_1trace(debugMask, GT_7CLASS, -+ "Failed to get WMD Host resources " -+ "from registry: 0x%x ", status); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Given all resources, create a device object. */ -+ status = DEV_CreateDevice(&hDevObject, szWMDFileName, &hostRes, -+ &dspRes, hDevNode); -+ if (DSP_SUCCEEDED(status)) { -+ /* Store away the hDevObject with the DEVNODE */ -+ status = CFG_SetDevObject(hDevNode, (u32)hDevObject); -+ if (DSP_FAILED(status)) { -+ /* Clean up */ -+ GT_1trace(debugMask, GT_7CLASS, -+ "Failed to set DevObject in the " -+ "Registry: 0x%x", status); -+ DEV_DestroyDevice(hDevObject); -+ hDevObject = NULL; -+ } -+ } else { -+ GT_1trace(debugMask, GT_7CLASS, -+ "Failed to Create Device: 0x%x", -+ status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Create the Manager Object */ -+ status = MGR_Create(&hMgrObject, hDevNode); -+ } -+ if (DSP_FAILED(status)) { -+ GT_1trace(debugMask, GT_7CLASS, "Failed to MGR object: 0x%x", -+ status); -+ status = DSP_EFAIL; -+ } -+ if (DSP_FAILED(status)) { -+ if (hDevObject) -+ DEV_DestroyDevice(hDevObject); -+ -+ /* Ensure the device extension is NULL */ -+ CFG_SetDevObject(hDevNode, 0L); -+ } -+ GT_1trace(debugMask, GT_ENTER, "Exiting DEV_StartDevice status 0x%x\n", -+ status); -+ return status; -+} -+ -+/* -+ * ======== FxnNotImplemented ======== -+ * Purpose: -+ * Takes the place of a WMD Null Function. -+ * Parameters: -+ * Multiple, optional. -+ * Returns: -+ * DSP_ENOTIMPL: Always. -+ */ -+static DSP_STATUS FxnNotImplemented(int arg, ...) -+{ -+ DBG_Trace(DBG_LEVEL1, -+ "WARNING: Calling a non-implemented WMD function.\n"); -+ -+ return DSP_ENOTIMPL; -+} -+ -+/* -+ * ======== IsValidHandle ======== -+ * Purpose: -+ * Validate the device object handle. -+ * Parameters: -+ * hDevObject: Handle to device object created with -+ * DEV_CreateDevice(). -+ * Returns: -+ * true if handle is valid; false otherwise. -+ * Requires: -+ * Ensures: -+ */ -+static bool IsValidHandle(struct DEV_OBJECT *hObj) -+{ -+ bool retVal; -+ -+ retVal = (hObj != NULL) && (hObj->dwSignature == SIGNATURE); -+ -+ return retVal; -+} -+ -+/* -+ * ======== InitCodMgr ======== -+ * Purpose: -+ * Create a COD manager for this device. 
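-+ *    (A thin wrapper around COD_Create(), invoked with a placeholder
-+ *    file name; see the function body below.)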
-+ * Parameters: -+ * pDevObject: Pointer to device object created with -+ * DEV_CreateDevice() -+ * Returns: -+ * DSP_SOK: Success. -+ * DSP_EHANDLE: Invalid hDevObject. -+ * Requires: -+ * Should only be called once by DEV_CreateDevice() for a given DevObject. -+ * Ensures: -+ */ -+static DSP_STATUS InitCodMgr(struct DEV_OBJECT *pDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ char *szDummyFile = "dummy"; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(!IsValidHandle(pDevObject) || -+ (pDevObject->hCodMgr == NULL)); -+ GT_1trace(debugMask, GT_ENTER, "Entering InitCodMgr pDevObject: 0x%x", -+ pDevObject); -+ status = COD_Create(&pDevObject->hCodMgr, szDummyFile, NULL); -+ GT_1trace(debugMask, GT_ENTER, "Exiting InitCodMgr status 0x%x\n ", -+ status); -+ return status; -+} -+ -+/* -+ * ======== DEV_InsertProcObject ======== -+ * Purpose: -+ * Insert a ProcObject into the list maintained by DEV. -+ * Parameters: -+ * pProcObject: Ptr to ProcObject to insert. -+ * pDevObject: Ptr to Dev Object where the list is. -+ * pbAlreadyAttached: Ptr to return the bool -+ * Returns: -+ * DSP_SOK: If successful. -+ * Requires: -+ * List Exists -+ * hDevObject is Valid handle -+ * DEV Initialized -+ * pbAlreadyAttached != NULL -+ * hProcObject != 0 -+ * Ensures: -+ * DSP_SOK and List is not Empty. -+ */ -+DSP_STATUS DEV_InsertProcObject(struct DEV_OBJECT *hDevObject, -+ u32 hProcObject, -+ OUT bool *pbAlreadyAttached) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)hDevObject; -+ -+ GT_2trace(debugMask, GT_ENTER, -+ "Entering DEV_InsetProcObject pProcObject 0x%x" -+ "pDevObject 0x%x\n", hProcObject, hDevObject); -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValidHandle(pDevObject)); -+ DBC_Require(hProcObject != 0); -+ DBC_Require(pDevObject->procList != NULL); -+ DBC_Require(pbAlreadyAttached != NULL); -+ if (!LST_IsEmpty(pDevObject->procList)) -+ *pbAlreadyAttached = true; -+ -+ /* Add DevObject to tail. */ -+ LST_PutTail(pDevObject->procList, (struct LST_ELEM *)hProcObject); -+ -+ GT_1trace(debugMask, GT_ENTER, -+ "Exiting DEV_InsetProcObject status 0x%x\n", status); -+ DBC_Ensure(DSP_SUCCEEDED(status) && !LST_IsEmpty(pDevObject->procList)); -+ -+ return status; -+} -+ -+/* -+ * ======== DEV_RemoveProcObject ======== -+ * Purpose: -+ * Search for and remove a Proc object from the given list maintained -+ * by the DEV -+ * Parameters: -+ * pProcObject: Ptr to ProcObject to insert. -+ * pDevObject Ptr to Dev Object where the list is. -+ * Returns: -+ * DSP_SOK: If successful. -+ * Requires: -+ * List exists and is not empty -+ * hProcObject != 0 -+ * hDevObject is a valid Dev handle. -+ * Ensures: -+ * Details: -+ * List will be deleted when the DEV is destroyed. -+ */ -+DSP_STATUS DEV_RemoveProcObject(struct DEV_OBJECT *hDevObject, -+ u32 hProcObject) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct LST_ELEM *pCurElem; -+ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)hDevObject; -+ -+ DBC_Require(IsValidHandle(pDevObject)); -+ DBC_Require(hProcObject != 0); -+ DBC_Require(pDevObject->procList != NULL); -+ DBC_Require(!LST_IsEmpty(pDevObject->procList)); -+ -+ GT_1trace(debugMask, GT_ENTER, -+ "Entering DEV_RemoveProcObject hDevObject " -+ "0x%x\n", hDevObject); -+ /* Search list for pDevObject: */ -+ for (pCurElem = LST_First(pDevObject->procList); pCurElem != NULL; -+ pCurElem = LST_Next(pDevObject->procList, pCurElem)) { -+ /* If found, remove it. 
*/ -+ if ((u32)pCurElem == hProcObject) { -+ LST_RemoveElem(pDevObject->procList, pCurElem); -+ status = DSP_SOK; -+ break; -+ } -+ } -+ GT_1trace(debugMask, GT_ENTER, "DEV_RemoveProcObject returning 0x%x\n", -+ status); -+ return status; -+} -+ -+DSP_STATUS DEV_GetDevType(struct DEV_OBJECT *hdevObject, u32 *devType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *)hdevObject; -+ -+ *devType = pDevObject->devType; -+ -+ return status; -+} -+ -+/* -+ * ======== StoreInterfaceFxns ======== -+ * Purpose: -+ * Copy the WMD's interface functions into the device object, -+ * ensuring that FxnNotImplemented() is set for: -+ * -+ * 1. All WMD function pointers which are NULL; and -+ * 2. All function slots in the struct DEV_OBJECT structure which have no -+ * corresponding slots in the the WMD's interface, because the WMD -+ * is of an *older* version. -+ * Parameters: -+ * pIntfFxns: Interface Fxn Structure of the WCD's Dev Object. -+ * pDrvFxns: Interface Fxns offered by the WMD during DEV_Create(). -+ * Returns: -+ * Requires: -+ * Input pointers are valid. -+ * WMD is *not* written for a newer WCD. -+ * Ensures: -+ * All function pointers in the dev object's Fxn interface are not NULL. -+ */ -+static void StoreInterfaceFxns(struct WMD_DRV_INTERFACE *pDrvFxns, -+ OUT struct WMD_DRV_INTERFACE *pIntfFxns) -+{ -+ u32 dwWMDVersion; -+ -+ /* Local helper macro: */ -+#define StoreFxn(cast, pfn) \ -+ (pIntfFxns->pfn = ((pDrvFxns->pfn != NULL) ? pDrvFxns->pfn : \ -+ (cast)FxnNotImplemented)) -+ -+ DBC_Require(pIntfFxns != NULL); -+ DBC_Require(pDrvFxns != NULL); -+ DBC_Require(MAKEVERSION(pDrvFxns->dwWCDMajorVersion, -+ pDrvFxns->dwWCDMinorVersion) <= WCDVERSION); -+ dwWMDVersion = MAKEVERSION(pDrvFxns->dwWCDMajorVersion, -+ pDrvFxns->dwWCDMinorVersion); -+ pIntfFxns->dwWCDMajorVersion = pDrvFxns->dwWCDMajorVersion; -+ pIntfFxns->dwWCDMinorVersion = pDrvFxns->dwWCDMinorVersion; -+ /* Install functions up to WCD version .80 (first alpha): */ -+ if (dwWMDVersion > 0) { -+ StoreFxn(WMD_DEV_CREATE, pfnDevCreate); -+ StoreFxn(WMD_DEV_DESTROY, pfnDevDestroy); -+ StoreFxn(WMD_DEV_CTRL, pfnDevCntrl); -+ StoreFxn(WMD_BRD_MONITOR, pfnBrdMonitor); -+ StoreFxn(WMD_BRD_START, pfnBrdStart); -+ StoreFxn(WMD_BRD_STOP, pfnBrdStop); -+ StoreFxn(WMD_BRD_STATUS, pfnBrdStatus); -+ StoreFxn(WMD_BRD_READ, pfnBrdRead); -+ StoreFxn(WMD_BRD_WRITE, pfnBrdWrite); -+ StoreFxn(WMD_BRD_SETSTATE, pfnBrdSetState); -+ StoreFxn(WMD_BRD_MEMCOPY, pfnBrdMemCopy); -+ StoreFxn(WMD_BRD_MEMWRITE, pfnBrdMemWrite); -+ StoreFxn(WMD_BRD_MEMMAP, pfnBrdMemMap); -+ StoreFxn(WMD_BRD_MEMUNMAP, pfnBrdMemUnMap); -+ StoreFxn(WMD_CHNL_CREATE, pfnChnlCreate); -+ StoreFxn(WMD_CHNL_DESTROY, pfnChnlDestroy); -+ StoreFxn(WMD_CHNL_OPEN, pfnChnlOpen); -+ StoreFxn(WMD_CHNL_CLOSE, pfnChnlClose); -+ StoreFxn(WMD_CHNL_ADDIOREQ, pfnChnlAddIOReq); -+ StoreFxn(WMD_CHNL_GETIOC, pfnChnlGetIOC); -+ StoreFxn(WMD_CHNL_CANCELIO, pfnChnlCancelIO); -+ StoreFxn(WMD_CHNL_FLUSHIO, pfnChnlFlushIO); -+ StoreFxn(WMD_CHNL_GETINFO, pfnChnlGetInfo); -+ StoreFxn(WMD_CHNL_GETMGRINFO, pfnChnlGetMgrInfo); -+ StoreFxn(WMD_CHNL_IDLE, pfnChnlIdle); -+ StoreFxn(WMD_CHNL_REGISTERNOTIFY, pfnChnlRegisterNotify); -+ StoreFxn(WMD_DEH_CREATE, pfnDehCreate); -+ StoreFxn(WMD_DEH_DESTROY, pfnDehDestroy); -+ StoreFxn(WMD_DEH_NOTIFY, pfnDehNotify); -+ StoreFxn(WMD_DEH_REGISTERNOTIFY, pfnDehRegisterNotify); -+ StoreFxn(WMD_DEH_GETINFO, pfnDehGetInfo); -+ StoreFxn(WMD_IO_CREATE, pfnIOCreate); -+ StoreFxn(WMD_IO_DESTROY, pfnIODestroy); -+ StoreFxn(WMD_IO_ONLOADED, 
pfnIOOnLoaded); -+ StoreFxn(WMD_IO_GETPROCLOAD, pfnIOGetProcLoad); -+ StoreFxn(WMD_MSG_CREATE, pfnMsgCreate); -+ StoreFxn(WMD_MSG_CREATEQUEUE, pfnMsgCreateQueue); -+ StoreFxn(WMD_MSG_DELETE, pfnMsgDelete); -+ StoreFxn(WMD_MSG_DELETEQUEUE, pfnMsgDeleteQueue); -+ StoreFxn(WMD_MSG_GET, pfnMsgGet); -+ StoreFxn(WMD_MSG_PUT, pfnMsgPut); -+ StoreFxn(WMD_MSG_REGISTERNOTIFY, pfnMsgRegisterNotify); -+ StoreFxn(WMD_MSG_SETQUEUEID, pfnMsgSetQueueId); -+ } -+ /* Add code for any additional functions in newer WMD versions here: */ -+ /* Ensure postcondition: */ -+ DBC_Ensure(pIntfFxns->pfnDevCreate != NULL); -+ DBC_Ensure(pIntfFxns->pfnDevDestroy != NULL); -+ DBC_Ensure(pIntfFxns->pfnDevCntrl != NULL); -+ DBC_Ensure(pIntfFxns->pfnBrdMonitor != NULL); -+ DBC_Ensure(pIntfFxns->pfnBrdStart != NULL); -+ DBC_Ensure(pIntfFxns->pfnBrdStop != NULL); -+ DBC_Ensure(pIntfFxns->pfnBrdStatus != NULL); -+ DBC_Ensure(pIntfFxns->pfnBrdRead != NULL); -+ DBC_Ensure(pIntfFxns->pfnBrdWrite != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlCreate != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlDestroy != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlOpen != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlClose != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlAddIOReq != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlGetIOC != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlCancelIO != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlFlushIO != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlGetInfo != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlGetMgrInfo != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlIdle != NULL); -+ DBC_Ensure(pIntfFxns->pfnChnlRegisterNotify != NULL); -+ DBC_Ensure(pIntfFxns->pfnDehCreate != NULL); -+ DBC_Ensure(pIntfFxns->pfnDehDestroy != NULL); -+ DBC_Ensure(pIntfFxns->pfnDehNotify != NULL); -+ DBC_Ensure(pIntfFxns->pfnDehRegisterNotify != NULL); -+ DBC_Ensure(pIntfFxns->pfnDehGetInfo != NULL); -+ DBC_Ensure(pIntfFxns->pfnIOCreate != NULL); -+ DBC_Ensure(pIntfFxns->pfnIODestroy != NULL); -+ DBC_Ensure(pIntfFxns->pfnIOOnLoaded != NULL); -+ DBC_Ensure(pIntfFxns->pfnIOGetProcLoad != NULL); -+ DBC_Ensure(pIntfFxns->pfnMsgSetQueueId != NULL); -+ -+#undef StoreFxn -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dmm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dmm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/dmm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/dmm.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,657 @@ -+/* -+ * dmm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+/* -+ * ======== dmm.c ======== -+ * Purpose: -+ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address -+ * space that can be directly mapped to any MPU buffer or memory region -+ * -+ * Public Functions: -+ * DMM_CreateTables -+ * DMM_Create -+ * DMM_Destroy -+ * DMM_Exit -+ * DMM_Init -+ * DMM_MapMemory -+ * DMM_Reset -+ * DMM_ReserveMemory -+ * DMM_UnMapMemory -+ * DMM_UnReserveMemory -+ * -+ * Private Functions: -+ * AddRegion -+ * CreateRegion -+ * GetRegion -+ * GetFreeRegion -+ * GetMappedRegion -+ * -+ * Notes: -+ * Region: Generic memory entitiy having a start address and a size -+ * Chunk: Reserved region -+ * -+ * -+ *! Revision History: -+ *! ================ -+ *! 04-Jun-2008 Hari K : Optimized DMM implementation. Removed linked list -+ *! and instead used Table approach. -+ *! 19-Apr-2004 sb: Integrated Alan's code review updates. -+ *! 17-Mar-2004 ap: Fixed GetRegion for size=0 using tighter bound. -+ *! 20-Feb-2004 sb: Created. -+ *! -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+/* Object signatures */ -+#define DMMSIGNATURE 0x004d4d44 /* "DMM" (in reverse) */ -+ -+#define DMM_ADDR_VIRTUAL(a) \ -+ (((struct MapPage *)(a) - pVirtualMappingTable) * PG_SIZE_4K +\ -+ dynMemMapBeg) -+#define DMM_ADDR_TO_INDEX(a) (((a) - dynMemMapBeg) / PG_SIZE_4K) -+ -+/* DMM Mgr */ -+struct DMM_OBJECT { -+ u32 dwSignature; /* Used for object validation */ -+ /* Dmm Lock is used to serialize access mem manager for -+ * multi-threads. */ -+ struct SYNC_CSOBJECT *hDmmLock; /* Lock to access dmm mgr */ -+}; -+ -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask DMM_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static u32 cRefs; /* module reference count */ -+struct MapPage { -+ u32 RegionSize:15; -+ u32 MappedSize:15; -+ u32 bReserved:1; -+ u32 bMapped:1; -+}; -+ -+/* Create the free list */ -+static struct MapPage *pVirtualMappingTable; -+static u32 iFreeRegion; /* The index of free region */ -+static u32 iFreeSize; -+static u32 dynMemMapBeg; /* The Beginning of dynamic memory mapping */ -+static u32 TableSize;/* The size of virtual and physical pages tables */ -+ -+/* ----------------------------------- Function Prototypes */ -+static struct MapPage *GetRegion(u32 addr); -+static struct MapPage *GetFreeRegion(u32 aSize); -+static struct MapPage *GetMappedRegion(u32 aAddr); -+#ifdef DSP_DMM_DEBUG -+u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr); -+#endif -+ -+/* ======== DMM_CreateTables ======== -+ * Purpose: -+ * Create table to hold the information of physical address -+ * the buffer pages that is passed by the user, and the table -+ * to hold the information of the virtual memory that is reserved -+ * for DSP. 
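-+ *    The table holds one struct MapPage entry per 4 KiB page of the
-+ *    managed range, i.e. (as computed in the body below):
-+ *        TableSize = PG_ALIGN_HIGH(size, PG_SIZE_4K) / PG_SIZE_4K;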
-+ */ -+DSP_STATUS DMM_CreateTables(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 size) -+{ -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ GT_3trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_CreateTables () hDmmMgr %x, addr" -+ " %x, size %x\n", hDmmMgr, addr, size); -+ status = DMM_DeleteTables(pDmmObj); -+ if (DSP_SUCCEEDED(status)) { -+ SYNC_EnterCS(pDmmObj->hDmmLock); -+ dynMemMapBeg = addr; -+ TableSize = PG_ALIGN_HIGH(size, PG_SIZE_4K)/PG_SIZE_4K; -+ /* Create the free list */ -+ pVirtualMappingTable = (struct MapPage *) MEM_Calloc -+ (TableSize * sizeof(struct MapPage), MEM_LARGEVIRTMEM); -+ if (pVirtualMappingTable == NULL) -+ status = DSP_EMEMORY; -+ else { -+ /* On successful allocation, -+ * all entries are zero ('free') */ -+ iFreeRegion = 0; -+ iFreeSize = TableSize*PG_SIZE_4K; -+ pVirtualMappingTable[0].RegionSize = TableSize; -+ } -+ SYNC_LeaveCS(pDmmObj->hDmmLock); -+ } else -+ GT_0trace(DMM_debugMask, GT_7CLASS, -+ "DMM_CreateTables: DMM_DeleteTables" -+ "Failure\n"); -+ -+ GT_1trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_CreateTables status" -+ "0x%x\n", status); -+ return status; -+} -+ -+/* -+ * ======== DMM_Create ======== -+ * Purpose: -+ * Create a dynamic memory manager object. -+ */ -+DSP_STATUS DMM_Create(OUT struct DMM_OBJECT **phDmmMgr, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct DMM_MGRATTRS *pMgrAttrs) -+{ -+ struct DMM_OBJECT *pDmmObject = NULL; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDmmMgr != NULL); -+ -+ GT_3trace(DMM_debugMask, GT_ENTER, -+ "DMM_Create: phDmmMgr: 0x%x hDevObject: " -+ "0x%x pMgrAttrs: 0x%x\n", phDmmMgr, hDevObject, pMgrAttrs); -+ *phDmmMgr = NULL; -+ /* create, zero, and tag a cmm mgr object */ -+ MEM_AllocObject(pDmmObject, struct DMM_OBJECT, DMMSIGNATURE); -+ if (pDmmObject != NULL) { -+ status = SYNC_InitializeCS(&pDmmObject->hDmmLock); -+ if (DSP_SUCCEEDED(status)) -+ *phDmmMgr = pDmmObject; -+ else -+ DMM_Destroy(pDmmObject); -+ } else { -+ GT_0trace(DMM_debugMask, GT_7CLASS, -+ "DMM_Create: Object Allocation " -+ "Failure(DMM Object)\n"); -+ status = DSP_EMEMORY; -+ } -+ GT_2trace(DMM_debugMask, GT_4CLASS, -+ "Leaving DMM_Create status %x pDmmObject %x\n", -+ status, pDmmObject); -+ -+ return status; -+} -+ -+/* -+ * ======== DMM_Destroy ======== -+ * Purpose: -+ * Release the communication memory manager resources. -+ */ -+DSP_STATUS DMM_Destroy(struct DMM_OBJECT *hDmmMgr) -+{ -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ GT_1trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_Destroy () hDmmMgr %x\n", hDmmMgr); -+ DBC_Require(cRefs > 0); -+ if (MEM_IsValidHandle(hDmmMgr, DMMSIGNATURE)) { -+ status = DMM_DeleteTables(pDmmObj); -+ if (DSP_SUCCEEDED(status)) { -+ /* Delete CS & dmm mgr object */ -+ SYNC_DeleteCS(pDmmObj->hDmmLock); -+ MEM_FreeObject(pDmmObj); -+ } else -+ GT_0trace(DMM_debugMask, GT_7CLASS, -+ "DMM_Destroy: DMM_DeleteTables " -+ "Failure\n"); -+ } else -+ status = DSP_EHANDLE; -+ GT_1trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_Destroy status %x\n", -+ status); -+ return status; -+} -+ -+ -+/* -+ * ======== DMM_DeleteTables ======== -+ * Purpose: -+ * Delete DMM Tables. 
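-+ *    Called from both DMM_CreateTables() (to discard any earlier table)
-+ *    and DMM_Destroy(); frees pVirtualMappingTable under the DMM lock.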
-+ */ -+DSP_STATUS DMM_DeleteTables(struct DMM_OBJECT *hDmmMgr) -+{ -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ GT_1trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_DeleteTables () hDmmMgr %x\n", hDmmMgr); -+ DBC_Require(cRefs > 0); -+ if (MEM_IsValidHandle(hDmmMgr, DMMSIGNATURE)) { -+ /* Delete all DMM tables */ -+ SYNC_EnterCS(pDmmObj->hDmmLock); -+ -+ if (pVirtualMappingTable != NULL) -+ MEM_VFree(pVirtualMappingTable); -+ -+ SYNC_LeaveCS(pDmmObj->hDmmLock); -+ } else -+ status = DSP_EHANDLE; -+ GT_1trace(DMM_debugMask, GT_4CLASS, -+ "Leaving DMM_DeleteTables status %x\n", status); -+ return status; -+} -+ -+ -+ -+ -+/* -+ * ======== DMM_Exit ======== -+ * Purpose: -+ * Discontinue usage of module; free resources when reference count -+ * reaches 0. -+ */ -+void DMM_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(DMM_debugMask, GT_ENTER, -+ "exiting DMM_Exit, ref count:0x%x\n", cRefs); -+} -+ -+/* -+ * ======== DMM_GetHandle ======== -+ * Purpose: -+ * Return the dynamic memory manager object for this device. -+ * This is typically called from the client process. -+ */ -+DSP_STATUS DMM_GetHandle(DSP_HPROCESSOR hProcessor, -+ OUT struct DMM_OBJECT **phDmmMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *hDevObject; -+ -+ GT_2trace(DMM_debugMask, GT_ENTER, -+ "DMM_GetHandle: hProcessor %x, phDmmMgr" -+ "%x\n", hProcessor, phDmmMgr); -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDmmMgr != NULL); -+ if (hProcessor != NULL) -+ status = PROC_GetDevObject(hProcessor, &hDevObject); -+ else -+ hDevObject = DEV_GetFirst(); /* default */ -+ -+ if (DSP_SUCCEEDED(status)) -+ status = DEV_GetDmmMgr(hDevObject, phDmmMgr); -+ -+ GT_2trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_GetHandle status %x, " -+ "*phDmmMgr %x\n", status, phDmmMgr ? *phDmmMgr : NULL); -+ return status; -+} -+ -+/* -+ * ======== DMM_Init ======== -+ * Purpose: -+ * Initializes private state of DMM module. -+ */ -+bool DMM_Init(void) -+{ -+ bool fRetval = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ /* Set the Trace mask */ -+ /*"DM" for Dymanic Memory Manager */ -+ GT_create(&DMM_debugMask, "DM"); -+ } -+ -+ if (fRetval) -+ cRefs++; -+ -+ GT_1trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_Init, ref count:0x%x\n", cRefs); -+ -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ pVirtualMappingTable = NULL ; -+ TableSize = 0; -+ -+ return fRetval; -+} -+ -+/* -+ * ======== DMM_MapMemory ======== -+ * Purpose: -+ * Add a mapping block to the reserved chunk. DMM assumes that this block -+ * will be mapped in the DSP/IVA's address space. DMM returns an error if a -+ * mapping overlaps another one. This function stores the info that will be -+ * required later while unmapping the block. 
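-+ *    DSP_ENOTFOUND is returned if the given address lies outside the
-+ *    range covered by the virtual mapping table.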
-+ */ -+DSP_STATUS DMM_MapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 size) -+{ -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ struct MapPage *chunk; -+ DSP_STATUS status = DSP_SOK; -+ -+ GT_3trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_MapMemory () hDmmMgr %x, " -+ "addr %x, size %x\n", hDmmMgr, addr, size); -+ SYNC_EnterCS(pDmmObj->hDmmLock); -+ /* Find the Reserved memory chunk containing the DSP block to -+ * be mapped */ -+ chunk = (struct MapPage *)GetRegion(addr); -+ if (chunk != NULL) { -+ /* Mark the region 'mapped', leave the 'reserved' info as-is */ -+ chunk->bMapped = true; -+ chunk->MappedSize = (size/PG_SIZE_4K); -+ } else -+ status = DSP_ENOTFOUND; -+ SYNC_LeaveCS(pDmmObj->hDmmLock); -+ GT_2trace(DMM_debugMask, GT_4CLASS, -+ "Leaving DMM_MapMemory status %x, chunk %x\n", -+ status, chunk); -+ return status; -+} -+ -+/* -+ * ======== DMM_ReserveMemory ======== -+ * Purpose: -+ * Reserve a chunk of virtually contiguous DSP/IVA address space. -+ */ -+DSP_STATUS DMM_ReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 size, -+ u32 *pRsvAddr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ struct MapPage *node; -+ u32 rsvAddr = 0; -+ u32 rsvSize = 0; -+ -+ GT_3trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_ReserveMemory () hDmmMgr %x, " -+ "size %x, pRsvAddr %x\n", hDmmMgr, size, pRsvAddr); -+ SYNC_EnterCS(pDmmObj->hDmmLock); -+ -+ /* Try to get a DSP chunk from the free list */ -+ node = GetFreeRegion(size); -+ if (node != NULL) { -+ /* DSP chunk of given size is available. */ -+ rsvAddr = DMM_ADDR_VIRTUAL(node); -+ /* Calculate the number entries to use */ -+ rsvSize = size/PG_SIZE_4K; -+ if (rsvSize < node->RegionSize) { -+ /* Mark remainder of free region */ -+ node[rsvSize].bMapped = false; -+ node[rsvSize].bReserved = false; -+ node[rsvSize].RegionSize = node->RegionSize - rsvSize; -+ node[rsvSize].MappedSize = 0; -+ } -+ /* GetRegion will return first fit chunk. But we only use what -+ is requested. */ -+ node->bMapped = false; -+ node->bReserved = true; -+ node->RegionSize = rsvSize; -+ node->MappedSize = 0; -+ /* Return the chunk's starting address */ -+ *pRsvAddr = rsvAddr; -+ } else -+ /*dSP chunk of given size is not available */ -+ status = DSP_EMEMORY; -+ -+ SYNC_LeaveCS(pDmmObj->hDmmLock); -+ GT_3trace(DMM_debugMask, GT_4CLASS, -+ "Leaving ReserveMemory status %x, rsvAddr" -+ " %x, rsvSize %x\n", status, rsvAddr, rsvSize); -+ return status; -+} -+ -+ -+/* -+ * ======== DMM_UnMapMemory ======== -+ * Purpose: -+ * Remove the mapped block from the reserved chunk. -+ */ -+DSP_STATUS DMM_UnMapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 *pSize) -+{ -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ struct MapPage *chunk; -+ DSP_STATUS status = DSP_SOK; -+ -+ GT_3trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_UnMapMemory () hDmmMgr %x, " -+ "addr %x, pSize %x\n", hDmmMgr, addr, pSize); -+ SYNC_EnterCS(pDmmObj->hDmmLock); -+ chunk = GetMappedRegion(addr) ; -+ if (chunk == NULL) -+ status = DSP_ENOTFOUND ; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Unmap the region */ -+ *pSize = chunk->MappedSize * PG_SIZE_4K; -+ chunk->bMapped = false; -+ chunk->MappedSize = 0; -+ } -+ SYNC_LeaveCS(pDmmObj->hDmmLock); -+ GT_3trace(DMM_debugMask, GT_ENTER, -+ "Leaving DMM_UnMapMemory status %x, chunk" -+ " %x, *pSize %x\n", status, chunk, *pSize); -+ -+ return status; -+} -+ -+/* -+ * ======== DMM_UnReserveMemory ======== -+ * Purpose: -+ * Free a chunk of reserved DSP/IVA address space. 
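-+ *    Typical lifecycle of a DSP/IVA address range, sketched from the
-+ *    functions in this file (the callers are not shown here):
-+ *        DMM_ReserveMemory(hDmmMgr, size, &rsvAddr);
-+ *        DMM_MapMemory(hDmmMgr, rsvAddr, size);
-+ *        ...
-+ *        DMM_UnMapMemory(hDmmMgr, rsvAddr, &size);
-+ *        DMM_UnReserveMemory(hDmmMgr, rsvAddr);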
-+ */ -+DSP_STATUS DMM_UnReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 rsvAddr) -+{ -+ struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr; -+ struct MapPage *chunk; -+ u32 i; -+ DSP_STATUS status = DSP_SOK; -+ u32 chunkSize; -+ -+ GT_2trace(DMM_debugMask, GT_ENTER, -+ "Entered DMM_UnReserveMemory () hDmmMgr " -+ "%x, rsvAddr %x\n", hDmmMgr, rsvAddr); -+ -+ SYNC_EnterCS(pDmmObj->hDmmLock); -+ -+ /* Find the chunk containing the reserved address */ -+ chunk = GetMappedRegion(rsvAddr); -+ if (chunk == NULL) -+ status = DSP_ENOTFOUND; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Free all the mapped pages for this reserved region */ -+ i = 0; -+ while (i < chunk->RegionSize) { -+ if (chunk[i].bMapped) { -+ /* Remove mapping from the page tables. */ -+ chunkSize = chunk[i].MappedSize; -+ /* Clear the mapping flags */ -+ chunk[i].bMapped = false; -+ chunk[i].MappedSize = 0; -+ i += chunkSize; -+ } else -+ i++; -+ } -+ /* Clear the flags (mark the region 'free') */ -+ chunk->bReserved = false; -+ /* NOTE: We do NOT coalesce free regions here. -+ * Free regions are coalesced in GetRegion(), as it traverses -+ *the whole mapping table -+ */ -+ } -+ SYNC_LeaveCS(pDmmObj->hDmmLock); -+ GT_2trace(DMM_debugMask, GT_ENTER, -+ "Leaving DMM_UnReserveMemory status %x" -+ " chunk %x\n", status, chunk); -+ return status; -+} -+ -+ -+/* -+ * ======== GetRegion ======== -+ * Purpose: -+ * Returns a region containing the specified memory region -+ */ -+static struct MapPage *GetRegion(u32 aAddr) -+{ -+ struct MapPage *currRegion = NULL; -+ u32 i = 0; -+ -+ GT_1trace(DMM_debugMask, GT_ENTER, "Entered GetRegion () " -+ " aAddr %x\n", aAddr); -+ -+ if (pVirtualMappingTable != NULL) { -+ /* find page mapped by this address */ -+ i = DMM_ADDR_TO_INDEX(aAddr); -+ if (i < TableSize) -+ currRegion = pVirtualMappingTable + i; -+ } -+ GT_3trace(DMM_debugMask, GT_4CLASS, -+ "Leaving GetRegion currRegion %x, iFreeRegion %d\n," -+ "iFreeSize %d\n", currRegion, iFreeRegion, iFreeSize) ; -+ return currRegion; -+} -+ -+/* -+ * ======== GetFreeRegion ======== -+ * Purpose: -+ * Returns the requested free region -+ */ -+static struct MapPage *GetFreeRegion(u32 aSize) -+{ -+ struct MapPage *currRegion = NULL; -+ u32 i = 0; -+ u32 RegionSize = 0; -+ u32 nextI = 0; -+ GT_1trace(DMM_debugMask, GT_ENTER, "Entered GetFreeRegion () " -+ "aSize 0x%x\n", aSize); -+ -+ if (pVirtualMappingTable == NULL) -+ return currRegion; -+ if (aSize > iFreeSize) { -+ /* Find the largest free region -+ * (coalesce during the traversal) */ -+ while (i < TableSize) { -+ RegionSize = pVirtualMappingTable[i].RegionSize; -+ nextI = i+RegionSize; -+ if (pVirtualMappingTable[i].bReserved == false) { -+ /* Coalesce, if possible */ -+ if (nextI < TableSize && -+ pVirtualMappingTable[nextI].bReserved -+ == false) { -+ pVirtualMappingTable[i].RegionSize += -+ pVirtualMappingTable[nextI].RegionSize; -+ continue; -+ } -+ RegionSize *= PG_SIZE_4K; -+ if (RegionSize > iFreeSize) { -+ iFreeRegion = i; -+ iFreeSize = RegionSize; -+ } -+ } -+ i = nextI; -+ } -+ } -+ if (aSize <= iFreeSize) { -+ currRegion = pVirtualMappingTable + iFreeRegion; -+ iFreeRegion += (aSize / PG_SIZE_4K); -+ iFreeSize -= aSize; -+ } -+ return currRegion; -+} -+ -+/* -+ * ======== GetMappedRegion ======== -+ * Purpose: -+ * Returns the requestedmapped region -+ */ -+static struct MapPage *GetMappedRegion(u32 aAddr) -+{ -+ u32 i = 0; -+ struct MapPage *currRegion = NULL; -+ GT_1trace(DMM_debugMask, GT_ENTER, "Entered GetMappedRegion () " -+ "aAddr 0x%x\n", aAddr); -+ -+ if 
(pVirtualMappingTable == NULL) -+ return currRegion; -+ -+ i = DMM_ADDR_TO_INDEX(aAddr); -+ if (i < TableSize && (pVirtualMappingTable[i].bMapped || -+ pVirtualMappingTable[i].bReserved)) -+ currRegion = pVirtualMappingTable + i; -+ return currRegion; -+} -+ -+#ifdef DSP_DMM_DEBUG -+u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr) -+{ -+ struct MapPage *curNode = NULL; -+ u32 i; -+ u32 freemem = 0; -+ u32 bigsize = 0; -+ -+ SYNC_EnterCS(hDmmMgr->hDmmLock); -+ -+ if (pVirtualMappingTable != NULL) { -+ for (i = 0; i < TableSize; i += -+ pVirtualMappingTable[i].RegionSize) { -+ curNode = pVirtualMappingTable + i; -+ if (curNode->bReserved == TRUE) { -+ /*printk("RESERVED size = 0x%x, " -+ "Map size = 0x%x\n", -+ (curNode->RegionSize * PG_SIZE_4K), -+ (curNode->bMapped == false) ? 0 : -+ (curNode->MappedSize * PG_SIZE_4K)); -+*/ -+ } else { -+/* printk("UNRESERVED size = 0x%x\n", -+ (curNode->RegionSize * PG_SIZE_4K)); -+*/ -+ freemem += (curNode->RegionSize * PG_SIZE_4K); -+ if (curNode->RegionSize > bigsize) -+ bigsize = curNode->RegionSize; -+ } -+ } -+ } -+ printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n", -+ freemem/(1024*1024)); -+ printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n", -+ (((TableSize * PG_SIZE_4K)-freemem))/(1024*1024)); -+ printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n", -+ (bigsize*PG_SIZE_4K/(1024*1024))); -+ SYNC_LeaveCS(hDmmMgr->hDmmLock); -+ -+ return 0; -+} -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnl.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/chnl.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/chnl.c 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,260 @@ -+/* -+ * chnl.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== chnl.c ======== -+ * Description: -+ * WCD channel interface: multiplexes data streams through the single -+ * physical link managed by a 'Bridge mini-driver. -+ * -+ * Public Functions: -+ * CHNL_Close -+ * CHNL_CloseOrphans -+ * CHNL_Create -+ * CHNL_Destroy -+ * CHNL_Exit -+ * CHNL_GetHandle -+ * CHNL_GetProcessHandle -+ * CHNL_Init -+ * CHNL_Open -+ * -+ * Notes: -+ * This interface is basically a pass through to the WMD CHNL functions, -+ * except for the CHNL_Get() accessor functions which call -+ * WMD_CHNL_GetInfo(). -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 07-Jan-2002 ag CHNL_CloseOrphans() now closes supported # of channels. -+ *! 17-Nov-2000 jeh Removed IRQ, shared memory stuff from CHNL_Create. -+ *! 28-Feb-2000 rr: New GT USage Implementation -+ *! 03-Feb-2000 rr: GT and Module init/exit Changes.(Done up front from -+ *! SERVICES) -+ *! 21-Jan-2000 ag: Added code review comments. -+ *! 13-Jan-2000 rr: CFG_Get/SetPrivateDword renamed to CFG_Get/SetDevObject. -+ *! 08-Dec-1999 ag: CHNL_[Alloc|Free]Buffer bufs taken from client process heap. -+ *! 
02-Dec-1999 ag: Implemented CHNL_GetEventHandle(). -+ *! 17-Nov-1999 ag: CHNL_AllocBuffer() allocs extra word for process mapping. -+ *! 28-Oct-1999 ag: WinCE port. Search for "WinCE" for changes(TBR). -+ *! 07-Jan-1998 gp: CHNL_[Alloc|Free]Buffer now call MEM_UMB functions. -+ *! 22-Oct-1997 gp: Removed requirement in CHNL_Open that hReserved1 != NULL. -+ *! 30-Aug-1997 cr: Renamed cfg.h wbwcd.h b/c of WINNT file name collision. -+ *! 10-Mar-1997 gp: Added GT trace. -+ *! 14-Jan-1997 gp: Updated based on code review feedback. -+ *! 03-Jan-1997 gp: Moved CHNL_AllocBuffer/CHNL_FreeBuffer code from udspsys. -+ *! 14-Dec-1996 gp: Added uChnlId parameter to CHNL_Open(). -+ *! 09-Sep-1996 gp: Added CHNL_GetProcessHandle(). -+ *! 15-Jul-1996 gp: Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Globals */ -+static u32 cRefs; -+#if GT_TRACE -+static struct GT_Mask CHNL_DebugMask = { NULL, NULL }; /* WCD CHNL Mask */ -+#endif -+ -+ -+ -+/* -+ * ======== CHNL_Create ======== -+ * Purpose: -+ * Create a channel manager object, responsible for opening new channels -+ * and closing old ones for a given 'Bridge board. -+ */ -+DSP_STATUS CHNL_Create(OUT struct CHNL_MGR **phChnlMgr, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct CHNL_MGRATTRS *pMgrAttrs) -+{ -+ DSP_STATUS status; -+ struct CHNL_MGR *hChnlMgr; -+ struct CHNL_MGR_ *pChnlMgr = NULL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phChnlMgr != NULL); -+ DBC_Require(pMgrAttrs != NULL); -+ -+ GT_3trace(CHNL_DebugMask, GT_ENTER, -+ "Entered CHNL_Create: phChnlMgr: 0x%x\t" -+ "hDevObject: 0x%x\tpMgrAttrs:0x%x\n", -+ phChnlMgr, hDevObject, pMgrAttrs); -+ -+ *phChnlMgr = NULL; -+ -+ /* Validate args: */ -+ if ((0 < pMgrAttrs->cChannels) && -+ (pMgrAttrs->cChannels <= CHNL_MAXCHANNELS)) { -+ status = DSP_SOK; -+ } else if (pMgrAttrs->cChannels == 0) { -+ status = DSP_EINVALIDARG; -+ GT_0trace(CHNL_DebugMask, GT_7CLASS, -+ "CHNL_Create:Invalid Args\n"); -+ } else { -+ status = CHNL_E_MAXCHANNELS; -+ GT_0trace(CHNL_DebugMask, GT_7CLASS, -+ "CHNL_Create:Error Max Channels\n"); -+ } -+ if (pMgrAttrs->uWordSize == 0) { -+ status = CHNL_E_INVALIDWORDSIZE; -+ GT_0trace(CHNL_DebugMask, GT_7CLASS, -+ "CHNL_Create:Invalid Word size\n"); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetChnlMgr(hDevObject, &hChnlMgr); -+ if (DSP_SUCCEEDED(status) && hChnlMgr != NULL) -+ status = CHNL_E_MGREXISTS; -+ -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DEV_GetIntfFxns(hDevObject, &pIntfFxns); -+ /* Let WMD channel module finish the create: */ -+ status = (*pIntfFxns->pfnChnlCreate)(&hChnlMgr, hDevObject, -+ pMgrAttrs); -+ if (DSP_SUCCEEDED(status)) { -+ /* Fill in WCD channel module's fields of the -+ * CHNL_MGR structure */ -+ pChnlMgr = (struct CHNL_MGR_ *)hChnlMgr; -+ pChnlMgr->pIntfFxns = pIntfFxns; -+ /* Finally, return the new channel manager handle: */ -+ *phChnlMgr = hChnlMgr; -+ GT_1trace(CHNL_DebugMask, 
GT_1CLASS, -+ "CHNL_Create: Success pChnlMgr:" -+ "0x%x\n", pChnlMgr); -+ } -+ } -+ -+ GT_2trace(CHNL_DebugMask, GT_ENTER, -+ "Exiting CHNL_Create: pChnlMgr: 0x%x," -+ "status: 0x%x\n", pChnlMgr, status); -+ DBC_Ensure(DSP_FAILED(status) || CHNL_IsValidMgr(pChnlMgr)); -+ -+ return status; -+} -+ -+/* -+ * ======== CHNL_Destroy ======== -+ * Purpose: -+ * Close all open channels, and destroy the channel manager. -+ */ -+DSP_STATUS CHNL_Destroy(struct CHNL_MGR *hChnlMgr) -+{ -+ struct CHNL_MGR_ *pChnlMgr = (struct CHNL_MGR_ *)hChnlMgr; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(CHNL_DebugMask, GT_ENTER, -+ "Entered CHNL_Destroy: hChnlMgr: 0x%x\n", hChnlMgr); -+ if (CHNL_IsValidMgr(pChnlMgr)) { -+ pIntfFxns = pChnlMgr->pIntfFxns; -+ /* Let WMD channel module destroy the CHNL_MGR: */ -+ status = (*pIntfFxns->pfnChnlDestroy)(hChnlMgr); -+ } else { -+ GT_0trace(CHNL_DebugMask, GT_7CLASS, -+ "CHNL_Destroy:Invalid Handle\n"); -+ status = DSP_EHANDLE; -+ } -+ -+ GT_2trace(CHNL_DebugMask, GT_ENTER, -+ "Exiting CHNL_Destroy: pChnlMgr: 0x%x," -+ " status:0x%x\n", pChnlMgr, status); -+ DBC_Ensure(DSP_FAILED(status) || !CHNL_IsValidMgr(pChnlMgr)); -+ -+ return status; -+} -+ -+/* -+ * ======== CHNL_Exit ======== -+ * Purpose: -+ * Discontinue usage of the CHNL module. -+ */ -+void CHNL_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(CHNL_DebugMask, GT_5CLASS, -+ "Entered CHNL_Exit, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+ -+/* -+ * ======== CHNL_Init ======== -+ * Purpose: -+ * Initialize the CHNL module's private state. -+ */ -+bool CHNL_Init(void) -+{ -+ bool fRetval = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!CHNL_DebugMask.flags); -+ GT_create(&CHNL_DebugMask, "CH"); /* "CH" for CHannel */ -+ } -+ -+ if (fRetval) -+ cRefs++; -+ -+ GT_1trace(CHNL_DebugMask, GT_5CLASS, -+ "Entered CHNL_Init, ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -+ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnlobj.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/chnlobj.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/chnlobj.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/chnlobj.h 2011-06-22 13:19:32.533063279 +0200 -@@ -0,0 +1,71 @@ -+/* -+ * chnlobj.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== chnlobj.h ======== -+ * Description: -+ * Structure subcomponents of channel class library channel objects which -+ * are exposed to class driver from mini-driver. -+ * -+ * Public Functions: -+ * None. -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 17-Nov-2000 jeh Removed some fields from CHNL_MGR_ to match CHNL_MGR -+ *! structure defined in _chnl_sm.h. -+ *! 
16-Jan-1997 gp: Created from chnlpriv.h -+ */ -+ -+#ifndef CHNLOBJ_ -+#define CHNLOBJ_ -+ -+#include -+#include -+ -+/* Object validateion macros: */ -+#define CHNL_IsValidMgr(h) \ -+ ((h != NULL) && ((h)->dwSignature == CHNL_MGRSIGNATURE)) -+ -+#define CHNL_IsValidChnl(h)\ -+ ((h != NULL) && ((h)->dwSignature == CHNL_SIGNATURE)) -+ -+/* -+ * This struct is the first field in a CHNL_MGR struct, as implemented in -+ * a WMD channel class library. Other, implementation specific fields -+ * follow this structure in memory. -+ */ -+struct CHNL_MGR_ { -+ /* These must be the first fields in a CHNL_MGR struct: */ -+ u32 dwSignature; /* Used for object validation. */ -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ -+} ; -+ -+/* -+ * This struct is the first field in a CHNL_OBJECT struct, as implemented in -+ * a WMD channel class library. Other, implementation specific fields -+ * follow this structure in memory. -+ */ -+struct CHNL_OBJECT_ { -+ /* These must be the first fields in a CHNL_OBJECT struct: */ -+ u32 dwSignature; /* Used for object validation. */ -+ struct CHNL_MGR_ *pChnlMgr; /* Pointer back to channel manager. */ -+} ; -+ -+#endif /* CHNLOBJ_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/io.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/io.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/io.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/io.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,205 @@ -+/* -+ * io.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== io.c ======== -+ * Description: -+ * IO manager interface: Manages IO between CHNL and MSG. -+ * -+ * Public Functions: -+ * IO_Create -+ * IO_Destroy -+ * IO_Exit -+ * IO_Init -+ * IO_OnLoaded -+ * -+ * Notes: -+ * This interface is basically a pass through to the WMD IO functions. -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 04-Apr-2001 rr WSX_STATUS initialized in IO_Create. -+ *! 07-Nov-2000 jeh Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Globals */ -+static u32 cRefs; -+ -+#if GT_TRACE -+static struct GT_Mask IO_DebugMask = { NULL, NULL }; /* WCD IO Mask */ -+#endif -+ -+/* -+ * ======== IO_Create ======== -+ * Purpose: -+ * Create an IO manager object, responsible for managing IO between -+ * CHNL and MSG -+ */ -+DSP_STATUS IO_Create(OUT struct IO_MGR **phIOMgr, struct DEV_OBJECT *hDevObject, -+ IN CONST struct IO_ATTRS *pMgrAttrs) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct IO_MGR *hIOMgr = NULL; -+ struct IO_MGR_ *pIOMgr = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phIOMgr != NULL); -+ DBC_Require(pMgrAttrs != NULL); -+ -+ GT_3trace(IO_DebugMask, GT_ENTER, "Entered IO_Create: phIOMgr: 0x%x\t " -+ "hDevObject: 0x%x\tpMgrAttrs: 0x%x\n", -+ phIOMgr, hDevObject, pMgrAttrs); -+ -+ *phIOMgr = NULL; -+ -+ /* A memory base of 0 implies no memory base: */ -+ if ((pMgrAttrs->dwSMBase != 0) && (pMgrAttrs->uSMLength == 0)) { -+ status = CHNL_E_INVALIDMEMBASE; -+ GT_0trace(IO_DebugMask, GT_7CLASS, -+ "IO_Create:Invalid Mem Base\n"); -+ } -+ -+ if (pMgrAttrs->uWordSize == 0) { -+ status = CHNL_E_INVALIDWORDSIZE; -+ GT_0trace(IO_DebugMask, GT_7CLASS, -+ "IO_Create:Invalid Word size\n"); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ DEV_GetIntfFxns(hDevObject, &pIntfFxns); -+ -+ /* Let WMD channel module finish the create: */ -+ status = (*pIntfFxns->pfnIOCreate)(&hIOMgr, hDevObject, -+ pMgrAttrs); -+ -+ if (DSP_SUCCEEDED(status)) { -+ pIOMgr = (struct IO_MGR_ *) hIOMgr; -+ pIOMgr->pIntfFxns = pIntfFxns; -+ pIOMgr->hDevObject = hDevObject; -+ -+ /* Return the new channel manager handle: */ -+ *phIOMgr = hIOMgr; -+ GT_1trace(IO_DebugMask, GT_1CLASS, -+ "IO_Create: Success hIOMgr: 0x%x\n", -+ hIOMgr); -+ } -+ } -+ -+ GT_2trace(IO_DebugMask, GT_ENTER, -+ "Exiting IO_Create: hIOMgr: 0x%x, status:" -+ " 0x%x\n", hIOMgr, status); -+ -+ return status; -+} -+ -+/* -+ * ======== IO_Destroy ======== -+ * Purpose: -+ * Delete IO manager. -+ */ -+DSP_STATUS IO_Destroy(struct IO_MGR *hIOMgr) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct IO_MGR_ *pIOMgr = (struct IO_MGR_ *)hIOMgr; -+ DSP_STATUS status; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(IO_DebugMask, GT_ENTER, "Entered IO_Destroy: hIOMgr: 0x%x\n", -+ hIOMgr); -+ -+ pIntfFxns = pIOMgr->pIntfFxns; -+ -+ /* Let WMD channel module destroy the IO_MGR: */ -+ status = (*pIntfFxns->pfnIODestroy) (hIOMgr); -+ -+ GT_2trace(IO_DebugMask, GT_ENTER, -+ "Exiting IO_Destroy: pIOMgr: 0x%x, status:" -+ " 0x%x\n", pIOMgr, status); -+ return status; -+} -+ -+/* -+ * ======== IO_Exit ======== -+ * Purpose: -+ * Discontinue usage of the IO module. -+ */ -+void IO_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(IO_DebugMask, GT_5CLASS, -+ "Entered IO_Exit, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== IO_Init ======== -+ * Purpose: -+ * Initialize the IO module's private state. 
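-+ *    Reference counted: each successful IO_Init() call is expected to be
-+ *    balanced by a matching IO_Exit(), mirroring the CHNL and MSG modules.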
-+ */ -+bool IO_Init(void) -+{ -+ bool fRetval = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!IO_DebugMask.flags); -+ GT_create(&IO_DebugMask, "IO"); /* "IO" for IO */ -+ } -+ -+ if (fRetval) -+ cRefs++; -+ -+ -+ GT_1trace(IO_DebugMask, GT_5CLASS, -+ "Entered IO_Init, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/ioobj.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/ioobj.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/ioobj.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/ioobj.h 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,52 @@ -+/* -+ * ioobj.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== ioobj.h ======== -+ * Description: -+ * Structure subcomponents of channel class library IO objects which -+ * are exposed to class driver from mini-driver. -+ * -+ * Public Functions: -+ * None. -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 01/16/97 gp: Created from chnlpriv.h -+ */ -+ -+#ifndef IOOBJ_ -+#define IOOBJ_ -+ -+#include -+#include -+ -+/* -+ * This struct is the first field in a IO_MGR struct, as implemented in -+ * a WMD channel class library. Other, implementation specific fields -+ * follow this structure in memory. -+ */ -+struct IO_MGR_ { -+ /* These must be the first fields in a IO_MGR struct: */ -+ u32 dwSignature; /* Used for object validation. */ -+ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD device context. */ -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ -+ struct DEV_OBJECT *hDevObject; /* Device this board represents. */ -+} ; -+ -+#endif /* IOOBJ_ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msg.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/msg.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msg.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/msg.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,173 @@ -+/* -+ * msg.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== msg.c ======== -+ * Description: -+ * DSP/BIOS Bridge MSG Module. -+ * -+ * Public Functions: -+ * MSG_Create -+ * MSG_Delete -+ * MSG_Exit -+ * MSG_Init -+ * -+ *! Revision History: -+ *! ================= -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 
15-May-2001 ag Changed SUCCEEDED to DSP_SUCCEEDED. -+ *! 16-Feb-2001 jeh Fixed some comments. -+ *! 15-Dec-2000 rr MSG_Create returns DSP_EFAIL if pfnMsgCreate fails. -+ *! 12-Sep-2000 jeh Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- Mini Driver */ -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask MSG_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+static u32 cRefs; /* module reference count */ -+ -+/* -+ * ======== MSG_Create ======== -+ * Purpose: -+ * Create an object to manage message queues. Only one of these objects -+ * can exist per device object. -+ */ -+DSP_STATUS MSG_Create(OUT struct MSG_MGR **phMsgMgr, -+ struct DEV_OBJECT *hDevObject, MSG_ONEXIT msgCallback) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct MSG_MGR_ *pMsgMgr; -+ struct MSG_MGR *hMsgMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phMsgMgr != NULL); -+ DBC_Require(msgCallback != NULL); -+ DBC_Require(hDevObject != NULL); -+ -+ GT_3trace(MSG_debugMask, GT_ENTER, "MSG_Create: phMsgMgr: 0x%x\t" -+ "hDevObject: 0x%x\tmsgCallback: 0x%x\n", -+ phMsgMgr, hDevObject, msgCallback); -+ -+ *phMsgMgr = NULL; -+ -+ DEV_GetIntfFxns(hDevObject, &pIntfFxns); -+ -+ /* Let WMD message module finish the create: */ -+ status = (*pIntfFxns->pfnMsgCreate)(&hMsgMgr, hDevObject, msgCallback); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Fill in WCD message module's fields of the MSG_MGR -+ * structure */ -+ pMsgMgr = (struct MSG_MGR_ *)hMsgMgr; -+ pMsgMgr->pIntfFxns = pIntfFxns; -+ -+ /* Finally, return the new message manager handle: */ -+ *phMsgMgr = hMsgMgr; -+ GT_1trace(MSG_debugMask, GT_1CLASS, -+ "MSG_Create: Success pMsgMgr: 0x%x\n", pMsgMgr); -+ } else { -+ status = DSP_EFAIL; -+ } -+ return status; -+} -+ -+/* -+ * ======== MSG_Delete ======== -+ * Purpose: -+ * Delete a MSG manager allocated in MSG_Create(). 
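-+ *    Dispatches to the WMD's pfnMsgDelete handler; hMsgMgr must be a
-+ *    handle with a valid MSGMGR_SIGNATURE (enforced via DBC_Require
-+ *    in the body below).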
-+ */ -+void MSG_Delete(struct MSG_MGR *hMsgMgr) -+{ -+ struct MSG_MGR_ *pMsgMgr = (struct MSG_MGR_ *)hMsgMgr; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pMsgMgr, MSGMGR_SIGNATURE)); -+ -+ GT_1trace(MSG_debugMask, GT_ENTER, "MSG_Delete: hMsgMgr: 0x%x\n", -+ hMsgMgr); -+ -+ pIntfFxns = pMsgMgr->pIntfFxns; -+ -+ /* Let WMD message module destroy the MSG_MGR: */ -+ (*pIntfFxns->pfnMsgDelete)(hMsgMgr); -+ -+ DBC_Ensure(!MEM_IsValidHandle(pMsgMgr, MSGMGR_SIGNATURE)); -+} -+ -+/* -+ * ======== MSG_Exit ======== -+ */ -+void MSG_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ cRefs--; -+ GT_1trace(MSG_debugMask, GT_5CLASS, -+ "Entered MSG_Exit, ref count: 0x%x\n", cRefs); -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== MSG_Init ======== -+ */ -+bool MSG_Init(void) -+{ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!MSG_debugMask.flags); -+ GT_create(&MSG_debugMask, "MS"); /* "MS" for MSg */ -+ } -+ -+ cRefs++; -+ -+ GT_1trace(MSG_debugMask, GT_5CLASS, "MSG_Init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+ -+ return true; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msgobj.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/msgobj.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/msgobj.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/msgobj.h 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,52 @@ -+/* -+ * msgobj.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== msgobj.h ======== -+ * Description: -+ * Structure subcomponents of channel class library MSG objects which -+ * are exposed to class driver from mini-driver. -+ * -+ * Public Functions: -+ * None. -+ * -+ *! Revision History: -+ *! ================ -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 17-Nov-2000 jeh Created. -+ */ -+ -+#ifndef MSGOBJ_ -+#define MSGOBJ_ -+ -+#include -+ -+#include -+ -+/* -+ * This struct is the first field in a MSG_MGR struct, as implemented in -+ * a WMD channel class library. Other, implementation specific fields -+ * follow this structure in memory. -+ */ -+struct MSG_MGR_ { -+ /* The first two fields must match those in msgobj.h */ -+ u32 dwSignature; -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ -+}; -+ -+#endif /* MSGOBJ_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/wcd.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/wcd.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/pmgr/wcd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/pmgr/wcd.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,1747 @@ -+/* -+ * wcd.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
-+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== wcd.c ======== -+ * Description: -+ * Common WCD functions, also includes the wrapper -+ * functions called directly by the DeviceIOControl interface. -+ * -+ * Public Functions: -+ * WCD_CallDevIOCtl -+ * WCD_Init -+ * WCD_InitComplete2 -+ * WCD_Exit -+ * WRAP_* -+ * -+ *! Revision History: -+ *! ================ -+ *! 29-Apr-2004 hp Call PROC_AutoStart only for DSP device -+ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian -+ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping APIs -+ *! 03-Apr-2003 sb Process environment pointer in PROCWRAP_Load -+ *! 24-Feb-2003 swa PMGR Code review comments incorporated. -+ *! 30-Jan-2002 ag CMMWRAP_AllocBuf name changed to CMMWRAP_CallocBuf -+ *! 15-Jan-2002 ag Added actual bufSize param to STRMWRAP_Reclaim[issue]. -+ *! 14-Dec-2001 rr ARGS_NODE_CONNECT maps the pAttr. -+ *! 03-Oct-2001 rr ARGS_NODE_ALLOCMSGBUF/FREEMSGBUF maps the pAttr. -+ *! 10-Sep-2001 ag Added CMD_CMM_GETHANDLE. -+ *! 23-Apr-2001 jeh Pass pStatus to NODE_Terminate. -+ *! 11-Apr-2001 jeh STRMWRAP_Reclaim embedded pointer is mapped and unmapped. -+ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. -+ *! 06-Dec-2000 jeh WRAP_MAP2CALLER pointers in RegisterNotify calls. -+ *! 05-Dec-2000 ag: Removed MAP2CALLER in NODEWRAP_FreeMsgBuf(). -+ *! 22-Nov-2000 kc: Added MGRWRAP_GetPerf_Data(). -+ *! 20-Nov-2000 jeh Added MSG_Init()/MSG_Exit(), IO_Init()/IO_Exit(). -+ *! WRAP pointers to handles for PROC_Attach, NODE_Allocate. -+ *! 27-Oct-2000 jeh Added NODEWRAP_AllocMsgBuf, NODEWRAP_FreeMsgBuf. Removed -+ *! NODEWRAP_GetMessageStream. -+ *! 12-Oct-2000 ag: Added user CMM wrappers. -+ *! 05-Oct-2000 rr: WcdInitComplete2 will fail even if one BRD or PROC -+ *! AutoStart fails. -+ *! 25-Sep-2000 rr: Updated to Version 0.9 -+ *! 13-Sep-2000 jeh Pass ARGS_NODE_CONNECT.pAttrs to NODE_Connect(). -+ *! 11-Aug-2000 rr: Part of node enabled. -+ *! 31-Jul-2000 rr: UTIL_Wrap and MEM_Wrap added to RM. -+ *! 27-Jul-2000 rr: PROCWRAP, NODEWRAP and STRMWRAP implemented. -+ *! STRM and some NODE Wrappers are not implemented. -+ *! 27-Jun-2000 rr: MGRWRAP fxns added.IFDEF to build for PM or DSP/BIOS Bridge -+ *! 08-Feb-2000 rr File name changed to wcd.c -+ *! 03-Feb-2000 rr: Module initialization are done by SERVICES init. GT Class -+ *! changes for module init/exit fxns. -+ *! 24-Jan-2000 rr: Merged with Scott's code. -+ *! 21-Jan-1999 sg: Changed ARGS_CHNL_GETMODE field name from pdwMode to pMode. -+ *! 17-Jan-2000 rr: BRD_GetStatus does WRAP_MAP2CALLER for state. -+ *! 14-Dec-1999 ag: Removed _MAP2CALLER in CHNL_GetMgr(). -+ *! 13-Dec-1999 rr: BRDWRAP_GetSymbol, BRDWRAP_GetTrace uses WRAP_MAP2CALLER -+ *! macros.BRDWRAP_Load maps and unmaps embedded pointers. -+ *! 10-Dec-1999 ag: User CHNL bufs mapped in _AddIOReq & _GetIOCompletion. -+ *! 09-Dec-1999 rr: BRDWRAP_Open and CHNLWRAP_GetMgr does not map -+ *! pointer as there was a change in config.c -+ *! 06-Dec-1999 rr: BRD_Read and Write Maps the buf pointers. -+ *! 03-Dec-1999 rr: CHNLWRAP_GetMgr and BRDWRAP_Open maps hDevNode pointer. -+ *! WCD_InitComplete2 Included for BRD_AutoStart. -+ *! 
16-Nov-1999 ag: Map buf to process in CHNLWRAP_AllocBuffer(). -+ *! CHNL_GetMgr() Mapping Fix. -+ *! 10-Nov-1999 ag: Removed unnecessary calls to WRAP_MAP2CALLER. -+ *! 08-Nov-1999 kc: Added MEMRY & enabled BRD_IOCtl for tests. -+ *! 29-Oct-1999 ag: Added CHNL. -+ *! 29-Oct-1999 kc: Added trace statements; added ptr mapping; updated -+ *! use of UTIL module API. -+ *! 29-Oct-1999 rr: Wrapper functions does the Mapping of the Pointers. -+ *! in WinCE all the explicit pointers will be converted -+ *! by the OS during interprocess but not the embedded pointers. -+ *! 16-Oct-1999 kc: Code review cleanup. -+ *! 07-Oct-1999 kc: Added UTILWRAP_TestDll() to run PM test harness. See -+ *! /src/doc/pmtest.doc for more detail. -+ *! 09-Sep-1999 rr: After exactly two years(!). Adopted for WinCE. GT Enabled. -+ *! 09-Sep-1997 gp: Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+ -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+#include -+#include -+#include -+ -+ -+/* ----------------------------------- Others */ -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+#ifndef RES_CLEANUP_DISABLE -+#include -+#endif -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define MAX_TRACEBUFLEN 255 -+#define MAX_LOADARGS 16 -+#define MAX_NODES 64 -+#define MAX_STREAMS 16 -+#define MAX_BUFS 64 -+ -+/* Device IOCtl function pointer */ -+struct WCD_Cmd { -+ u32(*fxn)(union Trapped_Args *args, void *pr_ctxt); -+ u32 dwIndex; -+} ; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask WCD_debugMask = { NULL, NULL }; /* Core VxD Mask */ -+#endif -+static u32 WCD_cRefs; -+ -+static inline void __cp_fm_usr(void *to, const void __user *from, -+ DSP_STATUS *err, unsigned long bytes) -+{ -+ if (DSP_FAILED(*err)) -+ return; -+ -+ if (unlikely(!from)) { -+ *err = DSP_EPOINTER; -+ return; -+ } -+ -+ if (unlikely(copy_from_user(to, from, bytes))) { -+ GT_2trace(WCD_debugMask, GT_7CLASS, -+ "%s failed, from=0x%08x\n", __func__, from); -+ *err = DSP_EPOINTER; -+ } -+} -+#define cp_fm_usr(to, from, err, n) \ -+ __cp_fm_usr(to, from, &(err), (n) * sizeof(*(to))) -+ -+static inline void __cp_to_usr(void __user *to, const void *from, -+ DSP_STATUS *err, unsigned long bytes) -+{ -+ if (DSP_FAILED(*err)) -+ return; -+ -+ if (unlikely(!to)) { -+ *err = DSP_EPOINTER; -+ return; -+ } -+ -+ if (unlikely(copy_to_user(to, from, bytes))) { -+ GT_2trace(WCD_debugMask, GT_7CLASS, -+ "%s failed, to=0x%08x\n", __func__, to); -+ *err = DSP_EPOINTER; -+ } -+} -+#define cp_to_usr(to, from, err, n) \ -+ __cp_to_usr(to, from, &(err), (n) * sizeof(*(from))) -+ -+/* -+ * Function table. -+ * The order of these functions MUST be the same as the order of the command -+ * numbers defined in wcdioctl.h This is how an IOCTL number in user mode -+ * turns into a function call in kernel mode. 
-+ */ -+static struct WCD_Cmd WCD_cmdTable[] = { -+ /* MGR module */ -+ {MGRWRAP_EnumNode_Info, CMD_MGR_ENUMNODE_INFO_OFFSET}, -+ {MGRWRAP_EnumProc_Info, CMD_MGR_ENUMPROC_INFO_OFFSET}, -+ {MGRWRAP_RegisterObject, CMD_MGR_REGISTEROBJECT_OFFSET}, -+ {MGRWRAP_UnregisterObject, CMD_MGR_UNREGISTEROBJECT_OFFSET}, -+ {MGRWRAP_WaitForBridgeEvents, CMD_MGR_WAIT_OFFSET}, -+#ifndef RES_CLEANUP_DISABLE -+ {MGRWRAP_GetProcessResourcesInfo, CMD_MGR_RESOUCES_OFFSET}, -+#endif -+ /* PROC Module */ -+ {PROCWRAP_Attach, CMD_PROC_ATTACH_OFFSET}, -+ {PROCWRAP_Ctrl, CMD_PROC_CTRL_OFFSET}, -+ {PROCWRAP_Detach, CMD_PROC_DETACH_OFFSET}, -+ {PROCWRAP_EnumNode_Info, CMD_PROC_ENUMNODE_OFFSET}, -+ {PROCWRAP_EnumResources, CMD_PROC_ENUMRESOURCES_OFFSET}, -+ {PROCWRAP_GetState, CMD_PROC_GETSTATE_OFFSET}, -+ {PROCWRAP_GetTrace, CMD_PROC_GETTRACE_OFFSET}, -+ {PROCWRAP_Load, CMD_PROC_LOAD_OFFSET}, -+ {PROCWRAP_RegisterNotify, CMD_PROC_REGISTERNOTIFY_OFFSET}, -+ {PROCWRAP_Start, CMD_PROC_START_OFFSET}, -+ {PROCWRAP_ReserveMemory, CMD_PROC_RSVMEM_OFFSET}, -+ {PROCWRAP_UnReserveMemory, CMD_PROC_UNRSVMEM_OFFSET}, -+ {PROCWRAP_Map, CMD_PROC_MAPMEM_OFFSET}, -+ {PROCWRAP_UnMap, CMD_PROC_UNMAPMEM_OFFSET}, -+ {PROCWRAP_FlushMemory, CMD_PROC_FLUSHMEMORY_OFFSET}, -+ {PROCWRAP_Stop, CMD_PROC_STOP_OFFSET}, -+ {PROCWRAP_InvalidateMemory, CMD_PROC_INVALIDATEMEMORY_OFFSET}, -+ /* NODE Module */ -+ {NODEWRAP_Allocate, CMD_NODE_ALLOCATE_OFFSET}, -+ {NODEWRAP_AllocMsgBuf, CMD_NODE_ALLOCMSGBUF_OFFSET}, -+ {NODEWRAP_ChangePriority, CMD_NODE_CHANGEPRIORITY_OFFSET}, -+ {NODEWRAP_Connect, CMD_NODE_CONNECT_OFFSET}, -+ {NODEWRAP_Create, CMD_NODE_CREATE_OFFSET}, -+ {NODEWRAP_Delete, CMD_NODE_DELETE_OFFSET}, -+ {NODEWRAP_FreeMsgBuf, CMD_NODE_FREEMSGBUF_OFFSET}, -+ {NODEWRAP_GetAttr, CMD_NODE_GETATTR_OFFSET}, -+ {NODEWRAP_GetMessage, CMD_NODE_GETMESSAGE_OFFSET}, -+ {NODEWRAP_Pause, CMD_NODE_PAUSE_OFFSET}, -+ {NODEWRAP_PutMessage, CMD_NODE_PUTMESSAGE_OFFSET}, -+ {NODEWRAP_RegisterNotify, CMD_NODE_REGISTERNOTIFY_OFFSET}, -+ {NODEWRAP_Run, CMD_NODE_RUN_OFFSET}, -+ {NODEWRAP_Terminate, CMD_NODE_TERMINATE_OFFSET}, -+ {NODEWRAP_GetUUIDProps, CMD_NODE_GETUUIDPROPS_OFFSET}, -+ /* STRM wrapper functions */ -+ {STRMWRAP_AllocateBuffer, CMD_STRM_ALLOCATEBUFFER_OFFSET}, -+ {STRMWRAP_Close, CMD_STRM_CLOSE_OFFSET}, -+ {STRMWRAP_FreeBuffer, CMD_STRM_FREEBUFFER_OFFSET}, -+ {STRMWRAP_GetEventHandle, CMD_STRM_GETEVENTHANDLE_OFFSET}, -+ {STRMWRAP_GetInfo, CMD_STRM_GETINFO_OFFSET}, -+ {STRMWRAP_Idle, CMD_STRM_IDLE_OFFSET}, -+ {STRMWRAP_Issue, CMD_STRM_ISSUE_OFFSET}, -+ {STRMWRAP_Open, CMD_STRM_OPEN_OFFSET}, -+ {STRMWRAP_Reclaim, CMD_STRM_RECLAIM_OFFSET}, -+ {STRMWRAP_RegisterNotify, CMD_STRM_REGISTERNOTIFY_OFFSET}, -+ {STRMWRAP_Select, CMD_STRM_SELECT_OFFSET}, -+ /* CMM module */ -+ {CMMWRAP_CallocBuf, CMD_CMM_ALLOCBUF_OFFSET}, -+ {CMMWRAP_FreeBuf, CMD_CMM_FREEBUF_OFFSET}, -+ {CMMWRAP_GetHandle, CMD_CMM_GETHANDLE_OFFSET}, -+ {CMMWRAP_GetInfo, CMD_CMM_GETINFO_OFFSET} -+}; -+ -+/* -+ * ======== WCD_CallDevIOCtl ======== -+ * Purpose: -+ * Call the (wrapper) function for the corresponding WCD IOCTL. 
-+ */ -+inline DSP_STATUS WCD_CallDevIOCtl(u32 cmd, union Trapped_Args *args, -+ u32 *pResult, void *pr_ctxt) -+{ -+ if ((cmd < (sizeof(WCD_cmdTable) / sizeof(struct WCD_Cmd)))) { -+ /* make the fxn call via the cmd table */ -+ *pResult = (*WCD_cmdTable[cmd].fxn) (args, pr_ctxt); -+ return DSP_SOK; -+ } else { -+ return DSP_EINVALIDARG; -+ } -+} -+ -+/* -+ * ======== WCD_Exit ======== -+ */ -+void WCD_Exit(void) -+{ -+ DBC_Require(WCD_cRefs > 0); -+ WCD_cRefs--; -+ GT_1trace(WCD_debugMask, GT_5CLASS, -+ "Entered WCD_Exit, ref count: 0x%x\n", WCD_cRefs); -+ if (WCD_cRefs == 0) { -+ /* Release all WCD modules initialized in WCD_Init(). */ -+ COD_Exit(); -+ DEV_Exit(); -+ CHNL_Exit(); -+ MSG_Exit(); -+ IO_Exit(); -+ STRM_Exit(); -+ NTFY_Exit(); -+ DISP_Exit(); -+ NODE_Exit(); -+ PROC_Exit(); -+ MGR_Exit(); -+ RMM_exit(); -+ DRV_Exit(); -+ SERVICES_Exit(); -+ } -+ DBC_Ensure(WCD_cRefs >= 0); -+} -+ -+/* -+ * ======== WCD_Init ======== -+ * Purpose: -+ * Module initialization is done by SERVICES Init. -+ */ -+bool WCD_Init(void) -+{ -+ bool fInit = true; -+ bool fDRV, fDEV, fCOD, fSERVICES, fCHNL, fMSG, fIO; -+ bool fMGR, fPROC, fNODE, fDISP, fNTFY, fSTRM, fRMM; -+#ifdef DEBUG -+ /* runtime check of Device IOCtl array. */ -+ u32 i; -+ for (i = 1; i < (sizeof(WCD_cmdTable) / sizeof(struct WCD_Cmd)); i++) -+ DBC_Assert(WCD_cmdTable[i - 1].dwIndex == i); -+ -+#endif -+ if (WCD_cRefs == 0) { -+ /* initialize all SERVICES modules */ -+ fSERVICES = SERVICES_Init(); -+ /* initialize debugging module */ -+ DBC_Assert(!WCD_debugMask.flags); -+ GT_create(&WCD_debugMask, "CD"); /* CD for class driver */ -+ /* initialize class driver and other modules */ -+ fDRV = DRV_Init(); -+ fMGR = MGR_Init(); -+ fPROC = PROC_Init(); -+ fNODE = NODE_Init(); -+ fDISP = DISP_Init(); -+ fNTFY = NTFY_Init(); -+ fSTRM = STRM_Init(); -+ fRMM = RMM_init(); -+ fCHNL = CHNL_Init(); -+ fMSG = MSG_Init(); -+ fIO = IO_Init(); -+ fDEV = DEV_Init(); -+ fCOD = COD_Init(); -+ fInit = fSERVICES && fDRV && fDEV && fCHNL && fCOD && -+ fMSG && fIO; -+ fInit = fInit && fMGR && fPROC && fRMM; -+ if (!fInit) { -+ if (fSERVICES) -+ SERVICES_Exit(); -+ -+ if (fDRV) -+ DRV_Exit(); -+ -+ if (fMGR) -+ MGR_Exit(); -+ -+ if (fSTRM) -+ STRM_Exit(); -+ -+ if (fPROC) -+ PROC_Exit(); -+ -+ if (fNODE) -+ NODE_Exit(); -+ -+ if (fDISP) -+ DISP_Exit(); -+ -+ if (fNTFY) -+ NTFY_Exit(); -+ -+ if (fCHNL) -+ CHNL_Exit(); -+ -+ if (fMSG) -+ MSG_Exit(); -+ -+ if (fIO) -+ IO_Exit(); -+ -+ if (fDEV) -+ DEV_Exit(); -+ -+ if (fCOD) -+ COD_Exit(); -+ -+ if (fRMM) -+ RMM_exit(); -+ -+ } -+ } -+ if (fInit) -+ WCD_cRefs++; -+ -+ GT_1trace(WCD_debugMask, GT_5CLASS, -+ "Entered WCD_Init, ref count: 0x%x\n", WCD_cRefs); -+ return fInit; -+} -+ -+/* -+ * ======== WCD_InitComplete2 ======== -+ * Purpose: -+ * Perform any required WCD, and WMD initialization which -+ * cannot not be performed in WCD_Init() or DEV_StartDevice() due -+ * to the fact that some services are not yet -+ * completely initialized. -+ * Parameters: -+ * Returns: -+ * DSP_SOK: Allow this device to load -+ * DSP_EFAIL: Failure. -+ * Requires: -+ * WCD initialized. -+ * Ensures: -+ */ -+DSP_STATUS WCD_InitComplete2(void) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CFG_DEVNODE *DevNode; -+ struct DEV_OBJECT *hDevObject; -+ u32 devType; -+ -+ DBC_Require(WCD_cRefs > 0); -+ GT_0trace(WCD_debugMask, GT_ENTER, "Entered WCD_InitComplete\n"); -+ /* Walk the list of DevObjects, get each devnode, and attempting to -+ * autostart the board. Note that this requires COF loading, which -+ * requires KFILE. 
*/ -+ for (hDevObject = DEV_GetFirst(); hDevObject != NULL; -+ hDevObject = DEV_GetNext(hDevObject)) { -+ if (DSP_FAILED(DEV_GetDevNode(hDevObject, &DevNode))) -+ continue; -+ -+ if (DSP_FAILED(DEV_GetDevType(hDevObject, &devType))) -+ continue; -+ -+ if ((devType == DSP_UNIT) || (devType == IVA_UNIT)) { -+ if (DSP_FAILED(PROC_AutoStart(DevNode, hDevObject))) { -+ GT_0trace(WCD_debugMask, GT_1CLASS, -+ "WCD_InitComplete2 Failed\n"); -+ status = DSP_EFAIL; -+ /* break; */ -+ } -+ } else -+ GT_1trace(WCD_debugMask, GT_ENTER, -+ "Ignoring PROC_AutoStart " -+ "for Device Type = 0x%x \n", devType); -+ } /* End For Loop */ -+ GT_1trace(WCD_debugMask, GT_ENTER, -+ "Exiting WCD_InitComplete status 0x%x\n", status); -+ return status; -+} -+ -+/* -+ * ======== MGRWRAP_EnumNode_Info ======== -+ */ -+u32 MGRWRAP_EnumNode_Info(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u8 *pNDBProps; -+ u32 uNumNodes; -+ DSP_STATUS status = DSP_SOK; -+ u32 size = args->ARGS_MGR_ENUMNODE_INFO.uNDBPropsSize; -+ -+ GT_4trace(WCD_debugMask, GT_ENTER, -+ "MGR_EnumNodeInfo: entered args:\n0x%x" -+ " uNode: 0x%x\tpNDBProps: 0x%x\tuNDBPropsSize: " -+ "0x%x\tpuNumNodes\n", args->ARGS_MGR_ENUMNODE_INFO.uNode, -+ args->ARGS_MGR_ENUMNODE_INFO.pNDBProps, -+ args->ARGS_MGR_ENUMNODE_INFO.uNDBPropsSize, -+ args->ARGS_MGR_ENUMNODE_INFO.puNumNodes); -+ pNDBProps = MEM_Alloc(size, MEM_NONPAGED); -+ if (pNDBProps == NULL) -+ status = DSP_EMEMORY; -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = MGR_EnumNodeInfo(args->ARGS_MGR_ENUMNODE_INFO.uNode, -+ (struct DSP_NDBPROPS *)pNDBProps, -+ size, &uNumNodes); -+ } -+ cp_to_usr(args->ARGS_MGR_ENUMNODE_INFO.pNDBProps, pNDBProps, status, -+ size); -+ cp_to_usr(args->ARGS_MGR_ENUMNODE_INFO.puNumNodes, &uNumNodes, status, -+ 1); -+ if (pNDBProps) -+ MEM_Free(pNDBProps); -+ -+ return status; -+} -+ -+/* -+ * ======== MGRWRAP_EnumProc_Info ======== -+ */ -+u32 MGRWRAP_EnumProc_Info(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u8 *pProcessorInfo; -+ u32 uNumProcs; -+ DSP_STATUS status = DSP_SOK; -+ u32 size = args->ARGS_MGR_ENUMPROC_INFO.uProcessorInfoSize; -+ -+ GT_4trace(WCD_debugMask, GT_ENTER, -+ "MGRWRAP_EnumProc_Info: entered args:\n" -+ "0x%x uProcessor: 0x%x\tpProcessorInfo: 0x%x\t" -+ "uProcessorInfoSize: 0x%x\tpuNumProcs \n", -+ args->ARGS_MGR_ENUMPROC_INFO.uProcessor, -+ args->ARGS_MGR_ENUMPROC_INFO.pProcessorInfo, -+ args->ARGS_MGR_ENUMPROC_INFO.uProcessorInfoSize, -+ args->ARGS_MGR_ENUMPROC_INFO.puNumProcs); -+ pProcessorInfo = MEM_Alloc(size, MEM_NONPAGED); -+ if (pProcessorInfo == NULL) -+ status = DSP_EMEMORY; -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = MGR_EnumProcessorInfo(args-> -+ ARGS_MGR_ENUMPROC_INFO.uProcessor, -+ (struct DSP_PROCESSORINFO *)pProcessorInfo, -+ size, &uNumProcs); -+ } -+ cp_to_usr(args->ARGS_MGR_ENUMPROC_INFO.pProcessorInfo, pProcessorInfo, -+ status, size); -+ cp_to_usr(args->ARGS_MGR_ENUMPROC_INFO.puNumProcs, &uNumProcs, -+ status, 1); -+ if (pProcessorInfo) -+ MEM_Free(pProcessorInfo); -+ -+ return status; -+} -+ -+#define WRAP_MAP2CALLER(x) x -+/* -+ * ======== MGRWRAP_RegisterObject ======== -+ */ -+u32 MGRWRAP_RegisterObject(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ struct DSP_UUID pUuid; -+ u32 pathSize = 0; -+ char *pszPathName = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ cp_fm_usr(&pUuid, args->ARGS_MGR_REGISTEROBJECT.pUuid, status, 1); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* pathSize is increased by 1 to accommodate NULL */ -+ pathSize = strlen_user((char *) -+ args->ARGS_MGR_REGISTEROBJECT.pszPathName) + 1; -+ 
pszPathName = MEM_Alloc(pathSize, MEM_NONPAGED); -+ if (!pszPathName) -+ goto func_end; -+ retVal = strncpy_from_user(pszPathName, -+ (char *)args->ARGS_MGR_REGISTEROBJECT.pszPathName, -+ pathSize); -+ if (!retVal) { -+ status = DSP_EPOINTER; -+ goto func_end; -+ } -+ -+ GT_1trace(WCD_debugMask, GT_ENTER, -+ "MGRWRAP_RegisterObject: entered pg2hMsg " -+ "0x%x\n", args->ARGS_MGR_REGISTEROBJECT.pUuid); -+ status = DCD_RegisterObject(&pUuid, -+ args->ARGS_MGR_REGISTEROBJECT.objType, -+ (char *)pszPathName); -+func_end: -+ if (pszPathName) -+ MEM_Free(pszPathName); -+ return status; -+} -+ -+/* -+ * ======== MGRWRAP_UnregisterObject ======== -+ */ -+u32 MGRWRAP_UnregisterObject(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_UUID pUuid; -+ -+ cp_fm_usr(&pUuid, args->ARGS_MGR_REGISTEROBJECT.pUuid, status, 1); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ GT_1trace(WCD_debugMask, GT_ENTER, -+ "MGRWRAP_UnregisterObject: entered pg2hMsg" -+ " 0x%x\n", args->ARGS_MGR_UNREGISTEROBJECT.pUuid); -+ status = DCD_UnregisterObject(&pUuid, -+ args->ARGS_MGR_UNREGISTEROBJECT.objType); -+func_end: -+ return status; -+ -+} -+ -+/* -+ * ======== MGRWRAP_WaitForBridgeEvents ======== -+ */ -+u32 MGRWRAP_WaitForBridgeEvents(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK, real_status = DSP_SOK; -+ struct DSP_NOTIFICATION *aNotifications[MAX_EVENTS]; -+ struct DSP_NOTIFICATION notifications[MAX_EVENTS]; -+ u32 uIndex, i; -+ u32 uCount = args->ARGS_MGR_WAIT.uCount; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "MGRWRAP_WaitForBridgeEvents: entered\n"); -+ -+ if (uCount > MAX_EVENTS) -+ status = DSP_EINVALIDARG; -+ -+ /* get the array of pointers to user structures */ -+ cp_fm_usr(aNotifications, args->ARGS_MGR_WAIT.aNotifications, -+ status, uCount); -+ /* get the events */ -+ for (i = 0; i < uCount; i++) { -+ cp_fm_usr(¬ifications[i], aNotifications[i], status, 1); -+ if (DSP_SUCCEEDED(status)) { -+ /* set the array of pointers to kernel structures*/ -+ aNotifications[i] = ¬ifications[i]; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ real_status = MGR_WaitForBridgeEvents(aNotifications, uCount, -+ &uIndex, args->ARGS_MGR_WAIT.uTimeout); -+ } -+ cp_to_usr(args->ARGS_MGR_WAIT.puIndex, &uIndex, status, 1); -+ return real_status; -+} -+ -+ -+#ifndef RES_CLEANUP_DISABLE -+/* -+ * ======== MGRWRAP_GetProcessResourceInfo ======== -+ */ -+u32 MGRWRAP_GetProcessResourcesInfo(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 uSize = 0; -+ u8 *pBuf = MEM_Alloc(8092, MEM_NONPAGED); -+ status = DRV_ProcDisplayResInfo(pBuf, &uSize); -+ GT_1trace(WCD_debugMask, GT_ENTER, -+ "MGRWRAP_GetProcessResourcesInfo:uSize=%d :\n", uSize); -+ cp_to_usr(args->ARGS_PROC_GETTRACE.pBuf, pBuf, status, uSize); -+ GT_0trace(WCD_debugMask, GT_ENTER, "\n***********" -+ "123MGRWRAP_GetProcessResourcesInfo:**************\n"); -+ GT_0trace(WCD_debugMask, GT_ENTER, "\n***********" -+ "456MGRWRAP_GetProcessResourcesInfo:**************\n"); -+ cp_to_usr(args->ARGS_PROC_GETTRACE.pSize, &uSize, status, 1); -+ MEM_Free(pBuf); -+ return status; -+} -+#endif -+ -+ -+/* -+ * ======== PROCWRAP_Attach ======== -+ */ -+u32 PROCWRAP_Attach(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_HPROCESSOR processor; -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_PROCESSORATTRIN attrIn, *pAttrIn = NULL; -+ -+ GT_3trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_Attach: entered args:\n" "0x%x" -+ " uProcessor: 0x%x\tpAttrIn: 0x%x\tphProcessor \n", -+ 
args->ARGS_PROC_ATTACH.uProcessor, -+ args->ARGS_PROC_ATTACH.pAttrIn, -+ args->ARGS_PROC_ATTACH.phProcessor); -+ /* Optional argument */ -+ if (args->ARGS_PROC_ATTACH.pAttrIn) { -+ cp_fm_usr(&attrIn, args->ARGS_PROC_ATTACH.pAttrIn, status, 1); -+ if (DSP_SUCCEEDED(status)) -+ pAttrIn = &attrIn; -+ else -+ goto func_end; -+ -+ -+ } -+ status = PROC_Attach(args->ARGS_PROC_ATTACH.uProcessor, pAttrIn, -+ &processor, pr_ctxt); -+ cp_to_usr(args->ARGS_PROC_ATTACH.phProcessor, &processor, status, 1); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_Ctrl ======== -+ */ -+u32 PROCWRAP_Ctrl(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 cbDataSize, __user *pSize = (u32 __user *) -+ args->ARGS_PROC_CTRL.pArgs; -+ u8 *pArgs = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ GT_3trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_Ctrl: entered args:\n 0x%x" -+ " uProcessor: 0x%x\tdwCmd: 0x%x\tpArgs \n", -+ args->ARGS_PROC_CTRL.hProcessor, -+ args->ARGS_PROC_CTRL.dwCmd, -+ args->ARGS_PROC_CTRL.pArgs); -+ if (pSize) { -+ if (get_user(cbDataSize, pSize)) { -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ cbDataSize += sizeof(u32); -+ pArgs = MEM_Alloc(cbDataSize, MEM_NONPAGED); -+ if (pArgs == NULL) { -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ -+ cp_fm_usr(pArgs, args->ARGS_PROC_CTRL.pArgs, status, -+ cbDataSize); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = PROC_Ctrl(args->ARGS_PROC_CTRL.hProcessor, -+ args->ARGS_PROC_CTRL.dwCmd, -+ (struct DSP_CBDATA *)pArgs); -+ } -+ -+ /* cp_to_usr(args->ARGS_PROC_CTRL.pArgs, pArgs, status, 1);*/ -+ if (pArgs) -+ MEM_Free(pArgs); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_Detach ======== -+ */ -+u32 PROCWRAP_Detach(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_1trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_Detach: entered args\n0x%x " -+ "hProceesor \n", args->ARGS_PROC_DETACH.hProcessor); -+ retVal = PROC_Detach(args->ARGS_PROC_DETACH.hProcessor, pr_ctxt); -+ -+ return retVal; -+} -+ -+/* -+ * ======== PROCWRAP_EnumNode_Info ======== -+ */ -+u32 PROCWRAP_EnumNode_Info(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ DSP_HNODE aNodeTab[MAX_NODES]; -+ u32 uNumNodes; -+ u32 uAllocated; -+ -+ GT_5trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_EnumNode_Info:entered args:\n0x" -+ "%xhProcessor:0x%x\taNodeTab:0x%x\tuNodeTabSize:" -+ "%0x%x\tpuNumNodes%\n0x%x puAllocated: \n", -+ args->ARGS_PROC_ENUMNODE_INFO.hProcessor, -+ args->ARGS_PROC_ENUMNODE_INFO.aNodeTab, -+ args->ARGS_PROC_ENUMNODE_INFO.uNodeTabSize, -+ args->ARGS_PROC_ENUMNODE_INFO.puNumNodes, -+ args->ARGS_PROC_ENUMNODE_INFO.puAllocated); -+ DBC_Require(args->ARGS_PROC_ENUMNODE_INFO.uNodeTabSize <= MAX_NODES); -+ status = PROC_EnumNodes(args->ARGS_PROC_ENUMNODE_INFO.hProcessor, -+ aNodeTab, -+ args->ARGS_PROC_ENUMNODE_INFO.uNodeTabSize, -+ &uNumNodes, &uAllocated); -+ cp_to_usr(args->ARGS_PROC_ENUMNODE_INFO.aNodeTab, aNodeTab, status, -+ uNumNodes); -+ cp_to_usr(args->ARGS_PROC_ENUMNODE_INFO.puNumNodes, &uNumNodes, -+ status, 1); -+ cp_to_usr(args->ARGS_PROC_ENUMNODE_INFO.puAllocated, &uAllocated, -+ status, 1); -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_FlushMemory ======== -+ */ -+u32 PROCWRAP_FlushMemory(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_FlushMemory: entered\n"); -+ -+ status = PROC_FlushMemory(args->ARGS_PROC_FLUSHMEMORY.hProcessor, -+ args->ARGS_PROC_FLUSHMEMORY.pMpuAddr, -+ args->ARGS_PROC_FLUSHMEMORY.ulSize, -+ 
args->ARGS_PROC_FLUSHMEMORY.ulFlags); -+ return status; -+} -+ -+ -+/* -+ * ======== PROCWRAP_InvalidateMemory ======== -+ */ -+u32 PROCWRAP_InvalidateMemory(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_InvalidateMemory:entered\n"); -+ -+ status = PROC_InvalidateMemory( -+ args->ARGS_PROC_INVALIDATEMEMORY.hProcessor, -+ args->ARGS_PROC_INVALIDATEMEMORY.pMpuAddr, -+ args->ARGS_PROC_INVALIDATEMEMORY.ulSize); -+ return status; -+} -+ -+ -+/* -+ * ======== PROCWRAP_EnumResources ======== -+ */ -+u32 PROCWRAP_EnumResources(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_RESOURCEINFO pResourceInfo; -+ -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ GT_4trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_EnumResources: entered args:\n" -+ "0x%x hProcessor: 0x%x\tuResourceMask: 0x%x\tpResourceInfo" -+ " 0x%x\tuResourceInfoSixe \n", -+ args->ARGS_PROC_ENUMRESOURCES.hProcessor, -+ args->ARGS_PROC_ENUMRESOURCES.uResourceType, -+ args->ARGS_PROC_ENUMRESOURCES.pResourceInfo, -+ args->ARGS_PROC_ENUMRESOURCES.uResourceInfoSize); -+ status = PROC_GetResourceInfo(args->ARGS_PROC_ENUMRESOURCES.hProcessor, -+ args->ARGS_PROC_ENUMRESOURCES.uResourceType, -+ &pResourceInfo, -+ args->ARGS_PROC_ENUMRESOURCES.uResourceInfoSize); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ cp_to_usr(args->ARGS_PROC_ENUMRESOURCES.pResourceInfo, &pResourceInfo, -+ status, 1); -+func_end: -+ return status; -+ -+} -+ -+/* -+ * ======== PROCWRAP_GetState ======== -+ */ -+u32 PROCWRAP_GetState(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ struct DSP_PROCESSORSTATE procStatus; -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_GetState: entered\n"); -+ status = PROC_GetState(args->ARGS_PROC_GETSTATE.hProcessor, &procStatus, -+ args->ARGS_PROC_GETSTATE.uStateInfoSize); -+ cp_to_usr(args->ARGS_PROC_GETSTATE.pProcStatus, &procStatus, status, 1); -+ return status; -+ -+} -+ -+/* -+ * ======== PROCWRAP_GetTrace ======== -+ */ -+u32 PROCWRAP_GetTrace(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ u8 *pBuf; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_GetTrace: entered\n"); -+ -+ DBC_Require(args->ARGS_PROC_GETTRACE.uMaxSize <= MAX_TRACEBUFLEN); -+ -+ pBuf = MEM_Calloc(args->ARGS_PROC_GETTRACE.uMaxSize, MEM_NONPAGED); -+ if (pBuf != NULL) { -+ status = PROC_GetTrace(args->ARGS_PROC_GETTRACE.hProcessor, -+ pBuf, args->ARGS_PROC_GETTRACE.uMaxSize); -+ } else { -+ status = DSP_EMEMORY; -+ } -+ cp_to_usr(args->ARGS_PROC_GETTRACE.pBuf, pBuf, status, -+ args->ARGS_PROC_GETTRACE.uMaxSize); -+ if (pBuf) -+ MEM_Free(pBuf); -+ -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_Load ======== -+ */ -+u32 PROCWRAP_Load(union Trapped_Args *args, void *pr_ctxt) -+{ -+ s32 i, len; -+ DSP_STATUS status = DSP_SOK; -+ char *temp; -+ s32 count = args->ARGS_PROC_LOAD.iArgc; -+ u8 **argv, **envp = NULL; -+ -+ DBC_Require(count > 0); -+ DBC_Require(count <= MAX_LOADARGS); -+ -+ argv = MEM_Alloc(count * sizeof(u8 *), MEM_NONPAGED); -+ if (!argv) { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ -+ cp_fm_usr(argv, args->ARGS_PROC_LOAD.aArgv, status, count); -+ if (DSP_FAILED(status)) { -+ MEM_Free(argv); -+ argv = NULL; -+ goto func_cont; -+ } -+ -+ for (i = 0; i < count; i++) { -+ if (argv[i]) { -+ /* User space pointer to argument */ -+ temp = (char *) argv[i]; -+ /* len is increased by 1 to accommodate NULL */ -+ len = strlen_user((char *)temp) + 1; -+ /* Kernel space pointer to argument */ -+ 
argv[i] = MEM_Alloc(len, MEM_NONPAGED); -+ if (argv[i]) { -+ cp_fm_usr(argv[i], temp, status, len); -+ if (DSP_FAILED(status)) { -+ MEM_Free(argv[i]); -+ argv[i] = NULL; -+ goto func_cont; -+ } -+ } else { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ } -+ } -+ /* TODO: validate this */ -+ if (args->ARGS_PROC_LOAD.aEnvp) { -+ /* number of elements in the envp array including NULL */ -+ count = 0; -+ do { -+ get_user(temp, args->ARGS_PROC_LOAD.aEnvp + count); -+ count++; -+ } while (temp); -+ envp = MEM_Alloc(count * sizeof(u8 *), MEM_NONPAGED); -+ if (!envp) { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ -+ cp_fm_usr(envp, args->ARGS_PROC_LOAD.aEnvp, status, count); -+ if (DSP_FAILED(status)) { -+ MEM_Free(envp); -+ envp = NULL; -+ goto func_cont; -+ } -+ for (i = 0; envp[i]; i++) { -+ /* User space pointer to argument */ -+ temp = (char *)envp[i]; -+ /* len is increased by 1 to accommodate NULL */ -+ len = strlen_user((char *)temp) + 1; -+ /* Kernel space pointer to argument */ -+ envp[i] = MEM_Alloc(len, MEM_NONPAGED); -+ if (envp[i]) { -+ cp_fm_usr(envp[i], temp, status, len); -+ if (DSP_FAILED(status)) { -+ MEM_Free(envp[i]); -+ envp[i] = NULL; -+ goto func_cont; -+ } -+ } else { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ } -+ } -+ GT_5trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_Load, hProcessor: 0x%x\n\tiArgc:" -+ "0x%x\n\taArgv: 0x%x\n\taArgv[0]: %s\n\taEnvp: 0x%0x\n", -+ args->ARGS_PROC_LOAD.hProcessor, -+ args->ARGS_PROC_LOAD.iArgc, args->ARGS_PROC_LOAD.aArgv, -+ argv[0], args->ARGS_PROC_LOAD.aEnvp); -+ if (DSP_SUCCEEDED(status)) { -+ status = PROC_Load(args->ARGS_PROC_LOAD.hProcessor, -+ args->ARGS_PROC_LOAD.iArgc, -+ (CONST char **)argv, (CONST char **)envp); -+ } -+func_cont: -+ if (envp) { -+ i = 0; -+ while (envp[i]) -+ MEM_Free(envp[i++]); -+ -+ MEM_Free(envp); -+ } -+ -+ if (argv) { -+ count = args->ARGS_PROC_LOAD.iArgc; -+ for (i = 0; (i < count) && argv[i]; i++) -+ MEM_Free(argv[i]); -+ -+ MEM_Free(argv); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_Map ======== -+ */ -+u32 PROCWRAP_Map(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ void *pMapAddr; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_Map: entered\n"); -+ status = PROC_Map(args->ARGS_PROC_MAPMEM.hProcessor, -+ args->ARGS_PROC_MAPMEM.pMpuAddr, -+ args->ARGS_PROC_MAPMEM.ulSize, -+ args->ARGS_PROC_MAPMEM.pReqAddr, &pMapAddr, -+ args->ARGS_PROC_MAPMEM.ulMapAttr, pr_ctxt); -+ if (DSP_SUCCEEDED(status)) { -+ if (put_user(pMapAddr, args->ARGS_PROC_MAPMEM.ppMapAddr)) -+ status = DSP_EINVALIDARG; -+ -+ } -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_RegisterNotify ======== -+ */ -+u32 PROCWRAP_RegisterNotify(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ struct DSP_NOTIFICATION notification; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_RegisterNotify: entered\n"); -+ -+ /* Initialize the notification data structure */ -+ notification.psName = NULL; -+ notification.handle = NULL; -+ -+ status = PROC_RegisterNotify(args->ARGS_PROC_REGISTER_NOTIFY.hProcessor, -+ args->ARGS_PROC_REGISTER_NOTIFY.uEventMask, -+ args->ARGS_PROC_REGISTER_NOTIFY.uNotifyType, -+ ¬ification); -+ cp_to_usr(args->ARGS_PROC_REGISTER_NOTIFY.hNotification, ¬ification, -+ status, 1); -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_ReserveMemory ======== -+ */ -+u32 PROCWRAP_ReserveMemory(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ void *pRsvAddr; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_ReserveMemory: 
entered\n"); -+ status = PROC_ReserveMemory(args->ARGS_PROC_RSVMEM.hProcessor, -+ args->ARGS_PROC_RSVMEM.ulSize, &pRsvAddr); -+ if (put_user(pRsvAddr, args->ARGS_PROC_RSVMEM.ppRsvAddr)) -+ status = DSP_EINVALIDARG; -+ -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_Start ======== -+ */ -+u32 PROCWRAP_Start(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_Start: entered\n"); -+ retVal = PROC_Start(args->ARGS_PROC_START.hProcessor); -+ return retVal; -+} -+ -+/* -+ * ======== PROCWRAP_UnMap ======== -+ */ -+u32 PROCWRAP_UnMap(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_UnMap: entered\n"); -+ status = PROC_UnMap(args->ARGS_PROC_UNMAPMEM.hProcessor, -+ args->ARGS_PROC_UNMAPMEM.pMapAddr, pr_ctxt); -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_UnReserveMemory ======== -+ */ -+u32 PROCWRAP_UnReserveMemory(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "PROCWRAP_UnReserveMemory: entered\n"); -+ status = PROC_UnReserveMemory(args->ARGS_PROC_UNRSVMEM.hProcessor, -+ args->ARGS_PROC_UNRSVMEM.pRsvAddr); -+ return status; -+} -+ -+/* -+ * ======== PROCWRAP_Stop ======== -+ */ -+u32 PROCWRAP_Stop(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "PROCWRAP_Stop: entered\n"); -+ retVal = PROC_Stop(args->ARGS_PROC_STOP.hProcessor); -+ -+ return retVal; -+} -+ -+/* -+ * ======== NODEWRAP_Allocate ======== -+ */ -+u32 NODEWRAP_Allocate(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_UUID nodeId; -+ u32 cbDataSize = 0; -+ u32 __user *pSize = (u32 __user *)args->ARGS_NODE_ALLOCATE.pArgs; -+ u8 *pArgs = NULL; -+ struct DSP_NODEATTRIN attrIn, *pAttrIn = NULL; -+ struct NODE_OBJECT *hNode; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Allocate: entered\n"); -+ -+ /* Optional argument */ -+ if (pSize) { -+ if (get_user(cbDataSize, pSize)) -+ status = DSP_EFAIL; -+ -+ cbDataSize += sizeof(u32); -+ if (DSP_SUCCEEDED(status)) { -+ pArgs = MEM_Alloc(cbDataSize, MEM_NONPAGED); -+ if (pArgs == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ cp_fm_usr(pArgs, args->ARGS_NODE_ALLOCATE.pArgs, status, -+ cbDataSize); -+ } -+ cp_fm_usr(&nodeId, args->ARGS_NODE_ALLOCATE.pNodeID, status, 1); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ /* Optional argument */ -+ if (args->ARGS_NODE_ALLOCATE.pAttrIn) { -+ cp_fm_usr(&attrIn, args->ARGS_NODE_ALLOCATE.pAttrIn, status, 1); -+ if (DSP_SUCCEEDED(status)) -+ pAttrIn = &attrIn; -+ else -+ status = DSP_EMEMORY; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = NODE_Allocate(args->ARGS_NODE_ALLOCATE.hProcessor, -+ &nodeId, (struct DSP_CBDATA *)pArgs, -+ pAttrIn, &hNode, pr_ctxt); -+ } -+ cp_to_usr(args->ARGS_NODE_ALLOCATE.phNode, &hNode, status, 1); -+func_cont: -+ if (pArgs) -+ MEM_Free(pArgs); -+ -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_AllocMsgBuf ======== -+ */ -+u32 NODEWRAP_AllocMsgBuf(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_BUFFERATTR *pAttr = NULL; -+ struct DSP_BUFFERATTR attr; -+ u8 *pBuffer = NULL; -+ -+ if (args->ARGS_NODE_ALLOCMSGBUF.pAttr) { /* Optional argument */ -+ cp_fm_usr(&attr, args->ARGS_NODE_ALLOCMSGBUF.pAttr, status, 1); -+ if (DSP_SUCCEEDED(status)) -+ pAttr = &attr; -+ -+ } -+ /* IN OUT argument */ -+ cp_fm_usr(&pBuffer, args->ARGS_NODE_ALLOCMSGBUF.pBuffer, status, 1); -+ if 
(DSP_SUCCEEDED(status)) { -+ status = NODE_AllocMsgBuf(args->ARGS_NODE_ALLOCMSGBUF.hNode, -+ args->ARGS_NODE_ALLOCMSGBUF.uSize, -+ pAttr, &pBuffer); -+ } -+ cp_to_usr(args->ARGS_NODE_ALLOCMSGBUF.pBuffer, &pBuffer, status, 1); -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_ChangePriority ======== -+ */ -+u32 NODEWRAP_ChangePriority(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "NODEWRAP_ChangePriority: entered\n"); -+ retVal = NODE_ChangePriority(args->ARGS_NODE_CHANGEPRIORITY.hNode, -+ args->ARGS_NODE_CHANGEPRIORITY.iPriority); -+ -+ return retVal; -+} -+ -+/* -+ * ======== NODEWRAP_Connect ======== -+ */ -+u32 NODEWRAP_Connect(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_STRMATTR attrs; -+ struct DSP_STRMATTR *pAttrs = NULL; -+ u32 cbDataSize; -+ u32 __user *pSize = (u32 __user *)args->ARGS_NODE_CONNECT.pConnParam; -+ u8 *pArgs = NULL; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Connect: entered\n"); -+ -+ /* Optional argument */ -+ if (pSize) { -+ if (get_user(cbDataSize, pSize)) -+ status = DSP_EFAIL; -+ -+ cbDataSize += sizeof(u32); -+ if (DSP_SUCCEEDED(status)) { -+ pArgs = MEM_Alloc(cbDataSize, MEM_NONPAGED); -+ if (pArgs == NULL) { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ -+ } -+ cp_fm_usr(pArgs, args->ARGS_NODE_CONNECT.pConnParam, status, -+ cbDataSize); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ } -+ if (args->ARGS_NODE_CONNECT.pAttrs) { /* Optional argument */ -+ cp_fm_usr(&attrs, args->ARGS_NODE_CONNECT.pAttrs, status, 1); -+ if (DSP_SUCCEEDED(status)) -+ pAttrs = &attrs; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = NODE_Connect(args->ARGS_NODE_CONNECT.hNode, -+ args->ARGS_NODE_CONNECT.uStream, -+ args->ARGS_NODE_CONNECT.hOtherNode, -+ args->ARGS_NODE_CONNECT.uOtherStream, -+ pAttrs, (struct DSP_CBDATA *)pArgs); -+ } -+func_cont: -+ if (pArgs) -+ MEM_Free(pArgs); -+ -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_Create ======== -+ */ -+u32 NODEWRAP_Create(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Create: entered\n"); -+ retVal = NODE_Create(args->ARGS_NODE_CREATE.hNode); -+ -+ return retVal; -+} -+ -+/* -+ * ======== NODEWRAP_Delete ======== -+ */ -+u32 NODEWRAP_Delete(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Delete: entered\n"); -+ retVal = NODE_Delete(args->ARGS_NODE_DELETE.hNode, pr_ctxt); -+ -+ return retVal; -+} -+ -+/* -+ * ======== NODEWRAP_FreeMsgBuf ======== -+ */ -+u32 NODEWRAP_FreeMsgBuf(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_BUFFERATTR *pAttr = NULL; -+ struct DSP_BUFFERATTR attr; -+ if (args->ARGS_NODE_FREEMSGBUF.pAttr) { /* Optional argument */ -+ cp_fm_usr(&attr, args->ARGS_NODE_FREEMSGBUF.pAttr, status, 1); -+ if (DSP_SUCCEEDED(status)) -+ pAttr = &attr; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = NODE_FreeMsgBuf(args->ARGS_NODE_FREEMSGBUF.hNode, -+ args->ARGS_NODE_FREEMSGBUF.pBuffer, -+ pAttr); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_GetAttr ======== -+ */ -+u32 NODEWRAP_GetAttr(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_NODEATTR attr; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_GetAttr: entered\n"); -+ -+ status = NODE_GetAttr(args->ARGS_NODE_GETATTR.hNode, &attr, -+ args->ARGS_NODE_GETATTR.uAttrSize); -+ cp_to_usr(args->ARGS_NODE_GETATTR.pAttr, 
&attr, status, 1); -+ -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_GetMessage ======== -+ */ -+u32 NODEWRAP_GetMessage(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ struct DSP_MSG msg; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_GetMessage: entered\n"); -+ -+ status = NODE_GetMessage(args->ARGS_NODE_GETMESSAGE.hNode, &msg, -+ args->ARGS_NODE_GETMESSAGE.uTimeout); -+ -+ cp_to_usr(args->ARGS_NODE_GETMESSAGE.pMessage, &msg, status, 1); -+ -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_Pause ======== -+ */ -+u32 NODEWRAP_Pause(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Pause: entered\n"); -+ retVal = NODE_Pause(args->ARGS_NODE_PAUSE.hNode); -+ -+ return retVal; -+} -+ -+/* -+ * ======== NODEWRAP_PutMessage ======== -+ */ -+u32 NODEWRAP_PutMessage(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_MSG msg; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_PutMessage: entered\n"); -+ -+ cp_fm_usr(&msg, args->ARGS_NODE_PUTMESSAGE.pMessage, status, 1); -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = NODE_PutMessage(args->ARGS_NODE_PUTMESSAGE.hNode, &msg, -+ args->ARGS_NODE_PUTMESSAGE.uTimeout); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_RegisterNotify ======== -+ */ -+u32 NODEWRAP_RegisterNotify(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_NOTIFICATION notification; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "NODEWRAP_RegisterNotify: entered\n"); -+ -+ /* Initialize the notification data structure */ -+ notification.psName = NULL; -+ notification.handle = NULL; -+ -+ status = NODE_RegisterNotify(args->ARGS_NODE_REGISTERNOTIFY.hNode, -+ args->ARGS_NODE_REGISTERNOTIFY.uEventMask, -+ args->ARGS_NODE_REGISTERNOTIFY.uNotifyType, -+ ¬ification); -+ cp_to_usr(args->ARGS_NODE_REGISTERNOTIFY.hNotification, ¬ification, -+ status, 1); -+ return status; -+} -+ -+/* -+ * ======== NODEWRAP_Run ======== -+ */ -+u32 NODEWRAP_Run(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Run: entered\n"); -+ retVal = NODE_Run(args->ARGS_NODE_RUN.hNode); -+ -+ return retVal; -+} -+ -+/* -+ * ======== NODEWRAP_Terminate ======== -+ */ -+u32 NODEWRAP_Terminate(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ DSP_STATUS tempstatus; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, "NODEWRAP_Terminate: entered\n"); -+ -+ status = NODE_Terminate(args->ARGS_NODE_TERMINATE.hNode, &tempstatus); -+ -+ cp_to_usr(args->ARGS_NODE_TERMINATE.pStatus, &tempstatus, status, 1); -+ -+ return status; -+} -+ -+ -+/* -+ * ======== NODEWRAP_GetUUIDProps ======== -+ */ -+u32 NODEWRAP_GetUUIDProps(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_UUID nodeId; -+ struct DSP_NDBPROPS *pnodeProps = NULL; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "NODEWRAP_GetUUIDPropste: entered\n"); -+ -+ -+ cp_fm_usr(&nodeId, args->ARGS_NODE_GETUUIDPROPS.pNodeID, status, 1); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ pnodeProps = MEM_Alloc(sizeof(struct DSP_NDBPROPS), MEM_NONPAGED); -+ if (pnodeProps != NULL) { -+ status = NODE_GetUUIDProps(args-> -+ ARGS_NODE_GETUUIDPROPS.hProcessor, -+ &nodeId, pnodeProps); -+ cp_to_usr(args->ARGS_NODE_GETUUIDPROPS.pNodeProps, pnodeProps, -+ status, 1); -+ } else -+ status = DSP_EMEMORY; -+func_cont: -+ if (pnodeProps) -+ MEM_Free(pnodeProps); -+ return status; -+} -+ -+/* -+ * ======== 
STRMWRAP_AllocateBuffer ======== -+ */ -+u32 STRMWRAP_AllocateBuffer(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status; -+ u8 **apBuffer = NULL; -+ u32 uNumBufs = args->ARGS_STRM_ALLOCATEBUFFER.uNumBufs; -+ -+ DBC_Require(uNumBufs <= MAX_BUFS); -+ -+ apBuffer = MEM_Alloc((uNumBufs * sizeof(u8 *)), MEM_NONPAGED); -+ -+ status = STRM_AllocateBuffer(args->ARGS_STRM_ALLOCATEBUFFER.hStream, -+ args->ARGS_STRM_ALLOCATEBUFFER.uSize, -+ apBuffer, uNumBufs, pr_ctxt); -+ cp_to_usr(args->ARGS_STRM_ALLOCATEBUFFER.apBuffer, apBuffer, status, -+ uNumBufs); -+ if (apBuffer) -+ MEM_Free(apBuffer); -+ -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_Close ======== -+ */ -+u32 STRMWRAP_Close(union Trapped_Args *args, void *pr_ctxt) -+{ -+ return STRM_Close(args->ARGS_STRM_CLOSE.hStream, pr_ctxt); -+} -+ -+/* -+ * ======== STRMWRAP_FreeBuffer ======== -+ */ -+u32 STRMWRAP_FreeBuffer(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u8 **apBuffer = NULL; -+ u32 uNumBufs = args->ARGS_STRM_FREEBUFFER.uNumBufs; -+ -+ DBC_Require(uNumBufs <= MAX_BUFS); -+ -+ apBuffer = MEM_Alloc((uNumBufs * sizeof(u8 *)), MEM_NONPAGED); -+ -+ cp_fm_usr(apBuffer, args->ARGS_STRM_FREEBUFFER.apBuffer, status, -+ uNumBufs); -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = STRM_FreeBuffer(args->ARGS_STRM_FREEBUFFER.hStream, -+ apBuffer, uNumBufs, pr_ctxt); -+ } -+ cp_to_usr(args->ARGS_STRM_FREEBUFFER.apBuffer, apBuffer, status, -+ uNumBufs); -+ if (apBuffer) -+ MEM_Free(apBuffer); -+ -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_GetEventHandle ======== -+ */ -+u32 STRMWRAP_GetEventHandle(union Trapped_Args *args, void *pr_ctxt) -+{ -+ return DSP_ENOTIMPL; -+} -+ -+/* -+ * ======== STRMWRAP_GetInfo ======== -+ */ -+u32 STRMWRAP_GetInfo(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_INFO strmInfo; -+ struct DSP_STREAMINFO user; -+ struct DSP_STREAMINFO *temp; -+ -+ cp_fm_usr(&strmInfo, args->ARGS_STRM_GETINFO.pStreamInfo, status, 1); -+ temp = strmInfo.pUser; -+ -+ strmInfo.pUser = &user; -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = STRM_GetInfo(args->ARGS_STRM_GETINFO.hStream, -+ &strmInfo, args->ARGS_STRM_GETINFO.uStreamInfoSize); -+ } -+ cp_to_usr(temp, strmInfo.pUser, status, 1); -+ strmInfo.pUser = temp; -+ cp_to_usr(args->ARGS_STRM_GETINFO.pStreamInfo, &strmInfo, status, 1); -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_Idle ======== -+ */ -+u32 STRMWRAP_Idle(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 retVal; -+ -+ retVal = STRM_Idle(args->ARGS_STRM_IDLE.hStream, -+ args->ARGS_STRM_IDLE.bFlush); -+ -+ return retVal; -+} -+ -+/* -+ * ======== STRMWRAP_Issue ======== -+ */ -+u32 STRMWRAP_Issue(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ /* No need of doing cp_fm_usr for the user buffer (pBuffer) -+ as this is done in Bridge internal function WMD_CHNL_AddIOReq -+ in chnl_sm.c */ -+ status = STRM_Issue(args->ARGS_STRM_ISSUE.hStream, -+ args->ARGS_STRM_ISSUE.pBuffer, -+ args->ARGS_STRM_ISSUE.dwBytes, -+ args->ARGS_STRM_ISSUE.dwBufSize, -+ args->ARGS_STRM_ISSUE.dwArg); -+ -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_Open ======== -+ */ -+u32 STRMWRAP_Open(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_ATTR attr; -+ struct STRM_OBJECT *pStrm; -+ struct DSP_STREAMATTRIN strmAttrIn; -+ -+ cp_fm_usr(&attr, args->ARGS_STRM_OPEN.pAttrIn, status, 1); -+ -+ if (attr.pStreamAttrIn != NULL) { /* Optional argument */ -+ cp_fm_usr(&strmAttrIn, 
attr.pStreamAttrIn, status, 1); -+ if (DSP_SUCCEEDED(status)) -+ attr.pStreamAttrIn = &strmAttrIn; -+ -+ } -+ status = STRM_Open(args->ARGS_STRM_OPEN.hNode, -+ args->ARGS_STRM_OPEN.uDirection, -+ args->ARGS_STRM_OPEN.uIndex, &attr, &pStrm, -+ pr_ctxt); -+ cp_to_usr(args->ARGS_STRM_OPEN.phStream, &pStrm, status, 1); -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_Reclaim ======== -+ */ -+u32 STRMWRAP_Reclaim(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u8 *pBufPtr; -+ u32 ulBytes; -+ u32 dwArg; -+ u32 ulBufSize; -+ -+ status = STRM_Reclaim(args->ARGS_STRM_RECLAIM.hStream, &pBufPtr, -+ &ulBytes, &ulBufSize, &dwArg); -+ cp_to_usr(args->ARGS_STRM_RECLAIM.pBufPtr, &pBufPtr, status, 1); -+ cp_to_usr(args->ARGS_STRM_RECLAIM.pBytes, &ulBytes, status, 1); -+ cp_to_usr(args->ARGS_STRM_RECLAIM.pdwArg, &dwArg, status, 1); -+ -+ if (args->ARGS_STRM_RECLAIM.pBufSize != NULL) { -+ cp_to_usr(args->ARGS_STRM_RECLAIM.pBufSize, &ulBufSize, -+ status, 1); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_RegisterNotify ======== -+ */ -+u32 STRMWRAP_RegisterNotify(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_NOTIFICATION notification; -+ -+ GT_0trace(WCD_debugMask, GT_ENTER, -+ "NODEWRAP_RegisterNotify: entered\n"); -+ -+ /* Initialize the notification data structure */ -+ notification.psName = NULL; -+ notification.handle = NULL; -+ -+ status = STRM_RegisterNotify(args->ARGS_STRM_REGISTERNOTIFY.hStream, -+ args->ARGS_STRM_REGISTERNOTIFY.uEventMask, -+ args->ARGS_STRM_REGISTERNOTIFY.uNotifyType, -+ ¬ification); -+ cp_to_usr(args->ARGS_STRM_REGISTERNOTIFY.hNotification, ¬ification, -+ status, 1); -+ -+ return status; -+} -+ -+/* -+ * ======== STRMWRAP_Select ======== -+ */ -+u32 STRMWRAP_Select(union Trapped_Args *args, void *pr_ctxt) -+{ -+ u32 mask; -+ struct STRM_OBJECT *aStrmTab[MAX_STREAMS]; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(args->ARGS_STRM_SELECT.nStreams <= MAX_STREAMS); -+ -+ cp_fm_usr(aStrmTab, args->ARGS_STRM_SELECT.aStreamTab, status, -+ args->ARGS_STRM_SELECT.nStreams); -+ if (DSP_SUCCEEDED(status)) { -+ status = STRM_Select(aStrmTab, args->ARGS_STRM_SELECT.nStreams, -+ &mask, args->ARGS_STRM_SELECT.uTimeout); -+ } -+ cp_to_usr(args->ARGS_STRM_SELECT.pMask, &mask, status, 1); -+ return status; -+} -+ -+/* CMM */ -+ -+/* -+ * ======== CMMWRAP_CallocBuf ======== -+ */ -+u32 CMMWRAP_CallocBuf(union Trapped_Args *args, void *pr_ctxt) -+{ -+ /* This operation is done in kernel */ -+ return DSP_ENOTIMPL; -+} -+ -+/* -+ * ======== CMMWRAP_FreeBuf ======== -+ */ -+u32 CMMWRAP_FreeBuf(union Trapped_Args *args, void *pr_ctxt) -+{ -+ /* This operation is done in kernel */ -+ return DSP_ENOTIMPL; -+} -+ -+/* -+ * ======== CMMWRAP_GetHandle ======== -+ */ -+u32 CMMWRAP_GetHandle(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_OBJECT *hCmmMgr; -+ -+ status = CMM_GetHandle(args->ARGS_CMM_GETHANDLE.hProcessor, &hCmmMgr); -+ -+ cp_to_usr(args->ARGS_CMM_GETHANDLE.phCmmMgr, &hCmmMgr, status, 1); -+ -+ return status; -+} -+ -+/* -+ * ======== CMMWRAP_GetInfo ======== -+ */ -+u32 CMMWRAP_GetInfo(union Trapped_Args *args, void *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_INFO cmmInfo; -+ -+ status = CMM_GetInfo(args->ARGS_CMM_GETINFO.hCmmMgr, &cmmInfo); -+ -+ cp_to_usr(args->ARGS_CMM_GETINFO.pCmmInfo, &cmmInfo, status, 1); -+ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dbdcd.c 
linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/dbdcd.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dbdcd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/dbdcd.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,1573 @@ -+/* -+ * dbdcd.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== dbdcd.c ======== -+ * Description: -+ * This file contains the implementation of the DSP/BIOS Bridge -+ * Configuration Database (DCD). -+ * -+ * Notes: -+ * The fxn DCD_GetObjects can apply a callback fxn to each DCD object -+ * that is located in a specified COFF file. At the moment, -+ * DCD_AutoRegister, DCD_AutoUnregister, and NLDR module all use -+ * DCD_GetObjects. -+ * -+ *! Revision History -+ *! ================ -+ *! 03-Dec-2003 map Changed DCD_OBJTYPE to DSP_DCDOBJTYPE -+ *! 17-Dec-2002 map Modified DCD_GetDepLibs, DCD_GetNumDepLibs, GetDepLibInfo -+ *! to include phase information -+ *! 02-Dec-2002 map Modified DCD_GetLibraryName for phases in different -+ *! libraries -+ *! 26-Feb-2003 kc Updated DCD_AutoUnregister and DCD_GetObjects to simplify -+ *! DCD implementation. -+ *! 17-Jul-2002 jeh Call COD_Open() instead of COD_OpenBase(), call COD_Close() -+ *! 11-Jul-2002 jeh Added DCD_GetDepLibs(), DCD_GetNumDepLibs(). -+ *! 18-Feb-2003 vp Code review updates -+ *! 18-Oct-2002 vp Ported to Linux platform -+ *! 15-Mar-2002 jeh Read dynamic loading memory requirements into node object -+ *! data. Added DCD_GetLibraryName(). -+ *! 13-Feb-2002 jeh Get system stack size in GetAttrsFromBuf(). -+ *! 01-Aug-2001 ag: Added check for PROC "extended" attributes used for -+ *! DSP-MMU setup. These are private attributes. -+ *! 18-Apr-2001 jeh Use COD_OpenBase instead of COD_LoadBase. -+ *! 03-Apr-2001 sg: Changed error names to DSP_EDCD* format. -+ *! 11-Jan-2001 jeh Changes to DCD_GetObjectDef to match node.cdb, proc.cdb. -+ *! 12-Dec-2000 kc: Added DCD_AutoUnregister. MSGNODE, DAISNODE added in -+ *! GetAttrsFromBuf -+ *! 22-Nov-2000 kc: Replaced sprintf() calls with strncat. -+ *! 09-Nov-2000 kc: Optimized DCD module. -+ *! 30-Oct-2000 kc: Added DCD_AutoRegister function; changed local var. names. -+ *! 29-Sep-2000 kc: Added code review changes (src/reviews/dcd_reviews.txt). -+ *! 06-Sep-2000 jeh Get message segid, message notification type. Added Atoi() -+ *! to replace atoi(), until cdb generation can output in -+ *! decimal format. -+ *! 26-Jul-2000 kc: Created. -+ *! 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Global defines. */ -+#define SIGNATURE 0x5f444344 /* "DCD_" (in reverse). */ -+ -+#define IsValidHandle(h) (((h) != NULL) && (h->dwSignature == SIGNATURE)) -+ -+#define MAX_INT2CHAR_LENGTH 16 /* Maximum int2char len of 32 bit int. */ -+ -+/* Name of section containing dependent libraries */ -+#define DEPLIBSECT ".dspbridge_deplibs" -+ -+/* DCD specific structures. */ -+struct DCD_MANAGER { -+ u32 dwSignature; /* Used for object validation. */ -+ struct COD_MANAGER *hCodMgr; /* Handle to COD manager object. */ -+}; -+ -+/* Global reference variables. */ -+static u32 cRefs; -+static u32 cEnumRefs; -+ -+extern struct GT_Mask curTrace; -+ -+/* helper function prototypes. */ -+static s32 Atoi(char *pszBuf); -+ -+static DSP_STATUS GetAttrsFromBuf(char *pszBuf, u32 ulBufSize, -+ enum DSP_DCDOBJTYPE objType, -+ struct DCD_GENERICOBJ *pGenObj); -+ -+static void CompressBuf(char *pszBuf, u32 ulBufSize, s32 cCharSize); -+ -+static char DspChar2GppChar(char *pWord, s32 cDspCharSize); -+ -+static DSP_STATUS GetDepLibInfo(IN struct DCD_MANAGER *hDcdMgr, -+ IN struct DSP_UUID *pUuid, -+ IN OUT u16 *pNumLibs, -+ OPTIONAL OUT u16 *pNumPersLibs, -+ OPTIONAL OUT struct DSP_UUID *pDepLibUuids, -+ OPTIONAL OUT bool *pPersistentDepLibs, -+ IN enum NLDR_PHASE phase); -+ -+/* -+ * ======== DCD_AutoRegister ======== -+ * Purpose: -+ * Parses the supplied image and resigsters with DCD. -+ */ -+ -+DSP_STATUS DCD_AutoRegister(IN struct DCD_MANAGER *hDcdMgr, -+ IN char *pszCoffPath) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_AutoRegister: hDcdMgr 0x%x\n", -+ hDcdMgr); -+ -+ if (IsValidHandle(hDcdMgr)) { -+ status = DCD_GetObjects(hDcdMgr, pszCoffPath, -+ (DCD_REGISTERFXN)DCD_RegisterObject, -+ (void *)pszCoffPath); -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_AutoRegister: invalid DCD manager handle.\n"); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_AutoUnregister ======== -+ * Purpose: -+ * Parses the supplied DSP image and unresiters from DCD. -+ */ -+DSP_STATUS DCD_AutoUnregister(IN struct DCD_MANAGER *hDcdMgr, -+ IN char *pszCoffPath) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_AutoUnregister: hDcdMgr 0x%x\n", -+ hDcdMgr); -+ -+ if (IsValidHandle(hDcdMgr)) { -+ status = DCD_GetObjects(hDcdMgr, pszCoffPath, -+ (DCD_REGISTERFXN)DCD_RegisterObject, -+ NULL); -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_AutoUnregister: invalid DCD manager" -+ " handle.\n"); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_CreateManager ======== -+ * Purpose: -+ * Creates DCD manager. 
-+ */ -+DSP_STATUS DCD_CreateManager(IN char *pszZlDllName, -+ OUT struct DCD_MANAGER **phDcdMgr) -+{ -+ struct COD_MANAGER *hCodMgr; /* COD manager handle */ -+ struct DCD_MANAGER *pDcdMgr = NULL; /* DCD Manager pointer */ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs >= 0); -+ DBC_Require(phDcdMgr); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_CreateManager: phDcdMgr 0x%x\n", -+ phDcdMgr); -+ -+ status = COD_Create(&hCodMgr, pszZlDllName, NULL); -+ if (DSP_SUCCEEDED(status)) { -+ -+ /* Create a DCD object. */ -+ MEM_AllocObject(pDcdMgr, struct DCD_MANAGER, SIGNATURE); -+ if (pDcdMgr != NULL) { -+ -+ /* Fill out the object. */ -+ pDcdMgr->hCodMgr = hCodMgr; -+ -+ /* Return handle to this DCD interface. */ -+ *phDcdMgr = pDcdMgr; -+ -+ GT_2trace(curTrace, GT_5CLASS, -+ "DCD_CreateManager: pDcdMgr 0x%x, " -+ " hCodMgr 0x%x", pDcdMgr, hCodMgr); -+ } else { -+ status = DSP_EMEMORY; -+ -+ /* -+ * If allocation of DcdManager object failed, delete the -+ * COD manager. -+ */ -+ COD_Delete(hCodMgr); -+ -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_CreateManager: MEM_AllocObject failed\n"); -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_CreateManager: COD_Create failed\n"); -+ } -+ -+ DBC_Ensure((DSP_SUCCEEDED(status)) || ((hCodMgr == NULL) && -+ (status == DSP_EFAIL)) || ((pDcdMgr == NULL) && -+ (status == DSP_EMEMORY))); -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_DestroyManager ======== -+ * Purpose: -+ * Frees DCD Manager object. -+ */ -+DSP_STATUS DCD_DestroyManager(IN struct DCD_MANAGER *hDcdMgr) -+{ -+ struct DCD_MANAGER *pDcdMgr = hDcdMgr; -+ DSP_STATUS status = DSP_EHANDLE; -+ -+ DBC_Require(cRefs >= 0); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_DestroyManager: hDcdMgr 0x%x\n", -+ hDcdMgr); -+ -+ if (IsValidHandle(hDcdMgr)) { -+ -+ /* Delete the COD manager. */ -+ COD_Delete(pDcdMgr->hCodMgr); -+ -+ /* Deallocate a DCD manager object. */ -+ MEM_FreeObject(pDcdMgr); -+ -+ status = DSP_SOK; -+ } else { -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_DestroyManager: invalid DCD manager handle.\n"); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_EnumerateObject ======== -+ * Purpose: -+ * Enumerates objects in the DCD. -+ */ -+DSP_STATUS DCD_EnumerateObject(IN s32 cIndex, IN enum DSP_DCDOBJTYPE objType, -+ OUT struct DSP_UUID *pUuid) -+{ -+ DSP_STATUS status = DSP_SOK; -+ char szRegKey[REG_MAXREGPATHLENGTH]; -+ char szValue[REG_MAXREGPATHLENGTH]; -+ char szData[REG_MAXREGPATHLENGTH]; -+ u32 dwValueSize; -+ u32 dwDataSize; -+ struct DSP_UUID dspUuid; -+ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ -+ u32 dwKeyLen = 0; -+ -+ DBC_Require(cRefs >= 0); -+ DBC_Require(cIndex >= 0); -+ DBC_Require(pUuid != NULL); -+ -+ GT_3trace(curTrace, GT_ENTER, -+ "DCD_EnumerateObject: cIndex %d, objType %d, " -+ " pUuid 0x%x\n", cIndex, objType, pUuid); -+ -+ if ((cIndex != 0) && (cEnumRefs == 0)) { -+ /* -+ * If an enumeration is being performed on an index greater -+ * than zero, then the current cEnumRefs must have been -+ * incremented to greater than zero. -+ */ -+ status = DSP_ECHANGEDURINGENUM; -+ } else { -+ /* Enumerate a specific key in the registry by index. */ -+ dwValueSize = REG_MAXREGPATHLENGTH; -+ dwDataSize = REG_MAXREGPATHLENGTH; -+ -+ /* -+ * Pre-determine final key length. It's length of DCD_REGKEY + -+ * "_\0" + length of szObjType string + terminating NULL. 
-+ */ -+ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; -+ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); -+ -+ /* Create proper REG key; concatenate DCD_REGKEY with -+ * objType. */ -+ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); -+ if ((strlen(szRegKey) + strlen("_\0")) < -+ REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, "_\0", 2); -+ } else { -+ status = DSP_EFAIL; -+ } -+ -+ /* This snprintf is guaranteed not to exceed max size of an -+ * integer. */ -+ status = snprintf(szObjType, MAX_INT2CHAR_LENGTH, "%d", -+ objType); -+ -+ if (status == -1) { -+ status = DSP_EFAIL; -+ } else { -+ status = DSP_SOK; -+ if ((strlen(szRegKey) + strlen(szObjType)) < -+ REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, szObjType, -+ strlen(szObjType) + 1); -+ } else { -+ status = DSP_EFAIL; -+ } -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = REG_EnumValue(NULL, cIndex, szRegKey, szValue, -+ &dwValueSize, szData, -+ &dwDataSize); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Create UUID value using string retrieved from -+ * registry. */ -+ UUID_UuidFromString(szValue, &dspUuid); -+ -+ *pUuid = dspUuid; -+ -+ /* Increment cEnumRefs to update reference count. */ -+ cEnumRefs++; -+ -+ status = DSP_SOK; -+ } else if (status == REG_E_NOMOREITEMS) { -+ /* At the end of enumeration. Reset cEnumRefs. */ -+ cEnumRefs = 0; -+ -+ status = DSP_SENUMCOMPLETE; -+ } else { -+ status = DSP_EFAIL; -+ GT_1trace(curTrace, GT_6CLASS, -+ "DCD_EnumerateObject: REG_EnumValue" -+ " failed, status = 0x%x\n", status); -+ } -+ } -+ -+ DBC_Ensure(pUuid || (status == DSP_EFAIL)); -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_Exit ======== -+ * Purpose: -+ * Discontinue usage of the DCD module. -+ */ -+void DCD_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(curTrace, GT_5CLASS, "DCD_Exit: cRefs 0x%x\n", cRefs); -+ -+ cRefs--; -+ if (cRefs == 0) { -+ REG_Exit(); -+ COD_Exit(); -+ MEM_Exit(); -+ } -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== DCD_GetDepLibs ======== -+ */ -+DSP_STATUS DCD_GetDepLibs(IN struct DCD_MANAGER *hDcdMgr, -+ IN struct DSP_UUID *pUuid, -+ u16 numLibs, OUT struct DSP_UUID *pDepLibUuids, -+ OUT bool *pPersistentDepLibs, IN enum NLDR_PHASE phase) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValidHandle(hDcdMgr)); -+ DBC_Require(pUuid != NULL); -+ DBC_Require(pDepLibUuids != NULL); -+ DBC_Require(pPersistentDepLibs != NULL); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_GetDepLibs: hDcdMgr 0x%x\n", -+ hDcdMgr); -+ -+ status = GetDepLibInfo(hDcdMgr, pUuid, &numLibs, NULL, pDepLibUuids, -+ pPersistentDepLibs, phase); -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_GetNumDepLibs ======== -+ */ -+DSP_STATUS DCD_GetNumDepLibs(IN struct DCD_MANAGER *hDcdMgr, -+ IN struct DSP_UUID *pUuid, OUT u16 *pNumLibs, -+ OUT u16 *pNumPersLibs, IN enum NLDR_PHASE phase) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(IsValidHandle(hDcdMgr)); -+ DBC_Require(pNumLibs != NULL); -+ DBC_Require(pNumPersLibs != NULL); -+ DBC_Require(pUuid != NULL); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_GetNumDepLibs: hDcdMgr 0x%x\n", -+ hDcdMgr); -+ -+ status = GetDepLibInfo(hDcdMgr, pUuid, pNumLibs, pNumPersLibs, -+ NULL, NULL, phase); -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_GetObjectDef ======== -+ * Purpose: -+ * Retrieves the properties of a node or processor based on the UUID and -+ * object type. 
-+ */ -+DSP_STATUS DCD_GetObjectDef(IN struct DCD_MANAGER *hDcdMgr, -+ IN struct DSP_UUID *pObjUuid, -+ IN enum DSP_DCDOBJTYPE objType, -+ OUT struct DCD_GENERICOBJ *pObjDef) -+{ -+ struct DCD_MANAGER *pDcdMgr = hDcdMgr; /* pointer to DCD manager */ -+ struct COD_LIBRARYOBJ *lib = NULL; -+ DSP_STATUS status = DSP_SOK; -+ u32 ulAddr = 0; /* Used by COD_GetSection */ -+ u32 ulLen = 0; /* Used by COD_GetSection */ -+ u32 dwBufSize; /* Used by REG functions */ -+ char szRegKey[REG_MAXREGPATHLENGTH]; -+ char *szUuid; /*[MAXUUIDLEN];*/ -+ char szRegData[REG_MAXREGPATHLENGTH]; -+ char szSectName[MAXUUIDLEN + 2]; /* ".[UUID]\0" */ -+ char *pszCoffBuf; -+ u32 dwKeyLen; /* Len of REG key. */ -+ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pObjDef != NULL); -+ DBC_Require(pObjUuid != NULL); -+ -+ GT_4trace(curTrace, GT_ENTER, -+ "DCD_GetObjectDef: hDcdMgr 0x%x, " "objUuid" -+ " 0x%x, objType 0x%x, pObjDef 0x%x\n", hDcdMgr, pObjUuid, -+ objType, pObjDef); -+ szUuid = (char *)MEM_Calloc(MAXUUIDLEN, MEM_PAGED); -+ if (!szUuid) -+ return status = DSP_EMEMORY; -+ -+ if (!IsValidHandle(hDcdMgr)) { -+ status = DSP_EHANDLE; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: invalid " -+ "DCD manager handle.\n"); -+ goto func_end; -+ } -+ /* Pre-determine final key length. It's length of DCD_REGKEY + -+ * "_\0" + length of szObjType string + terminating NULL */ -+ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; -+ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); -+ /* Create proper REG key; concatenate DCD_REGKEY with objType. */ -+ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); -+ -+ if ((strlen(szRegKey) + strlen("_\0")) < REG_MAXREGPATHLENGTH) -+ strncat(szRegKey, "_\0", 2); -+ else -+ status = DSP_EFAIL; -+ -+ status = snprintf(szObjType, MAX_INT2CHAR_LENGTH, "%d", objType); -+ if (status == -1) { -+ status = DSP_EFAIL; -+ } else { -+ status = DSP_SOK; -+ -+ if ((strlen(szRegKey) + strlen(szObjType)) < -+ REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, szObjType, strlen(szObjType) + 1); -+ } else { -+ status = DSP_EFAIL; -+ } -+ /* Create UUID value to set in registry. */ -+ UUID_UuidToString(pObjUuid, szUuid, MAXUUIDLEN); -+ -+ if ((strlen(szRegKey) + MAXUUIDLEN) < REG_MAXREGPATHLENGTH) -+ strncat(szRegKey, szUuid, MAXUUIDLEN); -+ else -+ status = DSP_EFAIL; -+ -+ /* Retrieve paths from the registry based on struct DSP_UUID */ -+ dwBufSize = REG_MAXREGPATHLENGTH; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = REG_GetValue(NULL, szRegKey, szRegKey, (u8 *)szRegData, -+ &dwBufSize); -+ } -+ if (DSP_FAILED(status)) { -+ status = DSP_EUUID; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " -+ "REG_GetValue() failed\n"); -+ goto func_end; -+ } -+ /* Open COFF file. */ -+ status = COD_Open(pDcdMgr->hCodMgr, szRegData, COD_NOLOAD, &lib); -+ if (DSP_FAILED(status)) { -+ status = DSP_EDCDLOADBASE; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " -+ "COD_OpenBase() failed\n"); -+ goto func_end; -+ } -+ /* Ensure szUuid + 1 is not greater than sizeof szSectName. */ -+ DBC_Assert((strlen(szUuid) + 1) < sizeof(szSectName)); -+ /* Create section name based on node UUID. A period is -+ * pre-pended to the UUID string to form the section name. -+ * I.e. ".24BC8D90_BB45_11d4_B756_006008BDB66F" */ -+ strncpy(szSectName, ".", 2); -+ strncat(szSectName, szUuid, strlen(szUuid)); -+ /* Get section information. 
*/ -+ status = COD_GetSection(lib, szSectName, &ulAddr, &ulLen); -+ if (DSP_FAILED(status)) { -+ status = DSP_EDCDGETSECT; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef:" -+ " COD_GetSection() failed\n"); -+ goto func_end; -+ } -+ /* Allocate zeroed buffer. */ -+ pszCoffBuf = MEM_Calloc(ulLen + 4, MEM_PAGED); -+#ifdef _DB_TIOMAP -+ if (strstr(szRegData, "iva") == NULL) { -+ /* Locate section by objectID and read its content. */ -+ status = COD_ReadSection(lib, szSectName, pszCoffBuf, ulLen); -+ } else { -+ status = COD_ReadSection(lib, szSectName, pszCoffBuf, ulLen); -+ GT_0trace(curTrace, GT_4CLASS, -+ "Skipped Byte swap for IVA !!\n"); -+ } -+#else -+ status = COD_ReadSection(lib, szSectName, pszCoffBuf, ulLen); -+#endif -+ if (DSP_SUCCEEDED(status)) { -+ /* Compres DSP buffer to conform to PC format. */ -+ if (strstr(szRegData, "iva") == NULL) { -+ CompressBuf(pszCoffBuf, ulLen, DSPWORDSIZE); -+ } else { -+ CompressBuf(pszCoffBuf, ulLen, 1); -+ GT_0trace(curTrace, GT_4CLASS, "Compressing IVA " -+ "COFF buffer by 1 for IVA !!\n"); -+ } -+ /* Parse the content of the COFF buffer. */ -+ status = GetAttrsFromBuf(pszCoffBuf, ulLen, objType, pObjDef); -+ if (DSP_FAILED(status)) { -+ status = DSP_EDCDPARSESECT; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " -+ "GetAttrsFromBuf() failed\n"); -+ } -+ } else { -+ status = DSP_EDCDREADSECT; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjectDef: " -+ "COD_ReadSection() failed\n"); -+ } -+ /* Free the previously allocated dynamic buffer. */ -+ MEM_Free(pszCoffBuf); -+func_end: -+ if (lib) -+ COD_Close(lib); -+ -+ if (szUuid) -+ MEM_Free(szUuid); -+ return status; -+} -+ -+/* -+ * ======== DCD_GetObjects ======== -+ */ -+DSP_STATUS DCD_GetObjects(IN struct DCD_MANAGER *hDcdMgr, IN char *pszCoffPath, -+ DCD_REGISTERFXN registerFxn, void *handle) -+{ -+ struct DCD_MANAGER *pDcdMgr = hDcdMgr; /* pointer to DCD manager */ -+ DSP_STATUS status = DSP_SOK; -+ char *pszCoffBuf; -+ char *pszCur; -+ struct COD_LIBRARYOBJ *lib = NULL; -+ u32 ulAddr = 0; /* Used by COD_GetSection */ -+ u32 ulLen = 0; /* Used by COD_GetSection */ -+ char seps[] = ":, "; -+ char *pToken = NULL; -+ struct DSP_UUID dspUuid; -+ s32 cObjectType; -+ -+ DBC_Require(cRefs > 0); -+ GT_1trace(curTrace, GT_ENTER, -+ "DCD_GetObjects: hDcdMgr 0x%x\n", hDcdMgr); -+ if (!IsValidHandle(hDcdMgr)) { -+ status = DSP_EHANDLE; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_GetObjects: invalid DCD manager handle.\n"); -+ goto func_end; -+ } -+ /* Open DSP coff file, don't load symbols. */ -+ status = COD_Open(pDcdMgr->hCodMgr, pszCoffPath, COD_NOLOAD, &lib); -+ if (DSP_FAILED(status)) { -+ status = DSP_EDCDLOADBASE; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_AutoRegister: COD_Open() failed\n"); -+ goto func_cont; -+ } -+ /* Get DCD_RESIGER_SECTION section information. */ -+ status = COD_GetSection(lib, DCD_REGISTER_SECTION, &ulAddr, &ulLen); -+ if (DSP_FAILED(status) || !(ulLen > 0)) { -+ status = DSP_EDCDNOAUTOREGISTER; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_GetObjects: COD_GetSection() " -+ "- no auto register section\n"); -+ goto func_cont; -+ } -+ /* Allocate zeroed buffer. */ -+ pszCoffBuf = MEM_Calloc(ulLen + 4, MEM_PAGED); -+#ifdef _DB_TIOMAP -+ if (strstr(pszCoffPath, "iva") == NULL) { -+ /* Locate section by objectID and read its content. 
*/ -+ status = COD_ReadSection(lib, DCD_REGISTER_SECTION, -+ pszCoffBuf, ulLen); -+ } else { -+ GT_0trace(curTrace, GT_4CLASS, "Skipped Byte swap for IVA!!\n"); -+ status = COD_ReadSection(lib, DCD_REGISTER_SECTION, -+ pszCoffBuf, ulLen); -+ } -+#else -+ status = COD_ReadSection(lib, DCD_REGISTER_SECTION, pszCoffBuf, ulLen); -+#endif -+ if (DSP_SUCCEEDED(status)) { -+ /* Compress DSP buffer to conform to PC format. */ -+ GT_0trace(curTrace, GT_4CLASS, -+ "Successfully read section !!\n"); -+ if (strstr(pszCoffPath, "iva") == NULL) { -+ CompressBuf(pszCoffBuf, ulLen, DSPWORDSIZE); -+ } else { -+ CompressBuf(pszCoffBuf, ulLen, 1); -+ GT_0trace(curTrace, GT_4CLASS, "Compress COFF buffer " -+ "with 1 word for IVA !!\n"); -+ } -+ /* Read from buffer and register object in buffer. */ -+ pszCur = pszCoffBuf; -+ while ((pToken = strsep(&pszCur, seps)) && *pToken != '\0') { -+ /* Retrieve UUID string. */ -+ UUID_UuidFromString(pToken, &dspUuid); -+ /* Retrieve object type */ -+ pToken = strsep(&pszCur, seps); -+ /* Retrieve object type */ -+ cObjectType = Atoi(pToken); -+ /* -+ * Apply registerFxn to the found DCD object. -+ * Possible actions include: -+ * -+ * 1) Register found DCD object. -+ * 2) Unregister found DCD object (when handle == NULL) -+ * 3) Add overlay node. -+ */ -+ GT_1trace(curTrace, GT_4CLASS, "Registering objtype " -+ "%d \n", cObjectType); -+ status = registerFxn(&dspUuid, cObjectType, handle); -+ if (DSP_SUCCEEDED(status)) { -+ GT_1trace(curTrace, GT_5CLASS, -+ "DCD_GetObjects: status 0x%x\n", -+ status); -+ } else { -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_GetObjects: " -+ "registration() failed\n"); -+ /* if error occurs, break from while loop. */ -+ break; -+ } -+ } -+ } else { -+ status = DSP_EDCDREADSECT; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_GetObjects: " -+ "COD_ReadSection() failed\n"); -+ } -+ /* Free the previously allocated dynamic buffer. */ -+ MEM_Free(pszCoffBuf); -+func_cont: -+ if (lib) -+ COD_Close(lib); -+ -+func_end: -+ return status; -+} -+ -+/* -+ * ======== DCD_GetLibraryName ======== -+ * Purpose: -+ * Retrieves the library name for the given UUID. -+ * -+ */ -+DSP_STATUS DCD_GetLibraryName(IN struct DCD_MANAGER *hDcdMgr, -+ IN struct DSP_UUID *pUuid, -+ IN OUT char *pstrLibName, IN OUT u32 *pdwSize, -+ enum NLDR_PHASE phase, OUT bool *fPhaseSplit) -+{ -+ char szRegKey[REG_MAXREGPATHLENGTH]; -+ char szUuid[MAXUUIDLEN]; -+ u32 dwKeyLen; /* Len of REG key. */ -+ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(pUuid != NULL); -+ DBC_Require(pstrLibName != NULL); -+ DBC_Require(pdwSize != NULL); -+ DBC_Require(IsValidHandle(hDcdMgr)); -+ -+ GT_4trace(curTrace, GT_ENTER, -+ "DCD_GetLibraryName: hDcdMgr 0x%x, pUuid 0x%x, " -+ " pstrLibName 0x%x, pdwSize 0x%x\n", hDcdMgr, pUuid, -+ pstrLibName, pdwSize); -+ /* -+ * Pre-determine final key length. It's length of DCD_REGKEY + -+ * "_\0" + length of szObjType string + terminating NULL. -+ */ -+ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; -+ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); -+ /* Create proper REG key; concatenate DCD_REGKEY with objType. 
*/ -+ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); -+ if ((strlen(szRegKey) + strlen("_\0")) < REG_MAXREGPATHLENGTH) -+ strncat(szRegKey, "_\0", 2); -+ else -+ status = DSP_EFAIL; -+ -+ switch (phase) { -+ case NLDR_CREATE: -+ /* create phase type */ -+ sprintf(szObjType, "%d", DSP_DCDCREATELIBTYPE); -+ break; -+ case NLDR_EXECUTE: -+ /* execute phase type */ -+ sprintf(szObjType, "%d", DSP_DCDEXECUTELIBTYPE); -+ break; -+ case NLDR_DELETE: -+ /* delete phase type */ -+ sprintf(szObjType, "%d", DSP_DCDDELETELIBTYPE); -+ break; -+ case NLDR_NOPHASE: -+ /* known to be a dependent library */ -+ sprintf(szObjType, "%d", DSP_DCDLIBRARYTYPE); -+ break; -+ default: -+ status = -1; -+ DBC_Assert(false); -+ } -+ if (status == -1) { -+ status = DSP_EFAIL; -+ } else { -+ status = DSP_SOK; -+ if ((strlen(szRegKey) + strlen(szObjType)) -+ < REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, szObjType, strlen(szObjType) + 1); -+ } else { -+ status = DSP_EFAIL; -+ } -+ /* Create UUID value to find match in registry. */ -+ UUID_UuidToString(pUuid, szUuid, MAXUUIDLEN); -+ if ((strlen(szRegKey) + MAXUUIDLEN) < -+ REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, szUuid, MAXUUIDLEN); -+ } else { -+ status = DSP_EFAIL; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Retrieve path from the registry based on DSP_UUID */ -+ status = REG_GetValue(NULL, szRegKey, szRegKey, -+ (u8 *)pstrLibName, pdwSize); -+ } -+ /* If can't find, phases might be registered as generic LIBRARYTYPE */ -+ if (DSP_FAILED(status) && phase != NLDR_NOPHASE) { -+ if (fPhaseSplit) -+ *fPhaseSplit = false; -+ -+ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); -+ if ((strlen(szRegKey) + strlen("_\0")) < -+ REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, "_\0", 2); -+ } else { -+ status = DSP_EFAIL; -+ } -+ sprintf(szObjType, "%d", DSP_DCDLIBRARYTYPE); -+ if ((strlen(szRegKey) + strlen(szObjType)) -+ < REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, szObjType, strlen(szObjType) + 1); -+ } else { -+ status = DSP_EFAIL; -+ } -+ UUID_UuidToString(pUuid, szUuid, MAXUUIDLEN); -+ if ((strlen(szRegKey) + MAXUUIDLEN) < REG_MAXREGPATHLENGTH) -+ strncat(szRegKey, szUuid, MAXUUIDLEN); -+ else -+ status = DSP_EFAIL; -+ -+ status = REG_GetValue(NULL, szRegKey, szRegKey, -+ (u8 *)pstrLibName, pdwSize); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_Init ======== -+ * Purpose: -+ * Initialize the DCD module. -+ */ -+bool DCD_Init(void) -+{ -+ bool fInitMEM; -+ bool fInitREG; -+ bool fInitCOD; -+ bool fInit = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_Init: (on enter) cRefs = 0x%x\n", -+ cRefs); -+ -+ if (cRefs == 0) { -+ -+ /* Initialize required modules. */ -+ fInitMEM = MEM_Init(); -+ fInitCOD = COD_Init(); -+ fInitREG = REG_Init(); -+ if (!fInitMEM || !fInitCOD || !fInitREG) { -+ fInit = false; -+ GT_0trace(curTrace, GT_6CLASS, "DCD_Init failed\n"); -+ /* Exit initialized modules. */ -+ if (fInitMEM) -+ MEM_Exit(); -+ -+ if (fInitCOD) -+ COD_Exit(); -+ -+ if (fInitREG) -+ REG_Exit(); -+ -+ } -+ } -+ -+ if (fInit) -+ cRefs++; -+ -+ -+ GT_1trace(curTrace, GT_5CLASS, "DCD_Init: (on exit) cRefs = 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((fInit && (cRefs > 0)) || (!fInit && (cRefs == 0))); -+ -+ return fInit; -+} -+ -+/* -+ * ======== DCD_RegisterObject ======== -+ * Purpose: -+ * Registers a node or a processor with the DCD. -+ * If pszPathName == NULL, unregister the specified DCD object. 
-+ */ -+DSP_STATUS DCD_RegisterObject(IN struct DSP_UUID *pUuid, -+ IN enum DSP_DCDOBJTYPE objType, -+ IN char *pszPathName) -+{ -+ DSP_STATUS status = DSP_SOK; -+ char szRegKey[REG_MAXREGPATHLENGTH]; -+ char szUuid[MAXUUIDLEN + 1]; -+ u32 dwPathSize = 0; -+ u32 dwKeyLen; /* Len of REG key. */ -+ char szObjType[MAX_INT2CHAR_LENGTH]; /* str. rep. of objType. */ -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pUuid != NULL); -+ DBC_Require((objType == DSP_DCDNODETYPE) || -+ (objType == DSP_DCDPROCESSORTYPE) || -+ (objType == DSP_DCDLIBRARYTYPE) || -+ (objType == DSP_DCDCREATELIBTYPE) || -+ (objType == DSP_DCDEXECUTELIBTYPE) || -+ (objType == DSP_DCDDELETELIBTYPE)); -+ -+ GT_3trace(curTrace, GT_ENTER, "DCD_RegisterObject: object UUID 0x%x, " -+ "objType %d, szPathName %s\n", pUuid, objType, pszPathName); -+ /* -+ * Pre-determine final key length. It's length of DCD_REGKEY + -+ * "_\0" + length of szObjType string + terminating NULL. -+ */ -+ dwKeyLen = strlen(DCD_REGKEY) + 1 + sizeof(szObjType) + 1; -+ DBC_Assert(dwKeyLen < REG_MAXREGPATHLENGTH); -+ /* Create proper REG key; concatenate DCD_REGKEY with objType. */ -+ strncpy(szRegKey, DCD_REGKEY, strlen(DCD_REGKEY) + 1); -+ if ((strlen(szRegKey) + strlen("_\0")) < REG_MAXREGPATHLENGTH) -+ strncat(szRegKey, "_\0", 2); -+ else -+ status = DSP_EFAIL; -+ -+ status = snprintf(szObjType, MAX_INT2CHAR_LENGTH, "%d", objType); -+ if (status == -1) { -+ status = DSP_EFAIL; -+ } else { -+ status = DSP_SOK; -+ if ((strlen(szRegKey) + strlen(szObjType)) < -+ REG_MAXREGPATHLENGTH) { -+ strncat(szRegKey, szObjType, strlen(szObjType) + 1); -+ } else { -+ status = DSP_EFAIL; -+ } -+ /* Create UUID value to set in registry. */ -+ UUID_UuidToString(pUuid, szUuid, MAXUUIDLEN); -+ if ((strlen(szRegKey) + MAXUUIDLEN) < REG_MAXREGPATHLENGTH) -+ strncat(szRegKey, szUuid, MAXUUIDLEN); -+ else -+ status = DSP_EFAIL; -+ -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* -+ * If pszPathName != NULL, perform registration, otherwise, -+ * perform unregistration. -+ */ -+ if (pszPathName) { -+ /* Add new reg value (UUID+objType) with COFF path -+ * info. */ -+ dwPathSize = strlen(pszPathName) + 1; -+ status = REG_SetValue(NULL, szRegKey, szRegKey, REG_SZ, -+ (u8 *)pszPathName, dwPathSize); -+ GT_3trace(curTrace, GT_6CLASS, -+ "REG_SetValue REG_SZ=%d, " -+ "(u8 *)pszPathName=%s, dwPathSize=%d\n", -+ REG_SZ, pszPathName, dwPathSize); -+ if (DSP_FAILED(status)) { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_RegisterObject: REG_SetValue failed!\n"); -+ } -+ } else { -+ /* Deregister an existing object. */ -+ status = REG_DeleteValue(NULL, szRegKey, szRegKey); -+ if (DSP_FAILED(status)) { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_6CLASS, -+ "DCD_UnregisterObject: " -+ "REG_DeleteValue failed!\n"); -+ } -+ } -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* -+ * Because the node database has been updated through a -+ * successful object registration/de-registration operation, -+ * we need to reset the object enumeration counter to allow -+ * current enumerations to reflect this update in the node -+ * database. -+ */ -+ -+ cEnumRefs = 0; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DCD_UnregisterObject ======== -+ * Call DCD_Register object with pszPathName set to NULL to -+ * perform actual object de-registration. 
-+ */ -+DSP_STATUS DCD_UnregisterObject(IN struct DSP_UUID *pUuid, -+ IN enum DSP_DCDOBJTYPE objType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pUuid != NULL); -+ DBC_Require((objType == DSP_DCDNODETYPE) || -+ (objType == DSP_DCDPROCESSORTYPE) || -+ (objType == DSP_DCDLIBRARYTYPE) || -+ (objType == DSP_DCDCREATELIBTYPE) || -+ (objType == DSP_DCDEXECUTELIBTYPE) || -+ (objType == DSP_DCDDELETELIBTYPE)); -+ -+ GT_2trace(curTrace, GT_ENTER, -+ "DCD_UnregisterObject: object UUID 0x%x, " -+ "objType %d\n", pUuid, objType); -+ -+ /* -+ * When DCD_RegisterObject is called with NULL as pathname, -+ * it indicates an unregister object operation. -+ */ -+ status = DCD_RegisterObject(pUuid, objType, NULL); -+ -+ return status; -+} -+ -+/* -+ ********************************************************************** -+ * DCD Helper Functions -+ ********************************************************************** -+ */ -+ -+/* -+ * ======== Atoi ======== -+ * Purpose: -+ * This function converts strings in decimal or hex format to integers. -+ */ -+static s32 Atoi(char *pszBuf) -+{ -+ s32 result = 0; -+ char *pch = pszBuf; -+ char c; -+ char first; -+ s32 base = 10; -+ s32 len; -+ -+ while (isspace(*pch)) -+ pch++; -+ -+ first = *pch; -+ if (first == '-' || first == '+') { -+ pch++; -+ } else { -+ /* Determine if base 10 or base 16 */ -+ len = strlen(pch); -+ if (len > 1) { -+ c = pch[1]; -+ if ((*pch == '0' && (c == 'x' || c == 'X'))) { -+ base = 16; -+ pch += 2; -+ } -+ c = pch[len - 1]; -+ if (c == 'h' || c == 'H') -+ base = 16; -+ -+ } -+ } -+ -+ while (isdigit(c = *pch) || ((base == 16) && isxdigit(c))) { -+ result *= base; -+ if ('A' <= c && c <= 'F') { -+ c = c - 'A' + 10; -+ } else { -+ if ('a' <= c && c <= 'f') -+ c = c - 'a' + 10; -+ else -+ c -= '0'; -+ -+ } -+ result += c; -+ ++pch; -+ } -+ -+ return result; -+} -+ -+/* -+ * ======== GetAttrsFromBuf ======== -+ * Purpose: -+ * Parse the content of a buffer filled with DSP-side data and -+ * retrieve an object's attributes from it. IMPORTANT: Assume the -+ * buffer has been converted from DSP format to GPP format. -+ */ -+static DSP_STATUS GetAttrsFromBuf(char *pszBuf, u32 ulBufSize, -+ enum DSP_DCDOBJTYPE objType, -+ struct DCD_GENERICOBJ *pGenObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ char seps[] = ", "; -+ char *pszCur; -+ char *token; -+ s32 cLen = 0; -+ u32 i = 0; -+#ifdef _DB_TIOMAP -+ s32 iEntry; -+#endif -+ -+ DBC_Require(pszBuf != NULL); -+ DBC_Require(ulBufSize != 0); -+ DBC_Require((objType == DSP_DCDNODETYPE) -+ || (objType == DSP_DCDPROCESSORTYPE)); -+ DBC_Require(pGenObj != NULL); -+ -+ -+ switch (objType) { -+ case DSP_DCDNODETYPE: -+ /* -+ * Parse COFF sect buffer to retrieve individual tokens used -+ * to fill in object attrs. 
-+ */ -+ pszCur = pszBuf; -+ token = strsep(&pszCur, seps); -+ -+ /* u32 cbStruct */ -+ pGenObj->objData.nodeObj.ndbProps.cbStruct = -+ (u32) Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* DSP_UUID uiNodeID */ -+ UUID_UuidFromString(token, -+ &pGenObj->objData.nodeObj.ndbProps.uiNodeID); -+ token = strsep(&pszCur, seps); -+ -+ /* acName */ -+ DBC_Require(token); -+ cLen = strlen(token); -+ if (cLen > DSP_MAXNAMELEN - 1) -+ cLen = DSP_MAXNAMELEN - 1; -+ -+ strncpy(pGenObj->objData.nodeObj.ndbProps.acName, -+ token, cLen); -+ pGenObj->objData.nodeObj.ndbProps.acName[cLen] = '\0'; -+ token = strsep(&pszCur, seps); -+ /* u32 uNodeType */ -+ pGenObj->objData.nodeObj.ndbProps.uNodeType = Atoi(token); -+ token = strsep(&pszCur, seps); -+ /* u32 bCacheOnGPP */ -+ pGenObj->objData.nodeObj.ndbProps.bCacheOnGPP = Atoi(token); -+ token = strsep(&pszCur, seps); -+ /* DSP_RESOURCEREQMTS dspResourceReqmts */ -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts.cbStruct = -+ (u32) Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uStaticDataSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uGlobalDataSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uProgramMemSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uWCExecutionTime = Atoi(token); -+ token = strsep(&pszCur, seps); -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uWCPeriod = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uWCDeadline = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. -+ uAvgExectionTime = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.nodeObj.ndbProps.dspResourceReqmts. 
-+ uMinimumPeriod = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* s32 iPriority */ -+ pGenObj->objData.nodeObj.ndbProps.iPriority = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uStackSize */ -+ pGenObj->objData.nodeObj.ndbProps.uStackSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uSysStackSize */ -+ pGenObj->objData.nodeObj.ndbProps.uSysStackSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uStackSeg */ -+ pGenObj->objData.nodeObj.ndbProps.uStackSeg = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uMessageDepth */ -+ pGenObj->objData.nodeObj.ndbProps.uMessageDepth = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uNumInputStreams */ -+ pGenObj->objData.nodeObj.ndbProps.uNumInputStreams = -+ Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uNumOutputStreams */ -+ pGenObj->objData.nodeObj.ndbProps.uNumOutputStreams = -+ Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* u32 uTimeout */ -+ pGenObj->objData.nodeObj.ndbProps.uTimeout = -+ Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* char * pstrCreatePhaseFxn */ -+ DBC_Require(token); -+ cLen = strlen(token); -+ pGenObj->objData.nodeObj.pstrCreatePhaseFxn = -+ MEM_Calloc(cLen + 1, MEM_PAGED); -+ strncpy(pGenObj->objData.nodeObj.pstrCreatePhaseFxn, -+ token, cLen); -+ pGenObj->objData.nodeObj.pstrCreatePhaseFxn[cLen] = '\0'; -+ token = strsep(&pszCur, seps); -+ -+ /* char * pstrExecutePhaseFxn */ -+ DBC_Require(token); -+ cLen = strlen(token); -+ pGenObj->objData.nodeObj.pstrExecutePhaseFxn = -+ MEM_Calloc(cLen + 1, MEM_PAGED); -+ strncpy(pGenObj->objData.nodeObj.pstrExecutePhaseFxn, -+ token, cLen); -+ pGenObj->objData.nodeObj.pstrExecutePhaseFxn[cLen] = '\0'; -+ token = strsep(&pszCur, seps); -+ -+ /* char * pstrDeletePhaseFxn */ -+ DBC_Require(token); -+ cLen = strlen(token); -+ pGenObj->objData.nodeObj.pstrDeletePhaseFxn = -+ MEM_Calloc(cLen + 1, MEM_PAGED); -+ strncpy(pGenObj->objData.nodeObj.pstrDeletePhaseFxn, -+ token, cLen); -+ pGenObj->objData.nodeObj.pstrDeletePhaseFxn[cLen] = '\0'; -+ token = strsep(&pszCur, seps); -+ -+ /* Segment id for message buffers */ -+ pGenObj->objData.nodeObj.uMsgSegid = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* Message notification type */ -+ pGenObj->objData.nodeObj.uMsgNotifyType = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ /* char * pstrIAlgName */ -+ if (token) { -+ cLen = strlen(token); -+ pGenObj->objData.nodeObj.pstrIAlgName = -+ MEM_Calloc(cLen + 1, MEM_PAGED); -+ strncpy(pGenObj->objData.nodeObj.pstrIAlgName, -+ token, cLen); -+ pGenObj->objData.nodeObj.pstrIAlgName[cLen] = '\0'; -+ token = strsep(&pszCur, seps); -+ } -+ -+ /* Load type (static, dynamic, or overlay) */ -+ if (token) { -+ pGenObj->objData.nodeObj.usLoadType = Atoi(token); -+ token = strsep(&pszCur, seps); -+ } -+ -+ /* Dynamic load data requirements */ -+ if (token) { -+ pGenObj->objData.nodeObj.ulDataMemSegMask = Atoi(token); -+ token = strsep(&pszCur, seps); -+ } -+ -+ /* Dynamic load code requirements */ -+ if (token) { -+ pGenObj->objData.nodeObj.ulCodeMemSegMask = Atoi(token); -+ token = strsep(&pszCur, seps); -+ } -+ -+ /* Extract node profiles into node properties */ -+ if (token) { -+ -+ pGenObj->objData.nodeObj.ndbProps.uCountProfiles = -+ Atoi(token); -+ for (i = 0; i < pGenObj->objData.nodeObj.ndbProps. -+ uCountProfiles; i++) { -+ token = strsep(&pszCur, seps); -+ if (token) { -+ /* Heap Size for the node */ -+ pGenObj->objData.nodeObj.ndbProps. 
-+ aProfiles[i].ulHeapSize = -+ Atoi(token); -+ } -+ } -+ } -+ token = strsep(&pszCur, seps); -+ if (token) { -+ pGenObj->objData.nodeObj.ndbProps.uStackSegName = -+ (u32)(token); -+ } -+ -+ break; -+ -+ case DSP_DCDPROCESSORTYPE: -+ /* -+ * Parse COFF sect buffer to retrieve individual tokens used -+ * to fill in object attrs. -+ */ -+ pszCur = pszBuf; -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.cbStruct = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.uProcessorFamily = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.uProcessorType = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.uClockRate = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.ulInternalMemSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.ulExternalMemSize = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.uProcessorID = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.tyRunningRTOS = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.nNodeMinPriority = Atoi(token); -+ token = strsep(&pszCur, seps); -+ -+ pGenObj->objData.procObj.nNodeMaxPriority = Atoi(token); -+ -+#ifdef _DB_TIOMAP -+ /* Proc object may contain additional(extended) attributes. */ -+ /* attr must match proc.hxx */ -+ for (iEntry = 0; iEntry < 7; iEntry++) { -+ token = strsep(&pszCur, seps); -+ pGenObj->objData.extProcObj.tyTlb[iEntry].ulGppPhys = -+ Atoi(token); -+ -+ token = strsep(&pszCur, seps); -+ pGenObj->objData.extProcObj.tyTlb[iEntry].ulDspVirt = -+ Atoi(token); -+ } -+#endif -+ -+ break; -+ -+ default: -+ status = DSP_EFAIL; -+ break; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== CompressBuffer ======== -+ * Purpose: -+ * Compress the DSP buffer, if necessary, to conform to PC format. -+ */ -+static void CompressBuf(char *pszBuf, u32 ulBufSize, s32 cCharSize) -+{ -+ char *p; -+ char ch; -+ char *q; -+ -+ p = pszBuf; -+ if (p == NULL) -+ return; -+ -+ for (q = pszBuf; q < (pszBuf + ulBufSize);) { -+ -+ ch = DspChar2GppChar(q, cCharSize); -+ if (ch == '\\') { -+ q += cCharSize; -+ ch = DspChar2GppChar(q, cCharSize); -+ switch (ch) { -+ case 't': -+ *p = '\t'; -+ break; -+ -+ case 'n': -+ *p = '\n'; -+ break; -+ -+ case 'r': -+ *p = '\r'; -+ break; -+ -+ case '0': -+ *p = '\0'; -+ break; -+ -+ default: -+ *p = ch; -+ break; -+ } -+ } else { -+ *p = ch; -+ } -+ p++; -+ q += cCharSize; -+ } -+ -+ /* NULL out remainder of buffer. 
*/ -+ while (p < q) -+ *p++ = '\0'; -+ -+} -+ -+/* -+ * ======== DspChar2GppChar ======== -+ * Purpose: -+ * Convert DSP char to host GPP char in a portable manner -+ */ -+static char DspChar2GppChar(char *pWord, s32 cDspCharSize) -+{ -+ char ch = '\0'; -+ char *chSrc; -+ s32 i; -+ -+ for (chSrc = pWord, i = cDspCharSize; i > 0; i--) -+ ch |= *chSrc++; -+ -+ return ch; -+} -+ -+/* -+ * ======== GetDepLibInfo ======== -+ */ -+static DSP_STATUS GetDepLibInfo(IN struct DCD_MANAGER *hDcdMgr, -+ IN struct DSP_UUID *pUuid, -+ IN OUT u16 *pNumLibs, -+ OPTIONAL OUT u16 *pNumPersLibs, -+ OPTIONAL OUT struct DSP_UUID *pDepLibUuids, -+ OPTIONAL OUT bool *pPersistentDepLibs, -+ enum NLDR_PHASE phase) -+{ -+ struct DCD_MANAGER *pDcdMgr = hDcdMgr; /* pointer to DCD manager */ -+ char *pszCoffBuf = NULL; -+ char *pszCur; -+ char *pszFileName = NULL; -+ struct COD_LIBRARYOBJ *lib = NULL; -+ u32 ulAddr = 0; /* Used by COD_GetSection */ -+ u32 ulLen = 0; /* Used by COD_GetSection */ -+ u32 dwDataSize = COD_MAXPATHLENGTH; -+ char seps[] = ", "; -+ char *pToken = NULL; -+ bool fGetUuids = (pDepLibUuids != NULL); -+ u16 nDepLibs = 0; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ -+ DBC_Require(IsValidHandle(hDcdMgr)); -+ DBC_Require(pNumLibs != NULL); -+ DBC_Require(pUuid != NULL); -+ -+ GT_1trace(curTrace, GT_ENTER, "DCD_GetNumDepLibs: hDcdMgr 0x%x\n", -+ hDcdMgr); -+ -+ /* Initialize to 0 dependent libraries, if only counting number of -+ * dependent libraries */ -+ if (!fGetUuids) { -+ *pNumLibs = 0; -+ *pNumPersLibs = 0; -+ } -+ -+ /* Allocate a buffer for file name */ -+ pszFileName = MEM_Calloc(dwDataSize, MEM_PAGED); -+ if (pszFileName == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ /* Get the name of the library */ -+ status = DCD_GetLibraryName(hDcdMgr, pUuid, pszFileName, -+ &dwDataSize, phase, NULL); -+ } -+ /* Open the library */ -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_Open(pDcdMgr->hCodMgr, pszFileName, -+ COD_NOLOAD, &lib); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Get dependent library section information. */ -+ status = COD_GetSection(lib, DEPLIBSECT, &ulAddr, &ulLen); -+ -+ if (DSP_FAILED(status)) { -+ /* Ok, no dependent libraries */ -+ ulLen = 0; -+ status = DSP_SNODEPENDENTLIBS; -+ } -+ } -+ -+ if (DSP_FAILED(status) || !(ulLen > 0)) -+ goto func_cont; -+ -+ /* Allocate zeroed buffer. */ -+ pszCoffBuf = MEM_Calloc(ulLen, MEM_PAGED); -+ if (pszCoffBuf == NULL) -+ status = DSP_EMEMORY; -+ -+ /* Read section contents. */ -+ status = COD_ReadSection(lib, DEPLIBSECT, pszCoffBuf, ulLen); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ /* Compress and format DSP buffer to conform to PC format. */ -+ CompressBuf(pszCoffBuf, ulLen, DSPWORDSIZE); -+ /* Read from buffer */ -+ pszCur = pszCoffBuf; -+ while ((pToken = strsep(&pszCur, seps)) && *pToken != '\0') { -+ if (fGetUuids) { -+ if (nDepLibs >= *pNumLibs) { -+ /* Gone beyond the limit */ -+ break; -+ } else { -+ /* Retrieve UUID string. */ -+ UUID_UuidFromString(pToken, -+ &(pDepLibUuids[nDepLibs])); -+ /* Is this library persistent? */ -+ pToken = strsep(&pszCur, seps); -+ pPersistentDepLibs[nDepLibs] = Atoi(pToken); -+ nDepLibs++; -+ } -+ } else { -+ /* Advanc to next token */ -+ pToken = strsep(&pszCur, seps); -+ if (Atoi(pToken)) -+ (*pNumPersLibs)++; -+ -+ /* Just counting number of dependent libraries */ -+ (*pNumLibs)++; -+ } -+ } -+func_cont: -+ if (lib) -+ COD_Close(lib); -+ -+ /* Free previously allocated dynamic buffers. 
*/ -+ if (pszFileName) -+ MEM_Free(pszFileName); -+ -+ if (pszCoffBuf) -+ MEM_Free(pszCoffBuf); -+ -+ return status; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/disp.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/disp.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/disp.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/disp.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,916 @@ -+/* -+ * disp.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== disp.c ======== -+ * -+ * Description: -+ * Node Dispatcher interface. Communicates with Resource Manager Server -+ * (RMS) on DSP. Access to RMS is synchronized in NODE. -+ * -+ * Public Functions: -+ * DISP_Create -+ * DISP_Delete -+ * DISP_Exit -+ * DISP_Init -+ * DISP_NodeChangePriority -+ * DISP_NodeCreate -+ * DISP_NodeDelete -+ * DISP_NodePause -+ * DISP_NodeRun -+ * -+ *! Revision History: -+ *! ================= -+ *! 18-Feb-2003 vp Code review updates -+ *! 18-Oct-2002 vp Ported to Linux platform -+ *! 16-May-2002 jeh Added DISP_DoCinit(). -+ *! 24-Apr-2002 jeh Added DISP_MemWrite(). -+ *! 13-Feb-2002 jeh Pass system stack size to RMS. -+ *! 16-Jan-2002 ag Added bufsize param to _ChnlAddIOReq() fxn -+ *! 10-May-2001 jeh Code Review cleanup. -+ *! 26-Sep-2000 jeh Fixed status values in SendMessage(). -+ *! 19-Jun-2000 jeh Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Link Driver */ -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+#define DISP_SIGNATURE 0x50534944 /* "PSID" */ -+ -+/* Size of a reply from RMS */ -+#define REPLYSIZE (3 * sizeof(RMS_WORD)) -+ -+/* Reserved channel offsets for communication with RMS */ -+#define CHNLTORMSOFFSET 0 -+#define CHNLFROMRMSOFFSET 1 -+ -+#define CHNLIOREQS 1 -+ -+#define SwapWord(x) (((u32)(x) >> 16) | ((u32)(x) << 16)) -+ -+/* -+ * ======== DISP_OBJECT ======== -+ */ -+struct DISP_OBJECT { -+ u32 dwSignature; /* Used for object validation */ -+ struct DEV_OBJECT *hDevObject; /* Device for this processor */ -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ -+ struct CHNL_MGR *hChnlMgr; /* Channel manager */ -+ struct CHNL_OBJECT *hChnlToDsp; /* Channel for commands to RMS */ -+ struct CHNL_OBJECT *hChnlFromDsp; /* Channel for replies from RMS */ -+ u8 *pBuf; /* Buffer for commands, replies */ -+ u32 ulBufsize; /* pBuf size in bytes */ -+ u32 ulBufsizeRMS; /* pBuf size in RMS words */ -+ u32 uCharSize; /* Size of DSP character */ -+ u32 uWordSize; /* Size of DSP word */ -+ u32 uDataMauSize; /* Size of DSP Data MAU */ -+}; -+ -+static u32 cRefs; -+ -+/* Debug msgs: */ -+#if GT_TRACE -+static struct GT_Mask DISP_DebugMask = { NULL, NULL }; -+#endif -+ -+static void DeleteDisp(struct DISP_OBJECT *hDisp); -+static DSP_STATUS FillStreamDef(RMS_WORD *pdwBuf, u32 *ptotal, u32 offset, -+ struct NODE_STRMDEF strmDef, u32 max, -+ u32 uCharsInRMSWord); -+static DSP_STATUS SendMessage(struct DISP_OBJECT *hDisp, u32 dwTimeout, -+ u32 ulBytes, OUT u32 *pdwArg); -+ -+/* -+ * ======== DISP_Create ======== -+ * Create a NODE Dispatcher object. 
-+ */ -+DSP_STATUS DISP_Create(OUT struct DISP_OBJECT **phDispObject, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct DISP_ATTRS *pDispAttrs) -+{ -+ struct DISP_OBJECT *pDisp; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ u32 ulChnlId; -+ struct CHNL_ATTRS chnlAttrs; -+ DSP_STATUS status = DSP_SOK; -+ u32 devType; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDispObject != NULL); -+ DBC_Require(pDispAttrs != NULL); -+ DBC_Require(hDevObject != NULL); -+ -+ GT_3trace(DISP_DebugMask, GT_ENTER, "DISP_Create: phDispObject: 0x%x\t" -+ "hDevObject: 0x%x\tpDispAttrs: 0x%x\n", phDispObject, -+ hDevObject, pDispAttrs); -+ -+ *phDispObject = NULL; -+ -+ /* Allocate Node Dispatcher object */ -+ MEM_AllocObject(pDisp, struct DISP_OBJECT, DISP_SIGNATURE); -+ if (pDisp == NULL) { -+ status = DSP_EMEMORY; -+ GT_0trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Create: MEM_AllocObject() failed!\n"); -+ } else { -+ pDisp->hDevObject = hDevObject; -+ } -+ -+ /* Get Channel manager and WMD function interface */ -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetChnlMgr(hDevObject, &(pDisp->hChnlMgr)); -+ if (DSP_SUCCEEDED(status)) { -+ (void) DEV_GetIntfFxns(hDevObject, &pIntfFxns); -+ pDisp->pIntfFxns = pIntfFxns; -+ } else { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Create: Failed to get " -+ "channel manager! status = 0x%x\n", status); -+ } -+ } -+ -+ /* check device type and decide if streams or messag'ing is used for -+ * RMS/EDS */ -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ status = DEV_GetDevType(hDevObject, &devType); -+ GT_1trace(DISP_DebugMask, GT_6CLASS, "DISP_Create: Creating DISP for " -+ "device = 0x%x\n", devType); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ if (devType != DSP_UNIT) { -+ GT_0trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Create: Unkown device " -+ "type in Device object !! 
\n"); -+ status = DSP_EFAIL; -+ goto func_cont; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pDisp->uCharSize = DSPWORDSIZE; -+ pDisp->uWordSize = DSPWORDSIZE; -+ pDisp->uDataMauSize = DSPWORDSIZE; -+ /* Open channels for communicating with the RMS */ -+ chnlAttrs.uIOReqs = CHNLIOREQS; -+ chnlAttrs.hEvent = NULL; -+ ulChnlId = pDispAttrs->ulChnlOffset + CHNLTORMSOFFSET; -+ status = (*pIntfFxns->pfnChnlOpen)(&(pDisp->hChnlToDsp), -+ pDisp->hChnlMgr, CHNL_MODETODSP, ulChnlId, &chnlAttrs); -+ if (DSP_FAILED(status)) { -+ GT_2trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Create: Channel to RMS " -+ "open failed, chnl id = %d, status = 0x%x\n", -+ ulChnlId, status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ ulChnlId = pDispAttrs->ulChnlOffset + CHNLFROMRMSOFFSET; -+ status = (*pIntfFxns->pfnChnlOpen)(&(pDisp->hChnlFromDsp), -+ pDisp->hChnlMgr, CHNL_MODEFROMDSP, ulChnlId, -+ &chnlAttrs); -+ if (DSP_FAILED(status)) { -+ GT_2trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Create: Channel from RMS " -+ "open failed, chnl id = %d, status = 0x%x\n", -+ ulChnlId, status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Allocate buffer for commands, replies */ -+ pDisp->ulBufsize = pDispAttrs->ulChnlBufSize; -+ pDisp->ulBufsizeRMS = RMS_COMMANDBUFSIZE; -+ pDisp->pBuf = MEM_Calloc(pDisp->ulBufsize, MEM_PAGED); -+ if (pDisp->pBuf == NULL) { -+ status = DSP_EMEMORY; -+ GT_0trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Create: Failed " -+ "to allocate channel buffer!\n"); -+ } -+ } -+func_cont: -+ if (DSP_SUCCEEDED(status)) -+ *phDispObject = pDisp; -+ else -+ DeleteDisp(pDisp); -+ -+ DBC_Ensure(((DSP_FAILED(status)) && ((*phDispObject == NULL))) || -+ ((DSP_SUCCEEDED(status)) && -+ (MEM_IsValidHandle((*phDispObject), DISP_SIGNATURE)))); -+ return status; -+} -+ -+/* -+ * ======== DISP_Delete ======== -+ * Delete the NODE Dispatcher. -+ */ -+void DISP_Delete(struct DISP_OBJECT *hDisp) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); -+ -+ GT_1trace(DISP_DebugMask, GT_ENTER, -+ "DISP_Delete: hDisp: 0x%x\n", hDisp); -+ -+ DeleteDisp(hDisp); -+ -+ DBC_Ensure(!MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); -+} -+ -+/* -+ * ======== DISP_Exit ======== -+ * Discontinue usage of DISP module. -+ */ -+void DISP_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(DISP_DebugMask, GT_5CLASS, -+ "Entered DISP_Exit, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== DISP_Init ======== -+ * Initialize the DISP module. -+ */ -+bool DISP_Init(void) -+{ -+ bool fRetVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!DISP_DebugMask.flags); -+ GT_create(&DISP_DebugMask, "DI"); /* "DI" for DIspatcher */ -+ } -+ -+ if (fRetVal) -+ cRefs++; -+ -+ GT_1trace(DISP_DebugMask, GT_5CLASS, -+ "DISP_Init(), ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure((fRetVal && (cRefs > 0)) || (!fRetVal && (cRefs >= 0))); -+ return fRetVal; -+} -+ -+/* -+ * ======== DISP_NodeChangePriority ======== -+ * Change the priority of a node currently running on the target. 
-+ */ -+DSP_STATUS DISP_NodeChangePriority(struct DISP_OBJECT *hDisp, -+ struct NODE_OBJECT *hNode, -+ u32 ulRMSFxn, NODE_ENV nodeEnv, -+ s32 nPriority) -+{ -+ u32 dwArg; -+ struct RMS_Command *pCommand; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); -+ DBC_Require(hNode != NULL); -+ -+ GT_5trace(DISP_DebugMask, GT_ENTER, "DISP_NodeChangePriority: hDisp: " -+ "0x%x\thNode: 0x%x\tulRMSFxn: 0x%x\tnodeEnv: 0x%x\tnPriority\n", -+ hDisp, hNode, ulRMSFxn, nodeEnv, nPriority); -+ -+ /* Send message to RMS to change priority */ -+ pCommand = (struct RMS_Command *)(hDisp->pBuf); -+ pCommand->fxn = (RMS_WORD)(ulRMSFxn); -+ pCommand->arg1 = (RMS_WORD)nodeEnv; -+ pCommand->arg2 = nPriority; -+ status = SendMessage(hDisp, NODE_GetTimeout(hNode), -+ sizeof(struct RMS_Command), &dwArg); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeChangePriority failed! " -+ "status = 0x%x\n", status); -+ } -+ return status; -+} -+ -+/* -+ * ======== DISP_NodeCreate ======== -+ * Create a node on the DSP by remotely calling the node's create function. -+ */ -+DSP_STATUS DISP_NodeCreate(struct DISP_OBJECT *hDisp, struct NODE_OBJECT *hNode, -+ u32 ulRMSFxn, u32 ulCreateFxn, -+ IN CONST struct NODE_CREATEARGS *pArgs, -+ OUT NODE_ENV *pNodeEnv) -+{ -+ struct NODE_MSGARGS msgArgs; -+ struct NODE_TASKARGS taskArgs; -+ struct RMS_Command *pCommand; -+ struct RMS_MsgArgs *pMsgArgs; -+ struct RMS_MoreTaskArgs *pMoreTaskArgs; -+ enum NODE_TYPE nodeType; -+ u32 dwLength; -+ RMS_WORD *pdwBuf = NULL; -+ u32 ulBytes; -+ u32 i; -+ u32 total; -+ u32 uCharsInRMSWord; -+ s32 taskArgsOffset; -+ s32 sioInDefOffset; -+ s32 sioOutDefOffset; -+ s32 sioDefsOffset; -+ s32 argsOffset = -1; -+ s32 offset; -+ struct NODE_STRMDEF strmDef; -+ u32 max; -+ DSP_STATUS status = DSP_SOK; -+ struct DSP_NODEINFO nodeInfo; -+ u32 devType; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); -+ DBC_Require(hNode != NULL); -+ DBC_Require(NODE_GetType(hNode) != NODE_DEVICE); -+ DBC_Require(pNodeEnv != NULL); -+ -+ GT_6trace(DISP_DebugMask, GT_ENTER, -+ "DISP_NodeCreate: hDisp: 0x%x\thNode:" -+ " 0x%x\tulRMSFxn: 0x%x\tulCreateFxn: 0x%x\tpArgs: 0x%x\tpNodeEnv:" -+ " 0x%x\n", hDisp, hNode, ulRMSFxn, ulCreateFxn, pArgs, pNodeEnv); -+ -+ status = DEV_GetDevType(hDisp->hDevObject, &devType); -+ -+ GT_1trace(DISP_DebugMask, GT_6CLASS, "DISP_Create: Creating DISP " -+ "for device = 0x%x\n", devType); -+ -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (devType != DSP_UNIT) { -+ GT_1trace(DISP_DebugMask, GT_7CLASS, -+ "DISP_NodeCreate unknown device " -+ "type = 0x%x\n", devType); -+ goto func_end; -+ } -+ DBC_Require(pArgs != NULL); -+ nodeType = NODE_GetType(hNode); -+ msgArgs = pArgs->asa.msgArgs; -+ max = hDisp->ulBufsizeRMS; /*Max # of RMS words that can be sent */ -+ DBC_Assert(max == RMS_COMMANDBUFSIZE); -+ uCharsInRMSWord = sizeof(RMS_WORD) / hDisp->uCharSize; -+ /* Number of RMS words needed to hold arg data */ -+ dwLength = (msgArgs.uArgLength + uCharsInRMSWord - 1) / uCharsInRMSWord; -+ /* Make sure msg args and command fit in buffer */ -+ total = sizeof(struct RMS_Command) / sizeof(RMS_WORD) + -+ sizeof(struct RMS_MsgArgs) -+ / sizeof(RMS_WORD) - 1 + dwLength; -+ if (total >= max) { -+ status = DSP_EFAIL; -+ GT_2trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeCreate: Message args too" -+ " large for buffer! Message args size = %d, max = %d\n", -+ total, max); -+ } -+ /* -+ * Fill in buffer to send to RMS. 
-+ * The buffer will have the following format: -+ * -+ * RMS command: -+ * Address of RMS_CreateNode() -+ * Address of node's create function -+ * dummy argument -+ * node type -+ * -+ * Message Args: -+ * max number of messages -+ * segid for message buffer allocation -+ * notification type to use when message is received -+ * length of message arg data -+ * message args data -+ * -+ * Task Args (if task or socket node): -+ * priority -+ * stack size -+ * system stack size -+ * stack segment -+ * misc -+ * number of input streams -+ * pSTRMInDef[] - offsets of STRM definitions for input streams -+ * number of output streams -+ * pSTRMOutDef[] - offsets of STRM definitions for output -+ * streams -+ * STRMInDef[] - array of STRM definitions for input streams -+ * STRMOutDef[] - array of STRM definitions for output streams -+ * -+ * Socket Args (if DAIS socket node): -+ * -+ */ -+ if (DSP_SUCCEEDED(status)) { -+ total = 0; /* Total number of words in buffer so far */ -+ pdwBuf = (RMS_WORD *)hDisp->pBuf; -+ pCommand = (struct RMS_Command *)pdwBuf; -+ pCommand->fxn = (RMS_WORD)(ulRMSFxn); -+ pCommand->arg1 = (RMS_WORD)(ulCreateFxn); -+ if (NODE_GetLoadType(hNode) == NLDR_DYNAMICLOAD) { -+ /* Flush ICACHE on Load */ -+ pCommand->arg2 = 1; /* dummy argument */ -+ } else { -+ /* Do not flush ICACHE */ -+ pCommand->arg2 = 0; /* dummy argument */ -+ } -+ pCommand->data = NODE_GetType(hNode); -+ /* -+ * argsOffset is the offset of the data field in struct -+ * RMS_Command structure. We need this to calculate stream -+ * definition offsets. -+ */ -+ argsOffset = 3; -+ total += sizeof(struct RMS_Command) / sizeof(RMS_WORD); -+ /* Message args */ -+ pMsgArgs = (struct RMS_MsgArgs *) (pdwBuf + total); -+ pMsgArgs->maxMessages = msgArgs.uMaxMessages; -+ pMsgArgs->segid = msgArgs.uSegid; -+ pMsgArgs->notifyType = msgArgs.uNotifyType; -+ pMsgArgs->argLength = msgArgs.uArgLength; -+ total += sizeof(struct RMS_MsgArgs) / sizeof(RMS_WORD) - 1; -+ memcpy(pdwBuf + total, msgArgs.pData, msgArgs.uArgLength); -+ total += dwLength; -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* If node is a task node, copy task create arguments into buffer */ -+ if (nodeType == NODE_TASK || nodeType == NODE_DAISSOCKET) { -+ taskArgs = pArgs->asa.taskArgs; -+ taskArgsOffset = total; -+ total += sizeof(struct RMS_MoreTaskArgs) / sizeof(RMS_WORD) + -+ 1 + taskArgs.uNumInputs + taskArgs.uNumOutputs; -+ /* Copy task arguments */ -+ if (total < max) { -+ total = taskArgsOffset; -+ pMoreTaskArgs = (struct RMS_MoreTaskArgs *)(pdwBuf + -+ total); -+ /* -+ * Get some important info about the node. Note that we -+ * don't just reach into the hNode struct because -+ * that would break the node object's abstraction. 
-+ */ -+ GetNodeInfo(hNode, &nodeInfo); -+ GT_2trace(DISP_DebugMask, GT_ENTER, -+ "uExecutionPriority %x, nPriority %x\n", -+ nodeInfo.uExecutionPriority, -+ taskArgs.nPriority); -+ pMoreTaskArgs->priority = nodeInfo.uExecutionPriority; -+ pMoreTaskArgs->stackSize = taskArgs.uStackSize; -+ pMoreTaskArgs->sysstackSize = taskArgs.uSysStackSize; -+ pMoreTaskArgs->stackSeg = taskArgs.uStackSeg; -+ pMoreTaskArgs->heapAddr = taskArgs.uDSPHeapAddr; -+ pMoreTaskArgs->heapSize = taskArgs.uHeapSize; -+ pMoreTaskArgs->misc = taskArgs.ulDaisArg; -+ pMoreTaskArgs->numInputStreams = taskArgs.uNumInputs; -+ total += -+ sizeof(struct RMS_MoreTaskArgs) / sizeof(RMS_WORD); -+ GT_2trace(DISP_DebugMask, GT_7CLASS, -+ "DISP::::uDSPHeapAddr %x, " -+ "uHeapSize %x\n", taskArgs.uDSPHeapAddr, -+ taskArgs.uHeapSize); -+ /* Keep track of pSIOInDef[] and pSIOOutDef[] -+ * positions in the buffer, since this needs to be -+ * filled in later. */ -+ sioInDefOffset = total; -+ total += taskArgs.uNumInputs; -+ pdwBuf[total++] = taskArgs.uNumOutputs; -+ sioOutDefOffset = total; -+ total += taskArgs.uNumOutputs; -+ sioDefsOffset = total; -+ /* Fill SIO defs and offsets */ -+ offset = sioDefsOffset; -+ for (i = 0; i < taskArgs.uNumInputs; i++) { -+ if (DSP_FAILED(status)) -+ break; -+ -+ pdwBuf[sioInDefOffset + i] = -+ (offset - argsOffset) -+ * (sizeof(RMS_WORD) / DSPWORDSIZE); -+ strmDef = taskArgs.strmInDef[i]; -+ status = FillStreamDef(pdwBuf, &total, offset, -+ strmDef, max, uCharsInRMSWord); -+ offset = total; -+ } -+ for (i = 0; (i < taskArgs.uNumOutputs) && -+ (DSP_SUCCEEDED(status)); i++) { -+ pdwBuf[sioOutDefOffset + i] = -+ (offset - argsOffset) -+ * (sizeof(RMS_WORD) / DSPWORDSIZE); -+ strmDef = taskArgs.strmOutDef[i]; -+ status = FillStreamDef(pdwBuf, &total, offset, -+ strmDef, max, uCharsInRMSWord); -+ offset = total; -+ } -+ if (DSP_FAILED(status)) { -+ GT_2trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeCreate: Message" -+ " args to large for buffer! Message args" -+ " size = %d, max = %d\n", total, max); -+ } -+ } else { -+ /* Args won't fit */ -+ status = DSP_EFAIL; -+ GT_2trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeCreate: Message args " -+ " too large for buffer! Message args size = %d" -+ ", max = %d\n", total, max); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ ulBytes = total * sizeof(RMS_WORD); -+ DBC_Assert(ulBytes < (RMS_COMMANDBUFSIZE * sizeof(RMS_WORD))); -+ status = SendMessage(hDisp, NODE_GetTimeout(hNode), -+ ulBytes, pNodeEnv); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeCreate failed! " -+ "status = 0x%x\n", status); -+ } else { -+ /* -+ * Message successfully received from RMS. -+ * Return the status of the Node's create function -+ * on the DSP-side -+ */ -+ status = (((RMS_WORD *)(hDisp->pBuf))[0]); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeCreate, " -+ "DSP-side Node Create failed: 0x%x\n", -+ status); -+ } -+ -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== DISP_NodeDelete ======== -+ * purpose: -+ * Delete a node on the DSP by remotely calling the node's delete function. 
-+ * -+ */ -+DSP_STATUS DISP_NodeDelete(struct DISP_OBJECT *hDisp, struct NODE_OBJECT *hNode, -+ u32 ulRMSFxn, u32 ulDeleteFxn, NODE_ENV nodeEnv) -+{ -+ u32 dwArg; -+ struct RMS_Command *pCommand; -+ DSP_STATUS status = DSP_SOK; -+ u32 devType; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); -+ DBC_Require(hNode != NULL); -+ -+ GT_5trace(DISP_DebugMask, GT_ENTER, -+ "DISP_NodeDelete: hDisp: 0x%xthNode: " -+ "0x%x\tulRMSFxn: 0x%x\tulDeleteFxn: 0x%x\tnodeEnv: 0x%x\n", -+ hDisp, hNode, ulRMSFxn, ulDeleteFxn, nodeEnv); -+ -+ status = DEV_GetDevType(hDisp->hDevObject, &devType); -+ -+ if (DSP_SUCCEEDED(status)) { -+ -+ if (devType == DSP_UNIT) { -+ -+ /* -+ * Fill in buffer to send to RMS -+ */ -+ pCommand = (struct RMS_Command *)hDisp->pBuf; -+ pCommand->fxn = (RMS_WORD)(ulRMSFxn); -+ pCommand->arg1 = (RMS_WORD)nodeEnv; -+ pCommand->arg2 = (RMS_WORD)(ulDeleteFxn); -+ pCommand->data = NODE_GetType(hNode); -+ -+ status = SendMessage(hDisp, NODE_GetTimeout(hNode), -+ sizeof(struct RMS_Command), &dwArg); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeDelete failed!" -+ "status = 0x%x\n", status); -+ } else { -+ /* -+ * Message successfully received from RMS. -+ * Return the status of the Node's delete -+ * function on the DSP-side -+ */ -+ status = (((RMS_WORD *)(hDisp->pBuf))[0]); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeDelete, " -+ "DSP-side Node Delete failed: 0x%x\n", -+ status); -+ } -+ } -+ -+ -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== DISP_NodeRun ======== -+ * purpose: -+ * Start execution of a node's execute phase, or resume execution of a node -+ * that has been suspended (via DISP_NodePause()) on the DSP. -+ */ -+DSP_STATUS DISP_NodeRun(struct DISP_OBJECT *hDisp, struct NODE_OBJECT *hNode, -+ u32 ulRMSFxn, u32 ulExecuteFxn, NODE_ENV nodeEnv) -+{ -+ u32 dwArg; -+ struct RMS_Command *pCommand; -+ DSP_STATUS status = DSP_SOK; -+ u32 devType; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hDisp, DISP_SIGNATURE)); -+ DBC_Require(hNode != NULL); -+ -+ GT_5trace(DISP_DebugMask, GT_ENTER, "DISP_NodeRun: hDisp: 0x%xthNode: \ -+ 0x%x\tulRMSFxn: 0x%x\tulExecuteFxn: 0x%x\tnodeEnv: 0x%x\n", \ -+ hDisp, hNode, ulRMSFxn, ulExecuteFxn, nodeEnv); -+ -+ status = DEV_GetDevType(hDisp->hDevObject, &devType); -+ -+ if (DSP_SUCCEEDED(status)) { -+ -+ if (devType == DSP_UNIT) { -+ -+ /* -+ * Fill in buffer to send to RMS. -+ */ -+ pCommand = (struct RMS_Command *) hDisp->pBuf; -+ pCommand->fxn = (RMS_WORD) (ulRMSFxn); -+ pCommand->arg1 = (RMS_WORD) nodeEnv; -+ pCommand->arg2 = (RMS_WORD) (ulExecuteFxn); -+ pCommand->data = NODE_GetType(hNode); -+ -+ status = SendMessage(hDisp, NODE_GetTimeout(hNode), -+ sizeof(struct RMS_Command), &dwArg); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeRun failed!" -+ "status = 0x%x\n", status); -+ } else { -+ /* -+ * Message successfully received from RMS. -+ * Return the status of the Node's execute -+ * function on the DSP-side -+ */ -+ status = (((RMS_WORD *)(hDisp->pBuf))[0]); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_NodeRun, DSP-side Node " -+ "Execute failed: 0x%x\n", -+ status); -+ } -+ } -+ -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DeleteDisp ======== -+ * purpose: -+ * Frees the resources allocated for the dispatcher. 
-+ */ -+static void DeleteDisp(struct DISP_OBJECT *hDisp) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ -+ if (MEM_IsValidHandle(hDisp, DISP_SIGNATURE)) { -+ pIntfFxns = hDisp->pIntfFxns; -+ -+ /* Free Node Dispatcher resources */ -+ if (hDisp->hChnlFromDsp) { -+ /* Channel close can fail only if the channel handle -+ * is invalid. */ -+ status = (*pIntfFxns->pfnChnlClose) -+ (hDisp->hChnlFromDsp); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Delete: Failed to " -+ "close channel from RMS: 0x%x\n", -+ status); -+ } -+ } -+ if (hDisp->hChnlToDsp) { -+ status = (*pIntfFxns->pfnChnlClose)(hDisp->hChnlToDsp); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "DISP_Delete: Failed to " -+ "close channel to RMS: 0x%x\n", -+ status); -+ } -+ } -+ if (hDisp->pBuf) -+ MEM_Free(hDisp->pBuf); -+ -+ MEM_FreeObject(hDisp); -+ } -+} -+ -+/* -+ * ======== FillStreamDef ======== -+ * purpose: -+ * Fills stream definitions. -+ */ -+static DSP_STATUS FillStreamDef(RMS_WORD *pdwBuf, u32 *ptotal, u32 offset, -+ struct NODE_STRMDEF strmDef, u32 max, -+ u32 uCharsInRMSWord) -+{ -+ struct RMS_StrmDef *pStrmDef; -+ u32 total = *ptotal; -+ u32 uNameLen; -+ u32 dwLength; -+ DSP_STATUS status = DSP_SOK; -+ -+ if (total + sizeof(struct RMS_StrmDef) / sizeof(RMS_WORD) >= max) { -+ status = DSP_EFAIL; -+ } else { -+ pStrmDef = (struct RMS_StrmDef *)(pdwBuf + total); -+ pStrmDef->bufsize = strmDef.uBufsize; -+ pStrmDef->nbufs = strmDef.uNumBufs; -+ pStrmDef->segid = strmDef.uSegid; -+ pStrmDef->align = strmDef.uAlignment; -+ pStrmDef->timeout = strmDef.uTimeout; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* -+ * Since we haven't added the device name yet, subtract -+ * 1 from total. -+ */ -+ total += sizeof(struct RMS_StrmDef) / sizeof(RMS_WORD) - 1; -+ DBC_Require(strmDef.szDevice); -+ dwLength = strlen(strmDef.szDevice) + 1; -+ -+ /* Number of RMS_WORDS needed to hold device name */ -+ uNameLen = (dwLength + uCharsInRMSWord - 1) / uCharsInRMSWord; -+ -+ if (total + uNameLen >= max) { -+ status = DSP_EFAIL; -+ } else { -+ /* -+ * Zero out last word, since the device name may not -+ * extend to completely fill this word. -+ */ -+ pdwBuf[total + uNameLen - 1] = 0; -+ /** TODO USE SERVICES **/ -+ memcpy(pdwBuf + total, strmDef.szDevice, dwLength); -+ total += uNameLen; -+ *ptotal = total; -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== SendMessage ====== -+ * Send command message to RMS, get reply from RMS. -+ */ -+static DSP_STATUS SendMessage(struct DISP_OBJECT *hDisp, u32 dwTimeout, -+ u32 ulBytes, u32 *pdwArg) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct CHNL_OBJECT *hChnl; -+ u32 dwArg = 0; -+ u8 *pBuf; -+ struct CHNL_IOC chnlIOC; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(pdwArg != NULL); -+ -+ *pdwArg = (u32) NULL; -+ pIntfFxns = hDisp->pIntfFxns; -+ hChnl = hDisp->hChnlToDsp; -+ pBuf = hDisp->pBuf; -+ -+ /* Send the command */ -+ status = (*pIntfFxns->pfnChnlAddIOReq) (hChnl, pBuf, ulBytes, 0, -+ 0L, dwArg); -+ -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "SendMessage: Channel AddIOReq to" -+ " RMS failed! Status = 0x%x\n", status); -+ goto func_cont; -+ } -+ status = (*pIntfFxns->pfnChnlGetIOC) (hChnl, dwTimeout, &chnlIOC); -+ if (DSP_SUCCEEDED(status)) { -+ if (!CHNL_IsIOComplete(chnlIOC)) { -+ if (CHNL_IsTimedOut(chnlIOC)) { -+ status = DSP_ETIMEOUT; -+ } else { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "SendMessage failed! 
" -+ "Channel IOC status = 0x%x\n", -+ chnlIOC.status); -+ status = DSP_EFAIL; -+ } -+ } -+ } else { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "SendMessage: Channel GetIOC to" -+ " RMS failed! Status = 0x%x\n", status); -+ } -+func_cont: -+ /* Get the reply */ -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ hChnl = hDisp->hChnlFromDsp; -+ ulBytes = REPLYSIZE; -+ status = (*pIntfFxns->pfnChnlAddIOReq)(hChnl, pBuf, ulBytes, -+ 0, 0L, dwArg); -+ if (DSP_FAILED(status)) { -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "SendMessage: Channel AddIOReq " -+ "from RMS failed! Status = 0x%x\n", status); -+ goto func_end; -+ } -+ status = (*pIntfFxns->pfnChnlGetIOC) (hChnl, dwTimeout, &chnlIOC); -+ if (DSP_SUCCEEDED(status)) { -+ if (CHNL_IsTimedOut(chnlIOC)) { -+ status = DSP_ETIMEOUT; -+ } else if (chnlIOC.cBytes < ulBytes) { -+ /* Did not get all of the reply from the RMS */ -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "SendMessage: Did not get all" -+ "of reply from RMS! Bytes received: %d\n", -+ chnlIOC.cBytes); -+ status = DSP_EFAIL; -+ } else { -+ if (CHNL_IsIOComplete(chnlIOC)) { -+ DBC_Assert(chnlIOC.pBuf == pBuf); -+ status = (*((RMS_WORD *)chnlIOC.pBuf)); -+ *pdwArg = (((RMS_WORD *)(chnlIOC.pBuf))[1]); -+ } else { -+ status = DSP_EFAIL; -+ } -+ } -+ } else { -+ /* GetIOC failed */ -+ GT_1trace(DISP_DebugMask, GT_6CLASS, -+ "SendMessage: Failed to get " -+ "reply from RMS! Status = 0x%x\n", status); -+ } -+func_end: -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/drv.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/drv.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,1840 @@ -+/* -+ * drv.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== drv.c ======== -+ * Description: -+ * DSP/BIOS Bridge resource allocation module. -+ * -+ * Public Functions: -+ * DRV_Create -+ * DRV_Destroy -+ * DRV_Exit -+ * DRV_GetDevObject -+ * DRV_GetDevExtension -+ * DRV_GetFirstDevObject -+ * DRV_GetNextDevObject -+ * DRV_GetNextDevExtension -+ * DRV_Init -+ * DRV_InsertDevObject -+ * DRV_RemoveDevObject -+ * DRV_RequestResources -+ * DRV_ReleaseResources -+ * -+ *! Revision History -+ *! ======== ======== -+ *! 19-Apr-2004 sb: Replaced OS specific APIs with MEM_AllocPhysMem and -+ MEM_FreePhysMem. Fixed warnings. Cosmetic updates. -+ *! 12-Apr-2004 hp: IVA clean up during bridge-uninstall -+ *! 05-Jan-2004 vp: Updated for 24xx platform -+ *! 21-Mar-2003 sb: Get SHM size from registry -+ *! 10-Feb-2003 vp: Code review updates -+ *! 18-Oct-2002 vp: Ported to Linux platform -+ *! 30-Oct-2000 kc: Modified usage of REG_SetValue. -+ *! 06-Sep-2000 jeh Read channel info into struct CFG_HOSTRES in -+ *! RequestISAResources() -+ *! 21-Sep-2000 rr: numwindows is calculated instead of default value in -+ *! RequestISAResources. -+ *! 07-Aug-2000 rr: static list of dev objects removed. -+ *! 
27-Jul-2000 rr: RequestResources split into two(Request and Release) -+ *! Device extension created to hold the DevNodeString. -+ *! 17-Jul-2000 rr: Driver Object holds the list of Device Objects. -+ *! Added DRV_Create, DRV_Destroy, DRV_GetDevObject, -+ *! DRV_GetFirst/NextDevObject, DRV_Insert/RemoveDevObject. -+ *! 09-May-2000 rr: PCI Support is not L301 specific.Use of MEM_Calloc -+ *! instead of MEM_Alloc. -+ *! 28-Mar-2000 rr: PCI Support added. L301 Specific. TBD. -+ *! 03-Feb-2000 rr: GT and Module Init/exit Changes. Merged with kc. -+ *! 19-Jan-2000 rr: DBC_Ensure in RequestPCMCIA moved within PCCARD ifdef -+ *! 29-Dec-1999 rr: PCCard support for any slot.Bus type stored in the -+ *! struct CFG_HOSTRES Structure. -+ *! 17-Dec-1999 rr: if PCCARD_Init fails we return DSP_EFAIL. -+ *! DBC_Ensure checks for sucess and pDevice != NULL -+ *! 11-Dec-1999 ag: #define "Isa" renamed to "IsaBus". -+ *! 09-Dec-1999 rr: windows.h included to remove warnings. -+ *! 02-Dec-1999 rr: struct GT_Mask is with in if DEBUG. Request resources checks -+ *! status while making call to Reg functions. -+ *! 23-Nov-1999 rr: windows.h included -+ *! 19-Nov-1999 rr: DRV_RELEASE bug while setting the registry to zero. -+ *! fixed. -+ *! 12-Nov-1999 rr: RequestResources() reads values from the registry. -+ *! Hardcoded bIRQRegister define removed. -+ *! 05-Nov-1999 rr: Added hardcoded device interrupt. -+ *! 25-Oct-1999 rr: Resource structure removed. Now it uses the Host -+ *! Resource structure directly. -+ *! 15-Oct-1999 rr: Resource Structure modified. See drv.h -+ *! dwBusType taken from the registry.Hard coded -+ *! registry entries removed. -+ *! 05-Oct-1999 rr: Calling DEV_StartDevice moved to wcdce.c. DRV_Register -+ *! MiniDriver has been renamed to DRV_RequestResources. -+ *! DRV_UnRegisterMiniDriver fxn removed. -+ *! 24-Sep-1999 rr: Significant changes to the RegisterMiniDriver fxns. -+ *! Now it is simpler. IT stores the dev node in the -+ *! registry, assign resources and calls the DEV_Start. -+ *! 10-Sep-1999 rr: Register Minidriver modified. -+ *! - Resource structure follows the NT model -+ *! 08-Aug-1999 rr: Adopted for WinCE. Exports Fxns removed. Hull Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+#ifndef RES_CLEANUP_DISABLE -+#include -+#include -+#include -+#include -+#include -+#include -+#endif -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define SIGNATURE 0x5f52474d /* "DRV_" (in reverse) */ -+ -+struct DRV_OBJECT { -+ u32 dwSignature; -+ struct LST_LIST *devList; -+ struct LST_LIST *devNodeString; -+#ifndef RES_CLEANUP_DISABLE -+ struct PROCESS_CONTEXT *procCtxtList; -+#endif -+}; -+ -+/* -+ * This is the Device Extension. 
Named with the Prefix -+ * DRV_ since it is living in this module -+ */ -+struct DRV_EXT { -+ struct LST_ELEM link; -+ char szString[MAXREGPATHLENGTH]; -+}; -+ -+/* ----------------------------------- Globals */ -+static s32 cRefs; -+ -+#if GT_TRACE -+extern struct GT_Mask curTrace; -+#endif -+ -+/* ----------------------------------- Function Prototypes */ -+static DSP_STATUS RequestBridgeResources(u32 dwContext, s32 fRequest); -+static DSP_STATUS RequestBridgeResourcesDSP(u32 dwContext, s32 fRequest); -+ -+#ifndef RES_CLEANUP_DISABLE -+/* GPP PROCESS CLEANUP CODE */ -+ -+static DSP_STATUS PrintProcessInformation(void); -+static DSP_STATUS DRV_ProcFreeNodeRes(HANDLE hPCtxt); -+static DSP_STATUS DRV_ProcFreeSTRMRes(HANDLE hPCtxt); -+extern enum NODE_STATE NODE_GetState(HANDLE hNode); -+ -+/* Get the process context list from driver object */ -+ -+/* Set the Process ID */ -+DSP_STATUS DRV_ProcSetPID(HANDLE hPCtxt, s32 hProcess) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Assert(hPCtxt != NULL); -+ -+ pCtxt->pid = hProcess; -+ return status; -+} -+ -+ -+/* Getting the head of the process context list */ -+DSP_STATUS DRV_GetProcCtxtList(struct PROCESS_CONTEXT **pPctxt, -+ struct DRV_OBJECT *hDrvObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DRV_OBJECT *pDrvObject = (struct DRV_OBJECT *)hDrvObject; -+ -+ DBC_Assert(hDrvObject != NULL); -+ GT_2trace(curTrace, GT_ENTER, -+ "DRV_GetProcCtxtList: 2 *pPctxt:%x, pDrvObject" -+ ":%x", *pPctxt, pDrvObject); -+ *pPctxt = pDrvObject->procCtxtList; -+ GT_2trace(curTrace, GT_ENTER, -+ "DRV_GetProcCtxtList: 3 *pPctxt:%x, pDrvObject" -+ ":%x", *pPctxt, pDrvObject); -+ return status; -+} -+ -+/* Add a new process context to process context list */ -+DSP_STATUS DRV_InsertProcContext(struct DRV_OBJECT *hDrVObject, HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT **pCtxt = (struct PROCESS_CONTEXT **)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct PROCESS_CONTEXT *pCtxtList = NULL; -+ struct DRV_OBJECT *hDRVObject; -+ -+ GT_0trace(curTrace, GT_ENTER, "\n In DRV_InsertProcContext\n"); -+ -+ status = CFG_GetObject((u32 *)&hDRVObject, REG_DRV_OBJECT); -+ DBC_Assert(hDRVObject != NULL); -+ -+ *pCtxt = MEM_Calloc(1 * sizeof(struct PROCESS_CONTEXT), MEM_PAGED); -+ if (!*pCtxt) { -+ pr_err("DSP: MEM_Calloc failed in DRV_InsertProcContext\n"); -+ return DSP_EMEMORY; -+ } -+ -+ spin_lock_init(&(*pCtxt)->proc_list_lock); -+ INIT_LIST_HEAD(&(*pCtxt)->processor_list); -+ -+ GT_0trace(curTrace, GT_ENTER, -+ "\n In DRV_InsertProcContext Calling " -+ "DRV_GetProcCtxtList\n"); -+ DRV_GetProcCtxtList(&pCtxtList, hDRVObject); -+ GT_0trace(curTrace, GT_ENTER, -+ "\n In DRV_InsertProcContext After Calling " -+ "DRV_GetProcCtxtList\n"); -+ if (pCtxtList != NULL) { -+ GT_0trace(curTrace, GT_ENTER, -+ "\n In DRV_InsertProcContext and pCtxt is " -+ "not Null\n"); -+ while (pCtxtList->next != NULL) -+ pCtxtList = pCtxtList->next; -+ -+ pCtxtList->next = *pCtxt; -+ } else { -+ GT_0trace(curTrace, GT_ENTER, -+ "\n In DRV_InsertProcContext and " -+ "pCtxt is Null\n"); -+ hDRVObject->procCtxtList = *pCtxt; -+ } -+ return status; -+} -+ -+/* Delete a process context from process resource context list */ -+DSP_STATUS DRV_RemoveProcContext(struct DRV_OBJECT *hDRVObject, -+ HANDLE pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROCESS_CONTEXT *pr_ctxt_list = NULL; -+ struct PROCESS_CONTEXT *uninitialized_var(ptr_prev); -+ -+ DBC_Assert(hDRVObject != NULL); -+ -+ GT_0trace(curTrace, GT_ENTER, 
"DRV_RemoveProcContext: 12"); -+ DRV_GetProcCtxtList(&pr_ctxt_list, hDRVObject); -+ -+ /* Special condition */ -+ if (pr_ctxt_list == pr_ctxt) { -+ hDRVObject->procCtxtList = NULL; -+ goto func_cont; -+ } -+ -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveProcContext: 13"); -+ while (pr_ctxt_list && (pr_ctxt_list != pr_ctxt)) { -+ ptr_prev = pr_ctxt_list; -+ pr_ctxt_list = pr_ctxt_list->next; -+ GT_0trace(curTrace, GT_ENTER, -+ "DRV_RemoveProcContext: 2"); -+ } -+ -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveProcContext: 3"); -+ -+ if (!pr_ctxt_list) -+ return DSP_ENOTFOUND; -+ else -+ ptr_prev->next = pr_ctxt_list->next; -+ -+func_cont: -+ MEM_Free(pr_ctxt); -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveProcContext: 7"); -+ -+ return status; -+} -+ -+/* Update the state of process context */ -+DSP_STATUS DRV_ProcUpdatestate(HANDLE hPCtxt, enum GPP_PROC_RES_STATE status) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status1 = DSP_SOK; -+ if (pCtxt != NULL) { -+ pCtxt->resState = status; -+ } else { -+ GT_0trace(curTrace, GT_ENTER, -+ "DRV_ProcUpdatestate: Failed to update " -+ "process state"); -+ } -+ return status1; -+} -+ -+/* Allocate and add a node resource element -+* This function is called from .Node_Allocate. */ -+DSP_STATUS DRV_InsertNodeResElement(HANDLE hNode, HANDLE hNodeRes, -+ HANDLE hPCtxt) -+{ -+ struct NODE_RES_OBJECT **pNodeRes = (struct NODE_RES_OBJECT **)hNodeRes; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct NODE_RES_OBJECT *pTempNodeRes = NULL; -+ GT_0trace(curTrace, GT_ENTER, "DRV_InsertNodeResElement: 1"); -+ *pNodeRes = (struct NODE_RES_OBJECT *)MEM_Calloc -+ (1 * sizeof(struct NODE_RES_OBJECT), MEM_PAGED); -+ DBC_Assert(hPCtxt != NULL); -+ if ((*pNodeRes == NULL) || (hPCtxt == NULL)) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_InsertNodeResElement: 12"); -+ status = DSP_EHANDLE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ (*pNodeRes)->hNode = hNode; -+ if (pCtxt->pNodeList != NULL) { -+ pTempNodeRes = pCtxt->pNodeList; -+ while (pTempNodeRes->next != NULL) -+ pTempNodeRes = pTempNodeRes->next; -+ -+ pTempNodeRes->next = *pNodeRes; -+ GT_0trace(curTrace, GT_ENTER, -+ "DRV_InsertNodeResElement: 2"); -+ } else { -+ pCtxt->pNodeList = *pNodeRes; -+ GT_0trace(curTrace, GT_ENTER, -+ "DRV_InsertNodeResElement: 3"); -+ } -+ } -+ GT_0trace(curTrace, GT_ENTER, "DRV_InsertNodeResElement: 4"); -+ return status; -+} -+ -+/* Release all Node resources and its context -+* This is called from .Node_Delete. 
*/ -+DSP_STATUS DRV_RemoveNodeResElement(HANDLE hNodeRes, HANDLE hPCtxt) -+{ -+ struct NODE_RES_OBJECT *pNodeRes = (struct NODE_RES_OBJECT *)hNodeRes; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct NODE_RES_OBJECT *pTempNode2 = pCtxt->pNodeList; -+ struct NODE_RES_OBJECT *pTempNode = pCtxt->pNodeList; -+ -+ DBC_Assert(hPCtxt != NULL); -+ GT_0trace(curTrace, GT_ENTER, "\nDRV_RemoveNodeResElement: 1\n"); -+ while ((pTempNode != NULL) && (pTempNode != pNodeRes)) { -+ pTempNode2 = pTempNode; -+ pTempNode = pTempNode->next; -+ } -+ if (pCtxt->pNodeList == pNodeRes) -+ pCtxt->pNodeList = pNodeRes->next; -+ -+ if (pTempNode == NULL) -+ return DSP_ENOTFOUND; -+ else if (pTempNode2->next != NULL) -+ pTempNode2->next = pTempNode2->next->next; -+ -+ MEM_Free(pTempNode); -+ return status; -+} -+ -+/* Actual Node De-Allocation */ -+static DSP_STATUS DRV_ProcFreeNodeRes(HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct NODE_RES_OBJECT *pNodeList = NULL; -+ struct NODE_RES_OBJECT *pNodeRes = NULL; -+ u32 nState; -+ -+ DBC_Assert(hPCtxt != NULL); -+ pNodeList = pCtxt->pNodeList; -+ while (pNodeList != NULL) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_ProcFreeNodeRes: 1"); -+ pNodeRes = pNodeList; -+ pNodeList = pNodeList->next; -+ if (pNodeRes->nodeAllocated) { -+ nState = NODE_GetState(pNodeRes->hNode) ; -+ GT_1trace(curTrace, GT_5CLASS, -+ "DRV_ProcFreeNodeRes: Node state %x\n", nState); -+ if (nState <= NODE_DELETING) { -+ if ((nState == NODE_RUNNING) || -+ (nState == NODE_PAUSED) || -+ (nState == NODE_TERMINATING)) { -+ GT_1trace(curTrace, GT_5CLASS, -+ "Calling Node_Terminate for Node:" -+ " 0x%x\n", pNodeRes->hNode); -+ status = NODE_Terminate -+ (pNodeRes->hNode, &status); -+ GT_1trace(curTrace, GT_5CLASS, -+ "Calling Node_Delete for Node:" -+ " 0x%x\n", pNodeRes->hNode); -+ status = NODE_Delete(pNodeRes->hNode, -+ pCtxt); -+ GT_1trace(curTrace, GT_5CLASS, -+ "the status after the NodeDelete %x\n", -+ status); -+ } else if ((nState == NODE_ALLOCATED) -+ || (nState == NODE_CREATED)) -+ status = NODE_Delete(pNodeRes->hNode, -+ pCtxt); -+ } -+ } -+ } -+ return status; -+} -+ -+/* Allocate the DMM resource element -+* This is called from Proc_Map. 
after the actual resource is allocated */ -+DSP_STATUS DRV_InsertDMMResElement(HANDLE hDMMRes, HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ struct DMM_RES_OBJECT **pDMMRes = (struct DMM_RES_OBJECT **)hDMMRes; -+ DSP_STATUS status = DSP_SOK; -+ struct DMM_RES_OBJECT *pTempDMMRes = NULL; -+ -+ *pDMMRes = (struct DMM_RES_OBJECT *) -+ MEM_Calloc(1 * sizeof(struct DMM_RES_OBJECT), MEM_PAGED); -+ DBC_Assert(hPCtxt != NULL); -+ GT_0trace(curTrace, GT_ENTER, "DRV_InsertDMMResElement: 1"); -+ if ((*pDMMRes == NULL) || (hPCtxt == NULL)) { -+ GT_0trace(curTrace, GT_5CLASS, "DRV_InsertDMMResElement: 2"); -+ status = DSP_EHANDLE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ if (pCtxt->pDMMList != NULL) { -+ GT_0trace(curTrace, GT_5CLASS, -+ "DRV_InsertDMMResElement: 3"); -+ pTempDMMRes = pCtxt->pDMMList; -+ while (pTempDMMRes->next != NULL) -+ pTempDMMRes = pTempDMMRes->next; -+ -+ pTempDMMRes->next = *pDMMRes; -+ } else { -+ pCtxt->pDMMList = *pDMMRes; -+ GT_0trace(curTrace, GT_5CLASS, -+ "DRV_InsertDMMResElement: 4"); -+ } -+ } -+ GT_0trace(curTrace, GT_ENTER, "DRV_InsertDMMResElement: 5"); -+ return status; -+} -+ -+ -+ -+/* Release DMM resource element context -+* This is called from Proc_UnMap. after the actual resource is freed */ -+DSP_STATUS DRV_RemoveDMMResElement(HANDLE hDMMRes, HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ struct DMM_RES_OBJECT *pDMMRes = (struct DMM_RES_OBJECT *)hDMMRes; -+ DSP_STATUS status = DSP_SOK; -+ struct DMM_RES_OBJECT *pTempDMMRes2 = NULL; -+ struct DMM_RES_OBJECT *pTempDMMRes = NULL; -+ -+ DBC_Assert(hPCtxt != NULL); -+ pTempDMMRes2 = pCtxt->pDMMList; -+ pTempDMMRes = pCtxt->pDMMList; -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 1"); -+ while ((pTempDMMRes != NULL) && (pTempDMMRes != pDMMRes)) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 2"); -+ pTempDMMRes2 = pTempDMMRes; -+ pTempDMMRes = pTempDMMRes->next; -+ } -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 3"); -+ if (pCtxt->pDMMList == pTempDMMRes) -+ pCtxt->pDMMList = pTempDMMRes->next; -+ -+ if (pTempDMMRes == NULL) -+ return DSP_ENOTFOUND; -+ else if (pTempDMMRes2->next != NULL) -+ pTempDMMRes2->next = pTempDMMRes2->next->next; -+ -+ MEM_Free(pDMMRes); -+ GT_0trace(curTrace, GT_ENTER, "DRV_RemoveDMMResElement: 4"); -+ return status; -+} -+ -+/* Update DMM resource status */ -+DSP_STATUS DRV_UpdateDMMResElement(HANDLE hDMMRes, u32 pMpuAddr, u32 ulSize, -+ u32 pReqAddr, u32 pMapAddr, -+ HANDLE hProcessor) -+{ -+ struct DMM_RES_OBJECT *pDMMRes = (struct DMM_RES_OBJECT *)hDMMRes; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Assert(hDMMRes != NULL); -+ pDMMRes->ulMpuAddr = pMpuAddr; -+ pDMMRes->ulDSPAddr = pMapAddr; -+ pDMMRes->ulDSPResAddr = pReqAddr; -+ pDMMRes->dmmSize = ulSize; -+ pDMMRes->hProcessor = hProcessor; -+ pDMMRes->dmmAllocated = 1; -+ -+ return status; -+} -+ -+/* Actual DMM De-Allocation */ -+DSP_STATUS DRV_ProcFreeDMMRes(HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct DMM_RES_OBJECT *pDMMList = pCtxt->pDMMList; -+ struct DMM_RES_OBJECT *pDMMRes = NULL; -+ -+ DBC_Assert(hPCtxt != NULL); -+ GT_0trace(curTrace, GT_ENTER, "\nDRV_ProcFreeDMMRes: 1\n"); -+ while (pDMMList != NULL) { -+ pDMMRes = pDMMList; -+ pDMMList = pDMMList->next; -+ if (pDMMRes->dmmAllocated) { -+ status = PROC_UnMap(pDMMRes->hProcessor, -+ (void *)pDMMRes->ulDSPResAddr, pCtxt); -+ status = 
PROC_UnReserveMemory(pDMMRes->hProcessor, -+ (void *)pDMMRes->ulDSPResAddr); -+ pDMMRes->dmmAllocated = 0; -+ } -+ } -+ return status; -+} -+ -+ -+/* Release all DMM resources and its context -+* This is called from .bridge_release. */ -+DSP_STATUS DRV_RemoveAllDMMResElements(HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct DMM_RES_OBJECT *pTempDMMRes2 = NULL; -+ struct DMM_RES_OBJECT *pTempDMMRes = NULL; -+ -+ DBC_Assert(pCtxt != NULL); -+ DRV_ProcFreeDMMRes(pCtxt); -+ pTempDMMRes = pCtxt->pDMMList; -+ while (pTempDMMRes != NULL) { -+ pTempDMMRes2 = pTempDMMRes; -+ pTempDMMRes = pTempDMMRes->next; -+ MEM_Free(pTempDMMRes2); -+ } -+ pCtxt->pDMMList = NULL; -+ return status; -+} -+ -+DSP_STATUS DRV_GetDMMResElement(u32 pMapAddr, HANDLE hDMMRes, HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ struct DMM_RES_OBJECT **pDMMRes = (struct DMM_RES_OBJECT **)hDMMRes; -+ DSP_STATUS status = DSP_SOK; -+ struct DMM_RES_OBJECT *pTempDMM2 = NULL; -+ struct DMM_RES_OBJECT *pTempDMM = NULL; -+ -+ DBC_Assert(hPCtxt != NULL); -+ pTempDMM = pCtxt->pDMMList; -+ while ((pTempDMM != NULL) && (pTempDMM->ulDSPAddr != pMapAddr)) { -+ GT_3trace(curTrace, GT_ENTER, -+ "DRV_GetDMMResElement: 2 pTempDMM:%x " -+ "pTempDMM->ulDSPAddr:%x pMapAddr:%x\n", pTempDMM, -+ pTempDMM->ulDSPAddr, pMapAddr); -+ pTempDMM2 = pTempDMM; -+ pTempDMM = pTempDMM->next; -+ } -+ if (pTempDMM != NULL) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_GetDMMResElement: 3"); -+ *pDMMRes = pTempDMM; -+ } else { -+ status = DSP_ENOTFOUND; -+ } GT_0trace(curTrace, GT_ENTER, "DRV_GetDMMResElement: 4"); -+ return status; -+} -+ -+/* Update Node allocation status */ -+void DRV_ProcNodeUpdateStatus(HANDLE hNodeRes, s32 status) -+{ -+ struct NODE_RES_OBJECT *pNodeRes = (struct NODE_RES_OBJECT *)hNodeRes; -+ DBC_Assert(hNodeRes != NULL); -+ pNodeRes->nodeAllocated = status; -+} -+ -+/* Update Node Heap status */ -+void DRV_ProcNodeUpdateHeapStatus(HANDLE hNodeRes, s32 status) -+{ -+ struct NODE_RES_OBJECT *pNodeRes = (struct NODE_RES_OBJECT *)hNodeRes; -+ DBC_Assert(hNodeRes != NULL); -+ pNodeRes->heapAllocated = status; -+} -+ -+/* Release all Node resources and its context -+* This is called from .bridge_release. 
-+*/ -+DSP_STATUS DRV_RemoveAllNodeResElements(HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct NODE_RES_OBJECT *pTempNode2 = NULL; -+ struct NODE_RES_OBJECT *pTempNode = NULL; -+ -+ DBC_Assert(hPCtxt != NULL); -+ DRV_ProcFreeNodeRes(pCtxt); -+ pTempNode = pCtxt->pNodeList; -+ while (pTempNode != NULL) { -+ pTempNode2 = pTempNode; -+ pTempNode = pTempNode->next; -+ MEM_Free(pTempNode2); -+ } -+ pCtxt->pNodeList = NULL; -+ return status; -+} -+ -+/* Getting the node resource element */ -+ -+DSP_STATUS DRV_GetNodeResElement(HANDLE hNode, HANDLE hNodeRes, HANDLE hPCtxt) -+{ -+ struct NODE_RES_OBJECT **nodeRes = (struct NODE_RES_OBJECT **)hNodeRes; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct NODE_RES_OBJECT *pTempNode2 = NULL; -+ struct NODE_RES_OBJECT *pTempNode = NULL; -+ -+ DBC_Assert(hPCtxt != NULL); -+ pTempNode = pCtxt->pNodeList; -+ GT_0trace(curTrace, GT_ENTER, "DRV_GetNodeResElement: 1"); -+ while ((pTempNode != NULL) && (pTempNode->hNode != hNode)) { -+ pTempNode2 = pTempNode; -+ pTempNode = pTempNode->next; -+ } -+ if (pTempNode != NULL) -+ *nodeRes = pTempNode; -+ else -+ status = DSP_ENOTFOUND; -+ -+ return status; -+} -+ -+ -+ -+/* Allocate the STRM resource element -+* This is called after the actual resource is allocated -+*/ -+DSP_STATUS DRV_ProcInsertSTRMResElement(HANDLE hStreamHandle, HANDLE hSTRMRes, -+ HANDLE hPCtxt) -+{ -+ struct STRM_RES_OBJECT **pSTRMRes = (struct STRM_RES_OBJECT **)hSTRMRes; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_RES_OBJECT *pTempSTRMRes = NULL; -+ DBC_Assert(hPCtxt != NULL); -+ -+ *pSTRMRes = (struct STRM_RES_OBJECT *) -+ MEM_Calloc(1 * sizeof(struct STRM_RES_OBJECT), MEM_PAGED); -+ if ((*pSTRMRes == NULL) || (hPCtxt == NULL)) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_InsertSTRMResElement: 2"); -+ status = DSP_EHANDLE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ (*pSTRMRes)->hStream = hStreamHandle; -+ if (pCtxt->pSTRMList != NULL) { -+ GT_0trace(curTrace, GT_ENTER, -+ "DRV_InsertiSTRMResElement: 3"); -+ pTempSTRMRes = pCtxt->pSTRMList; -+ while (pTempSTRMRes->next != NULL) -+ pTempSTRMRes = pTempSTRMRes->next; -+ -+ pTempSTRMRes->next = *pSTRMRes; -+ } else { -+ pCtxt->pSTRMList = *pSTRMRes; -+ GT_0trace(curTrace, GT_ENTER, -+ "DRV_InsertSTRMResElement: 4"); -+ } -+ } -+ return status; -+} -+ -+ -+ -+/* Release Stream resource element context -+* This function called after the actual resource is freed -+*/ -+DSP_STATUS DRV_ProcRemoveSTRMResElement(HANDLE hSTRMRes, HANDLE hPCtxt) -+{ -+ struct STRM_RES_OBJECT *pSTRMRes = (struct STRM_RES_OBJECT *)hSTRMRes; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_RES_OBJECT *pTempSTRMRes2 = pCtxt->pSTRMList; -+ struct STRM_RES_OBJECT *pTempSTRMRes = pCtxt->pSTRMList; -+ -+ DBC_Assert(hPCtxt != NULL); -+ while ((pTempSTRMRes != NULL) && (pTempSTRMRes != pSTRMRes)) { -+ pTempSTRMRes2 = pTempSTRMRes; -+ pTempSTRMRes = pTempSTRMRes->next; -+ } -+ if (pCtxt->pSTRMList == pTempSTRMRes) -+ pCtxt->pSTRMList = pTempSTRMRes->next; -+ -+ if (pTempSTRMRes == NULL) -+ status = DSP_ENOTFOUND; -+ else if (pTempSTRMRes2->next != NULL) -+ pTempSTRMRes2->next = pTempSTRMRes2->next->next; -+ -+ MEM_Free(pSTRMRes); -+ return status; -+} -+ -+ -+/* Actual Stream De-Allocation */ -+static DSP_STATUS DRV_ProcFreeSTRMRes(HANDLE hPCtxt) -+{ -+ struct 
PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_SOK; -+ u8 **apBuffer = NULL; -+ struct STRM_RES_OBJECT *pSTRMList = NULL; -+ struct STRM_RES_OBJECT *pSTRMRes = NULL; -+ u8 *pBufPtr; -+ u32 ulBytes; -+ u32 dwArg; -+ s32 ulBufSize; -+ -+ -+ DBC_Assert(hPCtxt != NULL); -+ pSTRMList = pCtxt->pSTRMList; -+ while (pSTRMList != NULL) { -+ pSTRMRes = pSTRMList; -+ pSTRMList = pSTRMList->next; -+ if (pSTRMRes->uNumBufs != 0) { -+ apBuffer = MEM_Alloc((pSTRMRes->uNumBufs * -+ sizeof(u8 *)), MEM_NONPAGED); -+ status = STRM_FreeBuffer(pSTRMRes->hStream, apBuffer, -+ pSTRMRes->uNumBufs, pCtxt); -+ MEM_Free(apBuffer); -+ } -+ status = STRM_Close(pSTRMRes->hStream, pCtxt); -+ if (DSP_FAILED(status)) { -+ if (status == DSP_EPENDING) { -+ status = STRM_Reclaim(pSTRMRes->hStream, -+ &pBufPtr, &ulBytes, -+ (u32 *)&ulBufSize, &dwArg); -+ if (DSP_SUCCEEDED(status)) -+ status = STRM_Close(pSTRMRes->hStream, -+ pCtxt); -+ -+ } -+ } -+ } -+ return status1; -+} -+ -+/* Release all Stream resources and its context -+* This is called from .bridge_release. -+*/ -+DSP_STATUS DRV_RemoveAllSTRMResElements(HANDLE hPCtxt) -+{ -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_RES_OBJECT *pTempSTRMRes2 = NULL; -+ struct STRM_RES_OBJECT *pTempSTRMRes = NULL; -+ -+ DBC_Assert(hPCtxt != NULL); -+ DRV_ProcFreeSTRMRes(pCtxt); -+ pTempSTRMRes = pCtxt->pSTRMList; -+ while (pTempSTRMRes != NULL) { -+ pTempSTRMRes2 = pTempSTRMRes; -+ pTempSTRMRes = pTempSTRMRes->next; -+ MEM_Free(pTempSTRMRes2); -+ } -+ pCtxt->pSTRMList = NULL; -+ return status; -+} -+ -+ -+/* Getting the stream resource element */ -+DSP_STATUS DRV_GetSTRMResElement(HANDLE hStrm, HANDLE hSTRMRes, HANDLE hPCtxt) -+{ -+ struct STRM_RES_OBJECT **STRMRes = (struct STRM_RES_OBJECT **)hSTRMRes; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_RES_OBJECT *pTempSTRM2 = NULL; -+ struct STRM_RES_OBJECT *pTempSTRM = pCtxt->pSTRMList; -+ -+ DBC_Assert(hPCtxt != NULL); -+ while ((pTempSTRM != NULL) && (pTempSTRM->hStream != hStrm)) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 2"); -+ pTempSTRM2 = pTempSTRM; -+ pTempSTRM = pTempSTRM->next; -+ } -+ if (pTempSTRM != NULL) { -+ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 3"); -+ *STRMRes = pTempSTRM; -+ } else { -+ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 4"); -+ status = DSP_ENOTFOUND; -+ } -+ GT_0trace(curTrace, GT_ENTER, "DRV_GetSTRMResElement: 5"); -+ return status; -+} -+ -+/* Updating the stream resource element */ -+DSP_STATUS DRV_ProcUpdateSTRMRes(u32 uNumBufs, HANDLE hSTRMRes, HANDLE hPCtxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct STRM_RES_OBJECT **STRMRes = (struct STRM_RES_OBJECT **)hSTRMRes; -+ -+ DBC_Assert(hPCtxt != NULL); -+ (*STRMRes)->uNumBufs = uNumBufs; -+ return status; -+} -+ -+/* Displaying the resources allocated by a process */ -+DSP_STATUS DRV_ProcDisplayResInfo(u8 *pBuf1, u32 *pSize) -+{ -+ struct PROCESS_CONTEXT *pCtxt = NULL; -+ struct NODE_RES_OBJECT *pNodeRes = NULL; -+ struct DMM_RES_OBJECT *pDMMRes = NULL; -+ struct STRM_RES_OBJECT *pSTRMRes = NULL; -+ struct DSPHEAP_RES_OBJECT *pDSPHEAPRes = NULL; -+ u32 tempCount = 1; -+ HANDLE hDrvObject = NULL; -+ void *pBuf = pBuf1; -+ u8 pTempBuf[250]; -+ u32 tempStrLen = 0, tempStrLen2 = 0; -+ DSP_STATUS status = DSP_SOK; -+ -+ CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ DRV_GetProcCtxtList(&pCtxt, (struct 
DRV_OBJECT *)hDrvObject); -+ GT_0trace(curTrace, GT_ENTER, "*********************" -+ "DRV_ProcDisplayResourceInfo:*\n"); -+ while (pCtxt != NULL) { -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "-------------------------------------" -+ "-----------------------------------\n"); -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ if (pCtxt->resState == PROC_RES_ALLOCATED) { -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "GPP Process Resource State: " -+ "pCtxt->resState = PROC_RES_ALLOCATED, " -+ " Process ID: %d\n", pCtxt->pid); -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ } else { -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "GPP Resource State: pCtxt->resState" -+ " = PROC_RES_DEALLOCATED, Process ID:%d\n", -+ pCtxt->pid); -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ } -+ pNodeRes = pCtxt->pNodeList; -+ tempCount = 1; -+ while (pNodeRes != NULL) { -+ GT_2trace(curTrace, GT_ENTER, -+ "DRV_ProcDisplayResourceInfo: #:%d " -+ "pCtxt->pNodeList->hNode:%x\n", -+ tempCount, pNodeRes->hNode); -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "Node Resource Information: Node #" -+ " %d Node Handle hNode:0X%x\n", -+ tempCount, (u32)pNodeRes->hNode); -+ pNodeRes = pNodeRes->next; -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ tempCount++; -+ } -+ tempCount = 1; -+ pDSPHEAPRes = pCtxt->pDSPHEAPList; -+ while (pDSPHEAPRes != NULL) { -+ GT_2trace(curTrace, GT_ENTER, -+ "DRV_ProcDisplayResourceInfo: #:%d " -+ "pCtxt->pDSPHEAPRList->ulMpuAddr:%x\n", -+ tempCount, pDSPHEAPRes->ulMpuAddr); -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "DSP Heap Resource Info: HEAP # %d" -+ " Mapped GPP Address: 0x%x, size: 0x%x\n", -+ tempCount, (u32)pDSPHEAPRes->ulMpuAddr, -+ (u32)pDSPHEAPRes->heapSize); -+ pDSPHEAPRes = pDSPHEAPRes->next; -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ tempCount++; -+ } -+ tempCount = 1; -+ pDMMRes = pCtxt->pDMMList; -+ while (pDMMRes != NULL) { -+ GT_2trace(curTrace, GT_ENTER, -+ "DRV_ProcDisplayResourceInfo: #:%d " -+ " pCtxt->pDMMList->ulMpuAddr:%x\n", -+ tempCount, -+ pDMMRes->ulMpuAddr); -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "DMM Resource Info: DMM # %d Mapped" -+ " GPP Address: 0x%x, size: 0x%x\n", -+ tempCount, (u32)pDMMRes->ulMpuAddr, -+ (u32)pDMMRes->dmmSize); -+ pDMMRes = pDMMRes->next; -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ tempCount++; -+ } -+ tempCount = 1; -+ pSTRMRes = pCtxt->pSTRMList; -+ while (pSTRMRes != NULL) { -+ GT_2trace(curTrace, GT_ENTER, -+ "DRV_ProcDisplayResourceInfo: #:%d " -+ "pCtxt->pSTRMList->hStream:%x\n", tempCount, -+ pSTRMRes->hStream); -+ tempStrLen2 = sprintf((char *)pTempBuf, -+ "Stream Resource info: STRM # %d " -+ "Stream Handle: 0x%x \n", -+ tempCount, (u32)pSTRMRes->hStream); -+ pSTRMRes = pSTRMRes->next; -+ tempStrLen2 += 2; -+ memmove(pBuf+tempStrLen, pTempBuf, tempStrLen2); -+ tempStrLen += tempStrLen2; -+ tempCount++; -+ } -+ pCtxt = pCtxt->next; -+ } -+ *pSize = tempStrLen; -+ status = PrintProcessInformation(); -+ GT_0trace(curTrace, GT_ENTER, "*********************" -+ "DRV_ProcDisplayResourceInfo:**\n"); -+ return status; -+} -+ -+/* -+ * ======== PrintProcessInformation ======== -+ * Purpose: -+ * This function prints the Process's information stored in -+ * the process context 
list. Some of the information that -+ * it displays is Process's state, Node, Stream, DMM, and -+ * Heap information. -+ */ -+static DSP_STATUS PrintProcessInformation(void) -+{ -+ struct DRV_OBJECT *hDrvObject = NULL; -+ struct PROCESS_CONTEXT *pCtxtList = NULL; -+ struct NODE_RES_OBJECT *pNodeRes = NULL; -+ struct DMM_RES_OBJECT *pDMMRes = NULL; -+ struct STRM_RES_OBJECT *pSTRMRes = NULL; -+ struct DSPHEAP_RES_OBJECT *pDSPHEAPRes = NULL; -+ struct PROC_OBJECT *proc_obj_ptr; -+ DSP_STATUS status = DSP_SOK; -+ u32 tempCount; -+ u32 procID; -+ -+ /* Get the Process context list */ -+ CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ DRV_GetProcCtxtList(&pCtxtList, hDrvObject); -+ GT_0trace(curTrace, GT_4CLASS, "\n### Debug information" -+ " for DSP bridge ##\n"); -+ GT_0trace(curTrace, GT_4CLASS, " \n ###The processes" -+ " information is as follows ### \n") ; -+ GT_0trace(curTrace, GT_4CLASS, " =====================" -+ "============ \n"); -+ /* Go through the entries in the Process context list */ -+ while (pCtxtList != NULL) { -+ GT_1trace(curTrace, GT_4CLASS, "\nThe process" -+ " id is %d\n", pCtxtList->pid); -+ GT_0trace(curTrace, GT_4CLASS, " -------------------" -+ "---------\n"); -+ if (pCtxtList->resState == PROC_RES_ALLOCATED) { -+ GT_0trace(curTrace, GT_4CLASS, " \nThe Process" -+ " is in Allocated state\n"); -+ } else { -+ GT_0trace(curTrace, GT_4CLASS, "\nThe Process" -+ " is in DeAllocated state\n"); -+ } -+ -+ spin_lock(&pCtxtList->proc_list_lock); -+ list_for_each_entry(proc_obj_ptr, &pCtxtList->processor_list, -+ proc_object) { -+ PROC_GetProcessorId(proc_obj_ptr, &procID); -+ if (procID == DSP_UNIT) { -+ GT_0trace(curTrace, GT_4CLASS, -+ "\nProcess connected to" -+ " DSP Processor\n"); -+ } else if (procID == IVA_UNIT) { -+ GT_0trace(curTrace, GT_4CLASS, -+ "\nProcess connected to" -+ " IVA Processor\n"); -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, -+ "\n***ERROR:Invalid Processor Id***\n"); -+ } -+ } -+ spin_unlock(&pCtxtList->proc_list_lock); -+ -+ pNodeRes = pCtxtList->pNodeList; -+ tempCount = 1; -+ while (pNodeRes != NULL) { -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n***The Nodes allocated by" -+ " this Process are***\n"); -+ GT_2trace(curTrace, GT_4CLASS, -+ "Node # %d Node Handle hNode:0x%x\n", -+ tempCount, (u32)pNodeRes->hNode); -+ pNodeRes = pNodeRes->next; -+ tempCount++; -+ } -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n ***There are no Nodes" -+ " allocated by this Process***\n"); -+ tempCount = 1; -+ pDSPHEAPRes = pCtxtList->pDSPHEAPList; -+ while (pDSPHEAPRes != NULL) { -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n***The Heaps allocated by" -+ " this Process are***\n"); -+ GT_3trace(curTrace, GT_4CLASS, -+ "DSP Heap Resource Info: HEAP # %d " -+ "Mapped GPP Address:0x%x, Size: 0x%lx\n", -+ tempCount, (u32)pDSPHEAPRes->ulMpuAddr, -+ pDSPHEAPRes->heapSize); -+ pDSPHEAPRes = pDSPHEAPRes->next; -+ tempCount++; -+ } -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n ***There are no Heaps allocated" -+ " by this Process***\n"); -+ tempCount = 1; -+ pDMMRes = pCtxtList->pDMMList; -+ while (pDMMRes != NULL) { -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n ***The DMM resources allocated by" -+ " this Process are***\n"); -+ GT_3trace(curTrace, GT_4CLASS, -+ "DMM Resource Info: DMM # %d " -+ "Mapped GPP Address:0X%lx, Size: 0X%lx\n", -+ tempCount, pDMMRes->ulMpuAddr, -+ pDMMRes->dmmSize); -+ pDMMRes = pDMMRes->next; -+ tempCount++; -+ } -+ if (tempCount == 1) -+ 
GT_0trace(curTrace, GT_4CLASS, -+ "\n ***There are no DMM resources" -+ " allocated by this Process***\n"); -+ tempCount = 1; -+ pSTRMRes = pCtxtList->pSTRMList; -+ while (pSTRMRes != NULL) { -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n***The Stream resources allocated by" -+ " this Process are***\n"); -+ GT_2trace(curTrace, GT_4CLASS, -+ "Stream Resource info: STRM # %d" -+ "Stream Handle:0X%x\n", tempCount, -+ (u32)pSTRMRes->hStream); -+ pSTRMRes = pSTRMRes->next; -+ tempCount++; -+ } -+ if (tempCount == 1) -+ GT_0trace(curTrace, GT_4CLASS, -+ "\n ***There are no Stream resources" -+ "allocated by this Process***\n"); -+ pCtxtList = pCtxtList->next; -+ } -+ return status; -+} -+ -+/* GPP PROCESS CLEANUP CODE END */ -+#endif -+ -+/* -+ * ======== = DRV_Create ======== = -+ * Purpose: -+ * DRV Object gets created only once during Driver Loading. -+ */ -+DSP_STATUS DRV_Create(OUT struct DRV_OBJECT **phDRVObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DRV_OBJECT *pDRVObject = NULL; -+ -+ DBC_Require(phDRVObject != NULL); -+ DBC_Require(cRefs > 0); -+ GT_1trace(curTrace, GT_ENTER, "Entering DRV_Create" -+ " phDRVObject 0x%x\n", phDRVObject); -+ MEM_AllocObject(pDRVObject, struct DRV_OBJECT, SIGNATURE); -+ if (pDRVObject) { -+ /* Create and Initialize List of device objects */ -+ pDRVObject->devList = LST_Create(); -+ if (pDRVObject->devList) { -+ /* Create and Initialize List of device Extension */ -+ pDRVObject->devNodeString = LST_Create(); -+ if (!(pDRVObject->devNodeString)) { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to Create DRV_EXT list "); -+ MEM_FreeObject(pDRVObject); -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to Create Dev List "); -+ MEM_FreeObject(pDRVObject); -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to Allocate Memory for DRV Obj"); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Store the DRV Object in the Registry */ -+ if (DSP_SUCCEEDED -+ (CFG_SetObject((u32) pDRVObject, REG_DRV_OBJECT))) { -+ GT_1trace(curTrace, GT_1CLASS, -+ "DRV Obj Created pDrvObject 0x%x\n ", -+ pDRVObject); -+ *phDRVObject = pDRVObject; -+ } else { -+ /* Free the DRV Object */ -+ status = DSP_EFAIL; -+ MEM_Free(pDRVObject); -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to update the Registry with " -+ "DRV Object "); -+ } -+ } -+ GT_2trace(curTrace, GT_ENTER, -+ "Exiting DRV_Create: phDRVObject: 0x%x\tstatus:" -+ "0x%x\n", phDRVObject, status); -+ DBC_Ensure(DSP_FAILED(status) || -+ MEM_IsValidHandle(pDRVObject, SIGNATURE)); -+ return status; -+} -+ -+/* -+ * ======== DRV_Exit ======== -+ * Purpose: -+ * Discontinue usage of the DRV module. -+ */ -+void DRV_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ GT_0trace(curTrace, GT_5CLASS, "Entering DRV_Exit \n"); -+ -+ cRefs--; -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== = DRV_Destroy ======== = -+ * purpose: -+ * Invoked during bridge de-initialization -+ */ -+DSP_STATUS DRV_Destroy(struct DRV_OBJECT *hDRVObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDRVObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pDRVObject, SIGNATURE)); -+ -+ GT_1trace(curTrace, GT_ENTER, "Entering DRV_Destroy" -+ " hDRVObject 0x%x\n", hDRVObject); -+ /* -+ * Delete the List if it exists.Should not come here -+ * as the DRV_RemoveDevObject and the Last DRV_RequestResources -+ * removes the list if the lists are empty. 
-+ */ -+ if (pDRVObject->devList) { -+ /* Could assert if the list is not empty */ -+ LST_Delete(pDRVObject->devList); -+ } -+ if (pDRVObject->devNodeString) { -+ /* Could assert if the list is not empty */ -+ LST_Delete(pDRVObject->devNodeString); -+ } -+ MEM_FreeObject(pDRVObject); -+ /* Update the DRV Object in Registry to be 0 */ -+ (void)CFG_SetObject(0, REG_DRV_OBJECT); -+ GT_2trace(curTrace, GT_ENTER, -+ "Exiting DRV_Destroy: hDRVObject: 0x%x\tstatus:" -+ "0x%x\n", hDRVObject, status); -+ DBC_Ensure(!MEM_IsValidHandle(pDRVObject, SIGNATURE)); -+ return status; -+} -+ -+/* -+ * ======== DRV_GetDevObject ======== -+ * Purpose: -+ * Given a index, returns a handle to DevObject from the list. -+ */ -+DSP_STATUS DRV_GetDevObject(u32 uIndex, struct DRV_OBJECT *hDrvObject, -+ struct DEV_OBJECT **phDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+#if GT_TRACE /* pDrvObject is used only for Assertions and debug messages.*/ -+ struct DRV_OBJECT *pDrvObject = (struct DRV_OBJECT *)hDrvObject; -+#endif -+ struct DEV_OBJECT *pDevObject; -+ u32 i; -+ DBC_Require(MEM_IsValidHandle(pDrvObject, SIGNATURE)); -+ DBC_Require(phDevObject != NULL); -+ DBC_Require(uIndex >= 0); -+ DBC_Require(cRefs > 0); -+ DBC_Assert(!(LST_IsEmpty(pDrvObject->devList))); -+ GT_3trace(curTrace, GT_ENTER, -+ "Entered DRV_GetDevObject, args:\n\tuIndex: " -+ "0x%x\n\thDrvObject: 0x%x\n\tphDevObject: 0x%x\n", -+ uIndex, hDrvObject, phDevObject); -+ pDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); -+ for (i = 0; i < uIndex; i++) { -+ pDevObject = -+ (struct DEV_OBJECT *)DRV_GetNextDevObject((u32)pDevObject); -+ } -+ if (pDevObject) { -+ *phDevObject = (struct DEV_OBJECT *) pDevObject; -+ status = DSP_SOK; -+ } else { -+ *phDevObject = NULL; -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_7CLASS, -+ "DRV: Could not get the DevObject\n"); -+ } -+ GT_2trace(curTrace, GT_ENTER, -+ "Exiting Drv_GetDevObject\n\tstatus: 0x%x\n\t" -+ "hDevObject: 0x%x\n", status, *phDevObject); -+ return status; -+} -+ -+/* -+ * ======== DRV_GetFirstDevObject ======== -+ * Purpose: -+ * Retrieve the first Device Object handle from an internal linked list of -+ * of DEV_OBJECTs maintained by DRV. -+ */ -+u32 DRV_GetFirstDevObject(void) -+{ -+ u32 dwDevObject = 0; -+ struct DRV_OBJECT *pDrvObject; -+ -+ if (DSP_SUCCEEDED -+ (CFG_GetObject((u32 *)&pDrvObject, REG_DRV_OBJECT))) { -+ if ((pDrvObject->devList != NULL) && -+ !LST_IsEmpty(pDrvObject->devList)) -+ dwDevObject = (u32) LST_First(pDrvObject->devList); -+ } -+ -+ return dwDevObject; -+} -+ -+/* -+ * ======== DRV_GetFirstDevNodeString ======== -+ * Purpose: -+ * Retrieve the first Device Extension from an internal linked list of -+ * of Pointer to DevNode Strings maintained by DRV. -+ */ -+u32 DRV_GetFirstDevExtension(void) -+{ -+ u32 dwDevExtension = 0; -+ struct DRV_OBJECT *pDrvObject; -+ -+ if (DSP_SUCCEEDED -+ (CFG_GetObject((u32 *)&pDrvObject, REG_DRV_OBJECT))) { -+ -+ if ((pDrvObject->devNodeString != NULL) && -+ !LST_IsEmpty(pDrvObject->devNodeString)) { -+ dwDevExtension = (u32)LST_First(pDrvObject-> -+ devNodeString); -+ } -+ } -+ -+ return dwDevExtension; -+} -+ -+/* -+ * ======== DRV_GetNextDevObject ======== -+ * Purpose: -+ * Retrieve the next Device Object handle from an internal linked list of -+ * of DEV_OBJECTs maintained by DRV, after having previously called -+ * DRV_GetFirstDevObject() and zero or more DRV_GetNext. 
-+ */ -+u32 DRV_GetNextDevObject(u32 hDevObject) -+{ -+ u32 dwNextDevObject = 0; -+ struct DRV_OBJECT *pDrvObject; -+ -+ DBC_Require(hDevObject != 0); -+ -+ if (DSP_SUCCEEDED -+ (CFG_GetObject((u32 *)&pDrvObject, REG_DRV_OBJECT))) { -+ -+ if ((pDrvObject->devList != NULL) && -+ !LST_IsEmpty(pDrvObject->devList)) { -+ dwNextDevObject = (u32)LST_Next(pDrvObject->devList, -+ (struct LST_ELEM *)hDevObject); -+ } -+ } -+ return dwNextDevObject; -+} -+ -+/* -+ * ======== DRV_GetNextDevExtension ======== -+ * Purpose: -+ * Retrieve the next Device Extension from an internal linked list of -+ * of pointer to DevNodeString maintained by DRV, after having previously -+ * called DRV_GetFirstDevExtension() and zero or more -+ * DRV_GetNextDevExtension(). -+ */ -+u32 DRV_GetNextDevExtension(u32 hDevExtension) -+{ -+ u32 dwDevExtension = 0; -+ struct DRV_OBJECT *pDrvObject; -+ -+ DBC_Require(hDevExtension != 0); -+ -+ if (DSP_SUCCEEDED(CFG_GetObject((u32 *)&pDrvObject, -+ REG_DRV_OBJECT))) { -+ if ((pDrvObject->devNodeString != NULL) && -+ !LST_IsEmpty(pDrvObject->devNodeString)) { -+ dwDevExtension = (u32)LST_Next(pDrvObject-> -+ devNodeString, -+ (struct LST_ELEM *)hDevExtension); -+ } -+ } -+ -+ return dwDevExtension; -+} -+ -+/* -+ * ======== DRV_Init ======== -+ * Purpose: -+ * Initialize DRV module private state. -+ */ -+DSP_STATUS DRV_Init(void) -+{ -+ s32 fRetval = 1; /* function return value */ -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (fRetval) -+ cRefs++; -+ -+ GT_1trace(curTrace, GT_5CLASS, "Entering DRV_Entry crefs 0x%x \n", -+ cRefs); -+ -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -+ -+/* -+ * ======== DRV_InsertDevObject ======== -+ * Purpose: -+ * Insert a DevObject into the list of Manager object. -+ */ -+DSP_STATUS DRV_InsertDevObject(struct DRV_OBJECT *hDRVObject, -+ struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDRVObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hDevObject != NULL); -+ DBC_Require(MEM_IsValidHandle(pDRVObject, SIGNATURE)); -+ DBC_Assert(pDRVObject->devList); -+ -+ GT_2trace(curTrace, GT_ENTER, -+ "Entering DRV_InsertProcObject hDRVObject " -+ "0x%x\n, hDevObject 0x%x\n", hDRVObject, hDevObject); -+ -+ LST_PutTail(pDRVObject->devList, (struct LST_ELEM *)hDevObject); -+ -+ GT_1trace(curTrace, GT_ENTER, -+ "Exiting InsertDevObject status 0x%x\n", status); -+ -+ DBC_Ensure(DSP_SUCCEEDED(status) && !LST_IsEmpty(pDRVObject->devList)); -+ -+ return status; -+} -+ -+/* -+ * ======== DRV_RemoveDevObject ======== -+ * Purpose: -+ * Search for and remove a DeviceObject from the given list of DRV -+ * objects. -+ */ -+DSP_STATUS DRV_RemoveDevObject(struct DRV_OBJECT *hDRVObject, -+ struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDRVObject; -+ struct LST_ELEM *pCurElem; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pDRVObject, SIGNATURE)); -+ DBC_Require(hDevObject != NULL); -+ -+ DBC_Require(pDRVObject->devList != NULL); -+ DBC_Require(!LST_IsEmpty(pDRVObject->devList)); -+ -+ GT_2trace(curTrace, GT_ENTER, -+ "Entering DRV_RemoveDevObject hDevObject " -+ "0x%x\n, hDRVObject 0x%x\n", hDevObject, hDRVObject); -+ /* Search list for pProcObject: */ -+ for (pCurElem = LST_First(pDRVObject->devList); pCurElem != NULL; -+ pCurElem = LST_Next(pDRVObject->devList, pCurElem)) { -+ /* If found, remove it. 
*/ -+ if ((struct DEV_OBJECT *) pCurElem == hDevObject) { -+ LST_RemoveElem(pDRVObject->devList, pCurElem); -+ status = DSP_SOK; -+ break; -+ } -+ } -+ /* Remove list if empty. */ -+ if (LST_IsEmpty(pDRVObject->devList)) { -+ LST_Delete(pDRVObject->devList); -+ pDRVObject->devList = NULL; -+ } -+ DBC_Ensure((pDRVObject->devList == NULL) || -+ !LST_IsEmpty(pDRVObject->devList)); -+ GT_1trace(curTrace, GT_ENTER, -+ "DRV_RemoveDevObject returning 0x%x\n", status); -+ return status; -+} -+ -+/* -+ * ======== DRV_RequestResources ======== -+ * Purpose: -+ * Requests resources from the OS. -+ */ -+DSP_STATUS DRV_RequestResources(u32 dwContext, u32 *pDevNodeString) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DRV_OBJECT *pDRVObject; -+ struct DRV_EXT *pszdevNode; -+ -+ DBC_Require(dwContext != 0); -+ DBC_Require(pDevNodeString != NULL); -+ GT_0trace(curTrace, GT_ENTER, "Entering DRV_RequestResources\n"); -+ /* -+ * Allocate memory to hold the string. This will live untill -+ * it is freed in the Release resources. Update the driver object -+ * list. -+ */ -+ if (DSP_SUCCEEDED(CFG_GetObject((u32 *)&pDRVObject, -+ REG_DRV_OBJECT))) { -+ pszdevNode = MEM_Calloc(sizeof(struct DRV_EXT), MEM_NONPAGED); -+ if (pszdevNode) { -+ LST_InitElem(&pszdevNode->link); -+ strncpy(pszdevNode->szString, -+ (char *)dwContext, MAXREGPATHLENGTH - 1); -+ pszdevNode->szString[MAXREGPATHLENGTH - 1] = '\0'; -+ /* Update the Driver Object List */ -+ *pDevNodeString = (u32)pszdevNode->szString; -+ LST_PutTail(pDRVObject->devNodeString, -+ (struct LST_ELEM *)pszdevNode); -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to Allocate Memory devNodeString "); -+ status = DSP_EFAIL; -+ *pDevNodeString = 0; -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to get Driver Object from Registry"); -+ *pDevNodeString = 0; -+ } -+ -+ if (!(strcmp((char *) dwContext, "TIOMAP1510"))) { -+ GT_0trace(curTrace, GT_1CLASS, -+ " Allocating resources for UMA \n"); -+ status = RequestBridgeResourcesDSP(dwContext, DRV_ASSIGN); -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(curTrace, GT_7CLASS, "Unknown Device "); -+ } -+ -+ if (DSP_FAILED(status)) { -+ GT_0trace(curTrace, GT_7CLASS, -+ "Failed to reserve bridge resources "); -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && pDevNodeString != NULL && -+ !LST_IsEmpty(pDRVObject->devNodeString)) || -+ (DSP_FAILED(status) && *pDevNodeString == 0)); -+ -+ return status; -+} -+ -+/* -+ * ======== DRV_ReleaseResources ======== -+ * Purpose: -+ * Releases resources from the OS. -+ */ -+DSP_STATUS DRV_ReleaseResources(u32 dwContext, struct DRV_OBJECT *hDrvObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DRV_OBJECT *pDRVObject = (struct DRV_OBJECT *)hDrvObject; -+ struct DRV_EXT *pszdevNode; -+ -+ GT_0trace(curTrace, GT_ENTER, "Entering DRV_Release Resources\n"); -+ -+ if (!(strcmp((char *)((struct DRV_EXT *)dwContext)->szString, -+ "TIOMAP1510"))) { -+ GT_0trace(curTrace, GT_1CLASS, -+ " Releasing DSP-Bridge resources \n"); -+ status = RequestBridgeResources(dwContext, DRV_RELEASE); -+ } else { -+ GT_0trace(curTrace, GT_1CLASS, " Unknown device\n"); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(curTrace, GT_1CLASS, -+ "Failed to relese bridge resources\n"); -+ } -+ -+ /* -+ * Irrespective of the status go ahead and clean it -+ * The following will over write the status. 
-+ */ -+ for (pszdevNode = (struct DRV_EXT *)DRV_GetFirstDevExtension(); -+ pszdevNode != NULL; pszdevNode = (struct DRV_EXT *) -+ DRV_GetNextDevExtension((u32)pszdevNode)) { -+ if ((u32)pszdevNode == dwContext) { -+ /* Found it */ -+ /* Delete from the Driver object list */ -+ LST_RemoveElem(pDRVObject->devNodeString, -+ (struct LST_ELEM *)pszdevNode); -+ MEM_Free((void *) pszdevNode); -+ break; -+ } -+ /* Delete the List if it is empty */ -+ if (LST_IsEmpty(pDRVObject->devNodeString)) { -+ LST_Delete(pDRVObject->devNodeString); -+ pDRVObject->devNodeString = NULL; -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== RequestBridgeResources ======== -+ * Purpose: -+ * Reserves shared memory for bridge. -+ */ -+static DSP_STATUS RequestBridgeResources(u32 dwContext, s32 bRequest) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CFG_HOSTRES *pResources; -+ u32 dwBuffSize; -+ -+ struct DRV_EXT *driverExt; -+ u32 shm_size; -+ -+ DBC_Require(dwContext != 0); -+ -+ GT_0trace(curTrace, GT_ENTER, "->RequestBridgeResources \n"); -+ -+ if (!bRequest) { -+ driverExt = (struct DRV_EXT *)dwContext; -+ /* Releasing resources by deleting the registry key */ -+ dwBuffSize = sizeof(struct CFG_HOSTRES); -+ pResources = MEM_Calloc(dwBuffSize, MEM_NONPAGED); -+ if (DSP_FAILED(REG_GetValue(NULL, (char *)driverExt->szString, -+ CURRENTCONFIG, (u8 *)pResources, &dwBuffSize))) { -+ status = CFG_E_RESOURCENOTAVAIL; -+ GT_0trace(curTrace, GT_1CLASS, -+ "REG_GetValue Failed \n"); -+ } else { -+ GT_0trace(curTrace, GT_1CLASS, -+ "REG_GetValue Succeeded \n"); -+ } -+ -+ if (pResources != NULL) { -+ dwBuffSize = sizeof(shm_size); -+ status = REG_GetValue(NULL, CURRENTCONFIG, SHMSIZE, -+ (u8 *)&shm_size, &dwBuffSize); -+ if (DSP_SUCCEEDED(status)) { -+ if ((pResources->dwMemBase[1]) && -+ (pResources->dwMemPhys[1])) { -+ MEM_FreePhysMem((void *)pResources-> -+ dwMemBase[1], pResources->dwMemPhys[1], -+ shm_size); -+ } -+ } else { -+ GT_1trace(curTrace, GT_7CLASS, -+ "Error getting SHM size from registry: " -+ "%x. 
Not calling MEM_FreePhysMem\n", -+ status); -+ } -+ pResources->dwMemBase[1] = 0; -+ pResources->dwMemPhys[1] = 0; -+ -+ if (pResources->dwPrmBase) -+ iounmap(pResources->dwPrmBase); -+ if (pResources->dwCmBase) -+ iounmap(pResources->dwCmBase); -+ if (pResources->dwMboxBase) -+ iounmap(pResources->dwMboxBase); -+ if (pResources->dwMemBase[0]) -+ iounmap((void *)pResources->dwMemBase[0]); -+ if (pResources->dwMemBase[2]) -+ iounmap((void *)pResources->dwMemBase[2]); -+ if (pResources->dwMemBase[3]) -+ iounmap((void *)pResources->dwMemBase[3]); -+ if (pResources->dwMemBase[4]) -+ iounmap((void *)pResources->dwMemBase[4]); -+ if (pResources->dwWdTimerDspBase) -+ iounmap(pResources->dwWdTimerDspBase); -+ if (pResources->dwDmmuBase) -+ iounmap(pResources->dwDmmuBase); -+ if (pResources->dwPerBase) -+ iounmap(pResources->dwPerBase); -+ if (pResources->dwPerPmBase) -+ iounmap((void *)pResources->dwPerPmBase); -+ if (pResources->dwCorePmBase) -+ iounmap((void *)pResources->dwCorePmBase); -+ if (pResources->dwSysCtrlBase) { -+ iounmap(pResources->dwSysCtrlBase); -+ /* don't set pResources->dwSysCtrlBase to null -+ * as it is used in BOARD_Stop */ -+ } -+ pResources->dwPrmBase = NULL; -+ pResources->dwCmBase = NULL; -+ pResources->dwMboxBase = NULL; -+ pResources->dwMemBase[0] = (u32) NULL; -+ pResources->dwMemBase[2] = (u32) NULL; -+ pResources->dwMemBase[3] = (u32) NULL; -+ pResources->dwMemBase[4] = (u32) NULL; -+ pResources->dwWdTimerDspBase = NULL; -+ pResources->dwDmmuBase = NULL; -+ -+ dwBuffSize = sizeof(struct CFG_HOSTRES); -+ status = REG_SetValue(NULL, (char *)driverExt->szString, -+ CURRENTCONFIG, REG_BINARY, (u8 *)pResources, -+ (u32)dwBuffSize); -+ /* Set all the other entries to NULL */ -+ MEM_Free(pResources); -+ } -+ GT_0trace(curTrace, GT_ENTER, " <- RequestBridgeResources \n"); -+ return status; -+ } -+ dwBuffSize = sizeof(struct CFG_HOSTRES); -+ pResources = MEM_Calloc(dwBuffSize, MEM_NONPAGED); -+ if (pResources != NULL) { -+ /* wNumMemWindows must not be more than CFG_MAXMEMREGISTERS */ -+ pResources->wNumMemWindows = 2; -+ /* First window is for DSP internal memory */ -+ -+ pResources->dwPrmBase = ioremap(OMAP_IVA2_PRM_BASE, -+ OMAP_IVA2_PRM_SIZE); -+ pResources->dwCmBase = ioremap(OMAP_IVA2_CM_BASE, -+ OMAP_IVA2_CM_SIZE); -+ pResources->dwMboxBase = ioremap(OMAP_MBOX_BASE, -+ OMAP_MBOX_SIZE); -+ pResources->dwSysCtrlBase = ioremap(OMAP_SYSC_BASE, -+ OMAP_SYSC_SIZE); -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[0] 0x%x\n", -+ pResources->dwMemBase[0]); -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[3] 0x%x\n", -+ pResources->dwMemBase[3]); -+ GT_1trace(curTrace, GT_2CLASS, "dwPrmBase 0x%x\n", -+ pResources->dwPrmBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwCmBase 0x%x\n", -+ pResources->dwCmBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwWdTimerDspBase 0x%x\n", -+ pResources->dwWdTimerDspBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwMboxBase 0x%x\n", -+ pResources->dwMboxBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwDmmuBase 0x%x\n", -+ pResources->dwDmmuBase); -+ -+ /* for 24xx base port is not mapping the mamory for DSP -+ * internal memory TODO Do a ioremap here */ -+ /* Second window is for DSP external memory shared with MPU */ -+ if (DSP_SUCCEEDED(status)) { -+ /* for Linux, these are hard-coded values */ -+ pResources->bIRQRegisters = 0; -+ pResources->bIRQAttrib = 0; -+ pResources->dwOffsetForMonitor = 0; -+ pResources->dwChnlOffset = 0; -+ /* CHNL_MAXCHANNELS */ -+ pResources->dwNumChnls = CHNL_MAXCHANNELS; -+ pResources->dwChnlBufSize = 0x400; -+ dwBuffSize = sizeof(struct 
CFG_HOSTRES); -+ status = REG_SetValue(NULL, (char *) dwContext, -+ CURRENTCONFIG, REG_BINARY, -+ (u8 *)pResources, -+ sizeof(struct CFG_HOSTRES)); -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(curTrace, GT_1CLASS, -+ " Successfully set the registry " -+ "value for CURRENTCONFIG\n"); -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, -+ " Failed to set the registry " -+ "value for CURRENTCONFIG\n"); -+ } -+ } -+ MEM_Free(pResources); -+ } -+ /* End Mem alloc */ -+ return status; -+} -+ -+/* -+ * ======== RequestBridgeResourcesDSP ======== -+ * Purpose: -+ * Reserves shared memory for bridge. -+ */ -+static DSP_STATUS RequestBridgeResourcesDSP(u32 dwContext, s32 bRequest) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CFG_HOSTRES *pResources; -+ u32 dwBuffSize; -+ u32 dmaAddr; -+ u32 shm_size; -+ -+ DBC_Require(dwContext != 0); -+ -+ GT_0trace(curTrace, GT_ENTER, "->RequestBridgeResourcesDSP \n"); -+ -+ dwBuffSize = sizeof(struct CFG_HOSTRES); -+ -+ pResources = MEM_Calloc(dwBuffSize, MEM_NONPAGED); -+ -+ if (pResources != NULL) { -+ if (DSP_FAILED(CFG_GetHostResources((struct CFG_DEVNODE *) -+ dwContext, pResources))) { -+ /* Call CFG_GetHostResources to get reserve resouces */ -+ status = RequestBridgeResources(dwContext, bRequest); -+ if (DSP_SUCCEEDED(status)) { -+ status = CFG_GetHostResources -+ ((struct CFG_DEVNODE *) dwContext, -+ pResources); -+ } -+ } -+ /* wNumMemWindows must not be more than CFG_MAXMEMREGISTERS */ -+ pResources->wNumMemWindows = 4; -+ -+ pResources->dwMemBase[0] = 0; -+ pResources->dwMemBase[2] = (u32)ioremap(OMAP_DSP_MEM1_BASE, -+ OMAP_DSP_MEM1_SIZE); -+ pResources->dwMemBase[3] = (u32)ioremap(OMAP_DSP_MEM2_BASE, -+ OMAP_DSP_MEM2_SIZE); -+ pResources->dwMemBase[4] = (u32)ioremap(OMAP_DSP_MEM3_BASE, -+ OMAP_DSP_MEM3_SIZE); -+ pResources->dwPerBase = ioremap(OMAP_PER_CM_BASE, -+ OMAP_PER_CM_SIZE); -+ pResources->dwPerPmBase = (u32)ioremap(OMAP_PER_PRM_BASE, -+ OMAP_PER_PRM_SIZE); -+ pResources->dwCorePmBase = (u32)ioremap(OMAP_CORE_PRM_BASE, -+ OMAP_CORE_PRM_SIZE); -+ pResources->dwDmmuBase = ioremap(OMAP_DMMU_BASE, -+ OMAP_DMMU_SIZE); -+ pResources->dwWdTimerDspBase = NULL; -+ -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[0] 0x%x\n", -+ pResources->dwMemBase[0]); -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[1] 0x%x\n", -+ pResources->dwMemBase[1]); -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[2] 0x%x\n", -+ pResources->dwMemBase[2]); -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[3] 0x%x\n", -+ pResources->dwMemBase[3]); -+ GT_1trace(curTrace, GT_2CLASS, "dwMemBase[4] 0x%x\n", -+ pResources->dwMemBase[4]); -+ GT_1trace(curTrace, GT_2CLASS, "dwPrmBase 0x%x\n", -+ pResources->dwPrmBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwCmBase 0x%x\n", -+ pResources->dwCmBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwWdTimerDspBase 0x%x\n", -+ pResources->dwWdTimerDspBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwMboxBase 0x%x\n", -+ pResources->dwMboxBase); -+ GT_1trace(curTrace, GT_2CLASS, "dwDmmuBase 0x%x\n", -+ pResources->dwDmmuBase); -+ dwBuffSize = sizeof(shm_size); -+ status = REG_GetValue(NULL, CURRENTCONFIG, SHMSIZE, -+ (u8 *)&shm_size, &dwBuffSize); -+ if (DSP_SUCCEEDED(status)) { -+ /* Allocate Physically contiguous, -+ * non-cacheable memory */ -+ pResources->dwMemBase[1] = -+ (u32)MEM_AllocPhysMem(shm_size, 0x100000, -+ &dmaAddr); -+ if (pResources->dwMemBase[1] == 0) { -+ status = DSP_EMEMORY; -+ GT_0trace(curTrace, GT_7CLASS, -+ "SHM reservation Failed\n"); -+ } else { -+ pResources->dwMemLength[1] = shm_size; -+ pResources->dwMemPhys[1] = dmaAddr; -+ -+ GT_3trace(curTrace, 
GT_1CLASS, -+ "Bridge SHM address 0x%x dmaAddr" -+ " %x size %x\n", -+ pResources->dwMemBase[1], -+ dmaAddr, shm_size); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* for Linux, these are hard-coded values */ -+ pResources->bIRQRegisters = 0; -+ pResources->bIRQAttrib = 0; -+ pResources->dwOffsetForMonitor = 0; -+ pResources->dwChnlOffset = 0; -+ /* CHNL_MAXCHANNELS */ -+ pResources->dwNumChnls = CHNL_MAXCHANNELS; -+ pResources->dwChnlBufSize = 0x400; -+ dwBuffSize = sizeof(struct CFG_HOSTRES); -+ status = REG_SetValue(NULL, (char *)dwContext, -+ CURRENTCONFIG, REG_BINARY, -+ (u8 *)pResources, -+ sizeof(struct CFG_HOSTRES)); -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(curTrace, GT_1CLASS, -+ " Successfully set the registry" -+ " value for CURRENTCONFIG\n"); -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, -+ " Failed to set the registry value" -+ " for CURRENTCONFIG\n"); -+ } -+ } -+ MEM_Free(pResources); -+ } -+ /* End Mem alloc */ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/drv_interface.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/drv_interface.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,739 @@ -+/* -+ * drv_interface.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== linux_driver.c ======== -+ * Description: -+ * DSP/BIOS Bridge driver interface. -+ * -+ * Public Functions: -+ * driver_init -+ * driver_exit -+ * driver_open -+ * driver_release -+ * driver_ioctl -+ * driver_mmap -+ * -+ *! Revision History -+ *! ================ -+ *! 21-Apr-2004 map Deprecated use of MODULE_PARM for kernel versions -+ *! greater than 2.5, use module_param. -+ *! 08-Mar-2004 sb Added the dsp_debug argument, which keeps the DSP in self -+ *! loop after image load and waits in a loop for DSP to start -+ *! 16-Feb-2004 vp Deprecated the usage of MOD_INC_USE_COUNT and -+ *! MOD_DEC_USE_COUNT -+ *! for kernel versions greater than 2.5 -+ *! 20-May-2003 vp Added unregister functions for the DPM. -+ *! 24-Mar-2003 sb Pass pid instead of driverContext to DSP_Close -+ *! 24-Mar-2003 vp Added Power Management support. -+ *! 21-Mar-2003 sb Configure SHM size using insmod argument shm_size -+ *! 10-Feb-2003 vp Updated based on code review comments -+ *! 
18-Oct-2002 sb Created initial version -+ */ -+ -+/* ----------------------------------- Host OS */ -+ -+#include -+#include -+#include -+ -+#ifdef MODULE -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+#ifndef RES_CLEANUP_DISABLE -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#endif -+ -+#include -+#include -+ -+#define BRIDGE_NAME "C6410" -+/* ----------------------------------- Globals */ -+#define DRIVER_NAME "DspBridge" -+#define DRIVER_MAJOR 0 /* Linux assigns our Major device number */ -+#define DRIVER_MINOR 0 /* Linux assigns our Major device number */ -+s32 dsp_debug; -+ -+struct platform_device *omap_dspbridge_dev; -+ -+struct bridge_dev { -+ struct cdev cdev; -+}; -+ -+static struct bridge_dev *bridge_device; -+ -+static struct class *bridge_class; -+ -+static u32 driverContext; -+#ifdef CONFIG_BRIDGE_DEBUG -+static char *GT_str; -+#endif /* CONFIG_BRIDGE_DEBUG */ -+static s32 driver_major = DRIVER_MAJOR; -+static s32 driver_minor = DRIVER_MINOR; -+static char *base_img; -+char *iva_img; -+static char *num_procs = "C55=1"; -+static s32 shm_size = 0x400000; /* 4 MB */ -+static u32 phys_mempool_base; -+static u32 phys_mempool_size; -+static int tc_wordswapon; /* Default value is always false */ -+ -+/* Minimum ACTIVE VDD1 OPP level for reliable DSP operation */ -+unsigned short min_active_opp = 3; -+ -+#ifdef CONFIG_PM -+struct omap34xx_bridge_suspend_data { -+ int suspended; -+ wait_queue_head_t suspend_wq; -+}; -+ -+static struct omap34xx_bridge_suspend_data bridge_suspend_data; -+ -+static int omap34xxbridge_suspend_lockout( -+ struct omap34xx_bridge_suspend_data *s, struct file *f) -+{ -+ if ((s)->suspended) { -+ if ((f)->f_flags & O_NONBLOCK) -+ return DSP_EDPMSUSPEND; -+ wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0); -+ } -+ return 0; -+} -+ -+#endif -+ -+#ifdef DEBUG -+module_param(GT_str, charp, 0); -+MODULE_PARM_DESC(GT_str, "GT string, default = NULL"); -+ -+module_param(dsp_debug, int, 0); -+MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false"); -+#endif -+ -+module_param(driver_major, int, 0); /* Driver's major number */ -+MODULE_PARM_DESC(driver_major, "Major device number, default = 0 (auto)"); -+ -+module_param(driver_minor, int, 0); /* Driver's major number */ -+MODULE_PARM_DESC(driver_minor, "Minor device number, default = 0 (auto)"); -+ -+module_param(base_img, charp, 0); -+MODULE_PARM_DESC(base_img, "DSP base image, default = NULL"); -+ -+module_param(shm_size, int, 0); -+MODULE_PARM_DESC(shm_size, "SHM size, default = 4 MB, minimum = 64 KB"); -+ -+module_param(phys_mempool_base, uint, 0); -+MODULE_PARM_DESC(phys_mempool_base, -+ "Physical memory pool base passed to driver"); -+ -+module_param(phys_mempool_size, uint, 0); -+MODULE_PARM_DESC(phys_mempool_size, -+ "Physical memory pool size passed to driver"); -+module_param(tc_wordswapon, int, 0); -+MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. 
default = 0"); -+ -+module_param(min_active_opp, ushort, S_IRUSR | S_IWUSR); -+MODULE_PARM_DESC(min_active_opp, "Minimum ACTIVE VDD1 OPP Level, default = 3"); -+ -+MODULE_AUTHOR("Texas Instruments"); -+MODULE_LICENSE("GPL"); -+ -+static char *driver_name = DRIVER_NAME; -+ -+#ifdef CONFIG_BRIDGE_DEBUG -+static struct GT_Mask driverTrace; -+#endif /* CONFIG_BRIDGE_DEBUG */ -+ -+static struct file_operations bridge_fops = { -+ .open = bridge_open, -+ .release = bridge_release, -+ .unlocked_ioctl = bridge_ioctl, -+ .mmap = bridge_mmap, -+}; -+ -+#ifdef CONFIG_PM -+static u32 timeOut = 1000; -+#ifdef CONFIG_BRIDGE_DVFS -+static struct clk *clk_handle; -+s32 dsp_max_opps = VDD1_OPP5; -+#endif -+ -+/* Maximum Opps that can be requested by IVA*/ -+/*vdd1 rate table*/ -+#ifdef CONFIG_BRIDGE_DVFS -+const struct omap_opp vdd1_rate_table_bridge[] = { -+ {0, 0, 0}, -+ /*OPP1*/ -+ {S125M, VDD1_OPP1, 0}, -+ /*OPP2*/ -+ {S250M, VDD1_OPP2, 0}, -+ /*OPP3*/ -+ {S500M, VDD1_OPP3, 0}, -+ /*OPP4*/ -+ {S550M, VDD1_OPP4, 0}, -+ /*OPP5*/ -+ {S600M, VDD1_OPP5, 0}, -+}; -+#endif -+#endif -+ -+struct dspbridge_platform_data *omap_dspbridge_pdata; -+ -+u32 vdd1_dsp_freq[6][4] = { -+ {0, 0, 0, 0}, -+ /*OPP1*/ -+ {0, 90000, 0, 86000}, -+ /*OPP2*/ -+ {0, 180000, 80000, 170000}, -+ /*OPP3*/ -+ {0, 360000, 160000, 340000}, -+ /*OPP4*/ -+ {0, 396000, 325000, 376000}, -+ /*OPP5*/ -+ {0, 430000, 355000, 430000}, -+}; -+ -+#ifdef CONFIG_BRIDGE_DVFS -+static int dspbridge_post_scale(struct notifier_block *op, unsigned long level, -+ void *ptr) -+{ -+ PWR_PM_PostScale(PRCM_VDD1, level); -+ return 0; -+} -+ -+static struct notifier_block iva_clk_notifier = { -+ .notifier_call = dspbridge_post_scale, -+ NULL, -+}; -+#endif -+ -+static int __devinit omap34xx_bridge_probe(struct platform_device *pdev) -+{ -+ int status; -+ u32 initStatus; -+ u32 temp; -+ dev_t dev = 0 ; -+ int result; -+#ifdef CONFIG_BRIDGE_DVFS -+ int i = 0; -+#endif -+ struct dspbridge_platform_data *pdata = pdev->dev.platform_data; -+ -+ omap_dspbridge_dev = pdev; -+ -+ /* use 2.6 device model */ -+ if (driver_major) { -+ dev = MKDEV(driver_major, driver_minor); -+ result = register_chrdev_region(dev, 1, driver_name); -+ } else { -+ result = alloc_chrdev_region(&dev, driver_minor, 1, -+ driver_name); -+ driver_major = MAJOR(dev); -+ } -+ -+ if (result < 0) { -+ GT_1trace(driverTrace, GT_7CLASS, "bridge_init: " -+ "Can't get Major %d \n", driver_major); -+ return result; -+ } -+ -+ bridge_device = kmalloc(sizeof(struct bridge_dev), GFP_KERNEL); -+ if (!bridge_device) { -+ result = -ENOMEM; -+ unregister_chrdev_region(dev, 1); -+ return result; -+ } -+ memset(bridge_device, 0, sizeof(struct bridge_dev)); -+ cdev_init(&bridge_device->cdev, &bridge_fops); -+ bridge_device->cdev.owner = THIS_MODULE; -+ bridge_device->cdev.ops = &bridge_fops; -+ -+ status = cdev_add(&bridge_device->cdev, dev, 1); -+ -+ if (status) { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "Failed to add the bridge device \n"); -+ return status; -+ } -+ -+ /* udev support */ -+ bridge_class = class_create(THIS_MODULE, "ti_bridge"); -+ -+ if (IS_ERR(bridge_class)) -+ GT_0trace(driverTrace, GT_7CLASS, -+ "Error creating bridge class \n"); -+ -+ device_create(bridge_class, NULL, MKDEV(driver_major, driver_minor), -+ NULL, "DspBridge"); -+ -+ GT_init(); -+ GT_create(&driverTrace, "LD"); -+ -+#ifdef DEBUG -+ if (GT_str) -+ GT_set(GT_str); -+#elif defined(DDSP_DEBUG_PRODUCT) && GT_TRACE -+ GT_set("**=67"); -+#endif -+ -+ GT_0trace(driverTrace, GT_ENTER, "-> driver_init\n"); -+ -+#ifdef CONFIG_PM -+ /* 
Initialize the wait queue */ -+ if (!status) { -+ bridge_suspend_data.suspended = 0; -+ init_waitqueue_head(&bridge_suspend_data.suspend_wq); -+ } -+#endif -+ -+ SERVICES_Init(); -+ -+ /* Autostart flag. This should be set to true if the DSP image should -+ * be loaded and run during bridge module initialization */ -+ -+ if (base_img) { -+ temp = true; -+ REG_SetValue(NULL, NULL, AUTOSTART, REG_DWORD, (u8 *)&temp, -+ sizeof(temp)); -+ REG_SetValue(NULL, NULL, DEFEXEC, REG_SZ, (u8 *)base_img, -+ strlen(base_img) + 1); -+ } else { -+ temp = false; -+ REG_SetValue(NULL, NULL, AUTOSTART, REG_DWORD, (u8 *)&temp, -+ sizeof(temp)); -+ REG_SetValue(NULL, NULL, DEFEXEC, REG_SZ, (u8 *) "\0", (u32)2); -+ } -+ REG_SetValue(NULL, NULL, NUMPROCS, REG_SZ, (u8 *) num_procs, -+ strlen(num_procs) + 1); -+ -+ if (shm_size >= 0x10000) { /* 64 KB */ -+ initStatus = REG_SetValue(NULL, NULL, SHMSIZE, REG_DWORD, -+ (u8 *)&shm_size, sizeof(shm_size)); -+ } else { -+ initStatus = DSP_EINVALIDARG; -+ status = -1; -+ GT_0trace(driverTrace, GT_7CLASS, -+ "SHM size must be at least 64 KB\n"); -+ } -+ GT_1trace(driverTrace, GT_7CLASS, -+ "requested shm_size = 0x%x\n", shm_size); -+ -+ if (pdata->phys_mempool_base && pdata->phys_mempool_size) { -+ phys_mempool_base = pdata->phys_mempool_base; -+ phys_mempool_size = pdata->phys_mempool_size; -+ } -+ -+ if (phys_mempool_base > 0x0) { -+ initStatus = REG_SetValue(NULL, NULL, PHYSMEMPOOLBASE, -+ REG_DWORD, (u8 *)&phys_mempool_base, -+ sizeof(phys_mempool_base)); -+ } -+ GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_base = 0x%x \n", -+ phys_mempool_base); -+ -+ if (phys_mempool_size > 0x0) { -+ initStatus = REG_SetValue(NULL, NULL, PHYSMEMPOOLSIZE, -+ REG_DWORD, (u8 *)&phys_mempool_size, -+ sizeof(phys_mempool_size)); -+ } -+ GT_1trace(driverTrace, GT_7CLASS, "phys_mempool_size = 0x%x\n", -+ phys_mempool_base); -+ if ((phys_mempool_base > 0x0) && (phys_mempool_size > 0x0)) -+ MEM_ExtPhysPoolInit(phys_mempool_base, phys_mempool_size); -+ if (tc_wordswapon) { -+ GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is enabled\n"); -+ REG_SetValue(NULL, NULL, TCWORDSWAP, REG_DWORD, -+ (u8 *)&tc_wordswapon, sizeof(tc_wordswapon)); -+ } else { -+ GT_0trace(driverTrace, GT_7CLASS, "TC Word Swap is disabled\n"); -+ REG_SetValue(NULL, NULL, TCWORDSWAP, -+ REG_DWORD, (u8 *)&tc_wordswapon, -+ sizeof(tc_wordswapon)); -+ } -+ if (DSP_SUCCEEDED(initStatus)) { -+#ifdef CONFIG_BRIDGE_DVFS -+ for (i = 0; i < 6; i++) -+ pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate; -+ -+ clk_handle = clk_get(NULL, "iva2_ck"); -+ if (!clk_handle) { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "clk_get failed to get iva2_ck \n"); -+ } else { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "clk_get PASS to get iva2_ck \n"); -+ } -+ if (!clk_notifier_register(clk_handle, &iva_clk_notifier)) { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "clk_notifier_register PASS for iva2_ck \n"); -+ } else { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "clk_notifier_register FAIL for iva2_ck \n"); -+ } -+ -+ /* -+ * When Smartreflex is ON, DSP requires at least OPP level 3 -+ * to operate reliably. So boost lower OPP levels to OPP3. 
-+ */ -+ if (pdata->dsp_set_min_opp) -+ (*pdata->dsp_set_min_opp)(min_active_opp); -+#endif -+ driverContext = DSP_Init(&initStatus); -+ if (DSP_FAILED(initStatus)) { -+ status = -1; -+ GT_0trace(driverTrace, GT_7CLASS, -+ "DSP/BIOS Bridge initialization Failed\n"); -+ } else { -+ GT_0trace(driverTrace, GT_5CLASS, -+ "DSP/BIOS Bridge driver loaded\n"); -+ } -+ } -+ -+ DBC_Assert(status == 0); -+ DBC_Assert(DSP_SUCCEEDED(initStatus)); -+ GT_0trace(driverTrace, GT_ENTER, " <- driver_init\n"); -+ return status; -+} -+ -+static int __devexit omap34xx_bridge_remove(struct platform_device *pdev) -+{ -+ dev_t devno; -+ bool ret; -+ DSP_STATUS dsp_status = DSP_SOK; -+ HANDLE hDrvObject = NULL; -+ struct PROCESS_CONTEXT *pTmp = NULL; -+ struct PROCESS_CONTEXT *pCtxtclosed = NULL; -+ struct PROC_OBJECT *proc_obj_ptr, *temp; -+ -+ GT_0trace(driverTrace, GT_ENTER, "-> driver_exit\n"); -+ -+ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ if (DSP_FAILED(dsp_status)) -+ goto func_cont; -+ -+#ifdef CONFIG_BRIDGE_DVFS -+ if (!clk_notifier_unregister(clk_handle, &iva_clk_notifier)) { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "clk_notifier_unregister PASS for iva2_ck \n"); -+ } else { -+ GT_0trace(driverTrace, GT_7CLASS, -+ "clk_notifier_unregister FAILED for iva2_ck \n"); -+ } -+#endif /* #ifdef CONFIG_BRIDGE_DVFS */ -+ -+ DRV_GetProcCtxtList(&pCtxtclosed, (struct DRV_OBJECT *)hDrvObject); -+ while (pCtxtclosed != NULL) { -+ GT_1trace(driverTrace, GT_5CLASS, "***Cleanup of " -+ "process***%d\n", pCtxtclosed->pid); -+ DRV_RemoveAllResources(pCtxtclosed); -+ list_for_each_entry_safe(proc_obj_ptr, temp, -+ &pCtxtclosed->processor_list, proc_object) { -+ PROC_Detach(proc_obj_ptr, pCtxtclosed); -+ } -+ pTmp = pCtxtclosed->next; -+ DRV_RemoveProcContext((struct DRV_OBJECT *)hDrvObject, -+ pCtxtclosed); -+ pCtxtclosed = pTmp; -+ } -+ -+ if (driverContext) { -+ /* Put the DSP in reset state */ -+ ret = DSP_Deinit(driverContext); -+ driverContext = 0; -+ DBC_Assert(ret == true); -+ } -+ -+ clk_put(clk_handle); -+ clk_handle = NULL; -+ -+func_cont: -+ SERVICES_Exit(); -+ GT_exit(); -+ -+ devno = MKDEV(driver_major, driver_minor); -+ if (bridge_device) { -+ cdev_del(&bridge_device->cdev); -+ kfree(bridge_device); -+ } -+ unregister_chrdev_region(devno, 1); -+ if (bridge_class) { -+ /* remove the device from sysfs */ -+ device_destroy(bridge_class, MKDEV(driver_major, driver_minor)); -+ class_destroy(bridge_class); -+ -+ } -+ return 0; -+} -+ -+ -+#ifdef CONFIG_PM -+static int bridge_suspend(struct platform_device *pdev, pm_message_t state) -+{ -+ u32 status; -+ u32 command = PWR_EMERGENCYDEEPSLEEP; -+ -+ status = PWR_SleepDSP(command, timeOut); -+ if (DSP_FAILED(status)) -+ return -1; -+ -+ bridge_suspend_data.suspended = 1; -+ return 0; -+} -+ -+static int bridge_resume(struct platform_device *pdev) -+{ -+ u32 status; -+ -+ status = PWR_WakeDSP(timeOut); -+ if (DSP_FAILED(status)) -+ return -1; -+ -+ bridge_suspend_data.suspended = 0; -+ wake_up(&bridge_suspend_data.suspend_wq); -+ return 0; -+} -+#else -+#define bridge_suspend NULL -+#define bridge_resume NULL -+#endif -+ -+static struct platform_driver bridge_driver = { -+ .driver = { -+ .name = BRIDGE_NAME, -+ }, -+ .probe = omap34xx_bridge_probe, -+ .remove = __devexit_p(omap34xx_bridge_remove), -+ .suspend = bridge_suspend, -+ .resume = bridge_resume, -+}; -+ -+static int __init bridge_init(void) -+{ -+ return platform_driver_register(&bridge_driver); -+} -+ -+static void __exit bridge_exit(void) -+{ -+ 
platform_driver_unregister(&bridge_driver); -+} -+ -+/* -+ * This function is called when an application opens handle to the -+ * bridge driver. -+ */ -+static int bridge_open(struct inode *ip, struct file *filp) -+{ -+ int status = 0; -+ DSP_STATUS dsp_status; -+ HANDLE hDrvObject; -+ struct PROCESS_CONTEXT *pr_ctxt = NULL; -+ -+ GT_0trace(driverTrace, GT_ENTER, "-> bridge_open\n"); -+ -+ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ if (DSP_SUCCEEDED(dsp_status)) { -+ /* -+ * Allocate a new process context and insert it into global -+ * process context list. -+ */ -+ DRV_InsertProcContext(hDrvObject, &pr_ctxt); -+ if (pr_ctxt) { -+ DRV_ProcUpdatestate(pr_ctxt, PROC_RES_ALLOCATED); -+ DRV_ProcSetPID(pr_ctxt, current->tgid); -+ } else { -+ status = -ENOMEM; -+ } -+ } else { -+ status = -EIO; -+ } -+ -+ filp->private_data = pr_ctxt; -+ -+ GT_0trace(driverTrace, GT_ENTER, "<- bridge_open\n"); -+ return status; -+} -+ -+/* -+ * This function is called when an application closes handle to the bridge -+ * driver. -+ */ -+static int bridge_release(struct inode *ip, struct file *filp) -+{ -+ int status = 0; -+ DSP_STATUS dsp_status; -+ HANDLE hDrvObject; -+ struct PROCESS_CONTEXT *pr_ctxt; -+ struct PROC_OBJECT *proc_obj_ptr, *temp; -+ -+ GT_0trace(driverTrace, GT_ENTER, "-> bridge_release\n"); -+ -+ if (!filp->private_data) { -+ status = -EIO; -+ } else { -+ pr_ctxt = filp->private_data; -+ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ if (DSP_SUCCEEDED(dsp_status)) { -+ flush_signals(current); -+ DRV_RemoveAllResources(pr_ctxt); -+ list_for_each_entry_safe(proc_obj_ptr, temp, -+ &pr_ctxt->processor_list, -+ proc_object) { -+ PROC_Detach(proc_obj_ptr, pr_ctxt); -+ } -+ DRV_RemoveProcContext((struct DRV_OBJECT *)hDrvObject, -+ pr_ctxt); -+ } else { -+ status = -EIO; -+ } -+ filp->private_data = NULL; -+ } -+ -+ GT_0trace(driverTrace, GT_ENTER, "<- bridge_release\n"); -+ return status; -+} -+ -+/* This function provides IO interface to the bridge driver. */ -+static long bridge_ioctl(struct file *filp, unsigned int code, -+ unsigned long args) -+{ -+ int status; -+ u32 retval = DSP_SOK; -+ union Trapped_Args pBufIn; -+ -+ DBC_Require(filp != NULL); -+#ifdef CONFIG_PM -+ status = omap34xxbridge_suspend_lockout(&bridge_suspend_data, filp); -+ if (status != 0) -+ return status; -+#endif -+ -+ GT_0trace(driverTrace, GT_ENTER, " -> driver_ioctl\n"); -+ -+ /* Deduct one for the CMD_BASE. */ -+ code = (code - 1); -+ -+ status = copy_from_user(&pBufIn, (union Trapped_Args *)args, -+ sizeof(union Trapped_Args)); -+ -+ if (status >= 0) { -+ status = WCD_CallDevIOCtl(code, &pBufIn, &retval, -+ filp->private_data); -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = retval; -+ } else { -+ GT_1trace(driverTrace, GT_7CLASS, -+ "IOCTL Failed, code : 0x%x\n", code); -+ status = -1; -+ } -+ -+ } -+ -+ GT_0trace(driverTrace, GT_ENTER, " <- driver_ioctl\n"); -+ -+ return status; -+} -+ -+/* This function maps kernel space memory to user space memory. 
*/ -+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+#if GT_TRACE -+ u32 offset = vma->vm_pgoff << PAGE_SHIFT; -+#endif -+ u32 status; -+ -+ DBC_Assert(vma->vm_start < vma->vm_end); -+ -+ vma->vm_flags |= VM_RESERVED | VM_IO; -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ -+ GT_6trace(driverTrace, GT_3CLASS, -+ "vm filp %p offset %lx start %lx end %lx" -+ " page_prot %lx flags %lx\n", filp, offset, vma->vm_start, -+ vma->vm_end, vma->vm_page_prot, vma->vm_flags); -+ -+ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ vma->vm_end - vma->vm_start, vma->vm_page_prot); -+ if (status != 0) -+ status = -EAGAIN; -+ -+ return status; -+} -+ -+#ifndef RES_CLEANUP_DISABLE -+/* To remove all process resources before removing the process from the -+ * process context list*/ -+DSP_STATUS DRV_RemoveAllResources(HANDLE hPCtxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROCESS_CONTEXT *pCtxt = (struct PROCESS_CONTEXT *)hPCtxt; -+ if (pCtxt != NULL) { -+ DRV_RemoveAllSTRMResElements(pCtxt); -+ DRV_RemoveAllNodeResElements(pCtxt); -+ DRV_RemoveAllDMMResElements(pCtxt); -+ DRV_ProcUpdatestate(pCtxt, PROC_RES_FREED); -+ } -+ return status; -+} -+#endif -+ -+/* Bridge driver initialization and de-initialization functions */ -+module_init(bridge_init); -+module_exit(bridge_exit); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/drv_interface.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/drv_interface.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/drv_interface.h 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,40 @@ -+/* -+ * drv_interface.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== drv_interface.h ======== -+ * -+ *! Revision History -+ *! ================ -+ *! 24-Mar-2003 vp Added hooks for Power Management Test -+ *! 18-Feb-2003 vp Code review updates -+ *! 
18-Oct-2002 sb Created initial version -+ -+ */ -+ -+#ifndef _DRV_INTERFACE_H_ -+#define _DRV_INTERFACE_H_ -+ -+/* Prototypes for all functions in this bridge */ -+static int __init bridge_init(void); /* Initialize bridge */ -+static void __exit bridge_exit(void); /* Opposite of initialize */ -+static int bridge_open(struct inode *, struct file *); /* Open */ -+static int bridge_release(struct inode *, struct file *); /* Release */ -+static long bridge_ioctl(struct file *, unsigned int, -+ unsigned long); -+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma); -+#endif /* ifndef _DRV_INTERFACE_H_ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dspdrv.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/dspdrv.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/dspdrv.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/dspdrv.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,250 @@ -+/* -+ * dspdrv.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== dspdrv.c ======== -+ * Description: -+ * Interface to allocate and free bridge resources. -+ * -+ *! Revision History -+ *! ================ -+ *! 12-Apr-2004 hp: Compile IVA only for 24xx. -+ *! 09-Feb-2004 vp: Updated to support IVA. -+ *! 10-Feb-2003 vp: Code review updates. -+ *! 18-oct-2002 vp: Ported to the Linux platform. -+ *! 03-Mar-2002 rr: DSP_Deinit bug fixed (gets the Mgrhandle from registry -+ *! before calling MGR_Destroy. -+ *! 11-Jul-2001 jeh Moved MGR_Create() from DSP_Init() to DEV_StartDevice(). -+ *! 02-Apr-2001 rr: WCD_InitComplete2 return value is not checked thus -+ *! sllowing the class driver to load irrespective of -+ *! the image load. -+ *! 30-Oct-2000 kc: Made changes w.r.t. usage of REG_SetValue. -+ *! 05-Oct-2000 rr: WCD_InitComplete2 return value checked for RM. -+ *! Failure in WCD_InitComplete2 will cause the -+ *! DSP_Init to fail. -+ *! 12-Aug-2000 kc: Changed REG_EnumValue to REG_EnumKey. -+ *! 07-Aug-2000 rr: MGR_Create does the job of loading the DCD Dll. -+ *! 26-Jul-2000 rr: Driver Object holds the DevNodeStrings for each -+ *! DevObjects. Static variables removed. Returns -+ *! the Driver Object in DSP_Init. -+ *! 17-Jul-2000 rr: Driver Object is created in DSP_Init and that holds -+ *! the list of Device objects. -+ *! 07-Jul-2000 rr: RM implementaion started. -+ *! 24-May-2000 ag: Cleaned up debug msgs. -+ *! 02-May-2000 rr: DSP_Open returns GetCallerProcess as dwOpenContext. -+ *! 03-Feb-2000 rr: GT Changes. -+ *! 28-Jan-2000 rr: Code Cleaned up.Type void changed to void. -+ *! DSP_Deinit checks return values.dwCode in -+ *! DSP_IO_CONTROL is decoded(not hard coded) -+ *! 27-Jan-2000 rr: REG_EnumValue Used .EnumerateKey fxn removed. -+ *! 13-Jan-2000 rr: CFG_GetPrivateDword renamed to CFG_GetDevObject. -+ *! 29-Dec-1999 rr: Code Cleaned up -+ *! 09-Dec-1999 rr: EnumerateKey changed for retail build. -+ *! 06-Dec-1999 rr: ArrayofInstalledNode, index and ArrayofInstalledDev -+ *! 
is Global.DevObject stores this pointer as hDevNode. -+ *! 02-Dec-1999 rr: DBG_SetGT and RetailMSG conditionally included. -+ *! Comments changed.Deinit handled.Code cleaned up. -+ *! DSP_IOControl, Close, Deinit returns bool values. -+ *! Calls WCD_InitComplete2 for Board AutoStart. -+ *! 29-Nov-1999 rr: DSP_IOControl returns the result through pBufOut. -+ *! Global Arrays keeps track of installed devices. -+ *! 19-Nov-1999 rr: DSP_Init handles multiple drivers. -+ *! 12-Nov-1999 rr: GetDriverKey and EnumerateKey functions added. -+ *! for multiple mini driver support.PCCARD flag -+ *! checking to include PCMCIA related stuff. -+ *! 25-Oct-1999 rr: GT_Init is called within the Process Attach. -+ *! return value initalized to S_OK upfront in the -+ *! Process Attach. -+ *! 15-Oct-1999 rr: DSP_DeInit handles the return values -+ *! 05-Oct-1999 rr: All the PCMCIA related functions are now in PCCARD.c -+ *! DRV_Request Resources is used instead of the -+ *! RegisterMiniDriver as it sounds close to what we are doing. -+ *! 24-Sep-1999 rr: DRV_RegisterMiniDriver is being called from here. Only -+ *! neccessaryPCMCIA fxns are here. Soon they will move out -+ *! either to a seperate file for bus specific inits. -+ *! 10-Sep-1999 rr: GT Enabled. Considerably changed the driver structure as -+ *! - This is the Class driver. After successfully initialized -+ *! the Class driver will attempt to load the Mini driver. -+ *! - Need to seperate the PCMCIA stuff based on bus type. -+ *! - Changed the name of the file to wcdce.c -+ *! - Made the Media Handle as Global again -+ *! -+ *! 19-Aug-1999 rr: Removed the Global hbhMediaHandle. Included the MemTest. -+ *! Modified the DSP_Init, now three windows are opened. -+ *! Split the driver into PDD so that hardware dependent -+ *! functions will reside in PDD. -+ *! 16-Jul-1999 ag Adapted from rkw's CAC Bullet card driver. -+ *! -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Globals */ -+struct GT_Mask curTrace; -+ -+/* -+ * ======== DSP_Init ======== -+ * Allocates bridge resources. Loads a base image onto DSP, if specified. 
-+ */ -+u32 DSP_Init(OUT u32 *initStatus) -+{ -+ char devNode[MAXREGPATHLENGTH] = "TIOMAP1510"; -+ DSP_STATUS status = DSP_EFAIL; -+ struct DRV_OBJECT *drvObject = NULL; -+ u32 index = 0; -+ u32 deviceNode; -+ u32 deviceNodeString; -+ -+ GT_create(&curTrace, "DD"); -+ -+ GT_0trace(curTrace, GT_ENTER, "Entering DSP_Init \r\n"); -+ -+ if (DSP_FAILED(WCD_Init())) { -+ GT_0trace(curTrace, GT_7CLASS, "DSP_Init Failed \n"); -+ goto func_cont; -+ } /* End WCD_Exit */ -+ if (DSP_FAILED(DRV_Create(&drvObject))) { -+ GT_0trace(curTrace, GT_7CLASS, "DSP_Init:DRV_Create Failed \n"); -+ WCD_Exit(); -+ goto func_cont; -+ } /* End DRV_Create */ -+ GT_0trace(curTrace, GT_5CLASS, "DSP_Init:DRV Created \r\n"); -+ -+ /* Request Resources */ -+ if (DSP_SUCCEEDED(DRV_RequestResources((u32)&devNode, -+ &deviceNodeString))) { -+ /* Attempt to Start the Device */ -+ if (DSP_SUCCEEDED(DEV_StartDevice( -+ (struct CFG_DEVNODE *)deviceNodeString))) { -+ /* Retreive the DevObject from the Registry */ -+ GT_2trace(curTrace, GT_1CLASS, -+ "DSP_Init Succeeded for Device1:" -+ "%d: value: %x\n", index, deviceNodeString); -+ status = DSP_SOK; -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, -+ "DSP_Init:DEV_StartDevice Failed\n"); -+ (void)DRV_ReleaseResources -+ ((u32) deviceNodeString, drvObject); -+ status = DSP_EFAIL; -+ } -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, -+ "DSP_Init:DRV_RequestResources Failed \r\n"); -+ status = DSP_EFAIL; -+ } /* DRV_RequestResources */ -+ index++; -+ -+ /* Unwind whatever was loaded */ -+ if (DSP_FAILED(status)) { -+ /* irrespective of the status of DEV_RemoveDevice we conitinue -+ * unloading. Get the Driver Object iterate through and remove. -+ * Reset the status to E_FAIL to avoid going through -+ * WCD_InitComplete2. */ -+ status = DSP_EFAIL; -+ for (deviceNode = DRV_GetFirstDevExtension(); deviceNode != 0; -+ deviceNode = DRV_GetNextDevExtension(deviceNode)) { -+ (void)DEV_RemoveDevice -+ ((struct CFG_DEVNODE *)deviceNode); -+ (void)DRV_ReleaseResources((u32)deviceNode, -+ drvObject); -+ } -+ /* Remove the Driver Object */ -+ (void)DRV_Destroy(drvObject); -+ drvObject = NULL; -+ WCD_Exit(); -+ GT_0trace(curTrace, GT_7CLASS, -+ "DSP_Init:Logical device Failed to Load\n"); -+ } /* Unwinding the loaded drivers */ -+func_cont: -+ /* Attempt to Start the Board */ -+ if (DSP_SUCCEEDED(status)) { -+ /* BRD_AutoStart could fail if the dsp execuetable is not the -+ * correct one. We should not propagate that error -+ * into the device loader. */ -+ (void)WCD_InitComplete2(); -+ GT_0trace(curTrace, GT_1CLASS, "DSP_Init Succeeded\n"); -+ } else { -+ GT_0trace(curTrace, GT_7CLASS, "DSP_Init Failed\n"); -+ } /* End WCD_InitComplete2 */ -+ DBC_Ensure((DSP_SUCCEEDED(status) && drvObject != NULL) || -+ (DSP_FAILED(status) && drvObject == NULL)); -+ *initStatus = status; -+ /* Return the Driver Object */ -+ return (u32)drvObject; -+} -+ -+/* -+ * ======== DSP_Deinit ======== -+ * Frees the resources allocated for bridge. 
-+ */ -+bool DSP_Deinit(u32 deviceContext) -+{ -+ bool retVal = true; -+ u32 deviceNode; -+ struct MGR_OBJECT *mgrObject = NULL; -+ -+ GT_0trace(curTrace, GT_ENTER, "Entering DSP_Deinit \r\n"); -+ -+ while ((deviceNode = DRV_GetFirstDevExtension()) != 0) { -+ (void)DEV_RemoveDevice((struct CFG_DEVNODE *)deviceNode); -+ -+ (void)DRV_ReleaseResources((u32)deviceNode, -+ (struct DRV_OBJECT *)deviceContext); -+ } -+ -+ (void) DRV_Destroy((struct DRV_OBJECT *) deviceContext); -+ -+ /* Get the Manager Object from Registry -+ * MGR Destroy will unload the DCD dll */ -+ if (DSP_SUCCEEDED(CFG_GetObject((u32 *)&mgrObject, REG_MGR_OBJECT))) -+ (void)MGR_Destroy(mgrObject); -+ -+ WCD_Exit(); -+ -+ return retVal; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/mgr.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/mgr.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/mgr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/mgr.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,491 @@ -+/* -+ * mgr.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== mgr.c ======== -+ * Description: -+ * Implementation of Manager interface to the device object at the -+ * driver level. This queries the NDB data base and retrieves the -+ * data about Node and Processor. -+ * -+ * -+ *! Revision History: -+ *! ================ -+ *! 12-Feb-2003 vp: Code review updates. -+ *! 18-Oct-2002 vp: Ported to Linux platform -+ *! 01-Aug-2001 ag: Added extended info for DSP-MMU setup support. -+ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. -+ *! 22-Nov-2000 kc: Added MGR_GetPerfData. -+ *! 03-Nov-2000 rr: Updated after code review. -+ *! 25-Sep-2000 rr: Updated to Version 0.9 -+ *! 10-Aug-2000 rr: dwSignature is not specifically inserted in MGR Obj -+ *! as it is taken care by MEM_AllocObject. stdwin.h added -+ *! for retail build to succeed. -+ *! 07-Aug-2000 rr: MGR_Create does the job of Loading DCD Dll. -+ *! 26-Jul-2000 rr: MGR_Destroy releases the hNDBDll. -+ *! 20-Jun-2000 rr: Created. 
-+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define ZLDLLNAME "" -+#define SIGNATURE 0x5f52474d /* "MGR_" (in reverse) */ -+ -+struct MGR_OBJECT { -+ u32 dwSignature; -+ struct DCD_MANAGER *hDcdMgr; /* Proc/Node data manager */ -+}; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask MGR_DebugMask = { NULL, NULL }; -+#endif -+ -+static u32 cRefs; -+ -+/* -+ * ========= MGR_Create ========= -+ * Purpose: -+ * MGR Object gets created only once during driver Loading. -+ */ -+DSP_STATUS MGR_Create(OUT struct MGR_OBJECT **phMgrObject, -+ struct CFG_DEVNODE *hDevNode) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct MGR_OBJECT *pMgrObject = NULL; -+ -+ DBC_Require(phMgrObject != NULL); -+ DBC_Require(cRefs > 0); -+ GT_1trace(MGR_DebugMask, GT_ENTER, -+ "Entering MGR_Create phMgrObject 0x%x\n ", -+ phMgrObject); -+ MEM_AllocObject(pMgrObject, struct MGR_OBJECT, SIGNATURE); -+ if (pMgrObject) { -+ if (DSP_SUCCEEDED(DCD_CreateManager(ZLDLLNAME, -+ &pMgrObject->hDcdMgr))) { -+ /* If succeeded store the handle in the MGR Object */ -+ if (DSP_SUCCEEDED(CFG_SetObject((u32)pMgrObject, -+ REG_MGR_OBJECT))) { -+ *phMgrObject = pMgrObject; -+ GT_0trace(MGR_DebugMask, GT_1CLASS, -+ "MGR_Create:MGR Created\r\n"); -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "MGR_Create:CFG_SetObject " -+ "Failed\r\n"); -+ DCD_DestroyManager(pMgrObject->hDcdMgr); -+ MEM_FreeObject(pMgrObject); -+ } -+ } else { -+ /* failed to Create DCD Manager */ -+ status = DSP_EFAIL; -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "MGR_Create:DCD_ManagerCreate Failed\r\n"); -+ MEM_FreeObject(pMgrObject); -+ } -+ } else { -+ status = DSP_EMEMORY; -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "MGR_Create DSP_FAILED to allocate memory \n"); -+ } -+ GT_2trace(MGR_DebugMask, GT_ENTER, -+ "Exiting MGR_Create: phMgrObject: 0x%x\t" -+ "status: 0x%x\n", phMgrObject, status); -+ DBC_Ensure(DSP_FAILED(status) || -+ MEM_IsValidHandle(pMgrObject, SIGNATURE)); -+ return status; -+} -+ -+/* -+ * ========= MGR_Destroy ========= -+ * This function is invoked during bridge driver unloading.Frees MGR object. 
-+ */ -+DSP_STATUS MGR_Destroy(struct MGR_OBJECT *hMgrObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct MGR_OBJECT *pMgrObject = (struct MGR_OBJECT *)hMgrObject; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hMgrObject, SIGNATURE)); -+ -+ GT_1trace(MGR_DebugMask, GT_ENTER, -+ "Entering MGR_Destroy hMgrObject 0x%x\n", hMgrObject); -+ /* Free resources */ -+ if (hMgrObject->hDcdMgr) -+ DCD_DestroyManager(hMgrObject->hDcdMgr); -+ -+ MEM_FreeObject(pMgrObject); -+ /* Update the Registry with NULL for MGR Object */ -+ (void)CFG_SetObject(0, REG_MGR_OBJECT); -+ -+ GT_2trace(MGR_DebugMask, GT_ENTER, -+ "Exiting MGR_Destroy: hMgrObject: 0x%x\t" -+ "status: 0x%x\n", hMgrObject, status); -+ -+ DBC_Ensure(DSP_FAILED(status) || -+ !MEM_IsValidHandle(hMgrObject, SIGNATURE)); -+ -+ return status; -+} -+ -+/* -+ * ======== MGR_EnumNodeInfo ======== -+ * Enumerate and get configuration information about nodes configured -+ * in the node database. -+ */ -+DSP_STATUS MGR_EnumNodeInfo(u32 uNode, OUT struct DSP_NDBPROPS *pNDBProps, -+ u32 uNDBPropsSize, OUT u32 *puNumNodes) -+{ -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_SOK; -+ struct DSP_UUID Uuid, uTempUuid; -+ u32 uTempIndex = 0; -+ u32 uNodeIndex = 0; -+ struct DCD_GENERICOBJ GenObj; -+ struct MGR_OBJECT *pMgrObject = NULL; -+ -+ DBC_Require(pNDBProps != NULL); -+ DBC_Require(puNumNodes != NULL); -+ DBC_Require(uNDBPropsSize >= sizeof(struct DSP_NDBPROPS)); -+ DBC_Require(cRefs > 0); -+ -+ GT_4trace(MGR_DebugMask, GT_ENTER, "Entered Manager_EnumNodeInfo, " -+ "args:\n\t uNode: 0x%x\n\tpNDBProps: 0x%x\n\tuNDBPropsSize:" -+ "0x%x\tpuNumNodes: 0x%x\n", uNode, pNDBProps, -+ uNDBPropsSize, puNumNodes); -+ *puNumNodes = 0; -+ /* Get The Manager Object from the Registry */ -+ if (DSP_FAILED(CFG_GetObject((u32 *)&pMgrObject, -+ REG_MGR_OBJECT))) { -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumNodeInfo:Failed To Get" -+ " MGR Object from Registry\r\n"); -+ goto func_cont; -+ } -+ DBC_Assert(MEM_IsValidHandle(pMgrObject, SIGNATURE)); -+ /* Forever loop till we hit failed or no more items in the -+ * Enumeration. We will exit the loop other than DSP_SOK; */ -+ while (status == DSP_SOK) { -+ status = DCD_EnumerateObject(uTempIndex++, DSP_DCDNODETYPE, -+ &uTempUuid); -+ if (status == DSP_SOK) { -+ uNodeIndex++; -+ if (uNode == (uNodeIndex - 1)) -+ Uuid = uTempUuid; -+ -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ if (uNode > (uNodeIndex - 1)) { -+ status = DSP_EINVALIDARG; -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumNodeInfo: uNode" -+ " is Invalid \r\n"); -+ } else { -+ status1 = DCD_GetObjectDef(pMgrObject->hDcdMgr, -+ (struct DSP_UUID *)&Uuid, -+ DSP_DCDNODETYPE, &GenObj); -+ if (DSP_SUCCEEDED(status1)) { -+ /* Get the Obj def */ -+ *pNDBProps = GenObj.objData.nodeObj.ndbProps; -+ *puNumNodes = uNodeIndex; -+ status = DSP_SOK; -+ } else { -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumNodeInfo: " -+ "Failed to Get Node Info \r\n"); -+ status = DSP_EFAIL; -+ } -+ } -+ } else { -+ /* This could be changed during enum, EFAIL ... 
*/ -+ GT_0trace(MGR_DebugMask, GT_7CLASS, "Manager_EnumNodeInfo: " -+ "Enumeration failure\r\n"); -+ status = DSP_EFAIL; -+ } -+func_cont: -+ GT_4trace(MGR_DebugMask, GT_ENTER, -+ "Exiting Manager_EnumNodeInfo, args:\n\t" -+ "uNode: 0x%x\n\tpNDBProps: 0x%x\n\tuNDBPropsSize:" -+ " 0x%x\tuNumNodes: 0x%x\n", uNode, pNDBProps, -+ uNDBPropsSize, *puNumNodes); -+ DBC_Ensure((DSP_SUCCEEDED(status) && *puNumNodes > 0) || -+ (DSP_FAILED(status) && *puNumNodes == 0)); -+ -+ return status; -+} -+ -+/* -+ * ======== MGR_EnumProcessorInfo ======== -+ * Enumerate and get configuration information about available -+ * DSP processors. -+ */ -+DSP_STATUS MGR_EnumProcessorInfo(u32 uProcessor, -+ OUT struct DSP_PROCESSORINFO *pProcessorInfo, -+ u32 uProcessorInfoSize, OUT u32 *puNumProcs) -+{ -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_SOK; -+ DSP_STATUS status2 = DSP_SOK; -+ struct DSP_UUID uTempUuid; -+ u32 uTempIndex = 0; -+ u32 uProcIndex = 0; -+ struct DCD_GENERICOBJ GenObj; -+ struct MGR_OBJECT *pMgrObject = NULL; -+ struct MGR_PROCESSOREXTINFO *pExtInfo; -+ struct DEV_OBJECT *hDevObject; -+ struct DRV_OBJECT *hDrvObject; -+ s32 devType; -+ struct CFG_DEVNODE *devNode; -+ struct CFG_DSPRES chipResources; -+ bool procDetect = false; -+ -+ DBC_Require(pProcessorInfo != NULL); -+ DBC_Require(puNumProcs != NULL); -+ DBC_Require(uProcessorInfoSize >= sizeof(struct DSP_PROCESSORINFO)); -+ DBC_Require(cRefs > 0); -+ -+ GT_4trace(MGR_DebugMask, GT_ENTER, -+ "Entered Manager_EnumProcessorInfo, " -+ "args:\n\tuProcessor: 0x%x\n\tpProcessorInfo: 0x%x\n\t" -+ "uProcessorInfoSize: 0x%x\tpuNumProcs: 0x%x\n", uProcessor, -+ pProcessorInfo, uProcessorInfoSize, puNumProcs); -+ *puNumProcs = 0; -+ status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ if (DSP_SUCCEEDED(status)) { -+ status = DRV_GetDevObject(uProcessor, hDrvObject, &hDevObject); -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetDevType(hDevObject, (u32 *) &devType); -+ status = DEV_GetDevNode(hDevObject, &devNode); -+ if (devType == DSP_UNIT) { -+ status = CFG_GetDSPResources(devNode, -+ &chipResources); -+ } else { -+ status = DSP_EFAIL; -+ GT_1trace(MGR_DebugMask, GT_7CLASS, -+ "Unsupported dev type gotten" -+ "from device object %d\n", devType); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pProcessorInfo->uProcessorType = -+ chipResources.uChipType; -+ } -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* Get The Manager Object from the Registry */ -+ if (DSP_FAILED(CFG_GetObject((u32 *)&pMgrObject, -+ REG_MGR_OBJECT))) { -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumProcessorInfo: " -+ "Failed To Get MGR Object from Registry\r\n"); -+ goto func_end; -+ } -+ DBC_Assert(MEM_IsValidHandle(pMgrObject, SIGNATURE)); -+ /* Forever loop till we hit no more items in the -+ * Enumeration. 
We will exit the loop other than DSP_SOK; */ -+ while (status1 == DSP_SOK) { -+ status1 = DCD_EnumerateObject(uTempIndex++, -+ DSP_DCDPROCESSORTYPE, -+ &uTempUuid); -+ if (status1 != DSP_SOK) -+ break; -+ -+ uProcIndex++; -+ /* Get the Object properties to find the Device/Processor -+ * Type */ -+ if (procDetect != false) -+ continue; -+ -+ status2 = DCD_GetObjectDef(pMgrObject->hDcdMgr, -+ (struct DSP_UUID *)&uTempUuid, -+ DSP_DCDPROCESSORTYPE, -+ &GenObj); -+ if (DSP_SUCCEEDED(status2)) { -+ /* Get the Obj def */ -+ if (uProcessorInfoSize < -+ sizeof(struct MGR_PROCESSOREXTINFO)) { -+ *pProcessorInfo = GenObj.objData.procObj; -+ } else { -+ /* extended info */ -+ pExtInfo = (struct MGR_PROCESSOREXTINFO *) -+ pProcessorInfo; -+ *pExtInfo = GenObj.objData.extProcObj; -+ } -+ GT_1trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumProcessorInfo: Got" -+ " Proctype from DCD %x \r\n", -+ pProcessorInfo->uProcessorType); -+ /* See if we got the needed processor */ -+ if (devType == DSP_UNIT) { -+ if (pProcessorInfo->uProcessorType == -+ DSPPROCTYPE_C64) -+ procDetect = true; -+ } else if (devType == IVA_UNIT) { -+ if (pProcessorInfo->uProcessorType == -+ IVAPROCTYPE_ARM7) -+ procDetect = true; -+ } -+ /* User applciatiuons aonly check for chip type, so -+ * this clumsy overwrite */ -+ pProcessorInfo->uProcessorType = -+ chipResources.uChipType; -+ } else { -+ GT_1trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumProcessorInfo: " -+ "Failed to Get DCD Processor Info %x \r\n", -+ status2); -+ status = DSP_EFAIL; -+ } -+ } -+ *puNumProcs = uProcIndex; -+ if (procDetect == false) { -+ GT_0trace(MGR_DebugMask, GT_7CLASS, -+ "Manager_EnumProcessorInfo: Failed" -+ " to get Proc info from DCD , so use CFG registry\n"); -+ pProcessorInfo->uProcessorType = chipResources.uChipType; -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== MGR_Exit ======== -+ * Decrement reference count, and free resources when reference count is -+ * 0. -+ */ -+void MGR_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ cRefs--; -+ if (cRefs == 0) -+ DCD_Exit(); -+ -+ GT_1trace(MGR_DebugMask, GT_5CLASS, -+ "Entered MGR_Exit, ref count: 0x%x\n", cRefs); -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== MGR_GetDCDHandle ======== -+ * Retrieves the MGR handle. Accessor Function. -+ */ -+DSP_STATUS MGR_GetDCDHandle(struct MGR_OBJECT *hMGRHandle, -+ OUT u32 *phDCDHandle) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct MGR_OBJECT *pMgrObject = (struct MGR_OBJECT *)hMGRHandle; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDCDHandle != NULL); -+ -+ *phDCDHandle = (u32)NULL; -+ if (MEM_IsValidHandle(pMgrObject, SIGNATURE)) { -+ *phDCDHandle = (u32) pMgrObject->hDcdMgr; -+ status = DSP_SOK; -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && *phDCDHandle != (u32)NULL) || -+ (DSP_FAILED(status) && *phDCDHandle == (u32)NULL)); -+ -+ return status; -+} -+ -+/* -+ * ======== MGR_Init ======== -+ * Initialize MGR's private state, keeping a reference count on each call. 
-+ */ -+bool MGR_Init(void) -+{ -+ bool fRetval = true; -+ bool fInitDCD = false; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ -+ /* Set the Trace mask */ -+ DBC_Assert(!MGR_DebugMask.flags); -+ -+ GT_create(&MGR_DebugMask, "MG"); /* "MG" for Manager */ -+ fInitDCD = DCD_Init(); /* DCD Module */ -+ -+ if (!fInitDCD) { -+ fRetval = false; -+ GT_0trace(MGR_DebugMask, GT_6CLASS, -+ "MGR_Init failed\n"); -+ } -+ } -+ -+ if (fRetval) -+ cRefs++; -+ -+ -+ GT_1trace(MGR_DebugMask, GT_5CLASS, -+ "Entered MGR_Init, ref count: 0x%x\n", cRefs); -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -+ -+/* -+ * ======== MGR_WaitForBridgeEvents ======== -+ * Block on any Bridge event(s) -+ */ -+DSP_STATUS MGR_WaitForBridgeEvents(struct DSP_NOTIFICATION **aNotifications, -+ u32 uCount, OUT u32 *puIndex, u32 uTimeout) -+{ -+ DSP_STATUS status; -+ struct SYNC_OBJECT *hSyncEvents[MAX_EVENTS]; -+ u32 i; -+ -+ DBC_Require(uCount < MAX_EVENTS); -+ -+ for (i = 0; i < uCount; i++) -+ hSyncEvents[i] = aNotifications[i]->handle; -+ -+ status = SYNC_WaitOnMultipleEvents(hSyncEvents, uCount, uTimeout, -+ puIndex); -+ -+ return status; -+ -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/nldr.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/nldr.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/nldr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/nldr.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,1967 @@ -+/* -+ * nldr.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== nldr.c ======== -+ * Description: -+ * DSP/BIOS Bridge dynamic + overlay Node loader. -+ * -+ * Public Functions: -+ * NLDR_Allocate -+ * NLDR_Create -+ * NLDR_Delete -+ * NLDR_Exit -+ * NLDR_Free -+ * NLDR_GetFxnAddr -+ * NLDR_Init -+ * NLDR_Load -+ * NLDR_Unload -+ * -+ * Notes: -+ * -+ *! Revision History -+ *! ================ -+ *! 07-Apr-2003 map Removed references to dead DLDR module -+ *! 23-Jan-2003 map Updated RemoteAlloc to support memory granularity -+ *! 20-Jan-2003 map Updated to maintain persistent dependent libraries -+ *! 15-Jan-2003 map Adapted for use with multiple dynamic phase libraries -+ *! 19-Dec-2002 map Fixed overlay bug in AddOvlySect for overlay -+ *! sections > 1024 bytes. -+ *! 13-Dec-2002 map Fixed NLDR_GetFxnAddr bug by searching dependent -+ *! libs for symbols -+ *! 27-Sep-2002 map Added RemoteFree to convert size to words for -+ *! correct deallocation -+ *! 16-Sep-2002 map Code Review Cleanup(from dldr.c) -+ *! 29-Aug-2002 map Adjusted for ARM-side overlay copy -+ *! 05-Aug-2002 jeh Created. 
-+ */ -+ -+#include -+ -+#include -+#include -+#include -+ -+#include -+#include -+#ifdef DEBUG -+#include -+#endif -+ -+/* OS adaptation layer */ -+#include -+#include -+ -+/* Platform manager */ -+#include -+#include -+ -+/* Resource manager */ -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define NLDR_SIGNATURE 0x52444c4e /* "RDLN" */ -+#define NLDR_NODESIGNATURE 0x4e444c4e /* "NDLN" */ -+ -+/* Name of section containing dynamic load mem */ -+#define DYNMEMSECT ".dspbridge_mem" -+ -+/* Name of section containing dependent library information */ -+#define DEPLIBSECT ".dspbridge_deplibs" -+ -+/* Max depth of recursion for loading node's dependent libraries */ -+#define MAXDEPTH 5 -+ -+/* Max number of persistent libraries kept by a node */ -+#define MAXLIBS 5 -+ -+/* -+ * Defines for extracting packed dynamic load memory requirements from two -+ * masks. -+ * These defines must match node.cdb and dynm.cdb -+ * Format of data/code mask is: -+ * uuuuuuuu|fueeeeee|fudddddd|fucccccc| -+ * where -+ * u = unused -+ * cccccc = prefered/required dynamic mem segid for create phase data/code -+ * dddddd = prefered/required dynamic mem segid for delete phase data/code -+ * eeeeee = prefered/req. dynamic mem segid for execute phase data/code -+ * f = flag indicating if memory is preferred or required: -+ * f = 1 if required, f = 0 if preferred. -+ * -+ * The 6 bits of the segid are interpreted as follows: -+ * -+ * If the 6th bit (bit 5) is not set, then this specifies a memory segment -+ * between 0 and 31 (a maximum of 32 dynamic loading memory segments). -+ * If the 6th bit (bit 5) is set, segid has the following interpretation: -+ * segid = 32 - Any internal memory segment can be used. -+ * segid = 33 - Any external memory segment can be used. -+ * segid = 63 - Any memory segment can be used (in this case the -+ * required/preferred flag is irrelevant). -+ * -+ */ -+/* Maximum allowed dynamic loading memory segments */ -+#define MAXMEMSEGS 32 -+ -+#define MAXSEGID 3 /* Largest possible (real) segid */ -+#define MEMINTERNALID 32 /* Segid meaning use internal mem */ -+#define MEMEXTERNALID 33 /* Segid meaning use external mem */ -+#define NULLID 63 /* Segid meaning no memory req/pref */ -+#define FLAGBIT 7 /* 7th bit is pref./req. flag */ -+#define SEGMASK 0x3f /* Bits 0 - 5 */ -+ -+#define CREATEBIT 0 /* Create segid starts at bit 0 */ -+#define DELETEBIT 8 /* Delete segid starts at bit 8 */ -+#define EXECUTEBIT 16 /* Execute segid starts at bit 16 */ -+ -+/* -+ * Masks that define memory type. Must match defines in dynm.cdb. -+ */ -+#define DYNM_CODE 0x2 -+#define DYNM_DATA 0x4 -+#define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA) -+#define DYNM_INTERNAL 0x8 -+#define DYNM_EXTERNAL 0x10 -+ -+/* -+ * Defines for packing memory requirement/preference flags for code and -+ * data of each of the node's phases into one mask. -+ * The bit is set if the segid is required for loading code/data of the -+ * given phase. The bit is not set, if the segid is preferred only. -+ * -+ * These defines are also used as indeces into a segid array for the node. -+ * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the -+ * create phase data is required or preferred to be loaded into. 
-+ */ -+#define CREATEDATAFLAGBIT 0 -+#define CREATECODEFLAGBIT 1 -+#define EXECUTEDATAFLAGBIT 2 -+#define EXECUTECODEFLAGBIT 3 -+#define DELETEDATAFLAGBIT 4 -+#define DELETECODEFLAGBIT 5 -+#define MAXFLAGS 6 -+ -+#define IsInternal(hNldr, segid) (((segid) <= MAXSEGID && \ -+ hNldr->segTable[(segid)] & DYNM_INTERNAL) || \ -+ (segid) == MEMINTERNALID) -+ -+#define IsExternal(hNldr, segid) (((segid) <= MAXSEGID && \ -+ hNldr->segTable[(segid)] & DYNM_EXTERNAL) || \ -+ (segid) == MEMEXTERNALID) -+ -+#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \ -+ (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF)) -+ -+#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF)) -+ -+ /* -+ * These names may be embedded in overlay sections to identify which -+ * node phase the section should be overlayed. -+ */ -+#define PCREATE "create" -+#define PDELETE "delete" -+#define PEXECUTE "execute" -+ -+#define IsEqualUUID(uuid1, uuid2) (\ -+ ((uuid1).ulData1 == (uuid2).ulData1) && \ -+ ((uuid1).usData2 == (uuid2).usData2) && \ -+ ((uuid1).usData3 == (uuid2).usData3) && \ -+ ((uuid1).ucData4 == (uuid2).ucData4) && \ -+ ((uuid1).ucData5 == (uuid2).ucData5) && \ -+ (strncmp((void *)(uuid1).ucData6, (void *)(uuid2).ucData6, 6)) == 0) -+ -+ /* -+ * ======== MemInfo ======== -+ * Format of dynamic loading memory segment info in coff file. -+ * Must match dynm.h55. -+ */ -+struct MemInfo { -+ u32 segid; /* Dynamic loading memory segment number */ -+ u32 base; -+ u32 len; -+ u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */ -+}; -+ -+/* -+ * ======== LibNode ======== -+ * For maintaining a tree of library dependencies. -+ */ -+struct LibNode { -+ struct DBLL_LibraryObj *lib; /* The library */ -+ u16 nDepLibs; /* Number of dependent libraries */ -+ struct LibNode *pDepLibs; /* Dependent libraries of lib */ -+}; -+ -+/* -+ * ======== OvlySect ======== -+ * Information needed to overlay a section. -+ */ -+struct OvlySect { -+ struct OvlySect *pNextSect; -+ u32 loadAddr; /* Load address of section */ -+ u32 runAddr; /* Run address of section */ -+ u32 size; /* Size of section */ -+ u16 page; /* DBL_CODE, DBL_DATA */ -+}; -+ -+/* -+ * ======== OvlyNode ======== -+ * For maintaining a list of overlay nodes, with sections that need to be -+ * overlayed for each of the nodes phases. -+ */ -+struct OvlyNode { -+ struct DSP_UUID uuid; -+ char *pNodeName; -+ struct OvlySect *pCreateSects; -+ struct OvlySect *pDeleteSects; -+ struct OvlySect *pExecuteSects; -+ struct OvlySect *pOtherSects; -+ u16 nCreateSects; -+ u16 nDeleteSects; -+ u16 nExecuteSects; -+ u16 nOtherSects; -+ u16 createRef; -+ u16 deleteRef; -+ u16 executeRef; -+ u16 otherRef; -+}; -+ -+/* -+ * ======== NLDR_OBJECT ======== -+ * Overlay loader object. 
-+ */ -+struct NLDR_OBJECT { -+ u32 dwSignature; /* For object validation */ -+ struct DEV_OBJECT *hDevObject; /* Device object */ -+ struct DCD_MANAGER *hDcdMgr; /* Proc/Node data manager */ -+ struct DBLL_TarObj *dbll; /* The DBL loader */ -+ struct DBLL_LibraryObj *baseLib; /* Base image library */ -+ struct RMM_TargetObj *rmm; /* Remote memory manager for DSP */ -+ struct DBLL_Fxns dbllFxns; /* Loader function table */ -+ struct DBLL_Attrs dbllAttrs; /* attrs to pass to loader functions */ -+ NLDR_OVLYFXN ovlyFxn; /* "write" for overlay nodes */ -+ NLDR_WRITEFXN writeFxn; /* "write" for dynamic nodes */ -+ struct OvlyNode *ovlyTable; /* Table of overlay nodes */ -+ u16 nOvlyNodes; /* Number of overlay nodes in base */ -+ u16 nNode; /* Index for tracking overlay nodes */ -+ u16 nSegs; /* Number of dynamic load mem segs */ -+ u32 *segTable; /* memtypes of dynamic memory segs -+ * indexed by segid -+ */ -+ u16 usDSPMauSize; /* Size of DSP MAU */ -+ u16 usDSPWordSize; /* Size of DSP word */ -+}; -+ -+/* -+ * ======== NLDR_NODEOBJECT ======== -+ * Dynamic node object. This object is created when a node is allocated. -+ */ -+struct NLDR_NODEOBJECT { -+ u32 dwSignature; /* For object validation */ -+ struct NLDR_OBJECT *pNldr; /* Dynamic loader handle */ -+ void *pPrivRef; /* Handle to pass to DBL_WriteFxn */ -+ struct DSP_UUID uuid; /* Node's UUID */ -+ bool fDynamic; /* Dynamically loaded node? */ -+ bool fOverlay; /* Overlay node? */ -+ bool *pfPhaseSplit; /* Multiple phase libraries? */ -+ struct LibNode root; /* Library containing node phase */ -+ struct LibNode createLib; /* Library containing create phase lib */ -+ struct LibNode executeLib; /* Library containing execute phase lib */ -+ struct LibNode deleteLib; /* Library containing delete phase lib */ -+ struct LibNode persLib[MAXLIBS]; /* libs remain loaded until Delete */ -+ s32 nPersLib; /* Number of persistent libraries */ -+ /* Path in lib dependency tree */ -+ struct DBLL_LibraryObj *libPath[MAXDEPTH + 1]; -+ enum NLDR_PHASE phase; /* Node phase currently being loaded */ -+ -+ /* -+ * Dynamic loading memory segments for data and code of each phase. -+ */ -+ u16 segId[MAXFLAGS]; -+ -+ /* -+ * Mask indicating whether each mem segment specified in segId[] -+ * is preferred or required. -+ * For example if (codeDataFlagMask & (1 << EXECUTEDATAFLAGBIT)) != 0, -+ * then it is required to load execute phase data into the memory -+ * specified by segId[EXECUTEDATAFLAGBIT]. 
-+ */ -+ u32 codeDataFlagMask; -+}; -+ -+/* Dynamic loader function table */ -+static struct DBLL_Fxns dbllFxns = { -+ (DBLL_CloseFxn) DBLL_close, -+ (DBLL_CreateFxn) DBLL_create, -+ (DBLL_DeleteFxn) DBLL_delete, -+ (DBLL_ExitFxn) DBLL_exit, -+ (DBLL_GetAttrsFxn) DBLL_getAttrs, -+ (DBLL_GetAddrFxn) DBLL_getAddr, -+ (DBLL_GetCAddrFxn) DBLL_getCAddr, -+ (DBLL_GetSectFxn) DBLL_getSect, -+ (DBLL_InitFxn) DBLL_init, -+ (DBLL_LoadFxn) DBLL_load, -+ (DBLL_LoadSectFxn) DBLL_loadSect, -+ (DBLL_OpenFxn) DBLL_open, -+ (DBLL_ReadSectFxn) DBLL_readSect, -+ (DBLL_SetAttrsFxn) DBLL_setAttrs, -+ (DBLL_UnloadFxn) DBLL_unload, -+ (DBLL_UnloadSectFxn) DBLL_unloadSect, -+}; -+ -+static struct GT_Mask NLDR_debugMask = { NULL, NULL }; /* GT trace variable */ -+static u32 cRefs; /* module reference count */ -+ -+static DSP_STATUS AddOvlyInfo(void *handle, struct DBLL_SectInfo *sectInfo, -+ u32 addr, u32 nBytes); -+static DSP_STATUS AddOvlyNode(struct DSP_UUID *pUuid, -+ enum DSP_DCDOBJTYPE objType, -+ IN void *handle); -+static DSP_STATUS AddOvlySect(struct NLDR_OBJECT *hNldr, -+ struct OvlySect **pList, -+ struct DBLL_SectInfo *pSectInfo, bool *pExists, -+ u32 addr, u32 nBytes); -+static s32 fakeOvlyWrite(void *handle, u32 dspAddr, void *buf, u32 nBytes, -+ s32 mtype); -+static void FreeSects(struct NLDR_OBJECT *hNldr, struct OvlySect *pPhaseSects, -+ u16 nAlloc); -+static bool GetSymbolValue(void *handle, void *pArg, void *rmmHandle, -+ char *symName, struct DBLL_Symbol **sym); -+static DSP_STATUS LoadLib(struct NLDR_NODEOBJECT *hNldrNode, -+ struct LibNode *root, struct DSP_UUID uuid, -+ bool rootPersistent, struct DBLL_LibraryObj **libPath, -+ enum NLDR_PHASE phase, u16 depth); -+static DSP_STATUS LoadOvly(struct NLDR_NODEOBJECT *hNldrNode, -+ enum NLDR_PHASE phase); -+static DSP_STATUS RemoteAlloc(void **pRef, u16 memType, u32 size, -+ u32 align, u32 *dspAddr, -+ OPTIONAL s32 segmentId, OPTIONAL s32 req, -+ bool reserve); -+static DSP_STATUS RemoteFree(void **pRef, u16 space, u32 dspAddr, -+ u32 size, bool reserve); -+ -+static void UnloadLib(struct NLDR_NODEOBJECT *hNldrNode, struct LibNode *root); -+static void UnloadOvly(struct NLDR_NODEOBJECT *hNldrNode, -+ enum NLDR_PHASE phase); -+static bool findInPersistentLibArray(struct NLDR_NODEOBJECT *hNldrNode, -+ struct DBLL_LibraryObj *lib); -+static u32 findLcm(u32 a, u32 b); -+static u32 findGcf(u32 a, u32 b); -+ -+/* -+ * ======== NLDR_Allocate ======== -+ */ -+DSP_STATUS NLDR_Allocate(struct NLDR_OBJECT *hNldr, void *pPrivRef, -+ IN CONST struct DCD_NODEPROPS *pNodeProps, -+ OUT struct NLDR_NODEOBJECT **phNldrNode, -+ IN bool *pfPhaseSplit) -+{ -+ struct NLDR_NODEOBJECT *pNldrNode = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pNodeProps != NULL); -+ DBC_Require(phNldrNode != NULL); -+ DBC_Require(MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); -+ -+ GT_5trace(NLDR_debugMask, GT_ENTER, "NLDR_Allocate(0x%x, 0x%x, 0x%x, " -+ "0x%x, 0x%x)\n", hNldr, pPrivRef, pNodeProps, phNldrNode, -+ pfPhaseSplit); -+ -+ /* Initialize handle in case of failure */ -+ *phNldrNode = NULL; -+ /* Allocate node object */ -+ MEM_AllocObject(pNldrNode, struct NLDR_NODEOBJECT, NLDR_NODESIGNATURE); -+ -+ if (pNldrNode == NULL) { -+ GT_0trace(NLDR_debugMask, GT_6CLASS, "NLDR_Allocate: " -+ "Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } else { -+ pNldrNode->pfPhaseSplit = pfPhaseSplit; -+ pNldrNode->nPersLib = 0; -+ pNldrNode->pNldr = hNldr; -+ pNldrNode->pPrivRef = pPrivRef; -+ /* Save node's UUID. 
*/ -+ pNldrNode->uuid = pNodeProps->ndbProps.uiNodeID; -+ /* -+ * Determine if node is a dynamically loaded node from -+ * ndbProps. -+ */ -+ if (pNodeProps->usLoadType == NLDR_DYNAMICLOAD) { -+ /* Dynamic node */ -+ pNldrNode->fDynamic = true; -+ /* -+ * Extract memory requirements from ndbProps masks -+ */ -+ /* Create phase */ -+ pNldrNode->segId[CREATEDATAFLAGBIT] = (u16) -+ (pNodeProps->ulDataMemSegMask >> CREATEBIT) & -+ SEGMASK; -+ pNldrNode->codeDataFlagMask |= -+ ((pNodeProps->ulDataMemSegMask >> -+ (CREATEBIT + FLAGBIT)) & 1) << -+ CREATEDATAFLAGBIT; -+ pNldrNode->segId[CREATECODEFLAGBIT] = (u16) -+ (pNodeProps->ulCodeMemSegMask >> -+ CREATEBIT) & SEGMASK; -+ pNldrNode->codeDataFlagMask |= -+ ((pNodeProps->ulCodeMemSegMask >> -+ (CREATEBIT + FLAGBIT)) & 1) << -+ CREATECODEFLAGBIT; -+ /* Execute phase */ -+ pNldrNode->segId[EXECUTEDATAFLAGBIT] = (u16) -+ (pNodeProps->ulDataMemSegMask >> -+ EXECUTEBIT) & SEGMASK; -+ pNldrNode->codeDataFlagMask |= -+ ((pNodeProps->ulDataMemSegMask >> -+ (EXECUTEBIT + FLAGBIT)) & 1) << -+ EXECUTEDATAFLAGBIT; -+ pNldrNode->segId[EXECUTECODEFLAGBIT] = (u16) -+ (pNodeProps->ulCodeMemSegMask >> -+ EXECUTEBIT) & SEGMASK; -+ pNldrNode->codeDataFlagMask |= -+ ((pNodeProps->ulCodeMemSegMask >> -+ (EXECUTEBIT + FLAGBIT)) & 1) << -+ EXECUTECODEFLAGBIT; -+ /* Delete phase */ -+ pNldrNode->segId[DELETEDATAFLAGBIT] = (u16) -+ (pNodeProps->ulDataMemSegMask >> DELETEBIT) & -+ SEGMASK; -+ pNldrNode->codeDataFlagMask |= -+ ((pNodeProps->ulDataMemSegMask >> -+ (DELETEBIT + FLAGBIT)) & 1) << -+ DELETEDATAFLAGBIT; -+ pNldrNode->segId[DELETECODEFLAGBIT] = (u16) -+ (pNodeProps->ulCodeMemSegMask >> -+ DELETEBIT) & SEGMASK; -+ pNldrNode->codeDataFlagMask |= -+ ((pNodeProps->ulCodeMemSegMask >> -+ (DELETEBIT + FLAGBIT)) & 1) << -+ DELETECODEFLAGBIT; -+ } else { -+ /* Non-dynamically loaded nodes are part of the -+ * base image */ -+ pNldrNode->root.lib = hNldr->baseLib; -+ /* Check for overlay node */ -+ if (pNodeProps->usLoadType == NLDR_OVLYLOAD) -+ pNldrNode->fOverlay = true; -+ -+ } -+ *phNldrNode = (struct NLDR_NODEOBJECT *) pNldrNode; -+ } -+ /* Cleanup on failure */ -+ if (DSP_FAILED(status) && pNldrNode) -+ NLDR_Free((struct NLDR_NODEOBJECT *) pNldrNode); -+ -+ DBC_Ensure((DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle(((struct NLDR_NODEOBJECT *)(*phNldrNode)), -+ NLDR_NODESIGNATURE)) || (DSP_FAILED(status) && -+ *phNldrNode == NULL)); -+ return status; -+} -+ -+/* -+ * ======== NLDR_Create ======== -+ */ -+DSP_STATUS NLDR_Create(OUT struct NLDR_OBJECT **phNldr, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct NLDR_ATTRS *pAttrs) -+{ -+ struct COD_MANAGER *hCodMgr; /* COD manager */ -+ char *pszCoffBuf = NULL; -+ char szZLFile[COD_MAXPATHLENGTH]; -+ struct NLDR_OBJECT *pNldr = NULL; -+ struct DBLL_Attrs saveAttrs; -+ struct DBLL_Attrs newAttrs; -+ DBLL_Flags flags; -+ u32 ulEntry; -+ u16 nSegs = 0; -+ struct MemInfo *pMemInfo; -+ u32 ulLen = 0; -+ u32 ulAddr; -+ struct RMM_Segment *rmmSegs = NULL; -+ u16 i; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ DBC_Require(phNldr != NULL); -+ DBC_Require(hDevObject != NULL); -+ DBC_Require(pAttrs != NULL); -+ DBC_Require(pAttrs->pfnOvly != NULL); -+ DBC_Require(pAttrs->pfnWrite != NULL); -+ GT_3trace(NLDR_debugMask, GT_ENTER, "NLDR_Create(0x%x, 0x%x, 0x%x)\n", -+ phNldr, hDevObject, pAttrs); -+ /* Allocate dynamic loader object */ -+ MEM_AllocObject(pNldr, struct NLDR_OBJECT, NLDR_SIGNATURE); -+ if (pNldr) { -+ pNldr->hDevObject = hDevObject; -+ /* warning, lazy status checking alert! 
*/ -+ status = DEV_GetCodMgr(hDevObject, &hCodMgr); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ status = COD_GetLoader(hCodMgr, &pNldr->dbll); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ status = COD_GetBaseLib(hCodMgr, &pNldr->baseLib); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ status = COD_GetBaseName(hCodMgr, szZLFile, COD_MAXPATHLENGTH); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ status = DSP_SOK; -+ /* end lazy status checking */ -+ pNldr->usDSPMauSize = pAttrs->usDSPMauSize; -+ pNldr->usDSPWordSize = pAttrs->usDSPWordSize; -+ pNldr->dbllFxns = dbllFxns; -+ if (!(pNldr->dbllFxns.initFxn())) -+ status = DSP_EMEMORY; -+ -+ } else { -+ GT_0trace(NLDR_debugMask, GT_6CLASS, "NLDR_Create: " -+ "Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } -+ /* Create the DCD Manager */ -+ if (DSP_SUCCEEDED(status)) -+ status = DCD_CreateManager(NULL, &pNldr->hDcdMgr); -+ -+ /* Get dynamic loading memory sections from base lib */ -+ if (DSP_SUCCEEDED(status)) { -+ status = pNldr->dbllFxns.getSectFxn(pNldr->baseLib, DYNMEMSECT, -+ &ulAddr, &ulLen); -+ if (DSP_SUCCEEDED(status)) { -+ pszCoffBuf = MEM_Calloc(ulLen * pNldr->usDSPMauSize, -+ MEM_PAGED); -+ if (!pszCoffBuf) { -+ GT_0trace(NLDR_debugMask, GT_6CLASS, -+ "NLDR_Create: Memory " -+ "allocation failed\n"); -+ status = DSP_EMEMORY; -+ } -+ } else { -+ /* Ok to not have dynamic loading memory */ -+ status = DSP_SOK; -+ ulLen = 0; -+ GT_1trace(NLDR_debugMask, GT_6CLASS, -+ "NLDR_Create: DBLL_getSect " -+ "failed (no dynamic loading mem segments): " -+ "0x%lx\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status) && ulLen > 0) { -+ /* Read section containing dynamic load mem segments */ -+ status = pNldr->dbllFxns.readSectFxn(pNldr->baseLib, DYNMEMSECT, -+ pszCoffBuf, ulLen); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NLDR_debugMask, GT_6CLASS, -+ "NLDR_Create: DBLL_read Section" -+ "failed: 0x%lx\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status) && ulLen > 0) { -+ /* Parse memory segment data */ -+ nSegs = (u16)(*((u32 *)pszCoffBuf)); -+ if (nSegs > MAXMEMSEGS) { -+ GT_1trace(NLDR_debugMask, GT_6CLASS, -+ "NLDR_Create: Invalid number of " -+ "dynamic load mem segments: 0x%lx\n", nSegs); -+ status = DSP_ECORRUPTFILE; -+ } -+ } -+ /* Parse dynamic load memory segments */ -+ if (DSP_SUCCEEDED(status) && nSegs > 0) { -+ rmmSegs = MEM_Calloc(sizeof(struct RMM_Segment) * nSegs, -+ MEM_PAGED); -+ pNldr->segTable = MEM_Calloc(sizeof(u32) * nSegs, MEM_PAGED); -+ if (rmmSegs == NULL || pNldr->segTable == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ pNldr->nSegs = nSegs; -+ pMemInfo = (struct MemInfo *)(pszCoffBuf + -+ sizeof(u32)); -+ for (i = 0; i < nSegs; i++) { -+ rmmSegs[i].base = (pMemInfo + i)->base; -+ rmmSegs[i].length = (pMemInfo + i)->len; -+ rmmSegs[i].space = 0; -+ pNldr->segTable[i] = (pMemInfo + i)->type; -+#ifdef DEBUG -+ DBG_Trace(DBG_LEVEL7, -+ "** (proc) DLL MEMSEGMENT: %d, Base: 0x%x, " -+ "Length: 0x%x\n", i, rmmSegs[i].base, -+ rmmSegs[i].length); -+#endif -+ } -+ } -+ } -+ /* Create Remote memory manager */ -+ if (DSP_SUCCEEDED(status)) -+ status = RMM_create(&pNldr->rmm, rmmSegs, nSegs); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* set the alloc, free, write functions for loader */ -+ pNldr->dbllFxns.getAttrsFxn(pNldr->dbll, &saveAttrs); -+ newAttrs = saveAttrs; -+ newAttrs.alloc = (DBLL_AllocFxn) RemoteAlloc; -+ newAttrs.free = (DBLL_FreeFxn) RemoteFree; -+ newAttrs.symLookup = (DBLL_SymLookup) GetSymbolValue; -+ newAttrs.symHandle = pNldr; -+ newAttrs.write = (DBLL_WriteFxn) pAttrs->pfnWrite; -+ pNldr->ovlyFxn = pAttrs->pfnOvly; -+ 
pNldr->writeFxn = pAttrs->pfnWrite; -+ pNldr->dbllAttrs = newAttrs; -+ } -+ if (rmmSegs) -+ MEM_Free(rmmSegs); -+ -+ if (pszCoffBuf) -+ MEM_Free(pszCoffBuf); -+ -+ /* Get overlay nodes */ -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetBaseName(hCodMgr, szZLFile, COD_MAXPATHLENGTH); -+ /* lazy check */ -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ /* First count number of overlay nodes */ -+ status = DCD_GetObjects(pNldr->hDcdMgr, szZLFile, AddOvlyNode, -+ (void *) pNldr); -+ /* Now build table of overlay nodes */ -+ if (DSP_SUCCEEDED(status) && pNldr->nOvlyNodes > 0) { -+ /* Allocate table for overlay nodes */ -+ pNldr->ovlyTable = -+ MEM_Calloc(sizeof(struct OvlyNode) * pNldr->nOvlyNodes, -+ MEM_PAGED); -+ /* Put overlay nodes in the table */ -+ pNldr->nNode = 0; -+ status = DCD_GetObjects(pNldr->hDcdMgr, szZLFile, -+ AddOvlyNode, -+ (void *) pNldr); -+ } -+ } -+ /* Do a fake reload of the base image to get overlay section info */ -+ if (DSP_SUCCEEDED(status) && pNldr->nOvlyNodes > 0) { -+ saveAttrs.write = fakeOvlyWrite; -+ saveAttrs.logWrite = AddOvlyInfo; -+ saveAttrs.logWriteHandle = pNldr; -+ flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; -+ status = pNldr->dbllFxns.loadFxn(pNldr->baseLib, flags, -+ &saveAttrs, &ulEntry); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ *phNldr = (struct NLDR_OBJECT *) pNldr; -+ } else { -+ if (pNldr) -+ NLDR_Delete((struct NLDR_OBJECT *) pNldr); -+ -+ *phNldr = NULL; -+ } -+ /* FIXME:Temp. Fix. Must be removed */ -+ DBC_Ensure((DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle(((struct NLDR_OBJECT *)*phNldr), -+ NLDR_SIGNATURE)) -+ || (DSP_FAILED(status) && (*phNldr == NULL))); -+ return status; -+} -+ -+/* -+ * ======== NLDR_Delete ======== -+ */ -+void NLDR_Delete(struct NLDR_OBJECT *hNldr) -+{ -+ struct OvlySect *pSect; -+ struct OvlySect *pNext; -+ u16 i; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); -+ GT_1trace(NLDR_debugMask, GT_ENTER, "NLDR_Delete(0x%x)\n", hNldr); -+ hNldr->dbllFxns.exitFxn(); -+ if (hNldr->rmm) -+ RMM_delete(hNldr->rmm); -+ -+ if (hNldr->segTable) -+ MEM_Free(hNldr->segTable); -+ -+ if (hNldr->hDcdMgr) -+ DCD_DestroyManager(hNldr->hDcdMgr); -+ -+ /* Free overlay node information */ -+ if (hNldr->ovlyTable) { -+ for (i = 0; i < hNldr->nOvlyNodes; i++) { -+ pSect = hNldr->ovlyTable[i].pCreateSects; -+ while (pSect) { -+ pNext = pSect->pNextSect; -+ MEM_Free(pSect); -+ pSect = pNext; -+ } -+ pSect = hNldr->ovlyTable[i].pDeleteSects; -+ while (pSect) { -+ pNext = pSect->pNextSect; -+ MEM_Free(pSect); -+ pSect = pNext; -+ } -+ pSect = hNldr->ovlyTable[i].pExecuteSects; -+ while (pSect) { -+ pNext = pSect->pNextSect; -+ MEM_Free(pSect); -+ pSect = pNext; -+ } -+ pSect = hNldr->ovlyTable[i].pOtherSects; -+ while (pSect) { -+ pNext = pSect->pNextSect; -+ MEM_Free(pSect); -+ pSect = pNext; -+ } -+ } -+ MEM_Free(hNldr->ovlyTable); -+ } -+ MEM_FreeObject(hNldr); -+ DBC_Ensure(!MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); -+} -+ -+/* -+ * ======== NLDR_Exit ======== -+ * Discontinue usage of NLDR module. 
-+ */ -+void NLDR_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(NLDR_debugMask, GT_5CLASS, -+ "Entered NLDR_Exit, ref count: 0x%x\n", cRefs); -+ -+ if (cRefs == 0) { -+ RMM_exit(); -+ NLDR_debugMask.flags = NULL; -+ } -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== NLDR_Free ======== -+ */ -+void NLDR_Free(struct NLDR_NODEOBJECT *hNldrNode) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); -+ -+ GT_1trace(NLDR_debugMask, GT_ENTER, "NLDR_Free(0x%x)\n", hNldrNode); -+ -+ MEM_FreeObject(hNldrNode); -+} -+ -+/* -+ * ======== NLDR_GetFxnAddr ======== -+ */ -+DSP_STATUS NLDR_GetFxnAddr(struct NLDR_NODEOBJECT *hNldrNode, char *pstrFxn, -+ u32 *pulAddr) -+{ -+ struct DBLL_Symbol *pSym; -+ struct NLDR_OBJECT *hNldr; -+ DSP_STATUS status = DSP_SOK; -+ bool status1 = false; -+ s32 i = 0; -+ struct LibNode root = { NULL, 0, NULL }; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); -+ DBC_Require(pulAddr != NULL); -+ DBC_Require(pstrFxn != NULL); -+ GT_3trace(NLDR_debugMask, GT_ENTER, "NLDR_GetFxnAddr(0x%x, %s, 0x%x)\n", -+ hNldrNode, pstrFxn, pulAddr); -+ -+ hNldr = hNldrNode->pNldr; -+ /* Called from NODE_Create(), NODE_Delete(), or NODE_Run(). */ -+ if (hNldrNode->fDynamic && *hNldrNode->pfPhaseSplit) { -+ switch (hNldrNode->phase) { -+ case NLDR_CREATE: -+ root = hNldrNode->createLib; -+ break; -+ case NLDR_EXECUTE: -+ root = hNldrNode->executeLib; -+ break; -+ case NLDR_DELETE: -+ root = hNldrNode->deleteLib; -+ break; -+ default: -+ DBC_Assert(false); -+ break; -+ } -+ } else { -+ /* for Overlay nodes or non-split Dynamic nodes */ -+ root = hNldrNode->root; -+ } -+ status1 = hNldr->dbllFxns.getCAddrFxn(root.lib, pstrFxn, &pSym); -+ if (!status1) -+ status1 = hNldr->dbllFxns.getAddrFxn(root.lib, pstrFxn, &pSym); -+ -+ /* If symbol not found, check dependent libraries */ -+ if (!status1) { -+ for (i = 0; i < root.nDepLibs; i++) { -+ status1 = hNldr->dbllFxns.getAddrFxn(root.pDepLibs[i]. -+ lib, pstrFxn, &pSym); -+ if (!status1) { -+ status1 = hNldr->dbllFxns.getCAddrFxn(root. 
-+ pDepLibs[i].lib, pstrFxn, &pSym); -+ } -+ if (status1) { -+ /* Symbol found */ -+ break; -+ } -+ } -+ } -+ /* Check persistent libraries */ -+ if (!status1) { -+ for (i = 0; i < hNldrNode->nPersLib; i++) { -+ status1 = hNldr->dbllFxns.getAddrFxn(hNldrNode-> -+ persLib[i].lib, pstrFxn, &pSym); -+ if (!status1) { -+ status1 = -+ hNldr->dbllFxns.getCAddrFxn(hNldrNode-> -+ persLib[i].lib, pstrFxn, &pSym); -+ } -+ if (status1) { -+ /* Symbol found */ -+ break; -+ } -+ } -+ } -+ -+ if (status1) { -+ *pulAddr = pSym->value; -+ } else { -+ GT_1trace(NLDR_debugMask, GT_6CLASS, -+ "NLDR_GetFxnAddr: Symbol not found: " -+ "%s\n", pstrFxn); -+ status = DSP_ESYMBOL; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== NLDR_GetRmmManager ======== -+ * Given a NLDR object, retrieve RMM Manager Handle -+ */ -+DSP_STATUS NLDR_GetRmmManager(struct NLDR_OBJECT *hNldrObject, -+ OUT struct RMM_TargetObj **phRmmMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct NLDR_OBJECT *pNldrObject = hNldrObject; -+ DBC_Require(phRmmMgr != NULL); -+ GT_2trace(NLDR_debugMask, GT_ENTER, "NLDR_GetRmmManager(0x%x, 0x%x)\n", -+ hNldrObject, phRmmMgr); -+ if (MEM_IsValidHandle(hNldrObject, NLDR_SIGNATURE)) { -+ *phRmmMgr = pNldrObject->rmm; -+ } else { -+ *phRmmMgr = NULL; -+ status = DSP_EHANDLE; -+ GT_0trace(NLDR_debugMask, GT_7CLASS, -+ "NLDR_GetRmmManager:Invalid handle"); -+ } -+ -+ GT_2trace(NLDR_debugMask, GT_ENTER, "Exit NLDR_GetRmmManager: status " -+ "0x%x\n\tphRmmMgr: 0x%x\n", status, *phRmmMgr); -+ -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phRmmMgr != NULL) && -+ (*phRmmMgr == NULL))); -+ -+ return status; -+} -+ -+/* -+ * ======== NLDR_Init ======== -+ * Initialize the NLDR module. -+ */ -+bool NLDR_Init(void) -+{ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!NLDR_debugMask.flags); -+ GT_create(&NLDR_debugMask, "NL"); /* "NL" for NLdr */ -+ -+ RMM_init(); -+ } -+ -+ cRefs++; -+ -+ GT_1trace(NLDR_debugMask, GT_5CLASS, "NLDR_Init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure(cRefs > 0); -+ return true; -+} -+ -+/* -+ * ======== NLDR_Load ======== -+ */ -+DSP_STATUS NLDR_Load(struct NLDR_NODEOBJECT *hNldrNode, enum NLDR_PHASE phase) -+{ -+ struct NLDR_OBJECT *hNldr; -+ struct DSP_UUID libUUID; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); -+ -+ hNldr = hNldrNode->pNldr; -+ -+ GT_2trace(NLDR_debugMask, GT_ENTER, "NLDR_Load(0x%x, 0x%x)\n", -+ hNldrNode, phase); -+ -+ if (hNldrNode->fDynamic) { -+ hNldrNode->phase = phase; -+ -+ libUUID = hNldrNode->uuid; -+ -+ /* At this point, we may not know if node is split into -+ * different libraries. So we'll go ahead and load the -+ * library, and then save the pointer to the appropriate -+ * location after we know. 
*/ -+ -+ status = LoadLib(hNldrNode, &hNldrNode->root, libUUID, false, -+ hNldrNode->libPath, phase, 0); -+ -+ if (DSP_SUCCEEDED(status)) { -+ if (*hNldrNode->pfPhaseSplit) { -+ switch (phase) { -+ case NLDR_CREATE: -+ hNldrNode->createLib = hNldrNode->root; -+ break; -+ -+ case NLDR_EXECUTE: -+ hNldrNode->executeLib = hNldrNode->root; -+ break; -+ -+ case NLDR_DELETE: -+ hNldrNode->deleteLib = hNldrNode->root; -+ break; -+ -+ default: -+ DBC_Assert(false); -+ break; -+ } -+ } -+ } -+ } else { -+ if (hNldrNode->fOverlay) -+ status = LoadOvly(hNldrNode, phase); -+ -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== NLDR_Unload ======== -+ */ -+DSP_STATUS NLDR_Unload(struct NLDR_NODEOBJECT *hNldrNode, enum NLDR_PHASE phase) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct LibNode *pRootLib = NULL; -+ s32 i = 0; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNldrNode, NLDR_NODESIGNATURE)); -+ GT_2trace(NLDR_debugMask, GT_ENTER, "NLDR_Unload(0x%x, 0x%x)\n", -+ hNldrNode, phase); -+ if (hNldrNode != NULL) { -+ if (hNldrNode->fDynamic) { -+ if (*hNldrNode->pfPhaseSplit) { -+ switch (phase) { -+ case NLDR_CREATE: -+ pRootLib = &hNldrNode->createLib; -+ break; -+ case NLDR_EXECUTE: -+ pRootLib = &hNldrNode->executeLib; -+ break; -+ case NLDR_DELETE: -+ pRootLib = &hNldrNode->deleteLib; -+ /* Unload persistent libraries */ -+ for (i = 0; i < hNldrNode->nPersLib; -+ i++) { -+ UnloadLib(hNldrNode, -+ &hNldrNode->persLib[i]); -+ } -+ hNldrNode->nPersLib = 0; -+ break; -+ default: -+ DBC_Assert(false); -+ break; -+ } -+ } else { -+ /* Unload main library */ -+ pRootLib = &hNldrNode->root; -+ } -+ UnloadLib(hNldrNode, pRootLib); -+ } else { -+ if (hNldrNode->fOverlay) -+ UnloadOvly(hNldrNode, phase); -+ -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== AddOvlyInfo ======== -+ */ -+static DSP_STATUS AddOvlyInfo(void *handle, struct DBLL_SectInfo *sectInfo, -+ u32 addr, u32 nBytes) -+{ -+ char *pNodeName; -+ char *pSectName = (char *)sectInfo->name; -+ bool fExists = false; -+ char seps = ':'; -+ char *pch; -+ u16 i; -+ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)handle; -+ DSP_STATUS status = DSP_SOK; -+ -+ /* Is this an overlay section (load address != run address)? */ -+ if (sectInfo->loadAddr == sectInfo->runAddr) -+ goto func_end; -+ -+ /* Find the node it belongs to */ -+ for (i = 0; i < hNldr->nOvlyNodes; i++) { -+ pNodeName = hNldr->ovlyTable[i].pNodeName; -+ DBC_Require(pNodeName); -+ if (strncmp(pNodeName, pSectName + 1, -+ strlen(pNodeName)) == 0) { -+ /* Found the node */ -+ break; -+ } -+ } -+ if (!(i < hNldr->nOvlyNodes)) -+ goto func_end; -+ -+ /* Determine which phase this section belongs to */ -+ for (pch = pSectName + 1; *pch && *pch != seps; pch++) -+ ;; -+ -+ if (*pch) { -+ pch++; /* Skip over the ':' */ -+ if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) { -+ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. -+ pCreateSects, sectInfo, &fExists, addr, nBytes); -+ if (DSP_SUCCEEDED(status) && !fExists) -+ hNldr->ovlyTable[i].nCreateSects++; -+ -+ } else -+ if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) { -+ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. -+ pDeleteSects, sectInfo, &fExists, -+ addr, nBytes); -+ if (DSP_SUCCEEDED(status) && !fExists) -+ hNldr->ovlyTable[i].nDeleteSects++; -+ -+ } else -+ if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) { -+ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. 
-+ pExecuteSects, sectInfo, &fExists, -+ addr, nBytes); -+ if (DSP_SUCCEEDED(status) && !fExists) -+ hNldr->ovlyTable[i].nExecuteSects++; -+ -+ } else { -+ /* Put in "other" sectins */ -+ status = AddOvlySect(hNldr, &hNldr->ovlyTable[i]. -+ pOtherSects, sectInfo, &fExists, -+ addr, nBytes); -+ if (DSP_SUCCEEDED(status) && !fExists) -+ hNldr->ovlyTable[i].nOtherSects++; -+ -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== AddOvlyNode ========= -+ * Callback function passed to DCD_GetObjects. -+ */ -+static DSP_STATUS AddOvlyNode(struct DSP_UUID *pUuid, -+ enum DSP_DCDOBJTYPE objType, -+ IN void *handle) -+{ -+ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)handle; -+ char *pNodeName = NULL; -+ char *pBuf = NULL; -+ u32 uLen; -+ struct DCD_GENERICOBJ objDef; -+ DSP_STATUS status = DSP_SOK; -+ -+ if (objType != DSP_DCDNODETYPE) -+ goto func_end; -+ -+ status = DCD_GetObjectDef(hNldr->hDcdMgr, pUuid, objType, &objDef); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* If overlay node, add to the list */ -+ if (objDef.objData.nodeObj.usLoadType == NLDR_OVLYLOAD) { -+ if (hNldr->ovlyTable == NULL) { -+ hNldr->nOvlyNodes++; -+ } else { -+ /* Add node to table */ -+ hNldr->ovlyTable[hNldr->nNode].uuid = *pUuid; -+ DBC_Require(objDef.objData.nodeObj.ndbProps.acName); -+ uLen = strlen(objDef.objData.nodeObj.ndbProps.acName); -+ pNodeName = objDef.objData.nodeObj.ndbProps.acName; -+ pBuf = MEM_Calloc(uLen + 1, MEM_PAGED); -+ if (pBuf == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ strncpy(pBuf, pNodeName, uLen); -+ hNldr->ovlyTable[hNldr->nNode].pNodeName = pBuf; -+ hNldr->nNode++; -+ } -+ } -+ } -+ /* These were allocated in DCD_GetObjectDef */ -+ if (objDef.objData.nodeObj.pstrCreatePhaseFxn) -+ MEM_Free(objDef.objData.nodeObj.pstrCreatePhaseFxn); -+ -+ if (objDef.objData.nodeObj.pstrExecutePhaseFxn) -+ MEM_Free(objDef.objData.nodeObj.pstrExecutePhaseFxn); -+ -+ if (objDef.objData.nodeObj.pstrDeletePhaseFxn) -+ MEM_Free(objDef.objData.nodeObj.pstrDeletePhaseFxn); -+ -+ if (objDef.objData.nodeObj.pstrIAlgName) -+ MEM_Free(objDef.objData.nodeObj.pstrIAlgName); -+ -+func_end: -+ return status; -+} -+ -+/* -+ * ======== AddOvlySect ======== -+ */ -+static DSP_STATUS AddOvlySect(struct NLDR_OBJECT *hNldr, -+ struct OvlySect **pList, -+ struct DBLL_SectInfo *pSectInfo, bool *pExists, -+ u32 addr, u32 nBytes) -+{ -+ struct OvlySect *pNewSect = NULL; -+ struct OvlySect *pLastSect; -+ struct OvlySect *pSect; -+ DSP_STATUS status = DSP_SOK; -+ -+ pSect = pLastSect = *pList; -+ *pExists = false; -+ while (pSect) { -+ /* -+ * Make sure section has not already been added. Multiple -+ * 'write' calls may be made to load the section. 
-+ */ -+ if (pSect->loadAddr == addr) { -+ /* Already added */ -+ *pExists = true; -+ break; -+ } -+ pLastSect = pSect; -+ pSect = pSect->pNextSect; -+ } -+ -+ if (!pSect) { -+ /* New section */ -+ pNewSect = MEM_Calloc(sizeof(struct OvlySect), MEM_PAGED); -+ if (pNewSect == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ pNewSect->loadAddr = addr; -+ pNewSect->runAddr = pSectInfo->runAddr + -+ (addr - pSectInfo->loadAddr); -+ pNewSect->size = nBytes; -+ pNewSect->page = pSectInfo->type; -+ } -+ -+ /* Add to the list */ -+ if (DSP_SUCCEEDED(status)) { -+ if (*pList == NULL) { -+ /* First in the list */ -+ *pList = pNewSect; -+ } else { -+ pLastSect->pNextSect = pNewSect; -+ } -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== fakeOvlyWrite ======== -+ */ -+static s32 fakeOvlyWrite(void *handle, u32 dspAddr, void *buf, u32 nBytes, -+ s32 mtype) -+{ -+ return (s32)nBytes; -+} -+ -+/* -+ * ======== FreeSects ======== -+ */ -+static void FreeSects(struct NLDR_OBJECT *hNldr, struct OvlySect *pPhaseSects, -+ u16 nAlloc) -+{ -+ struct OvlySect *pSect = pPhaseSects; -+ u16 i = 0; -+ bool fRet; -+ -+ while (pSect && i < nAlloc) { -+ /* 'Deallocate' */ -+ /* segid - page not supported yet */ -+ /* Reserved memory */ -+ fRet = RMM_free(hNldr->rmm, 0, pSect->runAddr, pSect->size, -+ true); -+ DBC_Assert(fRet); -+ pSect = pSect->pNextSect; -+ i++; -+ } -+} -+ -+/* -+ * ======== GetSymbolValue ======== -+ * Find symbol in library's base image. If not there, check dependent -+ * libraries. -+ */ -+static bool GetSymbolValue(void *handle, void *pArg, void *rmmHandle, -+ char *name, struct DBLL_Symbol **sym) -+{ -+ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)handle; -+ struct NLDR_NODEOBJECT *hNldrNode = (struct NLDR_NODEOBJECT *)rmmHandle; -+ struct LibNode *root = (struct LibNode *)pArg; -+ u16 i; -+ bool status = false; -+ -+ /* check the base image */ -+ status = hNldr->dbllFxns.getAddrFxn(hNldr->baseLib, name, sym); -+ if (!status) -+ status = hNldr->dbllFxns.getCAddrFxn(hNldr->baseLib, name, sym); -+ -+ /* -+ * Check in root lib itself. If the library consists of -+ * multiple object files linked together, some symbols in the -+ * library may need to be resolved. -+ */ -+ if (!status) { -+ status = hNldr->dbllFxns.getAddrFxn(root->lib, name, sym); -+ if (!status) { -+ status = -+ hNldr->dbllFxns.getCAddrFxn(root->lib, name, sym); -+ } -+ } -+ -+ /* -+ * Check in root lib's dependent libraries, but not dependent -+ * libraries' dependents. -+ */ -+ if (!status) { -+ for (i = 0; i < root->nDepLibs; i++) { -+ status = hNldr->dbllFxns.getAddrFxn(root->pDepLibs[i]. -+ lib, name, sym); -+ if (!status) { -+ status = hNldr->dbllFxns.getCAddrFxn(root-> -+ pDepLibs[i].lib, name, sym); -+ } -+ if (status) { -+ /* Symbol found */ -+ break; -+ } -+ } -+ } -+ /* -+ * Check in persistent libraries -+ */ -+ if (!status) { -+ for (i = 0; i < hNldrNode->nPersLib; i++) { -+ status = hNldr->dbllFxns.getAddrFxn(hNldrNode-> -+ persLib[i].lib, name, sym); -+ if (!status) { -+ status = hNldr->dbllFxns.getCAddrFxn -+ (hNldrNode->persLib[i].lib, name, sym); -+ } -+ if (status) { -+ /* Symbol found */ -+ break; -+ } -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== LoadLib ======== -+ * Recursively load library and all its dependent libraries. The library -+ * we're loading is specified by a uuid. 
-+ */ -+static DSP_STATUS LoadLib(struct NLDR_NODEOBJECT *hNldrNode, -+ struct LibNode *root, struct DSP_UUID uuid, -+ bool rootPersistent, struct DBLL_LibraryObj **libPath, -+ enum NLDR_PHASE phase, u16 depth) -+{ -+ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; -+ u16 nLibs = 0; /* Number of dependent libraries */ -+ u16 nPLibs = 0; /* Number of persistent libraries */ -+ u16 nLoaded = 0; /* Number of dep. libraries loaded */ -+ u16 i; -+ u32 entry; -+ u32 dwBufSize = NLDR_MAXPATHLENGTH; -+ DBLL_Flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC; -+ struct DBLL_Attrs newAttrs; -+ char *pszFileName = NULL; -+ struct DSP_UUID *depLibUUIDs = NULL; -+ bool *persistentDepLibs = NULL; -+ DSP_STATUS status = DSP_SOK; -+ bool fStatus = false; -+ struct LibNode *pDepLib; -+ -+ if (depth > MAXDEPTH) { -+ /* Error */ -+ DBC_Assert(false); -+ } -+ root->lib = NULL; -+ /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */ -+ pszFileName = MEM_Calloc(DBLL_MAXPATHLENGTH, MEM_PAGED); -+ if (pszFileName == NULL) -+ status = DSP_EMEMORY; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Get the name of the library */ -+ if (depth == 0) { -+ status = DCD_GetLibraryName(hNldrNode->pNldr->hDcdMgr, -+ &uuid, pszFileName, &dwBufSize, phase, -+ hNldrNode->pfPhaseSplit); -+ } else { -+ /* Dependent libraries are registered with a phase */ -+ status = DCD_GetLibraryName(hNldrNode->pNldr->hDcdMgr, -+ &uuid, pszFileName, &dwBufSize, NLDR_NOPHASE, -+ NULL); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Open the library, don't load symbols */ -+ status = hNldr->dbllFxns.openFxn(hNldr->dbll, pszFileName, -+ DBLL_NOLOAD, &root->lib); -+ } -+ /* Done with file name */ -+ if (pszFileName) -+ MEM_Free(pszFileName); -+ -+ /* Check to see if library not already loaded */ -+ if (DSP_SUCCEEDED(status) && rootPersistent) { -+ fStatus = findInPersistentLibArray(hNldrNode, root->lib); -+ /* Close library */ -+ if (fStatus) { -+ hNldr->dbllFxns.closeFxn(root->lib); -+ return DSP_SALREADYLOADED; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Check for circular dependencies. */ -+ for (i = 0; i < depth; i++) { -+ if (root->lib == libPath[i]) { -+ /* This condition could be checked by a -+ * tool at build time. */ -+ status = DSP_EDYNLOAD; -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Add library to current path in dependency tree */ -+ libPath[depth] = root->lib; -+ depth++; -+ /* Get number of dependent libraries */ -+ status = DCD_GetNumDepLibs(hNldrNode->pNldr->hDcdMgr, &uuid, -+ &nLibs, &nPLibs, phase); -+ } -+ DBC_Assert(nLibs >= nPLibs); -+ if (DSP_SUCCEEDED(status)) { -+ if (!(*hNldrNode->pfPhaseSplit)) -+ nPLibs = 0; -+ -+ /* nLibs = #of dependent libraries */ -+ root->nDepLibs = nLibs - nPLibs; -+ if (nLibs > 0) { -+ depLibUUIDs = MEM_Calloc(sizeof(struct DSP_UUID) * -+ nLibs, MEM_PAGED); -+ persistentDepLibs = -+ MEM_Calloc(sizeof(bool) * nLibs, MEM_PAGED); -+ if (!depLibUUIDs || !persistentDepLibs) -+ status = DSP_EMEMORY; -+ -+ if (root->nDepLibs > 0) { -+ /* Allocate arrays for dependent lib UUIDs, -+ * lib nodes */ -+ root->pDepLibs = MEM_Calloc -+ (sizeof(struct LibNode) * -+ (root->nDepLibs), MEM_PAGED); -+ if (!(root->pDepLibs)) -+ status = DSP_EMEMORY; -+ -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Get the dependent library UUIDs */ -+ status = DCD_GetDepLibs(hNldrNode->pNldr-> -+ hDcdMgr, &uuid, nLibs, depLibUUIDs, -+ persistentDepLibs, phase); -+ } -+ } -+ } -+ -+ /* -+ * Recursively load dependent libraries. 
-+ */ -+ if (DSP_SUCCEEDED(status) && persistentDepLibs) { -+ for (i = 0; i < nLibs; i++) { -+ /* If root library is NOT persistent, and dep library -+ * is, then record it. If root library IS persistent, -+ * the deplib is already included */ -+ if (!rootPersistent && persistentDepLibs[i] && -+ *hNldrNode->pfPhaseSplit) { -+ if ((hNldrNode->nPersLib) > MAXLIBS) { -+ status = DSP_EDYNLOAD; -+ break; -+ } -+ -+ /* Allocate library outside of phase */ -+ pDepLib = &hNldrNode->persLib[hNldrNode-> -+ nPersLib]; -+ } else { -+ if (rootPersistent) -+ persistentDepLibs[i] = true; -+ -+ -+ /* Allocate library within phase */ -+ pDepLib = &root->pDepLibs[nLoaded]; -+ } -+ -+ if (depLibUUIDs) { -+ status = LoadLib(hNldrNode, pDepLib, -+ depLibUUIDs[i], -+ persistentDepLibs[i], libPath, -+ phase, -+ depth); -+ } else { -+ status = DSP_EMEMORY; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ if ((status != DSP_SALREADYLOADED) && -+ !rootPersistent && persistentDepLibs[i] && -+ *hNldrNode->pfPhaseSplit) { -+ (hNldrNode->nPersLib)++; -+ } else { -+ if (!persistentDepLibs[i] || -+ !(*hNldrNode->pfPhaseSplit)) { -+ nLoaded++; -+ } -+ } -+ } else { -+ break; -+ } -+ } -+ } -+ -+ /* Now we can load the root library */ -+ if (DSP_SUCCEEDED(status)) { -+ newAttrs = hNldr->dbllAttrs; -+ newAttrs.symArg = root; -+ newAttrs.rmmHandle = hNldrNode; -+ newAttrs.wHandle = hNldrNode->pPrivRef; -+ newAttrs.baseImage = false; -+ -+ status = hNldr->dbllFxns.loadFxn(root->lib, flags, &newAttrs, -+ &entry); -+ } -+ -+ /* -+ * In case of failure, unload any dependent libraries that -+ * were loaded, and close the root library. -+ * (Persistent libraries are unloaded from the very top) -+ */ -+ if (DSP_FAILED(status)) { -+ if (phase != NLDR_EXECUTE) { -+ for (i = 0; i < hNldrNode->nPersLib; i++) -+ UnloadLib(hNldrNode, &hNldrNode->persLib[i]); -+ -+ hNldrNode->nPersLib = 0; -+ } -+ for (i = 0; i < nLoaded; i++) -+ UnloadLib(hNldrNode, &root->pDepLibs[i]); -+ -+ if (root->lib) -+ hNldr->dbllFxns.closeFxn(root->lib); -+ -+ } -+ -+ /* Going up one node in the dependency tree */ -+ depth--; -+ -+ if (depLibUUIDs) { -+ MEM_Free(depLibUUIDs); -+ depLibUUIDs = NULL; -+ } -+ -+ if (persistentDepLibs) { -+ MEM_Free(persistentDepLibs); -+ persistentDepLibs = NULL; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== LoadOvly ======== -+ */ -+static DSP_STATUS LoadOvly(struct NLDR_NODEOBJECT *hNldrNode, -+ enum NLDR_PHASE phase) -+{ -+ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; -+ struct OvlyNode *pONode = NULL; -+ struct OvlySect *pPhaseSects = NULL; -+ struct OvlySect *pOtherSects = NULL; -+ u16 i; -+ u16 nAlloc = 0; -+ u16 nOtherAlloc = 0; -+ u16 *pRefCount = NULL; -+ u16 *pOtherRef = NULL; -+ u32 nBytes; -+ struct OvlySect *pSect; -+ DSP_STATUS status = DSP_SOK; -+ -+ /* Find the node in the table */ -+ for (i = 0; i < hNldr->nOvlyNodes; i++) { -+ if (IsEqualUUID(hNldrNode->uuid, hNldr->ovlyTable[i].uuid)) { -+ /* Found it */ -+ pONode = &(hNldr->ovlyTable[i]); -+ break; -+ } -+ } -+ -+ DBC_Assert(i < hNldr->nOvlyNodes); -+ switch (phase) { -+ case NLDR_CREATE: -+ pRefCount = &(pONode->createRef); -+ pOtherRef = &(pONode->otherRef); -+ pPhaseSects = pONode->pCreateSects; -+ pOtherSects = pONode->pOtherSects; -+ break; -+ -+ case NLDR_EXECUTE: -+ pRefCount = &(pONode->executeRef); -+ pPhaseSects = pONode->pExecuteSects; -+ break; -+ -+ case NLDR_DELETE: -+ pRefCount = &(pONode->deleteRef); -+ pPhaseSects = pONode->pDeleteSects; -+ break; -+ -+ default: -+ DBC_Assert(false); -+ break; -+ } -+ -+ DBC_Assert(pRefCount != NULL); -+ if 
(DSP_FAILED(status)) -+ goto func_end; -+ -+ if (pRefCount == NULL) -+ goto func_end; -+ -+ if (*pRefCount != 0) -+ goto func_end; -+ -+ /* 'Allocate' memory for overlay sections of this phase */ -+ pSect = pPhaseSects; -+ while (pSect) { -+ /* allocate */ /* page not supported yet */ -+ /* reserve */ /* align */ -+ status = RMM_alloc(hNldr->rmm, 0, pSect->size, 0, -+ &(pSect->runAddr), true); -+ if (DSP_SUCCEEDED(status)) { -+ pSect = pSect->pNextSect; -+ nAlloc++; -+ } else { -+ break; -+ } -+ } -+ if (pOtherRef && *pOtherRef == 0) { -+ /* 'Allocate' memory for other overlay sections -+ * (create phase) */ -+ if (DSP_SUCCEEDED(status)) { -+ pSect = pOtherSects; -+ while (pSect) { -+ /* page not supported */ /* align */ -+ /* reserve */ -+ status = RMM_alloc(hNldr->rmm, 0, pSect->size, -+ 0, &(pSect->runAddr), true); -+ if (DSP_SUCCEEDED(status)) { -+ pSect = pSect->pNextSect; -+ nOtherAlloc++; -+ } else { -+ break; -+ } -+ } -+ } -+ } -+ if (*pRefCount == 0) { -+ if (DSP_SUCCEEDED(status)) { -+ /* Load sections for this phase */ -+ pSect = pPhaseSects; -+ while (pSect && DSP_SUCCEEDED(status)) { -+ nBytes = (*hNldr->ovlyFxn)(hNldrNode->pPrivRef, -+ pSect->runAddr, pSect->loadAddr, -+ pSect->size, pSect->page); -+ if (nBytes != pSect->size) -+ status = DSP_EFAIL; -+ -+ pSect = pSect->pNextSect; -+ } -+ } -+ } -+ if (pOtherRef && *pOtherRef == 0) { -+ if (DSP_SUCCEEDED(status)) { -+ /* Load other sections (create phase) */ -+ pSect = pOtherSects; -+ while (pSect && DSP_SUCCEEDED(status)) { -+ nBytes = (*hNldr->ovlyFxn)(hNldrNode->pPrivRef, -+ pSect->runAddr, pSect->loadAddr, -+ pSect->size, pSect->page); -+ if (nBytes != pSect->size) -+ status = DSP_EFAIL; -+ -+ pSect = pSect->pNextSect; -+ } -+ } -+ } -+ if (DSP_FAILED(status)) { -+ /* 'Deallocate' memory */ -+ FreeSects(hNldr, pPhaseSects, nAlloc); -+ FreeSects(hNldr, pOtherSects, nOtherAlloc); -+ } -+func_end: -+ if (DSP_SUCCEEDED(status) && (pRefCount != NULL)) { -+ *pRefCount += 1; -+ if (pOtherRef) -+ *pOtherRef += 1; -+ -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== RemoteAlloc ======== -+ */ -+static DSP_STATUS RemoteAlloc(void **pRef, u16 space, u32 size, -+ u32 align, u32 *dspAddr, -+ OPTIONAL s32 segmentId, OPTIONAL s32 req, -+ bool reserve) -+{ -+ struct NLDR_NODEOBJECT *hNode = (struct NLDR_NODEOBJECT *)pRef; -+ struct NLDR_OBJECT *hNldr; -+ struct RMM_TargetObj *rmm; -+ u16 memPhaseBit = MAXFLAGS; -+ u16 segid = 0; -+ u16 i; -+ u16 memType; -+ u32 nWords; -+ struct RMM_Addr *pRmmAddr = (struct RMM_Addr *)dspAddr; -+ bool fReq = false; -+ DSP_STATUS status = DSP_EMEMORY; /* Set to fail */ -+ DBC_Require(MEM_IsValidHandle(hNode, NLDR_NODESIGNATURE)); -+ DBC_Require(space == DBLL_CODE || space == DBLL_DATA || -+ space == DBLL_BSS); -+ hNldr = hNode->pNldr; -+ rmm = hNldr->rmm; -+ /* Convert size to DSP words */ -+ nWords = (size + hNldr->usDSPWordSize - 1) / hNldr->usDSPWordSize; -+ /* Modify memory 'align' to account for DSP cache line size */ -+ align = findLcm(GEM_CACHE_LINE_SIZE, align); -+ GT_1trace(NLDR_debugMask, GT_7CLASS, -+ "RemoteAlloc: memory align to 0x%x \n", align); -+ if (segmentId != -1) { -+ pRmmAddr->segid = segmentId; -+ segid = segmentId; -+ fReq = req; -+ } else { -+ switch (hNode->phase) { -+ case NLDR_CREATE: -+ memPhaseBit = CREATEDATAFLAGBIT; -+ break; -+ case NLDR_DELETE: -+ memPhaseBit = DELETEDATAFLAGBIT; -+ break; -+ case NLDR_EXECUTE: -+ memPhaseBit = EXECUTEDATAFLAGBIT; -+ break; -+ default: -+ DBC_Assert(false); -+ break; -+ } -+ if (space == DBLL_CODE) -+ memPhaseBit++; -+ -+ if 
(memPhaseBit < MAXFLAGS) -+ segid = hNode->segId[memPhaseBit]; -+ -+ /* Determine if there is a memory loading requirement */ -+ if ((hNode->codeDataFlagMask >> memPhaseBit) & 0x1) -+ fReq = true; -+ -+ } -+ memType = (space == DBLL_CODE) ? DYNM_CODE : DYNM_DATA; -+ -+ /* Find an appropriate segment based on space */ -+ if (segid == NULLID) { -+ /* No memory requirements of preferences */ -+ DBC_Assert(!fReq); -+ goto func_cont; -+ } -+ if (segid <= MAXSEGID) { -+ DBC_Assert(segid < hNldr->nSegs); -+ /* Attempt to allocate from segid first. */ -+ pRmmAddr->segid = segid; -+ status = RMM_alloc(rmm, segid, nWords, align, dspAddr, false); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NLDR_debugMask, GT_6CLASS, -+ "RemoteAlloc:Unable allocate " -+ "from segment %d.\n", segid); -+ } -+ } else { -+ /* segid > MAXSEGID ==> Internal or external memory */ -+ DBC_Assert(segid == MEMINTERNALID || segid == MEMEXTERNALID); -+ /* Check for any internal or external memory segment, -+ * depending on segid.*/ -+ memType |= segid == MEMINTERNALID ? -+ DYNM_INTERNAL : DYNM_EXTERNAL; -+ for (i = 0; i < hNldr->nSegs; i++) { -+ if ((hNldr->segTable[i] & memType) != memType) -+ continue; -+ -+ status = RMM_alloc(rmm, i, nWords, align, dspAddr, -+ false); -+ if (DSP_SUCCEEDED(status)) { -+ /* Save segid for freeing later */ -+ pRmmAddr->segid = i; -+ break; -+ } -+ } -+ } -+func_cont: -+ /* Haven't found memory yet, attempt to find any segment that works */ -+ if (status == DSP_EMEMORY && !fReq) { -+ GT_0trace(NLDR_debugMask, GT_6CLASS, -+ "RemoteAlloc: Preferred segment " -+ "unavailable, trying another segment.\n"); -+ for (i = 0; i < hNldr->nSegs; i++) { -+ /* All bits of memType must be set */ -+ if ((hNldr->segTable[i] & memType) != memType) -+ continue; -+ -+ status = RMM_alloc(rmm, i, nWords, align, dspAddr, -+ false); -+ if (DSP_SUCCEEDED(status)) { -+ /* Save segid */ -+ pRmmAddr->segid = i; -+ break; -+ } -+ } -+ } -+ -+ return status; -+} -+ -+static DSP_STATUS RemoteFree(void **pRef, u16 space, u32 dspAddr, -+ u32 size, bool reserve) -+{ -+ struct NLDR_OBJECT *hNldr = (struct NLDR_OBJECT *)pRef; -+ struct RMM_TargetObj *rmm; -+ u32 nWords; -+ DSP_STATUS status = DSP_EMEMORY; /* Set to fail */ -+ -+ DBC_Require(MEM_IsValidHandle(hNldr, NLDR_SIGNATURE)); -+ -+ rmm = hNldr->rmm; -+ -+ /* Convert size to DSP words */ -+ nWords = (size + hNldr->usDSPWordSize - 1) / hNldr->usDSPWordSize; -+ -+ if (RMM_free(rmm, space, dspAddr, nWords, reserve)) -+ status = DSP_SOK; -+ -+ return status; -+} -+ -+/* -+ * ======== UnloadLib ======== -+ */ -+static void UnloadLib(struct NLDR_NODEOBJECT *hNldrNode, struct LibNode *root) -+{ -+ struct DBLL_Attrs newAttrs; -+ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; -+ u16 i; -+ -+ DBC_Assert(root != NULL); -+ -+ /* Unload dependent libraries */ -+ for (i = 0; i < root->nDepLibs; i++) -+ UnloadLib(hNldrNode, &root->pDepLibs[i]); -+ -+ root->nDepLibs = 0; -+ -+ newAttrs = hNldr->dbllAttrs; -+ newAttrs.rmmHandle = hNldr->rmm; -+ newAttrs.wHandle = hNldrNode->pPrivRef; -+ newAttrs.baseImage = false; -+ newAttrs.symArg = root; -+ -+ if (root->lib) { -+ /* Unload the root library */ -+ hNldr->dbllFxns.unloadFxn(root->lib, &newAttrs); -+ hNldr->dbllFxns.closeFxn(root->lib); -+ } -+ -+ /* Free dependent library list */ -+ if (root->pDepLibs) { -+ MEM_Free(root->pDepLibs); -+ root->pDepLibs = NULL; -+ } -+} -+ -+/* -+ * ======== UnloadOvly ======== -+ */ -+static void UnloadOvly(struct NLDR_NODEOBJECT *hNldrNode, enum NLDR_PHASE phase) -+{ -+ struct NLDR_OBJECT *hNldr = hNldrNode->pNldr; 
-+ struct OvlyNode *pONode = NULL; -+ struct OvlySect *pPhaseSects = NULL; -+ struct OvlySect *pOtherSects = NULL; -+ u16 i; -+ u16 nAlloc = 0; -+ u16 nOtherAlloc = 0; -+ u16 *pRefCount = NULL; -+ u16 *pOtherRef = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ /* Find the node in the table */ -+ for (i = 0; i < hNldr->nOvlyNodes; i++) { -+ if (IsEqualUUID(hNldrNode->uuid, hNldr->ovlyTable[i].uuid)) { -+ /* Found it */ -+ pONode = &(hNldr->ovlyTable[i]); -+ break; -+ } -+ } -+ -+ DBC_Assert(i < hNldr->nOvlyNodes); -+ switch (phase) { -+ case NLDR_CREATE: -+ pRefCount = &(pONode->createRef); -+ pPhaseSects = pONode->pCreateSects; -+ nAlloc = pONode->nCreateSects; -+ break; -+ case NLDR_EXECUTE: -+ pRefCount = &(pONode->executeRef); -+ pPhaseSects = pONode->pExecuteSects; -+ nAlloc = pONode->nExecuteSects; -+ break; -+ case NLDR_DELETE: -+ pRefCount = &(pONode->deleteRef); -+ pOtherRef = &(pONode->otherRef); -+ pPhaseSects = pONode->pDeleteSects; -+ /* 'Other' overlay sections are unloaded in the delete phase */ -+ pOtherSects = pONode->pOtherSects; -+ nAlloc = pONode->nDeleteSects; -+ nOtherAlloc = pONode->nOtherSects; -+ break; -+ default: -+ DBC_Assert(false); -+ break; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ DBC_Assert(pRefCount && (*pRefCount > 0)); -+ if (pRefCount && (*pRefCount > 0)) { -+ *pRefCount -= 1; -+ if (pOtherRef) { -+ DBC_Assert(*pOtherRef > 0); -+ *pOtherRef -= 1; -+ } -+ } -+ } -+ if (pRefCount && (*pRefCount == 0)) { -+ /* 'Deallocate' memory */ -+ FreeSects(hNldr, pPhaseSects, nAlloc); -+ } -+ if (pOtherRef && *pOtherRef == 0) -+ FreeSects(hNldr, pOtherSects, nOtherAlloc); -+ -+} -+ -+/* -+ * ======== findInPersistentLibArray ======== -+ */ -+static bool findInPersistentLibArray(struct NLDR_NODEOBJECT *hNldrNode, -+ struct DBLL_LibraryObj *lib) -+{ -+ s32 i = 0; -+ -+ for (i = 0; i < hNldrNode->nPersLib; i++) { -+ if (lib == hNldrNode->persLib[i].lib) -+ return true; -+ -+ } -+ -+ return false; -+} -+ -+/* -+ * ================ Find LCM (Least Common Multiplier === -+ */ -+static u32 findLcm(u32 a, u32 b) -+{ -+ u32 retVal; -+ -+ retVal = a * b / findGcf(a, b); -+ -+ return retVal; -+} -+ -+/* -+ * ================ Find GCF (Greatest Common Factor ) === -+ */ -+static u32 findGcf(u32 a, u32 b) -+{ -+ u32 c; -+ -+ /* Get the GCF (Greatest common factor between the numbers, -+ * using Euclidian Algo */ -+ while ((c = (a % b))) { -+ a = b; -+ b = c; -+ } -+ return b; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/node.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/node.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/node.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/node.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,3504 @@ -+/* -+ * node.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== node.c ======== -+ * -+ * Description: -+ * DSP/BIOS Bridge Node Manager. 
-+ * -+ * Public Functions: -+ * NODE_Allocate -+ * NODE_AllocMsgBuf -+ * NODE_ChangePriority -+ * NODE_Connect -+ * NODE_Create -+ * NODE_CreateMgr -+ * NODE_Delete -+ * NODE_DeleteMgr -+ * NODE_EnumNodes -+ * NODE_Exit -+ * NODE_FreeMsgBuf -+ * NODE_GetAttr -+ * NODE_GetChannelId -+ * NODE_GetMessage -+ * NODE_GetStrmMgr -+ * NODE_Init -+ * NODE_OnExit -+ * NODE_Pause -+ * NODE_PutMessage -+ * NODE_RegisterNotify -+ * NODE_Run -+ * NODE_Terminate -+ * -+ *! Revision History: -+ *! ================= -+ *! 12-Apr-2004 hp Compile IVA only for 24xx -+ *! 09-Feb-2004 vp Updated to support IVA. -+ *! 07-Apr-2003 map Eliminated references to old DLDR -+ *! 26-Mar-2003 vp Commented the call to DSP deep sleep in Node_Delete -+ *! function. -+ *! 18-Feb-2003 vp Code review updates. -+ *! 06-Feb-2003 kc Fixed FreeStream to release streams correctly. -+ *! 23-Jan-2003 map Removed call to DISP_DoCinit within Write() -+ *! 03-Jan-2003 map Only unload code after phase has executed if -+ *! overlay or split dynload phases -+ *! 18-Oct-2002 vp Ported to Linux platform. -+ *! 06-Nov-2002 map Fixed NODE_Run on NODE_PAUSED bug -+ *! 12-Oct-2002 map Fixed DeleteNode bug in NODE_Create -+ *! 11-Sep-2002 rr DeleteNode frees the memory for strmConnect and dcd obj -+ *! 29-Aug-2002 map Modified Ovly and Write to use ARM-side copy -+ *! 22-May-2002 sg Changed use of cbData for PWR calls. -+ *! 17-May-2002 jeh Removed LoadLoaderFxns(). Get address of RMS_cinit() -+ *! function. Call DISP_DoCinit() from Write(), if .cinit. -+ *! 13-May-2002 sg Added timeout to wake/sleep calls. -+ *! 02-May-2002 sg Added wake/sleep of DSP to support "nap" mode. -+ *! 18-Apr-2002 jeh Use dynamic loader if compile flag is set. -+ *! 13-Feb-2002 jeh Get uSysStackSize from DSP_NDBPROPS. -+ *! 07-Jan-2002 ag STRMMODE_ZEROCOPY(shared memory buffer swap) enabled. -+ *! 17-Dec-2001 ag STRMMODE_RDMA(DDMA) enabled. -+ *! 12-Dec-2001 ag Check for valid stream mode in NODE_Connect(). -+ *! 04-Dec-2001 jeh Check for node sufficiently connected in NODE_Create(). -+ *! 15-Nov-2001 jeh Removed DBC_Require(pNode->hXlator != NULL) from -+ *! NODE_AllocMsgBuf(), and check node type != NODE_DEVICE. -+ *! 11-Sep-2001 ag Zero-copy messaging support. -+ *! 28-Aug-2001 jeh Overlay/dynamic loader infrastructure added. Removed -+ *! NODE_GetDispatcher, excess node states. -+ *! 07-Aug-2001 jeh Removed critical section for dispatcher. -+ *! 26-Jul-2001 jeh Get ZL dll name through CFG. -+ *! 05-Jun-2001 jeh Assume DSP_STRMATTRS.uBufsize in GPP bytes. -+ *! 11-May-2001 jeh Some code review cleanup. -+ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. -+ *! 15-Dec-2000 sg Convert IALG_Fxn address from byte addr to word addr. -+ *! 04-Dec-2000 jeh Call MSG Get and Put functions. -+ *! 04-Dec-2000 ag Added SM support for node messaging. -+ *! 10-Nov-2000 rr: NODE_MIN/MAX Priority is defined in dspdefs.h. -+ *! 27-Oct-2000 jeh Added NODE_AllocMsgBuf(), NODE_FreeMsgBuf(). -+ *! 11-Oct-2000 jeh Changed NODE_EnumNodeInfo to NODE_EnumNodes. Added -+ *! NODE_CloseOrphans(). Remove NODE_RegisterNotifyAllNodes -+ *! 19-Jun-2000 jeh Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Link Driver */ -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+#ifdef DEBUG -+#include -+#include -+#endif -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+/* Static/Dynamic Loader includes */ -+#include -+#include -+ -+#ifndef RES_CLEANUP_DISABLE -+#include -+#include -+#include -+#include -+#endif -+ -+ -+#define NODE_SIGNATURE 0x45444f4e /* "EDON" */ -+#define NODEMGR_SIGNATURE 0x52474d4e /* "RGMN" */ -+ -+#define HOSTPREFIX "/host" -+#define PIPEPREFIX "/dbpipe" -+ -+#define MaxInputs(h) ((h)->dcdProps.objData.nodeObj.ndbProps.uNumInputStreams) -+#define MaxOutputs(h) ((h)->dcdProps.objData.nodeObj.ndbProps.uNumOutputStreams) -+ -+#define NODE_GetPriority(h) ((h)->nPriority) -+#define NODE_SetPriority(hNode, nPriority) ((hNode)->nPriority = nPriority) -+#define NODE_SetState(hNode, state) ((hNode)->nState = state) -+ -+#define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */ -+#define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */ -+ -+#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN) -+#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN) -+ -+#define MAXDEVNAMELEN 32 /* DSP_NDBPROPS.acName size */ -+#define CREATEPHASE 1 -+#define EXECUTEPHASE 2 -+#define DELETEPHASE 3 -+ -+/* Define default STRM parameters */ -+/* -+ * TBD: Put in header file, make global DSP_STRMATTRS with defaults, -+ * or make defaults configurable. 
-+ */ -+#define DEFAULTBUFSIZE 32 -+#define DEFAULTNBUFS 2 -+#define DEFAULTSEGID 0 -+#define DEFAULTALIGNMENT 0 -+#define DEFAULTTIMEOUT 10000 -+ -+#define RMSQUERYSERVER 0 -+#define RMSCONFIGURESERVER 1 -+#define RMSCREATENODE 2 -+#define RMSEXECUTENODE 3 -+#define RMSDELETENODE 4 -+#define RMSCHANGENODEPRIORITY 5 -+#define RMSREADMEMORY 6 -+#define RMSWRITEMEMORY 7 -+#define RMSCOPY 8 -+#define MAXTIMEOUT 2000 -+ -+#define NUMRMSFXNS 9 -+ -+#define PWR_TIMEOUT 500 /* default PWR timeout in msec */ -+ -+#define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Address */ -+ -+/* -+ * ======== NODE_MGR ======== -+ */ -+struct NODE_MGR { -+ u32 dwSignature; /* For object validation */ -+ struct DEV_OBJECT *hDevObject; /* Device object */ -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ -+ struct DCD_MANAGER *hDcdMgr; /* Proc/Node data manager */ -+ struct DISP_OBJECT *hDisp; /* Node dispatcher */ -+ struct LST_LIST *nodeList; /* List of all allocated nodes */ -+ u32 uNumNodes; /* Number of nodes in nodeList */ -+ u32 uNumCreated; /* Number of nodes *created* on DSP */ -+ struct GB_TMap *pipeMap; /* Pipe connection bit map */ -+ struct GB_TMap *pipeDoneMap; /* Pipes that are half free */ -+ struct GB_TMap *chnlMap; /* Channel allocation bit map */ -+ struct GB_TMap *dmaChnlMap; /* DMA Channel allocation bit map */ -+ struct GB_TMap *zChnlMap; /* Zero-Copy Channel alloc bit map */ -+ struct NTFY_OBJECT *hNtfy; /* Manages registered notifications */ -+ struct SYNC_CSOBJECT *hSync; /* For critical sections */ -+ u32 ulFxnAddrs[NUMRMSFXNS]; /* RMS function addresses */ -+ struct MSG_MGR *hMsg; -+ -+ /* Processor properties needed by Node Dispatcher */ -+ u32 ulNumChnls; /* Total number of channels */ -+ u32 ulChnlOffset; /* Offset of chnl ids rsvd for RMS */ -+ u32 ulChnlBufSize; /* Buffer size for data to RMS */ -+ DSP_PROCFAMILY procFamily; /* eg, 5000 */ -+ DSP_PROCTYPE procType; /* eg, 5510 */ -+ u32 uDSPWordSize; /* Size of DSP word on host bytes */ -+ u32 uDSPDataMauSize; /* Size of DSP data MAU */ -+ u32 uDSPMauSize; /* Size of MAU */ -+ s32 nMinPri; /* Minimum runtime priority for node */ -+ s32 nMaxPri; /* Maximum runtime priority for node */ -+ -+ struct STRM_MGR *hStrmMgr; /* STRM manager */ -+ -+ /* Loader properties */ -+ struct NLDR_OBJECT *hNldr; /* Handle to loader */ -+ struct NLDR_FXNS nldrFxns; /* Handle to loader functions */ -+ bool fLoaderInit; /* Loader Init function succeeded? */ -+}; -+ -+/* -+ * ======== CONNECTTYPE ======== -+ */ -+enum CONNECTTYPE { -+ NOTCONNECTED = 0, -+ NODECONNECT, -+ HOSTCONNECT, -+ DEVICECONNECT, -+} ; -+ -+/* -+ * ======== STREAM ======== -+ */ -+struct STREAM { -+ enum CONNECTTYPE type; /* Type of stream connection */ -+ u32 devId; /* pipe or channel id */ -+}; -+ -+/* -+ * ======== NODE_OBJECT ======== -+ */ -+struct NODE_OBJECT { -+ struct LST_ELEM listElem; -+ u32 dwSignature; /* For object validation */ -+ struct NODE_MGR *hNodeMgr; /* The manager of this node */ -+ struct PROC_OBJECT *hProcessor; /* Back pointer to processor */ -+ struct DSP_UUID nodeId; /* Node's ID */ -+ s32 nPriority; /* Node's current priority */ -+ u32 uTimeout; /* Timeout for blocking NODE calls */ -+ u32 uHeapSize; /* Heap Size */ -+ u32 uDSPHeapVirtAddr; /* Heap Size */ -+ u32 uGPPHeapVirtAddr; /* Heap Size */ -+ enum NODE_TYPE nType; /* Type of node: message, task, etc */ -+ enum NODE_STATE nState; /* NODE_ALLOCATED, NODE_CREATED, ... 
*/ -+ u32 uNumInputs; /* Current number of inputs */ -+ u32 uNumOutputs; /* Current number of outputs */ -+ u32 uMaxInputIndex; /* Current max input stream index */ -+ u32 uMaxOutputIndex; /* Current max output stream index */ -+ struct STREAM *inputs; /* Node's input streams */ -+ struct STREAM *outputs; /* Node's output streams */ -+ struct NODE_CREATEARGS createArgs; /* Args for node create function */ -+ NODE_ENV nodeEnv; /* Environment returned by RMS */ -+ struct DCD_GENERICOBJ dcdProps; /* Node properties from DCD */ -+ struct DSP_CBDATA *pArgs; /* Optional args to pass to node */ -+ struct NTFY_OBJECT *hNtfy; /* Manages registered notifications */ -+ char *pstrDevName; /* device name, if device node */ -+ struct SYNC_OBJECT *hSyncDone; /* Synchronize NODE_Terminate */ -+ s32 nExitStatus; /* execute function return status */ -+ -+ /* Information needed for NODE_GetAttr() */ -+ DSP_HNODE hDeviceOwner; /* If dev node, task that owns it */ -+ u32 uNumGPPInputs; /* Current # of from GPP streams */ -+ u32 uNumGPPOutputs; /* Current # of to GPP streams */ -+ /* Current stream connections */ -+ struct DSP_STREAMCONNECT *streamConnect; -+ -+ /* Message queue */ -+ struct MSG_QUEUE *hMsgQueue; -+ -+ /* These fields used for SM messaging */ -+ struct CMM_XLATOROBJECT *hXlator; /* Node's SM address translator */ -+ -+ /* Handle to pass to dynamic loader */ -+ struct NLDR_NODEOBJECT *hNldrNode; -+ bool fLoaded; /* Code is (dynamically) loaded */ -+ bool fPhaseSplit; /* Phases split in many libs or ovly */ -+ -+} ; -+ -+/* Default buffer attributes */ -+static struct DSP_BUFFERATTR NODE_DFLTBUFATTRS = { -+ 0, /* cbStruct */ -+ 1, /* uSegment */ -+ 0, /* uAlignment */ -+}; -+ -+static void DeleteNode(struct NODE_OBJECT *hNode, -+ struct PROCESS_CONTEXT *pr_ctxt); -+static void DeleteNodeMgr(struct NODE_MGR *hNodeMgr); -+static void FillStreamConnect(struct NODE_OBJECT *hNode1, -+ struct NODE_OBJECT *hNode2, u32 uStream1, -+ u32 uStream2); -+static void FillStreamDef(struct NODE_OBJECT *hNode, -+ struct NODE_STRMDEF *pstrmDef, -+ struct DSP_STRMATTR *pAttrs); -+static void FreeStream(struct NODE_MGR *hNodeMgr, struct STREAM stream); -+static DSP_STATUS GetFxnAddress(struct NODE_OBJECT *hNode, u32 *pulFxnAddr, -+ u32 uPhase); -+static DSP_STATUS GetNodeProps(struct DCD_MANAGER *hDcdMgr, -+ struct NODE_OBJECT *hNode, -+ CONST struct DSP_UUID *pNodeId, -+ struct DCD_GENERICOBJ *pdcdProps); -+static DSP_STATUS GetProcProps(struct NODE_MGR *hNodeMgr, -+ struct DEV_OBJECT *hDevObject); -+static DSP_STATUS GetRMSFxns(struct NODE_MGR *hNodeMgr); -+static u32 Ovly(void *pPrivRef, u32 ulDspRunAddr, u32 ulDspLoadAddr, -+ u32 ulNumBytes, u32 nMemSpace); -+static u32 Write(void *pPrivRef, u32 ulDspAddr, void *pBuf, -+ u32 ulNumBytes, u32 nMemSpace); -+ -+#if GT_TRACE -+static struct GT_Mask NODE_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+#ifdef DSP_DMM_DEBUG -+extern u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr); -+#endif -+ -+static u32 cRefs; /* module reference count */ -+ -+/* Dynamic loader functions. 
*/ -+static struct NLDR_FXNS nldrFxns = { -+ NLDR_Allocate, -+ NLDR_Create, -+ NLDR_Delete, -+ NLDR_Exit, -+ NLDR_Free, -+ NLDR_GetFxnAddr, -+ NLDR_Init, -+ NLDR_Load, -+ NLDR_Unload, -+}; -+ -+enum NODE_STATE NODE_GetState(HANDLE hNode) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ if (!MEM_IsValidHandle(pNode, NODE_SIGNATURE)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_GetState:hNode 0x%x\n", pNode); -+ return -1; -+ } else -+ return pNode->nState; -+ -+} -+ -+/* -+ * ======== NODE_Allocate ======== -+ * Purpose: -+ * Allocate GPP resources to manage a node on the DSP. -+ */ -+DSP_STATUS NODE_Allocate(struct PROC_OBJECT *hProcessor, -+ IN CONST struct DSP_UUID *pNodeId, -+ OPTIONAL IN CONST struct DSP_CBDATA *pArgs, -+ OPTIONAL IN CONST struct DSP_NODEATTRIN *pAttrIn, -+ OUT struct NODE_OBJECT **phNode, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ struct NODE_MGR *hNodeMgr; -+ struct DEV_OBJECT *hDevObject; -+ struct NODE_OBJECT *pNode = NULL; -+ enum NODE_TYPE nodeType = NODE_TASK; -+ struct NODE_MSGARGS *pmsgArgs; -+ struct NODE_TASKARGS *ptaskArgs; -+ u32 uNumStreams; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_OBJECT *hCmmMgr = NULL; /* Shared memory manager hndl */ -+ u32 procId; -+ char *label; -+ u32 pulValue; -+ u32 dynextBase; -+ u32 offSet = 0; -+ u32 ulStackSegAddr, ulStackSegVal; -+ u32 ulGppMemBase; -+ struct CFG_HOSTRES hostRes; -+ u32 pMappedAddr = 0; -+ u32 mapAttrs = 0x0; -+ struct DSP_PROCESSORSTATE procStatus; -+#ifdef DSP_DMM_DEBUG -+ struct DMM_OBJECT *hDmmMgr; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+#endif -+ -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE nodeRes; -+#endif -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hProcessor != NULL); -+ DBC_Require(phNode != NULL); -+ DBC_Require(pNodeId != NULL); -+ -+ GT_5trace(NODE_debugMask, GT_ENTER, "NODE_Allocate: \thProcessor: " -+ "0x%x\tpNodeId: 0x%x\tpArgs: 0x%x\tpAttrIn: " -+ "0x%x\tphNode: 0x%x\n", hProcessor, pNodeId, pArgs, pAttrIn, -+ phNode); -+ -+ *phNode = NULL; -+ -+ status = PROC_GetProcessorId(hProcessor, &procId); -+ -+ status = PROC_GetDevObject(hProcessor, &hDevObject); -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetNodeManager(hDevObject, &hNodeMgr); -+ if (hNodeMgr == NULL) -+ status = DSP_EFAIL; -+ -+ } -+ if (procId != DSP_UNIT) -+ goto func_cont; -+ -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in error state then don't attempt -+ to send the message */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: proc Status 0x%x\n", -+ procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ -+ /* Assuming that 0 is not a valid function address */ -+ if (hNodeMgr->ulFxnAddrs[0] == 0) { -+ /* No RMS on target - we currently can't handle this */ -+ GT_0trace(NODE_debugMask, GT_5CLASS, "No RMS functions in base " -+ "image. 
Node allocation fails.\n"); -+ status = DSP_EFAIL; -+ } else { -+ /* Validate pAttrIn fields, if non-NULL */ -+ if (pAttrIn) { -+ /* Check if pAttrIn->iPriority is within range */ -+ if (pAttrIn->iPriority < hNodeMgr->nMinPri || -+ pAttrIn->iPriority > hNodeMgr->nMaxPri) -+ status = DSP_ERANGE; -+ } -+ } -+func_cont: -+ /* Allocate node object and fill in */ -+ if (DSP_FAILED(status)) -+ goto func_cont2; -+ -+ MEM_AllocObject(pNode, struct NODE_OBJECT, NODE_SIGNATURE); -+ if (pNode == NULL) { -+ status = DSP_EMEMORY; -+ goto func_cont1; -+ } -+ pNode->hNodeMgr = hNodeMgr; -+ /* This critical section protects GetNodeProps */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (procId != DSP_UNIT) -+ goto func_cont3; -+ -+ /* Get DSP_NDBPROPS from node database */ -+ status = GetNodeProps(hNodeMgr->hDcdMgr, pNode, pNodeId, -+ &(pNode->dcdProps)); -+ if (DSP_FAILED(status)) -+ goto func_cont3; -+ -+ pNode->nodeId = *pNodeId; -+ pNode->hProcessor = hProcessor; -+ pNode->nType = pNode->dcdProps.objData.nodeObj.ndbProps.uNodeType; -+ pNode->uTimeout = pNode->dcdProps.objData.nodeObj.ndbProps.uTimeout; -+ pNode->nPriority = pNode->dcdProps.objData.nodeObj.ndbProps.iPriority; -+ -+ /* Currently only C64 DSP builds support Node Dynamic * heaps */ -+ /* Allocate memory for node heap */ -+ pNode->createArgs.asa.taskArgs.uHeapSize = 0; -+ pNode->createArgs.asa.taskArgs.uDSPHeapAddr = 0; -+ pNode->createArgs.asa.taskArgs.uDSPHeapResAddr = 0; -+ pNode->createArgs.asa.taskArgs.uGPPHeapAddr = 0; -+ if (!pAttrIn) -+ goto func_cont3; -+ -+ /* Check if we have a user allocated node heap */ -+ if (!(pAttrIn->pGPPVirtAddr)) -+ goto func_cont3; -+ -+ /* check for page aligned Heap size */ -+ if (((pAttrIn->uHeapSize) & (PG_SIZE_4K - 1))) { -+ GT_1trace(NODE_debugMask, GT_7CLASS, -+ "NODE_Allocate: node heap page size" -+ " not aligned to 4K page, size=0x%x \n", -+ pAttrIn->uHeapSize); -+ status = DSP_EINVALIDARG; -+ } else { -+ pNode->createArgs.asa.taskArgs.uHeapSize = pAttrIn->uHeapSize; -+ pNode->createArgs.asa.taskArgs.uGPPHeapAddr = -+ (u32)pAttrIn->pGPPVirtAddr; -+ } -+ if (DSP_FAILED(status)) -+ goto func_cont3; -+ -+ status = PROC_ReserveMemory(hProcessor, -+ pNode->createArgs.asa.taskArgs.uHeapSize + PAGE_SIZE, -+ (void **)&(pNode->createArgs.asa.taskArgs. 
-+ uDSPHeapResAddr)); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate:Failed to reserve " -+ "memory for Heap: 0x%x\n", status); -+ } else { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: DSPProcessor_Reserve" -+ " Memory successful: 0x%x\n", status); -+ } -+#ifdef DSP_DMM_DEBUG -+ status = DMM_GetHandle(pProcObject, &hDmmMgr); -+ if (DSP_SUCCEEDED(status)) -+ DMM_MemMapDump(hDmmMgr); -+#endif -+ if (DSP_FAILED(status)) -+ goto func_cont3; -+ -+ mapAttrs |= DSP_MAPLITTLEENDIAN; -+ mapAttrs |= DSP_MAPELEMSIZE32; -+ mapAttrs |= DSP_MAPVIRTUALADDR; -+ status = PROC_Map(hProcessor, (void *)pAttrIn->pGPPVirtAddr, -+ pNode->createArgs.asa.taskArgs.uHeapSize, -+ (void *)pNode->createArgs.asa.taskArgs.uDSPHeapResAddr, -+ (void **)&pMappedAddr, mapAttrs, pr_ctxt); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed to map memory" -+ " for Heap: 0x%x\n", status); -+ } else { -+ pNode->createArgs.asa.taskArgs.uDSPHeapAddr = -+ (u32) pMappedAddr; -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate:DSPProcessor_Map" -+ " successful: 0x%x\n", status); -+ } -+ -+func_cont3: -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+func_cont1: -+ if (pAttrIn != NULL) { -+ /* Overrides of NBD properties */ -+ pNode->uTimeout = pAttrIn->uTimeout; -+ pNode->nPriority = pAttrIn->iPriority; -+ } -+func_cont2: -+ /* Create object to manage notifications */ -+ if (DSP_SUCCEEDED(status)) -+ status = NTFY_Create(&pNode->hNtfy); -+ -+ if (DSP_SUCCEEDED(status)) { -+ nodeType = NODE_GetType(pNode); -+ /* Allocate DSP_STREAMCONNECT array for device, task, and -+ * dais socket nodes. */ -+ if (nodeType != NODE_MESSAGE) { -+ uNumStreams = MaxInputs(pNode) + MaxOutputs(pNode); -+ pNode->streamConnect = MEM_Calloc(uNumStreams * -+ sizeof(struct DSP_STREAMCONNECT), -+ MEM_PAGED); -+ if (uNumStreams > 0 && pNode->streamConnect == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ if (DSP_SUCCEEDED(status) && (nodeType == NODE_TASK || -+ nodeType == NODE_DAISSOCKET)) { -+ /* Allocate arrays for maintainig stream connections */ -+ pNode->inputs = -+ MEM_Calloc(MaxInputs(pNode) * -+ sizeof(struct STREAM), MEM_PAGED); -+ pNode->outputs = -+ MEM_Calloc(MaxOutputs(pNode) * -+ sizeof(struct STREAM), MEM_PAGED); -+ ptaskArgs = &(pNode->createArgs.asa.taskArgs); -+ ptaskArgs->strmInDef = -+ MEM_Calloc(MaxInputs(pNode) * -+ sizeof(struct NODE_STRMDEF), -+ MEM_PAGED); -+ ptaskArgs->strmOutDef = -+ MEM_Calloc(MaxOutputs(pNode) * -+ sizeof(struct NODE_STRMDEF), -+ MEM_PAGED); -+ if ((MaxInputs(pNode) > 0 && (pNode->inputs == NULL || -+ ptaskArgs->strmInDef == NULL)) || -+ (MaxOutputs(pNode) > 0 && (pNode->outputs == NULL || -+ ptaskArgs->strmOutDef == NULL))) -+ status = DSP_EMEMORY; -+ } -+ } -+ if (DSP_SUCCEEDED(status) && (nodeType != NODE_DEVICE)) { -+ /* Create an event that will be posted when RMS_EXIT is -+ * received. 
*/ -+ status = SYNC_OpenEvent(&pNode->hSyncDone, NULL); -+ if (DSP_SUCCEEDED(status)) { -+ /*Get the shared mem mgr for this nodes dev object */ -+ status = CMM_GetHandle(hProcessor, &hCmmMgr); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed to" -+ " get CMM Mgr handle: 0x%x\n", status); -+ } else { -+ /* Allocate a SM addr translator for this node -+ * w/ deflt attr */ -+ status = CMM_XlatorCreate(&pNode->hXlator, -+ hCmmMgr, NULL); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed" -+ " to create SM translator: 0x%x\n", -+ status); -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Fill in message args */ -+ if ((pArgs != NULL) && (pArgs->cbData > 0)) { -+ pmsgArgs = &(pNode->createArgs.asa.msgArgs); -+ pmsgArgs->pData = MEM_Calloc(pArgs->cbData, -+ MEM_PAGED); -+ if (pmsgArgs->pData == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ pmsgArgs->uArgLength = pArgs->cbData; -+ memcpy(pmsgArgs->pData, pArgs->cData, -+ pArgs->cbData); -+ } -+ } -+ } -+ } -+ -+ if (DSP_SUCCEEDED(status) && nodeType != NODE_DEVICE) { -+ /* Create a message queue for this node */ -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnMsgCreateQueue)(hNodeMgr->hMsg, -+ &pNode->hMsgQueue, 0, -+ pNode->createArgs.asa.msgArgs.uMaxMessages, -+ pNode); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Create object for dynamic loading */ -+ -+ status = hNodeMgr->nldrFxns.pfnAllocate(hNodeMgr->hNldr, -+ (void *) pNode, -+ &pNode->dcdProps.objData.nodeObj, -+ &pNode->hNldrNode, -+ &pNode->fPhaseSplit); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed to " -+ "allocate NLDR node: 0x%x\n", status); -+ } -+ } -+ -+ /* Comapare value read from Node Properties and check if it is same as -+ * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate -+ * GPP Address, Read the value in that address and override the -+ * uStackSeg value in task args */ -+ if (DSP_SUCCEEDED(status) && -+ (char *)pNode->dcdProps.objData.nodeObj.ndbProps.uStackSegName != -+ NULL) { -+ label = MEM_Calloc(sizeof(STACKSEGLABEL)+1, MEM_PAGED); -+ strncpy(label, STACKSEGLABEL, sizeof(STACKSEGLABEL)+1); -+ -+ if (strcmp((char *)pNode->dcdProps.objData.nodeObj. 
-+ ndbProps.uStackSegName, label) == 0) { -+ status = hNodeMgr->nldrFxns.pfnGetFxnAddr(pNode-> -+ hNldrNode, "DYNEXT_BEG", &dynextBase); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed to get Address for " -+ "DYNEXT_BEG: 0x%x\n", status); -+ } -+ -+ status = hNodeMgr->nldrFxns.pfnGetFxnAddr(pNode-> -+ hNldrNode, "L1DSRAM_HEAP", &pulValue); -+ -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed to get Address for " -+ "L1DSRAM_HEAP: 0x%x\n", status); -+ } -+ -+ status = CFG_GetHostResources((struct CFG_DEVNODE *) -+ DRV_GetFirstDevExtension(), &hostRes); -+ -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Allocate: Failed to get host resource " -+ "0x%x\n", status); -+ } -+ -+ ulGppMemBase = (u32)hostRes.dwMemBase[1]; -+ offSet = pulValue - dynextBase; -+ ulStackSegAddr = ulGppMemBase + offSet; -+ ulStackSegVal = (u32)*((REG_UWORD32 *) -+ ((u32)(ulStackSegAddr))); -+ -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "StackSegVal =0x%x\n", ulStackSegVal); -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "ulStackSegAddr = 0x%x\n", ulStackSegAddr); -+ -+ pNode->createArgs.asa.taskArgs.uStackSeg = -+ ulStackSegVal; -+ -+ } -+ -+ if (label) -+ MEM_Free(label); -+ -+ } -+ -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Add the node to the node manager's list of allocated -+ * nodes. */ -+ LST_InitElem((struct LST_ELEM *)pNode); -+ NODE_SetState(pNode, NODE_ALLOCATED); -+ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ -+ if (DSP_SUCCEEDED(status)) { -+ LST_PutTail(hNodeMgr->nodeList, -+ (struct LST_ELEM *) pNode); -+ ++(hNodeMgr->uNumNodes); -+ } -+ -+ /* Exit critical section */ -+ (void) SYNC_LeaveCS(hNodeMgr->hSync); -+ -+ /* Preset this to assume phases are split -+ * (for overlay and dll) */ -+ pNode->fPhaseSplit = true; -+ -+ if (DSP_SUCCEEDED(status)) -+ *phNode = pNode; -+ -+ -+ /* Notify all clients registered for DSP_NODESTATECHANGE. */ -+ PROC_NotifyAllClients(hProcessor, DSP_NODESTATECHANGE); -+ } else { -+ /* Cleanup */ -+ if (pNode) -+ DeleteNode(pNode, pr_ctxt); -+ -+ } -+ -+#ifndef RES_CLEANUP_DISABLE -+ if (DSP_SUCCEEDED(status)) { -+ DRV_InsertNodeResElement(*phNode, &nodeRes, pr_ctxt); -+ DRV_ProcNodeUpdateHeapStatus(nodeRes, true); -+ DRV_ProcNodeUpdateStatus(nodeRes, true); -+ } -+#endif -+ DBC_Ensure((DSP_FAILED(status) && (*phNode == NULL)) || -+ (DSP_SUCCEEDED(status) -+ && MEM_IsValidHandle((*phNode), NODE_SIGNATURE))); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_AllocMsgBuf ======== -+ * Purpose: -+ * Allocates buffer for zero copy messaging. 
-+ */ -+DBAPI NODE_AllocMsgBuf(struct NODE_OBJECT *hNode, u32 uSize, -+ OPTIONAL IN OUT struct DSP_BUFFERATTR *pAttr, -+ OUT u8 **pBuffer) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ DSP_STATUS status = DSP_SOK; -+ bool bVirtAddr = false; -+ bool bSetInfo; -+ u32 procId; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pBuffer != NULL); -+ -+ DBC_Require(uSize > 0); -+ -+ GT_4trace(NODE_debugMask, GT_ENTER, -+ "NODE_AllocMsgBuf: hNode: 0x%x\tuSize:" -+ " 0x%x\tpAttr: 0x%x\tpBuffer: %d\n", pNode, uSize, pAttr, -+ pBuffer); -+ -+ if (!MEM_IsValidHandle(pNode, NODE_SIGNATURE)) -+ status = DSP_EHANDLE; -+ else if (NODE_GetType(pNode) == NODE_DEVICE) -+ status = DSP_ENODETYPE; -+ -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (pAttr == NULL) -+ pAttr = &NODE_DFLTBUFATTRS; /* set defaults */ -+ -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ if (procId != DSP_UNIT) { -+ DBC_Assert(NULL); -+ goto func_end; -+ } -+ /* If segment ID includes MEM_SETVIRTUALSEGID then pBuffer is a -+ * virt address, so set this info in this node's translator -+ * object for future ref. If MEM_GETVIRTUALSEGID then retrieve -+ * virtual address from node's translator. */ -+ if ((pAttr->uSegment & MEM_SETVIRTUALSEGID) || -+ (pAttr->uSegment & MEM_GETVIRTUALSEGID)) { -+ bVirtAddr = true; -+ bSetInfo = (pAttr->uSegment & MEM_SETVIRTUALSEGID) ? -+ true : false; -+ pAttr->uSegment &= ~MEM_MASKVIRTUALSEGID; /* clear mask bits */ -+ /* Set/get this node's translators virtual address base/size */ -+ status = CMM_XlatorInfo(pNode->hXlator, pBuffer, uSize, -+ pAttr->uSegment, bSetInfo); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_7CLASS, -+ "NODE_AllocMsgBuf " -+ "failed: 0x%lx\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status) && (!bVirtAddr)) { -+ if (pAttr->uSegment != 1) { -+ /* Node supports single SM segment only. */ -+ status = DSP_EBADSEGID; -+ } -+ /* Arbitrary SM buffer alignment not supported for host side -+ * allocs, but guaranteed for the following alignment -+ * values. */ -+ switch (pAttr->uAlignment) { -+ case 0: -+ case 1: -+ case 2: -+ case 4: -+ break; -+ default: -+ /* alignment value not suportted */ -+ status = DSP_EALIGNMENT; -+ break; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* allocate physical buffer from segId in node's -+ * translator */ -+ (void)CMM_XlatorAllocBuf(pNode->hXlator, pBuffer, -+ uSize); -+ if (*pBuffer == NULL) { -+ GT_0trace(NODE_debugMask, GT_7CLASS, -+ "NODE_AllocMsgBuf: " -+ "ERROR: Out of shared memory.\n"); -+ status = DSP_EMEMORY; -+ } -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_ChangePriority ======== -+ * Purpose: -+ * Change the priority of a node in the allocated state, or that is -+ * currently running or paused on the target. 
-+ */ -+DSP_STATUS NODE_ChangePriority(struct NODE_OBJECT *hNode, s32 nPriority) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ struct NODE_MGR *hNodeMgr = NULL; -+ enum NODE_TYPE nodeType; -+ enum NODE_STATE state; -+ DSP_STATUS status = DSP_SOK; -+ u32 procId; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_2trace(NODE_debugMask, GT_ENTER, "NODE_ChangePriority: " -+ "hNode: 0x%x\tnPriority: %d\n", hNode, nPriority); -+ -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ GT_1trace(NODE_debugMask, GT_7CLASS, -+ "Invalid NODE Handle: 0x%x\n", hNode); -+ status = DSP_EHANDLE; -+ } else { -+ hNodeMgr = hNode->hNodeMgr; -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_TASK && nodeType != NODE_DAISSOCKET) -+ status = DSP_ENODETYPE; -+ else if (nPriority < hNodeMgr->nMinPri || -+ nPriority > hNodeMgr->nMaxPri) -+ status = DSP_ERANGE; -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* Enter critical section */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ state = NODE_GetState(hNode); -+ if (state == NODE_ALLOCATED || state == NODE_PAUSED) { -+ NODE_SetPriority(hNode, nPriority); -+ } else { -+ if (state != NODE_RUNNING) { -+ status = DSP_EWRONGSTATE; -+ goto func_cont; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = PROC_GetProcessorId(pNode->hProcessor, -+ &procId); -+ if (procId == DSP_UNIT) { -+ status = DISP_NodeChangePriority(hNodeMgr-> -+ hDisp, hNode, -+ hNodeMgr->ulFxnAddrs[RMSCHANGENODEPRIORITY], -+ hNode->nodeEnv, nPriority); -+ } -+ if (DSP_SUCCEEDED(status)) -+ NODE_SetPriority(hNode, nPriority); -+ -+ } -+ } -+func_cont: -+ /* Leave critical section */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_Connect ======== -+ * Purpose: -+ * Connect two nodes on the DSP, or a node on the DSP to the GPP. -+ */ -+DSP_STATUS NODE_Connect(struct NODE_OBJECT *hNode1, u32 uStream1, -+ struct NODE_OBJECT *hNode2, -+ u32 uStream2, OPTIONAL IN struct DSP_STRMATTR *pAttrs, -+ OPTIONAL IN struct DSP_CBDATA *pConnParam) -+{ -+ struct NODE_MGR *hNodeMgr; -+ char *pstrDevName = NULL; -+ enum NODE_TYPE node1Type = NODE_TASK; -+ enum NODE_TYPE node2Type = NODE_TASK; -+ struct NODE_STRMDEF *pstrmDef; -+ struct NODE_STRMDEF *pInput = NULL; -+ struct NODE_STRMDEF *pOutput = NULL; -+ struct NODE_OBJECT *hDevNode; -+ struct NODE_OBJECT *hNode; -+ struct STREAM *pStream; -+ GB_BitNum pipeId = GB_NOBITS; -+ GB_BitNum chnlId = GB_NOBITS; -+ CHNL_MODE uMode; -+ u32 dwLength; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ GT_5trace(NODE_debugMask, GT_ENTER, -+ "NODE_Connect: hNode1: 0x%x\tuStream1:" -+ " %d\thNode2: 0x%x\tuStream2: %d\tpAttrs: 0x%x\n", hNode1, -+ uStream1, hNode2, uStream2, pAttrs); -+ if (DSP_SUCCEEDED(status)) { -+ if ((hNode1 != (struct NODE_OBJECT *) DSP_HGPPNODE && -+ !MEM_IsValidHandle(hNode1, NODE_SIGNATURE)) || -+ (hNode2 != (struct NODE_OBJECT *) DSP_HGPPNODE && -+ !MEM_IsValidHandle(hNode2, NODE_SIGNATURE))) -+ status = DSP_EHANDLE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* The two nodes must be on the same processor */ -+ if (hNode1 != (struct NODE_OBJECT *)DSP_HGPPNODE && -+ hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE && -+ hNode1->hNodeMgr != hNode2->hNodeMgr) -+ status = DSP_EFAIL; -+ /* Cannot connect a node to itself */ -+ if (hNode1 == hNode2) -+ status = DSP_EFAIL; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* NODE_GetType() will return NODE_GPP if hNode = -+ * DSP_HGPPNODE. 
*/ -+ node1Type = NODE_GetType(hNode1); -+ node2Type = NODE_GetType(hNode2); -+ /* Check stream indices ranges */ -+ if ((node1Type != NODE_GPP && node1Type != NODE_DEVICE && -+ uStream1 >= MaxOutputs(hNode1)) || (node2Type != NODE_GPP && -+ node2Type != NODE_DEVICE && uStream2 >= MaxInputs(hNode2))) -+ status = DSP_EVALUE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* -+ * Only the following types of connections are allowed: -+ * task/dais socket < == > task/dais socket -+ * task/dais socket < == > device -+ * task/dais socket < == > GPP -+ * -+ * ie, no message nodes, and at least one task or dais -+ * socket node. -+ */ -+ if (node1Type == NODE_MESSAGE || node2Type == NODE_MESSAGE || -+ (node1Type != NODE_TASK && node1Type != NODE_DAISSOCKET && -+ node2Type != NODE_TASK && node2Type != NODE_DAISSOCKET)) -+ status = DSP_EFAIL; -+ } -+ /* -+ * Check stream mode. Default is STRMMODE_PROCCOPY. -+ */ -+ if (DSP_SUCCEEDED(status) && pAttrs) { -+ if (pAttrs->lMode != STRMMODE_PROCCOPY) -+ status = DSP_ESTRMMODE; /* illegal stream mode */ -+ -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (node1Type != NODE_GPP) { -+ hNodeMgr = hNode1->hNodeMgr; -+ } else { -+ DBC_Assert(hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE); -+ hNodeMgr = hNode2->hNodeMgr; -+ } -+ /* Enter critical section */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ /* Nodes must be in the allocated state */ -+ if (node1Type != NODE_GPP && NODE_GetState(hNode1) != NODE_ALLOCATED) -+ status = DSP_EWRONGSTATE; -+ -+ if (node2Type != NODE_GPP && NODE_GetState(hNode2) != NODE_ALLOCATED) -+ status = DSP_EWRONGSTATE; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Check that stream indices for task and dais socket nodes -+ * are not already be used. (Device nodes checked later) */ -+ if (node1Type == NODE_TASK || node1Type == NODE_DAISSOCKET) { -+ pOutput = &(hNode1->createArgs.asa.taskArgs. -+ strmOutDef[uStream1]); -+ if (pOutput->szDevice != NULL) -+ status = DSP_EALREADYCONNECTED; -+ -+ } -+ if (node2Type == NODE_TASK || node2Type == NODE_DAISSOCKET) { -+ pInput = &(hNode2->createArgs.asa.taskArgs. -+ strmInDef[uStream2]); -+ if (pInput->szDevice != NULL) -+ status = DSP_EALREADYCONNECTED; -+ -+ } -+ } -+ /* Connecting two task nodes? */ -+ if (DSP_SUCCEEDED(status) && ((node1Type == NODE_TASK || -+ node1Type == NODE_DAISSOCKET) && (node2Type == NODE_TASK || -+ node2Type == NODE_DAISSOCKET))) { -+ /* Find available pipe */ -+ pipeId = GB_findandset(hNodeMgr->pipeMap); -+ if (pipeId == GB_NOBITS) { -+ status = DSP_ENOMORECONNECTIONS; -+ } else { -+ hNode1->outputs[uStream1].type = NODECONNECT; -+ hNode2->inputs[uStream2].type = NODECONNECT; -+ hNode1->outputs[uStream1].devId = pipeId; -+ hNode2->inputs[uStream2].devId = pipeId; -+ pOutput->szDevice = MEM_Calloc(PIPENAMELEN + 1, -+ MEM_PAGED); -+ pInput->szDevice = MEM_Calloc(PIPENAMELEN + 1, -+ MEM_PAGED); -+ if (pOutput->szDevice == NULL || -+ pInput->szDevice == NULL) { -+ /* Undo the connection */ -+ if (pOutput->szDevice) -+ MEM_Free(pOutput->szDevice); -+ -+ if (pInput->szDevice) -+ MEM_Free(pInput->szDevice); -+ -+ pOutput->szDevice = NULL; -+ pInput->szDevice = NULL; -+ GB_clear(hNodeMgr->pipeMap, pipeId); -+ status = DSP_EMEMORY; -+ } else { -+ /* Copy "/dbpipe" name to device names */ -+ sprintf(pOutput->szDevice, "%s%d", -+ PIPEPREFIX, pipeId); -+ strcpy(pInput->szDevice, pOutput->szDevice); -+ } -+ } -+ } -+ /* Connecting task node to host? 
*/ -+ if (DSP_SUCCEEDED(status) && (node1Type == NODE_GPP || -+ node2Type == NODE_GPP)) { -+ if (node1Type == NODE_GPP) { -+ uMode = CHNL_MODETODSP; -+ } else { -+ DBC_Assert(node2Type == NODE_GPP); -+ uMode = CHNL_MODEFROMDSP; -+ } -+ /* Reserve a channel id. We need to put the name "/host" -+ * in the node's createArgs, but the host -+ * side channel will not be opened until DSPStream_Open is -+ * called for this node. */ -+ if (pAttrs) { -+ if (pAttrs->lMode == STRMMODE_RDMA) { -+ chnlId = GB_findandset(hNodeMgr->dmaChnlMap); -+ /* dma chans are 2nd transport chnl set -+ * ids(e.g. 16-31)*/ -+ (chnlId != GB_NOBITS) ? -+ (chnlId = chnlId + hNodeMgr->ulNumChnls) : -+ chnlId; -+ } else if (pAttrs->lMode == STRMMODE_ZEROCOPY) { -+ chnlId = GB_findandset(hNodeMgr->zChnlMap); -+ /* zero-copy chans are 3nd transport set -+ * (e.g. 32-47) */ -+ (chnlId != GB_NOBITS) ? (chnlId = chnlId + -+ (2 * hNodeMgr->ulNumChnls)) : chnlId; -+ } else { /* must be PROCCOPY */ -+ DBC_Assert(pAttrs->lMode == STRMMODE_PROCCOPY); -+ chnlId = GB_findandset(hNodeMgr->chnlMap); -+ /* e.g. 0-15 */ -+ } -+ } else { -+ /* default to PROCCOPY */ -+ chnlId = GB_findandset(hNodeMgr->chnlMap); -+ } -+ if (chnlId == GB_NOBITS) { -+ status = DSP_ENOMORECONNECTIONS; -+ goto func_cont2; -+ } -+ pstrDevName = MEM_Calloc(HOSTNAMELEN + 1, MEM_PAGED); -+ if (pstrDevName != NULL) -+ goto func_cont2; -+ -+ if (pAttrs) { -+ if (pAttrs->lMode == STRMMODE_RDMA) { -+ GB_clear(hNodeMgr->dmaChnlMap, chnlId - -+ hNodeMgr->ulNumChnls); -+ } else if (pAttrs->lMode == STRMMODE_ZEROCOPY) { -+ GB_clear(hNodeMgr->zChnlMap, chnlId - -+ (2*hNodeMgr->ulNumChnls)); -+ } else { -+ DBC_Assert(pAttrs->lMode == STRMMODE_PROCCOPY); -+ GB_clear(hNodeMgr->chnlMap, chnlId); -+ } -+ } else { -+ GB_clear(hNodeMgr->chnlMap, chnlId); -+ } -+ status = DSP_EMEMORY; -+func_cont2: -+ if (DSP_SUCCEEDED(status)) { -+ if (hNode1 == (struct NODE_OBJECT *) DSP_HGPPNODE) { -+ hNode2->inputs[uStream2].type = HOSTCONNECT; -+ hNode2->inputs[uStream2].devId = chnlId; -+ pInput->szDevice = pstrDevName; -+ } else { -+ hNode1->outputs[uStream1].type = HOSTCONNECT; -+ hNode1->outputs[uStream1].devId = chnlId; -+ pOutput->szDevice = pstrDevName; -+ } -+ sprintf(pstrDevName, "%s%d", HOSTPREFIX, chnlId); -+ } -+ } -+ /* Connecting task node to device node? 
*/ -+ if (DSP_SUCCEEDED(status) && ((node1Type == NODE_DEVICE) || -+ (node2Type == NODE_DEVICE))) { -+ if (node2Type == NODE_DEVICE) { -+ /* node1 == > device */ -+ hDevNode = hNode2; -+ hNode = hNode1; -+ pStream = &(hNode1->outputs[uStream1]); -+ pstrmDef = pOutput; -+ } else { -+ /* device == > node2 */ -+ hDevNode = hNode1; -+ hNode = hNode2; -+ pStream = &(hNode2->inputs[uStream2]); -+ pstrmDef = pInput; -+ } -+ /* Set up create args */ -+ pStream->type = DEVICECONNECT; -+ dwLength = strlen(hDevNode->pstrDevName); -+ if (pConnParam != NULL) { -+ pstrmDef->szDevice = MEM_Calloc(dwLength + 1 + -+ (u32) pConnParam->cbData, -+ MEM_PAGED); -+ } else { -+ pstrmDef->szDevice = MEM_Calloc(dwLength + 1, -+ MEM_PAGED); -+ } -+ if (pstrmDef->szDevice == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ /* Copy device name */ -+ strncpy(pstrmDef->szDevice, hDevNode->pstrDevName, -+ dwLength); -+ if (pConnParam != NULL) { -+ strncat(pstrmDef->szDevice, -+ (char *)pConnParam->cData, -+ (u32)pConnParam->cbData); -+ } -+ hDevNode->hDeviceOwner = hNode; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Fill in create args */ -+ if (node1Type == NODE_TASK || node1Type == NODE_DAISSOCKET) { -+ hNode1->createArgs.asa.taskArgs.uNumOutputs++; -+ FillStreamDef(hNode1, pOutput, pAttrs); -+ } -+ if (node2Type == NODE_TASK || node2Type == NODE_DAISSOCKET) { -+ hNode2->createArgs.asa.taskArgs.uNumInputs++; -+ FillStreamDef(hNode2, pInput, pAttrs); -+ } -+ /* Update hNode1 and hNode2 streamConnect */ -+ if (node1Type != NODE_GPP && node1Type != NODE_DEVICE) { -+ hNode1->uNumOutputs++; -+ if (uStream1 > hNode1->uMaxOutputIndex) -+ hNode1->uMaxOutputIndex = uStream1; -+ -+ } -+ if (node2Type != NODE_GPP && node2Type != NODE_DEVICE) { -+ hNode2->uNumInputs++; -+ if (uStream2 > hNode2->uMaxInputIndex) -+ hNode2->uMaxInputIndex = uStream2; -+ -+ } -+ FillStreamConnect(hNode1, hNode2, uStream1, uStream2); -+ } -+func_cont: -+ /* end of SYNC_EnterCS */ -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_Create ======== -+ * Purpose: -+ * Create a node on the DSP by remotely calling the node's create function. 
-+ */ -+DSP_STATUS NODE_Create(struct NODE_OBJECT *hNode) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ struct NODE_MGR *hNodeMgr; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ u32 ulCreateFxn; -+ enum NODE_TYPE nodeType; -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_SOK; -+ bool bJustWokeDSP = false; -+ struct DSP_CBDATA cbData; -+ u32 procId = 255; -+ struct DSP_PROCESSORSTATE procStatus; -+ struct PROC_OBJECT *hProcessor; -+#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+#endif -+ -+ DBC_Require(cRefs > 0); -+ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Create: hNode: 0x%x\n", -+ hNode); -+ if (!MEM_IsValidHandle(pNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ hProcessor = hNode->hProcessor; -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in error state then don't attempt to create -+ new node */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_Create:" -+ " proc Status 0x%x\n", procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ /* create struct DSP_CBDATA struct for PWR calls */ -+ cbData.cbData = PWR_TIMEOUT; -+ nodeType = NODE_GetType(hNode); -+ hNodeMgr = hNode->hNodeMgr; -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ /* Get access to node dispatcher */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ /* Check node state */ -+ if (NODE_GetState(hNode) != NODE_ALLOCATED) -+ status = DSP_EWRONGSTATE; -+ -+ if (DSP_SUCCEEDED(status)) -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ -+ if (DSP_FAILED(status)) -+ goto func_cont2; -+ -+ if (procId != DSP_UNIT) -+ goto func_cont2; -+ -+ /* Make sure streams are properly connected */ -+ if ((hNode->uNumInputs && hNode->uMaxInputIndex > -+ hNode->uNumInputs - 1) || -+ (hNode->uNumOutputs && hNode->uMaxOutputIndex > -+ hNode->uNumOutputs - 1)) -+ status = DSP_ENOTCONNECTED; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* If node's create function is not loaded, load it */ -+ /* Boost the OPP level to max level that DSP can be requested */ -+#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) -+ if (pdata->cpu_set_freq) { -+ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP3]); -+ -+ if (pdata->dsp_get_opp) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "opp level" -+ "after setting to VDD1_OPP3 is %d\n", -+ (*pdata->dsp_get_opp)()); -+ } -+ } -+#endif -+ status = hNodeMgr->nldrFxns.pfnLoad(hNode->hNldrNode, -+ NLDR_CREATE); -+ /* Get address of node's create function */ -+ if (DSP_SUCCEEDED(status)) { -+ hNode->fLoaded = true; -+ if (nodeType != NODE_DEVICE) { -+ status = GetFxnAddress(hNode, &ulCreateFxn, -+ CREATEPHASE); -+ } -+ } else { -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Create: failed to load" -+ " create code: 0x%x\n", status); -+ } -+ /* Request the lowest OPP level*/ -+#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) -+ if (pdata->cpu_set_freq) { -+ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP1]); -+ -+ if (pdata->dsp_get_opp) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "opp level" -+ "after setting to VDD1_OPP1 is %d\n", -+ (*pdata->dsp_get_opp)()); -+ } -+ } -+#endif -+ /* Get address of iAlg functions, if socket node */ -+ if (DSP_SUCCEEDED(status)) { -+ if (nodeType == NODE_DAISSOCKET) { -+ status = hNodeMgr->nldrFxns.pfnGetFxnAddr -+ 
(hNode->hNldrNode, hNode->dcdProps. -+ objData.nodeObj.pstrIAlgName, -+ &hNode->createArgs.asa.taskArgs. -+ ulDaisArg); -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ if (nodeType != NODE_DEVICE) { -+ status = DISP_NodeCreate(hNodeMgr->hDisp, hNode, -+ hNodeMgr->ulFxnAddrs[RMSCREATENODE], -+ ulCreateFxn, &(hNode->createArgs), -+ &(hNode->nodeEnv)); -+ if (DSP_SUCCEEDED(status)) { -+ /* Set the message queue id to the node env -+ * pointer */ -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ (*pIntfFxns->pfnMsgSetQueueId)(hNode->hMsgQueue, -+ hNode->nodeEnv); -+ } -+ } -+ } -+ /* Phase II/Overlays: Create, execute, delete phases possibly in -+ * different files/sections. */ -+ if (hNode->fLoaded && hNode->fPhaseSplit) { -+ /* If create code was dynamically loaded, we can now unload -+ * it. */ -+ status1 = hNodeMgr->nldrFxns.pfnUnload(hNode->hNldrNode, -+ NLDR_CREATE); -+ hNode->fLoaded = false; -+ } -+ if (DSP_FAILED(status1)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Create: Failed to unload " -+ "create code: 0x%x\n", status1); -+ } -+func_cont2: -+ /* Update node state and node manager state */ -+ if (DSP_SUCCEEDED(status)) { -+ NODE_SetState(hNode, NODE_CREATED); -+ hNodeMgr->uNumCreated++; -+ goto func_cont; -+ } -+ if (status != DSP_EWRONGSTATE) { -+ /* Put back in NODE_ALLOCATED state if error occurred */ -+ NODE_SetState(hNode, NODE_ALLOCATED); -+ } -+ if (procId == DSP_UNIT) { -+ /* If node create failed, see if should sleep DSP now */ -+ if (bJustWokeDSP == true) { -+ /* Check to see if partial create happened on DSP */ -+ if (hNode->nodeEnv == (u32)NULL) { -+ /* No environment allocated on DSP, re-sleep -+ * DSP now */ -+ PROC_Ctrl(hNode->hProcessor, WMDIOCTL_DEEPSLEEP, -+ &cbData); -+ } else { -+ /* Increment count, sleep later when node fully -+ * deleted */ -+ hNodeMgr->uNumCreated++; -+ } -+ } -+ } -+func_cont: -+ /* Free access to node dispatcher */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+func_end: -+ if (DSP_SUCCEEDED(status)) { -+ PROC_NotifyClients(hNode->hProcessor, DSP_NODESTATECHANGE); -+ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== NODE_CreateMgr ======== -+ * Purpose: -+ * Create a NODE Manager object. 
-+ */ -+DSP_STATUS NODE_CreateMgr(OUT struct NODE_MGR **phNodeMgr, -+ struct DEV_OBJECT *hDevObject) -+{ -+ u32 i; -+ struct NODE_MGR *pNodeMgr = NULL; -+ struct DISP_ATTRS dispAttrs; -+ char *szZLFile = ""; -+ struct NLDR_ATTRS nldrAttrs; -+ DSP_STATUS status = DSP_SOK; -+ u32 devType; -+ DBC_Require(cRefs > 0); -+ DBC_Require(phNodeMgr != NULL); -+ DBC_Require(hDevObject != NULL); -+ GT_2trace(NODE_debugMask, GT_ENTER, "NODE_CreateMgr: phNodeMgr: 0x%x\t" -+ "hDevObject: 0x%x\n", phNodeMgr, hDevObject); -+ *phNodeMgr = NULL; -+ /* Allocate Node manager object */ -+ MEM_AllocObject(pNodeMgr, struct NODE_MGR, NODEMGR_SIGNATURE); -+ if (pNodeMgr) { -+ pNodeMgr->hDevObject = hDevObject; -+ pNodeMgr->nodeList = LST_Create(); -+ pNodeMgr->pipeMap = GB_create(MAXPIPES); -+ pNodeMgr->pipeDoneMap = GB_create(MAXPIPES); -+ if (pNodeMgr->nodeList == NULL || pNodeMgr->pipeMap == NULL || -+ pNodeMgr->pipeDoneMap == NULL) { -+ status = DSP_EMEMORY; -+ GT_0trace(NODE_debugMask, GT_6CLASS, -+ "NODE_CreateMgr: Memory " -+ "allocation failed\n"); -+ } else { -+ status = NTFY_Create(&pNodeMgr->hNtfy); -+ } -+ pNodeMgr->uNumCreated = 0; -+ } else { -+ GT_0trace(NODE_debugMask, GT_6CLASS, -+ "NODE_CreateMgr: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } -+ /* get devNodeType */ -+ if (DSP_SUCCEEDED(status)) -+ status = DEV_GetDevType(hDevObject, &devType); -+ -+ /* Create the DCD Manager */ -+ if (DSP_SUCCEEDED(status)) { -+ status = DCD_CreateManager(szZLFile, &pNodeMgr->hDcdMgr); -+ if (DSP_SUCCEEDED(status)) -+ status = GetProcProps(pNodeMgr, hDevObject); -+ -+ } -+ /* Create NODE Dispatcher */ -+ if (DSP_SUCCEEDED(status)) { -+ dispAttrs.ulChnlOffset = pNodeMgr->ulChnlOffset; -+ dispAttrs.ulChnlBufSize = pNodeMgr->ulChnlBufSize; -+ dispAttrs.procFamily = pNodeMgr->procFamily; -+ dispAttrs.procType = pNodeMgr->procType; -+ status = DISP_Create(&pNodeMgr->hDisp, hDevObject, &dispAttrs); -+ } -+ /* Create a STRM Manager */ -+ if (DSP_SUCCEEDED(status)) -+ status = STRM_Create(&pNodeMgr->hStrmMgr, hDevObject); -+ -+ if (DSP_SUCCEEDED(status)) { -+ DEV_GetIntfFxns(hDevObject, &pNodeMgr->pIntfFxns); -+ /* Get MSG queue manager */ -+ DEV_GetMsgMgr(hDevObject, &pNodeMgr->hMsg); -+ status = SYNC_InitializeCS(&pNodeMgr->hSync); -+ if (DSP_FAILED(status)) -+ status = DSP_EMEMORY; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pNodeMgr->chnlMap = GB_create(pNodeMgr->ulNumChnls); -+ /* dma chnl map. 
ulNumChnls is # per transport */ -+ pNodeMgr->dmaChnlMap = GB_create(pNodeMgr->ulNumChnls); -+ pNodeMgr->zChnlMap = GB_create(pNodeMgr->ulNumChnls); -+ if ((pNodeMgr->chnlMap == NULL) || -+ (pNodeMgr->dmaChnlMap == NULL) || -+ (pNodeMgr->zChnlMap == NULL)) { -+ status = DSP_EMEMORY; -+ } else { -+ /* Block out reserved channels */ -+ for (i = 0; i < pNodeMgr->ulChnlOffset; i++) -+ GB_set(pNodeMgr->chnlMap, i); -+ -+ /* Block out channels reserved for RMS */ -+ GB_set(pNodeMgr->chnlMap, pNodeMgr->ulChnlOffset); -+ GB_set(pNodeMgr->chnlMap, pNodeMgr->ulChnlOffset + 1); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* NO RM Server on the IVA */ -+ if (devType != IVA_UNIT) { -+ /* Get addresses of any RMS functions loaded */ -+ status = GetRMSFxns(pNodeMgr); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_6CLASS, -+ "NODE_CreateMgr: Failed to" -+ " get RMS functions: status = 0x%x", status); -+ } -+ } -+ } -+ -+ /* Get loader functions and create loader */ -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(NODE_debugMask, GT_1CLASS, -+ "NODE_CreateMgr: using dynamic loader\n"); -+ pNodeMgr->nldrFxns = nldrFxns; /* Dynamic loader functions */ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ nldrAttrs.pfnOvly = Ovly; -+ nldrAttrs.pfnWrite = Write; -+ nldrAttrs.usDSPWordSize = pNodeMgr->uDSPWordSize; -+ nldrAttrs.usDSPMauSize = pNodeMgr->uDSPMauSize; -+ pNodeMgr->fLoaderInit = pNodeMgr->nldrFxns.pfnInit(); -+ status = pNodeMgr->nldrFxns.pfnCreate(&pNodeMgr->hNldr, -+ hDevObject, &nldrAttrs); -+ if (DSP_FAILED(status)) { -+ GT_1trace(NODE_debugMask, GT_6CLASS, -+ "NODE_CreateMgr: Failed to " -+ "create loader: status = 0x%x\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) -+ *phNodeMgr = pNodeMgr; -+ else -+ DeleteNodeMgr(pNodeMgr); -+ -+ DBC_Ensure((DSP_FAILED(status) && (*phNodeMgr == NULL)) || -+ (DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle((*phNodeMgr), NODEMGR_SIGNATURE))); -+ -+ return status; -+} -+ -+/* -+ * ======== NODE_Delete ======== -+ * Purpose: -+ * Delete a node on the DSP by remotely calling the node's delete function. -+ * Loads the node's delete function if necessary. Free GPP side resources -+ * after node's delete function returns. -+ */ -+DSP_STATUS NODE_Delete(struct NODE_OBJECT *hNode, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ struct NODE_MGR *hNodeMgr; -+ struct PROC_OBJECT *hProcessor; -+ struct DISP_OBJECT *hDisp; -+ u32 ulDeleteFxn; -+ enum NODE_TYPE nodeType; -+ enum NODE_STATE state; -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_SOK; -+ struct DSP_CBDATA cbData; -+ u32 procId; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE nodeRes; -+#endif -+ struct DSP_PROCESSORSTATE procStatus; -+ DBC_Require(cRefs > 0); -+ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Delete: hNode: 0x%x\n", -+ hNode); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ /* create struct DSP_CBDATA struct for PWR call */ -+ cbData.cbData = PWR_TIMEOUT; -+ hNodeMgr = hNode->hNodeMgr; -+ hProcessor = hNode->hProcessor; -+ hDisp = hNodeMgr->hDisp; -+ nodeType = NODE_GetType(hNode); -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ /* Enter critical section */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ state = NODE_GetState(hNode); -+ /* Execute delete phase code for non-device node in all cases -+ * except when the node was only allocated. 
Delete phase must be -+ * executed even if create phase was executed, but failed. -+ * If the node environment pointer is non-NULL, the delete phase -+ * code must be executed. */ -+ if (!(state == NODE_ALLOCATED && hNode->nodeEnv == (u32)NULL) && -+ nodeType != NODE_DEVICE) { -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ if (DSP_FAILED(status)) -+ goto func_cont1; -+ -+ if (procId == DSP_UNIT || procId == IVA_UNIT) { -+ /* If node has terminated, execute phase code will -+ * have already been unloaded in NODE_OnExit(). If the -+ * node is PAUSED, the execute phase is loaded, and it -+ * is now ok to unload it. If the node is running, we -+ * will unload the execute phase only after deleting -+ * the node. */ -+ if (state == NODE_PAUSED && hNode->fLoaded && -+ hNode->fPhaseSplit) { -+ /* Ok to unload execute code as long as node -+ * is not * running */ -+ status1 = hNodeMgr->nldrFxns.pfnUnload(hNode-> -+ hNldrNode, NLDR_EXECUTE); -+ hNode->fLoaded = false; -+ NODE_SetState(hNode, NODE_DONE); -+ } -+ /* Load delete phase code if not loaded or if haven't -+ * * unloaded EXECUTE phase */ -+ if ((!(hNode->fLoaded) || (state == NODE_RUNNING)) && -+ hNode->fPhaseSplit) { -+ status = hNodeMgr->nldrFxns.pfnLoad(hNode-> -+ hNldrNode, NLDR_DELETE); -+ if (DSP_SUCCEEDED(status)) { -+ hNode->fLoaded = true; -+ } else { -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Delete: failed to " -+ "load delete code: 0x%x\n", -+ status); -+ } -+ } -+ } -+func_cont1: -+ if (DSP_SUCCEEDED(status)) { -+ /* Unblock a thread trying to terminate the node */ -+ (void)SYNC_SetEvent(hNode->hSyncDone); -+ if (procId == DSP_UNIT) { -+ /* ulDeleteFxn = address of node's delete -+ * function */ -+ status = GetFxnAddress(hNode, &ulDeleteFxn, -+ DELETEPHASE); -+ } else if (procId == IVA_UNIT) -+ ulDeleteFxn = (u32)hNode->nodeEnv; -+ if (DSP_SUCCEEDED(status)) { -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ GT_1trace(NODE_debugMask, GT_4CLASS, -+ "NODE_Delete: proc Status " -+ "0x%x\n", procStatus.iState); -+ if (procStatus.iState != PROC_ERROR) { -+ status = DISP_NodeDelete(hDisp, hNode, -+ hNodeMgr->ulFxnAddrs[RMSDELETENODE], -+ ulDeleteFxn, hNode->nodeEnv); -+ } else -+ NODE_SetState(hNode, NODE_DONE); -+ -+ /* Unload execute, if not unloaded, and delete -+ * function */ -+ if (state == NODE_RUNNING && -+ hNode->fPhaseSplit) { -+ status1 = hNodeMgr->nldrFxns.pfnUnload( -+ hNode->hNldrNode, NLDR_EXECUTE); -+ } -+ if (DSP_FAILED(status1)) { -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Delete: failed to" -+ "unload execute code: 0x%x\n", -+ status1); -+ } -+ status1 = hNodeMgr->nldrFxns.pfnUnload( -+ hNode->hNldrNode, NLDR_DELETE); -+ hNode->fLoaded = false; -+ if (DSP_FAILED(status1)) { -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Delete: failed to" -+ "unload delete code: 0x%x\n", -+ status1); -+ } -+ } -+ } -+ } -+ /* Free host side resources even if a failure occurred */ -+ /* Remove node from hNodeMgr->nodeList */ -+ LST_RemoveElem(hNodeMgr->nodeList, (struct LST_ELEM *) hNode); -+ hNodeMgr->uNumNodes--; -+ /* Decrement count of nodes created on DSP */ -+ if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) && -+ (hNode->nodeEnv != (u32) NULL))) -+ hNodeMgr->uNumCreated--; -+ /* Free host-side resources allocated by NODE_Create() -+ * DeleteNode() fails if SM buffers not freed by client! 
*/ -+#ifndef RES_CLEANUP_DISABLE -+ if (!pr_ctxt) -+ goto func_cont; -+ if (DRV_GetNodeResElement(hNode, &nodeRes, pr_ctxt) != DSP_ENOTFOUND) { -+ GT_0trace(NODE_debugMask, GT_5CLASS, "\nNODE_Delete12:\n"); -+ DRV_ProcNodeUpdateStatus(nodeRes, false); -+ } -+#endif -+func_cont: -+ GT_0trace(NODE_debugMask, GT_ENTER, "\nNODE_Delete13:\n "); -+ DeleteNode(hNode, pr_ctxt); -+#ifndef RES_CLEANUP_DISABLE -+ GT_0trace(NODE_debugMask, GT_5CLASS, "\nNODE_Delete2:\n "); -+ if (pr_ctxt) -+ DRV_RemoveNodeResElement(nodeRes, pr_ctxt); -+#endif -+ GT_0trace(NODE_debugMask, GT_ENTER, "\nNODE_Delete3:\n "); -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ PROC_NotifyClients(hProcessor, DSP_NODESTATECHANGE); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_DeleteMgr ======== -+ * Purpose: -+ * Delete the NODE Manager. -+ */ -+DSP_STATUS NODE_DeleteMgr(struct NODE_MGR *hNodeMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)); -+ -+ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_DeleteMgr: hNodeMgr: 0x%x\n", -+ hNodeMgr); -+ DeleteNodeMgr(hNodeMgr); -+ -+ return status; -+} -+ -+/* -+ * ======== NODE_EnumNodes ======== -+ * Purpose: -+ * Enumerate currently allocated nodes. -+ */ -+DSP_STATUS NODE_EnumNodes(struct NODE_MGR *hNodeMgr, IN DSP_HNODE *aNodeTab, -+ u32 uNodeTabSize, OUT u32 *puNumNodes, -+ OUT u32 *puAllocated) -+{ -+ struct NODE_OBJECT *hNode; -+ u32 i; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)); -+ DBC_Require(aNodeTab != NULL || uNodeTabSize == 0); -+ DBC_Require(puNumNodes != NULL); -+ DBC_Require(puAllocated != NULL); -+ GT_5trace(NODE_debugMask, GT_ENTER, "NODE_EnumNodes: hNodeMgr: 0x%x\t" -+ "aNodeTab: %d\tuNodeTabSize: 0x%x\tpuNumNodes: 0x%x\t" -+ "puAllocated\n", hNodeMgr, aNodeTab, uNodeTabSize, puNumNodes, -+ puAllocated); -+ /* Enter critical section */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ if (hNodeMgr->uNumNodes > uNodeTabSize) { -+ *puAllocated = hNodeMgr->uNumNodes; -+ *puNumNodes = 0; -+ status = DSP_ESIZE; -+ } else { -+ hNode = (struct NODE_OBJECT *)LST_First(hNodeMgr-> -+ nodeList); -+ for (i = 0; i < hNodeMgr->uNumNodes; i++) { -+ DBC_Assert(MEM_IsValidHandle(hNode, -+ NODE_SIGNATURE)); -+ aNodeTab[i] = hNode; -+ hNode = (struct NODE_OBJECT *)LST_Next -+ (hNodeMgr->nodeList, -+ (struct LST_ELEM *)hNode); -+ } -+ *puAllocated = *puNumNodes = hNodeMgr->uNumNodes; -+ } -+ } -+ /* end of SYNC_EnterCS */ -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ return status; -+} -+ -+/* -+ * ======== NODE_Exit ======== -+ * Purpose: -+ * Discontinue usage of NODE module. -+ */ -+void NODE_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "Entered NODE_Exit, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== NODE_FreeMsgBuf ======== -+ * Purpose: -+ * Frees the message buffer. 
-+ */ -+DSP_STATUS NODE_FreeMsgBuf(struct NODE_OBJECT *hNode, IN u8 *pBuffer, -+ OPTIONAL struct DSP_BUFFERATTR *pAttr) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ DSP_STATUS status = DSP_SOK; -+ u32 procId; -+ DBC_Require(cRefs > 0); -+ DBC_Require(pBuffer != NULL); -+ DBC_Require(pNode != NULL); -+ DBC_Require(pNode->hXlator != NULL); -+ GT_3trace(NODE_debugMask, GT_ENTER, "NODE_FreeMsgBuf: hNode: 0x%x\t" -+ "pBuffer: 0x%x\tpAttr: 0x%x\n", hNode, pBuffer, pAttr); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ if (procId == DSP_UNIT) { -+ if (DSP_SUCCEEDED(status)) { -+ if (pAttr == NULL) { -+ /* set defaults */ -+ pAttr = &NODE_DFLTBUFATTRS; -+ } -+ /* Node supports single SM segment only */ -+ if (pAttr->uSegment != 1) -+ status = DSP_EBADSEGID; -+ -+ /* pBuffer is clients Va. */ -+ status = CMM_XlatorFreeBuf(pNode->hXlator, pBuffer); -+ if (DSP_FAILED(status)) -+ status = DSP_EFAIL; -+ else -+ status = DSP_SOK; -+ -+ } -+ } else { -+ DBC_Assert(NULL); /* BUG */ -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_GetAttr ======== -+ * Purpose: -+ * Copy the current attributes of the specified node into a DSP_NODEATTR -+ * structure. -+ */ -+DSP_STATUS NODE_GetAttr(struct NODE_OBJECT *hNode, -+ OUT struct DSP_NODEATTR *pAttr, u32 uAttrSize) -+{ -+ struct NODE_MGR *hNodeMgr; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(cRefs > 0); -+ DBC_Require(pAttr != NULL); -+ DBC_Require(uAttrSize >= sizeof(struct DSP_NODEATTR)); -+ GT_3trace(NODE_debugMask, GT_ENTER, "NODE_GetAttr: hNode: " -+ "0x%x\tpAttr: 0x%x \tuAttrSize: 0x%x\n", hNode, pAttr, -+ uAttrSize); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ hNodeMgr = hNode->hNodeMgr; -+ /* Enter hNodeMgr critical section (since we're accessing -+ * data that could be changed by NODE_ChangePriority() and -+ * NODE_Connect(). */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ pAttr->cbStruct = sizeof(struct DSP_NODEATTR); -+ /* DSP_NODEATTRIN */ -+ pAttr->inNodeAttrIn.cbStruct = -+ sizeof(struct DSP_NODEATTRIN); -+ pAttr->inNodeAttrIn.iPriority = hNode->nPriority; -+ pAttr->inNodeAttrIn.uTimeout = hNode->uTimeout; -+ pAttr->inNodeAttrIn.uHeapSize = -+ hNode->createArgs.asa.taskArgs.uHeapSize; -+ pAttr->inNodeAttrIn.pGPPVirtAddr = (void *) -+ hNode->createArgs.asa.taskArgs.uGPPHeapAddr; -+ pAttr->uInputs = hNode->uNumGPPInputs; -+ pAttr->uOutputs = hNode->uNumGPPOutputs; -+ /* DSP_NODEINFO */ -+ GetNodeInfo(hNode, &(pAttr->iNodeInfo)); -+ } -+ /* end of SYNC_EnterCS */ -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ } -+ return status; -+} -+ -+/* -+ * ======== NODE_GetChannelId ======== -+ * Purpose: -+ * Get the channel index reserved for a stream connection between the -+ * host and a node. 
-+ */ -+DSP_STATUS NODE_GetChannelId(struct NODE_OBJECT *hNode, u32 uDir, u32 uIndex, -+ OUT u32 *pulId) -+{ -+ enum NODE_TYPE nodeType; -+ DSP_STATUS status = DSP_EVALUE; -+ DBC_Require(cRefs > 0); -+ DBC_Require(uDir == DSP_TONODE || uDir == DSP_FROMNODE); -+ DBC_Require(pulId != NULL); -+ GT_4trace(NODE_debugMask, GT_ENTER, "NODE_GetChannelId: hNode: " -+ "0x%x\tuDir: %d\tuIndex: %d\tpulId: 0x%x\n", hNode, uDir, -+ uIndex, pulId); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ return status; -+ } -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_TASK && nodeType != NODE_DAISSOCKET) { -+ status = DSP_ENODETYPE; -+ return status; -+ } -+ if (uDir == DSP_TONODE) { -+ if (uIndex < MaxInputs(hNode)) { -+ if (hNode->inputs[uIndex].type == HOSTCONNECT) { -+ *pulId = hNode->inputs[uIndex].devId; -+ status = DSP_SOK; -+ } -+ } -+ } else { -+ DBC_Assert(uDir == DSP_FROMNODE); -+ if (uIndex < MaxOutputs(hNode)) { -+ if (hNode->outputs[uIndex].type == HOSTCONNECT) { -+ *pulId = hNode->outputs[uIndex].devId; -+ status = DSP_SOK; -+ } -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== NODE_GetMessage ======== -+ * Purpose: -+ * Retrieve a message from a node on the DSP. -+ */ -+DSP_STATUS NODE_GetMessage(struct NODE_OBJECT *hNode, OUT struct DSP_MSG *pMsg, -+ u32 uTimeout) -+{ -+ struct NODE_MGR *hNodeMgr; -+ enum NODE_TYPE nodeType; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ void *pTmpBuf; -+ struct DSP_PROCESSORSTATE procStatus; -+ struct PROC_OBJECT *hProcessor; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pMsg != NULL); -+ GT_3trace(NODE_debugMask, GT_ENTER, -+ "NODE_GetMessage: hNode: 0x%x\tpMsg: " -+ "0x%x\tuTimeout: 0x%x\n", hNode, pMsg, uTimeout); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ hProcessor = hNode->hProcessor; -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in error state then don't attempt to get the -+ message */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_GetMessage:" -+ " proc Status 0x%x\n", procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ hNodeMgr = hNode->hNodeMgr; -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_MESSAGE && nodeType != NODE_TASK && -+ nodeType != NODE_DAISSOCKET) { -+ status = DSP_ENODETYPE; -+ goto func_end; -+ } -+ /* This function will block unless a message is available. Since -+ * DSPNode_RegisterNotify() allows notification when a message -+ * is available, the system can be designed so that -+ * DSPNode_GetMessage() is only called when a message is -+ * available. */ -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnMsgGet)(hNode->hMsgQueue, pMsg, uTimeout); -+ /* Check if message contains SM descriptor */ -+ if (DSP_FAILED(status) || !(pMsg->dwCmd & DSP_RMSBUFDESC)) -+ goto func_end; -+ -+ /* Translate DSP byte addr to GPP Va. 
*/ -+ pTmpBuf = CMM_XlatorTranslate(hNode->hXlator, -+ (void *)(pMsg->dwArg1 * hNode->hNodeMgr->uDSPWordSize), -+ CMM_DSPPA2PA); -+ if (pTmpBuf != NULL) { -+ /* now convert this GPP Pa to Va */ -+ pTmpBuf = CMM_XlatorTranslate(hNode->hXlator, pTmpBuf, -+ CMM_PA2VA); -+ if (pTmpBuf != NULL) { -+ /* Adjust SM size in msg */ -+ pMsg->dwArg1 = (u32) pTmpBuf; -+ pMsg->dwArg2 *= hNode->hNodeMgr->uDSPWordSize; -+ } else { -+ GT_0trace(NODE_debugMask, GT_7CLASS, "NODE_GetMessage: " -+ "Failed SM translation!\n"); -+ status = DSP_ETRANSLATE; -+ } -+ } else { -+ GT_0trace(NODE_debugMask, GT_7CLASS, "NODE_GetMessage: Failed " -+ "SM Pa/Pa translation!\n"); -+ status = DSP_ETRANSLATE; -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_GetNldrObj ======== -+ */ -+DSP_STATUS NODE_GetNldrObj(struct NODE_MGR *hNodeMgr, -+ struct NLDR_OBJECT **phNldrObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct NODE_MGR *pNodeMgr = hNodeMgr; -+ DBC_Require(phNldrObj != NULL); -+ GT_2trace(NODE_debugMask, GT_ENTER, -+ "Entered NODE_GetNldrObj, hNodeMgr: " -+ "0x%x\n\tphNldrObj: 0x%x\n", hNodeMgr, phNldrObj); -+ if (!MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)) -+ status = DSP_EHANDLE; -+ else -+ *phNldrObj = pNodeMgr->hNldr; -+ -+ GT_2trace(NODE_debugMask, GT_ENTER, -+ "Exit NODE_GetNldrObj: status 0x%x\n\t" -+ "phNldrObj: 0x%x\n", status, *phNldrObj); -+ DBC_Ensure(DSP_SUCCEEDED(status) || ((phNldrObj != NULL) && -+ (*phNldrObj == NULL))); -+ return status; -+} -+ -+/* -+ * ======== NODE_GetStrmMgr ======== -+ * Purpose: -+ * Returns the Stream manager. -+ */ -+DSP_STATUS NODE_GetStrmMgr(struct NODE_OBJECT *hNode, -+ struct STRM_MGR **phStrmMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) -+ status = DSP_EHANDLE; -+ else -+ *phStrmMgr = hNode->hNodeMgr->hStrmMgr; -+ -+ return status; -+} -+ -+/* -+ * ======== NODE_GetLoadType ======== -+ */ -+enum NLDR_LOADTYPE NODE_GetLoadType(struct NODE_OBJECT *hNode) -+{ -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_GetLoadType: Failed. hNode:" -+ " 0x%x\n", hNode); -+ return -1; -+ } else -+ return hNode->dcdProps.objData.nodeObj.usLoadType; -+} -+ -+/* -+ * ======== NODE_GetTimeout ======== -+ * Purpose: -+ * Returns the timeout value for this node. -+ */ -+u32 NODE_GetTimeout(struct NODE_OBJECT *hNode) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_GetTimeout: Failed. hNode:" -+ " 0x%x\n", hNode); -+ return 0; -+ } else -+ return hNode->uTimeout; -+} -+ -+/* -+ * ======== NODE_GetType ======== -+ * Purpose: -+ * Returns the node type. -+ */ -+enum NODE_TYPE NODE_GetType(struct NODE_OBJECT *hNode) -+{ -+ enum NODE_TYPE nodeType; -+ -+ if (hNode == (struct NODE_OBJECT *) DSP_HGPPNODE) -+ nodeType = NODE_GPP; -+ else { -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) -+ nodeType = -1; -+ else -+ nodeType = hNode->nType; -+ } -+ return nodeType; -+} -+ -+/* -+ * ======== NODE_Init ======== -+ * Purpose: -+ * Initialize the NODE module. 
-+ */ -+bool NODE_Init(void) -+{ -+ bool fRetVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!NODE_debugMask.flags); -+ GT_create(&NODE_debugMask, "NO"); /* "NO" for NOde */ -+ } -+ -+ if (fRetVal) -+ cRefs++; -+ -+ GT_1trace(NODE_debugMask, GT_5CLASS, "NODE_Init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((fRetVal && (cRefs > 0)) || (!fRetVal && (cRefs >= 0))); -+ return fRetVal; -+} -+ -+/* -+ * ======== NODE_OnExit ======== -+ * Purpose: -+ * Gets called when RMS_EXIT is received for a node. -+ */ -+void NODE_OnExit(struct NODE_OBJECT *hNode, s32 nStatus) -+{ -+ DBC_Assert(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ /* Set node state to done */ -+ NODE_SetState(hNode, NODE_DONE); -+ hNode->nExitStatus = nStatus; -+ if (hNode->fLoaded && hNode->fPhaseSplit) { -+ (void)hNode->hNodeMgr->nldrFxns.pfnUnload(hNode->hNldrNode, -+ NLDR_EXECUTE); -+ hNode->fLoaded = false; -+ } -+ /* Unblock call to NODE_Terminate */ -+ (void) SYNC_SetEvent(hNode->hSyncDone); -+ /* Notify clients */ -+ PROC_NotifyClients(hNode->hProcessor, DSP_NODESTATECHANGE); -+ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); -+} -+ -+/* -+ * ======== NODE_Pause ======== -+ * Purpose: -+ * Suspend execution of a node currently running on the DSP. -+ */ -+DSP_STATUS NODE_Pause(struct NODE_OBJECT *hNode) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ enum NODE_TYPE nodeType; -+ enum NODE_STATE state; -+ struct NODE_MGR *hNodeMgr; -+ DSP_STATUS status = DSP_SOK; -+ u32 procId; -+ struct DSP_PROCESSORSTATE procStatus; -+ struct PROC_OBJECT *hProcessor; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Pause: hNode: 0x%x\n", hNode); -+ -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } else { -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_TASK && nodeType != NODE_DAISSOCKET) -+ status = DSP_ENODETYPE; -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ -+ if (procId == IVA_UNIT) -+ status = DSP_ENOTIMPL; -+ -+ if (DSP_SUCCEEDED(status)) { -+ hNodeMgr = hNode->hNodeMgr; -+ -+ /* Enter critical section */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ -+ if (DSP_SUCCEEDED(status)) { -+ state = NODE_GetState(hNode); -+ /* Check node state */ -+ if (state != NODE_RUNNING) -+ status = DSP_EWRONGSTATE; -+ -+ hProcessor = hNode->hProcessor; -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in error state then don't attempt -+ to send the message */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, -+ "NODE_Pause: proc Status 0x%x\n", -+ procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = DISP_NodeChangePriority(hNodeMgr-> -+ hDisp, hNode, -+ hNodeMgr->ulFxnAddrs[RMSCHANGENODEPRIORITY], -+ hNode->nodeEnv, NODE_SUSPENDEDPRI); -+ } -+ -+ /* Update state */ -+ if (DSP_SUCCEEDED(status)) { -+ NODE_SetState(hNode, NODE_PAUSED); -+ } else { -+ GT_1trace(NODE_debugMask, GT_6CLASS, -+ "NODE_Pause: Failed. 
hNode:" -+ " 0x%x\n", hNode); -+ } -+ } -+ /* End of SYNC_EnterCS */ -+ /* Leave critical section */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ PROC_NotifyClients(hNode->hProcessor, -+ DSP_NODESTATECHANGE); -+ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_PutMessage ======== -+ * Purpose: -+ * Send a message to a message node, task node, or XDAIS socket node. This -+ * function will block until the message stream can accommodate the -+ * message, or a timeout occurs. -+ */ -+DSP_STATUS NODE_PutMessage(struct NODE_OBJECT *hNode, -+ IN CONST struct DSP_MSG *pMsg, u32 uTimeout) -+{ -+ struct NODE_MGR *hNodeMgr = NULL; -+ enum NODE_TYPE nodeType; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ enum NODE_STATE state; -+ DSP_STATUS status = DSP_SOK; -+ void *pTmpBuf; -+ struct DSP_MSG newMsg; -+ struct DSP_PROCESSORSTATE procStatus; -+ struct PROC_OBJECT *hProcessor; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pMsg != NULL); -+ GT_3trace(NODE_debugMask, GT_ENTER, -+ "NODE_PutMessage: hNode: 0x%x\tpMsg: " -+ "0x%x\tuTimeout: 0x%x\n", hNode, pMsg, uTimeout); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ hProcessor = hNode->hProcessor; -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in bad state then don't attempt sending the -+ message */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_PutMessage:" -+ " proc Status 0x%x\n", procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ hNodeMgr = hNode->hNodeMgr; -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_MESSAGE && nodeType != NODE_TASK && -+ nodeType != NODE_DAISSOCKET) -+ status = DSP_ENODETYPE; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Check node state. Can't send messages to a node after -+ * we've sent the RMS_EXIT command. There is still the -+ * possibility that NODE_Terminate can be called after we've -+ * checked the state. Could add another SYNC object to -+ * prevent this (can't use hNodeMgr->hSync, since we don't -+ * want to block other NODE functions). However, the node may -+ * still exit on its own, before this message is sent. */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ state = NODE_GetState(hNode); -+ if (state == NODE_TERMINATING || state == NODE_DONE) -+ status = DSP_EWRONGSTATE; -+ -+ } -+ /* end of SYNC_EnterCS */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* assign pMsg values to new msg */ -+ newMsg = *pMsg; -+ /* Now, check if message contains a SM buffer descriptor */ -+ if (pMsg->dwCmd & DSP_RMSBUFDESC) { -+ /* Translate GPP Va to DSP physical buf Ptr. 
*/ -+ pTmpBuf = CMM_XlatorTranslate(hNode->hXlator, -+ (void *)newMsg.dwArg1, CMM_VA2DSPPA); -+ if (pTmpBuf != NULL) { -+ /* got translation, convert to MAUs in msg */ -+ if (hNode->hNodeMgr->uDSPWordSize != 0) { -+ newMsg.dwArg1 = -+ (u32)pTmpBuf / -+ hNode->hNodeMgr->uDSPWordSize; -+ /* MAUs */ -+ newMsg.dwArg2 /= hNode->hNodeMgr->uDSPWordSize; -+ } else { -+ GT_0trace(NODE_debugMask, GT_7CLASS, -+ "NODE_PutMessage: " -+ "uDSPWordSize is zero!\n"); -+ status = DSP_EFAIL; /* bad DSPWordSize */ -+ } -+ } else { /* failed to translate buffer address */ -+ GT_0trace(NODE_debugMask, GT_7CLASS, -+ "NODE_PutMessage: Failed to" -+ " translate SM address\n"); -+ status = DSP_ETRANSLATE; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnMsgPut)(hNode->hMsgQueue, -+ &newMsg, uTimeout); -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_RegisterNotify ======== -+ * Purpose: -+ * Register to be notified on specific events for this node. -+ */ -+DSP_STATUS NODE_RegisterNotify(struct NODE_OBJECT *hNode, u32 uEventMask, -+ u32 uNotifyType, -+ struct DSP_NOTIFICATION *hNotification) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hNotification != NULL); -+ -+ GT_4trace(NODE_debugMask, GT_ENTER, -+ "NODE_RegisterNotify: hNode: 0x%x\t" -+ "uEventMask: 0x%x\tuNotifyType: 0x%x\thNotification: 0x%x\n", -+ hNode, uEventMask, uNotifyType, hNotification); -+ -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ /* Check if event mask is a valid node related event */ -+ if (uEventMask & ~(DSP_NODESTATECHANGE | -+ DSP_NODEMESSAGEREADY)) -+ status = DSP_EVALUE; -+ -+ /* Check if notify type is valid */ -+ if (uNotifyType != DSP_SIGNALEVENT) -+ status = DSP_EVALUE; -+ -+ /* Only one Notification can be registered at a -+ * time - Limitation */ -+ if (uEventMask == (DSP_NODESTATECHANGE | -+ DSP_NODEMESSAGEREADY)) -+ status = DSP_EVALUE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ if (uEventMask == DSP_NODESTATECHANGE) { -+ status = NTFY_Register(hNode->hNtfy, hNotification, -+ uEventMask & DSP_NODESTATECHANGE, uNotifyType); -+ } else { -+ /* Send Message part of event mask to MSG */ -+ pIntfFxns = hNode->hNodeMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnMsgRegisterNotify) -+ (hNode->hMsgQueue, -+ uEventMask & DSP_NODEMESSAGEREADY, uNotifyType, -+ hNotification); -+ } -+ -+ } -+ return status; -+} -+ -+/* -+ * ======== NODE_Run ======== -+ * Purpose: -+ * Start execution of a node's execute phase, or resume execution of a node -+ * that has been suspended (via NODE_NodePause()) on the DSP. Load the -+ * node's execute function if necessary. 
-+ */ -+DSP_STATUS NODE_Run(struct NODE_OBJECT *hNode) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ struct NODE_MGR *hNodeMgr; -+ enum NODE_TYPE nodeType; -+ enum NODE_STATE state; -+ u32 ulExecuteFxn; -+ u32 ulFxnAddr; -+ DSP_STATUS status = DSP_SOK; -+ u32 procId; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct DSP_PROCESSORSTATE procStatus; -+ struct PROC_OBJECT *hProcessor; -+ -+ DBC_Require(cRefs > 0); -+ GT_1trace(NODE_debugMask, GT_ENTER, "NODE_Run: hNode: 0x%x\n", hNode); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ hProcessor = hNode->hProcessor; -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in error state then don't attempt to run the node */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_Run:" -+ " proc Status 0x%x\n", procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ nodeType = NODE_GetType(hNode); -+ if (nodeType == NODE_DEVICE) -+ status = DSP_ENODETYPE; -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ hNodeMgr = hNode->hNodeMgr; -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ /* Enter critical section */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ state = NODE_GetState(hNode); -+ if (state != NODE_CREATED && state != NODE_PAUSED) -+ status = DSP_EWRONGSTATE; -+ -+ if (DSP_SUCCEEDED(status)) -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ -+ if (DSP_FAILED(status)) -+ goto func_cont1; -+ -+ if ((procId != DSP_UNIT) && (procId != IVA_UNIT)) -+ goto func_cont1; -+ -+ if (state == NODE_CREATED) { -+ /* If node's execute function is not loaded, load it */ -+ if (!(hNode->fLoaded) && hNode->fPhaseSplit) { -+ status = hNodeMgr->nldrFxns.pfnLoad(hNode->hNldrNode, -+ NLDR_EXECUTE); -+ if (DSP_SUCCEEDED(status)) { -+ hNode->fLoaded = true; -+ } else { -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Run: failed to load " -+ "execute code:0x%x\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Get address of node's execute function */ -+ if (procId == IVA_UNIT) -+ ulExecuteFxn = (u32) hNode->nodeEnv; -+ else { -+ status = GetFxnAddress(hNode, &ulExecuteFxn, -+ EXECUTEPHASE); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ ulFxnAddr = hNodeMgr->ulFxnAddrs[RMSEXECUTENODE]; -+ status = DISP_NodeRun(hNodeMgr->hDisp, hNode, ulFxnAddr, -+ ulExecuteFxn, hNode->nodeEnv); -+ } -+ } else if (state == NODE_PAUSED) { -+ ulFxnAddr = hNodeMgr->ulFxnAddrs[RMSCHANGENODEPRIORITY]; -+ status = DISP_NodeChangePriority(hNodeMgr->hDisp, hNode, -+ ulFxnAddr, hNode->nodeEnv, -+ NODE_GetPriority(hNode)); -+ } else { -+ /* We should never get here */ -+ DBC_Assert(false); -+ } -+func_cont1: -+ /* Update node state. */ -+ if (DSP_SUCCEEDED(status)) -+ NODE_SetState(hNode, NODE_RUNNING); -+ else /* Set state back to previous value */ -+ NODE_SetState(hNode, state); -+ /*End of SYNC_EnterCS */ -+ /* Exit critical section */ -+func_cont: -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ PROC_NotifyClients(hNode->hProcessor, -+ DSP_NODESTATECHANGE); -+ NTFY_Notify(hNode->hNtfy, DSP_NODESTATECHANGE); -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== NODE_Terminate ======== -+ * Purpose: -+ * Signal a node running on the DSP that it should exit its execute phase -+ * function. 
-+ */ -+DSP_STATUS NODE_Terminate(struct NODE_OBJECT *hNode, OUT DSP_STATUS *pStatus) -+{ -+ struct NODE_OBJECT *pNode = (struct NODE_OBJECT *)hNode; -+ struct NODE_MGR *hNodeMgr = NULL; -+ enum NODE_TYPE nodeType; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ enum NODE_STATE state; -+ struct DSP_MSG msg, killmsg; -+ DSP_STATUS status = DSP_SOK; -+ u32 procId, killTimeOut; -+ struct DEH_MGR *hDehMgr; -+ struct DSP_PROCESSORSTATE procStatus; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pStatus != NULL); -+ -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Terminate: hNode: 0x%x\n", hNode); -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ if (pNode->hProcessor == NULL) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, -+ "NODE_Terminate: pNode->hProcessor = 0x%x\n", -+ pNode->hProcessor); -+ goto func_end; -+ } -+ status = PROC_GetProcessorId(pNode->hProcessor, &procId); -+ -+ if (DSP_SUCCEEDED(status)) { -+ hNodeMgr = hNode->hNodeMgr; -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_TASK && nodeType != -+ NODE_DAISSOCKET) -+ status = DSP_ENODETYPE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Check node state */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ state = NODE_GetState(hNode); -+ if (state != NODE_RUNNING) { -+ status = DSP_EWRONGSTATE; -+ /* Set the exit status if node terminated on -+ * its own. */ -+ if (state == NODE_DONE) -+ *pStatus = hNode->nExitStatus; -+ -+ } else { -+ NODE_SetState(hNode, NODE_TERMINATING); -+ } -+ } -+ /* end of SYNC_EnterCS */ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* -+ * Send exit message. Do not change state to NODE_DONE -+ * here. That will be done in callback. -+ */ -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_Terminate: env = 0x%x\n", hNode->nodeEnv); -+ -+ status = PROC_GetState(pNode->hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ /* If processor is in error state then don't attempt to send -+ * A kill task command */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_4CLASS, "NODE_Terminate:" -+ " proc Status 0x%x\n", procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_cont; -+ } -+ -+ msg.dwCmd = RMS_EXIT; -+ msg.dwArg1 = hNode->nodeEnv; -+ killmsg.dwCmd = RMS_KILLTASK; -+ killmsg.dwArg1 = hNode->nodeEnv; -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ -+ if (hNode->uTimeout > MAXTIMEOUT) -+ killTimeOut = MAXTIMEOUT; -+ else -+ killTimeOut = (hNode->uTimeout)*2; -+ -+ status = (*pIntfFxns->pfnMsgPut)(hNode->hMsgQueue, &msg, -+ hNode->uTimeout); -+ if (DSP_SUCCEEDED(status)) { -+ /* Wait on synchronization object that will be -+ * posted in the callback on receiving RMS_EXIT -+ * message, or by NODE_Delete. Check for valid hNode, -+ * in case posted by NODE_Delete(). 
*/ -+ status = SYNC_WaitOnEvent(hNode->hSyncDone, -+ killTimeOut/2); -+ if (DSP_FAILED(status)) { -+ if (status == DSP_ETIMEOUT) { -+ status = (*pIntfFxns->pfnMsgPut) -+ (hNode->hMsgQueue, &killmsg, -+ hNode->uTimeout); -+ if (DSP_SUCCEEDED(status)) { -+ status = SYNC_WaitOnEvent -+ (hNode->hSyncDone, -+ killTimeOut/2); -+ if (DSP_FAILED(status)) { -+ /* Here it goes the part -+ * of the simulation of -+ * the DSP exception */ -+ DEV_GetDehMgr(hNodeMgr-> -+ hDevObject, &hDehMgr); -+ if (hDehMgr) { -+ (*pIntfFxns-> -+ pfnDehNotify)(hDehMgr, -+ DSP_SYSERROR, -+ DSP_EXCEPTIONABORT); -+ status = DSP_EFAIL; -+ } -+ } else -+ status = DSP_SOK; -+ } -+ } else -+ status = DSP_EFAIL; -+ } else /* Convert SYNC status to DSP status */ -+ status = DSP_SOK; -+ } -+ } -+func_cont: -+ if (DSP_SUCCEEDED(status)) { -+ /* Enter CS before getting exit status, in case node was -+ * deleted. */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ /* Make sure node wasn't deleted while we blocked */ -+ if (!MEM_IsValidHandle(hNode, NODE_SIGNATURE)) { -+ status = DSP_EFAIL; -+ } else { -+ *pStatus = hNode->nExitStatus; -+ GT_1trace(NODE_debugMask, GT_ENTER, -+ "NODE_Terminate: env = 0x%x " -+ "succeeded.\n", hNode->nodeEnv); -+ } -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ } /*End of SYNC_EnterCS */ -+func_end: -+ return status; -+} -+ -+/* -+ * ======== DeleteNode ======== -+ * Purpose: -+ * Free GPP resources allocated in NODE_Allocate() or NODE_Connect(). -+ */ -+static void DeleteNode(struct NODE_OBJECT *hNode, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ struct NODE_MGR *hNodeMgr; -+ struct CMM_XLATOROBJECT *hXlator; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ u32 i; -+ enum NODE_TYPE nodeType; -+ struct STREAM stream; -+ struct NODE_MSGARGS msgArgs; -+ struct NODE_TASKARGS taskArgs; -+#ifdef DSP_DMM_DEBUG -+ struct DMM_OBJECT *hDmmMgr; -+ struct PROC_OBJECT *pProcObject = -+ (struct PROC_OBJECT *)hNode->hProcessor; -+#endif -+ DSP_STATUS status; -+ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ hNodeMgr = hNode->hNodeMgr; -+ if (!MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)) -+ return; -+ hXlator = hNode->hXlator; -+ nodeType = NODE_GetType(hNode); -+ if (nodeType != NODE_DEVICE) { -+ msgArgs = hNode->createArgs.asa.msgArgs; -+ if (msgArgs.pData) -+ MEM_Free(msgArgs.pData); -+ -+ /* Free MSG queue */ -+ if (hNode->hMsgQueue) { -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ (*pIntfFxns->pfnMsgDeleteQueue) (hNode->hMsgQueue); -+ hNode->hMsgQueue = NULL; -+ -+ } -+ if (hNode->hSyncDone) -+ (void) SYNC_CloseEvent(hNode->hSyncDone); -+ -+ /* Free all stream info */ -+ if (hNode->inputs) { -+ for (i = 0; i < MaxInputs(hNode); i++) { -+ stream = hNode->inputs[i]; -+ FreeStream(hNodeMgr, stream); -+ } -+ MEM_Free(hNode->inputs); -+ hNode->inputs = NULL; -+ } -+ if (hNode->outputs) { -+ for (i = 0; i < MaxOutputs(hNode); i++) { -+ stream = hNode->outputs[i]; -+ FreeStream(hNodeMgr, stream); -+ } -+ MEM_Free(hNode->outputs); -+ hNode->outputs = NULL; -+ } -+ taskArgs = hNode->createArgs.asa.taskArgs; -+ if (taskArgs.strmInDef) { -+ for (i = 0; i < MaxInputs(hNode); i++) { -+ if (taskArgs.strmInDef[i].szDevice) { -+ MEM_Free(taskArgs.strmInDef[i]. -+ szDevice); -+ taskArgs.strmInDef[i].szDevice = NULL; -+ } -+ } -+ MEM_Free(taskArgs.strmInDef); -+ taskArgs.strmInDef = NULL; -+ } -+ if (taskArgs.strmOutDef) { -+ for (i = 0; i < MaxOutputs(hNode); i++) { -+ if (taskArgs.strmOutDef[i].szDevice) { -+ MEM_Free(taskArgs.strmOutDef[i]. 
-+ szDevice); -+ taskArgs.strmOutDef[i].szDevice = NULL; -+ } -+ } -+ MEM_Free(taskArgs.strmOutDef); -+ taskArgs.strmOutDef = NULL; -+ } -+ if (taskArgs.uDSPHeapResAddr) { -+ status = PROC_UnMap(hNode->hProcessor, -+ (void *)taskArgs.uDSPHeapAddr, -+ pr_ctxt); -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(NODE_debugMask, GT_5CLASS, -+ "DSPProcessor_UnMap succeeded.\n"); -+ } else { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "DSPProcessor_UnMap failed." -+ " Status = 0x%x\n", (u32)status); -+ } -+ status = PROC_UnReserveMemory(hNode->hProcessor, -+ (void *)taskArgs.uDSPHeapResAddr); -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(NODE_debugMask, GT_5CLASS, -+ "DSPProcessor_UnReserveMemory " -+ "succeeded.\n"); -+ } else { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "DSPProcessor_UnReserveMemory " -+ "failed. Status = 0x%x\n", -+ (u32)status); -+ } -+#ifdef DSP_DMM_DEBUG -+ status = DMM_GetHandle(pProcObject, &hDmmMgr); -+ if (DSP_SUCCEEDED(status)) -+ DMM_MemMapDump(hDmmMgr); -+#endif -+ } -+ } -+ if (nodeType != NODE_MESSAGE) { -+ if (hNode->streamConnect) { -+ MEM_Free(hNode->streamConnect); -+ hNode->streamConnect = NULL; -+ } -+ } -+ if (hNode->pstrDevName) { -+ MEM_Free(hNode->pstrDevName); -+ hNode->pstrDevName = NULL; -+ } -+ -+ if (hNode->hNtfy) { -+ NTFY_Delete(hNode->hNtfy); -+ hNode->hNtfy = NULL; -+ } -+ -+ /* These were allocated in DCD_GetObjectDef (via NODE_Allocate) */ -+ if (hNode->dcdProps.objData.nodeObj.pstrCreatePhaseFxn) { -+ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrCreatePhaseFxn); -+ hNode->dcdProps.objData.nodeObj.pstrCreatePhaseFxn = NULL; -+ } -+ -+ if (hNode->dcdProps.objData.nodeObj.pstrExecutePhaseFxn) { -+ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrExecutePhaseFxn); -+ hNode->dcdProps.objData.nodeObj.pstrExecutePhaseFxn = NULL; -+ } -+ -+ if (hNode->dcdProps.objData.nodeObj.pstrDeletePhaseFxn) { -+ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrDeletePhaseFxn); -+ hNode->dcdProps.objData.nodeObj.pstrDeletePhaseFxn = NULL; -+ } -+ -+ if (hNode->dcdProps.objData.nodeObj.pstrIAlgName) { -+ MEM_Free(hNode->dcdProps.objData.nodeObj.pstrIAlgName); -+ hNode->dcdProps.objData.nodeObj.pstrIAlgName = NULL; -+ } -+ -+ /* Free all SM address translator resources */ -+ if (hXlator) { -+ (void) CMM_XlatorDelete(hXlator, TRUE); /* force free */ -+ hXlator = NULL; -+ } -+ -+ if (hNode->hNldrNode) { -+ hNodeMgr->nldrFxns.pfnFree(hNode->hNldrNode); -+ hNode->hNldrNode = NULL; -+ } -+ -+ MEM_FreeObject(hNode); -+ hNode = NULL; -+} -+ -+/* -+ * ======== DeleteNodeMgr ======== -+ * Purpose: -+ * Frees the node manager. 
-+ */ -+static void DeleteNodeMgr(struct NODE_MGR *hNodeMgr) -+{ -+ struct NODE_OBJECT *hNode; -+ -+ if (MEM_IsValidHandle(hNodeMgr, NODEMGR_SIGNATURE)) { -+ /* Free resources */ -+ if (hNodeMgr->hDcdMgr) -+ DCD_DestroyManager(hNodeMgr->hDcdMgr); -+ -+ /* Remove any elements remaining in lists */ -+ if (hNodeMgr->nodeList) { -+ while ((hNode = -+ (struct NODE_OBJECT *)LST_GetHead(hNodeMgr-> -+ nodeList))) -+ DeleteNode(hNode, NULL); -+ -+ DBC_Assert(LST_IsEmpty(hNodeMgr->nodeList)); -+ LST_Delete(hNodeMgr->nodeList); -+ } -+ if (hNodeMgr->hNtfy) -+ NTFY_Delete(hNodeMgr->hNtfy); -+ -+ if (hNodeMgr->pipeMap) -+ GB_delete(hNodeMgr->pipeMap); -+ -+ if (hNodeMgr->pipeDoneMap) -+ GB_delete(hNodeMgr->pipeDoneMap); -+ -+ if (hNodeMgr->chnlMap) -+ GB_delete(hNodeMgr->chnlMap); -+ -+ if (hNodeMgr->dmaChnlMap) -+ GB_delete(hNodeMgr->dmaChnlMap); -+ -+ if (hNodeMgr->zChnlMap) -+ GB_delete(hNodeMgr->zChnlMap); -+ -+ if (hNodeMgr->hDisp) -+ DISP_Delete(hNodeMgr->hDisp); -+ -+ if (hNodeMgr->hSync) -+ SYNC_DeleteCS(hNodeMgr->hSync); -+ -+ if (hNodeMgr->hStrmMgr) -+ STRM_Delete(hNodeMgr->hStrmMgr); -+ -+ /* Delete the loader */ -+ if (hNodeMgr->hNldr) -+ hNodeMgr->nldrFxns.pfnDelete(hNodeMgr->hNldr); -+ -+ if (hNodeMgr->fLoaderInit) -+ hNodeMgr->nldrFxns.pfnExit(); -+ -+ MEM_FreeObject(hNodeMgr); -+ } -+} -+ -+/* -+ * ======== FillStreamConnect ======== -+ * Purpose: -+ * Fills stream information. -+ */ -+static void FillStreamConnect(struct NODE_OBJECT *hNode1, -+ struct NODE_OBJECT *hNode2, -+ u32 uStream1, u32 uStream2) -+{ -+ u32 uStrmIndex; -+ struct DSP_STREAMCONNECT *pStrm1 = NULL; -+ struct DSP_STREAMCONNECT *pStrm2 = NULL; -+ enum NODE_TYPE node1Type = NODE_TASK; -+ enum NODE_TYPE node2Type = NODE_TASK; -+ -+ node1Type = NODE_GetType(hNode1); -+ node2Type = NODE_GetType(hNode2); -+ if (hNode1 != (struct NODE_OBJECT *)DSP_HGPPNODE) { -+ -+ if (node1Type != NODE_DEVICE) { -+ uStrmIndex = hNode1->uNumInputs + -+ hNode1->uNumOutputs - 1; -+ pStrm1 = &(hNode1->streamConnect[uStrmIndex]); -+ pStrm1->cbStruct = sizeof(struct DSP_STREAMCONNECT); -+ pStrm1->uThisNodeStreamIndex = uStream1; -+ } -+ -+ if (hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE) { -+ /* NODE == > NODE */ -+ if (node1Type != NODE_DEVICE) { -+ pStrm1->hConnectedNode = hNode2; -+ pStrm1->uiConnectedNodeID = hNode2->nodeId; -+ pStrm1->uConnectedNodeStreamIndex = uStream2; -+ pStrm1->lType = CONNECTTYPE_NODEOUTPUT; -+ } -+ if (node2Type != NODE_DEVICE) { -+ uStrmIndex = hNode2->uNumInputs + -+ hNode2->uNumOutputs - 1; -+ pStrm2 = &(hNode2->streamConnect[uStrmIndex]); -+ pStrm2->cbStruct = -+ sizeof(struct DSP_STREAMCONNECT); -+ pStrm2->uThisNodeStreamIndex = uStream2; -+ pStrm2->hConnectedNode = hNode1; -+ pStrm2->uiConnectedNodeID = hNode1->nodeId; -+ pStrm2->uConnectedNodeStreamIndex = uStream1; -+ pStrm2->lType = CONNECTTYPE_NODEINPUT; -+ } -+ } else if (node1Type != NODE_DEVICE) -+ pStrm1->lType = CONNECTTYPE_GPPOUTPUT; -+ } else { -+ /* GPP == > NODE */ -+ DBC_Assert(hNode2 != (struct NODE_OBJECT *)DSP_HGPPNODE); -+ uStrmIndex = hNode2->uNumInputs + hNode2->uNumOutputs - 1; -+ pStrm2 = &(hNode2->streamConnect[uStrmIndex]); -+ pStrm2->cbStruct = sizeof(struct DSP_STREAMCONNECT); -+ pStrm2->uThisNodeStreamIndex = uStream2; -+ pStrm2->lType = CONNECTTYPE_GPPINPUT; -+ } -+} -+ -+/* -+ * ======== FillStreamDef ======== -+ * Purpose: -+ * Fills Stream attributes. 
-+ */ -+static void FillStreamDef(struct NODE_OBJECT *hNode, -+ struct NODE_STRMDEF *pstrmDef, -+ struct DSP_STRMATTR *pAttrs) -+{ -+ struct NODE_MGR *hNodeMgr = hNode->hNodeMgr; -+ -+ if (pAttrs != NULL) { -+ pstrmDef->uNumBufs = pAttrs->uNumBufs; -+ pstrmDef->uBufsize = pAttrs->uBufsize / hNodeMgr-> -+ uDSPDataMauSize; -+ pstrmDef->uSegid = pAttrs->uSegid; -+ pstrmDef->uAlignment = pAttrs->uAlignment; -+ pstrmDef->uTimeout = pAttrs->uTimeout; -+ } else { -+ pstrmDef->uNumBufs = DEFAULTNBUFS; -+ pstrmDef->uBufsize = DEFAULTBUFSIZE / hNodeMgr-> -+ uDSPDataMauSize; -+ pstrmDef->uSegid = DEFAULTSEGID; -+ pstrmDef->uAlignment = DEFAULTALIGNMENT; -+ pstrmDef->uTimeout = DEFAULTTIMEOUT; -+ } -+} -+ -+/* -+ * ======== FreeStream ======== -+ * Purpose: -+ * Updates the channel mask and frees the pipe id. -+ */ -+static void FreeStream(struct NODE_MGR *hNodeMgr, struct STREAM stream) -+{ -+ /* Free up the pipe id unless other node has not yet been deleted. */ -+ if (stream.type == NODECONNECT) { -+ if (GB_test(hNodeMgr->pipeDoneMap, stream.devId)) { -+ /* The other node has already been deleted */ -+ GB_clear(hNodeMgr->pipeDoneMap, stream.devId); -+ GB_clear(hNodeMgr->pipeMap, stream.devId); -+ } else { -+ /* The other node has not been deleted yet */ -+ GB_set(hNodeMgr->pipeDoneMap, stream.devId); -+ } -+ } else if (stream.type == HOSTCONNECT) { -+ if (stream.devId < hNodeMgr->ulNumChnls) { -+ GB_clear(hNodeMgr->chnlMap, stream.devId); -+ } else if (stream.devId < (2 * hNodeMgr->ulNumChnls)) { -+ /* dsp-dma */ -+ GB_clear(hNodeMgr->dmaChnlMap, stream.devId - -+ (1 * hNodeMgr->ulNumChnls)); -+ } else if (stream.devId < (3 * hNodeMgr->ulNumChnls)) { -+ /* zero-copy */ -+ GB_clear(hNodeMgr->zChnlMap, stream.devId - -+ (2 * hNodeMgr->ulNumChnls)); -+ } -+ } -+} -+ -+/* -+ * ======== GetFxnAddress ======== -+ * Purpose: -+ * Retrieves the address for create, execute or delete phase for a node. -+ */ -+static DSP_STATUS GetFxnAddress(struct NODE_OBJECT *hNode, u32 *pulFxnAddr, -+ u32 uPhase) -+{ -+ char *pstrFxnName = NULL; -+ struct NODE_MGR *hNodeMgr = hNode->hNodeMgr; -+ DSP_STATUS status = DSP_SOK; -+ DBC_Require(NODE_GetType(hNode) == NODE_TASK || -+ NODE_GetType(hNode) == NODE_DAISSOCKET || -+ NODE_GetType(hNode) == NODE_MESSAGE); -+ -+ switch (uPhase) { -+ case CREATEPHASE: -+ pstrFxnName = hNode->dcdProps.objData.nodeObj. -+ pstrCreatePhaseFxn; -+ break; -+ case EXECUTEPHASE: -+ pstrFxnName = hNode->dcdProps.objData.nodeObj. -+ pstrExecutePhaseFxn; -+ break; -+ case DELETEPHASE: -+ pstrFxnName = hNode->dcdProps.objData.nodeObj. -+ pstrDeletePhaseFxn; -+ break; -+ default: -+ /* Should never get here */ -+ DBC_Assert(false); -+ break; -+ } -+ -+ status = hNodeMgr->nldrFxns.pfnGetFxnAddr(hNode->hNldrNode, pstrFxnName, -+ pulFxnAddr); -+ -+ return status; -+} -+ -+/* -+ * ======== GetNodeInfo ======== -+ * Purpose: -+ * Retrieves the node information. -+ */ -+void GetNodeInfo(struct NODE_OBJECT *hNode, struct DSP_NODEINFO *pNodeInfo) -+{ -+ u32 i; -+ -+ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ DBC_Require(pNodeInfo != NULL); -+ -+ pNodeInfo->cbStruct = sizeof(struct DSP_NODEINFO); -+ pNodeInfo->nbNodeDatabaseProps = hNode->dcdProps.objData.nodeObj. 
-+ ndbProps; -+ pNodeInfo->uExecutionPriority = hNode->nPriority; -+ pNodeInfo->hDeviceOwner = hNode->hDeviceOwner; -+ pNodeInfo->uNumberStreams = hNode->uNumInputs + hNode->uNumOutputs; -+ pNodeInfo->uNodeEnv = hNode->nodeEnv; -+ -+ pNodeInfo->nsExecutionState = NODE_GetState(hNode); -+ -+ /* Copy stream connect data */ -+ for (i = 0; i < hNode->uNumInputs + hNode->uNumOutputs; i++) -+ pNodeInfo->scStreamConnection[i] = hNode->streamConnect[i]; -+ -+} -+ -+/* -+ * ======== GetNodeProps ======== -+ * Purpose: -+ * Retrieve node properties. -+ */ -+static DSP_STATUS GetNodeProps(struct DCD_MANAGER *hDcdMgr, -+ struct NODE_OBJECT *hNode, -+ CONST struct DSP_UUID *pNodeId, -+ struct DCD_GENERICOBJ *pdcdProps) -+{ -+ u32 uLen; -+ struct NODE_MSGARGS *pMsgArgs; -+ struct NODE_TASKARGS *pTaskArgs; -+ enum NODE_TYPE nodeType = NODE_TASK; -+ struct DSP_NDBPROPS *pndbProps = &(pdcdProps->objData.nodeObj.ndbProps); -+ DSP_STATUS status = DSP_SOK; -+#ifdef DEBUG -+ char szUuid[MAXUUIDLEN]; -+#endif -+ -+ status = DCD_GetObjectDef(hDcdMgr, (struct DSP_UUID *)pNodeId, -+ DSP_DCDNODETYPE, pdcdProps); -+ -+ if (DSP_SUCCEEDED(status)) { -+ hNode->nType = nodeType = pndbProps->uNodeType; -+ -+#ifdef DEBUG -+ /* Create UUID value to set in registry. */ -+ UUID_UuidToString((struct DSP_UUID *)pNodeId, szUuid, -+ MAXUUIDLEN); -+ DBG_Trace(DBG_LEVEL7, "\n** (node) UUID: %s\n", szUuid); -+#endif -+ -+ /* Fill in message args that come from NDB */ -+ if (nodeType != NODE_DEVICE) { -+ pMsgArgs = &(hNode->createArgs.asa.msgArgs); -+ pMsgArgs->uSegid = pdcdProps->objData.nodeObj.uMsgSegid; -+ pMsgArgs->uNotifyType = pdcdProps->objData.nodeObj. -+ uMsgNotifyType; -+ pMsgArgs->uMaxMessages = pndbProps->uMessageDepth; -+#ifdef DEBUG -+ DBG_Trace(DBG_LEVEL7, -+ "** (node) Max Number of Messages: 0x%x\n", -+ pMsgArgs->uMaxMessages); -+#endif -+ } else { -+ /* Copy device name */ -+ DBC_Require(pndbProps->acName); -+ uLen = strlen(pndbProps->acName); -+ DBC_Assert(uLen < MAXDEVNAMELEN); -+ hNode->pstrDevName = MEM_Calloc(uLen + 1, MEM_PAGED); -+ if (hNode->pstrDevName == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ strncpy(hNode->pstrDevName, -+ pndbProps->acName, uLen); -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Fill in create args that come from NDB */ -+ if (nodeType == NODE_TASK || nodeType == NODE_DAISSOCKET) { -+ pTaskArgs = &(hNode->createArgs.asa.taskArgs); -+ pTaskArgs->nPriority = pndbProps->iPriority; -+ pTaskArgs->uStackSize = pndbProps->uStackSize; -+ pTaskArgs->uSysStackSize = pndbProps->uSysStackSize; -+ pTaskArgs->uStackSeg = pndbProps->uStackSeg; -+#ifdef DEBUG -+ DBG_Trace(DBG_LEVEL7, -+ "** (node) Priority: 0x%x\n" "** (node) Stack" -+ " Size: 0x%x words\n" "** (node) System Stack" -+ " Size: 0x%x words\n" "** (node) Stack" -+ " Segment: 0x%x\n\n", -+ "** (node) profile count : 0x%x \n \n", -+ pTaskArgs->nPriority, pTaskArgs->uStackSize, -+ pTaskArgs->uSysStackSize, -+ pTaskArgs->uStackSeg, -+ pndbProps->uCountProfiles); -+#endif -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== GetProcProps ======== -+ * Purpose: -+ * Retrieve the processor properties. 
-+ */ -+static DSP_STATUS GetProcProps(struct NODE_MGR *hNodeMgr, -+ struct DEV_OBJECT *hDevObject) -+{ -+ struct CFG_DEVNODE *hDevNode; -+ struct CFG_HOSTRES hostRes; -+ DSP_STATUS status = DSP_SOK; -+ -+ status = DEV_GetDevNode(hDevObject, &hDevNode); -+ if (DSP_SUCCEEDED(status)) -+ status = CFG_GetHostResources(hDevNode, &hostRes); -+ -+ if (DSP_SUCCEEDED(status)) { -+ hNodeMgr->ulChnlOffset = hostRes.dwChnlOffset; -+ hNodeMgr->ulChnlBufSize = hostRes.dwChnlBufSize; -+ hNodeMgr->ulNumChnls = hostRes.dwNumChnls; -+ -+ /* -+ * PROC will add an API to get DSP_PROCESSORINFO. -+ * Fill in default values for now. -+ */ -+ /* TODO -- Instead of hard coding, take from registry */ -+ hNodeMgr->procFamily = 6000; -+ hNodeMgr->procType = 6410; -+ hNodeMgr->nMinPri = DSP_NODE_MIN_PRIORITY; -+ hNodeMgr->nMaxPri = DSP_NODE_MAX_PRIORITY; -+ hNodeMgr->uDSPWordSize = DSPWORDSIZE; -+ hNodeMgr->uDSPDataMauSize = DSPWORDSIZE; -+ hNodeMgr->uDSPMauSize = 1; -+ -+ } -+ return status; -+} -+ -+ -+ -+/* -+ * ======== NODE_GetUUIDProps ======== -+ * Purpose: -+ * Fetch Node UUID properties from DCD/DOF file. -+ */ -+DSP_STATUS NODE_GetUUIDProps(DSP_HPROCESSOR hProcessor, -+ IN CONST struct DSP_UUID *pNodeId, -+ OUT struct DSP_NDBPROPS *pNodeProps) -+{ -+ struct NODE_MGR *hNodeMgr = NULL; -+ struct DEV_OBJECT *hDevObject; -+ DSP_STATUS status = DSP_SOK; -+ struct DCD_NODEPROPS dcdNodeProps; -+ struct DSP_PROCESSORSTATE procStatus; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hProcessor != NULL); -+ DBC_Require(pNodeId != NULL); -+ -+ if (hProcessor == NULL || pNodeId == NULL) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ status = PROC_GetState(hProcessor, &procStatus, -+ sizeof(struct DSP_PROCESSORSTATE)); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* If processor is in error state then don't attempt -+ to send the message */ -+ if (procStatus.iState == PROC_ERROR) { -+ GT_1trace(NODE_debugMask, GT_5CLASS, -+ "NODE_GetUUIDProps: proc Status 0x%x\n", -+ procStatus.iState); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ -+ GT_3trace(NODE_debugMask, GT_ENTER, -+ "NODE_GetUUIDProps: " "\thProcessor: " -+ "0x%x\tpNodeId: 0x%x" "\tpNodeProps: 0x%x\n", hProcessor, -+ pNodeId, pNodeProps); -+ -+ status = PROC_GetDevObject(hProcessor, &hDevObject); -+ if (DSP_SUCCEEDED(status) && hDevObject != NULL) { -+ status = DEV_GetNodeManager(hDevObject, &hNodeMgr); -+ if (hNodeMgr == NULL) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ } -+ -+ /* -+ * Enter the critical section. This is needed because -+ * DCD_GetObjectDef will ultimately end up calling DBLL_open/close, -+ * which needs to be protected in order to not corrupt the zlib manager -+ * (COD). -+ */ -+ status = SYNC_EnterCS(hNodeMgr->hSync); -+ -+ if (DSP_SUCCEEDED(status)) { -+ dcdNodeProps.pstrCreatePhaseFxn = NULL; -+ dcdNodeProps.pstrExecutePhaseFxn = NULL; -+ dcdNodeProps.pstrDeletePhaseFxn = NULL; -+ dcdNodeProps.pstrIAlgName = NULL; -+ -+ status = DCD_GetObjectDef(hNodeMgr->hDcdMgr, -+ (struct DSP_UUID *) pNodeId, -+ DSP_DCDNODETYPE, -+ (struct DCD_GENERICOBJ *) &dcdNodeProps); -+ if (DSP_SUCCEEDED(status)) { -+ *pNodeProps = dcdNodeProps.ndbProps; -+ if (dcdNodeProps.pstrCreatePhaseFxn) -+ MEM_Free(dcdNodeProps.pstrCreatePhaseFxn); -+ -+ if (dcdNodeProps.pstrExecutePhaseFxn) -+ MEM_Free(dcdNodeProps.pstrExecutePhaseFxn); -+ -+ if (dcdNodeProps.pstrDeletePhaseFxn) -+ MEM_Free(dcdNodeProps.pstrDeletePhaseFxn); -+ -+ if (dcdNodeProps.pstrIAlgName) -+ MEM_Free(dcdNodeProps.pstrIAlgName); -+ } -+ /* Leave the critical section, we're done. 
*/ -+ (void)SYNC_LeaveCS(hNodeMgr->hSync); -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== GetRMSFxns ======== -+ * Purpose: -+ * Retrieve the RMS functions. -+ */ -+static DSP_STATUS GetRMSFxns(struct NODE_MGR *hNodeMgr) -+{ -+ s32 i; -+ struct DEV_OBJECT *hDev = hNodeMgr->hDevObject; -+ DSP_STATUS status = DSP_SOK; -+ -+ static char *pszFxns[NUMRMSFXNS] = { -+ "RMS_queryServer", /* RMSQUERYSERVER */ -+ "RMS_configureServer", /* RMSCONFIGURESERVER */ -+ "RMS_createNode", /* RMSCREATENODE */ -+ "RMS_executeNode", /* RMSEXECUTENODE */ -+ "RMS_deleteNode", /* RMSDELETENODE */ -+ "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */ -+ "RMS_readMemory", /* RMSREADMEMORY */ -+ "RMS_writeMemory", /* RMSWRITEMEMORY */ -+ "RMS_copy", /* RMSCOPY */ -+ }; -+ -+ for (i = 0; i < NUMRMSFXNS; i++) { -+ status = DEV_GetSymbol(hDev, pszFxns[i], -+ &(hNodeMgr->ulFxnAddrs[i])); -+ if (DSP_FAILED(status)) { -+ if (status == COD_E_SYMBOLNOTFOUND) { -+ /* -+ * May be loaded dynamically (in the future), -+ * but return an error for now. -+ */ -+ GT_1trace(NODE_debugMask, GT_6CLASS, -+ "RMS function: %s " -+ "currently not loaded\n", pszFxns[i]); -+ } else { -+ GT_2trace(NODE_debugMask, GT_6CLASS, -+ "GetRMSFxns: Symbol not " -+ "found: %s\tstatus = 0x%x\n", -+ pszFxns[i], status); -+ break; -+ } -+ } -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== Ovly ======== -+ * Purpose: -+ * Called during overlay.Sends command to RMS to copy a block of data. -+ */ -+static u32 Ovly(void *pPrivRef, u32 ulDspRunAddr, u32 ulDspLoadAddr, -+ u32 ulNumBytes, u32 nMemSpace) -+{ -+ struct NODE_OBJECT *hNode = (struct NODE_OBJECT *)pPrivRef; -+ struct NODE_MGR *hNodeMgr; -+ u32 ulBytes = 0; -+ u32 ulSize; -+ u32 ulTimeout; -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *hWmdContext; -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ -+ -+ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ -+ hNodeMgr = hNode->hNodeMgr; -+ -+ ulSize = ulNumBytes / hNodeMgr->uDSPWordSize; -+ ulTimeout = hNode->uTimeout; -+ -+ /* Call new MemCopy function */ -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ status = DEV_GetWMDContext(hNodeMgr->hDevObject, &hWmdContext); -+ status = (*pIntfFxns->pfnBrdMemCopy)(hWmdContext, ulDspRunAddr, -+ ulDspLoadAddr, ulNumBytes, (u32) nMemSpace); -+ -+ if (DSP_SUCCEEDED(status)) -+ ulBytes = ulNumBytes; -+ -+ return ulBytes; -+} -+ -+/* -+ * ======== Write ======== -+ */ -+static u32 Write(void *pPrivRef, u32 ulDspAddr, void *pBuf, -+ u32 ulNumBytes, u32 nMemSpace) -+{ -+ struct NODE_OBJECT *hNode = (struct NODE_OBJECT *) pPrivRef; -+ struct NODE_MGR *hNodeMgr; -+ u16 memType; -+ u32 ulTimeout; -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *hWmdContext; -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ -+ -+ DBC_Require(MEM_IsValidHandle(hNode, NODE_SIGNATURE)); -+ DBC_Require(nMemSpace & DBLL_CODE || nMemSpace & DBLL_DATA); -+ -+ hNodeMgr = hNode->hNodeMgr; -+ -+ ulTimeout = hNode->uTimeout; -+ memType = (nMemSpace & DBLL_CODE) ? 
RMS_CODE : RMS_DATA; -+ -+ /* Call new MemWrite function */ -+ pIntfFxns = hNodeMgr->pIntfFxns; -+ status = DEV_GetWMDContext(hNodeMgr->hDevObject, &hWmdContext); -+ status = (*pIntfFxns->pfnBrdMemWrite) (hWmdContext, pBuf, ulDspAddr, -+ ulNumBytes, memType); -+ -+ return ulNumBytes; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/proc.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/proc.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/proc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/proc.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,2061 @@ -+/* -+ * proc.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== proc.c ======== -+ * Description: -+ * Processor interface at the driver level. -+ * -+ * Public Functions: -+ * PROC_Attach -+ * PROC_Ctrl -+ * PROC_Detach -+ * PROC_EnumNodes -+ * PROC_GetResourceInfo -+ * PROC_Exit -+ * PROC_FlushMemory -+ * PROC_GetState -+ * PROC_GetProcessorId -+ * PROC_GetTrace -+ * PROC_Init -+ * PROC_Load -+ * PROC_Map -+ * PROC_NotifyClients -+ * PROC_RegisterNotify -+ * PROC_ReserveMemory -+ * PROC_Start -+ * PROC_UnMap -+ * PROC_UnReserveMemory -+ * PROC_InvalidateMemory -+ -+ *! Revision History -+ *! ======== ======== -+ *! 04-Apr-2007 sh Added PROC_InvalidateMemory API -+ *! 19-Apr-2004 sb Aligned DMM definitions with Symbian -+ *! Used MEM_FlushCache instead of OS specific API -+ *! Integrated Alan's code review updates -+ *! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature -+ *! 08-Mar-2004 vp Added g_pszLastCoff member to PROC_OBJECT. -+ *! This is required for multiprocessor environment. -+ *! 09-Feb-2004 vp Added PROC_GetProcessorID function -+ *! 22-Apr-2003 vp Fixed issue with the string that stores coff file name -+ *! 03-Apr-2003 sb Fix DEH deregistering bug -+ *! 26-Mar-2003 vp Commented the call to DSP deep sleep in PROC_Start function. -+ *! 18-Feb-2003 vp Code review updates. -+ *! 18-Oct-2002 vp Ported to Linux platform. -+ *! 22-May-2002 sg Do IOCTL-to-PWR translation before calling PWR_SleepDSP. -+ *! 14-May-2002 sg Use CSL_Atoi() instead of atoi(). -+ *! 13-May-2002 sg Propagate PWR return codes upwards. -+ *! 07-May-2002 sg Added check for, and call to PWR functions in PROC_Ctrl. -+ *! 02-May-2002 sg Added "nap" mode: put DSP to sleep once booted. -+ *! 01-Apr-2002 jeh Assume word addresses in PROC_GetTrace(). -+ *! 29-Nov-2001 jeh Don't call DEH function if hDehMgr == NULL. -+ *! 05-Nov-2001 kc: Updated PROC_RegisterNotify and PROC_GetState to support -+ *! DEH module. -+ *! 09-Oct-2001 jeh Fix number of bytes calculated in PROC_GetTrace(). -+ *! 11-Sep-2001 jeh Delete MSG manager in PROC_Monitor() to fix memory leak. -+ *! 29-Aug-2001 rr: DCD_AutoRegister and IOOnLoaded moved before COD_LoadBase -+ *! to facilitate the external loading. -+ *! 14-Aug-2001 ag DCD_AutoRegister() now called before IOOnLoaded() fxn. -+ *! 21-Jun-2001 rr: MSG_Create is done only the first time. -+ *! 
02-May-2001 jeh Return failure in PROC_Load if IOOnLoaded function returns -+ *! error other than E_NOTIMPL. -+ *! 03-Apr-2001 sg: Changed DSP_DCD_ENOAUTOREGISTER to DSP_EDCDNOAUTOREGISTER. -+ *! 13-Feb-2001 kc: DSP/BIOS Bridge name updates. -+ *! 05-Jan-2001 rr: PROC_LOAD MSG_Create error is checked. -+ *! 15-Dec-2000 rr: IoOnLoaded is checked for WSX_STATUS. We fail to load -+ *! if DEV_Create2 fails; ie, no non-RMS targets can be -+ *! loaded. -+ *! 12-Dec-2000 rr: PROC_Start's DEV_Create2 is checked for WSX_STATUS. -+ *! 28-Nov-2000 jeh Added call to IO OnLoaded function to PROC_Load(). -+ *! 29-Nov-2000 rr: Incorporated code review changes. -+ *! 03-Nov-2000 rr: Auto_Register happens after PROC_Load. -+ *! 06-Oct-2000 rr: Updated to ver 0.9. PROC_Start calls DEV_Create2 and -+ *! WMD_BRD_STOP is always followed by DEV_Destroy2. -+ *! 05-Sep-2000 rr: PROC_GetTrace calculates the Trace symbol for 55 in a -+ *! different way. -+ *! 10-Aug-2000 rr: PROC_NotifyClients, PROC_GetProcessorHandle Added -+ *! 07-Aug-2000 rr: PROC_IDLE/SYNCINIT/UNKNOWN state removed. -+ *! WMD fxns are checked for WSX_STATUS. -+ *! PROC_Attach does not alter the state of the BRD. -+ *! PROC_Run removed. -+ *! 04-Aug-2000 rr: All the functions return DSP_EHANDLE if proc handle is -+ *! invalid -+ *! 27-Jul-2000 rr: PROC_GetTrace and PROC_Load implemented. Updated to -+ *! ver 0.8 API. -+ *! 06-Jul-2000 rr: Created. -+ */ -+ -+/* ------------------------------------ Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+/* ----------------------------------- Mini Driver */ -+#include -+#include -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+#include -+ -+#ifndef RES_CLEANUP_DISABLE -+#include -+#endif -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define PROC_SIGNATURE 0x434F5250 /* "PROC" (in reverse). */ -+#define MAXCMDLINELEN 255 -+#define PROC_ENVPROCID "PROC_ID=%d" -+#define MAXPROCIDLEN (8 + 5) -+#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ -+#define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */ -+#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ -+ -+#define DSP_CACHE_LINE 128 -+ -+extern char *iva_img; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask PROC_DebugMask = { NULL, NULL }; /* WCD MGR Mask */ -+#endif -+ -+static u32 cRefs; -+ -+struct SYNC_CSOBJECT *hProcLock; /* For critical sections */ -+ -+/* ----------------------------------- Function Prototypes */ -+static DSP_STATUS PROC_Monitor(struct PROC_OBJECT *hProcessor); -+static s32 GetEnvpCount(char **envp); -+static char **PrependEnvp(char **newEnvp, char **envp, s32 cEnvp, s32 cNewEnvp, -+ char *szVar); -+ -+/* -+ * ======== PROC_CleanupAllResources ===== -+ * Purpose: -+ * Funtion to clean the process resources. 
-+ * This function is intended to be called when the -+ * processor is in error state -+ */ -+DSP_STATUS PROC_CleanupAllResources(void) -+{ -+ DSP_STATUS dsp_status = DSP_SOK; -+ HANDLE hDrvObject = NULL; -+ struct PROCESS_CONTEXT *pCtxtclosed = NULL; -+ struct PROC_OBJECT *proc_obj_ptr, *temp; -+ -+ GT_0trace(PROC_DebugMask, GT_ENTER, "PROC_CleanupAllResources\n"); -+ -+ dsp_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ if (DSP_FAILED(dsp_status)) -+ goto func_end; -+ -+ DRV_GetProcCtxtList(&pCtxtclosed, (struct DRV_OBJECT *)hDrvObject); -+ -+ while (pCtxtclosed != NULL) { -+ if (current->tgid != pCtxtclosed->pid) { -+ GT_1trace(PROC_DebugMask, GT_5CLASS, -+ "***Cleanup of " -+ "process***%d\n", pCtxtclosed->pid); -+ list_for_each_entry_safe(proc_obj_ptr, temp, -+ &pCtxtclosed->processor_list, -+ proc_object) { -+ PROC_Detach(proc_obj_ptr, pCtxtclosed); -+ } -+ } -+ pCtxtclosed = pCtxtclosed->next; -+ } -+ -+ WMD_DEH_ReleaseDummyMem(); -+func_end: -+ return dsp_status; -+} -+ -+/* -+ * ======== PROC_Attach ======== -+ * Purpose: -+ * Prepare for communication with a particular DSP processor, and return -+ * a handle to the processor object. -+ */ -+DSP_STATUS -+PROC_Attach(u32 uProcessor, OPTIONAL CONST struct DSP_PROCESSORATTRIN *pAttrIn, -+ OUT DSP_HPROCESSOR *phProcessor, struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEV_OBJECT *hDevObject; -+ struct PROC_OBJECT *pProcObject = NULL; -+ struct MGR_OBJECT *hMgrObject = NULL; -+ struct DRV_OBJECT *hDrvObject = NULL; -+ u32 devType; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phProcessor != NULL); -+ -+ GT_3trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Attach, args:\n\t" -+ "uProcessor: 0x%x\n\tpAttrIn: 0x%x\n\tphProcessor:" -+ "0x%x\n", uProcessor, pAttrIn, phProcessor); -+ -+ /* Get the Driver and Manager Object Handles */ -+ status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); -+ if (DSP_SUCCEEDED(status)) { -+ status = CFG_GetObject((u32 *)&hMgrObject, REG_MGR_OBJECT); -+ if (DSP_FAILED(status)) { -+ /* don't propogate CFG errors from this PROC function */ -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach: DSP_FAILED to get" -+ "the Manager Object.\n", status); -+ } -+ } else { -+ /* don't propogate CFG errors from this PROC function */ -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach: failed to get the" -+ " DriverObject, 0x%x!\n", status); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Get the Device Object */ -+ status = DRV_GetDevObject(uProcessor, hDrvObject, &hDevObject); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach: failed to get" -+ " DevObject, 0x%x!\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetDevType(hDevObject, &devType); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach: failed to get" -+ " DevType, 0x%x!\n", status); -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* If we made it this far, create the Proceesor object: */ -+ MEM_AllocObject(pProcObject, struct PROC_OBJECT, PROC_SIGNATURE); -+ /* Fill out the Processor Object: */ -+ if (pProcObject == NULL) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach:Out of memeory \n"); -+ status = DSP_EFAIL; -+ goto func_end; -+ } -+ pProcObject->hDevObject = hDevObject; -+ pProcObject->hMgrObject = hMgrObject; -+ pProcObject->uProcessor = devType; -+ /* Store TGID of Caller Process */ -+ pProcObject->hProcess = current->tgid; -+ -+ INIT_LIST_HEAD(&pProcObject->proc_object); -+ -+ if (pAttrIn) 
-+ pProcObject->uTimeout = pAttrIn->uTimeout; -+ else -+ pProcObject->uTimeout = PROC_DFLT_TIMEOUT; -+ -+ status = DEV_GetIntfFxns(hDevObject, &pProcObject->pIntfFxns); -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetWMDContext(hDevObject, -+ &pProcObject->hWmdContext); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach Could not" -+ " get the WMD Context.\n", status); -+ MEM_FreeObject(pProcObject); -+ } -+ } else { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach Could not get" -+ " the DEV_ Interface fxns.\n", status); -+ MEM_FreeObject(pProcObject); -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* Create the Notification Object */ -+ /* This is created with no event mask, no notify mask -+ * and no valid handle to the notification. They all get -+ * filled up when PROC_RegisterNotify is called */ -+ status = NTFY_Create(&pProcObject->hNtfy); -+ if (DSP_SUCCEEDED(status)) { -+ /* Insert the Processor Object into the DEV List. -+ * Return handle to this Processor Object: -+ * Find out if the Device is already attached to a -+ * Processor. If so, return AlreadyAttached status */ -+ LST_InitElem(&pProcObject->link); -+ status = DEV_InsertProcObject(pProcObject->hDevObject, -+ (u32)pProcObject, -+ &pProcObject->bIsAlreadyAttached); -+ if (DSP_SUCCEEDED(status)) { -+ if (pProcObject->bIsAlreadyAttached) { -+ status = DSP_SALREADYATTACHED; -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Attach: Processor " -+ "Already Attached!\n"); -+ } -+ } else { -+ if (pProcObject->hNtfy) -+ NTFY_Delete(pProcObject->hNtfy); -+ -+ MEM_FreeObject(pProcObject); -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach: failed to insert " -+ "Proc Object into DEV, 0x%x!\n", status); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ *phProcessor = (DSP_HPROCESSOR)pProcObject; -+ (void)PROC_NotifyClients(pProcObject, -+ DSP_PROCESSORATTACH); -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Attach: Processor " -+ "Attach Success!\n"); -+ } -+ } else { -+ /* Don't leak memory if DSP_FAILED */ -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Attach: Could not allocate " -+ "storage for notification \n"); -+ MEM_FreeObject(pProcObject); -+ } -+#ifndef RES_CLEANUP_DISABLE -+ spin_lock(&pr_ctxt->proc_list_lock); -+ list_add(&pProcObject->proc_object, &pr_ctxt->processor_list); -+ spin_unlock(&pr_ctxt->proc_list_lock); -+#endif -+func_end: -+ DBC_Ensure((status == DSP_EFAIL && *phProcessor == NULL) || -+ (DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) || -+ (status == DSP_SALREADYATTACHED && -+ MEM_IsValidHandle(pProcObject, PROC_SIGNATURE))); -+ GT_2trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Attach, results:\n\t" -+ "status: 0x%x\n\thProcessor: 0x%x\n", status, *phProcessor); -+ -+ return status; -+} -+ -+static DSP_STATUS GetExecFile(struct CFG_DEVNODE *hDevNode, -+ struct DEV_OBJECT *hDevObject, -+ u32 size, char *execFile) -+{ -+ s32 devType; -+ s32 len; -+ -+ DEV_GetDevType(hDevObject, (u32 *) &devType); -+ if (devType == DSP_UNIT) { -+ return CFG_GetExecFile(hDevNode, size, execFile); -+ } else if (devType == IVA_UNIT) { -+ if (iva_img) { -+ len = strlen(iva_img); -+ strncpy(execFile, iva_img, len + 1); -+ return DSP_SOK; -+ } -+ } -+ return DSP_EFILE; -+} -+ -+/* -+ * ======== PROC_AutoStart ======== = -+ * Purpose: -+ * A Particular device gets loaded with the default image -+ * if the AutoStart flag is set. 
-+ * Parameters: -+ * hDevObject: Handle to the Device -+ * Returns: -+ * DSP_SOK: On Successful Loading -+ * DSP_EFAIL General Failure -+ * Requires: -+ * hDevObject != NULL -+ * Ensures: -+ */ -+DSP_STATUS PROC_AutoStart(struct CFG_DEVNODE *hDevNode, -+ struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ u32 dwAutoStart = 0; /* autostart flag */ -+ struct PROC_OBJECT *pProcObject; -+ struct PROC_OBJECT *hProcObject; -+ char szExecFile[MAXCMDLINELEN]; -+ char *argv[2]; -+ struct MGR_OBJECT *hMgrObject = NULL; -+ s32 devType; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hDevNode != NULL); -+ DBC_Require(hDevObject != NULL); -+ -+ GT_2trace(PROC_DebugMask, GT_ENTER, -+ "Entered PROC_AutoStart, args:\n\t" -+ "hDevNode: 0x%x\thDevObject: 0x%x\n", hDevNode, hDevObject); -+ /* Create a Dummy PROC Object */ -+ if (DSP_FAILED(CFG_GetObject((u32 *)&hMgrObject, -+ REG_MGR_OBJECT))) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_AutoStart: DSP_FAILED to " -+ "Get MGR Object\n"); -+ goto func_end; -+ } -+ MEM_AllocObject(pProcObject, struct PROC_OBJECT, PROC_SIGNATURE); -+ if (pProcObject == NULL) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_AutoStart: DSP_FAILED " -+ "to Create a dummy Processor\n"); -+ goto func_end; -+ } -+ GT_0trace(PROC_DebugMask, GT_1CLASS, "NTFY Created \n"); -+ pProcObject->hDevObject = hDevObject; -+ pProcObject->hMgrObject = hMgrObject; -+ hProcObject = pProcObject; -+ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, -+ &pProcObject->pIntfFxns))) { -+ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, -+ &pProcObject->hWmdContext))) { -+ status = DSP_SOK; -+ } else { -+ MEM_FreeObject(hProcObject); -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_AutoStart: Failed " -+ "to get WMD Context \n"); -+ } -+ } else { -+ MEM_FreeObject(hProcObject); -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_AutoStart: Failed to " -+ "get IntFxns \n"); -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* Stop the Device, put it into standby mode */ -+ status = PROC_Stop(hProcObject); -+ if (DSP_FAILED(CFG_GetAutoStart(hDevNode, &dwAutoStart)) || -+ !dwAutoStart) { -+ status = DSP_EFAIL; -+ /* DSP_FAILED to Get s32 Fxn or Wmd Context */ -+ GT_0trace(PROC_DebugMask, GT_1CLASS, "PROC_AutoStart: " -+ "CFG_GetAutoStart DSP_FAILED \n"); -+ goto func_cont; -+ } -+ /* Get the default executable for this board... */ -+ DEV_GetDevType(hDevObject, (u32 *)&devType); -+ pProcObject->uProcessor = devType; -+ if (DSP_SUCCEEDED(GetExecFile(hDevNode, hDevObject, -+ sizeof(szExecFile), szExecFile))) { -+ argv[0] = szExecFile; -+ argv[1] = NULL; -+ /* ...and try to load it: */ -+ status = PROC_Load(hProcObject, 1, (CONST char **)argv, NULL); -+ if (DSP_SUCCEEDED(status)) { -+ status = PROC_Start(hProcObject); -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_AutoStart: Processor started " -+ "running\n"); -+ } else { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_AutoStart: DSP_FAILED To " -+ "Start \n"); -+ } -+ } else { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_AutoStart: DSP_FAILED to Load\n"); -+ } -+ } else { -+ status = DSP_EFILE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_AutoStart: " -+ "No Exec file found \n"); -+ } -+func_cont: -+ MEM_FreeObject(hProcObject); -+func_end: -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "Exiting PROC_AutoStart, status:0x%x\n", status); -+ return status; -+} -+ -+/* -+ * ======== PROC_Ctrl ======== -+ * Purpose: -+ * Pass control information to the GPP device driver managing the -+ * DSP processor. 
-+ * -+ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge -+ * application developer's API. -+ * Call the WMD_ICOTL Fxn with the Argument This is a Synchronous -+ * Operation. arg can be null. -+ */ -+DSP_STATUS PROC_Ctrl(DSP_HPROCESSOR hProcessor, u32 dwCmd, -+ IN struct DSP_CBDATA *arg) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = hProcessor; -+ u32 timeout = 0; -+ -+ DBC_Require(cRefs > 0); -+ GT_3trace(PROC_DebugMask, GT_ENTER, -+ "Entered PROC_Ctrl, args:\n\thProcessor:" -+ " 0x%x\n\tdwCmd: 0x%x\n\targ: 0x%x\n", hProcessor, dwCmd, arg); -+ -+ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ /* intercept PWR deep sleep command */ -+ if (dwCmd == WMDIOCTL_DEEPSLEEP) { -+ timeout = arg->cbData; -+ status = PWR_SleepDSP(PWR_DEEPSLEEP, timeout); -+ } -+ /* intercept PWR emergency sleep command */ -+ else if (dwCmd == WMDIOCTL_EMERGENCYSLEEP) { -+ timeout = arg->cbData; -+ status = PWR_SleepDSP(PWR_EMERGENCYDEEPSLEEP, timeout); -+ } else if (dwCmd == PWR_DEEPSLEEP) { -+ /* timeout = arg->cbData; */ -+ status = PWR_SleepDSP(PWR_DEEPSLEEP, timeout); -+ } -+ /* intercept PWR wake commands */ -+ else if (dwCmd == WMDIOCTL_WAKEUP) { -+ timeout = arg->cbData; -+ status = PWR_WakeDSP(timeout); -+ } else if (dwCmd == PWR_WAKEUP) { -+ /* timeout = arg->cbData; */ -+ status = PWR_WakeDSP(timeout); -+ } else -+ if (DSP_SUCCEEDED -+ ((*pProcObject->pIntfFxns->pfnDevCntrl) -+ (pProcObject->hWmdContext, dwCmd, arg))) { -+ status = DSP_SOK; -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Ctrl: Failed \n"); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Ctrl: InValid Processor Handle \n"); -+ } -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Ctrl, 0x%x\n", -+ status); -+ return status; -+} -+ -+/* -+ * ======== PROC_Detach ======== -+ * Purpose: -+ * Destroys the Processor Object. Removes the notification from the Dev -+ * List. -+ */ -+DSP_STATUS PROC_Detach(DSP_HPROCESSOR hProcessor, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ DBC_Require(cRefs > 0); -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Detach, args:\n\t" -+ "hProcessor: 0x%x\n", hProcessor); -+ -+ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+#ifndef RES_CLEANUP_DISABLE -+ if (pr_ctxt) { -+ spin_lock(&pr_ctxt->proc_list_lock); -+ list_del(&pProcObject->proc_object); -+ spin_unlock(&pr_ctxt->proc_list_lock); -+ } -+#endif -+ /* Notify the Client */ -+ NTFY_Notify(pProcObject->hNtfy, DSP_PROCESSORDETACH); -+ /* Remove the notification memory */ -+ if (pProcObject->hNtfy) -+ NTFY_Delete(pProcObject->hNtfy); -+ -+ if (pProcObject->g_pszLastCoff) { -+ MEM_Free(pProcObject->g_pszLastCoff); -+ pProcObject->g_pszLastCoff = NULL; -+ } -+ /* Remove the Proc from the DEV List */ -+ (void)DEV_RemoveProcObject(pProcObject->hDevObject, -+ (u32)pProcObject); -+ /* Free the Processor Object */ -+ MEM_FreeObject(pProcObject); -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Detach: InValid Processor Handle \n"); -+ } -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Detach, 0x%x\n", -+ status); -+ return status; -+} -+ -+/* -+ * ======== PROC_EnumNodes ======== -+ * Purpose: -+ * Enumerate and get configuration information about nodes allocated -+ * on a DSP processor. 
-+ */ -+DSP_STATUS PROC_EnumNodes(DSP_HPROCESSOR hProcessor, OUT DSP_HNODE *aNodeTab, -+ IN u32 uNodeTabSize, OUT u32 *puNumNodes, -+ OUT u32 *puAllocated) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct NODE_MGR *hNodeMgr = NULL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(aNodeTab != NULL || uNodeTabSize == 0); -+ DBC_Require(puNumNodes != NULL); -+ DBC_Require(puAllocated != NULL); -+ -+ GT_5trace(PROC_DebugMask, GT_ENTER, "Entered PROC_EnumNodes, args:\n\t" -+ "hProcessor: 0x%x\n\taNodeTab: 0x%x\n\tuNodeTabSize: " -+ " 0x%x\n\t puNumNodes 0x%x\n\t puAllocated: 0x%x\n", -+ hProcessor, aNodeTab, uNodeTabSize, puNumNodes, -+ puAllocated); -+ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ if (DSP_SUCCEEDED(DEV_GetNodeManager(pProcObject->hDevObject, -+ &hNodeMgr))) { -+ if (hNodeMgr) { -+ status = NODE_EnumNodes(hNodeMgr, aNodeTab, -+ uNodeTabSize, -+ puNumNodes, -+ puAllocated); -+ } -+ } -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_EnumNodes: " -+ "InValid Processor Handle \n"); -+ } -+ GT_6trace(PROC_DebugMask, GT_ENTER, "Exit PROC_EnumNodes, args:\n\t" -+ "hProcessor: 0x%x\n\taNodeTab: 0x%x\n\tuNodeTabSize: " -+ " 0x%x\n\t puNumNodes 0x%x\n\t puAllocated: 0x%x\n\t " -+ "status: 0x%x \n", hProcessor, aNodeTab, uNodeTabSize, -+ puNumNodes, puAllocated, status); -+ -+ return status; -+} -+ -+/* Cache operation against kernel address instead of users */ -+static int memory_sync_page(struct vm_area_struct *vma, unsigned long start, -+ ssize_t len, enum DSP_FLUSHTYPE ftype) -+{ -+ struct page *page; -+ void *kaddr; -+ unsigned long offset; -+ ssize_t rest; -+ -+#ifdef CHECK_DSP_CACHE_LINE -+ if ((start & DSP_CACHE_LINE) || (len & DSP_CACHE_LINE)) -+ pr_warning("%s: not aligned: %08lx(%d)\n", __func__, -+ start, len); -+#endif -+ while (len) { -+ page = follow_page(vma, start, FOLL_GET); -+ if (!page) { -+ pr_err("%s: no page for %08lx\n", __func__, start); -+ return -EINVAL; -+ } else if (IS_ERR(page)) { -+ pr_err("%s: err page for %08lx(%lu)\n", __func__, start, -+ IS_ERR(page)); -+ return IS_ERR(page); -+ } -+ -+ offset = start & ~PAGE_MASK; -+ kaddr = page_address(page) + offset; -+ rest = min_t(ssize_t, PAGE_SIZE - offset, len); -+ -+ MEM_FlushCache(kaddr, rest, ftype); -+ -+ put_page(page); -+ len -= rest; -+ start += rest; -+ } -+ -+ return 0; -+} -+ -+/* Check if the given area blongs to process virtul memory address space */ -+static int memory_sync_vma(unsigned long start, u32 len, -+ enum DSP_FLUSHTYPE ftype) -+{ -+ int err = 0; -+ unsigned long end; -+ struct vm_area_struct *vma; -+ -+ end = start + len; -+ if (end <= start) -+ return -EINVAL; -+ -+ while ((vma = find_vma(current->mm, start)) != NULL) { -+ ssize_t size; -+ -+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) -+ return -EINVAL; -+ -+ if (vma->vm_start > start) -+ return -EINVAL; -+ -+ size = min_t(ssize_t, vma->vm_end - start, len); -+ err = memory_sync_page(vma, start, size, ftype); -+ if (err) -+ break; -+ -+ if (end <= vma->vm_end) -+ break; -+ -+ start = vma->vm_end; -+ } -+ -+ if (!vma) -+ err = -EINVAL; -+ -+ return err; -+} -+ -+static DSP_STATUS proc_memory_sync(DSP_HPROCESSOR hProcessor, void *pMpuAddr, -+ u32 ulSize, u32 ulFlags, -+ enum DSP_FLUSHTYPE FlushMemType) -+{ -+ /* Keep STATUS here for future additions to this function */ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ -+ DBC_Require(cRefs > 0); -+ GT_5trace(PROC_DebugMask, 
GT_ENTER, -+ "Entered %s, args:\n\t" -+ "hProcessor: 0x%x pMpuAddr: 0x%x ulSize 0x%x, ulFlags 0x%x\n", -+ __func__, hProcessor, pMpuAddr, ulSize, ulFlags); -+ -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "%s: InValid Processor Handle\n", __func__); -+ status = DSP_EHANDLE; -+ goto err_out; -+ } -+ -+ down_read(¤t->mm->mmap_sem); -+ -+ if (memory_sync_vma((u32)pMpuAddr, ulSize, FlushMemType)) { -+ pr_err("%s: InValid address parameters %p %x\n", -+ __func__, pMpuAddr, ulSize); -+ status = DSP_EHANDLE; -+ } -+ -+ up_read(¤t->mm->mmap_sem); -+err_out: -+ GT_2trace(PROC_DebugMask, GT_ENTER, -+ "Leaving %s [0x%x]", __func__, status); -+ -+ return status; -+} -+ -+/* -+ * ======== PROC_FlushMemory ======== -+ * Purpose: -+ * Flush cache -+ */ -+DSP_STATUS PROC_FlushMemory(DSP_HPROCESSOR hProcessor, void *pMpuAddr, -+ u32 ulSize, u32 ulFlags) -+{ -+ enum DSP_FLUSHTYPE mtype = PROC_WRITEBACK_INVALIDATE_MEM; -+ -+ if (ulFlags & 1) -+ mtype = PROC_WRITEBACK_MEM; -+ -+ return proc_memory_sync(hProcessor, pMpuAddr, ulSize, ulFlags, mtype); -+} -+ -+/* -+ * ======== PROC_InvalidateMemory ======== -+ * Purpose: -+ * Invalidates the memory specified -+ */ -+DSP_STATUS PROC_InvalidateMemory(DSP_HPROCESSOR hProcessor, void *pMpuAddr, -+ u32 ulSize) -+{ -+ enum DSP_FLUSHTYPE mtype = PROC_INVALIDATE_MEM; -+ -+ return proc_memory_sync(hProcessor, pMpuAddr, ulSize, 0, mtype); -+} -+ -+/* -+ * ======== PROC_GetResourceInfo ======== -+ * Purpose: -+ * Enumerate the resources currently available on a processor. -+ */ -+DSP_STATUS PROC_GetResourceInfo(DSP_HPROCESSOR hProcessor, u32 uResourceType, -+ OUT struct DSP_RESOURCEINFO *pResourceInfo, -+ u32 uResourceInfoSize) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct NODE_MGR *hNodeMgr = NULL; -+ struct NLDR_OBJECT *hNldr = NULL; -+ struct RMM_TargetObj *rmm = NULL; -+ struct IO_MGR *hIOMgr = NULL; /* IO manager handle */ -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pResourceInfo != NULL); -+ DBC_Require(uResourceInfoSize >= sizeof(struct DSP_RESOURCEINFO)); -+ -+ GT_4trace(PROC_DebugMask, GT_ENTER, "Entered PROC_GetResourceInfo,\n\t" -+ "hProcessor: 0x%x\n\tuResourceType: 0x%x\n\tpResourceInfo:" -+ " 0x%x\n\t uResourceInfoSize 0x%x\n", hProcessor, -+ uResourceType, pResourceInfo, uResourceInfoSize); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_GetResourceInfo: InValid " -+ "Processor Handle \n"); -+ goto func_end; -+ } -+ switch (uResourceType) { -+ case DSP_RESOURCE_DYNDARAM: -+ case DSP_RESOURCE_DYNSARAM: -+ case DSP_RESOURCE_DYNEXTERNAL: -+ case DSP_RESOURCE_DYNSRAM: -+ if (DSP_FAILED(DEV_GetNodeManager(pProcObject->hDevObject, -+ &hNodeMgr))) -+ goto func_end; -+ -+ if (DSP_SUCCEEDED(NODE_GetNldrObj(hNodeMgr, &hNldr))) { -+ if (DSP_SUCCEEDED(NLDR_GetRmmManager(hNldr, &rmm))) { -+ DBC_Assert(rmm != NULL); -+ status = DSP_EVALUE; -+ if (RMM_stat(rmm, -+ (enum DSP_MEMTYPE)uResourceType, -+ (struct DSP_MEMSTAT *)&(pResourceInfo-> -+ result.memStat))) -+ status = DSP_SOK; -+ } -+ } -+ break; -+ case DSP_RESOURCE_PROCLOAD: -+ status = DEV_GetIOMgr(pProcObject->hDevObject, &hIOMgr); -+ status = pProcObject->pIntfFxns->pfnIOGetProcLoad(hIOMgr, -+ (struct DSP_PROCLOADSTAT *)&(pResourceInfo-> -+ result.procLoadStat)); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "Error in procLoadStat function 0x%x\n", status); -+ } -+ break; -+ 
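proc_memory_sync() walks the user range one VMA at a time, and memory_sync_page() then flushes it page by page against the kernel alias of each page. The part worth spelling out is the chunking arithmetic: each pass handles min(PAGE_SIZE - offset, len) bytes, where offset is the position inside the current page. A small user-space model of that loop, assuming 4 KiB pages:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Model of the loop in memory_sync_page(): split [start, start+len)
 * into page-bounded chunks exactly as the driver does. */
static void walk_range(unsigned long start, size_t len)
{
        while (len) {
                unsigned long offset = start & ~PAGE_MASK;
                size_t rest = PAGE_SIZE - offset;

                if (rest > len)
                        rest = len;

                printf("page 0x%lx: flush %zu bytes at offset 0x%lx\n",
                       start & PAGE_MASK, rest, offset);

                len   -= rest;
                start += rest;
        }
}

int main(void)
{
        walk_range(0x10ff0, 0x20);      /* straddles a page boundary */
        return 0;
}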
default: -+ status = DSP_EFAIL; -+ break; -+ } -+func_end: -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_GetResourceInfo, " -+ "status 0x%x\n", status); -+ return status; -+} -+ -+/* -+ * ======== PROC_Exit ======== -+ * Purpose: -+ * Decrement reference count, and free resources when reference count is -+ * 0. -+ */ -+void PROC_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ if (hProcLock) -+ (void)SYNC_DeleteCS(hProcLock); -+ -+ cRefs--; -+ -+ GT_1trace(PROC_DebugMask, GT_5CLASS, -+ "Entered PROC_Exit, ref count:0x%x\n", cRefs); -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== PROC_GetDevObject ======== -+ * Purpose: -+ * Return the Dev Object handle for a given Processor. -+ * -+ */ -+DSP_STATUS PROC_GetDevObject(DSP_HPROCESSOR hProcessor, -+ struct DEV_OBJECT **phDevObject) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phDevObject != NULL); -+ -+ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ *phDevObject = pProcObject->hDevObject; -+ status = DSP_SOK; -+ } else { -+ *phDevObject = NULL; -+ status = DSP_EHANDLE; -+ } -+ -+ DBC_Ensure((DSP_SUCCEEDED(status) && *phDevObject != NULL) || -+ (DSP_FAILED(status) && *phDevObject == NULL)); -+ -+ return status; -+} -+ -+/* -+ * ======== PROC_GetState ======== -+ * Purpose: -+ * Report the state of the specified DSP processor. -+ */ -+DSP_STATUS PROC_GetState(DSP_HPROCESSOR hProcessor, -+ OUT struct DSP_PROCESSORSTATE *pProcStatus, -+ u32 uStateInfoSize) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ BRD_STATUS brdStatus; -+ struct DEH_MGR *hDehMgr; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pProcStatus != NULL); -+ DBC_Require(uStateInfoSize >= sizeof(struct DSP_PROCESSORSTATE)); -+ -+ GT_3trace(PROC_DebugMask, GT_ENTER, "Entering PROC_GetState, args:\n\t" -+ "pProcStatus: 0x%x\n\thProcessor: 0x%x\n\t uStateInfoSize" -+ " 0x%x\n", pProcStatus, hProcessor, uStateInfoSize); -+ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ /* First, retrieve BRD state information */ -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) -+ (pProcObject->hWmdContext, &brdStatus))) { -+ switch (brdStatus) { -+ case BRD_STOPPED: -+ pProcStatus->iState = PROC_STOPPED; -+ break; -+ case BRD_DSP_HIBERNATION: -+ /* Fall through */ -+ case BRD_RUNNING: -+ pProcStatus->iState = PROC_RUNNING; -+ break; -+ case BRD_LOADED: -+ pProcStatus->iState = PROC_LOADED; -+ break; -+ case BRD_ERROR: -+ pProcStatus->iState = PROC_ERROR; -+ break; -+ default: -+ pProcStatus->iState = 0xFF; -+ status = DSP_EFAIL; -+ break; -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_GetState: General Failure" -+ " to read the PROC Status \n"); -+ } -+ /* Next, retrieve error information, if any */ -+ status = DEV_GetDehMgr(pProcObject->hDevObject, &hDehMgr); -+ if (DSP_SUCCEEDED(status) && hDehMgr) { -+ status = (*pProcObject->pIntfFxns->pfnDehGetInfo) -+ (hDehMgr, &(pProcStatus->errInfo)); -+ if (DSP_FAILED(status)) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_GetState: Failed " -+ "retrieve exception info.\n"); -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_GetState: Failed to " -+ "retrieve DEH handle.\n"); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_GetState:InValid Processor Handle \n"); -+ } -+ GT_2trace(PROC_DebugMask, GT_ENTER, -+ "Exiting 
PROC_GetState, results:\n\t" -+ "status: 0x%x\n\tpProcStatus: 0x%x\n", status, -+ pProcStatus->iState); -+ return status; -+} -+ -+/* -+ * ======== PROC_GetTrace ======== -+ * Purpose: -+ * Retrieve the current contents of the trace buffer, located on the -+ * Processor. Predefined symbols for the trace buffer must have been -+ * configured into the DSP executable. -+ * Details: -+ * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a -+ * trace buffer, only. Treat it as an undocumented feature. -+ * This call is destructive, meaning the processor is placed in the monitor -+ * state as a result of this function. -+ */ -+DSP_STATUS PROC_GetTrace(DSP_HPROCESSOR hProcessor, u8 *pBuf, u32 uMaxSize) -+{ -+ DSP_STATUS status; -+ status = DSP_ENOTIMPL; -+ return status; -+} -+ -+/* -+ * ======== PROC_Init ======== -+ * Purpose: -+ * Initialize PROC's private state, keeping a reference count on each call -+ */ -+bool PROC_Init(void) -+{ -+ bool fRetval = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ /* Set the Trace mask */ -+ DBC_Assert(!PROC_DebugMask.flags); -+ GT_create(&PROC_DebugMask, "PR"); /* "PR" for Processor */ -+ -+ (void)SYNC_InitializeCS(&hProcLock); -+ } -+ -+ if (fRetval) -+ cRefs++; -+ -+ GT_1trace(PROC_DebugMask, GT_5CLASS, -+ "Entered PROC_Init, ref count:0x%x\n", cRefs); -+ DBC_Ensure((fRetval && (cRefs > 0)) || (!fRetval && (cRefs >= 0))); -+ -+ return fRetval; -+} -+ -+/* -+ * ======== PROC_Load ======== -+ * Purpose: -+ * Reset a processor and load a new base program image. -+ * This will be an OEM-only function, and not part of the DSP/BIOS Bridge -+ * application developer's API. -+ */ -+DSP_STATUS PROC_Load(DSP_HPROCESSOR hProcessor, IN CONST s32 iArgc, -+ IN CONST char **aArgv, IN CONST char **aEnvp) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct IO_MGR *hIOMgr; /* IO manager handle */ -+ struct MSG_MGR *hMsgMgr; -+ struct COD_MANAGER *hCodMgr; /* Code manager handle */ -+ char *pargv0; /* temp argv[0] ptr */ -+ char **newEnvp; /* Updated envp[] array. */ -+ char szProcID[MAXPROCIDLEN]; /* Size of "PROC_ID=" */ -+ s32 cEnvp; /* Num elements in envp[]. */ -+ s32 cNewEnvp; /* " " in newEnvp[] */ -+ s32 nProcID = 0; /* Anticipate MP version. 
*/ -+ struct DCD_MANAGER *hDCDHandle; -+ struct DMM_OBJECT *hDmmMgr; -+ u32 dwExtEnd; -+ u32 uProcId; -+#ifdef DEBUG -+ BRD_STATUS uBrdState; -+#endif -+#ifdef OPT_LOAD_TIME_INSTRUMENTATION -+ struct timeval tv1; -+ struct timeval tv2; -+#endif -+ DBC_Require(cRefs > 0); -+ DBC_Require(iArgc > 0); -+ DBC_Require(aArgv != NULL); -+#ifdef OPT_LOAD_TIME_INSTRUMENTATION -+ do_gettimeofday(&tv1); -+#endif -+#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+#endif -+ GT_2trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Load, args:\n\t" -+ "hProcessor: 0x%x\taArgv: 0x%x\n", hProcessor, aArgv[0]); -+ /* Call the WMD_BRD_Load Fxn */ -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Load: Invalid Processor Handle..\n"); -+ goto func_end; -+ } -+ if (pProcObject->bIsAlreadyAttached) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load GPP " -+ "Client is already attached status \n"); -+ } -+ if (DSP_FAILED(DEV_GetCodMgr(pProcObject->hDevObject, &hCodMgr))) { -+ status = DSP_EFAIL; -+ GT_1trace(PROC_DebugMask, GT_7CLASS, "PROC_Load: DSP_FAILED in " -+ "DEV_GetCodMgr status 0x%x \n", status); -+ goto func_end; -+ } -+ status = PROC_Stop(hProcessor); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: DSP_FAILED to Place the" -+ " Processor in Stop Mode(PROC_STOP) status 0x%x \n", -+ status); -+ goto func_end; -+ } -+ /* Place the board in the monitor state. */ -+ status = PROC_Monitor(hProcessor); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: DSP_FAILED to Place the" -+ " Processor in Monitor Mode(PROC_IDLE) status 0x%x\n", -+ status); -+ goto func_end; -+ } -+ /* Save ptr to original argv[0]. */ -+ pargv0 = (char *)aArgv[0]; -+ /*Prepend "PROC_ID="to envp array for target.*/ -+ cEnvp = GetEnvpCount((char **)aEnvp); -+ cNewEnvp = (cEnvp ? (cEnvp + 1) : (cEnvp + 2)); -+ newEnvp = MEM_Calloc(cNewEnvp * sizeof(char **), MEM_PAGED); -+ if (newEnvp) { -+ status = snprintf(szProcID, MAXPROCIDLEN, PROC_ENVPROCID, -+ nProcID); -+ if (status == -1) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Load: " -+ "Proc ID string overflow \n"); -+ status = DSP_EFAIL; -+ } else { -+ newEnvp = PrependEnvp(newEnvp, (char **)aEnvp, cEnvp, -+ cNewEnvp, szProcID); -+ /* Get the DCD Handle */ -+ status = MGR_GetDCDHandle(pProcObject->hMgrObject, -+ (u32 *)&hDCDHandle); -+ if (DSP_SUCCEEDED(status)) { -+ /* Before proceeding with new load, -+ * check if a previously registered COFF -+ * exists. -+ * If yes, unregister nodes in previously -+ * registered COFF. If any error occurred, -+ * set previously registered COFF to NULL. */ -+ if (pProcObject->g_pszLastCoff != NULL) { -+ status = DCD_AutoUnregister(hDCDHandle, -+ pProcObject->g_pszLastCoff); -+ /* Regardless of auto unregister status, -+ * free previously allocated -+ * memory. 
*/ -+ MEM_Free(pProcObject->g_pszLastCoff); -+ pProcObject->g_pszLastCoff = NULL; -+ } -+ } -+ /* On success, do COD_OpenBase() */ -+ status = COD_OpenBase(hCodMgr, (char *)aArgv[0], -+ COD_SYMB); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: COD_OpenBase " -+ "failed (0x%x)\n", status); -+ } -+ } -+ } else { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ " PROC_Load:Out of Memory \n"); -+ status = DSP_EMEMORY; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Auto-register data base */ -+ /* Get the DCD Handle */ -+ status = MGR_GetDCDHandle(pProcObject->hMgrObject, -+ (u32 *)&hDCDHandle); -+ if (DSP_SUCCEEDED(status)) { -+ /* Auto register nodes in specified COFF -+ * file. If registration did not fail, -+ * (status = DSP_SOK or DSP_EDCDNOAUTOREGISTER) -+ * save the name of the COFF file for -+ * de-registration in the future. */ -+ status = DCD_AutoRegister(hDCDHandle, (char *)aArgv[0]); -+ if (status == DSP_EDCDNOAUTOREGISTER) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: No Auto " -+ "Register section. Proceeding..\n"); -+ status = DSP_SOK; -+ } -+ if (DSP_FAILED(status)) { -+ status = DSP_EFAIL; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: Failed to " -+ "Auto Register..\n"); -+ } else { -+ DBC_Assert(pProcObject->g_pszLastCoff == NULL); -+ /* Allocate memory for pszLastCoff */ -+ pProcObject->g_pszLastCoff = MEM_Calloc( -+ (strlen((char *)aArgv[0]) + 1), -+ MEM_PAGED); -+ /* If memory allocated, save COFF file name*/ -+ if (pProcObject->g_pszLastCoff) { -+ strncpy(pProcObject->g_pszLastCoff, -+ (char *)aArgv[0], -+ (strlen((char *)aArgv[0]) + 1)); -+ } -+ } -+ } -+ } -+ /* Update shared memory address and size */ -+ if (DSP_SUCCEEDED(status)) { -+ /* Create the message manager. This must be done -+ * before calling the IOOnLoaded function. 
*/ -+ DEV_GetMsgMgr(pProcObject->hDevObject, &hMsgMgr); -+ if (!hMsgMgr) { -+ status = MSG_Create(&hMsgMgr, pProcObject->hDevObject, -+ (MSG_ONEXIT)NODE_OnExit); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ DEV_SetMsgMgr(pProcObject->hDevObject, hMsgMgr); -+ } -+ if (status == DSP_ENOTIMPL) { -+ /* It's OK not to have a message manager */ -+ status = DSP_SOK; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Set the Device object's message manager */ -+ status = DEV_GetIOMgr(pProcObject->hDevObject, &hIOMgr); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ status = (*pProcObject->pIntfFxns->pfnIOOnLoaded)(hIOMgr); -+ if (status == DSP_ENOTIMPL) { -+ /* Ok not to implement this function */ -+ status = DSP_SOK; -+ } else { -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: Failed to get shared " -+ "memory or message buffer address " -+ "from COFF status 0x%x\n", status); -+ status = DSP_EFAIL; -+ } -+ } -+ } else { -+ status = DSP_EFAIL; -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: DSP_FAILED in " -+ "MSG_Create status 0x%x\n", status); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Now, attempt to load an exec: */ -+ -+ /* Boost the OPP level to Maximum level supported by baseport*/ -+#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) -+ if (pdata->cpu_set_freq) -+ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP5]); -+#endif -+ status = COD_LoadBase(hCodMgr, iArgc, (char **)aArgv, -+ DEV_BrdWriteFxn, -+ pProcObject->hDevObject, NULL); -+ if (DSP_FAILED(status)) { -+ if (status == COD_E_OPENFAILED) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load:Failure to Load the EXE\n"); -+ } -+ if (status == COD_E_SYMBOLNOTFOUND) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load:Could not parse the file\n"); -+ } else { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: DSP_FAILED in " -+ "COD_Load status 0x%x \n", status); -+ } -+ } -+ /* Requesting the lowest opp supported*/ -+#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) -+ if (pdata->cpu_set_freq) -+ (*pdata->cpu_set_freq)(pdata->mpu_speed[VDD1_OPP1]); -+#endif -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Update the Processor status to loaded */ -+ status = (*pProcObject->pIntfFxns->pfnBrdSetState) -+ (pProcObject->hWmdContext, BRD_LOADED); -+ if (DSP_SUCCEEDED(status)) { -+ pProcObject->sState = PROC_LOADED; -+ if (pProcObject->hNtfy) { -+ PROC_NotifyClients(pProcObject, -+ DSP_PROCESSORSTATECHANGE); -+ } -+ } else { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load, pfnBrdSetState " -+ "failed: 0x%x\n", status); -+ status = DSP_EFAIL; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = PROC_GetProcessorId(hProcessor, &uProcId); -+ if (uProcId == DSP_UNIT) { -+ /* Use all available DSP address space after EXTMEM -+ * for DMM */ -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMgr, EXTEND, -+ &dwExtEnd); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Load: Failed on " -+ "COD_GetSymValue %s.\n", -+ EXTEND); -+ } -+ } -+ /* Reset DMM structs and add an initial free chunk*/ -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetDmmMgr(pProcObject->hDevObject, -+ &hDmmMgr); -+ if (DSP_SUCCEEDED(status)) { -+ /* Set dwExtEnd to DMM START u8 -+ * address */ -+ dwExtEnd = (dwExtEnd + 1) * DSPWORDSIZE; -+ /* DMM memory is from EXT_END */ -+ status = DMM_CreateTables(hDmmMgr, -+ dwExtEnd, DMMPOOLSIZE); -+ } -+ } -+ } -+ } -+ /* Restore the original argv[0] */ -+ MEM_Free(newEnvp); -+ aArgv[0] = pargv0; -+#ifdef DEBUG -+ if 
(DSP_SUCCEEDED(status)) { -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) -+ (pProcObject->hWmdContext, &uBrdState))) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Load: Processor Loaded\n"); -+ DBC_Assert(uBrdState == BRD_LOADED); -+ } -+ } -+#endif -+func_end: -+#ifdef DEBUG -+ if (DSP_FAILED(status)) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, "PROC_Load: " -+ "Processor Load Failed.\n"); -+ -+ } -+#endif -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "Exiting PROC_Load, status: 0x%x\n", status); -+ DBC_Ensure((DSP_SUCCEEDED(status) && pProcObject->sState == PROC_LOADED) -+ || DSP_FAILED(status)); -+#ifdef OPT_LOAD_TIME_INSTRUMENTATION -+ do_gettimeofday(&tv2); -+ if (tv2.tv_usec < tv1.tv_usec) { -+ tv2.tv_usec += 1000000; -+ tv2.tv_sec--; -+ } -+ GT_2trace(PROC_DebugMask, GT_1CLASS, -+ "Proc_Load: time to load %d sec and %d usec \n", -+ tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec); -+#endif -+ return status; -+} -+ -+/* -+ * ======== PROC_Map ======== -+ * Purpose: -+ * Maps a MPU buffer to DSP address space. -+ */ -+DSP_STATUS PROC_Map(DSP_HPROCESSOR hProcessor, void *pMpuAddr, u32 ulSize, -+ void *pReqAddr, void **ppMapAddr, u32 ulMapAttr, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ u32 vaAlign; -+ u32 paAlign; -+ struct DMM_OBJECT *hDmmMgr; -+ u32 sizeAlign; -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE dmmRes; -+#endif -+ -+ GT_6trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Map, args:\n\t" -+ "hProcessor %x, pMpuAddr %x, ulSize %x, pReqAddr %x, " -+ "ulMapAttr %x, ppMapAddr %x\n", hProcessor, pMpuAddr, ulSize, -+ pReqAddr, ulMapAttr, ppMapAddr); -+ /* Calculate the page-aligned PA, VA and size */ -+ vaAlign = PG_ALIGN_LOW((u32) pReqAddr, PG_SIZE_4K); -+ paAlign = PG_ALIGN_LOW((u32) pMpuAddr, PG_SIZE_4K); -+ sizeAlign = PG_ALIGN_HIGH(ulSize + (u32)pMpuAddr - paAlign, -+ PG_SIZE_4K); -+ -+ GT_3trace(PROC_DebugMask, GT_ENTER, "PROC_Map: vaAlign %x, paAlign %x, " -+ "sizeAlign %x\n", vaAlign, paAlign, sizeAlign); -+ -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Map: " -+ "InValid Processor Handle \n"); -+ goto func_end; -+ } -+ /* Critical section */ -+ (void)SYNC_EnterCS(hProcLock); -+ status = DMM_GetHandle(pProcObject, &hDmmMgr); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Map: Failed to get DMM Mgr " -+ "handle: 0x%x\n", status); -+ } else { -+ status = DMM_MapMemory(hDmmMgr, vaAlign, sizeAlign); -+ } -+ /* Add mapping to the page tables. */ -+ if (DSP_SUCCEEDED(status)) { -+ -+ status = (*pProcObject->pIntfFxns->pfnBrdMemMap) -+ (pProcObject->hWmdContext, paAlign, vaAlign, sizeAlign, -+ ulMapAttr); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Mapped address = MSB of VA | LSB of PA */ -+ *ppMapAddr = (void *) (vaAlign | ((u32) pMpuAddr & -+ (PG_SIZE_4K - 1))); -+ } else { -+ DMM_UnMapMemory(hDmmMgr, vaAlign, &sizeAlign); -+ } -+ (void)SYNC_LeaveCS(hProcLock); -+ -+#ifndef RES_CLEANUP_DISABLE -+ if (DSP_SUCCEEDED(status)) { -+ DRV_InsertDMMResElement(&dmmRes, pr_ctxt); -+ DRV_UpdateDMMResElement(dmmRes, (u32)pMpuAddr, ulSize, -+ (u32)pReqAddr, (u32)*ppMapAddr, hProcessor); -+ } -+#endif -+func_end: -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Leaving PROC_Map [0x%x]", status); -+ return status; -+} -+ -+/* -+ * ======== PROC_RegisterNotify ======== -+ * Purpose: -+ * Register to be notified of specific processor events. 
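PROC_Map rounds the MPU buffer address and the requested DSP address down to a 4 KiB boundary, grows the size so the unaligned head and tail stay covered, and rebuilds the returned address from the aligned DSP VA plus the page offset of the original MPU pointer. A self-contained model of that arithmetic; PG_ALIGN_LOW/PG_ALIGN_HIGH are written out here as the usual mask macros, which is an assumption (the real definitions live in the bridge headers).

#include <stdio.h>
#include <stdint.h>

#define PG_SIZE_4K 4096u
/* Assumed equivalents of the bridge alignment macros. */
#define PG_ALIGN_LOW(addr, pg)  ((addr) & ~((pg) - 1))
#define PG_ALIGN_HIGH(addr, pg) (((addr) + (pg) - 1) & ~((pg) - 1))

int main(void)
{
        uint32_t mpu_addr = 0x40012345;  /* user buffer, not page aligned */
        uint32_t req_addr = 0x20001000;  /* requested DSP virtual address */
        uint32_t size     = 0x2100;

        uint32_t va_align   = PG_ALIGN_LOW(req_addr, PG_SIZE_4K);
        uint32_t pa_align   = PG_ALIGN_LOW(mpu_addr, PG_SIZE_4K);
        /* Grow the size so the unaligned head and tail are still mapped. */
        uint32_t size_align = PG_ALIGN_HIGH(size + mpu_addr - pa_align,
                                            PG_SIZE_4K);
        /* Returned address: aligned DSP VA plus the page offset of the
         * original MPU pointer, as in PROC_Map(). */
        uint32_t map_addr   = va_align | (mpu_addr & (PG_SIZE_4K - 1));

        printf("va 0x%x pa 0x%x size 0x%x map 0x%x\n",
               va_align, pa_align, size_align, map_addr);
        return 0;
}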
-+ */ -+DSP_STATUS PROC_RegisterNotify(DSP_HPROCESSOR hProcessor, u32 uEventMask, -+ u32 uNotifyType, struct DSP_NOTIFICATION -+ *hNotification) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct DEH_MGR *hDehMgr; -+ -+ DBC_Require(hNotification != NULL); -+ DBC_Require(cRefs > 0); -+ -+ GT_4trace(PROC_DebugMask, GT_ENTER, -+ "Entered PROC_RegisterNotify, args:\n\t" -+ "hProcessor: 0x%x\n\tuEventMask: 0x%x\n\tuNotifyMask:" -+ " 0x%x\n\t hNotification 0x%x\n", hProcessor, uEventMask, -+ uNotifyType, hNotification); -+ -+ /* Check processor handle */ -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_RegsiterNotify Invalid " -+ "ProcessorHandle 0x%x\n", hProcessor); -+ goto func_end; -+ } -+ /* Check if event mask is a valid processor related event */ -+ if (uEventMask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | -+ DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_MMUFAULT | -+ DSP_SYSERROR | DSP_PWRERROR)) -+ status = DSP_EVALUE; -+ -+ /* Check if notify type is valid */ -+ if (uNotifyType != DSP_SIGNALEVENT) -+ status = DSP_EVALUE; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT, -+ * or DSP_PWRERROR then register event immediately. */ -+ if (uEventMask & -+ ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR)) { -+ status = NTFY_Register(pProcObject->hNtfy, -+ hNotification, uEventMask, uNotifyType); -+ /* Special case alert, special case alert! -+ * If we're trying to *deregister* (i.e. uEventMask -+ * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification, -+ * we have to deregister with the DEH manager. -+ * There's no way to know, based on uEventMask which -+ * manager the notification event was registered with, -+ * so if we're trying to deregister and NTFY_Register -+ * failed, we'll give the deh manager a shot. -+ */ -+ if ((uEventMask == 0) && DSP_FAILED(status)) { -+ status = DEV_GetDehMgr(pProcObject->hDevObject, -+ &hDehMgr); -+ DBC_Assert(pProcObject->pIntfFxns-> -+ pfnDehRegisterNotify); -+ status = (*pProcObject->pIntfFxns-> -+ pfnDehRegisterNotify) -+ (hDehMgr, uEventMask, uNotifyType, -+ hNotification); -+ } -+ } else { -+ status = DEV_GetDehMgr(pProcObject->hDevObject, -+ &hDehMgr); -+ DBC_Assert(pProcObject->pIntfFxns-> -+ pfnDehRegisterNotify); -+ status = (*pProcObject->pIntfFxns->pfnDehRegisterNotify) -+ (hDehMgr, uEventMask, uNotifyType, -+ hNotification); -+ if (DSP_FAILED(status)) -+ status = DSP_EFAIL; -+ -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== PROC_ReserveMemory ======== -+ * Purpose: -+ * Reserve a virtually contiguous region of DSP address space. 
-+ */ -+DSP_STATUS PROC_ReserveMemory(DSP_HPROCESSOR hProcessor, u32 ulSize, -+ void **ppRsvAddr) -+{ -+ struct DMM_OBJECT *hDmmMgr; -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ -+ GT_3trace(PROC_DebugMask, GT_ENTER, -+ "Entered PROC_ReserveMemory, args:\n\t" -+ "hProcessor: 0x%x ulSize: 0x%x ppRsvAddr: 0x%x\n", hProcessor, -+ ulSize, ppRsvAddr); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Map: " -+ "InValid Processor Handle \n"); -+ goto func_end; -+ } -+ status = DMM_GetHandle(pProcObject, &hDmmMgr); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, "PROC_ReserveMemory: " -+ "Failed to get DMM Mgr handle: 0x%x\n", status); -+ } else -+ status = DMM_ReserveMemory(hDmmMgr, ulSize, (u32 *)ppRsvAddr); -+ -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Leaving PROC_ReserveMemory [0x%x]", -+ status); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== PROC_Start ======== -+ * Purpose: -+ * Start a processor running. -+ */ -+DSP_STATUS PROC_Start(DSP_HPROCESSOR hProcessor) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct COD_MANAGER *hCodMgr; /* Code manager handle */ -+ u32 dwDspAddr; /* Loaded code's entry point. */ -+#ifdef DEBUG -+ BRD_STATUS uBrdState; -+#endif -+ DBC_Require(cRefs > 0); -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Start, args:\n\t" -+ "hProcessor: 0x%x\n", hProcessor); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Start :InValid Handle \n"); -+ goto func_end; -+ } -+ /* Call the WMD_BRD_Start */ -+ if (pProcObject->sState != PROC_LOADED) { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Start :Wrong state \n"); -+ status = DSP_EWRONGSTATE; -+ goto func_end; -+ } -+ status = DEV_GetCodMgr(pProcObject->hDevObject, &hCodMgr); -+ if (DSP_FAILED(status)) { -+ status = DSP_EFAIL; -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "Processor Start DSP_FAILED " -+ "in Getting DEV_GetCodMgr status 0x%x\n", status); -+ goto func_cont; -+ } -+ status = COD_GetEntry(hCodMgr, &dwDspAddr); -+ if (DSP_FAILED(status)) { -+ status = DSP_EFAIL; -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "Processor Start DSP_FAILED in " -+ "Getting COD_GetEntry status 0x%x\n", status); -+ goto func_cont; -+ } -+ status = (*pProcObject->pIntfFxns->pfnBrdStart) -+ (pProcObject->hWmdContext, dwDspAddr); -+ if (DSP_FAILED(status)) { -+ status = DSP_EFAIL; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Start Failed to Start the board\n"); -+ goto func_cont; -+ } -+ /* Call DEV_Create2 */ -+ status = DEV_Create2(pProcObject->hDevObject); -+ if (DSP_SUCCEEDED(status)) { -+ pProcObject->sState = PROC_RUNNING; -+ /* Deep sleep switces off the peripheral clocks. -+ * we just put the DSP CPU in idle in the idle loop. -+ * so there is no need to send a command to DSP */ -+ -+ if (pProcObject->hNtfy) { -+ PROC_NotifyClients(pProcObject, -+ DSP_PROCESSORSTATECHANGE); -+ } -+ GT_0trace(PROC_DebugMask, GT_1CLASS, "PROC_Start: Processor " -+ "Started and running \n"); -+ } else { -+ /* Failed to Create Node Manager and DISP Object -+ * Stop the Processor from running. 
Put it in STOPPED State */ -+ (void)(*pProcObject->pIntfFxns->pfnBrdStop)(pProcObject-> -+ hWmdContext); -+ status = DSP_EFAIL; -+ pProcObject->sState = PROC_STOPPED; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_Start " -+ "Failed to Create the Node Manager\n"); -+ } -+func_cont: -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) { -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) -+ (pProcObject->hWmdContext, &uBrdState))) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Start: Processor State is RUNNING \n"); -+ DBC_Assert(uBrdState != BRD_HIBERNATION); -+ } -+ } -+#endif -+func_end: -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "Exiting PROC_Start, status 0x%x\n", status); -+ DBC_Ensure((DSP_SUCCEEDED(status) && pProcObject->sState == -+ PROC_RUNNING) || DSP_FAILED(status)); -+ return status; -+} -+ -+/* -+ * ======== PROC_Stop ======== -+ * Purpose: -+ * Stop a processor running. -+ */ -+DSP_STATUS PROC_Stop(DSP_HPROCESSOR hProcessor) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct MSG_MGR *hMsgMgr; -+ struct NODE_MGR *hNodeMgr; -+ DSP_HNODE hNode; -+ u32 uNodeTabSize = 1; -+ u32 uNumNodes = 0; -+ u32 uNodesAllocated = 0; -+ BRD_STATUS uBrdState; -+ -+ DBC_Require(cRefs > 0); -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Stop, args:\n\t" -+ "hProcessor: 0x%x\n", hProcessor); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Stop :InValid Handle \n"); -+ goto func_end; -+ } -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) -+ (pProcObject->hWmdContext, &uBrdState))) { -+ /* Clean up all the resources except the current running -+ * process resources */ -+ if (uBrdState == BRD_ERROR) -+ PROC_CleanupAllResources(); -+ } -+ /* check if there are any running nodes */ -+ status = DEV_GetNodeManager(pProcObject->hDevObject, &hNodeMgr); -+ if (DSP_SUCCEEDED(status) && hNodeMgr) { -+ status = NODE_EnumNodes(hNodeMgr, &hNode, uNodeTabSize, -+ &uNumNodes, &uNodesAllocated); -+ if ((status == DSP_ESIZE) || (uNodesAllocated > 0)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "Can't stop device, Active " -+ "nodes = 0x%x \n", uNodesAllocated); -+ return DSP_EWRONGSTATE; -+ } -+ } -+ /* Call the WMD_BRD_Stop */ -+ /* It is OK to stop a device that does n't have nodes OR not started */ -+ status = (*pProcObject->pIntfFxns->pfnBrdStop)(pProcObject-> -+ hWmdContext); -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Stop: Processor Stopped, " -+ "i.e in standby mode \n"); -+ pProcObject->sState = PROC_STOPPED; -+ /* Destory the Node Manager, MSG Manager */ -+ if (DSP_SUCCEEDED(DEV_Destroy2(pProcObject->hDevObject))) { -+ /* Destroy the MSG by calling MSG_Delete */ -+ DEV_GetMsgMgr(pProcObject->hDevObject, &hMsgMgr); -+ if (hMsgMgr) { -+ MSG_Delete(hMsgMgr); -+ DEV_SetMsgMgr(pProcObject->hDevObject, NULL); -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns-> -+ pfnBrdStatus)(pProcObject->hWmdContext, -+ &uBrdState))) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Monitor:Processor Stopped \n"); -+ DBC_Assert(uBrdState == BRD_STOPPED); -+ } -+#endif -+ } else { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Stop Couldn't delete node manager \n"); -+ } -+ } else { -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Stop Failed to Stop the processor/device \n"); -+ } -+func_end: -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Exiting PROC_Stop, status 0x%x\n", -+ status); -+ -+ return status; -+} 
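PROC_Start refuses to run unless the object is in PROC_LOADED, and PROC_Stop above bails out with DSP_EWRONGSTATE while nodes are still allocated, so load/start/stop effectively drive a small state machine (STOPPED -> LOADED -> RUNNING -> STOPPED). A minimal model of just those guards, with placeholder state names rather than the driver's:

#include <stdio.h>

/* Placeholder states mirroring PROC_STOPPED/PROC_LOADED/PROC_RUNNING. */
enum proc_state { STOPPED, LOADED, RUNNING };

struct proc { enum proc_state state; int active_nodes; };

static int proc_load(struct proc *p)
{
        /* Load always forces the board through stop + monitor first. */
        p->state = LOADED;
        return 0;
}

static int proc_start(struct proc *p)
{
        if (p->state != LOADED)
                return -1;              /* DSP_EWRONGSTATE in the driver */
        p->state = RUNNING;
        return 0;
}

static int proc_stop(struct proc *p)
{
        if (p->active_nodes > 0)
                return -1;              /* refuse while nodes are allocated */
        p->state = STOPPED;
        return 0;
}

int main(void)
{
        struct proc p = { STOPPED, 0 };

        proc_load(&p);
        proc_start(&p);
        printf("running: %d\n", p.state == RUNNING);
        proc_stop(&p);
        printf("stopped: %d\n", p.state == STOPPED);
        return 0;
}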
-+ -+/* -+ * ======== PROC_UnMap ======== -+ * Purpose: -+ * Removes a MPU buffer mapping from the DSP address space. -+ */ -+DSP_STATUS PROC_UnMap(DSP_HPROCESSOR hProcessor, void *pMapAddr, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ struct DMM_OBJECT *hDmmMgr; -+ u32 vaAlign; -+ u32 sizeAlign; -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE dmmRes; -+#endif -+ GT_2trace(PROC_DebugMask, GT_ENTER, -+ "Entered PROC_UnMap, args:\n\thProcessor:" -+ "0x%x pMapAddr: 0x%x\n", hProcessor, pMapAddr); -+ -+ vaAlign = PG_ALIGN_LOW((u32) pMapAddr, PG_SIZE_4K); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_UnMap: " -+ "InValid Processor Handle \n"); -+ goto func_end; -+ } -+ -+ status = DMM_GetHandle(hProcessor, &hDmmMgr); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ /* Critical section */ -+ (void)SYNC_EnterCS(hProcLock); -+ if (DSP_FAILED(status)) { -+ GT_1trace(PROC_DebugMask, GT_7CLASS, "PROC_UnMap: " -+ "Failed to get DMM Mgr handle: 0x%x\n", status); -+ } else { -+ /* Update DMM structures. Get the size to unmap. -+ This function returns error if the VA is not mapped */ -+ status = DMM_UnMapMemory(hDmmMgr, (u32) vaAlign, &sizeAlign); -+ } -+ /* Remove mapping from the page tables. */ -+ if (DSP_SUCCEEDED(status)) { -+ status = (*pProcObject->pIntfFxns->pfnBrdMemUnMap) -+ (pProcObject->hWmdContext, vaAlign, sizeAlign); -+ } -+ (void)SYNC_LeaveCS(hProcLock); -+#ifndef RES_CLEANUP_DISABLE -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "PROC_UnMap DRV_GetDMMResElement " -+ "pMapAddr:[0x%x]", pMapAddr); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (pr_ctxt && DRV_GetDMMResElement((u32)pMapAddr, &dmmRes, pr_ctxt) -+ != DSP_ENOTFOUND) -+ DRV_RemoveDMMResElement(dmmRes, pr_ctxt); -+#endif -+func_end: -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "Leaving PROC_UnMap [0x%x]", status); -+ return status; -+} -+ -+/* -+ * ======== PROC_UnReserveMemory ======== -+ * Purpose: -+ * Frees a previously reserved region of DSP address space. -+ */ -+DSP_STATUS PROC_UnReserveMemory(DSP_HPROCESSOR hProcessor, void *pRsvAddr) -+{ -+ struct DMM_OBJECT *hDmmMgr; -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcessor; -+ -+ GT_2trace(PROC_DebugMask, GT_ENTER, -+ "Entered PROC_UnReserveMemory, args:\n\t" -+ "hProcessor: 0x%x pRsvAddr: 0x%x\n", hProcessor, pRsvAddr); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_UnMap: " -+ "InValid Processor Handle \n"); -+ goto func_end; -+ } -+ status = DMM_GetHandle(pProcObject, &hDmmMgr); -+ if (DSP_FAILED(status)) -+ GT_1trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_UnReserveMemory: Failed to get DMM Mgr " -+ "handle: 0x%x\n", status); -+ else -+ status = DMM_UnReserveMemory(hDmmMgr, (u32) pRsvAddr); -+ -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "Leaving PROC_UnReserveMemory [0x%x]", -+ status); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== = PROC_Monitor ======== == -+ * Purpose: -+ * Place the Processor in Monitor State. This is an internal -+ * function and a requirement before Processor is loaded. -+ * This does a WMD_BRD_Stop, DEV_Destroy2 and WMD_BRD_Monitor. -+ * In DEV_Destroy2 we delete the node manager. -+ * Parameters: -+ * hProcObject: Handle to Processor Object -+ * Returns: -+ * DSP_SOK: Processor placed in monitor mode. 
-+ * !DSP_SOK: Failed to place processor in monitor mode. -+ * Requires: -+ * Valid Processor Handle -+ * Ensures: -+ * Success: ProcObject state is PROC_IDLE -+ */ -+static DSP_STATUS PROC_Monitor(struct PROC_OBJECT *hProcObject) -+{ -+ DSP_STATUS status = DSP_EFAIL; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProcObject; -+ struct MSG_MGR *hMsgMgr; -+#ifdef DEBUG -+ BRD_STATUS uBrdState; -+#endif -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)); -+ -+ GT_1trace(PROC_DebugMask, GT_ENTER, "Entered PROC_Monitor, args:\n\t" -+ "hProcessor: 0x%x\n", hProcObject); -+ /* This is needed only when Device is loaded when it is -+ * already 'ACTIVE' */ -+ /* Destory the Node Manager, MSG Manager */ -+ if (DSP_SUCCEEDED(DEV_Destroy2(pProcObject->hDevObject))) { -+ /* Destroy the MSG by calling MSG_Delete */ -+ DEV_GetMsgMgr(pProcObject->hDevObject, &hMsgMgr); -+ if (hMsgMgr) { -+ MSG_Delete(hMsgMgr); -+ DEV_SetMsgMgr(pProcObject->hDevObject, NULL); -+ } -+ } -+ /* Place the Board in the Monitor State */ -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdMonitor) -+ (pProcObject->hWmdContext))) { -+ status = DSP_SOK; -+#ifdef DEBUG -+ if (DSP_SUCCEEDED((*pProcObject->pIntfFxns->pfnBrdStatus) -+ (pProcObject->hWmdContext, &uBrdState))) { -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_Monitor:Processor in " -+ "Monitor State\n"); -+ DBC_Assert(uBrdState == BRD_IDLE); -+ } -+#endif -+ } else { -+ /* Monitor Failure */ -+ GT_0trace(PROC_DebugMask, GT_7CLASS, -+ "PROC_Monitor: Processor Could not" -+ "be put in Monitor mode \n"); -+ } -+ GT_1trace(PROC_DebugMask, GT_ENTER, -+ "Exiting PROC_Monitor, status 0x%x\n", -+ status); -+#ifdef DEBUG -+ DBC_Ensure((DSP_SUCCEEDED(status) && uBrdState == BRD_IDLE) || -+ DSP_FAILED(status)); -+#endif -+ return status; -+} -+ -+/* -+ * ======== GetEnvpCount ======== -+ * Purpose: -+ * Return the number of elements in the envp array, including the -+ * terminating NULL element. -+ */ -+static s32 GetEnvpCount(char **envp) -+{ -+ s32 cRetval = 0; -+ if (envp) { -+ while (*envp++) -+ cRetval++; -+ -+ cRetval += 1; /* Include the terminating NULL in the count. */ -+ } -+ -+ return cRetval; -+} -+ -+/* -+ * ======== PrependEnvp ======== -+ * Purpose: -+ * Prepend an environment variable=value pair to the new envp array, and -+ * copy in the existing var=value pairs in the old envp array. -+ */ -+static char **PrependEnvp(char **newEnvp, char **envp, s32 cEnvp, s32 cNewEnvp, -+ char *szVar) -+{ -+ char **ppEnvp = newEnvp; -+ -+ DBC_Require(newEnvp); -+ -+ /* Prepend new environ var=value string */ -+ *newEnvp++ = szVar; -+ -+ /* Copy user's environment into our own. */ -+ while (cEnvp--) -+ *newEnvp++ = *envp++; -+ -+ /* Ensure NULL terminates the new environment strings array. */ -+ if (cEnvp == 0) -+ *newEnvp = NULL; -+ -+ return ppEnvp; -+} -+ -+/* -+ * ======== PROC_NotifyClients ======== -+ * Purpose: -+ * Notify the processor the events. 
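GetEnvpCount() and PrependEnvp() above exist so PROC_Load can hand the target an environment with "PROC_ID=n" in slot 0: the count includes the terminating NULL, and the old pointers are copied in behind the new entry. A user-space model of the same copy, using plain calloc/free instead of MEM_Calloc/MEM_Free:

#include <stdio.h>
#include <stdlib.h>

/* Count entries including the terminating NULL, as GetEnvpCount() does. */
static int envp_count(char **envp)
{
        int n = 0;

        if (!envp)
                return 0;
        while (*envp++)
                n++;
        return n + 1;                   /* include the NULL terminator */
}

/* Allocate a new vector with "var" in slot 0 followed by the old entries. */
static char **prepend_envp(char **envp, char *var)
{
        int old = envp_count(envp);
        int new_count = old ? old + 1 : 2;
        char **out = calloc(new_count, sizeof(*out));
        int i;

        if (!out)
                return NULL;
        out[0] = var;
        for (i = 0; i + 1 < new_count && envp && envp[i]; i++)
                out[i + 1] = envp[i];
        return out;                     /* calloc() already NULL-terminated it */
}

int main(void)
{
        char *envp[] = { "PATH=/bin", NULL };
        char **out = prepend_envp(envp, "PROC_ID=0");

        if (out) {
                printf("%s, %s\n", out[0], out[1]);
                free(out);
        }
        return 0;
}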
-+ */ -+DSP_STATUS PROC_NotifyClients(DSP_HPROCESSOR hProc, u32 uEvents) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProc; -+ -+ DBC_Require(MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)); -+ DBC_Require(IsValidProcEvent(uEvents)); -+ DBC_Require(cRefs > 0); -+ if (!MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_NotifyClients: " -+ "InValid Processor Handle \n"); -+ goto func_end; -+ } -+ -+ NTFY_Notify(pProcObject->hNtfy, uEvents); -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_NotifyClients :Signaled. \n"); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== PROC_NotifyAllClients ======== -+ * Purpose: -+ * Notify the processor the events. This includes notifying all clients -+ * attached to a particulat DSP. -+ */ -+DSP_STATUS PROC_NotifyAllClients(DSP_HPROCESSOR hProc, u32 uEvents) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProc; -+ -+ DBC_Require(MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)); -+ DBC_Require(IsValidProcEvent(uEvents)); -+ DBC_Require(cRefs > 0); -+ -+ DEV_NotifyClients(pProcObject->hDevObject, uEvents); -+ -+ GT_0trace(PROC_DebugMask, GT_1CLASS, -+ "PROC_NotifyAllClients :Signaled. \n"); -+ -+ return status; -+} -+ -+/* -+ * ======== PROC_GetProcessorId ======== -+ * Purpose: -+ * Retrieves the processor ID. -+ */ -+DSP_STATUS PROC_GetProcessorId(DSP_HPROCESSOR hProc, u32 *procID) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct PROC_OBJECT *pProcObject = (struct PROC_OBJECT *)hProc; -+ -+ if (MEM_IsValidHandle(pProcObject, PROC_SIGNATURE)) -+ *procID = pProcObject->uProcessor; -+ else { -+ status = DSP_EHANDLE; -+ GT_0trace(PROC_DebugMask, GT_7CLASS, "PROC_GetProcessorId: " -+ "InValid Processor Handle \n"); -+ } -+ return status; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/pwr.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/pwr.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/pwr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/pwr.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,184 @@ -+/* -+ * pwr.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== PWR.c ======== -+ * PWR API for controlling DSP power states. -+ * -+ * Public Functions: -+ * PWR_SleepDSP -+ * PWR_WakeDSP -+ * -+ *! Revision History -+ *! ================ -+ *! 18-Feb-2003 vp Code review updates. -+ *! 18-Oct-2002 vp Ported to Linux platform. -+ *! 22-May-2002 sg Do PWR-to-IOCTL code mapping in PWR_SleepDSP. -+ *! 29-Apr-2002 sg Initial. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+ -+/* ----------------------------------- Link Driver */ -+#include -+ -+/* -+ * ======== PWR_SleepDSP ======== -+ * Send command to DSP to enter sleep state. -+ */ -+DSP_STATUS PWR_SleepDSP(IN CONST u32 sleepCode, IN CONST u32 timeout) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct WMD_DEV_CONTEXT *dwContext; -+ DSP_STATUS status = DSP_EFAIL; -+ struct DEV_OBJECT *hDevObject = NULL; -+ u32 ioctlcode = 0; -+ u32 arg = timeout; -+ -+ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); -+ hDevObject != NULL; -+ hDevObject = -+ (struct DEV_OBJECT *)DRV_GetNextDevObject -+ ((u32)hDevObject)) { -+ if (DSP_FAILED(DEV_GetWMDContext(hDevObject, -+ (struct WMD_DEV_CONTEXT **)&dwContext))) { -+ continue; -+ } -+ if (DSP_FAILED(DEV_GetIntfFxns(hDevObject, -+ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { -+ continue; -+ } -+ if (sleepCode == PWR_DEEPSLEEP) -+ ioctlcode = WMDIOCTL_DEEPSLEEP; -+ else if (sleepCode == PWR_EMERGENCYDEEPSLEEP) -+ ioctlcode = WMDIOCTL_EMERGENCYSLEEP; -+ else -+ status = DSP_EINVALIDARG; -+ -+ if (status != DSP_EINVALIDARG) { -+ status = (*pIntfFxns->pfnDevCntrl)(dwContext, -+ ioctlcode, (void *)&arg); -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== PWR_WakeDSP ======== -+ * Send command to DSP to wake it from sleep. -+ */ -+DSP_STATUS PWR_WakeDSP(IN CONST u32 timeout) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct WMD_DEV_CONTEXT *dwContext; -+ DSP_STATUS status = DSP_EFAIL; -+ struct DEV_OBJECT *hDevObject = NULL; -+ u32 arg = timeout; -+ -+ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); -+ hDevObject != NULL; -+ hDevObject = (struct DEV_OBJECT *)DRV_GetNextDevObject -+ ((u32)hDevObject)) { -+ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, -+ (struct WMD_DEV_CONTEXT **)&dwContext))) { -+ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, -+ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { -+ status = (*pIntfFxns->pfnDevCntrl)(dwContext, -+ WMDIOCTL_WAKEUP, (void *)&arg); -+ } -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== PWR_PM_PreScale======== -+ * Sends pre-notification message to DSP. -+ */ -+DSP_STATUS PWR_PM_PreScale(IN u16 voltage_domain, u32 level) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct WMD_DEV_CONTEXT *dwContext; -+ DSP_STATUS status = DSP_EFAIL; -+ struct DEV_OBJECT *hDevObject = NULL; -+ u32 arg[2]; -+ -+ arg[0] = voltage_domain; -+ arg[1] = level; -+ -+ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); -+ hDevObject != NULL; -+ hDevObject = (struct DEV_OBJECT *)DRV_GetNextDevObject -+ ((u32)hDevObject)) { -+ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, -+ (struct WMD_DEV_CONTEXT **)&dwContext))) { -+ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, -+ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { -+ status = (*pIntfFxns->pfnDevCntrl)(dwContext, -+ WMDIOCTL_PRESCALE_NOTIFY, -+ (void *)&arg); -+ } -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== PWR_PM_PostScale======== -+ * Sends post-notification message to DSP. 
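PWR_PM_PreScale() and PWR_PM_PostScale() pass the voltage domain and OPP level to every device object as a two-word array through the same DevCntrl hook that the sleep/wake paths use. The sketch below shows just that argument packing; the ioctl number and the dev_cntrl() stub are placeholders, not the real WMDIOCTL values or driver entry points.

#include <stdio.h>
#include <stdint.h>

/* Placeholder for the mini-driver DevCntrl hook the PWR code calls. */
static int dev_cntrl(unsigned int ioctl_code, void *arg)
{
        const uint32_t *words = arg;

        printf("ioctl 0x%x: domain %u level %u\n",
               ioctl_code, words[0], words[1]);
        return 0;
}

/* Model of PWR_PM_PreScale(): voltage domain and OPP level travel as a
 * two-word array, like arg[0]/arg[1] in the driver. */
static int pm_prescale(uint16_t voltage_domain, uint32_t level)
{
        uint32_t arg[2] = { voltage_domain, level };

        /* 0x100 is a placeholder for WMDIOCTL_PRESCALE_NOTIFY. */
        return dev_cntrl(0x100, arg);
}

int main(void)
{
        return pm_prescale(1, 3);
}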
-+ */ -+DSP_STATUS PWR_PM_PostScale(IN u16 voltage_domain, u32 level) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct WMD_DEV_CONTEXT *dwContext; -+ DSP_STATUS status = DSP_EFAIL; -+ struct DEV_OBJECT *hDevObject = NULL; -+ u32 arg[2]; -+ -+ arg[0] = voltage_domain; -+ arg[1] = level; -+ -+ for (hDevObject = (struct DEV_OBJECT *)DRV_GetFirstDevObject(); -+ hDevObject != NULL; -+ hDevObject = (struct DEV_OBJECT *)DRV_GetNextDevObject -+ ((u32)hDevObject)) { -+ if (DSP_SUCCEEDED(DEV_GetWMDContext(hDevObject, -+ (struct WMD_DEV_CONTEXT **)&dwContext))) { -+ if (DSP_SUCCEEDED(DEV_GetIntfFxns(hDevObject, -+ (struct WMD_DRV_INTERFACE **)&pIntfFxns))) { -+ status = (*pIntfFxns->pfnDevCntrl)(dwContext, -+ WMDIOCTL_POSTSCALE_NOTIFY, -+ (void *)&arg); -+ } -+ } -+ } -+ return status; -+ -+} -+ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/rmm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/rmm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/rmm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/rmm.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,604 @@ -+/* -+ * rmm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== rmm.c ======== -+ * Description: -+ * -+ * This memory manager provides general heap management and arbitrary -+ * alignment for any number of memory segments. -+ * -+ * Notes: -+ * -+ * Memory blocks are allocated from the end of the first free memory -+ * block large enough to satisfy the request. Alignment requirements -+ * are satisfied by "sliding" the block forward until its base satisfies -+ * the alignment specification; if this is not possible then the next -+ * free block large enough to hold the request is tried. -+ * -+ * Since alignment can cause the creation of a new free block - the -+ * unused memory formed between the start of the original free block -+ * and the start of the allocated block - the memory manager must free -+ * this memory to prevent a memory leak. -+ * -+ * Overlay memory is managed by reserving through RMM_alloc, and freeing -+ * it through RMM_free. The memory manager prevents DSP code/data that is -+ * overlayed from being overwritten as long as the memory it runs at has -+ * been allocated, and not yet freed. -+ * -+ *! Revision History -+ *! ================ -+ *! 18-Feb-2003 vp Code review updates. -+ *! 18-Oct-2002 vp Ported to Linux Platform. -+ *! 24-Sep-2002 map Updated from Code Review -+ *! 25-Jun-2002 jeh Free from segid passed to RMM_free(). -+ *! 24-Apr-2002 jeh Determine segid based on address in RMM_free(). (No way -+ *! to keep track of segid with dynamic loader library.) -+ *! 16-Oct-2001 jeh Based on gen tree rm.c. Added support for overlays. 
-+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+#define RMM_TARGSIGNATURE 0x544d4d52 /* "TMMR" */ -+ -+/* -+ * ======== RMM_Header ======== -+ * This header is used to maintain a list of free memory blocks. -+ */ -+struct RMM_Header { -+ struct RMM_Header *next; /* form a free memory link list */ -+ u32 size; /* size of the free memory */ -+ u32 addr; /* DSP address of memory block */ -+} ; -+ -+/* -+ * ======== RMM_OvlySect ======== -+ * Keeps track of memory occupied by overlay section. -+ */ -+struct RMM_OvlySect { -+ struct LST_ELEM listElem; -+ u32 addr; /* Start of memory section */ -+ u32 size; /* Length (target MAUs) of section */ -+ s32 page; /* Memory page */ -+}; -+ -+/* -+ * ======== RMM_TargetObj ======== -+ */ -+struct RMM_TargetObj { -+ u32 dwSignature; -+ struct RMM_Segment *segTab; -+ struct RMM_Header **freeList; -+ u32 numSegs; -+ struct LST_LIST *ovlyList; /* List of overlay memory in use */ -+}; -+ -+#if GT_TRACE -+static struct GT_Mask RMM_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static u32 cRefs; /* module reference count */ -+ -+static bool allocBlock(struct RMM_TargetObj *target, u32 segid, u32 size, -+ u32 align, u32 *dspAddr); -+static bool freeBlock(struct RMM_TargetObj *target, u32 segid, u32 addr, -+ u32 size); -+ -+/* -+ * ======== RMM_alloc ======== -+ */ -+DSP_STATUS RMM_alloc(struct RMM_TargetObj *target, u32 segid, u32 size, -+ u32 align, u32 *dspAddr, bool reserve) -+{ -+ struct RMM_OvlySect *sect; -+ struct RMM_OvlySect *prevSect = NULL; -+ struct RMM_OvlySect *newSect; -+ u32 addr; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); -+ DBC_Require(dspAddr != NULL); -+ DBC_Require(size > 0); -+ DBC_Require(reserve || (target->numSegs > 0)); -+ DBC_Require(cRefs > 0); -+ -+ GT_6trace(RMM_debugMask, GT_ENTER, -+ "RMM_alloc(0x%lx, 0x%lx, 0x%lx, 0x%lx, " -+ "0x%lx, 0x%lx)\n", target, segid, size, align, dspAddr, -+ reserve); -+ if (!reserve) { -+ if (!allocBlock(target, segid, size, align, dspAddr)) { -+ status = DSP_EMEMORY; -+ } else { -+ /* Increment the number of allocated blocks in this -+ * segment */ -+ target->segTab[segid].number++; -+ } -+ goto func_end; -+ } -+ /* An overlay section - See if block is already in use. If not, -+ * insert into the list in ascending address size. */ -+ addr = *dspAddr; -+ sect = (struct RMM_OvlySect *)LST_First(target->ovlyList); -+ /* Find place to insert new list element. List is sorted from -+ * smallest to largest address. */ -+ while (sect != NULL) { -+ if (addr <= sect->addr) { -+ /* Check for overlap with sect */ -+ if ((addr + size > sect->addr) || (prevSect && -+ (prevSect->addr + prevSect->size > addr))) { -+ status = DSP_EOVERLAYMEMORY; -+ } -+ break; -+ } -+ prevSect = sect; -+ sect = (struct RMM_OvlySect *)LST_Next(target->ovlyList, -+ (struct LST_ELEM *)sect); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* No overlap - allocate list element for new section. 
*/ -+ newSect = MEM_Calloc(sizeof(struct RMM_OvlySect), MEM_PAGED); -+ if (newSect == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ LST_InitElem((struct LST_ELEM *)newSect); -+ newSect->addr = addr; -+ newSect->size = size; -+ newSect->page = segid; -+ if (sect == NULL) { -+ /* Put new section at the end of the list */ -+ LST_PutTail(target->ovlyList, -+ (struct LST_ELEM *)newSect); -+ } else { -+ /* Put new section just before sect */ -+ LST_InsertBefore(target->ovlyList, -+ (struct LST_ELEM *)newSect, -+ (struct LST_ELEM *)sect); -+ } -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== RMM_create ======== -+ */ -+DSP_STATUS RMM_create(struct RMM_TargetObj **pTarget, -+ struct RMM_Segment segTab[], u32 numSegs) -+{ -+ struct RMM_Header *hptr; -+ struct RMM_Segment *sptr, *tmp; -+ struct RMM_TargetObj *target; -+ s32 i; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(pTarget != NULL); -+ DBC_Require(numSegs == 0 || segTab != NULL); -+ -+ GT_3trace(RMM_debugMask, GT_ENTER, -+ "RMM_create(0x%lx, 0x%lx, 0x%lx)\n", -+ pTarget, segTab, numSegs); -+ -+ /* Allocate DBL target object */ -+ MEM_AllocObject(target, struct RMM_TargetObj, RMM_TARGSIGNATURE); -+ -+ if (target == NULL) { -+ GT_0trace(RMM_debugMask, GT_6CLASS, -+ "RMM_create: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ target->numSegs = numSegs; -+ if (!(numSegs > 0)) -+ goto func_cont; -+ -+ /* Allocate the memory for freelist from host's memory */ -+ target->freeList = MEM_Calloc(numSegs * sizeof(struct RMM_Header *), -+ MEM_PAGED); -+ if (target->freeList == NULL) { -+ GT_0trace(RMM_debugMask, GT_6CLASS, -+ "RMM_create: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } else { -+ /* Allocate headers for each element on the free list */ -+ for (i = 0; i < (s32) numSegs; i++) { -+ target->freeList[i] = -+ MEM_Calloc(sizeof(struct RMM_Header), -+ MEM_PAGED); -+ if (target->freeList[i] == NULL) { -+ GT_0trace(RMM_debugMask, GT_6CLASS, -+ "RMM_create: Memory " -+ "allocation failed\n"); -+ status = DSP_EMEMORY; -+ break; -+ } -+ } -+ /* Allocate memory for initial segment table */ -+ target->segTab = MEM_Calloc(numSegs * -+ sizeof(struct RMM_Segment), MEM_PAGED); -+ if (target->segTab == NULL) { -+ GT_0trace(RMM_debugMask, GT_6CLASS, -+ "RMM_create: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } else { -+ /* Initialize segment table and free list */ -+ sptr = target->segTab; -+ for (i = 0, tmp = segTab; numSegs > 0; numSegs--, i++) { -+ *sptr = *tmp; -+ hptr = target->freeList[i]; -+ hptr->addr = tmp->base; -+ hptr->size = tmp->length; -+ hptr->next = NULL; -+ tmp++; -+ sptr++; -+ } -+ } -+ } -+func_cont: -+ /* Initialize overlay memory list */ -+ if (DSP_SUCCEEDED(status)) { -+ target->ovlyList = LST_Create(); -+ if (target->ovlyList == NULL) { -+ GT_0trace(RMM_debugMask, GT_6CLASS, -+ "RMM_create: Memory allocation failed\n"); -+ status = DSP_EMEMORY; -+ } -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ *pTarget = target; -+ } else { -+ *pTarget = NULL; -+ if (target) -+ RMM_delete(target); -+ -+ } -+ -+ DBC_Ensure((DSP_SUCCEEDED(status) && MEM_IsValidHandle((*pTarget), -+ RMM_TARGSIGNATURE)) || (DSP_FAILED(status) && *pTarget == -+ NULL)); -+ -+ return status; -+} -+ -+/* -+ * ======== RMM_delete ======== -+ */ -+void RMM_delete(struct RMM_TargetObj *target) -+{ -+ struct RMM_OvlySect *pSect; -+ struct RMM_Header *hptr; -+ struct RMM_Header *next; -+ u32 i; -+ -+ DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); -+ -+ 
GT_1trace(RMM_debugMask, GT_ENTER, "RMM_delete(0x%lx)\n", target); -+ -+ if (target->segTab != NULL) -+ MEM_Free(target->segTab); -+ -+ if (target->ovlyList) { -+ while ((pSect = (struct RMM_OvlySect *)LST_GetHead -+ (target->ovlyList))) { -+ MEM_Free(pSect); -+ } -+ DBC_Assert(LST_IsEmpty(target->ovlyList)); -+ LST_Delete(target->ovlyList); -+ } -+ -+ if (target->freeList != NULL) { -+ /* Free elements on freelist */ -+ for (i = 0; i < target->numSegs; i++) { -+ hptr = next = target->freeList[i]; -+ while (next) { -+ hptr = next; -+ next = hptr->next; -+ MEM_Free(hptr); -+ } -+ } -+ MEM_Free(target->freeList); -+ } -+ -+ MEM_FreeObject(target); -+} -+ -+/* -+ * ======== RMM_exit ======== -+ */ -+void RMM_exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(RMM_debugMask, GT_5CLASS, "RMM_exit() ref count: 0x%x\n", -+ cRefs); -+ -+ if (cRefs == 0) -+ MEM_Exit(); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== RMM_free ======== -+ */ -+bool RMM_free(struct RMM_TargetObj *target, u32 segid, u32 addr, u32 size, -+ bool reserved) -+ -+{ -+ struct RMM_OvlySect *sect; -+ bool retVal = true; -+ -+ DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); -+ -+ DBC_Require(reserved || segid < target->numSegs); -+ DBC_Require(reserved || (addr >= target->segTab[segid].base && -+ (addr + size) <= (target->segTab[segid].base + -+ target->segTab[segid].length))); -+ -+ GT_5trace(RMM_debugMask, GT_ENTER, -+ "RMM_free(0x%lx, 0x%lx, 0x%lx, 0x%lx, " -+ "0x%lx)\n", target, segid, addr, size, reserved); -+ /* -+ * Free or unreserve memory. -+ */ -+ if (!reserved) { -+ retVal = freeBlock(target, segid, addr, size); -+ if (retVal) -+ target->segTab[segid].number--; -+ -+ } else { -+ /* Unreserve memory */ -+ sect = (struct RMM_OvlySect *)LST_First(target->ovlyList); -+ while (sect != NULL) { -+ if (addr == sect->addr) { -+ DBC_Assert(size == sect->size); -+ /* Remove from list */ -+ LST_RemoveElem(target->ovlyList, -+ (struct LST_ELEM *)sect); -+ MEM_Free(sect); -+ break; -+ } -+ sect = (struct RMM_OvlySect *)LST_Next(target->ovlyList, -+ (struct LST_ELEM *)sect); -+ } -+ if (sect == NULL) -+ retVal = false; -+ -+ } -+ return retVal; -+} -+ -+/* -+ * ======== RMM_init ======== -+ */ -+bool RMM_init(void) -+{ -+ bool retVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ DBC_Assert(!RMM_debugMask.flags); -+ GT_create(&RMM_debugMask, "RM"); /* "RM" for RMm */ -+ -+ retVal = MEM_Init(); -+ -+ if (!retVal) -+ MEM_Exit(); -+ -+ } -+ -+ if (retVal) -+ cRefs++; -+ -+ GT_1trace(RMM_debugMask, GT_5CLASS, -+ "RMM_init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((retVal && (cRefs > 0)) || (!retVal && (cRefs >= 0))); -+ -+ return retVal; -+} -+ -+/* -+ * ======== RMM_stat ======== -+ */ -+bool RMM_stat(struct RMM_TargetObj *target, enum DSP_MEMTYPE segid, -+ struct DSP_MEMSTAT *pMemStatBuf) -+{ -+ struct RMM_Header *head; -+ bool retVal = false; -+ u32 maxFreeSize = 0; -+ u32 totalFreeSize = 0; -+ u32 freeBlocks = 0; -+ -+ DBC_Require(pMemStatBuf != NULL); -+ DBC_Assert(target != NULL); -+ -+ if ((u32) segid < target->numSegs) { -+ head = target->freeList[segid]; -+ -+ /* Collect data from freeList */ -+ while (head != NULL) { -+ maxFreeSize = max(maxFreeSize, head->size); -+ totalFreeSize += head->size; -+ freeBlocks++; -+ head = head->next; -+ } -+ -+ /* ulSize */ -+ pMemStatBuf->ulSize = target->segTab[segid].length; -+ -+ /* ulNumFreeBlocks */ -+ pMemStatBuf->ulNumFreeBlocks = freeBlocks; -+ -+ /* ulTotalFreeSize */ -+ pMemStatBuf->ulTotalFreeSize = totalFreeSize; 
-+ -+ /* ulLenMaxFreeBlock */ -+ pMemStatBuf->ulLenMaxFreeBlock = maxFreeSize; -+ -+ /* ulNumAllocBlocks */ -+ pMemStatBuf->ulNumAllocBlocks = target->segTab[segid].number; -+ -+ retVal = true; -+ } -+ -+ return retVal; -+} -+ -+/* -+ * ======== balloc ======== -+ * This allocation function allocates memory from the lowest addresses -+ * first. -+ */ -+static bool allocBlock(struct RMM_TargetObj *target, u32 segid, u32 size, -+ u32 align, u32 *dspAddr) -+{ -+ struct RMM_Header *head; -+ struct RMM_Header *prevhead = NULL; -+ struct RMM_Header *next; -+ u32 tmpalign; -+ u32 alignbytes; -+ u32 hsize; -+ u32 allocsize; -+ u32 addr; -+ -+ alignbytes = (align == 0) ? 1 : align; -+ prevhead = NULL; -+ head = target->freeList[segid]; -+ -+ do { -+ hsize = head->size; -+ next = head->next; -+ -+ addr = head->addr; /* alloc from the bottom */ -+ -+ /* align allocation */ -+ (tmpalign = (u32) addr % alignbytes); -+ if (tmpalign != 0) -+ tmpalign = alignbytes - tmpalign; -+ -+ allocsize = size + tmpalign; -+ -+ if (hsize >= allocsize) { /* big enough */ -+ if (hsize == allocsize && prevhead != NULL) { -+ prevhead->next = next; -+ MEM_Free(head); -+ } else { -+ head->size = hsize - allocsize; -+ head->addr += allocsize; -+ } -+ -+ /* free up any hole created by alignment */ -+ if (tmpalign) -+ freeBlock(target, segid, addr, tmpalign); -+ -+ *dspAddr = addr + tmpalign; -+ return true; -+ } -+ -+ prevhead = head; -+ head = next; -+ -+ } while (head != NULL); -+ -+ return false; -+} -+ -+/* -+ * ======== freeBlock ======== -+ * TO DO: freeBlock() allocates memory, which could result in failure. -+ * Could allocate an RMM_Header in RMM_alloc(), to be kept in a pool. -+ * freeBlock() could use an RMM_Header from the pool, freeing as blocks -+ * are coalesced. -+ */ -+static bool freeBlock(struct RMM_TargetObj *target, u32 segid, u32 addr, -+ u32 size) -+{ -+ struct RMM_Header *head; -+ struct RMM_Header *thead; -+ struct RMM_Header *rhead; -+ bool retVal = true; -+ -+ /* Create a memory header to hold the newly free'd block. */ -+ rhead = MEM_Calloc(sizeof(struct RMM_Header), MEM_PAGED); -+ if (rhead == NULL) { -+ retVal = false; -+ } else { -+ /* search down the free list to find the right place for addr */ -+ head = target->freeList[segid]; -+ -+ if (addr >= head->addr) { -+ while (head->next != NULL && addr > head->next->addr) -+ head = head->next; -+ -+ thead = head->next; -+ -+ head->next = rhead; -+ rhead->next = thead; -+ rhead->addr = addr; -+ rhead->size = size; -+ } else { -+ *rhead = *head; -+ head->next = rhead; -+ head->addr = addr; -+ head->size = size; -+ thead = rhead->next; -+ } -+ -+ /* join with upper block, if possible */ -+ if (thead != NULL && (rhead->addr + rhead->size) == -+ thead->addr) { -+ head->next = rhead->next; -+ thead->size = size + thead->size; -+ thead->addr = addr; -+ MEM_Free(rhead); -+ rhead = thead; -+ } -+ -+ /* join with the lower block, if possible */ -+ if ((head->addr + head->size) == rhead->addr) { -+ head->next = rhead->next; -+ head->size = head->size + rhead->size; -+ MEM_Free(rhead); -+ } -+ } -+ -+ return retVal; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/strm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/strm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/rmgr/strm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/rmgr/strm.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,1006 @@ -+/* -+ * strm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
-+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== strm.c ======== -+ * Description: -+ * DSP/BIOS Bridge Stream Manager. -+ * -+ * Public Functions: -+ * STRM_AllocateBuffer -+ * STRM_Close -+ * STRM_Create -+ * STRM_Delete -+ * STRM_Exit -+ * STRM_FreeBuffer -+ * STRM_GetEventHandle -+ * STRM_GetInfo -+ * STRM_Idle -+ * STRM_Init -+ * STRM_Issue -+ * STRM_Open -+ * STRM_PrepareBuffer -+ * STRM_Reclaim -+ * STRM_RegisterNotify -+ * STRM_Select -+ * STRM_UnprepareBuffer -+ * -+ * Notes: -+ * -+ *! Revision History: -+ *! ================= -+ *! 18-Feb-2003 vp Code review updates. -+ *! 18-Oct-2002 vp Ported to Linux platform. -+ *! 13-Mar-2002 map pStrm init'd to NULL in STRM_Open to prevent error -+ *! 12-Mar-2002 map Changed return var to WSX "wStatus" instead of "status" -+ *! in DEV and CMM function calls to avoid confusion. -+ *! Return DSP_SOK instead of S_OK from API fxns. -+ *! 12-Mar-2002 map Changed FAILED(..) to DSP_FAILED(..) -+ *! 25-Jan-2002 ag Allow neg seg ids(e.g. DSP_SHMSEG0) to denote SM. -+ *! 15-Nov-2001 ag Added STRMMODE & SM for DMA/ZCopy streaming. -+ *! Changed DSP_STREAMINFO to STRM_INFO in STRM_GetInfo(). -+ *! Use strm timeout value for dma flush timeout. -+ *! 09-May-2001 jeh Code review cleanup. -+ *! 06-Feb-2001 kc Updated DBC_Ensure in STRM_Select to check timeout. -+ *! 23-Oct-2000 jeh Allow NULL STRM_ATTRS passed to STRM_Open() for DLL -+ *! tests to pass. -+ *! 25-Sep-2000 jeh Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- Mini Driver */ -+#include -+ -+/* ----------------------------------- Resource Manager */ -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+#ifndef RES_CLEANUP_DISABLE -+#include -+#include -+#include -+#endif -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define STRM_SIGNATURE 0x4d525453 /* "MRTS" */ -+#define STRMMGR_SIGNATURE 0x5254534d /* "RTSM" */ -+ -+#define DEFAULTTIMEOUT 10000 -+#define DEFAULTNUMBUFS 2 -+ -+/* -+ * ======== STRM_MGR ======== -+ * The STRM_MGR contains device information needed to open the underlying -+ * channels of a stream. -+ */ -+struct STRM_MGR { -+ u32 dwSignature; -+ struct DEV_OBJECT *hDev; /* Device for this processor */ -+ struct CHNL_MGR *hChnlMgr; /* Channel manager */ -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ -+ struct SYNC_CSOBJECT *hSync; /* For critical sections */ -+} ; -+ -+/* -+ * ======== STRM_OBJECT ======== -+ * This object is allocated in STRM_Open(). 
-+ */ -+ struct STRM_OBJECT { -+ u32 dwSignature; -+ struct STRM_MGR *hStrmMgr; -+ struct CHNL_OBJECT *hChnl; -+ u32 uDir; /* DSP_TONODE or DSP_FROMNODE */ -+ u32 uTimeout; -+ u32 uNumBufs; /* Max # of bufs allowed in stream */ -+ u32 uNBufsInStrm; /* Current # of bufs in stream */ -+ u32 ulNBytes; /* bytes transferred since idled */ -+ enum DSP_STREAMSTATE strmState; /* STREAM_IDLE, STREAM_READY, ... */ -+ HANDLE hUserEvent; /* Saved for STRM_GetInfo() */ -+ enum DSP_STRMMODE lMode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */ -+ u32 uDMAChnlId; /* DMA chnl id */ -+ u32 uDMAPriority; /* DMA priority:DMAPRI_[LOW][HIGH] */ -+ u32 uSegment; /* >0 is SM segment.=0 is local heap */ -+ u32 uAlignment; /* Alignment for stream bufs */ -+ struct CMM_XLATOROBJECT *hXlator; /* Stream's SM address translator */ -+} ; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask STRM_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+static u32 cRefs; /* module reference count */ -+ -+/* ----------------------------------- Function Prototypes */ -+static DSP_STATUS DeleteStrm(struct STRM_OBJECT *hStrm); -+static void DeleteStrmMgr(struct STRM_MGR *hStrmMgr); -+ -+/* -+ * ======== STRM_AllocateBuffer ======== -+ * Purpose: -+ * Allocates buffers for a stream. -+ */ -+DSP_STATUS STRM_AllocateBuffer(struct STRM_OBJECT *hStrm, u32 uSize, -+ OUT u8 **apBuffer, u32 uNumBufs, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 uAllocated = 0; -+ u32 i; -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE hSTRMRes; -+#endif -+ DBC_Require(cRefs > 0); -+ DBC_Require(apBuffer != NULL); -+ -+ GT_4trace(STRM_debugMask, GT_ENTER, "STRM_AllocateBuffer: hStrm: 0x%x\t" -+ "uSize: 0x%x\tapBuffer: 0x%x\tuNumBufs: 0x%x\n", -+ hStrm, uSize, apBuffer, uNumBufs); -+ if (MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ /* -+ * Allocate from segment specified at time of stream open. -+ */ -+ if (uSize == 0) -+ status = DSP_ESIZE; -+ -+ } -+ if (DSP_FAILED(status)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ for (i = 0; i < uNumBufs; i++) { -+ DBC_Assert(hStrm->hXlator != NULL); -+ (void)CMM_XlatorAllocBuf(hStrm->hXlator, &apBuffer[i], uSize); -+ if (apBuffer[i] == NULL) { -+ GT_0trace(STRM_debugMask, GT_7CLASS, -+ "STRM_AllocateBuffer: " -+ "DSP_FAILED to alloc shared memory.\n"); -+ status = DSP_EMEMORY; -+ uAllocated = i; -+ break; -+ } -+ } -+ if (DSP_FAILED(status)) -+ STRM_FreeBuffer(hStrm, apBuffer, uAllocated, pr_ctxt); -+ -+#ifndef RES_CLEANUP_DISABLE -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (DRV_GetSTRMResElement(hStrm, &hSTRMRes, pr_ctxt) != -+ DSP_ENOTFOUND) { -+ DRV_ProcUpdateSTRMRes(uNumBufs, hSTRMRes, pr_ctxt); -+ } -+#endif -+func_end: -+ return status; -+} -+ -+/* -+ * ======== STRM_Close ======== -+ * Purpose: -+ * Close a stream opened with STRM_Open(). -+ */ -+DSP_STATUS STRM_Close(struct STRM_OBJECT *hStrm, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct CHNL_INFO chnlInfo; -+ DSP_STATUS status = DSP_SOK; -+ -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE hSTRMRes; -+#endif -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(STRM_debugMask, GT_ENTER, "STRM_Close: hStrm: 0x%x\n", hStrm); -+ -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ /* Have all buffers been reclaimed? 
If not, return -+ * DSP_EPENDING */ -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnChnlGetInfo) (hStrm->hChnl, &chnlInfo); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ -+ if (chnlInfo.cIOCs > 0 || chnlInfo.cIOReqs > 0) { -+ status = DSP_EPENDING; -+ } else { -+ -+ status = DeleteStrm(hStrm); -+ -+ if (DSP_FAILED(status)) { -+ /* we already validated the handle. */ -+ DBC_Assert(status != DSP_EHANDLE); -+ -+ /* make sure we return a documented result */ -+ status = DSP_EFAIL; -+ } -+ } -+ } -+#ifndef RES_CLEANUP_DISABLE -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (DRV_GetSTRMResElement(hStrm, &hSTRMRes, pr_ctxt) != -+ DSP_ENOTFOUND) { -+ DRV_ProcRemoveSTRMResElement(hSTRMRes, pr_ctxt); -+ } -+func_end: -+#endif -+ DBC_Ensure(status == DSP_SOK || status == DSP_EHANDLE || -+ status == DSP_EPENDING || status == DSP_EFAIL); -+ -+ return status; -+} -+ -+/* -+ * ======== STRM_Create ======== -+ * Purpose: -+ * Create a STRM manager object. -+ */ -+DSP_STATUS STRM_Create(OUT struct STRM_MGR **phStrmMgr, struct DEV_OBJECT *hDev) -+{ -+ struct STRM_MGR *pStrmMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phStrmMgr != NULL); -+ DBC_Require(hDev != NULL); -+ -+ GT_2trace(STRM_debugMask, GT_ENTER, "STRM_Create: phStrmMgr: " -+ "0x%x\thDev: 0x%x\n", phStrmMgr, hDev); -+ *phStrmMgr = NULL; -+ /* Allocate STRM manager object */ -+ MEM_AllocObject(pStrmMgr, struct STRM_MGR, STRMMGR_SIGNATURE); -+ if (pStrmMgr == NULL) { -+ status = DSP_EMEMORY; -+ GT_0trace(STRM_debugMask, GT_6CLASS, "STRM_Create: " -+ "MEM_AllocObject() failed!\n "); -+ } else { -+ pStrmMgr->hDev = hDev; -+ } -+ /* Get Channel manager and WMD function interface */ -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetChnlMgr(hDev, &(pStrmMgr->hChnlMgr)); -+ if (DSP_SUCCEEDED(status)) { -+ (void) DEV_GetIntfFxns(hDev, &(pStrmMgr->pIntfFxns)); -+ DBC_Assert(pStrmMgr->pIntfFxns != NULL); -+ } else { -+ GT_1trace(STRM_debugMask, GT_6CLASS, "STRM_Create: " -+ "Failed to get channel manager! status = " -+ "0x%x\n", status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_InitializeCS(&pStrmMgr->hSync); -+ -+ if (DSP_SUCCEEDED(status)) -+ *phStrmMgr = pStrmMgr; -+ else -+ DeleteStrmMgr(pStrmMgr); -+ -+ DBC_Ensure(DSP_SUCCEEDED(status) && -+ (MEM_IsValidHandle((*phStrmMgr), STRMMGR_SIGNATURE) || -+ (DSP_FAILED(status) && *phStrmMgr == NULL))); -+ -+ return status; -+} -+ -+/* -+ * ======== STRM_Delete ======== -+ * Purpose: -+ * Delete the STRM Manager Object. -+ */ -+void STRM_Delete(struct STRM_MGR *hStrmMgr) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(MEM_IsValidHandle(hStrmMgr, STRMMGR_SIGNATURE)); -+ -+ GT_1trace(STRM_debugMask, GT_ENTER, "STRM_Delete: hStrmMgr: 0x%x\n", -+ hStrmMgr); -+ -+ DeleteStrmMgr(hStrmMgr); -+ -+ DBC_Ensure(!MEM_IsValidHandle(hStrmMgr, STRMMGR_SIGNATURE)); -+} -+ -+/* -+ * ======== STRM_Exit ======== -+ * Purpose: -+ * Discontinue usage of STRM module. -+ */ -+void STRM_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ cRefs--; -+ -+ GT_1trace(STRM_debugMask, GT_5CLASS, -+ "Entered STRM_Exit, ref count: 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== STRM_FreeBuffer ======== -+ * Purpose: -+ * Frees the buffers allocated for a stream. 
-+ */ -+DSP_STATUS STRM_FreeBuffer(struct STRM_OBJECT *hStrm, u8 **apBuffer, -+ u32 uNumBufs, struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 i = 0; -+ -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE hSTRMRes = NULL; -+#endif -+ DBC_Require(cRefs > 0); -+ DBC_Require(apBuffer != NULL); -+ -+ GT_3trace(STRM_debugMask, GT_ENTER, "STRM_FreeBuffer: hStrm: 0x%x\t" -+ "apBuffer: 0x%x\tuNumBufs: 0x%x\n", hStrm, apBuffer, uNumBufs); -+ -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) -+ status = DSP_EHANDLE; -+ -+ if (DSP_SUCCEEDED(status)) { -+ for (i = 0; i < uNumBufs; i++) { -+ DBC_Assert(hStrm->hXlator != NULL); -+ status = CMM_XlatorFreeBuf(hStrm->hXlator, apBuffer[i]); -+ if (DSP_FAILED(status)) { -+ GT_0trace(STRM_debugMask, GT_7CLASS, -+ "STRM_FreeBuffer: DSP_FAILED" -+ " to free shared memory.\n"); -+ break; -+ } -+ apBuffer[i] = NULL; -+ } -+ } -+#ifndef RES_CLEANUP_DISABLE -+ if (DRV_GetSTRMResElement(hStrm, hSTRMRes, pr_ctxt) != -+ DSP_ENOTFOUND) { -+ DRV_ProcUpdateSTRMRes(uNumBufs-i, hSTRMRes, pr_ctxt); -+ } -+#endif -+ return status; -+} -+ -+/* -+ * ======== STRM_GetInfo ======== -+ * Purpose: -+ * Retrieves information about a stream. -+ */ -+DSP_STATUS STRM_GetInfo(struct STRM_OBJECT *hStrm, -+ OUT struct STRM_INFO *pStreamInfo, -+ u32 uStreamInfoSize) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct CHNL_INFO chnlInfo; -+ DSP_STATUS status = DSP_SOK; -+ void *pVirtBase = NULL; /* NULL if no SM used */ -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pStreamInfo != NULL); -+ DBC_Require(uStreamInfoSize >= sizeof(struct STRM_INFO)); -+ -+ GT_3trace(STRM_debugMask, GT_ENTER, "STRM_GetInfo: hStrm: 0x%x\t" -+ "pStreamInfo: 0x%x\tuStreamInfoSize: 0x%x\n", hStrm, -+ pStreamInfo, uStreamInfoSize); -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ if (uStreamInfoSize < sizeof(struct STRM_INFO)) { -+ /* size of users info */ -+ status = DSP_ESIZE; -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnChnlGetInfo) (hStrm->hChnl, &chnlInfo); -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ if (hStrm->hXlator) { -+ /* We have a translator */ -+ DBC_Assert(hStrm->uSegment > 0); -+ CMM_XlatorInfo(hStrm->hXlator, (u8 **)&pVirtBase, 0, -+ hStrm->uSegment, false); -+ } -+ pStreamInfo->uSegment = hStrm->uSegment; -+ pStreamInfo->lMode = hStrm->lMode; -+ pStreamInfo->pVirtBase = pVirtBase; -+ pStreamInfo->pUser->uNumberBufsAllowed = hStrm->uNumBufs; -+ pStreamInfo->pUser->uNumberBufsInStream = chnlInfo.cIOCs + -+ chnlInfo.cIOReqs; -+ /* # of bytes transferred since last call to DSPStream_Idle() */ -+ pStreamInfo->pUser->ulNumberBytes = chnlInfo.cPosition; -+ pStreamInfo->pUser->hSyncObjectHandle = chnlInfo.hEvent; -+ /* Determine stream state based on channel state and info */ -+ if (chnlInfo.dwState & CHNL_STATEEOS) { -+ pStreamInfo->pUser->ssStreamState = STREAM_DONE; -+ } else { -+ if (chnlInfo.cIOCs > 0) -+ pStreamInfo->pUser->ssStreamState = STREAM_READY; -+ else if (chnlInfo.cIOReqs > 0) -+ pStreamInfo->pUser->ssStreamState = STREAM_PENDING; -+ else -+ pStreamInfo->pUser->ssStreamState = STREAM_IDLE; -+ -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== STRM_Idle ======== -+ * Purpose: -+ * Idles a particular stream. 
-+ */ -+DSP_STATUS STRM_Idle(struct STRM_OBJECT *hStrm, bool fFlush) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_2trace(STRM_debugMask, GT_ENTER, "STRM_Idle: hStrm: 0x%x\t" -+ "fFlush: 0x%x\n", hStrm, fFlush); -+ -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ -+ status = (*pIntfFxns->pfnChnlIdle) (hStrm->hChnl, -+ hStrm->uTimeout, fFlush); -+ } -+ return status; -+} -+ -+/* -+ * ======== STRM_Init ======== -+ * Purpose: -+ * Initialize the STRM module. -+ */ -+bool STRM_Init(void) -+{ -+ bool fRetVal = true; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+#if GT_TRACE -+ DBC_Assert(!STRM_debugMask.flags); -+ GT_create(&STRM_debugMask, "ST"); /* "ST" for STrm */ -+#endif -+ } -+ -+ if (fRetVal) -+ cRefs++; -+ -+ GT_1trace(STRM_debugMask, GT_5CLASS, "STRM_Init(), ref count: 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((fRetVal && (cRefs > 0)) || (!fRetVal && (cRefs >= 0))); -+ -+ return fRetVal; -+} -+ -+/* -+ * ======== STRM_Issue ======== -+ * Purpose: -+ * Issues a buffer on a stream -+ */ -+DSP_STATUS STRM_Issue(struct STRM_OBJECT *hStrm, IN u8 *pBuf, u32 ulBytes, -+ u32 ulBufSize, u32 dwArg) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ void *pTmpBuf = NULL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pBuf != NULL); -+ -+ GT_4trace(STRM_debugMask, GT_ENTER, "STRM_Issue: hStrm: 0x%x\tpBuf: " -+ "0x%x\tulBytes: 0x%x\tdwArg: 0x%x\n", hStrm, pBuf, ulBytes, -+ dwArg); -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ -+ if (hStrm->uSegment != 0) { -+ pTmpBuf = CMM_XlatorTranslate(hStrm->hXlator, -+ (void *)pBuf, CMM_VA2DSPPA); -+ if (pTmpBuf == NULL) -+ status = DSP_ETRANSLATE; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = (*pIntfFxns->pfnChnlAddIOReq) -+ (hStrm->hChnl, pBuf, ulBytes, ulBufSize, -+ (u32) pTmpBuf, dwArg); -+ } -+ if (DSP_FAILED(status)) { -+ if (status == CHNL_E_NOIORPS) -+ status = DSP_ESTREAMFULL; -+ else -+ status = DSP_EFAIL; -+ -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== STRM_Open ======== -+ * Purpose: -+ * Open a stream for sending/receiving data buffers to/from a task or -+ * XDAIS socket node on the DSP. 
-+ */ -+DSP_STATUS STRM_Open(struct NODE_OBJECT *hNode, u32 uDir, u32 uIndex, -+ IN struct STRM_ATTR *pAttr, -+ OUT struct STRM_OBJECT **phStrm, -+ struct PROCESS_CONTEXT *pr_ctxt) -+{ -+ struct STRM_MGR *hStrmMgr; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ u32 ulChnlId; -+ struct STRM_OBJECT *pStrm = NULL; -+ CHNL_MODE uMode; -+ struct CHNL_ATTRS chnlAttrs; -+ DSP_STATUS status = DSP_SOK; -+ struct CMM_OBJECT *hCmmMgr = NULL; /* Shared memory manager hndl */ -+ -+#ifndef RES_CLEANUP_DISABLE -+ HANDLE hSTRMRes; -+#endif -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(phStrm != NULL); -+ DBC_Require(pAttr != NULL); -+ GT_5trace(STRM_debugMask, GT_ENTER, -+ "STRM_Open: hNode: 0x%x\tuDir: 0x%x\t" -+ "uIndex: 0x%x\tpAttr: 0x%x\tphStrm: 0x%x\n", -+ hNode, uDir, uIndex, pAttr, phStrm); -+ *phStrm = NULL; -+ if (uDir != DSP_TONODE && uDir != DSP_FROMNODE) { -+ status = DSP_EDIRECTION; -+ } else { -+ /* Get the channel id from the node (set in NODE_Connect()) */ -+ status = NODE_GetChannelId(hNode, uDir, uIndex, &ulChnlId); -+ } -+ if (DSP_SUCCEEDED(status)) -+ status = NODE_GetStrmMgr(hNode, &hStrmMgr); -+ -+ if (DSP_SUCCEEDED(status)) { -+ MEM_AllocObject(pStrm, struct STRM_OBJECT, STRM_SIGNATURE); -+ if (pStrm == NULL) { -+ status = DSP_EMEMORY; -+ GT_0trace(STRM_debugMask, GT_6CLASS, -+ "STRM_Open: MEM_AllocObject() failed!\n "); -+ } else { -+ pStrm->hStrmMgr = hStrmMgr; -+ pStrm->uDir = uDir; -+ pStrm->strmState = STREAM_IDLE; -+ pStrm->hUserEvent = pAttr->hUserEvent; -+ if (pAttr->pStreamAttrIn != NULL) { -+ pStrm->uTimeout = pAttr->pStreamAttrIn-> -+ uTimeout; -+ pStrm->uNumBufs = pAttr->pStreamAttrIn-> -+ uNumBufs; -+ pStrm->lMode = pAttr->pStreamAttrIn->lMode; -+ pStrm->uSegment = pAttr->pStreamAttrIn-> -+ uSegment; -+ pStrm->uAlignment = pAttr->pStreamAttrIn-> -+ uAlignment; -+ pStrm->uDMAChnlId = pAttr->pStreamAttrIn-> -+ uDMAChnlId; -+ pStrm->uDMAPriority = pAttr->pStreamAttrIn-> -+ uDMAPriority; -+ chnlAttrs.uIOReqs = pAttr->pStreamAttrIn-> -+ uNumBufs; -+ } else { -+ pStrm->uTimeout = DEFAULTTIMEOUT; -+ pStrm->uNumBufs = DEFAULTNUMBUFS; -+ pStrm->lMode = STRMMODE_PROCCOPY; -+ pStrm->uSegment = 0; /* local memory */ -+ pStrm->uAlignment = 0; -+ pStrm->uDMAChnlId = 0; -+ pStrm->uDMAPriority = 0; -+ chnlAttrs.uIOReqs = DEFAULTNUMBUFS; -+ } -+ chnlAttrs.hReserved1 = NULL; -+ /* DMA chnl flush timeout */ -+ chnlAttrs.hReserved2 = pStrm->uTimeout; -+ chnlAttrs.hEvent = NULL; -+ if (pAttr->hUserEvent != NULL) -+ chnlAttrs.hEvent = pAttr->hUserEvent; -+ -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ if ((pAttr->pVirtBase == NULL) || !(pAttr->ulVirtSize > 0)) -+ goto func_cont; -+ -+ DBC_Assert(pStrm->lMode != STRMMODE_LDMA); /* no System DMA */ -+ /* Get the shared mem mgr for this streams dev object */ -+ status = DEV_GetCmmMgr(hStrmMgr->hDev, &hCmmMgr); -+ if (DSP_FAILED(status)) { -+ GT_1trace(STRM_debugMask, GT_6CLASS, "STRM_Open: Failed to get " -+ "CMM Mgr handle: 0x%x\n", status); -+ } else { -+ /*Allocate a SM addr translator for this strm.*/ -+ status = CMM_XlatorCreate(&pStrm->hXlator, hCmmMgr, NULL); -+ if (DSP_FAILED(status)) { -+ GT_1trace(STRM_debugMask, GT_6CLASS, -+ "STRM_Open: Failed to " -+ "create SM translator: 0x%x\n", status); -+ } else { -+ DBC_Assert(pStrm->uSegment > 0); -+ /* Set translators Virt Addr attributes */ -+ status = CMM_XlatorInfo(pStrm->hXlator, -+ (u8 **)&pAttr->pVirtBase, pAttr->ulVirtSize, -+ pStrm->uSegment, true); -+ if (status != DSP_SOK) { -+ GT_0trace(STRM_debugMask, GT_6CLASS, -+ "STRM_Open: ERROR: " -+ "in setting 
CMM_XlatorInfo.\n"); -+ } -+ } -+ } -+func_cont: -+ if (DSP_SUCCEEDED(status)) { -+ /* Open channel */ -+ uMode = (uDir == DSP_TONODE) ? -+ CHNL_MODETODSP : CHNL_MODEFROMDSP; -+ pIntfFxns = hStrmMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnChnlOpen) (&(pStrm->hChnl), -+ hStrmMgr->hChnlMgr, uMode, ulChnlId, &chnlAttrs); -+ if (DSP_FAILED(status)) { -+ /* -+ * over-ride non-returnable status codes so we return -+ * something documented -+ */ -+ if (status != DSP_EMEMORY && status != -+ DSP_EINVALIDARG && status != DSP_EFAIL) { -+ /* -+ * We got a status that's not return-able. -+ * Assert that we got something we were -+ * expecting (DSP_EHANDLE isn't acceptable, -+ * hStrmMgr->hChnlMgr better be valid or we -+ * assert here), and then return DSP_EFAIL. -+ */ -+ DBC_Assert(status == CHNL_E_OUTOFSTREAMS || -+ status == CHNL_E_BADCHANID || -+ status == CHNL_E_CHANBUSY || -+ status == CHNL_E_NOIORPS); -+ status = DSP_EFAIL; -+ } -+ GT_2trace(STRM_debugMask, GT_6CLASS, -+ "STRM_Open: Channel open failed, " -+ "chnl id = %d, status = 0x%x\n", ulChnlId, -+ status); -+ } -+ } -+ if (DSP_SUCCEEDED(status)) -+ *phStrm = pStrm; -+ else -+ (void)DeleteStrm(pStrm); -+ -+#ifndef RES_CLEANUP_DISABLE -+ DRV_ProcInsertSTRMResElement(*phStrm, &hSTRMRes, pr_ctxt); -+#endif -+ -+ /* ensure we return a documented error code */ -+ DBC_Ensure((DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle((*phStrm), STRM_SIGNATURE)) || -+ (*phStrm == NULL && (status == DSP_EHANDLE || -+ status == DSP_EDIRECTION || status == DSP_EVALUE || -+ status == DSP_EFAIL))); -+ return status; -+} -+ -+/* -+ * ======== STRM_Reclaim ======== -+ * Purpose: -+ * Relcaims a buffer from a stream. -+ */ -+DSP_STATUS STRM_Reclaim(struct STRM_OBJECT *hStrm, OUT u8 **pBufPtr, -+ u32 *pulBytes, u32 *pulBufSize, u32 *pdwArg) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct CHNL_IOC chnlIOC; -+ DSP_STATUS status = DSP_SOK; -+ void *pTmpBuf = NULL; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pBufPtr != NULL); -+ DBC_Require(pulBytes != NULL); -+ DBC_Require(pdwArg != NULL); -+ -+ GT_4trace(STRM_debugMask, GT_ENTER, -+ "STRM_Reclaim: hStrm: 0x%x\tpBufPtr: 0x%x" -+ "\tpulBytes: 0x%x\tpdwArg: 0x%x\n", hStrm, pBufPtr, pulBytes, -+ pdwArg); -+ -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ -+ status = (*pIntfFxns->pfnChnlGetIOC)(hStrm->hChnl, hStrm->uTimeout, -+ &chnlIOC); -+ if (DSP_FAILED(status)) { -+ GT_1trace(STRM_debugMask, GT_6CLASS, -+ "STRM_Reclaim: GetIOC failed! " -+ "Status = 0x%x\n", status); -+ } else { -+ *pulBytes = chnlIOC.cBytes; -+ if (pulBufSize) -+ *pulBufSize = chnlIOC.cBufSize; -+ -+ *pdwArg = chnlIOC.dwArg; -+ if (!CHNL_IsIOComplete(chnlIOC)) { -+ if (CHNL_IsTimedOut(chnlIOC)) { -+ status = DSP_ETIMEOUT; -+ } else { -+ /* Allow reclaims after idle to succeed */ -+ if (!CHNL_IsIOCancelled(chnlIOC)) -+ status = DSP_EFAIL; -+ -+ } -+ } -+ /* Translate zerocopy buffer if channel not canceled. */ -+ if (DSP_SUCCEEDED(status) && (!CHNL_IsIOCancelled(chnlIOC)) && -+ (hStrm->lMode == STRMMODE_ZEROCOPY)) { -+ /* -+ * This is a zero-copy channel so chnlIOC.pBuf -+ * contains the DSP address of SM. We need to -+ * translate it to a virtual address for the user -+ * thread to access. -+ * Note: Could add CMM_DSPPA2VA to CMM in the future. 
-+ */ -+ pTmpBuf = CMM_XlatorTranslate(hStrm->hXlator, -+ chnlIOC.pBuf, CMM_DSPPA2PA); -+ if (pTmpBuf != NULL) { -+ /* now convert this GPP Pa to Va */ -+ pTmpBuf = CMM_XlatorTranslate(hStrm->hXlator, -+ pTmpBuf, CMM_PA2VA); -+ } -+ if (pTmpBuf == NULL) { -+ GT_0trace(STRM_debugMask, GT_7CLASS, -+ "STRM_Reclaim: Failed " -+ "SM translation!\n"); -+ status = DSP_ETRANSLATE; -+ } -+ chnlIOC.pBuf = pTmpBuf; -+ } -+ *pBufPtr = chnlIOC.pBuf; -+ } -+func_end: -+ /* ensure we return a documented return code */ -+ DBC_Ensure(DSP_SUCCEEDED(status) || status == DSP_EHANDLE || -+ status == DSP_ETIMEOUT || status == DSP_ETRANSLATE || -+ status == DSP_EFAIL); -+ return status; -+} -+ -+/* -+ * ======== STRM_RegisterNotify ======== -+ * Purpose: -+ * Register to be notified on specific events for this stream. -+ */ -+DSP_STATUS STRM_RegisterNotify(struct STRM_OBJECT *hStrm, u32 uEventMask, -+ u32 uNotifyType, struct DSP_NOTIFICATION -+ *hNotification) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(hNotification != NULL); -+ -+ GT_4trace(STRM_debugMask, GT_ENTER, -+ "STRM_RegisterNotify: hStrm: 0x%x\t" -+ "uEventMask: 0x%x\tuNotifyType: 0x%x\thNotification: 0x%x\n", -+ hStrm, uEventMask, uNotifyType, hNotification); -+ if (!MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else if ((uEventMask & ~((DSP_STREAMIOCOMPLETION) | -+ DSP_STREAMDONE)) != 0) { -+ status = DSP_EVALUE; -+ } else { -+ if (uNotifyType != DSP_SIGNALEVENT) -+ status = DSP_ENOTIMPL; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ -+ status = (*pIntfFxns->pfnChnlRegisterNotify)(hStrm->hChnl, -+ uEventMask, uNotifyType, hNotification); -+ } -+ /* ensure we return a documented return code */ -+ DBC_Ensure(DSP_SUCCEEDED(status) || status == DSP_EHANDLE || -+ status == DSP_ETIMEOUT || status == DSP_ETRANSLATE || -+ status == DSP_ENOTIMPL || status == DSP_EFAIL); -+ return status; -+} -+ -+/* -+ * ======== STRM_Select ======== -+ * Purpose: -+ * Selects a ready stream. 
-+ */ -+DSP_STATUS STRM_Select(IN struct STRM_OBJECT **aStrmTab, u32 nStrms, -+ OUT u32 *pMask, u32 uTimeout) -+{ -+ u32 uIndex; -+ struct CHNL_INFO chnlInfo; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct SYNC_OBJECT **hSyncEvents = NULL; -+ u32 i; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(cRefs > 0); -+ DBC_Require(aStrmTab != NULL); -+ DBC_Require(pMask != NULL); -+ DBC_Require(nStrms > 0); -+ -+ GT_4trace(STRM_debugMask, GT_ENTER, -+ "STRM_Select: aStrmTab: 0x%x \tnStrms: " -+ "0x%x\tpMask: 0x%x\tuTimeout: 0x%x\n", aStrmTab, -+ nStrms, pMask, uTimeout); -+ *pMask = 0; -+ for (i = 0; i < nStrms; i++) { -+ if (!MEM_IsValidHandle(aStrmTab[i], STRM_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ break; -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* Determine which channels have IO ready */ -+ for (i = 0; i < nStrms; i++) { -+ pIntfFxns = aStrmTab[i]->hStrmMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnChnlGetInfo)(aStrmTab[i]->hChnl, -+ &chnlInfo); -+ if (DSP_FAILED(status)) { -+ break; -+ } else { -+ if (chnlInfo.cIOCs > 0) -+ *pMask |= (1 << i); -+ -+ } -+ } -+ if (DSP_SUCCEEDED(status) && uTimeout > 0 && *pMask == 0) { -+ /* Non-zero timeout */ -+ hSyncEvents = (struct SYNC_OBJECT **)MEM_Alloc(nStrms * -+ sizeof(struct SYNC_OBJECT *), MEM_PAGED); -+ if (hSyncEvents == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ for (i = 0; i < nStrms; i++) { -+ pIntfFxns = aStrmTab[i]->hStrmMgr->pIntfFxns; -+ status = (*pIntfFxns->pfnChnlGetInfo) -+ (aStrmTab[i]->hChnl, &chnlInfo); -+ if (DSP_FAILED(status)) -+ break; -+ else -+ hSyncEvents[i] = chnlInfo.hSyncEvent; -+ -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = SYNC_WaitOnMultipleEvents(hSyncEvents, nStrms, -+ uTimeout, &uIndex); -+ if (DSP_SUCCEEDED(status)) { -+ /* Since we waited on the event, we have to -+ * reset it */ -+ SYNC_SetEvent(hSyncEvents[uIndex]); -+ *pMask = 1 << uIndex; -+ } -+ } -+ } -+func_end: -+ if (hSyncEvents) -+ MEM_Free(hSyncEvents); -+ -+ DBC_Ensure((DSP_SUCCEEDED(status) && (*pMask != 0 || uTimeout == 0)) || -+ (DSP_FAILED(status) && *pMask == 0)); -+ -+ return status; -+} -+ -+/* -+ * ======== DeleteStrm ======== -+ * Purpose: -+ * Frees the resources allocated for a stream. -+ */ -+static DSP_STATUS DeleteStrm(struct STRM_OBJECT *hStrm) -+{ -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ DSP_STATUS status = DSP_SOK; -+ -+ if (MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { -+ if (hStrm->hChnl) { -+ pIntfFxns = hStrm->hStrmMgr->pIntfFxns; -+ /* Channel close can fail only if the channel handle -+ * is invalid. */ -+ status = (*pIntfFxns->pfnChnlClose) (hStrm->hChnl); -+ /* Free all SM address translator resources */ -+ if (DSP_SUCCEEDED(status)) { -+ if (hStrm->hXlator) { -+ /* force free */ -+ (void)CMM_XlatorDelete(hStrm->hXlator, -+ true); -+ } -+ } -+ } -+ MEM_FreeObject(hStrm); -+ } else { -+ status = DSP_EHANDLE; -+ } -+ return status; -+} -+ -+/* -+ * ======== DeleteStrmMgr ======== -+ * Purpose: -+ * Frees stream manager. 
-+ */ -+static void DeleteStrmMgr(struct STRM_MGR *hStrmMgr) -+{ -+ if (MEM_IsValidHandle(hStrmMgr, STRMMGR_SIGNATURE)) { -+ -+ if (hStrmMgr->hSync) -+ SYNC_DeleteCS(hStrmMgr->hSync); -+ -+ MEM_FreeObject(hStrmMgr); -+ } -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/cfg.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/cfg.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/cfg.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/cfg.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,483 @@ -+/* -+ * cfg.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== cfgce.c ======== -+ * Purpose: -+ * Implementation of platform specific config services. -+ * -+ * Private Functions: -+ * CFG_Exit -+ * CFG_GetAutoStart -+ * CFG_GetDevObject -+ * CFG_GetDSPResources -+ * CFG_GetExecFile -+ * CFG_GetHostResources -+ * CFG_GetObject -+ * CFG_Init -+ * CFG_SetDevObject -+ * CFG_SetObject -+ * -+ * -+ *! Revision History: -+ *! ================ -+ *! 26-Arp-2004 hp Support for handling more than one Device. -+ *! 26-Feb-2003 kc Removed unused CFG fxns. -+ *! 10-Nov-2000 rr: CFG_GetBoardName local var initialized. -+ *! 30-Oct-2000 kc: Changed local var. names to use Hungarian notation. -+ *! 10-Aug-2000 rr: Cosmetic changes. -+ *! 26-Jul-2000 rr: Added CFG_GetDCDName. CFG_Get/SetObject(based on a flag) -+ *! replaces CFG_GetMgrObject & CFG_SetMgrObject. -+ *! 17-Jul-2000 rr: Added CFG_GetMgrObject & CFG_SetMgrObject. -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 31-Jan-2000 rr: Comments and bugfixes: modified after code review -+ *! 07-Jan-2000 rr: CFG_GetBoardName Ensure class checks strlen of the -+ *! read value from the registry against the passed in BufSize; -+ *! CFG_GetZLFile,CFG_GetWMDFileName and -+ *! CFG_GetExecFile also modified same way. -+ *! 06-Jan-2000 rr: CFG_GetSearchPath & CFG_GetWinBRIDGEDir removed. -+ *! 09-Dec-1999 rr: CFG_SetDevObject stores the DevNodeString pointer. -+ *! 03-Dec-1999 rr: CFG_GetDevObject reads stored DevObject from Registry. -+ *! CFG_GetDevNode reads the Devnodestring from the registry. -+ *! CFG_SetDevObject stores the registry path as -+ *! DevNodestring in the registry. -+ *! 02-Dec-1999 rr: CFG_debugMask is declared static now. stdwin.h included -+ *! 22-Nov-1999 kc: Added windows.h to remove warnings. -+ *! 25-Oct-1999 rr: CFG_GetHostResources reads the HostResource structure -+ *! from the registry which was set by the DRV Request -+ *! Resources. -+ *! 15-Oct-1999 rr: Changes in CFG_SetPrivateDword & HostResources reflecting -+ *! changes for drv.h resource structure and wsxreg.h new -+ *! entry(DevObject) Hard coded entries removed for those items -+ *! 08-Oct-1999 rr: CFG_SetPrivateDword modified. it sets devobject into the -+ *! registry. CFG_Get HostResources modified for opening up -+ *! two mem winodws. -+ *! 24-Sep-1999 rr: CFG_GetHostResources uses hardcoded Registry calls,uses NT -+ *! 
type of Resource Structure. -+ *! 19-Jul-1999 a0216266: Stubbed from cfgnt.c. -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+struct DRV_EXT { -+ struct LST_ELEM link; -+ char szString[MAXREGPATHLENGTH]; -+}; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask CFG_debugMask = { NULL, NULL }; /* CFG debug Mask */ -+#endif -+ -+/* -+ * ======== CFG_Exit ======== -+ * Purpose: -+ * Discontinue usage of the CFG module. -+ */ -+void CFG_Exit(void) -+{ -+ GT_0trace(CFG_debugMask, GT_5CLASS, "Entered CFG_Exit\n"); -+} -+ -+/* -+ * ======== CFG_GetAutoStart ======== -+ * Purpose: -+ * Retreive the autostart mask, if any, for this board. -+ */ -+DSP_STATUS CFG_GetAutoStart(struct CFG_DEVNODE *hDevNode, -+ OUT u32 *pdwAutoStart) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 dwBufSize; -+ GT_2trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_GetAutoStart: \n\thDevNode:" -+ "0x%x\n\tpdwAutoStart: 0x%x\n", hDevNode, pdwAutoStart); -+ dwBufSize = sizeof(*pdwAutoStart); -+ if (!hDevNode) -+ status = CFG_E_INVALIDHDEVNODE; -+ if (!pdwAutoStart) -+ status = CFG_E_INVALIDPOINTER; -+ if (DSP_SUCCEEDED(status)) { -+ status = REG_GetValue(NULL, (char *)hDevNode, AUTOSTART, -+ (u8 *)pdwAutoStart, &dwBufSize); -+ if (DSP_FAILED(status)) -+ status = CFG_E_RESOURCENOTAVAIL; -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(CFG_debugMask, GT_1CLASS, -+ "CFG_GetAutoStart SUCCESS \n"); -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "CFG_GetAutoStart Failed \n"); -+ } -+#endif -+ DBC_Ensure((status == DSP_SOK && -+ (*pdwAutoStart == 0 || *pdwAutoStart == 1)) -+ || status != DSP_SOK); -+ return status; -+} -+ -+/* -+ * ======== CFG_GetDevObject ======== -+ * Purpose: -+ * Retrieve the Device Object handle for a given devnode. -+ */ -+DSP_STATUS CFG_GetDevObject(struct CFG_DEVNODE *hDevNode, OUT u32 *pdwValue) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 dwBufSize; -+ GT_2trace(CFG_debugMask, GT_ENTER, "Entered CFG_GetDevObject, args: " -+ "\n\thDevNode: 0x%x\n\tpdwValue: 0x%x\n", hDevNode, -+ *pdwValue); -+ if (!hDevNode) -+ status = CFG_E_INVALIDHDEVNODE; -+ -+ if (!pdwValue) -+ status = CFG_E_INVALIDHDEVNODE; -+ -+ dwBufSize = sizeof(pdwValue); -+ if (DSP_SUCCEEDED(status)) { -+ -+ /* check the device string and then call the REG_SetValue*/ -+ if (!(strcmp((char *)((struct DRV_EXT *)hDevNode)->szString, -+ "TIOMAP1510"))) { -+ GT_0trace(CFG_debugMask, GT_1CLASS, -+ "Fetching DSP Device from " -+ "Registry \n"); -+ status = REG_GetValue(NULL, (char *)hDevNode, -+ "DEVICE_DSP", -+ (u8 *)pdwValue, &dwBufSize); -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "Failed to Identify the Device to Fetch \n"); -+ } -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) { -+ GT_1trace(CFG_debugMask, GT_1CLASS, -+ "CFG_GetDevObject SUCCESS DevObject" -+ ": 0x%x\n ", *pdwValue); -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "CFG_GetDevObject Failed \n"); -+ } -+#endif -+ return status; -+} -+ -+/* -+ * ======== CFG_GetDSPResources ======== -+ * Purpose: -+ * Get the DSP resources available to a given device. 
-+ */ -+DSP_STATUS CFG_GetDSPResources(struct CFG_DEVNODE *hDevNode, -+ OUT struct CFG_DSPRES *pDSPResTable) -+{ -+ DSP_STATUS status = DSP_SOK; /* return value */ -+ u32 dwResSize; -+ GT_2trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_GetDSPResources, args: " -+ "\n\thDevNode: 0x%x\n\tpDSPResTable: 0x%x\n", -+ hDevNode, pDSPResTable); -+ if (!hDevNode) { -+ status = CFG_E_INVALIDHDEVNODE; -+ } else if (!pDSPResTable) { -+ status = CFG_E_INVALIDPOINTER; -+ } else { -+ status = REG_GetValue(NULL, CONFIG, DSPRESOURCES, -+ (u8 *)pDSPResTable, -+ &dwResSize); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(CFG_debugMask, GT_1CLASS, -+ "CFG_GetDSPResources SUCCESS\n"); -+ } else { -+ status = CFG_E_RESOURCENOTAVAIL; -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "CFG_GetDSPResources Failed \n"); -+ } -+#ifdef DEBUG -+ /* assert that resource values are reasonable */ -+ DBC_Assert(pDSPResTable->uChipType < 256); -+ DBC_Assert(pDSPResTable->uWordSize > 0); -+ DBC_Assert(pDSPResTable->uWordSize < 32); -+ DBC_Assert(pDSPResTable->cChips > 0); -+ DBC_Assert(pDSPResTable->cChips < 256); -+#endif -+ return status; -+} -+ -+/* -+ * ======== CFG_GetExecFile ======== -+ * Purpose: -+ * Retreive the default executable, if any, for this board. -+ */ -+DSP_STATUS CFG_GetExecFile(struct CFG_DEVNODE *hDevNode, u32 ulBufSize, -+ OUT char *pstrExecFile) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 cExecSize = ulBufSize; -+ GT_3trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_GetExecFile:\n\tthDevNode: " -+ "0x%x\n\tulBufSize: 0x%x\n\tpstrExecFile: 0x%x\n", hDevNode, -+ ulBufSize, pstrExecFile); -+ if (!hDevNode) -+ status = CFG_E_INVALIDHDEVNODE; -+ -+ if (!pstrExecFile) -+ status = CFG_E_INVALIDPOINTER; -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = REG_GetValue(NULL, (char *)hDevNode, DEFEXEC, -+ (u8 *)pstrExecFile, &cExecSize); -+ if (DSP_FAILED(status)) -+ status = CFG_E_RESOURCENOTAVAIL; -+ else if (cExecSize > ulBufSize) -+ status = DSP_ESIZE; -+ -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) { -+ GT_1trace(CFG_debugMask, GT_1CLASS, -+ "CFG_GetExecFile SUCCESS Exec File" -+ "name : %s\n ", pstrExecFile); -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "CFG_GetExecFile Failed \n"); -+ } -+#endif -+ DBC_Ensure(((status == DSP_SOK) && -+ (strlen(pstrExecFile) <= ulBufSize)) || (status != DSP_SOK)); -+ return status; -+} -+ -+/* -+ * ======== CFG_GetHostResources ======== -+ * Purpose: -+ * Get the Host allocated resources assigned to a given device. 
-+ */ -+DSP_STATUS CFG_GetHostResources(struct CFG_DEVNODE *hDevNode, -+ OUT struct CFG_HOSTRES *pHostResTable) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 dwBufSize; -+ GT_2trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_GetHostResources, args:\n\t" -+ "pHostResTable: 0x%x\n\thDevNode: 0x%x\n", -+ pHostResTable, hDevNode); -+ if (!hDevNode) -+ status = CFG_E_INVALIDHDEVNODE; -+ -+ if (!pHostResTable) -+ status = CFG_E_INVALIDPOINTER; -+ -+ if (DSP_SUCCEEDED(status)) { -+ dwBufSize = sizeof(struct CFG_HOSTRES); -+ if (DSP_FAILED(REG_GetValue(NULL, (char *)hDevNode, -+ CURRENTCONFIG, -+ (u8 *)pHostResTable, &dwBufSize))) { -+ status = CFG_E_RESOURCENOTAVAIL; -+ } -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(CFG_debugMask, GT_1CLASS, -+ "CFG_GetHostResources SUCCESS \n"); -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "CFG_GetHostResources Failed \n"); -+ } -+#endif -+ return status; -+} -+ -+/* -+ * ======== CFG_GetObject ======== -+ * Purpose: -+ * Retrieve the Object handle from the Registry -+ */ -+DSP_STATUS CFG_GetObject(OUT u32 *pdwValue, u32 dwType) -+{ -+ DSP_STATUS status = DSP_EINVALIDARG; -+ u32 dwBufSize; -+ DBC_Require(pdwValue != NULL); -+ GT_1trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_GetObject, args:pdwValue: " -+ "0x%x\n", *pdwValue); -+ dwBufSize = sizeof(pdwValue); -+ switch (dwType) { -+ case (REG_DRV_OBJECT): -+ status = REG_GetValue(NULL, CONFIG, DRVOBJECT, -+ (u8 *)pdwValue, -+ &dwBufSize); -+ break; -+ case (REG_MGR_OBJECT): -+ status = REG_GetValue(NULL, CONFIG, MGROBJECT, -+ (u8 *)pdwValue, -+ &dwBufSize); -+ break; -+ default: -+ break; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ GT_1trace(CFG_debugMask, GT_1CLASS, -+ "CFG_GetObject SUCCESS DrvObject: " -+ "0x%x\n ", *pdwValue); -+ } else { -+ status = CFG_E_RESOURCENOTAVAIL; -+ *pdwValue = 0; -+ GT_0trace(CFG_debugMask, GT_6CLASS, "CFG_GetObject Failed \n"); -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && *pdwValue != 0) || -+ (DSP_FAILED(status) && *pdwValue == 0)); -+ return status; -+} -+ -+/* -+ * ======== CFG_Init ======== -+ * Purpose: -+ * Initialize the CFG module's private state. -+ */ -+bool CFG_Init(void) -+{ -+ struct CFG_DSPRES dspResources; -+ GT_create(&CFG_debugMask, "CF"); /* CF for ConFig */ -+ GT_0trace(CFG_debugMask, GT_5CLASS, "Entered CFG_Init\n"); -+ GT_0trace(CFG_debugMask, GT_5CLASS, "Intializing DSP Registry Info \n"); -+ -+ dspResources.uChipType = DSPTYPE_64; -+ dspResources.cChips = 1; -+ dspResources.uWordSize = DSPWORDSIZE; -+ dspResources.cMemTypes = 0; -+ dspResources.aMemDesc[0].uMemType = 0; -+ dspResources.aMemDesc[0].ulMin = 0; -+ dspResources.aMemDesc[0].ulMax = 0; -+ if (DSP_SUCCEEDED(REG_SetValue(NULL, CONFIG, DSPRESOURCES, REG_BINARY, -+ (u8 *)&dspResources, sizeof(struct CFG_DSPRES)))) { -+ GT_0trace(CFG_debugMask, GT_5CLASS, -+ "Initialized DSP resources in " -+ "Registry \n"); -+ } else -+ GT_0trace(CFG_debugMask, GT_5CLASS, -+ "Failed to Initialize DSP resources" -+ " in Registry \n"); -+ return true; -+} -+ -+/* -+ * ======== CFG_SetDevObject ======== -+ * Purpose: -+ * Store the Device Object handle and devNode pointer for a given devnode. 
-+ */ -+DSP_STATUS CFG_SetDevObject(struct CFG_DEVNODE *hDevNode, u32 dwValue) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 dwBuffSize; -+ GT_2trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_SetDevObject, args: \n\t" -+ "hDevNode: 0x%x\n\tdwValue: 0x%x\n", hDevNode, dwValue); -+ if (!hDevNode) -+ status = CFG_E_INVALIDHDEVNODE; -+ -+ dwBuffSize = sizeof(dwValue); -+ if (DSP_SUCCEEDED(status)) { -+ /* Store the WCD device object in the Registry */ -+ -+ if (!(strcmp((char *)hDevNode, "TIOMAP1510"))) { -+ GT_0trace(CFG_debugMask, GT_1CLASS, -+ "Registering the DSP Device \n"); -+ status = REG_SetValue(NULL, (char *)hDevNode, -+ "DEVICE_DSP", REG_DWORD,\ -+ (u8 *)&dwValue, dwBuffSize); -+ if (DSP_SUCCEEDED(status)) { -+ dwBuffSize = sizeof(hDevNode); -+ status = REG_SetValue(NULL, -+ (char *)hDevNode, "DEVNODESTRING_DSP", -+ REG_DWORD, (u8 *)&hDevNode, -+ dwBuffSize); -+ } -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "Failed to Register Device \n"); -+ } -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) { -+ GT_0trace(CFG_debugMask, GT_1CLASS, -+ "CFG_SetDevObject SUCCESS \n"); -+ } else { -+ GT_0trace(CFG_debugMask, GT_6CLASS, -+ "CFG_SetDevObject Failed \n"); -+ } -+#endif -+ return status; -+} -+ -+/* -+ * ======== CFG_SetObject ======== -+ * Purpose: -+ * Store the Driver Object handle -+ */ -+DSP_STATUS CFG_SetObject(u32 dwValue, u32 dwType) -+{ -+ DSP_STATUS status = DSP_EINVALIDARG; -+ u32 dwBuffSize; -+ GT_1trace(CFG_debugMask, GT_ENTER, -+ "Entered CFG_SetObject, args: dwValue: " -+ "0x%x\n", dwValue); -+ dwBuffSize = sizeof(dwValue); -+ switch (dwType) { -+ case (REG_DRV_OBJECT): -+ status = REG_SetValue(NULL, CONFIG, DRVOBJECT, REG_DWORD, -+ (u8 *)&dwValue, dwBuffSize); -+ break; -+ case (REG_MGR_OBJECT): -+ status = REG_SetValue(NULL, CONFIG, MGROBJECT, REG_DWORD, -+ (u8 *) &dwValue, dwBuffSize); -+ break; -+ default: -+ break; -+ } -+#ifdef DEBUG -+ if (DSP_SUCCEEDED(status)) -+ GT_0trace(CFG_debugMask, GT_1CLASS, "CFG_SetObject SUCCESS \n"); -+ else -+ GT_0trace(CFG_debugMask, GT_6CLASS, "CFG_SetObject Failed \n"); -+ -+#endif -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/clk.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/clk.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/clk.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/clk.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,375 @@ -+/* -+ * clk.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== clk.c ======== -+ * Purpose: -+ * Clock and Timer services. -+ * -+ * Public Functions: -+ * CLK_Exit -+ * CLK_Init -+ * CLK_Enable -+ * CLK_Disable -+ * CLK_GetRate -+ * CLK_Set_32KHz -+ *! Revision History: -+ *! ================ -+ *! 08-May-2007 rg: moved all clock functions from sync module. -+ * And added CLK_Set_32KHz, CLK_Set_SysClk. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+ -+typedef volatile unsigned long REG_UWORD32; -+ -+#define SSI_Base 0x48058000 -+ -+#define SSI_BASE IO_ADDRESS(SSI_Base) -+ -+ -+struct SERVICES_Clk_t { -+ struct clk *clk_handle; -+ const char *clk_name; -+ int id; -+}; -+ -+/* The row order of the below array needs to match with the clock enumerations -+ * 'SERVICES_ClkId' provided in the header file.. any changes in the -+ * enumerations needs to be fixed in the array as well */ -+static struct SERVICES_Clk_t SERVICES_Clks[] = { -+ {NULL, "iva2_ck", -1}, -+ {NULL, "mailboxes_ick", -1}, -+ {NULL, "gpt5_fck", -1}, -+ {NULL, "gpt5_ick", -1}, -+ {NULL, "gpt6_fck", -1}, -+ {NULL, "gpt6_ick", -1}, -+ {NULL, "gpt7_fck", -1}, -+ {NULL, "gpt7_ick", -1}, -+ {NULL, "gpt8_fck", -1}, -+ {NULL, "gpt8_ick", -1}, -+ {NULL, "wdt_fck", 3}, -+ {NULL, "wdt_ick", 3}, -+ {NULL, "mcbsp_fck", 1}, -+ {NULL, "mcbsp_ick", 1}, -+ {NULL, "mcbsp_fck", 2}, -+ {NULL, "mcbsp_ick", 2}, -+ {NULL, "mcbsp_fck", 3}, -+ {NULL, "mcbsp_ick", 3}, -+ {NULL, "mcbsp_fck", 4}, -+ {NULL, "mcbsp_ick", 4}, -+ {NULL, "mcbsp_fck", 5}, -+ {NULL, "mcbsp_ick", 5}, -+ {NULL, "ssi_ssr_sst_fck", -1}, -+ {NULL, "ssi_ick", -1}, -+ {NULL, "omap_32k_fck", -1}, -+ {NULL, "sys_ck", -1}, -+ {NULL, ""} -+}; -+ -+/* Generic TIMER object: */ -+struct TIMER_OBJECT { -+ struct timer_list timer; -+}; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask CLK_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+/* -+ * ======== CLK_Exit ======== -+ * Purpose: -+ * Cleanup CLK module. -+ */ -+void CLK_Exit(void) -+{ -+ int i = 0; -+ -+ GT_0trace(CLK_debugMask, GT_5CLASS, "CLK_Exit\n"); -+ /* Relinquish the clock handles */ -+ while (i < SERVICESCLK_NOT_DEFINED) { -+ if (SERVICES_Clks[i].clk_handle) -+ clk_put(SERVICES_Clks[i].clk_handle); -+ -+ SERVICES_Clks[i].clk_handle = NULL; -+ i++; -+ } -+ -+} -+ -+/* -+ * ======== CLK_Init ======== -+ * Purpose: -+ * Initialize CLK module. -+ */ -+bool CLK_Init(void) -+{ -+ static struct platform_device dspbridge_device; -+ struct clk *clk_handle; -+ int i = 0; -+ GT_create(&CLK_debugMask, "CK"); /* CK for CLK */ -+ GT_0trace(CLK_debugMask, GT_5CLASS, "CLK_Init\n"); -+ -+ dspbridge_device.dev.bus = &platform_bus_type; -+ -+ /* Get the clock handles from base port and store locally */ -+ while (i < SERVICESCLK_NOT_DEFINED) { -+ /* get the handle from BP */ -+ dspbridge_device.id = SERVICES_Clks[i].id; -+ -+ clk_handle = clk_get(&dspbridge_device.dev, -+ SERVICES_Clks[i].clk_name); -+ -+ if (!clk_handle) { -+ GT_2trace(CLK_debugMask, GT_7CLASS, -+ "CLK_Init: failed to get Clk handle %s, " -+ "CLK dev id = %d\n", -+ SERVICES_Clks[i].clk_name, -+ SERVICES_Clks[i].id); -+ /* should we fail here?? */ -+ } else { -+ GT_2trace(CLK_debugMask, GT_7CLASS, -+ "CLK_Init: PASS and Clk handle %s, " -+ "CLK dev id = %d\n", -+ SERVICES_Clks[i].clk_name, -+ SERVICES_Clks[i].id); -+ } -+ SERVICES_Clks[i].clk_handle = clk_handle; -+ i++; -+ } -+ -+ return true; -+} -+ -+/* -+ * ======== CLK_Enable ======== -+ * Purpose: -+ * Enable Clock . 
-+ * -+*/ -+DSP_STATUS CLK_Enable(IN enum SERVICES_ClkId clk_id) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct clk *pClk; -+ -+ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); -+ GT_2trace(CLK_debugMask, GT_6CLASS, "CLK_Enable: CLK %s, " -+ "CLK dev id = %d\n", SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ -+ pClk = SERVICES_Clks[clk_id].clk_handle; -+ if (pClk) { -+ if (clk_enable(pClk) == 0x0) { -+ /* Success ? */ -+ } else { -+ pr_err("CLK_Enable: failed to Enable CLK %s, " -+ "CLK dev id = %d\n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ status = DSP_EFAIL; -+ } -+ } else { -+ pr_err("CLK_Enable: failed to get CLK %s, CLK dev id = %d\n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ status = DSP_EFAIL; -+ } -+ /* The SSI module need to configured not to have the Forced idle for -+ * master interface. If it is set to forced idle, the SSI module is -+ * transitioning to standby thereby causing the client in the DSP hang -+ * waiting for the SSI module to be active after enabling the clocks -+ */ -+ if (clk_id == SERVICESCLK_ssi_fck) -+ SSI_Clk_Prepare(true); -+ -+ return status; -+} -+/* -+ * ======== CLK_Set_32KHz ======== -+ * Purpose: -+ * To Set parent of a clock to 32KHz. -+ */ -+ -+DSP_STATUS CLK_Set_32KHz(IN enum SERVICES_ClkId clk_id) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct clk *pClk; -+ struct clk *pClkParent; -+ enum SERVICES_ClkId sys_32k_id = SERVICESCLK_sys_32k_ck; -+ pClkParent = SERVICES_Clks[sys_32k_id].clk_handle; -+ -+ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); -+ GT_2trace(CLK_debugMask, GT_6CLASS, "CLK_Set_32KHz: CLK %s, " -+ "CLK dev id = %d is setting to 32KHz \n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ pClk = SERVICES_Clks[clk_id].clk_handle; -+ if (pClk) { -+ if (!(clk_set_parent(pClk, pClkParent) == 0x0)) { -+ GT_2trace(CLK_debugMask, GT_7CLASS, "CLK_Set_32KHz: " -+ "Failed to set to 32KHz %s, CLK dev id = %d\n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ status = DSP_EFAIL; -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== CLK_Disable ======== -+ * Purpose: -+ * Disable the clock. -+ * -+*/ -+DSP_STATUS CLK_Disable(IN enum SERVICES_ClkId clk_id) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct clk *pClk; -+ s32 clkUseCnt; -+ -+ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); -+ GT_2trace(CLK_debugMask, GT_6CLASS, "CLK_Disable: CLK %s, " -+ "CLK dev id = %d\n", SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ -+ pClk = SERVICES_Clks[clk_id].clk_handle; -+ -+ clkUseCnt = CLK_Get_UseCnt(clk_id); -+ if (clkUseCnt == -1) { -+ pr_err("CLK_Disable: failed to get CLK Use count for CLK %s," -+ "CLK dev id = %d\n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ } else if (clkUseCnt == 0) { -+ GT_2trace(CLK_debugMask, GT_4CLASS, "CLK_Disable: CLK %s," -+ "CLK dev id= %d is already disabled\n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ return status; -+ } -+ if (clk_id == SERVICESCLK_ssi_ick) -+ SSI_Clk_Prepare(false); -+ -+ if (pClk) { -+ clk_disable(pClk); -+ } else { -+ pr_err("CLK_Disable: failed to get CLK %s," -+ "CLK dev id = %d\n", -+ SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ status = DSP_EFAIL; -+ } -+ return status; -+} -+ -+/* -+ * ======== CLK_GetRate ======== -+ * Purpose: -+ * GetClock Speed. 
-+ * -+ */ -+ -+DSP_STATUS CLK_GetRate(IN enum SERVICES_ClkId clk_id, u32 *speedKhz) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct clk *pClk; -+ u32 clkSpeedHz; -+ -+ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); -+ *speedKhz = 0x0; -+ -+ GT_2trace(CLK_debugMask, GT_7CLASS, "CLK_GetRate: CLK %s, " -+ "CLK dev Id = %d \n", SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ pClk = SERVICES_Clks[clk_id].clk_handle; -+ if (pClk) { -+ clkSpeedHz = clk_get_rate(pClk); -+ *speedKhz = clkSpeedHz / 1000; -+ GT_2trace(CLK_debugMask, GT_6CLASS, -+ "CLK_GetRate: clkSpeedHz = %d , " -+ "speedinKhz=%d\n", clkSpeedHz, *speedKhz); -+ } else { -+ GT_2trace(CLK_debugMask, GT_7CLASS, -+ "CLK_GetRate: failed to get CLK %s, " -+ "CLK dev Id = %d\n", SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ status = DSP_EFAIL; -+ } -+ return status; -+} -+ -+s32 CLK_Get_UseCnt(IN enum SERVICES_ClkId clk_id) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct clk *pClk; -+ s32 useCount = -1; -+ DBC_Require(clk_id < SERVICESCLK_NOT_DEFINED); -+ -+ pClk = SERVICES_Clks[clk_id].clk_handle; -+ -+ if (pClk) { -+ useCount = pClk->usecount; /* FIXME: usecount shouldn't be used */ -+ } else { -+ GT_2trace(CLK_debugMask, GT_7CLASS, -+ "CLK_GetRate: failed to get CLK %s, " -+ "CLK dev Id = %d\n", SERVICES_Clks[clk_id].clk_name, -+ SERVICES_Clks[clk_id].id); -+ status = DSP_EFAIL; -+ } -+ return useCount; -+} -+ -+void SSI_Clk_Prepare(bool FLAG) -+{ -+ u32 ssi_sysconfig; -+ ssi_sysconfig = __raw_readl((SSI_BASE) + 0x10); -+ -+ if (FLAG) { -+ /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to -+ * no idle -+ */ -+ ssi_sysconfig = 0x1011; -+ } else { -+ /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to -+ * forced idle -+ */ -+ ssi_sysconfig = 0x1; -+ } -+ __raw_writel((u32)ssi_sysconfig, SSI_BASE + 0x10); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/csl.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/csl.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/csl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/csl.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,173 @@ -+/* -+ * csl.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== cslce.c ======== -+ * Purpose: -+ * Provides platform independent C Standard library functions. -+ * -+ * Public Functions: -+ * CSL_Atoi -+ * CSL_Exit -+ * CSL_Init -+ * CSL_NumToAscii -+ * CSL_Strtokr -+ * -+ *! Revision History: -+ *! ================ -+ *! 07-Aug-2002 jeh: Added CSL_Strtokr(). -+ *! 21-Sep-2001 jeh: Added CSL_Strncmp(). Alphabetized functions. -+ *! 22-Nov-2000 map: Added CSL_Atoi and CSL_Strtok -+ *! 19-Nov-2000 kc: Added CSL_ByteSwap. -+ *! 09-Nov-2000 kc: Added CSL_Strncat. -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 15-Dec-1999 ag: Removed incorrect assertion CSL_NumToAscii() -+ *! 29-Oct-1999 kc: Added CSL_Wstrlen for UNICODE strings. -+ *! 
30-Sep-1999 ag: Removed DBC assertion (!CSL_DebugMask.flags) in -+ * CSP_Init(). -+ *! 20-Sep-1999 ag: Added CSL_WcharToAnsi(). -+ *! Removed call to GT_set(). -+ *! 19-Jan-1998 cr: Code review cleanup. -+ *! 29-Dec-1997 cr: Made platform independant, using MS CRT code, and -+ *! combined csl32.c csl95.c and cslnt.c into csl.c. Also -+ *! changed CSL_lowercase to CSL_Uppercase. -+ *! 21-Aug-1997 gp: Fix to CSL_strcpyn to initialize Source string, the NT way. -+ *! 25-Jun-1997 cr: Created from csl95, added CSL_strcmp. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* Is character c in the string pstrDelim? */ -+#define IsDelimiter(c, pstrDelim) ((c != '\0') && \ -+ (strchr(pstrDelim, c) != NULL)) -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask CSL_DebugMask = { NULL, NULL }; /* GT trace var. */ -+#endif -+ -+/* -+ * ======== CSL_Exit ======== -+ * Purpose: -+ * Discontinue usage of the CSL module. -+ */ -+void CSL_Exit(void) -+{ -+ GT_0trace(CSL_DebugMask, GT_5CLASS, "CSL_Exit\n"); -+} -+ -+/* -+ * ======== CSL_Init ======== -+ * Purpose: -+ * Initialize the CSL module's private state. -+ */ -+bool CSL_Init(void) -+{ -+ GT_create(&CSL_DebugMask, "CS"); -+ -+ GT_0trace(CSL_DebugMask, GT_5CLASS, "CSL_Init\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== CSL_NumToAscii ======== -+ * Purpose: -+ * Convert a 1 or 2 digit number to a 2 digit string. -+ */ -+void CSL_NumToAscii(OUT char *pstrNumber, u32 dwNum) -+{ -+ char tens; -+ -+ DBC_Require(dwNum < 100); -+ -+ if (dwNum < 100) { -+ tens = (char) dwNum / 10; -+ dwNum = dwNum % 10; -+ -+ if (tens) { -+ pstrNumber[0] = tens + '0'; -+ pstrNumber[1] = (char) dwNum + '0'; -+ pstrNumber[2] = '\0'; -+ } else { -+ pstrNumber[0] = (char) dwNum + '0'; -+ pstrNumber[1] = '\0'; -+ } -+ } else { -+ pstrNumber[0] = '\0'; -+ } -+} -+ -+ -+ -+ -+/* -+ * ======= CSL_Strtokr ======= -+ * Purpose: -+ * Re-entrant version of strtok. -+ */ -+char *CSL_Strtokr(IN char *pstrSrc, IN CONST char *szSeparators, -+ OUT char **ppstrLast) -+{ -+ char *pstrTemp; -+ char *pstrToken; -+ -+ DBC_Require(szSeparators != NULL); -+ DBC_Require(ppstrLast != NULL); -+ DBC_Require(pstrSrc != NULL || *ppstrLast != NULL); -+ -+ /* -+ * Set string location to beginning (pstrSrc != NULL) or to the -+ * beginning of the next token. -+ */ -+ pstrTemp = (pstrSrc != NULL) ? pstrSrc : *ppstrLast; -+ if (*pstrTemp == '\0') { -+ pstrToken = NULL; -+ } else { -+ pstrToken = pstrTemp; -+ while (*pstrTemp != '\0' && !IsDelimiter(*pstrTemp, -+ szSeparators)) { -+ pstrTemp++; -+ } -+ if (*pstrTemp != '\0') { -+ while (IsDelimiter(*pstrTemp, szSeparators)) { -+ /* TODO: Shouldn't we do this for -+ * only 1 char?? */ -+ *pstrTemp = '\0'; -+ pstrTemp++; -+ } -+ } -+ -+ /* Location in string for next call */ -+ *ppstrLast = pstrTemp; -+ } -+ -+ return pstrToken; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dbg.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/dbg.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dbg.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/dbg.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,119 @@ -+/* -+ * dbg.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. 
-+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== dbgce.c ======== -+ * Purpose: -+ * Provide debugging services for DSP/BIOS Bridge Mini Drivers. -+ * -+ * Public Functions: -+ * DBG_Exit -+ * DBG_Init -+ * DBG_Trace -+ * -+ * Notes: -+ * Requires gt.h. -+ * -+ * This implementation does not create GT masks on a per WMD basis. -+ * There is currently no facility for a WMD to alter the GT mask. -+ * -+ *! Revision History: -+ *! ================ -+ *! 15-Feb-2000 rr: DBG_Trace prints based on the DebugZones. -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 29-Oct-1999 kc: Cleaned up for code review. -+ *! 10-Oct-1997 cr: Added DBG_Printf service. -+ *! 28-May-1997 cr: Added reference counting. -+ *! 23-May-1997 cr: Updated DBG_Trace to new gt interface. -+ *! 29-May-1996 gp: Removed WCD_ prefix. -+ *! 20-May-1996 gp: Remove DEBUG conditional compilation. -+ *! 15-May-1996 gp: Created. -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask DBG_debugMask = { NULL, NULL }; /* GT trace var. */ -+#endif -+ -+#if (defined(DEBUG) || defined (DDSP_DEBUG_PRODUCT)) && GT_TRACE -+ -+/* -+ * ======== DBG_Init ======== -+ * Purpose: -+ * Ensures trace capability is set up for link drivers. -+ */ -+bool DBG_Init(void) -+{ -+ GT_create(&DBG_debugMask, "WD"); /* for WmD (link driver) debug */ -+ -+ GT_0trace(DBG_debugMask, GT_5CLASS, "DBG_Init\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== DBG_Trace ======== -+ * Purpose: -+ * Output a trace message to the debugger, if the given trace level -+ * is unmasked. -+ */ -+DSP_STATUS DBG_Trace(u8 bLevel, char *pstrFormat, ...) -+{ -+ s32 arg1, arg2, arg3, arg4, arg5, arg6; -+ va_list va; -+ -+ va_start(va, pstrFormat); -+ -+ arg1 = va_arg(va, s32); -+ arg2 = va_arg(va, s32); -+ arg3 = va_arg(va, s32); -+ arg4 = va_arg(va, s32); -+ arg5 = va_arg(va, s32); -+ arg6 = va_arg(va, s32); -+ -+ va_end(va); -+ -+ if (bLevel & *(DBG_debugMask).flags) -+ printk(pstrFormat, arg1, arg2, arg3, arg4, arg5, arg6); -+ -+ return DSP_SOK; -+} -+ -+/* -+ * ======== DBG_Exit ======== -+ * Purpose: -+ * Discontinue usage of the DBG module. -+ */ -+void DBG_Exit(void) -+{ -+ GT_0trace(DBG_debugMask, GT_5CLASS, "DBG_Exit\n"); -+} -+ -+#endif /* (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dpc.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/dpc.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/dpc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/dpc.c 2011-06-22 13:19:32.543063279 +0200 -@@ -0,0 +1,274 @@ -+/* -+ * dpc.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. 
-+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== dpcce.c ======== -+ * Purpose: -+ * Deferred Procedure Call(DPC) Services. -+ * -+ * -+ * Public Functions: -+ * DPC_Create -+ * DPC_Destroy -+ * DPC_Exit -+ * DPC_Init -+ * DPC_Schedule -+ * -+ *! Revision History: -+ *! ================ -+ *! 28-Mar-2001 ag: Added #ifdef CHNL_NOIPCINTR to set DPC thread priority -+ *! to THREAD_PRIORITY_IDLE for polling IPC. -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 31-Jan-2000 rr: Changes after code review.Terminate thread,handle -+ *! modified.DPC_Destroy frees the DPC_Object only on -+ *! Successful termination of the thread and the handle. -+ *! 06-Jan-1999 ag: Format cleanup for code review. -+ *! Removed DPC_[Lower|Raise]IRQL[From|To]DispatchLevel. -+ *! 10-Dec-1999 ag: Added SetProcPermissions in DPC_DeferredProcedure(). -+ *! (Needed to access client(s) CHNL buffers). -+ *! 19-Sep-1999 a0216266: Stubbed from dpcnt.c. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define SIGNATURE 0x5f435044 /* "DPC_" (in reverse). */ -+ -+/* The DPC object, passed to our priority event callback routine: */ -+struct DPC_OBJECT { -+ u32 dwSignature; /* Used for object validation. */ -+ void *pRefData; /* Argument for client's DPC. */ -+ DPC_PROC pfnDPC; /* Client's DPC. */ -+ u32 numRequested; /* Number of requested DPC's. */ -+ u32 numScheduled; /* Number of executed DPC's. */ -+ struct tasklet_struct dpc_tasklet; -+ -+#ifdef DEBUG -+ u32 cEntryCount; /* Number of times DPC reentered. */ -+ u32 numRequestedMax; /* Keep track of max pending DPC's. */ -+#endif -+ -+ spinlock_t dpc_lock; -+}; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask DPC_DebugMask = { NULL, NULL }; /* DPC Debug Mask */ -+#endif -+ -+/* ----------------------------------- Function Prototypes */ -+static void DPC_DeferredProcedure(IN unsigned long pDeferredContext); -+ -+/* -+ * ======== DPC_Create ======== -+ * Purpose: -+ * Create a DPC object, allowing a client's own DPC procedure to be -+ * scheduled for a call with client reference data. -+ */ -+DSP_STATUS DPC_Create(OUT struct DPC_OBJECT **phDPC, DPC_PROC pfnDPC, -+ void *pRefData) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DPC_OBJECT *pDPCObject = NULL; -+ -+ if ((phDPC != NULL) && (pfnDPC != NULL)) { -+ /* -+ * Allocate a DPC object to store information allowing our DPC -+ * callback to dispatch to the client's DPC. 
-+ */ -+ MEM_AllocObject(pDPCObject, struct DPC_OBJECT, SIGNATURE); -+ if (pDPCObject != NULL) { -+ tasklet_init(&pDPCObject->dpc_tasklet, -+ DPC_DeferredProcedure, -+ (u32) pDPCObject); -+ /* Fill out our DPC Object: */ -+ pDPCObject->pRefData = pRefData; -+ pDPCObject->pfnDPC = pfnDPC; -+ pDPCObject->numRequested = 0; -+ pDPCObject->numScheduled = 0; -+#ifdef DEBUG -+ pDPCObject->numRequestedMax = 0; -+ pDPCObject->cEntryCount = 0; -+#endif -+ spin_lock_init(&pDPCObject->dpc_lock); -+ *phDPC = pDPCObject; -+ } else { -+ GT_0trace(DPC_DebugMask, GT_6CLASS, -+ "DPC_Create: DSP_EMEMORY\n"); -+ status = DSP_EMEMORY; -+ } -+ } else { -+ GT_0trace(DPC_DebugMask, GT_6CLASS, -+ "DPC_Create: DSP_EPOINTER\n"); -+ status = DSP_EPOINTER; -+ } -+ DBC_Ensure((DSP_FAILED(status) && (!phDPC || (phDPC && *phDPC == NULL))) -+ || DSP_SUCCEEDED(status)); -+ return status; -+} -+ -+/* -+ * ======== DPC_Destroy ======== -+ * Purpose: -+ * Cancel the last scheduled DPC, and deallocate a DPC object previously -+ * allocated with DPC_Create(). Frees the Object only if the thread -+ * and the event terminated successfuly. -+ */ -+DSP_STATUS DPC_Destroy(struct DPC_OBJECT *hDPC) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DPC_OBJECT *pDPCObject = (struct DPC_OBJECT *)hDPC; -+ -+ if (MEM_IsValidHandle(hDPC, SIGNATURE)) { -+ -+ /* Free our DPC object: */ -+ if (DSP_SUCCEEDED(status)) { -+ tasklet_kill(&pDPCObject->dpc_tasklet); -+ MEM_FreeObject(pDPCObject); -+ pDPCObject = NULL; -+ GT_0trace(DPC_DebugMask, GT_2CLASS, -+ "DPC_Destroy: SUCCESS\n"); -+ } -+ } else { -+ GT_0trace(DPC_DebugMask, GT_6CLASS, -+ "DPC_Destroy: DSP_EHANDLE\n"); -+ status = DSP_EHANDLE; -+ } -+ DBC_Ensure((DSP_SUCCEEDED(status) && pDPCObject == NULL) -+ || DSP_FAILED(status)); -+ return status; -+} -+ -+/* -+ * ======== DPC_Exit ======== -+ * Purpose: -+ * Discontinue usage of the DPC module. -+ */ -+void DPC_Exit(void) -+{ -+ GT_0trace(DPC_DebugMask, GT_5CLASS, "Entered DPC_Exit\n"); -+} -+ -+/* -+ * ======== DPC_Init ======== -+ * Purpose: -+ * Initialize the DPC module's private state. -+ */ -+bool DPC_Init(void) -+{ -+ GT_create(&DPC_DebugMask, "DP"); -+ -+ GT_0trace(DPC_DebugMask, GT_5CLASS, "Entered DPC_Init\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== DPC_Schedule ======== -+ * Purpose: -+ * Schedule a deferred procedure call to be executed at a later time. -+ * Latency and order of DPC execution is platform specific. -+ */ -+DSP_STATUS DPC_Schedule(struct DPC_OBJECT *hDPC) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DPC_OBJECT *pDPCObject = (struct DPC_OBJECT *)hDPC; -+ unsigned long flags; -+ -+ GT_1trace(DPC_DebugMask, GT_ENTER, "DPC_Schedule hDPC %x\n", hDPC); -+ if (MEM_IsValidHandle(hDPC, SIGNATURE)) { -+ /* Increment count of DPC's pending. Needs to be protected -+ * from ISRs since this function is called from process -+ * context also. */ -+ spin_lock_irqsave(&hDPC->dpc_lock, flags); -+ pDPCObject->numRequested++; -+ spin_unlock_irqrestore(&hDPC->dpc_lock, flags); -+ tasklet_schedule(&(hDPC->dpc_tasklet)); -+#ifdef DEBUG -+ if (pDPCObject->numRequested > pDPCObject->numScheduled + -+ pDPCObject->numRequestedMax) { -+ pDPCObject->numRequestedMax = pDPCObject->numRequested - -+ pDPCObject->numScheduled; -+ } -+#endif -+ /* If an interrupt occurs between incrementing numRequested and the -+ * assertion below, then DPC will get executed while returning from -+ * ISR, which will complete all requests and make numRequested equal -+ * to numScheduled, firing this assertion. 
This happens only when -+ * DPC is being scheduled in process context */ -+ } else { -+ GT_0trace(DPC_DebugMask, GT_6CLASS, -+ "DPC_Schedule: DSP_EHANDLE\n"); -+ status = DSP_EHANDLE; -+ } -+ GT_1trace(DPC_DebugMask, GT_ENTER, "DPC_Schedule status %x\n", status); -+ return status; -+} -+ -+/* -+ * ======== DeferredProcedure ======== -+ * Purpose: -+ * Main DPC routine. This is called by host OS DPC callback -+ * mechanism with interrupts enabled. -+ */ -+static void DPC_DeferredProcedure(IN unsigned long pDeferredContext) -+{ -+ struct DPC_OBJECT *pDPCObject = (struct DPC_OBJECT *)pDeferredContext; -+ /* read numRequested in local variable */ -+ u32 requested; -+ u32 serviced; -+ -+ DBC_Require(pDPCObject != NULL); -+ requested = pDPCObject->numRequested; -+ serviced = pDPCObject->numScheduled; -+ -+ GT_1trace(DPC_DebugMask, GT_ENTER, "> DPC_DeferredProcedure " -+ "pDeferredContext=%x\n", pDeferredContext); -+ /* Rollover taken care of using != instead of < */ -+ if (serviced != requested) { -+ if (pDPCObject->pfnDPC != NULL) { -+ /* Process pending DPC's: */ -+ do { -+ /* Call client's DPC: */ -+ (*(pDPCObject->pfnDPC))(pDPCObject->pRefData); -+ serviced++; -+ } while (serviced != requested); -+ } -+ pDPCObject->numScheduled = requested; -+ } -+ GT_2trace(DPC_DebugMask, GT_ENTER, -+ "< DPC_DeferredProcedure requested %d" -+ " serviced %d\n", requested, serviced); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/kfile.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/kfile.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/kfile.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/kfile.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,335 @@ -+/* -+ * kfile.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== kfilece.c ======== -+ * Purpose: -+ * This module provides file i/o services. -+ * -+ * Public Functions: -+ * KFILE_Close -+ * KFILE_Exit -+ * KFILE_Init -+ * KFILE_Open -+ * KFILE_Read -+ * KFILE_Seek -+ * KFILE_Tell -+ * -+ *! Revision History -+ *! ================ -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 22-Nov-1999 kc: Added changes from code review. -+ *! 12-Nov-1999 kc: Enabled CSL for UNICODE/ANSI string conversions. -+ *! 30-Sep-1999 ag: Changed KFILE_Read() GT level from _ENTER to _4CLASS. -+ *! Removed GT_set(). -+ *! 25-Aug-1999 ag: Changed MEM_Calloc allocation type to MEM_PAGED. -+ *! 13-Jul-1999 a0216266(ww - TID): Stubbed from kfilent.c. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define SIGNATURE 0x4c49464b /* hex code of KFIL (reversed) */ -+#define MAXFILENAMELENGTH 256 -+#define GENERAL_FAILURE 0xffffffff /* SetFilePointer error */ -+ -+/* The KFILE_FileObj abstracts the true file handle from a KFILE handle. */ -+struct KFILE_FileObj { -+ u32 dwSignature; -+ __kernel_pid_t owner_pid; /* PID of process that opened this file */ -+ char *fileName ; -+ bool isOpen ; -+ u32 size ; -+ u32 curPos ; -+ long hInternal; /* internal handle of file */ -+ struct file *fileDesc; -+ -+}; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask KFILE_debugMask = { NULL, NULL }; /* Debug mask */ -+#endif -+ -+/* -+ * ======== KFILE_Close ======== -+ * Purpose: -+ * This function closes a file's stream. -+ */ -+s32 KFILE_Close(struct KFILE_FileObj *hFile) -+{ -+ s32 cRetVal = 0; /* 0 indicates success */ -+ s32 fRetVal = 0; -+ -+ GT_1trace(KFILE_debugMask, GT_ENTER, "KFILE_Close: hFile 0x%x\n", -+ hFile); -+ -+ /* Check for valid handle */ -+ if (MEM_IsValidHandle(hFile, SIGNATURE)) { -+ /* Close file only if opened by the same process (id). Otherwise -+ * Linux closes all open file handles when process exits.*/ -+ fRetVal = filp_close(hFile->fileDesc, NULL) ; -+ if (fRetVal) { -+ cRetVal = E_KFILE_ERROR; -+ GT_1trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Close: sys_close " -+ "returned %d\n", fRetVal); -+ } -+ MEM_FreeObject(hFile); -+ } else { -+ cRetVal = E_KFILE_INVALIDHANDLE; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, "KFILE_Close: " -+ "invalid file handle\n"); -+ } -+ return cRetVal; -+} -+ -+/* -+ * ======== KFILE_Exit ======== -+ * Purpose: -+ * Decrement reference count, and free resources when reference count -+ * is 0. -+ */ -+void KFILE_Exit(void) -+{ -+ GT_0trace(KFILE_debugMask, GT_5CLASS, "KFILE_Exit\n"); -+} -+ -+/* -+ * ======== KFILE_Init ======== -+ */ -+bool KFILE_Init(void) -+{ -+ GT_create(&KFILE_debugMask, "KF"); /* "KF" for KFile */ -+ -+ GT_0trace(KFILE_debugMask, GT_5CLASS, "KFILE_Init\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== KFILE_Open ======== -+ * Purpose: -+ * Open a file for reading ONLY -+ */ -+struct KFILE_FileObj *KFILE_Open(CONST char *pszFileName, CONST char *pszMode) -+{ -+ struct KFILE_FileObj *hFile; /* file handle */ -+ DSP_STATUS status; -+ mm_segment_t fs; -+ -+ struct file*fileDesc = NULL; -+ DBC_Require(pszMode != NULL); -+ DBC_Require(pszFileName != NULL); -+ -+ GT_2trace(KFILE_debugMask, GT_ENTER, -+ "KFILE_Open: pszFileName %s, pszMode " -+ "%s\n", pszFileName, pszMode); -+ -+ /* create a KFILE object */ -+ MEM_AllocObject(hFile, struct KFILE_FileObj, SIGNATURE); -+ -+ if (hFile) { -+ fs = get_fs(); -+ set_fs(get_ds()); -+ /* Third argument is mode (permissions). 
Ignored unless creating file */ -+ fileDesc = filp_open(pszFileName, O_RDONLY, 0); -+ if ((IS_ERR(fileDesc)) || (fileDesc == NULL) || -+ (fileDesc->f_op == NULL) || (fileDesc->f_op->read == NULL) -+ || (fileDesc->f_op->llseek == NULL)) { -+ status = DSP_EFILE; -+ } else { -+ hFile->fileDesc = fileDesc; -+ hFile->fileName = (char *)pszFileName; -+ hFile->isOpen = true; -+ hFile->curPos = 0; -+ hFile->size = fileDesc->f_op->llseek(fileDesc, 0, -+ SEEK_END); -+ fileDesc->f_op->llseek(fileDesc, 0, SEEK_SET); -+ /* Return TGID instead of process handle */ -+ hFile->owner_pid = current->tgid; -+ -+ status = DSP_SOK; -+ } -+ set_fs(fs); -+ if (DSP_FAILED(status)) { -+ /* free memory, and clear handle */ -+ MEM_FreeObject(hFile); -+ hFile = NULL; -+ } -+ } else { -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Open: MEM_AllocObject failed\n"); -+ status = DSP_EMEMORY; -+ } -+ return hFile; -+} -+ -+/* -+ * ======== KFILE_Read ======== -+ * Purpose: -+ * Reads a specified number of bytes into a buffer. -+ */ -+s32 -+KFILE_Read(void __user*pBuffer, s32 cSize, s32 cCount, -+ struct KFILE_FileObj *hFile) -+{ -+ u32 dwBytesRead = 0; -+ s32 cRetVal = 0; -+ mm_segment_t fs; -+ -+ DBC_Require(pBuffer != NULL); -+ -+ GT_4trace(KFILE_debugMask, GT_4CLASS, -+ "KFILE_Read: buffer 0x%x, cSize 0x%x," -+ "cCount 0x%x, hFile 0x%x\n", pBuffer, cSize, cCount, hFile); -+ -+ /* check for valid file handle */ -+ if (MEM_IsValidHandle(hFile, SIGNATURE)) { -+ if ((cSize > 0) && (cCount > 0) && pBuffer) { -+ /* read from file */ -+ fs = get_fs(); -+ set_fs(get_ds()); -+ dwBytesRead = hFile->fileDesc->f_op->read(hFile-> -+ fileDesc, pBuffer, cSize *cCount, -+ &(hFile->fileDesc->f_pos)); -+ set_fs(fs); -+ if (dwBytesRead) { -+ cRetVal = dwBytesRead / cSize; -+ hFile->curPos += dwBytesRead; -+ DBC_Assert((dwBytesRead / cSize) <= \ -+ (u32)cCount); -+ } else { -+ cRetVal = E_KFILE_ERROR; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Read: sys_read() failed\n"); -+ } -+ } else { -+ cRetVal = DSP_EINVALIDARG; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Read: Invalid argument(s)\n"); -+ } -+ } else { -+ cRetVal = E_KFILE_INVALIDHANDLE; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Read: invalid file handle\n"); -+ } -+ -+ return cRetVal; -+} -+ -+/* -+ * ======== KFILE_Seek ======== -+ * Purpose: -+ * Sets the file position indicator. NOTE: we don't support seeking -+ * beyond the boundaries of a file. -+ */ -+s32 KFILE_Seek(struct KFILE_FileObj *hFile, s32 lOffset, s32 cOrigin) -+{ -+ s32 cRetVal = 0; /* 0 for success */ -+ loff_t dwCurPos = 0; -+ -+ struct file *fileDesc = NULL; -+ -+ GT_3trace(KFILE_debugMask, GT_ENTER, "KFILE_Seek: hFile 0x%x, " -+ "lOffset 0x%x, cOrigin 0x%x\n", -+ hFile, lOffset, cOrigin); -+ -+ /* check for valid file handle */ -+ if (MEM_IsValidHandle(hFile, SIGNATURE)) { -+ /* based on the origin flag, move the internal pointer */ -+ -+ fileDesc = hFile->fileDesc; -+ switch (cOrigin) { -+ case KFILE_SEEK_SET: -+ dwCurPos = hFile->fileDesc->f_op->llseek(hFile-> -+ fileDesc, lOffset, SEEK_SET); -+ cRetVal = ((dwCurPos >= 0) ? 0 : E_KFILE_ERROR); -+ break; -+ -+ case KFILE_SEEK_CUR: -+ dwCurPos = hFile->fileDesc->f_op->llseek(hFile-> -+ fileDesc, lOffset, SEEK_CUR); -+ cRetVal = ((dwCurPos >= 0) ? 0 : E_KFILE_ERROR); -+ break; -+ case KFILE_SEEK_END: -+ dwCurPos = hFile->fileDesc->f_op->llseek(hFile-> -+ fileDesc, lOffset, SEEK_END); -+ cRetVal = ((dwCurPos >= 0) ? 
0 : E_KFILE_ERROR); -+ break; -+ default: -+ cRetVal = E_KFILE_BADORIGINFLAG; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Seek:bad origin flag\n"); -+ break; -+ } -+ } else { -+ cRetVal = E_KFILE_INVALIDHANDLE; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Seek:invalid file handle\n"); -+ } -+ return cRetVal; -+} -+ -+/* -+ * ======== KFILE_Tell ======== -+ * Purpose: -+ * Reports the current value of the position indicator. We did not -+ * consider 64 bit long file size, which implies a 4GB file limit -+ * (2 to 32 power). -+ */ -+s32 KFILE_Tell(struct KFILE_FileObj *hFile) -+{ -+ loff_t dwCurPos = 0; -+ s32 lRetVal = E_KFILE_ERROR; -+ -+ GT_1trace(KFILE_debugMask, GT_ENTER, "KFILE_Tell: hFile 0x%x\n", hFile); -+ -+ if (MEM_IsValidHandle(hFile, SIGNATURE)) { -+ -+ /* Get current position. */ -+ dwCurPos = hFile->fileDesc->f_op->llseek(hFile->fileDesc, 0, -+ SEEK_CUR); -+ if (dwCurPos >= 0) -+ lRetVal = dwCurPos; -+ -+ } else { -+ lRetVal = E_KFILE_INVALIDHANDLE; -+ GT_0trace(KFILE_debugMask, GT_6CLASS, -+ "KFILE_Seek:invalid file handle\n"); -+ } -+ return lRetVal; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/list.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/list.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/list.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/list.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,285 @@ -+/* -+ * list.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== listce.c ======== -+ * Purpose -+ * Provides standard circular list handling functions. -+ * -+ * Public Functions: -+ * LST_Create -+ * LST_Delete -+ * LST_Exit -+ * LST_First -+ * LST_GetHead -+ * LST_Init -+ * LST_InitElem -+ * LST_InsertBefore -+ * LST_Next -+ * LST_PutTail -+ * LST_RemoveElem -+ * -+ *! Revision History -+ *! ================ -+ *! 06-Mar-2002 jeh Don't set element self to NULL in LST_RemoveElem(). -+ *! 10-Aug-2000 ag: Added LST_InsertBefore(). -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 22-Nov-1999 kc: Added changes from code review. -+ *! 10-Aug-1999 kc: Based on wsx-c18. -+ *! 16-Jun-1997 gp: Removed unnecessary enabling/disabling of interrupts around -+ *! list manipulation code. -+ *! 22-Oct-1996 gp: Added LST_RemoveElem, and LST_First/LST_Next iterators. -+ *! 10-Aug-1996 gp: Acquired from SMM for WinSPOX v. 1.1; renamed identifiers. -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask LST_debugMask = { NULL, NULL }; /* GT trace var. */ -+#endif -+ -+/* -+ * ======== LST_Create ======== -+ * Purpose: -+ * Allocates and initializes a circular list. 
-+ */ -+struct LST_LIST *LST_Create(void) -+{ -+ struct LST_LIST *pList; -+ -+ GT_0trace(LST_debugMask, GT_ENTER, "LST_Create: entered\n"); -+ -+ pList = (struct LST_LIST *) MEM_Calloc(sizeof(struct LST_LIST), -+ MEM_NONPAGED); -+ if (pList != NULL) { -+ pList->head.next = &pList->head; -+ pList->head.prev = &pList->head; -+ pList->head.self = NULL; -+ } -+ -+ return pList; -+} -+ -+/* -+ * ======== LST_Delete ======== -+ * Purpose: -+ * Removes a list by freeing its control structure's memory space. -+ */ -+void LST_Delete(struct LST_LIST *pList) -+{ -+ DBC_Require(pList != NULL); -+ -+ GT_1trace(LST_debugMask, GT_ENTER, "LST_Delete: pList 0x%x\n", pList); -+ -+ MEM_Free(pList); -+} -+ -+/* -+ * ======== LST_Exit ======== -+ * Purpose: -+ * Discontinue usage of the LST module. -+ */ -+void LST_Exit(void) -+{ -+ GT_0trace(LST_debugMask, GT_5CLASS, "LST_Exit\n"); -+} -+ -+/* -+ * ======== LST_First ======== -+ * Purpose: -+ * Returns a pointer to the first element of the list, or NULL if the -+ * list is empty. -+ */ -+struct LST_ELEM *LST_First(struct LST_LIST *pList) -+{ -+ struct LST_ELEM *pElem = NULL; -+ -+ DBC_Require(pList != NULL); -+ -+ GT_1trace(LST_debugMask, GT_ENTER, "LST_First: pList 0x%x\n", pList); -+ -+ if (!LST_IsEmpty(pList)) -+ pElem = pList->head.next; -+ -+ return pElem; -+} -+ -+/* -+ * ======== LST_GetHead ======== -+ * Purpose: -+ * "Pops" the head off the list and returns a pointer to it. -+ */ -+struct LST_ELEM *LST_GetHead(struct LST_LIST *pList) -+{ -+ struct LST_ELEM *pElem; -+ -+ DBC_Require(pList != NULL); -+ -+ GT_1trace(LST_debugMask, GT_ENTER, "LST_GetHead: pList 0x%x\n", pList); -+ -+ if (LST_IsEmpty(pList)) -+ return NULL; -+ -+ /* pElem is always valid because the list cannot be empty -+ * at this point */ -+ pElem = pList->head.next; -+ pList->head.next = pElem->next; -+ pElem->next->prev = &pList->head; -+ -+ return pElem->self; -+} -+ -+/* -+ * ======== LST_Init ======== -+ * Purpose: -+ * Initialize LST module private state. -+ */ -+bool LST_Init(void) -+{ -+ GT_create(&LST_debugMask, "LS"); /* LS for LSt module */ -+ -+ GT_0trace(LST_debugMask, GT_5CLASS, "LST_Init\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== LST_InitElem ======== -+ * Purpose: -+ * Initializes a list element to default (cleared) values -+ */ -+void LST_InitElem(struct LST_ELEM *pElem) -+{ -+ DBC_Require(pElem != NULL); -+ -+ GT_1trace(LST_debugMask, GT_ENTER, "LST_InitElem: pElem 0x%x\n", pElem); -+ -+ if (pElem) { -+ pElem->next = NULL; -+ pElem->prev = NULL; -+ pElem->self = pElem; -+ } -+} -+ -+/* -+ * ======== LST_InsertBefore ======== -+ * Purpose: -+ * Insert the element before the existing element. -+ */ -+void LST_InsertBefore(struct LST_LIST *pList, struct LST_ELEM *pElem, -+ struct LST_ELEM *pElemExisting) -+{ -+ DBC_Require(pList != NULL); -+ DBC_Require(pElem != NULL); -+ DBC_Require(pElemExisting != NULL); -+ -+ GT_3trace(LST_debugMask, GT_ENTER, "LST_InsertBefore: pList 0x%x, " -+ "pElem 0x%x pElemExisting 0x%x\n", pList, pElem, -+ pElemExisting); -+ -+ pElemExisting->prev->next = pElem; -+ pElem->prev = pElemExisting->prev; -+ pElem->next = pElemExisting; -+ pElemExisting->prev = pElem; -+} -+ -+/* -+ * ======== LST_Next ======== -+ * Purpose: -+ * Returns a pointer to the next element of the list, or NULL if the -+ * next element is the head of the list or the list is empty. 
-+ */ -+struct LST_ELEM *LST_Next(struct LST_LIST *pList, struct LST_ELEM *pCurElem) -+{ -+ struct LST_ELEM *pNextElem = NULL; -+ -+ DBC_Require(pList != NULL); -+ DBC_Require(pCurElem != NULL); -+ -+ GT_2trace(LST_debugMask, GT_ENTER, -+ "LST_Next: pList 0x%x, pCurElem 0x%x\n", -+ pList, pCurElem); -+ -+ if (!LST_IsEmpty(pList)) { -+ if (pCurElem->next != &pList->head) -+ pNextElem = pCurElem->next; -+ } -+ -+ return pNextElem; -+} -+ -+/* -+ * ======== LST_PutTail ======== -+ * Purpose: -+ * Adds the specified element to the tail of the list -+ */ -+void LST_PutTail(struct LST_LIST *pList, struct LST_ELEM *pElem) -+{ -+ DBC_Require(pList != NULL); -+ DBC_Require(pElem != NULL); -+ -+ GT_2trace(LST_debugMask, GT_ENTER, -+ "LST_PutTail: pList 0x%x, pElem 0x%x\n", -+ pList, pElem); -+ -+ pElem->prev = pList->head.prev; -+ pElem->next = &pList->head; -+ pList->head.prev = pElem; -+ pElem->prev->next = pElem; -+ -+ DBC_Ensure(!LST_IsEmpty(pList)); -+} -+ -+/* -+ * ======== LST_RemoveElem ======== -+ * Purpose: -+ * Removes (unlinks) the given element from the list, if the list is not -+ * empty. Does not free the list element. -+ */ -+void LST_RemoveElem(struct LST_LIST *pList, struct LST_ELEM *pCurElem) -+{ -+ DBC_Require(pList != NULL); -+ DBC_Require(pCurElem != NULL); -+ -+ GT_2trace(LST_debugMask, GT_ENTER, -+ "LST_RemoveElem: pList 0x%x, pCurElem " -+ "0x%x\n", pList, pCurElem); -+ -+ if (!LST_IsEmpty(pList)) { -+ pCurElem->prev->next = pCurElem->next; -+ pCurElem->next->prev = pCurElem->prev; -+ -+ /* set elem fields to NULL to prevent illegal references */ -+ pCurElem->next = NULL; -+ pCurElem->prev = NULL; -+ } -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/mem.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/mem.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/mem.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/mem.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,628 @@ -+/* -+ * mem.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== mem.c ======== -+ * Purpose: -+ * Implementation of platform specific memory services. -+ * -+ * Public Functions: -+ * MEM_Alloc -+ * MEM_AllocPhysMem -+ * MEM_Calloc -+ * MEM_Exit -+ * MEM_FlushCache -+ * MEM_Free -+ * MEM_FreePhysMem -+ * MEM_Init -+ * MEM_ExtPhysPoolInit -+ * -+ *! Revision History: -+ *! ================= -+ *! 18-Jan-2004 hp: Added support for External physical memory pool -+ *! 19-Apr-2004 sb: Added Alloc/Free PhysMem, FlushCache, VirtualToPhysical -+ *! 01-Sep-2001 ag: Code cleanup. -+ *! 02-May-2001 ag: MEM_[UnMap]LinearAddress revamped to align Phys to Virt. -+ *! Set PAGE_PHYSICAL if phy addr <= 512MB. Opposite uSoft doc! -+ *! 29-Aug-2000 rr: MEM_LinearAddress does not check for 512MB for non-x86. -+ *! 28-Mar-2000 rr: MEM_LinearAddress changed.Handles address larger than 512MB -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 22-Nov-1999 kc: Added changes from code review. 
-+ *! 16-Aug-1999 kc: modified for WinCE. -+ *! 20-Mar-1999 ag: SP 4 fix in MEM_UMBCalloc(). -+ *! Mdl offset now ORed not added to userBuf. -+ *! 23-Dec-1997 cr: Code review changes. -+ *! 08-Dec-1997 cr: Prepared for code review. -+ *! 24-Jun-1997 cr: Created. -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+/* ----------------------------------- Defines */ -+#define MEM_512MB 0x1fffffff -+#define memInfoSign 0x464E494D /* "MINF" (in reverse). */ -+ -+#ifdef DEBUG -+#define MEM_CHECK /* Use to detect source of memory leaks */ -+#endif -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask MEM_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static u32 cRefs; /* module reference count */ -+ -+static bool extPhysMemPoolEnabled; -+ -+struct extPhysMemPool { -+ u32 physMemBase; -+ u32 physMemSize; -+ u32 virtMemBase; -+ u32 nextPhysAllocPtr; -+}; -+ -+static struct extPhysMemPool extMemPool; -+ -+/* Information about each element allocated on heap */ -+struct memInfo { -+ struct LST_ELEM link; /* Must be first */ -+ size_t size; -+ void *caller; -+ u32 dwSignature; /* Should be last */ -+}; -+ -+#ifdef MEM_CHECK -+ -+/* -+ * This structure holds a linked list to all memory elements allocated on -+ * heap by DSP/BIOS Bridge. This is used to report memory leaks and free -+ * such elements while removing the DSP/BIOS Bridge driver -+ */ -+struct memMan { -+ struct LST_LIST lst; -+ spinlock_t lock; -+}; -+ -+static struct memMan mMan; -+ -+/* -+ * These functions are similar to LST_PutTail and LST_RemoveElem and are -+ * duplicated here to make MEM independent of LST -+ */ -+static inline void MLST_PutTail(struct LST_LIST *pList, struct LST_ELEM *pElem) -+{ -+ pElem->prev = pList->head.prev; -+ pElem->next = &pList->head; -+ pList->head.prev = pElem; -+ pElem->prev->next = pElem; -+ pElem->self = pElem; -+} -+ -+static inline void MLST_RemoveElem(struct LST_LIST *pList, -+ struct LST_ELEM *pCurElem) -+{ -+ pCurElem->prev->next = pCurElem->next; -+ pCurElem->next->prev = pCurElem->prev; -+ pCurElem->next = NULL; -+ pCurElem->prev = NULL; -+} -+ -+static void MEM_Check(void) -+{ -+ struct memInfo *pMem; -+ struct LST_ELEM *last = &mMan.lst.head; -+ struct LST_ELEM *curr = mMan.lst.head.next; -+ -+ if (!LST_IsEmpty(&mMan.lst)) { -+ GT_0trace(MEM_debugMask, GT_7CLASS, "*** MEMORY LEAK ***\n"); -+ GT_0trace(MEM_debugMask, GT_7CLASS, -+ "Addr Size Caller\n"); -+ while (curr != last) { -+ pMem = (struct memInfo *)curr; -+ curr = curr->next; -+ if ((u32)pMem > PAGE_OFFSET && -+ MEM_IsValidHandle(pMem, memInfoSign)) { -+ GT_3trace(MEM_debugMask, GT_7CLASS, -+ "%lx %d\t [<%p>]\n", -+ (u32) pMem + sizeof(struct memInfo), -+ pMem->size, pMem->caller); -+ MLST_RemoveElem(&mMan.lst, -+ (struct LST_ELEM *) pMem); -+ kfree(pMem); -+ } else { -+ GT_1trace(MEM_debugMask, GT_7CLASS, -+ "Invalid allocation or " -+ "Buffer underflow at %x\n", -+ (u32)pMem + sizeof(struct memInfo)); -+ break; -+ } -+ } -+ } -+ DBC_Ensure(LST_IsEmpty(&mMan.lst)); -+} -+ -+#endif -+ -+void MEM_ExtPhysPoolInit(u32 poolPhysBase, u32 poolSize) -+{ -+ u32 poolVirtBase; -+ -+ /* get the virtual address for the physical memory pool passed */ -+ poolVirtBase = (u32)ioremap(poolPhysBase, poolSize); -+ -+ if 
((void **)poolVirtBase == NULL) { -+ GT_0trace(MEM_debugMask, GT_7CLASS, -+ "[PHYS_POOL]Mapping External " -+ "physical memory to virt failed \n"); -+ extPhysMemPoolEnabled = false; -+ } else { -+ extMemPool.physMemBase = poolPhysBase; -+ extMemPool.physMemSize = poolSize; -+ extMemPool.virtMemBase = poolVirtBase; -+ extMemPool.nextPhysAllocPtr = poolPhysBase; -+ extPhysMemPoolEnabled = true; -+ GT_3trace(MEM_debugMask, GT_1CLASS, -+ "ExtMemory Pool details " "Pool" -+ "Physical mem base = %0x " "Pool Physical mem size " -+ "= %0x" "Pool Virtual mem base = %0x \n", -+ poolPhysBase, poolSize, poolVirtBase); -+ } -+} -+ -+static void MEM_ExtPhysPoolRelease(void) -+{ -+ GT_0trace(MEM_debugMask, GT_1CLASS, -+ "Releasing External memory pool \n"); -+ if (extPhysMemPoolEnabled) { -+ iounmap((void *)(extMemPool.virtMemBase)); -+ extPhysMemPoolEnabled = false; -+ } -+} -+ -+/* -+ * ======== MEM_ExtPhysMemAlloc ======== -+ * Purpose: -+ * Allocate physically contiguous, uncached memory from external memory pool -+ */ -+ -+static void *MEM_ExtPhysMemAlloc(u32 bytes, u32 align, OUT u32 *pPhysAddr) -+{ -+ u32 newAllocPtr; -+ u32 offset; -+ u32 virtAddr; -+ -+ GT_2trace(MEM_debugMask, GT_1CLASS, -+ "Ext Memory Allocation" "bytes=0x%x , " -+ "align=0x%x \n", bytes, align); -+ if (align == 0) { -+ GT_0trace(MEM_debugMask, GT_7CLASS, -+ "ExtPhysical Memory Allocation " -+ "No alignment request in allocation call !! \n"); -+ align = 1; -+ } -+ if (bytes > ((extMemPool.physMemBase + extMemPool.physMemSize) -+ - extMemPool.nextPhysAllocPtr)) { -+ GT_1trace(MEM_debugMask, GT_7CLASS, -+ "ExtPhysical Memory Allocation " -+ "unable to allocate memory for bytes = 0x%x \n", -+ bytes); -+ pPhysAddr = NULL; -+ return NULL; -+ } else { -+ offset = (extMemPool.nextPhysAllocPtr & (align - 1)); -+ if (offset == 0) -+ newAllocPtr = extMemPool.nextPhysAllocPtr; -+ else -+ newAllocPtr = (extMemPool.nextPhysAllocPtr) + -+ (align - offset); -+ if ((newAllocPtr + bytes) <= -+ (extMemPool.physMemBase + extMemPool.physMemSize)) { -+ /* we can allocate */ -+ *pPhysAddr = newAllocPtr; -+ extMemPool.nextPhysAllocPtr = newAllocPtr + bytes; -+ virtAddr = extMemPool.virtMemBase + (newAllocPtr - -+ extMemPool.physMemBase); -+ GT_2trace(MEM_debugMask, GT_1CLASS, -+ "Ext Memory Allocation succedded " -+ "phys address=0x%x , virtaddress=0x%x \n", -+ newAllocPtr, virtAddr); -+ return (void *)virtAddr; -+ } else { -+ *pPhysAddr = 0; -+ return NULL; -+ } -+ } -+} -+ -+/* -+ * ======== MEM_Alloc ======== -+ * Purpose: -+ * Allocate memory from the paged or non-paged pools. -+ */ -+void *MEM_Alloc(u32 cBytes, enum MEM_POOLATTRS type) -+{ -+ struct memInfo *pMem = NULL; -+ -+ GT_2trace(MEM_debugMask, GT_ENTER, -+ "MEM_Alloc: cBytes 0x%x\ttype 0x%x\n", cBytes, type); -+ if (cBytes > 0) { -+ switch (type) { -+ case MEM_NONPAGED: -+ /* If non-paged memory required, see note at top of file. */ -+ case MEM_PAGED: -+#ifndef MEM_CHECK -+ pMem = kmalloc(cBytes, -+ (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL); -+#else -+ pMem = kmalloc(cBytes + sizeof(struct memInfo), -+ (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL); -+ if (pMem) { -+ pMem->size = cBytes; -+ pMem->caller = __builtin_return_address(0); -+ pMem->dwSignature = memInfoSign; -+ -+ spin_lock(&mMan.lock); -+ MLST_PutTail(&mMan.lst, -+ (struct LST_ELEM *)pMem); -+ spin_unlock(&mMan.lock); -+ -+ pMem = (void *)((u32)pMem + -+ sizeof(struct memInfo)); -+ } -+#endif -+ break; -+ case MEM_LARGEVIRTMEM: -+#ifndef MEM_CHECK -+ pMem = vmalloc(cBytes); -+#else -+ pMem = vmalloc(cBytes + sizeof(struct memInfo)); -+ if (pMem) { -+ pMem->size = cBytes; -+ pMem->caller = __builtin_return_address(0); -+ pMem->dwSignature = memInfoSign; -+ -+ spin_lock(&mMan.lock); -+ MLST_PutTail(&mMan.lst, -+ (struct LST_ELEM *) pMem); -+ spin_unlock(&mMan.lock); -+ -+ pMem = (void *)((u32)pMem + -+ sizeof(struct memInfo)); -+ } -+#endif -+ break; -+ -+ default: -+ GT_0trace(MEM_debugMask, GT_6CLASS, -+ "MEM_Alloc: unexpected " -+ "MEM_POOLATTRS value\n"); -+ break; -+ } -+ } -+ -+ return pMem; -+} -+ -+/* -+ * ======== MEM_AllocPhysMem ======== -+ * Purpose: -+ * Allocate physically contiguous, uncached memory -+ */ -+void *MEM_AllocPhysMem(u32 cBytes, u32 ulAlign, OUT u32 *pPhysicalAddress) -+{ -+ void *pVaMem = NULL; -+ dma_addr_t paMem; -+ -+ DBC_Require(cRefs > 0); -+ -+ GT_2trace(MEM_debugMask, GT_ENTER, -+ "MEM_AllocPhysMem: cBytes 0x%x\tulAlign" -+ "0x%x\n", cBytes, ulAlign); -+ -+ if (cBytes > 0) { -+ if (extPhysMemPoolEnabled) { -+ pVaMem = MEM_ExtPhysMemAlloc(cBytes, ulAlign, -+ (u32 *)&paMem); -+ } else -+ pVaMem = dma_alloc_coherent(NULL, cBytes, &paMem, -+ (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL); -+ if (pVaMem == NULL) { -+ *pPhysicalAddress = 0; -+ GT_1trace(MEM_debugMask, GT_6CLASS, -+ "MEM_AllocPhysMem failed: " -+ "0x%x\n", pVaMem); -+ } else { -+ *pPhysicalAddress = paMem; -+ } -+ } -+ return pVaMem; -+} -+ -+/* -+ * ======== MEM_Calloc ======== -+ * Purpose: -+ * Allocate zero-initialized memory from the paged or non-paged pools. -+ */ -+void *MEM_Calloc(u32 cBytes, enum MEM_POOLATTRS type) -+{ -+ struct memInfo *pMem = NULL; -+ -+ GT_2trace(MEM_debugMask, GT_ENTER, -+ "MEM_Calloc: cBytes 0x%x\ttype 0x%x\n", -+ cBytes, type); -+ -+ if (cBytes > 0) { -+ switch (type) { -+ case MEM_NONPAGED: -+ /* If non-paged memory required, see note at top of file. */ -+ case MEM_PAGED: -+#ifndef MEM_CHECK -+ pMem = kmalloc(cBytes, -+ (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL); -+ if (pMem) -+ memset(pMem, 0, cBytes); -+ -+#else -+ pMem = kmalloc(cBytes + sizeof(struct memInfo), -+ (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL); -+ if (pMem) { -+ memset((void *)((u32)pMem + -+ sizeof(struct memInfo)), 0, cBytes); -+ pMem->size = cBytes; -+ pMem->caller = __builtin_return_address(0); -+ pMem->dwSignature = memInfoSign; -+ spin_lock(&mMan.lock); -+ MLST_PutTail(&mMan.lst, -+ (struct LST_ELEM *) pMem); -+ spin_unlock(&mMan.lock); -+ pMem = (void *)((u32)pMem + -+ sizeof(struct memInfo)); -+ } -+#endif -+ break; -+ case MEM_LARGEVIRTMEM: -+#ifndef MEM_CHECK -+ pMem = vmalloc(cBytes); -+ if (pMem) -+ memset(pMem, 0, cBytes); -+#else -+ pMem = vmalloc(cBytes + sizeof(struct memInfo)); -+ if (pMem) { -+ memset((void *)((u32)pMem + -+ sizeof(struct memInfo)), 0, cBytes); -+ pMem->size = cBytes; -+ pMem->caller = __builtin_return_address(0); -+ pMem->dwSignature = memInfoSign; -+ spin_lock(&mMan.lock); -+ MLST_PutTail(&mMan.lst, (struct LST_ELEM *) -+ pMem); -+ spin_unlock(&mMan.lock); -+ pMem = (void *)((u32)pMem + -+ sizeof(struct memInfo)); -+ } -+#endif -+ break; -+ default: -+ GT_1trace(MEM_debugMask, GT_6CLASS, -+ "MEM_Calloc: unexpected " -+ "MEM_POOLATTRS value 0x%x\n", type); -+ break; -+ } -+ } -+ -+ return pMem; -+} -+ -+/* -+ * ======== MEM_Exit ======== -+ * Purpose: -+ * Discontinue usage of the MEM module. -+ */ -+void MEM_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(MEM_debugMask, GT_5CLASS, "MEM_Exit: cRefs 0x%x\n", cRefs); -+ -+ cRefs--; -+#ifdef MEM_CHECK -+ if (cRefs == 0) -+ MEM_Check(); -+ -+#endif -+ MEM_ExtPhysPoolRelease(); -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== MEM_FlushCache ======== -+ * Purpose: -+ * Flush cache -+ */ -+void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType) -+{ -+ DBC_Require(cRefs > 0); -+ -+ switch (FlushType) { -+ /* invalidate only */ -+ case PROC_INVALIDATE_MEM: -+ dmac_inv_range(pMemBuf, pMemBuf + cBytes); -+ outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf + -+ cBytes)); -+ break; -+ /* writeback only */ -+ case PROC_WRITEBACK_MEM: -+ dmac_clean_range(pMemBuf, pMemBuf + cBytes); -+ outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf + -+ cBytes)); -+ break; -+ /* writeback and invalidate */ -+ case PROC_WRITEBACK_INVALIDATE_MEM: -+ dmac_flush_range(pMemBuf, pMemBuf + cBytes); -+ outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf + -+ cBytes)); -+ break; -+ default: -+ GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid " -+ "FlushMemType 0x%x\n", FlushType); -+ break; -+ } -+ -+} -+ -+/* -+ * ======== MEM_VFree ======== -+ * Purpose: -+ * Free the given block of system memory in virtual space. -+ */ -+void MEM_VFree(IN void *pMemBuf) -+{ -+#ifdef MEM_CHECK -+ struct memInfo *pMem = (void *)((u32)pMemBuf - sizeof(struct memInfo)); -+#endif -+ -+ DBC_Require(pMemBuf != NULL); -+ -+ GT_1trace(MEM_debugMask, GT_ENTER, "MEM_VFree: pMemBufs 0x%x\n", -+ pMemBuf); -+ -+ if (pMemBuf) { -+#ifndef MEM_CHECK -+ vfree(pMemBuf); -+#else -+ if (pMem) { -+ if (pMem->dwSignature == memInfoSign) { -+ spin_lock(&mMan.lock); -+ MLST_RemoveElem(&mMan.lst, -+ (struct LST_ELEM *) pMem); -+ spin_unlock(&mMan.lock); -+ pMem->dwSignature = 0; -+ vfree(pMem); -+ } else { -+ GT_1trace(MEM_debugMask, GT_7CLASS, -+ "Invalid allocation or " -+ "Buffer underflow at %x\n", -+ (u32) pMem + sizeof(struct memInfo)); -+ } -+ } -+#endif -+ } -+} -+ -+/* -+ * ======== MEM_Free ======== -+ * Purpose: -+ * Free the given block of system memory. 
-+ */ -+void MEM_Free(IN void *pMemBuf) -+{ -+#ifdef MEM_CHECK -+ struct memInfo *pMem = (void *)((u32)pMemBuf - sizeof(struct memInfo)); -+#endif -+ -+ DBC_Require(pMemBuf != NULL); -+ -+ GT_1trace(MEM_debugMask, GT_ENTER, "MEM_Free: pMemBufs 0x%x\n", -+ pMemBuf); -+ -+ if (pMemBuf) { -+#ifndef MEM_CHECK -+ kfree(pMemBuf); -+#else -+ if (pMem) { -+ if (pMem->dwSignature == memInfoSign) { -+ spin_lock(&mMan.lock); -+ MLST_RemoveElem(&mMan.lst, -+ (struct LST_ELEM *) pMem); -+ spin_unlock(&mMan.lock); -+ pMem->dwSignature = 0; -+ kfree(pMem); -+ } else { -+ GT_1trace(MEM_debugMask, GT_7CLASS, -+ "Invalid allocation or " -+ "Buffer underflow at %x\n", -+ (u32) pMem + sizeof(struct memInfo)); -+ } -+ } -+#endif -+ } -+} -+ -+/* -+ * ======== MEM_FreePhysMem ======== -+ * Purpose: -+ * Free the given block of physically contiguous memory. -+ */ -+void MEM_FreePhysMem(void *pVirtualAddress, u32 pPhysicalAddress, -+ u32 cBytes) -+{ -+ DBC_Require(cRefs > 0); -+ DBC_Require(pVirtualAddress != NULL); -+ -+ GT_1trace(MEM_debugMask, GT_ENTER, "MEM_FreePhysMem: pVirtualAddress " -+ "0x%x\n", pVirtualAddress); -+ -+ if (!extPhysMemPoolEnabled) -+ dma_free_coherent(NULL, cBytes, pVirtualAddress, -+ pPhysicalAddress); -+} -+ -+/* -+ * ======== MEM_Init ======== -+ * Purpose: -+ * Initialize MEM module private state. -+ */ -+bool MEM_Init(void) -+{ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ GT_create(&MEM_debugMask, "MM"); /* MM for MeM module */ -+ -+#ifdef MEM_CHECK -+ mMan.lst.head.next = &mMan.lst.head; -+ mMan.lst.head.prev = &mMan.lst.head; -+ mMan.lst.head.self = NULL; -+ spin_lock_init(&mMan.lock); -+#endif -+ -+ } -+ -+ cRefs++; -+ -+ GT_1trace(MEM_debugMask, GT_5CLASS, "MEM_Init: cRefs 0x%x\n", cRefs); -+ -+ DBC_Ensure(cRefs > 0); -+ -+ return true; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/ntfy.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/ntfy.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/ntfy.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/ntfy.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,329 @@ -+/* -+ * ntfy.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== ntfyce.c ======== -+ * Purpose: -+ * Manage lists of notification events. -+ * -+ * Public Functions: -+ * NTFY_Create -+ * NTFY_Delete -+ * NTFY_Exit -+ * NTFY_Init -+ * NTFY_Notify -+ * NTFY_Register -+ * -+ *! Revision History: -+ *! ================= -+ *! 06-Feb-2003 kc Removed DSP_POSTMESSAGE related code. -+ *! 05-Nov-2001 kc Updated DSP_HNOTIFICATION structure. -+ *! 10-May-2001 jeh Removed SERVICES module init/exit from NTFY_Init/Exit. -+ *! NTFY_Register() returns DSP_ENOTIMPL for all but -+ *! DSP_SIGNALEVENT. -+ *! 12-Oct-2000 jeh Use MEM_IsValidHandle(). -+ *! 07-Sep-2000 jeh Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define NTFY_SIGNATURE 0x5946544e /* "YFTN" */ -+ -+/* -+ * ======== NTFY_OBJECT ======== -+ */ -+struct NTFY_OBJECT { -+ u32 dwSignature; /* For object validation */ -+ struct LST_LIST *notifyList; /* List of NOTIFICATION objects */ -+ struct SYNC_CSOBJECT *hSync; /* For critical sections */ -+}; -+ -+/* -+ * ======== NOTIFICATION ======== -+ * This object will be created when a client registers for events. -+ */ -+struct NOTIFICATION { -+ struct LST_ELEM listElem; -+ u32 uEventMask; /* Events to be notified about */ -+ u32 uNotifyType; /* Type of notification to be sent */ -+ -+ /* -+ * We keep a copy of the event name to check if the event has -+ * already been registered. (SYNC also keeps a copy of the name). -+ */ -+ char *pstrName; /* Name of event */ -+ HANDLE hEvent; /* Handle for notification */ -+ struct SYNC_OBJECT *hSync; -+}; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask NTFY_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+/* ----------------------------------- Function Prototypes */ -+static void DeleteNotify(struct NOTIFICATION *pNotify); -+ -+/* -+ * ======== NTFY_Create ======== -+ * Purpose: -+ * Create an empty list of notifications. -+ */ -+DSP_STATUS NTFY_Create(struct NTFY_OBJECT **phNtfy) -+{ -+ struct NTFY_OBJECT *pNtfy; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(phNtfy != NULL); -+ -+ *phNtfy = NULL; -+ MEM_AllocObject(pNtfy, struct NTFY_OBJECT, NTFY_SIGNATURE); -+ -+ if (pNtfy) { -+ -+ status = SYNC_InitializeDPCCS(&pNtfy->hSync); -+ if (DSP_SUCCEEDED(status)) { -+ pNtfy->notifyList = LST_Create(); -+ if (pNtfy->notifyList == NULL) { -+ (void) SYNC_DeleteCS(pNtfy->hSync); -+ MEM_FreeObject(pNtfy); -+ status = DSP_EMEMORY; -+ } else { -+ *phNtfy = pNtfy; -+ } -+ } -+ } else { -+ status = DSP_EMEMORY; -+ } -+ -+ DBC_Ensure((DSP_FAILED(status) && *phNtfy == NULL) || -+ (DSP_SUCCEEDED(status) && MEM_IsValidHandle((*phNtfy), -+ NTFY_SIGNATURE))); -+ -+ return status; -+} -+ -+/* -+ * ======== NTFY_Delete ======== -+ * Purpose: -+ * Free resources allocated in NTFY_Create. -+ */ -+void NTFY_Delete(struct NTFY_OBJECT *hNtfy) -+{ -+ struct NOTIFICATION *pNotify; -+ -+ DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); -+ -+ /* Remove any elements remaining in list */ -+ if (hNtfy->notifyList) { -+ while ((pNotify = (struct NOTIFICATION *)LST_GetHead(hNtfy-> -+ notifyList))) { -+ DeleteNotify(pNotify); -+ } -+ DBC_Assert(LST_IsEmpty(hNtfy->notifyList)); -+ LST_Delete(hNtfy->notifyList); -+ } -+ if (hNtfy->hSync) -+ (void)SYNC_DeleteCS(hNtfy->hSync); -+ -+ MEM_FreeObject(hNtfy); -+} -+ -+/* -+ * ======== NTFY_Exit ======== -+ * Purpose: -+ * Discontinue usage of NTFY module. -+ */ -+void NTFY_Exit(void) -+{ -+ GT_0trace(NTFY_debugMask, GT_5CLASS, "Entered NTFY_Exit\n"); -+} -+ -+/* -+ * ======== NTFY_Init ======== -+ * Purpose: -+ * Initialize the NTFY module. 
-+ */ -+bool NTFY_Init(void) -+{ -+ GT_create(&NTFY_debugMask, "NY"); /* "NY" for NtfY */ -+ -+ GT_0trace(NTFY_debugMask, GT_5CLASS, "NTFY_Init()\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== NTFY_Notify ======== -+ * Purpose: -+ * Execute notify function (signal event) for every -+ * element in the notification list that is to be notified about the -+ * event specified in uEventMask. -+ */ -+void NTFY_Notify(struct NTFY_OBJECT *hNtfy, u32 uEventMask) -+{ -+ struct NOTIFICATION *pNotify; -+ -+ DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); -+ -+ /* -+ * Go through notifyList and notify all clients registered for -+ * uEventMask events. -+ */ -+ -+ (void) SYNC_EnterCS(hNtfy->hSync); -+ -+ pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList); -+ while (pNotify != NULL) { -+ if (pNotify->uEventMask & uEventMask) { -+ /* Notify */ -+ if (pNotify->uNotifyType == DSP_SIGNALEVENT) -+ (void)SYNC_SetEvent(pNotify->hSync); -+ -+ } -+ pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList, -+ (struct LST_ELEM *)pNotify); -+ } -+ -+ (void) SYNC_LeaveCS(hNtfy->hSync); -+} -+ -+/* -+ * ======== NTFY_Register ======== -+ * Purpose: -+ * Add a notification element to the list. If the notification is already -+ * registered, and uEventMask != 0, the notification will get posted for -+ * events specified in the new event mask. If the notification is already -+ * registered and uEventMask == 0, the notification will be unregistered. -+ */ -+DSP_STATUS NTFY_Register(struct NTFY_OBJECT *hNtfy, -+ struct DSP_NOTIFICATION *hNotification, -+ u32 uEventMask, u32 uNotifyType) -+{ -+ struct NOTIFICATION *pNotify; -+ struct SYNC_ATTRS syncAttrs; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); -+ -+ if (hNotification == NULL) -+ status = DSP_EHANDLE; -+ -+ /* Return DSP_ENOTIMPL if uNotifyType is not supported */ -+ if (DSP_SUCCEEDED(status)) { -+ if (!IsValidNotifyMask(uNotifyType)) -+ status = DSP_ENOTIMPL; -+ -+ } -+ -+ if (DSP_FAILED(status)) -+ return status; -+ -+ (void)SYNC_EnterCS(hNtfy->hSync); -+ -+ pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList); -+ while (pNotify != NULL) { -+ /* If there is more than one notification type, each -+ * type may require its own handler code. */ -+ -+ if (hNotification->handle == pNotify->hSync) { -+ /* found */ -+ break; -+ } -+ pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList, -+ (struct LST_ELEM *)pNotify); -+ } -+ if (pNotify == NULL) { -+ /* Not registered */ -+ if (uEventMask == 0) { -+ status = DSP_EVALUE; -+ } else { -+ /* Allocate NOTIFICATION object, add to list */ -+ pNotify = MEM_Calloc(sizeof(struct NOTIFICATION), -+ MEM_PAGED); -+ if (pNotify == NULL) -+ status = DSP_EMEMORY; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ LST_InitElem((struct LST_ELEM *) pNotify); -+ /* If there is more than one notification type, each -+ * type may require its own handler code. 
*/ -+ status = SYNC_OpenEvent(&pNotify->hSync, &syncAttrs); -+ hNotification->handle = pNotify->hSync; -+ -+ if (DSP_SUCCEEDED(status)) { -+ pNotify->uEventMask = uEventMask; -+ pNotify->uNotifyType = uNotifyType; -+ LST_PutTail(hNtfy->notifyList, -+ (struct LST_ELEM *)pNotify); -+ } else { -+ DeleteNotify(pNotify); -+ } -+ } -+ } else { -+ /* Found in list */ -+ if (uEventMask == 0) { -+ /* Remove from list and free */ -+ LST_RemoveElem(hNtfy->notifyList, -+ (struct LST_ELEM *)pNotify); -+ DeleteNotify(pNotify); -+ } else { -+ /* Update notification mask (type shouldn't change) */ -+ pNotify->uEventMask = uEventMask; -+ } -+ } -+ (void)SYNC_LeaveCS(hNtfy->hSync); -+ return status; -+} -+ -+/* -+ * ======== DeleteNotify ======== -+ * Purpose: -+ * Free the notification object. -+ */ -+static void DeleteNotify(struct NOTIFICATION *pNotify) -+{ -+ if (pNotify->hSync) -+ (void) SYNC_CloseEvent(pNotify->hSync); -+ -+ if (pNotify->pstrName) -+ MEM_Free(pNotify->pstrName); -+ -+ MEM_Free(pNotify); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/reg.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/reg.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/reg.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/reg.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,196 @@ -+/* -+ * reg.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== regce.c ======== -+ * Purpose: -+ * Provide registry functions. -+ * -+ * Public Functions: -+ * REG_DeleteValue -+ * REG_EnumValue -+ * REG_Exit -+ * REG_GetValue -+ * REG_Init -+ * REG_SetValue -+ * -+ *! Revision History: -+ *! ================ -+ * -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include -+#include -+ -+#if GT_TRACE -+struct GT_Mask REG_debugMask = { NULL, NULL }; /* GT trace var. */ -+#endif -+ -+/* -+ * ======== REG_DeleteValue ======== -+ * Deletes a registry entry value. NOTE: A registry entry value is not the -+ * same as * a registry key. 
-+ */ -+DSP_STATUS REG_DeleteValue(OPTIONAL IN HANDLE *phKey, IN CONST char *pstrSubkey, -+ IN CONST char *pstrValue) -+{ -+ DSP_STATUS status; -+ DBC_Require(pstrSubkey && pstrValue); -+ DBC_Require(phKey == NULL); -+ DBC_Require(strlen(pstrSubkey) < REG_MAXREGPATHLENGTH); -+ DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH); -+ -+ GT_0trace(REG_debugMask, GT_ENTER, "REG_DeleteValue: entered\n"); -+ -+ /* Note that we don't use phKey */ -+ if (regsupDeleteValue(pstrSubkey, pstrValue) == DSP_SOK) -+ status = DSP_SOK; -+ else -+ status = DSP_EFAIL; -+ -+ return status; -+} -+ -+/* -+ * ======== REG_EnumValue ======== -+ * Enumerates a registry key and retrieve values stored under the key. -+ * We will assume the input pdwValueSize is smaller than -+ * REG_MAXREGPATHLENGTH for implementation purposes. -+ */ -+DSP_STATUS REG_EnumValue(IN HANDLE *phKey, IN u32 dwIndex, -+ IN CONST char *pstrKey, IN OUT char *pstrValue, -+ IN OUT u32 *pdwValueSize, IN OUT char *pstrData, -+ IN OUT u32 *pdwDataSize) -+{ -+ DSP_STATUS status; -+ -+ DBC_Require(pstrKey && pstrValue && pdwValueSize && pstrData && -+ pdwDataSize); -+ DBC_Require(*pdwValueSize <= REG_MAXREGPATHLENGTH); -+ DBC_Require(phKey == NULL); -+ DBC_Require(strlen(pstrKey) < REG_MAXREGPATHLENGTH); -+ -+ GT_0trace(REG_debugMask, GT_ENTER, "REG_EnumValue: entered\n"); -+ -+ status = regsupEnumValue(dwIndex, pstrKey, pstrValue, pdwValueSize, -+ pstrData, pdwDataSize); -+ -+ return status; -+} -+ -+/* -+ * ======== REG_Exit ======== -+ * Discontinue usage of the REG module. -+ */ -+void REG_Exit(void) -+{ -+ GT_0trace(REG_debugMask, GT_5CLASS, "REG_Exit\n"); -+ -+ regsupExit(); -+} -+ -+/* -+ * ======== REG_GetValue ======== -+ * Retrieve a value from the registry. -+ */ -+DSP_STATUS REG_GetValue(OPTIONAL IN HANDLE *phKey, IN CONST char *pstrSubkey, -+ IN CONST char *pstrValue, OUT u8 *pbData, -+ IN OUT u32 *pdwDataSize) -+{ -+ DSP_STATUS status; -+ -+ DBC_Require(pstrSubkey && pstrValue && pbData); -+ DBC_Require(phKey == NULL); -+ DBC_Require(strlen(pstrSubkey) < REG_MAXREGPATHLENGTH); -+ DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH); -+ -+ GT_0trace(REG_debugMask, GT_ENTER, "REG_GetValue: entered\n"); -+ -+ /* We need to use regsup calls... */ -+ /* ...for now we don't need the key handle or */ -+ /* the subkey, all we need is the value to lookup. */ -+ if (regsupGetValue((char *)pstrValue, pbData, pdwDataSize) == DSP_SOK) -+ status = DSP_SOK; -+ else -+ status = DSP_EFAIL; -+ -+ return status; -+} -+ -+/* -+ * ======== REG_Init ======== -+ * Initialize the REG module's private state. -+ */ -+bool REG_Init(void) -+{ -+ bool fInit; -+ -+ GT_create(®_debugMask, "RG"); /* RG for ReG */ -+ -+ fInit = regsupInit(); -+ -+ GT_0trace(REG_debugMask, GT_5CLASS, "REG_Init\n"); -+ -+ return fInit; -+} -+ -+/* -+ * ======== REG_SetValue ======== -+ * Set a value in the registry. -+ */ -+DSP_STATUS REG_SetValue(OPTIONAL IN HANDLE *phKey, IN CONST char *pstrSubkey, -+ IN CONST char *pstrValue, IN CONST u32 dwType, -+ IN u8 *pbData, IN u32 dwDataSize) -+{ -+ DSP_STATUS status; -+ -+ DBC_Require(pstrValue && pbData); -+ DBC_Require(phKey == NULL); -+ DBC_Require(dwDataSize > 0); -+ DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH); -+ -+ /* We need to use regsup calls... */ -+ /* ...for now we don't need the key handle or */ -+ /* the subkey, all we need is the value to lookup. 
*/ -+ if (regsupSetValue((char *)pstrValue, pbData, dwDataSize) == DSP_SOK) -+ status = DSP_SOK; -+ else -+ status = DSP_EFAIL; -+ -+ return status; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/regsup.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/regsup.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,370 @@ -+/* -+ * regsup.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== regsup.c ======== -+ * Purpose: -+ * Provide registry support functions. -+ * -+ *! Revision History: -+ *! ================ -+ *! 28-May-2002 map: Integrated PSI's dspimage update mechanism -+ *! 11-May-2002 gp: Turned PERF "on". -+ *! 21-May-2002 map: Fixed bug in SetValue - if resizing datasize, set -+ *! new size too -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+struct RegValueStruct { -+ char name[BRIDGE_MAX_NAME_SIZE]; /* Name of a given value entry */ -+ u32 dataSize; /* Size of the data */ -+ void *pData; /* Pointer to the actual data */ -+}; -+ -+struct RegKeyStruct { -+ /*The current number of value entries this key has*/ -+ u32 numValueEntries; -+ /* Array of value entries */ -+ struct RegValueStruct values[BRIDGE_MAX_NUM_REG_ENTRIES]; -+}; -+ -+ -+/* Pointer to the registry support key */ -+static struct RegKeyStruct *pRegKey; -+ -+#if GT_TRACE -+extern struct GT_Mask REG_debugMask; /* GT trace var. */ -+/* -+ * ======== printS ======== -+ * Purpose: -+ * Displays printable characters in pBuf, if any. -+ */ -+static inline void printS(void *pBuf) -+{ -+ int pos = 0; -+ if (*(REG_debugMask).flags & (GT_2CLASS)) { -+ while (*(u8 *)((pBuf)+pos) >= ' ' && -+ *(u8 *)((pBuf)+pos) <= '~') { -+ GT_1trace(REG_debugMask, GT_2CLASS, "%c", -+ *(u8 *)((pBuf) + pos++)); -+ } -+ -+ GT_0trace(REG_debugMask, GT_2CLASS, "\n"); -+ } -+} -+#else -+#define printS(pBuf) -+#endif -+ -+/* -+ * ======== regsupInit ======== -+ * Purpose: -+ * Initialize the Registry Support module's private state. -+ */ -+bool regsupInit(void) -+{ -+ if (pRegKey != NULL) -+ return true; -+ -+ /* Need to allocate and setup our registry. */ -+ pRegKey = MEM_Calloc(sizeof(struct RegKeyStruct), MEM_NONPAGED); -+ if (pRegKey == NULL) -+ return false; -+ -+ return true; -+} -+ -+/* -+ * ======== regsupExit ======== -+ * Purpose: -+ * Release all registry support allocations. -+ */ -+void regsupExit(void) -+{ -+ u32 i; -+ -+ /* Make sure data has actually been allocated. */ -+ if (pRegKey == NULL) { -+ /* Nothing initialized.return! 
*/ -+ return; -+ } -+ -+ GT_1trace(REG_debugMask, GT_2CLASS, "pRegKey->numValueEntries %d\n", -+ pRegKey->numValueEntries); -+ -+ /* Now go through each entry and free all resources. */ -+ for (i = 0; ((i < BRIDGE_MAX_NUM_REG_ENTRIES) && -+ (i < pRegKey->numValueEntries)); i++) { -+ if (pRegKey->values[i].name[0] != '\0') { -+ /* We have a valid entry.free it up! */ -+ if (pRegKey->values[i].pData != NULL) { -+ GT_3trace(REG_debugMask, GT_2CLASS, -+ "E %d\t %s DATA %x ", i, -+ pRegKey->values[i].name, -+ *(u32 *)pRegKey->values[i].pData); -+ printS((u8 *)(pRegKey->values[i].pData)); -+ MEM_Free(pRegKey->values[i].pData); -+ } -+ pRegKey->values[i].pData = NULL; -+ pRegKey->values[i].dataSize = 0; -+ pRegKey->values[i].name[0] = '\0'; -+ } -+ } -+ -+ /* Now that all of the resources are freed up, free the main one! */ -+ MEM_Free(pRegKey); -+ -+ /* Don't forget to NULL out the global entry! */ -+ pRegKey = NULL; -+} -+ -+/* -+ * ======== regsupGetValue ======== -+ * Purpose: -+ * Get the value of the entry having the given name. -+ */ -+DSP_STATUS regsupGetValue(char *valName, void *pBuf, u32 *dataSize) -+{ -+ DSP_STATUS retVal = DSP_EFAIL; -+ u32 i; -+ -+ /* Need to search through the entries looking for the right one. */ -+ for (i = 0; i < pRegKey->numValueEntries; i++) { -+ /* See if the name matches. */ -+ if (strncmp(pRegKey->values[i].name, valName, -+ BRIDGE_MAX_NAME_SIZE) == 0) { -+ -+ /* We have a match! Copy out the data. */ -+ memcpy(pBuf, pRegKey->values[i].pData, -+ pRegKey->values[i].dataSize); -+ -+ /* Get the size for the caller. */ -+ *dataSize = pRegKey->values[i].dataSize; -+ -+ /* Set our status to good and exit. */ -+ retVal = DSP_SOK; -+ break; -+ } -+ } -+ -+ if (DSP_SUCCEEDED(retVal)) { -+ GT_2trace(REG_debugMask, GT_2CLASS, "G %s DATA %x ", valName, -+ *(u32 *)pBuf); -+ printS((u8 *)pBuf); -+ } else { -+ GT_1trace(REG_debugMask, GT_3CLASS, "G %s FAILED\n", valName); -+ } -+ -+ return retVal; -+} -+ -+/* -+ * ======== regsupSetValue ======== -+ * Purpose: -+ * Sets the value of the entry having the given name. -+ */ -+DSP_STATUS regsupSetValue(char *valName, void *pBuf, u32 dataSize) -+{ -+ DSP_STATUS retVal = DSP_EFAIL; -+ u32 i; -+ -+ GT_2trace(REG_debugMask, GT_2CLASS, "S %s DATA %x ", valName, -+ *(u32 *)pBuf); -+ printS((u8 *)pBuf); -+ -+ /* Need to search through the entries looking for the right one. */ -+ for (i = 0; i < pRegKey->numValueEntries; i++) { -+ /* See if the name matches. */ -+ if (strncmp(pRegKey->values[i].name, valName, -+ BRIDGE_MAX_NAME_SIZE) == 0) { -+ /* Make sure the new data size is the same. */ -+ if (dataSize != pRegKey->values[i].dataSize) { -+ /* The caller needs a different data size! */ -+ MEM_Free(pRegKey->values[i].pData); -+ pRegKey->values[i].pData = MEM_Alloc(dataSize, -+ MEM_NONPAGED); -+ if (pRegKey->values[i].pData == NULL) -+ break; -+ -+ } -+ -+ /* We have a match! Copy out the data. */ -+ memcpy(pRegKey->values[i].pData, pBuf, dataSize); -+ -+ /* Reset datasize - overwrite if new or same */ -+ pRegKey->values[i].dataSize = dataSize; -+ -+ /* Set our status to good and exit. */ -+ retVal = DSP_SOK; -+ break; -+ } -+ } -+ -+ /* See if we found a match or if this is a new entry */ -+ if (i == pRegKey->numValueEntries) { -+ /* No match, need to make a new entry */ -+ /* First check to see if we can make any more entries. 
*/ -+ if (pRegKey->numValueEntries < BRIDGE_MAX_NUM_REG_ENTRIES) { -+ char *tmp_name = -+ pRegKey->values[pRegKey->numValueEntries].name; -+ strncpy(tmp_name, valName, BRIDGE_MAX_NAME_SIZE - 1); -+ tmp_name[BRIDGE_MAX_NAME_SIZE - 1] = '\0'; -+ pRegKey->values[pRegKey->numValueEntries].pData = -+ MEM_Alloc(dataSize, MEM_NONPAGED); -+ if (pRegKey->values[pRegKey->numValueEntries].pData != -+ NULL) { -+ memcpy(pRegKey-> -+ values[pRegKey->numValueEntries].pData, -+ pBuf, dataSize); -+ pRegKey-> -+ values[pRegKey->numValueEntries].dataSize = -+ dataSize; -+ pRegKey->numValueEntries++; -+ retVal = DSP_SOK; -+ } -+ } else { -+ GT_0trace(REG_debugMask, GT_7CLASS, -+ "MAX NUM REG ENTRIES REACHED\n"); -+ } -+ } -+ -+ return retVal; -+} -+ -+/* -+ * ======== regsupEnumValue ======== -+ * Purpose: -+ * Returns registry "values" and their "data" under a (sub)key. -+ */ -+DSP_STATUS regsupEnumValue(IN u32 dwIndex, IN CONST char *pstrKey, -+ IN OUT char *pstrValue, IN OUT u32 *pdwValueSize, -+ IN OUT char *pstrData, IN OUT u32 *pdwDataSize) -+{ -+ DSP_STATUS retVal = REG_E_INVALIDSUBKEY; -+ u32 i; -+ u32 dwKeyLen; -+ u32 count = 0; -+ -+ DBC_Require(pstrKey); -+ dwKeyLen = strlen(pstrKey); -+ -+ /* Need to search through the entries looking for the right one. */ -+ for (i = 0; i < pRegKey->numValueEntries; i++) { -+ /* See if the name matches. */ -+ if ((strncmp(pRegKey->values[i].name, pstrKey, -+ dwKeyLen) == 0) && count++ == dwIndex) { -+ /* We have a match! Copy out the data. */ -+ memcpy(pstrData, pRegKey->values[i].pData, -+ pRegKey->values[i].dataSize); -+ /* Get the size for the caller. */ -+ *pdwDataSize = pRegKey->values[i].dataSize; -+ *pdwValueSize = strlen(&(pRegKey-> -+ values[i].name[dwKeyLen])); -+ strncpy(pstrValue, -+ &(pRegKey->values[i].name[dwKeyLen]), -+ *pdwValueSize + 1); -+ GT_3trace(REG_debugMask, GT_2CLASS, -+ "E Key %s, Value %s, Data %x ", -+ pstrKey, pstrValue, *(u32 *)pstrData); -+ printS((u8 *)pstrData); -+ /* Set our status to good and exit. */ -+ retVal = DSP_SOK; -+ break; -+ } -+ } -+ -+ if (count && DSP_FAILED(retVal)) -+ retVal = REG_E_NOMOREITEMS; -+ -+ return retVal; -+} -+ -+/* -+ * ======== regsupDeleteValue ======== -+ */ -+DSP_STATUS regsupDeleteValue(IN CONST char *pstrSubkey, -+ IN CONST char *pstrValue) -+{ -+ DSP_STATUS retVal = DSP_EFAIL; -+ u32 i; -+ -+ for (i = 0; ((i < BRIDGE_MAX_NUM_REG_ENTRIES) && -+ (i < pRegKey->numValueEntries)); i++) { -+ /* See if the name matches... */ -+ if (strncmp(pRegKey->values[i].name, pstrValue, -+ BRIDGE_MAX_NAME_SIZE) == 0) { -+ /* We have a match! Delete this key. To delete a -+ * key, we free all resources associated with this -+ * key and, if we're not already the last entry in -+ * the array, we copy that entry into this deleted -+ * key. -+ */ -+ MEM_Free(pRegKey->values[i].pData); -+ if ((pRegKey->numValueEntries - 1) == i) { -+ /* we're deleting the last one */ -+ pRegKey->values[i].name[0] = '\0'; -+ pRegKey->values[i].dataSize = 0; -+ pRegKey->values[i].pData = NULL; -+ } else { -+ /* move the last one here */ -+ strncpy(pRegKey->values[i].name, pRegKey-> -+ values[pRegKey->numValueEntries - 1].name, -+ BRIDGE_MAX_NAME_SIZE); -+ pRegKey->values[i].dataSize = -+ pRegKey-> -+ values[pRegKey->numValueEntries-1].dataSize; -+ pRegKey->values[i].pData = -+ pRegKey-> -+ values[pRegKey->numValueEntries-1].pData; -+ /* don't have to do this, but for -+ * the paranoid... */ -+ pRegKey-> -+ values[pRegKey->numValueEntries-1].name[0] = -+ '\0'; -+ } -+ -+ /* another one bites the dust. 
*/ -+ pRegKey->numValueEntries--; -+ -+ /* Set our status to good and exit... */ -+ retVal = DSP_SOK; -+ break; -+ } -+ } -+ return retVal; -+ -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/regsup.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/regsup.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/regsup.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,58 @@ -+/* -+ * regsup.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== regsup.h ======== -+ * -+ *! Revision History -+ *! ================ -+ */ -+ -+#ifndef _REGSUP_H_ -+#define _REGSUP_H_ -+ -+#define BRIDGE_MAX_NAME_SIZE MAXREGPATHLENGTH -+#define BRIDGE_MAX_NUM_REG_ENTRIES 52 -+ -+/* Init function. MUST be called BEFORE any calls are */ -+/* made into this psuedo-registry!!! Returns TRUE/FALSE for SUCCESS/ERROR */ -+extern bool regsupInit(void); -+ -+/* Release all registry support allocations. */ -+extern void regsupExit(void); -+ -+/* -+ * ======== regsupDeleteValue ======== -+ */ -+extern DSP_STATUS regsupDeleteValue(IN CONST char *pstrSubkey, -+ IN CONST char *pstrValue); -+/* Get the value of the entry having the given name. Returns DSP_SOK */ -+/* if an entry was found and the value retrieved. Returns DSP_EFAIL -+ * otherwise.*/ -+extern DSP_STATUS regsupGetValue(char *valName, void *pBuf, u32 *dataSize); -+ -+/* Sets the value of the entry having the given name. Returns DSP_SOK */ -+/* if an entry was found and the value set. Returns DSP_EFAIL otherwise. */ -+extern DSP_STATUS regsupSetValue(char *valName, void *pBuf, u32 dataSize); -+ -+/* Returns registry "values" and their "data" under a (sub)key. */ -+extern DSP_STATUS regsupEnumValue(IN u32 dwIndex, IN CONST char *pstrKey, -+ IN OUT char *pstrValue, IN OUT u32 *pdwValueSize, -+ IN OUT char *pstrData, IN OUT u32 *pdwDataSize); -+ -+#endif -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/services.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/services.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/services.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/services.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,193 @@ -+/* -+ * services.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== services.c ======== -+ * Purpose: -+ * Provide SERVICES loading. 
-+ * -+ * Public Functions: -+ * SERVICES_Exit -+ * SERVICES_Init -+ * -+ * -+ *! Revision History -+ *! ================ -+ *! 20-Nov-2000 rr: NTFY_Init/Exit added. -+ *! 06-Jul-2000 rr: PROC prefix changed to PRCS to accomodate RM. -+ *! 01-Feb-2000 kc: Created. -+ */ -+ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask SERVICES_debugMask = { NULL, NULL }; /* GT trace var. */ -+#endif -+ -+static u32 cRefs; /* SERVICES module reference count */ -+ -+/* -+ * ======== SERVICES_Exit ======== -+ * Purpose: -+ * Discontinue usage of module; free resources when reference count -+ * reaches 0. -+ */ -+void SERVICES_Exit(void) -+{ -+ DBC_Require(cRefs > 0); -+ -+ GT_1trace(SERVICES_debugMask, GT_5CLASS, "SERVICES_Exit: cRefs 0x%x\n", -+ cRefs); -+ -+ cRefs--; -+ if (cRefs == 0) { -+ /* Uninitialize all SERVICES modules here */ -+ NTFY_Exit(); -+ UTIL_Exit(); -+ SYNC_Exit(); -+ CLK_Exit(); -+ REG_Exit(); -+ LST_Exit(); -+ KFILE_Exit(); -+ DPC_Exit(); -+ DBG_Exit(); -+ CSL_Exit(); -+ CFG_Exit(); -+ MEM_Exit(); -+ -+ GT_exit(); -+ } -+ -+ DBC_Ensure(cRefs >= 0); -+} -+ -+/* -+ * ======== SERVICES_Init ======== -+ * Purpose: -+ * Initializes SERVICES modules. -+ */ -+bool SERVICES_Init(void) -+{ -+ bool fInit = true; -+ bool fCFG, fCSL, fDBG, fDPC, fKFILE, fLST, fMEM; -+ bool fREG, fSYNC, fCLK, fUTIL, fNTFY; -+ -+ DBC_Require(cRefs >= 0); -+ -+ if (cRefs == 0) { -+ -+ GT_init(); -+ GT_create(&SERVICES_debugMask, "OS"); /* OS for OSal */ -+ -+ GT_0trace(SERVICES_debugMask, GT_ENTER, -+ "SERVICES_Init: entered\n"); -+ -+ /* Perform required initialization of SERVICES modules. 
*/ -+ fMEM = MEM_Init(); -+ fREG = REG_Init(); -+ fCFG = CFG_Init(); -+ fCSL = CSL_Init(); -+ fDBG = DBG_Init(); -+ fDPC = DPC_Init(); -+ fKFILE = KFILE_Init(); -+ fLST = LST_Init(); -+ /* fREG = REG_Init(); */ -+ fSYNC = SYNC_Init(); -+ fCLK = CLK_Init(); -+ fUTIL = UTIL_Init(); -+ fNTFY = NTFY_Init(); -+ -+ fInit = fCFG && fCSL && fDBG && fDPC && fKFILE && -+ fLST && fMEM && fREG && fSYNC && fCLK && fUTIL; -+ -+ if (!fInit) { -+ if (fNTFY) -+ NTFY_Exit(); -+ -+ if (fUTIL) -+ UTIL_Exit(); -+ -+ if (fSYNC) -+ SYNC_Exit(); -+ -+ if (fCLK) -+ CLK_Exit(); -+ -+ if (fREG) -+ REG_Exit(); -+ -+ if (fLST) -+ LST_Exit(); -+ -+ if (fKFILE) -+ KFILE_Exit(); -+ -+ if (fDPC) -+ DPC_Exit(); -+ -+ if (fDBG) -+ DBG_Exit(); -+ -+ if (fCSL) -+ CSL_Exit(); -+ -+ if (fCFG) -+ CFG_Exit(); -+ -+ if (fMEM) -+ MEM_Exit(); -+ -+ } -+ } -+ -+ if (fInit) -+ cRefs++; -+ -+ GT_1trace(SERVICES_debugMask, GT_5CLASS, "SERVICES_Init: cRefs 0x%x\n", -+ cRefs); -+ -+ DBC_Ensure((fInit && (cRefs > 0)) || (!fInit && (cRefs >= 0))); -+ -+ return fInit; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/sync.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/sync.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/services/sync.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/services/sync.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,608 @@ -+/* -+ * sync.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== sync.c ======== -+ * Purpose: -+ * Synchronization services. -+ * -+ * Public Functions: -+ * SYNC_CloseEvent -+ * SYNC_DeleteCS -+ * SYNC_EnterCS -+ * SYNC_Exit -+ * SYNC_Init -+ * SYNC_InitializeCS -+ * SYNC_LeaveCS -+ * SYNC_OpenEvent -+ * SYNC_ResetEvent -+ * SYNC_SetEvent -+ * SYNC_WaitOnEvent -+ * SYNC_WaitOnMultipleEvents -+ * -+ *! Revision History: -+ *! ================ -+ *! 05-Nov-2001 kc: Minor cosmetic changes. -+ *! 05-Oct-2000 jeh Added SYNC_WaitOnMultipleEvents(). -+ *! 10-Aug-2000 rr: SYNC_PostMessage added. -+ *! 10-Jul-2000 jeh Modified SYNC_OpenEvent() to handle NULL attrs. -+ *! 03-Feb-2000 rr: Module init/exit is handled by SERVICES Init/Exit. -+ *! GT Changes. -+ *! 01-Dec-1999 ag: Added optional named event creation in SYNC_OpenEvent(). -+ *! 22-Nov-1999 kc: Added changes from code review. -+ *! 22-Sep-1999 kc: Modified from sync95.c. -+ *! 05-Aug-1996 gp: Created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define SIGNATURE 0x434e5953 /* "SYNC" (in reverse) */ -+ -+enum wait_state { -+ wo_waiting, -+ wo_signalled -+} ; -+ -+enum sync_state { -+ so_reset, -+ so_signalled -+} ; -+ -+struct WAIT_OBJECT { -+ enum wait_state state; -+ struct SYNC_OBJECT *signalling_event; -+ struct semaphore sem; -+}; -+ -+/* Generic SYNC object: */ -+struct SYNC_OBJECT { -+ u32 dwSignature; /* Used for object validation. */ -+ enum sync_state state; -+ spinlock_t sync_lock; -+ struct WAIT_OBJECT *pWaitObj; -+}; -+ -+struct SYNC_DPCCSOBJECT { -+ u32 dwSignature; /* used for object validation */ -+ spinlock_t sync_dpccs_lock; -+ s32 count; -+} ; -+ -+/* ----------------------------------- Globals */ -+#if GT_TRACE -+static struct GT_Mask SYNC_debugMask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+static int test_and_set(volatile void *ptr, int val) -+{ -+ int ret = val; -+ asm volatile (" swp %0, %0, [%1]" : "+r" (ret) : "r"(ptr) : "memory"); -+ return ret; -+} -+ -+static void timeout_callback(unsigned long hWaitObj); -+ -+/* -+ * ======== SYNC_CloseEvent ======== -+ * Purpose: -+ * Close an existing SYNC event object. -+ */ -+DSP_STATUS SYNC_CloseEvent(struct SYNC_OBJECT *hEvent) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; -+ -+ DBC_Require(pEvent != NULL && pEvent->pWaitObj == NULL); -+ -+ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_CloseEvent: hEvent 0x%x\n", -+ hEvent); -+ -+ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { -+ if (pEvent->pWaitObj) { -+ status = DSP_EFAIL; -+ GT_0trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_CloseEvent: Wait object not NULL\n"); -+ } -+ MEM_FreeObject(pEvent); -+ -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_CloseEvent: invalid " -+ "hEvent handle 0x%x\n", hEvent); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== SYNC_Exit ======== -+ * Purpose: -+ * Cleanup SYNC module. -+ */ -+void SYNC_Exit(void) -+{ -+ GT_0trace(SYNC_debugMask, GT_5CLASS, "SYNC_Exit\n"); -+} -+ -+/* -+ * ======== SYNC_Init ======== -+ * Purpose: -+ * Initialize SYNC module. -+ */ -+bool SYNC_Init(void) -+{ -+ GT_create(&SYNC_debugMask, "SY"); /* SY for SYnc */ -+ -+ GT_0trace(SYNC_debugMask, GT_5CLASS, "SYNC_Init\n"); -+ -+ return true; -+} -+ -+/* -+ * ======== SYNC_OpenEvent ======== -+ * Purpose: -+ * Open a new synchronization event object. 
-+ */ -+DSP_STATUS SYNC_OpenEvent(OUT struct SYNC_OBJECT **phEvent, -+ IN OPTIONAL struct SYNC_ATTRS *pAttrs) -+{ -+ struct SYNC_OBJECT *pEvent = NULL; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(phEvent != NULL); -+ -+ GT_2trace(SYNC_debugMask, GT_ENTER, -+ "SYNC_OpenEvent: phEvent 0x%x, pAttrs " -+ "0x%x\n", phEvent, pAttrs); -+ -+ /* Allocate memory for sync object */ -+ MEM_AllocObject(pEvent, struct SYNC_OBJECT, SIGNATURE); -+ if (pEvent != NULL) { -+ pEvent->state = so_reset; -+ pEvent->pWaitObj = NULL; -+ spin_lock_init(&pEvent->sync_lock); -+ } else { -+ status = DSP_EMEMORY; -+ GT_0trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_OpenEvent: MEM_AllocObject failed\n"); -+ } -+ -+ *phEvent = pEvent; -+ -+ return status; -+} -+ -+/* -+ * ======== SYNC_ResetEvent ======== -+ * Purpose: -+ * Reset an event to non-signalled. -+ */ -+DSP_STATUS SYNC_ResetEvent(struct SYNC_OBJECT *hEvent) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; -+ -+ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_ResetEvent: hEvent 0x%x\n", -+ hEvent); -+ -+ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { -+ pEvent->state = so_reset; -+ status = DSP_SOK; -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_ResetEvent: invalid hEvent " -+ "handle 0x%x\n", hEvent); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== SYNC_SetEvent ======== -+ * Purpose: -+ * Set an event to signaled and unblock one waiting thread. -+ * -+ * This function is called from ISR, DPC and user context. Hence interrupts -+ * are disabled to ensure atomicity. -+ */ -+ -+DSP_STATUS SYNC_SetEvent(struct SYNC_OBJECT *hEvent) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; -+ unsigned long flags; -+ -+ GT_1trace(SYNC_debugMask, GT_6CLASS, "SYNC_SetEvent: hEvent 0x%x\n", -+ hEvent); -+ -+ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { -+ spin_lock_irqsave(&hEvent->sync_lock, flags); -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_SetEvent: pEvent->pWaitObj " -+ "= 0x%x \n", pEvent->pWaitObj); -+ if (pEvent->pWaitObj) -+ GT_1trace(SYNC_debugMask, GT_6CLASS, "SYNC_SetEvent: " -+ "pEvent->pWaitObj->state = 0x%x \n", -+ pEvent->pWaitObj->state); -+ if (pEvent->pWaitObj != NULL && -+ test_and_set(&pEvent->pWaitObj->state, -+ wo_signalled) == wo_waiting) { -+ -+ pEvent->state = so_reset; -+ pEvent->pWaitObj->signalling_event = pEvent; -+ up(&pEvent->pWaitObj->sem); -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_SetEvent: Unlock " -+ "Semaphore for hEvent 0x%x\n", hEvent); -+ } else { -+ pEvent->state = so_signalled; -+ } -+ spin_unlock_irqrestore(&hEvent->sync_lock, flags); -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_SetEvent: invalid hEvent " -+ "handle 0x%x\n", hEvent); -+ } -+ return status; -+} -+ -+/* -+ * ======== SYNC_WaitOnEvent ======== -+ * Purpose: -+ * Wait for an event to be signalled, up to the specified timeout. -+ * Note: dwTimeOut must be 0xffffffff to signal infinite wait. 
-+ */ -+DSP_STATUS SYNC_WaitOnEvent(struct SYNC_OBJECT *hEvent, u32 dwTimeout) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_OBJECT *pEvent = (struct SYNC_OBJECT *)hEvent; -+ u32 temp; -+ -+ GT_2trace(SYNC_debugMask, GT_6CLASS, "SYNC_WaitOnEvent: hEvent 0x%x\n, " -+ "dwTimeOut 0x%x", hEvent, dwTimeout); -+ if (MEM_IsValidHandle(hEvent, SIGNATURE)) { -+ status = SYNC_WaitOnMultipleEvents(&pEvent, 1, dwTimeout, -+ &temp); -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_WaitOnEvent: invalid hEvent" -+ "handle 0x%x\n", hEvent); -+ } -+ return status; -+} -+ -+/* -+ * ======== SYNC_WaitOnMultipleEvents ======== -+ * Purpose: -+ * Wait for any of an array of events to be signalled, up to the -+ * specified timeout. -+ */ -+DSP_STATUS SYNC_WaitOnMultipleEvents(struct SYNC_OBJECT **hSyncEvents, -+ u32 uCount, u32 dwTimeout, -+ OUT u32 *puIndex) -+{ -+ u32 i; -+ DSP_STATUS status = DSP_SOK; -+ u32 curr; -+ struct WAIT_OBJECT *Wp; -+ -+ DBC_Require(uCount > 0); -+ DBC_Require(hSyncEvents != NULL); -+ DBC_Require(puIndex != NULL); -+ -+ for (i = 0; i < uCount; i++) -+ DBC_Require(MEM_IsValidHandle(hSyncEvents[i], SIGNATURE)); -+ -+ GT_4trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_WaitOnMultipleEvents: hSyncEvents:" -+ "0x%x\tuCount: 0x%x" "\tdwTimeout: 0x%x\tpuIndex: 0x%x\n", -+ hSyncEvents, uCount, dwTimeout, puIndex); -+ -+ Wp = MEM_Calloc(sizeof(struct WAIT_OBJECT), MEM_NONPAGED); -+ if (Wp == NULL) -+ return DSP_EMEMORY; -+ -+ Wp->state = wo_waiting; -+ Wp->signalling_event = NULL; -+ init_MUTEX_LOCKED(&(Wp->sem)); -+ -+ for (curr = 0; curr < uCount; curr++) { -+ hSyncEvents[curr]->pWaitObj = Wp; -+ if (hSyncEvents[curr]->state == so_signalled) { -+ GT_0trace(SYNC_debugMask, GT_6CLASS, -+ "Detected signaled Event !!!\n"); -+ if (test_and_set(&(Wp->state), wo_signalled) == -+ wo_waiting) { -+ GT_0trace(SYNC_debugMask, GT_6CLASS, -+ "Setting Signal Event!!!\n"); -+ hSyncEvents[curr]->state = so_reset; -+ Wp->signalling_event = hSyncEvents[curr]; -+ } -+ curr++; /* Will try optimizing later */ -+ break; -+ } -+ } -+ -+ curr--; /* Will try optimizing later */ -+ if (Wp->state != wo_signalled && dwTimeout > 0) { -+ struct timer_list timeout; -+ if (dwTimeout != SYNC_INFINITE) { -+ init_timer(&timeout); -+ timeout.function = timeout_callback; -+ timeout.data = (unsigned long)Wp; -+ timeout.expires = jiffies + dwTimeout * HZ / 1000; -+ add_timer(&timeout); -+ } -+ if (down_interruptible(&(Wp->sem))) { -+ GT_0trace(SYNC_debugMask, GT_7CLASS, "SYNC: " -+ "WaitOnMultipleEvents Interrupted by signal\n"); -+ /* -+ * Most probably we are interrupted by a fake signal -+ * from freezer. Return -ERESTARTSYS so that this -+ * ioctl is restarted, and user space doesn't notice -+ * it. -+ */ -+ status = -ERESTARTSYS; -+ } -+ if (dwTimeout != SYNC_INFINITE) { -+ if (in_interrupt()) { -+ if (!del_timer(&timeout)) { -+ GT_0trace(SYNC_debugMask, GT_7CLASS, -+ "SYNC: Timer expired\n"); -+ } -+ } else { -+ if (!del_timer_sync(&timeout)) { -+ GT_0trace(SYNC_debugMask, GT_7CLASS, -+ "SYNC: Timer expired\n"); -+ } -+ } -+ } -+ } -+ for (i = 0; i <= curr; i++) { -+ if (MEM_IsValidHandle(hSyncEvents[i], SIGNATURE)) { -+ /* Memory corruption here if hSyncEvents[i] is -+ * freed before following statememt. 
*/ -+ hSyncEvents[i]->pWaitObj = NULL; -+ } -+ if (hSyncEvents[i] == Wp->signalling_event) -+ *puIndex = i; -+ -+ } -+ if (Wp->signalling_event == NULL && DSP_SUCCEEDED(status)) { -+ GT_0trace(SYNC_debugMask, GT_7CLASS, -+ "SYNC:Signaling Event NULL!!!(:-\n"); -+ status = DSP_ETIMEOUT; -+ } -+ if (Wp) -+ MEM_Free(Wp); -+ return status; -+} -+ -+static void timeout_callback(unsigned long hWaitObj) -+{ -+ struct WAIT_OBJECT *pWaitObj = (struct WAIT_OBJECT *)hWaitObj; -+ if (test_and_set(&pWaitObj->state, wo_signalled) == wo_waiting) -+ up(&pWaitObj->sem); -+ -+} -+ -+/* -+ * ======== SYNC_DeleteCS ======== -+ */ -+DSP_STATUS SYNC_DeleteCS(struct SYNC_CSOBJECT *hCSObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_CSOBJECT *pCSObj = (struct SYNC_CSOBJECT *)hCSObj; -+ -+ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_DeleteCS\n"); -+ -+ if (MEM_IsValidHandle(hCSObj, SIGNATURECS)) { -+ if (down_trylock(&pCSObj->sem) != 0) { -+ GT_1trace(SYNC_debugMask, GT_7CLASS, -+ "CS in use (locked) while " -+ "deleting! pCSObj=0x%X", pCSObj); -+ DBC_Assert(0); -+ } -+ MEM_FreeObject(hCSObj); -+ } else if (MEM_IsValidHandle(hCSObj, SIGNATUREDPCCS)) { -+ struct SYNC_DPCCSOBJECT *pDPCCSObj = -+ (struct SYNC_DPCCSOBJECT *)hCSObj; -+ if (pDPCCSObj->count != 1) { -+ GT_1trace(SYNC_debugMask, GT_7CLASS, -+ "DPC CS in use (locked) while " -+ "deleting! pCSObj=0x%X", pCSObj); -+ DBC_Assert(0); -+ } -+ MEM_FreeObject(pDPCCSObj); -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_DeleteCS: invalid hCSObj " -+ "handle 0x%x\n", hCSObj); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== SYNC_EnterCS ======== -+ */ -+DSP_STATUS SYNC_EnterCS(struct SYNC_CSOBJECT *hCSObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_CSOBJECT *pCSObj = (struct SYNC_CSOBJECT *)hCSObj; -+ -+ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_EnterCS: hCSObj %p\n", -+ hCSObj); -+ if (MEM_IsValidHandle(hCSObj, SIGNATURECS)) { -+ if (in_interrupt()) { -+ status = DSP_EFAIL; -+ GT_0trace(SYNC_debugMask, GT_7CLASS, -+ "SYNC_EnterCS called from " -+ "ISR/DPC or with ISR/DPC disabled!"); -+ DBC_Assert(0); -+ } else if (down_interruptible(&pCSObj->sem)) { -+ GT_1trace(SYNC_debugMask, GT_7CLASS, -+ "CS interrupted by signal! 
" -+ "pCSObj=0x%X", pCSObj); -+ status = DSP_EFAIL; -+ } -+ } else if (MEM_IsValidHandle(hCSObj, SIGNATUREDPCCS)) { -+ struct SYNC_DPCCSOBJECT *pDPCCSObj = -+ (struct SYNC_DPCCSOBJECT *)hCSObj; -+ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_EnterCS DPC\n"); -+ spin_lock_bh(&pDPCCSObj->sync_dpccs_lock); -+ pDPCCSObj->count--; -+ if (pDPCCSObj->count != 0) { -+ /* FATAL ERROR : Failed to acquire DPC CS */ -+ GT_2trace(SYNC_debugMask, GT_7CLASS, -+ "SYNC_EnterCS DPCCS %x locked," -+ "count %d", pDPCCSObj, pDPCCSObj->count); -+ spin_unlock_bh(&pDPCCSObj->sync_dpccs_lock); -+ DBC_Assert(0); -+ } -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_EnterCS: invalid hCSObj " -+ "handle 0x%x\n", hCSObj); -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== SYNC_InitializeCS ======== -+ */ -+DSP_STATUS SYNC_InitializeCS(OUT struct SYNC_CSOBJECT **phCSObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_CSOBJECT *pCSObj = NULL; -+ -+ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_InitializeCS\n"); -+ -+ /* Allocate memory for sync CS object */ -+ MEM_AllocObject(pCSObj, struct SYNC_CSOBJECT, SIGNATURECS); -+ if (pCSObj != NULL) { -+ init_MUTEX(&pCSObj->sem); -+ } else { -+ status = DSP_EMEMORY; -+ GT_0trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_InitializeCS: MEM_AllocObject" -+ "failed\n"); -+ } -+ /* return CS object */ -+ *phCSObj = pCSObj; -+ DBC_Assert(DSP_FAILED(status) || (pCSObj)); -+ return status; -+} -+ -+DSP_STATUS SYNC_InitializeDPCCS(OUT struct SYNC_CSOBJECT **phCSObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_DPCCSOBJECT *pCSObj = NULL; -+ -+ DBC_Require(phCSObj); -+ -+ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_InitializeDPCCS\n"); -+ -+ if (phCSObj) { -+ /* Allocate memory for sync CS object */ -+ MEM_AllocObject(pCSObj, struct SYNC_DPCCSOBJECT, -+ SIGNATUREDPCCS); -+ if (pCSObj != NULL) { -+ pCSObj->count = 1; -+ spin_lock_init(&pCSObj->sync_dpccs_lock); -+ } else { -+ status = DSP_EMEMORY; -+ GT_0trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_InitializeDPCCS: " -+ "MEM_AllocObject failed\n"); -+ } -+ -+ /* return CS object */ -+ *phCSObj = (struct SYNC_CSOBJECT *)pCSObj; -+ } else { -+ status = DSP_EPOINTER; -+ } -+ -+ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_InitializeDPCCS " -+ "pCSObj %p\n", pCSObj); -+ DBC_Assert(DSP_FAILED(status) || (pCSObj)); -+ -+ return status; -+} -+ -+/* -+ * ======== SYNC_LeaveCS ======== -+ */ -+DSP_STATUS SYNC_LeaveCS(struct SYNC_CSOBJECT *hCSObj) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct SYNC_CSOBJECT *pCSObj = (struct SYNC_CSOBJECT *)hCSObj; -+ -+ GT_1trace(SYNC_debugMask, GT_ENTER, "SYNC_LeaveCS: hCSObj %p\n", -+ hCSObj); -+ -+ if (MEM_IsValidHandle(hCSObj, SIGNATURECS)) { -+ up(&pCSObj->sem); -+ } else if (MEM_IsValidHandle(hCSObj, SIGNATUREDPCCS)) { -+ struct SYNC_DPCCSOBJECT *pDPCCSObj = -+ (struct SYNC_DPCCSOBJECT *)hCSObj; -+ pDPCCSObj->count++; -+ if (pDPCCSObj->count != 1) { -+ /* FATAL ERROR : Invalid DPC CS count */ -+ GT_2trace(SYNC_debugMask, GT_7CLASS, -+ "SYNC_LeaveCS DPCCS %x, " -+ "Invalid count %d", pDPCCSObj, -+ pDPCCSObj->count); -+ spin_unlock_bh(&pDPCCSObj->sync_dpccs_lock); -+ DBC_Assert(0); -+ spin_lock_bh(&pDPCCSObj->sync_dpccs_lock); -+ } -+ spin_unlock_bh(&pDPCCSObj->sync_dpccs_lock); -+ GT_0trace(SYNC_debugMask, GT_ENTER, "SYNC_LeaveCS DPC\n"); -+ } else { -+ status = DSP_EHANDLE; -+ GT_1trace(SYNC_debugMask, GT_6CLASS, -+ "SYNC_LeaveCS: invalid hCSObj " -+ "handle 0x%x\n", hCSObj); -+ } -+ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_cmm.h 
linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_cmm.h
---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_cmm.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_cmm.h	2011-06-22 13:19:32.553063280 +0200
-@@ -0,0 +1,59 @@
-+/*
-+ * _cmm.h
-+ *
-+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
-+ *
-+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
-+ *
-+ * This package is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
-+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
-+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
-+ */
-+
-+
-+/*
-+ * ======== _cmm.h ========
-+ * Description:
-+ * Private header file defining CMM manager objects and defines needed
-+ * by IO manager to register shared memory regions when DSP base image
-+ * is loaded(WMD_IO_OnLoaded).
-+ *
-+ * Public Functions:
-+ * None.
-+ *
-+ * Notes:
-+ *
-+ *! Revision History:
-+ *! ================
-+ *! 24-Aug-2001 ag Created.
-+ */
-+
-+#ifndef _CMM_
-+#define _CMM_
-+
-+/*
-+ * These target side symbols define the beginning and ending addresses
-+ * of the section of shared memory used for shared memory manager CMM.
-+ * They are defined in the *cfg.cmd file by cdb code.
-+ */
-+#define SHM0_SHARED_BASE_SYM "_SHM0_BEG"
-+#define SHM0_SHARED_END_SYM "_SHM0_END"
-+#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT"
-+
-+/*
-+ * Shared Memory Region #0(SHMSEG0) is used in the following way:
-+ *
-+ * |(_SHM0_BEG)   | (_SHM0_RSVDSTRT) | (_SHM0_END)
-+ * V             V                  V
-+ * ------------------------------------------------------------
-+ * | DSP-side allocations | GPP-side allocations |
-+ * ------------------------------------------------------------
-+ *
-+ *
-+ */
-+
-+#endif /* _CMM_ */
-diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_deh.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_deh.h
---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_deh.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_deh.h	2011-06-22 13:19:32.553063280 +0200
-@@ -0,0 +1,46 @@
-+/*
-+ * _deh.h
-+ *
-+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
-+ *
-+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
-+ *
-+ * This package is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
-+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
-+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
-+ */
-+
-+
-+/*
-+ * ======== _deh.h ========
-+ * Description:
-+ * Private header for DEH module.
-+ *
-+ *! Revision History:
-+ *! ================
-+ *! 21-Sep-2001 kc: created.
-+ */
-+
-+#ifndef _DEH_
-+#define _DEH_
-+
-+#include
-+#include
-+#include
-+
-+#define SIGNATURE 0x5f484544 /* "DEH_" backwards */
-+
-+/* DEH Manager: only one created per board: */
-+struct DEH_MGR {
-+	u32 dwSignature; /* Used for object validation. */
-+	struct WMD_DEV_CONTEXT *hWmdContext; /* WMD device context. */
-+	struct NTFY_OBJECT *hNtfy; /* NTFY object */
-+	struct DPC_OBJECT *hMmuFaultDpc; /* DPC object handle. */
-+	struct DSP_ERRORINFO errInfo; /* DSP exception info.
*/ -+} ; -+ -+#endif /* _DEH_ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/chnl_sm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/chnl_sm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/chnl_sm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/chnl_sm.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,1100 @@ -+/* -+ * chnl_sm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== chnl_sm.c ======== -+ * Description: -+ * Implements upper edge functions for WMD channel module. -+ * -+ * Public Functions: -+ * WMD_CHNL_AddIOReq -+ * WMD_CHNL_CancelIO -+ * WMD_CHNL_Close -+ * WMD_CHNL_Create -+ * WMD_CHNL_Destroy -+ * WMD_CHNL_FlushIO -+ * WMD_CHNL_GetInfo -+ * WMD_CHNL_GetIOC -+ * WMD_CHNL_GetMgrInfo -+ * WMD_CHNL_Idle -+ * WMD_CHNL_Open -+ * -+ * Notes: -+ * The lower edge functions must be implemented by the WMD writer, and -+ * are declared in chnl_sm.h. -+ * -+ * Care is taken in this code to prevent simulataneous access to channel -+ * queues from -+ * 1. Threads. -+ * 2. IO_DPC(), scheduled from the IO_ISR() as an event. -+ * -+ * This is done primarily by: -+ * - Semaphores. -+ * - state flags in the channel object; and -+ * - ensuring the IO_Dispatch() routine, which is called from both -+ * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered. -+ * -+ * Channel Invariant: -+ * There is an important invariant condition which must be maintained per -+ * channel outside of WMD_CHNL_GetIOC() and IO_Dispatch(), violation of -+ * which may cause timeouts and/or failure offunction SYNC_WaitOnEvent. -+ * This invariant condition is: -+ * -+ * LST_Empty(pChnl->pIOCompletions) ==> pChnl->hSyncEvent is reset -+ * and -+ * !LST_Empty(pChnl->pIOCompletions) ==> pChnl->hSyncEvent is set. -+ * -+ *! Revision History: -+ *! ================ -+ *! 10-Feb-2004 sb: Consolidated the MAILBOX_IRQ macro at the top of the file. -+ *! 05-Jan-2004 vp: Updated for 2.6 kernel on 24xx platform. -+ *! 23-Apr-2003 sb: Fixed mailbox deadlock -+ *! 24-Feb-2003 vp: Code Review Updates. -+ *! 18-Oct-2002 vp: Ported to Linux platform -+ *! 29-Aug-2002 rr Changed the SYNC error code return to DSP error code return -+ * in WMD_CHNL_GetIOC. -+ *! 22-Jan-2002 ag Zero-copy support added. -+ *! CMM_CallocBuf() used for SM allocations. -+ *! 04-Feb-2001 ag DSP-DMA support added. -+ *! 22-Nov-2000 kc: Updated usage of PERF_RegisterStat. -+ *! 06-Nov-2000 jeh Move ISR_Install, DPC_Create from CHNL_Create to IO_Create. -+ *! 13-Oct-2000 jeh Added dwArg parameter to WMD_CHNL_AddIOReq(), added -+ *! WMD_CHNL_Idle and WMD_CHNL_RegisterNotify for DSPStream. -+ *! Remove #ifdef DEBUG from around channel cIOCs field. -+ *! 21-Sep-2000 rr: PreOMAP chnl class library acts like a IO class library. -+ *! 25-Sep-2000 ag: MEM_[Unmap]LinearAddress added for #ifdef CHNL_PREOMAP. -+ *! 07-Sep-2000 rr: Added new channel class for PreOMAP. -+ *! 11-Jul-2000 jeh Allow NULL user event in WMD_CHNL_Open(). -+ *! 
06-Jul-2000 rr: Changed prefix PROC to PRCS for process module calls. -+ *! 20-Jan-2000 ag: Incorporated code review comments. -+ *! 05-Jan-2000 ag: Text format cleanup. -+ *! 07-Dec-1999 ag: Now setting ChnlMgr fSharedIRQ flag before ISR_Install(). -+ *! 01-Dec-1999 ag: WMD_CHNL_Open() now accepts named sync event. -+ *! 14-Nov-1999 ag: DPC_Schedule() uncommented. -+ *! 28-Oct-1999 ag: CHNL Attrs userEvent not supported. -+ *! SM addrs taken from COFF(IO) or host resource(SM). -+ *! 25-May-1999 jg: CHNL_IOCLASS boards now get their shared memory buffer -+ *! address and length from symbols defined in the currently -+ *! loaded COFF file. See _chn_sm.h. -+ *! 18-Jun-1997 gp: Moved waiting back to ring 0 to improve performance. -+ *! 22-Jan-1998 gp: Update User's pIOC struct in GetIOC at lower IRQL (NT). -+ *! 16-Jan-1998 gp: Commented out PERF stuff, since it is not all there in NT. -+ *! 13-Jan-1998 gp: Protect IOCTLs from IO_DPC by raising IRQL to DIRQL (NT). -+ *! 22-Oct-1997 gp: Call SYNC_OpenEvent in CHNL_Open, for NT support. -+ *! 18-Jun-1997 gp: Moved waiting back to ring 0 to improve performance. -+ *! 16-Jun-1997 gp: Added call into lower edge CHNL function to allow override -+ *! of the SHM window length reported by Windows CM. -+ *! 05-Jun-1997 gp: Removed unnecessary critical sections. -+ *! 18-Mar-1997 gp: Ensured CHNL_FlushIO on input leaves channel in READY state. -+ *! 06-Jan-1997 gp: ifdefed to support the IO variant of SHM channel class lib. -+ *! 21-Jan-1997 gp: CHNL_Close: set pChnl = NULL for DBC_Ensure(). -+ *! 14-Jan-1997 gp: Updated based on code review feedback. -+ *! 03-Jan-1997 gp: Added CHNL_E_WAITTIMEOUT error return code to CHNL_FlushIO() -+ *! 23-Oct-1996 gp: Tag channel with ring 0 process handle. -+ *! 13-Sep-1996 gp: Added performance statistics for channel. -+ *! 09-Sep-1996 gp: Added WMD_CHNL_GetMgrInfo(). -+ *! 04-Sep-1996 gp: Removed shared memory control struct offset: made zero. -+ *! 01-Aug-1996 gp: Implemented basic channel manager and channel create/delete. -+ *! 17-Jul-1996 gp: Started pseudo coding. -+ *! 11-Jul-1996 gp: Stubbed out. -+ */ -+ -+/* ----------------------------------- OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Mini-Driver */ -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- Define for This */ -+#define USERMODE_ADDR PAGE_OFFSET -+ -+#define MAILBOX_IRQ INT_MAIL_MPU_IRQ -+ -+/* ----------------------------------- Function Prototypes */ -+static struct LST_LIST *CreateChirpList(u32 uChirps); -+ -+static void FreeChirpList(struct LST_LIST *pList); -+ -+static struct CHNL_IRP *MakeNewChirp(void); -+ -+static DSP_STATUS SearchFreeChannel(struct CHNL_MGR *pChnlMgr, -+ OUT u32 *pdwChnl); -+ -+/* -+ * ======== WMD_CHNL_AddIOReq ======== -+ * Enqueue an I/O request for data transfer on a channel to the DSP. -+ * The direction (mode) is specified in the channel object. Note the DSP -+ * address is specified for channels opened in direct I/O mode. 
-+ */ -+DSP_STATUS WMD_CHNL_AddIOReq(struct CHNL_OBJECT *hChnl, void *pHostBuf, -+ u32 cBytes, u32 cBufSize, -+ OPTIONAL u32 dwDspAddr, u32 dwArg) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; -+ struct CHNL_IRP *pChirp = NULL; -+ u32 dwState; -+ bool fIsEOS; -+ struct CHNL_MGR *pChnlMgr = pChnl->pChnlMgr; -+ u8 *pHostSysBuf = NULL; -+ bool fSchedDPC = false; -+ u16 wMbVal = 0; -+ -+ DBG_Trace(DBG_ENTER, -+ "> WMD_CHNL_AddIOReq pChnl %p CHNL_IsOutput %x uChnlType " -+ "%x Id %d\n", pChnl, CHNL_IsOutput(pChnl->uMode), -+ pChnl->uChnlType, pChnl->uId); -+ -+ fIsEOS = (cBytes == 0) ? true : false; -+ -+ if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1 && pHostBuf) { -+ if (!(pHostBuf < (void *)USERMODE_ADDR)) { -+ pHostSysBuf = pHostBuf; -+ goto func_cont; -+ } -+ /* if addr in user mode, then copy to kernel space */ -+ pHostSysBuf = MEM_Alloc(cBufSize, MEM_NONPAGED); -+ if (pHostSysBuf == NULL) { -+ status = DSP_EMEMORY; -+ DBG_Trace(DBG_LEVEL7, -+ "No memory to allocate kernel buffer\n"); -+ goto func_cont; -+ } -+ if (CHNL_IsOutput(pChnl->uMode)) { -+ status = copy_from_user(pHostSysBuf, pHostBuf, -+ cBufSize); -+ if (status) { -+ DBG_Trace(DBG_LEVEL7, -+ "Error copying user buffer to " -+ "kernel, %d bytes remaining.\n", -+ status); -+ MEM_Free(pHostSysBuf); -+ pHostSysBuf = NULL; -+ status = DSP_EPOINTER; -+ } -+ } -+ } -+func_cont: -+ /* Validate args: */ -+ if (pHostBuf == NULL) { -+ status = DSP_EPOINTER; -+ } else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else if (fIsEOS && CHNL_IsInput(pChnl->uMode)) { -+ status = CHNL_E_NOEOS; -+ } else { -+ /* Check the channel state: only queue chirp if channel state -+ * allows */ -+ dwState = pChnl->dwState; -+ if (dwState != CHNL_STATEREADY) { -+ if (dwState & CHNL_STATECANCEL) { -+ status = CHNL_E_CANCELLED; -+ } else if ((dwState & CHNL_STATEEOS) -+ && CHNL_IsOutput(pChnl->uMode)) { -+ status = CHNL_E_EOS; -+ } else { -+ /* No other possible states left: */ -+ DBC_Assert(0); -+ } -+ } -+ } -+ /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY -+ * channels. DPCCS is held to avoid race conditions with PCPY channels. -+ * If DPC is scheduled in process context (IO_Schedule) and any -+ * non-mailbox interrupt occurs, that DPC will run and break CS. Hence -+ * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */ -+ SYNC_EnterCS(pChnlMgr->hCSObj); -+ disable_irq(MAILBOX_IRQ); -+ if (pChnl->uChnlType == CHNL_PCPY) { -+ /* This is a processor-copy channel. */ -+ if (DSP_SUCCEEDED(status) && CHNL_IsOutput(pChnl->uMode)) { -+ /* Check buffer size on output channels for fit. 
*/ -+ if (cBytes > IO_BufSize(pChnl->pChnlMgr->hIOMgr)) -+ status = CHNL_E_BUFSIZE; -+ -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Get a free chirp: */ -+ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pFreeList); -+ if (pChirp == NULL) -+ status = CHNL_E_NOIORPS; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Enqueue the chirp on the chnl's IORequest queue: */ -+ pChirp->pHostUserBuf = pChirp->pHostSysBuf = pHostBuf; -+ if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1) -+ pChirp->pHostSysBuf = pHostSysBuf; -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Note: for dma chans dwDspAddr contains dsp address -+ * of SM buffer.*/ -+ DBC_Assert(pChnlMgr->uWordSize != 0); -+ /* DSP address */ -+ pChirp->uDspAddr = dwDspAddr / pChnlMgr->uWordSize; -+ pChirp->cBytes = cBytes; -+ pChirp->cBufSize = cBufSize; -+ /* Only valid for output channel */ -+ pChirp->dwArg = dwArg; -+ pChirp->status = (fIsEOS ? CHNL_IOCSTATEOS : -+ CHNL_IOCSTATCOMPLETE); -+ LST_PutTail(pChnl->pIORequests, (struct LST_ELEM *) -+ pChirp); -+ pChnl->cIOReqs++; -+ DBC_Assert(pChnl->cIOReqs <= pChnl->cChirps); -+ /* If end of stream, update the channel state to prevent -+ * more IOR's: */ -+ if (fIsEOS) -+ pChnl->dwState |= CHNL_STATEEOS; -+ -+ { -+ /* Legacy DSM Processor-Copy */ -+ DBC_Assert(pChnl->uChnlType == CHNL_PCPY); -+ /* Request IO from the DSP */ -+ IO_RequestChnl(pChnlMgr->hIOMgr, pChnl, -+ (CHNL_IsInput(pChnl->uMode) ? -+ IO_INPUT : IO_OUTPUT), &wMbVal); -+ fSchedDPC = true; -+ } -+ } -+ } -+ enable_irq(MAILBOX_IRQ); -+ SYNC_LeaveCS(pChnlMgr->hCSObj); -+ if (wMbVal != 0) -+ IO_IntrDSP2(pChnlMgr->hIOMgr, wMbVal); -+ -+ if (fSchedDPC == true) { -+ /* Schedule a DPC, to do the actual data transfer: */ -+ IO_Schedule(pChnlMgr->hIOMgr); -+ } -+ DBG_Trace(DBG_ENTER, "< WMD_CHNL_AddIOReq pChnl %p\n", pChnl); -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_CancelIO ======== -+ * Return all I/O requests to the client which have not yet been -+ * transferred. The channel's I/O completion object is -+ * signalled, and all the I/O requests are queued as IOC's, with the -+ * status field set to CHNL_IOCSTATCANCEL. -+ * This call is typically used in abort situations, and is a prelude to -+ * CHNL_Close(); -+ */ -+DSP_STATUS WMD_CHNL_CancelIO(struct CHNL_OBJECT *hChnl) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; -+ u32 iChnl = -1; -+ CHNL_MODE uMode; -+ struct CHNL_IRP *pChirp; -+ struct CHNL_MGR *pChnlMgr = NULL; -+ -+ /* Check args: */ -+ if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { -+ iChnl = pChnl->uId; -+ uMode = pChnl->uMode; -+ pChnlMgr = pChnl->pChnlMgr; -+ } else { -+ status = DSP_EHANDLE; -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ /* Mark this channel as cancelled, to prevent further IORequests or -+ * IORequests or dispatching. 
*/ -+ SYNC_EnterCS(pChnlMgr->hCSObj); -+ pChnl->dwState |= CHNL_STATECANCEL; -+ if (LST_IsEmpty(pChnl->pIORequests)) -+ goto func_cont; -+ -+ if (pChnl->uChnlType == CHNL_PCPY) { -+ /* Indicate we have no more buffers available for transfer: */ -+ if (CHNL_IsInput(pChnl->uMode)) { -+ IO_CancelChnl(pChnlMgr->hIOMgr, iChnl); -+ } else { -+ /* Record that we no longer have output buffers -+ * available: */ -+ pChnlMgr->dwOutputMask &= ~(1 << iChnl); -+ } -+ } -+ /* Move all IOR's to IOC queue: */ -+ while (!LST_IsEmpty(pChnl->pIORequests)) { -+ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIORequests); -+ if (pChirp) { -+ pChirp->cBytes = 0; -+ pChirp->status |= CHNL_IOCSTATCANCEL; -+ LST_PutTail(pChnl->pIOCompletions, -+ (struct LST_ELEM *)pChirp); -+ pChnl->cIOCs++; -+ pChnl->cIOReqs--; -+ DBC_Assert(pChnl->cIOReqs >= 0); -+ } -+ } -+func_cont: -+ SYNC_LeaveCS(pChnlMgr->hCSObj); -+func_end: -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_Close ======== -+ * Purpose: -+ * Ensures all pending I/O on this channel is cancelled, discards all -+ * queued I/O completion notifications, then frees the resources allocated -+ * for this channel, and makes the corresponding logical channel id -+ * available for subsequent use. -+ */ -+DSP_STATUS WMD_CHNL_Close(struct CHNL_OBJECT *hChnl) -+{ -+ DSP_STATUS status; -+ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; -+ -+ /* Check args: */ -+ if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { -+ status = DSP_EHANDLE; -+ goto func_cont; -+ } -+ { -+ /* Cancel IO: this ensures no further IO requests or -+ * notifications.*/ -+ status = WMD_CHNL_CancelIO(hChnl); -+ } -+func_cont: -+ if (DSP_SUCCEEDED(status)) { -+ /* Assert I/O on this channel is now cancelled: Protects -+ * from IO_DPC. */ -+ DBC_Assert((pChnl->dwState & CHNL_STATECANCEL)); -+ /* Invalidate channel object: Protects from -+ * CHNL_GetIOCompletion(). */ -+ pChnl->dwSignature = 0x0000; -+ /* Free the slot in the channel manager: */ -+ pChnl->pChnlMgr->apChannel[pChnl->uId] = NULL; -+ pChnl->pChnlMgr->cOpenChannels -= 1; -+ if (pChnl->hNtfy) { -+ NTFY_Delete(pChnl->hNtfy); -+ pChnl->hNtfy = NULL; -+ } -+ /* Reset channel event: (NOTE: hUserEvent freed in user -+ * context.). */ -+ if (pChnl->hSyncEvent) { -+ SYNC_ResetEvent(pChnl->hSyncEvent); -+ SYNC_CloseEvent(pChnl->hSyncEvent); -+ pChnl->hSyncEvent = NULL; -+ } -+ /* Free I/O request and I/O completion queues: */ -+ if (pChnl->pIOCompletions) { -+ FreeChirpList(pChnl->pIOCompletions); -+ pChnl->pIOCompletions = NULL; -+ pChnl->cIOCs = 0; -+ } -+ if (pChnl->pIORequests) { -+ FreeChirpList(pChnl->pIORequests); -+ pChnl->pIORequests = NULL; -+ pChnl->cIOReqs = 0; -+ } -+ if (pChnl->pFreeList) { -+ FreeChirpList(pChnl->pFreeList); -+ pChnl->pFreeList = NULL; -+ } -+ /* Release channel object. */ -+ MEM_FreeObject(pChnl); -+ pChnl = NULL; -+ } -+ DBC_Ensure(DSP_FAILED(status) || -+ !MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)); -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_Create ======== -+ * Create a channel manager object, responsible for opening new channels -+ * and closing old ones for a given board. 
-+ */ -+DSP_STATUS WMD_CHNL_Create(OUT struct CHNL_MGR **phChnlMgr, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct CHNL_MGRATTRS *pMgrAttrs) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_MGR *pChnlMgr = NULL; -+ s32 cChannels; -+#ifdef DEBUG -+ struct CHNL_MGR *hChnlMgr; -+#endif -+ /* Check DBC requirements: */ -+ DBC_Require(phChnlMgr != NULL); -+ DBC_Require(pMgrAttrs != NULL); -+ DBC_Require(pMgrAttrs->cChannels > 0); -+ DBC_Require(pMgrAttrs->cChannels <= CHNL_MAXCHANNELS); -+ DBC_Require(pMgrAttrs->uWordSize != 0); -+#ifdef DEBUG -+ /* This for the purposes of DBC_Require: */ -+ status = DEV_GetChnlMgr(hDevObject, &hChnlMgr); -+ DBC_Require(status != DSP_EHANDLE); -+ DBC_Require(hChnlMgr == NULL); -+#endif -+ if (DSP_SUCCEEDED(status)) { -+ /* Allocate channel manager object: */ -+ MEM_AllocObject(pChnlMgr, struct CHNL_MGR, CHNL_MGRSIGNATURE); -+ if (pChnlMgr) { -+ /* The cChannels attr must equal the # of supported -+ * chnls for each transport(# chnls for PCPY = DDMA = -+ * ZCPY): i.e. pMgrAttrs->cChannels = CHNL_MAXCHANNELS = -+ * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS. */ -+ DBC_Assert(pMgrAttrs->cChannels == CHNL_MAXCHANNELS); -+ cChannels = (CHNL_MAXCHANNELS + (CHNL_MAXCHANNELS * -+ CHNL_PCPY)); -+ /* Create array of channels: */ -+ pChnlMgr->apChannel = MEM_Calloc( -+ sizeof(struct CHNL_OBJECT *) * -+ cChannels, MEM_NONPAGED); -+ if (pChnlMgr->apChannel) { -+ /* Initialize CHNL_MGR object: */ -+ /* Shared memory driver. */ -+ pChnlMgr->dwType = CHNL_TYPESM; -+ pChnlMgr->uWordSize = pMgrAttrs->uWordSize; -+ /* total # chnls supported */ -+ pChnlMgr->cChannels = cChannels; -+ pChnlMgr->cOpenChannels = 0; -+ pChnlMgr->dwOutputMask = 0; -+ pChnlMgr->dwLastOutput = 0; -+ pChnlMgr->hDevObject = hDevObject; -+ if (DSP_SUCCEEDED(status)) { -+ status = SYNC_InitializeDPCCS -+ (&pChnlMgr->hCSObj); -+ } -+ } else { -+ status = DSP_EMEMORY; -+ } -+ } else { -+ status = DSP_EMEMORY; -+ } -+ } -+ if (DSP_FAILED(status)) { -+ WMD_CHNL_Destroy(pChnlMgr); -+ *phChnlMgr = NULL; -+ } else { -+ /* Return channel manager object to caller... */ -+ *phChnlMgr = pChnlMgr; -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_Destroy ======== -+ * Purpose: -+ * Close all open channels, and destroy the channel manager. -+ */ -+DSP_STATUS WMD_CHNL_Destroy(struct CHNL_MGR *hChnlMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_MGR *pChnlMgr = hChnlMgr; -+ u32 iChnl; -+ -+ if (MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { -+ /* Close all open channels: */ -+ for (iChnl = 0; iChnl < pChnlMgr->cChannels; iChnl++) { -+ if (DSP_SUCCEEDED -+ (WMD_CHNL_Close(pChnlMgr->apChannel[iChnl]))) { -+ DBC_Assert(pChnlMgr->apChannel[iChnl] == NULL); -+ } -+ } -+ /* release critical section */ -+ if (pChnlMgr->hCSObj) -+ SYNC_DeleteCS(pChnlMgr->hCSObj); -+ -+ /* Free channel manager object: */ -+ if (pChnlMgr->apChannel) -+ MEM_Free(pChnlMgr->apChannel); -+ -+ /* Set hChnlMgr to NULL in device object. */ -+ DEV_SetChnlMgr(pChnlMgr->hDevObject, NULL); -+ /* Free this Chnl Mgr object: */ -+ MEM_FreeObject(hChnlMgr); -+ } else { -+ status = DSP_EHANDLE; -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_FlushIO ======== -+ * purpose: -+ * Flushes all the outstanding data requests on a channel. 
-+ */ -+DSP_STATUS WMD_CHNL_FlushIO(struct CHNL_OBJECT *hChnl, u32 dwTimeOut) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; -+ CHNL_MODE uMode = -1; -+ struct CHNL_MGR *pChnlMgr; -+ struct CHNL_IOC chnlIOC; -+ /* Check args: */ -+ if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { -+ if ((dwTimeOut == CHNL_IOCNOWAIT) -+ && CHNL_IsOutput(pChnl->uMode)) { -+ status = DSP_EINVALIDARG; -+ } else { -+ uMode = pChnl->uMode; -+ pChnlMgr = pChnl->pChnlMgr; -+ } -+ } else { -+ status = DSP_EHANDLE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Note: Currently, if another thread continues to add IO -+ * requests to this channel, this function will continue to -+ * flush all such queued IO requests. */ -+ if (CHNL_IsOutput(uMode) && (pChnl->uChnlType == CHNL_PCPY)) { -+ /* Wait for IO completions, up to the specified -+ * timeout: */ -+ while (!LST_IsEmpty(pChnl->pIORequests) && -+ DSP_SUCCEEDED(status)) { -+ status = WMD_CHNL_GetIOC(hChnl, dwTimeOut, -+ &chnlIOC); -+ if (DSP_FAILED(status)) -+ continue; -+ -+ if (chnlIOC.status & CHNL_IOCSTATTIMEOUT) -+ status = CHNL_E_WAITTIMEOUT; -+ -+ } -+ } else { -+ status = WMD_CHNL_CancelIO(hChnl); -+ /* Now, leave the channel in the ready state: */ -+ pChnl->dwState &= ~CHNL_STATECANCEL; -+ } -+ } -+ DBC_Ensure(DSP_FAILED(status) || LST_IsEmpty(pChnl->pIORequests)); -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_GetInfo ======== -+ * Purpose: -+ * Retrieve information related to a channel. -+ */ -+DSP_STATUS WMD_CHNL_GetInfo(struct CHNL_OBJECT *hChnl, -+ OUT struct CHNL_INFO *pInfo) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; -+ if (pInfo != NULL) { -+ if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { -+ /* Return the requested information: */ -+ pInfo->hChnlMgr = pChnl->pChnlMgr; -+ pInfo->hEvent = pChnl->hUserEvent; -+ pInfo->dwID = pChnl->uId; -+ pInfo->dwMode = pChnl->uMode; -+ pInfo->cPosition = pChnl->cBytesMoved; -+ pInfo->hProcess = pChnl->hProcess; -+ pInfo->hSyncEvent = pChnl->hSyncEvent; -+ pInfo->cIOCs = pChnl->cIOCs; -+ pInfo->cIOReqs = pChnl->cIOReqs; -+ pInfo->dwState = pChnl->dwState; -+ } else { -+ status = DSP_EHANDLE; -+ } -+ } else { -+ status = DSP_EPOINTER; -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_GetIOC ======== -+ * Optionally wait for I/O completion on a channel. Dequeue an I/O -+ * completion record, which contains information about the completed -+ * I/O request. -+ * Note: Ensures Channel Invariant (see notes above). 
-+ */
-+DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut,
-+			OUT struct CHNL_IOC *pIOC)
-+{
-+	DSP_STATUS status = DSP_SOK;
-+	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
-+	struct CHNL_IRP *pChirp;
-+	DSP_STATUS statSync;
-+	bool fDequeueIOC = true;
-+	struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 };
-+	u8 *pHostSysBuf = NULL;
-+
-+	DBG_Trace(DBG_ENTER, "> WMD_CHNL_GetIOC pChnl %p CHNL_IsOutput %x "
-+		  "uChnlType %x\n", pChnl, CHNL_IsOutput(pChnl->uMode),
-+		  pChnl->uChnlType);
-+	/* Check args: */
-+	if (pIOC == NULL) {
-+		status = DSP_EPOINTER;
-+	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
-+		status = DSP_EHANDLE;
-+	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
-+		if (LST_IsEmpty(pChnl->pIOCompletions))
-+			status = CHNL_E_NOIOC;
-+
-+	}
-+	if (DSP_FAILED(status))
-+		goto func_end;
-+
-+	ioc.status = CHNL_IOCSTATCOMPLETE;
-+	if (dwTimeOut != CHNL_IOCNOWAIT && LST_IsEmpty(pChnl->pIOCompletions)) {
-+		if (dwTimeOut == CHNL_IOCINFINITE)
-+			dwTimeOut = SYNC_INFINITE;
-+
-+		statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut);
-+		if (statSync == DSP_ETIMEOUT) {
-+			/* No response from DSP */
-+			ioc.status |= CHNL_IOCSTATTIMEOUT;
-+			fDequeueIOC = false;
-+		} else if (statSync == DSP_EFAIL) {
-+			/* This can occur when the user mode thread is
-+			 * aborted (^C), or when _VWIN32_WaitSingleObject()
-+			 * fails due to unknown causes. */
-+			/* Even though Wait failed, there may be something in
-+			 * the Q: */
-+			if (LST_IsEmpty(pChnl->pIOCompletions)) {
-+				ioc.status |= CHNL_IOCSTATCANCEL;
-+				fDequeueIOC = false;
-+			}
-+		}
-+	}
-+	/* See comment in AddIOReq */
-+	SYNC_EnterCS(pChnl->pChnlMgr->hCSObj);
-+	disable_irq(MAILBOX_IRQ);
-+	if (fDequeueIOC) {
-+		/* Dequeue IOC and set pIOC; */
-+		DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions));
-+		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIOCompletions);
-+		/* Update pIOC from channel state and chirp: */
-+		if (pChirp) {
-+			pChnl->cIOCs--;
-+			/* If this is a zero-copy channel, then set IOC's pBuf
-+			 * to the DSP's address. This DSP address will get
-+			 * translated to user's virtual addr later. */
-+			{
-+				pHostSysBuf = pChirp->pHostSysBuf;
-+				ioc.pBuf = pChirp->pHostUserBuf;
-+			}
-+			ioc.cBytes = pChirp->cBytes;
-+			ioc.cBufSize = pChirp->cBufSize;
-+			ioc.dwArg = pChirp->dwArg;
-+			ioc.status |= pChirp->status;
-+			/* Place the used chirp on the free list: */
-+			LST_PutTail(pChnl->pFreeList, (struct LST_ELEM *)
-+				pChirp);
-+		} else {
-+			ioc.pBuf = NULL;
-+			ioc.cBytes = 0;
-+		}
-+	} else {
-+		ioc.pBuf = NULL;
-+		ioc.cBytes = 0;
-+		ioc.dwArg = 0;
-+		ioc.cBufSize = 0;
-+	}
-+	/* Ensure invariant: If any IOC's are queued for this channel... */
-+	if (!LST_IsEmpty(pChnl->pIOCompletions)) {
-+		/* Since DSPStream_Reclaim() does not take a timeout
-+		 * parameter, we pass the stream's timeout value to
-+		 * WMD_CHNL_GetIOC. We cannot determine whether or not
-+		 * we have waited in User mode. Since the stream's timeout
-+		 * value may be non-zero, we still have to set the event.
-+		 * Therefore, this optimization is taken out.
-+		 *
-+		 * if (dwTimeOut == CHNL_IOCNOWAIT) {
-+		 *	... ensure event is set..
-+		 *	SYNC_SetEvent(pChnl->hSyncEvent);
-+		 * } */
-+		SYNC_SetEvent(pChnl->hSyncEvent);
-+	} else {
-+		/* else, if list is empty, ensure event is reset.
*/ -+ SYNC_ResetEvent(pChnl->hSyncEvent); -+ } -+ enable_irq(MAILBOX_IRQ); -+ SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj); -+ if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) { -+ if (!(ioc.pBuf < (void *) USERMODE_ADDR)) -+ goto func_cont; -+ -+ /* If the addr is in user mode, then copy it */ -+ if (!pHostSysBuf || !ioc.pBuf) { -+ status = DSP_EPOINTER; -+ DBG_Trace(DBG_LEVEL7, -+ "System buffer NULL in IO completion.\n"); -+ goto func_cont; -+ } -+ if (!CHNL_IsInput(pChnl->uMode)) -+ goto func_cont1; -+ -+ /*pHostUserBuf */ -+ status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes); -+#ifndef RES_CLEANUP_DISABLE -+ if (status) { -+ if (current->flags & PF_EXITING) { -+ DBG_Trace(DBG_LEVEL7, -+ "\n2current->flags == PF_EXITING, " -+ " current->flags;0x%x\n", -+ current->flags); -+ status = 0; -+ } else { -+ DBG_Trace(DBG_LEVEL7, -+ "\n2current->flags != PF_EXITING, " -+ " current->flags;0x%x\n", -+ current->flags); -+ } -+ } -+#endif -+ if (status) { -+ DBG_Trace(DBG_LEVEL7, -+ "Error copying kernel buffer to user, %d" -+ " bytes remaining. in_interupt %d\n", -+ status, in_interrupt()); -+ status = DSP_EPOINTER; -+ } -+func_cont1: -+ MEM_Free(pHostSysBuf); -+ } -+func_cont: -+ /* Update User's IOC block: */ -+ *pIOC = ioc; -+func_end: -+ DBG_Trace(DBG_ENTER, "< WMD_CHNL_GetIOC pChnl %p\n", pChnl); -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_GetMgrInfo ======== -+ * Retrieve information related to the channel manager. -+ */ -+DSP_STATUS WMD_CHNL_GetMgrInfo(struct CHNL_MGR *hChnlMgr, u32 uChnlID, -+ OUT struct CHNL_MGRINFO *pMgrInfo) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_MGR *pChnlMgr = (struct CHNL_MGR *)hChnlMgr; -+ -+ if (pMgrInfo != NULL) { -+ if (uChnlID <= CHNL_MAXCHANNELS) { -+ if (MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { -+ /* Return the requested information: */ -+ pMgrInfo->hChnl = pChnlMgr->apChannel[uChnlID]; -+ pMgrInfo->cOpenChannels = pChnlMgr-> -+ cOpenChannels; -+ pMgrInfo->dwType = pChnlMgr->dwType; -+ /* total # of chnls */ -+ pMgrInfo->cChannels = pChnlMgr->cChannels; -+ } else { -+ status = DSP_EHANDLE; -+ } -+ } else { -+ status = CHNL_E_BADCHANID; -+ } -+ } else { -+ status = DSP_EPOINTER; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_Idle ======== -+ * Idles a particular channel. -+ */ -+DSP_STATUS WMD_CHNL_Idle(struct CHNL_OBJECT *hChnl, u32 dwTimeOut, -+ bool fFlush) -+{ -+ CHNL_MODE uMode; -+ struct CHNL_MGR *pChnlMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(hChnl, CHNL_SIGNATURE)); -+ -+ uMode = hChnl->uMode; -+ pChnlMgr = hChnl->pChnlMgr; -+ -+ if (CHNL_IsOutput(uMode) && !fFlush) { -+ /* Wait for IO completions, up to the specified timeout: */ -+ status = WMD_CHNL_FlushIO(hChnl, dwTimeOut); -+ } else { -+ status = WMD_CHNL_CancelIO(hChnl); -+ -+ /* Reset the byte count and put channel back in ready state. */ -+ hChnl->cBytesMoved = 0; -+ hChnl->dwState &= ~CHNL_STATECANCEL; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_Open ======== -+ * Open a new half-duplex channel to the DSP board. 
-+ */ -+DSP_STATUS WMD_CHNL_Open(OUT struct CHNL_OBJECT **phChnl, -+ struct CHNL_MGR *hChnlMgr, CHNL_MODE uMode, -+ u32 uChnlId, CONST IN struct CHNL_ATTRS *pAttrs) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct CHNL_MGR *pChnlMgr = hChnlMgr; -+ struct CHNL_OBJECT *pChnl = NULL; -+ struct SYNC_ATTRS *pSyncAttrs = NULL; -+ struct SYNC_OBJECT *hSyncEvent = NULL; -+ /* Ensure DBC requirements: */ -+ DBC_Require(phChnl != NULL); -+ DBC_Require(pAttrs != NULL); -+ *phChnl = NULL; -+ /* Validate Args: */ -+ if (pAttrs->uIOReqs == 0) { -+ status = DSP_EINVALIDARG; -+ } else { -+ if (!MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { -+ status = DSP_EHANDLE; -+ } else { -+ if (uChnlId != CHNL_PICKFREE) { -+ if (uChnlId >= pChnlMgr->cChannels) { -+ status = CHNL_E_BADCHANID; -+ } else if (pChnlMgr->apChannel[uChnlId] != -+ NULL) { -+ status = CHNL_E_CHANBUSY; -+ } -+ } else { -+ /* Check for free channel */ -+ status = SearchFreeChannel(pChnlMgr, &uChnlId); -+ } -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_end; -+ -+ DBC_Assert(uChnlId < pChnlMgr->cChannels); -+ /* Create channel object: */ -+ MEM_AllocObject(pChnl, struct CHNL_OBJECT, 0x0000); -+ if (!pChnl) { -+ status = DSP_EMEMORY; -+ goto func_cont; -+ } -+ /* Protect queues from IO_DPC: */ -+ pChnl->dwState = CHNL_STATECANCEL; -+ /* Allocate initial IOR and IOC queues: */ -+ pChnl->pFreeList = CreateChirpList(pAttrs->uIOReqs); -+ pChnl->pIORequests = CreateChirpList(0); -+ pChnl->pIOCompletions = CreateChirpList(0); -+ pChnl->cChirps = pAttrs->uIOReqs; -+ pChnl->cIOCs = 0; -+ pChnl->cIOReqs = 0; -+ status = SYNC_OpenEvent(&hSyncEvent, pSyncAttrs); -+ if (DSP_SUCCEEDED(status)) { -+ status = NTFY_Create(&pChnl->hNtfy); -+ if (DSP_FAILED(status)) { -+ /* The only failure that could have occurred */ -+ status = DSP_EMEMORY; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ if (pChnl->pIOCompletions && pChnl->pIORequests && -+ pChnl->pFreeList) { -+ /* Initialize CHNL object fields: */ -+ pChnl->pChnlMgr = pChnlMgr; -+ pChnl->uId = uChnlId; -+ pChnl->uMode = uMode; -+ pChnl->hUserEvent = hSyncEvent; /* for Linux */ -+ pChnl->hSyncEvent = hSyncEvent; -+ /* Return TGID instead of process handle */ -+ pChnl->hProcess = current->tgid; -+ pChnl->pCBArg = 0; -+ pChnl->cBytesMoved = 0; -+ /* Default to proc-copy */ -+ pChnl->uChnlType = CHNL_PCPY; -+ } else { -+ status = DSP_EMEMORY; -+ } -+ } else { -+ status = DSP_EINVALIDARG; -+ } -+ if (DSP_FAILED(status)) { -+ /* Free memory */ -+ if (pChnl->pIOCompletions) { -+ FreeChirpList(pChnl->pIOCompletions); -+ pChnl->pIOCompletions = NULL; -+ pChnl->cIOCs = 0; -+ } -+ if (pChnl->pIORequests) { -+ FreeChirpList(pChnl->pIORequests); -+ pChnl->pIORequests = NULL; -+ } -+ if (pChnl->pFreeList) { -+ FreeChirpList(pChnl->pFreeList); -+ pChnl->pFreeList = NULL; -+ } -+ if (hSyncEvent) { -+ SYNC_CloseEvent(hSyncEvent); -+ hSyncEvent = NULL; -+ } -+ if (pChnl->hNtfy) { -+ NTFY_Delete(pChnl->hNtfy); -+ pChnl->hNtfy = NULL; -+ } -+ MEM_FreeObject(pChnl); -+ } -+func_cont: -+ if (DSP_SUCCEEDED(status)) { -+ /* Insert channel object in channel manager: */ -+ pChnlMgr->apChannel[pChnl->uId] = pChnl; -+ SYNC_EnterCS(pChnlMgr->hCSObj); -+ pChnlMgr->cOpenChannels++; -+ SYNC_LeaveCS(pChnlMgr->hCSObj); -+ /* Return result... 
*/ -+ pChnl->dwSignature = CHNL_SIGNATURE; -+ pChnl->dwState = CHNL_STATEREADY; -+ *phChnl = pChnl; -+ } -+func_end: -+ DBC_Ensure((DSP_SUCCEEDED(status) && -+ MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) || -+ (*phChnl == NULL)); -+ return status; -+} -+ -+/* -+ * ======== WMD_CHNL_RegisterNotify ======== -+ * Registers for events on a particular channel. -+ */ -+DSP_STATUS WMD_CHNL_RegisterNotify(struct CHNL_OBJECT *hChnl, u32 uEventMask, -+ u32 uNotifyType, -+ struct DSP_NOTIFICATION *hNotification) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Assert(!(uEventMask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION))); -+ -+ status = NTFY_Register(hChnl->hNtfy, hNotification, uEventMask, -+ uNotifyType); -+ -+ return status; -+} -+ -+/* -+ * ======== CreateChirpList ======== -+ * Purpose: -+ * Initialize a queue of channel I/O Request/Completion packets. -+ * Parameters: -+ * uChirps: Number of Chirps to allocate. -+ * Returns: -+ * Pointer to queue of IRPs, or NULL. -+ * Requires: -+ * Ensures: -+ */ -+static struct LST_LIST *CreateChirpList(u32 uChirps) -+{ -+ struct LST_LIST *pChirpList; -+ struct CHNL_IRP *pChirp; -+ u32 i; -+ -+ pChirpList = LST_Create(); -+ -+ if (pChirpList) { -+ /* Make N chirps and place on queue. */ -+ for (i = 0; (i < uChirps) && ((pChirp = MakeNewChirp()) != -+ NULL); i++) { -+ LST_PutTail(pChirpList, (struct LST_ELEM *)pChirp); -+ } -+ -+ /* If we couldn't allocate all chirps, free those allocated: */ -+ if (i != uChirps) { -+ FreeChirpList(pChirpList); -+ pChirpList = NULL; -+ } -+ } -+ -+ return pChirpList; -+} -+ -+/* -+ * ======== FreeChirpList ======== -+ * Purpose: -+ * Free the queue of Chirps. -+ */ -+static void FreeChirpList(struct LST_LIST *pChirpList) -+{ -+ DBC_Require(pChirpList != NULL); -+ -+ while (!LST_IsEmpty(pChirpList)) -+ MEM_Free(LST_GetHead(pChirpList)); -+ -+ LST_Delete(pChirpList); -+} -+ -+/* -+ * ======== MakeNewChirp ======== -+ * Allocate the memory for a new channel IRP. -+ */ -+static struct CHNL_IRP *MakeNewChirp(void) -+{ -+ struct CHNL_IRP *pChirp; -+ -+ pChirp = (struct CHNL_IRP *)MEM_Calloc( -+ sizeof(struct CHNL_IRP), MEM_NONPAGED); -+ if (pChirp != NULL) { -+ /* LST_InitElem only resets the list's member values. */ -+ LST_InitElem(&pChirp->link); -+ } -+ -+ return pChirp; -+} -+ -+/* -+ * ======== SearchFreeChannel ======== -+ * Search for a free channel slot in the array of channel pointers. -+ */ -+static DSP_STATUS SearchFreeChannel(struct CHNL_MGR *pChnlMgr, -+ OUT u32 *pdwChnl) -+{ -+ DSP_STATUS status = CHNL_E_OUTOFSTREAMS; -+ u32 i; -+ -+ DBC_Require(MEM_IsValidHandle(pChnlMgr, CHNL_MGRSIGNATURE)); -+ -+ for (i = 0; i < pChnlMgr->cChannels; i++) { -+ if (pChnlMgr->apChannel[i] == NULL) { -+ status = DSP_SOK; -+ *pdwChnl = i; -+ break; -+ } -+ } -+ -+ return status; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/io_sm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/io_sm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/io_sm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/io_sm.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,2009 @@ -+/* -+ * io_sm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== io_sm.c ======== -+ * Description: -+ * IO dispatcher for a shared memory channel driver. -+ * -+ * Public Functions: -+ * WMD_IO_Create -+ * WMD_IO_Destroy -+ * WMD_IO_OnLoaded -+ * IO_AndSetValue -+ * IO_BufSize -+ * IO_CancelChnl -+ * IO_DPC -+ * IO_ISR -+ * IO_IVAISR -+ * IO_OrSetValue -+ * IO_ReadValue -+ * IO_ReadValueLong -+ * IO_RequestChnl -+ * IO_Schedule -+ * IO_WriteValue -+ * IO_WriteValueLong -+ * -+ * Channel Invariant: -+ * There is an important invariant condition which must be maintained per -+ * channel outside of WMD_CHNL_GetIOC() and IO_Dispatch(), violation of -+ * which may cause timeouts and/or failure of the WIN32_WaitSingleObject -+ * function (SYNC_WaitOnEvent). -+ * -+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ------------------------------------ Hardware Abstraction Layer */ -+#include -+#include -+ -+/* ----------------------------------- Mini Driver */ -+#include -+#include -+#include -+#include <_tiomap.h> -+#include -+#include <_tiomap_pwr.h> -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Others */ -+#include -+#include -+#include -+#include "_cmm.h" -+ -+/* ----------------------------------- This */ -+#include -+#include "_msg_sm.h" -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define OUTPUTNOTREADY 0xffff -+#define NOTENABLED 0xffff /* channel(s) not enabled */ -+ -+#define EXTEND "_EXT_END" -+ -+#define SwapWord(x) (x) -+#define ulPageAlignSize 0x10000 /* Page Align Size */ -+ -+#define MAX_PM_REQS 32 -+ -+/* IO Manager: only one created per board: */ -+struct IO_MGR { -+ /* These four fields must be the first fields in a IO_MGR_ struct: */ -+ u32 dwSignature; /* Used for object validation */ -+ struct WMD_DEV_CONTEXT *hWmdContext; /* WMD device context */ -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD */ -+ struct DEV_OBJECT *hDevObject; /* Device this board represents */ -+ -+ /* These fields initialized in WMD_IO_Create(): */ -+ struct CHNL_MGR *hChnlMgr; -+ struct SHM *pSharedMem; /* Shared Memory control */ -+ u8 *pInput; /* Address of input channel */ -+ u8 *pOutput; /* Address of output channel */ -+ struct MSG_MGR *hMsgMgr; /* Message manager */ -+ struct MSG *pMsgInputCtrl; /* Msg control for from DSP messages */ -+ struct MSG *pMsgOutputCtrl; /* Msg control for to DSP messages */ -+ u8 *pMsgInput; /* Address of input messages */ -+ u8 *pMsgOutput; /* Address of output messages */ -+ u32 uSMBufSize; /* Size of a shared memory I/O channel */ -+ bool fSharedIRQ; /* Is this IRQ shared? 
*/ -+ struct DPC_OBJECT *hDPC; /* DPC object handle */ -+ struct SYNC_CSOBJECT *hCSObj; /* Critical section object handle */ -+ u32 uWordSize; /* Size in bytes of DSP word */ -+ u16 wIntrVal; /* interrupt value */ -+ /* private extnd proc info; mmu setup */ -+ struct MGR_PROCESSOREXTINFO extProcInfo; -+ struct CMM_OBJECT *hCmmMgr; /* Shared Mem Mngr */ -+ struct work_struct io_workq; /*workqueue */ -+ u32 dQuePowerMbxVal[MAX_PM_REQS]; -+ u32 iQuePowerHead; -+ u32 iQuePowerTail; -+#ifndef DSP_TRACEBUF_DISABLED -+ u32 ulTraceBufferBegin; /* Trace message start address */ -+ u32 ulTraceBufferEnd; /* Trace message end address */ -+ u32 ulTraceBufferCurrent; /* Trace message current address */ -+ u32 ulGPPReadPointer; /* GPP Read pointer to Trace buffer */ -+ u8 *pMsg; -+ u32 ulGppVa; -+ u32 ulDspVa; -+#endif -+} ; -+ -+/* ----------------------------------- Function Prototypes */ -+static void IO_DispatchChnl(IN struct IO_MGR *pIOMgr, -+ IN OUT struct CHNL_OBJECT *pChnl, u32 iMode); -+static void IO_DispatchMsg(IN struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr); -+static void IO_DispatchPM(struct work_struct *work); -+static void NotifyChnlComplete(struct CHNL_OBJECT *pChnl, -+ struct CHNL_IRP *pChirp); -+static void InputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, -+ u32 iMode); -+static void OutputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, -+ u32 iMode); -+static void InputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr); -+static void OutputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr); -+static u32 FindReadyOutput(struct CHNL_MGR *pChnlMgr, -+ struct CHNL_OBJECT *pChnl, u32 dwMask); -+static u32 ReadData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, -+ void *pSrc, u32 uSize); -+static u32 WriteData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, -+ void *pSrc, u32 uSize); -+static struct workqueue_struct *bridge_workqueue; -+#ifndef DSP_TRACEBUF_DISABLED -+void PrintDSPDebugTrace(struct IO_MGR *hIOMgr); -+#endif -+ -+/* Bus Addr (cached kernel)*/ -+static DSP_STATUS registerSHMSegs(struct IO_MGR *hIOMgr, -+ struct COD_MANAGER *hCodMan, -+ u32 dwGPPBasePA); -+ -+#ifdef CONFIG_BRIDGE_DVFS -+/* The maximum number of OPPs that are supported */ -+extern s32 dsp_max_opps; -+/* The Vdd1 opp table information */ -+extern u32 vdd1_dsp_freq[6][4] ; -+#endif -+ -+#if GT_TRACE -+static struct GT_Mask dsp_trace_mask = { NULL, NULL }; /* GT trace variable */ -+#endif -+ -+/* -+ * ======== WMD_IO_Create ======== -+ * Create an IO manager object. -+ */ -+DSP_STATUS WMD_IO_Create(OUT struct IO_MGR **phIOMgr, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct IO_ATTRS *pMgrAttrs) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct IO_MGR *pIOMgr = NULL; -+ struct SHM *pSharedMem = NULL; -+ struct WMD_DEV_CONTEXT *hWmdContext = NULL; -+ struct CFG_HOSTRES hostRes; -+ struct CFG_DEVNODE *hDevNode; -+ struct CHNL_MGR *hChnlMgr; -+ static int ref_count; -+ u32 devType; -+ /* Check DBC requirements: */ -+ DBC_Require(phIOMgr != NULL); -+ DBC_Require(pMgrAttrs != NULL); -+ DBC_Require(pMgrAttrs->uWordSize != 0); -+ /* This for the purposes of DBC_Require: */ -+ status = DEV_GetChnlMgr(hDevObject, &hChnlMgr); -+ DBC_Require(status != DSP_EHANDLE); -+ DBC_Require(hChnlMgr != NULL); -+ DBC_Require(hChnlMgr->hIOMgr == NULL); -+ /* -+ * Message manager will be created when a file is loaded, since -+ * size of message buffer in shared memory is configurable in -+ * the base image. 
-+	 */
-+	DEV_GetWMDContext(hDevObject, &hWmdContext);
-+	DBC_Assert(hWmdContext);
-+	DEV_GetDevType(hDevObject, &devType);
-+	/*
-+	 * DSP shared memory area will get set properly when
-+	 * a program is loaded. They are unknown until a COFF file is
-+	 * loaded. I chose the value -1 because it was less likely to be
-+	 * a valid address than 0.
-+	 */
-+	pSharedMem = (struct SHM *) -1;
-+	if (DSP_FAILED(status))
-+		goto func_cont;
-+
-+	/* Create a Single Threaded Work Queue */
-+	if (ref_count == 0)
-+		bridge_workqueue = create_workqueue("bridge_work-queue");
-+
-+	if (!bridge_workqueue)
-+		DBG_Trace(DBG_LEVEL1, "Workqueue creation failed!\n");
-+
-+	/* Allocate IO manager object: */
-+	MEM_AllocObject(pIOMgr, struct IO_MGR, IO_MGRSIGNATURE);
-+	if (pIOMgr == NULL) {
-+		status = DSP_EMEMORY;
-+		goto func_cont;
-+	}
-+
-+	/* Initializing Work Element */
-+	if (ref_count == 0) {
-+		INIT_WORK(&pIOMgr->io_workq, (void *)IO_DispatchPM);
-+		ref_count = 1;
-+	} else
-+		PREPARE_WORK(&pIOMgr->io_workq, (void *)IO_DispatchPM);
-+
-+	/* Initialize CHNL_MGR object: */
-+#ifndef DSP_TRACEBUF_DISABLED
-+	pIOMgr->pMsg = NULL;
-+#endif
-+	pIOMgr->hChnlMgr = hChnlMgr;
-+	pIOMgr->uWordSize = pMgrAttrs->uWordSize;
-+	pIOMgr->pSharedMem = pSharedMem;
-+	if (DSP_SUCCEEDED(status))
-+		status = SYNC_InitializeCS(&pIOMgr->hCSObj);
-+
-+	if (devType == DSP_UNIT) {
-+		/* Create a DPC object: */
-+		status = DPC_Create(&pIOMgr->hDPC, IO_DPC, (void *)pIOMgr);
-+		if (DSP_SUCCEEDED(status))
-+			status = DEV_GetDevNode(hDevObject, &hDevNode);
-+
-+		pIOMgr->iQuePowerHead = 0;
-+		pIOMgr->iQuePowerTail = 0;
-+	}
-+	if (DSP_SUCCEEDED(status)) {
-+		status = CFG_GetHostResources((struct CFG_DEVNODE *)
-+				DRV_GetFirstDevExtension() , &hostRes);
-+	}
-+	if (DSP_SUCCEEDED(status)) {
-+		pIOMgr->hWmdContext = hWmdContext;
-+		pIOMgr->fSharedIRQ = pMgrAttrs->fShared;
-+		IO_DisableInterrupt(hWmdContext);
-+		if (devType == DSP_UNIT) {
-+			HW_MBOX_initSettings(hostRes.dwMboxBase);
-+			/* Plug the channel ISR:. */
-+			if ((request_irq(INT_MAIL_MPU_IRQ, IO_ISR, 0,
-+				"DspBridge\tmailbox", (void *)pIOMgr)) == 0)
-+				status = DSP_SOK;
-+			else
-+				status = DSP_EFAIL;
-+		}
-+		if (DSP_SUCCEEDED(status))
-+			DBG_Trace(DBG_LEVEL1, "ISR_IRQ Object 0x%x \n",
-+				  pIOMgr);
-+		else
-+			status = CHNL_E_ISR;
-+	} else
-+		status = CHNL_E_ISR;
-+func_cont:
-+	if (DSP_FAILED(status)) {
-+		/* Cleanup: */
-+		WMD_IO_Destroy(pIOMgr);
-+		*phIOMgr = NULL;
-+	} else {
-+		/* Return IO manager object to caller... */
-+		hChnlMgr->hIOMgr = pIOMgr;
-+		*phIOMgr = pIOMgr;
-+	}
-+	return status;
-+}
-+
-+/*
-+ * ======== WMD_IO_Destroy ========
-+ * Purpose:
-+ * Disable interrupts, destroy the IO manager.
-+ */
-+DSP_STATUS WMD_IO_Destroy(struct IO_MGR *hIOMgr)
-+{
-+	DSP_STATUS status = DSP_SOK;
-+	struct WMD_DEV_CONTEXT *hWmdContext;
-+	if (MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)) {
-+		/* Unplug IRQ: */
-+		/* Disable interrupts from the board: */
-+		if (DSP_SUCCEEDED(DEV_GetWMDContext(hIOMgr->hDevObject,
-+		   &hWmdContext)))
-+			DBC_Assert(hWmdContext);
-+		(void)CHNLSM_DisableInterrupt(hWmdContext);
-+		destroy_workqueue(bridge_workqueue);
-+		/* Linux function to uninstall ISR */
-+		free_irq(INT_MAIL_MPU_IRQ, (void *)hIOMgr);
-+		(void)DPC_Destroy(hIOMgr->hDPC);
-+#ifndef DSP_TRACEBUF_DISABLED
-+		if (hIOMgr->pMsg)
-+			MEM_Free(hIOMgr->pMsg);
-+#endif
-+		SYNC_DeleteCS(hIOMgr->hCSObj);	/* Leak Fix.
*/ -+ /* Free this IO manager object: */ -+ MEM_FreeObject(hIOMgr); -+ } else -+ status = DSP_EHANDLE; -+ -+ return status; -+} -+ -+/* -+ * ======== WMD_IO_OnLoaded ======== -+ * Purpose: -+ * Called when a new program is loaded to get shared memory buffer -+ * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit -+ * are in DSP address units. -+ */ -+DSP_STATUS WMD_IO_OnLoaded(struct IO_MGR *hIOMgr) -+{ -+ struct COD_MANAGER *hCodMan; -+ struct CHNL_MGR *hChnlMgr; -+ struct MSG_MGR *hMsgMgr; -+ u32 ulShmBase; -+ u32 ulShmBaseOffset; -+ u32 ulShmLimit; -+ u32 ulShmLength = -1; -+ u32 ulMemLength = -1; -+ u32 ulMsgBase; -+ u32 ulMsgLimit; -+ u32 ulMsgLength = -1; -+ u32 ulExtEnd; -+ u32 ulGppPa = 0; -+ u32 ulGppVa = 0; -+ u32 ulDspVa = 0; -+ u32 ulSegSize = 0; -+ u32 ulPadSize = 0; -+ u32 i; -+ DSP_STATUS status = DSP_SOK; -+ u32 uNumProcs = 0; -+ s32 ndx = 0; -+ /* DSP MMU setup table */ -+ struct WMDIOCTL_EXTPROC aEProc[WMDIOCTL_NUMOFMMUTLB]; -+ struct CFG_HOSTRES hostRes; -+ u32 mapAttrs; -+ u32 ulShm0End; -+ u32 ulDynExtBase; -+ u32 ulSeg1Size = 0; -+ u32 paCurr = 0; -+ u32 vaCurr = 0; -+ u32 gppVaCurr = 0; -+ u32 numBytes = 0; -+ u32 allBits = 0; -+ u32 pgSize[] = { HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB, -+ HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB }; -+ -+ status = DEV_GetCodMgr(hIOMgr->hDevObject, &hCodMan); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ hChnlMgr = hIOMgr->hChnlMgr; -+ /* The message manager is destroyed when the board is stopped. */ -+ DEV_GetMsgMgr(hIOMgr->hDevObject, &hIOMgr->hMsgMgr); -+ hMsgMgr = hIOMgr->hMsgMgr; -+ DBC_Assert(MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)); -+ DBC_Assert(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); -+ if (hIOMgr->pSharedMem) -+ hIOMgr->pSharedMem = NULL; -+ -+ /* Get start and length of channel part of shared memory */ -+ status = COD_GetSymValue(hCodMan, CHNL_SHARED_BUFFER_BASE_SYM, -+ &ulShmBase); -+ if (DSP_FAILED(status)) { -+ status = CHNL_E_NOMEMMAP; -+ goto func_cont1; -+ } -+ status = COD_GetSymValue(hCodMan, CHNL_SHARED_BUFFER_LIMIT_SYM, -+ &ulShmLimit); -+ if (DSP_FAILED(status)) { -+ status = CHNL_E_NOMEMMAP; -+ goto func_cont1; -+ } -+ if (ulShmLimit <= ulShmBase) { -+ status = CHNL_E_INVALIDMEMBASE; -+ } else { -+ /* get total length in bytes */ -+ ulShmLength = (ulShmLimit - ulShmBase + 1) * hIOMgr->uWordSize; -+ /* Calculate size of a PROCCOPY shared memory region */ -+ DBG_Trace(DBG_LEVEL7, -+ "**(proc)PROCCOPY SHMMEM SIZE: 0x%x bytes\n", -+ (ulShmLength - sizeof(struct SHM))); -+ } -+func_cont1: -+ if (DSP_SUCCEEDED(status)) { -+ /* Get start and length of message part of shared memory */ -+ status = COD_GetSymValue(hCodMan, MSG_SHARED_BUFFER_BASE_SYM, -+ &ulMsgBase); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMan, MSG_SHARED_BUFFER_LIMIT_SYM, -+ &ulMsgLimit); -+ if (DSP_SUCCEEDED(status)) { -+ if (ulMsgLimit <= ulMsgBase) { -+ status = CHNL_E_INVALIDMEMBASE; -+ } else { -+ /* Length (bytes) of messaging part of shared -+ * memory */ -+ ulMsgLength = (ulMsgLimit - ulMsgBase + 1) * -+ hIOMgr->uWordSize; -+ /* Total length (bytes) of shared memory: -+ * chnl + msg */ -+ ulMemLength = ulShmLength + ulMsgLength; -+ } -+ } else { -+ status = CHNL_E_NOMEMMAP; -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+#ifndef DSP_TRACEBUF_DISABLED -+ status = COD_GetSymValue(hCodMan, DSP_TRACESEC_END, &ulShm0End); -+ DBG_Trace(DBG_LEVEL7, "_BRIDGE_TRACE_END value = %x \n", -+ ulShm0End); -+#else -+ status = COD_GetSymValue(hCodMan, SHM0_SHARED_END_SYM, -+ &ulShm0End); -+ DBG_Trace(DBG_LEVEL7, 
"_SHM0_END = %x \n", ulShm0End); -+#endif -+ if (DSP_FAILED(status)) -+ status = CHNL_E_NOMEMMAP; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMan, DYNEXTBASE, &ulDynExtBase); -+ if (DSP_FAILED(status)) -+ status = CHNL_E_NOMEMMAP; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMan, EXTEND, &ulExtEnd); -+ if (DSP_FAILED(status)) -+ status = CHNL_E_NOMEMMAP; -+ -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Get memory reserved in host resources */ -+ (void)MGR_EnumProcessorInfo(0, -+ (struct DSP_PROCESSORINFO *)&hIOMgr->extProcInfo, -+ sizeof(struct MGR_PROCESSOREXTINFO), &uNumProcs); -+ CFG_GetHostResources(( -+ struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &hostRes); -+ /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */ -+ ndx = 0; -+ ulGppPa = hostRes.dwMemPhys[1]; -+ ulGppVa = hostRes.dwMemBase[1]; -+ /* THIS IS THE VIRTUAL UNCACHED IOREMAPPED ADDRESS !!! */ -+ /* Why can't we directly take the DSPVA from the symbols? */ -+ ulDspVa = hIOMgr->extProcInfo.tyTlb[0].ulDspVirt; -+ ulSegSize = (ulShm0End - ulDspVa) * hIOMgr->uWordSize; -+ ulSeg1Size = (ulExtEnd - ulDynExtBase) * hIOMgr->uWordSize; -+ ulSeg1Size = (ulSeg1Size + 0xFFF) & (~0xFFFUL); /* 4K align*/ -+ ulSegSize = (ulSegSize + 0xFFFF) & (~0xFFFFUL); /* 64K align*/ -+ ulPadSize = ulPageAlignSize - ((ulGppPa + ulSeg1Size) % -+ ulPageAlignSize); -+ if (ulPadSize == ulPageAlignSize) -+ ulPadSize = 0x0; -+ -+ DBG_Trace(DBG_LEVEL7, "ulGppPa %x, ulGppVa %x, ulDspVa %x, " -+ "ulShm0End %x, ulDynExtBase %x, ulExtEnd %x, " -+ "ulSegSize %x ulSeg1Size %x \n", ulGppPa, ulGppVa, -+ ulDspVa, ulShm0End, ulDynExtBase, ulExtEnd, ulSegSize, -+ ulSeg1Size); -+ -+ if ((ulSegSize + ulSeg1Size + ulPadSize) > -+ hostRes.dwMemLength[1]) { -+ DBG_Trace(DBG_LEVEL7, "ulGppPa %x, ulGppVa %x, ulDspVa " -+ "%x, ulShm0End %x, ulDynExtBase %x, ulExtEnd " -+ "%x, ulSegSize %x, ulSeg1Size %x \n", ulGppPa, -+ ulGppVa, ulDspVa, ulShm0End, ulDynExtBase, -+ ulExtEnd, ulSegSize, ulSeg1Size); -+ DBG_Trace(DBG_LEVEL7, "Insufficient SHM Reserved 0x%x. " -+ "Required 0x%x\n", hostRes.dwMemLength[1], -+ ulSegSize + ulSeg1Size + ulPadSize); -+ status = DSP_EMEMORY; -+ } -+ } -+ if (DSP_FAILED(status)) -+ goto func_cont; -+ -+ paCurr = ulGppPa; -+ vaCurr = ulDynExtBase * hIOMgr->uWordSize; -+ gppVaCurr = ulGppVa; -+ numBytes = ulSeg1Size; -+ -+ /* -+ * Try to fit into TLB entries. If not possible, push them to page -+ * tables. It is quite possible that if sections are not on -+ * bigger page boundary, we may end up making several small pages. -+ * So, push them onto page tables, if that is the case. -+ */ -+ mapAttrs = 0x00000000; -+ mapAttrs = DSP_MAPLITTLEENDIAN; -+ mapAttrs |= DSP_MAPPHYSICALADDR; -+ mapAttrs |= DSP_MAPELEMSIZE32; -+ mapAttrs |= DSP_MAPDONOTLOCK; -+ -+ while (numBytes && DSP_SUCCEEDED(status)) { -+ /* To find the max. page size with which both PA & VA are -+ * aligned */ -+ allBits = paCurr | vaCurr; -+ DBG_Trace(DBG_LEVEL1, "allBits %x, paCurr %x, vaCurr %x, " -+ "numBytes %x\n", allBits, paCurr, vaCurr, numBytes); -+ for (i = 0; i < 4; i++) { -+ if ((numBytes >= pgSize[i]) && ((allBits & -+ (pgSize[i] - 1)) == 0)) { -+ status = hIOMgr->pIntfFxns->pfnBrdMemMap -+ (hIOMgr->hWmdContext, paCurr, vaCurr, -+ pgSize[i], mapAttrs); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ paCurr += pgSize[i]; -+ vaCurr += pgSize[i]; -+ gppVaCurr += pgSize[i]; -+ numBytes -= pgSize[i]; -+ /* Don't try smaller sizes. 
Hopefully we have -+ * reached an address aligned to a bigger page -+ * size*/ -+ break; -+ } -+ } -+ } -+ paCurr += ulPadSize; -+ vaCurr += ulPadSize; -+ gppVaCurr += ulPadSize; -+ -+ /* configure the TLB entries for the next cacheable segment */ -+ numBytes = ulSegSize; -+ vaCurr = ulDspVa * hIOMgr->uWordSize; -+ allBits = 0x0; -+ while (numBytes && DSP_SUCCEEDED(status)) { -+ /* To find the max. page size with which both PA & VA are -+ * aligned*/ -+ allBits = paCurr | vaCurr; -+ DBG_Trace(DBG_LEVEL1, "allBits for Seg1 %x, paCurr %x, " -+ "vaCurr %x, numBytes %x\n", allBits, paCurr, vaCurr, -+ numBytes); -+ for (i = 0; i < 4; i++) { -+ if (!(numBytes >= pgSize[i]) || -+ !((allBits & (pgSize[i]-1)) == 0)) -+ continue; -+ if (ndx < MAX_LOCK_TLB_ENTRIES) { -+ /* This is the physical address written to -+ * DSP MMU */ -+ aEProc[ndx].ulGppPa = paCurr; -+ /* THIS IS THE VIRTUAL UNCACHED IOREMAPPED -+ * ADDRESS!!! */ -+ aEProc[ndx].ulGppVa = gppVaCurr; -+ aEProc[ndx].ulDspVa = vaCurr / hIOMgr-> -+ uWordSize; -+ aEProc[ndx].ulSize = pgSize[i]; -+ aEProc[ndx].endianism = HW_LITTLE_ENDIAN; -+ aEProc[ndx].elemSize = HW_ELEM_SIZE_16BIT; -+ aEProc[ndx].mixedMode = HW_MMU_CPUES; -+ DBG_Trace(DBG_LEVEL1, "SHM MMU TLB entry PA %lx" -+ " VA %lx DSP_VA %lx Size %lx\n", -+ aEProc[ndx].ulGppPa, -+ aEProc[ndx].ulGppVa, -+ aEProc[ndx].ulDspVa * -+ hIOMgr->uWordSize, pgSize[i]); -+ ndx++; -+ } else { -+ status = hIOMgr->pIntfFxns->pfnBrdMemMap( -+ hIOMgr->hWmdContext, paCurr, vaCurr, pgSize[i], -+ mapAttrs); -+ DBG_Trace(DBG_LEVEL1, "SHM MMU PTE entry PA %lx" -+ " VA %lx DSP_VA %lx Size %lx\n", -+ aEProc[ndx].ulGppPa, -+ aEProc[ndx].ulGppVa, -+ aEProc[ndx].ulDspVa * -+ hIOMgr->uWordSize, pgSize[i]); -+ DBC_Assert(DSP_SUCCEEDED(status)); -+ } -+ paCurr += pgSize[i]; -+ vaCurr += pgSize[i]; -+ gppVaCurr += pgSize[i]; -+ numBytes -= pgSize[i]; -+ /* Don't try smaller sizes. Hopefully we have reached -+ an address aligned to a bigger page size*/ -+ break; -+ } -+ } -+ -+ /* Copy remaining entries from CDB. All entries are 1 MB and should not -+ * conflict with SHM entries on MPU or DSP side */ -+ for (i = 3; i < 7 && ndx < WMDIOCTL_NUMOFMMUTLB && -+ DSP_SUCCEEDED(status); i++) { -+ if (hIOMgr->extProcInfo.tyTlb[i].ulGppPhys == 0) -+ continue; -+ -+ if ((hIOMgr->extProcInfo.tyTlb[i].ulGppPhys > ulGppPa - 0x100000 -+ && hIOMgr->extProcInfo.tyTlb[i].ulGppPhys <= -+ ulGppPa + ulSegSize) -+ || (hIOMgr->extProcInfo.tyTlb[i].ulDspVirt > ulDspVa - -+ 0x100000 / hIOMgr->uWordSize && hIOMgr-> -+ extProcInfo.tyTlb[i].ulDspVirt -+ <= ulDspVa + ulSegSize / hIOMgr->uWordSize)) { -+ DBG_Trace(DBG_LEVEL7, "CDB MMU entry %d conflicts with " -+ "SHM.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: " -+ "GppPa %x, DspVa %x, Bytes %x.\n", i, -+ hIOMgr->extProcInfo.tyTlb[i].ulGppPhys, -+ hIOMgr->extProcInfo.tyTlb[i].ulDspVirt, -+ ulGppPa, ulDspVa, ulSegSize); -+ status = DSP_EFAIL; -+ } else { -+ if (ndx < MAX_LOCK_TLB_ENTRIES) { -+ aEProc[ndx].ulDspVa = hIOMgr->extProcInfo. -+ tyTlb[i].ulDspVirt; -+ aEProc[ndx].ulGppPa = hIOMgr->extProcInfo. 
-+ tyTlb[i].ulGppPhys; -+ aEProc[ndx].ulGppVa = 0; -+ /* Can't convert, so set to zero*/ -+ aEProc[ndx].ulSize = 0x100000; /* 1 MB*/ -+ DBG_Trace(DBG_LEVEL1, "SHM MMU entry PA %x " -+ "DSP_VA 0x%x\n", aEProc[ndx].ulGppPa, -+ aEProc[ndx].ulDspVa); -+ ndx++; -+ } else { -+ status = hIOMgr->pIntfFxns->pfnBrdMemMap -+ (hIOMgr->hWmdContext, -+ hIOMgr->extProcInfo.tyTlb[i].ulGppPhys, -+ hIOMgr->extProcInfo.tyTlb[i].ulDspVirt, -+ 0x100000, mapAttrs); -+ } -+ } -+ } -+ if (i < 7 && DSP_SUCCEEDED(status)) { -+ /* All CDB entries could not be made*/ -+ status = DSP_EFAIL; -+ } -+func_cont: -+ mapAttrs = 0x00000000; -+ mapAttrs = DSP_MAPLITTLEENDIAN; -+ mapAttrs |= DSP_MAPPHYSICALADDR; -+ mapAttrs |= DSP_MAPELEMSIZE32; -+ mapAttrs |= DSP_MAPDONOTLOCK; -+ -+ /* Map the L4 peripherals */ -+ i = 0; -+ while (L4PeripheralTable[i].physAddr && DSP_SUCCEEDED(status)) { -+ status = hIOMgr->pIntfFxns->pfnBrdMemMap -+ (hIOMgr->hWmdContext, L4PeripheralTable[i].physAddr, -+ L4PeripheralTable[i].dspVirtAddr, HW_PAGE_SIZE_4KB, -+ mapAttrs); -+ if (DSP_FAILED(status)) -+ break; -+ i++; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ for (i = ndx; i < WMDIOCTL_NUMOFMMUTLB; i++) { -+ aEProc[i].ulDspVa = 0; -+ aEProc[i].ulGppPa = 0; -+ aEProc[i].ulGppVa = 0; -+ aEProc[i].ulSize = 0; -+ } -+ /* Set the SHM physical address entry (grayed out in CDB file) -+ * to the virtual uncached ioremapped address of SHM reserved -+ * on MPU */ -+ hIOMgr->extProcInfo.tyTlb[0].ulGppPhys = (ulGppVa + ulSeg1Size + -+ ulPadSize); -+ DBG_Trace(DBG_LEVEL1, "*********extProcInfo *********%x \n", -+ hIOMgr->extProcInfo.tyTlb[0].ulGppPhys); -+ /* Need SHM Phys addr. IO supports only one DSP for now: -+ * uNumProcs=1 */ -+ if ((hIOMgr->extProcInfo.tyTlb[0].ulGppPhys == 0) || -+ (uNumProcs != 1)) { -+ status = CHNL_E_NOMEMMAP; -+ DBC_Assert(false); -+ } else { -+ DBC_Assert(aEProc[0].ulDspVa <= ulShmBase); -+ /* ulShmBase may not be at ulDspVa address */ -+ ulShmBaseOffset = (ulShmBase - aEProc[0].ulDspVa) * -+ hIOMgr->uWordSize; -+ /* WMD_BRD_Ctrl() will set dev context dsp-mmu info. In -+ * _BRD_Start() the MMU will be re-programed with MMU -+ * DSPVa-GPPPa pair info while DSP is in a known -+ * (reset) state. */ -+ DBC_Assert(hIOMgr->pIntfFxns != NULL); -+ DBC_Assert(hIOMgr->hWmdContext != NULL); -+ status = hIOMgr->pIntfFxns->pfnDevCntrl(hIOMgr-> -+ hWmdContext, WMDIOCTL_SETMMUCONFIG, aEProc); -+ ulShmBase = hIOMgr->extProcInfo.tyTlb[0].ulGppPhys; -+ DBG_Trace(DBG_LEVEL1, "extProcInfo.tyTlb[0].ulGppPhys " -+ "%x \n ", hIOMgr->extProcInfo.tyTlb[0]. -+ ulGppPhys); -+ ulShmBase += ulShmBaseOffset; -+ ulShmBase = (u32)MEM_LinearAddress((void *)ulShmBase, -+ ulMemLength); -+ DBC_Assert(ulShmBase != 0); -+ if (DSP_SUCCEEDED(status)) { -+ status = registerSHMSegs(hIOMgr, hCodMan, -+ aEProc[0].ulGppPa); -+ /* Register SM */ -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ hIOMgr->pSharedMem = (struct SHM *)ulShmBase; -+ hIOMgr->pInput = (u8 *)hIOMgr->pSharedMem + -+ sizeof(struct SHM); -+ hIOMgr->pOutput = hIOMgr->pInput + (ulShmLength - -+ sizeof(struct SHM))/2; -+ hIOMgr->uSMBufSize = hIOMgr->pOutput - hIOMgr->pInput; -+ DBG_Trace(DBG_LEVEL3, -+ "hIOMgr: pInput %p pOutput %p ulShmLength %x\n", -+ hIOMgr->pInput, hIOMgr->pOutput, ulShmLength); -+ DBG_Trace(DBG_LEVEL3, -+ "pSharedMem %p uSMBufSize %x sizeof(SHM) %x\n", -+ hIOMgr->pSharedMem, hIOMgr->uSMBufSize, -+ sizeof(struct SHM)); -+ /* Set up Shared memory addresses for messaging. 
*/ -+ hIOMgr->pMsgInputCtrl = (struct MSG *)((u8 *) -+ hIOMgr->pSharedMem + -+ ulShmLength); -+ hIOMgr->pMsgInput = (u8 *)hIOMgr->pMsgInputCtrl + -+ sizeof(struct MSG); -+ hIOMgr->pMsgOutputCtrl = (struct MSG *)((u8 *)hIOMgr-> -+ pMsgInputCtrl + ulMsgLength / 2); -+ hIOMgr->pMsgOutput = (u8 *)hIOMgr->pMsgOutputCtrl + -+ sizeof(struct MSG); -+ hMsgMgr->uMaxMsgs = ((u8 *)hIOMgr->pMsgOutputCtrl - -+ hIOMgr->pMsgInput) / -+ sizeof(struct MSG_DSPMSG); -+ DBG_Trace(DBG_LEVEL7, "IO MGR SHM details : pSharedMem 0x%x, " -+ "pInput 0x%x, pOutput 0x%x, pMsgInputCtrl 0x%x, " -+ "pMsgInput 0x%x, pMsgOutputCtrl 0x%x, pMsgOutput " -+ "0x%x \n", (u8 *)hIOMgr->pSharedMem, -+ (u8 *)hIOMgr->pInput, (u8 *)hIOMgr->pOutput, -+ (u8 *)hIOMgr->pMsgInputCtrl, -+ (u8 *)hIOMgr->pMsgInput, -+ (u8 *)hIOMgr->pMsgOutputCtrl, -+ (u8 *)hIOMgr->pMsgOutput); -+ DBG_Trace(DBG_LEVEL7, "** (proc) MAX MSGS IN SHARED MEMORY: " -+ "0x%x\n", hMsgMgr->uMaxMsgs); -+ memset((void *) hIOMgr->pSharedMem, 0, sizeof(struct SHM)); -+ } -+#ifndef DSP_TRACEBUF_DISABLED -+ if (DSP_SUCCEEDED(status)) { -+ /* Get the start address of trace buffer */ -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMan, SYS_PUTCBEG, -+ &hIOMgr->ulTraceBufferBegin); -+ if (DSP_FAILED(status)) -+ status = CHNL_E_NOMEMMAP; -+ -+ } -+ hIOMgr->ulGPPReadPointer = hIOMgr->ulTraceBufferBegin = -+ (ulGppVa + ulSeg1Size + ulPadSize) + -+ (hIOMgr->ulTraceBufferBegin - ulDspVa); -+ /* Get the end address of trace buffer */ -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMan, SYS_PUTCEND, -+ &hIOMgr->ulTraceBufferEnd); -+ if (DSP_FAILED(status)) -+ status = CHNL_E_NOMEMMAP; -+ -+ } -+ hIOMgr->ulTraceBufferEnd = (ulGppVa + ulSeg1Size + ulPadSize) + -+ (hIOMgr->ulTraceBufferEnd - ulDspVa); -+ /* Get the current address of DSP write pointer */ -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMan, -+ BRIDGE_SYS_PUTC_current, -+ &hIOMgr->ulTraceBufferCurrent); -+ if (DSP_FAILED(status)) -+ status = CHNL_E_NOMEMMAP; -+ -+ } -+ hIOMgr->ulTraceBufferCurrent = (ulGppVa + ulSeg1Size + -+ ulPadSize) + (hIOMgr-> -+ ulTraceBufferCurrent - ulDspVa); -+ /* Calculate the size of trace buffer */ -+ if (hIOMgr->pMsg) -+ MEM_Free(hIOMgr->pMsg); -+ hIOMgr->pMsg = MEM_Alloc(((hIOMgr->ulTraceBufferEnd - -+ hIOMgr->ulTraceBufferBegin) * -+ hIOMgr->uWordSize) + 2, MEM_NONPAGED); -+ if (!hIOMgr->pMsg) -+ status = DSP_EMEMORY; -+ -+ DBG_Trace(DBG_LEVEL1, "** hIOMgr->pMsg: 0x%x\n", hIOMgr->pMsg); -+ hIOMgr->ulDspVa = ulDspVa; -+ hIOMgr->ulGppVa = (ulGppVa + ulSeg1Size + ulPadSize); -+ } -+#endif -+ IO_EnableInterrupt(hIOMgr->hWmdContext); -+ return status; -+} -+ -+/* -+ * ======== IO_BufSize ======== -+ * Size of shared memory I/O channel. -+ */ -+u32 IO_BufSize(struct IO_MGR *hIOMgr) -+{ -+ DBC_Require(MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)); -+ -+ return hIOMgr->uSMBufSize; -+} -+ -+/* -+ * ======== IO_CancelChnl ======== -+ * Cancel IO on a given PCPY channel. -+ */ -+void IO_CancelChnl(struct IO_MGR *hIOMgr, u32 ulChnl) -+{ -+ struct IO_MGR *pIOMgr = (struct IO_MGR *)hIOMgr; -+ struct SHM *sm; -+ -+ DBC_Require(MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE)); -+ sm = hIOMgr->pSharedMem; -+ -+ /* Inform DSP that we have no more buffers on this channel: */ -+ IO_AndValue(pIOMgr->hWmdContext, struct SHM, sm, hostFreeMask, -+ (~(1 << ulChnl))); -+ -+ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); -+} -+ -+/* -+ * ======== IO_DispatchChnl ======== -+ * Proc-copy chanl dispatch. 
-+ */ -+static void IO_DispatchChnl(IN struct IO_MGR *pIOMgr, -+ IN OUT struct CHNL_OBJECT *pChnl, u32 iMode) -+{ -+ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); -+ -+ DBG_Trace(DBG_LEVEL3, "Entering IO_DispatchChnl \n"); -+ -+ /* See if there is any data available for transfer: */ -+ DBC_Assert(iMode == IO_SERVICE); -+ -+ /* Any channel will do for this mode: */ -+ InputChnl(pIOMgr, pChnl, iMode); -+ OutputChnl(pIOMgr, pChnl, iMode); -+} -+ -+/* -+ * ======== IO_DispatchMsg ======== -+ * Performs I/O dispatch on message queues. -+ */ -+static void IO_DispatchMsg(IN struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr) -+{ -+ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); -+ -+ DBG_Trace(DBG_LEVEL3, "Entering IO_DispatchMsg \n"); -+ -+ /* We are performing both input and output processing. */ -+ InputMsg(pIOMgr, hMsgMgr); -+ OutputMsg(pIOMgr, hMsgMgr); -+} -+ -+/* -+ * ======== IO_DispatchPM ======== -+ * Performs I/O dispatch on PM related messages from DSP -+ */ -+static void IO_DispatchPM(struct work_struct *work) -+{ -+ struct IO_MGR *pIOMgr = -+ container_of(work, struct IO_MGR, io_workq); -+ DSP_STATUS status; -+ u32 pArg[2]; -+ -+ /*DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE));*/ -+ -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM: Entering IO_DispatchPM : \n"); -+ -+ /* Perform Power message processing here */ -+ while (pIOMgr->iQuePowerHead != pIOMgr->iQuePowerTail) { -+ pArg[0] = *(u32 *)&(pIOMgr->dQuePowerMbxVal[pIOMgr-> -+ iQuePowerTail]); -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM - pArg[0] - 0x%x: \n", -+ pArg[0]); -+ /* Send the command to the WMD clk/pwr manager to handle */ -+ if (pArg[0] == MBX_PM_HIBERNATE_EN) { -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Hibernate " -+ "command\n"); -+ status = pIOMgr->pIntfFxns->pfnDevCntrl(pIOMgr-> -+ hWmdContext, WMDIOCTL_PWR_HIBERNATE, pArg); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : " -+ "Hibernation command failed\n"); -+ } -+ } else if (pArg[0] == MBX_PM_OPP_REQ) { -+ pArg[1] = pIOMgr->pSharedMem->oppRequest.rqstOppPt; -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Value of OPP " -+ "value =0x%x \n", pArg[1]); -+ status = pIOMgr->pIntfFxns->pfnDevCntrl(pIOMgr-> -+ hWmdContext, WMDIOCTL_CONSTRAINT_REQUEST, -+ pArg); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Failed " -+ "to set constraint = 0x%x \n", -+ pArg[1]); -+ } -+ -+ } else { -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM - clock control - " -+ "value of msg = 0x%x: \n", pArg[0]); -+ status = pIOMgr->pIntfFxns->pfnDevCntrl(pIOMgr-> -+ hWmdContext, WMDIOCTL_CLK_CTRL, pArg); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, "IO_DispatchPM : Failed " -+ "to control the DSP clk = 0x%x \n", -+ *pArg); -+ } -+ } -+ /* increment the tail count here */ -+ pIOMgr->iQuePowerTail++; -+ if (pIOMgr->iQuePowerTail >= MAX_PM_REQS) -+ pIOMgr->iQuePowerTail = 0; -+ -+ } -+ -+} -+ -+/* -+ * ======== IO_DPC ======== -+ * Deferred procedure call for shared memory channel driver ISR. Carries -+ * out the dispatch of I/O as a non-preemptible event.It can only be -+ * pre-empted by an ISR. 
-+ */
-+void IO_DPC(IN OUT void *pRefData)
-+{
-+	struct IO_MGR *pIOMgr = (struct IO_MGR *)pRefData;
-+	struct CHNL_MGR *pChnlMgr;
-+	struct MSG_MGR *pMsgMgr;
-+	struct DEH_MGR *hDehMgr;
-+
-+	DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE));
-+	pChnlMgr = pIOMgr->hChnlMgr;
-+	DEV_GetMsgMgr(pIOMgr->hDevObject, &pMsgMgr);
-+	DEV_GetDehMgr(pIOMgr->hDevObject, &hDehMgr);
-+	DBC_Require(MEM_IsValidHandle(pChnlMgr, CHNL_MGRSIGNATURE));
-+	DBG_Trace(DBG_LEVEL7, "Entering IO_DPC(0x%x)\n", pRefData);
-+	/* Check value of interrupt register to ensure it is a valid error */
-+	if ((pIOMgr->wIntrVal > DEH_BASE) && (pIOMgr->wIntrVal < DEH_LIMIT)) {
-+		/* notify DSP/BIOS exception */
-+		if (hDehMgr)
-+			WMD_DEH_Notify(hDehMgr, DSP_SYSERROR, pIOMgr->wIntrVal);
-+
-+	}
-+	IO_DispatchChnl(pIOMgr, NULL, IO_SERVICE);
-+#ifdef CHNL_MESSAGES
-+	if (pMsgMgr) {
-+		DBC_Require(MEM_IsValidHandle(pMsgMgr, MSGMGR_SIGNATURE));
-+		IO_DispatchMsg(pIOMgr, pMsgMgr);
-+	}
-+#endif
-+#ifndef DSP_TRACEBUF_DISABLED
-+	if (pIOMgr->wIntrVal & MBX_DBG_CLASS) {
-+		/* notify DSP Trace message */
-+		if (pIOMgr->wIntrVal & MBX_DBG_SYSPRINTF)
-+			PrintDSPDebugTrace(pIOMgr);
-+	}
-+#endif
-+
-+#ifndef DSP_TRACEBUF_DISABLED
-+	PrintDSPDebugTrace(pIOMgr);
-+#endif
-+}
-+
-+
-+/*
-+ * ======== IO_ISR ========
-+ * Main interrupt handler for the shared memory IO manager.
-+ * Calls the WMD's CHNL_ISR to determine if this interrupt is ours, then
-+ * schedules a DPC to dispatch I/O.
-+ */
-+irqreturn_t IO_ISR(int irq, IN void *pRefData)
-+{
-+	struct IO_MGR *hIOMgr = (struct IO_MGR *)pRefData;
-+	bool fSchedDPC;
-+	DBC_Require(irq == INT_MAIL_MPU_IRQ);
-+	DBC_Require(MEM_IsValidHandle(hIOMgr, IO_MGRSIGNATURE));
-+	DBG_Trace(DBG_LEVEL3, "Entering IO_ISR(0x%x)\n", pRefData);
-+
-+	/* Call WMD's CHNLSM_ISR() to see if interrupt is ours, and process. */
-+	if (IO_CALLISR(hIOMgr->hWmdContext, &fSchedDPC, &hIOMgr->wIntrVal)) {
-+		{
-+			DBG_Trace(DBG_LEVEL3, "IO_ISR %x\n", hIOMgr->wIntrVal);
-+			if (hIOMgr->wIntrVal & MBX_PM_CLASS) {
-+				hIOMgr->dQuePowerMbxVal[hIOMgr->iQuePowerHead] =
-+					hIOMgr->wIntrVal;
-+				hIOMgr->iQuePowerHead++;
-+				if (hIOMgr->iQuePowerHead >= MAX_PM_REQS)
-+					hIOMgr->iQuePowerHead = 0;
-+
-+				queue_work(bridge_workqueue, &hIOMgr->io_workq);
-+			}
-+			if (hIOMgr->wIntrVal == MBX_DEH_RESET) {
-+				DBG_Trace(DBG_LEVEL6, "*** DSP RESET ***\n");
-+				hIOMgr->wIntrVal = 0;
-+			} else if (fSchedDPC) {
-+				/* PROC-COPY defer i/o */
-+				DPC_Schedule(hIOMgr->hDPC);
-+			}
-+		}
-+	} else
-+		/* Ensure that, if WMD didn't claim it, the IRQ is shared. */
-+		DBC_Ensure(hIOMgr->fSharedIRQ);
-+	return IRQ_HANDLED;
-+}
-+
-+/*
-+ * ======== IO_RequestChnl ========
-+ * Purpose:
-+ * Request channel I/O from the DSP. Sets flags in shared memory, then
-+ * interrupts the DSP.
-+ */ -+void IO_RequestChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, -+ u32 iMode, OUT u16 *pwMbVal) -+{ -+ struct CHNL_MGR *pChnlMgr; -+ struct SHM *sm; -+ DBC_Require(pChnl != NULL); -+ DBC_Require(pwMbVal != NULL); -+ pChnlMgr = pIOMgr->hChnlMgr; -+ sm = pIOMgr->pSharedMem; -+ if (iMode == IO_INPUT) { -+ /* Assertion fires if CHNL_AddIOReq() called on a stream -+ * which was cancelled, or attached to a dead board: */ -+ DBC_Assert((pChnl->dwState == CHNL_STATEREADY) || -+ (pChnl->dwState == CHNL_STATEEOS)); -+ /* Indicate to the DSP we have a buffer available for input: */ -+ IO_OrValue(pIOMgr->hWmdContext, struct SHM, sm, hostFreeMask, -+ (1 << pChnl->uId)); -+ *pwMbVal = MBX_PCPY_CLASS; -+ } else if (iMode == IO_OUTPUT) { -+ /* This assertion fails if CHNL_AddIOReq() was called on a -+ * stream which was cancelled, or attached to a dead board: */ -+ DBC_Assert((pChnl->dwState & ~CHNL_STATEEOS) == -+ CHNL_STATEREADY); -+ /* Record the fact that we have a buffer available for -+ * output: */ -+ pChnlMgr->dwOutputMask |= (1 << pChnl->uId); -+ } else { -+ DBC_Assert(iMode); /* Shouldn't get here. */ -+ } -+} -+ -+/* -+ * ======== IO_Schedule ======== -+ * Schedule DPC for IO. -+ */ -+void IO_Schedule(struct IO_MGR *pIOMgr) -+{ -+ DBC_Require(MEM_IsValidHandle(pIOMgr, IO_MGRSIGNATURE)); -+ -+ DPC_Schedule(pIOMgr->hDPC); -+} -+ -+/* -+ * ======== FindReadyOutput ======== -+ * Search for a host output channel which is ready to send. If this is -+ * called as a result of servicing the DPC, then implement a round -+ * robin search; otherwise, this was called by a client thread (via -+ * IO_Dispatch()), so just start searching from the current channel id. -+ */ -+static u32 FindReadyOutput(struct CHNL_MGR *pChnlMgr, -+ struct CHNL_OBJECT *pChnl, u32 dwMask) -+{ -+ u32 uRetval = OUTPUTNOTREADY; -+ u32 id, startId; -+ u32 shift; -+ -+ id = (pChnl != NULL ? pChnl->uId : (pChnlMgr->dwLastOutput + 1)); -+ id = ((id == CHNL_MAXCHANNELS) ? 0 : id); -+ DBC_Assert(id < CHNL_MAXCHANNELS); -+ if (dwMask) { -+ shift = (1 << id); -+ startId = id; -+ do { -+ if (dwMask & shift) { -+ uRetval = id; -+ if (pChnl == NULL) -+ pChnlMgr->dwLastOutput = id; -+ -+ break; -+ } -+ id = id + 1; -+ id = ((id == CHNL_MAXCHANNELS) ? 0 : id); -+ shift = (1 << id); -+ } while (id != startId); -+ } -+ DBC_Ensure((uRetval == OUTPUTNOTREADY) || (uRetval < CHNL_MAXCHANNELS)); -+ return uRetval; -+} -+ -+/* -+ * ======== InputChnl ======== -+ * Dispatch a buffer on an input channel. -+ */ -+static void InputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, -+ u32 iMode) -+{ -+ struct CHNL_MGR *pChnlMgr; -+ struct SHM *sm; -+ u32 chnlId; -+ u32 uBytes; -+ struct CHNL_IRP *pChirp = NULL; -+ u32 dwArg; -+ bool fClearChnl = false; -+ bool fNotifyClient = false; -+ -+ sm = pIOMgr->pSharedMem; -+ pChnlMgr = pIOMgr->hChnlMgr; -+ -+ DBG_Trace(DBG_LEVEL3, "> InputChnl\n"); -+ -+ /* Attempt to perform input.... */ -+ if (!IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, inputFull)) -+ goto func_end; -+ -+ uBytes = IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, inputSize) * -+ pChnlMgr->uWordSize; -+ chnlId = IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, inputId); -+ dwArg = IO_GetLong(pIOMgr->hWmdContext, struct SHM, sm, arg); -+ if (chnlId >= CHNL_MAXCHANNELS) { -+ /* Shouldn't be here: would indicate corrupted SHM. 
*/ -+ DBC_Assert(chnlId); -+ goto func_end; -+ } -+ pChnl = pChnlMgr->apChannel[chnlId]; -+ if ((pChnl != NULL) && CHNL_IsInput(pChnl->uMode)) { -+ if ((pChnl->dwState & ~CHNL_STATEEOS) == CHNL_STATEREADY) { -+ if (!pChnl->pIORequests) -+ goto func_end; -+ /* Get the I/O request, and attempt a transfer: */ -+ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl-> -+ pIORequests); -+ if (pChirp) { -+ pChnl->cIOReqs--; -+ DBC_Assert(pChnl->cIOReqs >= 0); -+ /* Ensure we don't overflow the client's -+ * buffer: */ -+ uBytes = min(uBytes, pChirp->cBytes); -+ /* Transfer buffer from DSP side: */ -+ uBytes = ReadData(pIOMgr->hWmdContext, -+ pChirp->pHostSysBuf, -+ pIOMgr->pInput, uBytes); -+ pChnl->cBytesMoved += uBytes; -+ pChirp->cBytes = uBytes; -+ pChirp->dwArg = dwArg; -+ pChirp->status = CHNL_IOCSTATCOMPLETE; -+ DBG_Trace(DBG_LEVEL7, "Input Chnl:status= 0x%x " -+ "\n", *((RMS_WORD *)(pChirp-> -+ pHostSysBuf))); -+ if (uBytes == 0) { -+ /* This assertion fails if the DSP -+ * sends EOS more than once on this -+ * channel: */ -+ DBC_Assert(!(pChnl->dwState & -+ CHNL_STATEEOS)); -+ /* Zero bytes indicates EOS. Update -+ * IOC status for this chirp, and also -+ * the channel state: */ -+ pChirp->status |= CHNL_IOCSTATEOS; -+ pChnl->dwState |= CHNL_STATEEOS; -+ /* Notify that end of stream has -+ * occurred */ -+ NTFY_Notify(pChnl->hNtfy, -+ DSP_STREAMDONE); -+ DBG_Trace(DBG_LEVEL7, "Input Chnl NTFY " -+ "chnl = 0x%x\n", pChnl); -+ } -+ /* Tell DSP if no more I/O buffers available: */ -+ if (!pChnl->pIORequests) -+ goto func_end; -+ if (LST_IsEmpty(pChnl->pIORequests)) { -+ IO_AndValue(pIOMgr->hWmdContext, -+ struct SHM, sm, hostFreeMask, -+ ~(1 << pChnl->uId)); -+ } -+ fClearChnl = true; -+ fNotifyClient = true; -+ } else { -+ /* Input full for this channel, but we have no -+ * buffers available. The channel must be -+ * "idling". Clear out the physical input -+ * channel. */ -+ fClearChnl = true; -+ } -+ } else { -+ /* Input channel cancelled: clear input channel. */ -+ fClearChnl = true; -+ } -+ } else { -+ /* DPC fired after host closed channel: clear input channel. */ -+ fClearChnl = true; -+ } -+ if (fClearChnl) { -+ /* Indicate to the DSP we have read the input: */ -+ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, inputFull, 0); -+ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); -+ } -+ if (fNotifyClient) { -+ /* Notify client with IO completion record: */ -+ NotifyChnlComplete(pChnl, pChirp); -+ } -+func_end: -+ DBG_Trace(DBG_LEVEL3, "< InputChnl\n"); -+} -+ -+/* -+ * ======== InputMsg ======== -+ * Copies messages from shared memory to the message queues. -+ */ -+static void InputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr) -+{ -+ u32 uMsgs; -+ u32 i; -+ u8 *pMsgInput; -+ struct MSG_QUEUE *hMsgQueue; -+ struct MSG_FRAME *pMsg; -+ struct MSG_DSPMSG msg; -+ struct MSG *pCtrl; -+ u32 fInputEmpty; -+ u32 addr; -+ -+ pCtrl = pIOMgr->pMsgInputCtrl; -+ /* Get the number of input messages to be read. 
*/ -+ fInputEmpty = IO_GetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, -+ bufEmpty); -+ uMsgs = IO_GetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, size); -+ if (fInputEmpty || uMsgs >= hMsgMgr->uMaxMsgs) -+ return; -+ -+ pMsgInput = pIOMgr->pMsgInput; -+ for (i = 0; i < uMsgs; i++) { -+ /* Read the next message */ -+ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->msg.dwCmd); -+ msg.msg.dwCmd = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); -+ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->msg.dwArg1); -+ msg.msg.dwArg1 = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); -+ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->msg.dwArg2); -+ msg.msg.dwArg2 = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); -+ addr = (u32)&(((struct MSG_DSPMSG *)pMsgInput)->dwId); -+ msg.dwId = ReadExt32BitDspData(pIOMgr->hWmdContext, addr); -+ pMsgInput += sizeof(struct MSG_DSPMSG); -+ if (!hMsgMgr->queueList) -+ goto func_end; -+ -+ /* Determine which queue to put the message in */ -+ hMsgQueue = (struct MSG_QUEUE *)LST_First(hMsgMgr->queueList); -+ DBG_Trace(DBG_LEVEL7, "InputMsg RECVD: dwCmd=0x%x dwArg1=0x%x " -+ "dwArg2=0x%x dwId=0x%x \n", msg.msg.dwCmd, -+ msg.msg.dwArg1, msg.msg.dwArg2, msg.dwId); -+ /* Interrupt may occur before shared memory and message -+ * input locations have been set up. If all nodes were -+ * cleaned up, hMsgMgr->uMaxMsgs should be 0. */ -+ if (hMsgQueue && uMsgs > hMsgMgr->uMaxMsgs) -+ goto func_end; -+ -+ while (hMsgQueue != NULL) { -+ if (msg.dwId == hMsgQueue->dwId) { -+ /* Found it */ -+ if (msg.msg.dwCmd == RMS_EXITACK) { -+ /* The exit message does not get -+ * queued */ -+ /* Call the node exit notification */ -+ /* Node handle */ /* status */ -+ (*hMsgMgr->onExit)((HANDLE)hMsgQueue-> -+ hArg, msg.msg.dwArg1); -+ } else { -+ /* Not an exit acknowledgement, queue -+ * the message */ -+ if (!hMsgQueue->msgFreeList) -+ goto func_end; -+ pMsg = (struct MSG_FRAME *)LST_GetHead -+ (hMsgQueue->msgFreeList); -+ if (hMsgQueue->msgUsedList && pMsg) { -+ pMsg->msgData = msg; -+ LST_PutTail(hMsgQueue-> -+ msgUsedList, -+ (struct LST_ELEM *)pMsg); -+ NTFY_Notify(hMsgQueue->hNtfy, -+ DSP_NODEMESSAGEREADY); -+ SYNC_SetEvent(hMsgQueue-> -+ hSyncEvent); -+ } else { -+ /* No free frame to copy the -+ * message into */ -+ DBG_Trace(DBG_LEVEL7, "NO FREE " -+ "MSG FRAMES, DISCARDING" -+ " MESSAGE\n"); -+ } -+ } -+ break; -+ } -+ -+ if (!hMsgMgr->queueList || !hMsgQueue) -+ goto func_end; -+ hMsgQueue = (struct MSG_QUEUE *)LST_Next(hMsgMgr-> -+ queueList, (struct LST_ELEM *)hMsgQueue); -+ } -+ } -+ /* Set the post SWI flag */ -+ if (uMsgs > 0) { -+ /* Tell the DSP we've read the messages */ -+ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, bufEmpty, -+ true); -+ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, postSWI, -+ true); -+ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); -+ } -+func_end: -+ return; -+ -+} -+ -+/* -+ * ======== NotifyChnlComplete ======== -+ * Purpose: -+ * Signal the channel event, notifying the client that I/O has completed. -+ */ -+static void NotifyChnlComplete(struct CHNL_OBJECT *pChnl, -+ struct CHNL_IRP *pChirp) -+{ -+ bool fSignalEvent; -+ -+ DBC_Require(MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)); -+ DBC_Require(pChnl->hSyncEvent != NULL); -+ /* Note: we signal the channel event only if the queue of IO -+ * completions is empty. If it is not empty, the event is sure to be -+ * signalled by the only IO completion list consumer: -+ * WMD_CHNL_GetIOC(). 
*/ -+ fSignalEvent = LST_IsEmpty(pChnl->pIOCompletions); -+ /* Enqueue the IO completion info for the client: */ -+ LST_PutTail(pChnl->pIOCompletions, (struct LST_ELEM *) pChirp); -+ pChnl->cIOCs++; -+ DBC_Assert(pChnl->cIOCs <= pChnl->cChirps); -+ /* Signal the channel event (if not already set) that IO is complete: */ -+ if (fSignalEvent) -+ SYNC_SetEvent(pChnl->hSyncEvent); -+ -+ /* Notify that IO is complete */ -+ NTFY_Notify(pChnl->hNtfy, DSP_STREAMIOCOMPLETION); -+} -+ -+/* -+ * ======== OutputChnl ======== -+ * Purpose: -+ * Dispatch a buffer on an output channel. -+ */ -+static void OutputChnl(struct IO_MGR *pIOMgr, struct CHNL_OBJECT *pChnl, -+ u32 iMode) -+{ -+ struct CHNL_MGR *pChnlMgr; -+ struct SHM *sm; -+ u32 chnlId; -+ struct CHNL_IRP *pChirp; -+ u32 dwDspFMask; -+ -+ pChnlMgr = pIOMgr->hChnlMgr; -+ sm = pIOMgr->pSharedMem; -+ DBG_Trace(DBG_LEVEL3, "> OutputChnl\n"); -+ /* Attempt to perform output: */ -+ if (IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, outputFull)) -+ goto func_end; -+ -+ if (pChnl && !((pChnl->dwState & ~CHNL_STATEEOS) == CHNL_STATEREADY)) -+ goto func_end; -+ -+ /* Look to see if both a PC and DSP output channel are ready: */ -+ dwDspFMask = IO_GetValue(pIOMgr->hWmdContext, struct SHM, sm, -+ dspFreeMask); -+ chnlId = FindReadyOutput(pChnlMgr, pChnl, (pChnlMgr->dwOutputMask & -+ dwDspFMask)); -+ if (chnlId == OUTPUTNOTREADY) -+ goto func_end; -+ -+ pChnl = pChnlMgr->apChannel[chnlId]; -+ if (!pChnl || !pChnl->pIORequests) { -+ /* Shouldn't get here: */ -+ goto func_end; -+ } -+ /* Get the I/O request, and attempt a transfer: */ -+ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIORequests); -+ if (!pChirp) -+ goto func_end; -+ -+ pChnl->cIOReqs--; -+ if (pChnl->cIOReqs < 0 || !pChnl->pIORequests) -+ goto func_end; -+ -+ /* Record fact that no more I/O buffers available: */ -+ if (LST_IsEmpty(pChnl->pIORequests)) -+ pChnlMgr->dwOutputMask &= ~(1 << chnlId); -+ -+ /* Transfer buffer to DSP side: */ -+ pChirp->cBytes = WriteData(pIOMgr->hWmdContext, pIOMgr->pOutput, -+ pChirp->pHostSysBuf, min(pIOMgr->uSMBufSize, pChirp-> -+ cBytes)); -+ pChnl->cBytesMoved += pChirp->cBytes; -+ /* Write all 32 bits of arg */ -+ IO_SetLong(pIOMgr->hWmdContext, struct SHM, sm, arg, pChirp->dwArg); -+#if _CHNL_WORDSIZE == 2 -+ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputId, -+ (u16)chnlId); -+ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputSize, -+ (u16)(pChirp->cBytes + (pChnlMgr->uWordSize-1)) / -+ (u16)pChnlMgr->uWordSize); -+#else -+ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputId, chnlId); -+ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputSize, -+ (pChirp->cBytes + (pChnlMgr->uWordSize - 1)) / pChnlMgr-> -+ uWordSize); -+#endif -+ IO_SetValue(pIOMgr->hWmdContext, struct SHM, sm, outputFull, 1); -+ /* Indicate to the DSP we have written the output: */ -+ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); -+ /* Notify client with IO completion record (keep EOS) */ -+ pChirp->status &= CHNL_IOCSTATEOS; -+ NotifyChnlComplete(pChnl, pChirp); -+ /* Notify if stream is done. */ -+ if (pChirp->status & CHNL_IOCSTATEOS) -+ NTFY_Notify(pChnl->hNtfy, DSP_STREAMDONE); -+ -+func_end: -+ DBG_Trace(DBG_LEVEL3, "< OutputChnl\n"); -+} -+/* -+ * ======== OutputMsg ======== -+ * Copies messages from the message queues to the shared memory. 
-+ */ -+static void OutputMsg(struct IO_MGR *pIOMgr, struct MSG_MGR *hMsgMgr) -+{ -+ u32 uMsgs = 0; -+ u32 i; -+ u8 *pMsgOutput; -+ struct MSG_FRAME *pMsg; -+ struct MSG *pCtrl; -+ u32 fOutputEmpty; -+ u32 val; -+ u32 addr; -+ -+ pCtrl = pIOMgr->pMsgOutputCtrl; -+ -+ /* Check if output has been cleared */ -+ fOutputEmpty = IO_GetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, -+ bufEmpty); -+ if (fOutputEmpty) { -+ uMsgs = (hMsgMgr->uMsgsPending > hMsgMgr->uMaxMsgs) ? -+ hMsgMgr->uMaxMsgs : hMsgMgr->uMsgsPending; -+ pMsgOutput = pIOMgr->pMsgOutput; -+ /* Copy uMsgs messages into shared memory */ -+ for (i = 0; i < uMsgs; i++) { -+ if (!hMsgMgr->msgUsedList) { -+ DBG_Trace(DBG_LEVEL3, "msgUsedList is NULL\n"); -+ pMsg = NULL; -+ goto func_end; -+ } else -+ pMsg = (struct MSG_FRAME *)LST_GetHead( -+ hMsgMgr->msgUsedList); -+ if (pMsg != NULL) { -+ val = (pMsg->msgData).dwId; -+ addr = (u32)&(((struct MSG_DSPMSG *) -+ pMsgOutput)->dwId); -+ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, -+ val); -+ val = (pMsg->msgData).msg.dwCmd; -+ addr = (u32)&((((struct MSG_DSPMSG *) -+ pMsgOutput)->msg).dwCmd); -+ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, -+ val); -+ val = (pMsg->msgData).msg.dwArg1; -+ addr = -+ (u32)&((((struct MSG_DSPMSG *) -+ pMsgOutput)->msg).dwArg1); -+ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, -+ val); -+ val = (pMsg->msgData).msg.dwArg2; -+ addr = -+ (u32)&((((struct MSG_DSPMSG *) -+ pMsgOutput)->msg).dwArg2); -+ WriteExt32BitDspData(pIOMgr->hWmdContext, addr, -+ val); -+ pMsgOutput += sizeof(struct MSG_DSPMSG); -+ if (!hMsgMgr->msgFreeList) -+ goto func_end; -+ LST_PutTail(hMsgMgr->msgFreeList, -+ (struct LST_ELEM *) pMsg); -+ SYNC_SetEvent(hMsgMgr->hSyncEvent); -+ } else { -+ DBG_Trace(DBG_LEVEL3, "pMsg is NULL\n"); -+ } -+ } -+ -+ if (uMsgs > 0) { -+ hMsgMgr->uMsgsPending -= uMsgs; -+#if _CHNL_WORDSIZE == 2 -+ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, -+ size, (u16)uMsgs); -+#else -+ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, -+ size, uMsgs); -+#endif -+ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, -+ bufEmpty, false); -+ /* Set the post SWI flag */ -+ IO_SetValue(pIOMgr->hWmdContext, struct MSG, pCtrl, -+ postSWI, true); -+ /* Tell the DSP we have written the output. */ -+ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, MBX_PCPY_CLASS); -+ } -+ } -+func_end: -+ return; -+ -+} -+ -+/* -+ * ======== registerSHMSegs ======== -+ * purpose: -+ * Registers GPP SM segment with CMM. 
-+ */ -+static DSP_STATUS registerSHMSegs(struct IO_MGR *hIOMgr, -+ struct COD_MANAGER *hCodMan, -+ u32 dwGPPBasePA) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 ulShm0_Base = 0; -+ u32 ulShm0_End = 0; -+ u32 ulShm0_RsrvdStart = 0; -+ u32 ulRsrvdSize = 0; -+ u32 ulGppPhys; -+ u32 ulDspVirt; -+ u32 ulShmSegId0 = 0; -+ u32 dwOffset, dwGPPBaseVA, ulDSPSize; -+ -+ /* Read address and size info for first SM region.*/ -+ /* Get start of 1st SM Heap region */ -+ status = COD_GetSymValue(hCodMan, SHM0_SHARED_BASE_SYM, &ulShm0_Base); -+ DBC_Assert(ulShm0_Base != 0); -+ /* Get end of 1st SM Heap region */ -+ if (DSP_SUCCEEDED(status)) { -+ /* Get start and length of message part of shared memory */ -+ status = COD_GetSymValue(hCodMan, SHM0_SHARED_END_SYM, -+ &ulShm0_End); -+ DBC_Assert(ulShm0_End != 0); -+ } -+ /* start of Gpp reserved region */ -+ if (DSP_SUCCEEDED(status)) { -+ /* Get start and length of message part of shared memory */ -+ status = COD_GetSymValue(hCodMan, SHM0_SHARED_RESERVED_BASE_SYM, -+ &ulShm0_RsrvdStart); -+ DBG_Trace(DBG_LEVEL1, "***ulShm0_RsrvdStart 0x%x \n", -+ ulShm0_RsrvdStart); -+ DBC_Assert(ulShm0_RsrvdStart != 0); -+ } -+ /* Register with CMM */ -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetCmmMgr(hIOMgr->hDevObject, &hIOMgr->hCmmMgr); -+ if (DSP_SUCCEEDED(status)) { -+ status = CMM_UnRegisterGPPSMSeg(hIOMgr->hCmmMgr, -+ CMM_ALLSEGMENTS); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, "ERROR - Unable to " -+ "Un-Register SM segments \n"); -+ } -+ } else { -+ DBG_Trace(DBG_LEVEL7, "ERROR - Unable to get CMM " -+ "Handle \n"); -+ } -+ } -+ /* Register new SM region(s) */ -+ if (DSP_SUCCEEDED(status) && (ulShm0_End - ulShm0_Base) > 0) { -+ /* calc size (bytes) of SM the GPP can alloc from */ -+ ulRsrvdSize = (ulShm0_End - ulShm0_RsrvdStart + 1) * hIOMgr-> -+ uWordSize; -+ DBC_Assert(ulRsrvdSize > 0); -+ /* calc size of SM DSP can alloc from */ -+ ulDSPSize = (ulShm0_RsrvdStart - ulShm0_Base) * hIOMgr-> -+ uWordSize; -+ DBC_Assert(ulDSPSize > 0); -+ /* First TLB entry reserved for Bridge SM use.*/ -+ ulGppPhys = hIOMgr->extProcInfo.tyTlb[0].ulGppPhys; -+ /* get size in bytes */ -+ ulDspVirt = hIOMgr->extProcInfo.tyTlb[0].ulDspVirt * hIOMgr-> -+ uWordSize; -+ /* Calc byte offset used to convert GPP phys <-> DSP byte -+ * address.*/ -+ if (dwGPPBasePA > ulDspVirt) -+ dwOffset = dwGPPBasePA - ulDspVirt; -+ else -+ dwOffset = ulDspVirt - dwGPPBasePA; -+ -+ DBC_Assert(ulShm0_RsrvdStart * hIOMgr->uWordSize >= ulDspVirt); -+ /* calc Gpp phys base of SM region */ -+ /* Linux - this is actually uncached kernel virtual address*/ -+ dwGPPBaseVA = ulGppPhys + ulShm0_RsrvdStart * hIOMgr->uWordSize -+ - ulDspVirt; -+ /* calc Gpp phys base of SM region */ -+ /* Linux - this is the physical address*/ -+ dwGPPBasePA = dwGPPBasePA + ulShm0_RsrvdStart * hIOMgr-> -+ uWordSize - ulDspVirt; -+ /* Register SM Segment 0.*/ -+ status = CMM_RegisterGPPSMSeg(hIOMgr->hCmmMgr, dwGPPBasePA, -+ ulRsrvdSize, dwOffset, (dwGPPBasePA > ulDspVirt) ? -+ CMM_ADDTODSPPA : CMM_SUBFROMDSPPA, -+ (u32)(ulShm0_Base * hIOMgr->uWordSize), -+ ulDSPSize, &ulShmSegId0, dwGPPBaseVA); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, "ERROR - Failed to register SM " -+ "Seg 0 \n"); -+ } -+ /* first SM region is segId = 1 */ -+ DBC_Assert(ulShmSegId0 == 1); -+ } -+ return status; -+} -+ -+/* -+ * ======== ReadData ======== -+ * Copies buffers from the shared memory to the host buffer. 
-+ */ -+static u32 ReadData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, -+ void *pSrc, u32 uSize) -+{ -+ memcpy(pDest, pSrc, uSize); -+ return uSize; -+} -+ -+/* -+ * ======== WriteData ======== -+ * Copies buffers from the host side buffer to the shared memory. -+ */ -+static u32 WriteData(struct WMD_DEV_CONTEXT *hDevContext, void *pDest, -+ void *pSrc, u32 uSize) -+{ -+ memcpy(pDest, pSrc, uSize); -+ return uSize; -+} -+ -+/* ZCPY IO routines. */ -+void IO_IntrDSP2(IN struct IO_MGR *pIOMgr, IN u16 wMbVal) -+{ -+ CHNLSM_InterruptDSP2(pIOMgr->hWmdContext, wMbVal); -+} -+ -+/* -+ * ======== IO_SHMcontrol ======== -+ * Sets the requested SHM setting. -+ */ -+DSP_STATUS IO_SHMsetting(IN struct IO_MGR *hIOMgr, IN enum SHM_DESCTYPE desc, -+ IN void *pArgs) -+{ -+#ifdef CONFIG_BRIDGE_DVFS -+ u32 i; -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+ -+ switch (desc) { -+ case SHM_CURROPP: -+ /* Update the shared memory with requested OPP information */ -+ if (pArgs != NULL) -+ hIOMgr->pSharedMem->oppTableStruct.currOppPt = -+ *(u32 *)pArgs; -+ else -+ return DSP_EFAIL; -+ break; -+ case SHM_OPPINFO: -+ /* Update the shared memory with the voltage, frequency, -+ min and max frequency values for an OPP */ -+ for (i = 0; i <= dsp_max_opps; i++) { -+ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i].voltage = -+ vdd1_dsp_freq[i][0]; -+ DBG_Trace(DBG_LEVEL5, "OPP shared memory -voltage: " -+ "%d\n", hIOMgr->pSharedMem->oppTableStruct. -+ oppPoint[i].voltage); -+ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i]. -+ frequency = vdd1_dsp_freq[i][1]; -+ DBG_Trace(DBG_LEVEL5, "OPP shared memory -frequency: " -+ "%d\n", hIOMgr->pSharedMem->oppTableStruct. -+ oppPoint[i].frequency); -+ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i].minFreq = -+ vdd1_dsp_freq[i][2]; -+ DBG_Trace(DBG_LEVEL5, "OPP shared memory -min value: " -+ "%d\n", hIOMgr->pSharedMem->oppTableStruct. -+ oppPoint[i].minFreq); -+ hIOMgr->pSharedMem->oppTableStruct.oppPoint[i].maxFreq = -+ vdd1_dsp_freq[i][3]; -+ DBG_Trace(DBG_LEVEL5, "OPP shared memory -max value: " -+ "%d\n", hIOMgr->pSharedMem->oppTableStruct. 
-+ oppPoint[i].maxFreq); -+ } -+ hIOMgr->pSharedMem->oppTableStruct.numOppPts = dsp_max_opps; -+ DBG_Trace(DBG_LEVEL5, "OPP shared memory - max OPP number: " -+ "%d\n", hIOMgr->pSharedMem->oppTableStruct.numOppPts); -+ /* Update the current OPP number */ -+ if (pdata->dsp_get_opp) -+ i = (*pdata->dsp_get_opp)(); -+ hIOMgr->pSharedMem->oppTableStruct.currOppPt = i; -+ DBG_Trace(DBG_LEVEL7, "OPP value programmed to shared memory: " -+ "%d\n", i); -+ break; -+ case SHM_GETOPP: -+ /* Get the OPP that DSP has requested */ -+ *(u32 *)pArgs = hIOMgr->pSharedMem->oppRequest.rqstOppPt; -+ break; -+ default: -+ break; -+ } -+#endif -+ return DSP_SOK; -+} -+ -+/* -+ * ======== WMD_IO_GetProcLoad ======== -+ * Gets the Processor's Load information -+ */ -+DSP_STATUS WMD_IO_GetProcLoad(IN struct IO_MGR *hIOMgr, -+ OUT struct DSP_PROCLOADSTAT *pProcStat) -+{ -+ pProcStat->uCurrLoad = hIOMgr->pSharedMem->loadMonInfo.currDspLoad; -+ pProcStat->uPredictedLoad = hIOMgr->pSharedMem->loadMonInfo.predDspLoad; -+ pProcStat->uCurrDspFreq = hIOMgr->pSharedMem->loadMonInfo.currDspFreq; -+ pProcStat->uPredictedFreq = hIOMgr->pSharedMem->loadMonInfo.predDspFreq; -+ -+ DBG_Trace(DBG_LEVEL4, "Curr Load =%d, Pred Load = %d, Curr Freq = %d, " -+ "Pred Freq = %d\n", pProcStat->uCurrLoad, -+ pProcStat->uPredictedLoad, pProcStat->uCurrDspFreq, -+ pProcStat->uPredictedFreq); -+ return DSP_SOK; -+} -+ -+#ifndef DSP_TRACEBUF_DISABLED -+void PrintDSPDebugTrace(struct IO_MGR *hIOMgr) -+{ -+ u32 ulNewMessageLength = 0, ulGPPCurPointer; -+ -+ GT_0trace(dsp_trace_mask, GT_ENTER, "Entering PrintDSPDebugTrace\n"); -+ -+ while (true) { -+ /* Get the DSP current pointer */ -+ ulGPPCurPointer = *(u32 *) (hIOMgr->ulTraceBufferCurrent); -+ ulGPPCurPointer = hIOMgr->ulGppVa + (ulGPPCurPointer - -+ hIOMgr->ulDspVa); -+ -+ /* No new debug messages available yet */ -+ if (ulGPPCurPointer == hIOMgr->ulGPPReadPointer) -+ break; -+ -+ /* Continuous data */ -+ else if (ulGPPCurPointer > hIOMgr->ulGPPReadPointer) { -+ ulNewMessageLength = ulGPPCurPointer - hIOMgr-> -+ ulGPPReadPointer; -+ -+ memcpy(hIOMgr->pMsg, (char *)hIOMgr->ulGPPReadPointer, -+ ulNewMessageLength); -+ hIOMgr->pMsg[ulNewMessageLength] = '\0'; -+ /* Advance the GPP trace pointer to DSP current -+ * pointer */ -+ hIOMgr->ulGPPReadPointer += ulNewMessageLength; -+ /* Print the trace messages */ -+ GT_0trace(dsp_trace_mask, GT_1CLASS, hIOMgr->pMsg); -+ } -+ /* Handle trace buffer wraparound */ -+ else if (ulGPPCurPointer < hIOMgr->ulGPPReadPointer) { -+ memcpy(hIOMgr->pMsg, (char *)hIOMgr->ulGPPReadPointer, -+ hIOMgr->ulTraceBufferEnd - -+ hIOMgr->ulGPPReadPointer); -+ ulNewMessageLength = ulGPPCurPointer - -+ hIOMgr->ulTraceBufferBegin; -+ memcpy(&hIOMgr->pMsg[hIOMgr->ulTraceBufferEnd - -+ hIOMgr->ulGPPReadPointer], -+ (char *)hIOMgr->ulTraceBufferBegin, -+ ulNewMessageLength); -+ hIOMgr->pMsg[hIOMgr->ulTraceBufferEnd - -+ hIOMgr->ulGPPReadPointer + -+ ulNewMessageLength] = '\0'; -+ /* Advance the GPP trace pointer to DSP current -+ * pointer */ -+ hIOMgr->ulGPPReadPointer = hIOMgr->ulTraceBufferBegin + -+ ulNewMessageLength; -+ /* Print the trace messages */ -+ GT_0trace(dsp_trace_mask, GT_1CLASS, hIOMgr->pMsg); -+ } -+ } -+} -+#endif -+ -+/* -+ * ======== PackTraceBuffer ======== -+ * Removes extra nulls from the trace buffer returned from the DSP. -+ * Works even on buffers that already are packed (null removed); but has -+ * one bug in that case -- loses the last character (replaces with '\0'). -+ * Continues through conversion for full set of nBytes input characters. 
-+ * Parameters: -+ * lpBuf: Pointer to input/output buffer -+ * nBytes: Number of characters in the buffer -+ * ulNumWords: Number of DSP words in the buffer. Indicates potential -+ * number of extra carriage returns to generate. -+ * Returns: -+ * DSP_SOK: Success. -+ * DSP_EMEMORY: Unable to allocate memory. -+ * Requires: -+ * lpBuf must be a fully allocated writable block of at least nBytes. -+ * There are no more than ulNumWords extra characters needed (the number of -+ * linefeeds minus the number of NULLS in the input buffer). -+ */ -+#if (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE -+static DSP_STATUS PackTraceBuffer(char *lpBuf, u32 nBytes, u32 ulNumWords) -+{ -+ DSP_STATUS status = DSP_SOK; -+ char *lpTmpBuf; -+ char *lpBufStart; -+ char *lpTmpStart; -+ u32 nCnt; -+ char thisChar; -+ -+ /* tmp workspace, 1 KB longer than input buf */ -+ lpTmpBuf = MEM_Calloc((nBytes + ulNumWords), MEM_PAGED); -+ if (lpTmpBuf == NULL) { -+ DBG_Trace(DBG_LEVEL7, "PackTrace buffer:OutofMemory \n"); -+ status = DSP_EMEMORY; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ lpBufStart = lpBuf; -+ lpTmpStart = lpTmpBuf; -+ for (nCnt = nBytes; nCnt > 0; nCnt--) { -+ thisChar = *lpBuf++; -+ switch (thisChar) { -+ case '\0': /* Skip null bytes */ -+ break; -+ case '\n': /* Convert \n to \r\n */ -+ /* NOTE: do not reverse order; Some OS */ -+ /* editors control doesn't understand "\n\r" */ -+ *lpTmpBuf++ = '\r'; -+ *lpTmpBuf++ = '\n'; -+ break; -+ default: /* Copy in the actual ascii byte */ -+ *lpTmpBuf++ = thisChar; -+ break; -+ } -+ } -+ *lpTmpBuf = '\0'; /* Make sure tmp buf is null terminated */ -+ /* Cut output down to input buf size */ -+ strncpy(lpBufStart, lpTmpStart, nBytes); -+ /*Make sure output is null terminated */ -+ lpBufStart[nBytes - 1] = '\0'; -+ MEM_Free(lpTmpStart); -+ } -+ -+ return status; -+} -+#endif /* (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE */ -+ -+/* -+ * ======== PrintDspTraceBuffer ======== -+ * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled). -+ * Parameters: -+ * hDehMgr: Handle to DEH manager object -+ * number of extra carriage returns to generate. -+ * Returns: -+ * DSP_SOK: Success. -+ * DSP_EMEMORY: Unable to allocate memory. -+ * Requires: -+ * hDehMgr muse be valid. Checked in WMD_DEH_Notify. 
-+ */ -+DSP_STATUS PrintDspTraceBuffer(struct WMD_DEV_CONTEXT *hWmdContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+#if (defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)) && GT_TRACE -+ struct COD_MANAGER *hCodMgr; -+ u32 ulTraceEnd; -+ u32 ulTraceBegin; -+ u32 ulNumBytes = 0; -+ u32 ulNumWords = 0; -+ u32 ulWordSize = 2; -+ CONST u32 uMaxSize = 512; -+ char *pszBuf; -+ u16 *lpszBuf; -+ -+ struct WMD_DEV_CONTEXT *pWmdContext = (struct WMD_DEV_CONTEXT *) -+ hWmdContext; -+ struct WMD_DRV_INTERFACE *pIntfFxns; -+ struct DEV_OBJECT *pDevObject = (struct DEV_OBJECT *) -+ pWmdContext->hDevObject; -+ -+ status = DEV_GetCodMgr(pDevObject, &hCodMgr); -+ if (DSP_FAILED(status)) -+ GT_0trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: Failed on DEV_GetCodMgr.\n"); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ -+ status = COD_GetSymValue(hCodMgr, COD_TRACEBEG, &ulTraceBegin); -+ GT_1trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: ulTraceBegin Value 0x%x\n", -+ ulTraceBegin); -+ if (DSP_FAILED(status)) -+ GT_0trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: Failed on " -+ "COD_GetSymValue.\n"); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ status = COD_GetSymValue(hCodMgr, COD_TRACEEND, &ulTraceEnd); -+ GT_1trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: ulTraceEnd Value 0x%x\n", -+ ulTraceEnd); -+ if (DSP_FAILED(status)) -+ GT_0trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: Failed on " -+ "COD_GetSymValue.\n"); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ ulNumBytes = (ulTraceEnd - ulTraceBegin) * ulWordSize; -+ /* If the chip type is 55 then the addresses will be -+ * byte addresses; convert them to word addresses. */ -+ if (ulNumBytes > uMaxSize) -+ ulNumBytes = uMaxSize; -+ -+ /* make sure the data we request fits evenly */ -+ ulNumBytes = (ulNumBytes / ulWordSize) * ulWordSize; -+ GT_1trace(dsp_trace_mask, GT_2CLASS, "PrintDspTraceBuffer: " -+ "ulNumBytes 0x%x\n", ulNumBytes); -+ ulNumWords = ulNumBytes * ulWordSize; -+ GT_1trace(dsp_trace_mask, GT_2CLASS, "PrintDspTraceBuffer: " -+ "ulNumWords 0x%x\n", ulNumWords); -+ status = DEV_GetIntfFxns(pDevObject, &pIntfFxns); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ pszBuf = MEM_Calloc(uMaxSize, MEM_NONPAGED); -+ lpszBuf = MEM_Calloc(ulNumBytes * 2, MEM_NONPAGED); -+ if (pszBuf != NULL) { -+ /* Read bytes from the DSP trace buffer... 
*/ -+ status = (*pIntfFxns->pfnBrdRead)(hWmdContext, -+ (u8 *)pszBuf, (u32)ulTraceBegin, -+ ulNumBytes, 0); -+ if (DSP_FAILED(status)) -+ GT_0trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: " -+ "Failed to Read Trace Buffer.\n"); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Pack and do newline conversion */ -+ GT_0trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: " -+ "before pack and unpack.\n"); -+ PackTraceBuffer(pszBuf, ulNumBytes, ulNumWords); -+ GT_1trace(dsp_trace_mask, GT_1CLASS, -+ "DSP Trace Buffer:\n%s\n", pszBuf); -+ } -+ MEM_Free(pszBuf); -+ MEM_Free(lpszBuf); -+ } else { -+ GT_0trace(dsp_trace_mask, GT_2CLASS, -+ "PrintDspTraceBuffer: Failed to " -+ "allocate trace buffer.\n"); -+ status = DSP_EMEMORY; -+ } -+ } -+#endif -+ return status; -+} -+ -+void IO_SM_init(void) -+{ -+ -+ GT_create(&dsp_trace_mask, "DT"); /* DSP Trace Mask */ -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/mmu_fault.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/mmu_fault.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,172 @@ -+/* -+ * mmu_fault.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== mmu_fault.c ======== -+ * Description: -+ * Implements DSP MMU fault handling functions. -+ * -+ *! Revision History: -+ *! ================ -+ *! 26-Dec-2004 hn: Support for IVA MMU exception. -+ *! 06-Mar-2003 sb: Print MMU fault address. Cosmetic changes. -+ *! 16-Feb-2003 vp: Fixed warning in MMU_FaultIsr -+ *! 05-Jan-2004 vp: Updated support for 24xx silicon -+ *! 19-Feb-2003 vp: Code review updates. -+ *! - Cosmetic changes. -+ *! 18-Oct-2002 sb: Ported to Linux platform. -+ *! 10-Sep-2001 kc: created. -+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Link Driver */ -+#include -+ -+/* ------------------------------------ Hardware Abstraction Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include "_deh.h" -+#include -+#include "_tiomap_mmu.h" -+#include "_tiomap.h" -+#include "mmu_fault.h" -+ -+static u32 dmmuEventMask; -+u32 faultAddr; -+ -+static bool MMU_CheckIfFault(struct WMD_DEV_CONTEXT *pDevContext); -+ -+/* -+ * ======== MMU_FaultDpc ======== -+ * Deferred procedure call to handle DSP MMU fault. 
-+ */ -+void MMU_FaultDpc(IN void *pRefData) -+{ -+ struct DEH_MGR *hDehMgr = (struct DEH_MGR *)pRefData; -+ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; -+ -+ DBG_Trace(DBG_LEVEL1, "MMU_FaultDpc Enter: 0x%x\n", pRefData); -+ -+ if (pDehMgr) -+ WMD_DEH_Notify(hDehMgr, DSP_MMUFAULT, 0L); -+ -+ DBG_Trace(DBG_LEVEL1, "MMU_FaultDpc Exit: 0x%x\n", pRefData); -+} -+ -+/* -+ * ======== MMU_FaultIsr ======== -+ * ISR to be triggered by a DSP MMU fault interrupt. -+ */ -+irqreturn_t MMU_FaultIsr(int irq, IN void *pRefData) -+{ -+ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)pRefData; -+ struct WMD_DEV_CONTEXT *pDevContext; -+ struct CFG_HOSTRES resources; -+ DSP_STATUS status = DSP_SOK; -+ -+ -+ DBG_Trace(DBG_LEVEL1, "Entering DEH_DspMmuIsr: 0x%x\n", pRefData); -+ DBC_Require(irq == INT_DSP_MMU_IRQ); -+ DBC_Require(MEM_IsValidHandle(pDehMgr, SIGNATURE)); -+ -+ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { -+ -+ pDevContext = (struct WMD_DEV_CONTEXT *)pDehMgr->hWmdContext; -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &resources); -+ if (DSP_FAILED(status)) -+ DBG_Trace(DBG_LEVEL7, -+ "**Failed to get Host Resources " -+ "in MMU ISR **\n"); -+ if (MMU_CheckIfFault(pDevContext)) { -+ printk(KERN_INFO "***** DSPMMU FAULT ***** IRQStatus " -+ "0x%x\n", dmmuEventMask); -+ printk(KERN_INFO "***** DSPMMU FAULT ***** faultAddr " -+ "0x%x\n", faultAddr); -+ /* Disable the MMU events, else once we clear it will -+ * start to raise INTs again */ -+ /* -+ * Schedule a DPC directly. In the future, it may be -+ * necessary to check if DSP MMU fault is intended for -+ * Bridge. -+ */ -+ DPC_Schedule(pDehMgr->hMmuFaultDpc); -+ /* Reset errInfo structure before use. */ -+ pDehMgr->errInfo.dwErrMask = DSP_MMUFAULT; -+ pDehMgr->errInfo.dwVal1 = faultAddr >> 16; -+ pDehMgr->errInfo.dwVal2 = faultAddr & 0xFFFF; -+ pDehMgr->errInfo.dwVal3 = 0L; -+ /* Disable the MMU events, else once we clear it will -+ * start to raise INTs again */ -+ HW_MMU_EventDisable(resources.dwDmmuBase, -+ HW_MMU_TRANSLATION_FAULT); -+ } else { -+ DBG_Trace(DBG_LEVEL7, -+ "***** MMU FAULT ***** faultcode 0x%x\n", -+ dmmuEventMask); -+ HW_MMU_EventDisable(resources.dwDmmuBase, -+ HW_MMU_ALL_INTERRUPTS); -+ } -+ } -+ return IRQ_HANDLED; -+} -+ -+ -+/* -+ * ======== MMU_CheckIfFault ======== -+ * Check to see if MMU Fault is valid TLB miss from DSP -+ * Note: This function is called from an ISR -+ */ -+static bool MMU_CheckIfFault(struct WMD_DEV_CONTEXT *pDevContext) -+{ -+ -+ -+ bool retVal = false; -+ DSP_STATUS status = DSP_SOK; -+ HW_STATUS hwStatus; -+ struct CFG_HOSTRES resources; -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ DBG_Trace(DBG_LEVEL7, "**Failed to get Host Resources in " -+ "MMU_CheckIfFault **\n"); -+ -+ hwStatus = HW_MMU_EventStatus(resources.dwDmmuBase, &dmmuEventMask); -+ if (dmmuEventMask == HW_MMU_TRANSLATION_FAULT) { -+ HW_MMU_FaultAddrRead(resources.dwDmmuBase, &faultAddr); -+ DBG_Trace(DBG_LEVEL1, "WMD_DEH_Notify: DSP_MMUFAULT, fault " -+ "address = 0x%x\n", faultAddr); -+ retVal = true; -+ } -+ return retVal; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/mmu_fault.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/mmu_fault.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/mmu_fault.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,45 @@ -+/* -+ * mmu_fault.h -+ * -+ * 
DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== mmu_fault.h ======== -+ * Description: -+ * Defines DSP MMU fault handling functions. -+ * -+ *! Revision History: -+ *! ================ -+ *! 26-Dec-2004 hn: IVA MMU handlers. -+ *! 10-Sep-2001 kc: created. -+ */ -+ -+#ifndef MMU_FAULT_ -+#define MMU_FAULT_ -+ -+/* -+ * ======== MMU_FaultDpc ======== -+ * Deferred procedure call to handle DSP MMU fault. -+ */ -+ void MMU_FaultDpc(IN void *pRefData); -+ -+/* -+ * ======== MMU_FaultIsr ======== -+ * ISR to be triggered by a DSP MMU fault interrupt. -+ */ -+irqreturn_t MMU_FaultIsr(int irq, IN void *pRefData); -+ -+#endif /* MMU_FAULT_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/msg_sm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/msg_sm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/msg_sm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/msg_sm.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,643 @@ -+/* -+ * msg_sm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== msg_sm.c ======== -+ * Description: -+ * Implements upper edge functions for WMD message module. -+ * -+ * Public Functions: -+ * WMD_MSG_Create -+ * WMD_MSG_CreateQueue -+ * WMD_MSG_Delete -+ * WMD_MSG_DeleteQueue -+ * WMD_MSG_Get -+ * WMD_MSG_Put -+ * WMD_MSG_RegisterNotify -+ * WMD_MSG_SetQueueId -+ * -+ *! Revision History: -+ *! ================= -+ *! 24-Jul-2002 jeh Release critical section in WMD_MSG_Put() before -+ *! scheduling DPC. -+ *! 09-May-2001 jeh Free MSG queue NTFY object, remove unnecessary set/ -+ *! reset of events. -+ *! 10-Jan-2001 jeh Set/Reset message manager and message queue events -+ *! correctly. -+ *! 04-Dec-2000 jeh Bug fixes. -+ *! 12-Sep-2000 jeh Created. 
-+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+ -+/* ----------------------------------- Others */ -+#include -+ -+/* ----------------------------------- This */ -+#include <_msg_sm.h> -+#include -+ -+/* ----------------------------------- Defines, Data Structures, Typedefs */ -+#define MSGQ_SIGNATURE 0x5147534d /* "QGSM" */ -+ -+/* ----------------------------------- Function Prototypes */ -+static DSP_STATUS AddNewMsg(struct LST_LIST *msgList); -+static void DeleteMsgMgr(struct MSG_MGR *hMsgMgr); -+static void DeleteMsgQueue(struct MSG_QUEUE *hMsgQueue, u32 uNumToDSP); -+static void FreeMsgList(struct LST_LIST *msgList); -+ -+/* -+ * ======== WMD_MSG_Create ======== -+ * Create an object to manage message queues. Only one of these objects -+ * can exist per device object. -+ */ -+DSP_STATUS WMD_MSG_Create(OUT struct MSG_MGR **phMsgMgr, -+ struct DEV_OBJECT *hDevObject, MSG_ONEXIT msgCallback) -+{ -+ struct MSG_MGR *pMsgMgr; -+ struct IO_MGR *hIOMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(phMsgMgr != NULL); -+ DBC_Require(msgCallback != NULL); -+ DBC_Require(hDevObject != NULL); -+ DEV_GetIOMgr(hDevObject, &hIOMgr); -+ DBC_Assert(hIOMgr != NULL); -+ *phMsgMgr = NULL; -+ /* Allocate MSG manager object */ -+ MEM_AllocObject(pMsgMgr, struct MSG_MGR, MSGMGR_SIGNATURE); -+ -+ if (pMsgMgr) { -+ pMsgMgr->onExit = msgCallback; -+ pMsgMgr->hIOMgr = hIOMgr; -+ /* List of MSG_QUEUEs */ -+ pMsgMgr->queueList = LST_Create(); -+ /* Queues of message frames for messages to the DSP. Message -+ * frames will only be added to the free queue when a -+ * MSG_QUEUE object is created. */ -+ pMsgMgr->msgFreeList = LST_Create(); -+ pMsgMgr->msgUsedList = LST_Create(); -+ if (pMsgMgr->queueList == NULL || -+ pMsgMgr->msgFreeList == NULL || -+ pMsgMgr->msgUsedList == NULL) -+ status = DSP_EMEMORY; -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_InitializeDPCCS(&pMsgMgr->hSyncCS); -+ -+ /* Create an event to be used by WMD_MSG_Put() in waiting -+ * for an available free frame from the message manager. */ -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_OpenEvent(&pMsgMgr->hSyncEvent, NULL); -+ -+ if (DSP_SUCCEEDED(status)) -+ *phMsgMgr = pMsgMgr; -+ else -+ DeleteMsgMgr(pMsgMgr); -+ -+ } else { -+ status = DSP_EMEMORY; -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_MSG_CreateQueue ======== -+ * Create a MSG_QUEUE for sending/receiving messages to/from a node -+ * on the DSP. 
-+ */ -+DSP_STATUS WMD_MSG_CreateQueue(struct MSG_MGR *hMsgMgr, -+ OUT struct MSG_QUEUE **phMsgQueue, -+ u32 dwId, u32 uMaxMsgs, HANDLE hArg) -+{ -+ u32 i; -+ u32 uNumAllocated = 0; -+ struct MSG_QUEUE *pMsgQ; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); -+ DBC_Require(phMsgQueue != NULL); -+ -+ *phMsgQueue = NULL; -+ /* Allocate MSG_QUEUE object */ -+ MEM_AllocObject(pMsgQ, struct MSG_QUEUE, MSGQ_SIGNATURE); -+ if (!pMsgQ) { -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ LST_InitElem((struct LST_ELEM *) pMsgQ); -+ pMsgQ->uMaxMsgs = uMaxMsgs; -+ pMsgQ->hMsgMgr = hMsgMgr; -+ pMsgQ->hArg = hArg; /* Node handle */ -+ pMsgQ->dwId = dwId; /* Node env (not valid yet) */ -+ /* Queues of Message frames for messages from the DSP */ -+ pMsgQ->msgFreeList = LST_Create(); -+ pMsgQ->msgUsedList = LST_Create(); -+ if (pMsgQ->msgFreeList == NULL || pMsgQ->msgUsedList == NULL) -+ status = DSP_EMEMORY; -+ -+ /* Create event that will be signalled when a message from -+ * the DSP is available. */ -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_OpenEvent(&pMsgQ->hSyncEvent, NULL); -+ -+ /* Create a notification list for message ready notification. */ -+ if (DSP_SUCCEEDED(status)) -+ status = NTFY_Create(&pMsgQ->hNtfy); -+ -+ /* Create events that will be used to synchronize cleanup -+ * when the object is deleted. hSyncDone will be set to -+ * unblock threads in MSG_Put() or MSG_Get(). hSyncDoneAck -+ * will be set by the unblocked thread to signal that it -+ * is unblocked and will no longer reference the object. */ -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_OpenEvent(&pMsgQ->hSyncDone, NULL); -+ -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_OpenEvent(&pMsgQ->hSyncDoneAck, NULL); -+ -+ if (DSP_SUCCEEDED(status)) { -+ if (!hMsgMgr->msgFreeList) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ /* Enter critical section */ -+ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); -+ /* Initialize message frames and put in appropriate queues */ -+ for (i = 0; i < uMaxMsgs && DSP_SUCCEEDED(status); i++) { -+ status = AddNewMsg(hMsgMgr->msgFreeList); -+ if (DSP_SUCCEEDED(status)) { -+ uNumAllocated++; -+ status = AddNewMsg(pMsgQ->msgFreeList); -+ } -+ } -+ if (DSP_FAILED(status)) { -+ /* Stay inside CS to prevent others from taking any -+ * of the newly allocated message frames. */ -+ DeleteMsgQueue(pMsgQ, uNumAllocated); -+ } else { -+ LST_PutTail(hMsgMgr->queueList, -+ (struct LST_ELEM *)pMsgQ); -+ *phMsgQueue = pMsgQ; -+ /* Signal that free frames are now available */ -+ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) -+ SYNC_SetEvent(hMsgMgr->hSyncEvent); -+ -+ } -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ } else { -+ DeleteMsgQueue(pMsgQ, 0); -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== WMD_MSG_Delete ======== -+ * Delete a MSG manager allocated in WMD_MSG_Create(). -+ */ -+void WMD_MSG_Delete(struct MSG_MGR *hMsgMgr) -+{ -+ DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); -+ -+ DeleteMsgMgr(hMsgMgr); -+} -+ -+/* -+ * ======== WMD_MSG_DeleteQueue ======== -+ * Delete a MSG queue allocated in WMD_MSG_CreateQueue. -+ */ -+void WMD_MSG_DeleteQueue(struct MSG_QUEUE *hMsgQueue) -+{ -+ struct MSG_MGR *hMsgMgr = hMsgQueue->hMsgMgr; -+ u32 refCount; -+ -+ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); -+ hMsgQueue->fDone = true; -+ /* Unblock all threads blocked in MSG_Get() or MSG_Put(). 
*/ -+ refCount = hMsgQueue->refCount; -+ while (refCount) { -+ /* Unblock thread */ -+ SYNC_SetEvent(hMsgQueue->hSyncDone); -+ /* Wait for acknowledgement */ -+ SYNC_WaitOnEvent(hMsgQueue->hSyncDoneAck, SYNC_INFINITE); -+ refCount = hMsgQueue->refCount; -+ } -+ /* Remove message queue from hMsgMgr->queueList */ -+ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); -+ LST_RemoveElem(hMsgMgr->queueList, (struct LST_ELEM *)hMsgQueue); -+ /* Free the message queue object */ -+ DeleteMsgQueue(hMsgQueue, hMsgQueue->uMaxMsgs); -+ if (!hMsgMgr->msgFreeList) -+ goto func_cont; -+ if (LST_IsEmpty(hMsgMgr->msgFreeList)) -+ SYNC_ResetEvent(hMsgMgr->hSyncEvent); -+func_cont: -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+} -+ -+/* -+ * ======== WMD_MSG_Get ======== -+ * Get a message from a MSG queue. -+ */ -+DSP_STATUS WMD_MSG_Get(struct MSG_QUEUE *hMsgQueue, -+ struct DSP_MSG *pMsg, u32 uTimeout) -+{ -+ struct MSG_FRAME *pMsgFrame; -+ struct MSG_MGR *hMsgMgr; -+ bool fGotMsg = false; -+ struct SYNC_OBJECT *hSyncs[2]; -+ u32 uIndex; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); -+ DBC_Require(pMsg != NULL); -+ -+ hMsgMgr = hMsgQueue->hMsgMgr; -+ if (!hMsgQueue->msgUsedList) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ -+ /* Enter critical section */ -+ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); -+ /* If a message is already there, get it */ -+ if (!LST_IsEmpty(hMsgQueue->msgUsedList)) { -+ pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgQueue-> -+ msgUsedList); -+ if (pMsgFrame != NULL) { -+ *pMsg = pMsgFrame->msgData.msg; -+ LST_PutTail(hMsgQueue->msgFreeList, -+ (struct LST_ELEM *)pMsgFrame); -+ if (LST_IsEmpty(hMsgQueue->msgUsedList)) -+ SYNC_ResetEvent(hMsgQueue->hSyncEvent); -+ -+ fGotMsg = true; -+ } -+ } else { -+ if (hMsgQueue->fDone) -+ status = DSP_EFAIL; -+ else -+ hMsgQueue->refCount++; -+ -+ } -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ if (DSP_SUCCEEDED(status) && !fGotMsg) { -+ /* Wait til message is available, timeout, or done. We don't -+ * have to schedule the DPC, since the DSP will send messages -+ * when they are available. */ -+ hSyncs[0] = hMsgQueue->hSyncEvent; -+ hSyncs[1] = hMsgQueue->hSyncDone; -+ status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout, -+ &uIndex); -+ /* Enter critical section */ -+ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); -+ if (hMsgQueue->fDone) { -+ hMsgQueue->refCount--; -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ /* Signal that we're not going to access hMsgQueue -+ * anymore, so it can be deleted. */ -+ (void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck); -+ status = DSP_EFAIL; -+ } else { -+ if (DSP_SUCCEEDED(status)) { -+ DBC_Assert(!LST_IsEmpty(hMsgQueue-> -+ msgUsedList)); -+ /* Get msg from used list */ -+ pMsgFrame = (struct MSG_FRAME *) -+ LST_GetHead(hMsgQueue->msgUsedList); -+ /* Copy message into pMsg and put frame on the -+ * free list */ -+ if (pMsgFrame != NULL) { -+ *pMsg = pMsgFrame->msgData.msg; -+ LST_PutTail(hMsgQueue->msgFreeList, -+ (struct LST_ELEM *)pMsgFrame); -+ } -+ } -+ hMsgQueue->refCount--; -+ /* Reset the event if there are still queued messages */ -+ if (!LST_IsEmpty(hMsgQueue->msgUsedList)) -+ SYNC_SetEvent(hMsgQueue->hSyncEvent); -+ -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== WMD_MSG_Put ======== -+ * Put a message onto a MSG queue. 
-+ */ -+DSP_STATUS WMD_MSG_Put(struct MSG_QUEUE *hMsgQueue, -+ IN CONST struct DSP_MSG *pMsg, u32 uTimeout) -+{ -+ struct MSG_FRAME *pMsgFrame; -+ struct MSG_MGR *hMsgMgr; -+ bool fPutMsg = false; -+ struct SYNC_OBJECT *hSyncs[2]; -+ u32 uIndex; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); -+ DBC_Require(pMsg != NULL); -+ -+ hMsgMgr = hMsgQueue->hMsgMgr; -+ -+ if (!hMsgMgr->msgFreeList) { -+ status = DSP_EHANDLE; -+ goto func_end; -+ } -+ -+ -+ (void) SYNC_EnterCS(hMsgMgr->hSyncCS); -+ -+ /* If a message frame is available, use it */ -+ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) { -+ pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgMgr-> -+ msgFreeList); -+ if (pMsgFrame != NULL) { -+ pMsgFrame->msgData.msg = *pMsg; -+ pMsgFrame->msgData.dwId = hMsgQueue->dwId; -+ LST_PutTail(hMsgMgr->msgUsedList, (struct LST_ELEM *) -+ pMsgFrame); -+ hMsgMgr->uMsgsPending++; -+ fPutMsg = true; -+ } -+ if (LST_IsEmpty(hMsgMgr->msgFreeList)) -+ SYNC_ResetEvent(hMsgMgr->hSyncEvent); -+ -+ /* Release critical section before scheduling DPC */ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ /* Schedule a DPC, to do the actual data transfer: */ -+ IO_Schedule(hMsgMgr->hIOMgr); -+ } else { -+ if (hMsgQueue->fDone) -+ status = DSP_EFAIL; -+ else -+ hMsgQueue->refCount++; -+ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ } -+ if (DSP_SUCCEEDED(status) && !fPutMsg) { -+ /* Wait til a free message frame is available, timeout, -+ * or done */ -+ hSyncs[0] = hMsgMgr->hSyncEvent; -+ hSyncs[1] = hMsgQueue->hSyncDone; -+ status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout, -+ &uIndex); -+ /* Enter critical section */ -+ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); -+ if (hMsgQueue->fDone) { -+ hMsgQueue->refCount--; -+ /* Exit critical section */ -+ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ /* Signal that we're not going to access hMsgQueue -+ * anymore, so it can be deleted. */ -+ (void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck); -+ status = DSP_EFAIL; -+ } else { -+ if (DSP_SUCCEEDED(status)) { -+ if (LST_IsEmpty(hMsgMgr->msgFreeList)) { -+ status = DSP_EPOINTER; -+ goto func_cont; -+ } -+ /* Get msg from free list */ -+ pMsgFrame = (struct MSG_FRAME *) -+ LST_GetHead(hMsgMgr->msgFreeList); -+ /* Copy message into pMsg and put frame on the -+ * used list */ -+ if (pMsgFrame != NULL) { -+ pMsgFrame->msgData.msg = *pMsg; -+ pMsgFrame->msgData.dwId = -+ hMsgQueue->dwId; -+ LST_PutTail(hMsgMgr->msgUsedList, -+ (struct LST_ELEM *) -+ pMsgFrame); -+ hMsgMgr->uMsgsPending++; -+ /* Schedule a DPC, to do the actual -+ * data transfer: */ -+ IO_Schedule(hMsgMgr->hIOMgr); -+ } -+ } -+ hMsgQueue->refCount--; -+ /* Reset event if there are still frames available */ -+ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) -+ SYNC_SetEvent(hMsgMgr->hSyncEvent); -+func_cont: -+ /* Exit critical section */ -+ (void) SYNC_LeaveCS(hMsgMgr->hSyncCS); -+ } -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== WMD_MSG_RegisterNotify ======== -+ */ -+DSP_STATUS WMD_MSG_RegisterNotify(struct MSG_QUEUE *hMsgQueue, u32 uEventMask, -+ u32 uNotifyType, -+ struct DSP_NOTIFICATION *hNotification) -+{ -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); -+ DBC_Require(hNotification != NULL); -+ DBC_Require(uEventMask == DSP_NODEMESSAGEREADY || uEventMask == 0); -+ DBC_Require(uNotifyType == DSP_SIGNALEVENT); -+ -+ status = NTFY_Register(hMsgQueue->hNtfy, hNotification, uEventMask, -+ uNotifyType); -+ -+ if (status == DSP_EVALUE) { -+ /* Not registered. 
Ok, since we couldn't have known. Node -+ * notifications are split between node state change handled -+ * by NODE, and message ready handled by MSG. */ -+ status = DSP_SOK; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== WMD_MSG_SetQueueId ======== -+ */ -+void WMD_MSG_SetQueueId(struct MSG_QUEUE *hMsgQueue, u32 dwId) -+{ -+ DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); -+ /* DBC_Require(dwId != 0); */ -+ -+ /* -+ * A message queue must be created when a node is allocated, -+ * so that NODE_RegisterNotify() can be called before the node -+ * is created. Since we don't know the node environment until the -+ * node is created, we need this function to set hMsgQueue->dwId -+ * to the node environment, after the node is created. -+ */ -+ hMsgQueue->dwId = dwId; -+} -+ -+/* -+ * ======== AddNewMsg ======== -+ * Must be called in message manager critical section. -+ */ -+static DSP_STATUS AddNewMsg(struct LST_LIST *msgList) -+{ -+ struct MSG_FRAME *pMsg; -+ DSP_STATUS status = DSP_SOK; -+ -+ pMsg = (struct MSG_FRAME *)MEM_Calloc(sizeof(struct MSG_FRAME), -+ MEM_PAGED); -+ if (pMsg != NULL) { -+ LST_InitElem((struct LST_ELEM *) pMsg); -+ LST_PutTail(msgList, (struct LST_ELEM *) pMsg); -+ } else { -+ status = DSP_EMEMORY; -+ } -+ -+ return status; -+} -+ -+/* -+ * ======== DeleteMsgMgr ======== -+ */ -+static void DeleteMsgMgr(struct MSG_MGR *hMsgMgr) -+{ -+ DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); -+ -+ if (hMsgMgr->queueList) { -+ if (LST_IsEmpty(hMsgMgr->queueList)) { -+ LST_Delete(hMsgMgr->queueList); -+ hMsgMgr->queueList = NULL; -+ } -+ } -+ -+ if (hMsgMgr->msgFreeList) { -+ FreeMsgList(hMsgMgr->msgFreeList); -+ hMsgMgr->msgFreeList = NULL; -+ } -+ -+ if (hMsgMgr->msgUsedList) { -+ FreeMsgList(hMsgMgr->msgUsedList); -+ hMsgMgr->msgUsedList = NULL; -+ } -+ -+ if (hMsgMgr->hSyncEvent) -+ SYNC_CloseEvent(hMsgMgr->hSyncEvent); -+ -+ if (hMsgMgr->hSyncCS) -+ SYNC_DeleteCS(hMsgMgr->hSyncCS); -+ -+ MEM_FreeObject(hMsgMgr); -+} -+ -+/* -+ * ======== DeleteMsgQueue ======== -+ */ -+static void DeleteMsgQueue(struct MSG_QUEUE *hMsgQueue, u32 uNumToDSP) -+{ -+ struct MSG_MGR *hMsgMgr; -+ struct MSG_FRAME *pMsg; -+ u32 i; -+ -+ if (!MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE) -+ || !hMsgQueue->hMsgMgr || !hMsgQueue->hMsgMgr->msgFreeList) -+ goto func_end; -+ hMsgMgr = hMsgQueue->hMsgMgr; -+ -+ -+ /* Pull off uNumToDSP message frames from Msg manager and free */ -+ for (i = 0; i < uNumToDSP; i++) { -+ -+ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) { -+ pMsg = (struct MSG_FRAME *)LST_GetHead(hMsgMgr-> -+ msgFreeList); -+ MEM_Free(pMsg); -+ } else { -+ /* Cannot free all of the message frames */ -+ break; -+ } -+ } -+ -+ if (hMsgQueue->msgFreeList) { -+ FreeMsgList(hMsgQueue->msgFreeList); -+ hMsgQueue->msgFreeList = NULL; -+ } -+ -+ if (hMsgQueue->msgUsedList) { -+ FreeMsgList(hMsgQueue->msgUsedList); -+ hMsgQueue->msgUsedList = NULL; -+ } -+ -+ -+ if (hMsgQueue->hNtfy) -+ NTFY_Delete(hMsgQueue->hNtfy); -+ -+ if (hMsgQueue->hSyncEvent) -+ SYNC_CloseEvent(hMsgQueue->hSyncEvent); -+ -+ if (hMsgQueue->hSyncDone) -+ SYNC_CloseEvent(hMsgQueue->hSyncDone); -+ -+ if (hMsgQueue->hSyncDoneAck) -+ SYNC_CloseEvent(hMsgQueue->hSyncDoneAck); -+ -+ MEM_FreeObject(hMsgQueue); -+func_end: -+ return; -+ -+} -+ -+/* -+ * ======== FreeMsgList ======== -+ */ -+static void FreeMsgList(struct LST_LIST *msgList) -+{ -+ struct MSG_FRAME *pMsg; -+ -+ if (!msgList) -+ goto func_end; -+ -+ while ((pMsg = (struct MSG_FRAME *)LST_GetHead(msgList)) != NULL) -+ MEM_Free(pMsg); -+ -+ 
DBC_Assert(LST_IsEmpty(msgList)); -+ -+ LST_Delete(msgList); -+func_end: -+ return; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_msg_sm.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_msg_sm.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_msg_sm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_msg_sm.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,158 @@ -+/* -+ * _msg_sm.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _msg_sm.h ======== -+ * Description: -+ * Private header file defining MSG manager objects and defines needed -+ * by IO manager. -+ * -+ * Public Functions: -+ * None. -+ * -+ * Notes: -+ * -+ *! Revision History: -+ *! ================ -+ *! 09-May-2001 jeh Code Review cleanup. -+ *! 08-Nov-2000 jeh Created. -+ */ -+ -+#ifndef _MSG_SM_ -+#define _MSG_SM_ -+ -+#include -+#include -+ -+/* -+ * These target side symbols define the beginning and ending addresses -+ * of the section of shared memory used for messages. They are -+ * defined in the *cfg.cmd file by cdb code. -+ */ -+#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG" -+#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END" -+ -+#ifndef _CHNL_WORDSIZE -+#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 2 bytes/word */ -+#endif -+ -+/* -+ * ======== MSG ======== -+ * There is a control structure for messages to the DSP, and a control -+ * structure for messages from the DSP. The shared memory region for -+ * transferring messages is partitioned as follows: -+ * -+ * ---------------------------------------------------------- -+ * |Control | Messages from DSP | Control | Messages to DSP | -+ * ---------------------------------------------------------- -+ * -+ * MSG control structure for messages to the DSP is used in the following -+ * way: -+ * -+ * bufEmpty - This flag is set to FALSE by the GPP after it has output -+ * messages for the DSP. The DSP host driver sets it to -+ * TRUE after it has copied the messages. -+ * postSWI - Set to 1 by the GPP after it has written the messages, -+ * set the size, and set bufEmpty to FALSE. -+ * The DSP Host driver uses SWI_andn of the postSWI field -+ * when a host interrupt occurs. The host driver clears -+ * this after posting the SWI. -+ * size - Number of messages to be read by the DSP. -+ * -+ * For messages from the DSP: -+ * bufEmpty - This flag is set to FALSE by the DSP after it has output -+ * messages for the GPP. The DPC on the GPP sets it to -+ * TRUE after it has copied the messages. -+ * postSWI - Set to 1 the DPC on the GPP after copying the messages. -+ * size - Number of messages to be read by the GPP. -+ */ -+struct MSG { -+ u32 bufEmpty; /* to/from DSP buffer is empty */ -+ u32 postSWI; /* Set to "1" to post MSG SWI */ -+ u32 size; /* Number of messages to/from the DSP */ -+ u32 resvd; -+} ; -+ -+/* -+ * ======== MSG_MGR ======== -+ * The MSG_MGR maintains a list of all MSG_QUEUEs. 
Each NODE object can -+ * have MSG_QUEUE to hold all messages that come up from the corresponding -+ * node on the DSP. The MSG_MGR also has a shared queue of messages -+ * ready to go to the DSP. -+ */ -+struct MSG_MGR { -+ /* The first two fields must match those in msgobj.h */ -+ u32 dwSignature; -+ struct WMD_DRV_INTERFACE *pIntfFxns; /* Function interface to WMD. */ -+ -+ struct IO_MGR *hIOMgr; /* IO manager */ -+ struct LST_LIST *queueList; /* List of MSG_QUEUEs */ -+ struct SYNC_CSOBJECT *hSyncCS; /* For critical sections */ -+ /* Signalled when MsgFrame is available */ -+ struct SYNC_OBJECT *hSyncEvent; -+ struct LST_LIST *msgFreeList; /* Free MsgFrames ready to be filled */ -+ struct LST_LIST *msgUsedList; /* MsgFrames ready to go to DSP */ -+ u32 uMsgsPending; /* # of queued messages to go to DSP */ -+ u32 uMaxMsgs; /* Max # of msgs that fit in buffer */ -+ MSG_ONEXIT onExit; /* called when RMS_EXIT is received */ -+} ; -+ -+/* -+ * ======== MSG_QUEUE ======== -+ * Each NODE has a MSG_QUEUE for receiving messages from the -+ * corresponding node on the DSP. The MSG_QUEUE object maintains a list -+ * of messages that have been sent to the host, but not yet read (MSG_Get), -+ * and a list of free frames that can be filled when new messages arrive -+ * from the DSP. -+ * The MSG_QUEUE's hSynEvent gets posted when a message is ready. -+ */ -+struct MSG_QUEUE { -+ struct LST_ELEM listElem; -+ u32 dwSignature; -+ struct MSG_MGR *hMsgMgr; -+ u32 uMaxMsgs; /* Node message depth */ -+ u32 dwId; /* Node environment pointer */ -+ struct LST_LIST *msgFreeList; /* Free MsgFrames ready to be filled */ -+ /* Filled MsgFramess waiting to be read */ -+ struct LST_LIST *msgUsedList; -+ HANDLE hArg; /* Handle passed to mgr onExit callback */ -+ struct SYNC_OBJECT *hSyncEvent; /* Signalled when message is ready */ -+ struct SYNC_OBJECT *hSyncDone; /* For synchronizing cleanup */ -+ struct SYNC_OBJECT *hSyncDoneAck; /* For synchronizing cleanup */ -+ struct NTFY_OBJECT *hNtfy; /* For notification of message ready */ -+ bool fDone; /* TRUE <==> deleting the object */ -+ u32 refCount; /* Number of pending MSG_get/put calls */ -+}; -+ -+/* -+ * ======== MSG_DSPMSG ======== -+ */ -+struct MSG_DSPMSG { -+ struct DSP_MSG msg; -+ u32 dwId; /* Identifies the node the message goes to */ -+} ; -+ -+/* -+ * ======== MSG_FRAME ======== -+ */ -+struct MSG_FRAME { -+ struct LST_ELEM listElem; -+ struct MSG_DSPMSG msgData; -+} ; -+ -+#endif /* _MSG_SM_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,384 @@ -+/* -+ * _tiomap.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== _tiomap.h ======== -+ * Description: -+ * Definitions and types private to this WMD. 
-+ * -+ */ -+ -+#ifndef _TIOMAP_ -+#define _TIOMAP_ -+ -+#include -+#include -+#include -+#include /* for WMDIOCTL_EXTPROC defn */ -+#include -+#include -+ -+struct MAP_L4PERIPHERAL { -+ u32 physAddr; -+ u32 dspVirtAddr; -+} ; -+ -+#define ARM_MAILBOX_START 0xfffcf000 -+#define ARM_MAILBOX_LENGTH 0x800 -+ -+/* New Registers in OMAP3.1 */ -+ -+#define TESTBLOCK_ID_START 0xfffed400 -+#define TESTBLOCK_ID_LENGTH 0xff -+ -+/* ID Returned by OMAP1510 */ -+#define TBC_ID_VALUE 0xB47002F -+ -+#define SPACE_LENGTH 0x2000 -+#define API_CLKM_DPLL_DMA 0xfffec000 -+#define ARM_INTERRUPT_OFFSET 0xb00 -+ -+#define BIOS_24XX -+ -+#define L4_PERIPHERAL_NULL 0x0 -+#define DSPVA_PERIPHERAL_NULL 0x0 -+ -+#define MAX_LOCK_TLB_ENTRIES 15 -+ -+#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */ -+#define DSPVA_PERIPHERAL_PRM 0x1181e000 -+#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */ -+#define DSPVA_PERIPHERAL_SCM 0x1181f000 -+#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */ -+#define DSPVA_PERIPHERAL_MMU 0x11820000 -+#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */ -+#define DSPVA_PERIPHERAL_CM 0x1181c000 -+#define L4_PERIPHERAL_PER 0x48005000 /* PER */ -+#define DSPVA_PERIPHERAL_PER 0x1181d000 -+ -+#define L4_PERIPHERAL_GPIO1 0x48310000 -+#define DSPVA_PERIPHERAL_GPIO1 0x11809000 -+#define L4_PERIPHERAL_GPIO2 0x49050000 -+#define DSPVA_PERIPHERAL_GPIO2 0x1180a000 -+#define L4_PERIPHERAL_GPIO3 0x49052000 -+#define DSPVA_PERIPHERAL_GPIO3 0x1180b000 -+#define L4_PERIPHERAL_GPIO4 0x49054000 -+#define DSPVA_PERIPHERAL_GPIO4 0x1180c000 -+#define L4_PERIPHERAL_GPIO5 0x49056000 -+#define DSPVA_PERIPHERAL_GPIO5 0x1180d000 -+ -+#define L4_PERIPHERAL_IVA2WDT 0x49030000 -+#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000 -+ -+#define L4_PERIPHERAL_DISPLAY 0x48050000 -+#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000 -+ -+#define L4_PERIPHERAL_SSI 0x48058000 -+#define DSPVA_PERIPHERAL_SSI 0x11804000 -+#define L4_PERIPHERAL_GDD 0x48059000 -+#define DSPVA_PERIPHERAL_GDD 0x11805000 -+#define L4_PERIPHERAL_SS1 0x4805a000 -+#define DSPVA_PERIPHERAL_SS1 0x11806000 -+#define L4_PERIPHERAL_SS2 0x4805b000 -+#define DSPVA_PERIPHERAL_SS2 0x11807000 -+ -+#define L4_PERIPHERAL_CAMERA 0x480BC000 -+#define DSPVA_PERIPHERAL_CAMERA 0x11819000 -+ -+#define L4_PERIPHERAL_SDMA 0x48056000 -+#define DSPVA_PERIPHERAL_SDMA 0x11810000 /*0x1181d000 conflicts with PER */ -+ -+#define L4_PERIPHERAL_UART1 0x4806a000 -+#define DSPVA_PERIPHERAL_UART1 0x11811000 -+#define L4_PERIPHERAL_UART2 0x4806c000 -+#define DSPVA_PERIPHERAL_UART2 0x11812000 -+#define L4_PERIPHERAL_UART3 0x49020000 -+#define DSPVA_PERIPHERAL_UART3 0x11813000 -+ -+#define L4_PERIPHERAL_MCBSP1 0x48074000 -+#define DSPVA_PERIPHERAL_MCBSP1 0x11814000 -+#define L4_PERIPHERAL_MCBSP2 0x49022000 -+#define DSPVA_PERIPHERAL_MCBSP2 0x11815000 -+#define L4_PERIPHERAL_MCBSP3 0x49024000 -+#define DSPVA_PERIPHERAL_MCBSP3 0x11816000 -+#define L4_PERIPHERAL_MCBSP4 0x49026000 -+#define DSPVA_PERIPHERAL_MCBSP4 0x11817000 -+#define L4_PERIPHERAL_MCBSP5 0x48096000 -+#define DSPVA_PERIPHERAL_MCBSP5 0x11818000 -+ -+#define L4_PERIPHERAL_GPTIMER5 0x49038000 -+#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000 -+#define L4_PERIPHERAL_GPTIMER6 0x4903a000 -+#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000 -+#define L4_PERIPHERAL_GPTIMER7 0x4903c000 -+#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000 -+#define L4_PERIPHERAL_GPTIMER8 0x4903e000 -+#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000 -+ -+#define L4_PERIPHERAL_SPI1 0x48098000 -+#define DSPVA_PERIPHERAL_SPI1 0x1181a000 -+#define 
L4_PERIPHERAL_SPI2 0x4809a000 -+#define DSPVA_PERIPHERAL_SPI2 0x1181b000 -+ -+#define L4_PERIPHERAL_MBOX 0x48094000 -+#define DSPVA_PERIPHERAL_MBOX 0x11808000 -+ -+#define PM_GRPSEL_BASE 0x48307000 -+#define DSPVA_GRPSEL_BASE 0x11821000 -+ -+#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000 -+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000 -+#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000 -+#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000 -+ -+/* define a static array with L4 mappings */ -+static const struct MAP_L4PERIPHERAL L4PeripheralTable[] = { -+ {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX}, -+ {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM}, -+ {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU}, -+ {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5}, -+ {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6}, -+ {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7}, -+ {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8}, -+ {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1}, -+ {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2}, -+ {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3}, -+ {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4}, -+ {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5}, -+ {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT}, -+ {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY}, -+ {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI}, -+ {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD}, -+ {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1}, -+ {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2}, -+ {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1}, -+ {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2}, -+ {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3}, -+ {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1}, -+ {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2}, -+ {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3}, -+ {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4}, -+ {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5}, -+ {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA}, -+ {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1}, -+ {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2}, -+ {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM}, -+ {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM}, -+ {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER}, -+ {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE}, -+ {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2}, -+ {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3}, -+ {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL} -+}; -+ -+/* -+ * 15 10 0 -+ * --------------------------------- -+ * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i| -+ * --------------------------------- -+ * | (class) | (module specific) | -+ * -+ * where c -> Externel Clock Command: Clk & Autoidle Disable/Enable -+ * i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3 -+ */ -+ -+/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */ -+#define MBX_PM_CLK_IDMASK 0x7F -+ -+/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */ -+#define MBX_PM_CLK_CMDSHIFT 7 -+ -+/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */ -+#define MBX_PM_CLK_CMDMASK 7 -+ -+/* MBX_PM_MAX_RESOURCES: CORE 1 Clock resources. */ -+#define MBX_CORE1_RESOURCES 7 -+ -+/* MBX_PM_MAX_RESOURCES: CORE 2 Clock Resources. */ -+#define MBX_CORE2_RESOURCES 1 -+ -+/* MBX_PM_MAX_RESOURCES: TOTAL Clock Reosurces. 
*/ -+#define MBX_PM_MAX_RESOURCES 11 -+ -+/* Power Management Commands */ -+enum BPWR_ExtClockCmd { -+ BPWR_DisableClock = 0, -+ BPWR_EnableClock, -+ BPWR_DisableAutoIdle, -+ BPWR_EnableAutoIdle -+} ; -+ -+/* OMAP242x specific resources */ -+enum BPWR_ExtClockId { -+ BPWR_GPTimer5 = 0x10, -+ BPWR_GPTimer6, -+ BPWR_GPTimer7, -+ BPWR_GPTimer8, -+ BPWR_WDTimer3, -+ BPWR_MCBSP1, -+ BPWR_MCBSP2, -+ BPWR_MCBSP3, -+ BPWR_MCBSP4, -+ BPWR_MCBSP5, -+ BPWR_SSI = 0x20 -+} ; -+ -+static const u32 BPWR_CLKID[] = { -+ (u32) BPWR_GPTimer5, -+ (u32) BPWR_GPTimer6, -+ (u32) BPWR_GPTimer7, -+ (u32) BPWR_GPTimer8, -+ (u32) BPWR_WDTimer3, -+ (u32) BPWR_MCBSP1, -+ (u32) BPWR_MCBSP2, -+ (u32) BPWR_MCBSP3, -+ (u32) BPWR_MCBSP4, -+ (u32) BPWR_MCBSP5, -+ (u32) BPWR_SSI -+}; -+ -+struct BPWR_Clk_t { -+ u32 clkId; -+ enum SERVICES_ClkId funClk; -+ enum SERVICES_ClkId intClk; -+} ; -+ -+static const struct BPWR_Clk_t BPWR_Clks[] = { -+ {(u32) BPWR_GPTimer5, SERVICESCLK_gpt5_fck, SERVICESCLK_gpt5_ick}, -+ {(u32) BPWR_GPTimer6, SERVICESCLK_gpt6_fck, SERVICESCLK_gpt6_ick}, -+ {(u32) BPWR_GPTimer7, SERVICESCLK_gpt7_fck, SERVICESCLK_gpt7_ick}, -+ {(u32) BPWR_GPTimer8, SERVICESCLK_gpt8_fck, SERVICESCLK_gpt8_ick}, -+ {(u32) BPWR_WDTimer3, SERVICESCLK_wdt3_fck, SERVICESCLK_wdt3_ick}, -+ {(u32) BPWR_MCBSP1, SERVICESCLK_mcbsp1_fck, SERVICESCLK_mcbsp1_ick}, -+ {(u32) BPWR_MCBSP2, SERVICESCLK_mcbsp2_fck, SERVICESCLK_mcbsp2_ick}, -+ {(u32) BPWR_MCBSP3, SERVICESCLK_mcbsp3_fck, SERVICESCLK_mcbsp3_ick}, -+ {(u32) BPWR_MCBSP4, SERVICESCLK_mcbsp4_fck, SERVICESCLK_mcbsp4_ick}, -+ {(u32) BPWR_MCBSP5, SERVICESCLK_mcbsp5_fck, SERVICESCLK_mcbsp5_ick}, -+ {(u32) BPWR_SSI, SERVICESCLK_ssi_fck, SERVICESCLK_ssi_ick} -+}; -+ -+/* Interrupt Register Offsets */ -+#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */ -+#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */ -+ -+#define DSP_MAILBOX1_INT 10 -+ -+/* -+ * INTH_InterruptKind_t -+ * Identify the kind of interrupt: either FIQ/IRQ -+ */ -+enum INTH_InterruptKind_t { -+ INTH_IRQ = 0, -+ INTH_FIQ = 1 -+} ; -+ -+enum INTH_SensitiveEdge_t { -+ FALLING_EDGE_SENSITIVE = 0, -+ LOW_LEVEL_SENSITIVE = 1 -+} ; -+ -+/* -+ * Bit definition of Interrupt Level Registers -+ */ -+ -+/* Mail Box defines */ -+#define MB_ARM2DSP1_REG_OFFSET 0x00 -+ -+#define MB_ARM2DSP1B_REG_OFFSET 0x04 -+ -+#define MB_DSP2ARM1B_REG_OFFSET 0x0C -+ -+#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18 -+ -+#define MB_ARM2DSP_FLAG 0x0001 -+ -+#define MBOX_ARM2DSP HW_MBOX_ID_0 -+#define MBOX_DSP2ARM HW_MBOX_ID_1 -+#define MBOX_ARM HW_MBOX_U0_ARM -+#define MBOX_DSP HW_MBOX_U1_DSP1 -+ -+#define ENABLE true -+#define DISABLE false -+ -+#define HIGH_LEVEL true -+#define LOW_LEVEL false -+ -+/* Macro's */ -+#define REG16(A) (*(REG_UWORD16 *)(A)) -+ -+#define ClearBit(reg, mask) (reg &= ~mask) -+#define SetBit(reg, mask) (reg |= mask) -+ -+#define SetGroupBits16(reg, position, width, value) \ -+ do {\ -+ reg &= ~((0xFFFF >> (16 - (width))) << (position)) ; \ -+ reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \ -+ } while (0); -+ -+#define ClearBitIndex(reg, index) (reg &= ~(1 << (index))) -+ -+/* This mini driver's device context: */ -+struct WMD_DEV_CONTEXT { -+ struct DEV_OBJECT *hDevObject; /* Handle to WCD device object. */ -+ u32 dwDspBaseAddr; /* Arm's API to DSP virtual base addr */ -+ /* -+ * DSP External memory prog address as seen virtually by the OS on -+ * the host side. 
-+ */ -+ u32 dwDspExtBaseAddr; /* See the comment above */ -+ u32 dwAPIRegBase; /* API memory mapped registers */ -+ void __iomem *dwDSPMmuBase; /* DSP MMU Mapped registers */ -+ u32 dwMailBoxBase; /* Mail box mapped registers */ -+ u32 dwAPIClkBase; /* CLK Registers */ -+ u32 dwDSPClkM2Base; /* DSP Clock Module m2 */ -+ u32 dwPublicRhea; /* Pub Rhea */ -+ u32 dwIntAddr; /* MB INTR reg */ -+ u32 dwTCEndianism; /* TC Endianism register */ -+ u32 dwTestBase; /* DSP MMU Mapped registers */ -+ u32 dwSelfLoop; /* Pointer to the selfloop */ -+ u32 dwDSPStartAdd; /* API Boot vector */ -+ u32 dwInternalSize; /* Internal memory size */ -+ -+ /* -+ * Processor specific info is set when prog loaded and read from DCD. -+ * [See WMD_BRD_Ctrl()] PROC info contains DSP-MMU TLB entries. -+ */ -+ /* DMMU TLB entries */ -+ struct WMDIOCTL_EXTPROC aTLBEntry[WMDIOCTL_NUMOFMMUTLB]; -+ u32 dwBrdState; /* Last known board state. */ -+ u32 ulIntMask; /* int mask */ -+ u16 ioBase; /* Board I/O base */ -+ u32 numTLBEntries; /* DSP MMU TLB entry counter */ -+ u32 fixedTLBEntries; /* Fixed DSPMMU TLB entry count */ -+ -+ /* TC Settings */ -+ bool tcWordSwapOn; /* Traffic Controller Word Swap */ -+ struct PgTableAttrs *pPtAttrs; -+ u32 uDspPerClks; -+} ; -+ -+ /* -+ * ======== WMD_TLB_DspVAToMpuPA ======== -+ * Given a DSP virtual address, traverse the page table and return -+ * a corresponding MPU physical address and size. -+ */ -+extern DSP_STATUS WMD_TLB_DspVAToMpuPA(struct WMD_DEV_CONTEXT *pDevContext, -+ IN u32 ulVirtAddr, -+ OUT u32 *ulPhysAddr, -+ OUT u32 *sizeTlb); -+ -+#endif /* _TIOMAP_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap_io.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap_io.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,427 @@ -+/* -+ * tiomap_io.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _tiomap_io.c ======== -+ * Description: -+ * Implementation for the io read/write routines. -+ * -+ *! Revision History -+ *! ================ -+ *! 16-Feb-2004 vp: Fixed warning in WriteDspData function. -+ *! 16-Apr-2003 vp: Added support for TC word swap -+ *! 26-Feb-2003 vp: Fixed issue with EXT_BEG and EXT_END address. -+ *! 24-Feb-2003 vp: Ported to Linux platform -+ *! 08-Oct-2002 rr: Created. 
-+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+ -+/* ----------------------------------- specific to this file */ -+#include "_tiomap.h" -+#include "_tiomap_pwr.h" -+#include "tiomap_io.h" -+ -+static u32 ulExtBase; -+static u32 ulExtEnd; -+ -+static u32 ulShm0End; -+static u32 ulDynExtBase; -+static u32 ulTraceSecBeg; -+static u32 ulTraceSecEnd; -+static u32 ulShmBaseVirt; -+ -+bool bSymbolsReloaded = true; -+ -+/* -+ * ======== ReadExtDspData ======== -+ * Copies DSP external memory buffers to the host side buffers. -+ */ -+DSP_STATUS ReadExtDspData(struct WMD_DEV_CONTEXT *hDevContext, -+ OUT u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ u32 offset; -+ u32 ulTLBBaseVirt = 0; -+ u32 ulShmOffsetVirt = 0; -+ u32 dwExtProgVirtMem; -+ u32 dwBaseAddr = pDevContext->dwDspExtBaseAddr; -+ bool bTraceRead = false; -+ -+ DBG_Trace(DBG_ENTER, "ReadExtDspData," -+ "hDevContext: 0x%x\n\t\tpbHostBuf: 0x%x" -+ "\n\t\tdwDSPAddr: 0x%x\n\t\tulNumBytes: 0x%x\n\t\t" -+ "ulMemType: 0x%x\n", pDevContext, pbHostBuf, dwDSPAddr, -+ ulNumBytes, ulMemType); -+ -+ if (!ulShmBaseVirt) { -+ status = DEV_GetSymbol(pDevContext->hDevObject, -+ SHMBASENAME, &ulShmBaseVirt); -+ } -+ DBC_Assert(ulShmBaseVirt != 0); -+ -+ /* Check if it is a read of Trace section */ -+ if (!ulTraceSecBeg) { -+ status = DEV_GetSymbol(pDevContext->hDevObject, -+ DSP_TRACESEC_BEG, &ulTraceSecBeg); -+ } -+ DBC_Assert(ulTraceSecBeg != 0); -+ -+ if (DSP_SUCCEEDED(status) && !ulTraceSecEnd) { -+ status = DEV_GetSymbol(pDevContext->hDevObject, -+ DSP_TRACESEC_END, &ulTraceSecEnd); -+ } -+ DBC_Assert(ulTraceSecEnd != 0); -+ -+ if (DSP_SUCCEEDED(status)) { -+ if ((dwDSPAddr <= ulTraceSecEnd) && -+ (dwDSPAddr >= ulTraceSecBeg)) { -+ DBG_Trace(DBG_LEVEL5, "Reading from DSP Trace" -+ "section 0x%x \n", dwDSPAddr); -+ bTraceRead = true; -+ } -+ } -+ -+ /* If reading from TRACE, force remap/unmap */ -+ if ((bTraceRead) && dwBaseAddr) { -+ dwBaseAddr = 0; -+ pDevContext->dwDspExtBaseAddr = 0; -+ } -+ -+ if (!dwBaseAddr) { -+ /* Initialize ulExtBase and ulExtEnd */ -+ ulExtBase = 0; -+ ulExtEnd = 0; -+ -+ /* Get DYNEXT_BEG, EXT_BEG and EXT_END.*/ -+ if (DSP_SUCCEEDED(status) && !ulDynExtBase) { -+ status = DEV_GetSymbol(pDevContext->hDevObject, -+ DYNEXTBASE, &ulDynExtBase); -+ } -+ DBC_Assert(ulDynExtBase != 0); -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetSymbol(pDevContext->hDevObject, -+ EXTBASE, &ulExtBase); -+ } -+ DBC_Assert(ulExtBase != 0); -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = DEV_GetSymbol(pDevContext->hDevObject, -+ EXTEND, &ulExtEnd); -+ } -+ DBC_Assert(ulExtEnd != 0); -+ -+ /* Trace buffer is right after the SHM SEG0, -+ * so set the base address to SHMBASE */ -+ if (bTraceRead) { -+ ulExtBase = ulShmBaseVirt; -+ ulExtEnd = ulTraceSecEnd; -+ } -+ -+ DBC_Assert(ulExtEnd != 0); -+ DBC_Assert(ulExtEnd > ulExtBase); -+ -+ if (ulExtEnd < ulExtBase) -+ status = DSP_EFAIL; -+ -+ if (DSP_SUCCEEDED(status)) { -+ ulTLBBaseVirt = -+ pDevContext->aTLBEntry[0].ulDspVa * DSPWORDSIZE; -+ DBC_Assert(ulTLBBaseVirt <= ulShmBaseVirt); -+ dwExtProgVirtMem = pDevContext->aTLBEntry[0].ulGppVa; -+ -+ if (bTraceRead) { -+ 
DBG_Trace(DBG_LEVEL7, "ReadExtDspData: " -+ "GPP VA pointing to SHMMEMBASE 0x%x \n", -+ dwExtProgVirtMem); -+ } else { -+ ulShmOffsetVirt = ulShmBaseVirt - ulTLBBaseVirt; -+ ulShmOffsetVirt += PG_ALIGN_HIGH(ulExtEnd - -+ ulDynExtBase + 1, -+ HW_PAGE_SIZE_64KB); -+ dwExtProgVirtMem -= ulShmOffsetVirt; -+ dwExtProgVirtMem += (ulExtBase - ulDynExtBase); -+ DBG_Trace(DBG_LEVEL7, "ReadExtDspData: " -+ "GPP VA pointing to EXTMEMBASE 0x%x \n", -+ dwExtProgVirtMem); -+ pDevContext->dwDspExtBaseAddr = -+ dwExtProgVirtMem; -+ -+ /* This dwDspExtBaseAddr will get cleared only when the board is -+ * stopped. */ -+ if (!pDevContext->dwDspExtBaseAddr) { -+ status = DSP_EFAIL; -+ DBG_Trace(DBG_LEVEL7, "ReadExtDspData: " -+ "failed to Map the program memory\n"); -+ } -+ } -+ -+ dwBaseAddr = dwExtProgVirtMem; -+ } -+ } -+ -+ if (!dwBaseAddr || !ulExtBase || !ulExtEnd) { -+ DBG_Trace(DBG_LEVEL7, -+ "Symbols missing for Ext Prog reading \n"); -+ status = DSP_EFAIL; -+ } -+ -+ offset = dwDSPAddr - ulExtBase; -+ -+ if (DSP_SUCCEEDED(status)) -+ memcpy(pbHostBuf, (u8 *)dwBaseAddr+offset, ulNumBytes); -+ -+ return status; -+} -+/* -+ * ======== WriteDspData ======== -+ * purpose: -+ * Copies buffers to the DSP internal/external memory. -+ */ -+DSP_STATUS WriteDspData(struct WMD_DEV_CONTEXT *hDevContext, IN u8 *pbHostBuf, -+ u32 dwDSPAddr, u32 ulNumBytes, u32 ulMemType) -+{ -+ u32 offset; -+ u32 dwBaseAddr = hDevContext->dwDspBaseAddr; -+ struct CFG_HOSTRES resources; -+ DSP_STATUS status; -+ u32 base1, base2, base3; -+ base1 = OMAP_DSP_MEM1_SIZE; -+ base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE; -+ base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE; -+ DBG_Trace(DBG_ENTER, "Entered WriteDspData \n"); -+ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ -+ offset = dwDSPAddr - hDevContext->dwDSPStartAdd; -+ if (offset < base1) { -+ dwBaseAddr = MEM_LinearAddress(resources.dwMemBase[2], -+ resources.dwMemLength[2]); -+ } else if (offset > base1 && offset < base2+OMAP_DSP_MEM2_SIZE) { -+ dwBaseAddr = MEM_LinearAddress(resources.dwMemBase[3], -+ resources.dwMemLength[3]); -+ offset = offset - base2; -+ } else if (offset >= base2+OMAP_DSP_MEM2_SIZE && -+ offset < base3 + OMAP_DSP_MEM3_SIZE) { -+ dwBaseAddr = MEM_LinearAddress(resources.dwMemBase[4], -+ resources.dwMemLength[4]); -+ offset = offset - base3; -+ } else{ -+ status = DSP_EFAIL; -+ return status; -+ } -+ if (ulNumBytes) -+ memcpy((u8 *) (dwBaseAddr+offset), pbHostBuf, ulNumBytes); -+ else -+ *((u32 *) pbHostBuf) = dwBaseAddr+offset; -+ -+ return status; -+} -+ -+/* -+ * ======== WriteExtDspData ======== -+ * purpose: -+ * Copies buffers to the external memory. 
-+ * -+ */ -+DSP_STATUS WriteExtDspData(struct WMD_DEV_CONTEXT *pDevContext, -+ IN u8 *pbHostBuf, u32 dwDSPAddr, u32 ulNumBytes, -+ u32 ulMemType, bool bDynamicLoad) -+{ -+ u32 dwBaseAddr = pDevContext->dwDspExtBaseAddr; -+ u32 dwOffset = 0; -+ u8 bTempByte1, bTempByte2; -+ u8 remainByte[4]; -+ s32 i; -+ DSP_STATUS retVal = DSP_SOK; -+ u32 dwExtProgVirtMem; -+ u32 ulTLBBaseVirt = 0; -+ u32 ulShmOffsetVirt = 0; -+ struct CFG_HOSTRES hostRes; -+ bool bTraceLoad = false; -+ bTempByte1 = 0x0; -+ bTempByte2 = 0x0; -+ -+ DBG_Trace(DBG_ENTER, "Entered WriteExtDspData dwDSPAddr 0x%x " -+ "ulNumBytes 0x%x \n", dwDSPAddr, ulNumBytes); -+ if (bSymbolsReloaded) { -+ /* Check if it is a load to Trace section */ -+ retVal = DEV_GetSymbol(pDevContext->hDevObject, -+ DSP_TRACESEC_BEG, &ulTraceSecBeg); -+ if (DSP_SUCCEEDED(retVal)) -+ retVal = DEV_GetSymbol(pDevContext->hDevObject, -+ DSP_TRACESEC_END, &ulTraceSecEnd); -+ } -+ if (DSP_SUCCEEDED(retVal)) { -+ if ((dwDSPAddr <= ulTraceSecEnd) && -+ (dwDSPAddr >= ulTraceSecBeg)) { -+ DBG_Trace(DBG_LEVEL5, "Writing to DSP Trace " -+ "section 0x%x \n", dwDSPAddr); -+ bTraceLoad = true; -+ } -+ } -+ -+ /* If dynamic, force remap/unmap */ -+ if ((bDynamicLoad || bTraceLoad) && dwBaseAddr) { -+ dwBaseAddr = 0; -+ MEM_UnmapLinearAddress((void *)pDevContext->dwDspExtBaseAddr); -+ pDevContext->dwDspExtBaseAddr = 0x0; -+ } -+ if (!dwBaseAddr) { -+ if (bSymbolsReloaded) -+ /* Get SHM_BEG EXT_BEG and EXT_END. */ -+ retVal = DEV_GetSymbol(pDevContext->hDevObject, -+ SHMBASENAME, &ulShmBaseVirt); -+ DBC_Assert(ulShmBaseVirt != 0); -+ if (bDynamicLoad) { -+ if (DSP_SUCCEEDED(retVal)) { -+ if (bSymbolsReloaded) -+ retVal = DEV_GetSymbol(pDevContext-> -+ hDevObject, DYNEXTBASE, -+ &ulExtBase); -+ } -+ DBC_Assert(ulExtBase != 0); -+ if (DSP_SUCCEEDED(retVal)) { -+ /* DR OMAPS00013235 : DLModules array may be -+ * in EXTMEM. It is expected that DYNEXTMEM and -+ * EXTMEM are contiguous, so checking for the -+ * upper bound at EXTEND should be Ok. */ -+ if (bSymbolsReloaded) -+ retVal = DEV_GetSymbol(pDevContext-> -+ hDevObject, EXTEND, &ulExtEnd); -+ } -+ } else { -+ if (bSymbolsReloaded) { -+ if (DSP_SUCCEEDED(retVal)) -+ retVal = DEV_GetSymbol(pDevContext-> -+ hDevObject, EXTBASE, -+ &ulExtBase); -+ DBC_Assert(ulExtBase != 0); -+ if (DSP_SUCCEEDED(retVal)) -+ retVal = DEV_GetSymbol(pDevContext-> -+ hDevObject, EXTEND, &ulExtEnd); -+ } -+ } -+ /* Trace buffer it right after the SHM SEG0, so set the -+ * base address to SHMBASE */ -+ if (bTraceLoad) -+ ulExtBase = ulShmBaseVirt; -+ -+ DBC_Assert(ulExtEnd != 0); -+ DBC_Assert(ulExtEnd > ulExtBase); -+ if (ulExtEnd < ulExtBase) -+ retVal = DSP_EFAIL; -+ -+ if (DSP_SUCCEEDED(retVal)) { -+ ulTLBBaseVirt = pDevContext->aTLBEntry[0].ulDspVa * -+ DSPWORDSIZE; -+ DBC_Assert(ulTLBBaseVirt <= ulShmBaseVirt); -+ -+ if (bSymbolsReloaded) { -+ if (DSP_SUCCEEDED(retVal)) { -+ retVal = DEV_GetSymbol(pDevContext-> -+ hDevObject, DSP_TRACESEC_END, -+ &ulShm0End); -+ } -+ if (DSP_SUCCEEDED(retVal)) { -+ retVal = DEV_GetSymbol(pDevContext-> -+ hDevObject, DYNEXTBASE, -+ &ulDynExtBase); -+ } -+ } -+ ulShmOffsetVirt = ulShmBaseVirt - ulTLBBaseVirt; -+ if (bTraceLoad) { -+ dwExtProgVirtMem = pDevContext->aTLBEntry[0]. 
-+ ulGppVa; -+ } else { -+ CFG_GetHostResources( -+ (struct CFG_DEVNODE *) -+ DRV_GetFirstDevExtension(), &hostRes); -+ dwExtProgVirtMem = hostRes.dwMemBase[1]; -+ dwExtProgVirtMem += (ulExtBase - ulDynExtBase); -+ } -+ DBG_Trace(DBG_LEVEL7, "WriteExtDspData: GPP VA " -+ "pointing to EXTMEMBASE 0x%x \n", -+ dwExtProgVirtMem); -+ -+ pDevContext->dwDspExtBaseAddr = -+ (u32)MEM_LinearAddress((void *) -+ TO_VIRTUAL_UNCACHED(dwExtProgVirtMem), ulExtEnd -+ - ulExtBase); -+ dwBaseAddr += pDevContext->dwDspExtBaseAddr; -+ /* This dwDspExtBaseAddr will get cleared only when -+ * the board is stopped. */ -+ if (!pDevContext->dwDspExtBaseAddr) { -+ retVal = DSP_EFAIL; -+ DBG_Trace(DBG_LEVEL7, "WriteExtDspData: failed " -+ "to Map the program memory\n"); -+ } -+ } -+ } -+ if (!dwBaseAddr || !ulExtBase || !ulExtEnd) { -+ DBG_Trace(DBG_LEVEL7, "Symbols missing for Ext Prog loading\n"); -+ retVal = DSP_EFAIL; -+ } -+ if (DSP_SUCCEEDED(retVal)) { -+ for (i = 0; i < 4; i++) -+ remainByte[i] = 0x0; -+ -+ dwOffset = dwDSPAddr - ulExtBase; -+ /* Also make sure the dwDSPAddr is < ulExtEnd */ -+ if (dwDSPAddr > ulExtEnd || dwOffset > dwDSPAddr) { -+ DBG_Trace(DBG_LEVEL7, "We can not load at this address " -+ "dwDSPAddr=0x%x, ulExt/DynBase=0x%x, " -+ "ulExtEnd=0x%x\n", dwDSPAddr, ulExtBase, -+ ulExtEnd); -+ retVal = DSP_EFAIL; -+ } -+ } -+ if (DSP_SUCCEEDED(retVal)) { -+ if (ulNumBytes) -+ memcpy((u8 *) dwBaseAddr + dwOffset, pbHostBuf, -+ ulNumBytes); -+ else -+ *((u32 *) pbHostBuf) = dwBaseAddr+dwOffset; -+ } -+ /* Unmap here to force remap for other Ext loads */ -+ if ((bDynamicLoad || bTraceLoad) && pDevContext->dwDspExtBaseAddr) { -+ MEM_UnmapLinearAddress((void *) pDevContext->dwDspExtBaseAddr); -+ pDevContext->dwDspExtBaseAddr = 0x0; -+ } -+ bSymbolsReloaded = false; -+ return retVal; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap_io.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_io.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap_io.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,112 @@ -+/* -+ * tiomap_io.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _tiomap_io.h ======== -+ * Description: -+ * Definitions, types and function prototypes for the io -+ * (r/w external mem). -+ * -+ *! Revision History -+ *! ================ -+ *! 08-Oct-2002 rr: Created. -+ */ -+ -+#ifndef _TIOMAP_IO_ -+#define _TIOMAP_IO_ -+ -+/* -+ * Symbol that defines beginning of shared memory. -+ * For OMAP (Helen) this is the DSP Virtual base address of SDRAM. -+ * This will be used to program DSP MMU to map DSP Virt to GPP phys. -+ * (see dspMmuTlbEntry()). 
-+ */ -+#define SHMBASENAME "SHM_BEG" -+#define EXTBASE "EXT_BEG" -+#define EXTEND "_EXT_END" -+#define DYNEXTBASE "_DYNEXT_BEG" -+#define DYNEXTEND "_DYNEXT_END" -+#define IVAEXTMEMBASE "_IVAEXTMEM_BEG" -+#define IVAEXTMEMEND "_IVAEXTMEM_END" -+ -+ -+#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG" -+#define DSP_TRACESEC_END "_BRIDGE_TRACE_END" -+ -+#define SYS_PUTCBEG "_SYS_PUTCBEG" -+#define SYS_PUTCEND "_SYS_PUTCEND" -+#define BRIDGE_SYS_PUTC_current "_BRIDGE_SYS_PUTC_current" -+ -+ -+#define WORDSWAP_ENABLE 0x3 /* Enable word swap */ -+ -+/* -+ * ======== ReadExtDspData ======== -+ * Reads it from DSP External memory. The external memory for the DSP -+ * is configured by the combination of DSP MMU and SHM Memory manager in the CDB -+ */ -+extern DSP_STATUS ReadExtDspData(struct WMD_DEV_CONTEXT *pDevContext, -+ OUT u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType); -+ -+/* -+ * ======== WriteDspData ======== -+ */ -+extern DSP_STATUS WriteDspData(struct WMD_DEV_CONTEXT *pDevContext, -+ OUT u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType); -+ -+/* -+ * ======== WriteExtDspData ======== -+ * Writes to the DSP External memory for external program. -+ * The ext mem for progra is configured by the combination of DSP MMU and -+ * SHM Memory manager in the CDB -+ */ -+extern DSP_STATUS WriteExtDspData(struct WMD_DEV_CONTEXT *pDevContext, -+ IN u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType, -+ bool bDynamicLoad); -+ -+/* -+ * ======== WriteExt32BitDspData ======== -+ * Writes 32 bit data to the external memory -+ */ -+extern inline void WriteExt32BitDspData(IN const -+ struct WMD_DEV_CONTEXT *pDevContext, IN u32 dwDSPAddr, -+ IN u32 val) -+{ -+ *(u32 *)dwDSPAddr = ((pDevContext->tcWordSwapOn) ? (((val << 16) & -+ 0xFFFF0000) | ((val >> 16) & 0x0000FFFF)) : val); -+} -+ -+/* -+ * ======== ReadExt32BitDspData ======== -+ * Reads 32 bit data from the external memory -+ */ -+extern inline u32 ReadExt32BitDspData(IN const struct WMD_DEV_CONTEXT -+ *pDevContext, IN u32 dwDSPAddr) -+{ -+ u32 retVal; -+ retVal = *(u32 *)dwDSPAddr; -+ -+ retVal = ((pDevContext->tcWordSwapOn) ? (((retVal << 16) -+ & 0xFFFF0000) | ((retVal >> 16) & 0x0000FFFF)) : retVal); -+ return retVal; -+} -+ -+#endif /* _TIOMAP_IO_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_mmu.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap_mmu.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_mmu.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap_mmu.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,53 @@ -+/* -+ * _tiomap_mmu.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _tiomap_mmu.h ======== -+ * Description: -+ * Definitions and types for the DSP MMU modules -+ * -+ *! Revision History -+ *! ================ -+ *! 19-Apr-2004 sb: Renamed HW types. Removed dspMmuTlbEntry -+ *! 05-Jan-2004 vp: Moved the file to a platform specific folder from common. -+ *! 
21-Mar-2003 sb: Added macro definition TIHEL_LARGEPAGESIZE -+ *! 08-Oct-2002 rr: Created. -+ */ -+ -+#ifndef _TIOMAP_MMU_ -+#define _TIOMAP_MMU_ -+ -+#include "_tiomap.h" -+ -+/* -+ * ======== configureDspMmu ======== -+ * -+ * Make DSP MMu page table entries. -+ * Note: Not utilizing Coarse / Fine page tables. -+ * SECTION = 1MB, LARGE_PAGE = 64KB, SMALL_PAGE = 4KB, TINY_PAGE = 1KB. -+ * DSP Byte address 0x40_0000 is word addr 0x20_0000. -+ */ -+extern void configureDspMmu(struct WMD_DEV_CONTEXT *pDevContext, -+ u32 dataBasePhys, -+ u32 dspBaseVirt, -+ u32 sizeInBytes, -+ s32 nEntryStart, -+ enum HW_Endianism_t endianism, -+ enum HW_ElementSize_t elemSize, -+ enum HW_MMUMixedSize_t mixedSize); -+ -+#endif /* _TIOMAP_MMU_ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_pwr.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap_pwr.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_pwr.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap_pwr.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,102 @@ -+/* -+ * _tiomap_pwr.h -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _tiomap_pwr.h ======== -+ * Description: -+ * Definitions and types for the DSP wake/sleep routines. -+ * -+ *! Revision History -+ *! ================ -+ *! 08-Oct-2002 rr: Created. -+ */ -+ -+#ifndef _TIOMAP_PWR_ -+#define _TIOMAP_PWR_ -+ -+/* -+ * ======== WakeDSP ========= -+ * Wakes up the DSP from DeepSleep -+ */ -+extern DSP_STATUS WakeDSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs); -+ -+/* -+ * ======== SleepDSP ========= -+ * Places the DSP in DeepSleep. -+ */ -+extern DSP_STATUS SleepDSP(struct WMD_DEV_CONTEXT *pDevContext, -+ IN u32 dwCmd, IN void *pArgs); -+/* -+ * ========InterruptDSP======== -+ * Sends an interrupt to DSP unconditionally. -+ */ -+extern void InterruptDSP(struct WMD_DEV_CONTEXT *pDevContext, IN u16 wMbVal); -+ -+/* -+ * ======== WakeDSP ========= -+ * Wakes up the DSP from DeepSleep -+ */ -+extern DSP_STATUS DSPPeripheralClkCtrl(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs); -+/* -+ * ======== handle_hibernation_fromDSP ======== -+ * Handle Hibernation requested from DSP -+ */ -+DSP_STATUS handle_hibernation_fromDSP(struct WMD_DEV_CONTEXT *pDevContext); -+/* -+ * ======== PostScale_DSP ======== -+ * Handle Post Scale notification to DSP -+ */ -+DSP_STATUS PostScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs); -+/* -+ * ======== PreScale_DSP ======== -+ * Handle Pre Scale notification to DSP -+ */ -+DSP_STATUS PreScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs); -+/* -+ * ======== handle_constraints_set ======== -+ * Handle constraints request from DSP -+ */ -+DSP_STATUS handle_constraints_set(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs); -+/* -+ * ======== DSP_PeripheralClocks_Disable ======== -+ * This function disables all the peripheral clocks that -+ * were enabled by DSP. 
Call this function only when -+ * DSP is entering Hibernation or when DSP is in -+ * Error state -+ */ -+DSP_STATUS DSP_PeripheralClocks_Disable(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs); -+ -+/* -+ * ======== DSP_PeripheralClocks_Enable ======== -+ * This function enables all the peripheral clocks that -+ * were requested by DSP. -+ */ -+DSP_STATUS DSP_PeripheralClocks_Enable(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs); -+ -+/* -+ * ======== DSPClkWakeupEventCtrl ======== -+ * This function sets the group selction bits for while -+ * enabling/disabling. -+ */ -+void DSPClkWakeupEventCtrl(u32 ClkId, bool enable); -+ -+#endif /* _TIOMAP_PWR_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_sm.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap_sm.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap_sm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap_sm.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,197 @@ -+/* -+ * tiomap_sm.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "_tiomap.h" -+#include "_tiomap_pwr.h" -+ -+#define MAILBOX_FIFOSTATUS(m) (0x80 + 4 * (m)) -+ -+extern unsigned short min_active_opp; -+ -+static inline unsigned int fifo_full(void __iomem *mbox_base, int mbox_id) -+{ -+ return __raw_readl(mbox_base + MAILBOX_FIFOSTATUS(mbox_id)) & 0x1; -+} -+ -+DSP_STATUS CHNLSM_EnableInterrupt(struct WMD_DEV_CONTEXT *pDevContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 numMbxMsg; -+ u32 mbxValue; -+ struct CFG_HOSTRES resources; -+ u32 devType; -+ struct IO_MGR *hIOMgr; -+ -+ DBG_Trace(DBG_ENTER, "CHNLSM_EnableInterrupt(0x%x)\n", pDevContext); -+ -+ /* Read the messages in the mailbox until the message queue is empty */ -+ -+ CFG_GetHostResources((struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &resources); -+ DEV_GetDevType(pDevContext->hDevObject, &devType); -+ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); -+ if (devType == DSP_UNIT) { -+ HW_MBOX_NumMsgGet(resources.dwMboxBase, -+ MBOX_DSP2ARM, &numMbxMsg); -+ while (numMbxMsg != 0) { -+ HW_MBOX_MsgRead(resources.dwMboxBase, -+ MBOX_DSP2ARM, -+ &mbxValue); -+ numMbxMsg--; -+ } -+ /* clear the DSP mailbox as well...*/ -+ HW_MBOX_NumMsgGet(resources.dwMboxBase, -+ MBOX_ARM2DSP, &numMbxMsg); -+ while (numMbxMsg != 0) { -+ HW_MBOX_MsgRead(resources.dwMboxBase, -+ MBOX_ARM2DSP, &mbxValue); -+ numMbxMsg--; -+ udelay(10); -+ -+ HW_MBOX_EventAck(resources.dwMboxBase, MBOX_ARM2DSP, -+ HW_MBOX_U1_DSP1, -+ HW_MBOX_INT_NEW_MSG); -+ } -+ /* Enable the new message events on this IRQ line */ -+ HW_MBOX_EventEnable(resources.dwMboxBase, -+ MBOX_DSP2ARM, -+ MBOX_ARM, -+ HW_MBOX_INT_NEW_MSG); -+ } -+ -+ return status; -+} -+ -+DSP_STATUS CHNLSM_DisableInterrupt(struct WMD_DEV_CONTEXT *pDevContext) -+{ -+ struct CFG_HOSTRES resources; -+ -+ DBG_Trace(DBG_ENTER, "CHNLSM_DisableInterrupt(0x%x)\n", pDevContext); -+ -+ CFG_GetHostResources((struct 
CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &resources); -+ HW_MBOX_EventDisable(resources.dwMboxBase, MBOX_DSP2ARM, -+ MBOX_ARM, HW_MBOX_INT_NEW_MSG); -+ return DSP_SOK; -+} -+ -+DSP_STATUS CHNLSM_InterruptDSP2(struct WMD_DEV_CONTEXT *pDevContext, -+ u16 wMbVal) -+{ -+ struct CFG_HOSTRES resources; -+ DSP_STATUS status = DSP_SOK; -+ unsigned long timeout; -+ u32 temp; -+ -+ status = CFG_GetHostResources((struct CFG_DEVNODE *) -+ DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ return DSP_EFAIL; -+ -+ if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION || -+ pDevContext->dwBrdState == BRD_HIBERNATION) { -+#ifdef CONFIG_BRIDGE_DVFS -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+ /* -+ * When Smartreflex is ON, DSP requires at least OPP level 3 -+ * to operate reliably. So boost lower OPP levels to OPP3. -+ */ -+ if (pdata->dsp_set_min_opp) -+ (*pdata->dsp_set_min_opp)(min_active_opp); -+#endif -+ /* Restart the peripheral clocks */ -+ DSP_PeripheralClocks_Enable(pDevContext, NULL); -+ -+ /* -+ * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control -+ * in CM_AUTOIDLE_PLL_IVA2 register -+ */ -+ *(REG_UWORD32 *)(resources.dwCmBase + 0x34) = 0x1; -+ -+ /* -+ * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to -+ * 0.75 MHz - 1.0 MHz -+ * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode -+ */ -+ temp = *(REG_UWORD32 *)(resources.dwCmBase + 0x4); -+ temp = (temp & 0xFFFFFF08) | 0x37; -+ *(REG_UWORD32 *)(resources.dwCmBase + 0x4) = temp; -+ -+ /* -+ * This delay is needed to avoid mailbox timed out -+ * issue experienced while SmartReflex is ON. -+ * TODO: Instead of 1 ms calculate proper value. -+ */ -+ mdelay(1); -+ -+ /* Restore mailbox settings */ -+ HW_MBOX_restoreSettings(resources.dwMboxBase); -+ -+ /* Access MMU SYS CONFIG register to generate a short wakeup */ -+ temp = *(REG_UWORD32 *)(resources.dwDmmuBase + 0x10); -+ -+ pDevContext->dwBrdState = BRD_RUNNING; -+ } -+ -+ timeout = jiffies + msecs_to_jiffies(1); -+ while (fifo_full((void __iomem *) resources.dwMboxBase, 0)) { -+ if (time_after(jiffies, timeout)) { -+ pr_err("dspbridge: timed out waiting for mailbox\n"); -+ return WMD_E_TIMEOUT; -+ } -+ } -+ -+ DBG_Trace(DBG_LEVEL3, "writing %x to Mailbox\n", wMbVal); -+ HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, wMbVal); -+ return DSP_SOK; -+} -+ -+bool CHNLSM_ISR(struct WMD_DEV_CONTEXT *pDevContext, bool *pfSchedDPC, -+ u16 *pwIntrVal) -+{ -+ struct CFG_HOSTRES resources; -+ u32 numMbxMsg; -+ u32 mbxValue; -+ -+ DBG_Trace(DBG_ENTER, "CHNLSM_ISR(0x%x)\n", pDevContext); -+ -+ CFG_GetHostResources((struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ -+ HW_MBOX_NumMsgGet(resources.dwMboxBase, MBOX_DSP2ARM, &numMbxMsg); -+ -+ if (numMbxMsg > 0) { -+ HW_MBOX_MsgRead(resources.dwMboxBase, MBOX_DSP2ARM, &mbxValue); -+ -+ HW_MBOX_EventAck(resources.dwMboxBase, MBOX_DSP2ARM, -+ HW_MBOX_U0_ARM, HW_MBOX_INT_NEW_MSG); -+ -+ DBG_Trace(DBG_LEVEL3, "Read %x from Mailbox\n", mbxValue); -+ *pwIntrVal = (u16) mbxValue; -+ } -+ /* Set *pfSchedDPC to true; */ -+ *pfSchedDPC = true; -+ return true; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_util.h linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap_util.h ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/_tiomap_util.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/_tiomap_util.h 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,46 @@ -+/* -+ * _tiomap_util.h -+ * -+ * DSP-BIOS Bridge driver support 
functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== _tiomap_util.h ======== -+ * Description: -+ * Definitions and types for the utility routines. -+ * -+ *! Revision History -+ *! ================ -+ *! 08-Oct-2002 rr: Created. -+ */ -+ -+#ifndef _TIOMAP_UTIL_ -+#define _TIOMAP_UTIL_ -+ -+/* Time out Values in uSeconds*/ -+#define TIHELEN_ACKTIMEOUT 10000 -+ -+/* Time delay for HOM->SAM transition. */ -+#define WAIT_SAM 1000000 /* in usec (1000 millisec) */ -+ -+/* -+ * ======== WaitForStart ======== -+ * Wait for the singal from DSP that it has started, or time out. -+ * The argument dwSyncAddr is set to 1 before releasing the DSP. -+ * If the DSP starts running, it will clear this location. -+ */ -+extern bool WaitForStart(struct WMD_DEV_CONTEXT *pDevContext, u32 dwSyncAddr); -+ -+#endif /* _TIOMAP_UTIL_ */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap3430.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap3430.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,2091 @@ -+/* -+ * tiomap.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== tiomap.c ======== -+ * Processor Manager Driver for TI OMAP3430 EVM. -+ * -+ * Public Function: -+ * WMD_DRV_Entry -+ * -+ *! Revision History: -+ *! ================ -+ * 26-March-2008 HK and AL: Added WMD_DEV_WalkTbl funciton. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ------------------------------------ Hardware Abstraction Layer */ -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Link Driver */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Local */ -+#include "_tiomap.h" -+#include "_tiomap_pwr.h" -+#include "_tiomap_mmu.h" -+#include "_tiomap_util.h" -+#include "tiomap_io.h" -+ -+ -+/* Offset in shared mem to write to in order to synchronize start with DSP */ -+#define SHMSYNCOFFSET 4 /* GPP byte offset */ -+ -+#define BUFFERSIZE 1024 -+ -+#define MMU_SECTION_ADDR_MASK 0xFFF00000 -+#define MMU_SSECTION_ADDR_MASK 0xFF000000 -+#define MMU_LARGE_PAGE_MASK 0xFFFF0000 -+#define MMU_SMALL_PAGE_MASK 0xFFFFF000 -+#define PAGES_II_LVL_TABLE 512 -+#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT) -+ -+#define MMU_GFLUSH 0x60 -+ -+extern unsigned short min_active_opp; -+ -+/* Forward Declarations: */ -+static DSP_STATUS WMD_BRD_Monitor(struct WMD_DEV_CONTEXT *pDevContext); -+static DSP_STATUS WMD_BRD_Read(struct WMD_DEV_CONTEXT *pDevContext, -+ OUT u8 *pbHostBuf, -+ u32 dwDSPAddr, u32 ulNumBytes, u32 ulMemType); -+static DSP_STATUS WMD_BRD_Start(struct WMD_DEV_CONTEXT *pDevContext, -+ u32 dwDSPAddr); -+static DSP_STATUS WMD_BRD_Status(struct WMD_DEV_CONTEXT *pDevContext, -+ OUT BRD_STATUS *pdwState); -+static DSP_STATUS WMD_BRD_Stop(struct WMD_DEV_CONTEXT *pDevContext); -+static DSP_STATUS WMD_BRD_Write(struct WMD_DEV_CONTEXT *pDevContext, -+ IN u8 *pbHostBuf, -+ u32 dwDSPAddr, u32 ulNumBytes, u32 ulMemType); -+static DSP_STATUS WMD_BRD_SetState(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulBrdState); -+static DSP_STATUS WMD_BRD_MemCopy(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulDspDestAddr, u32 ulDspSrcAddr, -+ u32 ulNumBytes, u32 ulMemType); -+static DSP_STATUS WMD_BRD_MemWrite(struct WMD_DEV_CONTEXT *pDevContext, -+ IN u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType); -+static DSP_STATUS WMD_BRD_MemMap(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulMpuAddr, u32 ulVirtAddr, u32 ulNumBytes, -+ u32 ulMapAttr); -+static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulVirtAddr, u32 ulNumBytes); -+static DSP_STATUS WMD_DEV_Create(OUT struct WMD_DEV_CONTEXT **ppDevContext, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct CFG_HOSTRES *pConfig, -+ IN CONST struct CFG_DSPRES *pDspConfig); -+static DSP_STATUS WMD_DEV_Ctrl(struct WMD_DEV_CONTEXT *pDevContext, u32 dwCmd, -+ IN OUT void *pArgs); -+static DSP_STATUS WMD_DEV_Destroy(struct WMD_DEV_CONTEXT *pDevContext); -+static u32 user_va2pa(struct mm_struct *mm, u32 address); -+static DSP_STATUS PteUpdate(struct WMD_DEV_CONTEXT *hDevContext, u32 pa, -+ u32 va, u32 size, -+ struct HW_MMUMapAttrs_t *mapAttrs); -+static DSP_STATUS PteSet(struct PgTableAttrs *pt, u32 pa, u32 va, -+ u32 size, struct HW_MMUMapAttrs_t *attrs); -+static DSP_STATUS 
MemMapVmalloc(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulMpuAddr, u32 ulVirtAddr, -+ u32 ulNumBytes, struct HW_MMUMapAttrs_t *hwAttrs); -+ -+#ifdef CONFIG_BRIDGE_DEBUG -+static void GetHWRegs(void __iomem *prm_base, void __iomem *cm_base) -+{ -+ u32 temp; -+ temp = __raw_readl((cm_base) + 0x00); -+ DBG_Trace(DBG_LEVEL6, "CM_FCLKEN_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((cm_base) + 0x10); -+ DBG_Trace(DBG_LEVEL6, "CM_ICLKEN1_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((cm_base) + 0x20); -+ DBG_Trace(DBG_LEVEL6, "CM_IDLEST_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((cm_base) + 0x48); -+ DBG_Trace(DBG_LEVEL6, "CM_CLKSTCTRL_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((cm_base) + 0x4c); -+ DBG_Trace(DBG_LEVEL6, "CM_CLKSTST_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((prm_base) + 0x50); -+ DBG_Trace(DBG_LEVEL6, "RM_RSTCTRL_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((prm_base) + 0x58); -+ DBG_Trace(DBG_LEVEL6, "RM_RSTST_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((prm_base) + 0xE0); -+ DBG_Trace(DBG_LEVEL6, "PM_PWSTCTRL_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((prm_base) + 0xE4); -+ DBG_Trace(DBG_LEVEL6, "PM_PWSTST_IVA2 = 0x%x \n", temp); -+ temp = __raw_readl((cm_base) + 0xA10); -+ DBG_Trace(DBG_LEVEL6, "CM_ICLKEN1_CORE = 0x%x \n", temp); -+} -+#else -+static inline void GetHWRegs(void __iomem *prm_base, void __iomem *cm_base) -+{ -+} -+#endif -+ -+/* ----------------------------------- Globals */ -+ -+/* Attributes of L2 page tables for DSP MMU */ -+struct PageInfo { -+ u32 numEntries; /* Number of valid PTEs in the L2 PT */ -+} ; -+ -+/* Attributes used to manage the DSP MMU page tables */ -+struct PgTableAttrs { -+ struct SYNC_CSOBJECT *hCSObj; /* Critical section object handle */ -+ -+ u32 L1BasePa; /* Physical address of the L1 PT */ -+ u32 L1BaseVa; /* Virtual address of the L1 PT */ -+ u32 L1size; /* Size of the L1 PT */ -+ u32 L1TblAllocPa; -+ /* Physical address of Allocated mem for L1 table. May not be aligned */ -+ u32 L1TblAllocVa; -+ /* Virtual address of Allocated mem for L1 table. May not be aligned */ -+ u32 L1TblAllocSz; -+ /* Size of consistent memory allocated for L1 table. -+ * May not be aligned */ -+ -+ u32 L2BasePa; /* Physical address of the L2 PT */ -+ u32 L2BaseVa; /* Virtual address of the L2 PT */ -+ u32 L2size; /* Size of the L2 PT */ -+ u32 L2TblAllocPa; -+ /* Physical address of Allocated mem for L2 table. May not be aligned */ -+ u32 L2TblAllocVa; -+ /* Virtual address of Allocated mem for L2 table. May not be aligned */ -+ u32 L2TblAllocSz; -+ /* Size of consistent memory allocated for L2 table. -+ * May not be aligned */ -+ -+ u32 L2NumPages; /* Number of allocated L2 PT */ -+ struct PageInfo *pgInfo; /* Array [L2NumPages] of L2 PT info structs */ -+} ; -+ -+/* -+ * If dsp_debug is true, do not branch to the DSP entry point and wait for DSP -+ * to boot -+ */ -+extern s32 dsp_debug; -+ -+/* -+ * This mini driver's function interface table. -+ */ -+static struct WMD_DRV_INTERFACE drvInterfaceFxns = { -+ WCD_MAJOR_VERSION, /* WCD ver. for which this mini driver is built. 
*/ -+ WCD_MINOR_VERSION, -+ WMD_DEV_Create, -+ WMD_DEV_Destroy, -+ WMD_DEV_Ctrl, -+ WMD_BRD_Monitor, -+ WMD_BRD_Start, -+ WMD_BRD_Stop, -+ WMD_BRD_Status, -+ WMD_BRD_Read, -+ WMD_BRD_Write, -+ WMD_BRD_SetState, -+ WMD_BRD_MemCopy, -+ WMD_BRD_MemWrite, -+ WMD_BRD_MemMap, -+ WMD_BRD_MemUnMap, -+ /* The following CHNL functions are provided by chnl_io.lib: */ -+ WMD_CHNL_Create, -+ WMD_CHNL_Destroy, -+ WMD_CHNL_Open, -+ WMD_CHNL_Close, -+ WMD_CHNL_AddIOReq, -+ WMD_CHNL_GetIOC, -+ WMD_CHNL_CancelIO, -+ WMD_CHNL_FlushIO, -+ WMD_CHNL_GetInfo, -+ WMD_CHNL_GetMgrInfo, -+ WMD_CHNL_Idle, -+ WMD_CHNL_RegisterNotify, -+ /* The following DEH functions are provided by tihelen_ue_deh.c */ -+ WMD_DEH_Create, -+ WMD_DEH_Destroy, -+ WMD_DEH_Notify, -+ WMD_DEH_RegisterNotify, -+ WMD_DEH_GetInfo, -+ /* The following IO functions are provided by chnl_io.lib: */ -+ WMD_IO_Create, -+ WMD_IO_Destroy, -+ WMD_IO_OnLoaded, -+ WMD_IO_GetProcLoad, -+ /* The following MSG functions are provided by chnl_io.lib: */ -+ WMD_MSG_Create, -+ WMD_MSG_CreateQueue, -+ WMD_MSG_Delete, -+ WMD_MSG_DeleteQueue, -+ WMD_MSG_Get, -+ WMD_MSG_Put, -+ WMD_MSG_RegisterNotify, -+ WMD_MSG_SetQueueId, -+}; -+ -+static inline void tlb_flush_all(const void __iomem *base) -+{ -+ __raw_writeb(__raw_readb(base + MMU_GFLUSH) | 1, base + MMU_GFLUSH); -+} -+ -+static inline void flush_all(struct WMD_DEV_CONTEXT *pDevContext) -+{ -+ if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION || -+ pDevContext->dwBrdState == BRD_HIBERNATION) -+ WakeDSP(pDevContext, NULL); -+ -+ tlb_flush_all(pDevContext->dwDSPMmuBase); -+} -+ -+static void bad_page_dump(u32 pa, struct page *pg) -+{ -+ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); -+ pr_emerg("Bad page state in process '%s'\n" -+ "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" -+ "Backtrace:\n", -+ current->comm, pg, (int)(2*sizeof(unsigned long)), -+ (unsigned long)pg->flags, pg->mapping, -+ page_mapcount(pg), page_count(pg)); -+ BUG(); -+} -+ -+/* -+ * ======== WMD_DRV_Entry ======== -+ * purpose: -+ * Mini Driver entry point. -+ */ -+void WMD_DRV_Entry(OUT struct WMD_DRV_INTERFACE **ppDrvInterface, -+ IN CONST char *pstrWMDFileName) -+{ -+ -+ DBC_Require(pstrWMDFileName != NULL); -+ DBG_Trace(DBG_ENTER, "In the WMD_DRV_Entry \n"); -+ -+ IO_SM_init(); /* Initialization of io_sm module */ -+ -+ if (strcmp(pstrWMDFileName, "UMA") == 0) -+ *ppDrvInterface = &drvInterfaceFxns; -+ else -+ DBG_Trace(DBG_LEVEL7, "WMD_DRV_Entry Unknown WMD file name"); -+ -+} -+ -+/* -+ * ======== WMD_BRD_Monitor ======== -+ * purpose: -+ * This WMD_BRD_Monitor puts DSP into a Loadable state. -+ * i.e Application can load and start the device. -+ * -+ * Preconditions: -+ * Device in 'OFF' state. 
-+ */ -+static DSP_STATUS WMD_BRD_Monitor(struct WMD_DEV_CONTEXT *hDevContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ struct CFG_HOSTRES resources; -+ u32 temp; -+ enum HW_PwrState_t pwrState; -+ -+ DBG_Trace(DBG_ENTER, "Board in the monitor state \n"); -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ goto error_return; -+ -+ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); -+ HW_PWRST_IVA2RegGet(resources.dwPrmBase, &temp); -+ if ((temp & 0x03) != 0x03 || (temp & 0x03) != 0x02) { -+ /* IVA2 is not in ON state */ -+ /* Read and set PM_PWSTCTRL_IVA2 to ON */ -+ HW_PWR_IVA2PowerStateSet(resources.dwPrmBase, -+ HW_PWR_DOMAIN_DSP, -+ HW_PWR_STATE_ON); -+ /* Set the SW supervised state transition */ -+ HW_PWR_CLKCTRL_IVA2RegSet(resources.dwCmBase, HW_SW_SUP_WAKEUP); -+ /* Wait until the state has moved to ON */ -+ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, -+ &pwrState); -+ /* Disable Automatic transition */ -+ HW_PWR_CLKCTRL_IVA2RegSet(resources.dwCmBase, HW_AUTOTRANS_DIS); -+ } -+ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Monitor - Middle ****** \n"); -+ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); -+ HW_RST_UnReset(resources.dwPrmBase, HW_RST2_IVA2); -+ CLK_Enable(SERVICESCLK_iva2_ck); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* set the device state to IDLE */ -+ pDevContext->dwBrdState = BRD_IDLE; -+ } -+error_return: -+ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Monitor - End ****** \n"); -+ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); -+ return status; -+} -+ -+/* -+ * ======== WMD_BRD_Read ======== -+ * purpose: -+ * Reads buffers for DSP memory. -+ */ -+static DSP_STATUS WMD_BRD_Read(struct WMD_DEV_CONTEXT *hDevContext, -+ OUT u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ u32 offset; -+ u32 dspBaseAddr = hDevContext->dwDspBaseAddr; -+ -+ DBG_Trace(DBG_ENTER, "WMD_BRD_Read, pDevContext: 0x%x\n\t\tpbHostBuf:" -+ " 0x%x\n\t\tdwDSPAddr: 0x%x\n\t\tulNumBytes: 0x%x\n\t\t" -+ "ulMemType: 0x%x\n", pDevContext, pbHostBuf, -+ dwDSPAddr, ulNumBytes, ulMemType); -+ if (dwDSPAddr < pDevContext->dwDSPStartAdd) { -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_Read: DSP address < start address \n "); -+ status = DSP_EFAIL; -+ return status; -+ } -+ /* change here to account for the 3 bands of the DSP internal memory */ -+ if ((dwDSPAddr - pDevContext->dwDSPStartAdd) < -+ pDevContext->dwInternalSize) { -+ offset = dwDSPAddr - pDevContext->dwDSPStartAdd; -+ } else { -+ DBG_Trace(DBG_LEVEL1, -+ "**** Reading From external memory **** \n "); -+ status = ReadExtDspData(pDevContext, pbHostBuf, dwDSPAddr, -+ ulNumBytes, ulMemType); -+ return status; -+ } -+ /* copy the data from DSP memory, */ -+ memcpy(pbHostBuf, (void *)(dspBaseAddr + offset), ulNumBytes); -+ return status; -+} -+ -+/* -+ * ======== WMD_BRD_SetState ======== -+ * purpose: -+ * This routine updates the Board status. -+ */ -+static DSP_STATUS WMD_BRD_SetState(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulBrdState) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ -+ DBG_Trace(DBG_ENTER, "WMD_BRD_SetState: Board State: 0x%x \n", -+ ulBrdState); -+ pDevContext->dwBrdState = ulBrdState; -+ return status; -+} -+ -+/* -+ * ======== WMD_BRD_Start ======== -+ * purpose: -+ * Initializes DSP MMU and Starts DSP. -+ * -+ * Preconditions: -+ * a) DSP domain is 'ACTIVE'. 
-+ * b) DSP_RST1 is asserted. -+ * b) DSP_RST2 is released. -+ */ -+static DSP_STATUS WMD_BRD_Start(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 dwDSPAddr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ u32 dwSyncAddr = 0; -+ u32 ulShmBase; /* Gpp Phys SM base addr(byte) */ -+ u32 ulShmBaseVirt; /* Dsp Virt SM base addr */ -+ u32 ulTLBBaseVirt; /* Base of MMU TLB entry */ -+ u32 ulShmOffsetVirt; /* offset of ulShmBaseVirt from ulTLBBaseVirt */ -+ s32 iEntryNdx; -+ s32 itmpEntryNdx = 0; /* DSP-MMU TLB entry base address */ -+ struct CFG_HOSTRES resources; -+ u32 temp; -+ u32 ulDspClkRate; -+ u32 ulDspClkAddr; -+ u32 ulBiosGpTimer; -+ u32 uClkCmd; -+ struct IO_MGR *hIOMgr; -+ u32 ulLoadMonitorTimer; -+ u32 extClkId = 0; -+ u32 tmpIndex; -+ u32 clkIdIndex = MBX_PM_MAX_RESOURCES; -+ -+ DBG_Trace(DBG_ENTER, "Entering WMD_BRD_Start:\n hDevContext: 0x%x\n\t " -+ "dwDSPAddr: 0x%x\n", hDevContext, dwDSPAddr); -+ -+ /* The device context contains all the mmu setup info from when the -+ * last dsp base image was loaded. The first entry is always -+ * SHMMEM base. */ -+ /* Get SHM_BEG - convert to byte address */ -+ (void) DEV_GetSymbol(pDevContext->hDevObject, SHMBASENAME, -+ &ulShmBaseVirt); -+ ulShmBaseVirt *= DSPWORDSIZE; -+ DBC_Assert(ulShmBaseVirt != 0); -+ /* DSP Virtual address */ -+ ulTLBBaseVirt = pDevContext->aTLBEntry[0].ulDspVa; -+ DBC_Assert(ulTLBBaseVirt <= ulShmBaseVirt); -+ ulShmOffsetVirt = ulShmBaseVirt - (ulTLBBaseVirt * DSPWORDSIZE); -+ /* Kernel logical address */ -+ ulShmBase = pDevContext->aTLBEntry[0].ulGppVa + ulShmOffsetVirt; -+ -+ DBC_Assert(ulShmBase != 0); -+ /* 2nd wd is used as sync field */ -+ dwSyncAddr = ulShmBase + SHMSYNCOFFSET; -+ /* Write a signature into the SHM base + offset; this will -+ * get cleared when the DSP program starts. 
*/ -+ if ((ulShmBaseVirt == 0) || (ulShmBase == 0)) { -+ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Start: Illegal SM base\n"); -+ status = DSP_EFAIL; -+ } else -+ *((volatile u32 *)dwSyncAddr) = 0xffffffff; -+ -+ if (DSP_SUCCEEDED(status)) { -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &resources); -+ /* Assert RST1 i.e only the RST only for DSP megacell */ -+ /* HW_RST_Reset(resources.dwPrcmBase, HW_RST1_IVA2);*/ -+ if (DSP_SUCCEEDED(status)) { -+ HW_RST_Reset(resources.dwPrmBase, HW_RST1_IVA2); -+ if (dsp_debug) { -+ /* Set the bootmode to self loop */ -+ DBG_Trace(DBG_LEVEL7, -+ "Set boot mode to self loop" -+ " for IVA2 Device\n"); -+ HW_DSPSS_BootModeSet(resources.dwSysCtrlBase, -+ HW_DSPSYSC_SELFLOOPBOOT, dwDSPAddr); -+ } else { -+ /* Set the bootmode to '0' - direct boot */ -+ DBG_Trace(DBG_LEVEL7, -+ "Set boot mode to direct" -+ " boot for IVA2 Device \n"); -+ HW_DSPSS_BootModeSet(resources.dwSysCtrlBase, -+ HW_DSPSYSC_DIRECTBOOT, dwDSPAddr); -+ } -+ } -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Reset and Unreset the RST2, so that BOOTADDR is copied to -+ * IVA2 SYSC register */ -+ HW_RST_Reset(resources.dwPrmBase, HW_RST2_IVA2); -+ udelay(100); -+ HW_RST_UnReset(resources.dwPrmBase, HW_RST2_IVA2); -+ udelay(100); -+ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Start 0 ****** \n"); -+ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); -+ /* Disbale the DSP MMU */ -+ HW_MMU_Disable(resources.dwDmmuBase); -+ /* Disable TWL */ -+ HW_MMU_TWLDisable(resources.dwDmmuBase); -+ -+ /* Only make TLB entry if both addresses are non-zero */ -+ for (iEntryNdx = 0; iEntryNdx < WMDIOCTL_NUMOFMMUTLB; -+ iEntryNdx++) { -+ if ((pDevContext->aTLBEntry[iEntryNdx].ulGppPa != 0) && -+ (pDevContext->aTLBEntry[iEntryNdx].ulDspVa != 0)) { -+ DBG_Trace(DBG_LEVEL4, "** (proc) MMU %d GppPa:" -+ " 0x%x DspVa 0x%x Size 0x%x\n", -+ itmpEntryNdx, -+ pDevContext->aTLBEntry[iEntryNdx].ulGppPa, -+ pDevContext->aTLBEntry[iEntryNdx].ulDspVa, -+ pDevContext->aTLBEntry[iEntryNdx].ulSize); -+ configureDspMmu(pDevContext, -+ pDevContext->aTLBEntry[iEntryNdx].ulGppPa, -+ pDevContext->aTLBEntry[iEntryNdx].ulDspVa * -+ DSPWORDSIZE, -+ pDevContext->aTLBEntry[iEntryNdx].ulSize, -+ itmpEntryNdx, -+ pDevContext->aTLBEntry[iEntryNdx].endianism, -+ pDevContext->aTLBEntry[iEntryNdx].elemSize, -+ pDevContext->aTLBEntry[iEntryNdx]. 
-+ mixedMode); -+ itmpEntryNdx++; -+ } -+ } /* end for */ -+ } -+ -+ /* Lock the above TLB entries and get the BIOS and load monitor timer -+ * information*/ -+ if (DSP_SUCCEEDED(status)) { -+ HW_MMU_NumLockedSet(resources.dwDmmuBase, itmpEntryNdx); -+ HW_MMU_VictimNumSet(resources.dwDmmuBase, itmpEntryNdx); -+ HW_MMU_TTBSet(resources.dwDmmuBase, -+ pDevContext->pPtAttrs->L1BasePa); -+ HW_MMU_TWLEnable(resources.dwDmmuBase); -+ /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ -+ -+ -+ temp = __raw_readl((resources.dwDmmuBase) + 0x10); -+ temp = (temp & 0xFFFFFFEF) | 0x11; -+ __raw_writel(temp, (resources.dwDmmuBase) + 0x10); -+ -+ /* Let the DSP MMU run */ -+ HW_MMU_Enable(resources.dwDmmuBase); -+ -+ /* Enable the BIOS clock */ -+ (void)DEV_GetSymbol(pDevContext->hDevObject, -+ BRIDGEINIT_BIOSGPTIMER, -+ &ulBiosGpTimer); -+ DBG_Trace(DBG_LEVEL7, "BIOS GPTimer : 0x%x\n", ulBiosGpTimer); -+ (void)DEV_GetSymbol(pDevContext->hDevObject, -+ BRIDGEINIT_LOADMON_GPTIMER, -+ &ulLoadMonitorTimer); -+ DBG_Trace(DBG_LEVEL7, "Load Monitor Timer : 0x%x\n", -+ ulLoadMonitorTimer); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ if (ulLoadMonitorTimer != 0xFFFF) { -+ uClkCmd = (BPWR_DisableClock << MBX_PM_CLK_CMDSHIFT) | -+ ulLoadMonitorTimer; -+ DBG_Trace(DBG_LEVEL7, -+ "encoded LoadMonitor cmd for Disable: 0x%x\n", -+ uClkCmd); -+ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); -+ -+ extClkId = uClkCmd & MBX_PM_CLK_IDMASK; -+ for (tmpIndex = 0; tmpIndex < MBX_PM_MAX_RESOURCES; -+ tmpIndex++) { -+ if (extClkId == BPWR_CLKID[tmpIndex]) { -+ clkIdIndex = tmpIndex; -+ break; -+ } -+ } -+ -+ if (clkIdIndex < MBX_PM_MAX_RESOURCES) -+ status = CLK_Set_32KHz( -+ BPWR_Clks[clkIdIndex].funClk); -+ else -+ status = DSP_EFAIL; -+ -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, " Error while setting" -+ "LM Timer to 32KHz\n"); -+ } -+ uClkCmd = (BPWR_EnableClock << MBX_PM_CLK_CMDSHIFT) | -+ ulLoadMonitorTimer; -+ DBG_Trace(DBG_LEVEL7, -+ "encoded LoadMonitor cmd for Enable : 0x%x\n", -+ uClkCmd); -+ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); -+ -+ } else { -+ DBG_Trace(DBG_LEVEL7, -+ "Not able to get the symbol for Load " -+ "Monitor Timer\n"); -+ } -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ if (ulBiosGpTimer != 0xFFFF) { -+ uClkCmd = (BPWR_DisableClock << MBX_PM_CLK_CMDSHIFT) | -+ ulBiosGpTimer; -+ DBG_Trace(DBG_LEVEL7, "encoded BIOS GPTimer cmd for" -+ "Disable: 0x%x\n", uClkCmd); -+ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); -+ -+ extClkId = uClkCmd & MBX_PM_CLK_IDMASK; -+ -+ for (tmpIndex = 0; tmpIndex < MBX_PM_MAX_RESOURCES; -+ tmpIndex++) { -+ if (extClkId == BPWR_CLKID[tmpIndex]) { -+ clkIdIndex = tmpIndex; -+ break; -+ } -+ } -+ -+ if (clkIdIndex < MBX_PM_MAX_RESOURCES) -+ status = CLK_Set_32KHz( -+ BPWR_Clks[clkIdIndex].funClk); -+ else -+ status = DSP_EFAIL; -+ -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ " Error while setting BIOS Timer to 32KHz\n"); -+ } -+ -+ uClkCmd = (BPWR_EnableClock << MBX_PM_CLK_CMDSHIFT) | -+ ulBiosGpTimer; -+ DBG_Trace(DBG_LEVEL7, "encoded BIOS GPTimer cmd :" -+ "0x%x\n", uClkCmd); -+ DSPPeripheralClkCtrl(pDevContext, &uClkCmd); -+ -+ } else { -+ DBG_Trace(DBG_LEVEL7, -+ "Not able to get the symbol for BIOS Timer\n"); -+ } -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Set the DSP clock rate */ -+ (void)DEV_GetSymbol(pDevContext->hDevObject, -+ "_BRIDGEINIT_DSP_FREQ", &ulDspClkAddr); -+ /*Set Autoidle Mode for IVA2 PLL */ -+ temp = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwCmBase) + 0x34)); -+ temp = (temp & 0xFFFFFFFE) | 0x1; -+ *((REG_UWORD32 *) 
((u32) (resources.dwCmBase) + 0x34)) = -+ (u32) temp; -+ DBG_Trace(DBG_LEVEL5, "WMD_BRD_Start: _BRIDGE_DSP_FREQ Addr:" -+ "0x%x \n", ulDspClkAddr); -+ if ((unsigned int *)ulDspClkAddr != NULL) { -+ /* Get the clock rate */ -+ status = CLK_GetRate(SERVICESCLK_iva2_ck, -+ &ulDspClkRate); -+ DBG_Trace(DBG_LEVEL5, -+ "WMD_BRD_Start: DSP clock rate (KHZ): 0x%x \n", -+ ulDspClkRate); -+ (void)WMD_BRD_Write(pDevContext, (u8 *)&ulDspClkRate, -+ ulDspClkAddr, sizeof(u32), 0); -+ } -+/*PM_IVA2GRPSEL_PER = 0xC0;*/ -+ temp = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ temp = (temp & 0xFFFFFF30) | 0xC0; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) = -+ (u32) temp; -+ -+/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F;*/ -+ temp = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ temp = (temp & 0xFFFFFF3F); -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) = -+ (u32) temp; -+/*CM_SLEEPDEP_PER |= 0x04;*/ -+ temp = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerBase) + 0x44)); -+ temp = (temp & 0xFFFFFFFB) | 0x04; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerBase) + 0x44)) = -+ (u32) temp; -+ -+/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions*/ -+ temp = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwCmBase) + 0x48)); -+ temp = (temp & 0xFFFFFFFC) | 0x03; -+ *((REG_UWORD32 *) ((u32) (resources.dwCmBase) + 0x48)) = -+ (u32) temp; -+ -+ /* Enable Mailbox events and also drain any pending -+ * stale messages */ -+ (void)CHNLSM_EnableInterrupt(pDevContext); -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ HW_RSTCTRL_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); -+ DBG_Trace(DBG_LEVEL7, "BRD_Start: RM_RSTCTRL_DSP = 0x%x \n", -+ temp); -+ HW_RSTST_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); -+ DBG_Trace(DBG_LEVEL7, "BRD_Start0: RM_RSTST_DSP = 0x%x \n", -+ temp); -+ -+ /* Let DSP go */ -+ DBG_Trace(DBG_LEVEL7, "Unreset, WMD_BRD_Start\n"); -+ /* Enable DSP MMU Interrupts */ -+ HW_MMU_EventEnable(resources.dwDmmuBase, -+ HW_MMU_ALL_INTERRUPTS); -+ /* release the RST1, DSP starts executing now .. 
*/ -+ HW_RST_UnReset(resources.dwPrmBase, HW_RST1_IVA2); -+ -+ HW_RSTST_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); -+ DBG_Trace(DBG_LEVEL7, "BRD_Start: RM_RSTST_DSP = 0x%x \n", -+ temp); -+ HW_RSTCTRL_RegGet(resources.dwPrmBase, HW_RST1_IVA2, &temp); -+ DBG_Trace(DBG_LEVEL5, "WMD_BRD_Start: CM_RSTCTRL_DSP: 0x%x \n", -+ temp); -+ DBG_Trace(DBG_LEVEL7, "Driver waiting for Sync @ 0x%x \n", -+ dwSyncAddr); -+ DBG_Trace(DBG_LEVEL7, "DSP c_int00 Address = 0x%x \n", -+ dwDSPAddr); -+ if (dsp_debug) -+ while (*((volatile u16 *)dwSyncAddr)) -+ ;; -+ } -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Wait for DSP to clear word in shared memory */ -+ /* Read the Location */ -+ if (!WaitForStart(pDevContext, dwSyncAddr)) { -+ status = WMD_E_TIMEOUT; -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_Start Failed to Synchronize\n"); -+ } -+ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); -+ if (DSP_SUCCEEDED(status)) { -+ IO_SHMsetting(hIOMgr, SHM_OPPINFO, NULL); -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_Start: OPP information initialzed\n"); -+ /* Write the synchronization bit to indicate the -+ * completion of OPP table update to DSP -+ */ -+ *((volatile u32 *)dwSyncAddr) = 0XCAFECAFE; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* update board state */ -+ pDevContext->dwBrdState = BRD_RUNNING; -+ /* (void)CHNLSM_EnableInterrupt(pDevContext);*/ -+ DBG_Trace(DBG_LEVEL7, "Device Started \n "); -+ } else { -+ pDevContext->dwBrdState = BRD_UNKNOWN; -+ DBG_Trace(DBG_LEVEL7, "Device not Started \n "); -+ } -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_BRD_Stop ======== -+ * purpose: -+ * Puts DSP in self loop. -+ * -+ * Preconditions : -+ * a) None -+ */ -+static DSP_STATUS WMD_BRD_Stop(struct WMD_DEV_CONTEXT *hDevContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ struct CFG_HOSTRES resources; -+ struct PgTableAttrs *pPtAttrs; -+ u32 dspPwrState; -+ DSP_STATUS clk_status; -+ -+ DBG_Trace(DBG_ENTER, "Entering WMD_BRD_Stop:\nhDevContext: 0x%x\n", -+ hDevContext); -+ -+ /* Disable the mail box interrupts */ -+ (void)CHNLSM_DisableInterrupt(pDevContext); -+ -+ if (pDevContext->dwBrdState == BRD_STOPPED) -+ return status; -+ -+ /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode, -+ * before turning off the clocks.. 
This is to ensure that there are no -+ * pending L3 or other transactons from IVA2 */ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &resources); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_Stop: Get Host resources failed \n"); -+ DBG_Trace(DBG_LEVEL1, "Device Stopp failed \n "); -+ return DSP_EFAIL; -+ } -+ -+ HW_PWRST_IVA2RegGet(resources.dwPrmBase, &dspPwrState); -+ if (dspPwrState != HW_PWR_STATE_OFF) { -+ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_DSPIDLE); -+ mdelay(10); -+ GetHWRegs(resources.dwPrmBase, resources.dwCmBase); -+ udelay(50); -+ -+ clk_status = CLK_Disable(SERVICESCLK_iva2_ck); -+ if (DSP_FAILED(clk_status)) { -+ DBG_Trace(DBG_LEVEL6, -+ "\n WMD_BRD_Stop: CLK_Disable failed " -+ "for iva2_fck\n"); -+ } -+ /* IVA2 is not in OFF state */ -+ /* Set PM_PWSTCTRL_IVA2 to OFF */ -+ HW_PWR_IVA2PowerStateSet(resources.dwPrmBase, -+ HW_PWR_DOMAIN_DSP, -+ HW_PWR_STATE_OFF); -+ /* Set the SW supervised state transition for Sleep */ -+ HW_PWR_CLKCTRL_IVA2RegSet(resources.dwCmBase, HW_SW_SUP_SLEEP); -+ } else { -+ clk_status = CLK_Disable(SERVICESCLK_iva2_ck); -+ if (DSP_FAILED(clk_status)) { -+ DBG_Trace(DBG_LEVEL6, -+ "\n WMD_BRD_Stop: Else loop CLK_Disable failed" -+ " for iva2_fck\n"); -+ } -+ } -+ udelay(10); -+ /* Release the Ext Base virtual Address as the next DSP Program -+ * may have a different load address */ -+ if (pDevContext->dwDspExtBaseAddr) -+ pDevContext->dwDspExtBaseAddr = 0; -+ -+ pDevContext->dwBrdState = BRD_STOPPED; /* update board state */ -+ DBG_Trace(DBG_LEVEL1, "Device Stopped \n "); -+ /* This is a good place to clear the MMU page tables as well */ -+ if (pDevContext->pPtAttrs) { -+ pPtAttrs = pDevContext->pPtAttrs; -+ memset((u8 *) pPtAttrs->L1BaseVa, 0x00, pPtAttrs->L1size); -+ memset((u8 *) pPtAttrs->L2BaseVa, 0x00, pPtAttrs->L2size); -+ memset((u8 *) pPtAttrs->pgInfo, 0x00, -+ (pPtAttrs->L2NumPages * sizeof(struct PageInfo))); -+ } -+ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Stop - End ****** \n"); -+ HW_RST_Reset(resources.dwPrmBase, HW_RST1_IVA2); -+ HW_RST_Reset(resources.dwPrmBase, HW_RST2_IVA2); -+ -+ return status; -+} -+ -+ -+/* -+ * ======== WMD_BRD_Delete ======== -+ * purpose: -+ * Puts DSP in Low power mode -+ * -+ * Preconditions : -+ * a) None -+ */ -+static DSP_STATUS WMD_BRD_Delete(struct WMD_DEV_CONTEXT *hDevContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ struct CFG_HOSTRES resources; -+ struct PgTableAttrs *pPtAttrs; -+ DSP_STATUS clk_status; -+ -+ DBG_Trace(DBG_ENTER, "Entering WMD_BRD_Delete:\nhDevContext: 0x%x\n", -+ hDevContext); -+ -+ /* Disable the mail box interrupts */ -+ (void) CHNLSM_DisableInterrupt(pDevContext); -+ -+ if (pDevContext->dwBrdState == BRD_STOPPED) -+ return status; -+ -+ /* as per TRM, it is advised to first drive -+ * the IVA2 to 'Standby' mode, before turning off the clocks.. 
This is -+ * to ensure that there are no pending L3 or other transactons from -+ * IVA2 */ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_Stop: Get Host resources failed \n"); -+ DBG_Trace(DBG_LEVEL1, "Device Delete failed \n "); -+ return DSP_EFAIL; -+ } -+ status = SleepDSP(pDevContext, PWR_EMERGENCYDEEPSLEEP, NULL); -+ clk_status = CLK_Disable(SERVICESCLK_iva2_ck); -+ if (DSP_FAILED(clk_status)) { -+ DBG_Trace(DBG_LEVEL6, "\n WMD_BRD_Stop: CLK_Disable failed for" -+ " iva2_fck\n"); -+ } -+ /* Release the Ext Base virtual Address as the next DSP Program -+ * may have a different load address */ -+ if (pDevContext->dwDspExtBaseAddr) -+ pDevContext->dwDspExtBaseAddr = 0; -+ -+ pDevContext->dwBrdState = BRD_STOPPED; /* update board state */ -+ DBG_Trace(DBG_LEVEL1, "Device Stopped \n "); -+ /* This is a good place to clear the MMU page tables as well */ -+ if (pDevContext->pPtAttrs) { -+ pPtAttrs = pDevContext->pPtAttrs; -+ memset((u8 *)pPtAttrs->L1BaseVa, 0x00, pPtAttrs->L1size); -+ memset((u8 *)pPtAttrs->L2BaseVa, 0x00, pPtAttrs->L2size); -+ memset((u8 *)pPtAttrs->pgInfo, 0x00, -+ (pPtAttrs->L2NumPages * sizeof(struct PageInfo))); -+ } -+ DBG_Trace(DBG_LEVEL6, "WMD_BRD_Delete - End ****** \n"); -+ HW_RST_Reset(resources.dwPrmBase, HW_RST1_IVA2); -+ HW_RST_Reset(resources.dwPrmBase, HW_RST2_IVA2); -+ -+ return status; -+} -+ -+ -+/* -+ * ======== WMD_BRD_Status ======== -+ * Returns the board status. -+ */ -+static DSP_STATUS WMD_BRD_Status(struct WMD_DEV_CONTEXT *hDevContext, -+ OUT BRD_STATUS *pdwState) -+{ -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ *pdwState = pDevContext->dwBrdState; -+ return DSP_SOK; -+} -+ -+/* -+ * ======== WMD_BRD_Write ======== -+ * Copies the buffers to DSP internal or external memory. -+ */ -+static DSP_STATUS WMD_BRD_Write(struct WMD_DEV_CONTEXT *hDevContext, -+ IN u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ -+ DBG_Trace(DBG_ENTER, "WMD_BRD_Write, pDevContext: 0x%x\n\t\t " -+ "pbHostBuf: 0x%x\n\t\tdwDSPAddr: 0x%x\n\t\tulNumBytes: " -+ "0x%x\n \t\t ulMemtype: 0x%x\n", pDevContext, pbHostBuf, -+ dwDSPAddr, ulNumBytes, ulMemType); -+ if (dwDSPAddr < pDevContext->dwDSPStartAdd) { -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_Write: DSP address < start address \n "); -+ status = DSP_EFAIL; -+ return status; -+ } -+ if ((dwDSPAddr - pDevContext->dwDSPStartAdd) < -+ pDevContext->dwInternalSize) { -+ status = WriteDspData(hDevContext, pbHostBuf, dwDSPAddr, -+ ulNumBytes, ulMemType); -+ } else { -+ status = WriteExtDspData(pDevContext, pbHostBuf, dwDSPAddr, -+ ulNumBytes, ulMemType, false); -+ } -+ -+ DBG_Trace(DBG_ENTER, "WMD_BRD_Write, memcopy : DspLogicAddr=0x%x \n", -+ pDevContext->dwDspBaseAddr); -+ return status; -+} -+ -+/* -+ * ======== WMD_DEV_Create ======== -+ * Creates a driver object. Puts DSP in self loop. 
-+ */ -+static DSP_STATUS WMD_DEV_Create(OUT struct WMD_DEV_CONTEXT **ppDevContext, -+ struct DEV_OBJECT *hDevObject, -+ IN CONST struct CFG_HOSTRES *pConfig, -+ IN CONST struct CFG_DSPRES *pDspConfig) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = NULL; -+ s32 iEntryNdx; -+ s32 tcWordSwap; -+ u32 tcWordSwapSize = sizeof(tcWordSwap); -+ struct CFG_HOSTRES resources; -+ struct PgTableAttrs *pPtAttrs; -+ u32 pg_tbl_pa; -+ u32 pg_tbl_va; -+ u32 align_size; -+ -+ DBG_Trace(DBG_ENTER, "WMD_DEV_Create, ppDevContext: 0x%x\n\t\t " -+ "hDevObject: 0x%x\n\t\tpConfig: 0x%x\n\t\tpDspConfig: 0x%x\n", -+ ppDevContext, hDevObject, pConfig, pDspConfig); -+ /* Allocate and initialize a data structure to contain the mini driver -+ * state, which becomes the context for later calls into this WMD. */ -+ pDevContext = MEM_Calloc(sizeof(struct WMD_DEV_CONTEXT), MEM_NONPAGED); -+ if (!pDevContext) { -+ DBG_Trace(DBG_ENTER, "Failed to allocate mem \n"); -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_ENTER, "Failed to get host resources \n"); -+ status = DSP_EMEMORY; -+ goto func_end; -+ } -+ -+ pDevContext->dwDSPStartAdd = (u32)OMAP_GEM_BASE; -+ pDevContext->dwSelfLoop = (u32)NULL; -+ pDevContext->uDspPerClks = 0; -+ pDevContext->dwInternalSize = OMAP_DSP_SIZE; -+ /* Clear dev context MMU table entries. -+ * These get set on WMD_BRD_IOCTL() call after program loaded. */ -+ for (iEntryNdx = 0; iEntryNdx < WMDIOCTL_NUMOFMMUTLB; iEntryNdx++) { -+ pDevContext->aTLBEntry[iEntryNdx].ulGppPa = -+ pDevContext->aTLBEntry[iEntryNdx].ulDspVa = 0; -+ } -+ pDevContext->numTLBEntries = 0; -+ pDevContext->dwDspBaseAddr = (u32)MEM_LinearAddress((void *) -+ (pConfig->dwMemBase[3]), pConfig->dwMemLength[3]); -+ if (!pDevContext->dwDspBaseAddr) { -+ status = DSP_EFAIL; -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_DEV_Create: failed to Map the API memory\n"); -+ } -+ pPtAttrs = MEM_Calloc(sizeof(struct PgTableAttrs), MEM_NONPAGED); -+ if (pPtAttrs != NULL) { -+ /* Assuming that we use only DSP's memory map -+ * until 0x4000:0000 , we would need only 1024 -+ * L1 enties i.e L1 size = 4K */ -+ pPtAttrs->L1size = 0x1000; -+ align_size = pPtAttrs->L1size; -+ /* Align sizes are expected to be power of 2 */ -+ /* we like to get aligned on L1 table size */ -+ pg_tbl_va = (u32)MEM_AllocPhysMem(pPtAttrs->L1size, -+ align_size, &pg_tbl_pa); -+ -+ /* Check if the PA is aligned for us */ -+ if ((pg_tbl_pa) & (align_size-1)) { -+ /* PA not aligned to page table size , -+ * try with more allocation and align */ -+ MEM_FreePhysMem((void *)pg_tbl_va, pg_tbl_pa, -+ pPtAttrs->L1size); -+ /* we like to get aligned on L1 table size */ -+ pg_tbl_va = (u32) MEM_AllocPhysMem((pPtAttrs->L1size)*2, -+ align_size, &pg_tbl_pa); -+ /* We should be able to get aligned table now */ -+ pPtAttrs->L1TblAllocPa = pg_tbl_pa; -+ pPtAttrs->L1TblAllocVa = pg_tbl_va; -+ pPtAttrs->L1TblAllocSz = pPtAttrs->L1size * 2; -+ /* Align the PA to the next 'align' boundary */ -+ pPtAttrs->L1BasePa = ((pg_tbl_pa) + (align_size-1)) & -+ (~(align_size-1)); -+ pPtAttrs->L1BaseVa = pg_tbl_va + (pPtAttrs->L1BasePa - -+ pg_tbl_pa); -+ } else { -+ /* We got aligned PA, cool */ -+ pPtAttrs->L1TblAllocPa = pg_tbl_pa; -+ pPtAttrs->L1TblAllocVa = pg_tbl_va; -+ pPtAttrs->L1TblAllocSz = pPtAttrs->L1size; -+ pPtAttrs->L1BasePa = pg_tbl_pa; -+ pPtAttrs->L1BaseVa = pg_tbl_va; -+ } -+ if (pPtAttrs->L1BaseVa) -+ memset((u8 
*)pPtAttrs->L1BaseVa, 0x00, -+ pPtAttrs->L1size); -+ -+ /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM + -+ * L4 pages */ -+ pPtAttrs->L2NumPages = ((DMMPOOLSIZE >> 20) + 6); -+ pPtAttrs->L2size = HW_MMU_COARSE_PAGE_SIZE * -+ pPtAttrs->L2NumPages; -+ align_size = 4; /* Make it u32 aligned */ -+ /* we like to get aligned on L1 table size */ -+ pg_tbl_va = (u32)MEM_AllocPhysMem(pPtAttrs->L2size, -+ align_size, &pg_tbl_pa); -+ pPtAttrs->L2TblAllocPa = pg_tbl_pa; -+ pPtAttrs->L2TblAllocVa = pg_tbl_va; -+ pPtAttrs->L2TblAllocSz = pPtAttrs->L2size; -+ pPtAttrs->L2BasePa = pg_tbl_pa; -+ pPtAttrs->L2BaseVa = pg_tbl_va; -+ -+ if (pPtAttrs->L2BaseVa) -+ memset((u8 *)pPtAttrs->L2BaseVa, 0x00, -+ pPtAttrs->L2size); -+ -+ pPtAttrs->pgInfo = MEM_Calloc(pPtAttrs->L2NumPages * -+ sizeof(struct PageInfo), MEM_NONPAGED); -+ DBG_Trace(DBG_LEVEL1, "L1 pa %x, va %x, size %x\n L2 pa %x, va " -+ "%x, size %x\n", pPtAttrs->L1BasePa, -+ pPtAttrs->L1BaseVa, pPtAttrs->L1size, -+ pPtAttrs->L2BasePa, pPtAttrs->L2BaseVa, -+ pPtAttrs->L2size); -+ DBG_Trace(DBG_LEVEL1, "pPtAttrs %x L2 NumPages %x pgInfo %x\n", -+ pPtAttrs, pPtAttrs->L2NumPages, pPtAttrs->pgInfo); -+ } -+ if ((pPtAttrs != NULL) && (pPtAttrs->L1BaseVa != 0) && -+ (pPtAttrs->L2BaseVa != 0) && (pPtAttrs->pgInfo != NULL)) -+ pDevContext->pPtAttrs = pPtAttrs; -+ else -+ status = DSP_EMEMORY; -+ -+ if (DSP_SUCCEEDED(status)) -+ status = SYNC_InitializeCS(&pPtAttrs->hCSObj); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Set the Endianism Register */ /* Need to set this */ -+ /* Retrieve the TC u16 SWAP Option */ -+ status = REG_GetValue(NULL, CURRENTCONFIG, TCWORDSWAP, -+ (u8 *)&tcWordSwap, &tcWordSwapSize); -+ /* Save the value */ -+ pDevContext->tcWordSwapOn = tcWordSwap; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ /* Set the Clock Divisor for the DSP module */ -+ DBG_Trace(DBG_LEVEL7, "WMD_DEV_create:Reset mail box and " -+ "enable the clock \n"); -+ status = CLK_Enable(SERVICESCLK_mailbox_ick); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_DEV_create:Reset mail box and " -+ "enable the clock Fail\n"); -+ } -+ udelay(5); -+ /* 24xx-Linux MMU address is obtained from the host -+ * resources struct */ -+ pDevContext->dwDSPMmuBase = resources.dwDmmuBase; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pDevContext->hDevObject = hDevObject; -+ pDevContext->ulIntMask = 0; -+ /* Store current board state. */ -+ pDevContext->dwBrdState = BRD_STOPPED; -+ /* Return this ptr to our device state to the WCD for storage:*/ -+ *ppDevContext = pDevContext; -+ DBG_Trace(DBG_ENTER, "Device Created \n"); -+ } else { -+ if (pPtAttrs != NULL) { -+ if (pPtAttrs->hCSObj) -+ SYNC_DeleteCS(pPtAttrs->hCSObj); -+ -+ if (pPtAttrs->pgInfo) -+ MEM_Free(pPtAttrs->pgInfo); -+ -+ if (pPtAttrs->L2TblAllocVa) { -+ MEM_FreePhysMem((void *)pPtAttrs->L2TblAllocVa, -+ pPtAttrs->L2TblAllocPa, -+ pPtAttrs->L2TblAllocSz); -+ } -+ if (pPtAttrs->L1TblAllocVa) { -+ MEM_FreePhysMem((void *)pPtAttrs->L1TblAllocVa, -+ pPtAttrs->L1TblAllocPa, -+ pPtAttrs->L1TblAllocSz); -+ } -+ } -+ if (pPtAttrs) -+ MEM_Free(pPtAttrs); -+ -+ if (pDevContext) -+ MEM_Free(pDevContext); -+ -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_DEV_Create Error Device not created\n"); -+ } -+func_end: -+ return status; -+} -+ -+/* -+ * ======== WMD_DEV_Ctrl ======== -+ * Receives device specific commands. 
-+ */ -+static DSP_STATUS WMD_DEV_Ctrl(struct WMD_DEV_CONTEXT *pDevContext, u32 dwCmd, -+ IN OUT void *pArgs) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMDIOCTL_EXTPROC *paExtProc = (struct WMDIOCTL_EXTPROC *)pArgs; -+ s32 ndx; -+ -+ DBG_Trace(DBG_ENTER, "WMD_DEV_Ctrl, pDevContext: 0x%x\n\t\t dwCmd: " -+ "0x%x\n\t\tpArgs: 0x%x\n", pDevContext, dwCmd, pArgs); -+ switch (dwCmd) { -+ case WMDIOCTL_CHNLREAD: -+ break; -+ case WMDIOCTL_CHNLWRITE: -+ break; -+ case WMDIOCTL_SETMMUCONFIG: -+ /* store away dsp-mmu setup values for later use */ -+ for (ndx = 0; ndx < WMDIOCTL_NUMOFMMUTLB; ndx++, paExtProc++) -+ pDevContext->aTLBEntry[ndx] = *paExtProc; -+ break; -+ case WMDIOCTL_DEEPSLEEP: -+ case WMDIOCTL_EMERGENCYSLEEP: -+ /* Currently only DSP Idle is supported Need to update for -+ * later releases */ -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_DEEPSLEEP\n"); -+ status = SleepDSP(pDevContext, PWR_DEEPSLEEP, pArgs); -+ break; -+ case WMDIOCTL_WAKEUP: -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_WAKEUP\n"); -+ status = WakeDSP(pDevContext, pArgs); -+ break; -+ case WMDIOCTL_CLK_CTRL: -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_CLK_CTRL\n"); -+ status = DSP_SOK; -+ /* Looking For Baseport Fix for Clocks */ -+ status = DSPPeripheralClkCtrl(pDevContext, pArgs); -+ break; -+ case WMDIOCTL_PWR_HIBERNATE: -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_PWR_HIBERNATE\n"); -+ status = handle_hibernation_fromDSP(pDevContext); -+ break; -+ case WMDIOCTL_PRESCALE_NOTIFY: -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_PRESCALE_NOTIFY\n"); -+ status = PreScale_DSP(pDevContext, pArgs); -+ break; -+ case WMDIOCTL_POSTSCALE_NOTIFY: -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_POSTSCALE_NOTIFY\n"); -+ status = PostScale_DSP(pDevContext, pArgs); -+ break; -+ case WMDIOCTL_CONSTRAINT_REQUEST: -+ DBG_Trace(DBG_LEVEL5, "WMDIOCTL_CONSTRAINT_REQUEST\n"); -+ status = handle_constraints_set(pDevContext, pArgs); -+ break; -+ default: -+ status = DSP_EFAIL; -+ DBG_Trace(DBG_LEVEL7, "Error in WMD_BRD_Ioctl \n"); -+ break; -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_DEV_Destroy ======== -+ * Destroys the driver object. 
-+ */ -+static DSP_STATUS WMD_DEV_Destroy(struct WMD_DEV_CONTEXT *hDevContext) -+{ -+ struct PgTableAttrs *pPtAttrs; -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = (struct WMD_DEV_CONTEXT *) -+ hDevContext; -+ DBG_Trace(DBG_ENTER, "Entering WMD_DEV_Destroy:n hDevContext ::0x%x\n", -+ hDevContext); -+ /* first put the device to stop state */ -+ WMD_BRD_Delete(pDevContext); -+ if (pDevContext && pDevContext->pPtAttrs) { -+ pPtAttrs = pDevContext->pPtAttrs; -+ if (pPtAttrs->hCSObj) -+ SYNC_DeleteCS(pPtAttrs->hCSObj); -+ -+ if (pPtAttrs->pgInfo) -+ MEM_Free(pPtAttrs->pgInfo); -+ -+ if (pPtAttrs->L2TblAllocVa) { -+ MEM_FreePhysMem((void *)pPtAttrs->L2TblAllocVa, -+ pPtAttrs->L2TblAllocPa, pPtAttrs-> -+ L2TblAllocSz); -+ } -+ if (pPtAttrs->L1TblAllocVa) { -+ MEM_FreePhysMem((void *)pPtAttrs->L1TblAllocVa, -+ pPtAttrs->L1TblAllocPa, pPtAttrs-> -+ L1TblAllocSz); -+ } -+ if (pPtAttrs) -+ MEM_Free(pPtAttrs); -+ -+ } -+ /* Free the driver's device context: */ -+ MEM_Free((void *) hDevContext); -+ return status; -+} -+ -+static DSP_STATUS WMD_BRD_MemCopy(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulDspDestAddr, u32 ulDspSrcAddr, -+ u32 ulNumBytes, u32 ulMemType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ u32 srcAddr = ulDspSrcAddr; -+ u32 destAddr = ulDspDestAddr; -+ u32 copyBytes = 0; -+ u32 totalBytes = ulNumBytes; -+ u8 hostBuf[BUFFERSIZE]; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ while ((totalBytes > 0) && DSP_SUCCEEDED(status)) { -+ copyBytes = totalBytes > BUFFERSIZE ? BUFFERSIZE : totalBytes; -+ /* Read from External memory */ -+ status = ReadExtDspData(hDevContext, hostBuf, srcAddr, -+ copyBytes, ulMemType); -+ if (DSP_SUCCEEDED(status)) { -+ if (destAddr < (pDevContext->dwDSPStartAdd + -+ pDevContext->dwInternalSize)) { -+ /* Write to Internal memory */ -+ status = WriteDspData(hDevContext, hostBuf, -+ destAddr, copyBytes, ulMemType); -+ } else { -+ /* Write to External memory */ -+ status = WriteExtDspData(hDevContext, hostBuf, -+ destAddr, copyBytes, ulMemType, false); -+ } -+ } -+ totalBytes -= copyBytes; -+ srcAddr += copyBytes; -+ destAddr += copyBytes; -+ } -+ return status; -+} -+ -+/* Mem Write does not halt the DSP to write unlike WMD_BRD_Write */ -+static DSP_STATUS WMD_BRD_MemWrite(struct WMD_DEV_CONTEXT *hDevContext, -+ IN u8 *pbHostBuf, u32 dwDSPAddr, -+ u32 ulNumBytes, u32 ulMemType) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ u32 ulRemainBytes = 0; -+ u32 ulBytes = 0; -+ ulRemainBytes = ulNumBytes; -+ while (ulRemainBytes > 0 && DSP_SUCCEEDED(status)) { -+ ulBytes = -+ ulRemainBytes > BUFFERSIZE ? BUFFERSIZE : ulRemainBytes; -+ if (dwDSPAddr < (pDevContext->dwDSPStartAdd + -+ pDevContext->dwInternalSize)) { -+ status = WriteDspData(hDevContext, pbHostBuf, dwDSPAddr, -+ ulBytes, ulMemType); -+ } else { -+ status = WriteExtDspData(hDevContext, pbHostBuf, -+ dwDSPAddr, ulBytes, ulMemType, true); -+ } -+ ulRemainBytes -= ulBytes; -+ dwDSPAddr += ulBytes; -+ pbHostBuf = pbHostBuf + ulBytes; -+ } -+ return status; -+} -+ -+/* -+ * ======== WMD_BRD_MemMap ======== -+ * This function maps MPU buffer to the DSP address space. It performs -+ * linear to physical address translation if required. 
It translates each -+ * page since linear addresses can be physically non-contiguous -+ * All address & size arguments are assumed to be page aligned (in proc.c) -+ * -+ * TODO: Disable MMU while updating the page tables (but that'll stall DSP) -+ */ -+static DSP_STATUS WMD_BRD_MemMap(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulMpuAddr, u32 ulVirtAddr, -+ u32 ulNumBytes, u32 ulMapAttr) -+{ -+ u32 attrs; -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ struct HW_MMUMapAttrs_t hwAttrs; -+ struct vm_area_struct *vma; -+ struct mm_struct *mm = current->mm; -+ u32 numUsrPgs = 0, nr_pages = 0; -+ u32 va = ulVirtAddr; -+ -+ DBG_Trace(DBG_ENTER, "> WMD_BRD_MemMap hDevContext %x, pa %x, va %x, " -+ "size %x, ulMapAttr %x\n", hDevContext, ulMpuAddr, ulVirtAddr, -+ ulNumBytes, ulMapAttr); -+ if (ulNumBytes == 0) -+ return DSP_EINVALIDARG; -+ -+ if (ulMapAttr != 0) { -+ attrs = ulMapAttr; -+ } else { -+ /* Assign default attributes */ -+ attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16; -+ } -+ /* Take mapping properties */ -+ if (attrs & DSP_MAPBIGENDIAN) -+ hwAttrs.endianism = HW_BIG_ENDIAN; -+ else -+ hwAttrs.endianism = HW_LITTLE_ENDIAN; -+ -+ hwAttrs.mixedSize = (enum HW_MMUMixedSize_t) -+ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2); -+ /* Ignore elementSize if mixedSize is enabled */ -+ if (hwAttrs.mixedSize == 0) { -+ if (attrs & DSP_MAPELEMSIZE8) { -+ /* Size is 8 bit */ -+ hwAttrs.elementSize = HW_ELEM_SIZE_8BIT; -+ } else if (attrs & DSP_MAPELEMSIZE16) { -+ /* Size is 16 bit */ -+ hwAttrs.elementSize = HW_ELEM_SIZE_16BIT; -+ } else if (attrs & DSP_MAPELEMSIZE32) { -+ /* Size is 32 bit */ -+ hwAttrs.elementSize = HW_ELEM_SIZE_32BIT; -+ } else if (attrs & DSP_MAPELEMSIZE64) { -+ /* Size is 64 bit */ -+ hwAttrs.elementSize = HW_ELEM_SIZE_64BIT; -+ } else { -+ /* -+ * Mixedsize isn't enabled, so size can't be -+ * zero here -+ */ -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_MemMap: MMU element size is zero\n"); -+ return DSP_EINVALIDARG; -+ } -+ } -+ if (attrs & DSP_MAPDONOTLOCK) -+ hwAttrs.donotlockmpupage = 1; -+ else -+ hwAttrs.donotlockmpupage = 0; -+ -+ if (attrs & DSP_MAPVMALLOCADDR) { -+ return MemMapVmalloc(hDevContext, ulMpuAddr, ulVirtAddr, -+ ulNumBytes, &hwAttrs); -+ } -+ /* -+ * Do OS-specific user-va to pa translation. -+ * Combine physically contiguous regions to reduce TLBs. -+ * Pass the translated pa to PteUpdate. -+ */ -+ if ((attrs & DSP_MAPPHYSICALADDR)) { -+ status = PteUpdate(pDevContext, ulMpuAddr, ulVirtAddr, -+ ulNumBytes, &hwAttrs); -+ goto func_cont; -+ } -+ -+ /* -+ * Important Note: ulMpuAddr is mapped from user application process -+ * to current process - it must lie completely within the current -+ * virtual memory address space in order to be of use to us here! -+ */ -+ down_read(&mm->mmap_sem); -+ vma = find_vma(mm, ulMpuAddr); -+ if (vma) -+ DBG_Trace(DBG_LEVEL6, "VMAfor UserBuf: ulMpuAddr=%x, " -+ "ulNumBytes=%x, vm_start=%x vm_end=%x vm_flags=%x \n", -+ ulMpuAddr, ulNumBytes, vma->vm_start, -+ vma->vm_end, vma->vm_flags); -+ -+ /* -+ * It is observed that under some circumstances, the user buffer is -+ * spread across several VMAs. 
So loop through and check if the entire -+ * user buffer is covered -+ */ -+ while ((vma) && (ulMpuAddr + ulNumBytes > vma->vm_end)) { -+ /* jump to the next VMA region */ -+ vma = find_vma(mm, vma->vm_end + 1); -+ DBG_Trace(DBG_LEVEL6, "VMAfor UserBuf ulMpuAddr=%x, " -+ "ulNumBytes=%x, vm_start=%x vm_end=%x vm_flags=%x\n", -+ ulMpuAddr, ulNumBytes, vma->vm_start, -+ vma->vm_end, vma->vm_flags); -+ } -+ if (!vma) { -+ DBG_Trace(DBG_LEVEL7, "Failed to get the VMA region for " -+ "MPU Buffer !!! \n"); -+ status = DSP_EINVALIDARG; -+ up_read(&mm->mmap_sem); -+ goto func_cont; -+ } -+ -+ numUsrPgs = PAGE_ALIGN(ulNumBytes) / PG_SIZE_4K; -+ -+ DBG_Trace(DBG_LEVEL4, "%s :numOfActualTabEntries=%d, ulNumBytes= %d\n", -+ %s, numUsrPgs, ulNumBytes); -+ -+ if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_RESERVED)) { -+ for (nr_pages = numUsrPgs; nr_pages > 0;) { -+ u32 pa; -+ -+ pa = user_va2pa(mm, ulMpuAddr); -+ if (!pa) { -+ status = DSP_EFAIL; -+ pr_err("DSPBRIDGE: VM_IO mapping physical" -+ "address is invalid\n"); -+ break; -+ } -+ -+ status = PteSet(pDevContext->pPtAttrs, pa, -+ va, HW_PAGE_SIZE_4KB, &hwAttrs); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ "WMD_BRD_MemMap: FAILED IN VM_IO" -+ "PTESET \n"); -+ break; -+ } -+ -+ va += HW_PAGE_SIZE_4KB; -+ ulMpuAddr += HW_PAGE_SIZE_4KB; -+ nr_pages--; -+ } -+ } else { -+ int write = 0; -+ -+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) -+ write = 1; -+ -+ for (nr_pages = numUsrPgs; nr_pages > 0;) { -+ int i, ret; -+ struct page *pages[16]; /* for a reasonable batch */ -+ -+ ret = get_user_pages(current, mm, ulMpuAddr, -+ min_t(int, nr_pages, ARRAY_SIZE(pages)), -+ write, 1, pages, NULL); -+ if (ret <= 0) { -+ pr_err("DSPBRIDGE: get_user_pages FAILED," -+ "MPU addr = 0x%x," -+ "vma->vm_flags = 0x%lx," -+ "get_user_pages ErrValue = %d," -+ "Buffersize=0x%x\n", -+ ulMpuAddr, vma->vm_flags, ret, -+ ulNumBytes); -+ status = DSP_EFAIL; -+ goto fail_mapping; -+ } -+ -+ for (i = 0; i < ret; i++) { -+ struct page *page = pages[i]; -+ -+ status = PteSet(pDevContext->pPtAttrs, -+ page_to_phys(page), va, -+ HW_PAGE_SIZE_4KB, &hwAttrs); -+ if (DSP_FAILED(status)) { -+ pr_err("%s: FAILED IN PTESET\n", -+ __func__); -+ goto fail_mapping; -+ } -+ SetPageMlocked(page); -+ va += HW_PAGE_SIZE_4KB; -+ ulMpuAddr += HW_PAGE_SIZE_4KB; -+ nr_pages--; -+ } -+ } -+ } -+ -+fail_mapping: -+ up_read(&mm->mmap_sem); -+func_cont: -+ /* Don't propogate Linux or HW status to upper layers */ -+ if (DSP_SUCCEEDED(status)) { -+ status = DSP_SOK; -+ } else { -+ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap status %x\n", status); -+ /* -+ * Roll out the mapped pages incase it failed in middle of -+ * mapping -+ */ -+ if (numUsrPgs - nr_pages) { -+ WMD_BRD_MemUnMap(pDevContext, ulVirtAddr, -+ ((numUsrPgs - nr_pages) * PG_SIZE_4K)); -+ } -+ status = DSP_EFAIL; -+ } -+ /* -+ * In any case, flush the TLB -+ * This is called from here instead from PteUpdate to avoid unnecessary -+ * repetition while mapping non-contiguous physical regions of a virtual -+ * region -+ */ -+ flush_all(pDevContext); -+ DBG_Trace(DBG_ENTER, "< WMD_BRD_MemMap status %x\n", status); -+ return status; -+} -+ -+/* -+ * ======== WMD_BRD_MemUnMap ======== -+ * Invalidate the PTEs for the DSP VA block to be unmapped. 
-+ * -+ * PTEs of a mapped memory block are contiguous in any page table -+ * So, instead of looking up the PTE address for every 4K block, -+ * we clear consecutive PTEs until we unmap all the bytes -+ */ -+static DSP_STATUS WMD_BRD_MemUnMap(struct WMD_DEV_CONTEXT *hDevContext, -+ u32 ulVirtAddr, u32 ulNumBytes) -+{ -+ u32 L1BaseVa; -+ u32 L2BaseVa; -+ u32 L2BasePa; -+ u32 L2PageNum; -+ u32 pteVal; -+ u32 pteSize; -+ u32 pteCount; -+ u32 pteAddrL1; -+ u32 pteAddrL2 = 0; -+ u32 remBytes; -+ u32 remBytesL2; -+ u32 vaCurr; -+ struct page *pg = NULL; -+ DSP_STATUS status = DSP_SOK; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ struct PgTableAttrs *pt = pDevContext->pPtAttrs; -+ u32 temp; -+ u32 pAddr; -+ u32 numof4KPages = 0; -+ -+ DBG_Trace(DBG_ENTER, "> WMD_BRD_MemUnMap hDevContext %x, va %x, " -+ "NumBytes %x\n", hDevContext, ulVirtAddr, ulNumBytes); -+ vaCurr = ulVirtAddr; -+ remBytes = ulNumBytes; -+ remBytesL2 = 0; -+ L1BaseVa = pt->L1BaseVa; -+ pteAddrL1 = HW_MMU_PteAddrL1(L1BaseVa, vaCurr); -+ DBG_Trace(DBG_ENTER, "WMD_BRD_MemUnMap L1BaseVa %x, pteAddrL1 %x " -+ "vaCurr %x remBytes %x\n", L1BaseVa, pteAddrL1, -+ vaCurr, remBytes); -+ while (remBytes && (DSP_SUCCEEDED(status))) { -+ u32 vaCurrOrig = vaCurr; -+ /* Find whether the L1 PTE points to a valid L2 PT */ -+ pteAddrL1 = HW_MMU_PteAddrL1(L1BaseVa, vaCurr); -+ pteVal = *(u32 *)pteAddrL1; -+ pteSize = HW_MMU_PteSizeL1(pteVal); -+ -+ if (pteSize != HW_MMU_COARSE_PAGE_SIZE) -+ goto skip_coarse_page; -+ -+ /* -+ * Get the L2 PA from the L1 PTE, and find -+ * corresponding L2 VA -+ */ -+ L2BasePa = HW_MMU_PteCoarseL1(pteVal); -+ L2BaseVa = L2BasePa - pt->L2BasePa + pt->L2BaseVa; -+ L2PageNum = (L2BasePa - pt->L2BasePa) / HW_MMU_COARSE_PAGE_SIZE; -+ /* -+ * Find the L2 PTE address from which we will start -+ * clearing, the number of PTEs to be cleared on this -+ * page, and the size of VA space that needs to be -+ * cleared on this L2 page -+ */ -+ pteAddrL2 = HW_MMU_PteAddrL2(L2BaseVa, vaCurr); -+ pteCount = pteAddrL2 & (HW_MMU_COARSE_PAGE_SIZE - 1); -+ pteCount = (HW_MMU_COARSE_PAGE_SIZE - pteCount) / sizeof(u32); -+ if (remBytes < (pteCount * PG_SIZE_4K)) -+ pteCount = remBytes / PG_SIZE_4K; -+ remBytesL2 = pteCount * PG_SIZE_4K; -+ DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap L2BasePa %x, " -+ "L2BaseVa %x pteAddrL2 %x, remBytesL2 %x\n", -+ L2BasePa, L2BaseVa, pteAddrL2, remBytesL2); -+ /* -+ * Unmap the VA space on this L2 PT. A quicker way -+ * would be to clear pteCount entries starting from -+ * pteAddrL2. However, below code checks that we don't -+ * clear invalid entries or less than 64KB for a 64KB -+ * entry. Similar checking is done for L1 PTEs too -+ * below -+ */ -+ while (remBytesL2 && (DSP_SUCCEEDED(status))) { -+ pteVal = *(u32 *)pteAddrL2; -+ pteSize = HW_MMU_PteSizeL2(pteVal); -+ /* vaCurr aligned to pteSize? 
*/ -+ if (pteSize == 0 || remBytesL2 < pteSize || -+ vaCurr & (pteSize - 1)) { -+ status = DSP_EFAIL; -+ break; -+ } -+ -+ /* Collect Physical addresses from VA */ -+ pAddr = (pteVal & ~(pteSize - 1)); -+ if (pteSize == HW_PAGE_SIZE_64KB) -+ numof4KPages = 16; -+ else -+ numof4KPages = 1; -+ temp = 0; -+ while (temp++ < numof4KPages) { -+ if (!pfn_valid(__phys_to_pfn(pAddr))) { -+ pAddr += HW_PAGE_SIZE_4KB; -+ continue; -+ } -+ pg = phys_to_page(pAddr); -+ if (page_count(pg) < 1) { -+ pr_info("DSPBRIDGE: UNMAP function: " -+ "COUNT 0 FOR PA 0x%x, size = " -+ "0x%x\n", pAddr, ulNumBytes); -+ bad_page_dump(pAddr, pg); -+ } -+ ClearPageMlocked(pg); -+ SetPageDirty(pg); -+ page_cache_release(pg); -+ pAddr += HW_PAGE_SIZE_4KB; -+ } -+ if (HW_MMU_PteClear(pteAddrL2, vaCurr, pteSize) -+ == RET_FAIL) { -+ status = DSP_EFAIL; -+ goto EXIT_LOOP; -+ } -+ -+ status = DSP_SOK; -+ remBytesL2 -= pteSize; -+ vaCurr += pteSize; -+ pteAddrL2 += (pteSize >> 12) * sizeof(u32); -+ } -+ SYNC_EnterCS(pt->hCSObj); -+ if (remBytesL2 == 0) { -+ pt->pgInfo[L2PageNum].numEntries -= pteCount; -+ if (pt->pgInfo[L2PageNum].numEntries == 0) { -+ /* -+ * Clear the L1 PTE pointing to the L2 PT -+ */ -+ if (HW_MMU_PteClear(L1BaseVa, vaCurrOrig, -+ HW_MMU_COARSE_PAGE_SIZE) == RET_OK) -+ status = DSP_SOK; -+ else { -+ status = DSP_EFAIL; -+ SYNC_LeaveCS(pt->hCSObj); -+ goto EXIT_LOOP; -+ } -+ } -+ remBytes -= pteCount * PG_SIZE_4K; -+ } else -+ status = DSP_EFAIL; -+ DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap L2PageNum %x, " -+ "numEntries %x, pteCount %x, status: 0x%x\n", -+ L2PageNum, pt->pgInfo[L2PageNum].numEntries, -+ pteCount, status); -+ SYNC_LeaveCS(pt->hCSObj); -+ continue; -+skip_coarse_page: -+ /* vaCurr aligned to pteSize? */ -+ /* pteSize = 1 MB or 16 MB */ -+ if (pteSize == 0 || remBytes < pteSize || -+ vaCurr & (pteSize - 1)) { -+ status = DSP_EFAIL; -+ break; -+ } -+ -+ if (pteSize == HW_PAGE_SIZE_1MB) -+ numof4KPages = 256; -+ else -+ numof4KPages = 4096; -+ temp = 0; -+ /* Collect Physical addresses from VA */ -+ pAddr = (pteVal & ~(pteSize - 1)); -+ while (temp++ < numof4KPages) { -+ if (pfn_valid(__phys_to_pfn(pAddr))) { -+ pg = phys_to_page(pAddr); -+ if (page_count(pg) < 1) { -+ pr_info("DSPBRIDGE: UNMAP function: " -+ "COUNT 0 FOR PA 0x%x, size = " -+ "0x%x\n", pAddr, ulNumBytes); -+ bad_page_dump(pAddr, pg); -+ } -+ ClearPageMlocked(pg); -+ SetPageDirty(pg); -+ page_cache_release(pg); -+ } -+ pAddr += HW_PAGE_SIZE_4KB; -+ } -+ if (HW_MMU_PteClear(L1BaseVa, vaCurr, pteSize) == RET_OK) { -+ status = DSP_SOK; -+ remBytes -= pteSize; -+ vaCurr += pteSize; -+ } else { -+ status = DSP_EFAIL; -+ goto EXIT_LOOP; -+ } -+ } -+ /* -+ * It is better to flush the TLB here, so that any stale old entries -+ * get flushed -+ */ -+EXIT_LOOP: -+ flush_all(pDevContext); -+ DBG_Trace(DBG_LEVEL1, "WMD_BRD_MemUnMap vaCurr %x, pteAddrL1 %x " -+ "pteAddrL2 %x\n", vaCurr, pteAddrL1, pteAddrL2); -+ DBG_Trace(DBG_ENTER, "< WMD_BRD_MemUnMap status %x remBytes %x, " -+ "remBytesL2 %x\n", status, remBytes, remBytesL2); -+ return status; -+} -+ -+/* -+ * ======== user_va2pa ======== -+ * Purpose: -+ * This function walks through the Linux page tables to convert a userland -+ * virtual address to physical address -+ */ -+static u32 user_va2pa(struct mm_struct *mm, u32 address) -+{ -+ pgd_t *pgd; -+ pmd_t *pmd; -+ pte_t *ptep, pte; -+ -+ pgd = pgd_offset(mm, address); -+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { -+ pmd = pmd_offset(pgd, address); -+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { -+ ptep = pte_offset_map(pmd, address); -+ if 
(ptep) { -+ pte = *ptep; -+ if (pte_present(pte)) -+ return pte & PAGE_MASK; -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+ -+/* -+ * ======== PteUpdate ======== -+ * This function calculates the optimum page-aligned addresses and sizes -+ * Caller must pass page-aligned values -+ */ -+static DSP_STATUS PteUpdate(struct WMD_DEV_CONTEXT *hDevContext, u32 pa, -+ u32 va, u32 size, -+ struct HW_MMUMapAttrs_t *mapAttrs) -+{ -+ u32 i; -+ u32 allBits; -+ u32 paCurr = pa; -+ u32 vaCurr = va; -+ u32 numBytes = size; -+ struct WMD_DEV_CONTEXT *pDevContext = hDevContext; -+ DSP_STATUS status = DSP_SOK; -+ u32 pgSize[] = { HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB, -+ HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB }; -+ DBG_Trace(DBG_ENTER, "> PteUpdate hDevContext %x, pa %x, va %x, " -+ "size %x, mapAttrs %x\n", hDevContext, pa, va, size, mapAttrs); -+ while (numBytes && DSP_SUCCEEDED(status)) { -+ /* To find the max. page size with which both PA & VA are -+ * aligned */ -+ allBits = paCurr | vaCurr; -+ DBG_Trace(DBG_LEVEL1, "allBits %x, paCurr %x, vaCurr %x, " -+ "numBytes %x ", allBits, paCurr, vaCurr, numBytes); -+ for (i = 0; i < 4; i++) { -+ if ((numBytes >= pgSize[i]) && ((allBits & -+ (pgSize[i] - 1)) == 0)) { -+ DBG_Trace(DBG_LEVEL1, "pgSize %x\n", pgSize[i]); -+ status = PteSet(pDevContext->pPtAttrs, paCurr, -+ vaCurr, pgSize[i], mapAttrs); -+ paCurr += pgSize[i]; -+ vaCurr += pgSize[i]; -+ numBytes -= pgSize[i]; -+ /* Don't try smaller sizes. Hopefully we have -+ * reached an address aligned to a bigger page -+ * size */ -+ break; -+ } -+ } -+ } -+ DBG_Trace(DBG_ENTER, "< PteUpdate status %x numBytes %x\n", status, -+ numBytes); -+ return status; -+} -+ -+/* -+ * ======== PteSet ======== -+ * This function calculates PTE address (MPU virtual) to be updated -+ * It also manages the L2 page tables -+ */ -+static DSP_STATUS PteSet(struct PgTableAttrs *pt, u32 pa, u32 va, -+ u32 size, struct HW_MMUMapAttrs_t *attrs) -+{ -+ u32 i; -+ u32 pteVal; -+ u32 pteAddrL1; -+ u32 pteSize; -+ u32 pgTblVa; /* Base address of the PT that will be updated */ -+ u32 L1BaseVa; -+ /* Compiler warns that the next three variables might be used -+ * uninitialized in this function. Doesn't seem so. Working around, -+ * anyways. */ -+ u32 L2BaseVa = 0; -+ u32 L2BasePa = 0; -+ u32 L2PageNum = 0; -+ DSP_STATUS status = DSP_SOK; -+ DBG_Trace(DBG_ENTER, "> PteSet pPgTableAttrs %x, pa %x, va %x, " -+ "size %x, attrs %x\n", pt, pa, va, size, attrs); -+ L1BaseVa = pt->L1BaseVa; -+ pgTblVa = L1BaseVa; -+ if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) { -+ /* Find whether the L1 PTE points to a valid L2 PT */ -+ pteAddrL1 = HW_MMU_PteAddrL1(L1BaseVa, va); -+ if (pteAddrL1 <= (pt->L1BaseVa + pt->L1size)) { -+ pteVal = *(u32 *)pteAddrL1; -+ pteSize = HW_MMU_PteSizeL1(pteVal); -+ } else { -+ return DSP_EFAIL; -+ } -+ SYNC_EnterCS(pt->hCSObj); -+ if (pteSize == HW_MMU_COARSE_PAGE_SIZE) { -+ /* Get the L2 PA from the L1 PTE, and find -+ * corresponding L2 VA */ -+ L2BasePa = HW_MMU_PteCoarseL1(pteVal); -+ L2BaseVa = L2BasePa - pt->L2BasePa + pt->L2BaseVa; -+ L2PageNum = (L2BasePa - pt->L2BasePa) / -+ HW_MMU_COARSE_PAGE_SIZE; -+ } else if (pteSize == 0) { -+ /* L1 PTE is invalid. Allocate a L2 PT and -+ * point the L1 PTE to it */ -+ /* Find a free L2 PT. 
*/ -+ for (i = 0; (i < pt->L2NumPages) && -+ (pt->pgInfo[i].numEntries != 0); i++) -+ ;; -+ if (i < pt->L2NumPages) { -+ L2PageNum = i; -+ L2BasePa = pt->L2BasePa + (L2PageNum * -+ HW_MMU_COARSE_PAGE_SIZE); -+ L2BaseVa = pt->L2BaseVa + (L2PageNum * -+ HW_MMU_COARSE_PAGE_SIZE); -+ /* Endianness attributes are ignored for -+ * HW_MMU_COARSE_PAGE_SIZE */ -+ status = HW_MMU_PteSet(L1BaseVa, L2BasePa, va, -+ HW_MMU_COARSE_PAGE_SIZE, attrs); -+ } else { -+ status = DSP_EMEMORY; -+ } -+ } else { -+ /* Found valid L1 PTE of another size. -+ * Should not overwrite it. */ -+ status = DSP_EFAIL; -+ } -+ if (DSP_SUCCEEDED(status)) { -+ pgTblVa = L2BaseVa; -+ if (size == HW_PAGE_SIZE_64KB) -+ pt->pgInfo[L2PageNum].numEntries += 16; -+ else -+ pt->pgInfo[L2PageNum].numEntries++; -+ DBG_Trace(DBG_LEVEL1, "L2 BaseVa %x, BasePa %x, " -+ "PageNum %x numEntries %x\n", L2BaseVa, -+ L2BasePa, L2PageNum, -+ pt->pgInfo[L2PageNum].numEntries); -+ } -+ SYNC_LeaveCS(pt->hCSObj); -+ } -+ if (DSP_SUCCEEDED(status)) { -+ DBG_Trace(DBG_LEVEL1, "PTE pgTblVa %x, pa %x, va %x, size %x\n", -+ pgTblVa, pa, va, size); -+ DBG_Trace(DBG_LEVEL1, "PTE endianism %x, elementSize %x, " -+ "mixedSize %x\n", attrs->endianism, -+ attrs->elementSize, attrs->mixedSize); -+ status = HW_MMU_PteSet(pgTblVa, pa, va, size, attrs); -+ } -+ DBG_Trace(DBG_ENTER, "< PteSet status %x\n", status); -+ return status; -+} -+ -+/* Memory map kernel VA -- memory allocated with vmalloc */ -+static DSP_STATUS MemMapVmalloc(struct WMD_DEV_CONTEXT *pDevContext, -+ u32 ulMpuAddr, u32 ulVirtAddr, u32 ulNumBytes, -+ struct HW_MMUMapAttrs_t *hwAttrs) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct page *pPage[1]; -+ u32 i; -+ u32 paCurr; -+ u32 paNext; -+ u32 vaCurr; -+ u32 sizeCurr; -+ u32 numPages; -+ u32 pa; -+ u32 numOf4KPages; -+ u32 temp = 0; -+ -+ DBG_Trace(DBG_ENTER, "> MemMapVmalloc hDevContext %x, pa %x, va %x, " -+ "size %x\n", pDevContext, ulMpuAddr, ulVirtAddr, ulNumBytes); -+ -+ /* -+ * Do Kernel va to pa translation. -+ * Combine physically contiguous regions to reduce TLBs. -+ * Pass the translated pa to PteUpdate. 
-+ */ -+ numPages = ulNumBytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ -+ i = 0; -+ vaCurr = ulMpuAddr; -+ pPage[0] = vmalloc_to_page((void *)vaCurr); -+ paNext = page_to_phys(pPage[0]); -+ while (DSP_SUCCEEDED(status) && (i < numPages)) { -+ /* -+ * Reuse paNext from the previous iteraion to avoid -+ * an extra va2pa call -+ */ -+ paCurr = paNext; -+ sizeCurr = PAGE_SIZE; -+ /* -+ * If the next page is physically contiguous, -+ * map it with the current one by increasing -+ * the size of the region to be mapped -+ */ -+ while (++i < numPages) { -+ pPage[0] = vmalloc_to_page((void *)(vaCurr + sizeCurr)); -+ paNext = page_to_phys(pPage[0]); -+ DBG_Trace(DBG_LEVEL5, "Xlate Vmalloc VA=0x%x , " -+ "PA=0x%x \n", (vaCurr + sizeCurr), paNext); -+ if (paNext == (paCurr + sizeCurr)) -+ sizeCurr += PAGE_SIZE; -+ else -+ break; -+ -+ } -+ if (paNext == 0) { -+ status = DSP_EMEMORY; -+ break; -+ } -+ pa = paCurr; -+ numOf4KPages = sizeCurr / HW_PAGE_SIZE_4KB; -+ while (temp++ < numOf4KPages) { -+ get_page(phys_to_page(pa)); -+ pa += HW_PAGE_SIZE_4KB; -+ } -+ status = PteUpdate(pDevContext, paCurr, ulVirtAddr + -+ (vaCurr - ulMpuAddr), sizeCurr, hwAttrs); -+ vaCurr += sizeCurr; -+ } -+ /* Don't propogate Linux or HW status to upper layers */ -+ if (DSP_SUCCEEDED(status)) { -+ status = DSP_SOK; -+ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap succeeded %x\n", -+ status); -+ } else { -+ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap status %x\n", status); -+ status = DSP_EFAIL; -+ } -+ /* -+ * In any case, flush the TLB -+ * This is called from here instead from PteUpdate to avoid unnecessary -+ * repetition while mapping non-contiguous physical regions of a virtual -+ * region -+ */ -+ flush_all(pDevContext); -+ DBG_Trace(DBG_LEVEL7, "< WMD_BRD_MemMap at end status %x\n", status); -+ return status; -+} -+ -+/* -+ * ======== configureDspMmu ======== -+ * Make DSP MMU page table entries. -+ */ -+void configureDspMmu(struct WMD_DEV_CONTEXT *pDevContext, u32 dataBasePhys, -+ u32 dspBaseVirt, u32 sizeInBytes, s32 nEntryStart, -+ enum HW_Endianism_t endianism, -+ enum HW_ElementSize_t elemSize, -+ enum HW_MMUMixedSize_t mixedSize) -+{ -+ struct CFG_HOSTRES resources; -+ struct HW_MMUMapAttrs_t mapAttrs = { endianism, elemSize, mixedSize }; -+ DSP_STATUS status = DSP_SOK; -+ -+ DBC_Require(sizeInBytes > 0); -+ DBG_Trace(DBG_LEVEL1, -+ "configureDspMmu entry %x pa %x, va %x, bytes %x ", -+ nEntryStart, dataBasePhys, dspBaseVirt, sizeInBytes); -+ -+ DBG_Trace(DBG_LEVEL1, "endianism %x, elemSize %x, mixedSize %x\n", -+ endianism, elemSize, mixedSize); -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ status = HW_MMU_TLBAdd(pDevContext->dwDSPMmuBase, dataBasePhys, -+ dspBaseVirt, sizeInBytes, nEntryStart, -+ &mapAttrs, HW_SET, HW_SET); -+} -+ -+/* -+ * ======== WaitForStart ======== -+ * Wait for the singal from DSP that it has started, or time out. 
-+ */ -+bool WaitForStart(struct WMD_DEV_CONTEXT *pDevContext, u32 dwSyncAddr) -+{ -+ u16 usCount = TIHELEN_ACKTIMEOUT; -+ -+ /* Wait for response from board */ -+ while (*((volatile u16 *)dwSyncAddr) && --usCount) -+ udelay(10); -+ -+ /* If timed out: return FALSE */ -+ if (!usCount) { -+ DBG_Trace(DBG_LEVEL7, "Timed out Waiting for DSP to Start\n"); -+ return FALSE; -+ } -+ return TRUE; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430_pwr.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap3430_pwr.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/tiomap3430_pwr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/tiomap3430_pwr.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,750 @@ -+/* -+ * tiomap_pwr.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2007-2008 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* -+ * ======== _tiomap_pwr.c ======== -+ * Description: -+ * Implementation of DSP wake/sleep routines. -+ * -+ *! Revision History -+ *! ================ -+ *! 01-Nov-2007 HK: Added Off mode(Hibernation) support and DVFS support -+ *! 05-Jan-2004 vp: Moved the file to platform specific folder and commented the -+ *! code. -+ *! 27-Mar-2003 vp: Added support for DSP boot idle mode. -+ *! 06-Dec-2002 cring: Added Palm support. -+ *! 08-Oct-2002 rr: Created. 
-+ */ -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+#include -+ -+/* ------------------------------------ Hardware Abstraction Layer */ -+#include -+#include -+#include -+#include -+ -+#include -+ -+/* ----------------------------------- Mini Driver */ -+#include -+ -+/* ----------------------------------- specific to this file */ -+#include "_tiomap.h" -+#include "_tiomap_pwr.h" -+#include "_tiomap_util.h" -+#include -+#include -+ -+#ifdef CONFIG_PM -+#include -+#endif -+extern struct MAILBOX_CONTEXT mboxsetting; -+extern unsigned short enable_off_mode; -+extern unsigned short min_active_opp; -+/* -+ * ======== handle_constraints_set ======== -+ * Sets new DSP constraint -+ */ -+DSP_STATUS handle_constraints_set(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs) -+{ -+#ifdef CONFIG_BRIDGE_DVFS -+ u32 pConstraintVal; -+ DSP_STATUS status = DSP_SOK; -+ struct CFG_HOSTRES resources; -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ -+ pConstraintVal = *(((u32 *)pArgs) + 1); -+ /* Read the target value requested by DSP */ -+ DBG_Trace(DBG_LEVEL7, "handle_constraints_set:" -+ "opp requested = 0x%x\n", pConstraintVal); -+ status = HW_MBOX_saveSettings(resources.dwMboxBase); -+ -+ /* Set the new opp value */ -+ if (pdata->dsp_set_min_opp) { -+ /* -+ * When Smartreflex is ON, DSP requires at least OPP level 3 -+ * to operate reliably. So boost lower OPP levels to OPP3. -+ */ -+ if (pConstraintVal < min_active_opp) { -+ pr_debug("DSPBRIDGE: VDD1 OPP%x elevated to OPP%x\n", -+ pConstraintVal, min_active_opp); -+ (*pdata->dsp_set_min_opp)(min_active_opp); -+ } else -+ (*pdata->dsp_set_min_opp)(pConstraintVal); -+ } -+#endif /* #ifdef CONFIG_BRIDGE_DVFS */ -+ return DSP_SOK; -+} -+ -+/* -+ * ======== handle_hibernation_fromDSP ======== -+ * Handle Hibernation requested from DSP -+ */ -+DSP_STATUS handle_hibernation_fromDSP(struct WMD_DEV_CONTEXT *pDevContext) -+{ -+ DSP_STATUS status = DSP_SOK; -+#ifdef CONFIG_PM -+ u16 usCount = TIHELEN_ACKTIMEOUT; -+ struct CFG_HOSTRES resources; -+ enum HW_PwrState_t pwrState; -+#ifdef CONFIG_BRIDGE_DVFS -+ u32 opplevel; -+ struct IO_MGR *hIOMgr; -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+#endif -+ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ return status; -+ -+ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, -+ &pwrState); -+ /* Wait for DSP to move into Off state, how much time should -+ * we wait? 
*/ -+ while ((pwrState != HW_PWR_STATE_OFF) && --usCount) { -+ udelay(500); -+ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, -+ &pwrState); -+ } -+ if (usCount == 0) { -+ DBG_Trace(DBG_LEVEL7, "Timed out Waiting for DSP Off mode \n"); -+ status = WMD_E_TIMEOUT; -+ return status; -+ } else { -+ -+ /* Save mailbox settings */ -+ status = HW_MBOX_saveSettings(resources.dwMboxBase); -+ DBG_Trace(DBG_LEVEL6, "MailBoxSettings: SYSCONFIG = 0x%x\n", -+ mboxsetting.sysconfig); -+ DBG_Trace(DBG_LEVEL6, "MailBoxSettings: IRQENABLE0 = 0x%x\n", -+ mboxsetting.irqEnable0); -+ DBG_Trace(DBG_LEVEL6, "MailBoxSettings: IRQENABLE1 = 0x%x\n", -+ mboxsetting.irqEnable1); -+ /* Turn off DSP Peripheral clocks and DSP Load monitor timer */ -+ status = DSP_PeripheralClocks_Disable(pDevContext, NULL); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Update the Bridger Driver state */ -+ pDevContext->dwBrdState = BRD_DSP_HIBERNATION; -+#ifdef CONFIG_BRIDGE_DVFS -+ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); -+ if (DSP_FAILED(status)) -+ return status; -+ IO_SHMsetting(hIOMgr, SHM_GETOPP, &opplevel); -+ if (opplevel != VDD1_OPP1) { -+ DBG_Trace(DBG_LEVEL5, -+ " DSP requested OPP = %d, MPU" -+ " requesting low OPP %d instead\n", -+ opplevel, VDD1_OPP1); -+ } -+ /* -+ * Set the OPP to low level before moving to OFF -+ * mode -+ */ -+ if (pdata->dsp_set_min_opp) -+ (*pdata->dsp_set_min_opp)(VDD1_OPP1); -+ status = DSP_SOK; -+#endif /* CONFIG_BRIDGE_DVFS */ -+ } else { -+ DBG_Trace(DBG_LEVEL7, -+ "handle_hibernation_fromDSP- FAILED\n"); -+ } -+ } -+#endif -+ return status; -+} -+ -+/* -+ * ======== SleepDSP ======== -+ * Put DSP in low power consuming state. -+ */ -+DSP_STATUS SleepDSP(struct WMD_DEV_CONTEXT *pDevContext, IN u32 dwCmd, -+ IN void *pArgs) -+{ -+ DSP_STATUS status = DSP_SOK; -+#ifdef CONFIG_PM -+ struct CFG_HOSTRES resources; -+ struct DEH_MGR *hDehMgr; -+ u16 usCount = TIHELEN_ACKTIMEOUT; -+ enum HW_PwrState_t pwrState, targetPwrState; -+ -+ DBG_Trace(DBG_LEVEL7, "SleepDSP- Enter function \n"); -+ -+ /* Check if sleep code is valid */ -+ if ((dwCmd != PWR_DEEPSLEEP) && (dwCmd != PWR_EMERGENCYDEEPSLEEP)) { -+ DBG_Trace(DBG_LEVEL7, "SleepDSP- Illegal sleep command\n"); -+ return DSP_EINVALIDARG; -+ } -+ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ return status; -+ -+ switch (pDevContext->dwBrdState) { -+ case BRD_RUNNING: -+ status = HW_MBOX_saveSettings(resources.dwMboxBase); -+ if (enable_off_mode) { -+ CHNLSM_InterruptDSP2(pDevContext, -+ MBX_PM_DSPHIBERNATE); -+ DBG_Trace(DBG_LEVEL7, -+ "SleepDSP - Sent hibernate " -+ "command to DSP\n"); -+ targetPwrState = HW_PWR_STATE_OFF; -+ } else { -+ CHNLSM_InterruptDSP2(pDevContext, -+ MBX_PM_DSPRETENTION); -+ targetPwrState = HW_PWR_STATE_RET; -+ } -+ break; -+ case BRD_RETENTION: -+ status = HW_MBOX_saveSettings(resources.dwMboxBase); -+ if (enable_off_mode) { -+ CHNLSM_InterruptDSP2(pDevContext, -+ MBX_PM_DSPHIBERNATE); -+ targetPwrState = HW_PWR_STATE_OFF; -+ } else -+ return DSP_SOK; -+ break; -+ case BRD_HIBERNATION: -+ case BRD_DSP_HIBERNATION: -+ /* Already in Hibernation, so just return */ -+ DBG_Trace(DBG_LEVEL7, "SleepDSP- DSP already in " -+ "hibernation\n"); -+ return DSP_SOK; -+ case BRD_STOPPED: -+ DBG_Trace(DBG_LEVEL7, -+ "SleepDSP- Board in STOP state \n"); -+ return DSP_SALREADYASLEEP; -+ default: -+ DBG_Trace(DBG_LEVEL7, -+ "SleepDSP- Bridge in Illegal state\n"); -+ return DSP_EFAIL; -+ } -+ -+ /* Get the PRCM DSP power domain status */ -+ 
HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, -+ &pwrState); -+ -+ /* -+ * Wait for DSP to move into Standby state, how much time -+ * should we wait? -+ */ -+ while ((pwrState != targetPwrState) && --usCount) { -+ udelay(500); -+ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, -+ &pwrState); -+ } -+ -+ if (!usCount) { -+ DBG_Trace(DBG_LEVEL7, "SleepDSP: Timed out Waiting for DSP" -+ " STANDBY %x \n", pwrState); -+ DEV_GetDehMgr(pDevContext->hDevObject, &hDehMgr); -+ WMD_DEH_Notify(hDehMgr, DSP_PWRERROR, 0); -+ return WMD_E_TIMEOUT; -+ } else { -+ DBG_Trace(DBG_LEVEL7, "SleepDSP: DSP STANDBY Pwr state %x \n", -+ pwrState); -+ -+ /* Update the Bridger Driver state */ -+ if (enable_off_mode) -+ pDevContext->dwBrdState = BRD_HIBERNATION; -+ else -+ pDevContext->dwBrdState = BRD_RETENTION; -+ -+ /* Turn off DSP Peripheral clocks */ -+ status = DSP_PeripheralClocks_Disable(pDevContext, NULL); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, "SleepDSP- FAILED\n"); -+ return status; -+ } -+#ifdef CONFIG_BRIDGE_DVFS -+ else if (targetPwrState == HW_PWR_STATE_OFF) { -+ struct dspbridge_platform_data *pdata = -+ omap_dspbridge_dev->dev.platform_data; -+ /* -+ * Set the OPP to low level before moving to OFF mode -+ */ -+ if (pdata->dsp_set_min_opp) -+ (*pdata->dsp_set_min_opp)(VDD1_OPP1); -+ } -+#endif /* CONFIG_BRIDGE_DVFS */ -+ } -+#endif /* CONFIG_PM */ -+ return status; -+} -+ -+ -+/* -+ * ======== WakeDSP ======== -+ * Wake up DSP from sleep. -+ */ -+DSP_STATUS WakeDSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs) -+{ -+#ifdef CONFIG_PM -+ DSP_STATUS status = DSP_SOK; -+#ifdef CONFIG_BRIDGE_DEBUG -+ enum HW_PwrState_t pwrState; -+ struct CFG_HOSTRES resources; -+ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ return status; -+#endif /* CONFIG_BRIDGE_DEBUG */ -+ -+ /* Check the BRD/WMD state, if it is not 'SLEEP' then return failure */ -+ if (pDevContext->dwBrdState == BRD_RUNNING || -+ pDevContext->dwBrdState == BRD_STOPPED) { -+ /* The Device is in 'RET' or 'OFF' state and WMD state is not -+ * 'SLEEP', this means state inconsistency, so return */ -+ return DSP_SOK; -+ } -+ -+ /* Send a wakeup message to DSP */ -+ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_DSPWAKEUP); -+ -+#ifdef CONFIG_BRIDGE_DEBUG -+ HW_PWR_IVA2StateGet(resources.dwPrmBase, HW_PWR_DOMAIN_DSP, -+ &pwrState); -+ DBG_Trace(DBG_LEVEL7, -+ "\nWakeDSP: Power State After sending Interrupt " -+ "to DSP %x\n", pwrState); -+#endif /* CONFIG_BRIDGE_DEBUG */ -+ -+ /* Set the device state to RUNNIG */ -+ pDevContext->dwBrdState = BRD_RUNNING; -+#endif /* CONFIG_PM */ -+ return status; -+} -+ -+/* -+ * ======== DSPPeripheralClkCtrl ======== -+ * Enable/Disable the DSP peripheral clocks as needed.. 
-+ */ -+DSP_STATUS DSPPeripheralClkCtrl(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs) -+{ -+ u32 extClk = 0; -+ u32 extClkId = 0; -+ u32 extClkCmd = 0; -+ u32 clkIdIndex = MBX_PM_MAX_RESOURCES; -+ u32 tmpIndex; -+ u32 dspPerClksBefore; -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_SOK; -+ -+ DBG_Trace(DBG_ENTER, "Entering DSPPeripheralClkCtrl \n"); -+ dspPerClksBefore = pDevContext->uDspPerClks; -+ DBG_Trace(DBG_ENTER, "DSPPeripheralClkCtrl : uDspPerClks = 0x%x \n", -+ dspPerClksBefore); -+ -+ extClk = (u32)*((u32 *)pArgs); -+ -+ DBG_Trace(DBG_LEVEL3, "DSPPeripheralClkCtrl : extClk+Cmd = 0x%x \n", -+ extClk); -+ -+ extClkId = extClk & MBX_PM_CLK_IDMASK; -+ -+ /* process the power message -- TODO, keep it in a separate function */ -+ for (tmpIndex = 0; tmpIndex < MBX_PM_MAX_RESOURCES; tmpIndex++) { -+ if (extClkId == BPWR_CLKID[tmpIndex]) { -+ clkIdIndex = tmpIndex; -+ break; -+ } -+ } -+ /* TODO -- Assert may be a too hard restriction here.. May be we should -+ * just return with failure when the CLK ID does not match */ -+ /* DBC_Assert(clkIdIndex < MBX_PM_MAX_RESOURCES);*/ -+ if (clkIdIndex == MBX_PM_MAX_RESOURCES) { -+ DBG_Trace(DBG_LEVEL7, -+ "DSPPeripheralClkCtrl : Could n't get clock Id for" -+ "clkid 0x%x \n", clkIdIndex); -+ /* return with a more meaningfull error code */ -+ return DSP_EFAIL; -+ } -+ extClkCmd = (extClk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK; -+ switch (extClkCmd) { -+ case BPWR_DisableClock: -+ /* Call BP to disable the needed clock */ -+ DBG_Trace(DBG_LEVEL3, -+ "DSPPeripheralClkCtrl : Disable CLK for \n"); -+ status1 = CLK_Disable(BPWR_Clks[clkIdIndex].intClk); -+ status = CLK_Disable(BPWR_Clks[clkIdIndex].funClk); -+ DSPClkWakeupEventCtrl(BPWR_Clks[clkIdIndex].clkId, false); -+ if ((DSP_SUCCEEDED(status)) && (DSP_SUCCEEDED(status1))) { -+ (pDevContext->uDspPerClks) &= -+ (~((u32) (1 << clkIdIndex))); -+ } else { -+ DBG_Trace(DBG_LEVEL7, "DSPPeripheralClkCtrl : Failed " -+ "to disable clk\n"); -+ } -+ break; -+ case BPWR_EnableClock: -+ DBG_Trace(DBG_LEVEL3, -+ "DSPPeripheralClkCtrl : Enable CLK for \n"); -+ status1 = CLK_Enable(BPWR_Clks[clkIdIndex].intClk); -+ status = CLK_Enable(BPWR_Clks[clkIdIndex].funClk); -+ DSPClkWakeupEventCtrl(BPWR_Clks[clkIdIndex].clkId, true); -+ if ((DSP_SUCCEEDED(status)) && (DSP_SUCCEEDED(status1))) { -+ (pDevContext->uDspPerClks) |= (1 << clkIdIndex); -+ } else { -+ DBG_Trace(DBG_LEVEL7, -+ "DSPPeripheralClkCtrl:Failed to Enable clk\n"); -+ } -+ break; -+ default: -+ DBG_Trace(DBG_LEVEL3, -+ "DSPPeripheralClkCtrl : Unsupported CMD \n"); -+ /* unsupported cmd */ -+ /* TODO -- provide support for AUTOIDLE Enable/Disable -+ * commands */ -+ } -+ return status; -+} -+ -+/* -+ * ========PreScale_DSP======== -+ * Sends prescale notification to DSP -+ * -+ */ -+DSP_STATUS PreScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs) -+{ -+#ifdef CONFIG_BRIDGE_DVFS -+ u32 level; -+ u32 voltage_domain; -+ -+ voltage_domain = *((u32 *)pArgs); -+ level = *((u32 *)pArgs + 1); -+ -+ DBG_Trace(DBG_LEVEL7, "PreScale_DSP: voltage_domain = %x, level = " -+ "0x%x\n", voltage_domain, level); -+ if ((pDevContext->dwBrdState == BRD_HIBERNATION) || -+ (pDevContext->dwBrdState == BRD_RETENTION) || -+ (pDevContext->dwBrdState == BRD_DSP_HIBERNATION)) { -+ DBG_Trace(DBG_LEVEL7, "PreScale_DSP: IVA in sleep. 
" -+ "No notification to DSP\n"); -+ return DSP_SOK; -+ } else if ((pDevContext->dwBrdState == BRD_RUNNING)) { -+ /* Send a prenotificatio to DSP */ -+ DBG_Trace(DBG_LEVEL7, -+ "PreScale_DSP: Sent notification to DSP\n"); -+ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_SETPOINT_PRENOTIFY); -+ return DSP_SOK; -+ } else { -+ DBG_Trace(DBG_LEVEL7, "PreScale_DSP: Failed - DSP BRD" -+ " state in wrong state"); -+ return DSP_EFAIL; -+ } -+#endif /* #ifdef CONFIG_BRIDGE_DVFS */ -+ return DSP_SOK; -+} -+ -+/* -+ * ========PostScale_DSP======== -+ * Sends postscale notification to DSP -+ * -+ */ -+DSP_STATUS PostScale_DSP(struct WMD_DEV_CONTEXT *pDevContext, IN void *pArgs) -+{ -+#ifdef CONFIG_BRIDGE_DVFS -+ u32 level; -+ u32 voltage_domain; -+ struct IO_MGR *hIOMgr; -+ DSP_STATUS status = DSP_SOK; -+ -+ status = DEV_GetIOMgr(pDevContext->hDevObject, &hIOMgr); -+ -+ voltage_domain = *((u32 *)pArgs); -+ level = *((u32 *)pArgs + 1); -+ DBG_Trace(DBG_LEVEL7, -+ "PostScale_DSP: voltage_domain = %x, level = 0x%x\n", -+ voltage_domain, level); -+ if ((pDevContext->dwBrdState == BRD_HIBERNATION) || -+ (pDevContext->dwBrdState == BRD_RETENTION) || -+ (pDevContext->dwBrdState == BRD_DSP_HIBERNATION)) { -+ /* Update the OPP value in shared memory */ -+ IO_SHMsetting(hIOMgr, SHM_CURROPP, &level); -+ DBG_Trace(DBG_LEVEL7, -+ "PostScale_DSP: IVA in sleep. Wrote to shared " -+ "memory \n"); -+ return DSP_SOK; -+ } else if ((pDevContext->dwBrdState == BRD_RUNNING)) { -+ /* Update the OPP value in shared memory */ -+ IO_SHMsetting(hIOMgr, SHM_CURROPP, &level); -+ /* Send a post notification to DSP */ -+ CHNLSM_InterruptDSP2(pDevContext, MBX_PM_SETPOINT_POSTNOTIFY); -+ DBG_Trace(DBG_LEVEL7, -+ "PostScale_DSP: Wrote to shared memory Sent post" -+ " notification to DSP\n"); -+ return DSP_SOK; -+ } else { -+ DBG_Trace(DBG_LEVEL7, "PostScale_DSP: Failed - DSP BRD state " -+ "in wrong state"); -+ return DSP_EFAIL; -+ } -+#endif /* #ifdef CONFIG_BRIDGE_DVFS */ -+ return DSP_SOK; -+} -+ -+/* -+ * ========DSP_PeripheralClocks_Disable======== -+ * Disables all the peripheral clocks that were requested by DSP -+ */ -+DSP_STATUS DSP_PeripheralClocks_Disable(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs) -+{ -+ -+ u32 clkIdx; -+ DSP_STATUS status = DSP_SOK; -+ -+ for (clkIdx = 0; clkIdx < MBX_PM_MAX_RESOURCES; clkIdx++) { -+ if (((pDevContext->uDspPerClks) >> clkIdx) & 0x01) { -+ /* Disables the interface clock of the peripheral */ -+ status = CLK_Disable(BPWR_Clks[clkIdx].intClk); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ "Failed to Enable the DSP Peripheral" -+ "Clk 0x%x \n", BPWR_Clks[clkIdx]); -+ } -+ /* Disables the functional clock of the periphearl */ -+ status = CLK_Disable(BPWR_Clks[clkIdx].funClk); -+ if (DSP_FAILED(status)) { -+ DBG_Trace(DBG_LEVEL7, -+ "Failed to Enable the DSP Peripheral" -+ "Clk 0x%x \n", BPWR_Clks[clkIdx]); -+ } -+ } -+ } -+ return status; -+} -+ -+/* -+ * ========DSP_PeripheralClocks_Enable======== -+ * Enables all the peripheral clocks that were requested by DSP -+ */ -+DSP_STATUS DSP_PeripheralClocks_Enable(struct WMD_DEV_CONTEXT *pDevContext, -+ IN void *pArgs) -+{ -+ u32 clkIdx; -+ DSP_STATUS int_clk_status = DSP_EFAIL, fun_clk_status = DSP_EFAIL; -+ -+ for (clkIdx = 0; clkIdx < MBX_PM_MAX_RESOURCES; clkIdx++) { -+ if (((pDevContext->uDspPerClks) >> clkIdx) & 0x01) { -+ /* Enable the interface clock of the peripheral */ -+ int_clk_status = CLK_Enable(BPWR_Clks[clkIdx].intClk); -+ /* Enable the functional clock of the periphearl */ -+ fun_clk_status = 
CLK_Enable(BPWR_Clks[clkIdx].funClk); -+ } -+ } -+ if ((int_clk_status | fun_clk_status) != DSP_SOK) -+ return DSP_EFAIL; -+ return DSP_SOK; -+} -+ -+void DSPClkWakeupEventCtrl(u32 ClkId, bool enable) -+{ -+ struct CFG_HOSTRES resources; -+ DSP_STATUS status = DSP_SOK; -+ u32 iva2_grpsel; -+ u32 mpu_grpsel; -+ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), &resources); -+ if (DSP_FAILED(status)) -+ return; -+ -+ switch (ClkId) { -+ case BPWR_GPTimer5: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_GPT5; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_GPT5; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_GPTimer6: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_GPT6; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_GPT6; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_GPTimer7: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_GPT7; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_GPT7; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_GPTimer8: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_GPT8; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_GPT8; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_MCBSP1: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwCorePmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwCorePmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_MCBSP2: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2; -+ } else { -+ 
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_MCBSP3: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_MCBSP4: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwPerPmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwPerPmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ case BPWR_MCBSP5: -+ iva2_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwCorePmBase) + 0xA8)); -+ mpu_grpsel = (u32) *((REG_UWORD32 *) -+ ((u32) (resources.dwCorePmBase) + 0xA4)); -+ if (enable) { -+ iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5; -+ mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5; -+ } else { -+ mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5; -+ iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5; -+ } -+ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA8)) -+ = iva2_grpsel; -+ *((REG_UWORD32 *) ((u32) (resources.dwCorePmBase) + 0xA4)) -+ = mpu_grpsel; -+ break; -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/ue_deh.c linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/ue_deh.c ---- linux-omap-2.6.28-omap1/drivers/dsp/bridge/wmd/ue_deh.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/bridge/wmd/ue_deh.c 2011-06-22 13:19:32.553063280 +0200 -@@ -0,0 +1,371 @@ -+/* -+ * ue_deh.c -+ * -+ * DSP-BIOS Bridge driver support functions for TI OMAP processors. -+ * -+ * Copyright (C) 2005-2006 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+ -+/* -+ * ======== ue_deh.c ======== -+ * Description: -+ * Implements upper edge DSP exception handling (DEH) functions. -+ * -+ *! Revision History: -+ *! ================ -+ *! 03-Jan-2005 hn: Support for IVA DEH. -+ *! 05-Jan-2004 vp: Updated for the 24xx HW library. -+ *! 19-Feb-2003 vp: Code review updates. -+ *! - Cosmetic changes. -+ *! 18-Oct-2002 sb: Ported to Linux platform. -+ *! 10-Dec-2001 kc: Updated DSP error reporting in DEBUG mode. -+ *! 10-Sep-2001 kc: created. 
-+ */ -+ -+/* ----------------------------------- Host OS */ -+#include -+ -+/* ----------------------------------- DSP/BIOS Bridge */ -+#include -+#include -+#include -+ -+/* ----------------------------------- Trace & Debug */ -+#include -+#include -+ -+/* ----------------------------------- OS Adaptation Layer */ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* ----------------------------------- Link Driver */ -+#include -+ -+/* ----------------------------------- Platform Manager */ -+#include -+#include -+ -+/* ------------------------------------ Hardware Abstraction Layer */ -+#include -+#include -+ -+/* ----------------------------------- This */ -+#include "mmu_fault.h" -+#include "_tiomap.h" -+#include "_deh.h" -+#include "_tiomap_mmu.h" -+#include "_tiomap_pwr.h" -+#include -+ -+static struct HW_MMUMapAttrs_t mapAttrs = { HW_LITTLE_ENDIAN, -+ HW_ELEM_SIZE_16BIT, -+ HW_MMU_CPUES} ; -+#define VirtToPhys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) -+ -+static u32 dummyVaAddr; -+/* -+ * ======== WMD_DEH_Create ======== -+ * Creates DEH manager object. -+ */ -+DSP_STATUS WMD_DEH_Create(OUT struct DEH_MGR **phDehMgr, -+ struct DEV_OBJECT *hDevObject) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEH_MGR *pDehMgr = NULL; -+ struct CFG_HOSTRES cfgHostRes; -+ struct CFG_DEVNODE *hDevNode; -+ struct WMD_DEV_CONTEXT *hWmdContext = NULL; -+ -+ DBG_Trace(DBG_LEVEL1, "Entering DEH_Create: 0x%x\n", phDehMgr); -+ /* Message manager will be created when a file is loaded, since -+ * size of message buffer in shared memory is configurable in -+ * the base image. */ -+ /* Get WMD context info. */ -+ DEV_GetWMDContext(hDevObject, &hWmdContext); -+ DBC_Assert(hWmdContext); -+ dummyVaAddr = 0; -+ /* Allocate IO manager object: */ -+ MEM_AllocObject(pDehMgr, struct DEH_MGR, SIGNATURE); -+ if (pDehMgr == NULL) { -+ status = DSP_EMEMORY; -+ } else { -+ /* Create an NTFY object to manage notifications */ -+ if (DSP_SUCCEEDED(status)) -+ status = NTFY_Create(&pDehMgr->hNtfy); -+ -+ /* Create a DPC object. */ -+ status = DPC_Create(&pDehMgr->hMmuFaultDpc, MMU_FaultDpc, -+ (void *)pDehMgr); -+ if (DSP_SUCCEEDED(status)) -+ status = DEV_GetDevNode(hDevObject, &hDevNode); -+ -+ if (DSP_SUCCEEDED(status)) -+ status = CFG_GetHostResources(hDevNode, &cfgHostRes); -+ -+ if (DSP_SUCCEEDED(status)) { -+ /* Fill in context structure */ -+ pDehMgr->hWmdContext = hWmdContext; -+ pDehMgr->errInfo.dwErrMask = 0L; -+ pDehMgr->errInfo.dwVal1 = 0L; -+ pDehMgr->errInfo.dwVal2 = 0L; -+ pDehMgr->errInfo.dwVal3 = 0L; -+ /* Install ISR function for DSP MMU fault */ -+ if ((request_irq(INT_DSP_MMU_IRQ, MMU_FaultIsr, 0, -+ "DspBridge\tiommu fault", (void *)pDehMgr)) == 0) -+ status = DSP_SOK; -+ else -+ status = DSP_EFAIL; -+ } -+ } -+ if (DSP_FAILED(status)) { -+ /* If create failed, cleanup */ -+ WMD_DEH_Destroy((struct DEH_MGR *)pDehMgr); -+ *phDehMgr = NULL; -+ } else { -+ *phDehMgr = (struct DEH_MGR *)pDehMgr; -+ DBG_Trace(DBG_LEVEL1, "ISR_IRQ Object 0x%x \n", -+ pDehMgr); -+ } -+ DBG_Trace(DBG_LEVEL1, "Exiting DEH_Create.\n"); -+ return status; -+} -+ -+/* -+ * ======== WMD_DEH_Destroy ======== -+ * Destroys DEH manager object. 
-+ */ -+DSP_STATUS WMD_DEH_Destroy(struct DEH_MGR *hDehMgr) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; -+ -+ DBG_Trace(DBG_LEVEL1, "Entering DEH_Destroy: 0x%x\n", pDehMgr); -+ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { -+ /* Release dummy VA buffer */ -+ WMD_DEH_ReleaseDummyMem(); -+ /* If notification object exists, delete it */ -+ if (pDehMgr->hNtfy) -+ (void)NTFY_Delete(pDehMgr->hNtfy); -+ /* Disable DSP MMU fault */ -+ free_irq(INT_DSP_MMU_IRQ, pDehMgr); -+ (void)DPC_Destroy(pDehMgr->hMmuFaultDpc); -+ /* Deallocate the DEH manager object */ -+ MEM_FreeObject(pDehMgr); -+ } -+ DBG_Trace(DBG_LEVEL1, "Exiting DEH_Destroy.\n"); -+ return status; -+} -+ -+/* -+ * ======== WMD_DEH_RegisterNotify ======== -+ * Registers for DEH notifications. -+ */ -+DSP_STATUS WMD_DEH_RegisterNotify(struct DEH_MGR *hDehMgr, u32 uEventMask, -+ u32 uNotifyType, -+ struct DSP_NOTIFICATION *hNotification) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; -+ -+ DBG_Trace(DBG_LEVEL1, "Entering WMD_DEH_RegisterNotify: 0x%x\n", -+ pDehMgr); -+ -+ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { -+ status = NTFY_Register(pDehMgr->hNtfy, hNotification, -+ uEventMask, uNotifyType); -+ } -+ DBG_Trace(DBG_LEVEL1, "Exiting WMD_DEH_RegisterNotify.\n"); -+ return status; -+} -+ -+ -+/* -+ * ======== WMD_DEH_Notify ======== -+ * DEH error notification function. Informs user about the error. -+ */ -+void WMD_DEH_Notify(struct DEH_MGR *hDehMgr, u32 ulEventMask, -+ u32 dwErrInfo) -+{ -+ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; -+ struct WMD_DEV_CONTEXT *pDevContext; -+ DSP_STATUS status = DSP_SOK; -+ DSP_STATUS status1 = DSP_EFAIL; -+ u32 memPhysical = 0; -+ u32 HW_MMU_MAX_TLB_COUNT = 31; -+ extern u32 faultAddr; -+ struct CFG_HOSTRES resources; -+ HW_STATUS hwStatus; -+ -+ status = CFG_GetHostResources( -+ (struct CFG_DEVNODE *)DRV_GetFirstDevExtension(), -+ &resources); -+ if (DSP_FAILED(status)) -+ DBG_Trace(DBG_LEVEL7, -+ "**Failed to get Host Resources in MMU ISR **\n"); -+ -+ DBG_Trace(DBG_LEVEL1, "Entering WMD_DEH_Notify: 0x%x, 0x%x\n", pDehMgr, -+ ulEventMask); -+ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { -+ printk(KERN_INFO "WMD_DEH_Notify: ********** DEVICE EXCEPTION " -+ "**********\n"); -+ pDevContext = (struct WMD_DEV_CONTEXT *)pDehMgr->hWmdContext; -+ -+ switch (ulEventMask) { -+ case DSP_SYSERROR: -+ /* reset errInfo structure before use */ -+ pDehMgr->errInfo.dwErrMask = DSP_SYSERROR; -+ pDehMgr->errInfo.dwVal1 = 0L; -+ pDehMgr->errInfo.dwVal2 = 0L; -+ pDehMgr->errInfo.dwVal3 = 0L; -+ pDehMgr->errInfo.dwVal1 = dwErrInfo; -+ printk(KERN_ERR "WMD_DEH_Notify: DSP_SYSERROR, errInfo " -+ "= 0x%x\n", dwErrInfo); -+ break; -+ case DSP_MMUFAULT: -+ /* MMU fault routine should have set err info -+ * structure */ -+ pDehMgr->errInfo.dwErrMask = DSP_MMUFAULT; -+ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT," -+ "errInfo = 0x%x\n", dwErrInfo); -+ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, High " -+ "Address = 0x%x\n", -+ (unsigned int)pDehMgr->errInfo.dwVal1); -+ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, Low " -+ "Address = 0x%x\n", -+ (unsigned int)pDehMgr->errInfo.dwVal2); -+ printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, fault " -+ "address = 0x%x\n", (unsigned int)faultAddr); -+ dummyVaAddr = (u32)MEM_Calloc(sizeof(char) * 0x1000, -+ MEM_PAGED); -+ memPhysical = VirtToPhys(PG_ALIGN_LOW((u32)dummyVaAddr, -+ PG_SIZE_4K)); -+DBG_Trace(DBG_LEVEL6, "WMD_DEH_Notify: DSP_MMUFAULT, " -+ "mem Physical= 0x%x\n", 
memPhysical); -+ pDevContext = (struct WMD_DEV_CONTEXT *) -+ pDehMgr->hWmdContext; -+ /* Reset the dynamic mmu index to fixed count if it -+ * exceeds 31. So that the dynmmuindex is always -+ * between the range of standard/fixed entries -+ * and 31. */ -+ if (pDevContext->numTLBEntries > -+ HW_MMU_MAX_TLB_COUNT) { -+ pDevContext->numTLBEntries = pDevContext-> -+ fixedTLBEntries; -+ } -+ DBG_Trace(DBG_LEVEL6, "Adding TLB Entry %d: VA: 0x%x, " -+ "PA: 0x%x\n", pDevContext-> -+ numTLBEntries, faultAddr, memPhysical); -+ if (DSP_SUCCEEDED(status)) { -+ hwStatus = HW_MMU_TLBAdd(resources.dwDmmuBase, -+ memPhysical, faultAddr, -+ HW_PAGE_SIZE_4KB, 1, &mapAttrs, -+ HW_SET, HW_SET); -+ } -+ /* send an interrupt to DSP */ -+ HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, -+ MBX_DEH_CLASS | MBX_DEH_EMMU); -+ /* Clear MMU interrupt */ -+ HW_MMU_EventAck(resources.dwDmmuBase, -+ HW_MMU_TRANSLATION_FAULT); -+ break; -+ case DSP_PWRERROR: -+ /* reset errInfo structure before use */ -+ pDehMgr->errInfo.dwErrMask = DSP_PWRERROR; -+ pDehMgr->errInfo.dwVal1 = 0L; -+ pDehMgr->errInfo.dwVal2 = 0L; -+ pDehMgr->errInfo.dwVal3 = 0L; -+ pDehMgr->errInfo.dwVal1 = dwErrInfo; -+ printk(KERN_ERR "WMD_DEH_Notify: DSP_PWRERROR, errInfo " -+ "= 0x%x\n", dwErrInfo); -+ break; -+ default: -+ DBG_Trace(DBG_LEVEL6, -+ "WMD_DEH_Notify: Unknown Error, errInfo = " -+ "0x%x\n", dwErrInfo); -+ break; -+ } -+ -+ /* Filter subsequent notifications when an error occurs */ -+ if (pDevContext->dwBrdState != BRD_ERROR) { -+ /* Use it as a flag to send notifications the -+ * first time and error occurred, next time -+ * state will be BRD_ERROR */ -+ status1 = DSP_EFAIL; -+ } -+ -+ /* Filter subsequent notifications when an error occurs */ -+ if (pDevContext->dwBrdState != BRD_ERROR) -+ status1 = DSP_SOK; -+ -+ /* Set the Board state as ERROR */ -+ pDevContext->dwBrdState = BRD_ERROR; -+ /* Disable all the clocks that were enabled by DSP */ -+ (void)DSP_PeripheralClocks_Disable(pDevContext, NULL); -+ /* Call DSP Trace Buffer */ -+ PrintDspTraceBuffer(hDehMgr->hWmdContext); -+ -+ if (DSP_SUCCEEDED(status1)) { -+ /* Signal DSP error/exception event. */ -+ NTFY_Notify(pDehMgr->hNtfy, ulEventMask); -+ } -+ -+ } -+ DBG_Trace(DBG_LEVEL1, "Exiting WMD_DEH_Notify\n"); -+ -+} -+ -+/* -+ * ======== WMD_DEH_GetInfo ======== -+ * Retrieves error information. -+ */ -+DSP_STATUS WMD_DEH_GetInfo(struct DEH_MGR *hDehMgr, -+ struct DSP_ERRORINFO *pErrInfo) -+{ -+ DSP_STATUS status = DSP_SOK; -+ struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; -+ -+ DBC_Require(pDehMgr); -+ DBC_Require(pErrInfo); -+ -+ DBG_Trace(DBG_LEVEL1, "Entering WMD_DEH_GetInfo: 0x%x\n", hDehMgr); -+ -+ if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { -+ /* Copy DEH error info structure to PROC error info -+ * structure. 
*/ -+ pErrInfo->dwErrMask = pDehMgr->errInfo.dwErrMask; -+ pErrInfo->dwVal1 = pDehMgr->errInfo.dwVal1; -+ pErrInfo->dwVal2 = pDehMgr->errInfo.dwVal2; -+ pErrInfo->dwVal3 = pDehMgr->errInfo.dwVal3; -+ } -+ -+ DBG_Trace(DBG_LEVEL1, "Exiting WMD_DEH_GetInfo\n"); -+ -+ return status; -+} -+ -+ -+/* -+ * ======== WMD_DEH_ReleaseDummyMem ======== -+ * Releases memory allocated for dummy page -+ */ -+void WMD_DEH_ReleaseDummyMem(void) -+{ -+ if (dummyVaAddr) { -+ MEM_Free((void *)dummyVaAddr); -+ dummyVaAddr = 0; -+ } -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/dsp/dspgateway/dsp_mem.c linux-omap-2.6.28-nokia1/drivers/dsp/dspgateway/dsp_mem.c ---- linux-omap-2.6.28-omap1/drivers/dsp/dspgateway/dsp_mem.c 2011-06-22 13:14:17.533067757 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/dsp/dspgateway/dsp_mem.c 2011-06-22 13:19:32.553063280 +0200 -@@ -32,13 +32,13 @@ - #include - #include - #include -+#include - #include - #include - #include - #include - #include - #include --#include - #include - #include - #include -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpio/gpiolib.c linux-omap-2.6.28-nokia1/drivers/gpio/gpiolib.c ---- linux-omap-2.6.28-omap1/drivers/gpio/gpiolib.c 2011-06-22 13:14:17.573067756 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/gpio/gpiolib.c 2011-06-22 13:19:32.583063278 +0200 -@@ -789,6 +789,7 @@ int gpio_request(unsigned gpio, const ch - } else { - status = -EBUSY; - module_put(chip->owner); -+ goto done; - } - - if (chip->request) { -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/Kconfig linux-omap-2.6.28-nokia1/drivers/gpu/Kconfig ---- linux-omap-2.6.28-omap1/drivers/gpu/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/Kconfig 2011-06-22 13:19:32.583063278 +0200 -@@ -0,0 +1,3 @@ -+source drivers/gpu/pvr/Kconfig -+source drivers/gpu/drm/Kconfig -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/Makefile linux-omap-2.6.28-nokia1/drivers/gpu/Makefile ---- linux-omap-2.6.28-omap1/drivers/gpu/Makefile 2011-06-22 13:14:17.573067756 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/Makefile 2011-06-22 13:19:32.583063278 +0200 -@@ -1 +1,3 @@ --obj-y += drm/ -+obj-$(CONFIG_PVR) += pvr/ -+obj-$(CONFIG_DRM) += drm/ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/bridged_pvr_bridge.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bridged_pvr_bridge.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bridged_pvr_bridge.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bridged_pvr_bridge.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,4793 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". 
-+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+#include "services.h" -+#include "pvr_bridge_km.h" -+#include "pvr_debug.h" -+#include "ra.h" -+#include "pvr_bridge.h" -+#include "sgx_bridge.h" -+#include "perproc.h" -+#include "sgx_bridge_km.h" -+#include "pdump_km.h" -+#include "sgxutils.h" -+#include "mmu.h" -+ -+#include "bridged_pvr_bridge.h" -+#include "env_data.h" -+ -+#include "mmap.h" -+ -+#include /* for cache flush */ -+ -+ -+#if defined(DEBUG) -+#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y)) -+#else -+#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X) -+#endif -+ -+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY -+ g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; -+ -+#if defined(DEBUG_BRIDGE_KM) -+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; -+#endif -+ -+static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS]; -+ -+#if defined(DEBUG_BRIDGE_KM) -+static PVRSRV_ERROR -+CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA * pProcData, -+ IMG_UINT32 ui32BridgeID, -+ IMG_VOID * pvDest, IMG_VOID * pvSrc, IMG_UINT32 ui32Size) -+{ -+ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes += -+ ui32Size; -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes += ui32Size; -+ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size); -+} -+ -+static PVRSRV_ERROR -+CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA * pProcData, -+ IMG_UINT32 ui32BridgeID, -+ IMG_VOID * pvDest, IMG_VOID * pvSrc, IMG_UINT32 ui32Size) -+{ -+ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes += -+ ui32Size; -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes += ui32Size; -+ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size); -+} -+#else -+#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \ -+ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size) -+#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \ -+ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size) -+#endif -+ -+#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \ -+ do \ -+ { \ -+ (error) = (src); \ -+ if ((error) != PVRSRV_OK) \ -+ { \ -+ return (res); \ -+ } \ -+ } while (error != PVRSRV_OK) -+ -+#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \ -+ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0) -+ -+static INLINE PVRSRV_ERROR -+NewHandleBatch(PVRSRV_PER_PROCESS_DATA * psPerProc, IMG_UINT32 ui32BatchSize) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(!psPerProc->bHandlesBatched); -+ -+ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize); -+ -+ if (eError == PVRSRV_OK) { -+ psPerProc->bHandlesBatched = IMG_TRUE; -+ } -+ -+ return eError; -+} -+ -+#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \ -+ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize)) -+ -+static INLINE PVRSRV_ERROR -+CommitHandleBatch(PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVR_ASSERT(psPerProc->bHandlesBatched); -+ -+ psPerProc->bHandlesBatched = IMG_FALSE; -+ -+ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase); -+} -+ -+#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \ -+ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc)) -+ -+static INLINE IMG_VOID ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ if (psPerProc->bHandlesBatched) { -+ psPerProc->bHandlesBatched = IMG_FALSE; -+ -+ 
PVRSRVReleaseHandleBatch(psPerProc->psHandleBase); -+ } -+} -+ -+static int -+PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ PVRSRV_BRIDGE_OUT_ENUMDEVICE * psEnumDeviceOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES); -+ -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ -+ psEnumDeviceOUT->eError = -+ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices, -+ psEnumDeviceOUT->asDeviceIdentifier); -+ -+ return 0; -+} -+ -+static int -+PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO * -+ psAcquireDevInfoIN, -+ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO * -+ psAcquireDevInfoOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO); -+ -+ psAcquireDevInfoOUT->eError = -+ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex, -+ psAcquireDevInfoIN->eDeviceType, -+ &hDevCookieInt); -+ if (psAcquireDevInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psAcquireDevInfoOUT->eError = -+ PVRSRVAllocHandle(psPerProc->psHandleBase, -+ &psAcquireDevInfoOUT->hDevCookie, -+ hDevCookieInt, -+ PVRSRV_HANDLE_TYPE_DEV_NODE, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED); -+ -+ return 0; -+} -+ -+static int -+SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT * -+ psSGXInfoForSrvinitIN, -+ PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT * -+ psSGXInfoForSrvinitOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_UINT32 i; -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, -+ PVRSRV_MAX_CLIENT_HEAPS); -+ -+ if (!psPerProc->bInitProcess) { -+ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ psSGXInfoForSrvinitOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psSGXInfoForSrvinitIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psSGXInfoForSrvinitOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psSGXInfoForSrvinitOUT->eError = -+ SGXGetInfoForSrvinitKM(hDevCookieInt, -+ &psSGXInfoForSrvinitOUT->sInitInfo); -+ -+ if (psSGXInfoForSrvinitOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) { -+ PVRSRV_HEAP_INFO *psHeapInfo; -+ -+ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i]; -+ -+ if (psHeapInfo->ui32HeapID != -+ (IMG_UINT32) SGX_UNDEFINED_HEAP_ID) { -+ IMG_HANDLE hDevMemHeapExt; -+ -+ if (psHeapInfo->hDevMemHeap != IMG_NULL) { -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &hDevMemHeapExt, -+ psHeapInfo->hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED); -+ psHeapInfo->hDevMemHeap = hDevMemHeapExt; -+ } -+ } -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT * -+ psCreateDevMemContextIN, -+ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT * -+ psCreateDevMemContextOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hDevMemContextInt; -+ IMG_UINT32 i; -+ IMG_BOOL bCreated; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT); -+ -+ 
NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, -+ PVRSRV_MAX_CLIENT_HEAPS + 1); -+ -+ psCreateDevMemContextOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psCreateDevMemContextIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psCreateDevMemContextOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psCreateDevMemContextOUT->eError = -+ PVRSRVCreateDeviceMemContextKM(hDevCookieInt, -+ psPerProc, -+ &hDevMemContextInt, -+ &psCreateDevMemContextOUT-> -+ ui32ClientHeapCount, -+ &psCreateDevMemContextOUT-> -+ sHeapInfo[0], &bCreated -+ , abSharedDeviceMemHeap -+ ); -+ -+ if (psCreateDevMemContextOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (bCreated) { -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psCreateDevMemContextOUT->hDevMemContext, -+ hDevMemContextInt, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ } else { -+ psCreateDevMemContextOUT->eError = -+ PVRSRVFindHandle(psPerProc->psHandleBase, -+ &psCreateDevMemContextOUT->hDevMemContext, -+ hDevMemContextInt, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); -+ if (psCreateDevMemContextOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ for (i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++) { -+ IMG_HANDLE hDevMemHeapExt; -+ -+ if (abSharedDeviceMemHeap[i]) -+ { -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &hDevMemHeapExt, -+ psCreateDevMemContextOUT-> -+ sHeapInfo[i].hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED); -+ } -+ else { -+ -+ if (bCreated) { -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &hDevMemHeapExt, -+ psCreateDevMemContextOUT-> -+ sHeapInfo[i].hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ psCreateDevMemContextOUT-> -+ hDevMemContext); -+ } else { -+ psCreateDevMemContextOUT->eError = -+ PVRSRVFindHandle(psPerProc->psHandleBase, -+ &hDevMemHeapExt, -+ psCreateDevMemContextOUT-> -+ sHeapInfo[i].hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); -+ if (psCreateDevMemContextOUT->eError != -+ PVRSRV_OK) { -+ return 0; -+ } -+ } -+ } -+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = -+ hDevMemHeapExt; -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT * -+ psDestroyDevMemContextIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hDevMemContextInt; -+ IMG_BOOL bDestroyed; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psDestroyDevMemContextIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, -+ psDestroyDevMemContextIN->hDevMemContext, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, -+ &bDestroyed); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (bDestroyed) { -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psDestroyDevMemContextIN-> -+ hDevMemContext, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); -+ } -+ -+ return 
0; -+} -+ -+static int -+PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO * -+ psGetDevMemHeapInfoIN, -+ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO * -+ psGetDevMemHeapInfoOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hDevMemContextInt; -+ IMG_UINT32 i; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, -+ PVRSRV_MAX_CLIENT_HEAPS); -+ -+ psGetDevMemHeapInfoOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psGetDevMemHeapInfoIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetDevMemHeapInfoOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, -+ psGetDevMemHeapInfoIN->hDevMemContext, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); -+ -+ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetDevMemHeapInfoOUT->eError = -+ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt, -+ hDevMemContextInt, -+ &psGetDevMemHeapInfoOUT-> -+ ui32ClientHeapCount, -+ &psGetDevMemHeapInfoOUT->sHeapInfo[0] -+ , abSharedDeviceMemHeap -+ ); -+ -+ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ for (i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++) { -+ IMG_HANDLE hDevMemHeapExt; -+ -+ if (abSharedDeviceMemHeap[i]) -+ { -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &hDevMemHeapExt, -+ psGetDevMemHeapInfoOUT-> -+ sHeapInfo[i].hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED); -+ } -+ else { -+ -+ psGetDevMemHeapInfoOUT->eError = -+ PVRSRVFindHandle(psPerProc->psHandleBase, -+ &hDevMemHeapExt, -+ psGetDevMemHeapInfoOUT-> -+ sHeapInfo[i].hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); -+ if (psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = -+ hDevMemHeapExt; -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM * psAllocDeviceMemIN, -+ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM * psAllocDeviceMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo; -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hDevMemHeapInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2); -+ -+ psAllocDeviceMemOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psAllocDeviceMemIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psAllocDeviceMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psAllocDeviceMemOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt, -+ psAllocDeviceMemIN->hDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); -+ -+ if (psAllocDeviceMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psAllocDeviceMemOUT->eError = -+ PVRSRVAllocDeviceMemKM(hDevCookieInt, -+ psPerProc, -+ hDevMemHeapInt, -+ psAllocDeviceMemIN->ui32Attribs, -+ psAllocDeviceMemIN->ui32Size, -+ psAllocDeviceMemIN->ui32Alignment, -+ &psMemInfo); -+ -+ if (psAllocDeviceMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo, -+ 0, sizeof(psAllocDeviceMemOUT->sClientMemInfo)); -+ -+ if 
(psMemInfo->pvLinAddrKM) { -+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM = -+ psMemInfo->pvLinAddrKM; -+ } else { -+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM = -+ psMemInfo->sMemBlk.hOSMemHandle; -+ } -+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0; -+ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; -+ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; -+ psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = -+ psMemInfo->ui32AllocSize; -+ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = -+ psMemInfo->sMemBlk.hOSMemHandle; -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo, -+ psMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) { -+ -+ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo, -+ 0, sizeof(PVRSRV_CLIENT_SYNC_INFO)); -+ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL; -+ psAllocDeviceMemOUT->psKernelSyncInfo = IMG_NULL; -+ } else { -+ -+ psAllocDeviceMemOUT->psKernelSyncInfo = -+ psMemInfo->psKernelSyncInfo; -+ -+ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData = -+ psMemInfo->psKernelSyncInfo->psSyncData; -+ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = -+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; -+ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = -+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = -+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk. -+ hOSMemHandle; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psAllocDeviceMemOUT->sClientSyncInfo. -+ hKernelSyncInfo, -+ psMemInfo->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ psAllocDeviceMemOUT->sClientMemInfo. 
-+ hKernelMemInfo); -+ -+ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = -+ &psAllocDeviceMemOUT->sClientSyncInfo; -+ -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+ -+static int -+PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_FREEDEVICEMEM * psFreeDeviceMemIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_VOID *pvKernelMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psFreeDeviceMemIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo, -+ psFreeDeviceMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psFreeDeviceMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ return 0; -+} -+ -+static int -+PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY * psMapDevMemIN, -+ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY * psMapDevMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL; -+ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL; -+ IMG_HANDLE hDstDevMemHeap = IMG_NULL; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEV_MEMORY); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2); -+ -+ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & -+ psSrcKernelMemInfo, -+ psMapDevMemIN-> -+ psSrcKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psMapDevMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDstDevMemHeap, -+ psMapDevMemIN-> -+ hDstDevMemHeap, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); -+ if (psMapDevMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc, -+ psSrcKernelMemInfo, -+ hDstDevMemHeap, -+ &psDstKernelMemInfo); -+ if (psMapDevMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo, -+ 0, sizeof(psMapDevMemOUT->sDstClientMemInfo)); -+ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo, -+ 0, sizeof(psMapDevMemOUT->sDstClientSyncInfo)); -+ -+ if (psDstKernelMemInfo->pvLinAddrKM) { -+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM = -+ psDstKernelMemInfo->pvLinAddrKM; -+ } else { -+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM = -+ psDstKernelMemInfo->sMemBlk.hOSMemHandle; -+ } -+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0; -+ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = -+ psDstKernelMemInfo->sDevVAddr; -+ psMapDevMemOUT->sDstClientMemInfo.ui32Flags = -+ psDstKernelMemInfo->ui32Flags; -+ psMapDevMemOUT->sDstClientMemInfo.ui32AllocSize = -+ psDstKernelMemInfo->ui32AllocSize; -+ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = -+ psDstKernelMemInfo->sMemBlk.hOSMemHandle; -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo, -+ psDstKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO, -+ 
PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL; -+ psMapDevMemOUT->psDstKernelSyncInfo = IMG_NULL; -+ -+ if (psDstKernelMemInfo->psKernelSyncInfo) { -+ psMapDevMemOUT->psDstKernelSyncInfo = -+ psDstKernelMemInfo->psKernelSyncInfo; -+ -+ psMapDevMemOUT->sDstClientSyncInfo.psSyncData = -+ psDstKernelMemInfo->psKernelSyncInfo->psSyncData; -+ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr = -+ psDstKernelMemInfo->psKernelSyncInfo-> -+ sWriteOpsCompleteDevVAddr; -+ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr = -+ psDstKernelMemInfo->psKernelSyncInfo-> -+ sReadOpsCompleteDevVAddr; -+ -+ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = -+ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM-> -+ sMemBlk.hOSMemHandle; -+ -+ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = -+ &psMapDevMemOUT->sDstClientSyncInfo; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psMapDevMemOUT->sDstClientSyncInfo. -+ hKernelSyncInfo, -+ psDstKernelMemInfo->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psMapDevMemOUT->sDstClientMemInfo. -+ hKernelMemInfo); -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY * psUnmapDevMemIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY); -+ -+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & psKernelMemInfo, -+ psUnmapDevMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psUnmapDevMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ return 0; -+} -+ -+static int -+FlushCacheDRI(IMG_UINT32 ui32Type, IMG_VOID *pvVirt, IMG_UINT32 ui32Length) -+{ -+ switch (ui32Type) { -+ case DRM_PVR2D_CFLUSH_FROM_GPU: -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n", -+ pvVirt, ui32Length)); -+#ifdef CONFIG_ARM -+ dmac_inv_range((const void *)pvVirt, -+ (const void *)(pvVirt + ui32Length)); -+#endif -+ return 0; -+ case DRM_PVR2D_CFLUSH_TO_GPU: -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n", -+ pvVirt, ui32Length)); -+#ifdef CONFIG_ARM -+ dmac_clean_range((const void *)pvVirt, -+ (const void *)(pvVirt + ui32Length)); -+#endif -+ return 0; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "Invalid cflush type 0x%x\n", -+ ui32Type)); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+PVRSRV_ERROR -+PVRSRVIsWrappedExtMemoryBW(PVRSRV_PER_PROCESS_DATA *psPerProc, -+ PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER *psCacheFlushIN) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psCacheFlushIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ eError = PVRSRVIsWrappedExtMemoryKM( -+ hDevCookieInt, -+ psPerProc, -+ &(psCacheFlushIN->ui32Length), -+ &(psCacheFlushIN->pvVirt)); -+ -+ return eError; -+} -+ -+static int -+PVRSRVCacheFlushDRIBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER * psCacheFlushIN, -+ 
PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CACHE_FLUSH_DRM); -+ -+ down_read(¤t->mm->mmap_sem); -+ -+ eError = PVRSRVIsWrappedExtMemoryBW(psPerProc, psCacheFlushIN); -+ -+ if (eError == PVRSRV_OK) { -+ psRetOUT->eError = FlushCacheDRI(psCacheFlushIN->ui32Type, -+ psCacheFlushIN->pvVirt, -+ psCacheFlushIN->ui32Length); -+ } else { -+ printk(KERN_WARNING -+ ": PVRSRVCacheFlushDRIBW: Start address 0x%08x and length 0x%08x not wrapped \n", -+ (unsigned int)(psCacheFlushIN->pvVirt), -+ (unsigned int)(psCacheFlushIN->ui32Length)); -+ } -+ -+ up_read(¤t->mm->mmap_sem); -+ return 0; -+} -+ -+static int -+PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY * -+ psMapDevClassMemIN, -+ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY * -+ psMapDevClassMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo; -+ IMG_HANDLE hOSMapInfo; -+ IMG_HANDLE hDeviceClassBufferInt; -+ PVRSRV_HANDLE_TYPE eHandleType; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2); -+ -+ psMapDevClassMemOUT->eError = -+ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, -+ &hDeviceClassBufferInt, &eHandleType, -+ psMapDevClassMemIN->hDeviceClassBuffer); -+ -+ if (psMapDevClassMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ switch (eHandleType) { -+ case PVRSRV_HANDLE_TYPE_DISP_BUFFER: -+ case PVRSRV_HANDLE_TYPE_BUF_BUFFER: -+ break; -+ default: -+ psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ psMapDevClassMemOUT->eError = -+ PVRSRVMapDeviceClassMemoryKM(psPerProc, -+ hDeviceClassBufferInt, -+ &psMemInfo, &hOSMapInfo); -+ -+ if (psMapDevClassMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo, -+ 0, sizeof(psMapDevClassMemOUT->sClientMemInfo)); -+ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo, -+ 0, sizeof(psMapDevClassMemOUT->sClientSyncInfo)); -+ -+ if (psMemInfo->pvLinAddrKM) { -+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM = -+ psMemInfo->pvLinAddrKM; -+ } else { -+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM = -+ psMemInfo->sMemBlk.hOSMemHandle; -+ } -+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0; -+ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; -+ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; -+ psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize = -+ psMemInfo->ui32AllocSize; -+ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = -+ psMemInfo->sMemBlk.hOSMemHandle; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psMapDevClassMemOUT->sClientMemInfo. 
-+ hKernelMemInfo, psMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ psMapDevClassMemIN->hDeviceClassBuffer); -+ -+ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL; -+ psMapDevClassMemOUT->psKernelSyncInfo = IMG_NULL; -+ -+ if (psMemInfo->psKernelSyncInfo) { -+ psMapDevClassMemOUT->psKernelSyncInfo = -+ psMemInfo->psKernelSyncInfo; -+ -+ psMapDevClassMemOUT->sClientSyncInfo.psSyncData = -+ psMemInfo->psKernelSyncInfo->psSyncData; -+ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = -+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; -+ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = -+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = -+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk. -+ hOSMemHandle; -+ -+ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = -+ &psMapDevClassMemOUT->sClientSyncInfo; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psMapDevClassMemOUT->sClientSyncInfo. -+ hKernelSyncInfo, -+ psMemInfo->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psMapDevClassMemOUT->sClientMemInfo. -+ hKernelMemInfo); -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY * -+ psUnmapDevClassMemIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvKernelMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo, -+ psUnmapDevClassMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psUnmapDevClassMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ return 0; -+} -+ -+static int -+PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY * psWrapExtMemIN, -+ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY * psWrapExtMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo; -+ IMG_UINT32 ui32PageTableSize = 0; -+ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2); -+ -+ psWrapExtMemOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psWrapExtMemIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psWrapExtMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (psWrapExtMemIN->ui32NumPageTableEntries) { -+ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries -+ * sizeof(IMG_SYS_PHYADDR); -+ -+ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError, -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageTableSize, -+ (IMG_VOID **) & psSysPAddr, -+ 0)); -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ psSysPAddr, -+ psWrapExtMemIN->psSysPAddr, -+ ui32PageTableSize) != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, -+ (IMG_VOID *) psSysPAddr, 0); -+ return -EFAULT; -+ } -+ } -+ -+ psWrapExtMemOUT->eError = 
-+ PVRSRVWrapExtMemoryKM(hDevCookieInt, -+ psPerProc, -+ psWrapExtMemIN->ui32ByteSize, -+ psWrapExtMemIN->ui32PageOffset, -+ psWrapExtMemIN->bPhysContig, -+ psSysPAddr, -+ psWrapExtMemIN->pvLinAddr, &psMemInfo); -+ if (psWrapExtMemOUT->eError != PVRSRV_OK) { -+ /* PVRSRVWrapExtMemoryKM failed, so clean up page list */ -+ if (psWrapExtMemIN->ui32NumPageTableEntries) -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageTableSize, -+ (IMG_VOID *) psSysPAddr, 0); -+ return 0; -+ } -+ -+ if (psMemInfo->pvLinAddrKM) { -+ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = -+ psMemInfo->pvLinAddrKM; -+ } else { -+ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = -+ psMemInfo->sMemBlk.hOSMemHandle; -+ } -+ -+ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0; -+ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; -+ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; -+ psWrapExtMemOUT->sClientMemInfo.ui32AllocSize = -+ psMemInfo->ui32AllocSize; -+ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL; -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo, -+ psMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ psWrapExtMemOUT->sClientSyncInfo.psSyncData = -+ psMemInfo->psKernelSyncInfo->psSyncData; -+ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = -+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; -+ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = -+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = -+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk. -+ hOSMemHandle; -+ -+ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = -+ &psWrapExtMemOUT->sClientSyncInfo; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psWrapExtMemOUT->sClientSyncInfo. 
-+ hKernelSyncInfo, -+ (IMG_HANDLE) psMemInfo->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY * psUnwrapExtMemIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvMemInfo, -+ psUnwrapExtMemIN->hKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *) pvMemInfo); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psUnwrapExtMemIN->hKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ return 0; -+} -+ -+static int -+PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM * -+ psGetFreeDeviceMemIN, -+ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM * -+ psGetFreeDeviceMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM); -+ -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psGetFreeDeviceMemOUT->eError = -+ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags, -+ &psGetFreeDeviceMemOUT->ui32Total, -+ &psGetFreeDeviceMemOUT->ui32Free, -+ &psGetFreeDeviceMemOUT->ui32LargestBlock); -+ -+ return 0; -+} -+ -+static int -+PVRMMapKVIndexAddressToMMapDataBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA * -+ psMMapDataIN, -+ PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA * -+ psMMapDataOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_KV_TO_MMAP_DATA); -+ PVR_UNREFERENCED_PARAMETER(psMMapDataIN); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psMMapDataOUT->eError = -+ PVRMMapKVIndexAddressToMMapData(psMMapDataIN->pvKVIndexAddress, -+ psMMapDataIN->ui32Bytes, -+ &psMMapDataOUT->ui32MMapOffset, -+ &psMMapDataOUT->ui32ByteOffset, -+ &psMMapDataOUT->ui32RealByteSize); -+ -+ return 0; -+} -+ -+#ifdef PDUMP -+static int -+PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING * -+ psPDumpIsCapturingOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING); -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM(); -+ psPDumpIsCapturingOUT->eError = PVRSRV_OK; -+ -+ return 0; -+} -+ -+static int -+PDumpCommentBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_COMMENT * psPDumpCommentIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0], -+ psPDumpCommentIN->ui32Flags); -+ return 0; -+} -+ -+static int -+PDumpSetFrameBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME * psPDumpSetFrameIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, 
PVRSRV_BRIDGE_PDUMP_SETFRAME); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame); -+ -+ return 0; -+} -+ -+static int -+PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG * psPDumpRegDumpIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psRetOUT->eError = -+ PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr, -+ psPDumpRegDumpIN->sHWReg.ui32RegVal, -+ psPDumpRegDumpIN->ui32Flags); -+ -+ return 0; -+} -+ -+static int -+PDumpRegPolBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_REGPOL * psPDumpRegPolIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psRetOUT->eError = -+ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr, -+ psPDumpRegPolIN->sHWReg.ui32RegVal, -+ psPDumpRegPolIN->ui32Mask, -+ psPDumpRegPolIN->ui32Flags); -+ -+ return 0; -+} -+ -+static int -+PDumpMemPolBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL * psPDumpMemPolIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvMemInfo, -+ psPDumpMemPolIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *) pvMemInfo), -+ psPDumpMemPolIN->ui32Offset, -+ psPDumpMemPolIN->ui32Value, -+ psPDumpMemPolIN->ui32Mask, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ psPDumpMemPolIN->bLastFrame, -+ psPDumpMemPolIN->bOverwrite, -+ MAKEUNIQUETAG(pvMemInfo)); -+ -+ return 0; -+} -+ -+static int -+PDumpMemBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM * psPDumpMemDumpIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvMemInfo; -+ IMG_VOID *pvAltLinAddrKM = IMG_NULL; -+ IMG_UINT32 ui32Bytes = psPDumpMemDumpIN->ui32Bytes; -+ IMG_HANDLE hBlockAlloc = 0; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvMemInfo, -+ psPDumpMemDumpIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (psPDumpMemDumpIN->pvAltLinAddr) { -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32Bytes, -+ &pvAltLinAddrKM, &hBlockAlloc) != PVRSRV_OK) { -+ return -EFAULT; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ pvAltLinAddrKM, -+ psPDumpMemDumpIN->pvAltLinAddr, -+ ui32Bytes) != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, -+ pvAltLinAddrKM, hBlockAlloc); -+ return -EFAULT; -+ } -+ } -+ -+ psRetOUT->eError = -+ PDumpMemKM(pvAltLinAddrKM, -+ pvMemInfo, -+ psPDumpMemDumpIN->ui32Offset, -+ ui32Bytes, -+ psPDumpMemDumpIN->ui32Flags, MAKEUNIQUETAG(pvMemInfo)); -+ -+ if (pvAltLinAddrKM) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, -+ hBlockAlloc); -+ } -+ -+ return 0; -+} -+ -+static int -+PDumpBitmapBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_BITMAP * psPDumpBitmapIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ 
PVR_UNREFERENCED_PARAMETER(psPerProc); -+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID); -+ -+ psRetOUT->eError = -+ PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0], -+ psPDumpBitmapIN->ui32FileOffset, -+ psPDumpBitmapIN->ui32Width, -+ psPDumpBitmapIN->ui32Height, -+ psPDumpBitmapIN->ui32StrideInBytes, -+ psPDumpBitmapIN->sDevBaseAddr, -+ psPDumpBitmapIN->ui32Size, -+ psPDumpBitmapIN->ePixelFormat, -+ psPDumpBitmapIN->eMemFormat, -+ psPDumpBitmapIN->ui32Flags); -+ -+ return 0; -+} -+ -+static int -+PDumpReadRegBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_READREG * psPDumpReadRegIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ psRetOUT->eError = -+ PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0], -+ psPDumpReadRegIN->ui32FileOffset, -+ psPDumpReadRegIN->ui32Address, -+ psPDumpReadRegIN->ui32Size, -+ psPDumpReadRegIN->ui32Flags); -+ -+ return 0; -+} -+ -+static int -+PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO * psPDumpDriverInfoIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_UINT32 ui32PDumpFlags; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ ui32PDumpFlags = 0; -+ if (psPDumpDriverInfoIN->bContinuous) { -+ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS; -+ } -+ psRetOUT->eError = -+ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0], -+ ui32PDumpFlags); -+ -+ return 0; -+} -+ -+static int -+PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC * psPDumpSyncDumpIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvAltLinAddrKM = IMG_NULL; -+ IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes; -+ IMG_VOID *pvSyncInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, -+ psPDumpSyncDumpIN->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (psPDumpSyncDumpIN->pvAltLinAddr) { -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32Bytes, &pvAltLinAddrKM, 0) != PVRSRV_OK) { -+ return -EFAULT; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ pvAltLinAddrKM, -+ psPDumpSyncDumpIN->pvAltLinAddr, -+ ui32Bytes) != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, -+ pvAltLinAddrKM, 0); -+ return -EFAULT; -+ } -+ } -+ -+ psRetOUT->eError = -+ PDumpMemKM(pvAltLinAddrKM, -+ ((PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo)-> -+ psSyncDataMemInfoKM, psPDumpSyncDumpIN->ui32Offset, -+ ui32Bytes, 0, -+ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo)-> -+ psSyncDataMemInfoKM)); -+ -+ if (pvAltLinAddrKM) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, -+ 0); -+ } -+ -+ return 0; -+} -+ -+static int -+PDumpSyncPolBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL * psPDumpSyncPolIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_UINT32 ui32Offset; -+ IMG_VOID *pvSyncInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, -+ psPDumpSyncPolIN->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if 
(psPDumpSyncPolIN->bIsRead) { -+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); -+ } else { -+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete); -+ } -+ -+ psRetOUT->eError = -+ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo)-> -+ psSyncDataMemInfoKM, ui32Offset, -+ psPDumpSyncPolIN->ui32Value, -+ psPDumpSyncPolIN->ui32Mask, PDUMP_POLL_OPERATOR_EQUAL, -+ IMG_FALSE, IMG_FALSE, -+ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *) -+ pvSyncInfo)->psSyncDataMemInfoKM)); -+ -+ return 0; -+} -+ -+static int -+PDumpPDRegBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG * psPDumpPDRegDumpIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr, -+ psPDumpPDRegDumpIN->sHWReg.ui32RegVal, PDUMP_PD_UNIQUETAG); -+ -+ psRetOUT->eError = PVRSRV_OK; -+ return 0; -+} -+ -+static int -+PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ * -+ psPDumpCycleCountRegReadIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset, -+ psPDumpCycleCountRegReadIN->bLastFrame); -+ -+ psRetOUT->eError = PVRSRV_OK; -+ -+ return 0; -+} -+ -+static int -+PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR * psPDumpPDDevPAddrIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo, -+ psPDumpPDDevPAddrIN->hKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *) pvMemInfo, -+ psPDumpPDDevPAddrIN->ui32Offset, -+ psPDumpPDDevPAddrIN->sPDDevPAddr, -+ MAKEUNIQUETAG(pvMemInfo), PDUMP_PD_UNIQUETAG); -+ return 0; -+} -+ -+static int -+PDumpBufferArrayBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY * psPDumpBufferArrayIN, -+ IMG_VOID * psBridgeOut, PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_UINT32 i; -+ PVR3DIF4_KICKTA_DUMP_BUFFER *psKickTADumpBuffer; -+ IMG_UINT32 ui32BufferArrayLength = -+ psPDumpBufferArrayIN->ui32BufferArrayLength; -+ IMG_UINT32 ui32BufferArraySize = -+ ui32BufferArrayLength * sizeof(PVR3DIF4_KICKTA_DUMP_BUFFER); -+ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC; -+ -+ PVR_UNREFERENCED_PARAMETER(psBridgeOut); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32BufferArraySize, -+ (IMG_PVOID *) & psKickTADumpBuffer, 0) != PVRSRV_OK) { -+ return -ENOMEM; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ psKickTADumpBuffer, -+ psPDumpBufferArrayIN->psBufferArray, -+ ui32BufferArraySize) != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, -+ psKickTADumpBuffer, 0); -+ return -EFAULT; -+ } -+ -+ for (i = 0; i < ui32BufferArrayLength; i++) { -+ IMG_VOID *pvMemInfo; -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvMemInfo, -+ psKickTADumpBuffer[i]. 
-+ hKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY: " -+ "PVRSRVLookupHandle failed (%d)", eError)); -+ break; -+ } -+ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo; -+ } -+ -+ if (eError == PVRSRV_OK) { -+ DumpBufferArray(psKickTADumpBuffer, -+ ui32BufferArrayLength, -+ psPDumpBufferArrayIN->bDumpPolls); -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, -+ psKickTADumpBuffer, 0); -+ -+ return 0; -+} -+ -+static int -+PDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS * -+ psPDump3DSignatureRegistersIN, -+ IMG_VOID * psBridgeOut, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_UINT32 ui32RegisterArraySize = -+ psPDump3DSignatureRegistersIN->ui32NumRegisters * -+ sizeof(IMG_UINT32); -+ IMG_UINT32 *pui32Registers = IMG_NULL; -+ int ret = -EFAULT; -+ -+ PVR_UNREFERENCED_PARAMETER(psBridgeOut); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_PDUMP_3D_SIGNATURE_REGISTERS); -+ -+ if (ui32RegisterArraySize == 0) { -+ goto ExitNoError; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32RegisterArraySize, -+ (IMG_PVOID *) & pui32Registers, 0) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PDump3DSignatureRegistersBW: OSAllocMem failed")); -+ goto Exit; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ pui32Registers, -+ psPDump3DSignatureRegistersIN->pui32Registers, -+ ui32RegisterArraySize) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed")); -+ goto Exit; -+ } -+ -+ PDump3DSignatureRegisters(psPDump3DSignatureRegistersIN-> -+ ui32DumpFrameNum, -+ psPDump3DSignatureRegistersIN->bLastFrame, -+ pui32Registers, -+ psPDump3DSignatureRegistersIN-> -+ ui32NumRegisters); -+ -+ExitNoError: -+ ret = 0; -+Exit: -+ if (pui32Registers != IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, -+ pui32Registers, 0); -+ } -+ -+ return ret; -+} -+ -+static int -+PDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS * -+ psPDumpCounterRegistersIN, IMG_VOID * psBridgeOut, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_UINT32 ui32RegisterArraySize = -+ psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32); -+ IMG_UINT32 *pui32Registers = IMG_NULL; -+ int ret = -EFAULT; -+ -+ PVR_UNREFERENCED_PARAMETER(psBridgeOut); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_PDUMP_COUNTER_REGISTERS); -+ -+ if (ui32RegisterArraySize == 0) { -+ goto ExitNoError; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32RegisterArraySize, -+ (IMG_PVOID *) & pui32Registers, 0) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PDumpCounterRegistersBW: OSAllocMem failed")); -+ ret = -ENOMEM; -+ goto Exit; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ pui32Registers, -+ psPDumpCounterRegistersIN->pui32Registers, -+ ui32RegisterArraySize) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PDumpCounterRegistersBW: CopyFromUserWrapper failed")); -+ goto Exit; -+ } -+ -+ PDumpCounterRegisters(psPDumpCounterRegistersIN->ui32DumpFrameNum, -+ psPDumpCounterRegistersIN->bLastFrame, -+ pui32Registers, -+ psPDumpCounterRegistersIN->ui32NumRegisters); -+ -+ExitNoError: -+ ret = 0; -+Exit: -+ if (pui32Registers != IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, -+ pui32Registers, 0); -+ } -+ -+ return ret; -+} -+ -+static int -+PDumpTASignatureRegistersBW(IMG_UINT32 
ui32BridgeID, -+ PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS * -+ psPDumpTASignatureRegistersIN, -+ IMG_VOID * psBridgeOut, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_UINT32 ui32RegisterArraySize = -+ psPDumpTASignatureRegistersIN->ui32NumRegisters * -+ sizeof(IMG_UINT32); -+ IMG_UINT32 *pui32Registers = IMG_NULL; -+ int ret = -EFAULT; -+ -+ PVR_UNREFERENCED_PARAMETER(psBridgeOut); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_PDUMP_TA_SIGNATURE_REGISTERS); -+ -+ if (ui32RegisterArraySize == 0) { -+ goto ExitNoError; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32RegisterArraySize, -+ (IMG_PVOID *) & pui32Registers, 0) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PDumpTASignatureRegistersBW: OSAllocMem failed")); -+ ret = -ENOMEM; -+ goto Exit; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ pui32Registers, -+ psPDumpTASignatureRegistersIN->pui32Registers, -+ ui32RegisterArraySize) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed")); -+ goto Exit; -+ } -+ -+ PDumpTASignatureRegisters(psPDumpTASignatureRegistersIN-> -+ ui32DumpFrameNum, -+ psPDumpTASignatureRegistersIN-> -+ ui32TAKickCount, -+ psPDumpTASignatureRegistersIN->bLastFrame, -+ pui32Registers, -+ psPDumpTASignatureRegistersIN-> -+ ui32NumRegisters); -+ -+ExitNoError: -+ ret = 0; -+Exit: -+ if (pui32Registers != IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, -+ pui32Registers, 0); -+ } -+ -+ return ret; -+} -+#endif -+ -+static int -+SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GETCLIENTINFO * psGetClientInfoIN, -+ PVRSRV_BRIDGE_OUT_GETCLIENTINFO * psGetClientInfoOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO); -+ -+ psGetClientInfoOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psGetClientInfoIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psGetClientInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetClientInfoOUT->eError = -+ SGXGetClientInfoKM(hDevCookieInt, &psGetClientInfoOUT->sClientInfo); -+ return 0; -+} -+ -+static int -+SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_RELEASECLIENTINFO * -+ psReleaseClientInfoIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psReleaseClientInfoIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookieInt)-> -+ pvDevice; -+ -+ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0); -+ -+ psDevInfo->ui32ClientRefCount--; -+ -+ psRetOUT->eError = PVRSRV_OK; -+ -+ return 0; -+} -+ -+static int -+SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO * -+ psSGXGetInternalDevInfoIN, -+ PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO * -+ psSGXGetInternalDevInfoOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO); -+ -+ psSGXGetInternalDevInfoOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ 
psSGXGetInternalDevInfoIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psSGXGetInternalDevInfoOUT->eError = -+ SGXGetInternalDevInfoKM(hDevCookieInt, -+ &psSGXGetInternalDevInfoOUT-> -+ sSGXInternalDevInfo); -+ -+ psSGXGetInternalDevInfoOUT->eError = -+ PVRSRVAllocHandle(psPerProc->psHandleBase, -+ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo. -+ hCtlKernelMemInfoHandle, -+ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo. -+ hCtlKernelMemInfoHandle, -+ PVRSRV_HANDLE_TYPE_MEM_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED); -+ -+ return 0; -+} -+ -+static int -+SGXDoKickBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_DOKICK * psDoKickIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_UINT32 i; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psDoKickIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick.hCCBKernelMemInfo, -+ psDoKickIN->sCCBKick.hCCBKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick.hTA3DSyncInfo, -+ psDoKickIN->sCCBKick.hTA3DSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick.hTASyncInfo, -+ psDoKickIN->sCCBKick.hTASyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick.h3DSyncInfo, -+ psDoKickIN->sCCBKick.h3DSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS) { -+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ return 0; -+ } -+ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick. -+ ahSrcKernelSyncInfo[i], -+ psDoKickIN->sCCBKick. -+ ahSrcKernelSyncInfo[i], -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS) { -+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ return 0; -+ } -+ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick. -+ ahTAStatusSyncInfo[i], -+ psDoKickIN->sCCBKick. 
-+ ahTAStatusSyncInfo[i], -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS) { -+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ return 0; -+ } -+ for (i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick. -+ ah3DStatusSyncInfo[i], -+ psDoKickIN->sCCBKick. -+ ah3DStatusSyncInfo[i], -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psDoKickIN->sCCBKick.hRenderSurfSyncInfo != IMG_NULL) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psDoKickIN->sCCBKick. -+ hRenderSurfSyncInfo, -+ psDoKickIN->sCCBKick.hRenderSurfSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ psRetOUT->eError = SGXDoKickKM(hDevCookieInt, &psDoKickIN->sCCBKick); -+ -+ return 0; -+} -+ -+static int -+SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SUBMITTRANSFER * psSubmitTransferIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ PVRSRV_TRANSFER_SGX_KICK *psKick; -+ IMG_UINT32 i; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_SUBMITTRANSFER); -+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID); -+ -+ psKick = &psSubmitTransferIN->sKick; -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSubmitTransferIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psKick->hCCBMemInfo, -+ psKick->hCCBMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ if (psKick->hTASyncInfo != IMG_NULL) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psKick->hTASyncInfo, -+ psKick->hTASyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psKick->h3DSyncInfo != IMG_NULL) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psKick->h3DSyncInfo, -+ psKick->h3DSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS) { -+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ return 0; -+ } -+ for (i = 0; i < psKick->ui32NumSrcSync; i++) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psKick->ahSrcSyncInfo[i], -+ psKick->ahSrcSyncInfo[i], -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS) { -+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ return 0; -+ } -+ for (i = 0; i < psKick->ui32NumDstSync; i++) { -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psKick->ahDstSyncInfo[i], -+ psKick->ahDstSyncInfo[i], -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ } -+ -+ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick); -+ -+ return 0; -+} -+ -+ -+ -+static int -+SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGXGETMISCINFO * psSGXGetMiscInfoIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ 
IMG_HANDLE hDevCookieInt; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ SGX_MISC_INFO sMiscInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETMISCINFO); -+ -+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSGXGetMiscInfoIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookieInt)-> -+ pvDevice; -+ -+ psRetOUT->eError = CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ &sMiscInfo, -+ psSGXGetMiscInfoIN->psMiscInfo, -+ sizeof(SGX_MISC_INFO)); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return -EFAULT; -+ } -+ if (sMiscInfo.eRequest == SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB) { -+ void *pAllocated; -+ IMG_HANDLE hAllocatedHandle; -+ void *psTmpUserData; -+ int allocatedSize; -+ -+ allocatedSize = -+ sMiscInfo.uData.sRetrieveCB.ui32ArraySize * -+ sizeof(PVRSRV_SGX_HWPERF_CBDATA); -+ -+ ASSIGN_AND_EXIT_ON_ERROR(psRetOUT->eError, -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ allocatedSize, -+ &pAllocated, -+ &hAllocatedHandle)); -+ -+ psTmpUserData = sMiscInfo.uData.sRetrieveCB.psHWPerfData; -+ sMiscInfo.uData.sRetrieveCB.psHWPerfData = pAllocated; -+ -+ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ allocatedSize, pAllocated, hAllocatedHandle); -+ return -EFAULT; -+ } -+ -+ psRetOUT->eError = CopyToUserWrapper(psPerProc, -+ ui32BridgeID, -+ psTmpUserData, -+ sMiscInfo.uData. -+ sRetrieveCB.psHWPerfData, -+ allocatedSize); -+ -+ sMiscInfo.uData.sRetrieveCB.psHWPerfData = psTmpUserData; -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ allocatedSize, pAllocated, hAllocatedHandle); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return -EFAULT; -+ } -+ } else -+ { -+ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return -EFAULT; -+ } -+ } -+ -+ psRetOUT->eError = CopyToUserWrapper(psPerProc, -+ ui32BridgeID, -+ psSGXGetMiscInfoIN->psMiscInfo, -+ &sMiscInfo, sizeof(SGX_MISC_INFO)); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return -EFAULT; -+ } -+ return 0; -+} -+ -+static int -+SGXReadDiffCountersBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS * -+ psSGXReadDiffCountersIN, -+ PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS * -+ psSGXReadDiffCountersOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS); -+ -+ psSGXReadDiffCountersOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ psSGXReadDiffCountersIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ -+ if (psSGXReadDiffCountersOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psSGXReadDiffCountersOUT->eError = SGXReadDiffCountersKM(hDevCookieInt, -+ psSGXReadDiffCountersIN-> -+ ui32Reg, -+ &psSGXReadDiffCountersOUT-> -+ ui32Old, -+ psSGXReadDiffCountersIN-> -+ bNew, -+ psSGXReadDiffCountersIN-> -+ ui32New, -+ psSGXReadDiffCountersIN-> -+ ui32NewReset, -+ psSGXReadDiffCountersIN-> -+ ui32CountersReg, -+ &psSGXReadDiffCountersOUT-> -+ ui32Time, -+ &psSGXReadDiffCountersOUT-> -+ bActive, -+ &psSGXReadDiffCountersOUT-> -+ sDiffs); -+ -+ return 0; -+} -+ -+static int -+PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ -+ 
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT); -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ -+ if (!OSProcHasPrivSrvInit() -+ || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) -+ || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) { -+ psRetOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE); -+ psPerProc->bInitProcess = IMG_TRUE; -+ -+ psRetOUT->eError = PVRSRV_OK; -+ -+ return 0; -+} -+ -+static int -+PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT * -+ psInitSrvDisconnectIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_INITSRV_DISCONNECT); -+ -+ if (!psPerProc->bInitProcess) { -+ psRetOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ psPerProc->bInitProcess = IMG_FALSE; -+ -+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE); -+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE); -+ -+ psRetOUT->eError = -+ PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful); -+ -+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL, -+ (IMG_BOOL) (((psRetOUT->eError == PVRSRV_OK) -+ && (psInitSrvDisconnectIN-> -+ bInitSuccesful)))); -+ -+ return 0; -+} -+ -+static int -+PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT * -+ psEventObjectWaitIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hOSEventKM; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT); -+ -+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hOSEventKM, -+ psEventObjectWaitIN->hOSEventKM, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = OSEventObjectWait(hOSEventKM); -+ -+ return 0; -+} -+ -+static int -+PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN * -+ psEventObjectOpenIN, -+ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN * -+ psEventObjectOpenOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1); -+ -+ psEventObjectOpenOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &psEventObjectOpenIN->sEventObject.hOSEventKM, -+ psEventObjectOpenIN->sEventObject.hOSEventKM, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); -+ -+ if (psEventObjectOpenOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psEventObjectOpenOUT->eError = -+ OSEventObjectOpen(&psEventObjectOpenIN->sEventObject, -+ &psEventObjectOpenOUT->hOSEvent); -+ -+ if (psEventObjectOpenOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psEventObjectOpenOUT->hOSEvent, -+ psEventObjectOpenOUT->hOSEvent, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE * -+ psEventObjectCloseIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hOSEventKM; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ 
&psEventObjectCloseIN->sEventObject.hOSEventKM, -+ psEventObjectCloseIN->sEventObject.hOSEventKM, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ &hOSEventKM, -+ psEventObjectCloseIN-> -+ hOSEventKM, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM); -+ -+ return 0; -+} -+ -+static int -+SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGXDEVINITPART2 * psSGXDevInitPart2IN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bDissociateFailed = IMG_FALSE; -+ IMG_BOOL bLookupFailed = IMG_FALSE; -+ IMG_BOOL bReleaseFailed = IMG_FALSE; -+ IMG_HANDLE hDummy; -+ IMG_UINT32 i; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2); -+ -+ if (!psPerProc->bInitProcess) { -+ psRetOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSGXDevInitPart2IN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDummy, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDummy, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBCtlMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDummy, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBEventKickerMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDummy, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelSGXHostCtlMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDummy, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelHWPerfCBMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) { -+ IMG_HANDLE hHandle = -+ psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; -+ -+ if (hHandle == IMG_NULL) { -+ continue; -+ } -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDummy, -+ hHandle, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bLookupFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ } -+ -+ if (bLookupFailed) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2BW: A handle lookup failed")); -+ psRetOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ &psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBMemInfo, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ &psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBCtlMemInfo, -+ psSGXDevInitPart2IN->sInitInfo. 
-+ hKernelCCBCtlMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ &psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBEventKickerMemInfo, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBEventKickerMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ &psSGXDevInitPart2IN->sInitInfo. -+ hKernelSGXHostCtlMemInfo, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelSGXHostCtlMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ -+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ &psSGXDevInitPart2IN->sInitInfo. -+ hKernelHWPerfCBMemInfo, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelHWPerfCBMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) { -+ IMG_HANDLE *phHandle = -+ &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; -+ -+ if (*phHandle == IMG_NULL) -+ continue; -+ -+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, -+ phHandle, -+ *phHandle, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ bReleaseFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ } -+ -+ if (bReleaseFailed) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2BW: A handle release failed")); -+ psRetOUT->eError = PVRSRV_ERROR_GENERIC; -+ -+ PVR_DBG_BREAK; -+ return 0; -+ } -+ -+ eError = -+ PVRSRVDissociateDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBMemInfo); -+ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = -+ PVRSRVDissociateDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBCtlMemInfo); -+ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = -+ PVRSRVDissociateDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBEventKickerMemInfo); -+ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ eError = -+ PVRSRVDissociateDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelSGXHostCtlMemInfo); -+ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ -+ eError = -+ PVRSRVDissociateDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelHWPerfCBMemInfo); -+ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ -+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) { -+ IMG_HANDLE hHandle = -+ psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; -+ -+ if (hHandle == IMG_NULL) -+ continue; -+ -+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle); -+ bDissociateFailed |= (IMG_BOOL) (eError != PVRSRV_OK); -+ } -+ -+ if (bDissociateFailed) { -+ PVRSRVFreeDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBMemInfo); -+ PVRSRVFreeDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. -+ hKernelCCBCtlMemInfo); -+ PVRSRVFreeDeviceMemKM(hDevCookieInt, -+ psSGXDevInitPart2IN->sInitInfo. 
-+ hKernelSGXHostCtlMemInfo); -+ -+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) { -+ IMG_HANDLE hHandle = -+ psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; -+ -+ if (hHandle == IMG_NULL) -+ continue; -+ -+ PVRSRVFreeDeviceMemKM(hDevCookieInt, -+ (PVRSRV_KERNEL_MEM_INFO *) -+ hHandle); -+ -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2BW: A dissociate failed")); -+ -+ psRetOUT->eError = PVRSRV_ERROR_GENERIC; -+ -+ PVR_DBG_BREAK; -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ DevInitSGXPart2KM(psPerProc, -+ hDevCookieInt, &psSGXDevInitPart2IN->sInitInfo); -+ -+ return 0; -+} -+ -+static int -+SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT * -+ psSGXRegHWRenderContextIN, -+ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT * -+ psSGXRegHWRenderContextOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hHWRenderContextInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, -+ 1); -+ -+ psSGXRegHWRenderContextOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSGXRegHWRenderContextIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psSGXRegHWRenderContextOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ hHWRenderContextInt = -+ SGXRegisterHWRenderContextKM(hDevCookieInt, -+ &psSGXRegHWRenderContextIN-> -+ sHWRenderContextDevVAddr, psPerProc); -+ -+ if (hHWRenderContextInt == IMG_NULL) { -+ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psSGXRegHWRenderContextOUT->hHWRenderContext, -+ hHWRenderContextInt, -+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT -+ * psSGXUnregHWRenderContextIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hHWRenderContextInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hHWRenderContextInt, -+ psSGXUnregHWRenderContextIN->hHWRenderContext, -+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psSGXUnregHWRenderContextIN->hHWRenderContext, -+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT); -+ -+ return 0; -+} -+ -+static int -+SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT -+ * psSGXRegHWTransferContextIN, -+ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT -+ * psSGXRegHWTransferContextOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hHWTransferContextInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, -+ psPerProc, 1); -+ -+ psSGXRegHWTransferContextOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, 
-+ &hDevCookieInt, -+ psSGXRegHWTransferContextIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psSGXRegHWTransferContextOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ hHWTransferContextInt = -+ SGXRegisterHWTransferContextKM(hDevCookieInt, -+ &psSGXRegHWTransferContextIN-> -+ sHWTransferContextDevVAddr, -+ psPerProc); -+ -+ if (hHWTransferContextInt == IMG_NULL) { -+ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psSGXRegHWTransferContextOUT->hHWTransferContext, -+ hHWTransferContextInt, -+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT -+ * psSGXUnregHWTransferContextIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hHWTransferContextInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hHWTransferContextInt, -+ psSGXUnregHWTransferContextIN-> -+ hHWTransferContext, -+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ SGXUnregisterHWTransferContextKM(hHWTransferContextInt); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psSGXUnregHWTransferContextIN-> -+ hHWTransferContext, -+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT); -+ -+ return 0; -+} -+ -+ -+static int -+SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET * -+ psSGXFlushHWRenderTargetIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSGXFlushHWRenderTargetIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ SGXFlushHWRenderTargetKM(hDevCookieInt, -+ psSGXFlushHWRenderTargetIN-> -+ sHWRTDataSetDevVAddr); -+ -+ return 0; -+} -+ -+static int -+SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE * -+ ps2DQueryBltsCompleteIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_VOID *pvSyncInfo; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, -+ ps2DQueryBltsCompleteIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, -+ ps2DQueryBltsCompleteIN->hKernSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookieInt)-> -+ pvDevice; -+ -+ psRetOUT->eError = -+ SGX2DQueryBlitsCompleteKM(psDevInfo, -+ (PVRSRV_KERNEL_SYNC_INFO *) pvSyncInfo, -+ ps2DQueryBltsCompleteIN-> -+ 
bWaitForComplete); -+ -+ return 0; -+} -+ -+static int -+SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC * -+ psSGXFindSharedPBDescIN, -+ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC * -+ psSGXFindSharedPBDescOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL; -+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0; -+ IMG_UINT32 i; -+ IMG_HANDLE hSharedPBDesc = IMG_NULL; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, -+ PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS -+ + 4); -+ -+ psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL; -+ -+ psSGXFindSharedPBDescOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSGXFindSharedPBDescIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) -+ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; -+ -+ psSGXFindSharedPBDescOUT->eError = -+ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt, -+ psSGXFindSharedPBDescIN->bLockOnFailure, -+ psSGXFindSharedPBDescIN->ui32TotalPBSize, -+ &hSharedPBDesc, -+ &psSharedPBDescKernelMemInfo, -+ &psHWPBDescKernelMemInfo, -+ &psBlockKernelMemInfo, -+ &ppsSharedPBDescSubKernelMemInfos, -+ &ui32SharedPBDescSubKernelMemInfosCount); -+ if (psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) -+ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; -+ -+ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount -+ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS); -+ -+ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount = -+ ui32SharedPBDescSubKernelMemInfosCount; -+ -+ if (hSharedPBDesc == IMG_NULL) { -+ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0; -+ -+ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psSGXFindSharedPBDescOUT->hSharedPBDesc, -+ hSharedPBDesc, -+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psSGXFindSharedPBDescOUT-> -+ hSharedPBDescKernelMemInfoHandle, -+ psSharedPBDescKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psSGXFindSharedPBDescOUT->hSharedPBDesc); -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psSGXFindSharedPBDescOUT-> -+ hHWPBDescKernelMemInfoHandle, -+ psHWPBDescKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psSGXFindSharedPBDescOUT->hSharedPBDesc); -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psSGXFindSharedPBDescOUT-> -+ hBlockKernelMemInfoHandle, psBlockKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psSGXFindSharedPBDescOUT->hSharedPBDesc); -+ -+ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) { -+ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut -+ = psSGXFindSharedPBDescOUT; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psSGXFindSharedPBDescOut-> -+ ahSharedPBDescSubKernelMemInfoHandles[i], -+ ppsSharedPBDescSubKernelMemInfos[i], -+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psSGXFindSharedPBDescOUT-> -+ hSharedPBDescKernelMemInfoHandle); -+ } -+ 
-+PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT: -+ if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO *) -+ * ui32SharedPBDescSubKernelMemInfosCount, -+ ppsSharedPBDescSubKernelMemInfos, IMG_NULL); -+ } -+ -+ if (psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) { -+ if (hSharedPBDesc != IMG_NULL) { -+ SGXUnrefSharedPBDescKM(hSharedPBDesc); -+ } -+ } else { -+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, -+ psPerProc); -+ } -+ -+ return 0; -+} -+ -+static int -+SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC * -+ psSGXUnrefSharedPBDescIN, -+ PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC * -+ psSGXUnrefSharedPBDescOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hSharedPBDesc; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC); -+ -+ psSGXUnrefSharedPBDescOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hSharedPBDesc, -+ psSGXUnrefSharedPBDescIN->hSharedPBDesc, -+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC); -+ if (psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psSGXUnrefSharedPBDescOUT->eError = -+ SGXUnrefSharedPBDescKM(hSharedPBDesc); -+ -+ if (psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psSGXUnrefSharedPBDescOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psSGXUnrefSharedPBDescIN->hSharedPBDesc, -+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC); -+ -+ return 0; -+} -+ -+static int -+SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC * -+ psSGXAddSharedPBDescIN, -+ PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC * -+ psSGXAddSharedPBDescOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; -+ IMG_UINT32 ui32KernelMemInfoHandlesCount = -+ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount; -+ int ret = 0; -+ IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL; -+ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hSharedPBDesc = IMG_NULL; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, -+ 1); -+ -+ psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL; -+ -+ PVR_ASSERT(ui32KernelMemInfoHandlesCount -+ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS); -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psSGXAddSharedPBDescIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (eError != PVRSRV_OK) { -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & psSharedPBDescKernelMemInfo, -+ psSGXAddSharedPBDescIN-> -+ hSharedPBDescKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); -+ if (eError != PVRSRV_OK) { -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & psHWPBDescKernelMemInfo, -+ psSGXAddSharedPBDescIN-> -+ hHWPBDescKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (eError != PVRSRV_OK) { -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & psBlockKernelMemInfo, -+ 
psSGXAddSharedPBDescIN->hBlockKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); -+ if (eError != PVRSRV_OK) { -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ if (!OSAccessOK(PVR_VERIFY_READ, -+ psSGXAddSharedPBDescIN->phKernelMemInfoHandles, -+ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:" -+ " Invalid phKernelMemInfos pointer", __FUNCTION__)); -+ ret = -EFAULT; -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE), -+ (IMG_VOID **) & phKernelMemInfoHandles, -+ 0) != PVRSRV_OK) { -+ ret = -ENOMEM; -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ phKernelMemInfoHandles, -+ psSGXAddSharedPBDescIN->phKernelMemInfoHandles, -+ ui32KernelMemInfoHandlesCount * -+ sizeof(IMG_HANDLE)) -+ != PVRSRV_OK) { -+ ret = -EFAULT; -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32KernelMemInfoHandlesCount * -+ sizeof(PVRSRV_KERNEL_MEM_INFO *), -+ (IMG_VOID **) & ppsKernelMemInfos, 0) != PVRSRV_OK) { -+ ret = -ENOMEM; -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ for (i = 0; i < ui32KernelMemInfoHandlesCount; i++) { -+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & -+ ppsKernelMemInfos[i], -+ phKernelMemInfoHandles[i], -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ if (eError != PVRSRV_OK) { -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ } -+ -+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psSGXAddSharedPBDescIN-> -+ hSharedPBDescKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psSGXAddSharedPBDescIN-> -+ hHWPBDescKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psSGXAddSharedPBDescIN-> -+ hBlockKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ for (i = 0; i < ui32KernelMemInfoHandlesCount; i++) { -+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ phKernelMemInfoHandles[i], -+ PVRSRV_HANDLE_TYPE_MEM_INFO); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt, -+ psSharedPBDescKernelMemInfo, -+ psHWPBDescKernelMemInfo, -+ psBlockKernelMemInfo, -+ psSGXAddSharedPBDescIN->ui32TotalPBSize, -+ &hSharedPBDesc, -+ ppsKernelMemInfos, -+ ui32KernelMemInfoHandlesCount); -+ -+ if (eError != PVRSRV_OK) { -+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psSGXAddSharedPBDescOUT->hSharedPBDesc, -+ hSharedPBDesc, -+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT: -+ -+ if (phKernelMemInfoHandles) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount -+ * sizeof(IMG_HANDLE), -+ (IMG_VOID *) phKernelMemInfoHandles, 0); -+ } -+ if (ppsKernelMemInfos) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount -+ * sizeof(PVRSRV_KERNEL_MEM_INFO *), -+ (IMG_VOID *) ppsKernelMemInfos, 0); -+ } -+ -+ if (ret == 0 && eError == PVRSRV_OK) { -+ 
COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, -+ psPerProc); -+ } -+ -+ psSGXAddSharedPBDescOUT->eError = eError; -+ -+ return ret; -+} -+ -+ -+static int -+PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_MISC_INFO * psGetMiscInfoIN, -+ PVRSRV_BRIDGE_OUT_GET_MISC_INFO * psGetMiscInfoOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO); -+ -+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo, -+ &psGetMiscInfoIN->sMiscInfo, sizeof(PVRSRV_MISC_INFO)); -+ -+ if (psGetMiscInfoIN->sMiscInfo. -+ ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) { -+ -+ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError, -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ psGetMiscInfoOUT->sMiscInfo. -+ ui32MemoryStrLen, -+ (IMG_VOID **) & -+ psGetMiscInfoOUT->sMiscInfo. -+ pszMemoryStr, 0)); -+ -+ psGetMiscInfoOUT->eError = -+ PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo); -+ -+ eError = CopyToUserWrapper(psPerProc, ui32BridgeID, -+ psGetMiscInfoIN->sMiscInfo. -+ pszMemoryStr, -+ psGetMiscInfoOUT->sMiscInfo. -+ pszMemoryStr, -+ psGetMiscInfoOUT->sMiscInfo. -+ ui32MemoryStrLen); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen, -+ (IMG_VOID *) psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, -+ 0); -+ -+ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = -+ psGetMiscInfoIN->sMiscInfo.pszMemoryStr; -+ -+ if (eError != PVRSRV_OK) { -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetMiscInfoBW Error copy to user")); -+ return -EFAULT; -+ } -+ } else { -+ psGetMiscInfoOUT->eError = -+ PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo); -+ } -+ -+ if (psGetMiscInfoIN->sMiscInfo. -+ ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) { -+ -+ psGetMiscInfoOUT->eError = -+ PVRSRVAllocHandle(psPerProc->psHandleBase, -+ &psGetMiscInfoOUT->sMiscInfo. -+ sGlobalEventObject.hOSEventKM, -+ psGetMiscInfoOUT->sMiscInfo. 
-+ sGlobalEventObject.hOSEventKM, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED); -+ } -+ -+ return 0; -+} -+ -+static int -+PVRSRVConnectBW(IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES * psConnectServicesOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES); -+ -+ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData; -+ psConnectServicesOUT->eError = PVRSRV_OK; -+ -+ return 0; -+} -+ -+static int -+PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_DISCONNECT_SERVICES); -+ -+ psRetOUT->eError = PVRSRV_OK; -+ -+ return 0; -+} -+ -+static int -+PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_ENUMCLASS * psEnumDispClassIN, -+ PVRSRV_BRIDGE_OUT_ENUMCLASS * psEnumDispClassOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS); -+ -+ psEnumDispClassOUT->eError = -+ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass, -+ &psEnumDispClassOUT->ui32NumDevices, -+ &psEnumDispClassOUT->ui32DevID[0]); -+ -+ return 0; -+} -+ -+static int -+PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE * -+ psOpenDispClassDeviceIN, -+ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE * -+ psOpenDispClassDeviceOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hDispClassInfoInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, -+ 1); -+ -+ psOpenDispClassDeviceOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psOpenDispClassDeviceIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psOpenDispClassDeviceOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psOpenDispClassDeviceOUT->eError = -+ PVRSRVOpenDCDeviceKM(psPerProc, -+ psOpenDispClassDeviceIN->ui32DeviceID, -+ hDevCookieInt, &hDispClassInfoInt); -+ -+ if (psOpenDispClassDeviceOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psOpenDispClassDeviceOUT->hDeviceKM, -+ hDispClassInfoInt, -+ PVRSRV_HANDLE_TYPE_DISP_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE * -+ psCloseDispClassDeviceIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfoInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfoInt, -+ psCloseDispClassDeviceIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ 
psCloseDispClassDeviceIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ return 0; -+} -+ -+static int -+PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS * -+ psEnumDispClassFormatsIN, -+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS * -+ psEnumDispClassFormatsOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfoInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS); -+ -+ psEnumDispClassFormatsOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfoInt, -+ psEnumDispClassFormatsIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psEnumDispClassFormatsOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psEnumDispClassFormatsOUT->eError = -+ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt, -+ &psEnumDispClassFormatsOUT->ui32Count, -+ psEnumDispClassFormatsOUT->asFormat); -+ -+ return 0; -+} -+ -+static int -+PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS * psEnumDispClassDimsIN, -+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS * -+ psEnumDispClassDimsOUT, PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfoInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS); -+ -+ psEnumDispClassDimsOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfoInt, -+ psEnumDispClassDimsIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ -+ if (psEnumDispClassDimsOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psEnumDispClassDimsOUT->eError = -+ PVRSRVEnumDCDimsKM(pvDispClassInfoInt, -+ &psEnumDispClassDimsIN->sFormat, -+ &psEnumDispClassDimsOUT->ui32Count, -+ psEnumDispClassDimsOUT->asDim); -+ -+ return 0; -+} -+ -+static int -+PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER * -+ psGetDispClassSysBufferIN, -+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER * -+ psGetDispClassSysBufferOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hBufferInt; -+ IMG_VOID *pvDispClassInfoInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, -+ 1); -+ -+ psGetDispClassSysBufferOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfoInt, -+ psGetDispClassSysBufferIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psGetDispClassSysBufferOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetDispClassSysBufferOUT->eError = -+ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, &hBufferInt); -+ -+ if (psGetDispClassSysBufferOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psGetDispClassSysBufferOUT->hBuffer, -+ hBufferInt, -+ PVRSRV_HANDLE_TYPE_DISP_BUFFER, -+ (PVRSRV_HANDLE_ALLOC_FLAG) -+ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED), -+ psGetDispClassSysBufferIN->hDeviceKM); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO * psGetDispClassInfoIN, -+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO * psGetDispClassInfoOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GET_DISPCLASS_INFO); -+ -+ psGetDispClassInfoOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ 
psGetDispClassInfoIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psGetDispClassInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetDispClassInfoOUT->eError = -+ PVRSRVGetDCInfoKM(pvDispClassInfo, -+ &psGetDispClassInfoOUT->sDisplayInfo); -+ -+ return 0; -+} -+ -+static int -+PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN * -+ psCreateDispClassSwapChainIN, -+ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN * -+ psCreateDispClassSwapChainOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_HANDLE hSwapChainInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, -+ psPerProc, 1); -+ -+ psCreateDispClassSwapChainOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psCreateDispClassSwapChainIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ -+ if (psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psCreateDispClassSwapChainOUT->eError = -+ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo, -+ psCreateDispClassSwapChainIN->ui32Flags, -+ &psCreateDispClassSwapChainIN-> -+ sDstSurfAttrib, -+ &psCreateDispClassSwapChainIN-> -+ sSrcSurfAttrib, -+ psCreateDispClassSwapChainIN-> -+ ui32BufferCount, -+ psCreateDispClassSwapChainIN-> -+ ui32OEMFlags, &hSwapChainInt, -+ &psCreateDispClassSwapChainOUT-> -+ ui32SwapChainID); -+ -+ if (psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psCreateDispClassSwapChainOUT->hSwapChain, -+ hSwapChainInt, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ psCreateDispClassSwapChainIN->hDeviceKM); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN * -+ psDestroyDispClassSwapChainIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvSwapChain; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain, -+ psDestroyDispClassSwapChainIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVDestroyDCSwapChainKM(pvSwapChain); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psDestroyDispClassSwapChainIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ -+ return 0; -+} -+ -+static int -+PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT * -+ psSetDispClassDstRectIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChain; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psSetDispClassDstRectIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvSwapChain, -+ psSetDispClassDstRectIN->hSwapChain, -+ 
PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVSetDCDstRectKM(pvDispClassInfo, -+ pvSwapChain, &psSetDispClassDstRectIN->sRect); -+ -+ return 0; -+} -+ -+static int -+PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT * -+ psSetDispClassSrcRectIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChain; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psSetDispClassSrcRectIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvSwapChain, -+ psSetDispClassSrcRectIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVSetDCSrcRectKM(pvDispClassInfo, -+ pvSwapChain, &psSetDispClassSrcRectIN->sRect); -+ -+ return 0; -+} -+ -+static int -+PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY * -+ psSetDispClassColKeyIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChain; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psSetDispClassColKeyIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvSwapChain, -+ psSetDispClassColKeyIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo, -+ pvSwapChain, -+ psSetDispClassColKeyIN->ui32CKColour); -+ -+ return 0; -+} -+ -+static int -+PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY * -+ psSetDispClassColKeyIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChain; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psSetDispClassColKeyIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvSwapChain, -+ psSetDispClassColKeyIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo, -+ pvSwapChain, -+ psSetDispClassColKeyIN->ui32CKColour); -+ -+ return 0; -+} -+ -+static int -+PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS * -+ psGetDispClassBuffersIN, -+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS * -+ psGetDispClassBuffersOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChain; -+ IMG_UINT32 i; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS); -+ -+ 
NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, -+ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); -+ -+ psGetDispClassBuffersOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psGetDispClassBuffersIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetDispClassBuffersOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvSwapChain, -+ psGetDispClassBuffersIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); -+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetDispClassBuffersOUT->eError = -+ PVRSRVGetDCBuffersKM(pvDispClassInfo, -+ pvSwapChain, -+ &psGetDispClassBuffersOUT->ui32BufferCount, -+ psGetDispClassBuffersOUT->ahBuffer); -+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= -+ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); -+ -+ for (i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++) { -+ IMG_HANDLE hBufferExt; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &hBufferExt, -+ psGetDispClassBuffersOUT->ahBuffer[i], -+ PVRSRV_HANDLE_TYPE_DISP_BUFFER, -+ (PVRSRV_HANDLE_ALLOC_FLAG) -+ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED), -+ psGetDispClassBuffersIN->hSwapChain); -+ -+ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt; -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER * -+ psSwapDispClassBufferIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChainBuf; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psSwapDispClassBufferIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupSubHandle(psPerProc->psHandleBase, -+ &pvSwapChainBuf, -+ psSwapDispClassBufferIN->hBuffer, -+ PVRSRV_HANDLE_TYPE_DISP_BUFFER, -+ psSwapDispClassBufferIN->hDeviceKM); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVSwapToDCBufferKM(pvDispClassInfo, -+ pvSwapChainBuf, -+ psSwapDispClassBufferIN->ui32SwapInterval, -+ psSwapDispClassBufferIN->hPrivateTag, -+ psSwapDispClassBufferIN->ui32ClipRectCount, -+ psSwapDispClassBufferIN->sClipRect); -+ -+ return 0; -+} -+ -+static int -+PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM * -+ psSwapDispClassSystemIN, PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvDispClassInfo; -+ IMG_VOID *pvSwapChain; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvDispClassInfo, -+ psSwapDispClassSystemIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_DISP_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = -+ PVRSRVLookupSubHandle(psPerProc->psHandleBase, -+ &pvSwapChain, -+ psSwapDispClassSystemIN->hSwapChain, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, -+ psSwapDispClassSystemIN->hDeviceKM); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ psRetOUT->eError = 
PVRSRVSwapToDCSystemKM(pvDispClassInfo, pvSwapChain); -+ -+ return 0; -+} -+ -+static int -+PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE * -+ psOpenBufferClassDeviceIN, -+ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE * -+ psOpenBufferClassDeviceOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevCookieInt; -+ IMG_HANDLE hBufClassInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, -+ 1); -+ -+ psOpenBufferClassDeviceOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &hDevCookieInt, -+ psOpenBufferClassDeviceIN->hDevCookie, -+ PVRSRV_HANDLE_TYPE_DEV_NODE); -+ if (psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psOpenBufferClassDeviceOUT->eError = -+ PVRSRVOpenBCDeviceKM(psPerProc, -+ psOpenBufferClassDeviceIN->ui32DeviceID, -+ hDevCookieInt, &hBufClassInfo); -+ if (psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psOpenBufferClassDeviceOUT->hDeviceKM, -+ hBufClassInfo, -+ PVRSRV_HANDLE_TYPE_BUF_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE * -+ psCloseBufferClassDeviceIN, -+ PVRSRV_BRIDGE_RETURN * psRetOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvBufClassInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE); -+ -+ psRetOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvBufClassInfo, -+ psCloseBufferClassDeviceIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_BUF_INFO); -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE); -+ -+ if (psRetOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psCloseBufferClassDeviceIN-> -+ hDeviceKM, -+ PVRSRV_HANDLE_TYPE_BUF_INFO); -+ -+ return 0; -+} -+ -+static int -+PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO * -+ psGetBufferClassInfoIN, -+ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO * -+ psGetBufferClassInfoOUT, PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvBufClassInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO); -+ -+ psGetBufferClassInfoOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ &pvBufClassInfo, -+ psGetBufferClassInfoIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_BUF_INFO); -+ if (psGetBufferClassInfoOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetBufferClassInfoOUT->eError = -+ PVRSRVGetBCInfoKM(pvBufClassInfo, -+ &psGetBufferClassInfoOUT->sBufferInfo); -+ return 0; -+} -+ -+static int -+PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER * -+ psGetBufferClassBufferIN, -+ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER * -+ psGetBufferClassBufferOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_VOID *pvBufClassInfo; -+ IMG_HANDLE hBufferInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, -+ 1); -+ -+ psGetBufferClassBufferOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ 
&pvBufClassInfo, -+ psGetBufferClassBufferIN->hDeviceKM, -+ PVRSRV_HANDLE_TYPE_BUF_INFO); -+ if (psGetBufferClassBufferOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetBufferClassBufferOUT->eError = -+ PVRSRVGetBCBufferKM(pvBufClassInfo, -+ psGetBufferClassBufferIN->ui32BufferIndex, -+ &hBufferInt); -+ -+ if (psGetBufferClassBufferOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psGetBufferClassBufferOUT->hBuffer, -+ hBufferInt, -+ PVRSRV_HANDLE_TYPE_BUF_BUFFER, -+ (PVRSRV_HANDLE_ALLOC_FLAG) -+ (PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED), -+ psGetBufferClassBufferIN->hDeviceKM); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, -+ psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM * -+ psAllocSharedSysMemIN, -+ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM * -+ psAllocSharedSysMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1); -+ -+ psAllocSharedSysMemOUT->eError = -+ PVRSRVAllocSharedSysMemoryKM(psPerProc, -+ psAllocSharedSysMemIN->ui32Flags, -+ psAllocSharedSysMemIN->ui32Size, -+ &psKernelMemInfo); -+ if (psAllocSharedSysMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo, -+ 0, sizeof(psAllocSharedSysMemOUT->sClientMemInfo)); -+ -+ if (psKernelMemInfo->pvLinAddrKM) { -+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM = -+ psKernelMemInfo->pvLinAddrKM; -+ } else { -+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM = -+ psKernelMemInfo->sMemBlk.hOSMemHandle; -+ } -+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0; -+ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags = -+ psKernelMemInfo->ui32Flags; -+ psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize = -+ psKernelMemInfo->ui32AllocSize; -+ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = -+ psKernelMemInfo->sMemBlk.hOSMemHandle; -+ -+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, -+ &psAllocSharedSysMemOUT->sClientMemInfo. 
-+ hKernelMemInfo, psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM * -+ psFreeSharedSysMemIN, -+ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM * -+ psFreeSharedSysMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM); -+ -+ psFreeSharedSysMemOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, -+ (IMG_VOID **) & psKernelMemInfo, -+ psFreeSharedSysMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); -+ -+ if (psFreeSharedSysMemOUT->eError != PVRSRV_OK) -+ return 0; -+ -+ psFreeSharedSysMemOUT->eError = -+ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo); -+ if (psFreeSharedSysMemOUT->eError != PVRSRV_OK) -+ return 0; -+ -+ psFreeSharedSysMemOUT->eError = -+ PVRSRVReleaseHandle(psPerProc->psHandleBase, -+ psFreeSharedSysMemIN->psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); -+ return 0; -+} -+ -+static int -+PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM * psMapMemInfoMemIN, -+ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM * psMapMemInfoMemOUT, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_HANDLE_TYPE eHandleType; -+ IMG_HANDLE hParent; -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM); -+ -+ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2); -+ -+ psMapMemInfoMemOUT->eError = -+ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, -+ (IMG_VOID **) & psKernelMemInfo, -+ &eHandleType, -+ psMapMemInfoMemIN->hKernelMemInfo); -+ if (psMapMemInfoMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ switch (eHandleType) { -+ case PVRSRV_HANDLE_TYPE_MEM_INFO: -+ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF: -+ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO: -+ break; -+ default: -+ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC; -+ return 0; -+ } -+ -+ psMapMemInfoMemOUT->eError = -+ PVRSRVGetParentHandle(psPerProc->psHandleBase, -+ &hParent, -+ psMapMemInfoMemIN->hKernelMemInfo, -+ eHandleType); -+ if (psMapMemInfoMemOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ if (hParent == IMG_NULL) { -+ hParent = psMapMemInfoMemIN->hKernelMemInfo; -+ } -+ -+ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo, -+ 0, sizeof(psMapMemInfoMemOUT->sClientMemInfo)); -+ -+ if (psKernelMemInfo->pvLinAddrKM) { -+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM = -+ psKernelMemInfo->pvLinAddrKM; -+ } else { -+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM = -+ psKernelMemInfo->sMemBlk.hOSMemHandle; -+ } -+ -+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0; -+ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr = -+ psKernelMemInfo->sDevVAddr; -+ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags = -+ psKernelMemInfo->ui32Flags; -+ psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize = -+ psKernelMemInfo->ui32AllocSize; -+ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = -+ psKernelMemInfo->sMemBlk.hOSMemHandle; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psMapMemInfoMemOUT->sClientMemInfo. 
-+ hKernelMemInfo, psKernelMemInfo, -+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, hParent); -+ -+ if (psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) { -+ -+ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo, -+ 0, sizeof(PVRSRV_CLIENT_SYNC_INFO)); -+ psMapMemInfoMemOUT->psKernelSyncInfo = IMG_NULL; -+ } else { -+ -+ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData = -+ psKernelMemInfo->psKernelSyncInfo->psSyncData; -+ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = -+ psKernelMemInfo->psKernelSyncInfo-> -+ sWriteOpsCompleteDevVAddr; -+ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = -+ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = -+ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM-> -+ sMemBlk.hOSMemHandle; -+ -+ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = -+ &psMapMemInfoMemOUT->sClientSyncInfo; -+ -+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, -+ &psMapMemInfoMemOUT->sClientSyncInfo. -+ hKernelSyncInfo, -+ psKernelMemInfo->psKernelSyncInfo, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psMapMemInfoMemOUT->sClientMemInfo. -+ hKernelMemInfo); -+ } -+ -+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc); -+ -+ return 0; -+} -+ -+static int -+MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID, -+ PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR * psGetMmuPDDevPAddrIN, -+ PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR * -+ psGetMmuPDDevPAddrOUT, PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ IMG_HANDLE hDevMemContextInt; -+ -+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, -+ PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR); -+ -+ psGetMmuPDDevPAddrOUT->eError = -+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, -+ psGetMmuPDDevPAddrIN->hDevMemContext, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); -+ if (psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK) { -+ return 0; -+ } -+ -+ psGetMmuPDDevPAddrOUT->sPDDevPAddr = -+ MMU_GetPDDevPAddr(BM_GetMMUContextFromMemContext -+ (hDevMemContextInt)); -+ if (psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr) { -+ psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK; -+ } else { -+ psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC; -+ } -+ return 0; -+} -+ -+static int -+DummyBW(IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ IMG_VOID * psBridgeOut, PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+#if !defined(DEBUG) -+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID); -+#endif -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ PVR_UNREFERENCED_PARAMETER(psBridgeOut); -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ -+#if defined(DEBUG_BRIDGE_KM) -+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to " -+ "Dummy Wrapper (probably not what you want!)", -+ __FUNCTION__, ui32BridgeID, -+ g_BridgeDispatchTable[ui32BridgeID].pszIOCName)); -+#else -+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to " -+ "Dummy Wrapper (probably not what you want!)", -+ __FUNCTION__, ui32BridgeID)); -+#endif -+ return -ENOTTY; -+} -+ -+#define SetDispatchTableEntry(ui32Index, pfFunction) \ -+ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction) -+#define DISPATCH_TABLE_GAP_THRESHOLD 5 -+static IMG_VOID -+_SetDispatchTableEntry(IMG_UINT32 ui32Index, -+ const IMG_CHAR * pszIOCName, -+ BridgeWrapperFunction pfFunction, -+ const IMG_CHAR * pszFunctionName) -+{ -+ static IMG_UINT32 ui32PrevIndex = ~0UL; -+#if !defined(DEBUG) -+ PVR_UNREFERENCED_PARAMETER(pszIOCName); -+#endif -+#if 
!defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) -+ PVR_UNREFERENCED_PARAMETER(pszFunctionName); -+#endif -+ -+ -+ if (g_BridgeDispatchTable[ui32Index].pfFunction) { -+#if defined(DEBUG_BRIDGE_KM) -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s", -+ __FUNCTION__, pszIOCName, -+ g_BridgeDispatchTable[ui32Index].pszIOCName)); -+#else -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)", -+ __FUNCTION__, pszIOCName, ui32Index)); -+#endif -+ PVR_DPF((PVR_DBG_ERROR, -+ "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.", -+ __FUNCTION__)); -+ } -+ -+ if ((ui32PrevIndex != ~0UL) && -+ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || -+ (ui32Index <= ui32PrevIndex))) { -+#if defined(DEBUG_BRIDGE_KM) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)", -+ __FUNCTION__, ui32PrevIndex, -+ g_BridgeDispatchTable[ui32PrevIndex].pszIOCName, -+ ui32Index, pszIOCName)); -+#else -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: There is a gap in the dispatch table between indices %lu and %lu (%s)", -+ __FUNCTION__, ui32PrevIndex, ui32Index, pszIOCName)); -+#endif -+ PVR_DPF((PVR_DBG_ERROR, -+ "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.", -+ __FUNCTION__)); -+ } -+ -+ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; -+#if defined(DEBUG_BRIDGE_KM) -+ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; -+ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; -+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; -+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; -+#endif -+ -+ ui32PrevIndex = ui32Index; -+} -+ -+PVRSRV_ERROR CommonBridgeInit(IMG_VOID) -+{ -+ IMG_UINT32 i; -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, -+ PVRSRVEnumerateDevicesBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, -+ PVRSRVAcquireDeviceDataBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, -+ PVRSRVCreateDeviceMemContextBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, -+ PVRSRVDestroyDeviceMemContextBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, -+ PVRSRVGetDeviceMemHeapInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, -+ PVRSRVAllocDeviceMemBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, -+ PVRSRVFreeDeviceMemBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, -+ PVRSRVGetFreeDeviceMemBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_KV_TO_MMAP_DATA, -+ PVRMMapKVIndexAddressToMMapDataBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, -+ PVRSRVDisconnectBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, -+ 
PVRSRVMapDeviceMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, -+ PVRSRVUnmapDeviceMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, -+ PVRSRVMapDeviceClassMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, -+ PVRSRVUnmapDeviceClassMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE_FLUSH_DRM, -+ PVRSRVCacheFlushDRIBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW); -+ -+ -+#if defined(PDUMP) -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, -+ PDumpIsCaptureFrameBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, -+ PDumpDriverInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, -+ PDumpPDDevPAddrBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY, -+ PDumpBufferArrayBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, -+ PDumpCycleCountRegReadBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_3D_SIGNATURE_REGISTERS, -+ PDump3DSignatureRegistersBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COUNTER_REGISTERS, -+ PDumpCounterRegistersBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_TA_SIGNATURE_REGISTERS, -+ PDumpTASignatureRegistersBW); -+#endif -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, -+ PVRSRVOpenDCDeviceBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, -+ PVRSRVCloseDCDeviceBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, -+ PVRSRVEnumDCFormatsBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, -+ PVRSRVEnumDCDimsBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, -+ PVRSRVGetDCSystemBufferBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, -+ PVRSRVGetDCInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, -+ 
PVRSRVCreateDCSwapChainBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, -+ PVRSRVDestroyDCSwapChainBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, -+ PVRSRVSetDCDstRectBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, -+ PVRSRVSetDCSrcRectBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, -+ PVRSRVSetDCDstColourKeyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, -+ PVRSRVSetDCSrcColourKeyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, -+ PVRSRVGetDCBuffersBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, -+ PVRSRVSwapToDCBufferBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, -+ PVRSRVSwapToDCSystemBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, -+ PVRSRVOpenBCDeviceBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, -+ PVRSRVCloseBCDeviceBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, -+ PVRSRVGetBCInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, -+ PVRSRVGetBCBufferBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, -+ PVRSRVWrapExtMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, -+ PVRSRVUnwrapExtMemoryBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, -+ PVRSRVAllocSharedSysMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, -+ PVRSRVFreeSharedSysMemoryBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, -+ PVRSRVMapMemInfoMemBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, -+ MMU_GetPDDevPAddrBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, -+ PVRSRVInitSrvConnectBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, -+ PVRSRVInitSrvDisconnectBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, -+ PVRSRVEventObjectWaitBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, -+ PVRSRVEventObjectOpenBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, -+ PVRSRVEventObjectCloseBW); -+ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, -+ SGXGetClientInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, -+ SGXReleaseClientInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, -+ SGXGetInternalDevInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND, DummyBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, -+ SGX2DQueryBlitsCompleteBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, -+ SGXSubmitTransferBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT, -+ SGXGetInfoForSrvinitBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, -+ SGXDevInitPart2BW); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, -+ SGXFindSharedPBDescBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, -+ SGXUnrefSharedPBDescBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, -+ SGXAddSharedPBDescBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, -+ SGXRegisterHWRenderContextBW); -+ 
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, -+ SGXFlushHWRenderTargetBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, -+ SGXUnregisterHWRenderContextBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, -+ SGXRegisterHWTransferContextBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, -+ SGXUnregisterHWTransferContextBW); -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS, -+ SGXReadDiffCountersBW); -+ -+ for (i = 0; i < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; i++) { -+ if (!g_BridgeDispatchTable[i].pfFunction) { -+ g_BridgeDispatchTable[i].pfFunction = DummyBW; -+#if defined(DEBUG_BRIDGE_KM) -+ g_BridgeDispatchTable[i].pszIOCName = -+ "_PVRSRV_BRIDGE_DUMMY"; -+ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW"; -+ g_BridgeDispatchTable[i].ui32CallCount = 0; -+ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0; -+ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0; -+#endif -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM) -+{ -+ -+ IMG_VOID *psBridgeIn; -+ IMG_VOID *psBridgeOut; -+ BridgeWrapperFunction pfBridgeHandler; -+ IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID; -+ int err = -EFAULT; -+ -+ -+#if defined(DEBUG_BRIDGE_KM) -+ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++; -+ g_BridgeGlobalStats.ui32IOCTLCount++; -+#endif -+ -+ if (!psPerProc->bInitProcess) { -+ if (PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) { -+ if (!PVRSRVGetInitServerState -+ (PVRSRV_INIT_SERVER_SUCCESSFUL)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Initialisation failed. Driver unusable.", -+ __FUNCTION__)); -+ goto return_fault; -+ } -+ } else { -+ if (PVRSRVGetInitServerState -+ (PVRSRV_INIT_SERVER_RUNNING)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Initialisation is in progress", -+ __FUNCTION__)); -+ goto return_fault; -+ } else { -+ -+ switch (ui32BridgeID) { -+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES): -+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES): -+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT): -+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT): -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Driver initialisation not completed yet.", -+ __FUNCTION__)); -+ goto return_fault; -+ } -+ } -+ } -+ } -+ -+ { -+ -+ SYS_DATA *psSysData; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ goto return_fault; -+ } -+ -+ psBridgeIn = -+ ((ENV_DATA *) psSysData->pvEnvSpecificData)->pvBridgeData; -+ psBridgeOut = -+ (IMG_PVOID) ((IMG_PBYTE) psBridgeIn + -+ PVRSRV_MAX_BRIDGE_IN_SIZE); -+ -+ if (psBridgePackageKM->ui32InBufferSize > 0) { -+ if (!OSAccessOK(PVR_VERIFY_READ, -+ psBridgePackageKM->pvParamIn, -+ psBridgePackageKM->ui32InBufferSize)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid pvParamIn pointer", -+ __FUNCTION__)); -+ } -+ -+ if (CopyFromUserWrapper(psPerProc, -+ ui32BridgeID, -+ psBridgeIn, -+ psBridgePackageKM->pvParamIn, -+ psBridgePackageKM-> -+ ui32InBufferSize) -+ != PVRSRV_OK) { -+ goto return_fault; -+ } -+ } -+ } -+ -+ if (ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ui32BridgeID = %d is out if range!", __FUNCTION__, -+ ui32BridgeID)); -+ goto return_fault; -+ } -+ pfBridgeHandler = -+ (BridgeWrapperFunction) g_BridgeDispatchTable[ui32BridgeID]. 
-+ pfFunction; -+ err = pfBridgeHandler(ui32BridgeID, psBridgeIn, psBridgeOut, psPerProc); -+ if (err < 0) { -+ goto return_fault; -+ } -+ -+ -+ if (CopyToUserWrapper(psPerProc, -+ ui32BridgeID, -+ psBridgePackageKM->pvParamOut, -+ psBridgeOut, psBridgePackageKM->ui32OutBufferSize) -+ != PVRSRV_OK) { -+ goto return_fault; -+ } -+ -+ err = 0; -+return_fault: -+ ReleaseHandleBatch(psPerProc); -+ return err; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/bridged_pvr_bridge.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bridged_pvr_bridge.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bridged_pvr_bridge.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bridged_pvr_bridge.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,72 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __BRIDGED_PVR_BRIDGE_H__ -+#define __BRIDGED_PVR_BRIDGE_H__ -+ -+#include "pvr_bridge.h" -+ -+ -+#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X) -+ -+ typedef int (*BridgeWrapperFunction) (IMG_UINT32 ui32BridgeID, -+ IMG_VOID * psBridgeIn, -+ IMG_VOID * psBridgeOut, -+ PVRSRV_PER_PROCESS_DATA * -+ psPerProc); -+ -+ typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY { -+ BridgeWrapperFunction pfFunction; -+#if defined(DEBUG_BRIDGE_KM) -+ const IMG_CHAR *pszIOCName; -+ const IMG_CHAR *pszFunctionName; -+ IMG_UINT32 ui32CallCount; -+ IMG_UINT32 ui32CopyFromUserTotalBytes; -+ IMG_UINT32 ui32CopyToUserTotalBytes; -+#endif -+ } PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; -+ -+#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1) -+ -+ extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY -+ g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; -+ -+#if defined(DEBUG_BRIDGE_KM) -+ typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS { -+ IMG_UINT32 ui32IOCTLCount; -+ IMG_UINT32 ui32TotalCopyFromUserBytes; -+ IMG_UINT32 ui32TotalCopyToUserBytes; -+ } PVRSRV_BRIDGE_GLOBAL_STATS; -+ -+ extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; -+#endif -+ -+ PVRSRV_ERROR CommonBridgeInit(IMG_VOID); -+ -+ int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,284 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "bufferclass_example.h" -+ -+static IMG_VOID *gpvAnchor = IMG_NULL; -+static PFN_BC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL; -+ -+BC_EXAMPLE_DEVINFO *GetAnchorPtr(IMG_VOID) -+{ -+ return (BC_EXAMPLE_DEVINFO *) gpvAnchor; -+} -+ -+static IMG_VOID SetAnchorPtr(BC_EXAMPLE_DEVINFO * psDevInfo) -+{ -+ gpvAnchor = (IMG_VOID *) psDevInfo; -+} -+ -+static PVRSRV_ERROR OpenBCDevice(IMG_HANDLE * phDevice) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo; -+ -+ psDevInfo = GetAnchorPtr(); -+ -+ *phDevice = (IMG_HANDLE) psDevInfo; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR CloseBCDevice(IMG_HANDLE hDevice) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevice); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR GetBCBuffer(IMG_HANDLE hDevice, -+ IMG_UINT32 ui32BufferNumber, -+ PVRSRV_SYNC_DATA * psSyncData, -+ IMG_HANDLE * phBuffer) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo; -+ -+ if (!hDevice || !phBuffer) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (BC_EXAMPLE_DEVINFO *) hDevice; -+ -+ if (ui32BufferNumber < psDevInfo->sBufferInfo.ui32BufferCount) { -+ psDevInfo->psSystemBuffer[ui32BufferNumber].psSyncData = -+ psSyncData; -+ *phBuffer = -+ (IMG_HANDLE) & psDevInfo->psSystemBuffer[ui32BufferNumber]; -+ } else { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR GetBCInfo(IMG_HANDLE hDevice, BUFFER_INFO * psBCInfo) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo; -+ -+ if (!hDevice || !psBCInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (BC_EXAMPLE_DEVINFO *) hDevice; -+ -+ *psBCInfo = psDevInfo->sBufferInfo; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR GetBCBufferAddr(IMG_HANDLE hDevice, -+ IMG_HANDLE hBuffer, -+ IMG_SYS_PHYADDR ** ppsSysAddr, -+ IMG_UINT32 * pui32ByteSize, -+ IMG_VOID ** ppvCpuVAddr, -+ IMG_HANDLE * phOSMapInfo, -+ IMG_BOOL * pbIsContiguous) -+{ -+ BC_EXAMPLE_BUFFER *psBuffer; -+ -+ if (!hDevice || !hBuffer || !ppsSysAddr || !pui32ByteSize) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psBuffer = (BC_EXAMPLE_BUFFER *) hBuffer; -+ -+ *ppsSysAddr = &psBuffer->sPageAlignSysAddr; -+ *ppvCpuVAddr = psBuffer->sCPUVAddr; -+ -+ *pui32ByteSize = psBuffer->ui32Size; -+ -+ *phOSMapInfo = IMG_NULL; -+ *pbIsContiguous = IMG_TRUE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR BC_Example_Init(IMG_VOID) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo; -+ IMG_CPU_PHYADDR sSystemBufferCPUPAddr; -+ IMG_UINT32 i; -+ -+ psDevInfo = GetAnchorPtr(); -+ -+ if (psDevInfo == IMG_NULL) { -+ -+ psDevInfo = -+ (BC_EXAMPLE_DEVINFO *) -+ BCAllocKernelMem(sizeof(BC_EXAMPLE_DEVINFO)); -+ -+ if (!psDevInfo) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ SetAnchorPtr((IMG_VOID *) psDevInfo); -+ -+ psDevInfo->ui32RefCount = 0; -+ -+ if (BCOpenPVRServices(&psDevInfo->hPVRServices) != PVRSRV_OK) { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ if (BCGetLibFuncAddr -+ (psDevInfo->hPVRServices, "PVRGetBufferClassJTable", -+ &pfnGetPVRJTable) != PVRSRV_OK) { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ if (!(*pfnGetPVRJTable) (&psDevInfo->sPVRJTable)) { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ psDevInfo->ui32NumBuffers = 0; -+ -+ psDevInfo->psSystemBuffer = -+ BCAllocKernelMem(sizeof(BC_EXAMPLE_BUFFER) * -+ BC_EXAMPLE_NUM_BUFFERS); -+ -+ if (!psDevInfo->psSystemBuffer) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT; -+ 
psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH; -+ psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT; -+ psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE; -+ psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID; -+ psDevInfo->sBufferInfo.ui32Flags = -+ PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | -+ PVRSRV_BC_FLAGS_YUVCSC_BT601; -+ -+ for (i = 0; i < BC_EXAMPLE_NUM_BUFFERS; i++) { -+ IMG_UINT32 ui32Size = -+ BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE; -+ -+ if (psDevInfo->sBufferInfo.pixelformat == -+ PVRSRV_PIXEL_FORMAT_NV12) { -+ -+ ui32Size += -+ ((BC_EXAMPLE_STRIDE >> 1) * -+ (BC_EXAMPLE_HEIGHT >> 1) << 1); -+ } -+ -+ if (BCAllocContigMemory(ui32Size, -+ &psDevInfo->psSystemBuffer[i]. -+ hMemHandle, -+ &psDevInfo->psSystemBuffer[i]. -+ sCPUVAddr, -+ &sSystemBufferCPUPAddr) != -+ PVRSRV_OK) { -+ break; -+ } -+ -+ psDevInfo->ui32NumBuffers++; -+ -+ psDevInfo->psSystemBuffer[i].ui32Size = ui32Size; -+ psDevInfo->psSystemBuffer[i].sSysAddr = -+ CpuPAddrToSysPAddrBC(sSystemBufferCPUPAddr); -+ psDevInfo->psSystemBuffer[i].sPageAlignSysAddr.uiAddr = -+ (psDevInfo->psSystemBuffer[i].sSysAddr. -+ uiAddr & 0xFFFFF000); -+ psDevInfo->psSystemBuffer[i].psSyncData = IMG_NULL; -+ } -+ -+ psDevInfo->sBufferInfo.ui32BufferCount = -+ psDevInfo->ui32NumBuffers; -+ -+ psDevInfo->sBCJTable.ui32TableSize = -+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE); -+ psDevInfo->sBCJTable.pfnOpenBCDevice = OpenBCDevice; -+ psDevInfo->sBCJTable.pfnCloseBCDevice = CloseBCDevice; -+ psDevInfo->sBCJTable.pfnGetBCBuffer = GetBCBuffer; -+ psDevInfo->sBCJTable.pfnGetBCInfo = GetBCInfo; -+ psDevInfo->sBCJTable.pfnGetBufferAddr = GetBCBufferAddr; -+ -+ if (psDevInfo->sPVRJTable. -+ pfnPVRSRVRegisterBCDevice(&psDevInfo->sBCJTable, -+ &psDevInfo->ui32DeviceID) != -+ PVRSRV_OK) { -+ return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; -+ } -+ } -+ -+ psDevInfo->ui32RefCount++; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR BC_Example_Deinit(IMG_VOID) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo; -+ IMG_UINT32 i; -+ psDevInfo = GetAnchorPtr(); -+ -+ if (psDevInfo == IMG_NULL) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDevInfo->ui32RefCount--; -+ -+ if (psDevInfo->ui32RefCount == 0) { -+ -+ PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable = -+ &psDevInfo->sPVRJTable; -+ -+ if (psJTable-> -+ pfnPVRSRVRemoveBCDevice(psDevInfo->ui32DeviceID) != -+ PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (BCClosePVRServices(psDevInfo->hPVRServices) != PVRSRV_OK) { -+ psDevInfo->hPVRServices = IMG_NULL; -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ for (i = 0; i < psDevInfo->ui32NumBuffers; i++) { -+ BCFreeContigMemory(psDevInfo->psSystemBuffer[i]. -+ ui32Size, -+ psDevInfo->psSystemBuffer[i]. -+ hMemHandle, -+ psDevInfo->psSystemBuffer[i]. -+ sCPUVAddr, -+ SysPAddrToCpuPAddrBC(psDevInfo-> -+ psSystemBuffer -+ [i].sSysAddr)); -+ } -+ -+ BCFreeKernelMem(psDevInfo); -+ -+ SetAnchorPtr(IMG_NULL); -+ } -+ -+ return PVRSRV_OK; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,117 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __BC_EXAMPLE_H__ -+#define __BC_EXAMPLE_H__ -+ -+#include "img_defs.h" -+#include "servicesext.h" -+#include "kernelbuffer.h" -+ -+ -+ extern IMG_IMPORT IMG_BOOL -+ PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE * psJTable); -+ -+#define BC_EXAMPLE_NUM_BUFFERS 3 -+ -+#define YUV420 1 -+#ifdef YUV420 -+ -+#define BC_EXAMPLE_WIDTH (320) -+#define BC_EXAMPLE_HEIGHT (160) -+#define BC_EXAMPLE_STRIDE (320) -+#define BC_EXAMPLE_PIXELFORMAT (PVRSRV_PIXEL_FORMAT_NV12) -+ -+#else -+ -+#define BC_EXAMPLE_WIDTH (320) -+#define BC_EXAMPLE_HEIGHT (160) -+#define BC_EXAMPLE_STRIDE (320*2) -+#define BC_EXAMPLE_PIXELFORMAT (PVRSRV_PIXEL_FORMAT_RGB565) -+ -+#endif -+ -+#define BC_EXAMPLE_DEVICEID 0 -+ -+ typedef struct BC_EXAMPLE_BUFFER_TAG { -+ IMG_UINT32 ui32Size; -+ IMG_HANDLE hMemHandle; -+ IMG_SYS_PHYADDR sSysAddr; -+ IMG_SYS_PHYADDR sPageAlignSysAddr; -+ IMG_CPU_VIRTADDR sCPUVAddr; -+ PVRSRV_SYNC_DATA *psSyncData; -+ struct BC_EXAMPLE_BUFFER_TAG *psNext; -+ } BC_EXAMPLE_BUFFER; -+ -+ typedef struct BC_EXAMPLE_DEVINFO_TAG { -+ IMG_UINT32 ui32DeviceID; -+ -+ BC_EXAMPLE_BUFFER *psSystemBuffer; -+ -+ BUFFER_INFO sBufferInfo; -+ -+ IMG_UINT32 ui32NumBuffers; -+ -+ PVRSRV_BC_BUFFER2SRV_KMJTABLE sPVRJTable; -+ -+ PVRSRV_BC_SRV2BUFFER_KMJTABLE sBCJTable; -+ -+ IMG_HANDLE hPVRServices; -+ -+ IMG_UINT32 ui32RefCount; -+ -+ } BC_EXAMPLE_DEVINFO; -+ -+ PVRSRV_ERROR BC_Example_Init(IMG_VOID); -+ PVRSRV_ERROR BC_Example_Deinit(IMG_VOID); -+ -+ PVRSRV_ERROR BCOpenPVRServices(IMG_HANDLE * phPVRServices); -+ PVRSRV_ERROR BCClosePVRServices(IMG_HANDLE hPVRServices); -+ -+ IMG_VOID *BCAllocKernelMem(IMG_UINT32 ui32Size); -+ IMG_VOID BCFreeKernelMem(IMG_VOID * pvMem); -+ -+ PVRSRV_ERROR BCAllocContigMemory(IMG_UINT32 ui32Size, -+ IMG_HANDLE * phMemHandle, -+ IMG_CPU_VIRTADDR * pLinAddr, -+ IMG_CPU_PHYADDR * pPhysAddr); -+ IMG_VOID BCFreeContigMemory(IMG_UINT32 ui32Size, -+ IMG_HANDLE hMemHandle, -+ IMG_CPU_VIRTADDR LinAddr, -+ IMG_CPU_PHYADDR PhysAddr); -+ -+ IMG_SYS_PHYADDR CpuPAddrToSysPAddrBC(IMG_CPU_PHYADDR cpu_paddr); -+ IMG_CPU_PHYADDR SysPAddrToCpuPAddrBC(IMG_SYS_PHYADDR sys_paddr); -+ -+ IMG_VOID *MapPhysAddr(IMG_SYS_PHYADDR sSysAddr, IMG_UINT32 ui32Size); -+ IMG_VOID UnMapPhysAddr(IMG_VOID * pvAddr, IMG_UINT32 ui32Size); -+ -+ PVRSRV_ERROR BCGetLibFuncAddr(IMG_HANDLE hExtDrv, -+ IMG_CHAR * szFunctionName, -+ PFN_BC_GET_PVRJTABLE * ppfnFuncTable); -+ BC_EXAMPLE_DEVINFO *GetAnchorPtr(IMG_VOID); -+ -+#endif -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example_linux.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example_linux.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example_linux.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example_linux.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,215 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "bufferclass_example.h" -+#include "bufferclass_example_linux.h" -+#include "pvrmodule.h" -+ -+#define DEVNAME "bc_example" -+#define DRVNAME DEVNAME -+ -+MODULE_SUPPORTED_DEVICE(DEVNAME); -+ -+int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, -+ unsigned long arg); -+int FillBuffer(unsigned int ui32BufferIndex); -+int GetBufferCount(unsigned int *pui32BufferCount); -+ -+static int AssignedMajorNumber; -+ -+static struct file_operations bufferclass_example_fops = { -+ioctl: BC_Example_Bridge, -+}; -+ -+#define unref__ __attribute__ ((unused)) -+ -+ -+static int __init BC_Example_ModInit(void) -+{ -+ -+ -+ AssignedMajorNumber = -+ register_chrdev(0, DEVNAME, &bufferclass_example_fops); -+ -+ if (AssignedMajorNumber <= 0) { -+ printk(KERN_ERR DRVNAME -+ ": BC_Example_ModInit: unable to get major number\n"); -+ -+ goto ExitDisable; -+ } -+#if defined(DEBUG) -+ printk(KERN_ERR DRVNAME ": BC_Example_ModInit: major device %d\n", -+ AssignedMajorNumber); -+#endif -+ -+ -+ if (BC_Example_Init() != PVRSRV_OK) { -+ printk(KERN_ERR DRVNAME -+ ": BC_Example_ModInit: can't init device\n"); -+ goto ExitUnregister; -+ } -+ -+ return 0; -+ -+ExitUnregister: -+ unregister_chrdev(AssignedMajorNumber, DEVNAME); -+ExitDisable: -+ return -EBUSY; -+} -+ -+static void __exit BC_Example_ModCleanup(void) -+{ -+ unregister_chrdev(AssignedMajorNumber, DEVNAME); -+ -+ if (BC_Example_Deinit() != PVRSRV_OK) { -+ printk(KERN_ERR DRVNAME -+ ": BC_Example_ModCleanup: can't deinit device\n"); -+ } -+ -+} -+ -+IMG_VOID *BCAllocKernelMem(IMG_UINT32 ui32Size) -+{ -+ return kmalloc(ui32Size, GFP_KERNEL); -+} -+ -+IMG_VOID BCFreeKernelMem(IMG_VOID * pvMem) -+{ -+ kfree(pvMem); -+} -+ -+PVRSRV_ERROR BCAllocContigMemory(IMG_UINT32 ui32Size, -+ IMG_HANDLE unref__ * phMemHandle, -+ IMG_CPU_VIRTADDR * pLinAddr, -+ IMG_CPU_PHYADDR * pPhysAddr) -+{ -+ dma_addr_t dma; -+ 
IMG_VOID *pvLinAddr; -+ -+ pvLinAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL); -+ -+ if (pvLinAddr == IMG_NULL) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ pPhysAddr->uiAddr = dma; -+ *pLinAddr = pvLinAddr; -+ -+ return PVRSRV_OK; -+} -+ -+void BCFreeContigMemory(IMG_UINT32 ui32Size, -+ IMG_HANDLE unref__ hMemHandle, -+ IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr) -+{ -+ dma_free_coherent(NULL, ui32Size, LinAddr, -+ (dma_addr_t) PhysAddr.uiAddr); -+} -+ -+IMG_SYS_PHYADDR CpuPAddrToSysPAddrBC(IMG_CPU_PHYADDR cpu_paddr) -+{ -+ IMG_SYS_PHYADDR sys_paddr; -+ -+ sys_paddr.uiAddr = cpu_paddr.uiAddr; -+ return sys_paddr; -+} -+ -+IMG_CPU_PHYADDR SysPAddrToCpuPAddrBC(IMG_SYS_PHYADDR sys_paddr) -+{ -+ -+ IMG_CPU_PHYADDR cpu_paddr; -+ -+ cpu_paddr.uiAddr = sys_paddr.uiAddr; -+ return cpu_paddr; -+} -+ -+PVRSRV_ERROR BCOpenPVRServices(IMG_HANDLE * phPVRServices) -+{ -+ -+ *phPVRServices = 0; -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR BCClosePVRServices(IMG_HANDLE unref__ hPVRServices) -+{ -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR BCGetLibFuncAddr(IMG_HANDLE unref__ hExtDrv, -+ IMG_CHAR * szFunctionName, -+ PFN_BC_GET_PVRJTABLE * ppfnFuncTable) -+{ -+ if (strcmp("PVRGetBufferClassJTable", szFunctionName) != 0) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ *ppfnFuncTable = PVRGetBufferClassJTable; -+ -+ return PVRSRV_OK; -+} -+ -+int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ int err = -EFAULT; -+ int command = _IOC_NR(cmd); -+ BC_Example_ioctl_package *psBridge = (BC_Example_ioctl_package *) arg; -+ -+ if (!access_ok -+ (VERIFY_WRITE, psBridge, sizeof(BC_Example_ioctl_package))) -+ return err; -+ -+ switch (command) { -+ case _IOC_NR(BC_Example_ioctl_fill_buffer): -+ { -+ if (FillBuffer(psBridge->inputparam) == -1) -+ return err; -+ break; -+ } -+ case _IOC_NR(BC_Example_ioctl_get_buffer_count): -+ { -+ if (GetBufferCount(&psBridge->outputparam) == -1) -+ return err; -+ -+ break; -+ } -+ default: -+ return err; -+ } -+ -+ return 0; -+} -+ -+module_init(BC_Example_ModInit); -+module_exit(BC_Example_ModCleanup); -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example_linux.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example_linux.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example_linux.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example_linux.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,45 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". 
-+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __BC_EXAMPLE_LINUX_H__ -+#define __BC_EXAMPLE_LINUX_H__ -+ -+#include -+ -+typedef struct BC_Example_ioctl_package_TAG { -+ int inputparam; -+ int outputparam; -+ -+} BC_Example_ioctl_package; -+ -+#define BC_EXAMPLE_IOC_GID 'g' -+ -+#define BC_EXAMPLE_IOWR(INDEX) _IOWR(BC_EXAMPLE_IOC_GID, INDEX, BC_Example_ioctl_package) -+ -+#define BC_Example_ioctl_fill_buffer BC_EXAMPLE_IOWR(0) -+#define BC_Example_ioctl_get_buffer_count BC_EXAMPLE_IOWR(1) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example_private.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example_private.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/bufferclass_example_private.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/bufferclass_example_private.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,226 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "bufferclass_example.h" -+ -+#define MIN(a,b) ((a)<(b)?(a):(b)) -+ -+void FillYUV420Image(void *pvDest, int width, int height, int bytestride) -+{ -+ static int iPhase = 0; -+ int i, j; -+ unsigned char u, v, y; -+ unsigned char *pui8y = (unsigned char *)pvDest; -+ unsigned short *pui16uv; -+ unsigned int count = 0; -+ -+ for (j = 0; j < height; j++) { -+ for (i = 0; i < width; i++) { -+ y = (((i + iPhase) >> 6) % (2) == 0) ? 0x7f : 0x00; -+ -+ pui8y[count++] = y; -+ } -+ } -+ -+ pui16uv = -+ (unsigned short *)((unsigned char *)pvDest + (width * height)); -+ count = 0; -+ -+ for (j = 0; j < height; j += 2) { -+ for (i = 0; i < width; i += 2) { -+ u = (j < -+ (height / 2)) ? ((i < -+ (width / -+ 2)) ? 0xFF : 0x33) : ((i < -+ (width / -+ 2)) ? -+ 0x33 : -+ 0xAA); -+ v = (j < -+ (height / 2)) ? ((i < -+ (width / -+ 2)) ? 0xAC : 0x0) : ((i < -+ (width / -+ 2)) ? 
-+ 0x03 : -+ 0xEE); -+ -+ pui16uv[count++] = (v << 8) | u; -+ -+ } -+ } -+ -+ iPhase++; -+} -+ -+void FillYUV422Image(void *pvDest, int width, int height, int bytestride) -+{ -+ static int iPhase = 0; -+ int x, y; -+ unsigned char u, v, y0, y1; -+ unsigned int *pui32yuv = (unsigned int *)pvDest; -+ unsigned int count = 0; -+ -+ for (y = 0; y < height; y++) { -+ for (x = 0; x < width; x += 2) { -+ u = (y < -+ (height / 2)) ? ((x < -+ (width / -+ 2)) ? 0xFF : 0x33) : ((x < -+ (width / -+ 2)) ? -+ 0x33 : -+ 0xAA); -+ v = (y < -+ (height / 2)) ? ((x < -+ (width / -+ 2)) ? 0xAA : 0x0) : ((x < -+ (width / -+ 2)) ? -+ 0x03 : -+ 0xEE); -+ -+ y0 = y1 = -+ (((x + iPhase) >> 6) % (2) == 0) ? 0x7f : 0x00; -+ -+ pui32yuv[count++] = -+ (y1 << 24) | (v << 16) | (y0 << 8) | u; -+ -+ } -+ } -+ -+ iPhase++; -+} -+ -+void FillRGB565Image(void *pvDest, int width, int height, int bytestride) -+{ -+ int i, Count; -+ unsigned long *pui32Addr = (unsigned long *)pvDest; -+ unsigned short *pui16Addr = (unsigned short *)pvDest; -+ unsigned long Colour32; -+ unsigned short Colour16; -+ static unsigned char Colour8 = 0; -+ -+ Colour16 = -+ (Colour8 >> 3) | ((Colour8 >> 2) << 5) | ((Colour8 >> 3) << 11); -+ Colour32 = Colour16 | Colour16 << 16; -+ -+ Count = (height * bytestride) >> 2; -+ -+ for (i = 0; i < Count; i++) { -+ pui32Addr[i] = Colour32; -+ } -+ -+ Count = height; -+ -+ pui16Addr = (unsigned short *)((unsigned char *)pvDest + (2 * Colour8)); -+ -+ for (i = 0; i < Count; i++) { -+ *pui16Addr = 0xF800; -+ -+ pui16Addr = -+ (unsigned short *)((unsigned char *)pui16Addr + bytestride); -+ } -+ Count = bytestride >> 2; -+ -+ pui32Addr = -+ (unsigned long *)((unsigned char *)pvDest + -+ (bytestride * (MIN(height - 1, 0xFF) - Colour8))); -+ -+ for (i = 0; i < Count; i++) { -+ pui32Addr[i] = 0x001F001F; -+ } -+ -+ Colour8 = (Colour8 + 1) % MIN(height - 1, 0xFF); -+} -+ -+int FillBuffer(unsigned int ui32BufferIndex) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo = GetAnchorPtr(); -+ BC_EXAMPLE_BUFFER *psBuffer; -+ BUFFER_INFO *psBufferInfo; -+ PVRSRV_SYNC_DATA *psSyncData; -+ -+ if (psDevInfo == IMG_NULL) { -+ return -1; -+ } -+ -+ psBuffer = &psDevInfo->psSystemBuffer[ui32BufferIndex]; -+ psBufferInfo = &psDevInfo->sBufferInfo; -+ -+ psSyncData = psBuffer->psSyncData; -+ -+ if (psSyncData) { -+ -+ if (psSyncData->ui32ReadOpsPending != -+ psSyncData->ui32ReadOpsComplete) { -+ return -1; -+ } -+ -+ psSyncData->ui32WriteOpsPending++; -+ } -+ -+ switch (psBufferInfo->pixelformat) { -+ case PVRSRV_PIXEL_FORMAT_RGB565: -+ default: -+ { -+ FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, -+ BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE); -+ break; -+ } -+ case PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY: -+ { -+ FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, -+ BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE); -+ break; -+ } -+ case PVRSRV_PIXEL_FORMAT_NV12: -+ { -+ FillYUV420Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, -+ BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE); -+ break; -+ } -+ } -+ -+ if (psSyncData) { -+ psSyncData->ui32WriteOpsComplete++; -+ } -+ -+ return 0; -+} -+ -+int GetBufferCount(unsigned int *pui32BufferCount) -+{ -+ BC_EXAMPLE_DEVINFO *psDevInfo = GetAnchorPtr(); -+ -+ if (psDevInfo == IMG_NULL) { -+ return -1; -+ } -+ -+ *pui32BufferCount = psDevInfo->sBufferInfo.ui32BufferCount; -+ -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/buffer_manager.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/buffer_manager.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/buffer_manager.c 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-omap-2.6.28-nokia1/drivers/gpu/pvr/buffer_manager.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,1632 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+#include "services_headers.h" -+ -+#include "sysconfig.h" -+#include "hash.h" -+#include "ra.h" -+#include "pdump_km.h" -+ -+#include -+ -+#define MIN(a,b) (a > b ? b : a) -+ -+static IMG_BOOL -+ZeroBuf(BM_BUF * pBuf, BM_MAPPING * pMapping, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags); -+static void BM_FreeMemory(void *pH, IMG_UINTPTR_T base, BM_MAPPING * psMapping); -+static IMG_BOOL -+BM_ImportMemory(void *pH, IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, IMG_UINTPTR_T * pBase); -+ -+static IMG_BOOL -+DevMemoryAlloc(BM_CONTEXT * pBMContext, -+ BM_MAPPING * pMapping, -+ IMG_SIZE_T * pActualSize, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 dev_vaddr_alignment, IMG_DEV_VIRTADDR * pDevVAddr); -+static void DevMemoryFree(BM_MAPPING * pMapping); -+ -+static IMG_BOOL -+AllocMemory(BM_CONTEXT * pBMContext, -+ BM_HEAP * psBMHeap, -+ IMG_DEV_VIRTADDR * psDevVAddr, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 uFlags, IMG_UINT32 uDevVAddrAlignment, BM_BUF * pBuf) -+{ -+ BM_MAPPING *pMapping; -+ IMG_UINTPTR_T uOffset; -+ RA_ARENA *pArena = IMG_NULL; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)", -+ pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf)); -+ -+ if (uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) { -+ if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) { -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported")); -+ return IMG_FALSE; -+ } -+ -+ if (psBMHeap->ui32Attribs -+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG -+ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) { -+ -+ pArena = psBMHeap->pImportArena; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocMemory: backing store type doesn't match heap")); -+ return IMG_FALSE; -+ } -+ -+ if (!RA_Alloc(pArena, -+ uSize, -+ IMG_NULL, -+ (void *)&pMapping, -+ uFlags, -+ uDevVAddrAlignment, -+ 0, (IMG_UINTPTR_T *) & (pBuf->DevVAddr.uiAddr))) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocMemory: RA_Alloc(0x%x) FAILED", uSize)); -+ return IMG_FALSE; -+ } -+ -+ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr; -+ if (pMapping->CpuVAddr) { -+ pBuf->CpuVAddr = -+ (void *)((IMG_UINTPTR_T) pMapping->CpuVAddr + -+ 
uOffset); -+ } else { -+ pBuf->CpuVAddr = IMG_NULL; -+ } -+ -+ if (uSize == pMapping->uSize) { -+ pBuf->hOSMemHandle = pMapping->hOSMemHandle; -+ } else { -+ if (OSGetSubMemHandle(pMapping->hOSMemHandle, -+ uOffset, -+ uSize, -+ psBMHeap->ui32Attribs, -+ &pBuf->hOSMemHandle) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocMemory: OSGetSubMemHandle FAILED")); -+ return IMG_FALSE; -+ } -+ } -+ -+ pBuf->CpuPAddr = pMapping->CpuPAddr; -+ -+ if (uFlags & PVRSRV_MEM_ZERO) { -+ if (!ZeroBuf -+ (pBuf, pMapping, uSize, -+ psBMHeap->ui32Attribs | uFlags)) { -+ return IMG_FALSE; -+ } -+ } -+ } else { -+ if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) { -+ -+ PVR_ASSERT(psDevVAddr != IMG_NULL); -+ -+ pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap-> -+ pMMUHeap, uSize, -+ IMG_NULL, -+ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR, -+ uDevVAddrAlignment, -+ psDevVAddr); -+ -+ pBuf->DevVAddr = *psDevVAddr; -+ } else { -+ -+ pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap-> -+ pMMUHeap, uSize, -+ IMG_NULL, 0, -+ uDevVAddrAlignment, -+ &pBuf->DevVAddr); -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(struct _BM_MAPPING_), -+ (IMG_PVOID *) & pMapping, IMG_NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocMemory: OSAllocMem(0x%x) FAILED")); -+ return IMG_FALSE; -+ } -+ -+ pBuf->CpuVAddr = IMG_NULL; -+ pBuf->hOSMemHandle = 0; -+ pBuf->CpuPAddr.uiAddr = 0; -+ -+ pMapping->CpuVAddr = IMG_NULL; -+ pMapping->CpuPAddr.uiAddr = 0; -+ pMapping->DevVAddr = pBuf->DevVAddr; -+ pMapping->psSysAddr = IMG_NULL; -+ pMapping->uSize = uSize; -+ pMapping->hOSMemHandle = 0; -+ } -+ -+ pMapping->pArena = pArena; -+ -+ pMapping->pBMHeap = psBMHeap; -+ pBuf->pMapping = pMapping; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x", -+ pMapping, -+ pMapping->DevVAddr.uiAddr, -+ pMapping->CpuVAddr, -+ pMapping->CpuPAddr.uiAddr, pMapping->uSize)); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x", -+ pBuf, -+ pBuf->DevVAddr.uiAddr, -+ pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize)); -+ -+ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0); -+ -+ return IMG_TRUE; -+} -+ -+static IMG_BOOL -+WrapMemory(BM_HEAP * psBMHeap, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 ui32BaseOffset, -+ IMG_BOOL bPhysContig, -+ IMG_SYS_PHYADDR * psAddr, -+ IMG_VOID * pvCPUVAddr, IMG_UINT32 uFlags, BM_BUF * pBuf) -+{ -+ IMG_DEV_VIRTADDR DevVAddr = { 0 }; -+ BM_MAPPING *pMapping; -+ IMG_BOOL bResult; -+ IMG_UINT32 const ui32PageSize = HOST_PAGESIZE(); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)", -+ psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr, -+ uFlags, pBuf)); -+ -+ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0); -+ -+ PVR_ASSERT(((IMG_UINT32) pvCPUVAddr & (ui32PageSize - 1)) == 0); -+ -+ uSize += ui32BaseOffset; -+ uSize = HOST_PAGEALIGN(uSize); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*pMapping), -+ (IMG_PVOID *) & pMapping, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED", -+ sizeof(*pMapping))); -+ return IMG_FALSE; -+ } -+ -+ OSMemSet(pMapping, 0, sizeof(*pMapping)); -+ -+ pMapping->uSize = uSize; -+ pMapping->pBMHeap = psBMHeap; -+ -+ if (pvCPUVAddr) { -+ pMapping->CpuVAddr = pvCPUVAddr; -+ -+ if (bPhysContig) { -+ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr; -+ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]); -+ -+ if 
(OSRegisterMem(pMapping->CpuPAddr, -+ pMapping->CpuVAddr, -+ pMapping->uSize, -+ uFlags, -+ &pMapping->hOSMemHandle) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "WrapMemory: OSRegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed", -+ pMapping->CpuPAddr, pMapping->CpuVAddr, -+ pMapping->uSize)); -+ goto fail_cleanup; -+ } -+ } else { -+ pMapping->eCpuMemoryOrigin = -+ hm_wrapped_scatter_virtaddr; -+ pMapping->psSysAddr = psAddr; -+ -+ if (OSRegisterDiscontigMem(pMapping->psSysAddr, -+ pMapping->CpuVAddr, -+ pMapping->uSize, -+ uFlags, -+ &pMapping->hOSMemHandle) != -+ PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "WrapMemory: OSRegisterDiscontigMem CpuVAddr = 0x%08X, Size=%d) failed", -+ pMapping->CpuVAddr, pMapping->uSize)); -+ goto fail_cleanup; -+ } -+ } -+ } else { -+ if (bPhysContig) { -+ pMapping->eCpuMemoryOrigin = hm_wrapped; -+ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]); -+ -+ if (OSReservePhys(pMapping->CpuPAddr, -+ pMapping->uSize, -+ uFlags, -+ &pMapping->CpuVAddr, -+ &pMapping->hOSMemHandle) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed", -+ pMapping->CpuPAddr, pMapping->uSize)); -+ goto fail_cleanup; -+ } -+ } else { -+ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter; -+ pMapping->psSysAddr = psAddr; -+ -+ if (OSReserveDiscontigPhys(pMapping->psSysAddr, -+ pMapping->uSize, -+ uFlags, -+ &pMapping->CpuVAddr, -+ &pMapping->hOSMemHandle) != -+ PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "WrapMemory: OSReserveDiscontigPhys Size=%d) failed", -+ pMapping->uSize)); -+ goto fail_cleanup; -+ } -+ } -+ } -+ -+ bResult = DevMemoryAlloc(psBMHeap->pBMContext, -+ pMapping, -+ IMG_NULL, -+ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE, -+ ui32PageSize, &DevVAddr); -+ if (!bResult) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "WrapMemory: DevMemoryAlloc(0x%x) failed", -+ pMapping->uSize)); -+ goto fail_cleanup; -+ } -+ -+ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset; -+ if (!ui32BaseOffset) { -+ pBuf->hOSMemHandle = pMapping->hOSMemHandle; -+ } else { -+ if (OSGetSubMemHandle(pMapping->hOSMemHandle, -+ ui32BaseOffset, -+ (pMapping->uSize - ui32BaseOffset), -+ uFlags, -+ &pBuf->hOSMemHandle) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "WrapMemory: OSGetSubMemHandle failed")); -+ goto fail_cleanup; -+ } -+ } -+ if (pMapping->CpuVAddr) { -+ pBuf->CpuVAddr = -+ (void *)((IMG_UINTPTR_T) pMapping->CpuVAddr + -+ ui32BaseOffset); -+ } -+ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + ui32BaseOffset; -+ -+ if (uFlags & PVRSRV_MEM_ZERO) { -+ if (!ZeroBuf(pBuf, pMapping, uSize, uFlags)) { -+ return IMG_FALSE; -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr)); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x", -+ pMapping, pMapping->DevVAddr.uiAddr, -+ pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr, -+ pMapping->uSize)); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x", -+ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, -+ pBuf->CpuPAddr.uiAddr, uSize)); -+ -+ pBuf->pMapping = pMapping; -+ return IMG_TRUE; -+ -+fail_cleanup: -+ if (ui32BaseOffset && pBuf->hOSMemHandle) { -+ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags); -+ } -+ -+ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) { -+ switch (pMapping->eCpuMemoryOrigin) { -+ case hm_wrapped: -+ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, -+ uFlags, pMapping->hOSMemHandle); -+ break; -+ case 
hm_wrapped_virtaddr: -+ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, -+ uFlags, pMapping->hOSMemHandle); -+ break; -+ case hm_wrapped_scatter: -+ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, -+ pMapping->uSize, uFlags, -+ pMapping->hOSMemHandle); -+ break; -+ case hm_wrapped_scatter_virtaddr: -+ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, -+ pMapping->uSize, uFlags, -+ pMapping->hOSMemHandle); -+ break; -+ default: -+ break; -+ } -+ -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, -+ IMG_NULL); -+ -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL -+ZeroBuf(BM_BUF * pBuf, BM_MAPPING * pMapping, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags) -+{ -+ IMG_VOID *pvCpuVAddr; -+ -+ if (pBuf->CpuVAddr) { -+ OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes); -+ } else if (pMapping->eCpuMemoryOrigin == hm_contiguous -+ || pMapping->eCpuMemoryOrigin == hm_wrapped) { -+ pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr, -+ ui32Bytes, -+ PVRSRV_HAP_KERNEL_ONLY -+ | (ui32Flags & -+ PVRSRV_HAP_CACHETYPE_MASK), -+ IMG_NULL); -+ if (!pvCpuVAddr) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "ZeroBuf: OSMapPhysToLin for contiguous buffer failed")); -+ return IMG_FALSE; -+ } -+ OSMemSet(pvCpuVAddr, 0, ui32Bytes); -+ OSUnMapPhysToLin(pvCpuVAddr, -+ ui32Bytes, -+ PVRSRV_HAP_KERNEL_ONLY -+ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), -+ IMG_NULL); -+ } else { -+ IMG_UINT32 ui32BytesRemaining = ui32Bytes; -+ IMG_UINT32 ui32CurrentOffset = 0; -+ IMG_CPU_PHYADDR CpuPAddr; -+ -+ PVR_ASSERT(pBuf->hOSMemHandle); -+ -+ while (ui32BytesRemaining > 0) { -+ IMG_UINT32 ui32BlockBytes = -+ MIN(ui32BytesRemaining, HOST_PAGESIZE()); -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, -+ ui32CurrentOffset); -+ -+ if (CpuPAddr.uiAddr & (HOST_PAGESIZE() - 1)) { -+ ui32BlockBytes = -+ MIN(ui32BytesRemaining, -+ HOST_PAGEALIGN(CpuPAddr.uiAddr) - -+ CpuPAddr.uiAddr); -+ } -+ -+ pvCpuVAddr = OSMapPhysToLin(CpuPAddr, -+ ui32BlockBytes, -+ PVRSRV_HAP_KERNEL_ONLY -+ | (ui32Flags & -+ PVRSRV_HAP_CACHETYPE_MASK), -+ IMG_NULL); -+ if (!pvCpuVAddr) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED")); -+ return IMG_FALSE; -+ } -+ OSMemSet(pvCpuVAddr, 0, ui32BlockBytes); -+ OSUnMapPhysToLin(pvCpuVAddr, -+ ui32BlockBytes, -+ PVRSRV_HAP_KERNEL_ONLY -+ | (ui32Flags & -+ PVRSRV_HAP_CACHETYPE_MASK), -+ IMG_NULL); -+ -+ ui32BytesRemaining -= ui32BlockBytes; -+ ui32CurrentOffset += ui32BlockBytes; -+ } -+ } -+ -+ return IMG_TRUE; -+} -+ -+static void FreeBuf(BM_BUF * pBuf, IMG_UINT32 ui32Flags) -+{ -+ BM_MAPPING *pMapping; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X", -+ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, -+ pBuf->CpuPAddr.uiAddr)); -+ -+ pMapping = pBuf->pMapping; -+ -+ if (ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) { -+ -+ if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) { -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported")); -+ } else { -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), -+ pMapping, IMG_NULL); -+ } -+ } else { -+ -+ if (pBuf->hOSMemHandle != pMapping->hOSMemHandle) { -+ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags); -+ } -+ if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) { -+ -+ RA_Free(pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, -+ IMG_FALSE); -+ } else { -+ switch (pMapping->eCpuMemoryOrigin) { -+ case hm_wrapped: -+ OSUnReservePhys(pMapping->CpuVAddr, -+ pMapping->uSize, ui32Flags, -+ pMapping->hOSMemHandle); -+ break; -+ 
case hm_wrapped_virtaddr: -+ OSUnRegisterMem(pMapping->CpuVAddr, -+ pMapping->uSize, ui32Flags, -+ pMapping->hOSMemHandle); -+ break; -+ case hm_wrapped_scatter: -+ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, -+ pMapping->uSize, -+ ui32Flags, -+ pMapping-> -+ hOSMemHandle); -+ break; -+ case hm_wrapped_scatter_virtaddr: -+ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, -+ pMapping->uSize, -+ ui32Flags, -+ pMapping-> -+ hOSMemHandle); -+ break; -+ default: -+ break; -+ } -+ -+ DevMemoryFree(pMapping); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), -+ pMapping, IMG_NULL); -+ } -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL); -+} -+ -+PVRSRV_ERROR BM_DestroyContext(IMG_HANDLE hBMContext, IMG_BOOL * pbDestroyed) -+{ -+ PVRSRV_ERROR eError; -+ BM_CONTEXT *pBMContext = (BM_CONTEXT *) hBMContext; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyContext")); -+ -+ if (pbDestroyed != IMG_NULL) { -+ *pbDestroyed = IMG_FALSE; -+ } -+ -+ if (pBMContext == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ pBMContext->ui32RefCount--; -+ -+ if (pBMContext->ui32RefCount > 0) { -+ -+ return PVRSRV_OK; -+ } -+ -+ eError = ResManFreeResByPtr(pBMContext->hResItem); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_DestroyContext: ResManFreeResByPtr failed %d", -+ eError)); -+ return eError; -+ } -+ -+ if (pbDestroyed != IMG_NULL) { -+ *pbDestroyed = IMG_TRUE; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ BM_CONTEXT *pBMContext = pvParam; -+ BM_CONTEXT **ppBMContext; -+ BM_HEAP *psBMHeap, *psTmpBMHeap; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ psDeviceNode = pBMContext->psDeviceNode; -+ -+ psBMHeap = pBMContext->psBMHeap; -+ while (psBMHeap) { -+ -+ if (psBMHeap->ui32Attribs -+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG -+ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) { -+ if (psBMHeap->pImportArena) { -+ RA_Delete(psBMHeap->pImportArena); -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_DestroyContext: backing store type unsupported")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap); -+ -+ psTmpBMHeap = psBMHeap; -+ -+ psBMHeap = psBMHeap->psNext; -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), -+ psTmpBMHeap, IMG_NULL); -+ } -+ -+ if (pBMContext->psMMUContext) { -+ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext); -+ } -+ -+ if (pBMContext->pBufferHash) { -+ HASH_Delete(pBMContext->pBufferHash); -+ } -+ -+ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext) { -+ -+ psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL; -+ } else { -+ -+ for (ppBMContext = &psDeviceNode->sDevMemoryInfo.pBMContext; -+ *ppBMContext; ppBMContext = &((*ppBMContext)->psNext)) { -+ if (*ppBMContext == pBMContext) { -+ -+ *ppBMContext = pBMContext->psNext; -+ -+ break; -+ } -+ } -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, -+ IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_HANDLE -+BM_CreateContext(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_DEV_PHYADDR * psPDDevPAddr, -+ PVRSRV_PER_PROCESS_DATA * psPerProc, IMG_BOOL * pbCreated) -+{ -+ BM_CONTEXT *pBMContext; -+ BM_HEAP *psBMHeap; -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ IMG_BOOL bKernelContext; -+ PRESMAN_CONTEXT hResManContext; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext")); -+ -+ if (psPerProc == IMG_NULL) { -+ 
bKernelContext = IMG_TRUE; -+ hResManContext = psDeviceNode->hResManContext; -+ } else { -+ bKernelContext = IMG_FALSE; -+ hResManContext = psPerProc->hResManContext; -+ } -+ -+ if (pbCreated != IMG_NULL) { -+ *pbCreated = IMG_FALSE; -+ } -+ -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ -+ if (bKernelContext == IMG_FALSE) { -+ for (pBMContext = psDevMemoryInfo->pBMContext; -+ pBMContext != IMG_NULL; pBMContext = pBMContext->psNext) { -+ if (ResManFindResourceByPtr -+ (hResManContext, -+ pBMContext->hResItem) == PVRSRV_OK) { -+ -+ pBMContext->ui32RefCount++; -+ -+ return (IMG_HANDLE) pBMContext; -+ } -+ } -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(struct _BM_CONTEXT_), -+ (IMG_PVOID *) & pBMContext, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed")); -+ return IMG_NULL; -+ } -+ OSMemSet(pBMContext, 0, sizeof(BM_CONTEXT)); -+ -+ pBMContext->psDeviceNode = psDeviceNode; -+ -+ pBMContext->pBufferHash = HASH_Create(32); -+ if (pBMContext->pBufferHash == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_CreateContext: HASH_Create failed")); -+ goto cleanup; -+ } -+ -+ if (psDeviceNode->pfnMMUInitialise(psDeviceNode, -+ &pBMContext->psMMUContext, -+ psPDDevPAddr) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_CreateContext: MMUInitialise failed")); -+ goto cleanup; -+ } -+ -+ if (bKernelContext) { -+ -+ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL); -+ psDevMemoryInfo->pBMKernelContext = pBMContext; -+ } else { -+ -+ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext); -+ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap); -+ -+ pBMContext->psBMSharedHeap = -+ psDevMemoryInfo->pBMKernelContext->psBMHeap; -+ -+ psBMHeap = pBMContext->psBMSharedHeap; -+ while (psBMHeap) { -+ switch (psBMHeap->sDevArena.DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_SHARED: -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ -+ psDeviceNode-> -+ pfnMMUInsertHeap(pBMContext-> -+ psMMUContext, -+ psBMHeap-> -+ pMMUHeap); -+ break; -+ } -+ } -+ -+ psBMHeap = psBMHeap->psNext; -+ } -+ -+ pBMContext->psNext = psDevMemoryInfo->pBMContext; -+ psDevMemoryInfo->pBMContext = pBMContext; -+ } -+ -+ pBMContext->ui32RefCount++; -+ -+ pBMContext->hResItem = ResManRegisterRes(hResManContext, -+ RESMAN_TYPE_DEVICEMEM_CONTEXT, -+ pBMContext, -+ 0, BM_DestroyContextCallBack); -+ if (pBMContext->hResItem == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_CreateContext: ResManRegisterRes failed")); -+ goto cleanup; -+ } -+ -+ if (pbCreated != IMG_NULL) { -+ *pbCreated = IMG_TRUE; -+ } -+ return (IMG_HANDLE) pBMContext; -+ -+cleanup: -+ BM_DestroyContextCallBack(pBMContext, 0); -+ -+ return IMG_NULL; -+} -+ -+IMG_HANDLE -+BM_CreateHeap(IMG_HANDLE hBMContext, DEVICE_MEMORY_HEAP_INFO * psDevMemHeapInfo) -+{ -+ BM_CONTEXT *pBMContext = (BM_CONTEXT *) hBMContext; -+ PVRSRV_DEVICE_NODE *psDeviceNode = pBMContext->psDeviceNode; -+ BM_HEAP *psBMHeap; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap")); -+ -+ if (!pBMContext) { -+ return IMG_NULL; -+ } -+ -+ if (pBMContext->ui32RefCount > 0) { -+ psBMHeap = pBMContext->psBMHeap; -+ -+ while (psBMHeap) { -+ if (psBMHeap->sDevArena.ui32HeapID == -+ psDevMemHeapInfo->ui32HeapID) -+ { -+ -+ return psBMHeap; -+ } -+ psBMHeap = psBMHeap->psNext; -+ } -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BM_HEAP), -+ (IMG_PVOID *) & psBMHeap, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed")); -+ return IMG_NULL; -+ } -+ -+ OSMemSet(psBMHeap, 0, sizeof(BM_HEAP)); -+ -+ 
psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID; -+ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName; -+ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase; -+ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize; -+ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType; -+ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo; -+ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs; -+ -+ psBMHeap->pBMContext = pBMContext; -+ -+ psBMHeap->pMMUHeap = -+ psDeviceNode->pfnMMUCreate(pBMContext->psMMUContext, -+ &psBMHeap->sDevArena, -+ &psBMHeap->pVMArena); -+ if (!psBMHeap->pMMUHeap) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed")); -+ goto ErrorExit; -+ } -+ -+ psBMHeap->pImportArena = RA_Create(psDevMemHeapInfo->pszBSName, -+ 0, 0, IMG_NULL, -+ HOST_PAGESIZE(), -+ BM_ImportMemory, -+ BM_FreeMemory, IMG_NULL, psBMHeap); -+ if (psBMHeap->pImportArena == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed")); -+ goto ErrorExit; -+ } -+ -+ if (psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) { -+ -+ psBMHeap->pLocalDevMemArena = -+ psDevMemHeapInfo->psLocalDevMemArena; -+ if (psBMHeap->pLocalDevMemArena == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_CreateHeap: LocalDevMemArena null")); -+ goto ErrorExit; -+ } -+ } -+ -+ psBMHeap->psNext = pBMContext->psBMHeap; -+ pBMContext->psBMHeap = psBMHeap; -+ -+ return (IMG_HANDLE) psBMHeap; -+ -+ErrorExit: -+ -+ if (psBMHeap->pMMUHeap != IMG_NULL) { -+ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap); -+ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext); -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL); -+ -+ return IMG_NULL; -+} -+ -+IMG_VOID BM_DestroyHeap(IMG_HANDLE hDevMemHeap) -+{ -+ BM_HEAP *psBMHeap = (BM_HEAP *) hDevMemHeap; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap")); -+ -+ if (psBMHeap) { -+ BM_HEAP **ppsBMHeap; -+ -+ if (psBMHeap->ui32Attribs -+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG -+ | PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) { -+ if (psBMHeap->pImportArena) { -+ RA_Delete(psBMHeap->pImportArena); -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_DestroyHeap: backing store type unsupported")); -+ return; -+ } -+ -+ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap); -+ -+ ppsBMHeap = &psBMHeap->pBMContext->psBMHeap; -+ while (*ppsBMHeap) { -+ if (*ppsBMHeap == psBMHeap) { -+ -+ *ppsBMHeap = psBMHeap->psNext; -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BM_HEAP), psBMHeap, IMG_NULL); -+ break; -+ } -+ ppsBMHeap = &((*ppsBMHeap)->psNext); -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle")); -+ } -+} -+ -+IMG_BOOL BM_Reinitialise(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise")); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ -+ return IMG_TRUE; -+} -+ -+IMG_BOOL -+BM_Alloc(IMG_HANDLE hDevMemHeap, -+ IMG_DEV_VIRTADDR * psDevVAddr, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 * pui32Flags, -+ IMG_UINT32 uDevVAddrAlignment, BM_HANDLE * phBuf) -+{ -+ BM_BUF *pBuf; -+ BM_CONTEXT *pBMContext; -+ BM_HEAP *psBMHeap; -+ SYS_DATA *psSysData; -+ IMG_UINT32 uFlags = 0; -+ -+ if (pui32Flags) { -+ uFlags = *pui32Flags; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)", -+ uSize, uFlags, uDevVAddrAlignment)); -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ return IMG_FALSE; -+ } -+ -+ psBMHeap = 
(BM_HEAP *) hDevMemHeap; -+ pBMContext = psBMHeap->pBMContext; -+ -+ if (uDevVAddrAlignment == 0) { -+ uDevVAddrAlignment = 1; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BM_BUF), -+ (IMG_PVOID *) & pBuf, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED")); -+ return IMG_FALSE; -+ } -+ OSMemSet(pBuf, 0, sizeof(BM_BUF)); -+ -+ if (AllocMemory(pBMContext, -+ psBMHeap, -+ psDevVAddr, -+ uSize, uFlags, uDevVAddrAlignment, pBuf) != IMG_TRUE) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, -+ IMG_NULL); -+ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED")); -+ return IMG_FALSE; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X", -+ uSize, uFlags, pBuf)); -+ -+ pBuf->ui32RefCount = 1; -+ *phBuf = (BM_HANDLE) pBuf; -+ *pui32Flags = uFlags | psBMHeap->ui32Attribs; -+ -+ return IMG_TRUE; -+} -+ -+IMG_BOOL -+BM_IsWrapped(IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Offset, IMG_SYS_PHYADDR sSysAddr) -+{ -+ BM_BUF *pBuf; -+ BM_CONTEXT *psBMContext; -+ BM_HEAP *psBMHeap; -+ -+ psBMHeap = (BM_HEAP *) hDevMemHeap; -+ psBMContext = psBMHeap->pBMContext; -+ sSysAddr.uiAddr += ui32Offset; -+ pBuf = (BM_BUF *) HASH_Retrieve(psBMContext->pBufferHash, -+ (IMG_UINTPTR_T) sSysAddr.uiAddr); -+ return pBuf != IMG_NULL; -+} -+ -+IMG_BOOL -+BM_IsWrappedCheckSize(IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Offset, -+ IMG_SYS_PHYADDR sSysAddr, -+ IMG_UINT32 ui32ByteSize) -+{ -+ BM_BUF *pBuf; -+ BM_CONTEXT *psBMContext; -+ BM_HEAP *psBMHeap; -+ -+ IMG_BOOL ret = IMG_FALSE; -+ -+ psBMHeap = (BM_HEAP *) hDevMemHeap; -+ psBMContext = psBMHeap->pBMContext; -+ sSysAddr.uiAddr += ui32Offset; -+ pBuf = (BM_BUF *) HASH_Retrieve(psBMContext->pBufferHash, -+ (IMG_UINTPTR_T) sSysAddr.uiAddr); -+ -+ if (pBuf != NULL) { -+ if (pBuf->pMapping->uSize >= ui32ByteSize) -+ ret = IMG_TRUE; -+ else -+ ret = IMG_FALSE; -+ } -+ -+ return ret; -+} -+ -+IMG_BOOL -+BM_Wrap(IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Offset, -+ IMG_BOOL bPhysContig, -+ IMG_SYS_PHYADDR * psSysAddr, -+ IMG_BOOL bFreePageList, -+ IMG_VOID * pvCPUVAddr, IMG_UINT32 * pui32Flags, BM_HANDLE * phBuf) -+{ -+ BM_BUF *pBuf; -+ BM_CONTEXT *psBMContext; -+ BM_HEAP *psBMHeap; -+ SYS_DATA *psSysData; -+ IMG_SYS_PHYADDR sHashAddress; -+ IMG_UINT32 uFlags; -+ -+ psBMHeap = (BM_HEAP *) hDevMemHeap; -+ psBMContext = psBMHeap->pBMContext; -+ -+ uFlags = -+ psBMHeap-> -+ ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK); -+ -+ if (pui32Flags) -+ uFlags |= *pui32Flags; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)", -+ ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags)); -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) -+ return IMG_FALSE; -+ -+ sHashAddress = psSysAddr[0]; -+ -+ sHashAddress.uiAddr += ui32Offset; -+ -+ pBuf = -+ (BM_BUF *) HASH_Retrieve(psBMContext->pBufferHash, -+ (IMG_UINTPTR_T) sHashAddress.uiAddr); -+ -+ if (pBuf) { -+ IMG_UINT32 ui32MappingSize = -+ HOST_PAGEALIGN(ui32Size + ui32Offset); -+ -+ if (pBuf->pMapping->uSize == ui32MappingSize -+ && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped -+ || pBuf->pMapping->eCpuMemoryOrigin == -+ hm_wrapped_virtaddr -+ || pBuf->pMapping->eCpuMemoryOrigin == -+ hm_wrapped_scatter)) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_Wrap (Matched previous Wrap! 
uSize=0x%x, uOffset=0x%x, SysAddr=%08X)", -+ ui32Size, ui32Offset, sHashAddress.uiAddr)); -+ -+ pBuf->ui32RefCount++; -+ *phBuf = (BM_HANDLE) pBuf; -+ if (pui32Flags) -+ *pui32Flags = uFlags; -+ -+ /* reusing previous mapping, free the page list */ -+ if (bFreePageList && psSysAddr) -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32MappingSize / HOST_PAGESIZE() * -+ sizeof(IMG_SYS_PHYADDR), -+ (IMG_VOID *) psSysAddr, 0); -+ return IMG_TRUE; -+ } -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BM_BUF), -+ (IMG_PVOID *) & pBuf, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED")); -+ return IMG_FALSE; -+ } -+ OSMemSet(pBuf, 0, sizeof(BM_BUF)); -+ -+ if (WrapMemory -+ (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, -+ uFlags, pBuf) != IMG_TRUE) { -+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, -+ IMG_NULL); -+ return IMG_FALSE; -+ } -+ -+ if (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped -+ || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr -+ || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_scatter) { -+ pBuf->uHashKey = (IMG_UINTPTR_T) sHashAddress.uiAddr; -+ if (!HASH_Insert -+ (psBMContext->pBufferHash, pBuf->uHashKey, -+ (IMG_UINTPTR_T) pBuf)) { -+ FreeBuf(pBuf, uFlags); -+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED")); -+ return IMG_FALSE; -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)", -+ ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr)); -+ -+ pBuf->ui32RefCount = 1; -+ *phBuf = (BM_HANDLE) pBuf; -+ if (pui32Flags) -+ *pui32Flags = uFlags; -+ -+ /* take ownership of the list if requested so */ -+ if (bFreePageList && psSysAddr) -+ pBuf->pvPageList = (void *)psSysAddr; -+ return IMG_TRUE; -+} -+ -+void BM_Free(BM_HANDLE hBuf, IMG_UINT32 ui32Flags) -+{ -+ BM_BUF *pBuf = (BM_BUF *) hBuf; -+ SYS_DATA *psSysData; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf)); -+ /* Calling BM_Free with NULL hBuf is either a bug or out-of-memory condition. 
-+ * Bail out if in debug mode, continue in release builds */ -+ PVR_ASSERT(pBuf != IMG_NULL); -+#if !defined(DEBUG) -+ if (!pBuf) { -+ return; -+ } -+#endif -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) -+ return; -+ -+ pBuf->ui32RefCount--; -+ -+ if (pBuf->ui32RefCount == 0) { -+ void *pPageList = pBuf->pvPageList; -+ IMG_UINT32 ui32ListSize = -+ pBuf->pMapping->uSize / HOST_PAGESIZE() * -+ sizeof(IMG_SYS_PHYADDR); -+ if (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped -+ || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr -+ || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_scatter) { -+ HASH_Remove(pBuf->pMapping->pBMHeap->pBMContext-> -+ pBufferHash, pBuf->uHashKey); -+ } -+ FreeBuf(pBuf, ui32Flags); -+ if (pPageList) -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32ListSize, -+ pPageList, 0); -+ } -+} -+ -+IMG_CPU_VIRTADDR BM_HandleToCpuVaddr(BM_HANDLE hBuf) -+{ -+ BM_BUF *pBuf = (BM_BUF *) hBuf; -+ -+ PVR_ASSERT(pBuf != IMG_NULL); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_HandleToCpuVaddr(h=%08X)=%08X", hBuf, pBuf->CpuVAddr)); -+ return pBuf->CpuVAddr; -+} -+ -+IMG_DEV_VIRTADDR BM_HandleToDevVaddr(BM_HANDLE hBuf) -+{ -+ BM_BUF *pBuf = (BM_BUF *) hBuf; -+ -+ PVR_ASSERT(pBuf != IMG_NULL); -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf, -+ pBuf->DevVAddr)); -+ return pBuf->DevVAddr; -+} -+ -+IMG_SYS_PHYADDR BM_HandleToSysPaddr(BM_HANDLE hBuf) -+{ -+ BM_BUF *pBuf = (BM_BUF *) hBuf; -+ -+ PVR_ASSERT(pBuf != IMG_NULL); -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, -+ pBuf->CpuPAddr.uiAddr)); -+ return SysCpuPAddrToSysPAddr(pBuf->CpuPAddr); -+} -+ -+IMG_HANDLE BM_HandleToOSMemHandle(BM_HANDLE hBuf) -+{ -+ BM_BUF *pBuf = (BM_BUF *) hBuf; -+ -+ PVR_ASSERT(pBuf != IMG_NULL); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_HandleToOSMemHandle(h=%08X)=%08X", -+ hBuf, pBuf->hOSMemHandle)); -+ return pBuf->hOSMemHandle; -+} -+ -+IMG_BOOL -+BM_ContiguousStatistics(IMG_UINT32 uFlags, -+ IMG_UINT32 * pTotalBytes, IMG_UINT32 * pAvailableBytes) -+{ -+ if (pAvailableBytes || pTotalBytes || uFlags) ; -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL -+DevMemoryAlloc(BM_CONTEXT * pBMContext, -+ BM_MAPPING * pMapping, -+ IMG_SIZE_T * pActualSize, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 dev_vaddr_alignment, IMG_DEV_VIRTADDR * pDevVAddr) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+#ifdef PDUMP -+ IMG_UINT32 ui32PDumpSize = pMapping->uSize; -+#endif -+ -+ psDeviceNode = pBMContext->psDeviceNode; -+ -+ if (uFlags & PVRSRV_MEM_INTERLEAVED) { -+ -+ pMapping->uSize *= 2; -+ } -+#ifdef PDUMP -+ if (uFlags & PVRSRV_MEM_DUMMY) { -+ -+ ui32PDumpSize = HOST_PAGESIZE(); -+ } -+#endif -+ -+ if (!psDeviceNode->pfnMMUAlloc(pMapping->pBMHeap->pMMUHeap, -+ pMapping->uSize, -+ pActualSize, -+ 0, -+ dev_vaddr_alignment, -+ &(pMapping->DevVAddr))) { -+ PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc")); -+ return IMG_FALSE; -+ } -+ -+ PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType, -+ pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr, -+ pMapping->hOSMemHandle, ui32PDumpSize, -+ (IMG_HANDLE) pMapping); -+ -+ switch (pMapping->eCpuMemoryOrigin) { -+ case hm_wrapped: -+ case hm_wrapped_virtaddr: -+ case hm_contiguous: -+ { -+ psDeviceNode->pfnMMUMapPages(pMapping->pBMHeap-> -+ pMMUHeap, -+ pMapping->DevVAddr, -+ SysCpuPAddrToSysPAddr -+ (pMapping->CpuPAddr), -+ pMapping->uSize, uFlags, -+ (IMG_HANDLE) pMapping); -+ -+ *pDevVAddr = pMapping->DevVAddr; -+ break; -+ } -+ case hm_env: -+ { -+ psDeviceNode->pfnMMUMapShadow(pMapping->pBMHeap-> -+ pMMUHeap, -+ pMapping->DevVAddr, -+ pMapping->uSize, 
-+ pMapping->CpuVAddr, -+ pMapping->hOSMemHandle, -+ pDevVAddr, uFlags, -+ (IMG_HANDLE) pMapping); -+ break; -+ } -+ case hm_wrapped_scatter: -+ case hm_wrapped_scatter_virtaddr: -+ { -+ psDeviceNode->pfnMMUMapScatter(pMapping->pBMHeap-> -+ pMMUHeap, -+ pMapping->DevVAddr, -+ pMapping->psSysAddr, -+ pMapping->uSize, uFlags, -+ (IMG_HANDLE) pMapping); -+ -+ *pDevVAddr = pMapping->DevVAddr; -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "Illegal value %d for pMapping->eCpuMemoryOrigin", -+ pMapping->eCpuMemoryOrigin)); -+ return IMG_FALSE; -+ } -+ -+ -+ return IMG_TRUE; -+} -+ -+static void DevMemoryFree(BM_MAPPING * pMapping) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+#ifdef PDUMP -+ IMG_UINT32 ui32PSize; -+#endif -+ -+#ifdef PDUMP -+ -+ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY) { -+ -+ ui32PSize = HOST_PAGESIZE(); -+ } else { -+ ui32PSize = pMapping->uSize; -+ } -+ -+ PDUMPFREEPAGES(pMapping->pBMHeap, pMapping->DevVAddr, -+ ui32PSize, (IMG_HANDLE) pMapping, -+ (IMG_BOOL) (pMapping-> -+ ui32Flags & PVRSRV_MEM_INTERLEAVED)); -+#endif -+ -+ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode; -+ -+ psDeviceNode->pfnMMUFree(pMapping->pBMHeap->pMMUHeap, -+ pMapping->DevVAddr, pMapping->uSize); -+} -+ -+static IMG_BOOL -+BM_ImportMemory(void *pH, -+ IMG_SIZE_T uRequestSize, -+ IMG_SIZE_T * pActualSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, IMG_UINTPTR_T * pBase) -+{ -+ BM_MAPPING *pMapping; -+ BM_HEAP *pBMHeap = pH; -+ BM_CONTEXT *pBMContext = pBMHeap->pBMContext; -+ IMG_BOOL bResult; -+ IMG_SIZE_T uSize; -+ IMG_SIZE_T uPSize; -+ IMG_UINT32 uDevVAddrAlignment = 0; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)", -+ pBMContext, uRequestSize, uFlags, uDevVAddrAlignment)); -+ -+ PVR_ASSERT(ppsMapping != IMG_NULL); -+ PVR_ASSERT(pBMContext != IMG_NULL); -+ -+ uSize = HOST_PAGEALIGN(uRequestSize); -+ PVR_ASSERT(uSize >= uRequestSize); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BM_MAPPING), -+ (IMG_PVOID *) & pMapping, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_ImportMemory: failed BM_MAPPING alloc")); -+ goto fail_exit; -+ } -+ -+ pMapping->hOSMemHandle = 0; -+ pMapping->CpuVAddr = 0; -+ pMapping->DevVAddr.uiAddr = 0; -+ pMapping->CpuPAddr.uiAddr = 0; -+ pMapping->uSize = uSize; -+ pMapping->pBMHeap = pBMHeap; -+ pMapping->ui32Flags = uFlags; -+ -+ if (pActualSize) { -+ *pActualSize = uSize; -+ } -+ -+ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY) { -+ uPSize = HOST_PAGESIZE(); -+ } else { -+ uPSize = pMapping->uSize; -+ } -+ -+ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) { -+ -+ if (OSAllocPages(pBMHeap->ui32Attribs, -+ uPSize, -+ (IMG_VOID **) & pMapping->CpuVAddr, -+ &pMapping->hOSMemHandle) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_ImportMemory: OSAllocPages(0x%x) failed", -+ uPSize)); -+ goto fail_mapping_alloc; -+ } -+ -+ pMapping->eCpuMemoryOrigin = hm_env; -+ } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) { -+ IMG_SYS_PHYADDR sSysPAddr; -+ -+ PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL); -+ -+ if (!RA_Alloc(pBMHeap->pLocalDevMemArena, -+ uPSize, -+ IMG_NULL, -+ IMG_NULL, -+ 0, -+ HOST_PAGESIZE(), -+ 0, (IMG_UINTPTR_T *) & sSysPAddr.uiAddr)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_ImportMemory: RA_Alloc(0x%x) FAILED", -+ uPSize)); -+ goto fail_mapping_alloc; -+ } -+ -+ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); -+ if (OSReservePhys(pMapping->CpuPAddr, -+ uPSize, -+ pBMHeap->ui32Attribs, -+ 
&pMapping->CpuVAddr, -+ &pMapping->hOSMemHandle) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_ImportMemory: OSReservePhys failed")); -+ goto fail_dev_mem_alloc; -+ } -+ -+ pMapping->eCpuMemoryOrigin = hm_contiguous; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_ImportMemory: Invalid backing store type")); -+ goto fail_mapping_alloc; -+ } -+ -+ bResult = DevMemoryAlloc(pBMContext, pMapping, IMG_NULL, uFlags, -+ uDevVAddrAlignment, &pMapping->DevVAddr); -+ if (!bResult) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed", -+ pMapping->uSize)); -+ goto fail_dev_mem_alloc; -+ } -+ -+ PVR_ASSERT(uDevVAddrAlignment > -+ 1 ? (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == -+ 0 : 1); -+ -+ *pBase = pMapping->DevVAddr.uiAddr; -+ *ppsMapping = pMapping; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE")); -+ return IMG_TRUE; -+ -+fail_dev_mem_alloc: -+ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) { -+ -+ if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) { -+ pMapping->uSize /= 2; -+ } -+ -+ if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY) { -+ uPSize = HOST_PAGESIZE(); -+ } else { -+ uPSize = pMapping->uSize; -+ } -+ -+ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) { -+ OSFreePages(pBMHeap->ui32Attribs, -+ uPSize, -+ (void *)pMapping->CpuVAddr, -+ pMapping->hOSMemHandle); -+ } else { -+ IMG_SYS_PHYADDR sSysPAddr; -+ -+ if (pMapping->CpuVAddr) { -+ OSUnReservePhys(pMapping->CpuVAddr, uPSize, -+ pBMHeap->ui32Attribs, -+ pMapping->hOSMemHandle); -+ } -+ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr); -+ RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, -+ IMG_FALSE); -+ } -+ } -+fail_mapping_alloc: -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, -+ IMG_NULL); -+fail_exit: -+ return IMG_FALSE; -+} -+ -+static void BM_FreeMemory(void *h, IMG_UINTPTR_T _base, BM_MAPPING * psMapping) -+{ -+ BM_HEAP *pBMHeap = h; -+ IMG_SIZE_T uPSize; -+ -+ PVR_UNREFERENCED_PARAMETER(_base); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base, -+ psMapping)); -+ -+ PVR_ASSERT(psMapping != IMG_NULL); -+ -+ DevMemoryFree(psMapping); -+ -+ if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0) { -+ psMapping->uSize /= 2; -+ } -+ -+ if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY) { -+ uPSize = HOST_PAGESIZE(); -+ } else { -+ uPSize = psMapping->uSize; -+ } -+ -+ if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) { -+ OSFreePages(pBMHeap->ui32Attribs, -+ uPSize, -+ (void *)psMapping->CpuVAddr, -+ psMapping->hOSMemHandle); -+ } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) { -+ IMG_SYS_PHYADDR sSysPAddr; -+ -+ OSUnReservePhys(psMapping->CpuVAddr, uPSize, -+ pBMHeap->ui32Attribs, psMapping->hOSMemHandle); -+ -+ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr); -+ -+ RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, -+ IMG_FALSE); -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "BM_FreeMemory: Invalid backing store type")); -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, -+ IMG_NULL); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", -+ h, _base, psMapping)); -+} -+ -+PVRSRV_ERROR BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_DEV_VIRTADDR sDevVPageAddr, -+ IMG_DEV_PHYADDR * psDevPAddr) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr")); -+ -+ if (!psMemInfo || !psDevPAddr) { -+ PVR_DPF((PVR_DBG_ERROR, 
"BM_GetPhysPageAddr: Invalid params")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0); -+ -+ psDeviceNode = -+ ((BM_BUF *) psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap-> -+ pBMContext->psDeviceNode; -+ -+ *psDevPAddr = -+ psDeviceNode-> -+ pfnMMUGetPhysPageAddr(((BM_BUF *) psMemInfo->sMemBlk.hBuffer)-> -+ pMapping->pBMHeap->pMMUHeap, sDevVPageAddr); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, -+ PVRSRV_HEAP_INFO * psHeapInfo) -+{ -+ BM_HEAP *psBMHeap = (BM_HEAP *) hDevMemHeap; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo")); -+ -+ psHeapInfo->hDevMemHeap = hDevMemHeap; -+ psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr; -+ psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size; -+ psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs; -+ -+ return PVRSRV_OK; -+} -+ -+MMU_CONTEXT *BM_GetMMUContext(IMG_HANDLE hDevMemHeap) -+{ -+ BM_HEAP *pBMHeap = (BM_HEAP *) hDevMemHeap; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext")); -+ -+ return pBMHeap->pBMContext->psMMUContext; -+} -+ -+MMU_CONTEXT *BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext) -+{ -+ BM_CONTEXT *pBMContext = (BM_CONTEXT *) hDevMemContext; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext")); -+ -+ return pBMContext->psMMUContext; -+} -+ -+IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap) -+{ -+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap")); -+ -+ return (IMG_HANDLE) ((BM_HEAP *) hDevMemHeap)->pMMUHeap; -+} -+ -+PVRSRV_DEVICE_NODE *BM_GetDeviceNode(IMG_HANDLE hDevMemContext) -+{ -+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode")); -+ -+ return ((BM_CONTEXT *) hDevMemContext)->psDeviceNode; -+} -+ -+IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO * psMemInfo) -+{ -+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle")); -+ -+ return ((BM_BUF *) psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/buffer_manager.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/buffer_manager.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/buffer_manager.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/buffer_manager.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,189 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _BUFFER_MANAGER_H_ -+#define _BUFFER_MANAGER_H_ -+ -+#include "img_types.h" -+#include "ra.h" -+#include "perproc.h" -+ -+ -+ typedef struct _BM_HEAP_ BM_HEAP; -+ -+ struct _BM_MAPPING_ { -+ enum { -+ hm_wrapped = 1, -+ hm_wrapped_scatter, -+ hm_wrapped_virtaddr, -+ hm_wrapped_scatter_virtaddr, -+ hm_env, -+ hm_contiguous -+ } eCpuMemoryOrigin; -+ -+ BM_HEAP *pBMHeap; -+ RA_ARENA *pArena; -+ -+ IMG_CPU_VIRTADDR CpuVAddr; -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_DEV_VIRTADDR DevVAddr; -+ IMG_SYS_PHYADDR *psSysAddr; -+ IMG_SIZE_T uSize; -+ IMG_HANDLE hOSMemHandle; -+ IMG_UINT32 ui32Flags; -+ }; -+ -+ typedef struct _BM_BUF_ { -+ IMG_CPU_VIRTADDR *CpuVAddr; -+ IMG_VOID *hOSMemHandle; -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_DEV_VIRTADDR DevVAddr; -+ -+ BM_MAPPING *pMapping; -+ IMG_UINT32 ui32RefCount; -+ IMG_UINTPTR_T uHashKey; -+ void *pvKernelSyncInfo; -+ void *pvPageList; -+ IMG_HANDLE hOSWrapMem; -+ } BM_BUF; -+ -+ struct _BM_HEAP_ { -+ IMG_UINT32 ui32Attribs; -+ BM_CONTEXT *pBMContext; -+ RA_ARENA *pImportArena; -+ RA_ARENA *pLocalDevMemArena; -+ RA_ARENA *pVMArena; -+ DEV_ARENA_DESCRIPTOR sDevArena; -+ MMU_HEAP *pMMUHeap; -+ -+ struct _BM_HEAP_ *psNext; -+ }; -+ -+ struct _BM_CONTEXT_ { -+ MMU_CONTEXT *psMMUContext; -+ -+ BM_HEAP *psBMHeap; -+ -+ BM_HEAP *psBMSharedHeap; -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ HASH_TABLE *pBufferHash; -+ -+ IMG_HANDLE hResItem; -+ -+ IMG_UINT32 ui32RefCount; -+ -+ struct _BM_CONTEXT_ *psNext; -+ }; -+ -+ typedef void *BM_HANDLE; -+ -+#define BP_POOL_MASK 0x7 -+ -+#define BP_CONTIGUOUS (1 << 3) -+#define BP_PARAMBUFFER (1 << 4) -+ -+#define BM_MAX_DEVMEM_ARENAS 2 -+ -+ IMG_HANDLE -+ BM_CreateContext(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_DEV_PHYADDR * psPDDevPAddr, -+ PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_BOOL * pbCreated); -+ -+ PVRSRV_ERROR -+ BM_DestroyContext(IMG_HANDLE hBMContext, IMG_BOOL * pbCreated); -+ -+ IMG_HANDLE -+ BM_CreateHeap(IMG_HANDLE hBMContext, -+ DEVICE_MEMORY_HEAP_INFO * psDevMemHeapInfo); -+ -+ IMG_VOID BM_DestroyHeap(IMG_HANDLE hDevMemHeap); -+ -+ IMG_BOOL BM_Reinitialise(PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+ IMG_BOOL -+ BM_Alloc(IMG_HANDLE hDevMemHeap, -+ IMG_DEV_VIRTADDR * psDevVAddr, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 * pui32Flags, -+ IMG_UINT32 uDevVAddrAlignment, BM_HANDLE * phBuf); -+ -+ IMG_BOOL -+ BM_IsWrapped(IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Offset, IMG_SYS_PHYADDR sSysAddr); -+ -+ IMG_BOOL -+ BM_IsWrappedCheckSize(IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Offset, -+ IMG_SYS_PHYADDR sSysAddr, -+ IMG_UINT32 ui32ByteSize); -+ -+ IMG_BOOL -+ BM_Wrap(IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Offset, -+ IMG_BOOL bPhysContig, -+ IMG_SYS_PHYADDR * psSysAddr, -+ IMG_BOOL bFreePageList, -+ IMG_VOID * pvCPUVAddr, -+ IMG_UINT32 * pui32Flags, BM_HANDLE * phBuf); -+ -+ void -+ BM_Free(BM_HANDLE hBuf, IMG_UINT32 ui32Flags); -+ -+ IMG_CPU_VIRTADDR BM_HandleToCpuVaddr(BM_HANDLE hBuf); -+ -+ IMG_DEV_VIRTADDR BM_HandleToDevVaddr(BM_HANDLE hBuf); -+ -+ IMG_SYS_PHYADDR BM_HandleToSysPaddr(BM_HANDLE hBuf); -+ -+ IMG_HANDLE BM_HandleToOSMemHandle(BM_HANDLE hBuf); -+ -+ IMG_BOOL -+ BM_ContiguousStatistics(IMG_UINT32 uFlags, -+ IMG_UINT32 * pTotalBytes, -+ IMG_UINT32 * pAvailableBytes); -+ -+ PVRSRV_ERROR BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_DEV_VIRTADDR sDevVPageAddr, -+ IMG_DEV_PHYADDR * psDevPAddr); -+ -+ PVRSRV_ERROR 
BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, -+ PVRSRV_HEAP_INFO * psHeapInfo); -+ -+ MMU_CONTEXT *BM_GetMMUContext(IMG_HANDLE hDevMemHeap); -+ -+ MMU_CONTEXT *BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext); -+ -+ IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap); -+ -+ PVRSRV_DEVICE_NODE *BM_GetDeviceNode(IMG_HANDLE hDevMemContext); -+ -+ IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO * psMemInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/COPYING linux-omap-2.6.28-nokia1/drivers/gpu/pvr/COPYING ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/COPYING 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/COPYING 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,351 @@ -+ -+This software is Copyright (C) 2008 Imagination Technologies Ltd. -+ All rights reserved. -+ -+You may use, distribute and copy this software under the terms of -+GNU General Public License version 2, which is displayed below. -+ -+------------------------------------------------------------------------- -+ -+ GNU GENERAL PUBLIC LICENSE -+ Version 2, June 1991 -+ -+ Copyright (C) 1989, 1991 Free Software Foundation, Inc. -+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ Everyone is permitted to copy and distribute verbatim copies -+ of this license document, but changing it is not allowed. -+ -+ Preamble -+ -+ The licenses for most software are designed to take away your -+freedom to share and change it. By contrast, the GNU General Public -+License is intended to guarantee your freedom to share and change free -+software--to make sure the software is free for all its users. This -+General Public License applies to most of the Free Software -+Foundation's software and to any other program whose authors commit to -+using it. (Some other Free Software Foundation software is covered by -+the GNU Library General Public License instead.) You can apply it to -+your programs, too. -+ -+ When we speak of free software, we are referring to freedom, not -+price. Our General Public Licenses are designed to make sure that you -+have the freedom to distribute copies of free software (and charge for -+this service if you wish), that you receive source code or can get it -+if you want it, that you can change the software or use pieces of it -+in new free programs; and that you know you can do these things. -+ -+ To protect your rights, we need to make restrictions that forbid -+anyone to deny you these rights or to ask you to surrender the rights. -+These restrictions translate to certain responsibilities for you if you -+distribute copies of the software, or if you modify it. -+ -+ For example, if you distribute copies of such a program, whether -+gratis or for a fee, you must give the recipients all the rights that -+you have. You must make sure that they, too, receive or can get the -+source code. And you must show them these terms so they know their -+rights. -+ -+ We protect your rights with two steps: (1) copyright the software, and -+(2) offer you this license which gives you legal permission to copy, -+distribute and/or modify the software. -+ -+ Also, for each author's protection and ours, we want to make certain -+that everyone understands that there is no warranty for this free -+software. If the software is modified by someone else and passed on, we -+want its recipients to know that what they have is not the original, so -+that any problems introduced by others will not reflect on the original -+authors' reputations. 
-+ -+ Finally, any free program is threatened constantly by software -+patents. We wish to avoid the danger that redistributors of a free -+program will individually obtain patent licenses, in effect making the -+program proprietary. To prevent this, we have made it clear that any -+patent must be licensed for everyone's free use or not licensed at all. -+ -+ The precise terms and conditions for copying, distribution and -+modification follow. -+ -+ GNU GENERAL PUBLIC LICENSE -+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION -+ -+ 0. This License applies to any program or other work which contains -+a notice placed by the copyright holder saying it may be distributed -+under the terms of this General Public License. The "Program", below, -+refers to any such program or work, and a "work based on the Program" -+means either the Program or any derivative work under copyright law: -+that is to say, a work containing the Program or a portion of it, -+either verbatim or with modifications and/or translated into another -+language. (Hereinafter, translation is included without limitation in -+the term "modification".) Each licensee is addressed as "you". -+ -+Activities other than copying, distribution and modification are not -+covered by this License; they are outside its scope. The act of -+running the Program is not restricted, and the output from the Program -+is covered only if its contents constitute a work based on the -+Program (independent of having been made by running the Program). -+Whether that is true depends on what the Program does. -+ -+ 1. You may copy and distribute verbatim copies of the Program's -+source code as you receive it, in any medium, provided that you -+conspicuously and appropriately publish on each copy an appropriate -+copyright notice and disclaimer of warranty; keep intact all the -+notices that refer to this License and to the absence of any warranty; -+and give any other recipients of the Program a copy of this License -+along with the Program. -+ -+You may charge a fee for the physical act of transferring a copy, and -+you may at your option offer warranty protection in exchange for a fee. -+ -+ 2. You may modify your copy or copies of the Program or any portion -+of it, thus forming a work based on the Program, and copy and -+distribute such modifications or work under the terms of Section 1 -+above, provided that you also meet all of these conditions: -+ -+ a) You must cause the modified files to carry prominent notices -+ stating that you changed the files and the date of any change. -+ -+ b) You must cause any work that you distribute or publish, that in -+ whole or in part contains or is derived from the Program or any -+ part thereof, to be licensed as a whole at no charge to all third -+ parties under the terms of this License. -+ -+ c) If the modified program normally reads commands interactively -+ when run, you must cause it, when started running for such -+ interactive use in the most ordinary way, to print or display an -+ announcement including an appropriate copyright notice and a -+ notice that there is no warranty (or else, saying that you provide -+ a warranty) and that users may redistribute the program under -+ these conditions, and telling the user how to view a copy of this -+ License. (Exception: if the Program itself is interactive but -+ does not normally print such an announcement, your work based on -+ the Program is not required to print an announcement.) 
-+ -+These requirements apply to the modified work as a whole. If -+identifiable sections of that work are not derived from the Program, -+and can be reasonably considered independent and separate works in -+themselves, then this License, and its terms, do not apply to those -+sections when you distribute them as separate works. But when you -+distribute the same sections as part of a whole which is a work based -+on the Program, the distribution of the whole must be on the terms of -+this License, whose permissions for other licensees extend to the -+entire whole, and thus to each and every part regardless of who wrote it. -+ -+Thus, it is not the intent of this section to claim rights or contest -+your rights to work written entirely by you; rather, the intent is to -+exercise the right to control the distribution of derivative or -+collective works based on the Program. -+ -+In addition, mere aggregation of another work not based on the Program -+with the Program (or with a work based on the Program) on a volume of -+a storage or distribution medium does not bring the other work under -+the scope of this License. -+ -+ 3. You may copy and distribute the Program (or a work based on it, -+under Section 2) in object code or executable form under the terms of -+Sections 1 and 2 above provided that you also do one of the following: -+ -+ a) Accompany it with the complete corresponding machine-readable -+ source code, which must be distributed under the terms of Sections -+ 1 and 2 above on a medium customarily used for software interchange; or, -+ -+ b) Accompany it with a written offer, valid for at least three -+ years, to give any third party, for a charge no more than your -+ cost of physically performing source distribution, a complete -+ machine-readable copy of the corresponding source code, to be -+ distributed under the terms of Sections 1 and 2 above on a medium -+ customarily used for software interchange; or, -+ -+ c) Accompany it with the information you received as to the offer -+ to distribute corresponding source code. (This alternative is -+ allowed only for noncommercial distribution and only if you -+ received the program in object code or executable form with such -+ an offer, in accord with Subsection b above.) -+ -+The source code for a work means the preferred form of the work for -+making modifications to it. For an executable work, complete source -+code means all the source code for all modules it contains, plus any -+associated interface definition files, plus the scripts used to -+control compilation and installation of the executable. However, as a -+special exception, the source code distributed need not include -+anything that is normally distributed (in either source or binary -+form) with the major components (compiler, kernel, and so on) of the -+operating system on which the executable runs, unless that component -+itself accompanies the executable. -+ -+If distribution of executable or object code is made by offering -+access to copy from a designated place, then offering equivalent -+access to copy the source code from the same place counts as -+distribution of the source code, even though third parties are not -+compelled to copy the source along with the object code. -+ -+ 4. You may not copy, modify, sublicense, or distribute the Program -+except as expressly provided under this License. Any attempt -+otherwise to copy, modify, sublicense or distribute the Program is -+void, and will automatically terminate your rights under this License. 
-+However, parties who have received copies, or rights, from you under -+this License will not have their licenses terminated so long as such -+parties remain in full compliance. -+ -+ 5. You are not required to accept this License, since you have not -+signed it. However, nothing else grants you permission to modify or -+distribute the Program or its derivative works. These actions are -+prohibited by law if you do not accept this License. Therefore, by -+modifying or distributing the Program (or any work based on the -+Program), you indicate your acceptance of this License to do so, and -+all its terms and conditions for copying, distributing or modifying -+the Program or works based on it. -+ -+ 6. Each time you redistribute the Program (or any work based on the -+Program), the recipient automatically receives a license from the -+original licensor to copy, distribute or modify the Program subject to -+these terms and conditions. You may not impose any further -+restrictions on the recipients' exercise of the rights granted herein. -+You are not responsible for enforcing compliance by third parties to -+this License. -+ -+ 7. If, as a consequence of a court judgment or allegation of patent -+infringement or for any other reason (not limited to patent issues), -+conditions are imposed on you (whether by court order, agreement or -+otherwise) that contradict the conditions of this License, they do not -+excuse you from the conditions of this License. If you cannot -+distribute so as to satisfy simultaneously your obligations under this -+License and any other pertinent obligations, then as a consequence you -+may not distribute the Program at all. For example, if a patent -+license would not permit royalty-free redistribution of the Program by -+all those who receive copies directly or indirectly through you, then -+the only way you could satisfy both it and this License would be to -+refrain entirely from distribution of the Program. -+ -+If any portion of this section is held invalid or unenforceable under -+any particular circumstance, the balance of the section is intended to -+apply and the section as a whole is intended to apply in other -+circumstances. -+ -+It is not the purpose of this section to induce you to infringe any -+patents or other property right claims or to contest validity of any -+such claims; this section has the sole purpose of protecting the -+integrity of the free software distribution system, which is -+implemented by public license practices. Many people have made -+generous contributions to the wide range of software distributed -+through that system in reliance on consistent application of that -+system; it is up to the author/donor to decide if he or she is willing -+to distribute software through any other system and a licensee cannot -+impose that choice. -+ -+This section is intended to make thoroughly clear what is believed to -+be a consequence of the rest of this License. -+ -+ 8. If the distribution and/or use of the Program is restricted in -+certain countries either by patents or by copyrighted interfaces, the -+original copyright holder who places the Program under this License -+may add an explicit geographical distribution limitation excluding -+those countries, so that distribution is permitted only in or among -+countries not thus excluded. In such case, this License incorporates -+the limitation as if written in the body of this License. -+ -+ 9. 
The Free Software Foundation may publish revised and/or new versions -+of the General Public License from time to time. Such new versions will -+be similar in spirit to the present version, but may differ in detail to -+address new problems or concerns. -+ -+Each version is given a distinguishing version number. If the Program -+specifies a version number of this License which applies to it and "any -+later version", you have the option of following the terms and conditions -+either of that version or of any later version published by the Free -+Software Foundation. If the Program does not specify a version number of -+this License, you may choose any version ever published by the Free Software -+Foundation. -+ -+ 10. If you wish to incorporate parts of the Program into other free -+programs whose distribution conditions are different, write to the author -+to ask for permission. For software which is copyrighted by the Free -+Software Foundation, write to the Free Software Foundation; we sometimes -+make exceptions for this. Our decision will be guided by the two goals -+of preserving the free status of all derivatives of our free software and -+of promoting the sharing and reuse of software generally. -+ -+ NO WARRANTY -+ -+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -+REPAIR OR CORRECTION. -+ -+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -+POSSIBILITY OF SUCH DAMAGES. -+ -+ END OF TERMS AND CONDITIONS -+ -+ Appendix: How to Apply These Terms to Your New Programs -+ -+ If you develop a new program, and you want it to be of the greatest -+possible use to the public, the best way to achieve this is to make it -+free software which everyone can redistribute and change under these terms. -+ -+ To do so, attach the following notices to the program. It is safest -+to attach them to the start of each source file to most effectively -+convey the exclusion of warranty; and each file should have at least -+the "copyright" line and a pointer to where the full notice is found. -+ -+ -+ Copyright (C) 19yy -+ -+ This program is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 2 of the License, or -+ (at your option) any later version. 
-+ -+ This program is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with this program; if not, write to the Free Software -+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ -+Also add information on how to contact you by electronic and paper mail. -+ -+If the program is interactive, make it output a short notice like this -+when it starts in an interactive mode: -+ -+ Gnomovision version 69, Copyright (C) 19yy name of author -+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. -+ This is free software, and you are welcome to redistribute it -+ under certain conditions; type `show c' for details. -+ -+The hypothetical commands `show w' and `show c' should show the appropriate -+parts of the General Public License. Of course, the commands you use may -+be called something other than `show w' and `show c'; they could even be -+mouse-clicks or menu items--whatever suits your program. -+ -+You should also get your employer (if you work as a programmer) or your -+school, if any, to sign a "copyright disclaimer" for the program, if -+necessary. Here is a sample; alter the names: -+ -+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program -+ `Gnomovision' (which makes passes at compilers) written by James Hacker. -+ -+ , 1 April 1989 -+ Ty Coon, President of Vice -+ -+This General Public License does not permit incorporating your program into -+proprietary programs. If your program is a subroutine library, you may -+consider it more useful to permit linking proprietary applications with the -+library. If this is what you want to do, use the GNU Library General -+Public License instead of this License. -+ -+------------------------------------------------------------------------- -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/dbgdrvif.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/dbgdrvif.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/dbgdrvif.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/dbgdrvif.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,283 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _DBGDRVIF_ -+#define _DBGDRVIF_ -+ -+#include "ioctldef.h" -+ -+#define DEBUG_CAPMODE_FRAMED 0x00000001 -+#define DEBUG_CAPMODE_CONTINUOUS 0x00000002 -+#define DEBUG_CAPMODE_HOTKEY 0x00000004 -+ -+#define DEBUG_OUTMODE_STANDARDDBG 0x00000001 -+#define DEBUG_OUTMODE_MONO 0x00000002 -+#define DEBUG_OUTMODE_STREAMENABLE 0x00000004 -+#define DEBUG_OUTMODE_ASYNC 0x00000008 -+#define DEBUG_OUTMODE_SGXVGA 0x00000010 -+ -+#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001 -+#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002 -+#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004 -+ -+#define DEBUG_FLAGS_TEXTSTREAM 0x80000000 -+ -+#define DEBUG_LEVEL_0 0x00000001 -+#define DEBUG_LEVEL_1 0x00000003 -+#define DEBUG_LEVEL_2 0x00000007 -+#define DEBUG_LEVEL_3 0x0000000F -+#define DEBUG_LEVEL_4 0x0000001F -+#define DEBUG_LEVEL_5 0x0000003F -+#define DEBUG_LEVEL_6 0x0000007F -+#define DEBUG_LEVEL_7 0x000000FF -+#define DEBUG_LEVEL_8 0x000001FF -+#define DEBUG_LEVEL_9 0x000003FF -+#define DEBUG_LEVEL_10 0x000007FF -+#define DEBUG_LEVEL_11 0x00000FFF -+ -+#define DEBUG_LEVEL_SEL0 0x00000001 -+#define DEBUG_LEVEL_SEL1 0x00000002 -+#define DEBUG_LEVEL_SEL2 0x00000004 -+#define DEBUG_LEVEL_SEL3 0x00000008 -+#define DEBUG_LEVEL_SEL4 0x00000010 -+#define DEBUG_LEVEL_SEL5 0x00000020 -+#define DEBUG_LEVEL_SEL6 0x00000040 -+#define DEBUG_LEVEL_SEL7 0x00000080 -+#define DEBUG_LEVEL_SEL8 0x00000100 -+#define DEBUG_LEVEL_SEL9 0x00000200 -+#define DEBUG_LEVEL_SEL10 0x00000400 -+#define DEBUG_LEVEL_SEL11 0x00000800 -+ -+#define DEBUG_SERVICE_IOCTL_BASE 0x800 -+#define DEBUG_SERVICE_CREATESTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_DESTROYSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_GETSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_WRITESTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_READSTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_WRITE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_READ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_SETDEBUGMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_SETDEBUGOUTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_SETDEBUGLEVEL CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_SETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_GETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_OVERRIDEMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_DEFAULTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, METHOD_BUFFERED, FILE_ANY_ACCESS) 
-+#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_WRITE2 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_WRITESTRINGCM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_WRITECM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_SETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_GETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_ISCAPTUREFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_WRITELF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, METHOD_BUFFERED, FILE_ANY_ACCESS) -+#define DEBUG_SERVICE_READLF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, METHOD_BUFFERED, FILE_ANY_ACCESS) -+ -+typedef struct _DBG_IN_CREATESTREAM_ { -+ IMG_UINT32 ui32Pages; -+ IMG_UINT32 ui32CapMode; -+ IMG_UINT32 ui32OutMode; -+ IMG_CHAR *pszName; -+} DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM; -+ -+typedef struct _DBG_IN_FINDSTREAM_ { -+ IMG_BOOL bResetStream; -+ IMG_CHAR *pszName; -+} DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM; -+ -+typedef struct _DBG_IN_WRITESTRING_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Level; -+ IMG_CHAR *pszString; -+} DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING; -+ -+typedef struct _DBG_IN_READSTRING_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32StringLen; -+ IMG_CHAR *pszString; -+} DBG_IN_READSTRING, *PDBG_IN_READSTRING; -+ -+typedef struct _DBG_IN_SETDEBUGMODE_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Mode; -+ IMG_UINT32 ui32Start; -+ IMG_UINT32 ui32End; -+ IMG_UINT32 ui32SampleRate; -+} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE; -+ -+typedef struct _DBG_IN_SETDEBUGOUTMODE_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Mode; -+} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE; -+ -+typedef struct _DBG_IN_SETDEBUGLEVEL_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Level; -+} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL; -+ -+typedef struct _DBG_IN_SETFRAME_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Frame; -+} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME; -+ -+typedef struct _DBG_IN_WRITE_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Level; -+ IMG_UINT32 ui32TransferSize; -+ IMG_UINT8 *pui8InBuffer; -+} DBG_IN_WRITE, *PDBG_IN_WRITE; -+ -+typedef struct _DBG_IN_READ_ { -+ IMG_VOID *pvStream; -+ IMG_BOOL bReadInitBuffer; -+ IMG_UINT32 ui32OutBufferSize; -+ IMG_UINT8 *pui8OutBuffer; -+} DBG_IN_READ, *PDBG_IN_READ; -+ -+typedef struct _DBG_IN_OVERRIDEMODE_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Mode; -+} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE; -+ -+typedef struct _DBG_IN_ISCAPTUREFRAME_ { -+ IMG_VOID *pvStream; -+ IMG_BOOL bCheckPreviousFrame; -+} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME; -+ -+typedef struct _DBG_IN_SETMARKER_ { -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Marker; -+} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER; -+ -+typedef struct _DBG_IN_WRITE_LF_ { -+ IMG_UINT32 ui32Flags; -+ IMG_VOID *pvStream; -+ IMG_UINT32 ui32Level; -+ IMG_UINT32 ui32BufferSize; -+ IMG_UINT8 *pui8InBuffer; -+} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF; -+ -+#define WRITELF_FLAGS_RESETBUF 0x00000001 -+ -+typedef struct _DBG_STREAM_ { -+ 
struct _DBG_STREAM_ *psNext; -+ struct _DBG_STREAM_ *psInitStream; -+ IMG_BOOL bInitPhaseComplete; -+ IMG_UINT32 ui32Flags; -+ IMG_UINT32 ui32Base; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32RPtr; -+ IMG_UINT32 ui32WPtr; -+ IMG_UINT32 ui32DataWritten; -+ IMG_UINT32 ui32CapMode; -+ IMG_UINT32 ui32OutMode; -+ IMG_UINT32 ui32DebugLevel; -+ IMG_UINT32 ui32DefaultMode; -+ IMG_UINT32 ui32Start; -+ IMG_UINT32 ui32End; -+ IMG_UINT32 ui32Current; -+ IMG_UINT32 ui32Access; -+ IMG_UINT32 ui32SampleRate; -+ IMG_UINT32 ui32Reserved; -+ IMG_UINT32 ui32Timeout; -+ IMG_UINT32 ui32Marker; -+ IMG_CHAR szName[30]; -+} DBG_STREAM, *PDBG_STREAM; -+ -+typedef struct _DBGKM_SERVICE_TABLE_ { -+ IMG_UINT32 ui32Size; -+ IMG_VOID *(IMG_CALLCONV * pfnCreateStream) (IMG_CHAR * pszName, -+ IMG_UINT32 ui32CapMode, -+ IMG_UINT32 ui32OutMode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Pages); -+ IMG_VOID(IMG_CALLCONV * pfnDestroyStream) (PDBG_STREAM psStream); -+ IMG_VOID *(IMG_CALLCONV * pfnFindStream) (IMG_CHAR * pszName, -+ IMG_BOOL bResetInitBuffer); -+ IMG_UINT32(IMG_CALLCONV * pfnWriteString) (PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level); -+ IMG_UINT32(IMG_CALLCONV * pfnReadString) (PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Limit); -+ IMG_UINT32(IMG_CALLCONV * pfnWriteBIN) (PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+ IMG_UINT32(IMG_CALLCONV * pfnReadBIN) (PDBG_STREAM psStream, -+ IMG_BOOL bReadInitBuffer, -+ IMG_UINT32 ui32OutBufferSize, -+ IMG_UINT8 * pui8OutBuf); -+ IMG_VOID(IMG_CALLCONV * pfnSetCaptureMode) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32CapMode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32Stop, -+ IMG_UINT32 ui32SampleRate); -+ IMG_VOID(IMG_CALLCONV * pfnSetOutputMode) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutMode); -+ IMG_VOID(IMG_CALLCONV * pfnSetDebugLevel) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32DebugLevel); -+ IMG_VOID(IMG_CALLCONV * pfnSetFrame) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32Frame); -+ IMG_UINT32(IMG_CALLCONV * pfnGetFrame) (PDBG_STREAM psStream); -+ IMG_VOID(IMG_CALLCONV * pfnOverrideMode) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode); -+ IMG_VOID(IMG_CALLCONV * pfnDefaultMode) (PDBG_STREAM psStream); -+ IMG_UINT32(IMG_CALLCONV * pfnDBGDrivWrite2) (PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+ IMG_UINT32(IMG_CALLCONV * pfnWriteStringCM) (PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level); -+ IMG_UINT32(IMG_CALLCONV * pfnWriteBINCM) (PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+ IMG_VOID(IMG_CALLCONV * pfnSetMarker) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32Marker); -+ IMG_UINT32(IMG_CALLCONV * pfnGetMarker) (PDBG_STREAM psStream); -+ IMG_VOID(IMG_CALLCONV * pfnEndInitPhase) (PDBG_STREAM psStream); -+ IMG_UINT32(IMG_CALLCONV * pfnIsCaptureFrame) (PDBG_STREAM psStream, -+ IMG_BOOL -+ bCheckPreviousFrame); -+ IMG_UINT32(IMG_CALLCONV * pfnWriteLF) (PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level, -+ IMG_UINT32 ui32Flags); -+ IMG_UINT32(IMG_CALLCONV * pfnReadLF) (PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf); -+ IMG_UINT32(IMG_CALLCONV * pfnGetStreamOffset) (PDBG_STREAM psStream); -+ IMG_VOID(IMG_CALLCONV * pfnSetStreamOffset) (PDBG_STREAM psStream, -+ IMG_UINT32 -+ ui32StreamOffset); -+ IMG_UINT32(IMG_CALLCONV * -+ pfnIsLastCaptureFrame) 
(PDBG_STREAM psStream); -+} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/deviceclass.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/deviceclass.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/deviceclass.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/deviceclass.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,1586 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "kernelbuffer.h" -+#include "pvr_bridge_km.h" -+ -+PVRSRV_ERROR AllocateDeviceID(SYS_DATA * psSysData, IMG_UINT32 * pui32DevID); -+PVRSRV_ERROR FreeDeviceID(SYS_DATA * psSysData, IMG_UINT32 ui32DevID); -+ -+typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE; -+ -+typedef struct PVRSRV_DC_BUFFER_TAG { -+ -+ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer; -+ -+ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo; -+ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain; -+} PVRSRV_DC_BUFFER; -+ -+typedef struct PVRSRV_DC_SWAPCHAIN_TAG { -+ IMG_HANDLE hExtSwapChain; -+ PVRSRV_QUEUE_INFO *psQueue; -+ PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; -+ IMG_UINT32 ui32BufferCount; -+ PVRSRV_DC_BUFFER *psLastFlipBuffer; -+ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo; -+ IMG_HANDLE hResItem; -+} PVRSRV_DC_SWAPCHAIN; -+ -+typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG { -+ IMG_UINT32 ui32RefCount; -+ IMG_UINT32 ui32DeviceID; -+ IMG_HANDLE hExtDevice; -+ PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable; -+ IMG_HANDLE hDevMemContext; -+ PVRSRV_DC_BUFFER sSystemBuffer; -+} PVRSRV_DISPLAYCLASS_INFO; -+ -+typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG { -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PRESMAN_ITEM hResItem; -+} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO; -+ -+typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG -+ *PPVRSRV_BC_SRV2BUFFER_KMJTABLE; -+ -+typedef struct PVRSRV_BC_BUFFER_TAG { -+ -+ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer; -+ -+ struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo; -+} PVRSRV_BC_BUFFER; -+ -+typedef struct PVRSRV_BUFFERCLASS_INFO_TAG { -+ IMG_UINT32 ui32RefCount; -+ IMG_UINT32 ui32DeviceID; -+ IMG_HANDLE hExtDevice; -+ PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable; -+ IMG_HANDLE hDevMemContext; -+ -+ IMG_UINT32 ui32BufferCount; -+ PVRSRV_BC_BUFFER *psBuffer; -+ -+} PVRSRV_BUFFERCLASS_INFO; -+ 
-+typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG { -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo; -+ IMG_HANDLE hResItem; -+} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO; -+ -+static PVRSRV_DISPLAYCLASS_INFO *DCDeviceHandleToDCInfo(IMG_HANDLE hDeviceKM) -+{ -+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; -+ -+ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *) hDeviceKM; -+ -+ return psDCPerContextInfo->psDCInfo; -+} -+ -+static PVRSRV_BUFFERCLASS_INFO *BCDeviceHandleToBCInfo(IMG_HANDLE hDeviceKM) -+{ -+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; -+ -+ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *) hDeviceKM; -+ -+ return psBCPerContextInfo->psBCInfo; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass, -+ IMG_UINT32 * pui32DevCount, -+ IMG_UINT32 * pui32DevID) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT ui32DevCount = 0; -+ SYS_DATA *psSysData; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVEnumerateDCKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ if ((psDeviceNode->sDevId.eDeviceClass == DeviceClass) -+ && (psDeviceNode->sDevId.eDeviceType == -+ PVRSRV_DEVICE_TYPE_EXT)) { -+ ui32DevCount++; -+ if (pui32DevID) { -+ *pui32DevID++ = -+ psDeviceNode->sDevId.ui32DeviceIndex; -+ } -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ if (pui32DevCount) { -+ *pui32DevCount = ui32DevCount; -+ } else if (pui32DevID == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVEnumerateDCKM: Invalid parameters")); -+ return (PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRegisterDCDeviceKM(PVRSRV_DC_SRV2DISP_KMJTABLE * psFuncTable, -+ IMG_UINT32 * pui32DeviceID) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = IMG_NULL; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterDCDeviceKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*psDCInfo), -+ (IMG_VOID **) & psDCInfo, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ OSMemSet(psDCInfo, 0, sizeof(*psDCInfo)); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), -+ (IMG_VOID **) & psDCInfo->psFuncTable, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc")); -+ goto ErrorExit; -+ } -+ OSMemSet(psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE)); -+ -+ *psDCInfo->psFuncTable = *psFuncTable; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DEVICE_NODE), -+ (IMG_VOID **) & psDeviceNode, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc")); -+ goto ErrorExit; -+ } -+ OSMemSet(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); -+ -+ psDeviceNode->pvDevice = (IMG_VOID *) psDCInfo; -+ psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo); -+ psDeviceNode->ui32RefCount = 1; -+ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT; -+ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY; -+ psDeviceNode->psSysData = psSysData; -+ -+ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex); -+ psDCInfo->ui32DeviceID = 
psDeviceNode->sDevId.ui32DeviceIndex; -+ if (pui32DeviceID) { -+ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; -+ } -+ -+ SysRegisterExternalDevice(psDeviceNode); -+ -+ psDeviceNode->psNext = psSysData->psDeviceNodeList; -+ psSysData->psDeviceNodeList = psDeviceNode; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ if (psDCInfo->psFuncTable) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), -+ psDCInfo->psFuncTable, IMG_NULL); -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), -+ psDCInfo, IMG_NULL); -+ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+} -+ -+PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_DEVICE_NODE **ppsDeviceNode, *psDeviceNode; -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRemoveDCDeviceKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ ppsDeviceNode = &psSysData->psDeviceNodeList; -+ while (*ppsDeviceNode) { -+ switch ((*ppsDeviceNode)->sDevId.eDeviceClass) { -+ case PVRSRV_DEVICE_CLASS_DISPLAY: -+ { -+ if ((*ppsDeviceNode)->sDevId.ui32DeviceIndex == -+ ui32DevIndex) { -+ goto FoundDevice; -+ } -+ break; -+ } -+ default: -+ { -+ break; -+ } -+ } -+ ppsDeviceNode = &((*ppsDeviceNode)->psNext); -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRemoveDCDeviceKM: requested device %d not present", -+ ui32DevIndex)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ -+FoundDevice: -+ -+ psDeviceNode = *ppsDeviceNode; -+ *ppsDeviceNode = psDeviceNode->psNext; -+ -+ SysRemoveExternalDevice(psDeviceNode); -+ -+ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *) psDeviceNode->pvDevice; -+ PVR_ASSERT(psDCInfo->ui32RefCount == 0); -+ FreeDeviceID(psSysData, ui32DevIndex); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), -+ psDCInfo->psFuncTable, IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), -+ psDCInfo, IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), -+ psDeviceNode, IMG_NULL); -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRegisterBCDeviceKM(PVRSRV_BC_SRV2BUFFER_KMJTABLE * -+ psFuncTable, IMG_UINT32 * pui32DeviceID) -+{ -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterBCDeviceKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*psBCInfo), -+ (IMG_VOID **) & psBCInfo, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ OSMemSet(psBCInfo, 0, sizeof(*psBCInfo)); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), -+ (IMG_VOID **) & psBCInfo->psFuncTable, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc")); -+ goto ErrorExit; -+ } -+ OSMemSet(psBCInfo->psFuncTable, 0, -+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE)); -+ -+ *psBCInfo->psFuncTable = *psFuncTable; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DEVICE_NODE), -+ (IMG_VOID **) & psDeviceNode, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc")); -+ goto ErrorExit; -+ } -+ OSMemSet(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); -+ -+ psDeviceNode->pvDevice = 
(IMG_VOID *) psBCInfo; -+ psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo); -+ psDeviceNode->ui32RefCount = 1; -+ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT; -+ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER; -+ psDeviceNode->psSysData = psSysData; -+ -+ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex); -+ psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; -+ if (pui32DeviceID) { -+ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; -+ } -+ -+ psDeviceNode->psNext = psSysData->psDeviceNodeList; -+ psSysData->psDeviceNodeList = psDeviceNode; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ if (psBCInfo->psFuncTable) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), -+ psBCInfo->psFuncTable, IMG_NULL); -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), -+ psBCInfo, IMG_NULL); -+ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+} -+ -+PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_DEVICE_NODE **ppsDevNode, *psDevNode; -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRemoveBCDeviceKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ ppsDevNode = &psSysData->psDeviceNodeList; -+ while (*ppsDevNode) { -+ switch ((*ppsDevNode)->sDevId.eDeviceClass) { -+ case PVRSRV_DEVICE_CLASS_BUFFER: -+ { -+ if ((*ppsDevNode)->sDevId.ui32DeviceIndex == -+ ui32DevIndex) { -+ goto FoundDevice; -+ } -+ break; -+ } -+ default: -+ { -+ break; -+ } -+ } -+ ppsDevNode = &(*ppsDevNode)->psNext; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRemoveBCDeviceKM: requested device %d not present", -+ ui32DevIndex)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ -+FoundDevice: -+ -+ psDevNode = *(ppsDevNode); -+ *ppsDevNode = psDevNode->psNext; -+ -+ FreeDeviceID(psSysData, ui32DevIndex); -+ psBCInfo = (PVRSRV_BUFFERCLASS_INFO *) psDevNode->pvDevice; -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, -+ IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), -+ psBCInfo, IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), -+ psDevNode, IMG_NULL); -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, -+ IMG_BOOL bResManCallback) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(bResManCallback); -+ -+ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *) hDeviceKM; -+ -+ eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *) pvParam; -+ psDCInfo = psDCPerContextInfo->psDCInfo; -+ -+ psDCInfo->ui32RefCount--; -+ if (psDCInfo->ui32RefCount == 0) { -+ -+ psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice); -+ -+ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer. 
-+ psKernelSyncInfo); -+ -+ psDCInfo->hDevMemContext = IMG_NULL; -+ psDCInfo->hExtDevice = IMG_NULL; -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), -+ psDCPerContextInfo, IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_UINT32 ui32DeviceID, -+ IMG_HANDLE hDevCookie, -+ IMG_HANDLE * phDeviceKM) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ -+ if (!phDeviceKM) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenDCDeviceKM: Invalid params")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenDCDeviceKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ if ((psDeviceNode->sDevId.eDeviceClass == -+ PVRSRV_DEVICE_CLASS_DISPLAY) -+ && (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID)) { -+ -+ psDCInfo = -+ (PVRSRV_DISPLAYCLASS_INFO *) psDeviceNode->pvDevice; -+ goto FoundDevice; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenDCDeviceKM: no devnode matching index %d", -+ ui32DeviceID)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ -+FoundDevice: -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*psDCPerContextInfo), -+ (IMG_VOID **) & psDCPerContextInfo, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo)); -+ -+ if (psDCInfo->ui32RefCount++ == 0) { -+ PVRSRV_ERROR eError; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ -+ psDCInfo->hDevMemContext = -+ (IMG_HANDLE) psDeviceNode->sDevMemoryInfo.pBMKernelContext; -+ -+ eError = PVRSRVAllocSyncInfoKM(IMG_NULL, -+ (IMG_HANDLE) psDeviceNode-> -+ sDevMemoryInfo.pBMKernelContext, -+ &psDCInfo->sSystemBuffer. -+ sDeviceClassBuffer. -+ psKernelSyncInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenDCDeviceKM: Failed sync info alloc")); -+ psDCInfo->ui32RefCount--; -+ return eError; -+ } -+ -+ eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID, -+ &psDCInfo-> -+ hExtDevice, -+ (PVRSRV_SYNC_DATA -+ *) psDCInfo-> -+ sSystemBuffer. -+ sDeviceClassBuffer. -+ psKernelSyncInfo-> -+ psSyncDataMemInfoKM-> -+ pvLinAddrKM); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenDCDeviceKM: Failed to open external DC device")); -+ psDCInfo->ui32RefCount--; -+ PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer. -+ sDeviceClassBuffer. 
-+ psKernelSyncInfo); -+ return eError; -+ } -+ } -+ -+ psDCPerContextInfo->psDCInfo = psDCInfo; -+ psDCPerContextInfo->hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_DISPLAYCLASS_DEVICE, -+ psDCPerContextInfo, 0, CloseDCDeviceCallBack); -+ -+ *phDeviceKM = (IMG_HANDLE) psDCPerContextInfo; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM, -+ IMG_UINT32 * pui32Count, -+ DISPLAY_FORMAT * psFormat) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ -+ if (!hDeviceKM || !pui32Count || !psFormat) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVEnumDCFormatsKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ -+ return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, -+ pui32Count, psFormat); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM, -+ DISPLAY_FORMAT * psFormat, -+ IMG_UINT32 * pui32Count, -+ DISPLAY_DIMS * psDim) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ -+ if (!hDeviceKM || !pui32Count || !psFormat) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVEnumDCDimsKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ -+ return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, -+ psFormat, pui32Count, -+ psDim); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE * phBuffer) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ IMG_HANDLE hExtBuffer; -+ -+ if (!hDeviceKM || !phBuffer) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetDCSystemBufferKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ -+ eError = -+ psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, -+ &hExtBuffer); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver")); -+ return eError; -+ } -+ -+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = -+ psDCInfo->psFuncTable->pfnGetBufferAddr; -+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = -+ psDCInfo->hDevMemContext; -+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = -+ psDCInfo->hExtDevice; -+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer; -+ -+ psDCInfo->sSystemBuffer.psDCInfo = psDCInfo; -+ -+ *phBuffer = (IMG_HANDLE) & (psDCInfo->sSystemBuffer); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM, -+ DISPLAY_INFO * psDisplayInfo) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_ERROR eError; -+ -+ if (!hDeviceKM || !psDisplayInfo) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetDCInfoKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ -+ eError = -+ psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, -+ psDisplayInfo); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ if (psDisplayInfo->ui32MaxSwapChainBuffers > -+ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) { -+ psDisplayInfo->ui32MaxSwapChainBuffers = -+ PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ -+ if (!hSwapChain) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDestroyDCSwapChainKM: Invalid parameters")); -+ 
return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psSwapChain = hSwapChain; -+ -+ eError = ResManFreeResByPtr(psSwapChain->hResItem); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR DestroyDCSwapChainCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain = pvParam; -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo; -+ IMG_UINT32 i; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue); -+ -+ eError = -+ psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice, -+ psSwapChain-> -+ hExtSwapChain); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DestroyDCSwapChainCallBack: Failed to destroy DC swap chain")); -+ return eError; -+ } -+ -+ for (i = 0; i < psSwapChain->ui32BufferCount; i++) { -+ if (psSwapChain->asBuffer[i].sDeviceClassBuffer. -+ psKernelSyncInfo) { -+ PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i]. -+ sDeviceClassBuffer. -+ psKernelSyncInfo); -+ } -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), -+ psSwapChain, IMG_NULL); -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDeviceKM, -+ IMG_UINT32 ui32Flags, -+ DISPLAY_SURF_ATTRIBUTES * -+ psDstSurfAttrib, -+ DISPLAY_SURF_ATTRIBUTES * -+ psSrcSurfAttrib, -+ IMG_UINT32 ui32BufferCount, -+ IMG_UINT32 ui32OEMFlags, -+ IMG_HANDLE * phSwapChain, -+ IMG_UINT32 * pui32SwapChainID) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL; -+ PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; -+ PVRSRV_QUEUE_INFO *psQueue = IMG_NULL; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ if (!hDeviceKM -+ || !psDstSurfAttrib -+ || !psSrcSurfAttrib || !phSwapChain || !pui32SwapChainID) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Too many buffers")); -+ return PVRSRV_ERROR_TOOMANYBUFFERS; -+ } -+ -+ if (ui32BufferCount < 2) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Too few buffers")); -+ return PVRSRV_ERROR_TOO_FEW_BUFFERS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DC_SWAPCHAIN), -+ (IMG_VOID **) & psSwapChain, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorExit; -+ } -+ OSMemSet(psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN)); -+ -+ eError = PVRSRVCreateCommandQueueKM(1024, &psQueue); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue")); -+ goto ErrorExit; -+ } -+ -+ psSwapChain->psQueue = psQueue; -+ -+ for (i = 0; i < ui32BufferCount; i++) { -+ eError = PVRSRVAllocSyncInfoKM(IMG_NULL, -+ psDCInfo->hDevMemContext, -+ &psSwapChain->asBuffer[i]. -+ sDeviceClassBuffer. 
-+ psKernelSyncInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain")); -+ goto ErrorExit; -+ } -+ -+ psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = -+ psDCInfo->psFuncTable->pfnGetBufferAddr; -+ psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = -+ psDCInfo->hDevMemContext; -+ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = -+ psDCInfo->hExtDevice; -+ -+ psSwapChain->asBuffer[i].psDCInfo = psDCInfo; -+ psSwapChain->asBuffer[i].psSwapChain = psSwapChain; -+ -+ apsSyncData[i] = -+ (PVRSRV_SYNC_DATA *) psSwapChain->asBuffer[i]. -+ sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM-> -+ pvLinAddrKM; -+ } -+ -+ psSwapChain->ui32BufferCount = ui32BufferCount; -+ psSwapChain->psDCInfo = psDCInfo; -+ -+ eError = -+ psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice, -+ ui32Flags, -+ psDstSurfAttrib, -+ psSrcSurfAttrib, -+ ui32BufferCount, -+ apsSyncData, -+ ui32OEMFlags, -+ &psSwapChain-> -+ hExtSwapChain, -+ pui32SwapChainID); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain")); -+ goto ErrorExit; -+ } -+ -+ *phSwapChain = (IMG_HANDLE) psSwapChain; -+ -+ psSwapChain->hResItem = ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN, -+ psSwapChain, -+ 0, -+ DestroyDCSwapChainCallBack); -+ -+ return eError; -+ -+ErrorExit: -+ -+ for (i = 0; i < ui32BufferCount; i++) { -+ if (psSwapChain->asBuffer[i].sDeviceClassBuffer. -+ psKernelSyncInfo) { -+ PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i]. -+ sDeviceClassBuffer. -+ psKernelSyncInfo); -+ } -+ } -+ -+ if (psQueue) { -+ PVRSRVDestroyCommandQueueKM(psQueue); -+ } -+ -+ if (psSwapChain) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), -+ psSwapChain, IMG_NULL); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, IMG_RECT * psRect) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ -+ if (!hDeviceKM || !hSwapChain) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetDCDstRectKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psSwapChain = (PVRSRV_DC_SWAPCHAIN *) hSwapChain; -+ -+ return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice, -+ psSwapChain-> -+ hExtSwapChain, psRect); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, IMG_RECT * psRect) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ -+ if (!hDeviceKM || !hSwapChain) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetDCSrcRectKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psSwapChain = (PVRSRV_DC_SWAPCHAIN *) hSwapChain; -+ -+ return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice, -+ psSwapChain-> -+ hExtSwapChain, psRect); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 ui32CKColour) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ -+ if (!hDeviceKM || !hSwapChain) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetDCDstColourKeyKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psSwapChain = 
(PVRSRV_DC_SWAPCHAIN *) hSwapChain; -+ -+ return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice, -+ psSwapChain-> -+ hExtSwapChain, -+ ui32CKColour); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 ui32CKColour) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ -+ if (!hDeviceKM || !hSwapChain) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetDCSrcColourKeyKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psSwapChain = (PVRSRV_DC_SWAPCHAIN *) hSwapChain; -+ -+ return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice, -+ psSwapChain-> -+ hExtSwapChain, -+ ui32CKColour); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 * pui32BufferCount, -+ IMG_HANDLE * phBuffer) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ if (!hDeviceKM || !hSwapChain || !phBuffer) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetDCBuffersKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psSwapChain = (PVRSRV_DC_SWAPCHAIN *) hSwapChain; -+ -+ eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice, -+ psSwapChain-> -+ hExtSwapChain, -+ pui32BufferCount, -+ ahExtBuffer); -+ -+ PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); -+ -+ for (i = 0; i < *pui32BufferCount; i++) { -+ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = -+ ahExtBuffer[i]; -+ phBuffer[i] = (IMG_HANDLE) & psSwapChain->asBuffer[i]; -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hBuffer, -+ IMG_UINT32 ui32SwapInterval, -+ IMG_HANDLE hPrivateTag, -+ IMG_UINT32 ui32ClipRectCount, -+ IMG_RECT * psClipRect) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_BUFFER *psBuffer; -+ PVRSRV_QUEUE_INFO *psQueue; -+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; -+ IMG_UINT32 i; -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0; -+ IMG_UINT32 ui32NumSrcSyncs = 1; -+ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; -+ PVRSRV_COMMAND *psCommand; -+ -+ if (!hDeviceKM || !hBuffer || !psClipRect) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCBufferKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psBuffer = (PVRSRV_DC_BUFFER *) hBuffer; -+ -+ psQueue = psBuffer->psSwapChain->psQueue; -+ -+ apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo; -+ if (psBuffer->psSwapChain->psLastFlipBuffer && -+ psBuffer != psBuffer->psSwapChain->psLastFlipBuffer) { -+ apsSrcSync[1] = -+ psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer. 
-+ psKernelSyncInfo; -+ ui32NumSrcSyncs++; -+ } -+ -+ eError = PVRSRVInsertCommandKM(psQueue, -+ &psCommand, -+ psDCInfo->ui32DeviceID, -+ DC_FLIP_COMMAND, -+ 0, -+ IMG_NULL, -+ ui32NumSrcSyncs, -+ apsSrcSync, -+ sizeof(DISPLAYCLASS_FLIP_COMMAND) + -+ (sizeof(IMG_RECT) * ui32ClipRectCount)); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCBufferKM: Failed to get space in queue")); -+ goto Exit; -+ } -+ -+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND *) psCommand->pvData; -+ -+ psFlipCmd->hExtDevice = psDCInfo->hExtDevice; -+ -+ psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain; -+ -+ psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer; -+ -+ psFlipCmd->hPrivateTag = hPrivateTag; -+ -+ psFlipCmd->ui32ClipRectCount = ui32ClipRectCount; -+ -+ psFlipCmd->psClipRect = -+ (IMG_RECT *) ((IMG_UINT8 *) psFlipCmd + -+ sizeof(DISPLAYCLASS_FLIP_COMMAND)); -+ -+ for (i = 0; i < ui32ClipRectCount; i++) { -+ psFlipCmd->psClipRect[i] = psClipRect[i]; -+ } -+ -+ psFlipCmd->ui32SwapInterval = ui32SwapInterval; -+ -+ eError = PVRSRVSubmitCommandKM(psQueue, psCommand); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCBufferKM: Failed to submit command")); -+ goto Exit; -+ } -+ -+ do { -+ if (PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != -+ PVRSRV_ERROR_PROCESSING_BLOCKED) { -+ goto ProcessedQueues; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ uiStart = OSClockus(); -+ bStart = IMG_TRUE; -+ } -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCBufferKM: Failed to process queues")); -+ -+ eError = PVRSRV_ERROR_GENERIC; -+ goto Exit; -+ -+ProcessedQueues: -+ -+ psBuffer->psSwapChain->psLastFlipBuffer = psBuffer; -+ -+Exit: -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_QUEUE_INFO *psQueue; -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DC_SWAPCHAIN *psSwapChain; -+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0; -+ IMG_UINT32 ui32NumSrcSyncs = 1; -+ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; -+ PVRSRV_COMMAND *psCommand; -+ -+ if (!hDeviceKM || !hSwapChain) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCSystemKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); -+ psSwapChain = (PVRSRV_DC_SWAPCHAIN *) hSwapChain; -+ -+ psQueue = psSwapChain->psQueue; -+ -+ apsSrcSync[0] = -+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo; -+ if (psSwapChain->psLastFlipBuffer) { -+ apsSrcSync[1] = -+ psSwapChain->psLastFlipBuffer->sDeviceClassBuffer. 
-+ psKernelSyncInfo; -+ ui32NumSrcSyncs++; -+ } -+ -+ eError = PVRSRVInsertCommandKM(psQueue, -+ &psCommand, -+ psDCInfo->ui32DeviceID, -+ DC_FLIP_COMMAND, -+ 0, -+ IMG_NULL, -+ ui32NumSrcSyncs, -+ apsSrcSync, -+ sizeof(DISPLAYCLASS_FLIP_COMMAND)); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCSystemKM: Failed to get space in queue")); -+ goto Exit; -+ } -+ -+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND *) psCommand->pvData; -+ -+ psFlipCmd->hExtDevice = psDCInfo->hExtDevice; -+ -+ psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain; -+ -+ psFlipCmd->hExtBuffer = -+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer; -+ -+ psFlipCmd->hPrivateTag = IMG_NULL; -+ -+ psFlipCmd->ui32ClipRectCount = 0; -+ -+ psFlipCmd->ui32SwapInterval = 1; -+ -+ eError = PVRSRVSubmitCommandKM(psQueue, psCommand); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCSystemKM: Failed to submit command")); -+ goto Exit; -+ } -+ -+ do { -+ if (PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != -+ PVRSRV_ERROR_PROCESSING_BLOCKED) { -+ goto ProcessedQueues; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ uiStart = OSClockus(); -+ bStart = IMG_TRUE; -+ } -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSwapToDCSystemKM: Failed to process queues")); -+ eError = PVRSRV_ERROR_GENERIC; -+ goto Exit; -+ -+ProcessedQueues: -+ -+ psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer; -+ -+ eError = PVRSRV_OK; -+ -+Exit: -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRegisterSystemISRHandler(PFN_ISR_HANDLER pfnISRHandler, -+ IMG_VOID * pvISRHandlerData, -+ IMG_UINT32 ui32ISRSourceMask, -+ IMG_UINT32 ui32DeviceID) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask); -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterSystemISRHandler: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDevNode = psSysData->psDeviceNodeList; -+ while (psDevNode) { -+ if (psDevNode->sDevId.ui32DeviceIndex == ui32DeviceID) { -+ break; -+ } -+ psDevNode = psDevNode->psNext; -+ } -+ -+ psDevNode->pvISRData = (IMG_VOID *) pvISRHandlerData; -+ -+ psDevNode->pfnDeviceISR = pfnISRHandler; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State) -+{ -+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetDCState: Failed to get SysData")); -+ return; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode != IMG_NULL) { -+ if (psDeviceNode->sDevId.eDeviceClass == -+ PVRSRV_DEVICE_CLASS_DISPLAY) { -+ psDCInfo = -+ (PVRSRV_DISPLAYCLASS_INFO *) psDeviceNode->pvDevice; -+ if (psDCInfo->psFuncTable->pfnSetDCState -+ && psDCInfo->hExtDevice) { -+ psDCInfo->psFuncTable->pfnSetDCState(psDCInfo-> -+ hExtDevice, -+ ui32State); -+ } -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+} -+ -+IMG_EXPORT -+ IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE * psJTable) -+{ -+ psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE); -+ psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM; -+ psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM; -+ psJTable->pfnPVRSRVOEMFunction = SysOEMFunction; -+ psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM; -+ 
psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM; -+ psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM; -+ psJTable->pfnPVRSRVRegisterSystemISRHandler = -+ PVRSRVRegisterSystemISRHandler; -+ psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice; -+ -+ return IMG_TRUE; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, -+ IMG_BOOL bResManCallback) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(bResManCallback); -+ -+ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *) hDeviceKM; -+ -+ eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *) pvParam; -+ psBCInfo = psBCPerContextInfo->psBCInfo; -+ -+ psBCInfo->ui32RefCount--; -+ if (psBCInfo->ui32RefCount == 0) { -+ IMG_UINT32 i; -+ -+ psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice); -+ -+ for (i = 0; i < psBCInfo->ui32BufferCount; i++) { -+ if (psBCInfo->psBuffer[i].sDeviceClassBuffer. -+ psKernelSyncInfo) { -+ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i]. -+ sDeviceClassBuffer. -+ psKernelSyncInfo); -+ } -+ } -+ -+ if (psBCInfo->psBuffer) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BC_BUFFER) * -+ psBCInfo->ui32BufferCount, psBCInfo->psBuffer, -+ IMG_NULL); -+ } -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), -+ psBCPerContextInfo, IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_UINT32 ui32DeviceID, -+ IMG_HANDLE hDevCookie, -+ IMG_HANDLE * phDeviceKM) -+{ -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo; -+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ BUFFER_INFO sBufferInfo; -+ -+ if (!phDeviceKM) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Invalid params")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Failed to get SysData")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ if ((psDeviceNode->sDevId.eDeviceClass == -+ PVRSRV_DEVICE_CLASS_BUFFER) -+ && (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID)) { -+ -+ psBCInfo = -+ (PVRSRV_BUFFERCLASS_INFO *) psDeviceNode->pvDevice; -+ goto FoundDevice; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: No devnode matching index %d", -+ ui32DeviceID)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ -+FoundDevice: -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*psBCPerContextInfo), -+ (IMG_VOID **) & psBCPerContextInfo, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo)); -+ -+ if (psBCInfo->ui32RefCount++ == 0) { -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ -+ psBCInfo->hDevMemContext = -+ (IMG_HANDLE) 
psDeviceNode->sDevMemoryInfo.pBMKernelContext; -+ -+ eError = -+ psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo-> -+ hExtDevice); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Failed to open external BC device")); -+ return eError; -+ } -+ -+ eError = -+ psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, -+ &sBufferInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM : Failed to get BC Info")); -+ return eError; -+ } -+ -+ psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount; -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BC_BUFFER) * -+ sBufferInfo.ui32BufferCount, -+ (IMG_VOID **) & psBCInfo->psBuffer, -+ IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers")); -+ return eError; -+ } -+ OSMemSet(psBCInfo->psBuffer, -+ 0, -+ sizeof(PVRSRV_BC_BUFFER) * -+ sBufferInfo.ui32BufferCount); -+ -+ for (i = 0; i < psBCInfo->ui32BufferCount; i++) { -+ -+ eError = PVRSRVAllocSyncInfoKM(IMG_NULL, -+ psBCInfo->hDevMemContext, -+ &psBCInfo->psBuffer[i]. -+ sDeviceClassBuffer. -+ psKernelSyncInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Failed sync info alloc")); -+ goto ErrorExit; -+ } -+ -+ eError = -+ psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo-> -+ hExtDevice, i, -+ psBCInfo-> -+ psBuffer[i]. -+ sDeviceClassBuffer. -+ psKernelSyncInfo-> -+ psSyncData, -+ &psBCInfo-> -+ psBuffer[i]. -+ sDeviceClassBuffer. -+ hExtBuffer); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOpenBCDeviceKM: Failed to get BC buffers")); -+ goto ErrorExit; -+ } -+ -+ psBCInfo->psBuffer[i].sDeviceClassBuffer. -+ pfnGetBufferAddr = -+ psBCInfo->psFuncTable->pfnGetBufferAddr; -+ psBCInfo->psBuffer[i].sDeviceClassBuffer. -+ hDevMemContext = psBCInfo->hDevMemContext; -+ psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = -+ psBCInfo->hExtDevice; -+ } -+ } -+ -+ psBCPerContextInfo->psBCInfo = psBCInfo; -+ psBCPerContextInfo->hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_BUFFERCLASS_DEVICE, -+ psBCPerContextInfo, 0, CloseBCDeviceCallBack); -+ -+ *phDeviceKM = (IMG_HANDLE) psBCPerContextInfo; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ for (i = 0; i < psBCInfo->ui32BufferCount; i++) { -+ if (psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) { -+ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i]. -+ sDeviceClassBuffer. 
-+ psKernelSyncInfo); -+ } -+ } -+ -+ if (psBCInfo->psBuffer) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_BC_BUFFER) * -+ sBufferInfo.ui32BufferCount, psBCInfo->psBuffer, -+ IMG_NULL); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM, -+ BUFFER_INFO * psBufferInfo) -+{ -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo; -+ PVRSRV_ERROR eError; -+ -+ if (!hDeviceKM || !psBufferInfo) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetBCInfoKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM); -+ -+ eError = -+ psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, -+ psBufferInfo); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetBCInfoKM : Failed to get BC Info")); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM, -+ IMG_UINT32 ui32BufferIndex, -+ IMG_HANDLE * phBuffer) -+{ -+ PVRSRV_BUFFERCLASS_INFO *psBCInfo; -+ -+ if (!hDeviceKM || !phBuffer) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetBCBufferKM: Invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM); -+ -+ if (ui32BufferIndex < psBCInfo->ui32BufferCount) { -+ *phBuffer = (IMG_HANDLE) & psBCInfo->psBuffer[ui32BufferIndex]; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", -+ ui32BufferIndex, psBCInfo->ui32BufferCount)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE * psJTable) -+{ -+ psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE); -+ -+ psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM; -+ psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM; -+ -+ return IMG_TRUE; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/device.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/device.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/device.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/device.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,237 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __DEVICE_H__ -+#define __DEVICE_H__ -+ -+ -+#include "ra.h" -+#include "resman.h" -+ -+ typedef struct _BM_CONTEXT_ BM_CONTEXT; -+ -+ typedef struct _MMU_HEAP_ MMU_HEAP; -+ typedef struct _MMU_CONTEXT_ MMU_CONTEXT; -+ -+#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0)) -+#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1)) -+#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2)) -+#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3)) -+ -+ typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE; -+#define DEVICE_MEMORY_HEAP_PERCONTEXT 0 -+#define DEVICE_MEMORY_HEAP_KERNEL 1 -+#define DEVICE_MEMORY_HEAP_SHARED 2 -+#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3 -+ -+#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1 -+#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2 -+ -+ typedef struct _DEVICE_MEMORY_HEAP_INFO_ { -+ -+ IMG_UINT32 ui32HeapID; -+ -+ IMG_CHAR *pszName; -+ -+ IMG_CHAR *pszBSName; -+ -+ IMG_DEV_VIRTADDR sDevVAddrBase; -+ -+ IMG_UINT32 ui32HeapSize; -+ -+ IMG_UINT32 ui32Attribs; -+ -+ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType; -+ -+ IMG_HANDLE hDevMemHeap; -+ -+ RA_ARENA *psLocalDevMemArena; -+ -+ } DEVICE_MEMORY_HEAP_INFO; -+ -+ typedef struct _DEVICE_MEMORY_INFO_ { -+ -+ IMG_UINT32 ui32AddressSpaceSizeLog2; -+ -+ IMG_UINT32 ui32Flags; -+ -+ IMG_UINT32 ui32HeapCount; -+ -+ IMG_UINT32 ui32SyncHeapID; -+ -+ IMG_UINT32 ui32MappingHeapID; -+ -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ -+ BM_CONTEXT *pBMKernelContext; -+ -+ BM_CONTEXT *pBMContext; -+ -+ } DEVICE_MEMORY_INFO; -+ -+ typedef struct DEV_ARENA_DESCRIPTOR_TAG { -+ IMG_UINT32 ui32HeapID; -+ -+ IMG_CHAR *pszName; -+ -+ IMG_DEV_VIRTADDR BaseDevVAddr; -+ -+ IMG_UINT32 ui32Size; -+ -+ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType; -+ -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo; -+ -+ } DEV_ARENA_DESCRIPTOR; -+ -+ typedef struct _SYS_DATA_TAG_ *PSYS_DATA; -+ -+ typedef struct _PVRSRV_DEVICE_NODE_ { -+ PVRSRV_DEVICE_IDENTIFIER sDevId; -+ IMG_UINT32 ui32RefCount; -+ -+ PVRSRV_ERROR(*pfnInitDevice) (IMG_VOID *); -+ -+ PVRSRV_ERROR(*pfnDeInitDevice) (IMG_VOID *); -+ -+ PVRSRV_ERROR(*pfnMMUInitialise) (struct _PVRSRV_DEVICE_NODE_ *, -+ MMU_CONTEXT **, -+ IMG_DEV_PHYADDR *); -+ IMG_VOID(*pfnMMUFinalise) (MMU_CONTEXT *); -+ IMG_VOID(*pfnMMUInsertHeap) (MMU_CONTEXT *, MMU_HEAP *); -+ MMU_HEAP *(*pfnMMUCreate) (MMU_CONTEXT *, -+ DEV_ARENA_DESCRIPTOR *, RA_ARENA **); -+ IMG_VOID(*pfnMMUDelete) (MMU_HEAP *); -+ IMG_BOOL(*pfnMMUAlloc) (MMU_HEAP * pMMU, -+ IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 uDevVAddrAlignment, -+ IMG_DEV_VIRTADDR * pDevVAddr); -+ IMG_VOID(*pfnMMUFree) (MMU_HEAP *, IMG_DEV_VIRTADDR, -+ IMG_UINT32); -+ IMG_VOID(*pfnMMUEnable) (MMU_HEAP *); -+ IMG_VOID(*pfnMMUDisable) (MMU_HEAP *); -+ IMG_VOID(*pfnMMUMapPages) (MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR devVAddr, -+ IMG_SYS_PHYADDR SysPAddr, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 ui32MemFlags, -+ IMG_HANDLE hUniqueTag); -+ IMG_VOID(*pfnMMUMapShadow) (MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR MapBaseDevVAddr, -+ IMG_SIZE_T uSize, -+ IMG_CPU_VIRTADDR CpuVAddr, -+ IMG_HANDLE hOSMemHandle, -+ IMG_DEV_VIRTADDR * pDevVAddr, -+ IMG_UINT32 ui32MemFlags, -+ IMG_HANDLE hUniqueTag); -+ IMG_VOID(*pfnMMUUnmapPages) (MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR dev_vaddr, -+ 
IMG_UINT32 ui32PageCount, -+ IMG_HANDLE hUniqueTag); -+ -+ IMG_VOID(*pfnMMUMapScatter) (MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_SYS_PHYADDR * psSysAddr, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 ui32MemFlags, -+ IMG_HANDLE hUniqueTag); -+ -+ IMG_DEV_PHYADDR(*pfnMMUGetPhysPageAddr) (MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR -+ sDevVPageAddr); -+ IMG_DEV_PHYADDR(*pfnMMUGetPDDevPAddr) (MMU_CONTEXT * -+ pMMUContext); -+ -+ IMG_BOOL(*pfnDeviceISR) (IMG_VOID *); -+ -+ IMG_VOID *pvISRData; -+ -+ IMG_UINT32 ui32SOCInterruptBit; -+ -+ IMG_VOID(*pfnDeviceMISR) (IMG_VOID *); -+ -+ IMG_VOID(*pfnDeviceCommandComplete) (struct -+ _PVRSRV_DEVICE_NODE_ * -+ psDeviceNode); -+ -+ IMG_BOOL bReProcessDeviceCommandComplete; -+ -+ DEVICE_MEMORY_INFO sDevMemoryInfo; -+ -+ IMG_VOID *pvDevice; -+ IMG_UINT32 ui32pvDeviceSize; -+ IMG_VOID *hDeviceOSMemHandle; -+ -+ PRESMAN_CONTEXT hResManContext; -+ -+ PSYS_DATA psSysData; -+ -+ RA_ARENA *psLocalDevMemArena; -+ -+ IMG_UINT32 ui32Flags; -+ -+ struct _PVRSRV_DEVICE_NODE_ *psNext; -+ } PVRSRV_DEVICE_NODE; -+ -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData, -+ PVRSRV_ERROR -+ (*pfnRegisterDevice) -+ (PVRSRV_DEVICE_NODE *), -+ IMG_UINT32 -+ ui32SOCInterruptBit, -+ IMG_UINT32 * -+ pui32DeviceIndex); -+ -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 -+ ui32DevIndex); -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful); -+ -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 -+ ui32DevIndex); -+ -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32 -+ * pui32LinMemAddr, -+ IMG_UINT32 -+ ui32Value, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 -+ ui32Waitus, -+ IMG_UINT32 -+ ui32Tries); -+ -+ -+ -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData); -+ IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData); -+ IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE * -+ psDeviceNode); -+ IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID * pvSysData); -+ IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID * pvSysData); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/devicemem.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/devicemem.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/devicemem.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/devicemem.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,1166 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "pdump_km.h" -+#include "sgxmmu.h" -+#include "sgxapi_km.h" -+#include "pvr_bridge_km.h" -+ -+#include "linux/kernel.h" -+#include "linux/pagemap.h" -+ -+static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, -+ IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Alignment, -+ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo); -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie, -+ PVRSRV_HEAP_INFO * -+ psHeapInfo) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT32 ui32HeapCount; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ IMG_UINT32 i; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ -+ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; -+ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; -+ -+ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); -+ -+ for (i = 0; i < ui32HeapCount; i++) { -+ -+ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; -+ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap; -+ psHeapInfo[i].sDevVAddrBase = -+ psDeviceMemoryHeap[i].sDevVAddrBase; -+ psHeapInfo[i].ui32HeapByteSize = -+ psDeviceMemoryHeap[i].ui32HeapSize; -+ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; -+ } -+ -+ for (; i < PVRSRV_MAX_CLIENT_HEAPS; i++) { -+ OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo)); -+ psHeapInfo[i].ui32HeapID = (IMG_UINT32) SGX_UNDEFINED_HEAP_ID; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_PER_PROCESS_DATA -+ * psPerProc, -+ IMG_HANDLE * -+ phDevMemContext, -+ IMG_UINT32 * -+ pui32ClientHeapCount, -+ PVRSRV_HEAP_INFO * -+ psHeapInfo, -+ IMG_BOOL * -+ pbCreated -+ , -+ IMG_BOOL * pbShared -+ ) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount = 0; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ IMG_HANDLE hDevMemContext; -+ IMG_HANDLE hDevMemHeap; -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ IMG_UINT32 i; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ -+ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; -+ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; -+ -+ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); -+ -+ hDevMemContext = BM_CreateContext(psDeviceNode, -+ &sPDDevPAddr, psPerProc, pbCreated); -+ if (hDevMemContext == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ for (i = 0; i < ui32HeapCount; i++) { -+ switch (psDeviceMemoryHeap[i].DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ -+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = -+ psDeviceMemoryHeap[i].ui32HeapID; -+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = -+ psDeviceMemoryHeap[i].hDevMemHeap; -+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = -+ psDeviceMemoryHeap[i].sDevVAddrBase; -+ psHeapInfo[ui32ClientHeapCount]. 
-+ ui32HeapByteSize = -+ psDeviceMemoryHeap[i].ui32HeapSize; -+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = -+ psDeviceMemoryHeap[i].ui32Attribs; -+ pbShared[ui32ClientHeapCount] = IMG_TRUE; -+ ui32ClientHeapCount++; -+ break; -+ } -+ case DEVICE_MEMORY_HEAP_PERCONTEXT: -+ { -+ hDevMemHeap = BM_CreateHeap(hDevMemContext, -+ &psDeviceMemoryHeap -+ [i]); -+ -+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = -+ psDeviceMemoryHeap[i].ui32HeapID; -+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = -+ hDevMemHeap; -+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = -+ psDeviceMemoryHeap[i].sDevVAddrBase; -+ psHeapInfo[ui32ClientHeapCount]. -+ ui32HeapByteSize = -+ psDeviceMemoryHeap[i].ui32HeapSize; -+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = -+ psDeviceMemoryHeap[i].ui32Attribs; -+ pbShared[ui32ClientHeapCount] = IMG_FALSE; -+ -+ ui32ClientHeapCount++; -+ break; -+ } -+ } -+ } -+ -+ *pui32ClientHeapCount = ui32ClientHeapCount; -+ *phDevMemContext = hDevMemContext; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE -+ hDevCookie, -+ IMG_HANDLE -+ hDevMemContext, -+ IMG_BOOL * -+ pbDestroyed) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevCookie); -+ -+ return BM_DestroyContext(hDevMemContext, pbDestroyed); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE -+ hDevCookie, -+ IMG_HANDLE -+ hDevMemContext, -+ IMG_UINT32 * -+ pui32ClientHeapCount, -+ PVRSRV_HEAP_INFO * -+ psHeapInfo -+ , IMG_BOOL * pbShared -+ ) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount = 0; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ IMG_HANDLE hDevMemHeap; -+ IMG_UINT32 i; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ -+ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; -+ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; -+ -+ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); -+ -+ for (i = 0; i < ui32HeapCount; i++) { -+ switch (psDeviceMemoryHeap[i].DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ -+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = -+ psDeviceMemoryHeap[i].ui32HeapID; -+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = -+ psDeviceMemoryHeap[i].hDevMemHeap; -+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = -+ psDeviceMemoryHeap[i].sDevVAddrBase; -+ psHeapInfo[ui32ClientHeapCount]. -+ ui32HeapByteSize = -+ psDeviceMemoryHeap[i].ui32HeapSize; -+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = -+ psDeviceMemoryHeap[i].ui32Attribs; -+ pbShared[ui32ClientHeapCount] = IMG_TRUE; -+ ui32ClientHeapCount++; -+ break; -+ } -+ case DEVICE_MEMORY_HEAP_PERCONTEXT: -+ { -+ hDevMemHeap = BM_CreateHeap(hDevMemContext, -+ &psDeviceMemoryHeap -+ [i]); -+ -+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = -+ psDeviceMemoryHeap[i].ui32HeapID; -+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = -+ hDevMemHeap; -+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = -+ psDeviceMemoryHeap[i].sDevVAddrBase; -+ psHeapInfo[ui32ClientHeapCount]. 
-+ ui32HeapByteSize = -+ psDeviceMemoryHeap[i].ui32HeapSize; -+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = -+ psDeviceMemoryHeap[i].ui32Attribs; -+ pbShared[ui32ClientHeapCount] = IMG_FALSE; -+ -+ ui32ClientHeapCount++; -+ break; -+ } -+ } -+ } -+ -+ *pui32ClientHeapCount = ui32ClientHeapCount; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, -+ IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Alignment, -+ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo; -+ BM_HANDLE hBuffer; -+ -+ PVRSRV_MEMBLK *psMemBlock; -+ IMG_BOOL bBMError; -+ -+ PVR_UNREFERENCED_PARAMETER(hDevCookie); -+ -+ *ppsMemInfo = IMG_NULL; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), -+ (IMG_VOID **) & psMemInfo, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocDeviceMem: Failed to alloc memory for block")); -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ -+ psMemBlock = &(psMemInfo->sMemBlk); -+ -+ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION; -+ -+ bBMError = BM_Alloc(hDevMemHeap, -+ IMG_NULL, -+ ui32Size, -+ &psMemInfo->ui32Flags, ui32Alignment, &hBuffer); -+ -+ if (!bBMError) { -+ PVR_DPF((PVR_DBG_ERROR, "AllocDeviceMem: BM_Alloc Failed")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); -+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); -+ -+ psMemBlock->hBuffer = (IMG_HANDLE) hBuffer; -+ -+ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); -+ -+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; -+ -+ psMemInfo->ui32AllocSize = ui32Size; -+ -+ *ppsMemInfo = psMemInfo; -+ -+ return (PVRSRV_OK); -+} -+ -+static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO * psMemInfo) -+{ -+ BM_HANDLE hBuffer; -+ -+ if (!psMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ hBuffer = psMemInfo->sMemBlk.hBuffer; -+ -+ BM_Free(hBuffer, psMemInfo->ui32Flags); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), -+ psMemInfo, IMG_NULL); -+ -+ return (PVRSRV_OK); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie, -+ IMG_HANDLE hDevMemContext, -+ PVRSRV_KERNEL_SYNC_INFO ** -+ ppsKernelSyncInfo) -+{ -+ IMG_HANDLE hSyncDevMemHeap; -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ BM_CONTEXT *pBMContext; -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ PVRSRV_SYNC_DATA *psSyncData; -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_SYNC_INFO), -+ (IMG_VOID **) & psKernelSyncInfo, IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocSyncInfoKM: Failed to alloc memory")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ pBMContext = (BM_CONTEXT *) hDevMemContext; -+ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo; -+ -+ hSyncDevMemHeap = -+ psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo-> -+ ui32SyncHeapID].hDevMemHeap; -+ -+ eError = AllocDeviceMem(hDevCookie, -+ hSyncDevMemHeap, -+ 0, -+ sizeof(PVRSRV_SYNC_DATA), -+ sizeof(IMG_UINT32), -+ &psKernelSyncInfo->psSyncDataMemInfoKM); -+ -+ if (eError != PVRSRV_OK) { -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocSyncInfoKM: Failed to alloc memory")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, -+ IMG_NULL); -+ return 
PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psKernelSyncInfo->psSyncData = -+ psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM; -+ psSyncData = psKernelSyncInfo->psSyncData; -+ -+ psSyncData->ui32WriteOpsPending = 0; -+ psSyncData->ui32WriteOpsComplete = 0; -+ psSyncData->ui32ReadOpsPending = 0; -+ psSyncData->ui32ReadOpsComplete = 0; -+ psSyncData->ui32LastOpDumpVal = 0; -+ psSyncData->ui32LastReadOpDumpVal = 0; -+ -+ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = -+ psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + -+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete); -+ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = -+ psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + -+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); -+ -+ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL; -+ -+ *ppsKernelSyncInfo = psKernelSyncInfo; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO * -+ psKernelSyncInfo) -+{ -+ FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO), -+ psKernelSyncInfo, IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ if (psMemInfo->psKernelSyncInfo) { -+ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo); -+ } -+ -+ if (eError == PVRSRV_OK) { -+ eError = FreeDeviceMem(psMemInfo); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie, -+ PVRSRV_KERNEL_MEM_INFO * -+ psMemInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(hDevCookie); -+ -+ if (!psMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psMemInfo->sMemBlk.hResItem != IMG_NULL) { -+ eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem); -+ } else { -+ -+ FreeDeviceMemCallBack(psMemInfo, 0); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie, -+ PVRSRV_PER_PROCESS_DATA * -+ psPerProc, -+ IMG_HANDLE hDevMemHeap, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Alignment, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsMemInfo) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo; -+ PVRSRV_ERROR eError; -+ BM_HEAP *psBMHeap; -+ IMG_HANDLE hDevMemContext; -+ -+ if (!hDevMemHeap || (ui32Size == 0)) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = AllocDeviceMem(hDevCookie, -+ hDevMemHeap, -+ ui32Flags, ui32Size, ui32Alignment, &psMemInfo); -+ -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) { -+ psMemInfo->psKernelSyncInfo = IMG_NULL; -+ } else { -+ -+ psBMHeap = (BM_HEAP *) hDevMemHeap; -+ hDevMemContext = (IMG_HANDLE) psBMHeap->pBMContext; -+ eError = PVRSRVAllocSyncInfoKM(hDevCookie, -+ hDevMemContext, -+ &psMemInfo->psKernelSyncInfo); -+ if (eError != PVRSRV_OK) { -+ goto free_mainalloc; -+ } -+ } -+ -+ *ppsMemInfo = psMemInfo; -+ -+ if (ui32Flags & PVRSRV_MEM_NO_RESMAN) { -+ psMemInfo->sMemBlk.hResItem = IMG_NULL; -+ } else { -+ -+ psMemInfo->sMemBlk.hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_DEVICEMEM_ALLOCATION, -+ psMemInfo, 0, FreeDeviceMemCallBack); -+ if (psMemInfo->sMemBlk.hResItem == IMG_NULL) { -+ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto free_mainalloc; 
-+ } -+ } -+ -+ return (PVRSRV_OK); -+ -+free_mainalloc: -+ FreeDeviceMem(psMemInfo); -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie, -+ PVRSRV_KERNEL_MEM_INFO -+ * psMemInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie; -+ -+ PVR_UNREFERENCED_PARAMETER(hDevCookie); -+ -+ if (!psMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ ResManDissociateRes(psMemInfo->sMemBlk.hResItem, -+ psDeviceNode->hResManContext); -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags, -+ IMG_UINT32 * pui32Total, -+ IMG_UINT32 * pui32Free, -+ IMG_UINT32 * -+ pui32LargestBlock) -+{ -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Flags); -+ PVR_UNREFERENCED_PARAMETER(pui32Total); -+ PVR_UNREFERENCED_PARAMETER(pui32Free); -+ PVR_UNREFERENCED_PARAMETER(pui32LargestBlock); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO * -+ psMemInfo) -+{ -+ if (!psMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem); -+} -+ -+static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam; -+ IMG_HANDLE hOSWrapMem = IMG_NULL; -+ BM_BUF *psBMBuf; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ psBMBuf = (BM_BUF *) psMemInfo->sMemBlk.hBuffer; -+ -+ if ((psBMBuf->ui32RefCount == 1) && (psMemInfo->psKernelSyncInfo)) { -+ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo); -+ hOSWrapMem = psBMBuf->hOSWrapMem; -+ } -+ -+ if (eError == PVRSRV_OK) { -+ eError = FreeDeviceMem(psMemInfo); -+ } -+ -+ if (hOSWrapMem) { -+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVIsWrappedExtMemoryKM(IMG_HANDLE hDevCookie, -+ PVRSRV_PER_PROCESS_DATA -+ *psPerProc, -+ IMG_UINT32 -+ *pui32ByteSize, -+ IMG_VOID -+ **pvLinAddr) -+{ -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ IMG_UINT32 ui32HostPageSize = HOST_PAGESIZE(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_ERROR eError; -+ IMG_SYS_PHYADDR sIntSysPAddr; -+ IMG_HANDLE hOSWrapMem = IMG_NULL; -+ IMG_HANDLE hDevMemHeap; -+ IMG_UINT32 ui32PageOffset = 0; -+ -+ IMG_UINT32 ui32ReturnedByteSize = *pui32ByteSize; -+ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ -+ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].hDevMemHeap; -+ -+ if (pvLinAddr) { -+ ui32PageOffset = ((IMG_UINT32)*pvLinAddr) & ~PAGE_MASK; -+ *pvLinAddr = (IMG_VOID *)((IMG_UINT32)*pvLinAddr & PAGE_MASK); -+ ui32ReturnedByteSize += ui32PageOffset; -+ -+ /* let's start by getting the address of the first page */ -+ eError = OSAcquirePhysPageAddr(*pvLinAddr, -+ ui32HostPageSize, -+ &sIntSysPAddr, -+ &hOSWrapMem, -+ IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVIsWrappedExtMemoryKM: Failed to alloc memory for block")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorExitPhase1; -+ } -+ -+ OSReleasePhysPageAddr(hOSWrapMem, IMG_FALSE); -+ hOSWrapMem = IMG_NULL; -+ -+ /* now check if this memory address is already wrapped */ -+ if (BM_IsWrappedCheckSize(hDevMemHeap, -+ ui32PageOffset, -+ sIntSysPAddr, -+ *pui32ByteSize)) { -+ /* already 
wrapped */ -+ eError = PVRSRV_OK; -+ } else { -+ /* not mapped in this heap */ -+ /* try the alternative heap */ -+ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].hDevMemHeap; -+ -+ if (BM_IsWrappedCheckSize(hDevMemHeap, -+ ui32PageOffset, -+ sIntSysPAddr, -+ *pui32ByteSize)) { -+ /* already wrapped */ -+ eError = PVRSRV_OK; -+ } else { -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ } -+ } -+ } -+ -+ErrorExitPhase1: -+ -+ *pui32ByteSize = ui32ReturnedByteSize; -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie, -+ PVRSRV_PER_PROCESS_DATA * -+ psPerProc, -+ IMG_UINT32 ui32ByteSize, -+ IMG_UINT32 ui32PageOffset, -+ IMG_BOOL bPhysContig, -+ IMG_SYS_PHYADDR * -+ psExtSysPAddr, -+ IMG_VOID * pvLinAddr, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsMemInfo) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ IMG_UINT32 ui32HostPageSize = HOST_PAGESIZE(); -+ IMG_HANDLE hDevMemHeap, hDevMemContext; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ BM_HANDLE hBuffer; -+ PVRSRV_MEMBLK *psMemBlock; -+ IMG_BOOL bBMError; -+ BM_HEAP *psBMHeap; -+ PVRSRV_ERROR eError; -+ IMG_VOID *pvPageAlignedCPUVAddr; -+ IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL; -+ IMG_HANDLE hOSWrapMem = IMG_NULL; -+ BM_BUF *psBMBuf; -+ IMG_SYS_PHYADDR *pPageList = psExtSysPAddr; -+ IMG_UINT32 ui32PageCount; -+ -+ IMG_UINT32 ui32CalculatedPageOffset = ((IMG_UINT32)pvLinAddr) & ~PAGE_MASK; -+ if (ui32CalculatedPageOffset != ui32PageOffset) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVWrapExtMemoryKM: offset from address not match offset param")); -+ return PVRSRV_ERROR_BAD_MAPPING; -+ } -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; -+ PVR_ASSERT(psDeviceNode != IMG_NULL); -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ hDevMemHeap = -+ psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo-> -+ ui32MappingHeapID].hDevMemHeap; -+ -+ ui32PageCount = -+ HOST_PAGEALIGN(ui32ByteSize + ui32PageOffset) / ui32HostPageSize; -+ -+ if (pvLinAddr) { -+ pvPageAlignedCPUVAddr = -+ (IMG_VOID *) ((IMG_UINT8 *) pvLinAddr - ui32PageOffset); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageCount * sizeof(IMG_SYS_PHYADDR), -+ (IMG_VOID **) & psIntSysPAddr, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* let's start by getting the address of the first page */ -+ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr, -+ ui32HostPageSize, -+ psIntSysPAddr, &hOSWrapMem, IMG_TRUE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorExitPhase1; -+ } -+ /* now check if this memory address is already wrapped */ -+ if (BM_IsWrapped(hDevMemHeap, ui32PageOffset, psIntSysPAddr[0])) { -+ /* already wrapped */ -+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); -+ hOSWrapMem = IMG_NULL; -+ } else if (ui32PageCount > 1) { -+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); -+ hOSWrapMem = IMG_NULL; -+ /* the memory is going to wrapped for the first time, -+ * so we need full page list */ -+ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr, -+ ui32PageCount * -+ ui32HostPageSize, -+ psIntSysPAddr, -+ &hOSWrapMem, -+ IMG_TRUE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorExitPhase1; 
-+ } -+ } -+ -+ psExtSysPAddr = psIntSysPAddr; -+ } else { -+ -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), -+ (IMG_VOID **) & psMemInfo, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorExitPhase2; -+ } -+ -+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); -+ -+ psMemBlock = &(psMemInfo->sMemBlk); -+ -+ bBMError = BM_Wrap(hDevMemHeap, -+ ui32ByteSize, -+ ui32PageOffset, -+ bPhysContig, -+ psExtSysPAddr, -+ IMG_TRUE, IMG_NULL, &psMemInfo->ui32Flags, &hBuffer); -+ if (!bBMError) { -+ /* Alloc failed from current mapping heap, try the other one */ -+ psDevMemoryInfo->ui32MappingHeapID = -+ psDevMemoryInfo->ui32MappingHeapID == -+ SGX_GENERAL_MAPPING_HEAP_ID ? SGX_ALT_MAPPING_HEAP_ID : -+ SGX_GENERAL_MAPPING_HEAP_ID; -+ hDevMemHeap = -+ psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo-> -+ ui32MappingHeapID]. -+ hDevMemHeap; -+ bBMError = -+ BM_Wrap(hDevMemHeap, ui32ByteSize, ui32PageOffset, -+ bPhysContig, psExtSysPAddr, IMG_TRUE, IMG_NULL, -+ &psMemInfo->ui32Flags, &hBuffer); -+ if (!bBMError) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVWrapExtMemoryKM: BM_Wrap Failed")); -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ goto ErrorExitPhase2; -+ } -+ } -+ /* wrap was successful and BM_Wrap has taken ownership of the page list, -+ * clear psIntSysPAddr here, so we don't double free the memory */ -+ psIntSysPAddr = IMG_NULL; -+ -+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); -+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); -+ -+ psMemBlock->hBuffer = (IMG_HANDLE) hBuffer; -+ -+ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); -+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; -+ psMemInfo->ui32AllocSize = ui32ByteSize; -+ -+ psBMHeap = (BM_HEAP *) hDevMemHeap; -+ hDevMemContext = (IMG_HANDLE) psBMHeap->pBMContext; -+ psBMBuf = (BM_BUF *) hBuffer; -+ if (psBMBuf->ui32RefCount == 1) { -+ eError = PVRSRVAllocSyncInfoKM(hDevCookie, -+ hDevMemContext, -+ &psMemInfo->psKernelSyncInfo); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExitPhase2; -+ } -+ psBMBuf->pvKernelSyncInfo = psMemInfo->psKernelSyncInfo; -+ psBMBuf->hOSWrapMem = hOSWrapMem; -+ } else { -+ psMemInfo->psKernelSyncInfo = psBMBuf->pvKernelSyncInfo; -+ } -+ -+ psMemInfo->sMemBlk.hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_DEVICEMEM_WRAP, psMemInfo, 0, -+ UnwrapExtMemoryCallBack); -+ /* check if we were passed a page list -+ * but we didn't use use it */ -+ if (pPageList && (pPageList != psExtSysPAddr)) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageCount * sizeof(IMG_SYS_PHYADDR), -+ (IMG_VOID *) pPageList, 0); -+ } -+ -+ *ppsMemInfo = psMemInfo; -+ -+ return PVRSRV_OK; -+ -+ErrorExitPhase2: -+ if (psMemInfo) { -+ FreeDeviceMem(psMemInfo); -+ } -+ -+ if (hOSWrapMem) -+ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); -+ErrorExitPhase1: -+ if (psIntSysPAddr) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageCount * sizeof(IMG_SYS_PHYADDR), -+ psIntSysPAddr, IMG_NULL); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO * -+ psMemInfo) -+{ -+ if (!psMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem); -+} -+ -+static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ 
return FreeDeviceMem(psMemInfo); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA * -+ psPerProc, -+ PVRSRV_KERNEL_MEM_INFO * -+ psSrcMemInfo, -+ IMG_HANDLE hDstDevMemHeap, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsDstMemInfo) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ IMG_UINT32 ui32PageCount, ui32PageOffset; -+ IMG_UINT32 ui32HostPageSize = HOST_PAGESIZE(); -+ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; -+ IMG_DEV_PHYADDR sDevPAddr; -+ BM_BUF *psBuf; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; -+ BM_HANDLE hBuffer; -+ PVRSRV_MEMBLK *psMemBlock; -+ IMG_BOOL bBMError; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_VOID *pvPageAlignedCPUVAddr; -+ -+ if (!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceMemoryKM: invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *ppsDstMemInfo = IMG_NULL; -+ -+ ui32PageOffset = -+ psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1); -+ ui32PageCount = -+ HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + -+ ui32PageOffset) / ui32HostPageSize; -+ pvPageAlignedCPUVAddr = -+ (IMG_VOID *) ((IMG_UINT8 *) psSrcMemInfo->pvLinAddrKM - -+ ui32PageOffset); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageCount * sizeof(IMG_SYS_PHYADDR), -+ (IMG_VOID **) & psSysPAddr, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psBuf = psSrcMemInfo->sMemBlk.hBuffer; -+ -+ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode; -+ -+ sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - ui32PageOffset; -+ for (i = 0; i < ui32PageCount; i++) { -+ eError = -+ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceMemoryKM: Failed to retrieve page list from device")); -+ goto ErrorExit; -+ } -+ -+ psSysPAddr[i] = -+ SysDevPAddrToSysPAddr(psDeviceNode->sDevId.eDeviceType, -+ sDevPAddr); -+ -+ sDevVAddr.uiAddr += ui32HostPageSize; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), -+ (IMG_VOID **) & psMemInfo, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorExit; -+ } -+ -+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); -+ -+ psMemBlock = &(psMemInfo->sMemBlk); -+ -+ bBMError = BM_Wrap(psBuf->pMapping->pBMHeap, -+ psSrcMemInfo->ui32AllocSize, -+ ui32PageOffset, -+ IMG_FALSE, -+ psSysPAddr, -+ IMG_TRUE, -+ pvPageAlignedCPUVAddr, -+ &psMemInfo->ui32Flags, &hBuffer); -+ -+ if (!bBMError) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceMemoryKM: BM_Wrap Failed")); -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ goto ErrorExit; -+ } -+ -+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); -+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); -+ -+ psMemBlock->hBuffer = (IMG_HANDLE) hBuffer; -+ -+ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM; -+ -+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; -+ psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize; -+ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo; -+ -+ psMemInfo->sMemBlk.hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_DEVICEMEM_MAPPING, psMemInfo, 0, -+ UnmapDeviceMemoryCallBack); -+ -+ *ppsDstMemInfo = psMemInfo; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ if (psSysPAddr) 
{ -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32PageCount * sizeof(IMG_SYS_PHYADDR), psSysPAddr, -+ IMG_NULL); -+ } -+ -+ if (psMemInfo) { -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV -+PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo) -+{ -+ if (!psMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem); -+} -+ -+static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ return FreeDeviceMem(psMemInfo); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV -+PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDeviceClassBuffer, -+ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo, -+ IMG_HANDLE * phOSMapInfo) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_MEM_INFO *psMemInfo; -+ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer; -+ IMG_SYS_PHYADDR *psSysPAddr; -+ IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr; -+ IMG_BOOL bPhysContig; -+ BM_CONTEXT *psBMContext; -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ IMG_HANDLE hDevMemHeap; -+ IMG_UINT32 ui32ByteSize; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32PageSize = HOST_PAGESIZE(); -+ BM_HANDLE hBuffer; -+ PVRSRV_MEMBLK *psMemBlock; -+ IMG_BOOL bBMError; -+ -+ if (!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceClassMemoryKM: invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER *) hDeviceClassBuffer; -+ -+ eError = -+ psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer-> -+ hExtDevice, -+ psDeviceClassBuffer-> -+ hExtBuffer, &psSysPAddr, -+ &ui32ByteSize, &pvCPUVAddr, -+ phOSMapInfo, &bPhysContig); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceClassMemoryKM: unable to get buffer address")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psBMContext = (BM_CONTEXT *) psDeviceClassBuffer->hDevMemContext; -+ psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo; -+ hDevMemHeap = -+ psDevMemoryInfo->psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID]. 
-+ hDevMemHeap; -+ -+ ui32Offset = ((IMG_UINT32) pvCPUVAddr) & (ui32PageSize - 1); -+ pvPageAlignedCPUVAddr = -+ (IMG_VOID *) ((IMG_UINT8 *) pvCPUVAddr - ui32Offset); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), -+ (IMG_VOID **) & psMemInfo, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block")); -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ -+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); -+ -+ psMemBlock = &(psMemInfo->sMemBlk); -+ -+ bBMError = BM_Wrap(hDevMemHeap, -+ ui32ByteSize, -+ ui32Offset, -+ bPhysContig, -+ psSysPAddr, -+ IMG_FALSE, -+ pvPageAlignedCPUVAddr, -+ &psMemInfo->ui32Flags, &hBuffer); -+ -+ if (!bBMError) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); -+ return PVRSRV_ERROR_BAD_MAPPING; -+ } -+ -+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); -+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); -+ -+ psMemBlock->hBuffer = (IMG_HANDLE) hBuffer; -+ -+ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); -+ -+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; -+ psMemInfo->ui32AllocSize = ui32ByteSize; -+ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo; -+ -+ psMemInfo->sMemBlk.hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_DEVICECLASSMEM_MAPPING, psMemInfo, 0, -+ UnmapDeviceClassMemoryCallBack); -+ -+ *ppsMemInfo = psMemInfo; -+ -+ return PVRSRV_OK; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/env_data.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/env_data.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/env_data.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/env_data.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,57 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _ENV_DATA_ -+#define _ENV_DATA_ -+ -+#include -+#include -+#include -+ -+#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000 -+#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 -+ -+typedef struct _PVR_PCI_DEV_TAG { -+ struct pci_dev *psPCIDev; -+ HOST_PCI_INIT_FLAGS ePCIFlags; -+ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; -+} PVR_PCI_DEV; -+ -+typedef struct _ENV_DATA_TAG { -+ IMG_VOID *pvBridgeData; -+ struct pm_dev *psPowerDevice; -+ IMG_BOOL bLISRInstalled; -+ IMG_BOOL bMISRInstalled; -+ IMG_UINT32 ui32IRQ; -+ IMG_VOID *pvISRCookie; -+ struct workqueue_struct *psMISRWorkqueue; -+ struct work_struct sMISRWork; -+ struct workqueue_struct *psPerfWorkqueue; -+ struct delayed_work sPerfWork; -+ IMG_VOID *pvSysData; /*for MISR work */ -+} ENV_DATA; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/event.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/event.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/event.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/event.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,276 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "services_headers.h" -+#include "mm.h" -+#include "pvrmmap.h" -+#include "mmap.h" -+#include "env_data.h" -+#include "proc.h" -+#include "mutex.h" -+ -+extern PVRSRV_LINUX_MUTEX gPVRSRVLock; -+ -+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG { -+ rwlock_t sLock; -+ struct list_head sList; -+ -+} PVRSRV_LINUX_EVENT_OBJECT_LIST; -+ -+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG { -+ atomic_t sTimeStamp; -+ IMG_UINT32 ui32TimeStampPrevious; -+#ifdef DEBUG -+ unsigned int ui32Stats; -+#endif -+ wait_queue_head_t sWait; -+ struct list_head sList; -+ IMG_HANDLE hResItem; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList; -+} PVRSRV_LINUX_EVENT_OBJECT; -+ -+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE * phEventObjectList) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList; -+ -+ if (OSAllocMem -+ (PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), -+ (IMG_VOID **) & psEvenObjectList, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "LinuxEventObjectCreate: failed to allocate memory for event list")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ INIT_LIST_HEAD(&psEvenObjectList->sList); -+ -+ rwlock_init(&psEvenObjectList->sLock); -+ -+ *phEventObjectList = (IMG_HANDLE *) psEvenObjectList; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList) -+{ -+ -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = -+ (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList; -+ -+ if (psEvenObjectList) { -+ if (!list_empty(&psEvenObjectList->sList)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "LinuxEventObjectListDestroy: Event List is not empty")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), -+ psEvenObjectList, IMG_NULL); -+ } -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, -+ IMG_HANDLE hOSEventObject) -+{ -+ if (hOSEventObjectList) { -+ if (hOSEventObject) { -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = -+ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; -+#ifdef DEBUG -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "LinuxEventObjectListDelete: Event object waits: %lu", -+ psLinuxEventObject->ui32Stats)); -+#endif -+ if (ResManFreeResByPtr(psLinuxEventObject->hResItem) != -+ PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+ } -+ } -+ return PVRSRV_ERROR_GENERIC; -+ -+} -+ -+static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = -+ psLinuxEventObject->psLinuxEventObjectList; -+ -+ write_lock_bh(&psLinuxEventObjectList->sLock); -+ list_del(&psLinuxEventObject->sList); -+ write_unlock_bh(&psLinuxEventObjectList->sLock); -+ -+#ifdef DEBUG -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "LinuxEventObjectDeleteCallback: Event object waits: %lu", -+ psLinuxEventObject->ui32Stats)); -+#endif -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, -+ IMG_NULL); -+ -+ return 
PVRSRV_OK; -+} -+ -+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, -+ IMG_HANDLE * phOSEventObject) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = -+ (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hOSEventObjectList; -+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); -+ PVRSRV_PER_PROCESS_DATA *psPerProc; -+ -+ psPerProc = PVRSRVPerProcessData(ui32PID); -+ if (psPerProc == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "LinuxEventObjectAdd: Couldn't find per-process data")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ if (OSAllocMem -+ (PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), -+ (IMG_VOID **) & psLinuxEventObject, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "LinuxEventObjectAdd: failed to allocate memory ")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ INIT_LIST_HEAD(&psLinuxEventObject->sList); -+ -+ atomic_set(&psLinuxEventObject->sTimeStamp, 0); -+ psLinuxEventObject->ui32TimeStampPrevious = 0; -+ -+#ifdef DEBUG -+ psLinuxEventObject->ui32Stats = 0; -+#endif -+ init_waitqueue_head(&psLinuxEventObject->sWait); -+ -+ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList; -+ -+ psLinuxEventObject->hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_EVENT_OBJECT, psLinuxEventObject, 0, -+ &LinuxEventObjectDeleteCallback); -+ -+ write_lock_bh(&psLinuxEventObjectList->sLock); -+ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList); -+ write_unlock_bh(&psLinuxEventObjectList->sLock); -+ -+ *phOSEventObject = psLinuxEventObject; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = -+ (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hOSEventObjectList; -+ struct list_head *psListEntry, *psListEntryTemp, *psList; -+ psList = &psLinuxEventObjectList->sList; -+ -+ list_for_each_safe(psListEntry, psListEntryTemp, psList) { -+ -+ psLinuxEventObject = -+ list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); -+ -+ atomic_inc(&psLinuxEventObject->sTimeStamp); -+ wake_up_interruptible(&psLinuxEventObject->sWait); -+ } -+ -+ return PVRSRV_OK; -+ -+} -+ -+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, -+ IMG_UINT32 ui32MSTimeout) -+{ -+ DEFINE_WAIT(sWait); -+ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = -+ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; -+ -+ IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout); -+ -+ do { -+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, -+ TASK_INTERRUPTIBLE); -+ -+ if (psLinuxEventObject->ui32TimeStampPrevious != -+ atomic_read(&psLinuxEventObject->sTimeStamp)) { -+ break; -+ } -+ -+ LinuxUnLockMutex(&gPVRSRVLock); -+ -+ ui32TimeOutJiffies = schedule_timeout(ui32TimeOutJiffies); -+ -+#ifdef DEBUG -+ psLinuxEventObject->ui32Stats++; -+#endif -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ } while (ui32TimeOutJiffies); -+ -+ finish_wait(&psLinuxEventObject->sWait, &sWait); -+ -+ psLinuxEventObject->ui32TimeStampPrevious = -+ atomic_read(&psLinuxEventObject->sTimeStamp); -+ -+ return ui32TimeOutJiffies ? 
PVRSRV_OK : PVRSRV_ERROR_TIMEOUT; -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/event.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/event.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/event.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/event.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,35 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE * phEventObjectList); -+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); -+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, -+ IMG_HANDLE * phOSEventObject); -+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, -+ IMG_HANDLE hOSEventObject); -+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); -+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, -+ IMG_UINT32 ui32MSTimeout); -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/handle.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/handle.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/handle.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/handle.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,1312 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "services_headers.h" -+#include "handle.h" -+ -+#ifdef DEBUG -+#define HANDLE_BLOCK_SIZE 1 -+#else -+#define HANDLE_BLOCK_SIZE 256 -+#endif -+ -+#define HANDLE_HASH_TAB_INIT_SIZE 32 -+ -+#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount) -+ -+#define INDEX_TO_HANDLE(psBase, idx) ((IMG_HANDLE)((idx) + 1)) -+#define HANDLE_TO_INDEX(psBase, hand) ((IMG_UINT32)(hand) - 1) -+ -+#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i)) -+#define HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h))) -+ -+#define HANDLE_PTR_TO_INDEX(psBase, psHandle) ((psHandle) - ((psBase)->psHandleArray)) -+#define HANDLE_PTR_TO_HANDLE(psBase, psHandle) \ -+ INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle)) -+ -+#define ROUND_UP_TO_MULTIPLE(a, b) ((((a) + (b) - 1) / (b)) * (b)) -+ -+#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0) -+ -+#define SET_FLAG(v, f) ((void)((v) |= (f))) -+#define CLEAR_FLAG(v, f) ((void)((v) &= ~(f))) -+#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0)) -+ -+#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f) -+ -+#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f) -+#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f) -+#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f) -+ -+#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) -+ -+#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) -+ -+#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) -+ -+#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE) -+ -+#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE) -+ -+struct sHandleList { -+ IMG_UINT32 ui32Prev; -+ IMG_UINT32 ui32Next; -+ IMG_HANDLE hParent; -+}; -+ -+enum ePVRSRVInternalHandleFlag { -+ INTERNAL_HANDLE_FLAG_BATCHED = 0x01, -+ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02, -+}; -+ -+struct sHandle { -+ -+ PVRSRV_HANDLE_TYPE eType; -+ -+ IMG_VOID *pvData; -+ -+ IMG_UINT32 ui32NextIndexPlusOne; -+ -+ enum ePVRSRVInternalHandleFlag eInternalFlag; -+ -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag; -+ -+ IMG_UINT32 ui32PID; -+ -+ IMG_UINT32 ui32Index; -+ -+ struct sHandleList sChildren; -+ -+ struct sHandleList sSiblings; -+}; -+ -+struct _PVRSRV_HANDLE_BASE_ { -+ -+ IMG_HANDLE hBaseBlockAlloc; -+ -+ IMG_UINT32 ui32PID; -+ -+ IMG_HANDLE hHandBlockAlloc; -+ -+ PRESMAN_ITEM psResManItem; -+ -+ struct sHandle *psHandleArray; -+ -+ HASH_TABLE *psHashTab; -+ -+ IMG_UINT32 ui32FreeHandCount; -+ -+ IMG_UINT32 ui32FirstFreeIndex; -+ -+ IMG_UINT32 ui32TotalHandCount; -+ -+ IMG_UINT32 ui32LastFreeIndexPlusOne; -+ -+ IMG_UINT32 ui32HandBatchSize; -+ -+ IMG_UINT32 ui32TotalHandCountPreBatch; -+ -+ IMG_UINT32 ui32FirstBatchIndexPlusOne; -+ -+ IMG_UINT32 ui32BatchHandAllocFailures; -+}; -+ -+enum eHandKey { -+ HAND_KEY_DATA = 0, -+ HAND_KEY_TYPE, -+ HAND_KEY_PARENT, -+ HAND_KEY_LEN -+}; -+ -+PVRSRV_HANDLE_BASE *gpsKernelHandleBase; -+ -+typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN]; -+ -+static INLINE -+ IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList 
*psList, -+ IMG_HANDLE hParent) -+{ -+ psList->ui32Next = ui32Index; -+ psList->ui32Prev = ui32Index; -+ psList->hParent = hParent; -+} -+ -+static INLINE -+ IMG_VOID InitParentList(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandle *psHandle) -+{ -+ IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle); -+ -+ HandleListInit(ui32Parent, &psHandle->sChildren, -+ INDEX_TO_HANDLE(psBase, ui32Parent)); -+} -+ -+static INLINE -+ IMG_VOID InitChildEntry(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandle *psHandle) -+{ -+ HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle), -+ &psHandle->sSiblings, IMG_NULL); -+} -+ -+static INLINE -+ IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList) -+{ -+ IMG_BOOL bIsEmpty; -+ -+ bIsEmpty = (IMG_BOOL) (psList->ui32Next == ui32Index); -+ -+#ifdef DEBUG -+ { -+ IMG_BOOL bIsEmpty2; -+ -+ bIsEmpty2 = (IMG_BOOL) (psList->ui32Prev == ui32Index); -+ PVR_ASSERT(bIsEmpty == bIsEmpty2); -+ } -+#endif -+ -+ return bIsEmpty; -+} -+ -+#ifdef DEBUG -+static INLINE -+ IMG_BOOL NoChildren(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle) -+{ -+ PVR_ASSERT(psHandle->sChildren.hParent == -+ HANDLE_PTR_TO_HANDLE(psBase, psHandle)); -+ -+ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), -+ &psHandle->sChildren); -+} -+ -+static INLINE -+ IMG_BOOL NoParent(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psHandle) -+{ -+ if (HandleListIsEmpty -+ (HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings)) { -+ PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL); -+ -+ return IMG_TRUE; -+ } else { -+ PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL); -+ } -+ return IMG_FALSE; -+} -+#endif -+static INLINE IMG_HANDLE ParentHandle(struct sHandle *psHandle) -+{ -+ return psHandle->sSiblings.hParent; -+} -+ -+#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \ -+ ((struct sHandleList *)((char *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? 
(po) : (eo)))) -+ -+static INLINE -+ IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE * psBase, -+ IMG_UINT32 ui32InsIndex, -+ struct sHandleList *psIns, -+ IMG_SIZE_T uiParentOffset, -+ IMG_UINT32 ui32EntryIndex, -+ struct sHandleList *psEntry, -+ IMG_SIZE_T uiEntryOffset, -+ IMG_UINT32 ui32ParentIndex) -+{ -+ struct sHandleList *psPrevIns = -+ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, -+ ui32ParentIndex, uiParentOffset, -+ uiEntryOffset); -+ -+ PVR_ASSERT(psEntry->hParent == IMG_NULL); -+ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next); -+ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET -+ (psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, -+ uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase, -+ ui32ParentIndex)); -+ -+ psEntry->ui32Prev = psIns->ui32Prev; -+ psIns->ui32Prev = ui32EntryIndex; -+ psEntry->ui32Next = ui32InsIndex; -+ psPrevIns->ui32Next = ui32EntryIndex; -+ -+ psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex); -+} -+ -+static INLINE -+ IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE * psBase, struct sHandle *psParent, -+ struct sHandle *psChild) -+{ -+ IMG_UINT32 ui32Parent = -+ HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent); -+ -+ PVR_ASSERT(ui32Parent == -+ (IMG_UINT32) HANDLE_PTR_TO_INDEX(psBase, psParent)); -+ -+ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, -+ offsetof(struct sHandle, sChildren), -+ HANDLE_PTR_TO_INDEX(psBase, psChild), -+ &psChild->sSiblings, offsetof(struct sHandle, -+ sSiblings), -+ ui32Parent); -+ -+} -+ -+static INLINE -+ IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE * psBase, -+ IMG_UINT32 ui32EntryIndex, -+ struct sHandleList *psEntry, -+ IMG_SIZE_T uiEntryOffset, -+ IMG_SIZE_T uiParentOffset) -+{ -+ if (!HandleListIsEmpty(ui32EntryIndex, psEntry)) { -+ struct sHandleList *psPrev = -+ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, -+ HANDLE_TO_INDEX(psBase, -+ psEntry-> -+ hParent), -+ uiParentOffset, -+ uiEntryOffset); -+ struct sHandleList *psNext = -+ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, -+ HANDLE_TO_INDEX(psBase, -+ psEntry-> -+ hParent), -+ uiParentOffset, -+ uiEntryOffset); -+ -+ PVR_ASSERT(psEntry->hParent != IMG_NULL); -+ -+ psPrev->ui32Next = psEntry->ui32Next; -+ psNext->ui32Prev = psEntry->ui32Prev; -+ -+ HandleListInit(ui32EntryIndex, psEntry, IMG_NULL); -+ } -+} -+ -+static INLINE -+ IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandle *psHandle) -+{ -+ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle), -+ &psHandle->sSiblings, offsetof(struct sHandle, -+ sSiblings), -+ offsetof(struct sHandle, sChildren)); -+} -+ -+static INLINE -+ PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandleList *psHead, -+ IMG_SIZE_T uiParentOffset, -+ IMG_SIZE_T uiEntryOffset, -+ PVRSRV_ERROR(*pfnIterFunc) -+ (PVRSRV_HANDLE_BASE *, struct sHandle *)) -+{ -+ IMG_UINT32 ui32Index; -+ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent); -+ -+ PVR_ASSERT(psHead->hParent != IMG_NULL); -+ -+ for (ui32Index = psHead->ui32Next; ui32Index != ui32Parent;) { -+ struct sHandle *psHandle = -+ INDEX_TO_HANDLE_PTR(psBase, ui32Index); -+ struct sHandleList *psEntry = -+ LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, -+ ui32Parent, uiParentOffset, -+ uiEntryOffset); -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psEntry->hParent == psHead->hParent); -+ -+ ui32Index = psEntry->ui32Next; -+ -+ eError = (*pfnIterFunc) (psBase, psHandle); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ 
-+static INLINE -+ PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandle *psParent, -+ PVRSRV_ERROR(*pfnIterFunc) -+ (PVRSRV_HANDLE_BASE *, struct sHandle *)) -+{ -+ return HandleListIterate(psBase, &psParent->sChildren, -+ offsetof(struct sHandle, sChildren), -+ offsetof(struct sHandle, sSiblings), -+ pfnIterFunc); -+} -+ -+static INLINE -+ PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandle **ppsHandle, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ IMG_UINT32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle); -+ struct sHandle *psHandle; -+ -+ if (!INDEX_IS_VALID(psBase, ui32Index)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "GetHandleStructure: Handle index out of range (%u >= %u)", -+ ui32Index, psBase->ui32TotalHandCount)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index); -+ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "GetHandleStructure: Handle not allocated (index: %u)", -+ ui32Index)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "GetHandleStructure: Handle type mismatch (%d != %d)", -+ eType, psHandle->eType)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_ASSERT(psBase->ui32PID == psHandle->ui32PID); -+ -+ *ppsHandle = psHandle; -+ -+ return PVRSRV_OK; -+} -+ -+static INLINE IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle) -+{ -+ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? -+ ParentHandle(psHandle) : IMG_NULL; -+} -+ -+static INLINE -+ IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE * psBase, -+ IMG_VOID * pvData, PVRSRV_HANDLE_TYPE eType, -+ IMG_HANDLE hParent) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBase); -+ -+ aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T) pvData; -+ aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T) eType; -+ aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T) hParent; -+} -+ -+static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE * psBase) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psBase->psHandleArray != IMG_NULL) { -+ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ psBase->ui32TotalHandCount * -+ sizeof(struct sHandle), -+ psBase->psHandleArray, -+ psBase->hHandBlockAlloc); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeHandleArray: Error freeing memory (%d)", -+ eError)); -+ } else { -+ psBase->psHandleArray = IMG_NULL; -+ } -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE * psBase, -+ struct sHandle *psHandle) -+{ -+ HAND_KEY aKey; -+ IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle); -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psBase->ui32PID == psHandle->ui32PID); -+ -+ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, -+ ParentIfPrivate(psHandle)); -+ -+ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) -+ && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) { -+ IMG_HANDLE hHandle; -+ hHandle = -+ (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey); -+ -+ PVR_ASSERT(hHandle != IMG_NULL); -+ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index)); -+ PVR_UNREFERENCED_PARAMETER(hHandle); -+ } -+ -+ UnlinkFromParent(psBase, psHandle); -+ -+ eError = IterateOverChildren(psBase, psHandle, FreeHandle); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeHandle: Error whilst freeing subhandles (%d)", -+ eError)); -+ return eError; -+ } -+ -+ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE; -+ -+ if (BATCHED_HANDLE(psHandle) -+ && 
!BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) { -+ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle); -+ -+ return PVRSRV_OK; -+ } -+ -+ if (psBase->ui32FreeHandCount == 0) { -+ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0); -+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0); -+ -+ psBase->ui32FirstFreeIndex = ui32Index; -+ } else { -+ -+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0); -+ PVR_ASSERT(INDEX_TO_HANDLE_PTR -+ (psBase, -+ psBase->ui32LastFreeIndexPlusOne - -+ 1)->ui32NextIndexPlusOne == 0); -+ -+ INDEX_TO_HANDLE_PTR(psBase, -+ psBase->ui32LastFreeIndexPlusOne - -+ 1)->ui32NextIndexPlusOne = ui32Index + 1; -+ } -+ -+ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0); -+ -+ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1; -+ -+ psBase->ui32FreeHandCount++; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE * psBase) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount) { -+ return eError; -+ } -+ -+ for (i = 0; i < psBase->ui32TotalHandCount; i++) { -+ struct sHandle *psHandle; -+ -+ psHandle = INDEX_TO_HANDLE_PTR(psBase, i); -+ -+ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE) { -+ eError = FreeHandle(psBase, psHandle); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeAllHandles: FreeHandle failed (%d)", -+ eError)); -+ break; -+ } -+ -+ if (psBase->ui32FreeHandCount == -+ psBase->ui32TotalHandCount) { -+ break; -+ } -+ } -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE * psBase) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (HANDLES_BATCHED(psBase)) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "FreeHandleBase: Uncommitted/Unreleased handle batch")); -+ PVRSRVReleaseHandleBatch(psBase); -+ } -+ -+ eError = FreeAllHandles(psBase); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeHandleBase: Couldn't free handles (%d)", eError)); -+ return eError; -+ } -+ -+ eError = FreeHandleArray(psBase); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeHandleBase: Couldn't free handle array (%d)", -+ eError)); -+ return eError; -+ } -+ -+ if (psBase->psHashTab != IMG_NULL) { -+ -+ HASH_Delete(psBase->psHashTab); -+ } -+ -+ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(*psBase), psBase, psBase->hBaseBlockAlloc); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeHandleBase: Couldn't free handle base (%d)", -+ eError)); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static INLINE -+ IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE * psBase, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent) -+{ -+ HAND_KEY aKey; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ InitKey(aKey, psBase, pvData, eType, hParent); -+ -+ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey); -+} -+ -+static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE * psBase, -+ IMG_UINT32 ui32Delta) -+{ -+ struct sHandle *psNewHandleArray; -+ IMG_HANDLE hNewHandBlockAlloc; -+ PVRSRV_ERROR eError; -+ struct sHandle *psHandle; -+ IMG_UINT32 ui32DeltaRounded = -+ ROUND_UP_TO_MULTIPLE(ui32Delta, HANDLE_BLOCK_SIZE); -+ IMG_UINT32 ui32NewTotalHandCount = -+ psBase->ui32TotalHandCount + ui32DeltaRounded; -+ ; -+ -+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32NewTotalHandCount * sizeof(struct sHandle), -+ (IMG_PVOID *) & psNewHandleArray, -+ &hNewHandBlockAlloc); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "IncreaseHandleArraySize: Couldn't allocate new handle array (%d)", 
-+ eError)); -+ return eError; -+ } -+ -+ if (psBase->psHandleArray != IMG_NULL) -+ OSMemCopy(psNewHandleArray, -+ psBase->psHandleArray, -+ psBase->ui32TotalHandCount * sizeof(struct sHandle)); -+ -+ for (psHandle = psNewHandleArray + psBase->ui32TotalHandCount; -+ psHandle < psNewHandleArray + ui32NewTotalHandCount; psHandle++) { -+ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE; -+ psHandle->ui32NextIndexPlusOne = 0; -+ } -+ -+ eError = FreeHandleArray(psBase); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psBase->psHandleArray = psNewHandleArray; -+ psBase->hHandBlockAlloc = hNewHandBlockAlloc; -+ -+ psBase->ui32FreeHandCount += ui32DeltaRounded; -+ -+ if (psBase->ui32FirstFreeIndex == 0) { -+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0); -+ -+ psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount; -+ } else { -+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0) -+ PVR_ASSERT(INDEX_TO_HANDLE_PTR -+ (psBase, -+ psBase->ui32LastFreeIndexPlusOne - -+ 1)->ui32NextIndexPlusOne == 0); -+ -+ INDEX_TO_HANDLE_PTR(psBase, -+ psBase->ui32LastFreeIndexPlusOne - -+ 1)->ui32NextIndexPlusOne = -+ psBase->ui32TotalHandCount + 1; -+ -+ } -+ psBase->ui32LastFreeIndexPlusOne = ui32NewTotalHandCount; -+ -+ psBase->ui32TotalHandCount = ui32NewTotalHandCount; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE * psBase, -+ IMG_UINT32 ui32Free) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (ui32Free > psBase->ui32FreeHandCount) { -+ IMG_UINT32 ui32FreeHandDelta = -+ ui32Free - psBase->ui32FreeHandCount; -+ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", -+ ui32FreeHandDelta, ui32Free, eError)); -+ -+ return eError; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ IMG_HANDLE hParent) -+{ -+ IMG_UINT32 ui32NewIndex; -+ struct sHandle *psNewHandle; -+ IMG_HANDLE hHandle; -+ HAND_KEY aKey; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ PVR_ASSERT(psBase->psHashTab != IMG_NULL); -+ -+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) { -+ -+ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == -+ IMG_NULL); -+ } -+ -+ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase)) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "AllocHandle: Handle batch size (%u) was too small, allocating additional space", -+ psBase->ui32HandBatchSize)); -+ } -+ -+ eError = EnsureFreeHandles(psBase, 1); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocHandle: EnsureFreeHandles failed (%d)", eError)); -+ return eError; -+ } -+ PVR_ASSERT(psBase->ui32FreeHandCount != 0) -+ -+ ui32NewIndex = psBase->ui32FirstFreeIndex; -+ -+ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex); -+ -+ hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex); -+ -+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) { -+ -+ InitKey(aKey, psBase, pvData, eType, hParent); -+ -+ if (!HASH_Insert_Extended -+ (psBase->psHashTab, aKey, (IMG_UINTPTR_T) hHandle)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocHandle: Couldn't add handle to hash table")); -+ -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ psBase->ui32FreeHandCount--; -+ -+ if (psBase->ui32FreeHandCount == 0) { -+ PVR_ASSERT(psBase->ui32FirstFreeIndex == 
ui32NewIndex); -+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == -+ (ui32NewIndex + 1)); -+ -+ psBase->ui32LastFreeIndexPlusOne = 0; -+ psBase->ui32FirstFreeIndex = 0; -+ } else { -+ -+ psBase->ui32FirstFreeIndex = -+ (psNewHandle->ui32NextIndexPlusOne == -+ 0) ? ui32NewIndex + 1 : psNewHandle->ui32NextIndexPlusOne - -+ 1; -+ } -+ -+ psNewHandle->eType = eType; -+ psNewHandle->pvData = pvData; -+ psNewHandle->eInternalFlag = 0; -+ psNewHandle->eFlag = eFlag; -+ psNewHandle->ui32PID = psBase->ui32PID; -+ psNewHandle->ui32Index = ui32NewIndex; -+ -+ InitParentList(psBase, psNewHandle); -+ PVR_ASSERT(NoChildren(psBase, psNewHandle)); -+ -+ InitChildEntry(psBase, psNewHandle); -+ PVR_ASSERT(NoParent(psBase, psNewHandle)); -+ -+ if (HANDLES_BATCHED(psBase)) { -+ -+ psNewHandle->ui32NextIndexPlusOne = -+ psBase->ui32FirstBatchIndexPlusOne; -+ -+ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1; -+ -+ SET_BATCHED_HANDLE(psNewHandle); -+ } else { -+ psNewHandle->ui32NextIndexPlusOne = 0; -+ } -+ -+ *phHandle = hHandle; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag) -+{ -+ IMG_HANDLE hHandle; -+ PVRSRV_ERROR eError; -+ -+ *phHandle = IMG_NULL; -+ -+ if (HANDLES_BATCHED(psBase)) { -+ -+ psBase->ui32BatchHandAllocFailures++; -+ } -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) { -+ -+ hHandle = FindHandle(psBase, pvData, eType, IMG_NULL); -+ if (hHandle != IMG_NULL) { -+ struct sHandle *psHandle; -+ -+ eError = -+ GetHandleStructure(psBase, &psHandle, hHandle, -+ eType); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocHandle: Lookup of existing handle failed")); -+ return eError; -+ } -+ -+ if (TEST_FLAG -+ (psHandle->eFlag & eFlag, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED)) { -+ *phHandle = hHandle; -+ eError = PVRSRV_OK; -+ goto exit_ok; -+ } -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL); -+ -+exit_ok: -+ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK)) { -+ psBase->ui32BatchHandAllocFailures--; -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ IMG_HANDLE hParent) -+{ -+ struct sHandle *psPHand; -+ struct sHandle *psCHand; -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hParentKey; -+ IMG_HANDLE hHandle; -+ -+ *phHandle = IMG_NULL; -+ -+ if (HANDLES_BATCHED(psBase)) { -+ -+ psBase->ui32BatchHandAllocFailures++; -+ } -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? 
-+ hParent : IMG_NULL; -+ -+ eError = -+ GetHandleStructure(psBase, &psPHand, hParent, -+ PVRSRV_HANDLE_TYPE_NONE); -+ if (eError != PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) { -+ -+ hHandle = FindHandle(psBase, pvData, eType, hParentKey); -+ if (hHandle != IMG_NULL) { -+ struct sHandle *psCHandle; -+ PVRSRV_ERROR eErr; -+ -+ eErr = -+ GetHandleStructure(psBase, &psCHandle, hHandle, -+ eType); -+ if (eErr != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocSubHandle: Lookup of existing handle failed")); -+ return eErr; -+ } -+ -+ PVR_ASSERT(hParentKey != IMG_NULL -+ && -+ ParentHandle(HANDLE_TO_HANDLE_PTR -+ (psBase, hHandle)) == hParent); -+ -+ if (TEST_FLAG -+ (psCHandle->eFlag & eFlag, -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED) -+ && -+ ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) -+ == hParent) { -+ *phHandle = hHandle; -+ goto exit_ok; -+ } -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ eError = -+ AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent); -+ -+ psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle); -+ -+ AdoptChild(psBase, psPHand, psCHand); -+ -+ *phHandle = hHandle; -+ -+exit_ok: -+ if (HANDLES_BATCHED(psBase)) { -+ psBase->ui32BatchHandAllocFailures--; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ IMG_HANDLE hHandle; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL); -+ if (hHandle == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVFindHandle: couldn't find handle")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ *phHandle = hHandle; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, -+ PVRSRV_HANDLE_TYPE * peType, -+ IMG_HANDLE hHandle) -+{ -+ struct sHandle *psHandle; -+ PVRSRV_ERROR eError; -+ -+ eError = -+ GetHandleStructure(psBase, &psHandle, hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", -+ eError)); -+ return eError; -+ } -+ -+ *ppvData = psHandle->pvData; -+ *peType = psHandle->eType; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ struct sHandle *psHandle; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVLookupHandle: Error looking up handle (%d)", -+ eError)); -+ return eError; -+ } -+ -+ *ppvData = psHandle->pvData; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_HANDLE hAncestor) -+{ -+ struct sHandle *psPHand; -+ struct sHandle *psCHand; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", -+ eError)); -+ return eError; -+ } -+ -+ for (psPHand = psCHand; 
ParentHandle(psPHand) != hAncestor;) { -+ eError = -+ GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), -+ PVRSRV_HANDLE_TYPE_NONE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ *ppvData = psCHand->pvData; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * phParent, IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ struct sHandle *psHandle; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetParentHandle: Error looking up subhandle (%d)", -+ eError)); -+ return eError; -+ } -+ -+ *phParent = ParentHandle(psHandle); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ struct sHandle *psHandle; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", -+ eError)); -+ return eError; -+ } -+ -+ *ppvData = psHandle->pvData; -+ -+ eError = FreeHandle(psBase, psHandle); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) -+{ -+ struct sHandle *psHandle; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVReleaseHandle: Error looking up handle (%d)", -+ eError)); -+ return eError; -+ } -+ -+ eError = FreeHandle(psBase, psHandle); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE * psBase, -+ IMG_UINT32 ui32BatchSize) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (HANDLES_BATCHED(psBase)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", -+ psBase->ui32HandBatchSize)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (ui32BatchSize == 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVNewHandleBatch: Invalid batch size (%u)", -+ ui32BatchSize)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = EnsureFreeHandles(psBase, ui32BatchSize); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", -+ eError)); -+ return eError; -+ } -+ -+ psBase->ui32HandBatchSize = ui32BatchSize; -+ -+ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount; -+ -+ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0); -+ -+ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0); -+ -+ PVR_ASSERT(HANDLES_BATCHED(psBase)); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE * -+ psBase, IMG_BOOL bCommit) -+{ -+ -+ IMG_UINT32 ui32IndexPlusOne; -+ IMG_BOOL bCommitBatch = bCommit; -+ -+ if (!HANDLES_BATCHED(psBase)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVHandleBatchCommitOrRelease: There is no handle batch")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ } -+ -+ if (psBase->ui32BatchHandAllocFailures != 0) { -+ if (bCommit) { -+ PVR_DPF((PVR_DBG_ERROR, -+ 
"PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures.")); -+ } -+ bCommitBatch = IMG_FALSE; -+ } -+ -+ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit); -+ -+ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne; -+ while (ui32IndexPlusOne != 0) { -+ struct sHandle *psHandle = -+ INDEX_TO_HANDLE_PTR(psBase, ui32IndexPlusOne - 1); -+ IMG_UINT32 ui32NextIndexPlusOne = -+ psHandle->ui32NextIndexPlusOne; -+ PVR_ASSERT(BATCHED_HANDLE(psHandle)); -+ -+ psHandle->ui32NextIndexPlusOne = 0; -+ -+ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) { -+ PVRSRV_ERROR eError; -+ -+ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) { -+ SET_UNBATCHED_HANDLE(psHandle); -+ } -+ -+ eError = FreeHandle(psBase, psHandle); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", -+ eError)); -+ } -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } else { -+ SET_UNBATCHED_HANDLE(psHandle); -+ } -+ -+ ui32IndexPlusOne = ui32NextIndexPlusOne; -+ } -+ -+#ifdef DEBUG -+ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount) { -+ IMG_UINT32 ui32Delta = -+ psBase->ui32TotalHandCount - -+ psBase->ui32TotalHandCountPreBatch; -+ -+ PVR_ASSERT(psBase->ui32TotalHandCount > -+ psBase->ui32TotalHandCountPreBatch); -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", -+ psBase->ui32HandBatchSize, -+ psBase->ui32HandBatchSize + ui32Delta)); -+ -+ } -+#endif -+ -+ psBase->ui32HandBatchSize = 0; -+ psBase->ui32FirstBatchIndexPlusOne = 0; -+ psBase->ui32TotalHandCountPreBatch = 0; -+ psBase->ui32BatchHandAllocFailures = 0; -+ -+ if (psBase->ui32BatchHandAllocFailures != 0 && bCommit) { -+ PVR_ASSERT(!bCommitBatch); -+ -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE * psBase) -+{ -+ return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE); -+} -+ -+void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE * psBase) -+{ -+ (void)PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE); -+} -+ -+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE ** ppsBase, -+ IMG_UINT32 ui32PID) -+{ -+ PVRSRV_HANDLE_BASE *psBase; -+ IMG_HANDLE hBlockAlloc; -+ PVRSRV_ERROR eError; -+ -+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(*psBase), -+ (IMG_PVOID *) & psBase, &hBlockAlloc); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", -+ eError)); -+ return eError; -+ } -+ OSMemSet(psBase, 0, sizeof(*psBase)); -+ -+ psBase->psHashTab = -+ HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), -+ HASH_Func_Default, HASH_Key_Comp_Default); -+ if (psBase->psHashTab == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n")); -+ goto failure; -+ } -+ -+ psBase->hBaseBlockAlloc = hBlockAlloc; -+ psBase->ui32PID = ui32PID; -+ -+ *ppsBase = psBase; -+ -+ return PVRSRV_OK; -+failure: -+ (void)PVRSRVFreeHandleBase(psBase); -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE * psBase) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psBase != gpsKernelHandleBase); -+ -+ eError = FreeHandleBase(psBase); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsKernelHandleBase == IMG_NULL); -+ -+ eError = 
PVRSRVAllocHandleBase(&gpsKernelHandleBase, KERNEL_ID); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (gpsKernelHandleBase != IMG_NULL) { -+ eError = FreeHandleBase(gpsKernelHandleBase); -+ if (eError == PVRSRV_OK) { -+ gpsKernelHandleBase = IMG_NULL; -+ } -+ } -+ -+ return eError; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/handle.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/handle.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/handle.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/handle.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,145 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __HANDLE_H__ -+#define __HANDLE_H__ -+ -+ -+#include "img_types.h" -+#include "hash.h" -+#include "resman.h" -+ -+ typedef enum { -+ PVRSRV_HANDLE_TYPE_NONE = 0, -+ PVRSRV_HANDLE_TYPE_PERPROC_DATA, -+ PVRSRV_HANDLE_TYPE_DEV_NODE, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT, -+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, -+ PVRSRV_HANDLE_TYPE_MEM_INFO, -+ PVRSRV_HANDLE_TYPE_SYNC_INFO, -+ PVRSRV_HANDLE_TYPE_DISP_INFO, -+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, -+ PVRSRV_HANDLE_TYPE_BUF_INFO, -+ PVRSRV_HANDLE_TYPE_DISP_BUFFER, -+ PVRSRV_HANDLE_TYPE_BUF_BUFFER, -+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT, -+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT, -+ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT, -+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, -+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF, -+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, -+ } PVRSRV_HANDLE_TYPE; -+ -+ typedef enum { -+ -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, -+ -+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 1, -+ -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 2, -+ -+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 4 -+ } PVRSRV_HANDLE_ALLOC_FLAG; -+ -+ struct _PVRSRV_HANDLE_BASE_; -+ typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE; -+ -+ extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; -+ -+#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) -+ -+ PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag); -+ -+ PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, -+ IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ 
PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ IMG_HANDLE hParent); -+ -+ PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE * phHandle, IMG_VOID * pvData, -+ PVRSRV_HANDLE_TYPE eType); -+ -+ PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, -+ PVRSRV_HANDLE_TYPE * peType, -+ IMG_HANDLE hHandle); -+ -+ PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+ PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_HANDLE hAncestor); -+ -+ PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * phParent, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+ PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_PVOID * ppvData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+ PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE * psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+ PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE * psBase, -+ IMG_UINT32 ui32BatchSize); -+ -+ PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE * psBase); -+ -+ void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE * psBase); -+ -+ PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE ** ppsBase, -+ IMG_UINT32 ui32PID); -+ -+ PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE * psBase); -+ -+ PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID); -+ -+ PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID); -+ -+ -+#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \ -+ (void)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag) -+ -+#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \ -+ (void)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent) -+ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/hash.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/hash.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/hash.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/hash.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,362 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "pvr_debug.h" -+#include "img_defs.h" -+#include "services.h" -+#include "servicesint.h" -+#include "hash.h" -+#include "osfunc.h" -+ -+#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b)) -+ -+#define KEY_TO_INDEX(pHash, key, uSize) \ -+ ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize) -+ -+#define KEY_COMPARE(pHash, pKey1, pKey2) \ -+ ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2)) -+ -+struct _BUCKET_ { -+ -+ struct _BUCKET_ *pNext; -+ -+ IMG_UINTPTR_T v; -+ -+ IMG_UINTPTR_T k[]; -+}; -+typedef struct _BUCKET_ BUCKET; -+ -+struct _HASH_TABLE_ { -+ -+ BUCKET **ppBucketTable; -+ -+ IMG_UINT32 uSize; -+ -+ IMG_UINT32 uCount; -+ -+ IMG_UINT32 uMinimumSize; -+ -+ IMG_UINT32 uKeySize; -+ -+ HASH_FUNC *pfnHashFunc; -+ -+ HASH_KEY_COMP *pfnKeyComp; -+}; -+ -+IMG_UINT32 -+HASH_Func_Default(IMG_SIZE_T uKeySize, IMG_VOID * pKey, IMG_UINT32 uHashTabLen) -+{ -+ IMG_UINTPTR_T *p = (IMG_UINTPTR_T *) pKey; -+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T); -+ IMG_UINT32 ui; -+ IMG_UINT32 uHashKey = 0; -+ -+ PVR_UNREFERENCED_PARAMETER(uHashTabLen); -+ -+ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0); -+ -+ for (ui = 0; ui < uKeyLen; ui++) { -+ IMG_UINT32 uHashPart = (IMG_UINT32) * p++; -+ -+ uHashPart += (uHashPart << 12); -+ uHashPart ^= (uHashPart >> 22); -+ uHashPart += (uHashPart << 4); -+ uHashPart ^= (uHashPart >> 9); -+ uHashPart += (uHashPart << 10); -+ uHashPart ^= (uHashPart >> 2); -+ uHashPart += (uHashPart << 7); -+ uHashPart ^= (uHashPart >> 12); -+ -+ uHashKey += uHashPart; -+ } -+ -+ return uHashKey; -+} -+ -+IMG_BOOL -+HASH_Key_Comp_Default(IMG_SIZE_T uKeySize, IMG_VOID * pKey1, IMG_VOID * pKey2) -+{ -+ IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *) pKey1; -+ IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *) pKey2; -+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T); -+ IMG_UINT32 ui; -+ -+ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0); -+ -+ for (ui = 0; ui < uKeyLen; ui++) { -+ if (*p1++ != *p2++) -+ return IMG_FALSE; -+ } -+ -+ return IMG_TRUE; -+} -+ -+static void -+_ChainInsert(HASH_TABLE * pHash, BUCKET * pBucket, BUCKET ** ppBucketTable, -+ IMG_UINT32 uSize) -+{ -+ IMG_UINT32 uIndex; -+ -+ PVR_ASSERT(pBucket != IMG_NULL); -+ PVR_ASSERT(ppBucketTable != IMG_NULL); -+ PVR_ASSERT(uSize != 0); -+ -+ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); -+ pBucket->pNext = ppBucketTable[uIndex]; -+ ppBucketTable[uIndex] = pBucket; -+} -+ -+static void -+_Rehash(HASH_TABLE * pHash, -+ BUCKET ** ppOldTable, IMG_UINT32 uOldSize, -+ BUCKET ** ppNewTable, IMG_UINT32 uNewSize) -+{ -+ IMG_UINT32 uIndex; -+ for (uIndex = 0; uIndex < uOldSize; uIndex++) { -+ BUCKET *pBucket; -+ pBucket = ppOldTable[uIndex]; -+ while (pBucket != IMG_NULL) { -+ BUCKET *pNextBucket = pBucket->pNext; -+ _ChainInsert(pHash, pBucket, ppNewTable, uNewSize); -+ pBucket = pNextBucket; -+ } -+ } -+} -+ -+static IMG_BOOL _Resize(HASH_TABLE * pHash, IMG_UINT32 uNewSize) -+{ -+ if (uNewSize != pHash->uSize) { -+ BUCKET **ppNewTable; -+ IMG_UINT32 uIndex; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x", -+ pHash->uSize, uNewSize, pHash->uCount)); -+ -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BUCKET *) * uNewSize, -+ (IMG_PVOID *) & ppNewTable, IMG_NULL); -+ if (ppNewTable == IMG_NULL) -+ return IMG_FALSE; -+ -+ for (uIndex = 0; uIndex < uNewSize; uIndex++) -+ ppNewTable[uIndex] = IMG_NULL; -+ _Rehash(pHash, 
pHash->ppBucketTable, pHash->uSize, ppNewTable, -+ uNewSize); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BUCKET *) * pHash->uSize, pHash->ppBucketTable, -+ IMG_NULL); -+ pHash->ppBucketTable = ppNewTable; -+ pHash->uSize = uNewSize; -+ } -+ return IMG_TRUE; -+} -+ -+HASH_TABLE *HASH_Create_Extended(IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, -+ HASH_FUNC * pfnHashFunc, -+ HASH_KEY_COMP * pfnKeyComp) -+{ -+ HASH_TABLE *pHash; -+ IMG_UINT32 uIndex; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", -+ uInitialLen)); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(HASH_TABLE), -+ (IMG_VOID **) & pHash, IMG_NULL) != PVRSRV_OK) { -+ return IMG_NULL; -+ } -+ -+ pHash->uCount = 0; -+ pHash->uSize = uInitialLen; -+ pHash->uMinimumSize = uInitialLen; -+ pHash->uKeySize = uKeySize; -+ pHash->pfnHashFunc = pfnHashFunc; -+ pHash->pfnKeyComp = pfnKeyComp; -+ -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BUCKET *) * pHash->uSize, -+ (IMG_PVOID *) & pHash->ppBucketTable, IMG_NULL); -+ -+ if (pHash->ppBucketTable == IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(HASH_TABLE), pHash, -+ IMG_NULL); -+ return IMG_NULL; -+ } -+ -+ for (uIndex = 0; uIndex < pHash->uSize; uIndex++) -+ pHash->ppBucketTable[uIndex] = IMG_NULL; -+ return pHash; -+} -+ -+HASH_TABLE *HASH_Create(IMG_UINT32 uInitialLen) -+{ -+ return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T), -+ &HASH_Func_Default, &HASH_Key_Comp_Default); -+} -+ -+IMG_VOID HASH_Delete(HASH_TABLE * pHash) -+{ -+ if (pHash != IMG_NULL) { -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete")); -+ -+ PVR_ASSERT(pHash->uCount == 0); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BUCKET *) * pHash->uSize, pHash->ppBucketTable, -+ IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(HASH_TABLE), pHash, -+ IMG_NULL); -+ } -+} -+ -+IMG_BOOL -+HASH_Insert_Extended(HASH_TABLE * pHash, IMG_VOID * pKey, IMG_UINTPTR_T v) -+{ -+ BUCKET *pBucket; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash, -+ pKey, v)); -+ -+ PVR_ASSERT(pHash != IMG_NULL); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BUCKET) + pHash->uKeySize, -+ (IMG_VOID **) & pBucket, IMG_NULL) != PVRSRV_OK) { -+ return IMG_FALSE; -+ } -+ -+ pBucket->v = v; -+ OSMemCopy(pBucket->k, pKey, pHash->uKeySize); -+ _ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize); -+ pHash->uCount++; -+ -+ if (pHash->uCount << 1 > pHash->uSize) { -+ -+ _Resize(pHash, pHash->uSize << 1); -+ } -+ -+ return IMG_TRUE; -+} -+ -+IMG_BOOL HASH_Insert(HASH_TABLE * pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v)); -+ -+ return HASH_Insert_Extended(pHash, &k, v); -+} -+ -+IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE * pHash, IMG_VOID * pKey) -+{ -+ BUCKET **ppBucket; -+ IMG_UINT32 uIndex; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, pKey=%08X", pHash, -+ pKey)); -+ -+ PVR_ASSERT(pHash != IMG_NULL); -+ -+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); -+ -+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; -+ ppBucket = &((*ppBucket)->pNext)) { -+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) { -+ BUCKET *pBucket = *ppBucket; -+ IMG_UINTPTR_T v = pBucket->v; -+ (*ppBucket) = pBucket->pNext; -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BUCKET) + pHash->uKeySize, pBucket, -+ IMG_NULL); -+ -+ pHash->uCount--; -+ -+ if (pHash->uSize > (pHash->uCount << 2) && -+ pHash->uSize > pHash->uMinimumSize) { -+ -+ 
_Resize(pHash, -+ PRIVATE_MAX(pHash->uSize >> 1, -+ pHash->uMinimumSize)); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x", -+ pHash, pKey, v)); -+ return v; -+ } -+ } -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, -+ pKey)); -+ return 0; -+} -+ -+IMG_UINTPTR_T HASH_Remove(HASH_TABLE * pHash, IMG_UINTPTR_T k) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k)); -+ -+ return HASH_Remove_Extended(pHash, &k); -+} -+ -+IMG_UINTPTR_T HASH_Retrieve_Extended(HASH_TABLE * pHash, IMG_VOID * pKey) -+{ -+ BUCKET **ppBucket; -+ IMG_UINT32 uIndex; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, pKey=%08X", pHash, -+ pKey)); -+ -+ PVR_ASSERT(pHash != IMG_NULL); -+ -+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); -+ -+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; -+ ppBucket = &((*ppBucket)->pNext)) { -+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) { -+ BUCKET *pBucket = *ppBucket; -+ IMG_UINTPTR_T v = pBucket->v; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x", -+ pHash, pKey, v)); -+ return v; -+ } -+ } -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, -+ pKey)); -+ return 0; -+} -+ -+IMG_UINTPTR_T HASH_Retrieve(HASH_TABLE * pHash, IMG_UINTPTR_T k) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash, -+ k)); -+ return HASH_Retrieve_Extended(pHash, &k); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/hash.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/hash.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/hash.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/hash.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,72 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _HASH_H_ -+#define _HASH_H_ -+ -+#include "img_types.h" -+#include "osfunc.h" -+ -+ -+ typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID * pKey, -+ IMG_UINT32 uHashTabLen); -+ typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID * pKey1, -+ IMG_VOID * pKey2); -+ -+ typedef struct _HASH_TABLE_ HASH_TABLE; -+ -+ IMG_UINT32 HASH_Func_Default(IMG_SIZE_T uKeySize, IMG_VOID * pKey, -+ IMG_UINT32 uHashTabLen); -+ -+ IMG_BOOL HASH_Key_Comp_Default(IMG_SIZE_T uKeySize, IMG_VOID * pKey1, -+ IMG_VOID * pKey2); -+ -+ HASH_TABLE *HASH_Create_Extended(IMG_UINT32 uInitialLen, -+ IMG_SIZE_T uKeySize, -+ HASH_FUNC * pfnHashFunc, -+ HASH_KEY_COMP * pfnKeyComp); -+ -+ HASH_TABLE *HASH_Create(IMG_UINT32 uInitialLen); -+ -+ IMG_VOID HASH_Delete(HASH_TABLE * pHash); -+ -+ IMG_BOOL HASH_Insert_Extended(HASH_TABLE * pHash, IMG_VOID * pKey, -+ IMG_UINTPTR_T v); -+ -+ IMG_BOOL HASH_Insert(HASH_TABLE * pHash, IMG_UINTPTR_T k, -+ IMG_UINTPTR_T v); -+ -+ IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE * pHash, IMG_VOID * pKey); -+ -+ IMG_UINTPTR_T HASH_Remove(HASH_TABLE * pHash, IMG_UINTPTR_T k); -+ -+ IMG_UINTPTR_T HASH_Retrieve_Extended(HASH_TABLE * pHash, -+ IMG_VOID * pKey); -+ -+ IMG_UINTPTR_T HASH_Retrieve(HASH_TABLE * pHash, IMG_UINTPTR_T k); -+ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/img_defs.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/img_defs.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/img_defs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/img_defs.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,72 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__IMG_DEFS_H__) -+#define __IMG_DEFS_H__ -+ -+#include "img_types.h" -+ -+typedef enum img_tag_TriStateSwitch { -+ IMG_ON = 0x00, -+ IMG_OFF, -+ IMG_IGNORE -+} img_TriStateSwitch, *img_pTriStateSwitch; -+ -+#define IMG_SUCCESS 0 -+ -+#define IMG_NO_REG 1 -+ -+#define INLINE __inline -+#define FORCE_INLINE static __inline -+ -+#ifndef PVR_UNREFERENCED_PARAMETER -+#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param) -+#endif -+ -+#ifdef __GNUC__ -+#define unref__ __attribute__ ((unused)) -+#else -+#define unref__ -+#endif -+ -+typedef char TCHAR, *PTCHAR, *PTSTR; -+#define _TCHAR_DEFINED -+ -+ -+#define IMG_CALLCONV -+#define IMG_INTERNAL __attribute__ ((visibility ("hidden"))) -+#define IMG_EXPORT -+#define IMG_IMPORT -+#define IMG_RESTRICT __restrict__ -+ -+ -+#define IMG_ABORT() abort() -+ -+#define IMG_MALLOC(A) malloc (A) -+ -+#define IMG_FREE(A) free (A) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/img_types.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/img_types.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/img_types.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/img_types.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,102 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __IMG_TYPES_H__ -+#define __IMG_TYPES_H__ -+ -+typedef unsigned int IMG_UINT, *IMG_PUINT; -+typedef signed int IMG_INT, *IMG_PINT; -+ -+typedef unsigned char IMG_UINT8, *IMG_PUINT8; -+typedef unsigned char IMG_BYTE, *IMG_PBYTE; -+typedef signed char IMG_INT8, *IMG_PINT8; -+typedef char IMG_CHAR, *IMG_PCHAR; -+ -+typedef unsigned short IMG_UINT16, *IMG_PUINT16; -+typedef signed short IMG_INT16, *IMG_PINT16; -+typedef unsigned long IMG_UINT32, *IMG_PUINT32; -+typedef signed long IMG_INT32, *IMG_PINT32; -+ -+typedef unsigned long long IMG_UINT64, *IMG_PUINT64; -+typedef long long IMG_INT64, *IMG_PINT64; -+ -+#ifndef __KERNEL__ -+typedef float IMG_FLOAT, *IMG_PFLOAT; -+typedef double IMG_DOUBLE, *IMG_PDOUBLE; -+#endif -+ -+typedef enum tag_img_bool { -+ IMG_FALSE = 0, -+ IMG_TRUE = 1, -+ IMG_FORCE_ALIGN = 0x7FFFFFFF -+} IMG_BOOL, *IMG_PBOOL; -+ -+typedef void IMG_VOID, *IMG_PVOID; -+ -+typedef IMG_INT32 IMG_RESULT; -+ -+typedef IMG_UINT32 IMG_UINTPTR_T; -+ -+typedef IMG_PVOID IMG_HANDLE; -+ -+typedef void **IMG_HVOID, *IMG_PHVOID; -+ -+typedef IMG_UINT32 IMG_SIZE_T; -+ -+#define IMG_NULL 0 -+ -+typedef IMG_PVOID IMG_CPU_VIRTADDR; -+ -+typedef struct { -+ IMG_UINT32 uiAddr; -+} IMG_CPU_PHYADDR; -+ -+typedef struct { -+ IMG_UINT32 uiAddr; -+} IMG_DEV_VIRTADDR; -+ -+typedef struct { -+ IMG_UINT32 uiAddr; -+} IMG_DEV_PHYADDR; -+ -+typedef struct { -+ IMG_UINT32 uiAddr; -+} IMG_SYS_PHYADDR; -+ -+typedef struct _SYSTEM_ADDR_ { -+ -+ IMG_UINT32 ui32PageCount; -+ union { -+ -+ IMG_SYS_PHYADDR sContig; -+ -+ IMG_SYS_PHYADDR asNonContig[1]; -+ } u; -+} SYSTEM_ADDR; -+ -+#include "img_defs.h" -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/ioctldef.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/ioctldef.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/ioctldef.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/ioctldef.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,95 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __IOCTLDEF_H__ -+#define __IOCTLDEF_H__ -+ -+#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF) -+ -+ -+#define DEVICE_TYPE ULONG -+ -+#define FILE_DEVICE_BEEP 0x00000001 -+#define FILE_DEVICE_CD_ROM 0x00000002 -+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003 -+#define FILE_DEVICE_CONTROLLER 0x00000004 -+#define FILE_DEVICE_DATALINK 0x00000005 -+#define FILE_DEVICE_DFS 0x00000006 -+#define FILE_DEVICE_DISK 0x00000007 -+#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008 -+#define FILE_DEVICE_FILE_SYSTEM 0x00000009 -+#define FILE_DEVICE_INPORT_PORT 0x0000000a -+#define FILE_DEVICE_KEYBOARD 0x0000000b -+#define FILE_DEVICE_MAILSLOT 0x0000000c -+#define FILE_DEVICE_MIDI_IN 0x0000000d -+#define FILE_DEVICE_MIDI_OUT 0x0000000e -+#define FILE_DEVICE_MOUSE 0x0000000f -+#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010 -+#define FILE_DEVICE_NAMED_PIPE 0x00000011 -+#define FILE_DEVICE_NETWORK 0x00000012 -+#define FILE_DEVICE_NETWORK_BROWSER 0x00000013 -+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014 -+#define FILE_DEVICE_NULL 0x00000015 -+#define FILE_DEVICE_PARALLEL_PORT 0x00000016 -+#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017 -+#define FILE_DEVICE_PRINTER 0x00000018 -+#define FILE_DEVICE_SCANNER 0x00000019 -+#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a -+#define FILE_DEVICE_SERIAL_PORT 0x0000001b -+#define FILE_DEVICE_SCREEN 0x0000001c -+#define FILE_DEVICE_SOUND 0x0000001d -+#define FILE_DEVICE_STREAMS 0x0000001e -+#define FILE_DEVICE_TAPE 0x0000001f -+#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020 -+#define FILE_DEVICE_TRANSPORT 0x00000021 -+#define FILE_DEVICE_UNKNOWN 0x00000022 -+#define FILE_DEVICE_VIDEO 0x00000023 -+#define FILE_DEVICE_VIRTUAL_DISK 0x00000024 -+#define FILE_DEVICE_WAVE_IN 0x00000025 -+#define FILE_DEVICE_WAVE_OUT 0x00000026 -+#define FILE_DEVICE_8042_PORT 0x00000027 -+#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028 -+#define FILE_DEVICE_BATTERY 0x00000029 -+#define FILE_DEVICE_BUS_EXTENDER 0x0000002a -+#define FILE_DEVICE_MODEM 0x0000002b -+#define FILE_DEVICE_VDM 0x0000002c -+#define FILE_DEVICE_MASS_STORAGE 0x0000002d -+ -+#define CTL_CODE( DeviceType, Function, Method, Access ) ( \ -+ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \ -+) -+ -+#define METHOD_BUFFERED 0 -+#define METHOD_IN_DIRECT 1 -+#define METHOD_OUT_DIRECT 2 -+#define METHOD_NEITHER 3 -+ -+#define FILE_ANY_ACCESS 0 -+#define FILE_READ_ACCESS ( 0x0001 ) -+#define FILE_WRITE_ACCESS ( 0x0002 ) -+ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/Kconfig linux-omap-2.6.28-nokia1/drivers/gpu/pvr/Kconfig ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/Kconfig 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,23 @@ -+menuconfig PVR -+ tristate "PowerVR Services" -+ depends on OMAP2_DSS -+ -+if PVR -+ -+choice -+ prompt "Build type" -+ default PVR_RELEASE -+config PVR_RELEASE -+ bool "Release" -+config PVR_DEBUG -+ bool "Debug" -+config PVR_TIMING -+ bool "Timing" -+endchoice -+ -+config PVR_EXAMPLES -+ tristate "Example code" -+ default n -+ -+endif -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/kernelbuffer.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/kernelbuffer.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/kernelbuffer.h 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-omap-2.6.28-nokia1/drivers/gpu/pvr/kernelbuffer.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,59 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__KERNELBUFFER_H__) -+#define __KERNELBUFFER_H__ -+ -+typedef PVRSRV_ERROR(*PFN_OPEN_BC_DEVICE) (IMG_HANDLE *); -+typedef PVRSRV_ERROR(*PFN_CLOSE_BC_DEVICE) (IMG_HANDLE); -+typedef PVRSRV_ERROR(*PFN_GET_BC_INFO) (IMG_HANDLE, BUFFER_INFO *); -+typedef PVRSRV_ERROR(*PFN_GET_BC_BUFFER) (IMG_HANDLE, IMG_UINT32, -+ PVRSRV_SYNC_DATA *, IMG_HANDLE *); -+ -+typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG { -+ IMG_UINT32 ui32TableSize; -+ PFN_OPEN_BC_DEVICE pfnOpenBCDevice; -+ PFN_CLOSE_BC_DEVICE pfnCloseBCDevice; -+ PFN_GET_BC_INFO pfnGetBCInfo; -+ PFN_GET_BC_BUFFER pfnGetBCBuffer; -+ PFN_GET_BUFFER_ADDR pfnGetBufferAddr; -+ -+} PVRSRV_BC_SRV2BUFFER_KMJTABLE; -+ -+typedef PVRSRV_ERROR(*PFN_BC_REGISTER_BUFFER_DEV) (PVRSRV_BC_SRV2BUFFER_KMJTABLE -+ *, IMG_UINT32 *); -+typedef PVRSRV_ERROR(*PFN_BC_REMOVE_BUFFER_DEV) (IMG_UINT32); -+ -+typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG { -+ IMG_UINT32 ui32TableSize; -+ PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice; -+ PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice; -+ -+} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE; -+ -+typedef IMG_BOOL(*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/kerneldisplay.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/kerneldisplay.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/kerneldisplay.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/kerneldisplay.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,144 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__KERNELDISPLAY_H__) -+#define __KERNELDISPLAY_H__ -+ -+typedef PVRSRV_ERROR(*PFN_OPEN_DC_DEVICE) (IMG_UINT32, IMG_HANDLE *, -+ PVRSRV_SYNC_DATA *); -+typedef PVRSRV_ERROR(*PFN_CLOSE_DC_DEVICE) (IMG_HANDLE); -+typedef PVRSRV_ERROR(*PFN_ENUM_DC_FORMATS) (IMG_HANDLE, IMG_UINT32 *, -+ DISPLAY_FORMAT *); -+typedef PVRSRV_ERROR(*PFN_ENUM_DC_DIMS) (IMG_HANDLE, DISPLAY_FORMAT *, -+ IMG_UINT32 *, DISPLAY_DIMS *); -+typedef PVRSRV_ERROR(*PFN_GET_DC_SYSTEMBUFFER) (IMG_HANDLE, IMG_HANDLE *); -+typedef PVRSRV_ERROR(*PFN_GET_DC_INFO) (IMG_HANDLE, DISPLAY_INFO *); -+typedef PVRSRV_ERROR(*PFN_CREATE_DC_SWAPCHAIN) (IMG_HANDLE, -+ IMG_UINT32, -+ DISPLAY_SURF_ATTRIBUTES *, -+ DISPLAY_SURF_ATTRIBUTES *, -+ IMG_UINT32, -+ PVRSRV_SYNC_DATA **, -+ IMG_UINT32, -+ IMG_HANDLE *, IMG_UINT32 *); -+typedef PVRSRV_ERROR(*PFN_DESTROY_DC_SWAPCHAIN) (IMG_HANDLE, IMG_HANDLE); -+typedef PVRSRV_ERROR(*PFN_SET_DC_DSTRECT) (IMG_HANDLE, IMG_HANDLE, IMG_RECT *); -+typedef PVRSRV_ERROR(*PFN_SET_DC_SRCRECT) (IMG_HANDLE, IMG_HANDLE, IMG_RECT *); -+typedef PVRSRV_ERROR(*PFN_SET_DC_DSTCK) (IMG_HANDLE, IMG_HANDLE, IMG_UINT32); -+typedef PVRSRV_ERROR(*PFN_SET_DC_SRCCK) (IMG_HANDLE, IMG_HANDLE, IMG_UINT32); -+typedef PVRSRV_ERROR(*PFN_GET_DC_BUFFERS) (IMG_HANDLE, -+ IMG_HANDLE, -+ IMG_UINT32 *, IMG_HANDLE *); -+typedef PVRSRV_ERROR(*PFN_SWAP_TO_DC_BUFFER) (IMG_HANDLE, -+ IMG_HANDLE, -+ IMG_UINT32, -+ IMG_HANDLE, -+ IMG_UINT32, IMG_RECT *); -+typedef PVRSRV_ERROR(*PFN_SWAP_TO_DC_SYSTEM) (IMG_HANDLE, IMG_HANDLE); -+typedef IMG_VOID(*PFN_SET_DC_STATE) (IMG_HANDLE, IMG_UINT32); -+ -+typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG { -+ IMG_UINT32 ui32TableSize; -+ PFN_OPEN_DC_DEVICE pfnOpenDCDevice; -+ PFN_CLOSE_DC_DEVICE pfnCloseDCDevice; -+ PFN_ENUM_DC_FORMATS pfnEnumDCFormats; -+ PFN_ENUM_DC_DIMS pfnEnumDCDims; -+ PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer; -+ PFN_GET_DC_INFO pfnGetDCInfo; -+ PFN_GET_BUFFER_ADDR pfnGetBufferAddr; -+ PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain; -+ PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain; -+ PFN_SET_DC_DSTRECT pfnSetDCDstRect; -+ PFN_SET_DC_SRCRECT pfnSetDCSrcRect; -+ PFN_SET_DC_DSTCK pfnSetDCDstColourKey; -+ PFN_SET_DC_SRCCK pfnSetDCSrcColourKey; -+ PFN_GET_DC_BUFFERS pfnGetDCBuffers; -+ PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer; -+ PFN_SWAP_TO_DC_SYSTEM pfnSwapToDCSystem; -+ PFN_SET_DC_STATE pfnSetDCState; -+ -+} PVRSRV_DC_SRV2DISP_KMJTABLE; -+ -+typedef IMG_BOOL(*PFN_ISR_HANDLER) (IMG_VOID *); -+ -+typedef PVRSRV_ERROR(*PFN_DC_REGISTER_DISPLAY_DEV) (PVRSRV_DC_SRV2DISP_KMJTABLE -+ *, IMG_UINT32 *); -+typedef PVRSRV_ERROR(*PFN_DC_REMOVE_DISPLAY_DEV) (IMG_UINT32); -+typedef PVRSRV_ERROR(*PFN_DC_OEM_FUNCTION) (IMG_UINT32, IMG_VOID *, IMG_UINT32, -+ IMG_VOID *, IMG_UINT32); -+typedef PVRSRV_ERROR(*PFN_DC_REGISTER_COMMANDPROCLIST) (IMG_UINT32, -+ PPFN_CMD_PROC, -+ IMG_UINT32[][2], -+ IMG_UINT32); -+typedef PVRSRV_ERROR(*PFN_DC_REMOVE_COMMANDPROCLIST) (IMG_UINT32, IMG_UINT32); -+typedef IMG_VOID(*PFN_DC_CMD_COMPLETE) (IMG_HANDLE, IMG_BOOL); 
-+typedef PVRSRV_ERROR(*PFN_DC_REGISTER_SYS_ISR) (PFN_ISR_HANDLER, IMG_VOID *, -+ IMG_UINT32, IMG_UINT32); -+typedef PVRSRV_ERROR(*PFN_DC_REGISTER_POWER) (IMG_UINT32, PFN_PRE_POWER, -+ PFN_POST_POWER, -+ PFN_PRE_CLOCKSPEED_CHANGE, -+ PFN_POST_CLOCKSPEED_CHANGE, -+ IMG_HANDLE, PVR_POWER_STATE, -+ PVR_POWER_STATE); -+ -+typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG { -+ IMG_UINT32 ui32TableSize; -+ PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice; -+ PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice; -+ PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction; -+ PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList; -+ PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList; -+ PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete; -+ PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler; -+ PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice; -+} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE; -+ -+typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG { -+ -+ IMG_HANDLE hExtDevice; -+ -+ IMG_HANDLE hExtSwapChain; -+ -+ IMG_HANDLE hExtBuffer; -+ -+ IMG_HANDLE hPrivateTag; -+ -+ IMG_UINT32 ui32ClipRectCount; -+ -+ IMG_RECT *psClipRect; -+ -+ IMG_UINT32 ui32SwapInterval; -+ -+} DISPLAYCLASS_FLIP_COMMAND; -+ -+#define DC_FLIP_COMMAND 0 -+ -+#define DC_STATE_NO_FLUSH_COMMANDS 0 -+#define DC_STATE_FLUSH_COMMANDS 1 -+ -+typedef IMG_BOOL(*PFN_DC_GET_PVRJTABLE) (PPVRSRV_DC_DISP2SRV_KMJTABLE); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/Makefile linux-omap-2.6.28-nokia1/drivers/gpu/pvr/Makefile ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/Makefile 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,25 @@ -+obj-$(CONFIG_PVR) += omaplfb.o pvrsrvkm.o -+ -+omaplfb-objs := omaplfb_displayclass.o omaplfb_linux.o -+ -+pvrsrvkm-objs := osfunc.o mmap.o module.o pdump.o proc.o \ -+ pvr_bridge_k.o pvr_debug.o mm.o mutex.o event.o \ -+ buffer_manager.o devicemem.o deviceclass.o \ -+ handle.o hash.o metrics.o pvrsrv.o queue.o ra.o \ -+ resman.o power.o mem.o bridged_pvr_bridge.o \ -+ sgxinit.o sgxreset.o sgxutils.o sgxkick.o \ -+ sgxtransfer.o mmu.o pb.o perproc.o sysconfig.o \ -+ sysutils_linux.o -+ -+obj-$(CONFIG_PVR_EXAMPLES) += bc_example.o -+ -+bc_example-objs := bufferclass_example.o bufferclass_example_linux.o \ -+ bufferclass_example_private.o -+ -+DATE := $(shell date "+%a %B %d %Z %Y" ) -+CBUILD := -O2 \ -+ -DPVR_BUILD_DIR="\"$(PVR_BUILD_DIR)\"" \ -+ -DPVR_BUILD_DATE="\"$(DATE)\"" -+ -+ccflags-y += $(CBUILD) -include $(src)/pvrconfig.h -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mem.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mem.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mem.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mem.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,121 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "sgxapi_km.h" -+#include "pvr_bridge_km.h" -+ -+static PVRSRV_ERROR -+FreeSharedSysMemCallBack(IMG_PVOID pvParam, IMG_UINT32 ui32Param) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ OSFreePages(psKernelMemInfo->ui32Flags, -+ psKernelMemInfo->ui32AllocSize, -+ psKernelMemInfo->pvLinAddrKM, -+ psKernelMemInfo->sMemBlk.hOSMemHandle); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), psKernelMemInfo, IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT PVRSRV_ERROR -+PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size, -+ PVRSRV_KERNEL_MEM_INFO ** ppsKernelMemInfo) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), -+ (IMG_VOID **) & psKernelMemInfo, IMG_NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK; -+ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS; -+ psKernelMemInfo->ui32Flags = ui32Flags; -+ psKernelMemInfo->ui32AllocSize = ui32Size; -+ -+ if (OSAllocPages(psKernelMemInfo->ui32Flags, -+ psKernelMemInfo->ui32AllocSize, -+ &psKernelMemInfo->pvLinAddrKM, -+ &psKernelMemInfo->sMemBlk.hOSMemHandle) -+ != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO), psKernelMemInfo, 0); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psKernelMemInfo->sMemBlk.hResItem = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_SHARED_MEM_INFO, -+ psKernelMemInfo, 0, FreeSharedSysMemCallBack); -+ -+ *ppsKernelMemInfo = psKernelMemInfo; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT PVRSRV_ERROR -+PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO * psKernelMemInfo) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (psKernelMemInfo->sMemBlk.hResItem) { -+ eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem); -+ } else { -+ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT PVRSRV_ERROR -+PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO * psKernelMemInfo) -+{ -+ if (!psKernelMemInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psKernelMemInfo->sMemBlk.hResItem) { -+ ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, -+ IMG_NULL); -+ psKernelMemInfo->sMemBlk.hResItem = IMG_NULL; -+ } -+ -+ return PVRSRV_OK; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/metrics.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/metrics.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/metrics.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/metrics.c 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,114 @@ 
-+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "sgxapi_km.h" -+#include "metrics.h" -+ -+#if defined(DEBUG) || defined(TIMING) -+ -+static volatile IMG_UINT32 *pui32TimerRegister = 0; -+ -+#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total -+#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS) -+#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count -+ -+Temporal_Data asTimers[PVRSRV_NUM_TIMERS]; -+ -+IMG_UINT32 PVRSRVTimeNow(IMG_VOID) -+{ -+ if (!pui32TimerRegister) { -+ static IMG_BOOL bFirstTime = IMG_TRUE; -+ -+ if (bFirstTime) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVTimeNow: No timer register set up")); -+ -+ bFirstTime = IMG_FALSE; -+ } -+ -+ return 0; -+ } -+ -+ return 0; -+ -+} -+ -+static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID) -+{ -+ IMG_UINT32 ui32Time1, ui32Time2; -+ -+ ui32Time1 = PVRSRVTimeNow(); -+ -+ OSWaitus(1000000); -+ -+ ui32Time2 = PVRSRVTimeNow(); -+ -+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", -+ ui32Time2 - ui32Time1)); -+ -+ return (ui32Time2 - ui32Time1); -+} -+ -+IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID * pvDevInfo) -+{ -+ IMG_UINT32 ui32Loop; -+ -+ PVR_UNREFERENCED_PARAMETER(pvDevInfo); -+ -+ for (ui32Loop = 0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++) { -+ asTimers[ui32Loop].ui32Total = 0; -+ asTimers[ui32Loop].ui32Count = 0; -+ } -+ -+ -+ pui32TimerRegister = 0; -+ -+ -+} -+ -+IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID) -+{ -+ IMG_UINT32 ui32TicksPerMS, ui32Loop; -+ -+ ui32TicksPerMS = PVRSRVGetCPUFreq(); -+ -+ if (!ui32TicksPerMS) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVOutputMetricTotals: Failed to get CPU Freq")); -+ return; -+ } -+ -+ for (ui32Loop = 0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++) { -+ if (asTimers[ui32Loop].ui32Count & 0x80000000L) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "PVRSRVOutputMetricTotals: Timer %u is still ON", -+ ui32Loop)); -+ } -+ } -+} -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/metrics.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/metrics.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/metrics.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/metrics.h 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,95 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. 
All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _METRICS_ -+#define _METRICS_ -+ -+ -+#if defined(DEBUG) || defined(TIMING) -+ -+ typedef struct { -+ IMG_UINT32 ui32Start; -+ IMG_UINT32 ui32Stop; -+ IMG_UINT32 ui32Total; -+ IMG_UINT32 ui32Count; -+ } Temporal_Data; -+ -+ extern Temporal_Data asTimers[]; -+ -+ extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID); -+ extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID * pvDevInfo); -+ extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID); -+ -+#define PVRSRV_TIMER_DUMMY 0 -+ -+#define PVRSRV_TIMER_EXAMPLE_1 1 -+#define PVRSRV_TIMER_EXAMPLE_2 2 -+ -+#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1) -+ -+#define PVRSRV_TIME_START(X) { \ -+ asTimers[X].ui32Count += 1; \ -+ asTimers[X].ui32Count |= 0x80000000L; \ -+ asTimers[X].ui32Start = PVRSRVTimeNow(); \ -+ asTimers[X].ui32Stop = 0; \ -+ } -+ -+#define PVRSRV_TIME_SUSPEND(X) { \ -+ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \ -+ } -+ -+#define PVRSRV_TIME_RESUME(X) { \ -+ asTimers[X].ui32Start = PVRSRVTimeNow(); \ -+ } -+ -+#define PVRSRV_TIME_STOP(X) { \ -+ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \ -+ asTimers[X].ui32Total += asTimers[X].ui32Stop; \ -+ asTimers[X].ui32Count &= 0x7FFFFFFFL; \ -+ } -+ -+#define PVRSRV_TIME_RESET(X) { \ -+ asTimers[X].ui32Start = 0; \ -+ asTimers[X].ui32Stop = 0; \ -+ asTimers[X].ui32Total = 0; \ -+ asTimers[X].ui32Count = 0; \ -+ } -+ -+ -+#else -+ -+#define PVRSRV_TIME_START(X) -+#define PVRSRV_TIME_SUSPEND(X) -+#define PVRSRV_TIME_RESUME(X) -+#define PVRSRV_TIME_STOP(X) -+#define PVRSRV_TIME_RESET(X) -+ -+#define PVRSRVSetupMetricTimers(X) -+#define PVRSRVOutputMetricTotals() -+ -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmap.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmap.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmap.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmap.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,668 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "img_defs.h" -+#include "services.h" -+#include "servicesint.h" -+#include "pvrmmap.h" -+#include "mmap.h" -+#include "mm.h" -+#include "pvr_debug.h" -+#include "osfunc.h" -+#include "proc.h" -+ -+static PKV_OFFSET_STRUCT FindOffsetStructFromLinuxMemArea(LinuxMemArea * -+ psLinuxMemArea); -+static IMG_UINT32 GetFirstFreePageAlignedNumber(void); -+static PKV_OFFSET_STRUCT FindOffsetStructByKVIndexAddress(IMG_VOID * -+ pvVirtAddress, -+ IMG_UINT32 -+ ui32ByteSize); -+static void DeterminUsersSizeAndByteOffset(IMG_VOID * pvKVIndexAddress, -+ LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 * pui32RealByteSize, -+ IMG_UINT32 * pui32ByteOffset); -+static PKV_OFFSET_STRUCT FindOffsetStructByMMapOffset(IMG_UINT32 ui32Offset); -+static IMG_BOOL DoMapToUser(LinuxMemArea * psLinuxMemArea, -+ struct vm_area_struct *ps_vma, -+ IMG_UINT32 ui32ByteOffset, IMG_UINT32 ui32Size); -+static IMG_BOOL CheckSize(LinuxMemArea * psLinuxMemArea, IMG_UINT32 ui32Size); -+ -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+static off_t PrintMMapRegistrations(char *buffer, size_t size, off_t off); -+#endif -+ -+static void MMapVOpen(struct vm_area_struct *ps_vma); -+static void MMapVClose(struct vm_area_struct *ps_vma); -+ -+static struct vm_operations_struct MMapIOOps = { -+open: MMapVOpen, -+close: MMapVClose -+}; -+ -+static PKV_OFFSET_STRUCT g_psKVOffsetTable = 0; -+static LinuxKMemCache *g_psMemmapCache = 0; -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+static IMG_UINT32 g_ui32RegisteredAreas = 0; -+static IMG_UINT32 g_ui32TotalByteSize = 0; -+#endif -+ -+static struct rw_semaphore g_mmap_sem; -+ -+IMG_VOID PVRMMapInit(IMG_VOID) -+{ -+ g_psKVOffsetTable = 0; -+ -+ g_psMemmapCache = -+ KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0); -+ if (g_psMemmapCache) { -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+ CreateProcReadEntry("mmap", PrintMMapRegistrations); -+#endif -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate kmem_cache", -+ __FUNCTION__)); -+ } -+ init_rwsem(&g_mmap_sem); -+} -+ -+IMG_VOID PVRMMapCleanup(void) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct; -+ -+ if (!g_psMemmapCache) -+ return; -+ -+ if (g_psKVOffsetTable) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: BUG! 
g_psMemmapCache isn't empty!", -+ __FUNCTION__)); -+ -+ for (psOffsetStruct = g_psKVOffsetTable; psOffsetStruct; -+ psOffsetStruct = psOffsetStruct->psNext) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: BUG!: Un-registering mmapable area: psLinuxMemArea=0x%p, CpuPAddr=0x%08lx\n", -+ __FUNCTION__, psOffsetStruct->psLinuxMemArea, -+ LinuxMemAreaToCpuPAddr(psOffsetStruct-> -+ psLinuxMemArea, -+ 0).uiAddr)); -+ PVRMMapRemoveRegisteredArea(psOffsetStruct-> -+ psLinuxMemArea); -+ } -+ } -+ -+ RemoveProcEntry("mmap"); -+ KMemCacheDestroyWrapper(g_psMemmapCache); -+ g_psMemmapCache = NULL; -+ PVR_DPF((PVR_DBG_MESSAGE, "PVRMMapCleanup: KVOffsetTable deallocated")); -+} -+ -+PVRSRV_ERROR -+PVRMMapRegisterArea(const IMG_CHAR * pszName, -+ LinuxMemArea * psLinuxMemArea, IMG_UINT32 ui32AllocFlags) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct; -+ PVRSRV_ERROR iError = PVRSRV_OK; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s(%s, psLinuxMemArea=%p, ui32AllocFlags=0x%8lx)", -+ __FUNCTION__, pszName, psLinuxMemArea, ui32AllocFlags)); -+ -+ down_write(&g_mmap_sem); -+ psOffsetStruct = FindOffsetStructFromLinuxMemArea(psLinuxMemArea); -+ if (psOffsetStruct) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRMMapRegisterArea: psLinuxMemArea=%p is already registered", -+ psOffsetStruct->psLinuxMemArea)); -+ iError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto register_exit; -+ } -+ -+ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL); -+ if (!psOffsetStruct) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRMMapRegisterArea: Couldn't alloc another mapping record from cache")); -+ iError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto register_exit; -+ } -+ -+ psOffsetStruct->ui32MMapOffset = GetFirstFreePageAlignedNumber(); -+ psOffsetStruct->psLinuxMemArea = psLinuxMemArea; -+ -+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) { -+ psOffsetStruct->ui32AllocFlags = ui32AllocFlags; -+ } else { -+ PKV_OFFSET_STRUCT psParentOffsetStruct; -+ psParentOffsetStruct = -+ FindOffsetStructFromLinuxMemArea(psLinuxMemArea->uData. -+ sSubAlloc. -+ psParentLinuxMemArea); -+ PVR_ASSERT(psParentOffsetStruct); -+ psOffsetStruct->ui32AllocFlags = -+ psParentOffsetStruct->ui32AllocFlags; -+ } -+ -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+ -+ psOffsetStruct->pszName = pszName; -+ psOffsetStruct->pid = current->pid; -+ psOffsetStruct->ui16Mapped = 0; -+ psOffsetStruct->ui16Faults = 0; -+ -+ g_ui32RegisteredAreas++; -+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) { -+ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize; -+ } -+#endif -+ -+ psOffsetStruct->psNext = g_psKVOffsetTable; -+ -+ g_psKVOffsetTable = psOffsetStruct; -+register_exit: -+ up_write(&g_mmap_sem); -+ return iError; -+} -+ -+PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PKV_OFFSET_STRUCT *ppsOffsetStruct, psOffsetStruct; -+ PVRSRV_ERROR iError = PVRSRV_OK; -+ -+ down_write(&g_mmap_sem); -+ for (ppsOffsetStruct = &g_psKVOffsetTable; -+ (psOffsetStruct = *ppsOffsetStruct); -+ ppsOffsetStruct = &(*ppsOffsetStruct)->psNext) { -+ if (psOffsetStruct->psLinuxMemArea == psLinuxMemArea) { -+ break; -+ } -+ } -+ -+ if (!psOffsetStruct) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Registration for psLinuxMemArea = 0x%p not found", -+ __FUNCTION__, psLinuxMemArea)); -+ iError = PVRSRV_ERROR_BAD_MAPPING; -+ goto unregister_exit; -+ } -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+ -+ if (psOffsetStruct->ui16Mapped) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unregistering still-mapped area! 
(psLinuxMemArea=0x%p)\n", -+ __FUNCTION__, psOffsetStruct->psLinuxMemArea)); -+ iError = PVRSRV_ERROR_BAD_MAPPING; -+ goto unregister_exit; -+ } -+ -+ g_ui32RegisteredAreas--; -+ -+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) { -+ g_ui32TotalByteSize -= -+ psOffsetStruct->psLinuxMemArea->ui32ByteSize; -+ } -+#endif -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: " -+ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__, -+ psOffsetStruct->psLinuxMemArea, -+ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0))); -+ -+ *ppsOffsetStruct = psOffsetStruct->psNext; -+ -+ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct); -+ -+unregister_exit: -+ up_write(&g_mmap_sem); -+ return iError; -+} -+ -+static PKV_OFFSET_STRUCT -+FindOffsetStructFromLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct = NULL; -+ -+ for (psOffsetStruct = g_psKVOffsetTable; psOffsetStruct; -+ psOffsetStruct = psOffsetStruct->psNext) { -+ if (psOffsetStruct->psLinuxMemArea == psLinuxMemArea) { -+ return psOffsetStruct; -+ } -+ } -+ return NULL; -+} -+ -+static IMG_UINT32 GetFirstFreePageAlignedNumber(void) -+{ -+ PKV_OFFSET_STRUCT psCurrentRec; -+ IMG_UINT32 ui32CurrentPageOffset; -+ -+ if (!g_psKVOffsetTable) { -+ return 0; -+ } -+ -+ psCurrentRec = g_psKVOffsetTable; -+ ui32CurrentPageOffset = (g_psKVOffsetTable->ui32MMapOffset); -+ -+ while (psCurrentRec) { -+ if (ui32CurrentPageOffset != (psCurrentRec->ui32MMapOffset)) { -+ return ui32CurrentPageOffset; -+ } -+ psCurrentRec = psCurrentRec->psNext; -+ ui32CurrentPageOffset += PAGE_SIZE; -+ } -+ -+ return g_psKVOffsetTable->ui32MMapOffset + PAGE_SIZE; -+} -+ -+PVRSRV_ERROR -+PVRMMapKVIndexAddressToMMapData(IMG_VOID * pvKVIndexAddress, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 * pui32MMapOffset, -+ IMG_UINT32 * pui32ByteOffset, -+ IMG_UINT32 * pui32RealByteSize) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct; -+ PVRSRV_ERROR iError = PVRSRV_OK; -+ -+ down_read(&g_mmap_sem); -+ psOffsetStruct = -+ FindOffsetStructByKVIndexAddress(pvKVIndexAddress, ui32Size); -+ if (!psOffsetStruct) { -+ iError = PVRSRV_ERROR_BAD_MAPPING; -+ goto indexaddress_exit; -+ } -+ -+ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset; -+ -+ DeterminUsersSizeAndByteOffset(pvKVIndexAddress, -+ psOffsetStruct->psLinuxMemArea, -+ pui32RealByteSize, pui32ByteOffset); -+ -+indexaddress_exit: -+ up_read(&g_mmap_sem); -+ return iError; -+} -+ -+static PKV_OFFSET_STRUCT -+FindOffsetStructByKVIndexAddress(IMG_VOID * pvKVIndexAddress, -+ IMG_UINT32 ui32ByteSize) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct; -+ IMG_UINT8 *pui8CpuVAddr; -+ IMG_UINT8 *pui8IndexCpuVAddr = (IMG_UINT8 *) pvKVIndexAddress; -+ -+ for (psOffsetStruct = g_psKVOffsetTable; psOffsetStruct; -+ psOffsetStruct = psOffsetStruct->psNext) { -+ LinuxMemArea *psLinuxMemArea = psOffsetStruct->psLinuxMemArea; -+ -+ switch (psLinuxMemArea->eAreaType) { -+ case LINUX_MEM_AREA_IOREMAP: -+ pui8CpuVAddr = -+ psLinuxMemArea->uData.sIORemap.pvIORemapCookie; -+ break; -+ case LINUX_MEM_AREA_VMALLOC: -+ pui8CpuVAddr = -+ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; -+ break; -+ case LINUX_MEM_AREA_EXTERNAL_KV: -+ pui8CpuVAddr = -+ psLinuxMemArea->uData.sExternalKV.pvExternalKV; -+ break; -+ default: -+ pui8CpuVAddr = IMG_NULL; -+ break; -+ } -+ -+ if (pui8CpuVAddr) { -+ if (pui8IndexCpuVAddr >= pui8CpuVAddr -+ && (pui8IndexCpuVAddr + ui32ByteSize) <= -+ (pui8CpuVAddr + psLinuxMemArea->ui32ByteSize)) { -+ return psOffsetStruct; -+ } else { -+ pui8CpuVAddr = NULL; -+ } -+ } -+ -+ if (pvKVIndexAddress == 
psOffsetStruct->psLinuxMemArea) { -+ if (psLinuxMemArea->eAreaType == -+ LINUX_MEM_AREA_SUB_ALLOC) { -+ PVR_ASSERT(psLinuxMemArea->uData.sSubAlloc. -+ psParentLinuxMemArea->eAreaType != -+ LINUX_MEM_AREA_SUB_ALLOC); -+ } -+ return psOffsetStruct; -+ } -+ } -+ printk(KERN_ERR "%s: Failed to find offset struct (KVAddress=%p)\n", -+ __FUNCTION__, pvKVIndexAddress); -+ return NULL; -+} -+ -+static void -+DeterminUsersSizeAndByteOffset(IMG_VOID * pvKVIndexAddress, -+ LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 * pui32RealByteSize, -+ IMG_UINT32 * pui32ByteOffset) -+{ -+ IMG_UINT8 *pui8StartVAddr = NULL; -+ IMG_UINT8 *pui8IndexCpuVAddr = (IMG_UINT8 *) pvKVIndexAddress; -+ IMG_UINT32 ui32PageAlignmentOffset = 0; -+ IMG_CPU_PHYADDR CpuPAddr; -+ -+ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0); -+ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr); -+ -+ if (pvKVIndexAddress != psLinuxMemArea && -+ (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP -+ || psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC -+ || psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV)) { -+ pui8StartVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); -+ *pui32ByteOffset = -+ (pui8IndexCpuVAddr - pui8StartVAddr) + -+ ui32PageAlignmentOffset; -+ } else { -+ *pui32ByteOffset = ui32PageAlignmentOffset; -+ } -+ -+ *pui32RealByteSize = -+ PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset); -+} -+ -+int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma) -+{ -+ unsigned long ulBytes; -+ PKV_OFFSET_STRUCT psCurrentRec = NULL; -+ int iRetVal = 0; -+ -+ ulBytes = ps_vma->vm_end - ps_vma->vm_start; -+ down_read(&g_mmap_sem); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Recieved mmap(2) request with a ui32MMapOffset=0x%08lx," -+ " and ui32ByteSize=%ld(0x%08lx)\n", __FUNCTION__, -+ PFN_TO_PHYS(ps_vma->vm_pgoff), ulBytes, ulBytes)); -+ -+ if ((ps_vma->vm_flags & VM_WRITE) && !(ps_vma->vm_flags & VM_SHARED) -+ ) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRMMap: Error - Cannot mmap non-shareable writable areas.")); -+ iRetVal = -EINVAL; -+ goto pvrmmap_exit; -+ } -+ -+ psCurrentRec = -+ FindOffsetStructByMMapOffset(PFN_TO_PHYS(ps_vma->vm_pgoff)); -+ if (!psCurrentRec) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRMMap: Error - Attempted to mmap unregistered area at vm_pgoff=%ld", -+ ps_vma->vm_pgoff)); -+ iRetVal = -EINVAL; -+ goto pvrmmap_exit; -+ } -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: > psCurrentRec->psLinuxMemArea=%p\n", -+ __FUNCTION__, psCurrentRec->psLinuxMemArea)); -+ if (!CheckSize(psCurrentRec->psLinuxMemArea, ulBytes)) { -+ iRetVal = -EINVAL; -+ goto pvrmmap_exit; -+ } -+ -+ ps_vma->vm_flags |= VM_RESERVED; -+ ps_vma->vm_flags |= VM_IO; -+ -+ ps_vma->vm_flags |= VM_DONTEXPAND; -+ -+ ps_vma->vm_private_data = (void *)psCurrentRec; -+ -+ switch (psCurrentRec->ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) { -+ case PVRSRV_HAP_CACHED: -+ -+ break; -+ case PVRSRV_HAP_WRITECOMBINE: -+ ps_vma->vm_page_prot = -+ pgprot_writecombine(ps_vma->vm_page_prot); -+ break; -+ case PVRSRV_HAP_UNCACHED: -+ ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", -+ __FUNCTION__)); -+ } -+ -+ ps_vma->vm_ops = &MMapIOOps; -+ -+ if (!DoMapToUser(psCurrentRec->psLinuxMemArea, ps_vma, 0, ulBytes)) { -+ iRetVal = -EAGAIN; -+ goto pvrmmap_exit; -+ } -+ -+ MMapVOpen(ps_vma); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n", -+ __FUNCTION__, ps_vma->vm_pgoff)); -+ -+pvrmmap_exit: -+ up_read(&g_mmap_sem); -+ return iRetVal; -+} -+ -+static 
PKV_OFFSET_STRUCT FindOffsetStructByMMapOffset(IMG_UINT32 ui32MMapOffset) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct; -+ -+ for (psOffsetStruct = g_psKVOffsetTable; psOffsetStruct; -+ psOffsetStruct = psOffsetStruct->psNext) { -+ if (psOffsetStruct->ui32MMapOffset == ui32MMapOffset) { -+ return psOffsetStruct; -+ } -+ } -+ return NULL; -+} -+ -+static IMG_BOOL -+DoMapToUser(LinuxMemArea * psLinuxMemArea, -+ struct vm_area_struct *ps_vma, -+ IMG_UINT32 ui32ByteOffset, IMG_UINT32 ui32ByteSize) -+{ -+ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) { -+ return DoMapToUser(psLinuxMemArea->uData.sSubAlloc. -+ psParentLinuxMemArea, ps_vma, -+ psLinuxMemArea->uData.sSubAlloc. -+ ui32ByteOffset + ui32ByteOffset, -+ ui32ByteSize); -+ } -+ -+ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0); -+ -+ -+ if (LinuxMemAreaPhysIsContig(psLinuxMemArea)) { -+ -+ unsigned long pfn = -+ LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset); -+ -+ int result = -+ IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, pfn, -+ ui32ByteSize, ps_vma->vm_page_prot); -+ if (result != 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error - Failed to map contiguous physical address range (%d)", -+ __FUNCTION__, result)); -+ return IMG_FALSE; -+ } -+ } else { -+ -+ unsigned long ulVMAPos = ps_vma->vm_start; -+ IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize; -+ IMG_UINT32 ui32PA; -+ -+ for (ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; -+ ui32PA += PAGE_SIZE) { -+ unsigned long pfn = -+ LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA); -+ -+ int result = -+ REMAP_PFN_RANGE(ps_vma, ulVMAPos, pfn, PAGE_SIZE, -+ ps_vma->vm_page_prot); -+ if (result != 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error - Failed to map discontiguous physical address range (%d)", -+ __FUNCTION__, result)); -+ return IMG_FALSE; -+ } -+ ulVMAPos += PAGE_SIZE; -+ } -+ } -+ -+ return IMG_TRUE; -+} -+ -+static IMG_BOOL -+CheckSize(LinuxMemArea * psLinuxMemArea, IMG_UINT32 ui32ByteSize) -+{ -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_UINT32 ui32PageAlignmentOffset; -+ IMG_UINT32 ui32RealByteSize; -+ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0); -+ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr); -+ ui32RealByteSize = -+ PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset); -+ if (ui32RealByteSize < ui32ByteSize) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Cannot mmap %ld bytes from: %-8p %-8p %08lx %-8ld %-24s\n", -+ ui32ByteSize, -+ psLinuxMemArea, -+ LinuxMemAreaToCpuVAddr(psLinuxMemArea), -+ LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0).uiAddr, -+ psLinuxMemArea->ui32ByteSize, -+ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType))); -+ return IMG_FALSE; -+ } -+ return IMG_TRUE; -+} -+ -+static void MMapVOpen(struct vm_area_struct *ps_vma) -+{ -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+ PKV_OFFSET_STRUCT psOffsetStruct = -+ (PKV_OFFSET_STRUCT) ps_vma->vm_private_data; -+ PVR_ASSERT(psOffsetStruct != IMG_NULL) -+ psOffsetStruct->ui16Mapped++; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: psLinuxMemArea=%p, KVAddress=%p MMapOffset=%ld, ui16Mapped=%d", -+ __FUNCTION__, -+ psOffsetStruct->psLinuxMemArea, -+ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea), -+ psOffsetStruct->ui32MMapOffset, psOffsetStruct->ui16Mapped)); -+#endif -+ -+} -+ -+static void MMapVClose(struct vm_area_struct *ps_vma) -+{ -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+ PKV_OFFSET_STRUCT psOffsetStruct = -+ (PKV_OFFSET_STRUCT) ps_vma->vm_private_data; -+ PVR_ASSERT(psOffsetStruct != IMG_NULL) -+ psOffsetStruct->ui16Mapped--; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: 
psLinuxMemArea=%p, CpuVAddr=%p ui32MMapOffset=%ld, ui16Mapped=%d", -+ __FUNCTION__, -+ psOffsetStruct->psLinuxMemArea, -+ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea), -+ psOffsetStruct->ui32MMapOffset, psOffsetStruct->ui16Mapped)); -+#endif -+ -+} -+ -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+static off_t PrintMMapRegistrations(char *buffer, size_t size, off_t off) -+{ -+ PKV_OFFSET_STRUCT psOffsetStruct; -+ off_t Ret; -+ -+ down_read(&g_mmap_sem); -+ if (!off) { -+ Ret = printAppend(buffer, size, 0, -+ "Allocations registered for mmap: %lu\n" -+ "In total these areas correspond to %lu bytes (excluding SUB areas)\n" -+ "psLinuxMemArea " -+ "CpuVAddr " -+ "CpuPAddr " -+ "MMapOffset " -+ "ByteLength " -+ "LinuxMemType " -+ "Pid Name Mapped Flags\n", -+ g_ui32RegisteredAreas, g_ui32TotalByteSize); -+ -+ goto unlock_and_return; -+ } -+ -+ if (size < 135) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ -+ for (psOffsetStruct = g_psKVOffsetTable; --off && psOffsetStruct; -+ psOffsetStruct = psOffsetStruct->psNext) ; -+ if (!psOffsetStruct) { -+ Ret = END_OF_FILE; -+ goto unlock_and_return; -+ } -+ -+ Ret = printAppend(buffer, size, 0, -+ "%-8p %-8p %08lx %08lx %-8ld %-24s %-5d %-8s %-5u %08lx(%s)\n", -+ psOffsetStruct->psLinuxMemArea, -+ LinuxMemAreaToCpuVAddr(psOffsetStruct-> -+ psLinuxMemArea), -+ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, -+ 0).uiAddr, -+ psOffsetStruct->ui32MMapOffset, -+ psOffsetStruct->psLinuxMemArea->ui32ByteSize, -+ LinuxMemAreaTypeToString(psOffsetStruct-> -+ psLinuxMemArea->eAreaType), -+ psOffsetStruct->pid, psOffsetStruct->pszName, -+ psOffsetStruct->ui16Mapped, -+ psOffsetStruct->ui32AllocFlags, -+ HAPFlagsToString(psOffsetStruct->ui32AllocFlags)); -+ -+unlock_and_return: -+ up_read(&g_mmap_sem); -+ return Ret; -+} -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmap.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmap.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmap.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmap.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,70 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__MMAP_H__) -+#define __MMAP_H__ -+ -+#include -+ -+#include "mm.h" -+ -+typedef struct KV_OFFSET_STRUCT_TAG { -+ -+ IMG_UINT32 ui32MMapOffset; -+ -+ LinuxMemArea *psLinuxMemArea; -+ -+ IMG_UINT32 ui32AllocFlags; -+ -+#if defined(DEBUG_LINUX_MMAP_AREAS) -+ pid_t pid; -+ const IMG_CHAR *pszName; -+ IMG_UINT16 ui16Mapped; -+ IMG_UINT16 ui16Faults; -+#endif -+ -+ struct KV_OFFSET_STRUCT_TAG *psNext; -+} KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT; -+ -+IMG_VOID PVRMMapInit(void); -+ -+IMG_VOID PVRMMapCleanup(void); -+ -+PVRSRV_ERROR PVRMMapRegisterArea(const IMG_CHAR * pszName, -+ LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 ui32AllocFlags); -+ -+PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea * psLinuxMemArea); -+ -+PVRSRV_ERROR PVRMMapKVIndexAddressToMMapData(IMG_VOID * pvKVIndexAddress, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 * pui32MMapOffset, -+ IMG_UINT32 * pui32ByteOffset, -+ IMG_UINT32 * pui32RealByteSize); -+ -+int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mm.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mm.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mm.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1507 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_defs.h" -+#include "services.h" -+#include "servicesint.h" -+#include "syscommon.h" -+#include "mm.h" -+#include "pvrmmap.h" -+#include "mmap.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "proc.h" -+#include "mutex.h" -+ -+#define PVR_FLUSH_CACHE_BEFORE_KMAP -+ -+#include -+ -+extern PVRSRV_LINUX_MUTEX gPVRSRVLock; -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+typedef enum { -+ DEBUG_MEM_ALLOC_TYPE_KMALLOC, -+ DEBUG_MEM_ALLOC_TYPE_VMALLOC, -+ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, -+ DEBUG_MEM_ALLOC_TYPE_IOREMAP, -+ DEBUG_MEM_ALLOC_TYPE_IO, -+ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, -+ DEBUG_MEM_ALLOC_TYPE_KMAP, -+ DEBUG_MEM_ALLOC_TYPE_COUNT -+} DEBUG_MEM_ALLOC_TYPE; -+ -+typedef struct _DEBUG_MEM_ALLOC_REC { -+ DEBUG_MEM_ALLOC_TYPE eAllocType; -+ IMG_VOID *pvKey; -+ IMG_VOID *pvCpuVAddr; -+ unsigned long ulCpuPAddr; -+ IMG_VOID *pvPrivateData; -+ IMG_UINT32 ui32Bytes; -+ pid_t pid; -+ IMG_CHAR *pszFileName; -+ IMG_UINT32 ui32Line; -+ -+ struct _DEBUG_MEM_ALLOC_REC *psNext; -+} DEBUG_MEM_ALLOC_REC; -+ -+static DEBUG_MEM_ALLOC_REC *g_MemoryRecords; -+ -+static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT]; -+static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT]; -+ -+static IMG_UINT32 g_SysRAMWaterMark; -+static IMG_UINT32 g_SysRAMHighWaterMark; -+ -+static IMG_UINT32 g_IOMemWaterMark; -+static IMG_UINT32 g_IOMemHighWaterMark; -+ -+static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType, -+ IMG_VOID * pvKey, -+ IMG_VOID * pvCpuVAddr, -+ unsigned long ulCpuPAddr, -+ IMG_VOID * pvPrivateData, -+ IMG_UINT32 ui32Bytes, -+ IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, -+ IMG_VOID * pvKey, -+ IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE -+ eAllocType); -+ -+static off_t printMemoryRecords(char *buffer, size_t size, off_t off); -+#endif -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+typedef struct _DEBUG_LINUX_MEM_AREA_REC { -+ LinuxMemArea *psLinuxMemArea; -+ IMG_UINT32 ui32Flags; -+ pid_t pid; -+ -+ struct _DEBUG_LINUX_MEM_AREA_REC *psNext; -+} DEBUG_LINUX_MEM_AREA_REC; -+ -+static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords; -+static IMG_UINT32 g_LinuxMemAreaCount; -+static IMG_UINT32 g_LinuxMemAreaWaterMark; -+static IMG_UINT32 g_LinuxMemAreaHighWaterMark; -+ -+static off_t printLinuxMemAreaRecords(char *buffer, size_t size, off_t off); -+#endif -+ -+static LinuxKMemCache *psLinuxMemAreaCache; -+ -+ -+static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID); -+static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea * psLinuxMemArea); -+#if defined(DEBUG_LINUX_MEM_AREAS) -+static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 ui32Flags); -+static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea * -+ psLinuxMemArea); -+static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea * psLinuxMemArea); -+#endif -+ -+PVRSRV_ERROR LinuxMMInit(IMG_VOID) -+{ -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ { -+ int iStatus; -+ iStatus = -+ CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords); -+ if (iStatus != 0) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+#endif 
-+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ { -+ int iStatus; -+ iStatus = CreateProcReadEntry("meminfo", printMemoryRecords); -+ if (iStatus != 0) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+#endif -+ -+ psLinuxMemAreaCache = -+ KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0); -+ if (!psLinuxMemAreaCache) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate kmem_cache", -+ __FUNCTION__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID LinuxMMCleanup(IMG_VOID) -+{ -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ { -+ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord = -+ g_LinuxMemAreaRecords, *psNextRecord; -+ -+ if (g_LinuxMemAreaCount) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)", -+ __FUNCTION__, g_LinuxMemAreaCount, -+ g_LinuxMemAreaWaterMark)); -+ } -+ -+ while (psCurrentRecord) { -+ LinuxMemArea *psLinuxMemArea; -+ -+ psNextRecord = psCurrentRecord->psNext; -+ psLinuxMemArea = psCurrentRecord->psLinuxMemArea; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes", -+ __FUNCTION__, psCurrentRecord->psLinuxMemArea, -+ LinuxMemAreaTypeToString(psCurrentRecord-> -+ psLinuxMemArea-> -+ eAreaType), -+ psCurrentRecord->psLinuxMemArea-> -+ ui32ByteSize)); -+ -+ LinuxMemAreaDeepFree(psLinuxMemArea); -+ -+ psCurrentRecord = psNextRecord; -+ } -+ RemoveProcEntry("mem_areas"); -+ } -+#endif -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ { -+ DEBUG_MEM_ALLOC_REC *psCurrentRecord = -+ g_MemoryRecords, *psNextRecord; -+ -+ while (psCurrentRecord) { -+ psNextRecord = psCurrentRecord->psNext; -+ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: " -+ "type=%s " -+ "CpuVAddr=%p " -+ "CpuPAddr=0x%08lx, " -+ "allocated @ file=%s,line=%d", -+ __FUNCTION__, -+ DebugMemAllocRecordTypeToString -+ (psCurrentRecord->eAllocType), -+ psCurrentRecord->pvCpuVAddr, -+ psCurrentRecord->ulCpuPAddr, -+ psCurrentRecord->pszFileName, -+ psCurrentRecord->ui32Line)); -+ switch (psCurrentRecord->eAllocType) { -+ case DEBUG_MEM_ALLOC_TYPE_KMALLOC: -+ KFreeWrapper(psCurrentRecord->pvCpuVAddr); -+ break; -+ case DEBUG_MEM_ALLOC_TYPE_IOREMAP: -+ IOUnmapWrapper(psCurrentRecord->pvCpuVAddr); -+ break; -+ case DEBUG_MEM_ALLOC_TYPE_IO: -+ -+ DebugMemAllocRecordRemove -+ (DEBUG_MEM_ALLOC_TYPE_IO, -+ psCurrentRecord->pvKey, __FILE__, -+ __LINE__); -+ break; -+ case DEBUG_MEM_ALLOC_TYPE_VMALLOC: -+ VFreeWrapper(psCurrentRecord->pvCpuVAddr); -+ break; -+ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES: -+ -+ DebugMemAllocRecordRemove -+ (DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, -+ psCurrentRecord->pvKey, __FILE__, -+ __LINE__); -+ break; -+ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE: -+ KMemCacheFreeWrapper(psCurrentRecord-> -+ pvPrivateData, -+ psCurrentRecord-> -+ pvCpuVAddr); -+ break; -+ case DEBUG_MEM_ALLOC_TYPE_KMAP: -+ KUnMapWrapper(psCurrentRecord->pvKey); -+ break; -+ default: -+ PVR_ASSERT(0); -+ } -+ psCurrentRecord = psNextRecord; -+ } -+ RemoveProcEntry("meminfo"); -+ } -+#endif -+ -+ if (psLinuxMemAreaCache) { -+ KMemCacheDestroyWrapper(psLinuxMemAreaCache); -+ psLinuxMemAreaCache = NULL; -+ } -+} -+ -+IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line) -+{ -+ IMG_VOID *pvRet; -+ pvRet = kmalloc(ui32ByteSize, GFP_KERNEL); -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ if (pvRet) { -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC, -+ pvRet, -+ pvRet, -+ 0, -+ NULL, -+ ui32ByteSize, pszFileName, ui32Line); -+ } -+#endif -+ 
return pvRet; -+} -+ -+IMG_VOID -+_KFreeWrapper(IMG_VOID * pvCpuVAddr, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line) -+{ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, -+ pszFileName, ui32Line); -+#endif -+ kfree(pvCpuVAddr); -+} -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+static IMG_VOID -+DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType, -+ IMG_VOID * pvKey, -+ IMG_VOID * pvCpuVAddr, -+ unsigned long ulCpuPAddr, -+ IMG_VOID * pvPrivateData, -+ IMG_UINT32 ui32Bytes, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line) -+{ -+ DEBUG_MEM_ALLOC_REC *psRecord; -+ -+ psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL); -+ -+ psRecord->eAllocType = eAllocType; -+ psRecord->pvKey = pvKey; -+ psRecord->pvCpuVAddr = pvCpuVAddr; -+ psRecord->ulCpuPAddr = ulCpuPAddr; -+ psRecord->pvPrivateData = pvPrivateData; -+ psRecord->pid = current->pid; -+ psRecord->ui32Bytes = ui32Bytes; -+ psRecord->pszFileName = pszFileName; -+ psRecord->ui32Line = ui32Line; -+ -+ psRecord->psNext = g_MemoryRecords; -+ g_MemoryRecords = psRecord; -+ -+ g_WaterMarkData[eAllocType] += ui32Bytes; -+ if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType]) { -+ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType]; -+ } -+ -+ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) { -+ g_SysRAMWaterMark += ui32Bytes; -+ if (g_SysRAMWaterMark > g_SysRAMHighWaterMark) { -+ g_SysRAMHighWaterMark = g_SysRAMWaterMark; -+ } -+ } else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) { -+ g_IOMemWaterMark += ui32Bytes; -+ if (g_IOMemWaterMark > g_IOMemHighWaterMark) { -+ g_IOMemHighWaterMark = g_IOMemWaterMark; -+ } -+ } -+} -+ -+static IMG_VOID -+DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID * pvKey, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line) -+{ -+ DEBUG_MEM_ALLOC_REC **ppsCurrentRecord; -+ -+ for (ppsCurrentRecord = &g_MemoryRecords; -+ *ppsCurrentRecord; -+ ppsCurrentRecord = &((*ppsCurrentRecord)->psNext)) { -+ if ((*ppsCurrentRecord)->eAllocType == eAllocType -+ && (*ppsCurrentRecord)->pvKey == pvKey) { -+ DEBUG_MEM_ALLOC_REC *psNextRecord; -+ DEBUG_MEM_ALLOC_TYPE eAllocType; -+ -+ psNextRecord = (*ppsCurrentRecord)->psNext; -+ eAllocType = (*ppsCurrentRecord)->eAllocType; -+ g_WaterMarkData[eAllocType] -= -+ (*ppsCurrentRecord)->ui32Bytes; -+ -+ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) { -+ g_SysRAMWaterMark -= -+ (*ppsCurrentRecord)->ui32Bytes; -+ } else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP -+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) { -+ g_IOMemWaterMark -= -+ (*ppsCurrentRecord)->ui32Bytes; -+ } -+ -+ kfree(*ppsCurrentRecord); -+ *ppsCurrentRecord = psNextRecord; -+ return; -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n", -+ __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), -+ pvKey, pszFileName, ui32Line)); -+} -+ -+static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE -+ eAllocType) -+{ -+ char *apszDebugMemoryRecordTypes[] = { -+ "KMALLOC", -+ "VMALLOC", -+ "ALLOC_PAGES", -+ "IOREMAP", -+ "IO", -+ "KMEM_CACHE_ALLOC", -+ "KMAP" -+ }; -+ 
return apszDebugMemoryRecordTypes[eAllocType]; -+} -+#endif -+ -+IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AllocFlags, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line) -+{ -+ pgprot_t PGProtFlags; -+ IMG_VOID *pvRet; -+ -+ switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) { -+ case PVRSRV_HAP_CACHED: -+ PGProtFlags = PAGE_KERNEL; -+ break; -+ case PVRSRV_HAP_WRITECOMBINE: -+ PGProtFlags = pgprot_writecombine(PAGE_KERNEL); -+ break; -+ case PVRSRV_HAP_UNCACHED: -+ PGProtFlags = pgprot_noncached(PAGE_KERNEL); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "VMAllocWrapper: unknown mapping flags=0x%08lx", -+ ui32AllocFlags)); -+ dump_stack(); -+ return NULL; -+ } -+ -+ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ if (pvRet) { -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC, -+ pvRet, -+ pvRet, -+ 0, -+ NULL, -+ PAGE_ALIGN(ui32Bytes), -+ pszFileName, ui32Line); -+ } -+#endif -+ -+ return pvRet; -+} -+ -+IMG_VOID -+_VFreeWrapper(IMG_VOID * pvCpuVAddr, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line) -+{ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, -+ pszFileName, ui32Line); -+#endif -+ vfree(pvCpuVAddr); -+} -+ -+LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ IMG_VOID *pvCpuVAddr; -+ -+ psLinuxMemArea = LinuxMemAreaStructAlloc(); -+ if (!psLinuxMemArea) { -+ goto failed; -+ } -+ -+ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags); -+ if (!pvCpuVAddr) { -+ goto failed; -+ } -+ -+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC; -+ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr; -+ psLinuxMemArea->ui32ByteSize = ui32Bytes; -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); -+#endif -+ -+ return psLinuxMemArea; -+ -+failed: -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__)); -+ if (psLinuxMemArea) -+ LinuxMemAreaStructFree(psLinuxMemArea); -+ return NULL; -+} -+ -+IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PVR_ASSERT(psLinuxMemArea); -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC); -+ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea); -+#endif -+ -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: pvCpuVAddr: %p", -+ __FUNCTION__, -+ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress)); -+ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); -+ -+ LinuxMemAreaStructFree(psLinuxMemArea); -+} -+ -+ -+IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line) -+{ -+ IMG_VOID *pvIORemapCookie = IMG_NULL; -+ -+ switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK) { -+ case PVRSRV_HAP_CACHED: -+#if defined(__arm__) -+ pvIORemapCookie = -+ (IMG_VOID *) ioremap_cached(BasePAddr.uiAddr, ui32Bytes); -+#else -+ pvIORemapCookie = -+ (IMG_VOID *) ioremap(BasePAddr.uiAddr, ui32Bytes); -+#endif -+ break; -+ case PVRSRV_HAP_WRITECOMBINE: -+#if defined(__arm__) -+ pvIORemapCookie = -+ (IMG_VOID *) ioremap_nocache(BasePAddr.uiAddr, ui32Bytes); -+#else -+ pvIORemapCookie = -+ (IMG_VOID *) ioremap_nocache(BasePAddr.uiAddr, ui32Bytes); -+#endif -+ break; -+ case PVRSRV_HAP_UNCACHED: -+ pvIORemapCookie = -+ (IMG_VOID *) 
ioremap_nocache(BasePAddr.uiAddr, ui32Bytes); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "IORemapWrapper: unknown mapping flags")); -+ return NULL; -+ } -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ if (pvIORemapCookie) { -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP, -+ pvIORemapCookie, -+ pvIORemapCookie, -+ BasePAddr.uiAddr, -+ NULL, ui32Bytes, pszFileName, ui32Line); -+ } -+#endif -+ -+ return pvIORemapCookie; -+} -+ -+IMG_VOID -+_IOUnmapWrapper(IMG_VOID * pvIORemapCookie, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line) -+{ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, -+ pszFileName, ui32Line); -+#endif -+ iounmap(pvIORemapCookie); -+} -+ -+LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ IMG_VOID *pvIORemapCookie; -+ -+ psLinuxMemArea = LinuxMemAreaStructAlloc(); -+ if (!psLinuxMemArea) { -+ return NULL; -+ } -+ -+ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags); -+ if (!pvIORemapCookie) { -+ LinuxMemAreaStructFree(psLinuxMemArea); -+ return NULL; -+ } -+ -+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP; -+ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie; -+ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr; -+ psLinuxMemArea->ui32ByteSize = ui32Bytes; -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); -+#endif -+ -+ return psLinuxMemArea; -+} -+ -+IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP); -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea); -+#endif -+ -+ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie); -+ -+ LinuxMemAreaStructFree(psLinuxMemArea); -+} -+ -+LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR * pBasePAddr, -+ IMG_VOID * pvCPUVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_BOOL bPhysContig, -+ IMG_UINT32 ui32AreaFlags) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ -+ psLinuxMemArea = LinuxMemAreaStructAlloc(); -+ if (!psLinuxMemArea) { -+ return NULL; -+ } -+ -+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV; -+ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr; -+ psLinuxMemArea->uData.sExternalKV.bPhysContig = bPhysContig; -+ if (bPhysContig) { -+ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = -+ *pBasePAddr; -+ } else { -+ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = -+ pBasePAddr; -+ } -+ psLinuxMemArea->ui32ByteSize = ui32Bytes; -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); -+#endif -+ -+ return psLinuxMemArea; -+} -+ -+IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV); -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea); -+#endif -+ -+ LinuxMemAreaStructFree(psLinuxMemArea); -+} -+ -+LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags) -+{ -+ LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc(); -+ if (!psLinuxMemArea) { -+ return NULL; -+ } -+ -+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO; -+ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr; -+ psLinuxMemArea->ui32ByteSize = ui32Bytes; -+ -+#if 
defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO, -+ (IMG_VOID *) BasePAddr.uiAddr, -+ 0, -+ BasePAddr.uiAddr, NULL, ui32Bytes, "unknown", 0); -+#endif -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); -+#endif -+ -+ return psLinuxMemArea; -+} -+ -+IMG_VOID FreeIOLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO); -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea); -+#endif -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, -+ (IMG_VOID *) psLinuxMemArea->uData.sIO. -+ CPUPhysAddr.uiAddr, __FILE__, __LINE__); -+#endif -+ -+ LinuxMemAreaStructFree(psLinuxMemArea); -+} -+ -+LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ IMG_UINT32 ui32PageCount; -+ struct page **pvPageList; -+ IMG_UINT32 i; -+ -+ psLinuxMemArea = LinuxMemAreaStructAlloc(); -+ if (!psLinuxMemArea) { -+ goto failed_area_alloc; -+ } -+ -+ ui32PageCount = RANGE_TO_PAGES(ui32Bytes); -+ pvPageList = -+ VMallocWrapper(sizeof(void *) * ui32PageCount, PVRSRV_HAP_CACHED); -+ if (!pvPageList) { -+ goto failed_vmalloc; -+ } -+ -+ for (i = 0; i < ui32PageCount; i++) { -+ pvPageList[i] = alloc_pages(GFP_KERNEL, 0); -+ if (!pvPageList[i]) { -+ goto failed_alloc_pages; -+ } -+ -+ } -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, -+ pvPageList, -+ 0, 0, NULL, PAGE_ALIGN(ui32Bytes), "unknown", 0); -+#endif -+ -+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES; -+ psLinuxMemArea->uData.sPageList.pvPageList = pvPageList; -+ psLinuxMemArea->ui32ByteSize = ui32Bytes; -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); -+#endif -+ -+ return psLinuxMemArea; -+ -+failed_alloc_pages: -+ for (i--; i >= 0; i--) { -+ __free_pages(pvPageList[i], 0); -+ } -+ VFreeWrapper(pvPageList); -+failed_vmalloc: -+ LinuxMemAreaStructFree(psLinuxMemArea); -+failed_area_alloc: -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__)); -+ -+ return NULL; -+} -+ -+IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ IMG_UINT32 ui32PageCount; -+ struct page **pvPageList; -+ IMG_UINT32 i; -+ -+ PVR_ASSERT(psLinuxMemArea); -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES); -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea); -+#endif -+ -+ ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize); -+ pvPageList = psLinuxMemArea->uData.sPageList.pvPageList; -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList, -+ __FILE__, __LINE__); -+#endif -+ -+ for (i = 0; i < ui32PageCount; i++) { -+ __free_pages(pvPageList[i], 0); -+ } -+ VFreeWrapper(psLinuxMemArea->uData.sPageList.pvPageList); -+ -+ LinuxMemAreaStructFree(psLinuxMemArea); -+} -+ -+struct page *LinuxMemAreaOffsetToPage(LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 ui32ByteOffset) -+{ -+ IMG_UINT32 ui32PageIndex; -+ IMG_CHAR *pui8Addr; -+ -+ switch (psLinuxMemArea->eAreaType) { -+ case LINUX_MEM_AREA_ALLOC_PAGES: -+ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset); -+ return psLinuxMemArea->uData.sPageList. 
-+ pvPageList[ui32PageIndex]; -+ break; -+ case LINUX_MEM_AREA_VMALLOC: -+ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; -+ pui8Addr += ui32ByteOffset; -+ return vmalloc_to_page(pui8Addr); -+ break; -+ case LINUX_MEM_AREA_SUB_ALLOC: -+ return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc. -+ psParentLinuxMemArea, -+ psLinuxMemArea->uData.sSubAlloc. -+ ui32ByteOffset + -+ ui32ByteOffset); -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unsupported request for struct page from LinuxMemArea with type=%s", -+ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType))); -+ return NULL; -+ } -+} -+ -+IMG_VOID *_KMapWrapper(struct page * psPage, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line) -+{ -+ IMG_VOID *pvRet; -+ -+ -+ flush_cache_all(); -+ -+ pvRet = kmap(psPage); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ if (pvRet) { -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMAP, -+ psPage, -+ pvRet, 0, NULL, PAGE_SIZE, "unknown", 0); -+ } -+#endif -+ -+ return pvRet; -+} -+ -+IMG_VOID -+_KUnMapWrapper(struct page * psPage, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line) -+{ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMAP, psPage, -+ pszFileName, ui32Line); -+#endif -+ -+ kunmap(psPage); -+} -+ -+LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR * pszName, -+ size_t Size, -+ size_t Align, IMG_UINT32 ui32Flags) -+{ -+ return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL -+ ); -+} -+ -+IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache * psCache) -+{ -+ kmem_cache_destroy(psCache); -+} -+ -+IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache * psCache, -+ gfp_t Flags, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line) -+{ -+ IMG_VOID *pvRet; -+ -+ pvRet = kmem_cache_alloc(psCache, Flags); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, -+ pvRet, -+ pvRet, -+ 0, -+ psCache, -+ kmem_cache_size(psCache), pszFileName, ui32Line); -+#endif -+ -+ return pvRet; -+} -+ -+IMG_VOID -+_KMemCacheFreeWrapper(LinuxKMemCache * psCache, IMG_VOID * pvObject, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line) -+{ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, -+ pszFileName, ui32Line); -+#endif -+ -+ kmem_cache_free(psCache, pvObject); -+} -+ -+const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache * psCache) -+{ -+ -+ return ""; -+} -+ -+LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea * psParentLinuxMemArea, -+ IMG_UINT32 ui32ByteOffset, -+ IMG_UINT32 ui32Bytes) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ -+ PVR_ASSERT((ui32ByteOffset + ui32Bytes) <= -+ psParentLinuxMemArea->ui32ByteSize); -+ -+ psLinuxMemArea = LinuxMemAreaStructAlloc(); -+ if (!psLinuxMemArea) { -+ return NULL; -+ } -+ -+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC; -+ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = -+ psParentLinuxMemArea; -+ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset; -+ psLinuxMemArea->ui32ByteSize = ui32Bytes; -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ { -+ DEBUG_LINUX_MEM_AREA_REC *psParentRecord; -+ psParentRecord = -+ DebugLinuxMemAreaRecordFind(psParentLinuxMemArea); -+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, -+ psParentRecord->ui32Flags); -+ } -+#endif -+ -+ return psLinuxMemArea; -+} -+ -+IMG_VOID FreeSubLinuxMemArea(LinuxMemArea * psLinuxMemArea) -+{ -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC); -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+ 
DebugLinuxMemAreaRecordRemove(psLinuxMemArea); -+#endif -+ -+ LinuxMemAreaStructFree(psLinuxMemArea); -+} -+ -+static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID) -+{ -+ return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL); -+} -+ -+static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea * psLinuxMemArea) -+{ -+ KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea); -+ -+} -+ -+IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea * psLinuxMemArea) -+{ -+ switch (psLinuxMemArea->eAreaType) { -+ case LINUX_MEM_AREA_VMALLOC: -+ FreeVMallocLinuxMemArea(psLinuxMemArea); -+ break; -+ case LINUX_MEM_AREA_ALLOC_PAGES: -+ FreeAllocPagesLinuxMemArea(psLinuxMemArea); -+ break; -+ case LINUX_MEM_AREA_IOREMAP: -+ FreeIORemapLinuxMemArea(psLinuxMemArea); -+ break; -+ case LINUX_MEM_AREA_EXTERNAL_KV: -+ FreeExternalKVLinuxMemArea(psLinuxMemArea); -+ break; -+ case LINUX_MEM_AREA_IO: -+ FreeIOLinuxMemArea(psLinuxMemArea); -+ break; -+ case LINUX_MEM_AREA_SUB_ALLOC: -+ FreeSubLinuxMemArea(psLinuxMemArea); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n", -+ __FUNCTION__, psLinuxMemArea->eAreaType)); -+ } -+} -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+static IMG_VOID -+DebugLinuxMemAreaRecordAdd(LinuxMemArea * psLinuxMemArea, IMG_UINT32 ui32Flags) -+{ -+ DEBUG_LINUX_MEM_AREA_REC *psNewRecord; -+ const char *pi8FlagsString; -+ -+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) { -+ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize; -+ if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark) { -+ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark; -+ } -+ } -+ g_LinuxMemAreaCount++; -+ -+ psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL); -+ if (psNewRecord) { -+ -+ psNewRecord->psLinuxMemArea = psLinuxMemArea; -+ psNewRecord->ui32Flags = ui32Flags; -+ psNewRecord->pid = current->pid; -+ psNewRecord->psNext = g_LinuxMemAreaRecords; -+ g_LinuxMemAreaRecords = psNewRecord; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to allocate linux memory area record.", -+ __FUNCTION__)); -+ } -+ -+ pi8FlagsString = HAPFlagsToString(ui32Flags); -+ if (strstr(pi8FlagsString, "UNKNOWN")) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx", -+ __FUNCTION__, ui32Flags, psLinuxMemArea)); -+ -+ } -+} -+ -+static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea * -+ psLinuxMemArea) -+{ -+ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord; -+ -+ for (psCurrentRecord = g_LinuxMemAreaRecords; -+ psCurrentRecord; psCurrentRecord = psCurrentRecord->psNext) { -+ if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea) { -+ return psCurrentRecord; -+ } -+ } -+ return NULL; -+} -+ -+static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea * psLinuxMemArea) -+{ -+ DEBUG_LINUX_MEM_AREA_REC **ppsCurrentRecord; -+ -+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) { -+ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize; -+ } -+ g_LinuxMemAreaCount--; -+ -+ for (ppsCurrentRecord = &g_LinuxMemAreaRecords; -+ *ppsCurrentRecord; -+ ppsCurrentRecord = &((*ppsCurrentRecord)->psNext)) { -+ if ((*ppsCurrentRecord)->psLinuxMemArea == psLinuxMemArea) { -+ DEBUG_LINUX_MEM_AREA_REC *psNextRecord; -+ -+ psNextRecord = (*ppsCurrentRecord)->psNext; -+ kfree(*ppsCurrentRecord); -+ *ppsCurrentRecord = psNextRecord; -+ return; -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: couldn't find an entry for psLinuxMemArea=%p\n", -+ __FUNCTION__, psLinuxMemArea)); -+} -+#endif -+ -+IMG_VOID 
*LinuxMemAreaToCpuVAddr(LinuxMemArea * psLinuxMemArea) -+{ -+ switch (psLinuxMemArea->eAreaType) { -+ case LINUX_MEM_AREA_VMALLOC: -+ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; -+ case LINUX_MEM_AREA_IOREMAP: -+ return psLinuxMemArea->uData.sIORemap.pvIORemapCookie; -+ case LINUX_MEM_AREA_EXTERNAL_KV: -+ return psLinuxMemArea->uData.sExternalKV.pvExternalKV; -+ case LINUX_MEM_AREA_SUB_ALLOC: -+ { -+ IMG_CHAR *pAddr = -+ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData. -+ sSubAlloc. -+ psParentLinuxMemArea); -+ if (!pAddr) { -+ return NULL; -+ } -+ return pAddr + -+ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset; -+ } -+ default: -+ return NULL; -+ } -+} -+ -+IMG_CPU_PHYADDR -+LinuxMemAreaToCpuPAddr(LinuxMemArea * psLinuxMemArea, IMG_UINT32 ui32ByteOffset) -+{ -+ IMG_CPU_PHYADDR CpuPAddr; -+ -+ CpuPAddr.uiAddr = 0; -+ -+ switch (psLinuxMemArea->eAreaType) { -+ case LINUX_MEM_AREA_IOREMAP: -+ { -+ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr; -+ CpuPAddr.uiAddr += ui32ByteOffset; -+ break; -+ } -+ case LINUX_MEM_AREA_EXTERNAL_KV: -+ { -+ if (psLinuxMemArea->uData.sExternalKV.bPhysContig) { -+ CpuPAddr = -+ SysSysPAddrToCpuPAddr(psLinuxMemArea->uData. -+ sExternalKV.uPhysAddr. -+ SysPhysAddr); -+ CpuPAddr.uiAddr += ui32ByteOffset; -+ } else { -+ IMG_UINT32 ui32PageIndex = -+ PHYS_TO_PFN(ui32ByteOffset); -+ IMG_SYS_PHYADDR SysPAddr = -+ psLinuxMemArea->uData.sExternalKV.uPhysAddr. -+ pSysPhysAddr[ui32PageIndex]; -+ -+ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr); -+ CpuPAddr.uiAddr += -+ ADDR_TO_PAGE_OFFSET(ui32ByteOffset); -+ } -+ break; -+ } -+ case LINUX_MEM_AREA_IO: -+ { -+ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr; -+ CpuPAddr.uiAddr += ui32ByteOffset; -+ break; -+ } -+ case LINUX_MEM_AREA_VMALLOC: -+ { -+ IMG_CHAR *pCpuVAddr; -+ pCpuVAddr = -+ (IMG_CHAR *) psLinuxMemArea->uData.sVmalloc. -+ pvVmallocAddress; -+ pCpuVAddr += ui32ByteOffset; -+ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr); -+ break; -+ } -+ case LINUX_MEM_AREA_ALLOC_PAGES: -+ { -+ struct page *page; -+ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset); -+ page = -+ psLinuxMemArea->uData.sPageList. -+ pvPageList[ui32PageIndex]; -+ CpuPAddr.uiAddr = page_to_phys(page); -+ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset); -+ break; -+ } -+ case LINUX_MEM_AREA_SUB_ALLOC: -+ { -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psLinuxMemArea->uData. -+ sSubAlloc. -+ psParentLinuxMemArea, -+ psLinuxMemArea->uData. 
-+ sSubAlloc.ui32ByteOffset + -+ ui32ByteOffset); -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n", -+ __FUNCTION__, psLinuxMemArea->eAreaType)); -+ } -+ -+ PVR_ASSERT(CpuPAddr.uiAddr); -+ return CpuPAddr; -+} -+ -+IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea * psLinuxMemArea) -+{ -+ switch (psLinuxMemArea->eAreaType) { -+ case LINUX_MEM_AREA_IOREMAP: -+ case LINUX_MEM_AREA_IO: -+ return IMG_TRUE; -+ -+ case LINUX_MEM_AREA_EXTERNAL_KV: -+ return psLinuxMemArea->uData.sExternalKV.bPhysContig; -+ -+ case LINUX_MEM_AREA_VMALLOC: -+ case LINUX_MEM_AREA_ALLOC_PAGES: -+ return IMG_FALSE; -+ -+ case LINUX_MEM_AREA_SUB_ALLOC: -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s is meaningless for LinuxMemArea type (%d)", -+ __FUNCTION__, psLinuxMemArea->eAreaType)); -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n", -+ __FUNCTION__, psLinuxMemArea->eAreaType)); -+ break; -+ } -+ return IMG_FALSE; -+} -+ -+LINUX_MEM_AREA_TYPE LinuxMemAreaRootType(LinuxMemArea * psLinuxMemArea) -+{ -+ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) { -+ return LinuxMemAreaRootType(psLinuxMemArea->uData.sSubAlloc. -+ psParentLinuxMemArea); -+ } else { -+ return psLinuxMemArea->eAreaType; -+ } -+} -+ -+const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType) -+{ -+ -+ switch (eMemAreaType) { -+ case LINUX_MEM_AREA_IOREMAP: -+ return "LINUX_MEM_AREA_IOREMAP"; -+ case LINUX_MEM_AREA_EXTERNAL_KV: -+ return "LINUX_MEM_AREA_EXTERNAL_KV"; -+ case LINUX_MEM_AREA_IO: -+ return "LINUX_MEM_AREA_IO"; -+ case LINUX_MEM_AREA_VMALLOC: -+ return "LINUX_MEM_AREA_VMALLOC"; -+ case LINUX_MEM_AREA_SUB_ALLOC: -+ return "LINUX_MEM_AREA_SUB_ALLOC"; -+ case LINUX_MEM_AREA_ALLOC_PAGES: -+ return "LINUX_MEM_AREA_ALLOC_PAGES"; -+ default: -+ PVR_ASSERT(0); -+ } -+ -+ return ""; -+} -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) -+static off_t printLinuxMemAreaRecords(char *buffer, size_t count, off_t off) -+{ -+ DEBUG_LINUX_MEM_AREA_REC *psRecord; -+ off_t Ret; -+ -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ if (!off) { -+ if (count < 500) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ Ret = printAppend(buffer, count, 0, -+ "Number of Linux Memory Areas: %lu\n" -+ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n" -+ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n" -+ "\nDetails for all Linux Memory Areas:\n" -+ "%s %-24s %s %s %-8s %-5s %s\n", -+ g_LinuxMemAreaCount, -+ g_LinuxMemAreaWaterMark, -+ g_LinuxMemAreaHighWaterMark, -+ "psLinuxMemArea", -+ "LinuxMemType", -+ "CpuVAddr", -+ "CpuPAddr", "Bytes", "Pid", "Flags"); -+ goto unlock_and_return; -+ } -+ -+ for (psRecord = g_LinuxMemAreaRecords; --off && psRecord; -+ psRecord = psRecord->psNext) ; -+ if (!psRecord) { -+ Ret = END_OF_FILE; -+ goto unlock_and_return; -+ } -+ -+ if (count < 500) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ -+ Ret = printAppend(buffer, count, 0, -+ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n", -+ psRecord->psLinuxMemArea, -+ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea-> -+ eAreaType), -+ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea), -+ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea, -+ 0).uiAddr, -+ psRecord->psLinuxMemArea->ui32ByteSize, psRecord->pid, -+ psRecord->ui32Flags, -+ HAPFlagsToString(psRecord->ui32Flags) -+ ); -+ -+unlock_and_return: -+ -+ LinuxUnLockMutex(&gPVRSRVLock); -+ return Ret; -+} -+#endif -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+static off_t 
printMemoryRecords(char *buffer, size_t count, off_t off) -+{ -+ DEBUG_MEM_ALLOC_REC *psRecord; -+ off_t Ret; -+ -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ if (!off) { -+ if (count < 1000) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ -+ Ret = printAppend(buffer, count, 0, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes allocated via kmalloc", -+ g_WaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_KMALLOC]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes allocated via kmalloc", -+ g_HighWaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_KMALLOC]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes allocated via vmalloc", -+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes allocated via vmalloc", -+ g_HighWaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_VMALLOC]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes allocated via alloc_pages", -+ g_WaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes allocated via alloc_pages", -+ g_HighWaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes allocated via ioremap", -+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes allocated via ioremap", -+ g_HighWaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_IOREMAP]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes reserved for \"IO\" memory areas", -+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes allocated for \"IO\" memory areas", -+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes allocated via kmem_cache_alloc", -+ g_WaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes allocated via kmem_cache_alloc", -+ g_HighWaterMarkData -+ [DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Current Water Mark of bytes mapped via kmap", -+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]); -+ Ret = -+ printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "Highest Water Mark of bytes mapped via kmap", -+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]); -+ -+ Ret = printAppend(buffer, count, Ret, "\n"); -+ -+ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "The Current Water Mark for memory allocated from system RAM", -+ g_SysRAMWaterMark); -+ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "The Highest Water Mark for memory allocated from system RAM", -+ g_SysRAMHighWaterMark); -+ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "The Current Water Mark for memory allocated from IO memory", -+ g_IOMemWaterMark); -+ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n", -+ "The Highest Water Mark for memory allocated from IO memory", -+ g_IOMemHighWaterMark); -+ -+ Ret = printAppend(buffer, count, Ret, "\n"); -+ -+ Ret = -+ printAppend(buffer, count, Ret, -+ "Details for all known allocations:\n" -+ 
"%-16s %-8s %-8s %-10s %-5s %-10s %s\n", "Type", -+ "CpuVAddr", "CpuPAddr", "Bytes", "PID", -+ "PrivateData", "Filename:Line"); -+ -+ -+ goto unlock_and_return; -+ } -+ -+ if (count < 1000) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ -+ for (psRecord = g_MemoryRecords; --off && psRecord; -+ psRecord = psRecord->psNext) ; -+ if (!psRecord) { -+ Ret = END_OF_FILE; -+ goto unlock_and_return; -+ } -+ -+ if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) { -+ Ret = printAppend(buffer, count, 0, -+ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n", -+ DebugMemAllocRecordTypeToString(psRecord-> -+ eAllocType), -+ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr, -+ psRecord->ui32Bytes, psRecord->pid, "NULL", -+ psRecord->pszFileName, psRecord->ui32Line); -+ } else { -+ Ret = printAppend(buffer, count, 0, -+ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n", -+ DebugMemAllocRecordTypeToString(psRecord-> -+ eAllocType), -+ psRecord->pvCpuVAddr, psRecord->ulCpuPAddr, -+ psRecord->ui32Bytes, psRecord->pid, -+ KMemCacheNameWrapper(psRecord->pvPrivateData), -+ psRecord->pszFileName, psRecord->ui32Line); -+ } -+ -+unlock_and_return: -+ -+ LinuxUnLockMutex(&gPVRSRVLock); -+ return Ret; -+} -+#endif -+ -+#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS) -+const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags) -+{ -+ static IMG_CHAR szFlags[50]; -+ IMG_UINT32 ui32Pos = 0; -+ IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex; -+ IMG_CHAR *apszCacheTypes[] = { -+ "UNCACHED", -+ "CACHED", -+ "WRITECOMBINE", -+ "UNKNOWN" -+ }; -+ IMG_CHAR *apszMapType[] = { -+ "KERNEL_ONLY", -+ "SINGLE_PROCESS", -+ "MULTI_PROCESS", -+ "FROM_EXISTING_PROCESS", -+ "NO_CPU_VIRTUAL", -+ "UNKNOWN" -+ }; -+ -+ if (ui32Flags & PVRSRV_HAP_UNCACHED) { -+ ui32CacheTypeIndex = 0; -+ } else if (ui32Flags & PVRSRV_HAP_CACHED) { -+ ui32CacheTypeIndex = 1; -+ } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) { -+ ui32CacheTypeIndex = 2; -+ } else { -+ ui32CacheTypeIndex = 3; -+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%d)", -+ __FUNCTION__, -+ (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK))); -+ } -+ -+ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) { -+ ui32MapTypeIndex = 0; -+ } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) { -+ ui32MapTypeIndex = 1; -+ } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) { -+ ui32MapTypeIndex = 2; -+ } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) { -+ ui32MapTypeIndex = 3; -+ } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) { -+ ui32MapTypeIndex = 4; -+ } else { -+ ui32MapTypeIndex = 5; -+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%d)", -+ __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK))); -+ } -+ -+ ui32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]); -+ sprintf(szFlags + ui32Pos, "%s", apszMapType[ui32MapTypeIndex]); -+ -+ return szFlags; -+} -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mm.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mm.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mm.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,271 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __IMG_LINUX_MM_H__ -+#define __IMG_LINUX_MM_H__ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+ -+#include -+ -+#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT) -+#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT) -+ -+#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT) -+ -+#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1)) -+ -+#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot) -+ -+#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot) -+ -+static inline IMG_UINT32 VMallocToPhys(IMG_VOID * pCpuVAddr) -+{ -+ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + -+ ADDR_TO_PAGE_OFFSET(pCpuVAddr)); -+ -+} -+ -+typedef enum { -+ LINUX_MEM_AREA_IOREMAP, -+ LINUX_MEM_AREA_EXTERNAL_KV, -+ LINUX_MEM_AREA_IO, -+ LINUX_MEM_AREA_VMALLOC, -+ LINUX_MEM_AREA_ALLOC_PAGES, -+ LINUX_MEM_AREA_SUB_ALLOC, -+ LINUX_MEM_AREA_TYPE_COUNT -+} LINUX_MEM_AREA_TYPE; -+ -+typedef struct _LinuxMemArea LinuxMemArea; -+ -+struct _LinuxMemArea { -+ LINUX_MEM_AREA_TYPE eAreaType; -+ union _uData { -+ struct _sIORemap { -+ -+ IMG_CPU_PHYADDR CPUPhysAddr; -+ IMG_VOID *pvIORemapCookie; -+ } sIORemap; -+ struct _sExternalKV { -+ -+ IMG_BOOL bPhysContig; -+ union { -+ -+ IMG_SYS_PHYADDR SysPhysAddr; -+ IMG_SYS_PHYADDR *pSysPhysAddr; -+ } uPhysAddr; -+ IMG_VOID *pvExternalKV; -+ } sExternalKV; -+ struct _sIO { -+ -+ IMG_CPU_PHYADDR CPUPhysAddr; -+ } sIO; -+ struct _sVmalloc { -+ -+ IMG_VOID *pvVmallocAddress; -+ } sVmalloc; -+ struct _sPageList { -+ -+ struct page **pvPageList; -+ } sPageList; -+ struct _sSubAlloc { -+ -+ LinuxMemArea *psParentLinuxMemArea; -+ IMG_UINT32 ui32ByteOffset; -+ } sSubAlloc; -+ } uData; -+ -+ IMG_UINT32 ui32ByteSize; -+}; -+ -+typedef struct kmem_cache LinuxKMemCache; -+ -+PVRSRV_ERROR LinuxMMInit(IMG_VOID); -+ -+IMG_VOID LinuxMMCleanup(IMG_VOID); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__) -+#else -+#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0) -+#endif -+IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR * szFileName, -+ IMG_UINT32 ui32Line); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__) -+#else -+#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0) -+#endif -+IMG_VOID _KFreeWrapper(IMG_VOID * pvCpuVAddr, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define VMallocWrapper(ui32Bytes, ui32AllocFlags) 
_VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__) -+#else -+#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0) -+#endif -+IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__) -+#else -+#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0) -+#endif -+IMG_VOID _VFreeWrapper(IMG_VOID * pvCpuVAddr, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags); -+ -+IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea * psLinuxMemArea); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \ -+ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__) -+#else -+#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \ -+ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0) -+#endif -+IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line); -+ -+LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags); -+ -+IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea * psLinuxMemArea); -+ -+LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR * pBasePAddr, -+ IMG_VOID * pvCPUVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_BOOL bPhysContig, -+ IMG_UINT32 ui32AreaFlags); -+ -+IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea * psLinuxMemArea); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define IOUnmapWrapper(pvIORemapCookie) \ -+ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__) -+#else -+#define IOUnmapWrapper(pvIORemapCookie) \ -+ _IOUnmapWrapper(pvIORemapCookie, NULL, 0) -+#endif -+IMG_VOID _IOUnmapWrapper(IMG_VOID * pvIORemapCookie, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+struct page *LinuxMemAreaOffsetToPage(LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 ui32ByteOffset); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define KMapWrapper(psPage) _KMapWrapper(psPage, __FILE__, __LINE__) -+#else -+#define KMapWrapper(psPage) _KMapWrapper(psPage, NULL, 0) -+#endif -+IMG_VOID *_KMapWrapper(struct page *psPage, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define KUnMapWrapper(psPage) _KUnMapWrapper(psPage, __FILE__, __LINE__) -+#else -+#define KUnMapWrapper(psPage) _KUnMapWrapper(psPage, NULL, 0) -+#endif -+IMG_VOID _KUnMapWrapper(struct page *psPage, IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line); -+ -+LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR * pszName, size_t Size, -+ size_t Align, IMG_UINT32 ui32Flags); -+ -+IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache * psCache); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__) -+#else -+#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0) -+#endif -+ -+IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache * psCache, gfp_t Flags, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line); -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__) -+#else 
-+#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0) -+#endif -+IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache * psCache, IMG_VOID * pvObject, -+ IMG_CHAR * pszFileName, IMG_UINT32 ui32Line); -+ -+const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache * psCache); -+ -+LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags); -+ -+IMG_VOID FreeIOLinuxMemArea(LinuxMemArea * psLinuxMemArea); -+ -+LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32AreaFlags); -+ -+IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea * psLinuxMemArea); -+ -+LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea * psParentLinuxMemArea, -+ IMG_UINT32 ui32ByteOffset, -+ IMG_UINT32 ui32Bytes); -+ -+IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea * psLinuxMemArea); -+ -+#if defined(LINUX_MEM_AREAS_DEBUG) -+IMG_VOID LinuxMemAreaRegister(LinuxMemArea * psLinuxMemArea); -+#else -+#define LinuxMemAreaRegister(X) -+#endif -+ -+IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea * psLinuxMemArea); -+ -+IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea * psLinuxMemArea, -+ IMG_UINT32 ui32ByteOffset); -+ -+#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr) -+ -+IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea * psLinuxMemArea); -+ -+LINUX_MEM_AREA_TYPE LinuxMemAreaRootType(LinuxMemArea * psLinuxMemArea); -+ -+const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType); -+ -+#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS) -+const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags); -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmu.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmu.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmu.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmu.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1523 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "sgxdefs.h" -+#include "sgxmmu.h" -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "hash.h" -+#include "ra.h" -+#include "pdump_km.h" -+#include "sgxapi_km.h" -+#include "sgxinfo.h" -+#include "sgxinfokm.h" -+#include "mmu.h" -+ -+typedef struct _MMU_PT_INFO_ { -+ -+ IMG_VOID *hPTPageOSMemHandle; -+ IMG_CPU_VIRTADDR PTPageCpuVAddr; -+ IMG_UINT32 ui32ValidPTECount; -+} MMU_PT_INFO; -+ -+struct _MMU_CONTEXT_ { -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ IMG_CPU_VIRTADDR pvPDCpuVAddr; -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ -+ IMG_VOID *hPDOSMemHandle; -+ -+ MMU_PT_INFO *apsPTInfoList[1024]; -+ -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ -+ struct _MMU_CONTEXT_ *psNext; -+}; -+ -+struct _MMU_HEAP_ { -+ MMU_CONTEXT *psMMUContext; -+ -+ IMG_UINT32 ui32PTBaseIndex; -+ IMG_UINT32 ui32PTPageCount; -+ IMG_UINT32 ui32PTEntryCount; -+ -+ RA_ARENA *psVMArena; -+ -+ DEV_ARENA_DESCRIPTOR *psDevArena; -+}; -+ -+ -+#if defined(PDUMP) -+static IMG_VOID -+MMU_PDumpPageTables(MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_SIZE_T uSize, -+ IMG_BOOL bForUnmap, IMG_HANDLE hUniqueTag); -+#endif -+ -+#define PAGE_TEST 0 -+ -+ -+IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO * psDevInfo) -+{ -+ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE; -+} -+ -+IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO * psDevInfo) -+{ -+ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE; -+} -+ -+static IMG_BOOL _AllocPageTables(MMU_HEAP * pMMUHeap) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, "_AllocPageTables()")); -+ -+ PVR_ASSERT(pMMUHeap != IMG_NULL); -+ PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE); -+ -+ if (pMMUHeap == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTables: invalid parameter")); -+ return IMG_FALSE; -+ } -+ -+ pMMUHeap->ui32PTEntryCount = -+ pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT; -+ -+ pMMUHeap->ui32PTBaseIndex = -+ (pMMUHeap->psDevArena->BaseDevVAddr. 
-+ uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >> -+ SGX_MMU_PAGE_SHIFT; -+ -+ pMMUHeap->ui32PTPageCount = -+ (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - -+ 1) >> SGX_MMU_PT_SHIFT; -+ -+ return IMG_TRUE; -+} -+ -+static IMG_VOID -+_DeferredFreePageTable(MMU_HEAP * pMMUHeap, IMG_UINT32 ui32PTIndex) -+{ -+ IMG_UINT32 *pui32PDEntry; -+ IMG_UINT32 i; -+ IMG_UINT32 ui32PDIndex; -+ SYS_DATA *psSysData; -+ MMU_PT_INFO **ppsPTInfoList; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredFreePageTables: ERROR call to SysAcquireData failed")); -+ return; -+ } -+ -+ ui32PDIndex = -+ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + -+ SGX_MMU_PT_SHIFT); -+ -+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; -+ -+ { -+ -+ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL -+ || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == -+ 0); -+ } -+ -+ PDUMPCOMMENT("Free page table (page count == %08X)", -+ pMMUHeap->ui32PTPageCount); -+ if (ppsPTInfoList[ui32PTIndex] -+ && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr) { -+ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, -+ ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, -+ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG); -+ } -+ -+ switch (pMMUHeap->psDevArena->DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_SHARED: -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ -+ MMU_CONTEXT *psMMUContext = -+ (MMU_CONTEXT *) pMMUHeap->psMMUContext->psDevInfo-> -+ pvMMUContextList; -+ -+ while (psMMUContext) { -+ -+ pui32PDEntry = -+ (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr; -+ pui32PDEntry += ui32PDIndex; -+ -+ -+ pui32PDEntry[ui32PTIndex] = 0; -+ -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID *) & -+ pui32PDEntry[ui32PTIndex], -+ sizeof(IMG_UINT32), 0, IMG_FALSE, -+ PDUMP_PT_UNIQUETAG, -+ PDUMP_PT_UNIQUETAG); -+ -+ psMMUContext = psMMUContext->psNext; -+ } -+ break; -+ } -+ case DEVICE_MEMORY_HEAP_PERCONTEXT: -+ case DEVICE_MEMORY_HEAP_KERNEL: -+ { -+ -+ pui32PDEntry = -+ (IMG_UINT32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr; -+ pui32PDEntry += ui32PDIndex; -+ -+ -+ pui32PDEntry[ui32PTIndex] = 0; -+ -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID *) & pui32PDEntry[ui32PTIndex], -+ sizeof(IMG_UINT32), 0, IMG_FALSE, -+ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredFreePagetable: ERROR invalid heap type")); -+ return; -+ } -+ } -+ -+ if (ppsPTInfoList[ui32PTIndex] != IMG_NULL) { -+ if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL) { -+ IMG_PUINT32 pui32Tmp; -+ -+ pui32Tmp = -+ (IMG_UINT32 *) ppsPTInfoList[ui32PTIndex]-> -+ PTPageCpuVAddr; -+ -+ for (i = 0; -+ (i < pMMUHeap->ui32PTEntryCount) && (i < 1024); -+ i++) { -+ pui32Tmp[i] = 0; -+ } -+ -+ if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo-> -+ psLocalDevMemArena == IMG_NULL) { -+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, -+ SGX_MMU_PAGE_SIZE, -+ ppsPTInfoList[ui32PTIndex]-> -+ PTPageCpuVAddr, -+ ppsPTInfoList[ui32PTIndex]-> -+ hPTPageOSMemHandle); -+ } else { -+ IMG_SYS_PHYADDR sSysPAddr; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ -+ sCpuPAddr = -+ OSMapLinToCPUPhys(ppsPTInfoList -+ [ui32PTIndex]-> -+ PTPageCpuVAddr); -+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); -+ -+ OSUnMapPhysToLin(ppsPTInfoList[ui32PTIndex]-> -+ PTPageCpuVAddr, -+ SGX_MMU_PAGE_SIZE, -+ PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, -+ ppsPTInfoList[ui32PTIndex]-> -+ hPTPageOSMemHandle); -+ -+ RA_Free(pMMUHeap->psDevArena-> -+ psDeviceMemoryHeapInfo-> -+ psLocalDevMemArena, 
sSysPAddr.uiAddr, -+ IMG_FALSE); -+ } -+ -+ pMMUHeap->ui32PTEntryCount -= i; -+ } else { -+ -+ pMMUHeap->ui32PTEntryCount -= 1024; -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(MMU_PT_INFO), -+ ppsPTInfoList[ui32PTIndex], IMG_NULL); -+ ppsPTInfoList[ui32PTIndex] = IMG_NULL; -+ } else { -+ -+ pMMUHeap->ui32PTEntryCount -= 1024; -+ } -+ -+ PDUMPCOMMENT("Finished free page table (page count == %08X)", -+ pMMUHeap->ui32PTPageCount); -+} -+ -+static IMG_VOID _DeferredFreePageTables(MMU_HEAP * pMMUHeap) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < pMMUHeap->ui32PTPageCount; i++) { -+ _DeferredFreePageTable(pMMUHeap, i); -+ } -+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo); -+} -+ -+static IMG_BOOL -+_DeferredAllocPagetables(MMU_HEAP * pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, -+ IMG_UINT32 ui32Size) -+{ -+ IMG_UINT32 ui32PTPageCount; -+ IMG_UINT32 ui32PDIndex; -+ IMG_UINT32 i; -+ IMG_UINT32 *pui32PDEntry; -+ MMU_PT_INFO **ppsPTInfoList; -+ SYS_DATA *psSysData; -+ -+ PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE)); -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ return IMG_FALSE; -+ } -+ -+ ui32PDIndex = -+ DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); -+ -+ ui32PTPageCount = -+ (DevVAddr.uiAddr + ui32Size + -+ (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1) -+ >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); -+ ui32PTPageCount -= ui32PDIndex; -+ -+ pui32PDEntry = (IMG_UINT32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr; -+ pui32PDEntry += ui32PDIndex; -+ -+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; -+ -+ PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount); -+ PDUMPCOMMENT("Page directory mods (page count == %08X)", -+ ui32PTPageCount); -+ -+ for (i = 0; i < ui32PTPageCount; i++) { -+ if (ppsPTInfoList[i] == IMG_NULL) { -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(MMU_PT_INFO), -+ (IMG_VOID **) & ppsPTInfoList[i], IMG_NULL); -+ if (ppsPTInfoList[i] == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredAllocPagetables: ERROR call to OSAllocMem failed")); -+ return IMG_FALSE; -+ } -+ OSMemSet(ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO)); -+ } -+ -+ if (ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL -+ && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL) { -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ -+ PVR_ASSERT(pui32PDEntry[i] == 0); -+ -+ if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo-> -+ psLocalDevMemArena == IMG_NULL) { -+ if (OSAllocPages -+ (PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, SGX_MMU_PAGE_SIZE, -+ (IMG_VOID **) & ppsPTInfoList[i]-> -+ PTPageCpuVAddr, -+ &ppsPTInfoList[i]->hPTPageOSMemHandle) != -+ PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredAllocPagetables: ERROR call to OSAllocPages failed")); -+ return IMG_FALSE; -+ } -+ -+ if (ppsPTInfoList[i]->PTPageCpuVAddr) { -+ sCpuPAddr = -+ OSMapLinToCPUPhys(ppsPTInfoList[i]-> -+ PTPageCpuVAddr); -+ } else { -+ -+ sCpuPAddr = -+ OSMemHandleToCpuPAddr(ppsPTInfoList -+ [i]-> -+ hPTPageOSMemHandle, -+ 0); -+ } -+ sDevPAddr = -+ SysCpuPAddrToDevPAddr -+ (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); -+ } else { -+ IMG_SYS_PHYADDR sSysPAddr; -+ -+ if (RA_Alloc -+ (pMMUHeap->psDevArena-> -+ psDeviceMemoryHeapInfo->psLocalDevMemArena, -+ SGX_MMU_PAGE_SIZE, IMG_NULL, IMG_NULL, 0, -+ SGX_MMU_PAGE_SIZE, 0, -+ &(sSysPAddr.uiAddr)) != IMG_TRUE) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredAllocPagetables: ERROR call to RA_Alloc failed")); -+ return IMG_FALSE; -+ } -+ -+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); -+ 
ppsPTInfoList[i]->PTPageCpuVAddr = -+ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE, -+ PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, -+ &ppsPTInfoList[i]-> -+ hPTPageOSMemHandle); -+ if (!ppsPTInfoList[i]->PTPageCpuVAddr) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredAllocPagetables: ERROR failed to map page tables")); -+ return IMG_FALSE; -+ } -+ -+ sDevPAddr = -+ SysCpuPAddrToDevPAddr -+ (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); -+ -+ } -+ -+ -+ OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0, -+ SGX_MMU_PAGE_SIZE); -+ -+ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, -+ ppsPTInfoList[i]->PTPageCpuVAddr, -+ SGX_MMU_PAGE_SIZE, -+ PDUMP_PT_UNIQUETAG); -+ -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, -+ ppsPTInfoList[i]->PTPageCpuVAddr, -+ SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, -+ PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); -+ -+ switch (pMMUHeap->psDevArena->DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_SHARED: -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ -+ MMU_CONTEXT *psMMUContext = -+ (MMU_CONTEXT *) pMMUHeap-> -+ psMMUContext->psDevInfo-> -+ pvMMUContextList; -+ -+ while (psMMUContext) { -+ -+ pui32PDEntry = -+ (IMG_UINT32 *) -+ psMMUContext->pvPDCpuVAddr; -+ pui32PDEntry += ui32PDIndex; -+ -+ pui32PDEntry[i] = -+ sDevPAddr. -+ uiAddr | SGX_MMU_PDE_VALID; -+ -+ PDUMPMEM2 -+ (PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID *) & -+ pui32PDEntry[i], -+ sizeof(IMG_UINT32), 0, -+ IMG_FALSE, -+ PDUMP_PD_UNIQUETAG, -+ PDUMP_PT_UNIQUETAG); -+ -+ psMMUContext = -+ psMMUContext->psNext; -+ } -+ break; -+ } -+ case DEVICE_MEMORY_HEAP_PERCONTEXT: -+ case DEVICE_MEMORY_HEAP_KERNEL: -+ { -+ -+ pui32PDEntry[i] = -+ sDevPAddr. -+ uiAddr | SGX_MMU_PDE_VALID; -+ -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID *) & -+ pui32PDEntry[i], -+ sizeof(IMG_UINT32), 0, -+ IMG_FALSE, PDUMP_PD_UNIQUETAG, -+ PDUMP_PT_UNIQUETAG); -+ -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_DeferredAllocPagetables: ERROR invalid heap type")); -+ return IMG_FALSE; -+ } -+ } -+ -+ -+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext-> -+ psDevInfo); -+ } else { -+ -+ PVR_ASSERT(pui32PDEntry[i] != 0); -+ } -+ } -+ -+ return IMG_TRUE; -+} -+ -+PVRSRV_ERROR -+MMU_Initialise(PVRSRV_DEVICE_NODE * psDeviceNode, MMU_CONTEXT ** ppsMMUContext, -+ IMG_DEV_PHYADDR * psPDDevPAddr) -+{ -+ IMG_UINT32 *pui32Tmp; -+ IMG_UINT32 i; -+ IMG_CPU_VIRTADDR pvPDCpuVAddr; -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ MMU_CONTEXT *psMMUContext; -+ IMG_HANDLE hPDOSMemHandle; -+ SYS_DATA *psSysData; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Initialise")); -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Initialise: ERROR call to SysAcquireData failed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(MMU_CONTEXT), (IMG_VOID **) & psMMUContext, IMG_NULL); -+ if (psMMUContext == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Initialise: ERROR call to OSAllocMem failed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ OSMemSet(psMMUContext, 0, sizeof(MMU_CONTEXT)); -+ -+ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ psMMUContext->psDevInfo = psDevInfo; -+ -+ psMMUContext->psDeviceNode = psDeviceNode; -+ -+ if (psDeviceNode->psLocalDevMemArena == IMG_NULL) { -+ if (OSAllocPages -+ (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, -+ SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr, -+ &hPDOSMemHandle) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Initialise: ERROR call to OSAllocPages failed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if 
(pvPDCpuVAddr) { -+ sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr); -+ } else { -+ -+ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0); -+ } -+ sPDDevPAddr = -+ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); -+ -+ -+ } else { -+ IMG_SYS_PHYADDR sSysPAddr; -+ -+ if (RA_Alloc(psDeviceNode->psLocalDevMemArena, -+ SGX_MMU_PAGE_SIZE, -+ IMG_NULL, -+ IMG_NULL, -+ 0, -+ SGX_MMU_PAGE_SIZE, -+ 0, &(sSysPAddr.uiAddr)) != IMG_TRUE) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Initialise: ERROR call to RA_Alloc failed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); -+ sPDDevPAddr = -+ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); -+ pvPDCpuVAddr = -+ OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE, -+ PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle); -+ if (!pvPDCpuVAddr) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Initialise: ERROR failed to map page tables")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ } -+ -+ PDUMPCOMMENT("Alloc page directory"); -+ -+ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, -+ SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG); -+ -+ if (pvPDCpuVAddr) { -+ pui32Tmp = (IMG_UINT32 *) pvPDCpuVAddr; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Initialise: pvPDCpuVAddr invalid")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ -+ for (i = 0; i < SGX_MMU_PD_SIZE; i++) { -+ -+ pui32Tmp[i] = 0; -+ } -+ -+ PDUMPCOMMENT("Page directory contents"); -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, -+ IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); -+ -+ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr; -+ psMMUContext->sPDDevPAddr = sPDDevPAddr; -+ psMMUContext->hPDOSMemHandle = hPDOSMemHandle; -+ -+ *ppsMMUContext = psMMUContext; -+ -+ *psPDDevPAddr = sPDDevPAddr; -+ -+ psMMUContext->psNext = (MMU_CONTEXT *) psDevInfo->pvMMUContextList; -+ psDevInfo->pvMMUContextList = (IMG_VOID *) psMMUContext; -+ -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID MMU_Finalise(MMU_CONTEXT * psMMUContext) -+{ -+ IMG_UINT32 *pui32Tmp, i; -+ SYS_DATA *psSysData; -+ MMU_CONTEXT **ppsMMUContext; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Finalise: ERROR call to SysAcquireData failed")); -+ return; -+ } -+ -+ PDUMPCOMMENT("Free page directory"); -+ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr, -+ SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG); -+ -+ pui32Tmp = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr; -+ -+ for (i = 0; i < SGX_MMU_PD_SIZE; i++) { -+ -+ pui32Tmp[i] = 0; -+ } -+ -+ if (psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL) { -+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, -+ SGX_MMU_PAGE_SIZE, -+ psMMUContext->pvPDCpuVAddr, -+ psMMUContext->hPDOSMemHandle); -+ -+ } else { -+ IMG_SYS_PHYADDR sSysPAddr; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ -+ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr); -+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); -+ -+ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr, -+ SGX_MMU_PAGE_SIZE, -+ PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, -+ psMMUContext->hPDOSMemHandle); -+ -+ RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena, -+ sSysPAddr.uiAddr, IMG_FALSE); -+ -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Finalise")); -+ -+ ppsMMUContext = -+ (MMU_CONTEXT **) & psMMUContext->psDevInfo->pvMMUContextList; -+ while (*ppsMMUContext) { -+ if (*ppsMMUContext == psMMUContext) { -+ -+ *ppsMMUContext = psMMUContext->psNext; -+ break; -+ } -+ -+ ppsMMUContext = &((*ppsMMUContext)->psNext); -+ } -+ -+ 
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, -+ IMG_NULL); -+} -+ -+IMG_VOID MMU_InsertHeap(MMU_CONTEXT * psMMUContext, MMU_HEAP * psMMUHeap) -+{ -+ IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr; -+ IMG_UINT32 *pui32KernelPDCpuVAddr = -+ (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr; -+ IMG_UINT32 ui32PDEntry; -+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE; -+ -+ pui32PDCpuVAddr += -+ psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + -+ SGX_MMU_PT_SHIFT); -+ pui32KernelPDCpuVAddr += -+ psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + -+ SGX_MMU_PT_SHIFT); -+ -+ PDUMPCOMMENT("Page directory shared heap range copy"); -+ -+ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount; -+ ui32PDEntry++) { -+ -+ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0); -+ -+ pui32PDCpuVAddr[ui32PDEntry] = -+ pui32KernelPDCpuVAddr[ui32PDEntry]; -+ if (pui32PDCpuVAddr[ui32PDEntry]) { -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID *) & pui32PDCpuVAddr[ui32PDEntry], -+ sizeof(IMG_UINT32), 0, IMG_FALSE, -+ PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); -+ -+ bInvalidateDirectoryCache = IMG_TRUE; -+ } -+ } -+ -+ -+ if (bInvalidateDirectoryCache) { -+ -+ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo); -+ } -+} -+ -+static IMG_VOID -+MMU_UnmapPagesAndFreePTs(MMU_HEAP * psMMUHeap, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_UINT32 ui32PageCount, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 uPageSize = HOST_PAGESIZE(); -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ IMG_UINT32 i; -+ IMG_UINT32 ui32PDIndex; -+ IMG_UINT32 ui32PTIndex; -+ IMG_UINT32 *pui32Tmp; -+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE; -+ -+#if !defined (PDUMP) -+ PVR_UNREFERENCED_PARAMETER(hUniqueTag); -+#endif -+ -+ sTmpDevVAddr = sDevVAddr; -+ -+ for (i = 0; i < ui32PageCount; i++) { -+ MMU_PT_INFO **ppsPTInfoList; -+ -+ ui32PDIndex = -+ sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + -+ SGX_MMU_PT_SHIFT); -+ -+ ppsPTInfoList = -+ &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; -+ -+ { -+ -+ ui32PTIndex = -+ (sTmpDevVAddr. 
-+ uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; -+ -+ if (!ppsPTInfoList[0]) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u", -+ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, -+ i, ui32PDIndex, ui32PTIndex)); -+ -+ sTmpDevVAddr.uiAddr += uPageSize; -+ -+ continue; -+ } -+ -+ pui32Tmp = -+ (IMG_UINT32 *) ppsPTInfoList[0]->PTPageCpuVAddr; -+ -+ if (!pui32Tmp) { -+ continue; -+ } -+ -+ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) { -+ ppsPTInfoList[0]->ui32ValidPTECount--; -+ } else { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u", -+ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, -+ i, ui32PDIndex, ui32PTIndex)); -+ } -+ -+ PVR_ASSERT((IMG_INT32) ppsPTInfoList[0]-> -+ ui32ValidPTECount >= 0); -+ -+ -+ pui32Tmp[ui32PTIndex] = 0; -+ } -+ -+ if (ppsPTInfoList[0] -+ && ppsPTInfoList[0]->ui32ValidPTECount == 0) { -+ _DeferredFreePageTable(psMMUHeap, -+ ui32PDIndex - -+ (psMMUHeap-> -+ ui32PTBaseIndex >> -+ SGX_MMU_PT_SHIFT)); -+ bInvalidateDirectoryCache = IMG_TRUE; -+ } -+ -+ sTmpDevVAddr.uiAddr += uPageSize; -+ } -+ -+ if (bInvalidateDirectoryCache) { -+ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext-> -+ psDevInfo); -+ } else { -+ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext-> -+ psDevInfo); -+ } -+ -+#if defined(PDUMP) -+ MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount, -+ IMG_TRUE, hUniqueTag); -+#endif -+} -+ -+IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32End, IMG_HANDLE hUniqueTag) -+{ -+ MMU_HEAP *pMMUHeap = (MMU_HEAP *) pvMMUHeap; -+ IMG_DEV_VIRTADDR Start; -+ -+ Start.uiAddr = ui32Start; -+ -+ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, -+ (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE, -+ hUniqueTag); -+} -+ -+MMU_HEAP *MMU_Create(MMU_CONTEXT * psMMUContext, -+ DEV_ARENA_DESCRIPTOR * psDevArena, RA_ARENA ** ppsVMArena) -+{ -+ MMU_HEAP *pMMUHeap; -+ IMG_BOOL bRes; -+ -+ PVR_ASSERT(psDevArena != IMG_NULL); -+ -+ if (psDevArena == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter")); -+ return IMG_NULL; -+ } -+ -+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(MMU_HEAP), (IMG_VOID **) & pMMUHeap, IMG_NULL); -+ if (pMMUHeap == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Create: ERROR call to OSAllocMem failed")); -+ return IMG_NULL; -+ } -+ -+ pMMUHeap->psMMUContext = psMMUContext; -+ pMMUHeap->psDevArena = psDevArena; -+ -+ bRes = _AllocPageTables(pMMUHeap); -+ if (!bRes) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Create: ERROR call to _AllocPageTables failed")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, -+ IMG_NULL); -+ return IMG_NULL; -+ } -+ -+ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName, -+ psDevArena->BaseDevVAddr.uiAddr, -+ psDevArena->ui32Size, -+ IMG_NULL, -+ SGX_MMU_PAGE_SIZE, -+ IMG_NULL, -+ IMG_NULL, MMU_FreePageTables, pMMUHeap); -+ -+ if (pMMUHeap->psVMArena == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Create: ERROR call to RA_Create failed")); -+ _DeferredFreePageTables(pMMUHeap); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), -+ pMMUHeap, IMG_NULL); -+ return IMG_NULL; -+ } -+ -+ *ppsVMArena = pMMUHeap->psVMArena; -+ -+ return pMMUHeap; -+} -+ -+IMG_VOID MMU_Delete(MMU_HEAP * pMMUHeap) -+{ -+ if (pMMUHeap != IMG_NULL) { -+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Delete")); -+ -+ if (pMMUHeap->psVMArena) { -+ RA_Delete(pMMUHeap->psVMArena); -+ } 
-+ _DeferredFreePageTables(pMMUHeap); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), -+ pMMUHeap, IMG_NULL); -+ } -+} -+ -+IMG_BOOL -+MMU_Alloc(MMU_HEAP * pMMUHeap, -+ IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 uDevVAddrAlignment, IMG_DEV_VIRTADDR * psDevVAddr) -+{ -+ IMG_BOOL bStatus; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x", -+ uSize, uFlags, uDevVAddrAlignment)); -+ -+ if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) { -+ bStatus = RA_Alloc(pMMUHeap->psVMArena, -+ uSize, -+ pActualSize, -+ IMG_NULL, -+ 0, -+ uDevVAddrAlignment, -+ 0, &(psDevVAddr->uiAddr)); -+ if (!bStatus) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Alloc: RA_Alloc of VMArena failed")); -+ return bStatus; -+ } -+ } -+ -+ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize); -+ -+ -+ if (!bStatus) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Alloc: _DeferredAllocPagetables failed")); -+ if ((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) { -+ -+ RA_Free(pMMUHeap->psVMArena, psDevVAddr->uiAddr, -+ IMG_FALSE); -+ } -+ } -+ -+ return bStatus; -+} -+ -+IMG_VOID -+MMU_Free(MMU_HEAP * pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size) -+{ -+ PVR_ASSERT(pMMUHeap != IMG_NULL); -+ -+ if (pMMUHeap == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter")); -+ return; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap, -+ DevVAddr.uiAddr)); -+ -+ if ((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) && -+ (DevVAddr.uiAddr + ui32Size <= -+ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + -+ pMMUHeap->psDevArena->ui32Size)) { -+ RA_Free(pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE); -+ return; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_Free: Couldn't find DevVAddr %08X in a DevArena", -+ DevVAddr.uiAddr)); -+} -+ -+IMG_VOID MMU_Enable(MMU_HEAP * pMMUHeap) -+{ -+ PVR_UNREFERENCED_PARAMETER(pMMUHeap); -+ -+} -+ -+IMG_VOID MMU_Disable(MMU_HEAP * pMMUHeap) -+{ -+ PVR_UNREFERENCED_PARAMETER(pMMUHeap); -+ -+} -+ -+#if defined(PDUMP) -+static IMG_VOID -+MMU_PDumpPageTables(MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_SIZE_T uSize, IMG_BOOL bForUnmap, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 ui32NumPTEntries; -+ IMG_UINT32 ui32PTIndex; -+ IMG_UINT32 *pui32PTEntry; -+ -+ MMU_PT_INFO **ppsPTInfoList; -+ IMG_UINT32 ui32PDIndex; -+ IMG_UINT32 ui32PTDumpCount; -+ -+ ui32NumPTEntries = -+ (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT; -+ -+ ui32PDIndex = -+ DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); -+ -+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; -+ -+ ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; -+ -+ PDUMPCOMMENT("Page table mods (num entries == %08X) %s", -+ ui32NumPTEntries, bForUnmap ? "(for unmap)" : ""); -+ -+ while (ui32NumPTEntries > 0) { -+ MMU_PT_INFO *psPTInfo = *ppsPTInfoList++; -+ -+ if (ui32NumPTEntries <= 1024 - ui32PTIndex) { -+ ui32PTDumpCount = ui32NumPTEntries; -+ } else { -+ ui32PTDumpCount = 1024 - ui32PTIndex; -+ } -+ -+ if (psPTInfo) { -+ pui32PTEntry = (IMG_UINT32 *) psPTInfo->PTPageCpuVAddr; -+ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID *) & pui32PTEntry[ui32PTIndex], -+ ui32PTDumpCount * sizeof(IMG_UINT32), 0, -+ IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag); -+ } -+ -+ ui32NumPTEntries -= ui32PTDumpCount; -+ -+ ui32PTIndex = 0; -+ } -+ -+ PDUMPCOMMENT("Finished page table mods %s", -+ bForUnmap ? 
"(for unmap)" : ""); -+} -+#endif -+ -+static IMG_VOID -+MMU_MapPage(MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_DEV_PHYADDR DevPAddr, IMG_UINT32 ui32MemFlags) -+{ -+ IMG_UINT32 ui32Index; -+ IMG_UINT32 *pui32Tmp; -+ IMG_UINT32 ui32MMUFlags = 0; -+ MMU_PT_INFO **ppsPTInfoList; -+ -+ if (((PVRSRV_MEM_READ | PVRSRV_MEM_WRITE) & ui32MemFlags) == -+ (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE)) { -+ -+ ui32MMUFlags = 0; -+ } else if (PVRSRV_MEM_READ & ui32MemFlags) { -+ -+ ui32MMUFlags |= SGX_MMU_PTE_READONLY; -+ } else if (PVRSRV_MEM_WRITE & ui32MemFlags) { -+ -+ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY; -+ } -+ -+ if (PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags) { -+ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT; -+ } -+#if !defined(FIX_HW_BRN_25503) -+ -+ if (PVRSRV_MEM_EDM_PROTECT & ui32MemFlags) { -+ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT; -+ } -+#endif -+ -+ ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); -+ -+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index]; -+ -+ ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; -+ -+ pui32Tmp = (IMG_UINT32 *) ppsPTInfoList[0]->PTPageCpuVAddr; -+ -+ -+ if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u", -+ DevVAddr.uiAddr, -+ DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + -+ SGX_MMU_PT_SHIFT), ui32Index)); -+ } -+ -+ PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0); -+ -+ ppsPTInfoList[0]->ui32ValidPTECount++; -+ -+ pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK) -+ | SGX_MMU_PTE_VALID | ui32MMUFlags; -+} -+ -+IMG_VOID -+MMU_MapScatter(MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_SYS_PHYADDR * psSysAddr, -+ IMG_SIZE_T uSize, IMG_UINT32 ui32MemFlags, IMG_HANDLE hUniqueTag) -+{ -+#if defined(PDUMP) -+ IMG_DEV_VIRTADDR MapBaseDevVAddr; -+#endif -+ IMG_UINT32 uCount, i; -+ IMG_DEV_PHYADDR DevPAddr; -+ -+ PVR_ASSERT(pMMUHeap != IMG_NULL); -+ -+#if defined(PDUMP) -+ MapBaseDevVAddr = DevVAddr; -+#else -+ PVR_UNREFERENCED_PARAMETER(hUniqueTag); -+#endif -+ -+ for (i = 0, uCount = 0; uCount < uSize; -+ i++, uCount += SGX_MMU_PAGE_SIZE) { -+ IMG_SYS_PHYADDR sSysAddr; -+ -+ sSysAddr = psSysAddr[i]; -+ -+ DevPAddr = -+ SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr); -+ -+ MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags); -+ DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x", -+ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize)); -+ } -+ -+#if defined(PDUMP) -+ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, -+ hUniqueTag); -+#endif -+} -+ -+IMG_VOID -+MMU_MapPages(MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_SYS_PHYADDR SysPAddr, -+ IMG_SIZE_T uSize, IMG_UINT32 ui32MemFlags, IMG_HANDLE hUniqueTag) -+{ -+ IMG_DEV_PHYADDR DevPAddr; -+#if defined(PDUMP) -+ IMG_DEV_VIRTADDR MapBaseDevVAddr; -+#endif -+ IMG_UINT32 uCount; -+ IMG_UINT32 ui32VAdvance = SGX_MMU_PAGE_SIZE; -+ IMG_UINT32 ui32PAdvance = SGX_MMU_PAGE_SIZE; -+ -+ PVR_ASSERT(pMMUHeap != IMG_NULL); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x", -+ pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize)); -+ -+#if defined(PDUMP) -+ MapBaseDevVAddr = DevVAddr; -+#else -+ PVR_UNREFERENCED_PARAMETER(hUniqueTag); -+#endif -+ -+ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr); -+ -+#if defined(FIX_HW_BRN_23281) -+ if 
(ui32MemFlags & PVRSRV_MEM_INTERLEAVED) { -+ ui32VAdvance *= 2; -+ } -+#endif -+ -+ if (ui32MemFlags & PVRSRV_MEM_DUMMY) { -+ ui32PAdvance = 0; -+ } -+ -+ for (uCount = 0; uCount < uSize; uCount += ui32VAdvance) { -+ MMU_MapPage(pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags); -+ DevVAddr.uiAddr += ui32VAdvance; -+ DevPAddr.uiAddr += ui32PAdvance; -+ } -+ -+#if defined(PDUMP) -+ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, -+ hUniqueTag); -+#endif -+} -+ -+IMG_VOID -+MMU_MapShadow(MMU_HEAP * pMMUHeap, -+ IMG_DEV_VIRTADDR MapBaseDevVAddr, -+ IMG_SIZE_T uByteSize, -+ IMG_CPU_VIRTADDR CpuVAddr, -+ IMG_HANDLE hOSMemHandle, -+ IMG_DEV_VIRTADDR * pDevVAddr, -+ IMG_UINT32 ui32MemFlags, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 i; -+ IMG_UINT32 uOffset = 0; -+ IMG_DEV_VIRTADDR MapDevVAddr; -+ IMG_UINT32 ui32VAdvance = SGX_MMU_PAGE_SIZE; -+ IMG_UINT32 ui32PAdvance = SGX_MMU_PAGE_SIZE; -+ -+#if !defined (PDUMP) -+ PVR_UNREFERENCED_PARAMETER(hUniqueTag); -+#endif -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "MMU_MapShadow: %08X, 0x%x, %08X", -+ MapBaseDevVAddr.uiAddr, uByteSize, CpuVAddr)); -+ -+ PVR_ASSERT(((IMG_UINT32) CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ PVR_ASSERT(((IMG_UINT32) uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr; -+ -+#if defined(FIX_HW_BRN_23281) -+ if (ui32MemFlags & PVRSRV_MEM_INTERLEAVED) { -+ ui32VAdvance *= 2; -+ } -+#endif -+ -+ if (ui32MemFlags & PVRSRV_MEM_DUMMY) { -+ ui32PAdvance = 0; -+ } -+ -+ MapDevVAddr = MapBaseDevVAddr; -+ for (i = 0; i < uByteSize; i += ui32VAdvance) { -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_DEV_PHYADDR DevPAddr; -+ -+ if (CpuVAddr) { -+ CpuPAddr = -+ OSMapLinToCPUPhys((IMG_VOID *) ((IMG_UINT32) -+ CpuVAddr + -+ uOffset)); -+ } else { -+ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset); -+ } -+ DevPAddr = -+ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, CpuPAddr); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X", -+ uOffset, -+ (IMG_UINTPTR_T) CpuVAddr + uOffset, -+ CpuPAddr.uiAddr, MapDevVAddr.uiAddr, DevPAddr.uiAddr)); -+ -+ MMU_MapPage(pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags); -+ -+ MapDevVAddr.uiAddr += ui32VAdvance; -+ uOffset += ui32PAdvance; -+ } -+ -+#if defined(PDUMP) -+ MMU_PDumpPageTables(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, -+ hUniqueTag); -+#endif -+} -+ -+IMG_VOID -+MMU_UnmapPages(MMU_HEAP * psMMUHeap, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_UINT32 ui32PageCount, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 uPageSize = HOST_PAGESIZE(); -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ IMG_UINT32 i; -+ IMG_UINT32 ui32PDIndex; -+ IMG_UINT32 ui32PTIndex; -+ IMG_UINT32 *pui32Tmp; -+ -+#if !defined (PDUMP) -+ PVR_UNREFERENCED_PARAMETER(hUniqueTag); -+#endif -+ -+ sTmpDevVAddr = sDevVAddr; -+ -+ for (i = 0; i < ui32PageCount; i++) { -+ MMU_PT_INFO **ppsPTInfoList; -+ -+ ui32PDIndex = -+ sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + -+ SGX_MMU_PT_SHIFT); -+ -+ ppsPTInfoList = -+ &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; -+ -+ ui32PTIndex = -+ (sTmpDevVAddr. 
-+ uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; -+ -+ if (!ppsPTInfoList[0]) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u", -+ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i, -+ ui32PDIndex, ui32PTIndex)); -+ -+ sTmpDevVAddr.uiAddr += uPageSize; -+ -+ continue; -+ } -+ -+ pui32Tmp = (IMG_UINT32 *) ppsPTInfoList[0]->PTPageCpuVAddr; -+ -+ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) { -+ ppsPTInfoList[0]->ui32ValidPTECount--; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u", -+ sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i, -+ ui32PDIndex, ui32PTIndex)); -+ } -+ -+ PVR_ASSERT((IMG_INT32) ppsPTInfoList[0]->ui32ValidPTECount >= -+ 0); -+ -+ -+ pui32Tmp[ui32PTIndex] = 0; -+ -+ sTmpDevVAddr.uiAddr += uPageSize; -+ } -+ -+ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo); -+ -+#if defined(PDUMP) -+ MMU_PDumpPageTables(psMMUHeap, sDevVAddr, uPageSize * ui32PageCount, -+ IMG_TRUE, hUniqueTag); -+#endif -+} -+ -+IMG_DEV_PHYADDR -+MMU_GetPhysPageAddr(MMU_HEAP * pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr) -+{ -+ IMG_UINT32 *pui32PageTable; -+ IMG_UINT32 ui32Index; -+ IMG_DEV_PHYADDR sDevPAddr; -+ MMU_PT_INFO **ppsPTInfoList; -+ -+ ui32Index = -+ sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); -+ -+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index]; -+ if (!ppsPTInfoList[0]) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_GetPhysPageAddr: Not mapped in at 0x%08x", -+ sDevVPageAddr.uiAddr)); -+ sDevPAddr.uiAddr = 0; -+ return sDevPAddr; -+ } -+ -+ ui32Index = -+ (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; -+ -+ pui32PageTable = (IMG_UINT32 *) ppsPTInfoList[0]->PTPageCpuVAddr; -+ -+ sDevPAddr.uiAddr = pui32PageTable[ui32Index]; -+ -+ sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK; -+ -+ return sDevPAddr; -+} -+ -+IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT * pMMUContext) -+{ -+ return (pMMUContext->sPDDevPAddr); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEV_PHYADDR * pDevPAddr, -+ IMG_CPU_PHYADDR * pCpuPAddr) -+{ -+ MMU_HEAP *pMMUHeap; -+ IMG_DEV_PHYADDR DevPAddr; -+ -+ pMMUHeap = (MMU_HEAP *) BM_GetMMUHeap(hDevMemHeap); -+ -+ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr); -+ pCpuPAddr->uiAddr = DevPAddr.uiAddr; -+ pDevPAddr->uiAddr = DevPAddr.uiAddr; -+ -+ return (pDevPAddr->uiAddr != -+ 0) ? 
PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS; -+} -+ -+PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie, -+ IMG_HANDLE hDevMemContext, -+ IMG_DEV_PHYADDR * psPDDevPAddr) -+{ -+ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(hDevCookie); -+ -+ *psPDDevPAddr = -+ ((BM_CONTEXT *) hDevMemContext)->psMMUContext->sPDDevPAddr; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO * psDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ RA_ARENA *psLocalDevMemArena; -+ IMG_HANDLE hOSMemHandle = IMG_NULL; -+ IMG_BYTE *pui8MemBlock = IMG_NULL; -+ IMG_SYS_PHYADDR sMemBlockSysPAddr; -+ IMG_CPU_PHYADDR sMemBlockCpuPAddr; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed")); -+ return eError; -+ } -+ -+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; -+ -+ if (psLocalDevMemArena == IMG_NULL) { -+ -+ eError = -+ OSAllocPages(PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, 3 * SGX_MMU_PAGE_SIZE, -+ (IMG_VOID **) & pui8MemBlock, &hOSMemHandle); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed")); -+ return eError; -+ } -+ -+ if (pui8MemBlock) { -+ sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock); -+ } else { -+ -+ sMemBlockCpuPAddr = -+ OSMemHandleToCpuPAddr(hOSMemHandle, 0); -+ } -+ } else { -+ -+ if (RA_Alloc(psLocalDevMemArena, -+ 3 * SGX_MMU_PAGE_SIZE, -+ IMG_NULL, -+ IMG_NULL, -+ 0, -+ SGX_MMU_PAGE_SIZE, -+ 0, &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr); -+ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr, -+ SGX_MMU_PAGE_SIZE * 3, -+ PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, -+ &hOSMemHandle); -+ if (!pui8MemBlock) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_BIFResetPDAlloc: ERROR failed to map page tables")); -+ return PVRSRV_ERROR_BAD_MAPPING; -+ } -+ } -+ -+ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle; -+ psDevInfo->sBIFResetPDDevPAddr = -+ SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr); -+ psDevInfo->sBIFResetPTDevPAddr.uiAddr = -+ psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE; -+ psDevInfo->sBIFResetPageDevPAddr.uiAddr = -+ psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE; -+ psDevInfo->pui32BIFResetPD = (IMG_UINT32 *) pui8MemBlock; -+ psDevInfo->pui32BIFResetPT = -+ (IMG_UINT32 *) (pui8MemBlock + SGX_MMU_PAGE_SIZE); -+ -+ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE); -+ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE); -+ -+ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, -+ SGX_MMU_PAGE_SIZE); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO * psDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ RA_ARENA *psLocalDevMemArena; -+ IMG_SYS_PHYADDR sPDSysPAddr; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MMU_BIFResetPDFree: ERROR call to SysAcquireData failed")); -+ return; -+ } -+ -+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; -+ -+ if (psLocalDevMemArena == IMG_NULL) { -+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, -+ 3 * SGX_MMU_PAGE_SIZE, -+ psDevInfo->pui32BIFResetPD, -+ 
psDevInfo->hBIFResetPDOSMemHandle); -+ } else { -+ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD, -+ 3 * SGX_MMU_PAGE_SIZE, -+ PVRSRV_HAP_WRITECOMBINE | -+ PVRSRV_HAP_KERNEL_ONLY, -+ psDevInfo->hBIFResetPDOSMemHandle); -+ -+ sPDSysPAddr = -+ SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, -+ psDevInfo->sBIFResetPDDevPAddr); -+ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE); -+ } -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmu.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmu.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mmu.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mmu.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,98 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _MMU_H_ -+#define _MMU_H_ -+ -+#include "sgxinfokm.h" -+ -+PVRSRV_ERROR -+MMU_Initialise(PVRSRV_DEVICE_NODE * psDeviceNode, MMU_CONTEXT ** ppsMMUContext, -+ IMG_DEV_PHYADDR * psPDDevPAddr); -+ -+IMG_VOID MMU_Finalise(MMU_CONTEXT * psMMUContext); -+ -+IMG_VOID MMU_InsertHeap(MMU_CONTEXT * psMMUContext, MMU_HEAP * psMMUHeap); -+ -+MMU_HEAP *MMU_Create(MMU_CONTEXT * psMMUContext, -+ DEV_ARENA_DESCRIPTOR * psDevArena, RA_ARENA ** ppsVMArena); -+ -+IMG_VOID MMU_Delete(MMU_HEAP * pMMU); -+ -+IMG_BOOL -+MMU_Alloc(MMU_HEAP * pMMU, -+ IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 uDevVAddrAlignment, IMG_DEV_VIRTADDR * pDevVAddr); -+ -+IMG_VOID -+MMU_Free(MMU_HEAP * pMMU, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size); -+ -+IMG_VOID MMU_Enable(MMU_HEAP * pMMU); -+ -+IMG_VOID MMU_Disable(MMU_HEAP * pMMU); -+ -+IMG_VOID -+MMU_MapPages(MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR devVAddr, -+ IMG_SYS_PHYADDR SysPAddr, -+ IMG_SIZE_T uSize, IMG_UINT32 ui32MemFlags, IMG_HANDLE hUniqueTag); -+ -+IMG_VOID -+MMU_MapShadow(MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR MapBaseDevVAddr, -+ IMG_SIZE_T uSize, -+ IMG_CPU_VIRTADDR CpuVAddr, -+ IMG_HANDLE hOSMemHandle, -+ IMG_DEV_VIRTADDR * pDevVAddr, -+ IMG_UINT32 ui32MemFlags, IMG_HANDLE hUniqueTag); -+ -+IMG_VOID -+MMU_UnmapPages(MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR dev_vaddr, -+ IMG_UINT32 ui32PageCount, IMG_HANDLE hUniqueTag); -+ -+IMG_VOID -+MMU_MapScatter(MMU_HEAP * pMMU, -+ IMG_DEV_VIRTADDR DevVAddr, -+ IMG_SYS_PHYADDR * psSysAddr, -+ IMG_SIZE_T uSize, -+ IMG_UINT32 ui32MemFlags, IMG_HANDLE hUniqueTag); -+ -+IMG_DEV_PHYADDR -+MMU_GetPhysPageAddr(MMU_HEAP * 
pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr); -+ -+IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT * pMMUContext); -+ -+ -+IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO * psDevInfo); -+ -+PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO * psDevInfo); -+ -+IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO * psDevInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/module.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/module.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/module.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/module.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,323 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+ -+#include "img_defs.h" -+#include "services.h" -+#include "kerneldisplay.h" -+#include "kernelbuffer.h" -+#include "syscommon.h" -+#include "pvrmmap.h" -+#include "mm.h" -+#include "mmap.h" -+#include "mutex.h" -+#include "pvr_debug.h" -+#include "srvkm.h" -+#include "perproc.h" -+#include "handle.h" -+#include "pvr_bridge_km.h" -+#include "proc.h" -+#include "pvrmodule.h" -+ -+#define DRVNAME "pvrsrvkm" -+#define DEVNAME "pvrsrvkm" -+ -+MODULE_SUPPORTED_DEVICE(DEVNAME); -+#ifdef DEBUG -+static int debug = DBGPRIV_WARNING; -+#include -+module_param(debug, int, 0); -+#endif -+ -+void PVRDebugSetLevel(IMG_UINT32 uDebugLevel); -+ -+extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE * -+ psJTable); -+extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE * -+ psJTable); -+EXPORT_SYMBOL(PVRGetDisplayClassJTable); -+EXPORT_SYMBOL(PVRGetBufferClassJTable); -+ -+static int AssignedMajorNumber; -+ -+extern long PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd, -+ unsigned long arg); -+static int PVRSRVOpen(struct inode *pInode, struct file *pFile); -+static int PVRSRVRelease(struct inode *pInode, struct file *pFile); -+ -+PVRSRV_LINUX_MUTEX gPVRSRVLock; -+ -+static struct file_operations pvrsrv_fops = { -+owner: THIS_MODULE, -+unlocked_ioctl:PVRSRV_BridgeDispatchKM, -+open: PVRSRVOpen, -+release:PVRSRVRelease, -+mmap: PVRMMap, -+}; -+ -+#define LDM_DEV struct platform_device -+#define LDM_DRV struct platform_driver -+ -+ -+static int PVRSRVDriverRemove(LDM_DEV * device); -+static int 
PVRSRVDriverProbe(LDM_DEV * device); -+static int PVRSRVDriverSuspend(LDM_DEV * device, pm_message_t state); -+static void PVRSRVDriverShutdown(LDM_DEV * device); -+static int PVRSRVDriverResume(LDM_DEV * device); -+ -+ -+static LDM_DRV powervr_driver = { -+ .driver = { -+ .name = DRVNAME, -+ }, -+ .probe = PVRSRVDriverProbe, -+ .remove = PVRSRVDriverRemove, -+ .suspend = PVRSRVDriverSuspend, -+ .resume = PVRSRVDriverResume, -+ .shutdown = PVRSRVDriverShutdown, -+}; -+ -+LDM_DEV *gpsPVRLDMDev; -+ -+static void PVRSRVDeviceRelease(struct device *device); -+ -+static struct platform_device powervr_device = { -+ .name = DEVNAME, -+ .id = -1, -+ .dev = { -+ .release = PVRSRVDeviceRelease} -+}; -+ -+static int PVRSRVDriverProbe(LDM_DEV * pDevice) -+{ -+ SYS_DATA *psSysData; -+ -+ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice)); -+ -+ pDevice->dev.driver_data = NULL; -+ -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ gpsPVRLDMDev = pDevice; -+ -+ if (SysInitialise() != PVRSRV_OK) { -+ return -ENODEV; -+ } -+ } -+ -+ return 0; -+} -+ -+static int PVRSRVDriverRemove(LDM_DEV * pDevice) -+{ -+ SYS_DATA *psSysData; -+ -+ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice)); -+ -+ if (SysAcquireData(&psSysData) == PVRSRV_OK) { -+ SysDeinitialise(psSysData); -+ -+ gpsPVRLDMDev = IMG_NULL; -+ } -+ -+ return 0; -+} -+ -+static void PVRSRVDriverShutdown(LDM_DEV * pDevice) -+{ -+ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice)); -+ -+ (void)PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3); -+} -+ -+static int PVRSRVDriverSuspend(LDM_DEV * pDevice, pm_message_t state) -+{ -+ PVR_TRACE(("PVRSRVDriverSuspend(pDevice=%p)", pDevice)); -+ -+ if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK) { -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static int PVRSRVDriverResume(LDM_DEV * pDevice) -+{ -+ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice)); -+ -+ if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK) { -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static void PVRSRVDeviceRelease(struct device *pDevice) -+{ -+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVDeviceRelease(pDevice=%p)", pDevice)); -+} -+ -+static int PVRSRVOpen(struct inode unref__ * pInode, -+ struct file unref__ * pFile) -+{ -+ int Ret = 0; -+ -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ if (PVRSRVProcessConnect(OSGetCurrentProcessIDKM()) != PVRSRV_OK) { -+ Ret = -ENOMEM; -+ } -+ -+ LinuxUnLockMutex(&gPVRSRVLock); -+ -+ return Ret; -+} -+ -+static int PVRSRVRelease(struct inode unref__ * pInode, -+ struct file unref__ * pFile) -+{ -+ int Ret = 0; -+ -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ PVRSRVProcessDisconnect(OSGetCurrentProcessIDKM()); -+ -+ LinuxUnLockMutex(&gPVRSRVLock); -+ -+ return Ret; -+} -+ -+static int __init PVRCore_Init(void) -+{ -+ int error; -+ -+ PVR_TRACE(("PVRCore_Init")); -+ -+ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops); -+ -+ if (AssignedMajorNumber <= 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRCore_Init: unable to get major number")); -+ -+ return -EBUSY; -+ } -+ -+ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber)); -+ -+ if (CreateProcEntries()) { -+ unregister_chrdev(AssignedMajorNumber, DRVNAME); -+ -+ return -ENOMEM; -+ } -+ -+ LinuxInitMutex(&gPVRSRVLock); -+ -+#ifdef DEBUG -+ PVRDebugSetLevel(debug); -+#endif -+ -+ if (LinuxMMInit() != PVRSRV_OK) { -+ error = -ENOMEM; -+ goto init_failed; -+ } -+ -+ LinuxBridgeInit(); -+ -+ PVRMMapInit(); -+ -+ if ((error = platform_driver_register(&powervr_driver)) != 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRCore_Init: unable to 
register platform driver (%d)", -+ error)); -+ -+ goto init_failed; -+ } -+ -+ powervr_device.dev.devt = MKDEV(AssignedMajorNumber, 0); -+ -+ if ((error = platform_device_register(&powervr_device)) != 0) { -+ platform_driver_unregister(&powervr_driver); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRCore_Init: unable to register platform device (%d)", -+ error)); -+ -+ goto init_failed; -+ } -+ -+ -+ return 0; -+ -+init_failed: -+ -+ PVRMMapCleanup(); -+ LinuxMMCleanup(); -+ RemoveProcEntries(); -+ unregister_chrdev(AssignedMajorNumber, DRVNAME); -+ -+ return error; -+ -+} -+ -+static void __exit PVRCore_Cleanup(void) -+{ -+ SYS_DATA *psSysData; -+ -+ PVR_TRACE(("PVRCore_Cleanup")); -+ -+ SysAcquireData(&psSysData); -+ -+ unregister_chrdev(AssignedMajorNumber, DRVNAME) -+ ; -+ -+ -+ platform_device_unregister(&powervr_device); -+ platform_driver_unregister(&powervr_driver); -+ -+ PVRMMapCleanup(); -+ -+ LinuxMMCleanup(); -+ -+ LinuxBridgeDeInit(); -+ -+ RemoveProcEntries(); -+ -+ PVR_TRACE(("PVRCore_Cleanup: unloading")); -+} -+ -+module_init(PVRCore_Init); -+module_exit(PVRCore_Cleanup); -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mutex.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mutex.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mutex.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mutex.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,71 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+ -+#include "img_defs.h" -+#include "services.h" -+ -+#include "mutex.h" -+ -+ -+IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex) -+{ -+ mutex_init(psPVRSRVMutex); -+} -+ -+IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex) -+{ -+ mutex_lock(psPVRSRVMutex); -+} -+ -+PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX * psPVRSRVMutex) -+{ -+ if (mutex_lock_interruptible(psPVRSRVMutex) == -EINTR) { -+ return PVRSRV_ERROR_GENERIC; -+ } else { -+ return PVRSRV_OK; -+ } -+} -+ -+IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex) -+{ -+ return mutex_trylock(psPVRSRVMutex); -+} -+ -+IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex) -+{ -+ mutex_unlock(psPVRSRVMutex); -+} -+ -+IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex) -+{ -+ return mutex_is_locked(psPVRSRVMutex); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/mutex.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mutex.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/mutex.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/mutex.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,51 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __INCLUDED_LINUX_MUTEX_H_ -+#define __INCLUDED_LINUX_MUTEX_H_ -+ -+#include -+ -+#include -+ -+ -+typedef struct mutex PVRSRV_LINUX_MUTEX; -+ -+ -+extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex); -+ -+extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex); -+ -+extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX * -+ psPVRSRVMutex); -+ -+extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex); -+ -+extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex); -+ -+extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX * psPVRSRVMutex); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/oemfuncs.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/oemfuncs.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/oemfuncs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/oemfuncs.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,48 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__OEMFUNCS_H__) -+#define __OEMFUNCS_H__ -+ -+ -+ typedef IMG_UINT32(*PFN_SRV_BRIDGEDISPATCH) (IMG_UINT32 Ioctl, -+ IMG_BYTE * pInBuf, -+ IMG_UINT32 InBufLen, -+ IMG_BYTE * pOutBuf, -+ IMG_UINT32 OutBufLen, -+ IMG_UINT32 * -+ pdwBytesTransferred); -+ typedef struct PVRSRV_DC_OEM_JTABLE_TAG { -+ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch; -+ IMG_PVOID pvDummy1; -+ IMG_PVOID pvDummy2; -+ IMG_PVOID pvDummy3; -+ -+ } PVRSRV_DC_OEM_JTABLE; -+ -+#define OEM_GET_EXT_FUNCS (1<<1) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/omaplfb_displayclass.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/omaplfb_displayclass.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/omaplfb_displayclass.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/omaplfb_displayclass.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1316 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_defs.h" -+#include "servicesext.h" -+#include "kerneldisplay.h" -+#include "omaplfb.h" -+ -+static IMG_VOID *gpvAnchor; -+ -+static int fb_idx = 0; -+ -+#define OMAPLFB_COMMAND_COUNT 1 -+ -+static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL; -+ -+static OMAPLFB_DEVINFO *GetAnchorPtr(IMG_VOID) -+{ -+ return (OMAPLFB_DEVINFO *) gpvAnchor; -+} -+ -+static IMG_VOID SetAnchorPtr(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ gpvAnchor = (IMG_VOID *) psDevInfo; -+} -+ -+static IMG_VOID FlushInternalVSyncQueue(OMAPLFB_SWAPCHAIN * psSwapChain) -+{ -+ OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem; -+ IMG_UINT32 ui32MaxIndex; -+ IMG_UINT32 i; -+ -+ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex]; -+ ui32MaxIndex = psSwapChain->ui32BufferCount - 1; -+ -+ for (i = 0; i < psSwapChain->ui32BufferCount; i++) { -+ if (!psFlipItem->bValid) { -+ continue; -+ } -+ -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": FlushInternalVSyncQueue: Flushing swap buffer (index %lu)\n", -+ psSwapChain->ui32RemoveIndex)); -+ -+ if (psFlipItem->bFlipped == IMG_FALSE) { -+ -+ OMAPLFBFlip(psSwapChain, -+ (IMG_UINT32) psFlipItem->sSysAddr); -+ } -+ -+ if (psFlipItem->bCmdCompleted == IMG_FALSE) { -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": FlushInternalVSyncQueue: Calling command complete for swap buffer (index %lu)\n", -+ psSwapChain->ui32RemoveIndex)); -+ -+ psSwapChain->psPVRJTable-> -+ pfnPVRSRVCmdComplete(psFlipItem->hCmdComplete, -+ IMG_TRUE); -+ } -+ -+ psSwapChain->ui32RemoveIndex++; -+ -+ if (psSwapChain->ui32RemoveIndex > ui32MaxIndex) { -+ psSwapChain->ui32RemoveIndex = 0; -+ } -+ -+ psFlipItem->bFlipped = IMG_FALSE; -+ psFlipItem->bCmdCompleted = IMG_FALSE; -+ psFlipItem->bValid = IMG_FALSE; -+ -+ psFlipItem = -+ &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex]; -+ } -+ -+ psSwapChain->ui32InsertIndex = 0; -+ psSwapChain->ui32RemoveIndex = 0; -+} -+ -+static IMG_VOID SetFlushStateInternalNoLock(OMAPLFB_DEVINFO * psDevInfo, -+ IMG_BOOL bFlushState) -+{ -+ OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain; -+ -+ if (psSwapChain == IMG_NULL) { -+ return; -+ } -+ -+ if (bFlushState) { -+ if (psSwapChain->ui32SetFlushStateRefCount == 0) { -+ OMAPLFBDisableVSyncInterrupt(psSwapChain); -+ psSwapChain->bFlushCommands = IMG_TRUE; -+ FlushInternalVSyncQueue(psSwapChain); -+ } -+ psSwapChain->ui32SetFlushStateRefCount++; -+ } else { -+ if (psSwapChain->ui32SetFlushStateRefCount != 0) { -+ psSwapChain->ui32SetFlushStateRefCount--; -+ if (psSwapChain->ui32SetFlushStateRefCount == 0) { -+ psSwapChain->bFlushCommands = IMG_FALSE; -+ OMAPLFBEnableVSyncInterrupt(psSwapChain); -+ } 
-+ } -+ } -+} -+ -+static IMG_VOID SetFlushStateInternal(OMAPLFB_DEVINFO * psDevInfo, -+ IMG_BOOL bFlushState) -+{ -+ unsigned long ulLockFlags; -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ SetFlushStateInternalNoLock(psDevInfo, bFlushState); -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+} -+ -+static IMG_VOID SetFlushStateExternal(OMAPLFB_DEVINFO * psDevInfo, -+ IMG_BOOL bFlushState) -+{ -+ unsigned long ulLockFlags; -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ if (psDevInfo->bFlushCommands != bFlushState) { -+ psDevInfo->bFlushCommands = bFlushState; -+ SetFlushStateInternalNoLock(psDevInfo, bFlushState); -+ } -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+} -+ -+static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State) -+{ -+ OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ switch (ui32State) { -+ case DC_STATE_FLUSH_COMMANDS: -+ SetFlushStateExternal(psDevInfo, IMG_TRUE); -+ break; -+ case DC_STATE_NO_FLUSH_COMMANDS: -+ SetFlushStateExternal(psDevInfo, IMG_FALSE); -+ break; -+ default: -+ break; -+ } -+ -+ return; -+} -+ -+static int FrameBufferEvents(struct notifier_block *psNotif, -+ unsigned long event, void *data) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ struct fb_event *psFBEvent = (struct fb_event *)data; -+ IMG_BOOL bBlanked;; -+ -+ if (event != FB_EVENT_BLANK) { -+ return 0; -+ } -+ -+ psDevInfo = GetAnchorPtr(); -+ psSwapChain = psDevInfo->psSwapChain; -+ -+ bBlanked = (*(int *)psFBEvent->data != 0); -+ -+ if (bBlanked != psSwapChain->bBlanked) { -+ psSwapChain->bBlanked = bBlanked; -+ -+ if (bBlanked) { -+ -+ SetFlushStateInternal(psDevInfo, IMG_TRUE); -+ } else { -+ -+ SetFlushStateInternal(psDevInfo, IMG_FALSE); -+ } -+ } -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR UnblankDisplay(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ int res; -+ -+ acquire_console_sem(); -+ res = fb_blank(psDevInfo->psLINFBInfo, 0); -+ release_console_sem(); -+ if (res != 0) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": fb_blank failed (%d)", res); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR EnableLFBEventNotification(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ int res; -+ OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain; -+ PVRSRV_ERROR eError; -+ -+ memset(&psDevInfo->sLINNotifBlock, 0, -+ sizeof(psDevInfo->sLINNotifBlock)); -+ -+ psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents; -+ -+ psSwapChain->bBlanked = IMG_FALSE; -+ -+ res = fb_register_client(&psDevInfo->sLINNotifBlock); -+ if (res != 0) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": fb_register_client failed (%d)", res); -+ -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ eError = UnblankDisplay(psDevInfo); -+ if (eError != PVRSRV_OK) { -+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX -+ ": UnblankDisplay failed (%d)", eError)); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR DisableLFBEventNotification(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ int res; -+ -+ res = fb_unregister_client(&psDevInfo->sLINNotifBlock); -+ if (res != 0) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": fb_unregister_client failed (%d)", res); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID, -+ IMG_HANDLE * phDevice, -+ PVRSRV_SYNC_DATA * psSystemBufferSyncData) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32DeviceID); -+ -+ 
psDevInfo = GetAnchorPtr(); -+ -+ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData; -+ -+ eError = UnblankDisplay(psDevInfo); -+ if (eError != PVRSRV_OK) { -+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX -+ ": UnblankDisplay failed (%d)", eError)); -+ return eError; -+ } -+ -+ *phDevice = (IMG_HANDLE) psDevInfo; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevice); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice, -+ IMG_UINT32 * pui32NumFormats, -+ DISPLAY_FORMAT * psFormat) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ -+ if (!hDevice || !pui32NumFormats) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ *pui32NumFormats = 1; -+ -+ if (psFormat) { -+ psFormat[0] = psDevInfo->sDisplayFormat; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice, -+ DISPLAY_FORMAT * psFormat, -+ IMG_UINT32 * pui32NumDims, DISPLAY_DIMS * psDim) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ -+ if (!hDevice || !psFormat || !pui32NumDims) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ *pui32NumDims = 1; -+ -+ if (psDim) { -+ psDim[0] = psDevInfo->sDisplayDim; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE * phBuffer) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ -+ if (!hDevice || !phBuffer) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ *phBuffer = (IMG_HANDLE) & psDevInfo->sSystemBuffer; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO * psDCInfo) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ -+ if (!hDevice || !psDCInfo) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ *psDCInfo = psDevInfo->sDisplayInfo; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE hDevice, -+ IMG_HANDLE hBuffer, -+ IMG_SYS_PHYADDR ** ppsSysAddr, -+ IMG_UINT32 * pui32ByteSize, -+ IMG_VOID ** ppvCpuVAddr, -+ IMG_HANDLE * phOSMapInfo, -+ IMG_BOOL * pbIsContiguous) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_BUFFER *psSystemBuffer; -+ -+ if (!hDevice) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ if (!hBuffer) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psSystemBuffer = (OMAPLFB_BUFFER *) hBuffer; -+ -+ if (!ppsSysAddr) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *ppsSysAddr = &psSystemBuffer->sSysAddr; -+ -+ if (!pui32ByteSize) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *pui32ByteSize = psDevInfo->sFBInfo.ui32BufferSize; -+ -+ if (ppvCpuVAddr) { -+ *ppvCpuVAddr = psSystemBuffer->sCPUVAddr; -+ } -+ -+ if (phOSMapInfo) { -+ *phOSMapInfo = (IMG_HANDLE) 0; -+ } -+ -+ if (pbIsContiguous) { -+ *pbIsContiguous = IMG_TRUE; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice, -+ IMG_UINT32 ui32Flags, -+ DISPLAY_SURF_ATTRIBUTES * psDstSurfAttrib, -+ DISPLAY_SURF_ATTRIBUTES * psSrcSurfAttrib, -+ IMG_UINT32 ui32BufferCount, -+ PVRSRV_SYNC_DATA ** ppsSyncData, -+ IMG_UINT32 ui32OEMFlags, -+ IMG_HANDLE * phSwapChain, -+ IMG_UINT32 * pui32SwapChainID) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ OMAPLFB_BUFFER *psBuffer; -+ OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC; -+ unsigned long ulLockFlags; 
-+ IMG_UINT32 ui32BuffersToSkip; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32OEMFlags); -+ PVR_UNREFERENCED_PARAMETER(pui32SwapChainID); -+ -+ if (!hDevice -+ || !psDstSurfAttrib -+ || !psSrcSurfAttrib || !ppsSyncData || !phSwapChain) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0) { -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ -+ if (psDevInfo->psSwapChain != IMG_NULL) { -+ return PVRSRV_ERROR_FLIP_CHAIN_EXISTS; -+ } -+ -+ if (ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers) { -+ return PVRSRV_ERROR_TOOMANYBUFFERS; -+ } -+ -+ if ((psDevInfo->sFBInfo.ui32RoundedBufferSize * ui32BufferCount) > -+ psDevInfo->sFBInfo.ui32FBSize) { -+ return PVRSRV_ERROR_TOOMANYBUFFERS; -+ } -+ -+ ui32BuffersToSkip = -+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers - ui32BufferCount; -+ -+ if (psDstSurfAttrib->pixelformat != -+ psDevInfo->sDisplayFormat.pixelformat -+ || psDstSurfAttrib->sDims.ui32ByteStride != -+ psDevInfo->sDisplayDim.ui32ByteStride -+ || psDstSurfAttrib->sDims.ui32Width != -+ psDevInfo->sDisplayDim.ui32Width -+ || psDstSurfAttrib->sDims.ui32Height != -+ psDevInfo->sDisplayDim.ui32Height) { -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat -+ || psDstSurfAttrib->sDims.ui32ByteStride != -+ psSrcSurfAttrib->sDims.ui32ByteStride -+ || psDstSurfAttrib->sDims.ui32Width != -+ psSrcSurfAttrib->sDims.ui32Width -+ || psDstSurfAttrib->sDims.ui32Height != -+ psSrcSurfAttrib->sDims.ui32Height) { -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Flags); -+ -+ psSwapChain = -+ (OMAPLFB_SWAPCHAIN *) -+ OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN)); -+ if (!psSwapChain) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psBuffer = -+ (OMAPLFB_BUFFER *) OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * -+ ui32BufferCount); -+ if (!psBuffer) { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorFreeSwapChain; -+ } -+ -+ psVSyncFlips = -+ (OMAPLFB_VSYNC_FLIP_ITEM *) -+ OMAPLFBAllocKernelMem(sizeof(OMAPLFB_VSYNC_FLIP_ITEM) * -+ ui32BufferCount); -+ if (!psVSyncFlips) { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorFreeBuffers; -+ } -+ -+ psSwapChain->ui32BufferCount = ui32BufferCount; -+ psSwapChain->psBuffer = psBuffer; -+ psSwapChain->psVSyncFlips = psVSyncFlips; -+ psSwapChain->ui32InsertIndex = 0; -+ psSwapChain->ui32RemoveIndex = 0; -+ psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable; -+ psSwapChain->psSwapChainLock = &psDevInfo->SwapChainLock; -+ -+ for (i = 0; i < ui32BufferCount - 1; i++) { -+ psBuffer[i].psNext = &psBuffer[i + 1]; -+ } -+ -+ psBuffer[i].psNext = &psBuffer[0]; -+ -+ for (i = 0; i < ui32BufferCount; i++) { -+ IMG_UINT32 ui32SwapBuffer = i + ui32BuffersToSkip; -+ IMG_UINT32 ui32BufferOffset = -+ ui32SwapBuffer * psDevInfo->sFBInfo.ui32RoundedBufferSize; -+ -+ psBuffer[i].psSyncData = ppsSyncData[i]; -+ -+ psBuffer[i].sSysAddr.uiAddr = -+ psDevInfo->sFBInfo.sSysAddr.uiAddr + ui32BufferOffset; -+ psBuffer[i].sCPUVAddr = -+ psDevInfo->sFBInfo.sCPUVAddr + ui32BufferOffset; -+ } -+ -+ for (i = 0; i < ui32BufferCount; i++) { -+ psVSyncFlips[i].bValid = IMG_FALSE; -+ psVSyncFlips[i].bFlipped = IMG_FALSE; -+ psVSyncFlips[i].bCmdCompleted = IMG_FALSE; -+ } -+ -+ OMAPLFBEnableDisplayRegisterAccess(); -+ -+ psSwapChain->pvRegs = -+ ioremap(psDevInfo->psLINFBInfo->fix.mmio_start, -+ psDevInfo->psLINFBInfo->fix.mmio_len); -+ if (psSwapChain->pvRegs == IMG_NULL) { -+ 
printk(KERN_WARNING DRIVER_PREFIX -+ ": Couldn't map registers needed for flipping\n"); -+ goto ErrorDisableDisplayRegisters; -+ } -+ -+ if (OMAPLFBInstallVSyncISR(psSwapChain) != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": ISR handler failed to register\n"); -+ goto ErrorUnmapRegisters; -+ } -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ psDevInfo->psSwapChain = psSwapChain; -+ -+ psSwapChain->bFlushCommands = psDevInfo->bFlushCommands; -+ -+ if (psSwapChain->bFlushCommands) { -+ psSwapChain->ui32SetFlushStateRefCount = 1; -+ } else { -+ psSwapChain->ui32SetFlushStateRefCount = 0; -+ OMAPLFBEnableVSyncInterrupt(psSwapChain); -+ } -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ eError = EnableLFBEventNotification(psDevInfo); -+ if (eError != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Couldn't enable framebuffer event notification\n"); -+ goto ErrorUninstallVSyncInterrupt; -+ } -+ -+ *phSwapChain = (IMG_HANDLE) psSwapChain; -+ -+ return PVRSRV_OK; -+ -+ErrorUninstallVSyncInterrupt: -+ if (OMAPLFBUninstallVSyncISR(psSwapChain) != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Couldn't uninstall VSync ISR\n"); -+ } -+ErrorUnmapRegisters: -+ iounmap(psSwapChain->pvRegs); -+ErrorDisableDisplayRegisters: -+ OMAPLFBDisableDisplayRegisterAccess(); -+ OMAPLFBFreeKernelMem(psVSyncFlips); -+ErrorFreeBuffers: -+ OMAPLFBFreeKernelMem(psBuffer); -+ErrorFreeSwapChain: -+ OMAPLFBFreeKernelMem(psSwapChain); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ unsigned long ulLockFlags; -+ PVRSRV_ERROR eError; -+ -+ if (!hDevice || !hSwapChain) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ psSwapChain = (OMAPLFB_SWAPCHAIN *) hSwapChain; -+ if (psSwapChain != psDevInfo->psSwapChain) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = DisableLFBEventNotification(psDevInfo); -+ if (eError != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Couldn't disable framebuffer event notification\n"); -+ } -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ OMAPLFBDisableVSyncInterrupt(psSwapChain); -+ -+ FlushInternalVSyncQueue(psSwapChain); -+ -+ OMAPLFBFlip(psSwapChain, psDevInfo->sFBInfo.sSysAddr.uiAddr); -+ -+ psDevInfo->psSwapChain = IMG_NULL; -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ if (OMAPLFBUninstallVSyncISR(psSwapChain) != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Couldn't uninstall VSync ISR\n"); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ iounmap(psSwapChain->pvRegs); -+ -+ OMAPLFBDisableDisplayRegisterAccess(); -+ -+ OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips); -+ OMAPLFBFreeKernelMem(psSwapChain->psBuffer); -+ OMAPLFBFreeKernelMem(psSwapChain); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, IMG_RECT * psRect) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevice); -+ PVR_UNREFERENCED_PARAMETER(hSwapChain); -+ PVR_UNREFERENCED_PARAMETER(psRect); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, IMG_RECT * psRect) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevice); -+ PVR_UNREFERENCED_PARAMETER(hSwapChain); -+ PVR_UNREFERENCED_PARAMETER(psRect); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+static PVRSRV_ERROR 
SetDCDstColourKey(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 ui32CKColour) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevice); -+ PVR_UNREFERENCED_PARAMETER(hSwapChain); -+ PVR_UNREFERENCED_PARAMETER(ui32CKColour); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 ui32CKColour) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDevice); -+ PVR_UNREFERENCED_PARAMETER(hSwapChain); -+ PVR_UNREFERENCED_PARAMETER(ui32CKColour); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 * pui32BufferCount, -+ IMG_HANDLE * phBuffer) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ IMG_UINT32 i; -+ -+ if (!hDevice || !hSwapChain || !pui32BufferCount || !phBuffer) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ psSwapChain = (OMAPLFB_SWAPCHAIN *) hSwapChain; -+ if (psSwapChain != psDevInfo->psSwapChain) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *pui32BufferCount = psSwapChain->ui32BufferCount; -+ -+ for (i = 0; i < psSwapChain->ui32BufferCount; i++) { -+ phBuffer[i] = (IMG_HANDLE) & psSwapChain->psBuffer[i]; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice, -+ IMG_HANDLE hBuffer, -+ IMG_UINT32 ui32SwapInterval, -+ IMG_HANDLE hPrivateTag, -+ IMG_UINT32 ui32ClipRectCount, -+ IMG_RECT * psClipRect) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32SwapInterval); -+ PVR_UNREFERENCED_PARAMETER(hPrivateTag); -+ PVR_UNREFERENCED_PARAMETER(psClipRect); -+ -+ if (!hDevice || !hBuffer || (ui32ClipRectCount != 0)) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(hBuffer); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice, IMG_HANDLE hSwapChain) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ unsigned long ulLockFlags; -+ -+ if (!hDevice || !hSwapChain) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) hDevice; -+ psSwapChain = (OMAPLFB_SWAPCHAIN *) hSwapChain; -+ if (psSwapChain != psDevInfo->psSwapChain) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ FlushInternalVSyncQueue(psSwapChain); -+ -+ OMAPLFBFlip(psSwapChain, psDevInfo->sFBInfo.sSysAddr.uiAddr); -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN * psSwapChain) -+{ -+ IMG_BOOL bStatus = IMG_FALSE; -+ OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem; -+ IMG_UINT32 ui32MaxIndex; -+ unsigned long ulLockFlags; -+ -+ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex]; -+ ui32MaxIndex = psSwapChain->ui32BufferCount - 1; -+ -+ spin_lock_irqsave(psSwapChain->psSwapChainLock, ulLockFlags); -+ -+ if (psSwapChain->bFlushCommands) { -+ goto ExitUnlock; -+ } -+ -+ while (psFlipItem->bValid) { -+ -+ if (psFlipItem->bFlipped) { -+ -+ if (!psFlipItem->bCmdCompleted) { -+ -+ psSwapChain->psPVRJTable-> -+ pfnPVRSRVCmdComplete(psFlipItem-> -+ hCmdComplete, -+ IMG_TRUE); -+ -+ psFlipItem->bCmdCompleted = IMG_TRUE; -+ } -+ -+ psFlipItem->ui32SwapInterval--; -+ -+ if (psFlipItem->ui32SwapInterval == 0) { -+ -+ psSwapChain->ui32RemoveIndex++; -+ -+ if (psSwapChain->ui32RemoveIndex > ui32MaxIndex) { -+ 
psSwapChain->ui32RemoveIndex = 0; -+ } -+ -+ psFlipItem->bCmdCompleted = IMG_FALSE; -+ psFlipItem->bFlipped = IMG_FALSE; -+ -+ psFlipItem->bValid = IMG_FALSE; -+ } else { -+ -+ break; -+ } -+ } else { -+ -+ OMAPLFBFlip(psSwapChain, -+ (IMG_UINT32) psFlipItem->sSysAddr); -+ -+ psFlipItem->bFlipped = IMG_TRUE; -+ -+ break; -+ } -+ -+ psFlipItem = -+ &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex]; -+ } -+ -+ExitUnlock: -+ spin_unlock_irqrestore(psSwapChain->psSwapChainLock, ulLockFlags); -+ -+ return bStatus; -+} -+ -+static IMG_BOOL ProcessFlip(IMG_HANDLE hCmdCookie, -+ IMG_UINT32 ui32DataSize, IMG_VOID * pvData) -+{ -+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; -+ OMAPLFB_DEVINFO *psDevInfo; -+ OMAPLFB_BUFFER *psBuffer; -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem; -+ unsigned long ulLockFlags; -+ -+ if (!hCmdCookie || !pvData) { -+ return IMG_FALSE; -+ } -+ -+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND *) pvData; -+ -+ if (psFlipCmd == IMG_NULL -+ || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize) { -+ return IMG_FALSE; -+ } -+ -+ psDevInfo = (OMAPLFB_DEVINFO *) psFlipCmd->hExtDevice; -+ -+ psBuffer = (OMAPLFB_BUFFER *) psFlipCmd->hExtBuffer; -+ psSwapChain = (OMAPLFB_SWAPCHAIN *) psFlipCmd->hExtSwapChain; -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ if (psDevInfo->bDeviceSuspended) { -+ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, -+ IMG_TRUE); -+ goto ExitTrueUnlock; -+ } -+ -+ if (psFlipCmd->ui32SwapInterval == 0 || psSwapChain->bFlushCommands) { -+ -+ OMAPLFBFlip(psSwapChain, psBuffer->sSysAddr.uiAddr); -+ -+ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, -+ IMG_TRUE); -+ -+ goto ExitTrueUnlock; -+ } -+ -+ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32InsertIndex]; -+ -+ if (!psFlipItem->bValid) { -+ IMG_UINT32 ui32MaxIndex = psSwapChain->ui32BufferCount - 1; -+ -+ if (psSwapChain->ui32InsertIndex == -+ psSwapChain->ui32RemoveIndex) { -+ -+ OMAPLFBFlip(psSwapChain, psBuffer->sSysAddr.uiAddr); -+ -+ psFlipItem->bFlipped = IMG_TRUE; -+ } else { -+ psFlipItem->bFlipped = IMG_FALSE; -+ } -+ -+ psFlipItem->hCmdComplete = hCmdCookie; -+ psFlipItem->ui32SwapInterval = psFlipCmd->ui32SwapInterval; -+ psFlipItem->sSysAddr = &psBuffer->sSysAddr; -+ psFlipItem->bValid = IMG_TRUE; -+ -+ psSwapChain->ui32InsertIndex++; -+ if (psSwapChain->ui32InsertIndex > ui32MaxIndex) { -+ psSwapChain->ui32InsertIndex = 0; -+ } -+ -+ goto ExitTrueUnlock; -+ } -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+ return IMG_FALSE; -+ -+ExitTrueUnlock: -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+ return IMG_TRUE; -+} -+ -+static void SetDevinfo(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo; -+ struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo; -+ unsigned long FBSize; -+ -+ FBSize = (psLINFBInfo->screen_size) != 0 ? 
-+ psLINFBInfo->screen_size : psLINFBInfo->fix.smem_len; -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer physical address: 0x%lx\n", -+ psLINFBInfo->fix.smem_start)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer virtual address: 0x%lx\n", -+ (unsigned long)psLINFBInfo->screen_base)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer size: %lu\n", FBSize)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer virtual width: %u\n", -+ psLINFBInfo->var.xres_virtual)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer virtual height: %u\n", -+ psLINFBInfo->var.yres_virtual)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer width: %u\n", psLINFBInfo->var.xres)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer height: %u\n", psLINFBInfo->var.yres)); -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Framebuffer stride: %u\n", -+ psLINFBInfo->fix.line_length)); -+ -+ psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start; -+ psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base; -+ -+ psPVRFBInfo->ui32Width = psLINFBInfo->var.xres; -+ psPVRFBInfo->ui32Height = psLINFBInfo->var.yres; -+ psPVRFBInfo->ui32ByteStride = psLINFBInfo->fix.line_length; -+ psPVRFBInfo->ui32FBSize = FBSize; -+ psPVRFBInfo->ui32BufferSize = -+ psPVRFBInfo->ui32Height * psPVRFBInfo->ui32ByteStride; -+ -+ psPVRFBInfo->ui32RoundedBufferSize = -+ OMAPLFB_PAGE_ROUNDUP(psPVRFBInfo->ui32BufferSize); -+ -+ if (psLINFBInfo->var.bits_per_pixel == 16) { -+ if ((psLINFBInfo->var.red.length == 5) && -+ (psLINFBInfo->var.green.length == 6) && -+ (psLINFBInfo->var.blue.length == 5) && -+ (psLINFBInfo->var.red.offset == 11) && -+ (psLINFBInfo->var.green.offset == 5) && -+ (psLINFBInfo->var.blue.offset == 0) && -+ (psLINFBInfo->var.red.msb_right == 0)) { -+ psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565; -+ } else { -+ printk("Unknown FB format\n"); -+ } -+ } else if (psLINFBInfo->var.bits_per_pixel == 32) { -+ if ((psLINFBInfo->var.red.length == 8) && -+ (psLINFBInfo->var.green.length == 8) && -+ (psLINFBInfo->var.blue.length == 8) && -+ (psLINFBInfo->var.red.offset == 16) && -+ (psLINFBInfo->var.green.offset == 8) && -+ (psLINFBInfo->var.blue.offset == 0) && -+ (psLINFBInfo->var.red.msb_right == 0)) { -+ psPVRFBInfo->ePixelFormat = -+ PVRSRV_PIXEL_FORMAT_ARGB8888; -+ } else { -+ printk("Unknown FB format\n"); -+ } -+ } else { -+ printk("Unknown FB format\n"); -+ } -+ psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat; -+ psDevInfo->sDisplayDim.ui32Width = psDevInfo->sFBInfo.ui32Width; -+ psDevInfo->sDisplayDim.ui32Height = psDevInfo->sFBInfo.ui32Height; -+ psDevInfo->sDisplayDim.ui32ByteStride = -+ psDevInfo->sFBInfo.ui32ByteStride; -+ psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr; -+ psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr; -+ psDevInfo->sSystemBuffer.ui32BufferSize = -+ psDevInfo->sFBInfo.ui32RoundedBufferSize; -+} -+ -+static struct FB_EVENTS { -+ struct notifier_block notif; -+ OMAPLFB_DEVINFO *psDevInfo; -+} gFBEventsData; -+ -+static int FBEvents(struct notifier_block *psNotif, -+ unsigned long event, void *data) -+{ -+ if (event == FB_EVENT_MODE_CHANGE) { -+ struct FB_EVENTS *psEvents = -+ container_of(psNotif, struct FB_EVENTS, notif); -+ SetDevinfo(psEvents->psDevInfo); -+ } -+ return 0; -+} -+ -+static PVRSRV_ERROR InitDev(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ struct fb_info *psLINFBInfo; -+ struct module *psLINFBOwner; -+ OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo; -+ PVRSRV_ERROR 
eError = PVRSRV_ERROR_GENERIC; -+ -+ acquire_console_sem(); -+ -+ if (fb_idx < 0 || fb_idx >= num_registered_fb) { -+ eError = PVRSRV_ERROR_INVALID_DEVICE; -+ goto errRelSem; -+ } -+ -+ psLINFBInfo = registered_fb[fb_idx]; -+ -+ psLINFBOwner = psLINFBInfo->fbops->owner; -+ if (!try_module_get(psLINFBOwner)) { -+ printk(KERN_INFO DRIVER_PREFIX -+ ": Couldn't get framebuffer module\n"); -+ -+ goto errRelSem; -+ } -+ -+ if (psLINFBInfo->fbops->fb_open != NULL) { -+ int res; -+ -+ res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0); -+ if (res != 0) { -+ printk(KERN_INFO DRIVER_PREFIX -+ ": Couldn't open framebuffer: %d\n", res); -+ -+ goto errModPut; -+ } -+ } -+ -+ psDevInfo->psLINFBInfo = psLINFBInfo; -+ -+ SetDevinfo(psDevInfo); -+ -+ gFBEventsData.notif.notifier_call = FBEvents; -+ gFBEventsData.psDevInfo = psDevInfo; -+ fb_register_client(&gFBEventsData.notif); -+ -+ psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr; -+ psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr; -+ -+ eError = PVRSRV_OK; -+ goto errRelSem; -+ -+errModPut: -+ module_put(psLINFBOwner); -+errRelSem: -+ release_console_sem(); -+ return eError; -+} -+ -+static IMG_VOID DeInitDev(OMAPLFB_DEVINFO * psDevInfo) -+{ -+ struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo; -+ struct module *psLINFBOwner; -+ -+ acquire_console_sem(); -+ -+ psLINFBOwner = psLINFBInfo->fbops->owner; -+ -+ if (psLINFBInfo->fbops->fb_release != NULL) { -+ (void)psLINFBInfo->fbops->fb_release(psLINFBInfo, 0); -+ } -+ -+ module_put(psLINFBOwner); -+ -+ release_console_sem(); -+} -+ -+PVRSRV_ERROR OMAPLFBInit(IMG_VOID) -+{ -+ OMAPLFB_DEVINFO *psDevInfo; -+ -+ psDevInfo = GetAnchorPtr(); -+ -+ if (psDevInfo == IMG_NULL) { -+ PFN_CMD_PROC pfnCmdProcList[OMAPLFB_COMMAND_COUNT]; -+ IMG_UINT32 aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2]; -+ -+ psDevInfo = -+ (OMAPLFB_DEVINFO *) -+ OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO)); -+ -+ if (!psDevInfo) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO)); -+ -+ SetAnchorPtr((IMG_VOID *) psDevInfo); -+ -+ psDevInfo->ui32RefCount = 0; -+ -+ if (InitDev(psDevInfo) != PVRSRV_OK) { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ if (OMAPLFBGetLibFuncAddr -+ ("PVRGetDisplayClassJTable", -+ &pfnGetPVRJTable) != PVRSRV_OK) { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ if (!(*pfnGetPVRJTable) (&psDevInfo->sPVRJTable)) { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ spin_lock_init(&psDevInfo->SwapChainLock); -+ -+ psDevInfo->psSwapChain = IMG_NULL; -+ psDevInfo->bFlushCommands = IMG_FALSE; -+ psDevInfo->bDeviceSuspended = IMG_FALSE; -+ -+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = -+ psDevInfo->sFBInfo.ui32FBSize / -+ psDevInfo->sFBInfo.ui32RoundedBufferSize; -+ if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers == 0) { -+ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 0; -+ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 0; -+ } else { -+ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1; -+ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3; -+ } -+ psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0; -+ -+ strncpy(psDevInfo->sDisplayInfo.szDisplayName, -+ DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE); -+ -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": Maximum number of swap chain buffers: %lu\n", -+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)); -+ -+ psDevInfo->sDCJTable.ui32TableSize = -+ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE); -+ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice; -+ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice; -+ 
psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats; -+ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims; -+ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer; -+ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo; -+ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr; -+ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain; -+ psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain; -+ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect; -+ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect; -+ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey; -+ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey; -+ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers; -+ psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer; -+ psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem; -+ psDevInfo->sDCJTable.pfnSetDCState = SetDCState; -+ -+ if (psDevInfo->sPVRJTable. -+ pfnPVRSRVRegisterDCDevice(&psDevInfo->sDCJTable, -+ &psDevInfo->ui32DeviceID) != -+ PVRSRV_OK) { -+ return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; -+ } -+ -+ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip; -+ -+ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0; -+ aui32SyncCountList[DC_FLIP_COMMAND][1] = 2; -+ -+ if (psDevInfo->sPVRJTable. -+ pfnPVRSRVRegisterCmdProcList(psDevInfo->ui32DeviceID, -+ &pfnCmdProcList[0], -+ aui32SyncCountList, -+ OMAPLFB_COMMAND_COUNT) != -+ PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Can't register callback\n"); -+ return PVRSRV_ERROR_CANT_REGISTER_CALLBACK; -+ } -+ -+ } -+ -+ psDevInfo->ui32RefCount++; -+ -+ return PVRSRV_OK; -+ -+} -+ -+PVRSRV_ERROR OMAPLFBDeinit(IMG_VOID) -+{ -+ OMAPLFB_DEVINFO *psDevInfo, *psDevFirst; -+ -+ psDevFirst = GetAnchorPtr(); -+ psDevInfo = psDevFirst; -+ -+ if (psDevInfo == IMG_NULL) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psDevInfo->ui32RefCount--; -+ -+ if (psDevInfo->ui32RefCount == 0) { -+ -+ PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable = &psDevInfo->sPVRJTable; -+ -+ if (psDevInfo->sPVRJTable. 
-+ pfnPVRSRVRemoveCmdProcList(psDevInfo->ui32DeviceID, -+ OMAPLFB_COMMAND_COUNT) != -+ PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (psJTable-> -+ pfnPVRSRVRemoveDCDevice(psDevInfo->ui32DeviceID) != -+ PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ DeInitDev(psDevInfo); -+ -+ OMAPLFBFreeKernelMem(psDevInfo); -+ } -+ -+ SetAnchorPtr(IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID OMAPLFBDriverSuspend(IMG_VOID) -+{ -+ OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr(); -+ unsigned long ulLockFlags; -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ if (psDevInfo->bDeviceSuspended) { -+ goto ExitUnlock; -+ } -+ psDevInfo->bDeviceSuspended = IMG_TRUE; -+ -+ SetFlushStateInternalNoLock(psDevInfo, IMG_TRUE); -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ if (psDevInfo->psSwapChain != IMG_NULL) { -+ OMAPLFBDisableDisplayRegisterAccess(); -+ } -+ -+ return; -+ -+ExitUnlock: -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+} -+ -+IMG_VOID OMAPLFBDriverResume(IMG_VOID) -+{ -+ OMAPLFB_DEVINFO *psDevInfo = GetAnchorPtr(); -+ unsigned long ulLockFlags; -+ -+ if (!psDevInfo->bDeviceSuspended) { -+ return; -+ } -+ -+ if (psDevInfo->psSwapChain != IMG_NULL) { -+ OMAPLFBEnableDisplayRegisterAccess(); -+ } -+ -+ spin_lock_irqsave(&psDevInfo->SwapChainLock, ulLockFlags); -+ -+ SetFlushStateInternalNoLock(psDevInfo, IMG_FALSE); -+ -+ psDevInfo->bDeviceSuspended = IMG_FALSE; -+ -+ spin_unlock_irqrestore(&psDevInfo->SwapChainLock, ulLockFlags); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/omaplfb.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/omaplfb.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/omaplfb.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/omaplfb.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,201 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __OMAPLFB_H__ -+#define __OMAPLFB_H__ -+ -+extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE * -+ psJTable); -+ -+#define OMAPLCD_IRQ 25 -+ -+#define OMAPLCD_SYSCONFIG 0x0410 -+#define OMAPLCD_CONFIG 0x0444 -+#define OMAPLCD_DEFAULT_COLOR0 0x044C -+#define OMAPLCD_TIMING_H 0x0464 -+#define OMAPLCD_TIMING_V 0x0468 -+#define OMAPLCD_POL_FREQ 0x046C -+#define OMAPLCD_DIVISOR 0x0470 -+#define OMAPLCD_SIZE_DIG 0x0478 -+#define OMAPLCD_SIZE_LCD 0x047C -+#define OMAPLCD_GFX_POSITION 0x0488 -+#define OMAPLCD_GFX_SIZE 0x048C -+#define OMAPLCD_GFX_ATTRIBUTES 0x04a0 -+#define OMAPLCD_GFX_FIFO_THRESHOLD 0x04a4 -+#define OMAPLCD_GFX_WINDOW_SKIP 0x04b4 -+ -+#define OMAPLCD_IRQSTATUS 0x0418 -+#define OMAPLCD_IRQENABLE 0x041c -+#define OMAPLCD_CONTROL 0x0440 -+#define OMAPLCD_GFX_BA0 0x0480 -+#define OMAPLCD_GFX_BA1 0x0484 -+#define OMAPLCD_GFX_ROW_INC 0x04ac -+#define OMAPLCD_GFX_PIX_INC 0x04b0 -+#define OMAPLCD_VID1_BA0 0x04bc -+#define OMAPLCD_VID1_BA1 0x04c0 -+#define OMAPLCD_VID1_ROW_INC 0x04d8 -+#define OMAPLCD_VID1_PIX_INC 0x04dc -+ -+#define OMAP_CONTROL_GODIGITAL (1 << 6) -+#define OMAP_CONTROL_GOLCD (1 << 5) -+#define OMAP_CONTROL_DIGITALENABLE (1 << 1) -+#define OMAP_CONTROL_LCDENABLE (1 << 0) -+ -+#define OMAPLCD_INTMASK_VSYNC (1 << 1) -+#define OMAPLCD_INTMASK_OFF 0 -+ -+typedef struct OMAPLFB_BUFFER_TAG { -+ IMG_SYS_PHYADDR sSysAddr; -+ IMG_CPU_VIRTADDR sCPUVAddr; -+ IMG_UINT32 ui32BufferSize; -+ PVRSRV_SYNC_DATA *psSyncData; -+ struct OMAPLFB_BUFFER_TAG *psNext; -+} OMAPLFB_BUFFER; -+ -+typedef struct OMAPLFB_VSYNC_FLIP_ITEM_TAG { -+ -+ IMG_HANDLE hCmdComplete; -+ -+ IMG_SYS_PHYADDR *sSysAddr; -+ -+ IMG_UINT32 ui32SwapInterval; -+ -+ IMG_BOOL bValid; -+ -+ IMG_BOOL bFlipped; -+ -+ IMG_BOOL bCmdCompleted; -+ -+} OMAPLFB_VSYNC_FLIP_ITEM; -+ -+typedef struct PVRPDP_SWAPCHAIN_TAG { -+ -+ IMG_UINT32 ui32BufferCount; -+ -+ OMAPLFB_BUFFER *psBuffer; -+ -+ OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips; -+ -+ IMG_UINT32 ui32InsertIndex; -+ -+ IMG_UINT32 ui32RemoveIndex; -+ -+ IMG_VOID *pvRegs; -+ -+ PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable; -+ -+ IMG_BOOL bFlushCommands; -+ -+ IMG_UINT32 ui32SetFlushStateRefCount; -+ -+ IMG_BOOL bBlanked; -+ -+ spinlock_t *psSwapChainLock; -+} OMAPLFB_SWAPCHAIN; -+ -+typedef struct OMAPLFB_FBINFO_TAG { -+ IMG_SYS_PHYADDR sSysAddr; -+ IMG_CPU_VIRTADDR sCPUVAddr; -+ IMG_UINT32 ui32FBSize; -+ IMG_UINT32 ui32BufferSize; -+ IMG_UINT32 ui32RoundedBufferSize; -+ IMG_UINT32 ui32Width; -+ IMG_UINT32 ui32Height; -+ IMG_UINT32 ui32ByteStride; -+ -+ PVRSRV_PIXEL_FORMAT ePixelFormat; -+} OMAPLFB_FBINFO; -+ -+typedef struct OMAPLFB_DEVINFO_TAG { -+ IMG_UINT32 ui32DeviceID; -+ DISPLAY_INFO sDisplayInfo; -+ -+ OMAPLFB_BUFFER sSystemBuffer; -+ -+ DISPLAY_FORMAT sDisplayFormat; -+ -+ DISPLAY_DIMS sDisplayDim; -+ -+ PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable; -+ -+ PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable; -+ -+ OMAPLFB_FBINFO sFBInfo; -+ -+ IMG_UINT32 ui32RefCount; -+ -+ OMAPLFB_SWAPCHAIN *psSwapChain; -+ -+ IMG_BOOL bFlushCommands; -+ -+ IMG_DEV_VIRTADDR sDisplayDevVAddr; -+ -+ struct fb_info *psLINFBInfo; -+ -+ struct notifier_block sLINNotifBlock; -+ -+ IMG_BOOL bDeviceSuspended; -+ -+ spinlock_t SwapChainLock; -+} OMAPLFB_DEVINFO; -+ -+#define OMAPLFB_PAGE_SIZE 4096 -+#define OMAPLFB_PAGE_MASK (OMAPLFB_PAGE_SIZE - 1) -+#define OMAPLFB_PAGE_TRUNC (~OMAPLFB_PAGE_MASK) -+ -+#define OMAPLFB_PAGE_ROUNDUP(x) (((x) + 
OMAPLFB_PAGE_MASK) & OMAPLFB_PAGE_TRUNC) -+ -+#ifdef DEBUG -+#define DEBUG_PRINTK(x) printk x -+#else -+#define DEBUG_PRINTK(x) -+#endif -+ -+#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver" -+#define DRVNAME "omaplfb" -+#define DEVNAME DRVNAME -+#define DRIVER_PREFIX DRVNAME -+ -+PVRSRV_ERROR OMAPLFBInit(IMG_VOID); -+PVRSRV_ERROR OMAPLFBDeinit(IMG_VOID); -+ -+IMG_VOID OMAPLFBDriverSuspend(IMG_VOID); -+IMG_VOID OMAPLFBDriverResume(IMG_VOID); -+ -+IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size); -+IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID * pvMem); -+PVRSRV_ERROR OMAPLFBGetLibFuncAddr(IMG_CHAR * szFunctionName, -+ PFN_DC_GET_PVRJTABLE * ppfnFuncTable); -+PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN * psSwapChain); -+PVRSRV_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN * psSwapChain); -+IMG_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN * psSwapChain); -+IMG_VOID OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN * psSwapChain); -+IMG_VOID OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN * psSwapChain); -+IMG_VOID OMAPLFBEnableDisplayRegisterAccess(IMG_VOID); -+IMG_VOID OMAPLFBDisableDisplayRegisterAccess(IMG_VOID); -+IMG_VOID OMAPLFBFlip(OMAPLFB_SWAPCHAIN * psSwapChain, IMG_UINT32 aPhyAddr); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/omaplfb_linux.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/omaplfb_linux.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/omaplfb_linux.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/omaplfb_linux.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,251 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+ -+/*#include */ -+ -+#include "img_defs.h" -+#include "servicesext.h" -+#include "kerneldisplay.h" -+#include "omaplfb.h" -+#include "pvrmodule.h" -+ -+#include -+ -+MODULE_SUPPORTED_DEVICE(DEVNAME); -+ -+extern int omap_dispc_request_irq(unsigned long, void (*)(void *), void *); -+extern void omap_dispc_free_irq(unsigned long, void (*)(void *), void *); -+ -+#define unref__ __attribute__ ((unused)) -+ -+IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size) -+{ -+ return kmalloc(ui32Size, GFP_KERNEL); -+} -+ -+IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID * pvMem) -+{ -+ kfree(pvMem); -+} -+ -+PVRSRV_ERROR OMAPLFBGetLibFuncAddr(IMG_CHAR * szFunctionName, -+ PFN_DC_GET_PVRJTABLE * ppfnFuncTable) -+{ -+ if (strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ *ppfnFuncTable = PVRGetDisplayClassJTable; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID OMAPLFBEnableVSyncInterrupt(OMAPLFB_SWAPCHAIN * psSwapChain) -+{ -+} -+ -+IMG_VOID OMAPLFBDisableVSyncInterrupt(OMAPLFB_SWAPCHAIN * psSwapChain) -+{ -+} -+ -+static void OMAPLFBVSyncISR(void *arg, u32 mask) -+{ -+ (void)OMAPLFBVSyncIHandler((OMAPLFB_SWAPCHAIN *) arg); -+} -+ -+ -+PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN * psSwapChain) -+{ -+ if (omap_dispc_register_isr -+ (OMAPLFBVSyncISR, psSwapChain, DISPC_IRQ_VSYNC) != 0) -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN * psSwapChain) -+{ -+ omap_dispc_unregister_isr(OMAPLFBVSyncISR, psSwapChain, -+ DISPC_IRQ_VSYNC); -+ return PVRSRV_OK; -+} -+ -+IMG_VOID OMAPLFBEnableDisplayRegisterAccess(IMG_VOID) -+{ -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Attempting to call OMAPLFBEnableDisplayRegisterAccess\n"); -+ /*omap2_disp_get_dss(); */ -+} -+ -+IMG_VOID OMAPLFBDisableDisplayRegisterAccess(IMG_VOID) -+{ -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": Attempting to call OMAPLFBDisableDisplayRegisterAccess\n"); -+ /*omap2_disp_put_dss(); */ -+} -+ -+IMG_VOID OMAPLFBFlip(OMAPLFB_SWAPCHAIN * psSwapChain, IMG_UINT32 aPhyAddr) -+{ -+ omap_dispc_set_plane_ba0(OMAP_DSS_CHANNEL_LCD, OMAP_DSS_GFX, aPhyAddr); -+} -+ -+ -+static IMG_BOOL bDeviceSuspended; -+ -+static void OMAPLFBCommonSuspend(void) -+{ -+ if (bDeviceSuspended) { -+ return; -+ } -+ -+ OMAPLFBDriverSuspend(); -+ -+ bDeviceSuspended = IMG_TRUE; -+} -+ -+static int OMAPLFBDriverSuspend_Entry(struct platform_device unref__ * pDevice, -+ pm_message_t unref__ state) -+{ -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": OMAPLFBDriverSuspend_Entry\n")); -+ -+ OMAPLFBCommonSuspend(); -+ -+ return 0; -+} -+ -+static int OMAPLFBDriverResume_Entry(struct platform_device unref__ * pDevice) -+{ -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": OMAPLFBDriverResume_Entry\n")); -+ -+ OMAPLFBDriverResume(); -+ -+ bDeviceSuspended = IMG_FALSE; -+ -+ return 0; -+} -+ -+static void OMAPLFBDriverShutdown_Entry(struct platform_device unref__ * -+ pDevice) -+{ -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": OMAPLFBDriverShutdown_Entry\n")); -+ -+ OMAPLFBCommonSuspend(); -+} -+ -+static void OMAPLFBDeviceRelease_Entry(struct device unref__ * pDevice) -+{ -+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX -+ ": OMAPLFBDriverRelease_Entry\n")); -+ -+ 
OMAPLFBCommonSuspend(); -+} -+ -+static struct platform_driver omaplfb_driver = { -+ .driver = { -+ .name = DRVNAME, -+ }, -+ .suspend = OMAPLFBDriverSuspend_Entry, -+ .resume = OMAPLFBDriverResume_Entry, -+ .shutdown = OMAPLFBDriverShutdown_Entry, -+}; -+ -+static struct platform_device omaplfb_device = { -+ .name = DEVNAME, -+ .id = -1, -+ .dev = { -+ .release = OMAPLFBDeviceRelease_Entry} -+}; -+ -+static int __init OMAPLFB_Init(void) -+{ -+ int error; -+ -+ if (OMAPLFBInit() != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": OMAPLFB_Init: OMAPLFBInit failed\n"); -+ return -ENODEV; -+ } -+ if ((error = platform_driver_register(&omaplfb_driver)) != 0) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": OMAPLFB_Init: Unable to register platform driver (%d)\n", -+ error); -+ -+ goto ExitDeinit; -+ } -+ -+ if ((error = platform_device_register(&omaplfb_device)) != 0) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": OMAPLFB_Init: Unable to register platform device (%d)\n", -+ error); -+ -+ goto ExitDriverUnregister; -+ } -+ -+ return 0; -+ -+ExitDriverUnregister: -+ platform_driver_unregister(&omaplfb_driver); -+ -+ExitDeinit: -+ if (OMAPLFBDeinit() != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": OMAPLFB_Init: OMAPLFBDeinit failed\n"); -+ } -+ -+ return -ENODEV; -+} -+ -+static void __exit OMAPLFB_Cleanup(void) -+{ -+ platform_device_unregister(&omaplfb_device); -+ platform_driver_unregister(&omaplfb_driver); -+ -+ if (OMAPLFBDeinit() != PVRSRV_OK) { -+ printk(KERN_WARNING DRIVER_PREFIX -+ ": OMAPLFB_Cleanup: OMAPLFBDeinit failed\n"); -+ } -+} -+ -+module_init(OMAPLFB_Init); -+module_exit(OMAPLFB_Cleanup); -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/osfunc.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/osfunc.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/osfunc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/osfunc.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1752 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "services_headers.h" -+#include "mm.h" -+#include "pvrmmap.h" -+#include "mmap.h" -+#include "env_data.h" -+#include "proc.h" -+#include "mutex.h" -+#include "event.h" -+ -+#define EVENT_OBJECT_TIMEOUT_MS (100) -+ -+extern PVRSRV_LINUX_MUTEX gPVRSRVLock; -+ -+#define HOST_ALLOC_MEM_USING_KMALLOC ((IMG_HANDLE)0) -+#define HOST_ALLOC_MEM_USING_VMALLOC ((IMG_HANDLE)1) -+ -+#define LINUX_KMALLOC_LIMIT PAGE_SIZE /* 4k */ -+ -+#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+PVRSRV_ERROR OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID * ppvCpuVAddr, IMG_HANDLE * phBlockAlloc) -+#else -+PVRSRV_ERROR _OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID * ppvCpuVAddr, IMG_HANDLE * phBlockAlloc, -+ IMG_CHAR * pszFilename, IMG_UINT32 ui32Line) -+#endif -+{ -+ IMG_UINT32 ui32Threshold; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Flags); -+ -+ /* determine whether to go straight to vmalloc */ -+ ui32Threshold = LINUX_KMALLOC_LIMIT; -+ -+ if (ui32Size > ui32Threshold) { -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, -+ pszFilename, ui32Line); -+#else -+ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED); -+#endif -+ if (!*ppvCpuVAddr) -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ -+ if (phBlockAlloc) -+ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC; -+ } else { -+ /* default - try kmalloc first */ -+ -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ *ppvCpuVAddr = _KMallocWrapper(ui32Size, pszFilename, ui32Line); -+#else -+ *ppvCpuVAddr = KMallocWrapper(ui32Size); -+#endif -+ -+ if (!*ppvCpuVAddr) -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ -+ if (phBlockAlloc) -+ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC; -+ -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+PVRSRV_ERROR OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc) -+#else -+PVRSRV_ERROR _OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, -+ IMG_CHAR * pszFilename, IMG_UINT32 ui32Line) -+#endif -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32Flags); -+ -+ if (ui32Size > LINUX_KMALLOC_LIMIT) { -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line); -+#else -+ VFreeWrapper(pvCpuVAddr); -+#endif -+ } else { -+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) -+ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line); -+#else -+ KFreeWrapper(pvCpuVAddr); -+#endif -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+OSAllocPages(IMG_UINT32 ui32AllocFlags, -+ IMG_UINT32 ui32Size, -+ IMG_VOID ** ppvCpuVAddr, IMG_HANDLE * phOSMemHandle) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ -+ -+ switch (ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) { -+ case PVRSRV_HAP_KERNEL_ONLY: -+ { -+ psLinuxMemArea = -+ NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags); -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ break; -+ } -+ case PVRSRV_HAP_SINGLE_PROCESS: -+ { -+ -+ psLinuxMemArea = -+ NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags); -+ if (!psLinuxMemArea) { -+ return 
PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ PVRMMapRegisterArea("Import Arena", psLinuxMemArea, -+ ui32AllocFlags); -+ break; -+ } -+ -+ case PVRSRV_HAP_MULTI_PROCESS: -+ { -+ psLinuxMemArea = -+ NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags); -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ PVRMMapRegisterArea("Import Arena", psLinuxMemArea, -+ ui32AllocFlags); -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", -+ ui32AllocFlags)); -+ *ppvCpuVAddr = NULL; -+ *phOSMemHandle = (IMG_HANDLE) 0; -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); -+ *phOSMemHandle = psLinuxMemArea; -+ -+ LinuxMemAreaRegister(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, -+ IMG_VOID * pvCpuVAddr, IMG_HANDLE hOSMemHandle) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); -+ -+ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle; -+ -+ switch (ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) { -+ case PVRSRV_HAP_KERNEL_ONLY: -+ break; -+ case PVRSRV_HAP_SINGLE_PROCESS: -+ case PVRSRV_HAP_MULTI_PROCESS: -+ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, " -+ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!", -+ ui32AllocFlags, ui32Bytes, pvCpuVAddr, -+ hOSMemHandle)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid flags 0x%x\n", -+ __FUNCTION__, ui32AllocFlags)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ LinuxMemAreaDeepFree(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, -+ IMG_UINT32 ui32ByteOffset, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, IMG_HANDLE * phOSMemHandleRet) -+{ -+ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ psParentLinuxMemArea = (LinuxMemArea *) hOSMemHandle; -+ -+ psLinuxMemArea = -+ NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes); -+ if (!psLinuxMemArea) { -+ *phOSMemHandleRet = NULL; -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ *phOSMemHandleRet = psLinuxMemArea; -+ -+ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) { -+ return PVRSRV_OK; -+ } -+ -+ if (psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO) { -+ eError = PVRMMapRegisterArea("Physical", psLinuxMemArea, 0); -+ if (eError != PVRSRV_OK) { -+ goto failed_register_area; -+ } -+ } else if (psParentLinuxMemArea->eAreaType == -+ LINUX_MEM_AREA_ALLOC_PAGES) { -+ eError = PVRMMapRegisterArea("Import Arena", psLinuxMemArea, 0); -+ if (eError != PVRSRV_OK) { -+ goto failed_register_area; -+ } -+ } -+ -+ return PVRSRV_OK; -+ -+failed_register_area: -+ *phOSMemHandleRet = NULL; -+ LinuxMemAreaDeepFree(psLinuxMemArea); -+ return eError; -+} -+ -+PVRSRV_ERROR -+OSReleaseSubMemHandle(IMG_VOID * hOSMemHandle, IMG_UINT32 ui32Flags) -+{ -+ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea; -+ PVRSRV_ERROR eError; -+ -+ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle; -+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC); -+ -+ psParentLinuxMemArea = -+ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea; -+ -+ if (!(ui32Flags & PVRSRV_HAP_KERNEL_ONLY) -+ && (psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO -+ || psParentLinuxMemArea->eAreaType == -+ LINUX_MEM_AREA_ALLOC_PAGES) -+ ) { -+ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); -+ if 
(eError != PVRSRV_OK) { -+ return eError; -+ } -+ } -+ LinuxMemAreaDeepFree(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_CPU_PHYADDR -+OSMemHandleToCpuPAddr(IMG_VOID * hOSMemHandle, IMG_UINT32 ui32ByteOffset) -+{ -+ PVR_ASSERT(hOSMemHandle); -+ -+ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset); -+} -+ -+IMG_VOID OSMemCopy(IMG_VOID * pvDst, IMG_VOID * pvSrc, IMG_UINT32 ui32Size) -+{ -+ memcpy(pvDst, pvSrc, ui32Size); -+} -+ -+IMG_VOID OSMemSet(IMG_VOID * pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size) -+{ -+ memset(pvDest, (int)ui8Value, (size_t) ui32Size); -+} -+ -+IMG_CHAR *OSStringCopy(IMG_CHAR * pszDest, const IMG_CHAR * pszSrc) -+{ -+ return (strcpy(pszDest, pszSrc)); -+} -+ -+IMG_INT32 OSSNPrintf(IMG_CHAR * pStr, IMG_UINT32 ui32Size, -+ const IMG_CHAR * pszFormat, ...) -+{ -+ va_list argList; -+ IMG_INT32 iCount; -+ -+ va_start(argList, pszFormat); -+ iCount = vsnprintf(pStr, (size_t) ui32Size, pszFormat, argList); -+ va_end(argList); -+ -+ return iCount; -+} -+ -+IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE * psResource, IMG_UINT32 ui32ID) -+{ -+ volatile IMG_UINT32 *pui32Access = -+ (volatile IMG_UINT32 *)&psResource->ui32Lock; -+ -+ if (*pui32Access) { -+ if (psResource->ui32ID == ui32ID) { -+ psResource->ui32ID = 0; -+ *pui32Access = 0; -+ } else { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "OSBreakResourceLock: Resource is not locked for this process.")); -+ } -+ } else { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "OSBreakResourceLock: Resource is not locked")); -+ } -+} -+ -+PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE * psResource) -+{ -+ psResource->ui32ID = 0; -+ psResource->ui32Lock = 0; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE * psResource) -+{ -+ OSBreakResourceLock(psResource, psResource->ui32ID); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSInitEnvData(IMG_PVOID * ppvEnvSpecificData) -+{ -+ ENV_DATA *psEnvData; -+ -+ if (OSAllocMem -+ (PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), -+ (IMG_VOID *) & psEnvData, IMG_NULL) != PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ memset(psEnvData, 0, sizeof(*psEnvData)); -+ -+ if (OSAllocMem -+ (PVRSRV_OS_PAGEABLE_HEAP, -+ PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, -+ &psEnvData->pvBridgeData, IMG_NULL) != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, -+ IMG_NULL); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psEnvData->bMISRInstalled = IMG_FALSE; -+ psEnvData->bLISRInstalled = IMG_FALSE; -+ -+ *ppvEnvSpecificData = psEnvData; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData) -+{ -+ ENV_DATA *psEnvData = (ENV_DATA *) pvEnvSpecificData; -+ -+ PVR_ASSERT(!psEnvData->bMISRInstalled); -+ PVR_ASSERT(!psEnvData->bLISRInstalled); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, -+ psEnvData->pvBridgeData, IMG_NULL); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, -+ IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID OSReleaseThreadQuanta(IMG_VOID) -+{ -+ schedule(); -+} -+ -+IMG_UINT32 OSClockus(IMG_VOID) -+{ -+ unsigned long time, j = jiffies; -+ -+ time = j * (1000000 / HZ); -+ -+ return time; -+} -+ -+IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus) -+{ -+ udelay(ui32Timeus); -+} -+ -+IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID) -+{ -+ if (in_interrupt()) { -+ return KERNEL_ID; -+ } -+ return task_tgid_nr(current); -+} -+ -+IMG_UINT32 OSGetPageSize(IMG_VOID) -+{ -+ return PAGE_SIZE; -+} -+ -+static irqreturn_t 
DeviceISRWrapper(int irq, void *dev_id -+ ) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_BOOL bStatus = IMG_FALSE; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) dev_id; -+ if (!psDeviceNode) { -+ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n")); -+ goto out; -+ } -+ -+ bStatus = PVRSRVDeviceLISR(psDeviceNode); -+ -+ if (bStatus) { -+ SYS_DATA *psSysData = psDeviceNode->psSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ queue_work(psEnvData->psMISRWorkqueue, &psEnvData->sMISRWork); -+ } -+ -+out: -+ return bStatus ? IRQ_HANDLED : IRQ_NONE; -+} -+ -+static irqreturn_t SystemISRWrapper(int irq, void *dev_id -+ ) -+{ -+ SYS_DATA *psSysData; -+ IMG_BOOL bStatus = IMG_FALSE; -+ -+ psSysData = (SYS_DATA *) dev_id; -+ if (!psSysData) { -+ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n")); -+ goto out; -+ } -+ -+ bStatus = PVRSRVSystemLISR(psSysData); -+ -+ if (bStatus) { -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ queue_work(psEnvData->psMISRWorkqueue, &psEnvData->sMISRWork); -+ } -+ -+out: -+ return bStatus ? IRQ_HANDLED : IRQ_NONE; -+} -+ -+PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID * pvSysData, -+ IMG_UINT32 ui32Irq, -+ IMG_CHAR * pszISRName, IMG_VOID * pvDeviceNode) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (psEnvData->bLISRInstalled) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x", -+ psEnvData->ui32IRQ, psEnvData->pvISRCookie)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x", -+ pszISRName, ui32Irq, pvDeviceNode)); -+ -+ if (request_irq(ui32Irq, DeviceISRWrapper, -+ IRQF_SHARED -+ , pszISRName, pvDeviceNode)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", -+ ui32Irq)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psEnvData->ui32IRQ = ui32Irq; -+ psEnvData->pvISRCookie = pvDeviceNode; -+ psEnvData->bLISRInstalled = IMG_TRUE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (!psEnvData->bLISRInstalled) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUninstallDeviceLISR: No LISR has been installed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x", -+ psEnvData->ui32IRQ, psEnvData->pvISRCookie)); -+ -+ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie); -+ -+ psEnvData->bLISRInstalled = IMG_FALSE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID * pvSysData, IMG_UINT32 ui32Irq) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (psEnvData->bLISRInstalled) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x", -+ psEnvData->ui32IRQ, psEnvData->pvISRCookie)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq, -+ pvSysData)); -+ -+ if (request_irq(ui32Irq, SystemISRWrapper, -+ IRQF_SHARED -+ , "PowerVR", pvSysData)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", -+ ui32Irq)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psEnvData->ui32IRQ = ui32Irq; -+ psEnvData->pvISRCookie = 
pvSysData; -+ psEnvData->bLISRInstalled = IMG_TRUE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (!psEnvData->bLISRInstalled) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUninstallSystemLISR: No LISR has been installed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x", -+ psEnvData->ui32IRQ, psEnvData->pvISRCookie)); -+ -+ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie); -+ -+ psEnvData->bLISRInstalled = IMG_FALSE; -+ -+ return PVRSRV_OK; -+} -+ -+static void MISRWrapper(struct work_struct *work) -+{ -+ ENV_DATA *psEnvData = container_of(work, ENV_DATA, sMISRWork); -+ SYS_DATA *psSysData = (SYS_DATA *) psEnvData->pvSysData; -+ PVRSRVMISR(psSysData); -+} -+ -+PVRSRV_ERROR OSInstallMISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (psEnvData->bMISRInstalled) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSInstallMISR: An MISR has already been installed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Installing MISR with cookie %x", pvSysData)); -+ -+ psEnvData->pvSysData = pvSysData; -+ psEnvData->psMISRWorkqueue = create_singlethread_workqueue("sgx_misr"); -+ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper); -+ -+ psEnvData->bMISRInstalled = IMG_TRUE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSUninstallMISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (!psEnvData->bMISRInstalled) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUninstallMISR: No MISR has been installed")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Uninstalling MISR")); -+ -+ flush_workqueue(psEnvData->psMISRWorkqueue); -+ destroy_workqueue(psEnvData->psMISRWorkqueue); -+ -+ psEnvData->bMISRInstalled = IMG_FALSE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSScheduleMISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (psEnvData->bMISRInstalled) { -+ queue_work(psEnvData->psMISRWorkqueue, &psEnvData->sMISRWork); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+#define OS_TAS(p) xchg((p), 1) -+PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE * psResource, IMG_UINT32 ui32ID) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (!OS_TAS(&psResource->ui32Lock)) -+ psResource->ui32ID = ui32ID; -+ else -+ eError = PVRSRV_ERROR_GENERIC; -+ -+ return eError; -+} -+ -+PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE * psResource, IMG_UINT32 ui32ID) -+{ -+ volatile IMG_UINT32 *pui32Access = -+ (volatile IMG_UINT32 *)&psResource->ui32Lock; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (*pui32Access) { -+ if (psResource->ui32ID == ui32ID) { -+ psResource->ui32ID = 0; -+ *pui32Access = 0; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUnlockResource: Resource %p is not locked with expected value.", -+ psResource)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Should be %x is actually %x", -+ ui32ID, psResource->ui32ID)); -+ eError = PVRSRV_ERROR_GENERIC; -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUnlockResource: Resource %p is not locked", -+ psResource)); -+ eError = PVRSRV_ERROR_GENERIC; -+ } -+ -+ return eError; -+} -+ -+IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE * psResource, IMG_UINT32 ui32ID) -+{ -+ volatile IMG_UINT32 
*pui32Access = -+ (volatile IMG_UINT32 *)&psResource->ui32Lock; -+ -+ return (*(volatile IMG_UINT32 *)pui32Access == 1) -+ && (psResource->ui32ID == ui32ID) -+ ? IMG_TRUE : IMG_FALSE; -+} -+ -+IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID * pvLinAddr) -+{ -+ IMG_CPU_PHYADDR CpuPAddr; -+ -+ CpuPAddr.uiAddr = (IMG_UINTPTR_T) VMallocToPhys(pvLinAddr); -+ -+ return CpuPAddr; -+} -+ -+IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, -+ IMG_HANDLE * phOSMemHandle) -+{ -+ if (phOSMemHandle) { -+ *phOSMemHandle = (IMG_HANDLE) 0; -+ } -+ -+ if (ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) { -+ IMG_VOID *pvIORemapCookie; -+ pvIORemapCookie = -+ IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags); -+ if (pvIORemapCookie == IMG_NULL) { -+ return NULL; -+ } -+ return pvIORemapCookie; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY " -+ " (Use OSReservePhys otherwise)")); -+ *phOSMemHandle = (IMG_HANDLE) 0; -+ return NULL; -+ } -+ -+ PVR_ASSERT(0); -+ return NULL; -+} -+ -+IMG_BOOL -+OSUnMapPhysToLin(IMG_VOID * pvLinAddr, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc) -+{ -+ PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, -+ ui32Bytes, pvLinAddr)); -+ -+ PVR_UNREFERENCED_PARAMETER(hPageAlloc); -+ -+ if (ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) { -+ IOUnmapWrapper(pvLinAddr); -+ return IMG_TRUE; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY " -+ " (Use OSUnReservePhys otherwise)")); -+ return IMG_FALSE; -+ } -+ -+ PVR_ASSERT(0); -+ return IMG_FALSE; -+} -+ -+static PVRSRV_ERROR -+RegisterExternalMem(IMG_SYS_PHYADDR * pBasePAddr, -+ IMG_VOID * pvCPUVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_BOOL bPhysContig, -+ IMG_UINT32 ui32MappingFlags, IMG_HANDLE * phOSMemHandle) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ -+ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) { -+ case PVRSRV_HAP_KERNEL_ONLY: -+ { -+ psLinuxMemArea = -+ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, -+ ui32Bytes, bPhysContig, -+ ui32MappingFlags); -+ -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ break; -+ } -+ case PVRSRV_HAP_SINGLE_PROCESS: -+ { -+ psLinuxMemArea = -+ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, -+ ui32Bytes, bPhysContig, -+ ui32MappingFlags); -+ -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ PVRMMapRegisterArea("Physical", psLinuxMemArea, -+ ui32MappingFlags); -+ break; -+ } -+ case PVRSRV_HAP_MULTI_PROCESS: -+ { -+ psLinuxMemArea = -+ NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, -+ ui32Bytes, bPhysContig, -+ ui32MappingFlags); -+ -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ PVRMMapRegisterArea("Physical", psLinuxMemArea, -+ ui32MappingFlags); -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "OSRegisterMem : invalid flags 0x%x\n", -+ ui32MappingFlags)); -+ *phOSMemHandle = (IMG_HANDLE) 0; -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ *phOSMemHandle = (IMG_HANDLE) psLinuxMemArea; -+ -+ LinuxMemAreaRegister(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, -+ IMG_VOID * pvCPUVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, IMG_HANDLE * phOSMemHandle) -+{ -+ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr); -+ -+ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, -+ ui32MappingFlags, phOSMemHandle); -+} -+ -+PVRSRV_ERROR 
OSRegisterDiscontigMem(IMG_SYS_PHYADDR * pBasePAddr, -+ IMG_VOID * pvCPUVAddr, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, -+ IMG_HANDLE * phOSMemHandle) -+{ -+ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, -+ ui32MappingFlags, phOSMemHandle); -+} -+ -+PVRSRV_ERROR -+OSUnRegisterMem(IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, IMG_HANDLE hOSMemHandle) -+{ -+ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *) hOSMemHandle; -+ -+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); -+ -+ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) { -+ case PVRSRV_HAP_KERNEL_ONLY: -+ break; -+ case PVRSRV_HAP_SINGLE_PROCESS: -+ case PVRSRV_HAP_MULTI_PROCESS: -+ { -+ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) != -+ PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s(%p, %d, 0x%08X, %p) FAILED!", -+ __FUNCTION__, pvCpuVAddr, ui32Bytes, -+ ui32MappingFlags, hOSMemHandle)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUnRegisterMem : invalid flags 0x%x", -+ ui32MappingFlags)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ -+ LinuxMemAreaDeepFree(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE hOSMemHandle) -+{ -+ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle); -+} -+ -+PVRSRV_ERROR -+OSReservePhys(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, -+ IMG_VOID ** ppvCpuVAddr, IMG_HANDLE * phOSMemHandle) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ -+ -+ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) { -+ case PVRSRV_HAP_KERNEL_ONLY: -+ { -+ -+ psLinuxMemArea = -+ NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, -+ ui32MappingFlags); -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ break; -+ } -+ case PVRSRV_HAP_SINGLE_PROCESS: -+ { -+ -+ psLinuxMemArea = -+ NewIOLinuxMemArea(BasePAddr, ui32Bytes, -+ ui32MappingFlags); -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ PVRMMapRegisterArea("Physical", psLinuxMemArea, -+ ui32MappingFlags); -+ break; -+ } -+ case PVRSRV_HAP_MULTI_PROCESS: -+ { -+ psLinuxMemArea = -+ NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, -+ ui32MappingFlags); -+ if (!psLinuxMemArea) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ PVRMMapRegisterArea("Physical", psLinuxMemArea, -+ ui32MappingFlags); -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "OSMapPhysToLin : invalid flags 0x%x\n", -+ ui32MappingFlags)); -+ *ppvCpuVAddr = NULL; -+ *phOSMemHandle = (IMG_HANDLE) 0; -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ *phOSMemHandle = (IMG_HANDLE) psLinuxMemArea; -+ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); -+ -+ LinuxMemAreaRegister(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+OSUnReservePhys(IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32MappingFlags, IMG_HANDLE hOSMemHandle) -+{ -+ LinuxMemArea *psLinuxMemArea; -+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); -+ -+ psLinuxMemArea = (LinuxMemArea *) hOSMemHandle; -+ -+ switch (ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) { -+ case PVRSRV_HAP_KERNEL_ONLY: -+ break; -+ case PVRSRV_HAP_SINGLE_PROCESS: -+ case PVRSRV_HAP_MULTI_PROCESS: -+ { -+ if (PVRMMapRemoveRegisteredArea(psLinuxMemArea) != -+ PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s(%p, %d, 0x%08X, %p) FAILED!", -+ __FUNCTION__, pvCpuVAddr, ui32Bytes, -+ ui32MappingFlags, hOSMemHandle)); -+ return PVRSRV_ERROR_GENERIC; 
-+ } -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSUnMapPhysToLin : invalid flags 0x%x", -+ ui32MappingFlags)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ -+ LinuxMemAreaDeepFree(psLinuxMemArea); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, -+ IMG_CPU_VIRTADDR * pvLinAddr, -+ IMG_CPU_PHYADDR * psPhysAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ PVR_UNREFERENCED_PARAMETER(pvLinAddr); -+ PVR_UNREFERENCED_PARAMETER(psPhysAddr); -+ PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__)); -+ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+} -+ -+PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_CPU_PHYADDR psPhysAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ PVR_UNREFERENCED_PARAMETER(pvLinAddr); -+ PVR_UNREFERENCED_PARAMETER(psPhysAddr); -+ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__)); -+ return PVRSRV_OK; -+} -+ -+IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) -+{ -+ return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset); -+} -+ -+IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value) -+{ -+ writel(ui32Value, pvLinRegBaseAddr + ui32Offset); -+} -+ -+ -+typedef struct TIMER_CALLBACK_DATA_TAG { -+ PFN_TIMER_FUNC pfnTimerFunc; -+ IMG_VOID *pvData; -+ struct timer_list sTimer; -+ IMG_UINT32 ui32Delay; -+ IMG_BOOL bActive; -+} TIMER_CALLBACK_DATA; -+ -+static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA *) ui32Data; -+ -+ if (!psTimerCBData->bActive) -+ return; -+ -+ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); -+ -+ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies); -+} -+ -+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID * pvData, -+ IMG_UINT32 ui32MsTimeout) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData; -+ -+ if (!pfnTimerFunc) { -+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback")); -+ return IMG_NULL; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(TIMER_CALLBACK_DATA), -+ (IMG_VOID **) & psTimerCBData, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSAddTimer: failed to allocate memory for TIMER_CALLBACK_DATA")); -+ return IMG_NULL; -+ } -+ -+ psTimerCBData->pfnTimerFunc = pfnTimerFunc; -+ psTimerCBData->pvData = pvData; -+ psTimerCBData->bActive = IMG_FALSE; -+ -+ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000) -+ ? 
1 : ((HZ * ui32MsTimeout) / 1000); -+ -+ init_timer(&psTimerCBData->sTimer); -+ -+ psTimerCBData->sTimer.function = OSTimerCallbackWrapper; -+ psTimerCBData->sTimer.data = (IMG_UINT32) psTimerCBData; -+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; -+ -+ return (IMG_HANDLE) psTimerCBData; -+} -+ -+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA *) hTimer; -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), -+ psTimerCBData, IMG_NULL); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA *) hTimer; -+ -+ psTimerCBData->bActive = IMG_TRUE; -+ -+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; -+ add_timer(&psTimerCBData->sTimer); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA *) hTimer; -+ -+ psTimerCBData->bActive = IMG_FALSE; -+ -+ del_timer_sync(&psTimerCBData->sTimer); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR * pszName, -+ PVRSRV_EVENTOBJECT * psEventObject) -+{ -+ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psEventObject) { -+ if (pszName) { -+ -+ strncpy(psEventObject->szName, pszName, -+ EVENTOBJNAME_MAXLENGTH); -+ } else { -+ -+ static IMG_UINT16 ui16NameIndex = 0; -+ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, -+ "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++); -+ } -+ -+ if (LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != -+ PVRSRV_OK) { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectCreate: psEventObject is not a valid pointer")); -+ eError = PVRSRV_ERROR_GENERIC; -+ } -+ -+ return eError; -+ -+} -+ -+PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT * psEventObject) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psEventObject) { -+ if (psEventObject->hOSEventKM) { -+ LinuxEventObjectListDestroy(psEventObject->hOSEventKM); -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectDestroy: hOSEventKM is not a valid pointer")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectDestroy: psEventObject is not a valid pointer")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (hOSEventKM) { -+ eError = -+ LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS); -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectWait: hOSEventKM is not a valid handle")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT * psEventObject, -+ IMG_HANDLE * phOSEvent) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psEventObject) { -+ if (LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != -+ PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectCreate: psEventObject is not a valid pointer")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT * psEventObject, -+ IMG_HANDLE hOSEventKM) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psEventObject) { -+ if (LinuxEventObjectDelete -+ (psEventObject->hOSEventKM, 
hOSEventKM) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "LinuxEventObjectDelete: failed")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectDestroy: psEventObject is not a valid pointer")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+ -+} -+ -+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (hOSEventKM) { -+ eError = LinuxEventObjectSignal(hOSEventKM); -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OSEventObjectSignal: hOSEventKM is not a valid handle")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+} -+ -+IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID) -+{ -+ return capable(CAP_SYS_MODULE) != 0; -+} -+ -+PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, -+ IMG_VOID * pvDest, -+ IMG_VOID * pvSrc, IMG_UINT32 ui32Bytes) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvProcess); -+ -+ if (copy_to_user(pvDest, pvSrc, ui32Bytes) == 0) -+ return PVRSRV_OK; -+ else -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, -+ IMG_VOID * pvDest, -+ IMG_VOID * pvSrc, IMG_UINT32 ui32Bytes) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvProcess); -+ -+ if (copy_from_user(pvDest, pvSrc, ui32Bytes) == 0) -+ return PVRSRV_OK; -+ else -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID * pvUserPtr, -+ IMG_UINT32 ui32Bytes) -+{ -+ int linuxType; -+ -+ if (eVerification == PVR_VERIFY_READ) -+ linuxType = VERIFY_READ; -+ else if (eVerification == PVR_VERIFY_WRITE) -+ linuxType = VERIFY_WRITE; -+ else { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown eVerification", -+ __FUNCTION__)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ return (IMG_BOOL) access_ok(linuxType, pvUserPtr, ui32Bytes); -+} -+ -+typedef enum _eWrapMemType_ { -+ WRAP_TYPE_CLEANUP, -+ WRAP_TYPE_GET_USER_PAGES, -+ WRAP_TYPE_FIND_VMA_PAGES, -+ WRAP_TYPE_FIND_VMA_PFN -+} eWrapMemType; -+ -+typedef struct _sWrapMemInfo_ { -+ eWrapMemType eType; -+ int iNumPages; -+ struct page **ppsPages; -+ IMG_SYS_PHYADDR *psPhysAddr; -+ int iPageOffset; -+ int iContiguous; -+#if defined(DEBUG) -+ unsigned long ulStartAddr; -+ unsigned long ulBeyondEndAddr; -+ struct vm_area_struct *psVMArea; -+#endif -+} sWrapMemInfo; -+ -+static void CheckPagesContiguous(sWrapMemInfo * psInfo) -+{ -+ unsigned ui; -+ IMG_UINT32 ui32AddrChk; -+ -+ BUG_ON(psInfo == IMG_NULL); -+ -+ psInfo->iContiguous = 1; -+ -+ for (ui = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr; -+ ui < psInfo->iNumPages; ui++, ui32AddrChk += PAGE_SIZE) { -+ if (psInfo->psPhysAddr[ui].uiAddr != ui32AddrChk) { -+ psInfo->iContiguous = 0; -+ break; -+ } -+ } -+} -+ -+static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea, -+ unsigned long ulCPUVAddr) -+{ -+ pgd_t *psPGD; -+ pud_t *psPUD; -+ pmd_t *psPMD; -+ pte_t *psPTE; -+ struct mm_struct *psMM = psVMArea->vm_mm; -+ unsigned long ulPFN; -+ spinlock_t *psPTLock; -+ struct page *psPage; -+ -+ psPGD = pgd_offset(psMM, ulCPUVAddr); -+ if (pgd_none(*psPGD) || pgd_bad(*psPGD)) -+ return NULL; -+ -+ psPUD = pud_offset(psPGD, ulCPUVAddr); -+ if (pud_none(*psPUD) || pud_bad(*psPUD)) -+ return NULL; -+ -+ psPMD = pmd_offset(psPUD, ulCPUVAddr); -+ if (pmd_none(*psPMD) || pmd_bad(*psPMD)) -+ return NULL; -+ -+ psPage = NULL; -+ -+ psPTE = pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock); -+ if (pte_none(*psPTE) || !pte_present(*psPTE) || !pte_write(*psPTE)) -+ goto exit_unlock; -+ -+ ulPFN = pte_pfn(*psPTE); -+ if (!pfn_valid(ulPFN)) -+ goto exit_unlock; -+ 
-+ psPage = pfn_to_page(ulPFN); -+ -+ get_page(psPage); -+ -+exit_unlock: -+ pte_unmap_unlock(psPTE, psPTLock); -+ -+ return psPage; -+} -+ -+PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem, -+ IMG_BOOL bUseLock) -+{ -+ sWrapMemInfo *psInfo = (sWrapMemInfo *) hOSWrapMem; -+ unsigned ui; -+ -+ BUG_ON(psInfo == IMG_NULL); -+ -+#if defined(DEBUG) -+ switch (psInfo->eType) { -+ case WRAP_TYPE_FIND_VMA_PAGES: -+ -+ case WRAP_TYPE_FIND_VMA_PFN: -+ { -+ struct vm_area_struct *psVMArea; -+ -+ if (bUseLock) -+ down_read(&current->mm->mmap_sem); -+ -+ psVMArea = find_vma(current->mm, psInfo->ulStartAddr); -+ if (psVMArea == NULL) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: Couldn't find memory region containing start address %lx", -+ psInfo->ulStartAddr); -+ -+ if (bUseLock) -+ up_read(&current->mm->mmap_sem); -+ -+ break; -+ } -+ -+ if (psInfo->psVMArea != psVMArea) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: vm_area_struct has a different address from the one used in ImportMem (%p != %p)", -+ psVMArea, psInfo->psVMArea); -+ } -+ -+ if (psInfo->ulStartAddr < psVMArea->vm_start) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: Start address %lx is outside of the region returned by find_vma", -+ psInfo->ulStartAddr); -+ } -+ -+ if (psInfo->ulBeyondEndAddr > psVMArea->vm_end) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: End address %lx is outside of the region returned by find_vma", -+ psInfo->ulBeyondEndAddr); -+ } -+ -+ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != -+ (VM_IO | VM_RESERVED)) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", -+ psVMArea->vm_flags); -+ } -+ -+ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != -+ (VM_READ | VM_WRITE)) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: OSWrapMemReleasePages: No read/write access to memory region (VMA flags: 0x%lx)", -+ psVMArea->vm_flags); -+ } -+ -+ if (bUseLock) -+ up_read(&current->mm->mmap_sem); -+ -+ break; -+ } -+ default: -+ break; -+ } -+#endif -+ -+ switch (psInfo->eType) { -+ case WRAP_TYPE_CLEANUP: -+ break; -+ case WRAP_TYPE_FIND_VMA_PFN: -+ break; -+ case WRAP_TYPE_GET_USER_PAGES: -+ { -+ for (ui = 0; ui < psInfo->iNumPages; ui++) { -+ struct page *psPage = psInfo->ppsPages[ui]; -+ -+ if (!PageReserved(psPage)) ; -+ { -+ SetPageDirty(psPage); -+ } -+ page_cache_release(psPage); -+ } -+ break; -+ } -+ case WRAP_TYPE_FIND_VMA_PAGES: -+ { -+ for (ui = 0; ui < psInfo->iNumPages; ui++) { -+ put_page_testzero(psInfo->ppsPages[ui]); -+ } -+ break; -+ } -+ default: -+ { -+ printk(KERN_WARNING -+ ": OSCpuVToPageListRelease: Unknown wrap type (%d)", -+ psInfo->eType); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ if (psInfo->ppsPages != IMG_NULL) { -+ kfree(psInfo->ppsPages); -+ } -+ -+ if (psInfo->psPhysAddr != IMG_NULL) { -+ kfree(psInfo->psPhysAddr); -+ } -+ -+ kfree(psInfo); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID *pvCPUVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_SYS_PHYADDR *psSysPAddr, -+ IMG_HANDLE *phOSWrapMem, -+ IMG_BOOL bUseLock) -+{ -+ unsigned long ulStartAddrOrig = (unsigned long)pvCPUVAddr; -+ unsigned long ulAddrRangeOrig = (unsigned long)ui32Bytes; -+ unsigned long ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig; -+ unsigned long ulStartAddr; -+ unsigned long ulAddrRange; -+ unsigned long ulBeyondEndAddr; -+ unsigned long ulAddr; -+ int iNumPagesMapped; -+ unsigned ui; -+ struct vm_area_struct *psVMArea; -+ sWrapMemInfo *psInfo; -+ -+ ulStartAddr = 
ulStartAddrOrig & PAGE_MASK; -+ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig); -+ ulAddrRange = ulBeyondEndAddr - ulStartAddr; -+ -+ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL); -+ if (psInfo == NULL) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Couldn't allocate information structure\n"); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ memset(psInfo, 0, sizeof(*psInfo)); -+ -+#if defined(DEBUG) -+ psInfo->ulStartAddr = ulStartAddrOrig; -+ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig; -+#endif -+ -+ psInfo->iNumPages = ulAddrRange >> PAGE_SHIFT; -+ psInfo->iPageOffset = ulStartAddrOrig & ~PAGE_MASK; -+ -+ psInfo->psPhysAddr = -+ kmalloc(psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), -+ GFP_KERNEL); -+ if (psInfo->psPhysAddr == NULL) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Couldn't allocate page array\n"); -+ goto error_free; -+ } -+ -+ psInfo->ppsPages = -+ kmalloc(psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL); -+ if (psInfo->ppsPages == NULL) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Couldn't allocate page array\n"); -+ goto error_free; -+ } -+ -+ if (bUseLock) -+ down_read(&current->mm->mmap_sem); -+ -+ iNumPagesMapped = -+ get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, -+ 1, 0, psInfo->ppsPages, NULL); -+ -+ if (bUseLock) -+ up_read(&current->mm->mmap_sem); -+ -+ -+ if (iNumPagesMapped >= 0) { -+ -+ if (iNumPagesMapped != psInfo->iNumPages) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Couldn't map all the pages needed (wanted: %d, got %d \n)", -+ psInfo->iNumPages, iNumPagesMapped); -+ -+ for (ui = 0; ui < iNumPagesMapped; ui++) { -+ page_cache_release(psInfo->ppsPages[ui]); -+ -+ } -+ goto error_free; -+ } -+ -+ for (ui = 0; ui < psInfo->iNumPages; ui++) { -+ IMG_CPU_PHYADDR CPUPhysAddr; -+ -+ CPUPhysAddr.uiAddr = -+ page_to_pfn(psInfo->ppsPages[ui]) << PAGE_SHIFT; -+ psInfo->psPhysAddr[ui] = -+ SysCpuPAddrToSysPAddr(CPUPhysAddr); -+ psSysPAddr[ui] = psInfo->psPhysAddr[ui]; -+ -+ } -+ -+ psInfo->eType = WRAP_TYPE_GET_USER_PAGES; -+ -+ goto exit_check; -+ } -+ -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: get_user_pages failed (%d), trying something else \n", -+ iNumPagesMapped); -+ -+ if (bUseLock) -+ down_read(&current->mm->mmap_sem); -+ -+ psVMArea = find_vma(current->mm, ulStartAddrOrig); -+ if (psVMArea == NULL) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Couldn't find memory region containing start address %lx \n", -+ ulStartAddrOrig); -+ -+ goto error_release_mmap_sem; -+ } -+#if defined(DEBUG) -+ psInfo->psVMArea = psVMArea; -+#endif -+ -+ if (ulStartAddrOrig < psVMArea->vm_start) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Start address %lx is outside of the region returned by find_vma\n", -+ ulStartAddrOrig); -+ goto error_release_mmap_sem; -+ } -+ -+ if (ulBeyondEndAddrOrig > psVMArea->vm_end) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: End address %lx is outside of the region returned by find_vma\n", -+ ulBeyondEndAddrOrig); -+ goto error_release_mmap_sem; -+ } -+ -+ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != -+ (VM_IO | VM_RESERVED)) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)\n", -+ psVMArea->vm_flags); -+ goto error_release_mmap_sem; -+ } -+ -+ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: No read/write access to memory region (VMA flags: 0x%lx)\n", -+ psVMArea->vm_flags); -+ goto error_release_mmap_sem; -+ } -+ -+ for (ulAddr = ulStartAddrOrig, ui = 
0; ulAddr < ulBeyondEndAddrOrig; -+ ulAddr += PAGE_SIZE, ui++) { -+ struct page *psPage; -+ -+ BUG_ON(ui >= psInfo->iNumPages); -+ -+ psPage = CPUVAddrToPage(psVMArea, ulAddr); -+ if (psPage == NULL) { -+ unsigned uj; -+ -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Couldn't lookup page structure for address 0x%lx, trying something else\n", -+ ulAddr); -+ -+ for (uj = 0; uj < ui; uj++) { -+ put_page_testzero(psInfo->ppsPages[uj]); -+ } -+ break; -+ } -+ -+ psInfo->ppsPages[ui] = psPage; -+ } -+ -+ BUG_ON(ui > psInfo->iNumPages); -+ if (ui == psInfo->iNumPages) { -+ -+ for (ui = 0; ui < psInfo->iNumPages; ui++) { -+ struct page *psPage = psInfo->ppsPages[ui]; -+ IMG_CPU_PHYADDR CPUPhysAddr; -+ -+ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT; -+ -+ psInfo->psPhysAddr[ui] = -+ SysCpuPAddrToSysPAddr(CPUPhysAddr); -+ psSysPAddr[ui] = psInfo->psPhysAddr[ui]; -+ } -+ -+ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES; -+ } else { -+ -+ if ((psVMArea->vm_flags & VM_PFNMAP) == 0) { -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Region isn't a raw PFN mapping. Giving up.\n"); -+ goto error_release_mmap_sem; -+ } -+ -+ for (ulAddr = ulStartAddrOrig, ui = 0; -+ ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, ui++) { -+ IMG_CPU_PHYADDR CPUPhysAddr; -+ -+ CPUPhysAddr.uiAddr = -+ ((ulAddr - psVMArea->vm_start) + -+ (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK; -+ -+ psInfo->psPhysAddr[ui] = -+ SysCpuPAddrToSysPAddr(CPUPhysAddr); -+ psSysPAddr[ui] = psInfo->psPhysAddr[ui]; -+ } -+ BUG_ON(ui != psInfo->iNumPages); -+ -+ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN; -+ -+ printk(KERN_WARNING -+ ": OSCpuVToPageList: Region can't be locked down\n"); -+ } -+ -+ if (bUseLock) -+ up_read(&current->mm->mmap_sem); -+ -+exit_check: -+ CheckPagesContiguous(psInfo); -+ -+ *phOSWrapMem = (IMG_HANDLE) psInfo; -+ -+ return PVRSRV_OK; -+ -+error_release_mmap_sem: -+ if (bUseLock) -+ up_read(&current->mm->mmap_sem); -+ -+error_free: -+ psInfo->eType = WRAP_TYPE_CLEANUP; -+ OSReleasePhysPageAddr((IMG_HANDLE) psInfo, bUseLock); -+ return PVRSRV_ERROR_GENERIC; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/osfunc.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/osfunc.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/osfunc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/osfunc.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,300 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+ -+#ifndef __OSFUNC_H__ -+#define __OSFUNC_H__ -+ -+ -+#ifdef __KERNEL__ -+#include -+#endif -+ -+#define KERNEL_ID 0xffffffffL -+#define POWER_MANAGER_ID 0xfffffffeL -+#define ISR_ID 0xfffffffdL -+#define TIMER_ID 0xfffffffcL -+ -+#define HOST_PAGESIZE OSGetPageSize -+#define HOST_PAGEMASK (~(HOST_PAGESIZE()-1)) -+#define HOST_PAGEALIGN(addr) (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK) -+ -+#define PVRSRV_OS_HEAP_MASK 0xf -+#define PVRSRV_OS_PAGEABLE_HEAP 0x1 -+#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2 -+ -+ IMG_UINT32 OSClockus(IMG_VOID); -+ IMG_UINT32 OSGetPageSize(IMG_VOID); -+ PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID * pvSysData, -+ IMG_UINT32 ui32Irq, -+ IMG_CHAR * pszISRName, -+ IMG_VOID * pvDeviceNode); -+ PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID * pvSysData); -+ PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID * pvSysData, -+ IMG_UINT32 ui32Irq); -+ PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID * pvSysData); -+ PVRSRV_ERROR OSInstallMISR(IMG_VOID * pvSysData); -+ PVRSRV_ERROR OSUninstallMISR(IMG_VOID * pvSysData); -+ PVRSRV_ERROR OSInitPerf(IMG_VOID * pvSysData); -+ PVRSRV_ERROR OSCleanupPerf(IMG_VOID * pvSysData); -+ IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID * pvLinAddr); -+ IMG_VOID OSMemCopy(IMG_VOID * pvDst, IMG_VOID * pvSrc, -+ IMG_UINT32 ui32Size); -+ IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, -+ IMG_HANDLE * phOSMemHandle); -+ IMG_BOOL OSUnMapPhysToLin(IMG_VOID * pvLinAddr, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE hOSMemHandle); -+ -+ PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, -+ IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, -+ IMG_VOID ** ppvCpuVAddr, -+ IMG_HANDLE * phOSMemHandle); -+ PVRSRV_ERROR OSUnReservePhys(IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, -+ IMG_HANDLE hOSMemHandle); -+ -+ PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR * pBasePAddr, -+ IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE * phOSMemHandle); -+ PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE hOSMemHandle); -+ -+ static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR * -+ pBasePAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_VOID ** -+ ppvCpuVAddr, -+ IMG_HANDLE * -+ phOSMemHandle) { -+ *ppvCpuVAddr = IMG_NULL; -+ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, -+ ui32Bytes, ui32Flags, -+ phOSMemHandle); -+ } -+ -+ static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID * -+ pvCpuVAddr, -+ IMG_UINT32 -+ ui32Bytes, -+ IMG_UINT32 -+ ui32Flags, -+ IMG_HANDLE -+ hOSMemHandle) { -+ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, -+ hOSMemHandle); -+ -+ return PVRSRV_OK; -+ } -+ -+ PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, -+ IMG_VOID * pvCpuVAddr, IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE * phOSMemHandle); -+ PVRSRV_ERROR OSUnRegisterMem(IMG_VOID * pvCpuVAddr, -+ IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, -+ IMG_HANDLE hOSMemHandle); -+ -+ PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, -+ IMG_UINT32 ui32ByteOffset, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE * phOSMemHandleRet); -+ PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, -+ IMG_UINT32 ui32Flags); -+ -+ IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID); -+ 
IMG_UINT32 OSGetCurrentThreadID(IMG_VOID); -+ IMG_VOID OSMemSet(IMG_VOID * pvDest, IMG_UINT8 ui8Value, -+ IMG_UINT32 ui32Size); -+ -+#ifdef DEBUG_LINUX_MEMORY_ALLOCATIONS -+ PVRSRV_ERROR _OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID * ppvLinAddr, -+ IMG_HANDLE * phBlockAlloc, -+ IMG_CHAR * pszFilename, IMG_UINT32 ui32Line); -+#define OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc) _OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc, __FILE__, __LINE__) -+ PVRSRV_ERROR _OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, -+ IMG_CHAR * pszFilename, IMG_UINT32 ui32Line); -+#define OSFreeMem(ui32Flags, ui32Size, pvLinAddr, phBlockAlloc) _OSFreeMem(ui32Flags, ui32Size, pvLinAddr, phBlockAlloc, __FILE__, __LINE__) -+#else -+ PVRSRV_ERROR OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID * ppvLinAddr, -+ IMG_HANDLE * phBlockAlloc); -+ PVRSRV_ERROR OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc); -+#endif -+ PVRSRV_ERROR OSAllocPages(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID * ppvLinAddr, -+ IMG_HANDLE * phPageAlloc); -+ PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, -+ IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc); -+ IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID * hOSMemHandle, -+ IMG_UINT32 ui32ByteOffset); -+ PVRSRV_ERROR OSInitEnvData(IMG_PVOID * ppvEnvSpecificData); -+ PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData); -+ IMG_CHAR *OSStringCopy(IMG_CHAR * pszDest, const IMG_CHAR * pszSrc); -+ IMG_INT32 OSSNPrintf(IMG_CHAR * pStr, IMG_UINT32 ui32Size, -+ const IMG_CHAR * pszFormat, ...); -+#define OSStringLength(pszString) strlen(pszString) -+ -+ PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR * pszName, -+ PVRSRV_EVENTOBJECT * psEventObject); -+ PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT * psEventObject); -+ PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM); -+ PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM); -+ PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT * psEventObject, -+ IMG_HANDLE * phOSEvent); -+ PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT * psEventObject, -+ IMG_HANDLE hOSEventKM); -+ -+ PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, -+ IMG_CPU_VIRTADDR * pLinAddr, -+ IMG_CPU_PHYADDR * pPhysAddr); -+ PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, -+ IMG_CPU_VIRTADDR LinAddr, -+ IMG_CPU_PHYADDR PhysAddr); -+ -+ IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM, IMG_UINT32 ui32Size, -+ IMG_HANDLE * phMemBlock); -+ IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, -+ IMG_SYS_PHYADDR sRegAddr, -+ IMG_UINT32 ulSize, -+ IMG_PVOID * ppvProcess); -+ IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, -+ IMG_PVOID pvUserAddr, -+ IMG_PVOID pvProcess); -+ -+ IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_UINT32 ui32Size, -+ IMG_HANDLE hMemBlock); -+ -+ PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie, -+ IMG_SYS_PHYADDR sCPUPhysAddr, -+ IMG_UINT32 uiSizeInBytes, -+ IMG_UINT32 ui32CacheFlags, -+ IMG_PVOID * ppvUserAddr, -+ IMG_UINT32 * puiActualSize, -+ IMG_HANDLE hMappingHandle); -+ -+ PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie, -+ IMG_PVOID pvUserAddr, -+ IMG_PVOID pvProcess); -+ -+ PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE * psResource, -+ IMG_UINT32 ui32ID); -+ PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE * psResource, -+ IMG_UINT32 ui32ID); -+ IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE * 
psResource, -+ IMG_UINT32 ui32ID); -+ PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE * psResource); -+ PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE * psResource); -+ IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE * psResource, -+ IMG_UINT32 ui32ID); -+ IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus); -+ IMG_VOID OSReleaseThreadQuanta(IMG_VOID); -+ IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, -+ IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg); -+ IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, -+ IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, -+ IMG_UINT32 ui32Value); -+ -+ IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset); -+ IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value); -+ -+ typedef IMG_VOID(*PFN_TIMER_FUNC) (IMG_VOID *); -+ IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID * pvData, -+ IMG_UINT32 ui32MsTimeout); -+ PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer); -+ PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer); -+ PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer); -+ -+ PVRSRV_ERROR OSGetSysMemSize(IMG_UINT32 * pui32Bytes); -+ -+ typedef enum _HOST_PCI_INIT_FLAGS_ { -+ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x1, -+ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff -+ } HOST_PCI_INIT_FLAGS; -+ -+ struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; -+ typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; -+ -+ PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, -+ IMG_UINT16 ui16DeviceID, -+ HOST_PCI_INIT_FLAGS eFlags); -+ PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID * pvPCICookie, -+ HOST_PCI_INIT_FLAGS eFlags); -+ PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); -+ PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 * pui32IRQ); -+ IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index); -+ IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index); -+ IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index); -+ PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index); -+ PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index); -+ PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); -+ PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); -+ -+ PVRSRV_ERROR OSScheduleMISR(IMG_VOID * pvSysData); -+ -+ IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID); -+ -+ typedef enum _img_verify_test { -+ PVR_VERIFY_WRITE = 0, -+ PVR_VERIFY_READ -+ } IMG_VERIFY_TEST; -+ -+ IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID * pvUserPtr, -+ IMG_UINT32 ui32Bytes); -+ -+ PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID * pvDest, -+ IMG_VOID * pvSrc, IMG_UINT32 ui32Bytes); -+ PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID * pvDest, -+ IMG_VOID * pvSrc, IMG_UINT32 ui32Bytes); -+ -+ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID * pvCPUVAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_SYS_PHYADDR * psSysPAddr, -+ IMG_HANDLE * phOSWrapMem, -+ IMG_BOOL bUseLock); -+ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem, -+ IMG_BOOL bUseLock); -+ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pb.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pb.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pb.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pb.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,466 @@ 
-+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "services_headers.h" -+#include "sgxapi_km.h" -+#include "sgxinfo.h" -+#include "sgxinfokm.h" -+#include "pvr_bridge_km.h" -+#include "pdump_km.h" -+ -+ -+static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL; -+static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL; -+ -+static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param); -+static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, -+ IMG_UINT32 -+ ui32Param); -+ -+IMG_EXPORT PVRSRV_ERROR -+SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDevCookie, -+ IMG_BOOL bLockOnFailure, -+ IMG_UINT32 ui32TotalPBSize, -+ IMG_HANDLE * phSharedPBDesc, -+ PVRSRV_KERNEL_MEM_INFO ** ppsSharedPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO ** ppsHWPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO ** ppsBlockKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO *** -+ pppsSharedPBDescSubKernelMemInfos, -+ IMG_UINT32 * ui32SharedPBDescSubKernelMemInfosCount) -+{ -+ PVRSRV_STUB_PBDESC *psStubPBDesc; -+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL; -+ PVRSRV_SGXDEV_INFO *psSGXDevInfo; -+ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC; -+ -+ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *) hDevCookie)->pvDevice; -+ -+ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM; -+ if (psStubPBDesc != IMG_NULL) { -+ if (psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored", -+ ui32TotalPBSize, -+ psStubPBDesc->ui32TotalPBSize)); -+ } -+ { -+ IMG_UINT32 i; -+ PRESMAN_ITEM psResItem; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO *) -+ * -+ psStubPBDesc->ui32SubKernelMemInfosCount, -+ (IMG_VOID **) & -+ ppsSharedPBDescSubKernelMemInfos, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXFindSharedPBDescKM: OSAllocMem failed")); -+ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ExitNotFound; -+ } -+ -+ psResItem = ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_SHARED_PB_DESC, -+ psStubPBDesc, -+ 0, -+ &SGXCleanupSharedPBDescCallback); -+ -+ if (psResItem == IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO *) -+ * -+ 
psStubPBDesc-> -+ ui32SubKernelMemInfosCount, -+ ppsSharedPBDescSubKernelMemInfos, 0); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXFindSharedPBDescKM: ResManRegisterRes failed")); -+ -+ eError = PVRSRV_ERROR_GENERIC; -+ goto ExitNotFound; -+ } -+ -+ *ppsSharedPBDescKernelMemInfo = -+ psStubPBDesc->psSharedPBDescKernelMemInfo; -+ *ppsHWPBDescKernelMemInfo = -+ psStubPBDesc->psHWPBDescKernelMemInfo; -+ *ppsBlockKernelMemInfo = -+ psStubPBDesc->psBlockKernelMemInfo; -+ -+ *ui32SharedPBDescSubKernelMemInfosCount = -+ psStubPBDesc->ui32SubKernelMemInfosCount; -+ -+ *pppsSharedPBDescSubKernelMemInfos = -+ ppsSharedPBDescSubKernelMemInfos; -+ -+ for (i = 0; -+ i < psStubPBDesc->ui32SubKernelMemInfosCount; -+ i++) { -+ ppsSharedPBDescSubKernelMemInfos[i] = -+ psStubPBDesc->ppsSubKernelMemInfos[i]; -+ } -+ -+ psStubPBDesc->ui32RefCount++; -+ *phSharedPBDesc = (IMG_HANDLE) psResItem; -+ return PVRSRV_OK; -+ } -+ } -+ -+ eError = PVRSRV_OK; -+ if (bLockOnFailure) { -+ if (psResItemCreateSharedPB == IMG_NULL) { -+ psResItemCreateSharedPB = -+ ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, -+ psPerProc, 0, -+ &SGXCleanupSharedPBDescCreateLockCallback); -+ -+ if (psResItemCreateSharedPB == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXFindSharedPBDescKM: ResManRegisterRes failed")); -+ -+ eError = PVRSRV_ERROR_GENERIC; -+ goto ExitNotFound; -+ } -+ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL); -+ psPerProcCreateSharedPB = psPerProc; -+ } else { -+ eError = PVRSRV_ERROR_PROCESSING_BLOCKED; -+ } -+ } -+ExitNotFound: -+ *phSharedPBDesc = IMG_NULL; -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC * psStubPBDescIn) -+{ -+ PVRSRV_STUB_PBDESC **ppsStubPBDesc; -+ IMG_UINT32 i; -+ PVRSRV_SGXDEV_INFO *psSGXDevInfo; -+ -+ psSGXDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) psStubPBDescIn-> -+ hDevCookie)->pvDevice; -+ -+ for (ppsStubPBDesc = -+ (PVRSRV_STUB_PBDESC **) & psSGXDevInfo->psStubPBDescListKM; -+ *ppsStubPBDesc != IMG_NULL; -+ ppsStubPBDesc = &(*ppsStubPBDesc)->psNext) { -+ PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc; -+ -+ if (psStubPBDesc == psStubPBDescIn) { -+ psStubPBDesc->ui32RefCount--; -+ PVR_ASSERT((IMG_INT32) psStubPBDesc->ui32RefCount >= 0); -+ -+ if (psStubPBDesc->ui32RefCount == 0) { -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psSGXDevInfo-> -+ psSGXHostCtl; -+#if defined (PDUMP) -+ IMG_HANDLE hUniqueTag = -+ MAKEUNIQUETAG(psSGXDevInfo-> -+ psKernelSGXHostCtlMemInfo); -+#endif -+ -+ psSGXHostCtl->sTAHWPBDesc.uiAddr = 0; -+ psSGXHostCtl->s3DHWPBDesc.uiAddr = 0; -+ -+ PDUMPCOMMENT -+ ("TA/3D CCB Control - Reset HW PBDesc records"); -+ PDUMPMEM(IMG_NULL, -+ psSGXDevInfo-> -+ psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, -+ sTAHWPBDesc), -+ sizeof(IMG_DEV_VIRTADDR), -+ PDUMP_FLAGS_CONTINUOUS, hUniqueTag); -+ PDUMPMEM(IMG_NULL, -+ psSGXDevInfo-> -+ psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, -+ s3DHWPBDesc), -+ sizeof(IMG_DEV_VIRTADDR), -+ PDUMP_FLAGS_CONTINUOUS, hUniqueTag); -+ -+ *ppsStubPBDesc = psStubPBDesc->psNext; -+ -+ for (i = 0; -+ i < -+ psStubPBDesc->ui32SubKernelMemInfosCount; -+ i++) { -+ -+ PVRSRVFreeDeviceMemKM(psStubPBDesc-> -+ hDevCookie, -+ psStubPBDesc-> -+ ppsSubKernelMemInfos -+ [i]); -+ } -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO *) -+ * -+ psStubPBDesc-> -+ ui32SubKernelMemInfosCount, -+ psStubPBDesc->ppsSubKernelMemInfos, -+ 0); -+ -+ PVRSRVFreeSharedSysMemoryKM(psStubPBDesc-> -+ 
psBlockKernelMemInfo); -+ -+ PVRSRVFreeDeviceMemKM(psStubPBDesc->hDevCookie, -+ psStubPBDesc-> -+ psHWPBDescKernelMemInfo); -+ -+ PVRSRVFreeSharedSysMemoryKM(psStubPBDesc-> -+ psSharedPBDescKernelMemInfo); -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_STUB_PBDESC), -+ psStubPBDesc, 0); -+ -+ } -+ return PVRSRV_OK; -+ } -+ } -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+} -+ -+static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *) pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ return SGXCleanupSharedPBDescKM(psStubPBDesc); -+} -+ -+static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, -+ IMG_UINT32 -+ ui32Param) -+{ -+#ifdef DEBUG -+ PVRSRV_PER_PROCESS_DATA *psPerProc = -+ (PVRSRV_PER_PROCESS_DATA *) pvParam; -+#else -+ PVR_UNREFERENCED_PARAMETER(pvParam); -+#endif -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB); -+ -+ psPerProcCreateSharedPB = IMG_NULL; -+ psResItemCreateSharedPB = IMG_NULL; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT PVRSRV_ERROR SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc) -+{ -+ PVR_ASSERT(hSharedPBDesc != IMG_NULL); -+ -+ return ResManFreeResByPtr(hSharedPBDesc); -+} -+ -+IMG_EXPORT PVRSRV_ERROR -+SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDevCookie, -+ PVRSRV_KERNEL_MEM_INFO * psSharedPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO * psHWPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO * psBlockKernelMemInfo, -+ IMG_UINT32 ui32TotalPBSize, -+ IMG_HANDLE * phSharedPBDesc, -+ PVRSRV_KERNEL_MEM_INFO ** ppsSharedPBDescSubKernelMemInfos, -+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount) -+{ -+ PVRSRV_STUB_PBDESC *psStubPBDesc = IMG_NULL; -+ PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC; -+ IMG_UINT32 i; -+ PVRSRV_SGXDEV_INFO *psSGXDevInfo; -+ PRESMAN_ITEM psResItem; -+ -+ if (psPerProcCreateSharedPB != psPerProc) { -+ goto NoAdd; -+ } else { -+ PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL); -+ -+ ResManFreeResByPtr(psResItemCreateSharedPB); -+ -+ PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL); -+ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL); -+ } -+ -+ psSGXDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookie)-> -+ pvDevice; -+ -+ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM; -+ if (psStubPBDesc != IMG_NULL) { -+ if (psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored", -+ ui32TotalPBSize, -+ psStubPBDesc->ui32TotalPBSize)); -+ -+ } -+ { -+ -+ psResItem = ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_SHARED_PB_DESC, -+ psStubPBDesc, -+ 0, -+ &SGXCleanupSharedPBDescCallback); -+ if (psResItem == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXAddSharedPBDescKM: " -+ "Failed to register existing shared " -+ "PBDesc with the resource manager")); -+ goto NoAddKeepPB; -+ } -+ -+ psStubPBDesc->ui32RefCount++; -+ -+ *phSharedPBDesc = (IMG_HANDLE) psResItem; -+ eRet = PVRSRV_OK; -+ goto NoAddKeepPB; -+ } -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_STUB_PBDESC), -+ (IMG_VOID **) & psStubPBDesc, 0) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc " -+ "StubPBDesc")); -+ eRet = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto NoAdd; -+ } -+ -+ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL; -+ -+ if 
(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO *) -+ * ui32SharedPBDescSubKernelMemInfosCount, -+ (IMG_VOID **) & psStubPBDesc->ppsSubKernelMemInfos, -+ 0) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " -+ "Failed to alloc " -+ "StubPBDesc->ppsSubKernelMemInfos")); -+ eRet = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto NoAdd; -+ } -+ -+ if (PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo) -+ != PVRSRV_OK) { -+ goto NoAdd; -+ } -+ -+ if (PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo) -+ != PVRSRV_OK) { -+ goto NoAdd; -+ } -+ -+ if (PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo) -+ != PVRSRV_OK) { -+ goto NoAdd; -+ } -+ -+ psStubPBDesc->ui32RefCount = 1; -+ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize; -+ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo; -+ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo; -+ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo; -+ -+ psStubPBDesc->ui32SubKernelMemInfosCount = -+ ui32SharedPBDescSubKernelMemInfosCount; -+ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) { -+ psStubPBDesc->ppsSubKernelMemInfos[i] = -+ ppsSharedPBDescSubKernelMemInfos[i]; -+ if (PVRSRVDissociateMemFromResmanKM -+ (ppsSharedPBDescSubKernelMemInfos[i]) -+ != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " -+ "Failed to dissociate shared PBDesc " -+ "from process")); -+ goto NoAdd; -+ } -+ } -+ -+ psResItem = ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_SHARED_PB_DESC, -+ psStubPBDesc, -+ 0, &SGXCleanupSharedPBDescCallback); -+ if (psResItem == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " -+ "Failed to register shared PBDesc " -+ " with the resource manager")); -+ goto NoAdd; -+ } -+ psStubPBDesc->hDevCookie = hDevCookie; -+ -+ psStubPBDesc->psNext = psSGXDevInfo->psStubPBDescListKM; -+ psSGXDevInfo->psStubPBDescListKM = psStubPBDesc; -+ -+ *phSharedPBDesc = (IMG_HANDLE) psResItem; -+ -+ return PVRSRV_OK; -+ -+NoAdd: -+ if (psStubPBDesc) { -+ if (psStubPBDesc->ppsSubKernelMemInfos) { -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_KERNEL_MEM_INFO *) -+ * ui32SharedPBDescSubKernelMemInfosCount, -+ psStubPBDesc->ppsSubKernelMemInfos, 0); -+ } -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_STUB_PBDESC), psStubPBDesc, 0); -+ } -+ -+NoAddKeepPB: -+ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) { -+ PVRSRVFreeDeviceMemKM(hDevCookie, -+ ppsSharedPBDescSubKernelMemInfos[i]); -+ } -+ -+ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo); -+ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo); -+ -+ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo); -+ -+ return eRet; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pdump.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pdump.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pdump.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pdump.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1382 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if defined (PDUMP) -+#include "sgxdefs.h" -+#include "services_headers.h" -+ -+#include "pvrversion.h" -+#include "pvr_debug.h" -+ -+#include "dbgdrvif.h" -+#include "sgxmmu.h" -+#include "mm.h" -+#include "pdump_km.h" -+ -+#include -+ -+static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags); -+static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 * pui8Data, -+ IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags); -+static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame); -+static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream); -+static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker); -+static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 * pui8Data, -+ IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags); -+ -+#define PDUMP_DATAMASTER_PIXEL (1) -+ -+#define MIN(a,b) (a > b ? b : a) -+ -+#define MAX_FILE_SIZE 0x40000000 -+ -+static IMG_UINT32 gui32PDumpSuspended = 0; -+ -+static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL; -+ -+#define PDUMP_STREAM_PARAM2 0 -+#define PDUMP_STREAM_SCRIPT2 1 -+#define PDUMP_STREAM_DRIVERINFO 2 -+#define PDUMP_NUM_STREAMS 3 -+ -+IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2", -+ "ScriptStream2", -+ "DriverInfoStream" -+}; -+ -+#define __PDBG_PDUMP_STATE_GET_MSG_STRING(ERROR) \ -+ IMG_CHAR *pszMsg = gsDBGPdumpState.pszMsg; \ -+ if(!pszMsg) return ERROR -+ -+#define __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(ERROR) \ -+ IMG_CHAR *pszScript = gsDBGPdumpState.pszScript; \ -+ if(!pszScript) return ERROR -+ -+#define __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(ERROR) \ -+ IMG_CHAR *pszScript = gsDBGPdumpState.pszScript; \ -+ IMG_CHAR *pszFile = gsDBGPdumpState.pszFile; \ -+ if(!pszScript || !pszFile) return ERROR -+ -+typedef struct PDBG_PDUMP_STATE_TAG { -+ PDBG_STREAM psStream[PDUMP_NUM_STREAMS]; -+ IMG_UINT32 ui32ParamFileNum; -+ -+ IMG_CHAR *pszMsg; -+ IMG_CHAR *pszScript; -+ IMG_CHAR *pszFile; -+ -+} PDBG_PDUMP_STATE; -+ -+static PDBG_PDUMP_STATE gsDBGPdumpState = -+ { {IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL }; -+ -+#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 -+#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 -+#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 -+ -+void DBGDrvGetServiceTable(IMG_VOID ** fn_table); -+ -+IMG_VOID PDumpInit(IMG_VOID) -+{ -+ IMG_UINT32 i = 0; -+ -+ if (!gpfnDbgDrv) { -+ DBGDrvGetServiceTable((IMG_VOID **) & gpfnDbgDrv); -+ -+ if (gpfnDbgDrv == IMG_NULL) { -+ return; -+ } -+ -+ if (!gsDBGPdumpState.pszFile) { -+ if (OSAllocMem -+ (PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, -+ (IMG_PVOID *) & gsDBGPdumpState.pszFile, -+ 0) != PVRSRV_OK) { -+ goto init_failed; -+ } -+ } -+ -+ if 
(!gsDBGPdumpState.pszMsg) { -+ if (OSAllocMem -+ (PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, -+ (IMG_PVOID *) & gsDBGPdumpState.pszMsg, -+ 0) != PVRSRV_OK) { -+ goto init_failed; -+ } -+ } -+ -+ if (!gsDBGPdumpState.pszScript) { -+ if (OSAllocMem -+ (PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, -+ (IMG_PVOID *) & gsDBGPdumpState.pszScript, -+ 0) != PVRSRV_OK) { -+ goto init_failed; -+ } -+ } -+ -+ for (i = 0; i < PDUMP_NUM_STREAMS; i++) { -+ gsDBGPdumpState.psStream[i] = -+ gpfnDbgDrv->pfnCreateStream(pszStreamName[i], -+ DEBUG_CAPMODE_FRAMED, -+ DEBUG_OUTMODE_STREAMENABLE, -+ 0, 10); -+ -+ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState. -+ psStream[i], -+ DEBUG_CAPMODE_FRAMED, -+ 0xFFFFFFFF, 0xFFFFFFFF, -+ 1); -+ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i], 0); -+ } -+ -+ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME); -+ PDUMPCOMMENT("Driver Product Version: %s (%s)", -+ PVRVERSION_STRING, PVRVERSION_FILE); -+ PDUMPCOMMENT("Start of Init Phase"); -+ } -+ -+ return; -+ -+init_failed: -+ -+ if (gsDBGPdumpState.pszFile) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, -+ (IMG_PVOID) gsDBGPdumpState.pszFile, 0); -+ gsDBGPdumpState.pszFile = IMG_NULL; -+ } -+ -+ if (gsDBGPdumpState.pszScript) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, -+ (IMG_PVOID) gsDBGPdumpState.pszScript, 0); -+ gsDBGPdumpState.pszScript = IMG_NULL; -+ } -+ -+ if (gsDBGPdumpState.pszMsg) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, -+ (IMG_PVOID) gsDBGPdumpState.pszMsg, 0); -+ gsDBGPdumpState.pszMsg = IMG_NULL; -+ } -+ -+ gpfnDbgDrv = IMG_NULL; -+} -+ -+IMG_VOID PDumpDeInit(IMG_VOID) -+{ -+ IMG_UINT32 i = 0; -+ -+ for (i = 0; i < PDUMP_NUM_STREAMS; i++) { -+ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]); -+ } -+ -+ if (gsDBGPdumpState.pszFile) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, -+ (IMG_PVOID) gsDBGPdumpState.pszFile, 0); -+ gsDBGPdumpState.pszFile = IMG_NULL; -+ } -+ -+ if (gsDBGPdumpState.pszScript) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, -+ (IMG_PVOID) gsDBGPdumpState.pszScript, 0); -+ gsDBGPdumpState.pszScript = IMG_NULL; -+ } -+ -+ if (gsDBGPdumpState.pszMsg) { -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, -+ (IMG_PVOID) gsDBGPdumpState.pszMsg, 0); -+ gsDBGPdumpState.pszMsg = IMG_NULL; -+ } -+ -+ gpfnDbgDrv = IMG_NULL; -+} -+ -+IMG_VOID PDumpEndInitPhase(IMG_VOID) -+{ -+ IMG_UINT32 i; -+ -+ PDUMPCOMMENT("End of Init Phase"); -+ -+ for (i = 0; i < PDUMP_NUM_STREAMS; i++) { -+ gpfnDbgDrv->pfnEndInitPhase(gsDBGPdumpState.psStream[i]); -+ } -+} -+ -+void PDumpComment(IMG_CHAR * pszFormat, ...) -+{ -+ __PDBG_PDUMP_STATE_GET_MSG_STRING(); -+ -+ vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, -+ (IMG_CHAR *) (&pszFormat + 1)); -+ -+ PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS); -+} -+ -+void PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...) -+{ -+ __PDBG_PDUMP_STATE_GET_MSG_STRING(); -+ -+ vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, -+ (IMG_CHAR *) (&pszFormat + 1)); -+ -+ PDumpCommentKM(pszMsg, ui32Flags); -+} -+ -+IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID) -+{ -+ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState. -+ psStream -+ [PDUMP_STREAM_SCRIPT2]); -+} -+ -+IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID) -+{ -+ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState. 
-+ psStream[PDUMP_STREAM_SCRIPT2], -+ IMG_FALSE); -+} -+ -+PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data, -+ IMG_UINT32 ui32Flags) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data); -+ PDumpWriteString2(pszScript, ui32Flags); -+ -+ return PVRSRV_OK; -+} -+ -+void PDumpReg(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+} -+ -+PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask, IMG_UINT32 ui32Flags) -+{ -+#define POLL_DELAY 1000 -+#define POLL_COUNT_LONG (2000000000 / POLL_DELAY) -+#define POLL_COUNT_SHORT (1000000 / POLL_DELAY) -+ -+ IMG_UINT32 ui32PollCount; -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC); -+ -+ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) && -+ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK)) -+ || ((ui32RegAddr == EUR_CR_EVENT_STATUS) -+ && (ui32RegValue & ui32Mask & -+ EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK)) -+ || ((ui32RegAddr == EUR_CR_EVENT_STATUS) -+ && (ui32RegValue & ui32Mask & -+ EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK))) { -+ ui32PollCount = POLL_COUNT_LONG; -+ } else { -+ ui32PollCount = POLL_COUNT_SHORT; -+ } -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n", -+ ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount, -+ POLL_DELAY); -+ PDumpWriteString2(pszScript, ui32Flags); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask) -+{ -+ return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask, -+ PDUMP_FLAGS_CONTINUOUS); -+} -+ -+IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_UINT32 ui32DevVAddr, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_HANDLE hOSMemHandle, -+ IMG_UINT32 ui32NumBytes, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32NumPages; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_UINT32 ui32Page; -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ PVR_UNREFERENCED_PARAMETER(pvLinAddr); -+ -+ PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ PVR_ASSERT(hOSMemHandle); -+ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %lu\r\n", ui32DevVAddr, -+ ui32NumBytes, SGX_MMU_PAGE_SIZE); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ -+ ui32Offset = 0; -+ ui32NumPages = ui32NumBytes >> SGX_MMU_PAGE_SHIFT; -+ while (ui32NumPages--) { -+ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset); -+ PVR_ASSERT((sCpuPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ ui32Offset += SGX_MMU_PAGE_SIZE; -+ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); -+ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT; -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "MALLOC :SGXMEM:PA_%p%8.8lX %lu %lu 0x%8.8lX\r\n", -+ hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE, -+ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, -+ ui32Page * SGX_MMU_PAGE_SIZE); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ } -+} -+ -+IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ 
IMG_UINT32 ui32NumBytes, IMG_HANDLE hUniqueTag) -+{ -+ IMG_PUINT8 pui8LinAddr; -+ IMG_UINT32 ui32NumPages; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_UINT32 ui32Page; -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %lu\r\n", ui32NumBytes, -+ SGX_MMU_PAGE_SIZE); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ -+ pui8LinAddr = (IMG_PUINT8) pvLinAddr; -+ ui32NumPages = ui32NumBytes >> SGX_MMU_PAGE_SHIFT; -+ while (ui32NumPages--) { -+ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr); -+ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); -+ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT; -+ pui8LinAddr += SGX_MMU_PAGE_SIZE; -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "MALLOC :SGXMEM:PA_%p%8.8lX 0x%lX %lu 0x%8.8lX\r\n", -+ hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE, -+ SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, -+ ui32Page * SGX_MMU_PAGE_SIZE); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ } -+} -+ -+IMG_VOID PDumpFreePages(BM_HEAP * psBMHeap, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_UINT32 ui32NumBytes, -+ IMG_HANDLE hUniqueTag, IMG_BOOL bInterleaved) -+{ -+ IMG_UINT32 ui32NumPages, ui32PageCounter; -+ IMG_DEV_PHYADDR sDevPAddr; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1)) == -+ 0); -+ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- FREE :SGXMEM:VA_%8.8lX\r\n", -+ sDevVAddr.uiAddr); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ -+ ui32NumPages = ui32NumBytes >> SGX_MMU_PAGE_SHIFT; -+ psDeviceNode = psBMHeap->pBMContext->psDeviceNode; -+ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; -+ ui32PageCounter++) { -+ if (!bInterleaved || (ui32PageCounter % 2) == 0) { -+ sDevPAddr = -+ psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap-> -+ pMMUHeap, -+ sDevVAddr); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "FREE :SGXMEM:PA_%p%8.8lX\r\n", hUniqueTag, -+ sDevPAddr.uiAddr); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ } else { -+ -+ } -+ -+ sDevVAddr.uiAddr += SGX_MMU_PAGE_SIZE; -+ } -+} -+ -+IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_UINT32 ui32NumBytes, IMG_HANDLE hUniqueTag) -+{ -+ IMG_PUINT8 pui8LinAddr; -+ IMG_UINT32 ui32NumPages; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_UINT32 ui32Page; -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "-- FREE :SGXMEM:PAGE_TABLE\r\n"); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ -+ pui8LinAddr = (IMG_PUINT8) pvLinAddr; -+ ui32NumPages = ui32NumBytes >> SGX_MMU_PAGE_SHIFT; -+ while (ui32NumPages--) { -+ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr); -+ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); -+ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT; -+ pui8LinAddr += SGX_MMU_PAGE_SIZE; -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "FREE :SGXMEM:PA_%p%8.8lX\r\n", hUniqueTag, -+ ui32Page * SGX_MMU_PAGE_SIZE); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ } 
-+} -+ -+IMG_VOID PDumpPDReg(IMG_UINT32 ui32Reg, -+ IMG_UINT32 ui32Data, IMG_HANDLE hUniqueTag) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n", -+ ui32Reg, -+ hUniqueTag, -+ ui32Data & ~(SGX_MMU_PAGE_SIZE - 1), -+ ui32Data & (SGX_MMU_PAGE_SIZE - 1)); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+} -+ -+IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg, -+ IMG_UINT32 ui32Data, -+ IMG_UINT32 ui32Flags, IMG_HANDLE hUniqueTag) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n", -+ ui32Reg, -+ hUniqueTag, -+ ui32Data & ~(SGX_MMU_PAGE_SIZE - 1), -+ ui32Data & (SGX_MMU_PAGE_SIZE - 1)); -+ PDumpWriteString2(pszScript, ui32Flags); -+} -+ -+PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_BOOL bLastFrame, -+ IMG_BOOL bOverwrite, IMG_HANDLE hUniqueTag) -+{ -+#define MEMPOLL_DELAY (1000) -+#define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY) -+ -+ IMG_UINT32 ui32PageOffset; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_DEV_VIRTADDR sDevVPageAddr; -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_UINT32 ui32Flags; -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC); -+ -+ PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= -+ psMemInfo->ui32AllocSize); -+ -+ if (gsDBGPdumpState.ui32ParamFileNum == 0) { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm"); -+ } else { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", -+ gsDBGPdumpState.ui32ParamFileNum); -+ } -+ -+ ui32Flags = 0; -+ -+ if (bLastFrame) { -+ ui32Flags |= PDUMP_FLAGS_LASTFRAME; -+ } -+ -+ if (bOverwrite) { -+ ui32Flags |= PDUMP_FLAGS_RESETLFBUFFER; -+ } -+ -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset); -+ ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1); -+ -+ sDevVPageAddr.uiAddr = -+ psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset; -+ -+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); -+ -+ sDevPAddr.uiAddr += ui32PageOffset; -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "POL :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n", -+ hUniqueTag, -+ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1), -+ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1), -+ ui32Value, ui32Mask, eOperator, MEMPOLL_COUNT, MEMPOLL_DELAY); -+ PDumpWriteString2(pszScript, ui32Flags); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr, -+ PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 ui32PageByteOffset; -+ IMG_UINT8 *pui8DataLinAddr; -+ IMG_DEV_VIRTADDR sDevVPageAddr; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_UINT32 ui32ParamOutPos; -+ IMG_UINT32 ui32CurrentOffset; -+ IMG_UINT32 ui32BytesRemaining; -+ LinuxMemArea *psLinuxMemArea; -+ LINUX_MEM_AREA_TYPE eRootAreaType; -+ IMG_CHAR *pui8TransientCpuVAddr; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC); -+ -+ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize); -+ -+ if (ui32Bytes == 0 || gui32PDumpSuspended) { -+ return PVRSRV_OK; -+ } -+ -+ if (pvAltLinAddr) { -+ pui8DataLinAddr = pvAltLinAddr; -+ } else if (psMemInfo->pvLinAddrKM) { -+ pui8DataLinAddr = -+ (IMG_UINT8 *) 
psMemInfo->pvLinAddrKM + ui32Offset; -+ } else { -+ pui8DataLinAddr = 0; -+ psLinuxMemArea = -+ (LinuxMemArea *) psMemInfo->sMemBlk.hOSMemHandle; -+ eRootAreaType = LinuxMemAreaRootType(psLinuxMemArea); -+ } -+ -+ ui32ParamOutPos = -+ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState. -+ psStream[PDUMP_STREAM_PARAM2]); -+ -+ if (pui8DataLinAddr) { -+ if (!PDumpWriteILock -+ (gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], -+ pui8DataLinAddr, ui32Bytes, ui32Flags)) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ else if (eRootAreaType == LINUX_MEM_AREA_IO) { -+ -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, -+ ui32Offset); -+ pui8TransientCpuVAddr = -+ IORemapWrapper(CpuPAddr, ui32Bytes, PVRSRV_HAP_CACHED); -+ if (!PDumpWriteILock -+ (gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], -+ pui8TransientCpuVAddr, ui32Bytes, ui32Flags)) { -+ IOUnmapWrapper(pui8TransientCpuVAddr); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ IOUnmapWrapper(pui8TransientCpuVAddr); -+ } else { -+ -+ PVR_ASSERT(eRootAreaType == LINUX_MEM_AREA_ALLOC_PAGES); -+ -+ ui32BytesRemaining = ui32Bytes; -+ ui32CurrentOffset = ui32Offset; -+ -+ while (ui32BytesRemaining > 0) { -+ IMG_UINT32 ui32BlockBytes = -+ MIN(ui32BytesRemaining, PAGE_SIZE); -+ struct page *psCurrentPage = NULL; -+ -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk. -+ hOSMemHandle, -+ ui32CurrentOffset); -+ -+ if (CpuPAddr.uiAddr & (PAGE_SIZE - 1)) { -+ ui32BlockBytes = -+ MIN(ui32BytesRemaining, -+ PAGE_ALIGN(CpuPAddr.uiAddr) - -+ CpuPAddr.uiAddr); -+ } -+ -+ psCurrentPage = -+ LinuxMemAreaOffsetToPage(psLinuxMemArea, -+ ui32CurrentOffset); -+ pui8TransientCpuVAddr = KMapWrapper(psCurrentPage); -+ pui8TransientCpuVAddr += (CpuPAddr.uiAddr & ~PAGE_MASK); -+ if (!pui8TransientCpuVAddr) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (!PDumpWriteILock -+ (gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], -+ pui8TransientCpuVAddr, ui32BlockBytes, -+ ui32Flags)) { -+ KUnMapWrapper(psCurrentPage); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ KUnMapWrapper(psCurrentPage); -+ -+ ui32BytesRemaining -= ui32BlockBytes; -+ ui32CurrentOffset += ui32BlockBytes; -+ } -+ PVR_ASSERT(ui32BytesRemaining == 0); -+ -+ } -+ -+ if (gsDBGPdumpState.ui32ParamFileNum == 0) { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm"); -+ } else { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", -+ gsDBGPdumpState.ui32ParamFileNum); -+ } -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "-- LDB :SGXMEM:VA_%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n", -+ psMemInfo->sDevVAddr.uiAddr, -+ ui32Offset, ui32Bytes, ui32ParamOutPos, pszFile); -+ PDumpWriteString2(pszScript, ui32Flags); -+ -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset); -+ ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1); -+ -+ sDevVAddr = psMemInfo->sDevVAddr; -+ sDevVAddr.uiAddr += ui32Offset; -+ -+ ui32BytesRemaining = ui32Bytes; -+ ui32CurrentOffset = ui32Offset; -+ -+ while (ui32BytesRemaining > 0) { -+ IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE); -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, -+ ui32CurrentOffset); -+ -+ sDevVPageAddr.uiAddr = -+ psMemInfo->sDevVAddr.uiAddr + ui32CurrentOffset - -+ ui32PageByteOffset; -+ -+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); -+ -+ sDevPAddr.uiAddr += ui32PageByteOffset; -+ -+ if (ui32PageByteOffset) { -+ ui32BlockBytes = -+ MIN(ui32BytesRemaining, -+ PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr); -+ -+ ui32PageByteOffset = 0; -+ } -+ -+ 
snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n", -+ hUniqueTag, -+ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1), -+ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1), -+ ui32BlockBytes, ui32ParamOutPos, pszFile); -+ PDumpWriteString2(pszScript, ui32Flags); -+ -+ ui32BytesRemaining -= ui32BlockBytes; -+ ui32CurrentOffset += ui32BlockBytes; -+ ui32ParamOutPos += ui32BlockBytes; -+ } -+ PVR_ASSERT(ui32BytesRemaining == 0); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_BOOL bInitialisePages, -+ IMG_HANDLE hUniqueTag1, IMG_HANDLE hUniqueTag2) -+{ -+ IMG_UINT32 ui32NumPages; -+ IMG_UINT32 ui32PageOffset; -+ IMG_UINT32 ui32BlockBytes; -+ IMG_UINT8 *pui8LinAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32ParamOutPos; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC); -+ -+ if (ui32Flags) ; -+ -+ if (!pvLinAddr) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (gui32PDumpSuspended) { -+ return PVRSRV_OK; -+ } -+ -+ ui32ParamOutPos = -+ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState. -+ psStream[PDUMP_STREAM_PARAM2]); -+ -+ if (bInitialisePages) { -+ -+ if (!PDumpWriteILock -+ (gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], pvLinAddr, -+ ui32Bytes, PDUMP_FLAGS_CONTINUOUS)) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (gsDBGPdumpState.ui32ParamFileNum == 0) { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm"); -+ } else { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", -+ gsDBGPdumpState.ui32ParamFileNum); -+ } -+ } -+ -+ ui32PageOffset = (IMG_UINT32) pvLinAddr & (HOST_PAGESIZE() - 1); -+ ui32NumPages = -+ (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - -+ 1) / HOST_PAGESIZE(); -+ pui8LinAddr = (IMG_UINT8 *) pvLinAddr; -+ -+ while (ui32NumPages--) { -+ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr); -+ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); -+ -+ if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE()) { -+ -+ ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset; -+ } else { -+ -+ ui32BlockBytes = ui32Bytes; -+ } -+ -+ if (bInitialisePages) { -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n", -+ hUniqueTag1, -+ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1), -+ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1), -+ ui32BlockBytes, ui32ParamOutPos, pszFile); -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ } else { -+ for (ui32Offset = 0; ui32Offset < ui32BlockBytes; -+ ui32Offset += sizeof(IMG_UINT32)) { -+ IMG_UINT32 ui32PTE = -+ *((IMG_UINT32 *) (pui8LinAddr + -+ ui32Offset)); -+ -+ if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0) { -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n", -+ hUniqueTag1, -+ (sDevPAddr.uiAddr + -+ ui32Offset) & -+ ~(SGX_MMU_PAGE_SIZE - 1), -+ (sDevPAddr.uiAddr + -+ ui32Offset) & -+ (SGX_MMU_PAGE_SIZE - 1), -+ hUniqueTag2, -+ ui32PTE & -+ SGX_MMU_PDE_ADDR_MASK, -+ ui32PTE & -+ ~SGX_MMU_PDE_ADDR_MASK); -+ } else { -+ PVR_ASSERT(! 
-+ (ui32PTE & -+ SGX_MMU_PTE_VALID)); -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX%p\r\n", -+ hUniqueTag1, -+ (sDevPAddr.uiAddr + -+ ui32Offset) & -+ ~(SGX_MMU_PAGE_SIZE - 1), -+ (sDevPAddr.uiAddr + -+ ui32Offset) & -+ (SGX_MMU_PAGE_SIZE - 1), -+ ui32PTE, hUniqueTag2); -+ } -+ PDumpWriteString2(pszScript, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+ -+ ui32PageOffset = 0; -+ -+ ui32Bytes -= ui32BlockBytes; -+ -+ pui8LinAddr += ui32BlockBytes; -+ -+ ui32ParamOutPos += ui32BlockBytes; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_PHYADDR sPDDevPAddr, -+ IMG_HANDLE hUniqueTag1, IMG_HANDLE hUniqueTag2) -+{ -+ IMG_UINT32 ui32ParamOutPos; -+ IMG_CPU_PHYADDR CpuPAddr; -+ IMG_UINT32 ui32PageByteOffset; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEV_VIRTADDR sDevVPageAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC); -+ -+ ui32ParamOutPos = -+ gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState. -+ psStream[PDUMP_STREAM_PARAM2]); -+ -+ if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], -+ (IMG_UINT8 *) & sPDDevPAddr, -+ sizeof(IMG_DEV_PHYADDR), PDUMP_FLAGS_CONTINUOUS)) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (gsDBGPdumpState.ui32ParamFileNum == 0) { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm"); -+ } else { -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", -+ gsDBGPdumpState.ui32ParamFileNum); -+ } -+ -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset); -+ ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1); -+ -+ sDevVAddr = psMemInfo->sDevVAddr; -+ sDevVAddr.uiAddr += ui32Offset; -+ -+ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset; -+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); -+ sDevPAddr.uiAddr += ui32PageByteOffset; -+ -+ if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0) { -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n", -+ hUniqueTag1, -+ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1), -+ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1), -+ hUniqueTag2, -+ sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK, -+ sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK); -+ } else { -+ PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID)); -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX\r\n", -+ hUniqueTag1, -+ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1), -+ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1), -+ sPDDevPAddr.uiAddr); -+ } -+ PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame) -+{ -+ IMG_UINT32 ui32Stream; -+ -+ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++) { -+ if (gsDBGPdumpState.psStream[ui32Stream]) { -+ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], -+ ui32Frame); -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame) -+{ -+ *pui32Frame = -+ DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR * pszComment, IMG_UINT32 ui32Flags) -+{ -+ IMG_UINT32 ui32Count = 0; -+ PVRSRV_ERROR eError; -+ __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC); -+ -+ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) { -+ eError = PVRSRV_ERROR_GENERIC; -+ } else { -+ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED; -+ } 
-+ -+ if (!PDumpWriteString2("-- ", ui32Flags)) { -+ return eError; -+ } -+ -+ snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszComment); -+ -+ while ((pszMsg[ui32Count] != 0) && (ui32Count < SZ_MSG_SIZE_MAX)) { -+ ui32Count++; -+ } -+ -+ if ((pszMsg[ui32Count - 1] != '\n') && (ui32Count < SZ_MSG_SIZE_MAX)) { -+ pszMsg[ui32Count] = '\n'; -+ ui32Count++; -+ pszMsg[ui32Count] = '\0'; -+ } -+ if ((pszMsg[ui32Count - 2] != '\r') && (ui32Count < SZ_MSG_SIZE_MAX)) { -+ pszMsg[ui32Count - 1] = '\r'; -+ pszMsg[ui32Count] = '\n'; -+ ui32Count++; -+ pszMsg[ui32Count] = '\0'; -+ } -+ -+ PDumpWriteString2(pszMsg, ui32Flags); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR * pszString, IMG_UINT32 ui32Flags) -+{ -+ IMG_UINT32 ui32Count = 0; -+ __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC); -+ -+ snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszString); -+ -+ while ((pszMsg[ui32Count] != 0) && (ui32Count < SZ_MSG_SIZE_MAX)) { -+ ui32Count++; -+ } -+ -+ if ((pszMsg[ui32Count - 1] != '\n') && (ui32Count < SZ_MSG_SIZE_MAX)) { -+ pszMsg[ui32Count] = '\n'; -+ ui32Count++; -+ pszMsg[ui32Count] = '\0'; -+ } -+ if ((pszMsg[ui32Count - 2] != '\r') && (ui32Count < SZ_MSG_SIZE_MAX)) { -+ pszMsg[ui32Count - 1] = '\r'; -+ pszMsg[ui32Count] = '\n'; -+ ui32Count++; -+ pszMsg[ui32Count] = '\0'; -+ } -+ -+ if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_DRIVERINFO], -+ (IMG_UINT8 *) pszMsg, ui32Count, ui32Flags)) { -+ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) { -+ return PVRSRV_ERROR_GENERIC; -+ } else { -+ return PVRSRV_ERROR_CMD_NOT_PROCESSED; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32Width, -+ IMG_UINT32 ui32Height, -+ IMG_UINT32 ui32StrideInBytes, -+ IMG_DEV_VIRTADDR sDevBaseAddr, -+ IMG_UINT32 ui32Size, -+ PDUMP_PIXEL_FORMAT ePixelFormat, -+ PDUMP_MEM_FORMAT eMemFormat, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC); -+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, -+ "\r\n-- Dump bitmap of render\r\n"); -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n", -+ pszFileName, -+ pszFileName, -+ sDevBaseAddr.uiAddr, -+ ui32Size, -+ ui32FileOffset, -+ ePixelFormat, -+ ui32Width, ui32Height, ui32StrideInBytes, eMemFormat); -+ -+ PDumpWriteString2(pszScript, ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32Address, -+ IMG_UINT32 ui32Size, IMG_UINT32 ui32PDumpFlags) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC); -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n", -+ ui32Address, ui32FileOffset, pszFileName); -+ -+ PDumpWriteString2(pszScript, ui32PDumpFlags); -+ -+ return PVRSRV_OK; -+} -+ -+static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags) -+{ -+ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], -+ (IMG_UINT8 *) pszString, strlen(pszString), -+ ui32Flags); -+} -+ -+static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 * pui8Data, -+ IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags) -+{ -+ IMG_UINT32 ui32Written = 0; -+ IMG_UINT32 ui32Off = 0; -+ -+ if (!psStream || gui32PDumpSuspended || (ui32Flags & PDUMP_FLAGS_NEVER)) { -+ return IMG_TRUE; -+ } -+ -+ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]) { -+ IMG_UINT32 ui32ParamOutPos = -+ 
gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState. -+ psStream -+ [PDUMP_STREAM_PARAM2]); -+ -+ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE) { -+ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] -+ && -+ PDumpWriteString2 -+ ("\r\n-- Splitting pdump output file\r\n\r\n", -+ ui32Flags))) { -+ DbgSetMarker(gsDBGPdumpState. -+ psStream[PDUMP_STREAM_PARAM2], -+ ui32ParamOutPos); -+ gsDBGPdumpState.ui32ParamFileNum++; -+ } -+ } -+ } -+ -+ while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF)) { -+ ui32Written = -+ DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, -+ ui32Flags); -+ -+ if (ui32Written == 0) { -+ OSReleaseThreadQuanta(); -+ } -+ -+ if (ui32Written != 0xFFFFFFFF) { -+ ui32Off += ui32Written; -+ ui32Count -= ui32Written; -+ } -+ } -+ -+ if (ui32Written == 0xFFFFFFFF) { -+ return IMG_FALSE; -+ } -+ -+ return IMG_TRUE; -+} -+ -+static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame) -+{ -+ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame); -+} -+ -+static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream) -+{ -+ return gpfnDbgDrv->pfnGetFrame(psStream); -+} -+ -+static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) -+{ -+ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker); -+} -+ -+static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 * pui8Data, -+ IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags) -+{ -+ IMG_UINT32 ui32BytesWritten; -+ -+ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) { -+ -+ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) && -+ (psStream->ui32Start == 0xFFFFFFFF) && -+ (psStream->ui32End == 0xFFFFFFFF) && -+ psStream->bInitPhaseComplete) { -+ ui32BytesWritten = ui32BCount; -+ } else { -+ ui32BytesWritten = -+ gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, -+ ui32BCount, 1); -+ } -+ } else { -+ if (ui32Flags & PDUMP_FLAGS_LASTFRAME) { -+ IMG_UINT32 ui32DbgFlags; -+ -+ ui32DbgFlags = 0; -+ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER) { -+ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF; -+ } -+ -+ ui32BytesWritten = -+ gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, -+ ui32BCount, 1, ui32DbgFlags); -+ } else { -+ ui32BytesWritten = -+ gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, -+ ui32BCount, 1); -+ } -+ } -+ -+ return ui32BytesWritten; -+} -+ -+IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame) -+{ -+ IMG_BOOL bFrameDumped; -+ -+ bFrameDumped = IMG_FALSE; -+ PDumpSetFrameKM(ui32CurrentFrame + 1); -+ bFrameDumped = PDumpIsCaptureFrameKM(); -+ PDumpSetFrameKM(ui32CurrentFrame); -+ -+ return bFrameDumped; -+} -+ -+IMG_VOID PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum, -+ IMG_BOOL bLastFrame, -+ IMG_UINT32 * pui32Registers, -+ IMG_UINT32 ui32NumRegisters) -+{ -+ IMG_UINT32 ui32FileOffset, ui32Flags; -+ IMG_UINT32 i; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(); -+ -+ ui32Flags = bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0; -+ ui32FileOffset = 0; -+ -+ PDUMPCOMMENTWITHFLAGS(ui32Flags, -+ "\r\n-- Dump 3D signature registers\r\n"); -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%lu_3d.sig", -+ ui32DumpFrameNum); -+ -+ for (i = 0; i < ui32NumRegisters; i++) { -+ PDumpReadRegKM(pszFile, ui32FileOffset, pui32Registers[i], -+ sizeof(IMG_UINT32), ui32Flags); -+ ui32FileOffset += sizeof(IMG_UINT32); -+ } -+} -+ -+static IMG_VOID PDumpCountRead(IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Address, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 * pui32FileOffset, -+ IMG_BOOL bLastFrame) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, -+ "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n", ui32Address, -+ *pui32FileOffset, pszFileName); -+ PDumpWriteString2(pszScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0); -+ -+ *pui32FileOffset += ui32Size; -+} -+ -+IMG_VOID PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum, -+ IMG_BOOL bLastFrame, -+ IMG_UINT32 * pui32Registers, -+ IMG_UINT32 ui32NumRegisters) -+{ -+ IMG_UINT32 ui32FileOffset; -+ IMG_UINT32 i; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(); -+ -+ PDUMPCOMMENTWITHFLAGS(bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0, -+ "\r\n-- Dump counter registers\r\n"); -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%lu.perf", -+ ui32DumpFrameNum); -+ ui32FileOffset = 0; -+ -+ for (i = 0; i < ui32NumRegisters; i++) { -+ PDumpCountRead(pszFile, pui32Registers[i], sizeof(IMG_UINT32), -+ &ui32FileOffset, bLastFrame); -+ } -+} -+ -+IMG_VOID PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum, -+ IMG_UINT32 ui32TAKickCount, -+ IMG_BOOL bLastFrame, -+ IMG_UINT32 * pui32Registers, -+ IMG_UINT32 ui32NumRegisters) -+{ -+ IMG_UINT32 ui32FileOffset, ui32Flags; -+ IMG_UINT32 i; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(); -+ -+ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0; -+ PDUMPCOMMENTWITHFLAGS(ui32Flags, -+ "\r\n-- Dump TA signature registers\r\n"); -+ snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%lu_ta.sig", -+ ui32DumpFrameNum); -+ -+ ui32FileOffset = -+ ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32); -+ -+ for (i = 0; i < ui32NumRegisters; i++) { -+ PDumpReadRegKM(pszFile, ui32FileOffset, pui32Registers[i], -+ sizeof(IMG_UINT32), ui32Flags); -+ ui32FileOffset += sizeof(IMG_UINT32); -+ } -+} -+ -+IMG_VOID PDumpRegRead(const IMG_UINT32 ui32RegOffset, IMG_UINT32 ui32Flags) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%lX\r\n", -+ ui32RegOffset); -+ PDumpWriteString2(pszScript, ui32Flags); -+} -+ -+IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, -+ IMG_BOOL bLastFrame) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%lX\r\n", -+ ui32RegOffset); -+ PDumpWriteString2(pszScript, bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0); -+} -+ -+void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo, -+ IMG_UINT32 ui32ROffOffset, -+ IMG_UINT32 ui32WPosVal, -+ IMG_UINT32 ui32PacketSize, -+ IMG_UINT32 ui32BufferSize, -+ IMG_UINT32 ui32Flags, IMG_HANDLE hUniqueTag) -+{ -+ IMG_UINT32 ui32PageOffset; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_DEV_VIRTADDR sDevVPageAddr; -+ IMG_CPU_PHYADDR CpuPAddr; -+ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= -+ psROffMemInfo->ui32AllocSize); -+ -+ sDevVAddr = psROffMemInfo->sDevVAddr; -+ -+ sDevVAddr.uiAddr += ui32ROffOffset; -+ -+ CpuPAddr = -+ OSMemHandleToCpuPAddr(psROffMemInfo->sMemBlk.hOSMemHandle, -+ ui32ROffOffset); -+ ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE - 1); -+ -+ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset; -+ -+ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr); -+ -+ sDevPAddr.uiAddr += ui32PageOffset; -+ -+ snprintf(pszScript, -+ SZ_SCRIPT_SIZE_MAX, -+ "CBP :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n", -+ hUniqueTag, -+ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1), -+ sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1), -+ ui32WPosVal, ui32PacketSize, ui32BufferSize); -+ PDumpWriteString2(pszScript, ui32Flags); -+} -+ -+IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags) -+{ -+ __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(); -+ -+ sprintf(pszScript, "IDL %lu\r\n", ui32Clocks); -+ PDumpWriteString2(pszScript, ui32Flags); -+} -+ -+IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks) -+{ -+ PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS); -+} -+ -+IMG_VOID PDumpSuspendKM(IMG_VOID) -+{ -+ gui32PDumpSuspended++; -+} -+ -+IMG_VOID PDumpResumeKM(IMG_VOID) -+{ -+ gui32PDumpSuspended--; -+} -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pdumpdefs.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pdumpdefs.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pdumpdefs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pdumpdefs.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,92 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__PDUMPDEFS_H__) -+#define __PDUMPDEFS_H__ -+ -+typedef enum _PDUMP_PIXEL_FORMAT_ { -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2, -+ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9, -+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, -+ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11, -+ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12, -+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13, -+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15, -+ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16, -+ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17, -+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18, -+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20, -+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25, -+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26, -+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27, -+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28, -+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29, -+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31, -+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36, -+ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37, -+ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38, -+ -+ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff -+} PDUMP_PIXEL_FORMAT; -+ -+typedef enum _PDUMP_MEM_FORMAT_ { -+ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0, -+ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1, -+ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8, -+ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9, -+ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10, -+ -+ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff -+} PDUMP_MEM_FORMAT; -+ -+typedef enum _PDUMP_POLL_OPERATOR { -+ PDUMP_POLL_OPERATOR_EQUAL = 0, -+ PDUMP_POLL_OPERATOR_LESS = 1, -+ PDUMP_POLL_OPERATOR_LESSEQUAL = 2, -+ PDUMP_POLL_OPERATOR_GREATER = 3, -+ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4, -+ PDUMP_POLL_OPERATOR_NOTEQUAL = 5, -+} PDUMP_POLL_OPERATOR; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pdump_km.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pdump_km.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pdump_km.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pdump_km.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,290 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. 
-+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _PDUMP_KM_H_ -+#define _PDUMP_KM_H_ -+ -+ -+#define PDUMP_FLAGS_NEVER 0x08000000 -+#define PDUMP_FLAGS_TOOUT2MEM 0x10000000 -+#define PDUMP_FLAGS_LASTFRAME 0x20000000 -+#define PDUMP_FLAGS_RESETLFBUFFER 0x40000000 -+#define PDUMP_FLAGS_CONTINUOUS 0x80000000 -+ -+#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 -+#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 -+ -+#ifndef PDUMP -+#define MAKEUNIQUETAG(hMemInfo) (0) -+#endif -+ -+#ifdef PDUMP -+ -+#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping) -+ -+#define PDUMP_REG_FUNC_NAME PDumpReg -+ -+ IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO * -+ psMemInfo, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_BOOL bLastFrame, -+ IMG_BOOL bOverwrite, -+ IMG_HANDLE hUniqueTag); -+ -+ IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr, -+ PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE hUniqueTag); -+ PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_DEV_PHYADDR * pPages, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEV_VIRTADDR sDevAddr, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32Length, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE hUniqueTag); -+ -+ PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags, -+ IMG_BOOL bInitialisePages, -+ IMG_HANDLE hUniqueTag1, -+ IMG_HANDLE hUniqueTag2); -+ IMG_VOID PDumpInit(IMG_VOID); -+ IMG_VOID PDumpDeInit(IMG_VOID); -+ IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame); -+ IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR * pszComment, -+ IMG_UINT32 ui32Flags); -+ IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR * pszString, -+ IMG_UINT32 ui32Flags); -+ PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Flags); -+ IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32Width, -+ IMG_UINT32 ui32Height, -+ IMG_UINT32 ui32StrideInBytes, -+ IMG_DEV_VIRTADDR sDevBaseAddr, -+ IMG_UINT32 ui32Size, -+ PDUMP_PIXEL_FORMAT ePixelFormat, -+ PDUMP_MEM_FORMAT eMemFormat, -+ IMG_UINT32 ui32PDumpFlags); -+ IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32Address, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32PDumpFlags); -+ IMG_VOID PDUMP_REG_FUNC_NAME(IMG_UINT32 dwReg, IMG_UINT32 dwData); -+ -+ IMG_VOID PDumpMsvdxRegRead(const IMG_CHAR * const pRegRegion, -+ const IMG_UINT32 dwRegOffset); -+ -+ IMG_VOID PDumpMsvdxRegWrite(const IMG_CHAR * const pRegRegion, -+ const IMG_UINT32 dwRegOffset, -+ const IMG_UINT32 dwData); -+ -+ PVRSRV_ERROR PDumpMsvdxRegPol(const IMG_CHAR * const pRegRegion, -+ const IMG_UINT32 ui32Offset, -+ const IMG_UINT32 ui32CheckFuncIdExt, 
-+ const IMG_UINT32 ui32RequValue, -+ const IMG_UINT32 ui32Enable, -+ const IMG_UINT32 ui32PollCount, -+ const IMG_UINT32 ui32TimeOut); -+ -+ PVRSRV_ERROR PDumpMsvdxWriteRef(const IMG_CHAR * const pRegRegion, -+ const IMG_UINT32 ui32VLROffset, -+ const IMG_UINT32 ui32Physical); -+ -+ IMG_VOID PDumpComment(IMG_CHAR * pszFormat, ...); -+ IMG_VOID PDumpCommentWithFlags(IMG_UINT32 ui32Flags, -+ IMG_CHAR * pszFormat, ...); -+ PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask); -+ PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Flags); -+ IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID); -+ IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID); -+ -+ IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_UINT32 ui32DevVAddr, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_HANDLE hOSMemHandle, -+ IMG_UINT32 ui32NumBytes, -+ IMG_HANDLE hUniqueTag); -+ IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_UINT32 ui32DevVAddr, -+ IMG_PUINT32 pui32PhysPages, -+ IMG_UINT32 ui32NumPages, -+ IMG_HANDLE hUniqueTag); -+ IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_UINT32 ui32NumBytes, -+ IMG_HANDLE hUniqueTag); -+ IMG_VOID PDumpFreePages(struct _BM_HEAP_ *psBMHeap, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_UINT32 ui32NumBytes, -+ IMG_HANDLE hUniqueTag, IMG_BOOL bInterleaved); -+ IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_VIRTADDR pvLinAddr, -+ IMG_UINT32 ui32NumBytes, -+ IMG_HANDLE hUniqueTag); -+ IMG_VOID PDumpPDReg(IMG_UINT32 ui32Reg, -+ IMG_UINT32 ui32dwData, IMG_HANDLE hUniqueTag); -+ IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg, -+ IMG_UINT32 ui32Data, -+ IMG_UINT32 ui32Flags, -+ IMG_HANDLE hUniqueTag); -+ -+ PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_PHYADDR sPDDevPAddr, -+ IMG_HANDLE hUniqueTag1, -+ IMG_HANDLE hUniqueTag2); -+ -+ IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame); -+ -+ IMG_VOID PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum, -+ IMG_UINT32 ui32TAKickCount, -+ IMG_BOOL bLastFrame, -+ IMG_UINT32 * pui32Registers, -+ IMG_UINT32 ui32NumRegisters); -+ -+ IMG_VOID PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum, -+ IMG_BOOL bLastFrame, -+ IMG_UINT32 * pui32Registers, -+ IMG_UINT32 ui32NumRegisters); -+ -+ IMG_VOID PDumpRegRead(const IMG_UINT32 dwRegOffset, -+ IMG_UINT32 ui32Flags); -+ -+ IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, -+ IMG_BOOL bLastFrame); -+ -+ IMG_VOID PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum, -+ IMG_BOOL bLastFrame, -+ IMG_UINT32 * pui32Registers, -+ IMG_UINT32 ui32NumRegisters); -+ -+ IMG_VOID PDumpEndInitPhase(IMG_VOID); -+ -+ void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo, -+ IMG_UINT32 ui32ROffOffset, -+ IMG_UINT32 ui32WPosVal, -+ IMG_UINT32 ui32PacketSize, -+ IMG_UINT32 ui32BufferSize, -+ IMG_UINT32 ui32Flags, IMG_HANDLE hUniqueTag); -+ -+ IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags); -+ IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks); -+ -+ IMG_VOID PDumpSuspendKM(IMG_VOID); -+ IMG_VOID PDumpResumeKM(IMG_VOID); -+ -+#define PDUMPMEMPOL PDumpMemPolKM -+#define PDUMPMEM PDumpMemKM -+#define PDUMPMEM2 PDumpMem2KM -+#define PDUMPINIT PDumpInit -+#define PDUMPDEINIT PDumpDeInit -+#define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM -+#define PDUMPTESTFRAME PDumpIsCaptureFrameKM -+#define PDUMPTESTNEXTFRAME 
PDumpTestNextFrame -+#define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM -+#define PDUMPREG PDUMP_REG_FUNC_NAME -+#define PDUMPCOMMENT PDumpComment -+#define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags -+#define PDUMPREGPOL PDumpRegPolKM -+#define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM -+#define PDUMPMALLOCPAGES PDumpMallocPages -+#define PDUMPMALLOCPAGETABLE PDumpMallocPageTable -+#define PDUMPFREEPAGES PDumpFreePages -+#define PDUMPFREEPAGETABLE PDumpFreePageTable -+#define PDUMPPDREG PDumpPDReg -+#define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags -+#define PDUMPCBP PDumpCBP -+#define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys -+#define PDUMPENDINITPHASE PDumpEndInitPhase -+#define PDUMPMSVDXREGWRITE PDumpMsvdxRegWrite -+#define PDUMPMSVDXREGREAD PDumpMsvdxRegRead -+#define PDUMPMSVDXPOL PDumpMsvdxRegPol -+#define PDUMPMSVDXWRITEREF PDumpMsvdxWriteRef -+#define PDUMPBITMAPKM PDumpBitmapKM -+#define PDUMPDRIVERINFO PDumpDriverInfoKM -+#define PDUMPIDLWITHFLAGS PDumpIDLWithFlags -+#define PDUMPIDL PDumpIDL -+#define PDUMPSUSPEND PDumpSuspendKM -+#define PDUMPRESUME PDumpResumeKM -+ -+#else -+#define PDUMPMEMPOL(args...) -+#define PDUMPMEM(args...) -+#define PDUMPMEM2(args...) -+#define PDUMPINIT(args...) -+#define PDUMPDEINIT(args...) -+#define PDUMPISLASTFRAME(args...) -+#define PDUMPTESTFRAME(args...) -+#define PDUMPTESTNEXTFRAME(args...) -+#define PDUMPREGWITHFLAGS(args...) -+#define PDUMPREG(args...) -+#define PDUMPCOMMENT(args...) -+#define PDUMPREGPOL(args...) -+#define PDUMPREGPOLWITHFLAGS(args...) -+#define PDUMPMALLOCPAGES(args...) -+#define PDUMPMALLOCPAGETABLE(args...) -+#define PDUMPFREEPAGES(args...) -+#define PDUMPFREEPAGETABLE(args...) -+#define PDUMPPDREG(args...) -+#define PDUMPPDREGWITHFLAGS(args...) -+#define PDUMPSYNC(args...) -+#define PDUMPCOPYTOMEM(args...) -+#define PDUMPWRITE(args...) -+#define PDUMPCBP(args...) -+#define PDUMPCOMMENTWITHFLAGS(args...) -+#define PDUMPMALLOCPAGESPHYS(args...) -+#define PDUMPENDINITPHASE(args...) -+#define PDUMPMSVDXREG(args...) -+#define PDUMPMSVDXREGWRITE(args...) -+#define PDUMPMSVDXREGREAD(args...) -+#define PDUMPMSVDXPOLEQ(args...) -+#define PDUMPMSVDXPOL(args...) -+#define PDUMPBITMAPKM(args...) -+#define PDUMPDRIVERINFO(args...) -+#define PDUMPIDLWITHFLAGS(args...) -+#define PDUMPIDL(args...) -+#define PDUMPSUSPEND(args...) -+#define PDUMPRESUME(args...) -+#define PDUMPMSVDXWRITEREF(args...) -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/perproc.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/perproc.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/perproc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/perproc.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,242 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "resman.h" -+#include "handle.h" -+#include "perproc.h" -+ -+#define HASH_TAB_INIT_SIZE 32 -+ -+static HASH_TABLE *psHashTab = IMG_NULL; -+ -+static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINTPTR_T uiPerProc; -+ -+ PVR_ASSERT(psPerProc != IMG_NULL); -+ -+ uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T) psPerProc->ui32PID); -+ if (uiPerProc == 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreePerProcessData: Couldn't find process in per-process data hash table")); -+ -+ PVR_ASSERT(psPerProc->ui32PID == 0); -+ } else { -+ PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *) uiPerProc == psPerProc); -+ PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *) uiPerProc)->ui32PID == -+ psPerProc->ui32PID); -+ } -+ -+ if (psPerProc->psHandleBase != IMG_NULL) { -+ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreePerProcessData: Couldn't free handle base for process (%d)", -+ eError)); -+ return eError; -+ } -+ } -+ -+ if (psPerProc->hPerProcData != IMG_NULL) { -+ eError = -+ PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, -+ psPerProc->hPerProcData, -+ PVRSRV_HANDLE_TYPE_PERPROC_DATA); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreePerProcessData: Couldn't release per-process data handle (%d)", -+ eError)); -+ return eError; -+ } -+ } -+ -+ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(*psPerProc), -+ psPerProc, psPerProc->hBlockAlloc); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreePerProcessData: Couldn't free per-process data (%d)", -+ eError)); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID) -+{ -+ PVRSRV_PER_PROCESS_DATA *psPerProc; -+ -+ PVR_ASSERT(psHashTab != IMG_NULL); -+ -+ psPerProc = -+ (PVRSRV_PER_PROCESS_DATA *) HASH_Retrieve(psHashTab, -+ (IMG_UINTPTR_T) ui32PID); -+ return psPerProc; -+} -+ -+PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID) -+{ -+ PVRSRV_PER_PROCESS_DATA *psPerProc; -+ IMG_HANDLE hBlockAlloc; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT(psHashTab != IMG_NULL); -+ -+ psPerProc = -+ (PVRSRV_PER_PROCESS_DATA *) HASH_Retrieve(psHashTab, -+ (IMG_UINTPTR_T) ui32PID); -+ -+ if (psPerProc == IMG_NULL) { -+ -+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(*psPerProc), -+ (IMG_PVOID *) & psPerProc, &hBlockAlloc); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", -+ eError)); -+ return eError; -+ } -+ OSMemSet(psPerProc, 0, sizeof(*psPerProc)); -+ psPerProc->hBlockAlloc = hBlockAlloc; -+ -+ if (!HASH_Insert -+ (psHashTab, (IMG_UINTPTR_T) ui32PID, -+ (IMG_UINTPTR_T) psPerProc)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table")); -+ eError = PVRSRV_ERROR_GENERIC; -+ goto failure; -+ } -+ -+ psPerProc->ui32PID 
= ui32PID; -+ psPerProc->ui32RefCount = 0; -+ -+ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE, -+ &psPerProc->hPerProcData, -+ psPerProc, -+ PVRSRV_HANDLE_TYPE_PERPROC_DATA, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", -+ eError)); -+ goto failure; -+ } -+ -+ eError = -+ PVRSRVAllocHandleBase(&psPerProc->psHandleBase, ui32PID); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", -+ eError)); -+ goto failure; -+ } -+ -+ eError = -+ PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager")); -+ goto failure; -+ } -+ } -+ -+ psPerProc->ui32RefCount++; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d", -+ ui32PID, psPerProc->ui32RefCount)); -+ -+ return eError; -+ -+failure: -+ (void)FreePerProcessData(psPerProc); -+ return eError; -+} -+ -+IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_PER_PROCESS_DATA *psPerProc; -+ -+ PVR_ASSERT(psHashTab != IMG_NULL); -+ -+ psPerProc = -+ (PVRSRV_PER_PROCESS_DATA *) HASH_Retrieve(psHashTab, -+ (IMG_UINTPTR_T) ui32PID); -+ if (psPerProc == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u", -+ ui32PID)); -+ } else { -+ psPerProc->ui32RefCount--; -+ if (psPerProc->ui32RefCount == 0) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "PVRSRVPerProcessDataDisconnect: " -+ "Last close from process 0x%x received", -+ ui32PID)); -+ -+ PVRSRVResManDisconnect(psPerProc->hResManContext, -+ IMG_FALSE); -+ -+ eError = FreePerProcessData(psPerProc); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataDisconnect: Error freeing per-process data")); -+ } -+ } -+ } -+} -+ -+PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID) -+{ -+ PVR_ASSERT(psHashTab == IMG_NULL); -+ -+ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE); -+ if (psHashTab == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID) -+{ -+ -+ if (psHashTab != IMG_NULL) { -+ -+ HASH_Delete(psHashTab); -+ psHashTab = IMG_NULL; -+ } -+ -+ return PVRSRV_OK; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/perproc.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/perproc.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/perproc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/perproc.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,60 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __PERPROC_H__ -+#define __PERPROC_H__ -+ -+ -+#include "img_types.h" -+#include "resman.h" -+ -+#include "handle.h" -+ -+ typedef struct _PVRSRV_PER_PROCESS_DATA_ { -+ IMG_UINT32 ui32PID; -+ IMG_HANDLE hBlockAlloc; -+ PRESMAN_CONTEXT hResManContext; -+ IMG_HANDLE hPerProcData; -+ PVRSRV_HANDLE_BASE *psHandleBase; -+ -+ IMG_BOOL bHandlesBatched; -+ IMG_UINT32 ui32RefCount; -+ -+ IMG_BOOL bInitProcess; -+ -+ IMG_HANDLE hOsPrivateData; -+ } PVRSRV_PER_PROCESS_DATA; -+ -+ IMG_IMPORT PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 -+ ui32PID); -+ -+ PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID); -+ IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID); -+ -+ PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID); -+ PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/power.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/power.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/power.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/power.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,649 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+#include "services_headers.h" -+#include -+#include -+#include -+#include -+ -+static IMG_BOOL gbInitServerRunning = IMG_FALSE; -+static IMG_BOOL gbInitServerRan = IMG_FALSE; -+static IMG_BOOL gbInitSuccessful = IMG_FALSE; -+static DEFINE_MUTEX(hPowerAndFreqLock); -+static DECLARE_WAIT_QUEUE_HEAD(hDvfsWq); -+static IMG_BOOL gbDvfsActive; -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE -+ eInitServerState, IMG_BOOL bState) -+{ -+ -+ switch (eInitServerState) { -+ case PVRSRV_INIT_SERVER_RUNNING: -+ gbInitServerRunning = bState; -+ break; -+ case PVRSRV_INIT_SERVER_RAN: -+ gbInitServerRan = bState; -+ break; -+ case PVRSRV_INIT_SERVER_SUCCESSFUL: -+ gbInitSuccessful = bState; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetInitServerState : Unknown state %lx", -+ eInitServerState)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState) -+{ -+ IMG_BOOL bReturnVal; -+ -+ switch (eInitServerState) { -+ case PVRSRV_INIT_SERVER_RUNNING: -+ bReturnVal = gbInitServerRunning; -+ break; -+ case PVRSRV_INIT_SERVER_RAN: -+ bReturnVal = gbInitServerRan; -+ break; -+ case PVRSRV_INIT_SERVER_SUCCESSFUL: -+ bReturnVal = gbInitSuccessful; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetInitServerState : Unknown state %lx", -+ eInitServerState)); -+ bReturnVal = IMG_FALSE; -+ } -+ -+ return bReturnVal; -+} -+ -+static IMG_BOOL _IsSystemStatePowered(PVR_POWER_STATE eSystemPowerState) -+{ -+ return (IMG_BOOL) (eSystemPowerState < PVRSRV_POWER_STATE_D2); -+} -+ -+IMG_EXPORT IMG_VOID PVRSRVDvfsLock(IMG_VOID) -+{ -+ mutex_lock(&hPowerAndFreqLock); -+ gbDvfsActive = 1; -+ mutex_unlock(&hPowerAndFreqLock); -+} -+ -+IMG_EXPORT IMG_VOID PVRSRVDvfsUnlock(IMG_VOID) -+{ -+ mutex_lock(&hPowerAndFreqLock); -+ gbDvfsActive = 0; -+ wake_up(&hDvfsWq); -+ mutex_unlock(&hPowerAndFreqLock); -+} -+ -+static IMG_BOOL IsPowerLocked(void) -+{ -+ return mutex_is_locked(&hPowerAndFreqLock) || gbDvfsActive; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, -+ IMG_BOOL bSystemPowerEvent) -+{ -+ if ((ui32CallerID == TIMER_ID) && IsPowerLocked()) -+ return PVRSRV_ERROR_RETRY; -+ mutex_lock(&hPowerAndFreqLock); -+ while (gbDvfsActive) { -+ DEFINE_WAIT(__wait); -+ prepare_to_wait(&hDvfsWq, &__wait, TASK_UNINTERRUPTIBLE); -+ mutex_unlock(&hPowerAndFreqLock); -+ schedule(); -+ mutex_lock(&hPowerAndFreqLock); -+ finish_wait(&hDvfsWq, &__wait); -+ } -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID) -+{ -+ mutex_unlock(&hPowerAndFreqLock); -+} -+ -+static -+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices, -+ IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ PVR_POWER_STATE eNewDevicePowerState; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psPowerDevice = psSysData->psPowerDeviceList; -+ while (psPowerDevice) { -+ if (bAllDevices -+ || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) { -+ eNewDevicePowerState = -+ (eNewPowerState == -+ PVRSRV_POWER_Unspecified) ? 
psPowerDevice-> -+ eDefaultPowerState : eNewPowerState; -+ -+ if (psPowerDevice->eCurrentPowerState != -+ eNewDevicePowerState) { -+ if (psPowerDevice->pfnPrePower != IMG_NULL) { -+ -+ eError = -+ psPowerDevice-> -+ pfnPrePower(psPowerDevice-> -+ hDevCookie, -+ eNewDevicePowerState, -+ psPowerDevice-> -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) { -+ pr_err -+ ("pfnPrePower failed (%u)\n", -+ eError); -+ return eError; -+ } -+ } -+ -+ eError = -+ SysDevicePrePowerState(psPowerDevice-> -+ ui32DeviceIndex, -+ eNewDevicePowerState, -+ psPowerDevice-> -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) { -+ pr_err -+ ("SysDevicePrePowerState failed (%u)\n", -+ eError); -+ return eError; -+ } -+ } -+ } -+ -+ psPowerDevice = psPowerDevice->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static -+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices, -+ IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ PVR_POWER_STATE eNewDevicePowerState; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psPowerDevice = psSysData->psPowerDeviceList; -+ while (psPowerDevice) { -+ if (bAllDevices -+ || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) { -+ eNewDevicePowerState = -+ (eNewPowerState == -+ PVRSRV_POWER_Unspecified) ? psPowerDevice-> -+ eDefaultPowerState : eNewPowerState; -+ -+ if (psPowerDevice->eCurrentPowerState != -+ eNewDevicePowerState) { -+ -+ eError = -+ SysDevicePostPowerState(psPowerDevice-> -+ ui32DeviceIndex, -+ eNewDevicePowerState, -+ psPowerDevice-> -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) { -+ pr_err -+ ("SysDevicePostPowerState failed (%u)\n", -+ eError); -+ return eError; -+ } -+ -+ if (psPowerDevice->pfnPostPower != IMG_NULL) { -+ -+ eError = -+ psPowerDevice-> -+ pfnPostPower(psPowerDevice-> -+ hDevCookie, -+ eNewDevicePowerState, -+ psPowerDevice-> -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) { -+ pr_err -+ ("pfnPostPower failed (%u)\n", -+ eError); -+ return eError; -+ } -+ } -+ -+ psPowerDevice->eCurrentPowerState = -+ eNewDevicePowerState; -+ } -+ } -+ -+ psPowerDevice = psPowerDevice->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState, -+ IMG_UINT32 ui32CallerID, -+ IMG_BOOL bRetainMutex) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ eError = -+ PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, -+ eNewPowerState); -+ if (eError != PVRSRV_OK) { -+ goto Exit; -+ } -+ -+ eError = -+ PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, -+ eNewPowerState); -+ -+Exit: -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", -+ eNewPowerState, eError)); -+ } -+ -+ if (!bRetainMutex || (eError != PVRSRV_OK)) { -+ PVRSRVPowerUnlock(ui32CallerID); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYS_DATA *psSysData; -+ PVR_POWER_STATE eNewDevicePowerState; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ eError = 
PVRSRVPowerLock(KERNEL_ID, IMG_TRUE); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ if (_IsSystemStatePowered(eNewPowerState) != -+ _IsSystemStatePowered(psSysData->eCurrentPowerState)) { -+ if (_IsSystemStatePowered(eNewPowerState)) { -+ -+ eNewDevicePowerState = PVRSRV_POWER_Unspecified; -+ } else { -+ eNewDevicePowerState = PVRSRV_POWER_STATE_D3; -+ } -+ -+ eError = -+ PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, -+ eNewDevicePowerState); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ } -+ -+ if (eNewPowerState != psSysData->eCurrentPowerState) { -+ -+ eError = SysSystemPrePowerState(eNewPowerState); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ } -+ -+ return eError; -+ -+ErrorExit: -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x", -+ psSysData->eCurrentPowerState, eNewPowerState, eError)); -+ -+ psSysData->eFailedPowerState = eNewPowerState; -+ -+ PVRSRVPowerUnlock(KERNEL_ID); -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYS_DATA *psSysData; -+ PVR_POWER_STATE eNewDevicePowerState; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ goto Exit; -+ } -+ -+ if (eNewPowerState != psSysData->eCurrentPowerState) { -+ -+ eError = SysSystemPostPowerState(eNewPowerState); -+ if (eError != PVRSRV_OK) { -+ goto Exit; -+ } -+ } -+ -+ if (_IsSystemStatePowered(eNewPowerState) != -+ _IsSystemStatePowered(psSysData->eCurrentPowerState)) { -+ if (_IsSystemStatePowered(eNewPowerState)) { -+ -+ eNewDevicePowerState = PVRSRV_POWER_Unspecified; -+ } else { -+ eNewDevicePowerState = PVRSRV_POWER_STATE_D3; -+ } -+ -+ eError = -+ PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, -+ eNewDevicePowerState); -+ if (eError != PVRSRV_OK) { -+ goto Exit; -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK", -+ psSysData->eCurrentPowerState, eNewPowerState)); -+ -+ psSysData->eCurrentPowerState = eNewPowerState; -+ -+Exit: -+ -+ PVRSRVPowerUnlock(KERNEL_ID); -+ -+ if (_IsSystemStatePowered(eNewPowerState) && -+ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) { -+ -+ PVRSRVCommandCompleteCallbacks(); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT PVRSRV_ERROR PVRSRVSetPowerStateKM(PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ eError = PVRSRVSystemPrePowerStateKM(eNewPowerState); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ -+ eError = PVRSRVSystemPostPowerStateKM(eNewPowerState); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ -+ psSysData->eFailedPowerState = PVRSRV_POWER_Unspecified; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x", -+ psSysData->eCurrentPowerState, eNewPowerState, eError)); -+ -+ psSysData->eFailedPowerState = eNewPowerState; -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex, -+ PFN_PRE_POWER pfnPrePower, -+ PFN_POST_POWER pfnPostPower, -+ PFN_PRE_CLOCKSPEED_CHANGE -+ pfnPreClockSpeedChange, -+ PFN_POST_CLOCKSPEED_CHANGE -+ pfnPostClockSpeedChange, -+ IMG_HANDLE hDevCookie, -+ PVR_POWER_STATE eCurrentPowerState, -+ PVR_POWER_STATE eDefaultPowerState) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV 
*psPowerDevice; -+ -+ if (pfnPrePower == IMG_NULL && pfnPostPower == IMG_NULL) { -+ return PVRSRVRemovePowerDevice(ui32DeviceIndex); -+ } -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_POWER_DEV), -+ (IMG_VOID **) & psPowerDevice, IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV")); -+ return eError; -+ } -+ -+ psPowerDevice->pfnPrePower = pfnPrePower; -+ psPowerDevice->pfnPostPower = pfnPostPower; -+ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; -+ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; -+ psPowerDevice->hDevCookie = hDevCookie; -+ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex; -+ psPowerDevice->eCurrentPowerState = eCurrentPowerState; -+ psPowerDevice->eDefaultPowerState = eDefaultPowerState; -+ -+ psPowerDevice->psNext = psSysData->psPowerDeviceList; -+ psSysData->psPowerDeviceList = psPowerDevice; -+ -+ return (PVRSRV_OK); -+} -+ -+PVRSRV_ERROR PVRSRVRemovePowerDevice(IMG_UINT32 ui32DeviceIndex) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psCurrent, *psPrevious; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psCurrent = psSysData->psPowerDeviceList; -+ psPrevious = IMG_NULL; -+ -+ while (psCurrent) { -+ if (psCurrent->ui32DeviceIndex == ui32DeviceIndex) { -+ -+ if (psPrevious) { -+ psPrevious->psNext = psCurrent->psNext; -+ } else { -+ -+ psSysData->psPowerDeviceList = -+ psCurrent->psNext; -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_POWER_DEV), psCurrent, -+ IMG_NULL); -+ -+ break; -+ } else { -+ psPrevious = psCurrent; -+ psCurrent = psCurrent->psNext; -+ } -+ } -+ -+ return (PVRSRV_OK); -+} -+ -+IMG_EXPORT IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return IMG_FALSE; -+ } -+ -+ if (IsPowerLocked()) -+ return IMG_FALSE; -+ -+ psPowerDevice = psSysData->psPowerDeviceList; -+ while (psPowerDevice) { -+ if (psPowerDevice->ui32DeviceIndex == ui32DeviceIndex) { -+ return (IMG_BOOL) (psPowerDevice->eCurrentPowerState == -+ PVRSRV_POWER_STATE_D0); -+ } -+ -+ psPowerDevice = psPowerDevice->psNext; -+ } -+ -+ return IMG_FALSE; -+} -+ -+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex, -+ IMG_BOOL bIdleDevice, -+ IMG_VOID * pvInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(pvInfo); -+ -+ SysAcquireData(&psSysData); -+ -+ psPowerDevice = psSysData->psPowerDeviceList; -+ while (psPowerDevice) { -+ if (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex) { -+ if (psPowerDevice->pfnPreClockSpeedChange) { -+ eError = -+ psPowerDevice-> -+ pfnPreClockSpeedChange(psPowerDevice-> -+ hDevCookie, -+ bIdleDevice, -+ psPowerDevice-> -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) { -+ pr_err -+ ("pfnPreClockSpeedChange failed\n"); -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDevicePreClockSpeedChange : Device %lu failed, error:0x%lx", -+ ui32DeviceIndex, eError)); -+ } -+ } -+ } -+ -+ psPowerDevice = psPowerDevice->psNext; -+ } -+ return eError; -+} -+ -+IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex, -+ IMG_BOOL bIdleDevice, -+ IMG_VOID * pvInfo) -+{ -+ 
PVRSRV_ERROR eError = PVRSRV_OK; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(pvInfo); -+ -+ SysAcquireData(&psSysData); -+ -+ psPowerDevice = psSysData->psPowerDeviceList; -+ while (psPowerDevice) { -+ if (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex) { -+ if (psPowerDevice->pfnPostClockSpeedChange) { -+ eError = -+ psPowerDevice-> -+ pfnPostClockSpeedChange(psPowerDevice-> -+ hDevCookie, -+ bIdleDevice, -+ psPowerDevice-> -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) { -+ pr_err -+ ("pfnPostClockSpeedChange failed\n"); -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDevicePostClockSpeedChange : Device %lu failed, error:0x%lx", -+ ui32DeviceIndex, eError)); -+ } -+ } -+ } -+ -+ psPowerDevice = psPowerDevice->psNext; -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/power.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/power.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/power.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/power.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,118 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef POWER_H -+#define POWER_H -+ -+ -+ typedef struct _PVRSRV_POWER_DEV_TAG_ { -+ PFN_PRE_POWER pfnPrePower; -+ PFN_POST_POWER pfnPostPower; -+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; -+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; -+ IMG_HANDLE hDevCookie; -+ IMG_UINT32 ui32DeviceIndex; -+ PVR_POWER_STATE eDefaultPowerState; -+ PVR_POWER_STATE eCurrentPowerState; -+ struct _PVRSRV_POWER_DEV_TAG_ *psNext; -+ -+ } PVRSRV_POWER_DEV; -+ -+ typedef enum _PVRSRV_INIT_SERVER_STATE_ { -+ PVRSRV_INIT_SERVER_Unspecified = -1, -+ PVRSRV_INIT_SERVER_RUNNING = 0, -+ PVRSRV_INIT_SERVER_RAN = 1, -+ PVRSRV_INIT_SERVER_SUCCESSFUL = 2, -+ PVRSRV_INIT_SERVER_NUM = 3, -+ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff -+ } PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE; -+ -+ IMG_IMPORT -+ IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE -+ eInitServerState); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE -+ eInitServerState, -+ IMG_BOOL bState); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, -+ IMG_BOOL bSystemPowerEvent); -+ IMG_IMPORT IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID); -+ -+ IMG_IMPORT IMG_VOID PVRSRVDvfsLock(IMG_VOID); -+ -+ IMG_IMPORT IMG_VOID PVRSRVDvfsUnlock(IMG_VOID); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE -+ eNewPowerState, -+ IMG_UINT32 ui32CallerID, -+ IMG_BOOL bRetainMutex); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVR_POWER_STATE -+ eNewPowerState); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVR_POWER_STATE -+ eNewPowerState); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVSetPowerStateKM(PVR_POWER_STATE ePVRState); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex, -+ PFN_PRE_POWER pfnPrePower, -+ PFN_POST_POWER pfnPostPower, -+ PFN_PRE_CLOCKSPEED_CHANGE -+ pfnPreClockSpeedChange, -+ PFN_POST_CLOCKSPEED_CHANGE -+ pfnPostClockSpeedChange, -+ IMG_HANDLE hDevCookie, -+ PVR_POWER_STATE -+ eCurrentPowerState, -+ PVR_POWER_STATE -+ eDefaultPowerState); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVRemovePowerDevice(IMG_UINT32 ui32DeviceIndex); -+ -+ IMG_IMPORT IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 -+ ui32DeviceIndex, -+ IMG_BOOL bIdleDevice, -+ IMG_VOID * pvInfo); -+ -+ IMG_IMPORT -+ IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 -+ ui32DeviceIndex, -+ IMG_BOOL bIdleDevice, -+ IMG_VOID * pvInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/proc.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/proc.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/proc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/proc.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,342 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "services_headers.h" -+ -+#include "queue.h" -+#include "resman.h" -+#include "pvrmmap.h" -+#include "pvr_debug.h" -+#include "pvrversion.h" -+#include "proc.h" -+ -+#ifdef DEBUG -+int PVRDebugProcSetLevel(struct file *file, const char *buffer, -+ unsigned long count, void *data); -+int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, -+ int *eof, void *data); -+ -+#endif -+ -+static struct proc_dir_entry *dir; -+ -+static off_t procDumpSysNodes(char *buf, size_t size, off_t off); -+static off_t procDumpVersion(char *buf, size_t size, off_t off); -+ -+off_t printAppend(char *buffer, size_t size, off_t off, const char *format, ...) -+{ -+ int n; -+ int space = size - off; -+ va_list ap; -+ -+ va_start(ap, format); -+ -+ n = vsnprintf(buffer + off, space, format, ap); -+ -+ va_end(ap); -+ -+ if (n > space || n < 0) { -+ return size; -+ } else { -+ return off + n; -+ } -+} -+ -+static int pvr_read_proc(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ pvr_read_proc_t *pprn = data; -+ -+ off_t len = pprn(page, count, off); -+ -+ if (len == END_OF_FILE) { -+ len = 0; -+ *eof = 1; -+ } else if (!len) { -+ *start = (char *)0; -+ } else { -+ *start = (char *)1; -+ } -+ -+ return len; -+} -+ -+int CreateProcEntry(const char *name, read_proc_t rhandler, -+ write_proc_t whandler, void *data) -+{ -+ struct proc_dir_entry *file; -+ mode_t mode; -+ -+ if (!dir) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcEntry: cannot make proc entry /proc/pvr/%s: no parent", -+ name)); -+ return -ENOMEM; -+ } -+ -+ mode = S_IFREG; -+ -+ if (rhandler) { -+ mode |= S_IRUGO; -+ } -+ -+ if (whandler) { -+ mode |= S_IWUSR; -+ } -+ -+ file = create_proc_entry(name, mode, dir); -+ -+ if (file) { -+ file->owner = THIS_MODULE; -+ file->read_proc = rhandler; -+ file->write_proc = whandler; -+ file->data = data; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Created /proc/pvr/%s", name)); -+ -+ return 0; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcEntry: cannot make proc entry /proc/pvr/%s: no memory", -+ name)); -+ -+ return -ENOMEM; -+} -+ -+int CreateProcReadEntry(const char *name, pvr_read_proc_t handler) -+{ -+ struct proc_dir_entry *file; -+ -+ if (!dir) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcReadEntry: cannot make proc entry /proc/pvr/%s: no parent", -+ name)); -+ -+ return -ENOMEM; -+ } -+ -+ file = -+ create_proc_read_entry(name, S_IFREG | S_IRUGO, dir, pvr_read_proc, -+ (void *)handler); -+ -+ if (file) { -+ file->owner = THIS_MODULE; -+ -+ return 0; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcReadEntry: cannot make proc entry 
/proc/pvr/%s: no memory", -+ name)); -+ -+ return -ENOMEM; -+} -+ -+int CreateProcEntries(void) -+{ -+ dir = proc_mkdir("pvr", NULL); -+ -+ if (!dir) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcEntries: cannot make /proc/pvr directory")); -+ -+ return -ENOMEM; -+ } -+ -+ if (CreateProcReadEntry("queue", QueuePrintQueues) || -+ CreateProcReadEntry("version", procDumpVersion) || -+ CreateProcReadEntry("nodes", procDumpSysNodes)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcEntries: couldn't make /proc/pvr files")); -+ -+ return -ENOMEM; -+ } -+#ifdef DEBUG -+ if (CreateProcEntry -+ ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "CreateProcEntries: couldn't make /proc/pvr/debug_level")); -+ -+ return -ENOMEM; -+ } -+#endif -+ -+ return 0; -+} -+ -+void RemoveProcEntry(const char *name) -+{ -+ if (dir) { -+ remove_proc_entry(name, dir); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/pvr/%s", name)); -+} -+ -+void RemoveProcEntries(void) -+{ -+#ifdef DEBUG -+ RemoveProcEntry("debug_level"); -+#endif -+ RemoveProcEntry("queue"); -+ RemoveProcEntry("nodes"); -+ RemoveProcEntry("version"); -+ -+ while (dir->subdir) { -+ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/pvr/%s", -+ dir->subdir->name)); -+ -+ RemoveProcEntry(dir->subdir->name); -+ } -+ -+ remove_proc_entry("pvr", NULL); -+} -+ -+static off_t procDumpVersion(char *buf, size_t size, off_t off) -+{ -+ SYS_DATA *psSysData; -+ -+ if (off == 0) { -+ return printAppend(buf, size, 0, -+ "Version %s (%s) %s\n", -+ PVRVERSION_STRING, -+ PVR_BUILD_TYPE, PVR_BUILD_DIR); -+ } -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ if (off == 1) { -+ IMG_CHAR *pszSystemVersionString = "None"; -+ -+ if (psSysData->pszVersionString) { -+ pszSystemVersionString = psSysData->pszVersionString; -+ } -+ -+ if (strlen(pszSystemVersionString) -+ + strlen("System Version String: \n") -+ + 1 > size) { -+ return 0; -+ } -+ return printAppend(buf, size, 0, -+ "System Version String: %s\n", -+ pszSystemVersionString); -+ } -+ -+ return END_OF_FILE; -+} -+ -+static const char *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType) -+{ -+ switch (deviceType) { -+ default: -+ { -+ static char text[10]; -+ -+ sprintf(text, "?%x", deviceType); -+ -+ return text; -+ } -+ } -+} -+ -+static const char *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass) -+{ -+ switch (deviceClass) { -+ case PVRSRV_DEVICE_CLASS_3D: -+ { -+ return "3D"; -+ } -+ case PVRSRV_DEVICE_CLASS_DISPLAY: -+ { -+ return "display"; -+ } -+ case PVRSRV_DEVICE_CLASS_BUFFER: -+ { -+ return "buffer"; -+ } -+ default: -+ { -+ static char text[10]; -+ -+ sprintf(text, "?%x", deviceClass); -+ return text; -+ } -+ } -+} -+ -+static -+off_t procDumpSysNodes(char *buf, size_t size, off_t off) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ off_t len; -+ -+ if (size < 80) { -+ return 0; -+ } -+ -+ if (off == 0) { -+ return printAppend(buf, size, 0, -+ "Registered nodes\n" -+ "Addr Type Class Index Ref pvDev Size Res\n"); -+ } -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ for (psDevNode = psSysData->psDeviceNodeList; -+ --off && psDevNode; psDevNode = psDevNode->psNext) ; -+ -+ if (!psDevNode) { -+ return END_OF_FILE; -+ } -+ -+ len = printAppend(buf, size, 0, -+ "%p %-8s %-8s %4d %2lu %p %3lu %p\n", -+ psDevNode, -+ deviceTypeToString(psDevNode->sDevId.eDeviceType), -+ deviceClassToString(psDevNode->sDevId.eDeviceClass), -+ psDevNode->sDevId.eDeviceClass, 
-+ psDevNode->ui32RefCount, -+ psDevNode->pvDevice, -+ psDevNode->ui32pvDeviceSize, -+ psDevNode->hResManContext); -+ return (len); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/proc.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/proc.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/proc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/proc.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,51 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __SERVICES_PROC_H__ -+#define __SERVICES_PROC_H__ -+ -+#include -+#include -+ -+#define END_OF_FILE (off_t) -1 -+ -+typedef off_t(pvr_read_proc_t) (char *, size_t, off_t); -+ -+off_t printAppend(char *buffer, size_t size, off_t off, const char *format, ...) -+ __attribute__ ((format(printf, 4, 5))); -+ -+int CreateProcEntries(void); -+ -+int CreateProcReadEntry(const char *name, pvr_read_proc_t handler); -+ -+int CreateProcEntry(const char *name, read_proc_t rhandler, -+ write_proc_t whandler, void *data); -+ -+void RemoveProcEntry(const char *name); -+ -+void RemoveProcEntries(void); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_bridge.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_bridge.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_bridge.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_bridge.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1013 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
-+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __PVR_BRIDGE_H__ -+#define __PVR_BRIDGE_H__ -+ -+ -+#include "servicesint.h" -+ -+ -+#include -+ -+#define PVRSRV_IOC_GID 'g' -+#define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) -+#define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) -+#define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) -+#define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) -+ -+ -+#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3) -+#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4) -+#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5) -+#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6) -+#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7) -+#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8) -+#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9) -+#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10) -+#define PVRSRV_BRIDGE_KV_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11) -+#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12) -+#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13) -+#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14) -+#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15) -+#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16) -+#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17) -+#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18) -+#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19) -+#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20) -+#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21) -+#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22) -+#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23) -+#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24) -+#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25) -+#define PVRSRV_BRIDGE_CACHE_FLUSH_DRM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26) -+#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+26) -+ -+#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1) -+#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS 
PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2) -+ -+#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1) -+#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2) -+ -+#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1) -+#define PVRSRV_BRIDGE_GET_FB_STATS PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0) -+ -+#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1) -+#define PVRSRV_BRIDGE_GET_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_RELEASE_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1) -+ -+#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1) -+#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1) -+ -+#if defined(PDUMP) -+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_PDUMP_INIT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_PDUMP_MEMPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_PDUMP_DUMPMEM PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_PDUMP_REG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3) -+#define PVRSRV_BRIDGE_PDUMP_REGPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4) -+#define PVRSRV_BRIDGE_PDUMP_COMMENT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5) -+#define PVRSRV_BRIDGE_PDUMP_SETFRAME PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6) -+#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7) -+#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8) -+#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9) -+#define PVRSRV_BRIDGE_PDUMP_SYNCPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10) -+#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11) -+#define PVRSRV_BRIDGE_PDUMP_MEMPAGES PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12) -+#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13) -+#define PVRSRV_BRIDGE_PDUMP_PDREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14) -+#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15) -+#define PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16) -+#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17) -+#define PVRSRV_BRIDGE_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18) -+#define PVRSRV_BRIDGE_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+19) -+#define PVRSRV_BRIDGE_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+20) -+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+20) -+#else -+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST -+#endif -+ -+#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1) -+#define PVRSRV_BRIDGE_GET_OEMJTABLE PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0) -+ 
-+#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1) -+#define PVRSRV_BRIDGE_ENUM_CLASS PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0) -+ -+#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1) -+#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3) -+#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4) -+#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5) -+#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6) -+#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7) -+#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8) -+#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9) -+#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10) -+#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11) -+#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12) -+#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13) -+#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14) -+#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14) -+ -+#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1) -+#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3) -+#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3) -+ -+#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1) -+#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1) -+ -+#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1) -+#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3) -+#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3) -+ -+#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1) -+#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0) -+ -+#define 
PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1) -+#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1) -+ -+#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1) -+#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0) -+#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1) -+#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2) -+#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2) -+ -+#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1) -+ -+#define PVRSRV_KERNEL_MODE_CLIENT 1 -+ -+ typedef struct PVRSRV_BRIDGE_RETURN_TAG { -+ PVRSRV_ERROR eError; -+ IMG_VOID *pvData; -+ -+ } PVRSRV_BRIDGE_RETURN; -+ -+ typedef struct PVRSRV_BRIDGE_PACKAGE_TAG { -+ IMG_UINT32 ui32BridgeID; -+ IMG_UINT32 ui32Size; -+ IMG_VOID *pvParamIn; -+ IMG_UINT32 ui32InBufferSize; -+ IMG_VOID *pvParamOut; -+ IMG_UINT32 ui32OutBufferSize; -+ -+ IMG_HANDLE hKernelServices; -+ } PVRSRV_BRIDGE_PACKAGE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 uiDevIndex; -+ PVRSRV_DEVICE_TYPE eDeviceType; -+ -+ } PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_DEVICE_CLASS sDeviceClass; -+ } PVRSRV_BRIDGE_IN_ENUMCLASS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ } PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ } PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ } PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ } PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ } PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ } PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ -+ } PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_DEVICE_CLASS DeviceClass; -+ IMG_VOID *pvDevInfo; -+ -+ } PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hDevMemContext; -+ -+ } PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ -+ } PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE 
hDevMemContext; -+ -+ } PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hDevMemHeap; -+ IMG_UINT32 ui32Attribs; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32Alignment; -+ -+ } PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ -+ } PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ IMG_PVOID pvLinAddr; -+ IMG_HANDLE hMappingInfo; -+ -+ } PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER; -+ -+#define DRM_PVR2D_CFLUSH_FROM_GPU 1 -+#define DRM_PVR2D_CFLUSH_TO_GPU 2 -+ -+ typedef struct PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_UINT32 ui32Type; -+ IMG_VOID *pvVirt; -+ IMG_UINT32 ui32Length; -+ -+ } PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER; -+ -+ typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ -+ } PVRSRV_BRIDGE_IN_FREEDEVICEMEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_UINT32 ui32QueueSize; -+ -+ } PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ PVRSRV_QUEUE_INFO *psQueueInfo; -+ -+ } PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_VOID *pvKVIndexAddress; -+ IMG_UINT32 ui32Bytes; -+ } PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA; -+ -+ typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevMemHeap; -+ IMG_DEV_VIRTADDR *psDevVAddr; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32Alignment; -+ -+ } PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hKernelServices; -+ } PVRSRV_BRIDGE_OUT_CONNECT_SERVICES; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ -+ } PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ -+ } PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo; -+ IMG_HANDLE hDstDevMemHeap; -+ -+ } PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo; -+ PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo; -+ PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo; -+ -+ } 
PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ -+ } PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ IMG_SYS_PHYADDR *psSysPAddr; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceClassBuffer; -+ -+ } PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ IMG_HANDLE hMappingInfo; -+ -+ } PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ -+ } PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Value; -+ IMG_UINT32 ui32Mask; -+ IMG_BOOL bLastFrame; -+ IMG_BOOL bOverwrite; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_MEMPOL; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ IMG_BOOL bIsRead; -+ IMG_UINT32 ui32Value; -+ IMG_UINT32 ui32Mask; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_PVOID pvAltLinAddr; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Bytes; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_PVOID pvAltLinAddr; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Bytes; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_HWREG sHWReg; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_DUMPREG; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_HWREG sHWReg; -+ IMG_UINT32 ui32Mask; -+ IMG_UINT32 ui32Flags; -+ } PVRSRV_BRIDGE_IN_PDUMP_REGPOL; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_HWREG sHWReg; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hKernelMemInfo; -+ IMG_DEV_PHYADDR *pPages; -+ IMG_UINT32 ui32NumPages; -+ IMG_DEV_VIRTADDR sDevAddr; -+ IMG_UINT32 ui32Start; -+ IMG_UINT32 ui32Length; -+ IMG_BOOL bContinuous; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES; -+ -+ typedef 
struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE]; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_COMMENT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32Frame; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_SETFRAME; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE]; -+ IMG_UINT32 ui32FileOffset; -+ IMG_UINT32 ui32Width; -+ IMG_UINT32 ui32Height; -+ IMG_UINT32 ui32StrideInBytes; -+ IMG_DEV_VIRTADDR sDevBaseAddr; -+ IMG_UINT32 ui32Size; -+ PDUMP_PIXEL_FORMAT ePixelFormat; -+ PDUMP_MEM_FORMAT eMemFormat; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_BITMAP; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE]; -+ IMG_UINT32 ui32FileOffset; -+ IMG_UINT32 ui32Address; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32Flags; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_READREG; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE]; -+ IMG_BOOL bContinuous; -+ -+ } PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hKernelMemInfo; -+ IMG_UINT32 ui32Offset; -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ } PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR; -+ -+ typedef struct PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32RegOffset; -+ IMG_BOOL bLastFrame; -+ } PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumDevices; -+ PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES]; -+ -+ } PVRSRV_BRIDGE_OUT_ENUMDEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG { -+ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hDevCookie; -+ -+ } PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumDevices; -+ IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES]; -+ -+ } PVRSRV_BRIDGE_OUT_ENUMCLASS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32DeviceID; -+ IMG_HANDLE hDevCookie; -+ -+ } PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hDeviceKM; -+ -+ } PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_VOID *pvLinAddr; -+ IMG_UINT32 ui32ByteSize; -+ IMG_UINT32 ui32PageOffset; -+ IMG_BOOL bPhysContig; -+ IMG_UINT32 ui32NumPageTableEntries; -+ IMG_SYS_PHYADDR *psSysPAddr; -+ -+ } PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ -+ } PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ -+ } PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY; -+ -+#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10 -+#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10 -+#define 
PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4 -+#define PVRSRV_MAX_DC_CLIP_RECTS 32 -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Count; -+ DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS]; -+ -+ } PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ DISPLAY_FORMAT sFormat; -+ -+ } PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Count; -+ DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS]; -+ -+ } PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG { -+ PVRSRV_ERROR eError; -+ DISPLAY_INFO sDisplayInfo; -+ -+ } PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hBuffer; -+ -+ } PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER; -+ -+ typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_UINT32 ui32Flags; -+ DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib; -+ DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib; -+ IMG_UINT32 ui32BufferCount; -+ IMG_UINT32 ui32OEMFlags; -+ IMG_UINT32 ui32SwapChainID; -+ -+ } PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hSwapChain; -+ IMG_UINT32 ui32SwapChainID; -+ -+ } PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN; -+ -+ typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hSwapChain; -+ -+ } PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hSwapChain; -+ IMG_RECT sRect; -+ -+ } PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hSwapChain; -+ IMG_UINT32 ui32CKColour; -+ -+ } PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hSwapChain; -+ -+ } PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32BufferCount; -+ IMG_HANDLE ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; -+ -+ } PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hBuffer; -+ IMG_UINT32 ui32SwapInterval; -+ IMG_HANDLE hPrivateTag; -+ IMG_UINT32 ui32ClipRectCount; -+ IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS]; -+ -+ } PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hSwapChain; -+ -+ } PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32DeviceID; -+ IMG_HANDLE hDevCookie; -+ -+ } PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE 
hDeviceKM; -+ -+ } PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG { -+ PVRSRV_ERROR eError; -+ BUFFER_INFO sBufferInfo; -+ -+ } PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDeviceKM; -+ IMG_UINT32 ui32BufferIndex; -+ -+ } PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hBuffer; -+ -+ } PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32ClientHeapCount; -+ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; -+ -+ } PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hDevMemContext; -+ IMG_UINT32 ui32ClientHeapCount; -+ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; -+ -+ } PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hDevMemHeap; -+ -+ } PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ -+ } PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG { -+ PVRSRV_ERROR eError; -+ IMG_PVOID pvLinAddr; -+ IMG_HANDLE hMappingInfo; -+ -+ } PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Total; -+ IMG_UINT32 ui32Free; -+ IMG_UINT32 ui32LargestBlock; -+ -+ } PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM; -+ -+#include "pvrmmap.h" -+ typedef struct PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA_TAG { -+ PVRSRV_ERROR eError; -+ -+ IMG_UINT32 ui32MMapOffset; -+ -+ IMG_UINT32 ui32ByteOffset; -+ -+ IMG_UINT32 ui32RealByteSize; -+ -+ } PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_MISC_INFO sMiscInfo; -+ -+ } PVRSRV_BRIDGE_IN_GET_MISC_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_MISC_INFO sMiscInfo; -+ -+ } PVRSRV_BRIDGE_OUT_GET_MISC_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_MISC_INFO sMiscInfo; -+ -+ } PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_MISC_INFO sMiscInfo; -+ -+ } PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG { -+ PVRSRV_ERROR eError; -+ IMG_BOOL bIsCapturing; -+ -+ } PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32Total; -+ IMG_UINT32 ui32Available; -+ -+ } PVRSRV_BRIDGE_IN_GET_FB_STATS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_SYS_PHYADDR sSysPhysAddr; -+ IMG_UINT32 uiSizeInBytes; -+ -+ } PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG { -+ IMG_PVOID pvUserAddr; -+ IMG_UINT32 uiActualSize; -+ IMG_PVOID pvProcess; -+ -+ } 
PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_PVOID pvUserAddr; -+ IMG_PVOID pvProcess; -+ -+ } PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG { -+ IMG_PVOID *ppvTbl; -+ IMG_UINT32 uiTblSize; -+ -+ } PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP; -+ -+ typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_PVOID pvProcess; -+ -+ } PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG { -+ IMG_SYS_PHYADDR sRegsPhysBase; -+ IMG_VOID *pvRegsBase; -+ IMG_PVOID pvProcess; -+ IMG_UINT32 ulNoOfEntries; -+ IMG_PVOID pvTblLinAddr; -+ -+ } PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_PVOID pvProcess; -+ IMG_VOID *pvRegsBase; -+ -+ } PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_UINT32 ui32StatusAndMask; -+ PVRSRV_ERROR eError; -+ -+ } PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_BOOL bInitSuccesful; -+ } PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32Flags; -+ IMG_UINT32 ui32Size; -+ } PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG { -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ } PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ } PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG { -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hKernelMemInfo; -+ } PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG { -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; -+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo; -+ } PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG { -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevMemContext; -+ } PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG { -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR; -+ -+ typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hOSEventKM; -+ } PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT; -+ -+ typedef struct 
PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG { -+ PVRSRV_EVENTOBJECT sEventObject; -+ } PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG { -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN; -+ -+ typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG { -+ PVRSRV_EVENTOBJECT sEventObject; -+ IMG_HANDLE hOSEventKM; -+ } PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_bridge_k.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_bridge_k.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_bridge_k.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_bridge_k.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,195 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "img_defs.h" -+#include "services.h" -+#include "pvr_bridge.h" -+#include "perproc.h" -+#include "mutex.h" -+#include "syscommon.h" -+#include "pvr_debug.h" -+#include "proc.h" -+ -+#include "sgx_bridge.h" -+ -+#include "bridged_pvr_bridge.h" -+ -+ -+#if defined(DEBUG_BRIDGE_KM) -+static off_t printLinuxBridgeStats(char *buffer, size_t size, off_t off); -+#endif -+ -+extern PVRSRV_LINUX_MUTEX gPVRSRVLock; -+ -+PVRSRV_ERROR LinuxBridgeInit(IMG_VOID) -+{ -+#if defined(DEBUG_BRIDGE_KM) -+ { -+ int iStatus; -+ iStatus = -+ CreateProcReadEntry("bridge_stats", printLinuxBridgeStats); -+ if (iStatus != 0) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+#endif -+ return CommonBridgeInit(); -+} -+ -+IMG_VOID LinuxBridgeDeInit(IMG_VOID) -+{ -+#if defined(DEBUG_BRIDGE_KM) -+ RemoveProcEntry("bridge_stats"); -+#endif -+} -+ -+#if defined(DEBUG_BRIDGE_KM) -+static off_t printLinuxBridgeStats(char *buffer, size_t count, off_t off) -+{ -+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry; -+ off_t Ret; -+ -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ if (!off) { -+ if (count < 500) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ Ret = printAppend(buffer, count, 0, -+ "Total ioctl call count = %lu\n" -+ "Total number of bytes copied via copy_from_user = %lu\n" -+ "Total number of bytes copied via copy_to_user = %lu\n" -+ "Total number of bytes copied via copy_*_user = %lu\n\n" -+ "%-45s | %-40s | %10s | %20s | %10s\n", -+ g_BridgeGlobalStats.ui32IOCTLCount, -+ g_BridgeGlobalStats. 
-+ ui32TotalCopyFromUserBytes, -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, -+ g_BridgeGlobalStats. -+ ui32TotalCopyFromUserBytes + -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, -+ "Bridge Name", "Wrapper Function", -+ "Call Count", "copy_from_user Bytes", -+ "copy_to_user Bytes"); -+ goto unlock_and_return; -+ } -+ -+ if (off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) { -+ Ret = END_OF_FILE; -+ goto unlock_and_return; -+ } -+ -+ if (count < 300) { -+ Ret = 0; -+ goto unlock_and_return; -+ } -+ -+ psEntry = &g_BridgeDispatchTable[off - 1]; -+ Ret = printAppend(buffer, count, 0, -+ "%-45s %-40s %-10lu %-20lu %-10lu\n", -+ psEntry->pszIOCName, -+ psEntry->pszFunctionName, -+ psEntry->ui32CallCount, -+ psEntry->ui32CopyFromUserTotalBytes, -+ psEntry->ui32CopyToUserTotalBytes); -+ -+unlock_and_return: -+ LinuxUnLockMutex(&gPVRSRVLock); -+ return Ret; -+} -+#endif -+ -+long -+PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ IMG_UINT32 ui32BridgeID = PVRSRV_GET_BRIDGE_ID(cmd); -+ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = -+ (PVRSRV_BRIDGE_PACKAGE *) arg; -+ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM; -+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); -+ PVRSRV_PER_PROCESS_DATA *psPerProc; -+ int err = -EFAULT; -+ -+ LinuxLockMutex(&gPVRSRVLock); -+ -+ if (!OSAccessOK(PVR_VERIFY_WRITE, -+ psBridgePackageUM, sizeof(PVRSRV_BRIDGE_PACKAGE))) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Received invalid pointer to function arguments", -+ __FUNCTION__)); -+ -+ goto unlock_and_return; -+ } -+ -+ if (OSCopyFromUser(IMG_NULL, -+ &sBridgePackageKM, -+ psBridgePackageUM, sizeof(PVRSRV_BRIDGE_PACKAGE)) -+ != PVRSRV_OK) { -+ goto unlock_and_return; -+ } -+ -+ if (ui32BridgeID != -+ PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES)) { -+ PVRSRV_ERROR eError; -+ -+ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, -+ (IMG_PVOID *) & psPerProc, -+ sBridgePackageKM.hKernelServices, -+ PVRSRV_HANDLE_TYPE_PERPROC_DATA); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid kernel services handle (%d)", -+ __FUNCTION__, eError)); -+ goto unlock_and_return; -+ } -+ -+ if (psPerProc->ui32PID != ui32PID) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Process %d tried to access data " -+ "belonging to process %d", __FUNCTION__, -+ ui32PID, psPerProc->ui32PID)); -+ goto unlock_and_return; -+ } -+ } else { -+ -+ psPerProc = PVRSRVPerProcessData(ui32PID); -+ if (psPerProc == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: " -+ "Couldn't create per-process data area")); -+ goto unlock_and_return; -+ } -+ } -+ -+ sBridgePackageKM.ui32BridgeID = -+ PVRSRV_GET_BRIDGE_ID(sBridgePackageKM.ui32BridgeID); -+ -+ err = BridgedDispatchKM(psPerProc, &sBridgePackageKM); -+ -+unlock_and_return: -+ LinuxUnLockMutex(&gPVRSRVLock); -+ return err; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_bridge_km.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_bridge_km.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_bridge_km.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_bridge_km.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,350 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
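[Editor's illustration, not part of the patch] The PVRSRV_BridgeDispatchKM routine above follows a common ioctl-bridge pattern: copy the caller-supplied package structure in, validate the identifier it carries, then route to a per-call handler. A minimal standalone sketch of that pattern is shown below; every name in it (demo_package, demo_dispatch, demo_table, ...) is invented for illustration and is not the driver's real API.

    /* Standalone, user-space model of the bridge dispatch pattern. */
    #include <stdio.h>
    #include <string.h>

    #define DEMO_BRIDGE_CALLS 3

    struct demo_package {
        unsigned int id;     /* which bridge call the caller wants */
        const void  *in;     /* input parameters */
        void        *out;    /* output parameters */
    };

    typedef int (*demo_handler)(const void *in, void *out);

    static int demo_enum_devices(const void *in, void *out)
    {
        (void)in;
        *(unsigned int *)out = 1;   /* pretend one device is present */
        return 0;
    }

    static demo_handler demo_table[DEMO_BRIDGE_CALLS] = {
        demo_enum_devices,          /* id 0 */
        NULL,                       /* id 1: unimplemented */
        NULL,                       /* id 2: unimplemented */
    };

    static int demo_dispatch(const struct demo_package *user_pkg)
    {
        struct demo_package pkg;

        /* In kernel code this step would be copy_from_user(). */
        memcpy(&pkg, user_pkg, sizeof(pkg));

        if (pkg.id >= DEMO_BRIDGE_CALLS || demo_table[pkg.id] == NULL)
            return -1;              /* unknown or unimplemented call */

        return demo_table[pkg.id](pkg.in, pkg.out);
    }

    int main(void)
    {
        unsigned int ndev = 0;
        struct demo_package pkg = { 0, NULL, &ndev };

        if (demo_dispatch(&pkg) == 0)
            printf("devices: %u\n", ndev);
        return 0;
    }

The driver additionally checks that the per-process handle in the package belongs to the calling PID before dispatching; the sketch omits that step.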
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __PVR_BRIDGE_KM_H_ -+#define __PVR_BRIDGE_KM_H_ -+ -+ -+#include "pvr_bridge.h" -+#include "perproc.h" -+ -+ PVRSRV_ERROR LinuxBridgeInit(IMG_VOID); -+ IMG_VOID LinuxBridgeDeInit(IMG_VOID); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 * -+ pui32NumDevices, -+ PVRSRV_DEVICE_IDENTIFIER -+ * psDevIdList); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 -+ uiDevIndex, -+ PVRSRV_DEVICE_TYPE -+ eDeviceType, -+ IMG_HANDLE * -+ phDevCookie); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 -+ ui32QueueSize, -+ PVRSRV_QUEUE_INFO -+ ** -+ ppsQueueInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO * psQueueInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_HEAP_INFO -+ * psHeapInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_PER_PROCESS_DATA -+ * -+ psPerProc, -+ IMG_HANDLE -+ * -+ phDevMemContext, -+ IMG_UINT32 -+ * -+ pui32ClientHeapCount, -+ PVRSRV_HEAP_INFO -+ * -+ psHeapInfo, -+ IMG_BOOL * -+ pbCreated -+ , -+ IMG_BOOL * -+ pbShared -+ ); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE -+ hDevCookie, -+ IMG_HANDLE -+ hDevMemContext, -+ IMG_BOOL * -+ pbCreated); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE -+ hDevCookie, -+ IMG_HANDLE -+ hDevMemContext, -+ IMG_UINT32 * -+ pui32ClientHeapCount, -+ PVRSRV_HEAP_INFO -+ * psHeapInfo -+ , -+ IMG_BOOL * -+ pbShared -+ ); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_PER_PROCESS_DATA -+ * psPerProc, -+ IMG_HANDLE -+ hDevMemHeap, -+ IMG_UINT32 -+ ui32Flags, -+ IMG_UINT32 -+ ui32Size, -+ IMG_UINT32 -+ ui32Alignment, -+ PVRSRV_KERNEL_MEM_INFO -+ ** ppsMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_KERNEL_MEM_INFO -+ * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_KERNEL_MEM_INFO -+ * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE -+ hDevMemHeap, -+ IMG_DEV_VIRTADDR -+ * -+ psDevVAddr, -+ IMG_UINT32 -+ ui32Size, -+ IMG_UINT32 -+ ui32Alignment, -+ PVRSRV_KERNEL_MEM_INFO -+ ** -+ ppsMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA * 
psPerProc, -+ PVRSRV_KERNEL_MEM_INFO * psSrcMemInfo, -+ IMG_HANDLE hDstDevMemHeap, -+ PVRSRV_KERNEL_MEM_INFO ** ppsDstMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE -+ hDevCookie, -+ PVRSRV_PER_PROCESS_DATA -+ * psPerProc, -+ IMG_UINT32 -+ ui32ByteSize, -+ IMG_UINT32 -+ ui32PageOffset, -+ IMG_BOOL -+ bPhysContig, -+ IMG_SYS_PHYADDR * -+ psSysAddr, -+ IMG_VOID * -+ pvLinAddr, -+ PVRSRV_KERNEL_MEM_INFO -+ ** ppsMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVIsWrappedExtMemoryKM(IMG_HANDLE hDevCookie, -+ PVRSRV_PER_PROCESS_DATA *psPerProc, -+ IMG_UINT32 *pui32ByteSize, -+ IMG_VOID **pvLinAddr); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass, -+ IMG_UINT32 * pui32DevCount, -+ IMG_UINT32 * pui32DevID); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA * -+ psPerProc, -+ IMG_UINT32 ui32DeviceID, -+ IMG_HANDLE hDevCookie, -+ IMG_HANDLE * phDeviceKM); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, -+ IMG_BOOL bResManCallback); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM, -+ IMG_UINT32 * pui32Count, -+ DISPLAY_FORMAT * psFormat); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM, -+ DISPLAY_FORMAT * psFormat, -+ IMG_UINT32 * pui32Count, -+ DISPLAY_DIMS * psDim); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE * phBuffer); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM, -+ DISPLAY_INFO * psDisplayInfo); -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA * -+ psPerProc, -+ IMG_HANDLE hDeviceKM, -+ IMG_UINT32 ui32Flags, -+ DISPLAY_SURF_ATTRIBUTES * -+ psDstSurfAttrib, -+ DISPLAY_SURF_ATTRIBUTES * -+ psSrcSurfAttrib, -+ IMG_UINT32 ui32BufferCount, -+ IMG_UINT32 ui32OEMFlags, -+ IMG_HANDLE * phSwapChain, -+ IMG_UINT32 * -+ pui32SwapChainID); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE -+ hSwapChain); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, -+ IMG_RECT * psRect); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, -+ IMG_RECT * psRect); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE -+ hSwapChain, -+ IMG_UINT32 -+ ui32CKColour); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE -+ hSwapChain, -+ IMG_UINT32 -+ ui32CKColour); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain, -+ IMG_UINT32 * -+ pui32BufferCount, -+ IMG_HANDLE * phBuffer); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hBuffer, -+ IMG_UINT32 -+ ui32SwapInterval, -+ IMG_HANDLE hPrivateTag, -+ IMG_UINT32 -+ ui32ClipRectCount, -+ IMG_RECT * psClipRect); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM, -+ IMG_HANDLE hSwapChain); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA * -+ psPerProc, -+ IMG_UINT32 ui32DeviceID, -+ IMG_HANDLE hDevCookie, -+ IMG_HANDLE * phDeviceKM); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, -+ IMG_BOOL -+ bResManCallback); -+ -+ 
IMG_IMPORT -+ PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM, -+ BUFFER_INFO * psBufferInfo); -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM, -+ IMG_UINT32 ui32BufferIndex, -+ IMG_HANDLE * phBuffer); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDeviceClassBuffer, -+ PVRSRV_KERNEL_MEM_INFO ** ppsMemInfo, -+ IMG_HANDLE * phOSMapInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 -+ ui32Flags, -+ IMG_UINT32 * -+ pui32Total, -+ IMG_UINT32 * -+ pui32Free, -+ IMG_UINT32 * -+ pui32LargestBlock); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE -+ hDevCookie, -+ IMG_HANDLE -+ hDevMemContext, -+ PVRSRV_KERNEL_SYNC_INFO -+ ** -+ ppsKernelSyncInfo); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO * psKernelSyncInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO * -+ psMiscInfo); -+ -+ PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32 * pui32Total, -+ IMG_UINT32 * pui32Available); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsKernelMemInfo); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO * -+ psKernelMemInfo); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO * -+ psKernelMemInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrconfig.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrconfig.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrconfig.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrconfig.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,24 @@ -+#ifndef _PVRCONFIG_H -+#define _PVRCONFIG_H -+ -+#define SGX530 1 -+#define SGX_CORE_REV 121 -+ -+#ifdef CONFIG_PVR_DEBUG -+# define PVR_BUILD_TYPE "debug" -+# define DEBUG 1 -+#elif defined(CONFIG_PVR_TIMING) -+# define PVR_BUILD_TYPE "timing" -+# define TIMING 1 -+#elif defined(CONFIG_PVR_RELEASE) -+# define PVR_BUILD_TYPE "release" -+#endif -+ -+#ifdef DEBUG -+# define DEBUG_LINUX_MEMORY_ALLOCATIONS 1 -+# define DEBUG_LINUX_MEM_AREAS 1 -+# define DEBUG_LINUX_MMAP_AREAS 1 -+# define DEBUG_BRIDGE_KM 1 -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_debug.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_debug.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_debug.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_debug.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,185 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
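[Editor's illustration, not part of the patch] pvrconfig.h above selects exactly one build flavour (debug, timing or release) from kernel config symbols and lets the debug flavour switch on extra bookkeeping defines. The sketch below mirrors that compile-time selection with invented DEMO_* symbols; it is not the driver's real configuration.

    /* Build with -DDEMO_DEBUG or -DDEMO_TIMING to change the outcome. */
    #include <stdio.h>

    #if defined(DEMO_DEBUG)
    # define DEMO_BUILD_TYPE   "debug"
    # define DEMO_EXTRA_CHECKS 1
    #elif defined(DEMO_TIMING)
    # define DEMO_BUILD_TYPE   "timing"
    # define DEMO_EXTRA_CHECKS 0
    #else
    # define DEMO_BUILD_TYPE   "release"
    # define DEMO_EXTRA_CHECKS 0
    #endif

    int main(void)
    {
        printf("build type: %s, extra checks: %d\n",
               DEMO_BUILD_TYPE, DEMO_EXTRA_CHECKS);
        return 0;
    }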
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "img_types.h" -+#include "pvr_debug.h" -+#include "proc.h" -+ -+#if defined(DEBUG) || defined(TIMING) -+ -+IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING; -+ -+#define PVR_STRING_TERMINATOR '\0' -+#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') ) -+ -+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, -+ const IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line, const IMG_CHAR * pszFormat, ... -+ ) -+{ -+ IMG_BOOL bTrace, bDebug; -+ IMG_CHAR *pszLeafName; -+ -+ pszLeafName = (char *)strrchr(pszFileName, '\\'); -+ -+ if (pszLeafName) { -+ pszFileName = pszLeafName; -+ } -+ -+ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE; -+ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel); -+ -+ if (bTrace || bDebug) { -+ va_list vaArgs; -+ static char szBuffer[256]; -+ -+ va_start(vaArgs, pszFormat); -+ -+ if (bDebug) { -+ switch (ui32DebugLevel) { -+ case DBGPRIV_FATAL: -+ { -+ strcpy(szBuffer, "PVR_K:(Fatal): "); -+ break; -+ } -+ case DBGPRIV_ERROR: -+ { -+ strcpy(szBuffer, "PVR_K:(Error): "); -+ break; -+ } -+ case DBGPRIV_WARNING: -+ { -+ strcpy(szBuffer, "PVR_K:(Warning): "); -+ break; -+ } -+ case DBGPRIV_MESSAGE: -+ { -+ strcpy(szBuffer, "PVR_K:(Message): "); -+ break; -+ } -+ case DBGPRIV_VERBOSE: -+ { -+ strcpy(szBuffer, "PVR_K:(Verbose): "); -+ break; -+ } -+ default: -+ { -+ strcpy(szBuffer, -+ "PVR_K:(Unknown message level)"); -+ break; -+ } -+ } -+ } else { -+ strcpy(szBuffer, "PVR_K: "); -+ } -+ -+ vsprintf(&szBuffer[strlen(szBuffer)], pszFormat, vaArgs); -+ -+ if (!bTrace) { -+ sprintf(&szBuffer[strlen(szBuffer)], " [%d, %s]", -+ (int)ui32Line, pszFileName); -+ } -+ -+ printk(KERN_INFO "%s\n", szBuffer); -+ -+ va_end(vaArgs); -+ } -+} -+ -+void PVRSRVDebugAssertFail(const IMG_CHAR * pszFile, IMG_UINT32 uLine) -+{ -+ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, -+ "Debug assertion failed!"); -+ BUG(); -+} -+ -+void PVRSRVTrace(const IMG_CHAR * pszFormat, ...) 
-+{ -+ static IMG_CHAR szMessage[PVR_MAX_DEBUG_MESSAGE_LEN + 1]; -+ IMG_CHAR *pszEndOfMessage = IMG_NULL; -+ va_list ArgList; -+ -+ strncpy(szMessage, "PVR: ", PVR_MAX_DEBUG_MESSAGE_LEN); -+ -+ pszEndOfMessage = &szMessage[strlen(szMessage)]; -+ -+ va_start(ArgList, pszFormat); -+ vsprintf(pszEndOfMessage, pszFormat, ArgList); -+ va_end(ArgList); -+ -+ strcat(szMessage, "\n"); -+ -+ printk(KERN_INFO "%s", szMessage); -+} -+ -+void PVRDebugSetLevel(IMG_UINT32 uDebugLevel) -+{ -+ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n", -+ (unsigned int)uDebugLevel); -+ -+ gPVRDebugLevel = uDebugLevel; -+} -+ -+int PVRDebugProcSetLevel(struct file *file, const char *buffer, -+ unsigned long count, void *data) -+{ -+#define _PROC_SET_BUFFER_SZ 2 -+ char data_buffer[_PROC_SET_BUFFER_SZ]; -+ -+ if (count != _PROC_SET_BUFFER_SZ) { -+ return -EINVAL; -+ } else { -+ if (copy_from_user(data_buffer, buffer, count)) -+ return -EINVAL; -+ if (data_buffer[count - 1] != '\n') -+ return -EINVAL; -+ PVRDebugSetLevel(data_buffer[0] - '0'); -+ } -+ return (count); -+} -+ -+int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, -+ int *eof, void *data) -+{ -+ if (off == 0) { -+ *start = (char *)1; -+ return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel); -+ } -+ *eof = 1; -+ return 0; -+} -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_debug.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_debug.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvr_debug.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvr_debug.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,100 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
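[Editor's illustration, not part of the patch] PVRDebugProcSetLevel above accepts exactly two bytes from the /proc write: one digit followed by a newline, which becomes the new debug level. A minimal standalone parser applying the same rule is sketched below; names are invented, and unlike the driver it also rejects non-digit characters, which is a small hardening added for the example.

    #include <stdio.h>

    static int demo_parse_level(const char *buf, unsigned long count,
                                unsigned int *level_out)
    {
        if (count != 2)                      /* digit + '\n', nothing else */
            return -1;
        if (buf[1] != '\n' || buf[0] < '0' || buf[0] > '9')
            return -1;
        *level_out = (unsigned int)(buf[0] - '0');
        return 0;
    }

    int main(void)
    {
        unsigned int level;

        if (demo_parse_level("4\n", 2, &level) == 0)
            printf("new debug level: %u\n", level);
        if (demo_parse_level("42\n", 3, &level) != 0)
            printf("rejected: more than one digit\n");
        return 0;
    }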
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __PVR_DEBUG_H__ -+#define __PVR_DEBUG_H__ -+ -+#include "img_types.h" -+ -+ -+#define PVR_MAX_DEBUG_MESSAGE_LEN (512) -+ -+#define DBGPRIV_FATAL 0x01 -+#define DBGPRIV_ERROR 0x02 -+#define DBGPRIV_WARNING 0x04 -+#define DBGPRIV_MESSAGE 0x08 -+#define DBGPRIV_VERBOSE 0x10 -+#define DBGPRIV_CALLTRACE 0x20 -+#define DBGPRIV_ALLOC 0x40 -+#define DBGPRIV_ALLLEVELS (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE) -+ -+#define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__ -+#define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__ -+#define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__ -+#define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__ -+#define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__ -+#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__ -+#define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__ -+ -+#if defined(DEBUG) -+#define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__); -+ -+#define PVR_DPF(X) PVRSRVDebugPrintf X -+#define PVR_TRACE(X) PVRSRVTrace X -+ -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR * -+ pszFile, -+ IMG_UINT32 -+ ui32Line); -+ -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 -+ ui32DebugLevel, -+ const IMG_CHAR * -+ pszFileName, -+ IMG_UINT32 ui32Line, -+ const IMG_CHAR * -+ pszFormat, ...); -+ -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR * pszFormat, -+ ...); -+ -+ IMG_VOID PVRSRVDebugSetLevel(IMG_UINT32 uDebugLevel); -+ -+#define PVR_DBG_BREAK -+ -+#else -+ -+#if defined(TIMING) -+ -+#define PVR_ASSERT(EXPR) -+#define PVR_DPF(X) -+#define PVR_TRACE(X) PVRSRVTrace X -+#define PVR_DBG_BREAK -+ -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR * pszFormat, -+ ...); -+ -+#else -+ -+#define PVR_ASSERT(EXPR) -+#define PVR_DPF(X) -+#define PVR_TRACE(X) -+#define PVR_DBG_BREAK -+ -+#endif -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrmmap.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrmmap.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrmmap.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrmmap.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,36 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
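[Editor's illustration, not part of the patch] pvr_debug.h above assigns one bit per severity (DBGPRIV_FATAL ... DBGPRIV_CALLTRACE), keeps a global mask of enabled levels, and wraps printing in a macro that stamps file and line. The standalone sketch below shows the same level-mask scheme with invented DEMO_* names; it uses the GNU ##__VA_ARGS__ extension, as kernel code commonly does.

    #include <stdio.h>

    #define DEMO_FATAL   0x01
    #define DEMO_ERROR   0x02
    #define DEMO_WARNING 0x04
    #define DEMO_MESSAGE 0x08

    static unsigned int demo_level_mask = DEMO_FATAL | DEMO_ERROR | DEMO_WARNING;

    #define DEMO_DPF(level, fmt, ...)                                \
        do {                                                         \
            if (demo_level_mask & (level))                           \
                fprintf(stderr, "[%s:%d] " fmt "\n",                 \
                        __FILE__, __LINE__, ##__VA_ARGS__);          \
        } while (0)

    int main(void)
    {
        DEMO_DPF(DEMO_ERROR, "device %d not present", 3);   /* printed  */
        DEMO_DPF(DEMO_MESSAGE, "verbose detail");           /* filtered */
        return 0;
    }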
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __PVRMMAP_H__ -+#define __PVRMMAP_H__ -+ -+PVRSRV_ERROR PVRMMAPMapKernelPtr(IMG_HANDLE hModule, IMG_VOID ** ppvLinAddr, -+ IMG_VOID * pvKVIndexAddress, -+ IMG_UINT32 ui32Bytes); -+ -+IMG_BOOL PVRMMAPRemoveMapping(IMG_VOID * pvUserAddress, IMG_UINT32 ui32Bytes); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrmodule.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrmodule.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrmodule.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrmodule.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,31 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _PVRMODULE_H_ -+#define _PVRMODULE_H_ -+MODULE_AUTHOR("Imagination Technologies Ltd. "); -+MODULE_LICENSE("GPL"); -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrsrv.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrsrv.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrsrv.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrsrv.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,916 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
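[Editor's illustration, not part of the patch] pvrmmap.h above exposes a map/remove pair, which implies per-mapping bookkeeping so a later remove can find the entry created by an earlier map. The sketch below models that bookkeeping with a small fixed table; all names are invented and the real driver's kernel-to-user mapping work is not reproduced here.

    #include <stdio.h>
    #include <stddef.h>

    #define DEMO_MAX_MAPPINGS 8

    struct demo_mapping {
        void   *addr;
        size_t  bytes;
        int     used;
    };

    static struct demo_mapping demo_table[DEMO_MAX_MAPPINGS];

    static int demo_add_mapping(void *addr, size_t bytes)
    {
        for (int i = 0; i < DEMO_MAX_MAPPINGS; i++) {
            if (!demo_table[i].used) {
                demo_table[i].addr  = addr;
                demo_table[i].bytes = bytes;
                demo_table[i].used  = 1;
                return 0;
            }
        }
        return -1;                  /* table full */
    }

    static int demo_remove_mapping(void *addr)
    {
        for (int i = 0; i < DEMO_MAX_MAPPINGS; i++) {
            if (demo_table[i].used && demo_table[i].addr == addr) {
                demo_table[i].used = 0;
                return 0;
            }
        }
        return -1;                  /* not found */
    }

    int main(void)
    {
        int dummy;

        demo_add_mapping(&dummy, sizeof(dummy));
        printf("remove known: %d, remove unknown: %d\n",
               demo_remove_mapping(&dummy), demo_remove_mapping(NULL));
        return 0;
    }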
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "handle.h" -+#include "perproc.h" -+#include "pdump_km.h" -+#include "ra.h" -+ -+PVRSRV_ERROR AllocateDeviceID(SYS_DATA * psSysData, IMG_UINT32 * pui32DevID) -+{ -+ SYS_DEVICE_ID *psDeviceWalker; -+ SYS_DEVICE_ID *psDeviceEnd; -+ -+ psDeviceWalker = &psSysData->sDeviceID[0]; -+ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices; -+ -+ while (psDeviceWalker < psDeviceEnd) { -+ if (!psDeviceWalker->bInUse) { -+ psDeviceWalker->bInUse = IMG_TRUE; -+ *pui32DevID = psDeviceWalker->uiID; -+ return PVRSRV_OK; -+ } -+ psDeviceWalker++; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "AllocateDeviceID: No free and valid device IDs available!")); -+ -+ PVR_ASSERT(psDeviceWalker < psDeviceEnd); -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+PVRSRV_ERROR FreeDeviceID(SYS_DATA * psSysData, IMG_UINT32 ui32DevID) -+{ -+ SYS_DEVICE_ID *psDeviceWalker; -+ SYS_DEVICE_ID *psDeviceEnd; -+ -+ psDeviceWalker = &psSysData->sDeviceID[0]; -+ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices; -+ -+ while (psDeviceWalker < psDeviceEnd) { -+ -+ if ((psDeviceWalker->uiID == ui32DevID) && -+ (psDeviceWalker->bInUse) -+ ) { -+ psDeviceWalker->bInUse = IMG_FALSE; -+ return PVRSRV_OK; -+ } -+ psDeviceWalker++; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeDeviceID: no matching dev ID that is in use!")); -+ -+ PVR_ASSERT(psDeviceWalker < psDeviceEnd); -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+IMG_EXPORT -+ IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) -+{ -+ return *(volatile IMG_UINT32 *)((IMG_UINT32) pvLinRegBaseAddr + -+ ui32Offset); -+} -+ -+IMG_EXPORT -+ IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, "WriteHWReg Base:%x, Offset: %x, Value %x", -+ pvLinRegBaseAddr, ui32Offset, ui32Value)); -+ -+ *(IMG_UINT32 *) ((IMG_UINT32) pvLinRegBaseAddr + ui32Offset) = -+ ui32Value; -+} -+ -+IMG_EXPORT -+ IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, -+ PVRSRV_HWREG * psHWRegs) -+{ -+ while (ui32Count--) { -+ WriteHWReg(pvLinRegBaseAddr, psHWRegs->ui32RegAddr, -+ psHWRegs->ui32RegVal); -+ psHWRegs++; -+ } -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 * -+ pui32NumDevices, -+ PVRSRV_DEVICE_IDENTIFIER -+ * psDevIdList) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT32 i; -+ -+ if (!pui32NumDevices || !psDevIdList) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVEnumerateDevicesKM: Invalid params")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVEnumerateDevicesKM: Failed to get SysData")); -+ return eError; -+ } -+ -+ for (i = 0; i < PVRSRV_MAX_DEVICES; i++) { -+ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN; -+ } -+ -+ *pui32NumDevices = 0; -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ for (i = 0; psDeviceNode != IMG_NULL; i++) { -+ -+ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT) { -+ -+ *psDevIdList++ = psDeviceNode->sDevId; -+ -+ (*pui32NumDevices)++; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = ResManInit(); -+ if (eError 
!= PVRSRV_OK) { -+ goto Error; -+ } -+ -+ eError = PVRSRVPerProcessDataInit(); -+ if (eError != PVRSRV_OK) { -+ goto Error; -+ } -+ -+ eError = PVRSRVHandleInit(); -+ if (eError != PVRSRV_OK) { -+ goto Error; -+ } -+ -+ eError = OSCreateResource(&psSysData->sPowerStateChangeResource); -+ if (eError != PVRSRV_OK) { -+ goto Error; -+ } -+ -+ gpsSysData->eCurrentPowerState = PVRSRV_POWER_STATE_D0; -+ gpsSysData->eFailedPowerState = PVRSRV_POWER_Unspecified; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_EVENTOBJECT), -+ (IMG_VOID **) & psSysData->psGlobalEventObject, -+ 0) != PVRSRV_OK) { -+ -+ goto Error; -+ } -+ -+ if (OSEventObjectCreate -+ ("PVRSRV_GLOBAL_EVENTOBJECT", -+ psSysData->psGlobalEventObject) != PVRSRV_OK) { -+ goto Error; -+ } -+ -+ return eError; -+ -+Error: -+ PVRSRVDeInit(psSysData); -+ return eError; -+} -+ -+IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psSysData); -+ -+ if (psSysData->psGlobalEventObject) { -+ OSEventObjectDestroy(psSysData->psGlobalEventObject); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_EVENTOBJECT), -+ psSysData->psGlobalEventObject, 0); -+ } -+ -+ eError = PVRSRVHandleDeInit(); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeInit: PVRSRVHandleDeInit failed")); -+ } -+ -+ eError = PVRSRVPerProcessDataDeInit(); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed")); -+ } -+ -+ ResManDeInit(); -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData, -+ PVRSRV_ERROR(*pfnRegisterDevice) -+ (PVRSRV_DEVICE_NODE *), -+ IMG_UINT32 ui32SOCInterruptBit, -+ IMG_UINT32 * pui32DeviceIndex) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DEVICE_NODE), -+ (IMG_VOID **) & psDeviceNode, IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode")); -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ OSMemSet(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); -+ -+ eError = pfnRegisterDevice(psDeviceNode); -+ if (eError != PVRSRV_OK) { -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterDevice : Failed to register device")); -+ return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED); -+ } -+ -+ psDeviceNode->ui32RefCount = 1; -+ psDeviceNode->psSysData = psSysData; -+ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit; -+ -+ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex); -+ -+ psDeviceNode->psNext = psSysData->psDeviceNodeList; -+ psSysData->psDeviceNodeList = psDeviceNode; -+ -+ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice")); -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVInitialiseDevice: Failed to get SysData")); -+ return (eError); -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ -+ while (psDeviceNode) { -+ if (psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex) { -+ goto FoundDevice; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVInitialiseDevice: requested 
device is not present")); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ -+FoundDevice: -+ -+ PVR_ASSERT(psDeviceNode->ui32RefCount > 0); -+ -+ eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call")); -+ return eError; -+ } -+ -+ if (psDeviceNode->pfnInitDevice != IMG_NULL) { -+ eError = psDeviceNode->pfnInitDevice(psDeviceNode); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVInitialiseDevice: Failed InitDevice call")); -+ return eError; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem")); -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVFinaliseSystem: Failed to get SysData")); -+ return (eError); -+ } -+ -+ if (bInitSuccessful) { -+ eError = SysFinalise(); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVFinaliseSystem: SysFinalise failed (%d)", -+ eError)); -+ return eError; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ eError = -+ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId. -+ ui32DeviceIndex, -+ PVRSRV_POWER_Unspecified, -+ KERNEL_ID, IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", -+ psDeviceNode->sDevId.ui32DeviceIndex)); -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ } -+ -+ PDUMPENDINITPHASE(); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 ui32DevIndex, -+ PVRSRV_DEVICE_TYPE -+ eDeviceType, -+ IMG_HANDLE * -+ phDevCookie) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM")); -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAcquireDeviceDataKM: Failed to get SysData")); -+ return (eError); -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ -+ if (eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN) { -+ while (psDeviceNode) { -+ if (psDeviceNode->sDevId.eDeviceType == eDeviceType) { -+ goto FoundDevice; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ } else { -+ while (psDeviceNode) { -+ if (psDeviceNode->sDevId.ui32DeviceIndex == -+ ui32DevIndex) { -+ goto FoundDevice; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVAcquireDeviceDataKM: requested device is not present")); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ -+FoundDevice: -+ -+ PVR_ASSERT(psDeviceNode->ui32RefCount > 0); -+ -+ if (phDevCookie) { -+ *phDevCookie = (IMG_HANDLE) psDeviceNode; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_DEVICE_NODE **ppsDevNode; -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeinitialiseDevice: Failed to get SysData")); -+ return (eError); -+ } -+ -+ ppsDevNode = &psSysData->psDeviceNodeList; -+ while (*ppsDevNode) { -+ if ((*ppsDevNode)->sDevId.ui32DeviceIndex == ui32DevIndex) { -+ psDeviceNode = *ppsDevNode; -+ goto 
FoundDevice; -+ } -+ ppsDevNode = &((*ppsDevNode)->psNext); -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeinitialiseDevice: requested device %d is not present", -+ ui32DevIndex)); -+ -+ return PVRSRV_ERROR_GENERIC; -+ -+FoundDevice: -+ -+ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex, -+ PVRSRV_POWER_STATE_D3, -+ KERNEL_ID, IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call")); -+ return eError; -+ } -+ -+ eError = ResManFreeResByCriteria(psDeviceNode->hResManContext, -+ RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DEVICEMEM_ALLOCATION, -+ IMG_NULL, 0); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call")); -+ return eError; -+ } -+ -+ if (psDeviceNode->pfnDeInitDevice != IMG_NULL) { -+ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDeinitialiseDevice: Failed DeInitDevice call")); -+ return eError; -+ } -+ } -+ -+ PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE); -+ psDeviceNode->hResManContext = IMG_NULL; -+ -+ *ppsDevNode = psDeviceNode->psNext; -+ -+ FreeDeviceID(psSysData, ui32DevIndex); -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), -+ psDeviceNode, IMG_NULL); -+ -+ return (PVRSRV_OK); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32 * -+ pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Waitus, -+ IMG_UINT32 ui32Tries) -+{ -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0, uiCurrent = 0, uiMaxTime; -+ -+ uiMaxTime = ui32Tries * ui32Waitus; -+ -+ do { -+ if ((*pui32LinMemAddr & ui32Mask) == ui32Value) { -+ return PVRSRV_OK; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ bStart = IMG_TRUE; -+ uiStart = OSClockus(); -+ } -+ -+ OSWaitus(ui32Waitus); -+ -+ uiCurrent = OSClockus(); -+ if (uiCurrent < uiStart) { -+ -+ uiStart = 0; -+ } -+ -+ } while ((uiCurrent - uiStart) < uiMaxTime); -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO * psMiscInfo) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ -+ if (!psMiscInfo) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetMiscInfoKM: invalid parameters")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT -+ | -+ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT -+ | PVRSRV_MISC_INFO_MEMSTATS_PRESENT -+ | -+ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetMiscInfoKM: invalid state request flags")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVGetMiscInfoKM: Failed to get SysData")); -+ return eError; -+ } -+ -+ psMiscInfo->ui32StatePresent = 0; -+ -+ if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) -+ && psSysData->pvSOCTimerRegisterKM) { -+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT; -+ psMiscInfo->pvSOCTimerRegisterKM = -+ psSysData->pvSOCTimerRegisterKM; -+ psMiscInfo->hSOCTimerRegisterOSMemHandle = -+ psSysData->hSOCTimerRegisterOSMemHandle; -+ } -+ -+ if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) -+ && psSysData->pvSOCClockGateRegsBase) { -+ psMiscInfo->ui32StatePresent |= -+ PVRSRV_MISC_INFO_CLOCKGATE_PRESENT; -+ psMiscInfo->pvSOCClockGateRegs = -+ 
psSysData->pvSOCClockGateRegsBase; -+ psMiscInfo->ui32SOCClockGateRegsSize = -+ psSysData->ui32SOCClockGateRegsSize; -+ } -+ -+ if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) -+ && psMiscInfo->pszMemoryStr) { -+ RA_ARENA **ppArena; -+ BM_HEAP *psBMHeap; -+ BM_CONTEXT *psBMContext; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_CHAR *pszStr; -+ IMG_UINT32 ui32StrLen; -+ IMG_INT32 i32Count; -+ -+ pszStr = psMiscInfo->pszMemoryStr; -+ ui32StrLen = psMiscInfo->ui32MemoryStrLen; -+ -+ psMiscInfo->ui32StatePresent |= -+ PVRSRV_MISC_INFO_MEMSTATS_PRESENT; -+ -+ ppArena = &psSysData->apsLocalDevMemArena[0]; -+ while (*ppArena) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n"); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ RA_GetStats(*ppArena, &pszStr, &ui32StrLen); -+ -+ ppArena++; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "\n\nDevice Type %d:\n", -+ psDeviceNode->sDevId.eDeviceType); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, -+ "\nKernel Context:\n"); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ psBMHeap = -+ psDeviceNode->sDevMemoryInfo. -+ pBMKernelContext->psBMHeap; -+ while (psBMHeap) { -+ if (psBMHeap->pImportArena) { -+ RA_GetStats(psBMHeap-> -+ pImportArena, -+ &pszStr, -+ &ui32StrLen); -+ } -+ -+ if (psBMHeap->pVMArena) { -+ RA_GetStats(psBMHeap->pVMArena, -+ &pszStr, -+ &ui32StrLen); -+ } -+ psBMHeap = psBMHeap->psNext; -+ } -+ } -+ -+ psBMContext = psDeviceNode->sDevMemoryInfo.pBMContext; -+ while (psBMContext) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, -+ "\nApplication Context (hDevMemContext) 0x%08X:\n", -+ (IMG_HANDLE) psBMContext); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ psBMHeap = psBMContext->psBMHeap; -+ while (psBMHeap) { -+ if (psBMHeap->pImportArena) { -+ RA_GetStats(psBMHeap-> -+ pImportArena, -+ &pszStr, -+ &ui32StrLen); -+ } -+ -+ if (psBMHeap->pVMArena) { -+ RA_GetStats(psBMHeap->pVMArena, -+ &pszStr, -+ &ui32StrLen); -+ } -+ psBMHeap = psBMHeap->psNext; -+ } -+ psBMContext = psBMContext->psNext; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ i32Count = OSSNPrintf(pszStr, 100, "\n\0"); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ } -+ -+ if ((psMiscInfo-> -+ ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) -+ && psSysData->psGlobalEventObject) { -+ psMiscInfo->ui32StatePresent |= -+ PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT; -+ psMiscInfo->sGlobalEventObject = -+ *psSysData->psGlobalEventObject; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFBStatsKM(IMG_UINT32 * pui32Total, -+ IMG_UINT32 * pui32Available) -+{ -+ IMG_UINT32 ui32Total = 0, i = 0; -+ IMG_UINT32 ui32Available = 0; -+ -+ *pui32Total = 0; -+ *pui32Available = 0; -+ -+ while (BM_ContiguousStatistics(i, &ui32Total, &ui32Available) == -+ IMG_TRUE) { -+ *pui32Total += ui32Total; -+ *pui32Available += ui32Available; -+ -+ i++; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ SYS_DATA *psSysData; -+ IMG_BOOL bStatus = IMG_FALSE; -+ IMG_UINT32 ui32InterruptSource; -+ -+ if (!psDeviceNode) { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n")); -+ goto out; -+ } -+ psSysData = psDeviceNode->psSysData; -+ -+ 
ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode); -+ if (ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit) { -+ if (psDeviceNode->pfnDeviceISR != IMG_NULL) { -+ bStatus = -+ (*psDeviceNode->pfnDeviceISR) (psDeviceNode-> -+ pvISRData); -+ } -+ -+ SysClearInterrupts(psSysData, -+ psDeviceNode->ui32SOCInterruptBit); -+ } -+ -+out: -+ return bStatus; -+} -+ -+IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = pvSysData; -+ IMG_BOOL bStatus = IMG_FALSE; -+ IMG_UINT32 ui32InterruptSource; -+ IMG_UINT32 ui32ClearInterrupts = 0; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ if (!psSysData) { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n")); -+ goto out; -+ } -+ -+ ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL); -+ -+ if (ui32InterruptSource == 0) { -+ goto out; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode != IMG_NULL) { -+ if (psDeviceNode->pfnDeviceISR != IMG_NULL) { -+ if (ui32InterruptSource & psDeviceNode-> -+ ui32SOCInterruptBit) { -+ if ((*psDeviceNode-> -+ pfnDeviceISR) (psDeviceNode->pvISRData)) { -+ -+ bStatus = IMG_TRUE; -+ } -+ -+ ui32ClearInterrupts |= -+ psDeviceNode->ui32SOCInterruptBit; -+ } -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ SysClearInterrupts(psSysData, ui32ClearInterrupts); -+ -+out: -+ return bStatus; -+} -+ -+IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = pvSysData; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ if (!psSysData) { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n")); -+ return; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode != IMG_NULL) { -+ if (psDeviceNode->pfnDeviceMISR != IMG_NULL) { -+ (*psDeviceNode->pfnDeviceMISR) (psDeviceNode-> -+ pvISRData); -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) == -+ PVRSRV_ERROR_PROCESSING_BLOCKED) { -+ PVRSRVProcessQueues(ISR_ID, IMG_FALSE); -+ } -+ -+ if (psSysData->psGlobalEventObject) { -+ IMG_HANDLE hOSEventKM = -+ psSysData->psGlobalEventObject->hOSEventKM; -+ if (hOSEventKM) { -+ OSEventObjectSignal(hOSEventKM); -+ } -+ } -+} -+ -+IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID) -+{ -+ return PVRSRVPerProcessDataConnect(ui32PID); -+} -+ -+IMG_EXPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID) -+{ -+ PVRSRVPerProcessDataDisconnect(ui32PID); -+} -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, -+ IMG_PBYTE pbyBuffer, -+ IMG_UINT32 * puiBufSize, -+ IMG_BOOL bSave) -+{ -+ IMG_UINT32 uiBytesSaved = 0; -+ IMG_PVOID pvLocalMemCPUVAddr; -+ RA_SEGMENT_DETAILS sSegDetails; -+ -+ if (hArena == IMG_NULL) { -+ return (PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ sSegDetails.uiSize = 0; -+ sSegDetails.sCpuPhyAddr.uiAddr = 0; -+ sSegDetails.hSegment = 0; -+ -+ while (RA_GetNextLiveSegment(hArena, &sSegDetails)) { -+ if (pbyBuffer == IMG_NULL) { -+ -+ uiBytesSaved += -+ sizeof(sSegDetails.uiSize) + sSegDetails.uiSize; -+ } else { -+ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + -+ sSegDetails.uiSize) > *puiBufSize) { -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", -+ sSegDetails.sCpuPhyAddr.uiAddr, -+ sSegDetails.uiSize)); -+ -+ pvLocalMemCPUVAddr = -+ OSMapPhysToLin(sSegDetails.sCpuPhyAddr, -+ sSegDetails.uiSize, -+ PVRSRV_HAP_KERNEL_ONLY | -+ PVRSRV_HAP_UNCACHED, IMG_NULL); -+ if 
(pvLocalMemCPUVAddr == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host")); -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ -+ if (bSave) { -+ -+ OSMemCopy(pbyBuffer, &sSegDetails.uiSize, -+ sizeof(sSegDetails.uiSize)); -+ pbyBuffer += sizeof(sSegDetails.uiSize); -+ -+ OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, -+ sSegDetails.uiSize); -+ pbyBuffer += sSegDetails.uiSize; -+ } else { -+ IMG_UINT32 uiSize; -+ -+ OSMemCopy(&uiSize, pbyBuffer, -+ sizeof(sSegDetails.uiSize)); -+ -+ if (uiSize != sSegDetails.uiSize) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVSaveRestoreLiveSegments: Segment size error")); -+ } else { -+ pbyBuffer += sizeof(sSegDetails.uiSize); -+ -+ OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, -+ sSegDetails.uiSize); -+ pbyBuffer += sSegDetails.uiSize; -+ } -+ } -+ -+ uiBytesSaved += -+ sizeof(sSegDetails.uiSize) + sSegDetails.uiSize; -+ -+ OSUnMapPhysToLin(pvLocalMemCPUVAddr, -+ sSegDetails.uiSize, -+ PVRSRV_HAP_KERNEL_ONLY | -+ PVRSRV_HAP_UNCACHED, IMG_NULL); -+ } -+ } -+ -+ if (pbyBuffer == IMG_NULL) { -+ *puiBufSize = uiBytesSaved; -+ } -+ -+ return (PVRSRV_OK); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrversion.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrversion.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/pvrversion.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/pvrversion.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,37 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _PVRVERSION_H_ -+#define _PVRVERSION_H_ -+ -+#define PVRVERSION_MAJ 1 -+#define PVRVERSION_MIN 3 -+#define PVRVERSION_BRANCH 13 -+#define PVRVERSION_BUILD 1607 -+#define PVRVERSION_STRING "1.3.13.1607" -+#define PVRVERSION_FILE "eurasiacon.pj" -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/queue.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/queue.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/queue.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/queue.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,900 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+ -+#include "proc.h" -+ -+static int -+QueuePrintCommands(PVRSRV_QUEUE_INFO * psQueue, char *buffer, size_t size) -+{ -+ off_t off = 0; -+ int cmds = 0; -+ IMG_UINT32 ui32ReadOffset = psQueue->ui32ReadOffset; -+ IMG_UINT32 ui32WriteOffset = psQueue->ui32WriteOffset; -+ PVRSRV_COMMAND *psCmd; -+ -+ while (ui32ReadOffset != ui32WriteOffset) { -+ psCmd = -+ (PVRSRV_COMMAND *) ((IMG_UINT32) psQueue->pvLinQueueKM + -+ ui32ReadOffset); -+ -+ off = -+ printAppend(buffer, size, off, -+ "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n", -+ psQueue, psCmd, psCmd->ui32ProcessID, -+ psCmd->CommandType, psCmd->ui32CmdSize, -+ psCmd->ui32DevIndex, psCmd->ui32DstSyncCount, -+ psCmd->ui32SrcSyncCount, psCmd->ui32DataSize); -+ -+ ui32ReadOffset += psCmd->ui32CmdSize; -+ ui32ReadOffset &= psQueue->ui32QueueSize - 1; -+ cmds++; -+ } -+ if (cmds == 0) -+ off = printAppend(buffer, size, off, "%p \n", psQueue); -+ return off; -+} -+ -+off_t QueuePrintQueues(char *buffer, size_t size, off_t off) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_QUEUE_INFO *psQueue; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) -+ return END_OF_FILE; -+ -+ if (!off) -+ return printAppend(buffer, size, 0, -+ "Command Queues\n" -+ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n"); -+ -+ for (psQueue = psSysData->psQueueList; --off && psQueue; -+ psQueue = psQueue->psNextKM) ; -+ -+ return psQueue ? 
QueuePrintCommands(psQueue, buffer, -+ size) : END_OF_FILE; -+} -+ -+#define GET_SPACE_IN_CMDQ(psQueue) \ -+ (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset) \ -+ + (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1)) -+ -+#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \ -+ psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size) \ -+ & (psQueue->ui32QueueSize - 1); -+ -+#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \ -+ (ui32OpsComplete >= ui32OpsPending) -+ -+IMG_UINT32 NearestPower2(IMG_UINT32 ui32Value) -+{ -+ IMG_UINT32 ui32Temp, ui32Result = 1; -+ -+ if (!ui32Value) -+ return 0; -+ -+ ui32Temp = ui32Value - 1; -+ while (ui32Temp) { -+ ui32Result <<= 1; -+ ui32Temp >>= 1; -+ } -+ -+ return ui32Result; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 -+ ui32QueueSize, -+ PVRSRV_QUEUE_INFO ** -+ ppsQueueInfo) -+{ -+ PVRSRV_QUEUE_INFO *psQueueInfo; -+ IMG_UINT32 ui32Power2QueueSize = NearestPower2(ui32QueueSize); -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hMemBlock; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_QUEUE_INFO), -+ (IMG_VOID **) & psQueueInfo, &hMemBlock) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateCommandQueueKM: Failed to alloc queue struct")); -+ goto ErrorExit; -+ } -+ OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO)); -+ -+ psQueueInfo->hMemBlock[0] = hMemBlock; -+ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM(); -+ -+ if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE, -+ &psQueueInfo->pvLinQueueKM, &hMemBlock) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer")); -+ goto ErrorExit; -+ } -+ -+ psQueueInfo->hMemBlock[1] = hMemBlock; -+ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM; -+ -+ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0); -+ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0); -+ -+ psQueueInfo->ui32QueueSize = ui32Power2QueueSize; -+ -+ if (psSysData->psQueueList == IMG_NULL) { -+ eError = OSCreateResource(&psSysData->sQProcessResource); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ } -+ -+ if (OSLockResource(&psSysData->sQProcessResource, -+ KERNEL_ID) != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ -+ psQueueInfo->psNextKM = psSysData->psQueueList; -+ psSysData->psQueueList = psQueueInfo; -+ -+ if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) != -+ PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ -+ *ppsQueueInfo = psQueueInfo; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ if (psQueueInfo) { -+ if (psQueueInfo->pvLinQueueKM) { -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ psQueueInfo->ui32QueueSize, -+ psQueueInfo->pvLinQueueKM, -+ psQueueInfo->hMemBlock[1]); -+ } -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_QUEUE_INFO), -+ psQueueInfo, psQueueInfo->hMemBlock[0]); -+ } -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO * -+ psQueueInfo) -+{ -+ PVRSRV_QUEUE_INFO *psQueue; -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bTimeout = IMG_TRUE; -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psQueue = psSysData->psQueueList; -+ -+ do { -+ if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) { -+ bTimeout = 
IMG_FALSE; -+ break; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ bStart = IMG_TRUE; -+ uiStart = OSClockus(); -+ } -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); -+ -+ if (bTimeout) { -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVDestroyCommandQueueKM : Failed to empty queue")); -+ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE; -+ } -+ -+ eError = OSLockResource(&psSysData->sQProcessResource, KERNEL_ID); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ -+ if (psQueue == psQueueInfo) { -+ psSysData->psQueueList = psQueueInfo->psNextKM; -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ psQueueInfo->ui32QueueSize, -+ psQueueInfo->pvLinQueueKM, psQueueInfo->hMemBlock[1]); -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_QUEUE_INFO), -+ psQueueInfo, psQueueInfo->hMemBlock[0]); -+ } else { -+ while (psQueue) { -+ if (psQueue->psNextKM == psQueueInfo) { -+ psQueue->psNextKM = psQueueInfo->psNextKM; -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ psQueueInfo->ui32QueueSize, -+ psQueueInfo->pvLinQueueKM, -+ psQueueInfo->hMemBlock[1]); -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(PVRSRV_QUEUE_INFO), -+ psQueueInfo, -+ psQueueInfo->hMemBlock[0]); -+ break; -+ } -+ psQueue = psQueue->psNextKM; -+ } -+ -+ if (!psQueue) { -+ eError = -+ OSUnlockResource(&psSysData->sQProcessResource, -+ KERNEL_ID); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto ErrorExit; -+ } -+ } -+ -+ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ -+ if (psSysData->psQueueList == IMG_NULL) { -+ eError = OSDestroyResource(&psSysData->sQProcessResource); -+ if (eError != PVRSRV_OK) { -+ goto ErrorExit; -+ } -+ } -+ -+ErrorExit: -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO * psQueue, -+ IMG_UINT32 ui32ParamSize, -+ IMG_VOID ** ppvSpace) -+{ -+ IMG_BOOL bTimeout = IMG_TRUE; -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0, uiCurrent = 0; -+ -+ ui32ParamSize = (ui32ParamSize + 3) & 0xFFFFFFFC; -+ -+ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "PVRSRVGetQueueSpace: max command size is %d bytes", -+ PVRSRV_MAX_CMD_SIZE)); -+ return PVRSRV_ERROR_CMD_TOO_BIG; -+ } -+ -+ do { -+ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize) { -+ bTimeout = IMG_FALSE; -+ break; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ bStart = IMG_TRUE; -+ uiStart = OSClockus(); -+ } -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ -+ uiCurrent = OSClockus(); -+ if (uiCurrent < uiStart) { -+ -+ uiStart = 0; -+ } -+ } while ((uiCurrent - uiStart) < MAX_HW_TIME_US); -+ -+ if (bTimeout == IMG_TRUE) { -+ *ppvSpace = IMG_NULL; -+ -+ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE; -+ } else { -+ *ppvSpace = -+ (IMG_VOID *) (psQueue->ui32WriteOffset + -+ (IMG_UINT32) psQueue->pvLinQueueUM); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO * psQueue, -+ PVRSRV_COMMAND ** -+ ppsCommand, -+ IMG_UINT32 ui32DevIndex, -+ IMG_UINT16 CommandType, -+ IMG_UINT32 ui32DstSyncCount, -+ PVRSRV_KERNEL_SYNC_INFO * -+ apsDstSync[], -+ IMG_UINT32 ui32SrcSyncCount, -+ PVRSRV_KERNEL_SYNC_INFO * -+ apsSrcSync[], -+ IMG_UINT32 ui32DataByteSize) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_COMMAND *psCommand; -+ IMG_UINT32 ui32CommandSize; -+ IMG_UINT32 i; -+ -+ ui32DataByteSize = (ui32DataByteSize + 3) & 0xFFFFFFFC; -+ -+ ui32CommandSize = 
sizeof(PVRSRV_COMMAND) -+ + -+ ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT)) -+ + ui32DataByteSize; -+ -+ eError = -+ PVRSRVGetQueueSpaceKM(psQueue, ui32CommandSize, -+ (IMG_VOID **) & psCommand); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM(); -+ -+ psCommand->ui32CmdSize = ui32CommandSize; -+ psCommand->ui32DevIndex = ui32DevIndex; -+ psCommand->CommandType = CommandType; -+ psCommand->ui32DstSyncCount = ui32DstSyncCount; -+ psCommand->ui32SrcSyncCount = ui32SrcSyncCount; -+ psCommand->psDstSync = -+ (PVRSRV_SYNC_OBJECT *) (((IMG_UINT8 *) psCommand) + -+ sizeof(PVRSRV_COMMAND)); -+ -+ psCommand->psSrcSync = -+ (PVRSRV_SYNC_OBJECT *) (((IMG_UINT8 *) psCommand->psDstSync) -+ + -+ (ui32DstSyncCount * -+ sizeof(PVRSRV_SYNC_OBJECT))); -+ -+ psCommand->pvData = -+ (PVRSRV_SYNC_OBJECT *) (((IMG_UINT8 *) psCommand->psSrcSync) -+ + -+ (ui32SrcSyncCount * -+ sizeof(PVRSRV_SYNC_OBJECT))); -+ -+ psCommand->ui32DataSize = ui32DataByteSize; -+ -+ for (i = 0; i < ui32DstSyncCount; i++) { -+ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i]; -+ psCommand->psDstSync[i].ui32WriteOpsPending = -+ PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE); -+ psCommand->psDstSync[i].ui32ReadOpsPending = -+ PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE); -+ } -+ -+ for (i = 0; i < ui32SrcSyncCount; i++) { -+ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i]; -+ psCommand->psSrcSync[i].ui32WriteOpsPending = -+ PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE); -+ psCommand->psSrcSync[i].ui32ReadOpsPending = -+ PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE); -+ } -+ -+ *ppsCommand = psCommand; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO * psQueue, -+ PVRSRV_COMMAND * psCommand) -+{ -+ -+ if (psCommand->ui32DstSyncCount > 0) { -+ psCommand->psDstSync = -+ (PVRSRV_SYNC_OBJECT -+ *) (((IMG_UINT8 *) psQueue->pvLinQueueKM) -+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)); -+ } -+ -+ if (psCommand->ui32SrcSyncCount > 0) { -+ psCommand->psSrcSync = -+ (PVRSRV_SYNC_OBJECT -+ *) (((IMG_UINT8 *) psQueue->pvLinQueueKM) -+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND) -+ + -+ (psCommand->ui32DstSyncCount * -+ sizeof(PVRSRV_SYNC_OBJECT))); -+ } -+ -+ psCommand->pvData = -+ (PVRSRV_SYNC_OBJECT *) (((IMG_UINT8 *) psQueue->pvLinQueueKM) -+ + psQueue->ui32WriteOffset + -+ sizeof(PVRSRV_COMMAND) -+ + -+ (psCommand->ui32DstSyncCount * -+ sizeof(PVRSRV_SYNC_OBJECT)) -+ + -+ (psCommand->ui32SrcSyncCount * -+ sizeof(PVRSRV_SYNC_OBJECT))); -+ -+ UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA * psSysData, -+ PVRSRV_COMMAND * psCommand, -+ IMG_BOOL bFlush) -+{ -+ PVRSRV_SYNC_OBJECT *psWalkerObj; -+ PVRSRV_SYNC_OBJECT *psEndObj; -+ IMG_UINT32 i; -+ COMMAND_COMPLETE_DATA *psCmdCompleteData; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32WriteOpsComplete; -+ IMG_UINT32 ui32ReadOpsComplete; -+ -+ psWalkerObj = psCommand->psDstSync; -+ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount; -+ while (psWalkerObj < psEndObj) { -+ PVRSRV_SYNC_DATA *psSyncData = -+ psWalkerObj->psKernelSyncInfoKM->psSyncData; -+ -+ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete; -+ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete; -+ -+ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending) -+ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending)) { -+ 
if (!bFlush || -+ !SYNCOPS_STALE(ui32WriteOpsComplete, -+ psWalkerObj->ui32WriteOpsPending) -+ || !SYNCOPS_STALE(ui32ReadOpsComplete, -+ psWalkerObj-> -+ ui32ReadOpsPending)) { -+ return PVRSRV_ERROR_FAILED_DEPENDENCIES; -+ } -+ } -+ -+ psWalkerObj++; -+ } -+ -+ psWalkerObj = psCommand->psSrcSync; -+ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount; -+ while (psWalkerObj < psEndObj) { -+ PVRSRV_SYNC_DATA *psSyncData = -+ psWalkerObj->psKernelSyncInfoKM->psSyncData; -+ -+ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete; -+ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete; -+ -+ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending) -+ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending)) { -+ if (!bFlush && -+ SYNCOPS_STALE(ui32WriteOpsComplete, -+ psWalkerObj->ui32WriteOpsPending) -+ && SYNCOPS_STALE(ui32ReadOpsComplete, -+ psWalkerObj->ui32ReadOpsPending)) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x", -+ psSyncData, ui32WriteOpsComplete, -+ psWalkerObj->ui32WriteOpsPending)); -+ } -+ -+ if (!bFlush || -+ !SYNCOPS_STALE(ui32WriteOpsComplete, -+ psWalkerObj->ui32WriteOpsPending) -+ || !SYNCOPS_STALE(ui32ReadOpsComplete, -+ psWalkerObj-> -+ ui32ReadOpsPending)) { -+ return PVRSRV_ERROR_FAILED_DEPENDENCIES; -+ } -+ } -+ psWalkerObj++; -+ } -+ -+ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVProcessCommand: invalid DeviceType 0x%x", -+ psCommand->ui32DevIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psCmdCompleteData = -+ psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand-> -+ CommandType]; -+ if (psCmdCompleteData->bInUse) { -+ -+ return PVRSRV_ERROR_FAILED_DEPENDENCIES; -+ } -+ -+ psCmdCompleteData->bInUse = IMG_TRUE; -+ -+ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount; -+ for (i = 0; i < psCommand->ui32DstSyncCount; i++) { -+ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i]; -+ } -+ -+ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount; -+ for (i = 0; i < psCommand->ui32SrcSyncCount; i++) { -+ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i]; -+ } -+ -+ if (psSysData-> -+ ppfnCmdProcList[psCommand->ui32DevIndex][psCommand-> -+ CommandType] ((IMG_HANDLE) -+ psCmdCompleteData, -+ psCommand-> -+ ui32DataSize, -+ psCommand-> -+ pvData) == -+ IMG_FALSE) { -+ -+ psCmdCompleteData->bInUse = IMG_FALSE; -+ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED; -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32 ui32CallerID, IMG_BOOL bFlush) -+{ -+ PVRSRV_QUEUE_INFO *psQueue; -+ SYS_DATA *psSysData; -+ PVRSRV_COMMAND *psCommand; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_ERROR eError; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psSysData->bReProcessQueues = IMG_FALSE; -+ -+ eError = OSLockResource(&psSysData->sQProcessResource, ui32CallerID); -+ if (eError != PVRSRV_OK) { -+ -+ psSysData->bReProcessQueues = IMG_TRUE; -+ -+ if (ui32CallerID == ISR_ID) { -+ if (bFlush) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH")); -+ } else { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "PVRSRVProcessQueues: Couldn't acquire queue processing lock")); -+ } -+ } else { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "PVRSRVProcessQueues: Queue processing lock-acquire failed when called from the Services driver.")); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ " This is 
due to MISR queue processing being interrupted by the Services driver.")); -+ } -+ -+ return PVRSRV_OK; -+ } -+ -+ psQueue = psSysData->psQueueList; -+ -+ if (!psQueue) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "No Queues installed - cannot process commands")); -+ } -+ -+ if (bFlush) { -+ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS); -+ } -+ -+ while (psQueue) { -+ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset) { -+ psCommand = -+ (PVRSRV_COMMAND *) ((IMG_UINT32) psQueue-> -+ pvLinQueueKM + -+ psQueue->ui32ReadOffset); -+ -+ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) -+ == PVRSRV_OK) { -+ -+ UPDATE_QUEUE_ROFF(psQueue, -+ psCommand->ui32CmdSize) -+ -+ if (bFlush) { -+ continue; -+ } -+ } -+ -+ break; -+ } -+ psQueue = psQueue->psNextKM; -+ } -+ -+ if (bFlush) { -+ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS); -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode != IMG_NULL) { -+ if (psDeviceNode->bReProcessDeviceCommandComplete && -+ psDeviceNode->pfnDeviceCommandComplete != IMG_NULL) { -+ (*psDeviceNode-> -+ pfnDeviceCommandComplete) (psDeviceNode); -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID); -+ -+ if (psSysData->bReProcessQueues) { -+ return PVRSRV_ERROR_PROCESSING_BLOCKED; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, -+ IMG_BOOL bScheduleMISR) -+{ -+ IMG_UINT32 i; -+ COMMAND_COMPLETE_DATA *psCmdCompleteData = -+ (COMMAND_COMPLETE_DATA *) hCmdCookie; -+ SYS_DATA *psSysData; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ return; -+ } -+ -+ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount; i++) { -+ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData-> -+ ui32WriteOpsComplete++; -+ } -+ -+ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount; i++) { -+ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData-> -+ ui32ReadOpsComplete++; -+ } -+ -+ psCmdCompleteData->bInUse = IMG_FALSE; -+ -+ PVRSRVCommandCompleteCallbacks(); -+ -+ if (bScheduleMISR) { -+ OSScheduleMISR(psSysData); -+ } -+} -+ -+IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVCommandCompleteCallbacks: SysAcquireData failed")); -+ return; -+ } -+ -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode != IMG_NULL) { -+ if (psDeviceNode->pfnDeviceCommandComplete != IMG_NULL) { -+ -+ (*psDeviceNode-> -+ pfnDeviceCommandComplete) (psDeviceNode); -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex, -+ PFN_CMD_PROC * ppfnCmdProcList, -+ IMG_UINT32 ui32MaxSyncsPerCmd[][2], -+ IMG_UINT32 ui32CmdCount) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ IMG_UINT32 ui32AllocSize; -+ PFN_CMD_PROC *ppfnCmdProc; -+ COMMAND_COMPLETE_DATA *psCmdCompleteData; -+ -+ if (ui32DevIndex >= SYS_DEVICE_COUNT) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x", -+ ui32DevIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterCmdProcListKM: SysAcquireData failed")); -+ return eError; -+ } -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ ui32CmdCount * sizeof(PFN_CMD_PROC), -+ (IMG_VOID **) & psSysData-> -+ 
ppfnCmdProcList[ui32DevIndex], IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterCmdProcListKM: Failed to alloc queue")); -+ return eError; -+ } -+ -+ ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex]; -+ -+ for (i = 0; i < ui32CmdCount; i++) { -+ ppfnCmdProc[i] = ppfnCmdProcList[i]; -+ } -+ -+ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA *); -+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32AllocSize, -+ (IMG_VOID **) & psSysData-> -+ ppsCmdCompleteData[ui32DevIndex], IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterCmdProcListKM: Failed to alloc CC data")); -+ goto ErrorExit; -+ } -+ /* clear the list to ensure that we don't try to access uninitialised pointer -+ * in the 'error' execution path */ -+ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex], 0x00, -+ ui32AllocSize); -+ -+ for (i = 0; i < ui32CmdCount; i++) { -+ -+ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA) -+ + ((ui32MaxSyncsPerCmd[i][0] -+ + ui32MaxSyncsPerCmd[i][1]) -+ * sizeof(PVRSRV_SYNC_OBJECT)); -+ -+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32AllocSize, -+ (IMG_VOID **) & psSysData-> -+ ppsCmdCompleteData[ui32DevIndex][i], -+ IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d", -+ i)); -+ goto ErrorExit; -+ } -+ -+ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00, -+ ui32AllocSize); -+ -+ psCmdCompleteData = -+ psSysData->ppsCmdCompleteData[ui32DevIndex][i]; -+ -+ psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT *) -+ (((IMG_UINT32) psCmdCompleteData) -+ + sizeof(COMMAND_COMPLETE_DATA)); -+ psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT *) -+ (((IMG_UINT32) psCmdCompleteData->psDstSync) -+ + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0])); -+ psCmdCompleteData->ui32AllocSize = ui32AllocSize; -+ } -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ if (psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL) { -+ for (i = 0; i < ui32CmdCount; i++) { -+ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != -+ IMG_NULL) { -+ psCmdCompleteData = -+ psSysData-> -+ ppsCmdCompleteData[ui32DevIndex][i]; -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ psCmdCompleteData->ui32AllocSize, -+ psCmdCompleteData, IMG_NULL); -+ } -+ } -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA *), -+ psSysData->ppsCmdCompleteData[ui32DevIndex], -+ IMG_NULL); -+ } -+ -+ if (psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32CmdCount * sizeof(PFN_CMD_PROC), -+ psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL); -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex, -+ IMG_UINT32 ui32CmdCount) -+{ -+ SYS_DATA *psSysData; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ if (ui32DevIndex >= SYS_DEVICE_COUNT) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x", -+ ui32DevIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRemoveCmdProcListKM: SysAcquireData failed")); -+ return eError; -+ } -+ -+ if (psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL) { -+ for (i = 0; i < ui32CmdCount; i++) { -+ -+ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != -+ IMG_NULL) { -+ COMMAND_COMPLETE_DATA *psCmdCompleteData = -+ psSysData-> -+ 
ppsCmdCompleteData[ui32DevIndex][i]; -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ psCmdCompleteData->ui32AllocSize, -+ psCmdCompleteData, IMG_NULL); -+ } -+ } -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA *), -+ psSysData->ppsCmdCompleteData[ui32DevIndex], -+ IMG_NULL); -+ } -+ -+ if (psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL) { -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ ui32CmdCount * sizeof(PFN_CMD_PROC), -+ psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL); -+ } -+ -+ return PVRSRV_OK; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/queue.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/queue.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/queue.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/queue.h 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,117 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef QUEUE_H -+#define QUEUE_H -+ -+ -+#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \ -+ psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size) \ -+ & (psQueue->ui32QueueSize - 1); -+ -+ typedef struct _COMMAND_COMPLETE_DATA_ { -+ IMG_BOOL bInUse; -+ -+ IMG_UINT32 ui32DstSyncCount; -+ IMG_UINT32 ui32SrcSyncCount; -+ PVRSRV_SYNC_OBJECT *psDstSync; -+ PVRSRV_SYNC_OBJECT *psSrcSync; -+ IMG_UINT32 ui32AllocSize; -+ } COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA; -+ -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32 ui32CallerID, -+ IMG_BOOL bFlush); -+ -+#ifdef __KERNEL__ -+#include -+ off_t QueuePrintQueues(char *buffer, size_t size, off_t off); -+#endif -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 -+ ui32QueueSize, -+ PVRSRV_QUEUE_INFO -+ ** -+ ppsQueueInfo); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO * psQueueInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO * -+ psQueue, -+ PVRSRV_COMMAND ** -+ ppsCommand, -+ IMG_UINT32 -+ ui32DevIndex, -+ IMG_UINT16 -+ CommandType, -+ IMG_UINT32 -+ ui32DstSyncCount, -+ PVRSRV_KERNEL_SYNC_INFO -+ * apsDstSync[], -+ IMG_UINT32 -+ ui32SrcSyncCount, -+ PVRSRV_KERNEL_SYNC_INFO -+ * apsSrcSync[], -+ IMG_UINT32 -+ ui32DataByteSize); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO * -+ psQueue, -+ IMG_UINT32 -+ ui32ParamSize, -+ IMG_VOID ** -+ ppvSpace); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO * -+ psQueue, -+ PVRSRV_COMMAND * -+ psCommand); -+ -+ IMG_IMPORT -+ IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, -+ IMG_BOOL bScheduleMISR); -+ -+ IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex, -+ PFN_CMD_PROC * -+ ppfnCmdProcList, -+ IMG_UINT32 -+ ui32MaxSyncsPerCmd[][2], -+ IMG_UINT32 ui32CmdCount); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 -+ ui32DevIndex, -+ IMG_UINT32 -+ ui32CmdCount); -+ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/ra.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/ra.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/ra.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/ra.c 2011-06-22 13:19:32.603063278 +0200 -@@ -0,0 +1,1091 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". 
-+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "hash.h" -+#include "ra.h" -+#include "buffer_manager.h" -+#include "osfunc.h" -+ -+#include -+#include "proc.h" -+ -+ -+#define MINIMUM_HASH_SIZE (64) -+ -+struct _BT_ { -+ enum bt_type { -+ btt_span, -+ btt_free, -+ btt_live -+ } type; -+ -+ IMG_UINTPTR_T base; -+ IMG_SIZE_T uSize; -+ -+ struct _BT_ *pNextSegment; -+ struct _BT_ *pPrevSegment; -+ -+ struct _BT_ *pNextFree; -+ struct _BT_ *pPrevFree; -+ -+ BM_MAPPING *psMapping; -+}; -+typedef struct _BT_ BT; -+ -+struct _RA_ARENA_ { -+ -+ char *name; -+ -+ IMG_UINT32 uQuantum; -+ -+ IMG_BOOL(*pImportAlloc) (void *, -+ IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, IMG_UINTPTR_T * pBase); -+ void (*pImportFree) (void *, IMG_UINTPTR_T, BM_MAPPING * psMapping); -+ void (*pBackingStoreFree) (void *, IMG_UINT32, IMG_UINT32, IMG_HANDLE); -+ -+ void *pImportHandle; -+ -+#define FREE_TABLE_LIMIT 32 -+ -+ BT *aHeadFree[FREE_TABLE_LIMIT]; -+ -+ BT *pHeadSegment; -+ BT *pTailSegment; -+ -+ HASH_TABLE *pSegmentHash; -+ -+#ifdef RA_STATS -+ RA_STATISTICS sStatistics; -+#endif -+ -+#if defined(CONFIG_PROC_FS) && defined(DEBUG) -+#define PROC_NAME_SIZE 32 -+ char szProcInfoName[PROC_NAME_SIZE]; -+ char szProcSegsName[PROC_NAME_SIZE]; -+#endif -+}; -+ -+void RA_Dump(RA_ARENA * pArena); -+ -+#if defined(CONFIG_PROC_FS) && defined(DEBUG) -+static int -+RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof, -+ void *data); -+static int RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof, -+ void *data); -+#endif -+ -+ -+static IMG_BOOL -+_RequestAllocFail(void *_h, -+ IMG_SIZE_T _uSize, -+ IMG_SIZE_T * _pActualSize, -+ BM_MAPPING ** _ppsMapping, -+ IMG_UINT32 _uFlags, IMG_UINTPTR_T * _pBase) -+{ -+ PVR_UNREFERENCED_PARAMETER(_h); -+ PVR_UNREFERENCED_PARAMETER(_uSize); -+ PVR_UNREFERENCED_PARAMETER(_pActualSize); -+ PVR_UNREFERENCED_PARAMETER(_ppsMapping); -+ PVR_UNREFERENCED_PARAMETER(_uFlags); -+ PVR_UNREFERENCED_PARAMETER(_pBase); -+ -+ return IMG_FALSE; -+} -+ -+static IMG_UINT32 pvr_log2(IMG_SIZE_T n) -+{ -+ IMG_UINT32 l = 0; -+ n >>= 1; -+ while (n > 0) { -+ n >>= 1; -+ l++; -+ } -+ return l; -+} -+ -+static void -+_SegmentListInsertAfter(RA_ARENA * pArena, BT * pInsertionPoint, BT * pBT) -+{ -+ PVR_ASSERT(pArena != IMG_NULL); -+ PVR_ASSERT(pInsertionPoint != IMG_NULL); -+ -+ pBT->pNextSegment = pInsertionPoint->pNextSegment; -+ pBT->pPrevSegment = pInsertionPoint; -+ if (pInsertionPoint->pNextSegment == IMG_NULL) -+ pArena->pTailSegment = pBT; -+ else -+ pInsertionPoint->pNextSegment->pPrevSegment = pBT; -+ pInsertionPoint->pNextSegment = pBT; -+} -+ -+static void _SegmentListInsert(RA_ARENA * pArena, BT * pBT) -+{ -+ -+ if (pArena->pHeadSegment == IMG_NULL) { -+ pArena->pHeadSegment = pArena->pTailSegment = pBT; -+ pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL; -+ } else { -+ BT *pBTScan; -+ pBTScan = pArena->pHeadSegment; -+ while (pBTScan->pNextSegment != IMG_NULL -+ && pBT->base >= pBTScan->pNextSegment->base) -+ pBTScan = pBTScan->pNextSegment; -+ _SegmentListInsertAfter(pArena, pBTScan, pBT); -+ } -+} -+ -+static void _SegmentListRemove(RA_ARENA * pArena, BT * pBT) -+{ -+ if (pBT->pPrevSegment == IMG_NULL) -+ pArena->pHeadSegment = pBT->pNextSegment; -+ else -+ pBT->pPrevSegment->pNextSegment = 
pBT->pNextSegment; -+ -+ if (pBT->pNextSegment == IMG_NULL) -+ pArena->pTailSegment = pBT->pPrevSegment; -+ else -+ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment; -+} -+ -+static BT *_SegmentSplit(RA_ARENA * pArena, BT * pBT, IMG_SIZE_T uSize) -+{ -+ BT *pNeighbour; -+ -+ PVR_ASSERT(pArena != IMG_NULL); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BT), -+ (IMG_VOID **) & pNeighbour, IMG_NULL) != PVRSRV_OK) { -+ return IMG_NULL; -+ } -+ -+ pNeighbour->pPrevSegment = pBT; -+ pNeighbour->pNextSegment = pBT->pNextSegment; -+ if (pBT->pNextSegment == IMG_NULL) -+ pArena->pTailSegment = pNeighbour; -+ else -+ pBT->pNextSegment->pPrevSegment = pNeighbour; -+ pBT->pNextSegment = pNeighbour; -+ -+ pNeighbour->type = btt_free; -+ pNeighbour->uSize = pBT->uSize - uSize; -+ pNeighbour->base = pBT->base + uSize; -+ pNeighbour->psMapping = pBT->psMapping; -+ pBT->uSize = uSize; -+ return pNeighbour; -+} -+ -+static void _FreeListInsert(RA_ARENA * pArena, BT * pBT) -+{ -+ IMG_UINT32 uIndex; -+ uIndex = pvr_log2(pBT->uSize); -+ pBT->type = btt_free; -+ pBT->pNextFree = pArena->aHeadFree[uIndex]; -+ pBT->pPrevFree = IMG_NULL; -+ if (pArena->aHeadFree[uIndex] != IMG_NULL) -+ pArena->aHeadFree[uIndex]->pPrevFree = pBT; -+ pArena->aHeadFree[uIndex] = pBT; -+} -+ -+static void _FreeListRemove(RA_ARENA * pArena, BT * pBT) -+{ -+ IMG_UINT32 uIndex; -+ uIndex = pvr_log2(pBT->uSize); -+ if (pBT->pNextFree != IMG_NULL) -+ pBT->pNextFree->pPrevFree = pBT->pPrevFree; -+ if (pBT->pPrevFree == IMG_NULL) -+ pArena->aHeadFree[uIndex] = pBT->pNextFree; -+ else -+ pBT->pPrevFree->pNextFree = pBT->pNextFree; -+} -+ -+static BT *_BuildSpanMarker(IMG_UINTPTR_T base, IMG_SIZE_T uSize) -+{ -+ BT *pBT; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BT), -+ (IMG_VOID **) & pBT, IMG_NULL) != PVRSRV_OK) { -+ return IMG_NULL; -+ } -+ -+ pBT->type = btt_span; -+ pBT->base = base; -+ pBT->uSize = uSize; -+ pBT->psMapping = IMG_NULL; -+ -+ return pBT; -+} -+ -+static BT *_BuildBT(IMG_UINTPTR_T base, IMG_SIZE_T uSize) -+{ -+ BT *pBT; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(BT), -+ (IMG_VOID **) & pBT, IMG_NULL) != PVRSRV_OK) { -+ return IMG_NULL; -+ } -+ -+ pBT->type = btt_free; -+ pBT->base = base; -+ pBT->uSize = uSize; -+ -+ return pBT; -+} -+ -+static BT *_InsertResource(RA_ARENA * pArena, IMG_UINTPTR_T base, -+ IMG_SIZE_T uSize) -+{ -+ BT *pBT; -+ PVR_ASSERT(pArena != IMG_NULL); -+ pBT = _BuildBT(base, uSize); -+ if (pBT != IMG_NULL) { -+ _SegmentListInsert(pArena, pBT); -+ _FreeListInsert(pArena, pBT); -+#ifdef RA_STATS -+ pArena->sStatistics.uTotalResourceCount += uSize; -+ pArena->sStatistics.uFreeResourceCount += uSize; -+ pArena->sStatistics.uSpanCount++; -+#endif -+ } -+ return pBT; -+} -+ -+static BT *_InsertResourceSpan(RA_ARENA * pArena, IMG_UINTPTR_T base, -+ IMG_SIZE_T uSize) -+{ -+ BT *pSpanStart; -+ BT *pSpanEnd; -+ BT *pBT; -+ -+ PVR_ASSERT(pArena != IMG_NULL); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x", -+ pArena->name, base, uSize)); -+ -+ pSpanStart = _BuildSpanMarker(base, uSize); -+ if (pSpanStart == IMG_NULL) { -+ goto fail_start; -+ } -+ pSpanEnd = _BuildSpanMarker(base + uSize, 0); -+ if (pSpanEnd == IMG_NULL) { -+ goto fail_end; -+ } -+ -+ pBT = _BuildBT(base, uSize); -+ if (pBT == IMG_NULL) { -+ goto fail_bt; -+ } -+ -+ _SegmentListInsert(pArena, pSpanStart); -+ _SegmentListInsertAfter(pArena, pSpanStart, pBT); -+ _FreeListInsert(pArena, pBT); -+ _SegmentListInsertAfter(pArena, pBT, pSpanEnd); -+#ifdef RA_STATS -+ 
pArena->sStatistics.uTotalResourceCount += uSize; -+#endif -+ return pBT; -+ -+fail_bt: -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL); -+fail_end: -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL); -+fail_start: -+ return IMG_NULL; -+} -+ -+static void _FreeBT(RA_ARENA * pArena, BT * pBT, IMG_BOOL bFreeBackingStore) -+{ -+ BT *pNeighbour; -+ IMG_UINTPTR_T uOrigBase; -+ IMG_SIZE_T uOrigSize; -+ -+ PVR_ASSERT(pArena != IMG_NULL); -+ PVR_ASSERT(pBT != IMG_NULL); -+ -+#ifdef RA_STATS -+ pArena->sStatistics.uLiveSegmentCount--; -+ pArena->sStatistics.uFreeSegmentCount++; -+ pArena->sStatistics.uFreeResourceCount += pBT->uSize; -+#endif -+ -+ uOrigBase = pBT->base; -+ uOrigSize = pBT->uSize; -+ -+ pNeighbour = pBT->pPrevSegment; -+ if (pNeighbour != IMG_NULL -+ && pNeighbour->type == btt_free -+ && pNeighbour->base + pNeighbour->uSize == pBT->base) { -+ _FreeListRemove(pArena, pNeighbour); -+ _SegmentListRemove(pArena, pNeighbour); -+ pBT->base = pNeighbour->base; -+ pBT->uSize += pNeighbour->uSize; -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, -+ IMG_NULL); -+#ifdef RA_STATS -+ pArena->sStatistics.uFreeSegmentCount--; -+#endif -+ } -+ -+ pNeighbour = pBT->pNextSegment; -+ if (pNeighbour != IMG_NULL -+ && pNeighbour->type == btt_free -+ && pBT->base + pBT->uSize == pNeighbour->base) { -+ _FreeListRemove(pArena, pNeighbour); -+ _SegmentListRemove(pArena, pNeighbour); -+ pBT->uSize += pNeighbour->uSize; -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, -+ IMG_NULL); -+#ifdef RA_STATS -+ pArena->sStatistics.uFreeSegmentCount--; -+#endif -+ } -+ -+ if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore) { -+ IMG_UINTPTR_T uRoundedStart, uRoundedEnd; -+ -+ uRoundedStart = -+ (uOrigBase / pArena->uQuantum) * pArena->uQuantum; -+ -+ if (uRoundedStart < pBT->base) { -+ uRoundedStart += pArena->uQuantum; -+ } -+ -+ uRoundedEnd = -+ ((uOrigBase + uOrigSize + pArena->uQuantum - -+ 1) / pArena->uQuantum) * pArena->uQuantum; -+ -+ if (uRoundedEnd > (pBT->base + pBT->uSize)) { -+ uRoundedEnd -= pArena->uQuantum; -+ } -+ -+ if (uRoundedStart < uRoundedEnd) { -+ pArena->pBackingStoreFree(pArena->pImportHandle, -+ uRoundedStart, uRoundedEnd, -+ (IMG_HANDLE) 0); -+ } -+ } -+ -+ if (pBT->pNextSegment != IMG_NULL && pBT->pNextSegment->type == btt_span -+ && pBT->pPrevSegment != IMG_NULL -+ && pBT->pPrevSegment->type == btt_span) { -+ BT *next = pBT->pNextSegment; -+ BT *prev = pBT->pPrevSegment; -+ _SegmentListRemove(pArena, next); -+ _SegmentListRemove(pArena, prev); -+ _SegmentListRemove(pArena, pBT); -+ pArena->pImportFree(pArena->pImportHandle, pBT->base, -+ pBT->psMapping); -+#ifdef RA_STATS -+ pArena->sStatistics.uSpanCount--; -+ pArena->sStatistics.uExportCount++; -+ pArena->sStatistics.uFreeSegmentCount--; -+ pArena->sStatistics.uFreeResourceCount -= pBT->uSize; -+ pArena->sStatistics.uTotalResourceCount -= pBT->uSize; -+#endif -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL); -+ } else -+ _FreeListInsert(pArena, pBT); -+} -+ -+static IMG_BOOL -+_AttemptAllocAligned(RA_ARENA * pArena, -+ IMG_SIZE_T uSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 uAlignment, -+ IMG_UINT32 uAlignmentOffset, IMG_UINTPTR_T * base) -+{ -+ IMG_UINT32 uIndex; -+ PVR_ASSERT(pArena != IMG_NULL); -+ -+ PVR_UNREFERENCED_PARAMETER(uFlags); -+ -+ if (uAlignment > 1) -+ uAlignmentOffset 
%= uAlignment; -+ -+ uIndex = pvr_log2(uSize); -+ -+ -+ while (uIndex < FREE_TABLE_LIMIT -+ && pArena->aHeadFree[uIndex] == IMG_NULL) -+ uIndex++; -+ -+ while (uIndex < FREE_TABLE_LIMIT) { -+ if (pArena->aHeadFree[uIndex] != IMG_NULL) { -+ -+ BT *pBT; -+ -+ pBT = pArena->aHeadFree[uIndex]; -+ while (pBT != IMG_NULL) { -+ IMG_UINTPTR_T aligned_base; -+ -+ if (uAlignment > 1) -+ aligned_base = -+ (pBT->base + uAlignmentOffset + -+ uAlignment - -+ 1) / uAlignment * uAlignment - -+ uAlignmentOffset; -+ else -+ aligned_base = pBT->base; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_AttemptAllocAligned: pBT-base=0x%x " -+ "pBT-size=0x%x alignedbase=0x%x size=0x%x", -+ pBT->base, pBT->uSize, aligned_base, -+ uSize)); -+ -+ if (pBT->base + pBT->uSize >= -+ aligned_base + uSize) { -+ if (!pBT->psMapping -+ || pBT->psMapping->ui32Flags == -+ uFlags) { -+ _FreeListRemove(pArena, pBT); -+ -+ PVR_ASSERT(pBT->type == -+ btt_free); -+ -+#ifdef RA_STATS -+ pArena->sStatistics. -+ uLiveSegmentCount++; -+ pArena->sStatistics. -+ uFreeSegmentCount--; -+ pArena->sStatistics. -+ uFreeResourceCount -= -+ pBT->uSize; -+#endif -+ -+ if (aligned_base > pBT->base) { -+ BT *pNeighbour; -+ -+ pNeighbour = -+ _SegmentSplit -+ (pArena, pBT, -+ aligned_base - -+ pBT->base); -+ -+ if (pNeighbour == -+ IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "_AttemptAllocAligned: Front split failed")); -+ -+ _FreeListInsert -+ (pArena, -+ pBT); -+ return -+ IMG_FALSE; -+ } -+ -+ _FreeListInsert(pArena, -+ pBT); -+#ifdef RA_STATS -+ pArena->sStatistics. -+ uFreeSegmentCount++; -+ pArena->sStatistics. -+ uFreeResourceCount -+ += pBT->uSize; -+#endif -+ pBT = pNeighbour; -+ } -+ -+ if (pBT->uSize > uSize) { -+ BT *pNeighbour; -+ pNeighbour = -+ _SegmentSplit -+ (pArena, pBT, -+ uSize); -+ -+ if (pNeighbour == -+ IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "_AttemptAllocAligned: Back split failed")); -+ -+ _FreeListInsert -+ (pArena, -+ pBT); -+ return -+ IMG_FALSE; -+ } -+ -+ _FreeListInsert(pArena, -+ pNeighbour); -+#ifdef RA_STATS -+ pArena->sStatistics. -+ uFreeSegmentCount++; -+ pArena->sStatistics. -+ uFreeResourceCount -+ += -+ pNeighbour->uSize; -+#endif -+ } -+ -+ pBT->type = btt_live; -+ -+ if (!HASH_Insert -+ (pArena->pSegmentHash, -+ pBT->base, -+ (IMG_UINTPTR_T) pBT)) { -+ _FreeBT(pArena, pBT, -+ IMG_FALSE); -+ return IMG_FALSE; -+ } -+ -+ if (ppsMapping != IMG_NULL) -+ *ppsMapping = -+ pBT->psMapping; -+ -+ *base = pBT->base; -+ -+ return IMG_TRUE; -+ } else { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "AttemptAllocAligned: mismatch in flags. 
Import has %x, request was %x", -+ pBT->psMapping-> -+ ui32Flags, uFlags)); -+ -+ } -+ } -+ pBT = pBT->pNextFree; -+ } -+ -+ } -+ uIndex++; -+ } -+ -+ return IMG_FALSE; -+} -+ -+RA_ARENA *RA_Create(IMG_CHAR * name, -+ IMG_UINTPTR_T base, -+ IMG_SIZE_T uSize, -+ BM_MAPPING * psMapping, -+ IMG_SIZE_T uQuantum, -+ IMG_BOOL(*alloc) (IMG_VOID *, IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 _flags, IMG_UINTPTR_T * pBase), -+ IMG_VOID(*free) (IMG_VOID *, IMG_UINTPTR_T, -+ BM_MAPPING * psMapping), -+ IMG_VOID(*backingstore_free) (IMG_VOID *, IMG_UINT32, -+ IMG_UINT32, IMG_HANDLE), -+ IMG_VOID * pImportHandle) -+{ -+ RA_ARENA *pArena; -+ BT *pBT; -+ int i; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x", -+ name, base, uSize, alloc, free)); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*pArena), -+ (IMG_VOID **) & pArena, IMG_NULL) != PVRSRV_OK) { -+ goto arena_fail; -+ } -+ -+ pArena->name = name; -+ pArena->pImportAlloc = alloc != IMG_NULL ? alloc : _RequestAllocFail; -+ pArena->pImportFree = free; -+ pArena->pBackingStoreFree = backingstore_free; -+ pArena->pImportHandle = pImportHandle; -+ for (i = 0; i < FREE_TABLE_LIMIT; i++) -+ pArena->aHeadFree[i] = IMG_NULL; -+ pArena->pHeadSegment = IMG_NULL; -+ pArena->pTailSegment = IMG_NULL; -+ pArena->uQuantum = uQuantum; -+ -+#ifdef RA_STATS -+ pArena->sStatistics.uSpanCount = 0; -+ pArena->sStatistics.uLiveSegmentCount = 0; -+ pArena->sStatistics.uFreeSegmentCount = 0; -+ pArena->sStatistics.uFreeResourceCount = 0; -+ pArena->sStatistics.uTotalResourceCount = 0; -+ pArena->sStatistics.uCumulativeAllocs = 0; -+ pArena->sStatistics.uCumulativeFrees = 0; -+ pArena->sStatistics.uImportCount = 0; -+ pArena->sStatistics.uExportCount = 0; -+#endif -+ -+#if defined(CONFIG_PROC_FS) && defined(DEBUG) -+ if (strcmp(pArena->name, "") != 0) { -+ sprintf(pArena->szProcInfoName, "ra_info_%s", pArena->name); -+ CreateProcEntry(pArena->szProcInfoName, RA_DumpInfo, 0, pArena); -+ sprintf(pArena->szProcSegsName, "ra_segs_%s", pArena->name); -+ CreateProcEntry(pArena->szProcSegsName, RA_DumpSegs, 0, pArena); -+ } -+#endif -+ -+ pArena->pSegmentHash = HASH_Create(MINIMUM_HASH_SIZE); -+ if (pArena->pSegmentHash == IMG_NULL) { -+ goto hash_fail; -+ } -+ if (uSize > 0) { -+ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum; -+ pBT = _InsertResource(pArena, base, uSize); -+ if (pBT == IMG_NULL) { -+ goto insert_fail; -+ } -+ pBT->psMapping = psMapping; -+ -+ } -+ return pArena; -+ -+insert_fail: -+ HASH_Delete(pArena->pSegmentHash); -+hash_fail: -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL); -+arena_fail: -+ return IMG_NULL; -+} -+ -+void RA_Delete(RA_ARENA * pArena) -+{ -+ IMG_UINT32 uIndex; -+ -+ PVR_ASSERT(pArena != IMG_NULL); -+ PVR_DPF((PVR_DBG_MESSAGE, "RA_Delete: name='%s'", pArena->name)); -+ -+ for (uIndex = 0; uIndex < FREE_TABLE_LIMIT; uIndex++) -+ pArena->aHeadFree[uIndex] = IMG_NULL; -+ -+ while (pArena->pHeadSegment != IMG_NULL) { -+ BT *pBT = pArena->pHeadSegment; -+ PVR_ASSERT(pBT->type == btt_free); -+ _SegmentListRemove(pArena, pBT); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL); -+#ifdef RA_STATS -+ pArena->sStatistics.uSpanCount--; -+#endif -+ } -+#if defined(CONFIG_PROC_FS) && defined(DEBUG) -+ RemoveProcEntry(pArena->szProcInfoName); -+ RemoveProcEntry(pArena->szProcSegsName); -+#endif -+ HASH_Delete(pArena->pSegmentHash); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, 
IMG_NULL); -+} -+ -+IMG_BOOL RA_Add(RA_ARENA * pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize) -+{ -+ PVR_ASSERT(pArena != IMG_NULL); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, -+ uSize)); -+ -+ uSize = -+ (uSize + pArena->uQuantum - -+ 1) / pArena->uQuantum * pArena->uQuantum; -+ return ((IMG_BOOL) (_InsertResource(pArena, base, uSize) != IMG_NULL)); -+} -+ -+IMG_BOOL -+RA_Alloc(RA_ARENA * pArena, -+ IMG_SIZE_T uRequestSize, -+ IMG_SIZE_T * pActualSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 uAlignment, -+ IMG_UINT32 uAlignmentOffset, IMG_UINTPTR_T * base) -+{ -+ IMG_BOOL bResult = IMG_FALSE; -+ IMG_SIZE_T uSize = uRequestSize; -+ -+ PVR_ASSERT(pArena != IMG_NULL); -+ -+ -+ if (pActualSize != IMG_NULL) -+ *pActualSize = uSize; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x", -+ pArena->name, uSize, uRequestSize, uAlignment, -+ uAlignmentOffset)); -+ -+ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags, -+ uAlignment, uAlignmentOffset, base); -+ if (!bResult) { -+ BM_MAPPING *psImportMapping; -+ IMG_UINTPTR_T import_base; -+ IMG_SIZE_T uImportSize = uSize; -+ -+ if (uAlignment > pArena->uQuantum) { -+ uImportSize += (uAlignment - 1); -+ } -+ -+ uImportSize = -+ ((uImportSize + pArena->uQuantum - -+ 1) / pArena->uQuantum) * pArena->uQuantum; -+ -+ bResult = -+ pArena->pImportAlloc(pArena->pImportHandle, uImportSize, -+ &uImportSize, &psImportMapping, uFlags, -+ &import_base); -+ if (bResult) { -+ BT *pBT; -+ pBT = -+ _InsertResourceSpan(pArena, import_base, -+ uImportSize); -+ -+ if (pBT == IMG_NULL) { -+ -+ pArena->pImportFree(pArena->pImportHandle, -+ import_base, -+ psImportMapping); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Alloc: name='%s', size=0x%x failed!", -+ pArena->name, uSize)); -+ -+ return IMG_FALSE; -+ } -+ pBT->psMapping = psImportMapping; -+#ifdef RA_STATS -+ pArena->sStatistics.uFreeSegmentCount++; -+ pArena->sStatistics.uFreeResourceCount += uImportSize; -+ pArena->sStatistics.uImportCount++; -+ pArena->sStatistics.uSpanCount++; -+#endif -+ bResult = -+ _AttemptAllocAligned(pArena, uSize, ppsMapping, -+ uFlags, uAlignment, -+ uAlignmentOffset, base); -+ if (!bResult) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Alloc: name='%s' uAlignment failed!", -+ pArena->name)); -+ } -+ } -+ } -+#ifdef RA_STATS -+ if (bResult) -+ pArena->sStatistics.uCumulativeAllocs++; -+#endif -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d", -+ pArena->name, uSize, *base, bResult)); -+ -+ return bResult; -+} -+ -+void RA_Free(RA_ARENA * pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore) -+{ -+ BT *pBT; -+ -+ PVR_ASSERT(pArena != IMG_NULL); -+ -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "RA_Free: name='%s', base=0x%x", pArena->name, base)); -+ -+ pBT = (BT *) HASH_Remove(pArena->pSegmentHash, base); -+ PVR_ASSERT(pBT != IMG_NULL); -+ -+ if (pBT) { -+ PVR_ASSERT(pBT->base == base); -+ -+#ifdef RA_STATS -+ pArena->sStatistics.uCumulativeFrees++; -+#endif -+ -+ _FreeBT(pArena, pBT, bFreeBackingStore); -+ } -+} -+ -+IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, -+ RA_SEGMENT_DETAILS * psSegDetails) -+{ -+ BT *pBT; -+ -+ if (psSegDetails->hSegment) { -+ pBT = (BT *) psSegDetails->hSegment; -+ } else { -+ RA_ARENA *pArena = (RA_ARENA *) hArena; -+ -+ pBT = pArena->pHeadSegment; -+ } -+ -+ while (pBT != IMG_NULL) { -+ if (pBT->type == btt_live) { -+ psSegDetails->uiSize = pBT->uSize; -+ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base; -+ 
psSegDetails->hSegment = (IMG_HANDLE) pBT->pNextSegment; -+ -+ return IMG_TRUE; -+ } -+ -+ pBT = pBT->pNextSegment; -+ } -+ -+ psSegDetails->uiSize = 0; -+ psSegDetails->sCpuPhyAddr.uiAddr = 0; -+ psSegDetails->hSegment = (IMG_HANDLE) - 1; -+ -+ return IMG_FALSE; -+} -+ -+ -+#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS) -+static char *_BTType(int eType) -+{ -+ switch (eType) { -+ case btt_span: -+ return "span"; -+ case btt_free: -+ return "free"; -+ case btt_live: -+ return "live"; -+ } -+ return "junk"; -+} -+#endif -+ -+#if defined(CONFIG_PROC_FS) && defined(DEBUG) -+static int -+RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof, -+ void *data) -+{ -+ BT *pBT = 0; -+ int len = 0; -+ RA_ARENA *pArena = (RA_ARENA *) data; -+ -+ if (count < 80) { -+ *start = (char *)0; -+ return (0); -+ } -+ *eof = 0; -+ *start = (char *)1; -+ if (off == 0) { -+ return printAppend(page, count, 0, -+ "Arena \"%s\"\nBase Size Type Ref\n", -+ pArena->name); -+ } -+ for (pBT = pArena->pHeadSegment; --off && pBT; -+ pBT = pBT->pNextSegment) ; -+ if (pBT) { -+ len = printAppend(page, count, 0, "%08x %8x %4s %08x\n", -+ (unsigned int)pBT->base, -+ (unsigned int)pBT->uSize, _BTType(pBT->type), -+ (unsigned int)pBT->psMapping); -+ } else { -+ *eof = 1; -+ } -+ return (len); -+} -+ -+static int -+RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof, -+ void *data) -+{ -+ int len = 0; -+ RA_ARENA *pArena = (RA_ARENA *) data; -+ -+ if (count < 80) { -+ *start = (char *)0; -+ return (0); -+ } -+ *eof = 0; -+ switch (off) { -+ case 0: -+ len = -+ printAppend(page, count, 0, "quantum\t\t\t%lu\n", -+ pArena->uQuantum); -+ break; -+ case 1: -+ len = -+ printAppend(page, count, 0, "import_handle\t\t%08X\n", -+ (unsigned int)pArena->pImportHandle); -+ break; -+#ifdef RA_STATS -+ case 2: -+ len = -+ printAppend(page, count, 0, "span count\t\t%lu\n", -+ pArena->sStatistics.uSpanCount); -+ break; -+ case 3: -+ len = -+ printAppend(page, count, 0, "live segment count\t%lu\n", -+ pArena->sStatistics.uLiveSegmentCount); -+ break; -+ case 4: -+ len = -+ printAppend(page, count, 0, "free segment count\t%lu\n", -+ pArena->sStatistics.uFreeSegmentCount); -+ break; -+ case 5: -+ len = -+ printAppend(page, count, 0, -+ "free resource count\t%lu (0x%x)\n", -+ pArena->sStatistics.uFreeResourceCount, -+ (unsigned int)pArena->sStatistics. 
-+ uFreeResourceCount); -+ break; -+ case 6: -+ len = -+ printAppend(page, count, 0, "total allocs\t\t%lu\n", -+ pArena->sStatistics.uCumulativeAllocs); -+ break; -+ case 7: -+ len = -+ printAppend(page, count, 0, "total frees\t\t%lu\n", -+ pArena->sStatistics.uCumulativeFrees); -+ break; -+ case 8: -+ len = -+ printAppend(page, count, 0, "import count\t\t%lu\n", -+ pArena->sStatistics.uImportCount); -+ break; -+ case 9: -+ len = -+ printAppend(page, count, 0, "export count\t\t%lu\n", -+ pArena->sStatistics.uExportCount); -+ break; -+#endif -+ -+ default: -+ *eof = 1; -+ } -+ *start = (char *)1; -+ return (len); -+} -+#endif -+ -+#ifdef RA_STATS -+PVRSRV_ERROR RA_GetStats(RA_ARENA * pArena, -+ IMG_CHAR ** ppszStr, IMG_UINT32 * pui32StrLen) -+{ -+ IMG_CHAR *pszStr = *ppszStr; -+ IMG_UINT32 ui32StrLen = *pui32StrLen; -+ IMG_INT32 i32Count; -+ BT *pBT; -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, -+ " allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n", -+ pArena->pImportAlloc, pArena->pImportFree, -+ pArena->pImportHandle, pArena->uQuantum); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "span count\t\t%lu\n", -+ pArena->sStatistics.uSpanCount); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "live segment count\t%lu\n", -+ pArena->sStatistics.uLiveSegmentCount); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "free segment count\t%lu\n", -+ pArena->sStatistics.uFreeSegmentCount); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n", -+ pArena->sStatistics.uFreeResourceCount, -+ (unsigned int)pArena->sStatistics. 
-+ uFreeResourceCount); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n", -+ pArena->sStatistics.uCumulativeAllocs); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n", -+ pArena->sStatistics.uCumulativeFrees); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "import count\t\t%lu\n", -+ pArena->sStatistics.uImportCount); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, "export count\t\t%lu\n", -+ pArena->sStatistics.uExportCount); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ CHECK_SPACE(ui32StrLen); -+ i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n"); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ -+ if (pArena->pHeadSegment != IMG_NULL && -+ pArena->pHeadSegment->pPrevSegment != IMG_NULL) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, -+ " error: head boundary tag has invalid pPrevSegment\n"); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ } -+ -+ if (pArena->pTailSegment != IMG_NULL && -+ pArena->pTailSegment->pNextSegment != IMG_NULL) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, -+ " error: tail boundary tag has invalid pNextSegment\n"); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ } -+ -+ for (pBT = pArena->pHeadSegment; pBT != IMG_NULL; -+ pBT = pBT->pNextSegment) { -+ CHECK_SPACE(ui32StrLen); -+ i32Count = -+ OSSNPrintf(pszStr, 100, -+ "\tbase=0x%x size=0x%x type=%s ref=%08X\n", -+ (unsigned long)pBT->base, pBT->uSize, -+ _BTType(pBT->type), pBT->psMapping); -+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen); -+ } -+ -+ *ppszStr = pszStr; -+ *pui32StrLen = ui32StrLen; -+ -+ return PVRSRV_OK; -+} -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/ra.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/ra.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/ra.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/ra.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,130 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _RA_H_ -+#define _RA_H_ -+ -+#include "img_types.h" -+#include "hash.h" -+#include "osfunc.h" -+ -+typedef struct _RA_ARENA_ RA_ARENA; -+typedef struct _BM_MAPPING_ BM_MAPPING; -+ -+#define RA_STATS -+ -+struct _RA_STATISTICS_ { -+ -+ IMG_UINT32 uSpanCount; -+ -+ IMG_UINT32 uLiveSegmentCount; -+ -+ IMG_UINT32 uFreeSegmentCount; -+ -+ IMG_UINT32 uTotalResourceCount; -+ -+ IMG_UINT32 uFreeResourceCount; -+ -+ IMG_UINT32 uCumulativeAllocs; -+ -+ IMG_UINT32 uCumulativeFrees; -+ -+ IMG_UINT32 uImportCount; -+ -+ IMG_UINT32 uExportCount; -+}; -+typedef struct _RA_STATISTICS_ RA_STATISTICS; -+ -+struct _RA_SEGMENT_DETAILS_ { -+ IMG_UINT32 uiSize; -+ IMG_CPU_PHYADDR sCpuPhyAddr; -+ IMG_HANDLE hSegment; -+}; -+typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS; -+ -+RA_ARENA *RA_Create(IMG_CHAR * name, -+ IMG_UINTPTR_T base, -+ IMG_SIZE_T uSize, -+ BM_MAPPING * psMapping, -+ IMG_SIZE_T uQuantum, -+ IMG_BOOL(*alloc) (IMG_VOID * _h, -+ IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, -+ IMG_UINTPTR_T * pBase), -+ IMG_VOID(*free) (IMG_VOID *, -+ IMG_UINTPTR_T, -+ BM_MAPPING * psMapping), -+ IMG_VOID(*backingstore_free) (IMG_VOID *, -+ IMG_UINT32, -+ IMG_UINT32, -+ IMG_HANDLE), -+ IMG_VOID * import_handle); -+ -+void RA_Delete(RA_ARENA * pArena); -+ -+IMG_BOOL RA_Add(RA_ARENA * pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize); -+ -+IMG_BOOL -+RA_Alloc(RA_ARENA * pArena, -+ IMG_SIZE_T uSize, -+ IMG_SIZE_T * pActualSize, -+ BM_MAPPING ** ppsMapping, -+ IMG_UINT32 uFlags, -+ IMG_UINT32 uAlignment, -+ IMG_UINT32 uAlignmentOffset, IMG_UINTPTR_T * pBase); -+ -+void RA_Free(RA_ARENA * pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore); -+ -+#ifdef RA_STATS -+ -+#define CHECK_SPACE(total) \ -+{ \ -+ if(total<100) \ -+ return PVRSRV_ERROR_INVALID_PARAMS; \ -+} -+ -+#define UPDATE_SPACE(str, count, total) \ -+{ \ -+ if(count == -1) \ -+ return PVRSRV_ERROR_INVALID_PARAMS; \ -+ else \ -+ { \ -+ str += count; \ -+ total -= count; \ -+ } \ -+} -+ -+IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, -+ RA_SEGMENT_DETAILS * psSegDetails); -+ -+PVRSRV_ERROR RA_GetStats(RA_ARENA * pArena, -+ IMG_CHAR ** ppszStr, IMG_UINT32 * pui32StrLen); -+ -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/README linux-omap-2.6.28-nokia1/drivers/gpu/pvr/README ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/README 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/README 2011-06-22 13:19:32.593063278 +0200 -@@ -0,0 +1,27 @@ -+ -+SGX Embedded Systems DDK for Linux kernel. -+Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved. -+====================================================================== -+ -+ -+About -+------------------------------------------- -+ -+This is the Imagination Technologies SGX DDK for the Linux kernel. -+ -+ -+License -+------------------------------------------- -+ -+You may use, distribute and copy this software under the terms of -+GNU General Public License version 2. -+ -+The full GNU General Public License version 2 is included in this -+distribution in the file called "COPYING". -+ -+ -+Contact information: -+------------------------------------------- -+ -+Imagination Technologies Ltd. 
-+Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/regpaths.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/regpaths.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/regpaths.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/regpaths.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,41 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __REGPATHS_H__ -+#define __REGPATHS_H__ -+ -+#define POWERVR_REG_ROOT "Drivers\\Display\\PowerVR" -+#define POWERVR_CHIP_KEY "\\SGX1\\" -+ -+#define POWERVR_EURASIA_KEY "PowerVREurasia\\" -+ -+#define POWERVR_SERVICES_KEY "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\" -+ -+#define PVRSRV_REGISTRY_ROOT POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM" -+ -+#define MAX_REG_STRING_SIZE 128 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/resman.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/resman.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/resman.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/resman.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,593 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "resman.h" -+ -+#ifndef AUTOCONF_INCLUDED -+#include -+#endif -+ -+#include -+#include -+#include -+ -+#include -+ -+static DECLARE_MUTEX(lock); -+ -+#define ACQUIRE_SYNC_OBJ do { \ -+ if (in_interrupt()) { \ -+ printk ("ISR cannot take RESMAN mutex\n"); \ -+ BUG(); \ -+ } \ -+ else down (&lock); \ -+} while (0) -+#define RELEASE_SYNC_OBJ up (&lock) -+ -+ -+#define RESMAN_SIGNATURE 0x12345678 -+ -+typedef struct _RESMAN_ITEM_ { -+#ifdef DEBUG -+ IMG_UINT32 ui32Signature; -+#endif -+ struct _RESMAN_ITEM_ **ppsThis; -+ struct _RESMAN_ITEM_ *psNext; -+ -+ IMG_UINT32 ui32Flags; -+ IMG_UINT32 ui32ResType; -+ -+ IMG_PVOID pvParam; -+ IMG_UINT32 ui32Param; -+ -+ RESMAN_FREE_FN pfnFreeResource; -+} RESMAN_ITEM; -+ -+typedef struct _RESMAN_CONTEXT_ { -+#ifdef DEBUG -+ IMG_UINT32 ui32Signature; -+#endif -+ struct _RESMAN_CONTEXT_ **ppsThis; -+ struct _RESMAN_CONTEXT_ *psNext; -+ -+ PVRSRV_PER_PROCESS_DATA *psPerProc; -+ -+ RESMAN_ITEM *psResItemList; -+ -+} RESMAN_CONTEXT; -+ -+typedef struct { -+ RESMAN_CONTEXT *psContextList; -+ -+} RESMAN_LIST, *PRESMAN_LIST; -+ -+PRESMAN_LIST gpsResList = IMG_NULL; -+ -+#define PRINT_RESLIST(x, y, z) -+ -+static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM * psItem, -+ IMG_BOOL bExecuteCallback); -+ -+static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext, -+ IMG_UINT32 ui32SearchCriteria, -+ IMG_UINT32 ui32ResType, -+ IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param, -+ IMG_BOOL bExecuteCallback); -+ -+#ifdef DEBUG -+static IMG_VOID ValidateResList(PRESMAN_LIST psResList); -+#define VALIDATERESLIST() ValidateResList(gpsResList) -+#else -+#define VALIDATERESLIST() -+#endif -+ -+PVRSRV_ERROR ResManInit(IMG_VOID) -+{ -+ if (gpsResList == IMG_NULL) { -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(*gpsResList), -+ (IMG_VOID **) & gpsResList, -+ IMG_NULL) != PVRSRV_OK) { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ gpsResList->psContextList = IMG_NULL; -+ -+ VALIDATERESLIST(); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID ResManDeInit(IMG_VOID) -+{ -+ if (gpsResList != IMG_NULL) { -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), -+ gpsResList, IMG_NULL); -+ } -+} -+ -+PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc, -+ PRESMAN_CONTEXT * phResManContext) -+{ -+ PVRSRV_ERROR eError; -+ PRESMAN_CONTEXT psResManContext; -+ -+ ACQUIRE_SYNC_OBJ; -+ -+ VALIDATERESLIST(); -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext), -+ (IMG_VOID **) & psResManContext, IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVResManConnect: ERROR allocating new RESMAN context struct")); -+ -+ VALIDATERESLIST(); -+ -+ RELEASE_SYNC_OBJ; -+ -+ return eError; -+ } -+#ifdef DEBUG -+ psResManContext->ui32Signature = RESMAN_SIGNATURE; -+#endif -+ psResManContext->psResItemList = IMG_NULL; -+ psResManContext->psPerProc = hPerProc; -+ -+ psResManContext->psNext = gpsResList->psContextList; -+ psResManContext->ppsThis = &gpsResList->psContextList; -+ gpsResList->psContextList = psResManContext; -+ if (psResManContext->psNext) { -+ psResManContext->psNext->ppsThis = &(psResManContext->psNext); -+ } -+ -+ VALIDATERESLIST(); -+ -+ RELEASE_SYNC_OBJ; -+ -+ *phResManContext = psResManContext; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext, -+ IMG_BOOL bKernelContext) -+{ -+ -+ 
ACQUIRE_SYNC_OBJ; -+ -+ VALIDATERESLIST(); -+ -+ PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE); -+ -+ if (!bKernelContext) { -+ -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, -+ IMG_TRUE); -+ -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_EVENT_OBJECT, 0, 0, -+ IMG_TRUE); -+ -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, -+ 0, 0, IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_SHARED_PB_DESC, 0, 0, -+ IMG_TRUE); -+ -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, -+ IMG_TRUE); -+ -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, -+ IMG_TRUE); -+ -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, -+ 0, 0, IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, -+ IMG_TRUE); -+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, -+ RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, -+ IMG_TRUE); -+ } -+ -+ PVR_ASSERT(psResManContext->psResItemList == IMG_NULL); -+ -+ *(psResManContext->ppsThis) = psResManContext->psNext; -+ if (psResManContext->psNext) { -+ psResManContext->psNext->ppsThis = psResManContext->ppsThis; -+ } -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), -+ psResManContext, IMG_NULL); -+ -+ VALIDATERESLIST(); -+ -+ PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE); -+ -+ RELEASE_SYNC_OBJ; -+} -+ -+PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext, -+ IMG_UINT32 ui32ResType, -+ IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param, -+ RESMAN_FREE_FN pfnFreeResource) -+{ -+ PRESMAN_ITEM psNewResItem; -+ -+ PVR_ASSERT(psResManContext != IMG_NULL); -+ PVR_ASSERT(ui32ResType != 0); -+ -+ ACQUIRE_SYNC_OBJ; -+ -+ VALIDATERESLIST(); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource " -+ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, " -+ "FreeFunc %08X", -+ psResManContext, ui32ResType, (IMG_UINT32) pvParam, -+ ui32Param, pfnFreeResource)); -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(RESMAN_ITEM), (IMG_VOID **) & psNewResItem, -+ IMG_NULL) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: " -+ "ERROR allocating new resource item")); -+ -+ RELEASE_SYNC_OBJ; -+ -+ return ((PRESMAN_ITEM) 
IMG_NULL); -+ } -+ -+#ifdef DEBUG -+ psNewResItem->ui32Signature = RESMAN_SIGNATURE; -+#endif -+ psNewResItem->ui32ResType = ui32ResType; -+ psNewResItem->pvParam = pvParam; -+ psNewResItem->ui32Param = ui32Param; -+ psNewResItem->pfnFreeResource = pfnFreeResource; -+ psNewResItem->ui32Flags = 0; -+ -+ psNewResItem->ppsThis = &psResManContext->psResItemList; -+ psNewResItem->psNext = psResManContext->psResItemList; -+ psResManContext->psResItemList = psNewResItem; -+ if (psNewResItem->psNext) { -+ psNewResItem->psNext->ppsThis = &psNewResItem->psNext; -+ } -+ -+ VALIDATERESLIST(); -+ -+ RELEASE_SYNC_OBJ; -+ -+ return (psNewResItem); -+} -+ -+PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM * psResItem) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psResItem != IMG_NULL); -+ -+ if (psResItem == IMG_NULL) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "ResManFreeResByPtr: NULL ptr - nothing to do")); -+ return PVRSRV_OK; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "ResManFreeResByPtr: freeing resource at %08X", psResItem)); -+ -+ ACQUIRE_SYNC_OBJ; -+ -+ VALIDATERESLIST(); -+ -+ eError = FreeResourceByPtr(psResItem, IMG_TRUE); -+ -+ VALIDATERESLIST(); -+ -+ RELEASE_SYNC_OBJ; -+ -+ return (eError); -+} -+ -+PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext, -+ IMG_UINT32 ui32SearchCriteria, -+ IMG_UINT32 ui32ResType, -+ IMG_PVOID pvParam, IMG_UINT32 ui32Param) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psResManContext != IMG_NULL); -+ -+ ACQUIRE_SYNC_OBJ; -+ -+ VALIDATERESLIST(); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: " -+ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x", -+ psResManContext, ui32SearchCriteria, ui32ResType, -+ (IMG_UINT32) pvParam, ui32Param)); -+ -+ eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria, -+ ui32ResType, pvParam, ui32Param, -+ IMG_TRUE); -+ -+ VALIDATERESLIST(); -+ -+ RELEASE_SYNC_OBJ; -+ -+ return eError; -+} -+ -+IMG_VOID ResManDissociateRes(RESMAN_ITEM * psResItem, -+ PRESMAN_CONTEXT psNewResManContext) -+{ -+ PVR_ASSERT(psResItem != IMG_NULL); -+ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE); -+ -+ if (psNewResManContext != IMG_NULL) { -+ -+ if (psResItem->psNext) { -+ psResItem->psNext->ppsThis = psResItem->ppsThis; -+ } -+ *psResItem->ppsThis = psResItem->psNext; -+ -+ psResItem->ppsThis = &psNewResManContext->psResItemList; -+ psResItem->psNext = psNewResManContext->psResItemList; -+ psNewResManContext->psResItemList = psResItem; -+ if (psResItem->psNext) { -+ psResItem->psNext->ppsThis = &psResItem->psNext; -+ } -+ } else { -+ FreeResourceByPtr(psResItem, IMG_FALSE); -+ } -+} -+ -+IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT -+ psResManContext, -+ RESMAN_ITEM * psItem) -+{ -+ RESMAN_ITEM *psCurItem; -+ -+ PVR_ASSERT(psResManContext != IMG_NULL); -+ PVR_ASSERT(psItem != IMG_NULL); -+ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE); -+ -+ ACQUIRE_SYNC_OBJ; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X", -+ psItem, psItem->psNext)); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, " -+ "Param 0x%x, FnCall %08X, Flags 0x%x", -+ psResManContext, -+ psItem->ui32ResType, (IMG_UINT32) psItem->pvParam, -+ psItem->ui32Param, psItem->pfnFreeResource, -+ psItem->ui32Flags)); -+ -+ psCurItem = psResManContext->psResItemList; -+ -+ while (psCurItem != IMG_NULL) { -+ -+ if (psCurItem != psItem) { -+ -+ psCurItem = psCurItem->psNext; -+ } else { -+ -+ RELEASE_SYNC_OBJ; -+ return PVRSRV_OK; -+ } -+ } 
-+ -+ RELEASE_SYNC_OBJ; -+ -+ return PVRSRV_ERROR_NOT_OWNER; -+} -+ -+static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM * psItem, -+ IMG_BOOL bExecuteCallback) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT(psItem != IMG_NULL); -+ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X", -+ psItem, psItem->psNext)); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, " -+ "Param 0x%x, FnCall %08X, Flags 0x%x", -+ psItem->ui32ResType, (IMG_UINT32) psItem->pvParam, -+ psItem->ui32Param, psItem->pfnFreeResource, -+ psItem->ui32Flags)); -+ -+ if (psItem->psNext) { -+ psItem->psNext->ppsThis = psItem->ppsThis; -+ } -+ *psItem->ppsThis = psItem->psNext; -+ -+ RELEASE_SYNC_OBJ; -+ -+ if (bExecuteCallback) { -+ eError = -+ psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeResourceByPtr: ERROR calling FreeResource function")); -+ } -+ } -+ -+ ACQUIRE_SYNC_OBJ; -+ -+ if (OSFreeMem -+ (PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL) -+ != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeResourceByPtr: ERROR freeing resource list item memory")); -+ eError = PVRSRV_ERROR_GENERIC; -+ } -+ -+ return (eError); -+} -+ -+static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext, -+ IMG_UINT32 ui32SearchCriteria, -+ IMG_UINT32 ui32ResType, -+ IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param, -+ IMG_BOOL bExecuteCallback) -+{ -+ PRESMAN_ITEM psCurItem; -+ IMG_BOOL bMatch; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ psCurItem = psResManContext->psResItemList; -+ -+ while (psCurItem != IMG_NULL) { -+ -+ bMatch = IMG_TRUE; -+ -+ if ((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) && -+ psCurItem->ui32ResType != ui32ResType) { -+ bMatch = IMG_FALSE; -+ } -+ -+ else if ((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) && -+ psCurItem->pvParam != pvParam) { -+ bMatch = IMG_FALSE; -+ } -+ -+ else if ((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) && -+ psCurItem->ui32Param != ui32Param) { -+ bMatch = IMG_FALSE; -+ } -+ -+ if (!bMatch) { -+ -+ psCurItem = psCurItem->psNext; -+ } else { -+ -+ eError = FreeResourceByPtr(psCurItem, bExecuteCallback); -+ -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ psCurItem = psResManContext->psResItemList; -+ } -+ } -+ -+ return eError; -+} -+ -+#ifdef DEBUG -+static IMG_VOID ValidateResList(PRESMAN_LIST psResList) -+{ -+ PRESMAN_ITEM psCurItem, *ppsThisItem; -+ PRESMAN_CONTEXT psCurContext, *ppsThisContext; -+ -+ if (psResList == IMG_NULL) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "ValidateResList: resman not initialised yet")); -+ return; -+ } -+ -+ psCurContext = psResList->psContextList; -+ ppsThisContext = &psResList->psContextList; -+ -+ while (psCurContext != IMG_NULL) { -+ -+ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE); -+ if (psCurContext->ppsThis != ppsThisContext) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X", -+ psCurContext, psCurContext->ppsThis, -+ psCurContext->psNext, ppsThisContext)); -+ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext); -+ } -+ -+ psCurItem = psCurContext->psResItemList; -+ ppsThisItem = &psCurContext->psResItemList; -+ while (psCurItem != IMG_NULL) { -+ -+ PVR_ASSERT(psCurItem->ui32Signature == -+ RESMAN_SIGNATURE); -+ if (psCurItem->ppsThis != ppsThisItem) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X 
ppsThisItem=%08X", -+ psCurItem, psCurItem->ppsThis, -+ psCurItem->psNext, ppsThisItem)); -+ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem); -+ } -+ -+ ppsThisItem = &psCurItem->psNext; -+ psCurItem = psCurItem->psNext; -+ } -+ -+ ppsThisContext = &psCurContext->psNext; -+ psCurContext = psCurContext->psNext; -+ } -+} -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/resman.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/resman.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/resman.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/resman.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,97 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __RESMAN_H__ -+#define __RESMAN_H__ -+ -+ -+ enum { -+ -+ RESMAN_TYPE_SHARED_PB_DESC = 1, -+ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, -+ RESMAN_TYPE_HW_RENDER_CONTEXT, -+ RESMAN_TYPE_HW_TRANSFER_CONTEXT, -+ RESMAN_TYPE_HW_2D_CONTEXT, -+ RESMAN_TYPE_TRANSFER_CONTEXT, -+ -+ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN, -+ RESMAN_TYPE_DISPLAYCLASS_DEVICE, -+ -+ RESMAN_TYPE_BUFFERCLASS_DEVICE, -+ -+ RESMAN_TYPE_OS_USERMODE_MAPPING, -+ -+ RESMAN_TYPE_DEVICEMEM_CONTEXT, -+ RESMAN_TYPE_DEVICECLASSMEM_MAPPING, -+ RESMAN_TYPE_DEVICEMEM_MAPPING, -+ RESMAN_TYPE_DEVICEMEM_WRAP, -+ RESMAN_TYPE_DEVICEMEM_ALLOCATION, -+ RESMAN_TYPE_EVENT_OBJECT, -+ RESMAN_TYPE_SHARED_MEM_INFO, -+ -+ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION -+ }; -+ -+#define RESMAN_CRITERIA_ALL 0x00000000 -+#define RESMAN_CRITERIA_RESTYPE 0x00000001 -+#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002 -+#define RESMAN_CRITERIA_UI32_PARAM 0x00000004 -+ -+ typedef PVRSRV_ERROR(*RESMAN_FREE_FN) (IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param); -+ -+ typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM; -+ typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT; -+ -+ PVRSRV_ERROR ResManInit(IMG_VOID); -+ IMG_VOID ResManDeInit(IMG_VOID); -+ -+ PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext, -+ IMG_UINT32 ui32ResType, -+ IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param, -+ RESMAN_FREE_FN pfnFreeResource); -+ -+ PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem); -+ -+ PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext, -+ IMG_UINT32 ui32SearchCriteria, -+ IMG_UINT32 ui32ResType, -+ IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param); -+ -+ IMG_VOID ResManDissociateRes(PRESMAN_ITEM psResItem, -+ PRESMAN_CONTEXT psNewResManContext); -+ 
-+ PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext, -+ PRESMAN_ITEM psItem); -+ -+ PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc, -+ PRESMAN_CONTEXT * phResManContext); -+ IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext, -+ IMG_BOOL bKernelContext); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/servicesext.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/servicesext.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/servicesext.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/servicesext.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,396 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__SERVICESEXT_H__) -+#define __SERVICESEXT_H__ -+ -+#define PVRSRV_LOCKFLG_READONLY (1) -+ -+typedef enum _PVRSRV_ERROR_ { -+ PVRSRV_OK = 0, -+ PVRSRV_ERROR_GENERIC = 1, -+ PVRSRV_ERROR_OUT_OF_MEMORY = 2, -+ PVRSRV_ERROR_TOO_FEW_BUFFERS = 3, -+ PVRSRV_ERROR_SYMBOL_NOT_FOUND = 4, -+ PVRSRV_ERROR_OUT_OF_HSPACE = 5, -+ PVRSRV_ERROR_INVALID_PARAMS = 6, -+ PVRSRV_ERROR_TILE_MAP_FAILED = 7, -+ PVRSRV_ERROR_INIT_FAILURE = 8, -+ PVRSRV_ERROR_CANT_REGISTER_CALLBACK = 9, -+ PVRSRV_ERROR_INVALID_DEVICE = 10, -+ PVRSRV_ERROR_NOT_OWNER = 11, -+ PVRSRV_ERROR_BAD_MAPPING = 12, -+ PVRSRV_ERROR_TIMEOUT = 13, -+ PVRSRV_ERROR_NO_PRIMARY = 14, -+ PVRSRV_ERROR_FLIP_CHAIN_EXISTS = 15, -+ PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA = 16, -+ PVRSRV_ERROR_SCENE_INVALID = 17, -+ PVRSRV_ERROR_STREAM_ERROR = 18, -+ PVRSRV_ERROR_INVALID_INTERRUPT = 19, -+ PVRSRV_ERROR_FAILED_DEPENDENCIES = 20, -+ PVRSRV_ERROR_CMD_NOT_PROCESSED = 21, -+ PVRSRV_ERROR_CMD_TOO_BIG = 22, -+ PVRSRV_ERROR_DEVICE_REGISTER_FAILED = 23, -+ PVRSRV_ERROR_FIFO_SPACE = 24, -+ PVRSRV_ERROR_TA_RECOVERY = 25, -+ PVRSRV_ERROR_INDOSORLOWPOWER = 26, -+ PVRSRV_ERROR_TOOMANYBUFFERS = 27, -+ PVRSRV_ERROR_NOT_SUPPORTED = 28, -+ PVRSRV_ERROR_PROCESSING_BLOCKED = 29, -+ -+ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE = 31, -+ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE = 32, -+ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS = 33, -+ PVRSRV_ERROR_RETRY = 34, -+ -+ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff -+} PVRSRV_ERROR; -+ -+typedef enum _PVRSRV_DEVICE_CLASS_ { -+ PVRSRV_DEVICE_CLASS_3D = 0, -+ PVRSRV_DEVICE_CLASS_DISPLAY = 1, -+ PVRSRV_DEVICE_CLASS_BUFFER = 2, -+ PVRSRV_DEVICE_CLASS_VIDEO = 3, -+ -+ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff -+} PVRSRV_DEVICE_CLASS; -+ 
-+typedef enum _PVRSRV_POWER_STATE_ { -+ PVRSRV_POWER_Unspecified = -1, -+ PVRSRV_POWER_STATE_D0 = 0, -+ PVRSRV_POWER_STATE_D1 = 1, -+ PVRSRV_POWER_STATE_D2 = 2, -+ PVRSRV_POWER_STATE_D3 = 3, -+ PVRSRV_POWER_STATE_D4 = 4, -+ -+ PVRSRV_POWER_STATE_FORCE_I32 = 0x7fffffff -+} PVR_POWER_STATE, *PPVR_POWER_STATE; -+ -+typedef PVRSRV_ERROR(*PFN_PRE_POWER) (IMG_HANDLE, PVR_POWER_STATE, -+ PVR_POWER_STATE); -+typedef PVRSRV_ERROR(*PFN_POST_POWER) (IMG_HANDLE, PVR_POWER_STATE, -+ PVR_POWER_STATE); -+ -+typedef PVRSRV_ERROR(*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE, IMG_BOOL, -+ PVR_POWER_STATE); -+typedef PVRSRV_ERROR(*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE, IMG_BOOL, -+ PVR_POWER_STATE); -+ -+typedef enum _PVRSRV_PIXEL_FORMAT_ { -+ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0, -+ PVRSRV_PIXEL_FORMAT_RGB565 = 1, -+ PVRSRV_PIXEL_FORMAT_RGB555 = 2, -+ PVRSRV_PIXEL_FORMAT_RGB888 = 3, -+ PVRSRV_PIXEL_FORMAT_BGR888 = 4, -+ PVRSRV_PIXEL_FORMAT_YUV420 = 5, -+ PVRSRV_PIXEL_FORMAT_YUV444 = 6, -+ PVRSRV_PIXEL_FORMAT_VUY444 = 7, -+ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8, -+ PVRSRV_PIXEL_FORMAT_YUYV = 9, -+ PVRSRV_PIXEL_FORMAT_YVYU = 10, -+ PVRSRV_PIXEL_FORMAT_UYVY = 11, -+ PVRSRV_PIXEL_FORMAT_VYUY = 12, -+ PVRSRV_PIXEL_FORMAT_PAL12 = 13, -+ PVRSRV_PIXEL_FORMAT_PAL8 = 14, -+ PVRSRV_PIXEL_FORMAT_PAL4 = 15, -+ PVRSRV_PIXEL_FORMAT_PAL2 = 16, -+ PVRSRV_PIXEL_FORMAT_PAL1 = 17, -+ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18, -+ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19, -+ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20, -+ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21, -+ PVRSRV_PIXEL_FORMAT_YV12 = 22, -+ PVRSRV_PIXEL_FORMAT_I420 = 23, -+ PVRSRV_PIXEL_FORMAT_DXT1 = 24, -+ PVRSRV_PIXEL_FORMAT_IMC2 = 25, -+ -+ PVRSRV_PIXEL_FORMAT_XRGB8888, -+ PVRSRV_PIXEL_FORMAT_XBGR8888, -+ PVRSRV_PIXEL_FORMAT_XRGB4444, -+ PVRSRV_PIXEL_FORMAT_G16R16, -+ PVRSRV_PIXEL_FORMAT_G16R16F, -+ PVRSRV_PIXEL_FORMAT_ARGB8332, -+ PVRSRV_PIXEL_FORMAT_A2RGB10, -+ PVRSRV_PIXEL_FORMAT_A2BGR10, -+ PVRSRV_PIXEL_FORMAT_B10GR11, -+ PVRSRV_PIXEL_FORMAT_GR88, -+ PVRSRV_PIXEL_FORMAT_ABGR16, -+ PVRSRV_PIXEL_FORMAT_ABGR32, -+ PVRSRV_PIXEL_FORMAT_BGR32, -+ PVRSRV_PIXEL_FORMAT_GR32, -+ PVRSRV_PIXEL_FORMAT_ABGR16F, -+ PVRSRV_PIXEL_FORMAT_ABGR32F, -+ PVRSRV_PIXEL_FORMAT_R32, -+ PVRSRV_PIXEL_FORMAT_R32F, -+ PVRSRV_PIXEL_FORMAT_R8, -+ PVRSRV_PIXEL_FORMAT_A8, -+ PVRSRV_PIXEL_FORMAT_P8, -+ PVRSRV_PIXEL_FORMAT_L8, -+ PVRSRV_PIXEL_FORMAT_A8L8, -+ PVRSRV_PIXEL_FORMAT_A4L4, -+ PVRSRV_PIXEL_FORMAT_R1, -+ PVRSRV_PIXEL_FORMAT_L16, -+ PVRSRV_PIXEL_FORMAT_R16, -+ PVRSRV_PIXEL_FORMAT_R16F, -+ PVRSRV_PIXEL_FORMAT_L6V5U5, -+ PVRSRV_PIXEL_FORMAT_V8U8, -+ PVRSRV_PIXEL_FORMAT_V16U16, -+ PVRSRV_PIXEL_FORMAT_QWVU8888, -+ PVRSRV_PIXEL_FORMAT_XLVU8888, -+ PVRSRV_PIXEL_FORMAT_QWVU16, -+ PVRSRV_PIXEL_FORMAT_D16, -+ PVRSRV_PIXEL_FORMAT_D24S8, -+ PVRSRV_PIXEL_FORMAT_D24X8, -+ PVRSRV_PIXEL_FORMAT_D32F, -+ PVRSRV_PIXEL_FORMAT_R8G8_B8G8, -+ PVRSRV_PIXEL_FORMAT_G8R8_G8B8, -+ PVRSRV_PIXEL_FORMAT_YUY2, -+ PVRSRV_PIXEL_FORMAT_DXT23, -+ PVRSRV_PIXEL_FORMAT_DXT45, -+ PVRSRV_PIXEL_FORMAT_G32R32F, -+ PVRSRV_PIXEL_FORMAT_NV11, -+ PVRSRV_PIXEL_FORMAT_NV12, -+ -+ PVRSRV_PIXEL_FORMAT_X24G8R32, -+ PVRSRV_PIXEL_FORMAT_G8R24, -+ PVRSRV_PIXEL_FORMAT_E5BGR9, -+ -+ PVRSRV_PIXEL_FORMAT_BC1, -+ PVRSRV_PIXEL_FORMAT_BC2, -+ PVRSRV_PIXEL_FORMAT_BC3, -+ PVRSRV_PIXEL_FORMAT_BC4, -+ PVRSRV_PIXEL_FORMAT_BC5, -+ -+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY, -+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV, -+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU, -+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY, -+ -+ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff, -+} PVRSRV_PIXEL_FORMAT; -+ -+typedef enum 
_PVRSRV_ALPHA_FORMAT_ { -+ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000, -+ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001, -+ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002, -+ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F, -+} PVRSRV_ALPHA_FORMAT; -+ -+typedef enum _PVRSRV_COLOURSPACE_FORMAT_ { -+ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000, -+ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000, -+ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000, -+ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000, -+} PVRSRV_COLOURSPACE_FORMAT; -+ -+#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0) -+#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1) -+ -+typedef struct _PVRSRV_SYNC_DATA_ { -+ -+ IMG_UINT32 ui32WriteOpsPending; -+ volatile IMG_UINT32 ui32WriteOpsComplete; -+ -+ IMG_UINT32 ui32ReadOpsPending; -+ volatile IMG_UINT32 ui32ReadOpsComplete; -+ -+ IMG_UINT32 ui32LastOpDumpVal; -+ IMG_UINT32 ui32LastReadOpDumpVal; -+ -+} PVRSRV_SYNC_DATA; -+ -+typedef struct _PVRSRV_CLIENT_SYNC_INFO_ { -+ -+ PVRSRV_SYNC_DATA *psSyncData; -+ -+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; -+ -+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; -+ -+ IMG_HANDLE hMappingInfo; -+ -+ IMG_HANDLE hKernelSyncInfo; -+ -+} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO; -+ -+typedef struct PVRSRV_RESOURCE_TAG { -+ volatile IMG_UINT32 ui32Lock; -+ IMG_UINT32 ui32ID; -+} PVRSRV_RESOURCE; -+typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE; -+ -+typedef IMG_VOID(*PFN_CMD_COMPLETE) (IMG_HANDLE); -+typedef IMG_VOID(**PPFN_CMD_COMPLETE) (IMG_HANDLE); -+ -+typedef IMG_BOOL(*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID *); -+typedef IMG_BOOL(**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID *); -+ -+typedef struct _IMG_RECT_ { -+ IMG_INT32 x0; -+ IMG_INT32 y0; -+ IMG_INT32 x1; -+ IMG_INT32 y1; -+} IMG_RECT; -+ -+typedef struct _IMG_RECT_16_ { -+ IMG_INT16 x0; -+ IMG_INT16 y0; -+ IMG_INT16 x1; -+ IMG_INT16 y1; -+} IMG_RECT_16; -+ -+typedef PVRSRV_ERROR(*PFN_GET_BUFFER_ADDR) (IMG_HANDLE, -+ IMG_HANDLE, -+ IMG_SYS_PHYADDR **, -+ IMG_UINT32 *, -+ IMG_VOID **, -+ IMG_HANDLE *, IMG_BOOL *); -+ -+typedef struct DISPLAY_DIMS_TAG { -+ IMG_UINT32 ui32ByteStride; -+ IMG_UINT32 ui32Width; -+ IMG_UINT32 ui32Height; -+} DISPLAY_DIMS; -+ -+typedef struct DISPLAY_FORMAT_TAG { -+ -+ PVRSRV_PIXEL_FORMAT pixelformat; -+} DISPLAY_FORMAT; -+ -+typedef struct DISPLAY_SURF_ATTRIBUTES_TAG { -+ -+ PVRSRV_PIXEL_FORMAT pixelformat; -+ -+ DISPLAY_DIMS sDims; -+} DISPLAY_SURF_ATTRIBUTES; -+ -+typedef struct DISPLAY_MODE_INFO_TAG { -+ -+ PVRSRV_PIXEL_FORMAT pixelformat; -+ -+ DISPLAY_DIMS sDims; -+ -+ IMG_UINT32 ui32RefreshHZ; -+ -+ IMG_UINT32 ui32OEMFlags; -+} DISPLAY_MODE_INFO; -+ -+#define MAX_DISPLAY_NAME_SIZE (50) -+ -+typedef struct DISPLAY_INFO_TAG { -+ IMG_UINT32 ui32MaxSwapChains; -+ -+ IMG_UINT32 ui32MaxSwapChainBuffers; -+ -+ IMG_UINT32 ui32MinSwapInterval; -+ -+ IMG_UINT32 ui32MaxSwapInterval; -+ -+ IMG_CHAR szDisplayName[MAX_DISPLAY_NAME_SIZE]; -+ -+ -+} DISPLAY_INFO; -+ -+typedef struct ACCESS_INFO_TAG { -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32FBPhysBaseAddress; -+ IMG_UINT32 ui32FBMemAvailable; -+ IMG_UINT32 ui32SysPhysBaseAddress; -+ IMG_UINT32 ui32SysSize; -+ IMG_UINT32 ui32DevIRQ; -+} ACCESS_INFO; -+ -+typedef struct PVRSRV_CURSOR_SHAPE_TAG { -+ IMG_UINT16 ui16Width; -+ IMG_UINT16 ui16Height; -+ IMG_INT16 i16XHot; -+ IMG_INT16 i16YHot; -+ -+ IMG_VOID *pvMask; -+ IMG_INT16 i16MaskByteStride; -+ -+ IMG_VOID *pvColour; -+ IMG_INT16 i16ColourByteStride; -+ PVRSRV_PIXEL_FORMAT eColourPixelFormat; -+} PVRSRV_CURSOR_SHAPE; -+ -+#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0) -+#define 
PVRSRV_SET_CURSOR_POSITION (1<<1) -+#define PVRSRV_SET_CURSOR_SHAPE (1<<2) -+#define PVRSRV_SET_CURSOR_ROTATION (1<<3) -+ -+typedef struct PVRSRV_CURSOR_INFO_TAG { -+ -+ IMG_UINT32 ui32Flags; -+ -+ IMG_BOOL bVisible; -+ -+ IMG_INT16 i16XPos; -+ IMG_INT16 i16YPos; -+ -+ PVRSRV_CURSOR_SHAPE sCursorShape; -+ -+ IMG_UINT32 ui32Rotation; -+ -+} PVRSRV_CURSOR_INFO; -+ -+typedef struct _PVRSRV_REGISTRY_INFO_ { -+ IMG_UINT32 ui32DevCookie; -+ IMG_PCHAR pszKey; -+ IMG_PCHAR pszValue; -+ IMG_PCHAR pszBuf; -+ IMG_UINT32 ui32BufSize; -+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO; -+ -+PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString(PPVRSRV_REGISTRY_INFO -+ psRegInfo); -+PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString(PPVRSRV_REGISTRY_INFO -+ psRegInfo); -+ -+#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0) -+#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0) -+ -+#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1) -+#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1) -+ -+typedef struct BUFFER_INFO_TAG { -+ IMG_UINT32 ui32BufferCount; -+ IMG_UINT32 ui32BufferDeviceID; -+ PVRSRV_PIXEL_FORMAT pixelformat; -+ IMG_UINT32 ui32ByteStride; -+ IMG_UINT32 ui32Width; -+ IMG_UINT32 ui32Height; -+ IMG_UINT32 ui32Flags; -+} BUFFER_INFO; -+ -+typedef enum _OVERLAY_DEINTERLACE_MODE_ { -+ WEAVE = 0x0, -+ BOB_ODD, -+ BOB_EVEN, -+ BOB_EVEN_NONINTERLEAVED -+} OVERLAY_DEINTERLACE_MODE; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/services.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/services.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/services.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/services.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,968 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __SERVICES_H__ -+#define __SERVICES_H__ -+ -+ -+#include "img_defs.h" -+#include "servicesext.h" -+#include "pdumpdefs.h" -+ -+#define IMG_CONST const -+ -+#define PVRSRV_MAX_CMD_SIZE 1024 -+ -+#define PVRSRV_MAX_DEVICES 16 -+ -+#define EVENTOBJNAME_MAXLENGTH (50) -+ -+#define PVRSRV_MEM_READ (1UL<<0) -+#define PVRSRV_MEM_WRITE (1UL<<1) -+#define PVRSRV_MEM_CACHE_CONSISTENT (1UL<<2) -+#define PVRSRV_MEM_NO_SYNCOBJ (1UL<<3) -+#define PVRSRV_MEM_INTERLEAVED (1UL<<4) -+#define PVRSRV_MEM_DUMMY (1UL<<5) -+#define PVRSRV_MEM_EDM_PROTECT (1UL<<6) -+#define PVRSRV_MEM_ZERO (1UL<<7) -+#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1UL<<8) -+#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1UL<<9) -+#define PVRSRV_MEM_NO_RESMAN (1UL<<10) -+ -+#define PVRSRV_HAP_CACHED (1UL<<12) -+#define PVRSRV_HAP_UNCACHED (1UL<<13) -+#define PVRSRV_HAP_WRITECOMBINE (1UL<<14) -+#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE) -+#define PVRSRV_HAP_KERNEL_ONLY (1UL<<15) -+#define PVRSRV_HAP_SINGLE_PROCESS (1UL<<16) -+#define PVRSRV_HAP_MULTI_PROCESS (1UL<<17) -+#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1UL<<18) -+#define PVRSRV_HAP_NO_CPU_VIRTUAL (1UL<<19) -+#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \ -+ |PVRSRV_HAP_SINGLE_PROCESS \ -+ |PVRSRV_HAP_MULTI_PROCESS \ -+ |PVRSRV_HAP_FROM_EXISTING_PROCESS \ -+ |PVRSRV_HAP_NO_CPU_VIRTUAL) -+#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24) -+ -+#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27) -+ -+#define PVRSRV_NO_CONTEXT_LOSS 0 -+#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1 -+#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80 -+ -+#define PVRSRV_DEFAULT_DEV_COOKIE (1) -+ -+#define PVRSRV_MISC_INFO_TIMER_PRESENT (1UL<<0) -+#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1UL<<1) -+#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1UL<<2) -+#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1UL<<3) -+ -+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20 -+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200 -+ -+#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001 -+ -+#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001 -+#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002 -+ -+ typedef enum _PVRSRV_DEVICE_TYPE_ { -+ PVRSRV_DEVICE_TYPE_UNKNOWN = 0, -+ PVRSRV_DEVICE_TYPE_MBX1 = 1, -+ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2, -+ -+ PVRSRV_DEVICE_TYPE_M24VA = 3, -+ PVRSRV_DEVICE_TYPE_MVDA2 = 4, -+ PVRSRV_DEVICE_TYPE_MVED1 = 5, -+ PVRSRV_DEVICE_TYPE_MSVDX = 6, -+ -+ PVRSRV_DEVICE_TYPE_SGX = 7, -+ -+ PVRSRV_DEVICE_TYPE_EXT = 8, -+ -+ PVRSRV_DEVICE_TYPE_LAST = 8, -+ -+ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff -+ } PVRSRV_DEVICE_TYPE; -+ -+#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) ) -+#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) ) -+#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 ) -+ -+ typedef enum { -+ IMG_EGL = 0x00000001, -+ IMG_OPENGLES1 = 0x00000002, -+ IMG_OPENGLES2 = 0x00000003, -+ IMG_D3DM = 0x00000004, -+ IMG_SRV_UM = 0x00000005, -+ IMG_OPENVG = 0x00000006, -+ IMG_SRVCLIENT = 0x00000007, -+ IMG_VISTAKMD = 0x00000008, -+ IMG_VISTA3DNODE = 0x00000009, -+ IMG_VISTAMVIDEONODE = 0x0000000A, -+ IMG_VISTAVPBNODE = 0x0000000B, -+ IMG_OPENGL = 0x0000000C, -+ IMG_D3D = 0x0000000D -+ } IMG_MODULE_ID; -+ -+#define APPHINT_MAX_STRING_SIZE 256 -+ -+ typedef enum { -+ IMG_STRING_TYPE = 1, -+ IMG_FLOAT_TYPE, -+ IMG_UINT_TYPE, -+ IMG_INT_TYPE, -+ 
IMG_FLAG_TYPE -+ } IMG_DATA_TYPE; -+ -+ typedef struct _PVRSRV_CONNECTION_ { -+ IMG_HANDLE hServices; -+ IMG_UINT32 ui32ProcessID; -+ } PVRSRV_CONNECTION; -+ -+ typedef struct _PVRSRV_DEV_DATA_ { -+ PVRSRV_CONNECTION sConnection; -+ IMG_HANDLE hDevCookie; -+ -+ } PVRSRV_DEV_DATA, *PPVRSRV_DEV_DATA; -+ -+ typedef struct _PVRSRV_MEMUPDATE_ { -+ IMG_UINT32 ui32UpdateAddr; -+ IMG_UINT32 ui32UpdateVal; -+ } PVRSRV_MEMUPDATE; -+ -+ typedef struct _PVRSRV_HWREG_ { -+ IMG_UINT32 ui32RegAddr; -+ IMG_UINT32 ui32RegVal; -+ } PVRSRV_HWREG; -+ -+ typedef struct _PVRSRV_MEMBLK_ { -+ IMG_DEV_VIRTADDR sDevVirtAddr; -+ IMG_HANDLE hOSMemHandle; -+ IMG_HANDLE hBuffer; -+ IMG_HANDLE hResItem; -+ } PVRSRV_MEMBLK; -+ -+ typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO; -+ -+ typedef struct _PVRSRV_CLIENT_MEM_INFO_ { -+ -+ IMG_PVOID pvLinAddr; -+ -+ IMG_PVOID pvLinAddrKM; -+ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ -+ IMG_CPU_PHYADDR sCpuPAddr; -+ -+ IMG_UINT32 ui32Flags; -+ -+ IMG_UINT32 ui32ClientFlags; -+ -+ IMG_UINT32 ui32AllocSize; -+ -+ struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo; -+ -+ IMG_HANDLE hMappingInfo; -+ -+ IMG_HANDLE hKernelMemInfo; -+ -+ IMG_HANDLE hResItem; -+ -+ struct _PVRSRV_CLIENT_MEM_INFO_ *psNext; -+ -+ } PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO; -+ -+ -+#define PVRSRV_MAX_CLIENT_HEAPS (32) -+ typedef struct _PVRSRV_HEAP_INFO_ { -+ IMG_UINT32 ui32HeapID; -+ IMG_HANDLE hDevMemHeap; -+ IMG_DEV_VIRTADDR sDevVAddrBase; -+ IMG_UINT32 ui32HeapByteSize; -+ IMG_UINT32 ui32Attribs; -+ } PVRSRV_HEAP_INFO; -+ -+ typedef struct _PVRSRV_DEVICE_IDENTIFIER_ { -+ PVRSRV_DEVICE_TYPE eDeviceType; -+ PVRSRV_DEVICE_CLASS eDeviceClass; -+ IMG_UINT32 ui32DeviceIndex; -+ -+ } PVRSRV_DEVICE_IDENTIFIER; -+ -+ typedef struct _PVRSRV_EVENTOBJECT_ { -+ -+ IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH]; -+ -+ IMG_HANDLE hOSEventKM; -+ -+ } PVRSRV_EVENTOBJECT; -+ -+ typedef struct _PVRSRV_MISC_INFO_ { -+ IMG_UINT32 ui32StateRequest; -+ IMG_UINT32 ui32StatePresent; -+ -+ IMG_VOID *pvSOCTimerRegisterKM; -+ IMG_VOID *pvSOCTimerRegisterUM; -+ IMG_HANDLE hSOCTimerRegisterOSMemHandle; -+ -+ IMG_VOID *pvSOCClockGateRegs; -+ IMG_UINT32 ui32SOCClockGateRegsSize; -+ -+ IMG_CHAR *pszMemoryStr; -+ IMG_UINT32 ui32MemoryStrLen; -+ -+ PVRSRV_EVENTOBJECT sGlobalEventObject; -+ IMG_HANDLE hOSGlobalEvent; -+ -+ } PVRSRV_MISC_INFO; -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION * -+ psConnection); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION * -+ psConnection); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_UINT32 * -+ puiNumDevices, -+ PVRSRV_DEVICE_IDENTIFIER -+ * puiDevIDs); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST -+ PVRSRV_CONNECTION -+ * -+ psConnection, -+ IMG_UINT32 -+ uiDevIndex, -+ PVRSRV_DEV_DATA -+ * -+ psDevData, -+ PVRSRV_DEVICE_TYPE -+ eDeviceType); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo(IMG_CONST -+ PVRSRV_CONNECTION -+ * psConnection, -+ PVRSRV_MISC_INFO -+ * psMiscInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ PVRSRV_MISC_INFO * -+ psMiscInfo); -+ -+ IMG_IMPORT -+ IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset); -+ -+ IMG_IMPORT -+ IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); -+ -+ IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, -+ 
IMG_UINT32 ui32Count, -+ PVRSRV_HWREG * psHWRegs); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVPollForValue(PVRSRV_CONNECTION * psConnection, -+ IMG_HANDLE hOSEvent, -+ volatile IMG_UINT32 * -+ pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Waitus, -+ IMG_UINT32 ui32Tries); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_HANDLE * -+ phDevMemContext, -+ IMG_UINT32 * -+ pui32SharedHeapCount, -+ PVRSRV_HEAP_INFO -+ * -+ psHeapInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_HANDLE -+ hDevMemContext); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_HANDLE -+ hDevMemContext, -+ IMG_UINT32 * -+ pui32SharedHeapCount, -+ PVRSRV_HEAP_INFO -+ * psHeapInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVMapMemInfoToUser(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo, -+ IMG_VOID * -+ ppvUserLinAddr, -+ IMG_HANDLE * -+ phUserMappingInfo); -+ -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapMemInfoFromUser(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo, -+ IMG_PVOID -+ pvUserLinAddr, -+ IMG_HANDLE -+ hUserMappingInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ IMG_HANDLE -+ hDevMemHeap, -+ IMG_UINT32 -+ ui32Attribs, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 -+ ui32Alignment, -+ PVRSRV_CLIENT_MEM_INFO -+ ** ppsMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_HANDLE -+ hDevMemHeap, -+ IMG_DEV_VIRTADDR -+ * -+ psDevVAddr, -+ IMG_UINT32 -+ ui32Size, -+ IMG_UINT32 -+ ui32Alignment, -+ PVRSRV_CLIENT_MEM_INFO -+ ** -+ ppsMemInfo); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA * psDevData, -+ PVRSRV_CLIENT_MEM_INFO * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psSrcMemInfo, -+ IMG_HANDLE -+ hDstDevMemHeap, -+ PVRSRV_CLIENT_MEM_INFO -+ ** ppsDstMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo, -+ IMG_SYS_PHYADDR * -+ psSysPAddr, -+ IMG_UINT32 ui32Flags); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo, -+ IMG_UINT32 -+ ui32Flags); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ IMG_UINT32 -+ ui32ByteSize, -+ IMG_UINT32 -+ ui32PageOffset, -+ IMG_BOOL bPhysContig, -+ IMG_SYS_PHYADDR * -+ psSysPAddr, -+ IMG_VOID * pvLinAddr, -+ PVRSRV_CLIENT_MEM_INFO -+ ** ppsMemInfo); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ PVRSRV_CLIENT_MEM_INFO -+ * -+ psMemInfo); -+ -+ PVRSRV_ERROR 
PVRSRVChangeDeviceMemoryAttributes(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ PVRSRV_CLIENT_MEM_INFO * -+ psClientMemInfo, -+ IMG_UINT32 ui32Attribs); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_HANDLE -+ hDeviceClassBuffer, -+ PVRSRV_CLIENT_MEM_INFO -+ ** ppsMemInfo); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVUnmapDeviceClassMemory(IMG_CONST PVRSRV_DEV_DATA * psDevData, -+ PVRSRV_CLIENT_MEM_INFO * psMemInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST -+ PVRSRV_DEV_DATA * -+ psDevData, -+ IMG_SYS_PHYADDR -+ sSysPhysAddr, -+ IMG_UINT32 -+ uiSizeInBytes, -+ IMG_PVOID * -+ ppvUserAddr, -+ IMG_UINT32 * -+ puiActualSize, -+ IMG_PVOID * -+ ppvProcess); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST -+ PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_PVOID -+ pvUserAddr, -+ IMG_PVOID -+ pvProcess); -+ -+ typedef enum _PVRSRV_SYNCVAL_MODE_ { -+ PVRSRV_SYNCVAL_READ = IMG_TRUE, -+ PVRSRV_SYNCVAL_WRITE = IMG_FALSE, -+ -+ } PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE; -+ -+ typedef IMG_UINT32 PVRSRV_SYNCVAL; -+ -+ IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO -+ psMemInfo, -+ PVRSRV_SYNCVAL_MODE -+ eMode, -+ PVRSRV_SYNCVAL -+ OpRequired); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo, -+ PVRSRV_SYNCVAL_MODE eMode); -+ -+ IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO -+ psMemInfo, -+ PVRSRV_SYNCVAL_MODE eMode, -+ PVRSRV_SYNCVAL OpRequired); -+ -+ IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO -+ psMemInfo, -+ PVRSRV_SYNCVAL_MODE eMode); -+ -+ IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO -+ psMemInfo, -+ PVRSRV_SYNCVAL_MODE eMode, -+ PVRSRV_SYNCVAL OpRequired); -+ -+ IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO -+ psMemInfo, -+ PVRSRV_SYNCVAL_MODE -+ eMode); -+ -+ IMG_IMPORT PVRSRV_SYNCVAL -+ PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo, -+ PVRSRV_SYNCVAL_MODE eMode); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST -+ PVRSRV_CONNECTION -+ * psConnection, -+ PVRSRV_DEVICE_CLASS -+ DeviceClass, -+ IMG_UINT32 * -+ pui32DevCount, -+ IMG_UINT32 * -+ pui32DevID); -+ -+ IMG_IMPORT -+ IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_UINT32 ui32DeviceID); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_HANDLE hDevice); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats(IMG_HANDLE hDevice, -+ IMG_UINT32 * -+ pui32Count, -+ DISPLAY_FORMAT * -+ psFormat); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims(IMG_HANDLE hDevice, -+ IMG_UINT32 * pui32Count, -+ DISPLAY_FORMAT * -+ psFormat, -+ DISPLAY_DIMS * psDims); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE -+ hDevice, -+ IMG_HANDLE * -+ phBuffer); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice, -+ DISPLAY_INFO * -+ psDisplayInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain(IMG_HANDLE -+ hDevice, -+ IMG_UINT32 -+ ui32Flags, -+ DISPLAY_SURF_ATTRIBUTES -+ * psDstSurfAttrib, -+ DISPLAY_SURF_ATTRIBUTES -+ * psSrcSurfAttrib, -+ IMG_UINT32 -+ ui32BufferCount, -+ IMG_UINT32 -+ ui32OEMFlags, -+ IMG_UINT32 * -+ pui32SwapChainID, -+ IMG_HANDLE * -+ phSwapChain); -+ -+ 
IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain(IMG_HANDLE -+ hDevice, -+ IMG_HANDLE -+ hSwapChain); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, -+ IMG_RECT * psDstRect); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, -+ IMG_RECT * psSrcRect); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey(IMG_HANDLE -+ hDevice, -+ IMG_HANDLE -+ hSwapChain, -+ IMG_UINT32 -+ ui32CKColour); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey(IMG_HANDLE -+ hDevice, -+ IMG_HANDLE -+ hSwapChain, -+ IMG_UINT32 -+ ui32CKColour); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice, -+ IMG_HANDLE hSwapChain, -+ IMG_HANDLE * phBuffer); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer(IMG_HANDLE hDevice, -+ IMG_HANDLE hBuffer, -+ IMG_UINT32 -+ ui32ClipRectCount, -+ IMG_RECT * -+ psClipRect, -+ IMG_UINT32 -+ ui32SwapInterval, -+ IMG_HANDLE -+ hPrivateTag); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem(IMG_HANDLE hDevice, -+ IMG_HANDLE -+ hSwapChain); -+ -+ IMG_IMPORT -+ IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA -+ * psDevData, -+ IMG_UINT32 ui32DeviceID); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_HANDLE hDevice); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice, -+ BUFFER_INFO * -+ psBuffer); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice, -+ IMG_UINT32 -+ ui32BufferIndex, -+ IMG_HANDLE * phBuffer); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ PVRSRV_CLIENT_MEM_INFO * -+ psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ IMG_BOOL bLastFrame, -+ IMG_BOOL bOverwrite); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ PVRSRV_CLIENT_SYNC_INFO -+ * psClientSyncInfo, -+ IMG_BOOL bIsRead, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION -+ * psConnection, -+ IMG_PVOID pvAltLinAddr, -+ PVRSRV_CLIENT_MEM_INFO * -+ psMemInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Bytes, -+ IMG_UINT32 ui32Flags); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_PVOID pvAltLinAddr, -+ PVRSRV_CLIENT_SYNC_INFO * -+ psClientSyncInfo, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Bytes); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_CONNECTION -+ * psConnection, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Flags); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST -+ PVRSRV_CONNECTION -+ * psConnection, -+ IMG_UINT32 -+ ui32RegAddr, -+ IMG_UINT32 -+ ui32RegValue, -+ IMG_UINT32 -+ ui32Mask, -+ IMG_UINT32 -+ ui32Flags); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST -+ PVRSRV_CONNECTION -+ * psConnection, -+ IMG_UINT32 -+ ui32RegAddr, -+ IMG_UINT32 -+ ui32RegValue, -+ IMG_UINT32 -+ ui32Mask); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV 
PVRSRVPDumpPDReg(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST -+ PVRSRV_CONNECTION -+ * -+ psConnection, -+ PVRSRV_CLIENT_MEM_INFO -+ * psMemInfo, -+ IMG_UINT32 -+ ui32Offset, -+ IMG_DEV_PHYADDR -+ sPDDevPAddr); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_HANDLE -+ hKernelMemInfo, -+ IMG_DEV_PHYADDR * -+ pPages, -+ IMG_UINT32 -+ ui32NumPages, -+ IMG_DEV_VIRTADDR -+ sDevAddr, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32Length, -+ IMG_BOOL bContinuous); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_UINT32 ui32Frame); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_CONST IMG_CHAR * -+ pszComment, -+ IMG_BOOL bContinuous); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_BOOL bContinuous, -+ IMG_CONST IMG_CHAR * -+ pszFormat, ...); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST -+ PVRSRV_CONNECTION -+ * -+ psConnection, -+ IMG_UINT32 -+ ui32Flags, -+ IMG_CONST -+ IMG_CHAR * -+ pszFormat, -+ ...); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_CHAR * -+ pszString, -+ IMG_BOOL -+ bContinuous); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_BOOL * -+ pbIsCapturing); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_CHAR * pszFileName, -+ IMG_UINT32 -+ ui32FileOffset, -+ IMG_UINT32 ui32Width, -+ IMG_UINT32 ui32Height, -+ IMG_UINT32 -+ ui32StrideInBytes, -+ IMG_DEV_VIRTADDR -+ sDevBaseAddr, -+ IMG_UINT32 ui32Size, -+ PDUMP_PIXEL_FORMAT -+ ePixelFormat, -+ PDUMP_MEM_FORMAT -+ eMemFormat, -+ IMG_UINT32 -+ ui32PDumpFlags); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection, -+ IMG_CONST IMG_CHAR * -+ pszFileName, -+ IMG_UINT32 -+ ui32FileOffset, -+ IMG_UINT32 ui32Address, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 -+ ui32PDumpFlags); -+ -+ IMG_IMPORT -+ IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST -+ PVRSRV_CONNECTION * -+ psConnection); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST -+ PVRSRV_CONNECTION -+ * -+ psConnection, -+ IMG_UINT32 -+ ui32RegOffset, -+ IMG_BOOL -+ bLastFrame); -+ -+ IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR * -+ pszLibraryName); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv); -+ IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, -+ const IMG_CHAR * -+ pszFunctionName, -+ IMG_VOID ** ppvFuncAddr); -+ -+ IMG_IMPORT IMG_UINT32 PVRSRVClockus(void); -+ IMG_IMPORT IMG_VOID PVRSRVWaitus(IMG_UINT32 ui32Timeus); -+ IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta(void); -+ IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void); -+ -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID -+ eModuleID, -+ const IMG_CHAR -+ * pszAppName, -+ IMG_VOID ** -+ ppvState); -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID -+ eModuleID, -+ IMG_VOID * -+ pvHintState); -+ -+ IMG_IMPORT IMG_BOOL IMG_CALLCONV 
PVRSRVGetAppHint(IMG_VOID * -+ pvHintState, -+ const IMG_CHAR * -+ pszHintName, -+ IMG_DATA_TYPE -+ eDataType, -+ const IMG_VOID * -+ pvDefault, -+ IMG_VOID * pvReturn); -+ -+ IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem(IMG_UINT32 -+ ui32Size); -+ IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem(IMG_UINT32 -+ ui32Size); -+ IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem(IMG_PVOID -+ pvBase, -+ IMG_SIZE_T -+ uNewSize); -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem(IMG_PVOID pvMem); -+ IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID * pvDst, -+ const IMG_VOID * pvSrc, -+ IMG_UINT32 ui32Size); -+ IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID * pvDest, IMG_UINT8 ui8Value, -+ IMG_UINT32 ui32Size); -+ -+ struct _PVRSRV_MUTEX_OPAQUE_STRUCT_; -+ typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE; -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE * phMutex); -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex); -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE -+ hMutex); -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE -+ hMutex); -+ -+#ifdef DEBUG -+ IMG_PVOID PVRSRVAllocUserModeMemTracking(IMG_UINT32 ui32Size, -+ IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32LineNumber); -+ IMG_PVOID PVRSRVCallocUserModeMemTracking(IMG_UINT32 ui32Size, -+ IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32LineNumber); -+ IMG_VOID PVRSRVFreeUserModeMemTracking(IMG_VOID * pvMem); -+ IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID * pvMem, -+ IMG_UINT32 ui32NewSize, -+ IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32LineNumber); -+#endif -+ -+ IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION * -+ psConnection, -+ IMG_HANDLE hOSEvent); -+ -+#define TIME_NOT_PASSED_UINT32(a,b,c) ((a - b) < c) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/services_headers.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/services_headers.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/services_headers.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/services_headers.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,44 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef SERVICES_HEADERS_H -+#define SERVICES_HEADERS_H -+ -+ -+#include "img_defs.h" -+#include "services.h" -+#include "servicesint.h" -+#include "power.h" -+#include "resman.h" -+#include "queue.h" -+#include "srvkm.h" -+#include "kerneldisplay.h" -+#include "syscommon.h" -+#include "pvr_debug.h" -+#include "metrics.h" -+#include "osfunc.h" -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/servicesint.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/servicesint.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/servicesint.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/servicesint.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,198 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__SERVICESINT_H__) -+#define __SERVICESINT_H__ -+ -+ -+#include "services.h" -+#include "sysinfo.h" -+ -+#define HWREC_DEFAULT_TIMEOUT (500) -+ -+#define DRIVERNAME_MAXLENGTH (100) -+ -+ typedef struct _PVRSRV_KERNEL_MEM_INFO_ { -+ -+ IMG_PVOID pvLinAddrKM; -+ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ -+ IMG_UINT32 ui32Flags; -+ -+ IMG_UINT32 ui32AllocSize; -+ -+ PVRSRV_MEMBLK sMemBlk; -+ -+ struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo; -+ -+ } PVRSRV_KERNEL_MEM_INFO; -+ -+ typedef struct _PVRSRV_KERNEL_SYNC_INFO_ { -+ -+ PVRSRV_SYNC_DATA *psSyncData; -+ -+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; -+ -+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; -+ -+ PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM; -+ -+ } PVRSRV_KERNEL_SYNC_INFO; -+ -+ typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_ { -+ IMG_UINT32 ui32ReadOpPendingVal; -+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; -+ IMG_UINT32 ui32WriteOpPendingVal; -+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; -+ } PVRSRV_DEVICE_SYNC_OBJECT; -+ -+ typedef struct _PVRSRV_SYNC_OBJECT { -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM; -+ IMG_UINT32 ui32WriteOpsPending; -+ IMG_UINT32 ui32ReadOpsPending; -+ -+ } PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT; -+ -+ typedef struct _PVRSRV_COMMAND { -+ IMG_UINT32 ui32CmdSize; -+ IMG_UINT32 ui32DevIndex; -+ IMG_UINT32 CommandType; -+ IMG_UINT32 ui32DstSyncCount; -+ IMG_UINT32 ui32SrcSyncCount; -+ PVRSRV_SYNC_OBJECT *psDstSync; -+ PVRSRV_SYNC_OBJECT *psSrcSync; -+ IMG_UINT32 ui32DataSize; -+ IMG_UINT32 ui32ProcessID; -+ IMG_VOID *pvData; -+ } PVRSRV_COMMAND, *PPVRSRV_COMMAND; -+ -+ typedef struct _PVRSRV_QUEUE_INFO_ { -+ IMG_VOID *pvLinQueueKM; -+ IMG_VOID *pvLinQueueUM; -+ volatile IMG_UINT32 ui32ReadOffset; -+ volatile IMG_UINT32 ui32WriteOffset; -+ IMG_UINT32 *pui32KickerAddrKM; -+ IMG_UINT32 *pui32KickerAddrUM; -+ IMG_UINT32 ui32QueueSize; -+ -+ IMG_UINT32 ui32ProcessID; -+ -+ IMG_HANDLE hMemBlock[2]; -+ -+ struct _PVRSRV_QUEUE_INFO_ *psNextKM; -+ } PVRSRV_QUEUE_INFO; -+ -+ typedef PVRSRV_ERROR(*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO *, -+ PVRSRV_COMMAND **, -+ IMG_UINT32, -+ IMG_UINT16, -+ IMG_UINT32, -+ PVRSRV_KERNEL_SYNC_INFO *[], -+ IMG_UINT32, -+ PVRSRV_KERNEL_SYNC_INFO *[], -+ IMG_UINT32); -+ typedef PVRSRV_ERROR(*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO *, -+ PVRSRV_COMMAND *, IMG_BOOL); -+ -+ typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG { -+ PFN_GET_BUFFER_ADDR pfnGetBufferAddr; -+ IMG_HANDLE hDevMemContext; -+ IMG_HANDLE hExtDevice; -+ IMG_HANDLE hExtBuffer; -+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; -+ -+ } PVRSRV_DEVICECLASS_BUFFER; -+ -+ typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG { -+ IMG_HANDLE hDeviceKM; -+ IMG_HANDLE hServices; -+ } PVRSRV_CLIENT_DEVICECLASS_INFO; -+ -+ static INLINE -+ IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO * -+ psSyncInfo, -+ IMG_BOOL bIsReadOp) { -+ IMG_UINT32 ui32WriteOpsPending; -+ -+ if (bIsReadOp) { -+ ui32WriteOpsPending = -+ psSyncInfo->psSyncData->ui32WriteOpsPending; -+ } else { -+ -+ ui32WriteOpsPending = -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ } -+ -+ return ui32WriteOpsPending; -+ } -+ -+ static INLINE -+ IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO * -+ psSyncInfo, IMG_BOOL bIsReadOp) { -+ IMG_UINT32 ui32ReadOpsPending; -+ -+ if (bIsReadOp) { -+ ui32ReadOpsPending = -+ psSyncInfo->psSyncData->ui32ReadOpsPending++; -+ } else { -+ ui32ReadOpsPending = -+ 
psSyncInfo->psSyncData->ui32ReadOpsPending; -+ } -+ -+ return ui32ReadOpsPending; -+ } -+ -+ IMG_IMPORT -+ PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo, -+ PVRSRV_COMMAND * psCommand); -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION * -+ psConnection, -+ IMG_HANDLE hDevMemContext, -+ IMG_DEV_PHYADDR * sPDDevPAddr); -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION * psConnection, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size, -+ PVRSRV_CLIENT_MEM_INFO ** ppsClientMemInfo); -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION * psConnection, -+ PVRSRV_CLIENT_MEM_INFO * psClientMemInfo); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION * psConnection, -+ PVRSRV_CLIENT_MEM_INFO * psClientMemInfo); -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV -+ PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION * psConnection, -+ IMG_HANDLE hKernelMemInfo, -+ PVRSRV_CLIENT_MEM_INFO ** ppsClientMemInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxapi_km.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxapi_km.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxapi_km.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxapi_km.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,175 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __SGXAPI_KM_H__ -+#define __SGXAPI_KM_H__ -+ -+ -+#include "sgxdefs.h" -+ -+ -+#define SGX_GENERAL_HEAP_ID 0 -+#define SGX_TADATA_HEAP_ID 1 -+#define SGX_KERNEL_CODE_HEAP_ID 2 -+#define SGX_VIDEO_CODE_HEAP_ID 3 -+#define SGX_KERNEL_VIDEO_DATA_HEAP_ID 4 -+#define SGX_PIXELSHADER_HEAP_ID 5 -+#define SGX_VERTEXSHADER_HEAP_ID 6 -+#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 7 -+#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 8 -+#define SGX_SYNCINFO_HEAP_ID 9 -+#define SGX_3DPARAMETERS_HEAP_ID 10 -+#define SGX_GENERAL_MAPPING_HEAP_ID 11 -+#define SGX_UNDEFINED_HEAP_ID (~0LU) -+ -+#define SGX_ALT_MAPPING_HEAP_ID 12 -+#define SGX_FB_MAPPING_HEAP_ID 13 -+#define SGX_MAX_HEAP_ID 14 -+ -+#define SGX_MAX_TA_STATUS_VALS 32 -+#define SGX_MAX_3D_STATUS_VALS 2 -+ -+#define SGX_MAX_SRC_SYNCS 4 -+ -+ typedef struct _SGX_SLAVE_PORT_ { -+ IMG_PVOID pvData; -+ IMG_UINT32 ui32DataRange; -+ IMG_PUINT32 pui32Offset; -+ IMG_SYS_PHYADDR sPhysBase; -+ } SGX_SLAVE_PORT; -+ -+ -+#define PVRSRV_SGX_HWPERF_CBSIZE 0x100 -+ -+#define PVRSRV_SGX_HWPERF_INVALID 1 -+#define PVRSRV_SGX_HWPERF_TRANSFER 2 -+#define PVRSRV_SGX_HWPERF_TA 3 -+#define PVRSRV_SGX_HWPERF_3D 4 -+ -+#define PVRSRV_SGX_HWPERF_ON 0x40 -+ -+ typedef struct _PVRSRV_SGX_HWPERF_CBDATA_ { -+ IMG_UINT32 ui32FrameNo; -+ IMG_UINT32 ui32Type; -+ IMG_UINT32 ui32StartTimeWraps; -+ IMG_UINT32 ui32StartTime; -+ IMG_UINT32 ui32EndTimeWraps; -+ IMG_UINT32 ui32EndTime; -+ IMG_UINT32 ui32ClockSpeed; -+ IMG_UINT32 ui32TimeMax; -+ } PVRSRV_SGX_HWPERF_CBDATA; -+ -+ typedef struct _PVRSRV_SGX_HWPERF_CB_ { -+ IMG_UINT32 ui32Woff; -+ IMG_UINT32 ui32Roff; -+ PVRSRV_SGX_HWPERF_CBDATA -+ psHWPerfCBData[PVRSRV_SGX_HWPERF_CBSIZE]; -+ } PVRSRV_SGX_HWPERF_CB; -+ -+ typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB { -+ PVRSRV_SGX_HWPERF_CBDATA *psHWPerfData; -+ IMG_UINT32 ui32ArraySize; -+ IMG_UINT32 ui32DataCount; -+ IMG_UINT32 ui32Time; -+ } SGX_MISC_INFO_HWPERF_RETRIEVE_CB; -+ -+ typedef enum _SGX_MISC_INFO_REQUEST_ { -+ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0, -+ SGX_MISC_INFO_REQUEST_HWPERF_CB_ON, -+ SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF, -+ SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB, -+ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff -+ } SGX_MISC_INFO_REQUEST; -+ -+ typedef struct _SGX_MISC_INFO_ { -+ SGX_MISC_INFO_REQUEST eRequest; -+ -+ union { -+ IMG_UINT32 reserved; -+ IMG_UINT32 ui32SGXClockSpeed; -+ SGX_MISC_INFO_HWPERF_RETRIEVE_CB sRetrieveCB; -+ } uData; -+ } SGX_MISC_INFO; -+ -+ -+#define PVR3DIF4_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256 -+ -+ typedef struct _PVR3DIF4_KICKTA_DUMPBITMAP_ { -+ IMG_DEV_VIRTADDR sDevBaseAddr; -+ IMG_UINT32 ui32Flags; -+ IMG_UINT32 ui32Width; -+ IMG_UINT32 ui32Height; -+ IMG_UINT32 ui32Stride; -+ IMG_UINT32 ui32PDUMPFormat; -+ IMG_UINT32 ui32BytesPP; -+ IMG_CHAR pszName[PVR3DIF4_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH]; -+ } PVR3DIF4_KICKTA_DUMPBITMAP, *PPVR3DIF4_KICKTA_DUMPBITMAP; -+ -+#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16) -+ -+ typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_ { -+ -+ IMG_UINT32 ui32CacheControl; -+ -+ } PVRSRV_SGX_PDUMP_CONTEXT; -+ -+ typedef struct _PVR3DIF4_KICKTA_DUMP_ROFF_ { -+ IMG_HANDLE hKernelMemInfo; -+ IMG_UINT32 uiAllocIndex; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Value; -+ IMG_PCHAR pszName; -+ } PVR3DIF4_KICKTA_DUMP_ROFF, *PPVR3DIF4_KICKTA_DUMP_ROFF; -+ -+ typedef struct _PVR3DIF4_KICKTA_DUMP_BUFFER_ { -+ IMG_UINT32 ui32SpaceUsed; -+ IMG_UINT32 ui32Start; -+ IMG_UINT32 
ui32End; -+ IMG_UINT32 ui32BufferSize; -+ IMG_UINT32 ui32BackEndLength; -+ IMG_UINT32 uiAllocIndex; -+ IMG_HANDLE hKernelMemInfo; -+ IMG_PCHAR pszName; -+ } PVR3DIF4_KICKTA_DUMP_BUFFER, *PPVR3DIF4_KICKTA_DUMP_BUFFER; -+ -+#ifdef PDUMP -+ typedef struct _PVR3DIF4_KICKTA_PDUMP_ { -+ -+ PPVR3DIF4_KICKTA_DUMPBITMAP psPDumpBitmapArray; -+ IMG_UINT32 ui32PDumpBitmapSize; -+ -+ PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray; -+ IMG_UINT32 ui32BufferArraySize; -+ -+ PPVR3DIF4_KICKTA_DUMP_ROFF psROffArray; -+ IMG_UINT32 ui32ROffArraySize; -+ } PVR3DIF4_KICKTA_PDUMP, *PPVR3DIF4_KICKTA_PDUMP; -+#endif -+ -+#define SGX_MAX_TRANSFER_STATUS_VALS 2 -+#define SGX_MAX_TRANSFER_SYNC_OPS 5 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgx_bridge.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgx_bridge.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgx_bridge.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgx_bridge.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,323 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__SGX_BRIDGE_H__) -+#define __SGX_BRIDGE_H__ -+ -+#include "sgxapi_km.h" -+#include "sgxinfo.h" -+#include "pvr_bridge.h" -+ -+ -+#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1) -+#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0) -+#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1) -+#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2) -+#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3) -+#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4) -+#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5) -+#define PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+6) -+ -+#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9) -+ -+#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10) -+ -+#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13) -+#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14) -+#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15) -+#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16) -+ -+#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17) -+#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18) -+#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19) -+#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20) -+#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21) -+#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22) -+#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26) -+#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27) -+#define PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28) -+ -+#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+28) -+ -+ typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevMemHeap; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ } PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR { -+ PVRSRV_ERROR eError; -+ IMG_DEV_PHYADDR DevPAddr; -+ IMG_CPU_PHYADDR CpuPAddr; -+ } PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hDevMemContext; -+ } PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG { -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ } PVRSRV_BRIDGE_IN_GETCLIENTINFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG { -+ PVR3DIF4_INTERNAL_DEVINFO sSGXInternalDevInfo; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ 
IMG_HANDLE hDevCookie; -+ } PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG { -+ PVR3DIF4_CLIENT_INFO sClientInfo; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_GETCLIENTINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ PVR3DIF4_CLIENT_INFO sClientInfo; -+ } PVRSRV_BRIDGE_IN_RELEASECLIENTINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ } PVRSRV_BRIDGE_IN_ISPBREAKPOLL; -+ -+ typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ PVR3DIF4_CCB_KICK sCCBKick; -+ } PVRSRV_BRIDGE_IN_DOKICK; -+ -+ -+ typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ PVRSRV_TRANSFER_SGX_KICK sKick; -+ } PVRSRV_BRIDGE_IN_SUBMITTRANSFER; -+ -+ -+ typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_PCHAR pszKey; -+ IMG_PCHAR pszValue; -+ } PVRSRV_BRIDGE_IN_READREGDWORD; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Data; -+ } PVRSRV_BRIDGE_OUT_READREGDWORD; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SCHEDULECOMMAND_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ PVRSRV_SGX_COMMAND_TYPE eCommandType; -+ PVRSRV_SGX_COMMAND *psCommandData; -+ -+ } PVRSRV_BRIDGE_IN_SCHEDULECOMMAND; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ SGX_MISC_INFO *psMiscInfo; -+ } PVRSRV_BRIDGE_IN_SGXGETMISCINFO; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ } PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG { -+ PVRSRV_ERROR eError; -+ SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo; -+ } PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ SGX_BRIDGE_INIT_INFO sInitInfo; -+ } PVRSRV_BRIDGE_IN_SGXDEVINITPART2; -+ -+ typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hKernSyncInfo; -+ IMG_BOOL bWaitForComplete; -+ } PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE; -+ -+#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10 -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_BOOL bLockOnFailure; -+ IMG_UINT32 ui32TotalPBSize; -+ } PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG { -+ IMG_HANDLE hKernelMemInfo; -+ IMG_HANDLE hSharedPBDesc; -+ IMG_HANDLE hSharedPBDescKernelMemInfoHandle; -+ IMG_HANDLE hHWPBDescKernelMemInfoHandle; -+ IMG_HANDLE hBlockKernelMemInfoHandle; -+ IMG_HANDLE -+ ahSharedPBDescSubKernelMemInfoHandles -+ [PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS]; -+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount; -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hSharedPBDesc; -+ } PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG { -+ PVRSRV_ERROR eError; -+ } PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC; -+ -+ typedef struct 
PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hSharedPBDescKernelMemInfo; -+ IMG_HANDLE hHWPBDescKernelMemInfo; -+ IMG_HANDLE hBlockKernelMemInfo; -+ IMG_UINT32 ui32TotalPBSize; -+ IMG_HANDLE *phKernelMemInfoHandles; -+ IMG_UINT32 ui32KernelMemInfoHandlesCount; -+ } PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hSharedPBDesc; -+ } PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC; -+ -+#ifdef PDUMP -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ PVR3DIF4_KICKTA_DUMP_BUFFER *psBufferArray; -+ IMG_UINT32 ui32BufferArrayLength; -+ IMG_BOOL bDumpPolls; -+ } PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32DumpFrameNum; -+ IMG_BOOL bLastFrame; -+ IMG_UINT32 *pui32Registers; -+ IMG_UINT32 ui32NumRegisters; -+ } PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32DumpFrameNum; -+ IMG_BOOL bLastFrame; -+ IMG_UINT32 *pui32Registers; -+ IMG_UINT32 ui32NumRegisters; -+ } PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS; -+ -+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_UINT32 ui32DumpFrameNum; -+ IMG_UINT32 ui32TAKickCount; -+ IMG_BOOL bLastFrame; -+ IMG_UINT32 *pui32Registers; -+ IMG_UINT32 ui32NumRegisters; -+ } PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS; -+ -+#endif -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr; -+ } PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hHWRenderContext; -+ } PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hHWRenderContext; -+ } PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; -+ } PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG { -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hHWTransferContext; -+ } PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_HANDLE hHWTransferContext; -+ } PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT; -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr; -+ } PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET; -+ -+ -+ typedef struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS_TAG { -+ IMG_UINT32 ui32BridgeFlags; -+ IMG_HANDLE hDevCookie; -+ IMG_UINT32 ui32Reg; -+ IMG_BOOL bNew; -+ IMG_UINT32 ui32New; -+ IMG_UINT32 ui32NewReset; -+ IMG_UINT32 ui32CountersReg; -+ } PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS; -+ -+ typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS_TAG { -+ PVRSRV_ERROR eError; 
-+ IMG_UINT32 ui32Old; -+ IMG_UINT32 ui32Time; -+ IMG_BOOL bActive; -+ PVRSRV_SGXDEV_DIFF_INFO sDiffs; -+ } PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgx_bridge_km.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgx_bridge_km.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgx_bridge_km.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgx_bridge_km.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,139 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__SGX_BRIDGE_KM_H__) -+#define __SGX_BRIDGE_KM_H__ -+ -+#include "sgxapi_km.h" -+#include "sgxinfo.h" -+#include "sgxinfokm.h" -+#include "sgx_bridge.h" -+#include "pvr_bridge.h" -+#include "perproc.h" -+ -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, -+ PVRSRV_TRANSFER_SGX_KICK * psKick); -+ -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, -+ PVR3DIF4_CCB_KICK * psCCBKick); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEV_PHYADDR * pDevPAddr, -+ IMG_CPU_PHYADDR * pCpuPAddr); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie, -+ IMG_HANDLE -+ hDevMemContext, -+ IMG_DEV_PHYADDR * -+ psPDDevPAddr); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie, -+ PVR3DIF4_CLIENT_INFO * -+ psClientInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO * psDevInfo, -+ SGX_MISC_INFO * psMiscInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle, -+ IMG_UINT32 ui32Reg, -+ IMG_UINT32 * pui32Old, -+ IMG_BOOL bNew, -+ IMG_UINT32 ui32New, -+ IMG_UINT32 ui32NewReset, -+ IMG_UINT32 ui32CountersReg, -+ IMG_UINT32 * pui32Time, -+ IMG_BOOL * pbActive, -+ PVRSRV_SGXDEV_DIFF_INFO * -+ psDiffs); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO * -+ psDevInfo, -+ PVRSRV_KERNEL_SYNC_INFO * -+ psSyncInfo, -+ IMG_BOOL bWaitForComplete); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, -+ SGX_BRIDGE_INFO_FOR_SRVINIT * -+ psInitInfo); -+ -+ IMG_IMPORT -+ PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDevHandle, -+ SGX_BRIDGE_INIT_INFO * psInitInfo); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ 
SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDevCookie, -+ IMG_BOOL bLockOnFailure, -+ IMG_UINT32 ui32TotalPBSize, -+ IMG_HANDLE * phSharedPBDesc, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsSharedPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsHWPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO ** -+ ppsBlockKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO *** -+ pppsSharedPBDescSubKernelMemInfos, -+ IMG_UINT32 * -+ ui32SharedPBDescSubKernelMemInfosCount); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDevCookie, -+ PVRSRV_KERNEL_MEM_INFO * -+ psSharedPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO * -+ psHWPBDescKernelMemInfo, -+ PVRSRV_KERNEL_MEM_INFO * psBlockKernelMemInfo, -+ IMG_UINT32 ui32TotalPBSize, -+ IMG_HANDLE * phSharedPBDesc, -+ PVRSRV_KERNEL_MEM_INFO ** -+ psSharedPBDescSubKernelMemInfos, -+ IMG_UINT32 -+ ui32SharedPBDescSubKernelMemInfosCount); -+ -+ IMG_IMPORT PVRSRV_ERROR -+ SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, -+ PVR3DIF4_INTERNAL_DEVINFO * -+ psSGXInternalDevInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxconfig.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxconfig.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxconfig.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxconfig.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,85 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __SGXCONFIG_H__ -+#define __SGXCONFIG_H__ -+ -+#ifndef SGX530 -+#error "sgxconfig.h: ERROR: unspecified SGX Core version" -+#endif -+ -+#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX -+#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D -+ -+#define DEV_MAJOR_VERSION 1 -+#define DEV_MINOR_VERSION 0 -+ -+ -+#define SGX_ADDRESS_SPACE_SIZE 28 -+ -+#define SGX_GENERAL_HEAP_BASE 0x00400000 -+#define SGX_GENERAL_HEAP_SIZE (0x05000000-0x00401000) -+ -+#define SGX_GENERAL_MAPPING_HEAP_BASE 0x05000000 -+#define SGX_GENERAL_MAPPING_HEAP_SIZE (0x06800000-0x05001000) -+ -+#define SGX_FB_MAPPING_HEAP_BASE 0x06800000 -+#define SGX_FB_MAPPING_HEAP_SIZE (0x07000000-0x06801000) -+ -+#define SGX_TADATA_HEAP_BASE 0x07000000 -+#define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000) -+ -+#define SGX_3DPARAMETERS_HEAP_BASE 0x08000000 -+#define SGX_3DPARAMETERS_HEAP_SIZE (0x04000000-0x00001000) -+ -+#define SGX_ALT_MAPPING_HEAP_BASE (0x0C000000) -+#define SGX_ALT_MAPPING_HEAP_SIZE (0x0D000000 - 0x0C001000) -+ -+#define SGX_PIXELSHADER_HEAP_BASE 0x0D000000 -+#define SGX_PIXELSHADER_HEAP_SIZE 0x00500000 -+ -+#define SGX_VERTEXSHADER_HEAP_BASE 0x0D800000 -+#define SGX_VERTEXSHADER_HEAP_SIZE 0x00200000 -+ -+#define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0E000000 -+#define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000) -+ -+#define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000 -+#define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000) -+ -+#define SGX_KERNEL_CODE_HEAP_BASE 0x0F000000 -+#define SGX_KERNEL_CODE_HEAP_SIZE 0x00080000 -+ -+#define SGX_VIDEO_CODE_HEAP_BASE 0x0F400000 -+#define SGX_VIDEO_CODE_HEAP_SIZE 0x00080000 -+ -+#define SGX_KERNEL_VIDEO_DATA_HEAP_BASE 0x0F800000 -+#define SGX_KERNEL_VIDEO_DATA_HEAP_SIZE (0x00400000-0x00001000) -+ -+#define SGX_SYNCINFO_HEAP_BASE 0x0FC00000 -+#define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxcoretypes.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxcoretypes.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxcoretypes.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxcoretypes.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,41 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _SGXCORETYPES_KM_H_ -+#define _SGXCORETYPES_KM_H_ -+ -+typedef enum { -+ SGX_CORE_ID_INVALID = 0, -+ SGX_CORE_ID_530 = 2, -+ SGX_UNUSED = 3, -+} SGX_CORE_ID_TYPE; -+ -+typedef struct _SGX_CORE_INFO { -+ SGX_CORE_ID_TYPE eID; -+ IMG_UINT32 uiRev; -+} SGX_CORE_INFO, *PSGX_CORE_INFO; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxdefs.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxdefs.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxdefs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxdefs.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,34 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _SGXDEFS_H_ -+#define _SGXDEFS_H_ -+ -+#include "sgx530defs.h" -+#include "sgxerrata.h" -+#include "sgxfeaturedefs.h" -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxerrata.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxerrata.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxerrata.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxerrata.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,51 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _SGXERRATA_KM_H_ -+#define _SGXERRATA_KM_H_ -+ -+#if SGX_CORE_REV == 103 -+#else -+#if SGX_CORE_REV == 110 -+#else -+#if SGX_CORE_REV == 111 -+#else -+#if SGX_CORE_REV == 120 -+#else -+#if SGX_CORE_REV == 121 -+#else -+#if SGX_CORE_REV == 125 -+ -+#else -+#error "sgxerrata.h: SGX530 Core Revision unspecified" -+#endif -+#endif -+#endif -+#endif -+#endif -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxfeaturedefs.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxfeaturedefs.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxfeaturedefs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxfeaturedefs.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,38 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if defined(SGX530) -+#define SGX_CORE_FRIENDLY_NAME "SGX530" -+#define SGX_CORE_ID SGX_CORE_ID_530 -+#define SGX_FEATURE_ADDRESS_SPACE_SIZE 28 -+#define SGX_FEATURE_AUTOCLOCKGATING -+#else -+#error Unsupported SGX core -+#endif -+ -+#include "img_types.h" -+ -+#include "sgxcoretypes.h" -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxinfo.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxinfo.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxinfo.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxinfo.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,309 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined (__SGXINFO_H__) -+#define __SGXINFO_H__ -+ -+#include "sgxscript.h" -+ -+#include "servicesint.h" -+ -+#include "services.h" -+#include "sgxapi_km.h" -+ -+#define SGX_MAX_DEV_DATA 24 -+#define SGX_MAX_INIT_MEM_HANDLES 16 -+ -+typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT { -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; -+} SGX_BRIDGE_INFO_FOR_SRVINIT; -+ -+typedef struct _SGX_BRIDGE_INIT_INFO_ { -+ IMG_HANDLE hKernelCCBMemInfo; -+ IMG_HANDLE hKernelCCBCtlMemInfo; -+ IMG_HANDLE hKernelCCBEventKickerMemInfo; -+ IMG_HANDLE hKernelSGXHostCtlMemInfo; -+ IMG_UINT32 ui32TAKickAddress; -+ IMG_UINT32 ui32VideoHandlerAddress; -+ IMG_HANDLE hKernelHWPerfCBMemInfo; -+ -+ IMG_UINT32 ui32EDMTaskReg0; -+ IMG_UINT32 ui32EDMTaskReg1; -+ -+ IMG_UINT32 ui32ClkGateCtl; -+ IMG_UINT32 ui32ClkGateCtl2; -+ IMG_UINT32 ui32ClkGateStatusMask; -+ -+ IMG_UINT32 ui32CacheControl; -+ -+ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA]; -+ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES]; -+ -+ SGX_INIT_SCRIPTS sScripts; -+ -+} SGX_BRIDGE_INIT_INFO; -+ -+typedef struct _PVRSRV_SGX_COMMAND_ { -+ IMG_UINT32 ui32ServiceAddress; -+ IMG_UINT32 ui32Data[7]; -+} PVRSRV_SGX_COMMAND; -+ -+typedef struct _PVRSRV_SGX_KERNEL_CCB_ { -+ PVRSRV_SGX_COMMAND asCommands[256]; -+} PVRSRV_SGX_KERNEL_CCB; -+ -+typedef struct _PVRSRV_SGX_CCB_CTL_ { -+ IMG_UINT32 ui32WriteOffset; -+ IMG_UINT32 ui32ReadOffset; -+} PVRSRV_SGX_CCB_CTL; -+ -+#define SGX_AUXCCBFLAGS_SHARED 0x00000001 -+ -+typedef enum _PVRSRV_SGX_COMMAND_TYPE_ { -+ PVRSRV_SGX_COMMAND_EDM_KICK = 0, -+ PVRSRV_SGX_COMMAND_VIDEO_KICK = 1, -+ -+ PVRSRV_SGX_COMMAND_FORCE_I32 = 0xFFFFFFFF, -+ -+} PVRSRV_SGX_COMMAND_TYPE; -+ -+#define PVRSRV_CCBFLAGS_RASTERCMD 0x1 -+#define PVRSRV_CCBFLAGS_TRANSFERCMD 0x2 -+#define PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD 0x3 -+ -+#define SGX_BIF_INVALIDATE_PTCACHE 0x1 -+#define SGX_BIF_INVALIDATE_PDCACHE 0x2 -+ -+typedef struct _PVR3DIF4_CCB_KICK_ { -+ PVRSRV_SGX_COMMAND_TYPE eCommand; -+ PVRSRV_SGX_COMMAND sCommand; -+ IMG_HANDLE hCCBKernelMemInfo; -+ IMG_HANDLE hRenderSurfSyncInfo; -+ -+ IMG_UINT32 ui32NumTAStatusVals; -+ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS]; -+ -+ IMG_UINT32 ui32Num3DStatusVals; -+ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS]; -+ -+ IMG_BOOL bFirstKickOrResume; -+#ifdef PDUMP -+ IMG_BOOL bTerminateOrAbort; -+#endif -+ IMG_BOOL bKickRender; -+ -+ IMG_UINT32 ui32CCBOffset; -+ -+ IMG_UINT32 ui32NumSrcSyncs; -+ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS]; -+ -+ IMG_BOOL bTADependency; -+ IMG_HANDLE hTA3DSyncInfo; -+ -+ IMG_HANDLE hTASyncInfo; -+ IMG_HANDLE h3DSyncInfo; -+#if defined(PDUMP) -+ IMG_UINT32 ui32CCBDumpWOff; -+#endif -+} PVR3DIF4_CCB_KICK; -+ -+#define SGX_VIDEO_USE_CODE_BASE_INDEX 14 -+#define SGX_KERNEL_USE_CODE_BASE_INDEX 15 -+ -+typedef struct _PVRSRV_SGX_HOST_CTL_ { -+ -+ volatile IMG_UINT32 ui32PowManFlags; -+ IMG_UINT32 ui32uKernelDetectedLockups; -+ IMG_UINT32 ui32HostDetectedLockups; -+ IMG_UINT32 
ui32HWRecoverySampleRate; -+ IMG_UINT32 ui32ActivePowManSampleRate; -+ IMG_UINT32 ui32InterruptFlags; -+ IMG_UINT32 ui32InterruptClearFlags; -+ -+ IMG_UINT32 ui32ResManFlags; -+ IMG_DEV_VIRTADDR sResManCleanupData; -+ -+ IMG_DEV_VIRTADDR sTAHWPBDesc; -+ IMG_DEV_VIRTADDR s3DHWPBDesc; -+ IMG_DEV_VIRTADDR sHostHWPBDesc; -+ -+ IMG_UINT32 ui32NumActivePowerEvents; -+ -+ IMG_UINT32 ui32HWPerfFlags; -+ -+ IMG_UINT32 ui32TimeWraps; -+} PVRSRV_SGX_HOST_CTL; -+ -+typedef struct _PVR3DIF4_CLIENT_INFO_ { -+ IMG_UINT32 ui32ProcessID; -+ IMG_VOID *pvProcess; -+ PVRSRV_MISC_INFO sMiscInfo; -+ -+ IMG_UINT32 asDevData[SGX_MAX_DEV_DATA]; -+ -+} PVR3DIF4_CLIENT_INFO; -+ -+typedef struct _PVR3DIF4_INTERNAL_DEVINFO_ { -+ IMG_UINT32 ui32Flags; -+ IMG_HANDLE hCtlKernelMemInfoHandle; -+ IMG_BOOL bForcePTOff; -+} PVR3DIF4_INTERNAL_DEVINFO; -+ -+typedef struct _PVRSRV_SGX_SHARED_CCB_ { -+ PVRSRV_CLIENT_MEM_INFO *psCCBClientMemInfo; -+ PVRSRV_CLIENT_MEM_INFO *psCCBCtlClientMemInfo; -+ IMG_UINT32 *pui32CCBLinAddr; -+ IMG_DEV_VIRTADDR sCCBDevAddr; -+ IMG_UINT32 *pui32WriteOffset; -+ volatile IMG_UINT32 *pui32ReadOffset; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32AllocGran; -+ -+#ifdef PDUMP -+ IMG_UINT32 ui32CCBDumpWOff; -+#endif -+} PVRSRV_SGX_SHARED_CCB; -+ -+typedef struct _PVRSRV_SGX_CCB_ { -+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo; -+ IMG_PUINT32 pui32CCBLinAddr; -+ IMG_DEV_VIRTADDR sCCBDevAddr; -+ IMG_UINT32 *pui32WriteOffset; -+ volatile IMG_UINT32 *pui32ReadOffset; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32AllocGran; -+ -+#ifdef PDUMP -+ IMG_UINT32 ui32CCBDumpWOff; -+#endif -+} PVRSRV_SGX_CCB; -+ -+typedef struct _CTL_STATUS_ { -+ IMG_DEV_VIRTADDR sStatusDevAddr; -+ IMG_UINT32 ui32StatusValue; -+} CTL_STATUS, *PCTL_STATUS; -+ -+#define SGXTQ_MAX_STATUS SGX_MAX_TRANSFER_STATUS_VALS + 2 -+typedef struct _PVR3DIF4_CMDTA_SHARED_ { -+ IMG_UINT32 ui32NumTAStatusVals; -+ IMG_UINT32 ui32Num3DStatusVals; -+ -+ IMG_UINT32 ui32WriteOpsPendingVal; -+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; -+ IMG_UINT32 ui32ReadOpsPendingVal; -+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; -+ -+ IMG_UINT32 ui32TQSyncWriteOpsPendingVal; -+ IMG_DEV_VIRTADDR sTQSyncWriteOpsCompleteDevVAddr; -+ IMG_UINT32 ui32TQSyncReadOpsPendingVal; -+ IMG_DEV_VIRTADDR sTQSyncReadOpsCompleteDevVAddr; -+ -+ IMG_UINT32 ui323DTQSyncWriteOpsPendingVal; -+ IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr; -+ IMG_UINT32 ui323DTQSyncReadOpsPendingVal; -+ IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr; -+ -+ IMG_UINT32 ui32NumSrcSyncs; -+ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS]; -+ -+ CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS]; -+ CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS]; -+ -+ PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependancy; -+ -+} PVR3DIF4_CMDTA_SHARED; -+ -+typedef struct _PVR3DIF4_TRANSFERCMD_SHARED_ { -+ -+ IMG_UINT32 ui32SrcReadOpPendingVal; -+ IMG_DEV_VIRTADDR sSrcReadOpsCompleteDevAddr; -+ -+ IMG_UINT32 ui32SrcWriteOpPendingVal; -+ IMG_DEV_VIRTADDR sSrcWriteOpsCompleteDevAddr; -+ -+ IMG_UINT32 ui32DstReadOpPendingVal; -+ IMG_DEV_VIRTADDR sDstReadOpsCompleteDevAddr; -+ -+ IMG_UINT32 ui32DstWriteOpPendingVal; -+ IMG_DEV_VIRTADDR sDstWriteOpsCompleteDevAddr; -+ -+ IMG_UINT32 ui32TASyncWriteOpsPendingVal; -+ IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr; -+ IMG_UINT32 ui32TASyncReadOpsPendingVal; -+ IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr; -+ -+ IMG_UINT32 ui323DSyncWriteOpsPendingVal; -+ IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr; -+ IMG_UINT32 
ui323DSyncReadOpsPendingVal; -+ IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr; -+ -+ IMG_UINT32 ui32NumStatusVals; -+ CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS]; -+ -+ IMG_UINT32 ui32NumSrcSync; -+ IMG_UINT32 ui32NumDstSync; -+ -+ IMG_DEV_VIRTADDR sSrcWriteOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS]; -+ IMG_DEV_VIRTADDR sSrcReadOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS]; -+ -+ IMG_DEV_VIRTADDR sDstWriteOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS]; -+ IMG_DEV_VIRTADDR sDstReadOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS]; -+} PVR3DIF4_TRANSFERCMD_SHARED, *PPVR3DIF4_TRANSFERCMD_SHARED; -+ -+typedef struct _PVRSRV_TRANSFER_SGX_KICK_ { -+ IMG_HANDLE hCCBMemInfo; -+ IMG_UINT32 ui32SharedCmdCCBOffset; -+ -+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; -+ -+ IMG_HANDLE hTASyncInfo; -+ IMG_HANDLE h3DSyncInfo; -+ -+ IMG_UINT32 ui32NumSrcSync; -+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; -+ -+ IMG_UINT32 ui32NumDstSync; -+ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; -+ -+ IMG_UINT32 ui32StatusFirstSync; -+ -+#if defined(PDUMP) -+ IMG_UINT32 ui32CCBDumpWOff; -+#endif -+} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK; -+ -+ -+#define PVRSRV_SGX_DIFF_NUM_COUNTERS 9 -+ -+typedef struct _PVRSRV_SGXDEV_DIFF_INFO_ { -+ IMG_UINT32 aui32Counters[PVRSRV_SGX_DIFF_NUM_COUNTERS]; -+ IMG_UINT32 ui32Time[2]; -+ IMG_UINT32 ui32Marker[2]; -+} PVRSRV_SGXDEV_DIFF_INFO, *PPVRSRV_SGXDEV_DIFF_INFO; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxinfokm.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxinfokm.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxinfokm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxinfokm.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,206 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __SGXINFOKM_H__ -+#define __SGXINFOKM_H__ -+ -+#include "sgxdefs.h" -+#include "device.h" -+#include "sysconfig.h" -+#include "sgxscript.h" -+#include "sgxinfo.h" -+ -+ -+#define SGX_HOSTPORT_PRESENT 0x00000001UL -+ -+#define PVRSRV_USSE_EDM_POWMAN_IDLE_REQUEST (1UL << 0) -+#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST (1UL << 1) -+#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2) -+#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3) -+#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4) -+#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5) -+ -+#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0) -+#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1) -+ -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST 0x01UL -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST 0x02UL -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST 0x04UL -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_2DC_REQUEST 0x08UL -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE 0x10UL -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD 0x20UL -+#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT 0x40UL -+ -+ typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO; -+ -+ typedef struct _PVRSRV_SGXDEV_INFO_ { -+ PVRSRV_DEVICE_TYPE eDeviceType; -+ PVRSRV_DEVICE_CLASS eDeviceClass; -+ -+ IMG_UINT8 ui8VersionMajor; -+ IMG_UINT8 ui8VersionMinor; -+ IMG_UINT32 ui32CoreConfig; -+ IMG_UINT32 ui32CoreFlags; -+ -+ IMG_PVOID pvRegsBaseKM; -+ -+ IMG_HANDLE hRegMapping; -+ -+ IMG_SYS_PHYADDR sRegsPhysBase; -+ -+ IMG_UINT32 ui32RegSize; -+ -+ IMG_UINT32 ui32CoreClockSpeed; -+ IMG_UINT32 ui32uKernelTimerClock; -+ -+ -+ IMG_VOID *psStubPBDescListKM; -+ -+ IMG_DEV_PHYADDR sKernelPDDevPAddr; -+ -+ IMG_VOID *pvDeviceMemoryHeap; -+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo; -+ PVRSRV_SGX_KERNEL_CCB *psKernelCCB; -+ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo; -+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo; -+ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl; -+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; -+ IMG_UINT32 *pui32KernelCCBEventKicker; -+ IMG_UINT32 ui32TAKickAddress; -+ IMG_UINT32 ui32TexLoadKickAddress; -+ IMG_UINT32 ui32VideoHandlerAddress; -+ IMG_UINT32 ui32KickTACounter; -+ IMG_UINT32 ui32KickTARenderCounter; -+ PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo; -+ PVRSRV_SGXDEV_DIFF_INFO sDiffInfo; -+ IMG_UINT32 ui32HWGroupRequested; -+ IMG_UINT32 ui32HWReset; -+ -+ IMG_UINT32 ui32ClientRefCount; -+ -+ IMG_UINT32 ui32CacheControl; -+ -+ IMG_VOID *pvMMUContextList; -+ -+ IMG_BOOL bForcePTOff; -+ -+ IMG_UINT32 ui32EDMTaskReg0; -+ IMG_UINT32 ui32EDMTaskReg1; -+ -+ IMG_UINT32 ui32ClkGateCtl; -+ IMG_UINT32 ui32ClkGateCtl2; -+ IMG_UINT32 ui32ClkGateStatusMask; -+ SGX_INIT_SCRIPTS sScripts; -+ -+ IMG_HANDLE hBIFResetPDOSMemHandle; -+ IMG_DEV_PHYADDR sBIFResetPDDevPAddr; -+ IMG_DEV_PHYADDR sBIFResetPTDevPAddr; -+ IMG_DEV_PHYADDR sBIFResetPageDevPAddr; -+ IMG_UINT32 *pui32BIFResetPD; -+ IMG_UINT32 *pui32BIFResetPT; -+ -+ -+ IMG_HANDLE hTimer; -+ -+ IMG_UINT32 ui32TimeStamp; -+ -+ IMG_UINT32 ui32NumResets; -+ -+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl; -+ -+ IMG_UINT32 ui32Flags; -+ -+#if defined(PDUMP) -+ PVRSRV_SGX_PDUMP_CONTEXT sPDContext; -+#endif -+ -+ -+ IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA]; -+ -+ } PVRSRV_SGXDEV_INFO; -+ -+ typedef struct _SGX_TIMING_INFORMATION_ { -+ IMG_UINT32 ui32CoreClockSpeed; -+ IMG_UINT32 
ui32HWRecoveryFreq; -+ IMG_UINT32 ui32ActivePowManLatencyms; -+ IMG_UINT32 ui32uKernelFreq; -+ } SGX_TIMING_INFORMATION; -+ -+ typedef struct _SGX_DEVICE_MAP_ { -+ IMG_UINT32 ui32Flags; -+ -+ IMG_SYS_PHYADDR sRegsSysPBase; -+ IMG_CPU_PHYADDR sRegsCpuPBase; -+ IMG_CPU_VIRTADDR pvRegsCpuVBase; -+ IMG_UINT32 ui32RegsSize; -+ -+ IMG_SYS_PHYADDR sSPSysPBase; -+ IMG_CPU_PHYADDR sSPCpuPBase; -+ IMG_CPU_VIRTADDR pvSPCpuVBase; -+ IMG_UINT32 ui32SPSize; -+ -+ IMG_SYS_PHYADDR sLocalMemSysPBase; -+ IMG_DEV_PHYADDR sLocalMemDevPBase; -+ IMG_CPU_PHYADDR sLocalMemCpuPBase; -+ IMG_UINT32 ui32LocalMemSize; -+ -+ IMG_UINT32 ui32IRQ; -+ -+ } SGX_DEVICE_MAP; -+ -+ typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; -+ struct _PVRSRV_STUB_PBDESC_ { -+ IMG_UINT32 ui32RefCount; -+ IMG_UINT32 ui32TotalPBSize; -+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; -+ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos; -+ IMG_UINT32 ui32SubKernelMemInfosCount; -+ IMG_HANDLE hDevCookie; -+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; -+ PVRSRV_STUB_PBDESC *psNext; -+ }; -+ -+ typedef struct _PVRSRV_SGX_CCB_INFO_ { -+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo; -+ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo; -+ PVRSRV_SGX_COMMAND *psCommands; -+ IMG_UINT32 *pui32WriteOffset; -+ volatile IMG_UINT32 *pui32ReadOffset; -+#if defined(PDUMP) -+ IMG_UINT32 ui32CCBDumpWOff; -+#endif -+ } PVRSRV_SGX_CCB_INFO; -+ -+ PVRSRV_ERROR SGXRegisterDevice(PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+ IMG_VOID SGXOSTimer(IMG_VOID * pvData); -+ -+ IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION * -+ psSGXTimingInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxinit.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxinit.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxinit.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxinit.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,1621 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+ -+#include "sgxdefs.h" -+#include "sgxmmu.h" -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "sgxapi_km.h" -+#include "sgxinfo.h" -+#include "sgxinfokm.h" -+#include "sgxconfig.h" -+#include "sysconfig.h" -+#include "pvr_bridge_km.h" -+ -+#include "pdump_km.h" -+#include "ra.h" -+#include "mmu.h" -+#include "handle.h" -+#include "perproc.h" -+ -+#include "sgxutils.h" -+ -+ -+IMG_BOOL SGX_ISRHandler(IMG_VOID * pvData); -+ -+IMG_UINT32 gui32EventStatusServicesByISR = 0; -+ -+IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO * psDevInfo, IMG_UINT32 ui32PDUMPFlags); -+ -+static PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO * psDevInfo, -+ IMG_BOOL bHardwareRecovery); -+PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie); -+ -+typedef enum _PVR_DEVICE_POWER_STATE_ { -+ PVR_DEVICE_POWER_STATE_ON = 0, -+ PVR_DEVICE_POWER_STATE_IDLE = 1, -+ PVR_DEVICE_POWER_STATE_OFF = 2, -+ -+ PVR_DEVICE_POWER_STATE_FORCE_I32 = 0x7fffffff -+} PVR_DEVICE_POWER_STATE, *PPVR_DEVICE_POWER_STATE; -+ -+static PVR_DEVICE_POWER_STATE MapDevicePowerState(PVR_POWER_STATE ePowerState) -+{ -+ PVR_DEVICE_POWER_STATE eDevicePowerState; -+ -+ switch (ePowerState) { -+ case PVRSRV_POWER_STATE_D0: -+ { -+ eDevicePowerState = PVR_DEVICE_POWER_STATE_ON; -+ break; -+ } -+ case PVRSRV_POWER_STATE_D3: -+ { -+ eDevicePowerState = PVR_DEVICE_POWER_STATE_OFF; -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "MapDevicePowerState: Invalid state: %ld", -+ ePowerState)); -+ eDevicePowerState = PVR_DEVICE_POWER_STATE_FORCE_I32; -+ PVR_ASSERT(eDevicePowerState != -+ PVR_DEVICE_POWER_STATE_FORCE_I32); -+ } -+ } -+ -+ return eDevicePowerState; -+} -+ -+static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ SGXScheduleProcessQueues(psDeviceNode); -+} -+ -+static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO * psDevInfo) -+{ -+ if (psDevInfo->psKernelCCBInfo != IMG_NULL) { -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), -+ psDevInfo->psKernelCCBInfo, IMG_NULL); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ SGX_BRIDGE_INIT_INFO * psInitInfo) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ -+ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psPerProc); -+ psDevInfo->sScripts = psInitInfo->sScripts; -+ -+ psDevInfo->psKernelCCBMemInfo = -+ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelCCBMemInfo; -+ psDevInfo->psKernelCCB = -+ (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo-> -+ pvLinAddrKM; -+ -+ psDevInfo->psKernelCCBCtlMemInfo = -+ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelCCBCtlMemInfo; -+ psDevInfo->psKernelCCBCtl = -+ (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo-> -+ pvLinAddrKM; -+ -+ psDevInfo->psKernelCCBEventKickerMemInfo = -+ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelCCBEventKickerMemInfo; -+ psDevInfo->pui32KernelCCBEventKicker = -+ (IMG_UINT32 *) psDevInfo->psKernelCCBEventKickerMemInfo-> -+ pvLinAddrKM; -+ -+ psDevInfo->psKernelSGXHostCtlMemInfo = -+ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelSGXHostCtlMemInfo; -+ psDevInfo->psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psDevInfo->psKernelSGXHostCtlMemInfo-> -+ pvLinAddrKM; -+ -+ psDevInfo->psKernelHWPerfCBMemInfo = 
-+ (PVRSRV_KERNEL_MEM_INFO *) psInitInfo->hKernelHWPerfCBMemInfo; -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(PVRSRV_SGX_CCB_INFO), -+ (IMG_VOID **) & psKernelCCBInfo, 0); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "InitDevInfo: Failed to alloc memory")); -+ goto failed_allockernelccb; -+ } -+ -+ OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO)); -+ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo; -+ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo; -+ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands; -+ psKernelCCBInfo->pui32WriteOffset = -+ &psDevInfo->psKernelCCBCtl->ui32WriteOffset; -+ psKernelCCBInfo->pui32ReadOffset = -+ &psDevInfo->psKernelCCBCtl->ui32ReadOffset; -+ psDevInfo->psKernelCCBInfo = psKernelCCBInfo; -+ -+ psDevInfo->ui32TAKickAddress = psInitInfo->ui32TAKickAddress; -+ -+ psDevInfo->ui32VideoHandlerAddress = -+ psInitInfo->ui32VideoHandlerAddress; -+ -+ psDevInfo->bForcePTOff = IMG_FALSE; -+ -+ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl; -+ -+ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0; -+ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1; -+ psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl; -+ psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2; -+ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask; -+ -+ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, -+ sizeof(psDevInfo->asSGXDevData)); -+ -+ return PVRSRV_OK; -+ -+failed_allockernelccb: -+ DeinitDevInfo(psDevInfo); -+ -+ return eError; -+} -+ -+static IMG_VOID SGXGetTimingInfo(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ SGX_TIMING_INFORMATION sSGXTimingInfo = { 0 }; -+ IMG_UINT32 ui32ActivePowManSampleRate; -+ SGX_TIMING_INFORMATION *psSGXTimingInfo; -+ -+ psSGXTimingInfo = &sSGXTimingInfo; -+ SysGetSGXTimingInformation(psSGXTimingInfo); -+ -+ { -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32OlduKernelFreq; -+ -+ if (psDevInfo->hTimer != IMG_NULL) { -+ ui32OlduKernelFreq = -+ psDevInfo->ui32CoreClockSpeed / -+ psDevInfo->ui32uKernelTimerClock; -+ if (ui32OlduKernelFreq != -+ psSGXTimingInfo->ui32uKernelFreq) { -+ eError = OSRemoveTimer(psDevInfo->hTimer); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXGetTimingInfo: Failed to remove timer")); -+ } -+ psDevInfo->hTimer = IMG_NULL; -+ } -+ } -+ if (psDevInfo->hTimer == IMG_NULL) { -+ -+ /* -+ * the magic calculation below sets the hardware lock-up -+ * detection and recovery timer interval to ~150msecs -+ */ -+ psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode, -+ 1000 * 150 / -+ psSGXTimingInfo-> -+ ui32uKernelFreq); -+ if (psDevInfo->hTimer == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXGetTimingInfo : Failed to register timer callback function")); -+ } -+ } -+ -+ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate = -+ psSGXTimingInfo->ui32uKernelFreq / -+ psSGXTimingInfo->ui32HWRecoveryFreq; -+ } -+ -+ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed; -+ psDevInfo->ui32uKernelTimerClock = -+ psSGXTimingInfo->ui32CoreClockSpeed / -+ psSGXTimingInfo->ui32uKernelFreq; -+ -+ ui32ActivePowManSampleRate = -+ psSGXTimingInfo->ui32uKernelFreq * -+ psSGXTimingInfo->ui32ActivePowManLatencyms / 1000; -+ ui32ActivePowManSampleRate += 1; -+ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = -+ ui32ActivePowManSampleRate; -+} -+ -+static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO * psDevInfo, -+ IMG_BOOL 
bStartOSTimer) -+{ -+ IMG_UINT32 ui32RegVal; -+ -+ -+ ui32RegVal = -+ EUR_CR_EVENT_TIMER_ENABLE_MASK | psDevInfo->ui32uKernelTimerClock; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_TIMER, ui32RegVal); -+ PDUMPREGWITHFLAGS(EUR_CR_EVENT_TIMER, ui32RegVal, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ if (bStartOSTimer) { -+ PVRSRV_ERROR eError; -+ eError = OSEnableTimer(psDevInfo->hTimer); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXStartTimer : Failed to enable host timer")); -+ } -+ } -+} -+ -+static PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle, -+ PVR_DEVICE_POWER_STATE eNewPowerState, -+ PVR_DEVICE_POWER_STATE eCurrentPowerState) -+{ -+ if ((eNewPowerState != eCurrentPowerState) && -+ (eNewPowerState != PVR_DEVICE_POWER_STATE_ON)) { -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; -+ IMG_UINT32 ui32PowManRequest, ui32PowManComplete; -+ -+ eError = OSDisableTimer(psDevInfo->hTimer); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXPrePowerState: Failed to disable timer")); -+ return eError; -+ } -+ -+ if (eNewPowerState == PVR_DEVICE_POWER_STATE_OFF) { -+ ui32PowManRequest = -+ PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST; -+ ui32PowManComplete = -+ PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE; -+ PDUMPCOMMENT -+ ("TA/3D CCB Control - SGX power off request"); -+ } else { -+ ui32PowManRequest = PVRSRV_USSE_EDM_POWMAN_IDLE_REQUEST; -+ ui32PowManComplete = -+ PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE; -+ PDUMPCOMMENT("TA/3D CCB Control - SGX idle request"); -+ } -+ -+ psSGXHostCtl->ui32PowManFlags |= ui32PowManRequest; -+#if defined(PDUMP) -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), -+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); -+#endif -+ -+ if (PollForValueKM(&psSGXHostCtl->ui32PowManFlags, -+ ui32PowManComplete, -+ ui32PowManComplete, -+ MAX_HW_TIME_US / WAIT_TRY_COUNT, -+ WAIT_TRY_COUNT) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXPrePowerState: Wait for SGX ukernel power transition failed.")); -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT -+ ("TA/3D CCB Control - Wait for power event on uKernel."); -+ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), -+ ui32PowManComplete, ui32PowManComplete, -+ PDUMP_POLL_OPERATOR_EQUAL, IMG_FALSE, IMG_FALSE, -+ MAKEUNIQUETAG(psDevInfo-> -+ psKernelSGXHostCtlMemInfo)); -+#endif -+ -+ -+ { -+ if (PollForValueKM -+ ((IMG_UINT32 *) psDevInfo->pvRegsBaseKM + -+ (EUR_CR_CLKGATESTATUS >> 2), 0, -+ psDevInfo->ui32ClkGateStatusMask, -+ MAX_HW_TIME_US / WAIT_TRY_COUNT, -+ WAIT_TRY_COUNT) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXPrePowerState: Wait for SGX clock gating failed.")); -+ } -+ -+ PDUMPCOMMENT("Wait for SGX clock gating."); -+ PDUMPREGPOL(EUR_CR_CLKGATESTATUS, 0, -+ psDevInfo->ui32ClkGateStatusMask); -+ } -+ -+ if (eNewPowerState == PVR_DEVICE_POWER_STATE_OFF) { -+ eError = SGXDeinitialise(psDevInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXPrePowerState: SGXDeinitialise failed: %lu", -+ eError)); -+ return eError; -+ } -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle, -+ PVR_DEVICE_POWER_STATE eNewPowerState, -+ PVR_DEVICE_POWER_STATE eCurrentPowerState) -+{ -+ if ((eNewPowerState != eCurrentPowerState) && -+ 
(eCurrentPowerState != PVR_DEVICE_POWER_STATE_ON)) { -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; -+ -+ psSGXHostCtl->ui32PowManFlags = 0; -+ PDUMPCOMMENT("TA/3D CCB Control - Reset Power Manager flags"); -+#if defined(PDUMP) -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), -+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); -+#endif -+ -+ if (eCurrentPowerState == PVR_DEVICE_POWER_STATE_OFF) { -+ -+ SGXGetTimingInfo(psDeviceNode); -+ -+ eError = SGXInitialise(psDevInfo, IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXPostPowerState: SGXInitialise failed")); -+ return eError; -+ } -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SGXPrePowerStateExt(IMG_HANDLE hDevHandle, -+ PVR_POWER_STATE eNewPowerState, -+ PVR_POWER_STATE eCurrentPowerState) -+{ -+ PVR_DEVICE_POWER_STATE eNewDevicePowerState = -+ MapDevicePowerState(eNewPowerState); -+ PVR_DEVICE_POWER_STATE eCurrentDevicePowerState = -+ MapDevicePowerState(eCurrentPowerState); -+ -+ return SGXPrePowerState(hDevHandle, eNewDevicePowerState, -+ eCurrentDevicePowerState); -+} -+ -+PVRSRV_ERROR SGXPostPowerStateExt(IMG_HANDLE hDevHandle, -+ PVR_POWER_STATE eNewPowerState, -+ PVR_POWER_STATE eCurrentPowerState) -+{ -+ PVRSRV_ERROR eError; -+ PVR_DEVICE_POWER_STATE eNewDevicePowerState = -+ MapDevicePowerState(eNewPowerState); -+ PVR_DEVICE_POWER_STATE eCurrentDevicePowerState = -+ MapDevicePowerState(eCurrentPowerState); -+ -+ eError = -+ SGXPostPowerState(hDevHandle, eNewDevicePowerState, -+ eCurrentDevicePowerState); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "SGXPostPowerState : SGX Power Transition from %d to %d OK", -+ eCurrentPowerState, eNewPowerState)); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle, -+ IMG_BOOL bIdleDevice, -+ PVR_POWER_STATE eCurrentPowerState) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ -+ if (eCurrentPowerState == PVRSRV_POWER_STATE_D0) { -+ if (bIdleDevice) { -+ -+ eError = -+ SGXPrePowerState(hDevHandle, -+ PVR_DEVICE_POWER_STATE_IDLE, -+ PVR_DEVICE_POWER_STATE_ON); -+ -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "SGXPreClockSpeedChange: SGX clock speed was %luHz", -+ psDevInfo->ui32CoreClockSpeed)); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle, -+ IMG_BOOL bIdleDevice, -+ PVR_POWER_STATE eCurrentPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed); -+ -+ if (eCurrentPowerState == PVRSRV_POWER_STATE_D0) { -+ SGXGetTimingInfo(psDeviceNode); -+ if (bIdleDevice) { -+ eError = -+ SGXPostPowerState(hDevHandle, -+ PVR_DEVICE_POWER_STATE_ON, -+ PVR_DEVICE_POWER_STATE_IDLE); -+ -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ } -+ SGXStartTimer(psDevInfo, IMG_TRUE); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "SGXPostClockSpeedChange: SGX clock speed changed from 
%luHz to %luHz", -+ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed)); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO * psDevInfo, -+ SGX_INIT_COMMAND * psScript, -+ IMG_UINT32 ui32NumInitCommands) -+{ -+ IMG_UINT32 ui32PC; -+ SGX_INIT_COMMAND *psComm; -+ -+ for (ui32PC = 0, psComm = psScript; -+ ui32PC < ui32NumInitCommands; ui32PC++, psComm++) { -+ switch (psComm->eOp) { -+ case SGX_INIT_OP_WRITE_HW_REG: -+ { -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, -+ psComm->sWriteHWReg.ui32Offset, -+ psComm->sWriteHWReg.ui32Value); -+ PDUMPREG(psComm->sWriteHWReg.ui32Offset, -+ psComm->sWriteHWReg.ui32Value); -+ break; -+ } -+#if defined(PDUMP) -+ case SGX_INIT_OP_PDUMP_HW_REG: -+ { -+ PDUMPREG(psComm->sPDumpHWReg.ui32Offset, -+ psComm->sPDumpHWReg.ui32Value); -+ break; -+ } -+#endif -+ case SGX_INIT_OP_HALT: -+ { -+ return PVRSRV_OK; -+ } -+ case SGX_INIT_OP_ILLEGAL: -+ -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXRunScript: PC %d: Illegal command: %d", -+ ui32PC, psComm->eOp)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ } -+ -+ } -+ -+ return PVRSRV_ERROR_GENERIC;; -+} -+ -+static PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO * psDevInfo, -+ IMG_BOOL bHardwareRecovery) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32ReadOffset, ui32WriteOffset; -+ -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, -+ psDevInfo->ui32ClkGateCtl); -+ PDUMPREGWITHFLAGS(EUR_CR_CLKGATECTL, psDevInfo->ui32ClkGateCtl, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS); -+ -+ -+ -+ *psDevInfo->pui32KernelCCBEventKicker = 0; -+#if defined(PDUMP) -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, -+ sizeof(*psDevInfo->pui32KernelCCBEventKicker), -+ PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); -+#endif -+ -+ psDevInfo->psSGXHostCtl->sTAHWPBDesc.uiAddr = 0; -+ psDevInfo->psSGXHostCtl->s3DHWPBDesc.uiAddr = 0; -+#if defined(PDUMP) -+ PDUMPCOMMENT(" CCB Control - Reset HW PBDesc records"); -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, sTAHWPBDesc), -+ sizeof(IMG_DEV_VIRTADDR), PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, s3DHWPBDesc), -+ sizeof(IMG_DEV_VIRTADDR), PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); -+#endif -+ -+ eError = -+ SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommands, -+ SGX_MAX_INIT_COMMANDS); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXInitialise: SGXRunScript failed (%d)", eError)); -+ return (PVRSRV_ERROR_GENERIC); -+ } -+ -+ SGXStartTimer(psDevInfo, !bHardwareRecovery); -+ -+ if (bHardwareRecovery) { -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psDevInfo->psSGXHostCtl; -+ -+ if (PollForValueKM -+ ((volatile IMG_UINT32 *)(&psSGXHostCtl-> -+ ui32InterruptClearFlags), 0, -+ PVRSRV_USSE_EDM_INTERRUPT_HWR, -+ MAX_HW_TIME_US / WAIT_TRY_COUNT, 1000) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXInitialise: Wait for uKernel HW Recovery failed")); -+ return PVRSRV_ERROR_RETRY; -+ } -+ } -+ -+ for (ui32ReadOffset = psDevInfo->psKernelCCBCtl->ui32ReadOffset, -+ ui32WriteOffset = psDevInfo->psKernelCCBCtl->ui32WriteOffset; -+ ui32ReadOffset != ui32WriteOffset; -+ ui32ReadOffset = (ui32ReadOffset + 1) & 0xFF) { -+ *psDevInfo->pui32KernelCCBEventKicker = -+ (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF; -+ 
OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_KICK, -+ EUR_CR_EVENT_KICK_NOW_MASK); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie; -+ PVRSRV_ERROR eError; -+ -+ if (psDevInfo->pvRegsBaseKM == IMG_NULL) { -+ return PVRSRV_OK; -+ } -+ -+ eError = -+ SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, -+ SGX_MAX_DEINIT_COMMANDS); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXDeinitialise: SGXRunScript failed (%d)", eError)); -+ return (PVRSRV_ERROR_GENERIC); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR DevInitSGXPart1(IMG_VOID * pvDeviceNode) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ IMG_HANDLE hKernelDevMemContext; -+ IMG_DEV_PHYADDR sPDDevPAddr; -+ IMG_UINT32 i; -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *) pvDeviceNode; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = -+ psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; -+ IMG_HANDLE hDevInfoOSMemHandle = (IMG_HANDLE) IMG_NULL; -+ PVRSRV_ERROR eError; -+ -+ PDUMPCOMMENT("SGX Initialisation Part 1"); -+ -+ PDUMPCOMMENT("SGX Core Version Information: %s", -+ SGX_CORE_FRIENDLY_NAME); -+#ifdef SGX_CORE_REV -+ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV); -+#else -+ PDUMPCOMMENT("SGX Core Revision Information: head rtl"); -+#endif -+ -+ if (OSAllocPages -+ (PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_HAP_MULTI_PROCESS | -+ PVRSRV_HAP_CACHED, sizeof(PVRSRV_SGXDEV_INFO), -+ (IMG_VOID **) & psDevInfo, &hDevInfoOSMemHandle) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart1 : Failed to alloc memory for DevInfo")); -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ OSMemSet(psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO)); -+ -+ psDevInfo->eDeviceType = DEV_DEVICE_TYPE; -+ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS; -+ -+ psDeviceNode->pvDevice = (IMG_PVOID) psDevInfo; -+ psDeviceNode->hDeviceOSMemHandle = hDevInfoOSMemHandle; -+ -+ psDevInfo->pvDeviceMemoryHeap = (IMG_VOID *) psDeviceMemoryHeap; -+ -+ hKernelDevMemContext = BM_CreateContext(psDeviceNode, -+ &sPDDevPAddr, -+ IMG_NULL, IMG_NULL); -+ -+ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr; -+ -+ for (i = 0; i < psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++) { -+ IMG_HANDLE hDevMemHeap; -+ -+ switch (psDeviceMemoryHeap[i].DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_KERNEL: -+ case DEVICE_MEMORY_HEAP_SHARED: -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ hDevMemHeap = -+ BM_CreateHeap(hKernelDevMemContext, -+ &psDeviceMemoryHeap[i]); -+ -+ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap; -+ break; -+ } -+ } -+ } -+ -+ eError = MMU_BIFResetPDAlloc(psDevInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGX : Failed to alloc memory for BIF reset")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, -+ SGX_BRIDGE_INFO_FOR_SRVINIT * -+ psInitInfo) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ -+ PDUMPCOMMENT("SGXGetInfoForSrvinit"); -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevHandle; -+ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr; -+ -+ eError = -+ PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", -+ eError)); -+ return 
PVRSRV_ERROR_GENERIC; -+ } -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA * psPerProc, -+ IMG_HANDLE hDevHandle, -+ SGX_BRIDGE_INIT_INFO * psInitInfo) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ SGX_DEVICE_MAP *psSGXDeviceMap; -+ PVR_POWER_STATE eDefaultPowerState; -+ -+ PDUMPCOMMENT("SGX Initialisation Part 2"); -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevHandle; -+ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2KM: Failed to load EDM program")); -+ goto failed_init_dev_info; -+ } -+ -+ -+ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID **) & psSGXDeviceMap); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2KM: Failed to get device memory map!")); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ if (psSGXDeviceMap->pvRegsCpuVBase) { -+ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase; -+ } else { -+ -+ psDevInfo->pvRegsBaseKM = -+ OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase, -+ psSGXDeviceMap->ui32RegsSize, -+ PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_UNCACHED, -+ IMG_NULL); -+ if (!psDevInfo->pvRegsBaseKM) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2KM: Failed to map in regs\n")); -+ return PVRSRV_ERROR_BAD_MAPPING; -+ } -+ } -+ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize; -+ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase; -+ -+ -+ -+ psDeviceNode->pvISRData = psDeviceNode; -+ -+ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler); -+ -+ -+ -+ psDevInfo->psSGXHostCtl->ui32PowManFlags |= -+ PVRSRV_USSE_EDM_POWMAN_NO_WORK; -+ eDefaultPowerState = PVRSRV_POWER_STATE_D3; -+ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex, -+ SGXPrePowerStateExt, -+ SGXPostPowerStateExt, -+ SGXPreClockSpeedChange, -+ SGXPostClockSpeedChange, -+ (IMG_HANDLE) psDeviceNode, -+ PVRSRV_POWER_STATE_D3, -+ eDefaultPowerState); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitSGXPart2KM: failed to register device with power manager")); -+ return eError; -+ } -+ -+ OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB)); -+ OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL)); -+ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, -+ sizeof(*psDevInfo->pui32KernelCCBEventKicker)); -+ PDUMPCOMMENT("Kernel CCB"); -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, -+ sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo)); -+ PDUMPCOMMENT("Kernel CCB Control"); -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, -+ sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo)); -+ PDUMPCOMMENT("Kernel CCB Event Kicker"); -+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, -+ sizeof(*psDevInfo->pui32KernelCCBEventKicker), -+ PDUMP_FLAGS_CONTINUOUS, -+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); -+ -+ return PVRSRV_OK; -+ -+failed_init_dev_info: -+ return eError; -+} -+ -+static PVRSRV_ERROR DevDeInitSGX(IMG_VOID * pvDeviceNode) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *) pvDeviceNode; -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ IMG_HANDLE hDevInfoOSMemHandle = psDeviceNode->hDeviceOSMemHandle; -+ PVRSRV_ERROR eError = 
PVRSRV_ERROR_INVALID_PARAMS; -+ IMG_UINT32 ui32Heap; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ SGX_DEVICE_MAP *psSGXDeviceMap; -+ -+ if (!psDevInfo) { -+ -+ PVR_DPF((PVR_DBG_ERROR, "DevDeInitSGX: Null DevInfo")); -+ return PVRSRV_OK; -+ } -+ if (psDevInfo->hTimer) { -+ eError = OSRemoveTimer(psDevInfo->hTimer); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevDeInitSGX: Failed to remove timer")); -+ return eError; -+ } -+ psDevInfo->hTimer = IMG_NULL; -+ } -+ -+ MMU_BIFResetPDFree(psDevInfo); -+ -+ DeinitDevInfo(psDevInfo); -+ -+ -+ psDeviceMemoryHeap = -+ (DEVICE_MEMORY_HEAP_INFO *) psDevInfo->pvDeviceMemoryHeap; -+ for (ui32Heap = 0; -+ ui32Heap < psDeviceNode->sDevMemoryInfo.ui32HeapCount; -+ ui32Heap++) { -+ switch (psDeviceMemoryHeap[ui32Heap].DevMemHeapType) { -+ case DEVICE_MEMORY_HEAP_KERNEL: -+ case DEVICE_MEMORY_HEAP_SHARED: -+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: -+ { -+ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != -+ IMG_NULL) { -+ BM_DestroyHeap(psDeviceMemoryHeap -+ [ui32Heap].hDevMemHeap); -+ } -+ break; -+ } -+ } -+ } -+ -+ eError = -+ BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, -+ IMG_NULL); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevDeInitSGX : Failed to destroy kernel context")); -+ return eError; -+ } -+ -+ eError = -+ PVRSRVRemovePowerDevice(((PVRSRV_DEVICE_NODE *) pvDeviceNode)-> -+ sDevId.ui32DeviceIndex); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, -+ (IMG_VOID **) & psSGXDeviceMap); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevDeInitSGX: Failed to get device memory map!")); -+ return eError; -+ } -+ -+ if (!psSGXDeviceMap->pvRegsCpuVBase) { -+ -+ if (psDevInfo->pvRegsBaseKM != IMG_NULL) { -+ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM, -+ psDevInfo->ui32RegSize, -+ PVRSRV_HAP_KERNEL_ONLY | -+ PVRSRV_HAP_UNCACHED, IMG_NULL); -+ } -+ } -+ -+ OSFreePages(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_HAP_MULTI_PROCESS, -+ sizeof(PVRSRV_SGXDEV_INFO), psDevInfo, hDevInfoOSMemHandle); -+ psDeviceNode->pvDevice = IMG_NULL; -+ -+ if (psDeviceMemoryHeap != IMG_NULL) { -+ -+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, -+ sizeof(DEVICE_MEMORY_HEAP_INFO) * -+ psDeviceNode->sDevMemoryInfo.ui32HeapCount, -+ psDeviceMemoryHeap, 0); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static -+IMG_VOID HWRecoveryResetSGX(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32Component, IMG_UINT32 ui32CallerID) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psDevInfo->psSGXHostCtl; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Component); -+ -+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "HWRecoveryResetSGX: Power transition in progress")); -+ return; -+ } -+ -+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR; -+ -+ pr_err("HWRecoveryResetSGX: SGX Hardware Recovery triggered\n"); -+ -+ PDUMPSUSPEND(); -+ -+ do { -+ eError = SGXInitialise(psDevInfo, IMG_TRUE); -+ } -+ while (eError == PVRSRV_ERROR_RETRY); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "HWRecoveryResetSGX: SGXInitialise failed (%d)", -+ eError)); -+ } -+ -+ PDUMPRESUME(); -+ -+ PVRSRVPowerUnlock(ui32CallerID); -+ -+ SGXScheduleProcessQueues(psDeviceNode); -+ -+ PVRSRVProcessQueues(ui32CallerID, IMG_TRUE); -+} -+ -+static struct workdata { -+ PVRSRV_DEVICE_NODE 
*psDeviceNode; -+ IMG_UINT32 ui32Component; -+ IMG_UINT32 ui32CallerID; -+} gHWRecoveryParams; -+ -+static void HWRecoveryWrapper(struct work_struct *work) -+{ -+ HWRecoveryResetSGX(gHWRecoveryParams.psDeviceNode, -+ gHWRecoveryParams.ui32Component, -+ gHWRecoveryParams.ui32CallerID); -+} -+ -+DECLARE_WORK(gWork, HWRecoveryWrapper); -+ -+IMG_VOID SGXOSTimer(IMG_VOID * pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ static IMG_UINT32 ui32EDMTasks = 0; -+ static IMG_UINT32 ui32LockupCounter = 0; -+ static IMG_UINT32 ui32NumResets = 0; -+ IMG_UINT32 ui32CurrentEDMTasks; -+ IMG_BOOL bLockup = IMG_FALSE; -+ IMG_BOOL bPoweredDown; -+ -+ psDevInfo->ui32TimeStamp++; -+ -+ bPoweredDown = (IMG_BOOL) ! SGXIsDevicePowered(psDeviceNode); -+ -+ if (bPoweredDown) { -+ ui32LockupCounter = 0; -+ } else { -+ -+ ui32CurrentEDMTasks = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, -+ psDevInfo->ui32EDMTaskReg0); -+ if (psDevInfo->ui32EDMTaskReg1 != 0) { -+ ui32CurrentEDMTasks ^= -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, -+ psDevInfo->ui32EDMTaskReg1); -+ } -+ if ((ui32CurrentEDMTasks == ui32EDMTasks) && -+ (psDevInfo->ui32NumResets == ui32NumResets)) { -+ ui32LockupCounter++; -+ if (ui32LockupCounter == 3) { -+ ui32LockupCounter = 0; -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXOSTimer() detected SGX lockup (0x%x tasks)", -+ ui32EDMTasks)); -+ -+ bLockup = IMG_TRUE; -+ } -+ } else { -+ ui32LockupCounter = 0; -+ ui32EDMTasks = ui32CurrentEDMTasks; -+ ui32NumResets = psDevInfo->ui32NumResets; -+ } -+ } -+ -+ if (bLockup) { -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psDevInfo->psSGXHostCtl; -+ -+ psSGXHostCtl->ui32HostDetectedLockups++; -+ -+ /* -+ * schedule HWRecoveryResetSGX from a work -+ * in the shared queue -+ */ -+ gHWRecoveryParams.psDeviceNode = psDeviceNode; -+ gHWRecoveryParams.ui32Component = 0; -+ gHWRecoveryParams.ui32CallerID = TIMER_ID; -+ schedule_work(&gWork); -+ } -+} -+ -+ -+IMG_BOOL SGX_ISRHandler(IMG_VOID * pvData) -+{ -+ IMG_BOOL bInterruptProcessed = IMG_FALSE; -+ -+ { -+ IMG_UINT32 ui32EventStatus, ui32EventEnable; -+ IMG_UINT32 ui32EventClear = 0; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ -+ if (pvData == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGX_ISRHandler: Invalid params\n")); -+ return bInterruptProcessed; -+ } -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) pvData; -+ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+ ui32EventStatus = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); -+ ui32EventEnable = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, -+ EUR_CR_EVENT_HOST_ENABLE); -+ -+ gui32EventStatusServicesByISR = ui32EventStatus; -+ -+ ui32EventStatus &= ui32EventEnable; -+ -+ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK) { -+ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK; -+ } -+ -+ if (ui32EventClear) { -+ bInterruptProcessed = IMG_TRUE; -+ -+ ui32EventClear |= -+ EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK; -+ -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, -+ EUR_CR_EVENT_HOST_CLEAR, ui32EventClear); -+ } -+ } -+ -+ return bInterruptProcessed; -+} -+ -+IMG_VOID SGX_MISRHandler(IMG_VOID * pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *) pvData; -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psDevInfo->psSGXHostCtl; -+ -+ if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) -+ && 
!(psSGXHostCtl-> -+ ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR)) { -+ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID); -+ } -+ SGXTestActivePowerEvent(psDeviceNode, ISR_ID); -+} -+ -+PVRSRV_ERROR SGXRegisterDevice(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ -+ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE; -+ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS; -+ -+ psDeviceNode->pfnInitDevice = DevInitSGXPart1; -+ psDeviceNode->pfnDeInitDevice = DevDeInitSGX; -+ -+ psDeviceNode->pfnMMUInitialise = MMU_Initialise; -+ psDeviceNode->pfnMMUFinalise = MMU_Finalise; -+ psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap; -+ psDeviceNode->pfnMMUCreate = MMU_Create; -+ psDeviceNode->pfnMMUDelete = MMU_Delete; -+ psDeviceNode->pfnMMUAlloc = MMU_Alloc; -+ psDeviceNode->pfnMMUFree = MMU_Free; -+ psDeviceNode->pfnMMUMapPages = MMU_MapPages; -+ psDeviceNode->pfnMMUMapShadow = MMU_MapShadow; -+ psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages; -+ psDeviceNode->pfnMMUMapScatter = MMU_MapScatter; -+ psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr; -+ psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr; -+ -+ -+ psDeviceNode->pfnDeviceISR = SGX_ISRHandler; -+ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler; -+ -+ psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete; -+ -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ -+ psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_ADDRESS_SPACE_SIZE; -+ -+ psDevMemoryInfo->ui32Flags = 0; -+ -+ psDevMemoryInfo->ui32HeapCount = SGX_MAX_HEAP_ID; -+ -+ psDevMemoryInfo->ui32SyncHeapID = SGX_SYNCINFO_HEAP_ID; -+ -+ psDevMemoryInfo->ui32MappingHeapID = SGX_GENERAL_MAPPING_HEAP_ID; -+ -+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(DEVICE_MEMORY_HEAP_INFO) * -+ psDevMemoryInfo->ui32HeapCount, -+ (IMG_VOID **) & psDevMemoryInfo->psDeviceMemoryHeap, -+ 0) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO")); -+ return (PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, -+ sizeof(DEVICE_MEMORY_HEAP_INFO) * -+ psDevMemoryInfo->ui32HeapCount); -+ -+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; -+ -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID); -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_GENERAL_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapSize = -+ SGX_GENERAL_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_SINGLE_PROCESS; -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszName = "General"; -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszBSName = "General BS"; -+ psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID); -+ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_TADATA_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapSize = -+ SGX_TADATA_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION -+ | PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszName = "TA Data"; -+ psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszBSName = "TA Data BS"; -+ 
psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID); -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_KERNEL_CODE_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapSize = -+ SGX_KERNEL_CODE_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION -+ | PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszName = "Kernel"; -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszBSName = "Kernel BS"; -+ psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED_EXPORTED; -+ -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_VIDEO_CODE_HEAP_ID); -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_VIDEO_CODE_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32HeapSize = -+ SGX_VIDEO_CODE_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_KERNEL_ONLY; -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].pszName = "Video"; -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].pszBSName = "Video BS"; -+ psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED; -+ -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_VIDEO_DATA_HEAP_ID); -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_KERNEL_VIDEO_DATA_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32HeapSize = -+ SGX_KERNEL_VIDEO_DATA_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].pszName = -+ "KernelVideoData"; -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].pszBSName = -+ "KernelVideoData BS"; -+ psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED_EXPORTED; -+ -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID); -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_PIXELSHADER_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapSize = -+ SGX_PIXELSHADER_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_SINGLE_PROCESS; -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszName = "PixelShaderUSSE"; -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszBSName = -+ "PixelShaderUSSE BS"; -+ psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID); -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_VERTEXSHADER_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapSize = -+ SGX_VERTEXSHADER_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_SINGLE_PROCESS; -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszName = -+ 
"VertexShaderUSSE"; -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszBSName = -+ "VertexShaderUSSE BS"; -+ psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID); -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_PDSPIXEL_CODEDATA_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapSize = -+ SGX_PDSPIXEL_CODEDATA_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_SINGLE_PROCESS; -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszName = -+ "PDSPixelCodeData"; -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszBSName = -+ "PDSPixelCodeData BS"; -+ psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID); -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].sDevVAddrBase. -+ uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapSize = -+ SGX_PDSVERTEX_CODEDATA_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_SINGLE_PROCESS; -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszName = -+ "PDSVertexCodeData"; -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszBSName = -+ "PDSVertexCodeData BS"; -+ psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID); -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_SYNCINFO_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapSize = -+ SGX_SYNCINFO_HEAP_SIZE; -+ -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszName = "CacheCoherent"; -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszBSName = "CacheCoherent BS"; -+ -+ psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED_EXPORTED; -+ -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID); -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_3DPARAMETERS_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapSize = -+ SGX_3DPARAMETERS_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszName = "3DParameters"; -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszBSName = -+ "3DParameters BS"; -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | -+ PVRSRV_HAP_SINGLE_PROCESS; -+ psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_PERCONTEXT; -+ -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID); -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_GENERAL_MAPPING_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapSize = -+ 
SGX_GENERAL_MAPPING_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszName = -+ "GeneralMapping"; -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszBSName = -+ "GeneralMapping BS"; -+ -+ psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED_EXPORTED; -+ -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_FB_MAPPING_HEAP_ID); -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_FB_MAPPING_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].ui32HeapSize = -+ SGX_FB_MAPPING_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].pszName = -+ "FramebufferMapping"; -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].pszBSName = -+ "FramebufferMapping BS"; -+ -+ psDeviceMemoryHeap[SGX_FB_MAPPING_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED_EXPORTED; -+ -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].ui32HeapID = -+ HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_ALT_MAPPING_HEAP_ID); -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = -+ SGX_ALT_MAPPING_HEAP_BASE; -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].ui32HeapSize = -+ SGX_ALT_MAPPING_HEAP_SIZE; -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].ui32Attribs = -+ PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS; -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].pszName = "AltMapping"; -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].pszBSName = "AltMapping BS"; -+ -+ psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].DevMemHeapType = -+ DEVICE_MEMORY_HEAP_SHARED_EXPORTED; -+ -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie, -+ PVR3DIF4_CLIENT_INFO * psClientInfo) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookie)-> -+ pvDevice; -+ -+ psDevInfo->ui32ClientRefCount++; -+#ifdef PDUMP -+ if (psDevInfo->ui32ClientRefCount == 1) { -+ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0; -+ } -+#endif -+ -+ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM(); -+ -+ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, -+ sizeof(psClientInfo->asDevData)); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO * psDevInfo, -+ SGX_MISC_INFO * psMiscInfo) -+{ -+ switch (psMiscInfo->eRequest) { -+ case SGX_MISC_INFO_REQUEST_CLOCKSPEED: -+ { -+ psMiscInfo->uData.ui32SGXClockSpeed = -+ psDevInfo->ui32CoreClockSpeed; -+ return PVRSRV_OK; -+ } -+ case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON: -+ { -+ psDevInfo->psSGXHostCtl->ui32HWPerfFlags |= -+ PVRSRV_SGX_HWPERF_ON; -+ return PVRSRV_OK; -+ } -+ case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF: -+ { -+ psDevInfo->psSGXHostCtl->ui32HWPerfFlags &= -+ ~PVRSRV_SGX_HWPERF_ON; -+ return PVRSRV_OK; -+ } -+ case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB: -+ { -+ SGX_MISC_INFO_HWPERF_RETRIEVE_CB *psRetrieve = -+ &psMiscInfo->uData.sRetrieveCB; -+ PVRSRV_SGX_HWPERF_CB *psHWPerfCB = -+ (PVRSRV_SGX_HWPERF_CB *) psDevInfo-> -+ psKernelHWPerfCBMemInfo->pvLinAddrKM; -+ IMG_UINT i = 0; -+ -+ for (; -+ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff -+ && i < psRetrieve->ui32ArraySize; i++) { -+ PVRSRV_SGX_HWPERF_CBDATA *psData = -+ &psHWPerfCB->psHWPerfCBData[psHWPerfCB-> -+ ui32Roff]; -+ OSMemCopy(&psRetrieve->psHWPerfData[i], 
psData, -+ sizeof(PVRSRV_SGX_HWPERF_CBDATA)); -+ psRetrieve->psHWPerfData[i].ui32ClockSpeed = -+ psDevInfo->ui32CoreClockSpeed; -+ psRetrieve->psHWPerfData[i].ui32TimeMax = -+ psDevInfo->ui32uKernelTimerClock; -+ psHWPerfCB->ui32Roff = -+ (psHWPerfCB->ui32Roff + -+ 1) & (PVRSRV_SGX_HWPERF_CBSIZE - 1); -+ } -+ psRetrieve->ui32DataCount = i; -+ psRetrieve->ui32Time = OSClockus(); -+ return PVRSRV_OK; -+ } -+ default: -+ { -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle, -+ IMG_UINT32 ui32Reg, -+ IMG_UINT32 * pui32Old, -+ IMG_BOOL bNew, -+ IMG_UINT32 ui32New, -+ IMG_UINT32 ui32NewReset, -+ IMG_UINT32 ui32CountersReg, -+ IMG_UINT32 * pui32Time, -+ IMG_BOOL * pbActive, -+ PVRSRV_SGXDEV_DIFF_INFO * psDiffs) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ IMG_BOOL bPowered = IMG_FALSE; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (bNew) { -+ psDevInfo->ui32HWGroupRequested = ui32New; -+ } -+ psDevInfo->ui32HWReset |= ui32NewReset; -+ -+ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE); -+ if (eError != PVRSRV_OK) { -+ return eError; -+ } -+ -+ SysAcquireData(&psSysData); -+ -+ psPowerDevice = psSysData->psPowerDeviceList; -+ while (psPowerDevice) { -+ if (psPowerDevice->ui32DeviceIndex == -+ psDeviceNode->sDevId.ui32DeviceIndex) { -+ bPowered = -+ (IMG_BOOL) (psPowerDevice->eCurrentPowerState == -+ PVRSRV_POWER_STATE_D0); -+ break; -+ } -+ -+ psPowerDevice = psPowerDevice->psNext; -+ } -+ -+ *pbActive = bPowered; -+ -+ { -+ PVRSRV_SGXDEV_DIFF_INFO sNew, *psPrev = &psDevInfo->sDiffInfo; -+ IMG_UINT32 i; -+ -+ sNew.ui32Time[0] = OSClockus(); -+ -+ *pui32Time = sNew.ui32Time[0]; -+ -+ if (sNew.ui32Time[0] != psPrev->ui32Time[0] && bPowered) { -+ -+ *pui32Old = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg); -+ -+ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i) { -+ sNew.aui32Counters[i] = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, -+ ui32CountersReg + (i * 4)); -+ } -+ -+ if (psDevInfo->ui32HWGroupRequested != *pui32Old) { -+ -+ if (psDevInfo->ui32HWReset != 0) { -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, -+ ui32Reg, -+ psDevInfo-> -+ ui32HWGroupRequested | -+ psDevInfo->ui32HWReset); -+ psDevInfo->ui32HWReset = 0; -+ } -+ -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, -+ psDevInfo->ui32HWGroupRequested); -+ } -+ -+ sNew.ui32Marker[0] = psDevInfo->ui32KickTACounter; -+ sNew.ui32Marker[1] = psDevInfo->ui32KickTARenderCounter; -+ -+ sNew.ui32Time[1] = -+ psDevInfo->psSGXHostCtl->ui32TimeWraps; -+ -+ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i) { -+ psDiffs->aui32Counters[i] = -+ sNew.aui32Counters[i] - -+ psPrev->aui32Counters[i]; -+ } -+ -+ psDiffs->ui32Marker[0] = -+ sNew.ui32Marker[0] - psPrev->ui32Marker[0]; -+ psDiffs->ui32Marker[1] = -+ sNew.ui32Marker[1] - psPrev->ui32Marker[1]; -+ -+ psDiffs->ui32Time[0] = -+ sNew.ui32Time[0] - psPrev->ui32Time[0]; -+ psDiffs->ui32Time[1] = -+ sNew.ui32Time[1] - psPrev->ui32Time[1]; -+ -+ *psPrev = sNew; -+ } else { -+ -+ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i) { -+ psDiffs->aui32Counters[i] = 0; -+ } -+ -+ psDiffs->ui32Marker[0] = 0; -+ psDiffs->ui32Marker[1] = 0; -+ -+ psDiffs->ui32Time[0] = 0; -+ psDiffs->ui32Time[1] = 0; -+ } -+ } -+ -+ PVRSRVPowerUnlock(KERNEL_ID); -+ -+ SGXTestActivePowerEvent(psDeviceNode, KERNEL_ID); -+ -+ return eError; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxkick.c 
linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxkick.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxkick.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxkick.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,324 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include "services_headers.h" -+#include "sgxinfo.h" -+#include "sgxinfokm.h" -+#if defined (PDUMP) -+#include "sgxapi_km.h" -+#include "pdump_km.h" -+#endif -+#include "sgx_bridge_km.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "sgxutils.h" -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, -+ PVR3DIF4_CCB_KICK * psCCBKick) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; -+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = -+ (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo; -+ PVR3DIF4_CMDTA_SHARED *psTACmd; -+ IMG_UINT32 i; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevHandle; -+ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+ if (psCCBKick->bKickRender) { -+ ++psDevInfo->ui32KickTARenderCounter; -+ } -+ ++psDevInfo->ui32KickTACounter; -+ -+ if (!CCB_OFFSET_IS_VALID -+ (PVR3DIF4_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset)) { -+ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psTACmd = -+ CCB_DATA_FROM_OFFSET(PVR3DIF4_CMDTA_SHARED, psCCBMemInfo, psCCBKick, -+ ui32CCBOffset); -+ -+ if (psCCBKick->hTA3DSyncInfo) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hTA3DSyncInfo; -+ psTACmd->sTA3DDependancy.sWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ -+ psTACmd->sTA3DDependancy.ui32WriteOpPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending; -+ -+ if (psCCBKick->bTADependency) { -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ } -+ } -+ -+ if (psCCBKick->hTASyncInfo != IMG_NULL) { -+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hTASyncInfo; -+ -+ psTACmd->sTQSyncReadOpsCompleteDevVAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ psTACmd->sTQSyncWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ -+ psTACmd->ui32TQSyncReadOpsPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending++; -+ psTACmd->ui32TQSyncWriteOpsPendingVal = -+ 
psSyncInfo->psSyncData->ui32WriteOpsPending; -+ } -+ -+ if (psCCBKick->h3DSyncInfo != IMG_NULL) { -+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->h3DSyncInfo; -+ -+ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ -+ psTACmd->ui323DTQSyncReadOpsPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending++; -+ psTACmd->ui323DTQSyncWriteOpsPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending; -+ } -+ -+ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals; -+ if (psCCBKick->ui32NumTAStatusVals != 0) { -+ -+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ ahTAStatusSyncInfo[i]; -+ -+ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ } -+ } -+ -+ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals; -+ if (psCCBKick->ui32Num3DStatusVals != 0) { -+ -+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ ah3DStatusSyncInfo[i]; -+ -+ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ } -+ } -+ -+ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs; -+ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ ahSrcKernelSyncInfo[i]; -+ -+ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psTACmd->asSrcSyncs[i].ui32ReadOpPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending++; -+ -+ psTACmd->asSrcSyncs[i].ui32WriteOpPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending; -+ -+ } -+ -+ if (psCCBKick->bFirstKickOrResume -+ && psCCBKick->hRenderSurfSyncInfo != IMG_NULL) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hRenderSurfSyncInfo; -+ psTACmd->sWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ psTACmd->sReadOpsCompleteDevVAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psTACmd->ui32ReadOpsPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ psTACmd->ui32WriteOpsPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ -+#if defined(PDUMP) -+ if (PDumpIsCaptureFrameKM()) { -+ if (psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) { -+ -+ PDUMPCOMMENT("Init render surface last op\r\n"); -+ -+ PDUMPMEM(IMG_NULL, -+ psSyncInfo->psSyncDataMemInfoKM, -+ 0, -+ sizeof(PVRSRV_SYNC_DATA), -+ 0, -+ MAKEUNIQUETAG(psSyncInfo-> -+ psSyncDataMemInfoKM)); -+ -+ PDUMPMEM(&psSyncInfo->psSyncData-> -+ ui32LastOpDumpVal, -+ psSyncInfo->psSyncDataMemInfoKM, -+ offsetof(PVRSRV_SYNC_DATA, -+ ui32WriteOpsComplete), -+ sizeof(psSyncInfo->psSyncData-> -+ ui32WriteOpsComplete), 0, -+ MAKEUNIQUETAG(psSyncInfo-> -+ psSyncDataMemInfoKM)); -+ } -+ -+ psSyncInfo->psSyncData->ui32LastOpDumpVal++; -+ } -+#endif -+ } -+#if defined(PDUMP) -+ if (PDumpIsCaptureFrameKM()) { -+ PDUMPCOMMENT("Shared part of TA command\r\n"); -+ -+ PDUMPMEM(psTACmd, -+ psCCBMemInfo, -+ psCCBKick->ui32CCBDumpWOff, -+ sizeof(PVR3DIF4_CMDTA_SHARED), -+ 0, MAKEUNIQUETAG(psCCBMemInfo)); -+ -+ if (psCCBKick->hRenderSurfSyncInfo != 
IMG_NULL) { -+ IMG_UINT32 ui32HackValue; -+ -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ hRenderSurfSyncInfo; -+ ui32HackValue = -+ psSyncInfo->psSyncData->ui32LastOpDumpVal - 1; -+ -+ PDUMPCOMMENT -+ ("Hack render surface last op in TA cmd\r\n"); -+ -+ PDUMPMEM(&ui32HackValue, -+ psCCBMemInfo, -+ psCCBKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_CMDTA_SHARED, -+ ui32WriteOpsPendingVal), -+ sizeof(IMG_UINT32), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ -+ ui32HackValue = 0; -+ PDUMPCOMMENT -+ ("Hack render surface read op in TA cmd\r\n"); -+ -+ PDUMPMEM(&ui32HackValue, -+ psCCBMemInfo, -+ psCCBKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_CMDTA_SHARED, -+ sReadOpsCompleteDevVAddr), -+ sizeof(IMG_UINT32), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ } -+ -+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ ahTAStatusSyncInfo[i]; -+ -+ PDUMPCOMMENT("Hack TA status value in TA cmd\r\n"); -+ -+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, -+ psCCBMemInfo, -+ psCCBKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_CMDTA_SHARED, -+ sCtlTAStatusInfo[i].ui32StatusValue), -+ sizeof(IMG_UINT32), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ } -+ -+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ ah3DStatusSyncInfo[i]; -+ -+ PDUMPCOMMENT("Hack 3D status value in TA cmd\r\n"); -+ -+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, -+ psCCBMemInfo, -+ psCCBKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_CMDTA_SHARED, -+ sCtl3DStatusInfo[i].ui32StatusValue), -+ sizeof(IMG_UINT32), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ } -+ } -+#endif -+ -+ eError = -+ SGXScheduleCCBCommandKM(hDevHandle, psCCBKick->eCommand, -+ &psCCBKick->sCommand, KERNEL_ID); -+ if (eError == PVRSRV_ERROR_RETRY) { -+ if (psCCBKick->bFirstKickOrResume -+ && psCCBKick->hRenderSurfSyncInfo != IMG_NULL) { -+ -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ hRenderSurfSyncInfo; -+ psSyncInfo->psSyncData->ui32WriteOpsPending--; -+ } -+ -+ for (i = 0; i < psCCBKick->ui32NumSrcSyncs; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ ahSrcKernelSyncInfo[i]; -+ psSyncInfo->psSyncData->ui32ReadOpsPending--; -+ } -+ -+#if defined(PDUMP) -+ if (psCCBKick->bFirstKickOrResume -+ && psCCBKick->hRenderSurfSyncInfo != IMG_NULL) { -+ if (PDumpIsCaptureFrameKM()) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick-> -+ hRenderSurfSyncInfo; -+ psSyncInfo->psSyncData->ui32LastOpDumpVal--; -+ } -+ } -+#endif -+ -+ return eError; -+ } else if (PVRSRV_OK != eError) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXDoKickKM: SGXScheduleCCBCommandKM failed.")); -+ return eError; -+ } -+ -+ -+ return eError; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxmmu.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxmmu.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxmmu.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxmmu.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,56 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__SGXMMU_KM_H__) -+#define __SGXMMU_KM_H__ -+ -+#define SGX_MMU_PAGE_SHIFT (12) -+#define SGX_MMU_PAGE_SIZE (1UL< -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "sgxdefs.h" -+#include "sgxmmu.h" -+#include "services_headers.h" -+#include "sgxinfokm.h" -+#include "sgxconfig.h" -+ -+#include "pdump_km.h" -+ -+#define SGX_BIF_DIR_LIST_REG_EDM EUR_CR_BIF_DIR_LIST_BASE0 -+ -+static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO * psDevInfo, -+ IMG_BOOL bResetBIF, -+ IMG_UINT32 ui32PDUMPFlags, IMG_BOOL bPDump) -+{ -+ IMG_UINT32 ui32SoftResetRegVal = -+#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK -+ EUR_CR_SOFT_RESET_TWOD_RESET_MASK | -+#endif -+ EUR_CR_SOFT_RESET_DPM_RESET_MASK | -+ EUR_CR_SOFT_RESET_TA_RESET_MASK | -+ EUR_CR_SOFT_RESET_USE_RESET_MASK | -+ EUR_CR_SOFT_RESET_ISP_RESET_MASK | EUR_CR_SOFT_RESET_TSP_RESET_MASK; -+ -+#if !defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); -+#endif -+ -+ if (bResetBIF) { -+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK; -+ } -+ -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, -+ ui32SoftResetRegVal); -+ if (bPDump) { -+ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal, -+ ui32PDUMPFlags); -+ } -+} -+ -+static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO * psDevInfo, -+ IMG_UINT32 ui32PDUMPFlags, IMG_BOOL bPDump) -+{ -+#if !defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); -+#endif -+ -+ OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed); -+ if (bPDump) { -+ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags); -+#if defined(PDUMP) -+ PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags); -+#endif -+ } -+ -+} -+ -+static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO * psDevInfo, -+ IMG_UINT32 ui32PDUMPFlags, IMG_BOOL bPDump) -+{ -+ IMG_UINT32 ui32RegVal; -+ -+ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); -+ if (bPDump) { -+ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); -+ } -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump); -+ -+ ui32RegVal = 0; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); -+ if (bPDump) { -+ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); -+ } -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump); -+ -+ { -+ -+ if (PollForValueKM -+ ((IMG_UINT32 *) ((IMG_UINT8 *) psDevInfo->pvRegsBaseKM + -+ EUR_CR_BIF_MEM_REQ_STAT), 0, -+ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, -+ MAX_HW_TIME_US / WAIT_TRY_COUNT, -+ WAIT_TRY_COUNT) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Wait for DC invalidate failed.")); -+ } -+ -+ if (bPDump) { -+ 
PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0, -+ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, -+ ui32PDUMPFlags); -+ } -+ } -+} -+ -+IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO * psDevInfo, IMG_UINT32 ui32PDUMPFlags) -+{ -+ IMG_UINT32 ui32RegVal; -+ -+ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK; -+ -+ -+#ifndef PDUMP -+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); -+#endif -+ -+ psDevInfo->ui32NumResets++; -+ -+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, -+ "Start of SGX reset sequence\r\n"); -+ -+#if defined(FIX_HW_BRN_23944) -+ -+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); -+ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ -+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); -+ if (ui32RegVal & ui32BifFaultMask) { -+ -+ ui32RegVal = -+ EUR_CR_BIF_CTRL_PAUSE_MASK | -+ EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, -+ ui32RegVal); -+ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ -+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, -+ ui32RegVal); -+ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ } -+#endif -+ -+ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ -+ -+ ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, -+ ui32RegVal); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); -+ -+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); -+ -+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); -+ -+ for (;;) { -+ IMG_UINT32 ui32BifIntStat = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); -+ IMG_DEV_VIRTADDR sBifFault; -+ IMG_UINT32 ui32PDIndex, ui32PTIndex; -+ -+ if ((ui32BifIntStat & ui32BifFaultMask) == 0) { -+ break; -+ } -+ -+ sBifFault.uiAddr = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); -+ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", -+ ui32BifIntStat, sBifFault.uiAddr)); -+ ui32PDIndex = -+ sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); -+ ui32PTIndex = -+ (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; -+ -+ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, -+ IMG_FALSE); -+ -+ psDevInfo->pui32BIFResetPD[ui32PDIndex] = -+ psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID; -+ psDevInfo->pui32BIFResetPT[ui32PTIndex] = -+ psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID; -+ -+ ui32RegVal = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, -+ ui32RegVal); -+ ui32RegVal = -+ OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, -+ ui32RegVal); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); -+ -+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, -+ IMG_FALSE); -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); -+ -+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); -+ -+ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0; -+ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0; -+ } -+ -+ -+ 
OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, -+ psDevInfo->sKernelPDDevPAddr.uiAddr); -+ PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, -+ psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, -+ PDUMP_PD_UNIQUETAG); -+ -+ -+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ -+ PVR_DPF((PVR_DBG_WARNING, "Soft Reset of SGX")); -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ -+ ui32RegVal = 0; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal); -+ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); -+ -+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); -+ -+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n"); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxscript.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxscript.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxscript.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxscript.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,64 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef __SGXSCRIPT_H__ -+#define __SGXSCRIPT_H__ -+ -+ -+#define SGX_MAX_INIT_COMMANDS 64 -+#define SGX_MAX_DEINIT_COMMANDS 16 -+ -+ typedef enum _SGX_INIT_OPERATION { -+ SGX_INIT_OP_ILLEGAL = 0, -+ SGX_INIT_OP_WRITE_HW_REG, -+#if defined(PDUMP) -+ SGX_INIT_OP_PDUMP_HW_REG, -+#endif -+ SGX_INIT_OP_HALT -+ } SGX_INIT_OPERATION; -+ -+ typedef union _SGX_INIT_COMMAND { -+ SGX_INIT_OPERATION eOp; -+ struct { -+ SGX_INIT_OPERATION eOp; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Value; -+ } sWriteHWReg; -+#if defined(PDUMP) -+ struct { -+ SGX_INIT_OPERATION eOp; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Value; -+ } sPDumpHWReg; -+#endif -+ } SGX_INIT_COMMAND; -+ -+ typedef struct _SGX_INIT_SCRIPTS_ { -+ SGX_INIT_COMMAND asInitCommands[SGX_MAX_INIT_COMMANDS]; -+ SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS]; -+ } SGX_INIT_SCRIPTS; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxtransfer.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxtransfer.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxtransfer.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxtransfer.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,281 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+ -+#include -+ -+#include "sgxdefs.h" -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "sgxinfo.h" -+#include "sysconfig.h" -+#include "regpaths.h" -+#include "pdump_km.h" -+#include "mmu.h" -+#include "pvr_bridge.h" -+#include "sgx_bridge_km.h" -+#include "sgxinfokm.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "sgxutils.h" -+ -+IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, -+ PVRSRV_TRANSFER_SGX_KICK * psKick) -+{ -+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = -+ (PVRSRV_KERNEL_MEM_INFO *) psKick->hCCBMemInfo; -+ PVRSRV_SGX_COMMAND sCommand = { 0 }; -+ PVR3DIF4_TRANSFERCMD_SHARED *psTransferCmd; -+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ -+ if (!CCB_OFFSET_IS_VALID -+ (PVR3DIF4_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, -+ ui32SharedCmdCCBOffset)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXSubmitTransferKM: Invalid CCB offset")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psTransferCmd = -+ CCB_DATA_FROM_OFFSET(PVR3DIF4_TRANSFERCMD_SHARED, psCCBMemInfo, -+ psKick, ui32SharedCmdCCBOffset); -+ -+ if (psTransferCmd->ui32NumStatusVals > SGXTQ_MAX_STATUS) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psKick->ui32StatusFirstSync + -+ (psKick->ui32NumSrcSync ? (psKick->ui32NumSrcSync - 1) : 0) + -+ (psKick->ui32NumDstSync ? (psKick->ui32NumDstSync - 1) : 0) > -+ psTransferCmd->ui32NumStatusVals) { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psKick->hTASyncInfo != IMG_NULL) { -+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->hTASyncInfo; -+ -+ psTransferCmd->ui32TASyncWriteOpsPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ psTransferCmd->ui32TASyncReadOpsPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ -+ psTransferCmd->sTASyncWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ psTransferCmd->sTASyncReadOpsCompleteDevVAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ } else { -+ psTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0; -+ psTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0; -+ } -+ -+ if (psKick->h3DSyncInfo != IMG_NULL) { -+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psKick->h3DSyncInfo; -+ -+ psTransferCmd->ui323DSyncWriteOpsPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ psTransferCmd->ui323DSyncReadOpsPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ -+ psTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ psTransferCmd->s3DSyncReadOpsCompleteDevVAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ } else { -+ psTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0; -+ psTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0; -+ } -+ -+ psTransferCmd->ui32NumSrcSync = psKick->ui32NumSrcSync; -+ psTransferCmd->ui32NumDstSync = psKick->ui32NumDstSync; -+ -+ if (psKick->ui32NumSrcSync > 0) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick->ahSrcSyncInfo[0]; -+ -+ psTransferCmd->ui32SrcWriteOpPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending; -+ psTransferCmd->ui32SrcReadOpPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ -+ psTransferCmd->sSrcWriteOpsCompleteDevAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ psTransferCmd->sSrcReadOpsCompleteDevAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ } -+ if (psKick->ui32NumDstSync > 0) { -+ psSyncInfo = -+ 
(PVRSRV_KERNEL_SYNC_INFO *) psKick->ahDstSyncInfo[0]; -+ -+ psTransferCmd->ui32DstWriteOpPendingVal = -+ psSyncInfo->psSyncData->ui32WriteOpsPending; -+ psTransferCmd->ui32DstReadOpPendingVal = -+ psSyncInfo->psSyncData->ui32ReadOpsPending; -+ -+ psTransferCmd->sDstWriteOpsCompleteDevAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ psTransferCmd->sDstReadOpsCompleteDevAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ } -+ -+ if (psKick->ui32NumSrcSync > 0) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick->ahSrcSyncInfo[0]; -+ psSyncInfo->psSyncData->ui32ReadOpsPending++; -+ -+ } -+ if (psKick->ui32NumDstSync > 0) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick->ahDstSyncInfo[0]; -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ } -+ -+ if (psKick->ui32NumSrcSync > 1) { -+ for (i = 1; i < psKick->ui32NumSrcSync; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick-> -+ ahSrcSyncInfo[i]; -+ -+ psTransferCmd->sCtlStatusInfo[psKick-> -+ ui32StatusFirstSync]. -+ ui32StatusValue = -+ psSyncInfo->psSyncData->ui32ReadOpsPending++; -+ -+ psTransferCmd->sCtlStatusInfo[psKick-> -+ ui32StatusFirstSync]. -+ sStatusDevAddr = -+ psSyncInfo->sReadOpsCompleteDevVAddr; -+ -+ psKick->ui32StatusFirstSync++; -+ } -+ } -+ -+ if (psKick->ui32NumDstSync > 1) { -+ for (i = 1; i < psKick->ui32NumDstSync; i++) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick-> -+ ahDstSyncInfo[i]; -+ -+ psTransferCmd->sCtlStatusInfo[psKick-> -+ ui32StatusFirstSync]. -+ ui32StatusValue = -+ psSyncInfo->psSyncData->ui32WriteOpsPending++; -+ -+ psTransferCmd->sCtlStatusInfo[psKick-> -+ ui32StatusFirstSync]. -+ sStatusDevAddr = -+ psSyncInfo->sWriteOpsCompleteDevVAddr; -+ -+ psKick->ui32StatusFirstSync++; -+ } -+ } -+#if defined(PDUMP) -+ if (PDumpIsCaptureFrameKM()) { -+ PDUMPCOMMENT("Shared part of transfer command\r\n"); -+ PDUMPMEM(psTransferCmd, -+ psCCBMemInfo, -+ psKick->ui32CCBDumpWOff, -+ sizeof(PVR3DIF4_TRANSFERCMD_SHARED), -+ 0, MAKEUNIQUETAG(psCCBMemInfo)); -+ -+ if (psKick->ui32NumSrcSync > 0) { -+ psSyncInfo = psKick->ahSrcSyncInfo[0]; -+ -+ PDUMPCOMMENT -+ ("Hack src surface write op in transfer cmd\r\n"); -+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, -+ psCCBMemInfo, -+ psKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_TRANSFERCMD_SHARED, -+ ui32SrcWriteOpPendingVal), -+ sizeof(psSyncInfo->psSyncData-> -+ ui32LastOpDumpVal), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ -+ PDUMPCOMMENT -+ ("Hack src surface read op in transfer cmd\r\n"); -+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, -+ psCCBMemInfo, -+ psKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_TRANSFERCMD_SHARED, -+ ui32SrcReadOpPendingVal), -+ sizeof(psSyncInfo->psSyncData-> -+ ui32LastReadOpDumpVal), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ } -+ if (psKick->ui32NumDstSync > 0) { -+ psSyncInfo = psKick->ahDstSyncInfo[0]; -+ -+ PDUMPCOMMENT -+ ("Hack dest surface write op in transfer cmd\r\n"); -+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, -+ psCCBMemInfo, -+ psKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_TRANSFERCMD_SHARED, -+ ui32DstWriteOpPendingVal), -+ sizeof(psSyncInfo->psSyncData-> -+ ui32LastOpDumpVal), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ -+ PDUMPCOMMENT -+ ("Hack dest surface read op in transfer cmd\r\n"); -+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, -+ psCCBMemInfo, -+ psKick->ui32CCBDumpWOff + -+ offsetof(PVR3DIF4_TRANSFERCMD_SHARED, -+ ui32DstReadOpPendingVal), -+ sizeof(psSyncInfo->psSyncData-> -+ ui32LastReadOpDumpVal), 0, -+ MAKEUNIQUETAG(psCCBMemInfo)); -+ } -+ -+ if 
(psKick->ui32NumSrcSync > 0) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick-> -+ ahSrcSyncInfo[0]; -+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; -+ -+ } -+ if (psKick->ui32NumDstSync > 0) { -+ psSyncInfo = -+ (PVRSRV_KERNEL_SYNC_INFO *) psKick-> -+ ahDstSyncInfo[0]; -+ psSyncInfo->psSyncData->ui32LastOpDumpVal++; -+ } -+ } -+#endif -+ -+ sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD; -+ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr; -+ -+ eError = -+ SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, -+ &sCommand, KERNEL_ID); -+ -+ -+ return eError; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxutils.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxutils.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxutils.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxutils.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,792 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "sgxdefs.h" -+#include "services_headers.h" -+#include "buffer_manager.h" -+#include "sgxapi_km.h" -+#include "sgxinfo.h" -+#include "sgxinfokm.h" -+#include "sysconfig.h" -+#include "pdump_km.h" -+#include "mmu.h" -+#include "pvr_bridge_km.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "sgxutils.h" -+ -+#include -+ -+ -+IMG_BOOL gbPowerUpPDumped = IMG_FALSE; -+ -+IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32CallerID) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; -+ -+ if ((psSGXHostCtl-> -+ ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) -+ && !(psSGXHostCtl-> -+ ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST)) { -+ -+ { -+ -+ PDUMPSUSPEND(); -+ -+ eError = -+ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId. 
-+ ui32DeviceIndex, -+ PVRSRV_POWER_STATE_D3, -+ ui32CallerID, -+ IMG_FALSE); -+ if (eError == PVRSRV_OK) { -+ -+ psSGXHostCtl->ui32NumActivePowerEvents++; -+ -+ if ((*(volatile IMG_UINT32 *) -+ (&psSGXHostCtl->ui32PowManFlags) -+ & -+ PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) -+ != 0) { -+ -+ if (ui32CallerID == ISR_ID) { -+ psDeviceNode-> -+ bReProcessDeviceCommandComplete -+ = IMG_TRUE; -+ } else { -+ SGXScheduleProcessQueues -+ (psDeviceNode); -+ } -+ } -+ } -+ if (eError == PVRSRV_ERROR_RETRY) { -+ -+ eError = PVRSRV_OK; -+ } -+ -+ PDUMPRESUME(); -+ } -+ } -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu", -+ eError)); -+ } -+} -+ -+static INLINE PVRSRV_SGX_COMMAND *SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO * -+ psCCB) -+{ -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0; -+ -+ do { -+ if (((*psCCB->pui32WriteOffset + 1) & 255) != -+ *psCCB->pui32ReadOffset) { -+ return &psCCB->psCommands[*psCCB->pui32WriteOffset]; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ bStart = IMG_TRUE; -+ uiStart = OSClockus(); -+ } -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); -+ -+ return IMG_NULL; -+} -+ -+PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE * psDeviceNode, -+ PVRSRV_SGX_COMMAND_TYPE eCommandType, -+ PVRSRV_SGX_COMMAND * psCommandData, -+ IMG_UINT32 ui32CallerID) -+{ -+ PVRSRV_SGX_CCB_INFO *psKernelCCB; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_SGXDEV_INFO *psDevInfo; -+ PVRSRV_SGX_COMMAND *psSGXCommand; -+#if defined(PDUMP) -+ IMG_VOID *pvDumpCommand; -+#endif -+ -+ psDevInfo = (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ psKernelCCB = psDevInfo->psKernelCCBInfo; -+ -+ { -+ if (ui32CallerID == ISR_ID || gbPowerUpPDumped) { -+ PDUMPSUSPEND(); -+ } -+ -+ eError = -+ PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId. 
-+ ui32DeviceIndex, -+ PVRSRV_POWER_STATE_D0, -+ ui32CallerID, IMG_TRUE); -+ -+ if (ui32CallerID == ISR_ID || gbPowerUpPDumped) { -+ PDUMPRESUME(); -+ } else if (eError == PVRSRV_OK) { -+ gbPowerUpPDumped = IMG_TRUE; -+ } -+ } -+ -+ if (eError == PVRSRV_OK) { -+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE; -+ } else { -+ if (eError == PVRSRV_ERROR_RETRY) { -+ if (ui32CallerID == ISR_ID) { -+ -+ psDeviceNode->bReProcessDeviceCommandComplete = -+ IMG_TRUE; -+ eError = PVRSRV_OK; -+ } else { -+ -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXScheduleCCBCommandKM failed to acquire lock - " -+ "ui32CallerID:%ld eError:%lu", ui32CallerID, -+ eError)); -+ } -+ -+ return eError; -+ } -+ -+ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB); -+ -+ if (!psSGXCommand) { -+ eError = PVRSRV_ERROR_TIMEOUT; -+ goto Exit; -+ } -+ -+ psCommandData->ui32Data[2] = psDevInfo->ui32CacheControl; -+ -+#if defined(PDUMP) -+ -+ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl; -+#endif -+ -+ psDevInfo->ui32CacheControl = 0; -+ -+ *psSGXCommand = *psCommandData; -+ -+ switch (eCommandType) { -+ case PVRSRV_SGX_COMMAND_EDM_KICK: -+ psSGXCommand->ui32ServiceAddress = psDevInfo->ui32TAKickAddress; -+ break; -+ case PVRSRV_SGX_COMMAND_VIDEO_KICK: -+ psSGXCommand->ui32ServiceAddress = -+ psDevInfo->ui32VideoHandlerAddress; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXScheduleCCBCommandKM: Unknown command type: %d", -+ eCommandType)); -+ eError = PVRSRV_ERROR_GENERIC; -+ goto Exit; -+ } -+ -+#if defined(PDUMP) -+ if (ui32CallerID != ISR_ID) { -+ -+ PDUMPCOMMENTWITHFLAGS(0, -+ "Poll for space in the Kernel CCB\r\n"); -+ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, -+ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), -+ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, 0xff, -+ PDUMP_POLL_OPERATOR_NOTEQUAL, IMG_FALSE, IMG_FALSE, -+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); -+ -+ PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB command\r\n"); -+ pvDumpCommand = -+ (IMG_VOID *) ((IMG_UINT8 *) psKernelCCB->psCCBMemInfo-> -+ pvLinAddrKM + -+ (*psKernelCCB->pui32WriteOffset * -+ sizeof(PVRSRV_SGX_COMMAND))); -+ -+ PDUMPMEM(pvDumpCommand, -+ psKernelCCB->psCCBMemInfo, -+ psKernelCCB->ui32CCBDumpWOff * -+ sizeof(PVRSRV_SGX_COMMAND), sizeof(PVRSRV_SGX_COMMAND), -+ 0, MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo)); -+ -+ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl, -+ psKernelCCB->psCCBMemInfo, -+ psKernelCCB->ui32CCBDumpWOff * -+ sizeof(PVRSRV_SGX_COMMAND) + -+ offsetof(PVRSRV_SGX_COMMAND, ui32Data[2]), -+ sizeof(IMG_UINT32), 0, -+ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo)); -+ -+ if (PDumpIsCaptureFrameKM()) { -+ -+ psDevInfo->sPDContext.ui32CacheControl = 0; -+ } -+ } -+#endif -+ -+ *psKernelCCB->pui32WriteOffset = -+ (*psKernelCCB->pui32WriteOffset + 1) & 255; -+ -+#if defined(PDUMP) -+ if (ui32CallerID != ISR_ID) { -+ if (PDumpIsCaptureFrameKM()) { -+ psKernelCCB->ui32CCBDumpWOff = -+ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF; -+ } -+ -+ PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB write offset\r\n"); -+ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff, -+ psKernelCCB->psCCBCtlMemInfo, -+ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset), -+ sizeof(IMG_UINT32), -+ 0, MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); -+ PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB event kicker\r\n"); -+ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff, -+ psDevInfo->psKernelCCBEventKickerMemInfo, -+ 0, -+ sizeof(IMG_UINT32), -+ 0, -+ MAKEUNIQUETAG(psDevInfo-> -+ psKernelCCBEventKickerMemInfo)); -+ PDUMPCOMMENTWITHFLAGS(0, "Event kick\r\n"); -+ 
PDUMPREGWITHFLAGS(EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK, -+ 0); -+ } -+#endif -+ -+ *psDevInfo->pui32KernelCCBEventKicker = -+ (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF; -+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_KICK, -+ EUR_CR_EVENT_KICK_NOW_MASK); -+ -+ -+Exit: -+ PVRSRVPowerUnlock(ui32CallerID); -+ -+ if (ui32CallerID != ISR_ID) { -+ -+ SGXTestActivePowerEvent(psDeviceNode, ui32CallerID); -+ } -+ -+ return eError; -+} -+ -+IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_SGX_HOST_CTL *psHostCtl = -+ psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM; -+ IMG_UINT32 ui32PowManFlags; -+ PVRSRV_SGX_COMMAND sCommand = { 0 }; -+ -+ ui32PowManFlags = psHostCtl->ui32PowManFlags; -+ if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0) { -+ -+ return; -+ } -+ -+ sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD; -+ eError = -+ SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, -+ &sCommand, ISR_ID); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXScheduleProcessQueues failed to schedule CCB command: %lu", -+ eError)); -+ } -+} -+ -+#if defined (PDUMP) -+IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray, -+ IMG_UINT32 ui32BufferArrayLength, IMG_BOOL bDumpPolls) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ui32BufferArrayLength; i++) { -+ PPVR3DIF4_KICKTA_DUMP_BUFFER psBuffer; -+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; -+ IMG_CHAR *pszName; -+ IMG_HANDLE hUniqueTag; -+ -+ psBuffer = &psBufferArray[i]; -+ pszName = psBuffer->pszName; -+ if (!pszName) { -+ pszName = "Nameless buffer"; -+ } -+ -+ hUniqueTag = -+ MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *) psBuffer-> -+ hKernelMemInfo); -+ psSyncInfo = -+ ((PVRSRV_KERNEL_MEM_INFO *) psBuffer->hKernelMemInfo)-> -+ psKernelSyncInfo; -+ -+ if (psBuffer->ui32Start <= psBuffer->ui32End) { -+ if (bDumpPolls) { -+ PDUMPCOMMENTWITHFLAGS(0, -+ "Wait for %s space\r\n", -+ pszName); -+ PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM, -+ offsetof(PVRSRV_SYNC_DATA, -+ ui32ReadOpsComplete), -+ psBuffer->ui32Start, -+ psBuffer->ui32SpaceUsed, -+ psBuffer->ui32BufferSize, 0, -+ MAKEUNIQUETAG(psSyncInfo-> -+ psSyncDataMemInfoKM)); -+ } -+ -+ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName); -+ PDUMPMEM(NULL, -+ (PVRSRV_KERNEL_MEM_INFO *) psBuffer-> -+ hKernelMemInfo, psBuffer->ui32Start, -+ psBuffer->ui32End - psBuffer->ui32Start, 0, -+ hUniqueTag); -+ } else { -+ -+ if (bDumpPolls) { -+ PDUMPCOMMENTWITHFLAGS(0, -+ "Wait for %s space\r\n", -+ pszName); -+ PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM, -+ offsetof(PVRSRV_SYNC_DATA, -+ ui32ReadOpsComplete), -+ psBuffer->ui32Start, -+ psBuffer->ui32BackEndLength, -+ psBuffer->ui32BufferSize, 0, -+ MAKEUNIQUETAG(psSyncInfo-> -+ psSyncDataMemInfoKM)); -+ } -+ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName); -+ PDUMPMEM(NULL, -+ (PVRSRV_KERNEL_MEM_INFO *) psBuffer-> -+ hKernelMemInfo, psBuffer->ui32Start, -+ psBuffer->ui32BackEndLength, 0, hUniqueTag); -+ -+ if (bDumpPolls) { -+ PDUMPMEMPOL(psSyncInfo->psSyncDataMemInfoKM, -+ offsetof(PVRSRV_SYNC_DATA, -+ ui32ReadOpsComplete), 0, -+ 0xFFFFFFFF, -+ PDUMP_POLL_OPERATOR_NOTEQUAL, -+ IMG_FALSE, IMG_FALSE, -+ MAKEUNIQUETAG(psSyncInfo-> -+ psSyncDataMemInfoKM)); -+ -+ PDUMPCOMMENTWITHFLAGS(0, -+ "Wait for %s space\r\n", -+ pszName); -+ PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM, -+ offsetof(PVRSRV_SYNC_DATA, -+ ui32ReadOpsComplete), 0, -+ psBuffer->ui32End, -+ psBuffer->ui32BufferSize, 0, -+ 
MAKEUNIQUETAG(psSyncInfo-> -+ psSyncDataMemInfoKM)); -+ } -+ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName); -+ PDUMPMEM(NULL, -+ (PVRSRV_KERNEL_MEM_INFO *) psBuffer-> -+ hKernelMemInfo, 0, psBuffer->ui32End, 0, -+ hUniqueTag); -+ } -+ } -+} -+#endif -+ -+IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, -+ PVR3DIF4_INTERNAL_DEVINFO * -+ psSGXInternalDevInfo) -+{ -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) ((PVRSRV_DEVICE_NODE *) hDevCookie)-> -+ pvDevice; -+ -+ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags; -+ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL) psDevInfo->bForcePTOff; -+ -+ psSGXInternalDevInfo->hCtlKernelMemInfoHandle = -+ (IMG_HANDLE) psDevInfo->psKernelSGXHostCtlMemInfo; -+ -+ return PVRSRV_OK; -+} -+ -+static IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_DEV_VIRTADDR * psHWDataDevVAddr, -+ IMG_UINT32 ui32ResManRequestFlag) -+{ -+ PVRSRV_SGXDEV_INFO *psSGXDevInfo = -+ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = -+ psSGXDevInfo->psKernelSGXHostCtlMemInfo; -+ PVRSRV_SGX_HOST_CTL *psSGXHostCtl = -+ (PVRSRV_SGX_HOST_CTL *) psSGXHostCtlMemInfo->pvLinAddrKM; -+ IMG_UINT32 ui32PowManFlags; -+#if defined (PDUMP) -+ IMG_HANDLE hUniqueTag = MAKEUNIQUETAG(psSGXHostCtlMemInfo); -+#endif -+ -+ ui32PowManFlags = psSGXHostCtl->ui32PowManFlags; -+ if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0) { -+ -+ } else { -+ -+ if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PDCACHE) { -+ psSGXHostCtl->ui32ResManFlags |= -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD; -+ psSGXDevInfo->ui32CacheControl ^= -+ SGX_BIF_INVALIDATE_PDCACHE; -+ } -+ if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PTCACHE) { -+ psSGXHostCtl->ui32ResManFlags |= -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT; -+ psSGXDevInfo->ui32CacheControl ^= -+ SGX_BIF_INVALIDATE_PTCACHE; -+ } -+ -+ psSGXHostCtl->sResManCleanupData.uiAddr = -+ psHWDataDevVAddr->uiAddr; -+ -+ psSGXHostCtl->ui32ResManFlags |= ui32ResManRequestFlag; -+ -+ PDUMPCOMMENT -+ ("TA/3D CCB Control - Request clean-up event on uKernel..."); -+ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, -+ sResManCleanupData.uiAddr), -+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, -+ hUniqueTag); -+ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), -+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, -+ hUniqueTag); -+ -+ SGXScheduleProcessQueues(psDeviceNode); -+ -+ if (PollForValueKM -+ ((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32ResManFlags), -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE, -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE, -+ MAX_HW_TIME_US / WAIT_TRY_COUNT, -+ WAIT_TRY_COUNT) != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXCleanupRequest: Wait for uKernel to clean up render context failed")); -+ } -+ -+#ifdef PDUMP -+ -+ PDUMPCOMMENT -+ ("TA/3D CCB Control - Wait for clean-up request to complete..."); -+ PDUMPMEMPOL(psSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE, -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE, -+ PDUMP_POLL_OPERATOR_EQUAL, IMG_FALSE, IMG_FALSE, -+ hUniqueTag); -+#endif -+ -+ psSGXHostCtl->ui32ResManFlags &= ~(ui32ResManRequestFlag); -+ psSGXHostCtl->ui32ResManFlags &= -+ ~(PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE); -+ PDUMPMEM(IMG_NULL, 
psSGXHostCtlMemInfo, -+ offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), -+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, -+ hUniqueTag); -+ } -+} -+ -+typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_ { -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr; -+ IMG_HANDLE hBlockAlloc; -+ PRESMAN_ITEM psResItem; -+} SGX_HW_RENDER_CONTEXT_CLEANUP; -+ -+static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ SGXCleanupRequest(psCleanup->psDeviceNode, -+ &psCleanup->sHWRenderContextDevVAddr, -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), -+ psCleanup, psCleanup->hBlockAlloc); -+ -+ return PVRSRV_OK; -+} -+ -+typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_ { -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; -+ IMG_HANDLE hBlockAlloc; -+ PRESMAN_ITEM psResItem; -+} SGX_HW_TRANSFER_CONTEXT_CLEANUP; -+ -+static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam, -+ IMG_UINT32 ui32Param) -+{ -+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = -+ (SGX_HW_TRANSFER_CONTEXT_CLEANUP *) pvParam; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32Param); -+ -+ SGXCleanupRequest(psCleanup->psDeviceNode, -+ &psCleanup->sHWTransferContextDevVAddr, -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST); -+ -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), -+ psCleanup, psCleanup->hBlockAlloc); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_EXPORT -+ IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode, -+ IMG_DEV_VIRTADDR * -+ psHWRenderContextDevVAddr, -+ PVRSRV_PER_PROCESS_DATA * psPerProc) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hBlockAlloc; -+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; -+ PRESMAN_ITEM psResItem; -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), -+ (IMG_VOID **) & psCleanup, &hBlockAlloc); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure")); -+ return IMG_NULL; -+ } -+ -+ psCleanup->hBlockAlloc = hBlockAlloc; -+ psCleanup->psDeviceNode = psDeviceNode; -+ psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr; -+ -+ psResItem = ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_HW_RENDER_CONTEXT, -+ (IMG_VOID *) psCleanup, -+ 0, &SGXCleanupHWRenderContextCallback); -+ -+ if (psResItem == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXRegisterHWRenderContextKM: ResManRegisterRes failed")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), psCleanup, -+ psCleanup->hBlockAlloc); -+ -+ return IMG_NULL; -+ } -+ -+ psCleanup->psResItem = psResItem; -+ -+ return (IMG_HANDLE) psCleanup; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext) -+{ -+ PVRSRV_ERROR eError; -+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; -+ -+ PVR_ASSERT(hHWRenderContext != IMG_NULL); -+ -+ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *) hHWRenderContext; -+ -+ eError = ResManFreeResByPtr(psCleanup->psResItem); -+ -+ return eError; -+} -+ -+IMG_EXPORT -+ IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode, -+ IMG_DEV_VIRTADDR * -+ psHWTransferContextDevVAddr, -+ PVRSRV_PER_PROCESS_DATA * -+ psPerProc) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hBlockAlloc; -+ 
SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; -+ PRESMAN_ITEM psResItem; -+ -+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), -+ (IMG_VOID **) & psCleanup, &hBlockAlloc); -+ -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure")); -+ return IMG_NULL; -+ } -+ -+ psCleanup->hBlockAlloc = hBlockAlloc; -+ psCleanup->psDeviceNode = psDeviceNode; -+ psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr; -+ -+ psResItem = ResManRegisterRes(psPerProc->hResManContext, -+ RESMAN_TYPE_HW_TRANSFER_CONTEXT, -+ psCleanup, -+ 0, &SGXCleanupHWTransferContextCallback); -+ -+ if (psResItem == IMG_NULL) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGXRegisterHWTransferContextKM: ResManRegisterRes failed")); -+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, -+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), psCleanup, -+ psCleanup->hBlockAlloc); -+ -+ return IMG_NULL; -+ } -+ -+ psCleanup->psResItem = psResItem; -+ -+ return (IMG_HANDLE) psCleanup; -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext) -+{ -+ PVRSRV_ERROR eError; -+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; -+ -+ PVR_ASSERT(hHWTransferContext != IMG_NULL); -+ -+ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *) hHWTransferContext; -+ -+ eError = ResManFreeResByPtr(psCleanup->psResItem); -+ -+ return eError; -+} -+ -+ -+static INLINE -+ IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO * psSyncInfo) -+{ -+ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData; -+ -+ return (IMG_BOOL) ((psSyncData->ui32ReadOpsComplete == -+ psSyncData->ui32ReadOpsPending) -+ && (psSyncData->ui32WriteOpsComplete == -+ psSyncData->ui32WriteOpsPending) -+ ); -+} -+ -+IMG_EXPORT -+ PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO * psDevInfo, -+ PVRSRV_KERNEL_SYNC_INFO * psSyncInfo, -+ IMG_BOOL bWaitForComplete) -+{ -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0; -+ -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ -+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start")); -+ -+ if (SGX2DQuerySyncOpsComplete(psSyncInfo)) { -+ -+ PVR_DPF((PVR_DBG_CALLTRACE, -+ "SGX2DQueryBlitsCompleteKM: No wait. Blits complete.")); -+ return PVRSRV_OK; -+ } -+ -+ if (!bWaitForComplete) { -+ -+ PVR_DPF((PVR_DBG_CALLTRACE, -+ "SGX2DQueryBlitsCompleteKM: No wait. Ops pending.")); -+ return PVRSRV_ERROR_CMD_NOT_PROCESSED; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling.")); -+ do { -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ -+ if (SGX2DQuerySyncOpsComplete(psSyncInfo)) { -+ -+ PVR_DPF((PVR_DBG_CALLTRACE, -+ "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete.")); -+ return PVRSRV_OK; -+ } -+ -+ if (bStart == IMG_FALSE) { -+ uiStart = OSClockus(); -+ bStart = IMG_TRUE; -+ } -+ -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "SGX2DQueryBlitsCompleteKM: Timed out. 
Ops pending.")); -+ -+#if defined(DEBUG) -+ { -+ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData; -+ -+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData)); -+ -+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending)); -+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending)); -+ -+ } -+#endif -+ -+ return PVRSRV_ERROR_TIMEOUT; -+} -+ -+IMG_EXPORT -+ IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, -+ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr) -+{ -+ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL); -+ -+ SGXCleanupRequest((PVRSRV_DEVICE_NODE *) psDeviceNode, -+ &sHWRTDataSetDevVAddr, -+ PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxutils.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxutils.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgxutils.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgxutils.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,117 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "perproc.h" -+ -+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \ -+ (((ROff - WOff) + (CCBSize - 1)) & (CCBSize - 1)) -+ -+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \ -+ Off = ((Off + PacketSize) & (CCBSize - 1)) -+ -+#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \ -+ ((sizeof(type) <= (psCCBMemInfo)->ui32AllocSize) && \ -+ ((psCCBKick)->offset <= (psCCBMemInfo)->ui32AllocSize - sizeof(type))) -+ -+#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \ -+ ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \ -+ (psCCBKick)->offset)) -+ -+static INLINE IMG_UINT32 SGXCalcContextCCBParamSize(IMG_UINT32 ui32ParamSize, -+ IMG_UINT32 ui32AllocGran) -+{ -+ return (ui32ParamSize + (ui32AllocGran - 1)) & ~(ui32AllocGran - 1); -+} -+ -+static INLINE IMG_PVOID SGXAcquireCCB(PVRSRV_SGX_CCB * psCCB, -+ IMG_UINT32 ui32CmdSize) -+{ -+ IMG_BOOL bStart = IMG_FALSE; -+ IMG_UINT32 uiStart = 0; -+ -+ do { -+ if (GET_CCB_SPACE -+ (*psCCB->pui32WriteOffset, *psCCB->pui32ReadOffset, -+ psCCB->ui32Size) > ui32CmdSize) { -+ return (IMG_PVOID) ((IMG_UINT32) psCCB->psCCBMemInfo-> -+ pvLinAddrKM + -+ *psCCB->pui32WriteOffset); -+ } -+ -+ if (bStart == IMG_FALSE) { -+ bStart = IMG_TRUE; -+ uiStart = OSClockus(); -+ } -+ OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); -+ } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); -+ -+ return IMG_NULL; -+} -+ -+#if defined (PDUMP) -+IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray, -+ IMG_UINT32 ui32BufferArrayLength, IMG_BOOL bDumpPolls); -+#endif -+ -+IMG_IMPORT -+ IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32CallerID); -+ -+IMG_IMPORT -+ PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE * psDeviceNode, -+ PVRSRV_SGX_COMMAND_TYPE eCommandType, -+ PVRSRV_SGX_COMMAND * psCommandData, -+ IMG_UINT32 ui32CallerID); -+ -+IMG_IMPORT IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+IMG_IMPORT IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+IMG_IMPORT -+ IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode, -+ IMG_DEV_VIRTADDR * -+ psHWRenderContextDevVAddr, -+ PVRSRV_PER_PROCESS_DATA * -+ psPerProc); -+ -+IMG_IMPORT -+ IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode, -+ IMG_DEV_VIRTADDR * -+ psHWTransferContextDevVAddr, -+ PVRSRV_PER_PROCESS_DATA * -+ psPerProc); -+ -+IMG_IMPORT -+ IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, -+ IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr); -+ -+IMG_IMPORT -+ PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext); -+ -+IMG_IMPORT -+ PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE -+ hHWTransferContext); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgx530defs.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgx530defs.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sgx530defs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sgx530defs.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,427 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _SGX530DEFS_KM_H_ -+#define _SGX530DEFS_KM_H_ -+ -+#define EUR_CR_CLKGATECTL 0x0000 -+#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003 -+#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0 -+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030 -+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4 -+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300 -+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8 -+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000 -+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12 -+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000 -+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16 -+#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000 -+#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20 -+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000 -+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 -+#define EUR_CR_CLKGATESTATUS 0x0004 -+#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001 -+#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0 -+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010 -+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4 -+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100 -+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8 -+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000 -+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12 -+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000 -+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16 -+#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000 -+#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20 -+#define EUR_CR_CLKGATECTLOVR 0x0008 -+#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003 -+#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0 -+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030 -+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4 -+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300 -+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8 -+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000 -+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12 -+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000 -+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16 -+#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000 -+#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20 -+#define EUR_CR_CORE_ID 0x0010 -+#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFF -+#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 -+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000 -+#define EUR_CR_CORE_ID_ID_SHIFT 16 -+#define EUR_CR_CORE_REVISION 0x0014 -+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FF -+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 -+#define 
EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00 -+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 -+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000 -+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 -+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000 -+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 -+#define EUR_CR_DESIGNER_REV_FIELD1 0x0018 -+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFF -+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 -+#define EUR_CR_DESIGNER_REV_FIELD2 0x001C -+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFF -+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 -+#define EUR_CR_SOFT_RESET 0x0080 -+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001 -+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 -+#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002 -+#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1 -+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004 -+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 -+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008 -+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3 -+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010 -+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4 -+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020 -+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 -+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040 -+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6 -+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 -+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002 -+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 -+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001 -+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 -+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 -+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002 -+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 -+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001 -+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 -+#define EUR_CR_EVENT_STATUS2 0x0118 -+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002 -+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 -+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001 -+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 -+#define EUR_CR_EVENT_STATUS 0x012C -+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000 -+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 -+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000 -+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 -+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000 -+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 -+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000 -+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27 -+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000 -+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 -+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000 -+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 -+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000 -+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 -+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000 -+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 -+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000 -+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 -+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000 -+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 -+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000 -+#define 
EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 -+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000 -+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 -+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000 -+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 -+#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000 -+#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 -+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000 -+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 -+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000 -+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 -+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000 -+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 -+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000 -+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 -+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000 -+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 -+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800 -+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 -+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400 -+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 -+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200 -+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 -+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100 -+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 -+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080 -+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 -+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040 -+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 -+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020 -+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 -+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010 -+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 -+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008 -+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 -+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004 -+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 -+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002 -+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 -+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001 -+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 -+#define EUR_CR_EVENT_HOST_ENABLE 0x0130 -+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000 -+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 -+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000 -+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 -+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000 -+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 -+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000 -+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27 -+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000 -+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 -+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000 -+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 -+#define 
EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000 -+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 -+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000 -+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 -+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000 -+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 -+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000 -+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 -+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000 -+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 -+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000 -+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 -+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000 -+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 -+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000 -+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 -+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000 -+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 -+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000 -+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 -+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800 -+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 -+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400 -+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001 -+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 -+#define EUR_CR_EVENT_HOST_CLEAR 0x0134 -+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000 -+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 -+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000 -+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 -+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000 -+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 -+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000 -+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27 -+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000 -+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 -+#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 -+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000 -+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 -+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000 -+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 -+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000 -+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 -+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000 -+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 -+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000 -+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 -+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000 -+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 -+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000 -+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 -+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000 -+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 -+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000 -+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 -+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000 -+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 -+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000 -+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 -+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800 -+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 -+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400 -+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001 -+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 -+#define EUR_CR_PDS 0x0ABC -+#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040 -+#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6 -+#define EUR_CR_PDS_EXEC_BASE 0x0AB8 -+#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000 -+#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 -+#define EUR_CR_EVENT_KICKER 0x0AC4 -+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0 -+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 -+#define EUR_CR_EVENT_KICK 0x0AC8 -+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001 -+#define 
EUR_CR_EVENT_KICK_NOW_SHIFT 0 -+#define EUR_CR_EVENT_TIMER 0x0ACC -+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000 -+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 -+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFF -+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 -+#define EUR_CR_PDS_INV0 0x0AD0 -+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001 -+#define EUR_CR_PDS_INV0_DSC_SHIFT 0 -+#define EUR_CR_PDS_INV1 0x0AD4 -+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001 -+#define EUR_CR_PDS_INV1_DSC_SHIFT 0 -+#define EUR_CR_PDS_INV2 0x0AD8 -+#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001 -+#define EUR_CR_PDS_INV2_DSC_SHIFT 0 -+#define EUR_CR_PDS_INV3 0x0ADC -+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001 -+#define EUR_CR_PDS_INV3_DSC_SHIFT 0 -+#define EUR_CR_PDS_INV_CSC 0x0AE0 -+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001 -+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 -+#define EUR_CR_PDS_PC_BASE 0x0B2C -+#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFF -+#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 -+#define EUR_CR_BIF_CTRL 0x0C00 -+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001 -+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 -+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002 -+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 -+#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004 -+#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 -+#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008 -+#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 -+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010 -+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000 -+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 -+#define EUR_CR_BIF_INT_STAT 0x0C04 -+#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFF -+#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 -+#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000 -+#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 -+#define EUR_CR_BIF_FAULT 0x0C08 -+#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000 -+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 -+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 -+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000 -+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 -+#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88 -+#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0x0FF00000 -+#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20 -+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 -+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000 -+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 -+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 -+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FF -+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 -+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC -+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000 -+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 -+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 -+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000 -+#define 
EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 -+#define EUR_CR_2D_BLIT_STATUS 0x0E04 -+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFF -+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 -+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000 -+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 -+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000E -+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000 -+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 -+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 -+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFF -+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 -+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000 -+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 -+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000 -+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 -+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) -+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFF -+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 -+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000 -+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24 -+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 -+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/srvkm.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/srvkm.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/srvkm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/srvkm.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,46 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef SRVKM_H -+#define SRVKM_H -+ -+ -+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 -+ ui32PID); -+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 -+ ui32PID); -+ -+ IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State); -+ -+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE -+ hArena, -+ IMG_PBYTE -+ pbyBuffer, -+ IMG_UINT32 * -+ puiBufSize, -+ IMG_BOOL bSave); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/syscommon.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/syscommon.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/syscommon.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/syscommon.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,158 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _SYSCOMMON_H -+#define _SYSCOMMON_H -+ -+#include "sysconfig.h" -+#include "sysinfo.h" -+#include "servicesint.h" -+#include "queue.h" -+#include "power.h" -+#include "resman.h" -+#include "ra.h" -+#include "device.h" -+#include "buffer_manager.h" -+ -+ -+ typedef struct _SYS_DEVICE_ID_TAG { -+ IMG_UINT32 uiID; -+ IMG_BOOL bInUse; -+ -+ } SYS_DEVICE_ID; -+ -+#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4 -+ -+ typedef struct _SYS_DATA_TAG_ { -+ IMG_UINT32 ui32NumDevices; -+ SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT]; -+ PVRSRV_DEVICE_NODE *psDeviceNodeList; -+ PVRSRV_POWER_DEV *psPowerDeviceList; -+ PVRSRV_RESOURCE sPowerStateChangeResource; -+ PVR_POWER_STATE eCurrentPowerState; -+ PVR_POWER_STATE eFailedPowerState; -+ IMG_UINT32 ui32CurrentOSPowerState; -+ PVRSRV_QUEUE_INFO *psQueueList; -+ PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList; -+ IMG_PVOID pvEnvSpecificData; -+ IMG_PVOID pvSysSpecificData; -+ PVRSRV_RESOURCE sQProcessResource; -+ IMG_VOID *pvSOCRegsBase; -+ IMG_HANDLE hSOCTimerRegisterOSMemHandle; -+ IMG_UINT32 *pvSOCTimerRegisterKM; -+ IMG_VOID *pvSOCClockGateRegsBase; -+ IMG_UINT32 ui32SOCClockGateRegsSize; -+ PFN_CMD_PROC *ppfnCmdProcList[SYS_DEVICE_COUNT]; -+ -+ PCOMMAND_COMPLETE_DATA *ppsCmdCompleteData[SYS_DEVICE_COUNT]; -+ -+ IMG_BOOL bReProcessQueues; -+ -+ RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS]; -+ -+ IMG_CHAR *pszVersionString; -+ PVRSRV_EVENTOBJECT *psGlobalEventObject; -+ } SYS_DATA; -+ -+ PVRSRV_ERROR SysInitialise(IMG_VOID); -+ PVRSRV_ERROR SysFinalise(IMG_VOID); -+ -+ IMG_UINT32 GetCPUTranslatedAddress(IMG_VOID); -+ -+ PVRSRV_ERROR SysDeinitialise(SYS_DATA * psSysData); -+ -+ PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_VOID ** ppvDeviceMap); -+ -+ IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE * psDeviceNode); -+ IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+ IMG_UINT32 SysGetInterruptSource(SYS_DATA * psSysData, -+ PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+ IMG_VOID SysClearInterrupts(SYS_DATA * psSysData, -+ IMG_UINT32 ui32ClearBits); -+ -+ PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex); -+ -+ PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState); -+ PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState); -+ PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState, -+ PVR_POWER_STATE eCurrentPowerState); -+ PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState, -+ PVR_POWER_STATE -+ eCurrentPowerState); -+ -+ PVRSRV_ERROR SysOEMFunction(IMG_UINT32 ui32ID, -+ IMG_VOID * pvIn, -+ IMG_UINT32 ulInSize, -+ IMG_VOID * pvOut, IMG_UINT32 ulOutSize); -+ -+ IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_PHYADDR cpu_paddr); -+ IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_SYS_PHYADDR SysPAddr); -+ IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_DEV_PHYADDR SysPAddr); -+ IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr); -+ IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr); -+ -+ extern SYS_DATA *gpsSysData; -+ -+ -+ static INLINE PVRSRV_ERROR SysAcquireData(SYS_DATA ** ppsSysData) { -+ -+ *ppsSysData = gpsSysData; -+ -+ if (!gpsSysData) { -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ return PVRSRV_OK; -+ } 
-+ -+ static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA * psSysData) { -+ PVRSRV_ERROR eError; -+ -+ eError = PVRSRVInit(psSysData); -+ -+ return eError; -+ } -+ -+ static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA * psSysData) { -+ -+ PVRSRVDeInit(psSysData); -+ -+ OSDestroyResource(&psSysData->sPowerStateChangeResource); -+ } -+ -+#define SysReadHWReg(p, o) OSReadHWReg(p, o) -+#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v) -+ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysconfig.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysconfig.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysconfig.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysconfig.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,687 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "services_headers.h" -+#include "kerneldisplay.h" -+#include "oemfuncs.h" -+#include "sgxinfo.h" -+#include "pdump_km.h" -+#include "sgxinfokm.h" -+#include "syslocal.h" -+#include "sysconfig.h" -+ -+SYS_DATA *gpsSysData = (SYS_DATA *) IMG_NULL; -+SYS_DATA gsSysData; -+ -+static SYS_SPECIFIC_DATA gsSysSpecificData; -+SYS_SPECIFIC_DATA *gpsSysSpecificData; -+ -+static IMG_UINT32 gui32SGXDeviceID; -+static SGX_DEVICE_MAP gsSGXDeviceMap; -+static PVRSRV_DEVICE_NODE *gpsSGXDevNode; -+ -+#define DEVICE_SGX_INTERRUPT (1 << 0) -+ -+ -+IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl, -+ IMG_BYTE * pInBuf, -+ IMG_UINT32 InBufLen, -+ IMG_BYTE * pOutBuf, -+ IMG_UINT32 OutBufLen, -+ IMG_UINT32 * pdwBytesTransferred); -+ -+static PVRSRV_ERROR SysLocateDevices(SYS_DATA * psSysData) -+{ -+ -+ PVR_UNREFERENCED_PARAMETER(psSysData); -+ -+ gsSGXDeviceMap.ui32Flags = 0x0; -+ -+ -+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = -+ SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE; -+ gsSGXDeviceMap.sRegsCpuPBase = -+ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase); -+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE; -+ -+ gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ; -+ -+ -+ return PVRSRV_OK; -+} -+ -+IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion) -+{ -+ static IMG_CHAR aszVersionString[100]; -+ SYS_DATA *psSysData; -+ IMG_UINT32 ui32SGXRevision; -+ IMG_INT32 i32Count; -+ IMG_VOID *pvRegsLinAddr; -+ -+ pvRegsLinAddr = OSMapPhysToLin(sRegRegion, -+ SYS_OMAP3430_SGX_REGS_SIZE, -+ PVRSRV_HAP_UNCACHED | -+ PVRSRV_HAP_KERNEL_ONLY, IMG_NULL); -+ if (!pvRegsLinAddr) { -+ return IMG_NULL; -+ } -+ -+ ui32SGXRevision = OSReadHWReg((IMG_PVOID) ((IMG_PBYTE) pvRegsLinAddr), -+ EUR_CR_CORE_REVISION); -+ -+ if (SysAcquireData(&psSysData) != PVRSRV_OK) { -+ return IMG_NULL; -+ } -+ -+ i32Count = OSSNPrintf(aszVersionString, 100, -+ "SGX revision = %u.%u.%u", -+ (unsigned -+ int)((ui32SGXRevision & -+ EUR_CR_CORE_REVISION_MAJOR_MASK) -+ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT), -+ (unsigned -+ int)((ui32SGXRevision & -+ EUR_CR_CORE_REVISION_MINOR_MASK) -+ >> EUR_CR_CORE_REVISION_MINOR_SHIFT), -+ (unsigned -+ int)((ui32SGXRevision & -+ EUR_CR_CORE_REVISION_MAINTENANCE_MASK) -+ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT) -+ ); -+ -+ OSUnMapPhysToLin(pvRegsLinAddr, -+ SYS_OMAP3430_SGX_REGS_SIZE, -+ PVRSRV_HAP_UNCACHED | PVRSRV_HAP_KERNEL_ONLY, -+ IMG_NULL); -+ -+ if (i32Count == -1) { -+ return IMG_NULL; -+ } -+ -+ return aszVersionString; -+} -+ -+PVRSRV_ERROR SysInitialise(IMG_VOID) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_CPU_PHYADDR TimerRegPhysBase; -+ -+ gpsSysData = &gsSysData; -+ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA)); -+ -+ gpsSysSpecificData = &gsSysSpecificData; -+ OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA)); -+ -+ gpsSysData->pvSysSpecificData = gpsSysSpecificData; -+ -+ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to setup env structure")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_ENVDATA); -+ -+ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT; -+ -+ for (i = 0; i < SYS_DEVICE_COUNT; i++) { -+ gpsSysData->sDeviceID[i].uiID = i; -+ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE; -+ } -+ -+ 
gpsSysData->psDeviceNodeList = IMG_NULL; -+ gpsSysData->psQueueList = IMG_NULL; -+ -+ eError = SysInitialiseCommon(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed in SysInitialiseCommon")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ -+ TimerRegPhysBase.uiAddr = -+ SYS_OMAP3430_GP11TIMER_PHYS_BASE + SYS_OMAP3430_GPTIMER_REGS; -+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL; -+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0; -+ OSReservePhys(TimerRegPhysBase, -+ 4, -+ PVRSRV_HAP_MULTI_PROCESS | PVRSRV_HAP_UNCACHED, -+ (IMG_VOID **) & gpsSysData->pvSOCTimerRegisterKM, -+ &gpsSysData->hSOCTimerRegisterOSMemHandle); -+ -+ -+ eError = SysLocateDevices(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to locate devices")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV); -+ -+ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice, -+ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to register device!")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_REGDEV); -+ -+ psDeviceNode = gpsSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ -+ switch (psDeviceNode->sDevId.eDeviceType) { -+ case PVRSRV_DEVICE_TYPE_SGX: -+ { -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; -+ -+ psDeviceNode->psLocalDevMemArena = IMG_NULL; -+ -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ psDeviceMemoryHeap = -+ psDevMemoryInfo->psDeviceMemoryHeap; -+ -+ for (i = 0; i < psDevMemoryInfo->ui32HeapCount; -+ i++) { -+ psDeviceMemoryHeap[i].ui32Attribs |= -+ PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG; -+ } -+ -+ gpsSGXDevNode = psDeviceNode; -+ gsSysSpecificData.psSGXDevNode = psDeviceNode; -+ -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to find SGX device node!")); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ PDUMPINIT(); -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT); -+ -+ eError = InitSystemClocks(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to init system clocks (%d)", -+ eError)); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ -+ eError = EnableSystemClocks(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to Enable system clocks (%d)", -+ eError)); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS); -+ -+ eError = OSInitPerf(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to init DVFS (%d)", eError)); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ eError = EnableSGXClocks(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to Enable SGX clocks (%d)", -+ eError)); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ -+ eError = PVRSRVInitialiseDevice(gui32SGXDeviceID); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, 
-+ "SysInitialise: Failed to initialise device!")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_INITDEV); -+ -+ -+ DisableSGXClocks(gpsSysData); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SysFinalise(IMG_VOID) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ eError = EnableSGXClocks(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysInitialise: Failed to Enable SGX clocks (%d)", -+ eError)); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ -+ -+ eError = OSInstallMISR(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "SysFinalise: Failed to install MISR")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_MISR); -+ -+ eError = -+ OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", -+ gpsSGXDevNode); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, "SysFinalise: Failed to install ISR")); -+ SysDeinitialise(gpsSysData); -+ gpsSysData = IMG_NULL; -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_LISR); -+ -+ gpsSysData->pszVersionString = -+ SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase); -+ if (!gpsSysData->pszVersionString) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysFinalise: Failed to create a system version string")); -+ } else { -+ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", -+ gpsSysData->pszVersionString)); -+ } -+ -+ -+ DisableSGXClocks(gpsSysData); -+ -+ gpsSysSpecificData->bSGXInitComplete = IMG_TRUE; -+ -+ return eError; -+} -+ -+PVRSRV_ERROR SysDeinitialise(SYS_DATA * psSysData) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psSysData); -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR)) { -+ eError = OSUninstallDeviceLISR(psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDeinitialise: OSUninstallDeviceLISR failed")); -+ return eError; -+ } -+ } -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR)) { -+ eError = OSUninstallMISR(psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDeinitialise: OSUninstallMISR failed")); -+ return eError; -+ } -+ } -+ -+ eError = OSCleanupPerf(psSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDeinitialise: OSCleanupDvfs failed")); -+ return eError; -+ } -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV)) { -+ PVR_ASSERT(SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)); -+ -+ eError = EnableSGXClocks(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDeinitialise: EnableSGXClocks failed")); -+ return eError; -+ } -+ -+ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDeinitialise: failed to de-init the device")); -+ return eError; -+ } -+ } -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)) { -+ DisableSystemClocks(gpsSysData); -+ } -+ -+ CleanupSystemClocks(gpsSysData); -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA)) { -+ eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDeinitialise: failed 
to de-init env structure")); -+ return eError; -+ } -+ } -+ -+ if (gpsSysData->pvSOCTimerRegisterKM) { -+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM, -+ 4, -+ PVRSRV_HAP_MULTI_PROCESS | PVRSRV_HAP_UNCACHED, -+ gpsSysData->hSOCTimerRegisterOSMemHandle); -+ } -+ -+ SysDeinitialiseCommon(gpsSysData); -+ -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT)) { -+ PDUMPDEINIT(); -+ } -+ -+ gpsSysSpecificData->ui32SysSpecificData = 0; -+ gpsSysSpecificData->bSGXInitComplete = IMG_FALSE; -+ -+ gpsSysData = IMG_NULL; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_VOID ** ppvDeviceMap) -+{ -+ -+ switch (eDeviceType) { -+ case PVRSRV_DEVICE_TYPE_SGX: -+ { -+ -+ *ppvDeviceMap = (IMG_VOID *) & gsSGXDeviceMap; -+ -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysGetDeviceMemoryMap: unsupported device type")); -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_CPU_PHYADDR CpuPAddr) -+{ -+ IMG_DEV_PHYADDR DevPAddr; -+ -+ PVR_UNREFERENCED_PARAMETER(eDeviceType); -+ -+ DevPAddr.uiAddr = CpuPAddr.uiAddr; -+ -+ return DevPAddr; -+} -+ -+IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR sys_paddr) -+{ -+ IMG_CPU_PHYADDR cpu_paddr; -+ -+ cpu_paddr.uiAddr = sys_paddr.uiAddr; -+ return cpu_paddr; -+} -+ -+IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr) -+{ -+ IMG_SYS_PHYADDR sys_paddr; -+ -+ sys_paddr.uiAddr = cpu_paddr.uiAddr; -+ return sys_paddr; -+} -+ -+IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_SYS_PHYADDR SysPAddr) -+{ -+ IMG_DEV_PHYADDR DevPAddr; -+ -+ PVR_UNREFERENCED_PARAMETER(eDeviceType); -+ -+ DevPAddr.uiAddr = SysPAddr.uiAddr; -+ -+ return DevPAddr; -+} -+ -+IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, -+ IMG_DEV_PHYADDR DevPAddr) -+{ -+ IMG_SYS_PHYADDR SysPAddr; -+ -+ PVR_UNREFERENCED_PARAMETER(eDeviceType); -+ -+ SysPAddr.uiAddr = DevPAddr.uiAddr; -+ -+ return SysPAddr; -+} -+ -+IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+IMG_UINT32 SysGetInterruptSource(SYS_DATA * psSysData, -+ PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSysData); -+ -+ return psDeviceNode->ui32SOCInterruptBit; -+} -+ -+IMG_VOID SysClearInterrupts(SYS_DATA * psSysData, IMG_UINT32 ui32ClearBits) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSysData); -+ PVR_UNREFERENCED_PARAMETER(ui32ClearBits); -+ -+ /* Flush posted write for the irq status to avoid spurious interrupts */ -+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *) gpsSGXDevNode->pvDevice)-> -+ pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR); -+} -+ -+PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (eNewPowerState == PVRSRV_POWER_STATE_D3) { -+ PVR_TRACE(("SysSystemPrePowerState: Entering state D3")); -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR)) { -+ eError = OSUninstallDeviceLISR(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", -+ eError)); -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR); -+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, -+ 
SYS_SPECIFIC_DATA_ENABLE_LISR); -+ } -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)) { -+ DisableSystemClocks(gpsSysData); -+ -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS); -+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS); -+ } -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (eNewPowerState == PVRSRV_POWER_STATE_D0) { -+ PVR_TRACE(("SysSystemPostPowerState: Entering state D0")); -+ -+ if (SYS_SPECIFIC_DATA_TEST -+ (&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS)) { -+ eError = EnableSystemClocks(gpsSysData); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysSystemPostPowerState: EnableSystemClocks failed (%d)", -+ eError)); -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS); -+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS); -+ } -+ if (SYS_SPECIFIC_DATA_TEST -+ (&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR)) { -+ eError = -+ OSInstallDeviceLISR(gpsSysData, -+ gsSGXDeviceMap.ui32IRQ, -+ "SGX ISR", gpsSGXDevNode); -+ if (eError != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", -+ eError)); -+ return eError; -+ } -+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_ENABLE_LISR); -+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, -+ SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR); -+ } -+ } -+ return eError; -+} -+ -+PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState, -+ PVR_POWER_STATE eCurrentPowerState) -+{ -+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState); -+ -+ if (ui32DeviceIndex != gui32SGXDeviceID) { -+ return PVRSRV_OK; -+ } -+ if (eNewPowerState == PVRSRV_POWER_STATE_D3) { -+ PVR_TRACE(("SysDevicePrePowerState: SGX Entering state D3")); -+ DisableSGXClocks(gpsSysData); -+ } -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex, -+ PVR_POWER_STATE eNewPowerState, -+ PVR_POWER_STATE eCurrentPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(eNewPowerState); -+ -+ if (ui32DeviceIndex != gui32SGXDeviceID) { -+ return eError; -+ } -+ if (eCurrentPowerState == PVRSRV_POWER_STATE_D3) { -+ PVR_TRACE(("SysDevicePostPowerState: SGX Leaving state D3")); -+ eError = EnableSGXClocks(gpsSysData); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR SysOEMFunction(IMG_UINT32 ui32ID, -+ IMG_VOID * pvIn, -+ IMG_UINT32 ulInSize, -+ IMG_VOID * pvOut, IMG_UINT32 ulOutSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32ID); -+ PVR_UNREFERENCED_PARAMETER(pvIn); -+ PVR_UNREFERENCED_PARAMETER(ulInSize); -+ PVR_UNREFERENCED_PARAMETER(pvOut); -+ PVR_UNREFERENCED_PARAMETER(ulOutSize); -+ -+ if ((ui32ID == OEM_GET_EXT_FUNCS) && -+ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE))) { -+ -+ PVRSRV_DC_OEM_JTABLE *psOEMJTable = -+ (PVRSRV_DC_OEM_JTABLE *) pvOut; -+ psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM; -+ return PVRSRV_OK; -+ } -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysconfig.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysconfig.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysconfig.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysconfig.h 2011-06-22 
13:19:32.613063278 +0200 -@@ -0,0 +1,53 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__SOCCONFIG_H__) -+#define __SOCCONFIG_H__ -+ -+#include "syscommon.h" -+ -+#define VS_PRODUCT_NAME "OMAP3430" -+ -+#define SYS_SGX_CLOCK_SPEED 110666666 -+#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100) -+#define SYS_SGX_PDS_TIMER_FREQ (1000) -+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (100) -+ -+#define SYS_OMAP3430_VDD2_OPP3_SGX_CLOCK_SPEED SYS_SGX_CLOCK_SPEED -+#define SYS_OMAP3430_VDD2_OPP2_SGX_CLOCK_SPEED (SYS_SGX_CLOCK_SPEED / 2) -+ -+#define SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE 0x50000000 -+#define SYS_OMAP3430_SGX_REGS_SIZE 0x4000 -+ -+#define SYS_OMAP3430_SGX_IRQ 21 -+ -+#define SYS_OMAP3430_GP11TIMER_PHYS_BASE 0x48088000 -+#define SYS_OMAP3430_GPTIMER_ENABLE 0x24 -+#define SYS_OMAP3430_GPTIMER_REGS 0x28 -+#define SYS_OMAP3430_GPTIMER_TSICR 0x40 -+#define SYS_OMAP3430_GPTIMER_SIZE 1024 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysinfo.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysinfo.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysinfo.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysinfo.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,95 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__SYSINFO_H__) -+#define __SYSINFO_H__ -+ -+#define MAX_HW_TIME_US (500000) -+#define WAIT_TRY_COUNT (10000) -+ -+typedef enum _SYS_DEVICE_TYPE_ { -+ SYS_DEVICE_SGX = 0, -+ -+ SYS_DEVICE_FORCE_I16 = 0x7fff -+} SYS_DEVICE_TYPE; -+ -+#define SYS_DEVICE_COUNT 3 -+ -+#define PRM_REG32(offset) (offset) -+#define CM_REG32(offset) (offset) -+ -+#define CM_FCLKEN_SGX CM_REG32(0xB00) -+#define CM_FCLKEN_SGX_EN_3D 0x00000002 -+ -+#define CM_ICLKEN_SGX CM_REG32(0xB10) -+#define CM_ICLKEN_SGX_EN_SGX 0x00000001 -+ -+#define CM_IDLEST_SGX CM_REG32(0xB20) -+#define CM_IDLEST_SGX_ST_SGX 0x00000001 -+ -+#define CM_CLKSEL_SGX CM_REG32(0xB40) -+#define CM_CLKSEL_SGX_MASK 0x0000000f -+#define CM_CLKSEL_SGX_L3DIV3 0x00000000 -+#define CM_CLKSEL_SGX_L3DIV4 0x00000001 -+#define CM_CLKSEL_SGX_L3DIV6 0x00000002 -+#define CM_CLKSEL_SGX_96M 0x00000003 -+ -+#define CM_SLEEPDEP_SGX CM_REG32(0xB44) -+#define CM_CLKSTCTRL_SGX CM_REG32(0xB48) -+#define CM_CLKSTCTRL_SGX_AUTOSTATE 0x00008001 -+ -+#define CM_CLKSTST_SGX CM_REG32(0xB4C) -+#define CM_CLKSTST_SGX_STATUS_VALID 0x00000001 -+ -+#define RM_RSTST_SGX PRM_REG32(0xB58) -+#define RM_RSTST_SGX_RST_MASK 0x0000000F -+#define RM_RSTST_SGX_COREDOMAINWKUP_RST 0x00000008 -+#define RM_RSTST_SGX_DOMAINWKUP_RST 0x00000004 -+#define RM_RSTST_SGX_GLOBALWARM_RST 0x00000002 -+#define RM_RSTST_SGX_GLOBALCOLD_RST 0x00000001 -+ -+#define PM_WKDEP_SGX PRM_REG32(0xBC8) -+#define PM_WKDEP_SGX_EN_WAKEUP 0x00000010 -+#define PM_WKDEP_SGX_EN_MPU 0x00000002 -+#define PM_WKDEP_SGX_EN_CORE 0x00000001 -+ -+#define PM_PWSTCTRL_SGX PRM_REG32(0xBE0) -+#define PM_PWSTCTRL_SGX_POWERSTATE_MASK 0x00000003 -+#define PM_PWSTCTRL_SGX_OFF 0x00000000 -+#define PM_PWSTCTRL_SGX_RETENTION 0x00000001 -+#define PM_PWSTCTRL_SGX_ON 0x00000003 -+ -+#define PM_PWSTST_SGX PRM_REG32(0xBE4) -+#define PM_PWSTST_SGX_INTRANSITION 0x00100000 -+#define PM_PWSTST_SGX_CLKACTIVITY 0x00080000 -+#define PM_PWSTST_SGX_POWERSTATE_MASK 0x00000003 -+#define PM_PWSTST_SGX_OFF 0x00000003 -+#define PM_PWSTST_SGX_RETENTION 0x00000001 -+#define PM_PWSTST_SGX_ON 0x00000000 -+ -+#define PM_PREPWSTST_SGX PRM_REG32(0xBE8) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/syslocal.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/syslocal.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/syslocal.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/syslocal.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,84 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
-+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#if !defined(__SYSLOCAL_H__) -+#define __SYSLOCAL_H__ -+ -+#include -+#include -+#include -+/*#include */ -+ -+ -+ IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion); -+ -+ PVRSRV_ERROR InitSystemClocks(SYS_DATA * psSysData); -+ IMG_VOID CleanupSystemClocks(SYS_DATA * psSysData); -+ IMG_VOID DisableSystemClocks(SYS_DATA * psSysData); -+ PVRSRV_ERROR EnableSystemClocks(SYS_DATA * psSysData); -+ -+ IMG_VOID DisableSGXClocks(SYS_DATA * psSysData); -+ PVRSRV_ERROR EnableSGXClocks(SYS_DATA * psSysData); -+ -+#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS 0x00000001 -+#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002 -+#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004 -+#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008 -+#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV 0x00000010 -+#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000020 -+#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000040 -+#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000080 -+#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000100 -+ -+#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200 -+#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400 -+ -+#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((void)((psSysSpecData)->ui32SysSpecificData |= (flag))) -+ -+#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((void)((psSysSpecData)->ui32SysSpecificData &= ~(flag))) -+ -+#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0) -+ -+ typedef struct _SYS_SPECIFIC_DATA_TAG_ { -+ IMG_UINT32 ui32SysSpecificData; -+ PVRSRV_DEVICE_NODE *psSGXDevNode; -+ IMG_BOOL bSGXInitComplete; -+ IMG_BOOL bSGXClocksEnabled; -+ struct clk *psCORE_CK; -+ struct clk *psSGX_FCK; -+ struct clk *psSGX_ICK; -+ struct clk *psMPU_CK; -+#if defined(DEBUG) || defined(TIMING) -+ struct clk *psGPT11_FCK; -+ struct clk *psGPT11_ICK; -+ void __iomem *gpt_base; -+#endif -+ struct constraint_handle *pVdd2Handle; -+ } SYS_SPECIFIC_DATA; -+ -+ extern SYS_SPECIFIC_DATA *gpsSysSpecificData; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysutils_linux.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysutils_linux.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/sysutils_linux.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/sysutils_linux.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,653 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "sgxdefs.h" -+#include "services_headers.h" -+#include "sysinfo.h" -+#include "sgxapi_km.h" -+#include "sysconfig.h" -+#include "sgxinfokm.h" -+#include "syslocal.h" -+#include "env_data.h" -+ -+#define HZ_TO_MHZ(m) ((m) / 1000000) -+ -+static inline unsigned long scale_by_rate(unsigned long val, -+ unsigned long rate1, -+ unsigned long rate2) -+{ -+ if (rate1 >= rate2) { -+ return val * (rate1 / rate2); -+ } -+ -+ return val / (rate2 / rate1); -+} -+ -+static inline unsigned long scale_prop_to_SGX_clock(unsigned long val, -+ unsigned long rate) -+{ -+ return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED); -+} -+ -+static inline unsigned long scale_inv_prop_to_SGX_clock(unsigned long val, -+ unsigned long rate) -+{ -+ return scale_by_rate(val, SYS_SGX_CLOCK_SPEED, rate); -+} -+ -+IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION * psTimingInfo) -+{ -+ unsigned long rate; -+ -+ PVR_ASSERT(gpsSysSpecificData->bSGXClocksEnabled); -+ -+ rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK); -+ PVR_ASSERT(rate != 0); -+ psTimingInfo->ui32CoreClockSpeed = rate; -+ psTimingInfo->ui32HWRecoveryFreq = -+ scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate); -+ psTimingInfo->ui32uKernelFreq = -+ scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate); -+ psTimingInfo->ui32ActivePowManLatencyms = -+ SYS_SGX_ACTIVE_POWER_LATENCY_MS; -+} -+ -+ -+static int vdd2_post_func(struct notifier_block *n, unsigned long event, -+ void *ptr) -+{ -+ PVR_UNREFERENCED_PARAMETER(n); -+ PVR_UNREFERENCED_PARAMETER(event); -+ PVR_UNREFERENCED_PARAMETER(ptr); -+ -+ if (gpsSysSpecificData->bSGXClocksEnabled -+ && gpsSysSpecificData->bSGXInitComplete) { -+#if defined(DEBUG) -+ unsigned long rate; -+ -+ rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK); -+ -+ PVR_ASSERT(rate != 0); -+ -+ PVR_TRACE(("%s: SGX clock rate: %dMHz", __FUNCTION__, -+ HZ_TO_MHZ(rate))); -+#endif -+ PVRSRVDevicePostClockSpeedChange(gpsSysSpecificData-> -+ psSGXDevNode->sDevId. -+ ui32DeviceIndex, IMG_TRUE, -+ IMG_NULL); -+ } -+ return 0; -+} -+ -+static int vdd2_pre_func(struct notifier_block *n, unsigned long event, -+ void *ptr) -+{ -+ PVR_UNREFERENCED_PARAMETER(n); -+ PVR_UNREFERENCED_PARAMETER(event); -+ PVR_UNREFERENCED_PARAMETER(ptr); -+ -+ if (gpsSysSpecificData->bSGXClocksEnabled -+ && gpsSysSpecificData->bSGXInitComplete) { -+ BUG_ON(gpsSysData->eCurrentPowerState > PVRSRV_POWER_STATE_D1); -+ PVRSRVDevicePreClockSpeedChange(gpsSysSpecificData-> -+ psSGXDevNode->sDevId. 
-+ ui32DeviceIndex, IMG_TRUE, -+ IMG_NULL); -+ } -+ -+ return 0; -+} -+ -+static int vdd2_pre_post_func(struct notifier_block *n, unsigned long event, -+ void *ptr) -+{ -+ struct clk_notifier_data *cnd; -+ -+ PVR_UNREFERENCED_PARAMETER(n); -+ -+ cnd = (struct clk_notifier_data *)ptr; -+ -+ PVR_TRACE(("vdd2_pre_post_func: old clock rate = %lu", cnd->old_rate)); -+ PVR_TRACE(("vdd2_pre_post_func: new clock rate = %lu", cnd->new_rate)); -+ -+ if (CLK_PRE_RATE_CHANGE == event) { -+ PVRSRVDvfsLock(); -+ PVR_TRACE(("vdd2_pre_post_func: CLK_PRE_RATE_CHANGE event")); -+ vdd2_pre_func(n, event, ptr); -+ } else if (CLK_POST_RATE_CHANGE == event) { -+ PVR_TRACE(("vdd2_pre_post_func: CLK_POST_RATE_CHANGE event")); -+ vdd2_post_func(n, event, ptr); -+ PVRSRVDvfsUnlock(); -+ } else if (CLK_ABORT_RATE_CHANGE == event) { -+ PVR_TRACE(("vdd2_pre_post_func: CLK_ABORT_RATE_CHANGE event")); -+ PVRSRVDvfsUnlock(); -+ } else { -+ printk("vdd2_pre_post_func: unexpected event (%lu)\n", event); -+ PVR_DPF((PVR_DBG_ERROR, -+ "vdd2_pre_post_func: unexpected event (%lu)", event)); -+ } -+ PVR_TRACE(("vdd2_pre_post_func end.")); -+ return 0; -+} -+ -+static struct notifier_block vdd2_pre_post = { -+ vdd2_pre_post_func, -+ NULL -+}; -+ -+static IMG_VOID RegisterConstraintNotifications(SYS_SPECIFIC_DATA * -+ psSysSpecData) -+{ -+ PVR_TRACE(("Registering constraint notifications")); -+ -+ clk_notifier_register(psSysSpecData->psSGX_FCK, &vdd2_pre_post); -+ PVR_TRACE(("VDD2 constraint notifications registered")); -+} -+ -+static IMG_VOID UnRegisterConstraintNotifications(SYS_SPECIFIC_DATA * -+ psSysSpecData) -+{ -+ PVR_TRACE(("Unregistering constraint notifications")); -+ -+ clk_notifier_unregister(psSysSpecData->psSGX_FCK, &vdd2_pre_post); -+} -+ -+static struct device sgx_dev; -+static int sgx_clock_enabled; -+ -+/* return value: current sgx load -+ * 0 - not busy -+ * 100 - busy -+ */ -+static unsigned int sgx_current_load(void) -+{ -+ PVRSRV_ERROR eError; -+ SYS_DATA *psSysData; -+ SYS_SPECIFIC_DATA *psSysSpecData; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ static unsigned int kicks_prev; -+ static long time_prev; -+ -+ eError = SysAcquireData(&psSysData); -+ if (eError != PVRSRV_OK) -+ return 0; -+ psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; -+ if ((!psSysSpecData) || (!psSysSpecData->bSGXClocksEnabled)) -+ return 0; -+ psDeviceNode = psSysData->psDeviceNodeList; -+ while (psDeviceNode) { -+ if ((psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX) -+ && (psDeviceNode->pvDevice)) { -+ PVRSRV_SGXDEV_INFO *psDevInfo = -+ (PVRSRV_SGXDEV_INFO *) psDeviceNode->pvDevice; -+ unsigned int kicks = psDevInfo->ui32KickTACounter; -+ unsigned int load; -+ long time_elapsed; -+ -+ time_elapsed = jiffies - time_prev; -+ if (likely(time_elapsed)) -+ load = -+ 1000 * (kicks - kicks_prev) / time_elapsed; -+ else -+ load = 0; -+ kicks_prev = kicks; -+ time_prev += time_elapsed; -+ /* if the period between calls to this function was too long, -+ * then load stats are invalid -+ */ -+ if (time_elapsed > 5 * HZ) -+ return 0; -+ /*pr_err("SGX load %u\n", load); */ -+ -+ /* 'load' shows how many times sgx was kicked per 1000 jiffies -+ * 150 is arbitrarily chosen threshold. -+ * If the number of kicks is below threshold then sgx is doing -+ * some small jobs and we can keep the clock freq low. 
-+ */ -+ if (load < 150) -+ return 0; -+ else -+ return 100; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ return 0; -+} -+ -+static void sgx_lock_perf(struct work_struct *work) -+{ -+ int vdd1, vdd2; -+ static int bHigh; -+ int high; -+ unsigned int load; -+ struct delayed_work *d_work = -+ container_of(work, struct delayed_work, work); -+ ENV_DATA *psEnvData = container_of(d_work, ENV_DATA, sPerfWork); -+ -+ load = sgx_current_load(); -+ if (load) { -+ vdd1 = 500000000; -+ vdd2 = 400000; -+ high = 1; -+ } else { -+ vdd1 = 0; -+ vdd2 = 0; -+ high = 0; -+ } -+ if (high != bHigh) { -+ omap_pm_set_min_bus_tput(&sgx_dev, OCP_INITIATOR_AGENT, vdd2); -+ omap_pm_set_min_mpu_freq(&sgx_dev, vdd1); -+ bHigh = high; -+ } -+ if (sgx_clock_enabled || load) -+ queue_delayed_work(psEnvData->psPerfWorkqueue, -+ &psEnvData->sPerfWork, HZ / 5); -+} -+ -+static void sgx_need_perf(SYS_DATA * psSysData, int ena) -+{ -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ sgx_clock_enabled = ena; -+ cancel_delayed_work(&psEnvData->sPerfWork); -+ queue_delayed_work(psEnvData->psPerfWorkqueue, &psEnvData->sPerfWork, -+ 0); -+} -+ -+PVRSRV_ERROR OSInitPerf(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (psEnvData->psPerfWorkqueue) { -+ PVR_DPF((PVR_DBG_ERROR, "OSInitPerf: already inited")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Initing DVFS %x", pvSysData)); -+ -+ psEnvData->psPerfWorkqueue = create_singlethread_workqueue("sgx_perf"); -+ INIT_DELAYED_WORK(&psEnvData->sPerfWork, sgx_lock_perf); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSCleanupPerf(IMG_VOID * pvSysData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) pvSysData; -+ ENV_DATA *psEnvData = (ENV_DATA *) psSysData->pvEnvSpecificData; -+ -+ if (!psEnvData->psPerfWorkqueue) { -+ PVR_DPF((PVR_DBG_ERROR, "OSCleanupPerf: not inited")); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ PVR_TRACE(("Cleaning up DVFS")); -+ -+ flush_workqueue(psEnvData->psPerfWorkqueue); -+ destroy_workqueue(psEnvData->psPerfWorkqueue); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR EnableSGXClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = -+ (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; -+#if defined(DEBUG) -+ unsigned long rate; -+#endif -+ int res; -+ -+ if (psSysSpecData->bSGXClocksEnabled) { -+ return PVRSRV_OK; -+ } -+ -+ PVR_TRACE(("EnableSGXClocks: Enabling SGX Clocks")); -+ -+#if defined(DEBUG) -+ rate = clk_get_rate(psSysSpecData->psMPU_CK); -+ PVR_TRACE(("CPU Clock is %dMhz", HZ_TO_MHZ(rate))); -+#endif -+ -+ res = clk_enable(psSysSpecData->psSGX_FCK); -+ if (res < 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", -+ res)); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ res = clk_enable(psSysSpecData->psSGX_ICK); -+ if (res < 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", -+ res)); -+ -+ clk_disable(psSysSpecData->psSGX_FCK); -+ return PVRSRV_ERROR_GENERIC; -+ } -+ -+ psSysSpecData->bSGXClocksEnabled = IMG_TRUE; -+ sgx_need_perf(psSysData, 1); -+ return PVRSRV_OK; -+} -+ -+IMG_VOID DisableSGXClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = -+ (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; -+ -+ if (!psSysSpecData->bSGXClocksEnabled) { -+ return; -+ } -+ -+ PVR_TRACE(("DisableSGXClocks: Disabling SGX Clocks")); -+ -+ if (psSysSpecData->psSGX_ICK) { -+ clk_disable(psSysSpecData->psSGX_ICK); 
-+ } -+ -+ if (psSysSpecData->psSGX_FCK) { -+ clk_disable(psSysSpecData->psSGX_FCK); -+ } -+ -+ psSysSpecData->bSGXClocksEnabled = IMG_FALSE; -+ sgx_need_perf(psSysData, 0); -+} -+ -+static PVRSRV_ERROR InitSgxClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ struct clk *psCLK; -+ struct clk *core_ck = NULL; -+ -+ psCLK = clk_get(NULL, "sgx_fck"); -+ if (IS_ERR(psCLK)) -+ goto err0; -+ psSysSpecData->psSGX_FCK = psCLK; -+ -+ psCLK = clk_get(NULL, "sgx_ick"); -+ if (IS_ERR(psCLK)) -+ goto err1; -+ psSysSpecData->psSGX_ICK = psCLK; -+ -+ core_ck = clk_get(NULL, "core_ck"); -+ if (IS_ERR(core_ck)) -+ goto err2; -+ if (clk_set_parent(psSysSpecData->psSGX_FCK, core_ck) < 0) { -+ clk_put(core_ck); -+ goto err2; -+ } -+ clk_put(core_ck); -+ -+ RegisterConstraintNotifications(psSysSpecData); -+ -+ return PVRSRV_OK; -+ -+err2: -+ clk_put(psSysSpecData->psSGX_ICK); -+err1: -+ clk_put(psSysSpecData->psSGX_FCK); -+err0: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: couldn't init clocks fck %p ick %p core %p", __func__, -+ psSysSpecData->psSGX_FCK, psSysSpecData->psSGX_ICK, core_ck)); -+ psSysSpecData->psSGX_FCK = NULL; -+ psSysSpecData->psSGX_ICK = NULL; -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+static void CleanupSgxClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ -+ UnRegisterConstraintNotifications(psSysSpecData); -+ -+ if (psSysSpecData->psSGX_ICK) { -+ clk_put(psSysSpecData->psSGX_ICK); -+ psSysSpecData->psSGX_ICK = NULL; -+ } -+ -+ if (psSysSpecData->psSGX_FCK) { -+ clk_put(psSysSpecData->psSGX_FCK); -+ psSysSpecData->psSGX_FCK = NULL; -+ } -+} -+ -+#if defined(DEBUG) || defined(TIMING) -+static u32 inline gpt_read_reg(SYS_DATA * psSysData, u32 reg) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ -+ return __raw_readl(psSysSpecData->gpt_base + reg); -+} -+ -+static void inline gpt_write_reg(SYS_DATA * psSysData, u32 reg, u32 val) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ -+ __raw_writel(val, psSysSpecData->gpt_base + reg); -+} -+ -+static PVRSRV_ERROR InitDebugClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ struct clk *psCLK; -+ struct clk *sys_ck = NULL; -+ u32 rate; -+ -+ psCLK = clk_get(NULL, "mpu_ck"); -+ if (IS_ERR(psCLK)) -+ goto err0; -+ psSysSpecData->psMPU_CK = psCLK; -+ -+ psCLK = clk_get(NULL, "gpt11_fck"); -+ if (IS_ERR(psCLK)) -+ goto err1; -+ psSysSpecData->psGPT11_FCK = psCLK; -+ -+ psCLK = clk_get(NULL, "gpt11_ick"); -+ if (IS_ERR(psCLK)) -+ goto err2; -+ psSysSpecData->psGPT11_ICK = psCLK; -+ -+ sys_ck = clk_get(NULL, "sys_ck"); -+ if (IS_ERR(sys_ck)) -+ goto err3; -+ if (clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck) { -+ if (clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck) < 0) { -+ clk_put(sys_ck); -+ goto err3; -+ } -+ } -+ clk_put(sys_ck); -+ -+ PVR_TRACE(("GPTIMER11 clock is %dMHz", -+ HZ_TO_MHZ(clk_get_rate(psSysSpecData->psGPT11_FCK)))); -+ -+ psSysSpecData->gpt_base = ioremap(SYS_OMAP3430_GP11TIMER_PHYS_BASE, -+ SYS_OMAP3430_GPTIMER_SIZE); -+ if (!psSysSpecData->gpt_base) -+ goto err3; -+ -+ clk_enable(psSysSpecData->psGPT11_ICK); -+ clk_enable(psSysSpecData->psGPT11_FCK); -+ -+ rate = gpt_read_reg(psSysData, SYS_OMAP3430_GPTIMER_TSICR); -+ if (!(rate & 4)) { -+ PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)")); -+ gpt_write_reg(psSysData, SYS_OMAP3430_GPTIMER_TSICR, rate | 4); -+ } -+ -+ 
clk_disable(psSysSpecData->psGPT11_FCK); -+ clk_disable(psSysSpecData->psGPT11_ICK); -+ -+ return PVRSRV_OK; -+ -+err3: -+ clk_put(psSysSpecData->psGPT11_ICK); -+err2: -+ clk_put(psSysSpecData->psGPT11_FCK); -+err1: -+ clk_put(psSysSpecData->psMPU_CK); -+err0: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: couldn't init clocks: mpu %p sys %p fck %p ick %p", -+ __func__, psSysSpecData->psMPU_CK, sys_ck, -+ psSysSpecData->psGPT11_FCK, psSysSpecData->psGPT11_ICK)); -+ -+ psSysSpecData->psMPU_CK = NULL; -+ psSysSpecData->psGPT11_FCK = NULL; -+ psSysSpecData->psGPT11_ICK = NULL; -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+static void CleanupDebugClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ -+ if (psSysSpecData->psMPU_CK) { -+ clk_put(psSysSpecData->psMPU_CK); -+ psSysSpecData->psMPU_CK = NULL; -+ } -+ if (psSysSpecData->psGPT11_FCK) { -+ clk_put(psSysSpecData->psGPT11_FCK); -+ psSysSpecData->psGPT11_FCK = NULL; -+ } -+ if (psSysSpecData->psGPT11_ICK) { -+ clk_put(psSysSpecData->psGPT11_ICK); -+ psSysSpecData->psGPT11_ICK = NULL; -+ } -+} -+ -+static PVRSRV_ERROR EnableDebugClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ -+ if (clk_enable(psSysSpecData->psGPT11_FCK) < 0) -+ goto err0; -+ -+ if (clk_enable(psSysSpecData->psGPT11_ICK) < 0) -+ goto err1; -+ -+ gpt_write_reg(psSysData, SYS_OMAP3430_GPTIMER_ENABLE, 3); -+ -+ return PVRSRV_OK; -+ -+err1: -+ clk_disable(psSysSpecData->psGPT11_FCK); -+err0: -+ PVR_DPF((PVR_DBG_ERROR, "%s: can't enable clocks", __func__)); -+ -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+static inline void DisableDebugClocks(SYS_DATA * psSysData) -+{ -+ SYS_SPECIFIC_DATA *psSysSpecData = psSysData->pvSysSpecificData; -+ -+ gpt_write_reg(psSysData, SYS_OMAP3430_GPTIMER_ENABLE, 0); -+ -+ clk_disable(psSysSpecData->psGPT11_ICK); -+ clk_disable(psSysSpecData->psGPT11_FCK); -+} -+ -+#else -+ -+PVRSRV_ERROR inline InitDebugClocks(SYS_DATA * psSysData) -+{ -+ return PVRSRV_OK; -+} -+ -+static void inline CleanupDebugClocks(SYS_DATA * psSysData) -+{ -+} -+ -+static inline PVRSRV_ERROR EnableDebugClocks(SYS_DATA * psSysData) -+{ -+ return PVRSRV_OK; -+} -+ -+static inline void DisableDebugClocks(SYS_DATA * psSysData) -+{ -+} -+#endif -+ -+PVRSRV_ERROR InitSystemClocks(SYS_DATA * psSysData) -+{ -+ if (InitSgxClocks(psSysData) != PVRSRV_OK) -+ goto err0; -+ -+ if (InitDebugClocks(psSysData) != PVRSRV_OK) -+ goto err1; -+ -+ return PVRSRV_OK; -+ -+err1: -+ CleanupSgxClocks(psSysData); -+err0: -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+void CleanupSystemClocks(SYS_DATA * psSysData) -+{ -+ CleanupDebugClocks(psSysData); -+ CleanupSgxClocks(psSysData); -+} -+ -+PVRSRV_ERROR EnableSystemClocks(SYS_DATA * psSysData) -+{ -+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks")); -+ -+ -+ if (EnableDebugClocks(psSysData) != PVRSRV_OK) -+ goto err1; -+ -+ return PVRSRV_OK; -+ -+err1: -+ return PVRSRV_ERROR_GENERIC; -+} -+ -+IMG_VOID DisableSystemClocks(SYS_DATA * psSysData) -+{ -+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks")); -+ -+ DisableSGXClocks(psSysData); -+ DisableDebugClocks(psSysData); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/dbgdriv.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/dbgdriv.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/dbgdriv.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/dbgdriv.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,1701 @@ 
-+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "img_types.h" -+#include "pvr_debug.h" -+#include "dbgdrvif.h" -+#include "dbgdriv.h" -+#include "hotkey.h" -+#include "hostfunc.h" -+ -+#define LAST_FRAME_BUF_SIZE 1024 -+ -+typedef struct _DBG_LASTFRAME_BUFFER_ { -+ PDBG_STREAM psStream; -+ IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE]; -+ IMG_UINT32 ui32BufLen; -+ struct _DBG_LASTFRAME_BUFFER_ *psNext; -+} DBG_LASTFRAME_BUFFER, *PDBG_LASTFRAME_BUFFER; -+ -+static PDBG_STREAM g_psStreamList = 0; -+static PDBG_LASTFRAME_BUFFER g_psLFBufferList; -+ -+static IMG_UINT32 g_ui32LOff = 0; -+static IMG_UINT32 g_ui32Line = 0; -+static IMG_UINT32 g_ui32MonoLines = 25; -+ -+static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE; -+static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff; -+static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff; -+ -+IMG_VOID *g_pvAPIMutex = IMG_NULL; -+ -+extern IMG_UINT32 g_ui32HotKeyFrame; -+extern IMG_BOOL g_bHotKeyPressed; -+extern IMG_BOOL g_bHotKeyRegistered; -+ -+IMG_BOOL gbDumpThisFrame = IMG_FALSE; -+ -+IMG_UINT32 SpaceInStream(PDBG_STREAM psStream); -+IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize); -+PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream); -+ -+DBGKM_SERVICE_TABLE g_sDBGKMServices = { -+ sizeof(DBGKM_SERVICE_TABLE), -+ ExtDBGDrivCreateStream, -+ ExtDBGDrivDestroyStream, -+ ExtDBGDrivFindStream, -+ ExtDBGDrivWriteString, -+ ExtDBGDrivReadString, -+ ExtDBGDrivWrite, -+ ExtDBGDrivRead, -+ ExtDBGDrivSetCaptureMode, -+ ExtDBGDrivSetOutputMode, -+ ExtDBGDrivSetDebugLevel, -+ ExtDBGDrivSetFrame, -+ ExtDBGDrivGetFrame, -+ ExtDBGDrivOverrideMode, -+ ExtDBGDrivDefaultMode, -+ ExtDBGDrivWrite2, -+ ExtDBGDrivWriteStringCM, -+ ExtDBGDrivWriteCM, -+ ExtDBGDrivSetMarker, -+ ExtDBGDrivGetMarker, -+ ExtDBGDrivEndInitPhase, -+ ExtDBGDrivIsCaptureFrame, -+ ExtDBGDrivWriteLF, -+ ExtDBGDrivReadLF, -+ ExtDBGDrivGetStreamOffset, -+ ExtDBGDrivSetStreamOffset, -+ ExtDBGDrivIsLastCaptureFrame, -+}; -+ -+IMG_VOID *IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, -+ IMG_UINT32 ui32CapMode, -+ IMG_UINT32 ui32OutMode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size) -+{ -+ IMG_VOID *pvRet; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ pvRet = -+ DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, -+ ui32Size); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return pvRet; -+} -+ -+void 
IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivDestroyStream(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+IMG_VOID *IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, -+ IMG_BOOL bResetStream) -+{ -+ IMG_VOID *pvRet; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ pvRet = DBGDrivFindStream(pszName, bResetStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return pvRet; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivWriteString(psStream, pszString, ui32Level); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Limit) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivReadString(psStream, pszString, ui32Limit); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, -+ IMG_BOOL bReadInitBuffer, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = -+ DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32End, -+ IMG_UINT32 ui32SampleRate) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, -+ ui32SampleRate); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutMode) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivSetOutputMode(psStream, ui32OutMode); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream, -+ IMG_UINT32 ui32DebugLevel) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivSetDebugLevel(psStream, ui32DebugLevel); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivSetFrame(psStream, ui32Frame); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivGetFrame(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivIsLastCaptureFrame(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, -+ IMG_BOOL bCheckPreviousFrame) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ 
HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivOverrideMode(psStream, ui32Mode); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivDefaultMode(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivWriteStringCM(psStream, pszString, ui32Level); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = -+ DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Marker) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivSetMarker(psStream, ui32Marker); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream) -+{ -+ IMG_UINT32 ui32Marker; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Marker = DBGDrivGetMarker(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Marker; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level, -+ IMG_UINT32 ui32Flags) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = -+ DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, -+ ui32Flags); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_VOID IMG_CALLCONV ExtDBGDrivEndInitPhase(PDBG_STREAM psStream) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivEndInitPhase(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return; -+} -+ -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream) -+{ -+ IMG_UINT32 ui32Ret; -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ ui32Ret = DBGDrivGetStreamOffset(psStream); -+ -+ HostReleaseMutex(g_pvAPIMutex); -+ -+ return ui32Ret; -+} -+ -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, -+ IMG_UINT32 ui32StreamOffset) -+{ -+ -+ HostAquireMutex(g_pvAPIMutex); -+ -+ DBGDrivSetStreamOffset(psStream, ui32StreamOffset); -+ -+ 
HostReleaseMutex(g_pvAPIMutex); -+} -+ -+IMG_UINT32 AtoI(char *szIn) -+{ -+ IMG_UINT32 ui32Len = 0; -+ IMG_UINT32 ui32Value = 0; -+ IMG_UINT32 ui32Digit = 1; -+ IMG_UINT32 ui32Base = 10; -+ int iPos; -+ char bc; -+ -+ while (szIn[ui32Len] > 0) { -+ ui32Len++; -+ } -+ -+ if (ui32Len == 0) { -+ return (0); -+ } -+ -+ iPos = 0; -+ while (szIn[iPos] == '0') { -+ iPos++; -+ } -+ if (szIn[iPos] == '\0') { -+ return 0; -+ } -+ if (szIn[iPos] == 'x' || szIn[iPos] == 'X') { -+ ui32Base = 16; -+ szIn[iPos] = '0'; -+ } -+ -+ for (iPos = ui32Len - 1; iPos >= 0; iPos--) { -+ bc = szIn[iPos]; -+ -+ if ((bc >= 'a') && (bc <= 'f') && ui32Base == 16) { -+ bc -= 'a' - 0xa; -+ } else if ((bc >= 'A') && (bc <= 'F') && ui32Base == 16) { -+ bc -= 'A' - 0xa; -+ } else if ((bc >= '0') && (bc <= '9')) { -+ bc -= '0'; -+ } else -+ return (0); -+ -+ ui32Value += bc * ui32Digit; -+ -+ ui32Digit = ui32Digit * ui32Base; -+ } -+ return (ui32Value); -+} -+ -+IMG_BOOL StreamValid(PDBG_STREAM psStream) -+{ -+ PDBG_STREAM psThis; -+ -+ psThis = g_psStreamList; -+ -+ while (psThis) { -+ if (psStream && (psThis == psStream)) { -+ return (IMG_TRUE); -+ } else { -+ psThis = psThis->psNext; -+ } -+ } -+ -+ return (IMG_FALSE); -+} -+ -+void Write(PDBG_STREAM psStream, IMG_UINT8 * pui8Data, -+ IMG_UINT32 ui32InBuffSize) -+{ -+ -+ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size) { -+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr; -+ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1; -+ -+ HostMemCopy((IMG_VOID *) (psStream->ui32Base + -+ psStream->ui32WPtr), -+ (IMG_VOID *) pui8Data, ui32B1); -+ -+ HostMemCopy((IMG_VOID *) psStream->ui32Base, -+ (IMG_VOID *) ((IMG_UINT32) pui8Data + ui32B1), -+ ui32B2); -+ -+ psStream->ui32WPtr = ui32B2; -+ } else { -+ HostMemCopy((IMG_VOID *) (psStream->ui32Base + -+ psStream->ui32WPtr), -+ (IMG_VOID *) pui8Data, ui32InBuffSize); -+ -+ psStream->ui32WPtr += ui32InBuffSize; -+ -+ if (psStream->ui32WPtr == psStream->ui32Size) { -+ psStream->ui32WPtr = 0; -+ } -+ } -+ psStream->ui32DataWritten += ui32InBuffSize; -+} -+ -+void MonoOut(IMG_CHAR * pszString, IMG_BOOL bNewLine) -+{ -+ IMG_UINT32 i; -+ IMG_CHAR *pScreen; -+ -+ pScreen = (char *)DBGDRIV_MONOBASE; -+ -+ pScreen += g_ui32Line * 160; -+ -+ i = 0; -+ do { -+ pScreen[g_ui32LOff + (i * 2)] = pszString[i]; -+ pScreen[g_ui32LOff + (i * 2) + 1] = 127; -+ i++; -+ } -+ while ((pszString[i] != 0) && (i < 4096)); -+ -+ g_ui32LOff += i * 2; -+ -+ if (bNewLine) { -+ g_ui32LOff = 0; -+ g_ui32Line++; -+ } -+ -+ if (g_ui32Line == g_ui32MonoLines) { -+ g_ui32Line = g_ui32MonoLines - 1; -+ -+ HostMemCopy((IMG_VOID *) DBGDRIV_MONOBASE, -+ (IMG_VOID *) (DBGDRIV_MONOBASE + 160), -+ 160 * (g_ui32MonoLines - 1)); -+ -+ HostMemSet((IMG_VOID *) (DBGDRIV_MONOBASE + -+ (160 * (g_ui32MonoLines - 1))), 0, -+ 160); -+ } -+} -+ -+void AppendName(IMG_CHAR * pszOut, IMG_CHAR * pszBase, IMG_CHAR * pszName) -+{ -+ IMG_UINT32 i; -+ IMG_UINT32 ui32Off; -+ -+ i = 0; -+ -+ while (pszBase[i] != 0) { -+ pszOut[i] = pszBase[i]; -+ i++; -+ } -+ -+ ui32Off = i; -+ i = 0; -+ -+ while (pszName[i] != 0) { -+ pszOut[ui32Off + i] = pszName[i]; -+ i++; -+ } -+ -+ pszOut[ui32Off + i] = pszName[i]; -+} -+ -+IMG_VOID *IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName, -+ IMG_UINT32 ui32CapMode, -+ IMG_UINT32 ui32OutMode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size) -+{ -+ PDBG_STREAM psStream; -+ PDBG_STREAM psInitStream; -+ PDBG_LASTFRAME_BUFFER psLFBuffer; -+ IMG_UINT32 ui32Off; -+ IMG_VOID *pvBase; -+ -+ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, 
IMG_FALSE); -+ -+ if (psStream) { -+ return ((IMG_VOID *) psStream); -+ } -+ -+ psStream = HostNonPageablePageAlloc(1); -+ psInitStream = HostNonPageablePageAlloc(1); -+ psLFBuffer = HostNonPageablePageAlloc(1); -+ if ((!psStream) || (!psInitStream) || (!psLFBuffer) -+ ) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DBGDriv: Couldn't create buffer !!!!!\n\r")); -+ return ((IMG_VOID *) 0); -+ } -+ -+ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) { -+ pvBase = HostNonPageablePageAlloc(ui32Size); -+ } else { -+ pvBase = HostPageablePageAlloc(ui32Size); -+ } -+ -+ if (!pvBase) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DBGDriv: Couldn't create buffer !!!!!\n\r")); -+ HostNonPageablePageFree(psStream); -+ return ((IMG_VOID *) 0); -+ } -+ -+ psStream->psNext = 0; -+ psStream->ui32Flags = ui32Flags; -+ psStream->ui32Base = (IMG_UINT32) pvBase; -+ psStream->ui32Size = ui32Size * 4096; -+ psStream->ui32RPtr = 0; -+ psStream->ui32WPtr = 0; -+ psStream->ui32DataWritten = 0; -+ psStream->ui32CapMode = ui32CapMode; -+ psStream->ui32OutMode = ui32OutMode; -+ psStream->ui32DebugLevel = DEBUG_LEVEL_0; -+ psStream->ui32DefaultMode = ui32CapMode; -+ psStream->ui32Start = 0; -+ psStream->ui32End = 0; -+ psStream->ui32Current = 0; -+ psStream->ui32SampleRate = 1; -+ psStream->ui32Access = 0; -+ psStream->ui32Timeout = 0; -+ psStream->ui32Marker = 0; -+ psStream->bInitPhaseComplete = IMG_FALSE; -+ -+ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) { -+ pvBase = HostNonPageablePageAlloc(ui32Size); -+ } else { -+ pvBase = HostPageablePageAlloc(ui32Size); -+ } -+ -+ if (!pvBase) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DBGDriv: Couldn't create buffer !!!!!\n\r")); -+ -+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) { -+ HostNonPageablePageFree((IMG_VOID *) psStream-> -+ ui32Base); -+ } else { -+ HostPageablePageFree((IMG_VOID *) psStream->ui32Base); -+ } -+ HostNonPageablePageFree(psStream); -+ return ((IMG_VOID *) 0); -+ } -+ -+ psInitStream->psNext = 0; -+ psInitStream->ui32Flags = ui32Flags; -+ psInitStream->ui32Base = (IMG_UINT32) pvBase; -+ psInitStream->ui32Size = ui32Size * 4096; -+ psInitStream->ui32RPtr = 0; -+ psInitStream->ui32WPtr = 0; -+ psInitStream->ui32DataWritten = 0; -+ psInitStream->ui32CapMode = ui32CapMode; -+ psInitStream->ui32OutMode = ui32OutMode; -+ psInitStream->ui32DebugLevel = DEBUG_LEVEL_0; -+ psInitStream->ui32DefaultMode = ui32CapMode; -+ psInitStream->ui32Start = 0; -+ psInitStream->ui32End = 0; -+ psInitStream->ui32Current = 0; -+ psInitStream->ui32SampleRate = 1; -+ psInitStream->ui32Access = 0; -+ psInitStream->ui32Timeout = 0; -+ psInitStream->ui32Marker = 0; -+ psInitStream->bInitPhaseComplete = IMG_FALSE; -+ -+ psStream->psInitStream = psInitStream; -+ -+ psLFBuffer->psStream = psStream; -+ psLFBuffer->ui32BufLen = 0; -+ -+ g_bHotkeyMiddump = IMG_FALSE; -+ g_ui32HotkeyMiddumpStart = 0xffffffff; -+ g_ui32HotkeyMiddumpEnd = 0xffffffff; -+ -+ ui32Off = 0; -+ -+ do { -+ psStream->szName[ui32Off] = pszName[ui32Off]; -+ -+ ui32Off++; -+ } -+ while ((pszName[ui32Off] != 0) -+ && (ui32Off < (4096 - sizeof(DBG_STREAM)))); -+ -+ psStream->szName[ui32Off] = pszName[ui32Off]; -+ -+ psStream->psNext = g_psStreamList; -+ g_psStreamList = psStream; -+ -+ psLFBuffer->psNext = g_psLFBufferList; -+ g_psLFBufferList = psLFBuffer; -+ -+ return ((IMG_VOID *) psStream); -+} -+ -+void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream) -+{ -+ PDBG_STREAM psStreamThis; -+ PDBG_STREAM psStreamPrev; -+ PDBG_LASTFRAME_BUFFER psLFBuffer; -+ PDBG_LASTFRAME_BUFFER psLFThis; -+ PDBG_LASTFRAME_BUFFER 
psLFPrev; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", -+ psStream->szName)); -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psLFBuffer = FindLFBuf(psStream); -+ -+ psStreamThis = g_psStreamList; -+ psStreamPrev = 0; -+ -+ while (psStreamThis) { -+ if (psStreamThis == psStream) { -+ if (psStreamPrev) { -+ psStreamPrev->psNext = psStreamThis->psNext; -+ } else { -+ g_psStreamList = psStreamThis->psNext; -+ } -+ -+ psStreamThis = 0; -+ } else { -+ psStreamPrev = psStreamThis; -+ psStreamThis = psStreamThis->psNext; -+ } -+ } -+ -+ psLFThis = g_psLFBufferList; -+ psLFPrev = 0; -+ -+ while (psLFThis) { -+ if (psLFThis == psLFBuffer) { -+ if (psLFPrev) { -+ psLFPrev->psNext = psLFThis->psNext; -+ } else { -+ g_psLFBufferList = psLFThis->psNext; -+ } -+ -+ psLFThis = 0; -+ } else { -+ psLFPrev = psLFThis; -+ psLFThis = psLFThis->psNext; -+ } -+ } -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY) { -+ DeactivateHotKeys(); -+ } -+ -+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) { -+ HostNonPageablePageFree((IMG_VOID *) psStream->ui32Base); -+ HostNonPageablePageFree((IMG_VOID *) psStream->psInitStream-> -+ ui32Base); -+ } else { -+ HostPageablePageFree((IMG_VOID *) psStream->ui32Base); -+ HostPageablePageFree((IMG_VOID *) psStream->psInitStream-> -+ ui32Base); -+ } -+ -+ HostNonPageablePageFree(psStream->psInitStream); -+ HostNonPageablePageFree(psStream); -+ HostNonPageablePageFree(psLFBuffer); -+ -+ if (g_psStreamList == 0) { -+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Stream list now empty")); -+ } -+ -+ return; -+} -+ -+IMG_VOID *IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, -+ IMG_BOOL bResetStream) -+{ -+ PDBG_STREAM psStream; -+ PDBG_STREAM psThis; -+ IMG_UINT32 ui32Off; -+ IMG_BOOL bAreSame; -+ -+ psStream = 0; -+ -+ for (psThis = g_psStreamList; psThis != IMG_NULL; -+ psThis = psThis->psNext) { -+ bAreSame = IMG_TRUE; -+ ui32Off = 0; -+ -+ if (strlen(psThis->szName) == strlen(pszName)) { -+ while ((psThis->szName[ui32Off] != 0) -+ && (pszName[ui32Off] != 0) && (ui32Off < 128) -+ && bAreSame) { -+ if (psThis->szName[ui32Off] != pszName[ui32Off]) { -+ bAreSame = IMG_FALSE; -+ } -+ -+ ui32Off++; -+ } -+ } else { -+ bAreSame = IMG_FALSE; -+ } -+ -+ if (bAreSame) { -+ psStream = psThis; -+ break; -+ } -+ } -+ -+ if (bResetStream && psStream) { -+ static char szComment[] = "-- Init phase terminated\r\n"; -+ psStream->psInitStream->ui32RPtr = 0; -+ psStream->ui32RPtr = 0; -+ psStream->ui32WPtr = 0; -+ psStream->ui32DataWritten = -+ psStream->psInitStream->ui32DataWritten; -+ if (psStream->bInitPhaseComplete == IMG_FALSE) { -+ if (psStream->ui32Flags & DEBUG_FLAGS_TEXTSTREAM) { -+ DBGDrivWrite2(psStream, (IMG_UINT8 *) szComment, -+ sizeof(szComment) - 1, 0x01); -+ } -+ psStream->bInitPhaseComplete = IMG_TRUE; -+ } -+ } -+ -+ return ((IMG_VOID *) psStream); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) { -+ if (!(psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE)) { -+ return (0); -+ } -+ } else { -+ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) { -+ if ((psStream->ui32Current != g_ui32HotKeyFrame) -+ || (g_bHotKeyPressed == IMG_FALSE)) { -+ return (0); -+ } -+ } -+ } -+ -+ return (DBGDrivWriteString(psStream, pszString, ui32Level)); -+ -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ 
IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Len; -+ IMG_UINT32 ui32Space; -+ IMG_UINT32 ui32WPtr; -+ IMG_UINT8 *pui8Buffer; -+ -+ if (!StreamValid(psStream)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (!(psStream->ui32DebugLevel & ui32Level)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (!(psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC)) { -+ if (psStream->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG) { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s\r\n", -+ psStream->szName, pszString)); -+ } -+ -+ if (psStream->ui32OutMode & DEBUG_OUTMODE_MONO) { -+ MonoOut(psStream->szName, IMG_FALSE); -+ MonoOut(": ", IMG_FALSE); -+ MonoOut(pszString, IMG_TRUE); -+ } -+ } -+ -+ if (!((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) || -+ (psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) -+ ) -+ ) { -+ return (0xFFFFFFFF); -+ } -+ -+ ui32Space = SpaceInStream(psStream); -+ -+ if (ui32Space > 0) { -+ ui32Space--; -+ } -+ -+ ui32Len = 0; -+ ui32WPtr = psStream->ui32WPtr; -+ pui8Buffer = (IMG_UINT8 *) psStream->ui32Base; -+ -+ while ((pszString[ui32Len] != 0) && (ui32Len < ui32Space)) { -+ pui8Buffer[ui32WPtr] = pszString[ui32Len]; -+ ui32Len++; -+ ui32WPtr++; -+ if (ui32WPtr == psStream->ui32Size) { -+ ui32WPtr = 0; -+ } -+ } -+ -+ if (ui32Len < ui32Space) { -+ -+ pui8Buffer[ui32WPtr] = pszString[ui32Len]; -+ ui32Len++; -+ ui32WPtr++; -+ if (ui32WPtr == psStream->ui32Size) { -+ ui32WPtr = 0; -+ } -+ -+ psStream->ui32WPtr = ui32WPtr; -+ psStream->ui32DataWritten += ui32Len; -+ } else { -+ ui32Len = 0; -+ } -+ -+ return (ui32Len); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Limit) -+{ -+ IMG_UINT32 ui32OutLen; -+ IMG_UINT32 ui32Len; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT8 *pui8Buff; -+ -+ if (!StreamValid(psStream)) { -+ return (0); -+ } -+ -+ pui8Buff = (IMG_UINT8 *) psStream->ui32Base; -+ ui32Offset = psStream->ui32RPtr; -+ -+ if (psStream->ui32RPtr == psStream->ui32WPtr) { -+ return (0); -+ } -+ -+ ui32Len = 0; -+ while ((pui8Buff[ui32Offset] != 0) -+ && (ui32Offset != psStream->ui32WPtr)) { -+ ui32Offset++; -+ ui32Len++; -+ -+ if (ui32Offset == psStream->ui32Size) { -+ ui32Offset = 0; -+ } -+ } -+ -+ ui32OutLen = ui32Len + 1; -+ -+ if (ui32Len > ui32Limit) { -+ return (0); -+ } -+ -+ ui32Offset = psStream->ui32RPtr; -+ ui32Len = 0; -+ -+ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit)) { -+ pszString[ui32Len] = pui8Buff[ui32Offset]; -+ ui32Offset++; -+ ui32Len++; -+ -+ if (ui32Offset == psStream->ui32Size) { -+ ui32Offset = 0; -+ } -+ } -+ -+ pszString[ui32Len] = pui8Buff[ui32Offset]; -+ -+ psStream->ui32RPtr = ui32Offset + 1; -+ -+ if (psStream->ui32RPtr == psStream->ui32Size) { -+ psStream->ui32RPtr = 0; -+ } -+ -+ return (ui32OutLen); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Space; -+ DBG_STREAM *psStream; -+ -+ if (!StreamValid(psMainStream)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (!(psMainStream->ui32DebugLevel & ui32Level)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (psMainStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) { -+ if (!(psMainStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE)) { -+ return (0xFFFFFFFF); -+ } -+ } else if (psMainStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) { -+ if ((psMainStream->ui32Current != g_ui32HotKeyFrame) -+ || (g_bHotKeyPressed == IMG_FALSE)) -+ return (0xFFFFFFFF); -+ } -+ -+ if (psMainStream->bInitPhaseComplete) { -+ psStream = psMainStream; -+ } else { -+ psStream = 
psMainStream->psInitStream; -+ } -+ -+ ui32Space = SpaceInStream(psStream); -+ -+ if (!(psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE)) { -+ return (0); -+ } -+ -+ if (ui32Space < 8) { -+ return (0); -+ } -+ -+ if (ui32Space <= (ui32InBuffSize + 4)) { -+ ui32InBuffSize = ui32Space - 8; -+ } -+ -+ Write(psStream, (IMG_UINT8 *) & ui32InBuffSize, 4); -+ Write(psStream, pui8InBuf, ui32InBuffSize); -+ -+ return (ui32InBuffSize); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) { -+ if (!(psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE)) { -+ return (0xFFFFFFFF); -+ } -+ } else { -+ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) { -+ if ((psStream->ui32Current != g_ui32HotKeyFrame) -+ || (g_bHotKeyPressed == IMG_FALSE)) { -+ return (0xFFFFFFFF); -+ } -+ } -+ } -+ -+ return (DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level)); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level) -+{ -+ IMG_UINT32 ui32Space; -+ DBG_STREAM *psStream; -+ -+ if (!StreamValid(psMainStream)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (!(psMainStream->ui32DebugLevel & ui32Level)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (psMainStream->bInitPhaseComplete) { -+ psStream = psMainStream; -+ } else { -+ psStream = psMainStream->psInitStream; -+ } -+ -+ ui32Space = SpaceInStream(psStream); -+ -+ if (!(psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE)) { -+ return (0); -+ } -+ -+ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION) { -+ -+ if (ui32Space < 32) { -+ return (0); -+ } -+ } else { -+ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4))) { -+ IMG_UINT32 ui32NewBufSize; -+ -+ ui32NewBufSize = 2 * psStream->ui32Size; -+ -+ if (ui32InBuffSize > psStream->ui32Size) { -+ ui32NewBufSize += ui32InBuffSize; -+ } -+ -+ if (!ExpandStreamBuffer(psStream, ui32NewBufSize)) { -+ if (ui32Space < 32) { -+ return (0); -+ } -+ } -+ -+ ui32Space = SpaceInStream(psStream); -+ } -+ } -+ -+ if (ui32Space <= (ui32InBuffSize + 4)) { -+ ui32InBuffSize = ui32Space - 4; -+ } -+ -+ Write(psStream, pui8InBuf, ui32InBuffSize); -+ -+ return (ui32InBuffSize); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, -+ IMG_BOOL bReadInitBuffer, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf) -+{ -+ IMG_UINT32 ui32Data; -+ DBG_STREAM *psStream; -+ -+ if (!StreamValid(psMainStream)) { -+ return (0); -+ } -+ -+ if (bReadInitBuffer) { -+ psStream = psMainStream->psInitStream; -+ } else { -+ psStream = psMainStream; -+ } -+ -+ if (psStream->ui32RPtr == psStream->ui32WPtr) { -+ return (0); -+ } -+ -+ if (psStream->ui32RPtr <= psStream->ui32WPtr) { -+ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr; -+ } else { -+ ui32Data = -+ psStream->ui32WPtr + (psStream->ui32Size - -+ psStream->ui32RPtr); -+ } -+ -+ if (ui32Data > ui32OutBuffSize) { -+ ui32Data = ui32OutBuffSize; -+ } -+ -+ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size) { -+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr; -+ IMG_UINT32 ui32B2 = ui32Data - ui32B1; -+ -+ HostMemCopy((IMG_VOID *) pui8OutBuf, -+ (IMG_VOID *) (psStream->ui32Base + -+ psStream->ui32RPtr), ui32B1); -+ -+ HostMemCopy((IMG_VOID *) ((IMG_UINT32) pui8OutBuf + ui32B1), -+ (IMG_VOID *) psStream->ui32Base, ui32B2); -+ -+ 
psStream->ui32RPtr = ui32B2; -+ } else { -+ HostMemCopy((IMG_VOID *) pui8OutBuf, -+ (IMG_VOID *) (psStream->ui32Base + -+ psStream->ui32RPtr), ui32Data); -+ -+ psStream->ui32RPtr += ui32Data; -+ -+ if (psStream->ui32RPtr == psStream->ui32Size) { -+ psStream->ui32RPtr = 0; -+ } -+ } -+ -+ return (ui32Data); -+} -+ -+void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32End, -+ IMG_UINT32 ui32SampleRate) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32CapMode = ui32Mode; -+ psStream->ui32DefaultMode = ui32Mode; -+ psStream->ui32Start = ui32Start; -+ psStream->ui32End = ui32End; -+ psStream->ui32SampleRate = ui32SampleRate; -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY) { -+ ActivateHotKeys(psStream); -+ } -+} -+ -+void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutMode) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32OutMode = ui32OutMode; -+} -+ -+void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream, -+ IMG_UINT32 ui32DebugLevel) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32DebugLevel = ui32DebugLevel; -+} -+ -+void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32Current = ui32Frame; -+ -+ if ((ui32Frame >= psStream->ui32Start) && -+ (ui32Frame <= psStream->ui32End) && -+ (((ui32Frame - psStream->ui32Start) % psStream->ui32SampleRate) == -+ 0)) { -+ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE; -+ } else { -+ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE; -+ } -+ -+ if (g_bHotkeyMiddump) { -+ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) && -+ (ui32Frame <= g_ui32HotkeyMiddumpEnd) && -+ (((ui32Frame - -+ g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == -+ 0)) { -+ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE; -+ } else { -+ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE; -+ if (psStream->ui32Current > g_ui32HotkeyMiddumpEnd) { -+ g_bHotkeyMiddump = IMG_FALSE; -+ } -+ } -+ } -+ -+ if (g_bHotKeyRegistered) { -+ g_bHotKeyRegistered = IMG_FALSE; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Hotkey pressed (%08x)!\n", -+ psStream)); -+ -+ if (!g_bHotKeyPressed) { -+ -+ g_ui32HotKeyFrame = psStream->ui32Current + 2; -+ -+ g_bHotKeyPressed = IMG_TRUE; -+ } -+ -+ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) -+ && (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)) { -+ if (!g_bHotkeyMiddump) { -+ -+ g_ui32HotkeyMiddumpStart = -+ g_ui32HotKeyFrame + 1; -+ g_ui32HotkeyMiddumpEnd = 0xffffffff; -+ g_bHotkeyMiddump = IMG_TRUE; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Sampling every %d frame(s)\n", -+ psStream->ui32SampleRate)); -+ } else { -+ -+ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Turning off sampling\n")); -+ } -+ } -+ -+ } -+ -+ if (psStream->ui32Current > g_ui32HotKeyFrame) { -+ g_bHotKeyPressed = IMG_FALSE; -+ } -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return (0); -+ } -+ -+ return (psStream->ui32Current); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream) -+{ -+ IMG_UINT32 ui32NextFrame; -+ -+ if (!StreamValid(psStream)) { -+ return IMG_FALSE; -+ } -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) { -+ ui32NextFrame = -+ psStream->ui32Current + psStream->ui32SampleRate; -+ if (ui32NextFrame > psStream->ui32End) { -+ return IMG_TRUE; -+ } -+ } -+ 
return IMG_FALSE; -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, -+ IMG_BOOL bCheckPreviousFrame) -+{ -+ IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1 : 0; -+ -+ if (!StreamValid(psStream)) { -+ return IMG_FALSE; -+ } -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) { -+ -+ if (g_bHotkeyMiddump) { -+ if ((psStream->ui32Current >= -+ (g_ui32HotkeyMiddumpStart - ui32FrameShift)) -+ && (psStream->ui32Current <= -+ (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) -+ && -+ ((((psStream->ui32Current + ui32FrameShift) - -+ g_ui32HotkeyMiddumpStart) % -+ psStream->ui32SampleRate) == 0)) { -+ return IMG_TRUE; -+ } -+ } else { -+ if ((psStream->ui32Current >= -+ (psStream->ui32Start - ui32FrameShift)) -+ && (psStream->ui32Current <= -+ (psStream->ui32End - ui32FrameShift)) -+ && -+ ((((psStream->ui32Current + ui32FrameShift) - -+ psStream->ui32Start) % -+ psStream->ui32SampleRate) == 0)) { -+ return IMG_TRUE; -+ } -+ } -+ } else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) { -+ if ((psStream->ui32Current == -+ (g_ui32HotKeyFrame - ui32FrameShift)) -+ && (g_bHotKeyPressed)) { -+ return IMG_TRUE; -+ } -+ } -+ return IMG_FALSE; -+} -+ -+void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32CapMode = ui32Mode; -+} -+ -+void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32CapMode = psStream->ui32DefaultMode; -+} -+ -+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return; -+ } -+ -+ psStream->ui32Marker = ui32Marker; -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream) -+{ -+ -+ if (!StreamValid(psStream)) { -+ return 0; -+ } -+ -+ return psStream->ui32Marker; -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream) -+{ -+ PDBG_STREAM psStream; -+ -+ if (!StreamValid(psMainStream)) { -+ return 0; -+ } -+ -+ if (psMainStream->bInitPhaseComplete) { -+ psStream = psMainStream; -+ } else { -+ psStream = psMainStream->psInitStream; -+ } -+ -+ return psStream->ui32DataWritten; -+} -+ -+IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, -+ IMG_UINT32 ui32StreamOffset) -+{ -+ PDBG_STREAM psStream; -+ -+ if (!StreamValid(psMainStream)) { -+ return; -+ } -+ -+ if (psMainStream->bInitPhaseComplete) { -+ psStream = psMainStream; -+ } else { -+ psStream = psMainStream->psInitStream; -+ } -+ -+ psStream->ui32DataWritten = ui32StreamOffset; -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(void) -+{ -+ return ((IMG_UINT32) & g_sDBGKMServices); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level, -+ IMG_UINT32 ui32Flags) -+{ -+ PDBG_LASTFRAME_BUFFER psLFBuffer; -+ -+ if (!StreamValid(psStream)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (!(psStream->ui32DebugLevel & ui32Level)) { -+ return (0xFFFFFFFF); -+ } -+ -+ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) { -+ if (!(psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE)) { -+ return (0xFFFFFFFF); -+ } -+ } else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY) { -+ if ((psStream->ui32Current != g_ui32HotKeyFrame) -+ || (g_bHotKeyPressed == IMG_FALSE)) -+ return (0xFFFFFFFF); -+ } -+ -+ psLFBuffer = FindLFBuf(psStream); -+ -+ if (ui32Flags & WRITELF_FLAGS_RESETBUF) { -+ -+ ui32InBuffSize = -+ (ui32InBuffSize > 
-+ LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : -+ ui32InBuffSize; -+ HostMemCopy((IMG_VOID *) psLFBuffer->ui8Buffer, -+ (IMG_VOID *) pui8InBuf, ui32InBuffSize); -+ psLFBuffer->ui32BufLen = ui32InBuffSize; -+ } else { -+ -+ ui32InBuffSize = -+ ((psLFBuffer->ui32BufLen + ui32InBuffSize) > -+ LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - -+ psLFBuffer-> -+ ui32BufLen) : ui32InBuffSize; -+ HostMemCopy((IMG_VOID *) (&psLFBuffer-> -+ ui8Buffer[psLFBuffer->ui32BufLen]), -+ (IMG_VOID *) pui8InBuf, ui32InBuffSize); -+ psLFBuffer->ui32BufLen += ui32InBuffSize; -+ } -+ -+ return (ui32InBuffSize); -+} -+ -+IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf) -+{ -+ PDBG_LASTFRAME_BUFFER psLFBuffer; -+ IMG_UINT32 ui32Data; -+ -+ if (!StreamValid(psStream)) { -+ return (0); -+ } -+ -+ psLFBuffer = FindLFBuf(psStream); -+ -+ ui32Data = -+ (ui32OutBuffSize < -+ psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen; -+ -+ HostMemCopy((IMG_VOID *) pui8OutBuf, (IMG_VOID *) psLFBuffer->ui8Buffer, -+ ui32Data); -+ -+ return ui32Data; -+} -+ -+IMG_VOID IMG_CALLCONV DBGDrivEndInitPhase(PDBG_STREAM psStream) -+{ -+ psStream->bInitPhaseComplete = IMG_TRUE; -+} -+ -+IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize) -+{ -+ IMG_VOID *pvNewBuf; -+ IMG_UINT32 ui32NewSizeInPages; -+ IMG_UINT32 ui32NewWOffset; -+ IMG_UINT32 ui32SpaceInOldBuf; -+ -+ if (psStream->ui32Size >= ui32NewSize) { -+ return IMG_FALSE; -+ } -+ -+ ui32SpaceInOldBuf = SpaceInStream(psStream); -+ -+ ui32NewSizeInPages = ((ui32NewSize + 0xfff) & ~0xfff) / 4096; -+ -+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) { -+ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages); -+ } else { -+ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages); -+ } -+ -+ if (pvNewBuf == IMG_NULL) { -+ return IMG_FALSE; -+ } -+ -+ if (psStream->ui32RPtr <= psStream->ui32WPtr) { -+ -+ HostMemCopy((IMG_VOID *) pvNewBuf, -+ (IMG_VOID *) (psStream->ui32Base + -+ psStream->ui32RPtr), -+ psStream->ui32WPtr - psStream->ui32RPtr); -+ } else { -+ IMG_UINT32 ui32FirstCopySize; -+ -+ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr; -+ -+ HostMemCopy((IMG_VOID *) pvNewBuf, -+ (IMG_VOID *) (psStream->ui32Base + -+ psStream->ui32RPtr), -+ ui32FirstCopySize); -+ -+ HostMemCopy((IMG_VOID *) ((IMG_UINT32) pvNewBuf + -+ ui32FirstCopySize), -+ (IMG_VOID *) psStream->ui32Base, -+ psStream->ui32WPtr); -+ } -+ -+ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf; -+ -+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) { -+ HostNonPageablePageFree((IMG_VOID *) psStream->ui32Base); -+ } else { -+ HostPageablePageFree((IMG_VOID *) psStream->ui32Base); -+ } -+ -+ psStream->ui32Base = (IMG_UINT32) pvNewBuf; -+ psStream->ui32RPtr = 0; -+ psStream->ui32WPtr = ui32NewWOffset; -+ psStream->ui32Size = ui32NewSizeInPages * 4096; -+ -+ return IMG_TRUE; -+} -+ -+IMG_UINT32 SpaceInStream(PDBG_STREAM psStream) -+{ -+ IMG_UINT32 ui32Space; -+ -+ if (psStream->ui32RPtr > psStream->ui32WPtr) { -+ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr; -+ } else { -+ ui32Space = -+ psStream->ui32RPtr + (psStream->ui32Size - -+ psStream->ui32WPtr); -+ } -+ -+ return ui32Space; -+} -+ -+void DestroyAllStreams(void) -+{ -+ while (g_psStreamList != IMG_NULL) { -+ DBGDrivDestroyStream(g_psStreamList); -+ } -+ return; -+} -+ -+PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream) -+{ -+ PDBG_LASTFRAME_BUFFER psLFBuffer; -+ -+ psLFBuffer = g_psLFBufferList; -+ -+ while 
(psLFBuffer) { -+ if (psLFBuffer->psStream == psStream) { -+ break; -+ } -+ -+ psLFBuffer = psLFBuffer->psNext; -+ } -+ -+ return psLFBuffer; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/dbgdriv.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/dbgdriv.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/dbgdriv.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/dbgdriv.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,186 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _DBGDRIV_ -+#define _DBGDRIV_ -+ -+#define BUFFER_SIZE 64*PAGESIZE -+ -+#define DBGDRIV_VERSION 0x100 -+#define MAX_PROCESSES 2 -+#define BLOCK_USED 0x01 -+#define BLOCK_LOCKED 0x02 -+#define DBGDRIV_MONOBASE 0x000B0000 -+ -+extern IMG_VOID *g_pvAPIMutex; -+ -+IMG_VOID *IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName, -+ IMG_UINT32 ui32CapMode, -+ IMG_UINT32 ui32OutMode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Pages); -+IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream); -+IMG_VOID *IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, -+ IMG_BOOL bResetStream); -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Limit); -+IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, -+ IMG_BOOL bReadInitBuffer, -+ IMG_UINT32 ui32OutBufferSize, -+ IMG_UINT8 * pui8OutBuf); -+IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32Stop, -+ IMG_UINT32 ui32SampleRate); -+IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutMode); -+IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream, -+ IMG_UINT32 ui32DebugLevel); -+IMG_VOID IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Frame); -+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream); -+IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream, -+ 
IMG_UINT32 ui32Mode); -+IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID); -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Marker); -+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, -+ IMG_BOOL bCheckPreviousFrame); -+IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level, -+ IMG_UINT32 ui32Flags); -+IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf); -+IMG_VOID IMG_CALLCONV DBGDrivEndInitPhase(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream); -+IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, -+ IMG_UINT32 ui32StreamOffset); -+ -+IMG_VOID DestroyAllStreams(IMG_VOID); -+ -+IMG_UINT32 AtoI(char *szIn); -+ -+IMG_VOID HostMemSet(IMG_VOID * pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size); -+IMG_VOID HostMemCopy(IMG_VOID * pvDest, IMG_VOID * pvSrc, IMG_UINT32 ui32Size); -+IMG_BOOL StreamValid(PDBG_STREAM psStream); -+IMG_VOID Write(PDBG_STREAM psStream, IMG_UINT8 * pui8Data, -+ IMG_UINT32 ui32InBuffSize); -+IMG_VOID MonoOut(IMG_CHAR * pszString, IMG_BOOL bNewLine); -+ -+IMG_VOID *IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, -+ IMG_UINT32 ui32CapMode, -+ IMG_UINT32 ui32OutMode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32Size); -+IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream); -+IMG_VOID *IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, -+ IMG_BOOL bResetStream); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 ui32Limit); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, -+ IMG_BOOL bReadInitBuffer, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf); -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32End, -+ IMG_UINT32 ui32SampleRate); -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutMode); -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream, -+ IMG_UINT32 ui32DebugLevel); -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Frame); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream); -+IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Mode); -+IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream, -+ IMG_CHAR * pszString, -+ IMG_UINT32 
ui32Level); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level); -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, -+ IMG_UINT32 ui32Marker); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream); -+IMG_VOID IMG_CALLCONV ExtDBGDrivEndInitPhase(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, -+ IMG_BOOL bCheckPreviousFrame); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, -+ IMG_UINT8 * pui8InBuf, -+ IMG_UINT32 ui32InBuffSize, -+ IMG_UINT32 ui32Level, -+ IMG_UINT32 ui32Flags); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, -+ IMG_UINT32 ui32OutBuffSize, -+ IMG_UINT8 * pui8OutBuf); -+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream); -+IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, -+ IMG_UINT32 ui32StreamOffset); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hostfunc.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hostfunc.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hostfunc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hostfunc.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,192 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "pvr_debug.h" -+ -+IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING; -+ -+#define PVR_STRING_TERMINATOR '\0' -+#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') ) -+ -+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, -+ const IMG_CHAR * pszFileName, -+ IMG_UINT32 ui32Line, const IMG_CHAR * pszFormat, ... 
-+ ) -+{ -+ IMG_BOOL bTrace, bDebug; -+ IMG_CHAR *pszLeafName; -+ -+ pszLeafName = (char *)strrchr(pszFileName, '\\'); -+ -+ if (pszLeafName) { -+ pszFileName = pszLeafName; -+ } -+ -+ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE; -+ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel); -+ -+ if (bTrace || bDebug) { -+ va_list vaArgs; -+ static char szBuffer[256]; -+ -+ va_start(vaArgs, pszFormat); -+ -+ if (bDebug) { -+ switch (ui32DebugLevel) { -+ case DBGPRIV_FATAL: -+ { -+ strcpy(szBuffer, "PVR_K:(Fatal): "); -+ break; -+ } -+ case DBGPRIV_ERROR: -+ { -+ strcpy(szBuffer, "PVR_K:(Error): "); -+ break; -+ } -+ case DBGPRIV_WARNING: -+ { -+ strcpy(szBuffer, "PVR_K:(Warning): "); -+ break; -+ } -+ case DBGPRIV_MESSAGE: -+ { -+ strcpy(szBuffer, "PVR_K:(Message): "); -+ break; -+ } -+ case DBGPRIV_VERBOSE: -+ { -+ strcpy(szBuffer, "PVR_K:(Verbose): "); -+ break; -+ } -+ default: -+ { -+ strcpy(szBuffer, -+ "PVR_K:(Unknown message level)"); -+ break; -+ } -+ } -+ } else { -+ strcpy(szBuffer, "PVR_K: "); -+ } -+ -+ vsprintf(&szBuffer[strlen(szBuffer)], pszFormat, vaArgs); -+ -+ if (!bTrace) { -+ sprintf(&szBuffer[strlen(szBuffer)], " [%d, %s]", -+ (int)ui32Line, pszFileName); -+ } -+ -+ printk(KERN_INFO "%s\r\n", szBuffer); -+ -+ va_end(vaArgs); -+ } -+} -+ -+IMG_VOID HostMemSet(IMG_VOID * pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size) -+{ -+ memset(pvDest, (int)ui8Value, (size_t) ui32Size); -+} -+ -+IMG_VOID HostMemCopy(IMG_VOID * pvDst, IMG_VOID * pvSrc, IMG_UINT32 ui32Size) -+{ -+ memcpy(pvDst, pvSrc, ui32Size); -+} -+ -+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, -+ IMG_UINT32 * pui32Data) -+{ -+ -+ return 0; -+} -+ -+IMG_VOID *HostPageablePageAlloc(IMG_UINT32 ui32Pages) -+{ -+ return (void *)vmalloc(ui32Pages * PAGE_SIZE); -+} -+ -+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase) -+{ -+ vfree(pvBase); -+} -+ -+IMG_VOID *HostNonPageablePageAlloc(IMG_UINT32 ui32Pages) -+{ -+ return (void *)vmalloc(ui32Pages * PAGE_SIZE); -+} -+ -+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase) -+{ -+ vfree(pvBase); -+} -+ -+IMG_VOID *HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, -+ IMG_VOID ** ppvMdl) -+{ -+ -+ return IMG_NULL; -+} -+ -+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, -+ IMG_VOID * pvProcess) -+{ -+ -+} -+ -+IMG_VOID HostCreateRegDeclStreams(IMG_VOID) -+{ -+ -+} -+ -+IMG_VOID *HostCreateMutex(IMG_VOID) -+{ -+ -+ return IMG_NULL; -+} -+ -+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex) -+{ -+ -+} -+ -+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex) -+{ -+ -+} -+ -+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex) -+{ -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hostfunc.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hostfunc.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hostfunc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hostfunc.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,53 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _HOSTFUNC_ -+#define _HOSTFUNC_ -+ -+#define HOST_PAGESIZE (4096) -+#define DBG_MEMORY_INITIALIZER (0xe2) -+ -+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, -+ IMG_UINT32 * pui32Data); -+ -+IMG_VOID *HostPageablePageAlloc(IMG_UINT32 ui32Pages); -+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase); -+IMG_VOID *HostNonPageablePageAlloc(IMG_UINT32 ui32Pages); -+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase); -+ -+IMG_VOID *HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, -+ IMG_VOID * *ppvMdl); -+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, -+ IMG_VOID * pvProcess); -+ -+IMG_VOID HostCreateRegDeclStreams(IMG_VOID); -+ -+IMG_VOID *HostCreateMutex(IMG_VOID); -+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex); -+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex); -+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hotkey.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hotkey.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hotkey.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hotkey.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,101 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include "img_types.h" -+#include "pvr_debug.h" -+#include "dbgdrvif.h" -+#include "dbgdriv.h" -+#include "hotkey.h" -+#include "hostfunc.h" -+ -+IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF; -+IMG_BOOL g_bHotKeyPressed = IMG_FALSE; -+IMG_BOOL g_bHotKeyRegistered = IMG_FALSE; -+ -+PRIVATEHOTKEYDATA g_PrivateHotKeyData; -+ -+IMG_VOID ReadInHotKeys(IMG_VOID) -+{ -+ g_PrivateHotKeyData.ui32ScanCode = 0x58; -+ g_PrivateHotKeyData.ui32ShiftState = 0x0; -+ -+ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode", -+ &g_PrivateHotKeyData.ui32ScanCode); -+ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", -+ &g_PrivateHotKeyData.ui32ShiftState); -+} -+ -+IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo) -+{ -+ PDBG_STREAM psStream; -+ -+ PVR_UNREFERENCED_PARAMETER(pInfo); -+ -+ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode) { -+ PVR_DPF((PVR_DBG_MESSAGE, "PDUMP Hotkey pressed !\n")); -+ -+ psStream = -+ (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream; -+ -+ if (!g_bHotKeyPressed) { -+ -+ g_ui32HotKeyFrame = psStream->ui32Current + 2; -+ -+ g_bHotKeyPressed = IMG_TRUE; -+ } -+ } -+} -+ -+IMG_VOID ActivateHotKeys(PDBG_STREAM psStream) -+{ -+ -+ ReadInHotKeys(); -+ -+ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey) { -+ if (g_PrivateHotKeyData.ui32ScanCode != 0) { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Activate HotKey for PDUMP.\n")); -+ -+ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream; -+ -+ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, -+ g_PrivateHotKeyData.ui32ShiftState, -+ &g_PrivateHotKeyData.sHotKeyInfo); -+ } else { -+ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0; -+ } -+ } -+} -+ -+IMG_VOID DeactivateHotKeys(IMG_VOID) -+{ -+ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0) { -+ PVR_DPF((PVR_DBG_MESSAGE, "Deactivate HotKey.\n")); -+ -+ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey); -+ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0; -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hotkey.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hotkey.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/hotkey.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/hotkey.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,56 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _HOTKEY_ -+#define _HOTKEY_ -+ -+typedef struct _hotkeyinfo { -+ IMG_UINT8 ui8ScanCode; -+ IMG_UINT8 ui8Type; -+ IMG_UINT8 ui8Flag; -+ IMG_UINT8 ui8Filler1; -+ IMG_UINT32 ui32ShiftState; -+ IMG_UINT32 ui32HotKeyProc; -+ IMG_VOID *pvStream; -+ IMG_UINT32 hHotKey; -+} HOTKEYINFO, *PHOTKEYINFO; -+ -+typedef struct _privatehotkeydata { -+ IMG_UINT32 ui32ScanCode; -+ IMG_UINT32 ui32ShiftState; -+ HOTKEYINFO sHotKeyInfo; -+} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA; -+ -+IMG_VOID ReadInHotKeys(IMG_VOID); -+IMG_VOID ActivateHotKeys(PDBG_STREAM psStream); -+IMG_VOID DeactivateHotKeys(IMG_VOID); -+ -+IMG_VOID RemoveHotKey(IMG_UINT32 hHotKey); -+IMG_VOID DefineHotKey(IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, -+ PHOTKEYINFO psInfo); -+IMG_VOID RegisterKeyPressed(IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/ioctl.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/ioctl.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/ioctl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/ioctl.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,404 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. 
-+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+ -+#include "img_types.h" -+#include "dbgdrvif.h" -+#include "dbgdriv.h" -+#include "hotkey.h" -+ -+IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_CREATESTREAM psIn; -+ IMG_VOID **ppvOut; -+ static char name[32]; -+ -+ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer; -+ ppvOut = (IMG_VOID * *)pvOutBuffer; -+ -+ -+ if (copy_from_user(name, psIn->pszName, 32) != 0) -+ return IMG_FALSE; -+ *ppvOut = -+ ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, -+ 0, psIn->ui32Pages); -+ -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pStream; -+ PDBG_STREAM psStream; -+ -+ pStream = (IMG_UINT32 *) pvInBuffer; -+ psStream = (PDBG_STREAM) * pStream; -+ -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivDestroyStream(psStream); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_FINDSTREAM psParams; -+ IMG_UINT32 *pui32Stream; -+ -+ psParams = (PDBG_IN_FINDSTREAM) pvInBuffer; -+ pui32Stream = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32Stream = -+ (IMG_UINT32) ExtDBGDrivFindStream(psParams->pszName, -+ psParams->bResetStream); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_WRITESTRING psParams; -+ IMG_UINT32 *pui32OutLen; -+ -+ psParams = (PDBG_IN_WRITESTRING) pvInBuffer; -+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32OutLen = -+ ExtDBGDrivWriteString((PDBG_STREAM) psParams->pvStream, -+ psParams->pszString, psParams->ui32Level); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_WRITESTRING psParams; -+ IMG_UINT32 *pui32OutLen; -+ -+ psParams = (PDBG_IN_WRITESTRING) pvInBuffer; -+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32OutLen = -+ ExtDBGDrivWriteStringCM((PDBG_STREAM) psParams->pvStream, -+ psParams->pszString, psParams->ui32Level); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32OutLen; -+ PDBG_IN_READSTRING psParams; -+ -+ psParams = (PDBG_IN_READSTRING) pvInBuffer; -+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32OutLen = -+ ExtDBGDrivReadString(psParams->pvStream, psParams->pszString, -+ psParams->ui32StringLen); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32BytesCopied; -+ PDBG_IN_WRITE psInParams; -+ -+ psInParams = (PDBG_IN_WRITE) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = -+ ExtDBGDrivWrite((PDBG_STREAM) psInParams->pvStream, -+ psInParams->pui8InBuffer, -+ psInParams->ui32TransferSize, -+ psInParams->ui32Level); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32BytesCopied; -+ PDBG_IN_WRITE psInParams; -+ -+ psInParams = (PDBG_IN_WRITE) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = -+ ExtDBGDrivWrite2((PDBG_STREAM) psInParams->pvStream, -+ psInParams->pui8InBuffer, -+ psInParams->ui32TransferSize, -+ psInParams->ui32Level); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 
DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32BytesCopied; -+ PDBG_IN_WRITE psInParams; -+ -+ psInParams = (PDBG_IN_WRITE) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = -+ ExtDBGDrivWriteCM((PDBG_STREAM) psInParams->pvStream, -+ psInParams->pui8InBuffer, -+ psInParams->ui32TransferSize, -+ psInParams->ui32Level); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32BytesCopied; -+ PDBG_IN_READ psInParams; -+ -+ psInParams = (PDBG_IN_READ) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = -+ ExtDBGDrivRead((PDBG_STREAM) psInParams->pvStream, -+ psInParams->bReadInitBuffer, -+ psInParams->ui32OutBufferSize, -+ psInParams->pui8OutBuffer); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_SETDEBUGMODE psParams; -+ -+ psParams = (PDBG_IN_SETDEBUGMODE) pvInBuffer; -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivSetCaptureMode((PDBG_STREAM) psParams->pvStream, -+ psParams->ui32Mode, -+ psParams->ui32Start, -+ psParams->ui32End, psParams->ui32SampleRate); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_SETDEBUGOUTMODE psParams; -+ -+ psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer; -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivSetOutputMode((PDBG_STREAM) psParams->pvStream, -+ psParams->ui32Mode); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_SETDEBUGLEVEL psParams; -+ -+ psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer; -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivSetDebugLevel((PDBG_STREAM) psParams->pvStream, -+ psParams->ui32Level); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_SETFRAME psParams; -+ -+ psParams = (PDBG_IN_SETFRAME) pvInBuffer; -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivSetFrame((PDBG_STREAM) psParams->pvStream, -+ psParams->ui32Frame); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pStream; -+ PDBG_STREAM psStream; -+ IMG_UINT32 *pui32Current; -+ -+ pStream = (IMG_UINT32 *) pvInBuffer; -+ psStream = (PDBG_STREAM) * pStream; -+ pui32Current = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32Current = ExtDBGDrivGetFrame(psStream); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_ISCAPTUREFRAME psParams; -+ IMG_UINT32 *pui32Current; -+ -+ psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer; -+ pui32Current = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32Current = -+ ExtDBGDrivIsCaptureFrame((PDBG_STREAM) psParams->pvStream, -+ psParams->bCheckPreviousFrame); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_OVERRIDEMODE psParams; -+ -+ psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer; -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivOverrideMode((PDBG_STREAM) psParams->pvStream, -+ psParams->ui32Mode); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pStream; -+ PDBG_STREAM 
psStream; -+ -+ pStream = (IMG_UINT32 *) pvInBuffer; -+ psStream = (PDBG_STREAM) * pStream; -+ -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivDefaultMode(psStream); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_SETMARKER psParams; -+ -+ psParams = (PDBG_IN_SETMARKER) pvInBuffer; -+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer); -+ -+ ExtDBGDrivSetMarker((PDBG_STREAM) psParams->pvStream, -+ psParams->ui32Marker); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pStream; -+ PDBG_STREAM psStream; -+ IMG_UINT32 *pui32Current; -+ -+ pStream = (IMG_UINT32 *) pvInBuffer; -+ psStream = (PDBG_STREAM) * pStream; -+ pui32Current = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32Current = ExtDBGDrivGetMarker(psStream); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, -+ IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32Out; -+ -+ PVR_UNREFERENCED_PARAMETER(pvInBuffer); -+ pui32Out = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32Out = DBGDrivGetServiceTable(); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ PDBG_IN_WRITE_LF psInParams; -+ IMG_UINT32 *pui32BytesCopied; -+ -+ psInParams = (PDBG_IN_WRITE_LF) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = ExtDBGDrivWriteLF(psInParams->pvStream, -+ psInParams->pui8InBuffer, -+ psInParams->ui32BufferSize, -+ psInParams->ui32Level, -+ psInParams->ui32Flags); -+ -+ return IMG_TRUE; -+} -+ -+IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32BytesCopied; -+ PDBG_IN_READ psInParams; -+ -+ psInParams = (PDBG_IN_READ) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = -+ ExtDBGDrivReadLF((PDBG_STREAM) psInParams->pvStream, -+ psInParams->ui32OutBufferSize, -+ psInParams->pui8OutBuffer); -+ -+ return (IMG_TRUE); -+} -+ -+IMG_UINT32 DBGDIOCDrivResetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) -+{ -+ IMG_UINT32 *pui32BytesCopied; -+ PDBG_IN_READ psInParams; -+ -+ psInParams = (PDBG_IN_READ) pvInBuffer; -+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; -+ -+ *pui32BytesCopied = -+ ExtDBGDrivReadLF((PDBG_STREAM) psInParams->pvStream, -+ psInParams->ui32OutBufferSize, -+ psInParams->pui8OutBuffer); -+ -+ return (IMG_TRUE); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/ioctl.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/ioctl.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/ioctl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/ioctl.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,81 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _IOCTL_ -+#define _IOCTL_ -+ -+IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivRead(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID *, IMG_VOID *); -+IMG_UINT32 DBGDIOCDrivResetStream(IMG_VOID *, IMG_VOID *); -+ -+IMG_UINT32(*g_DBGDrivProc[])(IMG_VOID *, IMG_VOID *) = { -+DBGDIOCDrivCreateStream, -+ DBGDIOCDrivDestroyStream, -+ DBGDIOCDrivGetStream, -+ DBGDIOCDrivWriteString, -+ DBGDIOCDrivReadString, -+ DBGDIOCDrivWrite, -+ DBGDIOCDrivRead, -+ DBGDIOCDrivSetCaptureMode, -+ DBGDIOCDrivSetOutMode, -+ DBGDIOCDrivSetDebugLevel, -+ DBGDIOCDrivSetFrame, -+ DBGDIOCDrivGetFrame, -+ DBGDIOCDrivOverrideMode, -+ DBGDIOCDrivDefaultMode, -+ DBGDIOCDrivGetServiceTable, -+ DBGDIOCDrivWrite2, -+ DBGDIOCDrivWriteStringCM, -+ DBGDIOCDrivWriteCM, -+ DBGDIOCDrivSetMarker, -+ DBGDIOCDrivGetMarker, -+ DBGDIOCDrivIsCaptureFrame, -+ DBGDIOCDrivWriteLF, DBGDIOCDrivReadLF, DBGDIOCDrivResetStream,}; -+ -+#define MAX_DBGVXD_W32_API (sizeof(g_DBGDrivProc)/sizeof(IMG_UINT32)) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/linuxsrv.h linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/linuxsrv.h ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/linuxsrv.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/linuxsrv.h 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,47 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#ifndef _LINUXSRV_H__ -+#define _LINUXSRV_H__ -+ -+typedef struct tagIOCTL_PACKAGE { -+ IMG_UINT32 ui32Cmd; -+ IMG_UINT32 ui32Size; -+ IMG_VOID *pInBuffer; -+ IMG_UINT32 ui32InBufferSize; -+ IMG_VOID *pOutBuffer; -+ IMG_UINT32 ui32OutBufferSize; -+} IOCTL_PACKAGE; -+ -+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice, -+ IMG_UINT32 ui32ControlCode, -+ IMG_VOID * pInBuffer, -+ IMG_UINT32 ui32InBufferSize, -+ IMG_VOID * pOutBuffer, -+ IMG_UINT32 ui32OutBufferSize, -+ IMG_UINT32 * pui32BytesReturned); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/main.c linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/main.c ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/main.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/main.c 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,184 @@ -+/********************************************************************** -+ * -+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful but, except -+ * as otherwise stated in writing, without any warranty; without even the -+ * implied warranty of merchantability or fitness for a particular purpose. -+ * See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * The full GNU General Public License is included in this distribution in -+ * the file called "COPYING". -+ * -+ * Contact Information: -+ * Imagination Technologies Ltd. -+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "linuxsrv.h" -+#include "ioctl.h" -+#include "dbgdrvif.h" -+#include "dbgdriv.h" -+#include "hostfunc.h" -+#include "pvr_debug.h" -+ -+#define DRVNAME "dbgdrv" -+ -+MODULE_AUTHOR("Imagination Technologies Ltd. 
"); -+MODULE_LICENSE("GPL"); -+MODULE_SUPPORTED_DEVICE(DRVNAME); -+ -+static int AssignedMajorNumber = 0; -+ -+extern DBGKM_SERVICE_TABLE g_sDBGKMServices; -+ -+int dbgdrv_ioctl(struct inode *, struct file *, unsigned int, unsigned long); -+ -+static int dbgdrv_open(struct inode unref__ * pInode, -+ struct file unref__ * pFile) -+{ -+ return 0; -+} -+ -+static int dbgdrv_release(struct inode unref__ * pInode, -+ struct file unref__ * pFile) -+{ -+ return 0; -+} -+ -+static int dbgdrv_mmap(struct file *pFile, struct vm_area_struct *ps_vma) -+{ -+ return 0; -+} -+ -+static struct file_operations dbgdrv_fops = { -+owner: THIS_MODULE, -+ioctl: dbgdrv_ioctl, -+open: dbgdrv_open, -+release:dbgdrv_release, -+mmap: dbgdrv_mmap, -+}; -+ -+void DBGDrvGetServiceTable(void **fn_table) -+{ -+ *fn_table = &g_sDBGKMServices; -+ -+} -+ -+int init_module(void) -+{ -+ AssignedMajorNumber = -+ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops); -+ -+ if (AssignedMajorNumber <= 0) { -+ PVR_DPF((PVR_DBG_ERROR, " unable to get major\n")); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+void cleanup_module(void) -+{ -+ unregister_chrdev(AssignedMajorNumber, DRVNAME); -+ return; -+} -+ -+int dbgdrv_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg; -+ -+ char *buffer, *in, *out; -+ -+ if ((pIP->ui32InBufferSize > (PAGE_SIZE >> 1)) -+ || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1))) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Sizes of the buffers are too large, cannot do ioctl\n")); -+ return -1; -+ } -+ -+ buffer = (char *)HostPageablePageAlloc(1); -+ if (!buffer) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate buffer, cannot do ioctl\n")); -+ return -EFAULT; -+ } -+ -+ in = buffer; -+ out = buffer + (PAGE_SIZE >> 1); -+ -+ if (copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0) { -+ goto init_failed; -+ } -+ -+ cmd = ((pIP->ui32Cmd >> 2) & 0xFFF) - 0x801; -+ -+ if (pIP->ui32Cmd == DEBUG_SERVICE_READ) { -+ IMG_CHAR *ui8Tmp; -+ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *) out; -+ DBG_IN_READ *psReadInParams = (DBG_IN_READ *) in; -+ -+ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize); -+ if (!ui8Tmp) { -+ goto init_failed; -+ } -+ *pui32BytesCopied = -+ ExtDBGDrivRead((DBG_STREAM *) psReadInParams->pvStream, -+ psReadInParams->bReadInitBuffer, -+ psReadInParams->ui32OutBufferSize, ui8Tmp); -+ if (copy_to_user -+ (psReadInParams->pui8OutBuffer, ui8Tmp, -+ *pui32BytesCopied) != 0) { -+ vfree(ui8Tmp); -+ goto init_failed; -+ } -+ vfree(ui8Tmp); -+ } else { -+ (g_DBGDrivProc[cmd]) (in, out); -+ } -+ -+ if (copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0) { -+ goto init_failed; -+ } -+ -+ HostPageablePageFree((IMG_VOID *) buffer); -+ return 0; -+ -+init_failed: -+ HostPageablePageFree((IMG_VOID *) buffer); -+ return -EFAULT; -+} -+ -+void RemoveHotKey(unsigned hHotKey) -+{ -+ -+} -+ -+void DefineHotKey(unsigned ScanCode, unsigned ShiftState, void *pInfo) -+{ -+ -+} -+ -+EXPORT_SYMBOL(DBGDrvGetServiceTable); -diff -Nurp linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/Makefile linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/Makefile ---- linux-omap-2.6.28-omap1/drivers/gpu/pvr/tools/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/gpu/pvr/tools/Makefile 2011-06-22 13:19:32.613063278 +0200 -@@ -0,0 +1,29 @@ -+# -+# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved. 
-+#
-+# This program is free software; you can redistribute it and/or modify it
-+# under the terms and conditions of the GNU General Public License,
-+# version 2, as published by the Free Software Foundation.
-+#
-+# This program is distributed in the hope it will be useful but, except
-+# as otherwise stated in writing, without any warranty; without even the
-+# implied warranty of merchantability or fitness for a particular purpose.
-+# See the GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License along with
-+# this program; if not, write to the Free Software Foundation, Inc.,
-+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+#
-+# The full GNU General Public License is included in this distribution in
-+# the file called "COPYING".
-+#
-+# Contact Information:
-+# Imagination Technologies Ltd.
-+# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
-+
-+
-+objs-$(CONFIG_PVR_TOOLS) += dbgdrv
-+
-+dbgdrv-objs := main.c dbgdriv.c ioctl.c hostfunc.c \
-+	hotkey.c
-+
-diff -Nurp linux-omap-2.6.28-omap1/drivers/char/hw_random/Kconfig linux-omap-2.6.28-nokia1/drivers/char/hw_random/Kconfig
---- linux-omap-2.6.28-omap1/drivers/char/hw_random/Kconfig	2011-06-22 13:14:17.363067759 +0200
-+++ linux-omap-2.6.28-nokia1/drivers/char/hw_random/Kconfig	2011-06-22 13:19:32.523063279 +0200
-@@ -134,3 +134,16 @@ config HW_RANDOM_VIRTIO
- 
- 	  To compile this driver as a module, choose M here: the
- 	  module will be called virtio-rng. If unsure, say N.
-+
-+config HW_RANDOM_OMAP3_ROM
-+	tristate "OMAP3 ROM Random Number Generator support"
-+	depends on HW_RANDOM && ARCH_OMAP34XX
-+	default HW_RANDOM
-+	---help---
-+	  This driver provides kernel-side support for the Random Number
-+	  Generator hardware found on OMAP34xx processors.
-+
-+	  To compile this driver as a module, choose M here: the
-+	  module will be called omap3-rom-rng.
-+
-+	  If unsure, say Y.
-diff -Nurp linux-omap-2.6.28-omap1/drivers/char/hw_random/Makefile linux-omap-2.6.28-nokia1/drivers/char/hw_random/Makefile
---- linux-omap-2.6.28-omap1/drivers/char/hw_random/Makefile	2011-06-22 13:14:17.363067759 +0200
-+++ linux-omap-2.6.28-nokia1/drivers/char/hw_random/Makefile	2011-06-22 13:19:32.523063279 +0200
-@@ -14,3 +14,5 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx
- obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
- obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
- obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
-+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
-+omap3-rom-rng-y := omap3-rom-drv.o omap3-rom-asm.o
-diff -Nurp linux-omap-2.6.28-omap1/drivers/char/hw_random/omap3-rom-asm.S linux-omap-2.6.28-nokia1/drivers/char/hw_random/omap3-rom-asm.S
---- linux-omap-2.6.28-omap1/drivers/char/hw_random/omap3-rom-asm.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/char/hw_random/omap3-rom-asm.S	2011-06-22 13:19:32.523063279 +0200
-@@ -0,0 +1,26 @@
-+/*
-+ * Copyright (C) 2009 Nokia Corporation
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+ */
-+
-+#include
-+#include
-+
-+ENTRY(omap3_rng_call_rom_asm)
-+	stmfd sp!, {r4-r12, lr}
-+	stmfd sp!, {r0-r3}
-+	bl v7_flush_dcache_all
-+	ldmfd sp!, {r0-r3}
-+	mov r6, #0xff
-+	mov r12, r0
-+	smc #1
-+	mov r12, r0
-+	bl v7_flush_dcache_all
-+	mov r0, #0
-+	mcr p15, 0, r0, c7, c5, 0
-+	mov r0, r12
-+	ldmfd sp!, {r4-r12, pc}
-diff -Nurp linux-omap-2.6.28-omap1/drivers/char/hw_random/omap3-rom-drv.c linux-omap-2.6.28-nokia1/drivers/char/hw_random/omap3-rom-drv.c
---- linux-omap-2.6.28-omap1/drivers/char/hw_random/omap3-rom-drv.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/char/hw_random/omap3-rom-drv.c	2011-06-22 13:19:32.523063279 +0200
-@@ -0,0 +1,156 @@
-+/*
-+ * omap3-rom-drv.c - RNG driver for TI OMAP3 CPU family
-+ *
-+ * Copyright (C) 2009 Nokia Corporation
-+ * Author: Juha Yrjola
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include
-+
-+#define SEC_HAL_RNG_GENERATE 29
-+#define RNG_RESET 0x01
-+#define RNG_GEN_PRNG_HW_INIT 0x02
-+#define RNG_GEN_HW 0x08
-+
-+static const char *omap3_rom_rng_name = "OMAP3 ROM RNG";
-+
-+extern u32 omap3_rng_call_rom_asm(u32 id, u32 proc, u32 flags, u32 va_ptr);
-+
-+static int call_sec_rom(u32 appl_id, u32 proc_id, u32 flag, ...)
-+{
-+	va_list ap;
-+	u32 ret;
-+	u32 val;
-+
-+	va_start(ap, flag);
-+	val = *(u32 *) &ap;
-+	local_irq_disable();
-+	local_fiq_disable();
-+	ret = omap3_rng_call_rom_asm(appl_id, proc_id, flag,
-+			(u32) virt_to_phys((void *) val));
-+	local_fiq_enable();
-+	local_irq_enable();
-+	va_end(ap);
-+
-+	return ret;
-+}
-+
-+static struct timer_list idle_timer;
-+static int rng_idle;
-+static struct clk *rng_clk;
-+
-+static void omap3_rom_idle_rng(unsigned long data)
-+{
-+	int r;
-+
-+	r = call_sec_rom(SEC_HAL_RNG_GENERATE, 0, 0, 3, NULL, 0,
-+			RNG_RESET);
-+	if (r != 0) {
-+		printk(KERN_ERR "%s: reset failed: %d\n",
-+			omap3_rom_rng_name, r);
-+		return;
-+	}
-+	clk_disable(rng_clk);
-+	rng_idle = 1;
-+}
-+
-+static int omap3_rom_get_random(void *buf, unsigned int count)
-+{
-+	u32 r;
-+	u32 ptr;
-+
-+	del_timer_sync(&idle_timer);
-+	if (rng_idle) {
-+		clk_enable(rng_clk);
-+		r = call_sec_rom(SEC_HAL_RNG_GENERATE, 0, 0, 3, NULL, 0,
-+				RNG_GEN_PRNG_HW_INIT);
-+		if (r != 0) {
-+			clk_disable(rng_clk);
-+			printk(KERN_ERR "%s: HW init failed: %d\n",
-+				omap3_rom_rng_name, r);
-+			return -EIO;
-+		}
-+		rng_idle = 0;
-+	}
-+
-+	ptr = virt_to_phys(buf);
-+	r = call_sec_rom(SEC_HAL_RNG_GENERATE, 0, 0, 3, ptr,
-+			count, RNG_GEN_HW);
-+	mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
-+	if (r != 0)
-+		return -EINVAL;
-+	return 0;
-+}
-+
-+static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
-+{
-+	return 1;
-+}
-+
-+static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
-+{
-+	int r;
-+
-+	r = omap3_rom_get_random(data, 4);
-+	if (r < 0)
-+		return r;
-+	return 4;
-+}
-+
-+static struct hwrng omap3_rom_rng_ops = {
-+	.name = "omap3-rom",
-+	.data_present = omap3_rom_rng_data_present,
-+	.data_read = omap3_rom_rng_data_read,
-+};
-+
-+static int __init omap3_rom_rng_init(void)
-+{
-+	printk(KERN_INFO "%s: initializing\n", omap3_rom_rng_name);
-+	if (!cpu_is_omap34xx()) {
-+		printk(KERN_ERR "%s: currently supports only OMAP34xx CPUs\n",
-+			omap3_rom_rng_name);
-+		return -ENODEV;
-+	}
-+	if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
-+
printk(KERN_ERR "%s: GP OMAPs not supported\n", -+ omap3_rom_rng_name); -+ return -ENODEV; -+ } -+ -+ setup_timer(&idle_timer, omap3_rom_idle_rng, 0); -+ rng_clk = clk_get(NULL, "rng_ick"); -+ if (IS_ERR(rng_clk)) { -+ printk(KERN_ERR "%s: unable to get RNG clock\n", -+ omap3_rom_rng_name); -+ return IS_ERR(rng_clk); -+ } -+ -+ /* Leave the RNG in reset state. */ -+ clk_enable(rng_clk); -+ omap3_rom_idle_rng(0); -+ -+ return hwrng_register(&omap3_rom_rng_ops); -+} -+ -+static void __exit omap3_rom_rng_exit(void) -+{ -+ hwrng_unregister(&omap3_rom_rng_ops); -+} -+ -+module_init(omap3_rom_rng_init); -+module_exit(omap3_rom_rng_exit); -+ -+MODULE_AUTHOR("Juha Yrjola"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/char/keyboard.c linux-omap-2.6.28-nokia1/drivers/char/keyboard.c ---- linux-omap-2.6.28-omap1/drivers/char/keyboard.c 2011-06-22 13:14:17.393067759 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/char/keyboard.c 2011-06-22 13:19:32.523063279 +0200 -@@ -1035,6 +1035,7 @@ DECLARE_TASKLET_DISABLED(keyboard_taskle - defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\ - defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\ - (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC)) ||\ -+ (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_TWL4030) && !defined(CONFIG_ARCH_RPC)) ||\ - defined(CONFIG_AVR32) - - #define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\ -diff -Nurp linux-omap-2.6.28-omap1/drivers/char/n_tty.c linux-omap-2.6.28-nokia1/drivers/char/n_tty.c ---- linux-omap-2.6.28-omap1/drivers/char/n_tty.c 2011-06-22 13:14:17.403067759 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/char/n_tty.c 2011-06-22 13:19:32.523063279 +0200 -@@ -942,7 +942,7 @@ static void n_tty_write_wakeup(struct tt - * calls one at a time and in order (or using flush_to_ldisc) - */ - --static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, -+static int n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) - { - const unsigned char *p; -@@ -950,9 +950,10 @@ static void n_tty_receive_buf(struct tty - int i; - char buf[64]; - unsigned long cpuflags; -+ int ret = 0; - - if (!tty->read_buf) -- return; -+ return 0; - - if (tty->real_raw) { - spin_lock_irqsave(&tty->read_lock, cpuflags); -@@ -964,6 +965,7 @@ static void n_tty_receive_buf(struct tty - tty->read_cnt += i; - cp += i; - count -= i; -+ ret += i; - - i = min(N_TTY_BUF_SIZE - tty->read_cnt, - N_TTY_BUF_SIZE - tty->read_head); -@@ -971,8 +973,11 @@ static void n_tty_receive_buf(struct tty - memcpy(tty->read_buf + tty->read_head, cp, i); - tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); - tty->read_cnt += i; -+ ret += i; - spin_unlock_irqrestore(&tty->read_lock, cpuflags); -+ n_tty_set_room(tty); - } else { -+ ret = count; - for (i = count, p = cp, f = fp; i; i--, p++) { - if (f) - flags = *f++; -@@ -998,10 +1003,9 @@ static void n_tty_receive_buf(struct tty - } - if (tty->ops->flush_chars) - tty->ops->flush_chars(tty); -+ n_tty_set_room(tty); - } - -- n_tty_set_room(tty); -- - if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) { - kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) -@@ -1015,6 +1019,8 @@ static void n_tty_receive_buf(struct tty - */ - if (tty->receive_room < TTY_THRESHOLD_THROTTLE) - tty_throttle(tty); -+ -+ return ret; - } - - int is_ignored(int sig) -diff -Nurp linux-omap-2.6.28-omap1/drivers/char/random.c 
linux-omap-2.6.28-nokia1/drivers/char/random.c ---- linux-omap-2.6.28-omap1/drivers/char/random.c 2011-06-22 13:14:17.413067759 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/char/random.c 2011-06-22 13:19:32.523063279 +0200 -@@ -1469,7 +1469,8 @@ static void rekey_seq_generator(struct w - keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS; - smp_wmb(); - ip_cnt++; -- schedule_delayed_work(&rekey_work, REKEY_INTERVAL); -+ schedule_delayed_work(&rekey_work, -+ round_jiffies_relative(REKEY_INTERVAL)); - } - - static inline struct keydata *get_keyptr(void) -diff -Nurp linux-omap-2.6.28-omap1/drivers/char/tty_buffer.c linux-omap-2.6.28-nokia1/drivers/char/tty_buffer.c ---- linux-omap-2.6.28-omap1/drivers/char/tty_buffer.c 2011-06-22 13:14:17.463067758 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/char/tty_buffer.c 2011-06-22 13:19:32.523063279 +0200 -@@ -58,7 +58,7 @@ static struct tty_buffer *tty_buffer_all - { - struct tty_buffer *p; - -- if (tty->buf.memory_used + size > 65536) -+ if (tty->buf.memory_used + size > 96 * 1024) - return NULL; - p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); - if (p == NULL) -@@ -417,6 +417,7 @@ static void flush_to_ldisc(struct work_s - if (head != NULL) { - tty->buf.head = NULL; - for (;;) { -+ int copied; - int count = head->commit - head->read; - if (!count) { - if (head->next == NULL) -@@ -439,11 +440,11 @@ static void flush_to_ldisc(struct work_s - count = tty->receive_room; - char_buf = head->char_buf_ptr + head->read; - flag_buf = head->flag_buf_ptr + head->read; -- head->read += count; - spin_unlock_irqrestore(&tty->buf.lock, flags); -- disc->ops->receive_buf(tty, char_buf, -+ copied = disc->ops->receive_buf(tty, char_buf, - flag_buf, count); - spin_lock_irqsave(&tty->buf.lock, flags); -+ head->read += copied; - } - /* Restore the queue head */ - tty->buf.head = head; -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/Kconfig linux-omap-2.6.28-nokia1/drivers/input/Kconfig ---- linux-omap-2.6.28-omap1/drivers/input/Kconfig 2011-06-22 13:14:18.143067748 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/input/Kconfig 2011-06-22 13:19:32.673063277 +0200 -@@ -170,6 +170,8 @@ source "drivers/input/tablet/Kconfig" - - source "drivers/input/touchscreen/Kconfig" - -+source "drivers/input/lirc/Kconfig" -+ - source "drivers/input/misc/Kconfig" - - endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/keyboard/omap-twl4030keypad.c linux-omap-2.6.28-nokia1/drivers/input/keyboard/omap-twl4030keypad.c ---- linux-omap-2.6.28-omap1/drivers/input/keyboard/omap-twl4030keypad.c 2011-06-22 13:14:18.153067748 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/input/keyboard/omap-twl4030keypad.c 2011-06-22 13:19:32.693063278 +0200 -@@ -55,6 +55,8 @@ struct omap_keypad { - int n_rows; - int n_cols; - int irq; -+ unsigned user_disabled:1; -+ unsigned disable_depth; - - struct device *dbg_dev; - struct input_dev *omap_twl4030kp; -@@ -93,6 +95,26 @@ static int twl4030_kpwrite_u8(struct oma - return ret; - } - -+static int twl4030_kp_enable_interrupts(struct omap_keypad *kp) -+{ -+ u8 reg; -+ int ret; -+ /* Enable KP and TO interrupts now. 
*/ -+ reg = ~(KEYP_IMR1_KP | KEYP_IMR1_TO); -+ ret = twl4030_kpwrite_u8(kp, TWL4030_MODULE_KEYPAD, -+ reg, KEYP_IMR1); -+ return ret; -+} -+ -+static void twl4030_kp_disable_interrupts(struct omap_keypad *kp) -+{ -+ u8 reg; -+ /* mask all events - we don't care about the result */ -+ reg = KEYP_IMR1_MIS | KEYP_IMR1_TO | KEYP_IMR1_LK | KEYP_IMR1_KP; -+ (void)twl4030_kpwrite_u8(kp, TWL4030_MODULE_KEYPAD, -+ reg, KEYP_IMR1); -+} -+ - static int omap_kp_find_key(struct omap_keypad *kp, int col, int row) - { - int i, rc; -@@ -146,26 +168,9 @@ static int omap_kp_is_in_ghost_state(str - - return 0; - } -- --static void twl4030_kp_scan(struct omap_keypad *kp, int release_all) -+static void twl4030_kp_report_changes(struct omap_keypad *kp, u16 *new_state) - { -- u16 new_state[MAX_ROWS]; - int col, row; -- -- if (release_all) -- memset(new_state, 0, sizeof(new_state)); -- else { -- /* check for any changes */ -- int ret = omap_kp_read_kp_matrix_state(kp, new_state); -- if (ret < 0) /* panic ... */ -- return; -- -- if (omap_kp_is_in_ghost_state(kp, new_state)) -- return; -- } -- -- mutex_lock(&kp->mutex); -- - /* check for changes and print those */ - for (row = 0; row < kp->n_rows; row++) { - int changed = new_state[row] ^ kp->kp_state[row]; -@@ -196,8 +201,81 @@ static void twl4030_kp_scan(struct omap_ - } - kp->kp_state[row] = new_state[row]; - } -+ input_sync(kp->omap_twl4030kp); -+} - -+static inline int twl4030_kp_disabled(struct omap_keypad *kp) -+{ -+ return kp->disable_depth != 0; -+} -+ -+static int twl4030_kp_scan(struct omap_keypad *kp, u16 *new_state) -+{ -+ /* check for any changes */ -+ int ret = omap_kp_read_kp_matrix_state(kp, new_state); -+ if (ret < 0) /* panic ... */ -+ return ret; -+ -+ return omap_kp_is_in_ghost_state(kp, new_state); -+} -+ -+static void twl4030_kp_enable(struct omap_keypad *kp) -+{ -+ BUG_ON(!twl4030_kp_disabled(kp)); -+ if (--kp->disable_depth == 0) { -+ enable_irq(kp->irq); -+ twl4030_kp_enable_interrupts(kp); -+ } -+} -+ -+static void twl4030_kp_disable(struct omap_keypad *kp) -+{ -+ u16 new_state[MAX_ROWS]; -+ -+ if (kp->disable_depth++ == 0) { -+ memset(new_state, 0, sizeof(new_state)); -+ twl4030_kp_report_changes(kp, new_state); -+ twl4030_kp_disable_interrupts(kp); -+ disable_irq(kp->irq); -+ } -+} -+ -+ -+static ssize_t twl4030_kp_disable_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct omap_keypad *kp = dev_get_drvdata(dev); -+ -+ return sprintf(buf, "%u\n", twl4030_kp_disabled(kp)); -+} -+ -+static ssize_t twl4030_kp_disable_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct omap_keypad *kp = dev_get_drvdata(dev); -+ long i = 0; -+ int ret; -+ -+ ret = strict_strtoul(buf, 10, &i); -+ if (ret) -+ return -EINVAL; -+ i = !!i; -+ -+ mutex_lock(&kp->mutex); -+ if (i == kp->user_disabled) { -+ mutex_unlock(&kp->mutex); -+ return count; -+ } -+ kp->user_disabled = i; -+ -+ if (i) -+ twl4030_kp_disable(kp); -+ else -+ twl4030_kp_enable(kp); - mutex_unlock(&kp->mutex); -+ -+ return count; - } - - /* -@@ -208,6 +286,10 @@ static irqreturn_t do_kp_irq(int irq, vo - struct omap_keypad *kp = _kp; - u8 reg; - int ret; -+ u16 new_state[MAX_ROWS]; -+ /* This not real interrupt handler. -+ * This can only be called from thread context. 
*/ -+ BUG_ON(in_irq()); - - #ifdef CONFIG_LOCKDEP - /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which -@@ -220,16 +302,29 @@ static irqreturn_t do_kp_irq(int irq, vo - /* Read & Clear TWL4030 pending interrupt */ - ret = twl4030_kpread(kp, TWL4030_MODULE_KEYPAD, ®, KEYP_ISR1, 1); - -+ mutex_lock(&kp->mutex); -+ -+ if (twl4030_kp_disabled(kp)) { -+ mutex_unlock(&kp->mutex); -+ return IRQ_HANDLED; -+ } - /* Release all keys if I2C has gone bad or - * the KEYP has gone to idle state */ - if ((ret >= 0) && (reg & KEYP_IMR1_KP)) -- twl4030_kp_scan(kp, 0); -+ (void)twl4030_kp_scan(kp, new_state); - else -- twl4030_kp_scan(kp, 1); -+ memset(new_state, 0, sizeof(new_state)); -+ -+ twl4030_kp_report_changes(kp, new_state); -+ -+ mutex_unlock(&kp->mutex); - - return IRQ_HANDLED; - } - -+static DEVICE_ATTR(disable_kp, 0664, twl4030_kp_disable_show, -+ twl4030_kp_disable_store); -+ - /* - * Registers keypad device with input sub system - * and configures TWL4030 keypad registers -@@ -355,10 +450,7 @@ static int __init omap_kp_probe(struct p - kp->irq); - goto err3; - } else { -- /* Enable KP and TO interrupts now. */ -- reg = ~(KEYP_IMR1_KP | KEYP_IMR1_TO); -- ret = twl4030_kpwrite_u8(kp, TWL4030_MODULE_KEYPAD, -- reg, KEYP_IMR1); -+ ret = twl4030_kp_enable_interrupts(kp); - if (ret < 0) - goto err5; - } -@@ -367,10 +459,14 @@ static int __init omap_kp_probe(struct p - if (ret < 0) - goto err4; - -+ ret = device_create_file(&pdev->dev, &dev_attr_disable_kp); -+ -+ if (ret < 0) -+ goto err5; -+ - return ret; - err5: -- /* mask all events - we don't care about the result */ -- (void) twl4030_kpwrite_u8(kp, TWL4030_MODULE_KEYPAD, 0xff, KEYP_IMR1); -+ twl4030_kp_disable_interrupts(kp); - err4: - free_irq(kp->irq, NULL); - err3: -@@ -387,15 +483,43 @@ static int omap_kp_remove(struct platfor - - free_irq(kp->irq, kp); - input_unregister_device(kp->omap_twl4030kp); -+ device_remove_file(&pdev->dev, &dev_attr_disable_kp); - kfree(kp); - - return 0; - } - -+#ifdef CONFIG_PM -+static int twl4030_kp_suspend(struct platform_device *pdev, pm_message_t mesg) -+{ -+ struct omap_keypad *kp = dev_get_drvdata(&pdev->dev); -+ mutex_lock(&kp->mutex); -+ twl4030_kp_disable(kp); -+ mutex_unlock(&kp->mutex); -+ return 0; -+} -+ -+static int twl4030_kp_resume(struct platform_device *pdev) -+{ -+ struct omap_keypad *kp = dev_get_drvdata(&pdev->dev); -+ mutex_lock(&kp->mutex); -+ twl4030_kp_enable(kp); -+ mutex_unlock(&kp->mutex); -+ return 0; -+} -+#else -+ -+#define twl4030_kp_suspend NULL -+#define twl4030_kp_resume NULL -+ -+#endif /* CONFIG_PM */ -+ - - static struct platform_driver omap_kp_driver = { - .probe = omap_kp_probe, - .remove = __devexit_p(omap_kp_remove), -+ .suspend = twl4030_kp_suspend, -+ .resume = twl4030_kp_resume, - .driver = { - .name = "twl4030_keypad", - .owner = THIS_MODULE, -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/Kconfig linux-omap-2.6.28-nokia1/drivers/input/lirc/Kconfig ---- linux-omap-2.6.28-omap1/drivers/input/lirc/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/Kconfig 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,26 @@ -+# -+# LIRC driver(s) configuration -+# -+menuconfig INPUT_LIRC -+ bool "Linux Infrared Remote Control IR receiver/transmitter drivers" -+ help -+ Say Y here, and all supported Linux Infrared Remote Control IR and -+ RF receiver and transmitter drivers will be displayed. When paired -+ with a remote control and the lirc daemon, the receiver drivers -+ allow control of your Linux system via remote control. 
-+ -+if INPUT_LIRC -+ -+config LIRC_DEV -+ tristate "LIRC device loadable module support" -+ help -+ LIRC device loadable module support, required for most LIRC drivers -+ -+config LIRC_RX51 -+ tristate "RX51 CIR transmitter" -+ depends on LIRC_DEV -+ depends on ARCH_OMAP -+ help -+ Driver for Nokia RX51 CIR circuitry -+ -+endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_dev.c linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_dev.c ---- linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_dev.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_dev.c 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,759 @@ -+/* -+ * LIRC base driver -+ * -+ * by Artur Lipowski -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "lirc.h" -+#include "lirc_dev.h" -+ -+static int debug; -+#define dprintk(fmt, args...) \ -+ do { \ -+ if (debug) \ -+ printk(KERN_DEBUG fmt, ## args); \ -+ } while (0) -+ -+#define IRCTL_DEV_NAME "BaseRemoteCtl" -+#define NOPLUG -1 -+#define LOGHEAD "lirc_dev (%s[%d]): " -+ -+static dev_t lirc_base_dev; -+ -+struct irctl { -+ struct lirc_driver d; -+ int attached; -+ int open; -+ -+ struct mutex buffer_lock; -+ struct lirc_buffer *buf; -+ -+ struct task_struct *task; -+ long jiffies_to_wait; -+ -+ struct cdev cdev; -+}; -+ -+static DEFINE_MUTEX(driver_lock); -+ -+static struct irctl *irctls[MAX_IRCTL_DEVICES]; -+ -+/* Only used for sysfs but defined to void otherwise */ -+static struct class *lirc_class; -+ -+/* helper function -+ * initializes the irctl structure -+ */ -+static void init_irctl(struct irctl *ir) -+{ -+ mutex_init(&ir->buffer_lock); -+ ir->d.minor = NOPLUG; -+} -+ -+static void cleanup(struct irctl *ir) -+{ -+ dprintk(LOGHEAD "cleaning up\n", ir->d.name, ir->d.minor); -+ -+ device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); -+ -+ if (ir->buf != ir->d.rbuf) { -+ lirc_buffer_free(ir->buf); -+ kfree(ir->buf); -+ } -+ ir->buf = NULL; -+} -+ -+/* helper function -+ * reads key codes from driver and puts them into buffer -+ * buffer free space is checked and locking performed -+ * returns 0 on success -+ */ -+static int add_to_buf(struct irctl *ir) -+{ -+ if (lirc_buffer_full(ir->buf)) { -+ dprintk(LOGHEAD "buffer overflow\n", -+ ir->d.name, ir->d.minor); -+ return -EOVERFLOW; -+ } -+ -+ if (ir->d.add_to_buf) { -+ int res = -ENODATA; -+ int got_data = 0; -+ -+ /* service the device as long as it is returning -+ * data and we have space -+ */ -+ while (!lirc_buffer_full(ir->buf)) { -+ res = ir->d.add_to_buf(ir->d.data, ir->buf); -+ if (!res) -+ got_data++; -+ else -+ break; -+ } -+ -+ if (res == -ENODEV) -+ kthread_stop(ir->task); 
-+ -+ return got_data ? 0 : res; -+ } -+ -+ return 0; -+} -+ -+/* main function of the polling thread -+ */ -+static int lirc_thread(void *irctl) -+{ -+ struct irctl *ir = irctl; -+ -+ dprintk(LOGHEAD "poll thread started\n", ir->d.name, ir->d.minor); -+ -+ do { -+ if (ir->open) { -+ if (ir->jiffies_to_wait) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule_timeout(ir->jiffies_to_wait); -+ } -+ if (kthread_should_stop()) -+ break; -+ if (!add_to_buf(ir)) -+ wake_up_interruptible(&ir->buf->wait_poll); -+ } else { -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule(); -+ } -+ } while (!kthread_should_stop()); -+ -+ dprintk(LOGHEAD "poll thread ended\n", ir->d.name, ir->d.minor); -+ -+ return 0; -+} -+ -+ -+static struct file_operations fops = { -+ .read = lirc_dev_fop_read, -+ .write = lirc_dev_fop_write, -+ .poll = lirc_dev_fop_poll, -+ .ioctl = lirc_dev_fop_ioctl, -+ .open = lirc_dev_fop_open, -+ .release = lirc_dev_fop_close, -+ .owner = THIS_MODULE, -+}; -+ -+static int lirc_cdev_add(struct irctl *ir) -+{ -+ int retval; -+ struct lirc_driver *d = &ir->d; -+ -+ if (d->fops) { -+ cdev_init(&ir->cdev, d->fops); -+ ir->cdev.owner = d->owner; -+ } else { -+ cdev_init(&ir->cdev, &fops); -+ ir->cdev.owner = THIS_MODULE; -+ } -+ kobject_set_name(&ir->cdev.kobj, "lircv%d", d->minor); -+ -+ retval = cdev_add(&ir->cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1); -+ if (retval) -+ kobject_put(&ir->cdev.kobj); -+ -+ return retval; -+} -+ -+int lirc_register_driver(struct lirc_driver *d) -+{ -+ struct irctl *ir; -+ int minor; -+ int bytes_in_key; -+ unsigned int chunk_size; -+ unsigned int buffer_size; -+ int err; -+ -+ if (!d) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "driver pointer must be not NULL!\n"); -+ err = -EBADRQC; -+ goto out; -+ } -+ -+ if (MAX_IRCTL_DEVICES <= d->minor) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "\"minor\" must be between 0 and %d (%d)!\n", -+ MAX_IRCTL_DEVICES-1, d->minor); -+ err = -EBADRQC; -+ goto out; -+ } -+ -+ if (1 > d->code_length || (BUFLEN * 8) < d->code_length) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "code length in bits for minor (%d) " -+ "must be less than %d!\n", -+ d->minor, BUFLEN * 8); -+ err = -EBADRQC; -+ goto out; -+ } -+ -+ printk(KERN_INFO "lirc_dev: lirc_register_driver: sample_rate: %d\n", -+ d->sample_rate); -+ if (d->sample_rate) { -+ if (2 > d->sample_rate || HZ < d->sample_rate) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "sample_rate must be between 2 and %d!\n", HZ); -+ err = -EBADRQC; -+ goto out; -+ } -+ if (!d->add_to_buf) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "add_to_buf cannot be NULL when " -+ "sample_rate is set\n"); -+ err = -EBADRQC; -+ goto out; -+ } -+ } else if (!(d->fops && d->fops->read) && !d->rbuf) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "fops->read and rbuf " -+ "cannot all be NULL!\n"); -+ err = -EBADRQC; -+ goto out; -+ } else if (!d->rbuf) { -+ if (!(d->fops && d->fops->read && d->fops->poll) -+ || (!d->fops->ioctl)) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "neither read, poll nor ioctl can be NULL!\n"); -+ err = -EBADRQC; -+ goto out; -+ } -+ } -+ -+ if (d->owner == NULL) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "no module owner registered\n"); -+ err = -EBADRQC; -+ goto out; -+ } -+ -+ mutex_lock(&driver_lock); -+ -+ minor = d->minor; -+ -+ if (minor < 0) { -+ /* find first free slot for driver */ -+ for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++) -+ if 
(!irctls[minor]) -+ break; -+ if (MAX_IRCTL_DEVICES == minor) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "no free slots for drivers!\n"); -+ err = -ENOMEM; -+ goto out_lock; -+ } -+ } else if (irctls[minor]) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "minor (%d) just registered!\n", minor); -+ err = -EBUSY; -+ goto out_lock; -+ } -+ -+ ir = kzalloc(sizeof(struct irctl), GFP_KERNEL); -+ if (!ir) { -+ err = -ENOMEM; -+ goto out_lock; -+ } -+ init_irctl(ir); -+ irctls[minor] = ir; -+ -+ if (d->sample_rate) { -+ ir->jiffies_to_wait = HZ / d->sample_rate; -+ } else { -+ /* it means - wait for external event in task queue */ -+ ir->jiffies_to_wait = 0; -+ } -+ -+ /* some safety check 8-) */ -+ d->name[sizeof(d->name)-1] = '\0'; -+ -+ bytes_in_key = BITS_TO_LONGS(d->code_length); -+ -+ chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key; -+ buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key; -+ -+ if (d->rbuf) { -+ ir->buf = d->rbuf; -+ } else { -+ ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); -+ if (!ir->buf) { -+ err = -ENOMEM; -+ goto out_lock; -+ } -+ err = lirc_buffer_init(ir->buf, chunk_size, buffer_size); -+ if (err) { -+ kfree(ir->buf); -+ goto out_lock; -+ } -+ } -+ -+ if (d->features == 0) -+ d->features = (d->code_length > 8) ? -+ LIRC_CAN_REC_LIRCCODE : LIRC_CAN_REC_CODE; -+ -+ ir->d = *d; -+ ir->d.minor = minor; -+ -+ device_create(lirc_class, ir->d.dev, -+ MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL, -+ "lirc%u", ir->d.minor); -+ -+ if (d->sample_rate) { -+ /* try to fire up polling thread */ -+ ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev"); -+ if (IS_ERR(ir->task)) { -+ printk(KERN_ERR "lirc_dev: lirc_register_driver: " -+ "cannot run poll thread for minor = %d\n", -+ d->minor); -+ err = -ECHILD; -+ goto out_sysfs; -+ } -+ } -+ -+ err = lirc_cdev_add(ir); -+ if (err) -+ goto out_sysfs; -+ -+ ir->attached = 1; -+ mutex_unlock(&driver_lock); -+ -+ dprintk("lirc_dev: driver %s registered at minor number = %d\n", -+ ir->d.name, ir->d.minor); -+ return minor; -+ -+out_sysfs: -+ device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); -+out_lock: -+ mutex_unlock(&driver_lock); -+out: -+ return err; -+} -+EXPORT_SYMBOL(lirc_register_driver); -+ -+int lirc_unregister_driver(int minor) -+{ -+ struct irctl *ir; -+ -+ if (minor < 0 || minor >= MAX_IRCTL_DEVICES) { -+ printk(KERN_ERR "lirc_dev: lirc_unregister_driver: " -+ "\"minor\" must be between 0 and %d!\n", -+ MAX_IRCTL_DEVICES-1); -+ return -EBADRQC; -+ } -+ -+ ir = irctls[minor]; -+ -+ mutex_lock(&driver_lock); -+ -+ if (ir->d.minor != minor) { -+ printk(KERN_ERR "lirc_dev: lirc_unregister_driver: " -+ "minor (%d) device not registered!", minor); -+ mutex_unlock(&driver_lock); -+ return -ENOENT; -+ } -+ -+ /* end up polling thread */ -+ if (ir->task) -+ kthread_stop(ir->task); -+ -+ dprintk("lirc_dev: driver %s unregistered from minor number = %d\n", -+ ir->d.name, ir->d.minor); -+ -+ ir->attached = 0; -+ if (ir->open) { -+ dprintk(LOGHEAD "releasing opened driver\n", -+ ir->d.name, ir->d.minor); -+ wake_up_interruptible(&ir->buf->wait_poll); -+ mutex_lock(&ir->buffer_lock); -+ ir->d.set_use_dec(ir->d.data); -+ module_put(ir->d.owner); -+ mutex_unlock(&ir->buffer_lock); -+ cdev_del(&ir->cdev); -+ } else { -+ cleanup(ir); -+ cdev_del(&ir->cdev); -+ kfree(ir); -+ irctls[minor] = NULL; -+ } -+ -+ mutex_unlock(&driver_lock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(lirc_unregister_driver); -+ -+int lirc_dev_fop_open(struct inode *inode, struct 
file *file) -+{ -+ struct irctl *ir; -+ int retval; -+ -+ if (iminor(inode) >= MAX_IRCTL_DEVICES) { -+ dprintk("lirc_dev [%d]: open result = -ENODEV\n", -+ iminor(inode)); -+ return -ENODEV; -+ } -+ -+ if (mutex_lock_interruptible(&driver_lock)) -+ return -ERESTARTSYS; -+ -+ ir = irctls[iminor(inode)]; -+ if (!ir) { -+ retval = -ENODEV; -+ goto error; -+ } -+ -+ dprintk(LOGHEAD "open called\n", ir->d.name, ir->d.minor); -+ -+ if (ir->d.minor == NOPLUG) { -+ retval = -ENODEV; -+ goto error; -+ } -+ -+ if (ir->open) { -+ retval = -EBUSY; -+ goto error; -+ } -+ -+ /* there is no need for locking here because ir->open is 0 -+ * and lirc_thread isn't using buffer -+ * drivers which use irq's should allocate them on set_use_inc, -+ * so there should be no problem with those either. -+ */ -+ lirc_buffer_clear(ir->buf); -+ -+ if (ir->d.owner != NULL && try_module_get(ir->d.owner)) { -+ ++ir->open; -+ retval = ir->d.set_use_inc(ir->d.data); -+ -+ if (retval) { -+ module_put(ir->d.owner); -+ --ir->open; -+ } -+ if (ir->task) -+ wake_up_process(ir->task); -+ } else { -+ if (ir->d.owner == NULL) -+ dprintk(LOGHEAD "no module owner!!!\n", -+ ir->d.name, ir->d.minor); -+ -+ retval = -ENODEV; -+ } -+ -+error: -+ if (ir) -+ dprintk(LOGHEAD "open result = %d\n", ir->d.name, ir->d.minor, -+ retval); -+ -+ mutex_unlock(&driver_lock); -+ -+ return retval; -+} -+EXPORT_SYMBOL(lirc_dev_fop_open); -+ -+int lirc_dev_fop_close(struct inode *inode, struct file *file) -+{ -+ struct irctl *ir = irctls[iminor(inode)]; -+ -+ dprintk(LOGHEAD "close called\n", ir->d.name, ir->d.minor); -+ -+ WARN_ON(mutex_lock_killable(&driver_lock)); -+ -+ --ir->open; -+ if (ir->attached) { -+ ir->d.set_use_dec(ir->d.data); -+ module_put(ir->d.owner); -+ } else { -+ cleanup(ir); -+ irctls[ir->d.minor] = NULL; -+ kfree(ir); -+ } -+ -+ mutex_unlock(&driver_lock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(lirc_dev_fop_close); -+ -+unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait) -+{ -+ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; -+ unsigned int ret; -+ -+ dprintk(LOGHEAD "poll called\n", ir->d.name, ir->d.minor); -+ -+ if (!ir->attached) { -+ mutex_unlock(&ir->buffer_lock); -+ return POLLERR; -+ } -+ -+ poll_wait(file, &ir->buf->wait_poll, wait); -+ -+ if (ir->buf) -+ if (lirc_buffer_empty(ir->buf)) -+ ret = 0; -+ else -+ ret = POLLIN | POLLRDNORM; -+ else -+ ret = POLLERR; -+ -+ dprintk(LOGHEAD "poll result = %d\n", -+ ir->d.name, ir->d.minor, ret); -+ -+ return ret; -+} -+EXPORT_SYMBOL(lirc_dev_fop_poll); -+ -+int lirc_dev_fop_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ unsigned long mode; -+ int result = 0; -+ struct irctl *ir = irctls[iminor(inode)]; -+ -+ dprintk(LOGHEAD "ioctl called (0x%x)\n", -+ ir->d.name, ir->d.minor, cmd); -+ -+ if (ir->d.minor == NOPLUG || !ir->attached) { -+ dprintk(LOGHEAD "ioctl result = -ENODEV\n", -+ ir->d.name, ir->d.minor); -+ return -ENODEV; -+ } -+ -+ switch (cmd) { -+ case LIRC_GET_FEATURES: -+ result = put_user(ir->d.features, (unsigned long *)arg); -+ break; -+ case LIRC_GET_REC_MODE: -+ if (!(ir->d.features & LIRC_CAN_REC_MASK)) -+ return -ENOSYS; -+ -+ result = put_user(LIRC_REC2MODE -+ (ir->d.features & LIRC_CAN_REC_MASK), -+ (unsigned long *)arg); -+ break; -+ case LIRC_SET_REC_MODE: -+ if (!(ir->d.features & LIRC_CAN_REC_MASK)) -+ return -ENOSYS; -+ -+ result = get_user(mode, (unsigned long *)arg); -+ if (!result && !(LIRC_MODE2REC(mode) & ir->d.features)) -+ result = -EINVAL; -+ /* -+ * FIXME: We should actually set 
the mode somehow but -+ * for now, lirc_serial doesn't support mode changing either -+ */ -+ break; -+ case LIRC_GET_LENGTH: -+ result = put_user((unsigned long)ir->d.code_length, -+ (unsigned long *)arg); -+ break; -+ default: -+ result = -EINVAL; -+ } -+ -+ dprintk(LOGHEAD "ioctl result = %d\n", -+ ir->d.name, ir->d.minor, result); -+ -+ return result; -+} -+EXPORT_SYMBOL(lirc_dev_fop_ioctl); -+ -+ssize_t lirc_dev_fop_read(struct file *file, -+ char *buffer, -+ size_t length, -+ loff_t *ppos) -+{ -+ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; -+ unsigned char buf[ir->buf->chunk_size]; -+ int ret = 0, written = 0; -+ DECLARE_WAITQUEUE(wait, current); -+ -+ dprintk(LOGHEAD "read called\n", ir->d.name, ir->d.minor); -+ -+ if (mutex_lock_interruptible(&ir->buffer_lock)) -+ return -ERESTARTSYS; -+ if (!ir->attached) { -+ mutex_unlock(&ir->buffer_lock); -+ return -ENODEV; -+ } -+ -+ if (length % ir->buf->chunk_size) { -+ dprintk(LOGHEAD "read result = -EINVAL\n", -+ ir->d.name, ir->d.minor); -+ mutex_unlock(&ir->buffer_lock); -+ return -EINVAL; -+ } -+ -+ /* -+ * we add ourselves to the task queue before buffer check -+ * to avoid losing scan code (in case when queue is awaken somewhere -+ * between while condition checking and scheduling) -+ */ -+ add_wait_queue(&ir->buf->wait_poll, &wait); -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ /* -+ * while we didn't provide 'length' bytes, device is opened in blocking -+ * mode and 'copy_to_user' is happy, wait for data. -+ */ -+ while (written < length && ret == 0) { -+ if (lirc_buffer_empty(ir->buf)) { -+ /* According to the read(2) man page, 'written' can be -+ * returned as less than 'length', instead of blocking -+ * again, returning -EWOULDBLOCK, or returning -+ * -ERESTARTSYS */ -+ if (written) -+ break; -+ if (file->f_flags & O_NONBLOCK) { -+ ret = -EWOULDBLOCK; -+ break; -+ } -+ if (signal_pending(current)) { -+ ret = -ERESTARTSYS; -+ break; -+ } -+ -+ mutex_unlock(&ir->buffer_lock); -+ schedule(); -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ if (mutex_lock_interruptible(&ir->buffer_lock)) { -+ ret = -ERESTARTSYS; -+ break; -+ } -+ -+ if (!ir->attached) { -+ ret = -ENODEV; -+ break; -+ } -+ } else { -+ lirc_buffer_read(ir->buf, buf); -+ ret = copy_to_user((void *)buffer+written, buf, -+ ir->buf->chunk_size); -+ written += ir->buf->chunk_size; -+ } -+ } -+ -+ remove_wait_queue(&ir->buf->wait_poll, &wait); -+ set_current_state(TASK_RUNNING); -+ mutex_unlock(&ir->buffer_lock); -+ -+ dprintk(LOGHEAD "read result = %s (%d)\n", -+ ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret); -+ -+ return ret ? 
ret : written; -+} -+EXPORT_SYMBOL(lirc_dev_fop_read); -+ -+void *lirc_get_pdata(struct file *file) -+{ -+ void *data = NULL; -+ -+ if (file && file->f_dentry && file->f_dentry->d_inode && -+ file->f_dentry->d_inode->i_rdev) { -+ struct irctl *ir; -+ ir = irctls[iminor(file->f_dentry->d_inode)]; -+ data = ir->d.data; -+ } -+ -+ return data; -+} -+EXPORT_SYMBOL(lirc_get_pdata); -+ -+ -+ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, -+ size_t length, loff_t *ppos) -+{ -+ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; -+ -+ dprintk(LOGHEAD "write called\n", ir->d.name, ir->d.minor); -+ -+ if (!ir->attached) -+ return -ENODEV; -+ -+ return -EINVAL; -+} -+EXPORT_SYMBOL(lirc_dev_fop_write); -+ -+ -+static int __init lirc_dev_init(void) -+{ -+ int retval; -+ -+ lirc_class = class_create(THIS_MODULE, "lirc"); -+ if (IS_ERR(lirc_class)) { -+ retval = PTR_ERR(lirc_class); -+ printk(KERN_ERR "lirc_dev: class_create failed\n"); -+ goto error; -+ } -+ -+ retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES, IRCTL_DEV_NAME); -+ if (retval) { -+ class_destroy(lirc_class); -+ printk(KERN_ERR "lirc_dev: alloc_chrdev_region failed\n"); -+ goto error; -+ } -+ -+ -+ printk(KERN_INFO "lirc_dev: IR Remote Control driver registered, " -+ "major %d \n", MAJOR(lirc_base_dev)); -+ -+error: -+ return retval; -+} -+ -+ -+ -+static void __exit lirc_dev_exit(void) -+{ -+ class_destroy(lirc_class); -+ dprintk("lirc_dev: module unloaded\n"); -+} -+ -+module_init(lirc_dev_init); -+module_exit(lirc_dev_exit); -+ -+MODULE_DESCRIPTION("LIRC base driver module"); -+MODULE_AUTHOR("Artur Lipowski"); -+MODULE_LICENSE("GPL"); -+ -+module_param(debug, bool, 0644); -+MODULE_PARM_DESC(debug, "Enable debugging messages"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_dev.h linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_dev.h ---- linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_dev.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_dev.h 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,178 @@ -+/* -+ * LIRC base driver -+ * -+ * by Artur Lipowski -+ * This code is licensed under GNU GPL -+ * -+ */ -+ -+#ifndef _LINUX_LIRC_DEV_H -+#define _LINUX_LIRC_DEV_H -+ -+#define MAX_IRCTL_DEVICES 4 -+#define BUFLEN 16 -+ -+#define mod(n, div) ((n) % (div)) -+ -+#include -+#include -+#include -+#include -+#include -+ -+struct lirc_buffer { -+ wait_queue_head_t wait_poll; -+ spinlock_t lock; -+ unsigned int chunk_size; -+ unsigned int size; /* in chunks */ -+ /* Using chunks instead of bytes pretends to simplify boundary checking -+ * And should allow for some performance fine tunning later */ -+ struct kfifo *fifo; -+}; -+static void lirc_buffer_clear(struct lirc_buffer *buf) -+{ -+ if (buf->fifo) -+ kfifo_reset(buf->fifo); -+ else -+ WARN(1, "calling lirc_buffer_clear on an uninitialized lirc_buffer\n"); -+} -+static int lirc_buffer_init(struct lirc_buffer *buf, -+ unsigned int chunk_size, -+ unsigned int size) -+{ -+ init_waitqueue_head(&buf->wait_poll); -+ spin_lock_init(&buf->lock); -+ buf->chunk_size = chunk_size; -+ buf->size = size; -+ buf->fifo = kfifo_alloc(size*chunk_size, GFP_KERNEL, &buf->lock); -+ if (!buf->fifo) -+ return -ENOMEM; -+ return 0; -+} -+static void lirc_buffer_free(struct lirc_buffer *buf) -+{ -+ if (buf->fifo) -+ kfifo_free(buf->fifo); -+ else -+ WARN(1, "calling lirc_buffer_free on an uninitialized lirc_buffer\n"); -+} -+static int lirc_buffer_full(struct lirc_buffer *buf) -+{ -+ return kfifo_len(buf->fifo) == 
buf->fifo->size; -+} -+static int lirc_buffer_empty(struct lirc_buffer *buf) -+{ -+ return !kfifo_len(buf->fifo); -+} -+static int lirc_buffer_available(struct lirc_buffer *buf) -+{ -+ return (buf->size - kfifo_len(buf->fifo)) / buf->chunk_size; -+} -+ -+static void lirc_buffer_read(struct lirc_buffer *buf, -+ unsigned char *dest) -+{ -+ if (kfifo_len(buf->fifo) > buf->chunk_size) -+ kfifo_get(buf->fifo, dest, buf->chunk_size); -+} -+static void lirc_buffer_write(struct lirc_buffer *buf, -+ unsigned char *orig) -+{ -+ kfifo_put(buf->fifo, orig, buf->chunk_size); -+} -+ -+struct lirc_driver { -+ char name[40]; -+ int minor; -+ int code_length; -+ int sample_rate; -+ unsigned long features; -+ -+ unsigned int chunk_size; -+ unsigned int buffer_size; /* in chunks */ -+ -+ void *data; -+ int (*add_to_buf) (void *data, struct lirc_buffer *buf); -+ struct lirc_buffer *rbuf; -+ int (*set_use_inc) (void *data); -+ void (*set_use_dec) (void *data); -+ struct file_operations *fops; -+ struct device *dev; -+ struct module *owner; -+}; -+/* name: -+ * this string will be used for logs -+ * -+ * minor: -+ * indicates minor device (/dev/lirc) number for registered driver -+ * if caller fills it with negative value, then the first free minor -+ * number will be used (if available) -+ * -+ * code_length: -+ * length of the remote control key code expressed in bits -+ * -+ * sample_rate: -+ * -+ * data: -+ * it may point to any driver data and this pointer will be passed to -+ * all callback functions -+ * -+ * add_to_buf: -+ * add_to_buf will be called after specified period of the time or -+ * triggered by the external event, this behavior depends on value of -+ * the sample_rate this function will be called in user context. This -+ * routine should return 0 if data was added to the buffer and -+ * -ENODATA if none was available. This should add some number of bits -+ * evenly divisible by code_length to the buffer -+ * -+ * rbuf: -+ * if not NULL, it will be used as a read buffer, you will have to -+ * write to the buffer by other means, like irq's (see also -+ * lirc_serial.c). -+ * -+ * set_use_inc: -+ * set_use_inc will be called after device is opened -+ * -+ * set_use_dec: -+ * set_use_dec will be called after device is closed -+ * -+ * fops: -+ * file_operations for drivers which don't fit the current driver model. -+ * -+ * owner: -+ * the module owning this struct -+ * -+ */ -+ -+ -+/* following functions can be called ONLY from user context -+ * -+ * returns negative value on error or minor number -+ * of the registered device if success -+ * contents of the structure pointed by p is copied -+ */ -+extern int lirc_register_driver(struct lirc_driver *d); -+ -+/* returns negative value on error or 0 if success -+*/ -+extern int lirc_unregister_driver(int minor); -+ -+/* Returns the private data stored in the lirc_driver -+ * associated with the given device file pointer. 
-+ */ -+void *lirc_get_pdata(struct file *file); -+ -+/* default file operations -+ * used by drivers if they override only some operations -+ */ -+int lirc_dev_fop_open(struct inode *inode, struct file *file); -+int lirc_dev_fop_close(struct inode *inode, struct file *file); -+unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait); -+int lirc_dev_fop_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg); -+ssize_t lirc_dev_fop_read(struct file *file, char *buffer, size_t length, -+ loff_t *ppos); -+ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, size_t length, -+ loff_t *ppos); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/lirc.h linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc.h ---- linux-omap-2.6.28-omap1/drivers/input/lirc/lirc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc.h 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,100 @@ -+/* -+ * lirc.h - linux infrared remote control header file -+ * last modified 2007/09/27 -+ */ -+ -+#ifndef _LINUX_LIRC_H -+#define _LINUX_LIRC_H -+ -+#include -+#include -+ -+#define PULSE_BIT 0x01000000 -+#define PULSE_MASK 0x00FFFFFF -+ -+/*** lirc compatible hardware features ***/ -+ -+#define LIRC_MODE2SEND(x) (x) -+#define LIRC_SEND2MODE(x) (x) -+#define LIRC_MODE2REC(x) ((x) << 16) -+#define LIRC_REC2MODE(x) ((x) >> 16) -+ -+#define LIRC_MODE_RAW 0x00000001 -+#define LIRC_MODE_PULSE 0x00000002 -+#define LIRC_MODE_MODE2 0x00000004 -+#define LIRC_MODE_CODE 0x00000008 -+#define LIRC_MODE_LIRCCODE 0x00000010 -+#define LIRC_MODE_STRING 0x00000020 -+ -+ -+#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) -+#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) -+#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) -+#define LIRC_CAN_SEND_CODE LIRC_MODE2SEND(LIRC_MODE_CODE) -+#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) -+#define LIRC_CAN_SEND_STRING LIRC_MODE2SEND(LIRC_MODE_STRING) -+ -+#define LIRC_CAN_SEND_MASK 0x0000003f -+ -+#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 -+#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 -+#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 -+ -+#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) -+#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) -+#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) -+#define LIRC_CAN_REC_CODE LIRC_MODE2REC(LIRC_MODE_CODE) -+#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) -+#define LIRC_CAN_REC_STRING LIRC_MODE2REC(LIRC_MODE_STRING) -+ -+#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) -+ -+#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) -+#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) -+ -+#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 -+#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 -+#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 -+ -+#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) -+#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) -+ -+#define LIRC_CAN_NOTIFY_DECODE 0x01000000 -+ -+/*** IOCTL commands for lirc driver ***/ -+ -+#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) -+ -+#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) -+#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) -+#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) -+#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) -+#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, __u32) -+#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 
0x00000006, __u32) -+#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) -+ -+/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ -+#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) -+ -+#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) -+#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) -+/* Note: these can reset the according pulse_width */ -+#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) -+#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) -+#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) -+#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) -+#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) -+ -+/* -+ * to set a range use -+ * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the -+ * lower bound first and later -+ * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound -+ */ -+ -+#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) -+#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) -+ -+#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_rx51.c linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_rx51.c ---- linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_rx51.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_rx51.c 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,484 @@ -+/* -+ * Copyright (C) 2008 Nokia Corporation -+ * Contact: Timo Kokkonen -+ * -+ * Based on lirc_serial.c -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "lirc.h" -+#include "lirc_dev.h" -+#include "lirc_rx51.h" -+ -+#define LIRC_RX51_DRIVER_FEATURES (LIRC_CAN_SET_SEND_DUTY_CYCLE | \ -+ LIRC_CAN_SET_SEND_CARRIER | \ -+ LIRC_CAN_SEND_PULSE) -+ -+#define DRIVER_NAME "lirc_rx51" -+ -+#define WBUF_LEN 256 -+ -+#define TIMER_MAX_VALUE 0xffffffff -+ -+struct lirc_rx51 { -+ struct omap_dm_timer *pwm_timer; -+ struct omap_dm_timer *pulse_timer; -+ struct device *dev; -+ struct lirc_rx51_platform_data *pdata; -+ wait_queue_head_t wqueue; -+ -+ unsigned long fclk_khz; -+ unsigned int freq; /* carrier frequency */ -+ unsigned int duty_cycle; /* carrier duty cycle */ -+ unsigned int irq_num; -+ unsigned int match; -+ int wbuf[WBUF_LEN]; -+ int wbuf_index; -+ unsigned long device_is_open; -+ unsigned int pwm_timer_num; -+}; -+ -+static void lirc_rx51_on(struct lirc_rx51 *lirc_rx51) -+{ -+ omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1, -+ OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE); -+} -+ -+static void lirc_rx51_off(struct lirc_rx51 *lirc_rx51) -+{ -+ omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1, -+ OMAP_TIMER_TRIGGER_NONE); -+} -+ -+static int init_timing_params(struct lirc_rx51 *lirc_rx51) -+{ -+ u32 load, match; -+ -+ load = -(lirc_rx51->fclk_khz * 1000 / lirc_rx51->freq); -+ match = -(lirc_rx51->duty_cycle * -load / 100); -+ omap_dm_timer_set_load(lirc_rx51->pwm_timer, 1, load); -+ omap_dm_timer_set_match(lirc_rx51->pwm_timer, 1, match); -+ omap_dm_timer_write_counter(lirc_rx51->pwm_timer, TIMER_MAX_VALUE - 2); -+ omap_dm_timer_start(lirc_rx51->pwm_timer); -+ omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0); -+ omap_dm_timer_start(lirc_rx51->pulse_timer); -+ -+ lirc_rx51->match = 0; -+ -+ return 0; -+} -+ -+#define tics_after(a, b) ((long)(b) - (long)(a) < 0) -+ -+static int pulse_timer_set_timeout(struct lirc_rx51 *lirc_rx51, int usec) -+{ -+ int counter; -+ -+ BUG_ON(usec < 0); -+ -+ if (lirc_rx51->match == 0) -+ counter = omap_dm_timer_read_counter(lirc_rx51->pulse_timer); -+ else -+ counter = lirc_rx51->match; -+ -+ counter += (u32)(lirc_rx51->fclk_khz * usec / (1000)); -+ omap_dm_timer_set_match(lirc_rx51->pulse_timer, 1, counter); -+ omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, -+ OMAP_TIMER_INT_MATCH); -+ if (tics_after(omap_dm_timer_read_counter(lirc_rx51->pulse_timer), -+ counter)) { -+ return 1; -+ } -+ return 0; -+} -+ -+static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr) -+{ -+ unsigned int retval; -+ struct lirc_rx51 *lirc_rx51 = ptr; -+ -+ retval = omap_dm_timer_read_status(lirc_rx51->pulse_timer); -+ if (!retval) -+ return IRQ_NONE; -+ -+ if ((retval & ~OMAP_TIMER_INT_MATCH)) -+ if (unlikely(!printk_ratelimit())) -+ dev_err(lirc_rx51->dev, -+ ": Unexpected interrupt source: %x\n", retval); -+ -+ omap_dm_timer_write_status(lirc_rx51->pulse_timer, 7); -+ if (lirc_rx51->wbuf_index < 0) { -+ if (unlikely(!printk_ratelimit())) -+ dev_err(lirc_rx51->dev, -+ ": BUG wbuf_index has value of %i\n", -+ lirc_rx51->wbuf_index); -+ goto end; -+ } -+ -+ /* -+ * If we happend to hit an odd latency spike, loop through the -+ * pulses until we catch up. 
-+ */ -+ do { -+ if (lirc_rx51->wbuf_index >= WBUF_LEN) -+ goto end; -+ if (lirc_rx51->wbuf[lirc_rx51->wbuf_index] == -1) -+ goto end; -+ -+ if (lirc_rx51->wbuf_index % 2) -+ lirc_rx51_off(lirc_rx51); -+ else -+ lirc_rx51_on(lirc_rx51); -+ -+ retval = pulse_timer_set_timeout(lirc_rx51, -+ lirc_rx51->wbuf[lirc_rx51->wbuf_index]); -+ lirc_rx51->wbuf_index++; -+ -+ } while (retval); -+ -+ return IRQ_HANDLED; -+end: -+ /* Stop TX here */ -+ lirc_rx51_off(lirc_rx51); -+ lirc_rx51->wbuf_index = -1; -+ omap_dm_timer_stop(lirc_rx51->pulse_timer); -+ omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0); -+ wake_up_interruptible(&lirc_rx51->wqueue); -+ -+ return IRQ_HANDLED; -+} -+ -+static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51) -+{ -+ struct clk *clk_fclk; -+ int retval, pwm_timer = lirc_rx51->pwm_timer_num; -+ -+ lirc_rx51->pwm_timer = omap_dm_timer_request_specific(pwm_timer); -+ if (lirc_rx51->pwm_timer == NULL) { -+ dev_err(lirc_rx51->dev, ": Error requesting GPT%d timer\n", -+ pwm_timer); -+ return -EBUSY; -+ } -+ -+ lirc_rx51->pulse_timer = omap_dm_timer_request(); -+ if (lirc_rx51->pulse_timer == NULL) { -+ dev_err(lirc_rx51->dev, ": Error requesting pulse timer\n"); -+ retval = -EBUSY; -+ goto err1; -+ } -+ -+ lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer); -+ retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler, -+ IRQF_DISABLED | IRQF_SHARED, -+ "lirc_pulse_timer", lirc_rx51); -+ if (retval) { -+ dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n"); -+ goto err2; -+ } -+ -+ omap_dm_timer_set_source(lirc_rx51->pwm_timer, OMAP_TIMER_SRC_SYS_CLK); -+ omap_dm_timer_set_source(lirc_rx51->pulse_timer, -+ OMAP_TIMER_SRC_SYS_CLK); -+ clk_fclk = omap_dm_timer_get_fclk(lirc_rx51->pwm_timer); -+ lirc_rx51->fclk_khz = clk_fclk->rate/1000; -+ -+ return 0; -+ -+err2: -+ omap_dm_timer_free(lirc_rx51->pulse_timer); -+err1: -+ omap_dm_timer_free(lirc_rx51->pwm_timer); -+ -+ return retval; -+} -+ -+static int lirc_rx51_free_port(struct lirc_rx51 *lirc_rx51) -+{ -+ omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0); -+ free_irq(lirc_rx51->irq_num, lirc_rx51); -+ lirc_rx51_off(lirc_rx51); -+ omap_dm_timer_free(lirc_rx51->pwm_timer); -+ omap_dm_timer_free(lirc_rx51->pulse_timer); -+ lirc_rx51->wbuf_index = -1; -+ -+ return 0; -+} -+ -+static ssize_t lirc_rx51_write(struct file *file, const char *buf, -+ size_t n, loff_t *ppos) -+{ -+ int count, i; -+ struct lirc_rx51 *lirc_rx51 = file->private_data; -+ -+ if (n % sizeof(int)) -+ return -EINVAL; -+ -+ count = n / sizeof(int); -+ if ((count > WBUF_LEN) || (count % 2 == 0)) -+ return -EINVAL; -+ -+ /* Wait any pending transfers to finish */ -+ wait_event_interruptible(lirc_rx51->wqueue, lirc_rx51->wbuf_index < 0); -+ -+ if (copy_from_user(lirc_rx51->wbuf, buf, n)) -+ return -EFAULT; -+ -+ /* Sanity check the input pulses */ -+ for (i = 0; i < count; i++) -+ if (lirc_rx51->wbuf[i] < 0) -+ return -EINVAL; -+ -+ init_timing_params(lirc_rx51); -+ if (count < WBUF_LEN) -+ lirc_rx51->wbuf[count] = -1; /* Insert termination mark */ -+ -+ /* -+ * Adjust latency requirements so the device doesn't go in too -+ * deep sleep states -+ */ -+ lirc_rx51->pdata->set_max_mpu_wakeup_lat(lirc_rx51->dev, 50); -+ -+ lirc_rx51_on(lirc_rx51); -+ lirc_rx51->wbuf_index = 1; -+ pulse_timer_set_timeout(lirc_rx51, lirc_rx51->wbuf[0]); -+ -+ /* -+ * Don't return back to the userspace until the transfer has -+ * finished -+ */ -+ wait_event_interruptible(lirc_rx51->wqueue, lirc_rx51->wbuf_index < 0); -+ -+ /* We can sleep 
again */ -+ lirc_rx51->pdata->set_max_mpu_wakeup_lat(lirc_rx51->dev, -1); -+ -+ return n; -+} -+ -+static int lirc_rx51_ioctl(struct inode *node, struct file *filep, -+ unsigned int cmd, unsigned long arg) -+{ -+ int result; -+ unsigned long value; -+ unsigned int ivalue; -+ struct lirc_rx51 *lirc_rx51 = filep->private_data; -+ -+ switch (cmd) { -+ case LIRC_GET_SEND_MODE: -+ result = put_user(LIRC_MODE_PULSE, (unsigned long *)arg); -+ if (result) -+ return result; -+ break; -+ -+ case LIRC_SET_SEND_MODE: -+ result = get_user(value, (unsigned long *)arg); -+ if (result) -+ return result; -+ -+ /* only LIRC_MODE_PULSE supported */ -+ if (value != LIRC_MODE_PULSE) -+ return -ENOSYS; -+ break; -+ -+ case LIRC_GET_REC_MODE: -+ result = put_user(0, (unsigned long *) arg); -+ if (result) -+ return result; -+ break; -+ -+ case LIRC_GET_LENGTH: -+ return -ENOSYS; -+ break; -+ -+ case LIRC_SET_SEND_DUTY_CYCLE: -+ result = get_user(ivalue, (unsigned int *) arg); -+ if (result) -+ return result; -+ -+ if (ivalue <= 0 || ivalue > 100) { -+ dev_err(lirc_rx51->dev, ": invalid duty cycle %d\n", -+ ivalue); -+ return -EINVAL; -+ } -+ -+ lirc_rx51->duty_cycle = ivalue; -+ break; -+ -+ case LIRC_SET_SEND_CARRIER: -+ result = get_user(ivalue, (unsigned int *) arg); -+ if (result) -+ return result; -+ -+ if (ivalue > 500000 || ivalue < 20000) { -+ dev_err(lirc_rx51->dev, ": invalid carrier freq %d\n", -+ ivalue); -+ return -EINVAL; -+ } -+ -+ lirc_rx51->freq = ivalue; -+ break; -+ -+ case LIRC_GET_FEATURES: -+ result = put_user(LIRC_RX51_DRIVER_FEATURES, -+ (unsigned long *) arg); -+ if (result) -+ return result; -+ break; -+ -+ default: -+ return -ENOIOCTLCMD; -+ break; -+ } -+ -+ return 0; -+} -+ -+static int lirc_rx51_open(struct inode *inode, struct file *file) -+{ -+ struct lirc_rx51 *lirc_rx51 = lirc_get_pdata(file); -+ BUG_ON(!lirc_rx51); -+ -+ file->private_data = lirc_rx51; -+ -+ if (test_and_set_bit(1, &lirc_rx51->device_is_open)) -+ return -EBUSY; -+ -+ return lirc_rx51_init_port(lirc_rx51); -+} -+ -+static int lirc_rx51_release(struct inode *inode, struct file *file) -+{ -+ struct lirc_rx51 *lirc_rx51 = file->private_data; -+ -+ lirc_rx51_free_port(lirc_rx51); -+ -+ lirc_rx51->device_is_open = 0; -+ -+ return 0; -+} -+ -+static struct lirc_rx51 lirc_rx51 = { -+ .freq = 38000, -+ .duty_cycle = 50, -+ .wbuf_index = -1, -+}; -+ -+static struct file_operations lirc_fops = { -+ .owner = THIS_MODULE, -+ .write = lirc_rx51_write, -+ .ioctl = lirc_rx51_ioctl, -+ .read = lirc_dev_fop_read, -+ .poll = lirc_dev_fop_poll, -+ .open = lirc_rx51_open, -+ .release = lirc_rx51_release, -+}; -+ -+static struct lirc_driver lirc_rx51_driver = { -+ .name = DRIVER_NAME, -+ .minor = -1, -+ .code_length = 1, -+ .data = &lirc_rx51, -+ .fops = &lirc_fops, -+ .owner = THIS_MODULE, -+}; -+ -+#ifdef CONFIG_PM -+ -+static int lirc_rx51_suspend(struct platform_device *dev, pm_message_t state) -+{ -+ if (lirc_rx51.device_is_open) -+ lirc_rx51_free_port(&lirc_rx51); -+ -+ return 0; -+} -+ -+static int lirc_rx51_resume(struct platform_device *dev) -+{ -+ if (lirc_rx51.device_is_open) -+ return lirc_rx51_init_port(&lirc_rx51); -+ -+ return 0; -+} -+ -+#else -+ -+#define lirc_rx51_suspend NULL -+#define lirc_rx51_resume NULL -+ -+#endif /* CONFIG_PM */ -+ -+static int __init lirc_rx51_probe(struct platform_device *dev) -+{ -+ lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES; -+ lirc_rx51.pdata = dev->dev.platform_data; -+ lirc_rx51.pwm_timer_num = lirc_rx51.pdata->pwm_timer; -+ lirc_rx51.dev = &dev->dev; -+ 
lirc_rx51_driver.minor = lirc_register_driver(&lirc_rx51_driver); -+ init_waitqueue_head(&lirc_rx51.wqueue); -+ -+ if (lirc_rx51_driver.minor < 0) { -+ dev_err(lirc_rx51.dev, ": lirc_register_driver failed: %d\n", -+ lirc_rx51_driver.minor); -+ return lirc_rx51_driver.minor; -+ } -+ dev_info(lirc_rx51.dev, "registration ok, minor: %d, pwm: %d\n", -+ lirc_rx51_driver.minor, lirc_rx51.pwm_timer_num); -+ -+ return 0; -+} -+ -+static int __exit lirc_rx51_remove(struct platform_device *dev) -+{ -+ return lirc_unregister_driver(lirc_rx51_driver.minor); -+} -+ -+struct platform_driver lirc_rx51_platform_driver = { -+ .probe = lirc_rx51_probe, -+ .remove = __exit_p(lirc_rx51_remove), -+ .suspend = lirc_rx51_suspend, -+ .resume = lirc_rx51_resume, -+ .remove = __exit_p(lirc_rx51_remove), -+ .driver = { -+ .name = DRIVER_NAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init lirc_rx51_init(void) -+{ -+ return platform_driver_register(&lirc_rx51_platform_driver); -+} -+module_init(lirc_rx51_init); -+ -+static void __exit lirc_rx51_exit(void) -+{ -+ platform_driver_unregister(&lirc_rx51_platform_driver); -+} -+module_exit(lirc_rx51_exit); -+ -+MODULE_DESCRIPTION("LIRC TX driver for Nokia RX51"); -+MODULE_AUTHOR("Nokia Corporation"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_rx51.h linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_rx51.h ---- linux-omap-2.6.28-omap1/drivers/input/lirc/lirc_rx51.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/lirc_rx51.h 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,10 @@ -+#ifndef _LIRC_RX51_H -+#define _LIRC_RX51_H -+ -+struct lirc_rx51_platform_data { -+ int pwm_timer; -+ -+ void(*set_max_mpu_wakeup_lat)(struct device *dev, long t); -+}; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/lirc/Makefile linux-omap-2.6.28-nokia1/drivers/input/lirc/Makefile ---- linux-omap-2.6.28-omap1/drivers/input/lirc/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/input/lirc/Makefile 2011-06-22 13:19:32.693063278 +0200 -@@ -0,0 +1,7 @@ -+# Makefile for the lirc drivers. -+# -+ -+# Each configuration option enables a list of files. 
-+ -+obj-$(CONFIG_LIRC_DEV) += lirc_dev.o -+obj-$(CONFIG_LIRC_RX51) += lirc_rx51.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/Makefile linux-omap-2.6.28-nokia1/drivers/input/Makefile ---- linux-omap-2.6.28-omap1/drivers/input/Makefile 2011-06-22 13:14:18.143067748 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/input/Makefile 2011-06-22 13:19:32.673063277 +0200 -@@ -25,3 +25,5 @@ obj-$(CONFIG_INPUT_MISC) += misc/ - obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o - - obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o -+ -+obj-$(CONFIG_INPUT_LIRC) += lirc/ -diff -Nurp linux-omap-2.6.28-omap1/drivers/input/touchscreen/tsc2005.c linux-omap-2.6.28-nokia1/drivers/input/touchscreen/tsc2005.c ---- linux-omap-2.6.28-omap1/drivers/input/touchscreen/tsc2005.c 2011-06-22 13:14:18.173067748 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/input/touchscreen/tsc2005.c 2011-06-22 13:19:32.703063276 +0200 -@@ -27,7 +27,6 @@ - #include - #include - #include --#include - #include - - #include -@@ -37,12 +36,12 @@ - * - * Initialize: - * Request access to GPIO103 (DAV) -- * tsc2005_dav_irq_handler will trigger when DAV line goes down -+ * tsc2005_ts_irq_handler will trigger when DAV line goes down - * - * 1) Pen is pressed against touchscreeen - * 2) TSC2005 performs AD conversion - * 3) After the conversion is done TSC2005 drives DAV line down -- * 4) GPIO IRQ is received and tsc2005_dav_irq_handler is called -+ * 4) GPIO IRQ is received and tsc2005_ts_irq_handler is called - * 5) tsc2005_ts_irq_handler queues up an spi transfer to fetch - * the x, y, z1, z2 values - * 6) tsc2005_ts_rx() reports coordinates to input layer and -@@ -77,8 +76,8 @@ - #define TSC2005_CMD_AUX_CONT (8 << 3) - #define TSC2005_CMD_TEST_X_CONN (9 << 3) - #define TSC2005_CMD_TEST_Y_CONN (10 << 3) --/* command 11 reserved */ --#define TSC2005_CMD_TEST_SHORT (12 << 3) -+#define TSC2005_CMD_TEST_SHORT (11 << 3) -+/* command 12 reserved, according to 2008-03 erratum */ - #define TSC2005_CMD_DRIVE_XX (13 << 3) - #define TSC2005_CMD_DRIVE_YY (14 << 3) - #define TSC2005_CMD_DRIVE_YX (15 << 3) -@@ -136,6 +135,9 @@ - TSC2005_CFR0_PRECHARGE_276US | \ - TSC2005_CFR0_PENMODE) - -+/* Bits common to both read and write of config register 0 */ -+#define TSC2005_CFR0_RW_MASK 0x3fff -+ - #define TSC2005_CFR1_BATCHDELAY_0MS (0x0000) - #define TSC2005_CFR1_BATCHDELAY_1MS (0x0001) - #define TSC2005_CFR1_BATCHDELAY_2MS (0x0002) -@@ -145,7 +147,7 @@ - #define TSC2005_CFR1_BATCHDELAY_40MS (0x0006) - #define TSC2005_CFR1_BATCHDELAY_100MS (0x0007) - --#define TSC2005_CFR1_INITVALUE (TSC2005_CFR1_BATCHDELAY_2MS) -+#define TSC2005_CFR1_INITVALUE (TSC2005_CFR1_BATCHDELAY_4MS) - - #define TSC2005_CFR2_MAVE_TEMP (0x0001) - #define TSC2005_CFR2_MAVE_AUX (0x0002) -@@ -160,11 +162,12 @@ - #define TSC2005_CFR2_MEDIUM_7 (0x2000) - #define TSC2005_CFR2_MEDIUM_15 (0x3000) - -+#define TSC2005_CFR2_IRQ_MASK (0xC000) - #define TSC2005_CFR2_IRQ_DAV (0x4000) - #define TSC2005_CFR2_IRQ_PEN (0x8000) - #define TSC2005_CFR2_IRQ_PENDAV (0x0000) - --#define TSC2005_CFR2_INITVALUE (TSC2005_CFR2_IRQ_DAV | \ -+#define TSC2005_CFR2_INITVALUE (TSC2005_CFR2_IRQ_PENDAV | \ - TSC2005_CFR2_MAVE_X | \ - TSC2005_CFR2_MAVE_Y | \ - TSC2005_CFR2_MAVE_Z | \ -@@ -173,8 +176,7 @@ - - #define MAX_12BIT ((1 << 12) - 1) - #define TS_SAMPLES 4 --#define TS_RECT_SIZE 8 --#define TSC2005_TS_PENUP_TIME 20 -+#define TSC2005_TS_PENUP_TIME 40 - - static const u32 tsc2005_read_reg[] = { - (TSC2005_REG | TSC2005_REG_X | TSC2005_REG_READ) << 16, -@@ -190,6 +192,16 @@ struct tsc2005 { - struct input_dev *idev; - char 
phys[32]; - struct timer_list penup_timer; -+ -+ /* ESD recovery via a hardware reset if the tsc2005 -+ * doesn't respond after a configurable period (in ms) of -+ * IRQ/SPI inactivity. If esd_timeout is 0, timer and work -+ * fields are used. -+ */ -+ u32 esd_timeout; -+ struct timer_list esd_timer; -+ struct work_struct esd_work; -+ - spinlock_t lock; - struct mutex mutex; - -@@ -197,10 +209,19 @@ struct tsc2005 { - struct spi_transfer read_xfer[NUM_READ_REGS]; - u32 data[NUM_READ_REGS]; - -- /* previous x,y,z */ -- int x; -- int y; -- int p; -+ /* previously reported x,y,p (if pen_down) */ -+ int out_x; -+ int out_y; -+ int out_p; -+ /* fudge parameters - changes must exceed one of these. */ -+ int fudge_x; -+ int fudge_y; -+ int fudge_p; -+ /* raw copy of previous x,y,z */ -+ int in_x; -+ int in_y; -+ int in_z1; -+ int in_z2; - /* average accumulators for each component */ - int sample_cnt; - int avg_x; -@@ -213,19 +234,19 @@ struct tsc2005 { - int stab_time; - int p_max; - int touch_pressure; -- int irq; -- s16 dav_gpio; - /* status */ - u8 sample_sent; - u8 pen_down; - u8 disabled; - u8 disable_depth; -- u8 spi_active; -+ u8 spi_pending; -+ -+ void (*set_reset)(bool enable); - }; - - static void tsc2005_cmd(struct tsc2005 *ts, u8 cmd) - { -- u16 data = TSC2005_CMD | TSC2005_CMD_12BIT | cmd; -+ u8 data = TSC2005_CMD | TSC2005_CMD_12BIT | cmd; - struct spi_message msg; - struct spi_transfer xfer = { 0 }; - -@@ -259,6 +280,26 @@ static void tsc2005_write(struct tsc2005 - spi_sync(ts->spi, &msg); - } - -+static void tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value) -+{ -+ u32 tx; -+ u32 rx = 0; -+ struct spi_message msg; -+ struct spi_transfer xfer = { 0 }; -+ -+ tx = (TSC2005_REG | reg | TSC2005_REG_READ) << 16; -+ -+ xfer.tx_buf = &tx; -+ xfer.rx_buf = ℞ -+ xfer.len = 4; -+ xfer.bits_per_word = 24; -+ -+ spi_message_init(&msg); -+ spi_message_add_tail(&xfer, &msg); -+ spi_sync(ts->spi, &msg); -+ *value = rx; -+} -+ - static void tsc2005_ts_update_pen_state(struct tsc2005 *ts, - int x, int y, int pressure) - { -@@ -294,6 +335,11 @@ static void tsc2005_ts_rx(void *arg) - - spin_lock_irqsave(&ts->lock, flags); - -+ if (ts->disable_depth) { -+ ts->spi_pending = 0; -+ goto out; -+ } -+ - x = ts->data[0]; - y = ts->data[1]; - z1 = ts->data[2]; -@@ -304,9 +350,27 @@ static void tsc2005_ts_rx(void *arg) - goto out; - - /* skip coords if the pressure-components are out of range */ -- if (z1 < 100 || z2 > 4000) -+ if (z1 < 100 || z2 > MAX_12BIT || z1 >= z2) - goto out; - -+ /* skip point if this is a pen down with the exact same values as -+ * the value before pen-up - that implies SPI fed us stale data -+ */ -+ if (!ts->pen_down && -+ ts->in_x == x && -+ ts->in_y == y && -+ ts->in_z1 == z1 && -+ ts->in_z2 == z2) -+ goto out; -+ -+ /* At this point we are happy we have a valid and useful reading. -+ * Remember it for later comparisons. We may now begin downsampling -+ */ -+ ts->in_x = x; -+ ts->in_y = y; -+ ts->in_z1 = z1; -+ ts->in_z2 = z2; -+ - /* don't run average on the "pen down" event */ - if (ts->sample_sent) { - ts->avg_x += x; -@@ -329,48 +393,70 @@ static void tsc2005_ts_rx(void *arg) - ts->avg_z1 = 0; - ts->avg_z2 = 0; - -- if (z1) { -- pressure = x * (z2 - z1) / z1; -- pressure = pressure * ts->x_plate_ohm / 4096; -- } else -- goto out; -+ pressure = x * (z2 - z1) / z1; -+ pressure = pressure * ts->x_plate_ohm / 4096; - - pressure_limit = ts->sample_sent? 
ts->p_max: ts->touch_pressure; - if (pressure > pressure_limit) - goto out; - -- /* discard the event if it still is within the previous rect - unless -- * if the pressure is harder, but then use previous x,y position */ -+ /* Discard the event if it still is within the previous rect - -+ * unless the pressure is clearly harder, but then use previous -+ * x,y position. If any coordinate deviates enough, fudging -+ * of all three will still take place in the input layer. -+ */ - inside_rect = (ts->sample_sent && -- x > (int)ts->x - TS_RECT_SIZE && -- x < (int)ts->x + TS_RECT_SIZE && -- y > (int)ts->y - TS_RECT_SIZE && -- y < (int)ts->y + TS_RECT_SIZE); -+ x > (int)ts->out_x - ts->fudge_x && -+ x < (int)ts->out_x + ts->fudge_x && -+ y > (int)ts->out_y - ts->fudge_y && -+ y < (int)ts->out_y + ts->fudge_y); - if (inside_rect) -- x = ts->x, y = ts->y; -+ x = ts->out_x, y = ts->out_y; - -- if (!inside_rect || pressure < ts->p) { -+ if (!inside_rect || pressure < (ts->out_p - ts->fudge_p)) { - tsc2005_ts_update_pen_state(ts, x, y, pressure); - ts->sample_sent = 1; -- ts->x = x; -- ts->y = y; -- ts->p = pressure; -+ ts->out_x = x; -+ ts->out_y = y; -+ ts->out_p = pressure; - } - out: -- ts->spi_active = 0; -- spin_unlock_irqrestore(&ts->lock, flags); -+ if (ts->spi_pending > 1) { -+ /* One or more interrupts (sometimes several dozens) -+ * occured while waiting for the SPI read - get -+ * another read going. -+ */ -+ ts->spi_pending = 1; -+ if (spi_async(ts->spi, &ts->read_msg)) { -+ dev_err(&ts->spi->dev, "ts: spi_async() failed"); -+ ts->spi_pending = 0; -+ } -+ } else -+ ts->spi_pending = 0; - - /* kick pen up timer - to make sure it expires again(!) */ -- if (ts->sample_sent) -+ if (ts->sample_sent) { - mod_timer(&ts->penup_timer, - jiffies + msecs_to_jiffies(TSC2005_TS_PENUP_TIME)); -+ /* Also kick the watchdog, as we still think we're alive */ -+ if (ts->esd_timeout && ts->disable_depth == 0) { -+ unsigned long wdj = msecs_to_jiffies(ts->esd_timeout); -+ mod_timer(&ts->esd_timer, round_jiffies(jiffies+wdj)); -+ } -+ } -+ spin_unlock_irqrestore(&ts->lock, flags); - } - -+/* This penup timer is very forgiving of delayed SPI reads. The -+ * (ESD) watchdog will rescue us if spi_pending remains set, unless -+ * we are enterring the disabled state. In that case we must just -+ * handle the pen up, and let disabling complete. -+ */ - static void tsc2005_ts_penup_timer_handler(unsigned long data) - { - struct tsc2005 *ts = (struct tsc2005 *)data; -- -- if (ts->sample_sent) { -+ if ((!ts->spi_pending || ts->disable_depth) && -+ ts->sample_sent) { - tsc2005_ts_update_pen_state(ts, 0, 0, 0); - ts->sample_sent = 0; - } -@@ -378,25 +464,38 @@ static void tsc2005_ts_penup_timer_handl - - /* - * This interrupt is called when pen is down and coordinates are -- * available. That is indicated by a falling edge on DAV line. -+ * available. That is indicated by a either: -+ * a) a rising edge on PINTDAV or (PENDAV mode) -+ * b) a falling edge on DAV line (DAV mode) -+ * depending on the setting of the IRQ bits in the CFR2 setting above. 
- */ - static irqreturn_t tsc2005_ts_irq_handler(int irq, void *dev_id) - { - struct tsc2005 *ts = dev_id; -- int r; -- -- if (ts->spi_active) -- return IRQ_HANDLED; -- -- ts->spi_active = 1; -- r = spi_async(ts->spi, &ts->read_msg); -- if (r) -- dev_err(&ts->spi->dev, "ts: spi_async() failed"); -+ if (ts->disable_depth) -+ goto out; - -- /* kick pen up timer */ -- mod_timer(&ts->penup_timer, -- jiffies + msecs_to_jiffies(TSC2005_TS_PENUP_TIME)); -+ if (!ts->spi_pending) { -+ if (spi_async(ts->spi, &ts->read_msg)) { -+ dev_err(&ts->spi->dev, "ts: spi_async() failed"); -+ goto out; -+ } -+ } -+ /* By shifting in 1s we can never wrap */ -+ ts->spi_pending = (ts->spi_pending<<1)+1; - -+ /* Kick pen up timer only if it's not been started yet. Strictly, -+ * it isn't even necessary to start it at all here, but doing so -+ * keeps an equivalence between pen state and timer state. -+ * The SPI read loop will keep pushing it into the future. -+ * If it times out with an SPI pending, it's ignored anyway. -+ */ -+ if (!timer_pending(&ts->penup_timer)) { -+ unsigned long pu = msecs_to_jiffies(TSC2005_TS_PENUP_TIME); -+ ts->penup_timer.expires = jiffies + pu; -+ add_timer(&ts->penup_timer); -+ } -+out: - return IRQ_HANDLED; - } - -@@ -425,31 +524,31 @@ static ssize_t tsc2005_ts_pen_down_show( - struct device_attribute *attr, - char *buf) - { -- struct tsc2005 *tsc = dev_get_drvdata(dev); -+ struct tsc2005 *ts = dev_get_drvdata(dev); - -- return sprintf(buf, "%u\n", tsc->pen_down); -+ return sprintf(buf, "%u\n", ts->pen_down); - } - - static DEVICE_ATTR(pen_down, S_IRUGO, tsc2005_ts_pen_down_show, NULL); - --static int tsc2005_configure(struct tsc2005 *tsc, int flags) -+static int tsc2005_configure(struct tsc2005 *ts, int flags) - { -- tsc2005_write(tsc, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE); -- tsc2005_write(tsc, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE); -- tsc2005_write(tsc, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE); -- tsc2005_cmd(tsc, flags); -+ tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE); -+ tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE); -+ tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE); -+ tsc2005_cmd(ts, flags); - - return 0; - } - --static void tsc2005_start_scan(struct tsc2005 *tsc) -+static void tsc2005_start_scan(struct tsc2005 *ts) - { -- tsc2005_configure(tsc, TSC2005_CMD_SCAN_XYZZ); -+ tsc2005_configure(ts, TSC2005_CMD_SCAN_XYZZ); - } - --static void tsc2005_stop_scan(struct tsc2005 *tsc) -+static void tsc2005_stop_scan(struct tsc2005 *ts) - { -- tsc2005_cmd(tsc, TSC2005_CMD_STOP); -+ tsc2005_cmd(ts, TSC2005_CMD_STOP); - } - - /* Must be called with mutex held */ -@@ -458,7 +557,9 @@ static void tsc2005_disable(struct tsc20 - if (ts->disable_depth++ != 0) - return; - -- disable_irq(ts->irq); -+ disable_irq(ts->spi->irq); -+ if (ts->esd_timeout) -+ del_timer(&ts->esd_timer); - - /* wait until penup timer expire normally */ - do { -@@ -470,12 +571,18 @@ static void tsc2005_disable(struct tsc20 - - static void tsc2005_enable(struct tsc2005 *ts) - { -- if (--ts->disable_depth != 0) -- return; -- -- enable_irq(ts->irq); -+ if (ts->disable_depth != 1) -+ goto out; - -+ if (ts->esd_timeout) { -+ unsigned long wdj = msecs_to_jiffies(ts->esd_timeout); -+ ts->esd_timer.expires = round_jiffies(jiffies+wdj); -+ add_timer(&ts->esd_timer); -+ } - tsc2005_start_scan(ts); -+ enable_irq(ts->spi->irq); -+out: -+ --ts->disable_depth; - } - - static ssize_t tsc2005_disable_show(struct device *dev, -@@ -490,55 +597,144 @@ static ssize_t tsc2005_disable_store(str 
- struct device_attribute *attr, - const char *buf, size_t count) - { -- struct tsc2005 *tsc = dev_get_drvdata(dev); -+ struct tsc2005 *ts = dev_get_drvdata(dev); - unsigned long res; - int i; - -- i = strict_strtoul(buf, 10, &res); -- i = i ? 1 : 0; -+ if (strict_strtoul(buf, 10, &res) < 0) -+ return -EINVAL; -+ i = res ? 1 : 0; - -- mutex_lock(&tsc->mutex); -- if (i == tsc->disabled) -+ mutex_lock(&ts->mutex); -+ if (i == ts->disabled) - goto out; -- tsc->disabled = i; -+ ts->disabled = i; - - if (i) -- tsc2005_disable(tsc); -+ tsc2005_disable(ts); - else -- tsc2005_enable(tsc); -+ tsc2005_enable(ts); - out: -- mutex_unlock(&tsc->mutex); -+ mutex_unlock(&ts->mutex); - return count; - } - - static DEVICE_ATTR(disable_ts, 0664, tsc2005_disable_show, - tsc2005_disable_store); - -+static ssize_t tsc2005_ctrl_selftest_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ u16 temp_high_orig, temp_high_test, temp_high; -+ unsigned int result = 1; -+ struct tsc2005 *ts = dev_get_drvdata(dev); -+ -+ if (!ts->set_reset) { -+ dev_warn(&ts->spi->dev, -+ "unable to selftest: reset not configured\n"); -+ result = 0; -+ goto out; -+ } -+ -+ mutex_lock(&ts->mutex); -+ tsc2005_disable(ts); -+ -+ /* Test ctrl communications via temp high / low registers */ -+ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig); -+ -+ temp_high_test = (temp_high_orig - 1) & 0x0FFF; -+ -+ tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test); -+ -+ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high); -+ -+ if (temp_high != temp_high_test) { -+ result = 0; -+ dev_warn(dev, "selftest failed: %d != %d\n", -+ temp_high, temp_high_test); -+ } -+ -+ /* HW Reset */ -+ ts->set_reset(0); -+ msleep(1); /* only 10us required */ -+ ts->set_reset(1); -+ -+ tsc2005_enable(ts); -+ -+ /* Test that reset really happened */ -+ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high); -+ -+ if (temp_high != temp_high_orig) { -+ result = 0; -+ dev_warn(dev, "selftest failed after reset: " -+ "%d != %d\n", -+ temp_high, temp_high_orig); -+ } -+ -+ mutex_unlock(&ts->mutex); -+ -+out: -+ return sprintf(buf, "%u\n", result); -+} -+ -+static DEVICE_ATTR(ts_ctrl_selftest, S_IRUGO, tsc2005_ctrl_selftest_show, NULL); -+ -+static void tsc2005_esd_timer_handler(unsigned long data) -+{ -+ struct tsc2005 *ts = (struct tsc2005 *)data; -+ if (!ts->disable_depth) -+ schedule_work(&ts->esd_work); -+} -+ -+static void tsc2005_rst_handler(struct work_struct *work) -+{ -+ u16 reg_val; -+ struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work); -+ unsigned long wdj; -+ -+ mutex_lock(&ts->mutex); -+ -+ /* If we are disabled, or the a touch has been detected, -+ * then ignore this timeout. The enable will restart the -+ * watchdog, as it restarts scanning -+ */ -+ if (ts->disable_depth) -+ goto out; -+ -+ /* If we cannot read our known value from configuration register 0 -+ * then reset the controller as if from power-up and start -+ * scanning again. Always re-arm the watchdog. -+ */ -+ tsc2005_read(ts, TSC2005_REG_CFR0, ®_val); -+ if ((reg_val ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK) { -+ dev_info(&ts->spi->dev, "TSC not responding, resetting.\n"); -+ /* If this timer kicked in, the penup timer, if ever active -+ * at all, must have expired ages ago, so no need to del it. 
-+ */ -+ ts->set_reset(0); -+ if (ts->sample_sent) { -+ tsc2005_ts_update_pen_state(ts, 0, 0, 0); -+ ts->sample_sent = 0; -+ } -+ ts->spi_pending = 0; -+ msleep(1); /* only 10us required */ -+ ts->set_reset(1); -+ tsc2005_start_scan(ts); -+ } -+ wdj = msecs_to_jiffies(ts->esd_timeout); -+ mod_timer(&ts->esd_timer, round_jiffies(jiffies+wdj)); -+ -+out: -+ mutex_unlock(&ts->mutex); -+} - - static int __devinit tsc2005_ts_init(struct tsc2005 *ts, - struct tsc2005_platform_data *pdata) - { - struct input_dev *idev; -- int dav_gpio, r; -+ int r; - int x_max, y_max; -- int x_fudge, y_fudge, p_fudge; -- -- if (pdata->dav_gpio < 0) { -- dev_err(&ts->spi->dev, "need DAV GPIO"); -- return -EINVAL; -- } -- dav_gpio = pdata->dav_gpio; -- ts->dav_gpio = dav_gpio; -- dev_dbg(&ts->spi->dev, "TSC2005: DAV GPIO = %d\n", dav_gpio); -- -- r = gpio_request(dav_gpio, "TSC2005 dav"); -- if (r < 0) { -- dev_err(&ts->spi->dev, "unable to get DAV GPIO"); -- goto err1; -- } -- gpio_direction_input(dav_gpio); -- ts->irq = gpio_to_irq(dav_gpio); -- dev_dbg(&ts->spi->dev, "TSC2005: DAV IRQ = %d\n", ts->irq); - - init_timer(&ts->penup_timer); - setup_timer(&ts->penup_timer, tsc2005_ts_penup_timer_handler, -@@ -551,17 +747,19 @@ static int __devinit tsc2005_ts_init(str - ts->hw_avg_max = pdata->ts_hw_avg; - ts->stab_time = pdata->ts_stab_time; - x_max = pdata->ts_x_max ? : 4096; -- x_fudge = pdata->ts_x_fudge ? : 4; -+ ts->fudge_x = pdata->ts_x_fudge ? : 4; - y_max = pdata->ts_y_max ? : 4096; -- y_fudge = pdata->ts_y_fudge ? : 8; -+ ts->fudge_y = pdata->ts_y_fudge ? : 8; - ts->p_max = pdata->ts_pressure_max ? : MAX_12BIT; - ts->touch_pressure = pdata->ts_touch_pressure ? : ts->p_max; -- p_fudge = pdata->ts_pressure_fudge ? : 2; -+ ts->fudge_p = pdata->ts_pressure_fudge ? : 2; -+ -+ ts->set_reset = pdata->set_reset; - - idev = input_allocate_device(); - if (idev == NULL) { - r = -ENOMEM; -- goto err2; -+ goto err1; - } - - idev->name = "TSC2005 touchscreen"; -@@ -571,65 +769,95 @@ static int __devinit tsc2005_ts_init(str - - idev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY); - idev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE); -+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); - ts->idev = idev; - - tsc2005_ts_setup_spi_xfer(ts); - -- input_set_abs_params(idev, ABS_X, 0, x_max, x_fudge, 0); -- input_set_abs_params(idev, ABS_Y, 0, y_max, y_fudge, 0); -- input_set_abs_params(idev, ABS_PRESSURE, 0, ts->p_max, p_fudge, 0); -+ input_set_abs_params(idev, ABS_X, 0, x_max, ts->fudge_x, 0); -+ input_set_abs_params(idev, ABS_Y, 0, y_max, ts->fudge_y, 0); -+ input_set_abs_params(idev, ABS_PRESSURE, 0, ts->p_max, ts->fudge_p, 0); - - tsc2005_start_scan(ts); - -- r = request_irq(ts->irq, tsc2005_ts_irq_handler, -- IRQF_TRIGGER_FALLING | IRQF_DISABLED | -- IRQF_SAMPLE_RANDOM, "tsc2005", ts); -+ r = request_irq(ts->spi->irq, tsc2005_ts_irq_handler, -+ (((TSC2005_CFR2_INITVALUE & TSC2005_CFR2_IRQ_MASK) == -+ TSC2005_CFR2_IRQ_PENDAV) -+ ? 
IRQF_TRIGGER_RISING -+ : IRQF_TRIGGER_FALLING) | -+ IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "tsc2005", ts); - if (r < 0) { - dev_err(&ts->spi->dev, "unable to get DAV IRQ"); -- goto err3; -+ goto err2; - } - -- set_irq_wake(ts->irq, 1); -+ set_irq_wake(ts->spi->irq, 1); - - r = input_register_device(idev); - if (r < 0) { - dev_err(&ts->spi->dev, "can't register touchscreen device\n"); -- goto err4; -+ goto err3; - } - - /* We can tolerate these failing */ -- if (device_create_file(&ts->spi->dev, &dev_attr_pen_down)); -- if (device_create_file(&ts->spi->dev, &dev_attr_disable_ts)); -+ r = device_create_file(&ts->spi->dev, &dev_attr_ts_ctrl_selftest); -+ if (r < 0) -+ dev_warn(&ts->spi->dev, "can't create sysfs file for %s: %d\n", -+ dev_attr_ts_ctrl_selftest.attr.name, r); -+ -+ r = device_create_file(&ts->spi->dev, &dev_attr_pen_down); -+ if (r < 0) -+ dev_warn(&ts->spi->dev, "can't create sysfs file for %s: %d\n", -+ dev_attr_pen_down.attr.name, r); -+ -+ r = device_create_file(&ts->spi->dev, &dev_attr_disable_ts); -+ if (r < 0) -+ dev_warn(&ts->spi->dev, "can't create sysfs file for %s: %d\n", -+ dev_attr_disable_ts.attr.name, r); -+ -+ /* Finally, configure and start the optional EDD watchdog. */ -+ ts->esd_timeout = pdata->esd_timeout; -+ if (ts->esd_timeout && ts->set_reset) { -+ unsigned long wdj; -+ setup_timer(&ts->esd_timer, tsc2005_esd_timer_handler, -+ (unsigned long)ts); -+ INIT_WORK(&ts->esd_work, tsc2005_rst_handler); -+ wdj = msecs_to_jiffies(ts->esd_timeout); -+ ts->esd_timer.expires = round_jiffies(jiffies+wdj); -+ add_timer(&ts->esd_timer); -+ } - - return 0; --err4: -- free_irq(ts->irq, ts); - err3: -+ free_irq(ts->spi->irq, ts); -+err2: - tsc2005_stop_scan(ts); - input_free_device(idev); --err2: -- gpio_free(dav_gpio); - err1: - return r; - } - - static int __devinit tsc2005_probe(struct spi_device *spi) - { -- struct tsc2005 *tsc; -+ struct tsc2005 *ts; - struct tsc2005_platform_data *pdata = spi->dev.platform_data; - int r; - -+ if (spi->irq < 0) { -+ dev_dbg(&spi->dev, "no irq?\n"); -+ return -ENODEV; -+ } - if (!pdata) { - dev_dbg(&spi->dev, "no platform data?\n"); - return -ENODEV; - } - -- tsc = kzalloc(sizeof(*tsc), GFP_KERNEL); -- if (tsc == NULL) -+ ts = kzalloc(sizeof(*ts), GFP_KERNEL); -+ if (ts == NULL) - return -ENOMEM; - -- dev_set_drvdata(&spi->dev, tsc); -- tsc->spi = spi; -+ dev_set_drvdata(&spi->dev, ts); -+ ts->spi = spi; - spi->dev.power.power_state = PMSG_ON; - - spi->mode = SPI_MODE_0; -@@ -641,14 +869,14 @@ static int __devinit tsc2005_probe(struc - - spi_setup(spi); - -- r = tsc2005_ts_init(tsc, pdata); -+ r = tsc2005_ts_init(ts, pdata); - if (r) - goto err1; - - return 0; - - err1: -- kfree(tsc); -+ kfree(ts); - return r; - } - -@@ -662,11 +890,13 @@ static int __devexit tsc2005_remove(stru - - device_remove_file(&ts->spi->dev, &dev_attr_disable_ts); - device_remove_file(&ts->spi->dev, &dev_attr_pen_down); -+ device_remove_file(&ts->spi->dev, &dev_attr_ts_ctrl_selftest); - -- free_irq(ts->irq, ts); -+ free_irq(ts->spi->irq, ts); - input_unregister_device(ts->idev); - -- gpio_free(ts->dav_gpio); -+ if (ts->esd_timeout) -+ del_timer(&ts->esd_timer); - kfree(ts); - - return 0; -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/busses/i2c-omap.c linux-omap-2.6.28-nokia1/drivers/i2c/busses/i2c-omap.c ---- linux-omap-2.6.28-omap1/drivers/i2c/busses/i2c-omap.c 2011-06-22 13:14:17.983067751 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/busses/i2c-omap.c 2011-06-22 13:19:32.663063277 +0200 -@@ -37,6 +37,7 @@ - #include - #include - #include -+#include - - /* 
I2C controller revisions */ - #define OMAP_I2C_REV_2 0x20 -@@ -92,8 +93,10 @@ - #define OMAP_I2C_STAT_AL (1 << 0) /* Arbitration lost int ena */ - - /* I2C WE wakeup enable register */ --#define OMAP_I2C_WE_XDR_WE (1 << 14) /* TX drain wakup */ -+#define OMAP_I2C_WE_XDR_WE (1 << 14) /* TX drain wakeup */ - #define OMAP_I2C_WE_RDR_WE (1 << 13) /* RX drain wakeup */ -+#define OMAP_I2C_WE_ROVR_WE (1 << 11) /* RX overflow wakeup */ -+#define OMAP_I2C_WE_XUDF_WE (1 << 10) /* TX underflow wakeup */ - #define OMAP_I2C_WE_AAS_WE (1 << 9) /* Address as slave wakeup*/ - #define OMAP_I2C_WE_BF_WE (1 << 8) /* Bus free wakeup */ - #define OMAP_I2C_WE_STC_WE (1 << 6) /* Start condition wakeup */ -@@ -104,6 +107,7 @@ - #define OMAP_I2C_WE_AL_WE (1 << 0) /* Arbitration lost wakeup */ - - #define OMAP_I2C_WE_ALL (OMAP_I2C_WE_XDR_WE | OMAP_I2C_WE_RDR_WE | \ -+ OMAP_I2C_WE_ROVR_WE | OMAP_I2C_WE_XUDF_WE | \ - OMAP_I2C_WE_AAS_WE | OMAP_I2C_WE_BF_WE | \ - OMAP_I2C_WE_STC_WE | OMAP_I2C_WE_GC_WE | \ - OMAP_I2C_WE_DRDY_WE | OMAP_I2C_WE_ARDY_WE | \ -@@ -178,6 +182,12 @@ struct omap_i2c_dev { - unsigned b_hw:1; /* bad h/w fixes */ - unsigned idle:1; - u16 iestate; /* Saved interrupt register */ -+ u16 pscstate; -+ u16 scllstate; -+ u16 sclhstate; -+ u16 bufstate; -+ u16 syscstate; -+ u16 westate; - }; - - static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev, -@@ -231,9 +241,16 @@ static void omap_i2c_unidle(struct omap_ - if (dev->iclk != NULL) - clk_enable(dev->iclk); - clk_enable(dev->fclk); -+ if (cpu_is_omap34xx()) { -+ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate); -+ omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate); -+ omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate); -+ omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, dev->bufstate); -+ omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, dev->syscstate); -+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); -+ } - dev->idle = 0; -- if (dev->iestate) -- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); -+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); - } - - static void omap_i2c_idle(struct omap_i2c_dev *dev) -@@ -260,11 +277,12 @@ static void omap_i2c_idle(struct omap_i2 - - static int omap_i2c_init(struct omap_i2c_dev *dev) - { -- u16 psc = 0, scll = 0, sclh = 0; -+ u16 psc = 0, scll = 0, sclh = 0, buf = 0; - u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0; - unsigned long fclk_rate = 12000000; - unsigned long timeout; - unsigned long internal_clk = 0; -+ int delay_count = 100; - - if (dev->rev >= OMAP_I2C_REV_2) { - omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK); -@@ -279,7 +297,10 @@ static int omap_i2c_init(struct omap_i2c - "for controller reset\n"); - return -ETIMEDOUT; - } -- msleep(1); -+ if (--delay_count > 0) -+ udelay(1); -+ else -+ msleep(1); - } - - /* SYSC register is cleared by the reset; rewrite it */ -@@ -291,21 +312,22 @@ static int omap_i2c_init(struct omap_i2c - } else if (dev->rev >= OMAP_I2C_REV_ON_3430) { - u32 v; - -- v = SYSC_AUTOIDLE_MASK; -- v |= SYSC_ENAWAKEUP_MASK; -- v |= (SYSC_IDLEMODE_SMART << -+ dev->syscstate = SYSC_AUTOIDLE_MASK; -+ dev->syscstate |= SYSC_ENAWAKEUP_MASK; -+ dev->syscstate |= (SYSC_IDLEMODE_SMART << - __ffs(SYSC_SIDLEMODE_MASK)); -- v |= (SYSC_CLOCKACTIVITY_FCLK << -+ dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK << - __ffs(SYSC_CLOCKACTIVITY_MASK)); - -- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, v); -+ omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, -+ dev->syscstate); - /* - * Enabling all wakup sources to stop I2C freezing on - * WFI 
instruction. - * REVISIT: Some wkup sources might not be needed. - */ -- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, -- OMAP_I2C_WE_ALL); -+ dev->westate = OMAP_I2C_WE_ALL; -+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); - - } - } -@@ -352,13 +374,17 @@ static int omap_i2c_init(struct omap_i2c - - /* If configured for High Speed */ - if (dev->speed > 400) { -+ unsigned long scl; -+ - /* For first phase of HS mode */ -- fsscll = internal_clk / (400 * 2) - 6; -- fssclh = internal_clk / (400 * 2) - 6; -+ scl = internal_clk / 400; -+ fsscll = scl - (scl / 3) - 7; -+ fssclh = (scl / 3) - 5; - - /* For second phase of HS mode */ -- hsscll = fclk_rate / (dev->speed * 2) - 6; -- hssclh = fclk_rate / (dev->speed * 2) - 6; -+ scl = fclk_rate / dev->speed; -+ hsscll = scl - (scl / 3) - 7; -+ hssclh = (scl / 3) - 9; - } else { - /* To handle F/S modes */ - fsscll = internal_clk / (dev->speed * 2) - 3; -@@ -382,23 +408,28 @@ static int omap_i2c_init(struct omap_i2c - omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll); - omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh); - -- if (dev->fifo_size) -- /* Note: setup required fifo size - 1 */ -- omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, -- (dev->fifo_size - 1) << 8 | /* RTRSH */ -- OMAP_I2C_BUF_RXFIF_CLR | -- (dev->fifo_size - 1) | /* XTRSH */ -- OMAP_I2C_BUF_TXFIF_CLR); -+ if (dev->fifo_size) { -+ /* Note: setup required fifo size - 1. RTRSH and XTRSH */ -+ buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR | -+ (dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR; -+ omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf); -+ } - - /* Take the I2C module out of reset: */ - omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); - - /* Enable interrupts */ -- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, -- (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | -+ dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | - OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | - OMAP_I2C_IE_AL) | ((dev->fifo_size) ? -- (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0)); -+ (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); -+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); -+ if (cpu_is_omap34xx()) { -+ dev->pscstate = psc; -+ dev->scllstate = scll; -+ dev->sclhstate = sclh; -+ dev->bufstate = buf; -+ } - return 0; - } - -@@ -408,6 +439,7 @@ static int omap_i2c_init(struct omap_i2c - static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev) - { - unsigned long timeout; -+ int delay_count = 100; - - timeout = jiffies + OMAP_I2C_TIMEOUT; - while (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) { -@@ -415,7 +447,10 @@ static int omap_i2c_wait_for_bb(struct o - dev_warn(dev->dev, "timeout waiting for bus ready\n"); - return -ETIMEDOUT; - } -- msleep(1); -+ if (--delay_count > 0) -+ udelay(1); -+ else -+ msleep(1); - } - - return 0; -@@ -487,6 +522,8 @@ static int omap_i2c_xfer_msg(struct i2c_ - cpu_relax(); - } - -+ /* FIXME: should consider ARDY value before writing to I2C_CON -+ */ - w |= OMAP_I2C_CON_STP; - w &= ~OMAP_I2C_CON_STT; - omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); -@@ -496,8 +533,15 @@ static int omap_i2c_xfer_msg(struct i2c_ - * REVISIT: We should abort the transfer on signals, but the bus goes - * into arbitration and we're currently unable to recover from it. - */ -+ /* -+ * REVISIT: Add a mpu wake-up latency constraint to let us wake -+ * quickly enough for i2c transfers to work properly. Should change -+ * the code to use a latency constraint function passed from pdata. 
-+ */ -+ omap_pm_set_max_mpu_wakeup_lat(dev->dev, 500); - r = wait_for_completion_timeout(&dev->cmd_complete, - OMAP_I2C_TIMEOUT); -+ omap_pm_set_max_mpu_wakeup_lat(dev->dev, -1); - dev->buf_len = 0; - if (r < 0) - return r; -@@ -673,8 +717,12 @@ omap_i2c_isr(int this_irq, void *dev_id) - err |= OMAP_I2C_STAT_AL; - } - if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | -- OMAP_I2C_STAT_AL)) -+ OMAP_I2C_STAT_AL)) { -+ /* errata: ARDY needs double clear for some hardware */ -+ omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, -+ OMAP_I2C_STAT_ARDY); - omap_i2c_complete_cmd(dev, err); -+ } - if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { - u8 num_bytes = 1; - if (dev->fifo_size) { -@@ -846,14 +894,16 @@ omap_i2c_probe(struct platform_device *p - * call back latencies. - */ - dev->fifo_size = (dev->fifo_size / 2); -- dev->b_hw = 1; /* Enable hardware fixes */ - } - -+ if (cpu_is_omap2430()) -+ dev->b_hw = 1; /* Enable hardware fix (might be es1 only) */ -+ - /* reset ASAP, clearing any IRQs */ - omap_i2c_init(dev); - - isr = (dev->rev < OMAP_I2C_REV_2) ? omap_i2c_rev1_isr : omap_i2c_isr; -- r = request_irq(dev->irq, isr, 0, pdev->name, dev); -+ r = request_irq(dev->irq, isr, IRQF_DISABLED, pdev->name, dev); - - if (r) { - dev_err(dev->dev, "failure requesting irq %i\n", dev->irq); -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/isp1301_omap.c linux-omap-2.6.28-nokia1/drivers/i2c/chips/isp1301_omap.c ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/isp1301_omap.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/isp1301_omap.c 2008-12-25 00:26:37.000000000 +0100 -@@ -0,0 +1,1683 @@ -+/* -+ * isp1301_omap - ISP 1301 USB transceiver, talking to OMAP OTG controller -+ * -+ * Copyright (C) 2004 Texas Instruments -+ * Copyright (C) 2004 David Brownell -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+ -+ -+#ifndef DEBUG -+#undef VERBOSE -+#endif -+ -+ -+#define DRIVER_VERSION "24 August 2004" -+#define DRIVER_NAME (isp1301_driver.driver.name) -+ -+MODULE_DESCRIPTION("ISP1301 USB OTG Transceiver Driver"); -+MODULE_LICENSE("GPL"); -+ -+struct isp1301 { -+ struct otg_transceiver otg; -+ struct i2c_client *client; -+ void (*i2c_release)(struct device *dev); -+ -+ int irq_type; -+ -+ u32 last_otg_ctrl; -+ unsigned working:1; -+ -+ struct timer_list timer; -+ -+ /* use keventd context to change the state for us */ -+ struct work_struct work; -+ -+ unsigned long todo; -+# define WORK_UPDATE_ISP 0 /* update ISP from OTG */ -+# define WORK_UPDATE_OTG 1 /* update OTG from ISP */ -+# define WORK_HOST_RESUME 4 /* resume host */ -+# define WORK_TIMER 6 /* timer fired */ -+# define WORK_STOP 7 /* don't resubmit */ -+}; -+ -+ -+/* bits in OTG_CTRL */ -+ -+#define OTG_XCEIV_OUTPUTS \ -+ (OTG_ASESSVLD|OTG_BSESSEND|OTG_BSESSVLD|OTG_VBUSVLD|OTG_ID) -+#define OTG_XCEIV_INPUTS \ -+ (OTG_PULLDOWN|OTG_PULLUP|OTG_DRV_VBUS|OTG_PD_VBUS|OTG_PU_VBUS|OTG_PU_ID) -+#define OTG_CTRL_BITS \ -+ (OTG_A_BUSREQ|OTG_A_SETB_HNPEN|OTG_B_BUSREQ|OTG_B_HNPEN|OTG_BUSDROP) -+ /* and OTG_PULLUP is sometimes written */ -+ -+#define OTG_CTRL_MASK (OTG_DRIVER_SEL| \ -+ OTG_XCEIV_OUTPUTS|OTG_XCEIV_INPUTS| \ -+ OTG_CTRL_BITS) -+ -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* board-specific PM hooks */ -+ -+#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3) -+ -+#if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE) -+ -+#include -+ -+#else -+ -+static inline int tps65010_set_vbus_draw(unsigned mA) -+{ -+ pr_debug("tps65010: draw %d mA (STUB)\n", mA); -+ return 0; -+} -+ -+#endif -+ -+static void enable_vbus_draw(struct isp1301 *isp, unsigned mA) -+{ -+ int status = tps65010_set_vbus_draw(mA); -+ if (status < 0) -+ pr_debug(" VBUS %d mA error %d\n", mA, status); -+} -+ -+static void enable_vbus_source(struct isp1301 *isp) -+{ -+ /* this board won't supply more than 8mA vbus power. -+ * some boards can switch a 100ma "unit load" (or more). -+ */ -+} -+ -+ -+/* products will deliver OTG messages with LEDs, GUI, etc */ -+static inline void notresponding(struct isp1301 *isp) -+{ -+ printk(KERN_NOTICE "OTG device not responding.\n"); -+} -+ -+ -+#endif -+ -+#if defined(CONFIG_MACH_OMAP_H4) -+ -+static void enable_vbus_draw(struct isp1301 *isp, unsigned mA) -+{ -+ /* H4 controls this by DIP switch S2.4; no soft control. -+ * ON means the charger is always enabled. Leave it OFF -+ * unless the OTG port is used only in B-peripheral mode. -+ */ -+} -+ -+static void enable_vbus_source(struct isp1301 *isp) -+{ -+ /* this board won't supply more than 8mA vbus power. -+ * some boards can switch a 100ma "unit load" (or more). 
-+ */ -+} -+ -+ -+/* products will deliver OTG messages with LEDs, GUI, etc */ -+static inline void notresponding(struct isp1301 *isp) -+{ -+ printk(KERN_NOTICE "OTG device not responding.\n"); -+} -+ -+ -+#endif -+ -+/*-------------------------------------------------------------------------*/ -+ -+static struct i2c_driver isp1301_driver; -+ -+/* smbus apis are used for portability */ -+ -+static inline u8 -+isp1301_get_u8(struct isp1301 *isp, u8 reg) -+{ -+ return i2c_smbus_read_byte_data(isp->client, reg + 0); -+} -+ -+static inline int -+isp1301_get_u16(struct isp1301 *isp, u8 reg) -+{ -+ return i2c_smbus_read_word_data(isp->client, reg); -+} -+ -+static inline int -+isp1301_set_bits(struct isp1301 *isp, u8 reg, u8 bits) -+{ -+ return i2c_smbus_write_byte_data(isp->client, reg + 0, bits); -+} -+ -+static inline int -+isp1301_clear_bits(struct isp1301 *isp, u8 reg, u8 bits) -+{ -+ return i2c_smbus_write_byte_data(isp->client, reg + 1, bits); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* identification */ -+#define ISP1301_VENDOR_ID 0x00 /* u16 read */ -+#define ISP1301_PRODUCT_ID 0x02 /* u16 read */ -+#define ISP1301_BCD_DEVICE 0x14 /* u16 read */ -+ -+#define I2C_VENDOR_ID_PHILIPS 0x04cc -+#define I2C_PRODUCT_ID_PHILIPS_1301 0x1301 -+ -+/* operational registers */ -+#define ISP1301_MODE_CONTROL_1 0x04 /* u8 read, set, +1 clear */ -+# define MC1_SPEED (1 << 0) -+# define MC1_SUSPEND (1 << 1) -+# define MC1_DAT_SE0 (1 << 2) -+# define MC1_TRANSPARENT (1 << 3) -+# define MC1_BDIS_ACON_EN (1 << 4) -+# define MC1_OE_INT_EN (1 << 5) -+# define MC1_UART_EN (1 << 6) -+# define MC1_MASK 0x7f -+#define ISP1301_MODE_CONTROL_2 0x12 /* u8 read, set, +1 clear */ -+# define MC2_GLOBAL_PWR_DN (1 << 0) -+# define MC2_SPD_SUSP_CTRL (1 << 1) -+# define MC2_BI_DI (1 << 2) -+# define MC2_TRANSP_BDIR0 (1 << 3) -+# define MC2_TRANSP_BDIR1 (1 << 4) -+# define MC2_AUDIO_EN (1 << 5) -+# define MC2_PSW_EN (1 << 6) -+# define MC2_EN2V7 (1 << 7) -+#define ISP1301_OTG_CONTROL_1 0x06 /* u8 read, set, +1 clear */ -+# define OTG1_DP_PULLUP (1 << 0) -+# define OTG1_DM_PULLUP (1 << 1) -+# define OTG1_DP_PULLDOWN (1 << 2) -+# define OTG1_DM_PULLDOWN (1 << 3) -+# define OTG1_ID_PULLDOWN (1 << 4) -+# define OTG1_VBUS_DRV (1 << 5) -+# define OTG1_VBUS_DISCHRG (1 << 6) -+# define OTG1_VBUS_CHRG (1 << 7) -+#define ISP1301_OTG_STATUS 0x10 /* u8 readonly */ -+# define OTG_B_SESS_END (1 << 6) -+# define OTG_B_SESS_VLD (1 << 7) -+ -+#define ISP1301_INTERRUPT_SOURCE 0x08 /* u8 read */ -+#define ISP1301_INTERRUPT_LATCH 0x0A /* u8 read, set, +1 clear */ -+ -+#define ISP1301_INTERRUPT_FALLING 0x0C /* u8 read, set, +1 clear */ -+#define ISP1301_INTERRUPT_RISING 0x0E /* u8 read, set, +1 clear */ -+ -+/* same bitfields in all interrupt registers */ -+# define INTR_VBUS_VLD (1 << 0) -+# define INTR_SESS_VLD (1 << 1) -+# define INTR_DP_HI (1 << 2) -+# define INTR_ID_GND (1 << 3) -+# define INTR_DM_HI (1 << 4) -+# define INTR_ID_FLOAT (1 << 5) -+# define INTR_BDIS_ACON (1 << 6) -+# define INTR_CR_INT (1 << 7) -+ -+/*-------------------------------------------------------------------------*/ -+ -+static const char *state_string(enum usb_otg_state state) -+{ -+ switch (state) { -+ case OTG_STATE_A_IDLE: return "a_idle"; -+ case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; -+ case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; -+ case OTG_STATE_A_HOST: return "a_host"; -+ case OTG_STATE_A_SUSPEND: return "a_suspend"; -+ case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; -+ case 
OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; -+ case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; -+ case OTG_STATE_B_IDLE: return "b_idle"; -+ case OTG_STATE_B_SRP_INIT: return "b_srp_init"; -+ case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; -+ case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; -+ case OTG_STATE_B_HOST: return "b_host"; -+ default: return "UNDEFINED"; -+ } -+} -+ -+static inline const char *state_name(struct isp1301 *isp) -+{ -+ return state_string(isp->otg.state); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* NOTE: some of this ISP1301 setup is specific to H2 boards; -+ * not everything is guarded by board-specific checks, or even using -+ * omap_usb_config data to deduce MC1_DAT_SE0 and MC2_BI_DI. -+ * -+ * ALSO: this currently doesn't use ISP1301 low-power modes -+ * while OTG is running. -+ */ -+ -+static void power_down(struct isp1301 *isp) -+{ -+ isp->otg.state = OTG_STATE_UNDEFINED; -+ -+ // isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN); -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND); -+ -+ isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_ID_PULLDOWN); -+ isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); -+} -+ -+static void power_up(struct isp1301 *isp) -+{ -+ // isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN); -+ isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND); -+ -+ /* do this only when cpu is driving transceiver, -+ * so host won't see a low speed device... -+ */ -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); -+} -+ -+#define NO_HOST_SUSPEND -+ -+static int host_suspend(struct isp1301 *isp) -+{ -+#ifdef NO_HOST_SUSPEND -+ return 0; -+#else -+ struct device *dev; -+ -+ if (!isp->otg.host) -+ return -ENODEV; -+ -+ /* Currently ASSUMES only the OTG port matters; -+ * other ports could be active... -+ */ -+ dev = isp->otg.host->controller; -+ return dev->driver->suspend(dev, 3, 0); -+#endif -+} -+ -+static int host_resume(struct isp1301 *isp) -+{ -+#ifdef NO_HOST_SUSPEND -+ return 0; -+#else -+ struct device *dev; -+ -+ if (!isp->otg.host) -+ return -ENODEV; -+ -+ dev = isp->otg.host->controller; -+ return dev->driver->resume(dev, 0); -+#endif -+} -+ -+static int gadget_suspend(struct isp1301 *isp) -+{ -+ isp->otg.gadget->b_hnp_enable = 0; -+ isp->otg.gadget->a_hnp_support = 0; -+ isp->otg.gadget->a_alt_hnp_support = 0; -+ return usb_gadget_vbus_disconnect(isp->otg.gadget); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+#define TIMER_MINUTES 10 -+#define TIMER_JIFFIES (TIMER_MINUTES * 60 * HZ) -+ -+/* Almost all our I2C messaging comes from a work queue's task context. -+ * NOTE: guaranteeing certain response times might mean we shouldn't -+ * share keventd's work queue; a realtime task might be safest. 
-+ */ -+static void isp1301_defer_work(struct isp1301 *isp, int work) -+{ -+ int status; -+ -+ if (isp && !test_and_set_bit(work, &isp->todo)) { -+ (void) get_device(&isp->client->dev); -+ status = schedule_work(&isp->work); -+ if (!status && !isp->working) -+ dev_vdbg(&isp->client->dev, -+ "work item %d may be lost\n", work); -+ } -+} -+ -+/* called from irq handlers */ -+static void a_idle(struct isp1301 *isp, const char *tag) -+{ -+ u32 l; -+ -+ if (isp->otg.state == OTG_STATE_A_IDLE) -+ return; -+ -+ isp->otg.default_a = 1; -+ if (isp->otg.host) { -+ isp->otg.host->is_b_host = 0; -+ host_suspend(isp); -+ } -+ if (isp->otg.gadget) { -+ isp->otg.gadget->is_a_peripheral = 1; -+ gadget_suspend(isp); -+ } -+ isp->otg.state = OTG_STATE_A_IDLE; -+ l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; -+ omap_writel(l, OTG_CTRL); -+ isp->last_otg_ctrl = l; -+ pr_debug(" --> %s/%s\n", state_name(isp), tag); -+} -+ -+/* called from irq handlers */ -+static void b_idle(struct isp1301 *isp, const char *tag) -+{ -+ u32 l; -+ -+ if (isp->otg.state == OTG_STATE_B_IDLE) -+ return; -+ -+ isp->otg.default_a = 0; -+ if (isp->otg.host) { -+ isp->otg.host->is_b_host = 1; -+ host_suspend(isp); -+ } -+ if (isp->otg.gadget) { -+ isp->otg.gadget->is_a_peripheral = 0; -+ gadget_suspend(isp); -+ } -+ isp->otg.state = OTG_STATE_B_IDLE; -+ l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; -+ omap_writel(l, OTG_CTRL); -+ isp->last_otg_ctrl = l; -+ pr_debug(" --> %s/%s\n", state_name(isp), tag); -+} -+ -+static void -+dump_regs(struct isp1301 *isp, const char *label) -+{ -+#ifdef DEBUG -+ u8 ctrl = isp1301_get_u8(isp, ISP1301_OTG_CONTROL_1); -+ u8 status = isp1301_get_u8(isp, ISP1301_OTG_STATUS); -+ u8 src = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE); -+ -+ pr_debug("otg: %06x, %s %s, otg/%02x stat/%02x.%02x\n", -+ omap_readl(OTG_CTRL), label, state_name(isp), -+ ctrl, status, src); -+ /* mode control and irq enables don't change much */ -+#endif -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+#ifdef CONFIG_USB_OTG -+ -+/* -+ * The OMAP OTG controller handles most of the OTG state transitions. -+ * -+ * We translate isp1301 outputs (mostly voltage comparator status) into -+ * OTG inputs; OTG outputs (mostly pullup/pulldown controls) and HNP state -+ * flags into isp1301 inputs ... and infer state transitions. 
-+ */ -+ -+#ifdef VERBOSE -+ -+static void check_state(struct isp1301 *isp, const char *tag) -+{ -+ enum usb_otg_state state = OTG_STATE_UNDEFINED; -+ u8 fsm = omap_readw(OTG_TEST) & 0x0ff; -+ unsigned extra = 0; -+ -+ switch (fsm) { -+ -+ /* default-b */ -+ case 0x0: -+ state = OTG_STATE_B_IDLE; -+ break; -+ case 0x3: -+ case 0x7: -+ extra = 1; -+ case 0x1: -+ state = OTG_STATE_B_PERIPHERAL; -+ break; -+ case 0x11: -+ state = OTG_STATE_B_SRP_INIT; -+ break; -+ -+ /* extra dual-role default-b states */ -+ case 0x12: -+ case 0x13: -+ case 0x16: -+ extra = 1; -+ case 0x17: -+ state = OTG_STATE_B_WAIT_ACON; -+ break; -+ case 0x34: -+ state = OTG_STATE_B_HOST; -+ break; -+ -+ /* default-a */ -+ case 0x36: -+ state = OTG_STATE_A_IDLE; -+ break; -+ case 0x3c: -+ state = OTG_STATE_A_WAIT_VFALL; -+ break; -+ case 0x7d: -+ state = OTG_STATE_A_VBUS_ERR; -+ break; -+ case 0x9e: -+ case 0x9f: -+ extra = 1; -+ case 0x89: -+ state = OTG_STATE_A_PERIPHERAL; -+ break; -+ case 0xb7: -+ state = OTG_STATE_A_WAIT_VRISE; -+ break; -+ case 0xb8: -+ state = OTG_STATE_A_WAIT_BCON; -+ break; -+ case 0xb9: -+ state = OTG_STATE_A_HOST; -+ break; -+ case 0xba: -+ state = OTG_STATE_A_SUSPEND; -+ break; -+ default: -+ break; -+ } -+ if (isp->otg.state == state && !extra) -+ return; -+ pr_debug("otg: %s FSM %s/%02x, %s, %06x\n", tag, -+ state_string(state), fsm, state_name(isp), -+ omap_readl(OTG_CTRL)); -+} -+ -+#else -+ -+static inline void check_state(struct isp1301 *isp, const char *tag) { } -+ -+#endif -+ -+/* outputs from ISP1301_INTERRUPT_SOURCE */ -+static void update_otg1(struct isp1301 *isp, u8 int_src) -+{ -+ u32 otg_ctrl; -+ -+ otg_ctrl = omap_readl(OTG_CTRL) & OTG_CTRL_MASK; -+ otg_ctrl &= ~OTG_XCEIV_INPUTS; -+ otg_ctrl &= ~(OTG_ID|OTG_ASESSVLD|OTG_VBUSVLD); -+ -+ if (int_src & INTR_SESS_VLD) -+ otg_ctrl |= OTG_ASESSVLD; -+ else if (isp->otg.state == OTG_STATE_A_WAIT_VFALL) { -+ a_idle(isp, "vfall"); -+ otg_ctrl &= ~OTG_CTRL_BITS; -+ } -+ if (int_src & INTR_VBUS_VLD) -+ otg_ctrl |= OTG_VBUSVLD; -+ if (int_src & INTR_ID_GND) { /* default-A */ -+ if (isp->otg.state == OTG_STATE_B_IDLE -+ || isp->otg.state == OTG_STATE_UNDEFINED) { -+ a_idle(isp, "init"); -+ return; -+ } -+ } else { /* default-B */ -+ otg_ctrl |= OTG_ID; -+ if (isp->otg.state == OTG_STATE_A_IDLE -+ || isp->otg.state == OTG_STATE_UNDEFINED) { -+ b_idle(isp, "init"); -+ return; -+ } -+ } -+ omap_writel(otg_ctrl, OTG_CTRL); -+} -+ -+/* outputs from ISP1301_OTG_STATUS */ -+static void update_otg2(struct isp1301 *isp, u8 otg_status) -+{ -+ u32 otg_ctrl; -+ -+ otg_ctrl = omap_readl(OTG_CTRL) & OTG_CTRL_MASK; -+ otg_ctrl &= ~OTG_XCEIV_INPUTS; -+ otg_ctrl &= ~(OTG_BSESSVLD | OTG_BSESSEND); -+ if (otg_status & OTG_B_SESS_VLD) -+ otg_ctrl |= OTG_BSESSVLD; -+ else if (otg_status & OTG_B_SESS_END) -+ otg_ctrl |= OTG_BSESSEND; -+ omap_writel(otg_ctrl, OTG_CTRL); -+} -+ -+/* inputs going to ISP1301 */ -+static void otg_update_isp(struct isp1301 *isp) -+{ -+ u32 otg_ctrl, otg_change; -+ u8 set = OTG1_DM_PULLDOWN, clr = OTG1_DM_PULLUP; -+ -+ otg_ctrl = omap_readl(OTG_CTRL); -+ otg_change = otg_ctrl ^ isp->last_otg_ctrl; -+ isp->last_otg_ctrl = otg_ctrl; -+ otg_ctrl = otg_ctrl & OTG_XCEIV_INPUTS; -+ -+ switch (isp->otg.state) { -+ case OTG_STATE_B_IDLE: -+ case OTG_STATE_B_PERIPHERAL: -+ case OTG_STATE_B_SRP_INIT: -+ if (!(otg_ctrl & OTG_PULLUP)) { -+ // if (otg_ctrl & OTG_B_HNPEN) { -+ if (isp->otg.gadget->b_hnp_enable) { -+ isp->otg.state = OTG_STATE_B_WAIT_ACON; -+ pr_debug(" --> b_wait_acon\n"); -+ } -+ goto pulldown; -+ } -+pullup: -+ set |= 
OTG1_DP_PULLUP; -+ clr |= OTG1_DP_PULLDOWN; -+ break; -+ case OTG_STATE_A_SUSPEND: -+ case OTG_STATE_A_PERIPHERAL: -+ if (otg_ctrl & OTG_PULLUP) -+ goto pullup; -+ /* FALLTHROUGH */ -+ // case OTG_STATE_B_WAIT_ACON: -+ default: -+pulldown: -+ set |= OTG1_DP_PULLDOWN; -+ clr |= OTG1_DP_PULLUP; -+ break; -+ } -+ -+# define toggle(OTG,ISP) do { \ -+ if (otg_ctrl & OTG) set |= ISP; \ -+ else clr |= ISP; \ -+ } while (0) -+ -+ if (!(isp->otg.host)) -+ otg_ctrl &= ~OTG_DRV_VBUS; -+ -+ switch (isp->otg.state) { -+ case OTG_STATE_A_SUSPEND: -+ if (otg_ctrl & OTG_DRV_VBUS) { -+ set |= OTG1_VBUS_DRV; -+ break; -+ } -+ /* HNP failed for some reason (A_AIDL_BDIS timeout) */ -+ notresponding(isp); -+ -+ /* FALLTHROUGH */ -+ case OTG_STATE_A_VBUS_ERR: -+ isp->otg.state = OTG_STATE_A_WAIT_VFALL; -+ pr_debug(" --> a_wait_vfall\n"); -+ /* FALLTHROUGH */ -+ case OTG_STATE_A_WAIT_VFALL: -+ /* FIXME usbcore thinks port power is still on ... */ -+ clr |= OTG1_VBUS_DRV; -+ break; -+ case OTG_STATE_A_IDLE: -+ if (otg_ctrl & OTG_DRV_VBUS) { -+ isp->otg.state = OTG_STATE_A_WAIT_VRISE; -+ pr_debug(" --> a_wait_vrise\n"); -+ } -+ /* FALLTHROUGH */ -+ default: -+ toggle(OTG_DRV_VBUS, OTG1_VBUS_DRV); -+ } -+ -+ toggle(OTG_PU_VBUS, OTG1_VBUS_CHRG); -+ toggle(OTG_PD_VBUS, OTG1_VBUS_DISCHRG); -+ -+# undef toggle -+ -+ isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, set); -+ isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, clr); -+ -+ /* HNP switch to host or peripheral; and SRP */ -+ if (otg_change & OTG_PULLUP) { -+ u32 l; -+ -+ switch (isp->otg.state) { -+ case OTG_STATE_B_IDLE: -+ if (clr & OTG1_DP_PULLUP) -+ break; -+ isp->otg.state = OTG_STATE_B_PERIPHERAL; -+ pr_debug(" --> b_peripheral\n"); -+ break; -+ case OTG_STATE_A_SUSPEND: -+ if (clr & OTG1_DP_PULLUP) -+ break; -+ isp->otg.state = OTG_STATE_A_PERIPHERAL; -+ pr_debug(" --> a_peripheral\n"); -+ break; -+ default: -+ break; -+ } -+ l = omap_readl(OTG_CTRL); -+ l |= OTG_PULLUP; -+ omap_writel(l, OTG_CTRL); -+ } -+ -+ check_state(isp, __func__); -+ dump_regs(isp, "otg->isp1301"); -+} -+ -+static irqreturn_t omap_otg_irq(int irq, void *_isp) -+{ -+ u16 otg_irq = omap_readw(OTG_IRQ_SRC); -+ u32 otg_ctrl; -+ int ret = IRQ_NONE; -+ struct isp1301 *isp = _isp; -+ -+ /* update ISP1301 transciever from OTG controller */ -+ if (otg_irq & OPRT_CHG) { -+ omap_writew(OPRT_CHG, OTG_IRQ_SRC); -+ isp1301_defer_work(isp, WORK_UPDATE_ISP); -+ ret = IRQ_HANDLED; -+ -+ /* SRP to become b_peripheral failed */ -+ } else if (otg_irq & B_SRP_TMROUT) { -+ pr_debug("otg: B_SRP_TIMEOUT, %06x\n", omap_readl(OTG_CTRL)); -+ notresponding(isp); -+ -+ /* gadget drivers that care should monitor all kinds of -+ * remote wakeup (SRP, normal) using their own timer -+ * to give "check cable and A-device" messages. -+ */ -+ if (isp->otg.state == OTG_STATE_B_SRP_INIT) -+ b_idle(isp, "srp_timeout"); -+ -+ omap_writew(B_SRP_TMROUT, OTG_IRQ_SRC); -+ ret = IRQ_HANDLED; -+ -+ /* HNP to become b_host failed */ -+ } else if (otg_irq & B_HNP_FAIL) { -+ pr_debug("otg: %s B_HNP_FAIL, %06x\n", -+ state_name(isp), omap_readl(OTG_CTRL)); -+ notresponding(isp); -+ -+ otg_ctrl = omap_readl(OTG_CTRL); -+ otg_ctrl |= OTG_BUSDROP; -+ otg_ctrl &= OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS; -+ omap_writel(otg_ctrl, OTG_CTRL); -+ -+ /* subset of b_peripheral()... */ -+ isp->otg.state = OTG_STATE_B_PERIPHERAL; -+ pr_debug(" --> b_peripheral\n"); -+ -+ omap_writew(B_HNP_FAIL, OTG_IRQ_SRC); -+ ret = IRQ_HANDLED; -+ -+ /* detect SRP from B-device ... 
*/ -+ } else if (otg_irq & A_SRP_DETECT) { -+ pr_debug("otg: %s SRP_DETECT, %06x\n", -+ state_name(isp), omap_readl(OTG_CTRL)); -+ -+ isp1301_defer_work(isp, WORK_UPDATE_OTG); -+ switch (isp->otg.state) { -+ case OTG_STATE_A_IDLE: -+ if (!isp->otg.host) -+ break; -+ isp1301_defer_work(isp, WORK_HOST_RESUME); -+ otg_ctrl = omap_readl(OTG_CTRL); -+ otg_ctrl |= OTG_A_BUSREQ; -+ otg_ctrl &= ~(OTG_BUSDROP|OTG_B_BUSREQ) -+ & ~OTG_XCEIV_INPUTS -+ & OTG_CTRL_MASK; -+ omap_writel(otg_ctrl, OTG_CTRL); -+ break; -+ default: -+ break; -+ } -+ -+ omap_writew(A_SRP_DETECT, OTG_IRQ_SRC); -+ ret = IRQ_HANDLED; -+ -+ /* timer expired: T(a_wait_bcon) and maybe T(a_wait_vrise) -+ * we don't track them separately -+ */ -+ } else if (otg_irq & A_REQ_TMROUT) { -+ otg_ctrl = omap_readl(OTG_CTRL); -+ pr_info("otg: BCON_TMOUT from %s, %06x\n", -+ state_name(isp), otg_ctrl); -+ notresponding(isp); -+ -+ otg_ctrl |= OTG_BUSDROP; -+ otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS; -+ omap_writel(otg_ctrl, OTG_CTRL); -+ isp->otg.state = OTG_STATE_A_WAIT_VFALL; -+ -+ omap_writew(A_REQ_TMROUT, OTG_IRQ_SRC); -+ ret = IRQ_HANDLED; -+ -+ /* A-supplied voltage fell too low; overcurrent */ -+ } else if (otg_irq & A_VBUS_ERR) { -+ otg_ctrl = omap_readl(OTG_CTRL); -+ printk(KERN_ERR "otg: %s, VBUS_ERR %04x ctrl %06x\n", -+ state_name(isp), otg_irq, otg_ctrl); -+ -+ otg_ctrl |= OTG_BUSDROP; -+ otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS; -+ omap_writel(otg_ctrl, OTG_CTRL); -+ isp->otg.state = OTG_STATE_A_VBUS_ERR; -+ -+ omap_writew(A_VBUS_ERR, OTG_IRQ_SRC); -+ ret = IRQ_HANDLED; -+ -+ /* switch driver; the transciever code activates it, -+ * ungating the udc clock or resuming OHCI. -+ */ -+ } else if (otg_irq & DRIVER_SWITCH) { -+ int kick = 0; -+ -+ otg_ctrl = omap_readl(OTG_CTRL); -+ printk(KERN_NOTICE "otg: %s, SWITCH to %s, ctrl %06x\n", -+ state_name(isp), -+ (otg_ctrl & OTG_DRIVER_SEL) -+ ? "gadget" : "host", -+ otg_ctrl); -+ isp1301_defer_work(isp, WORK_UPDATE_ISP); -+ -+ /* role is peripheral */ -+ if (otg_ctrl & OTG_DRIVER_SEL) { -+ switch (isp->otg.state) { -+ case OTG_STATE_A_IDLE: -+ b_idle(isp, __func__); -+ break; -+ default: -+ break; -+ } -+ isp1301_defer_work(isp, WORK_UPDATE_ISP); -+ -+ /* role is host */ -+ } else { -+ if (!(otg_ctrl & OTG_ID)) { -+ otg_ctrl &= OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS; -+ omap_writel(otg_ctrl | OTG_A_BUSREQ, OTG_CTRL); -+ } -+ -+ if (isp->otg.host) { -+ switch (isp->otg.state) { -+ case OTG_STATE_B_WAIT_ACON: -+ isp->otg.state = OTG_STATE_B_HOST; -+ pr_debug(" --> b_host\n"); -+ kick = 1; -+ break; -+ case OTG_STATE_A_WAIT_BCON: -+ isp->otg.state = OTG_STATE_A_HOST; -+ pr_debug(" --> a_host\n"); -+ break; -+ case OTG_STATE_A_PERIPHERAL: -+ isp->otg.state = OTG_STATE_A_WAIT_BCON; -+ pr_debug(" --> a_wait_bcon\n"); -+ break; -+ default: -+ break; -+ } -+ isp1301_defer_work(isp, WORK_HOST_RESUME); -+ } -+ } -+ -+ omap_writew(DRIVER_SWITCH, OTG_IRQ_SRC); -+ ret = IRQ_HANDLED; -+ -+ if (kick) -+ usb_bus_start_enum(isp->otg.host, -+ isp->otg.host->otg_port); -+ } -+ -+ check_state(isp, __func__); -+ return ret; -+} -+ -+static struct platform_device *otg_dev; -+ -+static int otg_init(struct isp1301 *isp) -+{ -+ u32 l; -+ -+ if (!otg_dev) -+ return -ENODEV; -+ -+ dump_regs(isp, __func__); -+ /* some of these values are board-specific... 
*/ -+ l = omap_readl(OTG_SYSCON_2); -+ l |= OTG_EN -+ /* for B-device: */ -+ | SRP_GPDATA /* 9msec Bdev D+ pulse */ -+ | SRP_GPDVBUS /* discharge after VBUS pulse */ -+ // | (3 << 24) /* 2msec VBUS pulse */ -+ /* for A-device: */ -+ | (0 << 20) /* 200ms nominal A_WAIT_VRISE timer */ -+ | SRP_DPW /* detect 167+ns SRP pulses */ -+ | SRP_DATA | SRP_VBUS /* accept both kinds of SRP pulse */ -+ ; -+ omap_writel(l, OTG_SYSCON_2); -+ -+ update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE)); -+ update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS)); -+ -+ check_state(isp, __func__); -+ pr_debug("otg: %s, %s %06x\n", -+ state_name(isp), __func__, omap_readl(OTG_CTRL)); -+ -+ omap_writew(DRIVER_SWITCH | OPRT_CHG -+ | B_SRP_TMROUT | B_HNP_FAIL -+ | A_VBUS_ERR | A_SRP_DETECT | A_REQ_TMROUT, OTG_IRQ_EN); -+ -+ l = omap_readl(OTG_SYSCON_2); -+ l |= OTG_EN; -+ omap_writel(l, OTG_SYSCON_2); -+ -+ return 0; -+} -+ -+static int otg_probe(struct platform_device *dev) -+{ -+ // struct omap_usb_config *config = dev->platform_data; -+ -+ otg_dev = dev; -+ return 0; -+} -+ -+static int otg_remove(struct platform_device *dev) -+{ -+ otg_dev = NULL; -+ return 0; -+} -+ -+static struct platform_driver omap_otg_driver = { -+ .probe = otg_probe, -+ .remove = otg_remove, -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = "omap_otg", -+ }, -+}; -+ -+static int otg_bind(struct isp1301 *isp) -+{ -+ int status; -+ -+ if (otg_dev) -+ return -EBUSY; -+ -+ status = platform_driver_register(&omap_otg_driver); -+ if (status < 0) -+ return status; -+ -+ if (otg_dev) -+ status = request_irq(otg_dev->resource[1].start, omap_otg_irq, -+ IRQF_DISABLED, DRIVER_NAME, isp); -+ else -+ status = -ENODEV; -+ -+ if (status < 0) -+ platform_driver_unregister(&omap_otg_driver); -+ return status; -+} -+ -+static void otg_unbind(struct isp1301 *isp) -+{ -+ if (!otg_dev) -+ return; -+ free_irq(otg_dev->resource[1].start, isp); -+} -+ -+#else -+ -+/* OTG controller isn't clocked */ -+ -+#endif /* CONFIG_USB_OTG */ -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void b_peripheral(struct isp1301 *isp) -+{ -+ u32 l; -+ -+ l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; -+ omap_writel(l, OTG_CTRL); -+ -+ usb_gadget_vbus_connect(isp->otg.gadget); -+ -+#ifdef CONFIG_USB_OTG -+ enable_vbus_draw(isp, 8); -+ otg_update_isp(isp); -+#else -+ enable_vbus_draw(isp, 100); -+ /* UDC driver just set OTG_BSESSVLD */ -+ isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLUP); -+ isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLDOWN); -+ isp->otg.state = OTG_STATE_B_PERIPHERAL; -+ pr_debug(" --> b_peripheral\n"); -+ dump_regs(isp, "2periph"); -+#endif -+} -+ -+static void isp_update_otg(struct isp1301 *isp, u8 stat) -+{ -+ u8 isp_stat, isp_bstat; -+ enum usb_otg_state state = isp->otg.state; -+ -+ if (stat & INTR_BDIS_ACON) -+ pr_debug("OTG: BDIS_ACON, %s\n", state_name(isp)); -+ -+ /* start certain state transitions right away */ -+ isp_stat = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE); -+ if (isp_stat & INTR_ID_GND) { -+ if (isp->otg.default_a) { -+ switch (state) { -+ case OTG_STATE_B_IDLE: -+ a_idle(isp, "idle"); -+ /* FALLTHROUGH */ -+ case OTG_STATE_A_IDLE: -+ enable_vbus_source(isp); -+ /* FALLTHROUGH */ -+ case OTG_STATE_A_WAIT_VRISE: -+ /* we skip over OTG_STATE_A_WAIT_BCON, since -+ * the HC will transition to A_HOST (or -+ * A_SUSPEND!) without our noticing except -+ * when HNP is used. 
-+ */ -+ if (isp_stat & INTR_VBUS_VLD) -+ isp->otg.state = OTG_STATE_A_HOST; -+ break; -+ case OTG_STATE_A_WAIT_VFALL: -+ if (!(isp_stat & INTR_SESS_VLD)) -+ a_idle(isp, "vfell"); -+ break; -+ default: -+ if (!(isp_stat & INTR_VBUS_VLD)) -+ isp->otg.state = OTG_STATE_A_VBUS_ERR; -+ break; -+ } -+ isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS); -+ } else { -+ switch (state) { -+ case OTG_STATE_B_PERIPHERAL: -+ case OTG_STATE_B_HOST: -+ case OTG_STATE_B_WAIT_ACON: -+ usb_gadget_vbus_disconnect(isp->otg.gadget); -+ break; -+ default: -+ break; -+ } -+ if (state != OTG_STATE_A_IDLE) -+ a_idle(isp, "id"); -+ if (isp->otg.host && state == OTG_STATE_A_IDLE) -+ isp1301_defer_work(isp, WORK_HOST_RESUME); -+ isp_bstat = 0; -+ } -+ } else { -+ u32 l; -+ -+ /* if user unplugged mini-A end of cable, -+ * don't bypass A_WAIT_VFALL. -+ */ -+ if (isp->otg.default_a) { -+ switch (state) { -+ default: -+ isp->otg.state = OTG_STATE_A_WAIT_VFALL; -+ break; -+ case OTG_STATE_A_WAIT_VFALL: -+ state = OTG_STATE_A_IDLE; -+ /* khubd may take a while to notice and -+ * handle this disconnect, so don't go -+ * to B_IDLE quite yet. -+ */ -+ break; -+ case OTG_STATE_A_IDLE: -+ host_suspend(isp); -+ isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, -+ MC1_BDIS_ACON_EN); -+ isp->otg.state = OTG_STATE_B_IDLE; -+ l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK; -+ l &= ~OTG_CTRL_BITS; -+ omap_writel(l, OTG_CTRL); -+ break; -+ case OTG_STATE_B_IDLE: -+ break; -+ } -+ } -+ isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS); -+ -+ switch (isp->otg.state) { -+ case OTG_STATE_B_PERIPHERAL: -+ case OTG_STATE_B_WAIT_ACON: -+ case OTG_STATE_B_HOST: -+ if (likely(isp_bstat & OTG_B_SESS_VLD)) -+ break; -+ enable_vbus_draw(isp, 0); -+#ifndef CONFIG_USB_OTG -+ /* UDC driver will clear OTG_BSESSVLD */ -+ isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, -+ OTG1_DP_PULLDOWN); -+ isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, -+ OTG1_DP_PULLUP); -+ dump_regs(isp, __func__); -+#endif -+ /* FALLTHROUGH */ -+ case OTG_STATE_B_SRP_INIT: -+ b_idle(isp, __func__); -+ l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; -+ omap_writel(l, OTG_CTRL); -+ /* FALLTHROUGH */ -+ case OTG_STATE_B_IDLE: -+ if (isp->otg.gadget && (isp_bstat & OTG_B_SESS_VLD)) { -+#ifdef CONFIG_USB_OTG -+ update_otg1(isp, isp_stat); -+ update_otg2(isp, isp_bstat); -+#endif -+ b_peripheral(isp); -+ } else if (!(isp_stat & (INTR_VBUS_VLD|INTR_SESS_VLD))) -+ isp_bstat |= OTG_B_SESS_END; -+ break; -+ case OTG_STATE_A_WAIT_VFALL: -+ break; -+ default: -+ pr_debug("otg: unsupported b-device %s\n", -+ state_name(isp)); -+ break; -+ } -+ } -+ -+ if (state != isp->otg.state) -+ pr_debug(" isp, %s -> %s\n", -+ state_string(state), state_name(isp)); -+ -+#ifdef CONFIG_USB_OTG -+ /* update the OTG controller state to match the isp1301; may -+ * trigger OPRT_CHG irqs for changes going to the isp1301. 
-+ */ -+ update_otg1(isp, isp_stat); -+ update_otg2(isp, isp_bstat); -+ check_state(isp, __func__); -+#endif -+ -+ dump_regs(isp, "isp1301->otg"); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static u8 isp1301_clear_latch(struct isp1301 *isp) -+{ -+ u8 latch = isp1301_get_u8(isp, ISP1301_INTERRUPT_LATCH); -+ isp1301_clear_bits(isp, ISP1301_INTERRUPT_LATCH, latch); -+ return latch; -+} -+ -+static void -+isp1301_work(struct work_struct *work) -+{ -+ struct isp1301 *isp = container_of(work, struct isp1301, work); -+ int stop; -+ -+ /* implicit lock: we're the only task using this device */ -+ isp->working = 1; -+ do { -+ stop = test_bit(WORK_STOP, &isp->todo); -+ -+#ifdef CONFIG_USB_OTG -+ /* transfer state from otg engine to isp1301 */ -+ if (test_and_clear_bit(WORK_UPDATE_ISP, &isp->todo)) { -+ otg_update_isp(isp); -+ put_device(&isp->client->dev); -+ } -+#endif -+ /* transfer state from isp1301 to otg engine */ -+ if (test_and_clear_bit(WORK_UPDATE_OTG, &isp->todo)) { -+ u8 stat = isp1301_clear_latch(isp); -+ -+ isp_update_otg(isp, stat); -+ put_device(&isp->client->dev); -+ } -+ -+ if (test_and_clear_bit(WORK_HOST_RESUME, &isp->todo)) { -+ u32 otg_ctrl; -+ -+ /* -+ * skip A_WAIT_VRISE; hc transitions invisibly -+ * skip A_WAIT_BCON; same. -+ */ -+ switch (isp->otg.state) { -+ case OTG_STATE_A_WAIT_BCON: -+ case OTG_STATE_A_WAIT_VRISE: -+ isp->otg.state = OTG_STATE_A_HOST; -+ pr_debug(" --> a_host\n"); -+ otg_ctrl = omap_readl(OTG_CTRL); -+ otg_ctrl |= OTG_A_BUSREQ; -+ otg_ctrl &= ~(OTG_BUSDROP|OTG_B_BUSREQ) -+ & OTG_CTRL_MASK; -+ omap_writel(otg_ctrl, OTG_CTRL); -+ break; -+ case OTG_STATE_B_WAIT_ACON: -+ isp->otg.state = OTG_STATE_B_HOST; -+ pr_debug(" --> b_host (acon)\n"); -+ break; -+ case OTG_STATE_B_HOST: -+ case OTG_STATE_B_IDLE: -+ case OTG_STATE_A_IDLE: -+ break; -+ default: -+ pr_debug(" host resume in %s\n", -+ state_name(isp)); -+ } -+ host_resume(isp); -+ // mdelay(10); -+ put_device(&isp->client->dev); -+ } -+ -+ if (test_and_clear_bit(WORK_TIMER, &isp->todo)) { -+#ifdef VERBOSE -+ dump_regs(isp, "timer"); -+ if (!stop) -+ mod_timer(&isp->timer, jiffies + TIMER_JIFFIES); -+#endif -+ put_device(&isp->client->dev); -+ } -+ -+ if (isp->todo) -+ dev_vdbg(&isp->client->dev, -+ "work done, todo = 0x%lx\n", -+ isp->todo); -+ if (stop) { -+ dev_dbg(&isp->client->dev, "stop\n"); -+ break; -+ } -+ } while (isp->todo); -+ isp->working = 0; -+} -+ -+static irqreturn_t isp1301_irq(int irq, void *isp) -+{ -+ isp1301_defer_work(isp, WORK_UPDATE_OTG); -+ return IRQ_HANDLED; -+} -+ -+static void isp1301_timer(unsigned long _isp) -+{ -+ isp1301_defer_work((void *)_isp, WORK_TIMER); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void isp1301_release(struct device *dev) -+{ -+ struct isp1301 *isp; -+ -+ isp = dev_get_drvdata(dev); -+ -+ /* FIXME -- not with a "new style" driver, it doesn't!! 
*/ -+ -+ /* ugly -- i2c hijacks our memory hook to wait_for_completion() */ -+ if (isp->i2c_release) -+ isp->i2c_release(dev); -+ kfree (isp); -+} -+ -+static struct isp1301 *the_transceiver; -+ -+static int __exit isp1301_remove(struct i2c_client *i2c) -+{ -+ struct isp1301 *isp; -+ -+ isp = i2c_get_clientdata(i2c); -+ -+ isp1301_clear_bits(isp, ISP1301_INTERRUPT_FALLING, ~0); -+ isp1301_clear_bits(isp, ISP1301_INTERRUPT_RISING, ~0); -+ free_irq(i2c->irq, isp); -+#ifdef CONFIG_USB_OTG -+ otg_unbind(isp); -+#endif -+ if (machine_is_omap_h2()) -+ gpio_free(2); -+ -+ isp->timer.data = 0; -+ set_bit(WORK_STOP, &isp->todo); -+ del_timer_sync(&isp->timer); -+ flush_scheduled_work(); -+ -+ put_device(&i2c->dev); -+ the_transceiver = NULL; -+ -+ return 0; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* NOTE: three modes are possible here, only one of which -+ * will be standards-conformant on any given system: -+ * -+ * - OTG mode (dual-role), required if there's a Mini-AB connector -+ * - HOST mode, for when there's one or more A (host) connectors -+ * - DEVICE mode, for when there's a B/Mini-B (device) connector -+ * -+ * As a rule, you won't have an isp1301 chip unless it's there to -+ * support the OTG mode. Other modes help testing USB controllers -+ * in isolation from (full) OTG support, or maybe so later board -+ * revisions can help to support those feature. -+ */ -+ -+#ifdef CONFIG_USB_OTG -+ -+static int isp1301_otg_enable(struct isp1301 *isp) -+{ -+ power_up(isp); -+ otg_init(isp); -+ -+ /* NOTE: since we don't change this, this provides -+ * a few more interrupts than are strictly needed. -+ */ -+ isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING, -+ INTR_VBUS_VLD | INTR_SESS_VLD | INTR_ID_GND); -+ isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING, -+ INTR_VBUS_VLD | INTR_SESS_VLD | INTR_ID_GND); -+ -+ dev_info(&isp->client->dev, "ready for dual-role USB ...\n"); -+ -+ return 0; -+} -+ -+#endif -+ -+/* add or disable the host device+driver */ -+static int -+isp1301_set_host(struct otg_transceiver *otg, struct usb_bus *host) -+{ -+ struct isp1301 *isp = container_of(otg, struct isp1301, otg); -+ -+ if (!otg || isp != the_transceiver) -+ return -ENODEV; -+ -+ if (!host) { -+ omap_writew(0, OTG_IRQ_EN); -+ power_down(isp); -+ isp->otg.host = NULL; -+ return 0; -+ } -+ -+#ifdef CONFIG_USB_OTG -+ isp->otg.host = host; -+ dev_dbg(&isp->client->dev, "registered host\n"); -+ host_suspend(isp); -+ if (isp->otg.gadget) -+ return isp1301_otg_enable(isp); -+ return 0; -+ -+#elif !defined(CONFIG_USB_GADGET_OMAP) -+ // FIXME update its refcount -+ isp->otg.host = host; -+ -+ power_up(isp); -+ -+ if (machine_is_omap_h2()) -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); -+ -+ dev_info(&isp->client->dev, "A-Host sessions ok\n"); -+ isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING, -+ INTR_ID_GND); -+ isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING, -+ INTR_ID_GND); -+ -+ /* If this has a Mini-AB connector, this mode is highly -+ * nonstandard ... but can be handy for testing, especially with -+ * the Mini-A end of an OTG cable. (Or something nonstandard -+ * like MiniB-to-StandardB, maybe built with a gender mender.) 
-+ */ -+ isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_VBUS_DRV); -+ -+ dump_regs(isp, __func__); -+ -+ return 0; -+ -+#else -+ dev_dbg(&isp->client->dev, "host sessions not allowed\n"); -+ return -EINVAL; -+#endif -+ -+} -+ -+static int -+isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget) -+{ -+ struct isp1301 *isp = container_of(otg, struct isp1301, otg); -+#ifndef CONFIG_USB_OTG -+ u32 l; -+#endif -+ -+ if (!otg || isp != the_transceiver) -+ return -ENODEV; -+ -+ if (!gadget) { -+ omap_writew(0, OTG_IRQ_EN); -+ if (!isp->otg.default_a) -+ enable_vbus_draw(isp, 0); -+ usb_gadget_vbus_disconnect(isp->otg.gadget); -+ isp->otg.gadget = NULL; -+ power_down(isp); -+ return 0; -+ } -+ -+#ifdef CONFIG_USB_OTG -+ isp->otg.gadget = gadget; -+ dev_dbg(&isp->client->dev, "registered gadget\n"); -+ /* gadget driver may be suspended until vbus_connect () */ -+ if (isp->otg.host) -+ return isp1301_otg_enable(isp); -+ return 0; -+ -+#elif !defined(CONFIG_USB_OHCI_HCD) && !defined(CONFIG_USB_OHCI_HCD_MODULE) -+ isp->otg.gadget = gadget; -+ // FIXME update its refcount -+ -+ l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK; -+ l &= ~(OTG_XCEIV_OUTPUTS|OTG_CTRL_BITS); -+ l |= OTG_ID; -+ omap_writel(l, OTG_CTRL); -+ -+ power_up(isp); -+ isp->otg.state = OTG_STATE_B_IDLE; -+ -+ if (machine_is_omap_h2() || machine_is_omap_h3()) -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0); -+ -+ isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING, -+ INTR_SESS_VLD); -+ isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING, -+ INTR_VBUS_VLD); -+ dev_info(&isp->client->dev, "B-Peripheral sessions ok\n"); -+ dump_regs(isp, __func__); -+ -+ /* If this has a Mini-AB connector, this mode is highly -+ * nonstandard ... but can be handy for testing, so long -+ * as you don't plug a Mini-A cable into the jack. -+ */ -+ if (isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE) & INTR_VBUS_VLD) -+ b_peripheral(isp); -+ -+ return 0; -+ -+#else -+ dev_dbg(&isp->client->dev, "peripheral sessions not allowed\n"); -+ return -EINVAL; -+#endif -+} -+ -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int -+isp1301_set_power(struct otg_transceiver *dev, unsigned mA) -+{ -+ if (!the_transceiver) -+ return -ENODEV; -+ if (dev->state == OTG_STATE_B_PERIPHERAL) -+ enable_vbus_draw(the_transceiver, mA); -+ return 0; -+} -+ -+static int -+isp1301_start_srp(struct otg_transceiver *dev) -+{ -+ struct isp1301 *isp = container_of(dev, struct isp1301, otg); -+ u32 otg_ctrl; -+ -+ if (!dev || isp != the_transceiver -+ || isp->otg.state != OTG_STATE_B_IDLE) -+ return -ENODEV; -+ -+ otg_ctrl = omap_readl(OTG_CTRL); -+ if (!(otg_ctrl & OTG_BSESSEND)) -+ return -EINVAL; -+ -+ otg_ctrl |= OTG_B_BUSREQ; -+ otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK; -+ omap_writel(otg_ctrl, OTG_CTRL); -+ isp->otg.state = OTG_STATE_B_SRP_INIT; -+ -+ pr_debug("otg: SRP, %s ... %06x\n", state_name(isp), -+ omap_readl(OTG_CTRL)); -+#ifdef CONFIG_USB_OTG -+ check_state(isp, __func__); -+#endif -+ return 0; -+} -+ -+static int -+isp1301_start_hnp(struct otg_transceiver *dev) -+{ -+#ifdef CONFIG_USB_OTG -+ struct isp1301 *isp = container_of(dev, struct isp1301, otg); -+ u32 l; -+ -+ if (!dev || isp != the_transceiver) -+ return -ENODEV; -+ if (isp->otg.default_a && (isp->otg.host == NULL -+ || !isp->otg.host->b_hnp_enable)) -+ return -ENOTCONN; -+ if (!isp->otg.default_a && (isp->otg.gadget == NULL -+ || !isp->otg.gadget->b_hnp_enable)) -+ return -ENOTCONN; -+ -+ /* We want hardware to manage most HNP protocol timings. 
-+ * So do this part as early as possible... -+ */ -+ switch (isp->otg.state) { -+ case OTG_STATE_B_HOST: -+ isp->otg.state = OTG_STATE_B_PERIPHERAL; -+ /* caller will suspend next */ -+ break; -+ case OTG_STATE_A_HOST: -+#if 0 -+ /* autoconnect mode avoids irq latency bugs */ -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, -+ MC1_BDIS_ACON_EN); -+#endif -+ /* caller must suspend then clear A_BUSREQ */ -+ usb_gadget_vbus_connect(isp->otg.gadget); -+ l = omap_readl(OTG_CTRL); -+ l |= OTG_A_SETB_HNPEN; -+ omap_writel(l, OTG_CTRL); -+ -+ break; -+ case OTG_STATE_A_PERIPHERAL: -+ /* initiated by B-Host suspend */ -+ break; -+ default: -+ return -EILSEQ; -+ } -+ pr_debug("otg: HNP %s, %06x ...\n", -+ state_name(isp), omap_readl(OTG_CTRL)); -+ check_state(isp, __func__); -+ return 0; -+#else -+ /* srp-only */ -+ return -EINVAL; -+#endif -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int __init -+isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id) -+{ -+ int status; -+ struct isp1301 *isp; -+ -+ if (the_transceiver) -+ return 0; -+ -+ isp = kzalloc(sizeof *isp, GFP_KERNEL); -+ if (!isp) -+ return 0; -+ -+ INIT_WORK(&isp->work, isp1301_work); -+ init_timer(&isp->timer); -+ isp->timer.function = isp1301_timer; -+ isp->timer.data = (unsigned long) isp; -+ -+ i2c_set_clientdata(i2c, isp); -+ isp->client = i2c; -+ -+ /* verify the chip (shouldn't be necesary) */ -+ status = isp1301_get_u16(isp, ISP1301_VENDOR_ID); -+ if (status != I2C_VENDOR_ID_PHILIPS) { -+ dev_dbg(&i2c->dev, "not philips id: %d\n", status); -+ goto fail; -+ } -+ status = isp1301_get_u16(isp, ISP1301_PRODUCT_ID); -+ if (status != I2C_PRODUCT_ID_PHILIPS_1301) { -+ dev_dbg(&i2c->dev, "not isp1301, %d\n", status); -+ goto fail; -+ } -+ isp->i2c_release = i2c->dev.release; -+ i2c->dev.release = isp1301_release; -+ -+ /* initial development used chiprev 2.00 */ -+ status = i2c_smbus_read_word_data(i2c, ISP1301_BCD_DEVICE); -+ dev_info(&i2c->dev, "chiprev %x.%02x, driver " DRIVER_VERSION "\n", -+ status >> 8, status & 0xff); -+ -+ /* make like power-on reset */ -+ isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_MASK); -+ -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_BI_DI); -+ isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, ~MC2_BI_DI); -+ -+ isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, -+ OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN); -+ isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, -+ ~(OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN)); -+ -+ isp1301_clear_bits(isp, ISP1301_INTERRUPT_LATCH, ~0); -+ isp1301_clear_bits(isp, ISP1301_INTERRUPT_FALLING, ~0); -+ isp1301_clear_bits(isp, ISP1301_INTERRUPT_RISING, ~0); -+ -+#ifdef CONFIG_USB_OTG -+ status = otg_bind(isp); -+ if (status < 0) { -+ dev_dbg(&i2c->dev, "can't bind OTG\n"); -+ goto fail; -+ } -+#endif -+ -+ if (machine_is_omap_h2()) { -+ /* full speed signaling by default */ -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, -+ MC1_SPEED); -+ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, -+ MC2_SPD_SUSP_CTRL); -+ -+ /* IRQ wired at M14 */ -+ omap_cfg_reg(M14_1510_GPIO2); -+ if (gpio_request(2, "isp1301") == 0) -+ gpio_direction_input(2); -+ isp->irq_type = IRQF_TRIGGER_FALLING; -+ } -+ -+ isp->irq_type |= IRQF_SAMPLE_RANDOM; -+ status = request_irq(i2c->irq, isp1301_irq, -+ isp->irq_type, DRIVER_NAME, isp); -+ if (status < 0) { -+ dev_dbg(&i2c->dev, "can't get IRQ %d, err %d\n", -+ i2c->irq, status); -+ goto fail; -+ } -+ -+ isp->otg.dev = &i2c->dev; -+ isp->otg.label = DRIVER_NAME; -+ -+ isp->otg.set_host = isp1301_set_host, 
-+ isp->otg.set_peripheral = isp1301_set_peripheral, -+ isp->otg.set_power = isp1301_set_power, -+ isp->otg.start_srp = isp1301_start_srp, -+ isp->otg.start_hnp = isp1301_start_hnp, -+ -+ enable_vbus_draw(isp, 0); -+ power_down(isp); -+ the_transceiver = isp; -+ -+#ifdef CONFIG_USB_OTG -+ update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE)); -+ update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS)); -+#endif -+ -+ dump_regs(isp, __func__); -+ -+#ifdef VERBOSE -+ mod_timer(&isp->timer, jiffies + TIMER_JIFFIES); -+ dev_dbg(&i2c->dev, "scheduled timer, %d min\n", TIMER_MINUTES); -+#endif -+ -+ status = otg_set_transceiver(&isp->otg); -+ if (status < 0) -+ dev_err(&i2c->dev, "can't register transceiver, %d\n", -+ status); -+ -+ return 0; -+ -+fail: -+ kfree(isp); -+ return -ENODEV; -+} -+ -+static const struct i2c_device_id isp1301_id[] = { -+ { "isp1301_omap", 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, isp1301_id); -+ -+static struct i2c_driver isp1301_driver = { -+ .driver = { -+ .name = "isp1301_omap", -+ }, -+ .probe = isp1301_probe, -+ .remove = __exit_p(isp1301_remove), -+ .id_table = isp1301_id, -+}; -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int __init isp_init(void) -+{ -+ return i2c_add_driver(&isp1301_driver); -+} -+module_init(isp_init); -+ -+static void __exit isp_exit(void) -+{ -+ if (the_transceiver) -+ otg_set_transceiver(NULL); -+ i2c_del_driver(&isp1301_driver); -+} -+module_exit(isp_exit); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/Kconfig linux-omap-2.6.28-nokia1/drivers/i2c/chips/Kconfig ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/Kconfig 2011-06-22 13:14:17.993067752 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/Kconfig 2011-06-22 13:19:32.663063277 +0200 -@@ -53,6 +53,13 @@ config SENSORS_EEPROM - This driver can also be built as a module. If so, the module - will be called eeprom. - -+config TPA6130A2 -+ tristate "TPA6130a2 headphone amplifier support" -+ depends on I2C -+ help -+ Say yes here to enable support for Texas Instruments TPA6130A2 -+ headphone amplifier. -+ - config SENSORS_PCF8574 - tristate "Philips PCF8574 and PCF8574A (DEPRECATED)" - depends on EXPERIMENTAL && GPIO_PCF857X = "n" -@@ -198,6 +205,12 @@ config LP5521 - If you say yes here you get support for the National Semiconductor - LP5521 LED driver. - -+config LIS302DL -+ tristate "STMicroelectronics LIS302DL Acceleration Sensor Driver" -+ depends on I2C -+ help -+ Say yes here if you want support for the LIS302DL accelerometer chip -+ - config MENELAUS - bool "TWL92330/Menelaus PM chip" - depends on I2C=y && ARCH_OMAP24XX -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/lis302dl.c linux-omap-2.6.28-nokia1/drivers/i2c/chips/lis302dl.c ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/lis302dl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/lis302dl.c 2011-06-22 13:19:32.663063277 +0200 -@@ -0,0 +1,863 @@ -+/* -+ * drivers/i2c/chips/lis302dl.c -+ * Driver for STMicroelectronics LIS302DL acceleration sensor -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Written by Henrik Saari -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRIVER_NAME "lis302dl" -+ -+#define LIS302_WHOAMI 0x0f -+#define LIS302_CTRL_1 0x20 -+# define LIS302_CTRL1_DR (1 << 7) -+# define LIS302_CTRL1_PD (1 << 6) -+# define LIS302_CTRL1_FS (1 << 5) -+# define LIS302_CTRL1_STP (1 << 4) -+# define LIS302_CTRL1_STM (1 << 3) -+# define LIS302_CTRL1_Z (1 << 2) -+# define LIS302_CTRL1_Y (1 << 1) -+# define LIS302_CTRL1_X (1 << 0) -+#define LIS302_CTRL_2 0x21 -+#define LIS302_CTRL_3 0x22 -+# define LIS302_CTRL3_GND 0x00 -+# define LIS302_CTRL3_FF_WU_1 0x01 -+# define LIS302_CTRL3_FF_WU_2 0x02 -+# define LIS302_CTRL3_FF_WU_12 0x03 -+# define LIS302_CTRL3_DATA_RDY 0x04 -+# define LIS302_CTRL3_CLICK 0x07 -+#define LIS302_HP_FILTER_RESET 0x23 -+#define LIS302_STATUS_REG 0x27 -+#define LIS302_X 0x29 -+#define LIS302_Y 0x2b -+#define LIS302_Z 0x2d -+#define LIS302_FF_WU_CFG_1 0x30 -+ -+/* configurable interrupt events */ -+#define LIS302_X_LOW (1 << 0) -+#define LIS302_X_HIGH (1 << 1) -+#define LIS302_Y_LOW (1 << 2) -+#define LIS302_Y_HIGH (1 << 3) -+#define LIS302_Z_LOW (1 << 4) -+#define LIS302_Z_HIGH (1 << 5) -+#define LIS302_LIR (1 << 6) -+#define LIS302_AOI (1 << 7) -+ -+#define LIS302_FF_WU_SRC_1 0x31 -+#define LIS302_FF_THS_1 0x32 -+#define LIS302_FF_WU_DURATION_1 0x33 -+#define LIS302_FF_WU_CFG_2 0x34 -+#define LIS302_FF_WU_SRC_2 0x34 -+#define LIS302_FF_THS_2 0x35 -+#define LIS302_FF_WU_DURATION_2 0x37 -+ -+/* Default values */ -+#define LIS302_THS 810 /* mg */ -+#define LIS302_DURATION 500 /* ms */ -+#define LIS302_400HZ 1 /* sample rate 400Hz */ -+#define LIS302_100HZ 0 /* sample rate 100Hz */ -+#define LIS302_FS 0 /* full scale 0 / 1 */ -+#define LIS302_SAMPLES 1 -+#define LIS302_SMALL_UNIT 18 /* Typical value 18 mg/digit */ -+#define LIS302_BIG_UNIT 72 /* Typical value 72 mg/digit */ -+#define LIS302_TURN_ON_TIME 3000 /* Turn on time 3000ms / data rate */ -+ -+#define LIS302_POWEROFF_DELAY (5 * HZ) -+ -+/* A lis302dl chip will contain this value in LIS302_WHOAMI register */ -+#define LIS302_WHOAMI_VALUE 0x3b -+#define LIS302_IRQ_FLAGS (IRQF_TRIGGER_RISING | IRQF_SAMPLE_RANDOM) -+ -+struct lis302dl_chip { -+ struct mutex lock; -+ struct i2c_client *client; -+ struct work_struct work1, work2; -+ struct delayed_work poweroff_work; -+ int irq1, irq2; -+ uint8_t power; -+ int threshold; -+ int duration; -+ uint8_t sample_rate; -+ uint8_t fs; -+ unsigned int samples; -+}; -+ -+static inline s32 lis302dl_write(struct i2c_client *c, int reg, u8 value) -+{ -+ return i2c_smbus_write_byte_data(c, reg, value); -+} -+ -+static inline s32 lis302dl_read(struct i2c_client *c, int reg) -+{ -+ return i2c_smbus_read_byte_data(c, reg); -+} -+ -+/* -+ * Detect LIS302DL chip. Return value is zero if -+ * chip detected, otherwise a negative error code. 
-+ */ -+static int lis302dl_detect(struct i2c_client *c) -+{ -+ int r; -+ -+ r = lis302dl_read(c, LIS302_WHOAMI); -+ if (r < 0) -+ return r; -+ -+ if (r != LIS302_WHOAMI_VALUE) -+ return -ENODEV; -+ -+ return 0; -+} -+ -+static inline u8 intmode(int pin, u8 mode) -+{ -+ if (pin == 1) -+ return mode; -+ if (pin == 2) -+ return (mode << 3); -+ -+ return 0; -+} -+ -+static int lis302dl_configure(struct i2c_client *c) -+{ -+ -+ struct lis302dl_chip *lis = i2c_get_clientdata(c); -+ int ts = 0, ret; -+ u8 duration, r = 0; -+ -+ /* REG 1*/ -+ /* Controls power, scale, data rate, and enabled axis */ -+ r |= lis->sample_rate ? LIS302_CTRL1_DR : 0; -+ r |= lis->fs ? LIS302_CTRL1_FS : 0; -+ r |= LIS302_CTRL1_PD | LIS302_CTRL1_X | LIS302_CTRL1_Y | LIS302_CTRL1_Z; -+ ret = lis302dl_write(c, LIS302_CTRL_1, r); -+ if (ret < 0) -+ goto out; -+ -+ /* REG 2 */ -+ /* Control High Pass filter selection. not used */ -+ -+ /* REG 3 -+ * Interrupt CTRL register. One interrupt pin is used for -+ * inertial wakeup -+ */ -+ r = intmode(1, LIS302_CTRL3_FF_WU_1) | intmode(2, LIS302_CTRL3_GND); -+ ret = lis302dl_write(c, LIS302_CTRL_3, r); -+ if (ret < 0) -+ goto out; -+ -+ /* Configure interrupt pin thresholds */ -+ ts = lis->threshold / (lis->fs ? LIS302_BIG_UNIT : LIS302_SMALL_UNIT); -+ ts &= 0x7f; -+ duration = lis->duration / (lis->sample_rate ? 40 : 10); -+ -+ ret = lis302dl_write(c, LIS302_FF_THS_1, ts); -+ if (ret < 0) -+ goto out; -+ ret = lis302dl_write(c, LIS302_FF_WU_DURATION_1, duration); -+ if (ret < 0) -+ goto out; -+ /* Enable interrupt wakeup on x and y axis */ -+ ret = lis302dl_write(c, LIS302_FF_WU_CFG_1, -+ (LIS302_X_HIGH | LIS302_Y_HIGH)); -+ if (ret < 0) -+ goto out; -+ out: -+ return ret; -+} -+ -+static inline void lis302dl_print_event(struct device *dev, u8 event) -+{ -+ if (event & 0x01) -+ dev_dbg(dev, "X Low event\n"); -+ if (event & 0x02) -+ dev_dbg(dev, "X High event\n"); -+ if (event & 0x04) -+ dev_dbg(dev, "Y Low event\n"); -+ if (event & 0x08) -+ dev_dbg(dev, "Y High event\n"); -+ if (event & 0x10) -+ dev_dbg(dev, "Z Low event\n"); -+ if (event & 0x20) -+ dev_dbg(dev, "Z High event\n"); -+} -+ -+/* Interrupt handler bottom halves. */ -+static void lis302dl_work1(struct work_struct *work) -+{ -+ struct lis302dl_chip *chip = -+ container_of(work, struct lis302dl_chip, work1); -+ u8 reg; -+ -+ mutex_lock(&chip->lock); -+ /* ack the interrupt */ -+ reg = lis302dl_read(chip->client, LIS302_FF_WU_SRC_1); -+ mutex_unlock(&chip->lock); -+ sysfs_notify(&chip->client->dev.kobj, NULL, "coord"); -+ lis302dl_print_event(&chip->client->dev, reg); -+} -+ -+static void lis302dl_work2(struct work_struct *work) -+{ -+ struct lis302dl_chip *chip = -+ container_of(work, struct lis302dl_chip, work2); -+ u8 reg; -+ -+ mutex_lock(&chip->lock); -+ /* ack the interrupt */ -+ reg = lis302dl_read(chip->client, LIS302_FF_WU_SRC_2); -+ mutex_unlock(&chip->lock); -+ lis302dl_print_event(&chip->client->dev, reg); -+} -+ -+/* -+ * We cannot use I2C in interrupt context, so we just schedule work. 
-+ */ -+static irqreturn_t lis302dl_irq1(int irq, void *_chip) -+{ -+ struct lis302dl_chip *chip = _chip; -+ schedule_work(&chip->work1); -+ -+ return IRQ_HANDLED; -+} -+ -+static irqreturn_t lis302dl_irq2(int irq, void *_chip) -+{ -+ return IRQ_HANDLED; -+} -+ -+/* duration depends on chips data rate */ -+static void set_duration(struct i2c_client *c, int dr, int msec) -+{ -+ u8 duration; -+ if (dr) -+ /* 400 Hz data rate max duration is 637.5 ms */ -+ if (msec > 637) -+ duration = 0xff; -+ else -+ duration = (msec / 10) * 4; -+ else -+ duration = msec / 10; -+ lis302dl_write(c, LIS302_FF_WU_DURATION_1, duration); -+} -+ -+static void set_ths(struct i2c_client *c, int full_scale, int ths) -+{ -+ u8 threshold; -+ -+ if (full_scale) -+ threshold = ths / LIS302_BIG_UNIT; -+ else -+ /* max threshold is 2286 mg when normal scale is used*/ -+ if (ths > (127 * LIS302_SMALL_UNIT)) -+ threshold = 0x7f; -+ else -+ threshold = ths / LIS302_SMALL_UNIT; -+ -+ threshold &= 0x7f; -+ lis302dl_write(c, LIS302_FF_THS_1, threshold); -+} -+ -+static int lis302dl_power(struct lis302dl_chip *chip, int on) -+{ -+ u8 reg, regwant; -+ int result, delay; -+ -+ reg = lis302dl_read(chip->client, LIS302_CTRL_1); -+ if (on) -+ regwant = reg | LIS302_CTRL1_PD; -+ else -+ regwant = reg & ~LIS302_CTRL1_PD; -+ -+ /* Avoid unnecessary writes */ -+ if (reg == regwant) -+ return 0; -+ -+ result = lis302dl_write(chip->client, LIS302_CTRL_1, regwant); -+ -+ /* turn on time delay depends on data rate */ -+ if (on) { -+ delay = (chip->sample_rate ? (LIS302_TURN_ON_TIME / 400) : -+ (LIS302_TURN_ON_TIME / 100)) + 1; -+ msleep(delay); -+ } -+ if (!result) -+ chip->power = !!on; -+ -+ return !!result; -+} -+ -+static void lis302dl_poweroff_work(struct work_struct *work) -+{ -+ struct lis302dl_chip *chip = -+ container_of(work, struct lis302dl_chip, poweroff_work.work); -+ mutex_lock(&chip->lock); -+ lis302dl_power(chip, 0); -+ mutex_unlock(&chip->lock); -+} -+ -+static int lis302dl_selftest(struct lis302dl_chip *chip) -+{ -+ u8 reg; -+ s8 x, y, z; -+ s8 powerbit; -+ -+ reg = lis302dl_read(chip->client, LIS302_CTRL_1); -+ powerbit = reg & LIS302_CTRL1_PD; -+ reg |= LIS302_CTRL1_PD; -+ lis302dl_write(chip->client, LIS302_CTRL_1, (reg | LIS302_CTRL1_STP)); -+ msleep(30); -+ x = (s8)lis302dl_read(chip->client, LIS302_X); -+ y = (s8)lis302dl_read(chip->client, LIS302_Y); -+ z = (s8)lis302dl_read(chip->client, LIS302_Z); -+ /* back to normal settings */ -+ lis302dl_write(chip->client, LIS302_CTRL_1, reg); -+ msleep(30); -+ x -= (s8)lis302dl_read(chip->client, LIS302_X); -+ y -= (s8)lis302dl_read(chip->client, LIS302_Y); -+ z -= (s8)lis302dl_read(chip->client, LIS302_Z); -+ -+ /* Return to passive state if we were in it. 
*/ -+ if (!powerbit) -+ lis302dl_write(chip->client, -+ LIS302_CTRL_1, -+ reg & ~LIS302_CTRL1_PD); -+ -+ /* Now check that delta is within specified range for each axis */ -+ if (x < -32 || x > -3) -+ return -1; -+ if (y < 3 || y > 32) -+ return -1; -+ if (z < 3 || z > 32) -+ return -1; -+ -+ /* test passed */ -+ return 0; -+} -+ -+/******************************************************************************* -+ * SYSFS * -+ ******************************************************************************/ -+ -+static ssize_t lis302dl_show_power(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ int val; -+ int ret; -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ val = lis302dl_read(chip->client, LIS302_CTRL_1); -+ if (val >= 0) -+ if (val & LIS302_CTRL1_PD) -+ ret = snprintf(buf, PAGE_SIZE, "on\n"); -+ else -+ ret = snprintf(buf, PAGE_SIZE, "off\n"); -+ else -+ ret = val; -+ mutex_unlock(&chip->lock); -+ return ret; -+} -+ -+static ssize_t lis302dl_set_power(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ -+ if (!strcmp(buf, "on\n")) -+ lis302dl_power(chip, 1); -+ else if (!strcmp(buf, "off\n")) -+ lis302dl_power(chip, 0); -+ -+ mutex_unlock(&chip->lock); -+ -+ return len; -+} -+ -+static ssize_t lis302dl_show_rate(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ u8 val; -+ int ret; -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ val = lis302dl_read(chip->client, LIS302_CTRL_1); -+ ret = snprintf(buf, PAGE_SIZE, "%d\n", -+ (val & LIS302_CTRL1_DR) ? 400 : 100); -+ mutex_unlock(&chip->lock); -+ return ret; -+} -+ -+static ssize_t lis302dl_set_rate(struct device *dev, -+ struct device_attribute *attr, const char *buf, -+ size_t len) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ u8 reg; -+ -+ mutex_lock(&chip->lock); -+ reg = lis302dl_read(chip->client, LIS302_CTRL_1); -+ if (!strcmp(buf, "400\n")) { -+ reg |= LIS302_CTRL1_DR; -+ chip->sample_rate = 1; -+ lis302dl_write(chip->client, LIS302_CTRL_1, reg); -+ set_duration(chip->client, chip->sample_rate, chip->duration); -+ } else if (!strcmp(buf, "100\n")) { -+ reg &= ~LIS302_CTRL1_DR; -+ chip->sample_rate = 0; -+ lis302dl_write(chip->client, LIS302_CTRL_1, reg); -+ set_duration(chip->client, chip->sample_rate, chip->duration); -+ -+ } -+ mutex_unlock(&chip->lock); -+ -+ return len; -+} -+ -+static ssize_t lis302dl_show_scale(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ int val, ret; -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ val = lis302dl_read(chip->client, LIS302_CTRL_1); -+ -+ if (val >= 0) -+ if (val & LIS302_CTRL1_FS) -+ ret = snprintf(buf, PAGE_SIZE, "full\n"); -+ else -+ ret = snprintf(buf, PAGE_SIZE, "normal\n"); -+ else -+ ret = val; -+ mutex_unlock(&chip->lock); -+ -+ return ret; -+} -+ -+static ssize_t lis302dl_set_scale(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ u8 reg; -+ -+ mutex_lock(&chip->lock); -+ reg = lis302dl_read(chip->client, LIS302_CTRL_1); -+ -+ if (!strcmp(buf, "full\n")) { -+ reg |= LIS302_CTRL1_FS; -+ chip->fs = 1; -+ lis302dl_write(chip->client, LIS302_CTRL_1, reg); -+ set_ths(chip->client, chip->fs, chip->threshold); -+ } else if (!strcmp(buf, "normal\n")) { -+ reg &= 
~LIS302_CTRL1_FS; -+ chip->fs = 0; -+ lis302dl_write(chip->client, LIS302_CTRL_1, reg); -+ set_ths(chip->client, chip->fs, chip->threshold); -+ } -+ mutex_unlock(&chip->lock); -+ -+ return len; -+} -+ -+static ssize_t lis302dl_show_duration(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ int val; -+ int ret; -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ val = lis302dl_read(chip->client, LIS302_FF_WU_DURATION_1); -+ -+ if (val >= 0) -+ ret = snprintf(buf, PAGE_SIZE, "%d ms\n", -+ chip->sample_rate ? (val * 10 / 4) : (val * 10)); -+ else -+ ret = val; -+ mutex_unlock(&chip->lock); -+ return ret; -+} -+ -+static ssize_t lis302dl_set_duration(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ unsigned long duration; -+ int ret; -+ -+ ret = strict_strtoul(buf, 0, &duration); -+ if (ret || duration < 0) -+ return -EINVAL; -+ mutex_lock(&chip->lock); -+ /* max duration is 2.55 s when data rate is 100Hz */ -+ if (duration > 2550) -+ duration = 2550; -+ set_duration(chip->client, chip->sample_rate, duration); -+ chip->duration = duration; -+ mutex_unlock(&chip->lock); -+ return len; -+} -+ -+static ssize_t lis302dl_show_ths(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ int val, ret; -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ val = lis302dl_read(chip->client, LIS302_FF_THS_1); -+ -+ if (val >= 0) -+ ret = snprintf(buf, PAGE_SIZE, "%d mg\n", val * (chip->fs ? -+ LIS302_BIG_UNIT : LIS302_SMALL_UNIT)); -+ else -+ ret = val; -+ mutex_unlock(&chip->lock); -+ return ret; -+} -+ -+static ssize_t lis302dl_set_ths(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ unsigned long ths; -+ int ret; -+ -+ ret = strict_strtoul(buf, 0, &ths); -+ if (ret) -+ return -EINVAL; -+ mutex_lock(&chip->lock); -+ chip->threshold = ths; -+ set_ths(chip->client, chip->fs, chip->threshold); -+ mutex_unlock(&chip->lock); -+ -+ return len; -+} -+ -+static ssize_t lis302dl_show_samples(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ int ret; -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->samples); -+ -+ mutex_unlock(&chip->lock); -+ return ret; -+} -+ -+static ssize_t lis302dl_set_samples(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ unsigned long samples; -+ int ret; -+ -+ ret = strict_strtoul(buf, 0, &samples); -+ if (ret || samples < 1) -+ return -EINVAL; -+ -+ mutex_lock(&chip->lock); -+ chip->samples = samples; -+ mutex_unlock(&chip->lock); -+ -+ return len; -+} -+ -+static ssize_t lis302dl_show_coord(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ int ret, i; -+ int x, y, z; -+ -+ x = y = z = 0; -+ -+ /* Cannot cancel synchronously within the mutex */ -+ cancel_delayed_work_sync(&chip->poweroff_work); -+ -+ mutex_lock(&chip->lock); -+ -+ if (!chip->power) -+ ret = lis302dl_power(chip, 1); -+ -+ for (i = 0; i < chip->samples; i++) { -+ x += (s8)lis302dl_read(chip->client, LIS302_X); -+ y += (s8)lis302dl_read(chip->client, LIS302_Y); -+ z += (s8)lis302dl_read(chip->client, LIS302_Z); -+ } -+ x /= (int)chip->samples; 
-+ y /= (int)chip->samples; -+ z /= (int)chip->samples; -+ -+ /* convert to mg */ -+ x *= (chip->fs ? LIS302_BIG_UNIT : LIS302_SMALL_UNIT); -+ y *= (chip->fs ? LIS302_BIG_UNIT : LIS302_SMALL_UNIT); -+ z *= (chip->fs ? LIS302_BIG_UNIT : LIS302_SMALL_UNIT); -+ ret = snprintf(buf, PAGE_SIZE, "%d %d %d\n", x, y, z); -+ mutex_unlock(&chip->lock); -+ -+ schedule_delayed_work(&chip->poweroff_work, LIS302_POWEROFF_DELAY); -+ -+ return ret; -+} -+ -+static ssize_t lis302dl_show_selftest(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ int ret; -+ -+ struct lis302dl_chip *chip = dev_get_drvdata(dev); -+ -+ mutex_lock(&chip->lock); -+ if (!lis302dl_selftest(chip)) -+ ret = snprintf(buf, PAGE_SIZE, "OK\n"); -+ else -+ ret = snprintf(buf, PAGE_SIZE, "FAIL\n"); -+ mutex_unlock(&chip->lock); -+ -+ return ret; -+} -+ -+static struct device_attribute lis302dl_attrs[] = { -+ __ATTR(enable, S_IRUGO|S_IWUSR, -+ lis302dl_show_power, lis302dl_set_power), -+ __ATTR(rate, S_IRUGO|S_IWUSR, -+ lis302dl_show_rate, lis302dl_set_rate), -+ __ATTR(scale, S_IRUGO|S_IWUSR, -+ lis302dl_show_scale, lis302dl_set_scale), -+ __ATTR(duration, S_IRUGO|S_IWUSR, -+ lis302dl_show_duration, lis302dl_set_duration), -+ __ATTR(ths, S_IRUGO|S_IWUSR, -+ lis302dl_show_ths, lis302dl_set_ths), -+ __ATTR(samples, S_IRUGO|S_IWUSR, -+ lis302dl_show_samples, lis302dl_set_samples), -+ __ATTR(coord, S_IRUGO|S_IWUSR, -+ lis302dl_show_coord, NULL), -+ __ATTR(selftest, S_IRUGO|S_IWUSR, -+ lis302dl_show_selftest, NULL), -+}; -+ -+static int lis302dl_register_sysfs(struct i2c_client *c) -+{ -+ struct device *d = &c->dev; -+ int r, i; -+ -+ for (i = 0; i < ARRAY_SIZE(lis302dl_attrs); i++) { -+ r = device_create_file(d, &lis302dl_attrs[i]); -+ if (r) -+ goto fail; -+ } -+ return 0; -+fail: -+ while (i--) -+ device_remove_file(d, &lis302dl_attrs[i]); -+ -+ return r; -+} -+ -+static void lis302dl_unregister_sysfs(struct i2c_client *c) -+{ -+ struct device *d = &c->dev; -+ int i; -+ -+ for (i = ARRAY_SIZE(lis302dl_attrs) - 1; i >= 0; i--) -+ device_remove_file(d, &lis302dl_attrs[i]); -+} -+ -+/******************************************************************************* -+ * INIT -+ ******************************************************************************/ -+static struct i2c_driver lis302dl_i2c_driver; -+ -+static int lis302dl_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct lis302dl_chip *lis; -+ struct lis302dl_platform_data *pdata = client->dev.platform_data; -+ int err = 0; -+ -+ if (!pdata) { -+ dev_dbg(&client->dev, "no platform data?\n"); -+ return -EINVAL; -+ } -+ lis = kzalloc(sizeof(struct lis302dl_chip), GFP_KERNEL); -+ if (!lis) -+ return -ENOMEM; -+ -+ i2c_set_clientdata(client, lis); -+ lis->client = client; -+ -+ err = lis302dl_detect(client); -+ if (err) -+ goto fail2; -+ -+ /* default startup values */ -+ lis->power = 1; -+ lis->threshold = LIS302_THS; -+ lis->duration = LIS302_DURATION; -+ lis->fs = LIS302_FS; -+ lis->sample_rate = LIS302_100HZ; -+ lis->samples = LIS302_SAMPLES; -+ -+ mutex_init(&lis->lock); -+ -+ err = lis302dl_configure(client); -+ if (err < 0) { -+ dev_err(&client->dev, "lis302dl error configuring chip\n"); -+ goto fail2; -+ } -+ -+ err = lis302dl_register_sysfs(client); -+ if (err) { -+ printk(KERN_ALERT -+ "lis302dl: sysfs registration failed, error %d\n", err); -+ goto fail2; -+ } -+ -+ lis->irq1 = pdata->int1_gpio; -+ lis->irq2 = pdata->int2_gpio; -+ -+ /* gpio for interrupt pin 1 */ -+ err = gpio_request(lis->irq1, "lis302dl_irq1"); -+ if (err) { -+ 
printk(KERN_ALERT "lis302dl: cannot request gpio for int 1\n"); -+ goto fail2; -+ } -+ gpio_direction_input(lis->irq1); -+ INIT_WORK(&lis->work1, lis302dl_work1); -+ INIT_DELAYED_WORK(&lis->poweroff_work, lis302dl_poweroff_work); -+ -+ err = request_irq(gpio_to_irq(lis->irq1), lis302dl_irq1, -+ LIS302_IRQ_FLAGS, DRIVER_NAME, lis); -+ if (err) { -+ dev_err(&client->dev, "could not get IRQ_1 = %d\n", -+ gpio_to_irq(lis->irq1)); -+ goto fail3; -+ } -+ schedule_delayed_work(&lis->poweroff_work, LIS302_POWEROFF_DELAY); -+ -+ return 0; -+ -+ fail3: -+ free_irq(gpio_to_irq(lis->irq1), lis); -+ gpio_free(lis->irq1); -+ fail2: -+ -+ kfree(lis); -+ return err; -+} -+ -+static int lis302dl_remove(struct i2c_client *client) -+{ -+ struct lis302dl_chip *chip = i2c_get_clientdata(client); -+ -+ lis302dl_unregister_sysfs(client); -+ free_irq(gpio_to_irq(chip->irq1), chip); -+ gpio_free(chip->irq1); -+ kfree(chip); -+ -+ return 0; -+} -+ -+static int lis302dl_suspend(struct i2c_client *client, pm_message_t state) -+{ -+ struct lis302dl_chip *chip = i2c_get_clientdata(client); -+ int ret; -+ -+ mutex_lock(&chip->lock); -+ ret = lis302dl_power(chip, 0); -+ mutex_unlock(&chip->lock); -+ -+ return ret; -+} -+ -+static int lis302dl_resume(struct i2c_client *client) -+{ -+ struct lis302dl_chip *chip = i2c_get_clientdata(client); -+ int ret; -+ -+ mutex_lock(&chip->lock); -+ ret = lis302dl_power(chip, 1); -+ mutex_unlock(&chip->lock); -+ -+ return ret; -+} -+ -+static const struct i2c_device_id lis302dl_id[] = { -+ { "lis302dl", 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, lis302dl_id); -+ -+static struct i2c_driver lis302dl_i2c_driver = { -+ .driver = { -+ .name = DRIVER_NAME, -+ }, -+ .suspend = lis302dl_suspend, -+ .resume = lis302dl_resume, -+ .probe = lis302dl_probe, -+ .remove = lis302dl_remove, -+ .id_table = lis302dl_id, -+}; -+ -+static int __init lis302dl_init(void) -+{ -+ int ret; -+ -+ ret = i2c_add_driver(&lis302dl_i2c_driver); -+ if (ret < 0) -+ printk(KERN_ALERT "lis302dl driver registration failed\n"); -+ -+ return ret; -+} -+ -+static void __exit lis302dl_exit(void) -+{ -+ i2c_del_driver(&lis302dl_i2c_driver); -+} -+ -+MODULE_AUTHOR("Nokia Corporation"); -+MODULE_DESCRIPTION("LIS302DL acceleration sensor driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(lis302dl_init); -+module_exit(lis302dl_exit); -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/Makefile linux-omap-2.6.28-nokia1/drivers/i2c/chips/Makefile ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/Makefile 2011-06-22 13:14:17.993067752 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/Makefile 2011-06-22 13:19:32.663063277 +0200 -@@ -13,6 +13,7 @@ - obj-$(CONFIG_DS1682) += ds1682.o - obj-$(CONFIG_AT24) += at24.o - obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o -+obj-$(CONFIG_TPA6130A2) += tpa6130a2.o - obj-$(CONFIG_SENSORS_MAX6875) += max6875.o - obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o - obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o -@@ -21,11 +22,13 @@ obj-$(CONFIG_SENSORS_PCF8591) += pcf8591 - obj-$(CONFIG_TPS65010) += tps65010.o - obj-$(CONFIG_MENELAUS) += menelaus.o - obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o -+obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o - obj-$(CONFIG_MCU_MPC8349EMITX) += mcu_mpc8349emitx.o - obj-$(CONFIG_TWL4030_POWEROFF) += twl4030-poweroff.o - obj-$(CONFIG_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o - obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o - obj-$(CONFIG_RTC_X1205_I2C) += x1205.o -+obj-$(CONFIG_LIS302DL) += lis302dl.o - obj-$(CONFIG_LP5521) += lp5521.o - - ifeq ($(CONFIG_I2C_DEBUG_CHIP),y) -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/i2c/chips/tpa6130a2.c linux-omap-2.6.28-nokia1/drivers/i2c/chips/tpa6130a2.c ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/tpa6130a2.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/tpa6130a2.c 2011-06-22 13:19:32.663063277 +0200 -@@ -0,0 +1,497 @@ -+/* -+ * drivers/i2c/chips/tpa6130a2.c -+ * -+ * Simple driver to modify TPA6130A2 amplifier chip gain levels trough -+ * sysfs interface. -+ * -+ * Copyright (C) Nokia Corporation -+ * -+ * Written by Timo Kokkonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define TPA6130A2_REG_ENABLE 0x1 -+#define TPA6130A2_REG_VOLUME 0x2 -+#define TPA6130A2_REG_HI_Z 0x3 -+#define TPA6130A2_REG_VERSION 0x4 -+#define TPA6130A2_REGS 4 -+ -+#define TPA6130A2_MASK_CHANNEL (3 << 6) -+#define TPA6130A2_MASK_VOLUME 0x3f -+#define TPA6130A2_MASK_HI_Z 0x03 -+#define TPA6130A2_SWS 0x01 -+ -+#define TPA6130A2_CHANNEL_LEFT (1 << 7) -+#define TPA6130A2_CHANNEL_RIGHT (1 << 6) -+ -+struct i2c_client *tpa6130a2_client; -+static long int initialized; -+ -+/* This struct is used to save the context */ -+struct tpa6130a2_data { -+ struct mutex mutex; -+ unsigned char regs[TPA6130A2_REGS]; -+ unsigned char power_state; -+ int (*set_power)(int state); -+}; -+ -+static int tpa6130a2_read(int reg) -+{ -+ struct tpa6130a2_data *data; -+ int val; -+ -+ BUG_ON(tpa6130a2_client == NULL); -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ /* If powered off, return the cached value */ -+ if (data->power_state) { -+ val = i2c_smbus_read_byte_data(tpa6130a2_client, reg); -+ if (val < 0) -+ dev_err(&tpa6130a2_client->dev, "Read failed\n"); -+ else -+ data->regs[reg - 1] = val; -+ } else { -+ val = data->regs[reg - 1]; -+ } -+ -+ return val; -+} -+ -+static int tpa6130a2_write(int reg, u8 value) -+{ -+ struct tpa6130a2_data *data; -+ int val = 0; -+ -+ BUG_ON(tpa6130a2_client == NULL); -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ if (data->power_state) { -+ val = i2c_smbus_write_byte_data(tpa6130a2_client, reg, value); -+ if (val < 0) -+ dev_err(&tpa6130a2_client->dev, "Write failed\n"); -+ } -+ -+ /* Either powered on or off, we save the context */ -+ data->regs[reg - 1] = value; -+ -+ return val; -+} -+ -+/* Control interface */ -+static int tpa6130a2_get_mute(void) -+{ -+ struct tpa6130a2_data *data; -+ int ret; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ ret = tpa6130a2_read(TPA6130A2_REG_VOLUME) & TPA6130A2_MASK_CHANNEL; -+ mutex_unlock(&data->mutex); -+ -+ return ret; -+} -+ -+static void tpa6130a2_set_mute(int channel) -+{ -+ struct tpa6130a2_data *data; -+ int val; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ val = tpa6130a2_read(TPA6130A2_REG_VOLUME) & ~TPA6130A2_MASK_CHANNEL; -+ val |= 
channel & TPA6130A2_MASK_CHANNEL; -+ -+ tpa6130a2_write(TPA6130A2_REG_VOLUME, val); -+ mutex_unlock(&data->mutex); -+} -+ -+static int tpa6130a2_get_hp_en(void) -+{ -+ struct tpa6130a2_data *data; -+ int ret; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ ret = tpa6130a2_read(TPA6130A2_REG_ENABLE) & TPA6130A2_MASK_CHANNEL; -+ mutex_unlock(&data->mutex); -+ -+ return ret; -+} -+ -+static void tpa6130a2_set_hp_en(int channel) -+{ -+ struct tpa6130a2_data *data; -+ int val; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ val = tpa6130a2_read(TPA6130A2_REG_ENABLE) & ~TPA6130A2_MASK_CHANNEL; -+ val |= channel & TPA6130A2_MASK_CHANNEL; -+ -+ if (channel) -+ val &= ~TPA6130A2_SWS; -+ else -+ val |= TPA6130A2_SWS; -+ -+ tpa6130a2_write(TPA6130A2_REG_ENABLE, val); -+ mutex_unlock(&data->mutex); -+} -+ -+static int tpa6130a2_get_hi_z(void) -+{ -+ struct tpa6130a2_data *data; -+ int ret; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ ret = (tpa6130a2_read(TPA6130A2_REG_HI_Z) & TPA6130A2_MASK_HI_Z) << 6; -+ mutex_unlock(&data->mutex); -+ -+ return ret; -+} -+ -+static void tpa6130a2_set_hi_z(int channel) -+{ -+ struct tpa6130a2_data *data; -+ int val; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ val = tpa6130a2_read(TPA6130A2_REG_HI_Z) & ~TPA6130A2_MASK_HI_Z; -+ val |= (channel & TPA6130A2_MASK_CHANNEL) >> 6; -+ tpa6130a2_write(TPA6130A2_REG_HI_Z, val); -+ mutex_unlock(&data->mutex); -+} -+ -+int tpa6130a2_get_volume(void) -+{ -+ struct tpa6130a2_data *data; -+ int vol; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ vol = tpa6130a2_read(TPA6130A2_REG_VOLUME); -+ mutex_unlock(&data->mutex); -+ vol &= TPA6130A2_MASK_VOLUME; -+ -+ return vol; -+} -+ -+int tpa6130a2_set_volume(int vol) -+{ -+ struct tpa6130a2_data *data; -+ int ret; -+ -+ if (vol < 0) -+ vol = 0; -+ if (vol > 0x3f) -+ vol = 0x3f; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ vol |= tpa6130a2_read(TPA6130A2_REG_VOLUME) & ~TPA6130A2_MASK_VOLUME; -+ ret = tpa6130a2_write(TPA6130A2_REG_VOLUME, vol); -+ mutex_unlock(&data->mutex); -+ -+ return ret; -+} -+ -+static void tpa6130a2_power_on(void) -+{ -+ struct tpa6130a2_data *data; -+ int i; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ data->set_power(1); -+ data->power_state = 1; -+ -+ /* Rewrite all except the read only register */ -+ for (i = TPA6130A2_REG_ENABLE; i < TPA6130A2_REGS; i++) -+ tpa6130a2_write(i, data->regs[i - 1]); -+ mutex_unlock(&data->mutex); -+} -+ -+static void tpa6130a2_power_off(void) -+{ -+ struct tpa6130a2_data *data; -+ -+ data = i2c_get_clientdata(tpa6130a2_client); -+ -+ mutex_lock(&data->mutex); -+ data->power_state = 0; -+ data->set_power(0); -+ mutex_unlock(&data->mutex); -+} -+ -+void tpa6130a2_set_enabled(int enabled) -+{ -+ BUG_ON(tpa6130a2_client == NULL); -+ -+ if (enabled) { -+ tpa6130a2_set_hp_en(TPA6130A2_CHANNEL_LEFT | -+ TPA6130A2_CHANNEL_RIGHT); -+ tpa6130a2_power_on(); -+ } else { -+ /* Disable the HPs prior to powering down the chip */ -+ tpa6130a2_set_hp_en(0); -+ tpa6130a2_power_off(); -+ } -+} -+ -+/* Sysfs interface */ -+#define tpa6130a2_sys_property(name) \ -+static ssize_t tpa6130a2_##name##_show(struct device *dev, \ -+ struct device_attribute *attr, char *buf) \ -+{ \ -+ int val = tpa6130a2_get_##name(); \ -+ \ -+ return snprintf(buf, PAGE_SIZE, "%c%c\n", \ -+ (val & 
TPA6130A2_CHANNEL_LEFT) ? 'l' : ' ', \ -+ (val & TPA6130A2_CHANNEL_RIGHT) ? 'r' : ' '); \ -+} \ -+ \ -+static ssize_t tpa6130a2_##name##_store(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t len) \ -+{ \ -+ int val = 0; \ -+ \ -+ if (strpbrk(buf, "lL") != NULL) \ -+ val |= TPA6130A2_CHANNEL_LEFT; \ -+ if (strpbrk(buf, "rR") != NULL) \ -+ val |= TPA6130A2_CHANNEL_RIGHT; \ -+ tpa6130a2_set_##name(val); \ -+ \ -+ return len; \ -+} \ -+ \ -+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, tpa6130a2_##name##_show, \ -+ tpa6130a2_##name##_store); -+ -+tpa6130a2_sys_property(mute) -+tpa6130a2_sys_property(hp_en) -+tpa6130a2_sys_property(hi_z) -+ -+static ssize_t tpa6130a2_volume_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ int vol; -+ vol = tpa6130a2_get_volume(); -+ vol = snprintf(buf, PAGE_SIZE, "%d\n", vol); -+ return vol; -+} -+ -+static ssize_t tpa6130a2_volume_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ int vol; -+ char str[10]; -+ -+ strncpy(str, buf, min(len, sizeof(str))); -+ str[min(len, sizeof(str) - 1)] = 0; -+ -+ if (sscanf(str, " %d", &vol) == 1) -+ tpa6130a2_set_volume(vol); -+ -+ return len; -+} -+ -+static DEVICE_ATTR(volume, S_IRUGO | S_IWUSR, tpa6130a2_volume_show, -+ tpa6130a2_volume_store); -+ -+static struct attribute *attrs[] = { -+ &dev_attr_volume.attr, -+ &dev_attr_mute.attr, -+ &dev_attr_hi_z.attr, -+ &dev_attr_hp_en.attr, -+ NULL, -+}; -+ -+static const struct attribute_group attr_group = { -+ .attrs = attrs, -+}; -+ -+static int tpa6130a2_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ int err; -+ struct device *dev; -+ struct tpa6130a2_data *data; -+ struct tpa6130a2_platform_data *pdata; -+ -+ dev = &client->dev; -+ if (test_and_set_bit(1, &initialized)) { -+ dev_info(dev, "Driver already initialized\n"); -+ return -ENODEV; -+ } -+ -+ tpa6130a2_client = client; -+ -+ data = kzalloc(sizeof(*data), GFP_KERNEL); -+ if (data == NULL) { -+ err = -ENOMEM; -+ goto fail1; -+ } -+ -+ i2c_set_clientdata(tpa6130a2_client, data); -+ -+ if (client->dev.platform_data == NULL) { -+ dev_err(dev, "Platform data not set\n"); -+ dump_stack(); -+ err = -ENODEV; -+ goto fail2; -+ } -+ -+ pdata = (struct tpa6130a2_platform_data *)client->dev.platform_data; -+ data->set_power = pdata->set_power; -+ data->set_power(1); -+ data->power_state = 1; -+ mutex_init(&data->mutex); -+ -+ /* Read version */ -+ err = tpa6130a2_read(TPA6130A2_REG_VERSION) & 0x0f; -+ if ((err != 1) && (err != 2)) { -+ dev_err(dev, "Unexpected headphone amplifier chip version " -+ "of 0x%02x, was expecting 0x01 or 0x02\n", err); -+ err = -ENODEV; -+ -+ goto fail2; -+ } -+ -+ err = sysfs_create_group(&dev->kobj, &attr_group); -+ if (err) { -+ dev_err(dev, "Sysfs node creation failed\n"); -+ goto fail2; -+ } -+ -+ dev_info(dev, "Headphone amplifier initialized successfully\n"); -+ -+ /* enable both channels */ -+ tpa6130a2_set_hp_en(TPA6130A2_CHANNEL_LEFT | TPA6130A2_CHANNEL_RIGHT); -+ /* Some sort of default volume that doesn't kill your ears.. 
*/ -+ tpa6130a2_set_volume(20); -+ tpa6130a2_set_mute(0); /* Mute off */ -+ tpa6130a2_set_hp_en(0); /* Disable the chip until we actually need it */ -+ -+ /* Disable the chip */ -+ data->power_state = 0; -+ data->set_power(0); -+ return 0; -+ -+fail2: -+ kfree(data); -+fail1: -+ tpa6130a2_client = 0; -+ initialized = 0; -+ -+ return err; -+} -+ -+static int tpa6130a2_remove(struct i2c_client *client) -+{ -+ struct device *dev = &client->dev; -+ struct tpa6130a2_data *data = i2c_get_clientdata(client); -+ -+ data->set_power(0); -+ sysfs_remove_group(&dev->kobj, &attr_group); -+ kfree(data); -+ tpa6130a2_client = 0; -+ initialized = 0; -+ -+ return 0; -+} -+ -+static int tpa6130a2_suspend(struct i2c_client *client, pm_message_t mesg) -+{ -+ tpa6130a2_power_off(); -+ -+ return 0; -+} -+ -+static int tpa6130a2_resume(struct i2c_client *client) -+{ -+ tpa6130a2_power_on(); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id tpa6130a2_id[] = { -+ { -+ .name = "tpa6130a2", -+ .driver_data = 0, -+ }, -+ { }, -+}; -+ -+static struct i2c_driver tpa6130a2_i2c_driver = { -+ .driver = { -+ .name = "tpa6130a2", -+ }, -+ .id = I2C_DRIVERID_MISC, -+ .class = I2C_CLASS_HWMON, -+ .probe = tpa6130a2_probe, -+ .remove = tpa6130a2_remove, -+ .suspend = tpa6130a2_suspend, -+ .resume = tpa6130a2_resume, -+ .id_table = tpa6130a2_id, -+}; -+ -+static int __init tpa6130a2_init(void) -+{ -+ int ret; -+ -+ ret = i2c_add_driver(&tpa6130a2_i2c_driver); -+ if (ret < 0) { -+ printk(KERN_ERR "Unable to register TPA6130A2 I2C driver\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static void __exit tpa6130a2_exit(void) -+{ -+ i2c_del_driver(&tpa6130a2_i2c_driver); -+} -+ -+MODULE_AUTHOR("Nokia Corporation"); -+MODULE_DESCRIPTION("TPA6130A2 Headphone amplifier driver"); -+MODULE_LICENSE("GPL"); -+ -+late_initcall(tpa6130a2_init); -+module_exit(tpa6130a2_exit); -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/tsl2563.c linux-omap-2.6.28-nokia1/drivers/i2c/chips/tsl2563.c ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/tsl2563.c 2011-06-22 13:14:18.013067750 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/tsl2563.c 2011-06-22 13:19:32.663063277 +0200 -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include - - #define DRIVER_NAME "tsl2563" -@@ -111,6 +112,7 @@ struct tsl2563_chip { - struct mutex lock; - struct i2c_client *client; - struct device *hwmon_dev; -+ struct delayed_work poweroff_work; - - /* Remember state for suspend and resume functions */ - pm_message_t state; -@@ -125,6 +127,7 @@ struct tsl2563_chip { - /* Calibration coefficients */ - u32 calib0; - u32 calib1; -+ int cover_comp_gain; - - /* Cache current values, to be returned while suspended */ - u32 data0; -@@ -197,6 +200,13 @@ out: - return ret; - } - -+static void tsl2563_poweroff_work(struct work_struct *work) -+{ -+ struct tsl2563_chip *chip = -+ container_of(work, struct tsl2563_chip, poweroff_work.work); -+ tsl2563_set_power(chip, 0); -+} -+ - static int tsl2563_detect(struct tsl2563_chip *chip) - { - int ret; -@@ -310,6 +320,18 @@ static int tsl2563_get_adc(struct tsl256 - if (chip->state.event != PM_EVENT_ON) - goto out; - -+ cancel_delayed_work(&chip->poweroff_work); -+ -+ if (!tsl2563_get_power(chip)) { -+ ret = tsl2563_set_power(chip, 1); -+ if (ret) -+ goto out; -+ ret = tsl2563_configure(chip); -+ if (ret) -+ goto out; -+ tsl2563_wait_adc(chip); -+ } -+ - while (retry) { - ret = tsl2563_read(client, - TSL2563_REG_DATA0LOW | TSL2563_CLEARINT, -@@ -331,6 +353,8 @@ static int tsl2563_get_adc(struct tsl256 - 
chip->data0 = normalize_adc(adc0, chip->gainlevel->gaintime); - chip->data1 = normalize_adc(adc1, chip->gainlevel->gaintime); - -+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ); -+ - ret = 0; - out: - return ret; -@@ -432,11 +456,11 @@ static ssize_t tsl2563_adc0_show(struct - - ret = tsl2563_get_adc(chip); - if (ret) -- return ret; -+ goto out; - - ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->data0); -+out: - mutex_unlock(&chip->lock); -- - return ret; - } - -@@ -450,11 +474,11 @@ static ssize_t tsl2563_adc1_show(struct - - ret = tsl2563_get_adc(chip); - if (ret) -- return ret; -+ goto out; - - ret = snprintf(buf, PAGE_SIZE, "%d\n", chip->data1); -+out: - mutex_unlock(&chip->lock); -- - return ret; - } - -@@ -482,8 +506,8 @@ static ssize_t tsl2563_lux_show(struct d - if (ret) - goto out; - -- calib0 = calib_adc(chip->data0, chip->calib0); -- calib1 = calib_adc(chip->data1, chip->calib1); -+ calib0 = calib_adc(chip->data0, chip->calib0) * chip->cover_comp_gain; -+ calib1 = calib_adc(chip->data1, chip->calib1) * chip->cover_comp_gain; - - ret = snprintf(buf, PAGE_SIZE, "%d\n", adc_to_lux(calib0, calib1)); - -@@ -599,6 +623,7 @@ static int tsl2563_probe(struct i2c_clie - const struct i2c_device_id *device_id) - { - struct tsl2563_chip *chip; -+ struct tsl2563_platform_data *pdata = client->dev.platform_data; - int err = 0; - u8 id; - -@@ -629,6 +654,11 @@ static int tsl2563_probe(struct i2c_clie - chip->calib0 = calib_from_sysfs(CALIB_BASE_SYSFS); - chip->calib1 = calib_from_sysfs(CALIB_BASE_SYSFS); - -+ if (pdata) -+ chip->cover_comp_gain = pdata->cover_comp_gain; -+ else -+ chip->cover_comp_gain = 1; -+ - dev_info(&client->dev, "model %d, rev. %d\n", id >> 4, id & 0x0f); - - err = tsl2563_configure(chip); -@@ -645,6 +675,9 @@ static int tsl2563_probe(struct i2c_clie - goto fail2; - } - -+ INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work); -+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ); -+ - return 0; - fail2: - hwmon_device_unregister(chip->hwmon_dev); -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/twl4030-madc.c linux-omap-2.6.28-nokia1/drivers/i2c/chips/twl4030-madc.c ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/twl4030-madc.c 2011-06-22 13:14:18.013067750 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/twl4030-madc.c 2011-06-22 13:19:32.663063277 +0200 -@@ -48,6 +48,7 @@ struct twl4030_madc_data { - }; - - static struct twl4030_madc_data *the_madc; -+static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc, int chan, int on); - - static - const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = { -@@ -246,40 +247,51 @@ static inline void twl4030_madc_start_co - } - } - --static void twl4030_madc_wait_conversion_ready_ms( -+static int twl4030_madc_wait_conversion_ready( - struct twl4030_madc_data *madc, -- u8 *time, u8 status_reg) -+ unsigned int timeout_ms, u8 status_reg) - { -- u8 reg = 0; -+ unsigned long timeout; - -+ timeout = jiffies + msecs_to_jiffies(timeout_ms); - do { -- msleep(1); -- (*time)--; -+ int reg; -+ - reg = twl4030_madc_read(madc, status_reg); -- } while (((reg & TWL4030_MADC_BUSY) && !(reg & TWL4030_MADC_EOC_SW)) && -- (*time != 0)); -+ if (unlikely(reg < 0)) -+ return reg; -+ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW)) -+ return 0; -+ } while (!time_after(jiffies, timeout)); -+ -+ return -EAGAIN; - } - -+static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on); - int twl4030_madc_conversion(struct twl4030_madc_request *req) - { - const struct 
twl4030_madc_conversion_method *method; -- u8 wait_time, ch_msb, ch_lsb; -+ u8 ch_msb, ch_lsb; - int ret; - - if (unlikely(!req)) - return -EINVAL; - -+ mutex_lock(&the_madc->lock); -+ -+ twl4030_madc_set_power(the_madc, 1); -+ - /* Do we have a conversion request ongoing */ -- if (the_madc->requests[req->method].active) -- return -EBUSY; -+ if (the_madc->requests[req->method].active) { -+ ret = -EBUSY; -+ goto out; -+ } - - ch_msb = (req->channels >> 8) & 0xff; - ch_lsb = req->channels & 0xff; - - method = &twl4030_conversion_methods[req->method]; - -- mutex_lock(&the_madc->lock); -- - /* Select channels to be converted */ - twl4030_madc_write(the_madc, method->sel + 1, ch_msb); - twl4030_madc_write(the_madc, method->sel, ch_lsb); -@@ -308,12 +320,10 @@ int twl4030_madc_conversion(struct twl40 - the_madc->requests[req->method].active = 1; - - /* Wait until conversion is ready (ctrl register returns EOC) */ -- wait_time = 50; -- twl4030_madc_wait_conversion_ready_ms(the_madc, -- &wait_time, method->ctrl); -- if (wait_time == 0) { -+ ret = twl4030_madc_wait_conversion_ready(the_madc, 5, method->ctrl); -+ if (ret) { - dev_dbg(the_madc->dev, "conversion timeout!\n"); -- ret = -EAGAIN; -+ the_madc->requests[req->method].active = 0; - goto out; - } - -@@ -322,6 +332,8 @@ int twl4030_madc_conversion(struct twl40 - - the_madc->requests[req->method].active = 0; - -+ twl4030_madc_set_power(the_madc, 0); -+ - out: - mutex_unlock(&the_madc->lock); - -@@ -342,10 +354,15 @@ static int twl4030_madc_set_current_gene - - ret = twl4030_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, - ®val, TWL4030_BCI_BCICTL1); -- if (on) -+ if (on) { - regval |= (chan) ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN; -- else -+ regval |= TWL4030_BCI_MESBAT; -+ } -+ else { - regval &= (chan) ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN; -+ regval &= ~TWL4030_BCI_MESBAT; -+ } -+ - ret = twl4030_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE, - regval, TWL4030_BCI_BCICTL1); - -@@ -354,20 +371,28 @@ static int twl4030_madc_set_current_gene - - static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on) - { -+ int ret = 0; - u8 regval; - -- regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1); -- if (on) -+ if (on) { -+ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1); - regval |= TWL4030_MADC_MADCON; -- else -- regval &= ~TWL4030_MADC_MADCON; -- twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval); -+ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval); - -- return 0; -+ ret |= twl4030_madc_set_current_generator(madc, 0, 1); -+ -+ } else { -+ ret |= twl4030_madc_set_current_generator(madc, 0, 0); -+ -+ regval = twl4030_madc_read(madc, TWL4030_MADC_CTRL1); -+ regval &= ~TWL4030_MADC_MADCON; -+ twl4030_madc_write(madc, TWL4030_MADC_CTRL1, regval); -+ } -+ return ret; - } - --static int twl4030_madc_ioctl(struct inode *inode, struct file *filp, -- unsigned int cmd, unsigned long arg) -+static long twl4030_madc_ioctl(struct file *filp, unsigned int cmd, -+ unsigned long arg) - { - struct twl4030_madc_user_parms par; - int val, ret; -@@ -388,13 +413,16 @@ static int twl4030_madc_ioctl(struct ino - req.do_avg = par.average; - req.method = TWL4030_MADC_SW1; - req.func_cb = NULL; -+ req.type = TWL4030_MADC_WAIT; - - val = twl4030_madc_conversion(&req); -- if (val <= 0) { -- par.status = -1; -- } else { -+ if (likely(val > 0)) { - par.status = 0; - par.result = (u16)req.rbuf[par.channel]; -+ } else if (val == 0) { -+ par.status = -ENODATA; -+ } else { -+ par.status = val; - } - break; - } -@@ -413,12 +441,12 @@ static int 
twl4030_madc_ioctl(struct ino - - static struct file_operations twl4030_madc_fileops = { - .owner = THIS_MODULE, -- .ioctl = twl4030_madc_ioctl -+ .unlocked_ioctl = twl4030_madc_ioctl - }; - - static struct miscdevice twl4030_madc_device = { - .minor = MISC_DYNAMIC_MINOR, -- .name = "twl4030-madc", -+ .name = "twl4030-adc", - .fops = &twl4030_madc_fileops - }; - -@@ -447,16 +475,6 @@ static int __init twl4030_madc_probe(str - dev_dbg(&pdev->dev, "could not register misc_device\n"); - goto err_misc; - } -- twl4030_madc_set_power(madc, 1); -- twl4030_madc_set_current_generator(madc, 0, 1); -- -- ret = twl4030_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, -- ®val, TWL4030_BCI_BCICTL1); -- -- regval |= TWL4030_BCI_MESBAT; -- -- ret = twl4030_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE, -- regval, TWL4030_BCI_BCICTL1); - - ret = request_irq(platform_get_irq(pdev, 0), twl4030_madc_irq_handler, - 0, "twl4030_madc", madc); -@@ -487,8 +505,6 @@ static int __exit twl4030_madc_remove(st - { - struct twl4030_madc_data *madc = platform_get_drvdata(pdev); - -- twl4030_madc_set_power(madc, 0); -- twl4030_madc_set_current_generator(madc, 0, 0); - free_irq(platform_get_irq(pdev, 0), madc); - cancel_work_sync(&madc->ws); - misc_deregister(&twl4030_madc_device); -diff -Nurp linux-omap-2.6.28-omap1/drivers/i2c/chips/twl4030-poweroff.c linux-omap-2.6.28-nokia1/drivers/i2c/chips/twl4030-poweroff.c ---- linux-omap-2.6.28-omap1/drivers/i2c/chips/twl4030-poweroff.c 2011-06-22 13:14:18.013067750 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/i2c/chips/twl4030-poweroff.c 2011-06-22 13:19:32.663063277 +0200 -@@ -25,15 +25,36 @@ - #include - #include - -+#define STS_HW_CONDITIONS 0x0f -+#define STS_VBUS (1<<7) -+ - #define PWR_P1_SW_EVENTS 0x10 - #define PWR_DEVOFF (1<<0) - -+#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3 -+ - static void twl4030_poweroff(void) - { - u8 val; - int err; - - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val, -+ STS_HW_CONDITIONS); -+ if (err) -+ printk(KERN_WARNING "I2C error %d while reading TWL4030" -+ " PM_MASTER HW_CONDITIONS\n", err); -+ -+ if (val & STS_VBUS) { -+ printk(KERN_EMERG "twl4030-poweroff: VBUS on," -+ " forcing restart!\n"); -+ /* Set watchdog, Triton goes to WAIT-ON state. -+ VBUS will cause start up */ -+ twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 1, -+ TWL4030_WATCHDOG_CFG_REG_OFFS); -+ while (1); -+ } -+ -+ err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val, - PWR_P1_SW_EVENTS); - if (err) { - printk(KERN_WARNING "I2C error %d while reading TWL4030" -diff -Nurp linux-omap-2.6.28-omap1/drivers/leds/Kconfig linux-omap-2.6.28-nokia1/drivers/leds/Kconfig ---- linux-omap-2.6.28-omap1/drivers/leds/Kconfig 2011-06-22 13:14:18.243067747 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/leds/Kconfig 2011-06-22 13:19:32.713063276 +0200 -@@ -179,6 +179,12 @@ config LEDS_PCA955X - LED driver chips accessed via the I2C bus. Supported - devices include PCA9550, PCA9551, PCA9552, and PCA9553. - -+config LEDS_TWL4030_VIBRA -+ tristate "LED Support for TWL4030 Vibrator" -+ depends on LEDS_CLASS && TWL4030_CORE -+ help -+ This option enables support for TWL4030 Vibrator Driver. -+ - config LEDS_DA903X - tristate "LED Support for DA9030/DA9034 PMIC" - depends on LEDS_CLASS && PMIC_DA903X -@@ -186,6 +192,13 @@ config LEDS_DA903X - This option enables support for on-chip LED drivers found - on Dialog Semiconductor DA9030/DA9034 PMICs. 
- -+config LEDS_LP5523 -+ tristate "LP5523 LED driver chip" -+ depends on LEDS_CLASS && I2C -+ help -+ If you say yes here you get support for the National Semiconductor -+ LP5523 LED driver. -+ - comment "LED Triggers" - - config LEDS_TRIGGERS -diff -Nurp linux-omap-2.6.28-omap1/drivers/leds/leds-lp5523.c linux-omap-2.6.28-nokia1/drivers/leds/leds-lp5523.c ---- linux-omap-2.6.28-omap1/drivers/leds/leds-lp5523.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/leds/leds-lp5523.c 2011-06-22 13:19:32.713063276 +0200 -@@ -0,0 +1,980 @@ -+/* -+ * lp5523.c - LP5523 LED Driver -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Mathias Nyman -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define LP5523_DRIVER_NAME "lp5523" -+#define LP5523_REG_ENABLE 0x00 -+#define LP5523_REG_OP_MODE 0x01 -+#define LP5523_REG_RATIOMETRIC_MSB 0x02 -+#define LP5523_REG_RATIOMETRIC_LSB 0x03 -+#define LP5523_REG_ENABLE_LEDS_MSB 0x04 -+#define LP5523_REG_ENABLE_LEDS_LSB 0x05 -+#define LP5523_REG_LED_CNTRL_BASE 0x06 -+#define LP5523_REG_LED_PWM_BASE 0x16 -+#define LP5523_REG_LED_CURRENT_BASE 0x26 -+#define LP5523_REG_CONFIG 0x36 -+#define LP5523_REG_CHANNEL1_PC 0x37 -+#define LP5523_REG_CHANNEL2_PC 0x38 -+#define LP5523_REG_CHANNEL3_PC 0x39 -+#define LP5523_REG_STATUS 0x3a -+#define LP5523_REG_GPO 0x3b -+#define LP5523_REG_VARIABLE 0x3c -+#define LP5523_REG_RESET 0x3d -+#define LP5523_REG_TEMP_CTRL 0x3e -+#define LP5523_REG_TEMP_READ 0x3f -+#define LP5523_REG_TEMP_WRITE 0x40 -+#define LP5523_REG_LED_TEST_CTRL 0x41 -+#define LP5523_REG_LED_TEST_ADC 0x42 -+#define LP5523_REG_ENG1_VARIABLE 0x45 -+#define LP5523_REG_ENG2_VARIABLE 0x46 -+#define LP5523_REG_ENG3_VARIABLE 0x47 -+#define LP5523_REG_MASTER_FADER1 0x48 -+#define LP5523_REG_MASTER_FADER2 0x49 -+#define LP5523_REG_MASTER_FADER3 0x4a -+#define LP5523_REG_CH1_PROG_START 0x4c -+#define LP5523_REG_CH2_PROG_START 0x4d -+#define LP5523_REG_CH3_PROG_START 0x4e -+#define LP5523_REG_PROG_PAGE_SEL 0x4f -+#define LP5523_REG_PROG_MEM 0x50 -+ -+#define LP5523_CMD_LOAD 0x15 /* 00010101 */ -+#define LP5523_CMD_RUN 0x2a /* 00101010 */ -+#define LP5523_CMD_DISABLED 0x00 /* 00000000 */ -+ -+#define LP5523_ENABLE 0x40 -+#define LP5523_AUTO_INC 0x40 -+#define LP5523_PWR_SAVE 0x20 -+#define LP5523_PWM_PWR_SAVE 0x04 -+#define LP5523_CP_1 0x08 -+#define LP5523_CP_1_5 0x10 -+#define LP5523_CP_AUTO 0x18 -+#define LP5523_INT_CLK 0x01 -+#define LP5523_AUTO_CLK 0x02 -+#define LP5523_EN_LEDTEST 0x80 -+#define LP5523_LEDTEST_DONE 0x80 -+ -+#define LP5523_DEFAULT_CURRENT 50 /* microAmps */ -+#define LP5523_PROGRAM_LENGTH 32 /* in bytes */ -+#define LP5523_PROGRAM_PAGES 6 -+#define LP5523_ADC_SHORTCIRC_LIM 80 -+#define LP5523_ADC_OPEN_LIM 180 -+ -+#define LP5523_LEDS 9 -+#define 
LP5523_CHANNELS 3 -+ -+#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */ -+ -+#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */ -+ -+#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING -+ -+ -+#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led))) -+#define SHIFT_MASK(id) (((id) - 1) * 2) -+ -+ -+ -+struct lp5523_engine { -+ const struct attribute_group *attributes; -+ int id; -+ u8 mode; -+ u8 prog_page; -+ u8 mux_page; -+ u16 led_mux; -+ u8 engine_mask; -+}; -+ -+struct lp5523_led { -+ int id; -+ u8 led_nr; -+ u8 led_current; -+ struct led_classdev cdev; -+}; -+ -+struct lp5523_chip { -+ struct mutex lock; -+ struct i2c_client *client; -+ struct work_struct work; -+ u8 active_led; -+ struct lp5523_engine engines[LP5523_CHANNELS]; -+ struct lp5523_led leds[LP5523_LEDS]; -+ u8 num_leds; -+ int irq; -+ int chip_en; -+ /* for initialisation */ -+ wait_queue_head_t configured; -+ u8 engine_config; -+}; -+ -+#define cdev_to_led(c) container_of(c, struct lp5523_led, cdev) -+ -+ -+static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) -+{ -+ return container_of(engine, struct lp5523_chip, -+ engines[engine->id - 1]); -+} -+ -+static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) -+{ -+ return container_of(led, struct lp5523_chip, -+ leds[led->id]); -+} -+ -+ -+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode); -+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode); -+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern); -+ -+ -+static void lp5523_work(struct work_struct *work); -+static irqreturn_t lp5523_irq(int irq, void *_chip); -+ -+ -+static int lp5523_write(struct i2c_client *client, u8 reg, u8 value) -+{ -+ return i2c_smbus_write_byte_data(client, reg, value); -+} -+ -+static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf) -+{ -+ s32 ret = i2c_smbus_read_byte_data(client, reg); -+ -+ if (ret < 0) -+ return -EIO; -+ -+ *buf = ret; -+ return 0; -+} -+ -+static int lp5523_detect(struct i2c_client *client) -+{ -+ int ret; -+ u8 buf; -+ -+ if ((ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40))) -+ return ret; -+ if ((ret = lp5523_read(client, LP5523_REG_ENABLE, &buf))) -+ return ret; -+ if (buf == 0x40) -+ return 0; -+ else -+ return -ENODEV; -+} -+ -+static int lp5523_configure(struct i2c_client *client) -+{ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); -+ int i, ret = 0; -+ -+ /* one pattern per engine setting led mux start and stop addresses */ -+ u8 pattern[][LP5523_PROGRAM_LENGTH] = { -+ { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0}, -+ { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0}, -+ { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, -+ }; -+ -+ INIT_WORK(&chip->work, lp5523_work); -+ ret |= request_irq(chip->irq, lp5523_irq, -+ LP5523_IRQ_FLAGS, LP5523_DRIVER_NAME, chip); -+ if (ret) { -+ dev_err(&client->dev, "could not get IRQ = %d\n", -+ chip->irq); -+ goto fail1; -+ } -+ -+ -+ lp5523_write(client, LP5523_REG_RESET, 0xff); -+ -+ msleep(10); -+ -+ ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); -+ /* Chip startup time after reset is 500 us */ -+ msleep(1); -+ -+ ret |= lp5523_write(client, LP5523_REG_CONFIG, -+ LP5523_AUTO_INC | LP5523_PWR_SAVE | -+ LP5523_CP_AUTO | LP5523_AUTO_CLK | -+ LP5523_PWM_PWR_SAVE); -+ -+ /* turn on all leds */ -+ ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01); -+ ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff); -+ -+ /* set current for all leds */ -+ for (i = 0; i < chip->num_leds; i++) -+ 
lp5523_write(client, -+ LP5523_REG_LED_CURRENT_BASE + chip->leds[i].led_nr, -+ chip->leds[i].led_current); -+ -+ /* hardcode 32 bytes of memory for each engine from program memory */ -+ ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00); -+ ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10); -+ ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20); -+ -+ /* write led mux address space for each channel */ -+ ret |= lp5523_load_program(&chip->engines[0], pattern[0]); -+ ret |= lp5523_load_program(&chip->engines[1], pattern[1]); -+ ret |= lp5523_load_program(&chip->engines[2], pattern[2]); -+ -+ if (ret) { -+ dev_err(&client->dev, "could not load mux programs\n"); -+ goto fail2; -+ } -+ -+ init_waitqueue_head(&chip->configured); -+ -+ chip->engine_config = 0; -+ -+ /* set all engines exec state and mode to run 00101010 */ -+ ret |= lp5523_write(client, LP5523_REG_ENABLE, -+ (LP5523_CMD_RUN | LP5523_ENABLE)); -+ -+ ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN); -+ -+ if (ret) { -+ dev_err(&client->dev, "could not start mux programs\n"); -+ goto fail2; -+ } -+ -+ ret |= wait_event_interruptible(chip->configured, -+ (chip->engine_config == LP5523_ENG_STATUS_MASK)); -+ -+ if (ret) { -+ dev_err(&client->dev, -+ "got signal while waiting for interrupt\n"); -+ goto fail2; -+ } -+ -+ dev_info(&client->dev, "disabling engines\n"); -+ -+ ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED); -+ -+fail2: -+ free_irq(chip->irq, chip); -+fail1: -+ return ret; -+} -+ -+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode) -+{ -+ struct lp5523_chip *chip = engine_to_lp5523(engine); -+ struct i2c_client *client = chip->client; -+ int ret; -+ u8 engine_state; -+ -+ ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state); -+ if (ret) -+ goto fail; -+ -+ engine_state &= ~(engine->engine_mask); -+ -+ /* set mode only for this engine */ -+ mode &= engine->engine_mask; -+ -+ engine_state |= mode; -+ -+ ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state); -+fail: -+ return ret; -+} -+ -+static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux) -+{ -+ struct lp5523_chip *chip = engine_to_lp5523(engine); -+ struct i2c_client *client = chip->client; -+ int ret = 0; -+ -+ ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); -+ -+ ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page); -+ ret |= lp5523_write(client, LP5523_REG_PROG_MEM, -+ (u8)(mux >> 8)); -+ ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux)); -+ engine->led_mux = mux; -+ -+ return ret; -+} -+ -+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern) -+{ -+ struct lp5523_chip *chip = engine_to_lp5523(engine); -+ struct i2c_client *client = chip->client; -+ -+ int ret = 0; -+ -+ ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); -+ -+ ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, -+ engine->prog_page); -+ ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM, -+ LP5523_PROGRAM_LENGTH, pattern); -+ -+ return ret; -+} -+ -+static int lp5523_run_program(struct lp5523_engine *engine) -+{ -+ struct lp5523_chip *chip = engine_to_lp5523(engine); -+ struct i2c_client *client = chip->client; -+ int ret; -+ -+ ret = lp5523_write(client, LP5523_REG_ENABLE, LP5523_CMD_RUN | LP5523_ENABLE); -+ if (ret) -+ goto fail; -+ -+ ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN); -+fail: -+ return ret; -+} -+ -+static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len) -+{ -+ int i; -+ u16 
tmp_mux = 0; -+ len = len < LP5523_LEDS ? len : LP5523_LEDS; -+ for (i = 0; i < len; i++) { -+ switch (buf[i]) { -+ case '1': -+ tmp_mux |= (1 << i); -+ break; -+ case '0': -+ break; -+ case '\n': -+ i = len; -+ break; -+ default: -+ return -1; -+ } -+ } -+ *mux = tmp_mux; -+ -+ return 0; -+} -+ -+static void lp5523_mux_to_array(u16 led_mux, char *array) -+{ -+ int i, pos = 0; -+ for (i = 0; i < LP5523_LEDS; i++) -+ pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i)); -+ -+ array[pos] = '\0'; -+} -+ -+/*--------------------------------------------------------------*/ -+/* Sysfs interface */ -+/*--------------------------------------------------------------*/ -+ -+#define show_leds(nr) \ -+static ssize_t show_engine##nr##_leds(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct i2c_client *client = to_i2c_client(dev); \ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); \ -+ char mux[LP5523_LEDS + 1]; \ -+ \ -+ lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux); \ -+ \ -+ return sprintf(buf, "%s\n", mux); \ -+} -+show_leds(1) -+show_leds(2) -+show_leds(3) -+ -+#define store_leds(nr) \ -+static ssize_t store_engine##nr##_leds(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t len) \ -+{ \ -+ struct i2c_client *client = to_i2c_client(dev); \ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); \ -+ u16 mux = 0; \ -+ \ -+ if (lp5523_mux_parse(buf, &mux, len)) \ -+ return -EINVAL; \ -+ \ -+ if (lp5523_load_mux(&chip->engines[nr - 1], mux)) \ -+ return -EINVAL; \ -+ \ -+ return len; \ -+} -+store_leds(1) -+store_leds(2) -+store_leds(3) -+ -+static ssize_t lp5523_selftest(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct lp5523_chip *chip = i2c_get_clientdata(client); -+ int i, ret, pos = 0; -+ u8 status, adc; -+ -+ mutex_lock(&chip->lock); -+ -+ for (i = 0; i < LP5523_LEDS; i++) { -+ lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); -+ /* let current stabilize 2ms before measurements start */ -+ msleep(2); -+ lp5523_write(chip->client, -+ LP5523_REG_LED_TEST_CTRL, -+ LP5523_EN_LEDTEST | i); -+ /* ledtest takes 2.7ms */ -+ msleep(3); -+ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); -+ if (!(status & LP5523_LEDTEST_DONE)) -+ msleep(3); -+ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); -+ -+ if (adc > LP5523_ADC_OPEN_LIM || adc < LP5523_ADC_SHORTCIRC_LIM) -+ pos += sprintf(buf + pos, "LED %d FAIL\n", i); -+ -+ lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00); -+ } -+ if (pos == 0) -+ pos = sprintf(buf, "OK\n"); -+ -+ mutex_unlock(&chip->lock); -+ -+ return pos; -+} -+ -+static void lp5523_set_brightness(struct led_classdev *cdev, -+ enum led_brightness brightness) -+{ -+ struct lp5523_led *led = cdev_to_led(cdev); -+ struct lp5523_chip *chip = led_to_lp5523(led); -+ struct i2c_client *client = chip->client; -+ -+ mutex_lock(&chip->lock); -+ -+ lp5523_write(client, -+ LP5523_REG_LED_PWM_BASE + led->led_nr, -+ (u8)brightness); -+ -+ mutex_unlock(&chip->lock); -+} -+ -+static int lp5523_do_store_load(struct lp5523_engine *engine, -+ const char *buf, size_t len) -+{ -+ struct lp5523_chip *chip = engine_to_lp5523(engine); -+ struct i2c_client *client = chip->client; -+ int ret, nrchars, offset = 0, i = 0; -+ char c[3]; -+ unsigned cmd; -+ u8 pattern[LP5523_PROGRAM_LENGTH] = {0}; -+ -+ while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) { -+ /* separate sscanfs 
because length is working only for %s */ -+ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); -+ ret = sscanf(c, "%2x", &cmd); -+ if (ret != 1) -+ goto fail; -+ pattern[i] = (u8)cmd; -+ -+ offset += nrchars; -+ i++; -+ } -+ -+ /* pattern commands are always two bytes long */ -+ if (i % 2) -+ goto fail; -+ -+ mutex_lock(&chip->lock); -+ -+ ret = lp5523_load_program(engine, pattern); -+ mutex_unlock(&chip->lock); -+ -+ if (ret) { -+ dev_err(&client->dev, "failed loading pattern\n"); -+ return ret; -+ } -+ -+ return len; -+fail: -+ dev_err(&client->dev, "wrong pattern format\n"); -+ return -EINVAL; -+} -+ -+#define store_load(nr) \ -+static ssize_t store_engine##nr##_load(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t len) \ -+{ \ -+ struct i2c_client *client = to_i2c_client(dev); \ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); \ -+ int ret; \ -+ ret = lp5523_do_store_load(&chip->engines[nr - 1], buf, len); \ -+ return ret; \ -+} -+store_load(1) -+store_load(2) -+store_load(3) -+ -+#define show_mode(nr) \ -+static ssize_t show_engine##nr##_mode(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct i2c_client *client = to_i2c_client(dev); \ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); \ -+ switch (chip->engines[nr - 1].mode) { \ -+ case LP5523_CMD_RUN: \ -+ return sprintf(buf, "run\n"); \ -+ case LP5523_CMD_LOAD: \ -+ return sprintf(buf, "load\n"); \ -+ case LP5523_CMD_DISABLED: \ -+ return sprintf(buf, "disabled\n"); \ -+ default: \ -+ return sprintf(buf, "disabled\n"); \ -+ } \ -+} -+show_mode(1) -+show_mode(2) -+show_mode(3) -+ -+#define store_mode(nr) \ -+static ssize_t store_engine##nr##_mode(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t len) \ -+{ \ -+ struct i2c_client *client = to_i2c_client(dev); \ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); \ -+ struct lp5523_engine *engine = &chip->engines[nr - 1]; \ -+ mutex_lock(&chip->lock); \ -+ \ -+ if (!strncmp(buf, "run", 3)) \ -+ lp5523_set_mode(engine, LP5523_CMD_RUN); \ -+ else if (!strncmp(buf, "load", 4)) \ -+ lp5523_set_mode(engine, LP5523_CMD_LOAD); \ -+ else if (!strncmp(buf, "disabled", 8)) \ -+ lp5523_set_mode(engine, LP5523_CMD_DISABLED); \ -+ \ -+ mutex_unlock(&chip->lock); \ -+ return len; \ -+} -+store_mode(1) -+store_mode(2) -+store_mode(3) -+ -+static ssize_t show_current(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct led_classdev *led_cdev = dev_get_drvdata(dev); -+ struct lp5523_led *led = cdev_to_led(led_cdev); -+ -+ return sprintf(buf, "%d\n", led->led_current); -+} -+ -+static ssize_t store_current(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct led_classdev *led_cdev = dev_get_drvdata(dev); -+ struct lp5523_led *led = cdev_to_led(led_cdev); -+ struct lp5523_chip *chip = led_to_lp5523(led); -+ ssize_t ret = -EINVAL; -+ char *after; -+ unsigned long curr = simple_strtoul(buf, &after, 10); -+ size_t count = after - buf; -+ -+ if (*after && isspace(*after)) -+ count++; -+ -+ if (count == len) { -+ ret = count; -+ -+ mutex_lock(&chip->lock); -+ lp5523_write(chip->client, -+ LP5523_REG_LED_CURRENT_BASE + led->led_nr, -+ (u8)curr); -+ mutex_unlock(&chip->lock); -+ -+ led->led_current = (u8)curr; -+ } -+ return ret; -+} -+ -+/* led class device attributes */ -+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); -+ -+/* device attributes */ -+static 
DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, -+ show_engine1_mode, store_engine1_mode); -+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, -+ show_engine2_mode, store_engine2_mode); -+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, -+ show_engine3_mode, store_engine3_mode); -+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, -+ show_engine1_leds, store_engine1_leds); -+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, -+ show_engine2_leds, store_engine2_leds); -+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, -+ show_engine3_leds, store_engine3_leds); -+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); -+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); -+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); -+static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); -+ -+static struct attribute *lp5523_attributes[] = { -+ &dev_attr_engine1_mode.attr, -+ &dev_attr_engine2_mode.attr, -+ &dev_attr_engine3_mode.attr, -+ &dev_attr_selftest.attr, -+ NULL -+}; -+ -+static struct attribute *lp5523_engine1_attributes[] = { -+ &dev_attr_engine1_load.attr, -+ &dev_attr_engine1_leds.attr, -+ NULL -+}; -+ -+static struct attribute *lp5523_engine2_attributes[] = { -+ &dev_attr_engine2_load.attr, -+ &dev_attr_engine2_leds.attr, -+ NULL -+}; -+ -+static struct attribute *lp5523_engine3_attributes[] = { -+ &dev_attr_engine3_load.attr, -+ &dev_attr_engine3_leds.attr, -+ NULL -+}; -+ -+static const struct attribute_group lp5523_group = { -+ .attrs = lp5523_attributes, -+}; -+ -+static const struct attribute_group lp5523_engine_group[] = { -+ {.attrs = lp5523_engine1_attributes }, -+ {.attrs = lp5523_engine2_attributes }, -+ {.attrs = lp5523_engine3_attributes }, -+}; -+ -+static int lp5523_register_sysfs(struct i2c_client *client) -+{ -+ struct device *dev = &client->dev; -+ int ret; -+ -+ ret = sysfs_create_group(&dev->kobj, &lp5523_group); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static void lp5523_unregister_sysfs(struct i2c_client *client) -+{ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); -+ struct device *dev = &client->dev; -+ int i; -+ -+ sysfs_remove_group(&dev->kobj, &lp5523_group); -+ -+ for (i = 0; i < LP5523_CHANNELS; i++) { -+ if (chip->engines[i].mode == LP5523_CMD_LOAD) -+ sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]); -+ } -+ -+ for (i = 0; i < chip->num_leds; i++) -+ device_remove_file(chip->leds[i].cdev.dev, -+ &dev_attr_led_current); -+} -+ -+/*--------------------------------------------------------------*/ -+/* Set chip operating mode */ -+/*--------------------------------------------------------------*/ -+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode) -+{ -+ /* engine to chip */ -+ struct lp5523_chip *chip = engine_to_lp5523(engine); -+ struct i2c_client *client = chip->client; -+ struct device *dev = &client->dev; -+ int ret = 0; -+ -+ /* if in that mode already do nothing, except for run */ -+ if (mode == engine->mode && mode != LP5523_CMD_RUN) -+ return 0; -+ -+ if (mode == LP5523_CMD_RUN) -+ ret = lp5523_run_program(engine); -+ -+ else if (mode == LP5523_CMD_LOAD) { -+ -+ lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); -+ lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); -+ -+ if ((ret = sysfs_create_group(&dev->kobj, engine->attributes))) -+ return ret; -+ } -+ -+ else if (mode == LP5523_CMD_DISABLED) -+ lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); -+ -+ /* remove load attribute from sysfs if not in load mode */ -+ if (engine->mode == 
LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD) -+ sysfs_remove_group(&dev->kobj, engine->attributes); -+ -+ engine->mode = mode; -+ -+ return ret; -+} -+ -+/*--------------------------------------------------------------*/ -+/* Probe, Attach, Remove */ -+/*--------------------------------------------------------------*/ -+static int __init lp5523_init_engine(struct lp5523_engine *engine, int id) -+{ -+ if (id < 1 || id > LP5523_CHANNELS) -+ return -1; -+ engine->id = id; -+ engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id); -+ engine->prog_page = id - 1; -+ engine->mux_page = id + 2; -+ engine->attributes = &lp5523_engine_group[id - 1]; -+ -+ return 0; -+} -+ -+static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, -+ int id, struct lp5523_platform_data *pdata) -+{ -+ char name[32]; -+ if (id >= LP5523_LEDS) -+ return -1; -+ led->led_current = LP5523_DEFAULT_CURRENT; -+ led->id = id; -+ led->led_nr = pdata->led_config[id].led_nr; -+ -+ if (pdata->led_config[id].led_current) -+ led->led_current = pdata->led_config[id].led_current; -+ if (pdata->led_config[id].name) -+ snprintf(name, 32, "lp5523:%s", -+ pdata->led_config[id].name); -+ else -+ snprintf(name, 32, "lp5523:led%d", id); -+ -+ led->cdev.name = name; -+ led->cdev.brightness_set = lp5523_set_brightness; -+ if (led_classdev_register(dev, &led->cdev) < 0) { -+ dev_err(dev, "couldn't register led %d\n", id); -+ return -1; -+ } -+ if (device_create_file(led->cdev.dev, &dev_attr_led_current) < 0) { -+ dev_err(dev, "couldn't register current attribute\n"); -+ led_classdev_unregister(&led->cdev); -+ return -1; -+ } -+ return 0; -+} -+ -+static struct i2c_driver lp5523_driver; -+ -+/* Interrupt handler bottom half. */ -+static void lp5523_work(struct work_struct *work) -+{ -+ struct lp5523_chip *chip = -+ container_of(work, struct lp5523_chip, work); -+ u8 reg; -+ -+ dev_info(&chip->client->dev, "got interrupt from led chip\n"); -+ -+ mutex_lock(&chip->lock); -+ -+ if (chip->engine_config != LP5523_ENG_STATUS_MASK) { -+ -+ /* ack the interrupt */ -+ lp5523_read(chip->client, LP5523_REG_STATUS, ®); -+ -+ dev_info(&chip->client->dev, -+ "interrupt from led chip %x\n", reg); -+ -+ chip->engine_config |= (reg & LP5523_ENG_STATUS_MASK); -+ -+ if (chip->engine_config == LP5523_ENG_STATUS_MASK) { -+ -+ dev_info(&chip->client->dev, -+ "all engines configured\n"); -+ wake_up(&chip->configured); -+ } else { -+ dev_info(&chip->client->dev, -+ "engine_config == %x\n", chip->engine_config); -+ } -+ } -+ -+ mutex_unlock(&chip->lock); -+} -+ -+/* -+ * We cannot use I2C in interrupt context, so we just schedule work. 
-+ */ -+static irqreturn_t lp5523_irq(int irq, void *_chip) -+{ -+ struct lp5523_chip *chip = _chip; -+ schedule_work(&chip->work); -+ -+ return IRQ_HANDLED; -+} -+ -+ -+static int lp5523_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct lp5523_chip *chip; -+ struct lp5523_platform_data *pdata; -+ int ret, i; -+ -+ chip = kzalloc(sizeof(*chip), GFP_KERNEL); -+ if (!chip) -+ return -ENOMEM; -+ -+ i2c_set_clientdata(client, chip); -+ chip->client = client; -+ -+ pdata = client->dev.platform_data; -+ -+ if (!pdata) { -+ dev_err(&client->dev, "no platform data\n"); -+ ret = -EINVAL; -+ goto fail1; -+ } -+ -+ mutex_init(&chip->lock); -+ -+ if ((ret = lp5523_detect(client))) -+ goto fail1; -+ -+ dev_info(&client->dev, "LP5523 Programmable led chip found\n"); -+ -+ chip->irq = pdata->irq; -+ chip->chip_en = pdata->chip_en; -+ -+ /* Initialize engines */ -+ for (i = 0; i < LP5523_CHANNELS; i++) { -+ ret = lp5523_init_engine(&chip->engines[i], i + 1); -+ if (ret) { -+ dev_err(&client->dev, "error initializing engine\n"); -+ goto fail1; -+ } -+ } -+ ret = lp5523_configure(client); -+ if (ret < 0) { -+ dev_err(&client->dev, "error configuring chip \n"); -+ goto fail1; -+ } -+ -+ /* Initialize leds */ -+ chip->num_leds = pdata->num_leds; -+ for (i = 0; i < pdata->num_leds; i++) { -+ ret = lp5523_init_led(&chip->leds[i], &client->dev, i, pdata); -+ if (ret) { -+ dev_err(&client->dev, "error initializing leds\n"); -+ goto fail2; -+ } -+ } -+ -+ ret = lp5523_register_sysfs(client); -+ if (ret) { -+ dev_err(&client->dev, "registering sysfs failed \n"); -+ goto fail2; -+ } -+ return ret; -+fail2: -+ for (i = 0; i < pdata->num_leds; i++) -+ led_classdev_unregister(&chip->leds[i].cdev); -+ -+fail1: -+ kfree(chip); -+ return ret; -+} -+ -+static int lp5523_remove(struct i2c_client *client) -+{ -+ struct lp5523_chip *chip = i2c_get_clientdata(client); -+ int i; -+ -+ lp5523_unregister_sysfs(client); -+ -+ for (i = 0; i < chip->num_leds; i++) -+ led_classdev_unregister(&chip->leds[i].cdev); -+ -+ kfree(chip); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id lp5523_id[] = { -+ { "lp5523", 0 }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(i2c, lp5523_id); -+ -+static struct i2c_driver lp5523_driver = { -+ .driver = { -+ .name = LP5523_DRIVER_NAME, -+ }, -+ .probe = lp5523_probe, -+ .remove = lp5523_remove, -+ .id_table = lp5523_id, -+}; -+ -+static int __init lp5523_init(void) -+{ -+ int ret; -+ -+ ret = i2c_add_driver(&lp5523_driver); -+ -+ if (ret < 0) -+ printk(KERN_ALERT "Adding lp5523 driver failed \n"); -+ -+ return ret; -+} -+ -+static void __exit lp5523_exit(void) -+{ -+ i2c_del_driver(&lp5523_driver); -+} -+ -+MODULE_AUTHOR("Mathias Nyman "); -+MODULE_DESCRIPTION("lp5523 LED driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(lp5523_init); -+module_exit(lp5523_exit); -diff -Nurp linux-omap-2.6.28-omap1/drivers/leds/leds-twl4030-vibra.c linux-omap-2.6.28-nokia1/drivers/leds/leds-twl4030-vibra.c ---- linux-omap-2.6.28-omap1/drivers/leds/leds-twl4030-vibra.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/leds/leds-twl4030-vibra.c 2011-06-22 13:19:32.713063276 +0200 -@@ -0,0 +1,456 @@ -+/* -+ * leds-twl4030-vibra.c - TWL4030 Vibrator driver -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Written by Henrik Saari -+ * Updates by Felipe Balbi -+ * -+ * This file is subject to the terms and conditions of the GNU General -+ * Public License. See the file "COPYING" in the main directory of this -+ * archive for more details. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* MODULE ID1 */ -+#define CODEC_MODE 0x01 -+# define CODECPDZ (1 << 1) -+#define VIBRA_CTL 0x45 -+# define VIBRA_EN (1 << 0) -+# define VIBRA_DIR (1 << 1) -+#define VIBRA_SET 0x46 /* PWM register */ -+# define VIB_CFG (1 << 3) -+# define VIB_PWM (1 << 2) -+#define APLL_CTL 0x3a -+# define APLL_EN (1 << 4) -+# define APLL_FREQ_26 0x06 -+ -+/* MODULE ID2 */ -+#define LEDEN 0xee -+ -+/* MODULE ID3 */ -+#define VIBRA_CFG 0x60 -+ -+#define MAX_SEQ_LEN 5 -+ -+struct pulse_info { -+ unsigned int dir:1; -+ unsigned int pwm:31; -+ unsigned long duration; -+}; -+ -+struct vibra_info { -+ struct mutex lock; -+ struct device *dev; -+ -+ struct workqueue_struct *workqueue; -+ struct delayed_work work; -+ struct work_struct led_work; -+ -+ struct led_classdev vibra; -+ -+ unsigned long duration; -+ int enabled; -+ int speed; -+ -+ struct pulse_info seq[MAX_SEQ_LEN]; -+ unsigned int iseq; -+ unsigned int nseq; -+}; -+ -+/* Powers H-Bridge and enables audio clk */ -+static void vibra_enable(struct vibra_info *info) -+{ -+ u8 reg; -+ -+ /* Disable LEDA & LEDB, cannot be used with vibra */ -+ twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, ®, LEDEN); -+ reg &= ~0x03; -+ twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, LEDEN, reg); -+ -+ /* Turn codec on */ -+ twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, -+ ®, CODEC_MODE); -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ (reg |= CODECPDZ), CODEC_MODE); -+ -+ /* turn H-Bridge on */ -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ VIBRA_EN, VIBRA_CTL); -+ -+ /* set audio clock on */ -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ (APLL_EN | APLL_FREQ_26), APLL_CTL); -+ -+ info->enabled = 1; -+} -+ -+static void vibra_disable(struct vibra_info *info) -+{ -+ u8 reg; -+ -+ /* Power down H-Bridge */ -+ twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, -+ ®, VIBRA_CTL); -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ (reg & ~VIBRA_EN), VIBRA_CTL); -+ -+ /* Turn codec OFF */ -+ twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, -+ ®, CODEC_MODE); -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ reg & ~CODECPDZ, CODEC_MODE); -+ -+ /* disable audio clk */ -+ twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, -+ ®, APLL_CTL); -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ (reg & ~APLL_EN), APLL_CTL); -+ info->enabled = 0; -+} -+ -+static int vibra_seq(struct vibra_info *info, const struct pulse_info *seq, -+ unsigned int n) -+{ -+ if (n == 0 || n > ARRAY_SIZE(info->seq)) -+ return -EINVAL; -+ -+ /* stop previous sequence, if any */ -+ cancel_delayed_work_sync(&info->work); -+ -+ mutex_lock(&info->lock); -+ -+ info->iseq = 0; -+ info->nseq = n; -+ memcpy(info->seq, seq, n * sizeof(*seq)); -+ -+ queue_delayed_work(info->workqueue, &info->work, 0); -+ -+ mutex_unlock(&info->lock); -+ -+ return 0; -+} -+ -+static void vibra_pwm(struct vibra_info *info, int dir, int pwm) -+{ -+ struct pulse_info seq[2] = { -+ { .dir = dir, .pwm = pwm, .duration = info->duration }, -+ }; -+ -+ vibra_seq(info, seq, 
ARRAY_SIZE(seq)); -+} -+ -+static void vibra_next_pulse(struct vibra_info *info) -+{ -+ unsigned int pwm; -+ unsigned int dir; -+ unsigned long duration; -+ -+ mutex_lock(&info->lock); -+ -+ pwm = info->seq[info->iseq].pwm; -+ dir = info->seq[info->iseq].dir; -+ duration = info->seq[info->iseq].duration; -+ -+ if (pwm) { -+ u8 reg; -+ -+ if (!info->enabled) -+ vibra_enable(info); -+ -+ /* set vibra rotation direction */ -+ twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, -+ ®, VIBRA_CTL); -+ reg = (dir) ? (reg | VIBRA_DIR) : (reg & ~VIBRA_DIR); -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ reg, VIBRA_CTL); -+ -+ /* set PWM, 1 = max, 255 = min */ -+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, -+ 256 - pwm, VIBRA_SET); -+ } else { -+ vibra_disable(info); -+ } -+ -+ info->iseq++; -+ if (info->iseq < info->nseq && duration) -+ queue_delayed_work(info->workqueue, &info->work, -+ msecs_to_jiffies(duration)); -+ -+ mutex_unlock(&info->lock); -+} -+ -+static void vibra_work(struct work_struct *work) -+{ -+ struct vibra_info *info = container_of(work, -+ struct vibra_info, work.work); -+ -+ vibra_next_pulse(info); -+} -+ -+static void vibra_led_work(struct work_struct *work) -+{ -+ struct vibra_info *info = container_of(work, -+ struct vibra_info, led_work); -+ -+ vibra_pwm(info, 1, info->speed); -+} -+ -+static void vibra_led_set(struct led_classdev *led, -+ enum led_brightness value) -+{ -+ struct vibra_info *info = container_of(led, struct vibra_info, vibra); -+ -+ info->speed = value; -+ -+ schedule_work(&info->led_work); -+} -+ -+/******************************************************************************* -+ * SYSFS * -+ ******************************************************************************/ -+ -+static ssize_t vibra_set_seq(struct device *dev, -+ struct device_attribute *attr, const char *buf, size_t len) -+{ -+ const char *p; -+ unsigned int i; -+ struct vibra_info *info = dev_get_drvdata(dev); -+ struct pulse_info seq[ARRAY_SIZE(info->seq)]; -+ -+ for (p = buf, i = 0; -+ *p != '\0' && *p != '\n' && i < ARRAY_SIZE(seq); i++) { -+ long val; -+ char *endp; -+ -+ /* speed and direction */ -+ val = simple_strtol(p, &endp, 0); -+ if (p == endp || *endp != ' ') -+ return -EINVAL; -+ for (p = endp; *p == ' '; p++) -+ ; -+ -+ seq[i].dir = val < 0 ? 
1 : 0; -+ seq[i].pwm = min(abs(val), 255); -+ -+ /* duration */ -+ val = simple_strtol(p, &endp, 0); -+ if (p == endp || -+ (*endp != ' ' && *endp != '\0' && *endp != '\n')) -+ return -EINVAL; -+ for (p = endp; *p == ' '; p++) -+ ; -+ -+ if (val < 0) -+ return -EINVAL; -+ -+ seq[i].duration = val; -+ } -+ -+ /* no room for end of sequence */ -+ if (i == ARRAY_SIZE(seq)) -+ return -EINVAL; -+ -+ /* end of sequence */ -+ seq[i].pwm = 0; -+ seq[i].dir = 0; -+ seq[i++].duration = 0; -+ -+ vibra_seq(info, seq, i); -+ -+ return len; -+} -+ -+static ssize_t vibra_set_pwm(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ long pwm; -+ int ret; -+ int dir = 0; -+ -+ struct vibra_info *info = dev_get_drvdata(dev); -+ -+ ret = strict_strtol(buf, 0, &pwm); -+ if (ret < 0) -+ return -EINVAL; -+ if (pwm < 0) -+ dir = 1; -+ pwm = abs(pwm); -+ if (pwm > 255) -+ pwm = 255; -+ vibra_pwm(info, dir, pwm); -+ -+ return len; -+} -+ -+static ssize_t vibra_show_pwm(struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ struct vibra_info *info = dev_get_drvdata(dev); -+ -+ u8 reg; -+ mutex_lock(&info->lock); -+ twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, ®, VIBRA_SET); -+ mutex_unlock(&info->lock); -+ -+ return sprintf(buf, "%d\n", 256 - reg); -+} -+ -+static ssize_t vibra_set_duration(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ unsigned long duration; -+ int ret; -+ struct vibra_info *info = dev_get_drvdata(dev); -+ -+ ret = strict_strtoul(buf, 0, &duration); -+ if (ret < 0) -+ return -EINVAL; -+ -+ mutex_lock(&info->lock); -+ info->duration = duration; -+ mutex_unlock(&info->lock); -+ -+ return len; -+} -+ -+static ssize_t vibra_show_duration(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct vibra_info *info = dev_get_drvdata(dev); -+ -+ return sprintf(buf, "%ld\n", info->duration); -+} -+ -+static struct device_attribute vibra_attrs[] = { -+ __ATTR(speed, S_IRUGO | S_IWUSR, -+ vibra_show_pwm, vibra_set_pwm), -+ __ATTR(duration, S_IRUGO | S_IWUSR, -+ vibra_show_duration, vibra_set_duration), -+ __ATTR(pulse, S_IWUSR, -+ NULL, vibra_set_seq), -+}; -+ -+static int vibra_register_sysfs(struct vibra_info *info) -+{ -+ int r, i; -+ -+ for (i = 0; i < ARRAY_SIZE(vibra_attrs); i++) { -+ r = device_create_file(info->dev, &vibra_attrs[i]); -+ if (r) -+ goto fail; -+ } -+ return 0; -+fail: -+ while (i--) -+ device_remove_file(info->dev, &vibra_attrs[i]); -+ -+ return r; -+} -+ -+static void vibra_unregister_sysfs(struct vibra_info *info) -+{ -+ int i; -+ -+ for (i = ARRAY_SIZE(vibra_attrs) - 1; i >= 0; i--) -+ device_remove_file(info->dev, &vibra_attrs[i]); -+} -+ -+static int __init twl4030_vibra_probe(struct platform_device *pdev) -+{ -+ struct vibra_info *info; -+ -+ info = kzalloc(sizeof(*info), GFP_KERNEL); -+ if (!info) -+ return -ENOMEM; -+ -+ info->dev = &pdev->dev; -+ info->enabled = 0; -+ info->duration = 0; -+ info->iseq = 0; -+ info->nseq = 0; -+ -+ platform_set_drvdata(pdev, info); -+ -+ info->workqueue = create_singlethread_workqueue("vibra"); -+ if (info->workqueue == NULL) { -+ dev_err(&pdev->dev, "couldn't create workqueue\n"); -+ kfree(info); -+ return -ENOMEM; -+ } -+ -+ mutex_init(&info->lock); -+ INIT_DELAYED_WORK(&info->work, vibra_work); -+ INIT_WORK(&info->led_work, vibra_led_work); -+ -+ info->vibra.name = "twl4030:vibrator"; -+ info->vibra.default_trigger = NULL; -+ info->vibra.brightness = 0; -+ info->vibra.brightness_set = vibra_led_set; -+ 
info->vibra.brightness_get = NULL; -+ -+ if (led_classdev_register(&pdev->dev, &info->vibra) < 0) -+ dev_dbg(&pdev->dev, "could not register vibrator to LED FW\n"); -+ -+ if (vibra_register_sysfs(info) < 0) -+ dev_dbg(&pdev->dev, "could not register sysfs files\n"); -+ -+ return 0; -+} -+ -+static int __exit twl4030_vibra_remove(struct platform_device *pdev) -+{ -+ struct vibra_info *info = platform_get_drvdata(pdev); -+ -+ cancel_delayed_work_sync(&info->work); -+ destroy_workqueue(info->workqueue); -+ -+ vibra_unregister_sysfs(info); -+ led_classdev_unregister(&info->vibra); -+ kfree(info); -+ -+ return 0; -+} -+ -+MODULE_DESCRIPTION("Triton2 Vibra driver"); -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Nokia Corporation"); -+ -+static struct platform_driver twl4030_vibra_driver = { -+ .probe = twl4030_vibra_probe, -+ .remove = __exit_p(twl4030_vibra_remove), -+ .driver = { -+ .name = "twl4030_vibra", -+ }, -+}; -+ -+static int __init twl4030_vibra_init(void) -+{ -+ return platform_driver_register(&twl4030_vibra_driver); -+} -+late_initcall(twl4030_vibra_init); -+ -+static void __exit twl4030_vibra_exit(void) -+{ -+ platform_driver_unregister(&twl4030_vibra_driver); -+} -+module_exit(twl4030_vibra_exit); -+ -+MODULE_ALIAS("platform:twl4030-vibra"); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/leds/Makefile linux-omap-2.6.28-nokia1/drivers/leds/Makefile ---- linux-omap-2.6.28-omap1/drivers/leds/Makefile 2011-06-22 13:14:18.243067747 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/leds/Makefile 2011-06-22 13:19:32.713063276 +0200 -@@ -20,11 +20,13 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunf - obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o - obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o - obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o -+obj-$(CONFIG_LEDS_TWL4030_VIBRA) += leds-twl4030-vibra.o - obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o - obj-$(CONFIG_LEDS_FSG) += leds-fsg.o - obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o - obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o - obj-$(CONFIG_LEDS_HP_DISK) += leds-hp-disk.o -+obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o - - # LED Triggers - obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/Makefile linux-omap-2.6.28-nokia1/drivers/Makefile ---- linux-omap-2.6.28-omap1/drivers/Makefile 2011-06-22 13:14:17.083067763 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/Makefile 2011-06-22 13:19:32.513063279 +0200 -@@ -18,6 +18,9 @@ obj-$(CONFIG_ARM_AMBA) += amba/ - - obj-$(CONFIG_XEN) += xen/ - -+# regulators early, since some subsystems rely on them to initialize -+obj-$(CONFIG_REGULATOR) += regulator/ -+ - # char/ comes before serial/ etc so that the VT console is the boot-time - # default. 
- obj-y += char/ -@@ -36,10 +39,11 @@ obj-$(CONFIG_FB_INTEL) += video - obj-$(CONFIG_SERIO) += input/serio/ - obj-y += serial/ - obj-$(CONFIG_PARPORT) += parport/ --obj-y += base/ block/ misc/ mfd/ net/ media/ cbus/ -+obj-y += base/ block/ misc/ mfd/ net/ media/ - obj-y += i2c/ - obj-y += cbus/ - obj-$(CONFIG_ARCH_OMAP) += dsp/dspgateway/ -+obj-$(CONFIG_MPU_BRIDGE) += dsp/bridge/ - obj-$(CONFIG_NUBUS) += nubus/ - obj-$(CONFIG_ATM) += atm/ - obj-y += macintosh/ -@@ -105,5 +109,4 @@ obj-$(CONFIG_PPC_PS3) += ps3/ - obj-$(CONFIG_OF) += of/ - obj-$(CONFIG_SSB) += ssb/ - obj-$(CONFIG_VIRTIO) += virtio/ --obj-$(CONFIG_REGULATOR) += regulator/ - obj-$(CONFIG_STAGING) += staging/ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/Kconfig linux-omap-2.6.28-nokia1/drivers/media/radio/Kconfig ---- linux-omap-2.6.28-omap1/drivers/media/radio/Kconfig 2011-06-22 13:14:18.403067745 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/Kconfig 2011-06-22 13:19:32.713063276 +0200 -@@ -339,6 +339,18 @@ config RADIO_ZOLTRIX_PORT - help - Enter the I/O port of your Zoltrix radio card. - -+config I2C_SI4713 -+ tristate "Silicon Labs Si4713 FM Radio Transmitter support" -+ depends on I2C && VIDEO_V4L2 -+ ---help--- -+ Say Y here if you want support to Si4713 FM Radio Transmitter. -+ This device can transmit audio through FM. It can transmit -+ EDS and EBDS signals as well. This device driver supports only -+ i2c bus. -+ -+ To compile this driver as a module, choose M here: the -+ module will be called fmtx-si4713. -+ - config USB_DSBR - tristate "D-Link/GemTek USB FM radio support" - depends on USB && VIDEO_V4L2 -@@ -375,6 +387,16 @@ config USB_SI470X - To compile this driver as a module, choose M here: the - module will be called radio-si470x. - -+config I2C_BCM2048 -+ tristate "Broadcom BCM2048 FM Radio Receiver support" -+ depends on I2C && VIDEO_V4L2 -+ ---help--- -+ Say Y here if you want support to BCM2048 FM Radio Receiver. -+ This device driver supports only i2c bus. -+ -+ To compile this driver as a module, choose M here: the -+ module will be called radio-bcm2048. 
-+ - config USB_MR800 - tristate "AverMedia MR 800 USB FM radio support" - depends on USB && VIDEO_V4L2 -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/Makefile linux-omap-2.6.28-nokia1/drivers/media/radio/Makefile ---- linux-omap-2.6.28-omap1/drivers/media/radio/Makefile 2011-06-22 13:14:18.403067745 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/Makefile 2011-06-22 13:19:32.713063276 +0200 -@@ -15,8 +15,11 @@ obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zol - obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o - obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o - obj-$(CONFIG_RADIO_TRUST) += radio-trust.o -+obj-$(CONFIG_I2C_SI4713) += fmtx-si4713.o -+fmtx-si4713-objs := radio-si4713.o si4713.o - obj-$(CONFIG_RADIO_MAESTRO) += radio-maestro.o - obj-$(CONFIG_USB_DSBR) += dsbr100.o -+obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o - obj-$(CONFIG_USB_SI470X) += radio-si470x.o - obj-$(CONFIG_USB_MR800) += radio-mr800.o - -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/radio-bcm2048.c linux-omap-2.6.28-nokia1/drivers/media/radio/radio-bcm2048.c ---- linux-omap-2.6.28-omap1/drivers/media/radio/radio-bcm2048.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/radio-bcm2048.c 2011-06-22 13:19:32.713063276 +0200 -@@ -0,0 +1,2612 @@ -+/* -+ * drivers/media/radio/radio-bcm2048.c -+ * -+ * Driver for I2C Broadcom BCM2048 FM Radio Receiver: -+ * -+ * Copyright (C) Nokia Corporation -+ * Contact: Eero Nurkkala -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* driver definitions */ -+#define BCM2048_DRIVER_AUTHOR "Eero Nurkkala " -+#define BCM2048_DRIVER_NAME BCM2048_NAME -+#define BCM2048_DRIVER_VERSION KERNEL_VERSION(0, 0, 1) -+#define BCM2048_DRIVER_CARD "Broadcom bcm2048 FM Radio Receiver" -+#define BCM2048_DRIVER_DESC "I2C driver for BCM2048 FM Radio Receiver" -+ -+/* I2C Control Registers */ -+#define BCM2048_I2C_FM_RDS_SYSTEM 0x00 -+#define BCM2048_I2C_FM_CTRL 0x01 -+#define BCM2048_I2C_RDS_CTRL0 0x02 -+#define BCM2048_I2C_RDS_CTRL1 0x03 -+#define BCM2048_I2C_FM_AUDIO_PAUSE 0x04 -+#define BCM2048_I2C_FM_AUDIO_CTRL0 0x05 -+#define BCM2048_I2C_FM_AUDIO_CTRL1 0x06 -+#define BCM2048_I2C_FM_SEARCH_CTRL0 0x07 -+#define BCM2048_I2C_FM_SEARCH_CTRL1 0x08 -+#define BCM2048_I2C_FM_SEARCH_TUNE_MODE 0x09 -+#define BCM2048_I2C_FM_FREQ0 0x0a -+#define BCM2048_I2C_FM_FREQ1 0x0b -+#define BCM2048_I2C_FM_AF_FREQ0 0x0c -+#define BCM2048_I2C_FM_AF_FREQ1 0x0d -+#define BCM2048_I2C_FM_CARRIER 0x0e -+#define BCM2048_I2C_FM_RSSI 0x0f -+#define BCM2048_I2C_FM_RDS_MASK0 0x10 -+#define BCM2048_I2C_FM_RDS_MASK1 0x11 -+#define BCM2048_I2C_FM_RDS_FLAG0 0x12 -+#define BCM2048_I2C_FM_RDS_FLAG1 0x13 -+#define BCM2048_I2C_RDS_WLINE 0x14 -+#define BCM2048_I2C_RDS_BLKB_MATCH0 0x16 -+#define BCM2048_I2C_RDS_BLKB_MATCH1 0x17 -+#define BCM2048_I2C_RDS_BLKB_MASK0 0x18 -+#define BCM2048_I2C_RDS_BLKB_MASK1 0x19 -+#define BCM2048_I2C_RDS_PI_MATCH0 0x1a -+#define BCM2048_I2C_RDS_PI_MATCH1 0x1b -+#define BCM2048_I2C_RDS_PI_MASK0 0x1c -+#define BCM2048_I2C_RDS_PI_MASK1 0x1d -+#define BCM2048_I2C_SPARE1 0x20 -+#define BCM2048_I2C_SPARE2 0x21 -+#define BCM2048_I2C_FM_RDS_REV 0x28 -+#define BCM2048_I2C_SLAVE_CONFIGURATION 0x29 -+#define BCM2048_I2C_RDS_DATA 0x80 -+#define BCM2048_I2C_FM_BEST_TUNE_MODE 0x90 -+ -+/* BCM2048_I2C_FM_RDS_SYSTEM */ -+#define BCM2048_FM_ON 0x01 -+#define BCM2048_RDS_ON 0x02 -+ -+/* BCM2048_I2C_FM_CTRL */ -+#define BCM2048_BAND_SELECT 0x01 -+#define BCM2048_STEREO_MONO_AUTO_SELECT 0x02 -+#define BCM2048_STEREO_MONO_MANUAL_SELECT 0x04 -+#define BCM2048_STEREO_MONO_BLEND_SWITCH 0x08 -+#define BCM2048_HI_LO_INJECTION 0x10 -+ -+/* BCM2048_I2C_RDS_CTRL0 */ -+#define BCM2048_RBDS_RDS_SELECT 0x01 -+#define BCM2048_FLUSH_FIFO 0x02 -+ -+/* BCM2048_I2C_FM_AUDIO_PAUSE */ -+#define BCM2048_AUDIO_PAUSE_RSSI_TRESH 0x0f -+#define BCM2048_AUDIO_PAUSE_DURATION 0xf0 -+ -+/* BCM2048_I2C_FM_AUDIO_CTRL0 */ -+#define BCM2048_RF_MUTE 0x01 -+#define BCM2048_MANUAL_MUTE 0x02 -+#define BCM2048_DAC_OUTPUT_LEFT 0x04 -+#define BCM2048_DAC_OUTPUT_RIGHT 0x08 -+#define BCM2048_AUDIO_ROUTE_DAC 0x10 -+#define BCM2048_AUDIO_ROUTE_I2S 0x20 -+#define BCM2048_DE_EMPHASIS_SELECT 0x40 -+#define BCM2048_AUDIO_BANDWIDTH_SELECT 0x80 -+ -+/* BCM2048_I2C_FM_SEARCH_CTRL0 */ -+#define BCM2048_SEARCH_RSSI_THRESHOLD 0x7f -+#define BCM2048_SEARCH_DIRECTION 0x80 -+ -+/* BCM2048_I2C_FM_SEARCH_TUNE_MODE */ -+#define BCM2048_FM_AUTO_SEARCH 0x03 -+ -+/* BCM2048_I2C_FM_RSSI */ -+#define BCM2048_RSSI_VALUE 0xff -+ -+/* BCM2048_I2C_FM_RDS_MASK0 */ -+/* BCM2048_I2C_FM_RDS_MASK1 */ -+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01 -+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02 -+#define BCM2048_FM_FLAG_RSSI_LOW 0x04 -+#define 
BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08 -+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10 -+#define BCM2048_FLAG_STEREO_DETECTED 0x20 -+#define BCM2048_FLAG_STEREO_ACTIVE 0x40 -+ -+/* BCM2048_I2C_RDS_DATA */ -+#define BCM2048_SLAVE_ADDRESS 0x3f -+#define BCM2048_SLAVE_ENABLE 0x80 -+ -+/* BCM2048_I2C_FM_BEST_TUNE_MODE */ -+#define BCM2048_BEST_TUNE_MODE 0x80 -+ -+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01 -+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02 -+#define BCM2048_FM_FLAG_RSSI_LOW 0x04 -+#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08 -+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10 -+#define BCM2048_FLAG_STEREO_DETECTED 0x20 -+#define BCM2048_FLAG_STEREO_ACTIVE 0x40 -+ -+#define BCM2048_RDS_FLAG_FIFO_WLINE 0x02 -+#define BCM2048_RDS_FLAG_B_BLOCK_MATCH 0x08 -+#define BCM2048_RDS_FLAG_SYNC_LOST 0x10 -+#define BCM2048_RDS_FLAG_PI_MATCH 0x20 -+ -+#define BCM2048_RDS_MARK_END_BYTE0 0x7C -+#define BCM2048_RDS_MARK_END_BYTEN 0xFF -+ -+#define BCM2048_FM_FLAGS_ALL (FM_FLAG_SEARCH_TUNE_FINISHED | \ -+ FM_FLAG_SEARCH_TUNE_FAIL | \ -+ FM_FLAG_RSSI_LOW | \ -+ FM_FLAG_CARRIER_ERROR_HIGH | \ -+ FM_FLAG_AUDIO_PAUSE_INDICATION | \ -+ FLAG_STEREO_DETECTED | FLAG_STEREO_ACTIVE) -+ -+#define BCM2048_RDS_FLAGS_ALL (RDS_FLAG_FIFO_WLINE | \ -+ RDS_FLAG_B_BLOCK_MATCH | \ -+ RDS_FLAG_SYNC_LOST | RDS_FLAG_PI_MATCH) -+ -+#define BCM2048_DEFAULT_TIMEOUT 1500 -+#define BCM2048_AUTO_SEARCH_TIMEOUT 3000 -+ -+ -+#define BCM2048_FREQDEV_UNIT 10000 -+#define BCM2048_FREQV4L2_MULTI 625 -+#define dev_to_v4l2(f) ((f * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI) -+#define v4l2_to_dev(f) ((f * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT) -+ -+#define msb(x) ((u8)((u16) x >> 8)) -+#define lsb(x) ((u8)((u16) x & 0x00FF)) -+#define compose_u16(msb, lsb) (((u16)msb << 8) | lsb) -+ -+#define BCM2048_DEFAULT_POWERING_DELAY 20 -+#define BCM2048_DEFAULT_REGION 0x02 -+#define BCM2048_DEFAULT_MUTE 0x01 -+#define BCM2048_DEFAULT_RSSI_THRESHOLD 0x64 -+#define BCM2048_DEFAULT_RDS_WLINE 0x7E -+ -+#define BCM2048_FM_SEARCH_INACTIVE 0x00 -+#define BCM2048_FM_PRE_SET_MODE 0x01 -+#define BCM2048_FM_AUTO_SEARCH_MODE 0x02 -+#define BCM2048_FM_AF_JUMP_MODE 0x03 -+ -+#define BCM2048_FREQUENCY_BASE 64000 -+ -+#define BCM2048_POWER_ON 0x01 -+#define BCM2048_POWER_OFF 0x00 -+ -+#define BCM2048_ITEM_ENABLED 0x01 -+#define BCM2048_SEARCH_DIRECTION_UP 0x01 -+ -+#define BCM2048_DE_EMPHASIS_75us 75 -+#define BCM2048_DE_EMPHASIS_50us 50 -+ -+#define BCM2048_SCAN_FAIL 0x00 -+#define BCM2048_SCAN_OK 0x01 -+ -+#define BCM2048_FREQ_ERROR_FLOOR -20 -+#define BCM2048_FREQ_ERROR_ROOF 20 -+ -+/* -60 dB is reported as full signal strenght */ -+#define BCM2048_RSSI_LEVEL_BASE -60 -+#define BCM2048_RSSI_LEVEL_ROOF -100 -+#define BCM2048_RSSI_LEVEL_ROOF_NEG 100 -+#define BCM2048_SIGNAL_MULTIPLIER (0xFFFF / \ -+ (BCM2048_RSSI_LEVEL_ROOF_NEG + \ -+ BCM2048_RSSI_LEVEL_BASE)) -+ -+#define BCM2048_RDS_FIFO_DUPLE_SIZE 0x03 -+#define BCM2048_RDS_CRC_MASK 0x0F -+#define BCM2048_RDS_CRC_NONE 0x00 -+#define BCM2048_RDS_CRC_MAX_2BITS 0x04 -+#define BCM2048_RDS_CRC_LEAST_2BITS 0x08 -+#define BCM2048_RDS_CRC_UNRECOVARABLE 0x0C -+ -+#define BCM2048_RDS_BLOCK_MASK 0xF0 -+#define BCM2048_RDS_BLOCK_A 0x00 -+#define BCM2048_RDS_BLOCK_B 0x10 -+#define BCM2048_RDS_BLOCK_C 0x20 -+#define BCM2048_RDS_BLOCK_D 0x30 -+#define BCM2048_RDS_BLOCK_C_SCORED 0x40 -+#define BCM2048_RDS_BLOCK_E 0x60 -+ -+#define BCM2048_RDS_RT 0x20 -+#define BCM2048_RDS_PS 0x00 -+ -+#define BCM2048_RDS_GROUP_AB_MASK 0x08 -+#define BCM2048_RDS_GROUP_A 0x00 -+#define BCM2048_RDS_GROUP_B 0x08 
-+ -+#define BCM2048_RDS_RT_AB_MASK 0x10 -+#define BCM2048_RDS_RT_A 0x00 -+#define BCM2048_RDS_RT_B 0x10 -+#define BCM2048_RDS_RT_INDEX 0x0F -+ -+#define BCM2048_RDS_PS_INDEX 0x03 -+ -+struct rds_info { -+ u16 rds_pi; -+#define BCM2048_MAX_RDS_RT (64 + 1) -+ u8 rds_rt[BCM2048_MAX_RDS_RT]; -+ u8 rds_rt_group_b; -+ u8 rds_rt_ab; -+#define BCM2048_MAX_RDS_PS (8 + 1) -+ u8 rds_ps[BCM2048_MAX_RDS_PS]; -+ u8 rds_ps_group; -+ u8 rds_ps_group_cnt; -+#define BCM2048_MAX_RDS_RADIO_TEXT 255 -+ u8 radio_text[BCM2048_MAX_RDS_RADIO_TEXT + 3]; -+ u8 text_len; -+}; -+ -+struct region_info { -+ u32 bottom_frequency; -+ u32 top_frequency; -+ u8 deemphasis; -+ u8 channel_spacing; -+ u8 region; -+}; -+ -+struct bcm2048_device { -+ struct i2c_client *client; -+ struct video_device *videodev; -+ struct work_struct work; -+ struct completion compl; -+ struct mutex mutex; -+ struct bcm2048_platform_data *platform_data; -+ struct rds_info rds_info; -+ struct region_info region_info; -+ u16 frequency; -+ u8 cache_fm_rds_system; -+ u8 cache_fm_ctrl; -+ u8 cache_fm_audio_ctrl0; -+ u8 cache_fm_search_ctrl0; -+ u8 power_state; -+ u8 rds_state; -+ u8 fifo_size; -+ u8 scan_state; -+ u8 mute_state; -+}; -+ -+static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */ -+module_param(radio_nr, int, 0); -+MODULE_PARM_DESC(radio_nr, -+ "Minor number for radio device (-1 ==> auto assign)"); -+ -+static struct region_info region_configs[] = { -+ /* USA */ -+ { -+ .channel_spacing = 20, -+ .bottom_frequency = 87500, -+ .top_frequency = 108000, -+ .deemphasis = 75, -+ .region = 0, -+ }, -+ /* Australia */ -+ { -+ .channel_spacing = 20, -+ .bottom_frequency = 87500, -+ .top_frequency = 108000, -+ .deemphasis = 50, -+ .region = 1, -+ }, -+ /* Europe */ -+ { -+ .channel_spacing = 10, -+ .bottom_frequency = 87500, -+ .top_frequency = 108000, -+ .deemphasis = 50, -+ .region = 2, -+ }, -+ /* Japan */ -+ { -+ .channel_spacing = 10, -+ .bottom_frequency = 76000, -+ .top_frequency = 90000, -+ .deemphasis = 50, -+ .region = 3, -+ }, -+ /* Japan wide band */ -+ { -+ .channel_spacing = 10, -+ .bottom_frequency = 76000, -+ .top_frequency = 108000, -+ .deemphasis = 50, -+ .region = 4, -+ }, -+}; -+ -+/* -+ * I2C Interface read / write -+ */ -+static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg, -+ unsigned int value) -+{ -+ struct i2c_client *client = bdev->client; -+ u8 data[2]; -+ -+ if (!bdev->power_state) { -+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n"); -+ return -EIO; -+ } -+ -+ data[0] = reg & 0xff; -+ data[1] = value & 0xff; -+ -+ if (i2c_master_send(client, data, 2) == 2) { -+ return 0; -+ } else { -+ dev_err(&bdev->client->dev, "BCM I2C error!\n"); -+ dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n"); -+ return -EIO; -+ } -+} -+ -+static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg, -+ u8 *value) -+{ -+ struct i2c_client *client = bdev->client; -+ -+ if (!bdev->power_state) { -+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n"); -+ return -EIO; -+ } -+ -+ value[0] = i2c_smbus_read_byte_data(client, reg & 0xff); -+ -+ return 0; -+} -+ -+static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg, -+ u8 *value, u8 duples) -+{ -+ struct i2c_client *client = bdev->client; -+ struct i2c_adapter *adap = client->adapter; -+ struct i2c_msg msg[2]; -+ u8 buf; -+ -+ if (!bdev->power_state) { -+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n"); -+ return -EIO; -+ } -+ -+ buf = reg & 0xff; -+ -+ msg[0].addr = 
client->addr; -+ msg[0].flags = client->flags & I2C_M_TEN; -+ msg[0].len = 1; -+ msg[0].buf = &buf; -+ -+ msg[1].addr = client->addr; -+ msg[1].flags = client->flags & I2C_M_TEN; -+ msg[1].flags |= I2C_M_RD; -+ msg[1].len = duples; -+ msg[1].buf = value; -+ -+ return i2c_transfer(adap, msg, 2); -+} -+ -+/* -+ * BCM2048 - I2C register programming helpers -+ */ -+static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power) -+{ -+ int err = 0; -+ -+ mutex_lock(&bdev->mutex); -+ -+ if (power) { -+ bdev->power_state = BCM2048_POWER_ON; -+ bdev->cache_fm_rds_system |= BCM2048_FM_ON; -+ } else { -+ bdev->cache_fm_rds_system &= ~BCM2048_FM_ON; -+ } -+ -+ /* Warning! FM cannot be turned off because then -+ * the I2C communications get ruined! -+ * Comment off the "if (power)" when the chip works! -+ */ -+ if (power) -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, -+ bdev->cache_fm_rds_system); -+ msleep(BCM2048_DEFAULT_POWERING_DELAY); -+ -+ if (!power) -+ bdev->power_state = BCM2048_POWER_OFF; -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_power_state(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err && (value & BCM2048_FM_ON)) -+ return BCM2048_POWER_ON; -+ -+ return err; -+} -+ -+static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on) -+{ -+ int err; -+ u8 flags; -+ -+ bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON; -+ -+ if (rds_on) { -+ bdev->cache_fm_rds_system |= BCM2048_RDS_ON; -+ bdev->rds_state = BCM2048_RDS_ON; -+ flags = BCM2048_RDS_FLAG_FIFO_WLINE; -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, -+ flags); -+ } else { -+ flags = 0; -+ bdev->rds_state = 0; -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, -+ flags); -+ memset(&bdev->rds_info, 0, sizeof(bdev->rds_info)); -+ } -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, -+ bdev->cache_fm_rds_system); -+ -+ return err; -+} -+ -+static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value); -+ -+ if (!err && (value & BCM2048_RDS_ON)) -+ return BCM2048_ITEM_ENABLED; -+ -+ return err; -+} -+ -+static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_set_rds_no_lock(bdev, rds_on); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds(struct bcm2048_device *bdev) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_get_rds_no_lock(bdev); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_pi(struct bcm2048_device *bdev) -+{ -+ return bdev->rds_info.rds_pi; -+} -+ -+static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev, -+ u8 enabled) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT; -+ -+ if (enabled) -+ bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL, -+ bdev->cache_fm_ctrl); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev, -+ u8 hi_lo) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION; -+ -+ if (hi_lo) -+ 
bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL, -+ bdev->cache_fm_ctrl); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err && (value & BCM2048_HI_LO_INJECTION)) -+ return BCM2048_ITEM_ENABLED; -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency) -+{ -+ int err; -+ -+ if (frequency < bdev->region_info.bottom_frequency || -+ frequency > bdev->region_info.top_frequency) -+ return -EDOM; -+ -+ frequency -= BCM2048_FREQUENCY_BASE; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency)); -+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1, -+ msb(frequency)); -+ -+ if (!err) -+ bdev->frequency = frequency; -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 lsb, msb; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb); -+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (err) -+ return err; -+ -+ err = compose_u16(msb, lsb); -+ err += BCM2048_FREQUENCY_BASE; -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev, -+ u32 frequency) -+{ -+ int err; -+ -+ if (frequency < bdev->region_info.bottom_frequency || -+ frequency > bdev->region_info.top_frequency) -+ return -EDOM; -+ -+ frequency -= BCM2048_FREQUENCY_BASE; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0, -+ lsb(frequency)); -+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1, -+ msb(frequency)); -+ if (!err) -+ bdev->frequency = frequency; -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 lsb, msb; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb); -+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (err) -+ return err; -+ -+ err = compose_u16(msb, lsb); -+ err += BCM2048_FREQUENCY_BASE; -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d) -+{ -+ int err; -+ u8 deemphasis; -+ -+ if (d == BCM2048_DE_EMPHASIS_75us) -+ deemphasis = BCM2048_DE_EMPHASIS_SELECT; -+ else -+ deemphasis = 0; -+ -+ mutex_lock(&bdev->mutex); -+ -+ bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT; -+ bdev->cache_fm_audio_ctrl0 |= deemphasis; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, -+ bdev->cache_fm_audio_ctrl0); -+ -+ if (!err) -+ bdev->region_info.deemphasis = d; -+ -+ mutex_unlock(&bdev->mutex); -+ -+ return err; -+} -+ -+static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) { -+ if (value & BCM2048_DE_EMPHASIS_SELECT) -+ return BCM2048_DE_EMPHASIS_75us; -+ else -+ return BCM2048_DE_EMPHASIS_50us; -+ } -+ -+ return err; -+} -+ -+static int bcm2048_set_region(struct 
bcm2048_device *bdev, u8 region) -+{ -+ int err; -+ u32 new_frequency = 0; -+ -+ if (region >= ARRAY_SIZE(region_configs)) -+ return -EINVAL; -+ -+ mutex_lock(&bdev->mutex); -+ memcpy(&bdev->region_info, ®ion_configs[region], -+ sizeof(struct region_info)); -+ mutex_unlock(&bdev->mutex); -+ -+ if (bdev->frequency < region_configs[region].bottom_frequency || -+ bdev->frequency > region_configs[region].top_frequency) -+ new_frequency = region_configs[region].bottom_frequency; -+ -+ if (new_frequency > 0) { -+ err = bcm2048_set_fm_frequency(bdev, new_frequency); -+ -+ if (err) -+ goto done; -+ } -+ -+ err = bcm2048_set_fm_deemphasis(bdev, -+ region_configs[region].deemphasis); -+ -+done: -+ return err; -+} -+ -+static int bcm2048_get_region(struct bcm2048_device *bdev) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ err = bdev->region_info.region; -+ mutex_unlock(&bdev->mutex); -+ -+ return err; -+} -+ -+static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE); -+ -+ if (mute) -+ bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE | -+ BCM2048_MANUAL_MUTE); -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, -+ bdev->cache_fm_audio_ctrl0); -+ -+ if (!err) -+ bdev->mute_state = mute; -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_mute(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ if (bdev->power_state) { -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, -+ &value); -+ if (!err) -+ err = value & (BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE); -+ } else { -+ err = bdev->mute_state; -+ } -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ route &= (BCM2048_AUDIO_ROUTE_DAC | BCM2048_AUDIO_ROUTE_I2S); -+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC | -+ BCM2048_AUDIO_ROUTE_I2S); -+ bdev->cache_fm_audio_ctrl0 |= route; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, -+ bdev->cache_fm_audio_ctrl0); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_audio_route(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return value & (BCM2048_AUDIO_ROUTE_DAC | -+ BCM2048_AUDIO_ROUTE_I2S); -+ -+ return err; -+} -+ -+static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT | -+ BCM2048_DAC_OUTPUT_RIGHT); -+ bdev->cache_fm_audio_ctrl0 |= channels; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, -+ bdev->cache_fm_audio_ctrl0); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_dac_output(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return value & (BCM2048_DAC_OUTPUT_LEFT | -+ BCM2048_DAC_OUTPUT_RIGHT); -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev, -+ u8 threshold) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ 
threshold &= BCM2048_SEARCH_RSSI_THRESHOLD; -+ bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD; -+ bdev->cache_fm_search_ctrl0 |= threshold; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, -+ bdev->cache_fm_search_ctrl0); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return value & BCM2048_SEARCH_RSSI_THRESHOLD; -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev, -+ u8 direction) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION; -+ -+ if (direction) -+ bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION; -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, -+ bdev->cache_fm_search_ctrl0); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err && (value & BCM2048_SEARCH_DIRECTION)) -+ return BCM2048_SEARCH_DIRECTION_UP; -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev, -+ u8 mode) -+{ -+ int err, timeout, restart_rds = 0; -+ u8 value, flags; -+ -+ value = mode & BCM2048_FM_AUTO_SEARCH; -+ -+ flags = BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED | -+ BCM2048_FM_FLAG_SEARCH_TUNE_FAIL; -+ -+ mutex_lock(&bdev->mutex); -+ -+ /* -+ * If RDS is enabled, and frequency is changed, RDS quits working. -+ * Thus, always restart RDS if it's enabled. Moreover, RDS must -+ * not be enabled while changing the frequency because it can -+ * provide a race to the mutex from the workqueue handler if RDS -+ * IRQ occurs while waiting for frequency changed IRQ. 
-+ */ -+ if (bcm2048_get_rds_no_lock(bdev)) { -+ err = bcm2048_set_rds_no_lock(bdev, 0); -+ if (err) -+ goto unlock; -+ restart_rds = 1; -+ } -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags); -+ -+ if (err) -+ goto unlock; -+ -+ bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value); -+ -+ if (mode != BCM2048_FM_AUTO_SEARCH_MODE) -+ timeout = BCM2048_DEFAULT_TIMEOUT; -+ else -+ timeout = BCM2048_AUTO_SEARCH_TIMEOUT; -+ -+ if (!wait_for_completion_timeout(&bdev->compl, -+ msecs_to_jiffies(timeout))) -+ dev_err(&bdev->client->dev, "IRQ timeout.\n"); -+ -+ if (value) -+ if (!bdev->scan_state) -+ err = -EIO; -+ -+unlock: -+ if (restart_rds) -+ err |= bcm2048_set_rds_no_lock(bdev, 1); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ return err; -+} -+ -+static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, -+ &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return value & BCM2048_FM_AUTO_SEARCH; -+ -+ return err; -+} -+ -+static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MASK0, lsb(mask)); -+ err |= bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MASK1, msb(mask)); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 lsb, msb; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MASK0, &lsb); -+ err |= bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MASK1, &msb); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return compose_u16(msb, lsb); -+ -+ return err; -+} -+ -+static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev, -+ u16 match) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MATCH0, lsb(match)); -+ err |= bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MATCH1, msb(match)); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 lsb, msb; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MATCH0, &lsb); -+ err |= bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_BLKB_MATCH1, &msb); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return compose_u16(msb, lsb); -+ -+ return err; -+} -+ -+static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_PI_MASK0, lsb(mask)); -+ err |= bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_PI_MASK1, msb(mask)); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 lsb, msb; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_PI_MASK0, &lsb); -+ err |= bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_PI_MASK1, &msb); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return compose_u16(msb, lsb); -+ -+ return err; -+} -+ -+static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_PI_MATCH0, lsb(match)); -+ err 
|= bcm2048_send_command(bdev, -+ BCM2048_I2C_RDS_PI_MATCH1, msb(match)); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 lsb, msb; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_PI_MATCH0, &lsb); -+ err |= bcm2048_recv_command(bdev, -+ BCM2048_I2C_RDS_PI_MATCH1, &msb); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return compose_u16(msb, lsb); -+ -+ return err; -+} -+ -+static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, -+ BCM2048_I2C_FM_RDS_MASK0, lsb(mask)); -+ err |= bcm2048_send_command(bdev, -+ BCM2048_I2C_FM_RDS_MASK1, msb(mask)); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value0, value1; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0); -+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return compose_u16(value1, value0); -+ -+ return err; -+} -+ -+static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value0, value1; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0); -+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return compose_u16(value1, value0); -+ -+ return err; -+} -+ -+static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev) -+{ -+ return bdev->region_info.bottom_frequency; -+} -+ -+static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev) -+{ -+ return bdev->region_info.top_frequency; -+} -+ -+static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ /* Perform read as the manual indicates */ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE, -+ &value); -+ value &= ~BCM2048_BEST_TUNE_MODE; -+ -+ if (mode) -+ value |= BCM2048_BEST_TUNE_MODE; -+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE, -+ value); -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE, -+ &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err && (value & BCM2048_BEST_TUNE_MODE)) -+ return BCM2048_ITEM_ENABLED; -+ -+ return err; -+} -+ -+static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev) -+{ -+ int err = 0; -+ s8 value; -+ -+ mutex_lock(&bdev->mutex); -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value); -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return value; -+ -+ return err; -+} -+ -+static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev) -+{ -+ int err; -+ s8 value; -+ -+ mutex_lock(&bdev->mutex); -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value); -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) -+ return value; -+ -+ return err; -+} -+ -+static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline); -+ -+ if (!err) -+ 
bdev->fifo_size = wline; -+ -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_wline(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 value; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) { -+ bdev->fifo_size = value; -+ return value; -+ } -+ -+ return err; -+} -+ -+static int bcm2048_checkrev(struct bcm2048_device *bdev) -+{ -+ int err; -+ u8 version; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version); -+ -+ mutex_unlock(&bdev->mutex); -+ -+ if (!err) { -+ dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n", -+ version); -+ return version; -+ } -+ -+ return err; -+} -+ -+static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data) -+{ -+ int err = 0, i, j = 0, ce = 0, cr = 0; -+ char data_buffer[BCM2048_MAX_RDS_RT+1]; -+ -+ mutex_lock(&bdev->mutex); -+ -+ if (!bdev->rds_info.text_len) { -+ err = -EINVAL; -+ goto unlock; -+ } -+ -+ for (i = 0; i < BCM2048_MAX_RDS_RT; i++) { -+ if (bdev->rds_info.rds_rt[i]) { -+ ce = i; -+ /* Skip the carriage return */ -+ if (bdev->rds_info.rds_rt[i] != 0x0d) { -+ data_buffer[j++] = bdev->rds_info.rds_rt[i]; -+ } else { -+ cr = i; -+ break; -+ } -+ } -+ } -+ -+ if (j <= BCM2048_MAX_RDS_RT) -+ data_buffer[j] = 0; -+ -+ for (i = 0; i < BCM2048_MAX_RDS_RT; i++) { -+ if (!bdev->rds_info.rds_rt[i]) { -+ if (cr && (i < cr)) { -+ err = -EBUSY; -+ goto unlock; -+ } -+ if (i < ce) { -+ if (cr && (i >= cr)) -+ break; -+ err = -EBUSY; -+ goto unlock; -+ } -+ } -+ } -+ -+ memcpy(data, data_buffer, sizeof(data_buffer)); -+ -+unlock: -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data) -+{ -+ int err = 0, i, j = 0; -+ char data_buffer[BCM2048_MAX_RDS_PS+1]; -+ -+ mutex_lock(&bdev->mutex); -+ -+ if (!bdev->rds_info.text_len) { -+ err = -EINVAL; -+ goto unlock; -+ } -+ -+ for (i = 0; i < BCM2048_MAX_RDS_PS; i++) { -+ if (bdev->rds_info.rds_ps[i]) { -+ data_buffer[j++] = bdev->rds_info.rds_ps[i]; -+ } else { -+ if (i < (BCM2048_MAX_RDS_PS - 1)) { -+ err = -EBUSY; -+ goto unlock; -+ } -+ } -+ } -+ -+ if (j <= BCM2048_MAX_RDS_PS) -+ data_buffer[j] = 0; -+ -+ memcpy(data, data_buffer, sizeof(data_buffer)); -+ -+unlock: -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev) -+{ -+ int i, cnt = 0; -+ u16 pi; -+ -+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) { -+ -+ /* Block A match, only data without crc errors taken */ -+ if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) { -+ -+ pi = ((bdev->rds_info.radio_text[i+1] << 8) + -+ bdev->rds_info.radio_text[i+2]); -+ -+ if (!bdev->rds_info.rds_pi) { -+ bdev->rds_info.rds_pi = pi; -+ return; -+ } -+ if (pi != bdev->rds_info.rds_pi) { -+ cnt++; -+ if (cnt > 3) { -+ bdev->rds_info.rds_pi = pi; -+ cnt = 0; -+ } -+ } else { -+ cnt = 0; -+ } -+ } -+ } -+} -+ -+static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i) -+{ -+ return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK; -+} -+ -+static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i, -+ int index, int crc) -+{ -+ /* Good data will overwrite poor data */ -+ if (crc) { -+ if (!bdev->rds_info.rds_rt[index]) -+ bdev->rds_info.rds_rt[index] = -+ bdev->rds_info.radio_text[i+1]; -+ if (!bdev->rds_info.rds_rt[index+1]) -+ bdev->rds_info.rds_rt[index+1] = -+ bdev->rds_info.radio_text[i+2]; 
-+ } else { -+ bdev->rds_info.rds_rt[index] = bdev->rds_info.radio_text[i+1]; -+ bdev->rds_info.rds_rt[index+1] = -+ bdev->rds_info.radio_text[i+2]; -+ } -+} -+ -+static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i) -+{ -+ int crc, rt_id, rt_group_b, rt_ab, index = 0; -+ -+ crc = bcm2048_rds_block_crc(bdev, i); -+ -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ return -EIO; -+ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == -+ BCM2048_RDS_BLOCK_B) { -+ -+ rt_id = (bdev->rds_info.radio_text[i+1] & -+ BCM2048_RDS_BLOCK_MASK); -+ rt_group_b = bdev->rds_info.radio_text[i+1] & -+ BCM2048_RDS_GROUP_AB_MASK; -+ rt_ab = bdev->rds_info.radio_text[i+2] & -+ BCM2048_RDS_RT_AB_MASK; -+ -+ if (rt_group_b != bdev->rds_info.rds_rt_group_b) { -+ memset(bdev->rds_info.rds_rt, 0, -+ sizeof(bdev->rds_info.rds_rt)); -+ bdev->rds_info.rds_rt_group_b = rt_group_b; -+ } -+ -+ if (rt_id == BCM2048_RDS_RT) { -+ /* A to B or (vice versa), means: clear screen */ -+ if (rt_ab != bdev->rds_info.rds_rt_ab) { -+ memset(bdev->rds_info.rds_rt, 0, -+ sizeof(bdev->rds_info.rds_rt)); -+ bdev->rds_info.rds_rt_ab = rt_ab; -+ } -+ -+ index = bdev->rds_info.radio_text[i+2] & -+ BCM2048_RDS_RT_INDEX; -+ -+ if (bdev->rds_info.rds_rt_group_b) -+ index <<= 1; -+ else -+ index <<= 2; -+ -+ return index; -+ } -+ } -+ -+ return -EIO; -+} -+ -+static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i, -+ int index) -+{ -+ int crc; -+ -+ crc = bcm2048_rds_block_crc(bdev, i); -+ -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ return 0; -+ -+ BUG_ON((index+2) >= BCM2048_MAX_RDS_RT); -+ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == -+ BCM2048_RDS_BLOCK_C) { -+ if (bdev->rds_info.rds_rt_group_b) -+ return 1; -+ bcm2048_parse_rds_rt_block(bdev, i, index, crc); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i, -+ int index) -+{ -+ int crc; -+ -+ crc = bcm2048_rds_block_crc(bdev, i); -+ -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ return; -+ -+ BUG_ON((index+4) >= BCM2048_MAX_RDS_RT); -+ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == -+ BCM2048_RDS_BLOCK_D) -+ bcm2048_parse_rds_rt_block(bdev, i, index+2, crc); -+} -+ -+static int bcm2048_parse_rds_rt(struct bcm2048_device *bdev) -+{ -+ int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0; -+ -+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) { -+ -+ if (match_b) { -+ match_b = 0; -+ index = bcm2048_parse_rt_match_b(bdev, i); -+ if (index >= 0 && index <= (BCM2048_MAX_RDS_RT - 5)) -+ match_c = 1; -+ continue; -+ } else if (match_c) { -+ match_c = 0; -+ if (bcm2048_parse_rt_match_c(bdev, i, index)) -+ match_d = 1; -+ continue; -+ } else if (match_d) { -+ match_d = 0; -+ bcm2048_parse_rt_match_d(bdev, i, index); -+ continue; -+ } -+ -+ /* Skip erroneous blocks due to messed up A block altogether */ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) -+ == BCM2048_RDS_BLOCK_A) { -+ crc = bcm2048_rds_block_crc(bdev, i); -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ continue; -+ /* Syncronize to a good RDS PI */ -+ if (((bdev->rds_info.radio_text[i+1] << 8) + -+ bdev->rds_info.radio_text[i+2]) == -+ bdev->rds_info.rds_pi) -+ match_b = 1; -+ } -+ } -+ -+ return 0; -+} -+ -+static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i, -+ int index, int crc) -+{ -+ /* Good data will overwrite poor data */ -+ if (crc) { -+ if (!bdev->rds_info.rds_ps[index]) -+ bdev->rds_info.rds_ps[index] = -+ 
bdev->rds_info.radio_text[i+1]; -+ if (!bdev->rds_info.rds_ps[index+1]) -+ bdev->rds_info.rds_ps[index+1] = -+ bdev->rds_info.radio_text[i+2]; -+ } else { -+ bdev->rds_info.rds_ps[index] = bdev->rds_info.radio_text[i+1]; -+ bdev->rds_info.rds_ps[index+1] = -+ bdev->rds_info.radio_text[i+2]; -+ } -+} -+ -+static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i, -+ int index) -+{ -+ int crc; -+ -+ crc = bcm2048_rds_block_crc(bdev, i); -+ -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ return 0; -+ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == -+ BCM2048_RDS_BLOCK_C) -+ return 1; -+ -+ return 0; -+} -+ -+static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i, -+ int index) -+{ -+ int crc; -+ -+ crc = bcm2048_rds_block_crc(bdev, i); -+ -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ return; -+ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == -+ BCM2048_RDS_BLOCK_D) -+ bcm2048_parse_rds_ps_block(bdev, i, index, crc); -+} -+ -+static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i) -+{ -+ int crc, index, ps_id, ps_group; -+ -+ crc = bcm2048_rds_block_crc(bdev, i); -+ -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ return -EIO; -+ -+ /* Block B Radio PS match */ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == -+ BCM2048_RDS_BLOCK_B) { -+ ps_id = bdev->rds_info.radio_text[i+1] & -+ BCM2048_RDS_BLOCK_MASK; -+ ps_group = bdev->rds_info.radio_text[i+1] & -+ BCM2048_RDS_GROUP_AB_MASK; -+ -+ /* Poor RSSI will lead to RDS data corruption -+ * So using 3 (same) sequential values to justify major changes -+ */ -+ if (ps_group != bdev->rds_info.rds_ps_group) { -+ if (crc == BCM2048_RDS_CRC_NONE) { -+ bdev->rds_info.rds_ps_group_cnt++; -+ if (bdev->rds_info.rds_ps_group_cnt > 2) { -+ bdev->rds_info.rds_ps_group = ps_group; -+ bdev->rds_info.rds_ps_group_cnt = 0; -+ dev_err(&bdev->client->dev, -+ "RDS PS Group change!\n"); -+ } else { -+ return -EIO; -+ } -+ } else { -+ bdev->rds_info.rds_ps_group_cnt = 0; -+ } -+ } -+ -+ if (ps_id == BCM2048_RDS_PS) { -+ index = bdev->rds_info.radio_text[i+2] & -+ BCM2048_RDS_PS_INDEX; -+ index <<= 1; -+ return index; -+ } -+ } -+ -+ return -EIO; -+} -+ -+static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev) -+{ -+ int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0; -+ -+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) { -+ -+ if (match_b) { -+ match_b = 0; -+ index = bcm2048_parse_ps_match_b(bdev, i); -+ if (index >= 0 && index < (BCM2048_MAX_RDS_PS - 1)) -+ match_c = 1; -+ continue; -+ } else if (match_c) { -+ match_c = 0; -+ if (bcm2048_parse_ps_match_c(bdev, i, index)) -+ match_d = 1; -+ continue; -+ } else if (match_d) { -+ match_d = 0; -+ bcm2048_parse_ps_match_d(bdev, i, index); -+ continue; -+ } -+ -+ /* Skip erroneous blocks due to messed up A block altogether */ -+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) -+ == BCM2048_RDS_BLOCK_A) { -+ crc = bcm2048_rds_block_crc(bdev, i); -+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE) -+ continue; -+ /* Syncronize to a good RDS PI */ -+ if (((bdev->rds_info.radio_text[i+1] << 8) + -+ bdev->rds_info.radio_text[i+2]) == -+ bdev->rds_info.rds_pi) -+ match_b = 1; -+ } -+ } -+} -+ -+static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev) -+{ -+ int err; -+ -+ mutex_lock(&bdev->mutex); -+ -+ err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA, -+ bdev->rds_info.radio_text, bdev->fifo_size); -+ if (err != 2) { -+ dev_err(&bdev->client->dev, "RDS Read problem\n"); -+ 
return; -+ } -+ -+ bdev->rds_info.text_len = bdev->fifo_size; -+ -+ bcm2048_parse_rds_pi(bdev); -+ bcm2048_parse_rds_rt(bdev); -+ bcm2048_parse_rds_ps(bdev); -+ -+ mutex_unlock(&bdev->mutex); -+} -+ -+static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data) -+{ -+ int err = 0, i, p = 0; -+ char data_buffer[BCM2048_MAX_RDS_RADIO_TEXT*5]; -+ -+ mutex_lock(&bdev->mutex); -+ -+ if (!bdev->rds_info.text_len) { -+ err = -EINVAL; -+ goto unlock; -+ } -+ -+ memset(data_buffer, 0, sizeof(data_buffer)); -+ -+ for (i = 0; i < bdev->rds_info.text_len; i++) { -+ p += sprintf(data_buffer+p, "%x ", -+ bdev->rds_info.radio_text[i]); -+ } -+ -+ memcpy(data, data_buffer, p); -+ -+unlock: -+ mutex_unlock(&bdev->mutex); -+ return err; -+} -+ -+/* -+ * BCM2048 default initialization sequence -+ */ -+static int bcm2048_init(struct bcm2048_device *bdev) -+{ -+ int err; -+ -+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON); -+ if (err < 0) -+ goto exit; -+ -+ err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC); -+ if (err < 0) -+ goto exit; -+ -+ err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT | -+ BCM2048_DAC_OUTPUT_RIGHT); -+ -+exit: -+ return err; -+} -+ -+/* -+ * BCM2048 default deinitialization sequence -+ */ -+static int bcm2048_deinit(struct bcm2048_device *bdev) -+{ -+ int err; -+ -+ err = bcm2048_set_audio_route(bdev, 0); -+ if (err < 0) -+ goto exit; -+ -+ err = bcm2048_set_dac_output(bdev, 0); -+ if (err < 0) -+ goto exit; -+ -+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); -+ if (err < 0) -+ goto exit; -+ -+exit: -+ return err; -+} -+ -+/* -+ * BCM2048 probe sequence -+ */ -+static int bcm2048_probe(struct bcm2048_device *bdev) -+{ -+ int err; -+ -+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_checkrev(bdev); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_set_fm_search_rssi_threshold(bdev, -+ BCM2048_DEFAULT_RSSI_THRESHOLD); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_get_rds_wline(bdev); -+ if (err < BCM2048_DEFAULT_RDS_WLINE) -+ err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE); -+ if (err < 0) -+ goto unlock; -+ -+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); -+ -+unlock: -+ return err; -+} -+ -+/* -+ * BCM2048 workqueue handler -+ */ -+static void bcm2048_work(struct work_struct *work) -+{ -+ struct bcm2048_device *bdev; -+ u8 flag_lsb, flag_msb, flags; -+ -+ bdev = container_of(work, struct bcm2048_device, work); -+ bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb); -+ bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb); -+ -+ if (flag_lsb & (BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED | -+ BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)) { -+ -+ if (flag_lsb & BCM2048_FM_FLAG_SEARCH_TUNE_FAIL) -+ bdev->scan_state = BCM2048_SCAN_FAIL; -+ else -+ bdev->scan_state = BCM2048_SCAN_OK; -+ -+ complete(&bdev->compl); -+ } -+ -+ if (flag_msb & BCM2048_RDS_FLAG_FIFO_WLINE) { -+ bcm2048_rds_fifo_receive(bdev); -+ if (bdev->rds_state) { -+ flags = BCM2048_RDS_FLAG_FIFO_WLINE; -+ bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, -+ flags); -+ } -+ -+ } -+} -+ -+/* -+ * BCM2048 interrupt handler -+ */ -+static irqreturn_t bcm2048_handler(int irq, void *dev) -+{ -+ 
struct bcm2048_device *bdev = dev; -+ -+ dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n"); -+ if (bdev->power_state) -+ schedule_work(&bdev->work); -+ -+ return IRQ_HANDLED; -+} -+ -+/* -+ * BCM2048 sysfs interface definitions -+ */ -+#define property_write(prop, type, mask, check) \ -+static ssize_t bcm2048_##prop##_write(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, \ -+ size_t count) \ -+{ \ -+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \ -+ type value; \ -+ int err; \ -+ \ -+ if (!bdev) \ -+ return -ENODEV; \ -+ \ -+ sscanf(buf, mask, &value); \ -+ \ -+ if (check) \ -+ return -EDOM; \ -+ \ -+ err = bcm2048_set_##prop(bdev, value); \ -+ \ -+ return err < 0 ? err : count; \ -+} -+ -+#define property_read(prop, size, mask) \ -+static ssize_t bcm2048_##prop##_read(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \ -+ size value; \ -+ \ -+ if (!bdev) \ -+ return -ENODEV; \ -+ \ -+ value = bcm2048_get_##prop(bdev); \ -+ \ -+ if (value >= 0) \ -+ value = sprintf(buf, mask "\n", value); \ -+ \ -+ return value; \ -+} -+ -+#define property_signed_read(prop, size, mask) \ -+static ssize_t bcm2048_##prop##_read(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \ -+ size value; \ -+ \ -+ if (!bdev) \ -+ return -ENODEV; \ -+ \ -+ value = bcm2048_get_##prop(bdev); \ -+ \ -+ value = sprintf(buf, mask "\n", value); \ -+ \ -+ return value; \ -+} -+ -+#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \ -+property_write(prop, signal size, mask, check) \ -+property_read(prop, size, mask) -+ -+#define property_str_read(prop, size) \ -+static ssize_t bcm2048_##prop##_read(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \ -+ int count; \ -+ u8 *out; \ -+ \ -+ if (!bdev) \ -+ return -ENODEV; \ -+ \ -+ out = kzalloc(size + 1, GFP_KERNEL); \ -+ if (!out) \ -+ return -ENOMEM; \ -+ \ -+ bcm2048_get_##prop(bdev, out); \ -+ count = sprintf(buf, "%s\n", out); \ -+ \ -+ kfree(out); \ -+ \ -+ return count; \ -+} -+ -+DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0) -+ -+DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3) -+ -+DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0) -+DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0) -+property_read(rds_pi, unsigned int, "%x") -+property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 
1)) -+property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1)) -+ -+property_read(fm_rds_flags, unsigned int, "%u") -+property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT*5) -+ -+property_read(region_bottom_frequency, unsigned int, "%u") -+property_read(region_top_frequency, unsigned int, "%u") -+property_signed_read(fm_carrier_error, int, "%d") -+property_signed_read(fm_rssi, int, "%d") -+DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0) -+ -+static struct device_attribute attrs[] = { -+ __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read, -+ bcm2048_power_state_write), -+ __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read, -+ bcm2048_mute_write), -+ __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read, -+ bcm2048_audio_route_write), -+ __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read, -+ bcm2048_dac_output_write), -+ __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR, -+ bcm2048_fm_hi_lo_injection_read, -+ bcm2048_fm_hi_lo_injection_write), -+ __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read, -+ bcm2048_fm_frequency_write), -+ __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR, -+ bcm2048_fm_af_frequency_read, -+ bcm2048_fm_af_frequency_write), -+ __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read, -+ bcm2048_fm_deemphasis_write), -+ __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read, -+ bcm2048_fm_rds_mask_write), -+ __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR, -+ bcm2048_fm_best_tune_mode_read, -+ bcm2048_fm_best_tune_mode_write), -+ __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR, -+ bcm2048_fm_search_rssi_threshold_read, -+ bcm2048_fm_search_rssi_threshold_write), -+ __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR, -+ bcm2048_fm_search_mode_direction_read, -+ bcm2048_fm_search_mode_direction_write), -+ __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR, -+ bcm2048_fm_search_tune_mode_read, -+ bcm2048_fm_search_tune_mode_write), -+ __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read, -+ bcm2048_rds_write), -+ __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR, -+ bcm2048_rds_b_block_mask_read, -+ bcm2048_rds_b_block_mask_write), -+ __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR, -+ bcm2048_rds_b_block_match_read, -+ bcm2048_rds_b_block_match_write), -+ __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read, -+ bcm2048_rds_pi_mask_write), -+ __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read, -+ bcm2048_rds_pi_match_write), -+ __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read, -+ bcm2048_rds_wline_write), -+ __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL), -+ __ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL), -+ __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL), -+ __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL), -+ __ATTR(region_bottom_frequency, S_IRUGO, -+ bcm2048_region_bottom_frequency_read, NULL), -+ __ATTR(region_top_frequency, S_IRUGO, -+ bcm2048_region_top_frequency_read, NULL), -+ __ATTR(fm_carrier_error, S_IRUGO, -+ bcm2048_fm_carrier_error_read, NULL), -+ __ATTR(fm_rssi, S_IRUGO, -+ bcm2048_fm_rssi_read, NULL), -+ __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read, -+ bcm2048_region_write), -+ __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL), -+}; -+ -+static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev, -+ int size) -+{ -+ int i; -+ -+ for (i = 0; i < size; i++) -+ device_remove_file(&bdev->client->dev, &attrs[i]); -+ -+ return 0; -+} -+ -+static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev) -+{ -+ int err = 0; -+ 
int i; -+ -+ for (i = 0; i < ARRAY_SIZE(attrs); i++) { -+ if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) { -+ dev_err(&bdev->client->dev, -+ "could not register sysfs entry\n"); -+ err = -EBUSY; -+ bcm2048_sysfs_unregister_properties(bdev, i); -+ break; -+ } -+ } -+ -+ return err; -+} -+ -+/* -+ * bcm2048_fops - file operations interface -+ */ -+static const struct file_operations bcm2048_fops = { -+ .owner = THIS_MODULE, -+ .llseek = no_llseek, -+ .ioctl = video_ioctl2, -+ .compat_ioctl = v4l_compat_ioctl32, -+}; -+ -+/* -+ * Video4Linux Interface -+ */ -+static struct v4l2_queryctrl bcm2048_v4l2_queryctrl[] = { -+ { -+ .id = V4L2_CID_AUDIO_VOLUME, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_BALANCE, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_BASS, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_TREBLE, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_MUTE, -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .name = "Mute", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 1, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_LOUDNESS, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+}; -+ -+static int bcm2048_vidioc_querycap(struct file *file, void *priv, -+ struct v4l2_capability *capability) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ -+ strlcpy(capability->driver, BCM2048_DRIVER_NAME, -+ sizeof(capability->driver)); -+ strlcpy(capability->card, BCM2048_DRIVER_CARD, -+ sizeof(capability->card)); -+ snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr); -+ capability->version = BCM2048_DRIVER_VERSION; -+ capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO | -+ V4L2_CAP_HW_FREQ_SEEK; -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_g_input(struct file *filp, void *priv, -+ unsigned int *i) -+{ -+ *i = 0; -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_s_input(struct file *filp, void *priv, -+ unsigned int i) -+{ -+ if (i) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_queryctrl(struct file *file, void *priv, -+ struct v4l2_queryctrl *qc) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(bcm2048_v4l2_queryctrl); i++) { -+ if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) { -+ memcpy(qc, &(bcm2048_v4l2_queryctrl[i]), sizeof(*qc)); -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+static int bcm2048_vidioc_g_ctrl(struct file *file, void *priv, -+ struct v4l2_control *ctrl) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ int err = 0; -+ -+ if (!bdev) -+ return -ENODEV; -+ -+ switch (ctrl->id) { -+ case V4L2_CID_AUDIO_MUTE: -+ err = bcm2048_get_mute(bdev); -+ if (err >= 0) -+ ctrl->value = err; -+ break; -+ } -+ -+ return err; -+} -+ -+static int bcm2048_vidioc_s_ctrl(struct file *file, void *priv, -+ struct v4l2_control *ctrl) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ int err = 0; -+ -+ if (!bdev) -+ return -ENODEV; -+ -+ switch (ctrl->id) { -+ case V4L2_CID_AUDIO_MUTE: -+ if (ctrl->value) { -+ if (bdev->power_state) { -+ err = bcm2048_set_mute(bdev, ctrl->value); -+ err |= bcm2048_deinit(bdev); -+ } -+ } else { -+ if (!bdev->power_state) { -+ err = bcm2048_init(bdev); -+ err |= bcm2048_set_mute(bdev, ctrl->value); -+ } -+ } -+ break; -+ } -+ -+ return err; -+} -+ -+static int bcm2048_vidioc_g_audio(struct file *file, void *priv, -+ struct v4l2_audio *audio) -+{ -+ if (audio->index > 1) -+ return -EINVAL; -+ -+ strncpy(audio->name, 
"Radio", 32); -+ audio->capability = V4L2_AUDCAP_STEREO; -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_s_audio(struct file *file, void *priv, -+ struct v4l2_audio *audio) -+{ -+ if (audio->index != 0) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_g_tuner(struct file *file, void *priv, -+ struct v4l2_tuner *tuner) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ s8 f_error; -+ s8 rssi; -+ -+ if (!bdev) -+ return -ENODEV; -+ -+ if (tuner->index > 0) -+ return -EINVAL; -+ -+ strncpy(tuner->name, "FM Receiver", 32); -+ tuner->type = V4L2_TUNER_RADIO; -+ tuner->rangelow = -+ dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev)); -+ tuner->rangehigh = -+ dev_to_v4l2(bcm2048_get_region_top_frequency(bdev)); -+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO; -+ tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW; -+ tuner->audmode = V4L2_TUNER_MODE_STEREO; -+ tuner->afc = 0; -+ if (bdev->power_state) { -+ /* Report frequencies with high carrier errors as zero -+ * signal level -+ */ -+ f_error = bcm2048_get_fm_carrier_error(bdev); -+ if (f_error < BCM2048_FREQ_ERROR_FLOOR || -+ f_error > BCM2048_FREQ_ERROR_ROOF) { -+ tuner->signal = 0; -+ } else { -+ /* RSSI level -60 dB is defined to report full -+ * signal strenght -+ */ -+ rssi = bcm2048_get_fm_rssi(bdev); -+ if (rssi >= BCM2048_RSSI_LEVEL_BASE) { -+ tuner->signal = 0xFFFF; -+ } else if (rssi > BCM2048_RSSI_LEVEL_ROOF) { -+ tuner->signal = (rssi + -+ BCM2048_RSSI_LEVEL_ROOF_NEG) -+ * BCM2048_SIGNAL_MULTIPLIER; -+ } else { -+ tuner->signal = 0; -+ } -+ } -+ } else { -+ tuner->signal = 0; -+ } -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_s_tuner(struct file *file, void *priv, -+ struct v4l2_tuner *tuner) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ -+ if (!bdev) -+ return -ENODEV; -+ -+ if (tuner->index > 0) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int bcm2048_vidioc_g_frequency(struct file *file, void *priv, -+ struct v4l2_frequency *freq) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ int err = 0; -+ int f; -+ -+ if (!bdev->power_state) -+ return -ENODEV; -+ -+ freq->type = V4L2_TUNER_RADIO; -+ f = bcm2048_get_fm_frequency(bdev); -+ -+ if (f < 0) -+ err = f; -+ else -+ freq->frequency = dev_to_v4l2(f); -+ -+ return err; -+} -+ -+static int bcm2048_vidioc_s_frequency(struct file *file, void *priv, -+ struct v4l2_frequency *freq) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ int err; -+ -+ if (freq->type != V4L2_TUNER_RADIO) -+ return -EINVAL; -+ -+ if (!bdev->power_state) -+ return -ENODEV; -+ -+ err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency)); -+ err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE); -+ -+ return err; -+} -+ -+static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv, -+ struct v4l2_hw_freq_seek *seek) -+{ -+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); -+ int err; -+ -+ if (!bdev->power_state) -+ return -ENODEV; -+ -+ if ((seek->tuner != 0) || (seek->type != V4L2_TUNER_RADIO)) -+ return -EINVAL; -+ -+ err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward); -+ err |= bcm2048_set_fm_search_tune_mode(bdev, -+ BCM2048_FM_AUTO_SEARCH_MODE); -+ -+ return err; -+} -+ -+static struct v4l2_ioctl_ops bcm2048_ioctl_ops = { -+ .vidioc_querycap = bcm2048_vidioc_querycap, -+ .vidioc_g_input = bcm2048_vidioc_g_input, -+ .vidioc_s_input = bcm2048_vidioc_s_input, -+ 
.vidioc_queryctrl = bcm2048_vidioc_queryctrl, -+ .vidioc_g_ctrl = bcm2048_vidioc_g_ctrl, -+ .vidioc_s_ctrl = bcm2048_vidioc_s_ctrl, -+ .vidioc_g_audio = bcm2048_vidioc_g_audio, -+ .vidioc_s_audio = bcm2048_vidioc_s_audio, -+ .vidioc_g_tuner = bcm2048_vidioc_g_tuner, -+ .vidioc_s_tuner = bcm2048_vidioc_s_tuner, -+ .vidioc_g_frequency = bcm2048_vidioc_g_frequency, -+ .vidioc_s_frequency = bcm2048_vidioc_s_frequency, -+ .vidioc_s_hw_freq_seek = bcm2048_vidioc_s_hw_freq_seek, -+}; -+ -+/* -+ * bcm2048_viddev_template - video device interface -+ */ -+static struct video_device bcm2048_viddev_template = { -+ .fops = &bcm2048_fops, -+ .name = BCM2048_DRIVER_NAME, -+ .vfl_type = VID_TYPE_TUNER, -+ .release = video_device_release, -+ .ioctl_ops = &bcm2048_ioctl_ops, -+}; -+ -+/* -+ * I2C driver interface -+ */ -+static int bcm2048_i2c_driver_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct bcm2048_device *bdev; -+ int err, skip_release = 0; -+ -+ bdev = kzalloc(sizeof *bdev, GFP_KERNEL); -+ if (!bdev) { -+ dev_dbg(&client->dev, "Failed to alloc video device.\n"); -+ err = -ENOMEM; -+ goto exit; -+ } -+ -+ bdev->videodev = video_device_alloc(); -+ if (!bdev->videodev) { -+ dev_dbg(&client->dev, "Failed to alloc video device.\n"); -+ err = -ENOMEM; -+ goto free_bdev; -+ } -+ -+ bdev->client = client; -+ i2c_set_clientdata(client, bdev); -+ mutex_init(&bdev->mutex); -+ init_completion(&bdev->compl); -+ INIT_WORK(&bdev->work, bcm2048_work); -+ -+ if (client->irq) { -+ err = request_irq(client->irq, -+ bcm2048_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED, -+ client->name, bdev); -+ if (err < 0) { -+ dev_err(&client->dev, "Could not request IRQ\n"); -+ goto free_vdev; -+ } -+ dev_dbg(&client->dev, "IRQ requested.\n"); -+ } else { -+ dev_dbg(&client->dev, "IRQ not configure. 
Using timeouts.\n"); -+ } -+ -+ memcpy(bdev->videodev, &bcm2048_viddev_template, -+ sizeof(bcm2048_viddev_template)); -+ video_set_drvdata(bdev->videodev, bdev); -+ if (video_register_device(bdev->videodev, VFL_TYPE_RADIO, radio_nr)) { -+ dev_dbg(&client->dev, "Could not register video device.\n"); -+ err = -EIO; -+ goto free_irq; -+ } -+ -+ err = bcm2048_sysfs_register_properties(bdev); -+ if (err < 0) { -+ dev_dbg(&client->dev, "Could not register sysfs interface.\n"); -+ goto free_registration; -+ } -+ -+ err = bcm2048_probe(bdev); -+ if (err < 0) { -+ dev_dbg(&client->dev, "Failed to probe device information.\n"); -+ goto free_sysfs; -+ } -+ -+ return 0; -+ -+free_sysfs: -+ bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs)); -+free_registration: -+ video_unregister_device(bdev->videodev); -+ /* video_unregister_device frees bdev->videodev */ -+ bdev->videodev = NULL; -+ skip_release = 1; -+free_irq: -+ if (client->irq) -+ free_irq(client->irq, bdev); -+free_vdev: -+ if (!skip_release) -+ video_device_release(bdev->videodev); -+ i2c_set_clientdata(client, NULL); -+free_bdev: -+ kfree(bdev); -+exit: -+ return err; -+} -+ -+static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client) -+{ -+ struct bcm2048_device *bdev = i2c_get_clientdata(client); -+ struct video_device *vd; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ -+ if (bdev) { -+ vd = bdev->videodev; -+ -+ bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs)); -+ -+ if (vd) -+ video_unregister_device(vd); -+ -+ if (bdev->power_state) -+ bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); -+ -+ if (client->irq > 0) -+ free_irq(client->irq, bdev); -+ -+ cancel_work_sync(&bdev->work); -+ -+ kfree(bdev); -+ } -+ -+ i2c_set_clientdata(client, NULL); -+ -+ return 0; -+} -+ -+/* -+ * bcm2048_i2c_driver - i2c driver interface -+ */ -+static const struct i2c_device_id bcm2048_id[] = { -+ { "bcm2048" , 0 }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(i2c, bcm2048_id); -+ -+static struct i2c_driver bcm2048_i2c_driver = { -+ .driver = { -+ .name = BCM2048_DRIVER_NAME, -+ }, -+ .probe = bcm2048_i2c_driver_probe, -+ .remove = __exit_p(bcm2048_i2c_driver_remove), -+ .id_table = bcm2048_id, -+}; -+ -+/* -+ * Module Interface -+ */ -+static int __init bcm2048_module_init(void) -+{ -+ printk(KERN_INFO BCM2048_DRIVER_DESC "\n"); -+ -+ return i2c_add_driver(&bcm2048_i2c_driver); -+} -+module_init(bcm2048_module_init); -+ -+static void __exit bcm2048_module_exit(void) -+{ -+ i2c_del_driver(&bcm2048_i2c_driver); -+} -+module_exit(bcm2048_module_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR(BCM2048_DRIVER_AUTHOR); -+MODULE_DESCRIPTION(BCM2048_DRIVER_DESC); -+MODULE_VERSION("0.0.1"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/radio-si4713.c linux-omap-2.6.28-nokia1/drivers/media/radio/radio-si4713.c ---- linux-omap-2.6.28-omap1/drivers/media/radio/radio-si4713.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/radio-si4713.c 2011-06-22 13:19:32.713063276 +0200 -@@ -0,0 +1,857 @@ -+/* -+ * drivers/media/radio/radio-si4713.c -+ * -+ * Driver for I2C Silicon Labs Si4713 FM Radio Transmitter: -+ * -+ * Copyright (c) 2008 Instituto Nokia de Tecnologia - INdT -+ * Author: Eduardo Valentin -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+/* driver definitions */ -+#define DRIVER_AUTHOR "Eduardo Valentin " -+#define DRIVER_NAME SI4713_NAME -+#define DRIVER_VERSION KERNEL_VERSION(0, 0, 1) -+#define DRIVER_CARD "Silicon Labs Si4713 FM Radio Transmitter" -+#define DRIVER_DESC "I2C driver for Si4713 FM Radio Transmitter" -+ -+/* frequency domain transformation (using times 10 to avoid floats) */ -+#define FREQDEV_UNIT 100000 -+#define FREQV4L2_MULTI 625 -+#define dev_to_v4l2(f) ((f * FREQDEV_UNIT) / FREQV4L2_MULTI) -+#define v4l2_to_dev(f) ((f * FREQV4L2_MULTI) / FREQDEV_UNIT) -+ -+/* kernel includes */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "radio-si4713.h" -+#include "si4713.h" -+ -+/* module parameters */ -+static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */ -+ -+/* -+ * Sysfs properties -+ * Read and write functions -+ */ -+#define property_write(prop, type, mask, check) \ -+static ssize_t si4713_##prop##_write(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, \ -+ size_t count) \ -+{ \ -+ struct si4713_device *sdev = dev_get_drvdata(dev); \ -+ type value; \ -+ int rval; \ -+ \ -+ if (!sdev) \ -+ return -ENODEV; \ -+ \ -+ sscanf(buf, mask, &value); \ -+ \ -+ if (check) \ -+ return -EDOM; \ -+ \ -+ rval = si4713_set_##prop(sdev, value); \ -+ \ -+ return rval < 0 ? rval : count; \ -+} -+ -+#define property_read(prop, size, mask) \ -+static ssize_t si4713_##prop##_read(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct si4713_device *sdev = dev_get_drvdata(dev); \ -+ size value; \ -+ \ -+ if (!sdev) \ -+ return -ENODEV; \ -+ \ -+ value = si4713_get_##prop(sdev); \ -+ \ -+ if (value >= 0) \ -+ value = sprintf(buf, mask "\n", value); \ -+ \ -+ return value; \ -+} -+ -+#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \ -+property_write(prop, signal size, mask, check) \ -+property_read(prop, size, mask) \ -+static DEVICE_ATTR(prop, S_IRUGO | S_IWUSR, si4713_##prop##_read, \ -+ si4713_##prop##_write); -+#define DEFINE_SYSFS_PROPERTY_RO(prop, signal, size, mask) \ -+property_read(prop, size, mask) \ -+static DEVICE_ATTR(prop, S_IRUGO, si4713_##prop##_read, NULL); -+ -+ -+#define property_str_write(prop, size) \ -+static ssize_t si4713_##prop##_write(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, \ -+ size_t count) \ -+{ \ -+ struct si4713_device *sdev = dev_get_drvdata(dev); \ -+ int rval; \ -+ u8 *in; \ -+ \ -+ if (!sdev) \ -+ return -ENODEV; \ -+ \ -+ in = kzalloc(size + 1, GFP_KERNEL); \ -+ if (!in) \ -+ return -ENOMEM; \ -+ \ -+ /* We don't want to miss the spaces */ \ -+ strncpy(in, buf, size); \ -+ rval = si4713_set_##prop(sdev, in); \ -+ \ -+ kfree(in); \ -+ \ -+ return rval < 0 ? 
rval : count; \ -+} -+ -+#define property_str_read(prop, size) \ -+static ssize_t si4713_##prop##_read(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+{ \ -+ struct si4713_device *sdev = dev_get_drvdata(dev); \ -+ int count; \ -+ u8 *out; \ -+ \ -+ if (!sdev) \ -+ return -ENODEV; \ -+ \ -+ out = kzalloc(size + 1, GFP_KERNEL); \ -+ if (!out) \ -+ return -ENOMEM; \ -+ \ -+ si4713_get_##prop(sdev, out); \ -+ count = sprintf(buf, "%s\n", out); \ -+ \ -+ kfree(out); \ -+ \ -+ return count; \ -+} -+ -+#define DEFINE_SYSFS_PROPERTY_STR(prop, size) \ -+property_str_write(prop, size) \ -+property_str_read(prop, size) \ -+static DEVICE_ATTR(prop, S_IRUGO | S_IWUSR, si4713_##prop##_read, \ -+ si4713_##prop##_write); -+ -+/* -+ * Power level property -+ */ -+/* power_level (rw) 88 - 115 or 0 */ -+static ssize_t si4713_power_level_write(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ struct si4713_device *sdev = dev_get_drvdata(dev); -+ unsigned int p; -+ int rval, pl; -+ -+ if (!sdev) { -+ rval = -ENODEV; -+ goto exit; -+ } -+ -+ sscanf(buf, "%u", &p); -+ -+ pl = si4713_get_power_level(sdev); -+ if (pl < 0) { -+ rval = pl; -+ goto exit; -+ } -+ -+ rval = si4713_set_power_level(sdev, p); -+ -+exit: -+ return rval < 0 ? rval : count; -+} -+property_read(power_level, unsigned int, "%u") -+static DEVICE_ATTR(power_level, S_IRUGO | S_IWUSR, si4713_power_level_read, -+ si4713_power_level_write); -+ -+DEFINE_SYSFS_PROPERTY(antenna_capacitor, unsigned, int, "%u", -+ value > SI4713_MAX_ANTCAP) -+/* -+ * RDS properties -+ */ -+/* rds_pi (rw) 0 - 0xFFFF */ -+DEFINE_SYSFS_PROPERTY(rds_pi, unsigned, int, "%x", 0) -+/* rds_pty (rw) 0 - 0x1F */ -+DEFINE_SYSFS_PROPERTY(rds_pty, unsigned, int, "%u", value > MAX_RDS_PTY) -+/* rds_enabled (rw) 0 - 1 */ -+DEFINE_SYSFS_PROPERTY(rds_enabled, unsigned, int, "%u", 0) -+/* rds_ps_name (rw) strlen (8 - 96) */ -+DEFINE_SYSFS_PROPERTY_STR(rds_ps_name, MAX_RDS_PS_NAME) -+/* rds_radio_text (rw) strlen (0 - 384) */ -+DEFINE_SYSFS_PROPERTY_STR(rds_radio_text, MAX_RDS_RADIO_TEXT) -+ -+/* -+ * Limiter properties -+ */ -+/* limiter_release_time (rw) 0 - 102390 */ -+DEFINE_SYSFS_PROPERTY(limiter_release_time, unsigned, long, "%lu", -+ value > MAX_LIMITER_RELEASE_TIME) -+/* limiter_deviation (rw) 0 - 90000 */ -+DEFINE_SYSFS_PROPERTY(limiter_deviation, unsigned, long, "%lu", -+ value > MAX_LIMITER_DEVIATION) -+/* limiter_enabled (rw) 0 - 1 */ -+DEFINE_SYSFS_PROPERTY(limiter_enabled, unsigned, int, "%u", 0) -+ -+/* -+ * Pilot tone properties -+ */ -+/* pilot_frequency (rw) 0 - 19000 */ -+DEFINE_SYSFS_PROPERTY(pilot_frequency, unsigned, int, "%u", -+ value > MAX_PILOT_FREQUENCY) -+/* pilot_deviation (rw) 0 - 90000 */ -+DEFINE_SYSFS_PROPERTY(pilot_deviation, unsigned, long, "%lu", -+ value > MAX_PILOT_DEVIATION) -+/* pilot_enabled (rw) 0 - 1 */ -+DEFINE_SYSFS_PROPERTY(pilot_enabled, unsigned, int, "%u", 0) -+ -+/* -+ * Stereo properties -+ */ -+/* stereo_enabled (rw) 0 - 1 */ -+DEFINE_SYSFS_PROPERTY(stereo_enabled, unsigned, int, "%u", 0) -+ -+/* -+ * Audio Compression properties -+ */ -+/* acomp_release_time (rw) 0 - 1000000 */ -+DEFINE_SYSFS_PROPERTY(acomp_release_time, unsigned, long, "%lu", -+ value > MAX_ACOMP_RELEASE_TIME) -+/* acomp_attack_time (rw) 0 - 5000 */ -+DEFINE_SYSFS_PROPERTY(acomp_attack_time, unsigned, int, "%u", -+ value > MAX_ACOMP_ATTACK_TIME) -+/* acomp_threshold (rw) -40 - 0 */ -+property_write(acomp_threshold, int, "%d", -+ value > MAX_ACOMP_THRESHOLD || -+ value < MIN_ACOMP_THRESHOLD) -+ 
-+static ssize_t si4713_acomp_threshold_read(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct si4713_device *sdev = dev_get_drvdata(dev); -+ int count; -+ s8 thres; -+ -+ if (!sdev) -+ return -ENODEV; -+ -+ count = si4713_get_acomp_threshold(sdev, &thres); -+ -+ if (count >= 0) -+ count = sprintf(buf, "%d\n", thres); -+ -+ return count; -+} -+static DEVICE_ATTR(acomp_threshold, S_IRUGO | S_IWUSR, -+ si4713_acomp_threshold_read, -+ si4713_acomp_threshold_write); -+ -+/* acomp_gain (rw) 0 - 20 */ -+DEFINE_SYSFS_PROPERTY(acomp_gain, unsigned, int, "%u", value > MAX_ACOMP_GAIN) -+/* acomp_enabled (rw) 0 - 1 */ -+DEFINE_SYSFS_PROPERTY(acomp_enabled, unsigned, int, "%u", 0) -+ -+/* Tune_measure (rw) */ -+DEFINE_SYSFS_PROPERTY(tune_measure, unsigned, int, "%u", 0) -+ -+/* -+ * Region properties -+ */ -+DEFINE_SYSFS_PROPERTY_RO(region_bottom_frequency, unsigned, int, "%u") -+DEFINE_SYSFS_PROPERTY_RO(region_top_frequency, unsigned, int, "%u") -+DEFINE_SYSFS_PROPERTY_RO(region_channel_spacing, unsigned, int, "%u") -+DEFINE_SYSFS_PROPERTY(region_preemphasis, unsigned, int, "%u", -+ ((value != PREEMPHASIS_USA) && -+ (value != PREEMPHASIS_EU) && -+ (value != PREEMPHASIS_DISABLED))) -+DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0) -+ -+/* -+ * Tone properties -+ */ -+/* tone_frequency (rw) 0 - 19000 */ -+DEFINE_SYSFS_PROPERTY(tone_frequency, unsigned, int, "%u", -+ value > MAX_TONE_FREQUENCY) -+/* tone_deviation (rw) 0 - 90000 */ -+DEFINE_SYSFS_PROPERTY(tone_deviation, unsigned, long, "%lu", -+ value > MAX_TONE_DEVIATION) -+/* tone_on_time (rw) 0 - 65535 */ -+DEFINE_SYSFS_PROPERTY(tone_on_time, unsigned, int, "%u", -+ value > MAX_TONE_ON_TIME) -+/* tone_off_time (rw) 0 - 65535 */ -+DEFINE_SYSFS_PROPERTY(tone_off_time, unsigned, int, "%u", -+ value > MAX_TONE_OFF_TIME) -+ -+static struct attribute *attrs[] = { -+ &dev_attr_power_level.attr, -+ &dev_attr_antenna_capacitor.attr, -+ &dev_attr_rds_pi.attr, -+ &dev_attr_rds_pty.attr, -+ &dev_attr_rds_ps_name.attr, -+ &dev_attr_rds_radio_text.attr, -+ &dev_attr_rds_enabled.attr, -+ &dev_attr_limiter_release_time.attr, -+ &dev_attr_limiter_deviation.attr, -+ &dev_attr_limiter_enabled.attr, -+ &dev_attr_pilot_frequency.attr, -+ &dev_attr_pilot_deviation.attr, -+ &dev_attr_pilot_enabled.attr, -+ &dev_attr_stereo_enabled.attr, -+ &dev_attr_acomp_release_time.attr, -+ &dev_attr_acomp_attack_time.attr, -+ &dev_attr_acomp_threshold.attr, -+ &dev_attr_acomp_gain.attr, -+ &dev_attr_acomp_enabled.attr, -+ &dev_attr_region_bottom_frequency.attr, -+ &dev_attr_region_top_frequency.attr, -+ &dev_attr_region_preemphasis.attr, -+ &dev_attr_region_channel_spacing.attr, -+ &dev_attr_region.attr, -+ &dev_attr_tune_measure.attr, -+ &dev_attr_tone_frequency.attr, -+ &dev_attr_tone_deviation.attr, -+ &dev_attr_tone_on_time.attr, -+ &dev_attr_tone_off_time.attr, -+ NULL, -+}; -+ -+static const struct attribute_group attr_group = { -+ .attrs = attrs, -+}; -+ -+static irqreturn_t si4713_handler(int irq, void *dev) -+{ -+ struct si4713_device *sdev = dev; -+ -+ dev_dbg(&sdev->client->dev, "IRQ called, signaling completion work\n"); -+ complete(&sdev->work); -+ -+ return IRQ_HANDLED; -+} -+ -+/* -+ * si4713_fops - file operations interface -+ */ -+static const struct file_operations si4713_fops = { -+ .owner = THIS_MODULE, -+ .llseek = no_llseek, -+ .ioctl = video_ioctl2, -+ .compat_ioctl = v4l_compat_ioctl32, -+}; -+ -+/* -+ * Video4Linux Interface -+ */ -+/* -+ * si4713_v4l2_queryctrl - query control -+ */ -+static struct v4l2_queryctrl 
si4713_v4l2_queryctrl[] = { -+/* HINT: the disabled controls are only here to satify kradio and such apps */ -+ { -+ .id = V4L2_CID_AUDIO_VOLUME, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_BALANCE, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_BASS, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_TREBLE, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_MUTE, -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .name = "Mute", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 1, -+ }, -+ { -+ .id = V4L2_CID_AUDIO_LOUDNESS, -+ .flags = V4L2_CTRL_FLAG_DISABLED, -+ }, -+}; -+ -+/* -+ * si4713_vidioc_querycap - query device capabilities -+ */ -+static int si4713_vidioc_querycap(struct file *file, void *priv, -+ struct v4l2_capability *capability) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ -+ strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); -+ strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); -+ snprintf(capability->bus_info, 32, "I2C: 0x%X", sdev->client->addr); -+ capability->version = DRIVER_VERSION; -+ capability->capabilities = V4L2_CAP_TUNER; -+ -+ return 0; -+} -+ -+/* -+ * si4713_vidioc_g_input - get input -+ */ -+static int si4713_vidioc_g_input(struct file *filp, void *priv, -+ unsigned int *i) -+{ -+ *i = 0; -+ -+ return 0; -+} -+ -+/* -+ * si4713_vidioc_s_input - set input -+ */ -+static int si4713_vidioc_s_input(struct file *filp, void *priv, unsigned int i) -+{ -+ if (i) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+/* -+ * si4713_vidioc_queryctrl - enumerate control items -+ */ -+static int si4713_vidioc_queryctrl(struct file *file, void *priv, -+ struct v4l2_queryctrl *qc) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(si4713_v4l2_queryctrl); i++) { -+ if (qc->id && qc->id == si4713_v4l2_queryctrl[i].id) { -+ memcpy(qc, &(si4713_v4l2_queryctrl[i]), sizeof(*qc)); -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+/* -+ * si4713_vidioc_g_ctrl - get the value of a control -+ */ -+static int si4713_vidioc_g_ctrl(struct file *file, void *priv, -+ struct v4l2_control *ctrl) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ int rval = 0; -+ -+ if (!sdev) -+ return -ENODEV; -+ -+ switch (ctrl->id) { -+ case V4L2_CID_AUDIO_MUTE: -+ rval = si4713_get_mute(sdev); -+ if (rval >= 0) -+ ctrl->value = rval; -+ break; -+ } -+ -+ return rval; -+} -+ -+/* -+ * si4713_vidioc_s_ctrl - set the value of a control -+ */ -+static int si4713_vidioc_s_ctrl(struct file *file, void *priv, -+ struct v4l2_control *ctrl) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ int rval = 0; -+ -+ if (!sdev) -+ return -ENODEV; -+ -+ switch (ctrl->id) { -+ case V4L2_CID_AUDIO_MUTE: -+ if (ctrl->value) { -+ rval = si4713_set_mute(sdev, ctrl->value); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_power_state(sdev, POWER_DOWN); -+ } else { -+ rval = si4713_set_power_state(sdev, POWER_UP); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_setup(sdev); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_mute(sdev, ctrl->value); -+ } -+ break; -+ } -+ -+exit: -+ return rval; -+} -+ -+/* -+ * si4713_vidioc_g_audio - get audio attributes -+ */ -+static int si4713_vidioc_g_audio(struct file *file, void *priv, -+ struct v4l2_audio *audio) -+{ -+ if (audio->index > 1) -+ return -EINVAL; -+ -+ strncpy(audio->name, "Radio", 32); -+ audio->capability = V4L2_AUDCAP_STEREO; -+ -+ return 0; 
-+} -+ -+/* -+ * si4713_vidioc_s_audio - set audio attributes -+ */ -+static int si4713_vidioc_s_audio(struct file *file, void *priv, -+ struct v4l2_audio *audio) -+{ -+ if (audio->index != 0) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+/* -+ * si4713_vidioc_g_tuner - get tuner attributes -+ */ -+static int si4713_vidioc_g_tuner(struct file *file, void *priv, -+ struct v4l2_tuner *tuner) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ -+ if (!sdev) -+ return -ENODEV; -+ -+ if (tuner->index > 0) -+ return -EINVAL; -+ -+ strncpy(tuner->name, "FM Transmitter", 32); -+ tuner->type = V4L2_TUNER_RADIO; -+ tuner->rangelow = -+ dev_to_v4l2(si4713_get_region_bottom_frequency(sdev) / 10); -+ tuner->rangehigh = -+ dev_to_v4l2(si4713_get_region_top_frequency(sdev) / 10); -+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO; -+ tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW; -+ tuner->audmode = V4L2_TUNER_MODE_STEREO; -+ -+ /* automatic frequency control: -1: freq to low, 1 freq to high */ -+ tuner->afc = 0; -+ -+ return 0; -+} -+ -+/* -+ * si4713_vidioc_s_tuner - set tuner attributes -+ */ -+static int si4713_vidioc_s_tuner(struct file *file, void *priv, -+ struct v4l2_tuner *tuner) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ -+ if (!sdev) -+ return -ENODEV; -+ -+ if (tuner->index > 0) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+/* -+ * si4713_vidioc_g_frequency - get tuner or modulator radio frequency -+ */ -+static int si4713_vidioc_g_frequency(struct file *file, void *priv, -+ struct v4l2_frequency *freq) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ int rval = 0; -+ int f; -+ -+ freq->type = V4L2_TUNER_RADIO; -+ f = si4713_get_frequency(sdev); -+ -+ if (f < 0) -+ rval = f; -+ else -+ freq->frequency = dev_to_v4l2(f); -+ -+ return rval; -+} -+ -+/* -+ * si4713_vidioc_s_frequency - set tuner or modulator radio frequency -+ */ -+static int si4713_vidioc_s_frequency(struct file *file, void *priv, -+ struct v4l2_frequency *freq) -+{ -+ struct si4713_device *sdev = video_get_drvdata(video_devdata(file)); -+ int rval = 0; -+ -+ if (freq->type != V4L2_TUNER_RADIO) { -+ rval = -EINVAL; -+ goto exit; -+ } -+ -+ rval = si4713_set_frequency(sdev, v4l2_to_dev(freq->frequency)); -+ -+exit: -+ return rval; -+} -+ -+static struct v4l2_ioctl_ops si4713_ioctl_ops = { -+ .vidioc_querycap = si4713_vidioc_querycap, -+ .vidioc_g_input = si4713_vidioc_g_input, -+ .vidioc_s_input = si4713_vidioc_s_input, -+ .vidioc_queryctrl = si4713_vidioc_queryctrl, -+ .vidioc_g_ctrl = si4713_vidioc_g_ctrl, -+ .vidioc_s_ctrl = si4713_vidioc_s_ctrl, -+ .vidioc_g_audio = si4713_vidioc_g_audio, -+ .vidioc_s_audio = si4713_vidioc_s_audio, -+ .vidioc_g_tuner = si4713_vidioc_g_tuner, -+ .vidioc_s_tuner = si4713_vidioc_s_tuner, -+ .vidioc_g_frequency = si4713_vidioc_g_frequency, -+ .vidioc_s_frequency = si4713_vidioc_s_frequency, -+}; -+ -+/* -+ * si4713_viddev_tamples - video device interface -+ */ -+static struct video_device si4713_viddev_template = { -+ .fops = &si4713_fops, -+ .name = DRIVER_NAME, -+ .vfl_type = VID_TYPE_TUNER, -+ .release = video_device_release, -+ .ioctl_ops = &si4713_ioctl_ops, -+}; -+ -+/* -+ * I2C driver interface -+ */ -+/* -+ * si4713_i2c_driver_probe - probe for the device -+ */ -+static int si4713_i2c_driver_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct si4713_device *sdev; -+ int rval, skip_release = 0; -+ -+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL); -+ if (!sdev) { -+ 
dev_dbg(&client->dev, "Failed to alloc video device.\n"); -+ rval = -ENOMEM; -+ goto exit; -+ } -+ -+ sdev->videodev = video_device_alloc(); -+ if (!sdev->videodev) { -+ dev_dbg(&client->dev, "Failed to alloc video device.\n"); -+ rval = -ENOMEM; -+ goto free_sdev; -+ } -+ -+ sdev->platform_data = client->dev.platform_data; -+ if (!sdev->platform_data) { -+ dev_dbg(&client->dev, "No platform data registered.\n"); -+ rval = -ENODEV; -+ goto free_vdev; -+ } -+ -+ sdev->client = client; -+ i2c_set_clientdata(client, sdev); -+ -+ mutex_init(&sdev->mutex); -+ init_completion(&sdev->work); -+ -+ if (client->irq) { -+ rval = request_irq(client->irq, -+ si4713_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED, -+ client->name, sdev); -+ if (rval < 0) { -+ dev_err(&client->dev, "Could not request IRQ\n"); -+ goto free_vdev; -+ } -+ dev_dbg(&client->dev, "IRQ requested.\n"); -+ } else { -+ dev_dbg(&client->dev, "IRQ not configure. Using timeouts.\n"); -+ } -+ -+ memcpy(sdev->videodev, &si4713_viddev_template, -+ sizeof(si4713_viddev_template)); -+ video_set_drvdata(sdev->videodev, sdev); -+ if (video_register_device(sdev->videodev, VFL_TYPE_RADIO, radio_nr)) { -+ dev_dbg(&client->dev, "Could not register video device.\n"); -+ rval = -EIO; -+ goto free_irq; -+ } -+ -+ rval = sysfs_create_group(&sdev->client->dev.kobj, &attr_group); -+ if (rval < 0) { -+ dev_dbg(&client->dev, "Could not register sysfs interface.\n"); -+ goto free_registration; -+ } -+ -+ rval = si4713_probe(sdev); -+ if (rval < 0) { -+ dev_dbg(&client->dev, "Failed to probe device information.\n"); -+ goto free_sysfs; -+ } -+ -+ return 0; -+ -+free_sysfs: -+ sysfs_remove_group(&sdev->client->dev.kobj, &attr_group); -+free_registration: -+ video_unregister_device(sdev->videodev); -+ skip_release = 1; -+free_irq: -+ if (client->irq) -+ free_irq(client->irq, sdev); -+free_vdev: -+ if (!skip_release) -+ video_device_release(sdev->videodev); -+free_sdev: -+ kfree(sdev); -+exit: -+ return rval; -+} -+ -+/* -+ * si4713_i2c_driver_remove - remove the device -+ */ -+static int __exit si4713_i2c_driver_remove(struct i2c_client *client) -+{ -+ struct si4713_device *sdev = i2c_get_clientdata(client); -+ struct video_device *vd; -+ -+ /* our client isn't attached */ -+ if (!client->adapter) -+ return -ENODEV; -+ -+ if (sdev) { -+ vd = sdev->videodev; -+ -+ sysfs_remove_group(&sdev->client->dev.kobj, &attr_group); -+ -+ if (vd) -+ video_unregister_device(vd); -+ -+ if (sdev->power_state) -+ si4713_set_power_state(sdev, POWER_DOWN); -+ -+ if (client->irq > 0) -+ free_irq(client->irq, sdev); -+ -+ kfree(sdev); -+ } -+ i2c_set_clientdata(client, NULL); -+ -+ return 0; -+} -+ -+/* -+ * si4713_i2c_driver - i2c driver interface -+ */ -+static const struct i2c_device_id si4713_id[] = { -+ { "si4713" , 0 }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(i2c, si4713_id); -+ -+static struct i2c_driver si4713_i2c_driver = { -+ .driver = { -+ .name = DRIVER_NAME, -+ }, -+ .probe = si4713_i2c_driver_probe, -+ .remove = __exit_p(si4713_i2c_driver_remove), -+ .id_table = si4713_id, -+}; -+ -+/* -+ * Module Interface -+ */ -+/* -+ * si4713_module_init - module init -+ */ -+static int __init si4713_module_init(void) -+{ -+ int rval; -+ -+ printk(KERN_INFO DRIVER_DESC "\n"); -+ -+ rval = i2c_add_driver(&si4713_i2c_driver); -+ if (rval < 0) -+ printk(KERN_ERR DRIVER_NAME ": driver registration failed\n"); -+ -+ return rval; -+} -+ -+/* -+ * si4713_module_exit - module exit -+ */ -+static void __exit si4713_module_exit(void) -+{ -+ i2c_del_driver(&si4713_i2c_driver); -+} -+ 
-+module_init(si4713_module_init); -+module_exit(si4713_module_exit); -+ -+module_param(radio_nr, int, 0); -+MODULE_PARM_DESC(radio_nr, -+ "Minor number for radio device (-1 ==> auto assign)"); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR(DRIVER_AUTHOR); -+MODULE_DESCRIPTION(DRIVER_DESC); -+MODULE_VERSION("0.0.1"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/radio-si4713.h linux-omap-2.6.28-nokia1/drivers/media/radio/radio-si4713.h ---- linux-omap-2.6.28-omap1/drivers/media/radio/radio-si4713.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/radio-si4713.h 2011-06-22 13:19:32.713063276 +0200 -@@ -0,0 +1,32 @@ -+/* -+ * drivers/media/radio/radio-si4713.h -+ * -+ * Property and commands definitions for Si4713 radio transmitter chip. -+ * -+ * Copyright (c) 2008 Instituto Nokia de Tecnologia - INdT -+ * Author: Eduardo Valentin -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. -+ * -+ */ -+ -+#ifndef RADIO_SI4713_H -+#define RADIO_SI4713_H -+ -+#define SI4713_NAME "radio-si4713" -+ -+/* The SI4713 I2C sensor chip has a fixed slave address of 0xc6. */ -+#define SI4713_I2C_ADDR_BUSEN_HIGH 0x63 -+#define SI4713_I2C_ADDR_BUSEN_LOW 0x11 -+ -+/* -+ * Platform dependent definition -+ */ -+struct si4713_platform_data { -+ /* Set power state, zero is off, non-zero is on. */ -+ int (*set_power)(int power); -+}; -+ -+#endif /* ifndef RADIO_SI4713_H*/ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/si4713.c linux-omap-2.6.28-nokia1/drivers/media/radio/si4713.c ---- linux-omap-2.6.28-omap1/drivers/media/radio/si4713.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/si4713.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,2450 @@ -+/* -+ * drivers/media/radio/si4713.c -+ * -+ * Silicon Labs Si4713 FM Radio Transmitter I2C commands. -+ * -+ * Copyright (c) 2008 Instituto Nokia de Tecnologia - INdT -+ * Author: Eduardo Valentin -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "si4713.h" -+ -+#define MAX_ARGS 7 -+ -+#define RDS_BLOCK 8 -+#define RDS_BLOCK_CLEAR 0x03 -+#define RDS_BLOCK_LOAD 0x04 -+#define RDS_RADIOTEXT_2A 0x20 -+#define RDS_RADIOTEXT_BLK_SIZE 4 -+#define RDS_RADIOTEXT_INDEX_MAX 0x0F -+#define RDS_CARRIAGE_RETURN 0x0D -+ -+#define rds_ps_nblocks(len) ((len / RDS_BLOCK) + (len % RDS_BLOCK ? 
1 : 0)) -+#define enable_rds(p) (p | (1 << 2)) -+#define disable_rds(p) (p & ~(1 << 2)) -+#define get_rds_status(p) ((p >> 2) & 0x01) -+ -+#define enable_stereo(p) (p | (1 << 1)) -+#define disable_stereo(p) (p & ~(1 << 1)) -+#define get_stereo_status(p) ((p >> 1) & 0x01) -+ -+#define enable_limiter(p) (p | (1 << 1)) -+#define disable_limiter(p) (p & ~(1 << 1)) -+#define get_limiter_status(p) ((p >> 1) & 0x01) -+ -+#define enable_pilot(p) (p | (1 << 0)) -+#define disable_pilot(p) (p & ~(1 << 0)) -+#define get_pilot_status(p) ((p >> 0) & 0x01) -+ -+#define enable_acomp(p) (p | (1 << 0)) -+#define disable_acomp(p) (p & ~(1 << 0)) -+#define get_acomp_status(p) ((p >> 0) & 0x01) -+#define ATTACK_TIME_UNIT 500 -+ -+#define DEFAULT_RDS_PI 0x00 -+#define DEFAULT_RDS_PTY 0x00 -+#define DEFAULT_RDS_PS_NAME "Si4713 " -+#define DEFAULT_RDS_RADIO_TEXT DEFAULT_RDS_PS_NAME -+#define DEFAULT_RDS_DEVIATION 0x00C8 -+#define DEFAULT_RDS_PS_REPEAT_COUNT 0x0003 -+#define DEFAULT_LIMITER_RTIME 0x1392 -+#define DEFAULT_LIMITER_DEV 0x102CA -+#define DEFAULT_PILOT_FREQUENCY 0x4A38 -+#define DEFAULT_PILOT_DEVIATION 0x1A5E -+#define DEFAULT_ACOMP_ATIME 0x0000 -+#define DEFAULT_ACOMP_RTIME 0xF4240L -+#define DEFAULT_ACOMP_GAIN 0x0F -+#define DEFAULT_ACOMP_THRESHOLD (-0x28) -+#define DEFAULT_REGION_SETTINGS 0x02 -+#define DEFAULT_MUTE 0x00 -+#define DEFAULT_POWER_LEVEL 88 -+#define DEFAULT_TUNE_RSSI 0xFF -+#define DEFAULT_TONE_FREQUENCY 0x00 -+#define DEFAULT_TONE_DEVIATION 0x00 -+#define DEFAULT_TONE_ON_TIME 0x00 -+#define DEFAULT_TONE_OFF_TIME 0x00 -+ -+#define POWER_OFF 0x00 -+#define POWER_ON 0x01 -+ -+#define msb(x) ((u8)((u16) x >> 8)) -+#define lsb(x) ((u8)((u16) x & 0x00FF)) -+#define compose_u16(msb, lsb) (((u16)msb << 8) | lsb) -+#define check_command_failed(status) (!(status & SI4713_CTS) || \ -+ (status & SI4713_ERR)) -+/* mute definition */ -+#define set_mute(p) ((p & 1) | ((p & 1) << 1)); -+#define get_mute(p) (p & 0x01) -+#define set_pty(v, pty) ((v & 0xFC1F) | (pty << 5)) -+#define get_pty(v) ((v >> 5) & 0x1F) -+ -+ -+#ifdef DEBUG -+#define DBG_BUFFER(device, message, buffer, size) \ -+ { \ -+ int i; \ -+ char str[(size)*5]; \ -+ for (i = 0; i < size; i++) \ -+ sprintf(str + i * 5, " 0x%02x", buffer[i]); \ -+ dev_dbg(device, "%s:%s\n", message, str); \ -+ } -+#else -+#define DBG_BUFFER(device, message, buffer, size) -+#endif -+ -+/* -+ * Values for limiter release time -+ * device release -+ * value time (us) -+ */ -+static unsigned long const limiter_times[] = { -+ 2000, 250, -+ 1000, 500, -+ 510, 1000, -+ 255, 2000, -+ 170, 3000, -+ 127, 4020, -+ 102, 5010, -+ 85, 6020, -+ 73, 7010, -+ 64, 7990, -+ 57, 8970, -+ 51, 10030, -+ 25, 20470, -+ 17, 30110, -+ 13, 39380, -+ 10, 51190, -+ 8, 63690, -+ 7, 73140, -+ 6, 85330, -+ 5, 102390, -+}; -+ -+/* -+ * Values for audio compression release time -+ * device release -+ * value time (us) -+ */ -+static unsigned long const acomp_rtimes[] = { -+ 0, 100000, -+ 1, 200000, -+ 2, 350000, -+ 3, 525000, -+ 4, 1000000, -+}; -+ -+/* -+ * Values for region specific configurations -+ * (spacing, bottom and top frequencies, preemphasis) -+ */ -+static struct region_info region_configs[] = { -+ /* USA */ -+ { -+ .channel_spacing = 20, -+ .bottom_frequency = 8750, -+ .top_frequency = 10800, -+ .preemphasis = 0, -+ .region = 0, -+ }, -+ /* Australia */ -+ { -+ .channel_spacing = 20, -+ .bottom_frequency = 8750, -+ .top_frequency = 10800, -+ .preemphasis = 1, -+ .region = 1, -+ }, -+ /* Europe */ -+ { -+ .channel_spacing = 10, -+ .bottom_frequency = 8750, -+ .top_frequency = 
10800, -+ .preemphasis = 1, -+ .region = 2, -+ }, -+ /* Japan */ -+ { -+ .channel_spacing = 10, -+ .bottom_frequency = 7600, -+ .top_frequency = 9000, -+ .preemphasis = 1, -+ .region = 3, -+ }, -+ /* Japan wide band */ -+ { -+ .channel_spacing = 10, -+ .bottom_frequency = 7600, -+ .top_frequency = 10800, -+ .preemphasis = 1, -+ .region = 4, -+ }, -+}; -+ -+static int usecs_to_dev(unsigned long usecs, unsigned long const array[], -+ int size) -+{ -+ int i; -+ int rval = -EINVAL; -+ -+ for (i = 0; i < size / 2; i++) -+ if (array[(i * 2) + 1] >= usecs) { -+ rval = array[i * 2]; -+ break; -+ } -+ -+ return rval; -+} -+ -+static unsigned long dev_to_usecs(int value, unsigned long const array[], -+ int size) -+{ -+ int i; -+ int rval = -EINVAL; -+ -+ for (i = 0; i < size / 2; i++) -+ if (array[i * 2] == value) { -+ rval = array[(i * 2) + 1]; -+ break; -+ } -+ -+ return rval; -+} -+ -+/* -+ * si4713_send_command - sends a command to si4713 and waits its response -+ * @sdev: si4713_device structure for the device we are communicating -+ * @command: command id -+ * @args: command arguments we are sending (up to 7) -+ * @argn: actual size of @args -+ * @response: buffer to place the expected response from the device (up to 15) -+ * @respn: actual size of @response -+ * @usecs: amount of time to wait before reading the response (in usecs) -+ */ -+static int si4713_send_command(struct si4713_device *sdev, const u8 command, -+ const u8 args[], const int argn, -+ u8 response[], const int respn, const int usecs) -+{ -+ struct i2c_client *client = sdev->client; -+ u8 data1[MAX_ARGS + 1]; -+ int err; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ -+ /* First send the command and its arguments */ -+ data1[0] = command; -+ memcpy(data1 + 1, args, argn); -+ DBG_BUFFER(&client->dev, "Parameters", data1, argn + 1); -+ -+ err = i2c_master_send(client, data1, argn + 1); -+ if (err != argn + 1) { -+ dev_err(&client->dev, "Error while sending command 0x%02x\n", -+ command); -+ return (err > 0) ? -EIO : err; -+ } -+ -+ /* Wait response from interrupt */ -+ if (!wait_for_completion_timeout(&sdev->work, -+ usecs_to_jiffies(usecs) + 1)) -+ dev_dbg(&client->dev, "Device took too much time.\n"); -+ -+ /* Then get the response */ -+ err = i2c_master_recv(client, response, respn); -+ if (err != respn) { -+ dev_err(&client->dev, -+ "Error while reading response for command 0x%02x\n", -+ command); -+ return (err > 0) ? 
-EIO : err; -+ } -+ -+ DBG_BUFFER(&client->dev, "Response", response, respn); -+ if (check_command_failed(response[0])) -+ return -EBUSY; -+ -+ return 0; -+} -+ -+/* -+ * si4713_read_property - reads a si4713 property -+ * @sdev: si4713_device structure for the device we are communicating -+ * @prop: property identification number -+ */ -+static int si4713_read_property(struct si4713_device *sdev, u16 prop) -+{ -+ int err; -+ u8 val[SI4713_GET_PROP_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = 0 -+ * .Second byte = property's MSB -+ * .Third byte = property's LSB -+ */ -+ const u8 args[SI4713_GET_PROP_NARGS] = { -+ 0x00, -+ msb(prop), -+ lsb(prop), -+ }; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_GET_PROPERTY, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (err < 0) -+ return err; -+ -+ dev_dbg(&sdev->client->dev, "Status from read prop: 0x%02x\n", val[0]); -+ -+ return compose_u16(val[2], val[3]); -+} -+ -+/* -+ * si4713_write_property - modifies a si4713 property -+ * @sdev: si4713_device structure for the device we are communicating -+ * @prop: property identification number -+ * @val: new value for that property -+ */ -+static int si4713_write_property(struct si4713_device *sdev, u16 prop, u16 val) -+{ -+ int rval; -+ u8 resp[SI4713_SET_PROP_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = 0 -+ * .Second byte = property's MSB -+ * .Third byte = property's LSB -+ * .Fourth byte = value's MSB -+ * .Fifth byte = value's LSB -+ */ -+ const u8 args[SI4713_SET_PROP_NARGS] = { -+ 0x00, -+ msb(prop), -+ lsb(prop), -+ msb(val), -+ lsb(val), -+ }; -+ -+ rval = si4713_send_command(sdev, SI4713_CMD_SET_PROPERTY, -+ args, ARRAY_SIZE(args), -+ resp, ARRAY_SIZE(resp), -+ DEFAULT_TIMEOUT); -+ -+ if (rval < 0) -+ return rval; -+ -+ dev_dbg(&sdev->client->dev, "Status from write prop: 0x%02x\n", -+ resp[0]); -+ -+ /* -+ * As there is no command response for SET_PROPERTY, -+ * wait Tcomp time to finish before proceed, in order -+ * to have property properly set. 
-+ */ -+ msleep(TIMEOUT_SET_PROPERTY); -+ -+ return rval; -+} -+ -+/* -+ * si4713_powerup - Powers the device up -+ * @sdev: si4713_device structure for the device we are communicating -+ */ -+static int si4713_powerup(struct si4713_device *sdev) -+{ -+ int err; -+ u8 resp[SI4713_PWUP_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = Enabled interrupts and boot function -+ * .Second byte = Input operation mode -+ */ -+ const u8 args[SI4713_PWUP_NARGS] = { -+ SI4713_PWUP_CTSIEN | SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX, -+ SI4713_PWUP_OPMOD_ANALOG, -+ }; -+ -+ if (sdev->power_state) -+ return 0; -+ -+ sdev->platform_data->set_power(1); -+ err = si4713_send_command(sdev, SI4713_CMD_POWER_UP, -+ args, ARRAY_SIZE(args), -+ resp, ARRAY_SIZE(resp), -+ TIMEOUT_POWER_UP); -+ -+ if (!err) { -+ dev_dbg(&sdev->client->dev, "Powerup response: 0x%02x\n", -+ resp[0]); -+ dev_dbg(&sdev->client->dev, "Device in power up mode\n"); -+ sdev->power_state = POWER_ON; -+ -+ err = si4713_write_property(sdev, SI4713_GPO_IEN, -+ SI4713_STC_INT | SI4713_CTS); -+ } else { -+ sdev->platform_data->set_power(0); -+ } -+ -+ return err; -+} -+ -+/* -+ * si4713_powerdown - Powers the device down -+ * @sdev: si4713_device structure for the device we are communicating -+ */ -+static int si4713_powerdown(struct si4713_device *sdev) -+{ -+ int err; -+ u8 resp[SI4713_PWDN_NRESP]; -+ -+ if (!sdev->power_state) -+ return 0; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_POWER_DOWN, -+ NULL, 0, -+ resp, ARRAY_SIZE(resp), -+ DEFAULT_TIMEOUT); -+ -+ if (!err) { -+ dev_dbg(&sdev->client->dev, "Power down response: 0x%02x\n", -+ resp[0]); -+ dev_dbg(&sdev->client->dev, "Device in reset mode\n"); -+ sdev->platform_data->set_power(0); -+ sdev->power_state = POWER_OFF; -+ } -+ -+ return err; -+} -+ -+/* -+ * si4713_checkrev - Checks if we are treating a device with the correct rev. -+ * @sdev: si4713_device structure for the device we are communicating -+ */ -+#define pr_revision(devicep, buffer) \ -+ dev_info(devicep, "Detected %s (0x%02x) Firmware: %d.%d" \ -+ " Patch ID: %02x:%02x Component: %d.%d" \ -+ " Chip Rev.: %s\n", \ -+ buffer[1] == SI4713_PRODUCT_NUMBER ? "Si4713" : "",\ -+ buffer[1], \ -+ buffer[2] & 0xF, buffer[3] & 0xF, \ -+ buffer[4], buffer[5], \ -+ buffer[6] & 0xF, buffer[7] & 0xF, \ -+ buffer[8] == 0x41 ? "revA" : "unknown") -+static int si4713_checkrev(struct si4713_device *sdev) -+{ -+ int rval; -+ u8 resp[SI4713_GETREV_NRESP]; -+ -+ mutex_lock(&sdev->mutex); -+ -+ rval = si4713_send_command(sdev, SI4713_CMD_GET_REV, -+ NULL, 0, -+ resp, ARRAY_SIZE(resp), -+ DEFAULT_TIMEOUT); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ if (resp[1] == SI4713_PRODUCT_NUMBER) { -+ pr_revision(&sdev->client->dev, resp); -+ } else { -+ dev_err(&sdev->client->dev, "Invalid product number\n"); -+ rval = -EINVAL; -+ } -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+/* -+ * si4713_wait_stc - Waits STC interrupt and clears status bits. 
Usefull -+ * for TX_TUNE_POWER, TX_TUNE_FREQ and TX_TUNE_MEAS -+ * @sdev: si4713_device structure for the device we are communicating -+ * @usecs: timeout to wait for STC interrupt signal -+ */ -+static int si4713_wait_stc(struct si4713_device *sdev, const int usecs) -+{ -+ int err; -+ u8 resp[SI4713_GET_STATUS_NRESP]; -+ -+ /* Wait response from STC interrupt */ -+ if (!wait_for_completion_timeout(&sdev->work, -+ usecs_to_jiffies(TIMEOUT_TX_TUNE) + 1)) -+ dev_dbg(&sdev->client->dev, "Device took too much time.\n"); -+ -+ /* Clear status bits */ -+ err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS, -+ NULL, 0, -+ resp, ARRAY_SIZE(resp), -+ DEFAULT_TIMEOUT); -+ -+ if (err < 0) -+ goto exit; -+ -+ dev_dbg(&sdev->client->dev, "Status bits: 0x%02x\n", resp[0]); -+ -+ if (!(resp[0] & SI4713_STC_INT)) -+ err = -EIO; -+ -+exit: -+ return err; -+} -+ -+/* -+ * si4713_tx_tune_freq - Sets the state of the RF carrier and sets the tuning -+ * frequency between 76 and 108 MHz in 10 kHz units and -+ * steps of 50 kHz. -+ * @sdev: si4713_device structure for the device we are communicating -+ * @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz) -+ */ -+static int si4713_tx_tune_freq(struct si4713_device *sdev, u16 frequency) -+{ -+ int err; -+ u8 val[SI4713_TXFREQ_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = 0 -+ * .Second byte = frequency's MSB -+ * .Third byte = frequency's LSB -+ */ -+ const u8 args[SI4713_TXFREQ_NARGS] = { -+ 0x00, -+ msb(frequency), -+ lsb((frequency - -+ (frequency % sdev->region_info.channel_spacing))), -+ }; -+ -+ if (frequency < sdev->region_info.bottom_frequency || -+ frequency > sdev->region_info.top_frequency) -+ return -EDOM; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_FREQ, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (err < 0) -+ return err; -+ -+ dev_dbg(&sdev->client->dev, "Status from tx tune freq: 0x%02x\n", -+ val[0]); -+ -+ err = si4713_wait_stc(sdev, TIMEOUT_TX_TUNE); -+ if (err < 0) -+ return err; -+ -+ return compose_u16(args[1], args[2]); -+} -+ -+/* -+ * si4713_tx_tune_power - Sets the RF voltage level between 88 and 115 dBuV in -+ * 1 dB units. A value of 0x00 indicates off. The command -+ * also sets the antenna tuning capacitance. A value of 0 -+ * indicates autotuning, and a value of 1 - 191 indicates -+ * a manual override, which results in a tuning -+ * capacitance of 0.25 pF x @antcap. 
-+ * @sdev: si4713_device structure for the device we are communicating -+ * @power: tuning power (88 - 115 dBuV, unit/step 1 dB) -+ * @antcap: value of antenna tuning capacitor (0 - 191) -+ */ -+static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power, -+ u8 antcap) -+{ -+ int err; -+ u8 val[SI4713_TXPWR_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = 0 -+ * .Second byte = 0 -+ * .Third byte = power -+ * .Fourth byte = antcap -+ */ -+ const u8 args[SI4713_TXPWR_NARGS] = { -+ 0x00, -+ 0x00, -+ power, -+ antcap, -+ }; -+ -+ if (((power > 0) && (power < SI4713_MIN_POWER)) || -+ power > SI4713_MAX_POWER || antcap > SI4713_MAX_ANTCAP) -+ return -EDOM; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_POWER, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (err < 0) -+ return err; -+ -+ dev_dbg(&sdev->client->dev, "Status from tx tune power: 0x%02x\n", -+ val[0]); -+ -+ return si4713_wait_stc(sdev, TIMEOUT_TX_TUNE_POWER); -+} -+ -+/* -+ * si4713_tx_tune_measure - Enters receive mode and measures the received noise -+ * level in units of dBuV on the selected frequency. -+ * The Frequency must be between 76 and 108 MHz in 10 kHz -+ * units and steps of 50 kHz. The command also sets the -+ * antenna tuning capacitance. A value of 0 means -+ * autotuning, and a value of 1 to 191 indicates manual -+ * override. -+ * @sdev: si4713_device structure for the device we are communicating -+ * @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz) -+ * @antcap: value of antenna tuning capacitor (0 - 191) -+ */ -+static int si4713_tx_tune_measure(struct si4713_device *sdev, u16 frequency, -+ u8 antcap) -+{ -+ int err; -+ u8 val[SI4713_TXMEA_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = 0 -+ * .Second byte = frequency's MSB -+ * .Third byte = frequency's LSB -+ * .Fourth byte = antcap -+ */ -+ const u8 args[SI4713_TXMEA_NARGS] = { -+ 0x00, -+ msb(frequency), -+ lsb((frequency - -+ (frequency % sdev->region_info.channel_spacing))), -+ antcap, -+ }; -+ -+ sdev->tune_rssi = DEFAULT_TUNE_RSSI; -+ -+ if (frequency < sdev->region_info.bottom_frequency || -+ frequency > sdev->region_info.top_frequency || -+ antcap > SI4713_MAX_ANTCAP) -+ return -EDOM; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_MEASURE, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (err < 0) -+ return err; -+ -+ dev_dbg(&sdev->client->dev, "Status from tx tune measure: 0x%02x\n", -+ val[0]); -+ -+ return si4713_wait_stc(sdev, TIMEOUT_TX_TUNE); -+} -+ -+/* -+ * si4713_tx_tune_status- Returns the status of the tx_tune_freq, tx_tune_mea or -+ * tx_tune_power commands. This command return the current -+ * frequency, output voltage in dBuV, the antenna tunning -+ * capacitance value and the received noise level. The -+ * command also clears the stcint interrupt bit when the -+ * first bit of its arguments is high. -+ * @sdev: si4713_device structure for the device we are communicating -+ * @intack: 0x01 to clear the seek/tune complete interrupt status indicator. 
-+ * @frequency: returned frequency -+ * @power: returned power -+ * @antcap: returned antenna capacitance -+ * @noise: returned noise level -+ */ -+static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack, -+ u16 *frequency, u8 *power, -+ u8 *antcap, u8 *noise) -+{ -+ int err; -+ u8 val[SI4713_TXSTATUS_NRESP]; -+ /* -+ * REVISIT: From Programming Manual -+ * .First byte = intack bit -+ */ -+ const u8 args[SI4713_TXSTATUS_NARGS] = { -+ intack & SI4713_INTACK_MASK, -+ }; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_STATUS, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (!err) { -+ dev_dbg(&sdev->client->dev, -+ "Status from tx tune status: 0x%02x\n", val[0]); -+ *frequency = compose_u16(val[2], val[3]); -+ sdev->frequency = *frequency; -+ *power = val[5]; -+ *antcap = val[6]; -+ *noise = val[7]; -+ dev_dbg(&sdev->client->dev, "Tune status: %d x 10 kHz " -+ "(power %d, antcap %d, rnl %d)\n", -+ *frequency, *power, *antcap, *noise); -+ } -+ -+ return err; -+} -+ -+/* -+ * si4713_tx_rds_buff - Loads the RDS group buffer FIFO or circular buffer. -+ * @sdev: si4713_device structure for the device we are communicating -+ * @mode: the buffer operation mode. -+ * @rdsb: RDS Block B -+ * @rdsc: RDS Block C -+ * @rdsd: RDS Block D -+ * @intstatus: returns current interrupt status -+ * @cbavail: returns the number of available circular buffer blocks. -+ * @cbused: returns the number of used circular buffer blocks. -+ * @fifoavail: returns the number of available fifo buffer blocks. -+ * @fifoused: returns the number of used fifo buffer blocks. -+ */ -+static int si4713_tx_rds_buff(struct si4713_device *sdev, u8 mode, u16 rdsb, -+ u16 rdsc, u16 rdsd, u8 *intstatus, u8 *cbavail, -+ u8 *cbused, u8 *fifoavail, u8 *fifoused) -+{ -+ int err; -+ u8 val[SI4713_RDSBUFF_NRESP]; -+ -+ const u8 args[SI4713_RDSBUFF_NARGS] = { -+ mode & SI4713_RDSBUFF_MODE_MASK, -+ msb(rdsb), -+ lsb(rdsb), -+ msb(rdsc), -+ lsb(rdsc), -+ msb(rdsd), -+ lsb(rdsd), -+ }; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_TX_RDS_BUFF, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (!err) { -+ dev_dbg(&sdev->client->dev, -+ "Status from tx rds buff: 0x%02x\n", val[0]); -+ *intstatus = val[1]; -+ *cbavail = val[2]; -+ *cbused = val[3]; -+ *fifoavail = val[4]; -+ *fifoused = val[5]; -+ dev_dbg(&sdev->client->dev, "rds buffer status: interrupts" -+ " 0x%02x cb avail: %d cb used %d fifo avail" -+ " %d fifo used %d\n", *intstatus, *cbavail, -+ *cbused, *fifoavail, *fifoused); -+ } -+ -+ return err; -+} -+ -+/* -+ * si4713_tx_rds_ps - Loads the program service buffer. -+ * @sdev: si4713_device structure for the device we are communicating -+ * @psid: program service id to be loaded. -+ * @pschar: assumed 4 size char array to be loaded into the program service -+ */ -+static int si4713_tx_rds_ps(struct si4713_device *sdev, u8 psid, -+ unsigned char *pschar) -+{ -+ int err; -+ u8 val[SI4713_RDSPS_NRESP]; -+ -+ const u8 args[SI4713_RDSPS_NARGS] = { -+ psid & SI4713_RDSPS_PSID_MASK, -+ pschar[0], -+ pschar[1], -+ pschar[2], -+ pschar[3], -+ }; -+ -+ err = si4713_send_command(sdev, SI4713_CMD_TX_RDS_PS, -+ args, ARRAY_SIZE(args), val, -+ ARRAY_SIZE(val), DEFAULT_TIMEOUT); -+ -+ if (err < 0) -+ return err; -+ -+ dev_dbg(&sdev->client->dev, "Status from tx rds ps: 0x%02x\n", -+ val[0]); -+ -+ return err; -+} -+ -+/* -+ * si4713_init - Sets the device up with default configuration. 
-+ * @sdev: si4713_device structure for the device we are communicating -+ */ -+int si4713_init(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ rval = si4713_set_rds_pi(sdev, DEFAULT_RDS_PI); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_pty(sdev, DEFAULT_RDS_PTY); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_ps_name(sdev, DEFAULT_RDS_PS_NAME); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_radio_text(sdev, DEFAULT_RDS_RADIO_TEXT); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_enabled(sdev, 1); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_limiter_release_time(sdev, DEFAULT_LIMITER_RTIME); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_limiter_deviation(sdev, DEFAULT_LIMITER_DEV); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_limiter_enabled(sdev, 1); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_pilot_frequency(sdev, DEFAULT_PILOT_FREQUENCY); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_pilot_deviation(sdev, DEFAULT_PILOT_DEVIATION); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_pilot_enabled(sdev, 1); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_stereo_enabled(sdev, 1); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_attack_time(sdev, DEFAULT_ACOMP_ATIME); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_release_time(sdev, DEFAULT_ACOMP_RTIME); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_gain(sdev, DEFAULT_ACOMP_GAIN); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_threshold(sdev, DEFAULT_ACOMP_THRESHOLD); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_enabled(sdev, 1); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_region(sdev, DEFAULT_REGION_SETTINGS); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_mute(sdev, DEFAULT_MUTE); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_frequency(sdev, DEFAULT_TONE_FREQUENCY); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_deviation(sdev, DEFAULT_TONE_DEVIATION); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_on_time(sdev, DEFAULT_TONE_ON_TIME); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_off_time(sdev, DEFAULT_TONE_OFF_TIME); -+ if (rval < 0) -+ goto exit; -+ -+exit: -+ return rval; -+} -+ -+/* -+ * si4713_setup - Sets the device up with current configuration. 
-+ * @sdev: si4713_device structure for the device we are communicating -+ */ -+int si4713_setup(struct si4713_device *sdev) -+{ -+ struct si4713_device *tmp; -+ int rval; -+ -+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); -+ if (!tmp) -+ return -ENOMEM; -+ -+ /* Get a local copy to avoid race */ -+ mutex_lock(&sdev->mutex); -+ memcpy(tmp, sdev, sizeof(*sdev)); -+ mutex_unlock(&sdev->mutex); -+ -+ rval = si4713_set_rds_pi(sdev, tmp->rds_info.pi); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_pty(sdev, tmp->rds_info.pty); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_ps_name(sdev, tmp->rds_info.ps_name); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_radio_text(sdev, tmp->rds_info.radio_text); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_rds_enabled(sdev, tmp->rds_info.enabled); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_limiter_release_time(sdev, -+ tmp->limiter_info.release_time); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_limiter_deviation(sdev, tmp->limiter_info.deviation); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_limiter_enabled(sdev, tmp->limiter_info.enabled); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_region(sdev, tmp->region_info.region); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_pilot_frequency(sdev, tmp->pilot_info.frequency); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_pilot_deviation(sdev, tmp->pilot_info.deviation); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_pilot_enabled(sdev, tmp->pilot_info.enabled); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_stereo_enabled(sdev, tmp->stereo); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_attack_time(sdev, tmp->acomp_info.attack_time); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_release_time(sdev, -+ tmp->acomp_info.release_time); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_gain(sdev, tmp->acomp_info.gain); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_threshold(sdev, tmp->acomp_info.threshold); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_acomp_enabled(sdev, tmp->acomp_info.enabled); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_mute(sdev, tmp->mute); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_frequency(sdev, tmp->tone_info.frequency); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_deviation(sdev, tmp->tone_info.deviation); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_on_time(sdev, tmp->tone_info.on_time); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_tone_off_time(sdev, tmp->tone_info.off_time); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_frequency(sdev, tmp->frequency ? tmp->frequency : -+ tmp->region_info.bottom_frequency); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_power_level(sdev, tmp->power_level ? 
-+ tmp->power_level : -+ DEFAULT_POWER_LEVEL); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_antenna_capacitor(sdev, tmp->antenna_capacitor); -+ -+exit: -+ kfree(tmp); -+ return rval; -+} -+ -+int si4713_set_power_level(struct si4713_device *sdev, u8 power_level) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_tx_tune_power(sdev, power_level, -+ sdev->antenna_capacitor); -+ -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->power_level = power_level; -+ rval = 0; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_power_level(struct si4713_device *sdev) -+{ -+ int rval; -+ u16 f = 0; -+ u8 p, a, n; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->power_level = p; -+ } -+ -+ rval = sdev->power_level; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_antenna_capacitor(struct si4713_device *sdev, u8 value) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_tx_tune_power(sdev, sdev->power_level, value); -+ -+ if (!rval) -+ sdev->antenna_capacitor = value; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_antenna_capacitor(struct si4713_device *sdev) -+{ -+ int rval = -EINVAL; -+ u16 f = 0; -+ u8 p, a, n; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ rval = a; -+ } -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_power_state(struct si4713_device *sdev, u8 value) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (value) -+ rval = si4713_powerup(sdev); -+ else -+ rval = si4713_powerdown(sdev); -+ -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_probe(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ rval = si4713_set_power_state(sdev, POWER_ON); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_checkrev(sdev); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_set_power_state(sdev, POWER_OFF); -+ if (rval < 0) -+ goto exit; -+ -+ rval = si4713_init(sdev); -+ -+exit: -+ return rval; -+} -+ -+int si4713_set_frequency(struct si4713_device *sdev, u16 frequency) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_tx_tune_freq(sdev, frequency); -+ if (rval < 0) -+ goto unlock; -+ sdev->frequency = rval; -+ } else { -+ rval = -ENODEV; -+ } -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_frequency(struct si4713_device *sdev) -+{ -+ int rval; -+ u16 f = 0; -+ u8 p, a, n; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->frequency = f; -+ } -+ -+ rval = sdev->frequency; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_mute(struct si4713_device *sdev, u16 mute) -+{ -+ int rval = 0; -+ -+ mute = set_mute(mute); -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, -+ SI4713_TX_LINE_INPUT_MUTE, mute); -+ -+ if (rval >= 0) -+ sdev->mute = get_mute(mute); -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_mute(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if 
(sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_LINE_INPUT_MUTE); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->mute = rval; -+ } -+ -+ rval = get_mute(sdev->mute); -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_rds_pi(struct si4713_device *sdev, u16 pi) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_RDS_PI, pi); -+ -+ if (rval >= 0) -+ sdev->rds_info.pi = pi; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_rds_pi(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_RDS_PI); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->rds_info.pi = rval; -+ } -+ -+ rval = sdev->rds_info.pi; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_rds_pty(struct si4713_device *sdev, u8 pty) -+{ -+ int rval = 0; -+ u16 p; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_RDS_PS_MISC); -+ if (rval < 0) -+ goto unlock; -+ -+ p = set_pty(rval, pty); -+ -+ rval = si4713_write_property(sdev, SI4713_TX_RDS_PS_MISC, p); -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->rds_info.pty = pty; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_rds_pty(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_RDS_PS_MISC); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->rds_info.pty = get_pty(rval); -+ } -+ -+ rval = sdev->rds_info.pty; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_rds_ps_name(struct si4713_device *sdev, char *ps_name) -+{ -+ int rval = 0, i; -+ u8 len = 0; -+ u8 *tmp; -+ -+ if (!strlen(ps_name)) -+ return -EINVAL; -+ -+ tmp = kzalloc(MAX_RDS_PS_NAME + 1, GFP_KERNEL); -+ if (!tmp) -+ return -ENOMEM; -+ -+ strncpy(tmp, ps_name, MAX_RDS_PS_NAME); -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ /* Write the new ps name and clear the padding */ -+ for (i = 0; i < MAX_RDS_PS_NAME; i += (RDS_BLOCK / 2)) { -+ rval = si4713_tx_rds_ps(sdev, (i / (RDS_BLOCK / 2)), -+ tmp + i); -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ /* Setup the size to be sent */ -+ len = strlen(tmp) - 1; -+ -+ rval = si4713_write_property(sdev, -+ SI4713_TX_RDS_PS_MESSAGE_COUNT, -+ rds_ps_nblocks(len)); -+ if (rval < 0) -+ goto unlock; -+ -+ rval = si4713_write_property(sdev, -+ SI4713_TX_RDS_PS_REPEAT_COUNT, -+ DEFAULT_RDS_PS_REPEAT_COUNT * 2); -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ strncpy(sdev->rds_info.ps_name, tmp, MAX_RDS_PS_NAME); -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ kfree(tmp); -+ return rval; -+} -+ -+int si4713_get_rds_ps_name(struct si4713_device *sdev, char *ps_name) -+{ -+ mutex_lock(&sdev->mutex); -+ strncpy(ps_name, sdev->rds_info.ps_name, MAX_RDS_PS_NAME); -+ mutex_unlock(&sdev->mutex); -+ -+ return 0; -+} -+ -+int si4713_set_rds_radio_text(struct si4713_device *sdev, char *radio_text) -+{ -+ int rval = 0, i; -+ u16 t_index = 0; -+ u8 s, a, u, fa, fu, b_index = 0, cr_inserted = 0; -+ u8 *tmp; -+ -+ if (!strlen(radio_text)) -+ return -EINVAL; -+ -+ tmp = kzalloc(MAX_RDS_RADIO_TEXT + 1, GFP_KERNEL); -+ if (!tmp) -+ return -ENOMEM; -+ -+ strncpy(tmp, radio_text, MAX_RDS_RADIO_TEXT); -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = 
si4713_tx_rds_buff(sdev, RDS_BLOCK_CLEAR, 0, 0, 0, -+ &s, &a, &u, &fa, &fu); -+ if (rval < 0) -+ goto unlock; -+ do { -+ /* RDS spec says that if the last block isn't used, -+ * then apply a carriage return -+ */ -+ if (t_index < (RDS_RADIOTEXT_INDEX_MAX * \ -+ RDS_RADIOTEXT_BLK_SIZE)) { -+ for (i = 0; i < RDS_RADIOTEXT_BLK_SIZE; i++) { -+ if (!tmp[t_index + i] || -+ tmp[t_index + i] == \ -+ RDS_CARRIAGE_RETURN) { -+ tmp[t_index + i] = -+ RDS_CARRIAGE_RETURN; -+ cr_inserted = 1; -+ break; -+ } -+ } -+ } -+ -+ rval = si4713_tx_rds_buff(sdev, RDS_BLOCK_LOAD, -+ compose_u16(RDS_RADIOTEXT_2A, -+ b_index++), -+ compose_u16(tmp[t_index], -+ tmp[t_index + 1]), -+ compose_u16(tmp[t_index + 2], -+ tmp[t_index + 3]), -+ &s, &a, &u, &fa, &fu); -+ if (rval < 0) -+ goto unlock; -+ -+ t_index += RDS_RADIOTEXT_BLK_SIZE; -+ -+ if (cr_inserted) -+ break; -+ } while (u < a); -+ } -+ -+ strncpy(sdev->rds_info.radio_text, tmp, MAX_RDS_RADIO_TEXT); -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ kfree(tmp); -+ return rval; -+} -+ -+int si4713_get_rds_radio_text(struct si4713_device *sdev, char *radio_text) -+{ -+ mutex_lock(&sdev->mutex); -+ strncpy(radio_text, sdev->rds_info.radio_text, MAX_RDS_RADIO_TEXT); -+ mutex_unlock(&sdev->mutex); -+ -+ return 0; -+} -+ -+int si4713_set_rds_enabled(struct si4713_device *sdev, u8 enabled) -+{ -+ int rval = 0; -+ u16 p; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ p = rval; -+ if (enabled) -+ p = enable_rds(p); -+ else -+ p = disable_rds(p); -+ -+ rval = si4713_write_property(sdev, SI4713_TX_COMPONENT_ENABLE, -+ p); -+ if (rval < 0) -+ goto unlock; -+ -+ if (enabled) { -+ rval = si4713_write_property(sdev, -+ SI4713_TX_RDS_DEVIATION, -+ DEFAULT_RDS_DEVIATION); -+ if (rval < 0) -+ goto unlock; -+ } -+ } -+ -+ sdev->rds_info.enabled = enabled & 0x01; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_rds_enabled(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->rds_info.enabled = get_rds_status(rval); -+ } -+ -+ rval = sdev->rds_info.enabled; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_region_bottom_frequency(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ /* Device works in 10kHz units */ -+ rval = sdev->region_info.bottom_frequency * 10; -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_region_top_frequency(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ /* Device works in 10kHz units */ -+ rval = sdev->region_info.top_frequency * 10; -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_set_region_preemphasis(struct si4713_device *sdev, u8 preemphasis) -+{ -+ int rval = 0; -+ -+ switch (preemphasis) { -+ case PREEMPHASIS_USA: -+ preemphasis = FMPE_USA; -+ break; -+ case PREEMPHASIS_EU: -+ preemphasis = FMPE_EU; -+ break; -+ case PREEMPHASIS_DISABLED: -+ preemphasis = FMPE_DISABLED; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_PREEMPHASIS, -+ preemphasis); -+ -+ if (rval >= 0) -+ sdev->region_info.preemphasis = preemphasis; -+ -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int 
si4713_get_region_preemphasis(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_PREEMPHASIS); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->region_info.preemphasis = rval; -+ } -+ -+ switch (sdev->region_info.preemphasis) { -+ case FMPE_USA: -+ rval = PREEMPHASIS_USA; -+ break; -+ case FMPE_EU: -+ rval = PREEMPHASIS_EU; -+ break; -+ case FMPE_DISABLED: -+ rval = PREEMPHASIS_DISABLED; -+ break; -+ default: -+ rval = -EINVAL; -+ goto unlock; -+ } -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_region_channel_spacing(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ /* Device works in 10kHz units */ -+ rval = sdev->region_info.channel_spacing * 10; -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_set_region(struct si4713_device *sdev, u8 region) -+{ -+ int rval = 0; -+ u16 new_frequency = 0; -+ -+ if (region >= ARRAY_SIZE(region_configs)) -+ return -EINVAL; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->frequency < region_configs[region].bottom_frequency || -+ sdev->frequency > region_configs[region].top_frequency) -+ new_frequency = region_configs[region].bottom_frequency; -+ -+ memcpy(&sdev->region_info, ®ion_configs[region], -+ sizeof(sdev->region_info)); -+ -+ if (sdev->power_state) { -+ if (new_frequency > 0) { -+ rval = si4713_tx_tune_freq(sdev, new_frequency); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ new_frequency = rval; -+ } -+ -+ rval = si4713_write_property(sdev, SI4713_TX_PREEMPHASIS, -+ region_configs[region].preemphasis); -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->frequency = new_frequency; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_region(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ rval = sdev->region_info.region; -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_set_limiter_enabled(struct si4713_device *sdev, u8 enabled) -+{ -+ int rval = 0; -+ u16 p; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_ACOMP_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ p = rval; -+ if (enabled) -+ p = enable_limiter(p); -+ else -+ p = disable_limiter(p); -+ -+ rval = si4713_write_property(sdev, SI4713_TX_ACOMP_ENABLE, -+ p); -+ -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->limiter_info.enabled = enabled & 0x01; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_limiter_enabled(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_ACOMP_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->limiter_info.enabled = get_limiter_status(rval); -+ } -+ -+ rval = sdev->limiter_info.enabled; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_limiter_deviation(struct si4713_device *sdev, -+ unsigned long deviation) -+{ -+ int rval = 0; -+ -+ /* Device receives in 10Hz units */ -+ deviation /= 10; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_AUDIO_DEVIATION, -+ deviation); -+ -+ /* Device returns in 10Hz units */ -+ if (rval >= 0) -+ sdev->limiter_info.deviation = deviation * 10; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+long si4713_get_limiter_deviation(struct si4713_device *sdev) -+{ -+ int rval; -+ 
-+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_AUDIO_DEVIATION); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ /* Device returns in 10Hz units */ -+ sdev->limiter_info.deviation = rval * 10; -+ } -+ -+ rval = sdev->limiter_info.deviation; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_limiter_release_time(struct si4713_device *sdev, -+ unsigned long rtime) -+{ -+ int rval; -+ -+ rval = usecs_to_dev(rtime, limiter_times, ARRAY_SIZE(limiter_times)); -+ if (rval < 0) -+ goto exit; -+ -+ rtime = rval; -+ rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, -+ SI4713_TX_LIMITER_RELEASE_TIME, rtime); -+ -+ if (rval >= 0) -+ sdev->limiter_info.release_time = dev_to_usecs(rtime, -+ limiter_times, -+ ARRAY_SIZE(limiter_times)); -+ -+ mutex_unlock(&sdev->mutex); -+ -+exit: -+ return rval; -+} -+ -+long si4713_get_limiter_release_time(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, -+ SI4713_TX_LIMITER_RELEASE_TIME); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->limiter_info.release_time = dev_to_usecs(rval, -+ limiter_times, -+ ARRAY_SIZE(limiter_times)); -+ } -+ -+ rval = sdev->limiter_info.release_time; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_stereo_enabled(struct si4713_device *sdev, u8 enabled) -+{ -+ int rval = 0; -+ u16 p; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ p = rval; -+ if (enabled) -+ p = enable_stereo(p); -+ else -+ p = disable_stereo(p); -+ -+ rval = si4713_write_property(sdev, SI4713_TX_COMPONENT_ENABLE, -+ p); -+ -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->stereo = enabled & 0x01; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_stereo_enabled(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->stereo = get_stereo_status(rval); -+ } -+ -+ rval = sdev->stereo; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_pilot_enabled(struct si4713_device *sdev, u8 enabled) -+{ -+ int rval = 0; -+ u16 p; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ p = rval; -+ if (enabled) -+ p = enable_pilot(p); -+ else -+ p = disable_pilot(p); -+ -+ rval = si4713_write_property(sdev, SI4713_TX_COMPONENT_ENABLE, -+ p); -+ -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->pilot_info.enabled = enabled & 0x01; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_pilot_enabled(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->pilot_info.enabled = get_pilot_status(rval); -+ } -+ -+ rval = sdev->pilot_info.enabled; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_pilot_deviation(struct si4713_device *sdev, -+ unsigned long deviation) -+{ -+ int rval = 0; -+ -+ /* Device receives in 10Hz units */ -+ deviation /= 10; 
-+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_PILOT_DEVIATION, -+ deviation); -+ -+ /* Device returns in 10Hz units */ -+ if (rval >= 0) -+ sdev->pilot_info.deviation = deviation * 10; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+long si4713_get_pilot_deviation(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_PILOT_DEVIATION); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ /* Device returns in 10Hz units */ -+ sdev->pilot_info.deviation = rval * 10; -+ } -+ -+ rval = sdev->pilot_info.deviation; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_pilot_frequency(struct si4713_device *sdev, u16 freq) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_PILOT_FREQUENCY, -+ freq); -+ -+ if (rval >= 0) -+ sdev->pilot_info.frequency = freq; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_pilot_frequency(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_PILOT_FREQUENCY); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->pilot_info.frequency = rval; -+ } -+ -+ rval = sdev->pilot_info.frequency; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_tone_deviation(struct si4713_device *sdev, -+ unsigned long deviation) -+{ -+ int rval = 0; -+ -+ /* Device receives in 10Hz units */ -+ deviation /= 10; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_TONE_DEVIATION, -+ deviation); -+ -+ /* Device returns in 10Hz units */ -+ if (rval >= 0) -+ sdev->tone_info.deviation = deviation * 10; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+long si4713_get_tone_deviation(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_TONE_DEVIATION); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ /* Device returns in 10Hz units */ -+ sdev->tone_info.deviation = rval * 10; -+ } -+ -+ rval = sdev->tone_info.deviation; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_tone_frequency(struct si4713_device *sdev, u16 freq) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_TONE_FREQUENCY, -+ freq); -+ -+ if (rval >= 0) -+ sdev->tone_info.frequency = freq; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_tone_frequency(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_TONE_FREQUENCY); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->tone_info.frequency = rval; -+ } -+ -+ rval = sdev->tone_info.frequency; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_tone_on_time(struct si4713_device *sdev, u16 on_time) -+{ -+ int rval = 0; -+ -+ /* Device receives in 2ms units */ -+ on_time >>= 1; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_TONE_ON_TIME, -+ on_time); -+ -+ /* Device returns in 2ms units */ -+ if (rval >= 0) -+ sdev->tone_info.on_time = on_time << 1; -+ -+ 
mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_tone_on_time(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_TONE_ON_TIME); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ /* Device returns in 2ms units */ -+ sdev->tone_info.on_time = rval << 1; -+ } -+ -+ rval = sdev->tone_info.on_time; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_tone_off_time(struct si4713_device *sdev, u16 off_time) -+{ -+ int rval = 0; -+ -+ /* Device receives in 2ms units */ -+ off_time >>= 1; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_TONE_OFF_TIME, -+ off_time); -+ -+ /* Device returns in 2ms units */ -+ if (rval >= 0) -+ sdev->tone_info.off_time = off_time << 1; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_tone_off_time(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_TONE_OFF_TIME); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ /* Device returns in 2ms units */ -+ sdev->tone_info.off_time = rval << 1; -+ } -+ -+ rval = sdev->tone_info.off_time; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_acomp_enabled(struct si4713_device *sdev, u8 enabled) -+{ -+ int rval = 0; -+ u16 p; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_ACOMP_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ p = rval; -+ if (enabled) -+ p = enable_acomp(p); -+ else -+ p = disable_acomp(p); -+ -+ rval = si4713_write_property(sdev, SI4713_TX_ACOMP_ENABLE, p); -+ -+ if (rval < 0) -+ goto unlock; -+ } -+ -+ sdev->acomp_info.enabled = enabled & 0x01; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_get_acomp_enabled(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_ACOMP_ENABLE); -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->acomp_info.enabled = get_acomp_status(rval); -+ } -+ -+ rval = sdev->acomp_info.enabled; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_acomp_gain(struct si4713_device *sdev, u8 gain) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_ACOMP_GAIN, gain); -+ -+ if (rval >= 0) -+ sdev->acomp_info.gain = gain; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_acomp_gain(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_ACOMP_GAIN); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->acomp_info.gain = rval; -+ } -+ -+ rval = sdev->acomp_info.gain; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_acomp_threshold(struct si4713_device *sdev, s8 threshold) -+{ -+ int rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, SI4713_TX_ACOMP_THRESHOLD, -+ threshold); -+ -+ if (rval >= 0) -+ sdev->acomp_info.threshold = threshold; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_acomp_threshold(struct si4713_device *sdev, s8 *threshold) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if 
(sdev->power_state) { -+ rval = si4713_read_property(sdev, SI4713_TX_ACOMP_THRESHOLD); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->acomp_info.threshold = rval; -+ } -+ -+ *threshold = sdev->acomp_info.threshold; -+ rval = 0; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_acomp_release_time(struct si4713_device *sdev, -+ unsigned long rtime) -+{ -+ int rval; -+ -+ rval = usecs_to_dev(rtime, acomp_rtimes, ARRAY_SIZE(acomp_rtimes)); -+ if (rval < 0) -+ goto exit; -+ -+ rtime = rval; -+ rval = 0; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, -+ SI4713_TX_ACOMP_RELEASE_TIME, rtime); -+ -+ if (rval >= 0) -+ sdev->acomp_info.release_time = dev_to_usecs(rtime, -+ acomp_rtimes, -+ ARRAY_SIZE(acomp_rtimes)); -+ -+ mutex_unlock(&sdev->mutex); -+ -+exit: -+ return rval; -+} -+ -+long si4713_get_acomp_release_time(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, -+ SI4713_TX_ACOMP_RELEASE_TIME); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->acomp_info.release_time = dev_to_usecs(rval, -+ acomp_rtimes, -+ ARRAY_SIZE(acomp_rtimes)); -+ } -+ -+ rval = sdev->acomp_info.release_time; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+ -+int si4713_set_acomp_attack_time(struct si4713_device *sdev, u16 atime) -+{ -+ int rval = 0; -+ -+ /* Device receives in 0.5 ms units */ -+ atime /= ATTACK_TIME_UNIT; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) -+ rval = si4713_write_property(sdev, -+ SI4713_TX_ACOMP_ATTACK_TIME, atime); -+ -+ if (rval >= 0) -+ sdev->acomp_info.attack_time = atime * ATTACK_TIME_UNIT; -+ -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_acomp_attack_time(struct si4713_device *sdev) -+{ -+ int rval; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_read_property(sdev, -+ SI4713_TX_ACOMP_RELEASE_TIME); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->acomp_info.release_time = rval * ATTACK_TIME_UNIT; -+ } -+ -+ rval = sdev->acomp_info.attack_time; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -+ -+int si4713_set_tune_measure(struct si4713_device *sdev, u32 frequency) -+{ -+ int rval = -ENODEV; -+ -+ mutex_lock(&sdev->mutex); -+ if (sdev->power_state) -+ rval = si4713_tx_tune_measure(sdev, frequency / 10, 0); -+ mutex_unlock(&sdev->mutex); -+ -+ return rval; -+} -+ -+int si4713_get_tune_measure(struct si4713_device *sdev) -+{ -+ int rval; -+ u16 f = 0; -+ u8 p, a, n; -+ -+ mutex_lock(&sdev->mutex); -+ -+ if (sdev->power_state) { -+ rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n); -+ -+ if (rval < 0) -+ goto unlock; -+ -+ sdev->tune_rssi = n; -+ } -+ -+ rval = sdev->tune_rssi; -+ -+unlock: -+ mutex_unlock(&sdev->mutex); -+ return rval; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/radio/si4713.h linux-omap-2.6.28-nokia1/drivers/media/radio/si4713.h ---- linux-omap-2.6.28-omap1/drivers/media/radio/si4713.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/radio/si4713.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,319 @@ -+/* -+ * drivers/media/radio/si4713.h -+ * -+ * Property and commands definitions for Si4713 radio transmitter chip. -+ * -+ * Copyright (c) 2008 Instituto Nokia de Tecnologia - INdT -+ * Author: Eduardo Valentin -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. 
This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. -+ * -+ */ -+ -+#ifndef SI4713_H -+#define SI4713_H -+ -+#include "radio-si4713.h" -+ -+#define SI4713_PRODUCT_NUMBER 0x0D -+ -+/* Command Timeouts */ -+#define DEFAULT_TIMEOUT 500 -+#define TIMEOUT_SET_PROPERTY 20 -+#define TIMEOUT_TX_TUNE_POWER 30000 -+#define TIMEOUT_TX_TUNE 110000 -+#define TIMEOUT_POWER_UP 200000 -+ -+/* -+ * Command and its arguments definitions -+ */ -+#define SI4713_PWUP_CTSIEN (1<<7) -+#define SI4713_PWUP_GPO2OEN (1<<6) -+#define SI4713_PWUP_PATCH (1<<5) -+#define SI4713_PWUP_XOSCEN (1<<4) -+#define SI4713_PWUP_FUNC_TX 0x02 -+#define SI4713_PWUP_FUNC_PATCH 0x0F -+#define SI4713_PWUP_OPMOD_ANALOG 0x50 -+#define SI4713_PWUP_OPMOD_DIGITAL 0x0F -+#define SI4713_PWUP_NARGS 2 -+#define SI4713_PWUP_NRESP 1 -+#define SI4713_CMD_POWER_UP 0x01 -+ -+#define SI4713_GETREV_NRESP 9 -+#define SI4713_CMD_GET_REV 0x10 -+ -+#define SI4713_PWDN_NRESP 1 -+#define SI4713_CMD_POWER_DOWN 0x11 -+ -+#define SI4713_SET_PROP_NARGS 5 -+#define SI4713_SET_PROP_NRESP 1 -+#define SI4713_CMD_SET_PROPERTY 0x12 -+ -+#define SI4713_GET_PROP_NARGS 3 -+#define SI4713_GET_PROP_NRESP 4 -+#define SI4713_CMD_GET_PROPERTY 0x13 -+ -+#define SI4713_GET_STATUS_NRESP 1 -+#define SI4713_CMD_GET_INT_STATUS 0x14 -+ -+#define SI4713_CMD_PATCH_ARGS 0x15 -+#define SI4713_CMD_PATCH_DATA 0x16 -+ -+#define SI4713_MAX_FREQ 10800 -+#define SI4713_MIN_FREQ 7600 -+#define SI4713_TXFREQ_NARGS 3 -+#define SI4713_TXFREQ_NRESP 1 -+#define SI4713_CMD_TX_TUNE_FREQ 0x30 -+ -+#define SI4713_MAX_POWER 120 -+#define SI4713_MIN_POWER 88 -+#define SI4713_MAX_ANTCAP 191 -+#define SI4713_MIN_ANTCAP 0 -+#define SI4713_TXPWR_NARGS 4 -+#define SI4713_TXPWR_NRESP 1 -+#define SI4713_CMD_TX_TUNE_POWER 0x31 -+ -+#define SI4713_TXMEA_NARGS 4 -+#define SI4713_TXMEA_NRESP 1 -+#define SI4713_CMD_TX_TUNE_MEASURE 0x32 -+ -+#define SI4713_INTACK_MASK 0x01 -+#define SI4713_TXSTATUS_NARGS 1 -+#define SI4713_TXSTATUS_NRESP 8 -+#define SI4713_CMD_TX_TUNE_STATUS 0x33 -+ -+#define SI4713_OVERMOD_BIT (1 << 2) -+#define SI4713_IALH_BIT (1 << 1) -+#define SI4713_IALL_BIT (1 << 0) -+#define SI4713_ASQSTATUS_NARGS 1 -+#define SI4713_ASQSTATUS_NRESP 5 -+#define SI4713_CMD_TX_ASQ_STATUS 0x34 -+ -+#define SI4713_RDSBUFF_MODE_MASK 0x87 -+#define SI4713_RDSBUFF_NARGS 7 -+#define SI4713_RDSBUFF_NRESP 6 -+#define SI4713_CMD_TX_RDS_BUFF 0x35 -+ -+#define SI4713_RDSPS_PSID_MASK 0x1F -+#define SI4713_RDSPS_NARGS 5 -+#define SI4713_RDSPS_NRESP 1 -+#define SI4713_CMD_TX_RDS_PS 0x36 -+ -+#define SI4713_CMD_GPO_CTL 0x80 -+#define SI4713_CMD_GPO_SET 0x81 -+ -+/* -+ * Bits from status response -+ */ -+#define SI4713_CTS (1<<7) -+#define SI4713_ERR (1<<6) -+#define SI4713_RDS_INT (1<<2) -+#define SI4713_ASQ_INT (1<<1) -+#define SI4713_STC_INT (1<<0) -+ -+/* -+ * Property definitions -+ */ -+#define SI4713_GPO_IEN 0x0001 -+#define SI4713_DIG_INPUT_FORMAT 0x0101 -+#define SI4713_DIG_INPUT_SAMPLE_RATE 0x0103 -+#define SI4713_REFCLK_FREQ 0x0201 -+#define SI4713_REFCLK_PRESCALE 0x0202 -+#define SI4713_TX_COMPONENT_ENABLE 0x2100 -+#define SI4713_TX_AUDIO_DEVIATION 0x2101 -+#define SI4713_TX_PILOT_DEVIATION 0x2102 -+#define SI4713_TX_RDS_DEVIATION 0x2103 -+#define SI4713_TX_LINE_INPUT_LEVEL 0x2104 -+#define SI4713_TX_LINE_INPUT_MUTE 0x2105 -+#define SI4713_TX_PREEMPHASIS 0x2106 -+#define SI4713_TX_PILOT_FREQUENCY 0x2107 -+#define SI4713_TX_TONE_DEVIATION 0x2108 -+#define SI4713_TX_TONE_FREQUENCY 0x2109 -+#define SI4713_TX_ACOMP_ENABLE 0x2200 -+#define SI4713_TX_ACOMP_THRESHOLD 0x2201 
-+#define SI4713_TX_ACOMP_ATTACK_TIME 0x2202 -+#define SI4713_TX_ACOMP_RELEASE_TIME 0x2203 -+#define SI4713_TX_ACOMP_GAIN 0x2204 -+#define SI4713_TX_LIMITER_RELEASE_TIME 0x2205 -+#define SI4713_TX_ASQ_INTERRUPT_SOURCE 0x2300 -+#define SI4713_TX_ASQ_LEVEL_LOW 0x2301 -+#define SI4713_TX_ASQ_DURATION_LOW 0x2302 -+#define SI4713_TX_ASQ_LEVEL_HIGH 0x2303 -+#define SI4713_TX_ASQ_DURATION_HIGH 0x2304 -+#define SI4713_TX_RDS_INTERRUPT_SOURCE 0x2C00 -+#define SI4713_TX_RDS_PI 0x2C01 -+#define SI4713_TX_RDS_PS_MIX 0x2C02 -+#define SI4713_TX_RDS_PS_MISC 0x2C03 -+#define SI4713_TX_RDS_PS_REPEAT_COUNT 0x2C04 -+#define SI4713_TX_RDS_PS_MESSAGE_COUNT 0x2C05 -+#define SI4713_TX_RDS_PS_AF 0x2C06 -+#define SI4713_TX_RDS_FIFO_SIZE 0x2C07 -+#define SI4713_TX_TONE_ON_TIME 0xF000 -+#define SI4713_TX_TONE_OFF_TIME 0xF001 -+ -+#define PREEMPHASIS_USA 75 -+#define PREEMPHASIS_EU 50 -+#define PREEMPHASIS_DISABLED 0 -+#define FMPE_USA 0x00 -+#define FMPE_EU 0x01 -+#define FMPE_DISABLED 0x02 -+ -+#define POWER_UP 0x01 -+#define POWER_DOWN 0x00 -+ -+struct rds_info { -+ u16 pi; -+#define MAX_RDS_PTY 31 -+ u8 pty; -+#define MAX_RDS_PS_NAME 96 -+ u8 ps_name[MAX_RDS_PS_NAME + 1]; -+#define MAX_RDS_RADIO_TEXT 384 -+ u8 radio_text[MAX_RDS_RADIO_TEXT + 1]; -+ u8 enabled; -+}; -+ -+struct limiter_info { -+#define MAX_LIMITER_RELEASE_TIME 102390 -+ unsigned long release_time; -+#define MAX_LIMITER_DEVIATION 90000 -+ unsigned long deviation; -+ u8 enabled; -+}; -+ -+struct pilot_info { -+#define MAX_PILOT_DEVIATION 90000 -+ unsigned long deviation; -+#define MAX_PILOT_FREQUENCY 19000 -+ u16 frequency; -+ u8 enabled; -+}; -+ -+struct acomp_info { -+#define MAX_ACOMP_RELEASE_TIME 1000000 -+ unsigned long release_time; -+#define MAX_ACOMP_ATTACK_TIME 5000 -+ u16 attack_time; -+#define MAX_ACOMP_THRESHOLD 0 -+#define MIN_ACOMP_THRESHOLD (-40) -+ s8 threshold; -+#define MAX_ACOMP_GAIN 20 -+ u8 gain; -+ u8 enabled; -+}; -+ -+struct region_info { -+ u16 bottom_frequency; -+ u16 top_frequency; -+ u8 preemphasis; -+ u8 channel_spacing; -+ u8 region; -+}; -+ -+struct tone_info { -+#define MAX_TONE_DEVIATION 90000 -+ unsigned long deviation; -+#define MAX_TONE_FREQUENCY 19000 -+ u16 frequency; -+#define MAX_TONE_ON_TIME 0xFFFF -+ u16 on_time; -+#define MAX_TONE_OFF_TIME 0xFFFF -+ u16 off_time; -+}; -+ -+/* -+ * si4713_device - private data -+ */ -+struct si4713_device { -+ /* reference to i2c and video device */ -+ struct i2c_client *client; -+ struct video_device *videodev; -+ /* private data structures */ -+ struct mutex mutex; -+ struct completion work; -+ struct si4713_platform_data *platform_data; -+ struct rds_info rds_info; -+ struct limiter_info limiter_info; -+ struct pilot_info pilot_info; -+ struct acomp_info acomp_info; -+ struct region_info region_info; -+ struct tone_info tone_info; -+ u16 frequency; -+ u8 mute; -+ u8 power_level; -+ u8 power_state; -+ u8 antenna_capacitor; -+ u8 stereo; -+ u8 tune_rssi; -+}; -+ -+int si4713_init(struct si4713_device *sdev); -+int si4713_setup(struct si4713_device *sdev); -+int si4713_probe(struct si4713_device *sdev); -+int si4713_set_power_level(struct si4713_device *sdev, u8 power_level); -+int si4713_get_power_level(struct si4713_device *sdev); -+int si4713_set_antenna_capacitor(struct si4713_device *sdev, u8 value); -+int si4713_get_antenna_capacitor(struct si4713_device *sdev); -+int si4713_set_power_state(struct si4713_device *sdev, u8 value); -+int si4713_set_frequency(struct si4713_device *sdev, u16 frequency); -+int si4713_get_frequency(struct si4713_device *sdev); -+int 
si4713_set_mute(struct si4713_device *sdev, u16 mute); -+int si4713_get_mute(struct si4713_device *sdev); -+int si4713_set_rds_pi(struct si4713_device *sdev, u16 pi); -+int si4713_get_rds_pi(struct si4713_device *sdev); -+int si4713_set_rds_pty(struct si4713_device *sdev, u8 pty); -+int si4713_get_rds_pty(struct si4713_device *sdev); -+int si4713_set_rds_ps_name(struct si4713_device *sdev, char *ps_name); -+int si4713_get_rds_ps_name(struct si4713_device *sdev, char *ps_name); -+int si4713_set_rds_radio_text(struct si4713_device *sdev, char *radio_text); -+int si4713_get_rds_radio_text(struct si4713_device *sdev, char *radio_text); -+int si4713_set_rds_enabled(struct si4713_device *sdev, u8 enabled); -+int si4713_get_rds_enabled(struct si4713_device *sdev); -+int si4713_set_limiter_release_time(struct si4713_device *sdev, -+ unsigned long rtime); -+long si4713_get_limiter_release_time(struct si4713_device *sdev); -+int si4713_set_limiter_deviation(struct si4713_device *sdev, -+ unsigned long deviation); -+long si4713_get_limiter_deviation(struct si4713_device *sdev); -+int si4713_set_limiter_enabled(struct si4713_device *sdev, u8 enabled); -+int si4713_get_limiter_enabled(struct si4713_device *sdev); -+int si4713_set_pilot_frequency(struct si4713_device *sdev, u16 freq); -+int si4713_get_pilot_frequency(struct si4713_device *sdev); -+int si4713_set_pilot_deviation(struct si4713_device *sdev, -+ unsigned long deviation); -+long si4713_get_pilot_deviation(struct si4713_device *sdev); -+int si4713_set_pilot_enabled(struct si4713_device *sdev, u8 enabled); -+int si4713_get_pilot_enabled(struct si4713_device *sdev); -+int si4713_set_stereo_enabled(struct si4713_device *sdev, u8 enabled); -+int si4713_get_stereo_enabled(struct si4713_device *sdev); -+int si4713_set_acomp_enabled(struct si4713_device *sdev, u8 enabled); -+int si4713_get_acomp_enabled(struct si4713_device *sdev); -+int si4713_set_acomp_threshold(struct si4713_device *sdev, s8 threshold); -+int si4713_get_acomp_threshold(struct si4713_device *sdev, s8 *threshold); -+int si4713_set_acomp_gain(struct si4713_device *sdev, u8 gain); -+int si4713_get_acomp_gain(struct si4713_device *sdev); -+int si4713_set_acomp_release_time(struct si4713_device *sdev, -+ unsigned long rtime); -+long si4713_get_acomp_release_time(struct si4713_device *sdev); -+int si4713_set_acomp_attack_time(struct si4713_device *sdev, u16 atime); -+int si4713_get_acomp_attack_time(struct si4713_device *sdev); -+int si4713_get_region_bottom_frequency(struct si4713_device *sdev); -+int si4713_get_region_top_frequency(struct si4713_device *sdev); -+int si4713_get_region_channel_spacing(struct si4713_device *sdev); -+int si4713_set_region_preemphasis(struct si4713_device *sdev, u8 preemphasis); -+int si4713_get_region_preemphasis(struct si4713_device *sdev); -+int si4713_set_region(struct si4713_device *sdev, u8 region); -+int si4713_get_region(struct si4713_device *sdev); -+int si4713_set_tune_measure(struct si4713_device *sdev, u32 frequency); -+int si4713_get_tune_measure(struct si4713_device *sdev); -+int si4713_set_tone_frequency(struct si4713_device *sdev, u16 freq); -+int si4713_get_tone_frequency(struct si4713_device *sdev); -+int si4713_set_tone_deviation(struct si4713_device *sdev, -+ unsigned long deviation); -+long si4713_get_tone_deviation(struct si4713_device *sdev); -+int si4713_set_tone_on_time(struct si4713_device *sdev, u16 on_time); -+int si4713_get_tone_on_time(struct si4713_device *sdev); -+int si4713_set_tone_off_time(struct si4713_device *sdev, u16 
off_time); -+int si4713_get_tone_off_time(struct si4713_device *sdev); -+#endif /* ifndef SI4713_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/adp1653.c linux-omap-2.6.28-nokia1/drivers/media/video/adp1653.c ---- linux-omap-2.6.28-omap1/drivers/media/video/adp1653.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/adp1653.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,574 @@ -+/* -+ * drivers/media/video/adp1653.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ * NOTES: -+ * - Torch and Indicator lights are enabled by just increasing -+ * intensity from zero -+ * - Increasing Flash light intensity does nothing until it is -+ * strobed (strobe control set to 1) -+ * - Strobing flash disables Torch light (sets intensity to zero). -+ * This might be changed later. -+ * -+ * TODO: -+ * - fault interrupt handling -+ * - faster strobe (use i/o pin instead of i2c) -+ * - should ensure that the pin is in some sane state even if not used -+ * - strobe control could return whether flash is still on (measure time) -+ * - power doesn't need to be ON if all lights are off -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#define TIMEOUT_US_TO_CODE(t) ((820000 + 27300 - (t))/54600) -+#define TIMEOUT_CODE_TO_US(c) (820000 - (c) * 54600) -+ -+/* Write values into ADP1653 registers. Do nothing if power is off. 
*/ -+static int adp1653_update_hw(struct v4l2_int_device *s) -+{ -+ struct adp1653_flash *flash = s->priv; -+ int rval; -+ u8 out_sel = 0; -+ u8 config = 0; -+ -+ if (flash->power != V4L2_POWER_ON) -+ return 0; -+ -+ out_sel |= flash->indicator_intensity << ADP1653_REG_OUT_SEL_ILED_SHIFT; -+ /* Set torch intensity to zero--prevents false triggering of SC Fault */ -+ rval = i2c_smbus_write_byte_data(flash->i2c_client, -+ ADP1653_REG_OUT_SEL, out_sel); -+ if (rval < 0) -+ return rval; -+ -+ if (flash->torch_intensity > 0) { -+ /* Torch mode, light immediately on, duration indefinite */ -+ out_sel |= flash->torch_intensity -+ << ADP1653_REG_OUT_SEL_HPLED_SHIFT; -+ } else { -+ /* Flash mode, light on with strobe, duration from timer */ -+ out_sel |= flash->flash_intensity -+ << ADP1653_REG_OUT_SEL_HPLED_SHIFT; -+ config |= ADP1653_REG_CONFIG_TMR_CFG; -+ config |= TIMEOUT_US_TO_CODE(flash->flash_timeout) -+ << ADP1653_REG_CONFIG_TMR_SET_SHIFT; -+ } -+ -+ rval = i2c_smbus_write_byte_data(flash->i2c_client, -+ ADP1653_REG_OUT_SEL, out_sel); -+ if (rval < 0) -+ return rval; -+ -+ rval = i2c_smbus_write_byte_data(flash->i2c_client, -+ ADP1653_REG_CONFIG, config); -+ if (rval < 0) -+ return rval; -+ -+ return 0; -+} -+ -+static int adp1653_strobe(struct v4l2_int_device *s) -+{ -+ struct adp1653_flash *flash = s->priv; -+ int rval; -+ -+ if (flash->torch_intensity > 0) { -+ /* Disabling torch enables flash in update_hw() */ -+ flash->torch_intensity = 0; -+ rval = adp1653_update_hw(s); -+ if (rval) -+ return rval; -+ } -+ -+ if (flash->platform_data->strobe) { -+ /* Hardware-specific strobe using I/O pin */ -+ return flash->platform_data->strobe(s); -+ } else { -+ /* Software strobe using i2c */ -+ rval = i2c_smbus_write_byte_data(flash->i2c_client, -+ ADP1653_REG_SW_STROBE, ADP1653_REG_SW_STROBE_SW_STROBE); -+ if (rval) -+ return rval; -+ rval = i2c_smbus_write_byte_data(flash->i2c_client, -+ ADP1653_REG_SW_STROBE, 0); -+ return rval; -+ } -+} -+ -+static int adp1653_get_fault(struct v4l2_int_device *s) -+{ -+ struct adp1653_flash *flash = s->priv; -+ -+ return i2c_smbus_read_byte_data(flash->i2c_client, ADP1653_REG_FAULT); -+} -+ -+#define CTRL_CAMERA_FLASH_STROBE 0 -+#define CTRL_CAMERA_FLASH_TIMEOUT 1 -+#define CTRL_CAMERA_FLASH_INTENSITY 2 -+#define CTRL_CAMERA_FLASH_TORCH_INTENSITY 3 -+#define CTRL_CAMERA_FLASH_INDICATOR_INTENSITY 4 -+#define CTRL_CAMERA_FLASH_FAULT_SCP 5 -+#define CTRL_CAMERA_FLASH_FAULT_OT 6 -+#define CTRL_CAMERA_FLASH_FAULT_TMR 7 -+#define CTRL_CAMERA_FLASH_FAULT_OV 8 -+ -+static struct v4l2_queryctrl adp1653_ctrls[] = { -+ { -+ .id = V4L2_CID_FLASH_STROBE, -+ .type = V4L2_CTRL_TYPE_BUTTON, -+ .name = "Flash strobe", -+ .minimum = 0, -+ .maximum = 0, -+ .step = 0, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_UPDATE, -+ }, -+ -+ { -+ .id = V4L2_CID_FLASH_TIMEOUT, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Flash timeout [us]", -+ .minimum = 1000, -+ .step = 54600, -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ { -+ .id = V4L2_CID_FLASH_INTENSITY, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Flash intensity", -+ .minimum = ADP1653_TORCH_INTENSITY_MAX + 1, -+ .step = 1, -+ .default_value = ADP1653_TORCH_INTENSITY_MAX + 1, -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ { -+ .id = V4L2_CID_TORCH_INTENSITY, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Torch intensity", -+ .minimum = 0, -+ .step = 1, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ { -+ .id = V4L2_CID_INDICATOR_INTENSITY, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Indicator intensity", 
-+ .minimum = 0, -+ .step = 1, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ -+ /* Faults */ -+ { -+ .id = V4L2_CID_FLASH_ADP1653_FAULT_SCP, -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .name = "Short-circuit fault", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_READ_ONLY, -+ }, -+ { -+ .id = V4L2_CID_FLASH_ADP1653_FAULT_OT, -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .name = "Overtemperature fault", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_READ_ONLY, -+ }, -+ { -+ .id = V4L2_CID_FLASH_ADP1653_FAULT_TMR, -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .name = "Timeout fault", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_READ_ONLY, -+ }, -+ { -+ .id = V4L2_CID_FLASH_ADP1653_FAULT_OV, -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .name = "Overvoltage fault", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 0, -+ .flags = V4L2_CTRL_FLAG_READ_ONLY, -+ } -+}; -+ -+static int adp1653_ioctl_queryctrl(struct v4l2_int_device *s, -+ struct v4l2_queryctrl *a) -+{ -+ return smia_ctrl_query(adp1653_ctrls, ARRAY_SIZE(adp1653_ctrls), a); -+} -+ -+static int adp1653_ioctl_g_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct adp1653_flash *flash = s->priv; -+ -+ switch (vc->id) { -+ case V4L2_CID_FLASH_TIMEOUT: -+ vc->value = flash->flash_timeout; -+ break; -+ case V4L2_CID_FLASH_INTENSITY: -+ vc->value = flash->flash_intensity; -+ break; -+ case V4L2_CID_TORCH_INTENSITY: -+ vc->value = flash->torch_intensity; -+ break; -+ case V4L2_CID_INDICATOR_INTENSITY: -+ vc->value = flash->indicator_intensity; -+ break; -+ -+ case V4L2_CID_FLASH_ADP1653_FAULT_SCP: -+ vc->value = (adp1653_get_fault(s) -+ & ADP1653_REG_FAULT_FLT_SCP) != 0; -+ break; -+ case V4L2_CID_FLASH_ADP1653_FAULT_OT: -+ vc->value = (adp1653_get_fault(s) -+ & ADP1653_REG_FAULT_FLT_OT) != 0; -+ break; -+ case V4L2_CID_FLASH_ADP1653_FAULT_TMR: -+ vc->value = (adp1653_get_fault(s) -+ & ADP1653_REG_FAULT_FLT_TMR) != 0; -+ break; -+ case V4L2_CID_FLASH_ADP1653_FAULT_OV: -+ vc->value = (adp1653_get_fault(s) -+ & ADP1653_REG_FAULT_FLT_OV) != 0; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static int adp1653_ioctl_s_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct adp1653_flash *flash = s->priv; -+ int ctrl; -+ int *value; -+ -+ switch (vc->id) { -+ case V4L2_CID_FLASH_STROBE: -+ return adp1653_strobe(s); -+ -+ case V4L2_CID_FLASH_TIMEOUT: -+ ctrl = CTRL_CAMERA_FLASH_TIMEOUT; -+ value = &flash->flash_timeout; -+ break; -+ case V4L2_CID_FLASH_INTENSITY: -+ ctrl = CTRL_CAMERA_FLASH_INTENSITY; -+ value = &flash->flash_intensity; -+ break; -+ case V4L2_CID_TORCH_INTENSITY: -+ ctrl = CTRL_CAMERA_FLASH_TORCH_INTENSITY; -+ value = &flash->torch_intensity; -+ break; -+ case V4L2_CID_INDICATOR_INTENSITY: -+ ctrl = CTRL_CAMERA_FLASH_INDICATOR_INTENSITY; -+ value = &flash->indicator_intensity; -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ if (vc->value < adp1653_ctrls[ctrl].minimum) -+ vc->value = adp1653_ctrls[ctrl].minimum; -+ if (vc->value > adp1653_ctrls[ctrl].maximum) -+ vc->value = adp1653_ctrls[ctrl].maximum; -+ vc->value = (vc->value - adp1653_ctrls[ctrl].minimum -+ + (adp1653_ctrls[ctrl].step >> 1)) -+ / adp1653_ctrls[ctrl].step; -+ vc->value = vc->value * adp1653_ctrls[ctrl].step -+ + adp1653_ctrls[ctrl].minimum; -+ *value = vc->value; -+ -+ return adp1653_update_hw(s); -+} -+ -+static int 
adp1653_init_device(struct v4l2_int_device *s) -+{ -+ struct adp1653_flash *flash = s->priv; -+ int rval; -+ -+ /* Clear FAULT register by writing zero to OUT_SEL */ -+ rval = i2c_smbus_write_byte_data(flash->i2c_client, -+ ADP1653_REG_OUT_SEL, 0); -+ if (rval < 0) { -+ dev_err(&flash->i2c_client->dev, -+ "failed writing fault register\n"); -+ return -ENODEV; -+ } -+ -+ /* Read FAULT register */ -+ rval = i2c_smbus_read_byte_data(flash->i2c_client, ADP1653_REG_FAULT); -+ if (rval < 0) { -+ dev_err(&flash->i2c_client->dev, -+ "failed reading fault register\n"); -+ return -ENODEV; -+ } -+ -+ if ((rval & 0x0f) != 0) { -+ dev_err(&flash->i2c_client->dev, "device fault\n"); -+ return -ENODEV; -+ } -+ -+ rval = adp1653_update_hw(s); -+ if (rval) { -+ dev_err(&flash->i2c_client->dev, -+ "adp1653_update_hw failed at adp1653_init_device\n"); -+ return -ENODEV; -+ } -+ -+ return 0; -+} -+ -+static int adp1653_ioctl_dev_init(struct v4l2_int_device *s) -+{ -+ struct adp1653_flash *flash = s->priv; -+ -+ adp1653_ctrls[CTRL_CAMERA_FLASH_TIMEOUT].default_value = -+ adp1653_ctrls[CTRL_CAMERA_FLASH_TIMEOUT].maximum = -+ flash->platform_data->max_flash_timeout; -+ adp1653_ctrls[CTRL_CAMERA_FLASH_INTENSITY].maximum = -+ flash->platform_data->max_flash_intensity; -+ adp1653_ctrls[CTRL_CAMERA_FLASH_TORCH_INTENSITY].maximum = -+ flash->platform_data->max_torch_intensity; -+ adp1653_ctrls[CTRL_CAMERA_FLASH_INDICATOR_INTENSITY].maximum = -+ flash->platform_data->max_indicator_intensity; -+ -+ flash->flash_timeout = adp1653_ctrls -+ [CTRL_CAMERA_FLASH_TIMEOUT].default_value; -+ flash->flash_intensity = adp1653_ctrls -+ [CTRL_CAMERA_FLASH_INTENSITY].default_value; -+ flash->torch_intensity = adp1653_ctrls -+ [CTRL_CAMERA_FLASH_TORCH_INTENSITY].default_value; -+ flash->indicator_intensity = adp1653_ctrls -+ [CTRL_CAMERA_FLASH_INDICATOR_INTENSITY].default_value; -+ return 0; -+} -+ -+static int adp1653_ioctl_s_power(struct v4l2_int_device *s, -+ enum v4l2_power state) -+{ -+ struct adp1653_flash *flash = s->priv; -+ int rval = 0; -+ -+ if (state == V4L2_POWER_STANDBY) -+ state = V4L2_POWER_ON; -+ if (state == flash->power) -+ return 0; -+ -+ switch (state) { -+ case V4L2_POWER_STANDBY: -+ case V4L2_POWER_ON: -+ rval = flash->platform_data->power_on(s); -+ if (rval) -+ return rval; -+ flash->power = V4L2_POWER_ON; -+ -+ rval = adp1653_init_device(s); -+ if (rval) -+ goto fail; -+ -+ break; -+ -+ case V4L2_POWER_OFF: -+ rval = flash->platform_data->power_off(s); -+ flash->power = V4L2_POWER_OFF; -+ break; -+ } -+ return 0; -+ -+fail: -+ flash->platform_data->power_off(s); -+ flash->power = V4L2_POWER_OFF; -+ return rval; -+} -+ -+static int adp1653_ioctl_g_priv(struct v4l2_int_device *s, void *priv) -+{ -+ struct adp1653_flash *flash = s->priv; -+ -+ return flash->platform_data->g_priv(s, priv); -+} -+ -+static struct v4l2_int_ioctl_desc adp1653_ioctl_desc[] = { -+ { vidioc_int_queryctrl_num, -+ (v4l2_int_ioctl_func *)adp1653_ioctl_queryctrl }, -+ { vidioc_int_g_ctrl_num, -+ (v4l2_int_ioctl_func *)adp1653_ioctl_g_ctrl }, -+ { vidioc_int_s_ctrl_num, -+ (v4l2_int_ioctl_func *)adp1653_ioctl_s_ctrl }, -+ { vidioc_int_s_power_num, -+ (v4l2_int_ioctl_func *)adp1653_ioctl_s_power }, -+ { vidioc_int_g_priv_num, -+ (v4l2_int_ioctl_func *)adp1653_ioctl_g_priv }, -+ { vidioc_int_dev_init_num, -+ (v4l2_int_ioctl_func *)adp1653_ioctl_dev_init }, -+}; -+ -+static struct v4l2_int_slave adp1653_slave = { -+ .ioctls = adp1653_ioctl_desc, -+ .num_ioctls = ARRAY_SIZE(adp1653_ioctl_desc), -+}; -+ -+static struct adp1653_flash adp1653; -+ 
-+static struct v4l2_int_device adp1653_int_device = { -+ .module = THIS_MODULE, -+ .name = ADP1653_NAME, -+ .priv = &adp1653, -+ .type = v4l2_int_type_slave, -+ .u = { -+ .slave = &adp1653_slave, -+ }, -+}; -+ -+#ifdef CONFIG_PM -+ -+static int adp1653_suspend(struct i2c_client *client, pm_message_t mesg) -+{ -+ struct adp1653_flash *flash = i2c_get_clientdata(client); -+ -+ if (flash->power == V4L2_POWER_OFF) -+ return 0; -+ -+ return flash->platform_data->power_off(flash->v4l2_int_device); -+} -+ -+static int adp1653_resume(struct i2c_client *client) -+{ -+ struct adp1653_flash *flash = i2c_get_clientdata(client); -+ enum v4l2_power resume_power; -+ -+ if (flash->power == V4L2_POWER_OFF) -+ return 0; -+ -+ resume_power = flash->power; -+ flash->power = V4L2_POWER_OFF; -+ -+ return adp1653_ioctl_s_power(flash->v4l2_int_device, resume_power); -+} -+ -+#else -+ -+#define adp1653_suspend NULL -+#define adp1653_resume NULL -+ -+#endif /* CONFIG_PM */ -+ -+static int adp1653_probe(struct i2c_client *client, -+ const struct i2c_device_id *devid) -+{ -+ struct adp1653_flash *flash = &adp1653; -+ int rval; -+ -+ if (i2c_get_clientdata(client)) -+ return -EBUSY; -+ -+ flash->platform_data = client->dev.platform_data; -+ -+ if (flash->platform_data == NULL) -+ return -ENODEV; -+ -+ flash->v4l2_int_device = &adp1653_int_device; -+ -+ flash->i2c_client = client; -+ i2c_set_clientdata(client, flash); -+ -+ rval = v4l2_int_device_register(flash->v4l2_int_device); -+ if (rval) -+ i2c_set_clientdata(client, NULL); -+ -+ return rval; -+} -+ -+static int __exit adp1653_remove(struct i2c_client *client) -+{ -+ struct adp1653_flash *flash = i2c_get_clientdata(client); -+ -+ if (!client->adapter) -+ return -ENODEV; /* our client isn't attached */ -+ -+ v4l2_int_device_unregister(flash->v4l2_int_device); -+ i2c_set_clientdata(client, NULL); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id adp1653_id_table[] = { -+ { ADP1653_NAME, 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, adp1653_id_table); -+ -+static struct i2c_driver adp1653_i2c_driver = { -+ .driver = { -+ .name = ADP1653_NAME, -+ }, -+ .probe = adp1653_probe, -+ .remove = __exit_p(adp1653_remove), -+ .suspend = adp1653_suspend, -+ .resume = adp1653_resume, -+ .id_table = adp1653_id_table, -+}; -+ -+static int __init adp1653_init(void) -+{ -+ int rval; -+ -+ rval = i2c_add_driver(&adp1653_i2c_driver); -+ if (rval) -+ printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__); -+ -+ return rval; -+} -+ -+static void __exit adp1653_exit(void) -+{ -+ i2c_del_driver(&adp1653_i2c_driver); -+} -+ -+/* -+ * FIXME: Menelaus isn't ready (?) at module_init stage, so use -+ * late_initcall for now. -+ */ -+late_initcall(adp1653_init); -+module_exit(adp1653_exit); -+ -+MODULE_AUTHOR("Sakari Ailus "); -+MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/ad5820.c linux-omap-2.6.28-nokia1/drivers/media/video/ad5820.c ---- linux-omap-2.6.28-omap1/drivers/media/video/ad5820.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/ad5820.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,495 @@ -+/* -+ * drivers/media/video/ad5820.c -+ * -+ * AD5820 DAC driver for camera voice coil focus. -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Copyright (C) 2007 Texas Instruments -+ * -+ * Contact: Tuukka Toivonen -+ * Sakari Ailus -+ * -+ * Based on af_d88.c by Texas Instruments. 
-+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+#include -+ -+#define BIT_POWER_DOWN (1<<15) -+ -+#define CODE_TO_RAMP_US(s) ((s) == 0 ? 0 : (1 << ((s) - 1)) * 50) -+#define RAMP_US_TO_CODE(c) fls(((c) + ((c)>>1)) / 50) -+ -+static struct ad5820_device ad5820; -+ -+#define CTRL_FOCUS_ABSOLUTE 0 -+#define CTRL_FOCUS_RAMP_TIME 1 -+#define CTRL_FOCUS_RAMP_MODE 2 -+ -+static struct v4l2_queryctrl ad5820_ctrls[] = { -+ /* Minimum current is 0 mA, maximum is 100 mA. Thus, -+ * 1 code is equivalent to 100/1023 = 0.0978 mA. -+ * Nevertheless, we do not use [mA] for focus position, -+ * because it is meaningless for user. Meaningful would -+ * be to use focus distance or even its inverse, but -+ * since the driver doesn't have sufficiently knowledge -+ * to do the conversion, we will just use abstract codes here. -+ * In any case, smaller value = focus position farther from camera. -+ * The default zero value means focus at infinity, -+ * and also least current consumption. -+ */ -+ { -+ .id = V4L2_CID_FOCUS_ABSOLUTE, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Focus, Absolute", -+ .minimum = 0, -+ .maximum = 1023, -+ .step = 1, -+ .default_value = 0, -+ .flags = 0, -+ }, -+ { -+ .id = V4L2_CID_FOCUS_AD5820_RAMP_TIME, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Focus ramping time [us]", -+ .minimum = 0, -+ .maximum = 3200, -+ .step = 50, -+ .default_value = 0, -+ .flags = 0, -+ }, -+ { -+ .id = V4L2_CID_FOCUS_AD5820_RAMP_MODE, -+ .type = V4L2_CTRL_TYPE_MENU, -+ .name = "Focus ramping mode", -+ .minimum = 0, -+ .maximum = 1, -+ .step = 1, -+ .default_value = 0, -+ .flags = 0, -+ }, -+}; -+ -+/** -+ * @brief I2C write using i2c_transfer(). -+ * @param lens - the driver data structure -+ * @param data - register value to be written -+ * @returns nonnegative on success, negative if failed -+ */ -+static int ad5820_write(struct v4l2_int_device *s, u16 data) -+{ -+ struct ad5820_device *coil = s->priv; -+ struct i2c_client *client = coil->i2c_client; -+ struct i2c_msg msg; -+ int r; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ -+ data = cpu_to_be16(data); -+ msg.addr = client->addr; -+ msg.flags = 0; -+ msg.len = 2; -+ msg.buf = (u8 *)&data; -+ -+ r = i2c_transfer(client->adapter, &msg, 1); -+ if (r >= 0) -+ return 0; -+ -+ dev_err(&coil->i2c_client->dev, "write failed, error %d\n", r); -+ -+ return r; -+} -+ -+/** -+ * @brief I2C read using i2c_transfer(). 
-+ * @param lens - the driver data structure -+ * @returns unsigned 16-bit register value on success, negative if failed -+ */ -+static int ad5820_read(struct v4l2_int_device *s) -+{ -+ struct ad5820_device *coil = s->priv; -+ struct i2c_client *client = coil->i2c_client; -+ struct i2c_msg msg; -+ int r; -+ u16 data = 0; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ -+ msg.addr = client->addr; -+ msg.flags = I2C_M_RD; -+ msg.len = 2; -+ msg.buf = (u8 *)&data; -+ -+ r = i2c_transfer(client->adapter, &msg, 1); -+ if (r >= 0) -+ return be16_to_cpu(data); -+ -+ dev_err(&coil->i2c_client->dev, "read failed, error %d\n", r); -+ -+ return r; -+} -+ -+/* Calculate status word and write it to the device based on current -+ * values of V4L2 controls. It is assumed that the stored V4L2 control -+ * values are properly limited and rounded. */ -+static int ad5820_update_hw(struct v4l2_int_device *s) -+{ -+ struct ad5820_device *coil = s->priv; -+ u16 status; -+ -+ if (coil->power == V4L2_POWER_OFF) -+ return 0; -+ -+ status = RAMP_US_TO_CODE(coil->focus_ramp_time); -+ status |= coil->focus_ramp_mode << 3; -+ status |= coil->focus_absolute << 4; -+ -+ if (coil->power == V4L2_POWER_STANDBY) -+ status |= BIT_POWER_DOWN; -+ -+ return ad5820_write(s, status); -+} -+ -+static int ad5820_ioctl_queryctrl(struct v4l2_int_device *s, -+ struct v4l2_queryctrl *qc) -+{ -+ return smia_ctrl_query(ad5820_ctrls, ARRAY_SIZE(ad5820_ctrls), qc); -+} -+ -+static int ad5820_ioctl_querymenu(struct v4l2_int_device *s, -+ struct v4l2_querymenu *qm) -+{ -+ switch (qm->id) { -+ case V4L2_CID_FOCUS_AD5820_RAMP_MODE: -+ if (qm->index & ~1) -+ return -EINVAL; -+ strcpy(qm->name, qm->index == 0 ? "Linear ramp" : "64/16 ramp"); -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static int ad5820_ioctl_g_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct ad5820_device *coil = s->priv; -+ -+ switch (vc->id) { -+ case V4L2_CID_FOCUS_ABSOLUTE: -+ vc->value = coil->focus_absolute; -+ break; -+ case V4L2_CID_FOCUS_AD5820_RAMP_TIME: -+ vc->value = coil->focus_ramp_time; -+ break; -+ case V4L2_CID_FOCUS_AD5820_RAMP_MODE: -+ vc->value = coil->focus_ramp_mode; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static int ad5820_ioctl_s_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct ad5820_device *coil = s->priv; -+ u32 code; -+ int r = 0; -+ -+ switch (vc->id) { -+ case V4L2_CID_FOCUS_ABSOLUTE: -+ coil->focus_absolute = clamp(vc->value, -+ ad5820_ctrls[CTRL_FOCUS_ABSOLUTE].minimum, -+ ad5820_ctrls[CTRL_FOCUS_ABSOLUTE].maximum); -+ r = ad5820_update_hw(s); -+ break; -+ -+ case V4L2_CID_FOCUS_AD5820_RAMP_TIME: -+ code = clamp(vc->value, -+ ad5820_ctrls[CTRL_FOCUS_RAMP_TIME].minimum, -+ ad5820_ctrls[CTRL_FOCUS_RAMP_TIME].maximum); -+ code = RAMP_US_TO_CODE(code); -+ coil->focus_ramp_time = CODE_TO_RAMP_US(code); -+ break; -+ -+ case V4L2_CID_FOCUS_AD5820_RAMP_MODE: -+ coil->focus_ramp_mode = clamp(vc->value, -+ ad5820_ctrls[CTRL_FOCUS_RAMP_MODE].minimum, -+ ad5820_ctrls[CTRL_FOCUS_RAMP_MODE].maximum); -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ return r; -+} -+ -+static int ad5820_ioctl_dev_init(struct v4l2_int_device *s) -+{ -+ /* Detect that the chip is there */ -+ -+ struct ad5820_device *coil = s->priv; -+ static const int CHECK_VALUE = 0x3FF0; -+ u16 status = BIT_POWER_DOWN | CHECK_VALUE; -+ int rval; -+ -+ rval = coil->platform_data->s_power(s, V4L2_POWER_ON); -+ if (rval) -+ goto not_detected; -+ rval = ad5820_write(s, status); -+ if (rval) 
-+ goto not_detected; -+ rval = ad5820_read(s); -+ if (rval != status) -+ goto not_detected; -+ -+ coil->platform_data->s_power(s, V4L2_POWER_OFF); -+ return 0; -+ -+not_detected: -+ dev_err(&coil->i2c_client->dev, "not detected\n"); -+ return -ENODEV; -+} -+ -+static int ad5820_ioctl_s_power(struct v4l2_int_device *s, -+ enum v4l2_power new_state) -+{ -+ struct ad5820_device *coil = s->priv; -+ enum v4l2_power orig_state = coil->power; -+ int rval; -+ -+ if (new_state == V4L2_POWER_STANDBY) -+ new_state = V4L2_POWER_ON; -+ -+ if (orig_state == new_state) -+ return 0; -+ if (orig_state == V4L2_POWER_OFF) { -+ /* Requested STANDBY or ON -- enable power */ -+ rval = coil->platform_data->s_power(s, V4L2_POWER_ON); -+ if (rval) -+ return rval; -+ } -+ coil->power = new_state; -+ if (new_state == V4L2_POWER_OFF) { -+ /* Requested OFF -- before disabling power, set to standby */ -+ coil->power = V4L2_POWER_STANDBY; -+ } -+ /* -+ * Here power is on. If OFF is requested, the chip -+ * is first set into STANDBY mode. This is necessary -+ * because sensor driver might keep power enabled even -+ * if lens driver requests it off. -+ */ -+ rval = ad5820_update_hw(s); -+ if (rval) -+ goto fail; -+ coil->power = new_state; -+ if (new_state == V4L2_POWER_OFF) { -+ /* Requested OFF -- disable power */ -+ rval = coil->platform_data->s_power(s, V4L2_POWER_OFF); -+ if (rval) -+ goto fail; -+ } -+ -+ return 0; -+ -+fail: -+ /* Try to restore original state and return error code */ -+ coil->platform_data->s_power(s, orig_state == V4L2_POWER_OFF ? -+ V4L2_POWER_OFF : V4L2_POWER_ON); -+ coil->power = orig_state; -+ ad5820_update_hw(s); -+ return rval; -+} -+ -+static int ad5820_ioctl_g_priv(struct v4l2_int_device *s, void *priv) -+{ -+ struct ad5820_device *coil = s->priv; -+ -+ return coil->platform_data->g_priv(s, priv); -+} -+ -+static struct v4l2_int_ioctl_desc ad5820_ioctl_desc[] = { -+ { vidioc_int_queryctrl_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_queryctrl }, -+ { vidioc_int_querymenu_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_querymenu }, -+ { vidioc_int_g_ctrl_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_g_ctrl }, -+ { vidioc_int_s_ctrl_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_s_ctrl }, -+ { vidioc_int_s_power_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_s_power }, -+ { vidioc_int_g_priv_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_g_priv }, -+ { vidioc_int_dev_init_num, -+ (v4l2_int_ioctl_func *)ad5820_ioctl_dev_init }, -+}; -+ -+static struct v4l2_int_slave ad5820_slave = { -+ .ioctls = ad5820_ioctl_desc, -+ .num_ioctls = ARRAY_SIZE(ad5820_ioctl_desc), -+}; -+ -+static struct v4l2_int_device ad5820_int_device = { -+ .module = THIS_MODULE, -+ .name = AD5820_NAME, -+ .priv = &ad5820, -+ .type = v4l2_int_type_slave, -+ .u = { -+ .slave = &ad5820_slave, -+ }, -+}; -+ -+#ifdef CONFIG_PM -+ -+static int ad5820_suspend(struct i2c_client *client, pm_message_t mesg) -+{ -+ struct ad5820_device *coil = i2c_get_clientdata(client); -+ -+ if (coil->power == V4L2_POWER_OFF) -+ return 0; -+ -+ return coil->platform_data->s_power(coil->v4l2_int_device, V4L2_POWER_OFF); -+} -+ -+static int ad5820_resume(struct i2c_client *client) -+{ -+ struct ad5820_device *coil = i2c_get_clientdata(client); -+ enum v4l2_power resume_power; -+ -+ if (coil->power == V4L2_POWER_OFF) -+ return 0; -+ -+ resume_power = coil->power; -+ coil->power = V4L2_POWER_OFF; -+ -+ return ad5820_ioctl_s_power(coil->v4l2_int_device, resume_power); -+} -+ -+#else -+ -+#define ad5820_suspend NULL -+#define ad5820_resume NULL -+ -+#endif /* CONFIG_PM 
*/ -+ -+static int ad5820_probe(struct i2c_client *client, -+ const struct i2c_device_id *devid) -+{ -+ struct ad5820_device *coil = &ad5820; -+ int rval; -+ -+ if (i2c_get_clientdata(client)) -+ return -EBUSY; -+ -+ coil->platform_data = client->dev.platform_data; -+ -+ if (coil->platform_data == NULL) -+ return -ENODEV; -+ -+ coil->focus_absolute = -+ ad5820_ctrls[CTRL_FOCUS_ABSOLUTE].default_value; -+ coil->focus_ramp_time = -+ ad5820_ctrls[CTRL_FOCUS_RAMP_TIME].default_value; -+ coil->focus_ramp_mode = -+ ad5820_ctrls[CTRL_FOCUS_RAMP_MODE].default_value; -+ -+ coil->v4l2_int_device = &ad5820_int_device; -+ -+ coil->i2c_client = client; -+ i2c_set_clientdata(client, coil); -+ -+ rval = v4l2_int_device_register(coil->v4l2_int_device); -+ if (rval) -+ i2c_set_clientdata(client, NULL); -+ -+ return rval; -+} -+ -+static int __exit ad5820_remove(struct i2c_client *client) -+{ -+ struct ad5820_device *coil = i2c_get_clientdata(client); -+ -+ if (!client->adapter) -+ return -ENODEV; /* our client isn't attached */ -+ -+ v4l2_int_device_unregister(coil->v4l2_int_device); -+ i2c_set_clientdata(client, NULL); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id ad5820_id_table[] = { -+ { AD5820_NAME, 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, ad5820_id_table); -+ -+static struct i2c_driver ad5820_i2c_driver = { -+ .driver = { -+ .name = AD5820_NAME, -+ }, -+ .probe = ad5820_probe, -+ .remove = __exit_p(ad5820_remove), -+ .suspend = ad5820_suspend, -+ .resume = ad5820_resume, -+ .id_table = ad5820_id_table, -+}; -+ -+static int __init ad5820_init(void) -+{ -+ int rval; -+ -+ rval = i2c_add_driver(&ad5820_i2c_driver); -+ if (rval) -+ printk(KERN_INFO "%s: failed registering " AD5820_NAME "\n", -+ __func__); -+ -+ return rval; -+} -+ -+static void __exit ad5820_exit(void) -+{ -+ i2c_del_driver(&ad5820_i2c_driver); -+} -+ -+ -+late_initcall(ad5820_init); -+module_exit(ad5820_exit); -+ -+MODULE_AUTHOR("Tuukka Toivonen "); -+MODULE_DESCRIPTION("AD5820 camera lens driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/et8ek8.c linux-omap-2.6.28-nokia1/drivers/media/video/et8ek8.c ---- linux-omap-2.6.28-omap1/drivers/media/video/et8ek8.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/et8ek8.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,1065 @@ -+/* -+ * drivers/media/video/et8ek8.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * Based on code from Toni Leinonen . -+ * -+ * This driver is based on the Micron MT9T012 camera imager driver -+ * (C) Texas Instruments. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "et8ek8.h" -+ -+#define ET8EK8_XCLK_HZ 9600000 -+ -+#define CTRL_GAIN 0 -+#define CTRL_EXPOSURE 1 -+#define CTRL_TEST_PATTERN 2 -+ -+#define CID_TO_CTRL(id) ((id)==V4L2_CID_GAIN ? CTRL_GAIN : \ -+ (id)==V4L2_CID_EXPOSURE ? CTRL_EXPOSURE : \ -+ (id)==V4L2_CID_TEST_PATTERN ? CTRL_TEST_PATTERN : \ -+ -EINVAL) -+ -+enum et8ek8_versions { -+ ET8EK8_REV_1 = 0x0001, -+ ET8EK8_REV_2, -+}; -+ -+/* -+ * This table describes what should be written to the sensor register -+ * for each gain value. The gain(index in the table) is in terms of -+ * 0.1EV, i.e. 10 indexes in the table give 2 time more gain [0] in -+ * the *analog gain, [1] in the digital gain -+ * -+ * Analog gain [dB] = 20*log10(regvalue/32); 0x20..0x100 -+ */ -+static struct et8ek8_gain { -+ u16 analog; -+ u16 digital; -+} const et8ek8_gain_table[] = { -+ { 32, 0}, /* x1 */ -+ { 34, 0}, -+ { 37, 0}, -+ { 39, 0}, -+ { 42, 0}, -+ { 45, 0}, -+ { 49, 0}, -+ { 52, 0}, -+ { 56, 0}, -+ { 60, 0}, -+ { 64, 0}, /* x2 */ -+ { 69, 0}, -+ { 74, 0}, -+ { 79, 0}, -+ { 84, 0}, -+ { 91, 0}, -+ { 97, 0}, -+ {104, 0}, -+ {111, 0}, -+ {119, 0}, -+ {128, 0}, /* x4 */ -+ {137, 0}, -+ {147, 0}, -+ {158, 0}, -+ {169, 0}, -+ {181, 0}, -+ {194, 0}, -+ {208, 0}, -+ {223, 0}, -+ {239, 0}, -+ {256, 0}, /* x8 */ -+ {256, 73}, -+ {256, 152}, -+ {256, 236}, -+ {256, 327}, -+ {256, 424}, -+ {256, 528}, -+ {256, 639}, -+ {256, 758}, -+ {256, 886}, -+ {256, 1023}, /* x16 */ -+}; -+ -+/* Register definitions */ -+#define REG_REVISION_NUMBER_L 0x1200 -+#define REG_REVISION_NUMBER_H 0x1201 -+ -+#define PRIV_MEM_START_REG 0x0008 -+#define PRIV_MEM_WIN_SIZE 8 -+ -+#define ET8EK8_I2C_DELAY 3 /* msec delay b/w accesses */ -+ -+#define USE_CRC 1 -+ -+/* Called to change the V4L2 gain control value. This function -+ * rounds and clamps the given value and updates the V4L2 control value. -+ * If power is on, also updates the sensor analog and digital gains. -+ * gain is in 0.1 EV (exposure value) units. -+ */ -+static int et8ek8_set_gain(struct et8ek8_sensor *sensor, s32 gain) -+{ -+ struct et8ek8_gain new; -+ int r; -+ -+ sensor->controls[CTRL_GAIN].value = clamp(gain, -+ sensor->controls[CTRL_GAIN].minimum, -+ sensor->controls[CTRL_GAIN].maximum); -+ -+ if (sensor->power == V4L2_POWER_OFF) -+ return 0; -+ -+ new = et8ek8_gain_table[sensor->controls[CTRL_GAIN].value]; -+ -+ /* FIXME: optimise I2C writes! */ -+ r = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x124a, new.analog >> 8); -+ if (r) -+ return r; -+ r = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x1249, new.analog & 0xff); -+ if (r) -+ return r; -+ -+ r = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x124d, new.digital >> 8); -+ if (r) -+ return r; -+ r = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x124c, new.digital & 0xff); -+ -+ return r; -+} -+ -+/* Called to change the V4L2 exposure control value. This function -+ * rounds and clamps the given value and updates the V4L2 control value. -+ * If power is on, also update the sensor exposure time. -+ * exptime is in microseconds. 
-+ */ -+static int et8ek8_set_exposure(struct et8ek8_sensor *sensor, s32 exptime) -+{ -+ unsigned int clock; /* Pixel clock in Hz>>10 fixed point */ -+ unsigned int rt; /* Row time in .8 fixed point */ -+ unsigned int rows; /* Exposure value as written to HW (ie. rows) */ -+ -+ exptime = clamp(exptime, sensor->controls[CTRL_EXPOSURE].minimum, -+ sensor->controls[CTRL_EXPOSURE].maximum); -+ -+ /* Assume that the maximum exposure time is at most ~8 s, -+ * and the maximum width (with blanking) ~8000 pixels. -+ * The formula here is in principle as simple as -+ * rows = exptime / 1e6 / width * pixel_clock -+ * but to get accurate results while coping with value ranges, -+ * have to do some fixed point math. -+ */ -+ clock = sensor->current_reglist->mode.pixel_clock; -+ clock = (clock + (1 << 9)) >> 10; -+ rt = sensor->current_reglist->mode.width * (1000000 >> 2); -+ rt = (rt + (clock >> 1)) / clock; -+ rows = ((exptime << 8) + (rt >> 1)) / rt; -+ -+ /* Set the V4L2 control for exposure time to the rounded value */ -+ sensor->controls[CTRL_EXPOSURE].value = (rt * rows + (1 << 7)) >> 8; -+ -+ if (sensor->power == V4L2_POWER_OFF) -+ return 0; -+ -+ return smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_16BIT, 0x1243, -+ swab16(rows)); -+} -+ -+static int et8ek8_set_test_pattern(struct et8ek8_sensor *sensor, s32 mode) -+{ -+ int cbh_mode, cbv_mode, tp_mode, din_sw, r1420, rval; -+ -+ if (mode < 0 || mode > 8) -+ return -EINVAL; -+ -+ sensor->controls[CTRL_TEST_PATTERN].value = mode; -+ -+ if (sensor->power == V4L2_POWER_OFF) -+ return 0; -+ -+ /* Values for normal mode */ -+ cbh_mode = 0; -+ cbv_mode = 0; -+ tp_mode = 0; -+ din_sw = 0x00; -+ r1420 = 0xF0; -+ -+ if (mode != 0) { -+ /* Test pattern mode */ -+ if (mode < 5) { -+ cbh_mode = 1; -+ cbv_mode = 1; -+ tp_mode = mode + 3; -+ } else { -+ cbh_mode = 0; -+ cbv_mode = 0; -+ tp_mode = mode - 4 + 3; -+ } -+ din_sw = 0x01; -+ r1420 = 0xE0; -+ } -+ -+ rval = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x111B, tp_mode << 4); -+ if (rval) -+ goto out; -+ -+ rval = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x1121, cbh_mode << 7); -+ if (rval) -+ goto out; -+ -+ rval = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x1124, cbv_mode << 7); -+ if (rval) -+ goto out; -+ -+ rval = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x112C, din_sw); -+ if (rval) -+ goto out; -+ -+ rval = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ 0x1420, r1420); -+ if (rval) -+ goto out; -+ -+out: -+ return rval; -+ -+} -+ -+static int et8ek8_update_controls(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ int i; -+ unsigned int rt; /* Row time in us */ -+ unsigned int clock; /* Pixel clock in Hz>>2 fixed point */ -+ -+ if (sensor->current_reglist->mode.pixel_clock <= 0 || -+ sensor->current_reglist->mode.width <= 0) { -+ dev_err(&sensor->i2c_client->dev, "bad firmware\n"); -+ return -EIO; -+ } -+ -+ clock = sensor->current_reglist->mode.pixel_clock; -+ clock = (clock + (1 << 1)) >> 2; -+ rt = sensor->current_reglist->mode.width * (1000000 >> 2); -+ rt = (rt + (clock >> 1)) / clock; -+ -+ sensor->controls[CTRL_EXPOSURE].minimum = rt; -+ sensor->controls[CTRL_EXPOSURE].maximum = -+ sensor->current_reglist->mode.max_exp * rt; -+ sensor->controls[CTRL_EXPOSURE].step = rt; -+ sensor->controls[CTRL_EXPOSURE].default_value = -+ sensor->controls[CTRL_EXPOSURE].maximum; -+ if (sensor->controls[CTRL_EXPOSURE].value == 0) -+ sensor->controls[CTRL_EXPOSURE].value = -+ 
sensor->controls[CTRL_EXPOSURE].maximum; -+ -+ /* Adjust V4L2 control values and write them to the sensor */ -+ -+ for (i=0; icontrols); i++) { -+ int rval = sensor->controls[i].set(sensor, -+ sensor->controls[i].value); -+ if (rval) -+ return rval; -+ } -+ return 0; -+} -+ -+static int et8ek8_configure(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ int rval; -+ -+ rval = et8ek8_update_controls(s); -+ if (rval) -+ goto fail; -+ -+ rval = smia_i2c_write_regs(sensor->i2c_client, -+ sensor->current_reglist->regs); -+ if (rval) -+ goto fail; -+ -+ rval = sensor->platform_data->configure_interface( -+ s, &sensor->current_reglist->mode); -+ if (rval) -+ goto fail; -+ -+ return 0; -+ -+fail: -+ dev_err(&sensor->i2c_client->dev, "sensor configuration failed\n"); -+ return rval; -+} -+ -+static int et8ek8_stream_on(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ return smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x1252, 0xB0); -+} -+ -+static int et8ek8_stream_off(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ return smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x1252, 0x30); -+} -+ -+static int et8ek8_power_off(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ int rval; -+ -+ rval = sensor->platform_data->power_off(s); -+ if (rval) -+ return rval; -+ udelay(1); -+ rval = sensor->platform_data->set_xclk(s, 0); -+ return rval; -+} -+ -+static int et8ek8_power_on(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ unsigned int hz = ET8EK8_XCLK_HZ; -+ int val, rval; -+ -+ if (sensor->current_reglist) -+ hz = sensor->current_reglist->mode.ext_clock; -+ -+ rval = sensor->platform_data->set_xclk(s, hz); -+ if (rval) -+ goto out; -+ -+ udelay(10); /* I wish this is a good value */ -+ -+ rval = sensor->platform_data->power_on(s); -+ if (rval) -+ goto out; -+ -+ msleep(5000*1000/hz+1); /* Wait 5000 cycles */ -+ -+ if (sensor->meta_reglist) { -+ rval = smia_i2c_reglist_find_write(sensor->i2c_client, -+ sensor->meta_reglist, -+ SMIA_REGLIST_POWERON); -+ if (rval) -+ goto out; -+ } -+ -+ rval = et8ek8_stream_off(s); -+ if (rval) -+ goto out; -+ -+#ifdef USE_CRC -+ rval = smia_i2c_read_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x1263, &val); -+ if (rval) -+ goto out; -+#if USE_CRC -+ val |= (1<<4); -+#else -+ val &= ~(1<<4); -+#endif -+ rval = smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x1263, val); -+ if (rval) -+ goto out; -+#endif -+ -+out: -+ if (rval) -+ et8ek8_power_off(s); -+ -+ return rval; -+} -+ -+static struct v4l2_queryctrl et8ek8_ctrls[] = { -+ { -+ .id = V4L2_CID_GAIN, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Gain [0.1 EV]", -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ { -+ .id = V4L2_CID_EXPOSURE, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Exposure time [us]", -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ { -+ .id = V4L2_CID_TEST_PATTERN, -+ .type = V4L2_CTRL_TYPE_MENU, -+ .name = "Test pattern mode", -+ .flags = 0, -+ .minimum = 0, -+ .maximum = 8, -+ .step = 1, -+ .default_value = 0, -+ }, -+}; -+ -+static const __u32 et8ek8_mode_ctrls[] = { -+ V4L2_CID_MODE_FRAME_WIDTH, -+ V4L2_CID_MODE_FRAME_HEIGHT, -+ V4L2_CID_MODE_VISIBLE_WIDTH, -+ V4L2_CID_MODE_VISIBLE_HEIGHT, -+ V4L2_CID_MODE_PIXELCLOCK, -+ V4L2_CID_MODE_SENSITIVITY, -+}; -+ -+static int et8ek8_ioctl_queryctrl(struct v4l2_int_device *s, -+ struct v4l2_queryctrl *a) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ int rval, ctrl; -+ -+ rval = 
smia_ctrl_query(et8ek8_ctrls, ARRAY_SIZE(et8ek8_ctrls), a); -+ if (rval) { -+ return smia_mode_query(et8ek8_mode_ctrls, -+ ARRAY_SIZE(et8ek8_mode_ctrls), a); -+ } -+ -+ ctrl = CID_TO_CTRL(a->id); -+ if (ctrl < 0) -+ return ctrl; -+ -+ a->minimum = sensor->controls[ctrl].minimum; -+ a->maximum = sensor->controls[ctrl].maximum; -+ a->step = sensor->controls[ctrl].step; -+ a->default_value = sensor->controls[ctrl].default_value; -+ -+ return 0; -+} -+ -+static int et8ek8_ioctl_querymenu(struct v4l2_int_device *s, -+ struct v4l2_querymenu *qm) -+{ -+ static const char *menu_name[] = { -+ "Normal", -+ "Vertical colorbar", -+ "Horizontal colorbar", -+ "Scale", -+ "Ramp", -+ "Small vertical colorbar", -+ "Small horizontal colorbar", -+ "Small scale", -+ "Small ramp", -+ }; -+ -+ switch (qm->id) { -+ case V4L2_CID_TEST_PATTERN: -+ if (qm->index >= ARRAY_SIZE(menu_name)) -+ return -EINVAL; -+ strcpy(qm->name, menu_name[qm->index]); -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static int et8ek8_ioctl_g_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ int ctrl; -+ -+ int rval = smia_mode_g_ctrl(et8ek8_mode_ctrls, -+ ARRAY_SIZE(et8ek8_mode_ctrls), -+ vc, &sensor->current_reglist->mode); -+ if (rval == 0) -+ return 0; -+ -+ ctrl = CID_TO_CTRL(vc->id); -+ if (ctrl < 0) -+ return ctrl; -+ vc->value = sensor->controls[ctrl].value; -+ return 0; -+} -+ -+static int et8ek8_ioctl_s_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ int ctrl = CID_TO_CTRL(vc->id); -+ if (ctrl < 0) -+ return ctrl; -+ return sensor->controls[ctrl].set(sensor, vc->value); -+} -+ -+static int et8ek8_ioctl_enum_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_fmtdesc *fmt) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ -+ return smia_reglist_enum_fmt(sensor->meta_reglist, fmt); -+} -+ -+ -+static int et8ek8_ioctl_g_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_format *f) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ struct v4l2_pix_format *pix = &f->fmt.pix; -+ -+ pix->width = sensor->current_reglist->mode.window_width; -+ pix->height = sensor->current_reglist->mode.window_height; -+ pix->pixelformat = sensor->current_reglist->mode.pixel_format; -+ -+ return 0; -+} -+ -+static int et8ek8_ioctl_s_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_format *f) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ struct smia_reglist *reglist; -+ -+ reglist = smia_reglist_find_mode_fmt(sensor->meta_reglist, -+ sensor->current_reglist, f); -+ -+ if (!reglist) -+ return -EINVAL; -+ -+ if (sensor->power != V4L2_POWER_OFF && -+ sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock) -+ return -EINVAL; -+ -+ sensor->current_reglist = reglist; -+ -+ return et8ek8_update_controls(s); -+} -+ -+static int et8ek8_ioctl_g_parm(struct v4l2_int_device *s, -+ struct v4l2_streamparm *a) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ struct v4l2_captureparm *cparm = &a->parm.capture; -+ -+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ return -EINVAL; -+ -+ memset(a, 0, sizeof(*a)); -+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ -+ cparm->capability = V4L2_CAP_TIMEPERFRAME; -+ cparm->timeperframe = sensor->current_reglist->mode.timeperframe; -+ -+ return 0; -+} -+ -+static int et8ek8_ioctl_s_parm(struct v4l2_int_device *s, -+ struct v4l2_streamparm *a) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ struct smia_reglist *reglist; -+ -+ reglist = 
smia_reglist_find_mode_streamparm(sensor->meta_reglist, -+ sensor->current_reglist, a); -+ -+ if (!reglist) -+ return -EINVAL; -+ -+ if (sensor->power != V4L2_POWER_OFF && -+ sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock) -+ return -EINVAL; -+ -+ sensor->current_reglist = reglist; -+ -+ return et8ek8_update_controls(s); -+} -+ -+static int et8ek8_g_priv_mem(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ struct i2c_client *client = sensor->i2c_client; -+ unsigned int length = ET8EK8_PRIV_MEM_SIZE; -+ unsigned int offset = 0; -+ u8 *ptr = sensor->priv_mem; -+ int rval = 0; -+ -+ /* Read the EEPROM window-by-window, each window 8 bytes */ -+ do { -+ u8 buffer[PRIV_MEM_WIN_SIZE]; -+ struct i2c_msg msg; -+ int bytes, i; -+ int ofs; -+ -+ /* Set the current window */ -+ rval = smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, -+ 0x0001, -+ 0xe0 | (offset >> 3)); -+ if (rval < 0) -+ goto out; -+ -+ /* Wait for status bit */ -+ i = 1000; -+ do { -+ u32 status; -+ rval = smia_i2c_read_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, -+ 0x0003, -+ &status); -+ if (rval < 0) -+ goto out; -+ if ((status & 0x08) == 0) -+ break; -+ if (--i == 0) { -+ rval = -EIO; -+ goto out; -+ } -+ msleep(1); -+ } while (1); -+ -+ /* Read window, 8 bytes at once, and copy to user space */ -+ ofs = offset & 0x07; /* Offset within this window */ -+ bytes = length + ofs > 8 ? 8-ofs : length; -+ msg.addr = client->addr; -+ msg.flags = 0; -+ msg.len = 2; -+ msg.buf = buffer; -+ ofs += PRIV_MEM_START_REG; -+ buffer[0] = (u8)(ofs >> 8); -+ buffer[1] = (u8)(ofs & 0xFF); -+ rval = i2c_transfer(client->adapter, &msg, 1); -+ if (rval < 0) -+ goto out; -+ mdelay(ET8EK8_I2C_DELAY); -+ msg.addr = client->addr; -+ msg.len = bytes; -+ msg.flags = I2C_M_RD; -+ msg.buf = buffer; -+ memset(buffer, 0, sizeof(buffer)); -+ rval = i2c_transfer(client->adapter, &msg, 1); -+ if (rval < 0) -+ goto out; -+ rval = 0; -+ memcpy(ptr, buffer, bytes); -+ -+ length -= bytes; -+ offset += bytes; -+ ptr += bytes; -+ } while (length > 0); -+ -+out: -+ return rval; -+} -+ -+static int et8ek8_ioctl_dev_init(struct v4l2_int_device *s) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ char name[FIRMWARE_NAME_MAX]; -+ int rval, rev_l, rev_h; -+ -+ rval = et8ek8_power_on(s); -+ if (rval) -+ return -ENODEV; -+ -+ if (smia_i2c_read_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ REG_REVISION_NUMBER_L, &rev_l) != 0 -+ || smia_i2c_read_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ REG_REVISION_NUMBER_H, &rev_h) != 0) { -+ dev_err(&sensor->i2c_client->dev, -+ "no et8ek8 sensor detected\n"); -+ rval = -ENODEV; -+ goto out_poweroff; -+ } -+ sensor->version = (rev_h << 8) + rev_l; -+ if (sensor->version != ET8EK8_REV_1 -+ && sensor->version != ET8EK8_REV_2) -+ dev_info(&sensor->i2c_client->dev, -+ "unknown version 0x%x detected, " -+ "continuing anyway\n", sensor->version); -+ -+ snprintf(name, FIRMWARE_NAME_MAX, "%s-%4.4x.bin", ET8EK8_NAME, -+ sensor->version); -+ if (request_firmware(&sensor->fw, name, -+ &sensor->i2c_client->dev)) { -+ dev_err(&sensor->i2c_client->dev, -+ "can't load firmware %s\n", name); -+ rval = -ENODEV; -+ goto out_poweroff; -+ } -+ sensor->meta_reglist = -+ (struct smia_meta_reglist *)sensor->fw->data; -+ rval = smia_reglist_import(sensor->meta_reglist); -+ if (rval) { -+ dev_err(&sensor->i2c_client->dev, -+ "invalid register list %s, import failed\n", -+ name); -+ goto out_release; -+ } -+ -+ sensor->current_reglist = -+ smia_reglist_find_type(sensor->meta_reglist, -+ SMIA_REGLIST_MODE); -+ if 
(!sensor->current_reglist) { -+ dev_err(&sensor->i2c_client->dev, -+ "invalid register list %s, no mode found\n", -+ name); -+ rval = -ENODEV; -+ goto out_release; -+ } -+ -+ rval = smia_i2c_reglist_find_write(sensor->i2c_client, -+ sensor->meta_reglist, -+ SMIA_REGLIST_POWERON); -+ if (rval) { -+ dev_err(&sensor->i2c_client->dev, -+ "invalid register list %s, no POWERON mode found\n", -+ name); -+ goto out_release; -+ } -+ rval = et8ek8_stream_on(s); /* Needed to be able to read EEPROM */ -+ if (rval) -+ goto out_release; -+ rval = et8ek8_g_priv_mem(s); -+ if (rval) -+ dev_warn(&sensor->i2c_client->dev, -+ "can not read OTP (EEPROM) memory from sensor\n"); -+ rval = et8ek8_stream_off(s); -+ if (rval) -+ goto out_release; -+ -+ rval = et8ek8_power_off(s); -+ if (rval) -+ goto out_release; -+ -+ return 0; -+ -+out_release: -+ release_firmware(sensor->fw); -+out_poweroff: -+ sensor->meta_reglist = NULL; -+ sensor->fw = NULL; -+ et8ek8_power_off(s); -+ -+ return rval; -+} -+ -+static int et8ek8_ioctl_s_power(struct v4l2_int_device *s, -+ enum v4l2_power new_state) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ enum v4l2_power old_state = sensor->power; -+ int rval = 0; -+ -+ /* If we are already in this mode, do nothing */ -+ if (old_state == new_state) -+ return 0; -+ -+ /* Disable power if so requested (it was enabled) */ -+ if (new_state == V4L2_POWER_OFF) { -+ rval = et8ek8_stream_off(s); -+ if (rval) -+ dev_err(&sensor->i2c_client->dev, -+ "can not stop streaming\n"); -+ rval = et8ek8_power_off(s); -+ goto out; -+ } -+ -+ /* Either STANDBY or ON requested */ -+ -+ /* Enable power and move to standby if it was off */ -+ if (old_state == V4L2_POWER_OFF) { -+ rval = et8ek8_power_on(s); -+ if (rval) -+ goto out; -+ } -+ -+ /* Now sensor is powered (standby or streaming) */ -+ -+ if (new_state == V4L2_POWER_ON) { -+ /* Standby -> streaming */ -+ sensor->power = V4L2_POWER_ON; -+ rval = et8ek8_configure(s); -+ if (rval) { -+ et8ek8_stream_off(s); -+ if (old_state == V4L2_POWER_OFF) -+ et8ek8_power_off(s); -+ goto out; -+ } -+ rval = et8ek8_stream_on(s); -+ } else { -+ /* Streaming -> standby */ -+ rval = et8ek8_stream_off(s); -+ } -+ -+out: -+ sensor->power = (rval == 0) ? new_state : old_state; -+ return rval; -+} -+ -+static int et8ek8_ioctl_g_priv(struct v4l2_int_device *s, void *priv) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ -+ return sensor->platform_data->g_priv(s, priv); -+} -+ -+static int et8ek8_ioctl_enum_framesizes(struct v4l2_int_device *s, -+ struct v4l2_frmsizeenum *frm) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ -+ return smia_reglist_enum_framesizes(sensor->meta_reglist, frm); -+} -+ -+static int et8ek8_ioctl_enum_frameintervals(struct v4l2_int_device *s, -+ struct v4l2_frmivalenum *frm) -+{ -+ struct et8ek8_sensor *sensor = s->priv; -+ -+ return smia_reglist_enum_frameintervals(sensor->meta_reglist, frm); -+} -+ -+static ssize_t -+et8ek8_priv_mem_read(struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ struct et8ek8_sensor *sensor = dev_get_drvdata(dev); -+ -+#if PAGE_SIZE < ET8EK8_PRIV_MEM_SIZE -+#error PAGE_SIZE too small! 
-+#endif -+ -+ memcpy(buf, sensor->priv_mem, ET8EK8_PRIV_MEM_SIZE); -+ -+ return ET8EK8_PRIV_MEM_SIZE; -+} -+static DEVICE_ATTR(priv_mem, S_IRUGO, et8ek8_priv_mem_read, NULL); -+ -+static struct v4l2_int_ioctl_desc et8ek8_ioctl_desc[] = { -+ { vidioc_int_enum_fmt_cap_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_enum_fmt_cap }, -+ { vidioc_int_try_fmt_cap_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_g_fmt_cap }, -+ { vidioc_int_g_fmt_cap_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_g_fmt_cap }, -+ { vidioc_int_s_fmt_cap_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_s_fmt_cap }, -+ { vidioc_int_queryctrl_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_queryctrl }, -+ { vidioc_int_querymenu_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_querymenu }, -+ { vidioc_int_g_ctrl_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_g_ctrl }, -+ { vidioc_int_s_ctrl_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_s_ctrl }, -+ { vidioc_int_g_parm_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_g_parm }, -+ { vidioc_int_s_parm_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_s_parm }, -+ { vidioc_int_s_power_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_s_power }, -+ { vidioc_int_g_priv_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_g_priv }, -+ { vidioc_int_enum_framesizes_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_enum_framesizes }, -+ { vidioc_int_enum_frameintervals_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_enum_frameintervals }, -+ { vidioc_int_dev_init_num, -+ (v4l2_int_ioctl_func *)et8ek8_ioctl_dev_init }, -+}; -+ -+static struct v4l2_int_slave et8ek8_slave = { -+ .ioctls = et8ek8_ioctl_desc, -+ .num_ioctls = ARRAY_SIZE(et8ek8_ioctl_desc), -+}; -+ -+static struct et8ek8_sensor et8ek8; -+ -+static struct v4l2_int_device et8ek8_int_device = { -+ .module = THIS_MODULE, -+ .name = ET8EK8_NAME, -+ .priv = &et8ek8, -+ .type = v4l2_int_type_slave, -+ .u = { -+ .slave = &et8ek8_slave, -+ }, -+}; -+ -+#ifdef CONFIG_PM -+ -+static int et8ek8_suspend(struct i2c_client *client, pm_message_t mesg) -+{ -+ struct et8ek8_sensor *sensor = dev_get_drvdata(&client->dev); -+ enum v4l2_power resume_state = sensor->power; -+ int rval; -+ -+ rval = et8ek8_ioctl_s_power(sensor->v4l2_int_device, V4L2_POWER_OFF); -+ if (rval == 0) -+ sensor->power = resume_state; -+ return rval; -+} -+ -+static int et8ek8_resume(struct i2c_client *client) -+{ -+ struct et8ek8_sensor *sensor = dev_get_drvdata(&client->dev); -+ enum v4l2_power resume_state = sensor->power; -+ -+ sensor->power = V4L2_POWER_OFF; -+ return et8ek8_ioctl_s_power(sensor->v4l2_int_device, resume_state); -+} -+ -+#else -+ -+#define et8ek8_suspend NULL -+#define et8ek8_resume NULL -+ -+#endif /* CONFIG_PM */ -+ -+static int et8ek8_probe(struct i2c_client *client, -+ const struct i2c_device_id *devid) -+{ -+ struct et8ek8_sensor *sensor = &et8ek8; -+ int rval; -+ -+ if (i2c_get_clientdata(client)) -+ return -EBUSY; -+ -+ sensor->platform_data = client->dev.platform_data; -+ -+ if (sensor->platform_data == NULL) -+ return -ENODEV; -+ -+ if (device_create_file(&client->dev, &dev_attr_priv_mem) != 0) { -+ dev_err(&client->dev, "could not register sysfs entry\n"); -+ return -EBUSY; -+ } -+ -+ sensor->v4l2_int_device = &et8ek8_int_device; -+ -+ /* Gain is initialized here permanently */ -+ sensor->controls[CTRL_GAIN].minimum = 0; -+ sensor->controls[CTRL_GAIN].maximum = ARRAY_SIZE(et8ek8_gain_table) - 1; -+ sensor->controls[CTRL_GAIN].step = 1; -+ sensor->controls[CTRL_GAIN].default_value = 0; -+ sensor->controls[CTRL_GAIN].value = 0; -+ sensor->controls[CTRL_GAIN].set = et8ek8_set_gain; -+ -+ /* Exposure 
parameters may change at each mode change, just zero here */ -+ sensor->controls[CTRL_EXPOSURE].minimum = 0; -+ sensor->controls[CTRL_EXPOSURE].maximum = 0; -+ sensor->controls[CTRL_EXPOSURE].step = 0; -+ sensor->controls[CTRL_EXPOSURE].default_value = 0; -+ sensor->controls[CTRL_EXPOSURE].value = 0; -+ sensor->controls[CTRL_EXPOSURE].set = et8ek8_set_exposure; -+ -+ /* Test pattern mode control */ -+ sensor->controls[CTRL_TEST_PATTERN].minimum = et8ek8_ctrls[CTRL_TEST_PATTERN].minimum; -+ sensor->controls[CTRL_TEST_PATTERN].maximum = et8ek8_ctrls[CTRL_TEST_PATTERN].maximum; -+ sensor->controls[CTRL_TEST_PATTERN].step = et8ek8_ctrls[CTRL_TEST_PATTERN].step; -+ sensor->controls[CTRL_TEST_PATTERN].default_value = et8ek8_ctrls[CTRL_TEST_PATTERN].default_value; -+ sensor->controls[CTRL_TEST_PATTERN].value = 0; -+ sensor->controls[CTRL_TEST_PATTERN].set = et8ek8_set_test_pattern; -+ -+ sensor->i2c_client = client; -+ i2c_set_clientdata(client, sensor); -+ dev_set_drvdata(&client->dev, sensor); -+ -+ rval = v4l2_int_device_register(sensor->v4l2_int_device); -+ if (rval) { -+ device_remove_file(&client->dev, &dev_attr_priv_mem); -+ i2c_set_clientdata(client, NULL); -+ dev_set_drvdata(&client->dev, NULL); -+ } -+ -+ return rval; -+} -+ -+static int __exit et8ek8_remove(struct i2c_client *client) -+{ -+ struct et8ek8_sensor *sensor = i2c_get_clientdata(client); -+ -+ if (!client->adapter) -+ return -ENODEV; /* our client isn't attached */ -+ -+ v4l2_int_device_unregister(sensor->v4l2_int_device); -+ dev_set_drvdata(&client->dev, NULL); -+ i2c_set_clientdata(client, NULL); -+ device_remove_file(&client->dev, &dev_attr_priv_mem); -+ release_firmware(sensor->fw); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id et8ek8_id_table[] = { -+ { ET8EK8_NAME, 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, et8ek8_id_table); -+ -+static struct i2c_driver et8ek8_i2c_driver = { -+ .driver = { -+ .name = ET8EK8_NAME, -+ }, -+ .probe = et8ek8_probe, -+ .remove = __exit_p(et8ek8_remove), -+ .suspend = et8ek8_suspend, -+ .resume = et8ek8_resume, -+ .id_table = et8ek8_id_table, -+}; -+ -+static int __init et8ek8_init(void) -+{ -+ int rval; -+ -+ rval = i2c_add_driver(&et8ek8_i2c_driver); -+ if (rval) -+ printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__); -+ -+ return rval; -+} -+ -+static void __exit et8ek8_exit(void) -+{ -+ i2c_del_driver(&et8ek8_i2c_driver); -+} -+ -+/* -+ * FIXME: Menelaus isn't ready (?) at module_init stage, so use -+ * late_initcall for now. -+ */ -+late_initcall(et8ek8_init); -+module_exit(et8ek8_exit); -+ -+MODULE_AUTHOR("Sakari Ailus "); -+MODULE_DESCRIPTION("Toshiba ET8EK8 camera sensor driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/et8ek8.h linux-omap-2.6.28-nokia1/drivers/media/video/et8ek8.h ---- linux-omap-2.6.28-omap1/drivers/media/video/et8ek8.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/et8ek8.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,79 @@ -+/* -+ * drivers/media/video/et8ek8.h -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef ET8EK8_H -+#define ET8EK8_H -+ -+#include -+#include -+#include -+ -+#define ET8EK8_NAME "et8ek8" -+#define ET8EK8_I2C_ADDR (0x7C >> 1) -+ -+#define ET8EK8_PRIV_MEM_SIZE 128 -+#define ET8EK8_NCTRLS 3 -+ -+struct et8ek8_platform_data { -+ int (*g_priv)(struct v4l2_int_device *s, void *priv); -+ int (*configure_interface)(struct v4l2_int_device *s, -+ struct smia_mode *mode); -+ int (*set_xclk)(struct v4l2_int_device *s, int hz); -+ int (*power_on)(struct v4l2_int_device *s); -+ int (*power_off)(struct v4l2_int_device *s); -+}; -+ -+struct et8ek8_sensor; -+ -+/* Current values for V4L2 controls */ -+struct et8ek8_control { -+ s32 minimum; -+ s32 maximum; -+ s32 step; -+ s32 default_value; -+ s32 value; -+ int (*set)(struct et8ek8_sensor *sensor, s32 value); -+}; -+ -+struct et8ek8_sensor { -+ struct i2c_client *i2c_client; -+ struct i2c_driver driver; -+ -+ u16 version; -+ -+ struct et8ek8_control controls[ET8EK8_NCTRLS]; -+ -+ struct smia_reglist *current_reglist; -+ struct v4l2_int_device *v4l2_int_device; -+ struct v4l2_fract timeperframe; -+ -+ struct et8ek8_platform_data *platform_data; -+ -+ const struct firmware *fw; -+ struct smia_meta_reglist *meta_reglist; -+ u8 priv_mem[ET8EK8_PRIV_MEM_SIZE]; -+ -+ enum v4l2_power power; -+}; -+ -+#endif /* ET8EK8_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/et8ek8-modes.h linux-omap-2.6.28-nokia1/drivers/media/video/et8ek8-modes.h ---- linux-omap-2.6.28-omap1/drivers/media/video/et8ek8-modes.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/et8ek8-modes.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,367 @@ -+/* Automatically generated code from Scooby -+ * configuration file by makemodes.pl. */ -+ -+const static struct smia_reg list_init[] = { -+/* VBAT 3.80 Battery voltage */ -+/* VANA 2.80 Analog supply voltage */ -+/* VDIG 1.80 Digital supply voltage */ -+#define CAMERA_XCLK_HZ 9600000 /* EXTCLK 1 1 ExtClk source: 1=19.200, 2=26.000, 3=98.304MHz Divider: 0=/1,1=/2, 2=/4 ... 
7=/14 */ -+/* SPEED 400.0 I2C speed in kHz */ -+/* MODE 4 Actuator drive mode (bits S3,S2,S1,S0 for AD5820) */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode0_powerdown */ -+const static struct smia_reg list_mode0_powerdown[] = { -+/* XSHD 0 XSHUTDOWN low */ -+ { SMIA_REG_DELAY, 0, 1 }, -+/* VDIG 0 VDIG off */ -+ { SMIA_REG_DELAY, 0, 1 }, -+/* VANA 0 VANA off */ -+ { SMIA_REG_DELAY, 0, 1 }, -+/* VBAT 0 VBAT off */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode1_poweron_Mode2_16VGA_2592x1968_12.07fps */ -+const static struct smia_reg list_mode1_poweron_mode2_16vga_2592x1968_12_07fps[] = { -+/* XSHD 0 XSHUTDOWN lo */ -+/* VBAT 1 VBAT on */ -+ { SMIA_REG_DELAY, 0, 1 }, -+/* VANA 1 VANA on */ -+ { SMIA_REG_DELAY, 0, 1 }, -+/* VDIG 1 VDIG on */ -+ { SMIA_REG_DELAY, 0, 5 }, -+/* XSHD 1 XSHUTDOWN hi */ -+ { SMIA_REG_DELAY, 0, 5 }, -+ { SMIA_REG_8BIT, 0x126C, 0xCC }, /* Need to set firstly */ -+ { SMIA_REG_8BIT, 0x1252, 0xB0 }, /* Need to set secondary (from Sleep to active) */ -+ { SMIA_REG_8BIT, 0x1220, 0x89 }, /* Refined value of Min H_COUNT */ -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, /* Frequency of SPCK setting (SPCK=MRCK) */ -+ { SMIA_REG_8BIT, 0x1241, 0x94 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1242, 0x02 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x124B, 0x00 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1255, 0xFF }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1256, 0x9F }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1258, 0x00 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* From parallel out to serial out */ -+ { SMIA_REG_8BIT, 0x125E, 0xC0 }, /* From w/ embedded data to w/o embedded data */ -+ { SMIA_REG_8BIT, 0x1263, 0x98 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1268, 0xC6 }, /* CCP2 out is from STOP to ACTIVE */ -+ { SMIA_REG_8BIT, 0x1434, 0x00 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1163, 0x44 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1166, 0x29 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1140, 0x02 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1011, 0x24 }, /* Initial setting */ -+ { SMIA_REG_8BIT, 0x1151, 0x80 }, /* Initial setting( for improvement of lower frequency noise ) */ -+ { SMIA_REG_8BIT, 0x1152, 0x23 }, /* Initial setting( for improvement of lower frequency noise ) */ -+ { SMIA_REG_8BIT, 0x1014, 0x05 }, /* Initial setting( for improvement2 of lower frequency noise ) */ -+ { SMIA_REG_8BIT, 0x1033, 0x06 }, -+ { SMIA_REG_8BIT, 0x1034, 0x79 }, -+ { SMIA_REG_8BIT, 0x1423, 0x3F }, -+ { SMIA_REG_8BIT, 0x1424, 0x3F }, -+ { SMIA_REG_8BIT, 0x1426, 0x00 }, -+ { SMIA_REG_8BIT, 0x1439, 0x00 }, /* 0 */ -+ { SMIA_REG_8BIT, 0x161F, 0x60 }, /* 0 blemish correction is off */ -+ { SMIA_REG_8BIT, 0x1634, 0x00 }, /* 0 Auto noise reduction is off */ -+ { SMIA_REG_8BIT, 0x1646, 0x00 }, /* 0 */ -+ { SMIA_REG_8BIT, 0x1648, 0x00 }, /* 0 */ -+ { SMIA_REG_8BIT, 0x113E, 0x01 }, /* 1 */ -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x70 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x64 }, -+ { SMIA_REG_8BIT, 0x121D, 0x64 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x89 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 1 1=raw10 */ -+/* imageWidth 2592 Number of pixels in one line */ -+/* imageHeight 1968 Number of lines in active image */ -+/* paxelTopLine 328 Top line of AFV&APS window (y0) */ -+/* 
paxelLeftPixel 432 Left column of AFV&APS window (x0) */ -+/* paxelWidth 576 Number of pixels in one Paxel */ -+/* paxelHeight 437 Number of lines in one Paxel */ -+/* Mode1_16VGA_2592x1968_13.12fps_DPCM10-8 */ -+const static struct smia_reg list_mode1_16vga_2592x1968_13_12fps_dpcm10_8[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x57 }, -+ { SMIA_REG_8BIT, 0x1238, 0x82 }, -+ { SMIA_REG_8BIT, 0x123B, 0x70 }, -+ { SMIA_REG_8BIT, 0x123A, 0x06 }, -+ { SMIA_REG_8BIT, 0x121B, 0x64 }, -+ { SMIA_REG_8BIT, 0x121D, 0x64 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x7E }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 0 0=raw8, 1=raw10 */ -+/* Mode2_16VGA_2592x1968_12.07fps */ -+const static struct smia_reg list_mode2_16vga_2592x1968_12_07fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x70 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x64 }, -+ { SMIA_REG_8BIT, 0x121D, 0x64 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x89 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode3_4VGA_1296x984_14.91fps_DPCM10-8 */ -+const static struct smia_reg list_mode3_4vga_1296x984_14_91fps_dpcm10_8[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x7B }, -+ { SMIA_REG_8BIT, 0x1238, 0x82 }, -+ { SMIA_REG_8BIT, 0x123B, 0x70 }, -+ { SMIA_REG_8BIT, 0x123A, 0x17 }, -+ { SMIA_REG_8BIT, 0x121B, 0x63 }, -+ { SMIA_REG_8BIT, 0x121D, 0x63 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x89 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 0 0=raw8, 1=raw10 */ -+/* Mode4_SVGA_864x656_14.94fps */ -+const static struct smia_reg list_mode4_svga_864x656_14_94fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x17 }, -+ { SMIA_REG_8BIT, 0x121B, 0x62 }, -+ { SMIA_REG_8BIT, 0x121D, 0x62 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xA6 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 1 1=raw10 */ -+/* Mode5_VGA_648x492_14.96fps */ -+const static struct smia_reg list_mode5_vga_648x492_14_96fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x17 }, -+ { SMIA_REG_8BIT, 0x121B, 0x61 }, -+ { SMIA_REG_8BIT, 0x121D, 0x61 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xDD }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode13_1/2_1296x984_15.00fps_DPCM10-8 */ -+const static struct smia_reg list_mode13_1_2_1296x984_15_00fps_dpcm10_8[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x16 }, -+ { SMIA_REG_8BIT, 0x121B, 
0x34 }, -+ { SMIA_REG_8BIT, 0x121D, 0x34 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x7E }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 0 0=raw8, 1=raw10 */ -+/* Mode16_1/3_864x656_14.95fps */ -+const static struct smia_reg list_mode16_1_3_864x656_14_95fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x17 }, -+ { SMIA_REG_8BIT, 0x121B, 0x24 }, -+ { SMIA_REG_8BIT, 0x121D, 0x24 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xA4 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x55 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode18_1/4_648x492_14.96fps */ -+const static struct smia_reg list_mode18_1_4_648x492_14_96fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x17 }, -+ { SMIA_REG_8BIT, 0x121B, 0x14 }, -+ { SMIA_REG_8BIT, 0x121D, 0x14 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xDD }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode19_1/6_432x328_14.99fps */ -+const static struct smia_reg list_mode19_1_6_432x328_14_99fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x17 }, -+ { SMIA_REG_8BIT, 0x121B, 0x04 }, -+ { SMIA_REG_8BIT, 0x121D, 0x04 }, -+ { SMIA_REG_8BIT, 0x1221, 0x01 }, -+ { SMIA_REG_8BIT, 0x1220, 0x4B }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode3_4VGA_1296x984_29.99fps_DPCM10-8 */ -+const static struct smia_reg list_mode3_4vga_1296x984_29_99fps_dpcm10_8[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x06 }, -+ { SMIA_REG_8BIT, 0x121B, 0x63 }, -+ { SMIA_REG_8BIT, 0x121D, 0x63 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x7E }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 0 0=raw8, 1=raw10 */ -+/* Mode4_SVGA_864x656_29.88fps */ -+const static struct smia_reg list_mode4_svga_864x656_29_88fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x62 }, -+ { SMIA_REG_8BIT, 0x121D, 0x62 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xA6 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode5_VGA_648x492_29.93fps */ -+const static struct smia_reg list_mode5_vga_648x492_29_93fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 
}, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x61 }, -+ { SMIA_REG_8BIT, 0x121D, 0x61 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xDD }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode13_1/2_1296x984_29.99fps_DPCM10-8 */ -+const static struct smia_reg list_mode13_1_2_1296x984_29_99fps_dpcm10_8[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x06 }, -+ { SMIA_REG_8BIT, 0x121B, 0x34 }, -+ { SMIA_REG_8BIT, 0x121D, 0x34 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0x7E }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode16_1/3_864x656_29.89fps */ -+const static struct smia_reg list_mode16_1_3_864x656_29_89fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x24 }, -+ { SMIA_REG_8BIT, 0x121D, 0x24 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xA4 }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x55 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode18_1/4_648x492_29.93fps */ -+const static struct smia_reg list_mode18_1_4_648x492_29_93fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x14 }, -+ { SMIA_REG_8BIT, 0x121D, 0x14 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xDD }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode19_1/6_432x328_29.97fps */ -+const static struct smia_reg list_mode19_1_6_432x328_29_97fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x04 }, -+ { SMIA_REG_8BIT, 0x121D, 0x04 }, -+ { SMIA_REG_8BIT, 0x1221, 0x01 }, -+ { SMIA_REG_8BIT, 0x1220, 0x4B }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ ## ## ## ## CCP_COMP_MODE[2-0] */ -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* Mode3_4VGA_1296x984_14.96fps */ -+const static struct smia_reg list_mode3_4vga_1296x984_14_96fps[] = { -+ { SMIA_REG_8BIT, 0x1239, 0x64 }, -+ { SMIA_REG_8BIT, 0x1238, 0x02 }, -+ { SMIA_REG_8BIT, 0x123B, 0x71 }, -+ { SMIA_REG_8BIT, 0x123A, 0x07 }, -+ { SMIA_REG_8BIT, 0x121B, 0x63 }, -+ { SMIA_REG_8BIT, 0x121D, 0x63 }, -+ { SMIA_REG_8BIT, 0x1221, 0x00 }, -+ { SMIA_REG_8BIT, 0x1220, 0xDD }, -+ { SMIA_REG_8BIT, 0x1223, 0x00 }, -+ { SMIA_REG_8BIT, 0x1222, 0x54 }, -+ { SMIA_REG_TERM, 0, 0} -+}; -+ -+/* imageFormat 1 0=raw8, 1=raw10 */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/bluegamma_table.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/bluegamma_table.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/bluegamma_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-omap-2.6.28-nokia1/drivers/media/video/isp/bluegamma_table.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,1040 @@ -+/* -+ * bluegamma_table.h -+ * -+ * Gamma Table values for BLUE for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+0, -+0, -+1, -+2, -+3, -+3, -+4, -+5, -+6, -+8, -+10, -+12, -+14, -+16, -+18, -+20, -+22, -+23, -+25, -+26, -+28, -+29, -+31, -+32, -+34, -+35, -+36, -+37, -+39, -+40, -+41, -+42, -+43, -+44, -+45, -+46, -+47, -+48, -+49, -+50, -+51, -+52, -+52, -+53, -+54, -+55, -+56, -+57, -+58, -+59, -+60, -+61, -+62, -+63, -+63, -+64, -+65, -+66, -+66, -+67, -+68, -+69, -+69, -+70, -+71, -+72, -+72, -+73, -+74, -+75, -+75, -+76, -+77, -+78, -+78, -+79, -+80, -+81, -+81, -+82, -+83, -+84, -+84, -+85, -+86, -+87, -+88, -+88, -+89, -+90, -+91, -+91, -+92, -+93, -+94, -+94, -+95, -+96, -+97, -+97, -+98, -+98, -+99, -+99, -+100, -+100, -+101, -+101, -+102, -+103, -+104, -+104, -+105, -+106, -+107, -+108, -+108, -+109, -+110, -+111, -+111, -+112, -+113, -+114, -+114, -+115, -+116, -+117, -+117, -+118, -+119, -+119, -+120, -+120, -+121, -+121, -+122, -+122, -+123, -+123, -+124, -+124, -+125, -+125, -+126, -+126, -+127, -+127, -+128, -+128, -+129, -+129, -+130, -+130, -+131, -+131, -+132, -+132, -+133, -+133, -+134, -+134, -+135, -+135, -+136, -+136, -+137, -+137, -+138, -+138, -+139, -+139, -+140, -+140, -+141, -+141, -+142, -+142, -+143, -+143, -+144, -+144, -+145, -+145, -+146, -+146, -+147, -+147, -+148, -+148, -+149, -+149, -+150, -+150, -+151, -+151, -+152, -+152, -+153, -+153, -+153, -+153, -+154, -+154, -+154, -+154, -+155, -+155, -+156, -+156, -+157, -+157, -+158, -+158, -+158, -+159, -+159, -+159, -+160, -+160, -+160, -+161, -+161, -+162, -+162, -+163, -+163, -+164, -+164, -+164, -+164, -+165, -+165, -+165, -+165, -+166, -+166, -+167, -+167, -+168, -+168, -+169, -+169, -+170, -+170, -+170, -+170, -+171, -+171, -+171, -+171, -+172, -+172, -+173, -+173, -+174, -+174, -+175, -+175, -+176, -+176, -+176, -+176, -+177, -+177, -+177, -+177, -+178, -+178, -+178, -+178, -+179, -+179, -+179, -+179, -+180, -+180, -+180, -+180, -+181, -+181, -+181, -+181, -+182, -+182, -+182, -+182, -+183, -+183, -+183, -+183, -+184, -+184, -+184, -+184, -+185, -+185, -+185, -+185, -+186, -+186, -+186, -+186, -+187, -+187, -+187, -+187, -+188, -+188, -+188, -+188, -+189, -+189, -+189, -+189, -+190, -+190, -+190, -+190, -+191, -+191, -+191, -+191, -+192, -+192, -+192, -+192, -+193, -+193, -+193, -+193, -+194, -+194, -+194, -+194, -+195, -+195, -+195, -+195, -+196, -+196, -+196, -+196, -+197, -+197, -+197, -+197, -+198, -+198, -+198, -+198, -+199, -+199, -+199, -+199, -+200, -+200, -+200, -+200, -+201, -+201, -+201, -+201, -+202, -+202, -+202, -+203, -+203, -+203, -+203, -+204, -+204, -+204, -+204, -+205, -+205, -+205, -+205, -+206, -+206, -+206, -+206, -+207, -+207, -+207, -+207, -+208, -+208, -+208, -+208, -+209, -+209, -+209, -+209, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+212, -+212, -+212, -+212, -+213, -+213, -+213, -+213, 
-+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+214, -+214, -+214, -+214, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+216, -+216, -+216, -+216, -+217, -+217, -+217, -+217, -+218, -+218, -+218, -+218, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+220, -+220, -+220, -+220, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+222, -+222, -+222, -+222, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+224, -+224, -+224, -+224, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+226, -+226, -+226, -+226, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+228, -+228, -+228, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+230, -+230, -+230, -+230, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+233, -+233, -+233, -+233, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+235, -+235, -+235, -+235, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+237, -+237, -+237, -+237, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+239, -+239, -+239, -+239, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+241, -+241, -+241, -+241, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+243, -+243, -+243, -+243, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+245, -+245, -+245, -+245, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+247, -+247, -+247, -+247, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+249, -+249, -+249, -+249, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+251, -+251, -+251, -+251, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, 
-+252, -+252, -+252, -+252, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+254, -+254, -+254, -+254, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255 -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/cfa_coef_table.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/cfa_coef_table.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/cfa_coef_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/cfa_coef_table.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,603 @@ -+/* -+ * cfa_coef_table.h -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * Written by Gjorgji Rosikopulos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+244, -+0, -+247, -+0, -+12, -+27, -+36, -+247, -+250, -+0, -+27, -+0, -+4, -+250, -+12, -+244, -+248, -+0, -+0, -+0, -+0, -+40, -+0, -+0, -+244, -+12, -+250, -+4, -+0, -+27, -+0, -+250, -+247, -+36, -+27, -+12, -+0, -+247, -+0, -+244, -+0, -+0, -+40, -+0, -+0, -+0, -+0, -+248, -+244, -+0, -+247, -+0, -+12, -+27, -+36, -+247, -+250, -+0, -+27, -+0, -+4, -+250, -+12, -+244, -+248, -+0, -+0, -+0, -+0, -+40, -+0, -+0, -+244, -+12, -+250, -+4, -+0, -+27, -+0, -+250, -+247, -+36, -+27, -+12, -+0, -+247, -+0, -+244, -+0, -+0, -+40, -+0, -+0, -+0, -+0, -+248, -+244, -+0, -+247, -+0, -+12, -+27, -+36, -+247, -+250, -+0, -+27, -+0, -+4, -+250, -+12, -+244, -+248, -+0, -+0, -+0, -+0, -+40, -+0, -+0, -+244, -+12, -+250, -+4, -+0, -+27, -+0, -+250, -+247, -+36, -+27, -+12, -+0, -+247, -+0, -+244, -+0, -+0, -+40, -+0, -+0, -+0, -+0, -+248, -+0, -+247, -+0, -+244, -+247, -+36, -+27, -+12, -+0, -+27, -+0, -+250, -+244, -+12, -+250, -+4, -+0, -+0, -+0, -+248, -+0, -+0, -+40, -+0, -+4, -+250, -+12, -+244, -+250, -+0, -+27, -+0, -+12, -+27, -+36, -+247, -+244, -+0, -+247, -+0, -+0, -+40, -+0, -+0, -+248, -+0, -+0, -+0, -+0, -+247, -+0, -+244, -+247, -+36, -+27, -+12, -+0, -+27, -+0, -+250, -+244, -+12, -+250, -+4, -+0, -+0, -+0, -+248, -+0, -+0, -+40, -+0, -+4, -+250, -+12, -+244, -+250, -+0, -+27, -+0, -+12, -+27, -+36, -+247, -+244, -+0, -+247, -+0, -+0, -+40, -+0, -+0, -+248, -+0, -+0, -+0, -+0, -+247, -+0, -+244, -+247, -+36, -+27, -+12, -+0, -+27, -+0, -+250, -+244, -+12, -+250, -+4, -+0, -+0, -+0, -+248, -+0, -+0, -+40, -+0, -+4, -+250, -+12, -+244, -+250, -+0, -+27, -+0, -+12, -+27, -+36, -+247, -+244, -+0, -+247, -+0, -+0, -+40, -+0, -+0, -+248, -+0, -+0, 
-+0, -+4, -+250, -+12, -+244, -+250, -+0, -+27, -+0, -+12, -+27, -+36, -+247, -+244, -+0, -+247, -+0, -+0, -+0, -+0, -+248, -+0, -+0, -+40, -+0, -+0, -+247, -+0, -+244, -+247, -+36, -+27, -+12, -+0, -+27, -+0, -+250, -+244, -+12, -+250, -+4, -+0, -+40, -+0, -+0, -+248, -+0, -+0, -+0, -+4, -+250, -+12, -+244, -+250, -+0, -+27, -+0, -+12, -+27, -+36, -+247, -+244, -+0, -+247, -+0, -+0, -+0, -+0, -+248, -+0, -+0, -+40, -+0, -+0, -+247, -+0, -+244, -+247, -+36, -+27, -+12, -+0, -+27, -+0, -+250, -+244, -+12, -+250, -+4, -+0, -+40, -+0, -+0, -+248, -+0, -+0, -+0, -+4, -+250, -+12, -+244, -+250, -+0, -+27, -+0, -+12, -+27, -+36, -+247, -+244, -+0, -+247, -+0, -+0, -+0, -+0, -+248, -+0, -+0, -+40, -+0, -+0, -+247, -+0, -+244, -+247, -+36, -+27, -+12, -+0, -+27, -+0, -+250, -+244, -+12, -+250, -+4, -+0, -+40, -+0, -+0, -+248, -+0, -+0, -+0, -+244, -+12, -+250, -+4, -+0, -+27, -+0, -+250, -+247, -+36, -+27, -+12, -+0, -+247, -+0, -+244, -+248, -+0, -+0, -+0, -+0, -+40, -+0, -+0, -+244, -+0, -+247, -+0, -+12, -+27, -+36, -+247, -+250, -+0, -+27, -+0, -+4, -+250, -+12, -+244, -+0, -+0, -+40, -+0, -+0, -+0, -+0, -+248, -+244, -+12, -+250, -+4, -+0, -+27, -+0, -+250, -+247, -+36, -+27, -+12, -+0, -+247, -+0, -+244, -+248, -+0, -+0, -+0, -+0, -+40, -+0, -+0, -+244, -+0, -+247, -+0, -+12, -+27, -+36, -+247, -+250, -+0, -+27, -+0, -+4, -+250, -+12, -+244, -+0, -+0, -+40, -+0, -+0, -+0, -+0, -+248, -+244, -+12, -+250, -+4, -+0, -+27, -+0, -+250, -+247, -+36, -+27, -+12, -+0, -+247, -+0, -+244, -+248, -+0, -+0, -+0, -+0, -+40, -+0, -+0, -+244, -+0, -+247, -+0, -+12, -+27, -+36, -+247, -+250, -+0, -+27, -+0, -+4, -+250, -+12, -+244, -+0, -+0, -+40, -+0, -+0, -+0, -+0, -+248 -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/greengamma_table.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/greengamma_table.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/greengamma_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/greengamma_table.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,1040 @@ -+/* -+ * greengamma_table.h -+ * -+ * Gamma Table values for GREEN for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+0, -+0, -+1, -+2, -+3, -+3, -+4, -+5, -+6, -+8, -+10, -+12, -+14, -+16, -+18, -+20, -+22, -+23, -+25, -+26, -+28, -+29, -+31, -+32, -+34, -+35, -+36, -+37, -+39, -+40, -+41, -+42, -+43, -+44, -+45, -+46, -+47, -+48, -+49, -+50, -+51, -+52, -+52, -+53, -+54, -+55, -+56, -+57, -+58, -+59, -+60, -+61, -+62, -+63, -+63, -+64, -+65, -+66, -+66, -+67, -+68, -+69, -+69, -+70, -+71, -+72, -+72, -+73, -+74, -+75, -+75, -+76, -+77, -+78, -+78, -+79, -+80, -+81, -+81, -+82, -+83, -+84, -+84, -+85, -+86, -+87, -+88, -+88, -+89, -+90, -+91, -+91, -+92, -+93, -+94, -+94, -+95, -+96, -+97, -+97, -+98, -+98, -+99, -+99, -+100, -+100, -+101, -+101, -+102, -+103, -+104, -+104, -+105, -+106, -+107, -+108, -+108, -+109, -+110, -+111, -+111, -+112, -+113, -+114, -+114, -+115, -+116, -+117, -+117, -+118, -+119, -+119, -+120, -+120, -+121, -+121, -+122, -+122, -+123, -+123, -+124, -+124, -+125, -+125, -+126, -+126, -+127, -+127, -+128, -+128, -+129, -+129, -+130, -+130, -+131, -+131, -+132, -+132, -+133, -+133, -+134, -+134, -+135, -+135, -+136, -+136, -+137, -+137, -+138, -+138, -+139, -+139, -+140, -+140, -+141, -+141, -+142, -+142, -+143, -+143, -+144, -+144, -+145, -+145, -+146, -+146, -+147, -+147, -+148, -+148, -+149, -+149, -+150, -+150, -+151, -+151, -+152, -+152, -+153, -+153, -+153, -+153, -+154, -+154, -+154, -+154, -+155, -+155, -+156, -+156, -+157, -+157, -+158, -+158, -+158, -+159, -+159, -+159, -+160, -+160, -+160, -+161, -+161, -+162, -+162, -+163, -+163, -+164, -+164, -+164, -+164, -+165, -+165, -+165, -+165, -+166, -+166, -+167, -+167, -+168, -+168, -+169, -+169, -+170, -+170, -+170, -+170, -+171, -+171, -+171, -+171, -+172, -+172, -+173, -+173, -+174, -+174, -+175, -+175, -+176, -+176, -+176, -+176, -+177, -+177, -+177, -+177, -+178, -+178, -+178, -+178, -+179, -+179, -+179, -+179, -+180, -+180, -+180, -+180, -+181, -+181, -+181, -+181, -+182, -+182, -+182, -+182, -+183, -+183, -+183, -+183, -+184, -+184, -+184, -+184, -+185, -+185, -+185, -+185, -+186, -+186, -+186, -+186, -+187, -+187, -+187, -+187, -+188, -+188, -+188, -+188, -+189, -+189, -+189, -+189, -+190, -+190, -+190, -+190, -+191, -+191, -+191, -+191, -+192, -+192, -+192, -+192, -+193, -+193, -+193, -+193, -+194, -+194, -+194, -+194, -+195, -+195, -+195, -+195, -+196, -+196, -+196, -+196, -+197, -+197, -+197, -+197, -+198, -+198, -+198, -+198, -+199, -+199, -+199, -+199, -+200, -+200, -+200, -+200, -+201, -+201, -+201, -+201, -+202, -+202, -+202, -+203, -+203, -+203, -+203, -+204, -+204, -+204, -+204, -+205, -+205, -+205, -+205, -+206, -+206, -+206, -+206, -+207, -+207, -+207, -+207, -+208, -+208, -+208, -+208, -+209, -+209, -+209, -+209, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+212, -+212, -+212, -+212, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+214, -+214, -+214, -+214, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+216, -+216, -+216, -+216, -+217, -+217, -+217, -+217, -+218, -+218, -+218, -+218, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+220, -+220, -+220, -+220, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+222, -+222, -+222, -+222, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+224, -+224, -+224, -+224, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, 
-+225, -+225, -+225, -+225, -+226, -+226, -+226, -+226, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+228, -+228, -+228, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+230, -+230, -+230, -+230, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+233, -+233, -+233, -+233, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+235, -+235, -+235, -+235, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+237, -+237, -+237, -+237, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+239, -+239, -+239, -+239, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+241, -+241, -+241, -+241, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+243, -+243, -+243, -+243, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+245, -+245, -+245, -+245, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+247, -+247, -+247, -+247, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+249, -+249, -+249, -+249, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+251, -+251, -+251, -+251, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+254, -+254, -+254, -+254, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255 -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/media/video/isp/isp_af.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp_af.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isp_af.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp_af.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,477 @@ -+/* -+ * isp_af.c -+ * -+ * AF module for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Troy Laramy -+ * David Cohen -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* Linux specific include files */ -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "isph3a.h" -+#include "isp_af.h" -+ -+#define IS_OUT_OF_BOUNDS(value, min, max) \ -+ (((value) < (min)) || ((value) > (max))) -+ -+/* Function to check paxel parameters */ -+static int isp_af_check_params(struct isp_af_device *isp_af, -+ struct af_configuration *afconfig) -+{ -+ struct af_paxel *paxel_cfg = &afconfig->paxel_config; -+ struct af_iir *iir_cfg = &afconfig->iir_config; -+ int index; -+ -+ /* Check horizontal Count */ -+ if (IS_OUT_OF_BOUNDS(paxel_cfg->hz_cnt, AF_PAXEL_HORIZONTAL_COUNT_MIN, -+ AF_PAXEL_HORIZONTAL_COUNT_MAX)) -+ return -AF_ERR_HZ_COUNT; -+ -+ /* Check Vertical Count */ -+ if (IS_OUT_OF_BOUNDS(paxel_cfg->vt_cnt, AF_PAXEL_VERTICAL_COUNT_MIN, -+ AF_PAXEL_VERTICAL_COUNT_MAX)) -+ return -AF_ERR_VT_COUNT; -+ -+ /* Check Height */ -+ if (IS_OUT_OF_BOUNDS(paxel_cfg->height, AF_PAXEL_HEIGHT_MIN, -+ AF_PAXEL_HEIGHT_MAX)) -+ return -AF_ERR_HEIGHT; -+ -+ /* Check width */ -+ if (IS_OUT_OF_BOUNDS(paxel_cfg->width, AF_PAXEL_WIDTH_MIN, -+ AF_PAXEL_WIDTH_MAX)) -+ return -AF_ERR_WIDTH; -+ -+ /* Check Line Increment */ -+ if (IS_OUT_OF_BOUNDS(paxel_cfg->line_incr, AF_PAXEL_INCREMENT_MIN, -+ AF_PAXEL_INCREMENT_MAX)) -+ return -AF_ERR_INCR; -+ -+ /* Check Horizontal Start */ -+ if ((paxel_cfg->hz_start % 2 != 0) || -+ (paxel_cfg->hz_start < (iir_cfg->hz_start_pos + 2)) || -+ IS_OUT_OF_BOUNDS(paxel_cfg->hz_start, -+ AF_PAXEL_HZSTART_MIN, AF_PAXEL_HZSTART_MAX)) -+ return -AF_ERR_HZ_START; -+ -+ /* Check Vertical Start */ -+ if (IS_OUT_OF_BOUNDS(paxel_cfg->vt_start, AF_PAXEL_VTSTART_MIN, -+ AF_PAXEL_VTSTART_MAX)) -+ return -AF_ERR_VT_START; -+ -+ /* Check IIR */ -+ for (index = 0; index < AF_NUMBER_OF_COEF; index++) { -+ if ((iir_cfg->coeff_set0[index]) > AF_COEF_MAX) -+ return -AF_ERR_IIR_COEF; -+ -+ if ((iir_cfg->coeff_set1[index]) > AF_COEF_MAX) -+ return -AF_ERR_IIR_COEF; -+ } -+ -+ if (IS_OUT_OF_BOUNDS(iir_cfg->hz_start_pos, AF_IIRSH_MIN, -+ AF_IIRSH_MAX)) -+ return -AF_ERR_IIRSH; -+ -+ /* Check HMF Threshold Values */ -+ if (afconfig->hmf_config.threshold > AF_THRESHOLD_MAX) -+ return -AF_ERR_THRESHOLD; -+ -+ return 0; -+} -+ -+void isp_af_config_registers(struct isp_af_device *isp_af) -+{ -+ struct device *dev = to_device(isp_af); -+ unsigned int pcr = 0, pax1 = 0, pax2 = 0, paxstart = 0; -+ unsigned int coef = 0; -+ unsigned int base_coef_set0 = 0; -+ unsigned int base_coef_set1 = 0; -+ int index; -+ unsigned long irqflags; -+ -+ if (!isp_af->config.af_config) -+ return; -+ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ -+ isp_reg_writel(dev, 
isp_af->buf_next->iommu_addr, OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AFBUFST); -+ -+ if (!isp_af->update) { -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+ return; -+ } -+ -+ /* Configure Hardware Registers */ -+ pax1 |= isp_af->config.paxel_config.width << AF_PAXW_SHIFT; -+ /* Set height in AFPAX1 */ -+ pax1 |= isp_af->config.paxel_config.height; -+ isp_reg_writel(dev, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1); -+ -+ /* Configure AFPAX2 Register */ -+ /* Set Line Increment in AFPAX2 Register */ -+ pax2 |= isp_af->config.paxel_config.line_incr << AF_LINE_INCR_SHIFT; -+ /* Set Vertical Count */ -+ pax2 |= isp_af->config.paxel_config.vt_cnt << AF_VT_COUNT_SHIFT; -+ /* Set Horizontal Count */ -+ pax2 |= isp_af->config.paxel_config.hz_cnt; -+ isp_reg_writel(dev, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2); -+ -+ /* Configure PAXSTART Register */ -+ /*Configure Horizontal Start */ -+ paxstart |= isp_af->config.paxel_config.hz_start << AF_HZ_START_SHIFT; -+ /* Configure Vertical Start */ -+ paxstart |= isp_af->config.paxel_config.vt_start; -+ isp_reg_writel(dev, paxstart, OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AFPAXSTART); -+ -+ /*SetIIRSH Register */ -+ isp_reg_writel(dev, isp_af->config.iir_config.hz_start_pos, -+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH); -+ -+ base_coef_set0 = ISPH3A_AFCOEF010; -+ base_coef_set1 = ISPH3A_AFCOEF110; -+ for (index = 0; index <= 8; index += 2) { -+ /*Set IIR Filter0 Coefficients */ -+ coef = 0; -+ coef |= isp_af->config.iir_config.coeff_set0[index]; -+ coef |= isp_af->config.iir_config.coeff_set0[index + 1] << -+ AF_COEF_SHIFT; -+ isp_reg_writel(dev, coef, OMAP3_ISP_IOMEM_H3A, -+ base_coef_set0); -+ base_coef_set0 += AFCOEF_OFFSET; -+ -+ /*Set IIR Filter1 Coefficients */ -+ coef = 0; -+ coef |= isp_af->config.iir_config.coeff_set1[index]; -+ coef |= isp_af->config.iir_config.coeff_set1[index + 1] << -+ AF_COEF_SHIFT; -+ isp_reg_writel(dev, coef, OMAP3_ISP_IOMEM_H3A, -+ base_coef_set1); -+ base_coef_set1 += AFCOEF_OFFSET; -+ } -+ /* set AFCOEF0010 Register */ -+ isp_reg_writel(dev, isp_af->config.iir_config.coeff_set0[10], -+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010); -+ /* set AFCOEF1010 Register */ -+ isp_reg_writel(dev, isp_af->config.iir_config.coeff_set1[10], -+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010); -+ -+ /* PCR Register */ -+ /* Set Accumulator Mode */ -+ if (isp_af->config.mode == ACCUMULATOR_PEAK) -+ pcr |= FVMODE; -+ /* Set A-law */ -+ if (isp_af->config.alaw_enable == H3A_AF_ALAW_ENABLE) -+ pcr |= AF_ALAW_EN; -+ /* Set RGB Position */ -+ pcr |= isp_af->config.rgb_pos << AF_RGBPOS_SHIFT; -+ /* HMF Configurations */ -+ if (isp_af->config.hmf_config.enable == H3A_AF_HMF_ENABLE) { -+ /* Enable HMF */ -+ pcr |= AF_MED_EN; -+ /* Set Median Threshold */ -+ pcr |= isp_af->config.hmf_config.threshold << AF_MED_TH_SHIFT; -+ } -+ /* Set PCR Register */ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR, -+ ~AF_PCR_MASK, pcr); -+ -+ isp_af->update = 0; -+ isp_af->stat.config_counter++; -+ ispstat_bufs_set_size(&isp_af->stat, isp_af->buf_size); -+ -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+} -+ -+/* Update local parameters */ -+static void isp_af_update_params(struct isp_af_device *isp_af, -+ struct af_configuration *afconfig) -+{ -+ int update = 0; -+ int index; -+ -+ /* alaw */ -+ if (isp_af->config.alaw_enable != afconfig->alaw_enable) { -+ update = 1; -+ goto out; -+ } -+ -+ /* hmf */ -+ if (isp_af->config.hmf_config.enable != afconfig->hmf_config.enable) { -+ update = 1; -+ goto out; -+ } -+ if (isp_af->config.hmf_config.threshold != -+ 
afconfig->hmf_config.threshold) { -+ update = 1; -+ goto out; -+ } -+ -+ /* rgbpos */ -+ if (isp_af->config.rgb_pos != afconfig->rgb_pos) { -+ update = 1; -+ goto out; -+ } -+ -+ /* iir */ -+ if (isp_af->config.iir_config.hz_start_pos != -+ afconfig->iir_config.hz_start_pos) { -+ update = 1; -+ goto out; -+ } -+ for (index = 0; index < AF_NUMBER_OF_COEF; index++) { -+ if (isp_af->config.iir_config.coeff_set0[index] != -+ afconfig->iir_config.coeff_set0[index]) { -+ update = 1; -+ goto out; -+ } -+ if (isp_af->config.iir_config.coeff_set1[index] != -+ afconfig->iir_config.coeff_set1[index]) { -+ update = 1; -+ goto out; -+ } -+ } -+ -+ /* paxel */ -+ if ((isp_af->config.paxel_config.width != -+ afconfig->paxel_config.width) || -+ (isp_af->config.paxel_config.height != -+ afconfig->paxel_config.height) || -+ (isp_af->config.paxel_config.hz_start != -+ afconfig->paxel_config.hz_start) || -+ (isp_af->config.paxel_config.vt_start != -+ afconfig->paxel_config.vt_start) || -+ (isp_af->config.paxel_config.hz_cnt != -+ afconfig->paxel_config.hz_cnt) || -+ (isp_af->config.paxel_config.line_incr != -+ afconfig->paxel_config.line_incr)) { -+ update = 1; -+ goto out; -+ } -+ -+ /* af_mode */ -+ if (isp_af->config.mode != afconfig->mode) { -+ update = 1; -+ goto out; -+ } -+ -+ isp_af->config.af_config = afconfig->af_config; -+ -+out: -+ if (update) { -+ memcpy(&isp_af->config, afconfig, sizeof(*afconfig)); -+ isp_af->update = 1; -+ } -+} -+ -+void isp_af_try_enable(struct isp_af_device *isp_af) -+{ -+ unsigned long irqflags; -+ -+ if (!isp_af->config.af_config) -+ return; -+ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ if (unlikely(!isp_af->enabled && isp_af->config.af_config)) { -+ isp_af->update = 1; -+ isp_af->buf_next = ispstat_buf_next(&isp_af->stat); -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+ isp_af_config_registers(isp_af); -+ isp_af_enable(isp_af, 1); -+ } else -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+} -+ -+/* Function to perform hardware set up */ -+int isp_af_config(struct isp_af_device *isp_af, -+ struct af_configuration *afconfig) -+{ -+ struct device *dev = to_device(isp_af); -+ int result; -+ int buf_size; -+ unsigned long irqflags; -+ -+ if (!afconfig) { -+ dev_dbg(dev, "af: Null argument in configuration.\n"); -+ return -EINVAL; -+ } -+ -+ /* Check Parameters */ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ result = isp_af_check_params(isp_af, afconfig); -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+ if (result) { -+ dev_dbg(dev, "af: wrong configure params received.\n"); -+ return result; -+ } -+ -+ /* Compute buffer size */ -+ buf_size = (afconfig->paxel_config.hz_cnt + 1) * -+ (afconfig->paxel_config.vt_cnt + 1) * AF_PAXEL_SIZE; -+ -+ result = ispstat_bufs_alloc(&isp_af->stat, buf_size, 0); -+ if (result) -+ return result; -+ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ isp_af->buf_size = buf_size; -+ isp_af_update_params(isp_af, afconfig); -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+ -+ /* Success */ -+ return 0; -+} -+EXPORT_SYMBOL(isp_af_config); -+ -+/* -+ * This API allows the user to update White Balance gains, as well as -+ * exposure time and analog gain. It is also used to request frame -+ * statistics. 
-+ */ -+int isp_af_request_statistics(struct isp_af_device *isp_af, -+ struct isp_af_data *afdata) -+{ -+ struct device *dev = to_device(isp_af); -+ struct ispstat_buffer *buf; -+ -+ if (!isp_af->config.af_config) { -+ dev_dbg(dev, "af: statistics requested while af engine" -+ " is not configured\n"); -+ return -EINVAL; -+ } -+ -+ if (afdata->update & REQUEST_STATISTICS) { -+ buf = ispstat_buf_get(&isp_af->stat, -+ (void *)afdata->af_statistics_buf, -+ afdata->frame_number); -+ if (IS_ERR(buf)) -+ return PTR_ERR(buf); -+ -+ afdata->xtrastats.ts = buf->ts; -+ afdata->config_counter = buf->config_counter; -+ afdata->frame_number = buf->frame_number; -+ -+ ispstat_buf_release(&isp_af->stat); -+ } -+ -+ afdata->curr_frame = isp_af->stat.frame_number; -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_af_request_statistics); -+ -+/* This function will handle the AF buffer. */ -+int isp_af_buf_process(struct isp_af_device *isp_af) -+{ -+ if (likely(!isp_af->buf_err && isp_af->config.af_config)) { -+ int ret; -+ -+ ret = ispstat_buf_queue(&isp_af->stat); -+ isp_af->buf_next = ispstat_buf_next(&isp_af->stat); -+ return ret; -+ } else { -+ isp_af->buf_err = 0; -+ return -1; -+ } -+} -+ -+static void __isp_af_enable(struct isp_af_device *isp_af, int enable) -+{ -+ struct device *dev = to_device(isp_af); -+ unsigned int pcr; -+ -+ pcr = isp_reg_readl(dev, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR); -+ -+ /* Set AF_EN bit in PCR Register */ -+ if (enable) -+ pcr |= AF_EN; -+ else -+ pcr &= ~AF_EN; -+ -+ isp_reg_writel(dev, pcr, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR); -+} -+ -+/* Function to Enable/Disable AF Engine */ -+void isp_af_enable(struct isp_af_device *isp_af, int enable) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ -+ if (!isp_af->config.af_config && enable) { -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+ return; -+ } -+ -+ __isp_af_enable(isp_af, enable); -+ isp_af->enabled = enable; -+ -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+} -+ -+/* Function to Suspend AF Engine */ -+void isp_af_suspend(struct isp_af_device *isp_af) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ if (isp_af->enabled) -+ __isp_af_enable(isp_af, 0); -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+} -+ -+/* Function to Resume AF Engine */ -+void isp_af_resume(struct isp_af_device *isp_af) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(isp_af->lock, irqflags); -+ if (isp_af->enabled) -+ __isp_af_enable(isp_af, 1); -+ spin_unlock_irqrestore(isp_af->lock, irqflags); -+} -+ -+int isp_af_busy(struct isp_af_device *isp_af) -+{ -+ struct device *dev = to_device(isp_af); -+ -+ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR) -+ & ISPH3A_PCR_BUSYAF; -+} -+ -+/* Function to register the AF character device driver. 
*/ -+int __init isp_af_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_af_device *isp_af = &isp->isp_af; -+ -+ isp_af->lock = &isp->h3a_lock; -+ ispstat_init(dev, "AF", &isp_af->stat, H3A_MAX_BUFF, MAX_FRAME_COUNT); -+ -+ return 0; -+} -+ -+void isp_af_exit(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ /* Free buffers */ -+ ispstat_free(&isp->isp_af.stat); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isp_af.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp_af.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isp_af.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp_af.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,138 @@ -+/* -+ * isp_af.h -+ * -+ * Include file for AF module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Troy Laramy -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+/* Device Constants */ -+#ifndef OMAP_ISP_AF_H -+#define OMAP_ISP_AF_H -+ -+#include -+ -+#include "isph3a.h" -+#include "ispstat.h" -+ -+#define AF_MAJOR_NUMBER 0 -+#define ISPAF_NAME "OMAPISP_AF" -+#define AF_NR_DEVS 1 -+#define AF_TIMEOUT ((300 * HZ) / 1000) -+ -+ -+ -+/* Print Macros */ -+/*list of error code */ -+#define AF_ERR_HZ_COUNT 800 /* Invalid Horizontal Count */ -+#define AF_ERR_VT_COUNT 801 /* Invalid Vertical Count */ -+#define AF_ERR_HEIGHT 802 /* Invalid Height */ -+#define AF_ERR_WIDTH 803 /* Invalid width */ -+#define AF_ERR_INCR 804 /* Invalid Increment */ -+#define AF_ERR_HZ_START 805 /* Invalid horizontal Start */ -+#define AF_ERR_VT_START 806 /* Invalud vertical Start */ -+#define AF_ERR_IIRSH 807 /* Invalid IIRSH value */ -+#define AF_ERR_IIR_COEF 808 /* Invalid Coefficient */ -+#define AF_ERR_SETUP 809 /* Setup not done */ -+#define AF_ERR_THRESHOLD 810 /* Invalid Threshold */ -+#define AF_ERR_ENGINE_BUSY 811 /* Engine is busy */ -+ -+#define AFPID 0x0 /* Peripheral Revision -+ * and Class Information -+ */ -+ -+#define AFCOEF_OFFSET 0x00000004 /* COEFFICIENT BASE -+ * ADDRESS -+ */ -+ -+/* -+ * PCR fields -+ */ -+#define AF_BUSYAF (1 << 15) -+#define FVMODE (1 << 14) -+#define RGBPOS (0x7 << 11) -+#define MED_TH (0xFF << 3) -+#define AF_MED_EN (1 << 2) -+#define AF_ALAW_EN (1 << 1) -+#define AF_EN (1 << 0) -+#define AF_PCR_MASK (FVMODE | RGBPOS | MED_TH | \ -+ AF_MED_EN | AF_ALAW_EN) -+ -+/* -+ * AFPAX1 fields -+ */ -+#define PAXW (0x7F << 16) -+#define PAXH 0x7F -+ -+/* -+ * AFPAX2 fields -+ */ -+#define AFINCV (0xF << 13) -+#define PAXVC (0x7F << 6) -+#define PAXHC 0x3F -+ -+/* -+ * AFPAXSTART fields -+ */ -+#define PAXSH (0xFFF<<16) -+#define PAXSV 0xFFF -+ -+/* -+ * COEFFICIENT MASK -+ */ -+ -+#define COEF_MASK0 0xFFF -+#define COEF_MASK1 (0xFFF<<16) -+ -+/* BIT SHIFTS */ -+#define AF_RGBPOS_SHIFT 11 -+#define AF_MED_TH_SHIFT 3 -+#define AF_PAXW_SHIFT 16 -+#define AF_LINE_INCR_SHIFT 13 -+#define AF_VT_COUNT_SHIFT 6 -+#define AF_HZ_START_SHIFT 16 -+#define AF_COEF_SHIFT 16 -+ -+#define AF_UPDATEXS_TS (1 << 0) -+#define AF_UPDATEXS_FIELDCOUNT (1 << 1) -+#define AF_UPDATEXS_LENSPOS (1 << 
2) -+ -+/** -+ * struct isp_af_status - AF status. -+ * @update: 1 - Update registers. -+ */ -+struct isp_af_device { -+ u8 update; -+ u8 buf_err; -+ int enabled; -+ unsigned int buf_size; -+ struct ispstat stat; -+ struct af_configuration config; /*Device configuration structure */ -+ struct ispstat_buffer *buf_next; -+ spinlock_t *lock; -+}; -+ -+int isp_af_buf_process(struct isp_af_device *isp_af); -+void isp_af_enable(struct isp_af_device *, int); -+void isp_af_try_enable(struct isp_af_device *isp_af); -+void isp_af_suspend(struct isp_af_device *); -+void isp_af_resume(struct isp_af_device *); -+int isp_af_busy(struct isp_af_device *); -+void isp_af_config_registers(struct isp_af_device *isp_af); -+int isp_af_request_statistics(struct isp_af_device *, -+ struct isp_af_data *afdata); -+int isp_af_config(struct isp_af_device *, struct af_configuration *afconfig); -+ -+#endif /* OMAP_ISP_AF_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isp.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isp.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,2542 @@ -+/* -+ * isp.c -+ * -+ * Driver Library for ISP Control module in TI's OMAP3 Camera ISP -+ * ISP interface and IRQ related APIs are defined here. -+ * -+ * Copyright (C) 2009 Texas Instruments. -+ * Copyright (C) 2009 Nokia. -+ * -+ * Contributors: -+ * Sameer Venkatraman -+ * Mohit Jalori -+ * Sergio Aguirre -+ * Sakari Ailus -+ * Tuukka Toivonen -+ * Toni Leinonen -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "ispccdc.h" -+#include "isph3a.h" -+#include "isphist.h" -+#include "isp_af.h" -+#include "isppreview.h" -+#include "ispresizer.h" -+#include "ispcsi2.h" -+ -+static struct platform_device *omap3isp_pdev; -+ -+static void isp_save_ctx(struct device *dev); -+ -+static void isp_restore_ctx(struct device *dev); -+ -+static void isp_buf_init(struct device *dev); -+ -+/* List of image formats supported via OMAP ISP */ -+const static struct v4l2_fmtdesc isp_formats[] = { -+ { -+ .description = "UYVY, packed", -+ .pixelformat = V4L2_PIX_FMT_UYVY, -+ }, -+ { -+ .description = "YUYV (YUV 4:2:2), packed", -+ .pixelformat = V4L2_PIX_FMT_YUYV, -+ }, -+ { -+ .description = "Bayer10 (GrR/BGb)", -+ .pixelformat = V4L2_PIX_FMT_SGRBG10, -+ }, -+}; -+ -+/** -+ * struct vcontrol - Video control structure. -+ * @qc: V4L2 Query control structure. -+ * @current_value: Current value of the control. 
-+ */ -+static struct vcontrol { -+ struct v4l2_queryctrl qc; -+ int current_value; -+} video_control[] = { -+ { -+ { -+ .id = V4L2_CID_BRIGHTNESS, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Brightness", -+ .minimum = ISPPRV_BRIGHT_LOW, -+ .maximum = ISPPRV_BRIGHT_HIGH, -+ .step = ISPPRV_BRIGHT_STEP, -+ .default_value = ISPPRV_BRIGHT_DEF, -+ }, -+ .current_value = ISPPRV_BRIGHT_DEF, -+ }, -+ { -+ { -+ .id = V4L2_CID_CONTRAST, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Contrast", -+ .minimum = ISPPRV_CONTRAST_LOW, -+ .maximum = ISPPRV_CONTRAST_HIGH, -+ .step = ISPPRV_CONTRAST_STEP, -+ .default_value = ISPPRV_CONTRAST_DEF, -+ }, -+ .current_value = ISPPRV_CONTRAST_DEF, -+ }, -+ { -+ { -+ .id = V4L2_CID_COLORFX, -+ .type = V4L2_CTRL_TYPE_MENU, -+ .name = "Color Effects", -+ .minimum = V4L2_COLORFX_NONE, -+ .maximum = V4L2_COLORFX_SEPIA, -+ .step = 1, -+ .default_value = V4L2_COLORFX_NONE, -+ }, -+ .current_value = V4L2_COLORFX_NONE, -+ } -+}; -+ -+static struct v4l2_querymenu video_menu[] = { -+ { -+ .id = V4L2_CID_COLORFX, -+ .index = 0, -+ .name = "None", -+ }, -+ { -+ .id = V4L2_CID_COLORFX, -+ .index = 1, -+ .name = "B&W", -+ }, -+ { -+ .id = V4L2_CID_COLORFX, -+ .index = 2, -+ .name = "Sepia", -+ }, -+}; -+ -+/* Structure for saving/restoring ISP module registers */ -+static struct isp_reg isp_reg_list[] = { -+ {OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_GRESET_LENGTH, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_PSTRB_REPLAY, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_FRAME, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_PSTRB_DELAY, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_STRB_DELAY, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_SHUT_DELAY, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_PSTRB_LENGTH, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_STRB_LENGTH, 0}, -+ {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_SHUT_LENGTH, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF_SYSCONFIG, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF_IRQENABLE, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_CTRL, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_CTRL, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_START, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_START, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_END, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_END, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_WINDOWSIZE, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_WINDOWSIZE, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF0_THRESHOLD, 0}, -+ {OMAP3_ISP_IOMEM_CBUFF, ISP_CBUFF1_THRESHOLD, 0}, -+ {0, ISP_TOK_TERM, 0} -+}; -+ -+void isp_flush(struct device *dev) -+{ -+ isp_reg_writel(dev, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION); -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION); -+} -+ -+/* -+ * -+ * V4L2 Handling -+ * -+ */ -+ -+/** -+ * find_vctrl - Returns the index of the ctrl array of the requested ctrl ID. -+ * @id: Requested control ID. -+ * -+ * Returns 0 if successful, -EINVAL if not found, or -EDOM if its out of -+ * domain. 
-+ **/ -+static int find_vctrl(int id) -+{ -+ int i; -+ -+ if (id < V4L2_CID_BASE) -+ return -EDOM; -+ -+ for (i = (ARRAY_SIZE(video_control) - 1); i >= 0; i--) -+ if (video_control[i].qc.id == id) -+ break; -+ -+ if (i < 0) -+ i = -EINVAL; -+ -+ return i; -+} -+ -+static int find_next_vctrl(int id) -+{ -+ int i; -+ u32 best = (u32)-1; -+ -+ for (i = 0; i < ARRAY_SIZE(video_control); i++) { -+ if (video_control[i].qc.id > id && -+ (best == (u32)-1 || -+ video_control[i].qc.id < -+ video_control[best].qc.id)) { -+ best = i; -+ } -+ } -+ -+ if (best == (u32)-1) -+ return -EINVAL; -+ -+ return best; -+} -+ -+/** -+ * find_vmenu - Returns index of the menu array of the requested ctrl option. -+ * @id: Requested control ID. -+ * @index: Requested menu option index. -+ * -+ * Returns 0 if successful, -EINVAL if not found, or -EDOM if its out of -+ * domain. -+ **/ -+static int find_vmenu(int id, int index) -+{ -+ int i; -+ -+ if (id < V4L2_CID_BASE) -+ return -EDOM; -+ -+ for (i = (ARRAY_SIZE(video_menu) - 1); i >= 0; i--) { -+ if (video_menu[i].id != id || video_menu[i].index != index) -+ continue; -+ return i; -+ } -+ -+ return -EINVAL; -+} -+ -+/** -+ * isp_release_resources - Free ISP submodules -+ **/ -+static void isp_release_resources(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ if (isp->pipeline.modules & OMAP_ISP_CCDC) -+ ispccdc_free(&isp->isp_ccdc); -+ -+ if (isp->pipeline.modules & OMAP_ISP_PREVIEW) -+ isppreview_free(&isp->isp_prev); -+ -+ if (isp->pipeline.modules & OMAP_ISP_RESIZER) -+ ispresizer_free(&isp->isp_res); -+ return; -+} -+ -+static int isp_wait(struct device *dev, int (*busy)(void *), int wait_for_busy, -+ int max_wait, void *priv) -+{ -+ int wait = 0; -+ -+ if (max_wait == 0) -+ max_wait = 10000; /* 10 ms */ -+ -+ while ((wait_for_busy && !busy(priv)) -+ || (!wait_for_busy && busy(priv))) { -+ rmb(); -+ udelay(1); -+ wait++; -+ if (wait > max_wait) -+ return -EBUSY; -+ } -+ DPRINTK_ISPCTRL(KERN_ALERT "%s: wait %d\n", __func__, wait); -+ -+ return 0; -+} -+ -+static int ispccdc_sbl_wait_idle(struct isp_ccdc_device *isp_ccdc, int max_wait) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ return isp_wait(dev, ispccdc_sbl_busy, 0, max_wait, isp_ccdc); -+} -+ -+static void isp_enable_interrupts(struct device *dev, int is_raw) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ u32 irq0enable; -+ -+ irq0enable = IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ -+ | IRQ0ENABLE_CCDC_VD0_IRQ | IRQ0ENABLE_HS_VS_IRQ -+ | IRQ0ENABLE_CSIA_IRQ -+ | IRQ0ENABLE_CSIB_IRQ | IRQ0ENABLE_HIST_DONE_IRQ -+ | IRQ0ENABLE_H3A_AWB_DONE_IRQ | IRQ0ENABLE_H3A_AF_DONE_IRQ -+ | isp->interrupts; -+ -+ if (!is_raw) -+ irq0enable |= IRQ0ENABLE_PRV_DONE_IRQ | IRQ0ENABLE_RSZ_DONE_IRQ; -+ -+ isp_reg_writel(dev, -1, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); -+ isp_reg_writel(dev, irq0enable, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE); -+ -+ return; -+} -+ -+static void isp_disable_interrupts(struct device *dev) -+{ -+ isp_reg_writel(dev, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE); -+} -+ -+/** -+ * isp_set_callback - Sets the callback for the ISP module done events. -+ * @type: Type of the event for which callback is requested. -+ * @callback: Method to be called as callback in the ISR context. -+ * @arg1: First argument to be passed when callback is called in ISR. -+ * @arg2: Second argument to be passed when callback is called in ISR. -+ * -+ * This function sets a callback function for a done event in the ISP -+ * module, and enables the corresponding interrupt. 
-+ **/ -+int isp_set_callback(struct device *dev, enum isp_callback_type type, -+ isp_callback_t callback, isp_vbq_callback_ptr arg1, -+ void *arg2) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ unsigned long irqflags = 0; -+ -+ if (callback == NULL) { -+ DPRINTK_ISPCTRL("ISP_ERR : Null Callback\n"); -+ return -EINVAL; -+ } -+ -+ spin_lock_irqsave(&isp->lock, irqflags); -+ isp->irq.isp_callbk[type] = callback; -+ isp->irq.isp_callbk_arg1[type] = arg1; -+ isp->irq.isp_callbk_arg2[type] = arg2; -+ spin_unlock_irqrestore(&isp->lock, irqflags); -+ -+ switch (type) { -+ case CBK_PREV_DONE: -+ isp_reg_writel(dev, IRQ0ENABLE_PRV_DONE_IRQ, -+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE, -+ IRQ0ENABLE_PRV_DONE_IRQ); -+ break; -+ default: -+ break; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_set_callback); -+ -+/** -+ * isp_unset_callback - Clears the callback for the ISP module done events. -+ * @type: Type of the event for which callback to be cleared. -+ * -+ * This function clears a callback function for a done event in the ISP -+ * module, and disables the corresponding interrupt. -+ **/ -+int isp_unset_callback(struct device *dev, enum isp_callback_type type) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ unsigned long irqflags = 0; -+ -+ spin_lock_irqsave(&isp->lock, irqflags); -+ isp->irq.isp_callbk[type] = NULL; -+ isp->irq.isp_callbk_arg1[type] = NULL; -+ isp->irq.isp_callbk_arg2[type] = NULL; -+ spin_unlock_irqrestore(&isp->lock, irqflags); -+ -+ switch (type) { -+ case CBK_PREV_DONE: -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE, -+ ~IRQ0ENABLE_PRV_DONE_IRQ); -+ break; -+ default: -+ break; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_unset_callback); -+ -+/** -+ * isp_set_xclk - Configures the specified cam_xclk to the desired frequency. -+ * @xclk: Desired frequency of the clock in Hz. -+ * @xclksel: XCLK to configure (0 = A, 1 = B). -+ * -+ * Configures the specified MCLK divisor in the ISP timing control register -+ * (TCTRL_CTRL) to generate the desired xclk clock value. -+ * -+ * Divisor = CM_CAM_MCLK_HZ / xclk -+ * -+ * Returns the final frequency that is actually being generated -+ **/ -+u32 isp_set_xclk(struct device *dev, u32 xclk, u8 xclksel) -+{ -+ u32 divisor; -+ u32 currentxclk; -+ -+ if (xclk >= CM_CAM_MCLK_HZ) { -+ divisor = ISPTCTRL_CTRL_DIV_BYPASS; -+ currentxclk = CM_CAM_MCLK_HZ; -+ } else if (xclk >= 2) { -+ divisor = CM_CAM_MCLK_HZ / xclk; -+ if (divisor >= ISPTCTRL_CTRL_DIV_BYPASS) -+ divisor = ISPTCTRL_CTRL_DIV_BYPASS - 1; -+ currentxclk = CM_CAM_MCLK_HZ / divisor; -+ } else { -+ divisor = xclk; -+ currentxclk = 0; -+ } -+ -+ switch (xclksel) { -+ case 0: -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, -+ ~ISPTCTRL_CTRL_DIVA_MASK, -+ divisor << ISPTCTRL_CTRL_DIVA_SHIFT); -+ DPRINTK_ISPCTRL("isp_set_xclk(): cam_xclka set to %d Hz\n", -+ currentxclk); -+ break; -+ case 1: -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, -+ ~ISPTCTRL_CTRL_DIVB_MASK, -+ divisor << ISPTCTRL_CTRL_DIVB_SHIFT); -+ DPRINTK_ISPCTRL("isp_set_xclk(): cam_xclkb set to %d Hz\n", -+ currentxclk); -+ break; -+ default: -+ DPRINTK_ISPCTRL("ISP_ERR: isp_set_xclk(): Invalid requested " -+ "xclk. Must be 0 (A) or 1 (B).\n"); -+ return -EINVAL; -+ } -+ -+ return currentxclk; -+} -+EXPORT_SYMBOL(isp_set_xclk); -+ -+/** -+ * isp_power_settings - Sysconfig settings, for Power Management. 
-+ * @isp_sysconfig: Structure containing the power settings for ISP to configure -+ * -+ * Sets the power settings for the ISP, and SBL bus. -+ **/ -+static void isp_power_settings(struct device *dev, int idle) -+{ -+ if (idle) { -+ isp_reg_writel(dev, -+ (ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY << -+ ISP_SYSCONFIG_MIDLEMODE_SHIFT), -+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG); -+ if (omap_rev() == OMAP3430_REV_ES1_0) { -+ isp_reg_writel(dev, ISPCSI1_AUTOIDLE | -+ (ISPCSI1_MIDLEMODE_SMARTSTANDBY << -+ ISPCSI1_MIDLEMODE_SHIFT), -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISP_CSIA_SYSCONFIG); -+ isp_reg_writel(dev, ISPCSI1_AUTOIDLE | -+ (ISPCSI1_MIDLEMODE_SMARTSTANDBY << -+ ISPCSI1_MIDLEMODE_SHIFT), -+ OMAP3_ISP_IOMEM_CCP2, -+ ISP_CSIB_SYSCONFIG); -+ } -+ isp_reg_writel(dev, ISPCTRL_SBL_AUTOIDLE, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL); -+ -+ } else { -+ isp_reg_writel(dev, -+ (ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY << -+ ISP_SYSCONFIG_MIDLEMODE_SHIFT), -+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG); -+ if (omap_rev() == OMAP3430_REV_ES1_0) { -+ isp_reg_writel(dev, ISPCSI1_AUTOIDLE | -+ (ISPCSI1_MIDLEMODE_FORCESTANDBY << -+ ISPCSI1_MIDLEMODE_SHIFT), -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISP_CSIA_SYSCONFIG); -+ -+ isp_reg_writel(dev, ISPCSI1_AUTOIDLE | -+ (ISPCSI1_MIDLEMODE_FORCESTANDBY << -+ ISPCSI1_MIDLEMODE_SHIFT), -+ OMAP3_ISP_IOMEM_CCP2, -+ ISP_CSIB_SYSCONFIG); -+ } -+ -+ isp_reg_writel(dev, ISPCTRL_SBL_AUTOIDLE, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL); -+ } -+} -+ -+#define BIT_SET(var, shift, mask, val) \ -+ do { \ -+ var = (var & ~(mask << shift)) \ -+ | (val << shift); \ -+ } while (0) -+ -+static void isp_csi_enable(struct device *dev, u8 enable) -+{ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCP2, ISPCSI1_CTRL, -+ ~(BIT(0) | BIT(4)), -+ enable ? (BIT(0) | BIT(4)) : 0); -+} -+ -+static int isp_init_csi(struct device *dev, struct isp_interface_config *config) -+{ -+ u32 i = 0, val, reg; -+ int format; -+ -+ switch (config->u.csi.format) { -+ case V4L2_PIX_FMT_SGRBG10: -+ format = 0x16; /* RAW10+VP */ -+ break; -+ case V4L2_PIX_FMT_SGRBG10DPCM8: -+ format = 0x12; /* RAW8+DPCM10+VP */ -+ break; -+ default: -+ dev_err(dev, "isp_init_csi: bad csi format\n"); -+ return -EINVAL; -+ } -+ -+ /* Reset the CSI and wait for reset to complete */ -+ isp_reg_writel(dev, isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_SYSCONFIG) | BIT(1), -+ OMAP3_ISP_IOMEM_CCP2, ISPCSI1_SYSCONFIG); -+ while (!(isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, ISPCSI1_SYSSTATUS) & -+ BIT(0))) { -+ udelay(10); -+ if (i++ > 10) -+ break; -+ } -+ if (!(isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, ISPCSI1_SYSSTATUS) & -+ BIT(0))) { -+ dev_warn(dev, -+ "omap3_isp: timeout waiting for csi reset\n"); -+ } -+ -+ /* ISPCSI1_CTRL */ -+ val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, ISPCSI1_CTRL); -+ val &= ~BIT(11); /* Enable VP only off -> -+ extract embedded data to interconnect */ -+ BIT_SET(val, 8, 0x3, config->u.csi.vpclk); /* Video port clock */ -+/* val |= BIT(3); */ /* Wait for FEC before disabling interface */ -+ val |= BIT(2); /* I/O cell output is parallel -+ (no effect, but errata says should be enabled -+ for class 1/2) */ -+ val |= BIT(12); /* VP clock polarity to falling edge -+ (needed or bad picture!) 
*/ -+ -+ /* Data/strobe physical layer */ -+ BIT_SET(val, 1, 1, config->u.csi.signalling); -+ BIT_SET(val, 10, 1, config->u.csi.strobe_clock_inv); -+ val |= BIT(4); /* Magic bit to enable CSI1 and strobe mode */ -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_CCP2, ISPCSI1_CTRL); -+ -+ /* ISPCSI1_LCx_CTRL logical channel #0 */ -+ reg = ISPCSI1_LCx_CTRL(0); /* reg = ISPCSI1_CTRL1; */ -+ val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, reg); -+ /* Format = RAW10+VP or RAW8+DPCM10+VP*/ -+ BIT_SET(val, 3, 0x1f, format); -+ /* Enable setting of frame regions of interest */ -+ BIT_SET(val, 1, 1, 1); -+ BIT_SET(val, 2, 1, config->u.csi.crc); -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_CCP2, reg); -+ -+ /* ISPCSI1_DAT_START for logical channel #0 */ -+ reg = ISPCSI1_LCx_DAT_START(0); /* reg = ISPCSI1_DAT_START; */ -+ val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, reg); -+ BIT_SET(val, 16, 0xfff, config->u.csi.data_start); -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_CCP2, reg); -+ -+ /* ISPCSI1_DAT_SIZE for logical channel #0 */ -+ reg = ISPCSI1_LCx_DAT_SIZE(0); /* reg = ISPCSI1_DAT_SIZE; */ -+ val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, reg); -+ BIT_SET(val, 16, 0xfff, config->u.csi.data_size); -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_CCP2, reg); -+ -+ /* Clear status bits for logical channel #0 */ -+ val = ISPCSI1_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_CRC_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_FSP_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_FW_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_FSC_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_SSC_IRQ; -+ -+ /* Clear IRQ status bits for logical channel #0 */ -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_LC01_IRQSTATUS); -+ -+ /* Enable IRQs for logical channel #0 */ -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCP2, ISPCSI1_LC01_IRQENABLE, val); -+ -+ /* Enable CSI1 */ -+ isp_csi_enable(dev, 1); -+ -+ if (!(isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_CTRL) & BIT(4))) { -+ dev_warn(dev, "OMAP3 CSI1 bus not available\n"); -+ if (config->u.csi.signalling) { -+ /* Strobe mode requires CCP2 */ -+ return -EIO; -+ } -+ } -+ -+ return 0; -+} -+ -+/** -+ * isp_configure_interface - Configures ISP Control I/F related parameters. -+ * @config: Pointer to structure containing the desired configuration for the -+ * ISP. -+ * -+ * Configures ISP control register (ISP_CTRL) with the values specified inside -+ * the config structure. Controls: -+ * - Selection of parallel or serial input to the preview hardware. -+ * - Data lane shifter. -+ * - Pixel clock polarity. -+ * - 8 to 16-bit bridge at the input of CCDC module. 
-+ * - HS or VS synchronization signal detection -+ **/ -+int isp_configure_interface(struct device *dev, -+ struct isp_interface_config *config) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ u32 ispctrl_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+ u32 fmtcfg; -+ int r; -+ -+ isp->config = config; -+ -+ ispctrl_val &= ISPCTRL_SHIFT_MASK; -+ ispctrl_val |= config->dataline_shift << ISPCTRL_SHIFT_SHIFT; -+ ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV; -+ -+ ispctrl_val &= ISPCTRL_PAR_SER_CLK_SEL_MASK; -+ -+ isp_buf_init(dev); -+ -+ switch (config->ccdc_par_ser) { -+ case ISP_PARLL: -+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL; -+ ispctrl_val |= config->u.par.par_clk_pol -+ << ISPCTRL_PAR_CLK_POL_SHIFT; -+ ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_BENDIAN; -+ ispctrl_val |= config->u.par.par_bridge -+ << ISPCTRL_PAR_BRIDGE_SHIFT; -+ break; -+ case ISP_CSIA: -+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA; -+ ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_BENDIAN; -+ -+ isp_csi2_ctx_config_format(0, config->u.csi.format); -+ isp_csi2_ctx_update(0, false); -+ -+ if (config->u.csi.crc) -+ isp_csi2_ctrl_config_ecc_enable(true); -+ -+ isp_csi2_ctrl_config_vp_out_ctrl(config->u.csi.vpclk); -+ isp_csi2_ctrl_config_vp_only_enable(true); -+ isp_csi2_ctrl_config_vp_clk_enable(true); -+ isp_csi2_ctrl_update(false); -+ -+ isp_csi2_irq_complexio1_set(1); -+ isp_csi2_irq_status_set(1); -+ -+ isp_csi2_enable(1); -+ mdelay(3); -+ break; -+ case ISP_CSIB: -+ ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB; -+ r = isp_init_csi(dev, config); -+ if (r) -+ return r; -+ break; -+ case ISP_NONE: -+ return 0; -+ default: -+ return -EINVAL; -+ } -+ -+ ispctrl_val &= ~ISPCTRL_SYNC_DETECT_VSRISE; -+ ispctrl_val |= config->hsvs_syncdetect; -+ -+ isp_reg_writel(dev, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+ -+ /* Set sensor specific fields in CCDC and Previewer module. */ -+ isppreview_set_skip(&isp->isp_prev, config->prev_sph, config->prev_slv); -+ ispccdc_set_wenlog(&isp->isp_ccdc, config->wenlog); -+ -+ /* FIXME: this should be set in ispccdc_config_vp() */ -+ fmtcfg = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); -+ fmtcfg &= ISPCCDC_FMTCFG_VPIF_FRQ_MASK; -+ if (config->pixelclk) { -+ unsigned long l3_ick = clk_get_rate(isp->l3_ick); -+ unsigned long div = l3_ick / config->pixelclk; -+ if (div < 2) -+ div = 2; -+ if (div > 6) -+ div = 6; -+ fmtcfg |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT; -+ } -+ isp_reg_writel(dev, fmtcfg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_configure_interface); -+ -+void isp_hist_dma_done(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_irq *irqdis = &isp->irq; -+ -+ isp_hist_enable(&isp->isp_hist, 1); -+ if (ispccdc_busy(&isp->isp_ccdc)) { -+ /* Histogram cannot be enabled in this frame anymore */ -+ isp_hist_enable(&isp->isp_hist, 0); -+ if (isp_hist_busy(&isp->isp_hist)) -+ isp_hist_mark_invalid_buf(&isp->isp_hist); -+ } -+ if (irqdis->isp_callbk[CBK_CATCHALL]) { -+ irqdis->isp_callbk[CBK_CATCHALL]( -+ HIST_DONE, -+ irqdis->isp_callbk_arg1[CBK_CATCHALL], -+ irqdis->isp_callbk_arg2[CBK_CATCHALL]); -+ } -+} -+ -+static void isp_buf_process(struct device *dev, struct isp_bufs *bufs); -+ -+/** -+ * isp_isr - Interrupt Service Routine for Camera ISP module. -+ * @irq: Not used currently. -+ * @ispirq_disp: Pointer to the object that is passed while request_irq is -+ * called. This is the isp->irq object containing info on the -+ * callback. 
-+ * -+ * Handles the corresponding callback if plugged in. -+ * -+ * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the -+ * IRQ wasn't handled. -+ **/ -+static irqreturn_t isp_isr(int irq, void *_pdev) -+{ -+ struct device *dev = &((struct platform_device *)_pdev)->dev; -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_irq *irqdis = &isp->irq; -+ struct isp_bufs *bufs = &isp->bufs; -+ struct isp_buf *buf; -+ unsigned long flags; -+ u32 irqstatus = 0; -+ u32 sbl_pcr; -+ int wait_hs_vs; -+ int ret; -+ -+ if (isp->running == ISP_STOPPED) -+ return IRQ_NONE; -+ -+ irqstatus = isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); -+ isp_reg_writel(dev, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); -+ -+ if (isp->running == ISP_STOPPING) { -+ isp_flush(dev); -+ return IRQ_HANDLED; -+ } -+ -+ spin_lock_irqsave(&isp->lock, flags); -+ wait_hs_vs = bufs->wait_hs_vs; -+ if (irqstatus & CCDC_VD0 && bufs->wait_hs_vs) -+ bufs->wait_hs_vs--; -+ if (irqstatus & HS_VS && bufs->wait_stats && !bufs->wait_hs_vs) -+ bufs->wait_stats = 0; -+ /* -+ * We need to wait for the first HS_VS interrupt from CCDC. -+ * Otherwise our frame (and everything else) might be bad. -+ */ -+ switch (wait_hs_vs) { -+ case 1: -+ /* -+ * Enable preview for the first time. We just have -+ * missed the start-of-frame so we can do it now. -+ */ -+ if (irqstatus & CCDC_VD0) { -+ isp_af_try_enable(&isp->isp_af); -+ isph3a_aewb_try_enable(&isp->isp_h3a); -+ isp_hist_try_enable(&isp->isp_hist); -+ if (!RAW_CAPTURE(isp) && -+ !(isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_PCR) & -+ (ISPPRV_PCR_BUSY | ISPPRV_PCR_EN))) { -+ isppreview_config_shadow_registers( -+ &isp->isp_prev); -+ isppreview_enable(&isp->isp_prev, 1); -+ } -+ } -+ default: -+ /* -+ * For some sensors (like stingray), after a _restart_ -+ * from sw standby state, starting couple of frames -+ * are erroneous. From stingray datasheet: -+ * "When sensor restarts, Normal image can get 2 frames after" -+ * -+ * So while we wait for HS_VS, check cnd clear the CSIB -+ * error interrupts, if any -+ */ -+ if (irqstatus & IRQ0STATUS_CSIB_IRQ) { -+ u32 csib; -+ -+ csib = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_LC01_IRQSTATUS); -+ isp_reg_writel(dev, csib, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_LC01_IRQSTATUS); -+ } -+ -+ goto out_ignore_buff; -+ case 0: -+ if (bufs->wait_stats) { -+ if (irqstatus & (H3A_AWB_DONE | H3A_AF_DONE)) -+ irqstatus &= ~(H3A_AWB_DONE | H3A_AF_DONE); -+ if (irqstatus & HIST_DONE) -+ isp_hist_mark_invalid_buf(&isp->isp_hist); -+ } -+ -+ break; -+ } -+ -+ buf = ISP_BUF_DONE(bufs); -+ -+ if (irqstatus & LSC_PRE_ERR) { -+ /* Mark buffer faulty. 
*/ -+ buf->vb_state = VIDEOBUF_ERROR; -+ ispccdc_lsc_error_handler(&isp->isp_ccdc); -+ dev_dbg(dev, "lsc prefetch error\n"); -+ } -+ -+ if (irqstatus & CSIA) { -+ int ret = isp_csi2_isr(); -+ if (ret) -+ buf->vb_state = VIDEOBUF_ERROR; -+ } -+ -+ if (irqstatus & IRQ0STATUS_CSIB_IRQ) { -+ static const u32 ISPCSI1_LC01_ERROR = -+ ISPCSI1_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_CRC_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_FSP_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_FW_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_FSC_IRQ | -+ ISPCSI1_LC01_IRQSTATUS_LC0_SSC_IRQ; -+ u32 ispcsi1_irqstatus; -+ -+ ispcsi1_irqstatus = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_LC01_IRQSTATUS); -+ isp_reg_writel(dev, ispcsi1_irqstatus, OMAP3_ISP_IOMEM_CCP2, -+ ISPCSI1_LC01_IRQSTATUS); -+ if (ispcsi1_irqstatus & ISPCSI1_LC01_ERROR) { -+ buf->vb_state = VIDEOBUF_ERROR; -+ dev_dbg(dev, "CCP2 err:%x\n", ispcsi1_irqstatus); -+ } -+ } -+ -+ if (irqstatus & RESZ_DONE && !RAW_CAPTURE(isp)) -+ isp_buf_process(dev, bufs); -+ -+ if (irqstatus & CCDC_VD0) { -+ if (RAW_CAPTURE(isp)) -+ isp_buf_process(dev, bufs); -+ if (!ispccdc_busy(&isp->isp_ccdc)) -+ ispccdc_config_shadow_registers(&isp->isp_ccdc); -+ -+ /* Enabling configured statistic modules */ -+ if (!(irqstatus & H3A_AWB_DONE)) -+ isph3a_aewb_try_enable(&isp->isp_h3a); -+ if (!(irqstatus & H3A_AF_DONE)) -+ isp_af_try_enable(&isp->isp_af); -+ if (!(irqstatus & HIST_DONE)) -+ isp_hist_try_enable(&isp->isp_hist); -+ } -+ -+ if (irqstatus & PREV_DONE) { -+ if (irqdis->isp_callbk[CBK_PREV_DONE]) -+ irqdis->isp_callbk[CBK_PREV_DONE]( -+ PREV_DONE, -+ irqdis->isp_callbk_arg1[CBK_PREV_DONE], -+ irqdis->isp_callbk_arg2[CBK_PREV_DONE]); -+ else if (!RAW_CAPTURE(isp)) { -+ if (ispresizer_busy(&isp->isp_res)) { -+ buf->vb_state = VIDEOBUF_ERROR; -+ dev_dbg(dev, "resizer busy.\n"); -+ } else { -+ ispresizer_config_shadow_registers( -+ &isp->isp_res); -+ ispresizer_enable(&isp->isp_res, 1); -+ } -+ if (!ISP_BUFS_IS_EMPTY(bufs)) { -+ isppreview_config_shadow_registers( -+ &isp->isp_prev); -+ isppreview_enable(&isp->isp_prev, 1); -+ } -+ } -+ } -+ -+ /* -+ * Handle shared buffer logic overflows for video buffers. -+ * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored. 
-+ */ -+ sbl_pcr = isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR) & -+ ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF; -+ isp_reg_writel(dev, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR); -+ if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF -+ | ISPSBL_PCR_RSZ2_WBL_OVF -+ | ISPSBL_PCR_RSZ3_WBL_OVF -+ | ISPSBL_PCR_RSZ4_WBL_OVF -+ | ISPSBL_PCR_PRV_WBL_OVF -+ | ISPSBL_PCR_CCDC_WBL_OVF -+ | ISPSBL_PCR_CSIA_WBL_OVF -+ | ISPSBL_PCR_CSIB_WBL_OVF)) { -+ buf->vb_state = VIDEOBUF_ERROR; -+ isp->isp_af.buf_err = 1; -+ isp->isp_h3a.buf_err = 1; -+ isp_hist_mark_invalid_buf(&isp->isp_hist); -+ dev_dbg(dev, "sbl overflow, sbl_pcr = %8.8x\n", sbl_pcr); -+ } -+ -+ if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF) { -+ dev_dbg(dev, "af: sbl overflow detected.\n"); -+ isp->isp_af.buf_err = 1; -+ } -+ -+ if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF) { -+ dev_dbg(dev, "h3a: sbl overflow detected.\n"); -+ isp->isp_h3a.buf_err = 1; -+ } -+ -+ if (irqstatus & H3A_AWB_DONE) { -+ isph3a_aewb_enable(&isp->isp_h3a, 0); -+ /* If it's busy we can't process this buffer anymore */ -+ if (!isph3a_aewb_busy(&isp->isp_h3a)) { -+ ret = isph3a_aewb_buf_process(&isp->isp_h3a); -+ isph3a_aewb_config_registers(&isp->isp_h3a); -+ } else { -+ ret = -1; -+ dev_dbg(dev, "h3a: cannot process buffer, device is " -+ "busy.\n"); -+ } -+ if (ret) -+ irqstatus &= ~H3A_AWB_DONE; -+ isph3a_aewb_enable(&isp->isp_h3a, 1); -+ } -+ -+ if (irqstatus & H3A_AF_DONE) { -+ isp_af_enable(&isp->isp_af, 0); -+ /* If it's busy we can't process this buffer anymore */ -+ if (!isp_af_busy(&isp->isp_af)) { -+ ret = isp_af_buf_process(&isp->isp_af); -+ isp_af_config_registers(&isp->isp_af); -+ } else { -+ ret = -1; -+ dev_dbg(dev, "af: cannot process buffer, device is " -+ "busy.\n"); -+ } -+ if (ret) -+ irqstatus &= ~H3A_AF_DONE; -+ isp_af_enable(&isp->isp_af, 1); -+ } -+ -+ if (irqstatus & HIST_DONE) { -+ isp_hist_enable(&isp->isp_hist, 0); -+ /* If it's busy we can't process this buffer anymore */ -+ if (!isp_hist_busy(&isp->isp_hist)) { -+ ret = isp_hist_buf_process(&isp->isp_hist); -+ isp_hist_config_registers(&isp->isp_hist); -+ } else { -+ dev_dbg(dev, "hist: cannot process buffer, device is " -+ "busy.\n"); -+ /* current and next buffer might have invalid data */ -+ isp_hist_mark_invalid_buf(&isp->isp_hist); -+ ret = HIST_NO_BUF; -+ } -+ if (ret != HIST_BUF_WAITING_DMA) -+ isp_hist_enable(&isp->isp_hist, 1); -+ if (ret != HIST_BUF_DONE) -+ irqstatus &= ~HIST_DONE; -+ } -+ -+ if (irqdis->isp_callbk[CBK_CATCHALL] && irqstatus) { -+ irqdis->isp_callbk[CBK_CATCHALL]( -+ irqstatus, -+ irqdis->isp_callbk_arg1[CBK_CATCHALL], -+ irqdis->isp_callbk_arg2[CBK_CATCHALL]); -+ } -+ -+out_ignore_buff: -+ spin_unlock_irqrestore(&isp->lock, flags); -+ -+ isp_flush(dev); -+ -+#if 1 -+ { -+ static const struct { -+ int num; -+ char *name; -+ } bits[] = { -+ { 31, "HS_VS_IRQ" }, -+ { 30, "SEC_ERR_IRQ" }, -+ { 29, "OCP_ERR_IRQ" }, -+ { 28, "MMU_ERR_IRQ" }, -+ { 27, "res27" }, -+ { 26, "res26" }, -+ { 25, "OVF_IRQ" }, -+ { 24, "RSZ_DONE_IRQ" }, -+ { 23, "res23" }, -+ { 22, "res22" }, -+ { 21, "CBUFF_IRQ" }, -+ { 20, "PRV_DONE_IRQ" }, -+ { 19, "CCDC_LSC_PREFETCH_ERROR" }, -+ { 18, "CCDC_LSC_PREFETCH_COMPLETED" }, -+ { 17, "CCDC_LSC_DONE" }, -+ { 16, "HIST_DONE_IRQ" }, -+ { 15, "res15" }, -+ { 14, "res14" }, -+ { 13, "H3A_AWB_DONE_IRQ" }, -+ { 12, "H3A_AF_DONE_IRQ" }, -+ { 11, "CCDC_ERR_IRQ" }, -+ { 10, "CCDC_VD2_IRQ" }, -+ { 9, "CCDC_VD1_IRQ" }, -+ { 8, "CCDC_VD0_IRQ" }, -+ { 7, "res7" }, -+ { 6, "res6" }, -+ { 5, "res5" }, -+ { 4, "CSIB_IRQ" }, -+ { 3, "CSIB_LCM_IRQ" }, -+ { 2, "res2" }, -+ { 1, "res1" }, 
-+ { 0, "CSIA_IRQ" }, -+ }; -+ int i; -+ for (i = 0; i < ARRAY_SIZE(bits); i++) { -+ if ((1 << bits[i].num) & irqstatus) -+ DPRINTK_ISPCTRL("%s ", bits[i].name); -+ } -+ DPRINTK_ISPCTRL("\n"); -+ } -+#endif -+ -+ return IRQ_HANDLED; -+} -+ -+/* Device name, needed for resource tracking layer */ -+struct device_driver camera_drv = { -+ .name = "camera" -+}; -+ -+struct device camera_dev = { -+ .driver = &camera_drv, -+}; -+ -+/** -+ * isp_tmp_buf_free - To free allocated 10MB memory -+ * -+ **/ -+static void isp_tmp_buf_free(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ if (isp->tmp_buf) { -+ iommu_vfree(isp->iommu, isp->tmp_buf); -+ isp->tmp_buf = 0; -+ isp->tmp_buf_size = 0; -+ } -+} -+ -+/** -+ * isp_tmp_buf_alloc - To allocate a 10MB memory -+ * -+ **/ -+static u32 isp_tmp_buf_alloc(struct device *dev, size_t size) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ u32 da; -+ -+ isp_tmp_buf_free(dev); -+ -+ da = iommu_vmalloc(isp->iommu, 0, size, IOMMU_FLAG); -+ if (IS_ERR_VALUE(da)) { -+ dev_err(dev, "iommu_vmap mapping failed\n"); -+ return -ENOMEM; -+ } -+ isp->tmp_buf = da; -+ isp->tmp_buf_size = size; -+ -+ isppreview_set_outaddr(&isp->isp_prev, isp->tmp_buf); -+ ispresizer_set_inaddr(&isp->isp_res, isp->tmp_buf); -+ -+ return 0; -+} -+ -+/** -+ * isp_start - Starts ISP submodule -+ * -+ * Start the needed isp components assuming these components -+ * are configured correctly. -+ **/ -+void isp_start(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ isp->running = ISP_RUNNING; -+ -+ return; -+} -+EXPORT_SYMBOL(isp_start); -+ -+#define ISP_STATISTICS_BUSY \ -+ () -+#define ISP_STOP_TIMEOUT msecs_to_jiffies(1000) -+static int __isp_disable_modules(struct device *dev, int suspend) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ unsigned long timeout = jiffies + ISP_STOP_TIMEOUT; -+ int reset = 0; -+ -+ /* -+ * We need to stop all the modules after CCDC first or they'll -+ * never stop since they may not get a full frame from CCDC. -+ */ -+ if (suspend) { -+ isp_af_suspend(&isp->isp_af); -+ isph3a_aewb_suspend(&isp->isp_h3a); -+ isp_hist_suspend(&isp->isp_hist); -+ } else { -+ isp_af_enable(&isp->isp_af, 0); -+ isph3a_aewb_enable(&isp->isp_h3a, 0); -+ isp_hist_enable(&isp->isp_hist, 0); -+ -+ /* FIXME: find me a better interface */ -+ isp->isp_af.config.af_config = 0; -+ isp->isp_h3a.aewb_config_local.aewb_enable = 0; -+ isp->isp_hist.config.enable = 0; -+ } -+ ispresizer_enable(&isp->isp_res, 0); -+ isppreview_enable(&isp->isp_prev, 0); -+ -+ timeout = jiffies + ISP_STOP_TIMEOUT; -+ while (isp_af_busy(&isp->isp_af) -+ || isph3a_aewb_busy(&isp->isp_h3a) -+ || isp_hist_busy(&isp->isp_hist) -+ || isppreview_busy(&isp->isp_prev) -+ || ispresizer_busy(&isp->isp_res)) { -+ if (time_after(jiffies, timeout)) { -+ dev_info(dev, "can't stop non-ccdc modules.\n"); -+ reset = 1; -+ break; -+ } -+ msleep(1); -+ } -+ -+ /* Let's stop CCDC now. 
*/ -+ ispccdc_enable(&isp->isp_ccdc, 0); -+ -+ timeout = jiffies + ISP_STOP_TIMEOUT; -+ while (ispccdc_busy(&isp->isp_ccdc)) { -+ if (time_after(jiffies, timeout)) { -+ dev_info(dev, "can't stop ccdc module.\n"); -+ reset = 1; -+ break; -+ } -+ msleep(1); -+ } -+ -+ isp_csi_enable(dev, 0); -+ isp_csi2_enable(0); -+ isp_buf_init(dev); -+ -+ return reset; -+} -+ -+static int isp_stop_modules(struct device *dev) -+{ -+ return __isp_disable_modules(dev, 0); -+} -+ -+static int isp_suspend_modules(struct device *dev) -+{ -+ return __isp_disable_modules(dev, 1); -+} -+ -+static void isp_resume_modules(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ isp_hist_resume(&isp->isp_hist); -+ isph3a_aewb_resume(&isp->isp_h3a); -+ isp_af_resume(&isp->isp_af); -+} -+ -+static void isp_reset(struct device *dev) -+{ -+ unsigned long timeout = 0; -+ -+ isp_reg_writel(dev, -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG) -+ | ISP_SYSCONFIG_SOFTRESET, -+ OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG); -+ while (!(isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_SYSSTATUS) & 0x1)) { -+ if (timeout++ > 10000) { -+ dev_alert(dev, "cannot reset ISP\n"); -+ break; -+ } -+ udelay(1); -+ } -+} -+ -+/** -+ * isp_stop - Stops isp submodules -+ **/ -+void isp_stop(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ int reset; -+ -+ isp->running = ISP_STOPPING; -+ isp_disable_interrupts(dev); -+ synchronize_irq(((struct isp_device *)dev_get_drvdata(dev))->irq_num); -+ isp->running = ISP_STOPPED; -+ reset = isp_stop_modules(dev); -+ if (!reset) -+ return; -+ -+ isp_save_ctx(dev); -+ isp_reset(dev); -+ isp_restore_ctx(dev); -+} -+EXPORT_SYMBOL(isp_stop); -+ -+static void isp_set_buf(struct device *dev, struct isp_buf *buf) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ if (isp->pipeline.modules & OMAP_ISP_RESIZER -+ && is_ispresizer_enabled()) -+ ispresizer_set_outaddr(&isp->isp_res, buf->isp_addr); -+ else if (isp->pipeline.modules & OMAP_ISP_CCDC) -+ ispccdc_set_outaddr(&isp->isp_ccdc, buf->isp_addr); -+ -+} -+ -+static int isp_try_pipeline(struct device *dev, -+ struct v4l2_pix_format *pix_input, -+ struct isp_pipeline *pipe) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct v4l2_pix_format *pix_output = &pipe->pix; -+ unsigned int wanted_width = pix_output->width; -+ unsigned int wanted_height = pix_output->height; -+ int ifmt; -+ int rval; -+ -+ if ((pix_input->pixelformat == V4L2_PIX_FMT_SGRBG10 -+ || pix_input->pixelformat == V4L2_PIX_FMT_SGRBG10DPCM8) -+ && pix_output->pixelformat != V4L2_PIX_FMT_SGRBG10) { -+ pipe->modules = OMAP_ISP_CCDC | OMAP_ISP_PREVIEW -+ | OMAP_ISP_RESIZER; -+ pipe->ccdc_in = CCDC_RAW; -+ pipe->ccdc_out = CCDC_OTHERS_VP; -+ } else { -+ pipe->modules = OMAP_ISP_CCDC; -+ if (pix_input->pixelformat == V4L2_PIX_FMT_SGRBG10 -+ || pix_input->pixelformat == V4L2_PIX_FMT_SGRBG10DPCM8) { -+ pipe->ccdc_in = CCDC_RAW; -+ pipe->ccdc_out = CCDC_OTHERS_VP_MEM; -+ } else { -+ pipe->ccdc_in = CCDC_YUV_SYNC; -+ pipe->ccdc_out = CCDC_OTHERS_MEM; -+ } -+ } -+ -+ if (pipe->modules & OMAP_ISP_CCDC) { -+ pipe->ccdc_in_w = pix_input->width; -+ pipe->ccdc_in_h = pix_input->height; -+ rval = ispccdc_try_pipeline(&isp->isp_ccdc, pipe); -+ if (rval) { -+ dev_dbg(dev, "the dimensions %dx%d are not" -+ " supported\n", pix_input->width, -+ pix_input->height); -+ return rval; -+ } -+ pix_output->width = pipe->ccdc_out_w_img; -+ pix_output->height = pipe->ccdc_out_h; -+ pix_output->bytesperline = -+ pipe->ccdc_out_w * ISP_BYTES_PER_PIXEL; -+ 
} -+ -+ if (pipe->modules & OMAP_ISP_PREVIEW) { -+ rval = isppreview_try_pipeline(&isp->isp_prev, pipe); -+ if (rval) { -+ dev_dbg(dev, "the dimensions %dx%d are not" -+ " supported\n", pix_input->width, -+ pix_input->height); -+ return rval; -+ } -+ pix_output->width = pipe->prv_out_w; -+ pix_output->height = pipe->prv_out_h; -+ } -+ -+ if (pipe->modules & OMAP_ISP_RESIZER) { -+ pipe->rsz_out_w = wanted_width; -+ pipe->rsz_out_h = wanted_height; -+ -+ pipe->rsz_crop.left = pipe->rsz_crop.top = 0; -+ pipe->rsz_crop.width = pipe->prv_out_w_img; -+ pipe->rsz_crop.height = pipe->prv_out_h_img; -+ -+ rval = ispresizer_try_pipeline(&isp->isp_res, pipe); -+ if (rval) { -+ dev_dbg(dev, "The dimensions %dx%d are not" -+ " supported\n", pix_input->width, -+ pix_input->height); -+ return rval; -+ } -+ -+ pix_output->width = pipe->rsz_out_w; -+ pix_output->height = pipe->rsz_out_h; -+ pix_output->bytesperline = -+ pipe->rsz_out_w * ISP_BYTES_PER_PIXEL; -+ } -+ -+ pix_output->field = V4L2_FIELD_NONE; -+ pix_output->sizeimage = -+ PAGE_ALIGN(pix_output->bytesperline * pix_output->height); -+ pix_output->priv = 0; -+ -+ for (ifmt = 0; ifmt < NUM_ISP_CAPTURE_FORMATS; ifmt++) { -+ if (pix_output->pixelformat == isp_formats[ifmt].pixelformat) -+ break; -+ } -+ if (ifmt == NUM_ISP_CAPTURE_FORMATS) -+ pix_output->pixelformat = V4L2_PIX_FMT_YUYV; -+ -+ switch (pix_output->pixelformat) { -+ case V4L2_PIX_FMT_YUYV: -+ case V4L2_PIX_FMT_UYVY: -+ pix_output->colorspace = V4L2_COLORSPACE_JPEG; -+ break; -+ default: -+ pix_output->colorspace = V4L2_COLORSPACE_SRGB; -+ } -+ -+ return 0; -+} -+ -+static int isp_s_pipeline(struct device *dev, -+ struct v4l2_pix_format *pix_input, -+ struct v4l2_pix_format *pix_output) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_pipeline pipe; -+ int rval; -+ -+ isp_release_resources(dev); -+ -+ pipe.pix = *pix_output; -+ -+ rval = isp_try_pipeline(dev, pix_input, &pipe); -+ if (rval) -+ return rval; -+ -+ ispccdc_request(&isp->isp_ccdc); -+ ispccdc_s_pipeline(&isp->isp_ccdc, &pipe); -+ -+ if (pipe.modules & OMAP_ISP_PREVIEW) { -+ isppreview_request(&isp->isp_prev); -+ pipe.prv_in = PRV_RAW_CCDC; -+ pipe.prv_out = PREVIEW_MEM; -+ isppreview_s_pipeline(&isp->isp_prev, &pipe); -+ } -+ -+ if (pipe.modules & OMAP_ISP_RESIZER) { -+ ispresizer_request(&isp->isp_res); -+ pipe.rsz_in = RSZ_MEM_YUV; -+ ispresizer_s_pipeline(&isp->isp_res, &pipe); -+ } -+ -+ isp->pipeline = pipe; -+ *pix_output = isp->pipeline.pix; -+ -+ return 0; -+} -+ -+/** -+ * isp_vbq_sync - keep the video buffers coherent between cpu and isp -+ * -+ * The typical operation required here is Cache Invalidation across -+ * the (user space) buffer address range. And this _must_ be done -+ * at QBUF stage (and *only* at QBUF). -+ * -+ * We try to use optimal cache invalidation function: -+ * - dmac_inv_range: -+ * - used when the number of pages are _low_. -+ * - it becomes quite slow as the number of pages increase. -+ * - for 648x492 viewfinder (150 pages) it takes 1.3 ms. -+ * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms. -+ * -+ * - flush_cache_all: -+ * - used when the number of pages are _high_. -+ * - time taken in the range of 500-900 us. -+ * - has a higher penalty but, as whole dcache + icache is invalidated -+ **/ -+/** -+ * FIXME: dmac_inv_range crashes randomly on the user space buffer -+ * address. Fall back to flush_cache_all for now. 
-+ */ -+#define ISP_CACHE_FLUSH_PAGES_MAX 0 -+ -+static int isp_vbq_sync(struct videobuf_buffer *vb) -+{ -+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb); -+ -+ if (!vb->baddr || !dma || !dma->nr_pages || -+ dma->nr_pages > ISP_CACHE_FLUSH_PAGES_MAX) -+ flush_cache_all(); -+ else { -+ dmac_inv_range((void *)vb->baddr, -+ (void *)vb->baddr + vb->bsize); -+ outer_inv_range(vb->baddr, vb->baddr + vb->bsize); -+ } -+ -+ return 0; -+} -+ -+static void isp_buf_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_bufs *bufs = &isp->bufs; -+ int sg; -+ -+ bufs->queue = 0; -+ bufs->done = 0; -+ bufs->wait_hs_vs = isp->config->wait_hs_vs; -+ bufs->wait_stats = bufs->wait_hs_vs; -+ for (sg = 0; sg < NUM_BUFS; sg++) { -+ if (bufs->buf[sg].vb) { -+ bufs->buf[sg].vb->state = VIDEOBUF_ERROR; -+ bufs->buf[sg].complete(bufs->buf[sg].vb, -+ bufs->buf[sg].priv); -+ } -+ bufs->buf[sg].complete = NULL; -+ bufs->buf[sg].vb = NULL; -+ bufs->buf[sg].priv = NULL; -+ } -+} -+ -+static void isp_buf_process(struct device *dev, struct isp_bufs *bufs) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_buf *buf; -+ int last; -+ -+ if (ISP_BUFS_IS_EMPTY(bufs)) -+ return; -+ -+ if (RAW_CAPTURE(isp)) { -+ ispccdc_enable(&isp->isp_ccdc, 0); -+ if (ispccdc_sbl_wait_idle(&isp->isp_ccdc, 1000)) { -+ ispccdc_enable(&isp->isp_ccdc, 1); -+ dev_info(dev, "ccdc won't become idle!\n"); -+ return; -+ } -+ } -+ -+ /* We had at least one buffer in queue. */ -+ buf = ISP_BUF_DONE(bufs); -+ last = ISP_BUFS_IS_LAST(bufs); -+ -+ if (!last) { -+ /* Set new buffer address. */ -+ isp_set_buf(dev, ISP_BUF_NEXT_DONE(bufs)); -+ if (RAW_CAPTURE(isp)) -+ ispccdc_enable(&isp->isp_ccdc, 1); -+ } else { -+ /* Tell ISP not to write any of our buffers. */ -+ isp_disable_interrupts(dev); -+ } -+ -+ /* Mark the current buffer as done. */ -+ ISP_BUF_MARK_DONE(bufs); -+ -+ DPRINTK_ISPCTRL(KERN_ALERT "%s: finish %d mmu %p\n", __func__, -+ (bufs->done - 1 + NUM_BUFS) % NUM_BUFS, -+ (bufs->buf+((bufs->done - 1 + NUM_BUFS) -+ % NUM_BUFS))->isp_addr); -+ -+ /* -+ * We want to dequeue a buffer from the video buffer -+ * queue. Let's do it! -+ */ -+ buf->vb->state = buf->vb_state; -+ buf->complete(buf->vb, buf->priv); -+ buf->vb = NULL; -+} -+ -+int isp_buf_queue(struct device *dev, struct videobuf_buffer *vb, -+ void (*complete)(struct videobuf_buffer *vb, void *priv), -+ void *priv) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ unsigned long flags; -+ struct isp_buf *buf; -+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb); -+ const struct scatterlist *sglist = dma->sglist; -+ struct isp_bufs *bufs = &isp->bufs; -+ int sglen = dma->sglen; -+ -+ if (isp->running != ISP_RUNNING) { -+ vb->state = VIDEOBUF_ERROR; -+ complete(vb, priv); -+ -+ return 0; -+ } -+ -+ BUG_ON(sglen < 0 || !sglist); -+ -+ isp_vbq_sync(vb); -+ -+ spin_lock_irqsave(&isp->lock, flags); -+ -+ BUG_ON(ISP_BUFS_IS_FULL(bufs)); -+ -+ buf = ISP_BUF_QUEUE(bufs); -+ -+ buf->isp_addr = bufs->isp_addr_capture[vb->i]; -+ buf->complete = complete; -+ buf->vb = vb; -+ buf->priv = priv; -+ buf->vb_state = VIDEOBUF_DONE; -+ buf->vb->state = VIDEOBUF_ACTIVE; -+ -+ if (ISP_BUFS_IS_EMPTY(bufs)) { -+ /* -+ * We must wait for the HS_VS since before that the -+ * CCDC may trigger interrupts even if it's not -+ * receiving a frame. 
-+ */ -+ bufs->wait_hs_vs++; -+ bufs->wait_stats = 1; -+ isp_enable_interrupts(dev, RAW_CAPTURE(isp)); -+ isp_set_buf(dev, buf); -+ ispccdc_enable(&isp->isp_ccdc, 1); -+ } -+ -+ ISP_BUF_MARK_QUEUED(bufs); -+ -+ spin_unlock_irqrestore(&isp->lock, flags); -+ -+ DPRINTK_ISPCTRL(KERN_ALERT "%s: queue %d vb %d, mmu %p\n", __func__, -+ (bufs->queue - 1 + NUM_BUFS) % NUM_BUFS, vb->i, -+ buf->isp_addr); -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_buf_queue); -+ -+int isp_vbq_setup(struct device *dev, struct videobuf_queue *vbq, -+ unsigned int *cnt, unsigned int *size) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ size_t tmp_size = PAGE_ALIGN(isp->pipeline.prv_out_w -+ * isp->pipeline.prv_out_h -+ * ISP_BYTES_PER_PIXEL); -+ -+ if (isp->pipeline.modules & OMAP_ISP_PREVIEW -+ && isp->tmp_buf_size < tmp_size) -+ return isp_tmp_buf_alloc(dev, tmp_size); -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_vbq_setup); -+ -+dma_addr_t ispmmu_vmap(struct device *dev, const struct scatterlist *sglist, -+ int sglen) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ int err; -+ u32 da; -+ struct sg_table *sgt; -+ unsigned int i; -+ struct scatterlist *sg, *src = (struct scatterlist *)sglist; -+ -+ /* -+ * convert isp sglist to iommu sgt -+ * FIXME: should be fixed in the upper layer? -+ */ -+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); -+ if (!sgt) -+ return -ENOMEM; -+ err = sg_alloc_table(sgt, sglen, GFP_KERNEL); -+ if (err) -+ goto err_sg_alloc; -+ -+ for_each_sg(sgt->sgl, sg, sgt->nents, i) -+ sg_set_buf(sg, phys_to_virt(sg_dma_address(src + i)), -+ sg_dma_len(src + i)); -+ -+ da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG); -+ if (IS_ERR_VALUE(da)) -+ goto err_vmap; -+ -+ return (dma_addr_t)da; -+ -+err_vmap: -+ sg_free_table(sgt); -+err_sg_alloc: -+ kfree(sgt); -+ return -ENOMEM; -+} -+EXPORT_SYMBOL_GPL(ispmmu_vmap); -+ -+void ispmmu_vunmap(struct device *dev, dma_addr_t da) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct sg_table *sgt; -+ -+ sgt = iommu_vunmap(isp->iommu, (u32)da); -+ if (!sgt) -+ return; -+ sg_free_table(sgt); -+ kfree(sgt); -+} -+EXPORT_SYMBOL_GPL(ispmmu_vunmap); -+ -+/** -+ * isp_vbq_prepare - Videobuffer queue prepare. -+ * @vbq: Pointer to videobuf_queue structure. -+ * @vb: Pointer to videobuf_buffer structure. -+ * @field: Requested Field order for the videobuffer. -+ * -+ * Returns 0 if successful, or -EIO if the ispmmu was unable to map a -+ * scatter-gather linked list data space. -+ **/ -+int isp_vbq_prepare(struct device *dev, struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb, enum v4l2_field field) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ unsigned int isp_addr; -+ struct videobuf_dmabuf *vdma; -+ struct isp_bufs *bufs = &isp->bufs; -+ -+ int err = 0; -+ -+ vdma = videobuf_to_dma(vb); -+ -+ isp_addr = ispmmu_vmap(dev, vdma->sglist, vdma->sglen); -+ -+ if (IS_ERR_VALUE(isp_addr)) -+ err = -EIO; -+ else -+ bufs->isp_addr_capture[vb->i] = isp_addr; -+ -+ return err; -+} -+EXPORT_SYMBOL(isp_vbq_prepare); -+ -+/** -+ * isp_vbq_release - Videobuffer queue release. -+ * @vbq: Pointer to videobuf_queue structure. -+ * @vb: Pointer to videobuf_buffer structure. 
-+ **/ -+void isp_vbq_release(struct device *dev, struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_bufs *bufs = &isp->bufs; -+ -+ ispmmu_vunmap(dev, bufs->isp_addr_capture[vb->i]); -+ bufs->isp_addr_capture[vb->i] = (dma_addr_t)NULL; -+ return; -+} -+EXPORT_SYMBOL(isp_vbq_release); -+ -+/** -+ * isp_queryctrl - Query V4L2 control from existing controls in ISP. -+ * @a: Pointer to v4l2_queryctrl structure. It only needs the id field filled. -+ * -+ * Returns 0 if successful, or -EINVAL if not found in ISP. -+ **/ -+int isp_queryctrl(struct v4l2_queryctrl *a) -+{ -+ int i; -+ -+ if (a->id & V4L2_CTRL_FLAG_NEXT_CTRL) { -+ a->id &= ~V4L2_CTRL_FLAG_NEXT_CTRL; -+ i = find_next_vctrl(a->id); -+ } else { -+ i = find_vctrl(a->id); -+ } -+ -+ if (i < 0) -+ return -EINVAL; -+ -+ *a = video_control[i].qc; -+ return 0; -+} -+EXPORT_SYMBOL(isp_queryctrl); -+ -+/** -+ * isp_queryctrl - Query V4L2 control from existing controls in ISP. -+ * @a: Pointer to v4l2_queryctrl structure. It only needs the id field filled. -+ * -+ * Returns 0 if successful, or -EINVAL if not found in ISP. -+ **/ -+int isp_querymenu(struct v4l2_querymenu *a) -+{ -+ int i; -+ -+ i = find_vmenu(a->id, a->index); -+ -+ if (i < 0) -+ return -EINVAL; -+ -+ *a = video_menu[i]; -+ return 0; -+} -+EXPORT_SYMBOL(isp_querymenu); -+ -+/** -+ * isp_g_ctrl - Gets value of the desired V4L2 control. -+ * @a: V4L2 control to read actual value from. -+ * -+ * Return 0 if successful, or -EINVAL if chosen control is not found. -+ **/ -+int isp_g_ctrl(struct device *dev, struct v4l2_control *a) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ u8 current_value; -+ int rval = 0; -+ -+ if (!isp->ref_count) -+ return -EINVAL; -+ -+ switch (a->id) { -+ case V4L2_CID_BRIGHTNESS: -+ isppreview_query_brightness(&isp->isp_prev, ¤t_value); -+ a->value = current_value / ISPPRV_BRIGHT_UNITS; -+ break; -+ case V4L2_CID_CONTRAST: -+ isppreview_query_contrast(&isp->isp_prev, ¤t_value); -+ a->value = current_value / ISPPRV_CONTRAST_UNITS; -+ break; -+ case V4L2_CID_COLORFX: -+ isppreview_get_color(&isp->isp_prev, ¤t_value); -+ a->value = current_value; -+ break; -+ default: -+ rval = -EINVAL; -+ break; -+ } -+ -+ return rval; -+} -+EXPORT_SYMBOL(isp_g_ctrl); -+ -+/** -+ * isp_s_ctrl - Sets value of the desired V4L2 control. -+ * @a: V4L2 control to read actual value from. -+ * -+ * Return 0 if successful, -EINVAL if chosen control is not found or value -+ * is out of bounds, -EFAULT if copy_from_user or copy_to_user operation fails -+ * from camera abstraction layer related controls or the transfered user space -+ * pointer via the value field is not set properly. 
-+ **/ -+int isp_s_ctrl(struct device *dev, struct v4l2_control *a) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ int rval = 0; -+ u8 new_value = a->value; -+ -+ if (!isp->ref_count) -+ return -EINVAL; -+ -+ switch (a->id) { -+ case V4L2_CID_BRIGHTNESS: -+ if (a->value > ISPPRV_BRIGHT_HIGH) -+ rval = -EINVAL; -+ else -+ isppreview_update_brightness(&isp->isp_prev, -+ &new_value); -+ break; -+ case V4L2_CID_CONTRAST: -+ if (a->value > ISPPRV_CONTRAST_HIGH) -+ rval = -EINVAL; -+ else -+ isppreview_update_contrast(&isp->isp_prev, &new_value); -+ break; -+ case V4L2_CID_COLORFX: -+ if (a->value > V4L2_COLORFX_SEPIA) -+ rval = -EINVAL; -+ else -+ isppreview_set_color(&isp->isp_prev, &new_value); -+ break; -+ default: -+ rval = -EINVAL; -+ break; -+ } -+ -+ return rval; -+} -+EXPORT_SYMBOL(isp_s_ctrl); -+ -+/** -+ * isp_handle_private - Handle all private ioctls for isp module. -+ * @cmd: ioctl cmd value -+ * @arg: ioctl arg value -+ * -+ * Return 0 if successful, -EINVAL if chosen cmd value is not handled or value -+ * is out of bounds, -EFAULT if ioctl arg value is not valid. -+ * Function simply routes the input ioctl cmd id to the appropriate handler in -+ * the isp module. -+ **/ -+int isp_handle_private(struct device *dev, int cmd, void *arg) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ int rval = 0; -+ -+ if (!isp->ref_count) -+ return -EINVAL; -+ -+ switch (cmd) { -+ case VIDIOC_PRIVATE_ISP_CCDC_CFG: -+ rval = ispccdc_config(&isp->isp_ccdc, arg); -+ break; -+ case VIDIOC_PRIVATE_ISP_PRV_CFG: -+ rval = isppreview_config(&isp->isp_prev, arg); -+ break; -+ case VIDIOC_PRIVATE_ISP_AEWB_CFG: { -+ struct isph3a_aewb_config *params; -+ params = (struct isph3a_aewb_config *)arg; -+ rval = isph3a_aewb_config(&isp->isp_h3a, params); -+ } -+ break; -+ case VIDIOC_PRIVATE_ISP_AEWB_REQ: { -+ struct isph3a_aewb_data *data; -+ data = (struct isph3a_aewb_data *)arg; -+ rval = isph3a_aewb_request_statistics(&isp->isp_h3a, -+ data); -+ } -+ break; -+ case VIDIOC_PRIVATE_ISP_HIST_CFG: { -+ struct isp_hist_config *params; -+ params = (struct isp_hist_config *)arg; -+ rval = isp_hist_config(&isp->isp_hist, params); -+ } -+ break; -+ case VIDIOC_PRIVATE_ISP_HIST_REQ: { -+ struct isp_hist_data *data; -+ data = (struct isp_hist_data *)arg; -+ rval = isp_hist_request_statistics(&isp->isp_hist, -+ data); -+ } -+ break; -+ case VIDIOC_PRIVATE_ISP_AF_CFG: { -+ struct af_configuration *params; -+ params = (struct af_configuration *)arg; -+ rval = isp_af_config(&isp->isp_af, params); -+ -+ } -+ break; -+ case VIDIOC_PRIVATE_ISP_AF_REQ: { -+ struct isp_af_data *data; -+ data = (struct isp_af_data *)arg; -+ rval = isp_af_request_statistics(&isp->isp_af, data); -+ } -+ break; -+ default: -+ rval = -EINVAL; -+ break; -+ } -+ return rval; -+} -+EXPORT_SYMBOL(isp_handle_private); -+ -+/** -+ * isp_enum_fmt_cap - Gets more information of chosen format index and type -+ * @f: Pointer to structure containing index and type of format to read from. -+ * -+ * Returns 0 if successful, or -EINVAL if format index or format type is -+ * invalid. 
-+ **/ -+int isp_enum_fmt_cap(struct v4l2_fmtdesc *f) -+{ -+ int index = f->index; -+ enum v4l2_buf_type type = f->type; -+ int rval = -EINVAL; -+ -+ if (index >= NUM_ISP_CAPTURE_FORMATS) -+ goto err; -+ -+ memset(f, 0, sizeof(*f)); -+ f->index = index; -+ f->type = type; -+ -+ switch (f->type) { -+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: -+ rval = 0; -+ break; -+ default: -+ goto err; -+ } -+ -+ f->flags = isp_formats[index].flags; -+ strncpy(f->description, isp_formats[index].description, -+ sizeof(f->description)); -+ f->pixelformat = isp_formats[index].pixelformat; -+err: -+ return rval; -+} -+EXPORT_SYMBOL(isp_enum_fmt_cap); -+ -+/** -+ * isp_g_fmt_cap - Gets current output image format. -+ * @f: Pointer to V4L2 format structure to be filled with current output format -+ **/ -+void isp_g_fmt_cap(struct device *dev, struct v4l2_pix_format *pix) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ *pix = isp->pipeline.pix; -+ return; -+} -+EXPORT_SYMBOL(isp_g_fmt_cap); -+ -+/** -+ * isp_s_fmt_cap - Sets I/O formats and crop and configures pipeline in ISP -+ * @f: Pointer to V4L2 format structure to be filled with current output format -+ * -+ * Returns 0 if successful, or return value of either isp_try_size or -+ * isp_try_fmt if there is an error. -+ **/ -+int isp_s_fmt_cap(struct device *dev, struct v4l2_pix_format *pix_input, -+ struct v4l2_pix_format *pix_output) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ if (!isp->ref_count) -+ return -EINVAL; -+ -+ return isp_s_pipeline(dev, pix_input, pix_output); -+} -+EXPORT_SYMBOL(isp_s_fmt_cap); -+ -+/** -+ * isp_g_crop - Gets crop rectangle size and position. -+ * @a: Pointer to V4L2 crop structure to be filled. -+ * -+ * Always returns 0. -+ **/ -+int isp_g_crop(struct device *dev, struct v4l2_crop *crop) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ if (isp->pipeline.modules & OMAP_ISP_RESIZER) { -+ crop->c = isp->pipeline.rsz_crop; -+ } else { -+ crop->c.left = 0; -+ crop->c.top = 0; -+ crop->c.width = isp->pipeline.ccdc_out_w_img; -+ crop->c.height = isp->pipeline.ccdc_out_h; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_g_crop); -+ -+/** -+ * isp_s_crop - Sets crop rectangle size and position and queues crop operation -+ * @a: Pointer to V4L2 crop structure with desired parameters. -+ * @pix: Pointer to V4L2 pixel format structure with desired parameters. -+ * -+ * Returns 0 if successful, or -EINVAL if crop parameters are out of bounds. -+ **/ -+int isp_s_crop(struct device *dev, struct v4l2_crop *a) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_pipeline *pipe = &isp->pipeline; -+ -+ /* -+ * Reset resizer output size. -+ * FIXME: resizer should not touch the output size in the first place, -+ * it should always correspond to the size set by S_FMT or S_FMT -+ * should fail if not possible. If necessary, resizer should adjust -+ * the source rectangle in ispresizer_try_pipeline instead. -+ * When the resizer is fixed, its output size does not need to be -+ * adjusted anymore here. -+ */ -+ pipe->rsz_out_w_img = pipe->pix.width; -+ pipe->rsz_out_w = pipe->pix.width; -+ pipe->rsz_out_h = pipe->pix.height; -+ -+ ispresizer_config_crop(&isp->isp_res, a); -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_s_crop); -+ -+/** -+ * isp_try_fmt_cap - Tries desired input/output image formats -+ * @pix_input: Pointer to V4L2 pixel format structure for input image. -+ * @pix_output: Pointer to V4L2 pixel format structure for output image. 
-+ * -+ * Returns 0 if successful, or return value of either isp_try_size or -+ * isp_try_fmt if there is an error. -+ **/ -+int isp_try_fmt_cap(struct device *dev, struct v4l2_pix_format *pix_input, -+ struct v4l2_pix_format *pix_output) -+{ -+ struct isp_pipeline pipe; -+ int rval; -+ -+ pipe.pix = *pix_output; -+ -+ rval = isp_try_pipeline(dev, pix_input, &pipe); -+ if (rval) -+ return rval; -+ -+ *pix_output = pipe.pix; -+ -+ return 0; -+} -+EXPORT_SYMBOL(isp_try_fmt_cap); -+ -+/** -+ * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context. -+ * -+ * Routine for saving the context of each module in the ISP. -+ * CCDC, HIST, H3A, PREV, RESZ and MMU. -+ **/ -+static void isp_save_ctx(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ isp_save_context(dev, isp_reg_list); -+ ispccdc_save_context(dev); -+ if (isp->iommu) -+ iommu_save_ctx(isp->iommu); -+ isp_hist_save_context(dev); -+ isph3a_save_context(dev); -+ isppreview_save_context(dev); -+ ispresizer_save_context(dev); -+} -+ -+/** -+ * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context. -+ * -+ * Routine for restoring the context of each module in the ISP. -+ * CCDC, HIST, H3A, PREV, RESZ and MMU. -+ **/ -+static void isp_restore_ctx(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ isp_restore_context(dev, isp_reg_list); -+ ispccdc_restore_context(dev); -+ if (isp->iommu) -+ iommu_restore_ctx(isp->iommu); -+ isp_hist_restore_context(dev); -+ isph3a_restore_context(dev); -+ isppreview_restore_context(dev); -+ ispresizer_restore_context(dev); -+} -+ -+static int isp_enable_clocks(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ int r; -+ -+ r = clk_enable(isp->cam_ick); -+ if (r) { -+ dev_err(dev, "clk_enable cam_ick failed\n"); -+ goto out_clk_enable_ick; -+ } -+ r = clk_set_rate(isp->dpll4_m5_ck, CM_CAM_MCLK_HZ/2); -+ if (r) { -+ dev_err(dev, "clk_set_rate for dpll4_m5_ck failed\n"); -+ goto out_clk_enable_mclk; -+ } -+ r = clk_enable(isp->cam_mclk); -+ if (r) { -+ dev_err(dev, "clk_enable cam_mclk failed\n"); -+ goto out_clk_enable_mclk; -+ } -+ r = clk_enable(isp->csi2_fck); -+ if (r) { -+ dev_err(dev, "clk_enable csi2_fck failed\n"); -+ goto out_clk_enable_csi2_fclk; -+ } -+ return 0; -+ -+out_clk_enable_csi2_fclk: -+ clk_disable(isp->cam_mclk); -+out_clk_enable_mclk: -+ clk_disable(isp->cam_ick); -+out_clk_enable_ick: -+ return r; -+} -+ -+static void isp_disable_clocks(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ clk_disable(isp->cam_ick); -+ clk_disable(isp->cam_mclk); -+ clk_disable(isp->csi2_fck); -+} -+ -+/** -+ * isp_get - Adquires the ISP resource. -+ * -+ * Initializes the clocks for the first acquire. -+ **/ -+struct device *isp_get(void) -+{ -+ struct platform_device *pdev = omap3isp_pdev; -+ struct isp_device *isp; -+ static int has_context; -+ int ret_err = 0; -+ -+ if (!pdev) -+ return NULL; -+ isp = platform_get_drvdata(pdev); -+ -+ DPRINTK_ISPCTRL("isp_get: old %d\n", isp->ref_count); -+ mutex_lock(&(isp->isp_mutex)); -+ if (isp->ref_count == 0) { -+ ret_err = isp_enable_clocks(&pdev->dev); -+ if (ret_err) -+ goto out_err; -+ /* We don't want to restore context before saving it! 
*/ -+ if (has_context) -+ isp_restore_ctx(&pdev->dev); -+ else -+ has_context = 1; -+ } else { -+ mutex_unlock(&isp->isp_mutex); -+ return NULL; -+ } -+ isp->ref_count++; -+ mutex_unlock(&(isp->isp_mutex)); -+ -+ DPRINTK_ISPCTRL("isp_get: new %d\n", isp->ref_count); -+ /* FIXME: ISP should register as v4l2 device to store its priv data */ -+ return &pdev->dev; -+ -+out_err: -+ mutex_unlock(&(isp->isp_mutex)); -+ return NULL; -+} -+EXPORT_SYMBOL(isp_get); -+ -+/** -+ * isp_put - Releases the ISP resource. -+ * -+ * Releases the clocks also for the last release. -+ **/ -+int isp_put(void) -+{ -+ struct platform_device *pdev = omap3isp_pdev; -+ struct isp_device *isp = platform_get_drvdata(pdev); -+ -+ if (!isp) -+ return -EBUSY; -+ -+ DPRINTK_ISPCTRL("isp_put: old %d\n", isp->ref_count); -+ mutex_lock(&(isp->isp_mutex)); -+ if (isp->ref_count) { -+ if (--isp->ref_count == 0) { -+ isp_save_ctx(&pdev->dev); -+ isp_tmp_buf_free(&pdev->dev); -+ isp_release_resources(&pdev->dev); -+ isp_disable_clocks(&pdev->dev); -+ } -+ } -+ mutex_unlock(&(isp->isp_mutex)); -+ DPRINTK_ISPCTRL("isp_put: new %d\n", isp->ref_count); -+ return isp->ref_count; -+} -+EXPORT_SYMBOL(isp_put); -+ -+/** -+ * isp_save_context - Saves the values of the ISP module registers. -+ * @reg_list: Structure containing pairs of register address and value to -+ * modify on OMAP. -+ **/ -+void isp_save_context(struct device *dev, struct isp_reg *reg_list) -+{ -+ struct isp_reg *next = reg_list; -+ -+ for (; next->reg != ISP_TOK_TERM; next++) -+ next->val = isp_reg_readl(dev, next->mmio_range, next->reg); -+} -+ -+/** -+ * isp_restore_context - Restores the values of the ISP module registers. -+ * @reg_list: Structure containing pairs of register address and value to -+ * modify on OMAP. -+ **/ -+void isp_restore_context(struct device *dev, struct isp_reg *reg_list) -+{ -+ struct isp_reg *next = reg_list; -+ -+ for (; next->reg != ISP_TOK_TERM; next++) -+ isp_reg_writel(dev, next->val, next->mmio_range, next->reg); -+} -+ -+static int isp_remove(struct platform_device *pdev) -+{ -+ struct isp_device *isp = platform_get_drvdata(pdev); -+ int i; -+ -+ if (!isp) -+ return 0; -+ -+ isp_csi2_cleanup(&pdev->dev); -+ isp_af_exit(&pdev->dev); -+ isp_resizer_cleanup(&pdev->dev); -+ isp_get(); -+ if (isp->iommu) -+ iommu_put(isp->iommu); -+ isp_put(); -+ isph3a_aewb_cleanup(&pdev->dev); -+ isp_hist_cleanup(&pdev->dev); -+ isp_ccdc_cleanup(&pdev->dev); -+ -+ clk_put(isp->cam_ick); -+ clk_put(isp->cam_mclk); -+ clk_put(isp->dpll4_m5_ck); -+ clk_put(isp->csi2_fck); -+ clk_put(isp->l3_ick); -+ -+ free_irq(isp->irq_num, isp); -+ -+ for (i = 0; i <= OMAP3_ISP_IOMEM_CSI2PHY; i++) { -+ if (isp->mmio_base[i]) { -+ iounmap((void *)isp->mmio_base[i]); -+ isp->mmio_base[i] = 0; -+ } -+ -+ if (isp->mmio_base_phys[i]) { -+ release_mem_region(isp->mmio_base_phys[i], -+ isp->mmio_size[i]); -+ isp->mmio_base_phys[i] = 0; -+ } -+ } -+ -+ omap3isp_pdev = NULL; -+ kfree(isp); -+ -+ return 0; -+} -+ -+#ifdef CONFIG_PM -+ -+static int isp_suspend(struct platform_device *pdev, pm_message_t state) -+{ -+ struct isp_device *isp = platform_get_drvdata(pdev); -+ int reset; -+ -+ DPRINTK_ISPCTRL("isp_suspend: starting\n"); -+ -+ WARN_ON(mutex_is_locked(&isp->isp_mutex)); -+ -+ if (isp->ref_count == 0) -+ goto out; -+ -+ isp_disable_interrupts(&pdev->dev); -+ reset = isp_suspend_modules(&pdev->dev); -+ isp_save_ctx(&pdev->dev); -+ if (reset) -+ isp_reset(&pdev->dev); -+ -+ isp_disable_clocks(&pdev->dev); -+ -+out: -+ DPRINTK_ISPCTRL("isp_suspend: done\n"); -+ -+ return 0; 
-+} -+ -+static int isp_resume(struct platform_device *pdev) -+{ -+ struct isp_device *isp = platform_get_drvdata(pdev); -+ int ret_err = 0; -+ -+ DPRINTK_ISPCTRL("isp_resume: starting\n"); -+ -+ if (isp->ref_count == 0) -+ goto out; -+ -+ ret_err = isp_enable_clocks(&pdev->dev); -+ if (ret_err) -+ goto out; -+ isp_restore_ctx(&pdev->dev); -+ isp_resume_modules(&pdev->dev); -+ -+out: -+ DPRINTK_ISPCTRL("isp_resume: done \n"); -+ -+ return ret_err; -+} -+ -+#else -+ -+#define isp_suspend NULL -+#define isp_resume NULL -+ -+#endif /* CONFIG_PM */ -+ -+static u64 raw_dmamask = DMA_32BIT_MASK; -+ -+static int isp_probe(struct platform_device *pdev) -+{ -+ struct isp_device *isp; -+ int ret_err = 0; -+ int i; -+ -+ isp = kzalloc(sizeof(*isp), GFP_KERNEL); -+ if (!isp) { -+ dev_err(&pdev->dev, "could not allocate memory\n"); -+ return -ENOMEM; -+ } -+ -+ platform_set_drvdata(pdev, isp); -+ -+ isp->dev = &pdev->dev; -+ -+ for (i = 0; i <= OMAP3_ISP_IOMEM_CSI2PHY; i++) { -+ struct resource *mem; -+ /* request the mem region for the camera registers */ -+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i); -+ if (!mem) { -+ dev_err(isp->dev, "no mem resource?\n"); -+ ret_err = -ENODEV; -+ goto out_free_mmio; -+ } -+ -+ if (!request_mem_region(mem->start, mem->end - mem->start + 1, -+ pdev->name)) { -+ dev_err(isp->dev, -+ "cannot reserve camera register I/O region\n"); -+ ret_err = -ENODEV; -+ goto out_free_mmio; -+ } -+ isp->mmio_base_phys[i] = mem->start; -+ isp->mmio_size[i] = mem->end - mem->start + 1; -+ -+ /* map the region */ -+ isp->mmio_base[i] = (unsigned long) -+ ioremap_nocache(isp->mmio_base_phys[i], -+ isp->mmio_size[i]); -+ if (!isp->mmio_base[i]) { -+ dev_err(isp->dev, -+ "cannot map camera register I/O region\n"); -+ ret_err = -ENODEV; -+ goto out_free_mmio; -+ } -+ } -+ -+ isp->irq_num = platform_get_irq(pdev, 0); -+ if (isp->irq_num <= 0) { -+ dev_err(isp->dev, "no irq for camera?\n"); -+ ret_err = -ENODEV; -+ goto out_free_mmio; -+ } -+ -+ isp->cam_ick = clk_get(&camera_dev, "cam_ick"); -+ if (IS_ERR(isp->cam_ick)) { -+ dev_err(isp->dev, "clk_get cam_ick failed\n"); -+ ret_err = PTR_ERR(isp->cam_ick); -+ goto out_free_mmio; -+ } -+ isp->cam_mclk = clk_get(&camera_dev, "cam_mclk"); -+ if (IS_ERR(isp->cam_mclk)) { -+ dev_err(isp->dev, "clk_get cam_mclk failed\n"); -+ ret_err = PTR_ERR(isp->cam_mclk); -+ goto out_clk_get_mclk; -+ } -+ isp->dpll4_m5_ck = clk_get(&camera_dev, "dpll4_m5_ck"); -+ if (IS_ERR(isp->dpll4_m5_ck)) { -+ dev_err(isp->dev, "clk_get dpll4_m5_ck failed\n"); -+ ret_err = PTR_ERR(isp->dpll4_m5_ck); -+ goto out_clk_get_dpll4_m5_ck; -+ } -+ isp->csi2_fck = clk_get(&camera_dev, "csi2_96m_fck"); -+ if (IS_ERR(isp->csi2_fck)) { -+ dev_err(isp->dev, "clk_get csi2_96m_fck failed\n"); -+ ret_err = PTR_ERR(isp->csi2_fck); -+ goto out_clk_get_csi2_fclk; -+ } -+ isp->l3_ick = clk_get(&camera_dev, "l3_ick"); -+ if (IS_ERR(isp->l3_ick)) { -+ dev_err(isp->dev, "clk_get l3_ick failed\n"); -+ ret_err = PTR_ERR(isp->l3_ick); -+ goto out_clk_get_l3_ick; -+ } -+ -+ if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, -+ "Omap 3 Camera ISP", pdev)) { -+ dev_err(isp->dev, "could not install isr\n"); -+ ret_err = -EINVAL; -+ goto out_request_irq; -+ } -+ -+ isp->ref_count = 0; -+ omap3isp_pdev = pdev; -+ -+ mutex_init(&(isp->isp_mutex)); -+ spin_lock_init(&isp->lock); -+ spin_lock_init(&isp->h3a_lock); -+ -+ isp->dev->dma_mask = &raw_dmamask; -+ isp->dev->coherent_dma_mask = DMA_32BIT_MASK; -+ -+ isp_get(); -+ isp->iommu = iommu_get("isp"); -+ if (IS_ERR(isp->iommu)) { -+ ret_err = 
PTR_ERR(isp->iommu); -+ isp->iommu = NULL; -+ } -+ isp_put(); -+ if (!isp->iommu) -+ goto out_iommu_get; -+ -+ isp_ccdc_init(&pdev->dev); -+ isp_hist_init(&pdev->dev); -+ isph3a_aewb_init(&pdev->dev); -+ isp_preview_init(&pdev->dev); -+ isp_resizer_init(&pdev->dev); -+ isp_af_init(&pdev->dev); -+ isp_csi2_init(&pdev->dev); -+ -+ isp_get(); -+ isp_power_settings(&pdev->dev, 1); -+ isp_put(); -+ -+ return 0; -+ -+out_iommu_get: -+ free_irq(isp->irq_num, isp); -+ omap3isp_pdev = NULL; -+out_request_irq: -+ clk_put(isp->l3_ick); -+out_clk_get_l3_ick: -+ clk_put(isp->csi2_fck); -+out_clk_get_csi2_fclk: -+ clk_put(isp->dpll4_m5_ck); -+out_clk_get_dpll4_m5_ck: -+ clk_put(isp->cam_mclk); -+out_clk_get_mclk: -+ clk_put(isp->cam_ick); -+out_free_mmio: -+ for (i = 0; i <= OMAP3_ISP_IOMEM_CSI2PHY; i++) { -+ if (isp->mmio_base[i]) { -+ iounmap((void *)isp->mmio_base[i]); -+ isp->mmio_base[i] = 0; -+ } -+ -+ if (isp->mmio_base_phys[i]) { -+ release_mem_region(isp->mmio_base_phys[i], -+ isp->mmio_size[i]); -+ isp->mmio_base_phys[i] = 0; -+ } -+ } -+ -+ kfree(isp); -+ return ret_err; -+} -+ -+static struct platform_driver omap3isp_driver = { -+ .probe = isp_probe, -+ .remove = isp_remove, -+ .suspend = isp_suspend, -+ .resume = isp_resume, -+ .driver = { -+ .name = "omap3isp", -+ }, -+}; -+ -+/** -+ * isp_init - ISP module initialization. -+ **/ -+static int __init isp_init(void) -+{ -+ return platform_driver_register(&omap3isp_driver); -+} -+ -+/** -+ * isp_cleanup - ISP module cleanup. -+ **/ -+static void __exit isp_cleanup(void) -+{ -+ platform_driver_unregister(&omap3isp_driver); -+} -+ -+/** -+ * isp_print_status - Prints the values of the ISP Control Module registers -+ * -+ * Also prints other debug information stored in the ISP module structure. -+ **/ -+void isp_print_status(struct device *dev) -+{ -+ if (!is_ispctrl_debug_enabled()) -+ return; -+ -+ DPRINTK_ISPCTRL("###ISP_CTRL=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL)); -+ DPRINTK_ISPCTRL("###ISP_TCTRL_CTRL=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_TCTRL_CTRL)); -+ DPRINTK_ISPCTRL("###ISP_SYSCONFIG=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_SYSCONFIG)); -+ DPRINTK_ISPCTRL("###ISP_SYSSTATUS=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_SYSSTATUS)); -+ DPRINTK_ISPCTRL("###ISP_IRQ0ENABLE=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_IRQ0ENABLE)); -+ DPRINTK_ISPCTRL("###ISP_IRQ0STATUS=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_IRQ0STATUS)); -+} -+ -+module_init(isp_init); -+module_exit(isp_cleanup); -+ -+MODULE_AUTHOR("Texas Instruments"); -+MODULE_DESCRIPTION("ISP Control Module Library"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispccdc.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispccdc.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispccdc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispccdc.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,1510 @@ -+/* -+ * ispccdc.c -+ * -+ * Driver Library for CCDC module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Senthilvadivu Guruswamy -+ * Pallavi Kulkarni -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "ispccdc.h" -+ -+#define LSC_TABLE_INIT_SIZE 50052 -+#define PTR_FREE ((u32)(-ENOMEM)) -+ -+/* Structure for saving/restoring CCDC module registers*/ -+static struct isp_reg ispccdc_reg_list[] = { -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HD_VD_WID, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PIX_LINES, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CULLING, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR0, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR1, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR2, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR3, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR4, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR5, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR6, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR7, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGEVEN0, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGEVEN1, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGODD0, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGODD1, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_INITIAL, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE, 0}, -+ {OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_OFFSET, 0}, -+ {0, ISP_TOK_TERM, 0} -+}; -+ -+/** -+ * ispccdc_print_status - Prints the values of the CCDC Module registers -+ * -+ * Also prints other debug information stored in the CCDC module. 
-+ **/ -+static void ispccdc_print_status(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe) -+{ -+ if (!is_ispccdc_debug_enabled()) -+ return; -+ -+ DPRINTK_ISPCCDC("Module in use =%d\n", isp_ccdc->ccdc_inuse); -+ DPRINTK_ISPCCDC("Accepted CCDC Input (width = %d,Height = %d)\n", -+ isp_ccdc->ccdcin_w, -+ isp_ccdc->ccdcin_h); -+ DPRINTK_ISPCCDC("Accepted CCDC Output (width = %d,Height = %d)\n", -+ isp_ccdc->ccdcout_w, -+ isp_ccdc->ccdcout_h); -+ DPRINTK_ISPCCDC("###CCDC PCR=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_PCR)); -+ DPRINTK_ISPCCDC("ISP_CTRL =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL)); -+ switch (pipe->ccdc_in) { -+ case CCDC_RAW: -+ DPRINTK_ISPCCDC("ccdc input format is CCDC_RAW\n"); -+ break; -+ case CCDC_YUV_SYNC: -+ DPRINTK_ISPCCDC("ccdc input format is CCDC_YUV_SYNC\n"); -+ break; -+ case CCDC_YUV_BT: -+ DPRINTK_ISPCCDC("ccdc input format is CCDC_YUV_BT\n"); -+ break; -+ default: -+ break; -+ } -+ -+ switch (pipe->ccdc_out) { -+ case CCDC_OTHERS_VP: -+ DPRINTK_ISPCCDC("ccdc output format is CCDC_OTHERS_VP\n"); -+ break; -+ case CCDC_OTHERS_MEM: -+ DPRINTK_ISPCCDC("ccdc output format is CCDC_OTHERS_MEM\n"); -+ break; -+ case CCDC_YUV_RSZ: -+ DPRINTK_ISPCCDC("ccdc output format is CCDC_YUV_RSZ\n"); -+ break; -+ default: -+ break; -+ } -+ -+ DPRINTK_ISPCCDC("###ISP_CTRL in ccdc =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL)); -+ DPRINTK_ISPCCDC("###ISP_IRQ0ENABLE in ccdc =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_IRQ0ENABLE)); -+ DPRINTK_ISPCCDC("###ISP_IRQ0STATUS in ccdc =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_IRQ0STATUS)); -+ DPRINTK_ISPCCDC("###CCDC SYN_MODE=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SYN_MODE)); -+ DPRINTK_ISPCCDC("###CCDC HORZ_INFO=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_HORZ_INFO)); -+ DPRINTK_ISPCCDC("###CCDC VERT_START=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VERT_START)); -+ DPRINTK_ISPCCDC("###CCDC VERT_LINES=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VERT_LINES)); -+ DPRINTK_ISPCCDC("###CCDC CULLING=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_CULLING)); -+ DPRINTK_ISPCCDC("###CCDC HSIZE_OFF=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_HSIZE_OFF)); -+ DPRINTK_ISPCCDC("###CCDC SDOFST=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SDOFST)); -+ DPRINTK_ISPCCDC("###CCDC SDR_ADDR=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SDR_ADDR)); -+ DPRINTK_ISPCCDC("###CCDC CLAMP=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_CLAMP)); -+ DPRINTK_ISPCCDC("###CCDC COLPTN=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_COLPTN)); -+ DPRINTK_ISPCCDC("###CCDC CFG=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_CFG)); -+ DPRINTK_ISPCCDC("###CCDC VP_OUT=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VP_OUT)); -+ DPRINTK_ISPCCDC("###CCDC_SDR_ADDR= 0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SDR_ADDR)); -+ DPRINTK_ISPCCDC("###CCDC FMTCFG=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMTCFG)); -+ DPRINTK_ISPCCDC("###CCDC FMT_HORZ=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMT_HORZ)); -+ DPRINTK_ISPCCDC("###CCDC FMT_VERT=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMT_VERT)); -+ DPRINTK_ISPCCDC("###CCDC LSC_CONFIG=0x%x\n", -+ isp_reg_readl(dev, 
OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_CONFIG)); -+ DPRINTK_ISPCCDC("###CCDC LSC_INIT=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_INITIAL)); -+ DPRINTK_ISPCCDC("###CCDC LSC_TABLE BASE=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_TABLE_BASE)); -+ DPRINTK_ISPCCDC("###CCDC LSC TABLE OFFSET=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_TABLE_OFFSET)); -+} -+ -+/** -+ * ispccdc_config_black_clamp - Configures the clamp parameters in CCDC. -+ * @bclamp: Structure containing the optical black average gain, optical black -+ * sample length, sample lines, and the start pixel position of the -+ * samples w.r.t the HS pulse. -+ * Configures the clamp parameters in CCDC. Either if its being used the -+ * optical black clamp, or the digital clamp. If its a digital clamp, then -+ * assures to put a valid DC substraction level. -+ * -+ * Returns always 0 when completed. -+ **/ -+static int ispccdc_config_black_clamp(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_bclamp bclamp) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ u32 bclamp_val = 0; -+ -+ if (isp_ccdc->obclamp_en) { -+ bclamp_val |= bclamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT; -+ bclamp_val |= bclamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT; -+ bclamp_val |= bclamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT; -+ bclamp_val |= bclamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT; -+ isp_reg_writel(dev, bclamp_val, -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP); -+ } else { -+ if (omap_rev() < OMAP3430_REV_ES2_0) -+ if (isp_ccdc->syncif_ipmod == YUV16 || -+ isp_ccdc->syncif_ipmod == YUV8 || -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_REC656IF) & -+ ISPCCDC_REC656IF_R656ON) -+ bclamp.dcsubval = 0; -+ isp_reg_writel(dev, bclamp.dcsubval, -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB); -+ } -+ return 0; -+} -+ -+/** -+ * ispccdc_enable_black_clamp - Enables/Disables the optical black clamp. -+ * @enable: 0 Disables optical black clamp, 1 Enables optical black clamp. -+ * -+ * Enables or disables the optical black clamp. When disabled, the digital -+ * clamp operates. -+ **/ -+static void ispccdc_enable_black_clamp(struct isp_ccdc_device *isp_ccdc, -+ u8 enable) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP, -+ ~ISPCCDC_CLAMP_CLAMPEN, -+ enable ? ISPCCDC_CLAMP_CLAMPEN : 0); -+ isp_ccdc->obclamp_en = enable; -+} -+ -+/** -+ * ispccdc_config_fpc - Configures the Faulty Pixel Correction parameters. -+ * @fpc: Structure containing the number of faulty pixels corrected in the -+ * frame, address of the FPC table. -+ * -+ * Returns 0 if successful, or -EINVAL if FPC Address is not on the 64 byte -+ * boundary. -+ **/ -+static int ispccdc_config_fpc(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_fpc fpc) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ u32 fpc_val = 0; -+ -+ fpc_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); -+ -+ if ((fpc.fpcaddr & 0xFFFFFFC0) == fpc.fpcaddr) { -+ isp_reg_writel(dev, fpc_val & (~ISPCCDC_FPC_FPCEN), -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); -+ isp_reg_writel(dev, fpc.fpcaddr, -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR); -+ } else { -+ DPRINTK_ISPCCDC("FPC Address should be on 64byte boundary\n"); -+ return -EINVAL; -+ } -+ isp_reg_writel(dev, fpc_val | (fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT), -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); -+ return 0; -+} -+ -+/** -+ * ispccdc_enable_fpc - Enables the Faulty Pixel Correction. -+ * @enable: 0 Disables FPC, 1 Enables FPC. 
-+ **/ -+static void ispccdc_enable_fpc(struct isp_ccdc_device *isp_ccdc, u8 enable) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, -+ ~ISPCCDC_FPC_FPCEN, enable ? ISPCCDC_FPC_FPCEN : 0); -+} -+ -+/** -+ * ispccdc_config_black_comp - Configures Black Level Compensation parameters. -+ * @blcomp: Structure containing the black level compensation value for RGrGbB -+ * pixels. in 2's complement. -+ **/ -+static void ispccdc_config_black_comp(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_blcomp blcomp) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ u32 blcomp_val = 0; -+ -+ blcomp_val |= blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT; -+ blcomp_val |= blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT; -+ blcomp_val |= blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT; -+ blcomp_val |= blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT; -+ -+ isp_reg_writel(dev, blcomp_val, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_BLKCMP); -+} -+ -+/** -+ * ispccdc_config_vp - Configures the Video Port Configuration parameters. -+ * @vpcfg: Structure containing the Video Port input frequency, and the 10 bit -+ * format. -+ **/ -+static void ispccdc_config_vp(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_vp vpcfg) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ u32 fmtcfg_vp = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMTCFG); -+ -+ fmtcfg_vp &= ISPCCDC_FMTCFG_VPIN_MASK & ISPCCDC_FMTCFG_VPIF_FRQ_MASK; -+ -+ switch (vpcfg.bitshift_sel) { -+ case BIT9_0: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0; -+ break; -+ case BIT10_1: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1; -+ break; -+ case BIT11_2: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2; -+ break; -+ case BIT12_3: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3; -+ break; -+ }; -+ switch (vpcfg.freq_sel) { -+ case PIXCLKBY2: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY2; -+ break; -+ case PIXCLKBY3_5: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY3; -+ break; -+ case PIXCLKBY4_5: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY4; -+ break; -+ case PIXCLKBY5_5: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY5; -+ break; -+ case PIXCLKBY6_5: -+ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY6; -+ break; -+ }; -+ isp_reg_writel(dev, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); -+} -+ -+/** -+ * ispccdc_enable_vp - Enables the Video Port. -+ * @enable: 0 Disables VP, 1 Enables VP -+ **/ -+static void ispccdc_enable_vp(struct isp_ccdc_device *isp_ccdc, u8 enable) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG, -+ ~ISPCCDC_FMTCFG_VPEN, -+ enable ? ISPCCDC_FMTCFG_VPEN : 0); -+} -+ -+/** -+ * ispccdc_config_culling - Configures the culling parameters. -+ * @cull: Structure containing the vertical culling pattern, and horizontal -+ * culling pattern for odd and even lines. -+ **/ -+static void ispccdc_config_culling(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_culling cull) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ u32 culling_val = 0; -+ -+ culling_val |= cull.v_pattern << ISPCCDC_CULLING_CULV_SHIFT; -+ culling_val |= cull.h_even << ISPCCDC_CULLING_CULHEVN_SHIFT; -+ culling_val |= cull.h_odd << ISPCCDC_CULLING_CULHODD_SHIFT; -+ -+ isp_reg_writel(dev, culling_val, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_CULLING); -+} -+ -+/** -+ * ispccdc_enable_lpf - Enables the Low-Pass Filter (LPF). 
-+ * @enable: 0 Disables LPF, 1 Enables LPF -+ **/ -+static void ispccdc_enable_lpf(struct isp_ccdc_device *isp_ccdc, u8 enable) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE, -+ ~ISPCCDC_SYN_MODE_LPF, -+ enable ? ISPCCDC_SYN_MODE_LPF : 0); -+} -+ -+/** -+ * ispccdc_config_alaw - Configures the input width for A-law. -+ * @ipwidth: Input width for A-law -+ **/ -+static void ispccdc_config_alaw(struct isp_ccdc_device *isp_ccdc, -+ enum alaw_ipwidth ipwidth) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_writel(dev, ipwidth << ISPCCDC_ALAW_GWDI_SHIFT, -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW); -+} -+ -+/** -+ * ispccdc_enable_alaw - Enables the A-law compression. -+ * @enable: 0 - Disables A-law, 1 - Enables A-law -+ **/ -+static void ispccdc_enable_alaw(struct isp_ccdc_device *isp_ccdc, u8 enable) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW, -+ ~ISPCCDC_ALAW_CCDTBL, -+ enable ? ISPCCDC_ALAW_CCDTBL : 0); -+} -+ -+/** -+ * ispccdc_config_imgattr - Configures the sensor image specific attributes. -+ * @colptn: Color pattern of the sensor. -+ **/ -+static void ispccdc_config_imgattr(struct isp_ccdc_device *isp_ccdc, u32 colptn) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ isp_reg_writel(dev, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN); -+} -+ -+/** -+ * ispccdc_validate_config_lsc - Check that LSC configuration is valid. -+ * @lsc_cfg: the LSC configuration to check. -+ * @pipe: if not NULL, verify the table size against CCDC input size. -+ * -+ * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid. -+ **/ -+static int ispccdc_validate_config_lsc(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_lsc_config *lsc_cfg, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ unsigned int paxel_width, paxel_height; -+ unsigned int paxel_shift_x, paxel_shift_y; -+ unsigned int min_width, min_height, min_size; -+ unsigned int input_width, input_height; -+ -+ paxel_shift_x = lsc_cfg->gain_mode_m; -+ paxel_shift_y = lsc_cfg->gain_mode_n; -+ -+ if ((paxel_shift_x < 2) || (paxel_shift_x > 6) || -+ (paxel_shift_y < 2) || (paxel_shift_y > 6)) { -+ dev_dbg(dev, "CCDC: LSC: Invalid paxel size\n"); -+ return -EINVAL; -+ } -+ -+ if (lsc_cfg->offset & 3) { -+ dev_dbg(dev, "CCDC: LSC: Offset must be a multiple of 4\n"); -+ return -EINVAL; -+ } -+ -+ if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) { -+ dev_dbg(dev, "CCDC: LSC: initial_x and y must be even\n"); -+ return -EINVAL; -+ } -+ -+ if (!pipe) -+ return 0; -+ -+ input_width = pipe->ccdc_in_w; -+ input_height = pipe->ccdc_in_h; -+ -+ /* Calculate minimum bytesize for validation */ -+ paxel_width = 1 << paxel_shift_x; -+ min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1) -+ >> paxel_shift_x) + 1; -+ -+ paxel_height = 1 << paxel_shift_y; -+ min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1) -+ >> paxel_shift_y) + 1; -+ -+ min_size = 4 * min_width * min_height; -+ if (min_size > lsc_cfg->size) { -+ dev_dbg(dev, "CCDC: LSC: too small table\n"); -+ return -EINVAL; -+ } -+ if (lsc_cfg->offset < (min_width * 4)) { -+ dev_dbg(dev, "CCDC: LSC: Offset is too small\n"); -+ return -EINVAL; -+ } -+ if ((lsc_cfg->size / lsc_cfg->offset) < min_height) { -+ dev_dbg(dev, "CCDC: LSC: Wrong size/offset combination\n"); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+/** -+ * ispccdc_program_lsc - Program Lens Shading Compensation table 
address. -+ **/ -+static void ispccdc_program_lsc(struct isp_ccdc_device *isp_ccdc) -+{ -+ isp_reg_writel(to_device(isp_ccdc), isp_ccdc->lsc_table_inuse, -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE); -+} -+ -+/** -+ * ispccdc_config_lsc - Configures the lens shading compensation module -+ **/ -+static void ispccdc_config_lsc(struct isp_ccdc_device *isp_ccdc) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ struct ispccdc_lsc_config *lsc_cfg = &isp_ccdc->lsc_config; -+ int reg; -+ -+ isp_reg_writel(dev, lsc_cfg->offset, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_TABLE_OFFSET); -+ -+ reg = 0; -+ reg |= lsc_cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT; -+ reg |= lsc_cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT; -+ reg |= lsc_cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT; -+ isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG); -+ -+ reg = 0; -+ reg &= ~ISPCCDC_LSC_INITIAL_X_MASK; -+ reg |= lsc_cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT; -+ reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK; -+ reg |= lsc_cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT; -+ isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_INITIAL); -+} -+ -+/** -+ * ispccdc_enable_lsc - Enables/Disables the Lens Shading Compensation module. -+ * @enable: 0 Disables LSC, 1 Enables LSC. -+ **/ -+static void ispccdc_enable_lsc(struct isp_ccdc_device *isp_ccdc, u8 enable) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ if (enable) { -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL, ISPCTRL_SBL_SHARED_RPORTB -+ | ISPCTRL_SBL_RD_RAM_EN); -+ -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE); -+ } else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_LSC_CONFIG, ~ISPCCDC_LSC_ENABLE); -+ } -+} -+ -+/** -+ * ispccdc_setup_lsc - apply user LSC settings -+ * Consume the new LSC configuration and table set by user space application -+ * and program to CCDC. This function must be called from process context -+ * before streamon when ISP is not yet running. This function does not yet -+ * actually enable LSC, that has to be done separately. -+ */ -+static void ispccdc_setup_lsc(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe) -+{ -+ ispccdc_enable_lsc(isp_ccdc, 0); /* Disable LSC */ -+ if (pipe->ccdc_in == CCDC_RAW && isp_ccdc->lsc_request_enable) { -+ /* LSC is requested to be enabled, so configure it */ -+ if (isp_ccdc->update_lsc_table) { -+ struct isp_device *isp = to_isp_device(isp_ccdc); -+ BUG_ON(isp_ccdc->lsc_table_new == PTR_FREE); -+ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_inuse); -+ isp_ccdc->lsc_table_inuse = isp_ccdc->lsc_table_new; -+ isp_ccdc->lsc_table_new = PTR_FREE; -+ isp_ccdc->update_lsc_table = 0; -+ } -+ ispccdc_config_lsc(isp_ccdc); -+ ispccdc_program_lsc(isp_ccdc); -+ } -+ isp_ccdc->update_lsc_config = 0; -+} -+ -+void ispccdc_lsc_error_handler(struct isp_ccdc_device *isp_ccdc) -+{ -+ ispccdc_enable_lsc(isp_ccdc, 0); -+} -+ -+/** -+ * ispccdc_config_crop - Configures crop parameters for the ISP CCDC. -+ * @left: Left offset of the crop area. -+ * @top: Top offset of the crop area. -+ * @height: Height of the crop area. -+ * @width: Width of the crop area. -+ * -+ * The following restrictions are applied for the crop settings. If incoming -+ * values do not follow these restrictions then we map the settings to the -+ * closest acceptable crop value. -+ * 1) Left offset is always odd. This can be avoided if we enable byte swap -+ * option for incoming data into CCDC. -+ * 2) Top offset is always even. 
-+ * 3) Crop height is always even. -+ * 4) Crop width is always a multiple of 16 pixels -+ **/ -+static void ispccdc_config_crop(struct isp_ccdc_device *isp_ccdc, -+ u32 left, u32 top, u32 height, u32 width) -+{ -+ isp_ccdc->ccdcin_woffset = left + (left % 2); -+ isp_ccdc->ccdcin_hoffset = top + (top % 2); -+ -+ isp_ccdc->crop_w = width - (width % 16); -+ isp_ccdc->crop_h = height + (height % 2); -+ -+ DPRINTK_ISPCCDC("\n\tOffsets L %d T %d W %d H %d\n", -+ isp_ccdc->ccdcin_woffset, -+ isp_ccdc->ccdcin_hoffset, -+ isp_ccdc->crop_w, -+ isp_ccdc->crop_h); -+} -+ -+/** -+ * ispccdc_config_outlineoffset - Configures the output line offset -+ * @offset: Must be twice the Output width and aligned on 32 byte boundary -+ * @oddeven: Specifies the odd/even line pattern to be chosen to store the -+ * output. -+ * @numlines: Set the value 0-3 for +1-4lines, 4-7 for -1-4lines. -+ * -+ * - Configures the output line offset when stored in memory -+ * - Sets the odd/even line pattern to store the output -+ * (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4)) -+ * - Configures the number of even and odd line fields in case of rearranging -+ * the lines. -+ * -+ * Returns 0 if successful, or -EINVAL if the offset is not in 32 byte -+ * boundary. -+ **/ -+static int ispccdc_config_outlineoffset(struct isp_ccdc_device *isp_ccdc, -+ u32 offset, u8 oddeven, u8 numlines) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ if ((offset & ISP_32B_BOUNDARY_OFFSET) == offset) { -+ isp_reg_writel(dev, (offset & 0xFFFF), -+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF); -+ } else { -+ DPRINTK_ISPCCDC("ISP_ERR : Offset should be in 32 byte" -+ " boundary\n"); -+ return -EINVAL; -+ } -+ -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, -+ ~ISPCCDC_SDOFST_FINV); -+ -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, -+ ~ISPCCDC_SDOFST_FOFST_4L); -+ -+ switch (oddeven) { -+ case EVENEVEN: -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, -+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT); -+ break; -+ case ODDEVEN: -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, -+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT); -+ break; -+ case EVENODD: -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, -+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT); -+ break; -+ case ODDODD: -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, -+ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT); -+ break; -+ default: -+ break; -+ } -+ return 0; -+} -+ -+/** -+ * ispccdc_set_outaddr - Sets the memory address where the output will be saved -+ * @addr: 32-bit memory address aligned on 32 byte boundary. -+ * -+ * Sets the memory address where the output will be saved. -+ * -+ * Returns 0 if successful, or -EINVAL if the address is not in the 32 byte -+ * boundary. -+ **/ -+int ispccdc_set_outaddr(struct isp_ccdc_device *isp_ccdc, u32 addr) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ if ((addr & ISP_32B_BOUNDARY_BUF) == addr) { -+ isp_reg_writel(dev, addr, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SDR_ADDR); -+ return 0; -+ } else { -+ DPRINTK_ISPCCDC("ISP_ERR : Address should be in 32 byte" -+ " boundary\n"); -+ return -EINVAL; -+ } -+ -+} -+ -+/** -+ * ispccdc_config_sync_if - Sets the sync i/f params between sensor and CCDC. -+ * @syncif: Structure containing the sync parameters like field state, CCDC in -+ * master/slave mode, raw/yuv data, polarity of data, field, hs, vs -+ * signals. 
-+ **/ -+static void ispccdc_config_sync_if(struct isp_ccdc_device *isp_ccdc, -+ struct ispccdc_syncif syncif) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ u32 syn_mode = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SYN_MODE); -+ -+ syn_mode |= ISPCCDC_SYN_MODE_VDHDEN; -+ -+ if (syncif.fldstat) -+ syn_mode |= ISPCCDC_SYN_MODE_FLDSTAT; -+ else -+ syn_mode &= ~ISPCCDC_SYN_MODE_FLDSTAT; -+ -+ syn_mode &= ISPCCDC_SYN_MODE_INPMOD_MASK; -+ isp_ccdc->syncif_ipmod = syncif.ipmod; -+ -+ switch (syncif.ipmod) { -+ case RAW: -+ break; -+ case YUV16: -+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16; -+ break; -+ case YUV8: -+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR8; -+ break; -+ }; -+ -+ syn_mode &= ISPCCDC_SYN_MODE_DATSIZ_MASK; -+ switch (syncif.datsz) { -+ case DAT8: -+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8; -+ break; -+ case DAT10: -+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10; -+ break; -+ case DAT11: -+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11; -+ break; -+ case DAT12: -+ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12; -+ break; -+ }; -+ -+ if (syncif.fldmode) -+ syn_mode |= ISPCCDC_SYN_MODE_FLDMODE; -+ else -+ syn_mode &= ~ISPCCDC_SYN_MODE_FLDMODE; -+ -+ if (syncif.datapol) -+ syn_mode |= ISPCCDC_SYN_MODE_DATAPOL; -+ else -+ syn_mode &= ~ISPCCDC_SYN_MODE_DATAPOL; -+ -+ if (syncif.fldpol) -+ syn_mode |= ISPCCDC_SYN_MODE_FLDPOL; -+ else -+ syn_mode &= ~ISPCCDC_SYN_MODE_FLDPOL; -+ -+ if (syncif.hdpol) -+ syn_mode |= ISPCCDC_SYN_MODE_HDPOL; -+ else -+ syn_mode &= ~ISPCCDC_SYN_MODE_HDPOL; -+ -+ if (syncif.vdpol) -+ syn_mode |= ISPCCDC_SYN_MODE_VDPOL; -+ else -+ syn_mode &= ~ISPCCDC_SYN_MODE_VDPOL; -+ -+ if (syncif.ccdc_mastermode) { -+ syn_mode |= ISPCCDC_SYN_MODE_FLDOUT | ISPCCDC_SYN_MODE_VDHDOUT; -+ isp_reg_writel(dev, -+ syncif.hs_width << ISPCCDC_HD_VD_WID_HDW_SHIFT -+ | syncif.vs_width << ISPCCDC_HD_VD_WID_VDW_SHIFT, -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_HD_VD_WID); -+ -+ isp_reg_writel(dev, -+ syncif.ppln << ISPCCDC_PIX_LINES_PPLN_SHIFT -+ | syncif.hlprf << ISPCCDC_PIX_LINES_HLPRF_SHIFT, -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_PIX_LINES); -+ } else -+ syn_mode &= ~(ISPCCDC_SYN_MODE_FLDOUT | -+ ISPCCDC_SYN_MODE_VDHDOUT); -+ -+ isp_reg_writel(dev, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); -+ -+ if (!(syncif.bt_r656_en)) { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_REC656IF, ~ISPCCDC_REC656IF_R656ON); -+ } -+} -+ -+/** -+ * Set the value to be used for CCDC_CFG.WENLOG. -+ * w - Value of wenlog. -+ */ -+void ispccdc_set_wenlog(struct isp_ccdc_device *isp_ccdc, u32 wenlog) -+{ -+ isp_ccdc->wenlog = wenlog; -+} -+ -+/** -+ * ispccdc_config_datapath - Specifies the input and output modules for CCDC. -+ * @input: Indicates the module that inputs the image to the CCDC. -+ * @output: Indicates the module to which the CCDC outputs the image. -+ * -+ * Configures the default configuration for the CCDC to work with. -+ * -+ * The valid values for the input are CCDC_RAW (0), CCDC_YUV_SYNC (1), -+ * CCDC_YUV_BT (2), and CCDC_OTHERS (3). -+ * -+ * The valid values for the output are CCDC_YUV_RSZ (0), CCDC_YUV_MEM_RSZ (1), -+ * CCDC_OTHERS_VP (2), CCDC_OTHERS_MEM (3), CCDC_OTHERS_VP_MEM (4). -+ * -+ * Returns 0 if successful, or -EINVAL if wrong I/O combination or wrong input -+ * or output values. 
-+ **/ -+static int ispccdc_config_datapath(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ u32 syn_mode = 0; -+ struct ispccdc_vp vpcfg; -+ struct ispccdc_syncif syncif; -+ struct ispccdc_bclamp blkcfg; -+ -+ u32 colptn = ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT | -+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT | -+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT | -+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT | -+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT | -+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT | -+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT | -+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT | -+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT | -+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT | -+ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT | -+ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT | -+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT | -+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT | -+ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT | -+ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT; -+ -+ syn_mode = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); -+ -+ switch (pipe->ccdc_out) { -+ case CCDC_YUV_RSZ: -+ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ; -+ syn_mode &= ~ISPCCDC_SYN_MODE_WEN; -+ break; -+ -+ case CCDC_YUV_MEM_RSZ: -+ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ; -+ isp_ccdc->wen = 1; -+ syn_mode |= ISPCCDC_SYN_MODE_WEN; -+ break; -+ -+ case CCDC_OTHERS_VP: -+ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; -+ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; -+ syn_mode &= ~ISPCCDC_SYN_MODE_WEN; -+ vpcfg.bitshift_sel = BIT9_0; -+ vpcfg.freq_sel = PIXCLKBY2; -+ ispccdc_config_vp(isp_ccdc, vpcfg); -+ ispccdc_enable_vp(isp_ccdc, 1); -+ break; -+ -+ case CCDC_OTHERS_MEM: -+ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; -+ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; -+ syn_mode |= ISPCCDC_SYN_MODE_WEN; -+ syn_mode &= ~ISPCCDC_SYN_MODE_EXWEN; -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, -+ ~ISPCCDC_CFG_WENLOG); -+ vpcfg.bitshift_sel = BIT11_2; -+ vpcfg.freq_sel = PIXCLKBY2; -+ ispccdc_config_vp(isp_ccdc, vpcfg); -+ ispccdc_enable_vp(isp_ccdc, 0); -+ break; -+ -+ case CCDC_OTHERS_VP_MEM: -+ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; -+ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; -+ syn_mode |= ISPCCDC_SYN_MODE_WEN; -+ syn_mode &= ~ISPCCDC_SYN_MODE_EXWEN; -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, -+ ~ISPCCDC_CFG_WENLOG, isp_ccdc->wenlog); -+ vpcfg.bitshift_sel = BIT9_0; -+ vpcfg.freq_sel = PIXCLKBY2; -+ ispccdc_config_vp(isp_ccdc, vpcfg); -+ ispccdc_enable_vp(isp_ccdc, 1); -+ break; -+ default: -+ DPRINTK_ISPCCDC("ISP_ERR: Wrong CCDC Output\n"); -+ return -EINVAL; -+ }; -+ -+ isp_reg_writel(dev, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); -+ -+ switch (pipe->ccdc_in) { -+ case CCDC_RAW: -+ syncif.ccdc_mastermode = 0; -+ syncif.datapol = 0; -+ syncif.datsz = DAT10; -+ syncif.fldmode = 0; -+ syncif.fldout = 0; -+ syncif.fldpol = 0; -+ syncif.fldstat = 0; -+ syncif.hdpol = 0; -+ syncif.ipmod = RAW; -+ syncif.vdpol = 0; -+ ispccdc_config_sync_if(isp_ccdc, syncif); -+ ispccdc_config_imgattr(isp_ccdc, colptn); -+ blkcfg.oblen = 0; -+ blkcfg.dcsubval = 64; -+ ispccdc_config_black_clamp(isp_ccdc, blkcfg); -+ break; -+ case CCDC_YUV_SYNC: -+ syncif.ccdc_mastermode = 0; -+ syncif.datapol = 0; -+ syncif.datsz = DAT8; -+ syncif.fldmode = 0; -+ syncif.fldout = 0; -+ syncif.fldpol = 0; -+ syncif.fldstat = 0; 
-+ syncif.hdpol = 0; -+ syncif.ipmod = YUV16; -+ syncif.vdpol = 1; -+ ispccdc_config_imgattr(isp_ccdc, 0); -+ ispccdc_config_sync_if(isp_ccdc, syncif); -+ blkcfg.oblen = 0; -+ blkcfg.dcsubval = 0; -+ ispccdc_config_black_clamp(isp_ccdc, blkcfg); -+ break; -+ case CCDC_YUV_BT: -+ break; -+ case CCDC_OTHERS: -+ break; -+ default: -+ DPRINTK_ISPCCDC("ISP_ERR: Wrong CCDC Input\n"); -+ return -EINVAL; -+ } -+ -+ ispccdc_print_status(isp_ccdc, pipe); -+ isp_print_status(dev); -+ return 0; -+} -+ -+/** -+ * ispccdc_try_size - Checks if requested Input/output dimensions are valid -+ * @input_w: input width for the CCDC in number of pixels per line -+ * @input_h: input height for the CCDC in number of lines -+ * @output_w: output width from the CCDC in number of pixels per line -+ * @output_h: output height for the CCDC in number of lines -+ * -+ * Calculates the number of pixels cropped if the reformater is disabled, -+ * Fills up the output width and height variables in the isp_ccdc structure. -+ * -+ * Returns 0 if successful, or -EINVAL if the input width is less than 2 pixels -+ **/ -+int ispccdc_try_pipeline(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ if (pipe->ccdc_in_w < 32 || pipe->ccdc_in_h < 32) { -+ DPRINTK_ISPCCDC("ISP_ERR: CCDC cannot handle input width less" -+ " than 32 pixels or height less than 32\n"); -+ return -EINVAL; -+ } -+ -+ /* CCDC does not convert the image format */ -+ if ((pipe->ccdc_in == CCDC_RAW || pipe->ccdc_in == CCDC_OTHERS) -+ && pipe->ccdc_out == CCDC_YUV_RSZ) { -+ dev_info(dev, "wrong CCDC I/O Combination\n"); -+ return -EINVAL; -+ } -+ -+ pipe->ccdc_out_w = pipe->ccdc_in_w; -+ pipe->ccdc_out_h = pipe->ccdc_in_h; -+ -+ if (!isp_ccdc->refmt_en -+ && pipe->ccdc_out != CCDC_OTHERS_MEM -+ && pipe->ccdc_out != CCDC_OTHERS_VP_MEM) -+ pipe->ccdc_out_h -= 1; -+ -+ pipe->ccdc_out_w_img = pipe->ccdc_out_w; -+ /* Round up to nearest 16 pixels. */ -+ pipe->ccdc_out_w = ALIGN(pipe->ccdc_out_w, 0x10); -+ -+ return 0; -+} -+ -+/** -+ * ispccdc_config_size - Configure the dimensions of the CCDC input/output -+ * @input_w: input width for the CCDC in number of pixels per line -+ * @input_h: input height for the CCDC in number of lines -+ * @output_w: output width from the CCDC in number of pixels per line -+ * @output_h: output height for the CCDC in number of lines -+ * -+ * Configures the appropriate values stored in the isp_ccdc structure to -+ * HORZ/VERT_INFO registers and the VP_OUT depending on whether the image -+ * is stored in memory or given to the another module in the ISP pipeline. -+ * -+ * Returns 0 if successful, or -EINVAL if try_size was not called before to -+ * validate the requested dimensions. 
-+ **/ -+int ispccdc_s_pipeline(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ int rval; -+ -+ rval = ispccdc_config_datapath(isp_ccdc, pipe); -+ if (rval) -+ return rval; -+ -+ isp_reg_writel(dev, -+ (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) | -+ (pipe->ccdc_in_w << -+ ISPCCDC_FMT_HORZ_FMTLNH_SHIFT), -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMT_HORZ); -+ isp_reg_writel(dev, -+ (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) | -+ (pipe->ccdc_in_h << -+ ISPCCDC_FMT_VERT_FMTLNV_SHIFT), -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMT_VERT); -+ isp_reg_writel(dev, -+ 0 << ISPCCDC_VERT_START_SLV0_SHIFT, -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VERT_START); -+ isp_reg_writel(dev, (pipe->ccdc_out_h - 1) << -+ ISPCCDC_VERT_LINES_NLV_SHIFT, -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VERT_LINES); -+ isp_reg_writel(dev, -+ 0 << ISPCCDC_HORZ_INFO_SPH_SHIFT -+ | ((pipe->ccdc_out_w - 1) -+ << ISPCCDC_HORZ_INFO_NPH_SHIFT), -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_HORZ_INFO); -+ ispccdc_config_outlineoffset(isp_ccdc, -+ pipe->ccdc_out_w * ISP_BYTES_PER_PIXEL, -+ 0, 0); -+ isp_reg_writel(dev, -+ (((pipe->ccdc_out_h - 2) & -+ ISPCCDC_VDINT_0_MASK) << -+ ISPCCDC_VDINT_0_SHIFT) | -+ ((0 & ISPCCDC_VDINT_1_MASK) << -+ ISPCCDC_VDINT_1_SHIFT), -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VDINT); -+ -+ if (pipe->ccdc_out == CCDC_OTHERS_MEM) -+ isp_reg_writel(dev, 0, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VP_OUT); -+ else -+ isp_reg_writel(dev, -+ (pipe->ccdc_out_w -+ << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) | -+ ((pipe->ccdc_out_h - 1) << -+ ISPCCDC_VP_OUT_VERT_NUM_SHIFT), -+ OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VP_OUT); -+ -+ ispccdc_setup_lsc(isp_ccdc, pipe); -+ -+ return 0; -+} -+ -+/** -+ * ispccdc_enable - Enables the CCDC module. -+ * @enable: 0 Disables CCDC, 1 Enables CCDC -+ * -+ * Client should configure all the sub modules in CCDC before this. -+ **/ -+void ispccdc_enable(struct isp_ccdc_device *isp_ccdc, u8 enable) -+{ -+ struct isp_device *isp = to_isp_device(isp_ccdc); -+ int enable_lsc; -+ -+ enable_lsc = enable && -+ isp->pipeline.ccdc_in == CCDC_RAW && -+ isp_ccdc->lsc_request_enable && -+ ispccdc_validate_config_lsc(isp_ccdc, -+ &isp_ccdc->lsc_config, &isp->pipeline) == 0; -+ ispccdc_enable_lsc(isp_ccdc, enable_lsc); -+ isp_reg_and_or(isp->dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR, -+ ~ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0); -+} -+ -+/* -+ * Returns zero if the CCDC is idle and the image has been written to -+ * memory, too. -+ */ -+int ispccdc_sbl_busy(void *_isp_ccdc) -+{ -+ struct isp_ccdc_device *isp_ccdc = _isp_ccdc; -+ struct device *dev = to_device(isp_ccdc); -+ -+ return ispccdc_busy(isp_ccdc) -+ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) & -+ ISPSBL_CCDC_WR_0_DATA_READY) -+ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) & -+ ISPSBL_CCDC_WR_0_DATA_READY) -+ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) & -+ ISPSBL_CCDC_WR_0_DATA_READY) -+ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) & -+ ISPSBL_CCDC_WR_0_DATA_READY); -+} -+ -+/** -+ * ispccdc_busy - Gets busy state of the CCDC. 
-+ **/ -+int ispccdc_busy(struct isp_ccdc_device *isp_ccdc) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) & -+ ISPCCDC_PCR_BUSY; -+} -+ -+void ispccdc_config_shadow_registers(struct isp_ccdc_device *isp_ccdc) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&isp_ccdc->lock, flags); -+ if (isp_ccdc->shadow_update) -+ goto out; -+ -+#if 0 /* FIXME: Do not support on-the-fly-LSC configuration yet */ -+ if (isp_ccdc->update_lsc_config) { -+ ispccdc_config_lsc(isp_ccdc); -+ ispccdc_enable_lsc(isp_ccdc, isp_ccdc->lsc_request_enable); -+ isp_ccdc->update_lsc_config = 0; -+ } -+ -+ if (isp_ccdc->update_lsc_table) { -+ u32 n = isp_ccdc->lsc_table_new; -+ /* Swap tables--no need to vfree in interrupt context */ -+ isp_ccdc->lsc_table_new = isp_ccdc->lsc_table_inuse; -+ isp_ccdc->lsc_table_inuse = n; -+ ispccdc_program_lsc(isp_ccdc); -+ isp_ccdc->update_lsc_table = 0; -+ } -+#endif -+ -+out: -+ spin_unlock_irqrestore(&isp_ccdc->lock, flags); -+} -+ -+/** -+ * ispccdc_config - Sets CCDC configuration from userspace -+ * @userspace_add: Structure containing CCDC configuration sent from userspace. -+ * -+ * Returns 0 if successful, -EINVAL if the pointer to the configuration -+ * structure is null, or the copy_from_user function fails to copy user space -+ * memory to kernel space memory. -+ **/ -+int ispccdc_config(struct isp_ccdc_device *isp_ccdc, -+ void *userspace_add) -+{ -+ struct isp_device *isp = to_isp_device(isp_ccdc); -+ struct ispccdc_bclamp bclamp_t; -+ struct ispccdc_blcomp blcomp_t; -+ struct ispccdc_fpc fpc_t; -+ struct ispccdc_culling cull_t; -+ struct ispccdc_update_config *ccdc_struct; -+ unsigned long flags; -+ int ret = 0; -+ -+ if (userspace_add == NULL) -+ return -EINVAL; -+ -+ ccdc_struct = userspace_add; -+ -+ spin_lock_irqsave(&isp_ccdc->lock, flags); -+ isp_ccdc->shadow_update = 1; -+ spin_unlock_irqrestore(&isp_ccdc->lock, flags); -+ -+ if (ISP_ABS_CCDC_ALAW & ccdc_struct->flag) { -+ if (ISP_ABS_CCDC_ALAW & ccdc_struct->update) -+ ispccdc_config_alaw(isp_ccdc, ccdc_struct->alawip); -+ ispccdc_enable_alaw(isp_ccdc, 1); -+ } else if (ISP_ABS_CCDC_ALAW & ccdc_struct->update) -+ ispccdc_enable_alaw(isp_ccdc, 0); -+ -+ if (ISP_ABS_CCDC_LPF & ccdc_struct->flag) -+ ispccdc_enable_lpf(isp_ccdc, 1); -+ else -+ ispccdc_enable_lpf(isp_ccdc, 0); -+ -+ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->flag) { -+ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) { -+ if (copy_from_user(&bclamp_t, (struct ispccdc_bclamp *) -+ ccdc_struct->bclamp, -+ sizeof(struct ispccdc_bclamp))) { -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ ispccdc_enable_black_clamp(isp_ccdc, 1); -+ ispccdc_config_black_clamp(isp_ccdc, bclamp_t); -+ } else -+ ispccdc_enable_black_clamp(isp_ccdc, 1); -+ } else { -+ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) { -+ if (copy_from_user(&bclamp_t, (struct ispccdc_bclamp *) -+ ccdc_struct->bclamp, -+ sizeof(struct ispccdc_bclamp))) { -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ ispccdc_enable_black_clamp(isp_ccdc, 0); -+ ispccdc_config_black_clamp(isp_ccdc, bclamp_t); -+ } -+ } -+ -+ if (ISP_ABS_CCDC_BCOMP & ccdc_struct->update) { -+ if (copy_from_user(&blcomp_t, (struct ispccdc_blcomp *) -+ ccdc_struct->blcomp, -+ sizeof(blcomp_t))) { -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ ispccdc_config_black_comp(isp_ccdc, blcomp_t); -+ } -+ -+ if (ISP_ABS_CCDC_FPC & ccdc_struct->flag) { -+ if (ISP_ABS_CCDC_FPC & ccdc_struct->update) { -+ if (copy_from_user(&fpc_t, (struct ispccdc_fpc *) -+ ccdc_struct->fpc, -+ sizeof(fpc_t))) { 
-+ ret = -EFAULT; -+ goto out; -+ } -+ isp_ccdc->fpc_table_add = kmalloc(64 + fpc_t.fpnum * 4, -+ GFP_KERNEL | GFP_DMA); -+ if (!isp_ccdc->fpc_table_add) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ while (((unsigned long)isp_ccdc->fpc_table_add -+ & 0xFFFFFFC0) -+ != (unsigned long)isp_ccdc->fpc_table_add) -+ isp_ccdc->fpc_table_add++; -+ -+ isp_ccdc->fpc_table_add_m = iommu_kmap( -+ isp->iommu, -+ 0, -+ virt_to_phys(isp_ccdc->fpc_table_add), -+ fpc_t.fpnum * 4, -+ IOMMU_FLAG); -+ /* FIXME: Correct unwinding */ -+ BUG_ON(IS_ERR_VALUE(isp_ccdc->fpc_table_add_m)); -+ -+ if (copy_from_user(isp_ccdc->fpc_table_add, -+ (u32 *)fpc_t.fpcaddr, -+ fpc_t.fpnum * 4)) { -+ ret = -EFAULT; -+ goto out; -+ } -+ -+ fpc_t.fpcaddr = isp_ccdc->fpc_table_add_m; -+ ispccdc_config_fpc(isp_ccdc, fpc_t); -+ } -+ ispccdc_enable_fpc(isp_ccdc, 1); -+ } else if (ISP_ABS_CCDC_FPC & ccdc_struct->update) -+ ispccdc_enable_fpc(isp_ccdc, 0); -+ -+ if (ISP_ABS_CCDC_CULL & ccdc_struct->update) { -+ if (copy_from_user(&cull_t, (struct ispccdc_culling *) -+ ccdc_struct->cull, -+ sizeof(cull_t))) { -+ ret = -EFAULT; -+ goto out; -+ } -+ ispccdc_config_culling(isp_ccdc, cull_t); -+ } -+ -+ if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->update) { -+ if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->flag) { -+ struct ispccdc_lsc_config cfg; -+ if (copy_from_user(&cfg, ccdc_struct->lsc_cfg, -+ sizeof(cfg))) { -+ ret = -EFAULT; -+ goto out; -+ } -+ ret = ispccdc_validate_config_lsc(isp_ccdc, &cfg, -+ isp->running == ISP_RUNNING ? -+ &isp->pipeline : NULL); -+ if (ret) -+ goto out; -+ memcpy(&isp_ccdc->lsc_config, &cfg, -+ sizeof(isp_ccdc->lsc_config)); -+ isp_ccdc->lsc_request_enable = 1; -+ } else { -+ isp_ccdc->lsc_request_enable = 0; -+ } -+ isp_ccdc->update_lsc_config = 1; -+ } -+ -+ if (ISP_ABS_TBL_LSC & ccdc_struct->update) { -+ void *n; -+ if (isp_ccdc->lsc_table_new != PTR_FREE) -+ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_new); -+ isp_ccdc->lsc_table_new = iommu_vmalloc(isp->iommu, 0, -+ isp_ccdc->lsc_config.size, IOMMU_FLAG); -+ if (IS_ERR_VALUE(isp_ccdc->lsc_table_new)) { -+ /* Disable LSC if table can not be allocated */ -+ isp_ccdc->lsc_table_new = PTR_FREE; -+ isp_ccdc->lsc_request_enable = 0; -+ isp_ccdc->update_lsc_config = 1; -+ ret = -ENOMEM; -+ goto out; -+ } -+ n = da_to_va(isp->iommu, isp_ccdc->lsc_table_new); -+ if (copy_from_user(n, ccdc_struct->lsc, -+ isp_ccdc->lsc_config.size)) { -+ ret = -EFAULT; -+ goto out; -+ } -+ isp_ccdc->update_lsc_table = 1; -+ } -+ -+ if (isp->running == ISP_STOPPED && -+ (isp_ccdc->update_lsc_table || isp_ccdc->update_lsc_config)) -+ ispccdc_setup_lsc(isp_ccdc, &isp->pipeline); -+ -+ if (ISP_ABS_CCDC_COLPTN & ccdc_struct->update) -+ ispccdc_config_imgattr(isp_ccdc, ccdc_struct->colptn); -+ -+out: -+ if (ret == -EFAULT) -+ dev_err(to_device(isp_ccdc), -+ "ccdc: user provided bad configuration data address"); -+ -+ if (ret == -ENOMEM) -+ dev_err(to_device(isp_ccdc), -+ "ccdc: can not allocate memory"); -+ -+ isp_ccdc->shadow_update = 0; -+ return ret; -+} -+ -+/** -+ * ispccdc_request - Reserves the CCDC module. -+ * -+ * Reserves the CCDC module and assures that is used only once at a time. -+ * -+ * Returns 0 if successful, or -EBUSY if CCDC module is busy. 
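The request/free pair documented here and just below gives a caller exclusive ownership of the CCDC block. As a minimal editorial sketch of the expected call pattern (not part of the patch; the caller function is hypothetical and assumes the declarations from ispccdc.h added later in this hunk):

/* Hypothetical caller: reserve the CCDC, apply a configuration, release. */
static int ccdc_apply_user_config(struct isp_ccdc_device *isp_ccdc, void *cfg)
{
	int ret;

	ret = ispccdc_request(isp_ccdc);	/* -EBUSY if someone else holds it */
	if (ret)
		return ret;

	ret = ispccdc_config(isp_ccdc, cfg);	/* cfg is a userspace pointer; members are copied in */

	ispccdc_free(isp_ccdc);			/* release; -EINVAL only if it was not held */
	return ret;
}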
-+ **/ -+int ispccdc_request(struct isp_ccdc_device *isp_ccdc) -+{ -+ struct device *dev = to_device(isp_ccdc); -+ -+ mutex_lock(&isp_ccdc->mutexlock); -+ if (isp_ccdc->ccdc_inuse) { -+ mutex_unlock(&isp_ccdc->mutexlock); -+ DPRINTK_ISPCCDC("ISP_ERR : CCDC Module Busy\n"); -+ return -EBUSY; -+ } -+ -+ isp_ccdc->ccdc_inuse = 1; -+ mutex_unlock(&isp_ccdc->mutexlock); -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, -+ ISPCTRL_CCDC_RAM_EN | ISPCTRL_CCDC_CLK_EN | -+ ISPCTRL_SBL_WR1_RAM_EN); -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, -+ ISPCCDC_CFG_VDLC); -+ return 0; -+} -+ -+/** -+ * ispccdc_free - Frees the CCDC module. -+ * -+ * Frees the CCDC module so it can be used by another process. -+ * -+ * Returns 0 if successful, or -EINVAL if module has been already freed. -+ **/ -+int ispccdc_free(struct isp_ccdc_device *isp_ccdc) -+{ -+ mutex_lock(&isp_ccdc->mutexlock); -+ if (!isp_ccdc->ccdc_inuse) { -+ mutex_unlock(&isp_ccdc->mutexlock); -+ DPRINTK_ISPCCDC("ISP_ERR: CCDC Module already freed\n"); -+ return -EINVAL; -+ } -+ -+ isp_ccdc->ccdc_inuse = 0; -+ mutex_unlock(&isp_ccdc->mutexlock); -+ isp_reg_and(to_device(isp_ccdc), OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL, ~(ISPCTRL_CCDC_CLK_EN | -+ ISPCTRL_CCDC_RAM_EN | -+ ISPCTRL_SBL_WR1_RAM_EN)); -+ return 0; -+} -+ -+/** -+ * ispccdc_save_context - Saves the values of the CCDC module registers -+ **/ -+void ispccdc_save_context(struct device *dev) -+{ -+ DPRINTK_ISPCCDC("Saving context\n"); -+ isp_save_context(dev, ispccdc_reg_list); -+} -+ -+/** -+ * ispccdc_restore_context - Restores the values of the CCDC module registers -+ **/ -+void ispccdc_restore_context(struct device *dev) -+{ -+ DPRINTK_ISPCCDC("Restoring context\n"); -+ isp_restore_context(dev, ispccdc_reg_list); -+} -+ -+/** -+ * isp_ccdc_init - CCDC module initialization. -+ * -+ * Always returns 0 -+ **/ -+int __init isp_ccdc_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_ccdc_device *isp_ccdc = &isp->isp_ccdc; -+ void *p; -+ -+ isp_ccdc->ccdc_inuse = 0; -+ ispccdc_config_crop(isp_ccdc, 0, 0, 0, 0); -+ mutex_init(&isp_ccdc->mutexlock); -+ -+ isp_ccdc->update_lsc_config = 0; -+ isp_ccdc->lsc_request_enable = 1; -+ -+ isp_ccdc->lsc_config.initial_x = 0; -+ isp_ccdc->lsc_config.initial_y = 0; -+ isp_ccdc->lsc_config.gain_mode_n = 0x6; -+ isp_ccdc->lsc_config.gain_mode_m = 0x6; -+ isp_ccdc->lsc_config.gain_format = 0x4; -+ isp_ccdc->lsc_config.offset = 0x60; -+ isp_ccdc->lsc_config.size = LSC_TABLE_INIT_SIZE; -+ -+ isp_ccdc->update_lsc_table = 0; -+ isp_ccdc->lsc_table_new = PTR_FREE; -+ isp_ccdc->lsc_table_inuse = iommu_vmalloc(isp->iommu, 0, -+ LSC_TABLE_INIT_SIZE, IOMMU_FLAG); -+ if (IS_ERR_VALUE(isp_ccdc->lsc_table_inuse)) -+ return -ENOMEM; -+ p = da_to_va(isp->iommu, isp_ccdc->lsc_table_inuse); -+ memset(p, 0x40, LSC_TABLE_INIT_SIZE); -+ -+ isp_ccdc->shadow_update = 0; -+ spin_lock_init(&isp_ccdc->lock); -+ -+ return 0; -+} -+ -+/** -+ * isp_ccdc_cleanup - CCDC module cleanup. 
-+ **/ -+void isp_ccdc_cleanup(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_ccdc_device *isp_ccdc = &isp->isp_ccdc; -+ -+ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_inuse); -+ if (isp_ccdc->lsc_table_new != PTR_FREE) -+ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_new); -+ -+ if (isp_ccdc->fpc_table_add_m != 0) { -+ iommu_kunmap(isp->iommu, isp_ccdc->fpc_table_add_m); -+ kfree(isp_ccdc->fpc_table_add); -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispccdc.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispccdc.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispccdc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispccdc.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,210 @@ -+/* -+ * ispccdc.h -+ * -+ * Driver header file for CCDC module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Senthilvadivu Guruswamy -+ * Pallavi Kulkarni -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef OMAP_ISP_CCDC_H -+#define OMAP_ISP_CCDC_H -+ -+#include -+ -+/* Enumeration constants for CCDC input output format */ -+enum ccdc_input { -+ CCDC_RAW, -+ CCDC_YUV_SYNC, -+ CCDC_YUV_BT, -+ CCDC_OTHERS -+}; -+ -+enum ccdc_output { -+ CCDC_YUV_RSZ, -+ CCDC_YUV_MEM_RSZ, -+ CCDC_OTHERS_VP, -+ CCDC_OTHERS_MEM, -+ CCDC_OTHERS_VP_MEM -+}; -+ -+/* Enumeration constants for the sync interface parameters */ -+enum inpmode { -+ RAW, -+ YUV16, -+ YUV8 -+}; -+enum datasize { -+ DAT8, -+ DAT10, -+ DAT11, -+ DAT12 -+}; -+ -+ -+/** -+ * struct ispccdc_syncif - Structure for Sync Interface between sensor and CCDC -+ * @ccdc_mastermode: Master mode. 1 - Master, 0 - Slave. -+ * @fldstat: Field state. 0 - Odd Field, 1 - Even Field. -+ * @ipmod: Input mode. -+ * @datsz: Data size. -+ * @fldmode: 0 - Progressive, 1 - Interlaced. -+ * @datapol: 0 - Positive, 1 - Negative. -+ * @fldpol: 0 - Positive, 1 - Negative. -+ * @hdpol: 0 - Positive, 1 - Negative. -+ * @vdpol: 0 - Positive, 1 - Negative. -+ * @fldout: 0 - Input, 1 - Output. -+ * @hs_width: Width of the Horizontal Sync pulse, used for HS/VS Output. -+ * @vs_width: Width of the Vertical Sync pulse, used for HS/VS Output. -+ * @ppln: Number of pixels per line, used for HS/VS Output. -+ * @hlprf: Number of half lines per frame, used for HS/VS Output. -+ * @bt_r656_en: 1 - Enable ITU-R BT656 mode, 0 - Sync mode. -+ */ -+struct ispccdc_syncif { -+ u8 ccdc_mastermode; -+ u8 fldstat; -+ enum inpmode ipmod; -+ enum datasize datsz; -+ u8 fldmode; -+ u8 datapol; -+ u8 fldpol; -+ u8 hdpol; -+ u8 vdpol; -+ u8 fldout; -+ u8 hs_width; -+ u8 vs_width; -+ u8 ppln; -+ u8 hlprf; -+ u8 bt_r656_en; -+}; -+ -+/** -+ * ispccdc_refmt - Structure for Reformatter parameters -+ * @lnalt: Line alternating mode enable. 0 - Enable, 1 - Disable. -+ * @lnum: Number of output lines from 1 input line. 1 to 4 lines. -+ * @plen_even: Number of program entries in even line minus 1. -+ * @plen_odd: Number of program entries in odd line minus 1. 
-+ * @prgeven0: Program entries 0-7 for even lines register -+ * @prgeven1: Program entries 8-15 for even lines register -+ * @prgodd0: Program entries 0-7 for odd lines register -+ * @prgodd1: Program entries 8-15 for odd lines register -+ * @fmtaddr0: Output line in which the original pixel is to be placed -+ * @fmtaddr1: Output line in which the original pixel is to be placed -+ * @fmtaddr2: Output line in which the original pixel is to be placed -+ * @fmtaddr3: Output line in which the original pixel is to be placed -+ * @fmtaddr4: Output line in which the original pixel is to be placed -+ * @fmtaddr5: Output line in which the original pixel is to be placed -+ * @fmtaddr6: Output line in which the original pixel is to be placed -+ * @fmtaddr7: Output line in which the original pixel is to be placed -+ */ -+struct ispccdc_refmt { -+ u8 lnalt; -+ u8 lnum; -+ u8 plen_even; -+ u8 plen_odd; -+ u32 prgeven0; -+ u32 prgeven1; -+ u32 prgodd0; -+ u32 prgodd1; -+ u32 fmtaddr0; -+ u32 fmtaddr1; -+ u32 fmtaddr2; -+ u32 fmtaddr3; -+ u32 fmtaddr4; -+ u32 fmtaddr5; -+ u32 fmtaddr6; -+ u32 fmtaddr7; -+}; -+ -+/** -+ * struct isp_ccdc_device - Structure for the CCDC module to store its own -+ information -+ * @ccdc_inuse: Flag to determine if CCDC has been reserved or not (0 or 1). -+ * @ccdcout_w: CCDC output width. -+ * @ccdcout_h: CCDC output height. -+ * @ccdcin_w: CCDC input width. -+ * @ccdcin_h: CCDC input height. -+ * @ccdcin_woffset: CCDC input horizontal offset. -+ * @ccdcin_hoffset: CCDC input vertical offset. -+ * @crop_w: Crop width. -+ * @crop_h: Crop weight. -+ * @ccdc_inpfmt: CCDC input format. -+ * @ccdc_outfmt: CCDC output format. -+ * @vpout_en: Video port output enable. -+ * @wen: Data write enable. -+ * @exwen: External data write enable. -+ * @refmt_en: Reformatter enable. -+ * @ccdcslave: CCDC slave mode enable. -+ * @syncif_ipmod: Image -+ * @obclamp_en: Data input format. -+ * @mutexlock: Mutex used to get access to the CCDC. 
-+ * @update_lsc_config: Set when user changes lsc_config -+ * @lsc_request_enable: Whether LSC is requested to be enabled -+ * @lsc_config: LSC config set by user -+ * @update_lsc_table: Set when user provides a new LSC table to lsc_table_new -+ * @lsc_table_new: LSC table set by user, ISP address -+ * @lsc_table_inuse: LSC table currently in use, ISP address -+ * @shadow_update: non-zero when user is updating CCDC configuration -+ * @lock: serializes shadow_update with interrupt handler -+ */ -+struct isp_ccdc_device { -+ u8 ccdc_inuse; -+ u32 ccdcin_woffset; -+ u32 ccdcin_hoffset; -+ u32 crop_w; -+ u32 crop_h; -+ u8 vpout_en; -+ u8 wen; -+ u8 exwen; -+ u8 refmt_en; -+ u8 ccdcslave; -+ u8 syncif_ipmod; -+ u8 obclamp_en; -+ struct mutex mutexlock; /* For checking/modifying ccdc_inuse */ -+ u32 wenlog; -+ unsigned long fpc_table_add_m; -+ u32 *fpc_table_add; -+ -+ /* LSC related fields */ -+ u8 update_lsc_config; -+ u8 lsc_request_enable; -+ struct ispccdc_lsc_config lsc_config; -+ u8 update_lsc_table; -+ u32 lsc_table_new; -+ u32 lsc_table_inuse; -+ -+ int shadow_update; -+ spinlock_t lock; -+}; -+ -+void ispccdc_lsc_error_handler(struct isp_ccdc_device *isp_ccdc); -+int ispccdc_set_outaddr(struct isp_ccdc_device *isp_ccdc, u32 addr); -+void ispccdc_set_wenlog(struct isp_ccdc_device *isp_ccdc, u32 wenlog); -+int ispccdc_try_pipeline(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe); -+int ispccdc_s_pipeline(struct isp_ccdc_device *isp_ccdc, -+ struct isp_pipeline *pipe); -+void ispccdc_enable(struct isp_ccdc_device *isp_ccdc, u8 enable); -+int ispccdc_sbl_busy(void *_isp_ccdc); -+int ispccdc_busy(struct isp_ccdc_device *isp_ccdc); -+void ispccdc_config_shadow_registers(struct isp_ccdc_device *isp_ccdc); -+int ispccdc_config(struct isp_ccdc_device *isp_ccdc, -+ void *userspace_add); -+int ispccdc_request(struct isp_ccdc_device *isp_ccdc); -+int ispccdc_free(struct isp_ccdc_device *isp_ccdc); -+void ispccdc_save_context(struct device *dev); -+void ispccdc_restore_context(struct device *dev); -+ -+#endif /* OMAP_ISP_CCDC_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispcsi2.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispcsi2.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispcsi2.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispcsi2.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,2177 @@ -+/* -+ * ispcsi2.c -+ * -+ * Driver Library for ISP CSI Control module in TI's OMAP3 Camera ISP -+ * ISP CSI interface and IRQ related APIs are defined here. -+ * -+ * Copyright (C) 2009 Texas Instruments. -+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Dominic Curran -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "ispcsi2.h" -+ -+static struct isp_csi2_cfg current_csi2_cfg; -+static struct isp_csi2_cfg_update current_csi2_cfg_update; -+ -+static bool update_complexio_cfg1; -+static bool update_phy_cfg0; -+static bool update_phy_cfg1; -+static bool update_ctx_ctrl1[8]; -+static bool update_ctx_ctrl2[8]; -+static bool update_ctx_ctrl3[8]; -+static bool update_timing; -+static bool update_ctrl; -+static bool uses_videoport; -+ -+/** -+ * isp_csi2_complexio_lanes_config - Configuration of CSI2 ComplexIO lanes. -+ * @reqcfg: Pointer to structure containing desired lane configuration -+ * -+ * Validates and saves to internal driver memory the passed configuration. -+ * Returns 0 if successful, or -EINVAL if null pointer is passed, invalid -+ * lane position or polarity is set, and if 2 lanes try to occupy the same -+ * position. To apply this settings, use the isp_csi2_complexio_lanes_update() -+ * function just after calling this function. -+ **/ -+int isp_csi2_complexio_lanes_config(struct isp_csi2_lanes_cfg *reqcfg) -+{ -+ int i; -+ bool pos_occupied[5] = {false, false, false, false, false}; -+ struct isp_csi2_lanes_cfg *currlanes = ¤t_csi2_cfg.lanes; -+ struct isp_csi2_lanes_cfg_update *currlanes_u = -+ ¤t_csi2_cfg_update.lanes; -+ -+ /* Validating parameters sent by driver */ -+ if (reqcfg == NULL) { -+ printk(KERN_ERR "Invalid Complex IO Configuration sent by" -+ " sensor\n"); -+ goto err_einval; -+ } -+ -+ /* Data lanes verification */ -+ for (i = 0; i < 4; i++) { -+ if ((reqcfg->data[i].pol > 1) || (reqcfg->data[i].pos > 5)) { -+ printk(KERN_ERR "Invalid CSI-2 Complex IO configuration" -+ " parameters for data lane #%d\n", i); -+ goto err_einval; -+ } -+ if (pos_occupied[reqcfg->data[i].pos - 1] && -+ reqcfg->data[i].pos > 0) { -+ printk(KERN_ERR "Lane #%d already occupied\n", -+ reqcfg->data[i].pos); -+ goto err_einval; -+ } else -+ pos_occupied[reqcfg->data[i].pos - 1] = true; -+ } -+ -+ /* Clock lane verification */ -+ if ((reqcfg->clk.pol > 1) || (reqcfg->clk.pos > 5) || -+ (reqcfg->clk.pos == 0)) { -+ printk(KERN_ERR "Invalid CSI-2 Complex IO configuration" -+ " parameters for clock lane\n"); -+ goto err_einval; -+ } -+ if (pos_occupied[reqcfg->clk.pos - 1]) { -+ printk(KERN_ERR "Lane #%d already occupied", -+ reqcfg->clk.pos); -+ goto err_einval; -+ } else -+ pos_occupied[reqcfg->clk.pos - 1] = true; -+ -+ for (i = 0; i < 4; i++) { -+ if (currlanes->data[i].pos != reqcfg->data[i].pos) { -+ currlanes->data[i].pos = reqcfg->data[i].pos; -+ currlanes_u->data[i] = true; -+ update_complexio_cfg1 = true; -+ } -+ if (currlanes->data[i].pol != reqcfg->data[i].pol) { -+ currlanes->data[i].pol = reqcfg->data[i].pol; -+ currlanes_u->data[i] = true; -+ update_complexio_cfg1 = true; -+ } -+ } -+ -+ if (currlanes->clk.pos != reqcfg->clk.pos) { -+ currlanes->clk.pos = reqcfg->clk.pos; -+ currlanes_u->clk = true; -+ update_complexio_cfg1 = true; -+ } -+ if (currlanes->clk.pol != reqcfg->clk.pol) { -+ currlanes->clk.pol = reqcfg->clk.pol; -+ currlanes_u->clk = true; -+ update_complexio_cfg1 = true; -+ } -+ return 0; -+err_einval: -+ return -EINVAL; -+} -+ -+/** -+ * isp_csi2_complexio_lanes_update - Applies CSI2 ComplexIO lanes configuration. -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_complexio_lanes_config() function. 
-+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_complexio_lanes_config() function, unless the force_update flag is -+ * set to true. -+ * Always returns 0. -+ **/ -+int isp_csi2_complexio_lanes_update(bool force_update) -+{ -+ struct isp_csi2_lanes_cfg *currlanes = ¤t_csi2_cfg.lanes; -+ struct isp_csi2_lanes_cfg_update *currlanes_u = -+ ¤t_csi2_cfg_update.lanes; -+ u32 reg; -+ int i; -+ -+ if (!update_complexio_cfg1 && !force_update) -+ return 0; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1); -+ for (i = 0; i < 4; i++) { -+ if (currlanes_u->data[i] || force_update) { -+ reg &= ~(ISPCSI2_COMPLEXIO_CFG1_DATA_POL_MASK(i + 1) | -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_MASK(i + -+ 1)); -+ reg |= (currlanes->data[i].pol << -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POL_SHIFT(i + 1)); -+ reg |= (currlanes->data[i].pos << -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(i + -+ 1)); -+ currlanes_u->data[i] = false; -+ } -+ } -+ -+ if (currlanes_u->clk || force_update) { -+ reg &= ~(ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_MASK | -+ ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_MASK); -+ reg |= (currlanes->clk.pol << -+ ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_SHIFT); -+ reg |= (currlanes->clk.pos << -+ ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT); -+ currlanes_u->clk = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1); -+ -+ update_complexio_cfg1 = false; -+ return 0; -+} -+ -+/** -+ * isp_csi2_complexio_lanes_get - Gets CSI2 ComplexIO lanes configuration. -+ * -+ * Gets settings from HW registers and fills in the internal driver memory -+ * Always returns 0. -+ **/ -+int isp_csi2_complexio_lanes_get(void) -+{ -+ struct isp_csi2_lanes_cfg *currlanes = ¤t_csi2_cfg.lanes; -+ struct isp_csi2_lanes_cfg_update *currlanes_u = -+ ¤t_csi2_cfg_update.lanes; -+ u32 reg; -+ int i; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1); -+ for (i = 0; i < 4; i++) { -+ currlanes->data[i].pol = (reg & -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POL_MASK(i + 1)) >> -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POL_SHIFT(i + 1); -+ currlanes->data[i].pos = (reg & -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_MASK(i + 1)) >> -+ ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(i + 1); -+ currlanes_u->data[i] = false; -+ } -+ currlanes->clk.pol = (reg & ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_MASK) >> -+ ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_SHIFT; -+ currlanes->clk.pos = (reg & -+ ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_MASK) >> -+ ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT; -+ currlanes_u->clk = false; -+ -+ update_complexio_cfg1 = false; -+ return 0; -+} -+ -+/** -+ * isp_csi2_complexio_power_status - Gets CSI2 ComplexIO power status. -+ * -+ * Returns 3 possible valid states: ISP_CSI2_POWER_OFF, ISP_CSI2_POWER_ON, -+ * and ISP_CSI2_POWER_ULPW. 
-+ **/ -+static enum isp_csi2_power_cmds isp_csi2_complexio_power_status(void) -+{ -+ enum isp_csi2_power_cmds ret; -+ u32 reg; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1) & -+ ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_MASK; -+ switch (reg) { -+ case ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_OFF: -+ ret = ISP_CSI2_POWER_OFF; -+ break; -+ case ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_ON: -+ ret = ISP_CSI2_POWER_ON; -+ break; -+ case ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_ULPW: -+ ret = ISP_CSI2_POWER_ULPW; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return ret; -+} -+ -+/** -+ * isp_csi2_complexio_power_autoswitch - Sets CSI2 ComplexIO power autoswitch. -+ * @enable: Sets or clears the autoswitch function enable flag. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_complexio_power_autoswitch(bool enable) -+{ -+ u32 reg; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1); -+ reg &= ~ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_MASK; -+ -+ if (enable) -+ reg |= ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_ENABLE; -+ else -+ reg |= ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_DISABLE; -+ -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1); -+ return 0; -+} -+ -+/** -+ * isp_csi2_complexio_power - Sets the desired power command for CSI2 ComplexIO. -+ * @power_cmd: Power command to be set. -+ * -+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded. -+ **/ -+int isp_csi2_complexio_power(enum isp_csi2_power_cmds power_cmd) -+{ -+ enum isp_csi2_power_cmds current_state; -+ u32 reg; -+ u8 retry_count; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1) & -+ ~ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_MASK; -+ switch (power_cmd) { -+ case ISP_CSI2_POWER_OFF: -+ reg |= ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_OFF; -+ break; -+ case ISP_CSI2_POWER_ON: -+ reg |= ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_ON; -+ break; -+ case ISP_CSI2_POWER_ULPW: -+ reg |= ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_ULPW; -+ break; -+ default: -+ printk(KERN_ERR "CSI2: ERROR - Wrong Power command!\n"); -+ return -EINVAL; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1); -+ -+ retry_count = 0; -+ do { -+ udelay(50); -+ current_state = isp_csi2_complexio_power_status(); -+ -+ if (current_state != power_cmd) { -+ printk(KERN_DEBUG "CSI2: Complex IO power command not" -+ " yet taken."); -+ if (++retry_count < 100) { -+ printk(KERN_DEBUG " Retrying...\n"); -+ udelay(50); -+ } else { -+ printk(KERN_DEBUG " Retry count exceeded!\n"); -+ } -+ } -+ } while ((current_state != power_cmd) && (retry_count < 100)); -+ -+ if (retry_count == 100) -+ return -EBUSY; -+ -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_frame_mode - Configure if_en behaviour for CSI2 -+ * @frame_mode: Desired action for IF_EN switch off. 0 - disable IF immediately -+ * 1 - disable after all Frame end Code is received in all -+ * contexts. -+ * -+ * Validates and saves to internal driver memory the passed configuration. -+ * Always returns 0. 
-+ **/ -+int isp_csi2_ctrl_config_frame_mode(enum isp_csi2_frame_mode frame_mode) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->frame_mode != frame_mode) { -+ currctrl->frame_mode = frame_mode; -+ currctrl_u->frame_mode = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_vp_clk_enable - Enables/disables CSI2 Videoport clock. -+ * @vp_clk_enable: Boolean value to specify the Videoport clock state. -+ * -+ * Validates and saves to internal driver memory the passed configuration. -+ * Always returns 0. -+ **/ -+int isp_csi2_ctrl_config_vp_clk_enable(bool vp_clk_enable) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->vp_clk_enable != vp_clk_enable) { -+ currctrl->vp_clk_enable = vp_clk_enable; -+ currctrl_u->vp_clk_enable = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_vp_only_enable - Sets CSI2 Videoport clock as exclusive -+ * @vp_only_enable: Boolean value to specify if the Videoport clock is -+ * exclusive, setting the OCP port as disabled. -+ * -+ * Validates and saves to internal driver memory the passed configuration. -+ * Always returns 0. -+ **/ -+int isp_csi2_ctrl_config_vp_only_enable(bool vp_only_enable) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->vp_only_enable != vp_only_enable) { -+ currctrl->vp_only_enable = vp_only_enable; -+ currctrl_u->vp_only_enable = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_vp_out_ctrl - Sets CSI2 Videoport clock divider -+ * @vp_out_ctrl: Divider value for setting videoport clock frequency based on -+ * OCP port frequency, valid dividers are between 1 and 4. -+ * -+ * Validates and saves to internal driver memory the passed configuration. -+ * Returns 0 if successful, or -EINVAL if wrong divider value is passed. -+ **/ -+int isp_csi2_ctrl_config_vp_out_ctrl(u8 vp_out_ctrl) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if ((vp_out_ctrl == 0) || (vp_out_ctrl > 4)) { -+ printk(KERN_ERR "CSI2: Wrong divisor value. Must be between" -+ " 1 and 4"); -+ return -EINVAL; -+ } -+ -+ if (currctrl->vp_out_ctrl != vp_out_ctrl) { -+ currctrl->vp_out_ctrl = vp_out_ctrl; -+ currctrl_u->vp_out_ctrl = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_debug_enable - Sets CSI2 debug -+ * @debug_enable: Boolean for setting debug configuration on CSI2. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctrl_config_debug_enable(bool debug_enable) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->debug_enable != debug_enable) { -+ currctrl->debug_enable = debug_enable; -+ currctrl_u->debug_enable = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_burst_size - Sets CSI2 burst size. -+ * @burst_size: Burst size of the memory saving capability of receiver. -+ * -+ * Returns 0 if successful, or -EINVAL if burst size is wrong. 
-+ **/ -+int isp_csi2_ctrl_config_burst_size(u8 burst_size) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ if (burst_size > 3) { -+ printk(KERN_ERR "CSI2: Wrong burst size. Must be between" -+ " 0 and 3"); -+ return -EINVAL; -+ } -+ -+ if (currctrl->burst_size != burst_size) { -+ currctrl->burst_size = burst_size; -+ currctrl_u->burst_size = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_ecc_enable - Enables ECC on CSI2 Receiver -+ * @ecc_enable: Boolean to enable/disable the CSI2 receiver ECC handling. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctrl_config_ecc_enable(bool ecc_enable) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->ecc_enable != ecc_enable) { -+ currctrl->ecc_enable = ecc_enable; -+ currctrl_u->ecc_enable = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_ecc_enable - Enables ECC on CSI2 Receiver -+ * @ecc_enable: Boolean to enable/disable the CSI2 receiver ECC handling. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctrl_config_secure_mode(bool secure_mode) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->secure_mode != secure_mode) { -+ currctrl->secure_mode = secure_mode; -+ currctrl_u->secure_mode = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_config_if_enable - Enables CSI2 Receiver interface. -+ * @if_enable: Boolean to enable/disable the CSI2 receiver interface. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctrl_config_if_enable(bool if_enable) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ -+ if (currctrl->if_enable != if_enable) { -+ currctrl->if_enable = if_enable; -+ currctrl_u->if_enable = true; -+ update_ctrl = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_update - Applies CSI2 control configuration. -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_ctrl_config_*() functions. -+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_ctrl_config_*() functions, unless the force_update flag is -+ * set to true. -+ * Always returns 0. 
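The kerneldoc just above summarises the pattern used throughout this file: the isp_csi2_ctrl_config_*() helpers only cache a value and mark it dirty, and nothing reaches the ISPCSI2_CTRL register until isp_csi2_ctrl_update() runs. A minimal sketch of a caller (editorial illustration only, not part of the patch; the chosen values are arbitrary examples):

/* Cache the desired receiver settings; each call only sets a dirty flag. */
isp_csi2_ctrl_config_ecc_enable(true);		/* enable ECC checking */
isp_csi2_ctrl_config_vp_out_ctrl(2);		/* videoport clock = OCP clock / 2 */
isp_csi2_ctrl_config_if_enable(true);		/* switch the receiver interface on */

/* Flush everything that changed into ISPCSI2_CTRL in a single register write. */
isp_csi2_ctrl_update(false);			/* false: write only the dirty fields */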
-+ **/ -+int isp_csi2_ctrl_update(bool force_update) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ u32 reg; -+ -+ if (update_ctrl || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTRL); -+ if (currctrl_u->frame_mode || force_update) { -+ reg &= ~ISPCSI2_CTRL_FRAME_MASK; -+ if (currctrl->frame_mode) -+ reg |= ISPCSI2_CTRL_FRAME_DISABLE_FEC; -+ else -+ reg |= ISPCSI2_CTRL_FRAME_DISABLE_IMM; -+ currctrl_u->frame_mode = false; -+ } -+ if (currctrl_u->vp_clk_enable || force_update) { -+ reg &= ~ISPCSI2_CTRL_VP_CLK_EN_MASK; -+ if (currctrl->vp_clk_enable) -+ reg |= ISPCSI2_CTRL_VP_CLK_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTRL_VP_CLK_EN_DISABLE; -+ currctrl_u->vp_clk_enable = false; -+ } -+ if (currctrl_u->vp_only_enable || force_update) { -+ reg &= ~ISPCSI2_CTRL_VP_ONLY_EN_MASK; -+ uses_videoport = currctrl->vp_only_enable; -+ if (currctrl->vp_only_enable) -+ reg |= ISPCSI2_CTRL_VP_ONLY_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTRL_VP_ONLY_EN_DISABLE; -+ currctrl_u->vp_only_enable = false; -+ } -+ if (currctrl_u->vp_out_ctrl || force_update) { -+ reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK; -+ reg |= (currctrl->vp_out_ctrl - 1) << -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT; -+ currctrl_u->vp_out_ctrl = false; -+ } -+ if (currctrl_u->debug_enable || force_update) { -+ reg &= ~ISPCSI2_CTRL_DBG_EN_MASK; -+ if (currctrl->debug_enable) -+ reg |= ISPCSI2_CTRL_DBG_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTRL_DBG_EN_DISABLE; -+ currctrl_u->debug_enable = false; -+ } -+ if (currctrl_u->burst_size || force_update) { -+ reg &= ~ISPCSI2_CTRL_BURST_SIZE_MASK; -+ reg |= currctrl->burst_size << -+ ISPCSI2_CTRL_BURST_SIZE_SHIFT; -+ currctrl_u->burst_size = false; -+ } -+ if (currctrl_u->ecc_enable || force_update) { -+ reg &= ~ISPCSI2_CTRL_ECC_EN_MASK; -+ if (currctrl->ecc_enable) -+ reg |= ISPCSI2_CTRL_ECC_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTRL_ECC_EN_DISABLE; -+ currctrl_u->ecc_enable = false; -+ } -+ if (currctrl_u->secure_mode || force_update) { -+ reg &= ~ISPCSI2_CTRL_SECURE_MASK; -+ if (currctrl->secure_mode) -+ reg |= ISPCSI2_CTRL_SECURE_ENABLE; -+ else -+ reg |= ISPCSI2_CTRL_SECURE_DISABLE; -+ currctrl_u->secure_mode = false; -+ } -+ if (currctrl_u->if_enable || force_update) { -+ reg &= ~ISPCSI2_CTRL_IF_EN_MASK; -+ if (currctrl->if_enable) -+ reg |= ISPCSI2_CTRL_IF_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTRL_IF_EN_DISABLE; -+ currctrl_u->if_enable = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTRL); -+ update_ctrl = false; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctrl_get - Gets CSI2 control configuration -+ * -+ * Always returns 0. 
-+ **/ -+int isp_csi2_ctrl_get(void) -+{ -+ struct isp_csi2_ctrl_cfg *currctrl = ¤t_csi2_cfg.ctrl; -+ struct isp_csi2_ctrl_cfg_update *currctrl_u = -+ ¤t_csi2_cfg_update.ctrl; -+ u32 reg; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTRL); -+ currctrl->frame_mode = (reg & ISPCSI2_CTRL_FRAME_MASK) >> -+ ISPCSI2_CTRL_FRAME_SHIFT; -+ currctrl_u->frame_mode = false; -+ -+ if ((reg & ISPCSI2_CTRL_VP_CLK_EN_MASK) == -+ ISPCSI2_CTRL_VP_CLK_EN_ENABLE) -+ currctrl->vp_clk_enable = true; -+ else -+ currctrl->vp_clk_enable = false; -+ currctrl_u->vp_clk_enable = false; -+ -+ if ((reg & ISPCSI2_CTRL_VP_ONLY_EN_MASK) == -+ ISPCSI2_CTRL_VP_ONLY_EN_ENABLE) -+ currctrl->vp_only_enable = true; -+ else -+ currctrl->vp_only_enable = false; -+ uses_videoport = currctrl->vp_only_enable; -+ currctrl_u->vp_only_enable = false; -+ -+ currctrl->vp_out_ctrl = ((reg & ISPCSI2_CTRL_VP_OUT_CTRL_MASK) >> -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT) + 1; -+ currctrl_u->vp_out_ctrl = false; -+ -+ if ((reg & ISPCSI2_CTRL_DBG_EN_MASK) == ISPCSI2_CTRL_DBG_EN_ENABLE) -+ currctrl->debug_enable = true; -+ else -+ currctrl->debug_enable = false; -+ currctrl_u->debug_enable = false; -+ -+ currctrl->burst_size = (reg & ISPCSI2_CTRL_BURST_SIZE_MASK) >> -+ ISPCSI2_CTRL_BURST_SIZE_SHIFT; -+ currctrl_u->burst_size = false; -+ -+ if ((reg & ISPCSI2_CTRL_ECC_EN_MASK) == ISPCSI2_CTRL_ECC_EN_ENABLE) -+ currctrl->ecc_enable = true; -+ else -+ currctrl->ecc_enable = false; -+ currctrl_u->ecc_enable = false; -+ -+ if ((reg & ISPCSI2_CTRL_SECURE_MASK) == ISPCSI2_CTRL_SECURE_ENABLE) -+ currctrl->secure_mode = true; -+ else -+ currctrl->secure_mode = false; -+ currctrl_u->secure_mode = false; -+ -+ if ((reg & ISPCSI2_CTRL_IF_EN_MASK) == ISPCSI2_CTRL_IF_EN_ENABLE) -+ currctrl->if_enable = true; -+ else -+ currctrl->if_enable = false; -+ currctrl_u->if_enable = false; -+ -+ update_ctrl = false; -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_validate - Validates the context number value -+ * @ctxnum: Pointer to variable containing context number. -+ * -+ * If the value is not in range (3 bits), it is being ANDed with 0x7 to force -+ * it to be on range. -+ **/ -+static void isp_csi2_ctx_validate(u8 *ctxnum) -+{ -+ if (*ctxnum > 7) { -+ printk(KERN_ERR "Invalid context number. Forcing valid" -+ " value...\n"); -+ *ctxnum &= ~(0x7); -+ } -+} -+ -+/** -+ * isp_csi2_ctx_config_virtual_id - Maps a virtual ID with a CSI2 Rx context -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @virtual_id: CSI2 Virtual ID to associate with specified context number. -+ * -+ * Returns 0 if successful, or -EINVAL if Virtual ID is not in range (0-3). -+ **/ -+int isp_csi2_ctx_config_virtual_id(u8 ctxnum, u8 virtual_id) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ if (virtual_id > 3) { -+ printk(KERN_ERR "Wrong requested virtual_id\n"); -+ return -EINVAL; -+ } -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->virtual_id != virtual_id) { -+ selected_ctx->virtual_id = virtual_id; -+ selected_ctx_u->virtual_id = true; -+ update_ctx_ctrl2[ctxnum] = true; -+ } -+ -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_frame_count - Sets frame count to be received in CSI2 Rx. -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @frame_count: Number of frames to acquire. -+ * -+ * Always returns 0. 
-+ **/ -+int isp_csi2_ctx_config_frame_count(u8 ctxnum, u8 frame_count) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->frame_count != frame_count) { -+ selected_ctx->frame_count = frame_count; -+ selected_ctx_u->frame_count = true; -+ update_ctx_ctrl1[ctxnum] = true; -+ } -+ -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_format - Maps a pixel format to a specified context. -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @pixformat: V4L2 structure for pixel format. -+ * -+ * Returns 0 if successful, or -EINVAL if the format is not supported by the -+ * receiver. -+ **/ -+int isp_csi2_ctx_config_format(u8 ctxnum, u32 pixformat) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ struct v4l2_pix_format pix; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ pix.pixelformat = pixformat; -+ switch (pix.pixelformat) { -+ case V4L2_PIX_FMT_RGB565: -+ case V4L2_PIX_FMT_RGB565X: -+ case V4L2_PIX_FMT_YUYV: -+ case V4L2_PIX_FMT_UYVY: -+ case V4L2_PIX_FMT_RGB555: -+ case V4L2_PIX_FMT_RGB555X: -+ case V4L2_PIX_FMT_SGRBG10: -+ break; -+ default: -+ printk(KERN_ERR "Context config pixel format unsupported\n"); -+ return -EINVAL; -+ } -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ selected_ctx->format = pix; -+ selected_ctx_u->format = true; -+ update_ctx_ctrl2[ctxnum] = true; -+ -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_alpha - Sets the alpha value for pixel format -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @alpha: Alpha value. -+ * -+ * Returns 0 if successful, or -EINVAL if the alpha value is bigger than 16383. -+ **/ -+int isp_csi2_ctx_config_alpha(u8 ctxnum, u16 alpha) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ if (alpha > 0x3FFF) { -+ printk(KERN_ERR "Wrong alpha value\n"); -+ return -EINVAL; -+ } -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->alpha != alpha) { -+ selected_ctx->alpha = alpha; -+ selected_ctx_u->alpha = true; -+ update_ctx_ctrl3[ctxnum] = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_data_offset - Sets the offset between received lines -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @data_offset: Offset between first pixel of each 2 contiguous lines. -+ * -+ * Returns 0 if successful, or -EINVAL if the line offset is bigger than 1023. -+ **/ -+int isp_csi2_ctx_config_data_offset(u8 ctxnum, u16 data_offset) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ if (data_offset > 0x3FF) { -+ printk(KERN_ERR "Wrong line offset\n"); -+ return -EINVAL; -+ } -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->data_offset != data_offset) { -+ selected_ctx->data_offset = data_offset; -+ selected_ctx_u->data_offset = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_ping_addr - Sets Ping address for CSI2 Rx. buffer saving -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @ping_addr: 32 bit ISP MMU mapped address. 
-+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_config_ping_addr(u8 ctxnum, u32 ping_addr) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ ping_addr &= ~(0x1F); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->ping_addr != ping_addr) { -+ selected_ctx->ping_addr = ping_addr; -+ selected_ctx_u->ping_addr = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_pong_addr - Sets Pong address for CSI2 Rx. buffer saving -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @pong_addr: 32 bit ISP MMU mapped address. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_config_pong_addr(u8 ctxnum, u32 pong_addr) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ pong_addr &= ~(0x1F); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->pong_addr != pong_addr) { -+ selected_ctx->pong_addr = pong_addr; -+ selected_ctx_u->pong_addr = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_eof_enabled - Enables EOF signal assertion -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @eof_enabled: Boolean to enable/disable EOF signal assertion on received -+ * packets. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_config_eof_enabled(u8 ctxnum, bool eof_enabled) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->eof_enabled != eof_enabled) { -+ selected_ctx->eof_enabled = eof_enabled; -+ selected_ctx_u->eof_enabled = true; -+ update_ctx_ctrl1[ctxnum] = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_eol_enabled - Enables EOL signal assertion -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @eol_enabled: Boolean to enable/disable EOL signal assertion on received -+ * packets. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_config_eol_enabled(u8 ctxnum, bool eol_enabled) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->eol_enabled != eol_enabled) { -+ selected_ctx->eol_enabled = eol_enabled; -+ selected_ctx_u->eol_enabled = true; -+ update_ctx_ctrl1[ctxnum] = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_checksum_enabled - Enables Checksum check in rcvd packets -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @checksum_enabled: Boolean to enable/disable Checksum check on received -+ * packets -+ * -+ * Always returns 0. 
-+ **/ -+int isp_csi2_ctx_config_checksum_enabled(u8 ctxnum, bool checksum_enabled) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->checksum_enabled != checksum_enabled) { -+ selected_ctx->checksum_enabled = checksum_enabled; -+ selected_ctx_u->checksum_enabled = true; -+ update_ctx_ctrl1[ctxnum] = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_config_enabled - Enables specified CSI2 context -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @enabled: Boolean to enable/disable specified context. -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_config_enabled(u8 ctxnum, bool enabled) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (selected_ctx->enabled != enabled) { -+ selected_ctx->enabled = enabled; -+ selected_ctx_u->enabled = true; -+ update_ctx_ctrl1[ctxnum] = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_update - Applies CSI2 context configuration. -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_ctx_config_*() functions. -+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_ctx_config_*() functions, unless the force_update flag is -+ * set to true. -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_update(u8 ctxnum, bool force_update) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ u32 reg; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ if (update_ctx_ctrl1[ctxnum] || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL1(ctxnum)); -+ if (selected_ctx_u->frame_count || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL1_COUNT_MASK); -+ reg |= selected_ctx->frame_count << -+ ISPCSI2_CTX_CTRL1_COUNT_SHIFT; -+ selected_ctx_u->frame_count = false; -+ } -+ if (selected_ctx_u->eof_enabled || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL1_EOF_EN_MASK); -+ if (selected_ctx->eof_enabled) -+ reg |= ISPCSI2_CTX_CTRL1_EOF_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTX_CTRL1_EOF_EN_DISABLE; -+ selected_ctx_u->eof_enabled = false; -+ } -+ if (selected_ctx_u->eol_enabled || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL1_EOL_EN_MASK); -+ if (selected_ctx->eol_enabled) -+ reg |= ISPCSI2_CTX_CTRL1_EOL_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTX_CTRL1_EOL_EN_DISABLE; -+ selected_ctx_u->eol_enabled = false; -+ } -+ if (selected_ctx_u->checksum_enabled || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL1_CS_EN_MASK); -+ if (selected_ctx->checksum_enabled) -+ reg |= ISPCSI2_CTX_CTRL1_CS_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTX_CTRL1_CS_EN_DISABLE; -+ selected_ctx_u->checksum_enabled = false; -+ } -+ if (selected_ctx_u->enabled || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL1_CTX_EN_MASK); -+ if (selected_ctx->enabled) -+ reg |= ISPCSI2_CTX_CTRL1_CTX_EN_ENABLE; -+ else -+ reg |= ISPCSI2_CTX_CTRL1_CTX_EN_DISABLE; -+ selected_ctx_u->enabled = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, 
OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL1(ctxnum)); -+ update_ctx_ctrl1[ctxnum] = false; -+ } -+ -+ if (update_ctx_ctrl2[ctxnum] || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL2(ctxnum)); -+ if (selected_ctx_u->virtual_id || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK); -+ reg |= selected_ctx->virtual_id << -+ ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT; -+ selected_ctx_u->virtual_id = false; -+ } -+ -+ if (selected_ctx_u->format || force_update) { -+ struct v4l2_pix_format *pix; -+ u16 new_format = 0; -+ -+ reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK); -+ pix = &selected_ctx->format; -+ switch (pix->pixelformat) { -+ case V4L2_PIX_FMT_RGB565: -+ case V4L2_PIX_FMT_RGB565X: -+ new_format = 0x22; -+ break; -+ case V4L2_PIX_FMT_YUYV: -+ case V4L2_PIX_FMT_UYVY: -+ if (uses_videoport) -+ new_format = 0x9E; -+ else -+ new_format = 0x1E; -+ break; -+ case V4L2_PIX_FMT_RGB555: -+ case V4L2_PIX_FMT_RGB555X: -+ new_format = 0xA1; -+ break; -+ case V4L2_PIX_FMT_SGRBG10: -+ if (uses_videoport) -+ new_format = 0x12F; -+ else -+ new_format = 0xAB; -+ break; -+ } -+ reg |= (new_format << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT); -+ selected_ctx_u->format = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL2(ctxnum)); -+ update_ctx_ctrl2[ctxnum] = false; -+ } -+ -+ if (update_ctx_ctrl3[ctxnum] || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL3(ctxnum)); -+ if (selected_ctx_u->alpha || force_update) { -+ reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK); -+ reg |= (selected_ctx->alpha << -+ ISPCSI2_CTX_CTRL3_ALPHA_SHIFT); -+ selected_ctx_u->alpha = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL3(ctxnum)); -+ update_ctx_ctrl3[ctxnum] = false; -+ } -+ -+ if (selected_ctx_u->data_offset) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_OFST(ctxnum)); -+ reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK; -+ reg |= selected_ctx->data_offset << -+ ISPCSI2_CTX_DAT_OFST_OFST_SHIFT; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_OFST(ctxnum)); -+ selected_ctx_u->data_offset = false; -+ } -+ -+ if (selected_ctx_u->ping_addr) { -+ reg = selected_ctx->ping_addr; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_PING_ADDR(ctxnum)); -+ selected_ctx_u->ping_addr = false; -+ } -+ -+ if (selected_ctx_u->pong_addr) { -+ reg = selected_ctx->pong_addr; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_PONG_ADDR(ctxnum)); -+ selected_ctx_u->pong_addr = false; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_get - Gets specific CSI2 Context configuration -+ * @ctxnum: Context number, valid between 0 and 7 values. -+ * -+ * Always returns 0. 
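The per-context helpers follow the same cache-then-update scheme: a receive context is described, pointed at its ping/pong buffers, and only then committed with isp_csi2_ctx_update(). A short editorial sketch (not part of the patch; the context number, pixel format and the ping_isp_addr/pong_isp_addr buffer addresses are made-up example values):

u8 ctx = 0;					/* contexts 0..7 are valid */

isp_csi2_ctx_config_format(ctx, V4L2_PIX_FMT_SGRBG10);
isp_csi2_ctx_config_ping_addr(ctx, ping_isp_addr);	/* ISP MMU address; low 5 bits are masked off */
isp_csi2_ctx_config_pong_addr(ctx, pong_isp_addr);
isp_csi2_ctx_config_enabled(ctx, true);

isp_csi2_ctx_update(ctx, false);		/* write the cached context registers */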
-+ **/ -+int isp_csi2_ctx_get(u8 ctxnum) -+{ -+ struct isp_csi2_ctx_cfg *selected_ctx; -+ struct isp_csi2_ctx_cfg_update *selected_ctx_u; -+ u32 reg; -+ -+ isp_csi2_ctx_validate(&ctxnum); -+ -+ selected_ctx = ¤t_csi2_cfg.contexts[ctxnum]; -+ selected_ctx_u = ¤t_csi2_cfg_update.contexts[ctxnum]; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL1(ctxnum)); -+ selected_ctx->frame_count = (reg & ISPCSI2_CTX_CTRL1_COUNT_MASK) >> -+ ISPCSI2_CTX_CTRL1_COUNT_SHIFT; -+ selected_ctx_u->frame_count = false; -+ -+ if ((reg & ISPCSI2_CTX_CTRL1_EOF_EN_MASK) == -+ ISPCSI2_CTX_CTRL1_EOF_EN_ENABLE) -+ selected_ctx->eof_enabled = true; -+ else -+ selected_ctx->eof_enabled = false; -+ selected_ctx_u->eof_enabled = false; -+ -+ if ((reg & ISPCSI2_CTX_CTRL1_EOL_EN_MASK) == -+ ISPCSI2_CTX_CTRL1_EOL_EN_ENABLE) -+ selected_ctx->eol_enabled = true; -+ else -+ selected_ctx->eol_enabled = false; -+ selected_ctx_u->eol_enabled = false; -+ -+ if ((reg & ISPCSI2_CTX_CTRL1_CS_EN_MASK) == -+ ISPCSI2_CTX_CTRL1_CS_EN_ENABLE) -+ selected_ctx->checksum_enabled = true; -+ else -+ selected_ctx->checksum_enabled = false; -+ selected_ctx_u->checksum_enabled = false; -+ -+ if ((reg & ISPCSI2_CTX_CTRL1_CTX_EN_MASK) == -+ ISPCSI2_CTX_CTRL1_CTX_EN_ENABLE) -+ selected_ctx->enabled = true; -+ else -+ selected_ctx->enabled = false; -+ selected_ctx_u->enabled = false; -+ update_ctx_ctrl1[ctxnum] = false; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL2(ctxnum)); -+ -+ selected_ctx->virtual_id = (reg & ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK) >> -+ ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT; -+ selected_ctx_u->virtual_id = false; -+ -+ switch ((reg & ISPCSI2_CTX_CTRL2_FORMAT_MASK) >> -+ ISPCSI2_CTX_CTRL2_FORMAT_SHIFT) { -+ case 0x22: -+ selected_ctx->format.pixelformat = V4L2_PIX_FMT_RGB565; -+ break; -+ case 0x9E: -+ case 0x1E: -+ selected_ctx->format.pixelformat = V4L2_PIX_FMT_YUYV; -+ break; -+ case 0xA1: -+ selected_ctx->format.pixelformat = V4L2_PIX_FMT_RGB555; -+ break; -+ case 0xAB: -+ case 0x12F: -+ selected_ctx->format.pixelformat = V4L2_PIX_FMT_SGRBG10; -+ break; -+ } -+ selected_ctx_u->format = false; -+ update_ctx_ctrl2[ctxnum] = false; -+ -+ selected_ctx->alpha = (isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL3(ctxnum)) & -+ ISPCSI2_CTX_CTRL3_ALPHA_MASK) >> -+ ISPCSI2_CTX_CTRL3_ALPHA_SHIFT; -+ selected_ctx_u->alpha = false; -+ update_ctx_ctrl3[ctxnum] = false; -+ -+ selected_ctx->data_offset = (isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_OFST(ctxnum)) & -+ ISPCSI2_CTX_DAT_OFST_OFST_MASK) >> -+ ISPCSI2_CTX_DAT_OFST_OFST_SHIFT; -+ selected_ctx_u->data_offset = false; -+ -+ selected_ctx->ping_addr = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_PING_ADDR(ctxnum)); -+ selected_ctx_u->ping_addr = false; -+ -+ selected_ctx->pong_addr = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_PONG_ADDR(ctxnum)); -+ selected_ctx_u->pong_addr = false; -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_update_all - Applies all CSI2 context configuration. -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_ctx_config_*() functions. -+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_ctx_config_*() functions, unless the force_update flag is -+ * set to true. -+ * Always returns 0. 
-+ **/ -+int isp_csi2_ctx_update_all(bool force_update) -+{ -+ u8 ctxnum; -+ -+ for (ctxnum = 0; ctxnum < 8; ctxnum++) -+ isp_csi2_ctx_update(ctxnum, force_update); -+ -+ return 0; -+} -+ -+/** -+ * isp_csi2_ctx_get_all - Gets all CSI2 Context configurations -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_ctx_get_all(void) -+{ -+ u8 ctxnum; -+ -+ for (ctxnum = 0; ctxnum < 8; ctxnum++) -+ isp_csi2_ctx_get(ctxnum); -+ -+ return 0; -+} -+ -+int isp_csi2_phy_config(struct isp_csi2_phy_cfg *desiredphyconfig) -+{ -+ struct isp_csi2_phy_cfg *currphy = ¤t_csi2_cfg.phy; -+ struct isp_csi2_phy_cfg_update *currphy_u = -+ ¤t_csi2_cfg_update.phy; -+ -+ if ((desiredphyconfig->tclk_term > 0x7f) || -+ (desiredphyconfig->tclk_miss > 0x3)) { -+ printk(KERN_ERR "Invalid PHY configuration sent by the" -+ " driver\n"); -+ return -EINVAL; -+ } -+ -+ if (currphy->ths_term != desiredphyconfig->ths_term) { -+ currphy->ths_term = desiredphyconfig->ths_term; -+ currphy_u->ths_term = true; -+ update_phy_cfg0 = true; -+ } -+ if (currphy->ths_settle != desiredphyconfig->ths_settle) { -+ currphy->ths_settle = desiredphyconfig->ths_settle; -+ currphy_u->ths_settle = true; -+ update_phy_cfg0 = true; -+ } -+ if (currphy->tclk_term != desiredphyconfig->tclk_term) { -+ currphy->tclk_term = desiredphyconfig->tclk_term; -+ currphy_u->tclk_term = true; -+ update_phy_cfg1 = true; -+ } -+ if (currphy->tclk_miss != desiredphyconfig->tclk_miss) { -+ currphy->tclk_miss = desiredphyconfig->tclk_miss; -+ currphy_u->tclk_miss = true; -+ update_phy_cfg1 = true; -+ } -+ if (currphy->tclk_settle != desiredphyconfig->tclk_settle) { -+ currphy->tclk_settle = desiredphyconfig->tclk_settle; -+ currphy_u->tclk_settle = true; -+ update_phy_cfg1 = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_calc_phy_cfg0 - Calculates D-PHY config based on the MIPIClk speed. -+ * @mipiclk: MIPI clock frequency being used with CSI2 sensor. -+ * @lbound_hs_settle: Lower bound for CSI2 High Speed Settle transition. -+ * @ubound_hs_settle: Upper bound for CSI2 High Speed Settle transition. -+ * -+ * From TRM, we have the same calculation for HS Termination signal. -+ * THS_TERM = ceil( 12.5ns / DDRCLK period ) - 1 -+ * But for Settle, we use the mid value between the two passed boundaries from -+ * sensor: -+ * THS_SETTLE = (Upper bound + Lower bound) / 2 -+ * -+ * Always returns 0. -+ */ -+int isp_csi2_calc_phy_cfg0(u32 mipiclk, u32 lbound_hs_settle, -+ u32 ubound_hs_settle) -+{ -+ struct isp_csi2_phy_cfg *currphy = ¤t_csi2_cfg.phy; -+ struct isp_csi2_phy_cfg_update *currphy_u = -+ ¤t_csi2_cfg_update.phy; -+ u32 tmp, ddrclk = mipiclk >> 1; -+ -+ /* Calculate THS_TERM */ -+ tmp = ddrclk / 80000000; -+ if ((ddrclk % 80000000) > 0) -+ tmp++; -+ currphy->ths_term = tmp - 1; -+ currphy_u->ths_term = true; -+ -+ /* Calculate THS_SETTLE */ -+ currphy->ths_settle = (ubound_hs_settle + lbound_hs_settle) / 2; -+ -+ currphy_u->ths_settle = true; -+ isp_csi2_phy_update(true); -+ return 0; -+} -+EXPORT_SYMBOL(isp_csi2_calc_phy_cfg0); -+ -+/** -+ * isp_csi2_phy_update - Applies CSI2 D-PHY configuration. -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_phy_config_*() functions. -+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_phy_config_*() functions, unless the force_update flag is -+ * set to true. -+ * Always returns 0. 
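A quick worked example of the THS_TERM/THS_SETTLE formulas documented for isp_csi2_calc_phy_cfg0() above (editorial illustration; the 400 MHz MIPI clock and the settle bounds are assumed values, not taken from the patch):

u32 mipiclk = 400000000;		/* assumed sensor MIPI clock: 400 MHz */
u32 ddrclk  = mipiclk >> 1;		/* 200 MHz, i.e. a 5 ns period */
u32 tmp     = ddrclk / 80000000;	/* 2; dividing by 80 MHz equals dividing 12.5 ns by the period */
if (ddrclk % 80000000)			/* 40 MHz remainder, so round up */
	tmp++;				/* tmp = 3 = ceil(12.5 ns / 5 ns) */
u32 ths_term = tmp - 1;			/* 2, matching THS_TERM = ceil(12.5 ns / DDRCLK period) - 1 */

u32 ths_settle = (102 + 72) / 2;	/* 87: midpoint of assumed sensor-supplied bounds */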
-+ **/ -+int isp_csi2_phy_update(bool force_update) -+{ -+ struct isp_csi2_phy_cfg *currphy = ¤t_csi2_cfg.phy; -+ struct isp_csi2_phy_cfg_update *currphy_u = -+ ¤t_csi2_cfg_update.phy; -+ u32 reg; -+ -+ if (update_phy_cfg0 || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2PHY, -+ ISPCSI2PHY_CFG0); -+ if (currphy_u->ths_term || force_update) { -+ reg &= ~ISPCSI2PHY_CFG0_THS_TERM_MASK; -+ reg |= (currphy->ths_term << -+ ISPCSI2PHY_CFG0_THS_TERM_SHIFT); -+ currphy_u->ths_term = false; -+ } -+ if (currphy_u->ths_settle || force_update) { -+ reg &= ~ISPCSI2PHY_CFG0_THS_SETTLE_MASK; -+ reg |= (currphy->ths_settle << -+ ISPCSI2PHY_CFG0_THS_SETTLE_SHIFT); -+ currphy_u->ths_settle = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, -+ OMAP3_ISP_IOMEM_CSI2PHY, ISPCSI2PHY_CFG0); -+ update_phy_cfg0 = false; -+ } -+ -+ if (update_phy_cfg1 || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2PHY, ISPCSI2PHY_CFG1); -+ if (currphy_u->tclk_term || force_update) { -+ reg &= ~ISPCSI2PHY_CFG1_TCLK_TERM_MASK; -+ reg |= (currphy->tclk_term << -+ ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT); -+ currphy_u->tclk_term = false; -+ } -+ if (currphy_u->tclk_miss || force_update) { -+ reg &= ~ISPCSI2PHY_CFG1_TCLK_MISS_MASK; -+ reg |= (currphy->tclk_miss << -+ ISPCSI2PHY_CFG1_TCLK_MISS_SHIFT); -+ currphy_u->tclk_miss = false; -+ } -+ if (currphy_u->tclk_settle || force_update) { -+ reg &= ~ISPCSI2PHY_CFG1_TCLK_SETTLE_MASK; -+ reg |= (currphy->tclk_settle << -+ ISPCSI2PHY_CFG1_TCLK_SETTLE_SHIFT); -+ currphy_u->tclk_settle = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, -+ OMAP3_ISP_IOMEM_CSI2PHY, ISPCSI2PHY_CFG1); -+ update_phy_cfg1 = false; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_phy_get - Gets CSI2 D-PHY configuration -+ * -+ * Gets settings from HW registers and fills in the internal driver memory -+ * Always returns 0. -+ **/ -+int isp_csi2_phy_get(void) -+{ -+ struct isp_csi2_phy_cfg *currphy = ¤t_csi2_cfg.phy; -+ struct isp_csi2_phy_cfg_update *currphy_u = -+ ¤t_csi2_cfg_update.phy; -+ u32 reg; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2PHY, -+ ISPCSI2PHY_CFG0); -+ currphy->ths_term = (reg & ISPCSI2PHY_CFG0_THS_TERM_MASK) >> -+ ISPCSI2PHY_CFG0_THS_TERM_SHIFT; -+ currphy_u->ths_term = false; -+ -+ currphy->ths_settle = (reg & ISPCSI2PHY_CFG0_THS_SETTLE_MASK) >> -+ ISPCSI2PHY_CFG0_THS_SETTLE_SHIFT; -+ currphy_u->ths_settle = false; -+ update_phy_cfg0 = false; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2PHY, -+ ISPCSI2PHY_CFG1); -+ -+ currphy->tclk_term = (reg & ISPCSI2PHY_CFG1_TCLK_TERM_MASK) >> -+ ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT; -+ currphy_u->tclk_term = false; -+ -+ currphy->tclk_miss = (reg & ISPCSI2PHY_CFG1_TCLK_MISS_MASK) >> -+ ISPCSI2PHY_CFG1_TCLK_MISS_SHIFT; -+ currphy_u->tclk_miss = false; -+ -+ currphy->tclk_settle = (reg & ISPCSI2PHY_CFG1_TCLK_SETTLE_MASK) >> -+ ISPCSI2PHY_CFG1_TCLK_SETTLE_SHIFT; -+ currphy_u->tclk_settle = false; -+ -+ update_phy_cfg1 = false; -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_config_forcerxmode - Sets Force Rx mode on stop state count -+ * @force_rx_mode: Boolean to enable/disable forcing Rx mode in CSI2 receiver -+ * -+ * Returns 0 if successful, or -EINVAL if wrong ComplexIO number is selected. 
-+ **/ -+int isp_csi2_timings_config_forcerxmode(u8 io, bool force_rx_mode) -+{ -+ struct isp_csi2_timings_cfg *currtimings; -+ struct isp_csi2_timings_cfg_update *currtimings_u; -+ -+ if (io < 1 || io > 2) { -+ printk(KERN_ERR "CSI2 - Timings config: Invalid IO number\n"); -+ return -EINVAL; -+ } -+ -+ currtimings = ¤t_csi2_cfg.timings[io - 1]; -+ currtimings_u = ¤t_csi2_cfg_update.timings[io - 1]; -+ if (currtimings->force_rx_mode != force_rx_mode) { -+ currtimings->force_rx_mode = force_rx_mode; -+ currtimings_u->force_rx_mode = true; -+ update_timing = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_config_stopstate_16x - Sets 16x factor for L3 cycles -+ * @stop_state_16x: Boolean to use or not use the 16x multiplier for stop count -+ * -+ * Returns 0 if successful, or -EINVAL if wrong ComplexIO number is selected. -+ **/ -+int isp_csi2_timings_config_stopstate_16x(u8 io, bool stop_state_16x) -+{ -+ struct isp_csi2_timings_cfg *currtimings; -+ struct isp_csi2_timings_cfg_update *currtimings_u; -+ -+ if (io < 1 || io > 2) { -+ printk(KERN_ERR "CSI2 - Timings config: Invalid IO number\n"); -+ return -EINVAL; -+ } -+ -+ currtimings = ¤t_csi2_cfg.timings[io - 1]; -+ currtimings_u = ¤t_csi2_cfg_update.timings[io - 1]; -+ if (currtimings->stop_state_16x != stop_state_16x) { -+ currtimings->stop_state_16x = stop_state_16x; -+ currtimings_u->stop_state_16x = true; -+ update_timing = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_config_stopstate_4x - Sets 4x factor for L3 cycles -+ * @stop_state_4x: Boolean to use or not use the 4x multiplier for stop count -+ * -+ * Returns 0 if successful, or -EINVAL if wrong ComplexIO number is selected. -+ **/ -+int isp_csi2_timings_config_stopstate_4x(u8 io, bool stop_state_4x) -+{ -+ struct isp_csi2_timings_cfg *currtimings; -+ struct isp_csi2_timings_cfg_update *currtimings_u; -+ -+ if (io < 1 || io > 2) { -+ printk(KERN_ERR "CSI2 - Timings config: Invalid IO number\n"); -+ return -EINVAL; -+ } -+ -+ currtimings = ¤t_csi2_cfg.timings[io - 1]; -+ currtimings_u = ¤t_csi2_cfg_update.timings[io - 1]; -+ if (currtimings->stop_state_4x != stop_state_4x) { -+ currtimings->stop_state_4x = stop_state_4x; -+ currtimings_u->stop_state_4x = true; -+ update_timing = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_config_stopstate_cnt - Sets L3 cycles -+ * @stop_state_counter: Stop state counter value for L3 cycles -+ * -+ * Returns 0 if successful, or -EINVAL if wrong ComplexIO number is selected. -+ **/ -+int isp_csi2_timings_config_stopstate_cnt(u8 io, u16 stop_state_counter) -+{ -+ struct isp_csi2_timings_cfg *currtimings; -+ struct isp_csi2_timings_cfg_update *currtimings_u; -+ -+ if (io < 1 || io > 2) { -+ printk(KERN_ERR "CSI2 - Timings config: Invalid IO number\n"); -+ return -EINVAL; -+ } -+ -+ currtimings = ¤t_csi2_cfg.timings[io - 1]; -+ currtimings_u = ¤t_csi2_cfg_update.timings[io - 1]; -+ if (currtimings->stop_state_counter != stop_state_counter) { -+ currtimings->stop_state_counter = (stop_state_counter & 0x1FFF); -+ currtimings_u->stop_state_counter = true; -+ update_timing = true; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_update - Applies specified CSI2 timing configuration. -+ * @io: IO number (1 or 2) which specifies which ComplexIO are we updating -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_timings_config_*() functions. 
-+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_timings_config_*() functions, unless the force_update flag is -+ * set to true. -+ * Returns 0 if successful, or -EINVAL if invalid IO number is passed. -+ **/ -+int isp_csi2_timings_update(u8 io, bool force_update) -+{ -+ struct isp_csi2_timings_cfg *currtimings; -+ struct isp_csi2_timings_cfg_update *currtimings_u; -+ u32 reg; -+ -+ if (io < 1 || io > 2) { -+ printk(KERN_ERR "CSI2 - Timings config: Invalid IO number\n"); -+ return -EINVAL; -+ } -+ -+ currtimings = ¤t_csi2_cfg.timings[io - 1]; -+ currtimings_u = ¤t_csi2_cfg_update.timings[io - 1]; -+ -+ if (update_timing || force_update) { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_TIMING); -+ if (currtimings_u->force_rx_mode || force_update) { -+ reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO_MASK(io); -+ if (currtimings->force_rx_mode) -+ reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO_ENABLE -+ (io); -+ else -+ reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO_DISABLE -+ (io); -+ currtimings_u->force_rx_mode = false; -+ } -+ if (currtimings_u->stop_state_16x || force_update) { -+ reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO_MASK(io); -+ if (currtimings->stop_state_16x) -+ reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO_ENABLE -+ (io); -+ else -+ reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO_DISABLE -+ (io); -+ currtimings_u->stop_state_16x = false; -+ } -+ if (currtimings_u->stop_state_4x || force_update) { -+ reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO_MASK(io); -+ if (currtimings->stop_state_4x) { -+ reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO_ENABLE -+ (io); -+ } else { -+ reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO_DISABLE -+ (io); -+ } -+ currtimings_u->stop_state_4x = false; -+ } -+ if (currtimings_u->stop_state_counter || force_update) { -+ reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(io); -+ reg |= currtimings->stop_state_counter << -+ ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(io); -+ currtimings_u->stop_state_counter = false; -+ } -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_TIMING); -+ update_timing = false; -+ } -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_get - Gets specific CSI2 ComplexIO timing configuration -+ * @io: IO number (1 or 2) which specifies which ComplexIO are we getting -+ * -+ * Gets settings from HW registers and fills in the internal driver memory -+ * Returns 0 if successful, or -EINVAL if invalid IO number is passed. 
-+ **/ -+int isp_csi2_timings_get(u8 io) -+{ -+ struct isp_csi2_timings_cfg *currtimings; -+ struct isp_csi2_timings_cfg_update *currtimings_u; -+ u32 reg; -+ -+ if (io < 1 || io > 2) { -+ printk(KERN_ERR "CSI2 - Timings config: Invalid IO number\n"); -+ return -EINVAL; -+ } -+ -+ currtimings = ¤t_csi2_cfg.timings[io - 1]; -+ currtimings_u = ¤t_csi2_cfg_update.timings[io - 1]; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_TIMING); -+ if ((reg & ISPCSI2_TIMING_FORCE_RX_MODE_IO_MASK(io)) == -+ ISPCSI2_TIMING_FORCE_RX_MODE_IO_ENABLE(io)) -+ currtimings->force_rx_mode = true; -+ else -+ currtimings->force_rx_mode = false; -+ currtimings_u->force_rx_mode = false; -+ -+ if ((reg & ISPCSI2_TIMING_STOP_STATE_X16_IO_MASK(io)) == -+ ISPCSI2_TIMING_STOP_STATE_X16_IO_ENABLE(io)) -+ currtimings->stop_state_16x = true; -+ else -+ currtimings->stop_state_16x = false; -+ currtimings_u->stop_state_16x = false; -+ -+ if ((reg & ISPCSI2_TIMING_STOP_STATE_X4_IO_MASK(io)) == -+ ISPCSI2_TIMING_STOP_STATE_X4_IO_ENABLE(io)) -+ currtimings->stop_state_4x = true; -+ else -+ currtimings->stop_state_4x = false; -+ currtimings_u->stop_state_4x = false; -+ -+ currtimings->stop_state_counter = (reg & -+ ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(io)) >> -+ ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(io); -+ currtimings_u->stop_state_counter = false; -+ update_timing = false; -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_update_all - Applies specified CSI2 timing configuration. -+ * @force_update: Flag to force rewrite of registers, even if they haven't been -+ * updated with the isp_csi2_timings_config_*() functions. -+ * -+ * It only saves settings when they were previously updated using the -+ * isp_csi2_timings_config_*() functions, unless the force_update flag is -+ * set to true. -+ * Always returns 0. -+ **/ -+int isp_csi2_timings_update_all(bool force_update) -+{ -+ int i; -+ -+ for (i = 1; i < 3; i++) -+ isp_csi2_timings_update(i, force_update); -+ return 0; -+} -+ -+/** -+ * isp_csi2_timings_get_all - Gets all CSI2 ComplexIO timing configurations -+ * -+ * Always returns 0. -+ **/ -+int isp_csi2_timings_get_all(void) -+{ -+ int i; -+ -+ for (i = 1; i < 3; i++) -+ isp_csi2_timings_get(i); -+ return 0; -+} -+ -+/** -+ * isp_csi2_isr - CSI2 interrupt handling. -+ * -+ * Return -EIO on Transmission error -+ **/ -+int isp_csi2_isr(void) -+{ -+ int retval = 0; -+ u32 csi2_irqstatus, cpxio1_irqstatus, ctxirqstatus; -+ -+ csi2_irqstatus = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_IRQSTATUS); -+ isp_reg_writel(current_csi2_cfg.dev, csi2_irqstatus, -+ OMAP3_ISP_IOMEM_CSI2A, ISPCSI2_IRQSTATUS); -+ -+ /* Failure Cases */ -+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) { -+ cpxio1_irqstatus = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO1_IRQSTATUS); -+ isp_reg_writel(current_csi2_cfg.dev, cpxio1_irqstatus, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO1_IRQSTATUS); -+ dev_dbg(current_csi2_cfg.dev, "CSI2: ComplexIO Error IRQ %x\n", -+ cpxio1_irqstatus); -+ retval = -EIO; -+ } -+ -+ if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ | -+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ | -+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ | -+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ | -+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) { -+ dev_dbg(current_csi2_cfg.dev, "CSI2 Err:" -+ " OCP:%d," -+ " Short_pack:%d," -+ " ECC:%d," -+ " CPXIO2:%d," -+ " FIFO_OVF:%d," -+ "\n", -+ (csi2_irqstatus & -+ ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 
1 : 0, -+ (csi2_irqstatus & -+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0, -+ (csi2_irqstatus & -+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0, -+ (csi2_irqstatus & -+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0, -+ (csi2_irqstatus & -+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0); -+ retval = -EIO; -+ } -+ -+ /* Successful cases */ -+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0)) { -+ ctxirqstatus = isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQSTATUS(0)); -+ isp_reg_writel(current_csi2_cfg.dev, ctxirqstatus, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQSTATUS(0)); -+ } -+ -+ if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ) -+ dev_dbg(current_csi2_cfg.dev, "CSI2: ECC correction done\n"); -+ -+ return retval; -+} -+EXPORT_SYMBOL(isp_csi2_isr); -+ -+/** -+ * isp_csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs. -+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts -+ **/ -+void isp_csi2_irq_complexio1_set(int enable) -+{ -+ u32 reg; -+ reg = ISPCSI2_COMPLEXIO1_IRQENABLE_STATEALLULPMEXIT | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_STATEALLULPMENTER | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM5 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL5 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC5 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS5 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS5 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM4 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL4 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC4 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS4 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS4 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM3 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL3 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC3 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS3 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS3 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM2 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL2 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC2 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS2 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS2 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM1 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL1 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC1 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS1 | -+ ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS1; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO1_IRQSTATUS); -+ if (enable) { -+ reg |= isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO1_IRQENABLE); -+ } else -+ reg = 0; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO1_IRQENABLE); -+} -+EXPORT_SYMBOL(isp_csi2_irq_complexio1_set); -+ -+/** -+ * isp_csi2_irq_ctx_set - Enables CSI2 Context IRQs. -+ * @enable: Enable/disable CSI2 Context interrupts -+ **/ -+void isp_csi2_irq_ctx_set(int enable) -+{ -+ u32 reg; -+ int i; -+ -+ reg = ISPCSI2_CTX_IRQSTATUS_FS_IRQ | ISPCSI2_CTX_IRQSTATUS_FE_IRQ; -+ for (i = 0; i < 8; i++) { -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQSTATUS(i)); -+ if (enable) { -+ isp_reg_or(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQENABLE(i), reg); -+ } else { -+ isp_reg_writel(current_csi2_cfg.dev, 0, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQENABLE(i)); -+ } -+ } -+ -+} -+EXPORT_SYMBOL(isp_csi2_irq_ctx_set); -+ -+/** -+ * isp_csi2_irq_status_set - Enables CSI2 Status IRQs. 
-+ * @enable: Enable/disable CSI2 Status interrupts -+ **/ -+void isp_csi2_irq_status_set(int enable) -+{ -+ u32 reg; -+ reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ | -+ ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ | -+ ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ | -+ ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ | -+ ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ | -+ ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ | -+ ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ | -+ ISPCSI2_IRQSTATUS_CONTEXT(0); -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_IRQSTATUS); -+ if (enable) -+ reg |= isp_reg_readl(current_csi2_cfg.dev, -+ OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_IRQENABLE); -+ else -+ reg = 0; -+ -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_IRQENABLE); -+} -+EXPORT_SYMBOL(isp_csi2_irq_status_set); -+ -+/** -+ * isp_csi2_irq_all_set - Enable/disable CSI2 interrupts. -+ * @enable: 0-Disable, 1-Enable. -+ **/ -+void isp_csi2_irq_all_set(int enable) -+{ -+ if (enable) { -+ isp_csi2_irq_complexio1_set(enable); -+ isp_csi2_irq_ctx_set(enable); -+ isp_csi2_irq_status_set(enable); -+ } else { -+ isp_csi2_irq_status_set(enable); -+ isp_csi2_irq_ctx_set(enable); -+ isp_csi2_irq_complexio1_set(enable); -+ } -+ return; -+} -+EXPORT_SYMBOL(isp_csi2_irq_all_set); -+ -+/** -+ * isp_csi2_reset - Resets the CSI2 module. -+ * -+ * Returns 0 if successful, or -EBUSY if power command didn't respond. -+ **/ -+int isp_csi2_reset(void) -+{ -+ u32 reg; -+ u8 soft_reset_retries = 0; -+ int i; -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSCONFIG); -+ reg |= ISPCSI2_SYSCONFIG_SOFT_RESET_RESET; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSCONFIG); -+ -+ do { -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSSTATUS) & -+ ISPCSI2_SYSSTATUS_RESET_DONE_MASK; -+ if (reg == ISPCSI2_SYSSTATUS_RESET_DONE_DONE) -+ break; -+ soft_reset_retries++; -+ if (soft_reset_retries < 5) -+ udelay(100); -+ } while (soft_reset_retries < 5); -+ -+ if (soft_reset_retries == 5) { -+ printk(KERN_ERR "CSI2: Soft reset try count exceeded!\n"); -+ return -EBUSY; -+ } -+ -+ reg = isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSCONFIG); -+ reg &= ~ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK; -+ reg |= ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO; -+ reg &= ~ISPCSI2_SYSCONFIG_AUTO_IDLE_MASK; -+ isp_reg_writel(current_csi2_cfg.dev, reg, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSCONFIG); -+ -+ uses_videoport = false; -+ update_complexio_cfg1 = false; -+ update_phy_cfg0 = false; -+ update_phy_cfg1 = false; -+ for (i = 0; i < 8; i++) { -+ update_ctx_ctrl1[i] = false; -+ update_ctx_ctrl2[i] = false; -+ update_ctx_ctrl3[i] = false; -+ } -+ update_timing = false; -+ update_ctrl = false; -+ -+ isp_csi2_complexio_lanes_get(); -+ isp_csi2_ctrl_get(); -+ isp_csi2_ctx_get_all(); -+ isp_csi2_phy_get(); -+ isp_csi2_timings_get_all(); -+ -+ isp_csi2_complexio_power_autoswitch(true); -+ isp_csi2_complexio_power(ISP_CSI2_POWER_ON); -+ -+ isp_csi2_timings_config_forcerxmode(1, true); -+ isp_csi2_timings_config_stopstate_cnt(1, 0x1FF); -+ isp_csi2_timings_update_all(true); -+ -+ return 0; -+} -+ -+/** -+ * isp_csi2_enable - Enables the CSI2 module. -+ * @enable: Enables/disables the CSI2 module. 
-+ **/ -+void isp_csi2_enable(int enable) -+{ -+ if (enable) { -+ isp_csi2_ctx_config_enabled(0, true); -+ isp_csi2_ctx_config_eof_enabled(0, true); -+ isp_csi2_ctx_config_checksum_enabled(0, true); -+ isp_csi2_ctx_update(0, false); -+ -+ isp_csi2_ctrl_config_ecc_enable(true); -+ isp_csi2_ctrl_config_if_enable(true); -+ isp_csi2_ctrl_update(false); -+ } else { -+ isp_csi2_ctx_config_enabled(0, false); -+ isp_csi2_ctx_config_eof_enabled(0, false); -+ isp_csi2_ctx_config_checksum_enabled(0, false); -+ isp_csi2_ctx_update(0, false); -+ -+ isp_csi2_ctrl_config_ecc_enable(false); -+ isp_csi2_ctrl_config_if_enable(false); -+ isp_csi2_ctrl_update(false); -+ } -+} -+EXPORT_SYMBOL(isp_csi2_enable); -+ -+/** -+ * isp_csi2_regdump - Prints CSI2 debug information. -+ **/ -+void isp_csi2_regdump(void) -+{ -+ printk(KERN_DEBUG "-------------Register dump-------------\n"); -+ -+ printk(KERN_DEBUG "ISP_CTRL: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL)); -+ printk(KERN_DEBUG "ISP_TCTRL_CTRL: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_TCTRL_CTRL)); -+ printk(KERN_DEBUG "ISPCCDC_SDR_ADDR: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SDR_ADDR)); -+ printk(KERN_DEBUG "ISPCCDC_SYN_MODE: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_SYN_MODE)); -+ printk(KERN_DEBUG "ISPCCDC_CFG: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_CFG)); -+ printk(KERN_DEBUG "ISPCCDC_FMTCFG: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_FMTCFG)); -+ printk(KERN_DEBUG "ISPCCDC_HSIZE_OFF: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_HSIZE_OFF)); -+ printk(KERN_DEBUG "ISPCCDC_HORZ_INFO: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_HORZ_INFO)); -+ printk(KERN_DEBUG "ISPCCDC_VERT_START: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VERT_START)); -+ printk(KERN_DEBUG "ISPCCDC_VERT_LINES: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CCDC, -+ ISPCCDC_VERT_LINES)); -+ -+ printk(KERN_DEBUG "ISPCSI2_COMPLEXIO_CFG1: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_COMPLEXIO_CFG1)); -+ printk(KERN_DEBUG "ISPCSI2_SYSSTATUS: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSSTATUS)); -+ printk(KERN_DEBUG "ISPCSI2_SYSCONFIG: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_SYSCONFIG)); -+ printk(KERN_DEBUG "ISPCSI2_IRQENABLE: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_IRQENABLE)); -+ printk(KERN_DEBUG "ISPCSI2_IRQSTATUS: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_IRQSTATUS)); -+ -+ printk(KERN_DEBUG "ISPCSI2_CTX_IRQENABLE(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQENABLE(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTX_IRQSTATUS(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_IRQSTATUS(0))); -+ printk(KERN_DEBUG "ISPCSI2_TIMING: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_TIMING)); -+ printk(KERN_DEBUG "ISPCSI2PHY_CFG0: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2PHY, -+ ISPCSI2PHY_CFG0)); -+ printk(KERN_DEBUG "ISPCSI2PHY_CFG1: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2PHY, -+ ISPCSI2PHY_CFG1)); -+ 
printk(KERN_DEBUG "ISPCSI2_CTX_CTRL1(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL1(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTX_CTRL2(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL2(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTX_CTRL3(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_CTRL3(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTX_DAT_OFST(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_OFST(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTX_DAT_PING_ADDR(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_PING_ADDR(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTX_DAT_PONG_ADDR(0): %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTX_DAT_PONG_ADDR(0))); -+ printk(KERN_DEBUG "ISPCSI2_CTRL: %x\n", -+ isp_reg_readl(current_csi2_cfg.dev, OMAP3_ISP_IOMEM_CSI2A, -+ ISPCSI2_CTRL)); -+ printk(KERN_DEBUG "---------------------------------------\n"); -+} -+ -+/** -+ * isp_csi2_cleanup - Routine for module driver cleanup -+ **/ -+void isp_csi2_cleanup(struct device *dev) -+{ -+ return; -+} -+ -+/** -+ * isp_csi2_init - Routine for module driver init -+ **/ -+int __init isp_csi2_init(struct device *dev) -+{ -+ int i; -+ -+ update_complexio_cfg1 = false; -+ update_phy_cfg0 = false; -+ update_phy_cfg1 = false; -+ for (i = 0; i < 8; i++) { -+ update_ctx_ctrl1[i] = false; -+ update_ctx_ctrl2[i] = false; -+ update_ctx_ctrl3[i] = false; -+ } -+ update_timing = false; -+ update_ctrl = false; -+ -+ memset(¤t_csi2_cfg, 0, sizeof(current_csi2_cfg)); -+ memset(¤t_csi2_cfg_update, 0, sizeof(current_csi2_cfg_update)); -+ current_csi2_cfg.dev = dev; -+ return 0; -+} -+ -+MODULE_AUTHOR("Texas Instruments"); -+MODULE_DESCRIPTION("ISP CSI2 Receiver Module"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispcsi2.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispcsi2.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispcsi2.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispcsi2.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,232 @@ -+/* -+ * ispcsi2.h -+ * -+ * Copyright (C) 2009 Texas Instruments. -+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Dominic Curran -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#ifndef OMAP_ISP_CSI2_API_H -+#define OMAP_ISP_CSI2_API_H -+#include -+ -+enum isp_csi2_irqevents { -+ OCP_ERR_IRQ = 0x4000, -+ SHORT_PACKET_IRQ = 0x2000, -+ ECC_CORRECTION_IRQ = 0x1000, -+ ECC_NO_CORRECTION_IRQ = 0x800, -+ COMPLEXIO2_ERR_IRQ = 0x400, -+ COMPLEXIO1_ERR_IRQ = 0x200, -+ FIFO_OVF_IRQ = 0x100, -+ CONTEXT7 = 0x80, -+ CONTEXT6 = 0x40, -+ CONTEXT5 = 0x20, -+ CONTEXT4 = 0x10, -+ CONTEXT3 = 0x8, -+ CONTEXT2 = 0x4, -+ CONTEXT1 = 0x2, -+ CONTEXT0 = 0x1, -+}; -+ -+enum isp_csi2_ctx_irqevents { -+ CTX_ECC_CORRECTION = 0x100, -+ CTX_LINE_NUMBER = 0x80, -+ CTX_FRAME_NUMBER = 0x40, -+ CTX_CS = 0x20, -+ CTX_LE = 0x8, -+ CTX_LS = 0x4, -+ CTX_FE = 0x2, -+ CTX_FS = 0x1, -+}; -+ -+enum isp_csi2_power_cmds { -+ ISP_CSI2_POWER_OFF, -+ ISP_CSI2_POWER_ON, -+ ISP_CSI2_POWER_ULPW, -+}; -+ -+enum isp_csi2_frame_mode { -+ ISP_CSI2_FRAME_IMMEDIATE, -+ ISP_CSI2_FRAME_AFTERFEC, -+}; -+ -+struct csi2_lanecfg { -+ u8 pos; -+ u8 pol; -+}; -+ -+struct isp_csi2_lanes_cfg { -+ struct csi2_lanecfg data[4]; -+ struct csi2_lanecfg clk; -+}; -+ -+struct isp_csi2_lanes_cfg_update { -+ bool data[4]; -+ bool clk; -+}; -+ -+struct isp_csi2_phy_cfg { -+ u8 ths_term; -+ u8 ths_settle; -+ u8 tclk_term; -+ unsigned tclk_miss:1; -+ u8 tclk_settle; -+}; -+ -+struct isp_csi2_phy_cfg_update { -+ bool ths_term; -+ bool ths_settle; -+ bool tclk_term; -+ bool tclk_miss; -+ bool tclk_settle; -+}; -+ -+struct isp_csi2_ctx_cfg { -+ u8 virtual_id; -+ u8 frame_count; -+ struct v4l2_pix_format format; -+ u16 alpha; -+ u16 data_offset; -+ u32 ping_addr; -+ u32 pong_addr; -+ bool eof_enabled; -+ bool eol_enabled; -+ bool checksum_enabled; -+ bool enabled; -+}; -+ -+struct isp_csi2_ctx_cfg_update { -+ bool virtual_id; -+ bool frame_count; -+ bool format; -+ bool alpha; -+ bool data_offset; -+ bool ping_addr; -+ bool pong_addr; -+ bool eof_enabled; -+ bool eol_enabled; -+ bool checksum_enabled; -+ bool enabled; -+}; -+ -+struct isp_csi2_timings_cfg { -+ bool force_rx_mode; -+ bool stop_state_16x; -+ bool stop_state_4x; -+ u16 stop_state_counter; -+}; -+ -+struct isp_csi2_timings_cfg_update { -+ bool force_rx_mode; -+ bool stop_state_16x; -+ bool stop_state_4x; -+ bool stop_state_counter; -+}; -+ -+struct isp_csi2_ctrl_cfg { -+ bool vp_clk_enable; -+ bool vp_only_enable; -+ u8 vp_out_ctrl; -+ bool debug_enable; -+ u8 burst_size; -+ enum isp_csi2_frame_mode frame_mode; -+ bool ecc_enable; -+ bool secure_mode; -+ bool if_enable; -+}; -+ -+struct isp_csi2_ctrl_cfg_update { -+ bool vp_clk_enable; -+ bool vp_only_enable; -+ bool vp_out_ctrl; -+ bool debug_enable; -+ bool burst_size; -+ bool frame_mode; -+ bool ecc_enable; -+ bool secure_mode; -+ bool if_enable; -+}; -+ -+struct isp_csi2_cfg { -+ struct isp_csi2_lanes_cfg lanes; -+ struct isp_csi2_phy_cfg phy; -+ struct isp_csi2_ctx_cfg contexts[8]; -+ struct isp_csi2_timings_cfg timings[2]; -+ struct isp_csi2_ctrl_cfg ctrl; -+ struct device *dev; -+}; -+ -+struct isp_csi2_cfg_update { -+ struct isp_csi2_lanes_cfg_update lanes; -+ struct isp_csi2_phy_cfg_update phy; -+ struct isp_csi2_ctx_cfg_update contexts[8]; -+ struct isp_csi2_timings_cfg_update timings[2]; -+ struct isp_csi2_ctrl_cfg_update ctrl; -+}; -+ -+int isp_csi2_complexio_lanes_config(struct isp_csi2_lanes_cfg *reqcfg); -+int isp_csi2_complexio_lanes_update(bool force_update); -+int isp_csi2_complexio_lanes_get(void); -+int isp_csi2_complexio_power_autoswitch(bool enable); -+int isp_csi2_complexio_power(enum isp_csi2_power_cmds power_cmd); -+int isp_csi2_ctrl_config_frame_mode(enum isp_csi2_frame_mode frame_mode); -+int 
isp_csi2_ctrl_config_vp_clk_enable(bool vp_clk_enable); -+int isp_csi2_ctrl_config_vp_only_enable(bool vp_only_enable); -+int isp_csi2_ctrl_config_debug_enable(bool debug_enable); -+int isp_csi2_ctrl_config_burst_size(u8 burst_size); -+int isp_csi2_ctrl_config_ecc_enable(bool ecc_enable); -+int isp_csi2_ctrl_config_secure_mode(bool secure_mode); -+int isp_csi2_ctrl_config_if_enable(bool if_enable); -+int isp_csi2_ctrl_config_vp_out_ctrl(u8 vp_out_ctrl); -+int isp_csi2_ctrl_update(bool force_update); -+int isp_csi2_ctrl_get(void); -+int isp_csi2_ctx_config_virtual_id(u8 ctxnum, u8 virtual_id); -+int isp_csi2_ctx_config_frame_count(u8 ctxnum, u8 frame_count); -+int isp_csi2_ctx_config_format(u8 ctxnum, u32 pixformat); -+int isp_csi2_ctx_config_alpha(u8 ctxnum, u16 alpha); -+int isp_csi2_ctx_config_data_offset(u8 ctxnum, u16 data_offset); -+int isp_csi2_ctx_config_ping_addr(u8 ctxnum, u32 ping_addr); -+int isp_csi2_ctx_config_pong_addr(u8 ctxnum, u32 pong_addr); -+int isp_csi2_ctx_config_eof_enabled(u8 ctxnum, bool eof_enabled); -+int isp_csi2_ctx_config_eol_enabled(u8 ctxnum, bool eol_enabled); -+int isp_csi2_ctx_config_checksum_enabled(u8 ctxnum, bool checksum_enabled); -+int isp_csi2_ctx_config_enabled(u8 ctxnum, bool enabled); -+int isp_csi2_ctx_update(u8 ctxnum, bool force_update); -+int isp_csi2_ctx_get(u8 ctxnum); -+int isp_csi2_ctx_update_all(bool force_update); -+int isp_csi2_ctx_get_all(void); -+int isp_csi2_phy_config(struct isp_csi2_phy_cfg *desiredphyconfig); -+int isp_csi2_calc_phy_cfg0(u32 mipiclk, u32 lbound_hs_settle, -+ u32 ubound_hs_settle); -+int isp_csi2_phy_update(bool force_update); -+int isp_csi2_phy_get(void); -+int isp_csi2_timings_config_forcerxmode(u8 io, bool force_rx_mode); -+int isp_csi2_timings_config_stopstate_16x(u8 io, bool stop_state_16x); -+int isp_csi2_timings_config_stopstate_4x(u8 io, bool stop_state_4x); -+int isp_csi2_timings_config_stopstate_cnt(u8 io, u16 stop_state_counter); -+int isp_csi2_timings_update(u8 io, bool force_update); -+int isp_csi2_timings_get(u8 io); -+int isp_csi2_timings_update_all(bool force_update); -+int isp_csi2_timings_get_all(void); -+void isp_csi2_irq_complexio1_set(int enable); -+void isp_csi2_irq_ctx_set(int enable); -+void isp_csi2_irq_status_set(int enable); -+void isp_csi2_irq_all_set(int enable); -+ -+int isp_csi2_isr(void); -+int isp_csi2_reset(void); -+void isp_csi2_enable(int enable); -+void isp_csi2_regdump(void); -+ -+#endif /* OMAP_ISP_CSI2_H */ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isp.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isp.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isp.h 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,500 @@ -+/* -+ * isp.h -+ * -+ * Top level public header file for ISP Control module in -+ * TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments. -+ * Copyright (C) 2009 Nokia. -+ * -+ * Contributors: -+ * Sameer Venkatraman -+ * Mohit Jalori -+ * Sergio Aguirre -+ * Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#ifndef OMAP_ISP_TOP_H -+#define OMAP_ISP_TOP_H -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+ -+struct isp_pipeline; -+ -+#include "ispstat.h" -+#include "isp_af.h" -+#include "isphist.h" -+#include "ispccdc.h" -+#include "ispreg.h" -+#include "isph3a.h" -+#include "ispresizer.h" -+#include "isppreview.h" -+ -+#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8) -+ -+#define OMAP_ISP_CCDC (1 << 0) -+#define OMAP_ISP_PREVIEW (1 << 1) -+#define OMAP_ISP_RESIZER (1 << 2) -+#define OMAP_ISP_AEWB (1 << 3) -+#define OMAP_ISP_AF (1 << 4) -+#define OMAP_ISP_HIST (1 << 5) -+ -+#define ISP_TOK_TERM 0xFFFFFFFF /* -+ * terminating token for ISP -+ * modules reg list -+ */ -+#define NUM_BUFS VIDEO_MAX_FRAME -+ -+#ifndef CONFIG_ARCH_OMAP3410 -+#define USE_ISP_PREVIEW -+#define USE_ISP_RESZ -+#define is_isppreview_enabled() 1 -+#define is_ispresizer_enabled() 1 -+#else -+#define is_isppreview_enabled() 0 -+#define is_ispresizer_enabled() 0 -+#endif -+ -+#define ISP_BYTES_PER_PIXEL 2 -+#define NUM_ISP_CAPTURE_FORMATS (sizeof(isp_formats) / \ -+ sizeof(isp_formats[0])) -+ -+#define to_isp_device(ptr_module) \ -+ container_of(ptr_module, struct isp_device, ptr_module) -+#define to_device(ptr_module) \ -+ (to_isp_device(ptr_module)->dev) -+ -+typedef int (*isp_vbq_callback_ptr) (struct videobuf_buffer *vb); -+typedef void (*isp_callback_t) (unsigned long status, -+ isp_vbq_callback_ptr arg1, void *arg2); -+ -+enum isp_mem_resources { -+ OMAP3_ISP_IOMEM_MAIN, -+ OMAP3_ISP_IOMEM_CBUFF, -+ OMAP3_ISP_IOMEM_CCP2, -+ OMAP3_ISP_IOMEM_CCDC, -+ OMAP3_ISP_IOMEM_HIST, -+ OMAP3_ISP_IOMEM_H3A, -+ OMAP3_ISP_IOMEM_PREV, -+ OMAP3_ISP_IOMEM_RESZ, -+ OMAP3_ISP_IOMEM_SBL, -+ OMAP3_ISP_IOMEM_CSI2A, -+ OMAP3_ISP_IOMEM_CSI2PHY -+}; -+ -+enum isp_interface_type { -+ ISP_PARLL = 1, -+ ISP_CSIA = 2, -+ ISP_CSIB = 4, -+ ISP_NONE = 8 /* memory input to preview / resizer */ -+}; -+ -+enum isp_irqevents { -+ CSIA = 0x01, -+ CSIB = 0x10, -+ CCDC_VD0 = 0x100, -+ CCDC_VD1 = 0x200, -+ CCDC_VD2 = 0x400, -+ CCDC_ERR = 0x800, -+ H3A_AWB_DONE = 0x2000, -+ H3A_AF_DONE = 0x1000, -+ HIST_DONE = 0x10000, -+ PREV_DONE = 0x100000, -+ LSC_DONE = 0x20000, -+ LSC_PRE_COMP = 0x40000, -+ LSC_PRE_ERR = 0x80000, -+ RESZ_DONE = 0x1000000, -+ SBL_OVF = 0x2000000, -+ MMU_ERR = 0x10000000, -+ OCP_ERR = 0x20000000, -+ HS_VS = 0x80000000 -+}; -+ -+enum isp_callback_type { -+ CBK_CCDC_VD0, -+ CBK_CCDC_VD1, -+ CBK_PREV_DONE, -+ CBK_RESZ_DONE, -+ CBK_MMU_ERR, -+ CBK_HIST_DONE, -+ CBK_HS_VS, -+ CBK_LSC_ISR, -+ CBK_CATCHALL, -+ CBK_CSIA, -+ CBK_CSIB, -+ CBK_END, -+}; -+ -+enum isp_running { -+ ISP_STOPPED, -+ ISP_RUNNING, -+ ISP_STOPPING, -+}; -+ -+/** -+ * struct isp_reg - Structure for ISP register values. -+ * @reg: 32-bit Register address. -+ * @val: 32-bit Register value. -+ */ -+struct isp_reg { -+ enum isp_mem_resources mmio_range; -+ u32 reg; -+ u32 val; -+}; -+ -+/** -+ * struct isp_interface_config - ISP interface configuration. -+ * @ccdc_par_ser: ISP interface type. 0 - Parallel, 1 - CSIA, 2 - CSIB to CCDC. -+ * @par_bridge: CCDC Bridge input control. Parallel interface. -+ * 0 - Disable, 1 - Enable, first byte->cam_d(bits 7 to 0) -+ * 2 - Enable, first byte -> cam_d(bits 15 to 8) -+ * @par_clk_pol: Pixel clock polarity on the parallel interface. -+ * 0 - Non Inverted, 1 - Inverted -+ * @dataline_shift: Data lane shifter. -+ * 0 - No Shift, 1 - CAMEXT[13 to 2]->CAM[11 to 0] -+ * 2 - CAMEXT[13 to 4]->CAM[9 to 0] -+ * 3 - CAMEXT[13 to 6]->CAM[7 to 0] -+ * @hsvs_syncdetect: HS or VS synchronization signal detection. 
-+ * 0 - HS Falling, 1 - HS rising -+ * 2 - VS falling, 3 - VS rising -+ * @strobe: Strobe related parameter. -+ * @prestrobe: PreStrobe related parameter. -+ * @shutter: Shutter related parameter. -+ * @hskip: Horizontal Start Pixel performed in Preview module. -+ * @vskip: Vertical Start Line performed in Preview module. -+ * @wenlog: Store the value for the sensor specific wenlog field. -+ * @wait_hs_vs: Wait for this many hs_vs before anything else in the beginning. -+ * @pixelclk: Pixel data rate from sensor. -+ */ -+struct isp_interface_config { -+ enum isp_interface_type ccdc_par_ser; -+ u8 dataline_shift; -+ u32 hsvs_syncdetect; -+ int strobe; -+ int prestrobe; -+ int shutter; -+ u32 prev_sph; -+ u32 prev_slv; -+ u32 wenlog; -+ int wait_hs_vs; -+ unsigned int pixelclk; -+ union { -+ struct par { -+ unsigned par_bridge:2; -+ unsigned par_clk_pol:1; -+ } par; -+ struct csi { -+ unsigned crc:1; -+ unsigned mode:1; -+ unsigned edge:1; -+ unsigned signalling:1; -+ unsigned strobe_clock_inv:1; -+ unsigned vs_edge:1; -+ unsigned channel:3; -+ unsigned vpclk:2; /* Video port output clock */ -+ unsigned int data_start; -+ unsigned int data_size; -+ u32 format; /* V4L2_PIX_FMT_* */ -+ } csi; -+ } u; -+}; -+ -+struct isp_buf { -+ dma_addr_t isp_addr; -+ void (*complete)(struct videobuf_buffer *vb, void *priv); -+ struct videobuf_buffer *vb; -+ void *priv; -+ u32 vb_state; -+}; -+ -+#define ISP_BUFS_IS_FULL(bufs) \ -+ (((bufs)->queue + 1) % NUM_BUFS == (bufs)->done) -+#define ISP_BUFS_IS_EMPTY(bufs) ((bufs)->queue == (bufs)->done) -+#define ISP_BUFS_IS_LAST(bufs) \ -+ ((bufs)->queue == ((bufs)->done + 1) % NUM_BUFS) -+#define ISP_BUFS_QUEUED(bufs) \ -+ ((((bufs)->done - (bufs)->queue + NUM_BUFS)) % NUM_BUFS) -+#define ISP_BUF_DONE(bufs) ((bufs)->buf + (bufs)->done) -+#define ISP_BUF_NEXT_DONE(bufs) \ -+ ((bufs)->buf + ((bufs)->done + 1) % NUM_BUFS) -+#define ISP_BUF_QUEUE(bufs) ((bufs)->buf + (bufs)->queue) -+#define ISP_BUF_MARK_DONE(bufs) \ -+ (bufs)->done = ((bufs)->done + 1) % NUM_BUFS; -+#define ISP_BUF_MARK_QUEUED(bufs) \ -+ (bufs)->queue = ((bufs)->queue + 1) % NUM_BUFS; -+ -+struct isp_bufs { -+ dma_addr_t isp_addr_capture[VIDEO_MAX_FRAME]; -+ /* queue full: (ispsg.queue + 1) % NUM_BUFS == ispsg.done -+ queue empty: ispsg.queue == ispsg.done */ -+ struct isp_buf buf[NUM_BUFS]; -+ /* Next slot to queue a buffer. */ -+ int queue; -+ /* Buffer that is being processed. */ -+ int done; -+ /* Wait for this many hs_vs before anything else. */ -+ int wait_hs_vs; -+ /* Ignore statistic's interrupts until first good hs_vs. */ -+ int wait_stats; -+}; -+ -+/** -+ * struct ispirq - Structure for containing callbacks to be called in ISP ISR. -+ * @isp_callbk: Array which stores callback functions, indexed by the type of -+ * callback (8 possible types). -+ * @isp_callbk_arg1: Pointer to array containing pointers to the first argument -+ * to be passed to the requested callback function. -+ * @isp_callbk_arg2: Pointer to array containing pointers to the second -+ * argument to be passed to the requested callback function. -+ * -+ * This structure is used to contain all the callback functions related for -+ * each callback type (CBK_CCDC_VD0, CBK_CCDC_VD1, CBK_PREV_DONE, -+ * CBK_RESZ_DONE, CBK_MMU_ERR, CBK_H3A_AWB_DONE, CBK_HIST_DONE, CBK_HS_VS, -+ * CBK_LSC_ISR). 
-+ */ -+struct isp_irq { -+ isp_callback_t isp_callbk[CBK_END]; -+ isp_vbq_callback_ptr isp_callbk_arg1[CBK_END]; -+ void *isp_callbk_arg2[CBK_END]; -+}; -+ -+struct isp_pipeline { -+ unsigned int modules; /* modules in use */ -+ struct v4l2_pix_format pix; /* output pix */ -+ unsigned int ccdc_in_w; -+ unsigned int ccdc_in_h; -+ unsigned int ccdc_out_w; /* ccdc output data width (pixels) */ -+ unsigned int ccdc_out_h; /* ccdc output data height */ -+ unsigned int ccdc_out_w_img; /* ccdc output visible image width */ -+ enum ccdc_input ccdc_in; -+ enum ccdc_output ccdc_out; -+ unsigned int prv_out_w; -+ unsigned int prv_out_h; -+ unsigned int prv_out_w_img; -+ unsigned int prv_out_h_img; -+ unsigned int prv_fmt_avg; -+ enum preview_input prv_in; -+ enum preview_output prv_out; -+ struct v4l2_rect rsz_crop; -+ unsigned int rsz_out_w; -+ unsigned int rsz_out_h; -+ unsigned int rsz_out_w_img; -+ enum resizer_input rsz_in; -+}; -+ -+#define RAW_CAPTURE(isp) \ -+ (!((isp)->pipeline.modules & OMAP_ISP_PREVIEW)) -+ -+/** -+ * struct isp - Structure for storing ISP Control module information -+ * @lock: Spinlock to sync between isr and processes. -+ * @isp_mutex: Semaphore used to get access to the ISP. -+ * @ref_count: Reference counter. -+ * @cam_ick: Pointer to ISP Interface clock. -+ * @cam_fck: Pointer to ISP Functional clock. -+ * -+ * This structure is used to store the OMAP ISP Information. -+ */ -+struct isp_device { -+ struct device *dev; -+ struct isp *isp_obj; -+ -+ /*** platform HW resources ***/ -+ unsigned int irq_num; -+ -+#define mmio_base_main mmio_base[OMAP3_ISP_IOMEM_MAIN] -+#define mmio_cbuff_main mmio_base[OMAP3_ISP_IOMEM_CBUFF] -+#define mmio_ccp2_main mmio_base[OMAP3_ISP_IOMEM_CCP2] -+#define mmio_ccdc_main mmio_base[OMAP3_ISP_IOMEM_CCDC] -+#define mmio_hist_main mmio_base[OMAP3_ISP_IOMEM_HIST] -+#define mmio_h3a_main mmio_base[OMAP3_ISP_IOMEM_H3A] -+#define mmio_prev_main mmio_base[OMAP3_ISP_IOMEM_PREV] -+#define mmio_resz_main mmio_base[OMAP3_ISP_IOMEM_RESZ] -+#define mmio_sbl_main mmio_base[OMAP3_ISP_IOMEM_SBL] -+#define mmio_csi2_main mmio_base[OMAP3_ISP_IOMEM_CSI2A] -+#define mmio_csi2phy_main mmio_base[OMAP3_ISP_IOMEM_CSI2PHY] -+ unsigned long mmio_base[OMAP3_ISP_IOMEM_CSI2PHY + 1]; -+ unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_CSI2PHY + 1]; -+ unsigned long mmio_size[OMAP3_ISP_IOMEM_CSI2PHY + 1]; -+ -+ /* ISP Obj */ -+ spinlock_t lock; /* For handling registered ISP callbacks */ -+ spinlock_t h3a_lock; -+ struct mutex isp_mutex; /* For handling ref_count field */ -+ int ref_count; -+ struct clk *cam_ick; -+ struct clk *cam_mclk; -+ struct clk *dpll4_m5_ck; -+ struct clk *csi2_fck; -+ struct clk *l3_ick; -+ struct isp_interface_config *config; -+ dma_addr_t tmp_buf; -+ size_t tmp_buf_size; -+ unsigned long tmp_buf_offset; -+ struct isp_bufs bufs; -+ struct isp_irq irq; -+ struct isp_pipeline pipeline; -+ u32 interrupts; -+ enum isp_running running; -+ -+ /* ISP modules */ -+ struct isp_af_device isp_af; -+ struct isp_hist_device isp_hist; -+ struct isp_h3a_device isp_h3a; -+ struct isp_res_device isp_res; -+ struct isp_prev_device isp_prev; -+ struct isp_ccdc_device isp_ccdc; -+ -+ struct iommu *iommu; -+}; -+ -+void isp_hist_dma_done(struct device *dev); -+ -+void isp_flush(struct device *dev); -+ -+void isp_start(struct device *dev); -+ -+void isp_stop(struct device *dev); -+ -+int isp_buf_queue(struct device *dev, struct videobuf_buffer *vb, -+ void (*complete)(struct videobuf_buffer *vb, void *priv), -+ void *priv); -+ -+int isp_vbq_setup(struct device *dev, 
struct videobuf_queue *vbq, -+ unsigned int *cnt, unsigned int *size); -+ -+int isp_vbq_prepare(struct device *dev, struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb, enum v4l2_field field); -+ -+void isp_vbq_release(struct device *dev, struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb); -+ -+int isp_set_callback(struct device *dev, enum isp_callback_type type, -+ isp_callback_t callback, isp_vbq_callback_ptr arg1, -+ void *arg2); -+ -+int isp_unset_callback(struct device *dev, enum isp_callback_type type); -+ -+u32 isp_set_xclk(struct device *dev, u32 xclk, u8 xclksel); -+ -+int isp_configure_interface(struct device *dev, -+ struct isp_interface_config *config); -+ -+struct device *isp_get(void); -+ -+int isp_put(void); -+ -+int isp_queryctrl(struct v4l2_queryctrl *a); -+ -+int isp_querymenu(struct v4l2_querymenu *a); -+ -+int isp_g_ctrl(struct device *dev, struct v4l2_control *a); -+ -+int isp_s_ctrl(struct device *dev, struct v4l2_control *a); -+ -+int isp_enum_fmt_cap(struct v4l2_fmtdesc *f); -+ -+int isp_try_fmt_cap(struct device *dev, struct v4l2_pix_format *pix_input, -+ struct v4l2_pix_format *pix_output); -+ -+void isp_g_fmt_cap(struct device *dev, struct v4l2_pix_format *pix); -+ -+int isp_s_fmt_cap(struct device *dev, struct v4l2_pix_format *pix_input, -+ struct v4l2_pix_format *pix_output); -+ -+int isp_g_crop(struct device *dev, struct v4l2_crop *a); -+ -+int isp_s_crop(struct device *dev, struct v4l2_crop *a); -+ -+int isp_try_fmt(struct device *dev, struct v4l2_pix_format *pix_input, -+ struct v4l2_pix_format *pix_output); -+ -+int isp_handle_private(struct device *dev, int cmd, void *arg); -+ -+void isp_save_context(struct device *dev, struct isp_reg *); -+ -+void isp_restore_context(struct device *dev, struct isp_reg *); -+ -+void isp_print_status(struct device *dev); -+ -+int __init isp_ccdc_init(struct device *dev); -+int __init isp_hist_init(struct device *dev); -+int __init isph3a_aewb_init(struct device *dev); -+int __init isp_preview_init(struct device *dev); -+int __init isp_resizer_init(struct device *dev); -+int __init isp_af_init(struct device *dev); -+int __init isp_csi2_init(struct device *dev); -+ -+void isp_ccdc_cleanup(struct device *dev); -+void isp_hist_cleanup(struct device *dev); -+void isph3a_aewb_cleanup(struct device *dev); -+void isp_resizer_cleanup(struct device *dev); -+void isp_af_exit(struct device *dev); -+void isp_csi2_cleanup(struct device *dev); -+ -+/* FIXME: Remove these when iommu supports these directly. 
*/ -+dma_addr_t ispmmu_vmap(struct device *dev, const struct scatterlist *sglist, -+ int sglen); -+void ispmmu_vunmap(struct device *dev, dma_addr_t da); -+ -+static inline -+u32 isp_reg_readl(struct device *dev, enum isp_mem_resources isp_mmio_range, -+ u32 reg_offset) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ return __raw_readl(isp->mmio_base[isp_mmio_range] + reg_offset); -+} -+ -+static inline -+void isp_reg_writel(struct device *dev, u32 reg_value, -+ enum isp_mem_resources isp_mmio_range, u32 reg_offset) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ __raw_writel(reg_value, isp->mmio_base[isp_mmio_range] + reg_offset); -+} -+ -+static inline -+void isp_reg_and(struct device *dev, enum isp_mem_resources mmio_range, u32 reg, -+ u32 and_bits) -+{ -+ u32 v = isp_reg_readl(dev, mmio_range, reg); -+ -+ isp_reg_writel(dev, v & and_bits, mmio_range, reg); -+} -+ -+static inline -+void isp_reg_or(struct device *dev, enum isp_mem_resources mmio_range, u32 reg, -+ u32 or_bits) -+{ -+ u32 v = isp_reg_readl(dev, mmio_range, reg); -+ -+ isp_reg_writel(dev, v | or_bits, mmio_range, reg); -+} -+ -+static inline -+void isp_reg_and_or(struct device *dev, enum isp_mem_resources mmio_range, -+ u32 reg, u32 and_bits, u32 or_bits) -+{ -+ u32 v = isp_reg_readl(dev, mmio_range, reg); -+ -+ isp_reg_writel(dev, (v & and_bits) | or_bits, mmio_range, reg); -+} -+ -+#endif /* OMAP_ISP_TOP_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isphist.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/isphist.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isphist.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isphist.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,774 @@ -+/* -+ * isphist.c -+ * -+ * HISTOGRAM module for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * David Cohen -+ * Sergio Aguirre -+ * Troy Laramy -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#include -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "isphist.h" -+ -+#define HIST_USE_DMA 1 -+ -+/* Structure for saving/restoring histogram module registers */ -+struct isp_reg isphist_reg_list[] = { -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_RADD, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_RADD_OFF, 0}, -+ {OMAP3_ISP_IOMEM_HIST, ISPHIST_H_V_INFO, 0}, -+ {0, ISP_TOK_TERM, 0} -+}; -+ -+static void isp_hist_print_status(struct isp_hist_device *isp_hist); -+ -+static void __isp_hist_enable(struct isp_hist_device *isp_hist, u8 enable) -+{ -+ struct device *dev = to_device(isp_hist); -+ unsigned int pcr; -+ -+ pcr = isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR); -+ -+ /* Set AF_EN bit in PCR Register */ -+ if (enable) -+ pcr |= ISPHIST_PCR_EN; -+ else -+ pcr &= ~ISPHIST_PCR_EN; -+ -+ isp_reg_writel(dev, pcr, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR); -+} -+ -+/** -+ * isp_hist_enable - Enables ISP Histogram submodule operation. -+ * @enable: 1 - Enables the histogram submodule. -+ * -+ * Client should configure all the Histogram registers before calling this -+ * function. -+ **/ -+void isp_hist_enable(struct isp_hist_device *isp_hist, u8 enable) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ -+ if (!isp_hist->config.enable) { -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+ return; -+ } -+ -+ __isp_hist_enable(isp_hist, enable); -+ isp_hist->enabled = enable; -+ -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+} -+ -+/** -+ * isp_hist_suspend - Suspend ISP Histogram submodule. -+ **/ -+void isp_hist_suspend(struct isp_hist_device *isp_hist) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ if (isp_hist->enabled) -+ __isp_hist_enable(isp_hist, 0); -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+} -+ -+void isp_hist_try_enable(struct isp_hist_device *isp_hist) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ if (unlikely(!isp_hist->enabled && isp_hist->config.enable && -+ !isp_hist->waiting_dma)) { -+ isp_hist->update = 1; -+ isp_hist->active_buf = ispstat_buf_next(&isp_hist->stat); -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+ isp_hist_config_registers(isp_hist); -+ isp_hist_enable(isp_hist, 1); -+ } else -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+} -+ -+/** -+ * isp_hist_resume - Resume ISP Histogram submodule. -+ **/ -+void isp_hist_resume(struct isp_hist_device *isp_hist) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ if (isp_hist->enabled) -+ __isp_hist_enable(isp_hist, 1); -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+} -+ -+int isp_hist_busy(struct isp_hist_device *isp_hist) -+{ -+ struct device *dev = to_device(isp_hist); -+ -+ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR) -+ & ISPHIST_PCR_BUSY; -+} -+ -+/** -+ * isp_hist_reset_mem - clear Histogram memory before start stats engine. 
-+ **/ -+static void isp_hist_reset_mem(struct isp_hist_device *isp_hist) -+{ -+ struct device *dev = to_device(isp_hist); -+ unsigned int i; -+ -+ isp_reg_writel(dev, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); -+ -+ /* -+ * By setting it, the histogram internal buffer is being cleared at the -+ * same time it's being read. This bit must be cleared afterwards. -+ */ -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLR_EN); -+ -+ /* -+ * We'll clear 4 words at each iteration for optimization. It avoids -+ * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4. -+ */ -+ for (i = HIST_MEM_SIZE / 4; i > 0; i--) { -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ } -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, -+ ~ISPHIST_CNT_CLR_EN); -+} -+ -+static void isp_hist_dma_config(struct isp_hist_device *isp_hist) -+{ -+ struct omap_dma_channel_params *dma_config = &isp_hist->dma_config; -+ -+ dma_config->data_type = OMAP_DMA_DATA_TYPE_S32; -+ dma_config->sync_mode = OMAP_DMA_SYNC_ELEMENT; -+ dma_config->elem_count = (isp_hist->buf_size / sizeof(u32)); -+ dma_config->frame_count = 1; -+ dma_config->src_amode = OMAP_DMA_AMODE_CONSTANT; -+ dma_config->src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA; -+ dma_config->dst_amode = OMAP_DMA_AMODE_POST_INC; -+ dma_config->src_or_dst_synch = OMAP_DMA_SRC_SYNC; -+} -+ -+/** -+ * isp_hist_set_regs - Helper function to update Histogram registers. -+ **/ -+void isp_hist_config_registers(struct isp_hist_device *isp_hist) -+{ -+ struct device *dev = to_device(isp_hist); -+ unsigned long irqflags; -+ -+ if (!isp_hist->update || !isp_hist->config.enable) -+ return; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ isp_hist->num_acc_frames = isp_hist->config.num_acc_frames; -+ isp_hist_reset_mem(isp_hist); -+ -+ isp_reg_writel(dev, isp_hist->regs.cnt, OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_CNT); -+ isp_reg_writel(dev, isp_hist->regs.wb_gain, -+ OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN); -+ isp_reg_writel(dev, isp_hist->regs.reg_hor[0], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R0_HORZ); -+ isp_reg_writel(dev, isp_hist->regs.reg_ver[0], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R0_VERT); -+ isp_reg_writel(dev, isp_hist->regs.reg_hor[1], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R1_HORZ); -+ isp_reg_writel(dev, isp_hist->regs.reg_ver[1], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R1_VERT); -+ isp_reg_writel(dev, isp_hist->regs.reg_hor[2], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R2_HORZ); -+ isp_reg_writel(dev, isp_hist->regs.reg_ver[2], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R2_VERT); -+ isp_reg_writel(dev, isp_hist->regs.reg_hor[3], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R3_HORZ); -+ isp_reg_writel(dev, isp_hist->regs.reg_ver[3], OMAP3_ISP_IOMEM_HIST, -+ ISPHIST_R3_VERT); -+ isp_reg_writel(dev, isp_hist->regs.hist_radd, -+ OMAP3_ISP_IOMEM_HIST, ISPHIST_RADD); -+ isp_reg_writel(dev, isp_hist->regs.hist_radd_off, -+ OMAP3_ISP_IOMEM_HIST, ISPHIST_RADD_OFF); -+ isp_reg_writel(dev, isp_hist->regs.h_v_info, -+ OMAP3_ISP_IOMEM_HIST, ISPHIST_H_V_INFO); -+ -+ isp_hist_dma_config(isp_hist); -+ -+ isp_hist->update = 0; -+ isp_hist->stat.config_counter++; -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+ -+ isp_hist_print_status(isp_hist); -+} -+ -+static void isp_hist_dma_cb(int lch, u16 ch_status, void *data) -+{ -+ struct isp_hist_device *isp_hist = data; -+ struct device *dev = to_device(isp_hist); -+ -+ if 
(ch_status & ~OMAP_DMA_BLOCK_IRQ) { -+ dev_dbg(dev, "hist: DMA error. status = %02x\n", ch_status); -+ omap_stop_dma(lch); -+ isp_hist_reset_mem(isp_hist); -+ } else { -+ int ret; -+ -+ ret = ispstat_buf_queue(&isp_hist->stat); -+ isp_hist->active_buf = ispstat_buf_next(&isp_hist->stat); -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, -+ ~ISPHIST_CNT_CLR_EN); -+ if (!ret) -+ isp_hist_dma_done(dev); -+ } -+ isp_hist->waiting_dma = 0; -+} -+ -+static int isp_hist_buf_dma(struct isp_hist_device *isp_hist) -+{ -+ struct device *dev = to_device(isp_hist); -+ dma_addr_t dma_addr = isp_hist->active_buf->dma_addr; -+ -+ if (!dma_addr) { -+ dev_dbg(dev, "hist: invalid DMA buffer address\n"); -+ isp_hist_reset_mem(isp_hist); -+ return HIST_NO_BUF; -+ } -+ -+ isp_hist->waiting_dma = 1; -+ isp_reg_writel(dev, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, -+ ISPHIST_CNT_CLR_EN); -+ isp_flush(dev); -+ isp_hist->dma_config.dst_start = dma_addr; -+ omap_set_dma_params(isp_hist->dma_ch, &isp_hist->dma_config); -+ omap_start_dma(isp_hist->dma_ch); -+ -+ return HIST_BUF_WAITING_DMA; -+} -+ -+static int isp_hist_buf_pio(struct isp_hist_device *isp_hist) -+{ -+ struct device *dev = to_device(isp_hist); -+ u32 *buf = isp_hist->active_buf->virt_addr; -+ unsigned int i; -+ int ret; -+ -+ if (!buf) { -+ dev_dbg(dev, "hist: invalid PIO buffer address\n"); -+ isp_hist_reset_mem(isp_hist); -+ return HIST_NO_BUF; -+ } -+ -+ isp_reg_writel(dev, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); -+ -+ /* -+ * By setting it, the histogram internal buffer is being cleared at the -+ * same time it's being read. This bit must be cleared just after all -+ * data is acquired. -+ */ -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, -+ ISPHIST_CNT_CLR_EN); -+ -+ /* -+ * We'll read 4 times a 4-bytes-word at each iteration for -+ * optimization. It avoids 3/4 of the jumps. We also know buf_size is -+ * divisible by 16. -+ */ -+ for (i = isp_hist->buf_size / 16; i > 0; i--) { -+ *buf++ = isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ *buf++ = isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ *buf++ = isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ *buf++ = isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); -+ } -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, -+ ~ISPHIST_CNT_CLR_EN); -+ -+ ret = ispstat_buf_queue(&isp_hist->stat); -+ isp_hist->active_buf = ispstat_buf_next(&isp_hist->stat); -+ -+ if (ret) -+ return HIST_NO_BUF; -+ else -+ return HIST_BUF_DONE; -+} -+ -+/** -+ * isp_hist_isr - Callback from ISP driver for HIST interrupt. 
-+ **/ -+int isp_hist_buf_process(struct isp_hist_device *isp_hist) -+{ -+ unsigned long irqflags; -+ int ret = HIST_NO_BUF; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ -+ if (isp_hist->invalid_buf || !isp_hist->config.enable) { -+ isp_hist->invalid_buf = 0; -+ isp_hist_reset_mem(isp_hist); -+ goto out_invalid; -+ } -+ -+ if (--(isp_hist->num_acc_frames)) -+ goto out_acc; -+ -+ if (isp_hist->use_dma) -+ ret = isp_hist_buf_dma(isp_hist); -+ else -+ ret = isp_hist_buf_pio(isp_hist); -+ -+out_invalid: -+ isp_hist->num_acc_frames = isp_hist->config.num_acc_frames; -+out_acc: -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+ -+ return ret; -+} -+ -+void isp_hist_mark_invalid_buf(struct isp_hist_device *isp_hist) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ isp_hist->invalid_buf = 1; -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+} -+ -+/** -+ * isp_hist_validate_params - Helper function to check user given params. -+ * @user_cfg: Pointer to user configuration structure. -+ * -+ * Returns 0 on success configuration. -+ **/ -+static int isp_hist_validate_params(struct isp_hist_config *user_cfg) -+{ -+ int c; -+ -+ if (user_cfg->source > HIST_SOURCE_MEM) -+ return -EINVAL; -+ -+ if (user_cfg->source == HIST_SOURCE_MEM) { -+ if ((user_cfg->input_bit_width < HIST_MIN_BIT_WIDTH) || -+ (user_cfg->input_bit_width > HIST_MAX_BIT_WIDTH)) -+ return -EINVAL; -+ -+ /* Should be in 32 byte boundary if source is mem */ -+ if ((user_cfg->hist_radd & ~ISP_32B_BOUNDARY_BUF) || -+ (user_cfg->hist_radd_off & ~ISP_32B_BOUNDARY_OFFSET)) -+ return -EINVAL; -+ } else if (user_cfg->input_bit_width != 10) /* CCDC must be 10bits */ -+ return -EINVAL; -+ -+ if (user_cfg->cfa > HIST_CFA_FOVEONX3) -+ return -EINVAL; -+ -+ /* Regions size and position */ -+ -+ if ((user_cfg->num_regions < HIST_MIN_REGIONS) || -+ (user_cfg->num_regions > HIST_MAX_REGIONS)) -+ return -EINVAL; -+ -+ /* Regions */ -+ for (c = 0; c < user_cfg->num_regions; c++) { -+ if (user_cfg->reg_hor[c] & ~ISPHIST_REGHORIZ_HEND_MASK & -+ ~ISPHIST_REGHORIZ_HSTART_MASK) -+ return -EINVAL; -+ if ((user_cfg->reg_hor[c] & ISPHIST_REGHORIZ_HEND_MASK) <= -+ ((user_cfg->reg_hor[c] & ISPHIST_REGHORIZ_HSTART_MASK) >> -+ ISPHIST_REGHORIZ_HSTART_SHIFT)) -+ return -EINVAL; -+ if (user_cfg->reg_ver[c] & ~ISPHIST_REGVERT_VEND_MASK & -+ ~ISPHIST_REGVERT_VSTART_MASK) -+ return -EINVAL; -+ if ((user_cfg->reg_ver[c] & ISPHIST_REGVERT_VEND_MASK) <= -+ ((user_cfg->reg_ver[c] & ISPHIST_REGVERT_VSTART_MASK) >> -+ ISPHIST_REGVERT_VSTART_SHIFT)) -+ return -EINVAL; -+ } -+ -+ switch (user_cfg->num_regions) { -+ case 1: -+ if (user_cfg->hist_bins > HIST_BINS_256) -+ return -EINVAL; -+ break; -+ case 2: -+ if (user_cfg->hist_bins > HIST_BINS_128) -+ return -EINVAL; -+ break; -+ default: /* 3 or 4 */ -+ if (user_cfg->hist_bins > HIST_BINS_64) -+ return -EINVAL; -+ break; -+ } -+ -+ return 0; -+} -+ -+static int isp_hist_comp_params(struct isp_hist_device *isp_hist, -+ struct isp_hist_config *user_cfg) -+{ -+ struct isp_hist_config *cur_cfg = &isp_hist->config; -+ int c; -+ -+ if ((cur_cfg->source && !user_cfg->source) || -+ (!cur_cfg->source && user_cfg->source)) -+ return 1; -+ -+ if (cur_cfg->input_bit_width != user_cfg->input_bit_width) -+ return 1; -+ -+ if (user_cfg->source) { -+ if (cur_cfg->hist_h_v_info != user_cfg->hist_h_v_info) -+ return 1; -+ if (cur_cfg->hist_radd != user_cfg->hist_radd) -+ return 1; -+ if (cur_cfg->hist_radd_off != user_cfg->hist_radd_off) -+ return 1; -+ } -+ -+ if (cur_cfg->cfa != 
user_cfg->cfa) -+ return 1; -+ -+ if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames) -+ return 1; -+ -+ if (cur_cfg->hist_bins != user_cfg->hist_bins) -+ return 1; -+ -+ for (c = 0; c < HIST_MAX_WG; c++) { -+ if (c == 3 && user_cfg->cfa == HIST_CFA_FOVEONX3) -+ break; -+ else if (cur_cfg->wg[c] != user_cfg->wg[c]) -+ return 1; -+ } -+ -+ if (cur_cfg->num_regions != user_cfg->num_regions) -+ return 1; -+ -+ /* Regions */ -+ for (c = 0; c < user_cfg->num_regions; c++) { -+ if (cur_cfg->reg_hor[c] != user_cfg->reg_hor[c]) -+ return 1; -+ if (cur_cfg->reg_ver[c] != user_cfg->reg_ver[c]) -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/** -+ * isp_hist_update_params - Helper function to check and store user given params. -+ * @user_cfg: Pointer to user configuration structure. -+ * -+ * Returns 0 on success configuration. -+ **/ -+static void isp_hist_update_params(struct isp_hist_device *isp_hist, -+ struct isp_hist_config *user_cfg) -+{ -+ int bit_shift; -+ int c; -+ -+ if (!isp_hist_comp_params(isp_hist, user_cfg)) { -+ isp_hist->config.enable = user_cfg->enable; -+ return; -+ } -+ -+ memcpy(&isp_hist->config, user_cfg, sizeof(*user_cfg)); -+ -+ if (user_cfg->input_bit_width > HIST_MIN_BIT_WIDTH) -+ WRITE_DATA_SIZE(isp_hist->regs.cnt, 0); -+ else -+ WRITE_DATA_SIZE(isp_hist->regs.cnt, 1); -+ -+ WRITE_SOURCE(isp_hist->regs.cnt, user_cfg->source); -+ -+ if (user_cfg->source == HIST_SOURCE_MEM) { -+ WRITE_HV_INFO(isp_hist->regs.h_v_info, user_cfg->hist_h_v_info); -+ WRITE_RADD(isp_hist->regs.hist_radd, user_cfg->hist_radd); -+ WRITE_RADD_OFF(isp_hist->regs.hist_radd_off, -+ user_cfg->hist_radd_off); -+ } -+ -+ WRITE_CFA(isp_hist->regs.cnt, user_cfg->cfa); -+ -+ WRITE_WG0(isp_hist->regs.wb_gain, user_cfg->wg[0]); -+ WRITE_WG1(isp_hist->regs.wb_gain, user_cfg->wg[1]); -+ WRITE_WG2(isp_hist->regs.wb_gain, user_cfg->wg[2]); -+ if (user_cfg->cfa == HIST_CFA_BAYER) -+ WRITE_WG3(isp_hist->regs.wb_gain, user_cfg->wg[3]); -+ -+ /* Regions size and position */ -+ for (c = 0; c < HIST_MAX_REGIONS; c++) { -+ if (c < user_cfg->num_regions) { -+ WRITE_REG_HORIZ(isp_hist->regs.reg_hor[c], -+ user_cfg->reg_hor[c]); -+ WRITE_REG_VERT(isp_hist->regs.reg_ver[c], -+ user_cfg->reg_ver[c]); -+ } else { -+ isp_hist->regs.reg_hor[c] = 0; -+ isp_hist->regs.reg_ver[c] = 0; -+ } -+ } -+ -+ WRITE_NUM_BINS(isp_hist->regs.cnt, user_cfg->hist_bins); -+ switch (user_cfg->hist_bins) { -+ case HIST_BINS_256: -+ bit_shift = user_cfg->input_bit_width - 8; -+ break; -+ case HIST_BINS_128: -+ bit_shift = user_cfg->input_bit_width - 7; -+ break; -+ case HIST_BINS_64: -+ bit_shift = user_cfg->input_bit_width - 6; -+ break; -+ default: /* HIST_BINS_32 */ -+ bit_shift = user_cfg->input_bit_width - 5; -+ break; -+ } -+ WRITE_BIT_SHIFT(isp_hist->regs.cnt, bit_shift); -+ -+ isp_hist->update = 1; -+} -+ -+/** -+ * isp_hist_configure - API to configure HIST registers. -+ * @histcfg: Pointer to user configuration structure. -+ * -+ * Returns 0 on success configuration. 
-+ **/ -+int isp_hist_config(struct isp_hist_device *isp_hist, -+ struct isp_hist_config *histcfg) -+{ -+ struct device *dev = to_device(isp_hist); -+ unsigned long irqflags; -+ int ret = 0; -+ unsigned int size; -+ int use_dma = HIST_USE_DMA; -+ const unsigned int size_bins[] = -+ { HIST_MEM_SIZE_BINS(32), HIST_MEM_SIZE_BINS(64), -+ HIST_MEM_SIZE_BINS(128), HIST_MEM_SIZE_BINS(256) }; -+ -+ if (!histcfg) { -+ dev_dbg(dev, "hist: Null argument in configuration.\n"); -+ return -EINVAL; -+ } -+ -+ /* Check Parameters */ -+ ret = isp_hist_validate_params(histcfg); -+ if (ret) { -+ dev_dbg(dev, "hist: wrong configure params received.\n"); -+ return ret; -+ } -+ -+ size = size_bins[histcfg->hist_bins] * histcfg->num_regions; -+ -+ /* Cannot use DMA if no channel is available */ -+ if (unlikely(HIST_USE_DMA && (isp_hist->dma_ch < 0))) -+ use_dma = 0; -+ -+ /* Alloc buffers */ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ if (isp_hist->waiting_dma) { -+ omap_stop_dma(isp_hist->dma_ch); -+ isp_hist->waiting_dma = 0; -+ } -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+ -+ ret = ispstat_bufs_alloc(&isp_hist->stat, size, use_dma); -+ if (ret) { -+ if (use_dma) -+ ret = ispstat_bufs_alloc(&isp_hist->stat, size, 0); -+ -+ if (ret) { -+ dev_err(dev, "hist: unable to alloc buffers.\n"); -+ isp_hist->config.enable = 0; -+ return ret; -+ } else { -+ use_dma = 0; -+ dev_dbg(dev, "hist: unable to alloc buffers for DMA. " -+ "PIO will be used.\n"); -+ } -+ } -+ -+ spin_lock_irqsave(&isp_hist->lock, irqflags); -+ isp_hist->buf_size = size; -+ isp_hist->use_dma = use_dma; -+ isp_hist_update_params(isp_hist, histcfg); -+ spin_unlock_irqrestore(&isp_hist->lock, irqflags); -+ -+ return 0; -+} -+ -+/** -+ * isp_hist_request_statistics - Request statistics in Histogram. -+ * @histdata: Pointer to data structure. -+ * -+ * This API allows the user to request for histogram statistics. -+ * -+ * Returns 0 on successful request. -+ **/ -+int isp_hist_request_statistics(struct isp_hist_device *isp_hist, -+ struct isp_hist_data *histdata) -+{ -+ struct device *dev = to_device(isp_hist); -+ struct ispstat_buffer *buf; -+ -+ if (!isp_hist->config.enable) { -+ dev_dbg(dev, "hist: statistics requested while engine is not " -+ "configured\n"); -+ return -EINVAL; -+ } -+ -+ if (histdata->update & REQUEST_STATISTICS) { -+ buf = ispstat_buf_get(&isp_hist->stat, -+ (void *)histdata->hist_statistics_buf, -+ histdata->frame_number); -+ if (IS_ERR(buf)) -+ return PTR_ERR(buf); -+ -+ histdata->ts = buf->ts; -+ histdata->config_counter = buf->config_counter; -+ histdata->frame_number = buf->frame_number; -+ -+ ispstat_buf_release(&isp_hist->stat); -+ } -+ -+ histdata->curr_frame = isp_hist->stat.frame_number; -+ -+ return 0; -+} -+ -+/** -+ * isp_hist_init - Module Initialization. -+ * -+ * Returns 0 if successful. -+ **/ -+int __init isp_hist_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_hist_device *isp_hist = &isp->isp_hist; -+ int ret = -1; -+ -+ if (HIST_USE_DMA) -+ ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST", -+ isp_hist_dma_cb, isp_hist, -+ &isp_hist->dma_ch); -+ if (ret) { -+ if (HIST_USE_DMA) -+ dev_info(dev, "hist: DMA request channel failed. 
Using " -+ "PIO only.\n"); -+ isp_hist->dma_ch = -1; -+ } else { -+ dev_dbg(dev, "hist: DMA channel = %d\n", isp_hist->dma_ch); -+ omap_enable_dma_irq(isp_hist->dma_ch, OMAP_DMA_BLOCK_IRQ); -+ } -+ -+ spin_lock_init(&isp_hist->lock); -+ ret = ispstat_init(dev, "HIST", &isp_hist->stat, HIST_MAX_BUFF, -+ MAX_FRAME_COUNT); -+ if (ret && (isp_hist->dma_ch >= 0)) -+ omap_free_dma(isp_hist->dma_ch); -+ -+ return ret; -+} -+ -+/** -+ * isp_hist_cleanup - Module cleanup. -+ **/ -+void isp_hist_cleanup(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ isp->isp_hist.active_buf = NULL; -+ ispstat_free(&isp->isp_hist.stat); -+ if (isp->isp_hist.dma_ch >= 0) -+ omap_free_dma(isp->isp_hist.dma_ch); -+} -+ -+/** -+ * isp_hist_save_context - Saves the values of the histogram module registers. -+ **/ -+void isp_hist_save_context(struct device *dev) -+{ -+ isp_save_context(dev, isphist_reg_list); -+} -+ -+/** -+ * isp_hist_restore_context - Restores the values of the histogram module regs. -+ **/ -+void isp_hist_restore_context(struct device *dev) -+{ -+ isp_restore_context(dev, isphist_reg_list); -+} -+ -+/** -+ * isp_hist_print_status - Debug print -+ **/ -+static void isp_hist_print_status(struct isp_hist_device *isp_hist) -+{ -+#ifdef ISP_HIST_DEBUG -+ struct device *dev = to_device(isp_hist); -+ -+ dev_dbg(dev, "hist: ISPHIST_PCR = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)); -+ dev_dbg(dev, "hist: ISPHIST_CNT = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT)); -+ dev_dbg(dev, "hist: ISPHIST_WB_GAIN = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN)); -+ dev_dbg(dev, "hist: ISPHIST_R0_HORZ = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ)); -+ dev_dbg(dev, "hist: ISPHIST_R0_VERT = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT)); -+ dev_dbg(dev, "hist: ISPHIST_R1_HORZ = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ)); -+ dev_dbg(dev, "hist: ISPHIST_R1_VERT = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT)); -+ dev_dbg(dev, "hist: ISPHIST_R2_HORZ = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ)); -+ dev_dbg(dev, "hist: ISPHIST_R2_VERT = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT)); -+ dev_dbg(dev, "hist: ISPHIST_R3_HORZ = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ)); -+ dev_dbg(dev, "hist: ISPHIST_R3_VERT = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT)); -+ dev_dbg(dev, "hist: ISPHIST_RADD = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_RADD)); -+ dev_dbg(dev, "hist: ISPHIST_RADD_OFF = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_RADD_OFF)); -+ dev_dbg(dev, "hist: ISPHIST_H_V_INFO = 0x%08x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_H_V_INFO)); -+#endif -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isphist.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/isphist.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isphist.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isphist.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,159 @@ -+/* -+ * isphist.h -+ * -+ * Header file for HISTOGRAM module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. 
-+ * -+ * Contributors: -+ * David Cohen -+ * Sergio Aguirre -+ * Troy Laramy -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef OMAP_ISP_HIST_H -+#define OMAP_ISP_HIST_H -+ -+#include -+#include -+ -+#include "ispstat.h" -+ -+#define ISPHIST_PCR_EN (1 << 0) -+#define ISPHIST_CNT_CLR_EN (1 << 7) -+ -+#define WRITE_SOURCE(reg, source) \ -+ (reg = (reg & ~(ISPHIST_CNT_SOURCE_MASK)) \ -+ | (source << ISPHIST_CNT_SOURCE_SHIFT)) -+ -+#define WRITE_HV_INFO(reg, hv_info) \ -+ (reg = ((reg & ~(ISPHIST_HV_INFO_MASK)) \ -+ | (hv_info & ISPHIST_HV_INFO_MASK))) -+ -+#define WRITE_RADD(reg, radd) \ -+ (reg = (reg & ~(ISPHIST_RADD_MASK)) \ -+ | (radd << ISPHIST_RADD_SHIFT)) -+ -+#define WRITE_RADD_OFF(reg, radd_off) \ -+ (reg = (reg & ~(ISPHIST_RADD_OFF_MASK)) \ -+ | (radd_off << ISPHIST_RADD_OFF_SHIFT)) -+ -+#define WRITE_BIT_SHIFT(reg, bit_shift) \ -+ (reg = (reg & ~(ISPHIST_CNT_SHIFT_MASK)) \ -+ | (bit_shift << ISPHIST_CNT_SHIFT_SHIFT)) -+ -+#define WRITE_DATA_SIZE(reg, data_size) \ -+ (reg = (reg & ~(ISPHIST_CNT_DATASIZE_MASK)) \ -+ | (data_size << ISPHIST_CNT_DATASIZE_SHIFT)) -+ -+#define WRITE_NUM_BINS(reg, num_bins) \ -+ (reg = (reg & ~(ISPHIST_CNT_BINS_MASK)) \ -+ | (num_bins << ISPHIST_CNT_BINS_SHIFT)) -+ -+#define WRITE_CFA(reg, cfa) \ -+ (reg = (reg & ~(ISPHIST_CNT_CFA_MASK)) \ -+ | (cfa << ISPHIST_CNT_CFA_SHIFT)) -+ -+#define WRITE_WG0(reg, reg_wb_gain) \ -+ reg = ((reg & ~(ISPHIST_WB_GAIN_WG00_MASK)) \ -+ | (reg_wb_gain << ISPHIST_WB_GAIN_WG00_SHIFT)) -+ -+#define WRITE_WG1(reg, reg_wb_gain) \ -+ (reg = (reg & ~(ISPHIST_WB_GAIN_WG01_MASK)) \ -+ | (reg_wb_gain << ISPHIST_WB_GAIN_WG01_SHIFT)) -+ -+#define WRITE_WG2(reg, reg_wb_gain) \ -+ (reg = (reg & ~(ISPHIST_WB_GAIN_WG02_MASK)) \ -+ | (reg_wb_gain << ISPHIST_WB_GAIN_WG02_SHIFT)) -+ -+#define WRITE_WG3(reg, reg_wb_gain) \ -+ (reg = (reg & ~(ISPHIST_WB_GAIN_WG03_MASK)) \ -+ | (reg_wb_gain << ISPHIST_WB_GAIN_WG03_SHIFT)) -+ -+#define WRITE_REG_HORIZ(reg, reg_n_hor) \ -+ (reg = ((reg & ~ISPHIST_REGHORIZ_MASK) \ -+ | (reg_n_hor & ISPHIST_REGHORIZ_MASK))) -+ -+#define WRITE_REG_VERT(reg, reg_n_vert) \ -+ (reg = ((reg & ~ISPHIST_REGVERT_MASK) \ -+ | (reg_n_vert & ISPHIST_REGVERT_MASK))) -+ -+/** -+ * struct isp_hist_regs - Current value of Histogram configuration registers. -+ * @pcr: Peripheral control register. -+ * @cnt: Histogram control register. -+ * @wb_gain: Histogram white balance gain register. -+ * @reg_hor[]: Region N horizontal register. -+ * @reg_ver[]: Region N vertical register. -+ * @hist_radd: Address register. When input data comes from mem. -+ * @hist_radd_off: Address offset register. When input data comes from mem. -+ * @h_v_info: Image size register. When input data comes from mem. -+ */ -+struct isp_hist_regs { -+ u32 pcr; -+ u32 cnt; -+ u32 wb_gain; -+ u32 reg_hor[HIST_MAX_REGIONS]; -+ u32 reg_ver[HIST_MAX_REGIONS]; -+ u32 hist_radd; -+ u32 hist_radd_off; -+ u32 h_v_info; -+}; -+ -+/** -+ * struct isp_hist_status - Histogram status. -+ * @hist_enable: Enables the histogram module. -+ * @initialized: Flag to indicate that the module is correctly initialized. -+ * @frame_cnt: Actual frame count. 
-+ * @num_acc_frames: Num accumulated image frames per hist frame -+ * @completed: Flag to indicate if a frame request is completed. -+ */ -+struct isp_hist_device { -+ u8 enabled; -+ u8 update; -+ u8 num_acc_frames; -+ u8 waiting_dma; -+ u8 invalid_buf; -+ u8 use_dma; -+ int dma_ch; -+ struct timeval ts; -+ -+ struct omap_dma_channel_params dma_config; -+ struct isp_hist_regs regs; -+ struct isp_hist_config config; -+ struct ispstat_buffer *active_buf; -+ unsigned int buf_size; -+ struct ispstat stat; -+ -+ spinlock_t lock; /* serialize access to hist device's fields */ -+}; -+ -+#define HIST_BUF_DONE 0 -+#define HIST_NO_BUF 1 -+#define HIST_BUF_WAITING_DMA 2 -+ -+int isp_hist_busy(struct isp_hist_device *isp_hist); -+void isp_hist_enable(struct isp_hist_device *isp_hist, u8 enable); -+void isp_hist_try_enable(struct isp_hist_device *isp_hist); -+int isp_hist_busy(struct isp_hist_device *isp_hist); -+int isp_hist_buf_process(struct isp_hist_device *isp_hist); -+void isp_hist_mark_invalid_buf(struct isp_hist_device *isp_hist); -+void isp_hist_config_registers(struct isp_hist_device *isp_hist); -+void isp_hist_suspend(struct isp_hist_device *isp_hist); -+void isp_hist_resume(struct isp_hist_device *isp_hist); -+void isp_hist_save_context(struct device *dev); -+void isp_hist_restore_context(struct device *dev); -+int isp_hist_config(struct isp_hist_device *isp_hist, -+ struct isp_hist_config *histcfg); -+int isp_hist_request_statistics(struct isp_hist_device *isp_hist, -+ struct isp_hist_data *histdata); -+ -+#endif /* OMAP_ISP_HIST */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isph3a.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/isph3a.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isph3a.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isph3a.c 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,603 @@ -+/* -+ * isph3a.c -+ * -+ * H3A module for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Troy Laramy -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#include -+#include -+ -+#include "isp.h" -+ -+/* Structure for saving/restoring h3a module registers */ -+static struct isp_reg isph3a_reg_list[] = { -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR, 0}, /* Should be the first one */ -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINSTART, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWSUBWIN, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAXSTART, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFBUFST, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF010, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF032, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF054, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF076, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF098, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF110, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF132, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF154, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF176, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF198, 0}, -+ {OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010, 0}, -+ {0, ISP_TOK_TERM, 0} -+}; -+ -+static void isph3a_print_status(struct isp_h3a_device *isp_h3a); -+ -+void __isph3a_aewb_enable(struct isp_h3a_device *isp_h3a, u8 enable) -+{ -+ struct device *dev = to_device(isp_h3a); -+ u32 pcr = isp_reg_readl(dev, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR); -+ -+ if (enable) -+ pcr |= ISPH3A_PCR_AEW_EN; -+ else -+ pcr &= ~ISPH3A_PCR_AEW_EN; -+ isp_reg_writel(dev, pcr, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR); -+} -+ -+/** -+ * isph3a_aewb_enable - Enables AE, AWB engine in the H3A module. -+ * @enable: 1 - Enables the AE & AWB engine. -+ * -+ * Client should configure all the AE & AWB registers in H3A before this. -+ **/ -+void isph3a_aewb_enable(struct isp_h3a_device *isp_h3a, u8 enable) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(isp_h3a->lock, irqflags); -+ -+ if (!isp_h3a->aewb_config_local.aewb_enable && enable) { -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+ return; -+ } -+ -+ __isph3a_aewb_enable(isp_h3a, enable); -+ isp_h3a->enabled = enable; -+ -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+} -+ -+/** -+ * isph3a_aewb_suspend - Suspend AE, AWB engine in the H3A module. -+ **/ -+void isph3a_aewb_suspend(struct isp_h3a_device *isp_h3a) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(isp_h3a->lock, flags); -+ -+ if (isp_h3a->enabled) -+ __isph3a_aewb_enable(isp_h3a, 0); -+ -+ spin_unlock_irqrestore(isp_h3a->lock, flags); -+} -+ -+/** -+ * isph3a_aewb_resume - Resume AE, AWB engine in the H3A module. 
-+ **/ -+void isph3a_aewb_resume(struct isp_h3a_device *isp_h3a) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(isp_h3a->lock, flags); -+ -+ if (isp_h3a->enabled) -+ __isph3a_aewb_enable(isp_h3a, 1); -+ -+ spin_unlock_irqrestore(isp_h3a->lock, flags); -+} -+ -+int isph3a_aewb_busy(struct isp_h3a_device *isp_h3a) -+{ -+ struct device *dev = to_device(isp_h3a); -+ -+ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR) -+ & ISPH3A_PCR_BUSYAEAWB; -+} -+ -+void isph3a_aewb_try_enable(struct isp_h3a_device *isp_h3a) -+{ -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(isp_h3a->lock, irqflags); -+ if (!isp_h3a->enabled && isp_h3a->aewb_config_local.aewb_enable) { -+ isp_h3a->update = 1; -+ isp_h3a->buf_next = ispstat_buf_next(&isp_h3a->stat); -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+ isph3a_aewb_config_registers(isp_h3a); -+ isph3a_aewb_enable(isp_h3a, 1); -+ } else -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+} -+ -+/** -+ * isph3a_update_wb - Updates WB parameters. -+ * -+ * Needs to be called when no ISP Preview processing is taking place. -+ **/ -+void isph3a_update_wb(struct isp_h3a_device *isp_h3a) -+{ -+ struct isp_device *isp = to_isp_device(isp_h3a); -+ -+ if (isp_h3a->wb_update) { -+ /* FIXME: Get the preview crap out of here!!! */ -+ isppreview_config_whitebalance(&isp->isp_prev, -+ isp_h3a->h3awb_update); -+ isp_h3a->wb_update = 0; -+ } -+ return; -+} -+EXPORT_SYMBOL(isph3a_update_wb); -+ -+/** -+ * isph3a_aewb_update_regs - Helper function to update h3a registers. -+ **/ -+void isph3a_aewb_config_registers(struct isp_h3a_device *isp_h3a) -+{ -+ struct device *dev = to_device(isp_h3a); -+ unsigned long irqflags; -+ -+ if (!isp_h3a->aewb_config_local.aewb_enable) -+ return; -+ -+ spin_lock_irqsave(isp_h3a->lock, irqflags); -+ -+ isp_reg_writel(dev, isp_h3a->buf_next->iommu_addr, -+ OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST); -+ -+ if (!isp_h3a->update) { -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+ return; -+ } -+ -+ isp_reg_writel(dev, isp_h3a->regs.win1, OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWWIN1); -+ isp_reg_writel(dev, isp_h3a->regs.start, OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWINSTART); -+ isp_reg_writel(dev, isp_h3a->regs.blk, OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWINBLK); -+ isp_reg_writel(dev, isp_h3a->regs.subwin, OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWSUBWIN); -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR, -+ ~ISPH3A_PCR_AEW_MASK, isp_h3a->regs.pcr); -+ -+ ispstat_bufs_set_size(&isp_h3a->stat, isp_h3a->buf_size); -+ isp_h3a->update = 0; -+ isp_h3a->stat.config_counter++; -+ -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+} -+ -+/** -+ * isph3a_aewb_stats_available - Check for stats available of specified frame. -+ * @aewbdata: Pointer to return AE AWB statistics data -+ * -+ * Returns 0 if successful, or -1 if statistics are unavailable. -+ **/ -+static int isph3a_aewb_get_stats(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_data *aewbdata) -+{ -+ struct ispstat_buffer *buf; -+ -+ buf = ispstat_buf_get(&isp_h3a->stat, -+ (void *)aewbdata->h3a_aewb_statistics_buf, -+ aewbdata->frame_number); -+ -+ if (IS_ERR(buf)) -+ return PTR_ERR(buf); -+ -+ aewbdata->ts = buf->ts; -+ aewbdata->config_counter = buf->config_counter; -+ aewbdata->frame_number = buf->frame_number; -+ -+ ispstat_buf_release(&isp_h3a->stat); -+ -+ return 0; -+} -+ -+/** -+ * isph3a_aewb_buf_process - Process H3A AEWB buffer. 
-+ */ -+int isph3a_aewb_buf_process(struct isp_h3a_device *isp_h3a) -+{ -+ isph3a_update_wb(isp_h3a); -+ if (likely(!isp_h3a->buf_err && -+ isp_h3a->aewb_config_local.aewb_enable)) { -+ int ret; -+ -+ ret = ispstat_buf_queue(&isp_h3a->stat); -+ isp_h3a->buf_next = ispstat_buf_next(&isp_h3a->stat); -+ return ret; -+ } else { -+ isp_h3a->buf_err = 0; -+ return -1; -+ } -+} -+ -+static int isph3a_aewb_validate_params(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_config *user_cfg) -+{ -+ if (unlikely(user_cfg->saturation_limit > MAX_SATURATION_LIM)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->win_height < MIN_WIN_H || -+ user_cfg->win_height > MAX_WIN_H || -+ user_cfg->win_height & 0x01)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->win_width < MIN_WIN_W || -+ user_cfg->win_width > MAX_WIN_W || -+ user_cfg->win_width & 0x01)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->ver_win_count < 1 || -+ user_cfg->ver_win_count > MAX_WINVC)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->hor_win_count < 1 || -+ user_cfg->hor_win_count > MAX_WINHC)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->ver_win_start > MAX_WINSTART)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->hor_win_start > MAX_WINSTART)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->blk_ver_win_start > MAX_WINSTART)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->blk_win_height < MIN_WIN_H || -+ user_cfg->blk_win_height > MAX_WIN_H || -+ user_cfg->blk_win_height & 0x01)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->subsample_ver_inc < MIN_SUB_INC || -+ user_cfg->subsample_ver_inc > MAX_SUB_INC || -+ user_cfg->subsample_ver_inc & 0x01)) -+ return -EINVAL; -+ -+ if (unlikely(user_cfg->subsample_hor_inc < MIN_SUB_INC || -+ user_cfg->subsample_hor_inc > MAX_SUB_INC || -+ user_cfg->subsample_hor_inc & 0x01)) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+/** -+ * isph3a_aewb_set_params - Helper function to check & store user given params. -+ * @user_cfg: Pointer to AE and AWB parameters struct. -+ * -+ * As most of them are busy-lock registers, need to wait until AEW_BUSY = 0 to -+ * program them during ISR. -+ * -+ * Returns 0 if successful, or -EINVAL if any of the parameters are invalid. 
-+ **/ -+static void isph3a_aewb_set_params(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_config *user_cfg) -+{ -+ if (isp_h3a->aewb_config_local.saturation_limit != -+ user_cfg->saturation_limit) { -+ WRITE_SAT_LIM(isp_h3a->regs.pcr, user_cfg->saturation_limit); -+ isp_h3a->aewb_config_local.saturation_limit = -+ user_cfg->saturation_limit; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.alaw_enable != user_cfg->alaw_enable) { -+ WRITE_ALAW(isp_h3a->regs.pcr, user_cfg->alaw_enable); -+ isp_h3a->aewb_config_local.alaw_enable = user_cfg->alaw_enable; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.win_height != user_cfg->win_height) { -+ WRITE_WIN_H(isp_h3a->regs.win1, user_cfg->win_height); -+ isp_h3a->aewb_config_local.win_height = user_cfg->win_height; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.win_width != user_cfg->win_width) { -+ WRITE_WIN_W(isp_h3a->regs.win1, user_cfg->win_width); -+ isp_h3a->aewb_config_local.win_width = user_cfg->win_width; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.ver_win_count != -+ user_cfg->ver_win_count) { -+ WRITE_VER_C(isp_h3a->regs.win1, user_cfg->ver_win_count); -+ isp_h3a->aewb_config_local.ver_win_count = -+ user_cfg->ver_win_count; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.hor_win_count != -+ user_cfg->hor_win_count) { -+ WRITE_HOR_C(isp_h3a->regs.win1, user_cfg->hor_win_count); -+ isp_h3a->aewb_config_local.hor_win_count = -+ user_cfg->hor_win_count; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.ver_win_start != -+ user_cfg->ver_win_start) { -+ WRITE_VER_WIN_ST(isp_h3a->regs.start, user_cfg->ver_win_start); -+ isp_h3a->aewb_config_local.ver_win_start = -+ user_cfg->ver_win_start; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.hor_win_start != -+ user_cfg->hor_win_start) { -+ WRITE_HOR_WIN_ST(isp_h3a->regs.start, user_cfg->hor_win_start); -+ isp_h3a->aewb_config_local.hor_win_start = -+ user_cfg->hor_win_start; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.blk_ver_win_start != -+ user_cfg->blk_ver_win_start) { -+ WRITE_BLK_VER_WIN_ST(isp_h3a->regs.blk, -+ user_cfg->blk_ver_win_start); -+ isp_h3a->aewb_config_local.blk_ver_win_start = -+ user_cfg->blk_ver_win_start; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.blk_win_height != -+ user_cfg->blk_win_height) { -+ WRITE_BLK_WIN_H(isp_h3a->regs.blk, user_cfg->blk_win_height); -+ isp_h3a->aewb_config_local.blk_win_height = -+ user_cfg->blk_win_height; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.subsample_ver_inc != -+ user_cfg->subsample_ver_inc) { -+ WRITE_SUB_VER_INC(isp_h3a->regs.subwin, -+ user_cfg->subsample_ver_inc); -+ isp_h3a->aewb_config_local.subsample_ver_inc = -+ user_cfg->subsample_ver_inc; -+ isp_h3a->update = 1; -+ } -+ -+ if (isp_h3a->aewb_config_local.subsample_hor_inc != -+ user_cfg->subsample_hor_inc) { -+ WRITE_SUB_HOR_INC(isp_h3a->regs.subwin, -+ user_cfg->subsample_hor_inc); -+ isp_h3a->aewb_config_local.subsample_hor_inc = -+ user_cfg->subsample_hor_inc; -+ isp_h3a->update = 1; -+ } -+ -+ isp_h3a->aewb_config_local.aewb_enable = user_cfg->aewb_enable;; -+} -+ -+/** -+ * isph3a_aewb_config - Configure AEWB regs, enable/disable H3A engine. -+ * @aewbcfg: Pointer to AEWB config structure. 
-+ * -+ * Returns 0 if successful, -EINVAL if aewbcfg pointer is NULL, -ENOMEM if -+ * was unable to allocate memory for the buffer, of other errors if H3A -+ * callback is not set or the parameters for AEWB are invalid. -+ **/ -+int isph3a_aewb_config(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_config *aewbcfg) -+{ -+ struct device *dev = to_device(isp_h3a); -+ int ret = 0; -+ int win_count = 0; -+ unsigned int buf_size; -+ unsigned long irqflags; -+ -+ if (NULL == aewbcfg) { -+ dev_dbg(dev, "h3a: Null argument in configuration\n"); -+ return -EINVAL; -+ } -+ -+ ret = isph3a_aewb_validate_params(isp_h3a, aewbcfg); -+ if (ret) -+ return ret; -+ -+ /* FIXME: This win_count handling looks really fishy. */ -+ win_count = aewbcfg->ver_win_count * aewbcfg->hor_win_count; -+ win_count += aewbcfg->hor_win_count; -+ ret = win_count / 8; -+ win_count += win_count % 8 ? 1 : 0; -+ win_count += ret; -+ -+ buf_size = win_count * AEWB_PACKET_SIZE; -+ -+ ret = ispstat_bufs_alloc(&isp_h3a->stat, buf_size, 0); -+ if (ret) -+ return ret; -+ -+ spin_lock_irqsave(isp_h3a->lock, irqflags); -+ -+ isp_h3a->win_count = win_count; -+ isp_h3a->buf_size = buf_size; -+ isph3a_aewb_set_params(isp_h3a, aewbcfg); -+ -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+ -+ isph3a_print_status(isp_h3a); -+ -+ return 0; -+} -+EXPORT_SYMBOL(isph3a_aewb_config); -+ -+/** -+ * isph3a_aewb_request_statistics - REquest statistics and update gains in AEWB -+ * @aewbdata: Pointer to return AE AWB statistics data. -+ * -+ * This API allows the user to update White Balance gains, as well as -+ * exposure time and analog gain. It is also used to request frame -+ * statistics. -+ * -+ * Returns 0 if successful, -EINVAL when H3A engine is not enabled, or other -+ * errors when setting gains. -+ **/ -+int isph3a_aewb_request_statistics(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_data *aewbdata) -+{ -+ struct device *dev = to_device(isp_h3a); -+ unsigned long irqflags; -+ int ret = 0; -+ -+ if (!isp_h3a->aewb_config_local.aewb_enable) { -+ dev_dbg(dev, "h3a: engine not enabled\n"); -+ return -EINVAL; -+ } -+ -+ DPRINTK_ISPH3A("isph3a_aewb_request_statistics: Enter " -+ "(frame req. 
=> %d, current frame => %d," -+ "update => %d)\n", -+ aewbdata->frame_number, isp_h3a->stat.frame_number, -+ aewbdata->update); -+ DPRINTK_ISPH3A("User data received: \n"); -+ DPRINTK_ISPH3A("Digital gain = 0x%04x\n", aewbdata->dgain); -+ DPRINTK_ISPH3A("WB gain b *= 0x%04x\n", aewbdata->wb_gain_b); -+ DPRINTK_ISPH3A("WB gain r *= 0x%04x\n", aewbdata->wb_gain_r); -+ DPRINTK_ISPH3A("WB gain gb = 0x%04x\n", aewbdata->wb_gain_gb); -+ DPRINTK_ISPH3A("WB gain gr = 0x%04x\n", aewbdata->wb_gain_gr); -+ -+ spin_lock_irqsave(isp_h3a->lock, irqflags); -+ -+ if (aewbdata->update & SET_DIGITAL_GAIN) -+ isp_h3a->h3awb_update.dgain = (u16)aewbdata->dgain; -+ if (aewbdata->update & SET_COLOR_GAINS) { -+ isp_h3a->h3awb_update.coef0 = (u8)aewbdata->wb_gain_gr; -+ isp_h3a->h3awb_update.coef1 = (u8)aewbdata->wb_gain_r; -+ isp_h3a->h3awb_update.coef2 = (u8)aewbdata->wb_gain_b; -+ isp_h3a->h3awb_update.coef3 = (u8)aewbdata->wb_gain_gb; -+ } -+ if (aewbdata->update & (SET_COLOR_GAINS | SET_DIGITAL_GAIN)) -+ isp_h3a->wb_update = 1; -+ -+ spin_unlock_irqrestore(isp_h3a->lock, irqflags); -+ -+ if (aewbdata->update & REQUEST_STATISTICS) -+ ret = isph3a_aewb_get_stats(isp_h3a, aewbdata); -+ -+ aewbdata->curr_frame = isp_h3a->stat.frame_number; -+ -+ DPRINTK_ISPH3A("isph3a_aewb_request_statistics: " -+ "aewbdata->h3a_aewb_statistics_buf => %p\n", -+ aewbdata->h3a_aewb_statistics_buf); -+ -+ return ret; -+} -+EXPORT_SYMBOL(isph3a_aewb_request_statistics); -+ -+/** -+ * isph3a_aewb_init - Module Initialisation. -+ * -+ * Always returns 0. -+ **/ -+int __init isph3a_aewb_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_h3a_device *isp_h3a = &isp->isp_h3a; -+ -+ isp_h3a->lock = &isp->h3a_lock; -+ isp_h3a->aewb_config_local.saturation_limit = AEWB_SATURATION_LIMIT; -+ ispstat_init(dev, "H3A", &isp_h3a->stat, H3A_MAX_BUFF, MAX_FRAME_COUNT); -+ -+ return 0; -+} -+ -+/** -+ * isph3a_aewb_cleanup - Module exit. -+ **/ -+void isph3a_aewb_cleanup(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ -+ ispstat_free(&isp->isp_h3a.stat); -+} -+ -+/** -+ * isph3a_print_status - Debug print. Values of H3A related registers. -+ **/ -+static void isph3a_print_status(struct isp_h3a_device *isp_h3a) -+{ -+ DPRINTK_ISPH3A("ISPH3A_PCR = 0x%08x\n", -+ isp_reg_readl(to_device(isp_h3a), -+ OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_PCR)); -+ DPRINTK_ISPH3A("ISPH3A_AEWWIN1 = 0x%08x\n", -+ isp_reg_readl(to_device(isp_h3a), -+ OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWWIN1)); -+ DPRINTK_ISPH3A("ISPH3A_AEWINSTART = 0x%08x\n", -+ isp_reg_readl(to_device(isp_h3a), -+ OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWINSTART)); -+ DPRINTK_ISPH3A("ISPH3A_AEWINBLK = 0x%08x\n", -+ isp_reg_readl(to_device(isp_h3a), -+ OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWINBLK)); -+ DPRINTK_ISPH3A("ISPH3A_AEWSUBWIN = 0x%08x\n", -+ isp_reg_readl(to_device(isp_h3a), -+ OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWSUBWIN)); -+ DPRINTK_ISPH3A("ISPH3A_AEWBUFST = 0x%08x\n", -+ isp_reg_readl(to_device(isp_h3a), -+ OMAP3_ISP_IOMEM_H3A, -+ ISPH3A_AEWBUFST)); -+ DPRINTK_ISPH3A("stats windows = %d\n", isp_h3a->win_count); -+ DPRINTK_ISPH3A("stats buf size = %d\n", isp_h3a->stat.buf_size); -+} -+ -+/** -+ * isph3a_save_context - Saves the values of the h3a module registers. 
-+ **/ -+void isph3a_save_context(struct device *dev) -+{ -+ DPRINTK_ISPH3A(" Saving context\n"); -+ isp_save_context(dev, isph3a_reg_list); -+ /* Avoid enable during restore ctx */ -+ isph3a_reg_list[0].val &= ~(ISPH3A_PCR_AEW_EN | ISPH3A_PCR_AF_EN); -+} -+EXPORT_SYMBOL(isph3a_save_context); -+ -+/** -+ * isph3a_restore_context - Restores the values of the h3a module registers. -+ **/ -+void isph3a_restore_context(struct device *dev) -+{ -+ DPRINTK_ISPH3A(" Restoring context\n"); -+ isp_restore_context(dev, isph3a_reg_list); -+} -+EXPORT_SYMBOL(isph3a_restore_context); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isph3a.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/isph3a.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isph3a.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isph3a.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,159 @@ -+/* -+ * isph3a.h -+ * -+ * Include file for H3A module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Troy Laramy -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef OMAP_ISP_H3A_H -+#define OMAP_ISP_H3A_H -+ -+#include -+ -+#define AEWB_PACKET_SIZE 16 -+#define H3A_MAX_BUFF 5 -+#define AEWB_SATURATION_LIMIT 0x3FF -+ -+/* Flags for changed registers */ -+#define PCR_CHNG (1 << 0) -+#define AEWWIN1_CHNG (1 << 1) -+#define AEWINSTART_CHNG (1 << 2) -+#define AEWINBLK_CHNG (1 << 3) -+#define AEWSUBWIN_CHNG (1 << 4) -+#define PRV_WBDGAIN_CHNG (1 << 5) -+#define PRV_WBGAIN_CHNG (1 << 6) -+ -+/* ISPH3A REGISTERS bits */ -+#define ISPH3A_PCR_AF_EN (1 << 0) -+#define ISPH3A_PCR_AF_ALAW_EN (1 << 1) -+#define ISPH3A_PCR_AF_MED_EN (1 << 2) -+#define ISPH3A_PCR_AF_BUSY (1 << 15) -+#define ISPH3A_PCR_AEW_EN (1 << 16) -+#define ISPH3A_PCR_AEW_ALAW_EN (1 << 17) -+#define ISPH3A_PCR_AEW_BUSY (1 << 18) -+#define ISPH3A_PCR_AEW_MASK (ISPH3A_PCR_AEW_ALAW_EN | \ -+ ISPH3A_PCR_AEW_AVE2LMT_MASK) -+ -+#define WRITE_SAT_LIM(reg, sat_limit) \ -+ (reg = (reg & (~(ISPH3A_PCR_AEW_AVE2LMT_MASK))) \ -+ | (sat_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT)) -+ -+#define WRITE_ALAW(reg, alaw_en) \ -+ (reg = (reg & (~(ISPH3A_PCR_AEW_ALAW_EN))) \ -+ | ((alaw_en & ISPH3A_PCR_AF_ALAW_EN) \ -+ << ISPH3A_PCR_AEW_ALAW_EN_SHIFT)) -+ -+#define WRITE_WIN_H(reg, height) \ -+ (reg = (reg & (~(ISPH3A_AEWWIN1_WINH_MASK))) \ -+ | (((height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT)) -+ -+#define WRITE_WIN_W(reg, width) \ -+ (reg = (reg & (~(ISPH3A_AEWWIN1_WINW_MASK))) \ -+ | (((width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT)) -+ -+#define WRITE_VER_C(reg, ver_count) \ -+ (reg = (reg & ~(ISPH3A_AEWWIN1_WINVC_MASK)) \ -+ | ((ver_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT)) -+ -+#define WRITE_HOR_C(reg, hor_count) \ -+ (reg = (reg & ~(ISPH3A_AEWWIN1_WINHC_MASK)) \ -+ | ((hor_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT)) -+ -+#define WRITE_VER_WIN_ST(reg, ver_win_st) \ -+ (reg = (reg & ~(ISPH3A_AEWINSTART_WINSV_MASK)) \ -+ | (ver_win_st << ISPH3A_AEWINSTART_WINSV_SHIFT)) -+ -+#define WRITE_HOR_WIN_ST(reg, hor_win_st) \ -+ (reg = (reg & ~(ISPH3A_AEWINSTART_WINSH_MASK)) \ -+ | (hor_win_st << 
ISPH3A_AEWINSTART_WINSH_SHIFT)) -+ -+#define WRITE_BLK_VER_WIN_ST(reg, blk_win_st) \ -+ (reg = (reg & ~(ISPH3A_AEWINBLK_WINSV_MASK)) \ -+ | (blk_win_st << ISPH3A_AEWINBLK_WINSV_SHIFT)) -+ -+#define WRITE_BLK_WIN_H(reg, height) \ -+ (reg = (reg & ~(ISPH3A_AEWINBLK_WINH_MASK)) \ -+ | (((height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT)) -+ -+#define WRITE_SUB_VER_INC(reg, sub_ver_inc) \ -+ (reg = (reg & ~(ISPH3A_AEWSUBWIN_AEWINCV_MASK)) \ -+ | (((sub_ver_inc >> 1) - 1) << ISPH3A_AEWSUBWIN_AEWINCV_SHIFT)) -+ -+#define WRITE_SUB_HOR_INC(reg, sub_hor_inc) \ -+ (reg = (reg & ~(ISPH3A_AEWSUBWIN_AEWINCH_MASK)) \ -+ | (((sub_hor_inc >> 1) - 1) << ISPH3A_AEWSUBWIN_AEWINCH_SHIFT)) -+ -+/** -+ * struct isph3a_aewb_regs - Current value of AE, AWB configuration registers. -+ * pcr: Peripheral control register. -+ * win1: Control register. -+ * start: Start position register. -+ * blk: Black line register. -+ * subwin: Configuration register. -+ */ -+struct isph3a_aewb_regs { -+ u32 pcr; -+ u32 win1; -+ u32 start; -+ u32 blk; -+ u32 subwin; -+}; -+ -+struct isp_h3a_device { -+ spinlock_t *lock; /* Lock for this struct */ -+ -+ u8 update; -+ u8 buf_err; -+ int enabled; -+ int wb_update; -+ -+ struct isph3a_aewb_regs regs; -+ struct ispprev_wbal h3awb_update; -+ struct isph3a_aewb_config aewb_config_local; -+ struct ispstat_buffer *buf_next; -+ u16 win_count; -+ unsigned int buf_size; -+ -+ struct ispstat stat; -+}; -+ -+int isph3a_aewb_config(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_config *aewbcfg); -+ -+int isph3a_aewb_request_statistics(struct isp_h3a_device *isp_h3a, -+ struct isph3a_aewb_data *aewbdata); -+ -+void isph3a_save_context(struct device *dev); -+ -+void isph3a_restore_context(struct device *dev); -+ -+void isph3a_aewb_enable(struct isp_h3a_device *isp_h3a, u8 enable); -+ -+void isph3a_aewb_try_enable(struct isp_h3a_device *isp_h3a); -+ -+int isph3a_aewb_busy(struct isp_h3a_device *isp_h3a); -+ -+void isph3a_aewb_suspend(struct isp_h3a_device *isp_h3a); -+ -+void isph3a_aewb_resume(struct isp_h3a_device *isp_h3a); -+ -+void isph3a_update_wb(struct isp_h3a_device *isp_h3a); -+ -+int isph3a_aewb_buf_process(struct isp_h3a_device *isp_h3a); -+ -+void isph3a_aewb_config_registers(struct isp_h3a_device *isp_h3a); -+ -+#endif /* OMAP_ISP_H3A_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isppreview.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/isppreview.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isppreview.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isppreview.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,2017 @@ -+/* -+ * isppreview.c -+ * -+ * Driver Library for Preview module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Senthilvadivu Guruswamy -+ * Pallavi Kulkarni -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "isppreview.h" -+ -+/* Structure for saving/restoring preview module registers */ -+static struct isp_reg ispprev_reg_list[] = { -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, 0x0000}, /* See context saving. */ -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RADR_OFFSET, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_DSDR_ADDR, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_DRKF_OFFSET, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WSDR_ADDR, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WADD_OFFSET, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_NF, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WB_DGAIN, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WBGAIN, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT1, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT2, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT3, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT4, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT5, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF1, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF2, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC_OFFSET, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2, 0x0000}, -+ {OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3, 0x0000}, -+ {0, ISP_TOK_TERM, 0x0000} -+}; -+ -+ -+/* Default values in Office Flourescent Light for RGBtoRGB Blending */ -+static struct ispprev_rgbtorgb flr_rgb2rgb = { -+ { /* RGB-RGB Matrix */ -+ {0x01E2, 0x0F30, 0x0FEE}, -+ {0x0F9B, 0x01AC, 0x0FB9}, -+ {0x0FE0, 0x0EC0, 0x0260} -+ }, /* RGB Offset */ -+ {0x0000, 0x0000, 0x0000} -+}; -+ -+/* Default values in Office Flourescent Light for RGB to YUV Conversion*/ -+static struct ispprev_csc flr_prev_csc[] = { -+ { -+ { /* CSC Coef Matrix */ -+ {66, 129, 25}, -+ {-38, -75, 112}, -+ {112, -94 , -18} -+ }, /* CSC Offset */ -+ {0x0, 0x0, 0x0} -+ }, -+ { -+ { /* CSC Coef Matrix BW */ -+ {66, 129, 25}, -+ {0, 0, 0}, -+ {0, 0, 0} -+ }, /* CSC Offset */ -+ {0x0, 0x0, 0x0} -+ }, -+ { -+ { /* CSC Coef Matrix Sepia */ -+ {19, 38, 7}, -+ {0, 0, 0}, -+ {0, 0, 0} -+ }, /* CSC Offset */ -+ {0x0, 0xE7, 0x14} -+ } -+}; -+ -+ -+/* Default values in Office Flourescent Light for CFA Gradient*/ -+#define FLR_CFA_GRADTHRS_HORZ 0x28 -+#define FLR_CFA_GRADTHRS_VERT 0x28 -+ -+/* Default values in Office Flourescent Light for Chroma Suppression*/ -+#define FLR_CSUP_GAIN 0x0D -+#define FLR_CSUP_THRES 0xEB -+ -+/* Default values in Office Flourescent Light for Noise Filter*/ -+#define FLR_NF_STRGTH 0x03 -+ -+/* Default values in Office Flourescent Light for White Balance*/ -+#define FLR_WBAL_DGAIN 0x100 -+#define FLR_WBAL_COEF0 0x20 -+#define FLR_WBAL_COEF1 0x29 -+#define FLR_WBAL_COEF2 0x2d -+#define FLR_WBAL_COEF3 0x20 -+ -+#define FLR_WBAL_COEF0_ES1 0x20 
-+#define FLR_WBAL_COEF1_ES1 0x23 -+#define FLR_WBAL_COEF2_ES1 0x39 -+#define FLR_WBAL_COEF3_ES1 0x20 -+ -+/* Default values in Office Flourescent Light for Black Adjustment*/ -+#define FLR_BLKADJ_BLUE 0x0 -+#define FLR_BLKADJ_GREEN 0x0 -+#define FLR_BLKADJ_RED 0x0 -+ -+/* -+ * Coeficient Tables for the submodules in Preview. -+ * Array is initialised with the values from.the tables text file. -+ */ -+ -+/* -+ * CFA Filter Coefficient Table -+ * -+ */ -+static u32 cfa_coef_table[] = { -+#include "cfa_coef_table.h" -+}; -+ -+/* -+ * Gamma Correction Table - Red -+ */ -+static u32 redgamma_table[] = { -+#include "redgamma_table.h" -+}; -+ -+/* -+ * Gamma Correction Table - Green -+ */ -+static u32 greengamma_table[] = { -+#include "greengamma_table.h" -+}; -+ -+/* -+ * Gamma Correction Table - Blue -+ */ -+static u32 bluegamma_table[] = { -+#include "bluegamma_table.h" -+}; -+ -+/* -+ * Noise Filter Threshold table -+ */ -+static u32 noise_filter_table[] = { -+#include "noise_filter_table.h" -+}; -+ -+/* -+ * Luminance Enhancement Table -+ */ -+static u32 luma_enhance_table[] = { -+#include "luma_enhance_table.h" -+}; -+ -+static int isppreview_tables_update(struct isp_prev_device *isp_prev, -+ struct isptables_update *isptables_struct); -+ -+ -+/** -+ * isppreview_config - Abstraction layer Preview configuration. -+ * @userspace_add: Pointer from Userspace to structure with flags and data to -+ * update. -+ **/ -+int isppreview_config(struct isp_prev_device *isp_prev, void *userspace_add) -+{ -+ struct isp_device *isp = to_isp_device(isp_prev); -+ struct device *dev = to_device(isp_prev); -+ struct ispprev_hmed prev_hmed_t; -+ struct ispprev_csup csup_t; -+ struct ispprev_blkadj prev_blkadj_t; -+ struct ispprev_yclimit yclimit_t; -+ struct ispprev_dcor prev_dcor_t; -+ struct ispprv_update_config *config; -+ struct isptables_update isp_table_update; -+ int yen_t[ISPPRV_YENH_TBL_SIZE]; -+ unsigned long flags; -+ -+ if (userspace_add == NULL) -+ return -EINVAL; -+ -+ spin_lock_irqsave(&isp_prev->lock, flags); -+ isp_prev->shadow_update = 1; -+ spin_unlock_irqrestore(&isp_prev->lock, flags); -+ -+ config = userspace_add; -+ -+ if (isp->running != ISP_STOPPED) -+ goto out_config_shadow; -+ -+ if (ISP_ABS_PREV_LUMAENH & config->flag) { -+ if (ISP_ABS_PREV_LUMAENH & config->update) { -+ if (copy_from_user(yen_t, config->yen, -+ sizeof(yen_t))) -+ goto err_copy_from_user; -+ isppreview_config_luma_enhancement(isp_prev, yen_t); -+ } -+ isp_prev->params.features |= PREV_LUMA_ENHANCE; -+ } else if (ISP_ABS_PREV_LUMAENH & config->update) -+ isp_prev->params.features &= ~PREV_LUMA_ENHANCE; -+ -+ if (ISP_ABS_PREV_INVALAW & config->flag) { -+ isppreview_enable_invalaw(isp_prev, 1); -+ isp_prev->params.features |= PREV_INVERSE_ALAW; -+ } else { -+ isppreview_enable_invalaw(isp_prev, 0); -+ isp_prev->params.features &= ~PREV_INVERSE_ALAW; -+ } -+ -+ if (ISP_ABS_PREV_HRZ_MED & config->flag) { -+ if (ISP_ABS_PREV_HRZ_MED & config->update) { -+ if (copy_from_user(&prev_hmed_t, -+ (struct ispprev_hmed *) -+ config->prev_hmed, -+ sizeof(struct ispprev_hmed))) -+ goto err_copy_from_user; -+ isppreview_config_hmed(isp_prev, prev_hmed_t); -+ } -+ isppreview_enable_hmed(isp_prev, 1); -+ isp_prev->params.features |= PREV_HORZ_MEDIAN_FILTER; -+ } else if (ISP_ABS_PREV_HRZ_MED & config->update) { -+ isppreview_enable_hmed(isp_prev, 0); -+ isp_prev->params.features &= ~PREV_HORZ_MEDIAN_FILTER; -+ } -+ -+ if (ISP_ABS_PREV_CHROMA_SUPP & config->flag) { -+ if (ISP_ABS_PREV_CHROMA_SUPP & config->update) { -+ if 
(copy_from_user(&csup_t, -+ (struct ispprev_csup *) -+ config->csup, -+ sizeof(struct ispprev_csup))) -+ goto err_copy_from_user; -+ isppreview_config_chroma_suppression(isp_prev, csup_t); -+ } -+ isppreview_enable_chroma_suppression(isp_prev, 1); -+ isp_prev->params.features |= PREV_CHROMA_SUPPRESS; -+ } else if (ISP_ABS_PREV_CHROMA_SUPP & config->update) { -+ isppreview_enable_chroma_suppression(isp_prev, 0); -+ isp_prev->params.features &= ~PREV_CHROMA_SUPPRESS; -+ } -+ -+ if (ISP_ABS_PREV_BLKADJ & config->update) { -+ if (copy_from_user(&prev_blkadj_t, (struct ispprev_blkadjl *) -+ config->prev_blkadj, -+ sizeof(struct ispprev_blkadj))) -+ goto err_copy_from_user; -+ isppreview_config_blkadj(isp_prev, prev_blkadj_t); -+ } -+ -+ if (ISP_ABS_PREV_YC_LIMIT & config->update) { -+ if (copy_from_user(&yclimit_t, (struct ispprev_yclimit *) -+ config->yclimit, -+ sizeof(struct ispprev_yclimit))) -+ goto err_copy_from_user; -+ isppreview_config_yc_range(isp_prev, yclimit_t); -+ } -+ -+ if (ISP_ABS_PREV_DEFECT_COR & config->flag) { -+ if (ISP_ABS_PREV_DEFECT_COR & config->update) { -+ if (copy_from_user(&prev_dcor_t, -+ (struct ispprev_dcor *) -+ config->prev_dcor, -+ sizeof(struct ispprev_dcor))) -+ goto err_copy_from_user; -+ isppreview_config_dcor(isp_prev, prev_dcor_t); -+ } -+ isppreview_enable_dcor(isp_prev, 1); -+ isp_prev->params.features |= PREV_DEFECT_COR; -+ } else if (ISP_ABS_PREV_DEFECT_COR & config->update) { -+ isppreview_enable_dcor(isp_prev, 0); -+ isp_prev->params.features &= ~PREV_DEFECT_COR; -+ } -+ -+ if (ISP_ABS_PREV_GAMMABYPASS & config->flag) { -+ isppreview_enable_gammabypass(isp_prev, 1); -+ isp_prev->params.features |= PREV_GAMMA_BYPASS; -+ } else { -+ isppreview_enable_gammabypass(isp_prev, 0); -+ isp_prev->params.features &= ~PREV_GAMMA_BYPASS; -+ } -+ -+out_config_shadow: -+ if (ISP_ABS_PREV_RGB2RGB & config->update) { -+ if (copy_from_user(&isp_prev->params.rgb2rgb, -+ (struct ispprev_rgbtorgb *) -+ config->rgb2rgb, -+ sizeof(struct ispprev_rgbtorgb))) -+ goto err_copy_from_user; -+ isppreview_config_rgb_blending(isp_prev, -+ isp_prev->params.rgb2rgb); -+ /* The function call above prevents compiler from reordering -+ * writes so that the flag below is always set after -+ * isp_prev->params.rgb2rgb is written to. */ -+ isp_prev->update_rgb_blending = 1; -+ } -+ -+ if (ISP_ABS_PREV_COLOR_CONV & config->update) { -+ if (copy_from_user(&isp_prev->params.rgb2ycbcr, -+ (struct ispprev_csc *) -+ config->prev_csc, -+ sizeof(struct ispprev_csc))) -+ goto err_copy_from_user; -+ isppreview_config_rgb_to_ycbcr(isp_prev, -+ isp_prev->params.rgb2ycbcr); -+ /* Same here... this flag has to be set after rgb2ycbcr -+ * structure is written to. */ -+ isp_prev->update_rgb_to_ycbcr = 1; -+ } -+ -+ isp_table_update.update = config->update; -+ isp_table_update.flag = config->flag; -+ isp_table_update.prev_nf = config->prev_nf; -+ isp_table_update.red_gamma = config->red_gamma; -+ isp_table_update.green_gamma = config->green_gamma; -+ isp_table_update.blue_gamma = config->blue_gamma; -+ isp_table_update.prev_cfa = config->prev_cfa; -+ isp_table_update.prev_wbal = config->prev_wbal; -+ -+ if (isppreview_tables_update(isp_prev, &isp_table_update)) -+ goto err_copy_from_user; -+ -+ isp_prev->shadow_update = 0; -+ return 0; -+ -+err_copy_from_user: -+ isp_prev->shadow_update = 0; -+ dev_err(dev, "preview: Config: Copy From User Error\n"); -+ return -EFAULT; -+} -+EXPORT_SYMBOL_GPL(isppreview_config); -+ -+/** -+ * isppreview_tables_update - Abstraction layer Tables update. 
-+ * @isptables_struct: Pointer from Userspace to structure with flags and table -+ * data to update. -+ **/ -+static int isppreview_tables_update(struct isp_prev_device *isp_prev, -+ struct isptables_update *isptables_struct) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (ISP_ABS_PREV_WB & isptables_struct->update) { -+ if (copy_from_user(&isp_prev->params.wbal, -+ isptables_struct->prev_wbal, -+ sizeof(struct ispprev_wbal))) -+ goto err_copy_from_user; -+ -+ isp_prev->wbal_update = 1; -+ } -+ -+ if (ISP_ABS_TBL_NF & isptables_struct->flag) { -+ isp_prev->nf_enable = 1; -+ isp_prev->params.features |= PREV_NOISE_FILTER; -+ if (ISP_ABS_TBL_NF & isptables_struct->update) { -+ if (copy_from_user(&isp_prev->prev_nf_t, -+ (struct ispprev_nf *) -+ isptables_struct->prev_nf, -+ sizeof(struct ispprev_nf))) -+ goto err_copy_from_user; -+ -+ isp_prev->nf_update = 1; -+ } else -+ isp_prev->nf_update = 0; -+ } else { -+ isp_prev->nf_enable = 0; -+ isp_prev->params.features &= ~PREV_NOISE_FILTER; -+ if (ISP_ABS_TBL_NF & isptables_struct->update) -+ isp_prev->nf_update = 1; -+ else -+ isp_prev->nf_update = 0; -+ } -+ -+ if (ISP_ABS_TBL_REDGAMMA & isptables_struct->update) { -+ if (copy_from_user(redgamma_table, isptables_struct->red_gamma, -+ sizeof(redgamma_table))) { -+ goto err_copy_from_user; -+ } -+ isp_prev->rg_update = 1; -+ } else -+ isp_prev->rg_update = 0; -+ -+ if (ISP_ABS_TBL_GREENGAMMA & isptables_struct->update) { -+ if (copy_from_user(greengamma_table, -+ isptables_struct->green_gamma, -+ sizeof(greengamma_table))) -+ goto err_copy_from_user; -+ isp_prev->gg_update = 1; -+ } else -+ isp_prev->gg_update = 0; -+ -+ if (ISP_ABS_TBL_BLUEGAMMA & isptables_struct->update) { -+ if (copy_from_user(bluegamma_table, -+ isptables_struct->blue_gamma, -+ sizeof(bluegamma_table))) { -+ goto err_copy_from_user; -+ } -+ isp_prev->bg_update = 1; -+ } else -+ isp_prev->bg_update = 0; -+ -+ if (ISP_ABS_PREV_CFA & isptables_struct->update) { -+ struct ispprev_cfa cfa; -+ if (isptables_struct->prev_cfa) { -+ if (copy_from_user(&cfa, -+ isptables_struct->prev_cfa, -+ sizeof(struct ispprev_cfa))) -+ goto err_copy_from_user; -+ if (cfa.cfa_table != NULL) { -+ if (copy_from_user(cfa_coef_table, -+ cfa.cfa_table, -+ sizeof(cfa_coef_table))) -+ goto err_copy_from_user; -+ } -+ cfa.cfa_table = cfa_coef_table; -+ isp_prev->params.cfa = cfa; -+ } -+ if (ISP_ABS_PREV_CFA & isptables_struct->flag) { -+ isp_prev->cfa_en = 1; -+ isp_prev->params.features |= PREV_CFA; -+ } else { -+ isp_prev->cfa_en = 0; -+ isp_prev->params.features &= ~PREV_CFA; -+ } -+ isp_prev->cfa_update = 1; -+ } -+ -+ return 0; -+ -+err_copy_from_user: -+ dev_err(dev, "preview tables: Copy From User Error\n"); -+ return -EFAULT; -+} -+ -+/** -+ * isppreview_config_shadow_registers - Program shadow registers for preview. -+ * -+ * Allows user to program shadow registers associated with preview module. 
-+ **/ -+void isppreview_config_shadow_registers(struct isp_prev_device *isp_prev) -+{ -+ struct device *dev = to_device(isp_prev); -+ u8 current_brightness_contrast; -+ int ctr; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&isp_prev->lock, flags); -+ if (isp_prev->shadow_update) { -+ spin_unlock_irqrestore(&isp_prev->lock, flags); -+ return; -+ } -+ -+ isppreview_query_brightness(isp_prev, &current_brightness_contrast); -+ if (current_brightness_contrast != -+ (isp_prev->brightness * ISPPRV_BRIGHT_UNITS)) { -+ DPRINTK_ISPPREV(" Changing Brightness level to %d\n", -+ isp_prev->brightness); -+ isppreview_config_brightness(isp_prev, isp_prev->brightness * -+ ISPPRV_BRIGHT_UNITS); -+ } -+ -+ isppreview_query_contrast(isp_prev, &current_brightness_contrast); -+ if (current_brightness_contrast != -+ (isp_prev->contrast * ISPPRV_CONTRAST_UNITS)) { -+ DPRINTK_ISPPREV(" Changing Contrast level to %d\n", -+ isp_prev->contrast); -+ isppreview_config_contrast(isp_prev, isp_prev->contrast * -+ ISPPRV_CONTRAST_UNITS); -+ } -+ if (isp_prev->wbal_update) { -+ isppreview_config_whitebalance(isp_prev, isp_prev->params.wbal); -+ isp_prev->wbal_update = 0; -+ } -+ if (isp_prev->update_color_matrix) { -+ isppreview_config_rgb_to_ycbcr(isp_prev, -+ flr_prev_csc[isp_prev->color]); -+ isp_prev->update_color_matrix = 0; -+ } -+ if (isp_prev->update_rgb_blending) { -+ isp_prev->update_rgb_blending = 0; -+ isppreview_config_rgb_blending(isp_prev, -+ isp_prev->params.rgb2rgb); -+ } -+ if (isp_prev->update_rgb_to_ycbcr) { -+ isp_prev->update_rgb_to_ycbcr = 0; -+ isppreview_config_rgb_to_ycbcr(isp_prev, -+ isp_prev->params.rgb2ycbcr); -+ } -+ -+ if (isp_prev->gg_update) { -+ isp_reg_writel(dev, ISPPRV_TBL_ADDR_GREEN_G_START, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ -+ for (ctr = 0; ctr < ISP_GAMMA_TABLE_SIZE; ctr++) { -+ isp_reg_writel(dev, greengamma_table[ctr], -+ OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_SET_TBL_DATA); -+ } -+ isp_prev->gg_update = 0; -+ } -+ -+ if (isp_prev->rg_update) { -+ isp_reg_writel(dev, ISPPRV_TBL_ADDR_RED_G_START, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ -+ for (ctr = 0; ctr < ISP_GAMMA_TABLE_SIZE; ctr++) { -+ isp_reg_writel(dev, redgamma_table[ctr], -+ OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_SET_TBL_DATA); -+ } -+ isp_prev->rg_update = 0; -+ } -+ -+ if (isp_prev->bg_update) { -+ isp_reg_writel(dev, ISPPRV_TBL_ADDR_BLUE_G_START, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ -+ for (ctr = 0; ctr < ISP_GAMMA_TABLE_SIZE; ctr++) { -+ isp_reg_writel(dev, bluegamma_table[ctr], -+ OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_SET_TBL_DATA); -+ } -+ isp_prev->bg_update = 0; -+ } -+ -+ if (isp_prev->cfa_update) { -+ isp_prev->cfa_update = 0; -+ isppreview_config_cfa(isp_prev, &isp_prev->params.cfa); -+ isppreview_enable_cfa(isp_prev, isp_prev->cfa_en); -+ } -+ -+ if (isp_prev->nf_update && isp_prev->nf_enable) { -+ isp_reg_writel(dev, 0xC00, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ isp_reg_writel(dev, isp_prev->prev_nf_t.spread, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_NF); -+ for (ctr = 0; ctr < ISPPRV_NF_TBL_SIZE; ctr++) { -+ isp_reg_writel(dev, -+ isp_prev->prev_nf_t.table[ctr], -+ OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_SET_TBL_DATA); -+ } -+ isppreview_enable_noisefilter(isp_prev, 1); -+ isp_prev->nf_update = 0; -+ } -+ -+ if (~isp_prev->nf_update && isp_prev->nf_enable) -+ isppreview_enable_noisefilter(isp_prev, 1); -+ -+ if (isp_prev->nf_update && ~isp_prev->nf_enable) -+ isppreview_enable_noisefilter(isp_prev, 0); -+ -+ spin_unlock_irqrestore(&isp_prev->lock, flags); -+} -+ -+/** -+ * isppreview_request - Reserves 
the preview module. -+ * -+ * Returns 0 if successful, or -EBUSY if the module was already reserved. -+ **/ -+int isppreview_request(struct isp_prev_device *isp_prev) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ isp_reg_or(dev, -+ OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, ISPCTRL_PREV_RAM_EN | -+ ISPCTRL_PREV_CLK_EN | ISPCTRL_SBL_WR1_RAM_EN); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_request); -+ -+/** -+ * isppreview_free - Frees the preview module. -+ * -+ * Returns 0 if successful, or -EINVAL if the module was already freed. -+ **/ -+int isppreview_free(struct isp_prev_device *isp_prev) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, -+ ~(ISPCTRL_PREV_CLK_EN | -+ ISPCTRL_PREV_RAM_EN | -+ ISPCTRL_SBL_WR1_RAM_EN)); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_free); -+ -+/** isppreview_config_datapath - Specifies input and output modules for Preview -+ * @input: Indicates the module that gives the image to preview. -+ * @output: Indicates the module to which the preview outputs to. -+ * -+ * Configures the default configuration for the CCDC to work with. -+ * -+ * The valid values for the input are PRV_RAW_CCDC (0), PRV_RAW_MEM (1), -+ * PRV_RGBBAYERCFA (2), PRV_COMPCFA (3), PRV_CCDC_DRKF (4), PRV_OTHERS (5). -+ * -+ * The valid values for the output are PREVIEW_RSZ (0), PREVIEW_MEM (1). -+ * -+ * Returns 0 if successful, or -EINVAL if wrong input or output values are -+ * specified. -+ **/ -+int isppreview_config_datapath(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 pcr = 0; -+ u8 enable = 0; -+ struct prev_params *params = &isp_prev->params; -+ struct ispprev_yclimit yclimit; -+ -+ pcr = isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ -+ switch (pipe->prv_in) { -+ case PRV_RAW_CCDC: -+ pcr &= ~ISPPRV_PCR_SOURCE; -+ break; -+ case PRV_RAW_MEM: -+ pcr |= ISPPRV_PCR_SOURCE; -+ break; -+ case PRV_CCDC_DRKF: -+ pcr |= ISPPRV_PCR_DRKFCAP; -+ break; -+ case PRV_COMPCFA: -+ break; -+ case PRV_OTHERS: -+ break; -+ case PRV_RGBBAYERCFA: -+ break; -+ default: -+ dev_err(dev, "preview: Wrong Input\n"); -+ return -EINVAL; -+ }; -+ -+ switch (pipe->prv_out) { -+ case PREVIEW_RSZ: -+ pcr |= ISPPRV_PCR_RSZPORT; -+ pcr &= ~ISPPRV_PCR_SDRPORT; -+ break; -+ case PREVIEW_MEM: -+ pcr &= ~ISPPRV_PCR_RSZPORT; -+ pcr |= ISPPRV_PCR_SDRPORT; -+ break; -+ default: -+ dev_err(dev, "preview: Wrong Output\n"); -+ return -EINVAL; -+ } -+ -+ isp_reg_writel(dev, pcr, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ -+ if (params->csup.hypf_en == 1) -+ isppreview_config_chroma_suppression(isp_prev, params->csup); -+ if (params->ytable != NULL) -+ isppreview_config_luma_enhancement(isp_prev, params->ytable); -+ -+ if (params->gtable.redtable != NULL) -+ isppreview_config_gammacorrn(isp_prev, params->gtable); -+ -+ isp_prev->cfa_update = 0; -+ isppreview_config_cfa(isp_prev, ¶ms->cfa); -+ enable = (params->features & PREV_CFA) ? 1 : 0; -+ isppreview_enable_cfa(isp_prev, enable); -+ -+ enable = (params->features & PREV_CHROMA_SUPPRESS) ? 1 : 0; -+ isppreview_enable_chroma_suppression(isp_prev, enable); -+ -+ enable = (params->features & PREV_LUMA_ENHANCE) ? 1 : 0; -+ isppreview_enable_luma_enhancement(isp_prev, enable); -+ -+ enable = (params->features & PREV_NOISE_FILTER) ? 1 : 0; -+ if (enable) -+ isppreview_config_noisefilter(isp_prev, params->nf); -+ isppreview_enable_noisefilter(isp_prev, enable); -+ -+ enable = (params->features & PREV_DEFECT_COR) ? 
1 : 0; -+ if (enable) -+ isppreview_config_dcor(isp_prev, params->dcor); -+ isppreview_enable_dcor(isp_prev, enable); -+ -+ enable = (params->features & PREV_GAMMA_BYPASS) ? 1 : 0; -+ isppreview_enable_gammabypass(isp_prev, enable); -+ -+ isppreview_config_whitebalance(isp_prev, params->wbal); -+ isp_prev->wbal_update = 0; -+ -+ isppreview_config_blkadj(isp_prev, params->blk_adj); -+ isppreview_config_rgb_blending(isp_prev, params->rgb2rgb); -+ isppreview_config_rgb_to_ycbcr(isp_prev, params->rgb2ycbcr); -+ -+ isppreview_config_contrast(isp_prev, -+ params->contrast * ISPPRV_CONTRAST_UNITS); -+ isppreview_config_brightness(isp_prev, -+ params->brightness * ISPPRV_BRIGHT_UNITS); -+ -+ yclimit.minC = ISPPRV_YC_MIN; -+ yclimit.maxC = ISPPRV_YC_MAX; -+ yclimit.minY = ISPPRV_YC_MIN; -+ yclimit.maxY = ISPPRV_YC_MAX; -+ isppreview_config_yc_range(isp_prev, yclimit); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_config_datapath); -+ -+/** -+ * isppreview_set_skip - Set the number of rows/columns that should be skipped. -+ * h - Start Pixel Horizontal. -+ * v - Start Line Vertical. -+ **/ -+void isppreview_set_skip(struct isp_prev_device *isp_prev, u32 h, u32 v) -+{ -+ isp_prev->sph = h; -+ isp_prev->slv = v; -+} -+EXPORT_SYMBOL_GPL(isppreview_set_skip); -+ -+/** -+ * isppreview_config_ycpos - Configure byte layout of YUV image. -+ * @mode: Indicates the required byte layout. -+ **/ -+void isppreview_config_ycpos(struct isp_prev_device *isp_prev, -+ enum preview_ycpos_mode mode) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 pcr = isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ pcr &= ~ISPPRV_PCR_YCPOS_CrYCbY; -+ pcr |= (mode << ISPPRV_PCR_YCPOS_SHIFT); -+ isp_reg_writel(dev, pcr, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_ycpos); -+ -+/** -+ * isppreview_config_averager - Enable / disable / configure averager -+ * @average: Average value to be configured. -+ **/ -+void isppreview_config_averager(struct isp_prev_device *isp_prev, u8 average) -+{ -+ struct device *dev = to_device(isp_prev); -+ int reg = 0; -+ -+ reg = AVE_ODD_PIXEL_DIST | AVE_EVEN_PIXEL_DIST | average; -+ isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_averager); -+ -+/** -+ * isppreview_enable_invalaw - Enable/Disable Inverse A-Law module in Preview. -+ * @enable: 1 - Reverse the A-Law done in CCDC. -+ **/ -+void isppreview_enable_invalaw(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 pcr_val = 0; -+ -+ pcr_val = isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ -+ if (enable) { -+ isp_reg_writel(dev, -+ pcr_val | ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ } else { -+ isp_reg_writel(dev, pcr_val & -+ ~(ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_invalaw); -+ -+/** -+ * isppreview_enable_drkframe - Enable/Disable of the darkframe subtract. -+ * @enable: 1 - Acquires memory bandwidth since the pixels in each frame is -+ * subtracted with the pixels in the current frame. -+ * -+ * The proccess is applied for each captured frame. 
-+ **/ -+void isppreview_enable_drkframe(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_DRKFEN); -+ else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_DRKFEN); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_drkframe); -+ -+/** -+ * isppreview_enable_shadcomp - Enables/Disables the shading compensation. -+ * @enable: 1 - Enables the shading compensation. -+ * -+ * If dark frame subtract won't be used, then enable this shading -+ * compensation. -+ **/ -+void isppreview_enable_shadcomp(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) { -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_SCOMP_EN); -+ isppreview_enable_drkframe(isp_prev, 1); -+ } else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_SCOMP_EN); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_shadcomp); -+ -+/** -+ * isppreview_config_drkf_shadcomp - Configures shift value in shading comp. -+ * @scomp_shtval: 3bit value of shift used in shading compensation. -+ **/ -+void isppreview_config_drkf_shadcomp(struct isp_prev_device *isp_prev, -+ u8 scomp_shtval) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 pcr_val = isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+ -+ pcr_val &= ISPPRV_PCR_SCOMP_SFT_MASK; -+ isp_reg_writel(dev, -+ pcr_val | (scomp_shtval << ISPPRV_PCR_SCOMP_SFT_SHIFT), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_drkf_shadcomp); -+ -+/** -+ * isppreview_enable_hmed - Enables/Disables of the Horizontal Median Filter. -+ * @enable: 1 - Enables Horizontal Median Filter. -+ **/ -+void isppreview_enable_hmed(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_HMEDEN); -+ else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_HMEDEN); -+ } -+ isp_prev->hmed_en = enable ? 1 : 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_hmed); -+ -+/** -+ * isppreview_config_hmed - Configures the Horizontal Median Filter. -+ * @prev_hmed: Structure containing the odd and even distance between the -+ * pixels in the image along with the filter threshold. -+ **/ -+void isppreview_config_hmed(struct isp_prev_device *isp_prev, -+ struct ispprev_hmed prev_hmed) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ u32 odddist = 0; -+ u32 evendist = 0; -+ -+ if (prev_hmed.odddist == 1) -+ odddist = ~ISPPRV_HMED_ODDDIST; -+ else -+ odddist = ISPPRV_HMED_ODDDIST; -+ -+ if (prev_hmed.evendist == 1) -+ evendist = ~ISPPRV_HMED_EVENDIST; -+ else -+ evendist = ISPPRV_HMED_EVENDIST; -+ -+ isp_reg_writel(dev, odddist | evendist | (prev_hmed.thres << -+ ISPPRV_HMED_THRESHOLD_SHIFT), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED); -+ -+} -+EXPORT_SYMBOL_GPL(isppreview_config_hmed); -+ -+/** -+ * isppreview_config_noisefilter - Configures the Noise Filter. -+ * @prev_nf: Structure containing the noisefilter table, strength to be used -+ * for the noise filter and the defect correction enable flag. 
-+ **/ -+void isppreview_config_noisefilter(struct isp_prev_device *isp_prev, -+ struct ispprev_nf prev_nf) -+{ -+ struct device *dev = to_device(isp_prev); -+ int i = 0; -+ -+ isp_reg_writel(dev, prev_nf.spread, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_NF); -+ isp_reg_writel(dev, ISPPRV_NF_TABLE_ADDR, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ for (i = 0; i < ISPPRV_NF_TBL_SIZE; i++) { -+ isp_reg_writel(dev, prev_nf.table[i], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_config_noisefilter); -+ -+/** -+ * isppreview_config_dcor - Configures the defect correction -+ * @prev_nf: Structure containing the defect correction structure -+ **/ -+void isppreview_config_dcor(struct isp_prev_device *isp_prev, -+ struct ispprev_dcor prev_dcor) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (prev_dcor.couplet_mode_en) { -+ isp_reg_writel(dev, prev_dcor.detect_correct[0], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0); -+ isp_reg_writel(dev, prev_dcor.detect_correct[1], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1); -+ isp_reg_writel(dev, prev_dcor.detect_correct[2], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2); -+ isp_reg_writel(dev, prev_dcor.detect_correct[3], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3); -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_DCCOUP); -+ } else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_DCCOUP); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_config_dcor); -+ -+/** -+ * isppreview_config_cfa - Configures the CFA Interpolation parameters. -+ * @prev_cfa: Structure containing the CFA interpolation table, CFA format -+ * in the image, vertical and horizontal gradient threshold. -+ **/ -+void isppreview_config_cfa(struct isp_prev_device *isp_prev, -+ struct ispprev_cfa *prev_cfa) -+{ -+ struct device *dev = to_device(isp_prev); -+ int i = 0; -+ -+ isp_prev->cfafmt = prev_cfa->cfafmt; -+ -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_CFAFMT_MASK, -+ (prev_cfa->cfafmt << ISPPRV_PCR_CFAFMT_SHIFT)); -+ -+ isp_reg_writel(dev, -+ (prev_cfa->cfa_gradthrs_vert << ISPPRV_CFA_GRADTH_VER_SHIFT) | -+ (prev_cfa->cfa_gradthrs_horz << ISPPRV_CFA_GRADTH_HOR_SHIFT), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA); -+ -+ isp_reg_writel(dev, ISPPRV_CFA_TABLE_ADDR, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ -+ for (i = 0; i < ISPPRV_CFA_TBL_SIZE; i++) { -+ isp_reg_writel(dev, prev_cfa->cfa_table[i], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_config_cfa); -+ -+/** -+ * isppreview_config_gammacorrn - Configures the Gamma Correction table values -+ * @gtable: Structure containing the table for red, blue, green gamma table. 
-+ **/ -+void isppreview_config_gammacorrn(struct isp_prev_device *isp_prev, -+ struct ispprev_gtable gtable) -+{ -+ struct device *dev = to_device(isp_prev); -+ int i = 0; -+ -+ isp_reg_writel(dev, ISPPRV_REDGAMMA_TABLE_ADDR, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ for (i = 0; i < ISPPRV_GAMMA_TBL_SIZE; i++) { -+ isp_reg_writel(dev, gtable.redtable[i], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); -+ } -+ -+ isp_reg_writel(dev, ISPPRV_GREENGAMMA_TABLE_ADDR, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ for (i = 0; i < ISPPRV_GAMMA_TBL_SIZE; i++) { -+ isp_reg_writel(dev, gtable.greentable[i], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); -+ } -+ -+ isp_reg_writel(dev, ISPPRV_BLUEGAMMA_TABLE_ADDR, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ for (i = 0; i < ISPPRV_GAMMA_TBL_SIZE; i++) { -+ isp_reg_writel(dev, gtable.bluetable[i], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_config_gammacorrn); -+ -+/** -+ * isppreview_config_luma_enhancement - Sets the Luminance Enhancement table. -+ * @ytable: Structure containing the table for Luminance Enhancement table. -+ **/ -+void isppreview_config_luma_enhancement(struct isp_prev_device *isp_prev, -+ u32 *ytable) -+{ -+ struct device *dev = to_device(isp_prev); -+ int i = 0; -+ -+ isp_reg_writel(dev, ISPPRV_YENH_TABLE_ADDR, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); -+ for (i = 0; i < ISPPRV_YENH_TBL_SIZE; i++) { -+ isp_reg_writel(dev, ytable[i], -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_config_luma_enhancement); -+ -+/** -+ * isppreview_config_chroma_suppression - Configures the Chroma Suppression. -+ * @csup: Structure containing the threshold value for suppression -+ * and the hypass filter enable flag. -+ **/ -+void isppreview_config_chroma_suppression(struct isp_prev_device *isp_prev, -+ struct ispprev_csup csup) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ isp_reg_writel(dev, -+ csup.gain | (csup.thres << ISPPRV_CSUP_THRES_SHIFT) | -+ (csup.hypf_en << ISPPRV_CSUP_HPYF_SHIFT), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_chroma_suppression); -+ -+/** -+ * isppreview_enable_noisefilter - Enables/Disables the Noise Filter. -+ * @enable: 1 - Enables the Noise Filter. -+ **/ -+void isppreview_enable_noisefilter(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_NFEN); -+ else -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_NFEN); -+ isp_prev->nf_en = enable ? 1 : 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_noisefilter); -+ -+/** -+ * isppreview_enable_dcor - Enables/Disables the defect correction. -+ * @enable: 1 - Enables the defect correction. -+ **/ -+void isppreview_enable_dcor(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_DCOREN); -+ else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_DCOREN); -+ } -+ isp_prev->dcor_en = enable ? 1 : 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_dcor); -+ -+/** -+ * isppreview_enable_cfa - Enable/Disable the CFA Interpolation. -+ * @enable: 1 - Enables the CFA. 
-+ **/ -+void isppreview_enable_cfa(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_CFAEN); -+ else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_CFAEN); -+ } -+ isp_prev->cfa_en = enable ? 1 : 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_cfa); -+ -+/** -+ * isppreview_enable_gammabypass - Enables/Disables the GammaByPass -+ * @enable: 1 - Bypasses Gamma - 10bit input is cropped to 8MSB. -+ * 0 - Goes through Gamma Correction. input and output is 10bit. -+ **/ -+void isppreview_enable_gammabypass(struct isp_prev_device *isp_prev, u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) { -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_GAMMA_BYPASS); -+ } else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_GAMMA_BYPASS); -+ } -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_gammabypass); -+ -+/** -+ * isppreview_enable_luma_enhancement - Enables/Disables Luminance Enhancement -+ * @enable: 1 - Enable the Luminance Enhancement. -+ **/ -+void isppreview_enable_luma_enhancement(struct isp_prev_device *isp_prev, -+ u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) { -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_YNENHEN); -+ } else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_YNENHEN); -+ } -+ isp_prev->yenh_en = enable ? 1 : 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_luma_enhancement); -+ -+/** -+ * isppreview_enable_chroma_suppression - Enables/Disables Chrominance Suppr. -+ * @enable: 1 - Enable the Chrominance Suppression. -+ **/ -+void isppreview_enable_chroma_suppression(struct isp_prev_device *isp_prev, -+ u8 enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_SUPEN); -+ else { -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~ISPPRV_PCR_SUPEN); -+ } -+ isp_prev->csup_en = enable ? 1 : 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_enable_chroma_suppression); -+ -+/** -+ * isppreview_config_whitebalance - Configures the White Balance parameters. -+ * @prev_wbal: Structure containing the digital gain and white balance -+ * coefficient. -+ * -+ * Coefficient matrix always with default values. 
-+ **/ -+void isppreview_config_whitebalance(struct isp_prev_device *isp_prev, -+ struct ispprev_wbal prev_wbal) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 val; -+ -+ isp_reg_writel(dev, prev_wbal.dgain, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WB_DGAIN); -+ -+ val = prev_wbal.coef0 << ISPPRV_WBGAIN_COEF0_SHIFT; -+ val |= prev_wbal.coef1 << ISPPRV_WBGAIN_COEF1_SHIFT; -+ val |= prev_wbal.coef2 << ISPPRV_WBGAIN_COEF2_SHIFT; -+ val |= prev_wbal.coef3 << ISPPRV_WBGAIN_COEF3_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WBGAIN); -+ -+ isp_reg_writel(dev, -+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_0_SHIFT | -+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_1_SHIFT | -+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_2_SHIFT | -+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_3_SHIFT | -+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_0_SHIFT | -+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_1_SHIFT | -+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_2_SHIFT | -+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_3_SHIFT | -+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_0_SHIFT | -+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_1_SHIFT | -+ ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_2_SHIFT | -+ ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_3_SHIFT | -+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_0_SHIFT | -+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_1_SHIFT | -+ ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_2_SHIFT | -+ ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_3_SHIFT, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_whitebalance); -+ -+/** -+ * isppreview_config_whitebalance2 - Configures the White Balance parameters. -+ * @prev_wbal: Structure containing the digital gain and white balance -+ * coefficient. -+ * -+ * Coefficient matrix can be changed. -+ **/ -+void isppreview_config_whitebalance2(struct isp_prev_device *isp_prev, -+ struct prev_white_balance prev_wbal) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ isp_reg_writel(dev, prev_wbal.wb_dgain, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_WB_DGAIN); -+ isp_reg_writel(dev, prev_wbal.wb_gain[0] | -+ prev_wbal.wb_gain[1] << ISPPRV_WBGAIN_COEF1_SHIFT | -+ prev_wbal.wb_gain[2] << ISPPRV_WBGAIN_COEF2_SHIFT | -+ prev_wbal.wb_gain[3] << ISPPRV_WBGAIN_COEF3_SHIFT, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_WBGAIN); -+ -+ isp_reg_writel(dev, -+ prev_wbal.wb_coefmatrix[0][0] << ISPPRV_WBSEL_N0_0_SHIFT | -+ prev_wbal.wb_coefmatrix[0][1] << ISPPRV_WBSEL_N0_1_SHIFT | -+ prev_wbal.wb_coefmatrix[0][2] << ISPPRV_WBSEL_N0_2_SHIFT | -+ prev_wbal.wb_coefmatrix[0][3] << ISPPRV_WBSEL_N0_3_SHIFT | -+ prev_wbal.wb_coefmatrix[1][0] << ISPPRV_WBSEL_N1_0_SHIFT | -+ prev_wbal.wb_coefmatrix[1][1] << ISPPRV_WBSEL_N1_1_SHIFT | -+ prev_wbal.wb_coefmatrix[1][2] << ISPPRV_WBSEL_N1_2_SHIFT | -+ prev_wbal.wb_coefmatrix[1][3] << ISPPRV_WBSEL_N1_3_SHIFT | -+ prev_wbal.wb_coefmatrix[2][0] << ISPPRV_WBSEL_N2_0_SHIFT | -+ prev_wbal.wb_coefmatrix[2][1] << ISPPRV_WBSEL_N2_1_SHIFT | -+ prev_wbal.wb_coefmatrix[2][2] << ISPPRV_WBSEL_N2_2_SHIFT | -+ prev_wbal.wb_coefmatrix[2][3] << ISPPRV_WBSEL_N2_3_SHIFT | -+ prev_wbal.wb_coefmatrix[3][0] << ISPPRV_WBSEL_N3_0_SHIFT | -+ prev_wbal.wb_coefmatrix[3][1] << ISPPRV_WBSEL_N3_1_SHIFT | -+ prev_wbal.wb_coefmatrix[3][2] << ISPPRV_WBSEL_N3_2_SHIFT | -+ prev_wbal.wb_coefmatrix[3][3] << ISPPRV_WBSEL_N3_3_SHIFT, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_whitebalance2); -+ -+/** -+ * isppreview_config_blkadj - Configures the Black Adjustment parameters. -+ * @prev_blkadj: Structure containing the black adjustment towards red, green, -+ * blue. 
-+ **/ -+void isppreview_config_blkadj(struct isp_prev_device *isp_prev, -+ struct ispprev_blkadj prev_blkadj) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ isp_reg_writel(dev, prev_blkadj.blue | -+ (prev_blkadj.green << ISPPRV_BLKADJOFF_G_SHIFT) | -+ (prev_blkadj.red << ISPPRV_BLKADJOFF_R_SHIFT), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_blkadj); -+ -+/** -+ * isppreview_config_rgb_blending - Configures the RGB-RGB Blending matrix. -+ * @rgb2rgb: Structure containing the rgb to rgb blending matrix and the rgb -+ * offset. -+ **/ -+void isppreview_config_rgb_blending(struct isp_prev_device *isp_prev, -+ struct ispprev_rgbtorgb rgb2rgb) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 val = 0; -+ -+ val = (rgb2rgb.matrix[0][0] & 0xfff) << ISPPRV_RGB_MAT1_MTX_RR_SHIFT; -+ val |= (rgb2rgb.matrix[0][1] & 0xfff) << ISPPRV_RGB_MAT1_MTX_GR_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT1); -+ -+ val = (rgb2rgb.matrix[0][2] & 0xfff) << ISPPRV_RGB_MAT2_MTX_BR_SHIFT; -+ val |= (rgb2rgb.matrix[1][0] & 0xfff) << ISPPRV_RGB_MAT2_MTX_RG_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT2); -+ -+ val = (rgb2rgb.matrix[1][1] & 0xfff) << ISPPRV_RGB_MAT3_MTX_GG_SHIFT; -+ val |= (rgb2rgb.matrix[1][2] & 0xfff) << ISPPRV_RGB_MAT3_MTX_BG_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT3); -+ -+ val = (rgb2rgb.matrix[2][0] & 0xfff) << ISPPRV_RGB_MAT4_MTX_RB_SHIFT; -+ val |= (rgb2rgb.matrix[2][1] & 0xfff) << ISPPRV_RGB_MAT4_MTX_GB_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT4); -+ -+ val = (rgb2rgb.matrix[2][2] & 0xfff) << ISPPRV_RGB_MAT5_MTX_BB_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT5); -+ -+ val = (rgb2rgb.offset[0] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT; -+ val |= (rgb2rgb.offset[1] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_OFF1); -+ -+ val = (rgb2rgb.offset[2] & 0x3ff) << ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_OFF2); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_rgb_blending); -+ -+/** -+ * Configures the RGB-YCbYCr conversion matrix -+ * @prev_csc: Structure containing the RGB to YCbYCr matrix and the -+ * YCbCr offset. 
-+ **/ -+void isppreview_config_rgb_to_ycbcr(struct isp_prev_device *isp_prev, -+ struct ispprev_csc prev_csc) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 val = 0; -+ -+ val = (prev_csc.matrix[0][0] & 0x3ff) << ISPPRV_CSC0_RY_SHIFT; -+ val |= (prev_csc.matrix[0][1] & 0x3ff) << ISPPRV_CSC0_GY_SHIFT; -+ val |= (prev_csc.matrix[0][2] & 0x3ff) << ISPPRV_CSC0_BY_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0); -+ -+ val = (prev_csc.matrix[1][0] & 0x3ff) << ISPPRV_CSC1_RCB_SHIFT; -+ val |= (prev_csc.matrix[1][1] & 0x3ff) << ISPPRV_CSC1_GCB_SHIFT; -+ val |= (prev_csc.matrix[1][2] & 0x3ff) << ISPPRV_CSC1_BCB_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1); -+ -+ val = (prev_csc.matrix[2][0] & 0x3ff) << ISPPRV_CSC2_RCR_SHIFT; -+ val |= (prev_csc.matrix[2][1] & 0x3ff) << ISPPRV_CSC2_GCR_SHIFT; -+ val |= (prev_csc.matrix[2][2] & 0x3ff) << ISPPRV_CSC2_BCR_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2); -+ -+ val = (prev_csc.offset[0] & 0xff) << ISPPRV_CSC_OFFSET_Y_SHIFT; -+ val |= (prev_csc.offset[1] & 0xff) << ISPPRV_CSC_OFFSET_CB_SHIFT; -+ val |= (prev_csc.offset[2] & 0xff) << ISPPRV_CSC_OFFSET_CR_SHIFT; -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CSC_OFFSET); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_rgb_to_ycbcr); -+ -+/** -+ * isppreview_query_contrast - Query the contrast. -+ * @contrast: Pointer to hold the current programmed contrast value. -+ **/ -+void isppreview_query_contrast(struct isp_prev_device *isp_prev, u8 *contrast) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 brt_cnt_val = 0; -+ -+ brt_cnt_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT); -+ *contrast = (brt_cnt_val >> ISPPRV_CNT_BRT_CNT_SHIFT) & 0xff; -+ DPRINTK_ISPPREV(" Current brt cnt value in hw is %x\n", brt_cnt_val); -+} -+EXPORT_SYMBOL_GPL(isppreview_query_contrast); -+ -+/** -+ * isppreview_update_contrast - Updates the contrast. -+ * @contrast: Pointer to hold the current programmed contrast value. -+ * -+ * Value should be programmed before enabling the module. -+ **/ -+void isppreview_update_contrast(struct isp_prev_device *isp_prev, u8 *contrast) -+{ -+ isp_prev->contrast = *contrast; -+} -+EXPORT_SYMBOL_GPL(isppreview_update_contrast); -+ -+/** -+ * isppreview_config_contrast - Configures the Contrast. -+ * @contrast: 8 bit value in U8Q4 format. -+ * -+ * Value should be programmed before enabling the module. -+ **/ -+void isppreview_config_contrast(struct isp_prev_device *isp_prev, u8 contrast) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 brt_cnt_val = 0; -+ -+ brt_cnt_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CNT_BRT); -+ brt_cnt_val &= ~(0xff << ISPPRV_CNT_BRT_CNT_SHIFT); -+ contrast &= 0xff; -+ isp_reg_writel(dev, -+ brt_cnt_val | contrast << ISPPRV_CNT_BRT_CNT_SHIFT, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_contrast); -+ -+/** -+ * isppreview_get_contrast_range - Gets the range contrast value. -+ * @min_contrast: Pointer to hold the minimum Contrast value. -+ * @max_contrast: Pointer to hold the maximum Contrast value. -+ **/ -+void isppreview_get_contrast_range(u8 *min_contrast, u8 *max_contrast) -+{ -+ *min_contrast = ISPPRV_CONTRAST_MIN; -+ *max_contrast = ISPPRV_CONTRAST_MAX; -+} -+EXPORT_SYMBOL_GPL(isppreview_get_contrast_range); -+ -+/** -+ * isppreview_update_brightness - Updates the brightness in preview module. -+ * @brightness: Pointer to hold the current programmed brightness value. 
-+ * -+ **/ -+void isppreview_update_brightness(struct isp_prev_device *isp_prev, -+ u8 *brightness) -+{ -+ isp_prev->brightness = *brightness; -+} -+EXPORT_SYMBOL_GPL(isppreview_update_brightness); -+ -+/** -+ * isppreview_config_brightness - Configures the brightness. -+ * @brightness: 8 bit value in U8Q0 format. -+ **/ -+void isppreview_config_brightness(struct isp_prev_device *isp_prev, -+ u8 brightness) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 brt_cnt_val = 0; -+ -+ DPRINTK_ISPPREV("\tConfiguring brightness in ISP: %d\n", brightness); -+ brt_cnt_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CNT_BRT); -+ brt_cnt_val &= ~(0xff << ISPPRV_CNT_BRT_BRT_SHIFT); -+ brightness &= 0xff; -+ isp_reg_writel(dev, -+ brt_cnt_val | brightness << ISPPRV_CNT_BRT_BRT_SHIFT, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_brightness); -+ -+/** -+ * isppreview_query_brightness - Query the brightness. -+ * @brightness: Pointer to hold the current programmed brightness value. -+ **/ -+void isppreview_query_brightness(struct isp_prev_device *isp_prev, -+ u8 *brightness) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ *brightness = isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CNT_BRT); -+} -+EXPORT_SYMBOL_GPL(isppreview_query_brightness); -+ -+/** -+ * isppreview_get_brightness_range - Gets the range brightness value -+ * @min_brightness: Pointer to hold the minimum brightness value -+ * @max_brightness: Pointer to hold the maximum brightness value -+ **/ -+void isppreview_get_brightness_range(u8 *min_brightness, u8 *max_brightness) -+{ -+ *min_brightness = ISPPRV_BRIGHT_MIN; -+ *max_brightness = ISPPRV_BRIGHT_MAX; -+} -+EXPORT_SYMBOL_GPL(isppreview_get_brightness_range); -+ -+/** -+ * isppreview_set_color - Sets the color effect. -+ * @mode: Indicates the required color effect. -+ **/ -+void isppreview_set_color(struct isp_prev_device *isp_prev, u8 *mode) -+{ -+ isp_prev->color = *mode; -+ isp_prev->update_color_matrix = 1; -+} -+EXPORT_SYMBOL_GPL(isppreview_set_color); -+ -+/** -+ * isppreview_get_color - Gets the current color effect. -+ * @mode: Indicates the current color effect. -+ **/ -+void isppreview_get_color(struct isp_prev_device *isp_prev, u8 *mode) -+{ -+ *mode = isp_prev->color; -+} -+EXPORT_SYMBOL_GPL(isppreview_get_color); -+ -+/** -+ * isppreview_config_yc_range - Configures the max and min Y and C values. -+ * @yclimit: Structure containing the range of Y and C values. -+ **/ -+void isppreview_config_yc_range(struct isp_prev_device *isp_prev, -+ struct ispprev_yclimit yclimit) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ isp_reg_writel(dev, -+ yclimit.maxC << ISPPRV_SETUP_YC_MAXC_SHIFT | -+ yclimit.maxY << ISPPRV_SETUP_YC_MAXY_SHIFT | -+ yclimit.minC << ISPPRV_SETUP_YC_MINC_SHIFT | -+ yclimit.minY << ISPPRV_SETUP_YC_MINY_SHIFT, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC); -+} -+EXPORT_SYMBOL_GPL(isppreview_config_yc_range); -+ -+/** -+ * isppreview_try_pipeline - Calculates output dimensions with the modules enabled. -+ * @input_w: input width for the preview in number of pixels per line -+ * @input_h: input height for the preview in number of lines -+ * @output_w: output width from the preview in number of pixels per line -+ * @output_h: output height for the preview in number of lines -+ * -+ * Calculates the number of pixels cropped in the submodules that are enabled, -+ * and fills up the output width/height variables in the isp_prev structure.
-+ **/ -+int isppreview_try_pipeline(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 div = 0; -+ int max_out; -+ -+ if (pipe->ccdc_out_w_img < 32 || pipe->ccdc_out_h < 32) { -+ dev_err(dev, "preview does not support " -+ "width < 16 or height < 32 \n"); -+ return -EINVAL; -+ } -+ if (omap_rev() == OMAP3430_REV_ES1_0) -+ max_out = ISPPRV_MAXOUTPUT_WIDTH; -+ else -+ max_out = ISPPRV_MAXOUTPUT_WIDTH_ES2; -+ -+ pipe->prv_out_w = pipe->ccdc_out_w; -+ pipe->prv_out_h = pipe->ccdc_out_h; -+ pipe->prv_out_w_img = pipe->ccdc_out_w_img; -+ pipe->prv_out_h_img = pipe->ccdc_out_h; -+ -+/* if (isp_prev->hmed_en) */ -+ pipe->prv_out_w_img -= 4; -+/* if (isp_prev->nf_en) */ -+ pipe->prv_out_w_img -= 4; -+ pipe->prv_out_h_img -= 4; -+/* if (isp_prev->cfa_en) */ -+ switch (isp_prev->cfafmt) { -+ case CFAFMT_BAYER: -+ case CFAFMT_SONYVGA: -+ pipe->prv_out_w_img -= 4; -+ pipe->prv_out_h_img -= 4; -+ break; -+ case CFAFMT_RGBFOVEON: -+ case CFAFMT_RRGGBBFOVEON: -+ case CFAFMT_DNSPL: -+ case CFAFMT_HONEYCOMB: -+ pipe->prv_out_h_img -= 2; -+ break; -+ }; -+/* if (isp_prev->yenh_en || isp_prev->csup_en) */ -+ pipe->prv_out_w_img -= 2; -+ -+ /* Start at the correct row/column by skipping -+ * a Sensor specific amount. -+ */ -+ pipe->prv_out_w_img -= isp_prev->sph; -+ pipe->prv_out_h_img -= isp_prev->slv; -+ -+ div = DIV_ROUND_UP(pipe->ccdc_out_w_img, max_out); -+ if (div == 1) { -+ pipe->prv_fmt_avg = 0; -+ } else if (div <= 2) { -+ pipe->prv_fmt_avg = 1; -+ pipe->prv_out_w_img /= 2; -+ } else if (div <= 4) { -+ pipe->prv_fmt_avg = 2; -+ pipe->prv_out_w_img /= 4; -+ } else if (div <= 8) { -+ pipe->prv_fmt_avg = 3; -+ pipe->prv_out_w_img /= 8; -+ } else { -+ return -EINVAL; -+ } -+ -+ /* output width must be even */ -+ pipe->prv_out_w_img &= ~1; -+ -+ /* FIXME: This doesn't apply for prv -> rsz. */ -+ pipe->prv_out_w = ALIGN(pipe->prv_out_w, 0x20); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_try_pipeline); -+ -+/** -+ * isppreview_config_size - Sets the size of ISP preview output. -+ * @pipe->ccdc_out_w: input width for the preview in number of pixels per line -+ * @pipe->ccdc_out_h: input height for the preview in number of lines -+ * @output_w: output width from the preview in number of pixels per line -+ * @output_h: output height for the preview in number of lines -+ * -+ * Configures the appropriate values stored in the isp_prev structure to -+ * HORZ/VERT_INFO. Configures PRV_AVE if needed for downsampling as calculated -+ * in trysize. 
-+ **/ -+int isppreview_s_pipeline(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_prev); -+ u32 prevsdroff; -+ int rval; -+ -+ rval = isppreview_config_datapath(isp_prev, pipe); -+ if (rval) -+ return rval; -+ -+ isp_reg_writel(dev, -+ (isp_prev->sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | -+ (pipe->ccdc_out_w - 1), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO); -+ isp_reg_writel(dev, -+ (isp_prev->slv << ISPPRV_VERT_INFO_SLV_SHIFT) | -+ (pipe->ccdc_out_h - 2), -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO); -+ -+ if (isp_prev->cfafmt == CFAFMT_BAYER) -+ isp_reg_writel(dev, ISPPRV_AVE_EVENDIST_2 << -+ ISPPRV_AVE_EVENDIST_SHIFT | -+ ISPPRV_AVE_ODDDIST_2 << -+ ISPPRV_AVE_ODDDIST_SHIFT | -+ pipe->prv_fmt_avg, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE); -+ -+ if (pipe->prv_out == PREVIEW_MEM) { -+ prevsdroff = pipe->prv_out_w * ISP_BYTES_PER_PIXEL; -+ if ((prevsdroff & ISP_32B_BOUNDARY_OFFSET) != prevsdroff) { -+ DPRINTK_ISPPREV("ISP_WARN: Preview output buffer line" -+ " size is truncated" -+ " to 32byte boundary\n"); -+ prevsdroff &= ISP_32B_BOUNDARY_BUF ; -+ } -+ isppreview_config_outlineoffset(isp_prev, prevsdroff); -+ } -+ -+ if (pipe->pix.pixelformat == V4L2_PIX_FMT_UYVY) -+ isppreview_config_ycpos(isp_prev, YCPOS_YCrYCb); -+ else -+ isppreview_config_ycpos(isp_prev, YCPOS_CrYCbY); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_s_pipeline); -+ -+/** -+ * isppreview_config_inlineoffset - Configures the Read address line offset. -+ * @offset: Line Offset for the input image. -+ **/ -+int isppreview_config_inlineoffset(struct isp_prev_device *isp_prev, u32 offset) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if ((offset & ISP_32B_BOUNDARY_OFFSET) == offset) { -+ isp_reg_writel(dev, offset & 0xffff, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_RADR_OFFSET); -+ } else { -+ dev_err(dev, "preview: Offset should be in 32 byte " -+ "boundary\n"); -+ return -EINVAL; -+ } -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_config_inlineoffset); -+ -+/** -+ * isppreview_set_inaddr - Sets memory address of input frame. -+ * @addr: 32bit memory address aligned on 32byte boundary. -+ * -+ * Configures the memory address from which the input frame is to be read. -+ **/ -+int isppreview_set_inaddr(struct isp_prev_device *isp_prev, u32 addr) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if ((addr & ISP_32B_BOUNDARY_BUF) == addr) -+ isp_reg_writel(dev, addr, -+ OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR); -+ else { -+ dev_err(dev, "preview: Address should be in 32 byte " -+ "boundary\n"); -+ return -EINVAL; -+ } -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_set_inaddr); -+ -+/** -+ * isppreview_config_outlineoffset - Configures the Write address line offset. -+ * @offset: Line Offset for the preview output. -+ **/ -+int isppreview_config_outlineoffset(struct isp_prev_device *isp_prev, -+ u32 offset) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if ((offset & ISP_32B_BOUNDARY_OFFSET) != offset) { -+ dev_err(dev, "preview: Offset should be in 32 byte " -+ "boundary\n"); -+ return -EINVAL; -+ } -+ isp_reg_writel(dev, offset & 0xffff, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WADD_OFFSET); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_config_outlineoffset); -+ -+/** -+ * isppreview_set_outaddr - Sets the memory address to store output frame -+ * @addr: 32bit memory address aligned on 32byte boundary. -+ * -+ * Configures the memory address to which the output frame is written. 
-+ **/ -+int isppreview_set_outaddr(struct isp_prev_device *isp_prev, u32 addr) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if ((addr & ISP_32B_BOUNDARY_BUF) != addr) { -+ dev_err(dev, "preview: Address should be in 32 byte " -+ "boundary\n"); -+ return -EINVAL; -+ } -+ isp_reg_writel(dev, addr, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WSDR_ADDR); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_set_outaddr); -+ -+/** -+ * isppreview_config_darklineoffset - Sets the Dark frame address line offset. -+ * @offset: Line Offset for the Darkframe. -+ **/ -+int isppreview_config_darklineoffset(struct isp_prev_device *isp_prev, -+ u32 offset) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if ((offset & ISP_32B_BOUNDARY_OFFSET) != offset) { -+ dev_err(dev, "preview: Offset should be in 32 byte " -+ "boundary\n"); -+ return -EINVAL; -+ } -+ isp_reg_writel(dev, offset & 0xffff, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_DRKF_OFFSET); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_config_darklineoffset); -+ -+/** -+ * isppreview_set_darkaddr - Sets the memory address to store Dark frame. -+ * @addr: 32bit memory address aligned on 32 bit boundary. -+ **/ -+int isppreview_set_darkaddr(struct isp_prev_device *isp_prev, u32 addr) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if ((addr & ISP_32B_BOUNDARY_BUF) != addr) { -+ dev_err(dev, "preview: Address should be in 32 byte " -+ "boundary\n"); -+ return -EINVAL; -+ } -+ isp_reg_writel(dev, addr, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_DSDR_ADDR); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(isppreview_set_darkaddr); -+ -+/** -+ * isppreview_enable - Enables the Preview module. -+ * @enable: 1 - Enables the preview module. -+ * -+ * Client should configure all the sub modules in Preview before this. -+ **/ -+void isppreview_enable(struct isp_prev_device *isp_prev, int enable) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ if (enable) -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ISPPRV_PCR_EN | ISPPRV_PCR_ONESHOT); -+ else -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, -+ ~(ISPPRV_PCR_EN | ISPPRV_PCR_ONESHOT)); -+} -+EXPORT_SYMBOL_GPL(isppreview_enable); -+ -+/** -+ * isppreview_busy - Gets busy state of preview module. -+ **/ -+int isppreview_busy(struct isp_prev_device *isp_prev) -+{ -+ struct device *dev = to_device(isp_prev); -+ -+ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR) -+ & ISPPRV_PCR_BUSY; -+} -+EXPORT_SYMBOL_GPL(isppreview_busy); -+ -+/** -+ * isppreview_save_context - Saves the values of the preview module registers. -+ **/ -+void isppreview_save_context(struct device *dev) -+{ -+ DPRINTK_ISPPREV("Saving context\n"); -+ isp_save_context(dev, ispprev_reg_list); -+ /* Avoid unwanted enabling when restoring the context. */ -+ ispprev_reg_list[0].val &= ~ISPPRV_PCR_EN; -+} -+EXPORT_SYMBOL_GPL(isppreview_save_context); -+ -+/** -+ * isppreview_restore_context - Restores the values of preview module registers -+ **/ -+void isppreview_restore_context(struct device *dev) -+{ -+ DPRINTK_ISPPREV("Restoring context\n"); -+ isp_restore_context(dev, ispprev_reg_list); -+} -+EXPORT_SYMBOL_GPL(isppreview_restore_context); -+ -+/** -+ * isppreview_print_status - Prints the values of the Preview Module registers. -+ * -+ * Also prints other debug information stored in the preview moduel. 
-+ **/ -+void isppreview_print_status(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe) -+{ -+#ifdef OMAP_ISPPREV_DEBUG -+ struct device *dev = to_device(isp_prev); -+#endif -+ -+ DPRINTK_ISPPREV("Preview Input format =%d, Output Format =%d\n", -+ pipe->prv_inp, pipe->prv_out); -+ DPRINTK_ISPPREV("Accepted Preview Input (width = %d,Height = %d)\n", -+ isp_prev->previn_w, -+ isp_prev->previn_h); -+ DPRINTK_ISPPREV("Accepted Preview Output (width = %d,Height = %d)\n", -+ isp_prev->prevout_w, -+ isp_prev->prevout_h); -+ DPRINTK_ISPPREV("###ISP_CTRL in preview =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_CTRL)); -+ DPRINTK_ISPPREV("###ISP_IRQ0ENABLE in preview =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_IRQ0ENABLE)); -+ DPRINTK_ISPPREV("###ISP_IRQ0STATUS in preview =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, -+ ISP_IRQ0STATUS)); -+ DPRINTK_ISPPREV("###PRV PCR =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_PCR)); -+ DPRINTK_ISPPREV("###PRV HORZ_INFO =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_HORZ_INFO)); -+ DPRINTK_ISPPREV("###PRV VERT_INFO =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_VERT_INFO)); -+ DPRINTK_ISPPREV("###PRV WSDR_ADDR =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WSDR_ADDR)); -+ DPRINTK_ISPPREV("###PRV WADD_OFFSET =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WADD_OFFSET)); -+ DPRINTK_ISPPREV("###PRV AVE =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_AVE)); -+ DPRINTK_ISPPREV("###PRV HMED =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_HMED)); -+ DPRINTK_ISPPREV("###PRV NF =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_NF)); -+ DPRINTK_ISPPREV("###PRV WB_DGAIN =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WB_DGAIN)); -+ DPRINTK_ISPPREV("###PRV WBGAIN =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WBGAIN)); -+ DPRINTK_ISPPREV("###PRV WBSEL =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_WBSEL)); -+ DPRINTK_ISPPREV("###PRV CFA =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CFA)); -+ DPRINTK_ISPPREV("###PRV BLKADJOFF =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_BLKADJOFF)); -+ DPRINTK_ISPPREV("###PRV RGB_MAT1 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT1)); -+ DPRINTK_ISPPREV("###PRV RGB_MAT2 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT2)); -+ DPRINTK_ISPPREV("###PRV RGB_MAT3 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT3)); -+ DPRINTK_ISPPREV("###PRV RGB_MAT4 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT4)); -+ DPRINTK_ISPPREV("###PRV RGB_MAT5 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_MAT5)); -+ DPRINTK_ISPPREV("###PRV RGB_OFF1 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_OFF1)); -+ DPRINTK_ISPPREV("###PRV RGB_OFF2 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_RGB_OFF2)); -+ DPRINTK_ISPPREV("###PRV CSC0 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CSC0)); -+ DPRINTK_ISPPREV("###PRV CSC1 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CSC1)); -+ DPRINTK_ISPPREV("###PRV CSC2 =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CSC2)); -+ DPRINTK_ISPPREV("###PRV CSC_OFFSET =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CSC_OFFSET)); -+ DPRINTK_ISPPREV("###PRV CNT_BRT 
=0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CNT_BRT)); -+ DPRINTK_ISPPREV("###PRV CSUP =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_CSUP)); -+ DPRINTK_ISPPREV("###PRV SETUP_YC =0x%x\n", -+ isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, -+ ISPPRV_SETUP_YC)); -+} -+EXPORT_SYMBOL_GPL(isppreview_print_status); -+ -+/** -+ * isp_preview_init - Module Initialization. -+ **/ -+int __init isp_preview_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_prev_device *isp_prev = &isp->isp_prev; -+ struct prev_params *params = &isp_prev->params; -+ int i = 0; -+ -+ /* Init values */ -+ isp_prev->sph = 2; -+ isp_prev->slv = 0; -+ isp_prev->color = V4L2_COLORFX_NONE; -+ isp_prev->contrast = ISPPRV_CONTRAST_DEF; -+ params->contrast = ISPPRV_CONTRAST_DEF; -+ isp_prev->brightness = ISPPRV_BRIGHT_DEF; -+ params->brightness = ISPPRV_BRIGHT_DEF; -+ params->average = NO_AVE; -+ params->lens_shading_shift = 0; -+ params->cfa.cfafmt = CFAFMT_BAYER; -+ params->cfa.cfa_table = cfa_coef_table; -+ params->cfa.cfa_gradthrs_horz = FLR_CFA_GRADTHRS_HORZ; -+ params->cfa.cfa_gradthrs_vert = FLR_CFA_GRADTHRS_VERT; -+ params->csup.gain = FLR_CSUP_GAIN; -+ params->csup.thres = FLR_CSUP_THRES; -+ params->csup.hypf_en = 0; -+ params->ytable = luma_enhance_table; -+ params->nf.spread = FLR_NF_STRGTH; -+ memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table)); -+ params->dcor.couplet_mode_en = 1; -+ for (i = 0; i < 4; i++) -+ params->dcor.detect_correct[i] = 0xE; -+ params->gtable.bluetable = bluegamma_table; -+ params->gtable.greentable = greengamma_table; -+ params->gtable.redtable = redgamma_table; -+ params->wbal.dgain = FLR_WBAL_DGAIN; -+ if (omap_rev() > OMAP3430_REV_ES1_0) { -+ params->wbal.coef0 = FLR_WBAL_COEF0_ES1; -+ params->wbal.coef1 = FLR_WBAL_COEF1_ES1; -+ params->wbal.coef2 = FLR_WBAL_COEF2_ES1; -+ params->wbal.coef3 = FLR_WBAL_COEF3_ES1; -+ } else { -+ params->wbal.coef0 = FLR_WBAL_COEF0; -+ params->wbal.coef1 = FLR_WBAL_COEF1; -+ params->wbal.coef2 = FLR_WBAL_COEF2; -+ params->wbal.coef3 = FLR_WBAL_COEF3; -+ } -+ params->blk_adj.red = FLR_BLKADJ_RED; -+ params->blk_adj.green = FLR_BLKADJ_GREEN; -+ params->blk_adj.blue = FLR_BLKADJ_BLUE; -+ params->rgb2rgb = flr_rgb2rgb; -+ params->rgb2ycbcr = flr_prev_csc[isp_prev->color]; -+ -+ params->features = PREV_CFA | PREV_DEFECT_COR | PREV_NOISE_FILTER; -+ params->features &= ~(PREV_AVERAGER | PREV_INVERSE_ALAW | -+ PREV_HORZ_MEDIAN_FILTER | -+ PREV_GAMMA_BYPASS | -+ PREV_DARK_FRAME_SUBTRACT | -+ PREV_LENS_SHADING | -+ PREV_DARK_FRAME_CAPTURE | -+ PREV_CHROMA_SUPPRESS | -+ PREV_LUMA_ENHANCE); -+ -+ spin_lock_init(&isp_prev->lock); -+ -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/isppreview.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/isppreview.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/isppreview.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/isppreview.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,426 @@ -+/* -+ * isppreview.h -+ * -+ * Driver header file for Preview module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Senthilvadivu Guruswamy -+ * Pallavi Kulkarni -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef OMAP_ISP_PREVIEW_H -+#define OMAP_ISP_PREVIEW_H -+ -+#include -+/* Isp query control structure */ -+ -+#define ISPPRV_BRIGHT_STEP 0x1 -+#define ISPPRV_BRIGHT_DEF 0x0 -+#define ISPPRV_BRIGHT_LOW 0x0 -+#define ISPPRV_BRIGHT_HIGH 0xFF -+#define ISPPRV_BRIGHT_UNITS 0x1 -+ -+#define ISPPRV_CONTRAST_STEP 0x1 -+#define ISPPRV_CONTRAST_DEF 0x10 -+#define ISPPRV_CONTRAST_LOW 0x0 -+#define ISPPRV_CONTRAST_HIGH 0xFF -+#define ISPPRV_CONTRAST_UNITS 0x1 -+ -+#define NO_AVE 0x0 -+#define AVE_2_PIX 0x1 -+#define AVE_4_PIX 0x2 -+#define AVE_8_PIX 0x3 -+#define AVE_ODD_PIXEL_DIST (1 << 4) /* For Bayer Sensors */ -+#define AVE_EVEN_PIXEL_DIST (1 << 2) -+ -+#define WB_GAIN_MAX 4 -+ -+/* Features list */ -+#define PREV_AVERAGER (1 << 0) -+#define PREV_INVERSE_ALAW (1 << 1) -+#define PREV_HORZ_MEDIAN_FILTER (1 << 2) -+#define PREV_NOISE_FILTER (1 << 3) -+#define PREV_CFA (1 << 4) -+#define PREV_GAMMA_BYPASS (1 << 5) -+#define PREV_LUMA_ENHANCE (1 << 6) -+#define PREV_CHROMA_SUPPRESS (1 << 7) -+#define PREV_DARK_FRAME_SUBTRACT (1 << 8) -+#define PREV_LENS_SHADING (1 << 9) -+#define PREV_DARK_FRAME_CAPTURE (1 << 10) -+#define PREV_DEFECT_COR (1 << 11) -+ -+ -+#define ISP_NF_TABLE_SIZE (1 << 10) -+ -+#define ISP_GAMMA_TABLE_SIZE (1 << 10) -+ -+/* Table addresses */ -+#define ISPPRV_TBL_ADDR_RED_G_START 0x00 -+#define ISPPRV_TBL_ADDR_BLUE_G_START 0x800 -+#define ISPPRV_TBL_ADDR_GREEN_G_START 0x400 -+ -+/* -+ *Enumeration Constants for input and output format -+ */ -+enum preview_input { -+ PRV_RAW_CCDC, -+ PRV_RAW_MEM, -+ PRV_RGBBAYERCFA, -+ PRV_COMPCFA, -+ PRV_CCDC_DRKF, -+ PRV_OTHERS -+}; -+enum preview_output { -+ PREVIEW_RSZ, -+ PREVIEW_MEM -+}; -+/* -+ * Configure byte layout of YUV image -+ */ -+enum preview_ycpos_mode { -+ YCPOS_YCrYCb = 0, -+ YCPOS_YCbYCr = 1, -+ YCPOS_CbYCrY = 2, -+ YCPOS_CrYCbY = 3 -+}; -+ -+/** -+ * struct ispprev_gtable - Structure for Gamma Correction. -+ * @redtable: Pointer to the red gamma table. -+ * @greentable: Pointer to the green gamma table. -+ * @bluetable: Pointer to the blue gamma table. -+ */ -+struct ispprev_gtable { -+ u32 *redtable; -+ u32 *greentable; -+ u32 *bluetable; -+}; -+ -+/** -+ * struct prev_white_balance - Structure for White Balance 2. -+ * @wb_dgain: White balance common gain. -+ * @wb_gain: Individual color gains. -+ * @wb_coefmatrix: Coefficient matrix -+ */ -+struct prev_white_balance { -+ u16 wb_dgain; /* white balance common gain */ -+ u8 wb_gain[WB_GAIN_MAX]; /* individual color gains */ -+ u8 wb_coefmatrix[WB_GAIN_MAX][WB_GAIN_MAX]; -+}; -+ -+/** -+ * struct prev_size_params - Structure for size parameters. -+ * @hstart: Starting pixel. -+ * @vstart: Starting line. -+ * @hsize: Width of input image. -+ * @vsize: Height of input image. -+ * @pixsize: Pixel size of the image in terms of bits. -+ * @in_pitch: Line offset of input image. -+ * @out_pitch: Line offset of output image. -+ */ -+struct prev_size_params { -+ unsigned int hstart; -+ unsigned int vstart; -+ unsigned int hsize; -+ unsigned int vsize; -+ unsigned char pixsize; -+ unsigned short in_pitch; -+ unsigned short out_pitch; -+}; -+ -+/** -+ * struct prev_rgb2ycbcr_coeffs - Structure RGB2YCbCr parameters. -+ * @coeff: Color conversion gains in 3x3 matrix. -+ * @offset: Color conversion offsets. 
-+ */ -+struct prev_rgb2ycbcr_coeffs { -+ short coeff[RGB_MAX][RGB_MAX]; -+ short offset[RGB_MAX]; -+}; -+ -+/** -+ * struct prev_darkfrm_params - Structure for Dark frame suppression. -+ * @addr: Memory start address. -+ * @offset: Line offset. -+ */ -+struct prev_darkfrm_params { -+ u32 addr; -+ u32 offset; -+ }; -+ -+/** -+ * struct prev_params - Structure for all configuration -+ * @features: Set of features enabled. -+ * @cfa: CFA coefficients. -+ * @csup: Chroma suppression coefficients. -+ * @ytable: Pointer to Luma enhancement coefficients. -+ * @nf: Noise filter coefficients. -+ * @dcor: Noise filter coefficients. -+ * @gtable: Gamma coefficients. -+ * @wbal: White Balance parameters. -+ * @blk_adj: Black adjustment parameters. -+ * @rgb2rgb: RGB blending parameters. -+ * @rgb2ycbcr: RGB to ycbcr parameters. -+ * @hmf_params: Horizontal median filter. -+ * @size_params: Size parameters. -+ * @drkf_params: Darkframe parameters. -+ * @lens_shading_shift: -+ * @average: Downsampling rate for averager. -+ * @contrast: Contrast. -+ * @brightness: Brightness. -+ */ -+struct prev_params { -+ u16 features; -+ enum preview_ycpos_mode pix_fmt; -+ struct ispprev_cfa cfa; -+ struct ispprev_csup csup; -+ u32 *ytable; -+ struct ispprev_nf nf; -+ struct ispprev_dcor dcor; -+ struct ispprev_gtable gtable; -+ struct ispprev_wbal wbal; -+ struct ispprev_blkadj blk_adj; -+ struct ispprev_rgbtorgb rgb2rgb; -+ struct ispprev_csc rgb2ycbcr; -+ struct ispprev_hmed hmf_params; -+ struct prev_size_params size_params; -+ struct prev_darkfrm_params drkf_params; -+ u8 lens_shading_shift; -+ u8 average; -+ u8 contrast; -+ u8 brightness; -+}; -+ -+/** -+ * struct isptables_update - Structure for Table Configuration. -+ * @update: Specifies which tables should be updated. -+ * @flag: Specifies which tables should be enabled. -+ * @prev_nf: Pointer to structure for Noise Filter -+ * @lsc: Pointer to LSC gain table. (currently not used) -+ * @red_gamma: Pointer to red gamma correction table. -+ * @green_gamma: Pointer to green gamma correction table. -+ * @blue_gamma: Pointer to blue gamma correction table. -+ * @prev_cfa: Pointer to color filter array configuration. -+ * @prev_wbal: Pointer to colour and digital gain configuration. -+ */ -+struct isptables_update { -+ u16 update; -+ u16 flag; -+ struct ispprev_nf *prev_nf; -+ u32 *lsc; -+ u32 *red_gamma; -+ u32 *green_gamma; -+ u32 *blue_gamma; -+ struct ispprev_cfa *prev_cfa; -+ struct ispprev_wbal *prev_wbal; -+}; -+ -+/** -+ * struct isp_prev_device - Structure for storing ISP Preview module information -+ * @prevout_w: Preview output width. -+ * @prevout_h: Preview output height. -+ * @previn_w: Preview input width. -+ * @previn_h: Preview input height. -+ * @prev_inpfmt: Preview input format. -+ * @prev_outfmt: Preview output format. -+ * @hmed_en: Horizontal median filter enable. -+ * @nf_en: Noise filter enable. -+ * @dcor_en: Defect correction enable. -+ * @cfa_en: Color Filter Array (CFA) interpolation enable. -+ * @csup_en: Chrominance suppression enable. -+ * @yenh_en: Luma enhancement enable. -+ * @fmtavg: Number of horizontal pixels to average in input formatter. The -+ * input width should be a multiple of this number. -+ * @brightness: Brightness in preview module. -+ * @contrast: Contrast in preview module. -+ * @color: Color effect in preview module. -+ * @cfafmt: Color Filter Array (CFA) Format. -+ * @wbal_update: Update digital and colour gains in Previewer -+ * -+ * This structure is used to store the OMAP ISP Preview module Information. 
-+ */ -+struct isp_prev_device { -+ u8 update_color_matrix; -+ u8 update_rgb_blending; -+ u8 update_rgb_to_ycbcr; -+ u8 hmed_en; -+ u8 nf_en; -+ u8 dcor_en; -+ u8 cfa_en; -+ u8 csup_en; -+ u8 yenh_en; -+ u8 rg_update; -+ u8 gg_update; -+ u8 bg_update; -+ u8 cfa_update; -+ u8 nf_enable; -+ u8 nf_update; -+ u8 wbal_update; -+ u8 fmtavg; -+ u8 brightness; -+ u8 contrast; -+ enum v4l2_colorfx color; -+ enum cfa_fmt cfafmt; -+ struct ispprev_nf prev_nf_t; -+ struct prev_params params; -+ int shadow_update; -+ u32 sph; -+ u32 slv; -+ spinlock_t lock; -+}; -+ -+void isppreview_config_shadow_registers(struct isp_prev_device *isp_prev); -+ -+int isppreview_request(struct isp_prev_device *isp_prev); -+ -+int isppreview_free(struct isp_prev_device *isp_prev); -+ -+int isppreview_config_datapath(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe); -+ -+void isppreview_config_ycpos(struct isp_prev_device *isp_prev, -+ enum preview_ycpos_mode mode); -+ -+void isppreview_config_averager(struct isp_prev_device *isp_prev, u8 average); -+ -+void isppreview_enable_invalaw(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_enable_drkframe(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_enable_shadcomp(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_config_drkf_shadcomp(struct isp_prev_device *isp_prev, -+ u8 scomp_shtval); -+ -+void isppreview_enable_gammabypass(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_enable_hmed(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_config_hmed(struct isp_prev_device *isp_prev, -+ struct ispprev_hmed); -+ -+void isppreview_enable_noisefilter(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_config_noisefilter(struct isp_prev_device *isp_prev, -+ struct ispprev_nf prev_nf); -+ -+void isppreview_enable_dcor(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_config_dcor(struct isp_prev_device *isp_prev, -+ struct ispprev_dcor prev_dcor); -+ -+ -+void isppreview_config_cfa(struct isp_prev_device *isp_prev, -+ struct ispprev_cfa *cfa); -+ -+void isppreview_config_gammacorrn(struct isp_prev_device *isp_prev, -+ struct ispprev_gtable); -+ -+void isppreview_config_chroma_suppression(struct isp_prev_device *isp_prev, -+ struct ispprev_csup csup); -+ -+void isppreview_enable_cfa(struct isp_prev_device *isp_prev, u8 enable); -+ -+void isppreview_config_luma_enhancement(struct isp_prev_device *isp_prev, -+ u32 *ytable); -+ -+void isppreview_enable_luma_enhancement(struct isp_prev_device *isp_prev, -+ u8 enable); -+ -+void isppreview_enable_chroma_suppression(struct isp_prev_device *isp_prev, -+ u8 enable); -+ -+void isppreview_config_whitebalance(struct isp_prev_device *isp_prev, -+ struct ispprev_wbal); -+ -+void isppreview_config_blkadj(struct isp_prev_device *isp_prev, -+ struct ispprev_blkadj); -+ -+void isppreview_config_rgb_blending(struct isp_prev_device *isp_prev, -+ struct ispprev_rgbtorgb); -+ -+void isppreview_config_rgb_to_ycbcr(struct isp_prev_device *isp_prev, -+ struct ispprev_csc); -+ -+void isppreview_update_contrast(struct isp_prev_device *isp_prev, u8 *contrast); -+ -+void isppreview_query_contrast(struct isp_prev_device *isp_prev, u8 *contrast); -+ -+void isppreview_config_contrast(struct isp_prev_device *isp_prev, u8 contrast); -+ -+void isppreview_get_contrast_range(u8 *min_contrast, u8 *max_contrast); -+ -+void isppreview_update_brightness(struct isp_prev_device *isp_prev, -+ u8 *brightness); -+ -+void 
isppreview_config_brightness(struct isp_prev_device *isp_prev, -+ u8 brightness); -+ -+void isppreview_get_brightness_range(u8 *min_brightness, u8 *max_brightness); -+ -+void isppreview_set_color(struct isp_prev_device *isp_prev, u8 *mode); -+ -+void isppreview_get_color(struct isp_prev_device *isp_prev, u8 *mode); -+ -+void isppreview_query_brightness(struct isp_prev_device *isp_prev, -+ u8 *brightness); -+ -+void isppreview_config_yc_range(struct isp_prev_device *isp_prev, -+ struct ispprev_yclimit yclimit); -+ -+int isppreview_try_pipeline(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe); -+ -+int isppreview_s_pipeline(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe); -+ -+int isppreview_config_inlineoffset(struct isp_prev_device *isp_prev, -+ u32 offset); -+ -+int isppreview_set_inaddr(struct isp_prev_device *isp_prev, u32 addr); -+ -+int isppreview_config_outlineoffset(struct isp_prev_device *isp_prev, -+ u32 offset); -+ -+int isppreview_set_outaddr(struct isp_prev_device *isp_prev, u32 addr); -+ -+int isppreview_config_darklineoffset(struct isp_prev_device *isp_prev, -+ u32 offset); -+ -+int isppreview_set_darkaddr(struct isp_prev_device *isp_prev, u32 addr); -+ -+void isppreview_enable(struct isp_prev_device *isp_prev, int enable); -+ -+int isppreview_busy(struct isp_prev_device *isp_prev); -+ -+void isppreview_print_status(struct isp_prev_device *isp_prev, -+ struct isp_pipeline *pipe); -+ -+#ifndef CONFIG_ARCH_OMAP3410 -+void isppreview_save_context(struct device *dev); -+#else -+static inline void isppreview_save_context(struct device *dev) {} -+#endif -+ -+#ifndef CONFIG_ARCH_OMAP3410 -+void isppreview_restore_context(struct device *dev); -+#else -+static inline void isppreview_restore_context(struct device *dev) {} -+#endif -+ -+int isppreview_config(struct isp_prev_device *isp_prev, void *userspace_add); -+ -+void isppreview_set_skip(struct isp_prev_device *isp_prev, u32 h, u32 v); -+ -+#endif/* OMAP_ISP_PREVIEW_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispreg.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispreg.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispreg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispreg.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,1708 @@ -+/* -+ * ispreg.h -+ * -+ * Header file for all the ISP module in TI's OMAP3 Camera ISP. -+ * It has the OMAP HW register definitions. -+ * -+ * Copyright (C) 2009 Texas Instruments. -+ * Copyright (C) 2009 Nokia. -+ * -+ * Contributors: -+ * Tuukka Toivonen -+ * Thara Gopinath -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef __ISPREG_H__ -+#define __ISPREG_H__ -+ -+#include -+ -+/* Note: Uncomment below defines as needed for enabling module specific debug -+ * messages -+ */ -+ -+/* -+ #define OMAP_ISPCTRL_DEBUG -+ #define OMAP_ISPCCDC_DEBUG -+ #define OMAP_ISPPREV_DEBUG -+ #define OMAP_ISPRESZ_DEBUG -+ #define OMAP_ISPMMU_DEBUG -+ #define OMAP_ISPH3A_DEBUG -+ #define OMAP_ISP_AF_DEBUG -+ #define OMAP_ISPHIST_DEBUG -+*/ -+ -+#ifdef OMAP_ISPCTRL_DEBUG -+#define DPRINTK_ISPCTRL(format, ...) 
\ -+ printk(KERN_INFO "ISPCTRL: " format, ## __VA_ARGS__) -+#define is_ispctrl_debug_enabled() 1 -+#else -+#define DPRINTK_ISPCTRL(format, ...) -+#define is_ispctrl_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISPCCDC_DEBUG -+#define DPRINTK_ISPCCDC(format, ...) \ -+ printk(KERN_INFO "ISPCCDC: " format, ## __VA_ARGS__) -+#define is_ispccdc_debug_enabled() 1 -+#else -+#define DPRINTK_ISPCCDC(format, ...) -+#define is_ispccdc_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISPPREV_DEBUG -+#define DPRINTK_ISPPREV(format, ...) \ -+ printk(KERN_INFO "ISPPREV: " format, ## __VA_ARGS__) -+#define is_ispprev_debug_enabled() 1 -+#else -+#define DPRINTK_ISPPREV(format, ...) -+#define is_ispprev_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISPRESZ_DEBUG -+#define DPRINTK_ISPRESZ(format, ...) \ -+ printk(KERN_INFO "ISPRESZ: " format, ## __VA_ARGS__) -+#define is_ispresz_debug_enabled() 1 -+#else -+#define DPRINTK_ISPRESZ(format, ...) -+#define is_ispresz_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISPMMU_DEBUG -+#define DPRINTK_ISPMMU(format, ...) \ -+ printk(KERN_INFO "ISPMMU: " format, ## __VA_ARGS__) -+#define is_ispmmu_debug_enabled() 1 -+#else -+#define DPRINTK_ISPMMU(format, ...) -+#define is_ispmmu_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISPH3A_DEBUG -+#define DPRINTK_ISPH3A(format, ...) \ -+ printk(KERN_INFO "ISPH3A: " format, ## __VA_ARGS__) -+#define is_isph3a_debug_enabled() 1 -+#else -+#define DPRINTK_ISPH3A(format, ...) -+#define is_isph3a_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISP_AF_DEBUG -+#define DPRINTK_ISP_AF(format, ...) \ -+ printk(KERN_INFO "ISP_AF: " format, ## __VA_ARGS__) -+#define is_isp_af_debug_enabled() 1 -+#else -+#define DPRINTK_ISP_AF(format, ...) -+#define is_isp_af_debug_enabled() 0 -+#endif -+ -+#ifdef OMAP_ISPHIST_DEBUG -+#define DPRINTK_ISPHIST(format, ...) \ -+ printk(KERN_INFO "ISPHIST: " format, ## __VA_ARGS__) -+#define is_isphist_debug_enabled() 1 -+#else -+#define DPRINTK_ISPHIST(format, ...) 
-+#define is_isphist_debug_enabled() 0 -+#endif -+ -+#define ISP_32B_BOUNDARY_BUF 0xFFFFFFE0 -+#define ISP_32B_BOUNDARY_OFFSET 0x0000FFE0 -+ -+#define CM_CAM_MCLK_HZ 172800000 /* Hz */ -+ -+/* ISP Submodules offset */ -+ -+#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE -+#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset)) -+ -+#define OMAP3ISP_CBUFF_REG_OFFSET 0x0100 -+#define OMAP3ISP_CBUFF_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_CBUFF_REG_OFFSET) -+#define OMAP3ISP_CBUFF_REG(offset) (OMAP3ISP_CBUFF_REG_BASE + (offset)) -+ -+#define OMAP3ISP_CCP2_REG_OFFSET 0x0400 -+#define OMAP3ISP_CCP2_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_CCP2_REG_OFFSET) -+#define OMAP3ISP_CCP2_REG(offset) (OMAP3ISP_CCP2_REG_BASE + (offset)) -+ -+#define OMAP3ISP_CCDC_REG_OFFSET 0x0600 -+#define OMAP3ISP_CCDC_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_CCDC_REG_OFFSET) -+#define OMAP3ISP_CCDC_REG(offset) (OMAP3ISP_CCDC_REG_BASE + (offset)) -+ -+#define OMAP3ISP_HIST_REG_OFFSET 0x0A00 -+#define OMAP3ISP_HIST_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_HIST_REG_OFFSET) -+#define OMAP3ISP_HIST_REG(offset) (OMAP3ISP_HIST_REG_BASE + (offset)) -+ -+#define OMAP3ISP_H3A_REG_OFFSET 0x0C00 -+#define OMAP3ISP_H3A_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_H3A_REG_OFFSET) -+#define OMAP3ISP_H3A_REG(offset) (OMAP3ISP_H3A_REG_BASE + (offset)) -+ -+#define OMAP3ISP_PREV_REG_OFFSET 0x0E00 -+#define OMAP3ISP_PREV_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_PREV_REG_OFFSET) -+#define OMAP3ISP_PREV_REG(offset) (OMAP3ISP_PREV_REG_BASE + (offset)) -+ -+#define OMAP3ISP_RESZ_REG_OFFSET 0x1000 -+#define OMAP3ISP_RESZ_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_RESZ_REG_OFFSET) -+#define OMAP3ISP_RESZ_REG(offset) (OMAP3ISP_RESZ_REG_BASE + (offset)) -+ -+#define OMAP3ISP_SBL_REG_OFFSET 0x1200 -+#define OMAP3ISP_SBL_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_SBL_REG_OFFSET) -+#define OMAP3ISP_SBL_REG(offset) (OMAP3ISP_SBL_REG_BASE + (offset)) -+ -+#define OMAP3ISP_MMU_REG_OFFSET 0x1400 -+#define OMAP3ISP_MMU_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_MMU_REG_OFFSET) -+#define OMAP3ISP_MMU_REG(offset) (OMAP3ISP_MMU_REG_BASE + (offset)) -+ -+#define OMAP3ISP_CSI2A_REG_OFFSET 0x1800 -+#define OMAP3ISP_CSI2A_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_CSI2A_REG_OFFSET) -+#define OMAP3ISP_CSI2A_REG(offset) (OMAP3ISP_CSI2A_REG_BASE + (offset)) -+ -+#define OMAP3ISP_CSI2PHY_REG_OFFSET 0x1970 -+#define OMAP3ISP_CSI2PHY_REG_BASE (OMAP3ISP_REG_BASE + \ -+ OMAP3ISP_CSI2PHY_REG_OFFSET) -+#define OMAP3ISP_CSI2PHY_REG(offset) (OMAP3ISP_CSI2PHY_REG_BASE + (offset)) -+ -+/* ISP module register offset */ -+ -+#define ISP_REVISION (0x000) -+#define ISP_SYSCONFIG (0x004) -+#define ISP_SYSSTATUS (0x008) -+#define ISP_IRQ0ENABLE (0x00C) -+#define ISP_IRQ0STATUS (0x010) -+#define ISP_IRQ1ENABLE (0x014) -+#define ISP_IRQ1STATUS (0x018) -+#define ISP_TCTRL_GRESET_LENGTH (0x030) -+#define ISP_TCTRL_PSTRB_REPLAY (0x034) -+#define ISP_CTRL (0x040) -+#define ISP_SECURE (0x044) -+#define ISP_TCTRL_CTRL (0x050) -+#define ISP_TCTRL_FRAME (0x054) -+#define ISP_TCTRL_PSTRB_DELAY (0x058) -+#define ISP_TCTRL_STRB_DELAY (0x05C) -+#define ISP_TCTRL_SHUT_DELAY (0x060) -+#define ISP_TCTRL_PSTRB_LENGTH (0x064) -+#define ISP_TCTRL_STRB_LENGTH (0x068) -+#define ISP_TCTRL_SHUT_LENGTH (0x06C) -+#define ISP_PING_PONG_ADDR (0x070) -+#define ISP_PING_PONG_MEM_RANGE (0x074) -+#define ISP_PING_PONG_BUF_SIZE (0x078) -+ -+/* CSI1 receiver registers (ES2.0) */ -+#define ISPCSI1_REVISION (0x000) -+#define ISPCSI1_SYSCONFIG (0x004) -+#define ISPCSI1_SYSSTATUS (0x008) -+#define 
ISPCSI1_LC01_IRQENABLE (0x00C) -+#define ISPCSI1_LC01_IRQSTATUS (0x010) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_FS_IRQ (1 << 11) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_LE_IRQ (1 << 10) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_LS_IRQ (1 << 9) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_FE_IRQ (1 << 8) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_COUNT_IRQ (1 << 7) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ (1 << 5) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_CRC_IRQ (1 << 4) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_FSP_IRQ (1 << 3) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_FW_IRQ (1 << 2) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_FSC_IRQ (1 << 1) -+#define ISPCSI1_LC01_IRQSTATUS_LC0_SSC_IRQ (1 << 0) -+ -+#define ISPCSI1_LC23_IRQENABLE (0x014) -+#define ISPCSI1_LC23_IRQSTATUS (0x018) -+#define ISPCSI1_LCM_IRQENABLE (0x02C) -+#define ISPCSI1_LCM_IRQSTATUS (0x030) -+#define ISPCSI1_CTRL (0x040) -+#define ISPCSI1_DBG (0x044) -+#define ISPCSI1_GNQ (0x048) -+#define ISPCSI1_LCx_CTRL(x) ((0x050)+0x30*(x)) -+#define ISPCSI1_LCx_CODE(x) ((0x054)+0x30*(x)) -+#define ISPCSI1_LCx_STAT_START(x) ((0x058)+0x30*(x)) -+#define ISPCSI1_LCx_STAT_SIZE(x) ((0x05C)+0x30*(x)) -+#define ISPCSI1_LCx_SOF_ADDR(x) ((0x060)+0x30*(x)) -+#define ISPCSI1_LCx_EOF_ADDR(x) ((0x064)+0x30*(x)) -+#define ISPCSI1_LCx_DAT_START(x) ((0x068)+0x30*(x)) -+#define ISPCSI1_LCx_DAT_SIZE(x) ((0x06C)+0x30*(x)) -+#define ISPCSI1_LCx_DAT_PING_ADDR(x) ((0x070)+0x30*(x)) -+#define ISPCSI1_LCx_DAT_PONG_ADDR(x) ((0x074)+0x30*(x)) -+#define ISPCSI1_LCx_DAT_OFST(x) ((0x078)+0x30*(x)) -+#define ISPCSI1_LCM_CTRL (0x1D0) -+#define ISPCSI1_LCM_VSIZE (0x1D4) -+#define ISPCSI1_LCM_HSIZE (0x1D8) -+#define ISPCSI1_LCM_PREFETCH (0x1DC) -+#define ISPCSI1_LCM_SRC_ADDR (0x1E0) -+#define ISPCSI1_LCM_SRC_OFST (0x1E4) -+#define ISPCSI1_LCM_DST_ADDR (0x1E8) -+#define ISPCSI1_LCM_DST_OFST (0x1EC) -+#define ISP_CSIB_SYSCONFIG ISPCSI1_SYSCONFIG -+#define ISP_CSIA_SYSCONFIG ISPCSI2_SYSCONFIG -+ -+/* ISP_CBUFF Registers */ -+ -+#define ISP_CBUFF_SYSCONFIG (0x010) -+#define ISP_CBUFF_IRQENABLE (0x01C) -+ -+#define ISP_CBUFF0_CTRL (0x020) -+#define ISP_CBUFF1_CTRL (0x024) -+ -+#define ISP_CBUFF0_START (0x040) -+#define ISP_CBUFF1_START (0x044) -+ -+#define ISP_CBUFF0_END (0x050) -+#define ISP_CBUFF1_END (0x054) -+ -+#define ISP_CBUFF0_WINDOWSIZE (0x060) -+#define ISP_CBUFF1_WINDOWSIZE (0x064) -+ -+#define ISP_CBUFF0_THRESHOLD (0x070) -+#define ISP_CBUFF1_THRESHOLD (0x074) -+ -+/* CCDC module register offset */ -+ -+#define ISPCCDC_PID (0x000) -+#define ISPCCDC_PCR (0x004) -+#define ISPCCDC_SYN_MODE (0x008) -+#define ISPCCDC_HD_VD_WID (0x00C) -+#define ISPCCDC_PIX_LINES (0x010) -+#define ISPCCDC_HORZ_INFO (0x014) -+#define ISPCCDC_VERT_START (0x018) -+#define ISPCCDC_VERT_LINES (0x01C) -+#define ISPCCDC_CULLING (0x020) -+#define ISPCCDC_HSIZE_OFF (0x024) -+#define ISPCCDC_SDOFST (0x028) -+#define ISPCCDC_SDR_ADDR (0x02C) -+#define ISPCCDC_CLAMP (0x030) -+#define ISPCCDC_DCSUB (0x034) -+#define ISPCCDC_COLPTN (0x038) -+#define ISPCCDC_BLKCMP (0x03C) -+#define ISPCCDC_FPC (0x040) -+#define ISPCCDC_FPC_ADDR (0x044) -+#define ISPCCDC_VDINT (0x048) -+#define ISPCCDC_ALAW (0x04C) -+#define ISPCCDC_REC656IF (0x050) -+#define ISPCCDC_CFG (0x054) -+#define ISPCCDC_FMTCFG (0x058) -+#define ISPCCDC_FMT_HORZ (0x05C) -+#define ISPCCDC_FMT_VERT (0x060) -+#define ISPCCDC_FMT_ADDR0 (0x064) -+#define ISPCCDC_FMT_ADDR1 (0x068) -+#define ISPCCDC_FMT_ADDR2 (0x06C) -+#define ISPCCDC_FMT_ADDR3 (0x070) -+#define ISPCCDC_FMT_ADDR4 (0x074) -+#define ISPCCDC_FMT_ADDR5 (0x078) -+#define ISPCCDC_FMT_ADDR6 (0x07C) -+#define 
ISPCCDC_FMT_ADDR7 (0x080) -+#define ISPCCDC_PRGEVEN0 (0x084) -+#define ISPCCDC_PRGEVEN1 (0x088) -+#define ISPCCDC_PRGODD0 (0x08C) -+#define ISPCCDC_PRGODD1 (0x090) -+#define ISPCCDC_VP_OUT (0x094) -+ -+#define ISPCCDC_LSC_CONFIG (0x098) -+#define ISPCCDC_LSC_INITIAL (0x09C) -+#define ISPCCDC_LSC_TABLE_BASE (0x0A0) -+#define ISPCCDC_LSC_TABLE_OFFSET (0x0A4) -+ -+/* SBL */ -+#define ISPSBL_PCR 0x4 -+#define ISPSBL_PCR_H3A_AEAWB_WBL_OVF (1 << 16) -+#define ISPSBL_PCR_H3A_AF_WBL_OVF (1 << 17) -+#define ISPSBL_PCR_RSZ4_WBL_OVF (1 << 18) -+#define ISPSBL_PCR_RSZ3_WBL_OVF (1 << 19) -+#define ISPSBL_PCR_RSZ2_WBL_OVF (1 << 20) -+#define ISPSBL_PCR_RSZ1_WBL_OVF (1 << 21) -+#define ISPSBL_PCR_PRV_WBL_OVF (1 << 22) -+#define ISPSBL_PCR_CCDC_WBL_OVF (1 << 23) -+#define ISPSBL_PCR_CCDCPRV_2_RSZ_OVF (1 << 24) -+#define ISPSBL_PCR_CSIA_WBL_OVF (1 << 25) -+#define ISPSBL_PCR_CSIB_WBL_OVF (1 << 26) -+#define ISPSBL_CCDC_WR_0 (0x028) -+#define ISPSBL_CCDC_WR_0_DATA_READY (1 << 21) -+#define ISPSBL_CCDC_WR_1 (0x02C) -+#define ISPSBL_CCDC_WR_2 (0x030) -+#define ISPSBL_CCDC_WR_3 (0x034) -+ -+#define ISPSBL_SDR_REQ_EXP 0xF8 -+#define ISPSBL_SDR_REQ_HIST_EXP_SHIFT 0 -+#define ISPSBL_SDR_REQ_HIST_EXP_MASK (0x3FF) -+#define ISPSBL_SDR_REQ_RSZ_EXP_SHIFT 10 -+#define ISPSBL_SDR_REQ_RSZ_EXP_MASK (0x3FF << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT) -+#define ISPSBL_SDR_REQ_PRV_EXP_SHIFT 20 -+#define ISPSBL_SDR_REQ_PRV_EXP_MASK (0x3FF << ISPSBL_SDR_REQ_PRV_EXP_SHIFT) -+ -+/* Histogram registers */ -+#define ISPHIST_PID (0x000) -+#define ISPHIST_PCR (0x004) -+#define ISPHIST_CNT (0x008) -+#define ISPHIST_WB_GAIN (0x00C) -+#define ISPHIST_R0_HORZ (0x010) -+#define ISPHIST_R0_VERT (0x014) -+#define ISPHIST_R1_HORZ (0x018) -+#define ISPHIST_R1_VERT (0x01C) -+#define ISPHIST_R2_HORZ (0x020) -+#define ISPHIST_R2_VERT (0x024) -+#define ISPHIST_R3_HORZ (0x028) -+#define ISPHIST_R3_VERT (0x02C) -+#define ISPHIST_ADDR (0x030) -+#define ISPHIST_DATA (0x034) -+#define ISPHIST_RADD (0x038) -+#define ISPHIST_RADD_OFF (0x03C) -+#define ISPHIST_H_V_INFO (0x040) -+ -+/* H3A module registers */ -+#define ISPH3A_PID (0x000) -+#define ISPH3A_PCR (0x004) -+#define ISPH3A_AEWWIN1 (0x04C) -+#define ISPH3A_AEWINSTART (0x050) -+#define ISPH3A_AEWINBLK (0x054) -+#define ISPH3A_AEWSUBWIN (0x058) -+#define ISPH3A_AEWBUFST (0x05C) -+#define ISPH3A_AFPAX1 (0x008) -+#define ISPH3A_AFPAX2 (0x00C) -+#define ISPH3A_AFPAXSTART (0x010) -+#define ISPH3A_AFIIRSH (0x014) -+#define ISPH3A_AFBUFST (0x018) -+#define ISPH3A_AFCOEF010 (0x01C) -+#define ISPH3A_AFCOEF032 (0x020) -+#define ISPH3A_AFCOEF054 (0x024) -+#define ISPH3A_AFCOEF076 (0x028) -+#define ISPH3A_AFCOEF098 (0x02C) -+#define ISPH3A_AFCOEF0010 (0x030) -+#define ISPH3A_AFCOEF110 (0x034) -+#define ISPH3A_AFCOEF132 (0x038) -+#define ISPH3A_AFCOEF154 (0x03C) -+#define ISPH3A_AFCOEF176 (0x040) -+#define ISPH3A_AFCOEF198 (0x044) -+#define ISPH3A_AFCOEF1010 (0x048) -+ -+#define ISPPRV_PCR (0x004) -+#define ISPPRV_HORZ_INFO (0x008) -+#define ISPPRV_VERT_INFO (0x00C) -+#define ISPPRV_RSDR_ADDR (0x010) -+#define ISPPRV_RADR_OFFSET (0x014) -+#define ISPPRV_DSDR_ADDR (0x018) -+#define ISPPRV_DRKF_OFFSET (0x01C) -+#define ISPPRV_WSDR_ADDR (0x020) -+#define ISPPRV_WADD_OFFSET (0x024) -+#define ISPPRV_AVE (0x028) -+#define ISPPRV_HMED (0x02C) -+#define ISPPRV_NF (0x030) -+#define ISPPRV_WB_DGAIN (0x034) -+#define ISPPRV_WBGAIN (0x038) -+#define ISPPRV_WBSEL (0x03C) -+#define ISPPRV_CFA (0x040) -+#define ISPPRV_BLKADJOFF (0x044) -+#define ISPPRV_RGB_MAT1 (0x048) -+#define ISPPRV_RGB_MAT2 (0x04C) -+#define ISPPRV_RGB_MAT3 
(0x050) -+#define ISPPRV_RGB_MAT4 (0x054) -+#define ISPPRV_RGB_MAT5 (0x058) -+#define ISPPRV_RGB_OFF1 (0x05C) -+#define ISPPRV_RGB_OFF2 (0x060) -+#define ISPPRV_CSC0 (0x064) -+#define ISPPRV_CSC1 (0x068) -+#define ISPPRV_CSC2 (0x06C) -+#define ISPPRV_CSC_OFFSET (0x070) -+#define ISPPRV_CNT_BRT (0x074) -+#define ISPPRV_CSUP (0x078) -+#define ISPPRV_SETUP_YC (0x07C) -+#define ISPPRV_SET_TBL_ADDR (0x080) -+#define ISPPRV_SET_TBL_DATA (0x084) -+#define ISPPRV_CDC_THR0 (0x090) -+#define ISPPRV_CDC_THR1 (ISPPRV_CDC_THR0 + (0x4)) -+#define ISPPRV_CDC_THR2 (ISPPRV_CDC_THR0 + (0x4) * 2) -+#define ISPPRV_CDC_THR3 (ISPPRV_CDC_THR0 + (0x4) * 3) -+ -+#define ISPPRV_REDGAMMA_TABLE_ADDR 0x0000 -+#define ISPPRV_GREENGAMMA_TABLE_ADDR 0x0400 -+#define ISPPRV_BLUEGAMMA_TABLE_ADDR 0x0800 -+#define ISPPRV_NF_TABLE_ADDR 0x0C00 -+#define ISPPRV_YENH_TABLE_ADDR 0x1000 -+#define ISPPRV_CFA_TABLE_ADDR 0x1400 -+ -+#define ISPPRV_MAXOUTPUT_WIDTH 1280 -+#define ISPPRV_MAXOUTPUT_WIDTH_ES2 3300 -+#define ISPRSZ_MIN_OUTPUT 64 -+#define ISPRSZ_MAX_OUTPUT 3312 -+ -+/* Resizer module register offset */ -+#define ISPRSZ_PID (0x000) -+#define ISPRSZ_PCR (0x004) -+#define ISPRSZ_CNT (0x008) -+#define ISPRSZ_OUT_SIZE (0x00C) -+#define ISPRSZ_IN_START (0x010) -+#define ISPRSZ_IN_SIZE (0x014) -+#define ISPRSZ_SDR_INADD (0x018) -+#define ISPRSZ_SDR_INOFF (0x01C) -+#define ISPRSZ_SDR_OUTADD (0x020) -+#define ISPRSZ_SDR_OUTOFF (0x024) -+#define ISPRSZ_HFILT10 (0x028) -+#define ISPRSZ_HFILT32 (0x02C) -+#define ISPRSZ_HFILT54 (0x030) -+#define ISPRSZ_HFILT76 (0x034) -+#define ISPRSZ_HFILT98 (0x038) -+#define ISPRSZ_HFILT1110 (0x03C) -+#define ISPRSZ_HFILT1312 (0x040) -+#define ISPRSZ_HFILT1514 (0x044) -+#define ISPRSZ_HFILT1716 (0x048) -+#define ISPRSZ_HFILT1918 (0x04C) -+#define ISPRSZ_HFILT2120 (0x050) -+#define ISPRSZ_HFILT2322 (0x054) -+#define ISPRSZ_HFILT2524 (0x058) -+#define ISPRSZ_HFILT2726 (0x05C) -+#define ISPRSZ_HFILT2928 (0x060) -+#define ISPRSZ_HFILT3130 (0x064) -+#define ISPRSZ_VFILT10 (0x068) -+#define ISPRSZ_VFILT32 (0x06C) -+#define ISPRSZ_VFILT54 (0x070) -+#define ISPRSZ_VFILT76 (0x074) -+#define ISPRSZ_VFILT98 (0x078) -+#define ISPRSZ_VFILT1110 (0x07C) -+#define ISPRSZ_VFILT1312 (0x080) -+#define ISPRSZ_VFILT1514 (0x084) -+#define ISPRSZ_VFILT1716 (0x088) -+#define ISPRSZ_VFILT1918 (0x08C) -+#define ISPRSZ_VFILT2120 (0x090) -+#define ISPRSZ_VFILT2322 (0x094) -+#define ISPRSZ_VFILT2524 (0x098) -+#define ISPRSZ_VFILT2726 (0x09C) -+#define ISPRSZ_VFILT2928 (0x0A0) -+#define ISPRSZ_VFILT3130 (0x0A4) -+#define ISPRSZ_YENH (0x0A8) -+ -+/* MMU module registers */ -+#define ISPMMU_REVISION (0x000) -+#define ISPMMU_SYSCONFIG (0x010) -+#define ISPMMU_SYSSTATUS (0x014) -+#define ISPMMU_IRQSTATUS (0x018) -+#define ISPMMU_IRQENABLE (0x01C) -+#define ISPMMU_WALKING_ST (0x040) -+#define ISPMMU_CNTL (0x044) -+#define ISPMMU_FAULT_AD (0x048) -+#define ISPMMU_TTB (0x04C) -+#define ISPMMU_LOCK (0x050) -+#define ISPMMU_LD_TLB (0x054) -+#define ISPMMU_CAM (0x058) -+#define ISPMMU_RAM (0x05C) -+#define ISPMMU_GFLUSH (0x060) -+#define ISPMMU_FLUSH_ENTRY (0x064) -+#define ISPMMU_READ_CAM (0x068) -+#define ISPMMU_READ_RAM (0x06c) -+#define ISPMMU_EMU_FAULT_AD (0x070) -+ -+#define ISP_INT_CLR 0xFF113F11 -+#define ISPPRV_PCR_EN 1 -+#define ISPPRV_PCR_BUSY (1 << 1) -+#define ISPPRV_PCR_SOURCE (1 << 2) -+#define ISPPRV_PCR_ONESHOT (1 << 3) -+#define ISPPRV_PCR_WIDTH (1 << 4) -+#define ISPPRV_PCR_INVALAW (1 << 5) -+#define ISPPRV_PCR_DRKFEN (1 << 6) -+#define ISPPRV_PCR_DRKFCAP (1 << 7) -+#define ISPPRV_PCR_HMEDEN (1 << 8) -+#define 
ISPPRV_PCR_NFEN (1 << 9) -+#define ISPPRV_PCR_CFAEN (1 << 10) -+#define ISPPRV_PCR_CFAFMT_SHIFT 11 -+#define ISPPRV_PCR_CFAFMT_MASK 0x7800 -+#define ISPPRV_PCR_CFAFMT_BAYER (0 << 11) -+#define ISPPRV_PCR_CFAFMT_SONYVGA (1 << 11) -+#define ISPPRV_PCR_CFAFMT_RGBFOVEON (2 << 11) -+#define ISPPRV_PCR_CFAFMT_DNSPL (3 << 11) -+#define ISPPRV_PCR_CFAFMT_HONEYCOMB (4 << 11) -+#define ISPPRV_PCR_CFAFMT_RRGGBBFOVEON (5 << 11) -+#define ISPPRV_PCR_YNENHEN (1 << 15) -+#define ISPPRV_PCR_SUPEN (1 << 16) -+#define ISPPRV_PCR_YCPOS_SHIFT 17 -+#define ISPPRV_PCR_YCPOS_YCrYCb (0 << 17) -+#define ISPPRV_PCR_YCPOS_YCbYCr (1 << 17) -+#define ISPPRV_PCR_YCPOS_CbYCrY (2 << 17) -+#define ISPPRV_PCR_YCPOS_CrYCbY (3 << 17) -+#define ISPPRV_PCR_RSZPORT (1 << 19) -+#define ISPPRV_PCR_SDRPORT (1 << 20) -+#define ISPPRV_PCR_SCOMP_EN (1 << 21) -+#define ISPPRV_PCR_SCOMP_SFT_SHIFT (22) -+#define ISPPRV_PCR_SCOMP_SFT_MASK (~(7 << 22)) -+#define ISPPRV_PCR_GAMMA_BYPASS (1 << 26) -+#define ISPPRV_PCR_DCOREN (1 << 27) -+#define ISPPRV_PCR_DCCOUP (1 << 28) -+#define ISPPRV_PCR_DRK_FAIL (1 << 31) -+ -+#define ISPPRV_HORZ_INFO_EPH_SHIFT 0 -+#define ISPPRV_HORZ_INFO_EPH_MASK 0x3fff -+#define ISPPRV_HORZ_INFO_SPH_SHIFT 16 -+#define ISPPRV_HORZ_INFO_SPH_MASK 0x3fff0 -+ -+#define ISPPRV_VERT_INFO_ELV_SHIFT 0 -+#define ISPPRV_VERT_INFO_ELV_MASK 0x3fff -+#define ISPPRV_VERT_INFO_SLV_SHIFT 16 -+#define ISPPRV_VERT_INFO_SLV_MASK 0x3fff0 -+ -+#define ISPPRV_AVE_EVENDIST_SHIFT 2 -+#define ISPPRV_AVE_EVENDIST_1 0x0 -+#define ISPPRV_AVE_EVENDIST_2 0x1 -+#define ISPPRV_AVE_EVENDIST_3 0x2 -+#define ISPPRV_AVE_EVENDIST_4 0x3 -+#define ISPPRV_AVE_ODDDIST_SHIFT 4 -+#define ISPPRV_AVE_ODDDIST_1 0x0 -+#define ISPPRV_AVE_ODDDIST_2 0x1 -+#define ISPPRV_AVE_ODDDIST_3 0x2 -+#define ISPPRV_AVE_ODDDIST_4 0x3 -+ -+#define ISPPRV_HMED_THRESHOLD_SHIFT 0 -+#define ISPPRV_HMED_EVENDIST (1 << 8) -+#define ISPPRV_HMED_ODDDIST (1 << 9) -+ -+#define ISPPRV_WBGAIN_COEF0_SHIFT 0 -+#define ISPPRV_WBGAIN_COEF1_SHIFT 8 -+#define ISPPRV_WBGAIN_COEF2_SHIFT 16 -+#define ISPPRV_WBGAIN_COEF3_SHIFT 24 -+ -+#define ISPPRV_WBSEL_COEF0 0x0 -+#define ISPPRV_WBSEL_COEF1 0x1 -+#define ISPPRV_WBSEL_COEF2 0x2 -+#define ISPPRV_WBSEL_COEF3 0x3 -+ -+#define ISPPRV_WBSEL_N0_0_SHIFT 0 -+#define ISPPRV_WBSEL_N0_1_SHIFT 2 -+#define ISPPRV_WBSEL_N0_2_SHIFT 4 -+#define ISPPRV_WBSEL_N0_3_SHIFT 6 -+#define ISPPRV_WBSEL_N1_0_SHIFT 8 -+#define ISPPRV_WBSEL_N1_1_SHIFT 10 -+#define ISPPRV_WBSEL_N1_2_SHIFT 12 -+#define ISPPRV_WBSEL_N1_3_SHIFT 14 -+#define ISPPRV_WBSEL_N2_0_SHIFT 16 -+#define ISPPRV_WBSEL_N2_1_SHIFT 18 -+#define ISPPRV_WBSEL_N2_2_SHIFT 20 -+#define ISPPRV_WBSEL_N2_3_SHIFT 22 -+#define ISPPRV_WBSEL_N3_0_SHIFT 24 -+#define ISPPRV_WBSEL_N3_1_SHIFT 26 -+#define ISPPRV_WBSEL_N3_2_SHIFT 28 -+#define ISPPRV_WBSEL_N3_3_SHIFT 30 -+ -+#define ISPPRV_CFA_GRADTH_HOR_SHIFT 0 -+#define ISPPRV_CFA_GRADTH_VER_SHIFT 8 -+ -+#define ISPPRV_BLKADJOFF_B_SHIFT 0 -+#define ISPPRV_BLKADJOFF_G_SHIFT 8 -+#define ISPPRV_BLKADJOFF_R_SHIFT 16 -+ -+#define ISPPRV_RGB_MAT1_MTX_RR_SHIFT 0 -+#define ISPPRV_RGB_MAT1_MTX_GR_SHIFT 16 -+ -+#define ISPPRV_RGB_MAT2_MTX_BR_SHIFT 0 -+#define ISPPRV_RGB_MAT2_MTX_RG_SHIFT 16 -+ -+#define ISPPRV_RGB_MAT3_MTX_GG_SHIFT 0 -+#define ISPPRV_RGB_MAT3_MTX_BG_SHIFT 16 -+ -+#define ISPPRV_RGB_MAT4_MTX_RB_SHIFT 0 -+#define ISPPRV_RGB_MAT4_MTX_GB_SHIFT 16 -+ -+#define ISPPRV_RGB_MAT5_MTX_BB_SHIFT 0 -+ -+#define ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT 0 -+#define ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT 16 -+ -+#define ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT 0 -+ -+#define ISPPRV_CSC0_RY_SHIFT 0 -+#define 
ISPPRV_CSC0_GY_SHIFT 10 -+#define ISPPRV_CSC0_BY_SHIFT 20 -+ -+#define ISPPRV_CSC1_RCB_SHIFT 0 -+#define ISPPRV_CSC1_GCB_SHIFT 10 -+#define ISPPRV_CSC1_BCB_SHIFT 20 -+ -+#define ISPPRV_CSC2_RCR_SHIFT 0 -+#define ISPPRV_CSC2_GCR_SHIFT 10 -+#define ISPPRV_CSC2_BCR_SHIFT 20 -+ -+#define ISPPRV_CSC_OFFSET_CR_SHIFT 0 -+#define ISPPRV_CSC_OFFSET_CB_SHIFT 8 -+#define ISPPRV_CSC_OFFSET_Y_SHIFT 16 -+ -+#define ISPPRV_CNT_BRT_BRT_SHIFT 0 -+#define ISPPRV_CNT_BRT_CNT_SHIFT 8 -+ -+#define ISPPRV_CONTRAST_MAX 0x10 -+#define ISPPRV_CONTRAST_MIN 0xFF -+#define ISPPRV_BRIGHT_MIN 0x00 -+#define ISPPRV_BRIGHT_MAX 0xFF -+ -+#define ISPPRV_CSUP_CSUPG_SHIFT 0 -+#define ISPPRV_CSUP_THRES_SHIFT 8 -+#define ISPPRV_CSUP_HPYF_SHIFT 16 -+ -+#define ISPPRV_SETUP_YC_MINC_SHIFT 0 -+#define ISPPRV_SETUP_YC_MAXC_SHIFT 8 -+#define ISPPRV_SETUP_YC_MINY_SHIFT 16 -+#define ISPPRV_SETUP_YC_MAXY_SHIFT 24 -+#define ISPPRV_YC_MAX 0xFF -+#define ISPPRV_YC_MIN 0x0 -+ -+/* Define bit fields within selected registers */ -+#define ISP_REVISION_SHIFT 0 -+ -+#define ISP_SYSCONFIG_AUTOIDLE (1 << 0) -+#define ISP_SYSCONFIG_SOFTRESET (1 << 1) -+#define ISP_SYSCONFIG_MIDLEMODE_SHIFT 12 -+#define ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY 0x0 -+#define ISP_SYSCONFIG_MIDLEMODE_NOSTANBY 0x1 -+#define ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY 0x2 -+ -+#define ISP_SYSSTATUS_RESETDONE 0 -+ -+#define IRQ0ENABLE_CSIA_IRQ 1 -+#define IRQ0ENABLE_CSIA_LC1_IRQ (1 << 1) -+#define IRQ0ENABLE_CSIA_LC2_IRQ (1 << 2) -+#define IRQ0ENABLE_CSIA_LC3_IRQ (1 << 3) -+#define IRQ0ENABLE_CSIB_IRQ (1 << 4) -+#define IRQ0ENABLE_CSIB_LC1_IRQ (1 << 5) -+#define IRQ0ENABLE_CSIB_LC2_IRQ (1 << 6) -+#define IRQ0ENABLE_CSIB_LC3_IRQ (1 << 7) -+#define IRQ0ENABLE_CCDC_VD0_IRQ (1 << 8) -+#define IRQ0ENABLE_CCDC_VD1_IRQ (1 << 9) -+#define IRQ0ENABLE_CCDC_VD2_IRQ (1 << 10) -+#define IRQ0ENABLE_CCDC_ERR_IRQ (1 << 11) -+#define IRQ0ENABLE_H3A_AF_DONE_IRQ (1 << 12) -+#define IRQ0ENABLE_H3A_AWB_DONE_IRQ (1 << 13) -+#define IRQ0ENABLE_HIST_DONE_IRQ (1 << 16) -+#define IRQ0ENABLE_CCDC_LSC_DONE_IRQ (1 << 17) -+#define IRQ0ENABLE_CCDC_LSC_PREF_COMP_IRQ (1 << 18) -+#define IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ (1 << 19) -+#define IRQ0ENABLE_PRV_DONE_IRQ (1 << 20) -+#define IRQ0ENABLE_RSZ_DONE_IRQ (1 << 24) -+#define IRQ0ENABLE_OVF_IRQ (1 << 25) -+#define IRQ0ENABLE_PING_IRQ (1 << 26) -+#define IRQ0ENABLE_PONG_IRQ (1 << 27) -+#define IRQ0ENABLE_MMU_ERR_IRQ (1 << 28) -+#define IRQ0ENABLE_OCP_ERR_IRQ (1 << 29) -+#define IRQ0ENABLE_SEC_ERR_IRQ (1 << 30) -+#define IRQ0ENABLE_HS_VS_IRQ (1 << 31) -+ -+#define IRQ0STATUS_CSIA_IRQ 1 -+#define IRQ0STATUS_CSIA_LC1_IRQ (1 << 1) -+#define IRQ0STATUS_CSIA_LC2_IRQ (1 << 2) -+#define IRQ0STATUS_CSIA_LC3_IRQ (1 << 3) -+#define IRQ0STATUS_CSIB_IRQ (1 << 4) -+#define IRQ0STATUS_CSIB_LC1_IRQ (1 << 5) -+#define IRQ0STATUS_CSIB_LC2_IRQ (1 << 6) -+#define IRQ0STATUS_CSIB_LC3_IRQ (1 << 7) -+#define IRQ0STATUS_CCDC_VD0_IRQ (1 << 8) -+#define IRQ0STATUS_CCDC_VD1_IRQ (1 << 9) -+#define IRQ0STATUS_CCDC_VD2_IRQ (1 << 10) -+#define IRQ0STATUS_CCDC_ERR_IRQ (1 << 11) -+#define IRQ0STATUS_H3A_AF_DONE_IRQ (1 << 12) -+#define IRQ0STATUS_H3A_AWB_DONE_IRQ (1 << 13) -+#define IRQ0STATUS_HIST_DONE_IRQ (1 << 16) -+#define IRQ0STATUS_PRV_DONE_IRQ (1 << 20) -+#define IRQ0STATUS_RSZ_DONE_IRQ (1 << 24) -+#define IRQ0STATUS_OVF_IRQ (1 << 25) -+#define IRQ0STATUS_PING_IRQ (1 << 26) -+#define IRQ0STATUS_PONG_IRQ (1 << 27) -+#define IRQ0STATUS_MMU_ERR_IRQ (1 << 28) -+#define IRQ0STATUS_OCP_ERR_IRQ (1 << 29) -+#define IRQ0STATUS_SEC_ERR_IRQ (1 << 30) -+#define IRQ0STATUS_HS_VS_IRQ (1 << 31) -+ 
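/*
 * Illustrative sketch, not part of this patch: how the IRQ0ENABLE/IRQ0STATUS
 * bit definitions above are typically combined when (un)masking ISP
 * interrupt sources.  "isp_base" stands for an ioremap()ed mapping of the
 * ISP register space and is an assumption here; the real driver goes through
 * its own register accessors.
 */
#include <linux/io.h>
#include <linux/types.h>

static void isp_unmask_preview_irqs(void __iomem *isp_base)
{
	u32 bits = IRQ0ENABLE_PRV_DONE_IRQ |
		   IRQ0ENABLE_RSZ_DONE_IRQ |
		   IRQ0ENABLE_HS_VS_IRQ;

	/* Write 1s to IRQ0STATUS first to clear any stale events. */
	writel(bits, isp_base + ISP_IRQ0STATUS);
	/* Then unmask the same sources in IRQ0ENABLE. */
	writel(readl(isp_base + ISP_IRQ0ENABLE) | bits,
	       isp_base + ISP_IRQ0ENABLE);
}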
-+#define TCTRL_GRESET_LEN 0 -+ -+#define TCTRL_PSTRB_REPLAY_DELAY 0 -+#define TCTRL_PSTRB_REPLAY_COUNTER_SHIFT 25 -+ -+#define ISPCTRL_PAR_SER_CLK_SEL_PARALLEL 0x0 -+#define ISPCTRL_PAR_SER_CLK_SEL_CSIA 0x1 -+#define ISPCTRL_PAR_SER_CLK_SEL_CSIB 0x2 -+#define ISPCTRL_PAR_SER_CLK_SEL_MASK 0xFFFFFFFC -+ -+#define ISPCTRL_PAR_BRIDGE_SHIFT 2 -+#define ISPCTRL_PAR_BRIDGE_DISABLE (0x0 << 2) -+#define ISPCTRL_PAR_BRIDGE_LENDIAN (0x2 << 2) -+#define ISPCTRL_PAR_BRIDGE_BENDIAN (0x3 << 2) -+ -+#define ISPCTRL_PAR_CLK_POL_SHIFT 4 -+#define ISPCTRL_PAR_CLK_POL_INV (1 << 4) -+#define ISPCTRL_PING_PONG_EN (1 << 5) -+#define ISPCTRL_SHIFT_SHIFT 6 -+#define ISPCTRL_SHIFT_0 (0x0 << 6) -+#define ISPCTRL_SHIFT_2 (0x1 << 6) -+#define ISPCTRL_SHIFT_4 (0x2 << 6) -+#define ISPCTRL_SHIFT_MASK (~(0x3 << 6)) -+ -+#define ISPCTRL_CCDC_CLK_EN (1 << 8) -+#define ISPCTRL_SCMP_CLK_EN (1 << 9) -+#define ISPCTRL_H3A_CLK_EN (1 << 10) -+#define ISPCTRL_HIST_CLK_EN (1 << 11) -+#define ISPCTRL_PREV_CLK_EN (1 << 12) -+#define ISPCTRL_RSZ_CLK_EN (1 << 13) -+#define ISPCTRL_SYNC_DETECT_SHIFT 14 -+#define ISPCTRL_SYNC_DETECT_HSFALL (0x0 << ISPCTRL_SYNC_DETECT_SHIFT) -+#define ISPCTRL_SYNC_DETECT_HSRISE (0x1 << ISPCTRL_SYNC_DETECT_SHIFT) -+#define ISPCTRL_SYNC_DETECT_VSFALL (0x2 << ISPCTRL_SYNC_DETECT_SHIFT) -+#define ISPCTRL_SYNC_DETECT_VSRISE (0x3 << ISPCTRL_SYNC_DETECT_SHIFT) -+#define ISPCTRL_SYNC_DETECT_MASK (0x3 << ISPCTRL_SYNC_DETECT_SHIFT) -+ -+#define ISPCTRL_CCDC_RAM_EN (1 << 16) -+#define ISPCTRL_PREV_RAM_EN (1 << 17) -+#define ISPCTRL_SBL_RD_RAM_EN (1 << 18) -+#define ISPCTRL_SBL_WR1_RAM_EN (1 << 19) -+#define ISPCTRL_SBL_WR0_RAM_EN (1 << 20) -+#define ISPCTRL_SBL_AUTOIDLE (1 << 21) -+#define ISPCTRL_SBL_SHARED_RPORTB (1 << 28) -+#define ISPCTRL_JPEG_FLUSH (1 << 30) -+#define ISPCTRL_CCDC_FLUSH (1 << 31) -+ -+#define ISPSECURE_SECUREMODE 0 -+ -+#define ISPTCTRL_CTRL_DIV_LOW 0x0 -+#define ISPTCTRL_CTRL_DIV_HIGH 0x1 -+#define ISPTCTRL_CTRL_DIV_BYPASS 0x1F -+ -+#define ISPTCTRL_CTRL_DIVA_SHIFT 0 -+#define ISPTCTRL_CTRL_DIVA_MASK (0x1F << ISPTCTRL_CTRL_DIVA_SHIFT) -+ -+#define ISPTCTRL_CTRL_DIVB_SHIFT 5 -+#define ISPTCTRL_CTRL_DIVB_MASK (0x1F << ISPTCTRL_CTRL_DIVB_SHIFT) -+ -+#define ISPTCTRL_CTRL_DIVC_SHIFT 10 -+#define ISPTCTRL_CTRL_DIVC_NOCLOCK (0x0 << 10) -+ -+#define ISPTCTRL_CTRL_SHUTEN (1 << 21) -+#define ISPTCTRL_CTRL_PSTRBEN (1 << 22) -+#define ISPTCTRL_CTRL_STRBEN (1 << 23) -+#define ISPTCTRL_CTRL_SHUTPOL (1 << 24) -+#define ISPTCTRL_CTRL_STRBPSTRBPOL (1 << 26) -+ -+#define ISPTCTRL_CTRL_INSEL_SHIFT 27 -+#define ISPTCTRL_CTRL_INSEL_PARALLEL (0x0 << 27) -+#define ISPTCTRL_CTRL_INSEL_CSIA (0x1 << 27) -+#define ISPTCTRL_CTRL_INSEL_CSIB (0x2 << 27) -+ -+#define ISPTCTRL_CTRL_GRESETEn (1 << 29) -+#define ISPTCTRL_CTRL_GRESETPOL (1 << 30) -+#define ISPTCTRL_CTRL_GRESETDIR (1 << 31) -+ -+#define ISPTCTRL_FRAME_SHUT_SHIFT 0 -+#define ISPTCTRL_FRAME_PSTRB_SHIFT 6 -+#define ISPTCTRL_FRAME_STRB_SHIFT 12 -+ -+#define ISPCCDC_PID_PREV_SHIFT 0 -+#define ISPCCDC_PID_CID_SHIFT 8 -+#define ISPCCDC_PID_TID_SHIFT 16 -+ -+#define ISPCCDC_PCR_EN 1 -+#define ISPCCDC_PCR_BUSY (1 << 1) -+ -+#define ISPCCDC_SYN_MODE_VDHDOUT 0x1 -+#define ISPCCDC_SYN_MODE_FLDOUT (1 << 1) -+#define ISPCCDC_SYN_MODE_VDPOL (1 << 2) -+#define ISPCCDC_SYN_MODE_HDPOL (1 << 3) -+#define ISPCCDC_SYN_MODE_FLDPOL (1 << 4) -+#define ISPCCDC_SYN_MODE_EXWEN (1 << 5) -+#define ISPCCDC_SYN_MODE_DATAPOL (1 << 6) -+#define ISPCCDC_SYN_MODE_FLDMODE (1 << 7) -+#define ISPCCDC_SYN_MODE_DATSIZ_MASK 0xFFFFF8FF -+#define ISPCCDC_SYN_MODE_DATSIZ_8_16 (0x0 << 8) -+#define 
ISPCCDC_SYN_MODE_DATSIZ_12 (0x4 << 8) -+#define ISPCCDC_SYN_MODE_DATSIZ_11 (0x5 << 8) -+#define ISPCCDC_SYN_MODE_DATSIZ_10 (0x6 << 8) -+#define ISPCCDC_SYN_MODE_DATSIZ_8 (0x7 << 8) -+#define ISPCCDC_SYN_MODE_PACK8 (1 << 11) -+#define ISPCCDC_SYN_MODE_INPMOD_MASK 0xFFFFCFFF -+#define ISPCCDC_SYN_MODE_INPMOD_RAW (0 << 12) -+#define ISPCCDC_SYN_MODE_INPMOD_YCBCR16 (1 << 12) -+#define ISPCCDC_SYN_MODE_INPMOD_YCBCR8 (2 << 12) -+#define ISPCCDC_SYN_MODE_LPF (1 << 14) -+#define ISPCCDC_SYN_MODE_FLDSTAT (1 << 15) -+#define ISPCCDC_SYN_MODE_VDHDEN (1 << 16) -+#define ISPCCDC_SYN_MODE_WEN (1 << 17) -+#define ISPCCDC_SYN_MODE_VP2SDR (1 << 18) -+#define ISPCCDC_SYN_MODE_SDR2RSZ (1 << 19) -+ -+#define ISPCCDC_HD_VD_WID_VDW_SHIFT 0 -+#define ISPCCDC_HD_VD_WID_HDW_SHIFT 16 -+ -+#define ISPCCDC_PIX_LINES_HLPRF_SHIFT 0 -+#define ISPCCDC_PIX_LINES_PPLN_SHIFT 16 -+ -+#define ISPCCDC_HORZ_INFO_NPH_SHIFT 0 -+#define ISPCCDC_HORZ_INFO_NPH_MASK 0xFFFF8000 -+#define ISPCCDC_HORZ_INFO_SPH_MASK 0x1000FFFF -+#define ISPCCDC_HORZ_INFO_SPH_SHIFT 16 -+ -+#define ISPCCDC_VERT_START_SLV0_SHIFT 16 -+#define ISPCCDC_VERT_START_SLV0_MASK 0x1000FFFF -+#define ISPCCDC_VERT_START_SLV1_SHIFT 0 -+ -+#define ISPCCDC_VERT_LINES_NLV_MASK 0xFFFF8000 -+#define ISPCCDC_VERT_LINES_NLV_SHIFT 0 -+ -+#define ISPCCDC_CULLING_CULV_SHIFT 0 -+#define ISPCCDC_CULLING_CULHODD_SHIFT 16 -+#define ISPCCDC_CULLING_CULHEVN_SHIFT 24 -+ -+#define ISPCCDC_HSIZE_OFF_SHIFT 0 -+ -+#define ISPCCDC_SDOFST_FINV (1 << 14) -+#define ISPCCDC_SDOFST_FOFST_1L 0 -+#define ISPCCDC_SDOFST_FOFST_4L (3 << 12) -+#define ISPCCDC_SDOFST_LOFST3_SHIFT 0 -+#define ISPCCDC_SDOFST_LOFST2_SHIFT 3 -+#define ISPCCDC_SDOFST_LOFST1_SHIFT 6 -+#define ISPCCDC_SDOFST_LOFST0_SHIFT 9 -+#define EVENEVEN 1 -+#define ODDEVEN 2 -+#define EVENODD 3 -+#define ODDODD 4 -+ -+#define ISPCCDC_CLAMP_OBGAIN_SHIFT 0 -+#define ISPCCDC_CLAMP_OBST_SHIFT 10 -+#define ISPCCDC_CLAMP_OBSLN_SHIFT 25 -+#define ISPCCDC_CLAMP_OBSLEN_SHIFT 28 -+#define ISPCCDC_CLAMP_CLAMPEN (1 << 31) -+ -+#define ISPCCDC_COLPTN_R_Ye 0x0 -+#define ISPCCDC_COLPTN_Gr_Cy 0x1 -+#define ISPCCDC_COLPTN_Gb_G 0x2 -+#define ISPCCDC_COLPTN_B_Mg 0x3 -+#define ISPCCDC_COLPTN_CP0PLC0_SHIFT 0 -+#define ISPCCDC_COLPTN_CP0PLC1_SHIFT 2 -+#define ISPCCDC_COLPTN_CP0PLC2_SHIFT 4 -+#define ISPCCDC_COLPTN_CP0PLC3_SHIFT 6 -+#define ISPCCDC_COLPTN_CP1PLC0_SHIFT 8 -+#define ISPCCDC_COLPTN_CP1PLC1_SHIFT 10 -+#define ISPCCDC_COLPTN_CP1PLC2_SHIFT 12 -+#define ISPCCDC_COLPTN_CP1PLC3_SHIFT 14 -+#define ISPCCDC_COLPTN_CP2PLC0_SHIFT 16 -+#define ISPCCDC_COLPTN_CP2PLC1_SHIFT 18 -+#define ISPCCDC_COLPTN_CP2PLC2_SHIFT 20 -+#define ISPCCDC_COLPTN_CP2PLC3_SHIFT 22 -+#define ISPCCDC_COLPTN_CP3PLC0_SHIFT 24 -+#define ISPCCDC_COLPTN_CP3PLC1_SHIFT 26 -+#define ISPCCDC_COLPTN_CP3PLC2_SHIFT 28 -+#define ISPCCDC_COLPTN_CP3PLC3_SHIFT 30 -+ -+#define ISPCCDC_BLKCMP_B_MG_SHIFT 0 -+#define ISPCCDC_BLKCMP_GB_G_SHIFT 8 -+#define ISPCCDC_BLKCMP_GR_CY_SHIFT 16 -+#define ISPCCDC_BLKCMP_R_YE_SHIFT 24 -+ -+#define ISPCCDC_FPC_FPNUM_SHIFT 0 -+#define ISPCCDC_FPC_FPCEN (1 << 15) -+#define ISPCCDC_FPC_FPERR (1 << 16) -+ -+#define ISPCCDC_VDINT_1_SHIFT 0 -+#define ISPCCDC_VDINT_0_SHIFT 16 -+#define ISPCCDC_VDINT_0_MASK 0x7FFF -+#define ISPCCDC_VDINT_1_MASK 0x7FFF -+ -+#define ISPCCDC_ALAW_GWDI_SHIFT 0 -+#define ISPCCDC_ALAW_CCDTBL (1 << 3) -+ -+#define ISPCCDC_REC656IF_R656ON 1 -+#define ISPCCDC_REC656IF_ECCFVH (1 << 1) -+ -+#define ISPCCDC_CFG_BW656 (1 << 5) -+#define ISPCCDC_CFG_FIDMD_SHIFT 6 -+#define ISPCCDC_CFG_WENLOG (1 << 8) -+#define ISPCCDC_CFG_WENLOG_AND (0 << 8) -+#define 
ISPCCDC_CFG_WENLOG_OR (1 << 8) -+#define ISPCCDC_CFG_Y8POS (1 << 11) -+#define ISPCCDC_CFG_BSWD (1 << 12) -+#define ISPCCDC_CFG_MSBINVI (1 << 13) -+#define ISPCCDC_CFG_VDLC (1 << 15) -+ -+#define ISPCCDC_FMTCFG_FMTEN 0x1 -+#define ISPCCDC_FMTCFG_LNALT (1 << 1) -+#define ISPCCDC_FMTCFG_LNUM_SHIFT 2 -+#define ISPCCDC_FMTCFG_PLEN_ODD_SHIFT 4 -+#define ISPCCDC_FMTCFG_PLEN_EVEN_SHIFT 8 -+#define ISPCCDC_FMTCFG_VPIN_MASK 0xFFFF8000 -+#define ISPCCDC_FMTCFG_VPIN_12_3 (0x3 << 12) -+#define ISPCCDC_FMTCFG_VPIN_11_2 (0x4 << 12) -+#define ISPCCDC_FMTCFG_VPIN_10_1 (0x5 << 12) -+#define ISPCCDC_FMTCFG_VPIN_9_0 (0x6 << 12) -+#define ISPCCDC_FMTCFG_VPEN (1 << 15) -+ -+#define ISPCCDC_FMTCFG_VPIF_FRQ_MASK 0xFFF8FFFF -+#define ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT 16 -+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY2 (0x0 << 16) -+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY3 (0x1 << 16) -+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY4 (0x2 << 16) -+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY5 (0x3 << 16) -+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY6 (0x4 << 16) -+ -+#define ISPCCDC_FMT_HORZ_FMTLNH_SHIFT 0 -+#define ISPCCDC_FMT_HORZ_FMTSPH_SHIFT 16 -+ -+#define ISPCCDC_FMT_VERT_FMTLNV_SHIFT 0 -+#define ISPCCDC_FMT_VERT_FMTSLV_SHIFT 16 -+ -+#define ISPCCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF0000 -+#define ISPCCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF -+ -+#define ISPCCDC_FMT_VERT_FMTSLV_MASK 0x1FFF0000 -+#define ISPCCDC_FMT_VERT_FMTLNV_MASK 0x1FFF -+ -+#define ISPCCDC_VP_OUT_HORZ_ST_SHIFT 0 -+#define ISPCCDC_VP_OUT_HORZ_NUM_SHIFT 4 -+#define ISPCCDC_VP_OUT_VERT_NUM_SHIFT 17 -+ -+#define ISPRSZ_PID_PREV_SHIFT 0 -+#define ISPRSZ_PID_CID_SHIFT 8 -+#define ISPRSZ_PID_TID_SHIFT 16 -+ -+#define ISPRSZ_PCR_ENABLE 0x5 -+#define ISPRSZ_PCR_BUSY (1 << 1) -+ -+#define ISPRSZ_CNT_HRSZ_SHIFT 0 -+#define ISPRSZ_CNT_HRSZ_MASK 0x3FF -+#define ISPRSZ_CNT_VRSZ_SHIFT 10 -+#define ISPRSZ_CNT_VRSZ_MASK 0xFFC00 -+#define ISPRSZ_CNT_HSTPH_SHIFT 20 -+#define ISPRSZ_CNT_HSTPH_MASK 0x700000 -+#define ISPRSZ_CNT_VSTPH_SHIFT 23 -+#define ISPRSZ_CNT_VSTPH_MASK 0x3800000 -+#define ISPRSZ_CNT_CBILIN_MASK 0x20000000 -+#define ISPRSZ_CNT_INPTYP_MASK 0x08000000 -+#define ISPRSZ_CNT_PIXFMT_MASK 0x04000000 -+#define ISPRSZ_CNT_YCPOS (1 << 26) -+#define ISPRSZ_CNT_INPTYP (1 << 27) -+#define ISPRSZ_CNT_INPSRC (1 << 28) -+#define ISPRSZ_CNT_CBILIN (1 << 29) -+ -+#define ISPRSZ_OUT_SIZE_HORZ_SHIFT 0 -+#define ISPRSZ_OUT_SIZE_HORZ_MASK 0x7FF -+#define ISPRSZ_OUT_SIZE_VERT_SHIFT 16 -+#define ISPRSZ_OUT_SIZE_VERT_MASK 0x7FF0000 -+ -+ -+#define ISPRSZ_IN_START_HORZ_ST_SHIFT 0 -+#define ISPRSZ_IN_START_HORZ_ST_MASK 0x1FFF -+#define ISPRSZ_IN_START_VERT_ST_SHIFT 16 -+#define ISPRSZ_IN_START_VERT_ST_MASK 0x1FFF0000 -+ -+ -+#define ISPRSZ_IN_SIZE_HORZ_SHIFT 0 -+#define ISPRSZ_IN_SIZE_HORZ_MASK 0x1FFF -+#define ISPRSZ_IN_SIZE_VERT_SHIFT 16 -+#define ISPRSZ_IN_SIZE_VERT_MASK 0x1FFF0000 -+ -+#define ISPRSZ_SDR_INADD_ADDR_SHIFT 0 -+#define ISPRSZ_SDR_INADD_ADDR_MASK 0xFFFFFFFF -+ -+#define ISPRSZ_SDR_INOFF_OFFSET_SHIFT 0 -+#define ISPRSZ_SDR_INOFF_OFFSET_MASK 0xFFFF -+ -+#define ISPRSZ_SDR_OUTADD_ADDR_SHIFT 0 -+#define ISPRSZ_SDR_OUTADD_ADDR_MASK 0xFFFFFFFF -+ -+ -+#define ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT 0 -+#define ISPRSZ_SDR_OUTOFF_OFFSET_MASK 0xFFFF -+ -+#define ISPRSZ_HFILT10_COEF0_SHIFT 0 -+#define ISPRSZ_HFILT10_COEF0_MASK 0x3FF -+#define ISPRSZ_HFILT10_COEF1_SHIFT 16 -+#define ISPRSZ_HFILT10_COEF1_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT32_COEF2_SHIFT 0 -+#define ISPRSZ_HFILT32_COEF2_MASK 0x3FF -+#define ISPRSZ_HFILT32_COEF3_SHIFT 16 -+#define ISPRSZ_HFILT32_COEF3_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT54_COEF4_SHIFT 0 
-+#define ISPRSZ_HFILT54_COEF4_MASK 0x3FF -+#define ISPRSZ_HFILT54_COEF5_SHIFT 16 -+#define ISPRSZ_HFILT54_COEF5_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT76_COEFF6_SHIFT 0 -+#define ISPRSZ_HFILT76_COEFF6_MASK 0x3FF -+#define ISPRSZ_HFILT76_COEFF7_SHIFT 16 -+#define ISPRSZ_HFILT76_COEFF7_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT98_COEFF8_SHIFT 0 -+#define ISPRSZ_HFILT98_COEFF8_MASK 0x3FF -+#define ISPRSZ_HFILT98_COEFF9_SHIFT 16 -+#define ISPRSZ_HFILT98_COEFF9_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT1110_COEF10_SHIFT 0 -+#define ISPRSZ_HFILT1110_COEF10_MASK 0x3FF -+#define ISPRSZ_HFILT1110_COEF11_SHIFT 16 -+#define ISPRSZ_HFILT1110_COEF11_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT1312_COEFF12_SHIFT 0 -+#define ISPRSZ_HFILT1312_COEFF12_MASK 0x3FF -+#define ISPRSZ_HFILT1312_COEFF13_SHIFT 16 -+#define ISPRSZ_HFILT1312_COEFF13_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT1514_COEFF14_SHIFT 0 -+#define ISPRSZ_HFILT1514_COEFF14_MASK 0x3FF -+#define ISPRSZ_HFILT1514_COEFF15_SHIFT 16 -+#define ISPRSZ_HFILT1514_COEFF15_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT1716_COEF16_SHIFT 0 -+#define ISPRSZ_HFILT1716_COEF16_MASK 0x3FF -+#define ISPRSZ_HFILT1716_COEF17_SHIFT 16 -+#define ISPRSZ_HFILT1716_COEF17_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT1918_COEF18_SHIFT 0 -+#define ISPRSZ_HFILT1918_COEF18_MASK 0x3FF -+#define ISPRSZ_HFILT1918_COEF19_SHIFT 16 -+#define ISPRSZ_HFILT1918_COEF19_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT2120_COEF20_SHIFT 0 -+#define ISPRSZ_HFILT2120_COEF20_MASK 0x3FF -+#define ISPRSZ_HFILT2120_COEF21_SHIFT 16 -+#define ISPRSZ_HFILT2120_COEF21_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT2322_COEF22_SHIFT 0 -+#define ISPRSZ_HFILT2322_COEF22_MASK 0x3FF -+#define ISPRSZ_HFILT2322_COEF23_SHIFT 16 -+#define ISPRSZ_HFILT2322_COEF23_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT2524_COEF24_SHIFT 0 -+#define ISPRSZ_HFILT2524_COEF24_MASK 0x3FF -+#define ISPRSZ_HFILT2524_COEF25_SHIFT 16 -+#define ISPRSZ_HFILT2524_COEF25_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT2726_COEF26_SHIFT 0 -+#define ISPRSZ_HFILT2726_COEF26_MASK 0x3FF -+#define ISPRSZ_HFILT2726_COEF27_SHIFT 16 -+#define ISPRSZ_HFILT2726_COEF27_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT2928_COEF28_SHIFT 0 -+#define ISPRSZ_HFILT2928_COEF28_MASK 0x3FF -+#define ISPRSZ_HFILT2928_COEF29_SHIFT 16 -+#define ISPRSZ_HFILT2928_COEF29_MASK 0x3FF0000 -+ -+#define ISPRSZ_HFILT3130_COEF30_SHIFT 0 -+#define ISPRSZ_HFILT3130_COEF30_MASK 0x3FF -+#define ISPRSZ_HFILT3130_COEF31_SHIFT 16 -+#define ISPRSZ_HFILT3130_COEF31_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT10_COEF0_SHIFT 0 -+#define ISPRSZ_VFILT10_COEF0_MASK 0x3FF -+#define ISPRSZ_VFILT10_COEF1_SHIFT 16 -+#define ISPRSZ_VFILT10_COEF1_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT32_COEF2_SHIFT 0 -+#define ISPRSZ_VFILT32_COEF2_MASK 0x3FF -+#define ISPRSZ_VFILT32_COEF3_SHIFT 16 -+#define ISPRSZ_VFILT32_COEF3_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT54_COEF4_SHIFT 0 -+#define ISPRSZ_VFILT54_COEF4_MASK 0x3FF -+#define ISPRSZ_VFILT54_COEF5_SHIFT 16 -+#define ISPRSZ_VFILT54_COEF5_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT76_COEFF6_SHIFT 0 -+#define ISPRSZ_VFILT76_COEFF6_MASK 0x3FF -+#define ISPRSZ_VFILT76_COEFF7_SHIFT 16 -+#define ISPRSZ_VFILT76_COEFF7_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT98_COEFF8_SHIFT 0 -+#define ISPRSZ_VFILT98_COEFF8_MASK 0x3FF -+#define ISPRSZ_VFILT98_COEFF9_SHIFT 16 -+#define ISPRSZ_VFILT98_COEFF9_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT1110_COEF10_SHIFT 0 -+#define ISPRSZ_VFILT1110_COEF10_MASK 0x3FF -+#define ISPRSZ_VFILT1110_COEF11_SHIFT 16 -+#define ISPRSZ_VFILT1110_COEF11_MASK 0x3FF0000 -+ -+#define 
ISPRSZ_VFILT1312_COEFF12_SHIFT 0 -+#define ISPRSZ_VFILT1312_COEFF12_MASK 0x3FF -+#define ISPRSZ_VFILT1312_COEFF13_SHIFT 16 -+#define ISPRSZ_VFILT1312_COEFF13_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT1514_COEFF14_SHIFT 0 -+#define ISPRSZ_VFILT1514_COEFF14_MASK 0x3FF -+#define ISPRSZ_VFILT1514_COEFF15_SHIFT 16 -+#define ISPRSZ_VFILT1514_COEFF15_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT1716_COEF16_SHIFT 0 -+#define ISPRSZ_VFILT1716_COEF16_MASK 0x3FF -+#define ISPRSZ_VFILT1716_COEF17_SHIFT 16 -+#define ISPRSZ_VFILT1716_COEF17_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT1918_COEF18_SHIFT 0 -+#define ISPRSZ_VFILT1918_COEF18_MASK 0x3FF -+#define ISPRSZ_VFILT1918_COEF19_SHIFT 16 -+#define ISPRSZ_VFILT1918_COEF19_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT2120_COEF20_SHIFT 0 -+#define ISPRSZ_VFILT2120_COEF20_MASK 0x3FF -+#define ISPRSZ_VFILT2120_COEF21_SHIFT 16 -+#define ISPRSZ_VFILT2120_COEF21_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT2322_COEF22_SHIFT 0 -+#define ISPRSZ_VFILT2322_COEF22_MASK 0x3FF -+#define ISPRSZ_VFILT2322_COEF23_SHIFT 16 -+#define ISPRSZ_VFILT2322_COEF23_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT2524_COEF24_SHIFT 0 -+#define ISPRSZ_VFILT2524_COEF24_MASK 0x3FF -+#define ISPRSZ_VFILT2524_COEF25_SHIFT 16 -+#define ISPRSZ_VFILT2524_COEF25_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT2726_COEF26_SHIFT 0 -+#define ISPRSZ_VFILT2726_COEF26_MASK 0x3FF -+#define ISPRSZ_VFILT2726_COEF27_SHIFT 16 -+#define ISPRSZ_VFILT2726_COEF27_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT2928_COEF28_SHIFT 0 -+#define ISPRSZ_VFILT2928_COEF28_MASK 0x3FF -+#define ISPRSZ_VFILT2928_COEF29_SHIFT 16 -+#define ISPRSZ_VFILT2928_COEF29_MASK 0x3FF0000 -+ -+#define ISPRSZ_VFILT3130_COEF30_SHIFT 0 -+#define ISPRSZ_VFILT3130_COEF30_MASK 0x3FF -+#define ISPRSZ_VFILT3130_COEF31_SHIFT 16 -+#define ISPRSZ_VFILT3130_COEF31_MASK 0x3FF0000 -+ -+#define ISPRSZ_YENH_CORE_SHIFT 0 -+#define ISPRSZ_YENH_CORE_MASK 0xFF -+#define ISPRSZ_YENH_SLOP_SHIFT 8 -+#define ISPRSZ_YENH_SLOP_MASK 0xF00 -+#define ISPRSZ_YENH_GAIN_SHIFT 12 -+#define ISPRSZ_YENH_GAIN_MASK 0xF000 -+#define ISPRSZ_YENH_ALGO_SHIFT 16 -+#define ISPRSZ_YENH_ALGO_MASK 0x30000 -+ -+#define ISPH3A_PCR_AEW_ALAW_EN_SHIFT 1 -+#define ISPH3A_PCR_AF_MED_TH_SHIFT 3 -+#define ISPH3A_PCR_AF_RGBPOS_SHIFT 11 -+#define ISPH3A_PCR_AEW_AVE2LMT_SHIFT 22 -+#define ISPH3A_PCR_AEW_AVE2LMT_MASK 0xFFC00000 -+#define ISPH3A_PCR_BUSYAF (1 << 15) -+#define ISPH3A_PCR_BUSYAEAWB (1 << 18) -+ -+#define ISPH3A_AEWWIN1_WINHC_SHIFT 0 -+#define ISPH3A_AEWWIN1_WINHC_MASK 0x3F -+#define ISPH3A_AEWWIN1_WINVC_SHIFT 6 -+#define ISPH3A_AEWWIN1_WINVC_MASK 0x1FC0 -+#define ISPH3A_AEWWIN1_WINW_SHIFT 13 -+#define ISPH3A_AEWWIN1_WINW_MASK 0xFE000 -+#define ISPH3A_AEWWIN1_WINH_SHIFT 24 -+#define ISPH3A_AEWWIN1_WINH_MASK 0x7F000000 -+ -+#define ISPH3A_AEWINSTART_WINSH_SHIFT 0 -+#define ISPH3A_AEWINSTART_WINSH_MASK 0x0FFF -+#define ISPH3A_AEWINSTART_WINSV_SHIFT 16 -+#define ISPH3A_AEWINSTART_WINSV_MASK 0x0FFF0000 -+ -+#define ISPH3A_AEWINBLK_WINH_SHIFT 0 -+#define ISPH3A_AEWINBLK_WINH_MASK 0x7F -+#define ISPH3A_AEWINBLK_WINSV_SHIFT 16 -+#define ISPH3A_AEWINBLK_WINSV_MASK 0x0FFF0000 -+ -+#define ISPH3A_AEWSUBWIN_AEWINCH_SHIFT 0 -+#define ISPH3A_AEWSUBWIN_AEWINCH_MASK 0x0F -+#define ISPH3A_AEWSUBWIN_AEWINCV_SHIFT 8 -+#define ISPH3A_AEWSUBWIN_AEWINCV_MASK 0x0F00 -+ -+#define ISPHIST_PCR_ENABLE_SHIFT 0 -+#define ISPHIST_PCR_ENABLE_MASK 0x01 -+#define ISPHIST_PCR_BUSY 0x02 -+ -+#define ISPHIST_CNT_DATASIZE_SHIFT 8 -+#define ISPHIST_CNT_DATASIZE_MASK 0x0100 -+#define ISPHIST_CNT_CLEAR_SHIFT 7 -+#define ISPHIST_CNT_CLEAR_MASK 0x080 
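/*
 * Illustrative sketch, not part of this patch: the paired _SHIFT/_MASK
 * macros above are meant to be used together when packing a field into a
 * register word, shown here for the H3A AE/AWB window register
 * ISPH3A_AEWWIN1.  The field arguments are arbitrary example values; the
 * helper name is hypothetical.
 */
#include <linux/types.h>

static u32 h3a_pack_aewwin1(u32 winh, u32 winw, u32 winvc, u32 winhc)
{
	u32 val = 0;

	val |= (winh  << ISPH3A_AEWWIN1_WINH_SHIFT)  & ISPH3A_AEWWIN1_WINH_MASK;
	val |= (winw  << ISPH3A_AEWWIN1_WINW_SHIFT)  & ISPH3A_AEWWIN1_WINW_MASK;
	val |= (winvc << ISPH3A_AEWWIN1_WINVC_SHIFT) & ISPH3A_AEWWIN1_WINVC_MASK;
	val |= (winhc << ISPH3A_AEWWIN1_WINHC_SHIFT) & ISPH3A_AEWWIN1_WINHC_MASK;

	return val;	/* value to be written to ISPH3A_AEWWIN1 */
}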
-+#define ISPHIST_CNT_CFA_SHIFT 6 -+#define ISPHIST_CNT_CFA_MASK 0x040 -+#define ISPHIST_CNT_BINS_SHIFT 4 -+#define ISPHIST_CNT_BINS_MASK 0x030 -+#define ISPHIST_CNT_SOURCE_SHIFT 3 -+#define ISPHIST_CNT_SOURCE_MASK 0x08 -+#define ISPHIST_CNT_SHIFT_SHIFT 0 -+#define ISPHIST_CNT_SHIFT_MASK 0x07 -+ -+#define ISPHIST_WB_GAIN_WG00_SHIFT 24 -+#define ISPHIST_WB_GAIN_WG00_MASK 0xFF000000 -+#define ISPHIST_WB_GAIN_WG01_SHIFT 16 -+#define ISPHIST_WB_GAIN_WG01_MASK 0xFF0000 -+#define ISPHIST_WB_GAIN_WG02_SHIFT 8 -+#define ISPHIST_WB_GAIN_WG02_MASK 0xFF00 -+#define ISPHIST_WB_GAIN_WG03_SHIFT 0 -+#define ISPHIST_WB_GAIN_WG03_MASK 0xFF -+ -+#define ISPHIST_REGHORIZ_HSTART_SHIFT 16 /* -+ * REGION 0 to 3 HORZ -+ * and VERT -+ */ -+#define ISPHIST_REGHORIZ_HSTART_MASK 0x3FFF0000 -+#define ISPHIST_REGHORIZ_HEND_SHIFT 0 -+#define ISPHIST_REGHORIZ_HEND_MASK 0x3FFF -+#define ISPHIST_REGVERT_VSTART_SHIFT 16 -+#define ISPHIST_REGVERT_VSTART_MASK 0x3FFF0000 -+#define ISPHIST_REGVERT_VEND_SHIFT 0 -+#define ISPHIST_REGVERT_VEND_MASK 0x3FFF -+ -+#define ISPHIST_REGHORIZ_MASK 0x3FFF3FFF -+#define ISPHIST_REGVERT_MASK 0x3FFF3FFF -+ -+#define ISPHIST_ADDR_SHIFT 0 -+#define ISPHIST_ADDR_MASK 0x3FF -+ -+#define ISPHIST_DATA_SHIFT 0 -+#define ISPHIST_DATA_MASK 0xFFFFF -+ -+#define ISPHIST_RADD_SHIFT 0 -+#define ISPHIST_RADD_MASK 0xFFFFFFFF -+ -+#define ISPHIST_RADD_OFF_SHIFT 0 -+#define ISPHIST_RADD_OFF_MASK 0xFFFF -+ -+#define ISPHIST_HV_INFO_HSIZE_SHIFT 16 -+#define ISPHIST_HV_INFO_HSIZE_MASK 0x3FFF0000 -+#define ISPHIST_HV_INFO_VSIZE_SHIFT 0 -+#define ISPHIST_HV_INFO_VSIZE_MASK 0x3FFF -+ -+#define ISPHIST_HV_INFO_MASK 0x3FFF3FFF -+ -+#define ISPCCDC_LSC_ENABLE 1 -+#define ISPCCDC_LSC_GAIN_MODE_N_MASK 0x700 -+#define ISPCCDC_LSC_GAIN_MODE_N_SHIFT 8 -+#define ISPCCDC_LSC_GAIN_MODE_M_MASK 0x3800 -+#define ISPCCDC_LSC_GAIN_MODE_M_SHIFT 12 -+#define ISPCCDC_LSC_GAIN_FORMAT_MASK 0xE -+#define ISPCCDC_LSC_GAIN_FORMAT_SHIFT 1 -+#define ISPCCDC_LSC_AFTER_REFORMATTER_MASK (1<<6) -+ -+#define ISPCCDC_LSC_INITIAL_X_MASK 0x3F -+#define ISPCCDC_LSC_INITIAL_X_SHIFT 0 -+#define ISPCCDC_LSC_INITIAL_Y_MASK 0x3F0000 -+#define ISPCCDC_LSC_INITIAL_Y_SHIFT 16 -+ -+#define ISPMMU_REVISION_REV_MINOR_MASK 0xF -+#define ISPMMU_REVISION_REV_MAJOR_SHIFT 0x4 -+ -+#define IRQENABLE_MULTIHITFAULT (1<<4) -+#define IRQENABLE_TWFAULT (1<<3) -+#define IRQENABLE_EMUMISS (1<<2) -+#define IRQENABLE_TRANSLNFAULT (1<<1) -+#define IRQENABLE_TLBMISS (1) -+ -+#define ISPMMU_MMUCNTL_MMU_EN (1<<1) -+#define ISPMMU_MMUCNTL_TWL_EN (1<<2) -+#define ISPMMU_MMUCNTL_EMUTLBUPDATE (1<<3) -+#define ISPMMU_AUTOIDLE 0x1 -+#define ISPMMU_SIDLEMODE_FORCEIDLE 0 -+#define ISPMMU_SIDLEMODE_NOIDLE 1 -+#define ISPMMU_SIDLEMODE_SMARTIDLE 2 -+#define ISPMMU_SIDLEMODE_SHIFT 3 -+ -+#define ISPCSI1_AUTOIDLE 0x1 -+#define ISPCSI1_MIDLEMODE_SHIFT 12 -+#define ISPCSI1_MIDLEMODE_FORCESTANDBY 0x0 -+#define ISPCSI1_MIDLEMODE_NOSTANDBY 0x1 -+#define ISPCSI1_MIDLEMODE_SMARTSTANDBY 0x2 -+ -+/* CSI2 receiver registers (ES2.0) */ -+#define ISPCSI2_REVISION (0x000) -+#define ISPCSI2_SYSCONFIG (0x010) -+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT 12 -+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK \ -+ (0x3 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT) -+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_FORCE \ -+ (0x0 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT) -+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO \ -+ (0x1 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT) -+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART \ -+ (0x2 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT) -+#define ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT 1 -+#define 
ISPCSI2_SYSCONFIG_SOFT_RESET_MASK \ -+ (0x1 << ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT) -+#define ISPCSI2_SYSCONFIG_SOFT_RESET_NORMAL \ -+ (0x0 << ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT) -+#define ISPCSI2_SYSCONFIG_SOFT_RESET_RESET \ -+ (0x1 << ISPCSI2_SYSCONFIG_SOFT_RESET_SHIFT) -+#define ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT 0 -+#define ISPCSI2_SYSCONFIG_AUTO_IDLE_MASK \ -+ (0x1 << ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT) -+#define ISPCSI2_SYSCONFIG_AUTO_IDLE_FREE \ -+ (0x0 << ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT) -+#define ISPCSI2_SYSCONFIG_AUTO_IDLE_AUTO \ -+ (0x1 << ISPCSI2_SYSCONFIG_AUTO_IDLE_SHIFT) -+#define ISPCSI2_SYSSTATUS (0x014) -+#define ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT 0 -+#define ISPCSI2_SYSSTATUS_RESET_DONE_MASK \ -+ (0x1 << ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT) -+#define ISPCSI2_SYSSTATUS_RESET_DONE_ONGOING \ -+ (0x0 << ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT) -+#define ISPCSI2_SYSSTATUS_RESET_DONE_DONE \ -+ (0x1 << ISPCSI2_SYSSTATUS_RESET_DONE_SHIFT) -+#define ISPCSI2_IRQSTATUS (0x018) -+#define ISPCSI2_IRQSTATUS_OCP_ERR_IRQ (1 << 14) -+#define ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ (1 << 13) -+#define ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 12) -+#define ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ (1 << 11) -+#define ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ (1 << 10) -+#define ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ (1 << 9) -+#define ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ (1 << 8) -+#define ISPCSI2_IRQSTATUS_CONTEXT(n) (1 << (n)) -+ -+#define ISPCSI2_IRQENABLE (0x01C) -+#define ISPCSI2_CTRL (0x040) -+#define ISPCSI2_CTRL_VP_CLK_EN_SHIFT 15 -+#define ISPCSI2_CTRL_VP_CLK_EN_MASK (0x1 << ISPCSI2_CTRL_VP_CLK_EN_SHIFT) -+#define ISPCSI2_CTRL_VP_CLK_EN_DISABLE (0x0 << ISPCSI2_CTRL_VP_CLK_EN_SHIFT) -+#define ISPCSI2_CTRL_VP_CLK_EN_ENABLE (0x1 << ISPCSI2_CTRL_VP_CLK_EN_SHIFT) -+ -+#define ISPCSI2_CTRL_VP_ONLY_EN_SHIFT 11 -+#define ISPCSI2_CTRL_VP_ONLY_EN_MASK (0x1 << ISPCSI2_CTRL_VP_ONLY_EN_SHIFT) -+#define ISPCSI2_CTRL_VP_ONLY_EN_DISABLE (0x0 << ISPCSI2_CTRL_VP_ONLY_EN_SHIFT) -+#define ISPCSI2_CTRL_VP_ONLY_EN_ENABLE (0x1 << ISPCSI2_CTRL_VP_ONLY_EN_SHIFT) -+ -+#define ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT 8 -+#define ISPCSI2_CTRL_VP_OUT_CTRL_MASK (0x3 << \ -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT) -+#define ISPCSI2_CTRL_VP_OUT_CTRL_DISABLE (0x0 << \ -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT) -+#define ISPCSI2_CTRL_VP_OUT_CTRL_DIV2 (0x1 << \ -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT) -+#define ISPCSI2_CTRL_VP_OUT_CTRL_DIV3 (0x2 << \ -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT) -+#define ISPCSI2_CTRL_VP_OUT_CTRL_DIV4 (0x3 << \ -+ ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT) -+ -+#define ISPCSI2_CTRL_DBG_EN_SHIFT 7 -+#define ISPCSI2_CTRL_DBG_EN_MASK (0x1 << ISPCSI2_CTRL_DBG_EN_SHIFT) -+#define ISPCSI2_CTRL_DBG_EN_DISABLE (0x0 << ISPCSI2_CTRL_DBG_EN_SHIFT) -+#define ISPCSI2_CTRL_DBG_EN_ENABLE (0x1 << ISPCSI2_CTRL_DBG_EN_SHIFT) -+ -+#define ISPCSI2_CTRL_BURST_SIZE_SHIFT 5 -+#define ISPCSI2_CTRL_BURST_SIZE_MASK (0x3 << \ -+ ISPCSI2_CTRL_BURST_SIZE_SHIFT) -+#define ISPCSI2_CTRL_BURST_SIZE_MYSTERY_VAL (0x2 << \ -+ ISPCSI2_CTRL_BURST_SIZE_SHIFT) -+ -+#define ISPCSI2_CTRL_FRAME_SHIFT 3 -+#define ISPCSI2_CTRL_FRAME_MASK (0x1 << ISPCSI2_CTRL_FRAME_SHIFT) -+#define ISPCSI2_CTRL_FRAME_DISABLE_IMM (0x0 << ISPCSI2_CTRL_FRAME_SHIFT) -+#define ISPCSI2_CTRL_FRAME_DISABLE_FEC (0x1 << ISPCSI2_CTRL_FRAME_SHIFT) -+ -+#define ISPCSI2_CTRL_ECC_EN_SHIFT 2 -+#define ISPCSI2_CTRL_ECC_EN_MASK (0x1 << ISPCSI2_CTRL_ECC_EN_SHIFT) -+#define ISPCSI2_CTRL_ECC_EN_DISABLE (0x0 << ISPCSI2_CTRL_ECC_EN_SHIFT) -+#define ISPCSI2_CTRL_ECC_EN_ENABLE (0x1 << ISPCSI2_CTRL_ECC_EN_SHIFT) -+ -+#define 
ISPCSI2_CTRL_SECURE_SHIFT 1 -+#define ISPCSI2_CTRL_SECURE_MASK (0x1 << ISPCSI2_CTRL_SECURE_SHIFT) -+#define ISPCSI2_CTRL_SECURE_DISABLE (0x0 << ISPCSI2_CTRL_SECURE_SHIFT) -+#define ISPCSI2_CTRL_SECURE_ENABLE (0x1 << ISPCSI2_CTRL_SECURE_SHIFT) -+ -+#define ISPCSI2_CTRL_IF_EN_SHIFT 0 -+#define ISPCSI2_CTRL_IF_EN_MASK (0x1 << ISPCSI2_CTRL_IF_EN_SHIFT) -+#define ISPCSI2_CTRL_IF_EN_DISABLE (0x0 << ISPCSI2_CTRL_IF_EN_SHIFT) -+#define ISPCSI2_CTRL_IF_EN_ENABLE (0x1 << ISPCSI2_CTRL_IF_EN_SHIFT) -+ -+#define ISPCSI2_DBG_H (0x044) -+#define ISPCSI2_GNQ (0x048) -+#define ISPCSI2_COMPLEXIO_CFG1 (0x050) -+#define ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_SHIFT 29 -+#define ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_MASK \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_ONGOING \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_DONE \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_RESET_DONE_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_SHIFT 27 -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_MASK \ -+ (0x3 << ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_OFF \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_ON \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_ULPW \ -+ (0x2 << ISPCSI2_COMPLEXIO_CFG1_PWR_CMD_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_SHIFT 25 -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_MASK \ -+ (0x3 << ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_OFF \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_ON \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_ULPW \ -+ (0x2 << ISPCSI2_COMPLEXIO_CFG1_PWR_STATUS_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_SHIFT 24 -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_MASK \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_DISABLE \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_ENABLE \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_PWR_AUTO_SHIFT) -+ -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POL_SHIFT(n) (3 + ((n) * 4)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POL_MASK(n) \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_DATA_POL_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POL_PN(n) \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_DATA_POL_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POL_NP(n) \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_DATA_POL_SHIFT(n)) -+ -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n) ((n) * 4) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_MASK(n) \ -+ (0x7 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_NC(n) \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_1(n) \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_2(n) \ -+ (0x2 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_3(n) \ -+ (0x3 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_4(n) \ -+ (0x4 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+#define ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_5(n) \ -+ (0x5 << ISPCSI2_COMPLEXIO_CFG1_DATA_POSITION_SHIFT(n)) -+ -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_SHIFT 3 -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_MASK \ 
-+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_PN \ -+ (0x0 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_NP \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POL_SHIFT) -+ -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT 0 -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_MASK \ -+ (0x7 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_1 \ -+ (0x1 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_2 \ -+ (0x2 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_3 \ -+ (0x3 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_4 \ -+ (0x4 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT) -+#define ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_5 \ -+ (0x5 << ISPCSI2_COMPLEXIO_CFG1_CLOCK_POSITION_SHIFT) -+ -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS (0x054) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEALLULPMEXIT (1 << 26) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEALLULPMENTER (1 << 25) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEULPM5 (1 << 24) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEULPM4 (1 << 23) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEULPM3 (1 << 22) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEULPM2 (1 << 21) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_STATEULPM1 (1 << 20) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRCONTROL5 (1 << 19) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRCONTROL4 (1 << 18) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRCONTROL3 (1 << 17) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRCONTROL2 (1 << 16) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRCONTROL1 (1 << 15) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRESC5 (1 << 14) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRESC4 (1 << 13) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRESC3 (1 << 12) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRESC2 (1 << 11) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRESC1 (1 << 10) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTSYNCHS5 (1 << 9) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTSYNCHS4 (1 << 8) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTSYNCHS3 (1 << 7) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTSYNCHS2 (1 << 6) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTSYNCHS1 (1 << 5) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTHS5 (1 << 4) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTHS4 (1 << 3) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTHS3 (1 << 2) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTHS2 (1 << 1) -+#define ISPCSI2_COMPLEXIO1_IRQSTATUS_ERRSOTHS1 1 -+ -+#define ISPCSI2_SHORT_PACKET (0x05C) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE (0x060) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEALLULPMEXIT (1 << 26) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEALLULPMENTER (1 << 25) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM5 (1 << 24) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM4 (1 << 23) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM3 (1 << 22) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM2 (1 << 21) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_STATEULPM1 (1 << 20) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL5 (1 << 19) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL4 (1 << 18) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL3 (1 << 17) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL2 (1 << 16) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRCONTROL1 (1 << 15) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC5 (1 << 14) -+#define 
ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC4 (1 << 13) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC3 (1 << 12) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC2 (1 << 11) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRESC1 (1 << 10) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS5 (1 << 9) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS4 (1 << 8) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS3 (1 << 7) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS2 (1 << 6) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTSYNCHS1 (1 << 5) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS5 (1 << 4) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS4 (1 << 3) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS3 (1 << 2) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS2 (1 << 1) -+#define ISPCSI2_COMPLEXIO1_IRQENABLE_ERRSOTHS1 1 -+#define ISPCSI2_DBG_P (0x068) -+#define ISPCSI2_TIMING (0x06C) -+ -+ -+#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n) \ -+ ((16 * ((n) - 1)) + 15) -+#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_MASK(n) \ -+ (0x1 << ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_DISABLE(n) \ -+ (0x0 << ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_FORCE_RX_MODE_IO_ENABLE(n) \ -+ (0x1 << ISPCSI2_TIMING_FORCE_RX_MODE_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n) ((16 * ((n) - 1)) + 14) -+#define ISPCSI2_TIMING_STOP_STATE_X16_IO_MASK(n) \ -+ (0x1 << ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_X16_IO_DISABLE(n) \ -+ (0x0 << ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_X16_IO_ENABLE(n) \ -+ (0x1 << ISPCSI2_TIMING_STOP_STATE_X16_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n) ((16 * ((n) - 1)) + 13) -+#define ISPCSI2_TIMING_STOP_STATE_X4_IO_MASK(n) \ -+ (0x1 << ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_X4_IO_DISABLE(n) \ -+ (0x0 << ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_X4_IO_ENABLE(n) \ -+ (0x1 << ISPCSI2_TIMING_STOP_STATE_X4_IO_SHIFT(n)) -+#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n) (16 * ((n) - 1)) -+#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(n) \ -+ (0x1fff << ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n)) -+ -+#define ISPCSI2_CTX_CTRL1(n) ((0x070) + 0x20 * (n)) -+#define ISPCSI2_CTX_CTRL1_COUNT_SHIFT 8 -+#define ISPCSI2_CTX_CTRL1_COUNT_MASK (0xFF << \ -+ ISPCSI2_CTX_CTRL1_COUNT_SHIFT) -+#define ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT 7 -+#define ISPCSI2_CTX_CTRL1_EOF_EN_MASK \ -+ (0x1 << ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_EOF_EN_DISABLE \ -+ (0x0 << ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_EOF_EN_ENABLE \ -+ (0x1 << ISPCSI2_CTX_CTRL1_EOF_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT 6 -+#define ISPCSI2_CTX_CTRL1_EOL_EN_MASK \ -+ (0x1 << ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_EOL_EN_DISABLE \ -+ (0x0 << ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_EOL_EN_ENABLE \ -+ (0x1 << ISPCSI2_CTX_CTRL1_EOL_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_CS_EN_SHIFT 5 -+#define ISPCSI2_CTX_CTRL1_CS_EN_MASK \ -+ (0x1 << ISPCSI2_CTX_CTRL1_CS_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_CS_EN_DISABLE \ -+ (0x0 << ISPCSI2_CTX_CTRL1_CS_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_CS_EN_ENABLE \ -+ (0x1 << ISPCSI2_CTX_CTRL1_CS_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT 4 -+#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_MASK \ -+ (0x1 << ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT) -+#define 
ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_DISABLE \ -+ (0x0 << ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_ENABLE \ -+ (0x1 << ISPCSI2_CTX_CTRL1_COUNT_UNLOCK_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_PING_PONG_SHIFT 3 -+#define ISPCSI2_CTX_CTRL1_PING_PONG_MASK \ -+ (0x1 << ISPCSI2_CTX_CTRL1_PING_PONG_SHIFT) -+#define ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT 0 -+#define ISPCSI2_CTX_CTRL1_CTX_EN_MASK \ -+ (0x1 << ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_CTX_EN_DISABLE \ -+ (0x0 << ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT) -+#define ISPCSI2_CTX_CTRL1_CTX_EN_ENABLE \ -+ (0x1 << ISPCSI2_CTX_CTRL1_CTX_EN_SHIFT) -+ -+#define ISPCSI2_CTX_CTRL2(n) ((0x074) + 0x20 * (n)) -+#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11 -+#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK \ -+ (0x3 << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT) -+#define ISPCSI2_CTX_CTRL2_FORMAT_SHIFT 0 -+#define ISPCSI2_CTX_CTRL2_FORMAT_MASK (0x3FF << \ -+ ISPCSI2_CTX_CTRL2_FORMAT_SHIFT) -+ -+#define ISPCSI2_CTX_DAT_OFST(n) ((0x078) + 0x20 * (n)) -+#define ISPCSI2_CTX_DAT_OFST_OFST_SHIFT 5 -+#define ISPCSI2_CTX_DAT_OFST_OFST_MASK (0x7FF << \ -+ ISPCSI2_CTX_DAT_OFST_OFST_SHIFT) -+ -+#define ISPCSI2_CTX_DAT_PING_ADDR(n) ((0x07C) + 0x20 * (n)) -+#define ISPCSI2_CTX_DAT_PONG_ADDR(n) ((0x080) + 0x20 * (n)) -+#define ISPCSI2_CTX_IRQENABLE(n) ((0x084) + 0x20 * (n)) -+#define ISPCSI2_CTX_IRQENABLE_ECC_CORRECTION_IRQ (1 << 8) -+#define ISPCSI2_CTX_IRQENABLE_LINE_NUMBER_IRQ (1 << 7) -+#define ISPCSI2_CTX_IRQENABLE_FRAME_NUMBER_IRQ (1 << 6) -+#define ISPCSI2_CTX_IRQENABLE_CS_IRQ (1 << 5) -+#define ISPCSI2_CTX_IRQENABLE_LE_IRQ (1 << 3) -+#define ISPCSI2_CTX_IRQENABLE_LS_IRQ (1 << 2) -+#define ISPCSI2_CTX_IRQENABLE_FE_IRQ (1 << 1) -+#define ISPCSI2_CTX_IRQENABLE_FS_IRQ 1 -+#define ISPCSI2_CTX_IRQSTATUS(n) ((0x088) + 0x20 * (n)) -+#define ISPCSI2_CTX_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 8) -+#define ISPCSI2_CTX_IRQSTATUS_LINE_NUMBER_IRQ (1 << 7) -+#define ISPCSI2_CTX_IRQSTATUS_FRAME_NUMBER_IRQ (1 << 6) -+#define ISPCSI2_CTX_IRQSTATUS_CS_IRQ (1 << 5) -+#define ISPCSI2_CTX_IRQSTATUS_LE_IRQ (1 << 3) -+#define ISPCSI2_CTX_IRQSTATUS_LS_IRQ (1 << 2) -+#define ISPCSI2_CTX_IRQSTATUS_FE_IRQ (1 << 1) -+#define ISPCSI2_CTX_IRQSTATUS_FS_IRQ 1 -+ -+#define ISPCSI2_CTX_CTRL3(n) ((0x08C) + 0x20 * (n)) -+#define ISPCSI2_CTX_CTRL3_ALPHA_SHIFT 5 -+#define ISPCSI2_CTX_CTRL3_ALPHA_MASK (0x3FFF << \ -+ ISPCSI2_CTX_CTRL3_ALPHA_SHIFT) -+ -+#define ISPCSI2PHY_CFG0 (0x000) -+#define ISPCSI2PHY_CFG0_THS_TERM_SHIFT 8 -+#define ISPCSI2PHY_CFG0_THS_TERM_MASK \ -+ (0xFF << ISPCSI2PHY_CFG0_THS_TERM_SHIFT) -+#define ISPCSI2PHY_CFG0_THS_TERM_RESETVAL \ -+ (0x04 << ISPCSI2PHY_CFG0_THS_TERM_SHIFT) -+#define ISPCSI2PHY_CFG0_THS_SETTLE_SHIFT 0 -+#define ISPCSI2PHY_CFG0_THS_SETTLE_MASK \ -+ (0xFF << ISPCSI2PHY_CFG0_THS_SETTLE_SHIFT) -+#define ISPCSI2PHY_CFG0_THS_SETTLE_RESETVAL \ -+ (0x27 << ISPCSI2PHY_CFG0_THS_SETTLE_SHIFT) -+#define ISPCSI2PHY_CFG1 (0x004) -+#define ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT 18 -+#define ISPCSI2PHY_CFG1_TCLK_TERM_MASK \ -+ (0x7F << ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT) -+#define ISPCSI2PHY_CFG1_TCLK_TERM__RESETVAL \ -+ (0x00 << ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT) -+#define ISPCSI2PHY_CFG1_RESERVED1_SHIFT 10 -+#define ISPCSI2PHY_CFG1_RESERVED1_MASK \ -+ (0xFF << ISPCSI2PHY_CFG1_RESERVED1_SHIFT) -+#define ISPCSI2PHY_CFG1_RESERVED1__RESETVAL \ -+ (0xB8 << ISPCSI2PHY_CFG1_RESERVED1_SHIFT) -+#define ISPCSI2PHY_CFG1_TCLK_MISS_SHIFT 8 -+#define ISPCSI2PHY_CFG1_TCLK_MISS_MASK \ -+ (0x3 << ISPCSI2PHY_CFG1_TCLK_MISS_SHIFT) -+#define ISPCSI2PHY_CFG1_TCLK_MISS__RESETVAL 
\ -+ (0x1 << ISPCSI2PHY_CFG1_TCLK_MISS_SHIFT) -+#define ISPCSI2PHY_CFG1_TCLK_SETTLE_SHIFT 0 -+#define ISPCSI2PHY_CFG1_TCLK_SETTLE_MASK \ -+ (0xFF << ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT) -+#define ISPCSI2PHY_CFG1_TCLK_SETTLE__RESETVAL \ -+ (0x0E << ISPCSI2PHY_CFG1_TCLK_TERM_SHIFT) -+#define ISPCSI2PHY_CFG1__RESETVAL (ISPCSI2PHY_CFG1_TCLK_TERM__RESETVAL | \ -+ ISPCSI2PHY_CFG1_RESERVED1__RESETVAL | \ -+ ISPCSI2PHY_CFG1_TCLK_MISS__RESETVAL | \ -+ ISPCSI2PHY_CFG1_TCLK_SETTLE__RESETVAL) -+#define ISPCSI2PHY_CFG1__EDITABLE_MASK (ISPCSI2PHY_CFG1_TCLK_TERM_MASK | \ -+ ISPCSI2PHY_CFG1_RESERVED1_MASK | \ -+ ISPCSI2PHY_CFG1_TCLK_MISS_MASK | \ -+ ISPCSI2PHY_CFG1_TCLK_SETTLE_MASK) -+ -+#endif /* __ISPREG_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispresizer.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispresizer.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispresizer.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispresizer.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,914 @@ -+/* -+ * ispresizer.c -+ * -+ * Driver Library for Resizer module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C)2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Sameer Venkatraman -+ * Mohit Jalori -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "ispresizer.h" -+ -+/* Default configuration of resizer,filter coefficients,yenh for camera isp */ -+static struct isprsz_coef ispreszdefcoef = { -+ { -+ 0x0000, 0x0100, 0x0000, 0x0000, -+ 0x03FA, 0x00F6, 0x0010, 0x0000, -+ 0x03F9, 0x00DB, 0x002C, 0x0000, -+ 0x03FB, 0x00B3, 0x0053, 0x03FF, -+ 0x03FD, 0x0082, 0x0084, 0x03FD, -+ 0x03FF, 0x0053, 0x00B3, 0x03FB, -+ 0x0000, 0x002C, 0x00DB, 0x03F9, -+ 0x0000, 0x0010, 0x00F6, 0x03FA -+ }, -+ { -+ 0x0000, 0x0100, 0x0000, 0x0000, -+ 0x03FA, 0x00F6, 0x0010, 0x0000, -+ 0x03F9, 0x00DB, 0x002C, 0x0000, -+ 0x03FB, 0x00B3, 0x0053, 0x03FF, -+ 0x03FD, 0x0082, 0x0084, 0x03FD, -+ 0x03FF, 0x0053, 0x00B3, 0x03FB, -+ 0x0000, 0x002C, 0x00DB, 0x03F9, -+ 0x0000, 0x0010, 0x00F6, 0x03FA -+ }, -+ { -+ 0x0004, 0x0023, 0x005A, 0x0058, -+ 0x0023, 0x0004, 0x0000, 0x0002, -+ 0x0018, 0x004d, 0x0060, 0x0031, -+ 0x0008, 0x0000, 0x0001, 0x000f, -+ 0x003f, 0x0062, 0x003f, 0x000f, -+ 0x0001, 0x0000, 0x0008, 0x0031, -+ 0x0060, 0x004d, 0x0018, 0x0002 -+ }, -+ { -+ 0x0004, 0x0023, 0x005A, 0x0058, -+ 0x0023, 0x0004, 0x0000, 0x0002, -+ 0x0018, 0x004d, 0x0060, 0x0031, -+ 0x0008, 0x0000, 0x0001, 0x000f, -+ 0x003f, 0x0062, 0x003f, 0x000f, -+ 0x0001, 0x0000, 0x0008, 0x0031, -+ 0x0060, 0x004d, 0x0018, 0x0002 -+ } -+}; -+ -+/* Structure for saving/restoring resizer module registers */ -+static struct isp_reg isprsz_reg_list[] = { -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF, 
0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT10, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT32, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT54, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT76, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT98, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1110, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1312, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1514, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1716, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT1918, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2120, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2322, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2524, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2726, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT2928, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_HFILT3130, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT10, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT32, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT54, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT76, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT98, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1110, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1312, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1514, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1716, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT1918, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2120, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2322, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2524, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2726, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT2928, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_VFILT3130, 0x0000}, -+ {OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH, 0x0000}, -+ {0, ISP_TOK_TERM, 0x0000} -+}; -+ -+/** -+ * ispresizer_applycrop - Apply crop to input image. -+ **/ -+void ispresizer_applycrop(struct isp_res_device *isp_res) -+{ -+ struct isp_device *isp = to_isp_device(isp_res); -+ -+ if (!isp_res->applycrop) -+ return; -+ -+ ispresizer_s_pipeline(isp_res, &isp->pipeline); -+ -+ isp_res->applycrop = 0; -+ -+ return; -+} -+ -+/** -+ * ispresizer_config_shadow_registers - Configure shadow registers. -+ **/ -+void ispresizer_config_shadow_registers(struct isp_res_device *isp_res) -+{ -+ ispresizer_applycrop(isp_res); -+ -+ return; -+} -+ -+int ispresizer_config_crop(struct isp_res_device *isp_res, -+ struct v4l2_crop *a) -+{ -+ struct isp_device *isp = to_isp_device(isp_res); -+ struct v4l2_crop *crop = a; -+ int rval; -+ -+ if (crop->c.left < 0) -+ crop->c.left = 0; -+ if (crop->c.width < 0) -+ crop->c.width = 0; -+ if (crop->c.top < 0) -+ crop->c.top = 0; -+ if (crop->c.height < 0) -+ crop->c.height = 0; -+ -+ if (crop->c.left >= isp->pipeline.prv_out_w_img) -+ crop->c.left = isp->pipeline.prv_out_w_img - 1; -+ if (crop->c.top >= isp->pipeline.rsz_out_h) -+ crop->c.top = isp->pipeline.rsz_out_h - 1; -+ -+ /* Make sure the crop rectangle is never smaller than width -+ * and height divided by 4, since the resizer cannot upscale it -+ * by more than 4x. 
*/ -+ -+ if (crop->c.width < (isp->pipeline.rsz_out_w + 3) / 4) -+ crop->c.width = (isp->pipeline.rsz_out_w + 3) / 4; -+ if (crop->c.height < (isp->pipeline.rsz_out_h + 3) / 4) -+ crop->c.height = (isp->pipeline.rsz_out_h + 3) / 4; -+ -+ if (crop->c.left + crop->c.width > isp->pipeline.prv_out_w_img) -+ crop->c.width = isp->pipeline.prv_out_w_img - crop->c.left; -+ if (crop->c.top + crop->c.height > isp->pipeline.prv_out_h_img) -+ crop->c.height = isp->pipeline.prv_out_h_img - crop->c.top; -+ -+ isp->pipeline.rsz_crop = crop->c; -+ -+ rval = ispresizer_try_pipeline(isp_res, &isp->pipeline); -+ if (rval) -+ return rval; -+ -+ isp_res->applycrop = 1; -+ -+ if (isp->running == ISP_STOPPED) -+ ispresizer_applycrop(isp_res); -+ -+ return 0; -+} -+ -+/** -+ * ispresizer_request - Reserves the Resizer module. -+ * -+ * Allows only one user at a time. -+ * -+ * Returns 0 if successful, or -EBUSY if resizer module was already requested. -+ **/ -+int ispresizer_request(struct isp_res_device *isp_res) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ mutex_lock(&isp_res->ispres_mutex); -+ if (!isp_res->res_inuse) { -+ isp_res->res_inuse = 1; -+ mutex_unlock(&isp_res->ispres_mutex); -+ isp_reg_writel(dev, -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_MAIN, ISP_CTRL) | -+ ISPCTRL_SBL_WR0_RAM_EN | -+ ISPCTRL_RSZ_CLK_EN, -+ OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+ return 0; -+ } else { -+ mutex_unlock(&isp_res->ispres_mutex); -+ dev_err(dev, "resizer: Module Busy\n"); -+ return -EBUSY; -+ } -+} -+ -+/** -+ * ispresizer_free - Makes Resizer module free. -+ * -+ * Returns 0 if successful, or -EINVAL if resizer module was already freed. -+ **/ -+int ispresizer_free(struct isp_res_device *isp_res) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ mutex_lock(&isp_res->ispres_mutex); -+ if (isp_res->res_inuse) { -+ isp_res->res_inuse = 0; -+ mutex_unlock(&isp_res->ispres_mutex); -+ isp_reg_and(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, -+ ~(ISPCTRL_RSZ_CLK_EN | ISPCTRL_SBL_WR0_RAM_EN)); -+ return 0; -+ } else { -+ mutex_unlock(&isp_res->ispres_mutex); -+ DPRINTK_ISPRESZ("ISP_ERR : Resizer Module already freed\n"); -+ return -EINVAL; -+ } -+} -+ -+/** -+ * ispresizer_config_datapath - Specifies which input to use in resizer module -+ * @input: Indicates the module that gives the image to resizer. -+ * -+ * Sets up the default resizer configuration according to the arguments. -+ * -+ * Returns 0 if successful, or -EINVAL if an unsupported input was requested. 
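[Editor's note] The clamp in ispresizer_config_crop() above follows directly from the limit stated in its comment: the resizer can upscale by at most 4x, so the crop rectangle may never shrink below a quarter of the requested output in either direction. A small worked instance of that arithmetic, not part of the patch and using assumed numbers:

/* Editor's sketch (not part of the patch): the 4x upscale clamp from
 * ispresizer_config_crop(), evaluated for one assumed case.             */
unsigned int rsz_out_w = 1280;             /* requested output width      */
unsigned int crop_w    = 200;              /* crop width asked for        */
unsigned int min_w     = (rsz_out_w + 3) / 4;   /* 320, smallest legal    */

if (crop_w < min_w)
	crop_w = min_w;                    /* 200 -> 320, keeps scale <= 4x */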
-+ **/ -+int ispresizer_config_datapath(struct isp_res_device *isp_res, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_res); -+ u32 cnt = 0; -+ -+ DPRINTK_ISPRESZ("ispresizer_config_datapath()+\n"); -+ -+ switch (pipe->rsz_in) { -+ case RSZ_OTFLY_YUV: -+ cnt &= ~ISPRSZ_CNT_INPTYP; -+ cnt &= ~ISPRSZ_CNT_INPSRC; -+ ispresizer_set_inaddr(isp_res, 0); -+ ispresizer_config_inlineoffset(isp_res, 0); -+ break; -+ case RSZ_MEM_YUV: -+ cnt |= ISPRSZ_CNT_INPSRC; -+ cnt &= ~ISPRSZ_CNT_INPTYP; -+ break; -+ case RSZ_MEM_COL8: -+ cnt |= ISPRSZ_CNT_INPSRC; -+ cnt |= ISPRSZ_CNT_INPTYP; -+ break; -+ default: -+ dev_err(dev, "resizer: Wrong Input\n"); -+ return -EINVAL; -+ } -+ isp_reg_or(dev, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, cnt); -+ ispresizer_config_ycpos(isp_res, 0); -+ ispresizer_config_filter_coef(isp_res, &ispreszdefcoef); -+ ispresizer_enable_cbilin(isp_res, 0); -+ ispresizer_config_luma_enhance(isp_res, &isp_res->defaultyenh); -+ DPRINTK_ISPRESZ("ispresizer_config_datapath()-\n"); -+ return 0; -+} -+ -+/** -+ * ispresizer_try_size - Validates input and output images size. -+ * @input_w: input width for the resizer in number of pixels per line -+ * @input_h: input height for the resizer in number of lines -+ * @output_w: output width from the resizer in number of pixels per line -+ * resizer when writing to memory needs this to be multiple of 16. -+ * @pipe->rsz_out_h: output height for the resizer in number of lines, must be -+ * even. -+ * -+ * Calculates the horizontal and vertical resize ratio, number of pixels to -+ * be cropped in the resizer module and checks the validity of various -+ * parameters. Formula used for calculation is:- -+ * -+ * 8-phase 4-tap mode :- -+ * inputwidth = (32 * sph + (ow - 1) * hrsz + 16) >> 8 + 7 -+ * inputheight = (32 * spv + (oh - 1) * vrsz + 16) >> 8 + 4 -+ * endpahse for width = ((32 * sph + (ow - 1) * hrsz + 16) >> 5) % 8 -+ * endphase for height = ((32 * sph + (oh - 1) * hrsz + 16) >> 5) % 8 -+ * -+ * 4-phase 7-tap mode :- -+ * inputwidth = (64 * sph + (ow - 1) * hrsz + 32) >> 8 + 7 -+ * inputheight = (64 * spv + (oh - 1) * vrsz + 32) >> 8 + 7 -+ * endpahse for width = ((64 * sph + (ow - 1) * hrsz + 32) >> 6) % 4 -+ * endphase for height = ((64 * sph + (oh - 1) * hrsz + 32) >> 6) % 4 -+ * -+ * Where: -+ * sph = Start phase horizontal -+ * spv = Start phase vertical -+ * ow = Output width -+ * oh = Output height -+ * hrsz = Horizontal resize value -+ * vrsz = Vertical resize value -+ * -+ * Fills up the output/input widht/height, horizontal/vertical resize ratio, -+ * horizontal/vertical crop variables in the isp_res structure. 
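[Editor's note] The equations above are what ispresizer_try_pipeline() uses to map a requested output size back onto the input (crop) size the hardware will actually consume. A worked instance of the 8-phase/4-tap input-height equation, not part of the patch; the numbers are assumptions, and the driver itself refines vrsz through the rsz_4/rsz_7 variants before applying it:

/* Editor's sketch (not part of the patch): the documented 4-tap
 * input-height equation with assumed example values.                    */
unsigned int spv = 1;                  /* DEFAULTSTPHASE                  */
unsigned int oh  = 480;                /* requested output height         */
unsigned int ih  = 600;                /* cropped input height            */
unsigned int vrsz, in_h;

vrsz = (ih * 256) / oh;                        /* 320 <= MID_RESIZE_VALUE */
in_h = ((32 * spv + (oh - 1) * vrsz + 16) / 256) + 4;   /* 602 lines      */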
-+ **/ -+int ispresizer_try_pipeline(struct isp_res_device *isp_res, -+ struct isp_pipeline *pipe) -+{ -+ u32 rsz, rsz_7, rsz_4; -+ u32 sph; -+ int max_in_otf, max_out_7tap; -+ -+ if (pipe->rsz_crop.width < 32 || pipe->rsz_crop.height < 32) { -+ DPRINTK_ISPCCDC("ISP_ERR: RESIZER cannot handle input width" -+ " less than 32 pixels or height less than" -+ " 32\n"); -+ return -EINVAL; -+ } -+ -+ if (pipe->rsz_crop.height > MAX_IN_HEIGHT) -+ return -EINVAL; -+ -+ if (pipe->rsz_out_w < 16) -+ pipe->rsz_out_w = 16; -+ -+ if (pipe->rsz_out_h < 2) -+ pipe->rsz_out_h = 2; -+ -+ if (omap_rev() == OMAP3430_REV_ES1_0) { -+ max_in_otf = MAX_IN_WIDTH_ONTHEFLY_MODE; -+ max_out_7tap = MAX_7TAP_VRSZ_OUTWIDTH; -+ } else { -+ max_in_otf = MAX_IN_WIDTH_ONTHEFLY_MODE_ES2; -+ max_out_7tap = MAX_7TAP_VRSZ_OUTWIDTH_ES2; -+ } -+ -+ if (pipe->rsz_in == RSZ_OTFLY_YUV) { -+ if (pipe->rsz_crop.width > max_in_otf) -+ return -EINVAL; -+ } else { -+ if (pipe->rsz_crop.width > MAX_IN_WIDTH_MEMORY_MODE) -+ return -EINVAL; -+ } -+ -+ pipe->rsz_out_h &= 0xfffffffe; -+ sph = DEFAULTSTPHASE; -+ -+ rsz_7 = ((pipe->rsz_crop.height - 7) * 256) / (pipe->rsz_out_h - 1); -+ rsz_4 = ((pipe->rsz_crop.height - 4) * 256) / (pipe->rsz_out_h - 1); -+ -+ rsz = (pipe->rsz_crop.height * 256) / pipe->rsz_out_h; -+ -+ if (rsz <= MID_RESIZE_VALUE) { -+ rsz = rsz_4; -+ if (rsz < MINIMUM_RESIZE_VALUE) { -+ rsz = MINIMUM_RESIZE_VALUE; -+ pipe->rsz_out_h = -+ (((pipe->rsz_crop.height - 4) * 256) / rsz) + 1; -+ } -+ } else { -+ rsz = rsz_7; -+ if (pipe->rsz_out_w > max_out_7tap) -+ pipe->rsz_out_w = max_out_7tap; -+ if (rsz > MAXIMUM_RESIZE_VALUE) { -+ rsz = MAXIMUM_RESIZE_VALUE; -+ pipe->rsz_out_h = -+ (((pipe->rsz_crop.height - 7) * 256) / rsz) + 1; -+ } -+ } -+ -+ if (rsz > MID_RESIZE_VALUE) { -+ pipe->rsz_crop.height = -+ (((64 * sph) + ((pipe->rsz_out_h - 1) * rsz) + 32) -+ / 256) + 7; -+ } else { -+ pipe->rsz_crop.height = -+ (((32 * sph) + ((pipe->rsz_out_h - 1) * rsz) + 16) -+ / 256) + 4; -+ } -+ -+ isp_res->v_resz = rsz; -+ /* FIXME: pipe->rsz_crop.height here is the real input height! */ -+ isp_res->v_startphase = sph; -+ -+ pipe->rsz_out_w &= 0xfffffff0; -+ sph = DEFAULTSTPHASE; -+ -+ rsz_7 = ((pipe->rsz_crop.width - 7) * 256) / (pipe->rsz_out_w - 1); -+ rsz_4 = ((pipe->rsz_crop.width - 4) * 256) / (pipe->rsz_out_w - 1); -+ -+ rsz = (pipe->rsz_crop.width * 256) / pipe->rsz_out_w; -+ if (rsz > MID_RESIZE_VALUE) { -+ rsz = rsz_7; -+ if (rsz > MAXIMUM_RESIZE_VALUE) { -+ rsz = MAXIMUM_RESIZE_VALUE; -+ pipe->rsz_out_w = -+ (((pipe->rsz_crop.width - 7) * 256) / rsz) + 1; -+ pipe->rsz_out_w = (pipe->rsz_out_w + 0xf) & 0xfffffff0; -+ } -+ } else { -+ rsz = rsz_4; -+ if (rsz < MINIMUM_RESIZE_VALUE) { -+ rsz = MINIMUM_RESIZE_VALUE; -+ pipe->rsz_out_w = -+ (((pipe->rsz_crop.width - 4) * 256) / rsz) + 1; -+ pipe->rsz_out_w = (pipe->rsz_out_w + 0xf) & 0xfffffff0; -+ } -+ } -+ -+ /* Recalculate input based on TRM equations */ -+ if (rsz > MID_RESIZE_VALUE) { -+ pipe->rsz_crop.width = -+ (((64 * sph) + ((pipe->rsz_out_w - 1) * rsz) + 32) -+ / 256) + 7; -+ } else { -+ pipe->rsz_crop.width = -+ (((32 * sph) + ((pipe->rsz_out_w - 1) * rsz) + 16) -+ / 256) + 7; -+ } -+ -+ isp_res->h_resz = rsz; -+ /* FIXME: pipe->rsz_crop.width here is the real input width! */ -+ isp_res->h_startphase = sph; -+ -+ pipe->rsz_out_w_img = pipe->rsz_out_w; -+ -+ return 0; -+} -+ -+/** -+ * ispresizer_config_size - Configures input and output image size. -+ * @pipe->rsz_crop.width: input width for the resizer in number of pixels per -+ * line. 
-+ * @pipe->rsz_crop.height: input height for the resizer in number of lines. -+ * @pipe->rsz_out_w: output width from the resizer in number of pixels per line. -+ * @pipe->rsz_out_h: output height for the resizer in number of lines. -+ * -+ * Configures the appropriate values stored in the isp_res structure in the -+ * resizer registers. -+ * -+ * Returns 0 if successful, or -EINVAL if passed values haven't been verified -+ * with ispresizer_try_size() previously. -+ **/ -+int ispresizer_s_pipeline(struct isp_res_device *isp_res, -+ struct isp_pipeline *pipe) -+{ -+ struct device *dev = to_device(isp_res); -+ int i, j; -+ u32 res; -+ int rval; -+ -+ rval = ispresizer_config_datapath(isp_res, pipe); -+ if (rval) -+ return rval; -+ -+ /* Set Resizer input address and offset adderss */ -+ ispresizer_config_inlineoffset(isp_res, -+ pipe->prv_out_w * ISP_BYTES_PER_PIXEL); -+ -+ res = isp_reg_readl(dev, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) & -+ ~(ISPRSZ_CNT_HSTPH_MASK | ISPRSZ_CNT_VSTPH_MASK); -+ isp_reg_writel(dev, res | -+ (isp_res->h_startphase << ISPRSZ_CNT_HSTPH_SHIFT) | -+ (isp_res->v_startphase << ISPRSZ_CNT_VSTPH_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_CNT); -+ /* Set start address for cropping */ -+ ispresizer_set_inaddr(isp_res, isp_res->tmp_buf); -+ -+ isp_reg_writel(dev, -+ (pipe->rsz_crop.width << ISPRSZ_IN_SIZE_HORZ_SHIFT) | -+ (pipe->rsz_crop.height << -+ ISPRSZ_IN_SIZE_VERT_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_IN_SIZE); -+ if (!isp_res->algo) { -+ isp_reg_writel(dev, -+ (pipe->rsz_out_w << ISPRSZ_OUT_SIZE_HORZ_SHIFT) | -+ (pipe->rsz_out_h << ISPRSZ_OUT_SIZE_VERT_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_OUT_SIZE); -+ } else { -+ isp_reg_writel(dev, -+ ((pipe->rsz_out_w - 4) -+ << ISPRSZ_OUT_SIZE_HORZ_SHIFT) | -+ (pipe->rsz_out_h << ISPRSZ_OUT_SIZE_VERT_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_OUT_SIZE); -+ } -+ -+ res = isp_reg_readl(dev, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) & -+ ~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK); -+ isp_reg_writel(dev, res | -+ ((isp_res->h_resz - 1) << ISPRSZ_CNT_HRSZ_SHIFT) | -+ ((isp_res->v_resz - 1) << ISPRSZ_CNT_VRSZ_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_CNT); -+ if (isp_res->h_resz <= MID_RESIZE_VALUE) { -+ j = 0; -+ for (i = 0; i < 16; i++) { -+ isp_reg_writel(dev, -+ (isp_res->coeflist.h_filter_coef_4tap[j] -+ << ISPRSZ_HFILT10_COEF0_SHIFT) | -+ (isp_res->coeflist.h_filter_coef_4tap[j + 1] -+ << ISPRSZ_HFILT10_COEF1_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_HFILT10 + (i * 0x04)); -+ j += 2; -+ } -+ } else { -+ j = 0; -+ for (i = 0; i < 16; i++) { -+ if ((i + 1) % 4 == 0) { -+ isp_reg_writel(dev, -+ (isp_res->coeflist. -+ h_filter_coef_7tap[j] << -+ ISPRSZ_HFILT10_COEF0_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_HFILT10 + (i * 0x04)); -+ j += 1; -+ } else { -+ isp_reg_writel(dev, -+ (isp_res->coeflist. -+ h_filter_coef_7tap[j] << -+ ISPRSZ_HFILT10_COEF0_SHIFT) | -+ (isp_res->coeflist. -+ h_filter_coef_7tap[j+1] << -+ ISPRSZ_HFILT10_COEF1_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_HFILT10 + (i * 0x04)); -+ j += 2; -+ } -+ } -+ } -+ if (isp_res->v_resz <= MID_RESIZE_VALUE) { -+ j = 0; -+ for (i = 0; i < 16; i++) { -+ isp_reg_writel(dev, (isp_res->coeflist. -+ v_filter_coef_4tap[j] << -+ ISPRSZ_VFILT10_COEF0_SHIFT) | -+ (isp_res->coeflist. -+ v_filter_coef_4tap[j + 1] << -+ ISPRSZ_VFILT10_COEF1_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_VFILT10 + (i * 0x04)); -+ j += 2; -+ } -+ } else { -+ j = 0; -+ for (i = 0; i < 16; i++) { -+ if ((i + 1) % 4 == 0) { -+ isp_reg_writel(dev, -+ (isp_res->coeflist. 
-+ v_filter_coef_7tap[j] << -+ ISPRSZ_VFILT10_COEF0_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_VFILT10 + (i * 0x04)); -+ j += 1; -+ } else { -+ isp_reg_writel(dev, -+ (isp_res->coeflist. -+ v_filter_coef_7tap[j] << -+ ISPRSZ_VFILT10_COEF0_SHIFT) | -+ (isp_res->coeflist. -+ v_filter_coef_7tap[j+1] << -+ ISPRSZ_VFILT10_COEF1_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_VFILT10 + (i * 0x04)); -+ j += 2; -+ } -+ } -+ } -+ -+ ispresizer_config_outlineoffset(isp_res, pipe->rsz_out_w*2); -+ -+ if (pipe->pix.pixelformat == V4L2_PIX_FMT_UYVY) -+ ispresizer_config_ycpos(isp_res, 0); -+ else -+ ispresizer_config_ycpos(isp_res, 1); -+ -+ DPRINTK_ISPRESZ("ispresizer_config_size()-\n"); -+ return 0; -+} -+ -+/** -+ * ispresizer_enable - Enables the resizer module. -+ * @enable: 1 - Enable, 0 - Disable -+ * -+ * Client should configure all the sub modules in resizer before this. -+ **/ -+void ispresizer_enable(struct isp_res_device *isp_res, int enable) -+{ -+ struct device *dev = to_device(isp_res); -+ int val; -+ -+ DPRINTK_ISPRESZ("+ispresizer_enable()+\n"); -+ if (enable) { -+ val = (isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) & 0x2) | -+ ISPRSZ_PCR_ENABLE; -+ } else { -+ val = isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) & -+ ~ISPRSZ_PCR_ENABLE; -+ } -+ isp_reg_writel(dev, val, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR); -+ DPRINTK_ISPRESZ("+ispresizer_enable()-\n"); -+} -+ -+/** -+ * ispresizer_busy - Checks if ISP resizer is busy. -+ * -+ * Returns busy field from ISPRSZ_PCR register. -+ **/ -+int ispresizer_busy(struct isp_res_device *isp_res) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) & -+ ISPPRV_PCR_BUSY; -+} -+ -+/** -+ * ispresizer_config_startphase - Sets the horizontal and vertical start phase. -+ * @hstartphase: horizontal start phase (0 - 7). -+ * @vstartphase: vertical startphase (0 - 7). -+ * -+ * This API just updates the isp_res struct. Actual register write happens in -+ * ispresizer_config_size. -+ **/ -+void ispresizer_config_startphase(struct isp_res_device *isp_res, -+ u8 hstartphase, u8 vstartphase) -+{ -+ DPRINTK_ISPRESZ("ispresizer_config_startphase()+\n"); -+ isp_res->h_startphase = hstartphase; -+ isp_res->v_startphase = vstartphase; -+ DPRINTK_ISPRESZ("ispresizer_config_startphase()-\n"); -+} -+ -+/** -+ * ispresizer_config_ycpos - Specifies if output should be in YC or CY format. -+ * @yc: 0 - YC format, 1 - CY format -+ **/ -+void ispresizer_config_ycpos(struct isp_res_device *isp_res, u8 yc) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_config_ycpos()+\n"); -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, -+ ~ISPRSZ_CNT_YCPOS, (yc ? ISPRSZ_CNT_YCPOS : 0)); -+ DPRINTK_ISPRESZ("ispresizer_config_ycpos()-\n"); -+} -+ -+/** -+ * Sets the chrominance algorithm -+ * @cbilin: 0 - chrominance uses same processing as luminance, -+ * 1 - bilinear interpolation processing -+ **/ -+void ispresizer_enable_cbilin(struct isp_res_device *isp_res, u8 enable) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_enable_cbilin()+\n"); -+ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, -+ ~ISPRSZ_CNT_CBILIN, (enable ? ISPRSZ_CNT_CBILIN : 0)); -+ DPRINTK_ISPRESZ("ispresizer_enable_cbilin()-\n"); -+} -+ -+/** -+ * ispresizer_config_luma_enhance - Configures luminance enhancer parameters. -+ * @yenh: Pointer to structure containing desired values for core, slope, gain -+ * and algo parameters. 
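[Editor's note] The HFILT/VFILT programming loops above pack the filter coefficients differently per mode: in 8-phase/4-tap mode each 32-bit register carries two of the 32 coefficients (the COEF0 and COEF1 fields), while in 4-phase/7-tap mode only 28 coefficients exist, so every fourth register ((i + 1) % 4 == 0) carries just one. A tiny sketch, not part of the patch, mirroring the 7-tap bookkeeping:

/* Editor's sketch (not part of the patch): coefficients per HFILT/VFILT
 * register in 7-tap mode, as in the loops above.                        */
int i, total = 0;

for (i = 0; i < 16; i++)
	total += ((i + 1) % 4 == 0) ? 1 : 2;   /* regs 3,7,11,15 hold one  */
/* total == 28 == ARRAY_SIZE(h_filter_coef_7tap)                          */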
-+ **/ -+void ispresizer_config_luma_enhance(struct isp_res_device *isp_res, -+ struct isprsz_yenh *yenh) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_config_luma_enhance()+\n"); -+ isp_res->algo = yenh->algo; -+ isp_reg_writel(dev, (yenh->algo << ISPRSZ_YENH_ALGO_SHIFT) | -+ (yenh->gain << ISPRSZ_YENH_GAIN_SHIFT) | -+ (yenh->slope << ISPRSZ_YENH_SLOP_SHIFT) | -+ (yenh->coreoffset << ISPRSZ_YENH_CORE_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, -+ ISPRSZ_YENH); -+ DPRINTK_ISPRESZ("ispresizer_config_luma_enhance()-\n"); -+} -+ -+/** -+ * ispresizer_config_filter_coef - Sets filter coefficients for 4 & 7-tap mode. -+ * This API just updates the isp_res struct.Actual register write happens in -+ * ispresizer_config_size. -+ * @coef: Structure containing horizontal and vertical filter coefficients for -+ * both 4-tap and 7-tap mode. -+ **/ -+void ispresizer_config_filter_coef(struct isp_res_device *isp_res, -+ struct isprsz_coef *coef) -+{ -+ int i; -+ DPRINTK_ISPRESZ("ispresizer_config_filter_coef()+\n"); -+ for (i = 0; i < 32; i++) { -+ isp_res->coeflist.h_filter_coef_4tap[i] = -+ coef->h_filter_coef_4tap[i]; -+ isp_res->coeflist.v_filter_coef_4tap[i] = -+ coef->v_filter_coef_4tap[i]; -+ } -+ for (i = 0; i < 28; i++) { -+ isp_res->coeflist.h_filter_coef_7tap[i] = -+ coef->h_filter_coef_7tap[i]; -+ isp_res->coeflist.v_filter_coef_7tap[i] = -+ coef->v_filter_coef_7tap[i]; -+ } -+ DPRINTK_ISPRESZ("ispresizer_config_filter_coef()-\n"); -+} -+ -+/** -+ * ispresizer_config_inlineoffset - Configures the read address line offset. -+ * @offset: Line Offset for the input image. -+ * -+ * Returns 0 if successful, or -EINVAL if offset is not 32 bits aligned. -+ **/ -+int ispresizer_config_inlineoffset(struct isp_res_device *isp_res, u32 offset) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_config_inlineoffset()+\n"); -+ if (offset % 32) -+ return -EINVAL; -+ isp_reg_writel(dev, offset << ISPRSZ_SDR_INOFF_OFFSET_SHIFT, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF); -+ DPRINTK_ISPRESZ("ispresizer_config_inlineoffset()-\n"); -+ return 0; -+} -+ -+/** -+ * ispresizer_set_inaddr - Sets the memory address of the input frame. -+ * @addr: 32bit memory address aligned on 32byte boundary. -+ * -+ * Returns 0 if successful, or -EINVAL if address is not 32 bits aligned. -+ **/ -+int ispresizer_set_inaddr(struct isp_res_device *isp_res, u32 addr) -+{ -+ struct device *dev = to_device(isp_res); -+ struct isp_device *isp = to_isp_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_set_inaddr()+\n"); -+ -+ if (addr % 32) -+ return -EINVAL; -+ isp_res->tmp_buf = addr; -+ /* FIXME: is this the right place to put crop-related junk? */ -+ isp_reg_writel(dev, -+ isp_res->tmp_buf + ISP_BYTES_PER_PIXEL -+ * ((isp->pipeline.rsz_crop.left & ~0xf) + -+ isp->pipeline.prv_out_w -+ * isp->pipeline.rsz_crop.top), -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD); -+ /* Set the fractional part of the starting address. Needed for crop */ -+ isp_reg_writel(dev, ((isp->pipeline.rsz_crop.left & 0xf) << -+ ISPRSZ_IN_START_HORZ_ST_SHIFT) | -+ (0x00 << ISPRSZ_IN_START_VERT_ST_SHIFT), -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START); -+ -+ DPRINTK_ISPRESZ("ispresizer_set_inaddr()-\n"); -+ return 0; -+} -+ -+/** -+ * ispresizer_config_outlineoffset - Configures the write address line offset. -+ * @offset: Line offset for the preview output. -+ * -+ * Returns 0 if successful, or -EINVAL if address is not 32 bits aligned. 
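[Editor's note] ispresizer_set_inaddr() above must keep the SDRAM start address 32-byte aligned, so the crop's left edge is split in two: the 16-pixel-aligned part is folded into ISPRSZ_SDR_INADD, and the remaining 0-15 pixels go into the HORZ_ST field of ISPRSZ_IN_START. A sketch of that split, not part of the patch; the numbers are assumptions and ISP_BYTES_PER_PIXEL is taken to be 2 (YUV 4:2:2):

/* Editor's sketch (not part of the patch): how the crop offset is split
 * between ISPRSZ_SDR_INADD and ISPRSZ_IN_START.                         */
u32 base      = 0x80000000;     /* 32-byte aligned frame buffer           */
u32 prv_out_w = 1296;           /* previewer output line width, pixels    */
u32 left = 23, top = 10;        /* crop origin                            */

u32 coarse = left & ~0xf;       /* 16: folded into the start address      */
u32 frac   = left & 0xf;        /* 7:  written to IN_START HORZ_ST        */
u32 inadd  = base + 2 * (coarse + prv_out_w * top);   /* -> SDR_INADD     */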
-+ **/ -+int ispresizer_config_outlineoffset(struct isp_res_device *isp_res, u32 offset) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_config_outlineoffset()+\n"); -+ if (offset % 32) -+ return -EINVAL; -+ isp_reg_writel(dev, offset << ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF); -+ DPRINTK_ISPRESZ("ispresizer_config_outlineoffset()-\n"); -+ return 0; -+} -+ -+/** -+ * Configures the memory address to which the output frame is written. -+ * @addr: 32bit memory address aligned on 32byte boundary. -+ **/ -+int ispresizer_set_outaddr(struct isp_res_device *isp_res, u32 addr) -+{ -+ struct device *dev = to_device(isp_res); -+ -+ DPRINTK_ISPRESZ("ispresizer_set_outaddr()+\n"); -+ if (addr % 32) -+ return -EINVAL; -+ isp_reg_writel(dev, addr << ISPRSZ_SDR_OUTADD_ADDR_SHIFT, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD); -+ DPRINTK_ISPRESZ("ispresizer_set_outaddr()-\n"); -+ return 0; -+} -+ -+/** -+ * ispresizer_save_context - Saves the values of the resizer module registers. -+ **/ -+void ispresizer_save_context(struct device *dev) -+{ -+ DPRINTK_ISPRESZ("Saving context\n"); -+ isp_save_context(dev, isprsz_reg_list); -+} -+ -+/** -+ * ispresizer_restore_context - Restores resizer module register values. -+ **/ -+void ispresizer_restore_context(struct device *dev) -+{ -+ DPRINTK_ISPRESZ("Restoring context\n"); -+ isp_restore_context(dev, isprsz_reg_list); -+} -+ -+/** -+ * ispresizer_print_status - Prints the values of the resizer module registers. -+ **/ -+void ispresizer_print_status(struct isp_res_device *isp_res) -+{ -+#ifdef OMAP_ISPRESZ_DEBUG -+ struct device *dev = to_device(isp_res); -+#endif -+ -+ if (!is_ispresz_debug_enabled()) -+ return; -+ DPRINTK_ISPRESZ("###ISP_CTRL inresizer =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_MAIN, ISP_CTRL)); -+ DPRINTK_ISPRESZ("###ISP_IRQ0ENABLE in resizer =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE)); -+ DPRINTK_ISPRESZ("###ISP_IRQ0STATUS in resizer =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS)); -+ DPRINTK_ISPRESZ("###RSZ PCR =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR)); -+ DPRINTK_ISPRESZ("###RSZ CNT =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT)); -+ DPRINTK_ISPRESZ("###RSZ OUT SIZE =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE)); -+ DPRINTK_ISPRESZ("###RSZ IN START =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START)); -+ DPRINTK_ISPRESZ("###RSZ IN SIZE =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE)); -+ DPRINTK_ISPRESZ("###RSZ SDR INADD =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD)); -+ DPRINTK_ISPRESZ("###RSZ SDR INOFF =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF)); -+ DPRINTK_ISPRESZ("###RSZ SDR OUTADD =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD)); -+ DPRINTK_ISPRESZ("###RSZ SDR OTOFF =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF)); -+ DPRINTK_ISPRESZ("###RSZ YENH =0x%x\n", -+ isp_reg_readl(dev, -+ OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH)); -+} -+ -+/** -+ * isp_resizer_init - Module Initialisation. -+ * -+ * Always returns 0. 
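[Editor's note] The isprsz_reg_list table near the top of this file is what the two context helpers above walk; they are meant to bracket an ISP power transition so the resizer comes back with identical settings. A minimal usage sketch, not part of the patch:

/* Editor's sketch (not part of the patch): bracketing an ISP power
 * transition with the resizer context helpers.                          */
ispresizer_save_context(dev);      /* snapshot every isprsz_reg_list entry */
/* ... ISP hits off-mode / reset and loses register state ...             */
ispresizer_restore_context(dev);   /* write the saved values back          */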
-+ **/ -+int __init isp_resizer_init(struct device *dev) -+{ -+ struct isp_device *isp = dev_get_drvdata(dev); -+ struct isp_res_device *isp_res = &isp->isp_res; -+ -+ mutex_init(&isp_res->ispres_mutex); -+ -+ return 0; -+} -+ -+/** -+ * isp_resizer_cleanup - Module Cleanup. -+ **/ -+void isp_resizer_cleanup(struct device *dev) -+{ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispresizer.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispresizer.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispresizer.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispresizer.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,189 @@ -+/* -+ * ispresizer.h -+ * -+ * Driver header file for Resizer module in TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * Contributors: -+ * Sameer Venkatraman -+ * Mohit Jalori -+ * Sergio Aguirre -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#ifndef OMAP_ISP_RESIZER_H -+#define OMAP_ISP_RESIZER_H -+ -+/* -+ * Resizer Constants -+ */ -+#define MAX_IN_WIDTH_MEMORY_MODE 4095 -+ -+#define MAX_IN_WIDTH_ONTHEFLY_MODE 1280 -+#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES2 4095 -+#define MAX_IN_HEIGHT 4095 -+#define MINIMUM_RESIZE_VALUE 64 -+#define MAXIMUM_RESIZE_VALUE 1024 -+#define MID_RESIZE_VALUE 512 -+ -+#define MAX_7TAP_HRSZ_OUTWIDTH 1280 -+#define MAX_7TAP_VRSZ_OUTWIDTH 640 -+ -+#define MAX_7TAP_HRSZ_OUTWIDTH_ES2 3300 -+#define MAX_7TAP_VRSZ_OUTWIDTH_ES2 1650 -+ -+#define DEFAULTSTPIXEL 0 -+#define DEFAULTSTPHASE 1 -+#define DEFAULTHSTPIXEL4TAPMODE 3 -+#define FOURPHASE 4 -+#define EIGHTPHASE 8 -+#define RESIZECONSTANT 256 -+#define SHIFTER4TAPMODE 0 -+#define SHIFTER7TAPMODE 1 -+#define DEFAULTOFFSET 7 -+#define OFFSETVERT4TAPMODE 4 -+#define OPWDALIGNCONSTANT 0xfffffff0 -+ -+/* -+ * The client is supposed to call resizer API in the following sequence: -+ * - request() -+ * - config_datatpath() -+ * - optionally config/enable sub modules -+ * - try/config size -+ * - setup callback -+ * - setup in/out memory offsets and ptrs -+ * - enable() -+ * ... -+ * - disable() -+ * - free() -+ */ -+ -+enum resizer_input { -+ RSZ_OTFLY_YUV, -+ RSZ_MEM_YUV, -+ RSZ_MEM_COL8 -+}; -+ -+/** -+ * struct isprsz_coef - Structure for resizer filter coeffcients. -+ * @h_filter_coef_4tap: Horizontal filter coefficients for 8-phase/4-tap -+ * mode (.5x-4x) -+ * @v_filter_coef_4tap: Vertical filter coefficients for 8-phase/4-tap -+ * mode (.5x-4x) -+ * @h_filter_coef_7tap: Horizontal filter coefficients for 4-phase/7-tap -+ * mode (.25x-.5x) -+ * @v_filter_coef_7tap: Vertical filter coefficients for 4-phase/7-tap -+ * mode (.25x-.5x) -+ */ -+struct isprsz_coef { -+ u16 h_filter_coef_4tap[32]; -+ u16 v_filter_coef_4tap[32]; -+ u16 h_filter_coef_7tap[28]; -+ u16 v_filter_coef_7tap[28]; -+}; -+ -+/** -+ * struct isprsz_yenh - Structure for resizer luminance enhancer parameters. -+ * @algo: Algorithm select. -+ * @gain: Maximum gain. -+ * @slope: Slope. -+ * @coreoffset: Coring offset. 
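[Editor's note] The comment above spells out the order a client is expected to drive the resizer in. A condensed sketch of that sequence using the helpers this file exports, not part of the patch; isp_res, pipe and the two DMA addresses are assumed to come from the caller, and error handling is trimmed:

/* Editor's sketch (not part of the patch): the documented call order.   */
if (ispresizer_request(isp_res))                /* reserve the module     */
	return -EBUSY;
if (ispresizer_try_pipeline(isp_res, pipe) ||   /* validate sizes         */
    ispresizer_s_pipeline(isp_res, pipe))       /* program the registers  */
	goto out_free;
ispresizer_set_inaddr(isp_res, in_dma);         /* input frame            */
ispresizer_set_outaddr(isp_res, out_dma);       /* output frame           */
ispresizer_enable(isp_res, 1);                  /* start                  */
/* ... poll ispresizer_busy(isp_res) or wait for the resizer-done IRQ ... */
ispresizer_enable(isp_res, 0);
out_free:
	ispresizer_free(isp_res);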
-+ */ -+struct isprsz_yenh { -+ u8 algo; -+ u8 gain; -+ u8 slope; -+ u8 coreoffset; -+}; -+ -+/** -+ * struct isp_res_device - Structure for the resizer module to store its -+ * information. -+ * @res_inuse: Indicates if resizer module has been reserved. 1 - Reserved, -+ * 0 - Freed. -+ * @h_startphase: Horizontal starting phase. -+ * @v_startphase: Vertical starting phase. -+ * @h_resz: Horizontal resizing value. -+ * @v_resz: Vertical resizing value. -+ * @outputwidth: Output Image Width in pixels. -+ * @outputheight: Output Image Height in pixels. -+ * @inputwidth: Input Image Width in pixels. -+ * @inputheight: Input Image Height in pixels. -+ * @algo: Algorithm select. 0 - Disable, 1 - [-1 2 -1]/2 high-pass filter, -+ * 2 - [-1 -2 6 -2 -1]/4 high-pass filter. -+ * @ipht_crop: Vertical start line for cropping. -+ * @ipwd_crop: Horizontal start pixel for cropping. -+ * @cropwidth: Crop Width. -+ * @cropheight: Crop Height. -+ * @resinput: Resizer input. -+ * @coeflist: Register configuration for Resizer. -+ * @ispres_mutex: Mutex for isp resizer. -+ */ -+struct isp_res_device { -+ u8 res_inuse; -+ u8 h_startphase; -+ u8 v_startphase; -+ u16 h_resz; -+ u16 v_resz; -+ u8 algo; -+ dma_addr_t tmp_buf; -+ struct isprsz_coef coeflist; -+ struct mutex ispres_mutex; /* For checking/modifying res_inuse */ -+ struct isprsz_yenh defaultyenh; -+ int applycrop; -+}; -+ -+int ispresizer_config_crop(struct isp_res_device *isp_res, -+ struct v4l2_crop *a); -+void ispresizer_config_shadow_registers(struct isp_res_device *isp_res); -+ -+int ispresizer_request(struct isp_res_device *isp_res); -+ -+int ispresizer_free(struct isp_res_device *isp_res); -+ -+void ispresizer_enable_cbilin(struct isp_res_device *isp_res, u8 enable); -+ -+void ispresizer_config_ycpos(struct isp_res_device *isp_res, u8 yc); -+ -+void ispresizer_config_startphase(struct isp_res_device *isp_res, -+ u8 hstartphase, u8 vstartphase); -+ -+void ispresizer_config_filter_coef(struct isp_res_device *isp_res, -+ struct isprsz_coef *coef); -+ -+void ispresizer_config_luma_enhance(struct isp_res_device *isp_res, -+ struct isprsz_yenh *yenh); -+ -+int ispresizer_try_pipeline(struct isp_res_device *isp_res, -+ struct isp_pipeline *pipe); -+ -+int ispresizer_s_pipeline(struct isp_res_device *isp_res, -+ struct isp_pipeline *pipe); -+ -+int ispresizer_config_inlineoffset(struct isp_res_device *isp_res, u32 offset); -+ -+int ispresizer_set_inaddr(struct isp_res_device *isp_res, u32 addr); -+ -+int ispresizer_config_outlineoffset(struct isp_res_device *isp_res, u32 offset); -+ -+int ispresizer_set_outaddr(struct isp_res_device *isp_res, u32 addr); -+ -+void ispresizer_enable(struct isp_res_device *isp_res, int enable); -+ -+int ispresizer_busy(struct isp_res_device *isp_res); -+ -+void ispresizer_save_context(struct device *dev); -+ -+void ispresizer_restore_context(struct device *dev); -+ -+void ispresizer_print_status(struct isp_res_device *isp_res); -+ -+#endif /* OMAP_ISP_RESIZER_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispstat.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispstat.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispstat.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispstat.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,350 @@ -+/* -+ * ispstat.c -+ * -+ * STAT module for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. 
-+ * -+ * Contributors: -+ * Sergio Aguirre -+ * Troy Laramy -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include -+#include -+ -+#include "isp.h" -+ -+inline int greater_overflow(int a, int b, int limit) -+{ -+ int limit2 = limit / 2; -+ -+ if (b - a > limit2) -+ return 1; -+ else if (a - b > limit2) -+ return 0; -+ else -+ return a > b; -+} -+ -+int ispstat_buf_queue(struct ispstat *stat) -+{ -+ unsigned long flags; -+ -+ if (!stat->active_buf) -+ return -1; -+ -+ do_gettimeofday(&stat->active_buf->ts); -+ -+ spin_lock_irqsave(&stat->lock, flags); -+ -+ stat->active_buf->config_counter = stat->config_counter; -+ stat->active_buf->frame_number = stat->frame_number; -+ stat->active_buf->buf_size = stat->buf_size; -+ -+ stat->frame_number++; -+ if (stat->frame_number == stat->max_frame) -+ stat->frame_number = 0; -+ -+ stat->active_buf = NULL; -+ -+ spin_unlock_irqrestore(&stat->lock, flags); -+ -+ return 0; -+} -+ -+/* Get next free buffer to write the statistics to and mark it active. */ -+struct ispstat_buffer *ispstat_buf_next(struct ispstat *stat) -+{ -+ unsigned long flags; -+ struct ispstat_buffer *found = NULL; -+ int i; -+ -+ spin_lock_irqsave(&stat->lock, flags); -+ -+ if (stat->active_buf) { -+ dev_dbg(stat->dev, "%s: new buffer requested without queuing " -+ "active one.\n", stat->tag); -+ return stat->active_buf; -+ } -+ -+ for (i = 0; i < stat->nbufs; i++) { -+ struct ispstat_buffer *curr = &stat->buf[i]; -+ -+ /* -+ * Don't select the buffer which is being copied to -+ * userspace. -+ */ -+ if (curr == stat->locked_buf) -+ continue; -+ -+ /* Uninitialised buffer -- pick that one over anything else. */ -+ if (curr->frame_number == stat->max_frame) { -+ found = curr; -+ break; -+ } -+ -+ if (!found || -+ !greater_overflow(curr->frame_number, found->frame_number, -+ stat->max_frame)) -+ found = curr; -+ } -+ -+ stat->active_buf = found; -+ -+ spin_unlock_irqrestore(&stat->lock, flags); -+ -+ return found; -+} -+ -+/* Get buffer to userspace. */ -+static struct ispstat_buffer *ispstat_buf_find( -+ struct ispstat *stat, u32 frame_number) -+{ -+ int i; -+ -+ for (i = 0; i < stat->nbufs; i++) { -+ struct ispstat_buffer *curr = &stat->buf[i]; -+ -+ /* We cannot deal with the active buffer. */ -+ if (curr == stat->active_buf) -+ continue; -+ -+ /* Don't take uninitialised buffers. */ -+ if (curr->frame_number == stat->max_frame) -+ continue; -+ -+ /* Found correct number. */ -+ if (curr->frame_number == frame_number) -+ return curr; -+ } -+ -+ return NULL; -+} -+ -+/** -+ * ispstat_stats_available - Check for stats available of specified frame. -+ * @aewbdata: Pointer to return AE AWB statistics data -+ * -+ * Returns 0 if successful, or -1 if statistics are unavailable. -+ **/ -+struct ispstat_buffer *ispstat_buf_get(struct ispstat *stat, -+ void __user *ptr, -+ unsigned int frame_number) -+{ -+ int rval = 0; -+ unsigned long flags; -+ struct ispstat_buffer *buf; -+ -+ spin_lock_irqsave(&stat->lock, flags); -+ -+ buf = ispstat_buf_find(stat, frame_number); -+ if (!buf) { -+ spin_unlock_irqrestore(&stat->lock, flags); -+ dev_dbg(stat->dev, "%s: cannot find requested buffer. 
" -+ "frame_number = %d\n", stat->tag, frame_number); -+ return ERR_PTR(-EBUSY); -+ } -+ -+ stat->locked_buf = buf; -+ -+ spin_unlock_irqrestore(&stat->lock, flags); -+ -+ rval = copy_to_user((void *)ptr, -+ buf->virt_addr, -+ buf->buf_size); -+ -+ if (rval) { -+ dev_info(stat->dev, -+ "%s: failed copying %d bytes of stat data\n", -+ stat->tag, rval); -+ buf = ERR_PTR(-EFAULT); -+ ispstat_buf_release(stat); -+ } -+ -+ return buf; -+} -+ -+void ispstat_buf_release(struct ispstat *stat) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&stat->lock, flags); -+ stat->locked_buf = NULL; -+ spin_unlock_irqrestore(&stat->lock, flags); -+} -+ -+void ispstat_bufs_free(struct ispstat *stat) -+{ -+ struct isp_device *isp = dev_get_drvdata(stat->dev); -+ int i; -+ -+ for (i = 0; i < stat->nbufs; i++) { -+ struct ispstat_buffer *buf = &stat->buf[i]; -+ -+ if (!stat->dma_buf) { -+ if (!buf->iommu_addr) -+ continue; -+ -+ iommu_vfree(isp->iommu, buf->iommu_addr); -+ } else { -+ if (!buf->virt_addr) -+ continue; -+ -+ dma_free_coherent(stat->dev, stat->buf_alloc_size, -+ buf->virt_addr, buf->dma_addr); -+ } -+ buf->iommu_addr = 0; -+ buf->dma_addr = 0; -+ buf->virt_addr = NULL; -+ } -+ -+ stat->buf_alloc_size = 0; -+} -+ -+static int ispstat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) -+{ -+ struct isp_device *isp = dev_get_drvdata(stat->dev); -+ int i; -+ -+ stat->buf_alloc_size = size; -+ -+ for (i = 0; i < stat->nbufs; i++) { -+ struct ispstat_buffer *buf = &stat->buf[i]; -+ -+ WARN_ON(buf->dma_addr); -+ buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size, -+ IOMMU_FLAG); -+ if (buf->iommu_addr == 0) { -+ dev_err(stat->dev, -+ "%s stat: Can't acquire memory for " -+ "buffer %d\n", stat->tag, i); -+ ispstat_bufs_free(stat); -+ return -ENOMEM; -+ } -+ buf->virt_addr = da_to_va(isp->iommu, (u32)buf->iommu_addr); -+ buf->frame_number = stat->max_frame; -+ } -+ stat->dma_buf = 0; -+ -+ return 0; -+} -+ -+static int ispstat_bufs_alloc_dma(struct ispstat *stat, unsigned int size) -+{ -+ int i; -+ -+ /* dma_alloc_coherent() size is PAGE_ALIGNED */ -+ size = PAGE_ALIGN(size); -+ stat->buf_alloc_size = size; -+ -+ for (i = 0; i < stat->nbufs; i++) { -+ struct ispstat_buffer *buf = &stat->buf[i]; -+ -+ WARN_ON(buf->iommu_addr); -+ buf->virt_addr = dma_alloc_coherent(stat->dev, size, -+ &buf->dma_addr, GFP_KERNEL | GFP_DMA); -+ -+ if (!buf->virt_addr || !buf->dma_addr) { -+ dev_info(stat->dev, -+ "%s stat: Can't acquire memory for " -+ "DMA buffer %d\n", stat->tag, i); -+ ispstat_bufs_free(stat); -+ return -ENOMEM; -+ } -+ buf->frame_number = stat->max_frame; -+ } -+ stat->dma_buf = 1; -+ -+ return 0; -+} -+ -+void ispstat_bufs_set_size(struct ispstat *stat, unsigned int size) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&stat->lock, flags); -+ stat->buf_size = size; -+ spin_unlock_irqrestore(&stat->lock, flags); -+} -+ -+int ispstat_bufs_alloc(struct ispstat *stat, -+ unsigned int size, int dma_buf) -+{ -+ struct isp_device *isp = dev_get_drvdata(stat->dev); -+ unsigned long flags; -+ int ret = 0; -+ -+ spin_lock_irqsave(&stat->lock, flags); -+ -+ BUG_ON(stat->locked_buf != NULL); -+ -+ dma_buf = dma_buf ? 1 : 0; -+ -+ /* Are the old buffers big enough? 
*/ -+ if ((stat->buf_alloc_size >= size) && (stat->dma_buf == dma_buf)) { -+ spin_unlock_irqrestore(&stat->lock, flags); -+ goto out; -+ } -+ -+ if (isp->running != ISP_STOPPED) { -+ dev_info(stat->dev, -+ "%s stat: trying to configure when busy\n", -+ stat->tag); -+ spin_unlock_irqrestore(&stat->lock, flags); -+ return -EBUSY; -+ } -+ -+ spin_unlock_irqrestore(&stat->lock, flags); -+ -+ ispstat_bufs_free(stat); -+ -+ if (dma_buf) -+ ret = ispstat_bufs_alloc_dma(stat, size); -+ else -+ ret = ispstat_bufs_alloc_iommu(stat, size); -+ if (ret) -+ size = 0; -+ -+out: -+ stat->active_buf = NULL; -+ -+ return ret; -+} -+ -+int ispstat_init(struct device *dev, char *tag, struct ispstat *stat, -+ unsigned int nbufs, unsigned int max_frame) -+{ -+ BUG_ON(nbufs < 2); -+ BUG_ON(max_frame < 2); -+ BUG_ON(nbufs >= max_frame); -+ -+ memset(stat, 0, sizeof(*stat)); -+ -+ stat->buf = kcalloc(nbufs, sizeof(*stat->buf), GFP_KERNEL); -+ if (!stat->buf) -+ return -ENOMEM; -+ -+ spin_lock_init(&stat->lock); -+ stat->nbufs = nbufs; -+ stat->dev = dev; -+ stat->tag = tag; -+ stat->max_frame = max_frame; -+ stat->frame_number = 1; -+ -+ return 0; -+} -+ -+void ispstat_free(struct ispstat *stat) -+{ -+ ispstat_bufs_free(stat); -+ kfree(stat->buf); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/ispstat.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispstat.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/ispstat.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/ispstat.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,71 @@ -+/* -+ * ispstat.h -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
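[Editor's note] Taken together, the helpers above implement a small ring of statistics buffers keyed by a wrapping frame counter; greater_overflow() decides which buffer is oldest once frame numbers wrap at max_frame, and the active/locked pointers keep the interrupt path and the copy-to-user path off each other's buffer. A minimal flow sketch, not part of the patch; the tag, sizes and user pointer are assumptions:

/* Editor's sketch (not part of the patch): intended ispstat flow.       */
ispstat_init(dev, "aewb", &stat, 5 /* nbufs */, 1024 /* max_frame */);
ispstat_bufs_alloc(&stat, buf_size, 0 /* 0: ISP MMU, 1: coherent DMA */);
ispstat_bufs_set_size(&stat, buf_size);

/* per frame, from the interrupt path: */
buf = ispstat_buf_next(&stat);   /* oldest (or unused) buffer, marked active */
/* ... hardware fills buf->virt_addr with statistics ...                  */
ispstat_buf_queue(&stat);        /* stamp frame number + time, release    */

/* from the ioctl path: */
buf = ispstat_buf_get(&stat, user_ptr, wanted_frame);   /* copy_to_user() */
if (!IS_ERR(buf))
	ispstat_buf_release(&stat);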
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef ISPSTAT_H -+#define ISPSTAT_H -+ -+#include "isp.h" -+ -+struct ispstat_buffer { -+ unsigned long iommu_addr; -+ void *virt_addr; -+ dma_addr_t dma_addr; -+ struct timeval ts; -+ u32 config_counter; -+ u32 frame_number; -+ unsigned int buf_size; -+}; -+ -+struct ispstat { -+ spinlock_t lock; /* Lock for this struct */ -+ -+ u8 dma_buf; -+ unsigned int nbufs; -+ struct ispstat_buffer *buf; -+ unsigned int buf_size; -+ unsigned int buf_alloc_size; -+ struct ispstat_buffer *active_buf; -+ struct ispstat_buffer *locked_buf; -+ unsigned int frame_number; -+ unsigned int max_frame; -+ unsigned int config_counter; -+ -+ struct device *dev; -+ char *tag; /* ispstat instantiation tag */ -+}; -+ -+int ispstat_buf_queue(struct ispstat *stat); -+struct ispstat_buffer *ispstat_buf_next(struct ispstat *stat); -+struct ispstat_buffer *ispstat_buf_get(struct ispstat *stat, -+ void __user *ptr, -+ unsigned int frame_number); -+void ispstat_buf_release(struct ispstat *stat); -+void ispstat_bufs_free(struct ispstat *stat); -+void ispstat_bufs_set_size(struct ispstat *stat, unsigned int size); -+int ispstat_bufs_alloc(struct ispstat *stat, -+ unsigned int size, int dma_buf); -+int ispstat_init(struct device *dev, char *tag, struct ispstat *stat, -+ unsigned int nbufs, unsigned int max_frame); -+void ispstat_free(struct ispstat *stat); -+ -+#endif /* ISPSTAT_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/luma_enhance_table.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/luma_enhance_table.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/luma_enhance_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/luma_enhance_table.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,144 @@ -+/* -+ * luma_enhance_table.h -+ * -+ * Luminance Enhancement table values for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1047552, -+1048575, -+1047551, -+1046527, -+1045503, -+1044479, -+1043455, -+1042431, -+1041407, -+1040383, -+1039359, -+1038335, -+1037311, -+1036287, -+1035263, -+1034239, -+1033215, -+1032191, -+1031167, -+1030143, -+1028096, -+1028096, -+1028096, -+1028096, -+1028096, -+1028096, -+1028096, -+1028096, -+1028096, -+1028096, -+1028100, -+1032196, -+1036292, -+1040388, -+1044484, -+0, -+0, -+0, -+5, -+5125, -+10245, -+15365, -+20485, -+25605, -+30720, -+30720, -+30720, -+30720, -+30720, -+30720, -+30720, -+30720, -+30720, -+30720, -+30720, -+31743, -+30719, -+29695, -+28671, -+27647, -+26623, -+25599, -+24575, -+23551, -+22527, -+21503, -+20479, -+19455, -+18431, -+17407, -+16383, -+15359, -+14335, -+13311, -+12287, -+11263, -+10239, -+9215, -+8191, -+7167, -+6143, -+5119, -+4095, -+3071, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024, -+1024 -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/Makefile linux-omap-2.6.28-nokia1/drivers/media/video/isp/Makefile ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/Makefile 2011-06-22 13:19:32.723063276 +0200 -@@ -0,0 +1,14 @@ -+# Makefile for OMAP3 ISP driver -+ -+ifdef CONFIG_ARCH_OMAP3410 -+isp-mod-objs += \ -+ isp.o ispccdc.o -+else -+isp-mod-objs += \ -+ isp.o ispccdc.o \ -+ isppreview.o ispresizer.o isph3a.o isphist.o isp_af.o ispcsi2.o \ -+ ispstat.o -+endif -+ -+obj-$(CONFIG_VIDEO_OMAP3) += isp-mod.o -+obj-$(CONFIG_VIDEO_OMAP3) += omap_previewer_hack.o -\ No newline at end of file -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/noise_filter_table.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/noise_filter_table.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/noise_filter_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/noise_filter_table.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,79 @@ -+/* -+ * noise_filter_table.h -+ * -+ * Noise Filter Table values for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+16, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31, -+31 -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/omap_previewer_hack.c linux-omap-2.6.28-nokia1/drivers/media/video/isp/omap_previewer_hack.c ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/omap_previewer_hack.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/omap_previewer_hack.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,1247 @@ -+/* -+ * drivers/media/video/isp/omap_previewer.c -+ * -+ * Wrapper for Preview module in TI's OMAP3430 ISP -+ * -+ * Copyright (C) 2008 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "isp.h" -+#include "ispreg.h" -+#include "isppreview.h" -+ -+#define PREV_IOC_BASE 'P' -+#define PREV_REQBUF _IOWR(PREV_IOC_BASE, 1,\ -+ struct v4l2_requestbuffers) -+#define PREV_QUERYBUF _IOWR(PREV_IOC_BASE, 2,\ -+ struct v4l2_buffer) -+#define PREV_SET_PARAM _IOW(PREV_IOC_BASE, 3,\ -+ struct prev_params) -+#define PREV_PREVIEW _IOR(PREV_IOC_BASE, 5, int) -+#define PREV_GET_CROPSIZE _IOR(PREV_IOC_BASE, 7,\ -+ struct prev_cropsize) -+#define PREV_QUEUEBUF _IOWR(PREV_IOC_BASE, 8,\ -+ struct v4l2_buffer) -+#define PREV_IOC_MAXNR 8 -+ -+#define MAX_IMAGE_WIDTH 3300 -+ -+#define PREV_INWIDTH_8BIT 0 /* pixel width of 8 bitS */ -+#define PREV_INWIDTH_10BIT 1 /* pixel width of 10 bits */ -+ -+#define PREV_32BYTES_ALIGN_MASK 0xFFFFFFE0 -+#define PREV_16PIX_ALIGN_MASK 0xFFFFFFF0 -+ -+/* list of structures */ -+ -+/* device structure keeps track of global information */ -+struct prev_device { -+ unsigned char opened; /* state of the device */ -+ struct completion wfc; -+ struct mutex prevwrap_mutex; -+ /* spinlock for in/out videbuf queue */ -+ spinlock_t inout_vbq_lock; -+ /* spinlock for lsc videobuf queues */ -+ spinlock_t lsc_vbq_lock; -+ struct videobuf_queue_ops vbq_ops; /* videobuf queue operations */ -+ dma_addr_t isp_addr_read; /* Input/Output address */ -+ dma_addr_t isp_addr_lsc; /* lsc address */ -+ struct device *isp; -+ struct prev_size_params size_params; -+}; -+ -+/* per-filehandle data structure */ -+struct prev_fh { -+ /* in/out videobuf queue */ -+ enum v4l2_buf_type inout_type; -+ struct videobuf_queue inout_vbq; -+ /* lsc videobuf queue */ -+ enum v4l2_buf_type lsc_type; -+ struct videobuf_queue lsc_vbq; -+ /* device structure */ -+ struct prev_device *device; -+}; -+ -+#define OMAP_PREV_NAME "omap-previewer" /* "omap3hack" */ -+ -+#define BIT_SET(var,shift,mask,val) \ -+ do { \ -+ var = (var & ~(mask << shift)) \ -+ | (val << shift); \ -+ } while (0) -+ -+ -+#define ISP_CTRL_SBL_SHARED_RPORTB (1 << 28) -+#define ISP_CTRL_SBL_SHARED_RPORTA (1 << 27) -+#define SBL_SHARED_RPORTB 28 -+#define SBL_RD_RAM_EN 
18 -+ -+/* structure to know crop size */ -+struct prev_cropsize { -+ int hcrop; -+ int vcrop; -+}; -+ -+static struct isp_interface_config prevwrap_config = { -+ .ccdc_par_ser = ISP_NONE, -+ .dataline_shift = 0, -+ .hsvs_syncdetect = ISPCTRL_SYNC_DETECT_VSRISE, -+ .strobe = 0, -+ .prestrobe = 0, -+ .shutter = 0, -+ .wait_hs_vs = 0, -+}; -+ -+static u32 isp_ctrl; -+static int prev_major = -1; -+static struct device *prev_dev; -+static struct class *prev_class; -+static struct prev_device *prevdevice; -+static struct platform_driver omap_previewer_driver; -+static u32 prev_bufsize; -+static u32 lsc_bufsize; -+ -+/** -+ * prev_calculate_crop - Calculate crop size according to device parameters -+ * @device: Structure containing ISP preview wrapper global information -+ * @crop: Structure containing crop size -+ * -+ * This function is used to calculate frame size reduction depending on -+ * the features enabled by the application. -+ **/ -+static int prev_calculate_crop(struct prev_device *device, -+ struct prev_cropsize *crop) -+{ -+ struct isp_device *isp = dev_get_drvdata(device->isp); -+ int ret; -+ struct isp_pipeline pipe; -+ -+ pipe.ccdc_out_w = pipe.ccdc_out_w_img = -+ device->size_params.hsize; -+ pipe.ccdc_out_h = device->size_params.vsize; -+ -+ ret = isppreview_try_pipeline(&isp->isp_prev, &pipe); -+ -+ crop->hcrop = pipe.prv_out_w; -+ crop->vcrop = pipe.prv_out_h; -+ -+ return ret; -+} -+ -+/** -+ * prev_hw_setup - Stores the desired configuration in the proper HW registers -+ * @config: Structure containing the desired configuration for ISP preview -+ * module. -+ * -+ * Reads the structure sent, and modifies the desired registers. -+ * -+ * Always returns 0. -+ **/ -+static int prev_hw_setup(struct prev_params *config) -+{ -+ struct prev_device *device = prevdevice; -+ struct isp_device *isp = dev_get_drvdata(device->isp); -+ struct isp_prev_device *isp_prev = &isp->isp_prev; -+ -+ if (config->features & PREV_AVERAGER) -+ isppreview_config_averager(isp_prev, config->average); -+ else -+ isppreview_config_averager(isp_prev, 0); -+ -+ if (config->features & PREV_INVERSE_ALAW) -+ isppreview_enable_invalaw(isp_prev, 1); -+ else -+ isppreview_enable_invalaw(isp_prev, 0); -+ -+ if (config->features & PREV_HORZ_MEDIAN_FILTER) { -+ isppreview_config_hmed(isp_prev, config->hmf_params); -+ isppreview_enable_hmed(isp_prev, 1); -+ } else -+ isppreview_enable_hmed(isp_prev, 0); -+ -+ if (config->features & PREV_DARK_FRAME_SUBTRACT) { -+ isppreview_set_darkaddr(isp_prev, config->drkf_params.addr); -+ isppreview_config_darklineoffset(isp_prev, -+ config->drkf_params.offset); -+ isppreview_enable_drkframe(isp_prev, 1); -+ } else -+ isppreview_enable_drkframe(isp_prev, 0); -+ -+ if (config->features & PREV_LENS_SHADING) { -+ isppreview_config_drkf_shadcomp(isp_prev, -+ config->lens_shading_shift); -+ isppreview_enable_shadcomp(isp_prev, 1); -+ } else -+ isppreview_enable_shadcomp(isp_prev, 0); -+ -+ return 0; -+} -+ -+/** -+ * prev_validate_params - Validate configuration parameters for Preview Wrapper -+ * @params: Structure containing configuration parameters -+ * -+ * Validate configuration parameters for Preview Wrapper -+ * -+ * Returns 0 if successful, or -EINVAL if a parameter value is invalid. 
-+ **/ -+static int prev_validate_params(struct prev_params *params) -+{ -+ if (!params) { -+ dev_err(prev_dev, "validate_params: error in argument"); -+ goto err_einval; -+ } -+ -+ if ((params->features & PREV_AVERAGER) == PREV_AVERAGER) { -+ if ((params->average != NO_AVE) -+ && (params->average != AVE_2_PIX) -+ && (params->average != AVE_4_PIX) -+ && (params->average != AVE_8_PIX)) { -+ dev_err(prev_dev, "validate_params: wrong pix average\n"); -+ goto err_einval; -+ } else if (((params->average == AVE_2_PIX) -+ && (params->size_params.hsize % 2)) -+ || ((params->average == AVE_4_PIX) -+ && (params->size_params.hsize % 4)) -+ || ((params->average == AVE_8_PIX) -+ && (params->size_params.hsize % 8))) { -+ dev_err(prev_dev, "validate_params: " -+ "wrong pix average for input size\n"); -+ goto err_einval; -+ } -+ } -+ -+ if ((params->size_params.pixsize != PREV_INWIDTH_8BIT) -+ && (params->size_params.pixsize -+ != PREV_INWIDTH_10BIT)) { -+ dev_err(prev_dev, "validate_params: wrong pixsize\n"); -+ goto err_einval; -+ } -+ -+ if (params->size_params.hsize > MAX_IMAGE_WIDTH -+ || params->size_params.hsize < 0) { -+ dev_err(prev_dev, "validate_params: wrong hsize\n"); -+ goto err_einval; -+ } -+ -+ if ((params->pix_fmt != YCPOS_YCrYCb) -+ && (YCPOS_YCbYCr != params->pix_fmt) -+ && (YCPOS_CbYCrY != params->pix_fmt) -+ && (YCPOS_CrYCbY != params->pix_fmt)) { -+ dev_err(prev_dev, "validate_params: wrong pix_fmt"); -+ goto err_einval; -+ } -+ -+ if ((params->features & PREV_DARK_FRAME_SUBTRACT) -+ && (params->features -+ & PREV_DARK_FRAME_CAPTURE)) { -+ dev_err(prev_dev, "validate_params: DARK FRAME CAPTURE and " -+ "SUBSTRACT cannot be enabled " -+ "at same time\n"); -+ goto err_einval; -+ } -+#if 0 -+ if (params->features & PREV_DARK_FRAME_SUBTRACT) { -+ /* Is it truth place ??? 
*/ -+ -+ if (!params->drkf_params.addr -+ || (params->drkf_params.offset % 32)) { -+ dev_err(prev_dev, "validate_params: dark frame address\n"); -+ goto err_einval; -+ } -+ } -+ -+ if (params->features & PREV_LENS_SHADING) -+ if ((params->lens_shading_shift > 7) -+ || !params->drkf_params.addr -+ || (params->drkf_params.offset % 32)) { -+ dev_err(prev_dev, "validate_params: lens shading shift\n"); -+ goto err_einval; -+ } -+#endif -+ if ((params->size_params.in_pitch <= 0) -+ || (params->size_params.in_pitch % 32)) { -+ params->size_params.in_pitch = -+ (params->size_params.hsize * 2) & 0xFFE0; -+ dev_err(prev_dev, "Error in in_pitch; new value = %d\n", -+ params->size_params.in_pitch); -+ } -+ -+ return 0; -+err_einval: -+ return -EINVAL; -+} -+ -+/** -+ * preview_isr - Callback from ISP driver for ISP Preview Interrupt -+ * @status: ISP IRQ0STATUS register value -+ * @arg1: Structure containing ISP preview wrapper global information -+ * @arg2: Currently not used -+ **/ -+static void prev_isr(unsigned long status, isp_vbq_callback_ptr arg1, -+ void *arg2) -+{ -+ struct prev_device *device = (struct prev_device *)arg1; -+ -+ if ((status & PREV_DONE) != PREV_DONE) -+ return; -+ -+ if (device) -+ complete(&device->wfc); -+} -+ -+/* -+ * Set shared ports for using dark frame (lens shading) -+ */ -+static void prev_set_isp_ctrl(u16 mode) -+{ -+ struct prev_device *device = prevdevice; -+ u32 val; -+ -+ val = isp_reg_readl(device->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+ -+ isp_ctrl = val; -+ -+ /* Read port used by preview module data read */ -+ val &= ~ISP_CTRL_SBL_SHARED_RPORTA; -+ -+ if (mode & (PREV_DARK_FRAME_SUBTRACT | PREV_LENS_SHADING)) { -+ /* Read port used by preview module dark frame read */ -+ val &= ~ISP_CTRL_SBL_SHARED_RPORTB; -+ } -+ -+ BIT_SET(val, SBL_RD_RAM_EN, 0x1, 0x1); -+ -+ /* write ISP CTRL register */ -+ isp_reg_writel(device->isp, val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+} -+ -+/* -+ * Set old isp shared port configuration -+ */ -+static void prev_unset_isp_ctrl(void) -+{ -+ struct prev_device *device = prevdevice; -+ u32 val; -+ -+ val = isp_reg_readl(device->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+ -+ if (isp_ctrl & ISP_CTRL_SBL_SHARED_RPORTB) -+ val |= ISP_CTRL_SBL_SHARED_RPORTB; -+ -+ if (isp_ctrl & ISP_CTRL_SBL_SHARED_RPORTA) -+ val |= ISP_CTRL_SBL_SHARED_RPORTA; -+ -+ if (isp_ctrl & (1 << SBL_RD_RAM_EN)) -+ val &= ~(1 << SBL_RD_RAM_EN); -+ -+ /* write ISP CTRL register */ -+ isp_reg_writel(device->isp, val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL); -+} -+ -+static void isp_enable_interrupts(struct device *dev, int is_raw) -+{ -+ isp_reg_writel(dev, IRQ0ENABLE_PRV_DONE_IRQ, -+ OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE); -+} -+ -+/** -+ * prev_do_preview - Performs the Preview process -+ * @device: Structure containing ISP preview wrapper global information -+ * @arg: Currently not used -+ * -+ * Returns 0 if successful, or -EINVAL if the sent parameters are invalid. 
-+ **/ -+static int prev_do_preview(struct prev_device *device) -+{ -+ struct isp_device *isp = dev_get_drvdata(device->isp); -+ struct isp_prev_device *isp_prev = &isp->isp_prev; -+ struct prev_params *config = &isp_prev->params; -+ int bpp, size; -+ int ret = 0; -+ struct isp_pipeline pipe; -+ -+ memset(&pipe, 0, sizeof(pipe)); -+ pipe.pix.pixelformat = V4L2_PIX_FMT_UYVY; -+ -+ prev_set_isp_ctrl(config->features); -+ -+ if (device->size_params.pixsize == PREV_INWIDTH_8BIT) -+ bpp = 1; -+ else -+ bpp = 2; -+ -+ size = device->size_params.hsize * -+ device->size_params.vsize * bpp; -+ -+ pipe.prv_in = PRV_RAW_MEM; -+ pipe.prv_out = PREVIEW_MEM; -+ -+ isppreview_set_skip(isp_prev, 2, 0); -+ -+ pipe.ccdc_out_w = pipe.ccdc_out_w_img -+ = device->size_params.hsize; -+ pipe.ccdc_out_h = device->size_params.vsize & ~0xf; -+ -+ ret = isppreview_try_pipeline(&isp->isp_prev, &pipe); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while try size!\n"); -+ goto out; -+ } -+ -+ ret = isppreview_s_pipeline(isp_prev, &pipe); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while config size!\n"); -+ goto out; -+ } -+ -+ ret = isppreview_config_inlineoffset(isp_prev, pipe.prv_out_w * bpp); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while config inline offset!\n"); -+ goto out; -+ } -+ -+ ret = isppreview_config_outlineoffset(isp_prev, -+ pipe.prv_out_w * bpp - 32); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while config outline offset!\n"); -+ goto out; -+ } -+ -+ config->drkf_params.addr = device->isp_addr_lsc; -+ -+ prev_hw_setup(config); -+ -+ ret = isppreview_set_inaddr(isp_prev, device->isp_addr_read); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while set read addr!\n"); -+ goto out; -+ } -+ -+ ret = isppreview_set_outaddr(isp_prev, device->isp_addr_read); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while set write addr!\n"); -+ goto out; -+ } -+ -+ ret = isp_set_callback(device->isp, CBK_PREV_DONE, prev_isr, -+ (void *)device, (void *)NULL); -+ if (ret) { -+ dev_err(prev_dev, "ERROR while setting Previewer callback!\n"); -+ goto out; -+ } -+ isp_configure_interface(device->isp, &prevwrap_config); -+ -+ isp_start(device->isp); -+ -+ isp_enable_interrupts(device->isp, 0); -+ -+ isppreview_enable(isp_prev, 1); -+ -+ wait_for_completion_interruptible(&device->wfc); -+ -+#if 0 -+ if (device->isp_addr_read) { -+ ispmmu_vunmap(device->isp_addr_read); -+ device->isp_addr_read = 0; -+ } -+#endif -+ -+ ret = isp_unset_callback(device->isp, CBK_PREV_DONE); -+ -+ prev_unset_isp_ctrl(); -+ -+ isp_stop(device->isp); -+ -+out: -+ return ret; -+} -+ -+static int previewer_vb_lock_vma(struct videobuf_buffer *vb, int lock) -+{ -+ unsigned long start, end; -+ struct vm_area_struct *vma; -+ int rval = 0; -+ -+ if (vb->memory == V4L2_MEMORY_MMAP) -+ return 0; -+ -+ end = vb->baddr + vb->bsize; -+ -+ down_write(¤t->mm->mmap_sem); -+ spin_lock(¤t->mm->page_table_lock); -+ for (start = vb->baddr; ; ) { -+ unsigned int newflags; -+ -+ vma = find_vma(current->mm, start); -+ if (!vma || vma->vm_start > start) { -+ rval = -ENOMEM; -+ goto out; -+ } -+ -+ newflags = vma->vm_flags | VM_LOCKED; -+ if (!lock) -+ newflags &= ~VM_LOCKED; -+ -+ vma->vm_flags = newflags; -+ -+ if (vma->vm_end >= end) -+ break; -+ -+ start = vma->vm_end; -+ } -+ -+out: -+ spin_unlock(¤t->mm->page_table_lock); -+ up_write(¤t->mm->mmap_sem); -+ return rval; -+} -+ -+/** -+ * previewer_vbq_release - Videobuffer queue release -+ * @q: Structure containing the videobuffer queue. -+ * @vb: Structure containing the videobuffer used for previewer processing. 
-+ **/ -+static void previewer_vbq_release(struct videobuf_queue *q, -+ struct videobuf_buffer *vb) -+{ -+ struct prev_fh *fh = q->priv_data; -+ struct prev_device *device = fh->device; -+ -+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { -+ ispmmu_vunmap(device->isp, device->isp_addr_read); -+ device->isp_addr_read = 0; -+ spin_lock(&device->inout_vbq_lock); -+ vb->state = VIDEOBUF_NEEDS_INIT; -+ spin_unlock(&device->inout_vbq_lock); -+ } else if (q->type == V4L2_BUF_TYPE_PRIVATE) { -+ ispmmu_vunmap(device->isp, device->isp_addr_lsc); -+ device->isp_addr_lsc = 0; -+ spin_lock(&device->lsc_vbq_lock); -+ vb->state = VIDEOBUF_NEEDS_INIT; -+ spin_unlock(&device->lsc_vbq_lock); -+ } -+ -+ previewer_vb_lock_vma(vb, 0); -+ if (vb->memory != V4L2_MEMORY_MMAP) { -+ videobuf_dma_unmap(q, videobuf_to_dma(vb)); -+ videobuf_dma_free(videobuf_to_dma(vb)); -+ } -+ -+} -+ -+/** -+ * previewer_vbq_setup - Sets up the videobuffer size and validates count. -+ * @q: Structure containing the videobuffer queue. -+ * @cnt: Number of buffers requested -+ * @size: Size in bytes of the buffer used for previewing -+ * -+ * Always returns 0. -+ **/ -+static int previewer_vbq_setup(struct videobuf_queue *q, -+ unsigned int *cnt, -+ unsigned int *size) -+{ -+ struct prev_fh *fh = q->priv_data; -+ struct prev_device *device = fh->device; -+ u32 bpp = 1; -+ -+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { -+ spin_lock(&device->inout_vbq_lock); -+ -+ if (*cnt <= 0) -+ *cnt = 1; -+ -+ if (*cnt > VIDEO_MAX_FRAME) -+ *cnt = VIDEO_MAX_FRAME; -+ -+ if (!device->size_params.hsize || -+ !device->size_params.vsize) { -+ dev_err(prev_dev, "Can't setup inout buffer size\n"); -+ spin_unlock(&device->inout_vbq_lock); -+ return -EINVAL; -+ } -+ -+ if (device->size_params.pixsize == PREV_INWIDTH_10BIT) -+ bpp = 2; -+ -+ *size = prev_bufsize = bpp * device->size_params.hsize -+ * device->size_params.vsize; -+ spin_unlock(&device->inout_vbq_lock); -+ -+ } else if (q->type == V4L2_BUF_TYPE_PRIVATE) { -+ spin_lock(&device->lsc_vbq_lock); -+ if (*cnt <= 0) -+ *cnt = 1; -+ -+ if (*cnt > 1) -+ *cnt = 1; -+ -+ if (!device->size_params.hsize || -+ !device->size_params.vsize) { -+ dev_err(prev_dev, "Can't setup lsc buffer size\n"); -+ spin_unlock(&device->lsc_vbq_lock); -+ return -EINVAL; -+ } -+ -+ /* upsampled lsc table size - for now bpp = 2 */ -+ bpp = 2; -+ *size = lsc_bufsize = bpp * device->size_params.hsize * -+ device->size_params.vsize; -+ -+ spin_unlock(&device->lsc_vbq_lock); -+ } else { -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+/** -+ * previewer_vbq_prepare - Videobuffer is prepared and mmapped. -+ * @q: Structure containing the videobuffer queue. -+ * @vb: Structure containing the videobuffer used for previewer processing. -+ * @field: Type of field to set in videobuffer device. 
-+ * -+ * Returns 0 if successful, or -EINVAL if buffer couldn't get allocated, or -+ * -EIO if the ISP MMU mapping fails -+ **/ -+static int previewer_vbq_prepare(struct videobuf_queue *q, -+ struct videobuf_buffer *vb, -+ enum v4l2_field field) -+{ -+ struct prev_fh *fh = q->priv_data; -+ struct prev_device *device = fh->device; -+ int err = -EINVAL; -+ unsigned int isp_addr; -+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb); -+ -+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { -+ -+ spin_lock(&device->inout_vbq_lock); -+ -+ if (vb->baddr) { -+ vb->size = prev_bufsize; -+ vb->bsize = prev_bufsize; -+ } else { -+ spin_unlock(&device->inout_vbq_lock); -+ dev_err(prev_dev, "No user buffer allocated\n"); -+ goto out; -+ } -+ -+ vb->width = device->size_params.hsize; -+ vb->height = device->size_params.vsize; -+ vb->field = field; -+ spin_unlock(&device->inout_vbq_lock); -+ -+ if (vb->state == VIDEOBUF_NEEDS_INIT) { -+ err = previewer_vb_lock_vma(vb, 1); -+ if (err) -+ goto buf_init_err1; -+ -+ err = videobuf_iolock(q, vb, NULL); -+ if (err) -+ goto buf_init_err1; -+ -+ isp_addr = ispmmu_vmap(device->isp, -+ dma->sglist, dma->sglen); -+ if (!isp_addr) -+ err = -EIO; -+ else -+ device->isp_addr_read = isp_addr; -+ } -+ -+buf_init_err1: -+ if (!err) -+ vb->state = VIDEOBUF_PREPARED; -+ else -+ previewer_vbq_release(q, vb); -+ -+ } else if (q->type == V4L2_BUF_TYPE_PRIVATE) { -+ -+ spin_lock(&device->lsc_vbq_lock); -+ -+ if (vb->baddr) { -+ vb->size = lsc_bufsize; -+ vb->bsize = lsc_bufsize; -+ } else { -+ spin_unlock(&device->lsc_vbq_lock); -+ dev_err(prev_dev, "No user buffer allocated\n"); -+ goto out; -+ } -+ -+ vb->width = device->size_params.hsize; -+ vb->height = device->size_params.vsize; -+ vb->field = field; -+ spin_unlock(&device->lsc_vbq_lock); -+ -+ if (vb->state == VIDEOBUF_NEEDS_INIT) { -+ err = previewer_vb_lock_vma(vb, 1); -+ if (err) -+ goto buf_init_err2; -+ -+ err = videobuf_iolock(q, vb, NULL); -+ if (err) -+ goto buf_init_err2; -+ -+ isp_addr = ispmmu_vmap(device->isp, -+ dma->sglist, dma->sglen); -+ if (!isp_addr) -+ err = -EIO; -+ else -+ device->isp_addr_lsc = isp_addr; -+ } -+ -+buf_init_err2: -+ if (!err) -+ vb->state = VIDEOBUF_PREPARED; -+ else -+ previewer_vbq_release(q, vb); -+ -+ } else { -+ return -EINVAL; -+ } -+ -+out: -+ return err; -+} -+ -+static void previewer_vbq_queue(struct videobuf_queue *q, -+ struct videobuf_buffer *vb) -+{ -+ return; -+} -+ -+/** -+ * previewer_open - Initializes and opens the Preview Wrapper -+ * @inode: Inode structure associated with the Preview Wrapper -+ * @filp: File structure associated with the Preview Wrapper -+ * -+ * Returns 0 if successful, -EACCES if its unable to initialize default config, -+ * -EBUSY if its already opened or the ISP module is not available, or -ENOMEM -+ * if its unable to allocate the device in kernel space memory. 
-+ **/ -+static int previewer_open(struct inode *inode, struct file *filp) -+{ -+ int ret = 0; -+ struct prev_device *device = prevdevice; -+ struct prev_fh *fh; -+ struct device *isp; -+ struct isp_device *isp_dev; -+ -+ if (device->opened || (filp->f_flags & O_NONBLOCK)) { -+ dev_err(prev_dev, "previewer_open: device is already " -+ "opened\n"); -+ return -EBUSY; -+ } -+ -+ fh = kzalloc(sizeof(struct prev_fh), GFP_KERNEL); -+ if (NULL == fh) { -+ ret = -ENOMEM; -+ goto err_fh; -+ } -+ -+ isp = isp_get(); -+ if (!isp) { -+ printk(KERN_ERR "Can't enable ISP clocks (ret %d)\n", ret); -+ ret = -EACCES; -+ goto err_isp; -+ } -+ device->isp = isp; -+ isp_dev = dev_get_drvdata(isp); -+ -+ ret = isppreview_request(&isp_dev->isp_prev); -+ if (ret) { -+ dev_err(prev_dev, "Can't acquire isppreview\n"); -+ goto err_prev; -+ } -+ -+ device->opened = 1; -+ -+ filp->private_data = fh; -+ fh->inout_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ fh->lsc_type = V4L2_BUF_TYPE_PRIVATE; -+ fh->device = device; -+ -+ videobuf_queue_sg_init(&fh->inout_vbq, &device->vbq_ops, NULL, -+ &device->inout_vbq_lock, fh->inout_type, -+ V4L2_FIELD_NONE, -+ sizeof(struct videobuf_buffer), fh); -+ -+ videobuf_queue_sg_init(&fh->lsc_vbq, &device->vbq_ops, NULL, -+ &device->lsc_vbq_lock, fh->lsc_type, -+ V4L2_FIELD_NONE, -+ sizeof(struct videobuf_buffer), fh); -+ -+ init_completion(&device->wfc); -+ device->wfc.done = 0; -+ mutex_init(&device->prevwrap_mutex); -+ -+ return 0; -+ -+err_prev: -+ isp_put(); -+err_isp: -+ kfree(fh); -+err_fh: -+ return ret; -+} -+ -+/** -+ * previewer_release - Releases Preview Wrapper and frees up allocated memory -+ * @inode: Inode structure associated with the Preview Wrapper -+ * @filp: File structure associated with the Preview Wrapper -+ * -+ * Always returns 0. -+ **/ -+static int previewer_release(struct inode *inode, struct file *filp) -+{ -+ struct prev_fh *fh = filp->private_data; -+ struct prev_device *device = fh->device; -+ struct videobuf_queue *q1 = &fh->inout_vbq; -+ struct videobuf_queue *q2 = &fh->lsc_vbq; -+ struct isp_device *isp = dev_get_drvdata(device->isp); -+ -+ device->opened = 0; -+ videobuf_mmap_free(q1); -+ videobuf_mmap_free(q2); -+ isppreview_free(&isp->isp_prev); -+ isp_put(); -+ prev_bufsize = 0; -+ lsc_bufsize = 0; -+ filp->private_data = NULL; -+ kfree(fh); -+ -+ return 0; -+} -+ -+/** -+ * previewer_mmap - Memory maps the Preview Wrapper module. -+ * @file: File structure associated with the Preview Wrapper -+ * @vma: Virtual memory area structure. -+ * -+ * Returns 0 if successful, or returned value by the videobuf_mmap_mapper() -+ * function. 
-+ **/ -+static int previewer_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+/* -+ struct prev_fh *fh = file->private_data; -+ -+ dev_dbg(prev_dev, "previewer_mmap\n"); -+ -+ return videobuf_mmap_mapper(&fh->inout_vbq, vma); -+*/ -+ return -EINVAL; -+} -+ -+#define COPY_USERTABLE(dst, src, size) \ -+ if (src) { \ -+ if (!dst) \ -+ return -EACCES; \ -+ if (copy_from_user(dst, src, (size) * sizeof(*(dst)))) \ -+ return -EFAULT; \ -+ } -+ -+/* Copy preview module configuration into use */ -+static int previewer_set_param(struct prev_device *device, -+ struct prev_params __user *uparams) -+{ -+ struct isp_device *isp_dev = dev_get_drvdata(device->isp); -+ struct prev_params *config = &isp_dev->isp_prev.params; -+ /* Here it should be safe to allocate 420 bytes from stack */ -+ struct prev_params p; -+ struct prev_params *params = &p; -+ int ret; -+ -+ if (copy_from_user(params, uparams, sizeof(*params))) -+ return -EFAULT; -+ ret = prev_validate_params(params); -+ if (ret < 0) -+ return -EINVAL; -+ -+ config->features = params->features; -+ config->pix_fmt = params->pix_fmt; -+ config->cfa.cfafmt = params->cfa.cfafmt; -+ -+ /* struct ispprev_cfa */ -+ config->cfa.cfa_gradthrs_vert = params->cfa.cfa_gradthrs_vert; -+ config->cfa.cfa_gradthrs_horz = params->cfa.cfa_gradthrs_horz; -+ COPY_USERTABLE(config->cfa.cfa_table, params->cfa.cfa_table, -+ ISPPRV_CFA_TBL_SIZE); -+ -+ /* struct ispprev_csup csup */ -+ config->csup.gain = params->csup.gain; -+ config->csup.thres = params->csup.thres; -+ config->csup.hypf_en = params->csup.hypf_en; -+ -+ COPY_USERTABLE(config->ytable, params->ytable, ISPPRV_YENH_TBL_SIZE); -+ -+ /* struct ispprev_nf nf */ -+ config->nf.spread = params->nf.spread; -+ memcpy(&config->nf.table, ¶ms->nf.table, sizeof(config->nf.table)); -+ -+ /* struct ispprev_dcor dcor */ -+ config->dcor.couplet_mode_en = params->dcor.couplet_mode_en; -+ memcpy(&config->dcor.detect_correct, ¶ms->dcor.detect_correct, -+ sizeof(config->dcor.detect_correct)); -+ -+ /* struct ispprev_gtable gtable */ -+ COPY_USERTABLE(config->gtable.redtable, params->gtable.redtable, -+ ISPPRV_GAMMA_TBL_SIZE); -+ COPY_USERTABLE(config->gtable.greentable, params->gtable.greentable, -+ ISPPRV_GAMMA_TBL_SIZE); -+ COPY_USERTABLE(config->gtable.bluetable, params->gtable.bluetable, -+ ISPPRV_GAMMA_TBL_SIZE); -+ -+ /* struct ispprev_wbal wbal */ -+ config->wbal.dgain = params->wbal.dgain; -+ config->wbal.coef3 = params->wbal.coef3; -+ config->wbal.coef2 = params->wbal.coef2; -+ config->wbal.coef1 = params->wbal.coef1; -+ config->wbal.coef0 = params->wbal.coef0; -+ -+ /* struct ispprev_blkadj blk_adj */ -+ config->blk_adj.red = params->blk_adj.red; -+ config->blk_adj.green = params->blk_adj.green; -+ config->blk_adj.blue = params->blk_adj.blue; -+ -+ /* struct ispprev_rgbtorgb rgb2rgb */ -+ memcpy(&config->rgb2rgb.matrix, ¶ms->rgb2rgb.matrix, -+ sizeof(config->rgb2rgb.matrix)); -+ memcpy(&config->rgb2rgb.offset, ¶ms->rgb2rgb.offset, -+ sizeof(config->rgb2rgb.offset)); -+ -+ /* struct ispprev_csc rgb2ycbcr */ -+ memcpy(&config->rgb2ycbcr.matrix, ¶ms->rgb2ycbcr.matrix, -+ sizeof(config->rgb2ycbcr.matrix)); -+ memcpy(&config->rgb2ycbcr.offset, ¶ms->rgb2ycbcr.offset, -+ sizeof(config->rgb2ycbcr.offset)); -+ -+ /* struct ispprev_hmed hmf_params */ -+ config->hmf_params.odddist = params->hmf_params.odddist; -+ config->hmf_params.evendist = params->hmf_params.evendist; -+ config->hmf_params.thres = params->hmf_params.thres; -+ -+ /* struct prev_darkfrm_params drkf_params not set here */ -+ -+ config->lens_shading_shift 
= params->lens_shading_shift; -+ config->average = params->average; -+ config->contrast = params->contrast; -+ config->brightness = params->brightness; -+ -+ device->size_params = params->size_params; -+ -+ return 0; -+} -+ -+/** -+ * previewer_ioctl - I/O control function for Preview Wrapper -+ * @inode: Inode structure associated with the Preview Wrapper. -+ * @file: File structure associated with the Preview Wrapper. -+ * @cmd: Type of command to execute. -+ * @arg: Argument to send to requested command. -+ * -+ * Returns 0 if successful, -1 if bad command passed or access is denied, -+ * -EFAULT if copy_from_user() or copy_to_user() fails, -EINVAL if parameter -+ * validation fails or parameter structure is not present -+ **/ -+static int previewer_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ int ret = 0; -+ struct prev_fh *fh = file->private_data; -+ struct prev_device *device = fh->device; -+ -+ switch (cmd) { -+ case PREV_REQBUF: { -+ struct v4l2_requestbuffers req; -+ -+ if (copy_from_user(&req, (void *)arg, sizeof(req))) -+ goto err_efault; -+ -+ if (mutex_lock_interruptible(&device->prevwrap_mutex)) -+ goto err_eintr; -+ -+ if (req.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ ret = videobuf_reqbufs(&fh->inout_vbq, &req); -+ else if (req.type == V4L2_BUF_TYPE_PRIVATE) -+ ret = videobuf_reqbufs(&fh->lsc_vbq, &req); -+ else -+ ret = -EINVAL; -+ -+ mutex_unlock(&device->prevwrap_mutex); -+ -+ if (ret) -+ goto out; -+ -+ if (copy_to_user((void *)arg, &req, sizeof(req))) -+ goto err_efault; -+ -+ break; -+ } -+ -+ case PREV_QUERYBUF: { -+ struct v4l2_buffer b; -+ -+ if (copy_from_user(&b, (void *)arg, sizeof(b))) -+ goto err_efault; -+ -+ if (mutex_lock_interruptible(&device->prevwrap_mutex)) -+ goto err_eintr; -+ -+ if (b.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ ret = videobuf_querybuf(&fh->inout_vbq, &b); -+ else if (b.type == V4L2_BUF_TYPE_PRIVATE) -+ ret = videobuf_querybuf(&fh->lsc_vbq, &b); -+ else -+ ret = -EINVAL; -+ -+ mutex_unlock(&device->prevwrap_mutex); -+ -+ if (ret) -+ goto out; -+ -+ if (copy_to_user((void *)arg, &b, sizeof(b))) -+ goto err_efault; -+ -+ break; -+ } -+ -+ case PREV_QUEUEBUF: { -+ struct v4l2_buffer b; -+ -+ if (copy_from_user(&b, (void *)arg, sizeof(b))) -+ goto err_efault; -+ -+ if (mutex_lock_interruptible(&device->prevwrap_mutex)) -+ goto err_eintr; -+ -+ if (b.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ ret = videobuf_qbuf(&fh->inout_vbq, &b); -+ else if (b.type == V4L2_BUF_TYPE_PRIVATE) -+ ret = videobuf_qbuf(&fh->lsc_vbq, &b); -+ else -+ ret = -EINVAL; -+ -+ mutex_unlock(&device->prevwrap_mutex); -+ -+ if (ret) -+ goto out; -+ -+ if (copy_to_user((void *)arg, &b, sizeof(b))) -+ goto err_efault; -+ -+ break; -+ } -+ -+ case PREV_SET_PARAM: -+ if (mutex_lock_interruptible(&device->prevwrap_mutex)) -+ goto err_eintr; -+ ret = previewer_set_param(device, (struct prev_params *)arg); -+ mutex_unlock(&device->prevwrap_mutex); -+ break; -+ -+ case PREV_PREVIEW: -+ if (mutex_lock_interruptible(&device->prevwrap_mutex)) -+ goto err_eintr; -+ ret = prev_do_preview(device); -+ mutex_unlock(&device->prevwrap_mutex); -+ break; -+ -+ case PREV_GET_CROPSIZE: { -+ struct prev_cropsize outputsize; -+ -+ memset(&outputsize, 0, sizeof(outputsize)); -+ ret = prev_calculate_crop(device, &outputsize); -+ if (ret) -+ break; -+ -+ if (copy_to_user((struct prev_cropsize *)arg, &outputsize, -+ sizeof(struct prev_cropsize))) -+ ret = -EFAULT; -+ break; -+ } -+ -+ default: -+ dev_err(prev_dev, "previewer_ioctl: Invalid Command Value\n"); 
-+ ret = -EINVAL; -+ } -+out: -+ return ret; -+err_efault: -+ return -EFAULT; -+err_eintr: -+ return -EINTR; -+} -+ -+/** -+ * previewer_platform_release - Acts when Reference count is zero -+ * @device: Structure containing ISP preview wrapper global information -+ * -+ * This is called when the reference count goes to zero -+ **/ -+static void previewer_platform_release(struct device *device) -+{ -+} -+ -+static struct file_operations prev_fops = { -+ .owner = THIS_MODULE, -+ .open = previewer_open, -+ .release = previewer_release, -+ .mmap = previewer_mmap, -+ .ioctl = previewer_ioctl, -+}; -+ -+static struct platform_device omap_previewer_device = { -+ .name = OMAP_PREV_NAME, -+ .id = -1, -+ .dev = { -+ .release = previewer_platform_release, -+ } -+}; -+ -+/** -+ * previewer_probe - Checks for device presence -+ * @pdev: Structure containing details of the current device. -+ * -+ * Always returns 0 -+ **/ -+static int previewer_probe(struct platform_device *pdev) -+{ -+ return 0; -+} -+ -+/** -+ * previewer_remove - Handles the removal of the driver -+ * @pdev: Structure containing details of the current device. -+ * -+ * Always returns 0. -+ **/ -+static int previewer_remove(struct platform_device *pdev) -+{ -+ return 0; -+} -+ -+static struct platform_driver omap_previewer_driver = { -+ .probe = previewer_probe, -+ .remove = previewer_remove, -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = OMAP_PREV_NAME, -+ }, -+}; -+ -+/** -+ * omap_previewer_init - Initialization of Preview Wrapper -+ * -+ * Returns 0 if successful, -ENOMEM if could not allocate memory, -ENODEV if -+ * could not register the wrapper as a character device, or other errors if the -+ * device or driver can't register. -+ **/ -+static int __init omap_previewer_init(void) -+{ -+ int ret; -+ struct prev_device *device; -+ -+ device = kzalloc(sizeof(struct prev_device), GFP_KERNEL); -+ if (!device) { -+ printk(KERN_ERR OMAP_PREV_NAME -+ " could not allocate memory\n"); -+ return -ENOMEM; -+ } -+ prev_major = register_chrdev(0, OMAP_PREV_NAME, &prev_fops); -+ -+ if (prev_major < 0) { -+ printk(KERN_ERR OMAP_PREV_NAME " initialization " -+ "failed. 
could not register character " -+ "device\n"); -+ kfree(device); -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&omap_previewer_driver); -+ if (ret) { -+ printk(KERN_ERR OMAP_PREV_NAME -+ "failed to register platform driver!\n"); -+ goto fail2; -+ } -+ ret = platform_device_register(&omap_previewer_device); -+ if (ret) { -+ printk(KERN_ERR OMAP_PREV_NAME -+ " failed to register platform device!\n"); -+ goto fail3; -+ } -+ -+ prev_class = class_create(THIS_MODULE, OMAP_PREV_NAME); -+ if (!prev_class) -+ goto fail4; -+ -+ prev_dev = device_create(prev_class, prev_dev, MKDEV(prev_major, 0), -+ NULL, OMAP_PREV_NAME); -+ -+ device->opened = 0; -+ -+ device->vbq_ops.buf_setup = previewer_vbq_setup; -+ device->vbq_ops.buf_prepare = previewer_vbq_prepare; -+ device->vbq_ops.buf_release = previewer_vbq_release; -+ device->vbq_ops.buf_queue = previewer_vbq_queue; -+ spin_lock_init(&device->inout_vbq_lock); -+ spin_lock_init(&device->lsc_vbq_lock); -+ prevdevice = device; -+ return 0; -+ -+fail4: -+ platform_device_unregister(&omap_previewer_device); -+fail3: -+ platform_driver_unregister(&omap_previewer_driver); -+fail2: -+ unregister_chrdev(prev_major, OMAP_PREV_NAME); -+ -+ kfree(device); -+ -+ return ret; -+} -+ -+/** -+ * omap_previewer_exit - Close of Preview Wrapper -+ **/ -+static void __exit omap_previewer_exit(void) -+{ -+ device_destroy(prev_class, MKDEV(prev_major, 0)); -+ class_destroy(prev_class); -+ platform_device_unregister(&omap_previewer_device); -+ platform_driver_unregister(&omap_previewer_driver); -+ unregister_chrdev(prev_major, OMAP_PREV_NAME); -+ -+ kfree(prevdevice); -+ prev_major = -1; -+} -+ -+module_init(omap_previewer_init); -+module_exit(omap_previewer_exit); -+ -+MODULE_AUTHOR("Texas Instruments"); -+MODULE_DESCRIPTION("OMAP ISP Previewer"); -+MODULE_LICENSE("GPL"); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/isp/redgamma_table.h linux-omap-2.6.28-nokia1/drivers/media/video/isp/redgamma_table.h ---- linux-omap-2.6.28-omap1/drivers/media/video/isp/redgamma_table.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/isp/redgamma_table.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,1040 @@ -+/* -+ * redgamma_table.h -+ * -+ * Gamma Table values for RED for TI's OMAP3 Camera ISP -+ * -+ * Copyright (C) 2009 Texas Instruments, Inc. -+ * -+ * This package is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR -+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED -+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-+ */ -+ -+0, -+0, -+1, -+2, -+3, -+3, -+4, -+5, -+6, -+8, -+10, -+12, -+14, -+16, -+18, -+20, -+22, -+23, -+25, -+26, -+28, -+29, -+31, -+32, -+34, -+35, -+36, -+37, -+39, -+40, -+41, -+42, -+43, -+44, -+45, -+46, -+47, -+48, -+49, -+50, -+51, -+52, -+52, -+53, -+54, -+55, -+56, -+57, -+58, -+59, -+60, -+61, -+62, -+63, -+63, -+64, -+65, -+66, -+66, -+67, -+68, -+69, -+69, -+70, -+71, -+72, -+72, -+73, -+74, -+75, -+75, -+76, -+77, -+78, -+78, -+79, -+80, -+81, -+81, -+82, -+83, -+84, -+84, -+85, -+86, -+87, -+88, -+88, -+89, -+90, -+91, -+91, -+92, -+93, -+94, -+94, -+95, -+96, -+97, -+97, -+98, -+98, -+99, -+99, -+100, -+100, -+101, -+101, -+102, -+103, -+104, -+104, -+105, -+106, -+107, -+108, -+108, -+109, -+110, -+111, -+111, -+112, -+113, -+114, -+114, -+115, -+116, -+117, -+117, -+118, -+119, -+119, -+120, -+120, -+121, -+121, -+122, -+122, -+123, -+123, -+124, -+124, -+125, -+125, -+126, -+126, -+127, -+127, -+128, -+128, -+129, -+129, -+130, -+130, -+131, -+131, -+132, -+132, -+133, -+133, -+134, -+134, -+135, -+135, -+136, -+136, -+137, -+137, -+138, -+138, -+139, -+139, -+140, -+140, -+141, -+141, -+142, -+142, -+143, -+143, -+144, -+144, -+145, -+145, -+146, -+146, -+147, -+147, -+148, -+148, -+149, -+149, -+150, -+150, -+151, -+151, -+152, -+152, -+153, -+153, -+153, -+153, -+154, -+154, -+154, -+154, -+155, -+155, -+156, -+156, -+157, -+157, -+158, -+158, -+158, -+159, -+159, -+159, -+160, -+160, -+160, -+161, -+161, -+162, -+162, -+163, -+163, -+164, -+164, -+164, -+164, -+165, -+165, -+165, -+165, -+166, -+166, -+167, -+167, -+168, -+168, -+169, -+169, -+170, -+170, -+170, -+170, -+171, -+171, -+171, -+171, -+172, -+172, -+173, -+173, -+174, -+174, -+175, -+175, -+176, -+176, -+176, -+176, -+177, -+177, -+177, -+177, -+178, -+178, -+178, -+178, -+179, -+179, -+179, -+179, -+180, -+180, -+180, -+180, -+181, -+181, -+181, -+181, -+182, -+182, -+182, -+182, -+183, -+183, -+183, -+183, -+184, -+184, -+184, -+184, -+185, -+185, -+185, -+185, -+186, -+186, -+186, -+186, -+187, -+187, -+187, -+187, -+188, -+188, -+188, -+188, -+189, -+189, -+189, -+189, -+190, -+190, -+190, -+190, -+191, -+191, -+191, -+191, -+192, -+192, -+192, -+192, -+193, -+193, -+193, -+193, -+194, -+194, -+194, -+194, -+195, -+195, -+195, -+195, -+196, -+196, -+196, -+196, -+197, -+197, -+197, -+197, -+198, -+198, -+198, -+198, -+199, -+199, -+199, -+199, -+200, -+200, -+200, -+200, -+201, -+201, -+201, -+201, -+202, -+202, -+202, -+203, -+203, -+203, -+203, -+204, -+204, -+204, -+204, -+205, -+205, -+205, -+205, -+206, -+206, -+206, -+206, -+207, -+207, -+207, -+207, -+208, -+208, -+208, -+208, -+209, -+209, -+209, -+209, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+210, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+211, -+212, -+212, -+212, -+212, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+213, -+214, -+214, -+214, -+214, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+215, -+216, -+216, -+216, -+216, -+217, -+217, -+217, -+217, -+218, -+218, -+218, -+218, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+219, -+220, -+220, -+220, -+220, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+221, -+222, -+222, -+222, -+222, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+223, -+224, -+224, -+224, -+224, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, -+225, 
-+225, -+225, -+225, -+225, -+226, -+226, -+226, -+226, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+227, -+228, -+228, -+228, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+229, -+230, -+230, -+230, -+230, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+231, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+232, -+233, -+233, -+233, -+233, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+234, -+235, -+235, -+235, -+235, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+236, -+237, -+237, -+237, -+237, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+238, -+239, -+239, -+239, -+239, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+240, -+241, -+241, -+241, -+241, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+242, -+243, -+243, -+243, -+243, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+244, -+245, -+245, -+245, -+245, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+246, -+247, -+247, -+247, -+247, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+248, -+249, -+249, -+249, -+249, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+250, -+251, -+251, -+251, -+251, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+252, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+253, -+254, -+254, -+254, -+254, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255, -+255 -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/media/video/Kconfig linux-omap-2.6.28-nokia1/drivers/media/video/Kconfig ---- linux-omap-2.6.28-omap1/drivers/media/video/Kconfig 2011-06-22 13:14:18.413067746 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/Kconfig 2011-06-22 13:19:32.723063276 +0200 -@@ -305,6 +305,28 @@ config VIDEO_TCM825X - This is a driver for the Toshiba TCM825x VGA camera sensor. - It is used for example in Nokia N800. - -+config VIDEO_ET8EK8 -+ tristate "ET8EK8 camera sensor support" -+ depends on I2C && VIDEO_V4L2 -+ select VIDEO_SMIAREGS -+ ---help--- -+ This is a driver for the Toshiba ET8EK8 5 MP camera sensor. -+ It is used for example in Nokia RX51. -+ -+config VIDEO_AD5820 -+ tristate "AD5820 lens voice coil support" -+ depends on I2C && VIDEO_V4L2 -+ ---help--- -+ This is a driver for the AD5820 camera lens voice coil. -+ It is used for example in Nokia RX51. -+ -+config VIDEO_ADP1653 -+ tristate "ADP1653 flash support" -+ depends on I2C && VIDEO_V4L2 -+ ---help--- -+ This is a driver for the ADP1653 flash. It is used for -+ example in Nokia RX51. -+ - config VIDEO_SAA7110 - tristate "Philips SAA7110 video decoder" - depends on VIDEO_V4L1 && I2C -@@ -379,6 +401,15 @@ config VIDEO_VPX3220 - To compile this driver as a module, choose M here: the - module will be called vpx3220. - -+config VIDEO_SMIA_SENSOR -+ tristate "Generic SMIA-compatible camera sensor support" -+ depends on I2C && VIDEO_V4L2 -+ select VIDEO_SMIAREGS -+ ---help--- -+ This is a generic driver for SMIA-compatible camera sensors. -+ It works at least with ST VS6555 and Toshiba TCM8330MD -+ VGA camera sensors. -+ - comment "Video and audio decoders" - - source "drivers/media/video/cx25840/Kconfig" -@@ -700,6 +731,25 @@ config VIDEO_CAFE_CCIC - CMOS camera controller. This is the controller found on first- - generation OLPC systems. - -+config VIDEO_OMAP3 -+ tristate "OMAP 3 Camera support" -+ select VIDEOBUF_GEN -+ select VIDEOBUF_DMA_SG -+ select OMAP_IOMMU -+ depends on VIDEO_V4L2 && ARCH_OMAP34XX -+ ---help--- -+ Driver for an OMAP 3 camera controller. -+ -+config VIDEO_SMIAREGS -+ tristate "Generic SMIA I2C register access and register list helper" -+ depends on I2C -+ ---help--- -+ This allows writing and reading SMIA image sensors' I2C registers -+ easily. -+ -+ Also a few helper functions are provided to work with binary -+ register lists. 
-+ - config SOC_CAMERA - tristate "SoC camera support" - depends on VIDEO_V4L2 && HAS_DMA -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/Makefile linux-omap-2.6.28-nokia1/drivers/media/video/Makefile ---- linux-omap-2.6.28-omap1/drivers/media/video/Makefile 2011-06-22 13:14:18.413067746 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/Makefile 2011-06-22 13:19:32.723063276 +0200 -@@ -47,6 +47,7 @@ obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o - obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o - obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o - obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o -+obj-$(CONFIG_VIDEO_SMIA_SENSOR) += smia-sensor.o - obj-$(CONFIG_VIDEO_BT819) += bt819.o - obj-$(CONFIG_VIDEO_BT856) += bt856.o - obj-$(CONFIG_VIDEO_BT866) += bt866.o -@@ -99,7 +100,16 @@ obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o - obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o - obj-$(CONFIG_VIDEO_OV7670) += ov7670.o - -+omap34xxcam-mod-objs := omap34xxcam.o omap34xxcam-daemon.o \ -+ omap34xxcam-daemon-req.o -+obj-$(CONFIG_VIDEO_OMAP3) += omap34xxcam-mod.o isp/ -+ -+obj-$(CONFIG_VIDEO_SMIAREGS) += smiaregs.o -+ - obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o -+obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8.o -+obj-$(CONFIG_VIDEO_AD5820) += ad5820.o -+obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o - - obj-$(CONFIG_USB_DABUSB) += dabusb.o - obj-$(CONFIG_USB_OV511) += ov511.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam.c linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam.c ---- linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,2160 @@ -+/* -+ * omap34xxcam.c -+ * -+ * Copyright (C) 2006--2009 Nokia Corporation -+ * Copyright (C) 2007--2009 Texas Instruments -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * Originally based on the OMAP 2 camera driver. -+ * -+ * Written by Sakari Ailus -+ * Tuukka Toivonen -+ * Sergio Aguirre -+ * Mohit Jalori -+ * Sameer Venkatraman -+ * Leonides Martinez -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "omap34xxcam.h" -+#include "isp/isp.h" -+ -+#define OMAP34XXCAM_VERSION KERNEL_VERSION(0, 0, 0) -+ -+/* global variables */ -+static struct omap34xxcam_device *omap34xxcam; -+ -+/* -+ * -+ * Sensor handling. 
-+ * -+ */ -+ -+/** -+ * omap34xxcam_slave_power_set - set slave power state -+ * @vdev: per-video device data structure -+ * @power: new power state -+ */ -+int omap34xxcam_slave_power_set(struct omap34xxcam_videodev *vdev, -+ enum v4l2_power power, int mask) -+{ -+ int rval = 0, i = 0; -+ int start, end, dir; -+ -+ BUG_ON(!mutex_is_locked(&vdev->mutex)); -+ -+ if (power != V4L2_POWER_OFF) { -+ /* Sensor has to be powered on first */ -+ start = 0; -+ end = OMAP34XXCAM_SLAVE_FLASH; -+ dir = 1; -+ } else { -+ /* Sensor has to be powered off last */ -+ start = OMAP34XXCAM_SLAVE_FLASH; -+ end = 0; -+ dir = -1; -+ } -+ -+ for (i = start; i != end + dir; i += dir) { -+ if (vdev->slave[i] == v4l2_int_device_dummy()) -+ continue; -+ -+ if (!(mask & (1 << i)) -+ || power == vdev->power_state[i]) -+ continue; -+ -+ rval = vidioc_int_s_power(vdev->slave[i], power); -+ -+ if (rval && power != V4L2_POWER_OFF) { -+ power = V4L2_POWER_OFF; -+ goto out; -+ } -+ -+ vdev->power_state[i] = power; -+ } -+ -+ return 0; -+ -+out: -+ for (i -= dir; i != start - dir; i -= dir) { -+ if (vdev->slave[i] == v4l2_int_device_dummy()) -+ continue; -+ -+ if (!(mask & (1 << i))) -+ continue; -+ -+ vidioc_int_s_power(vdev->slave[i], power); -+ vdev->power_state[i] = power; -+ } -+ -+ return rval; -+} -+ -+/** -+ * omap34xxcam_update_vbq - Updates VBQ with completed input buffer -+ * @vb: ptr. to standard V4L2 video buffer structure -+ * -+ * Updates video buffer queue with completed buffer passed as -+ * input parameter. Also updates ISP H3A timestamp and field count -+ * statistics. -+ */ -+void omap34xxcam_vbq_complete(struct videobuf_buffer *vb, void *priv) -+{ -+ struct omap34xxcam_fh *fh = priv; -+ -+ do_gettimeofday(&vb->ts); -+ vb->field_count = atomic_add_return(2, &fh->field_count); -+ -+ wake_up(&vb->done); -+} -+ -+/** -+ * omap34xxcam_vbq_setup - Calcs size and num of buffs allowed in queue -+ * @vbq: ptr. to standard V4L2 video buffer queue structure -+ * @cnt: ptr to location to hold the count of buffers to be in the queue -+ * @size: ptr to location to hold the size of a frame -+ * -+ * Calculates the number of buffers of current image size that can be -+ * supported by the available capture memory. -+ */ -+static int omap34xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt, -+ unsigned int *size) -+{ -+ struct omap34xxcam_fh *fh = vbq->priv_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ -+ if (*cnt <= 0) -+ *cnt = VIDEO_MAX_FRAME; /* supply a default number of buffers */ -+ -+ if (*cnt > VIDEO_MAX_FRAME) -+ *cnt = VIDEO_MAX_FRAME; -+ -+ *size = vdev->pix.sizeimage; -+ -+ while (*size * *cnt > fh->vdev->vdev_sensor_config.capture_mem) -+ (*cnt)--; -+ -+ return isp_vbq_setup(vdev->cam->isp, vbq, cnt, size); -+} -+ -+static int omap34xxcam_vb_lock_vma(struct videobuf_buffer *vb, int lock) -+{ -+ unsigned long start, end; -+ struct vm_area_struct *vma; -+ int rval = 0; -+ -+ if (vb->memory == V4L2_MEMORY_MMAP) -+ return 0; -+ -+ if (current->flags & PF_EXITING) { -+ /** -+ * task is getting shutdown. -+ * current->mm could have been released. -+ * -+ * For locking, we return error. 
-+ * For unlocking, the subsequent release of -+ * buffer should set things right -+ */ -+ if (lock) -+ return -EINVAL; -+ else -+ return 0; -+ } -+ -+ end = vb->baddr + vb->bsize; -+ -+ down_write(¤t->mm->mmap_sem); -+ spin_lock(¤t->mm->page_table_lock); -+ -+ for (start = vb->baddr; ; ) { -+ unsigned int newflags; -+ -+ vma = find_vma(current->mm, start); -+ if (!vma || vma->vm_start > start) { -+ rval = -ENOMEM; -+ goto out; -+ } -+ -+ newflags = vma->vm_flags | VM_LOCKED; -+ if (!lock) -+ newflags &= ~VM_LOCKED; -+ -+ vma->vm_flags = newflags; -+ -+ if (vma->vm_end >= end) -+ break; -+ -+ start = vma->vm_end; -+ } -+ -+out: -+ spin_unlock(¤t->mm->page_table_lock); -+ up_write(¤t->mm->mmap_sem); -+ return rval; -+} -+ -+/** -+ * omap34xxcam_vbq_release - Free resources for input VBQ and VB -+ * @vbq: ptr. to standard V4L2 video buffer queue structure -+ * @vb: ptr to standard V4L2 video buffer structure -+ * -+ * Unmap and free all memory associated with input VBQ and VB, also -+ * unmap the address in ISP MMU. Reset the VB state. -+ */ -+static void omap34xxcam_vbq_release(struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb) -+{ -+ struct omap34xxcam_fh *fh = vbq->priv_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ struct device *isp = vdev->cam->isp; -+ -+ if (!vbq->streaming) { -+ isp_vbq_release(isp, vbq, vb); -+ omap34xxcam_vb_lock_vma(vb, 0); -+ videobuf_dma_unmap(vbq, videobuf_to_dma(vb)); -+ videobuf_dma_free(videobuf_to_dma(vb)); -+ vb->state = VIDEOBUF_NEEDS_INIT; -+ } -+ return; -+} -+ -+/** -+ * omap34xxcam_vbq_prepare - V4L2 video ops buf_prepare handler -+ * @vbq: ptr. to standard V4L2 video buffer queue structure -+ * @vb: ptr to standard V4L2 video buffer structure -+ * @field: standard V4L2 field enum -+ * -+ * Verifies there is sufficient locked memory for the requested -+ * buffer, or if there is not, allocates, locks and initializes -+ * it. -+ */ -+static int omap34xxcam_vbq_prepare(struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb, -+ enum v4l2_field field) -+{ -+ struct omap34xxcam_fh *fh = vbq->priv_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ struct device *isp = vdev->cam->isp; -+ -+ int err = 0; -+ -+ /* -+ * Accessing pix here is okay since it's constant while -+ * streaming is on (and we only get called then). -+ */ -+ if (vb->baddr) { -+ /* This is a userspace buffer. */ -+ if (vdev->pix.sizeimage > vb->bsize) -+ /* The buffer isn't big enough. */ -+ return -EINVAL; -+ } else { -+ if (vb->state != VIDEOBUF_NEEDS_INIT -+ && vdev->pix.sizeimage > vb->bsize) -+ /* -+ * We have a kernel bounce buffer that has -+ * already been allocated. -+ */ -+ omap34xxcam_vbq_release(vbq, vb); -+ } -+ -+ vb->size = vdev->pix.bytesperline * vdev->pix.height; -+ vb->width = vdev->pix.width; -+ vb->height = vdev->pix.height; -+ vb->field = field; -+ -+ if (vb->state == VIDEOBUF_NEEDS_INIT) { -+ err = omap34xxcam_vb_lock_vma(vb, 1); -+ if (err) -+ goto buf_init_err; -+ -+ err = videobuf_iolock(vbq, vb, NULL); -+ if (err) -+ goto buf_init_err; -+ -+ /* isp_addr will be stored locally inside isp code */ -+ err = isp_vbq_prepare(isp, vbq, vb, field); -+ } -+ -+buf_init_err: -+ if (!err) -+ vb->state = VIDEOBUF_PREPARED; -+ else -+ omap34xxcam_vbq_release(vbq, vb); -+ -+ return err; -+} -+ -+/** -+ * omap34xxcam_vbq_queue - V4L2 video ops buf_queue handler -+ * @vbq: ptr. 
to standard V4L2 video buffer queue structure -+ * @vb: ptr to standard V4L2 video buffer structure -+ * -+ * Maps the video buffer to sgdma and through the isp, sets -+ * the isp buffer done callback and sets the video buffer state -+ * to active. -+ */ -+static void omap34xxcam_vbq_queue(struct videobuf_queue *vbq, -+ struct videobuf_buffer *vb) -+{ -+ struct omap34xxcam_fh *fh = vbq->priv_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ struct device *isp = vdev->cam->isp; -+ -+ isp_buf_queue(isp, vb, omap34xxcam_vbq_complete, (void *)fh); -+} -+ -+static struct videobuf_queue_ops omap34xxcam_vbq_ops = { -+ .buf_setup = omap34xxcam_vbq_setup, -+ .buf_prepare = omap34xxcam_vbq_prepare, -+ .buf_queue = omap34xxcam_vbq_queue, -+ .buf_release = omap34xxcam_vbq_release, -+}; -+ -+/* -+ * -+ * IOCTL interface. -+ * -+ */ -+ -+/** -+ * vidioc_querycap - V4L2 query capabilities IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @cap: ptr to standard V4L2 capability structure -+ * -+ * Fill in the V4L2 capabliity structure for the camera device -+ */ -+static int vidioc_querycap(struct file *file, void *fh, -+ struct v4l2_capability *cap) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ -+ strlcpy(cap->driver, CAM_SHORT_NAME, sizeof(cap->driver)); -+ strlcpy(cap->card, vdev->vfd->name, sizeof(cap->card)); -+ cap->version = OMAP34XXCAM_VERSION; -+ if (vdev->vdev_sensor != v4l2_int_device_dummy()) -+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; -+ -+ return 0; -+} -+ -+/** -+ * vidioc_enum_fmt_vid_cap - V4L2 enumerate format capabilities IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @f: ptr to standard V4L2 format description structure -+ * -+ * Fills in enumerate format capabilities information for sensor (if SOC -+ * sensor attached) or ISP (if raw sensor attached). -+ */ -+static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh, -+ struct v4l2_fmtdesc *f) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ if (vdev->vdev_sensor_config.sensor_isp) -+ rval = vidioc_int_enum_fmt_cap(vdev->vdev_sensor, f); -+ else -+ rval = isp_enum_fmt_cap(f); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_g_fmt_vid_cap - V4L2 get format capabilities IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @f: ptr to standard V4L2 format structure -+ * -+ * Fills in format capabilities for sensor (if SOC sensor attached) or ISP -+ * (if raw sensor attached). 
-+ */ -+static int vidioc_g_fmt_vid_cap(struct file *file, void *fh, -+ struct v4l2_format *f) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ f->fmt.pix = vdev->pix; -+ mutex_unlock(&vdev->mutex); -+ -+ return 0; -+} -+ -+static int try_pix_parm(struct omap34xxcam_videodev *vdev, -+ struct v4l2_pix_format *best_pix_in, -+ struct v4l2_pix_format *wanted_pix_out, -+ struct v4l2_fract *best_ival) -+{ -+ int fps; -+ int fmtd_index; -+ int rval; -+ struct v4l2_pix_format best_pix_out; -+ struct device *isp = vdev->cam->isp; -+ -+ if (best_ival->numerator == 0 -+ || best_ival->denominator == 0) -+ *best_ival = vdev->vdev_sensor_config.ival_default; -+ -+ fps = best_ival->denominator / best_ival->numerator; -+ -+ memset(best_pix_in, 0, sizeof(*best_pix_in)); -+ -+ best_ival->denominator = 0; -+ best_pix_out.height = INT_MAX >> 1; -+ best_pix_out.width = best_pix_out.height; -+ -+ for (fmtd_index = 0; ; fmtd_index++) { -+ int size_index; -+ struct v4l2_fmtdesc fmtd; -+ -+ fmtd.index = fmtd_index; -+ fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ rval = vidioc_int_enum_fmt_cap(vdev->vdev_sensor, &fmtd); -+ if (rval) -+ break; -+ dev_dbg(&vdev->vfd->dev, "trying fmt %8.8x (%d)\n", -+ fmtd.pixelformat, fmtd_index); -+ /* -+ * Get supported resolutions. -+ */ -+ for (size_index = 0; ; size_index++) { -+ struct v4l2_frmsizeenum frms; -+ struct v4l2_pix_format pix_tmp_in, pix_tmp_out; -+ int ival_index; -+ -+ frms.index = size_index; -+ frms.pixel_format = fmtd.pixelformat; -+ -+ rval = vidioc_int_enum_framesizes(vdev->vdev_sensor, -+ &frms); -+ if (rval) -+ break; -+ -+ pix_tmp_in.pixelformat = frms.pixel_format; -+ pix_tmp_in.width = frms.discrete.width; -+ pix_tmp_in.height = frms.discrete.height; -+ pix_tmp_out = *wanted_pix_out; -+ /* Don't do upscaling. */ -+ if (pix_tmp_out.width > pix_tmp_in.width) -+ pix_tmp_out.width = pix_tmp_in.width; -+ if (pix_tmp_out.height > pix_tmp_in.height) -+ pix_tmp_out.height = pix_tmp_in.height; -+ rval = isp_try_fmt_cap(isp, &pix_tmp_in, &pix_tmp_out); -+ if (rval) -+ return rval; -+ -+ dev_dbg(&vdev->vfd->dev, "this w %d\th %d\tfmt %8.8x\t" -+ "-> w %d\th %d\t fmt %8.8x" -+ "\twanted w %d\th %d\t fmt %8.8x\n", -+ pix_tmp_in.width, pix_tmp_in.height, -+ pix_tmp_in.pixelformat, -+ pix_tmp_out.width, pix_tmp_out.height, -+ pix_tmp_out.pixelformat, -+ wanted_pix_out->width, wanted_pix_out->height, -+ wanted_pix_out->pixelformat); -+ -+#define IS_SMALLER_OR_EQUAL(pix1, pix2) \ -+ ((pix1)->width + (pix1)->height \ -+ < (pix2)->width + (pix2)->height) -+#define SIZE_DIFF(pix1, pix2) \ -+ (abs((pix1)->width - (pix2)->width) \ -+ + abs((pix1)->height - (pix2)->height)) -+ -+ /* -+ * Don't use modes that are farther from wanted size -+ * that what we already got. -+ */ -+ if (SIZE_DIFF(&pix_tmp_out, wanted_pix_out) -+ > SIZE_DIFF(&best_pix_out, wanted_pix_out)) { -+ dev_dbg(&vdev->vfd->dev, "size diff bigger: " -+ "w %d\th %d\tw %d\th %d\n", -+ pix_tmp_out.width, pix_tmp_out.height, -+ best_pix_out.width, -+ best_pix_out.height); -+ continue; -+ } -+ -+ /* -+ * There's an input mode that can provide output -+ * closer to wanted. -+ */ -+ if (SIZE_DIFF(&pix_tmp_out, wanted_pix_out) -+ < SIZE_DIFF(&best_pix_out, wanted_pix_out)) { -+ /* Force renegotation of fps etc. 
*/ -+ best_ival->denominator = 0; -+ dev_dbg(&vdev->vfd->dev, "renegotiate: " -+ "w %d\th %d\tw %d\th %d\n", -+ pix_tmp_out.width, pix_tmp_out.height, -+ best_pix_out.width, -+ best_pix_out.height); -+ } -+ -+ for (ival_index = 0; ; ival_index++) { -+ struct v4l2_frmivalenum frmi; -+ -+ frmi.index = ival_index; -+ frmi.pixel_format = frms.pixel_format; -+ frmi.width = frms.discrete.width; -+ frmi.height = frms.discrete.height; -+ /* FIXME: try to fix standard... */ -+ frmi.reserved[0] = 0xdeafbeef; -+ -+ rval = vidioc_int_enum_frameintervals( -+ vdev->vdev_sensor, &frmi); -+ if (rval) -+ break; -+ -+ dev_dbg(&vdev->vfd->dev, "fps %d\n", -+ frmi.discrete.denominator -+ / frmi.discrete.numerator); -+ -+ if (best_ival->denominator == 0) -+ goto do_it_now; -+ -+ if (best_pix_in->width == 0) -+ goto do_it_now; -+ -+ /* -+ * We aim to use maximum resolution -+ * from the sensor, provided that the -+ * fps is at least as close as on the -+ * current mode. -+ */ -+#define FPS_ABS_DIFF(fps, ival) abs(fps - (ival).denominator / (ival).numerator) -+ -+ /* Select mode with closest fps. */ -+ if (FPS_ABS_DIFF(fps, frmi.discrete) -+ < FPS_ABS_DIFF(fps, *best_ival)) { -+ dev_dbg(&vdev->vfd->dev, "closer fps: " -+ "fps %d\t fps %d\n", -+ FPS_ABS_DIFF(fps, -+ frmi.discrete), -+ FPS_ABS_DIFF(fps, *best_ival)); -+ goto do_it_now; -+ } -+ -+ /* -+ * Select bigger resolution if it's available -+ * at same fps. -+ */ -+ if (frmi.width + frmi.height -+ > best_pix_in->width + best_pix_in->height -+ && FPS_ABS_DIFF(fps, frmi.discrete) -+ <= FPS_ABS_DIFF(fps, *best_ival)) { -+ dev_dbg(&vdev->vfd->dev, "bigger res, " -+ "same fps: " -+ "w %d\th %d\tw %d\th %d\n", -+ frmi.width, frmi.height, -+ best_pix_in->width, -+ best_pix_in->height); -+ goto do_it_now; -+ } -+ -+ dev_dbg(&vdev->vfd->dev, "falling through\n"); -+ -+ continue; -+ -+do_it_now: -+ *best_ival = frmi.discrete; -+ best_pix_out = pix_tmp_out; -+ best_pix_in->width = frmi.width; -+ best_pix_in->height = frmi.height; -+ best_pix_in->pixelformat = frmi.pixel_format; -+ -+ dev_dbg(&vdev->vfd->dev, -+ "best_pix_in: w %d\th %d\tfmt %8.8x" -+ "\tival %d/%d\n", -+ best_pix_in->width, -+ best_pix_in->height, -+ best_pix_in->pixelformat, -+ best_ival->numerator, -+ best_ival->denominator); -+ } -+ } -+ } -+ -+ if (best_ival->denominator == 0) -+ return -EINVAL; -+ -+ *wanted_pix_out = best_pix_out; -+ -+ dev_dbg(&vdev->vfd->dev, "w %d, h %d, fmt %8.8x -> w %d, h %d\n", -+ best_pix_in->width, best_pix_in->height, -+ best_pix_in->pixelformat, -+ best_pix_out.width, best_pix_out.height); -+ -+ return 0; -+} -+ -+static int s_pix_parm(struct omap34xxcam_videodev *vdev, -+ struct v4l2_pix_format *best_pix, -+ struct v4l2_pix_format *pix, -+ struct v4l2_fract *best_ival) -+{ -+ struct device *isp = vdev->cam->isp; -+ struct v4l2_streamparm a; -+ struct v4l2_format fmt; -+ struct v4l2_format old_fmt; -+ int rval; -+ -+ rval = try_pix_parm(vdev, best_pix, pix, best_ival); -+ if (rval) -+ return rval; -+ -+ rval = isp_s_fmt_cap(isp, best_pix, pix); -+ if (rval) -+ return rval; -+ -+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ fmt.fmt.pix = *best_pix; -+ vidioc_int_g_fmt_cap(vdev->vdev_sensor, &old_fmt); -+ rval = vidioc_int_s_fmt_cap(vdev->vdev_sensor, &fmt); -+ if (rval) -+ return rval; -+ -+ a.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ a.parm.capture.timeperframe = *best_ival; -+ rval = vidioc_int_s_parm(vdev->vdev_sensor, &a); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_s_fmt_vid_cap - V4L2 set format capabilities IOCTL handler -+ * @file: ptr. 
to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @f: ptr to standard V4L2 format structure -+ * -+ * Attempts to set input format with the sensor driver (first) and then the -+ * ISP. Returns the return code from vidioc_g_fmt_vid_cap(). -+ */ -+static int vidioc_s_fmt_vid_cap(struct file *file, void *fh, -+ struct v4l2_format *f) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_pix_format pix_tmp; -+ struct v4l2_fract timeperframe; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ omap34xxcam_daemon_req_hw_reconfig( -+ vdev, -+ OMAP34XXCAM_DAEMON_HW_RECONFIG_FMT); -+ -+ mutex_lock(&vdev->mutex); -+ if (vdev->streaming) { -+ rval = -EBUSY; -+ goto out; -+ } -+ -+ vdev->want_pix = f->fmt.pix; -+ -+ timeperframe = vdev->want_timeperframe; -+ -+ rval = s_pix_parm(vdev, &pix_tmp, &f->fmt.pix, &timeperframe); -+ if (!rval) -+ vdev->pix = f->fmt.pix; -+ -+out: -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_try_fmt_vid_cap - V4L2 try format capabilities IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @f: ptr to standard V4L2 format structure -+ * -+ * Checks if the given format is supported by the sensor driver and -+ * by the ISP. -+ */ -+static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, -+ struct v4l2_format *f) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_pix_format pix_tmp; -+ struct v4l2_fract timeperframe; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ -+ timeperframe = vdev->want_timeperframe; -+ -+ rval = try_pix_parm(vdev, &pix_tmp, &f->fmt.pix, &timeperframe); -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_reqbufs - V4L2 request buffers IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @b: ptr to standard V4L2 request buffers structure -+ * -+ * Attempts to get a buffer from the buffer queue associated with the -+ * fh through the video buffer library API. -+ */ -+static int vidioc_reqbufs(struct file *file, void *fh, -+ struct v4l2_requestbuffers *b) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ if (vdev->streaming) { -+ mutex_unlock(&vdev->mutex); -+ return -EBUSY; -+ } -+ -+ rval = videobuf_reqbufs(&ofh->vbq, b); -+ -+ mutex_unlock(&vdev->mutex); -+ -+ /* -+ * Either videobuf_reqbufs failed or the buffers are not -+ * memory-mapped (which would need special attention). -+ */ -+ if (rval < 0 || b->memory != V4L2_MEMORY_MMAP) -+ goto out; -+ -+out: -+ return rval; -+} -+ -+/** -+ * vidioc_querybuf - V4L2 query buffer IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @b: ptr to standard V4L2 buffer structure -+ * -+ * Attempts to fill in the v4l2_buffer structure for the buffer queue -+ * associated with the fh through the video buffer library API. 
-+ */ -+static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ -+ return videobuf_querybuf(&ofh->vbq, b); -+} -+ -+/** -+ * vidioc_qbuf - V4L2 queue buffer IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @b: ptr to standard V4L2 buffer structure -+ * -+ * Attempts to queue the v4l2_buffer on the buffer queue -+ * associated with the fh through the video buffer library API. -+ */ -+static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ -+ return videobuf_qbuf(&ofh->vbq, b); -+} -+ -+/** -+ * vidioc_dqbuf - V4L2 dequeue buffer IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @b: ptr to standard V4L2 buffer structure -+ * -+ * Attempts to dequeue the v4l2_buffer from the buffer queue -+ * associated with the fh through the video buffer library API. If the -+ * buffer is a user space buffer, then this function will also requeue it, -+ * as user does not expect to do this. -+ */ -+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ int rval; -+ -+videobuf_dqbuf_again: -+ rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK); -+ -+ /* -+ * This is a hack. We don't want to show -EIO to the user -+ * space. Requeue the buffer and try again if we're not doing -+ * this in non-blocking mode. -+ */ -+ if (rval == -EIO) { -+ videobuf_qbuf(&ofh->vbq, b); -+ if (!(file->f_flags & O_NONBLOCK)) -+ goto videobuf_dqbuf_again; -+ /* -+ * We don't have a videobuf_buffer now --- maybe next -+ * time... -+ */ -+ rval = -EAGAIN; -+ } -+ -+ return rval; -+} -+ -+/** -+ * vidioc_streamon - V4L2 streamon IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @i: V4L2 buffer type -+ * -+ * Attempts to start streaming by enabling the sensor interface and turning -+ * on video buffer streaming through the video buffer library API. Upon -+ * success the function returns 0, otherwise an error code is returned. -+ */ -+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ if (vdev->streaming) { -+ rval = -EBUSY; -+ goto out; -+ } -+ -+ rval = omap34xxcam_slave_power_set(vdev, V4L2_POWER_ON, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+ if (rval) { -+ dev_dbg(&vdev->vfd->dev, -+ "omap34xxcam_slave_power_set failed\n"); -+ goto out; -+ } -+ -+ isp_start(isp); -+ -+ isp_set_callback(isp, CBK_CATCHALL, omap34xxcam_daemon_event_cb, -+ (void *)vdev, NULL); -+ -+ rval = videobuf_streamon(&ofh->vbq); -+ if (rval) { -+ isp_stop(isp); -+ omap34xxcam_slave_power_set( -+ vdev, V4L2_POWER_STANDBY, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+ } else -+ vdev->streaming = file; -+ -+out: -+ mutex_unlock(&vdev->mutex); -+ -+ if (!rval) -+ omap34xxcam_daemon_req_hw_reconfig( -+ vdev, -+ OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMON); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_streamoff - V4L2 streamoff IOCTL handler -+ * @file: ptr. 
to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @i: V4L2 buffer type -+ * -+ * Attempts to stop streaming by flushing all scheduled work, waiting on -+ * any queued buffers to complete and then stopping the ISP and turning -+ * off video buffer streaming through the video buffer library API. Upon -+ * success the function returns 0, otherwise an error code is returned. -+ */ -+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ struct videobuf_queue *q = &ofh->vbq; -+ int rval; -+ -+ omap34xxcam_daemon_req_hw_reconfig( -+ vdev, OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMOFF); -+ -+ mutex_lock(&vdev->mutex); -+ -+ if (vdev->streaming == file) -+ isp_stop(isp); -+ -+ rval = videobuf_streamoff(q); -+ if (!rval) { -+ vdev->streaming = NULL; -+ -+ omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+ isp_unset_callback(isp, CBK_CATCHALL); -+ } -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_enum_input - V4L2 enumerate input IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @inp: V4L2 input type information structure -+ * -+ * Fills in v4l2_input structure. Returns 0. -+ */ -+static int vidioc_enum_input(struct file *file, void *fh, -+ struct v4l2_input *inp) -+{ -+ if (inp->index > 0) -+ return -EINVAL; -+ -+ strlcpy(inp->name, "camera", sizeof(inp->name)); -+ inp->type = V4L2_INPUT_TYPE_CAMERA; -+ -+ return 0; -+} -+ -+/** -+ * vidioc_g_input - V4L2 get input IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @i: address to hold index of input supported -+ * -+ * Sets index to 0. -+ */ -+static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) -+{ -+ *i = 0; -+ -+ return 0; -+} -+ -+/** -+ * vidioc_s_input - V4L2 set input IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @i: index of input selected -+ * -+ * 0 is only index supported. -+ */ -+static int vidioc_s_input(struct file *file, void *fh, unsigned int i) -+{ -+ if (i > 0) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+/** -+ * vidioc_queryctrl - V4L2 query control IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 query control ioctl structure -+ * -+ * If the requested control is supported, returns the control information -+ * in the v4l2_queryctrl structure. Otherwise, returns -EINVAL if the -+ * control is not supported. If the sensor being used is a "smart sensor", -+ * this request is passed to the sensor driver, otherwise the ISP is -+ * queried and if it does not support the requested control, the request -+ * is forwarded to the "raw" sensor driver to see if it supports it. 
-+ */ -+static int vidioc_queryctrl(struct file *file, void *fh, -+ struct v4l2_queryctrl *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_queryctrl a_tmp; -+ int best_slave = -1; -+ u32 best_ctrl = (u32)-1; -+ int i; -+ -+ if (vdev->vdev_sensor_config.sensor_isp) -+ return vidioc_int_queryctrl(vdev->vdev_sensor, a); -+ -+ /* No next flags: try slaves directly. */ -+ if (!(a->id & V4L2_CTRL_FLAG_NEXT_CTRL)) { -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ if (!vidioc_int_queryctrl(vdev->slave[i], a)) -+ return 0; -+ } -+ return isp_queryctrl(a); -+ } -+ -+ /* Find slave with smallest next control id. */ -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ a_tmp = *a; -+ -+ if (vidioc_int_queryctrl(vdev->slave[i], &a_tmp)) -+ continue; -+ -+ if (a_tmp.id < best_ctrl) { -+ best_slave = i; -+ best_ctrl = a_tmp.id; -+ } -+ } -+ -+ a_tmp = *a; -+ if (!isp_queryctrl(&a_tmp)) { -+ if (a_tmp.id < best_ctrl) { -+ *a = a_tmp; -+ -+ return 0; -+ } -+ } -+ -+ if (best_slave == -1) -+ return -EINVAL; -+ -+ a->id = best_ctrl; -+ return vidioc_int_queryctrl(vdev->slave[best_slave], a); -+} -+ -+/** -+ * vidioc_querymenu - V4L2 query menu IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 query menu ioctl structure -+ * -+ * If the requested control is supported, returns the menu information -+ * in the v4l2_querymenu structure. Otherwise, returns -EINVAL if the -+ * control is not supported or is not a menu. If the sensor being used -+ * is a "smart sensor", this request is passed to the sensor driver, -+ * otherwise the ISP is queried and if it does not support the requested -+ * menu control, the request is forwarded to the "raw" sensor driver to -+ * see if it supports it. -+ */ -+static int vidioc_querymenu(struct file *file, void *fh, -+ struct v4l2_querymenu *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ int i; -+ -+ if (vdev->vdev_sensor_config.sensor_isp) -+ return vidioc_int_querymenu(vdev->vdev_sensor, a); -+ -+ /* Try slaves directly. 
*/ -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ if (!vidioc_int_querymenu(vdev->slave[i], a)) -+ return 0; -+ } -+ return isp_querymenu(a); -+} -+ -+static int vidioc_g_ext_ctrls(struct file *file, void *fh, -+ struct v4l2_ext_controls *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int i, ctrl_idx, rval = 0; -+ -+ mutex_lock(&vdev->mutex); -+ -+ for (ctrl_idx = 0; ctrl_idx < a->count; ctrl_idx++) { -+ struct v4l2_control ctrl; -+ -+ ctrl.id = a->controls[ctrl_idx].id; -+ -+ if (vdev->vdev_sensor_config.sensor_isp) { -+ rval = vidioc_int_g_ctrl(vdev->vdev_sensor, &ctrl); -+ } else { -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ rval = vidioc_int_g_ctrl(vdev->slave[i], &ctrl); -+ if (!rval) -+ break; -+ } -+ } -+ -+ if (rval) -+ rval = isp_g_ctrl(isp, &ctrl); -+ -+ if (rval) { -+ a->error_idx = ctrl_idx; -+ break; -+ } -+ -+ a->controls[ctrl_idx].value = ctrl.value; -+ } -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+static int vidioc_s_ext_ctrls(struct file *file, void *fh, -+ struct v4l2_ext_controls *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int i, ctrl_idx, rval = 0; -+ -+ mutex_lock(&vdev->mutex); -+ -+ for (ctrl_idx = 0; ctrl_idx < a->count; ctrl_idx++) { -+ struct v4l2_control ctrl; -+ -+ ctrl.id = a->controls[ctrl_idx].id; -+ ctrl.value = a->controls[ctrl_idx].value; -+ -+ if (vdev->vdev_sensor_config.sensor_isp) { -+ rval = vidioc_int_s_ctrl(vdev->vdev_sensor, &ctrl); -+ } else { -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ rval = vidioc_int_s_ctrl(vdev->slave[i], &ctrl); -+ if (!rval) -+ break; -+ } -+ } -+ -+ if (rval) -+ rval = isp_s_ctrl(isp, &ctrl); -+ -+ if (rval) { -+ a->error_idx = ctrl_idx; -+ break; -+ } -+ -+ a->controls[ctrl_idx].value = ctrl.value; -+ } -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_g_parm - V4L2 get parameters IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 stream parameters structure -+ * -+ * If request is for video capture buffer type, handles request by -+ * forwarding to sensor driver. -+ */ -+static int vidioc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ int rval; -+ -+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ rval = vidioc_int_g_parm(vdev->vdev_sensor, a); -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_s_parm - V4L2 set parameters IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 stream parameters structure -+ * -+ * If request is for video capture buffer type, handles request by -+ * first getting current stream parameters from sensor, then forwarding -+ * request to set new parameters to sensor driver. It then attempts to -+ * enable the sensor interface with the new parameters. If this fails, it -+ * reverts back to the previous parameters. 
-+ */ -+static int vidioc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_pix_format pix_tmp_sensor, pix_tmp; -+ int rval; -+ -+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ return -EINVAL; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ if (vdev->streaming) { -+ rval = -EBUSY; -+ goto out; -+ } -+ -+ vdev->want_timeperframe = a->parm.capture.timeperframe; -+ -+ pix_tmp = vdev->want_pix; -+ -+ rval = s_pix_parm(vdev, &pix_tmp_sensor, &pix_tmp, -+ &a->parm.capture.timeperframe); -+ -+out: -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_cropcap - V4L2 crop capture IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 crop capture structure -+ * -+ * If using a "smart" sensor, just forwards request to the sensor driver, -+ * otherwise fills in the v4l2_cropcap values locally. -+ */ -+static int vidioc_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_cropcap *cropcap = a; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ -+ rval = vidioc_int_cropcap(vdev->vdev_sensor, a); -+ -+ if (rval && !vdev->vdev_sensor_config.sensor_isp) { -+ struct v4l2_format f; -+ -+ /* cropcap failed, try to do this via g_fmt_cap */ -+ rval = vidioc_int_g_fmt_cap(vdev->vdev_sensor, &f); -+ if (!rval) { -+ cropcap->bounds.top = 0; -+ cropcap->bounds.left = 0; -+ cropcap->bounds.width = f.fmt.pix.width; -+ cropcap->bounds.height = f.fmt.pix.height; -+ cropcap->defrect = cropcap->bounds; -+ cropcap->pixelaspect.numerator = 1; -+ cropcap->pixelaspect.denominator = 1; -+ } -+ } -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_g_crop - V4L2 get capture crop IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 crop structure -+ * -+ * If using a "smart" sensor, just forwards request to the sensor driver, -+ * otherwise calls the isp functions to fill in current crop values. -+ */ -+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int rval = 0; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ -+ if (vdev->vdev_sensor_config.sensor_isp) -+ rval = vidioc_int_g_crop(vdev->vdev_sensor, a); -+ else -+ rval = isp_g_crop(isp, a); -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+/** -+ * vidioc_s_crop - V4L2 set capture crop IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @a: standard V4L2 crop structure -+ * -+ * If using a "smart" sensor, just forwards request to the sensor driver, -+ * otherwise calls the isp functions to set the current crop values. 
-+ */ -+static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *a) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int rval = 0; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ omap34xxcam_daemon_req_hw_reconfig( -+ vdev, -+ OMAP34XXCAM_DAEMON_HW_RECONFIG_CROP); -+ -+ mutex_lock(&vdev->mutex); -+ -+ if (vdev->vdev_sensor_config.sensor_isp) -+ rval = vidioc_int_s_crop(vdev->vdev_sensor, a); -+ else -+ rval = isp_s_crop(isp, a); -+ -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+static int vidioc_enum_framesizes(struct file *file, void *fh, -+ struct v4l2_frmsizeenum *frms) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_pix_format pix_in; -+ struct v4l2_pix_format pix_out; -+ struct v4l2_fract ival; -+ u32 pixel_format; -+ int rval; -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy()) -+ return -EINVAL; -+ -+ mutex_lock(&vdev->mutex); -+ -+ if (vdev->vdev_sensor_config.sensor_isp) { -+ rval = vidioc_int_enum_framesizes(vdev->vdev_sensor, frms); -+ goto done; -+ } -+ -+ pixel_format = frms->pixel_format; -+ frms->pixel_format = -1; /* ISP does format conversion */ -+ rval = vidioc_int_enum_framesizes(vdev->vdev_sensor, frms); -+ frms->pixel_format = pixel_format; -+ -+ if (rval < 0) -+ goto done; -+ -+ /* Let the ISP pipeline mangle the frame size as it sees fit. */ -+ memset(&pix_out, 0, sizeof(pix_out)); -+ pix_out.width = frms->discrete.width; -+ pix_out.height = frms->discrete.height; -+ pix_out.pixelformat = frms->pixel_format; -+ -+ ival = vdev->want_timeperframe; -+ rval = try_pix_parm(vdev, &pix_in, &pix_out, &ival); -+ if (rval < 0) -+ goto done; -+ -+ frms->discrete.width = pix_out.width; -+ frms->discrete.height = pix_out.height; -+ -+done: -+ mutex_unlock(&vdev->mutex); -+ return rval; -+} -+ -+static int vidioc_enum_frameintervals(struct file *file, void *fh, -+ struct v4l2_frmivalenum *frmi) -+{ -+ struct omap34xxcam_fh *ofh = fh; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct v4l2_frmsizeenum frms; -+ unsigned int frmi_width; -+ unsigned int frmi_height; -+ unsigned int width; -+ unsigned int height; -+ unsigned int max_dist; -+ unsigned int dist; -+ u32 pixel_format; -+ unsigned int i; -+ int rval; -+ -+ mutex_lock(&vdev->mutex); -+ -+ if (vdev->vdev_sensor_config.sensor_isp) { -+ rval = vidioc_int_enum_frameintervals(vdev->vdev_sensor, frmi); -+ goto done; -+ } -+ -+ /* -+ * Frame size enumeration returned sizes mangled by the ISP. -+ * We can't pass the size directly to the sensor for frame -+ * interval enumeration, as they will not be recognized by the -+ * sensor driver. Enumerate the native sensor sizes and select -+ * the one closest to the requested size. -+ */ -+ -+ for (i = 0, max_dist = (unsigned int)-1; ; ++i) { -+ frms.index = i; -+ frms.pixel_format = -1; -+ rval = vidioc_int_enum_framesizes(vdev->vdev_sensor, -+ &frms); -+ if (rval < 0) -+ break; -+ -+ /* -+ * The distance between frame sizes is the size in -+ * pixels of the non-overlapping regions. 
-+ */ -+ dist = min(frms.discrete.width, frmi->width) -+ * min(frms.discrete.height, frmi->height); -+ dist = frms.discrete.width * frms.discrete.height -+ + frmi->width * frmi->height -+ - 2*dist; -+ -+ if (dist < max_dist) { -+ width = frms.discrete.width; -+ height = frms.discrete.height; -+ max_dist = dist; -+ } -+ } -+ -+ if (max_dist == (unsigned int)-1) { -+ rval = -EINVAL; -+ goto done; -+ } -+ -+ pixel_format = frmi->pixel_format; -+ frmi_width = frmi->width; -+ frmi_height = frmi->height; -+ -+ frmi->pixel_format = -1; /* ISP does format conversion */ -+ frmi->width = width; -+ frmi->height = height; -+ rval = vidioc_int_enum_frameintervals(vdev->vdev_sensor, frmi); -+ -+ frmi->pixel_format = pixel_format; -+ frmi->height = frmi_height; -+ frmi->width = frmi_width; -+ -+done: -+ mutex_unlock(&vdev->mutex); -+ return rval; -+} -+ -+/** -+ * vidioc_default - private IOCTL handler -+ * @file: ptr. to system file structure -+ * @fh: ptr to hold address of omap34xxcam_fh struct (per-filehandle data) -+ * @cmd: ioctl cmd value -+ * @arg: ioctl arg value -+ * -+ * If the sensor being used is a "smart sensor", this request is returned to -+ * caller with -EINVAL err code. Otherwise if the control id is the private -+ * VIDIOC_PRIVATE_ISP_AEWB_REQ to update the analog gain or exposure, -+ * then this request is forwared directly to the sensor to incorporate the -+ * feedback. The request is then passed on to the ISP private IOCTL handler, -+ * isp_handle_private() -+ */ -+static int vidioc_default(struct file *file, void *fh, int cmd, void *arg) -+{ -+ struct omap34xxcam_fh *ofh = file->private_data; -+ struct omap34xxcam_videodev *vdev = ofh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int rval; -+ -+ if (vdev->vdev_sensor_config.sensor_isp) { -+ rval = -EINVAL; -+ } else { -+ switch (cmd) { -+ case VIDIOC_ENUM_FRAMESIZES: -+ rval = vidioc_enum_framesizes(file, fh, arg); -+ goto out; -+ case VIDIOC_ENUM_FRAMEINTERVALS: -+ rval = vidioc_enum_frameintervals(file, fh, arg); -+ goto out; -+ case VIDIOC_DAEMON_REQ: -+ rval = omap34xxcam_daemon_req_user(vdev, arg, file); -+ goto out; -+ case VIDIOC_DAEMON_INSTALL: -+ rval = omap34xxcam_daemon_install(file); -+ goto out; -+ case VIDIOC_DAEMON_SET_EVENTS: -+ rval = omap34xxcam_daemon_set_events(vdev, arg, file); -+ goto out; -+ case VIDIOC_DAEMON_DAEMON_REQ_GET: -+ rval = omap34xxcam_daemon_daemon_req_get_user(vdev, -+ arg, -+ file); -+ goto out; -+ case VIDIOC_DAEMON_DAEMON_REQ_COMPLETE: -+ rval = omap34xxcam_daemon_daemon_req_complete_user -+ (vdev, arg, file); -+ goto out; -+ case VIDIOC_PRIVATE_ISP_AEWB_REQ: -+ { -+ /* Need to update sensor first */ -+ struct isph3a_aewb_data *data; -+ struct v4l2_control vc; -+ -+ data = (struct isph3a_aewb_data *) arg; -+ if (data->update & SET_EXPOSURE) { -+ dev_dbg(&vdev->vfd->dev, "using " -+ "VIDIOC_PRIVATE_ISP_AEWB_REQ to set " -+ "exposure is deprecated!\n"); -+ vc.id = V4L2_CID_EXPOSURE; -+ vc.value = data->shutter; -+ mutex_lock(&vdev->mutex); -+ rval = vidioc_int_s_ctrl(vdev->vdev_sensor, -+ &vc); -+ mutex_unlock(&vdev->mutex); -+ if (rval) -+ goto out; -+ } -+ if (data->update & SET_ANALOG_GAIN) { -+ dev_dbg(&vdev->vfd->dev, "using " -+ "VIDIOC_PRIVATE_ISP_AEWB_REQ to set " -+ "gain is deprecated!\n"); -+ vc.id = V4L2_CID_GAIN; -+ vc.value = data->gain; -+ mutex_lock(&vdev->mutex); -+ rval = vidioc_int_s_ctrl(vdev->vdev_sensor, -+ &vc); -+ mutex_unlock(&vdev->mutex); -+ if (rval) -+ goto out; -+ } -+ } -+ break; -+ case VIDIOC_PRIVATE_ISP_AF_REQ: { -+ /* Need to update lens first */ -+ 
struct isp_af_data *data; -+ struct v4l2_control vc; -+ -+ if (!vdev->vdev_lens) { -+ rval = -EINVAL; -+ goto out; -+ } -+ data = (struct isp_af_data *) arg; -+ if (data->update & LENS_DESIRED_POSITION) { -+ dev_dbg(&vdev->vfd->dev, "using " -+ "VIDIOC_PRIVATE_ISP_AF_REQ to set " -+ "lens position is deprecated!\n"); -+ vc.id = V4L2_CID_FOCUS_ABSOLUTE; -+ vc.value = data->desired_lens_direction; -+ mutex_lock(&vdev->mutex); -+ rval = vidioc_int_s_ctrl(vdev->vdev_lens, &vc); -+ mutex_unlock(&vdev->mutex); -+ if (rval) -+ goto out; -+ } -+ } -+ break; -+ } -+ -+ mutex_lock(&vdev->mutex); -+ rval = isp_handle_private(isp, cmd, arg); -+ mutex_unlock(&vdev->mutex); -+ } -+out: -+ return rval; -+} -+ -+/* -+ * -+ * File operations. -+ * -+ */ -+ -+static long omap34xxcam_unlocked_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ return (long)video_ioctl2(file->f_dentry->d_inode, file, cmd, arg); -+} -+ -+/** -+ * omap34xxcam_poll - file operations poll handler -+ * @file: ptr. to system file structure -+ * @wait: system poll table structure -+ * -+ */ -+static unsigned int omap34xxcam_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ struct omap34xxcam_fh *fh = file->private_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ struct videobuf_buffer *vb; -+ -+ if (file == vdev->daemon.file) { -+ unsigned long flags; -+ u32 pending; -+ -+ poll_wait(file, &vdev->daemon.poll_wait, wait); -+ -+ spin_lock_irqsave(&vdev->daemon.event_lock, flags); -+ pending = vdev->daemon.req_pending; -+ spin_unlock_irqrestore(&vdev->daemon.event_lock, flags); -+ -+ if (pending) -+ return POLLIN | POLLRDNORM; -+ else -+ return 0; -+ } -+ -+ mutex_lock(&vdev->mutex); -+ if (vdev->streaming != file) { -+ mutex_unlock(&vdev->mutex); -+ return POLLERR; -+ } -+ mutex_unlock(&vdev->mutex); -+ -+ mutex_lock(&fh->vbq.vb_lock); -+ if (list_empty(&fh->vbq.stream)) { -+ mutex_unlock(&fh->vbq.vb_lock); -+ return POLLERR; -+ } -+ vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream); -+ mutex_unlock(&fh->vbq.vb_lock); -+ -+ poll_wait(file, &vb->done, wait); -+ -+ if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR) -+ return POLLIN | POLLRDNORM; -+ -+ return 0; -+} -+ -+/** -+ * omap34xxcam_mmap - file operations mmap handler -+ * @file: ptr. to system file structure -+ * @vma: system virt. mem. area structure -+ * -+ * Maps a virtual memory area via the video buffer API -+ */ -+static int omap34xxcam_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ struct omap34xxcam_fh *fh = file->private_data; -+ return videobuf_mmap_mapper(&fh->vbq, vma); -+} -+ -+/** -+ * omap34xxcam_open - file operations open handler -+ * @inode: ptr. to system inode structure -+ * @file: ptr. to system file structure -+ * -+ * Allocates and initializes the per-filehandle data (omap34xxcam_fh), -+ * enables the sensor, opens/initializes the ISP interface and the -+ * video buffer queue. Note that this function will allow multiple -+ * file handles to be open simultaneously, however only the first -+ * handle opened will initialize the ISP. It is the application -+ * responsibility to only use one handle for streaming and the others -+ * for control only. -+ * This function returns 0 upon success and -ENODEV upon error. 
-+ */ -+static int omap34xxcam_open(struct inode *inode, struct file *file) -+{ -+ int rval = 0; -+ struct omap34xxcam_videodev *vdev = NULL; -+ struct omap34xxcam_device *cam = omap34xxcam; -+ struct device *isp; -+ struct omap34xxcam_fh *fh; -+ struct v4l2_format sensor_format; -+ int first_user = 0; -+ int i; -+ -+ for (i = 0; i < OMAP34XXCAM_VIDEODEVS; i++) { -+ if (cam->vdevs[i].vfd -+ && cam->vdevs[i].vfd->minor == -+ iminor(file->f_dentry->d_inode)) { -+ vdev = &cam->vdevs[i]; -+ break; -+ } -+ } -+ -+ if (!vdev || !vdev->vfd) -+ return -ENODEV; -+ -+ fh = kzalloc(sizeof(*fh), GFP_KERNEL); -+ if (fh == NULL) -+ return -ENOMEM; -+ -+ fh->vdev = vdev; -+ -+ mutex_lock(&vdev->mutex); -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ if (vdev->slave[i] != v4l2_int_device_dummy() -+ && !try_module_get(vdev->slave[i]->module)) { -+ mutex_unlock(&vdev->mutex); -+ dev_err(&vdev->vfd->dev, "can't try_module_get %s\n", -+ vdev->slave[i]->name); -+ rval = -ENODEV; -+ goto out_try_module_get; -+ } -+ } -+ -+ if (atomic_inc_return(&vdev->users) == 1) { -+ first_user = 1; -+ isp = isp_get(); -+ if (!isp) { -+ rval = -EBUSY; -+ dev_err(&vdev->vfd->dev, "can't get isp\n"); -+ goto out_isp_get; -+ } -+ cam->isp = isp; -+ if (omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY, -+ OMAP34XXCAM_SLAVE_POWER_ALL)) { -+ dev_err(&vdev->vfd->dev, "can't power up slaves\n"); -+ rval = -EBUSY; -+ goto out_slave_power_set_standby; -+ } -+ } -+ -+ if (vdev->vdev_sensor == v4l2_int_device_dummy() || !first_user) -+ goto out_no_pix; -+ -+ /* Get the format the sensor is using. */ -+ rval = vidioc_int_g_fmt_cap(vdev->vdev_sensor, &sensor_format); -+ if (rval) { -+ dev_err(&vdev->vfd->dev, -+ "can't get current pix from sensor!\n"); -+ goto out_vidioc_int_g_fmt_cap; -+ } -+ -+ if (!vdev->pix.width) -+ vdev->pix = sensor_format.fmt.pix; -+ -+ if (!vdev->vdev_sensor_config.sensor_isp) { -+ struct v4l2_pix_format pix; -+ struct v4l2_fract timeperframe = -+ vdev->want_timeperframe; -+ -+ rval = s_pix_parm(vdev, &pix, &vdev->pix, &timeperframe); -+ if (rval) { -+ dev_err(&vdev->vfd->dev, -+ "isp doesn't like the sensor!\n"); -+ goto out_isp_s_fmt_cap; -+ } -+ } -+ -+out_no_pix: -+ mutex_unlock(&vdev->mutex); -+ -+ if (first_user && vdev->daemon.file) { -+ rval = omap34xxcam_daemon_req_hw_init(vdev); -+ if (rval) { -+ mutex_lock(&vdev->mutex); -+ goto out_slave_power_set_standby; -+ } -+ } -+ -+ file->private_data = fh; -+ -+ spin_lock_init(&fh->vbq_lock); -+ -+ videobuf_queue_sg_init(&fh->vbq, &omap34xxcam_vbq_ops, NULL, -+ &fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, -+ V4L2_FIELD_NONE, -+ sizeof(struct videobuf_buffer), fh); -+ -+ return 0; -+ -+out_isp_s_fmt_cap: -+out_vidioc_int_g_fmt_cap: -+ omap34xxcam_slave_power_set(vdev, V4L2_POWER_OFF, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+out_slave_power_set_standby: -+ isp_put(); -+ -+out_isp_get: -+ atomic_dec(&vdev->users); -+ mutex_unlock(&vdev->mutex); -+ -+out_try_module_get: -+ for (i--; i >= 0; i--) -+ if (vdev->slave[i] != v4l2_int_device_dummy()) -+ module_put(vdev->slave[i]->module); -+ -+ kfree(fh); -+ -+ return rval; -+} -+ -+/** -+ * omap34xxcam_release - file operations release handler -+ * @inode: ptr. to system inode structure -+ * @file: ptr. to system file structure -+ * -+ * Complement of omap34xxcam_open. This function will flush any scheduled -+ * work, disable the sensor, close the ISP interface, stop the -+ * video buffer queue from streaming and free the per-filehandle data -+ * (omap34xxcam_fh). 
Note that because multiple open file handles -+ * are allowed, this function will only close the ISP and disable the -+ * sensor when the last open file handle (by count) is closed. -+ * This function returns 0. -+ */ -+static int omap34xxcam_release(struct inode *inode, struct file *file) -+{ -+ struct omap34xxcam_fh *fh = file->private_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ struct device *isp = vdev->cam->isp; -+ int i; -+ -+ if (omap34xxcam_daemon_release(vdev, file)) -+ goto daemon_out; -+ -+ mutex_lock(&vdev->mutex); -+ if (vdev->streaming == file) { -+ isp_stop(isp); -+ videobuf_streamoff(&fh->vbq); -+ omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+ vdev->streaming = NULL; -+ } -+ -+ if (atomic_dec_return(&vdev->users) == 0) { -+ omap34xxcam_slave_power_set(vdev, V4L2_POWER_OFF, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+ isp_put(); -+ } -+ mutex_unlock(&vdev->mutex); -+ -+daemon_out: -+ file->private_data = NULL; -+ -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) -+ if (vdev->slave[i] != v4l2_int_device_dummy()) -+ module_put(vdev->slave[i]->module); -+ -+ kfree(fh); -+ -+ return 0; -+} -+ -+static struct file_operations omap34xxcam_fops = { -+ .owner = THIS_MODULE, -+ .llseek = no_llseek, -+ .unlocked_ioctl = omap34xxcam_unlocked_ioctl, -+ .poll = omap34xxcam_poll, -+ .mmap = omap34xxcam_mmap, -+ .open = omap34xxcam_open, -+ .release = omap34xxcam_release, -+}; -+ -+static void omap34xxcam_vfd_name_update(struct omap34xxcam_videodev *vdev) -+{ -+ struct video_device *vfd = vdev->vfd; -+ int i; -+ -+ strlcpy(vfd->name, CAM_SHORT_NAME, sizeof(vfd->name)); -+ for (i = 0; i <= OMAP34XXCAM_SLAVE_FLASH; i++) { -+ strlcat(vfd->name, "/", sizeof(vfd->name)); -+ if (vdev->slave[i] == v4l2_int_device_dummy()) -+ continue; -+ strlcat(vfd->name, vdev->slave[i]->name, sizeof(vfd->name)); -+ } -+ dev_dbg(&vdev->vfd->dev, "video%d is now %s\n", vfd->num, vfd->name); -+} -+ -+/** -+ * omap34xxcam_device_unregister - V4L2 detach handler -+ * @s: ptr. to standard V4L2 device information structure -+ * -+ * Detach sensor and unregister and release the video device. -+ */ -+static void omap34xxcam_device_unregister(struct v4l2_int_device *s) -+{ -+ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; -+ struct omap34xxcam_hw_config hwc; -+ -+ BUG_ON(vidioc_int_g_priv(s, &hwc) < 0); -+ -+ mutex_lock(&vdev->mutex); -+ -+ if (vdev->slave[hwc.dev_type] != v4l2_int_device_dummy()) { -+ vdev->slave[hwc.dev_type] = v4l2_int_device_dummy(); -+ vdev->slaves--; -+ omap34xxcam_vfd_name_update(vdev); -+ } -+ -+ if (vdev->slaves == 0 && vdev->vfd) { -+ if (vdev->vfd->minor == -1) { -+ /* -+ * The device was never registered, so release the -+ * video_device struct directly. -+ */ -+ video_device_release(vdev->vfd); -+ } else { -+ /* -+ * The unregister function will release the -+ * video_device struct as well as -+ * unregistering it. 
-+ */ -+ video_unregister_device(vdev->vfd); -+ } -+ vdev->vfd = NULL; -+ } -+ -+ mutex_unlock(&vdev->mutex); -+} -+ -+static const struct v4l2_ioctl_ops omap34xxcam_ioctl_ops = { -+ .vidioc_querycap = vidioc_querycap, -+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, -+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, -+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, -+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, -+ .vidioc_reqbufs = vidioc_reqbufs, -+ .vidioc_querybuf = vidioc_querybuf, -+ .vidioc_qbuf = vidioc_qbuf, -+ .vidioc_dqbuf = vidioc_dqbuf, -+ .vidioc_streamon = vidioc_streamon, -+ .vidioc_streamoff = vidioc_streamoff, -+ .vidioc_enum_input = vidioc_enum_input, -+ .vidioc_g_input = vidioc_g_input, -+ .vidioc_s_input = vidioc_s_input, -+ .vidioc_queryctrl = vidioc_queryctrl, -+ .vidioc_querymenu = vidioc_querymenu, -+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls, -+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls, -+ .vidioc_g_parm = vidioc_g_parm, -+ .vidioc_s_parm = vidioc_s_parm, -+ .vidioc_cropcap = vidioc_cropcap, -+ .vidioc_g_crop = vidioc_g_crop, -+ .vidioc_s_crop = vidioc_s_crop, -+ .vidioc_default = vidioc_default, -+}; -+ -+/** -+ * omap34xxcam_device_register - V4L2 attach handler -+ * @s: ptr. to standard V4L2 device information structure -+ * -+ * Allocates and initializes the V4L2 video_device structure, initializes -+ * the sensor, and finally -+ registers the device with V4L2 based on the -+ * video_device structure. -+ * -+ * Returns 0 on success, otherwise an appropriate error code on -+ * failure. -+ */ -+static int omap34xxcam_device_register(struct v4l2_int_device *s) -+{ -+ struct omap34xxcam_videodev *vdev = s->u.slave->master->priv; -+ struct omap34xxcam_hw_config hwc; -+ struct device *isp; -+ int rval; -+ -+ /* We need to check rval just once. The place is here. */ -+ if (vidioc_int_g_priv(s, &hwc)) -+ return -ENODEV; -+ -+ if (vdev->index != hwc.dev_index) -+ return -ENODEV; -+ -+ if (hwc.dev_type < 0 || hwc.dev_type > OMAP34XXCAM_SLAVE_FLASH) -+ return -EINVAL; -+ -+ if (vdev->slave[hwc.dev_type] != v4l2_int_device_dummy()) -+ return -EBUSY; -+ -+ mutex_lock(&vdev->mutex); -+ if (atomic_read(&vdev->users)) { -+ printk(KERN_ERR "%s: we're open (%d), can't register\n", -+ __func__, atomic_read(&vdev->users)); -+ mutex_unlock(&vdev->mutex); -+ return -EBUSY; -+ } -+ -+ vdev->slaves++; -+ vdev->slave[hwc.dev_type] = s; -+ vdev->slave_config[hwc.dev_type] = hwc; -+ -+ if (hwc.dev_type == OMAP34XXCAM_SLAVE_SENSOR) { -+ isp = isp_get(); -+ if (!isp) { -+ rval = -EBUSY; -+ printk(KERN_ERR "%s: can't get ISP, " -+ "sensor init failed\n", __func__); -+ goto err; -+ } -+ vdev->cam->isp = isp; -+ } -+ rval = vidioc_int_dev_init(s); -+ if (rval) -+ goto err_omap34xxcam_slave_init; -+ if (hwc.dev_type == OMAP34XXCAM_SLAVE_SENSOR) { -+ struct v4l2_format format; -+ -+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ rval = vidioc_int_g_fmt_cap(vdev->vdev_sensor, &format); -+ if (rval) -+ rval = -EBUSY; -+ -+ vdev->want_pix = format.fmt.pix; -+ } -+ omap34xxcam_slave_power_set(vdev, V4L2_POWER_OFF, 1 << hwc.dev_type); -+ if (hwc.dev_type == OMAP34XXCAM_SLAVE_SENSOR) -+ isp_put(); -+ -+ /* Are we the first slave? 
*/ -+ if (vdev->slaves == 1) { -+ /* initialize the video_device struct */ -+ vdev->vfd = video_device_alloc(); -+ if (!vdev->vfd) { -+ printk(KERN_ERR "%s: could not allocate " -+ "video device struct\n", __func__); -+ rval = -ENOMEM; -+ goto err; -+ } -+ vdev->vfd->release = video_device_release; -+ vdev->vfd->minor = -1; -+ vdev->vfd->fops = &omap34xxcam_fops; -+ vdev->vfd->ioctl_ops = &omap34xxcam_ioctl_ops; -+ video_set_drvdata(vdev->vfd, vdev); -+ -+ if (video_register_device(vdev->vfd, VFL_TYPE_GRABBER, -+ hwc.dev_minor) < 0) { -+ printk(KERN_ERR "%s: could not register V4L device\n", -+ __func__); -+ vdev->vfd->minor = -1; -+ rval = -EBUSY; -+ goto err; -+ } -+ } -+ -+ omap34xxcam_vfd_name_update(vdev); -+ -+ mutex_unlock(&vdev->mutex); -+ -+ omap34xxcam_daemon_init(vdev); -+ -+ return 0; -+ -+err_omap34xxcam_slave_init: -+ if (hwc.dev_type == OMAP34XXCAM_SLAVE_SENSOR) -+ isp_put(); -+ -+err: -+ if (s == vdev->slave[hwc.dev_type]) { -+ vdev->slave[hwc.dev_type] = v4l2_int_device_dummy(); -+ vdev->slaves--; -+ } -+ -+ mutex_unlock(&vdev->mutex); -+ omap34xxcam_device_unregister(s); -+ -+ return rval; -+} -+ -+static struct v4l2_int_master omap34xxcam_master = { -+ .attach = omap34xxcam_device_register, -+ .detach = omap34xxcam_device_unregister, -+}; -+ -+/* -+ * -+ * Module initialisation and deinitialisation -+ * -+ */ -+ -+static void omap34xxcam_exit(void) -+{ -+ struct omap34xxcam_device *cam = omap34xxcam; -+ int i; -+ -+ if (!cam) -+ return; -+ -+ for (i = 0; i < OMAP34XXCAM_VIDEODEVS; i++) { -+ if (cam->vdevs[i].cam == NULL) -+ continue; -+ -+ v4l2_int_device_unregister(&cam->vdevs[i].master); -+ cam->vdevs[i].cam = NULL; -+ } -+ -+ omap34xxcam = NULL; -+ -+ kfree(cam); -+} -+ -+static int __init omap34xxcam_init(void) -+{ -+ struct omap34xxcam_device *cam; -+ int i; -+ -+ cam = kzalloc(sizeof(*cam), GFP_KERNEL); -+ if (!cam) { -+ printk(KERN_ERR "%s: could not allocate memory\n", __func__); -+ return -ENOMEM; -+ } -+ -+ omap34xxcam = cam; -+ -+ for (i = 0; i < OMAP34XXCAM_VIDEODEVS; i++) { -+ struct omap34xxcam_videodev *vdev = &cam->vdevs[i]; -+ struct v4l2_int_device *m = &vdev->master; -+ -+ m->module = THIS_MODULE; -+ strlcpy(m->name, CAM_NAME, sizeof(m->name)); -+ m->type = v4l2_int_type_master; -+ m->u.master = &omap34xxcam_master; -+ m->priv = vdev; -+ -+ mutex_init(&vdev->mutex); -+ vdev->index = i; -+ vdev->cam = cam; -+ vdev->vdev_sensor = -+ vdev->vdev_lens = -+ vdev->vdev_flash = v4l2_int_device_dummy(); -+ -+ if (v4l2_int_device_register(m)) -+ goto err; -+ } -+ -+ return 0; -+ -+err: -+ omap34xxcam_exit(); -+ return -ENODEV; -+} -+ -+MODULE_AUTHOR("Sakari Ailus "); -+MODULE_DESCRIPTION("OMAP34xx Video for Linux camera driver"); -+MODULE_LICENSE("GPL"); -+ -+late_initcall(omap34xxcam_init); -+module_exit(omap34xxcam_exit); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam-daemon.c linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam-daemon.c ---- linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam-daemon.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam-daemon.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,423 @@ -+/* -+ * drivers/media/video/omap/omap34xcam-daemon.c -+ * -+ * OMAP 3 camera driver daemon support. -+ * -+ * Copyright (C) 2008 Nokia Corporation. 
-+ * -+ * Contact: Sakari Ailus -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "isp/isp.h" -+ -+#include "omap34xxcam.h" -+ -+/* Kernel requests stuff from daemon. */ -+int omap34xxcam_daemon_req(struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_req *req, -+ struct file *file) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ unsigned long flags; -+ static int missing = 10; -+ int rval; -+ -+ if (!d->file) { -+ if (missing > 0) { -+ missing--; -+ dev_info(&vdev->vfd->dev, "%s: daemon is missing!\n", -+ __func__); -+ } -+ return 0; -+ } -+ -+ if (req->max_size > OMAP34XXCAM_DAEMON_REQ_MAX_SIZE -+ || req->size > req->max_size) -+ return -EFBIG; -+ -+ if (d->file == file) { -+ dev_info(&vdev->vfd->dev, "%s: invalid ioctl for daemon!\n", -+ __func__); -+ return -EINVAL; -+ } -+ -+ mutex_lock(&d->request_mutex); -+ d->request_state = OMAP34XXCAM_DAEMON_REQUEST_USER_START; -+ -+ spin_lock_irqsave(&d->event_lock, flags); -+ d->req_pending |= OMAP34XXCAM_DAEMON_SYNC; -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ -+ wake_up_all(&d->poll_wait); -+ -+ d->req = req; -+ up(&d->begin); -+ down(&d->finish); -+ rval = d->req_rval; -+ -+ d->request_state = OMAP34XXCAM_DAEMON_REQUEST_USER_FINISH; -+ -+ mutex_unlock(&d->request_mutex); -+ -+ return rval; -+} -+ -+/* -+ * User space requests stuff from daemon. The same as above but -+ * expects user-space pointers. 
-+ */ -+int omap34xxcam_daemon_req_user(struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_req *req, -+ struct file *file) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ void __user *blob_ptr; -+ int rval = 0; -+ size_t myblob_size; -+ size_t stack_alloc; -+ -+ if (!d->file) -+ return -ENOIOCTLCMD; -+ -+ if (req->max_size > OMAP34XXCAM_DAEMON_REQ_MAX_SIZE -+ || req->size > req->max_size) -+ return -EFBIG; -+ -+ if (req->max_size > OMAP34XXCAM_DAEMON_REQ_STACK_ALLOC) { -+ myblob_size = 0; -+ stack_alloc = 0; -+ } else { -+ myblob_size = req->max_size; -+ stack_alloc = 1; -+ } -+ -+ { -+ char myblob[myblob_size]; -+ void *tmp; -+ -+ if (stack_alloc) -+ tmp = myblob; -+ else { -+ tmp = vmalloc(req->size); -+ if (tmp == NULL) -+ return -ENOMEM; -+ } -+ -+ blob_ptr = req->blob; -+ req->blob = tmp; -+ -+/* printk(KERN_INFO "%s: request size %d, blob %p\n", */ -+/* __func__, req->size, req->blob); */ -+ if (copy_from_user(tmp, blob_ptr, req->size)) { -+ printk(KERN_INFO "%s: copy_from_user failed\n", -+ __func__); -+ rval = -EFAULT; -+ goto out_free; -+ } -+ -+ rval = omap34xxcam_daemon_req(vdev, req, file); -+ if (rval) { -+ printk(KERN_INFO "%s: request failed, error %d\n", -+ __func__, rval); -+ goto out_free; -+ } -+ -+ if (req->max_size > OMAP34XXCAM_DAEMON_REQ_MAX_SIZE -+ || req->size > req->max_size) { -+ rval = -EFBIG; -+ goto out_free; -+ } -+ -+ req->blob = blob_ptr; -+ if (copy_to_user(blob_ptr, tmp, req->size)) { -+ printk(KERN_INFO "%s: copy_to_user failed\n", __func__); -+ rval = -EFAULT; -+ } -+ -+ out_free: -+ if (!stack_alloc) -+ vfree(tmp); -+ -+/* printk(KERN_INFO "%s: request end\n", __func__); */ -+ } -+ return rval; -+} -+ -+/* Get an event. Only daemon calls this. */ -+int omap34xxcam_daemon_daemon_req_get_user( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *get, -+ struct file *file) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ unsigned long flags; -+ u32 pending; -+ int rval; -+ -+ mutex_lock(&d->mutex); -+ if (d->file != file) { -+ rval = -EBUSY; -+ goto out; -+ } -+ -+ spin_lock_irqsave(&d->event_lock, flags); -+ pending = d->req_pending; -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ -+/* printk(KERN_INFO "%s: pending %x\n", __func__, pending); */ -+ -+ if (pending & OMAP34XXCAM_DAEMON_SYNC) { -+ get->u.sync = 1; -+ -+ rval = omap34xxcam_daemon_daemon_req_sync(vdev, get); -+ if (!rval) { -+ spin_lock_irqsave(&d->event_lock, flags); -+ d->req_pending &= ~OMAP34XXCAM_DAEMON_SYNC; -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ } -+ } else if (pending & OMAP34XXCAM_DAEMON_ASYNC) { -+ get->u.sync = 0; -+ -+ rval = omap34xxcam_daemon_daemon_req_async(vdev, get); -+ if (!rval) { -+ spin_lock_irqsave(&d->event_lock, flags); -+ d->req_pending &= ~OMAP34XXCAM_DAEMON_ASYNC; -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ } -+ } else { -+ rval = -EINVAL; -+ } -+ -+out: -+ mutex_unlock(&d->mutex); -+ return rval; -+} -+ -+/* Complete an event. Only daemon calls this. 
*/ -+int omap34xxcam_daemon_daemon_req_complete_user( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *complete, -+ struct file *file) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ int rval = 0; -+ -+ mutex_lock(&d->mutex); -+ if (d->file != file) { -+ rval = -EBUSY; -+ goto out; -+ } -+ -+ complete->u.rval = d->req_rval; -+/* printk(KERN_INFO "%s: reqest rval %d\n", __func__, d->req_rval); */ -+ -+ if (!d->req) { -+ rval = -EINVAL; -+ goto out; -+ } -+ -+ if (d->req->max_size < complete->req.size) { -+ d->req_rval = -EFBIG; -+ rval = -EFBIG; -+ goto out_up; -+ } -+ -+ d->req->size = complete->req.size; -+ -+ if (copy_from_user(d->req->blob, complete->req.blob, -+ d->req->size)) { -+ printk(KERN_INFO "%s: copy_from_user failed\n", __func__); -+ d->req_rval = -EINVAL; -+ rval = -EFAULT; -+ goto out_up; -+ } -+ -+out_up: -+/* d->req_rval = complete->u.rval; */ -+ d->request_state = OMAP34XXCAM_DAEMON_REQUEST_DAEMON_FINISH; -+ up(&d->finish); -+ -+out: -+ mutex_unlock(&d->mutex); -+ return 0; -+} -+ -+void omap34xxcam_daemon_init(struct omap34xxcam_videodev *vdev) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ -+ mutex_init(&d->mutex); -+ mutex_init(&d->request_mutex); -+ init_waitqueue_head(&d->poll_wait); -+ sema_init(&d->begin, 0); -+ sema_init(&d->finish, 0); -+ spin_lock_init(&d->event_lock); -+} -+ -+int omap34xxcam_daemon_install(struct file *file) -+{ -+ struct omap34xxcam_fh *fh = file->private_data; -+ struct omap34xxcam_videodev *vdev = fh->vdev; -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ int rval = 0; -+ -+/* if (!capable(CAP_SYS_ADMIN)) */ -+/* return -EPERM; */ -+ -+ mutex_lock(&vdev->mutex); -+ mutex_lock(&d->mutex); -+ -+ if (d->file) { -+ mutex_unlock(&d->mutex); -+ mutex_unlock(&vdev->mutex); -+ return -EBUSY; -+ } -+ -+ d->file = file; -+ -+ mutex_unlock(&d->mutex); -+ -+ /* Drop us from use count, except the modules. 
*/ -+ if (atomic_dec_return(&vdev->users) == 0) { -+ omap34xxcam_slave_power_set(vdev, V4L2_POWER_OFF, -+ OMAP34XXCAM_SLAVE_POWER_ALL); -+ isp_put(); -+ } -+ mutex_unlock(&vdev->mutex); -+ -+ return rval; -+} -+ -+int omap34xxcam_daemon_release(struct omap34xxcam_videodev *vdev, -+ struct file *file) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ -+ if (d->file != file) -+ return 0; -+ -+ mutex_lock(&d->mutex); -+ -+ /* printk(KERN_ALERT "%s: state %d\n", __func__, -+ * d->request_state); */ -+ switch (d->request_state) { -+ case OMAP34XXCAM_DAEMON_REQUEST_USER_START: -+ down(&d->begin); -+ case OMAP34XXCAM_DAEMON_REQUEST_DAEMON_START: -+ d->req_rval = -EBUSY; -+ d->request_state = -+ OMAP34XXCAM_DAEMON_REQUEST_DAEMON_FINISH; -+ up(&d->finish); -+ d->request_state = -+ OMAP34XXCAM_DAEMON_REQUEST_DAEMON_FINISH; -+ break; -+ case OMAP34XXCAM_DAEMON_REQUEST_DAEMON_FINISH: -+ break; -+ case OMAP34XXCAM_DAEMON_REQUEST_USER_FINISH: -+ break; -+ } -+ d->file = NULL; -+ -+ mutex_unlock(&d->mutex); -+ -+ return 1; -+} -+ -+void omap34xxcam_daemon_event_cb(unsigned long status, int (*arg1) -+ (struct videobuf_buffer *vb), void *arg2) -+{ -+ struct omap34xxcam_videodev *vdev = -+ (struct omap34xxcam_videodev *)arg1; -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ struct timeval stamp; -+ unsigned long flags; -+ u32 event = 0; -+ -+ if (status & HIST_DONE) -+ event |= OMAP34XXCAM_DAEMON_EVENT_HIST_DONE; -+ if (status & H3A_AWB_DONE) -+ event |= OMAP34XXCAM_DAEMON_EVENT_H3A_AWB_DONE; -+ if (status & H3A_AF_DONE) -+ event |= OMAP34XXCAM_DAEMON_EVENT_H3A_AF_DONE; -+ if (status & HS_VS) -+ event |= OMAP34XXCAM_DAEMON_EVENT_HS_VS; -+ -+ spin_lock_irqsave(&d->event_lock, flags); -+ -+ event &= d->event_mask; -+ if (!event) { -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ return; -+ } -+ -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ -+ /* Enable interrupts during do_gettimeofday */ -+ do_gettimeofday(&stamp); -+ -+ spin_lock_irqsave(&d->event_lock, flags); -+ -+ if (event & OMAP34XXCAM_DAEMON_EVENT_HIST_DONE) -+ d->event.hist_done_stamp = stamp; -+ if (event & OMAP34XXCAM_DAEMON_EVENT_H3A_AWB_DONE) -+ d->event.h3a_awb_done_stamp = stamp; -+ if (event & OMAP34XXCAM_DAEMON_EVENT_H3A_AF_DONE) -+ d->event.h3a_af_done_stamp = stamp; -+ if (event & OMAP34XXCAM_DAEMON_EVENT_HS_VS) -+ d->event.hs_vs_stamp = stamp; -+ -+ d->event.mask |= event; -+ -+ if (d->event.mask) { -+ d->req_pending |= OMAP34XXCAM_DAEMON_ASYNC; -+ wake_up_all(&d->poll_wait); -+ } -+ -+ spin_unlock_irqrestore(&d->event_lock, flags); -+} -+ -+int omap34xxcam_daemon_set_events(struct omap34xxcam_videodev *vdev, u32 *mask, -+ struct file *file) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ unsigned long flags; -+ int rval = 0; -+ -+ mutex_lock(&d->mutex); -+ -+ if (d->file != file) { -+ rval = -EBUSY; -+ goto out; -+ } -+ -+ spin_lock_irqsave(&d->event_lock, flags); -+ d->event_mask = *mask; -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ -+out: -+ mutex_unlock(&d->mutex); -+ -+ return rval; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam-daemon-req.c linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam-daemon-req.c ---- linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam-daemon-req.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam-daemon-req.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,126 @@ -+/* -+ * drivers/media/video/omap/omap34xcam-daemon-req.c -+ * -+ * OMAP 3 camera driver daemon support. 
-+ * -+ * Copyright (C) 2008 Nokia Corporation. -+ * -+ * Contact: Sakari Ailus -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "isp/isp.h" -+ -+#include "omap34xxcam.h" -+ -+/* -+ * request handlers for specific request --- to be called from -+ * application context -+ */ -+ -+int omap34xxcam_daemon_req_hw_init(struct omap34xxcam_videodev *vdev) -+{ -+ struct omap34xxcam_daemon_req req; -+ -+ req.size = req.max_size = 0; -+ req.type = OMAP34XXCAM_DAEMON_REQ_HW_INIT; -+ req.blob = NULL; -+ -+ return omap34xxcam_daemon_req(vdev, &req, NULL); -+} -+ -+int omap34xxcam_daemon_req_hw_reconfig(struct omap34xxcam_videodev *vdev, -+ u32 what) -+{ -+ struct omap34xxcam_daemon_req req; -+ struct omap34xxcam_daemon_req_hw_reconfig hw_reconfig; -+ -+ req.size = req.max_size = sizeof(hw_reconfig); -+ req.type = OMAP34XXCAM_DAEMON_REQ_HW_RECONFIG; -+ req.blob = &hw_reconfig; -+ -+ hw_reconfig.mask = what; -+ -+ return omap34xxcam_daemon_req(vdev, &req, NULL); -+} -+ -+/* request handlers --- to be called from daemon context */ -+ -+/* Any synchronous request. */ -+int omap34xxcam_daemon_daemon_req_sync( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *get) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ -+ if (down_interruptible(&d->begin)) -+ return -ERESTARTSYS; -+ -+ d->request_state = OMAP34XXCAM_DAEMON_REQUEST_DAEMON_START; -+ -+ if (get->req.max_size < d->req->size) { -+ d->req_rval = -E2BIG; -+ up(&d->finish); -+ return -E2BIG; -+ } -+ get->req.size = d->req->size; -+ get->req.type = d->req->type; -+ -+/* printk(KERN_INFO "%s: size %d\n", __func__, get->req.size); */ -+/* printk(KERN_INFO "%s: maximum size %d\n", */ -+/* __func__, get->req.max_size); */ -+/* printk(KERN_INFO "%s: blob %p\n",__func__,get->req.blob); */ -+ -+ if (copy_to_user(get->req.blob, d->req->blob, d->req->size)) { -+ printk(KERN_INFO "%s: copy_to_user failed\n", __func__); -+ d->req_rval = -EINVAL; -+ up(&d->finish); -+ return -EFAULT; -+ } -+ -+ return 0; -+} -+ -+/* The only async request is to get ISP events. 
*/ -+int omap34xxcam_daemon_daemon_req_async( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *get) -+{ -+ struct omap34xxcam_daemon *d = &vdev->daemon; -+ unsigned long flags; -+ int rval = 0; -+ -+ if (get->req.max_size < sizeof(d->event)) -+ return -E2BIG; -+ -+ get->req.size = sizeof(d->event); -+ get->req.type = OMAP34XXCAM_DAEMON_REQ_EVENTS; -+ -+ spin_lock_irqsave(&d->event_lock, flags); -+ if (copy_to_user(get->req.blob, &d->event, sizeof(d->event))) -+ rval = -EFAULT; -+ spin_unlock_irqrestore(&d->event_lock, flags); -+ -+ return rval; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam.h linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam.h ---- linux-omap-2.6.28-omap1/drivers/media/video/omap34xxcam.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/omap34xxcam.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,181 @@ -+/* -+ * omap34xxcam.h -+ * -+ * Copyright (C) 2006--2009 Nokia Corporation -+ * Copyright (C) 2007--2009 Texas Instruments -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * Originally based on the OMAP 2 camera driver. -+ * -+ * Written by Sakari Ailus -+ * Tuukka Toivonen -+ * Sergio Aguirre -+ * Mohit Jalori -+ * Sameer Venkatraman -+ * Leonides Martinez -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef OMAP34XXCAM_H -+#define OMAP34XXCAM_H -+ -+#include -+#include -+#include "isp/isp.h" -+ -+#define CAM_NAME "omap34xxcam" -+#define CAM_SHORT_NAME "omap3" -+ -+#define OMAP34XXCAM_XCLK_NONE -1 -+#define OMAP34XXCAM_XCLK_A 0 -+#define OMAP34XXCAM_XCLK_B 1 -+ -+#define OMAP34XXCAM_SLAVE_SENSOR 0 -+#define OMAP34XXCAM_SLAVE_LENS 1 -+#define OMAP34XXCAM_SLAVE_FLASH 2 /* This is the last slave! */ -+ -+/* mask for omap34xxcam_slave_power_set */ -+#define OMAP34XXCAM_SLAVE_POWER_SENSOR (1 << OMAP34XXCAM_SLAVE_SENSOR) -+#define OMAP34XXCAM_SLAVE_POWER_LENS (1 << OMAP34XXCAM_SLAVE_LENS) -+#define OMAP34XXCAM_SLAVE_POWER_SENSOR_LENS \ -+ (OMAP34XXCAM_SLAVE_POWER_SENSOR | OMAP34XXCAM_SLAVE_POWER_LENS) -+#define OMAP34XXCAM_SLAVE_POWER_FLASH (1 << OMAP34XXCAM_SLAVE_FLASH) -+#define OMAP34XXCAM_SLAVE_POWER_ALL -1 -+ -+#define OMAP34XXCAM_VIDEODEVS 4 -+ -+struct omap34xxcam_device; -+struct omap34xxcam_videodev; -+ -+/** -+ * struct omap34xxcam_sensor_config - struct for vidioc_int_g_priv ioctl -+ * @sensor_isp: Is sensor smart/SOC or raw -+ * @capture_mem: Size limit to mmap buffers. -+ * @ival_default: Default frame interval for sensor. 
-+ */ -+struct omap34xxcam_sensor_config { -+ int sensor_isp; -+ u32 capture_mem; -+ struct v4l2_fract ival_default; -+}; -+ -+struct omap34xxcam_lens_config { -+}; -+ -+struct omap34xxcam_flash_config { -+}; -+ -+struct omap34xxcam_hw_config { -+ int dev_index; /* Index in omap34xxcam_sensors */ -+ int dev_minor; /* Video device minor number */ -+ int dev_type; /* OMAP34XXCAM_SLAVE_* */ -+ union { -+ struct omap34xxcam_sensor_config sensor; -+ struct omap34xxcam_lens_config lens; -+ struct omap34xxcam_flash_config flash; -+ } u; -+}; -+ -+/** -+ * struct omap34xxcam_videodev - per /dev/video* structure -+ * @mutex: serialises access to this structure -+ * @cam: pointer to cam hw structure -+ * @master: we are v4l2_int_device master -+ * @sensor: sensor device -+ * @lens: lens device -+ * @flash: flash device -+ * @slaves: how many slaves we have at the moment -+ * @vfd: our video device -+ * @index: index of this structure in cam->vdevs -+ * @users: how many users we have -+ * @power_state: Current power state -+ * @power_state_wish: New power state when poweroff_timer expires -+ * @power_state_mask: Bitmask of devices to set the new power state -+ * @poweroff_timer: Timer for dispatching poweroff_work -+ * @poweroff_work: Work for slave power state change -+ * @sensor_config: ISP-speicific sensor configuration -+ * @lens_config: ISP-speicific lens configuration -+ * @flash_config: ISP-speicific flash configuration -+ * @streaming: streaming file handle, if streaming is enabled -+ * @want_timeperframe: Desired timeperframe -+ * @want_pix: Desired pix -+ * @pix: Current pix -+ */ -+struct omap34xxcam_videodev { -+ struct mutex mutex; /* serialises access to this structure */ -+ -+ struct omap34xxcam_device *cam; -+ struct v4l2_int_device master; -+ -+#define vdev_sensor slave[OMAP34XXCAM_SLAVE_SENSOR] -+#define vdev_lens slave[OMAP34XXCAM_SLAVE_LENS] -+#define vdev_flash slave[OMAP34XXCAM_SLAVE_FLASH] -+ struct v4l2_int_device *slave[OMAP34XXCAM_SLAVE_FLASH + 1]; -+ -+ struct omap34xxcam_daemon daemon; -+ -+ /* number of slaves attached */ -+ int slaves; -+ -+ /*** video device parameters ***/ -+ struct video_device *vfd; -+ -+ /*** general driver state information ***/ -+ int index; -+ atomic_t users; -+ enum v4l2_power power_state[OMAP34XXCAM_SLAVE_FLASH + 1]; -+#define vdev_sensor_config slave_config[OMAP34XXCAM_SLAVE_SENSOR].u.sensor -+#define vdev_lens_config slave_config[OMAP34XXCAM_SLAVE_LENS].u.lens -+#define vdev_flash_config slave_config[OMAP34XXCAM_SLAVE_FLASH].u.flash -+ struct omap34xxcam_hw_config slave_config[OMAP34XXCAM_SLAVE_FLASH + 1]; -+ -+ /*** capture data ***/ -+ struct file *streaming; -+ struct v4l2_fract want_timeperframe; -+ struct v4l2_pix_format want_pix; -+ struct v4l2_pix_format pix; -+}; -+ -+/** -+ * struct omap34xxcam_device - per-device data structure -+ * @vdevs: /dev/video specific structures -+ */ -+struct omap34xxcam_device { -+ struct omap34xxcam_videodev vdevs[OMAP34XXCAM_VIDEODEVS]; -+ struct device *isp; -+}; -+ -+/** -+ * struct omap34xxcam_fh - per-filehandle data structure -+ * @vbq_lock: spinlock for the videobuf queue -+ * @vbq: V4L2 video buffer queue structure -+ * @field_count: field counter for videobuf_buffer -+ * @vdev: our /dev/video specific structure -+ */ -+struct omap34xxcam_fh { -+ spinlock_t vbq_lock; /* spinlock for the videobuf queue */ -+ struct videobuf_queue vbq; -+ atomic_t field_count; -+ struct omap34xxcam_videodev *vdev; -+}; -+ -+int omap34xxcam_slave_power_set(struct omap34xxcam_videodev *vdev, -+ enum v4l2_power power, 
int mask); -+ -+#endif /* ifndef OMAP34XXCAM_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/smiaregs.c linux-omap-2.6.28-nokia1/drivers/media/video/smiaregs.c ---- linux-omap-2.6.28-omap1/drivers/media/video/smiaregs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/smiaregs.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,718 @@ -+/* -+ * drivers/media/video/smiaregs.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * -+ * Video control helpers -+ * -+ */ -+ -+int smia_ctrl_find(struct v4l2_queryctrl *ctrls, size_t nctrls, int id) -+{ -+ size_t i; -+ -+ for (i = 0; i < nctrls; i++) -+ if (ctrls[i].id == id) -+ break; -+ -+ if (i == nctrls) -+ i = -EINVAL; -+ -+ return i; -+} -+EXPORT_SYMBOL_GPL(smia_ctrl_find); -+ -+int smia_ctrl_find_next(struct v4l2_queryctrl *ctrls, size_t nctrls, int id) -+{ -+ int i; -+ u32 best = (u32)-1; -+ -+ for (i = 0; i < nctrls; i++) -+ if (ctrls[i].id > id -+ && (best == (u32)-1 || ctrls[i].id < ctrls[best].id)) -+ best = i; -+ -+ if (best == (u32)-1) -+ return -EINVAL; -+ -+ return best; -+} -+EXPORT_SYMBOL_GPL(smia_ctrl_find_next); -+ -+int smia_ctrl_query(struct v4l2_queryctrl *ctrls, size_t nctrls, -+ struct v4l2_queryctrl *a) -+{ -+ int id, i; -+ -+ id = a->id; -+ if (id & V4L2_CTRL_FLAG_NEXT_CTRL) { -+ id &= ~V4L2_CTRL_FLAG_NEXT_CTRL; -+ i = smia_ctrl_find_next(ctrls, nctrls, id); -+ } else { -+ i = smia_ctrl_find(ctrls, nctrls, id); -+ } -+ -+ if (i < 0) -+ return -EINVAL; -+ -+ *a = ctrls[i]; -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(smia_ctrl_query); -+ -+int smia_mode_query(const __u32 *ctrls, size_t nctrls, struct v4l2_queryctrl *a) -+{ -+ static const struct { -+ __u32 id; -+ char *name; -+ } ctrl[] = { -+ { .id = V4L2_CID_MODE_FRAME_WIDTH, .name = "Frame width" }, -+ { .id = V4L2_CID_MODE_FRAME_HEIGHT, .name = "Frame height" }, -+ { .id = V4L2_CID_MODE_VISIBLE_WIDTH, .name = "Visible width" }, -+ { .id = V4L2_CID_MODE_VISIBLE_HEIGHT, -+ .name = "Visible height" }, -+ { .id = V4L2_CID_MODE_PIXELCLOCK, -+ .name = "Pixel clock [Hz]" }, -+ { .id = V4L2_CID_MODE_SENSITIVITY, .name = "Sensitivity" }, -+ }; -+ int id, next = 0, i; -+ -+ id = a->id; -+ if (id & V4L2_CTRL_FLAG_NEXT_CTRL) { -+ id &= ~V4L2_CTRL_FLAG_NEXT_CTRL; -+ next = 1; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(ctrl); i++) { -+ if ((!next && ctrl[i].id == id) || -+ (next && ctrl[i].id > id)) { -+ int j; -+ for (j = 0; j < nctrls; j++) -+ if (ctrl[i].id == ctrls[j]) -+ goto found; -+ } -+ } -+ return -EINVAL; -+ -+found: -+ a->id = ctrl[i].id; -+ strcpy(a->name, ctrl[i].name); -+ a->type = V4L2_CTRL_TYPE_INTEGER; -+ a->minimum = 0; -+ a->maximum = 0; -+ a->step = 0; -+ a->default_value = 0; -+ a->flags = V4L2_CTRL_FLAG_READ_ONLY; -+ -+ return 0; 
-+} -+EXPORT_SYMBOL_GPL(smia_mode_query); -+ -+int smia_mode_g_ctrl(const __u32 *ctrls, size_t nctrls, struct v4l2_control *vc, -+ const struct smia_mode *sm) -+{ -+ int i; -+ -+ for (i = 0; i < nctrls; i++) -+ if (ctrls[i] == vc->id) -+ break; -+ if (i >= nctrls) -+ return -EINVAL; -+ -+ switch (vc->id) { -+ case V4L2_CID_MODE_FRAME_WIDTH: -+ vc->value = sm->width; -+ break; -+ case V4L2_CID_MODE_FRAME_HEIGHT: -+ vc->value = sm->height; -+ break; -+ case V4L2_CID_MODE_VISIBLE_WIDTH: -+ vc->value = sm->window_width; -+ break; -+ case V4L2_CID_MODE_VISIBLE_HEIGHT: -+ vc->value = sm->window_height; -+ break; -+ case V4L2_CID_MODE_PIXELCLOCK: -+ vc->value = sm->pixel_clock; -+ break; -+ case V4L2_CID_MODE_SENSITIVITY: -+ vc->value = sm->sensitivity; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(smia_mode_g_ctrl); -+ -+/* -+ * -+ * Reglist helpers -+ * -+ */ -+ -+static int smia_reglist_cmp(const void *a, const void *b) -+{ -+ const struct smia_reglist **list1 = (const struct smia_reglist **)a, -+ **list2 = (const struct smia_reglist **)b; -+ -+ /* Put real modes in the beginning. */ -+ if ((*list1)->type == SMIA_REGLIST_MODE && -+ (*list2)->type != SMIA_REGLIST_MODE) -+ return -1; -+ else if ((*list1)->type != SMIA_REGLIST_MODE && -+ (*list2)->type == SMIA_REGLIST_MODE) -+ return 1; -+ -+ /* Descending width. */ -+ if ((*list1)->mode.window_width > (*list2)->mode.window_width) -+ return -1; -+ else if ((*list1)->mode.window_width < (*list2)->mode.window_width) -+ return 1; -+ else -+ return 0; -+} -+ -+/* -+ * Prepare register list created by dcc-pulautin for use in kernel. -+ * The pointers in the list are actually offsets from the beginning of -+ * the blob. -+ */ -+int smia_reglist_import(struct smia_meta_reglist *meta) -+{ -+ uintptr_t nlists = 0; -+ -+ if (meta->magic != SMIA_MAGIC) { -+ printk(KERN_ERR "invalid camera sensor firmware (0x%08X)\n", -+ meta->magic); -+ return -EILSEQ; -+ } -+ -+ printk(KERN_ALERT "%s: meta_reglist version %s\n", -+ __func__, meta->version); -+ -+ while (meta->reglist[nlists].offset != 0) { -+ struct smia_reglist *list; -+ -+ meta->reglist[nlists].offset = -+ (uintptr_t)meta + meta->reglist[nlists].offset; -+ -+ list = meta->reglist[nlists].ptr; -+ -+ nlists++; -+ } -+ -+ if (!nlists) -+ return -EINVAL; -+ -+ sort(&meta->reglist[0].offset, nlists, sizeof(meta->reglist[0].offset), -+ smia_reglist_cmp, NULL); -+ -+ nlists = 0; -+ while (meta->reglist[nlists].offset != 0) { -+ struct smia_reglist *list; -+ -+ list = meta->reglist[nlists].ptr; -+ -+ printk(KERN_DEBUG -+ "%s: type %d\tw %d\th %d\tfmt %x\tival %d/%d\tptr %p\n", -+ __func__, -+ list->type, -+ list->mode.window_width, list->mode.window_height, -+ list->mode.pixel_format, -+ list->mode.timeperframe.numerator, -+ list->mode.timeperframe.denominator, -+ (void *)meta->reglist[nlists].offset); -+ -+ nlists++; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_import); -+ -+struct smia_reglist *smia_reglist_find_type(struct smia_meta_reglist *meta, -+ u16 type) -+{ -+ struct smia_reglist **next = &meta->reglist[0].ptr; -+ -+ while (*next) { -+ if ((*next)->type == type) -+ return *next; -+ -+ next++; -+ } -+ -+ return NULL; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_find_type); -+ -+struct smia_reglist **smia_reglist_first(struct smia_meta_reglist *meta) -+{ -+ return &meta->reglist[0].ptr; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_first); -+ -+struct smia_reglist *smia_reglist_find_mode_fmt( -+ struct smia_meta_reglist *meta, -+ struct smia_reglist *current_reglist, 
-+ struct v4l2_format *f) -+{ -+ struct v4l2_pix_format *pix = &f->fmt.pix; -+ struct smia_reglist **list = smia_reglist_first(meta); -+ -+ for (; *list; list++) { -+ struct smia_mode *mode = &(*list)->mode; -+ -+ if ((*list)->type != SMIA_REGLIST_MODE) -+ continue; -+ -+ /* Ignore modes that do cropping on sensor */ -+ if (mode->sensor_window_width + 8 < mode->sensor_width || -+ mode->sensor_window_height + 8 < mode->sensor_height) -+ continue; -+ -+ if (mode->window_width == pix->width && -+ mode->window_height == pix->height) -+ return *list; -+ } -+ -+ return NULL; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_find_mode_fmt); -+ -+#define TIMEPERFRAME_AVG_FPS(t) \ -+ (((t).denominator + ((t).numerator >> 1)) / (t).numerator) -+struct smia_reglist *smia_reglist_find_mode_streamparm( -+ struct smia_meta_reglist *meta, -+ struct smia_reglist *current_reglist, -+ struct v4l2_streamparm *a) -+{ -+ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe; -+ int fps = TIMEPERFRAME_AVG_FPS(*timeperframe); -+ struct smia_reglist **list = smia_reglist_first(meta); -+ struct smia_mode *current_mode = ¤t_reglist->mode; -+ -+ for (; *list; list++) { -+ struct smia_mode *mode = &(*list)->mode; -+ -+ if ((*list)->type != SMIA_REGLIST_MODE) -+ continue; -+ -+ if (mode->window_width != current_mode->window_width -+ || mode->window_height != current_mode->window_height) -+ continue; -+ -+ /* Ignore modes that do cropping on sensor */ -+ if (mode->sensor_window_width + 8 < mode->sensor_width || -+ mode->sensor_window_height + 8 < mode->sensor_height) -+ continue; -+ -+ if (TIMEPERFRAME_AVG_FPS(mode->timeperframe) == fps) -+ return *list; -+ } -+ -+ return NULL; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_find_mode_streamparm); -+ -+#define MAX_FMTS 4 -+int smia_reglist_enum_fmt(struct smia_meta_reglist *meta, -+ struct v4l2_fmtdesc *f) -+{ -+ struct smia_reglist **list = smia_reglist_first(meta); -+ u32 pixelformat[MAX_FMTS]; -+ int npixelformat = 0; -+ -+ if (f->index >= MAX_FMTS) -+ return -EINVAL; -+ -+ for (; *list; list++) { -+ struct smia_mode *mode = &(*list)->mode; -+ int i; -+ -+ if ((*list)->type != SMIA_REGLIST_MODE) -+ continue; -+ -+ for (i = 0; i < npixelformat; i++) { -+ if (pixelformat[i] == mode->pixel_format) -+ break; -+ } -+ if (i != npixelformat) -+ continue; -+ -+ if (f->index == npixelformat) { -+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ f->pixelformat = mode->pixel_format; -+ -+ return 0; -+ } -+ -+ pixelformat[npixelformat] = mode->pixel_format; -+ npixelformat++; -+ } -+ -+ return -EINVAL; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_enum_fmt); -+ -+int smia_reglist_enum_framesizes(struct smia_meta_reglist *meta, -+ struct v4l2_frmsizeenum *frm) -+{ -+ struct smia_reglist **list = smia_reglist_first(meta); -+ int frm_index = frm->index; -+ int width_low = INT_MAX; -+ -+ for (; *list; list++) { -+ struct smia_mode *mode = &(*list)->mode; -+ -+ if ((*list)->type != SMIA_REGLIST_MODE) -+ continue; -+ -+ /* Ignore modes that do cropping on sensor */ -+ if (mode->sensor_window_width + 8 < mode->sensor_width || -+ mode->sensor_window_height + 8 < mode->sensor_height) -+ continue; -+ -+ if (frm->pixel_format != mode->pixel_format -+ && frm->pixel_format != -1) -+ continue; -+ -+ /* -+ * Assume that the modes are in descending width -+ * ordered. 
-+ */ -+ if (mode->window_width >= width_low) -+ continue; -+ -+ width_low = mode->window_width; -+ -+ if (frm_index-- == 0) { -+ frm->type = V4L2_FRMSIZE_TYPE_DISCRETE; -+ frm->discrete.width = mode->window_width; -+ frm->discrete.height = mode->window_height; -+ -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_enum_framesizes); -+ -+int smia_reglist_enum_frameintervals(struct smia_meta_reglist *meta, -+ struct v4l2_frmivalenum *frm) -+{ -+ struct smia_reglist **list = smia_reglist_first(meta); -+ int frm_index = frm->index; -+ -+ /* FIXME: check pixelformat! */ -+ -+ for (; *list; list++) { -+ struct smia_mode *mode = &(*list)->mode; -+ -+ if ((*list)->type != SMIA_REGLIST_MODE) -+ continue; -+ -+ /* Ignore modes that do cropping on sensor */ -+ if (mode->sensor_window_width + 8 < mode->sensor_width || -+ mode->sensor_window_height + 8 < mode->sensor_height) -+ continue; -+ -+ if (frm->pixel_format != mode->pixel_format -+ && frm->pixel_format != -1) -+ continue; -+ -+ if (frm->width != mode->window_width || -+ frm->height != mode->window_height) -+ continue; -+ -+ if (frm_index-- != 0) -+ continue; -+ -+ frm->type = V4L2_FRMIVAL_TYPE_DISCRETE; -+ /* FIXME: try to fix standard... */ -+ frm->discrete = mode->timeperframe; -+ if (frm->reserved[0] == 0xdeafbeef) { -+ frm->discrete.numerator = 1; -+ frm->discrete.denominator = -+ TIMEPERFRAME_AVG_FPS(mode->timeperframe); -+ } -+ -+ return 0; -+ } -+ -+ return -EINVAL; -+} -+EXPORT_SYMBOL_GPL(smia_reglist_enum_frameintervals); -+ -+/* -+ * -+ * Register access helpers -+ * -+ */ -+ -+/* -+ * Read a 8/16/32-bit i2c register. The value is returned in 'val'. -+ * Returns zero if successful, or non-zero otherwise. -+ */ -+int smia_i2c_read_reg(struct i2c_client *client, u16 data_length, -+ u16 reg, u32 *val) -+{ -+ int r; -+ struct i2c_msg msg[1]; -+ unsigned char data[4]; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ if (data_length != SMIA_REG_8BIT && data_length != SMIA_REG_16BIT) -+ return -EINVAL; -+ -+ msg->addr = client->addr; -+ msg->flags = 0; -+ msg->len = 2; -+ msg->buf = data; -+ -+ /* high byte goes out first */ -+ data[0] = (u8) (reg >> 8);; -+ data[1] = (u8) (reg & 0xff); -+ r = i2c_transfer(client->adapter, msg, 1); -+ if (r < 0) -+ goto err; -+ -+ msg->len = data_length; -+ msg->flags = I2C_M_RD; -+ r = i2c_transfer(client->adapter, msg, 1); -+ if (r < 0) -+ goto err; -+ -+ *val = 0; -+ /* high byte comes first */ -+ if (data_length == SMIA_REG_8BIT) -+ *val = data[0]; -+ else -+ *val = (data[0] << 8) + data[1]; -+ -+ return 0; -+ -+err: -+ dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r); -+ -+ return r; -+} -+EXPORT_SYMBOL_GPL(smia_i2c_read_reg); -+ -+static void smia_i2c_create_msg(struct i2c_client *client, u16 len, u16 reg, -+ u32 val, struct i2c_msg *msg, -+ unsigned char *buf) -+{ -+ msg->addr = client->addr; -+ msg->flags = 0; /* Write */ -+ msg->len = 2 + len; -+ msg->buf = buf; -+ -+ /* high byte goes out first */ -+ buf[0] = (u8) (reg >> 8);; -+ buf[1] = (u8) (reg & 0xff); -+ -+ switch (len) { -+ case SMIA_REG_8BIT: -+ buf[2] = (u8) (val) & 0xff; -+ break; -+ case SMIA_REG_16BIT: -+ buf[2] = (u8) (val >> 8) & 0xff; -+ buf[3] = (u8) (val & 0xff); -+ break; -+ case SMIA_REG_32BIT: -+ buf[2] = (u8) (val >> 24) & 0xff; -+ buf[3] = (u8) (val >> 16) & 0xff; -+ buf[4] = (u8) (val >> 8) & 0xff; -+ buf[5] = (u8) (val & 0xff); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+/* -+ * Write to a 8/16-bit register. -+ * Returns zero if successful, or non-zero otherwise. 
-+ */ -+int smia_i2c_write_reg(struct i2c_client *client, u16 data_length, u16 reg, -+ u32 val) -+{ -+ int r; -+ struct i2c_msg msg[1]; -+ unsigned char data[6]; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ if (data_length != SMIA_REG_8BIT && data_length != SMIA_REG_16BIT) -+ return -EINVAL; -+ -+ smia_i2c_create_msg(client, data_length, reg, val, msg, data); -+ -+ r = i2c_transfer(client->adapter, msg, 1); -+ if (r < 0) -+ dev_err(&client->dev, -+ "wrote 0x%x to offset 0x%x error %d\n", val, reg, r); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(smia_i2c_write_reg); -+ -+/* -+ * A buffered write method that puts the wanted register write -+ * commands in a message list and passes the list to the i2c framework -+ */ -+static int smia_i2c_buffered_write_regs(struct i2c_client *client, -+ const struct smia_reg *wnext, int cnt) -+{ -+ /* FIXME: check how big cnt is */ -+ struct i2c_msg msg[cnt]; -+ unsigned char data[cnt][6]; -+ int wcnt = 0; -+ u16 reg, data_length; -+ u32 val; -+ -+ /* Create new write messages for all writes */ -+ while (wcnt < cnt) { -+ data_length = wnext->type; -+ reg = wnext->reg; -+ val = wnext->val; -+ wnext++; -+ -+ smia_i2c_create_msg(client, data_length, reg, -+ val, &msg[wcnt], &data[wcnt][0]); -+ -+ /* Update write count */ -+ wcnt++; -+ } -+ -+ /* Now we send everything ... */ -+ return i2c_transfer(client->adapter, msg, wcnt); -+} -+ -+/* -+ * Write a list of registers to i2c device. -+ * -+ * The list of registers is terminated by SMIA_REG_TERM. -+ * Returns zero if successful, or non-zero otherwise. -+ */ -+int smia_i2c_write_regs(struct i2c_client *client, -+ const struct smia_reg reglist[]) -+{ -+ int r, cnt = 0; -+ const struct smia_reg *next, *wnext; -+ -+ if (!client->adapter) -+ return -ENODEV; -+ -+ if (reglist == NULL) -+ return -EINVAL; -+ -+ /* Initialize list pointers to the start of the list */ -+ next = wnext = reglist; -+ -+ do { -+ /* -+ * We have to go through the list to figure out how -+ * many regular writes we have in a row -+ */ -+ while (next->type != SMIA_REG_TERM -+ && next->type != SMIA_REG_DELAY) { -+ /* -+ * Here we check that the actual lenght fields -+ * are valid -+ */ -+ if (next->type != SMIA_REG_8BIT -+ && next->type != SMIA_REG_16BIT) { -+ dev_err(&client->dev, -+ "Invalid value on entry %d 0x%x\n", -+ cnt, next->type); -+ return -EINVAL; -+ } -+ -+ /* -+ * Increment count of successive writes and -+ * read pointer -+ */ -+ cnt++; -+ next++; -+ } -+ -+ /* Now we start writing ... */ -+ r = smia_i2c_buffered_write_regs(client, wnext, cnt); -+ -+ /* ... and then check that everything was OK */ -+ if (r < 0) { -+ dev_err(&client->dev, "i2c transfer error !!!\n"); -+ return r; -+ } -+ -+ /* -+ * If we ran into a sleep statement when going through -+ * the list, this is where we snooze for the required time -+ */ -+ if (next->type == SMIA_REG_DELAY) { -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ schedule_timeout(msecs_to_jiffies(next->val)); -+ /* -+ * ZZZ ... -+ * Update list pointers and cnt and start over ... 
-+ */ -+ next++; -+ wnext = next; -+ cnt = 0; -+ } -+ } while (next->type != SMIA_REG_TERM); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(smia_i2c_write_regs); -+ -+int smia_i2c_reglist_find_write(struct i2c_client *client, -+ struct smia_meta_reglist *meta, u16 type) -+{ -+ struct smia_reglist *reglist; -+ -+ reglist = smia_reglist_find_type(meta, type); -+ if (IS_ERR(reglist)) -+ return PTR_ERR(reglist); -+ -+ return smia_i2c_write_regs(client, reglist->regs); -+} -+EXPORT_SYMBOL_GPL(smia_i2c_reglist_find_write); -+ -+MODULE_AUTHOR("Sakari Ailus "); -+MODULE_DESCRIPTION("Generic SMIA configuration and i2c register access"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/smia-sensor.c linux-omap-2.6.28-nokia1/drivers/media/video/smia-sensor.c ---- linux-omap-2.6.28-omap1/drivers/media/video/smia-sensor.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/smia-sensor.c 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,1060 @@ -+/* -+ * drivers/media/video/smia-sensor.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Tuukka Toivonen -+ * -+ * Based on code from Toni Leinonen -+ * and Sakari Ailus . -+ * -+ * This driver is based on the Micron MT9T012 camera imager driver -+ * (C) Texas Instruments and Toshiba ET8EK8 driver (C) Nokia. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "smia-sensor.h" -+ -+#define DEFAULT_XCLK 9600000 /* [Hz] */ -+ -+#define SMIA_CTRL_GAIN 0 -+#define SMIA_CTRL_EXPOSURE 1 -+#define SMIA_NCTRLS 2 -+ -+#define CID_TO_CTRL(id) ((id) == V4L2_CID_GAIN ? SMIA_CTRL_GAIN : \ -+ (id) == V4L2_CID_EXPOSURE ? 
\ -+ SMIA_CTRL_EXPOSURE : \ -+ -EINVAL) -+ -+#define VS6555_RESET_SHIFT_HACK 1 -+ -+/* Register definitions */ -+ -+/* Status registers */ -+#define REG_MODEL_ID 0x0000 -+#define REG_REVISION_NUMBER 0x0002 -+#define REG_MANUFACTURER_ID 0x0003 -+#define REG_SMIA_VERSION 0x0004 -+ -+/* Exposure time and gain registers */ -+#define REG_FINE_EXPOSURE 0x0200 -+#define REG_COARSE_EXPOSURE 0x0202 -+#define REG_ANALOG_GAIN 0x0204 -+ -+struct smia_sensor; -+ -+struct smia_sensor_type { -+ u8 manufacturer_id; -+ u16 model_id; -+ char *name; -+ int ev_table_size; -+ u16 *ev_table; -+}; -+ -+/* Current values for V4L2 controls */ -+struct smia_control { -+ s32 minimum; -+ s32 maximum; -+ s32 step; -+ s32 default_value; -+ s32 value; -+ int (*set)(struct smia_sensor *, s32 value); -+}; -+ -+struct smia_sensor { -+ struct i2c_client *i2c_client; -+ struct i2c_driver driver; -+ -+ /* Sensor information */ -+ struct smia_sensor_type *type; -+ u8 revision_number; -+ u8 smia_version; -+ -+ /* V4L2 current control values */ -+ struct smia_control controls[SMIA_NCTRLS]; -+ -+ struct smia_reglist *current_reglist; -+ struct v4l2_int_device *v4l2_int_device; -+ struct v4l2_fract timeperframe; -+ -+ struct smia_sensor_platform_data *platform_data; -+ -+ const struct firmware *fw; -+ struct smia_meta_reglist *meta_reglist; -+ -+ enum v4l2_power power; -+}; -+ -+static int smia_ioctl_queryctrl(struct v4l2_int_device *s, -+ struct v4l2_queryctrl *a); -+static int smia_ioctl_g_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc); -+static int smia_ioctl_s_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc); -+static int smia_ioctl_enum_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_fmtdesc *fmt); -+static int smia_ioctl_g_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_format *f); -+static int smia_ioctl_s_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_format *f); -+static int smia_ioctl_g_parm(struct v4l2_int_device *s, -+ struct v4l2_streamparm *a); -+static int smia_ioctl_s_parm(struct v4l2_int_device *s, -+ struct v4l2_streamparm *a); -+static int smia_ioctl_s_power(struct v4l2_int_device *s, enum v4l2_power state); -+static int smia_ioctl_g_priv(struct v4l2_int_device *s, void *priv); -+static int smia_ioctl_enum_framesizes(struct v4l2_int_device *s, -+ struct v4l2_frmsizeenum *frm); -+static int smia_ioctl_enum_frameintervals(struct v4l2_int_device *s, -+ struct v4l2_frmivalenum *frm); -+static int smia_ioctl_dev_init(struct v4l2_int_device *s); -+ -+/* SMIA-model gain is stored in precalculated tables here. In the model, -+ * reg = (c0-gain*c1) / (gain*m1-m0) -+ * gain = 2^ev -+ * The constants c0, m0, c1 and m1 depend on sensor. -+ */ -+ -+/* Analog gain table for VS6555. 
-+ * m0 = 0 -+ * c0 = 256 -+ * m1 = -1 (erroneously -16 in silicon) -+ * c1 = 256 -+ * step = 16 -+ */ -+static u16 smia_gain_vs6555[] = { -+/* reg EV gain */ -+ 0, /* 0.0 1.00000 */ -+ 16, /* 0.1 1.07177 */ -+ 32, /* 0.2 1.14870 */ -+ 48, /* 0.3 1.23114 */ -+ 64, /* 0.4 1.31951 */ -+ 80, /* 0.5 1.41421 */ -+ 80, /* 0.6 1.51572 */ -+ 96, /* 0.7 1.62450 */ -+ 112, /* 0.8 1.74110 */ -+ 112, /* 0.9 1.86607 */ -+ 128, /* 1.0 2.00000 */ -+ 144, /* 1.1 2.14355 */ -+ 144, /* 1.2 2.29740 */ -+ 160, /* 1.3 2.46229 */ -+ 160, /* 1.4 2.63902 */ -+ 160, /* 1.5 2.82843 */ -+ 176, /* 1.6 3.03143 */ -+ 176, /* 1.7 3.24901 */ -+ 176, /* 1.8 3.48220 */ -+ 192, /* 1.9 3.73213 */ -+ 192, /* 2.0 4.00000 */ -+ 192, /* 2.1 4.28709 */ -+ 208, /* 2.2 4.59479 */ -+ 208, /* 2.3 4.92458 */ -+ 208, /* 2.4 5.27803 */ -+ 208, /* 2.5 5.65685 */ -+ 208, /* 2.6 6.06287 */ -+ 224, /* 2.7 6.49802 */ -+ 224, /* 2.8 6.96440 */ -+ 224, /* 2.9 7.46426 */ -+ 224, /* 3.0 8.00000 */ -+ 224, /* 3.1 8.57419 */ -+ 224, /* 3.2 9.18959 */ -+ 224, /* 3.3 9.84916 */ -+ 224, /* 3.4 10.55606 */ -+ 240, /* 3.5 11.31371 */ -+ 240, /* 3.6 12.12573 */ -+ 240, /* 3.7 12.99604 */ -+ 240, /* 3.8 13.92881 */ -+ 240, /* 3.9 14.92853 */ -+ 240, /* 4.0 16.00000 */ -+}; -+ -+/* Analog gain table for TCM8330MD. -+ * m0 = 1 -+ * c0 = 0 -+ * m1 = 0 -+ * c1 = 36 (MMS uses 29) -+ * step = 1 -+ */ -+static u16 smia_gain_tcm8330md[] = { -+/* reg EV gain */ -+ 36, /* 0.0 1.00000 */ -+ 39, /* 0.1 1.07177 */ -+ 41, /* 0.2 1.14870 */ -+ 44, /* 0.3 1.23114 */ -+ 48, /* 0.4 1.31951 */ -+ 51, /* 0.5 1.41421 */ -+ 55, /* 0.6 1.51572 */ -+ 58, /* 0.7 1.62450 */ -+ 63, /* 0.8 1.74110 */ -+ 67, /* 0.9 1.86607 */ -+ 72, /* 1.0 2.00000 */ -+ 77, /* 1.1 2.14355 */ -+ 83, /* 1.2 2.29740 */ -+ 89, /* 1.3 2.46229 */ -+ 95, /* 1.4 2.63902 */ -+ 102, /* 1.5 2.82843 */ -+ 109, /* 1.6 3.03143 */ -+ 117, /* 1.7 3.24901 */ -+ 125, /* 1.8 3.48220 */ -+ 134, /* 1.9 3.73213 */ -+ 144, /* 2.0 4.00000 */ -+ 154, /* 2.1 4.28709 */ -+ 165, /* 2.2 4.59479 */ -+ 177, /* 2.3 4.92458 */ -+ 190, /* 2.4 5.27803 */ -+ 204, /* 2.5 5.65685 */ -+ 218, /* 2.6 6.06287 */ -+ 234, /* 2.7 6.49802 */ -+ 251, /* 2.8 6.96440 */ -+ 269, /* 2.9 7.46426 */ -+ 288, /* 3.0 8.00000 */ -+}; -+ -+static struct v4l2_int_ioctl_desc smia_ioctl_desc[] = { -+ { vidioc_int_enum_fmt_cap_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_enum_fmt_cap }, -+ { vidioc_int_try_fmt_cap_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_g_fmt_cap }, -+ { vidioc_int_g_fmt_cap_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_g_fmt_cap }, -+ { vidioc_int_s_fmt_cap_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_s_fmt_cap }, -+ { vidioc_int_queryctrl_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_queryctrl }, -+ { vidioc_int_g_ctrl_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_g_ctrl }, -+ { vidioc_int_s_ctrl_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_s_ctrl }, -+ { vidioc_int_g_parm_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_g_parm }, -+ { vidioc_int_s_parm_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_s_parm }, -+ { vidioc_int_s_power_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_s_power }, -+ { vidioc_int_g_priv_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_g_priv }, -+ { vidioc_int_enum_framesizes_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_enum_framesizes }, -+ { vidioc_int_enum_frameintervals_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_enum_frameintervals }, -+ { vidioc_int_dev_init_num, -+ (v4l2_int_ioctl_func *)smia_ioctl_dev_init }, -+}; -+ -+static struct v4l2_int_slave smia_slave = { -+ .ioctls = smia_ioctl_desc, -+ .num_ioctls = ARRAY_SIZE(smia_ioctl_desc), -+}; -+ -+static 
struct smia_sensor smia; -+ -+static struct v4l2_int_device smia_int_device = { -+ .module = THIS_MODULE, -+ .name = SMIA_SENSOR_NAME, -+ .priv = &smia, -+ .type = v4l2_int_type_slave, -+ .u = { -+ .slave = &smia_slave, -+ }, -+}; -+ -+static struct smia_sensor_type smia_sensors[] = { -+ { 0, 0, "unknown", 0, NULL }, -+ { -+ 0x01, 0x022b, "vs6555", -+ ARRAY_SIZE(smia_gain_vs6555), smia_gain_vs6555 -+ }, -+ { -+ 0x0c, 0x208a, "tcm8330md", -+ ARRAY_SIZE(smia_gain_tcm8330md), smia_gain_tcm8330md -+ }, -+}; -+ -+static const __u32 smia_mode_ctrls[] = { -+ V4L2_CID_MODE_FRAME_WIDTH, -+ V4L2_CID_MODE_FRAME_HEIGHT, -+ V4L2_CID_MODE_VISIBLE_WIDTH, -+ V4L2_CID_MODE_VISIBLE_HEIGHT, -+ V4L2_CID_MODE_PIXELCLOCK, -+ V4L2_CID_MODE_SENSITIVITY, -+}; -+ -+/* Return time of one row in microseconds, .8 fixed point format. -+ * If the sensor is not set to any mode, return zero. */ -+static int smia_get_row_time(struct smia_sensor *sensor) -+{ -+ unsigned int clock; /* Pixel clock in Hz>>10 fixed point */ -+ unsigned int rt; /* Row time in .8 fixed point */ -+ -+ if (!sensor->current_reglist) -+ return 0; -+ -+ clock = sensor->current_reglist->mode.pixel_clock; -+ clock = (clock + (1 << 9)) >> 10; -+ rt = sensor->current_reglist->mode.width * (1000000 >> 2); -+ rt = (rt + (clock >> 1)) / clock; -+ -+ return rt; -+} -+ -+/* Convert exposure time `us' to rows. Modify `us' to make it to -+ * correspond to the actual exposure time. -+ */ -+static int smia_exposure_us_to_rows(struct smia_sensor *sensor, s32 *us) -+{ -+ unsigned int rows; /* Exposure value as written to HW (ie. rows) */ -+ unsigned int rt; /* Row time in .8 fixed point */ -+ -+ if (*us < 0) -+ *us = 0; -+ -+ /* Assume that the maximum exposure time is at most ~8 s, -+ * and the maximum width (with blanking) ~8000 pixels. -+ * The formula here is in principle as simple as -+ * rows = exptime / 1e6 / width * pixel_clock -+ * but to get accurate results while coping with value ranges, -+ * have to do some fixed point math. -+ */ -+ -+ rt = smia_get_row_time(sensor); -+ rows = ((*us << 8) + (rt >> 1)) / rt; -+ -+ if (rows > sensor->current_reglist->mode.max_exp) -+ rows = sensor->current_reglist->mode.max_exp; -+ -+ /* Set the exposure time to the rounded value */ -+ *us = (rt * rows + (1 << 7)) >> 8; -+ -+ return rows; -+} -+ -+/* Convert exposure time in rows to microseconds */ -+static int smia_exposure_rows_to_us(struct smia_sensor *sensor, int rows) -+{ -+ return (smia_get_row_time(sensor) * rows + (1 << 7)) >> 8; -+} -+ -+/* Called to change the V4L2 gain control value. This function -+ * rounds and clamps the given value and updates the V4L2 control value. -+ * If power is on, also updates the sensor analog gain. -+ */ -+static int smia_set_gain(struct smia_sensor *sensor, s32 gain) -+{ -+ gain = clamp(gain, -+ sensor->controls[SMIA_CTRL_GAIN].minimum, -+ sensor->controls[SMIA_CTRL_GAIN].maximum); -+ sensor->controls[SMIA_CTRL_GAIN].value = gain; -+ -+ if (sensor->power == V4L2_POWER_OFF) -+ return 0; -+ -+ return smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_16BIT, REG_ANALOG_GAIN, -+ sensor->type->ev_table[gain]); -+} -+ -+/* Called to change the V4L2 exposure control value. This function -+ * rounds and clamps the given value and updates the V4L2 control value. -+ * If power is on, also update the sensor exposure time. -+ * exptime is in microseconds. 
-+ */ -+static int smia_set_exposure(struct smia_sensor *sensor, s32 exptime) -+{ -+ int exposure_rows; -+ -+ exptime = clamp(exptime, sensor->controls[SMIA_CTRL_EXPOSURE].minimum, -+ sensor->controls[SMIA_CTRL_EXPOSURE].maximum); -+ -+ exposure_rows = smia_exposure_us_to_rows(sensor, &exptime); -+ sensor->controls[SMIA_CTRL_EXPOSURE].value = exptime; -+ -+ if (sensor->power == V4L2_POWER_OFF) -+ return 0; -+ -+ return smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_16BIT, REG_COARSE_EXPOSURE, exposure_rows); -+} -+ -+static int smia_stream_on(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ return smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x0100, 0x01); -+} -+ -+static int smia_stream_off(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ return smia_i2c_write_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x0100, 0x00); -+} -+ -+static int smia_update_controls(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ int i; -+ -+ sensor->controls[SMIA_CTRL_EXPOSURE].minimum = 0; -+ sensor->controls[SMIA_CTRL_EXPOSURE].maximum = -+ smia_exposure_rows_to_us(sensor, -+ sensor->current_reglist->mode.max_exp); -+ sensor->controls[SMIA_CTRL_EXPOSURE].step = -+ smia_exposure_rows_to_us(sensor, 1); -+ sensor->controls[SMIA_CTRL_EXPOSURE].default_value = -+ sensor->controls[SMIA_CTRL_EXPOSURE].maximum; -+ if (sensor->controls[SMIA_CTRL_EXPOSURE].value == 0) -+ sensor->controls[SMIA_CTRL_EXPOSURE].value = -+ sensor->controls[SMIA_CTRL_EXPOSURE].maximum; -+ -+ /* Adjust V4L2 control values and write them to the sensor */ -+ -+ for (i = 0; i < ARRAY_SIZE(sensor->controls); i++) { -+ int rval; -+ if (!sensor->controls[i].set) -+ continue; -+ rval = sensor->controls[i].set(sensor, -+ sensor->controls[i].value); -+ if (rval) -+ return rval; -+ } -+ return 0; -+} -+ -+/* Must be called with power already enabled on the sensor */ -+static int smia_configure(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ int rval; -+ -+ rval = smia_i2c_write_regs(sensor->i2c_client, -+ sensor->current_reglist->regs); -+ if (rval) -+ goto fail; -+ -+ /* -+ * FIXME: remove stream_off from here as soon as camera-firmware -+ * is modified to not enable streaming automatically. -+ */ -+ rval = smia_stream_off(s); -+ if (rval) -+ goto fail; -+ -+ rval = smia_update_controls(s); -+ if (rval) -+ goto fail; -+ -+ rval = sensor->platform_data->configure_interface( -+ s, -+ sensor->current_reglist->mode.window_width, -+ sensor->current_reglist->mode.window_height); -+ if (rval) -+ goto fail; -+ -+ return 0; -+ -+fail: -+ dev_err(&sensor->i2c_client->dev, "sensor configuration failed\n"); -+ return rval; -+ -+} -+ -+static int smia_power_off(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ int rval; -+ -+ rval = sensor->platform_data->set_xclk(s, 0); -+ if (rval) -+ return rval; -+ -+ return sensor->platform_data->power_off(s); -+} -+ -+static int smia_power_on(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ struct smia_reglist *reglist = NULL; -+ int rval; -+ unsigned int hz = DEFAULT_XCLK; -+ -+ if (sensor->meta_reglist) { -+ reglist = smia_reglist_find_type(sensor->meta_reglist, -+ SMIA_REGLIST_POWERON); -+ hz = reglist->mode.ext_clock; -+ } -+ -+ rval = sensor->platform_data->power_on(s); -+ if (rval) -+ goto out; -+ -+ sensor->platform_data->set_xclk(s, hz); -+ -+ /* -+ * At least 10 ms is required between xshutdown up and first -+ * i2c transaction. 
Clock must start at least 2400 cycles -+ * before first i2c transaction. -+ */ -+ msleep(10); -+ -+ if (reglist) { -+ rval = smia_i2c_write_regs(sensor->i2c_client, -+ reglist->regs); -+ if (rval) -+ goto out; -+ } -+ -+out: -+ if (rval) -+ smia_power_off(s); -+ -+ return rval; -+} -+ -+static struct v4l2_queryctrl smia_ctrls[] = { -+ { -+ .id = V4L2_CID_GAIN, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Analog gain [0.1 EV]", -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+ { -+ .id = V4L2_CID_EXPOSURE, -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .name = "Exposure time [us]", -+ .flags = V4L2_CTRL_FLAG_SLIDER, -+ }, -+}; -+ -+static int smia_ioctl_queryctrl(struct v4l2_int_device *s, -+ struct v4l2_queryctrl *a) -+{ -+ struct smia_sensor *sensor = s->priv; -+ int rval, ctrl; -+ -+ rval = smia_ctrl_query(smia_ctrls, ARRAY_SIZE(smia_ctrls), a); -+ if (rval) { -+ return smia_mode_query(smia_mode_ctrls, -+ ARRAY_SIZE(smia_mode_ctrls), a); -+ } -+ -+ ctrl = CID_TO_CTRL(a->id); -+ if (ctrl < 0) -+ return ctrl; -+ if (!sensor->controls[ctrl].set) -+ return -EINVAL; -+ -+ a->minimum = sensor->controls[ctrl].minimum; -+ a->maximum = sensor->controls[ctrl].maximum; -+ a->step = sensor->controls[ctrl].step; -+ a->default_value = sensor->controls[ctrl].default_value; -+ -+ return 0; -+} -+ -+static int smia_ioctl_g_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct smia_sensor *sensor = s->priv; -+ int ctrl; -+ -+ int rval = smia_mode_g_ctrl(smia_mode_ctrls, -+ ARRAY_SIZE(smia_mode_ctrls), -+ vc, &sensor->current_reglist->mode); -+ if (rval == 0) -+ return 0; -+ -+ ctrl = CID_TO_CTRL(vc->id); -+ if (ctrl < 0) -+ return ctrl; -+ if (!sensor->controls[ctrl].set) -+ return -EINVAL; -+ vc->value = sensor->controls[ctrl].value; -+ -+ return 0; -+} -+ -+static int smia_ioctl_s_ctrl(struct v4l2_int_device *s, -+ struct v4l2_control *vc) -+{ -+ struct smia_sensor *sensor = s->priv; -+ -+ int ctrl = CID_TO_CTRL(vc->id); -+ if (ctrl < 0) -+ return ctrl; -+ if (!sensor->controls[ctrl].set) -+ return -EINVAL; -+ return sensor->controls[ctrl].set(sensor, vc->value); -+} -+ -+static int smia_ioctl_enum_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_fmtdesc *fmt) -+{ -+ struct smia_sensor *sensor = s->priv; -+ return smia_reglist_enum_fmt(sensor->meta_reglist, fmt); -+} -+ -+static int smia_ioctl_g_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_format *f) -+{ -+ struct smia_sensor *sensor = s->priv; -+ struct v4l2_pix_format *pix = &f->fmt.pix; -+ -+ pix->width = sensor->current_reglist->mode.window_width; -+ pix->height = sensor->current_reglist->mode.window_height; -+ pix->pixelformat = sensor->current_reglist->mode.pixel_format; -+ -+ return 0; -+} -+ -+static int smia_ioctl_s_fmt_cap(struct v4l2_int_device *s, -+ struct v4l2_format *f) -+{ -+ struct smia_sensor *sensor = s->priv; -+ struct smia_reglist *reglist; -+ -+ reglist = smia_reglist_find_mode_fmt(sensor->meta_reglist, -+ sensor->current_reglist, f); -+ if (!reglist) -+ return -EINVAL; -+ sensor->current_reglist = reglist; -+ return smia_update_controls(s); -+} -+ -+static int smia_ioctl_g_parm(struct v4l2_int_device *s, -+ struct v4l2_streamparm *a) -+{ -+ struct smia_sensor *sensor = s->priv; -+ struct v4l2_captureparm *cparm = &a->parm.capture; -+ -+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ return -EINVAL; -+ -+ memset(a, 0, sizeof(*a)); -+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ -+ cparm->capability = V4L2_CAP_TIMEPERFRAME; -+ cparm->timeperframe = sensor->current_reglist->mode.timeperframe; -+ -+ return 0; -+} -+ -+static 
int smia_ioctl_s_parm(struct v4l2_int_device *s, -+ struct v4l2_streamparm *a) -+{ -+ struct smia_sensor *sensor = s->priv; -+ struct smia_reglist *reglist; -+ -+ reglist = smia_reglist_find_mode_streamparm(sensor->meta_reglist, -+ sensor->current_reglist, a); -+ -+ if (!reglist) -+ return -EINVAL; -+ sensor->current_reglist = reglist; -+ return smia_update_controls(s); -+} -+ -+static int smia_ioctl_dev_init(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ char name[FIRMWARE_NAME_MAX]; -+ int model_id, revision_number, manufacturer_id, smia_version; -+ int i, rval; -+ -+ rval = smia_power_on(s); -+ if (rval) -+ return -ENODEV; -+ -+ /* Read and check sensor identification registers */ -+ if (smia_i2c_read_reg(sensor->i2c_client, SMIA_REG_16BIT, -+ REG_MODEL_ID, &model_id) -+ || smia_i2c_read_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ REG_REVISION_NUMBER, &revision_number) -+ || smia_i2c_read_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ REG_MANUFACTURER_ID, &manufacturer_id) -+ || smia_i2c_read_reg(sensor->i2c_client, SMIA_REG_8BIT, -+ REG_SMIA_VERSION, &smia_version)) { -+ rval = -ENODEV; -+ goto out_poweroff; -+ } -+ -+ sensor->revision_number = revision_number; -+ sensor->smia_version = smia_version; -+ -+ if (smia_version != 10) { -+ /* We support only SMIA version 1.0 at the moment */ -+ dev_err(&sensor->i2c_client->dev, -+ "unknown sensor 0x%04x detected (smia ver %i.%i)\n", -+ model_id, smia_version / 10, smia_version % 10); -+ rval = -ENODEV; -+ goto out_poweroff; -+ } -+ -+ /* Detect which sensor we have */ -+ for (i = 1; i < ARRAY_SIZE(smia_sensors); i++) { -+ if (smia_sensors[i].manufacturer_id == manufacturer_id -+ && smia_sensors[i].model_id == model_id) -+ break; -+ } -+ if (i >= ARRAY_SIZE(smia_sensors)) -+ i = 0; /* Unknown sensor */ -+ sensor->type = &smia_sensors[i]; -+ -+ /* Initialize V4L2 controls */ -+ -+ /* Gain is initialized here permanently */ -+ sensor->controls[SMIA_CTRL_GAIN].minimum = 0; -+ sensor->controls[SMIA_CTRL_GAIN].maximum = -+ sensor->type->ev_table_size - 1; -+ sensor->controls[SMIA_CTRL_GAIN].step = 1; -+ sensor->controls[SMIA_CTRL_GAIN].default_value = 0; -+ sensor->controls[SMIA_CTRL_GAIN].value = 0; -+ sensor->controls[SMIA_CTRL_GAIN].set = -+ sensor->type->ev_table ? 
smia_set_gain : NULL; -+ -+ /* Exposure parameters may change at each mode change, just zero here */ -+ sensor->controls[SMIA_CTRL_EXPOSURE].minimum = 0; -+ sensor->controls[SMIA_CTRL_EXPOSURE].maximum = 0; -+ sensor->controls[SMIA_CTRL_EXPOSURE].step = 0; -+ sensor->controls[SMIA_CTRL_EXPOSURE].default_value = 0; -+ sensor->controls[SMIA_CTRL_EXPOSURE].value = 0; -+ sensor->controls[SMIA_CTRL_EXPOSURE].set = smia_set_exposure; -+ -+ /* Update identification string */ -+ strncpy(s->name, sensor->type->name, V4L2NAMESIZE); -+ s->name[V4L2NAMESIZE-1] = 0; /* Ensure NULL terminated string */ -+ -+ /* Import firmware */ -+ snprintf(name, FIRMWARE_NAME_MAX, "%s-%02x-%04x-%02x.bin", -+ SMIA_SENSOR_NAME, sensor->type->manufacturer_id, -+ sensor->type->model_id, sensor->revision_number); -+ -+ if (request_firmware(&sensor->fw, name, -+ &sensor->i2c_client->dev)) { -+ dev_err(&sensor->i2c_client->dev, -+ "can't load firmware %s\n", name); -+ rval = -ENODEV; -+ goto out_poweroff; -+ } -+ -+ sensor->meta_reglist = -+ (struct smia_meta_reglist *)sensor->fw->data; -+ -+ rval = smia_reglist_import(sensor->meta_reglist); -+ if (rval) { -+ dev_err(&sensor->i2c_client->dev, -+ "invalid register list %s, import failed\n", -+ name); -+ goto out_release; -+ } -+ -+ /* Select initial mode */ -+ sensor->current_reglist = -+ smia_reglist_find_type(sensor->meta_reglist, -+ SMIA_REGLIST_MODE); -+ if (!sensor->current_reglist) { -+ dev_err(&sensor->i2c_client->dev, -+ "invalid register list %s, no mode found\n", -+ name); -+ rval = -ENODEV; -+ goto out_release; -+ } -+ -+ rval = smia_power_off(s); -+ if (rval) -+ goto out_release; -+ -+ return 0; -+ -+out_release: -+ release_firmware(sensor->fw); -+out_poweroff: -+ sensor->meta_reglist = NULL; -+ sensor->fw = NULL; -+ smia_power_off(s); -+ -+ return rval; -+} -+ -+#if VS6555_RESET_SHIFT_HACK -+/* -+ * Check if certain undocumented registers have values we expect. -+ * If not, reset sensor and recheck. -+ * This should be called when streaming is already enabled. 
-+ */ -+static int smia_vs6555_reset_shift_hack(struct v4l2_int_device *s) -+{ -+ struct smia_sensor *sensor = s->priv; -+ int count = 10; -+ int r381c = 0; -+ int r381d = 0; -+ int r381e = 0; -+ int r381f = 0; -+ int rval; -+ -+ do { -+ rval = smia_i2c_read_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x381c, &r381c); -+ if (rval) -+ return rval; -+ rval = smia_i2c_read_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x381d, &r381d); -+ if (rval) -+ return rval; -+ rval = smia_i2c_read_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x381e, &r381e); -+ if (rval) -+ return rval; -+ rval = smia_i2c_read_reg(sensor->i2c_client, -+ SMIA_REG_8BIT, 0x381f, &r381f); -+ if (rval) -+ return rval; -+ -+ if (r381d != 0 && r381f != 0 && -+ r381c == 0 && r381e == 0) -+ return 0; -+ -+ dev_dbg(&sensor->i2c_client->dev, "VS6555 HW misconfigured--" -+ "trying to reset (%02X%02X%02X%02X)\n", -+ r381c, r381d, r381e, r381f); -+ -+ smia_stream_off(s); -+ smia_power_off(s); -+ msleep(2); -+ rval = smia_power_on(s); -+ if (rval) -+ return rval; -+ rval = smia_configure(s); -+ if (rval) -+ return rval; -+ rval = smia_stream_on(s); -+ if (rval) -+ return rval; -+ } while (--count > 0); -+ -+ dev_warn(&sensor->i2c_client->dev, -+ "VS6555 reset failed--expect bad image\n"); -+ -+ return 0; /* Return zero nevertheless -- at least we tried */ -+} -+#endif -+ -+static int smia_ioctl_s_power(struct v4l2_int_device *s, -+ enum v4l2_power new_state) -+{ -+ struct smia_sensor *sensor = s->priv; -+ enum v4l2_power old_state = sensor->power; -+ int rval = 0; -+ -+ /* -+ * Map STANDBY to OFF mode: there is no reason to keep the sensor -+ * powered if not streaming. -+ */ -+ if (new_state == V4L2_POWER_STANDBY) -+ new_state = V4L2_POWER_OFF; -+ -+ /* If we are already in this mode, do nothing */ -+ if (old_state == new_state) -+ return 0; -+ -+ /* Disable power if so requested (it was enabled) */ -+ if (new_state == V4L2_POWER_OFF) { -+ rval = smia_stream_off(s); -+ if (rval) -+ dev_err(&sensor->i2c_client->dev, -+ "can not stop streaming\n"); -+ rval = smia_power_off(s); -+ goto out; -+ } -+ -+ /* Either STANDBY or ON requested */ -+ -+ /* Enable power and move to standby if it was off */ -+ if (old_state == V4L2_POWER_OFF) { -+ rval = smia_power_on(s); -+ if (rval) -+ goto out; -+ } -+ -+ /* Now sensor is powered (standby or streaming) */ -+ -+ if (new_state == V4L2_POWER_ON) { -+ /* Standby -> streaming */ -+ sensor->power = V4L2_POWER_ON; -+ rval = smia_configure(s); -+ if (rval) { -+ smia_stream_off(s); -+ if (old_state == V4L2_POWER_OFF) -+ smia_power_off(s); -+ goto out; -+ } -+ rval = smia_stream_on(s); -+#if VS6555_RESET_SHIFT_HACK -+ if (rval == 0 && sensor->type->manufacturer_id == 0x01) -+ rval = smia_vs6555_reset_shift_hack(s); -+#endif -+ } else { -+ /* Streaming -> standby */ -+ rval = smia_stream_off(s); -+ } -+ -+out: -+ sensor->power = (rval == 0) ? 
new_state : old_state; -+ return rval; -+} -+ -+static int smia_ioctl_g_priv(struct v4l2_int_device *s, void *priv) -+{ -+ struct smia_sensor *sensor = s->priv; -+ -+ return sensor->platform_data->g_priv(s, priv); -+} -+ -+static int smia_ioctl_enum_framesizes(struct v4l2_int_device *s, -+ struct v4l2_frmsizeenum *frm) -+{ -+ struct smia_sensor *sensor = s->priv; -+ -+ return smia_reglist_enum_framesizes(sensor->meta_reglist, frm); -+} -+ -+static int smia_ioctl_enum_frameintervals(struct v4l2_int_device *s, -+ struct v4l2_frmivalenum *frm) -+{ -+ struct smia_sensor *sensor = s->priv; -+ -+ return smia_reglist_enum_frameintervals(sensor->meta_reglist, frm); -+} -+ -+#ifdef CONFIG_PM -+ -+static int smia_suspend(struct i2c_client *client, pm_message_t mesg) -+{ -+ struct smia_sensor *sensor = dev_get_drvdata(&client->dev); -+ enum v4l2_power resume_state = sensor->power; -+ int rval; -+ -+ rval = smia_ioctl_s_power(sensor->v4l2_int_device, V4L2_POWER_OFF); -+ if (rval == 0) -+ sensor->power = resume_state; -+ return rval; -+} -+ -+static int smia_resume(struct i2c_client *client) -+{ -+ struct smia_sensor *sensor = dev_get_drvdata(&client->dev); -+ enum v4l2_power resume_state = sensor->power; -+ -+ sensor->power = V4L2_POWER_OFF; -+ return smia_ioctl_s_power(sensor->v4l2_int_device, resume_state); -+} -+ -+#else -+ -+#define smia_suspend NULL -+#define smia_resume NULL -+ -+#endif /* CONFIG_PM */ -+ -+static int smia_probe(struct i2c_client *client, -+ const struct i2c_device_id *devid) -+{ -+ struct smia_sensor *sensor = &smia; -+ int rval; -+ -+ if (i2c_get_clientdata(client)) -+ return -EBUSY; -+ -+ sensor->platform_data = client->dev.platform_data; -+ -+ if (sensor->platform_data == NULL) -+ return -ENODEV; -+ -+ sensor->v4l2_int_device = &smia_int_device; -+ -+ sensor->i2c_client = client; -+ i2c_set_clientdata(client, sensor); -+ -+ rval = v4l2_int_device_register(sensor->v4l2_int_device); -+ if (rval) -+ i2c_set_clientdata(client, NULL); -+ -+ return rval; -+} -+ -+static int __exit smia_remove(struct i2c_client *client) -+{ -+ struct smia_sensor *sensor = i2c_get_clientdata(client); -+ -+ if (!client->adapter) -+ return -ENODEV; /* our client isn't attached */ -+ -+ v4l2_int_device_unregister(sensor->v4l2_int_device); -+ i2c_set_clientdata(client, NULL); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id smia_id_table[] = { -+ { SMIA_SENSOR_NAME, 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, smia_id_table); -+ -+static struct i2c_driver smia_i2c_driver = { -+ .driver = { -+ .name = SMIA_SENSOR_NAME, -+ }, -+ .probe = smia_probe, -+ .remove = __exit_p(smia_remove), -+ .suspend = smia_suspend, -+ .resume = smia_resume, -+ .id_table = smia_id_table, -+}; -+ -+static int __init smia_init(void) -+{ -+ int rval; -+ -+ rval = i2c_add_driver(&smia_i2c_driver); -+ if (rval) -+ printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__); -+ -+ return rval; -+} -+ -+static void __exit smia_exit(void) -+{ -+ i2c_del_driver(&smia_i2c_driver); -+} -+ -+/* -+ * FIXME: Menelaus isn't ready (?) at module_init stage, so use -+ * late_initcall for now. 
-+ */ -+late_initcall(smia_init); -+module_exit(smia_exit); -+ -+MODULE_AUTHOR("Tuukka Toivonen "); -+MODULE_DESCRIPTION("Generic SMIA-compatible camera sensor driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/smia-sensor.h linux-omap-2.6.28-nokia1/drivers/media/video/smia-sensor.h ---- linux-omap-2.6.28-omap1/drivers/media/video/smia-sensor.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/smia-sensor.h 2011-06-22 13:19:32.733063276 +0200 -@@ -0,0 +1,42 @@ -+/* -+ * drivers/media/video/smia-sensor.h -+ * -+ * Copyright (C) 2008,2009 Nokia Corporation -+ * -+ * Contact: Tuukka Toivonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef SMIA_SENSOR_H -+#define SMIA_SENSOR_H -+ -+#include -+ -+#define SMIA_SENSOR_NAME "smia-sensor" -+#define SMIA_SENSOR_I2C_ADDR (0x20 >> 1) -+ -+struct smia_sensor_platform_data { -+ int (*g_priv)(struct v4l2_int_device *s, void *priv); -+ int (*configure_interface)(struct v4l2_int_device *s, -+ int width, int height); -+ int (*set_xclk)(struct v4l2_int_device *s, int hz); -+ int (*power_on)(struct v4l2_int_device *s); -+ int (*power_off)(struct v4l2_int_device *s); -+}; -+ -+ -+#endif /* SMIA_SENSOR_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/videobuf-dma-sg.c linux-omap-2.6.28-nokia1/drivers/media/video/videobuf-dma-sg.c ---- linux-omap-2.6.28-omap1/drivers/media/video/videobuf-dma-sg.c 2011-06-22 13:14:18.643067741 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/videobuf-dma-sg.c 2011-06-22 13:19:32.743063276 +0200 -@@ -58,9 +58,10 @@ videobuf_vmalloc_to_sg(unsigned char *vi - struct page *pg; - int i; - -- sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); -+ sglist = vmalloc(nr_pages * sizeof(*sglist)); - if (NULL == sglist) - return NULL; -+ memset(sglist, 0, nr_pages * sizeof(*sglist)); - sg_init_table(sglist, nr_pages); - for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { - pg = vmalloc_to_page(virt); -@@ -72,7 +73,7 @@ videobuf_vmalloc_to_sg(unsigned char *vi - return sglist; - - err: -- kfree(sglist); -+ vfree(sglist); - return NULL; - } - -@@ -84,7 +85,7 @@ videobuf_pages_to_sg(struct page **pages - - if (NULL == pages[0]) - return NULL; -- sglist = kmalloc(nr_pages * sizeof(*sglist), GFP_KERNEL); -+ sglist = vmalloc(nr_pages * sizeof(*sglist)); - if (NULL == sglist) - return NULL; - sg_init_table(sglist, nr_pages); -@@ -104,12 +105,12 @@ videobuf_pages_to_sg(struct page **pages - - nopage: - dprintk(2,"sgl: oops - no page\n"); -- kfree(sglist); -+ vfree(sglist); - return NULL; - - highmem: - dprintk(2,"sgl: oops - highmem page\n"); -- kfree(sglist); -+ vfree(sglist); - return NULL; - } - -@@ -230,7 +231,7 @@ int videobuf_dma_map(struct videobuf_que - (dma->vmalloc,dma->nr_pages); - } - if (dma->bus_addr) { -- dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); -+ dma->sglist = 
vmalloc(sizeof(*dma->sglist)); - if (NULL != dma->sglist) { - dma->sglen = 1; - sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK; -@@ -248,7 +249,7 @@ int videobuf_dma_map(struct videobuf_que - if (0 == dma->sglen) { - printk(KERN_WARNING - "%s: videobuf_map_sg failed\n",__func__); -- kfree(dma->sglist); -+ vfree(dma->sglist); - dma->sglist = NULL; - dma->sglen = 0; - return -EIO; -@@ -274,7 +275,7 @@ int videobuf_dma_unmap(struct videobuf_q - - dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction); - -- kfree(dma->sglist); -+ vfree(dma->sglist); - dma->sglist = NULL; - dma->sglen = 0; - return 0; -diff -Nurp linux-omap-2.6.28-omap1/drivers/media/video/v4l2-int-device.c linux-omap-2.6.28-nokia1/drivers/media/video/v4l2-int-device.c ---- linux-omap-2.6.28-omap1/drivers/media/video/v4l2-int-device.c 2011-06-22 13:10:43.373070802 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/media/video/v4l2-int-device.c 2011-06-22 13:19:32.743063276 +0200 -@@ -32,7 +32,7 @@ - static DEFINE_MUTEX(mutex); - static LIST_HEAD(int_list); - --void v4l2_int_device_try_attach_all(void) -+static void __v4l2_int_device_try_attach_all(void) - { - struct v4l2_int_device *m, *s; - -@@ -66,6 +66,33 @@ void v4l2_int_device_try_attach_all(void - } - } - } -+ -+static struct v4l2_int_slave dummy_slave = { -+ /* Dummy pointer to avoid underflow in find_ioctl. */ -+ .ioctls = (void *)0x80000000, -+ .num_ioctls = 0, -+}; -+ -+static struct v4l2_int_device dummy = { -+ .type = v4l2_int_type_slave, -+ .u = { -+ .slave = &dummy_slave, -+ }, -+}; -+ -+struct v4l2_int_device *v4l2_int_device_dummy() -+{ -+ return &dummy; -+} -+EXPORT_SYMBOL_GPL(v4l2_int_device_dummy); -+ -+void v4l2_int_device_try_attach_all(void) -+{ -+ mutex_lock(&mutex); -+ __v4l2_int_device_try_attach_all(); -+ mutex_unlock(&mutex); -+} -+ - EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all); - - static int ioctl_sort_cmp(const void *a, const void *b) -@@ -89,7 +116,7 @@ int v4l2_int_device_register(struct v4l2 - &ioctl_sort_cmp, NULL); - mutex_lock(&mutex); - list_add(&d->head, &int_list); -- v4l2_int_device_try_attach_all(); -+ __v4l2_int_device_try_attach_all(); - mutex_unlock(&mutex); - - return 0; -diff -Nurp linux-omap-2.6.28-omap1/drivers/mfd/twl4030-core.c linux-omap-2.6.28-nokia1/drivers/mfd/twl4030-core.c ---- linux-omap-2.6.28-omap1/drivers/mfd/twl4030-core.c 2011-06-22 13:14:18.743067740 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mfd/twl4030-core.c 2011-06-22 13:19:32.743063276 +0200 -@@ -104,6 +104,19 @@ - #define twl_has_usb() false - #endif - -+#if defined(CONFIG_LEDS_TWL4030_VIBRA) || \ -+ defined(CONFIG_LEDS_TWL4030_VIBRA_MODULE) -+#define twl_has_vibra() true -+#else -+#define twl_has_vibra() false -+#endif -+ -+#if defined(CONFIG_TWL4030_WATCHDOG) || \ -+ defined(CONFIG_TWL4030_WATCHDOG_MODULE) -+#define twl_has_watchdog() true -+#else -+#define twl_has_watchdog() false -+#endif - - /* Triton Core internal information (BEGIN) */ - -@@ -349,7 +362,7 @@ EXPORT_SYMBOL(twl4030_i2c_read); - int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg) - { - -- /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */ -+ /* 2 bytes: offset 1 contains the data, offset 0 is used by i2c_write */ - u8 temp_buffer[2] = { 0 }; - /* offset 1 contains the data */ - temp_buffer[1] = value; -@@ -531,6 +544,18 @@ add_children(struct twl4030_platform_dat - usb_transceiver = child; - } - -+ if (twl_has_vibra()) { -+ child = add_child(0, "twl4030_vibra", NULL, 0, true, 0, 0); -+ if (IS_ERR(child)) -+ return PTR_ERR(child); -+ } -+ -+ if 
(twl_has_watchdog()) { -+ child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0); -+ if (IS_ERR(child)) -+ return PTR_ERR(child); -+ } -+ - if (twl_has_regulator()) { - /* - child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); -@@ -761,7 +786,7 @@ twl4030_probe(struct i2c_client *client, - twl->client = i2c_new_dummy(client->adapter, - twl->address); - if (!twl->client) { -- dev_err(&twl->client->dev, -+ dev_err(&client->dev, - "can't attach client %d\n", i); - status = -ENOMEM; - goto fail; -diff -Nurp linux-omap-2.6.28-omap1/drivers/mfd/twl4030-power.c linux-omap-2.6.28-nokia1/drivers/mfd/twl4030-power.c ---- linux-omap-2.6.28-omap1/drivers/mfd/twl4030-power.c 2011-06-22 13:14:18.743067740 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mfd/twl4030-power.c 2011-06-22 13:19:32.743063276 +0200 -@@ -25,6 +25,7 @@ - - #include - #include -+#include - #include - #include - -@@ -38,6 +39,8 @@ static u8 triton_next_free_address = 0x2 - #define PHY_TO_OFF_PM_MASTER(p) (p - 0x36) - #define PHY_TO_OFF_PM_RECEIVER(p) (p - 0x5b) - -+#define NUM_OF_RESOURCES 28 -+ - /* resource - hfclk */ - #define R_HFCLKOUT_DEV_GRP PHY_TO_OFF_PM_RECEIVER(0xe6) - -@@ -66,6 +69,48 @@ static u8 triton_next_free_address = 0x2 - #define KEY_1 0xC0 - #define KEY_2 0x0C - -+#define R_VDD1_OSC 0x5C -+#define R_VDD2_OSC 0x6A -+#define R_VIO_OSC 0x52 -+#define EXT_FS_CLK_EN (0x1 << 6) -+ -+/* resource configuration registers */ -+ -+#define DEVGROUP_OFFSET 0 -+#define TYPE_OFFSET 1 -+#define REMAP_OFFSET 2 -+ -+static int res_config_addrs[] = { -+ [RES_VAUX1] = 0x17, -+ [RES_VAUX2] = 0x1b, -+ [RES_VAUX3] = 0x1f, -+ [RES_VAUX4] = 0x23, -+ [RES_VMMC1] = 0x27, -+ [RES_VMMC2] = 0x2b, -+ [RES_VPLL1] = 0x2f, -+ [RES_VPLL2] = 0x33, -+ [RES_VSIM] = 0x37, -+ [RES_VDAC] = 0x3b, -+ [RES_VINTANA1] = 0x3f, -+ [RES_VINTANA2] = 0x43, -+ [RES_VINTDIG] = 0x47, -+ [RES_VIO] = 0x4b, -+ [RES_VDD1] = 0x55, -+ [RES_VDD2] = 0x63, -+ [RES_VUSB_1v5] = 0x71, -+ [RES_VUSB_1v8] = 0x74, -+ [RES_VUSB_3v1] = 0x77, -+ [RES_VUSBCP] = 0x7a, -+ [RES_REGEN] = 0x7f, -+ [RES_NRES_PWRON] = 0x82, -+ [RES_CLKEN] = 0x85, -+ [RES_SYSEN] = 0x88, -+ [RES_HFCLKOUT] = 0x8b, -+ [RES_32KCLKOUT] = 0x8e, -+ [RES_RESET] = 0x91, -+ [RES_Main_Ref] = 0x94, -+}; -+ - static int __init twl4030_write_script_byte(u8 address, u8 byte) - { - int err; -@@ -117,12 +162,16 @@ static int __init config_wakeup3_sequenc - { - - int err = 0; -+ u8 data; - - /* Set SLEEP to ACTIVE SEQ address for P3 */ - err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, - R_SEQ_ADD_S2A3); - -- err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, LVL_WAKEUP, -+ err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, -+ R_P3_SW_EVENTS); -+ data |= LVL_WAKEUP; -+ err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, - R_P3_SW_EVENTS); - if (err) - printk(KERN_ERR "TWL4030 wakeup sequence for P3" \ -@@ -134,19 +183,26 @@ static int __init config_wakeup3_sequenc - static int __init config_wakeup12_sequence(u8 address) - { - int err = 0; -+ u8 data; - - /* Set SLEEP to ACTIVE SEQ address for P1 and P2 */ - err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, - R_SEQ_ADD_SA12); - -- /* P1/P2/P3 LVL_WAKEUP should be on LEVEL */ -- err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, LVL_WAKEUP, -+ /* P1/P2 LVL_WAKEUP should be on LEVEL */ -+ err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, -+ R_P1_SW_EVENTS); -+ data |= LVL_WAKEUP; -+ err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, - R_P1_SW_EVENTS); -- err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 
LVL_WAKEUP, -+ -+ err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, -+ R_P2_SW_EVENTS); -+ data |= LVL_WAKEUP; -+ err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, - R_P2_SW_EVENTS); - - if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) { -- u8 data; - /* Disabling AC charger effect on sleep-active transitions */ - err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, - R_CFG_P1_TRANSITION); -@@ -166,14 +222,6 @@ static int __init config_sleep_sequence( - { - int err = 0; - -- /* -- * CLKREQ is pulled high on the 2430SDP, therefore, we need to take -- * it out of the HFCLKOUT DEV_GRP for P1 else HFCLKOUT can't be stopped. -- */ -- -- err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -- 0x20, R_HFCLKOUT_DEV_GRP); -- - /* Set ACTIVE to SLEEP SEQ address in T2 memory*/ - err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, - R_SEQ_ADD_A2S); -@@ -223,6 +271,7 @@ static int __init load_triton_script(str - { - u8 address = triton_next_free_address; - int err; -+ int i; - - err = twl4030_write_script(address, tscript->script, tscript->size); - if (err) -@@ -230,25 +279,229 @@ static int __init load_triton_script(str - - triton_next_free_address += tscript->size; - -- if (tscript->flags & TRITON_WRST_SCRIPT) -- err |= config_warmreset_sequence(address); -+ for (i = 0; i < tscript->number_of_events; i++) { -+ -+ /* Check if script is going beyond last valid address */ -+ if (address + tscript->size > END_OF_SCRIPT) { -+ -+ WARN(1, "TWL4030 script event %d" \ -+ " do not fit in memory\n" -+ , tscript->events[i].event); -+ return -EINVAL; -+ } -+ -+ /* Check if event pointer is in script area */ -+ if (tscript->events[i].offset >= tscript->size) { -+ -+ WARN(1, "TWL4030 script event %d has invalid start" \ -+ " at 0x%x in the script ending at 0x%x\n", -+ tscript->events[i].event, -+ address + tscript->events[i].offset, -+ address + tscript->size); -+ return -EINVAL; -+ } -+ -+ switch (tscript->events[i].event) { -+ case TRITON_WRST: -+ err |= config_warmreset_sequence(address -+ + tscript->events[i].offset); -+ break; -+ case TRITON_WAKEUP12: -+ err |= config_wakeup12_sequence(address -+ + tscript->events[i].offset); -+ break; -+ case TRITON_WAKEUP3: -+ err |= config_wakeup3_sequence(address -+ + tscript->events[i].offset); -+ break; -+ case TRITON_SLEEP: -+ err |= config_sleep_sequence(address -+ + tscript->events[i].offset); -+ break; -+ default: -+ WARN(1, "Event number %d unknown\n" -+ , tscript->events[i].event); -+ return -EINVAL; -+ } -+ } -+ return err; -+} -+ -+static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) -+{ -+ int rconfig_addr; -+ int err; -+ u8 type; -+ -+ if (rconfig->resource > NUM_OF_RESOURCES) { -+ printk(KERN_ERR -+ "TWL4030 Resource %d does not exist\n", -+ rconfig->resource); -+ return -EINVAL; -+ } -+ -+ rconfig_addr = res_config_addrs[rconfig->resource]; - -- if (tscript->flags & TRITON_WAKEUP12_SCRIPT) -- err |= config_wakeup12_sequence(address); -+ /* Set resource group */ -+ if (rconfig->devgroup >= 0) { -+ err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ rconfig->devgroup << 5, -+ rconfig_addr + DEVGROUP_OFFSET); -+ if (err < 0) { -+ printk(KERN_ERR -+ "TWL4030 failed to program devgroup"); -+ return err; -+ } -+ } - -- if (tscript->flags & TRITON_WAKEUP3_SCRIPT) -- err |= config_wakeup3_sequence(address); -+ /* Set resource types */ -+ err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type, -+ rconfig_addr + TYPE_OFFSET); -+ if (err < 0) { -+ printk(KERN_ERR -+ "TWL4030 Resource 
%d type could not read\n", -+ rconfig->resource); -+ return err; -+ } - -- if (tscript->flags & TRITON_SLEEP_SCRIPT) -- err |= config_sleep_sequence(address); -+ if (rconfig->type >= 0) { -+ type &= ~7; -+ type |= rconfig->type; -+ } - -- return err; -+ if (rconfig->type2 >= 0) { -+ type &= ~(3 << 3); -+ type |= rconfig->type2 << 3; -+ } -+ -+ err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ type, rconfig_addr + TYPE_OFFSET); -+ if (err < 0) { -+ printk(KERN_ERR -+ "TWL4030 failed to program resource type"); -+ return err; -+ } -+ -+ if (rconfig->remap >= 0) { -+ err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ rconfig->remap, -+ rconfig_addr + REMAP_OFFSET); -+ if (err < 0) { -+ printk(KERN_ERR "TWL4030 failed to program remap"); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+struct twl4030_reg_data { -+ int usecount; -+ u8 resource; -+ u8 base; -+ u8 offstate; -+ u8 initdone; -+ int delay; -+}; -+struct twl4030_reg_data twl4030_custom_regs[] = { -+ { 0, RES_VAUX1, TWL4030_VAUX1_DEV_GRP, 0x88, 0, 500 }, -+ { 0, RES_VMMC2, TWL4030_VMMC2_DEV_GRP, 0x88, 0, 100 }, -+ { 0, 0, 0, 0, 0 }, -+}; -+static DEFINE_MUTEX(reg_mutex); -+ -+static int twl4030_set_regulator_state(int res, int enable) -+{ -+ u8 val; -+ int ret; -+ struct twl4030_reg_data *reg_data = twl4030_custom_regs; -+ -+ while (1) { -+ if (!reg_data->resource) -+ return -EINVAL; -+ if (reg_data->resource == res) -+ break; -+ reg_data++; -+ } -+ if (enable && !(reg_data->usecount++)) -+ val = 0xee; -+ else if (!enable && !(--reg_data->usecount)) -+ val = reg_data->offstate; -+ else -+ return 0; -+ -+ ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ val, reg_data->base + 2); -+ -+ if (!ret && !reg_data->initdone) { -+ ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ 0, reg_data->base); -+ if (!ret) -+ reg_data->initdone = 1; -+ } -+ -+ if (ret) { -+ if (enable) -+ reg_data->usecount--; -+ else -+ reg_data->usecount++; -+ } -+ /* Wait until voltage stabilizes */ -+ if (enable) -+ udelay(reg_data->delay); -+ return ret; -+} -+ -+int twl4030_enable_regulator(int res) -+{ -+ int ret; -+ mutex_lock(®_mutex); -+ ret = twl4030_set_regulator_state(res, 1); -+ mutex_unlock(®_mutex); -+ return ret; -+} -+EXPORT_SYMBOL(twl4030_enable_regulator); -+ -+int twl4030_disable_regulator(int res) -+{ -+ int ret; -+ mutex_lock(®_mutex); -+ ret = twl4030_set_regulator_state(res, 0); -+ mutex_unlock(®_mutex); -+ return ret; -+} -+EXPORT_SYMBOL(twl4030_disable_regulator); -+ -+/** -+ * @brief twl_workaround - implement errata XYZ -+ * XYZ errata workaround requires the TWL DCDCs to use -+ * HFCLK - for this you need to write to all OSC regs to -+ * enable this path -+ * WARNING: you SHOULD change your board dependent script -+ * file to handle RET and OFF mode sequences correctly -+ * -+ * @return -+ */ -+static void __init twl_workaround(void) -+{ -+ u8 val; -+ u8 reg[]={R_VDD1_OSC, R_VDD2_OSC, R_VIO_OSC}; -+ int i; -+ int err = 0; -+ for (i = 0; i < sizeof(reg); i++) { -+ err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &val, reg[i]); -+ val |= EXT_FS_CLK_EN; -+ err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val, reg[i]); -+ } -+ if (err) -+ pr_warning("TWL4030: workaround setup failed!\n"); - } - - void __init twl4030_power_init(struct twl4030_power_data *triton2_scripts) - { - int err = 0; - int i; -+ struct twl4030_resconfig *resconfig; - - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_1, - R_PROTECT_KEY); -@@ -258,12 +511,29 @@ void __init twl4030_power_init(struct tw - printk(KERN_ERR - 
"TWL4030 Unable to unlock registers\n"); - -- for (i = 0; i < triton2_scripts->size; i++) { -+ for (i = 0; i < triton2_scripts->scripts_size; i++) { - err = load_triton_script(triton2_scripts->scripts[i]); -- if (err) -+ if (err < 0) { -+ printk(KERN_ERR "TWL4030 failed to load scripts"); - break; -+ } -+ } -+ -+ resconfig = triton2_scripts->resource_config; -+ if (resconfig) { -+ while (resconfig->resource) { -+ err = twl4030_configure_resource(resconfig); -+ resconfig++; -+ if (err < 0) { -+ printk(KERN_ERR -+ "TWL4030 failed to configure resource"); -+ break; -+ } -+ } - } - -+ /* TODO: introduce workaround based on TWL4030 revision */ -+ twl_workaround(); - if (twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY)) - printk(KERN_ERR - "TWL4030 Unable to relock registers\n"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-core.c linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-core.c ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-core.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-core.c 2011-06-22 13:19:32.743063276 +0200 -@@ -0,0 +1,438 @@ -+/* -+ * cs-core.c -+ * -+ * Part of the CMT speech driver, implements the character device -+ * interface. -+ * -+ * Copyright (C) 2008,2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Kai Vehmanen -+ * Original author: Peter Ujfalusi -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "cs-debug.h" -+#include "cs-core.h" -+#include "cs-ssi.h" -+ -+#define CS_MMAP_SIZE PAGE_SIZE -+ -+struct char_queue { -+ struct list_head list; -+ u32 msg; -+}; -+ -+ -+struct cs_char { -+ unsigned int opened; -+ -+ struct list_head chardev_queue; -+ -+ /* mmap things */ -+ unsigned long mmap_base; -+ unsigned long mmap_size; -+ -+ spinlock_t lock; -+ struct fasync_struct *async_queue; -+ wait_queue_head_t wait; -+}; -+ -+ -+static struct cs_char cs_char_data; -+ -+void cs_notify(u32 message) -+{ -+ struct char_queue *entry; -+ DENTER(); -+ -+ spin_lock(&cs_char_data.lock); -+ -+ if (!cs_char_data.opened) { -+ spin_unlock(&cs_char_data.lock); -+ goto out; -+ } -+ -+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); -+ if (!entry) { -+ pr_err("CS_SSI: Can't allocate new entry for the queue.\n"); -+ spin_unlock(&cs_char_data.lock); -+ goto out; -+ } -+ -+ entry->msg = message; -+ list_add_tail(&entry->list, &cs_char_data.chardev_queue); -+ -+ spin_unlock(&cs_char_data.lock); -+ -+ wake_up_interruptible(&cs_char_data.wait); -+ kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN); -+ -+out: -+ DLEAVE(0); -+} -+ -+static void cs_char_vma_open(struct vm_area_struct *vma) -+{ -+ DENTER(); -+ DLEAVE(0); -+} -+ -+static void cs_char_vma_close(struct vm_area_struct *vma) -+{ -+ DENTER(); -+ DLEAVE(0); -+} -+ -+static int cs_char_vma_fault(struct vm_area_struct *vma, -+ struct vm_fault *vmf) -+{ -+ struct page *page; -+ DENTER(); -+ -+ page = virt_to_page(cs_char_data.mmap_base); -+ get_page(page); -+ vmf->page = page; -+ -+ DLEAVE(0); -+ return 0; -+} -+ -+static struct vm_operations_struct cs_char_vm_ops = { -+ .open = cs_char_vma_open, -+ .close = cs_char_vma_close, -+ .fault = cs_char_vma_fault, -+}; -+ -+static int cs_char_fasync(int fd, struct file *file, int on) -+{ -+ if (fasync_helper(fd, file, on, &cs_char_data.async_queue) >= 0) -+ return 0; -+ else -+ return -EIO; -+} -+ -+ -+static unsigned int cs_char_poll(struct file *file, poll_table *wait) -+{ -+ unsigned int ret = 0; -+ -+ poll_wait(file, &cs_char_data.wait, wait); -+ -+ spin_lock_bh(&cs_char_data.lock); -+ if (!list_empty(&cs_char_data.chardev_queue)) { -+ ret = POLLIN | POLLRDNORM; -+ DPRINTK("There is something in the queue...\n"); -+ } -+ spin_unlock_bh(&cs_char_data.lock); -+ -+ return ret; -+} -+ -+ -+static ssize_t cs_char_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ u32 data; -+ DECLARE_WAITQUEUE(wait, current); -+ ssize_t retval; -+ struct char_queue *entry; -+ DENTER(); -+ -+ if (count < sizeof(data)) -+ return -EINVAL; -+ -+ add_wait_queue(&cs_char_data.wait, &wait); -+ -+ for ( ; ; ) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ spin_lock_bh(&cs_char_data.lock); -+ if (!list_empty(&cs_char_data.chardev_queue)) { -+ entry = list_entry(cs_char_data.chardev_queue.next, -+ struct char_queue, list); -+ data = entry->msg; -+ list_del(&entry->list); -+ kfree(entry); -+ } else { -+ data = 0; -+ } -+ spin_unlock_bh(&cs_char_data.lock); -+ -+ if (data) -+ break; -+ else if (file->f_flags & O_NONBLOCK) { -+ retval = -EAGAIN; -+ goto out; -+ } else if (signal_pending(current)) { -+ retval 
= -ERESTARTSYS; -+ goto out; -+ } -+ schedule(); -+ } -+ -+ retval = put_user(data, (u32 __user *)buf); -+ if (!retval) -+ retval = sizeof(data); -+out: -+ __set_current_state(TASK_RUNNING); -+ remove_wait_queue(&cs_char_data.wait, &wait); -+ -+ DLEAVE(retval); -+ return retval; -+} -+ -+static ssize_t cs_char_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ u32 data; -+ int err; -+ ssize_t retval; -+ DENTER(); -+ -+ if (count < sizeof(data)) -+ return -EINVAL; -+ -+ if (get_user(data, (u32 __user *)buf)) -+ retval = -EFAULT; -+ else -+ retval = count; -+ -+ err = cs_ssi_command(data); -+ if (err < 0) -+ retval = err; -+ -+ DLEAVE(retval); -+ return retval; -+} -+ -+static int cs_char_ioctl(struct inode *inode, struct file *f, -+ unsigned int cmd, unsigned long arg) -+{ -+ int r = 0; -+ DENTER(); -+ -+ switch (cmd) { -+ case CS_GET_STATE: { -+ unsigned int state; -+ -+ state = cs_ssi_get_state(); -+ if (copy_to_user((void __user *)arg, &state, sizeof(state))) -+ r = -EFAULT; -+ } -+ break; -+ case CS_CONFIG: -+ r = -ENOTTY; -+ break; -+ case CS_SET_WAKELINE: { -+ unsigned int state; -+ -+ if (copy_from_user(&state, (void __user *)arg, -+ sizeof(state))) -+ r = -EFAULT; -+ else -+ cs_ssi_set_wakeline(state); -+ } -+ break; -+ case CS_CONFIG_BUFS: { -+ struct cs_buffer_config buf_cfg; -+ -+ if (copy_from_user(&buf_cfg, (void __user *)arg, -+ sizeof(buf_cfg))) -+ r = -EFAULT; -+ else -+ r = cs_ssi_buf_config(&buf_cfg); -+ break; -+ } -+ default: -+ r = -ENOTTY; -+ break; -+ } -+ -+ DLEAVE(r); -+ return r; -+} -+ -+static int cs_char_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ DENTER(); -+ -+ if (vma->vm_end < vma->vm_start) { -+ DLEAVE(1); -+ return -EINVAL; -+ } -+ -+ if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1) { -+ DLEAVE(2); -+ return -EINVAL; -+ } -+ -+ vma->vm_flags |= VM_RESERVED; -+ vma->vm_ops = &cs_char_vm_ops; -+ vma->vm_private_data = file->private_data; -+ -+ cs_char_vma_open(vma); -+ -+ DLEAVE(0); -+ return 0; -+} -+ -+static int cs_char_open(struct inode *inode, struct file *file) -+{ -+ int ret = 0; -+ DENTER(); -+ -+ spin_lock_bh(&cs_char_data.lock); -+ -+ if (cs_char_data.opened) { -+ ret = -EBUSY; -+ spin_unlock_bh(&cs_char_data.lock); -+ goto out; -+ } -+ -+ cs_char_data.mmap_base = get_zeroed_page(GFP_ATOMIC); -+ if (!cs_char_data.mmap_base) { -+ pr_err("CS_SSI: Shared memory allocation failed.\n"); -+ ret = -ENOMEM; -+ spin_unlock_bh(&cs_char_data.lock); -+ goto out; -+ } -+ -+ cs_char_data.mmap_size = CS_MMAP_SIZE; -+ cs_char_data.opened = 1; -+ file->private_data = &cs_char_data; -+ -+ spin_unlock_bh(&cs_char_data.lock); -+ -+ cs_ssi_start(cs_char_data.mmap_base, cs_char_data.mmap_size); -+ -+out: -+ DLEAVE(ret); -+ return ret; -+} -+ -+static int cs_char_release(struct inode *inode, struct file *file) -+{ -+ struct char_queue *entry; -+ struct list_head *cursor, *next; -+ DENTER(); -+ -+ cs_ssi_stop(); -+ -+ spin_lock_bh(&cs_char_data.lock); -+ -+ free_page(cs_char_data.mmap_base); -+ cs_char_data.mmap_base = 0; -+ cs_char_data.mmap_size = 0; -+ cs_char_data.opened = 0; -+ -+ if (!list_empty(&cs_char_data.chardev_queue)) { -+ list_for_each_safe(cursor, next, &cs_char_data.chardev_queue) { -+ entry = list_entry(cursor, struct char_queue, list); -+ list_del(&entry->list); -+ kfree(entry); -+ } -+ } -+ -+ spin_unlock_bh(&cs_char_data.lock); -+ -+ DLEAVE(0); -+ return 0; -+} -+ -+static const struct file_operations cs_char_fops = { -+ .owner = THIS_MODULE, -+ .read = cs_char_read, -+ .write = cs_char_write, 
-+ .poll = cs_char_poll, -+ .ioctl = cs_char_ioctl, -+ .mmap = cs_char_mmap, -+ .open = cs_char_open, -+ .release = cs_char_release, -+ .fasync = cs_char_fasync, -+}; -+ -+static struct miscdevice cs_char_miscdev = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "cmt_speech", -+ .fops = &cs_char_fops -+}; -+ -+static int __init cs_char_init(void) -+{ -+ int ret; -+ DENTER(); -+ -+ printk(KERN_INFO "CMT speech driver v%d.%d.%d\n", -+ CS_VER_MAJOR, CS_VER_MINOR, CS_VER_EXTRA); -+ -+ ret = misc_register(&cs_char_miscdev); -+ if (ret) { -+ pr_err("CMT speech: Failed to register\n"); -+ goto out; -+ } -+ -+ init_waitqueue_head(&cs_char_data.wait); -+ spin_lock_init(&cs_char_data.lock); -+ -+ /* will be moved to cs_char_open */ -+ cs_char_data.mmap_base = 0; -+ cs_char_data.mmap_size = 0; -+ -+ cs_char_data.opened = 0; -+ -+ INIT_LIST_HEAD(&cs_char_data.chardev_queue); -+ -+ cs_ssi_init(); -+ -+out: -+ DLEAVE(ret); -+ return ret; -+} -+ -+static void __exit cs_char_exit(void) -+{ -+ DENTER(); -+ -+ misc_deregister(&cs_char_miscdev); -+ -+ cs_ssi_exit(); -+ -+ DLEAVE(0); -+} -+MODULE_AUTHOR("Peter Ujfalusi "); -+MODULE_AUTHOR("Kai Vehmanen "); -+MODULE_DESCRIPTION("CMT speech driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(cs_char_init); -+module_exit(cs_char_exit); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-core.h linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-core.h ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-core.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-core.h 2011-06-22 13:19:32.743063276 +0200 -@@ -0,0 +1,36 @@ -+/* -+ * cs-core.h -+ * -+ * Part of the CMT speech driver. -+ * -+ * Copyright (C) 2008,2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Kai Vehmanen -+ * Original author: Peter Ujfalusi -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _CS_CORE_H -+#define _CS_CORE_H -+ -+void cs_notify(u32 message); -+ -+#define CS_VER_MAJOR 0 -+#define CS_VER_MINOR 2 -+#define CS_VER_EXTRA 0 -+ -+#endif /* _CS_CORE_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-debug.h linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-debug.h ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-debug.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-debug.h 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,40 @@ -+/* -+ * cs-debug.h -+ * -+ * Part of the CMT speech driver. Debug. -+ * -+ * Copyright (C) 2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Kai Vehmanen -+ * Original author: Peter Ujfalusi -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _CS_DEBUG_H -+#define _CS_DEBUG_H -+ -+#ifdef CONFIG_SSI_CMT_SPEECH_DEBUG -+#define DPRINTK(fmt, arg...) printk(KERN_DEBUG "%s(): " fmt, __func__, ##arg) -+#define DENTER() printk(KERN_DEBUG "ENTER %s()\n", __func__) -+#define DLEAVE(a) printk(KERN_DEBUG "LEAVE %s() %d\n", __func__, a) -+#else -+#define DPRINTK(fmt, arg...) while (0) -+#define DENTER() while (0) -+#define DLEAVE(a) while (0) -+#endif -+ -+#endif /* _CS_DEBUG_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-ssi.c linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-ssi.c ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-ssi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-ssi.c 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,682 @@ -+/* -+ * cs-ssi.c -+ * -+ * Part of the CMT speech driver, implements the SSI interface. -+ * -+ * Copyright (C) 2008,2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Kai Vehmanen -+ * Original author: Peter Ujfalusi -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "cs-debug.h" -+#include "cs-core.h" -+#include "cs-ssi.h" -+ -+#define SSI_CHANNEL_STATE_READING (1 << 0) -+#define SSI_CHANNEL_STATE_WRITING (1 << 1) -+ -+#define CONTROL_CH 0 -+#define DATA_CH 1 -+ -+#define TARGET_MASK (0x0f000000) -+#define TARGET_CDSP (0x1<lock); -+ -+ if (channel->state & SSI_CHANNEL_STATE_READING) { -+ DPRINTK("Read already pending.\n"); -+ spin_unlock(&channel->lock); -+ DLEAVE(-1); -+ return; -+ } -+ -+ DPRINTK("Read issued\n"); -+ channel->state |= SSI_CHANNEL_STATE_READING; -+ spin_unlock(&channel->lock); -+ -+ ssi_read(channel->dev, &ssi_iface.control_rx, 1); -+ -+ DLEAVE(0); -+} -+ -+static void cs_ssi_read_control_done(struct ssi_device *dev) -+{ -+ u32 msg; -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[CONTROL_CH]; -+ spin_lock(&channel->lock); -+ -+ if (ssi_iface.flags & CS_FEAT_TSTAMP_RX_CTRL) { -+ struct timespec *tstamp = -+ &ssi_iface.mmap_cfg->tstamp_rx_ctrl; -+ do_posix_clock_monotonic_gettime(tstamp); -+ } -+ -+ channel->state &= ~SSI_CHANNEL_STATE_READING; -+ -+ msg = ssi_iface.control_rx; -+ -+ spin_unlock(&channel->lock); -+ -+ cs_notify(msg); -+ -+ /* Place read on control */ -+ cs_ssi_read_on_control(); -+ -+ DLEAVE(0); -+} -+ -+int cs_ssi_write_on_control(u32 message) -+{ -+ struct cs_ssi_channel *channel; -+ int err; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[CONTROL_CH]; -+ spin_lock(&channel->lock); -+ -+ if (channel->state & SSI_CHANNEL_STATE_WRITING) { -+ pr_err("CS_SSI: Write still pending on control channel.\n"); -+ spin_unlock(&channel->lock); -+ return -EBUSY; -+ } -+ -+ ssi_iface.control_tx = message; -+ channel->state |= SSI_CHANNEL_STATE_WRITING; -+ spin_unlock(&channel->lock); -+ -+ err = ssi_write(channel->dev, &ssi_iface.control_tx, 1); -+ -+ DLEAVE(err); -+ return err; -+} -+ -+static void cs_ssi_write_control_done(struct ssi_device *dev) -+{ -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[CONTROL_CH]; -+ spin_lock(&channel->lock); -+ -+ channel->state &= ~SSI_CHANNEL_STATE_WRITING; -+ ssi_iface.control_tx = 0; -+ -+ spin_unlock(&channel->lock); -+ -+ DLEAVE(0); -+} -+ -+static void cs_ssi_read_on_data(void) -+{ -+ u32 *address; -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[DATA_CH]; -+ spin_lock(&channel->lock); -+ -+ if (channel->state & SSI_CHANNEL_STATE_READING) { -+ DPRINTK("Read already pending.\n"); -+ spin_unlock(&channel->lock); -+ DLEAVE(-1); -+ return; -+ } -+ -+ DPRINTK("Read issued\n"); -+ channel->state |= SSI_CHANNEL_STATE_READING; -+ -+ address = (u32 *) (ssi_iface.mmap_base + -+ ssi_iface.rx_offsets[ssi_iface.rx_slot]); -+ -+ spin_unlock(&channel->lock); -+ -+ ssi_read(channel->dev, address, ssi_iface.buf_size/4); -+ -+ DLEAVE(0); -+} -+ -+static void cs_ssi_read_data_done(struct ssi_device *dev) -+{ -+ u32 msg; -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[DATA_CH]; -+ spin_lock(&channel->lock); -+ -+ channel->state &= ~SSI_CHANNEL_STATE_READING; -+ -+ msg = CS_RX_DATA_RECEIVED; -+ msg |= ssi_iface.rx_slot; -+ -+ ssi_iface.rx_slot++; -+ ssi_iface.rx_slot %= 
ssi_iface.rx_bufs; -+ -+ spin_unlock(&channel->lock); -+ -+ cs_notify(msg); -+ -+ cs_ssi_read_on_data(); -+ -+ DLEAVE(0); -+} -+ -+int cs_ssi_write_on_data(unsigned int slot) -+{ -+ u32 *address; -+ struct cs_ssi_channel *channel; -+ int err; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[DATA_CH]; -+ spin_lock(&channel->lock); -+ -+ if (ssi_iface.cdsp_state != CS_STATE_CONFIGURED) { -+ DPRINTK("Not configured, aborting\n"); -+ spin_unlock(&channel->lock); -+ return -EINVAL; -+ } -+ -+ if (channel->state & SSI_CHANNEL_STATE_WRITING) { -+ pr_err("CS_SSI: Write still pending on data channel.\n"); -+ spin_unlock(&channel->lock); -+ return -EBUSY; -+ } -+ -+ ssi_iface.tx_slot = slot; -+ address = (u32 *) (ssi_iface.mmap_base + -+ ssi_iface.tx_offsets[ssi_iface.tx_slot]); -+ -+ channel->state |= SSI_CHANNEL_STATE_WRITING; -+ -+ spin_unlock(&channel->lock); -+ -+ err = ssi_write(channel->dev, (u32 *)address, ssi_iface.buf_size/4); -+ -+ DLEAVE(err); -+ return err; -+} -+ -+static void cs_ssi_write_data_done(struct ssi_device *dev) -+{ -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ channel = &ssi_iface.channels[DATA_CH]; -+ spin_lock(&channel->lock); -+ -+ channel->state &= ~SSI_CHANNEL_STATE_WRITING; -+ -+ spin_unlock(&channel->lock); -+ -+ DLEAVE(0); -+} -+ -+unsigned int cs_ssi_get_state() -+{ -+ return ssi_iface.cdsp_state; -+} -+ -+int cs_ssi_command(u32 cmd) -+{ -+ int ret = 0; -+ DENTER(); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ switch (cmd & TARGET_MASK) { -+ case TARGET_CDSP: -+ ret = cs_ssi_write_on_control(cmd); -+ break; -+ case TARGET_LOCAL: -+ if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY) -+ ret = cs_ssi_write_on_data(cmd & CS_PARAM_MASK); -+ else -+ ret = -EINVAL; -+ break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ DLEAVE(ret); -+ return ret; -+} -+ -+void cs_ssi_set_wakeline(unsigned int new_state) -+{ -+ DENTER(); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ if (ssi_iface.wakeline_state != new_state) { -+ ssi_iface.wakeline_state = new_state; -+ ssi_ioctl(ssi_iface.channels[CONTROL_CH].dev, -+ new_state ? SSI_IOCTL_WAKE_UP : SSI_IOCTL_WAKE_DOWN, -+ NULL); -+ } -+ -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ DLEAVE(0); -+} -+ -+static int cs_ssi_openchannel(struct cs_ssi_channel *channel) -+{ -+ int ret = 0; -+ -+ spin_lock(&channel->lock); -+ -+ if (channel->opened) -+ goto leave; -+ -+ if (!channel->dev) { -+ pr_err("CS_SSI: %s channel is not ready??\n", -+ channel->channel_id ? "DATA" : "CONTROL"); -+ ret = -ENODEV; -+ goto leave; -+ } -+ ret = ssi_open(channel->dev); -+ if (ret < 0) { -+ pr_err("CS_SSI: Could not open %s channel\n", -+ channel->channel_id ? "DATA" : "CONTROL"); -+ goto leave; -+ } -+ -+ channel->opened = 1; -+leave: -+ spin_unlock(&channel->lock); -+ return ret; -+} -+ -+static int cs_ssi_closechannel(struct cs_ssi_channel *channel) -+{ -+ int ret = 0; -+ -+ spin_lock(&channel->lock); -+ -+ if (!channel->opened) -+ goto leave; -+ -+ if (!channel->dev) { -+ pr_err("CS_SSI: %s channel is not ready??\n", -+ channel->channel_id ? 
"DATA" : "CONTROL"); -+ ret = -ENODEV; -+ goto leave; -+ } -+ -+ /* Stop any pending read/write */ -+ if (channel->state & SSI_CHANNEL_STATE_READING) { -+ ssi_read_cancel(channel->dev); -+ channel->state &= ~SSI_CHANNEL_STATE_READING; -+ } -+ if (channel->state & SSI_CHANNEL_STATE_WRITING) { -+ ssi_write_cancel(channel->dev); -+ channel->state &= ~SSI_CHANNEL_STATE_WRITING; -+ } -+ -+ ssi_close(channel->dev); -+ -+ channel->opened = 0; -+leave: -+ spin_unlock(&channel->lock); -+ return ret; -+} -+ -+int cs_ssi_buf_config(struct cs_buffer_config *buf_cfg) -+{ -+ struct cs_ssi_channel *channel; -+ const unsigned cache_align = DMA_TRANSFER_ALIGN_BYTES; -+ int r = 0; -+ -+ DENTER(); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ if (ssi_iface.mmap_size <= -+ (ALIGN(buf_cfg->buf_size, cache_align) * -+ (buf_cfg->rx_bufs + buf_cfg->tx_bufs)) + -+ ALIGN(sizeof(*ssi_iface.mmap_cfg), cache_align)) { -+ -+ pr_err("CS_SSI: no space for the requested buffer configuration\n"); -+ spin_unlock_bh(&ssi_iface.lock); -+ r = -ENOBUFS; -+ goto error; -+ } -+ -+ ssi_iface.cdsp_state = CS_STATE_OPENED; -+ -+ channel = &ssi_iface.channels[DATA_CH]; -+ -+ cs_ssi_closechannel(channel); -+ -+ ssi_iface.buf_size = -+ ssi_iface.mmap_cfg->buf_size = buf_cfg->buf_size; -+ if (buf_cfg->buf_size > 0) { -+ ssi_iface.rx_bufs = -+ ssi_iface.mmap_cfg->rx_bufs = buf_cfg->rx_bufs; -+ ssi_iface.tx_bufs = -+ ssi_iface.mmap_cfg->tx_bufs = buf_cfg->tx_bufs; -+ } -+ -+ ssi_iface.rx_slot = 0; -+ ssi_iface.tx_slot = 0; -+ ssi_iface.slot_size = 0; -+ ssi_iface.flags = buf_cfg->flags; -+ ssi_iface.channels[DATA_CH].state = 0; -+ -+ if (ssi_iface.mmap_cfg->buf_size) { -+ unsigned data_start; -+ int i; -+ -+ if (cs_ssi_openchannel(channel)) { -+ pr_err("CS_SSI: Could not open DATA channel\n"); -+ spin_unlock_bh(&ssi_iface.lock); -+ r = -EINVAL; -+ goto error; -+ } -+ -+ ssi_iface.slot_size = ALIGN(ssi_iface.mmap_cfg->buf_size, cache_align); -+ DPRINTK("setting slot size to %u, buf size %u, align %u\n", -+ ssi_iface.slot_size, ssi_iface.mmap_cfg->buf_size, cache_align); -+ -+ data_start = ALIGN(sizeof(*ssi_iface.mmap_cfg), cache_align); -+ DPRINTK("setting data start at %u, cfg block %u, align %u\n", -+ data_start, sizeof(*ssi_iface.mmap_cfg), cache_align); -+ -+ -+ for (i = 0; i < ssi_iface.mmap_cfg->rx_bufs; i++) { -+ ssi_iface.rx_offsets[i] = -+ ssi_iface.mmap_cfg->rx_offsets[i] = data_start + i * ssi_iface.slot_size; -+ DPRINTK("DL buf #%u at %u\n", -+ i, ssi_iface.mmap_cfg->rx_offsets[i]); -+ } -+ -+ for (i = 0; i < ssi_iface.mmap_cfg->tx_bufs; i++) { -+ ssi_iface.tx_offsets[i] = -+ ssi_iface.mmap_cfg->tx_offsets[i] = -+ data_start + (i + ssi_iface.mmap_cfg->rx_bufs) * ssi_iface.slot_size; -+ DPRINTK("UL buf #%u at %u\n", -+ i, ssi_iface.mmap_cfg->rx_offsets[i]); -+ } -+ -+ ssi_iface.cdsp_state = CS_STATE_CONFIGURED; -+ } -+ -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ if (ssi_iface.buf_size) { -+ local_bh_disable(); -+ cs_ssi_read_on_data(); -+ local_bh_enable(); -+ } -+ -+error: -+ DLEAVE(r); -+ return r; -+} -+ -+int cs_ssi_start(unsigned long mmap_base, unsigned long mmap_size) -+{ -+ int err = 0; -+ DENTER(); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ ssi_iface.rx_slot = 0; -+ ssi_iface.tx_slot = 0; -+ ssi_iface.slot_size = 0; -+ -+ ssi_iface.channels[CONTROL_CH].state = 0; -+ ssi_iface.channels[DATA_CH].state = 0; -+ -+ ssi_iface.mmap_cfg = (struct cs_mmap_config_block *)mmap_base; -+ ssi_iface.mmap_base = mmap_base; -+ ssi_iface.mmap_size = mmap_size; -+ -+ memset(ssi_iface.mmap_cfg, 0, sizeof(*ssi_iface.mmap_cfg)); -+ 
ssi_iface.mmap_cfg->version = CS_VER_MAJOR << 8 | CS_VER_MINOR; -+ -+ err = cs_ssi_openchannel(&ssi_iface.channels[CONTROL_CH]); -+ if (err < 0) { -+ pr_err("CS_SSI: Could not open CONTROL channel\n"); -+ spin_unlock_bh(&ssi_iface.lock); -+ goto error; -+ } -+ -+ ssi_iface.cdsp_state = CS_STATE_OPENED; -+ -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ local_bh_disable(); -+ cs_ssi_read_on_control(); -+ local_bh_enable(); -+ -+error: -+ DLEAVE(err); -+ return err; -+} -+ -+void cs_ssi_stop(void) -+{ -+ DENTER(); -+ -+ cs_ssi_set_wakeline(0); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ cs_ssi_closechannel(&ssi_iface.channels[CONTROL_CH]); -+ cs_ssi_closechannel(&ssi_iface.channels[DATA_CH]); -+ -+ ssi_iface.cdsp_state = CS_STATE_CLOSED; -+ -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+static int __devinit cs_ssi_probe(struct ssi_device *dev) -+{ -+ int err = 0; -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ if ((dev->n_ch == 1) && (dev->n_p == 0)) { -+ ssi_set_read_cb(dev, cs_ssi_read_control_done); -+ ssi_set_write_cb(dev, cs_ssi_write_control_done); -+ channel = &ssi_iface.channels[CONTROL_CH]; -+ } else if ((dev->n_ch == 2) && (dev->n_p == 0)) { -+ ssi_set_read_cb(dev, cs_ssi_read_data_done); -+ ssi_set_write_cb(dev, cs_ssi_write_data_done); -+ channel = &ssi_iface.channels[DATA_CH]; -+ } else { -+ err = -ENXIO; -+ goto leave; -+ } -+ -+ channel->dev = dev; -+ channel->state = 0; -+leave: -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ DLEAVE(err); -+ return err; -+} -+ -+static int __devexit cs_ssi_remove(struct ssi_device *dev) -+{ -+ int err = 0; -+ struct cs_ssi_channel *channel; -+ DENTER(); -+ -+ spin_lock_bh(&ssi_iface.lock); -+ -+ if ((dev->n_ch == 1) && (dev->n_p == 0)) -+ channel = &ssi_iface.channels[CONTROL_CH]; -+ else if ((dev->n_ch == 2) && (dev->n_p == 0)) -+ channel = &ssi_iface.channels[DATA_CH]; -+ else { -+ err = -ENXIO; -+ goto leave; -+ } -+ -+ ssi_set_read_cb(dev, NULL); -+ ssi_set_write_cb(dev, NULL); -+ channel->dev = NULL; -+ channel->state = 0; -+leave: -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ DLEAVE(err); -+ return err; -+} -+ -+static struct ssi_device_driver cs_ssi_speech_driver = { -+ .ctrl_mask = ANY_SSI_CONTROLLER, -+ .ch_mask[0] = CHANNEL(1) | CHANNEL(2), -+ .probe = cs_ssi_probe, -+ .remove = __devexit_p(cs_ssi_remove), -+ .driver = { -+ .name = "cmt_speech", -+ }, -+}; -+ -+int __init cs_ssi_init(void) -+{ -+ int err = 0; -+ DENTER(); -+ -+ spin_lock_init(&ssi_iface.lock); -+ -+ ssi_iface.channels[CONTROL_CH].dev = NULL; -+ ssi_iface.channels[CONTROL_CH].opened = 0; -+ ssi_iface.channels[CONTROL_CH].state = 0; -+ ssi_iface.channels[CONTROL_CH].channel_id = CONTROL_CH; -+ spin_lock_init(&ssi_iface.channels[CONTROL_CH].lock); -+ -+ ssi_iface.channels[DATA_CH].dev = NULL; -+ ssi_iface.channels[DATA_CH].opened = 0; -+ ssi_iface.channels[DATA_CH].state = 0; -+ ssi_iface.channels[DATA_CH].channel_id = DATA_CH; -+ spin_lock_init(&ssi_iface.channels[DATA_CH].lock); -+ -+ ssi_iface.cdsp_state = CS_STATE_CLOSED; -+ ssi_iface.wakeline_state = 0; -+ -+ err = register_ssi_driver(&cs_ssi_speech_driver); -+ if (err) -+ pr_err("Error when registering ssi driver %d", err); -+ -+ DLEAVE(err); -+ return err; -+} -+ -+int __exit cs_ssi_exit(void) -+{ -+ DENTER(); -+ -+ cs_ssi_set_wakeline(0); -+ -+ cs_ssi_closechannel(&ssi_iface.channels[CONTROL_CH]); -+ cs_ssi_closechannel(&ssi_iface.channels[DATA_CH]); -+ -+ unregister_ssi_driver(&cs_ssi_speech_driver); -+ -+ DLEAVE(0); -+ return 0; -+} -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-ssi.h linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-ssi.h ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/cs-ssi.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/cs-ssi.h 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,39 @@ -+/* -+ * cs-ssi.h -+ * -+ * Part of the CMT speech driver. -+ * -+ * Copyright (C) 2008,2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Peter Ujfalusi -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _CS_SSI_H -+#define _CS_SSI_H -+ -+int cs_ssi_init(void); -+int cs_ssi_exit(void); -+ -+int cs_ssi_start(unsigned long mmap_base, unsigned long mmap_size); -+void cs_ssi_stop(void); -+int cs_ssi_buf_config(struct cs_buffer_config *buf_cfg); -+void cs_ssi_set_wakeline(unsigned int new_state); -+unsigned int cs_ssi_get_state(void); -+int cs_ssi_command(u32 cmd); -+ -+#endif /* _CS_SSI_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/Kconfig linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/Kconfig ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/Kconfig 2011-06-22 13:19:32.743063276 +0200 -@@ -0,0 +1,24 @@ -+# -+# OMAP SSI kernel configuration -+# -+ -+config SSI_CMT_SPEECH -+ tristate "CMT speech driver" -+ depends on OMAP_SSI -+ ---help--- -+ If you say Y here, you will enable the CMT speech driver. -+ This driver provides a simple interface for the user space speech -+ protocol implementation to communicate with the cellular engine over -+ the SSI bus. -+ -+ If unsure, say Y, or else you will not be able to make voice calls. -+ -+config SSI_CMT_SPEECH_DEBUG -+ bool "Debug CMT speech driver" -+ depends on SSI_CMT_SPEECH && DEBUG_KERNEL -+ default n -+ ---help--- -+ Enable the debug information in the CMT speech driver. Be warned -+ that it can be quite noisy. -+ -+ If unsure, say N. -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/Makefile linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/Makefile ---- linux-omap-2.6.28-omap1/drivers/misc/cmt-speech/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/cmt-speech/Makefile 2011-06-22 13:19:32.743063276 +0200 -@@ -0,0 +1,7 @@ -+# -+# Makefile for SSI CMT speech driver -+# -+ -+obj-$(CONFIG_SSI_CMT_SPEECH) += cmt_speech.o -+ -+cmt_speech-objs := cs-core.o cs-ssi.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/Kconfig linux-omap-2.6.28-nokia1/drivers/misc/Kconfig ---- linux-omap-2.6.28-omap1/drivers/misc/Kconfig 2011-06-22 13:14:18.743067740 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/misc/Kconfig 2011-06-22 13:19:32.743063276 +0200 -@@ -178,6 +178,15 @@ config ASUS_LAPTOP - - If you have an ACPI-compatible ASUS laptop, say Y or M here. 
- -+config NOKIA_AV_DETECT -+ tristate "Nokia AV accessory detection support" -+ depends on SND_OMAP_SOC_RX51 && TWL4030_MADC -+ select SND_JACK if INPUT=y || INPUT=SND -+ default n -+ help -+ Say Y here if you want to support Nokia AV accessory -+ detection in the Nokia AV connector. -+ - config FUJITSU_LAPTOP - tristate "Fujitsu Laptop Extras" - depends on X86 -@@ -511,5 +520,9 @@ config SGI_GRU_DEBUG - you are unsure, say N. - - source "drivers/misc/c2port/Kconfig" -+source "drivers/misc/ssi/Kconfig" -+source "drivers/misc/mcsaab/Kconfig" -+source "drivers/misc/cmt-speech/Kconfig" -+source "drivers/misc/ssi-char/Kconfig" - - endif # MISC_DEVICES -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/Makefile linux-omap-2.6.28-nokia1/drivers/misc/Makefile ---- linux-omap-2.6.28-omap1/drivers/misc/Makefile 2011-06-22 13:14:18.743067740 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/misc/Makefile 2011-06-22 13:19:32.743063276 +0200 -@@ -25,6 +25,7 @@ obj-$(CONFIG_SGI_IOC4) += ioc4.o - obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o - obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o - obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o -+obj-$(CONFIG_NOKIA_AV_DETECT) += nokia-av.o - obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o - obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o - obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o -@@ -34,3 +35,8 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/ - obj-$(CONFIG_SGI_GRU) += sgi-gru/ - obj-$(CONFIG_HP_ILO) += hpilo.o - obj-$(CONFIG_C2PORT) += c2port/ -+obj-$(CONFIG_OMAP_SSI) += ssi/ -+obj-$(CONFIG_SSI_MCSAAB_IMP) += mcsaab/ -+obj-$(CONFIG_SSI_CMT_SPEECH) += cmt-speech/ -+obj-$(CONFIG_SSI_CHAR) += ssi-char/ -+obj-$(CONFIG_PANIC_INFO_BUFF) += panic_info_buff.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/mcsaab/Kconfig linux-omap-2.6.28-nokia1/drivers/misc/mcsaab/Kconfig ---- linux-omap-2.6.28-omap1/drivers/misc/mcsaab/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/mcsaab/Kconfig 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,10 @@ -+# -+# SSI protocol kernel configuration -+# -+config SSI_MCSAAB_IMP -+ tristate "Enable SSI McSAAB improved protocol" -+ depends on OMAP_SSI && PHONET -+ ---help--- -+ If you say Y here, you will enable the SSI McSAAB improved protocol. -+ -+ If unsure, say Y, or else you will not be able to connect to the CMT. -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/mcsaab/Makefile linux-omap-2.6.28-nokia1/drivers/misc/mcsaab/Makefile ---- linux-omap-2.6.28-omap1/drivers/misc/mcsaab/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/mcsaab/Makefile 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,8 @@ -+# -+# Makefile for SSI protocol modules -+# -+obj-$(CONFIG_SSI_MCSAAB_IMP) += ssi_mcsaab_imp.o -+ -+ifeq ($(CONFIG_SSI_MCSAAB_DEBUG),y) -+EXTRA_CFLAGS += -DSSI_DEBUG -+endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/mcsaab/ssi_mcsaab_imp.c linux-omap-2.6.28-nokia1/drivers/misc/mcsaab/ssi_mcsaab_imp.c ---- linux-omap-2.6.28-omap1/drivers/misc/mcsaab/ssi_mcsaab_imp.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/mcsaab/ssi_mcsaab_imp.c 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,1274 @@ -+/* -+ * ssi_mcsaab_imp.c -+ * -+ * Implementation of the SSI McSAAB improved protocol. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. 
-+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MCSAAB_IMP_VERSION "2.0-rc1" -+#define MCSAAB_IMP_DESC "SSI McSAAB Improved protocol implementeation" -+#define MCSAAB_IMP_NAME "SSI McSAAB PROTOCOL" -+ -+#define LOG_NAME "McSAAB: " -+/* ssi_proto flags values */ -+ -+/* CMT online/offline */ -+#define CMT_ONLINE 0x01 -+/* Keep track of clocks */ -+#define CLK_ENABLE 0x02 -+/* -+ * Flag use to check if the WAKELINE TEST down has been already made -+ * This is needed to avoid the race condition where the CMT puts down -+ * the CAWAKE line before we have processed the WAKELINE_TEST result. -+ * If this is not done clock management in the HW driver will fail. -+ */ -+#define WAKEDOWN_TEST 0x04 -+ -+/* end ssi_proto flags */ -+ -+#define LOCAL_D_VER_ID 0x01 -+ -+#define MCSAAB_TX_QUEUE_LEN 100 -+ -+#define C_QUEUE_LEN 4 -+ -+#define SSI_MAX_MTU 65535 -+#define SSI_DEFAULT_MTU 4000 -+ -+#define WD_TIMEOUT 2000 /* 500 msecs */ -+#define KA_TIMEOUT 15 /* 15 msecs */ -+ -+#define PN_MEDIA_SOS 21 -+ -+/* -+ * McSAAB command definitions -+ */ -+#define COMMAND(data) ((data) >> 28) -+#define PAYLOAD(data) ((data) & 0x0fffffff) -+ -+/* Commands */ -+#define SW_BREAK 0x0 -+#define BOOT_INFO_REQ 0x1 -+#define BOOT_INFO_RESP 0x2 -+#define WAKE_TEST_RES 0x3 -+#define START_TRANS 0x4 -+#define READY 0x5 -+#define DUMMY 0xc -+ -+/* Payloads */ -+#define RESERVED 0X0000000 -+#define DATA_VERSION_MASK 0xff -+#define DATA_VERSION(data) ((data) & DATA_VERSION_MASK) -+#define DATA_RESULT_MASK 0X0f -+#define DATA_RESULT(data) ((data) & DATA_RESULT_MASK) -+#define WAKE_TEST_OK 0x0 -+#define WAKE_TEST_FAILED 0x1 -+#define PDU_LENGTH_MASK 0xffff -+#define PDU_LENGTH(data) (((data) >> 8) & PDU_LENGTH_MASK) -+#define MSG_ID_MASK 0xff -+#define MSG_ID(data) ((data) & MSG_ID_MASK) -+#define ACK_TO_CMD_MASK 0x0f -+#define ACK_TO_CMD(data) ((data) & ACK_TO_CMD_MASK) -+ -+#define DUMMY_PAYLOAD 0xaaccaaa -+ -+#define CMD(command, payload) (((command) << 28) | ((payload) & 0x0fffffff)) -+ -+/* Commands for the control channel (channel number 0) */ -+#define SWBREAK_CMD CMD(SW_BREAK, 0x000000) -+#define BOOT_INFO_REQ_CMD(verid) \ -+ CMD(BOOT_INFO_REQ, (verid) & DATA_VERSION_MASK) -+#define BOOT_INFO_RESP_CMD(verid) \ -+ CMD(BOOT_INFO_RESP, (verid) & DATA_VERSION_MASK) -+#define START_TRANS_CMD(pdu_len, message_id) \ -+ CMD(START_TRANS, (((pdu_len) << 8) | (message_id))) -+#define READY_CMD CMD(READY, RESERVED) -+#define FQ_CHANGE_REQ_CMD(max_tx_speed) CMD(FQ_CHANGE_REQ, max_tx_speed) -+#define FQ_CHANGE_DONE_CMD CMD(FQ_CHANGE_DONE, RESERVED) -+#define ACK_CMD(ack_cmd) CMD(ACK, ack_cmd) -+ -+/* -+ * End McSAAB command definitions -+ */ -+ -+/* Main state machine states */ -+enum { -+ INIT, -+ HANDSHAKE, -+ ACTIVE, -+ 
MAIN_NUM_STATES, /* NOTE: Must be always the last one*/ -+}; -+ -+/* Send state machine states */ -+enum { -+ SEND_IDLE, -+ WAIT4READY, -+ SEND_READY, -+ SENDING, -+ SENDING_SWBREAK, -+ SEND_NUM_STATES, /* NOTE: Must be always the last one */ -+}; -+ -+/* Recevice state machine states */ -+enum { -+ RECV_IDLE, -+ RECV_READY, -+ RECEIVING, -+ RECV_BUSY, -+ RECV_NUM_STATES, /* NOTE: Must be always the last one */ -+}; -+ -+ -+/** -+ * struct mcsaab_imp - McSAAB improved protocol data -+ * @main_state: State of the general state machine -+ * @send_state: State of the TX state machine -+ * @recv_state: State of the RX state machine -+ * @flags: Keeps tracks of several events, mainly used for workarounds -+ * @rcv_c_msg: Control channel RX buffer -+ * @c_queue: Control channel TX queue. -+ * @head: First frame in control channel TX queue -+ * @tail: Last frame in the control channel TX queue -+ * @rcv_msg_id: Expeceted next RX message id -+ * @send_msg_id: Next TX messaged id -+ * @dev_d_ch: Data channel -+ * @dev_c_ch: Control channel -+ * @boot_wd: Boot handshake watchdog -+ * @tx_wd: TX path watchdog -+ * @rx_wd: RX path watchdog -+ * @keep_alive: Workaround timer to wakeup the MPU from inactive state -+ * @tx_queue: TX packets queue -+ * @rx_queue: RX packets queue -+ * @netdev: Phonet network interface -+ * @cmt_rst_gpio: CMT reset gpio line -+ * @cmt_rst_gpio_irq: IRQ associted to the CMT reset gpio line -+ * @cmt_rst_tasklet: Bottom half for CMT reset line events -+ */ -+struct mcsaab_imp { -+ unsigned int main_state; -+ unsigned int send_state; -+ unsigned int recv_state; -+ unsigned int flags; -+ -+ u32 rcv_c_msg; -+ -+ u32 c_queue[C_QUEUE_LEN]; -+ int head; -+ int tail; -+ -+ u8 rcv_msg_id; -+ u8 send_msg_id; -+ -+ struct ssi_device *dev_d_ch; -+ struct ssi_device *dev_c_ch; -+ -+ struct timer_list boot_wd; -+ struct timer_list tx_wd; -+ struct timer_list rx_wd; -+ struct timer_list keep_alive; -+ -+ struct clk *ssi_clk; -+ -+ spinlock_t lock; -+ -+ /* Network interface */ -+ struct sk_buff_head tx_queue; -+ struct sk_buff_head rx_queue; -+ -+ struct net_device *netdev; -+ -+ int cmt_rst_gpio; -+ int cmt_rst_gpio_irq; -+ struct tasklet_struct cmt_rst_tasklet; -+}; -+ -+static struct mcsaab_imp ssi_protocol; -+ -+static void mcsaab_clk_enable(void) -+{ -+ if (!(ssi_protocol.flags & CLK_ENABLE)) { -+ ssi_protocol.flags |= CLK_ENABLE; -+ clk_enable(ssi_protocol.ssi_clk); -+ } -+} -+ -+static void mcsaab_clk_disable(void) -+{ -+ if (ssi_protocol.flags & CLK_ENABLE) { -+ ssi_protocol.flags &= ~CLK_ENABLE; -+ clk_disable(ssi_protocol.ssi_clk); -+ } -+} -+ -+static void reset_mcsaab(void) -+{ -+ mcsaab_clk_disable(); /* Release clk, if held */ -+ del_timer(&ssi_protocol.boot_wd); -+ del_timer(&ssi_protocol.rx_wd); -+ del_timer(&ssi_protocol.tx_wd); -+ del_timer(&ssi_protocol.keep_alive); -+ ssi_protocol.main_state = INIT; -+ ssi_protocol.send_msg_id = 0; -+ ssi_protocol.rcv_msg_id = 0; -+ ssi_protocol.send_state = SEND_IDLE; -+ ssi_protocol.recv_state = RECV_IDLE; -+ ssi_protocol.flags = 0; -+ ssi_protocol.head = 0; -+ ssi_protocol.tail = 0; -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_FLUSH_TX, NULL); -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_FLUSH_RX, NULL); -+ if (ssi_protocol.dev_d_ch) { -+ ssi_read_cancel(ssi_protocol.dev_d_ch); -+ ssi_write_cancel(ssi_protocol.dev_d_ch); -+ } -+ if (ssi_protocol.dev_c_ch) -+ ssi_write_cancel(ssi_protocol.dev_c_ch); -+ skb_queue_purge(&ssi_protocol.tx_queue); -+ skb_queue_purge(&ssi_protocol.rx_queue); -+ pr_debug(LOG_NAME "CMT is OFFLINE\n"); -+ 
netif_carrier_off(ssi_protocol.netdev); -+} -+ -+static int mcsaab_need_keep_alive(void) -+{ -+ if (ssi_protocol.recv_state == RECV_IDLE) { -+ switch (ssi_protocol.send_state) { -+ case SEND_IDLE: -+ return 0; -+ case SEND_READY: /* Check needed cause cmtspeech workaround */ -+ if (!skb_queue_len(&ssi_protocol.tx_queue)) -+ return 0; -+ break; -+ } -+ } -+ return 1; -+} -+ -+static void mcsaab_stop_keep_alive(void) -+{ -+ if (!mcsaab_need_keep_alive()) -+ del_timer(&ssi_protocol.keep_alive); -+} -+ -+static void mcsaab_restart_keep_alive(void) -+{ -+ if (mcsaab_need_keep_alive()) -+ mod_timer(&ssi_protocol.keep_alive, -+ jiffies + msecs_to_jiffies(KA_TIMEOUT)); -+} -+ -+static void send_c_msg(u32 c_msg) -+{ -+ int size; -+ -+ size = (C_QUEUE_LEN + ssi_protocol.tail - ssi_protocol.head) -+ % C_QUEUE_LEN; -+ if (size >= (C_QUEUE_LEN - 1)) { -+ pr_debug(LOG_NAME "Control message queue OVERRUN !\n"); -+ return; -+ } -+ pr_debug(LOG_NAME "Queue head %d tail %d size %d\n", -+ ssi_protocol.head, ssi_protocol.tail, size); -+ ssi_protocol.c_queue[ssi_protocol.tail] = c_msg; -+ ssi_protocol.tail = (ssi_protocol.tail + 1) % C_QUEUE_LEN; -+ -+ if (size == 0) -+ ssi_write(ssi_protocol.dev_c_ch, -+ &ssi_protocol.c_queue[ssi_protocol.head], 1); -+ -+} -+ -+static void mcsaab_start_tx(void) -+{ -+ struct sk_buff *skb; -+ -+ skb = skb_peek(&ssi_protocol.tx_queue); -+ ssi_protocol.send_state = SENDING; -+ mod_timer(&ssi_protocol.tx_wd, jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ send_c_msg(START_TRANS_CMD((skb->len + 3) / 4, -+ ssi_protocol.send_msg_id)); -+} -+ -+/* Watchdog functions */ -+static void mcsaab_watchdog_dump(struct mcsaab_imp *prot) -+{ -+ struct sk_buff *skb; -+ u32 acwake; -+ unsigned int cawake; -+ unsigned int last; -+ -+ ssi_ioctl(prot->dev_c_ch, SSI_IOCTL_WAKE, &acwake); -+ ssi_ioctl(prot->dev_c_ch, SSI_IOCTL_CAWAKE, &cawake); -+ last = (C_QUEUE_LEN - 1 + ssi_protocol.head) % C_QUEUE_LEN; -+ -+ pr_err(LOG_NAME "ACWake line %08X\n", acwake); -+ pr_err(LOG_NAME "CAWake line %d\n", cawake); -+ pr_err(LOG_NAME "Main state: %d\n", prot->main_state); -+ pr_err(LOG_NAME "RX state:%02X\n", prot->recv_state); -+ pr_err(LOG_NAME "TX state:%02X\n", prot->send_state); -+ pr_err(LOG_NAME "CMT was %s\n", -+ (prot->flags & CMT_ONLINE) ? 
"ONLINE" : "OFFLINE"); -+ pr_err(LOG_NAME "FLAGS: %04X\n", prot->flags); -+ pr_err(LOG_NAME "Last RX control msg %08X\n", prot->rcv_c_msg); -+ pr_err(LOG_NAME "Last TX control msg %08X\n", prot->c_queue[last]); -+ pr_err(LOG_NAME "TX C queue head %d tail %d\n", prot->head, prot->tail); -+ pr_err(LOG_NAME "Data RX ID: %d\n", prot->rcv_msg_id); -+ pr_err(LOG_NAME "Data TX ID: %d\n", prot->send_msg_id); -+ pr_err(LOG_NAME "TX queue len: %d\n", skb_queue_len(&prot->tx_queue)); -+ if (skb_queue_len(&prot->tx_queue) > 0) { -+ skb = skb_peek(&prot->tx_queue); -+ pr_err(LOG_NAME "TX HEAD packet:\n"); -+ print_hex_dump_bytes(LOG_NAME, DUMP_PREFIX_ADDRESS, skb->data, -+ min(skb->len, (unsigned int)32)); -+ pr_err(LOG_NAME "END TX HEAD packet.\n"); -+ } -+ pr_err(LOG_NAME "RX queue len: %d\n", skb_queue_len(&prot->rx_queue)); -+ if (skb_queue_len(&prot->rx_queue) > 0) { -+ skb = skb_peek(&prot->rx_queue); -+ pr_err(LOG_NAME "RX HEAD packet:\n"); -+ print_hex_dump_bytes(LOG_NAME, DUMP_PREFIX_ADDRESS, skb->data, -+ min(skb->len, (unsigned int)32)); -+ pr_err(LOG_NAME "END RX HEAD packet.\n"); -+ } -+} -+ -+static void mcsaab_watchdog(unsigned long data) -+{ -+ struct mcsaab_imp *prot = (struct mcsaab_imp *)data; -+ pr_debug(LOG_NAME "------ WATCHDOG TIMER trigerred ------\n"); -+ mcsaab_watchdog_dump(prot); -+ pr_debug(LOG_NAME "--------------------------------------\n"); -+ -+ reset_mcsaab(); -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_DOWN, NULL); -+} -+ -+static void mcsaab_watchdog_rx(unsigned long data) -+{ -+ pr_err(LOG_NAME "------- RX WATCHDOG TIMER trigerred -----\n"); -+ mcsaab_watchdog(data); -+} -+ -+static void mcsaab_watchdog_tx(unsigned long data) -+{ -+ pr_err(LOG_NAME "------- TX WATCHDOG TIMER trigerred -----\n"); -+ mcsaab_watchdog(data); -+} -+ -+static void keep_alive_timer(unsigned long data) -+{ -+ spin_lock(&ssi_protocol.lock); -+ -+ pr_debug("Keep alive states r(%d) s(%d)\n", -+ ssi_protocol.recv_state, ssi_protocol.send_state); -+ -+ mcsaab_restart_keep_alive(); -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+/* End watchdog functions */ -+ -+/* -+ * Network device callbacks -+ */ -+static int ssi_pn_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ u32 acwake = 0; -+ int qlen; -+ -+ if (skb->protocol != htons(ETH_P_PHONET)) -+ goto drop; -+ -+ /* Pad to 32-bits */ -+ if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) { -+ dev->stats.tx_dropped++; -+ return 0; -+ } -+ -+ /* Modem sends Phonet messages over SSI with its own endianess... -+ * Assume that modem has the same endianess as we do. */ -+ if (skb_cow_head(skb, 0)) -+ goto drop; -+#ifdef __LITTLE_ENDIAN -+ if (likely(skb->len >= 6)) { -+ u8 buf = skb->data[4]; -+ skb->data[4] = skb->data[5]; -+ skb->data[5] = buf; -+ } -+#endif -+ -+ spin_lock_bh(&ssi_protocol.lock); -+ -+ if (unlikely(!(ssi_protocol.flags & CMT_ONLINE))) { -+ pr_notice(LOG_NAME "Dropping TX data. CMT is OFFLINE\n"); -+ spin_unlock_bh(&ssi_protocol.lock); -+ goto drop; -+ } -+ -+ skb_queue_tail(&ssi_protocol.tx_queue, skb); -+ qlen = skb_queue_len(&ssi_protocol.tx_queue); -+ -+ if ((dev->tx_queue_len > 1) && (qlen >= dev->tx_queue_len)) { -+ pr_debug(LOG_NAME "TX queue full %d\n", qlen); -+ netif_stop_queue(dev); -+ goto out; -+ } else if (qlen > 1) { -+ pr_debug(LOG_NAME "Pending frame on TX queue %d\n", qlen); -+ goto out; -+ } -+ -+ /* -+ * Check if ACWAKE line is down. We need to check if audio driver -+ * has put the wakeline down so we know that we need to wait for a -+ * READY command when McSAAB sets it up. 
-+ */ -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE, &acwake); -+ mod_timer(&ssi_protocol.keep_alive, -+ jiffies + msecs_to_jiffies(KA_TIMEOUT)); -+ pr_debug(LOG_NAME "ACWAKE %d\n", acwake); -+ if (!acwake) -+ ssi_protocol.send_state = WAIT4READY; -+ -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_UP, NULL); -+ ssi_protocol.main_state = ACTIVE; -+ if (ssi_protocol.send_state == SEND_READY) -+ mcsaab_start_tx(); -+ else { -+ pr_debug(LOG_NAME "TX pending of READY cmd\n"); -+ mod_timer(&ssi_protocol.tx_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ } -+out: -+ spin_unlock_bh(&ssi_protocol.lock); -+ dev->stats.tx_packets++; -+ dev->stats.tx_bytes += skb->len; -+ return 0; -+ -+drop: -+ dev->stats.tx_dropped++; -+ dev_kfree_skb(skb); -+ return 0; -+} -+ -+static int ssi_pn_set_mtu(struct net_device *dev, int new_mtu) -+{ -+ if (new_mtu > SSI_MAX_MTU || new_mtu < PHONET_MIN_MTU) -+ return -EINVAL; -+ dev->mtu = new_mtu; -+ return 0; -+} -+ -+static void ssi_pn_setup(struct net_device *dev) -+{ -+ dev->features = 0; -+ dev->type = ARPHRD_PHONET; -+ dev->flags = IFF_POINTOPOINT | IFF_NOARP; -+ dev->mtu = SSI_DEFAULT_MTU; -+ dev->hard_header_len = 1; -+ dev->dev_addr[0] = PN_MEDIA_SOS; -+ dev->addr_len = 1; -+ dev->tx_queue_len = MCSAAB_TX_QUEUE_LEN; -+ -+ dev->destructor = free_netdev; -+ dev->header_ops = &phonet_header_ops; -+ dev->hard_start_xmit = ssi_pn_xmit; /* mandatory */ -+ dev->change_mtu = ssi_pn_set_mtu; -+} -+ -+/* In soft IRQ context */ -+static int ssi_pn_rx(struct sk_buff *skb) -+{ -+ struct net_device *dev = skb->dev; -+ -+ if (unlikely(!netif_running(dev))) { -+ dev->stats.rx_dropped++; -+ goto drop; -+ } -+ if (unlikely(!pskb_may_pull(skb, 6))) { -+ dev->stats.rx_errors++; -+ dev->stats.rx_length_errors++; -+ goto drop; -+ } -+ -+ dev->stats.rx_packets++; -+ dev->stats.rx_bytes += skb->len; -+ -+#ifdef __LITTLE_ENDIAN -+ if (likely(skb->len >= 6)) -+ ((u16 *)skb->data)[2] = swab16(((u16 *)skb->data)[2]); -+ pr_debug(LOG_NAME "RX length fixed (%04x -> %u)\n", -+ ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); -+#endif -+ -+ skb->protocol = htons(ETH_P_PHONET); -+ skb_reset_mac_header(skb); -+ __skb_pull(skb, 1); -+ -+ pr_debug(LOG_NAME "RX done\n"); -+ netif_rx(skb); -+ return 0; -+ -+drop: -+ pr_debug(LOG_NAME "Drop RX packet\n"); -+ dev_kfree_skb(skb); -+ return 0; -+} -+/* -+ * End network device callbacks -+ */ -+ -+/* Incoming commands */ -+static void boot_info_req_h(u32 msg) -+{ -+ switch (ssi_protocol.main_state) { -+ case INIT: -+ mcsaab_clk_enable(); -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_UP, NULL); -+ send_c_msg(BOOT_INFO_RESP_CMD(LOCAL_D_VER_ID)); -+ ssi_protocol.flags &= ~WAKEDOWN_TEST; -+ ssi_protocol.main_state = HANDSHAKE; -+ /* Start BOOT HANDSHAKE timer */ -+ mod_timer(&ssi_protocol.boot_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ break; -+ case HANDSHAKE: -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_UP, NULL); -+ send_c_msg(BOOT_INFO_RESP_CMD(LOCAL_D_VER_ID)); -+ ssi_protocol.flags &= ~WAKEDOWN_TEST; -+ /* Start BOOT HANDSHAKE timer */ -+ mod_timer(&ssi_protocol.boot_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ break; -+ case ACTIVE: -+ pr_warning(LOG_NAME "Rebooting sequence started.\n"); -+ mcsaab_watchdog_dump(&ssi_protocol); -+ reset_mcsaab(); -+ mcsaab_clk_enable(); -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_UP, NULL); -+ send_c_msg(BOOT_INFO_RESP_CMD(LOCAL_D_VER_ID)); -+ ssi_protocol.main_state = HANDSHAKE; -+ /* Start BOOT HANDSHAKE timer */ -+ mod_timer(&ssi_protocol.boot_wd, -+ jiffies + 
msecs_to_jiffies(WD_TIMEOUT)); -+ break; -+ default: -+ pr_debug(LOG_NAME "Wrong protocol state %d\n", -+ ssi_protocol.main_state); -+ break; -+ } -+} -+ -+static void boot_info_resp_h(u32 msg) -+{ -+ if (ssi_protocol.main_state != INIT) { -+ pr_debug(LOG_NAME "BOOT_INFO_RESP in wrong state:\n"); -+ pr_debug(LOG_NAME " MAIN_STATE %d\n", -+ ssi_protocol.main_state); -+ return; -+ } -+ -+ mcsaab_clk_enable(); -+ ssi_protocol.main_state = HANDSHAKE; -+} -+ -+static void wakelines_test_result_h(u32 msg) -+{ -+ if (ssi_protocol.main_state != HANDSHAKE) { -+ pr_debug(LOG_NAME "WAKELINES_TEST in wrong state:\n"); -+ pr_debug(LOG_NAME " MAIN_STATE %d\n", -+ ssi_protocol.main_state); -+ return; -+ } -+ -+ pr_notice(LOG_NAME "WAKELINES TEST %s\n", -+ (PAYLOAD(msg) & WAKE_TEST_FAILED) ? "FAILED" : "OK"); -+ -+ if (PAYLOAD(msg) & WAKE_TEST_FAILED) { -+ mcsaab_watchdog_dump(&ssi_protocol); -+ reset_mcsaab(); -+ } else { -+ ssi_protocol.main_state = ACTIVE; -+ ssi_protocol.flags &= ~WAKEDOWN_TEST; -+ ssi_protocol.flags |= CMT_ONLINE; -+ pr_debug(LOG_NAME "CMT is ONLINE\n"); -+ netif_wake_queue(ssi_protocol.netdev); -+ netif_carrier_on(ssi_protocol.netdev); -+ } -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_DOWN, NULL); -+ mcsaab_clk_disable(); /* Drop clk usecount */ -+ /* Stop BOOT HANDSHAKE timer */ -+ del_timer(&ssi_protocol.boot_wd); -+} -+ -+static void start_trans_h(u32 msg) -+{ -+ struct sk_buff *skb; -+ int len = PDU_LENGTH(msg); -+ u8 r_msg_id = 0; -+ -+ r_msg_id = msg & MSG_ID_MASK; -+ pr_debug(LOG_NAME "Receiving START_TRANS len %d\n", PDU_LENGTH(msg)); -+ pr_debug(LOG_NAME "START_TRANS msg id %d expected msg id %d\n", -+ r_msg_id, ssi_protocol.rcv_msg_id); -+ -+ if (unlikely(ssi_protocol.main_state != ACTIVE)) { -+ pr_debug(LOG_NAME "START_TRANS in wrong state:\n"); -+ pr_debug(LOG_NAME " SEND STATE %d\n", -+ ssi_protocol.send_state); -+ pr_debug(LOG_NAME " MAIN_STATE %d\n", -+ ssi_protocol.main_state); -+ return; -+ } -+ -+ if (unlikely(r_msg_id != ssi_protocol.rcv_msg_id)) { -+ pr_debug(LOG_NAME "RX msg id mismatch (MSG ID: %d " -+ "McSAAB RX ID: %d)\n", r_msg_id, ssi_protocol.rcv_msg_id); -+ mcsaab_watchdog_dump(&ssi_protocol); -+ reset_mcsaab(); -+ return; -+ } -+ ssi_protocol.rcv_msg_id = (ssi_protocol.rcv_msg_id + 1) & 0xff; -+ -+ skb = netdev_alloc_skb(ssi_protocol.netdev, len * 4); -+ if (unlikely(!skb)) { -+ printk(KERN_DEBUG LOG_NAME "Out of memory RX skb.\n"); -+ reset_mcsaab(); -+ return; -+ } -+ -+ skb_put(skb, len * 4); -+ skb_queue_tail(&ssi_protocol.rx_queue, skb); -+ if (skb_queue_len(&ssi_protocol.rx_queue) == 1) { -+ mod_timer(&ssi_protocol.rx_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ ssi_protocol.recv_state = RECEIVING; -+ ssi_read(ssi_protocol.dev_d_ch, (u32 *)skb->data, len); -+ } -+} -+ -+static void ready_h(u32 msg) -+{ -+ if (unlikely((ssi_protocol.main_state != ACTIVE) || -+ (ssi_protocol.send_state >= SENDING))) { -+ pr_debug(LOG_NAME "READY CMD on wrong state:\n"); -+ pr_debug(LOG_NAME " SEND STATE %d\n", -+ ssi_protocol.send_state); -+ pr_debug(LOG_NAME " MAIN_STATE %d\n", -+ ssi_protocol.main_state); -+ pr_debug(LOG_NAME " FLAGS %02X\n", ssi_protocol.flags); -+ return; -+ } -+ if (skb_queue_len(&ssi_protocol.tx_queue) > 0) -+ mcsaab_start_tx(); -+ else -+ ssi_protocol.send_state = SEND_READY; -+} -+ -+static void swbreak_h(void) -+{ -+ if (ssi_protocol.main_state != ACTIVE) { -+ pr_debug(LOG_NAME "SW BREAK in wrong state:\n"); -+ pr_debug(LOG_NAME " SEND STATE %d\n", -+ ssi_protocol.send_state); -+ pr_debug(LOG_NAME " MAIN_STATE %d\n", -+ 
ssi_protocol.main_state); -+ return; -+ } -+ pr_debug(LOG_NAME "SWBREAK Ignored\n"); -+ mcsaab_clk_disable(); -+} -+/* End incoming commands */ -+ -+/* OMAP SSI driver callbacks */ -+static void c_send_done_cb(struct ssi_device *c_dev) -+{ -+ u32 acwake = 0; -+ u32 cmd; -+ struct sk_buff *skb; -+ -+ -+ spin_lock(&ssi_protocol.lock); -+ -+ mcsaab_restart_keep_alive(); -+ -+ cmd = ssi_protocol.c_queue[ssi_protocol.head]; -+ pr_debug(LOG_NAME "Control message 0x%08X sent\n", cmd); -+ -+ if ((COMMAND(cmd) == START_TRANS) && -+ (ssi_protocol.send_state == SENDING)) { -+ skb = skb_peek(&ssi_protocol.tx_queue); -+ ssi_write(ssi_protocol.dev_d_ch, (u32 *)skb->data, -+ (skb->len + 3) / 4); -+ } else if ((COMMAND(cmd) == SW_BREAK) && -+ (ssi_protocol.send_state == SENDING_SWBREAK)) { -+ if (skb_queue_len(&ssi_protocol.tx_queue) > 0) { -+ pr_debug(LOG_NAME "Got SKB while sending SW_BREAK\n"); -+ mcsaab_start_tx(); -+ } else { -+ pr_debug(LOG_NAME "SW BREAK: Trying to set ACWake " -+ "line DOWN\n"); -+ ssi_ioctl(ssi_protocol.dev_c_ch, SSI_IOCTL_WAKE_DOWN, -+ NULL); -+ /* -+ * We need to check that other modules does not hold -+ * still the wakeup line. -+ */ -+ ssi_ioctl(c_dev, SSI_IOCTL_WAKE, &acwake); -+ pr_debug(LOG_NAME "ACWAKE %d\n", acwake); -+ if (!acwake) -+ ssi_protocol.send_state = SEND_IDLE; -+ else -+ ssi_protocol.send_state = SEND_READY; -+ mcsaab_stop_keep_alive(); -+ } -+ netif_wake_queue(ssi_protocol.netdev); -+ } -+ -+ /* Check for pending TX commands */ -+ ++ssi_protocol.head; -+ ssi_protocol.head %= C_QUEUE_LEN; -+ -+ if (ssi_protocol.tail != ssi_protocol.head) { -+ pr_debug(LOG_NAME "Dequeue message on pos %d\n", -+ ssi_protocol.head); -+ pr_debug(LOG_NAME "Sending queued msg 0x%08x\n", -+ ssi_protocol.c_queue[ssi_protocol.head]); -+ ssi_write(ssi_protocol.dev_c_ch, -+ &ssi_protocol.c_queue[ssi_protocol.head], 1); -+ } -+ -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+/* Forward declaration */ -+static void d_send_done_cb(struct ssi_device *d_dev); -+ -+/* -+ * d_tx_complete_cb - Callback called when a TX has completed in the wire. -+ * @d_dev - the channel were the TX has completed. -+ */ -+static void d_tx_complete_cb(struct ssi_device *d_dev) -+{ -+ unsigned int busy; -+ -+ spin_lock(&ssi_protocol.lock); -+ -+ ssi_ioctl(d_dev, SSI_IOCTL_TX_CH_FULL, &busy); -+ if (busy) { -+ ssi_ioctl(d_dev, SSI_IOCTL_CH_DATAACCEPT, NULL); -+ goto out; -+ } -+ -+ ssi_set_write_cb(d_dev, d_send_done_cb); -+ -+ if (skb_queue_len(&ssi_protocol.tx_queue) <= 0) { -+ pr_debug(LOG_NAME "Delayed Sending SWBREAK\n"); -+ send_c_msg(SWBREAK_CMD); -+ ssi_protocol.send_state = SENDING_SWBREAK; -+ } else { -+ mcsaab_start_tx(); -+ } -+ -+out: -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+static void d_send_done_cb(struct ssi_device *d_dev) -+{ -+ struct sk_buff *skb; -+ unsigned int busy; -+ -+ spin_lock(&ssi_protocol.lock); -+ -+ mcsaab_restart_keep_alive(); -+ -+ skb = skb_dequeue(&ssi_protocol.tx_queue); -+ if (!skb) -+ goto out; -+ del_timer(&ssi_protocol.tx_wd); -+ dev_kfree_skb(skb); -+ ssi_protocol.send_msg_id++; -+ ssi_protocol.send_msg_id &= 0xff; -+ if (skb_queue_len(&ssi_protocol.tx_queue) <= 0) { -+ ssi_ioctl(d_dev, SSI_IOCTL_TX_CH_FULL, &busy); -+ if (busy) { -+ /* -+ * Program DATAACCEPT interrupt to know when ch 3 -+ * has completed TX last frame. 
-+ */ -+ ssi_set_write_cb(d_dev, d_tx_complete_cb); -+ ssi_ioctl(d_dev, SSI_IOCTL_CH_DATAACCEPT, NULL); -+ pr_debug(LOG_NAME "Waiting for last frame\n"); -+ goto out; -+ } -+ pr_debug(LOG_NAME "Sending SWBREAK\n"); -+ send_c_msg(SWBREAK_CMD); -+ ssi_protocol.send_state = SENDING_SWBREAK; -+ } else { -+ mcsaab_start_tx(); -+ } -+out: -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+static void c_rcv_done_cb(struct ssi_device *c_dev) -+{ -+ u32 message = ssi_protocol.rcv_c_msg; -+ unsigned int command = COMMAND(message); -+ -+ spin_lock(&ssi_protocol.lock); -+ -+ mcsaab_restart_keep_alive(); -+ -+ ssi_read(c_dev, &ssi_protocol.rcv_c_msg, 1); -+ -+ pr_debug(LOG_NAME "Protocol state %d\n", ssi_protocol.main_state); -+ pr_debug(LOG_NAME "CMT Message 0x%08x\n", message); -+ -+ switch (command) { -+ case SW_BREAK: -+ swbreak_h(); -+ break; -+ case BOOT_INFO_REQ: -+ boot_info_req_h(message); -+ break; -+ case BOOT_INFO_RESP: -+ boot_info_resp_h(message); -+ break; -+ case WAKE_TEST_RES: -+ wakelines_test_result_h(message); -+ break; -+ case START_TRANS: -+ start_trans_h(message); -+ break; -+ case READY: -+ ready_h(message); -+ break; -+ case DUMMY: -+ pr_warning(LOG_NAME "Received dummy sync 0x%08x\n", message); -+ pr_warning(LOG_NAME "OLD McSAAB Protocol DETECTED\n"); -+ pr_warning(LOG_NAME "OLD PROTOCOL NOT SUPPORTED\n"); -+ break; -+ default: -+ pr_warning(LOG_NAME "COMMAND NOT SUPPORTED\n"); -+ pr_warning(LOG_NAME "Message 0x%08X\n", message); -+ break; -+ } -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+static void d_rcv_done_cb(struct ssi_device *d_dev) -+{ -+ struct sk_buff *skb; -+ -+ spin_lock(&ssi_protocol.lock); -+ -+ mcsaab_restart_keep_alive(); -+ -+ skb = skb_dequeue(&ssi_protocol.rx_queue); -+ if (!skb) -+ goto out; -+ skb->dev = ssi_protocol.netdev; -+ del_timer(&ssi_protocol.rx_wd); /* Stop RX timer */ -+ ssi_pn_rx(skb); -+ if (skb_queue_len(&ssi_protocol.rx_queue) > 0) { -+ skb = skb_peek(&ssi_protocol.rx_queue); -+ mod_timer(&ssi_protocol.rx_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ ssi_protocol.recv_state = RECEIVING; -+ pr_debug(LOG_NAME "Data len: %d\n", skb->len / 4); -+ ssi_read(ssi_protocol.dev_d_ch, (u32 *)skb->data, skb->len / 4); -+ } -+out: -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+static void wake_up_event(struct ssi_device *c_dev) -+{ -+ -+ spin_lock(&ssi_protocol.lock); -+ -+ switch (ssi_protocol.main_state) { -+ case INIT: -+ ssi_ioctl(c_dev, SSI_IOCTL_WAKE_UP, NULL); -+ break; -+ case HANDSHAKE: -+ if (ssi_protocol.flags & WAKEDOWN_TEST) { -+ /* Need this safeguard to avoid race condition */ -+ pr_notice(LOG_NAME "ACWAKE UP\n"); -+ ssi_ioctl(c_dev, SSI_IOCTL_WAKE_UP, NULL); -+ } -+ break; -+ case ACTIVE: -+ /* -+ * We can have two UP events in a row due to a short low -+ * high transition. Therefore we need to ignore the -+ * sencond UP event. 
-+ */ -+ if (ssi_protocol.recv_state == RECV_READY) -+ break; -+ -+ ssi_protocol.recv_state = RECV_READY; -+ mcsaab_clk_enable(); -+ send_c_msg(READY_CMD); -+ /* Start RX timer */ -+ mod_timer(&ssi_protocol.rx_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ /* Wake MPU workaround */ -+ mod_timer(&ssi_protocol.keep_alive, -+ jiffies + msecs_to_jiffies(KA_TIMEOUT)); -+ break; -+ default: -+ pr_debug(LOG_NAME "Wrong protocol state %d\n", -+ ssi_protocol.main_state); -+ break; -+ } -+ -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+static void wake_down_event(struct ssi_device *c_dev) -+{ -+ spin_lock(&ssi_protocol.lock); -+ pr_debug(LOG_NAME "WAKE DOWN in state %d\n", ssi_protocol.main_state); -+ -+ switch (ssi_protocol.main_state) { -+ case INIT: -+ break; -+ case HANDSHAKE: -+ if (!(ssi_protocol.flags & WAKEDOWN_TEST)) { -+ /* Need this safeguard to avoid race condition */ -+ pr_notice(LOG_NAME "ACWAKE DOWN\n"); -+ ssi_ioctl(c_dev, SSI_IOCTL_WAKE_DOWN, NULL); -+ ssi_protocol.flags |= WAKEDOWN_TEST; -+ } -+ break; -+ case ACTIVE: -+ ssi_protocol.recv_state = RECV_IDLE; -+ mcsaab_stop_keep_alive(); -+ break; -+ default: -+ pr_debug(LOG_NAME "Wrong protocol state %d\n", -+ ssi_protocol.main_state); -+ break; -+ } -+ -+ spin_unlock(&ssi_protocol.lock); -+} -+ -+static void port_event_cb(struct ssi_device *ssi_dev, unsigned int event, -+ void *arg) -+{ -+ switch (event) { -+ case SSI_EVENT_BREAK_DETECTED: -+ pr_debug(LOG_NAME "HWBREAK detected.\n"); -+ break; -+ case SSI_EVENT_ERROR: -+ pr_err(LOG_NAME "HW ERROR detected\n"); -+ reset_mcsaab(); -+ break; -+ case SSI_EVENT_CAWAKE_UP: -+ wake_up_event(ssi_protocol.dev_c_ch); -+ break; -+ case SSI_EVENT_CAWAKE_DOWN: -+ wake_down_event(ssi_protocol.dev_c_ch); -+ break; -+ default: -+ pr_debug(LOG_NAME "Recevived an UNKNOWN event\n"); -+ break; -+ } -+} -+/* End OMAP SSI callabcks */ -+ -+/* CMT reset support */ -+static void do_cmt_rst_tasklet(unsigned long ssi_proto) -+{ -+ struct mcsaab_imp *ssi_protocol = (struct mcsaab_imp *)ssi_proto; -+ int v; -+ -+ v = gpio_get_value(ssi_protocol->cmt_rst_gpio); -+ pr_warning("******\n* CMT rst line change detected (%d)\n*****\n", v); -+ spin_lock(&ssi_protocol->lock); -+ if (!v) { -+ mcsaab_watchdog_dump(ssi_protocol); -+ reset_mcsaab(); -+ } -+ spin_unlock(&ssi_protocol->lock); -+} -+ -+static irqreturn_t cmt_rst_isr(int irq, void *ssi_proto) -+{ -+ struct mcsaab_imp *ssi_protocol = (struct mcsaab_imp *)ssi_proto; -+ -+ tasklet_hi_schedule(&ssi_protocol->cmt_rst_tasklet); -+ -+ return IRQ_HANDLED; -+} -+ -+int __init cmt_rst_init(struct mcsaab_imp *p, const char *gpio_name) -+{ -+ -+ if (gpio_request(p->cmt_rst_gpio, gpio_name) < 0) { -+ pr_err(LOG_NAME "FAILED to request %s GPIO %d\n", -+ gpio_name, p->cmt_rst_gpio); -+ return -EBUSY; -+ } -+ gpio_direction_input(p->cmt_rst_gpio); -+ tasklet_init(&p->cmt_rst_tasklet, do_cmt_rst_tasklet, (unsigned long)p); -+ if (request_irq(p->cmt_rst_gpio_irq, cmt_rst_isr, -+ IRQF_SHARED | IRQF_TRIGGER_FALLING, gpio_name, p) < 0) { -+ gpio_free(p->cmt_rst_gpio); -+ pr_err(LOG_NAME "FAILED to request %s GPIO IRQ %d\n", -+ gpio_name, p->cmt_rst_gpio_irq); -+ return -EBUSY; -+ } -+ enable_irq_wake(p->cmt_rst_gpio_irq); -+ -+ return 0; -+} -+ -+void cmt_rst_exit(struct mcsaab_imp *p) -+{ -+ if (p->cmt_rst_gpio < 0) -+ return; /* Nothing to do */ -+ -+ disable_irq_wake(p->cmt_rst_gpio_irq); -+ tasklet_kill(&p->cmt_rst_tasklet); -+ free_irq(p->cmt_rst_gpio_irq, p); -+ gpio_free(p->cmt_rst_gpio); -+} -+/* End CMT reset support */ -+ -+static int __devinit open_ssi_hw_drv(struct 
mcsaab_imp *prot) -+{ -+ int err = 0; -+ unsigned int cawake = 0; -+ -+ err = ssi_open(prot->dev_c_ch); -+ if (err < 0) { -+ pr_err(LOG_NAME "Could not open CONTROL channel 0\n"); -+ goto rback1; -+ } -+ err = ssi_open(prot->dev_d_ch); -+ if (err < 0) { -+ pr_err(LOG_NAME "Could not open DATA channel 3\n"); -+ goto rback2; -+ } -+ -+ pr_debug(LOG_NAME "Submitting read on the control channel\n"); -+ err = ssi_read(prot->dev_c_ch, &prot->rcv_c_msg, 1); -+ if (err < 0) { -+ pr_err(LOG_NAME "Error when submiting first control read\n"); -+ goto rback3; -+ } -+ ssi_ioctl(prot->dev_c_ch, SSI_IOCTL_CAWAKE, &cawake); -+ if (cawake) { -+ /* Start BOOT HANDSHAKE timer */ -+ mod_timer(&ssi_protocol.boot_wd, -+ jiffies + msecs_to_jiffies(WD_TIMEOUT)); -+ ssi_ioctl(prot->dev_c_ch, SSI_IOCTL_WAKE_UP, NULL); -+ send_c_msg(BOOT_INFO_REQ_CMD(0x1)); -+ } -+ -+ return 0; -+rback3: -+ ssi_close(prot->dev_d_ch); -+rback2: -+ ssi_close(prot->dev_c_ch); -+rback1: -+ return err; -+} -+ -+static int __devinit mcsaab_probe(struct ssi_device *ssi_dev) -+{ -+ int err = 0; -+ -+ if ((ssi_dev->n_ch == 0) && (ssi_dev->n_p == 0)) { -+ ssi_set_read_cb(ssi_dev, c_rcv_done_cb); -+ ssi_set_write_cb(ssi_dev, c_send_done_cb); -+ ssi_set_port_event_cb(ssi_dev, port_event_cb); -+ spin_lock_bh(&ssi_protocol.lock); -+ ssi_protocol.dev_c_ch = ssi_dev; -+ spin_unlock_bh(&ssi_protocol.lock); -+ } else if ((ssi_dev->n_ch == 3) && (ssi_dev->n_p == 0)) { -+ ssi_set_read_cb(ssi_dev, d_rcv_done_cb); -+ ssi_set_write_cb(ssi_dev, d_send_done_cb); -+ spin_lock_bh(&ssi_protocol.lock); -+ ssi_protocol.dev_d_ch = ssi_dev; -+ spin_unlock_bh(&ssi_protocol.lock); -+ } else -+ return -ENXIO; -+ -+ spin_lock_bh(&ssi_protocol.lock); -+ -+ if ((ssi_protocol.dev_d_ch) && (ssi_protocol.dev_c_ch)) -+ err = open_ssi_hw_drv(&ssi_protocol); -+ -+ spin_unlock_bh(&ssi_protocol.lock); -+ -+ return err; -+} -+ -+static int __devexit mcsaab_remove(struct ssi_device *ssi_dev) -+{ -+ spin_lock_bh(&ssi_protocol.lock); -+ if (ssi_protocol.flags & CMT_ONLINE) -+ netif_carrier_off(ssi_protocol.netdev); -+ -+ if (ssi_dev == ssi_protocol.dev_c_ch) { -+ ssi_protocol.main_state = INIT; -+ ssi_protocol.send_state = SEND_IDLE; -+ ssi_protocol.recv_state = RECV_IDLE; -+ ssi_protocol.flags = 0; -+ ssi_protocol.head = 0; -+ ssi_protocol.tail = 0; -+ ssi_protocol.dev_c_ch = NULL; -+ } else if (ssi_dev == ssi_protocol.dev_d_ch) { -+ ssi_protocol.dev_d_ch = NULL; -+ } -+ spin_unlock_bh(&ssi_protocol.lock); -+ ssi_set_read_cb(ssi_dev, NULL); -+ ssi_set_write_cb(ssi_dev, NULL); -+ ssi_set_port_event_cb(ssi_dev, NULL); -+ ssi_close(ssi_dev); -+ -+ return 0; -+} -+ -+static struct ssi_device_driver ssi_mcsaab_driver = { -+ .ctrl_mask = ANY_SSI_CONTROLLER, -+ .ch_mask[0] = CHANNEL(0) | CHANNEL(3), -+ .probe = mcsaab_probe, -+ .remove = __devexit_p(mcsaab_remove), -+ .driver = { -+ .name = "ssi_mcsaab_imp", -+ }, -+}; -+ -+/* NOTE: Notice that the WAKE line test. Must be done between 1ms time. 
-+ * So too much DEBUG information can provoke that we are late and -+ * failed the handshaking !*/ -+static int __init ssi_proto_init(void) -+{ -+ static const char ifname[] = "phonet%d"; -+ int err = 0; -+ -+ pr_info(MCSAAB_IMP_NAME " Version: " MCSAAB_IMP_VERSION "\n"); -+ -+ spin_lock_init(&ssi_protocol.lock); -+ init_timer_deferrable(&ssi_protocol.boot_wd); -+ init_timer_deferrable(&ssi_protocol.rx_wd); -+ init_timer_deferrable(&ssi_protocol.tx_wd); -+ init_timer(&ssi_protocol.keep_alive); -+ ssi_protocol.main_state = INIT; -+ ssi_protocol.send_state = SEND_IDLE; -+ ssi_protocol.recv_state = RECV_IDLE; -+ ssi_protocol.flags = 0; -+ ssi_protocol.head = 0; -+ ssi_protocol.tail = 0; -+ ssi_protocol.dev_c_ch = NULL; -+ ssi_protocol.dev_d_ch = NULL; -+ ssi_protocol.boot_wd.data = (unsigned long)&ssi_protocol; -+ ssi_protocol.boot_wd.function = mcsaab_watchdog; -+ ssi_protocol.rx_wd.data = (unsigned long)&ssi_protocol; -+ ssi_protocol.rx_wd.function = mcsaab_watchdog_rx; -+ ssi_protocol.tx_wd.data = (unsigned long)&ssi_protocol; -+ ssi_protocol.tx_wd.function = mcsaab_watchdog_tx; -+ ssi_protocol.keep_alive.data = (unsigned long)&ssi_protocol; -+ ssi_protocol.keep_alive.function = keep_alive_timer; -+ ssi_protocol.ssi_clk = NULL; -+ ssi_protocol.cmt_rst_gpio = 72; /* FIXME */ -+ ssi_protocol.cmt_rst_gpio_irq = gpio_to_irq(72); /* FIXME */ -+ -+ skb_queue_head_init(&ssi_protocol.tx_queue); -+ skb_queue_head_init(&ssi_protocol.rx_queue); -+ -+ ssi_protocol.netdev = alloc_netdev(0, ifname, ssi_pn_setup); -+ if (!ssi_protocol.netdev) -+ return -ENOMEM; -+ -+ /* FIXME: */ -+ /*SET_NETDEV_DEV(ssi_protoco.netdev, &p->dev_d_ch->device);*/ -+ netif_carrier_off(ssi_protocol.netdev); -+ err = register_netdev(ssi_protocol.netdev); -+ if (err) { -+ free_netdev(ssi_protocol.netdev); -+ return err; -+ } -+ -+ ssi_protocol.ssi_clk = clk_get(NULL, "ssi_clk"); -+ if (IS_ERR(ssi_protocol.ssi_clk)) { -+ pr_err(LOG_NAME "Could not claim SSI fck clock\n"); -+ err = PTR_ERR(ssi_protocol.ssi_clk); -+ goto rback1; -+ } -+ -+ err = register_ssi_driver(&ssi_mcsaab_driver); -+ if (err < 0) { -+ pr_err(LOG_NAME "Error when registering ssi driver: %d\n", err); -+ goto rback2; -+ } -+ -+ err = cmt_rst_init(&ssi_protocol, "ape_rst_rq"); -+ if (err < 0) { -+ pr_err(LOG_NAME "Error setting CMT reset support (%d)\n", err); -+ goto rback3; -+ } -+ -+ return 0; -+rback3: -+ unregister_ssi_driver(&ssi_mcsaab_driver); -+rback2: -+ clk_put(ssi_protocol.ssi_clk); -+rback1: -+ unregister_netdev(ssi_protocol.netdev); -+ return err; -+} -+ -+static void __exit ssi_proto_exit(void) -+{ -+ reset_mcsaab(); -+ cmt_rst_exit(&ssi_protocol); -+ unregister_ssi_driver(&ssi_mcsaab_driver); -+ clk_put(ssi_protocol.ssi_clk); -+ unregister_netdev(ssi_protocol.netdev); -+ -+ pr_info(MCSAAB_IMP_NAME "REMOVED\n"); -+} -+ -+module_init(ssi_proto_init); -+module_exit(ssi_proto_exit); -+ -+MODULE_ALIAS("ssi:omap_ssi-p0.c0"); -+MODULE_AUTHOR("Carlos Chinea, Remi Denis-Courmont, Nokia"); -+MODULE_DESCRIPTION(MCSAAB_IMP_DESC); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/nokia-av.c linux-omap-2.6.28-nokia1/drivers/misc/nokia-av.c ---- linux-omap-2.6.28-omap1/drivers/misc/nokia-av.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/nokia-av.c 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,786 @@ -+/* -+ * nokia-av.c - Nokia AV accessory detection -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of 
the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+ -+#include -+ -+/* FIXME */ -+#include "../../sound/soc/omap/rx51.h" -+ -+#define DRIVER_NAME "nokia-av" -+ -+#define HS_BTN_KEY KEY_PHONE -+#define HS_BTN_IRQ_FLAGS (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) -+#define HS_BTN_DEBOUNCE_PRESS 100 -+#define HS_BTN_DEBOUNCE_RELEASE 100 -+#define HS_BTN_REPORT_DELAY 1000 -+ -+#define HEADPH_IRQ_FLAGS (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |\ -+ IRQF_SHARED) -+#define HEADPH_DEBOUNCE 300 -+ -+#define DET_OPEN_CABLE_DELAY 100 -+#define DET_REPEAT_DELAY 50 -+#define DET_PLUG_DELAY 10 -+#define DET_PROBE_DELAY 10 -+#define DET_ECI_RESET_DELAY 600 -+#define DET_REPEAT_COUNT 5 -+#define DET_COUNT_MAX 10 -+ -+enum { -+ UNKNOWN, -+ HEADPHONES, /* or line input cable or external mic */ -+ VIDEO_CABLE, -+ OPEN_CABLE, -+ BASIC_HEADSET, -+}; -+ -+struct nokia_av_drvdata { -+ struct device *dev; -+ struct mutex lock; -+ struct mutex detection_lock; -+ -+ struct workqueue_struct *workqueue; -+ struct work_struct headph_work; -+ struct timer_list headph_timer; -+ struct delayed_work detection_work; -+ -+ struct input_dev *input; -+ struct work_struct hs_btn_work; -+ struct timer_list hs_btn_timer; -+ struct delayed_work hs_btn_report_work; -+ int hs_btn_pressed; -+ -+ int autodetect; -+ int type; -+ int dettype; -+ int detcount; -+ int dettotal; -+ -+ int eci0_gpio; -+ int eci1_gpio; -+ int headph_gpio; -+ int headph_plugged; -+}; -+ -+/* Delayed reporting of button press-release cycle */ -+static void hs_btn_report(struct work_struct *work) -+{ -+ struct nokia_av_drvdata *drvdata = container_of(work, -+ struct nokia_av_drvdata, hs_btn_report_work.work); -+ -+ /* Don't report if unplugged */ -+ if (drvdata->input && drvdata->headph_plugged) { -+ input_report_key(drvdata->input, HS_BTN_KEY, 1); -+ input_sync(drvdata->input); -+ input_report_key(drvdata->input, HS_BTN_KEY, 0); -+ input_sync(drvdata->input); -+ } -+} -+ -+/* Timer for debouncing */ -+static void hs_btn_timer(unsigned long arg) -+{ -+ struct nokia_av_drvdata *drvdata = (struct nokia_av_drvdata *) arg; -+ -+ schedule_work(&drvdata->hs_btn_work); -+} -+ -+/* Handle debounced button press/release */ -+static void hs_btn_handler(struct work_struct *work) -+{ -+ struct nokia_av_drvdata *drvdata = -+ container_of(work, struct nokia_av_drvdata, hs_btn_work); -+ int pressed; -+ -+ if (!allow_button_press()) -+ return; -+ -+ pressed = !gpio_get_value(drvdata->eci0_gpio); -+ if (drvdata->hs_btn_pressed == pressed) -+ return; -+ -+ drvdata->hs_btn_pressed = pressed; -+ -+ /* Only report on key release */ -+ if (drvdata->type == BASIC_HEADSET && !pressed) { -+ /* Delay reporting to avoid false events on unplug */ -+ queue_delayed_work(drvdata->workqueue, -+ &drvdata->hs_btn_report_work, -+ msecs_to_jiffies(HS_BTN_REPORT_DELAY)); -+ } -+} 
-+ -+/* Button press/release */ -+static irqreturn_t hs_btn_irq(int irq, void *_drvdata) -+{ -+ struct nokia_av_drvdata *drvdata = _drvdata; -+ int pressed, timeout; -+ -+ pressed = !gpio_get_value(drvdata->eci0_gpio); -+ if (drvdata->hs_btn_pressed == pressed) -+ return IRQ_HANDLED; -+ -+ if (pressed) -+ timeout = HS_BTN_DEBOUNCE_PRESS; -+ else -+ timeout = HS_BTN_DEBOUNCE_RELEASE; -+ -+ if (!timeout) -+ schedule_work(&drvdata->hs_btn_work); -+ else -+ mod_timer(&drvdata->hs_btn_timer, -+ jiffies + msecs_to_jiffies(timeout)); -+ -+ return IRQ_HANDLED; -+} -+ -+static int hs_btn_input_init(struct nokia_av_drvdata *drvdata) -+{ -+ int ret; -+ -+ if (drvdata->input) -+ return -EEXIST; -+ -+ drvdata->hs_btn_pressed = 0; -+ -+ drvdata->input = input_allocate_device(); -+ if (!drvdata->input) { -+ dev_err(drvdata->dev, "Could not allocate input device\n"); -+ ret = -ENOMEM; -+ return ret; -+ } -+ -+ input_set_capability(drvdata->input, EV_KEY, HS_BTN_KEY); -+ drvdata->input->name = "headset button"; -+ -+ ret = input_register_device(drvdata->input); -+ if (ret) { -+ dev_err(drvdata->dev, "Could not register input device\n"); -+ input_free_device(drvdata->input); -+ drvdata->input = NULL; -+ return ret; -+ } -+ -+ ret = request_irq(gpio_to_irq(drvdata->eci0_gpio), hs_btn_irq, -+ HS_BTN_IRQ_FLAGS, "hs_btn", drvdata); -+ if (ret) { -+ dev_err(drvdata->dev, "Could not request irq %d\n", -+ gpio_to_irq(drvdata->eci0_gpio)); -+ input_unregister_device(drvdata->input); -+ drvdata->input = NULL; -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static void hs_btn_input_free(struct nokia_av_drvdata *drvdata) -+{ -+ if (!drvdata->input) -+ return; -+ -+ free_irq(gpio_to_irq(drvdata->eci0_gpio), drvdata); -+ -+ del_timer(&drvdata->hs_btn_timer); -+ cancel_delayed_work(&drvdata->hs_btn_report_work); -+ -+ input_unregister_device(drvdata->input); -+ drvdata->input = NULL; -+} -+ -+static int madc(void) -+{ -+ struct twl4030_madc_request req; -+ -+ req.channels = (1 << 2); -+ req.do_avg = 0; -+ req.method = TWL4030_MADC_SW1; -+ req.active = 0; -+ req.func_cb = NULL; -+ twl4030_madc_conversion(&req); -+ -+ return req.rbuf[2]; -+} -+ -+/* Get voltage in mV */ -+static inline int madc_voltage(void) -+{ -+ return madc()*147/60; -+} -+ -+/* Get voltage in mV, wait for voltage to settle to smaller than d/t -+ * mV/ms slope, wait at most tmax ms (not accurate, but just to make -+ * sure it's less than infinity) */ -+static int madc_stable_voltage(int d, int t, int tmax) -+{ -+ int mv1, mv2, dmv; -+ -+ mv1 = madc_voltage(); -+ do { -+ if (t) { -+ msleep(t); -+ tmax -= t; -+ } else { -+ tmax--; -+ } -+ mv2 = madc_voltage(); -+ dmv = abs(mv2 - mv1); -+ mv1 = mv2; -+ } while (dmv > d && tmax > 0); -+ -+ return mv2; -+} -+ -+/* < pre-B3 */ -+#define NEED_BIAS_CORRECTION(hwid) (((hwid) > 0x0013 && \ -+ (hwid) <= 0x1700) || ((hwid) < 0x0008)) -+#define BIAS_CORRECTION 80 -+ -+#define THRESHOLD_GROUNDED 40 -+#define THRESHOLD_VIDEO_HI 150 -+#define THRESHOLD_HEADSET_HI 200 -+#define THRESHOLD_ECI_LO 1950 -+#define THRESHOLD_ECI_HI 2200 -+ -+static int detect(struct nokia_av_drvdata *drvdata) -+{ -+ int mv; -+ int type = UNKNOWN; -+ -+ mutex_lock(&drvdata->detection_lock); -+ -+ rx51_set_eci_mode(4); -+ msleep(20); -+ -+ /* Detection point 1 */ -+ if (gpio_get_value(drvdata->eci0_gpio)) { -+ rx51_set_eci_mode(3); -+ msleep(20); -+ -+ /* Detection point 3 */ -+ if (gpio_get_value(drvdata->eci1_gpio)) { -+ type = OPEN_CABLE; -+ goto done; -+ } -+ -+ rx51_set_eci_mode(0); -+ -+ /* Detection point 4 */ -+ mv = 
madc_stable_voltage(50, 5, 100); -+ if (mv < THRESHOLD_HEADSET_HI) { -+ type = BASIC_HEADSET; -+ goto done; -+ } -+ } else { -+ /* Detection point 2 */ -+ mv = madc_voltage(); -+ -+ /* Measurements made with mic bias need to be -+ * corrected on old hardware revisions */ -+ if (NEED_BIAS_CORRECTION(system_rev)) -+ mv -= BIAS_CORRECTION; -+ -+ if (mv < THRESHOLD_GROUNDED) { -+ type = HEADPHONES; -+ goto done; -+ } -+ -+ if (mv < THRESHOLD_VIDEO_HI) { -+ type = VIDEO_CABLE; -+ goto done; -+ } -+ } -+done: -+ rx51_set_eci_mode(1); -+ mutex_unlock(&drvdata->detection_lock); -+ -+ return type; -+} -+ -+/* HACK: Try to detect ECI headsets */ -+static int detect_eci(struct nokia_av_drvdata *drvdata) -+{ -+ int t = DET_ECI_RESET_DELAY; -+ int mv; -+ int type = UNKNOWN; -+ -+ mutex_lock(&drvdata->detection_lock); -+ -+ rx51_set_eci_mode(4); -+ -+ /* Give the ECI headset sufficient time (more than 500 ms) to -+ * reset and stabilize the mic line, bail out on unplug */ -+ while (t > 0) { -+ if (!drvdata->headph_plugged) { -+ type = -1; -+ goto out; -+ } -+ -+ msleep(t < 100 ? t : 100); -+ t -= 100; -+ } -+ -+ mv = madc_stable_voltage(50, 5, 100); -+ if (mv > THRESHOLD_ECI_LO && mv < THRESHOLD_ECI_HI) -+ type = BASIC_HEADSET; -+ -+ rx51_set_eci_mode(1); -+out: -+ mutex_unlock(&drvdata->detection_lock); -+ -+ return type; -+} -+ -+/* Main accessory detection routine. */ -+static void detection_handler(struct work_struct *work) -+{ -+ struct nokia_av_drvdata *drvdata = container_of(work, -+ struct nokia_av_drvdata, detection_work.work); -+ int type; -+ -+ /* This is a shortcut detection for connecting open cable */ -+ if (drvdata->type == OPEN_CABLE && gpio_get_value(drvdata->eci1_gpio)) { -+ queue_delayed_work(drvdata->workqueue, -+ &drvdata->detection_work, -+ msecs_to_jiffies(DET_OPEN_CABLE_DELAY)); -+ return; -+ } -+ drvdata->type = UNKNOWN; -+ -+ type = detect(drvdata); -+ -+ mutex_lock(&drvdata->lock); -+ -+ /* Unplug in the middle of detection */ -+ if (!drvdata->headph_plugged) -+ goto out; -+ -+ if (type == drvdata->dettype) { -+ drvdata->detcount++; -+ } else { -+ drvdata->detcount = 1; -+ drvdata->dettype = type; -+ } -+ -+ drvdata->dettotal++; -+ -+ if (drvdata->detcount >= DET_REPEAT_COUNT || -+ drvdata->dettotal >= DET_COUNT_MAX) { -+ int status = 0; -+ -+ /* HACK: Try to detect the accessory as an ECI headset -+ * only if unable to detect it as anything else. */ -+ if (type == UNKNOWN || drvdata->dettotal >= DET_COUNT_MAX) { -+ /* Unlock to allow headph_handler to work */ -+ mutex_unlock(&drvdata->lock); -+ type = detect_eci(drvdata); -+ mutex_lock(&drvdata->lock); -+ -+ /* Unplug in the middle of ECI detection. 
*/ -+ if (type < 0 || !drvdata->headph_plugged) -+ goto out; -+ } -+ -+ drvdata->type = type; -+ drvdata->dettype = UNKNOWN; -+ drvdata->detcount = 0; -+ drvdata->dettotal = 0; -+ -+ switch (type) { -+ case BASIC_HEADSET: -+ status = SND_JACK_HEADSET; -+ hs_btn_input_init(drvdata); -+ break; -+ case HEADPHONES: -+ status = SND_JACK_HEADPHONE; -+ break; -+ case VIDEO_CABLE: -+ status = SND_JACK_AVOUT; -+ break; -+ case OPEN_CABLE: -+ rx51_set_eci_mode(3); /* Detect connection */ -+ queue_delayed_work(drvdata->workqueue, -+ &drvdata->detection_work, -+ msecs_to_jiffies(DET_OPEN_CABLE_DELAY)); -+ break; -+ } -+ status |= SND_JACK_MECHANICAL; -+ -+ rx51_jack_report(status); -+ -+ } else { -+ queue_delayed_work(drvdata->workqueue, -+ &drvdata->detection_work, -+ msecs_to_jiffies(DET_REPEAT_DELAY)); -+ } -+out: -+ mutex_unlock(&drvdata->lock); -+} -+ -+/* Debounced headphone plug handler */ -+static void headph_handler(struct work_struct *work) -+{ -+ struct nokia_av_drvdata *drvdata = -+ container_of(work, struct nokia_av_drvdata, headph_work); -+ int plugged; -+ -+ if (!drvdata->autodetect) { -+ return; -+ } -+ -+ plugged = !gpio_get_value(drvdata->headph_gpio); -+ if (drvdata->headph_plugged == plugged) -+ return; -+ -+ mutex_lock(&drvdata->lock); -+ -+ drvdata->headph_plugged = plugged; -+ -+ drvdata->type = UNKNOWN; -+ drvdata->dettype = UNKNOWN; -+ drvdata->detcount = 0; -+ drvdata->dettotal = 0; -+ -+ hs_btn_input_free(drvdata); -+ -+ mutex_unlock(&drvdata->lock); -+ -+ if (drvdata->headph_plugged) { -+ queue_delayed_work(drvdata->workqueue, -+ &drvdata->detection_work, -+ msecs_to_jiffies(DET_PLUG_DELAY)); -+ } else { -+ cancel_delayed_work_sync(&drvdata->detection_work); -+ -+ rx51_set_eci_mode(1); -+ rx51_jack_report(0); -+ } -+} -+ -+/* Headphone plug debounce timer */ -+static void headph_timer(unsigned long arg) -+{ -+ struct nokia_av_drvdata *drvdata = (struct nokia_av_drvdata *) arg; -+ -+ schedule_work(&drvdata->headph_work); -+} -+ -+/* Headphone plug irq */ -+static irqreturn_t headph_irq(int irq, void *_drvdata) -+{ -+ struct nokia_av_drvdata *drvdata = _drvdata; -+ -+ mod_timer(&drvdata->headph_timer, -+ jiffies + msecs_to_jiffies(HEADPH_DEBOUNCE)); -+ -+ return IRQ_HANDLED; -+} -+ -+static ssize_t detect_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct nokia_av_drvdata *drvdata = dev_get_drvdata(dev); -+ int type; -+ -+ type = detect(drvdata); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", type); -+} -+ -+static ssize_t eci0_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct nokia_av_drvdata *drvdata = dev_get_drvdata(dev); -+ int val = gpio_get_value(drvdata->eci0_gpio); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", val); -+} -+ -+static ssize_t eci1_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct nokia_av_drvdata *drvdata = dev_get_drvdata(dev); -+ int val = gpio_get_value(drvdata->eci1_gpio); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", val); -+} -+ -+static ssize_t autodetect_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct nokia_av_drvdata *drvdata = dev_get_drvdata(dev); -+ int val; -+ -+ val = drvdata->autodetect; -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", val); -+} -+ -+static ssize_t autodetect_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ struct nokia_av_drvdata *drvdata = dev_get_drvdata(dev); -+ int val; -+ -+ if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1) -+ 
return -EINVAL; -+ -+ drvdata->autodetect = val; -+ -+ return len; -+} -+ -+static ssize_t type_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct nokia_av_drvdata *drvdata = dev_get_drvdata(dev); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", drvdata->type); -+} -+ -+static ssize_t madc_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d\n", madc()); -+} -+ -+static DEVICE_ATTR(detect, S_IRUGO, detect_show, NULL); -+static DEVICE_ATTR(type, S_IRUGO, type_show, NULL); -+static DEVICE_ATTR(autodetect, S_IRUGO|S_IWUGO, autodetect_show, -+ autodetect_store); -+ -+static DEVICE_ATTR(eci0, S_IRUGO, eci0_show, NULL); -+static DEVICE_ATTR(eci1, S_IRUGO, eci1_show, NULL); -+ -+static DEVICE_ATTR(madc, S_IRUGO, madc_show, NULL); -+ -+static struct attribute *nokia_av_attributes[] = { -+ &dev_attr_detect.attr, -+ &dev_attr_type.attr, -+ &dev_attr_autodetect.attr, -+ &dev_attr_eci0.attr, -+ &dev_attr_eci1.attr, -+ &dev_attr_madc.attr, -+ NULL -+}; -+ -+static const struct attribute_group nokia_av_group = { -+ .attrs = nokia_av_attributes, -+}; -+ -+static int nokia_av_register_sysfs(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ -+ return sysfs_create_group(&dev->kobj, &nokia_av_group); -+} -+ -+static void nokia_av_unregister_sysfs(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ -+ sysfs_remove_group(&dev->kobj, &nokia_av_group); -+} -+ -+static int __init nokia_av_probe(struct platform_device *pdev) -+{ -+ struct nokia_av_platform_data *pdata; -+ struct nokia_av_drvdata *drvdata; -+ int ret; -+ -+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); -+ if (!drvdata) { -+ dev_err(&pdev->dev, "could not allocate memory\n"); -+ ret = -ENOMEM; -+ goto err_alloc; -+ } -+ -+ pdata = pdev->dev.platform_data; -+ if (!pdata) { -+ dev_err(&pdev->dev, "no platform data?\n"); -+ ret = -EINVAL; -+ goto err_pdata; -+ } -+ -+ drvdata->workqueue = create_singlethread_workqueue(DRIVER_NAME); -+ if (!drvdata->workqueue) { -+ dev_err(&pdev->dev, "couldn't create workqueue\n"); -+ ret = -ENOMEM; -+ goto err_workqueue; -+ } -+ -+ drvdata->eci0_gpio = pdata->eci0_gpio; -+ drvdata->eci1_gpio = pdata->eci1_gpio; -+ drvdata->headph_gpio = pdata->headph_gpio; -+ drvdata->autodetect = 1; -+ -+ drvdata->dev = &pdev->dev; -+ -+ mutex_init(&drvdata->lock); -+ mutex_init(&drvdata->detection_lock); -+ INIT_DELAYED_WORK(&drvdata->detection_work, detection_handler); -+ -+ INIT_WORK(&drvdata->hs_btn_work, hs_btn_handler); -+ init_timer(&drvdata->hs_btn_timer); -+ drvdata->hs_btn_timer.function = hs_btn_timer; -+ drvdata->hs_btn_timer.data = (unsigned long)drvdata; -+ INIT_DELAYED_WORK(&drvdata->hs_btn_report_work, hs_btn_report); -+ -+ platform_set_drvdata(pdev, drvdata); -+ -+ ret = nokia_av_register_sysfs(pdev); -+ if (ret) { -+ dev_err(&pdev->dev, "sysfs registration failed, %d\n", ret); -+ goto err_sysfs; -+ } -+ -+ ret = gpio_request(drvdata->eci0_gpio, "eci0"); -+ if (ret) { -+ dev_err(&pdev->dev, "gpio %d request failed, %d\n", -+ drvdata->eci0_gpio, ret); -+ goto err_eci0; -+ } -+ -+ -+ ret = gpio_request(drvdata->eci1_gpio, "eci1"); -+ if (ret) { -+ dev_err(&pdev->dev, "gpio %d request failed, %d\n", -+ drvdata->eci1_gpio, ret); -+ goto err_eci1; -+ } -+ -+ gpio_direction_input(drvdata->eci0_gpio); -+ gpio_direction_input(drvdata->eci1_gpio); -+ -+ /* Plug/unplug detection */ -+ drvdata->headph_plugged = !gpio_get_value(drvdata->headph_gpio); -+ -+ INIT_WORK(&drvdata->headph_work, 
headph_handler); -+ init_timer(&drvdata->headph_timer); -+ drvdata->headph_timer.function = headph_timer; -+ drvdata->headph_timer.data = (unsigned long)drvdata; -+ -+ ret = request_irq(gpio_to_irq(drvdata->headph_gpio), headph_irq, -+ HEADPH_IRQ_FLAGS, "headph", drvdata); -+ if (ret) { -+ dev_err(&pdev->dev, "gpio %d irq request failed, %d\n", -+ drvdata->headph_gpio, ret); -+ goto err_headph; -+ } -+ -+ dev_info(&pdev->dev, "accessory detect module initialized\n"); -+ -+ if (drvdata->headph_plugged) -+ queue_delayed_work(drvdata->workqueue, -+ &drvdata->detection_work, -+ msecs_to_jiffies(DET_PROBE_DELAY)); -+ -+ return 0; -+ -+err_headph: -+ gpio_free(drvdata->eci1_gpio); -+ -+err_eci1: -+ gpio_free(drvdata->eci0_gpio); -+ -+err_eci0: -+ nokia_av_unregister_sysfs(pdev); -+ -+err_sysfs: -+ destroy_workqueue(drvdata->workqueue); -+ -+err_workqueue: -+ platform_set_drvdata(pdev, NULL); -+ -+err_pdata: -+ kfree(drvdata); -+ -+err_alloc: -+ -+ return ret; -+} -+ -+static int __exit nokia_av_remove(struct platform_device *pdev) -+{ -+ struct nokia_av_drvdata *drvdata = platform_get_drvdata(pdev); -+ -+ free_irq(gpio_to_irq(drvdata->headph_gpio), drvdata); -+ -+ hs_btn_input_free(drvdata); -+ -+ nokia_av_unregister_sysfs(pdev); -+ -+ gpio_free(drvdata->eci0_gpio); -+ gpio_free(drvdata->eci1_gpio); -+ -+ cancel_delayed_work_sync(&drvdata->detection_work); -+ -+ destroy_workqueue(drvdata->workqueue); -+ -+ platform_set_drvdata(pdev, NULL); -+ kfree(drvdata); -+ -+ return 0; -+} -+ -+static struct platform_driver nokia_av_driver = { -+ .driver = { -+ .name = DRIVER_NAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = nokia_av_probe, -+ .remove = __exit_p(nokia_av_remove), -+}; -+ -+static int __init nokia_av_init(void) -+{ -+ return platform_driver_register(&nokia_av_driver); -+} -+module_init(nokia_av_init); -+ -+static void __exit nokia_av_exit(void) -+{ -+ platform_driver_unregister(&nokia_av_driver); -+} -+module_exit(nokia_av_exit); -+ -+MODULE_ALIAS("platform:" DRIVER_NAME); -+MODULE_AUTHOR("Nokia"); -+MODULE_DESCRIPTION("Nokia AV accessory detection"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/panic_info_buff.c linux-omap-2.6.28-nokia1/drivers/misc/panic_info_buff.c ---- linux-omap-2.6.28-omap1/drivers/misc/panic_info_buff.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/panic_info_buff.c 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,93 @@ -+/* -+ * Copyright (C) Nokia Corporation -+ * -+ * Contact: Atal Shargorodsky -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define PANIC_BUFFER_MAX_LEN 1024 -+static char panic_info_buff[PANIC_BUFFER_MAX_LEN]; -+static struct dentry *panic_info_buff_debugfs; -+ -+static int panic_info_buff_open(struct inode *inode, struct file *file) -+{ -+ return 0; -+} -+ -+static ssize_t panic_info_buff_write(struct file *file, -+ const char __user *buf, size_t len, loff_t *off) -+{ -+ if (len >= PANIC_BUFFER_MAX_LEN) -+ return -EINVAL; -+ if (copy_from_user(panic_info_buff, buf, len)) -+ return -EFAULT; -+ panic_info_buff[len] = '\0'; -+ return len; -+} -+ -+static struct file_operations panic_info_buff_fops = { -+ .open = panic_info_buff_open, -+ .write = panic_info_buff_write, -+ .llseek = no_llseek, -+ .owner = THIS_MODULE, -+}; -+ -+static int panic_info_buff_event(struct notifier_block *this, -+ unsigned long event, void *ptr) -+{ -+ if (panic_info_buff[0] == '\0') { -+ printk(KERN_EMERG "Panic info buffer is empty.\n"); -+ } else { -+ printk(KERN_EMERG "Panic info buffer:\n"); -+ printk(KERN_EMERG "%s\n", panic_info_buff); -+ } -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block panic_info_buff_block = { -+ .notifier_call = panic_info_buff_event, -+ .priority = 1, -+}; -+ -+static int __devinit panic_info_buff_init(void) -+{ -+ panic_info_buff_debugfs = debugfs_create_file("panic_info_buff", -+ S_IFREG | S_IWUSR | S_IWGRP, -+ NULL, NULL, &panic_info_buff_fops); -+ atomic_notifier_chain_register(&panic_notifier_list, -+ &panic_info_buff_block); -+ return 0; -+} -+module_init(panic_info_buff_init); -+ -+static void __devexit panic_info_buff_exit(void) -+{ -+ debugfs_remove(panic_info_buff_debugfs); -+ atomic_notifier_chain_unregister(&panic_notifier_list, -+ &panic_info_buff_block); -+ -+} -+module_exit(panic_info_buff_exit); -+ -+MODULE_AUTHOR("Nokia Corporation"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("panic_info_buff"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/Kconfig linux-omap-2.6.28-nokia1/drivers/misc/ssi/Kconfig ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/Kconfig 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,11 @@ -+# -+# OMAP SSI HW kernel configuration -+# -+config OMAP_SSI -+ tristate "OMAP SSI hardware driver" -+ depends on ARCH_OMAP -+ default n -+ ---help--- -+ If you say Y here, you will enable the OMAP SSI hardware driver. -+ -+ If unsure, say N. 
-diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/Makefile linux-omap-2.6.28-nokia1/drivers/misc/ssi/Makefile ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/Makefile 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,15 @@ -+# -+# Makefile for SSI drivers -+# -+EXTRA_CFLAGS := \ -+ -I$(src)/../../../arch/arm/plat-omap/include \ -+ -I$(src)/../../../include -+ -+omap_ssi-objs := ssi_driver.o ssi_driver_dma.o ssi_driver_int.o \ -+ ssi_driver_if.o ssi_driver_bus.o ssi_driver_gpio.o -+ -+ifeq ($(CONFIG_DEBUG_FS), y) -+ omap_ssi-objs += ssi_driver_debugfs.o -+endif -+ -+obj-$(CONFIG_OMAP_SSI) += omap_ssi.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_bus.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_bus.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_bus.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_bus.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,153 @@ -+/* -+ * ssi_driver_bus.c -+ * -+ * Implements SSI bus, device and driver interface. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#include -+#include "ssi_driver.h" -+ -+#define SSI_PREFIX "ssi:" -+ -+struct bus_type ssi_bus_type; -+ -+static ssize_t modalias_show(struct device *dev, struct device_attribute *a, -+ char *buf) -+{ -+ return snprintf(buf, BUS_ID_SIZE + 1, "%s%s\n", SSI_PREFIX, -+ dev->bus_id); -+} -+ -+static struct device_attribute ssi_dev_attrs[] = { -+ __ATTR_RO(modalias), -+ __ATTR_NULL, -+}; -+ -+static int ssi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) -+{ -+ add_uevent_var(env, "MODALIAS=%s%s", SSI_PREFIX, dev->bus_id); -+ return 0; -+} -+ -+static int ssi_bus_match(struct device *device, struct device_driver *driver) -+{ -+ struct ssi_device *dev = to_ssi_device(device); -+ struct ssi_device_driver *drv = to_ssi_device_driver(driver); -+ -+ if (!test_bit(dev->n_ctrl, &drv->ctrl_mask)) -+ return 0; -+ -+ if (!test_bit(dev->n_ch, &drv->ch_mask[dev->n_p])) -+ return 0; -+ -+ return 1; -+} -+ -+int ssi_bus_unreg_dev(struct device *device, void *p) -+{ -+ device->release(device); -+ device_unregister(device); -+ -+ return 0; -+} -+ -+int __init ssi_bus_init(void) -+{ -+ return bus_register(&ssi_bus_type); -+} -+ -+void ssi_bus_exit(void) -+{ -+ bus_for_each_dev(&ssi_bus_type, NULL, NULL, ssi_bus_unreg_dev); -+ bus_unregister(&ssi_bus_type); -+} -+ -+static int ssi_driver_probe(struct device *dev) -+{ -+ struct ssi_device_driver *drv = to_ssi_device_driver(dev->driver); -+ -+ return drv->probe(to_ssi_device(dev)); -+} -+ -+static int ssi_driver_remove(struct device *dev) -+{ -+ struct ssi_device_driver *drv = to_ssi_device_driver(dev->driver); -+ -+ return drv->remove(to_ssi_device(dev)); -+} 
-+ -+static int ssi_driver_suspend(struct device *dev, pm_message_t mesg) -+{ -+ struct ssi_device_driver *drv = to_ssi_device_driver(dev->driver); -+ -+ return drv->suspend(to_ssi_device(dev), mesg); -+} -+ -+static int ssi_driver_resume(struct device *dev) -+{ -+ struct ssi_device_driver *drv = to_ssi_device_driver(dev->driver); -+ -+ return drv->resume(to_ssi_device(dev)); -+} -+ -+struct bus_type ssi_bus_type = { -+ .name = "ssi", -+ .dev_attrs = ssi_dev_attrs, -+ .match = ssi_bus_match, -+ .uevent = ssi_bus_uevent, -+}; -+ -+/** -+ * register_ssi_driver - Register SSI device driver -+ * @driver - reference to the SSI device driver. -+ */ -+int register_ssi_driver(struct ssi_device_driver *driver) -+{ -+ int ret = 0; -+ -+ BUG_ON(driver == NULL); -+ -+ driver->driver.bus = &ssi_bus_type; -+ if (driver->probe) -+ driver->driver.probe = ssi_driver_probe; -+ if (driver->remove) -+ driver->driver.remove = ssi_driver_remove; -+ if (driver->suspend) -+ driver->driver.suspend = ssi_driver_suspend; -+ if (driver->resume) -+ driver->driver.resume = ssi_driver_resume; -+ -+ ret = driver_register(&driver->driver); -+ -+ return ret; -+} -+EXPORT_SYMBOL(register_ssi_driver); -+ -+/** -+ * unregister_ssi_driver - Unregister SSI device driver -+ * @driver - reference to the SSI device driver. -+ */ -+void unregister_ssi_driver(struct ssi_device_driver *driver) -+{ -+ BUG_ON(driver == NULL); -+ -+ driver_unregister(&driver->driver); -+} -+EXPORT_SYMBOL(unregister_ssi_driver); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,540 @@ -+/* -+ * ssi_driver.c -+ * -+ * Implements SSI module interface, initialization, and PM related functions. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include "ssi_driver.h" -+ -+#define SSI_DRIVER_VERSION "1.1-rc2" -+#define SSI_RESETDONE_TIMEOUT 10 /* 10 ms */ -+#define SSI_RESETDONE_RETRIES 20 /* => max 200 ms waiting for reset */ -+ -+/* NOTE: Function called in interrupt context */ -+int ssi_port_event_handler(struct ssi_port *p, unsigned int event, void *arg) -+{ -+ int ch; -+ -+ for (ch = 0; ch < p->max_ch; ch++) { -+ struct ssi_channel *ssi_channel = p->ssi_channel + ch; -+ -+ read_lock(&ssi_channel->rw_lock); -+ if ((ssi_channel->dev) && (ssi_channel->port_event)) -+ ssi_channel->port_event(ssi_channel->dev, event, arg); -+ read_unlock(&ssi_channel->rw_lock); -+ } -+ -+ return 0; -+} -+ -+static int ssi_clk_event(struct notifier_block *nb, unsigned long event, -+ void *data) -+{ -+ switch (event) { -+ case CLK_PRE_RATE_CHANGE: -+ break; -+ case CLK_ABORT_RATE_CHANGE: -+ break; -+ case CLK_POST_RATE_CHANGE: -+ break; -+ default: -+ break; -+ } -+ /* -+ * TODO: At this point we may emit a port event warning about the -+ * clk frequency change to the upper layers. -+ */ -+ return NOTIFY_DONE; -+} -+ -+static void ssi_dev_release(struct device *dev) -+{ -+} -+ -+static int __init reg_ssi_dev_ch(struct ssi_dev *ssi_ctrl, unsigned int p, -+ unsigned int ch) -+{ -+ struct ssi_device *dev; -+ struct ssi_port *port = &ssi_ctrl->ssi_port[p]; -+ int err; -+ -+ dev = kzalloc(sizeof(*dev), GFP_KERNEL); -+ if (!dev) -+ return -ENOMEM; -+ -+ dev->n_ctrl = ssi_ctrl->id; -+ dev->n_p = p; -+ dev->n_ch = ch; -+ dev->ch = &port->ssi_channel[ch]; -+ dev->device.bus = &ssi_bus_type; -+ dev->device.parent = ssi_ctrl->dev; -+ dev->device.release = ssi_dev_release; -+ if (dev->n_ctrl < 0) -+ snprintf(dev->device.bus_id, sizeof(dev->device.bus_id), -+ "omap_ssi-p%u.c%u", p, ch); -+ else -+ snprintf(dev->device.bus_id, sizeof(dev->device.bus_id), -+ "omap_ssi%d-p%u.c%u", dev->n_ctrl, p, ch); -+ -+ err = device_register(&dev->device); -+ if (err >= 0) { -+ write_lock_bh(&port->ssi_channel[ch].rw_lock); -+ port->ssi_channel[ch].dev = dev; -+ write_unlock_bh(&port->ssi_channel[ch].rw_lock); -+ } else { -+ kfree(dev); -+ } -+ return err; -+} -+ -+static int __init register_ssi_devices(struct ssi_dev *ssi_ctrl) -+{ -+ int port; -+ int ch; -+ int err; -+ -+ for (port = 0; port < ssi_ctrl->max_p; port++) -+ for (ch = 0; ch < ssi_ctrl->ssi_port[port].max_ch; ch++) { -+ err = reg_ssi_dev_ch(ssi_ctrl, port, ch); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int __init ssi_softreset(struct ssi_dev *ssi_ctrl) -+{ -+ int ind = 0; -+ void __iomem *base = ssi_ctrl->base; -+ u32 status; -+ -+ ssi_outl_or(SSI_SOFTRESET, base, SSI_SYS_SYSCONFIG_REG); -+ -+ status = ssi_inl(base, SSI_SYS_SYSSTATUS_REG); -+ while ((!(status & SSI_RESETDONE)) && (ind < SSI_RESETDONE_RETRIES)) { -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ schedule_timeout(msecs_to_jiffies(SSI_RESETDONE_TIMEOUT)); -+ status = ssi_inl(base, SSI_SYS_SYSSTATUS_REG); -+ ind++; -+ } -+ -+ if (ind >= SSI_RESETDONE_RETRIES) -+ return -EIO; -+ -+ /* Reseting GDD */ -+ ssi_outl_or(SSI_SWRESET, base, SSI_GDD_GRST_REG); -+ -+ return 0; -+} -+ -+static void __init set_ssi_ports_default(struct ssi_dev *ssi_ctrl, -+ struct platform_device *pd) -+{ -+ struct port_ctx *cfg; -+ struct ssi_platform_data *pdata = 
pd->dev.platform_data; -+ unsigned int port = 0; -+ void __iomem *base = ssi_ctrl->base; -+ -+ for (port = 1; port <= pdata->num_ports; port++) { -+ cfg = &pdata->ctx.pctx[port - 1]; -+ ssi_outl(cfg->sst.mode, base, SSI_SST_MODE_REG(port)); -+ ssi_outl(cfg->sst.frame_size, base, -+ SSI_SST_FRAMESIZE_REG(port)); -+ ssi_outl(cfg->sst.divisor, base, SSI_SST_DIVISOR_REG(port)); -+ ssi_outl(cfg->sst.channels, base, SSI_SST_CHANNELS_REG(port)); -+ ssi_outl(cfg->sst.arb_mode, base, SSI_SST_ARBMODE_REG(port)); -+ -+ ssi_outl(cfg->ssr.mode, base, SSI_SSR_MODE_REG(port)); -+ ssi_outl(cfg->ssr.frame_size, base, -+ SSI_SSR_FRAMESIZE_REG(port)); -+ ssi_outl(cfg->ssr.channels, base, SSI_SSR_CHANNELS_REG(port)); -+ ssi_outl(cfg->ssr.timeout, base, SSI_SSR_TIMEOUT_REG(port)); -+ } -+} -+ -+static int __init ssi_port_channels_init(struct ssi_port *port) -+{ -+ struct ssi_channel *ch; -+ unsigned int ch_i; -+ -+ for (ch_i = 0; ch_i < port->max_ch; ch_i++) { -+ ch = &port->ssi_channel[ch_i]; -+ ch->channel_number = ch_i; -+ rwlock_init(&ch->rw_lock); -+ ch->flags = 0; -+ ch->ssi_port = port; -+ ch->read_data.addr = NULL; -+ ch->read_data.size = 0; -+ ch->read_data.lch = -1; -+ ch->write_data.addr = NULL; -+ ch->write_data.size = 0; -+ ch->write_data.lch = -1; -+ ch->dev = NULL; -+ ch->read_done = NULL; -+ ch->write_done = NULL; -+ ch->port_event = NULL; -+ } -+ -+ return 0; -+} -+ -+static void ssi_ports_exit(struct ssi_dev *ssi_ctrl, unsigned int max_ports) -+{ -+ struct ssi_port *ssi_p; -+ unsigned int port; -+ -+ for (port = 0; port < max_ports; port++) { -+ ssi_p = &ssi_ctrl->ssi_port[port]; -+ ssi_mpu_exit(ssi_p); -+ ssi_cawake_exit(ssi_p); -+ } -+} -+ -+static int __init ssi_request_mpu_irq(struct ssi_port *ssi_p) -+{ -+ struct ssi_dev *ssi_ctrl = ssi_p->ssi_controller; -+ struct platform_device *pd = to_platform_device(ssi_ctrl->dev); -+ struct resource *mpu_irq; -+ -+ mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ, -+ (ssi_p->port_number - 1) * 2); -+ if (!mpu_irq) { -+ dev_err(ssi_ctrl->dev, "SSI misses info for MPU IRQ on" -+ " port %d\n", ssi_p->port_number); -+ return -ENXIO; -+ } -+ ssi_p->n_irq = 0; /* We only use one irq line */ -+ ssi_p->irq = mpu_irq->start; -+ return ssi_mpu_init(ssi_p, mpu_irq->name); -+} -+ -+static int __init ssi_request_cawake_irq(struct ssi_port *ssi_p) -+{ -+ struct ssi_dev *ssi_ctrl = ssi_p->ssi_controller; -+ struct platform_device *pd = to_platform_device(ssi_ctrl->dev); -+ struct resource *cawake_irq; -+ -+ cawake_irq = platform_get_resource(pd, IORESOURCE_IRQ, -+ 4 + ssi_p->port_number); -+ if (!cawake_irq) { -+ dev_err(ssi_ctrl->dev, "SSI device misses info for CAWAKE" -+ "IRQ on port %d\n", ssi_p->port_number); -+ return -ENXIO; -+ } -+ if (cawake_irq->flags & IORESOURCE_UNSET) { -+ dev_info(ssi_ctrl->dev, "No CAWAKE GPIO support\n"); -+ ssi_p->cawake_gpio = -1; -+ return 0; -+ } -+ -+ ssi_p->cawake_gpio_irq = cawake_irq->start; -+ ssi_p->cawake_gpio = irq_to_gpio(cawake_irq->start); -+ return ssi_cawake_init(ssi_p, cawake_irq->name); -+} -+ -+static int __init ssi_ports_init(struct ssi_dev *ssi_ctrl) -+{ -+ struct platform_device *pd = to_platform_device(ssi_ctrl->dev); -+ struct ssi_platform_data *pdata = pd->dev.platform_data; -+ struct ssi_port *ssi_p; -+ unsigned int port; -+ int err; -+ -+ for (port = 0; port < ssi_ctrl->max_p; port++) { -+ ssi_p = &ssi_ctrl->ssi_port[port]; -+ ssi_p->port_number = port + 1; -+ ssi_p->ssi_controller = ssi_ctrl; -+ ssi_p->max_ch = max(pdata->ctx.pctx[port].sst.channels, -+ pdata->ctx.pctx[port].ssr.channels); -+ 
ssi_p->irq = 0; -+ spin_lock_init(&ssi_p->lock); -+ err = ssi_port_channels_init(&ssi_ctrl->ssi_port[port]); -+ if (err < 0) -+ goto rback1; -+ err = ssi_request_mpu_irq(ssi_p); -+ if (err < 0) -+ goto rback2; -+ err = ssi_request_cawake_irq(ssi_p); -+ if (err < 0) -+ goto rback3; -+ } -+ return 0; -+rback3: -+ ssi_mpu_exit(ssi_p); -+rback2: -+ ssi_ports_exit(ssi_ctrl, port + 1); -+rback1: -+ return err; -+} -+ -+static int __init ssi_request_gdd_irq(struct ssi_dev *ssi_ctrl) -+{ -+ struct platform_device *pd = to_platform_device(ssi_ctrl->dev); -+ struct resource *gdd_irq; -+ -+ gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 4); -+ if (!gdd_irq) { -+ dev_err(ssi_ctrl->dev, "SSI has no GDD IRQ resource\n"); -+ return -ENXIO; -+ } -+ -+ ssi_ctrl->gdd_irq = gdd_irq->start; -+ return ssi_gdd_init(ssi_ctrl, gdd_irq->name); -+} -+ -+static int __init ssi_controller_init(struct ssi_dev *ssi_ctrl, -+ struct platform_device *pd) -+{ -+ struct ssi_platform_data *pdata = pd->dev.platform_data; -+ struct resource *mem, *ioarea; -+ int err; -+ -+ mem = platform_get_resource(pd, IORESOURCE_MEM, 0); -+ if (!mem) { -+ dev_err(&pd->dev, "SSI device does not have " -+ "SSI IO memory region information\n"); -+ return -ENXIO; -+ } -+ -+ ioarea = devm_request_mem_region(&pd->dev, mem->start, -+ (mem->end - mem->start) + 1, pd->dev.bus_id); -+ if (!ioarea) { -+ dev_err(&pd->dev, "Unable to request SSI IO mem region\n"); -+ return -EBUSY; -+ } -+ -+ ssi_ctrl->base = devm_ioremap(&pd->dev, mem->start, -+ (mem->end - mem->start) + 1); -+ if (!ssi_ctrl->base) { -+ dev_err(&pd->dev, "Unable to ioremap SSI base IO address\n"); -+ return -ENXIO; -+ } -+ -+ ssi_ctrl->id = pd->id; -+ ssi_ctrl->max_p = pdata->num_ports; -+ ssi_ctrl->dev = &pd->dev; -+ spin_lock_init(&ssi_ctrl->lock); -+ ssi_ctrl->ssi_clk = clk_get(&pd->dev, "ssi_clk"); -+ -+ if (IS_ERR(ssi_ctrl->ssi_clk)) { -+ dev_err(ssi_ctrl->dev, "Unable to get SSI clocks\n"); -+ return PTR_ERR(ssi_ctrl->ssi_clk); -+ } -+ -+ if (pdata->clk_notifier_register) { -+ ssi_ctrl->ssi_nb.notifier_call = ssi_clk_event; -+ ssi_ctrl->ssi_nb.priority = INT_MAX; /* Let's try to be first */ -+ err = pdata->clk_notifier_register(ssi_ctrl->ssi_clk, -+ &ssi_ctrl->ssi_nb); -+ if (err < 0) -+ goto rback1; -+ } -+ -+ err = ssi_ports_init(ssi_ctrl); -+ if (err < 0) -+ goto rback2; -+ -+ err = ssi_request_gdd_irq(ssi_ctrl); -+ if (err < 0) -+ goto rback3; -+ -+ return 0; -+rback3: -+ ssi_ports_exit(ssi_ctrl, ssi_ctrl->max_p); -+rback2: -+ if (pdata->clk_notifier_unregister) -+ pdata->clk_notifier_unregister(ssi_ctrl->ssi_clk, -+ &ssi_ctrl->ssi_nb); -+rback1: -+ clk_put(ssi_ctrl->ssi_clk); -+ dev_err(&pd->dev, "Error on ssi_controller initialization\n"); -+ return err; -+} -+ -+static void ssi_controller_exit(struct ssi_dev *ssi_ctrl) -+{ -+ struct ssi_platform_data *pdata = ssi_ctrl->dev->platform_data; -+ -+ ssi_gdd_exit(ssi_ctrl); -+ ssi_ports_exit(ssi_ctrl, ssi_ctrl->max_p); -+ if (pdata->clk_notifier_unregister) -+ pdata->clk_notifier_unregister(ssi_ctrl->ssi_clk, -+ &ssi_ctrl->ssi_nb); -+ clk_put(ssi_ctrl->ssi_clk); -+} -+ -+static int __init ssi_probe(struct platform_device *pd) -+{ -+ struct ssi_platform_data *pdata = pd->dev.platform_data; -+ struct ssi_dev *ssi_ctrl; -+ u32 revision; -+ int err; -+ -+ if (!pdata) { -+ pr_err(LOG_NAME "No platform_data found on ssi device\n"); -+ return -ENXIO; -+ } -+ -+ ssi_ctrl = kzalloc(sizeof(*ssi_ctrl), GFP_KERNEL); -+ if (ssi_ctrl == NULL) { -+ dev_err(&pd->dev, "Could not allocate memory for" -+ " struct ssi_dev\n"); -+ return 
-ENOMEM; -+ } -+ -+ platform_set_drvdata(pd, ssi_ctrl); -+ err = ssi_controller_init(ssi_ctrl, pd); -+ if (err < 0) { -+ dev_err(&pd->dev, "Could not initialize ssi controller:" -+ " %d\n", err); -+ goto rollback1; -+ } -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ err = ssi_softreset(ssi_ctrl); -+ if (err < 0) -+ goto rollback2; -+ -+ /* Set default PM settings */ -+ ssi_outl((SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART), -+ ssi_ctrl->base, SSI_SYS_SYSCONFIG_REG); -+ ssi_outl(SSI_CLK_AUTOGATING_ON, ssi_ctrl->base, SSI_GDD_GCR_REG); -+ -+ /* Configure SSI ports */ -+ set_ssi_ports_default(ssi_ctrl, pd); -+ -+ /* Gather info from registers for the driver.(REVISION) */ -+ revision = ssi_inl(ssi_ctrl->base, SSI_SYS_REVISION_REG); -+ dev_info(ssi_ctrl->dev, "SSI Hardware REVISION %d.%d\n", -+ (revision & SSI_REV_MAJOR) >> 4, (revision & SSI_REV_MINOR)); -+ -+ err = ssi_debug_add_ctrl(ssi_ctrl); -+ if (err < 0) -+ goto rollback2; -+ -+ err = register_ssi_devices(ssi_ctrl); -+ if (err < 0) -+ goto rollback3; -+ -+ clk_disable(ssi_ctrl->ssi_clk); -+ -+ return err; -+ -+rollback3: -+ ssi_debug_remove_ctrl(ssi_ctrl); -+rollback2: -+ clk_disable(ssi_ctrl->ssi_clk); -+ ssi_controller_exit(ssi_ctrl); -+rollback1: -+ kfree(ssi_ctrl); -+ return err; -+} -+ -+static void __exit unregister_ssi_devices(struct ssi_dev *ssi_ctrl) -+{ -+ struct ssi_port *ssi_p; -+ struct ssi_device *device; -+ unsigned int port; -+ unsigned int ch; -+ -+ for (port = 0; port < ssi_ctrl->max_p; port++) { -+ ssi_p = &ssi_ctrl->ssi_port[port]; -+ for (ch = 0; ch < ssi_p->max_ch; ch++) { -+ device = ssi_p->ssi_channel[ch].dev; -+ ssi_close(device); -+ device_unregister(&device->device); -+ kfree(device); -+ } -+ } -+} -+ -+static int __exit ssi_remove(struct platform_device *pd) -+{ -+ struct ssi_dev *ssi_ctrl = platform_get_drvdata(pd); -+ -+ if (!ssi_ctrl) -+ return 0; -+ -+ unregister_ssi_devices(ssi_ctrl); -+ ssi_debug_remove_ctrl(ssi_ctrl); -+ ssi_controller_exit(ssi_ctrl); -+ kfree(ssi_ctrl); -+ -+ return 0; -+} -+ -+static struct platform_driver ssi_pdriver = { -+ .probe = ssi_probe, -+ .remove = __exit_p(ssi_remove), -+ .driver = { -+ .name = "omap_ssi", -+ .owner = THIS_MODULE, -+ } -+}; -+ -+static int __init ssi_driver_init(void) -+{ -+ int err = 0; -+ -+ pr_info("SSI DRIVER Version " SSI_DRIVER_VERSION "\n"); -+ -+ ssi_bus_init(); -+ err = ssi_debug_init(); -+ if (err < 0) { -+ pr_err(LOG_NAME "SSI Debugfs failed %d\n", err); -+ goto rback1; -+ } -+ err = platform_driver_probe(&ssi_pdriver, ssi_probe); -+ if (err < 0) { -+ pr_err(LOG_NAME "Platform DRIVER register FAILED: %d\n", err); -+ goto rback2; -+ } -+ -+ return 0; -+rback2: -+ ssi_debug_exit(); -+rback1: -+ ssi_bus_exit(); -+ return err; -+} -+ -+static void __exit ssi_driver_exit(void) -+{ -+ platform_driver_unregister(&ssi_pdriver); -+ ssi_debug_exit(); -+ ssi_bus_exit(); -+ -+ pr_info("SSI DRIVER removed\n"); -+} -+ -+module_init(ssi_driver_init); -+module_exit(ssi_driver_exit); -+ -+MODULE_ALIAS("platform:omap_ssi"); -+MODULE_AUTHOR("Carlos Chinea / Nokia"); -+MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_debugfs.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_debugfs.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_debugfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_debugfs.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,259 @@ -+/* -+ * ssi_driver_debugfs.c -+ * -+ * 
Implements SSI debugfs. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#include -+#include "ssi_driver.h" -+ -+#define SSI_DIR_NAME_SIZE 64 -+ -+static struct dentry *ssi_dir; -+ -+static int ssi_debug_show(struct seq_file *m, void *p) -+{ -+ struct ssi_dev *ssi_ctrl = m->private; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ seq_printf(m, "REVISION\t: 0x%08x\n", -+ ssi_inl(ssi_ctrl->base, SSI_SYS_REVISION_REG)); -+ seq_printf(m, "SYSCONFIG\t: 0x%08x\n", -+ ssi_inl(ssi_ctrl->base, SSI_SYS_SYSCONFIG_REG)); -+ seq_printf(m, "SYSSTATUS\t: 0x%08x\n", -+ ssi_inl(ssi_ctrl->base, SSI_SYS_SYSSTATUS_REG)); -+ -+ clk_disable(ssi_ctrl->ssi_clk); -+ return 0; -+} -+ -+static int ssi_debug_port_show(struct seq_file *m, void *p) -+{ -+ struct ssi_port *ssi_port = m->private; -+ struct ssi_dev *ssi_ctrl = ssi_port->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ unsigned int port = ssi_port->port_number; -+ int ch; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ if (ssi_port->cawake_gpio >= 0) -+ seq_printf(m, "CAWAKE\t\t: %d\n", ssi_cawake(ssi_port)); -+ -+ seq_printf(m, "WAKE\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SYS_WAKE_REG(port))); -+ seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", ssi_port->n_irq, -+ ssi_inl(base, SSI_SYS_MPU_ENABLE_REG(port, ssi_port->n_irq))); -+ seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", ssi_port->n_irq, -+ ssi_inl(base, SSI_SYS_MPU_STATUS_REG(port, ssi_port->n_irq))); -+ /* SST */ -+ seq_printf(m, "\nSST\n===\n"); -+ seq_printf(m, "MODE\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_MODE_REG(port))); -+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_FRAMESIZE_REG(port))); -+ seq_printf(m, "DIVISOR\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_DIVISOR_REG(port))); -+ seq_printf(m, "CHANNELS\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_CHANNELS_REG(port))); -+ seq_printf(m, "ARBMODE\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_ARBMODE_REG(port))); -+ seq_printf(m, "TXSTATE\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_TXSTATE_REG(port))); -+ seq_printf(m, "BUFSTATE\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_BUFSTATE_REG(port))); -+ seq_printf(m, "BREAK\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SST_BREAK_REG(port))); -+ for (ch = 0; ch < ssi_port->max_ch; ch++) { -+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, -+ ssi_inl(base, SSI_SST_BUFFER_CH_REG(port, ch))); -+ } -+ /* SSR */ -+ seq_printf(m, "\nSSR\n===\n"); -+ seq_printf(m, "MODE\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_MODE_REG(port))); -+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_FRAMESIZE_REG(port))); -+ seq_printf(m, "CHANNELS\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_CHANNELS_REG(port))); -+ seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_TIMEOUT_REG(port))); -+ seq_printf(m, "RXSTATE\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_RXSTATE_REG(port))); -+ seq_printf(m, 
"BUFSTATE\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_BUFSTATE_REG(port))); -+ seq_printf(m, "BREAK\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_BREAK_REG(port))); -+ seq_printf(m, "ERROR\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_ERROR_REG(port))); -+ seq_printf(m, "ERRORACK\t: 0x%08x\n", -+ ssi_inl(base, SSI_SSR_ERRORACK_REG(port))); -+ for (ch = 0; ch < ssi_port->max_ch; ch++) { -+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, -+ ssi_inl(base, SSI_SSR_BUFFER_CH_REG(port, ch))); -+ } -+ clk_disable(ssi_ctrl->ssi_clk); -+ return 0; -+} -+ -+static int ssi_debug_gdd_show(struct seq_file *m, void *p) -+{ -+ struct ssi_dev *ssi_ctrl = m->private; -+ void __iomem *base = ssi_ctrl->base; -+ int lch; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", -+ ssi_inl(base, SSI_SYS_GDD_MPU_IRQ_STATUS_REG)); -+ seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", -+ ssi_inl(base, SSI_SYS_GDD_MPU_IRQ_ENABLE_REG)); -+ -+ seq_printf(m, "HW_ID\t\t: 0x%08x\n", ssi_inl(base, SSI_GDD_HW_ID_REG)); -+ seq_printf(m, "PPORT_ID\t: 0x%08x\n", -+ ssi_inl(base, SSI_GDD_PPORT_ID_REG)); -+ seq_printf(m, "MPORT_ID\t: 0x%08x\n", -+ ssi_inl(base, SSI_GDD_MPORT_ID_REG)); -+ seq_printf(m, "TEST\t\t: 0x%08x\n", ssi_inl(base, SSI_GDD_TEST_REG)); -+ seq_printf(m, "GCR\t\t: 0x%08x\n", ssi_inl(base, SSI_GDD_GCR_REG)); -+ -+ for (lch = 0; lch < SSI_NUM_LCH; lch++) { -+ seq_printf(m, "\nGDD LCH %d\n=========\n", lch); -+ seq_printf(m, "CSDP\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CSDP_REG(lch))); -+ seq_printf(m, "CCR\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CCR_REG(lch))); -+ seq_printf(m, "CICR\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CICR_REG(lch))); -+ seq_printf(m, "CSR\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CSR_REG(lch))); -+ seq_printf(m, "CSSA\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_GDD_CSSA_REG(lch))); -+ seq_printf(m, "CDSA\t\t: 0x%08x\n", -+ ssi_inl(base, SSI_GDD_CDSA_REG(lch))); -+ seq_printf(m, "CEN\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CEN_REG(lch))); -+ seq_printf(m, "CSAC\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CSAC_REG(lch))); -+ seq_printf(m, "CDAC\t\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CDAC_REG(lch))); -+ seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", -+ ssi_inw(base, SSI_GDD_CLNK_CTRL_REG(lch))); -+ } -+ -+ clk_disable(ssi_ctrl->ssi_clk); -+ return 0; -+} -+ -+static int ssi_regs_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, ssi_debug_show, inode->i_private); -+} -+ -+static int ssi_port_regs_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, ssi_debug_port_show, inode->i_private); -+} -+ -+static int ssi_gdd_regs_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, ssi_debug_gdd_show, inode->i_private); -+} -+ -+static const struct file_operations ssi_regs_fops = { -+ .open = ssi_regs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static const struct file_operations ssi_port_regs_fops = { -+ .open = ssi_port_regs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static const struct file_operations ssi_gdd_regs_fops = { -+ .open = ssi_gdd_regs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+int __init ssi_debug_add_ctrl(struct ssi_dev *ssi_ctrl) -+{ -+ struct platform_device *pdev = to_platform_device(ssi_ctrl->dev); -+ unsigned char dir_name[SSI_DIR_NAME_SIZE]; -+ struct dentry *dir; -+ unsigned int port; -+ -+ if (pdev->id < 0) { -+ ssi_ctrl->dir = debugfs_create_dir(pdev->name, ssi_dir); 
-+ } else { -+ snprintf(dir_name, sizeof(dir_name), "%s%d", pdev->name, -+ pdev->id); -+ ssi_ctrl->dir = debugfs_create_dir(dir_name, ssi_dir); -+ } -+ if (IS_ERR(ssi_ctrl->dir)) -+ return PTR_ERR(ssi_ctrl->dir); -+ -+ debugfs_create_file("regs", S_IRUGO, ssi_ctrl->dir, ssi_ctrl, -+ &ssi_regs_fops); -+ -+ for (port = 0; port < ssi_ctrl->max_p; port++) { -+ snprintf(dir_name, sizeof(dir_name), "port%d", port + 1); -+ dir = debugfs_create_dir(dir_name, ssi_ctrl->dir); -+ if (IS_ERR(dir)) -+ goto rback; -+ debugfs_create_file("regs", S_IRUGO, dir, -+ &ssi_ctrl->ssi_port[port], &ssi_port_regs_fops); -+ } -+ -+ dir = debugfs_create_dir("gdd", ssi_ctrl->dir); -+ if (IS_ERR(dir)) -+ goto rback; -+ debugfs_create_file("regs", S_IRUGO, dir, ssi_ctrl, &ssi_gdd_regs_fops); -+ -+ return 0; -+rback: -+ debugfs_remove_recursive(ssi_ctrl->dir); -+ return PTR_ERR(dir); -+} -+ -+void ssi_debug_remove_ctrl(struct ssi_dev *ssi_ctrl) -+{ -+ debugfs_remove_recursive(ssi_ctrl->dir); -+} -+ -+int __init ssi_debug_init(void) -+{ -+ ssi_dir = debugfs_create_dir("ssi", NULL); -+ if (IS_ERR(ssi_dir)) -+ return PTR_ERR(ssi_dir); -+ -+ return 0; -+} -+ -+void ssi_debug_exit(void) -+{ -+ debugfs_remove_recursive(ssi_dir); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_dma.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_dma.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_dma.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_dma.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,424 @@ -+/* -+ * ssi_driver_dma.c -+ * -+ * Implements SSI low level interface driver functionality with DMA support. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#include -+#include "ssi_driver.h" -+#include -+ -+#define SSI_SYNC_WRITE 0 -+#define SSI_SYNC_READ 1 -+#define SSI_L3_TPUT 13428 /* 13428 KiB/s => ~110 Mbit/s*/ -+ -+static unsigned char ssi_sync_table[2][2][8] = { -+ { -+ {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, -+ {0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00} -+ }, { -+ {0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17}, -+ {0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f} -+ } -+}; -+ -+/** -+ * ssi_get_sync_port - Get the port number associate to a GDD sync. -+ * @sync - The sync mask from where to deduce the port. -+ * -+ * There is not masking scheme that can retrieve easily the port number -+ * from the sync value. TI spec made our live harder by for example -+ * associating 0x18 and 0x08 values to different ports :( -+ * -+ * Return port number (1 or 2) associated to the sync mask. -+ */ -+static inline unsigned int ssi_get_sync_port(unsigned int sync) -+{ -+ return (((sync > 0x00) && (sync < 0x09)) || -+ ((sync > 0x0f) && (sync < 0x18))) ? 
1 : 2; -+} -+ -+/** -+ * ssi_get_free_lch - Get a free GDD(DMA)logical channel -+ * @ssi_ctrl- SSI controller of the GDD. -+ * -+ * Needs to be called holding the ssi_controller lock -+ * -+ * Return a free logical channel number. If there is no free lch -+ * then returns an out of range value -+ */ -+static unsigned int ssi_get_free_lch(struct ssi_dev *ssi_ctrl) -+{ -+ unsigned int enable_reg; -+ unsigned int i; -+ unsigned int lch = ssi_ctrl->last_gdd_lch; -+ -+ enable_reg = ssi_inl(ssi_ctrl->base, SSI_SYS_GDD_MPU_IRQ_ENABLE_REG); -+ for (i = 1; i <= SSI_NUM_LCH; i++) { -+ lch = (lch + i) & (SSI_NUM_LCH - 1); -+ if (!(enable_reg & SSI_GDD_LCH(lch))) { -+ ssi_ctrl->last_gdd_lch = lch; -+ return lch; -+ } -+ } -+ -+ return lch; -+} -+ -+/** -+ * ssi_driver_write_dma - Program GDD [DMA] to write data from memory to -+ * the ssi channel buffer. -+ * @ssi_channel - pointer to the ssi_channel to write data to. -+ * @data - 32-bit word pointer to the data. -+ * @size - Number of 32bit words to be transfered. -+ * -+ * ssi_controller lock must be held before calling this function. -+ * -+ * Return 0 on success and < 0 on error. -+ */ -+int ssi_driver_write_dma(struct ssi_channel *ssi_channel, u32 *data, -+ unsigned int size) -+{ -+ struct ssi_dev *ssi_ctrl = ssi_channel->ssi_port->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ struct ssi_platform_data *pdata = ssi_ctrl->dev->platform_data; -+ unsigned int port = ssi_channel->ssi_port->port_number; -+ unsigned int channel = ssi_channel->channel_number; -+ unsigned int sync; -+ int lch; -+ dma_addr_t dma_data; -+ dma_addr_t s_addr; -+ u16 tmp; -+ -+ if ((size < 1) || (data == NULL)) -+ return -EINVAL; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ lch = ssi_get_free_lch(ssi_ctrl); -+ if (lch >= SSI_NUM_LCH) { -+ dev_err(ssi_ctrl->dev, "No free GDD logical " -+ "channels.\n"); -+ clk_disable(ssi_ctrl->ssi_clk); -+ return -EBUSY; /* No free GDD logical channels. */ -+ } -+ -+ if ((pdata->set_min_bus_tput) && (ssi_ctrl->gdd_usecount++ == 0)) -+ pdata->set_min_bus_tput(ssi_ctrl->dev, OCP_INITIATOR_AGENT, -+ SSI_L3_TPUT); -+ /* NOTE: Gettting a free gdd logical channel and -+ * reserve it must be done atomicaly. */ -+ ssi_channel->write_data.lch = lch; -+ -+ sync = ssi_sync_table[SSI_SYNC_WRITE][port - 1][channel]; -+ dma_data = dma_map_single(ssi_ctrl->dev, data, size * 4, -+ DMA_TO_DEVICE); -+ -+ tmp = SSI_SRC_SINGLE_ACCESS0 | -+ SSI_SRC_MEMORY_PORT | -+ SSI_DST_SINGLE_ACCESS0 | -+ SSI_DST_PERIPHERAL_PORT | -+ SSI_DATA_TYPE_S32; -+ ssi_outw(tmp, base, SSI_GDD_CSDP_REG(lch)); -+ -+ tmp = SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | sync; -+ ssi_outw(tmp, base, SSI_GDD_CCR_REG(lch)); -+ -+ ssi_outw((SSI_BLOCK_IE | SSI_TOUT_IE), base, SSI_GDD_CICR_REG(lch)); -+ -+ s_addr = (dma_addr_t)io_v2p(base + -+ SSI_SST_BUFFER_CH_REG(port, channel)); -+ ssi_outl(s_addr, base, SSI_GDD_CDSA_REG(lch)); -+ -+ ssi_outl(dma_data, base, SSI_GDD_CSSA_REG(lch)); -+ ssi_outw(size, base, SSI_GDD_CEN_REG(lch)); -+ -+ ssi_outl_or(SSI_GDD_LCH(lch), base, SSI_SYS_GDD_MPU_IRQ_ENABLE_REG); -+ ssi_outw_or(SSI_CCR_ENABLE, base, SSI_GDD_CCR_REG(lch)); -+ -+ return 0; -+} -+ -+/** -+ * ssi_driver_read_dma - Program GDD [DMA] to write data to memory from -+ * the ssi channel buffer. -+ * @ssi_channel - pointer to the ssi_channel to read data from. -+ * @data - 32-bit word pointer where to store the incoming data. -+ * @size - Number of 32bit words to be transfered to the buffer. -+ * -+ * ssi_controller lock must be held before calling this function. 
-+ * -+ * Return 0 on success and < 0 on error. -+ */ -+int ssi_driver_read_dma(struct ssi_channel *ssi_channel, u32 *data, -+ unsigned int count) -+{ -+ struct ssi_dev *ssi_ctrl = ssi_channel->ssi_port->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ struct ssi_platform_data *pdata = ssi_ctrl->dev->platform_data; -+ unsigned int port = ssi_channel->ssi_port->port_number; -+ unsigned int channel = ssi_channel->channel_number; -+ unsigned int sync; -+ unsigned int lch; -+ dma_addr_t dma_data; -+ dma_addr_t d_addr; -+ u16 tmp; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ lch = ssi_get_free_lch(ssi_ctrl); -+ if (lch >= SSI_NUM_LCH) { -+ dev_err(ssi_ctrl->dev, "No free GDD logical channels.\n"); -+ clk_disable(ssi_ctrl->ssi_clk); -+ return -EBUSY; /* No free GDD logical channels. */ -+ } -+ if ((pdata->set_min_bus_tput) && (ssi_ctrl->gdd_usecount++ == 0)) -+ pdata->set_min_bus_tput(ssi_ctrl->dev, OCP_INITIATOR_AGENT, -+ SSI_L3_TPUT); -+ /* -+ * NOTE: Gettting a free gdd logical channel and -+ * reserve it must be done atomicaly. -+ */ -+ ssi_channel->read_data.lch = lch; -+ -+ sync = ssi_sync_table[SSI_SYNC_READ][port - 1][channel]; -+ -+ dma_data = dma_map_single(ssi_ctrl->dev, data, count * 4, -+ DMA_FROM_DEVICE); -+ -+ tmp = SSI_DST_SINGLE_ACCESS0 | -+ SSI_DST_MEMORY_PORT | -+ SSI_SRC_SINGLE_ACCESS0 | -+ SSI_SRC_PERIPHERAL_PORT | -+ SSI_DATA_TYPE_S32; -+ ssi_outw(tmp, base, SSI_GDD_CSDP_REG(lch)); -+ -+ tmp = SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | sync; -+ ssi_outw(tmp, base, SSI_GDD_CCR_REG(lch)); -+ -+ ssi_outw((SSI_BLOCK_IE | SSI_TOUT_IE), base, SSI_GDD_CICR_REG(lch)); -+ -+ d_addr = (dma_addr_t)io_v2p(base + -+ SSI_SSR_BUFFER_CH_REG(port, channel)); -+ ssi_outl(d_addr, base, SSI_GDD_CSSA_REG(lch)); -+ -+ ssi_outl(dma_data, base, SSI_GDD_CDSA_REG(lch)); -+ ssi_outw(count, base, SSI_GDD_CEN_REG(lch)); -+ -+ ssi_outl_or(SSI_GDD_LCH(lch), base, SSI_SYS_GDD_MPU_IRQ_ENABLE_REG); -+ ssi_outw_or(SSI_CCR_ENABLE, base, SSI_GDD_CCR_REG(lch)); -+ -+ return 0; -+} -+ -+void ssi_driver_cancel_write_dma(struct ssi_channel *ssi_ch) -+{ -+ int lch = ssi_ch->write_data.lch; -+ unsigned int port = ssi_ch->ssi_port->port_number; -+ unsigned int channel = ssi_ch->channel_number; -+ struct ssi_dev *ssi_ctrl = ssi_ch->ssi_port->ssi_controller; -+ struct ssi_platform_data *pdata = ssi_ctrl->dev->platform_data; -+ u32 ccr; -+ -+ if (lch < 0) -+ return; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ ccr = ssi_inw(ssi_ctrl->base, SSI_GDD_CCR_REG(lch)); -+ if (!(ccr & SSI_CCR_ENABLE)) { -+ dev_dbg(&ssi_ch->dev->device, LOG_NAME "Write cancel on not " -+ "enabled logical channel %d CCR REG 0x%08X\n", lch, ccr); -+ clk_disable(ssi_ctrl->ssi_clk); -+ return; -+ } -+ -+ if ((pdata->set_min_bus_tput) && (--ssi_ctrl->gdd_usecount == 0)) -+ pdata->set_min_bus_tput(ssi_ctrl->dev, OCP_INITIATOR_AGENT, 0); -+ -+ ssi_outw_and(~SSI_CCR_ENABLE, ssi_ctrl->base, SSI_GDD_CCR_REG(lch)); -+ ssi_outl_and(~SSI_GDD_LCH(lch), ssi_ctrl->base, -+ SSI_SYS_GDD_MPU_IRQ_ENABLE_REG); -+ ssi_outl(SSI_GDD_LCH(lch), ssi_ctrl->base, -+ SSI_SYS_GDD_MPU_IRQ_STATUS_REG); -+ -+ ssi_outl_and(~NOTFULL(channel), ssi_ctrl->base, -+ SSI_SST_BUFSTATE_REG(port)); -+ -+ ssi_reset_ch_write(ssi_ch); -+ clk_disable(ssi_ctrl->ssi_clk); -+ clk_disable(ssi_ctrl->ssi_clk); -+} -+ -+void ssi_driver_cancel_read_dma(struct ssi_channel *ssi_ch) -+{ -+ int lch = ssi_ch->read_data.lch; -+ struct ssi_dev *ssi_ctrl = ssi_ch->ssi_port->ssi_controller; -+ struct ssi_platform_data *pdata = ssi_ctrl->dev->platform_data; -+ unsigned int port = 
ssi_ch->ssi_port->port_number; -+ unsigned int channel = ssi_ch->channel_number; -+ u32 reg; -+ -+ if (lch < 0) -+ return; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ reg = ssi_inw(ssi_ctrl->base, SSI_GDD_CCR_REG(lch)); -+ if (!(reg & SSI_CCR_ENABLE)) { -+ dev_dbg(&ssi_ch->dev->device, LOG_NAME "Read cancel on not " -+ "enable logical channel %d CCR REG 0x%08X\n", lch, reg); -+ clk_disable(ssi_ctrl->ssi_clk); -+ return; -+ } -+ -+ if ((pdata->set_min_bus_tput) && (--ssi_ctrl->gdd_usecount == 0)) -+ pdata->set_min_bus_tput(ssi_ctrl->dev, OCP_INITIATOR_AGENT, 0); -+ -+ ssi_outw_and(~SSI_CCR_ENABLE, ssi_ctrl->base, SSI_GDD_CCR_REG(lch)); -+ ssi_outl_and(~SSI_GDD_LCH(lch), ssi_ctrl->base, -+ SSI_SYS_GDD_MPU_IRQ_ENABLE_REG); -+ ssi_outl(SSI_GDD_LCH(lch), ssi_ctrl->base, -+ SSI_SYS_GDD_MPU_IRQ_STATUS_REG); -+ -+ ssi_outl_and(~NOTEMPTY(channel), ssi_ctrl->base, -+ SSI_SSR_BUFSTATE_REG(port)); -+ -+ ssi_reset_ch_read(ssi_ch); -+ clk_disable(ssi_ctrl->ssi_clk); -+ clk_disable(ssi_ctrl->ssi_clk); -+} -+ -+static void do_ssi_gdd_lch(struct ssi_dev *ssi_ctrl, unsigned int gdd_lch) -+{ -+ struct ssi_platform_data *pdata = ssi_ctrl->dev->platform_data; -+ void __iomem *base = ssi_ctrl->base; -+ struct ssi_channel *ch; -+ unsigned int port; -+ unsigned int channel; -+ u32 sync; -+ u32 gdd_csr; -+ dma_addr_t dma_h; -+ size_t size; -+ -+ sync = ssi_inw(base, SSI_GDD_CCR_REG(gdd_lch)) & SSI_CCR_SYNC_MASK; -+ port = ssi_get_sync_port(sync); -+ -+ spin_lock(&ssi_ctrl->lock); -+ -+ ssi_outl_and(~SSI_GDD_LCH(gdd_lch), base, -+ SSI_SYS_GDD_MPU_IRQ_ENABLE_REG); -+ gdd_csr = ssi_inw(base, SSI_GDD_CSR_REG(gdd_lch)); -+ -+ if (!(gdd_csr & SSI_CSR_TOUR)) { -+ if (sync & 0x10) { /* Read path */ -+ channel = sync & 0x7; -+ dma_h = ssi_inl(base, SSI_GDD_CDSA_REG(gdd_lch)); -+ size = ssi_inw(base, SSI_GDD_CEN_REG(gdd_lch)) * 4; -+ dma_sync_single(ssi_ctrl->dev, dma_h, size, -+ DMA_FROM_DEVICE); -+ dma_unmap_single(ssi_ctrl->dev, dma_h, size, -+ DMA_FROM_DEVICE); -+ ch = ctrl_get_ch(ssi_ctrl, port, channel); -+ ssi_reset_ch_read(ch); -+ spin_unlock(&ssi_ctrl->lock); -+ ch->read_done(ch->dev); -+ } else { -+ channel = (sync - 1) & 0x7; -+ dma_h = ssi_inl(base, SSI_GDD_CSSA_REG(gdd_lch)); -+ size = ssi_inw(base, SSI_GDD_CEN_REG(gdd_lch)) * 4; -+ dma_unmap_single(ssi_ctrl->dev, dma_h, size, -+ DMA_TO_DEVICE); -+ ch = ctrl_get_ch(ssi_ctrl, port, channel); -+ ssi_reset_ch_write(ch); -+ spin_unlock(&ssi_ctrl->lock); -+ ch->write_done(ch->dev); -+ } -+ } else { -+ dev_err(ssi_ctrl->dev, "Error on GDD transfer " -+ "on gdd channel %d sync %d\n", gdd_lch, sync); -+ spin_unlock(&ssi_ctrl->lock); -+ ssi_port_event_handler(&ssi_ctrl->ssi_port[port - 1], -+ SSI_EVENT_ERROR, NULL); -+ } -+ -+ if ((pdata->set_min_bus_tput) && (--ssi_ctrl->gdd_usecount == 0)) -+ pdata->set_min_bus_tput(ssi_ctrl->dev, OCP_INITIATOR_AGENT, 0); -+ /* Decrease clk usecount which was increased in -+ * ssi_driver_{read,write}_dma() */ -+ clk_disable(ssi_ctrl->ssi_clk); -+} -+ -+static void do_ssi_gdd_tasklet(unsigned long device) -+{ -+ struct ssi_dev *ssi_ctrl = (struct ssi_dev *)device; -+ void __iomem *base = ssi_ctrl->base; -+ unsigned int gdd_lch = 0; -+ u32 status_reg = 0; -+ u32 lch_served = 0; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ status_reg = ssi_inl(base, SSI_SYS_GDD_MPU_IRQ_STATUS_REG); -+ -+ for (gdd_lch = 0; gdd_lch < SSI_NUM_LCH; gdd_lch++) { -+ if (status_reg & SSI_GDD_LCH(gdd_lch)) { -+ do_ssi_gdd_lch(ssi_ctrl, gdd_lch); -+ lch_served |= SSI_GDD_LCH(gdd_lch); -+ } -+ } -+ -+ ssi_outl(lch_served, base, SSI_SYS_GDD_MPU_IRQ_STATUS_REG); -+ -+ 
status_reg = ssi_inl(base, SSI_SYS_GDD_MPU_IRQ_STATUS_REG); -+ clk_disable(ssi_ctrl->ssi_clk); -+ -+ if (status_reg) -+ tasklet_hi_schedule(&ssi_ctrl->ssi_gdd_tasklet); -+ else -+ enable_irq(ssi_ctrl->gdd_irq); -+} -+ -+static irqreturn_t ssi_gdd_mpu_handler(int irq, void *ssi_controller) -+{ -+ struct ssi_dev *ssi_ctrl = ssi_controller; -+ -+ tasklet_hi_schedule(&ssi_ctrl->ssi_gdd_tasklet); -+ disable_irq_nosync(ssi_ctrl->gdd_irq); -+ -+ return IRQ_HANDLED; -+} -+ -+int __init ssi_gdd_init(struct ssi_dev *ssi_ctrl, const char *irq_name) -+{ -+ tasklet_init(&ssi_ctrl->ssi_gdd_tasklet, do_ssi_gdd_tasklet, -+ (unsigned long)ssi_ctrl); -+ if (request_irq(ssi_ctrl->gdd_irq, ssi_gdd_mpu_handler, IRQF_DISABLED, -+ irq_name, ssi_ctrl) < 0) { -+ dev_err(ssi_ctrl->dev, "FAILED to request GDD IRQ %d", -+ ssi_ctrl->gdd_irq); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+void ssi_gdd_exit(struct ssi_dev *ssi_ctrl) -+{ -+ tasklet_disable(&ssi_ctrl->ssi_gdd_tasklet); -+ free_irq(ssi_ctrl->gdd_irq, ssi_ctrl); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_gpio.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_gpio.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_gpio.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_gpio.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,81 @@ -+/* -+ * ssi_driver_gpio.c -+ * -+ * Implements SSI GPIO related functionality. (i.e: wake lines management) -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#include -+#include "ssi_driver.h" -+ -+static void do_ssi_cawake_tasklet(unsigned long ssi_p) -+{ -+ struct ssi_port *port = (struct ssi_port *)ssi_p; -+ struct ssi_dev *ssi_ctrl = port->ssi_controller; -+ -+ if (ssi_cawake(port)) { -+ if (!ssi_ctrl->cawake_clk_enable) { -+ ssi_ctrl->cawake_clk_enable = 1; -+ clk_enable(ssi_ctrl->ssi_clk); -+ } -+ ssi_port_event_handler(port, SSI_EVENT_CAWAKE_UP, NULL); -+ } else { -+ ssi_port_event_handler(port, SSI_EVENT_CAWAKE_DOWN, NULL); -+ if (ssi_ctrl->cawake_clk_enable) { -+ ssi_ctrl->cawake_clk_enable = 0; -+ clk_disable(ssi_ctrl->ssi_clk); -+ } -+ } -+} -+ -+static irqreturn_t ssi_cawake_isr(int irq, void *ssi_p) -+{ -+ struct ssi_port *port = ssi_p; -+ -+ tasklet_hi_schedule(&port->cawake_tasklet); -+ -+ return IRQ_HANDLED; -+} -+ -+int __init ssi_cawake_init(struct ssi_port *port, const char *irq_name) -+{ -+ tasklet_init(&port->cawake_tasklet, do_ssi_cawake_tasklet, -+ (unsigned long)port); -+ if (request_irq(port->cawake_gpio_irq, ssi_cawake_isr, -+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, -+ irq_name, port) < 0) { -+ dev_err(port->ssi_controller->dev, -+ "FAILED to request %s GPIO IRQ %d on port %d\n", -+ irq_name, port->cawake_gpio_irq, port->port_number); -+ return -EBUSY; -+ } -+ enable_irq_wake(port->cawake_gpio_irq); -+ -+ return 0; -+} -+ -+void ssi_cawake_exit(struct ssi_port *port) -+{ -+ if (port->cawake_gpio < 0) -+ return; /* Nothing to do */ -+ -+ disable_irq_wake(port->cawake_gpio_irq); -+ tasklet_kill(&port->cawake_tasklet); -+ free_irq(port->cawake_gpio_irq, port); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver.h linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver.h ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver.h 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,273 @@ -+/* -+ * ssi_driver.h -+ * -+ * Header file for the SSI driver low level interface. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifndef __SSI_DRIVER_H__ -+#define __SSI_DRIVER_H__ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+/* Channel states */ -+#define SSI_CH_OPEN 0x01 -+#define SSI_CH_RX_POLL 0x10 -+ -+/* -+ * The number of channels to use by the driver in the ports, or the highest -+ * port channel number (+1) used. 
(MAX:8) -+ */ -+#define SSI_PORT_MAX_CH 4 -+/* Number of logical channel in GDD */ -+#define SSI_NUM_LCH 8 -+ -+#define LOG_NAME "OMAP SSI: " -+ -+/** -+ * struct ssi_data - SSI buffer descriptor -+ * @addr: pointer to the buffer where to send or receive data -+ * @size: size in words (32 bits) of the buffer -+ * @lch: associated GDD (DMA) logical channel number, if any -+ */ -+struct ssi_data { -+ u32 *addr; -+ unsigned int size; -+ int lch; -+}; -+ -+/** -+ * struct ssi_channel - SSI channel data -+ * @read_data: Incoming SSI buffer descriptor -+ * @write_data: Outgoing SSI buffer descriptor -+ * @ssi_port: Reference to port where the channel belongs to -+ * @flags: Tracks if channel has been open FIXME -+ * @channel_number: SSI channel number -+ * @rw_lock: Read/Write lock to serialize access to callback and ssi_device -+ * @dev: Reference to the associated ssi_device channel -+ * @write_done: Callback to signal TX completed. -+ * @read_done: Callback to signal RX completed. -+ * @port_event: Callback to signal port events (RX Error, HWBREAK, CAWAKE ...) -+ */ -+struct ssi_channel { -+ struct ssi_data read_data; -+ struct ssi_data write_data; -+ struct ssi_port *ssi_port; -+ u8 flags; -+ u8 channel_number; -+ rwlock_t rw_lock; -+ struct ssi_device *dev; -+ void (*write_done) (struct ssi_device *dev); -+ void (*read_done) (struct ssi_device *dev); -+ void (*port_event)(struct ssi_device *dev, unsigned int event, -+ void *arg); -+}; -+ -+/** -+ * struct ssi_port - ssi port driver data -+ * @ssi_channel: Array of channels in the port -+ * @ssi_controller: Reference to the SSI controller -+ * @port_number: port number -+ * @max_ch: maximum number of channels enabled in the port -+ * @n_irq: SSI irq line use to handle interrupts (0 or 1) -+ * @irq: IRQ number -+ * @cawake_gpio: GPIO number for cawake line (-1 if none) -+ * @cawake_gpio_irq: IRQ number for cawake gpio events -+ * @lock: Serialize access to the port registers and internal data -+ * @ssi_tasklet: Bottom half for interrupts -+ * @cawake_tasklet: Bottom half for cawake events -+ */ -+struct ssi_port { -+ struct ssi_channel ssi_channel[SSI_PORT_MAX_CH]; -+ struct ssi_dev *ssi_controller; -+ u8 flags; -+ u8 port_number; -+ u8 max_ch; -+ u8 n_irq; -+ int irq; -+ int cawake_gpio; -+ int cawake_gpio_irq; -+ spinlock_t lock; -+ struct tasklet_struct ssi_tasklet; -+ struct tasklet_struct cawake_tasklet; -+}; -+ -+/** -+ * struct ssi_dev - ssi controller driver data -+ * @ssi_port: Array of ssi ports enabled in the controller -+ * @id: SSI controller platform id number -+ * @max_p: Number of ports enabled in the controller -+ * @ssi_clk: Reference to the SSI custom clock -+ * @base: SSI registers base virtual address -+ * @lock: Serializes access to internal data and regs -+ * @cawake_clk_enable: Tracks if a cawake event has enable the clocks -+ * @gdd_irq: GDD (DMA) irq number -+ * @gdd_usecount: Holds the number of ongoning DMA transfers -+ * @last_gdd_lch: Last used GDD logical channel -+ * @set_min_bus_tput: (PM) callback to set minimun bus throuput -+ * @clk_notifier_register: (PM) callabck for DVFS support -+ * @clk_notifier_unregister: (PM) callabck for DVFS support -+ * @ssi_nb: (PM) Notification block for DVFS notification chain -+ * @ssi_gdd_tasklet: Bottom half for DMA transfers -+ * @dir: debugfs base directory -+ * @dev: Reference to the SSI platform device -+ */ -+struct ssi_dev { -+ struct ssi_port ssi_port[SSI_MAX_PORTS]; -+ int id; -+ u8 max_p; -+ struct clk *ssi_clk; -+ void __iomem *base; -+ spinlock_t lock; -+ 
unsigned int cawake_clk_enable:1; -+ int gdd_irq; -+ unsigned int gdd_usecount; -+ unsigned int last_gdd_lch; -+ void (*set_min_bus_tput)(struct device *dev, u8 agent_id, -+ unsigned long r); -+ int (*clk_notifier_register)(struct clk *clk, -+ struct notifier_block *nb); -+ int (*clk_notifier_unregister)(struct clk *clk, -+ struct notifier_block *nb); -+ struct notifier_block ssi_nb; -+ struct tasklet_struct ssi_gdd_tasklet; -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *dir; -+#endif -+ struct device *dev; -+}; -+ -+/* SSI Bus */ -+extern struct bus_type ssi_bus_type; -+ -+int ssi_port_event_handler(struct ssi_port *p, unsigned int event, void *arg); -+int ssi_bus_init(void); -+void ssi_bus_exit(void); -+/* End SSI Bus */ -+ -+void ssi_reset_ch_read(struct ssi_channel *ch); -+void ssi_reset_ch_write(struct ssi_channel *ch); -+ -+int ssi_driver_read_interrupt(struct ssi_channel *ssi_channel, u32 *data); -+int ssi_driver_write_interrupt(struct ssi_channel *ssi_channel, u32 *data); -+int ssi_driver_read_dma(struct ssi_channel *ssi_channel, u32 *data, -+ unsigned int count); -+int ssi_driver_write_dma(struct ssi_channel *ssi_channel, u32 *data, -+ unsigned int count); -+ -+void ssi_driver_cancel_write_interrupt(struct ssi_channel *ch); -+void ssi_driver_cancel_read_interrupt(struct ssi_channel *ch); -+void ssi_driver_cancel_write_dma(struct ssi_channel *ch); -+void ssi_driver_cancel_read_dma(struct ssi_channel *ch); -+ -+int ssi_mpu_init(struct ssi_port *ssi_p, const char *irq_name); -+void ssi_mpu_exit(struct ssi_port *ssi_p); -+ -+int ssi_gdd_init(struct ssi_dev *ssi_ctrl, const char *irq_name); -+void ssi_gdd_exit(struct ssi_dev *ssi_ctrl); -+ -+int ssi_cawake_init(struct ssi_port *port, const char *irq_name); -+void ssi_cawake_exit(struct ssi_port *port); -+ -+ -+#ifdef CONFIG_DEBUG_FS -+int ssi_debug_init(void); -+void ssi_debug_exit(void); -+int ssi_debug_add_ctrl(struct ssi_dev *ssi_ctrl); -+void ssi_debug_remove_ctrl(struct ssi_dev *ssi_ctrl); -+#else -+#define ssi_debug_add_ctrl(ssi_ctrl) 0 -+#define ssi_debug_remove_ctrl(ssi_ctrl) -+#define ssi_debug_init() 0 -+#define ssi_debug_exit() -+#endif /* CONFIG_DEBUG_FS */ -+ -+static inline unsigned int ssi_cawake(struct ssi_port *port) -+{ -+ return gpio_get_value(port->cawake_gpio); -+} -+ -+static inline struct ssi_channel *ctrl_get_ch(struct ssi_dev *ssi_ctrl, -+ unsigned int port, unsigned int channel) -+{ -+ return &ssi_ctrl->ssi_port[port - 1].ssi_channel[channel]; -+} -+ -+/* SSI IO access */ -+static inline u32 ssi_inl(void __iomem *base, u32 offset) -+{ -+ return inl(base + offset); -+} -+ -+static inline void ssi_outl(u32 data, void __iomem *base, u32 offset) -+{ -+ outl(data, base + offset); -+} -+ -+static inline void ssi_outl_or(u32 data, void __iomem *base, u32 offset) -+{ -+ u32 tmp = ssi_inl(base, offset); -+ ssi_outl((tmp | data), base, offset); -+} -+ -+static inline void ssi_outl_and(u32 data, void __iomem *base, u32 offset) -+{ -+ u32 tmp = ssi_inl(base, offset); -+ ssi_outl((tmp & data), base, offset); -+} -+ -+static inline u16 ssi_inw(void __iomem *base, u32 offset) -+{ -+ return inw(base + offset); -+} -+ -+static inline void ssi_outw(u16 data, void __iomem *base, u32 offset) -+{ -+ outw(data, base + offset); -+} -+ -+static inline void ssi_outw_or(u16 data, void __iomem *base, u32 offset) -+{ -+ u16 tmp = ssi_inw(base, offset); -+ ssi_outw((tmp | data), base, offset); -+} -+ -+static inline void ssi_outw_and(u16 data, void __iomem *base, u32 offset) -+{ -+ u16 tmp = ssi_inw(base, offset); -+ ssi_outw((tmp & data), 
base, offset); -+} -+ -+#endif /* __SSI_DRIVER_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_if.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_if.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_if.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_if.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,572 @@ -+/* -+ * ssi_driver_if.c -+ * -+ * Implements SSI hardware driver interfaces for the upper layers. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include "ssi_driver.h" -+ -+#define NOT_SET (-1) -+ -+int ssi_set_rx(struct ssi_port *sport, struct ssr_ctx *cfg) -+{ -+ struct ssi_dev *ssi_ctrl = sport->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ int port = sport->port_number; -+ -+ if ((cfg->mode != SSI_MODE_STREAM) && -+ (cfg->mode != SSI_MODE_FRAME) && -+ (cfg->mode != SSI_MODE_SLEEP) && -+ (cfg->mode != NOT_SET)) -+ return -EINVAL; -+ -+ if ((cfg->frame_size > SSI_MAX_FRAME_SIZE) && -+ (cfg->frame_size != NOT_SET)) -+ return -EINVAL; -+ -+ if ((cfg->channels == 0) || -+ ((cfg->channels > SSI_CHANNELS_DEFAULT) && -+ (cfg->channels != NOT_SET))) -+ return -EINVAL; -+ -+ if ((cfg->timeout > SSI_MAX_RX_TIMEOUT) && (cfg->timeout != NOT_SET)) -+ return -EINVAL; -+ -+ if (cfg->mode != NOT_SET) -+ ssi_outl(cfg->mode, base, SSI_SSR_MODE_REG(port)); -+ -+ if (cfg->frame_size != NOT_SET) -+ ssi_outl(cfg->frame_size, base, SSI_SSR_FRAMESIZE_REG(port)); -+ -+ if (cfg->channels != NOT_SET) { -+ if ((cfg->channels & (-cfg->channels)) ^ cfg->channels) -+ return -EINVAL; -+ else -+ ssi_outl(cfg->channels, base, -+ SSI_SSR_CHANNELS_REG(port)); -+ } -+ -+ if (cfg->timeout != NOT_SET) -+ ssi_outl(cfg->timeout, base, SSI_SSR_TIMEOUT_REG(port)); -+ -+ return 0; -+} -+ -+void ssi_get_rx(struct ssi_port *sport, struct ssr_ctx *cfg) -+{ -+ struct ssi_dev *ssi_ctrl = sport->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ int port = sport->port_number; -+ -+ cfg->mode = ssi_inl(base, SSI_SSR_MODE_REG(port)); -+ cfg->frame_size = ssi_inl(base, SSI_SSR_FRAMESIZE_REG(port)); -+ cfg->channels = ssi_inl(base, SSI_SSR_CHANNELS_REG(port)); -+ cfg->timeout = ssi_inl(base, SSI_SSR_TIMEOUT_REG(port)); -+} -+ -+int ssi_set_tx(struct ssi_port *sport, struct sst_ctx *cfg) -+{ -+ struct ssi_dev *ssi_ctrl = sport->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ int port = sport->port_number; -+ -+ if ((cfg->mode != SSI_MODE_STREAM) && -+ (cfg->mode != SSI_MODE_FRAME) && -+ (cfg->mode != NOT_SET)) -+ return -EINVAL; -+ -+ if ((cfg->frame_size > SSI_MAX_FRAME_SIZE) && -+ (cfg->frame_size != NOT_SET)) -+ return -EINVAL; -+ -+ if ((cfg->channels == 0) || -+ ((cfg->channels > SSI_CHANNELS_DEFAULT) && -+ (cfg->channels != NOT_SET))) -+ return -EINVAL; -+ -+ if 
((cfg->divisor > SSI_MAX_TX_DIVISOR) && (cfg->divisor != NOT_SET)) -+ return -EINVAL; -+ -+ if ((cfg->arb_mode != SSI_ARBMODE_ROUNDROBIN) && -+ (cfg->arb_mode != SSI_ARBMODE_PRIORITY) && -+ (cfg->mode != NOT_SET)) -+ return -EINVAL; -+ -+ if (cfg->mode != NOT_SET) -+ ssi_outl(cfg->channels, base, SSI_SST_CHANNELS_REG(port)); -+ -+ if (cfg->frame_size != NOT_SET) -+ ssi_outl(cfg->frame_size, base, SSI_SST_FRAMESIZE_REG(port)); -+ -+ if (cfg->channels != NOT_SET) { -+ if ((cfg->channels & (-cfg->channels)) ^ cfg->channels) -+ return -EINVAL; -+ else -+ ssi_outl(cfg->mode, base, SSI_SST_MODE_REG(port)); -+ } -+ -+ if (cfg->divisor != NOT_SET) -+ ssi_outl(cfg->divisor, base, SSI_SST_DIVISOR_REG(port)); -+ -+ if (cfg->arb_mode != NOT_SET) -+ ssi_outl(cfg->arb_mode, base, SSI_SST_ARBMODE_REG(port)); -+ -+ return 0; -+} -+ -+void ssi_get_tx(struct ssi_port *sport, struct sst_ctx *cfg) -+{ -+ struct ssi_dev *ssi_ctrl = sport->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ int port = sport->port_number; -+ -+ cfg->mode = ssi_inl(base, SSI_SST_MODE_REG(port)); -+ cfg->frame_size = ssi_inl(base, SSI_SST_FRAMESIZE_REG(port)); -+ cfg->channels = ssi_inl(base, SSI_SST_CHANNELS_REG(port)); -+ cfg->divisor = ssi_inl(base, SSI_SST_DIVISOR_REG(port)); -+ cfg->arb_mode = ssi_inl(base, SSI_SST_ARBMODE_REG(port)); -+} -+ -+/** -+ * ssi_open - open a ssi device channel. -+ * @dev - Reference to the ssi device channel to be openned. -+ * -+ * Returns 0 on success, -EINVAL on bad parameters, -EBUSY if is already opened. -+ */ -+int ssi_open(struct ssi_device *dev) -+{ -+ struct ssi_channel *ch; -+ struct ssi_port *port; -+ struct ssi_dev *ssi_ctrl; -+ -+ if (!dev || !dev->ch) { -+ pr_err(LOG_NAME "Wrong SSI device %p\n", dev); -+ return -EINVAL; -+ } -+ -+ ch = dev->ch; -+ if (!ch->read_done || !ch->write_done) { -+ dev_err(&dev->device, "Trying to open with no (read/write) " -+ "callbacks registered\n"); -+ return -EINVAL; -+ } -+ port = ch->ssi_port; -+ ssi_ctrl = port->ssi_controller; -+ spin_lock_bh(&ssi_ctrl->lock); -+ if (ch->flags & SSI_CH_OPEN) { -+ dev_err(&dev->device, "Port %d Channel %d already OPENED\n", -+ dev->n_p, dev->n_ch); -+ spin_unlock_bh(&ssi_ctrl->lock); -+ return -EBUSY; -+ } -+ clk_enable(ssi_ctrl->ssi_clk); -+ ch->flags |= SSI_CH_OPEN; -+ ssi_outl_or(SSI_ERROROCCURED | SSI_BREAKDETECTED, ssi_ctrl->base, -+ SSI_SYS_MPU_ENABLE_REG(port->port_number, port->n_irq)); -+ clk_disable(ssi_ctrl->ssi_clk); -+ spin_unlock_bh(&ssi_ctrl->lock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(ssi_open); -+ -+/** -+ * ssi_write - write data into the ssi device channel -+ * @dev - reference to the ssi device channel to write into. -+ * @data - pointer to a 32-bit word data to be written. -+ * @count - number of 32-bit word to be written. -+ * -+ * Return 0 on sucess, a negative value on failure. -+ * A success values only indicates that the request has been accepted. -+ * Transfer is only completed when the write_done callback is called. 
-+ * -+ */ -+int ssi_write(struct ssi_device *dev, u32 *data, unsigned int count) -+{ -+ struct ssi_channel *ch; -+ int err; -+ -+ if (unlikely(!dev || !dev->ch || !data || (count <= 0))) { -+ dev_err(&dev->device, "Wrong paramenters " -+ "ssi_device %p data %p count %d", dev, data, count); -+ return -EINVAL; -+ } -+ if (unlikely(!(dev->ch->flags & SSI_CH_OPEN))) { -+ dev_err(&dev->device, "SSI device NOT open\n"); -+ return -EINVAL; -+ } -+ -+ ch = dev->ch; -+ spin_lock_bh(&ch->ssi_port->ssi_controller->lock); -+ ch->write_data.addr = data; -+ ch->write_data.size = count; -+ -+ if (count == 1) -+ err = ssi_driver_write_interrupt(ch, data); -+ else -+ err = ssi_driver_write_dma(ch, data, count); -+ -+ if (unlikely(err < 0)) { -+ ch->write_data.addr = NULL; -+ ch->write_data.size = 0; -+ } -+ spin_unlock_bh(&ch->ssi_port->ssi_controller->lock); -+ -+ return err; -+ -+} -+EXPORT_SYMBOL(ssi_write); -+ -+/** -+ * ssi_read - read data from the ssi device channel -+ * @dev - ssi device channel reference to read data from. -+ * @data - pointer to a 32-bit word data to store the data. -+ * @count - number of 32-bit word to be stored. -+ * -+ * Return 0 on sucess, a negative value on failure. -+ * A success values only indicates that the request has been accepted. -+ * Data is only available in the buffer when the read_done callback is called. -+ * -+ */ -+int ssi_read(struct ssi_device *dev, u32 *data, unsigned int count) -+{ -+ struct ssi_channel *ch; -+ int err; -+ -+ if (unlikely(!dev || !dev->ch || !data || (count <= 0))) { -+ dev_err(&dev->device, "Wrong paramenters " -+ "ssi_device %p data %p count %d", dev, data, count); -+ return -EINVAL; -+ } -+ if (unlikely(!(dev->ch->flags & SSI_CH_OPEN))) { -+ dev_err(&dev->device, "SSI device NOT open\n"); -+ return -EINVAL; -+ } -+ -+ ch = dev->ch; -+ spin_lock_bh(&ch->ssi_port->ssi_controller->lock); -+ ch->read_data.addr = data; -+ ch->read_data.size = count; -+ -+ if (count == 1) -+ err = ssi_driver_read_interrupt(ch, data); -+ else -+ err = ssi_driver_read_dma(ch, data, count); -+ -+ if (unlikely(err < 0)) { -+ ch->read_data.addr = NULL; -+ ch->read_data.size = 0; -+ } -+ spin_unlock_bh(&ch->ssi_port->ssi_controller->lock); -+ -+ return err; -+} -+EXPORT_SYMBOL(ssi_read); -+ -+void __ssi_write_cancel(struct ssi_channel *ch) -+{ -+ if (ch->write_data.size == 1) -+ ssi_driver_cancel_write_interrupt(ch); -+ else if (ch->write_data.size > 1) -+ ssi_driver_cancel_write_dma(ch); -+ -+} -+/** -+ * ssi_write_cancel - Cancel pending write request. -+ * @dev - ssi device channel where to cancel the pending write. -+ * -+ * write_done() callback will not be called after sucess of this function. -+ */ -+void ssi_write_cancel(struct ssi_device *dev) -+{ -+ if (unlikely(!dev || !dev->ch)) { -+ pr_err(LOG_NAME "Wrong SSI device %p\n", dev); -+ return; -+ } -+ if (unlikely(!(dev->ch->flags & SSI_CH_OPEN))) { -+ dev_err(&dev->device, "SSI device NOT open\n"); -+ return; -+ } -+ -+ spin_lock_bh(&dev->ch->ssi_port->ssi_controller->lock); -+ __ssi_write_cancel(dev->ch); -+ spin_unlock_bh(&dev->ch->ssi_port->ssi_controller->lock); -+} -+EXPORT_SYMBOL(ssi_write_cancel); -+ -+void __ssi_read_cancel(struct ssi_channel *ch) -+{ -+ if (ch->read_data.size == 1) -+ ssi_driver_cancel_read_interrupt(ch); -+ else if (ch->read_data.size > 1) -+ ssi_driver_cancel_read_dma(ch); -+} -+ -+/** -+ * ssi_read_cancel - Cancel pending read request. -+ * @dev - ssi device channel where to cancel the pending read. 
-+ * -+ * read_done() callback will not be called after sucess of this function. -+ */ -+void ssi_read_cancel(struct ssi_device *dev) -+{ -+ if (unlikely(!dev || !dev->ch)) { -+ pr_err(LOG_NAME "Wrong SSI device %p\n", dev); -+ return; -+ } -+ -+ if (unlikely(!(dev->ch->flags & SSI_CH_OPEN))) { -+ dev_err(&dev->device, "SSI device NOT open\n"); -+ return; -+ } -+ -+ spin_lock_bh(&dev->ch->ssi_port->ssi_controller->lock); -+ __ssi_read_cancel(dev->ch); -+ spin_unlock_bh(&dev->ch->ssi_port->ssi_controller->lock); -+ -+} -+EXPORT_SYMBOL(ssi_read_cancel); -+ -+/** -+ * ssi_poll - SSI poll -+ * @dev - ssi device channel reference to apply the I/O control -+ * (or port associated to it) -+ * -+ * Return 0 on sucess, a negative value on failure. -+ * -+ */ -+int ssi_poll(struct ssi_device *dev) -+{ -+ struct ssi_channel *ch; -+ int err; -+ -+ if (unlikely(!dev || !dev->ch)) -+ return -EINVAL; -+ -+ if (unlikely(!(dev->ch->flags & SSI_CH_OPEN))) { -+ dev_err(&dev->device, "SSI device NOT open\n"); -+ return -EINVAL; -+ } -+ -+ ch = dev->ch; -+ spin_lock_bh(&ch->ssi_port->ssi_controller->lock); -+ ch->flags |= SSI_CH_RX_POLL; -+ err = ssi_driver_read_interrupt(ch, NULL); -+ spin_unlock_bh(&ch->ssi_port->ssi_controller->lock); -+ -+ return err; -+} -+EXPORT_SYMBOL(ssi_poll); -+ -+ -+/** -+ * ssi_ioctl - SSI I/O control -+ * @dev - ssi device channel reference to apply the I/O control -+ * (or port associated to it) -+ * @command - SSI I/O control command -+ * @arg - parameter associated to the control command. NULL, if no parameter. -+ * -+ * Return 0 on sucess, a negative value on failure. -+ * -+ */ -+int ssi_ioctl(struct ssi_device *dev, unsigned int command, void *arg) -+{ -+ struct ssi_channel *ch; -+ struct ssi_dev *ssi_ctrl; -+ void __iomem *base; -+ unsigned int port, channel; -+ u32 wake; -+ u32 v; -+ int err = 0; -+ -+ if (unlikely((!dev) || -+ (!dev->ch) || -+ (!dev->ch->ssi_port) || -+ (!dev->ch->ssi_port->ssi_controller)) || -+ (!(dev->ch->flags & SSI_CH_OPEN))) { -+ pr_err(LOG_NAME "SSI IOCTL Invalid parameter\n"); -+ return -EINVAL; -+ } -+ -+ -+ ch = dev->ch; -+ ssi_ctrl = ch->ssi_port->ssi_controller; -+ port = ch->ssi_port->port_number; -+ channel = ch->channel_number; -+ base = ssi_ctrl->base; -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ switch (command) { -+ case SSI_IOCTL_WAKE_UP: -+ /* We only claim once the wake line per channel */ -+ wake = ssi_inl(base, SSI_SYS_WAKE_REG(port)); -+ if (!(wake & SSI_WAKE(channel))) { -+ clk_enable(ssi_ctrl->ssi_clk); -+ ssi_outl(SSI_WAKE(channel), base, -+ SSI_SYS_SET_WAKE_REG(port)); -+ } -+ break; -+ case SSI_IOCTL_WAKE_DOWN: -+ wake = ssi_inl(base, SSI_SYS_WAKE_REG(port)); -+ if ((wake & SSI_WAKE(channel))) { -+ ssi_outl(SSI_WAKE(channel), base, -+ SSI_SYS_CLEAR_WAKE_REG(port)); -+ clk_disable(ssi_ctrl->ssi_clk); -+ } -+ break; -+ case SSI_IOCTL_SEND_BREAK: -+ ssi_outl(1, base, SSI_SST_BREAK_REG(port)); -+ break; -+ case SSI_IOCTL_WAKE: -+ if (arg == NULL) -+ err = -EINVAL; -+ else -+ *(u32 *)arg = ssi_inl(base, SSI_SYS_WAKE_REG(port)); -+ break; -+ case SSI_IOCTL_FLUSH_RX: -+ ssi_outl(0, base, SSI_SSR_RXSTATE_REG(port)); -+ break; -+ case SSI_IOCTL_FLUSH_TX: -+ ssi_outl(0, base, SSI_SST_TXSTATE_REG(port)); -+ break; -+ case SSI_IOCTL_CAWAKE: -+ if (!arg) { -+ err = -EINVAL; -+ goto out; -+ } -+ if (dev->ch->ssi_port->cawake_gpio < 0) { -+ err = -ENODEV; -+ goto out; -+ } -+ *(unsigned int *)arg = ssi_cawake(dev->ch->ssi_port); -+ break; -+ case SSI_IOCTL_SET_RX: -+ if (!arg) { -+ err = -EINVAL; -+ goto out; -+ } -+ err = 
ssi_set_rx(dev->ch->ssi_port, (struct ssr_ctx *)arg); -+ break; -+ case SSI_IOCTL_GET_RX: -+ if (!arg) { -+ err = -EINVAL; -+ goto out; -+ } -+ ssi_get_rx(dev->ch->ssi_port, (struct ssr_ctx *)arg); -+ break; -+ case SSI_IOCTL_SET_TX: -+ if (!arg) { -+ err = -EINVAL; -+ goto out; -+ } -+ err = ssi_set_tx(dev->ch->ssi_port, (struct sst_ctx *)arg); -+ break; -+ case SSI_IOCTL_GET_TX: -+ if (!arg) { -+ err = -EINVAL; -+ goto out; -+ } -+ ssi_get_tx(dev->ch->ssi_port, (struct sst_ctx *)arg); -+ case SSI_IOCTL_TX_CH_FULL: -+ if (!arg) { -+ err = -EINVAL; -+ goto out; -+ } -+ v = ssi_inl(base, SSI_SST_BUFSTATE_REG(port)); -+ *(unsigned int *)arg = v & (1 << channel); -+ break; -+ case SSI_IOCTL_CH_DATAACCEPT: -+ ssi_driver_write_interrupt(dev->ch, NULL); -+ break; -+ default: -+ err = -ENOIOCTLCMD; -+ break; -+ } -+out: -+ clk_disable(ssi_ctrl->ssi_clk); -+ -+ return err; -+} -+EXPORT_SYMBOL(ssi_ioctl); -+ -+/** -+ * ssi_close - close given ssi device channel -+ * @dev - reference to ssi device channel. -+ */ -+void ssi_close(struct ssi_device *dev) -+{ -+ if (!dev || !dev->ch) { -+ pr_err(LOG_NAME "Trying to close wrong SSI device %p\n", dev); -+ return; -+ } -+ -+ spin_lock_bh(&dev->ch->ssi_port->ssi_controller->lock); -+ if (dev->ch->flags & SSI_CH_OPEN) { -+ dev->ch->flags &= ~SSI_CH_OPEN; -+ __ssi_write_cancel(dev->ch); -+ __ssi_read_cancel(dev->ch); -+ } -+ spin_unlock_bh(&dev->ch->ssi_port->ssi_controller->lock); -+} -+EXPORT_SYMBOL(ssi_close); -+ -+/** -+ * ssi_set_read_cb - register read_done() callback. -+ * @dev - reference to ssi device channel where the callback is associated to. -+ * @read_cb - callback to signal read transfer completed. -+ * -+ * NOTE: Write callback must be only set when channel is not open ! -+ */ -+void ssi_set_read_cb(struct ssi_device *dev, -+ void (*read_cb)(struct ssi_device *dev)) -+{ -+ dev->ch->read_done = read_cb; -+} -+EXPORT_SYMBOL(ssi_set_read_cb); -+ -+/** -+ * ssi_set_read_cb - register write_done() callback. -+ * @dev - reference to ssi device channel where the callback is associated to. -+ * @write_cb - callback to signal read transfer completed. -+ * -+ * NOTE: Read callback must be only set when channel is not open ! -+ */ -+void ssi_set_write_cb(struct ssi_device *dev, -+ void (*write_cb)(struct ssi_device *dev)) -+{ -+ dev->ch->write_done = write_cb; -+} -+EXPORT_SYMBOL(ssi_set_write_cb); -+ -+/** -+ * ssi_set_port_event_cb - register port_event callback. -+ * @dev - reference to ssi device channel where the callback is associated to. -+ * @port_event_cb - callback to signal events from the channel port. -+ */ -+void ssi_set_port_event_cb(struct ssi_device *dev, -+ void (*port_event_cb)(struct ssi_device *dev, -+ unsigned int event, void *arg)) -+{ -+ write_lock_bh(&dev->ch->rw_lock); -+ dev->ch->port_event = port_event_cb; -+ write_unlock_bh(&dev->ch->rw_lock); -+} -+EXPORT_SYMBOL(ssi_set_port_event_cb); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_int.c linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_int.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi/ssi_driver_int.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi/ssi_driver_int.c 2011-06-22 13:19:32.763063277 +0200 -@@ -0,0 +1,284 @@ -+/* -+ * ssi_driver_int.c -+ * -+ * Implements SSI interrupt functionality. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. 
-+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#include "ssi_driver.h" -+ -+void ssi_reset_ch_read(struct ssi_channel *ch) -+{ -+ struct ssi_port *p = ch->ssi_port; -+ struct ssi_dev *ssi_ctrl = p->ssi_controller; -+ unsigned int channel = ch->channel_number; -+ void __iomem *base = ssi_ctrl->base; -+ unsigned int port = p->port_number; -+ unsigned int irq = p->n_irq; -+ -+ ch->read_data.addr = NULL; -+ ch->read_data.size = 0; -+ ch->read_data.lch = -1; -+ -+ ssi_outl(SSI_SSR_DATAAVAILABLE(channel), base, -+ SSI_SYS_MPU_STATUS_REG(port, irq)); -+} -+ -+void ssi_reset_ch_write(struct ssi_channel *ch) -+{ -+ ch->write_data.addr = NULL; -+ ch->write_data.size = 0; -+ ch->write_data.lch = -1; -+} -+ -+int ssi_driver_write_interrupt(struct ssi_channel *ch, u32 *data) -+{ -+ struct ssi_port *p = ch->ssi_port; -+ unsigned int port = p->port_number; -+ unsigned int channel = ch->channel_number; -+ -+ clk_enable(p->ssi_controller->ssi_clk); -+ ssi_outl_or(SSI_SST_DATAACCEPT(channel), p->ssi_controller->base, -+ SSI_SYS_MPU_ENABLE_REG(port, p->n_irq)); -+ -+ return 0; -+} -+ -+int ssi_driver_read_interrupt(struct ssi_channel *ch, u32 *data) -+{ -+ struct ssi_port *p = ch->ssi_port; -+ unsigned int port = p->port_number; -+ unsigned int channel = ch->channel_number; -+ -+ clk_enable(p->ssi_controller->ssi_clk); -+ -+ ssi_outl_or(SSI_SSR_DATAAVAILABLE(channel), p->ssi_controller->base, -+ SSI_SYS_MPU_ENABLE_REG(port, p->n_irq)); -+ -+ clk_disable(p->ssi_controller->ssi_clk); -+ -+ return 0; -+} -+ -+void ssi_driver_cancel_write_interrupt(struct ssi_channel *ch) -+{ -+ struct ssi_port *p = ch->ssi_port; -+ unsigned int port = p->port_number; -+ unsigned int channel = ch->channel_number; -+ void __iomem *base = p->ssi_controller->base; -+ u32 enable; -+ -+ clk_enable(p->ssi_controller->ssi_clk); -+ -+ enable = ssi_inl(base, SSI_SYS_MPU_ENABLE_REG(port, p->n_irq)); -+ if (!(enable & SSI_SST_DATAACCEPT(channel))) { -+ dev_dbg(&ch->dev->device, LOG_NAME "Write cancel on not " -+ "enabled channel %d ENABLE REG 0x%08X", channel, enable); -+ clk_disable(p->ssi_controller->ssi_clk); -+ return; -+ } -+ ssi_outl_and(~SSI_SST_DATAACCEPT(channel), base, -+ SSI_SYS_MPU_ENABLE_REG(port, p->n_irq)); -+ ssi_outl_and(~NOTFULL(channel), base, SSI_SST_BUFSTATE_REG(port)); -+ ssi_reset_ch_write(ch); -+ -+ clk_disable(p->ssi_controller->ssi_clk); -+ clk_disable(p->ssi_controller->ssi_clk); -+} -+ -+void ssi_driver_cancel_read_interrupt(struct ssi_channel *ch) -+{ -+ struct ssi_port *p = ch->ssi_port; -+ unsigned int port = p->port_number; -+ unsigned int channel = ch->channel_number; -+ void __iomem *base = p->ssi_controller->base; -+ -+ clk_enable(p->ssi_controller->ssi_clk); -+ -+ ssi_outl_and(~SSI_SSR_DATAAVAILABLE(channel), base, -+ SSI_SYS_MPU_ENABLE_REG(port, p->n_irq)); -+ ssi_outl_and(~NOTEMPTY(channel), base, SSI_SSR_BUFSTATE_REG(port)); -+ 
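A side note on the clock accounting in ssi_driver_int.c: ssi_driver_write_interrupt() takes a clk_enable() reference that is deliberately not dropped until the transfer completes (in do_channel_tx() below) or is cancelled, which appears to be why ssi_driver_cancel_write_interrupt() ends with two clk_disable() calls: one for its own temporary reference and one for the pending write. A minimal stand-alone model of that pairing, with the register and the clk API replaced by plain counters (every name below is a stand-in, nothing here is part of the patch):

#include <stdio.h>

/* Stand-ins for the enable register and the clk framework refcount. */
static unsigned int enable_mask;        /* models SSI_SYS_MPU_ENABLE_REG  */
static int clk_refs;                    /* models the ssi_clk enable count */

static void clk_enable(void)  { clk_refs++; }
static void clk_disable(void) { clk_refs--; }

static void write_interrupt(int ch)     /* start a TX, keep a clock reference */
{
        clk_enable();
        enable_mask |= 1u << ch;
}

static void channel_tx_done(int ch)     /* completion path */
{
        enable_mask &= ~(1u << ch);
        clk_disable();                  /* drop the reference taken at start */
}

static void cancel_write_interrupt(int ch)
{
        clk_enable();                   /* temporary reference while touching "registers" */
        if (!(enable_mask & (1u << ch))) {
                clk_disable();
                return;                 /* nothing was pending */
        }
        enable_mask &= ~(1u << ch);
        clk_disable();                  /* our temporary reference */
        clk_disable();                  /* the reference held by the pending write */
}

int main(void)
{
        write_interrupt(2);
        channel_tx_done(2);
        printf("after completion: mask=0x%x clk_refs=%d\n", enable_mask, clk_refs);
        write_interrupt(3);
        cancel_write_interrupt(3);
        printf("after cancel:     mask=0x%x clk_refs=%d\n", enable_mask, clk_refs);
        return 0;
}

Both printouts should end with clk_refs=0; that balance is what allows the SSI functional clock to be gated again once the port goes idle.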
ssi_reset_ch_read(ch); -+ -+ clk_disable(p->ssi_controller->ssi_clk); -+} -+ -+static void do_channel_tx(struct ssi_channel *ch) -+{ -+ struct ssi_dev *ssi_ctrl = ch->ssi_port->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ unsigned int n_ch; -+ unsigned int n_p; -+ unsigned int irq; -+ -+ n_ch = ch->channel_number; -+ n_p = ch->ssi_port->port_number; -+ irq = ch->ssi_port->n_irq; -+ -+ spin_lock(&ssi_ctrl->lock); -+ -+ if (ch->write_data.addr == NULL) { -+ ssi_outl_and(~SSI_SST_DATAACCEPT(n_ch), base, -+ SSI_SYS_MPU_ENABLE_REG(n_p, irq)); -+ ssi_reset_ch_write(ch); -+ spin_unlock(&ssi_ctrl->lock); -+ clk_disable(ssi_ctrl->ssi_clk); -+ (*ch->write_done)(ch->dev); -+ } else { -+ ssi_outl(*(ch->write_data.addr), base, -+ SSI_SST_BUFFER_CH_REG(n_p, n_ch)); -+ ch->write_data.addr = NULL; -+ spin_unlock(&ssi_ctrl->lock); -+ } -+} -+ -+static void do_channel_rx(struct ssi_channel *ch) -+{ -+ struct ssi_dev *ssi_ctrl = ch->ssi_port->ssi_controller; -+ void __iomem *base = ch->ssi_port->ssi_controller->base; -+ unsigned int n_ch; -+ unsigned int n_p; -+ unsigned int irq; -+ int rx_poll = 0; -+ int data_read = 0; -+ -+ n_ch = ch->channel_number; -+ n_p = ch->ssi_port->port_number; -+ irq = ch->ssi_port->n_irq; -+ -+ spin_lock(&ssi_ctrl->lock); -+ -+ if (ch->flags & SSI_CH_RX_POLL) -+ rx_poll = 1; -+ -+ if (ch->read_data.addr) { -+ data_read = 1; -+ *(ch->read_data.addr) = ssi_inl(base, -+ SSI_SSR_BUFFER_CH_REG(n_p, n_ch)); -+ } -+ -+ ssi_outl_and(~SSI_SSR_DATAAVAILABLE(n_ch), base, -+ SSI_SYS_MPU_ENABLE_REG(n_p, irq)); -+ ssi_reset_ch_read(ch); -+ -+ spin_unlock(&ssi_ctrl->lock); -+ -+ if (rx_poll) -+ ssi_port_event_handler(ch->ssi_port, -+ SSI_EVENT_SSR_DATAAVAILABLE, -+ (void *)n_ch); -+ -+ if (data_read) -+ (*ch->read_done)(ch->dev); -+} -+ -+static void do_ssi_tasklet(unsigned long ssi_port) -+{ -+ struct ssi_port *pport = (struct ssi_port *)ssi_port; -+ struct ssi_dev *ssi_ctrl = pport->ssi_controller; -+ void __iomem *base = ssi_ctrl->base; -+ unsigned int port = pport->port_number; -+ unsigned int channel; -+ unsigned int irq = pport->n_irq; -+ u32 channels_served = 0; -+ u32 status_reg; -+ u32 ssr_err_reg; -+ -+ clk_enable(ssi_ctrl->ssi_clk); -+ -+ status_reg = ssi_inl(base, SSI_SYS_MPU_STATUS_REG(port, irq)); -+ status_reg &= ssi_inl(base, SSI_SYS_MPU_ENABLE_REG(port, irq)); -+ -+ for (channel = 0; channel < pport->max_ch; channel++) { -+ if (status_reg & SSI_SST_DATAACCEPT(channel)) { -+ do_channel_tx(&pport->ssi_channel[channel]); -+ channels_served |= SSI_SST_DATAACCEPT(channel); -+ } -+ -+ if (status_reg & SSI_SSR_DATAAVAILABLE(channel)) { -+ do_channel_rx(&pport->ssi_channel[channel]); -+ channels_served |= SSI_SSR_DATAAVAILABLE(channel); -+ } -+ } -+ -+ if (status_reg & SSI_BREAKDETECTED) { -+ dev_info(ssi_ctrl->dev, "Hardware BREAK on port %d\n", port); -+ ssi_outl(0, base, SSI_SSR_BREAK_REG(port)); -+ ssi_port_event_handler(pport, SSI_EVENT_BREAK_DETECTED, NULL); -+ channels_served |= SSI_BREAKDETECTED; -+ } -+ -+ if (status_reg & SSI_ERROROCCURED) { -+ ssr_err_reg = ssi_inl(base, SSI_SSR_ERROR_REG(port)); -+ dev_err(ssi_ctrl->dev, "SSI ERROR Port %d: 0x%02x\n", -+ port, ssr_err_reg); -+ ssi_outl(ssr_err_reg, base, SSI_SSR_ERRORACK_REG(port)); -+ if (ssr_err_reg) /* Ignore spurios errors */ -+ ssi_port_event_handler(pport, SSI_EVENT_ERROR, NULL); -+ else -+ dev_dbg(ssi_ctrl->dev, "spurious SSI error!\n"); -+ -+ channels_served |= SSI_ERROROCCURED; -+ } -+ -+ ssi_outl(channels_served, base, SSI_SYS_MPU_STATUS_REG(port, irq)); -+ -+ status_reg = ssi_inl(base, 
SSI_SYS_MPU_STATUS_REG(port, irq)); -+ status_reg &= ssi_inl(base, SSI_SYS_MPU_ENABLE_REG(port, irq)); -+ -+ clk_disable(ssi_ctrl->ssi_clk); -+ -+ if (status_reg) -+ tasklet_hi_schedule(&pport->ssi_tasklet); -+ else -+ enable_irq(pport->irq); -+} -+ -+static irqreturn_t ssi_mpu_handler(int irq, void *ssi_port) -+{ -+ struct ssi_port *p = ssi_port; -+ -+ tasklet_hi_schedule(&p->ssi_tasklet); -+ disable_irq_nosync(p->irq); -+ -+ return IRQ_HANDLED; -+} -+ -+int __init ssi_mpu_init(struct ssi_port *ssi_p, const char *irq_name) -+{ -+ int err; -+ -+ tasklet_init(&ssi_p->ssi_tasklet, do_ssi_tasklet, -+ (unsigned long)ssi_p); -+ err = request_irq(ssi_p->irq, ssi_mpu_handler, IRQF_DISABLED, -+ irq_name, ssi_p); -+ if (err < 0) { -+ dev_err(ssi_p->ssi_controller->dev, "FAILED to MPU request" -+ " IRQ (%d) on port %d", ssi_p->irq, ssi_p->port_number); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+void ssi_mpu_exit(struct ssi_port *ssi_p) -+{ -+ tasklet_disable(&ssi_p->ssi_tasklet); -+ free_irq(ssi_p->irq, ssi_p); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/Kconfig linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/Kconfig ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/Kconfig 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,18 @@ -+# -+# OMAP SSI kernel configuration -+# -+ -+config SSI_CHAR -+ tristate "SSI character driver" -+ depends on OMAP_SSI -+ ---help--- -+ If you say Y here, you will enable the CMT character driver. -+ This driver provides a simple character device interface for -+ serial communication with the cellular modem over the SSI bus. -+ -+config SSI_CHAR_DEBUG -+ bool "Debug CMT character driver" -+ depends on SSI_CHAR && DEBUG_KERNEL -+ default n -+ ---help--- -+ Enable the debug information in the CMT character driver. -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/Makefile linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/Makefile ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/Makefile 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,8 @@ -+# -+# Makefile for SSI CHAR driver -+# -+#EXTRA_CFLAGS := -I$(src)/../../../include -+ -+obj-$(CONFIG_SSI_CHAR) += ssi_char.o -+ -+ssi_char-objs := ssi-char.o ssi-if.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-char.c linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-char.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-char.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-char.c 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,493 @@ -+/* -+ * ssi-char.c -+ * -+ * SSI character device driver, implements the character device -+ * interface. -+ * -+ * Copyright (C) 2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Andras Domokos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "ssi-char-debug.h" -+#include "ssi-char.h" -+ -+#define DRIVER_VERSION "0.1.0" -+ -+static unsigned int port = 1; -+module_param(port, uint, 1); -+MODULE_PARM_DESC(port, "SSI port to be probed"); -+ -+static unsigned int channels_map[SSI_MAX_CHAR_DEVS] = {1}; -+module_param_array(channels_map, uint, NULL, 0); -+MODULE_PARM_DESC(channels_map, "SSI channels to be probed"); -+ -+dev_t ssi_char_dev; -+ -+struct char_queue { -+ struct list_head list; -+ u32 *data; -+ unsigned int count; -+}; -+ -+struct ssi_char { -+ unsigned int opened; -+ int poll_event; -+ struct list_head rx_queue; -+ struct list_head tx_queue; -+ spinlock_t lock; -+ struct fasync_struct *async_queue; -+ wait_queue_head_t rx_wait; -+ wait_queue_head_t tx_wait; -+ wait_queue_head_t poll_wait; -+}; -+ -+static struct ssi_char ssi_char_data[SSI_MAX_CHAR_DEVS]; -+ -+void if_notify(int ch, struct ssi_event *ev) -+{ -+ struct char_queue *entry; -+ -+ spin_lock(&ssi_char_data[ch].lock); -+ -+ if (!ssi_char_data[ch].opened) { -+ printk(KERN_DEBUG "device not opened\n!"); -+ spin_unlock(&ssi_char_data[ch].lock); -+ return; -+ } -+ -+ switch (SSI_EV_TYPE(ev->event)) { -+ case SSI_EV_IN: -+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); -+ if (!entry) { -+ pr_err("SSI-CHAR: entry allocation failed.\n"); -+ spin_unlock(&ssi_char_data[ch].lock); -+ return; -+ } -+ entry->data = ev->data; -+ entry->count = ev->count; -+ list_add_tail(&entry->list, &ssi_char_data[ch].rx_queue); -+ spin_unlock(&ssi_char_data[ch].lock); -+ wake_up_interruptible(&ssi_char_data[ch].rx_wait); -+ break; -+ case SSI_EV_OUT: -+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); -+ if (!entry) { -+ pr_err("SSI-CHAR: entry allocation failed.\n"); -+ spin_unlock(&ssi_char_data[ch].lock); -+ return; -+ } -+ entry->data = ev->data; -+ entry->count = ev->count; -+ ssi_char_data[ch].poll_event |= (POLLOUT | POLLWRNORM); -+ list_add_tail(&entry->list, &ssi_char_data[ch].tx_queue); -+ spin_unlock(&ssi_char_data[ch].lock); -+ wake_up_interruptible(&ssi_char_data[ch].tx_wait); -+ break; -+ case SSI_EV_EXCEP: -+ ssi_char_data[ch].poll_event |= POLLPRI; -+ spin_unlock(&ssi_char_data[ch].lock); -+ wake_up_interruptible(&ssi_char_data[ch].poll_wait); -+ break; -+ case SSI_EV_AVAIL: -+ ssi_char_data[ch].poll_event |= (POLLIN | POLLRDNORM); -+ spin_unlock(&ssi_char_data[ch].lock); -+ wake_up_interruptible(&ssi_char_data[ch].poll_wait); -+ break; -+ default: -+ spin_unlock(&ssi_char_data[ch].lock); -+ break; -+ } -+} -+ -+ -+static int ssi_char_fasync(int fd, struct file *file, int on) -+{ -+ int ch = (int)file->private_data; -+ if (fasync_helper(fd, file, on, &ssi_char_data[ch].async_queue) >= 0) -+ return 0; -+ else -+ return -EIO; -+} -+ -+ -+static unsigned int ssi_char_poll(struct file *file, poll_table *wait) -+{ -+ int ch = (int)file->private_data; -+ unsigned int ret = 0; -+ -+ poll_wait(file, &ssi_char_data[ch].poll_wait, wait); -+ poll_wait(file, &ssi_char_data[ch].tx_wait, wait); -+ spin_lock_bh(&ssi_char_data[ch].lock); -+ ret = ssi_char_data[ch].poll_event; -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ -+ return ret; -+} -+ -+ -+static 
ssize_t ssi_char_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ int ch = (int)file->private_data; -+ DECLARE_WAITQUEUE(wait, current); -+ u32 *data; -+ unsigned int data_len; -+ struct char_queue *entry; -+ ssize_t ret; -+ -+ /* only 32bit data is supported for now */ -+ if ((count < 4) || (count & 3)) -+ return -EINVAL; -+ -+ data = kmalloc(count, GFP_ATOMIC); -+ -+ ret = if_ssi_read(ch, data, count); -+ if (ret < 0) { -+ kfree(data); -+ goto out2; -+ } -+ -+ add_wait_queue(&ssi_char_data[ch].rx_wait, &wait); -+ -+ for ( ; ; ) { -+ data = NULL; -+ data_len = 0; -+ -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ spin_lock_bh(&ssi_char_data[ch].lock); -+ if (!list_empty(&ssi_char_data[ch].rx_queue)) { -+ entry = list_entry(ssi_char_data[ch].rx_queue.next, -+ struct char_queue, list); -+ data = entry->data; -+ data_len = entry->count; -+ list_del(&entry->list); -+ kfree(entry); -+ } -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ -+ if (data_len) { -+ spin_lock_bh(&ssi_char_data[ch].lock); -+ ssi_char_data[ch].poll_event &= ~(POLLIN | POLLRDNORM | -+ POLLPRI); -+ if_ssi_poll(ch); -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ break; -+ } else if (file->f_flags & O_NONBLOCK) { -+ ret = -EAGAIN; -+ goto out; -+ } else if (signal_pending(current)) { -+ ret = -EAGAIN; -+ if_ssi_cancel_read(ch); -+ break; -+ } -+ -+ schedule(); -+ } -+ -+ if (data_len) { -+ ret = copy_to_user((void __user *)buf, data, data_len); -+ if (!ret) -+ ret = data_len; -+ } -+ -+ kfree(data); -+ -+out: -+ __set_current_state(TASK_RUNNING); -+ remove_wait_queue(&ssi_char_data[ch].rx_wait, &wait); -+ -+out2: -+ return ret; -+} -+ -+static ssize_t ssi_char_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ int ch = (int)file->private_data; -+ DECLARE_WAITQUEUE(wait, current); -+ u32 *data; -+ unsigned int data_len = 0; -+ struct char_queue *entry; -+ ssize_t ret; -+ -+ /* only 32bit data is supported for now */ -+ if ((count < 4) || (count & 3)) -+ return -EINVAL; -+ -+ data = kmalloc(count, GFP_ATOMIC); -+ -+ if (copy_from_user(data, (void __user *)buf, count)) { -+ ret = -EFAULT; -+ kfree(data); -+ } else { -+ ret = count; -+ } -+ -+ spin_lock_bh(&ssi_char_data[ch].lock); -+ ret = if_ssi_write(ch, data, count); -+ if (ret < 0) { -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ kfree(data); -+ goto out2; -+ } -+ ssi_char_data[ch].poll_event &= ~(POLLOUT | POLLWRNORM); -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ -+ add_wait_queue(&ssi_char_data[ch].tx_wait, &wait); -+ -+ for ( ; ; ) { -+ data = NULL; -+ data_len = 0; -+ -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ spin_lock_bh(&ssi_char_data[ch].lock); -+ if (!list_empty(&ssi_char_data[ch].tx_queue)) { -+ entry = list_entry(ssi_char_data[ch].tx_queue.next, -+ struct char_queue, list); -+ data = entry->data; -+ data_len = entry->count; -+ list_del(&entry->list); -+ kfree(entry); -+ } -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ -+ if (data_len) { -+ ret = data_len; -+ break; -+ } else if (file->f_flags & O_NONBLOCK) { -+ ret = -EAGAIN; -+ goto out; -+ } else if (signal_pending(current)) { -+ ret = -ERESTARTSYS; -+ goto out; -+ } -+ -+ schedule(); -+ } -+ -+ kfree(data); -+ -+out: -+ __set_current_state(TASK_RUNNING); -+ remove_wait_queue(&ssi_char_data[ch].tx_wait, &wait); -+ -+out2: -+ return ret; -+} -+ -+static int ssi_char_ioctl(struct inode *inode, struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ int ch = (int)file->private_data; -+ unsigned int state; -+ struct ssi_rx_config 
rx_cfg; -+ struct ssi_tx_config tx_cfg; -+ int ret = 0; -+ -+ switch (cmd) { -+ case CS_SEND_BREAK: -+ if_ssi_send_break(ch); -+ break; -+ case CS_FLUSH_RX: -+ if_ssi_flush_rx(ch); -+ break; -+ case CS_FLUSH_TX: -+ if_ssi_flush_tx(ch); -+ break; -+ case CS_SET_WAKELINE: -+ if (copy_from_user(&state, (void __user *)arg, -+ sizeof(state))) -+ ret = -EFAULT; -+ else -+ if_ssi_set_wakeline(ch, state); -+ break; -+ case CS_GET_WAKELINE: -+ if_ssi_get_wakeline(ch, &state); -+ if (copy_to_user((void __user *)arg, &state, sizeof(state))) -+ ret = -EFAULT; -+ break; -+ case CS_SET_RX: { -+ if (copy_from_user(&rx_cfg, (void __user *)arg, -+ sizeof(rx_cfg))) -+ ret = -EFAULT; -+ else -+ ret = if_ssi_set_rx(ch, &rx_cfg); -+ } -+ break; -+ case CS_GET_RX: -+ if_ssi_get_rx(ch, &rx_cfg); -+ if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg))) -+ ret = -EFAULT; -+ break; -+ case CS_SET_TX: -+ if (copy_from_user(&tx_cfg, (void __user *)arg, -+ sizeof(tx_cfg))) -+ ret = -EFAULT; -+ else -+ ret = if_ssi_set_tx(ch, &tx_cfg); -+ break; -+ case CS_GET_TX: -+ if_ssi_get_tx(ch, &tx_cfg); -+ if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg))) -+ ret = -EFAULT; -+ break; -+ default: -+ return -ENOIOCTLCMD; -+ break; -+ } -+ -+ return ret; -+} -+ -+static int ssi_char_open(struct inode *inode, struct file *file) -+{ -+ int ret = 0, ch = iminor(inode); -+ -+ if (!channels_map[ch]) -+ return -ENODEV; -+ -+ spin_lock_bh(&ssi_char_data[ch].lock); -+#if 0 -+ if (ssi_char_data[ch].opened) { -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ return -EBUSY; -+ } -+#endif -+ file->private_data = (void *)ch; -+ ssi_char_data[ch].opened++; -+ ssi_char_data[ch].poll_event = (POLLOUT | POLLWRNORM); -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ -+ ret = if_ssi_start(ch); -+ -+ return ret; -+} -+ -+static int ssi_char_release(struct inode *inode, struct file *file) -+{ -+ int ch = (int)file->private_data; -+ struct char_queue *entry; -+ struct list_head *cursor, *next; -+ -+ if_ssi_stop(ch); -+ spin_lock_bh(&ssi_char_data[ch].lock); -+ ssi_char_data[ch].opened--; -+ -+ if (!list_empty(&ssi_char_data[ch].rx_queue)) { -+ list_for_each_safe(cursor, next, &ssi_char_data[ch].rx_queue) { -+ entry = list_entry(cursor, struct char_queue, list); -+ list_del(&entry->list); -+ kfree(entry); -+ } -+ } -+ -+ if (!list_empty(&ssi_char_data[ch].tx_queue)) { -+ list_for_each_safe(cursor, next, &ssi_char_data[ch].tx_queue) { -+ entry = list_entry(cursor, struct char_queue, list); -+ list_del(&entry->list); -+ kfree(entry); -+ } -+ } -+ -+ spin_unlock_bh(&ssi_char_data[ch].lock); -+ -+ return 0; -+} -+ -+static const struct file_operations ssi_char_fops = { -+ .owner = THIS_MODULE, -+ .read = ssi_char_read, -+ .write = ssi_char_write, -+ .poll = ssi_char_poll, -+ .ioctl = ssi_char_ioctl, -+ .open = ssi_char_open, -+ .release = ssi_char_release, -+ .fasync = ssi_char_fasync, -+}; -+ -+static struct cdev ssi_char_cdev; -+ -+static int __init ssi_char_init(void) -+{ -+ char devname[] = "ssi_char"; -+ int ret, i; -+ -+ pr_info("SSI character device version " DRIVER_VERSION "\n"); -+ -+ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { -+ init_waitqueue_head(&ssi_char_data[i].rx_wait); -+ init_waitqueue_head(&ssi_char_data[i].tx_wait); -+ init_waitqueue_head(&ssi_char_data[i].poll_wait); -+ spin_lock_init(&ssi_char_data[i].lock); -+ ssi_char_data[i].opened = 0; -+ INIT_LIST_HEAD(&ssi_char_data[i].rx_queue); -+ INIT_LIST_HEAD(&ssi_char_data[i].tx_queue); -+ } -+ -+ ret = if_ssi_init(port, channels_map); -+ if (ret) -+ return ret; -+ -+ ret = 
alloc_chrdev_region(&ssi_char_dev, 0, SSI_MAX_CHAR_DEVS, devname); -+ if (ret < 0) { -+ pr_err("SSI character driver: Failed to register\n"); -+ return ret; -+ } -+ -+ cdev_init(&ssi_char_cdev, &ssi_char_fops); -+ cdev_add(&ssi_char_cdev, ssi_char_dev, SSI_MAX_CHAR_DEVS); -+ -+ return 0; -+} -+ -+static void __exit ssi_char_exit(void) -+{ -+ cdev_del(&ssi_char_cdev); -+ unregister_chrdev_region(ssi_char_dev, SSI_MAX_CHAR_DEVS); -+ if_ssi_exit(); -+} -+ -+MODULE_AUTHOR("Andras Domokos "); -+MODULE_DESCRIPTION("SSI character device"); -+MODULE_LICENSE("GPL"); -+MODULE_VERSION(DRIVER_VERSION); -+ -+module_init(ssi_char_init); -+module_exit(ssi_char_exit); -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-char-debug.h linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-char-debug.h ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-char-debug.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-char-debug.h 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,39 @@ -+/* -+ * ssi-char-debug.h -+ * -+ * Part of the SSI character driver. Debugging related definitions. -+ * -+ * Copyright (C) 2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Andras Domokos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _SSI_CHAR_DEBUG_H -+#define _SSI_CHAR_DEBUG_H -+ -+#ifdef CONFIG_SSI_CHAR_DEBUG -+#define DPRINTK(fmt, arg...) printk(KERN_DEBUG "%s(): " fmt, __func__, ##arg) -+#define DENTER() printk(KERN_DEBUG "ENTER %s()\n", __func__) -+#define DLEAVE(a) printk(KERN_DEBUG "LEAVE %s() %d\n", __func__, a) -+#else -+#define DPRINTK(fmt, arg...) while (0) -+#define DENTER() while (0) -+#define DLEAVE(a) while (0) -+#endif -+ -+#endif /* _SSI_CHAR_DEBUG_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-char.h linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-char.h ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-char.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-char.h 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,36 @@ -+/* -+ * ssi-char.h -+ * -+ * Part of the SSI character device driver. -+ * -+ * Copyright (C) 2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Andras Domokos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _SSI_CHAR_H -+#define _SSI_CHAR_H -+ -+#include "ssi-if.h" -+ -+/* how many char devices would be created at most */ -+#define SSI_MAX_CHAR_DEVS 8 -+ -+void if_notify(int ch, struct ssi_event *ev); -+ -+#endif /* _SSI_CHAR_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-if.c linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-if.c ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-if.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-if.c 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,625 @@ -+/* -+ * ssi-if.c -+ * -+ * Part of the SSI character driver, implements the SSI interface. -+ * -+ * Copyright (C) 2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Andras Domokos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "ssi-char-debug.h" -+#include "ssi-char.h" -+#include "ssi-if.h" -+ -+#define SSI_CHANNEL_STATE_UNAVAIL (1 << 0) -+#define SSI_CHANNEL_STATE_READING (1 << 1) -+#define SSI_CHANNEL_STATE_WRITING (1 << 2) -+ -+#define PORT1 0 -+#define PORT2 1 -+ -+#define SSI_RX_PARAM(cfg, mod, fsize, n, tmo) \ -+ do { \ -+ (cfg)->mode = mod; \ -+ (cfg)->frame_size = fsize; \ -+ (cfg)->channels = n; \ -+ (cfg)->timeout = tmo; \ -+ } while (0) -+ -+#define SSI_TX_PARAM(cfg, mod, fsize, n, div, arb) \ -+ do { \ -+ (cfg)->mode = mod; \ -+ (cfg)->frame_size = fsize; \ -+ (cfg)->channels = n; \ -+ (cfg)->divisor = div; \ -+ (cfg)->arb_mode = arb; \ -+ } while (0) -+ -+#define RXCONV(dst, src) \ -+ do { \ -+ (dst)->mode = (src)->mode; \ -+ (dst)->frame_size = (src)->frame_size; \ -+ (dst)->channels = (src)->channels; \ -+ (dst)->timeout = (src)->timeout; \ -+ } while (0) -+ -+#define TXCONV(dst, src) \ -+ do { \ -+ (dst)->mode = (src)->mode; \ -+ (dst)->frame_size = (src)->frame_size; \ -+ (dst)->channels = (src)->channels; \ -+ (dst)->divisor = (src)->divisor; \ -+ (dst)->arb_mode = (src)->arb_mode; \ -+ } while (0) -+ -+struct if_ssi_channel { -+ struct ssi_device *dev; -+ unsigned int channel_id; -+ u32 *tx_data; -+ unsigned int tx_count; -+ u32 *rx_data; -+ unsigned int rx_count; -+ unsigned int opened; -+ unsigned int state; -+ spinlock_t lock; -+}; -+ -+struct if_ssi_iface { -+ struct if_ssi_channel channels[SSI_MAX_CHAR_DEVS]; -+ int bootstrap; -+ spinlock_t lock; -+}; -+ -+static void if_ssi_port_event(struct ssi_device *dev, unsigned int event, -+ void *arg); -+static int __devinit if_ssi_probe(struct ssi_device *dev); -+static int __devexit if_ssi_remove(struct ssi_device *dev); -+ 
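Stepping outside the patch for a moment: the ssi-char pieces above (ssi_char_open()/read()/write()/ioctl() plus the if_ssi_* glue) are what user space ends up calling. A minimal usage sketch follows; it is hedged, because the patch only allocates a dynamic char region and registers the cdev, so the device node name and its creation (mknod against the major reported in /proc/devices, or a udev rule) are assumptions, and the CS_* ioctl definitions live in a header that is not part of this hunk:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Hypothetical node for minor 0 (channel 0); not created by the driver itself. */
#define SSI_CHAR_NODE "/dev/ssi_char0"

int main(void)
{
        uint32_t tx[4] = { 0xdeadbeef, 1, 2, 3 };
        uint32_t rx[4];
        ssize_t n;
        int fd = open(SSI_CHAR_NODE, O_RDWR);

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        /* Transfers must be at least 4 bytes and a multiple of 4: the driver
         * moves 32-bit words and rejects anything else with -EINVAL. */
        n = write(fd, tx, sizeof(tx));
        if (n < 0)
                perror("write");

        /* Blocks until the remote end sends something (unless O_NONBLOCK is set). */
        n = read(fd, rx, sizeof(rx));
        if (n > 0)
                printf("got %zd bytes, first word 0x%08x\n", n, (unsigned int)rx[0]);
        else
                perror("read");

        close(fd);
        return 0;
}

poll() is also supported on the same descriptor; the POLLIN/POLLOUT bits are driven by if_notify() as shown above.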
-+static struct ssi_device_driver if_ssi_char_driver = { -+ .ctrl_mask = ANY_SSI_CONTROLLER, -+ .probe = if_ssi_probe, -+ .remove = __devexit_p(if_ssi_remove), -+ .driver = { -+ .name = "ssi_char" -+ }, -+}; -+ -+static struct if_ssi_iface ssi_iface; -+ -+static int if_ssi_read_on(int ch, u32 *data, unsigned int count) -+{ -+ struct if_ssi_channel *channel; -+ int ret; -+ -+ channel = &ssi_iface.channels[ch]; -+ -+ spin_lock(&channel->lock); -+ if (channel->state & SSI_CHANNEL_STATE_READING) { -+ pr_err("Read still pending on channel %d\n", ch); -+ spin_unlock(&channel->lock); -+ return -EBUSY; -+ } -+ channel->state |= SSI_CHANNEL_STATE_READING; -+ channel->rx_data = data; -+ channel->rx_count = count; -+ spin_unlock(&channel->lock); -+ -+ ret = ssi_read(channel->dev, data, count/4); -+ -+ return ret; -+} -+ -+static void if_ssi_read_done(struct ssi_device *dev) -+{ -+ struct if_ssi_channel *channel; -+ struct ssi_event ev; -+ -+ channel = &ssi_iface.channels[dev->n_ch]; -+ spin_lock(&channel->lock); -+ channel->state &= ~SSI_CHANNEL_STATE_READING; -+ ev.event = SSI_EV_IN; -+ ev.data = channel->rx_data; -+ ev.count = channel->rx_count; -+ spin_unlock(&channel->lock); -+ if_notify(dev->n_ch, &ev); -+} -+ -+int if_ssi_read(int ch, u32 *data, unsigned int count) -+{ -+ int ret = 0; -+ spin_lock_bh(&ssi_iface.lock); -+ ret = if_ssi_read_on(ch, data, count); -+ spin_unlock_bh(&ssi_iface.lock); -+ return ret; -+} -+ -+int if_ssi_poll(int ch) -+{ -+ struct if_ssi_channel *channel; -+ int ret = 0; -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ret = ssi_poll(channel->dev); -+ spin_unlock_bh(&ssi_iface.lock); -+ return ret; -+} -+ -+static int if_ssi_write_on(int ch, u32 *address, unsigned int count) -+{ -+ struct if_ssi_channel *channel; -+ int ret; -+ -+ channel = &ssi_iface.channels[ch]; -+ -+ spin_lock(&channel->lock); -+ if (channel->state & SSI_CHANNEL_STATE_WRITING) { -+ pr_err("Write still pending on channel %d\n", ch); -+ spin_unlock(&channel->lock); -+ return -EBUSY; -+ } -+ -+ channel->tx_data = address; -+ channel->tx_count = count; -+ channel->state |= SSI_CHANNEL_STATE_WRITING; -+ spin_unlock(&channel->lock); -+ ret = ssi_write(channel->dev, address, count/4); -+ return ret; -+} -+ -+static void if_ssi_write_done(struct ssi_device *dev) -+{ -+ struct if_ssi_channel *channel; -+ struct ssi_event ev; -+ -+ channel = &ssi_iface.channels[dev->n_ch]; -+ -+ spin_lock(&channel->lock); -+ channel->state &= ~SSI_CHANNEL_STATE_WRITING; -+ ev.event = SSI_EV_OUT; -+ ev.data = channel->tx_data; -+ ev.count = channel->tx_count; -+ spin_unlock(&channel->lock); -+ if_notify(dev->n_ch, &ev); -+} -+ -+int if_ssi_write(int ch, u32 *data, unsigned int count) -+{ -+ int ret = 0; -+ -+ spin_lock_bh(&ssi_iface.lock); -+ ret = if_ssi_write_on(ch, data, count); -+ spin_unlock_bh(&ssi_iface.lock); -+ return ret; -+} -+ -+void if_ssi_send_break(int ch) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, SSI_IOCTL_SEND_BREAK, NULL); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+void if_ssi_flush_rx(int ch) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, SSI_IOCTL_FLUSH_RX, NULL); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+void if_ssi_flush_ch(int ch) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock(&channel->lock); -+ spin_unlock(&channel->lock); -+} -+ -+void 
if_ssi_flush_tx(int ch) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, SSI_IOCTL_FLUSH_TX, NULL); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+void if_ssi_get_wakeline(int ch, unsigned int *state) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, SSI_IOCTL_WAKE, state); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+void if_ssi_set_wakeline(int ch, unsigned int state) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, state, NULL); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+int if_ssi_set_rx(int ch, struct ssi_rx_config *cfg) -+{ -+ int ret; -+ struct if_ssi_channel *channel; -+ struct ssr_ctx ctx; -+ -+ RXCONV(&ctx, cfg); -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ret = ssi_ioctl(channel->dev, SSI_IOCTL_SET_RX, &ctx); -+ spin_unlock_bh(&ssi_iface.lock); -+ return ret; -+} -+ -+void if_ssi_get_rx(int ch, struct ssi_rx_config *cfg) -+{ -+ struct if_ssi_channel *channel; -+ struct ssr_ctx ctx; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, SSI_IOCTL_GET_RX, &ctx); -+ RXCONV(cfg, &ctx); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+int if_ssi_set_tx(int ch, struct ssi_tx_config *cfg) -+{ -+ int ret; -+ struct if_ssi_channel *channel; -+ struct sst_ctx ctx; -+ -+ TXCONV(&ctx, cfg); -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ret = ssi_ioctl(channel->dev, SSI_IOCTL_SET_TX, &ctx); -+ spin_unlock_bh(&ssi_iface.lock); -+ return ret; -+} -+ -+void if_ssi_get_tx(int ch, struct ssi_tx_config *cfg) -+{ -+ struct if_ssi_channel *channel; -+ struct sst_ctx ctx; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ ssi_ioctl(channel->dev, SSI_IOCTL_GET_TX, &ctx); -+ TXCONV(cfg, &ctx); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+void if_ssi_cancel_read(int ch) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock(&channel->lock); -+ if (channel->state & SSI_CHANNEL_STATE_READING) -+ ssi_read_cancel(channel->dev); -+ channel->state &= ~SSI_CHANNEL_STATE_READING; -+ spin_unlock(&channel->lock); -+} -+ -+void if_ssi_cancel_write(int ch) -+{ -+ struct if_ssi_channel *channel; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock(&channel->lock); -+ if (channel->state & SSI_CHANNEL_STATE_WRITING) -+ ssi_write_cancel(channel->dev); -+ channel->state &= ~SSI_CHANNEL_STATE_WRITING; -+ spin_unlock(&channel->lock); -+} -+ -+static int if_ssi_openchannel(struct if_ssi_channel *channel) -+{ -+ int ret = 0; -+ -+ spin_lock(&channel->lock); -+ -+ if (channel->state == SSI_CHANNEL_STATE_UNAVAIL) -+ return -ENODEV; -+ -+ if (channel->opened) { -+ ret = -EBUSY; -+ goto leave; -+ } -+ -+ if (!channel->dev) { -+ pr_err("Channel %d is not ready??\n", -+ channel->channel_id); -+ ret = -ENODEV; -+ goto leave; -+ } -+ -+ ret = ssi_open(channel->dev); -+ if (ret < 0) { -+ pr_err("Could not open channel %d\n", -+ channel->channel_id); -+ goto leave; -+ } -+ -+ channel->opened = 1; -+ -+leave: -+ spin_unlock(&channel->lock); -+ return ret; -+} -+ -+ -+static int if_ssi_closechannel(struct if_ssi_channel *channel) -+{ -+ int ret = 0; -+ -+ spin_lock(&channel->lock); -+ -+ if (!channel->opened) -+ goto leave; -+ -+ if (!channel->dev) { -+ pr_err("Channel %d is not ready??\n", -+ 
channel->channel_id); -+ ret = -ENODEV; -+ goto leave; -+ } -+ -+ /* Stop any pending read/write */ -+ if (channel->state & SSI_CHANNEL_STATE_READING) { -+ ssi_read_cancel(channel->dev); -+ channel->state &= ~SSI_CHANNEL_STATE_READING; -+ } -+ if (channel->state & SSI_CHANNEL_STATE_WRITING) { -+ ssi_write_cancel(channel->dev); -+ channel->state &= ~SSI_CHANNEL_STATE_WRITING; -+ } -+ -+ ssi_close(channel->dev); -+ -+ channel->opened = 0; -+leave: -+ spin_unlock(&channel->lock); -+ return ret; -+} -+ -+ -+int if_ssi_start(int ch) -+{ -+ struct if_ssi_channel *channel; -+ int ret = 0; -+ -+ channel = &ssi_iface.channels[ch]; -+ spin_lock_bh(&ssi_iface.lock); -+ channel->state = 0; -+ ret = if_ssi_openchannel(channel); -+ if (ret < 0) { -+ pr_err("Could not open channel %d\n", ch); -+ spin_unlock_bh(&ssi_iface.lock); -+ goto error; -+ } -+ if_ssi_poll(ch); -+ spin_unlock_bh(&ssi_iface.lock); -+ -+error: -+ return ret; -+} -+ -+void if_ssi_stop(int ch) -+{ -+ struct if_ssi_channel *channel; -+ channel = &ssi_iface.channels[ch]; -+ if_ssi_set_wakeline(ch, 1); -+ spin_lock_bh(&ssi_iface.lock); -+ if_ssi_closechannel(channel); -+ spin_unlock_bh(&ssi_iface.lock); -+} -+ -+static int __devinit if_ssi_probe(struct ssi_device *dev) -+{ -+ struct if_ssi_channel *channel; -+ unsigned long *address; -+ int ret = -ENXIO, port; -+ -+ for (port = 0; port < SSI_MAX_PORTS; port++) { -+ if (if_ssi_char_driver.ch_mask[port]) -+ break; -+ } -+ -+ if (port == SSI_MAX_PORTS) -+ return -ENXIO; -+ -+ address = &if_ssi_char_driver.ch_mask[port]; -+ -+ spin_lock_bh(&ssi_iface.lock); -+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) { -+ ssi_set_read_cb(dev, if_ssi_read_done); -+ ssi_set_write_cb(dev, if_ssi_write_done); -+ ssi_set_port_event_cb(dev, if_ssi_port_event); -+ channel = &ssi_iface.channels[dev->n_ch]; -+ channel->dev = dev; -+ channel->state = 0; -+ ret = 0; -+ } -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ return ret; -+} -+ -+static int __devexit if_ssi_remove(struct ssi_device *dev) -+{ -+ struct if_ssi_channel *channel; -+ unsigned long *address; -+ int ret = -ENXIO, port; -+ -+ for (port = 0; port < SSI_MAX_PORTS; port++) { -+ if (if_ssi_char_driver.ch_mask[port]) -+ break; -+ } -+ -+ if (port == SSI_MAX_PORTS) -+ return -ENXIO; -+ -+ address = &if_ssi_char_driver.ch_mask[port]; -+ -+ spin_lock_bh(&ssi_iface.lock); -+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) { -+ ssi_set_read_cb(dev, NULL); -+ ssi_set_write_cb(dev, NULL); -+ channel = &ssi_iface.channels[dev->n_ch]; -+ channel->dev = NULL; -+ channel->state = SSI_CHANNEL_STATE_UNAVAIL; -+ ret = 0; -+ } -+ spin_unlock_bh(&ssi_iface.lock); -+ -+ return ret; -+} -+ -+static void if_ssi_port_event(struct ssi_device *dev, unsigned int event, -+ void *arg) -+{ -+ struct ssi_event ev; -+ int i; -+ -+ ev.event = SSI_EV_EXCEP; -+ ev.data = (u32 *)0; -+ ev.count = 0; -+ -+ switch (event) { -+ case SSI_EVENT_BREAK_DETECTED: -+ ev.data = (u32 *)SSI_HWBREAK; -+ spin_lock_bh(&ssi_iface.lock); -+ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { -+ if (ssi_iface.channels[i].opened) -+ if_notify(i, &ev); -+ } -+ spin_unlock_bh(&ssi_iface.lock); -+ break; -+ case SSI_EVENT_SSR_DATAAVAILABLE: -+ i = (int)arg; -+ ev.event = SSI_EV_AVAIL; -+ spin_lock_bh(&ssi_iface.lock); -+ if (ssi_iface.channels[i].opened) -+ if_notify(i, &ev); -+ spin_unlock_bh(&ssi_iface.lock); -+ break; -+ case SSI_EVENT_CAWAKE_UP: -+ break; -+ case SSI_EVENT_CAWAKE_DOWN: -+ break; -+ case SSI_EVENT_ERROR: -+ break; -+ default: -+ printk(KERN_DEBUG "%s, Unknown event(%d)\n", __func__, 
event); -+ break; -+ } -+} -+ -+int __init if_ssi_init(unsigned int port, unsigned int *channels_map) -+{ -+ struct if_ssi_channel *channel; -+ int i, ret = 0; -+ -+ port -= 1; -+ if (port >= SSI_MAX_PORTS) -+ return -EINVAL; -+ -+ ssi_iface.bootstrap = 1; -+ spin_lock_init(&ssi_iface.lock); -+ -+ for (i = 0; i < SSI_MAX_PORTS; i++) -+ if_ssi_char_driver.ch_mask[i] = 0; -+ -+ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { -+ channel = &ssi_iface.channels[i]; -+ channel->dev = NULL; -+ channel->opened = 0; -+ channel->state = 0; -+ channel->channel_id = i; -+ spin_lock_init(&channel->lock); -+ channel->state = SSI_CHANNEL_STATE_UNAVAIL; -+ } -+ -+ for (i = 0; (i < SSI_MAX_CHAR_DEVS) && channels_map[i]; i++) { -+ if ((channels_map[i] - 1) < SSI_MAX_CHAR_DEVS) -+ if_ssi_char_driver.ch_mask[port] |= (1 << ((channels_map[i] - 1))); -+ } -+ -+ ret = register_ssi_driver(&if_ssi_char_driver); -+ if (ret) -+ pr_err("Error while registering SSI driver %d", ret); -+ -+ return ret; -+} -+ -+int __exit if_ssi_exit(void) -+{ -+ struct if_ssi_channel *channel; -+ unsigned long *address; -+ int i, port; -+ -+ for (port = 0; port < SSI_MAX_PORTS; port++) { -+ if (if_ssi_char_driver.ch_mask[port]) -+ break; -+ } -+ -+ if (port == SSI_MAX_PORTS) -+ return -ENXIO; -+ -+ address = &if_ssi_char_driver.ch_mask[port]; -+ -+ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { -+ channel = &ssi_iface.channels[i]; -+ if (channel->opened) { -+ if_ssi_set_wakeline(i, 1); -+ if_ssi_closechannel(channel); -+ } -+ } -+ unregister_ssi_driver(&if_ssi_char_driver); -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-if.h linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-if.h ---- linux-omap-2.6.28-omap1/drivers/misc/ssi-char/ssi-if.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/misc/ssi-char/ssi-if.h 2011-06-22 13:19:32.753063276 +0200 -@@ -0,0 +1,70 @@ -+/* -+ * ssi-if.h -+ * -+ * Part of the SSI character driver. -+ * -+ * Copyright (C) 2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Andras Domokos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _SSI_IF_H -+#define _SSI_IF_H -+ -+#define SSI_EV_MASK (0xffff << 0) -+#define SSI_EV_TYPE_MASK (0x0f << 16) -+#define SSI_EV_IN (0x01 << 16) -+#define SSI_EV_OUT (0x02 << 16) -+#define SSI_EV_EXCEP (0x03 << 16) -+#define SSI_EV_AVAIL (0x04 << 16) -+#define SSI_EV_TYPE(event) ((event) & SSI_EV_TYPE_MASK) -+ -+#define SSI_HWBREAK 1 -+#define SSI_ERROR 2 -+ -+struct ssi_event { -+ unsigned int event; -+ u32 *data; -+ unsigned int count; -+}; -+ -+int if_ssi_init(unsigned int port, unsigned int *channels_map); -+int if_ssi_exit(void); -+ -+int if_ssi_start(int ch); -+void if_ssi_stop(int ch); -+ -+void if_ssi_send_break(int ch); -+void if_ssi_flush_rx(int ch); -+void if_ssi_flush_tx(int ch); -+void if_ssi_bootstrap(int ch); -+void if_ssi_set_wakeline(int ch, unsigned int state); -+void if_ssi_get_wakeline(int ch, unsigned int *state); -+int if_ssi_set_rx(int ch, struct ssi_rx_config *cfg); -+void if_ssi_get_rx(int ch, struct ssi_rx_config *cfg); -+int if_ssi_set_tx(int ch, struct ssi_tx_config *cfg); -+void if_ssi_get_tx(int ch, struct ssi_tx_config *cfg); -+ -+int if_ssi_read(int ch, u32 *data, unsigned int count); -+int if_ssi_poll(int ch); -+int if_ssi_write(int ch, u32 *data, unsigned int count); -+ -+void if_ssi_cancel_read(int ch); -+void if_ssi_cancel_write(int ch); -+ -+#endif /* _SSI_IF_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/misc/sti/sdti.c linux-omap-2.6.28-nokia1/drivers/misc/sti/sdti.c ---- linux-omap-2.6.28-omap1/drivers/misc/sti/sdti.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/misc/sti/sdti.c 2011-06-22 13:19:32.763063277 +0200 -@@ -37,20 +37,26 @@ - static struct clk *sdti_fck, *sdti_ick; - void __iomem *sti_base, *sti_channel_base; - static DEFINE_SPINLOCK(sdti_lock); -+static int sdti_initialized; - - void sti_channel_write_trace(int len, int id, void *data, - unsigned int channel) - { - const u8 *tpntr = data; -+ unsigned long flags; - -- spin_lock_irq(&sdti_lock); -+ spin_lock_irqsave(&sdti_lock, flags); -+ -+ if (unlikely(!sdti_initialized)) -+ goto skip; - - sti_channel_writeb(id, channel); - while (len--) - sti_channel_writeb(*tpntr++, channel); - sti_channel_flush(channel); - -- spin_unlock_irq(&sdti_lock); -+skip: -+ spin_unlock_irqrestore(&sdti_lock, flags); - } - EXPORT_SYMBOL(sti_channel_write_trace); - -@@ -117,6 +123,10 @@ static int __init omap_sdti_init(void) - /* Enable SDTI */ - sti_writel((1 << 31) | (i & 0x3FFFFFFF), SDTI_WINCTRL); - -+ spin_lock_irq(&sdti_lock); -+ sdti_initialized = 1; -+ spin_unlock_irq(&sdti_lock); -+ - i = sti_readl(SDTI_REVISION); - snprintf(buf, sizeof(buf), "OMAP SDTI support loaded (HW v%u.%u)\n", - (i >> 4) & 0x0f, i & 0x0f); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/card/block.c linux-omap-2.6.28-nokia1/drivers/mmc/card/block.c ---- linux-omap-2.6.28-omap1/drivers/mmc/card/block.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/card/block.c 2011-06-22 13:19:32.793063275 +0200 -@@ -83,7 +83,14 @@ static void mmc_blk_put(struct mmc_blk_d - mutex_lock(&open_lock); - md->usage--; - if (md->usage == 0) { -+ int devmaj = MAJOR(disk_devt(md->disk)); - int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; -+ -+ if (!devmaj) -+ devidx = md->disk->first_minor >> MMC_SHIFT; -+ -+ blk_cleanup_queue(md->queue.queue); -+ - 
__clear_bit(devidx, dev_use); - - put_disk(md->disk); -@@ -209,18 +216,35 @@ static u32 mmc_sd_num_wr_blocks(struct m - return blocks; - } - -+static u32 get_card_status(struct mmc_card *card, struct request *req) -+{ -+ struct mmc_command cmd; -+ int err; -+ -+ memset(&cmd, 0, sizeof(struct mmc_command)); -+ cmd.opcode = MMC_SEND_STATUS; -+ if (!mmc_host_is_spi(card->host)) -+ cmd.arg = card->rca << 16; -+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; -+ err = mmc_wait_for_cmd(card->host, &cmd, 0); -+ if (err) -+ printk(KERN_ERR "%s: error %d sending status comand", -+ req->rq_disk->disk_name, err); -+ return cmd.resp[0]; -+} -+ - static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) - { - struct mmc_blk_data *md = mq->data; - struct mmc_card *card = md->queue.card; - struct mmc_blk_request brq; -- int ret = 1; -+ int ret = 1, disable_multi = 0; - - mmc_claim_host(card->host); - - do { - struct mmc_command cmd; -- u32 readcmd, writecmd; -+ u32 readcmd, writecmd, status = 0; - - memset(&brq, 0, sizeof(struct mmc_blk_request)); - brq.mrq.cmd = &brq.cmd; -@@ -236,6 +260,14 @@ static int mmc_blk_issue_rq(struct mmc_q - brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - brq.data.blocks = req->nr_sectors; - -+ /* -+ * After a read error, we redo the request one sector at a time -+ * in order to accurately determine which sectors can be read -+ * successfully. -+ */ -+ if (disable_multi && brq.data.blocks > 1) -+ brq.data.blocks = 1; -+ - if (brq.data.blocks > 1) { - /* SPI multiblock writes terminate using a special - * token, not a STOP_TRANSMISSION request. -@@ -264,10 +296,38 @@ static int mmc_blk_issue_rq(struct mmc_q - brq.data.sg = mq->sg; - brq.data.sg_len = mmc_queue_map_sg(mq); - -+ /* -+ * Adjust the sg list so it is the same size as the -+ * request. -+ */ -+ if (brq.data.blocks != req->nr_sectors) { -+ int i, data_size = brq.data.blocks << 9; -+ struct scatterlist *sg; -+ -+ for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) { -+ data_size -= sg->length; -+ if (data_size <= 0) { -+ sg->length += data_size; -+ i++; -+ break; -+ } -+ } -+ brq.data.sg_len = i; -+ } -+ - mmc_queue_bounce_pre(mq); - - mmc_wait_for_req(card->host, &brq.mrq); - -+ /* Give up early if the card has gone away */ -+ if (brq.cmd.error == -ENODEV || brq.data.error == -ENODEV || brq.stop.error == -ENODEV) { -+ req->cmd_flags |= REQ_QUIET; -+ spin_lock_irq(&md->lock); -+ ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); -+ spin_unlock_irq(&md->lock); -+ break; -+ } -+ - mmc_queue_bounce_post(mq); - - /* -@@ -275,19 +335,40 @@ static int mmc_blk_issue_rq(struct mmc_q - * until later as we need to wait for the card to leave - * programming mode even when things go wrong. 
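The "adjust the sg list" block above shortens the prepared scatterlist when the request is redone with fewer blocks (for instance a single 512-byte sector after an error, see the retry logic just below). A stand-alone rendering of that loop, with the scatterlist reduced to a plain array of segment lengths (all names hypothetical):

#include <stdio.h>

/* Hypothetical segment list: byte length of each sg entry. */
static unsigned int seg_len[] = { 4096, 4096, 1024, 4096 };
#define NSEGS (sizeof(seg_len) / sizeof(seg_len[0]))

/*
 * Trim the list so it describes exactly data_size bytes: walk the segments,
 * and when the running total overshoots, shrink the last used segment by the
 * overshoot and drop the rest.
 */
static unsigned int trim(unsigned int *len, unsigned int nsegs, int data_size)
{
        unsigned int i;

        for (i = 0; i < nsegs; i++) {
                data_size -= len[i];
                if (data_size <= 0) {
                        len[i] += data_size;    /* data_size is <= 0 here */
                        return i + 1;           /* new number of segments */
                }
        }
        return nsegs;
}

int main(void)
{
        /* e.g. a request shortened to one 512-byte sector */
        unsigned int used = trim(seg_len, NSEGS, 512);
        unsigned int i;

        printf("segments used: %u\n", used);
        for (i = 0; i < used; i++)
                printf("  seg %u: %u bytes\n", i, seg_len[i]);
        return 0;
}

This mirrors what the in-kernel loop does with sg->length and brq.data.sg_len.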
- */ -+ if (brq.cmd.error || brq.data.error || brq.stop.error) { -+ if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { -+ /* Redo read one sector at a time */ -+ printk(KERN_WARNING "%s: retrying using single " -+ "block read\n", req->rq_disk->disk_name); -+ disable_multi = 1; -+ continue; -+ } -+ status = get_card_status(card, req); -+ } -+ - if (brq.cmd.error) { -- printk(KERN_ERR "%s: error %d sending read/write command\n", -- req->rq_disk->disk_name, brq.cmd.error); -+ printk(KERN_ERR "%s: error %d sending read/write " -+ "command, response %#x, card status %#x\n", -+ req->rq_disk->disk_name, brq.cmd.error, -+ brq.cmd.resp[0], status); - } - - if (brq.data.error) { -- printk(KERN_ERR "%s: error %d transferring data\n", -- req->rq_disk->disk_name, brq.data.error); -+ if (brq.data.error == -ETIMEDOUT && brq.mrq.stop) -+ /* 'Stop' response contains card status */ -+ status = brq.mrq.stop->resp[0]; -+ printk(KERN_ERR "%s: error %d transferring data," -+ " sector %u, nr %u, card status %#x\n", -+ req->rq_disk->disk_name, brq.data.error, -+ (unsigned)req->sector, -+ (unsigned)req->nr_sectors, status); - } - - if (brq.stop.error) { -- printk(KERN_ERR "%s: error %d sending stop command\n", -- req->rq_disk->disk_name, brq.stop.error); -+ printk(KERN_ERR "%s: error %d sending stop command, " -+ "response %#x, card status %#x\n", -+ req->rq_disk->disk_name, brq.stop.error, -+ brq.stop.resp[0], status); - } - - if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { -@@ -298,6 +379,11 @@ static int mmc_blk_issue_rq(struct mmc_q - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - err = mmc_wait_for_cmd(card->host, &cmd, 5); -+ if (err == -ENODEV) { -+ /* Card was removed so quiet errors */ -+ req->cmd_flags |= REQ_QUIET; -+ goto cmd_err; -+ } - if (err) { - printk(KERN_ERR "%s: error %d requesting status\n", - req->rq_disk->disk_name, err); -@@ -320,8 +406,20 @@ static int mmc_blk_issue_rq(struct mmc_q - #endif - } - -- if (brq.cmd.error || brq.data.error || brq.stop.error) -+ if (brq.cmd.error || brq.stop.error || brq.data.error) { -+ if (rq_data_dir(req) == READ) { -+ /* -+ * After an error, we redo I/O one sector at a -+ * time, so we only reach here after trying to -+ * read a single sector. -+ */ -+ spin_lock_irq(&md->lock); -+ ret = __blk_end_request(req, -EIO, brq.data.blksz); -+ spin_unlock_irq(&md->lock); -+ continue; -+ } - goto cmd_err; -+ } - - /* - * A block was successfully transferred. -@@ -343,25 +441,20 @@ static int mmc_blk_issue_rq(struct mmc_q - * If the card is not SD, we can still ok written sectors - * as reported by the controller (which might be less than - * the real number of written sectors, but never more). -- * -- * For reads we just fail the entire chunk as that should -- * be safe in all cases. 
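The error handling added above amounts to a simple policy for reads: if a multi-block read fails, redo the request one sector at a time (disable_multi), and if even a single-sector read fails, complete just that sector with -EIO and carry on, so one bad sector no longer takes down the whole chunk. A simplified, self-contained simulation of that policy (hypothetical sector layout, errors reduced to a flag):

#include <stdio.h>

#define SECTOR_COUNT 8
#define BAD_SECTOR   5          /* hypothetical unreadable sector */

/* Pretend transfer: fails whenever the range covers the bad sector. */
static int do_transfer(int start, int blocks)
{
        return (start <= BAD_SECTOR && BAD_SECTOR < start + blocks) ? -1 : 0;
}

int main(void)
{
        int pos = 0, remaining = SECTOR_COUNT, disable_multi = 0;

        while (remaining) {
                int blocks = disable_multi ? 1 : remaining;
                int err = do_transfer(pos, blocks);

                if (err && blocks > 1) {
                        /* Multi-block read failed: retry one sector at a
                         * time so only the genuinely bad sector is lost. */
                        printf("retrying using single block read\n");
                        disable_multi = 1;
                        continue;
                }

                if (err)
                        printf("sector %d: ended with -EIO\n", pos);
                else
                        printf("sectors %d..%d: completed\n", pos, pos + blocks - 1);

                pos += blocks;
                remaining -= blocks;
        }
        return 0;
}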
- */ -- if (rq_data_dir(req) != READ) { -- if (mmc_card_sd(card)) { -- u32 blocks; -+ if (mmc_card_sd(card)) { -+ u32 blocks; - -- blocks = mmc_sd_num_wr_blocks(card); -- if (blocks != (u32)-1) { -- spin_lock_irq(&md->lock); -- ret = __blk_end_request(req, 0, blocks << 9); -- spin_unlock_irq(&md->lock); -- } -- } else { -+ blocks = mmc_sd_num_wr_blocks(card); -+ if (blocks != (u32)-1) { - spin_lock_irq(&md->lock); -- ret = __blk_end_request(req, 0, brq.data.bytes_xfered); -+ ret = __blk_end_request(req, 0, blocks << 9); - spin_unlock_irq(&md->lock); - } -+ } else { -+ spin_lock_irq(&md->lock); -+ ret = __blk_end_request(req, 0, brq.data.bytes_xfered); -+ spin_unlock_irq(&md->lock); - } - - mmc_release_host(card->host); -@@ -511,10 +604,13 @@ static int mmc_blk_probe(struct mmc_card - return PTR_ERR(md); - - err = mmc_blk_set_blksize(md, card); -- if (err) -- goto out; -+ if (err) { -+ mmc_cleanup_queue(&md->queue); -+ mmc_blk_put(md); -+ return err; -+ } - -- string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2, -+ string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, - cap_str, sizeof(cap_str)); - printk(KERN_INFO "%s: %s %s %s %s\n", - md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), -@@ -523,11 +619,6 @@ static int mmc_blk_probe(struct mmc_card - mmc_set_drvdata(card, md); - add_disk(md->disk); - return 0; -- -- out: -- mmc_blk_put(md); -- -- return err; - } - - static void mmc_blk_remove(struct mmc_card *card) -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/card/queue.c linux-omap-2.6.28-nokia1/drivers/mmc/card/queue.c ---- linux-omap-2.6.28-omap1/drivers/mmc/card/queue.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/card/queue.c 2011-06-22 13:19:32.793063275 +0200 -@@ -91,9 +91,9 @@ static void mmc_request(struct request_q - int ret; - - if (!mq) { -- printk(KERN_ERR "MMC: killing requests for dead queue\n"); - while ((req = elv_next_request(q)) != NULL) { - do { -+ req->cmd_flags |= REQ_QUIET; - ret = __blk_end_request(req, -EIO, - blk_rq_cur_bytes(req)); - } while (ret); -@@ -228,17 +228,18 @@ void mmc_cleanup_queue(struct mmc_queue - struct request_queue *q = mq->queue; - unsigned long flags; - -- /* Mark that we should start throwing out stragglers */ -- spin_lock_irqsave(q->queue_lock, flags); -- q->queuedata = NULL; -- spin_unlock_irqrestore(q->queue_lock, flags); -- - /* Make sure the queue isn't suspended, as that will deadlock */ - mmc_queue_resume(mq); - - /* Then terminate our worker thread */ - kthread_stop(mq->thread); - -+ /* Empty the queue */ -+ spin_lock_irqsave(q->queue_lock, flags); -+ q->queuedata = NULL; -+ blk_start_queue(q); -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ - if (mq->bounce_sg) - kfree(mq->bounce_sg); - mq->bounce_sg = NULL; -@@ -250,8 +251,6 @@ void mmc_cleanup_queue(struct mmc_queue - kfree(mq->bounce_buf); - mq->bounce_buf = NULL; - -- blk_cleanup_queue(mq->queue); -- - mq->card = NULL; - } - EXPORT_SYMBOL(mmc_cleanup_queue); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/core.c linux-omap-2.6.28-nokia1/drivers/mmc/core/core.c ---- linux-omap-2.6.28-omap1/drivers/mmc/core/core.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/core.c 2011-06-22 13:19:32.803063275 +0200 -@@ -327,6 +327,98 @@ unsigned int mmc_align_data_size(struct - EXPORT_SYMBOL(mmc_align_data_size); - - /** -+ * mmc_host_enable - enable a host. 
-+ * @host: mmc host to enable -+ * -+ * Hosts that support power saving can use the 'enable' and 'disable' -+ * methods to exit and enter power saving states. For more information -+ * see comments for struct mmc_host_ops. -+ */ -+int mmc_host_enable(struct mmc_host *host) -+{ -+ if (!(host->caps & MMC_CAP_DISABLE)) -+ return 0; -+ -+ if (host->en_dis_recurs) -+ return 0; -+ -+ if (host->nesting_cnt++) -+ return 0; -+ -+ cancel_delayed_work_sync(&host->disable); -+ -+ if (host->enabled) -+ return 0; -+ -+ if (host->ops->enable) { -+ int err; -+ -+ host->en_dis_recurs = 1; -+ err = host->ops->enable(host); -+ host->en_dis_recurs = 0; -+ -+ if (err) { -+ pr_debug("%s: enable error %d\n", -+ mmc_hostname(host), err); -+ return err; -+ } -+ } -+ host->enabled = 1; -+ return 0; -+} -+EXPORT_SYMBOL(mmc_host_enable); -+ -+static int mmc_host_do_disable(struct mmc_host *host, int lazy) -+{ -+ if (host->ops->disable) { -+ int err; -+ -+ host->en_dis_recurs = 1; -+ err = host->ops->disable(host, lazy); -+ host->en_dis_recurs = 0; -+ -+ if (err < 0) { -+ pr_debug("%s: disable error %d\n", -+ mmc_hostname(host), err); -+ return err; -+ } -+ if (err > 0) -+ mmc_schedule_delayed_work(&host->disable, err); -+ } -+ host->enabled = 0; -+ return 0; -+} -+ -+/** -+ * mmc_host_disable - disable a host. -+ * @host: mmc host to disable -+ * -+ * Hosts that support power saving can use the 'enable' and 'disable' -+ * methods to exit and enter power saving states. For more information -+ * see comments for struct mmc_host_ops. -+ */ -+int mmc_host_disable(struct mmc_host *host) -+{ -+ int err; -+ -+ if (!(host->caps & MMC_CAP_DISABLE)) -+ return 0; -+ -+ if (host->en_dis_recurs) -+ return 0; -+ -+ if (--host->nesting_cnt) -+ return 0; -+ -+ if (!host->enabled) -+ return 0; -+ -+ err = mmc_host_do_disable(host, 0); -+ return err; -+} -+EXPORT_SYMBOL(mmc_host_disable); -+ -+/** - * __mmc_claim_host - exclusively claim a host - * @host: mmc host to claim - * @abort: whether or not the operation should be aborted -@@ -349,25 +441,111 @@ int __mmc_claim_host(struct mmc_host *ho - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - stop = abort ? atomic_read(abort) : 0; -- if (stop || !host->claimed) -+ if (stop || !host->claimed || host->claimer == current) - break; - spin_unlock_irqrestore(&host->lock, flags); - schedule(); - spin_lock_irqsave(&host->lock, flags); - } - set_current_state(TASK_RUNNING); -- if (!stop) -+ if (!stop) { - host->claimed = 1; -- else -+ host->claimer = current; -+ host->claim_cnt += 1; -+ } else - wake_up(&host->wq); - spin_unlock_irqrestore(&host->lock, flags); - remove_wait_queue(&host->wq, &wait); -+ if (!stop) -+ mmc_host_enable(host); - return stop; - } - - EXPORT_SYMBOL(__mmc_claim_host); - - /** -+ * mmc_try_claim_host - try exclusively to claim a host -+ * @host: mmc host to claim -+ * -+ * Returns %1 if the host is claimed, %0 otherwise. 
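The claiming changes above allow the same task to claim a host recursively: __mmc_claim_host() now also succeeds when host->claimer is the current task, every successful claim bumps host->claim_cnt, and mmc_do_release_host() (just below) only really releases the host on the last put. A tiny single-threaded model of that bookkeeping, without the spinlock and wait queue of the real code:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical single-host model of the new claimer/claim_cnt fields. */
struct host {
        int claimed;
        const void *claimer;    /* stands in for the claiming task */
        int claim_cnt;
};

static int try_claim(struct host *h, const void *task)
{
        if (!h->claimed || h->claimer == task) {
                h->claimed = 1;
                h->claimer = task;
                h->claim_cnt++;
                return 1;
        }
        return 0;
}

static void release(struct host *h)
{
        if (--h->claim_cnt == 0) {
                h->claimed = 0;
                h->claimer = NULL;      /* a blocked claimer would be woken here */
        }
}

int main(void)
{
        struct host h = { 0, NULL, 0 };
        const char *task_a = "A", *task_b = "B";

        printf("A claims: %d\n", try_claim(&h, task_a));                  /* 1 */
        printf("A claims again (nested): %d\n", try_claim(&h, task_a));   /* 1 */
        printf("B claims while A holds it: %d\n", try_claim(&h, task_b)); /* 0 */
        release(&h);
        printf("after one release, still claimed: %d\n", h.claimed);      /* 1 */
        release(&h);
        printf("after second release, claimed: %d\n", h.claimed);         /* 0 */
        return 0;
}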
-+ */ -+int mmc_try_claim_host(struct mmc_host *host) -+{ -+ int claimed_host = 0; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&host->lock, flags); -+ if (!host->claimed || host->claimer == current) { -+ host->claimed = 1; -+ host->claimer = current; -+ host->claim_cnt += 1; -+ claimed_host = 1; -+ } -+ spin_unlock_irqrestore(&host->lock, flags); -+ return claimed_host; -+} -+EXPORT_SYMBOL(mmc_try_claim_host); -+ -+static void mmc_do_release_host(struct mmc_host *host) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&host->lock, flags); -+ if (--host->claim_cnt) { -+ /* Release for nested claim */ -+ spin_unlock_irqrestore(&host->lock, flags); -+ } else { -+ host->claimed = 0; -+ host->claimer = NULL; -+ spin_unlock_irqrestore(&host->lock, flags); -+ wake_up(&host->wq); -+ } -+} -+ -+void mmc_host_deeper_disable(struct work_struct *work) -+{ -+ struct mmc_host *host = -+ container_of(work, struct mmc_host, disable.work); -+ -+ /* If the host is claimed then we do not want to disable it anymore */ -+ if (!mmc_try_claim_host(host)) -+ return; -+ mmc_host_do_disable(host, 1); -+ mmc_do_release_host(host); -+} -+ -+/** -+ * mmc_host_lazy_disable - lazily disable a host. -+ * @host: mmc host to disable -+ * -+ * Hosts that support power saving can use the 'enable' and 'disable' -+ * methods to exit and enter power saving states. For more information -+ * see comments for struct mmc_host_ops. -+ */ -+int mmc_host_lazy_disable(struct mmc_host *host) -+{ -+ if (!(host->caps & MMC_CAP_DISABLE)) -+ return 0; -+ -+ if (host->en_dis_recurs) -+ return 0; -+ -+ if (--host->nesting_cnt) -+ return 0; -+ -+ if (!host->enabled) -+ return 0; -+ -+ if (host->disable_delay) { -+ mmc_schedule_delayed_work(&host->disable, -+ msecs_to_jiffies(host->disable_delay)); -+ return 0; -+ } else -+ return mmc_host_do_disable(host, 1); -+} -+EXPORT_SYMBOL(mmc_host_lazy_disable); -+ -+/** - * mmc_release_host - release a host - * @host: mmc host to release - * -@@ -376,15 +554,11 @@ EXPORT_SYMBOL(__mmc_claim_host); - */ - void mmc_release_host(struct mmc_host *host) - { -- unsigned long flags; -- - WARN_ON(!host->claimed); - -- spin_lock_irqsave(&host->lock, flags); -- host->claimed = 0; -- spin_unlock_irqrestore(&host->lock, flags); -+ mmc_host_lazy_disable(host); - -- wake_up(&host->wq); -+ mmc_do_release_host(host); - } - - EXPORT_SYMBOL(mmc_release_host); -@@ -663,6 +837,22 @@ void mmc_rescan(struct work_struct *work - - mmc_bus_get(host); - -+ /* if there is a card registered */ -+ if (host->bus_ops != NULL) { -+ -+ if (host->bus_ops->detect && !host->bus_dead) { -+ -+ /* check whether the card is still present */ -+ host->bus_ops->detect(host); -+ -+ /* release the bus and update bus status in case -+ the card was removed */ -+ mmc_bus_put(host); -+ mmc_bus_get(host); -+ } -+ } -+ -+ /* if there is no card registered */ - if (host->bus_ops == NULL) { - /* - * Only we can add a new handler, so it's safe to -@@ -678,7 +868,11 @@ void mmc_rescan(struct work_struct *work - mmc_power_up(host); - mmc_go_idle(host); - -- mmc_send_if_cond(host, host->ocr_avail); -+ if (!(host->caps & MMC_CAP_NOT_SDIO) || !(host->caps & MMC_CAP_NOT_SD)) -+ mmc_send_if_cond(host, host->ocr_avail); -+ -+ if (host->caps & MMC_CAP_NOT_SDIO) -+ goto not_sdio; - - /* - * First we search for SDIO... -@@ -690,6 +884,9 @@ void mmc_rescan(struct work_struct *work - goto out; - } - -+not_sdio: if (host->caps & MMC_CAP_NOT_SD) -+ goto not_sd; -+ - /* - * ...then normal SD... 
- */ -@@ -700,6 +897,9 @@ void mmc_rescan(struct work_struct *work - goto out; - } - -+not_sd: if (host->caps & MMC_CAP_NOT_MMC) -+ goto not_mmc; -+ - /* - * ...and finally MMC. - */ -@@ -709,15 +909,11 @@ void mmc_rescan(struct work_struct *work - mmc_power_off(host); - goto out; - } -- -+not_mmc: - mmc_release_host(host); - mmc_power_off(host); -- } else { -- if (host->bus_ops->detect && !host->bus_dead) -- host->bus_ops->detect(host); -- -+ } else - mmc_bus_put(host); -- } - out: - if (host->caps & MMC_CAP_NEEDS_POLL) - mmc_schedule_delayed_work(&host->detect, HZ); -@@ -738,6 +934,9 @@ void mmc_stop_host(struct mmc_host *host - spin_unlock_irqrestore(&host->lock, flags); - #endif - -+ if (host->caps & MMC_CAP_DISABLE) -+ cancel_delayed_work(&host->disable); -+ cancel_delayed_work(&host->detect); - mmc_flush_scheduled_work(); - - mmc_bus_get(host); -@@ -748,6 +947,8 @@ void mmc_stop_host(struct mmc_host *host - mmc_claim_host(host); - mmc_detach_bus(host); - mmc_release_host(host); -+ mmc_bus_put(host); -+ return; - } - mmc_bus_put(host); - -@@ -756,6 +957,80 @@ void mmc_stop_host(struct mmc_host *host - mmc_power_off(host); - } - -+void mmc_power_save_host(struct mmc_host *host) -+{ -+ mmc_bus_get(host); -+ -+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { -+ mmc_bus_put(host); -+ return; -+ } -+ -+ if (host->bus_ops->power_save) -+ host->bus_ops->power_save(host); -+ -+ mmc_bus_put(host); -+ -+ mmc_power_off(host); -+} -+EXPORT_SYMBOL(mmc_power_save_host); -+ -+void mmc_power_restore_host(struct mmc_host *host) -+{ -+ mmc_bus_get(host); -+ -+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { -+ mmc_bus_put(host); -+ return; -+ } -+ -+ mmc_power_up(host); -+ host->bus_ops->power_restore(host); -+ -+ mmc_bus_put(host); -+} -+EXPORT_SYMBOL(mmc_power_restore_host); -+ -+int mmc_card_awake(struct mmc_host *host) -+{ -+ int err = -ENOSYS; -+ -+ mmc_bus_get(host); -+ -+ if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) -+ err = host->bus_ops->awake(host); -+ -+ mmc_bus_put(host); -+ -+ return err; -+} -+EXPORT_SYMBOL(mmc_card_awake); -+ -+int mmc_card_sleep(struct mmc_host *host) -+{ -+ int err = -ENOSYS; -+ -+ mmc_bus_get(host); -+ -+ if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) -+ err = host->bus_ops->sleep(host); -+ -+ mmc_bus_put(host); -+ -+ return err; -+} -+EXPORT_SYMBOL(mmc_card_sleep); -+ -+int mmc_card_can_sleep(struct mmc_host *host) -+{ -+ struct mmc_card *card = host->card; -+ -+ if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) -+ return 1; -+ return 0; -+} -+EXPORT_SYMBOL(mmc_card_can_sleep); -+ - #ifdef CONFIG_PM - - /** -@@ -765,6 +1040,9 @@ void mmc_stop_host(struct mmc_host *host - */ - int mmc_suspend_host(struct mmc_host *host, pm_message_t state) - { -+ if (host->caps & MMC_CAP_DISABLE) -+ cancel_delayed_work(&host->disable); -+ cancel_delayed_work(&host->detect); - mmc_flush_scheduled_work(); - - mmc_bus_get(host); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/core.h linux-omap-2.6.28-nokia1/drivers/mmc/core/core.h ---- linux-omap-2.6.28-omap1/drivers/mmc/core/core.h 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/core.h 2011-06-22 13:19:32.803063275 +0200 -@@ -16,10 +16,14 @@ - #define MMC_CMD_RETRIES 3 - - struct mmc_bus_ops { -+ int (*awake)(struct mmc_host *); -+ int (*sleep)(struct mmc_host *); - void (*remove)(struct mmc_host *); - void (*detect)(struct mmc_host *); - void (*suspend)(struct mmc_host *); - void (*resume)(struct mmc_host 
*); -+ void (*power_save)(struct mmc_host *); -+ void (*power_restore)(struct mmc_host *); - }; - - void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/debugfs.c linux-omap-2.6.28-nokia1/drivers/mmc/core/debugfs.c ---- linux-omap-2.6.28-omap1/drivers/mmc/core/debugfs.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/debugfs.c 2011-06-22 13:19:32.803063275 +0200 -@@ -184,6 +184,68 @@ static int mmc_dbg_card_status_get(void - DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, - NULL, "%08llx\n"); - -+#define EXT_CSD_STR_LEN 1025 -+ -+static int mmc_ext_csd_open(struct inode *inode, struct file *filp) -+{ -+ struct mmc_card *card = inode->i_private; -+ char *buf; -+ ssize_t n = 0; -+ u8 *ext_csd; -+ int err, i; -+ -+ buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); -+ if (!buf) -+ return -ENOMEM; -+ -+ ext_csd = kmalloc(512, GFP_KERNEL); -+ if (!ext_csd) { -+ err = -ENOMEM; -+ goto out_free; -+ } -+ -+ mmc_claim_host(card->host); -+ err = mmc_send_ext_csd(card, ext_csd); -+ mmc_release_host(card->host); -+ if (err) -+ goto out_free; -+ -+ for (i = 511; i >= 0; i--) -+ n += sprintf(buf + n, "%02x", ext_csd[i]); -+ n += sprintf(buf + n, "\n"); -+ BUG_ON(n != EXT_CSD_STR_LEN); -+ -+ filp->private_data = buf; -+ kfree(ext_csd); -+ return 0; -+ -+out_free: -+ kfree(buf); -+ kfree(ext_csd); -+ return err; -+} -+ -+static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ char *buf = filp->private_data; -+ -+ return simple_read_from_buffer(ubuf, cnt, ppos, -+ buf, EXT_CSD_STR_LEN); -+} -+ -+static int mmc_ext_csd_release(struct inode *inode, struct file *file) -+{ -+ kfree(file->private_data); -+ return 0; -+} -+ -+static struct file_operations mmc_dbg_ext_csd_fops = { -+ .open = mmc_ext_csd_open, -+ .read = mmc_ext_csd_read, -+ .release = mmc_ext_csd_release, -+}; -+ - void mmc_add_card_debugfs(struct mmc_card *card) - { - struct mmc_host *host = card->host; -@@ -211,6 +273,11 @@ void mmc_add_card_debugfs(struct mmc_car - &mmc_dbg_card_status_fops)) - goto err; - -+ if (mmc_card_mmc(card)) -+ if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, -+ &mmc_dbg_ext_csd_fops)) -+ goto err; -+ - return; - - err: -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/host.c linux-omap-2.6.28-nokia1/drivers/mmc/core/host.c ---- linux-omap-2.6.28-omap1/drivers/mmc/core/host.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/host.c 2011-06-22 13:19:32.803063275 +0200 -@@ -83,6 +83,7 @@ struct mmc_host *mmc_alloc_host(int extr - spin_lock_init(&host->lock); - init_waitqueue_head(&host->wq); - INIT_DELAYED_WORK(&host->detect, mmc_rescan); -+ INIT_DELAYED_WORK(&host->disable, mmc_host_deeper_disable); - - /* - * By default, hosts do not support SGIO or large requests. 
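The debugfs "ext_csd" file added above prints the card's 512-byte EXT_CSD register block as a single hex string, most significant byte first, which is why EXT_CSD_STR_LEN is 1025: two hex characters per byte plus a trailing newline. The snippet below is only a userspace check of that sizing and traversal order; the buffer contents are made up.

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char ext_csd[512];		/* pretend EXT_CSD dump */
	char buf[2 * 512 + 1 + 1];		/* 1024 hex chars + '\n' + NUL */
	size_t n = 0;
	int i;

	memset(ext_csd, 0xa5, sizeof(ext_csd));

	/* Same traversal as the debugfs dump: byte 511 first, byte 0 last. */
	for (i = 511; i >= 0; i--)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");

	assert(n == 1025);			/* matches EXT_CSD_STR_LEN */
	printf("dump length: %zu characters\n", n);
	return 0;
}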
-diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/host.h linux-omap-2.6.28-nokia1/drivers/mmc/core/host.h ---- linux-omap-2.6.28-omap1/drivers/mmc/core/host.h 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/host.h 2011-06-22 13:19:32.803063275 +0200 -@@ -14,5 +14,7 @@ - int mmc_register_host_class(void); - void mmc_unregister_host_class(void); - -+void mmc_host_deeper_disable(struct work_struct *work); -+ - #endif - -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/mmc.c linux-omap-2.6.28-nokia1/drivers/mmc/core/mmc.c ---- linux-omap-2.6.28-omap1/drivers/mmc/core/mmc.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/mmc.c 2011-06-22 13:19:32.803063275 +0200 -@@ -160,7 +160,6 @@ static int mmc_read_ext_csd(struct mmc_c - { - int err; - u8 *ext_csd; -- unsigned int ext_csd_struct; - - BUG_ON(!card); - -@@ -207,16 +206,16 @@ static int mmc_read_ext_csd(struct mmc_c - goto out; - } - -- ext_csd_struct = ext_csd[EXT_CSD_REV]; -- if (ext_csd_struct > 2) { -+ card->ext_csd.rev = ext_csd[EXT_CSD_REV]; -+ if (card->ext_csd.rev > 3) { - printk(KERN_ERR "%s: unrecognised EXT_CSD structure " - "version %d\n", mmc_hostname(card->host), -- ext_csd_struct); -+ card->ext_csd.rev); - err = -EINVAL; - goto out; - } - -- if (ext_csd_struct >= 2) { -+ if (card->ext_csd.rev >= 2) { - card->ext_csd.sectors = - ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | - ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | -@@ -241,6 +240,15 @@ static int mmc_read_ext_csd(struct mmc_c - goto out; - } - -+ if (card->ext_csd.rev >= 3) { -+ u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; -+ -+ /* Sleep / awake timeout in 100ns units */ -+ if (sa_shift > 0 && sa_shift <= 0x17) -+ card->ext_csd.sa_timeout = -+ 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; -+ } -+ - out: - kfree(ext_csd); - -@@ -408,12 +416,17 @@ static int mmc_init_card(struct mmc_host - (host->caps & MMC_CAP_MMC_HIGHSPEED)) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, 1); -- if (err) -+ if (err && err != -EBADMSG) - goto free_card; - -- mmc_card_set_highspeed(card); -- -- mmc_set_timing(card->host, MMC_TIMING_MMC_HS); -+ if (err) { -+ printk(KERN_WARNING "%s: switch to highspeed failed\n", -+ mmc_hostname(card->host)); -+ err = 0; -+ } else { -+ mmc_card_set_highspeed(card); -+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS); -+ } - } - - /* -@@ -434,13 +447,31 @@ static int mmc_init_card(struct mmc_host - * Activate wide bus (if supported). - */ - if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && -- (host->caps & MMC_CAP_4_BIT_DATA)) { -+ (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { -+ unsigned ext_csd_bit, bus_width; -+ -+ if (host->caps & MMC_CAP_8_BIT_DATA) { -+ ext_csd_bit = EXT_CSD_BUS_WIDTH_8; -+ bus_width = MMC_BUS_WIDTH_8; -+ } else { -+ ext_csd_bit = EXT_CSD_BUS_WIDTH_4; -+ bus_width = MMC_BUS_WIDTH_4; -+ } -+ - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, -- EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4); -- if (err) -+ EXT_CSD_BUS_WIDTH, ext_csd_bit); -+ -+ if (err && err != -EBADMSG) - goto free_card; - -- mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); -+ if (err) { -+ printk(KERN_WARNING "%s: switch to bus width %d " -+ "failed\n", mmc_hostname(card->host), -+ 1 << bus_width); -+ err = 0; -+ } else { -+ mmc_set_bus_width(card->host, bus_width); -+ } - } - - if (!oldcard) -@@ -496,8 +527,6 @@ static void mmc_detect(struct mmc_host * - } - } - --#ifdef CONFIG_MMC_UNSAFE_RESUME -- - /* - * Suspend callback from host. 
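Two of the EXT_CSD fields parsed in the hunks above deserve a worked example: the sector count is assembled little-endian from the four bytes at EXT_CSD_SEC_CNT, and EXT_CSD_S_A_TIMEOUT holds an exponent, so the sleep/awake timeout is 1 << value in units of 100 ns (the code only trusts exponents in the range 1..0x17). The byte values below are arbitrary examples, not taken from any real card.

#include <stdio.h>

int main(void)
{
	/* Example EXT_CSD bytes for the fields used above. */
	unsigned char sec_cnt[4] = { 0x00, 0x00, 0x76, 0x00 };	/* 0x00760000 */
	unsigned char s_a_timeout = 0x11;			/* exponent 17 */

	unsigned int sectors = sec_cnt[0] << 0 | sec_cnt[1] << 8 |
			       sec_cnt[2] << 16 | sec_cnt[3] << 24;

	/* 2^exponent * 100 ns */
	unsigned long long timeout_100ns = 1ULL << s_a_timeout;

	printf("capacity: %u sectors (~%u MiB at 512 bytes/sector)\n",
	       sectors, sectors / 2048);
	printf("sleep/awake timeout: ~%llu us\n", timeout_100ns / 10);
	return 0;
}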
- */ -@@ -540,20 +569,96 @@ static void mmc_resume(struct mmc_host * - - } - --#else -+static void mmc_power_restore(struct mmc_host *host) -+{ -+ host->card->state &= ~MMC_STATE_HIGHSPEED; -+ mmc_claim_host(host); -+ mmc_init_card(host, host->ocr, host->card); -+ mmc_release_host(host); -+} - --#define mmc_suspend NULL --#define mmc_resume NULL -+static int mmc_sleep(struct mmc_host *host) -+{ -+ struct mmc_card *card = host->card; -+ int err = -ENOSYS; - --#endif -+ if (card && card->ext_csd.rev >= 3) { -+ err = mmc_card_sleepawake(host, 1); -+ if (err < 0) -+ pr_debug("%s: Error %d while putting card into sleep", -+ mmc_hostname(host), err); -+ } -+ -+ return err; -+} -+ -+static int mmc_awake(struct mmc_host *host) -+{ -+ struct mmc_card *card = host->card; -+ int err = -ENOSYS; -+ -+ if (card && card->ext_csd.rev >= 3) { -+ err = mmc_card_sleepawake(host, 0); -+ if (err < 0) -+ pr_debug("%s: Error %d while awaking sleeping card", -+ mmc_hostname(host), err); -+ } -+ -+ return err; -+} -+ -+#ifdef CONFIG_MMC_UNSAFE_RESUME - - static const struct mmc_bus_ops mmc_ops = { -+ .awake = mmc_awake, -+ .sleep = mmc_sleep, - .remove = mmc_remove, - .detect = mmc_detect, - .suspend = mmc_suspend, - .resume = mmc_resume, -+ .power_restore = mmc_power_restore, - }; - -+static void mmc_attach_bus_ops(struct mmc_host *host) -+{ -+ mmc_attach_bus(host, &mmc_ops); -+} -+ -+#else -+ -+static const struct mmc_bus_ops mmc_ops = { -+ .awake = mmc_awake, -+ .sleep = mmc_sleep, -+ .remove = mmc_remove, -+ .detect = mmc_detect, -+ .suspend = NULL, -+ .resume = NULL, -+ .power_restore = mmc_power_restore, -+}; -+ -+static const struct mmc_bus_ops mmc_ops_unsafe = { -+ .awake = mmc_awake, -+ .sleep = mmc_sleep, -+ .remove = mmc_remove, -+ .detect = mmc_detect, -+ .suspend = mmc_suspend, -+ .resume = mmc_resume, -+ .power_restore = mmc_power_restore, -+}; -+ -+static void mmc_attach_bus_ops(struct mmc_host *host) -+{ -+ const struct mmc_bus_ops *bus_ops; -+ -+ if (host->caps & MMC_CAP_NONREMOVABLE) -+ bus_ops = &mmc_ops_unsafe; -+ else -+ bus_ops = &mmc_ops; -+ mmc_attach_bus(host, bus_ops); -+} -+ -+#endif -+ - /* - * Starting point for MMC card init. - */ -@@ -564,7 +669,7 @@ int mmc_attach_mmc(struct mmc_host *host - BUG_ON(!host); - WARN_ON(!host->claimed); - -- mmc_attach_bus(host, &mmc_ops); -+ mmc_attach_bus_ops(host); - - /* - * We need to get OCR a different way for SPI. 
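With the two ops tables above, suspend/resume handlers are wired up unconditionally when CONFIG_MMC_UNSAFE_RESUME is set, and otherwise only for hosts flagged MMC_CAP_NONREMOVABLE, where the card cannot have been swapped while the host slept. The fragment below only restates that selection with stand-in names and a made-up capability bit; it is not the kernel's data structures.

#include <stdio.h>

#define CAP_NONREMOVABLE (1u << 0)	/* stand-in for MMC_CAP_NONREMOVABLE */

struct bus_ops { const char *desc; };

static const struct bus_ops ops_safe   = { "suspend/resume left NULL" };
static const struct bus_ops ops_unsafe = { "suspend/resume populated" };

/* Mirrors the idea of mmc_attach_bus_ops(): removable slots get the
 * conservative table. */
static const struct bus_ops *pick_ops(unsigned int caps)
{
	return (caps & CAP_NONREMOVABLE) ? &ops_unsafe : &ops_safe;
}

int main(void)
{
	printf("soldered eMMC host: %s\n", pick_ops(CAP_NONREMOVABLE)->desc);
	printf("removable SD slot:  %s\n", pick_ops(0)->desc);
	return 0;
}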
-@@ -624,4 +729,3 @@ err: - - return err; - } -- -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/mmc_ops.c linux-omap-2.6.28-nokia1/drivers/mmc/core/mmc_ops.c ---- linux-omap-2.6.28-omap1/drivers/mmc/core/mmc_ops.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/mmc_ops.c 2011-06-22 13:19:32.803063275 +0200 -@@ -57,6 +57,34 @@ int mmc_deselect_cards(struct mmc_host * - return _mmc_select_card(host, NULL); - } - -+int mmc_card_sleepawake(struct mmc_host *host, int sleep) -+{ -+ struct mmc_command cmd; -+ struct mmc_card *card = host->card; -+ int err; -+ -+ if (sleep) -+ mmc_deselect_cards(host); -+ -+ memset(&cmd, 0, sizeof(struct mmc_command)); -+ -+ cmd.opcode = MMC_SLEEP_AWAKE; -+ cmd.arg = card->rca << 16; -+ if (sleep) -+ cmd.arg |= 1 << 15; -+ -+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; -+ err = mmc_wait_for_cmd(host, &cmd, 0); -+ -+ if (err) -+ return err; -+ -+ if (!sleep) -+ err = mmc_select_card(card); -+ -+ return err; -+} -+ - int mmc_go_idle(struct mmc_host *host) - { - int err; -@@ -248,12 +276,16 @@ mmc_send_cxd_data(struct mmc_card *card, - - sg_init_one(&sg, data_buf, len); - -- /* -- * The spec states that CSR and CID accesses have a timeout -- * of 64 clock cycles. -- */ -- data.timeout_ns = 0; -- data.timeout_clks = 64; -+ if (!mmc_host_is_spi(host) && opcode == MMC_SEND_EXT_CSD) -+ mmc_set_data_timeout(&data, card); -+ else { -+ /* -+ * The spec states that CSR and CID accesses have a timeout -+ * of 64 clock cycles (8 for SPI). -+ */ -+ data.timeout_ns = 0; -+ data.timeout_clks = 64; -+ } - - mmc_wait_for_req(host, &mrq); - -@@ -351,6 +383,7 @@ int mmc_switch(struct mmc_card *card, u8 - { - int err; - struct mmc_command cmd; -+ u32 status; - - BUG_ON(!card); - BUG_ON(!card->host); -@@ -368,6 +401,21 @@ int mmc_switch(struct mmc_card *card, u8 - if (err) - return err; - -+ err = mmc_send_status(card, &status); -+ if (err) -+ return err; -+ -+ if (mmc_host_is_spi(card->host)) { -+ if (status & R1_SPI_ILLEGAL_COMMAND) -+ return -EBADMSG; -+ } else { -+ if (status & 0xFDFFA000) -+ printk(KERN_WARNING "%s: unexpected status %#x after " -+ "switch", mmc_hostname(card->host), status); -+ if (status & R1_SWITCH_ERROR) -+ return -EBADMSG; -+ } -+ - return 0; - } - -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/mmc_ops.h linux-omap-2.6.28-nokia1/drivers/mmc/core/mmc_ops.h ---- linux-omap-2.6.28-omap1/drivers/mmc/core/mmc_ops.h 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/mmc_ops.h 2011-06-22 13:19:32.803063275 +0200 -@@ -25,6 +25,7 @@ int mmc_send_status(struct mmc_card *car - int mmc_send_cid(struct mmc_host *host, u32 *cid); - int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); - int mmc_spi_set_crc(struct mmc_host *host, int use_crc); -+int mmc_card_sleepawake(struct mmc_host *host, int sleep); - - #endif - -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/core/sd.c linux-omap-2.6.28-nokia1/drivers/mmc/core/sd.c ---- linux-omap-2.6.28-omap1/drivers/mmc/core/sd.c 2011-06-22 13:14:18.793067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/core/sd.c 2011-06-22 13:19:32.803063275 +0200 -@@ -558,8 +558,6 @@ static void mmc_sd_detect(struct mmc_hos - } - } - --#ifdef CONFIG_MMC_UNSAFE_RESUME -- - /* - * Suspend callback from host. 
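mmc_card_sleepawake() in the hunk above issues the MMC SLEEP_AWAKE command: the card's relative address goes in bits 31:16 of the argument and bit 15 selects sleep (1) or awake (0); the card is deselected before sleeping and reselected after waking. The helper below just repeats that packing in isolation; the RCA value is an example.

#include <stdio.h>

/* Pack the SLEEP_AWAKE argument the same way as mmc_card_sleepawake():
 * RCA in bits [31:16], sleep flag in bit 15. */
static unsigned int sleep_awake_arg(unsigned int rca, int sleep)
{
	unsigned int arg = rca << 16;

	if (sleep)
		arg |= 1u << 15;
	return arg;
}

int main(void)
{
	unsigned int rca = 0x0001;	/* example relative card address */

	printf("sleep arg: 0x%08x\n", sleep_awake_arg(rca, 1));	/* 0x00018000 */
	printf("awake arg: 0x%08x\n", sleep_awake_arg(rca, 0));	/* 0x00010000 */
	return 0;
}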
- */ -@@ -602,20 +600,60 @@ static void mmc_sd_resume(struct mmc_hos - - } - --#else -+static void mmc_sd_power_restore(struct mmc_host *host) -+{ -+ host->card->state &= ~MMC_STATE_HIGHSPEED; -+ mmc_claim_host(host); -+ mmc_sd_init_card(host, host->ocr, host->card); -+ mmc_release_host(host); -+} - --#define mmc_sd_suspend NULL --#define mmc_sd_resume NULL -+#ifdef CONFIG_MMC_UNSAFE_RESUME - --#endif -+static const struct mmc_bus_ops mmc_sd_ops = { -+ .remove = mmc_sd_remove, -+ .detect = mmc_sd_detect, -+ .suspend = mmc_sd_suspend, -+ .resume = mmc_sd_resume, -+ .power_restore = mmc_sd_power_restore, -+}; -+ -+static void mmc_sd_attach_bus_ops(struct mmc_host *host) -+{ -+ mmc_attach_bus(host, &mmc_sd_ops); -+} -+ -+#else - - static const struct mmc_bus_ops mmc_sd_ops = { - .remove = mmc_sd_remove, - .detect = mmc_sd_detect, -+ .suspend = NULL, -+ .resume = NULL, -+ .power_restore = mmc_sd_power_restore, -+}; -+ -+static const struct mmc_bus_ops mmc_sd_ops_unsafe = { -+ .remove = mmc_sd_remove, -+ .detect = mmc_sd_detect, - .suspend = mmc_sd_suspend, - .resume = mmc_sd_resume, -+ .power_restore = mmc_sd_power_restore, - }; - -+static void mmc_sd_attach_bus_ops(struct mmc_host *host) -+{ -+ const struct mmc_bus_ops *bus_ops; -+ -+ if (host->caps & MMC_CAP_NONREMOVABLE) -+ bus_ops = &mmc_sd_ops_unsafe; -+ else -+ bus_ops = &mmc_sd_ops; -+ mmc_attach_bus(host, bus_ops); -+} -+ -+#endif -+ - /* - * Starting point for SD card init. - */ -@@ -626,7 +664,7 @@ int mmc_attach_sd(struct mmc_host *host, - BUG_ON(!host); - WARN_ON(!host->claimed); - -- mmc_attach_bus(host, &mmc_sd_ops); -+ mmc_sd_attach_bus_ops(host); - - /* - * We need to get OCR a different way for SPI. -diff -Nurp linux-omap-2.6.28-omap1/drivers/mmc/host/omap_hsmmc.c linux-omap-2.6.28-nokia1/drivers/mmc/host/omap_hsmmc.c ---- linux-omap-2.6.28-omap1/drivers/mmc/host/omap_hsmmc.c 2011-06-22 13:14:18.803067739 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mmc/host/omap_hsmmc.c 2011-06-22 13:19:32.803063275 +0200 -@@ -17,6 +17,8 @@ - - #include - #include -+#include -+#include - #include - #include - #include -@@ -25,6 +27,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -35,6 +38,7 @@ - - /* OMAP HSMMC Host Controller Registers */ - #define OMAP_HSMMC_SYSCONFIG 0x0010 -+#define OMAP_HSMMC_SYSSTATUS 0x0014 - #define OMAP_HSMMC_CON 0x002C - #define OMAP_HSMMC_BLK 0x0104 - #define OMAP_HSMMC_ARG 0x0108 -@@ -75,6 +79,7 @@ - #define MSBS (1 << 5) - #define BCE (1 << 1) - #define FOUR_BIT (1 << 1) -+#define DW8 (1 << 5) - #define CC 0x1 - #define TC 0x02 - #define OD 0x1 -@@ -89,6 +94,8 @@ - #define DUAL_VOLT_OCR_BIT 7 - #define SRC (1 << 25) - #define SRD (1 << 26) -+#define SOFTRESET (1 << 1) -+#define RESETDONE (1 << 0) - - /* - * FIXME: Most likely all the data using these _DEVID defines should come -@@ -98,13 +105,15 @@ - #define OMAP_MMC1_DEVID 0 - #define OMAP_MMC2_DEVID 1 - --#define OMAP_MMC_DATADIR_NONE 0 --#define OMAP_MMC_DATADIR_READ 1 --#define OMAP_MMC_DATADIR_WRITE 2 - #define MMC_TIMEOUT_MS 20 - #define OMAP_MMC_MASTER_CLOCK 96000000 - #define DRIVER_NAME "mmci-omap-hs" - -+/* Timeouts for entering power saving states on inactivity, msec */ -+#define OMAP_MMC_DISABLED_TIMEOUT 100 -+#define OMAP_MMC_SLEEP_TIMEOUT 1000 -+#define OMAP_MMC_OFF_TIMEOUT 8000 -+ - /* - * One controller can have multiple slots, like on some omap boards using - * omap.c controller driver. 
Luckily this is not currently done on any known -@@ -121,10 +130,7 @@ - #define OMAP_HSMMC_WRITE(base, reg, val) \ - __raw_writel((val), (base) + OMAP_HSMMC_##reg) - --enum {OFF = 0, ON}; --#define IDLE_TIMEOUT (jiffies_to_msecs(10)) -- --struct mmc_omap_host { -+struct omap_hsmmc_host { - struct device *dev; - struct mmc_host *mmc; - struct mmc_request *mrq; -@@ -139,84 +145,209 @@ struct mmc_omap_host { - resource_size_t mapbase; - unsigned int id; - unsigned int dma_len; -- unsigned int dma_dir; -+ unsigned int dma_sg_idx; - unsigned char bus_mode; -- unsigned char datadir; -+ unsigned char power_mode; - u32 *buffer; - u32 bytesleft; - int suspended; - int irq; -- int carddetect; - int use_dma, dma_ch; -- int initstr; - int slot_id; - int dbclk_enabled; -- -- struct timer_list idle_timer; -- spinlock_t clk_lock; /* for changing enabled state */ -- unsigned int fclk_enabled:1; -+ int response_busy; -+ int context_loss; -+ int dpm_state; -+ int vdd; -+ int protect_card; -+ int reqs_blocked; - - struct omap_mmc_platform_data *pdata; - }; - --static int mmc_omap_fclk_state(struct mmc_omap_host *host, unsigned int state) -+/* -+ * Stop clock to the card -+ */ -+static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) - { -- unsigned long flags; -- int ret = 0; -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, -+ OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); -+ if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) -+ dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); -+} - -- spin_lock_irqsave(&host->clk_lock, flags); -- del_timer(&host->idle_timer); -- if (host->fclk_enabled != state) { -- if (state == ON) { -- ret = clk_enable(host->fclk); -- if (ret != 0) -- goto err_out; -+#ifdef CONFIG_PM - -- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n"); -- } else { -- clk_disable(host->fclk); -- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n"); -- } -- host->fclk_enabled = state; -+/* -+ * Restore the MMC host context, if it was lost as result of a -+ * power state change. -+ */ -+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) -+{ -+ struct mmc_ios *ios = &host->mmc->ios; -+ struct omap_mmc_platform_data *pdata = host->pdata; -+ int context_loss = 0; -+ u32 hctl, capa, con; -+ u16 dsor = 0; -+ unsigned long timeout; -+ -+ if (pdata->get_context_loss_count) { -+ context_loss = pdata->get_context_loss_count(host->dev); -+ if (context_loss < 0) -+ return 1; - } - --err_out: -- spin_unlock_irqrestore(&host->clk_lock, flags); -- return ret; -+ dev_dbg(mmc_dev(host->mmc), "context was %slost\n", -+ context_loss == host->context_loss ? 
"not " : ""); -+ if (host->context_loss == context_loss) -+ return 1; -+ -+ /* Wait for hardware reset */ -+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -+ while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE -+ && time_before(jiffies, timeout)) -+ ; -+ -+ /* Do software reset */ -+ OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET); -+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -+ while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE -+ && time_before(jiffies, timeout)) -+ ; -+ -+ OMAP_HSMMC_WRITE(host->base, SYSCONFIG, -+ OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); -+ -+ if (host->id == OMAP_MMC1_DEVID) { -+ if (host->power_mode != MMC_POWER_OFF && -+ (1 << ios->vdd) <= MMC_VDD_23_24) -+ hctl = SDVS18; -+ else -+ hctl = SDVS30; -+ capa = VS30 | VS18; -+ } else { -+ hctl = SDVS18; -+ capa = VS18; -+ } -+ -+ OMAP_HSMMC_WRITE(host->base, HCTL, -+ OMAP_HSMMC_READ(host->base, HCTL) | hctl); -+ -+ OMAP_HSMMC_WRITE(host->base, CAPA, -+ OMAP_HSMMC_READ(host->base, CAPA) | capa); -+ -+ OMAP_HSMMC_WRITE(host->base, HCTL, -+ OMAP_HSMMC_READ(host->base, HCTL) | SDBP); -+ -+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -+ while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP -+ && time_before(jiffies, timeout)) -+ ; -+ -+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); -+ OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); -+ OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); -+ -+ /* Do not initialize card-specific things if the power is off */ -+ if (host->power_mode == MMC_POWER_OFF) -+ goto out; -+ -+ con = OMAP_HSMMC_READ(host->base, CON); -+ switch (ios->bus_width) { -+ case MMC_BUS_WIDTH_8: -+ OMAP_HSMMC_WRITE(host->base, CON, con | DW8); -+ break; -+ case MMC_BUS_WIDTH_4: -+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); -+ OMAP_HSMMC_WRITE(host->base, HCTL, -+ OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); -+ break; -+ case MMC_BUS_WIDTH_1: -+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); -+ OMAP_HSMMC_WRITE(host->base, HCTL, -+ OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); -+ break; -+ } -+ -+ if (ios->clock) { -+ dsor = OMAP_MMC_MASTER_CLOCK / ios->clock; -+ if (dsor < 1) -+ dsor = 1; -+ -+ if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock) -+ dsor++; -+ -+ if (dsor > 250) -+ dsor = 250; -+ } -+ -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, -+ OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16)); -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, -+ OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); -+ -+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS -+ && time_before(jiffies, timeout)) -+ ; -+ -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, -+ OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); -+ -+ con = OMAP_HSMMC_READ(host->base, CON); -+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) -+ OMAP_HSMMC_WRITE(host->base, CON, con | OD); -+ else -+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); -+out: -+ host->context_loss = context_loss; -+ -+ dev_dbg(mmc_dev(host->mmc), "context is restored\n"); -+ return 0; - } - --static void mmc_omap_idle_timer(unsigned long data) -+/* -+ * Save the MMC host context (store the number of power state changes so far). 
-+ */ -+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) - { -- struct mmc_omap_host *host = (struct mmc_omap_host *) data; -+ struct omap_mmc_platform_data *pdata = host->pdata; -+ int context_loss; - -- mmc_omap_fclk_state(host, OFF); -+ if (pdata->get_context_loss_count) { -+ context_loss = pdata->get_context_loss_count(host->dev); -+ if (context_loss < 0) -+ return; -+ host->context_loss = context_loss; -+ } - } - --static void mmc_omap_fclk_lazy_disable(struct mmc_omap_host *host) -+#else -+ -+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) - { -- mod_timer(&host->idle_timer, jiffies + IDLE_TIMEOUT); -+ return 0; - } - --/* -- * Stop clock to the card -- */ --static void omap_mmc_stop_clock(struct mmc_omap_host *host) -+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) - { -- OMAP_HSMMC_WRITE(host->base, SYSCTL, -- OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); -- if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) -- dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); - } - -+#endif -+ - /* - * Send init stream sequence to card - * before sending IDLE command - */ --static void send_init_stream(struct mmc_omap_host *host) -+static void send_init_stream(struct omap_hsmmc_host *host) - { - int reg = 0; - unsigned long timeout; - -+ if (host->protect_card) -+ return; -+ - disable_irq(host->irq); - OMAP_HSMMC_WRITE(host->base, CON, - OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); -@@ -228,51 +359,53 @@ static void send_init_stream(struct mmc_ - - OMAP_HSMMC_WRITE(host->base, CON, - OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); -+ -+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); -+ OMAP_HSMMC_READ(host->base, STAT); -+ - enable_irq(host->irq); - } - - static inline --int mmc_omap_cover_is_closed(struct mmc_omap_host *host) -+int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host) - { - int r = 1; - -- if (host->pdata->slots[host->slot_id].get_cover_state) -- r = host->pdata->slots[host->slot_id].get_cover_state(host->dev, -- host->slot_id); -+ if (mmc_slot(host).get_cover_state) -+ r = mmc_slot(host).get_cover_state(host->dev, host->slot_id); - return r; - } - - static ssize_t --mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr, -+omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr, - char *buf) - { - struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); -- struct mmc_omap_host *host = mmc_priv(mmc); -+ struct omap_hsmmc_host *host = mmc_priv(mmc); - -- return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" : -- "open"); -+ return sprintf(buf, "%s\n", -+ omap_hsmmc_cover_is_closed(host) ? 
"closed" : "open"); - } - --static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL); -+static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL); - - static ssize_t --mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr, -+omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr, - char *buf) - { - struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); -- struct mmc_omap_host *host = mmc_priv(mmc); -- struct omap_mmc_slot_data slot = host->pdata->slots[host->slot_id]; -+ struct omap_hsmmc_host *host = mmc_priv(mmc); - -- return sprintf(buf, "slot:%s\n", slot.name); -+ return sprintf(buf, "%s\n", mmc_slot(host).name); - } - --static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL); -+static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL); - - /* - * Configure the response type and send the cmd. - */ - static void --mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd, -+omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, - struct mmc_data *data) - { - int cmdreg = 0, resptype = 0, cmdtype = 0; -@@ -288,10 +421,14 @@ mmc_omap_start_command(struct mmc_omap_h - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); - -+ host->response_busy = 0; - if (cmd->flags & MMC_RSP_PRESENT) { - if (cmd->flags & MMC_RSP_136) - resptype = 1; -- else -+ else if (cmd->flags & MMC_RSP_BUSY) { -+ resptype = 3; -+ host->response_busy = 1; -+ } else - resptype = 2; - } - -@@ -316,23 +453,52 @@ mmc_omap_start_command(struct mmc_omap_h - if (host->use_dma) - cmdreg |= DMA_EN; - -+ /* -+ * In an interrupt context (i.e. STOP command), the interrupt is already -+ * enabled, otherwise it is not (i.e. new request). 
-+ */ -+ if (!in_interrupt()) -+ enable_irq(host->irq); -+ - OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); - OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); - } - -+static int -+omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) -+{ -+ if (data->flags & MMC_DATA_WRITE) -+ return DMA_TO_DEVICE; -+ else -+ return DMA_FROM_DEVICE; -+} -+ - /* - * Notify the transfer complete to MMC core - */ - static void --mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) -+omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) - { -+ if (!data) { -+ struct mmc_request *mrq = host->mrq; -+ -+ /* TC before CC from CMD6 - don't know why, but it happens */ -+ if (host->cmd && host->cmd->opcode == 6 && -+ host->response_busy) { -+ host->response_busy = 0; -+ return; -+ } -+ -+ host->mrq = NULL; -+ mmc_request_done(host->mmc, mrq); -+ return; -+ } -+ - host->data = NULL; - - if (host->use_dma && host->dma_ch != -1) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, -- host->dma_dir); -- -- host->datadir = OMAP_MMC_DATADIR_NONE; -+ omap_hsmmc_get_dma_dir(host, data)); - - if (!data->error) - data->bytes_xfered += data->blocks * (data->blksz); -@@ -341,18 +507,17 @@ mmc_omap_xfer_done(struct mmc_omap_host - - if (!data->stop) { - host->mrq = NULL; -- mmc_omap_fclk_lazy_disable(host); - mmc_request_done(host->mmc, data->mrq); - return; - } -- mmc_omap_start_command(host, data->stop, NULL); -+ omap_hsmmc_start_command(host, data->stop, NULL); - } - - /* - * Notify the core about command completion - */ - static void --mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) -+omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) - { - host->cmd = NULL; - -@@ -368,9 +533,8 @@ mmc_omap_cmd_done(struct mmc_omap_host * - cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); - } - } -- if (host->data == NULL || cmd->error) { -+ if ((host->data == NULL && !host->response_busy) || cmd->error) { - host->mrq = NULL; -- mmc_omap_fclk_lazy_disable(host); - mmc_request_done(host->mmc, cmd->mrq); - } - } -@@ -378,29 +542,28 @@ mmc_omap_cmd_done(struct mmc_omap_host * - /* - * DMA clean up for command errors - */ --static void mmc_dma_cleanup(struct mmc_omap_host *host) -+static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) - { -- host->data->error = -ETIMEDOUT; -+ host->data->error = errno; - - if (host->use_dma && host->dma_ch != -1) { - dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, -- host->dma_dir); -+ omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - up(&host->sem); - } - host->data = NULL; -- host->datadir = OMAP_MMC_DATADIR_NONE; - } - - /* - * Readable error output - */ - #ifdef CONFIG_MMC_DEBUG --static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status) -+static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status) - { - /* --- means reserved bit without definition at documentation */ -- static const char *mmc_omap_status_bits[] = { -+ static const char *omap_hsmmc_status_bits[] = { - "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ", - "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC", - "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---", -@@ -413,9 +576,9 @@ static void mmc_omap_report_irq(struct m - len = sprintf(buf, "MMC IRQ 0x%x :", status); - buf += len; - -- for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) -+ for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++) - 
if (status & (1 << i)) { -- len = sprintf(buf, " %s", mmc_omap_status_bits[i]); -+ len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]); - buf += len; - } - -@@ -427,15 +590,16 @@ static void mmc_omap_report_irq(struct m - /* - * MMC controller IRQ handler - */ --static irqreturn_t mmc_omap_irq(int irq, void *dev_id) -+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) - { -- struct mmc_omap_host *host = dev_id; -+ struct omap_hsmmc_host *host = dev_id; - struct mmc_data *data; - int end_cmd = 0, end_trans = 0, status; - -- if (host->cmd == NULL && host->data == NULL) { -+ if (host->mrq == NULL) { - OMAP_HSMMC_WRITE(host->base, STAT, - OMAP_HSMMC_READ(host->base, STAT)); -+ OMAP_HSMMC_READ(host->base, STAT); - return IRQ_HANDLED; - } - -@@ -445,7 +609,7 @@ static irqreturn_t mmc_omap_irq(int irq, - - if (status & ERR) { - #ifdef CONFIG_MMC_DEBUG -- mmc_omap_report_irq(host, status); -+ omap_hsmmc_report_irq(host, status); - #endif - if ((status & CMD_TIMEOUT) || - (status & CMD_CRC)) { -@@ -464,16 +628,30 @@ static irqreturn_t mmc_omap_irq(int irq, - } - end_cmd = 1; - } -- if (host->data) -- mmc_dma_cleanup(host); -+ if (host->data || host->response_busy) { -+ if (host->data) -+ omap_hsmmc_dma_cleanup(host, -+ -ETIMEDOUT); -+ host->response_busy = 0; -+ -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, -+ OMAP_HSMMC_READ(host->base, -+ SYSCTL) | SRD); -+ while (OMAP_HSMMC_READ(host->base, -+ SYSCTL) & SRD) ; -+ } - } -- if ((status & DATA_TIMEOUT) || -- (status & DATA_CRC)) { -- if (host->data) { -- if (status & DATA_TIMEOUT) -- mmc_dma_cleanup(host); -+ if ((status & DATA_TIMEOUT) || (status & DATA_CRC)) { -+ if (host->data || host->response_busy) { -+ int err = (status & DATA_TIMEOUT) ? -+ -ETIMEDOUT : -EILSEQ; -+ -+ if (host->data) -+ omap_hsmmc_dma_cleanup(host, err); - else -- host->data->error = -EILSEQ; -+ host->mrq->cmd->error = err; -+ host->response_busy = 0; -+ - OMAP_HSMMC_WRITE(host->base, SYSCTL, - OMAP_HSMMC_READ(host->base, - SYSCTL) | SRD); -@@ -494,11 +672,12 @@ static irqreturn_t mmc_omap_irq(int irq, - } - - OMAP_HSMMC_WRITE(host->base, STAT, status); -+ OMAP_HSMMC_READ(host->base, STAT); - -- if (end_cmd || (status & CC)) -- mmc_omap_cmd_done(host, host->cmd); -- if (end_trans || (status & TC)) -- mmc_omap_xfer_done(host, data); -+ if (end_cmd || ((status & CC) && host->cmd)) -+ omap_hsmmc_cmd_done(host, host->cmd); -+ if ((end_trans || (status & TC)) && host->mrq) -+ omap_hsmmc_xfer_done(host, data); - - return IRQ_HANDLED; - } -@@ -506,29 +685,31 @@ static irqreturn_t mmc_omap_irq(int irq, - /* - * Switch MMC operating voltage - */ --static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd) -+static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) - { - u32 reg_val = 0; - int ret; - - /* Disable the clocks */ -- mmc_omap_fclk_state(host, OFF); -+ clk_disable(host->fclk); - clk_disable(host->iclk); -- clk_disable(host->dbclk); -+ if (host->dbclk_enabled) -+ clk_disable(host->dbclk); - - /* Turn the power off */ - ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); -- if (ret != 0) -- goto err; - - /* Turn the power ON with given VDD 1.8 or 3.0v */ -- ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); -- if (ret != 0) -- goto err; -+ if (!ret) -+ ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); - -- mmc_omap_fclk_state(host, ON); - clk_enable(host->iclk); -- clk_enable(host->dbclk); -+ if (host->dbclk_enabled) -+ clk_enable(host->dbclk); -+ clk_enable(host->fclk); -+ -+ if (ret != 0) -+ goto err; 
- - OMAP_HSMMC_WRITE(host->base, HCTL, - OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR); -@@ -536,17 +717,16 @@ static int omap_mmc_switch_opcond(struct - /* - * If a MMC dual voltage card is detected, the set_ios fn calls - * this fn with VDD bit set for 1.8V. Upon card removal from the -- * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF. -+ * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF. - * - * Only MMC1 supports 3.0V. MMC2 will not function if SDVS30 is - * set in HCTL. - */ - if (host->id == OMAP_MMC1_DEVID) { -- if (((1 << vdd) == MMC_VDD_32_33) || -- ((1 << vdd) == MMC_VDD_33_34)) -- reg_val |= SDVS30; -- else if ((1 << vdd) == MMC_VDD_165_195) -+ if ((1 << vdd) == MMC_VDD_165_195) - reg_val |= SDVS18; -+ else -+ reg_val |= SDVS30; - } else - reg_val |= SDVS18; - -@@ -561,48 +741,131 @@ err: - return ret; - } - -+/* Protect the card while the cover is open */ -+static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) -+{ -+ if (!mmc_slot(host).get_cover_state) -+ return; -+ -+ host->reqs_blocked = 0; -+ if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { -+ if (host->protect_card) { -+ printk(KERN_INFO "%s: cover is closed, " -+ "card is now accessible\n", -+ mmc_hostname(host->mmc)); -+ host->protect_card = 0; -+ } -+ } else { -+ if (!host->protect_card) { -+ printk(KERN_INFO "%s: cover is open, " -+ "card is now inaccessible\n", -+ mmc_hostname(host->mmc)); -+ host->protect_card = 1; -+ } -+ } -+} -+ - /* - * Work Item to notify the core about card insertion/removal - */ --static void mmc_omap_detect(struct work_struct *work) -+static void omap_hsmmc_detect(struct work_struct *work) - { -- struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, -- mmc_carddetect_work); -+ struct omap_hsmmc_host *host = -+ container_of(work, struct omap_hsmmc_host, mmc_carddetect_work); -+ int carddetect; - -+ if (host->suspended) -+ return; - sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); -- mmc_omap_fclk_state(host, ON); -- if (host->carddetect) { -+ if (mmc_slot(host).card_detect) -+ carddetect = mmc_slot(host).card_detect(mmc_slot(host).card_detect_irq); -+ else { -+ omap_hsmmc_protect_card(host); -+ carddetect = -ENOSYS; -+ } -+ if (carddetect) { - mmc_detect_change(host->mmc, (HZ * 200) / 1000); - } else { -+ mmc_host_enable(host->mmc); - OMAP_HSMMC_WRITE(host->base, SYSCTL, - OMAP_HSMMC_READ(host->base, SYSCTL) | SRD); - while (OMAP_HSMMC_READ(host->base, SYSCTL) & SRD) - ; -+ mmc_host_lazy_disable(host->mmc); - - mmc_detect_change(host->mmc, (HZ * 50) / 1000); - } -- mmc_omap_fclk_lazy_disable(host); - } - - /* - * ISR for handling card insertion and removal - */ --static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id) -+static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id) - { -- struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id; -+ struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id; - -- host->carddetect = mmc_slot(host).card_detect(irq); -+ if (host->suspended) -+ return IRQ_HANDLED; - schedule_work(&host->mmc_carddetect_work); - - return IRQ_HANDLED; - } - -+static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, -+ struct mmc_data *data) -+{ -+ int sync_dev; -+ -+ if (data->flags & MMC_DATA_WRITE) { -+ if (host->id == OMAP_MMC1_DEVID) -+ sync_dev = OMAP24XX_DMA_MMC1_TX; -+ else -+ sync_dev = OMAP24XX_DMA_MMC2_TX; -+ } else { -+ if (host->id == OMAP_MMC1_DEVID) -+ sync_dev = OMAP24XX_DMA_MMC1_RX; -+ else -+ sync_dev = OMAP24XX_DMA_MMC2_RX; -+ } -+ 
return sync_dev; -+} -+ -+static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, -+ struct mmc_data *data, -+ struct scatterlist *sgl) -+{ -+ int blksz, nblk, dma_ch; -+ -+ dma_ch = host->dma_ch; -+ if (data->flags & MMC_DATA_WRITE) { -+ omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, -+ (host->mapbase + OMAP_HSMMC_DATA), 0, 0); -+ omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, -+ sg_dma_address(sgl), 0, 0); -+ } else { -+ omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, -+ (host->mapbase + OMAP_HSMMC_DATA), 0, 0); -+ omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, -+ sg_dma_address(sgl), 0, 0); -+ } -+ -+ blksz = host->data->blksz; -+ nblk = sg_dma_len(sgl) / blksz; -+ -+ omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, -+ blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, -+ omap_hsmmc_get_dma_sync_dev(host, data), -+ !(data->flags & MMC_DATA_WRITE)); -+ -+ omap_start_dma(dma_ch); -+} -+ - /* - * DMA call back function - */ --static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) -+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) - { -- struct mmc_omap_host *host = data; -+ struct omap_hsmmc_host *host = data; - - if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) - dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); -@@ -610,6 +873,14 @@ static void mmc_omap_dma_cb(int lch, u16 - if (host->dma_ch < 0) - return; - -+ host->dma_sg_idx++; -+ if (host->dma_sg_idx < host->dma_len) { -+ /* Fire up the next transfer. */ -+ omap_hsmmc_config_dma_params(host, host->data, -+ host->data->sg + host->dma_sg_idx); -+ return; -+ } -+ - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - /* -@@ -620,38 +891,28 @@ static void mmc_omap_dma_cb(int lch, u16 - } - - /* -- * Configure dma src and destination parameters -- */ --static int mmc_omap_config_dma_param(int sync_dir, struct mmc_omap_host *host, -- struct mmc_data *data) --{ -- if (sync_dir == 0) { -- omap_set_dma_dest_params(host->dma_ch, 0, -- OMAP_DMA_AMODE_CONSTANT, -- (host->mapbase + OMAP_HSMMC_DATA), 0, 0); -- omap_set_dma_src_params(host->dma_ch, 0, -- OMAP_DMA_AMODE_POST_INC, -- sg_dma_address(&data->sg[0]), 0, 0); -- } else { -- omap_set_dma_src_params(host->dma_ch, 0, -- OMAP_DMA_AMODE_CONSTANT, -- (host->mapbase + OMAP_HSMMC_DATA), 0, 0); -- omap_set_dma_dest_params(host->dma_ch, 0, -- OMAP_DMA_AMODE_POST_INC, -- sg_dma_address(&data->sg[0]), 0, 0); -- } -- return 0; --} --/* - * Routine to configure and start DMA for the MMC card - */ --static int --mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req) -+static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, -+ struct mmc_request *req) - { -- int sync_dev, sync_dir = 0; -- int dma_ch = 0, ret = 0, err = 1; -+ int dma_ch = 0, ret = 0, err = 1, i; - struct mmc_data *data = req->data; - -+ /* Sanity check: all the SG entries must be aligned by block size. */ -+ for (i = 0; i < data->sg_len; i++) { -+ struct scatterlist *sgl; -+ -+ sgl = data->sg + i; -+ if (sgl->length % data->blksz) -+ return -EINVAL; -+ } -+ if ((data->blksz % 4) != 0) -+ /* REVISIT: The MMC buffer increments only when MSB is written. -+ * Return error for blksz which is non multiple of four. 
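omap_hsmmc_config_dma_params() above programs the DMA controller one scatterlist segment at a time: each element is a 32-bit word, so a frame carries blksz / 4 elements, the frame count is the segment length divided by the block size, and the DMA callback later advances dma_sg_idx to kick off the next segment. The arithmetic, spelled out for a hypothetical 8 KiB segment:

#include <stdio.h>

int main(void)
{
	unsigned int blksz = 512;	/* bytes per MMC block */
	unsigned int seg_len = 8192;	/* bytes in one scatterlist segment */

	/* Same split as the driver: 32-bit words per frame, one frame per block. */
	unsigned int words_per_frame = blksz / 4;
	unsigned int frames = seg_len / blksz;

	/* Prints: 8192-byte segment -> 16 frames of 128 words */
	printf("%u-byte segment -> %u frames of %u words\n",
	       seg_len, frames, words_per_frame);

	/* This also explains the sanity checks above: blksz must be a multiple
	 * of four and every segment a multiple of blksz. */
	return 0;
}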
-+ */ -+ return -EINVAL; -+ - /* - * If for some reason the DMA transfer is still active, - * we wait for timeout period and free the dma -@@ -670,54 +931,28 @@ mmc_omap_start_dma_transfer(struct mmc_o - return err; - } - -- if (!(data->flags & MMC_DATA_WRITE)) { -- host->dma_dir = DMA_FROM_DEVICE; -- if (host->id == OMAP_MMC1_DEVID) -- sync_dev = OMAP24XX_DMA_MMC1_RX; -- else -- sync_dev = OMAP24XX_DMA_MMC2_RX; -- } else { -- host->dma_dir = DMA_TO_DEVICE; -- if (host->id == OMAP_MMC1_DEVID) -- sync_dev = OMAP24XX_DMA_MMC1_TX; -- else -- sync_dev = OMAP24XX_DMA_MMC2_TX; -- } -- -- ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb, -- host, &dma_ch); -+ ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), -+ "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); - if (ret != 0) { -- dev_dbg(mmc_dev(host->mmc), -+ dev_err(mmc_dev(host->mmc), - "%s: omap_request_dma() failed with %d\n", - mmc_hostname(host->mmc), ret); - return ret; - } - - host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, -- data->sg_len, host->dma_dir); -+ data->sg_len, omap_hsmmc_get_dma_dir(host, data)); - host->dma_ch = dma_ch; -+ host->dma_sg_idx = 0; - -- if (!(data->flags & MMC_DATA_WRITE)) -- mmc_omap_config_dma_param(1, host, data); -- else -- mmc_omap_config_dma_param(0, host, data); -- -- if ((data->blksz % 4) == 0) -- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, -- (data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME, -- sync_dev, sync_dir); -- else -- /* REVISIT: The MMC buffer increments only when MSB is written. -- * Return error for blksz which is non multiple of four. -- */ -- return -EINVAL; -+ omap_hsmmc_config_dma_params(host, data, data->sg); - -- omap_start_dma(dma_ch); - return 0; - } - --static void set_data_timeout(struct mmc_omap_host *host, -- struct mmc_request *req) -+static void set_data_timeout(struct omap_hsmmc_host *host, -+ unsigned int timeout_ns, -+ unsigned int timeout_clks) - { - unsigned int timeout, cycle_ns; - uint32_t reg, clkd, dto = 0; -@@ -728,8 +963,8 @@ static void set_data_timeout(struct mmc_ - clkd = 1; - - cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd); -- timeout = req->data->timeout_ns / cycle_ns; -- timeout += req->data->timeout_clks; -+ timeout = timeout_ns / cycle_ns; -+ timeout += timeout_clks; - if (timeout) { - while ((timeout & 0x80000000) == 0) { - dto += 1; -@@ -756,26 +991,28 @@ static void set_data_timeout(struct mmc_ - * Configure block length for MMC/SD cards and initiate the transfer. - */ - static int --mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) -+omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) - { - int ret; - host->data = req->data; - - if (req->data == NULL) { -- host->datadir = OMAP_MMC_DATADIR_NONE; - OMAP_HSMMC_WRITE(host->base, BLK, 0); -+ /* -+ * Set an arbitrary 100ms data timeout for commands with -+ * busy signal. -+ */ -+ if (req->cmd->flags & MMC_RSP_BUSY) -+ set_data_timeout(host, 100000000U, 0); - return 0; - } - - OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) - | (req->data->blocks << 16)); -- set_data_timeout(host, req); -- -- host->datadir = (req->data->flags & MMC_DATA_WRITE) ? 
-- OMAP_MMC_DATADIR_WRITE : OMAP_MMC_DATADIR_READ; -+ set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks); - - if (host->use_dma) { -- ret = mmc_omap_start_dma_transfer(host, req); -+ ret = omap_hsmmc_start_dma_transfer(host, req); - if (ret != 0) { - dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n"); - return ret; -@@ -787,52 +1024,111 @@ mmc_omap_prepare_data(struct mmc_omap_ho - /* - * Request function. for read/write operation - */ --static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req) -+static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) - { -- struct mmc_omap_host *host = mmc_priv(mmc); -+ struct omap_hsmmc_host *host = mmc_priv(mmc); -+ int err; - -+ /* -+ * Prevent races with the interrupt handler because of unexpected -+ * interrupts, but not if we are already in interrupt context i.e. -+ * retries. -+ */ -+ if (!in_interrupt()) { -+ disable_irq(host->irq); -+ /* -+ * Protect the card from I/O if there is a possibility -+ * it can be removed. -+ */ -+ if (host->protect_card) { -+ if (host->reqs_blocked < 3) { -+ u32 reg; -+ -+ /* -+ * Ensure the controller is left in a consistent state by resetting the -+ * command and data state machines. -+ */ -+ reg = OMAP_HSMMC_READ(host->base, SYSCTL); -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, reg | SRD); -+ while (OMAP_HSMMC_READ(host->base, SYSCTL) & SRD) -+ ; -+ reg = OMAP_HSMMC_READ(host->base, SYSCTL); -+ OMAP_HSMMC_WRITE(host->base, SYSCTL, reg | SRC); -+ while (OMAP_HSMMC_READ(host->base, SYSCTL) & SRC) -+ ; -+ host->reqs_blocked += 1; -+ } -+ req->cmd->error = -ENODEV; -+ if (req->data) -+ req->data->error = -ENODEV; -+ enable_irq(host->irq); -+ mmc_request_done(mmc, req); -+ return; -+ } else if (host->reqs_blocked) -+ host->reqs_blocked = 0; -+ } - WARN_ON(host->mrq != NULL); - host->mrq = req; -- mmc_omap_fclk_state(host, ON); -- mmc_omap_prepare_data(host, req); -- mmc_omap_start_command(host, req->cmd, req->data); -+ err = omap_hsmmc_prepare_data(host, req); -+ if (err) { -+ req->cmd->error = err; -+ if (req->data) -+ req->data->error = err; -+ host->mrq = NULL; -+ if (!in_interrupt()) -+ enable_irq(host->irq); -+ mmc_request_done(mmc, req); -+ return; -+ } -+ -+ omap_hsmmc_start_command(host, req->cmd, req->data); - } - - /* Routine to configure clock values. Exposed API to core */ --static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -+static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) - { -- struct mmc_omap_host *host = mmc_priv(mmc); -+ struct omap_hsmmc_host *host = mmc_priv(mmc); - u16 dsor = 0; - unsigned long regval; - unsigned long timeout; -+ u32 con; -+ int do_send_init_stream = 0; - -- mmc_omap_fclk_state(host, ON); -+ mmc_host_enable(host->mmc); - -- switch (ios->power_mode) { -- case MMC_POWER_OFF: -- mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); -- /* -- * Reset bus voltage to 3V if it got set to 1.8V earlier. -- * REVISIT: If we are able to detect cards after unplugging -- * a 1.8V card, this code should not be needed. 
-- */ -- if (!(OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET)) { -- int vdd = fls(host->mmc->ocr_avail) - 1; -- if (omap_mmc_switch_opcond(host, vdd) != 0) -- host->mmc->ios.vdd = vdd; -+ if (ios->power_mode != host->power_mode) { -+ switch (ios->power_mode) { -+ case MMC_POWER_OFF: -+ mmc_slot(host).set_power(host->dev, host->slot_id, -+ 0, 0); -+ host->vdd = 0; -+ break; -+ case MMC_POWER_UP: -+ mmc_slot(host).set_power(host->dev, host->slot_id, -+ 1, ios->vdd); -+ host->vdd = ios->vdd; -+ break; -+ case MMC_POWER_ON: -+ do_send_init_stream = 1; -+ break; - } -- break; -- case MMC_POWER_UP: -- mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd); -- break; -+ host->power_mode = ios->power_mode; - } - -+ /* FIXME: set registers based only on changes to ios */ -+ -+ con = OMAP_HSMMC_READ(host->base, CON); - switch (mmc->ios.bus_width) { -+ case MMC_BUS_WIDTH_8: -+ OMAP_HSMMC_WRITE(host->base, CON, con | DW8); -+ break; - case MMC_BUS_WIDTH_4: -+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); - OMAP_HSMMC_WRITE(host->base, HCTL, - OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); - break; - case MMC_BUS_WIDTH_1: -+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); - OMAP_HSMMC_WRITE(host->base, HCTL, - OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); - break; -@@ -848,8 +1144,8 @@ static void omap_mmc_set_ios(struct mmc_ - * MMC_POWER_UP upon recalculating the voltage. - * vdd 1.8v. - */ -- if (omap_mmc_switch_opcond(host, ios->vdd) != 0) -- dev_dbg(mmc_dev(host->mmc), -+ if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0) -+ dev_dbg(mmc_dev(host->mmc), - "Switch operation failed\n"); - } - } -@@ -865,7 +1161,7 @@ static void omap_mmc_set_ios(struct mmc_ - if (dsor > 250) - dsor = 250; - } -- omap_mmc_stop_clock(host); -+ omap_hsmmc_stop_clock(host); - regval = OMAP_HSMMC_READ(host->base, SYSCTL); - regval = regval & ~(CLKD_MASK); - regval = regval | (dsor << 6) | (DTO << 16); -@@ -875,59 +1171,434 @@ static void omap_mmc_set_ios(struct mmc_ - - /* Wait till the ICS bit is set */ - timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -- while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2 -+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS - && time_before(jiffies, timeout)) - msleep(1); - - OMAP_HSMMC_WRITE(host->base, SYSCTL, - OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); - -- if (ios->power_mode == MMC_POWER_ON) -+ if (do_send_init_stream) - send_init_stream(host); - -+ con = OMAP_HSMMC_READ(host->base, CON); - if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) -- OMAP_HSMMC_WRITE(host->base, CON, -- OMAP_HSMMC_READ(host->base, CON) | OD); -+ OMAP_HSMMC_WRITE(host->base, CON, con | OD); -+ else -+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); - -- mmc_omap_fclk_lazy_disable(host); -+ if (host->power_mode == MMC_POWER_OFF) -+ mmc_host_disable(host->mmc); -+ else -+ mmc_host_lazy_disable(host->mmc); - } - - static int omap_hsmmc_get_cd(struct mmc_host *mmc) - { -- struct mmc_omap_host *host = mmc_priv(mmc); -- struct omap_mmc_platform_data *pdata = host->pdata; -+ struct omap_hsmmc_host *host = mmc_priv(mmc); - -- if (!pdata->slots[0].card_detect) -+ if (!mmc_slot(host).card_detect) - return -ENOSYS; -- return pdata->slots[0].card_detect(pdata->slots[0].card_detect_irq); -+ return mmc_slot(host).card_detect(mmc_slot(host).card_detect_irq); - } - - static int omap_hsmmc_get_ro(struct mmc_host *mmc) - { -- struct mmc_omap_host *host = mmc_priv(mmc); -- struct omap_mmc_platform_data *pdata = host->pdata; -+ struct omap_hsmmc_host *host = mmc_priv(mmc); - -- if 
(!pdata->slots[0].get_ro) -+ if (!mmc_slot(host).get_ro) - return -ENOSYS; -- return pdata->slots[0].get_ro(host->dev, 0); -+ return mmc_slot(host).get_ro(host->dev, 0); -+} -+ -+static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) -+{ -+ int i; -+ u32 hctl, capa; -+ -+ /* Only MMC1 supports 3.0V */ -+ if (host->id == OMAP_MMC1_DEVID) { -+ hctl = SDVS30; -+ capa = VS30 | VS18; -+ } else { -+ hctl = SDVS18; -+ capa = VS18; -+ } -+ -+ OMAP_HSMMC_WRITE(host->base, HCTL, -+ OMAP_HSMMC_READ(host->base, HCTL) | hctl); -+ -+ OMAP_HSMMC_WRITE(host->base, CAPA, -+ OMAP_HSMMC_READ(host->base, CAPA) | capa); -+ -+ /* Set the controller to AUTO IDLE mode */ -+ OMAP_HSMMC_WRITE(host->base, SYSCONFIG, -+ OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); -+ -+ /* Set SD bus power bit */ -+ OMAP_HSMMC_WRITE(host->base, HCTL, -+ OMAP_HSMMC_READ(host->base, HCTL) | SDBP); -+ -+ for (i = 0; i < 100; i++) -+ if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP) -+ break; -+} -+ -+/* -+ * Dynamic power saving handling, FSM: -+ * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF -+ * ^___________| | | -+ * |______________________|______________________| -+ * -+ * ENABLED: mmc host is fully functional -+ * DISABLED: fclk is off -+ * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep -+ * REGSLEEP: fclk is off, voltage regulator is asleep -+ * OFF: fclk is off, voltage regulator is off -+ * -+ * Transition handlers return the timeout for the next state transition -+ * or negative error. -+ */ -+ -+enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF}; -+ -+/* Handler for [ENABLED -> DISABLED] transition */ -+static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host) -+{ -+ omap_hsmmc_context_save(host); -+ -+ clk_disable(host->fclk); -+ -+ /* drop PM/DVFS constraints */ -+ if (host->pdata->set_pm_constraints) -+ host->pdata->set_pm_constraints(host->dev, 0); -+ -+ host->dpm_state = DISABLED; -+ -+ dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n"); -+ -+ if (host->power_mode == MMC_POWER_OFF) -+ return 0; -+ -+ return msecs_to_jiffies(OMAP_MMC_SLEEP_TIMEOUT); - } - --static struct mmc_host_ops mmc_omap_ops = { -- .request = omap_mmc_request, -- .set_ios = omap_mmc_set_ios, -+/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */ -+static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host) -+{ -+ int err, new_state; -+ -+ if (!mmc_try_claim_host(host->mmc)) -+ return 0; -+ -+ clk_enable(host->fclk); -+ omap_hsmmc_context_restore(host); -+ if (mmc_card_can_sleep(host->mmc)) { -+ err = mmc_card_sleep(host->mmc); -+ if (err < 0) { -+ clk_disable(host->fclk); -+ mmc_release_host(host->mmc); -+ return err; -+ } -+ new_state = CARDSLEEP; -+ } else { -+ new_state = REGSLEEP; -+ } -+ if (mmc_slot(host).set_sleep) -+ mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0, -+ new_state == CARDSLEEP); -+ /* FIXME: turn off bus power and perhaps interrupts too */ -+ clk_disable(host->fclk); -+ host->dpm_state = new_state; -+ -+ mmc_release_host(host->mmc); -+ -+ dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n", -+ host->dpm_state == CARDSLEEP ? 
"CARDSLEEP" : "REGSLEEP"); -+ -+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) || -+ mmc_slot(host).card_detect || -+ (mmc_slot(host).get_cover_state && -+ mmc_slot(host).get_cover_state(host->dev, host->slot_id))) -+ return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT); -+ -+ return 0; -+} -+ -+/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */ -+static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host) -+{ -+ if (!mmc_try_claim_host(host->mmc)) -+ return 0; -+ -+ if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) || -+ mmc_slot(host).card_detect || -+ (mmc_slot(host).get_cover_state && -+ mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) { -+ mmc_release_host(host->mmc); -+ return 0; -+ } -+ -+ mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); -+ host->vdd = 0; -+ host->power_mode = MMC_POWER_OFF; -+ -+ dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n", -+ host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); -+ -+ host->dpm_state = OFF; -+ -+ mmc_release_host(host->mmc); -+ -+ return 0; -+} -+ -+/* Handler for [DISABLED -> ENABLED] transition */ -+static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host) -+{ -+ int err; -+ -+ err = clk_enable(host->fclk); -+ if (err < 0) -+ return err; -+ -+ omap_hsmmc_context_restore(host); -+ -+ if (host->pdata->set_pm_constraints) -+ host->pdata->set_pm_constraints(host->dev, 1); -+ -+ host->dpm_state = ENABLED; -+ -+ dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n"); -+ -+ return 0; -+} -+ -+static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host) -+{ -+ if (!mmc_try_claim_host(host->mmc)) -+ return 0; -+ -+ clk_enable(host->fclk); -+ omap_hsmmc_context_restore(host); -+ if (mmc_slot(host).set_sleep) -+ mmc_slot(host).set_sleep(host->dev, host->slot_id, 0, -+ host->vdd, host->dpm_state == CARDSLEEP); -+ if (mmc_card_can_sleep(host->mmc)) -+ mmc_card_awake(host->mmc); -+ -+ dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n", -+ host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); -+ -+ if (host->pdata->set_pm_constraints) -+ host->pdata->set_pm_constraints(host->dev, 1); -+ -+ host->dpm_state = ENABLED; -+ -+ mmc_release_host(host->mmc); -+ -+ return 0; -+} -+ -+ -+/* Handler for [OFF -> ENABLED] transition */ -+static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host) -+{ -+ clk_enable(host->fclk); -+ -+ omap_hsmmc_context_restore(host); -+ omap_hsmmc_conf_bus_power(host); -+ mmc_power_restore_host(host->mmc); -+ -+ if (host->pdata->set_pm_constraints) -+ host->pdata->set_pm_constraints(host->dev, 1); -+ -+ host->dpm_state = ENABLED; -+ -+ dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n"); -+ -+ return 0; -+} -+ -+/* -+ * Bring MMC host to ENABLED from any other PM state. -+ */ -+static int omap_hsmmc_enable(struct mmc_host *mmc) -+{ -+ struct omap_hsmmc_host *host = mmc_priv(mmc); -+ -+ switch (host->dpm_state) { -+ case DISABLED: -+ return omap_hsmmc_disabled_to_enabled(host); -+ case CARDSLEEP: -+ case REGSLEEP: -+ return omap_hsmmc_sleep_to_enabled(host); -+ case OFF: -+ return omap_hsmmc_off_to_enabled(host); -+ default: -+ dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); -+ return -EINVAL; -+ } -+ -+} -+ -+/* -+ * Bring MMC host in PM state (one level deeper). 
-+ */ -+static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy) -+{ -+ struct omap_hsmmc_host *host = mmc_priv(mmc); -+ -+ switch (host->dpm_state) { -+ case ENABLED: { -+ int delay; -+ -+ delay = omap_hsmmc_enabled_to_disabled(host); -+ if (lazy || delay < 0) -+ return delay; -+ return 0; -+ } -+ case DISABLED: -+ return omap_hsmmc_disabled_to_sleep(host); -+ case CARDSLEEP: -+ case REGSLEEP: -+ return omap_hsmmc_sleep_to_off(host); -+ default: -+ dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); -+ return -EINVAL; -+ } -+} -+ -+static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) -+{ -+ struct omap_hsmmc_host *host = mmc_priv(mmc); -+ int err; -+ -+ err = clk_enable(host->fclk); -+ if (err) -+ return err; -+ dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n"); -+ omap_hsmmc_context_restore(host); -+ return 0; -+} -+ -+static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) -+{ -+ struct omap_hsmmc_host *host = mmc_priv(mmc); -+ -+ omap_hsmmc_context_save(host); -+ clk_disable(host->fclk); -+ dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n"); -+ return 0; -+} -+ -+static const struct mmc_host_ops omap_hsmmc_ops = { -+ .enable = omap_hsmmc_enable_fclk, -+ .disable = omap_hsmmc_disable_fclk, -+ .request = omap_hsmmc_request, -+ .set_ios = omap_hsmmc_set_ios, -+ .get_cd = omap_hsmmc_get_cd, -+ .get_ro = omap_hsmmc_get_ro, -+ /* NYET -- enable_sdio_irq */ -+}; -+ -+static const struct mmc_host_ops omap_hsmmc_ps_ops = { -+ .enable = omap_hsmmc_enable, -+ .disable = omap_hsmmc_disable, -+ .request = omap_hsmmc_request, -+ .set_ios = omap_hsmmc_set_ios, - .get_cd = omap_hsmmc_get_cd, - .get_ro = omap_hsmmc_get_ro, - /* NYET -- enable_sdio_irq */ - }; - --static int __init omap_mmc_probe(struct platform_device *pdev) -+#ifdef CONFIG_DEBUG_FS -+ -+static int omap_hsmmc_regs_show(struct seq_file *s, void *data) -+{ -+ struct mmc_host *mmc = s->private; -+ struct omap_hsmmc_host *host = mmc_priv(mmc); -+ int context_loss = 0; -+ -+#ifdef CONFIG_PM -+ if (host->pdata->get_context_loss_count) -+ context_loss = host->pdata->get_context_loss_count(host->dev); -+#endif -+ -+ seq_printf(s, "mmc%d:\n" -+ " enabled:\t%d\n" -+ " dpm_state:\t%d\n" -+ " nesting_cnt:\t%d\n" -+ " ctx_loss:\t%d:%d\n" -+ "\nregs:\n", -+ mmc->index, mmc->enabled ? 
1 : 0, -+ host->dpm_state, mmc->nesting_cnt, -+ host->context_loss, context_loss); -+ -+ if (host->suspended || host->dpm_state == OFF) { -+ seq_printf(s, "host suspended, can't read registers\n"); -+ return 0; -+ } -+ -+ if (clk_enable(host->fclk) != 0) { -+ seq_printf(s, "can't read the regs\n"); -+ return 0; -+ } -+ -+ seq_printf(s, "SYSCONFIG:\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, SYSCONFIG)); -+ seq_printf(s, "CON:\t\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, CON)); -+ seq_printf(s, "HCTL:\t\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, HCTL)); -+ seq_printf(s, "SYSCTL:\t\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, SYSCTL)); -+ seq_printf(s, "IE:\t\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, IE)); -+ seq_printf(s, "ISE:\t\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, ISE)); -+ seq_printf(s, "CAPA:\t\t0x%08x\n", -+ OMAP_HSMMC_READ(host->base, CAPA)); -+ -+ clk_disable(host->fclk); -+ -+ return 0; -+} -+ -+static int omap_hsmmc_regs_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, omap_hsmmc_regs_show, inode->i_private); -+} -+ -+static const struct file_operations mmc_regs_fops = { -+ .open = omap_hsmmc_regs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static void omap_hsmmc_debugfs(struct mmc_host *mmc) -+{ -+ if (mmc->debugfs_root) -+ debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root, -+ mmc, &mmc_regs_fops); -+} -+ -+#else -+ -+static void omap_hsmmc_debugfs(struct mmc_host *mmc) -+{ -+} -+ -+#endif -+ -+static int __init omap_hsmmc_probe(struct platform_device *pdev) - { - struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; - struct mmc_host *mmc; -- struct mmc_omap_host *host = NULL; -+ struct omap_hsmmc_host *host = NULL; - struct resource *res; - int ret = 0, irq; -- u32 hctl, capa; - - if (pdata == NULL) { - dev_err(&pdev->dev, "Platform Data is missing\n"); -@@ -949,7 +1620,7 @@ static int __init omap_mmc_probe(struct - if (res == NULL) - return -EBUSY; - -- mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev); -+ mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto err; -@@ -967,11 +1638,16 @@ static int __init omap_mmc_probe(struct - host->slot_id = 0; - host->mapbase = res->start; - host->base = ioremap(host->mapbase, SZ_4K); -+ host->power_mode = -1; - - platform_set_drvdata(pdev, host); -- INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect); -+ INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); -+ -+ if (mmc_slot(host).power_saving) -+ mmc->ops = &omap_hsmmc_ps_ops; -+ else -+ mmc->ops = &omap_hsmmc_ops; - -- mmc->ops = &mmc_omap_ops; - mmc->f_min = 400000; - mmc->f_max = 52000000; - -@@ -991,17 +1667,20 @@ static int __init omap_mmc_probe(struct - goto err1; - } - -- spin_lock_init(&host->clk_lock); -- setup_timer(&host->idle_timer, mmc_omap_idle_timer, -- (unsigned long) host); -+ omap_hsmmc_context_save(host); -+ -+ mmc->caps |= MMC_CAP_DISABLE; -+ mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT); -+ /* we start off in DISABLED state */ -+ host->dpm_state = DISABLED; - -- if (mmc_omap_fclk_state(host, ON) != 0) { -+ if (mmc_host_enable(host->mmc) != 0) { - clk_put(host->iclk); - clk_put(host->fclk); - goto err1; - } - if (clk_enable(host->iclk) != 0) { -- mmc_omap_fclk_state(host, OFF); -+ mmc_host_disable(host->mmc); - clk_put(host->iclk); - clk_put(host->fclk); - goto err1; -@@ -1020,10 +1699,11 @@ static int __init omap_mmc_probe(struct - else - host->dbclk_enabled = 1; - --#ifdef CONFIG_MMC_BLOCK_BOUNCE 
-- mmc->max_phys_segs = 1; -- mmc->max_hw_segs = 1; --#endif -+ /* Since we do only SG emulation, we can have as many segs -+ * as we want. */ -+ mmc->max_phys_segs = 1024; -+ mmc->max_hw_segs = 1024; -+ - mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ - mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ - mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; -@@ -1032,34 +1712,17 @@ static int __init omap_mmc_probe(struct - mmc->ocr_avail = mmc_slot(host).ocr_mask; - mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; - -- if (pdata->slots[host->slot_id].wires >= 4) -+ if (mmc_slot(host).wires >= 8) -+ mmc->caps |= MMC_CAP_8_BIT_DATA; -+ else if (mmc_slot(host).wires >= 4) - mmc->caps |= MMC_CAP_4_BIT_DATA; - -- /* Only MMC1 supports 3.0V */ -- if (host->id == OMAP_MMC1_DEVID) { -- hctl = SDVS30; -- capa = VS30 | VS18; -- } else { -- hctl = SDVS18; -- capa = VS18; -- } -- -- OMAP_HSMMC_WRITE(host->base, HCTL, -- OMAP_HSMMC_READ(host->base, HCTL) | hctl); -+ mmc->caps |= mmc_slot(host).caps; - -- OMAP_HSMMC_WRITE(host->base, CAPA, -- OMAP_HSMMC_READ(host->base, CAPA) | capa); -- -- /* Set the controller to AUTO IDLE mode */ -- OMAP_HSMMC_WRITE(host->base, SYSCONFIG, -- OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); -- -- /* Set SD bus power bit */ -- OMAP_HSMMC_WRITE(host->base, HCTL, -- OMAP_HSMMC_READ(host->base, HCTL) | SDBP); -+ omap_hsmmc_conf_bus_power(host); - - /* Request IRQ for MMC operations */ -- ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED, -+ ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED, - mmc_hostname(mmc), host); - if (ret) { - dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); -@@ -1075,9 +1738,9 @@ static int __init omap_mmc_probe(struct - } - - /* Request IRQ for card detect */ -- if ((mmc_slot(host).card_detect_irq) && (mmc_slot(host).card_detect)) { -+ if ((mmc_slot(host).card_detect_irq)) { - ret = request_irq(mmc_slot(host).card_detect_irq, -- omap_mmc_cd_handler, -+ omap_hsmmc_cd_handler, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING - | IRQF_DISABLED, - mmc_hostname(mmc), host); -@@ -1091,21 +1754,25 @@ static int __init omap_mmc_probe(struct - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); - -+ mmc_host_lazy_disable(host->mmc); -+ -+ omap_hsmmc_protect_card(host); -+ - mmc_add_host(mmc); - -- if (host->pdata->slots[host->slot_id].name != NULL) { -+ if (mmc_slot(host).name != NULL) { - ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); - if (ret < 0) - goto err_slot_name; - } -- if (mmc_slot(host).card_detect_irq && mmc_slot(host).card_detect && -- host->pdata->slots[host->slot_id].get_cover_state) { -+ if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) { - ret = device_create_file(&mmc->class_dev, - &dev_attr_cover_switch); - if (ret < 0) - goto err_cover_switch; - } -- mmc_omap_fclk_lazy_disable(host); -+ -+ omap_hsmmc_debugfs(mmc); - - return 0; - -@@ -1118,7 +1785,7 @@ err_irq_cd: - err_irq_cd_init: - free_irq(host->irq, host); - err_irq: -- mmc_omap_fclk_state(host, OFF); -+ mmc_host_disable(host->mmc); - clk_disable(host->iclk); - clk_put(host->fclk); - clk_put(host->iclk); -@@ -1137,12 +1804,13 @@ err: - return ret; - } - --static int omap_mmc_remove(struct platform_device *pdev) -+static int omap_hsmmc_remove(struct platform_device *pdev) - { -- struct mmc_omap_host *host = platform_get_drvdata(pdev); -+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev); - struct resource *res; - - if (host) { 
-+ mmc_host_enable(host->mmc); - mmc_remove_host(host->mmc); - if (host->pdata->cleanup) - host->pdata->cleanup(&pdev->dev); -@@ -1150,8 +1818,7 @@ static int omap_mmc_remove(struct platfo - if (mmc_slot(host).card_detect_irq) - free_irq(mmc_slot(host).card_detect_irq, host); - flush_scheduled_work(); -- -- mmc_omap_fclk_state(host, OFF); -+ mmc_host_disable(host->mmc); - clk_disable(host->iclk); - clk_put(host->fclk); - clk_put(host->iclk); -@@ -1173,37 +1840,51 @@ static int omap_mmc_remove(struct platfo - } - - #ifdef CONFIG_PM --static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state) -+static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) - { - int ret = 0; -- struct mmc_omap_host *host = platform_get_drvdata(pdev); -+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev); - - if (host && host->suspended) - return 0; - - if (host) { -+ host->suspended = 1; -+ if (host->pdata->suspend) { -+ ret = host->pdata->suspend(&pdev->dev, -+ host->slot_id); -+ if (ret) { -+ dev_dbg(mmc_dev(host->mmc), -+ "Unable to handle MMC board" -+ " level suspend\n"); -+ host->suspended = 0; -+ return ret; -+ } -+ } -+ cancel_work_sync(&host->mmc_carddetect_work); -+ mmc_host_enable(host->mmc); - ret = mmc_suspend_host(host->mmc, state); - if (ret == 0) { -- host->suspended = 1; -- -- mmc_omap_fclk_state(host, ON); - OMAP_HSMMC_WRITE(host->base, ISE, 0); - OMAP_HSMMC_WRITE(host->base, IE, 0); - -- if (host->pdata->suspend) { -- ret = host->pdata->suspend(&pdev->dev, -- host->slot_id); -- if (ret) -- dev_dbg(mmc_dev(host->mmc), -- "Unable to handle MMC board" -- " level suspend\n"); -- } - - OMAP_HSMMC_WRITE(host->base, HCTL, -- OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); -- mmc_omap_fclk_state(host, OFF); -+ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); -+ mmc_host_disable(host->mmc); - clk_disable(host->iclk); -- clk_disable(host->dbclk); -+ if (host->dbclk_enabled) -+ clk_disable(host->dbclk); -+ } else { -+ host->suspended = 0; -+ if (host->pdata->resume) { -+ ret = host->pdata->resume(&pdev->dev, -+ host->slot_id); -+ if (ret) -+ dev_dbg(mmc_dev(host->mmc), -+ "Unmask interrupt failed\n"); -+ } -+ mmc_host_disable(host->mmc); - } - - } -@@ -1211,36 +1892,28 @@ static int omap_mmc_suspend(struct platf - } - - /* Routine to resume the MMC device */ --static int omap_mmc_resume(struct platform_device *pdev) -+static int omap_hsmmc_resume(struct platform_device *pdev) - { - int ret = 0; -- struct mmc_omap_host *host = platform_get_drvdata(pdev); -+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev); - - if (host && !host->suspended) - return 0; - - if (host) { -- int i; -- if (mmc_omap_fclk_state(host, ON) != 0) -- goto clk_en_err; -- - ret = clk_enable(host->iclk); -- if (ret) { -- mmc_omap_fclk_state(host, OFF); -- clk_put(host->fclk); -+ if (ret) - goto clk_en_err; -- } - -- if (clk_enable(host->dbclk) != 0) -- dev_dbg(mmc_dev(host->mmc), -- "Enabling debounce clk failed\n"); -+ if (host->dbclk_enabled) -+ clk_enable(host->dbclk); - -- OMAP_HSMMC_WRITE(host->base, HCTL, -- OMAP_HSMMC_READ(host->base, HCTL) | SDBP); -+ if (mmc_host_enable(host->mmc) != 0) { -+ clk_disable(host->iclk); -+ goto clk_en_err; -+ } - -- for (i = 0; i < 100; i++) -- if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP) -- break; -+ omap_hsmmc_conf_bus_power(host); - - if (host->pdata->resume) { - ret = host->pdata->resume(&pdev->dev, host->slot_id); -@@ -1249,12 +1922,14 @@ static int omap_mmc_resume(struct platfo - "Unmask interrupt failed\n"); - } - -+ 
omap_hsmmc_protect_card(host); -+ - /* Notify the core to resume the host */ - ret = mmc_resume_host(host->mmc); - if (ret == 0) - host->suspended = 0; - -- mmc_omap_fclk_lazy_disable(host); -+ mmc_host_lazy_disable(host->mmc); - } - - return ret; -@@ -1266,35 +1941,35 @@ clk_en_err: - } - - #else --#define omap_mmc_suspend NULL --#define omap_mmc_resume NULL -+#define omap_hsmmc_suspend NULL -+#define omap_hsmmc_resume NULL - #endif - --static struct platform_driver omap_mmc_driver = { -- .probe = omap_mmc_probe, -- .remove = omap_mmc_remove, -- .suspend = omap_mmc_suspend, -- .resume = omap_mmc_resume, -+static struct platform_driver omap_hsmmc_driver = { -+ .probe = omap_hsmmc_probe, -+ .remove = omap_hsmmc_remove, -+ .suspend = omap_hsmmc_suspend, -+ .resume = omap_hsmmc_resume, - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - }, - }; - --static int __init omap_mmc_init(void) -+static int __init omap_hsmmc_init(void) - { - /* Register the MMC driver */ -- return platform_driver_register(&omap_mmc_driver); -+ return platform_driver_register(&omap_hsmmc_driver); - } - --static void __exit omap_mmc_cleanup(void) -+static void __exit omap_hsmmc_cleanup(void) - { - /* Unregister MMC driver */ -- platform_driver_unregister(&omap_mmc_driver); -+ platform_driver_unregister(&omap_hsmmc_driver); - } - --module_init(omap_mmc_init); --module_exit(omap_mmc_cleanup); -+module_init(omap_hsmmc_init); -+module_exit(omap_hsmmc_cleanup); - - MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); - MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/mtdoops.c linux-omap-2.6.28-nokia1/drivers/mtd/mtdoops.c ---- linux-omap-2.6.28-omap1/drivers/mtd/mtdoops.c 2011-06-22 13:14:18.833067740 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/mtdoops.c 2011-06-22 13:19:32.823063275 +0200 -@@ -44,6 +44,7 @@ static struct mtdoops_context { - int oops_pages; - int nextpage; - int nextcount; -+ char *name; - - void *oops_buf; - -@@ -273,6 +274,9 @@ static void mtdoops_notify_add(struct mt - { - struct mtdoops_context *cxt = &oops_cxt; - -+ if (cxt->name && !strcmp(mtd->name, cxt->name)) -+ cxt->mtd_index = mtd->index; -+ - if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) - return; - -@@ -328,7 +332,7 @@ static void mtdoops_console_sync(void) - cxt->ready = 0; - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - -- if (mtd->panic_write && in_interrupt()) -+ if (mtd->panic_write && (in_interrupt() || panic_on_oops)) - /* Interrupt context, we're going to panic so try and log */ - mtdoops_write(cxt, 1); - else -@@ -354,8 +358,10 @@ mtdoops_console_write(struct console *co - spin_lock_irqsave(&cxt->writecount_lock, flags); - - /* Check ready status didn't change whilst waiting for the lock */ -- if (!cxt->ready) -+ if (!cxt->ready) { -+ spin_unlock_irqrestore(&cxt->writecount_lock, flags); - return; -+ } - - if (cxt->writecount == 0) { - u32 *stamp = cxt->oops_buf; -@@ -380,8 +386,12 @@ static int __init mtdoops_console_setup( - { - struct mtdoops_context *cxt = co->data; - -- if (cxt->mtd_index != -1) -+ if (cxt->mtd_index != -1 || cxt->name) - return -EBUSY; -+ if (options) { -+ cxt->name = kstrdup(options, GFP_KERNEL); -+ return 0; -+ } - if (co->index == -1) - return -EINVAL; - -@@ -409,6 +419,7 @@ static int __init mtdoops_console_init(v - - cxt->mtd_index = -1; - cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); -+ spin_lock_init(&cxt->writecount_lock); - - if (!cxt->oops_buf) { - printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); -@@ -429,6 +440,7 @@ static 
void __exit mtdoops_console_exit( - - unregister_mtd_user(&mtdoops_notifier); - unregister_console(&mtdoops_console); -+ kfree(cxt->name); - vfree(cxt->oops_buf); - } - -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/mtdpart.c linux-omap-2.6.28-nokia1/drivers/mtd/mtdpart.c ---- linux-omap-2.6.28-omap1/drivers/mtd/mtdpart.c 2011-06-22 13:14:18.833067740 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/mtdpart.c 2011-06-22 13:19:32.823063275 +0200 -@@ -18,6 +18,7 @@ - #include - #include - #include -+#include - - /* Our partition linked list */ - static LIST_HEAD(mtd_partitions); -@@ -54,6 +55,8 @@ static int part_read(struct mtd_info *mt - len = 0; - else if (from + len > mtd->size) - len = mtd->size - from; -+ mtd->read_cnt += 1; -+ mtd->read_sz += len; - res = part->master->read(part->master, from + part->offset, - len, retlen, buf); - if (unlikely(res)) { -@@ -73,6 +76,7 @@ static int part_point(struct mtd_info *m - len = 0; - else if (from + len > mtd->size) - len = mtd->size - from; -+ mtd->other_cnt += 1; - return part->master->point (part->master, from + part->offset, - len, retlen, virt, phys); - } -@@ -81,6 +85,7 @@ static void part_unpoint(struct mtd_info - { - struct mtd_part *part = PART(mtd); - -+ mtd->other_cnt += 1; - part->master->unpoint(part->master, from + part->offset, len); - } - -@@ -94,6 +99,7 @@ static int part_read_oob(struct mtd_info - return -EINVAL; - if (ops->datbuf && from + ops->len > mtd->size) - return -EINVAL; -+ mtd->other_cnt += 1; - res = part->master->read_oob(part->master, from + part->offset, ops); - - if (unlikely(res)) { -@@ -109,6 +115,7 @@ static int part_read_user_prot_reg(struc - size_t len, size_t *retlen, u_char *buf) - { - struct mtd_part *part = PART(mtd); -+ mtd->other_cnt += 1; - return part->master->read_user_prot_reg(part->master, from, - len, retlen, buf); - } -@@ -117,6 +124,7 @@ static int part_get_user_prot_info(struc - struct otp_info *buf, size_t len) - { - struct mtd_part *part = PART(mtd); -+ mtd->other_cnt += 1; - return part->master->get_user_prot_info(part->master, buf, len); - } - -@@ -124,6 +132,7 @@ static int part_read_fact_prot_reg(struc - size_t len, size_t *retlen, u_char *buf) - { - struct mtd_part *part = PART(mtd); -+ mtd->other_cnt += 1; - return part->master->read_fact_prot_reg(part->master, from, - len, retlen, buf); - } -@@ -132,6 +141,7 @@ static int part_get_fact_prot_info(struc - size_t len) - { - struct mtd_part *part = PART(mtd); -+ mtd->other_cnt += 1; - return part->master->get_fact_prot_info(part->master, buf, len); - } - -@@ -145,6 +155,8 @@ static int part_write(struct mtd_info *m - len = 0; - else if (to + len > mtd->size) - len = mtd->size - to; -+ mtd->write_cnt += 1; -+ mtd->write_sz += len; - return part->master->write(part->master, to + part->offset, - len, retlen, buf); - } -@@ -159,6 +171,8 @@ static int part_panic_write(struct mtd_i - len = 0; - else if (to + len > mtd->size) - len = mtd->size - to; -+ mtd->write_cnt += 1; -+ mtd->write_sz += len; - return part->master->panic_write(part->master, to + part->offset, - len, retlen, buf); - } -@@ -175,6 +189,7 @@ static int part_write_oob(struct mtd_inf - return -EINVAL; - if (ops->datbuf && to + ops->len > mtd->size) - return -EINVAL; -+ mtd->other_cnt += 1; - return part->master->write_oob(part->master, to + part->offset, ops); - } - -@@ -182,6 +197,7 @@ static int part_write_user_prot_reg(stru - size_t len, size_t *retlen, u_char *buf) - { - struct mtd_part *part = PART(mtd); -+ mtd->other_cnt += 1; - return 
part->master->write_user_prot_reg(part->master, from, - len, retlen, buf); - } -@@ -190,6 +206,7 @@ static int part_lock_user_prot_reg(struc - size_t len) - { - struct mtd_part *part = PART(mtd); -+ mtd->other_cnt += 1; - return part->master->lock_user_prot_reg(part->master, from, len); - } - -@@ -197,8 +214,13 @@ static int part_writev(struct mtd_info * - unsigned long count, loff_t to, size_t *retlen) - { - struct mtd_part *part = PART(mtd); -+ unsigned long i; - if (!(mtd->flags & MTD_WRITEABLE)) - return -EROFS; -+ for (i = 0; i < count; i++) { -+ mtd->write_cnt += 1; -+ mtd->write_sz += vecs[i].iov_len; -+ } - return part->master->writev(part->master, vecs, count, - to + part->offset, retlen); - } -@@ -212,6 +234,8 @@ static int part_erase(struct mtd_info *m - if (instr->addr >= mtd->size) - return -EINVAL; - instr->addr += part->offset; -+ mtd->erase_cnt += 1; -+ mtd->erase_sz += instr->len; - ret = part->master->erase(part->master, instr); - if (ret) { - if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) -@@ -240,6 +264,7 @@ static int part_lock(struct mtd_info *mt - struct mtd_part *part = PART(mtd); - if ((len + ofs) > mtd->size) - return -EINVAL; -+ mtd->other_cnt += 1; - return part->master->lock(part->master, ofs + part->offset, len); - } - -@@ -248,6 +273,7 @@ static int part_unlock(struct mtd_info * - struct mtd_part *part = PART(mtd); - if ((len + ofs) > mtd->size) - return -EINVAL; -+ mtd->other_cnt += 1; - return part->master->unlock(part->master, ofs + part->offset, len); - } - -@@ -287,6 +313,7 @@ static int part_block_markbad(struct mtd - return -EROFS; - if (ofs >= mtd->size) - return -EINVAL; -+ mtd->other_cnt += 1; - ofs += part->offset; - res = part->master->block_markbad(part->master, ofs); - if (!res) -@@ -581,3 +608,32 @@ int parse_mtd_partitions(struct mtd_info - return ret; - } - EXPORT_SYMBOL_GPL(parse_mtd_partitions); -+ -+void mtd_diskstats(struct seq_file *seqf) -+{ -+ struct mtd_part *part; -+ -+ list_for_each_entry(part, &mtd_partitions, list) { -+ struct mtd_info *mtd = &part->mtd; -+ -+ seq_printf(seqf, "%4d %7d %s %u %u %u " -+ "%u %u %u %u %u %u %u %u %u %u %u\n", -+ MTD_CHAR_MAJOR, mtd->index << 1, -+ mtd->name, -+ mtd->read_cnt, -+ 0, /* reads merged */ -+ mtd->read_sz >> 9, -+ 0, /* read time */ -+ mtd->write_cnt, -+ 0, /* writes merged */ -+ mtd->write_sz >> 9, -+ 0, /* write time */ -+ 0, /* I/Os in progress */ -+ 0, /* I/O time */ -+ 0, /* weighted I/O time */ -+ mtd->erase_cnt, -+ mtd->erase_sz, -+ mtd->other_cnt -+ ); -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/onenand/omap2.c linux-omap-2.6.28-nokia1/drivers/mtd/onenand/omap2.c ---- linux-omap-2.6.28-omap1/drivers/mtd/onenand/omap2.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/onenand/omap2.c 2011-06-22 13:19:32.873063274 +0200 -@@ -161,8 +161,14 @@ static int omap2_onenand_wait(struct mtd - if (result == 0) { - int retry_cnt = 0; - retry: -+ /* -+ * Block sleep while OneNAND is writing, to prevent PM -+ * from putting OneNAND's power regulator to sleep. 
-+ */ -+ omap2_block_sleep(); - result = wait_for_completion_timeout(&c->irq_done, - msecs_to_jiffies(20)); -+ omap2_allow_sleep(); - if (result == 0) { - /* Timeout after 20ms */ - ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); -@@ -294,6 +300,10 @@ static int omap3_onenand_read_bufferram( - if (bram_offset & 3 || (size_t)buf & 3 || count < 384) - goto out_copy; - -+ /* panic_write() may be in an interrupt context */ -+ if (in_interrupt() || oops_in_progress) -+ goto out_copy; -+ - if (buf >= high_memory) { - struct page *p1; - -@@ -368,7 +378,7 @@ static int omap3_onenand_write_bufferram - goto out_copy; - - /* panic_write() may be in an interrupt context */ -- if (in_interrupt()) -+ if (in_interrupt() || oops_in_progress) - goto out_copy; - - if (buf >= high_memory) { -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/onenand/onenand_base.c linux-omap-2.6.28-nokia1/drivers/mtd/onenand/onenand_base.c ---- linux-omap-2.6.28-omap1/drivers/mtd/onenand/onenand_base.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/onenand/onenand_base.c 2011-06-22 13:19:32.873063274 +0200 -@@ -1455,7 +1455,8 @@ static int onenand_write_ops_nolock(stru - struct mtd_oob_ops *ops) - { - struct onenand_chip *this = mtd->priv; -- int written = 0, column, thislen, subpage; -+ int written = 0, column, thislen = 0, subpage = 0; -+ int prev = 0, prevlen = 0, prev_subpage = 0, first = 1; - int oobwritten = 0, oobcolumn, thisooblen, oobsize; - size_t len = ops->len; - size_t ooblen = ops->ooblen; -@@ -1482,6 +1483,10 @@ static int onenand_write_ops_nolock(stru - return -EINVAL; - } - -+ /* Check zero length */ -+ if (!len) -+ return 0; -+ - if (ops->mode == MTD_OOB_AUTO) - oobsize = this->ecclayout->oobavail; - else -@@ -1492,79 +1497,121 @@ static int onenand_write_ops_nolock(stru - column = to & (mtd->writesize - 1); - - /* Loop until all data write */ -- while (written < len) { -- u_char *wbuf = (u_char *) buf; -+ while (1) { -+ if (written < len) { -+ u_char *wbuf = (u_char *) buf; -+ -+ thislen = min_t(int, mtd->writesize - column, len - written); -+ thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten); -+ -+ cond_resched(); -+ -+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen); -+ -+ /* Partial page write */ -+ subpage = thislen < mtd->writesize; -+ if (subpage) { -+ memset(this->page_buf, 0xff, mtd->writesize); -+ memcpy(this->page_buf + column, buf, thislen); -+ wbuf = this->page_buf; -+ } - -- thislen = min_t(int, mtd->writesize - column, len - written); -- thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten); -+ this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize); - -- cond_resched(); -+ if (oob) { -+ oobbuf = this->oob_buf; - -- this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen); -+ /* We send data to spare ram with oobsize -+ * to prevent byte access */ -+ memset(oobbuf, 0xff, mtd->oobsize); -+ if (ops->mode == MTD_OOB_AUTO) -+ onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); -+ else -+ memcpy(oobbuf + oobcolumn, oob, thisooblen); -+ -+ oobwritten += thisooblen; -+ oob += thisooblen; -+ oobcolumn = 0; -+ } else -+ oobbuf = (u_char *) ffchars; - -- /* Partial page write */ -- subpage = thislen < mtd->writesize; -- if (subpage) { -- memset(this->page_buf, 0xff, mtd->writesize); -- memcpy(this->page_buf + column, buf, thislen); -- wbuf = this->page_buf; -- } -- -- this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize); -+ this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); -+ } else -+ 
ONENAND_SET_NEXT_BUFFERRAM(this); - -- if (oob) { -- oobbuf = this->oob_buf; -+ /* -+ * 2 PLANE, MLC, and Flex-OneNAND doesn't support -+ * write-while-programe feature. -+ */ -+ if (!ONENAND_IS_2PLANE(this) && !first) { -+ ONENAND_SET_PREV_BUFFERRAM(this); -+ -+ ret = this->wait(mtd, FL_WRITING); -+ -+ /* In partial page write we don't update bufferram */ -+ onenand_update_bufferram(mtd, prev, !ret && !prev_subpage); -+ if (ret) { -+ written -= prevlen; -+ printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); -+ break; -+ } - -- /* We send data to spare ram with oobsize -- * to prevent byte access */ -- memset(oobbuf, 0xff, mtd->oobsize); -- if (ops->mode == MTD_OOB_AUTO) -- onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); -- else -- memcpy(oobbuf + oobcolumn, oob, thisooblen); -- -- oobwritten += thisooblen; -- oob += thisooblen; -- oobcolumn = 0; -- } else -- oobbuf = (u_char *) ffchars; -+ if (written == len) { -+ /* Only check verify write turn on */ -+ ret = onenand_verify(mtd, buf - len, to - len, len); -+ if (ret) -+ printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); -+ break; -+ } - -- this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); -+ ONENAND_SET_NEXT_BUFFERRAM(this); -+ } - - this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize); - -- ret = this->wait(mtd, FL_WRITING); -- -- /* In partial page write we don't update bufferram */ -- onenand_update_bufferram(mtd, to, !ret && !subpage); -+ /* -+ * 2 PLANE, MLC, and Flex-OneNAND wait here -+ */ - if (ONENAND_IS_2PLANE(this)) { -- ONENAND_SET_BUFFERRAM1(this); -- onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage); -- } -+ ret = this->wait(mtd, FL_WRITING); - -- if (ret) { -- printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); -- break; -- } -+ /* In partial page write we don't update bufferram */ -+ onenand_update_bufferram(mtd, to, !ret && !subpage); -+ if (ret) { -+ printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); -+ break; -+ } - -- /* Only check verify write turn on */ -- ret = onenand_verify(mtd, buf, to, thislen); -- if (ret) { -- printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); -- break; -- } -+ /* Only check verify write turn on */ -+ ret = onenand_verify(mtd, buf, to, thislen); -+ if (ret) { -+ printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); -+ break; -+ } - -- written += thislen; -+ written += thislen; - -- if (written == len) -- break; -+ if (written == len) -+ break; -+ -+ } else -+ written += thislen; - - column = 0; -+ prev_subpage = subpage; -+ prev = to; -+ prevlen = thislen; - to += thislen; - buf += thislen; -+ first = 0; - } - -+ /* In error case, clear all bufferrams */ -+ if (written != len) -+ onenand_invalidate_bufferram(mtd, 0, -1); -+ - ops->retlen = written; -+ ops->oobretlen = oobwritten; - - return ret; - } -@@ -2529,6 +2576,7 @@ static void onenand_print_device_info(in - - static const struct onenand_manufacturers onenand_manuf_ids[] = { - {ONENAND_MFR_SAMSUNG, "Samsung"}, -+ {ONENAND_MFR_NUMONYX, "Numonyx"}, - }; - - /** -@@ -2574,7 +2622,7 @@ static int onenand_probe(struct mtd_info - /* Save system configuration 1 */ - syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1); - /* Clear Sync. 
Burst Read mode to read BootRAM */ -- this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1); -+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1); - - /* Send the command for reading device ID from BootRAM */ - this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/build.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/build.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/build.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/build.c 2011-06-22 13:19:32.873063274 +0200 -@@ -632,6 +632,15 @@ static int io_init(struct ubi_device *ub - } - - /* -+ * Set maximum amount of physical erroneous eraseblocks to be 10%. -+ * Erroneous PEB are those which have read errors. -+ */ -+ ubi->max_erroneous = ubi->peb_count / 10; -+ if (ubi->max_erroneous < 16) -+ ubi->max_erroneous = 16; -+ dbg_msg("max_erroneous %d", ubi->max_erroneous); -+ -+ /* - * It may happen that EC and VID headers are situated in one minimal - * I/O unit. In this case we can only accept this UBI image in - * read-only mode. -@@ -815,19 +824,20 @@ int ubi_attach_mtd_dev(struct mtd_info * - if (err) - goto out_free; - -+ err = -ENOMEM; - ubi->peb_buf1 = vmalloc(ubi->peb_size); - if (!ubi->peb_buf1) - goto out_free; - - ubi->peb_buf2 = vmalloc(ubi->peb_size); - if (!ubi->peb_buf2) -- goto out_free; -+ goto out_free; - - #ifdef CONFIG_MTD_UBI_DEBUG - mutex_init(&ubi->dbg_buf_mutex); - ubi->dbg_peb_buf = vmalloc(ubi->peb_size); - if (!ubi->dbg_peb_buf) -- goto out_free; -+ goto out_free; - #endif - - err = attach_by_scanning(ubi); -@@ -869,6 +879,7 @@ int ubi_attach_mtd_dev(struct mtd_info * - ubi_msg("number of PEBs reserved for bad PEB handling: %d", - ubi->beb_rsvd_pebs); - ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); -+ ubi_msg("image sequence number: %d", ubi->image_seq); - - if (!DBG_DISABLE_BGT) - ubi->thread_enabled = 1; -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/cdev.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/cdev.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/cdev.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/cdev.c 2011-06-22 13:19:32.873063274 +0200 -@@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_dev - * It seems we need to remove volume with name @re->new_name, - * if it exists. 
- */ -- desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); -+ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, -+ UBI_EXCLUSIVE); - if (IS_ERR(desc)) { - err = PTR_ERR(desc); - if (err == -ENODEV) -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/debug.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/debug.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/debug.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/debug.c 2011-06-22 13:19:32.873063274 +0200 -@@ -44,6 +44,8 @@ void ubi_dbg_dump_ec_hdr(const struct ub - be32_to_cpu(ec_hdr->vid_hdr_offset)); - printk(KERN_DEBUG "\tdata_offset %d\n", - be32_to_cpu(ec_hdr->data_offset)); -+ printk(KERN_DEBUG "\timage_seq %d\n", -+ be32_to_cpu(ec_hdr->image_seq)); - printk(KERN_DEBUG "\thdr_crc %#08x\n", - be32_to_cpu(ec_hdr->hdr_crc)); - printk(KERN_DEBUG "erase counter header hexdump:\n"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/debug.h linux-omap-2.6.28-nokia1/drivers/mtd/ubi/debug.h ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/debug.h 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/debug.h 2011-06-22 13:19:32.873063274 +0200 -@@ -27,11 +27,11 @@ - #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) - - #define ubi_assert(expr) do { \ -- if (unlikely(!(expr))) { \ -- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ -- __func__, __LINE__, current->pid); \ -- ubi_dbg_dump_stack(); \ -- } \ -+ if (unlikely(!(expr))) { \ -+ printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ -+ __func__, __LINE__, current->pid); \ -+ ubi_dbg_dump_stack(); \ -+ } \ - } while (0) - - #define dbg_msg(fmt, ...) \ -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/eba.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/eba.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/eba.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/eba.c 2011-06-22 13:19:32.883063274 +0200 -@@ -419,8 +419,9 @@ retry: - * not implemented. 
- */ - if (err == UBI_IO_BAD_VID_HDR) { -- ubi_warn("bad VID header at PEB %d, LEB" -- "%d:%d", pnum, vol_id, lnum); -+ ubi_warn("corrupted VID header at PEB " -+ "%d, LEB %d:%d", pnum, vol_id, -+ lnum); - err = -EBADMSG; - } else - ubi_ro_mode(ubi); -@@ -504,12 +505,9 @@ static int recover_peb(struct ubi_device - if (!vid_hdr) - return -ENOMEM; - -- mutex_lock(&ubi->buf_mutex); -- - retry: - new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); - if (new_pnum < 0) { -- mutex_unlock(&ubi->buf_mutex); - ubi_free_vid_hdr(ubi, vid_hdr); - return new_pnum; - } -@@ -529,20 +527,23 @@ retry: - goto write_error; - - data_size = offset + len; -+ mutex_lock(&ubi->buf_mutex); - memset(ubi->peb_buf1 + offset, 0xFF, len); - - /* Read everything before the area where the write failure happened */ - if (offset > 0) { - err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); - if (err && err != UBI_IO_BITFLIPS) -- goto out_put; -+ goto out_unlock; - } - - memcpy(ubi->peb_buf1 + offset, buf, len); - - err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); -- if (err) -+ if (err) { -+ mutex_unlock(&ubi->buf_mutex); - goto write_error; -+ } - - mutex_unlock(&ubi->buf_mutex); - ubi_free_vid_hdr(ubi, vid_hdr); -@@ -553,8 +554,9 @@ retry: - ubi_msg("data was successfully recovered"); - return 0; - --out_put: -+out_unlock: - mutex_unlock(&ubi->buf_mutex); -+out_put: - ubi_wl_put_peb(ubi, new_pnum, 1); - ubi_free_vid_hdr(ubi, vid_hdr); - return err; -@@ -567,7 +569,6 @@ write_error: - ubi_warn("failed to write to PEB %d", new_pnum); - ubi_wl_put_peb(ubi, new_pnum, 1); - if (++tries > UBI_IO_RETRIES) { -- mutex_unlock(&ubi->buf_mutex); - ubi_free_vid_hdr(ubi, vid_hdr); - return err; - } -@@ -940,6 +941,33 @@ write_error: - } - - /** -+ * is_error_sane - check whether a read error is sane. -+ * @err: code of the error happened during reading -+ * -+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we -+ * cannot read data from the target PEB (an error @err happened). If the error -+ * code is sane, then we treat this error as non-fatal. Otherwise the error is -+ * fatal and UBI will be switched to R/O mode later. -+ * -+ * The idea is that we try not to switch to R/O mode if the read error is -+ * something which suggests there was a real read problem. E.g., %-EIO. Or a -+ * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O -+ * mode, simply because we do not know what happened at the MTD level, and we -+ * cannot handle this. E.g., the underlying driver may have become crazy, and -+ * it is safer to switch to R/O mode to preserve the data. -+ * -+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB -+ * which we have just written. -+ */ -+static int is_error_sane(int err) -+{ -+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_VID_HDR || -+ err == -ETIMEDOUT) -+ return 0; -+ return 1; -+} -+ -+/** - * ubi_eba_copy_leb - copy logical eraseblock. - * @ubi: UBI device description object - * @from: physical eraseblock number from where to copy -@@ -949,10 +977,9 @@ write_error: - * This function copies logical eraseblock from physical eraseblock @from to - * physical eraseblock @to. The @vid_hdr buffer may be changed by this - * function. Returns: -- * o %0 in case of success; -- * o %1 if the operation was canceled and should be tried later (e.g., -- * because a bit-flip was detected at the target PEB); -- * o %2 if the volume is being deleted and this LEB should not be moved. 
-+ * o %0 in case of success; -+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc; -+ * o a negative error code in case of failure. - */ - int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, - struct ubi_vid_hdr *vid_hdr) -@@ -964,7 +991,7 @@ int ubi_eba_copy_leb(struct ubi_device * - vol_id = be32_to_cpu(vid_hdr->vol_id); - lnum = be32_to_cpu(vid_hdr->lnum); - -- dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); -+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); - - if (vid_hdr->vol_type == UBI_VID_STATIC) { - data_size = be32_to_cpu(vid_hdr->data_size); -@@ -978,17 +1005,16 @@ int ubi_eba_copy_leb(struct ubi_device * - /* - * Note, we may race with volume deletion, which means that the volume - * this logical eraseblock belongs to might be being deleted. Since the -- * volume deletion unmaps all the volume's logical eraseblocks, it will -+ * volume deletion un-maps all the volume's logical eraseblocks, it will - * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. - */ - vol = ubi->volumes[idx]; -+ spin_unlock(&ubi->volumes_lock); - if (!vol) { - /* No need to do further work, cancel */ -- dbg_eba("volume %d is being removed, cancel", vol_id); -- spin_unlock(&ubi->volumes_lock); -- return 2; -+ dbg_wl("volume %d is being removed, cancel", vol_id); -+ return MOVE_CANCEL_RACE; - } -- spin_unlock(&ubi->volumes_lock); - - /* - * We do not want anybody to write to this logical eraseblock while we -@@ -1000,12 +1026,13 @@ int ubi_eba_copy_leb(struct ubi_device * - * (@from). This task locks the LEB and goes sleep in the - * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are - * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the -- * LEB is already locked, we just do not move it and return %1. -+ * LEB is already locked, we just do not move it and return -+ * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later. - */ - err = leb_write_trylock(ubi, vol_id, lnum); - if (err) { -- dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); -- return err; -+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); -+ return MOVE_CANCEL_RACE; - } - - /* -@@ -1014,25 +1041,26 @@ int ubi_eba_copy_leb(struct ubi_device * - * cancel it. - */ - if (vol->eba_tbl[lnum] != from) { -- dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " -- "PEB %d, cancel", vol_id, lnum, from, -- vol->eba_tbl[lnum]); -- err = 1; -+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to " -+ "PEB %d, cancel", vol_id, lnum, from, -+ vol->eba_tbl[lnum]); -+ err = MOVE_CANCEL_RACE; - goto out_unlock_leb; - } - - /* - * OK, now the LEB is locked and we can safely start moving it. Since -- * this function utilizes thie @ubi->peb1_buf buffer which is shared -- * with some other functions, so lock the buffer by taking the -+ * this function utilizes the @ubi->peb_buf1 buffer which is shared -+ * with some other functions - we lock the buffer by taking the - * @ubi->buf_mutex. 
- */ - mutex_lock(&ubi->buf_mutex); -- dbg_eba("read %d bytes of data", aldata_size); -+ dbg_wl("read %d bytes of data", aldata_size); - err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); - if (err && err != UBI_IO_BITFLIPS) { - ubi_warn("error %d while reading data from PEB %d", - err, from); -+ err = MOVE_SOURCE_RD_ERR; - goto out_unlock_buf; - } - -@@ -1055,7 +1083,7 @@ int ubi_eba_copy_leb(struct ubi_device * - cond_resched(); - - /* -- * It may turn out to me that the whole @from physical eraseblock -+ * It may turn out to be that the whole @from physical eraseblock - * contains only 0xFF bytes. Then we have to only write the VID header - * and do not write any data. This also means we should not set - * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. -@@ -1068,25 +1096,34 @@ int ubi_eba_copy_leb(struct ubi_device * - vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); - - err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); -- if (err) -+ if (err) { -+ if (err == -EIO) -+ err = MOVE_TARGET_WR_ERR; - goto out_unlock_buf; -+ } - - cond_resched(); - - /* Read the VID header back and check if it was written correctly */ - err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); - if (err) { -- if (err != UBI_IO_BITFLIPS) -- ubi_warn("cannot read VID header back from PEB %d", to); -- else -- err = 1; -+ if (err != UBI_IO_BITFLIPS) { -+ ubi_warn("error %d while reading VID header back from " -+ "PEB %d", err, to); -+ if (is_error_sane(err)) -+ err = MOVE_TARGET_RD_ERR; -+ } else -+ err = MOVE_CANCEL_BITFLIPS; - goto out_unlock_buf; - } - - if (data_size > 0) { - err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); -- if (err) -+ if (err) { -+ if (err == -EIO) -+ err = MOVE_TARGET_WR_ERR; - goto out_unlock_buf; -+ } - - cond_resched(); - -@@ -1097,19 +1134,22 @@ int ubi_eba_copy_leb(struct ubi_device * - - err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); - if (err) { -- if (err != UBI_IO_BITFLIPS) -- ubi_warn("cannot read data back from PEB %d", -- to); -- else -- err = 1; -+ if (err != UBI_IO_BITFLIPS) { -+ ubi_warn("error %d while reading data back " -+ "from PEB %d", err, to); -+ if (is_error_sane(err)) -+ err = MOVE_TARGET_RD_ERR; -+ } else -+ err = MOVE_CANCEL_BITFLIPS; - goto out_unlock_buf; - } - - cond_resched(); - - if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { -- ubi_warn("read data back from PEB %d - it is different", -- to); -+ ubi_warn("read data back from PEB %d and it is " -+ "different", to); -+ err = -EINVAL; - goto out_unlock_buf; - } - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/io.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/io.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/io.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/io.c 2011-06-22 13:19:32.883063274 +0200 -@@ -566,15 +566,16 @@ int ubi_io_mark_bad(const struct ubi_dev - * This function returns zero if the erase counter header is OK, and %1 if - * not. 
- */ --static int validate_ec_hdr(const struct ubi_device *ubi, -+static int validate_ec_hdr(struct ubi_device *ubi, - const struct ubi_ec_hdr *ec_hdr) - { - long long ec; -- int vid_hdr_offset, leb_start; -+ int vid_hdr_offset, leb_start, image_seq; - - ec = be64_to_cpu(ec_hdr->ec); - vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset); - leb_start = be32_to_cpu(ec_hdr->data_offset); -+ image_seq = be32_to_cpu(ec_hdr->image_seq); - - if (ec_hdr->version != UBI_VERSION) { - ubi_err("node with incompatible UBI version found: " -@@ -600,6 +601,15 @@ static int validate_ec_hdr(const struct - goto bad; - } - -+ if (!ubi->image_seq_set) { -+ ubi->image_seq = image_seq; -+ ubi->image_seq_set = 1; -+ } else if (ubi->image_seq && image_seq && ubi->image_seq != image_seq) { -+ ubi_err("bad image sequence number %d, expected %d", -+ image_seq, ubi->image_seq); -+ goto bad; -+ } -+ - return 0; - - bad: -@@ -637,8 +647,6 @@ int ubi_io_read_ec_hdr(struct ubi_device - - dbg_io("read EC header from PEB %d", pnum); - ubi_assert(pnum >= 0 && pnum < ubi->peb_count); -- if (UBI_IO_DEBUG) -- verbose = 1; - - err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); - if (err) { -@@ -685,6 +693,9 @@ int ubi_io_read_ec_hdr(struct ubi_device - if (verbose) - ubi_warn("no EC header found at PEB %d, " - "only 0xFF bytes", pnum); -+ else if (UBI_IO_DEBUG) -+ dbg_msg("no EC header found at PEB %d, " -+ "only 0xFF bytes", pnum); - return UBI_IO_PEB_EMPTY; - } - -@@ -696,7 +707,9 @@ int ubi_io_read_ec_hdr(struct ubi_device - ubi_warn("bad magic number at PEB %d: %08x instead of " - "%08x", pnum, magic, UBI_EC_HDR_MAGIC); - ubi_dbg_dump_ec_hdr(ec_hdr); -- } -+ } else if (UBI_IO_DEBUG) -+ dbg_msg("bad magic number at PEB %d: %08x instead of " -+ "%08x", pnum, magic, UBI_EC_HDR_MAGIC); - return UBI_IO_BAD_EC_HDR; - } - -@@ -708,7 +721,9 @@ int ubi_io_read_ec_hdr(struct ubi_device - ubi_warn("bad EC header CRC at PEB %d, calculated " - "%#08x, read %#08x", pnum, crc, hdr_crc); - ubi_dbg_dump_ec_hdr(ec_hdr); -- } -+ } else if (UBI_IO_DEBUG) -+ dbg_msg("bad EC header CRC at PEB %d, calculated " -+ "%#08x, read %#08x", pnum, crc, hdr_crc); - return UBI_IO_BAD_EC_HDR; - } - -@@ -750,6 +765,7 @@ int ubi_io_write_ec_hdr(struct ubi_devic - ec_hdr->version = UBI_VERSION; - ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); - ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); -+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq); - crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); - ec_hdr->hdr_crc = cpu_to_be32(crc); - -@@ -897,7 +913,7 @@ bad: - * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected - * and corrected by the flash driver; this is harmless but may indicate that - * this eraseblock may become bad soon; -- * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC -+ * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC - * error detected); - * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID - * header there); -@@ -912,8 +928,6 @@ int ubi_io_read_vid_hdr(struct ubi_devic - - dbg_io("read VID header from PEB %d", pnum); - ubi_assert(pnum >= 0 && pnum < ubi->peb_count); -- if (UBI_IO_DEBUG) -- verbose = 1; - - p = (char *)vid_hdr - ubi->vid_hdr_shift; - err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, -@@ -960,6 +974,9 @@ int ubi_io_read_vid_hdr(struct ubi_devic - if (verbose) - ubi_warn("no VID header found at PEB %d, " - "only 0xFF bytes", pnum); -+ else if (UBI_IO_DEBUG) -+ dbg_msg("no VID header found at PEB 
%d, " -+ "only 0xFF bytes", pnum); - return UBI_IO_PEB_FREE; - } - -@@ -971,7 +988,9 @@ int ubi_io_read_vid_hdr(struct ubi_devic - ubi_warn("bad magic number at PEB %d: %08x instead of " - "%08x", pnum, magic, UBI_VID_HDR_MAGIC); - ubi_dbg_dump_vid_hdr(vid_hdr); -- } -+ } else if (UBI_IO_DEBUG) -+ dbg_msg("bad magic number at PEB %d: %08x instead of " -+ "%08x", pnum, magic, UBI_VID_HDR_MAGIC); - return UBI_IO_BAD_VID_HDR; - } - -@@ -983,7 +1002,9 @@ int ubi_io_read_vid_hdr(struct ubi_devic - ubi_warn("bad CRC at PEB %d, calculated %#08x, " - "read %#08x", pnum, crc, hdr_crc); - ubi_dbg_dump_vid_hdr(vid_hdr); -- } -+ } else if (UBI_IO_DEBUG) -+ dbg_msg("bad CRC at PEB %d, calculated %#08x, " -+ "read %#08x", pnum, crc, hdr_crc); - return UBI_IO_BAD_VID_HDR; - } - -@@ -1024,7 +1045,7 @@ int ubi_io_write_vid_hdr(struct ubi_devi - - err = paranoid_check_peb_ec_hdr(ubi, pnum); - if (err) -- return err > 0 ? -EINVAL: err; -+ return err > 0 ? -EINVAL : err; - - vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); - vid_hdr->version = UBI_VERSION; -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/scan.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/scan.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/scan.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/scan.c 2011-06-22 13:19:32.883063274 +0200 -@@ -912,6 +912,8 @@ struct ubi_scan_info *ubi_scan(struct ub - if (si->is_empty) - ubi_msg("empty MTD device detected"); - -+ ubi->image_seq_set = 1; -+ - /* - * In case of unknown erase counter we use the mean erase counter - * value. -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/ubi.h linux-omap-2.6.28-nokia1/drivers/mtd/ubi/ubi.h ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/ubi.h 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/ubi.h 2011-06-22 13:19:32.883063274 +0200 -@@ -74,6 +74,13 @@ - #define UBI_IO_RETRIES 3 - - /* -+ * Length of the protection queue. The length is effectively equivalent to the -+ * number of (global) erase cycles PEBs are protected from the wear-leveling -+ * worker. -+ */ -+#define UBI_PROT_QUEUE_LEN 10 -+ -+/* - * Error codes returned by the I/O sub-system. - * - * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only -@@ -93,9 +100,32 @@ enum { - UBI_IO_BITFLIPS - }; - -+/* -+ * Return codes of the 'ubi_eba_copy_leb()' function. -+ * -+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source -+ * PEB was put meanwhile, or there is I/O on the source PEB -+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source -+ * PEB -+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target -+ * PEB -+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target -+ * PEB -+ * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the -+ * target PEB -+ */ -+enum { -+ MOVE_CANCEL_RACE = 1, -+ MOVE_SOURCE_RD_ERR, -+ MOVE_TARGET_RD_ERR, -+ MOVE_TARGET_WR_ERR, -+ MOVE_CANCEL_BITFLIPS, -+}; -+ - /** - * struct ubi_wl_entry - wear-leveling entry. -- * @rb: link in the corresponding RB-tree -+ * @u.rb: link in the corresponding (free/used) RB-tree -+ * @u.list: link in the protection queue - * @ec: erase counter - * @pnum: physical eraseblock number - * -@@ -104,7 +134,10 @@ enum { - * RB-trees. See WL sub-system for details. 
- */ - struct ubi_wl_entry { -- struct rb_node rb; -+ union { -+ struct rb_node rb; -+ struct list_head list; -+ } u; - int ec; - int pnum; - }; -@@ -280,6 +313,8 @@ struct ubi_wl_entry; - * @vol->readers, @vol->writers, @vol->exclusive, - * @vol->ref_count, @vol->mapping and @vol->eba_tbl. - * @ref_count: count of references on the UBI device -+ * @image_seq: image sequence number recorded on EC headers -+ * @image_seq_set: indicates @image_seq is known - * - * @rsvd_pebs: count of reserved physical eraseblocks - * @avail_pebs: count of available physical eraseblocks -@@ -288,7 +323,7 @@ struct ubi_wl_entry; - * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling - * - * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end -- * of UBI ititializetion -+ * of UBI initialization - * @vtbl_slots: how many slots are available in the volume table - * @vtbl_size: size of the volume table in bytes - * @vtbl: in-RAM volume table copy -@@ -304,20 +339,20 @@ struct ubi_wl_entry; - * @alc_mutex: serializes "atomic LEB change" operations - * - * @used: RB-tree of used physical eraseblocks -+ * @erroneous: RB-tree of erroneous used physical eraseblocks - * @free: RB-tree of free physical eraseblocks - * @scrub: RB-tree of physical eraseblocks which need scrubbing -- * @prot: protection trees -- * @prot.pnum: protection tree indexed by physical eraseblock numbers -- * @prot.aec: protection tree indexed by absolute erase counter value -- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, -- * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works -- * fields -+ * @pq: protection queue (contain physical eraseblocks which are temporarily -+ * protected from the wear-leveling worker) -+ * @pq_head: protection queue head -+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, -+ * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, -+ * @erroneous, and @erroneous_peb_count fields - * @move_mutex: serializes eraseblock moves -- * @work_sem: sycnhronizes the WL worker with use tasks -+ * @work_sem: synchronizes the WL worker with use tasks - * @wl_scheduled: non-zero if the wear-leveling was scheduled - * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any - * physical eraseblock -- * @abs_ec: absolute erase counter - * @move_from: physical eraseblock from where the data is being moved - * @move_to: physical eraseblock where the data is being moved to - * @move_to_put: if the "to" PEB was put -@@ -332,6 +367,8 @@ struct ubi_wl_entry; - * @peb_size: physical eraseblock size - * @bad_peb_count: count of bad physical eraseblocks - * @good_peb_count: count of good physical eraseblocks -+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous -+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks - * @min_io_size: minimal input/output unit size of the underlying MTD device - * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers - * @ro_mode: if the UBI device is in read-only mode -@@ -351,11 +388,11 @@ struct ubi_wl_entry; - * - * @peb_buf1: a buffer of PEB size used for different purposes - * @peb_buf2: another buffer of PEB size used for different purposes -- * @buf_mutex: proptects @peb_buf1 and @peb_buf2 -+ * @buf_mutex: protects @peb_buf1 and @peb_buf2 - * @ckvol_mutex: serializes static volume checking when opening -- * @mult_mutex: serializes operations on multiple volumes, like re-nameing -+ * @mult_mutex: 
serializes operations on multiple volumes, like re-naming - * @dbg_peb_buf: buffer of PEB size used for debugging -- * @dbg_buf_mutex: proptects @dbg_peb_buf -+ * @dbg_buf_mutex: protects @dbg_peb_buf - */ - struct ubi_device { - struct cdev cdev; -@@ -366,6 +403,8 @@ struct ubi_device { - struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; - spinlock_t volumes_lock; - int ref_count; -+ int image_seq; -+ int image_seq_set; - - int rsvd_pebs; - int avail_pebs; -@@ -390,18 +429,16 @@ struct ubi_device { - - /* Wear-leveling sub-system's stuff */ - struct rb_root used; -+ struct rb_root erroneous; - struct rb_root free; - struct rb_root scrub; -- struct { -- struct rb_root pnum; -- struct rb_root aec; -- } prot; -+ struct list_head pq[UBI_PROT_QUEUE_LEN]; -+ int pq_head; - spinlock_t wl_lock; - struct mutex move_mutex; - struct rw_semaphore work_sem; - int wl_scheduled; - struct ubi_wl_entry **lookuptbl; -- unsigned long long abs_ec; - struct ubi_wl_entry *move_from; - struct ubi_wl_entry *move_to; - int move_to_put; -@@ -417,6 +454,8 @@ struct ubi_device { - int peb_size; - int bad_peb_count; - int good_peb_count; -+ int erroneous_peb_count; -+ int max_erroneous; - int min_io_size; - int hdrs_min_io_size; - int ro_mode; -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/ubi-media.h linux-omap-2.6.28-nokia1/drivers/mtd/ubi/ubi-media.h ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/ubi-media.h 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/ubi-media.h 2011-06-22 13:19:32.883063274 +0200 -@@ -129,6 +129,7 @@ enum { - * @ec: the erase counter - * @vid_hdr_offset: where the VID header starts - * @data_offset: where the user data start -+ * @image_seq: image sequence number - * @padding2: reserved for future, zeroes - * @hdr_crc: erase counter header CRC checksum - * -@@ -144,6 +145,14 @@ enum { - * volume identifier header and user data, relative to the beginning of the - * physical eraseblock. These values have to be the same for all physical - * eraseblocks. -+ * -+ * The @image_seq field is used to validate a UBI image that has been prepared -+ * for a UBI device. The @image_seq value can be any value, but it must be the -+ * same on all eraseblocks. UBI will ensure that all new erase counter headers -+ * also contain this value, and will check the value when scanning at start-up. -+ * One way to make use of @image_seq is to increase its value by one every time -+ * an image is flashed over an existing image, then, if the flashing does not -+ * complete, UBI will detect the error when scanning. - */ - struct ubi_ec_hdr { - __be32 magic; -@@ -152,7 +161,8 @@ struct ubi_ec_hdr { - __be64 ec; /* Warning: the current limit is 31-bit anyway! */ - __be32 vid_hdr_offset; - __be32 data_offset; -- __u8 padding2[36]; -+ __be32 image_seq; -+ __u8 padding2[32]; - __be32 hdr_crc; - } __attribute__ ((packed)); - -diff -Nurp linux-omap-2.6.28-omap1/drivers/mtd/ubi/wl.c linux-omap-2.6.28-nokia1/drivers/mtd/ubi/wl.c ---- linux-omap-2.6.28-omap1/drivers/mtd/ubi/wl.c 2011-06-22 13:14:18.863067738 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/mtd/ubi/wl.c 2011-06-22 13:19:32.883063274 +0200 -@@ -22,7 +22,7 @@ - * UBI wear-leveling sub-system. - * - * This sub-system is responsible for wear-leveling. It works in terms of -- * physical* eraseblocks and erase counters and knows nothing about logical -+ * physical eraseblocks and erase counters and knows nothing about logical - * eraseblocks, volumes, etc. 
From this sub-system's perspective all physical - * eraseblocks are of two types - used and free. Used physical eraseblocks are - * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical -@@ -55,8 +55,41 @@ - * - * As it was said, for the UBI sub-system all physical eraseblocks are either - * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while -- * used eraseblocks are kept in a set of different RB-trees: @wl->used, -- * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. -+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub -+ * RB-trees, as well as (temporarily) in the @wl->pq queue. -+ * -+ * When the WL sub-system returns a physical eraseblock, the physical -+ * eraseblock is protected from being moved for some "time". For this reason, -+ * the physical eraseblock is not directly moved from the @wl->free tree to the -+ * @wl->used tree. There is a protection queue in between where this -+ * physical eraseblock is temporarily stored (@wl->pq). -+ * -+ * All this protection stuff is needed because: -+ * o we don't want to move physical eraseblocks just after we have given them -+ * to the user; instead, we first want to let users fill them up with data; -+ * -+ * o there is a chance that the user will put the physical eraseblock very -+ * soon, so it makes sense not to move it for some time, but wait; this is -+ * especially important in case of "short term" physical eraseblocks. -+ * -+ * Physical eraseblocks stay protected only for limited time. But the "time" is -+ * measured in erase cycles in this case. This is implemented with help of the -+ * protection queue. Eraseblocks are put to the tail of this queue when they -+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the -+ * head of the queue on each erase operation (for any eraseblock). So the -+ * length of the queue defines how may (global) erase cycles PEBs are protected. -+ * -+ * To put it differently, each physical eraseblock has 2 main states: free and -+ * used. The former state corresponds to the @wl->free tree. The latter state -+ * is split up on several sub-states: -+ * o the WL movement is allowed (@wl->used tree); -+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is -+ * erroneous - e.g., there was a read error; -+ * o the WL movement is temporarily prohibited (@wl->pq queue); -+ * o scrubbing is needed (@wl->scrub tree). -+ * -+ * Depending on the sub-state, wear-leveling entries of the used physical -+ * eraseblocks may be kept in one of those structures. - * - * Note, in this implementation, we keep a small in-RAM object for each physical - * eraseblock. This is surely not a scalable solution. But it appears to be good -@@ -70,9 +103,6 @@ - * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we - * pick target PEB with an average EC if our PEB is not very "old". This is a - * room for future re-works of the WL sub-system. -- * -- * Note: the stuff with protection trees looks too complex and is difficult to -- * understand. Should be fixed. - */ - - #include -@@ -85,14 +115,6 @@ - #define WL_RESERVED_PEBS 1 - - /* -- * How many erase cycles are short term, unknown, and long term physical -- * eraseblocks protected. -- */ --#define ST_PROTECTION 16 --#define U_PROTECTION 10 --#define LT_PROTECTION 4 -- --/* - * Maximum difference between two erase counters. 
If this threshold is - * exceeded, the WL sub-system starts moving data from used physical - * eraseblocks with low erase counter to free physical eraseblocks with high -@@ -120,64 +142,9 @@ - #define WL_MAX_FAILURES 32 - - /** -- * struct ubi_wl_prot_entry - PEB protection entry. -- * @rb_pnum: link in the @wl->prot.pnum RB-tree -- * @rb_aec: link in the @wl->prot.aec RB-tree -- * @abs_ec: the absolute erase counter value when the protection ends -- * @e: the wear-leveling entry of the physical eraseblock under protection -- * -- * When the WL sub-system returns a physical eraseblock, the physical -- * eraseblock is protected from being moved for some "time". For this reason, -- * the physical eraseblock is not directly moved from the @wl->free tree to the -- * @wl->used tree. There is one more tree in between where this physical -- * eraseblock is temporarily stored (@wl->prot). -- * -- * All this protection stuff is needed because: -- * o we don't want to move physical eraseblocks just after we have given them -- * to the user; instead, we first want to let users fill them up with data; -- * -- * o there is a chance that the user will put the physical eraseblock very -- * soon, so it makes sense not to move it for some time, but wait; this is -- * especially important in case of "short term" physical eraseblocks. -- * -- * Physical eraseblocks stay protected only for limited time. But the "time" is -- * measured in erase cycles in this case. This is implemented with help of the -- * absolute erase counter (@wl->abs_ec). When it reaches certain value, the -- * physical eraseblocks are moved from the protection trees (@wl->prot.*) to -- * the @wl->used tree. -- * -- * Protected physical eraseblocks are searched by physical eraseblock number -- * (when they are put) and by the absolute erase counter (to check if it is -- * time to move them to the @wl->used tree). So there are actually 2 RB-trees -- * storing the protected physical eraseblocks: @wl->prot.pnum and -- * @wl->prot.aec. They are referred to as the "protection" trees. The -- * first one is indexed by the physical eraseblock number. The second one is -- * indexed by the absolute erase counter. Both trees store -- * &struct ubi_wl_prot_entry objects. -- * -- * Each physical eraseblock has 2 main states: free and used. The former state -- * corresponds to the @wl->free tree. The latter state is split up on several -- * sub-states: -- * o the WL movement is allowed (@wl->used tree); -- * o the WL movement is temporarily prohibited (@wl->prot.pnum and -- * @wl->prot.aec trees); -- * o scrubbing is needed (@wl->scrub tree). -- * -- * Depending on the sub-state, wear-leveling entries of the used physical -- * eraseblocks may be kept in one of those trees. -- */ --struct ubi_wl_prot_entry { -- struct rb_node rb_pnum; -- struct rb_node rb_aec; -- unsigned long long abs_ec; -- struct ubi_wl_entry *e; --}; -- --/** - * struct ubi_work - UBI work description data structure. 
- * @list: a link in the list of pending works - * @func: worker function -- * @priv: private data of the worker function - * @e: physical eraseblock to erase - * @torture: if the physical eraseblock has to be tortured - * -@@ -198,9 +165,11 @@ struct ubi_work { - static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); - static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, - struct rb_root *root); -+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e); - #else - #define paranoid_check_ec(ubi, pnum, ec) 0 - #define paranoid_check_in_wl_tree(e, root) -+#define paranoid_check_in_pq(ubi, e) 0 - #endif - - /** -@@ -220,7 +189,7 @@ static void wl_tree_add(struct ubi_wl_en - struct ubi_wl_entry *e1; - - parent = *p; -- e1 = rb_entry(parent, struct ubi_wl_entry, rb); -+ e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); - - if (e->ec < e1->ec) - p = &(*p)->rb_left; -@@ -235,8 +204,8 @@ static void wl_tree_add(struct ubi_wl_en - } - } - -- rb_link_node(&e->rb, parent, p); -- rb_insert_color(&e->rb, root); -+ rb_link_node(&e->u.rb, parent, p); -+ rb_insert_color(&e->u.rb, root); - } - - /** -@@ -331,7 +300,7 @@ static int in_wl_tree(struct ubi_wl_entr - while (p) { - struct ubi_wl_entry *e1; - -- e1 = rb_entry(p, struct ubi_wl_entry, rb); -+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb); - - if (e->pnum == e1->pnum) { - ubi_assert(e == e1); -@@ -355,50 +324,24 @@ static int in_wl_tree(struct ubi_wl_entr - } - - /** -- * prot_tree_add - add physical eraseblock to protection trees. -+ * prot_queue_add - add physical eraseblock to the protection queue. - * @ubi: UBI device description object - * @e: the physical eraseblock to add -- * @pe: protection entry object to use -- * @abs_ec: absolute erase counter value when this physical eraseblock has -- * to be removed from the protection trees. - * -- * @wl->lock has to be locked. -- */ --static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, -- struct ubi_wl_prot_entry *pe, int abs_ec) --{ -- struct rb_node **p, *parent = NULL; -- struct ubi_wl_prot_entry *pe1; -- -- pe->e = e; -- pe->abs_ec = ubi->abs_ec + abs_ec; -- -- p = &ubi->prot.pnum.rb_node; -- while (*p) { -- parent = *p; -- pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum); -- -- if (e->pnum < pe1->e->pnum) -- p = &(*p)->rb_left; -- else -- p = &(*p)->rb_right; -- } -- rb_link_node(&pe->rb_pnum, parent, p); -- rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum); -- -- p = &ubi->prot.aec.rb_node; -- parent = NULL; -- while (*p) { -- parent = *p; -- pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec); -- -- if (pe->abs_ec < pe1->abs_ec) -- p = &(*p)->rb_left; -- else -- p = &(*p)->rb_right; -- } -- rb_link_node(&pe->rb_aec, parent, p); -- rb_insert_color(&pe->rb_aec, &ubi->prot.aec); -+ * This function adds @e to the tail of the protection queue @ubi->pq, where -+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be -+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to -+ * be locked. 
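The kernel-doc above describes protection in terms of erase cycles; as a quick illustration of the ring arithmetic it relies on, here is a minimal stand-alone sketch. It is not part of the patch, and it assumes the mainline value of 10 for UBI_PROT_QUEUE_LEN (the constant itself lives in ubi.h, outside the hunks shown here):

	#define UBI_PROT_QUEUE_LEN 10	/* assumed value; defined in ubi.h */

	static int pq_head;		/* advanced once per erase operation */

	static int slot_for_new_peb(void)
	{
		/* prot_queue_add(): tail = head - 1, wrapping at the queue length */
		return (pq_head + UBI_PROT_QUEUE_LEN - 1) % UBI_PROT_QUEUE_LEN;
	}

	static void one_erase_completed(void)
	{
		/*
		 * serve_prot_queue(): the slot at pq_head is emptied into the
		 * used tree and the head advances, so a PEB queued at the tail
		 * is served only after UBI_PROT_QUEUE_LEN erase operations.
		 */
		pq_head = (pq_head + 1) % UBI_PROT_QUEUE_LEN;
	}

This mirrors the prot_queue_add()/serve_prot_queue() logic in the hunks around it and is meant only to make the "queue length = protection time in erase cycles" relationship concrete.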
-+ */ -+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) -+{ -+ int pq_tail = ubi->pq_head - 1; -+ -+ if (pq_tail < 0) -+ pq_tail = UBI_PROT_QUEUE_LEN - 1; -+ ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); -+ list_add_tail(&e->u.list, &ubi->pq[pq_tail]); -+ dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); - } - - /** -@@ -414,14 +357,14 @@ static struct ubi_wl_entry *find_wl_entr - struct rb_node *p; - struct ubi_wl_entry *e; - -- e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); -+ e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); - max += e->ec; - - p = root->rb_node; - while (p) { - struct ubi_wl_entry *e1; - -- e1 = rb_entry(p, struct ubi_wl_entry, rb); -+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb); - if (e1->ec >= max) - p = p->rb_left; - else { -@@ -443,17 +386,12 @@ static struct ubi_wl_entry *find_wl_entr - */ - int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) - { -- int err, protect, medium_ec; -+ int err, medium_ec; - struct ubi_wl_entry *e, *first, *last; -- struct ubi_wl_prot_entry *pe; - - ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || - dtype == UBI_UNKNOWN); - -- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); -- if (!pe) -- return -ENOMEM; -- - retry: - spin_lock(&ubi->wl_lock); - if (!ubi->free.rb_node) { -@@ -461,16 +399,13 @@ retry: - ubi_assert(list_empty(&ubi->works)); - ubi_err("no free eraseblocks"); - spin_unlock(&ubi->wl_lock); -- kfree(pe); - return -ENOSPC; - } - spin_unlock(&ubi->wl_lock); - - err = produce_free_peb(ubi); -- if (err < 0) { -- kfree(pe); -+ if (err < 0) - return err; -- } - goto retry; - } - -@@ -483,7 +418,6 @@ retry: - * %WL_FREE_MAX_DIFF. - */ - e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); -- protect = LT_PROTECTION; - break; - case UBI_UNKNOWN: - /* -@@ -492,81 +426,63 @@ retry: - * eraseblock with erase counter greater or equivalent than the - * lowest erase counter plus %WL_FREE_MAX_DIFF. - */ -- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); -- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); -+ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, -+ u.rb); -+ last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); - - if (last->ec - first->ec < WL_FREE_MAX_DIFF) - e = rb_entry(ubi->free.rb_node, -- struct ubi_wl_entry, rb); -+ struct ubi_wl_entry, u.rb); - else { - medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; - e = find_wl_entry(&ubi->free, medium_ec); - } -- protect = U_PROTECTION; - break; - case UBI_SHORTTERM: - /* - * For short term data we pick a physical eraseblock with the - * lowest erase counter as we expect it will be erased soon. - */ -- e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); -- protect = ST_PROTECTION; -+ e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); - break; - default: -- protect = 0; -- e = NULL; - BUG(); - } - -+ paranoid_check_in_wl_tree(e, &ubi->free); -+ - /* -- * Move the physical eraseblock to the protection trees where it will -+ * Move the physical eraseblock to the protection queue where it will - * be protected from being moved for some time. 
- */ -- paranoid_check_in_wl_tree(e, &ubi->free); -- rb_erase(&e->rb, &ubi->free); -- prot_tree_add(ubi, e, pe, protect); -- -- dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); -+ rb_erase(&e->u.rb, &ubi->free); -+ dbg_wl("PEB %d EC %d", e->pnum, e->ec); -+ prot_queue_add(ubi, e); - spin_unlock(&ubi->wl_lock); -- - return e->pnum; - } - - /** -- * prot_tree_del - remove a physical eraseblock from the protection trees -+ * prot_queue_del - remove a physical eraseblock from the protection queue. - * @ubi: UBI device description object - * @pnum: the physical eraseblock to remove - * -- * This function returns PEB @pnum from the protection trees and returns zero -- * in case of success and %-ENODEV if the PEB was not found in the protection -- * trees. -+ * This function deletes PEB @pnum from the protection queue and returns zero -+ * in case of success and %-ENODEV if the PEB was not found. - */ --static int prot_tree_del(struct ubi_device *ubi, int pnum) -+static int prot_queue_del(struct ubi_device *ubi, int pnum) - { -- struct rb_node *p; -- struct ubi_wl_prot_entry *pe = NULL; -- -- p = ubi->prot.pnum.rb_node; -- while (p) { -- -- pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); -- -- if (pnum == pe->e->pnum) -- goto found; -+ struct ubi_wl_entry *e; - -- if (pnum < pe->e->pnum) -- p = p->rb_left; -- else -- p = p->rb_right; -- } -+ e = ubi->lookuptbl[pnum]; -+ if (!e) -+ return -ENODEV; - -- return -ENODEV; -+ if (paranoid_check_in_pq(ubi, e)) -+ return -ENODEV; - --found: -- ubi_assert(pe->e->pnum == pnum); -- rb_erase(&pe->rb_aec, &ubi->prot.aec); -- rb_erase(&pe->rb_pnum, &ubi->prot.pnum); -- kfree(pe); -+ list_del(&e->u.list); -+ dbg_wl("deleted PEB %d from the protection queue", e->pnum); - return 0; - } - -@@ -632,47 +548,47 @@ out_free: - } - - /** -- * check_protection_over - check if it is time to stop protecting some PEBs. -+ * serve_prot_queue - check if it is time to stop protecting PEBs. - * @ubi: UBI device description object - * -- * This function is called after each erase operation, when the absolute erase -- * counter is incremented, to check if some physical eraseblock have not to be -- * protected any longer. These physical eraseblocks are moved from the -- * protection trees to the used tree. -+ * This function is called after each erase operation and removes PEBs from the -+ * tail of the protection queue. These PEBs have been protected for long enough -+ * and should be moved to the used tree. - */ --static void check_protection_over(struct ubi_device *ubi) -+static void serve_prot_queue(struct ubi_device *ubi) - { -- struct ubi_wl_prot_entry *pe; -+ struct ubi_wl_entry *e, *tmp; -+ int count; - - /* - * There may be several protected physical eraseblock to remove, - * process them all. - */ -- while (1) { -- spin_lock(&ubi->wl_lock); -- if (!ubi->prot.aec.rb_node) { -- spin_unlock(&ubi->wl_lock); -- break; -- } -- -- pe = rb_entry(rb_first(&ubi->prot.aec), -- struct ubi_wl_prot_entry, rb_aec); -+repeat: -+ count = 0; -+ spin_lock(&ubi->wl_lock); -+ list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { -+ dbg_wl("PEB %d EC %d protection over, move to used tree", -+ e->pnum, e->ec); - -- if (pe->abs_ec > ubi->abs_ec) { -+ list_del(&e->u.list); -+ wl_tree_add(e, &ubi->used); -+ if (count++ > 32) { -+ /* -+ * Let's be nice and avoid holding the spinlock for -+ * too long. 
-+ */ - spin_unlock(&ubi->wl_lock); -- break; -+ cond_resched(); -+ goto repeat; - } -- -- dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu", -- pe->e->pnum, ubi->abs_ec, pe->abs_ec); -- rb_erase(&pe->rb_aec, &ubi->prot.aec); -- rb_erase(&pe->rb_pnum, &ubi->prot.pnum); -- wl_tree_add(pe->e, &ubi->used); -- spin_unlock(&ubi->wl_lock); -- -- kfree(pe); -- cond_resched(); - } -+ -+ ubi->pq_head += 1; -+ if (ubi->pq_head == UBI_PROT_QUEUE_LEN) -+ ubi->pq_head = 0; -+ ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); -+ spin_unlock(&ubi->wl_lock); - } - - /** -@@ -680,8 +596,8 @@ static void check_protection_over(struct - * @ubi: UBI device description object - * @wrk: the work to schedule - * -- * This function enqueues a work defined by @wrk to the tail of the pending -- * works list. -+ * This function adds a work defined by @wrk to the tail of the pending works -+ * list. - */ - static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) - { -@@ -739,13 +655,11 @@ static int schedule_erase(struct ubi_dev - static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, - int cancel) - { -- int err, put = 0, scrubbing = 0, protect = 0; -- struct ubi_wl_prot_entry *uninitialized_var(pe); -+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; - struct ubi_wl_entry *e1, *e2; - struct ubi_vid_hdr *vid_hdr; - - kfree(wrk); -- - if (cancel) - return 0; - -@@ -781,7 +695,7 @@ static int wear_leveling_worker(struct u - * highly worn-out free physical eraseblock. If the erase - * counters differ much enough, start wear-leveling. - */ -- e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); -+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); - e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); - - if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { -@@ -790,21 +704,21 @@ static int wear_leveling_worker(struct u - goto out_cancel; - } - paranoid_check_in_wl_tree(e1, &ubi->used); -- rb_erase(&e1->rb, &ubi->used); -+ rb_erase(&e1->u.rb, &ubi->used); - dbg_wl("move PEB %d EC %d to PEB %d EC %d", - e1->pnum, e1->ec, e2->pnum, e2->ec); - } else { - /* Perform scrubbing */ - scrubbing = 1; -- e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); -+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); - e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); - paranoid_check_in_wl_tree(e1, &ubi->scrub); -- rb_erase(&e1->rb, &ubi->scrub); -+ rb_erase(&e1->u.rb, &ubi->scrub); - dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); - } - - paranoid_check_in_wl_tree(e2, &ubi->free); -- rb_erase(&e2->rb, &ubi->free); -+ rb_erase(&e2->u.rb, &ubi->free); - ubi->move_from = e1; - ubi->move_to = e2; - spin_unlock(&ubi->wl_lock); -@@ -826,81 +740,104 @@ static int wear_leveling_worker(struct u - /* - * We are trying to move PEB without a VID header. UBI - * always write VID headers shortly after the PEB was -- * given, so we have a situation when it did not have -- * chance to write it down because it was preempted. -- * Just re-schedule the work, so that next time it will -- * likely have the VID header in place. -+ * given, so we have a situation when it has not yet -+ * had a chance to write it, because it was preempted. -+ * So add this PEB to the protection queue so far, -+ * because presumably more data will be written there -+ * (including the missing VID header), and then we'll -+ * move it. 
- */ - dbg_wl("PEB %d has no VID header", e1->pnum); -+ protect = 1; - goto out_not_moved; - } - - ubi_err("error %d while reading VID header from PEB %d", - err, e1->pnum); -- if (err > 0) -- err = -EIO; - goto out_error; - } - - err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); - if (err) { -+ if (err == MOVE_CANCEL_RACE) { -+ /* -+ * The LEB has not been moved because the volume is -+ * being deleted or the PEB has been put meanwhile. We -+ * should prevent this PEB from being selected for -+ * wear-leveling movement again, so put it to the -+ * protection queue. -+ */ -+ protect = 1; -+ goto out_not_moved; -+ } - -- if (err < 0) -- goto out_error; -- if (err == 1) -+ if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR || -+ err == MOVE_TARGET_RD_ERR) { -+ /* Target PEB bit-flips or write error, torture it */ -+ torture = 1; - goto out_not_moved; -+ } - -- /* -- * For some reason the LEB was not moved - it might be because -- * the volume is being deleted. We should prevent this PEB from -- * being selected for wear-levelling movement for some "time", -- * so put it to the protection tree. -- */ -+ if (err == MOVE_SOURCE_RD_ERR) { -+ /* -+ * An error happened while reading the source PEB. Do -+ * not switch to R/O mode in this case, and give the -+ * upper layers a possibility to recover from this, -+ * e.g. by unmapping corresponding LEB. Instead, just -+ * put this PEB to the @ubi->erroneous list to prevent -+ * UBI from trying to move it over and over again. -+ */ -+ if (ubi->erroneous_peb_count > ubi->max_erroneous) { -+ ubi_err("too many erroneous eraseblocks (%d)", -+ ubi->erroneous_peb_count); -+ goto out_error; -+ } -+ erroneous = 1; -+ goto out_not_moved; -+ } - -- dbg_wl("cancelled moving PEB %d", e1->pnum); -- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); -- if (!pe) { -- err = -ENOMEM; -+ if (err < 0) - goto out_error; -- } - -- protect = 1; -+ ubi_assert(0); - } - -+ /* The PEB has been successfully moved */ - ubi_free_vid_hdr(ubi, vid_hdr); -- if (scrubbing && !protect) -+ if (scrubbing) - ubi_msg("scrubbed PEB %d, data moved to PEB %d", - e1->pnum, e2->pnum); - - spin_lock(&ubi->wl_lock); -- if (protect) -- prot_tree_add(ubi, e1, pe, protect); -- if (!ubi->move_to_put) -+ if (!ubi->move_to_put) { - wl_tree_add(e2, &ubi->used); -- else -- put = 1; -+ e2 = NULL; -+ } - ubi->move_from = ubi->move_to = NULL; - ubi->move_to_put = ubi->wl_scheduled = 0; - spin_unlock(&ubi->wl_lock); - -- if (put) { -+ err = schedule_erase(ubi, e1, 0); -+ if (err) { -+ kmem_cache_free(ubi_wl_entry_slab, e1); -+ if (e2) -+ kmem_cache_free(ubi_wl_entry_slab, e2); -+ goto out_ro; -+ } -+ -+ if (e2) { - /* - * Well, the target PEB was put meanwhile, schedule it for - * erasure. - */ - dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); - err = schedule_erase(ubi, e2, 0); -- if (err) -- goto out_error; -- } -- -- if (!protect) { -- err = schedule_erase(ubi, e1, 0); -- if (err) -- goto out_error; -+ if (err) { -+ kmem_cache_free(ubi_wl_entry_slab, e2); -+ goto out_ro; -+ } - } - -- - dbg_wl("done"); - mutex_unlock(&ubi->move_mutex); - return 0; -@@ -908,42 +845,52 @@ static int wear_leveling_worker(struct u - /* - * For some reasons the LEB was not moved, might be an error, might be - * something else. @e1 was not changed, so return it back. @e2 might -- * be changed, schedule it for erasure. -+ * have been changed, schedule it for erasure. 
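For readability, the ubi_eba_copy_leb() error handling in the hunk above boils down to a small mapping from the move result to what happens to the source PEB @e1. The recap below is an illustrative comment only, using names that already appear in the hunk; it is not additional patch content:

	/*
	 * ubi_eba_copy_leb() outcome          disposition chosen above
	 * ----------------------------------  -------------------------------------
	 * MOVE_CANCEL_RACE                    protect = 1: e1 goes back to @ubi->pq
	 * MOVE_CANCEL_BITFLIPS,
	 * MOVE_TARGET_WR_ERR,
	 * MOVE_TARGET_RD_ERR                  torture = 1: e1 returns to @wl->used
	 *                                     or @wl->scrub, target e2 is erased
	 *                                     with torture
	 * MOVE_SOURCE_RD_ERR                  erroneous = 1: e1 is parked in
	 *                                     @ubi->erroneous (bounded by
	 *                                     @ubi->max_erroneous)
	 * any other negative error            out_error: the device goes read-only
	 */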
- */ - out_not_moved: -- ubi_free_vid_hdr(ubi, vid_hdr); -+ dbg_wl("cancel moving PEB %d to PEB %d (%d)", -+ e1->pnum, e2->pnum, err); - spin_lock(&ubi->wl_lock); -- if (scrubbing) -+ if (protect) -+ prot_queue_add(ubi, e1); -+ else if (erroneous) { -+ wl_tree_add(e1, &ubi->erroneous); -+ ubi->erroneous_peb_count += 1; -+ } else if (scrubbing) - wl_tree_add(e1, &ubi->scrub); - else - wl_tree_add(e1, &ubi->used); -+ ubi_assert(!ubi->move_to_put); - ubi->move_from = ubi->move_to = NULL; -- ubi->move_to_put = ubi->wl_scheduled = 0; -+ ubi->wl_scheduled = 0; - spin_unlock(&ubi->wl_lock); - -- err = schedule_erase(ubi, e2, 0); -- if (err) -- goto out_error; -- -+ ubi_free_vid_hdr(ubi, vid_hdr); -+ err = schedule_erase(ubi, e2, torture); -+ if (err) { -+ kmem_cache_free(ubi_wl_entry_slab, e2); -+ goto out_ro; -+ } - mutex_unlock(&ubi->move_mutex); - return 0; - - out_error: - ubi_err("error %d while moving PEB %d to PEB %d", - err, e1->pnum, e2->pnum); -- -- ubi_free_vid_hdr(ubi, vid_hdr); - spin_lock(&ubi->wl_lock); - ubi->move_from = ubi->move_to = NULL; - ubi->move_to_put = ubi->wl_scheduled = 0; - spin_unlock(&ubi->wl_lock); - -+ ubi_free_vid_hdr(ubi, vid_hdr); - kmem_cache_free(ubi_wl_entry_slab, e1); - kmem_cache_free(ubi_wl_entry_slab, e2); -- ubi_ro_mode(ubi); - -+out_ro: -+ ubi_ro_mode(ubi); - mutex_unlock(&ubi->move_mutex); -- return err; -+ ubi_assert(err != 0); -+ return err < 0 ? err : -EIO; - - out_cancel: - ubi->wl_scheduled = 0; -@@ -988,7 +935,7 @@ static int ensure_wear_leveling(struct u - * erase counter of free physical eraseblocks is greater then - * %UBI_WL_THRESHOLD. - */ -- e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); -+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); - e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); - - if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) -@@ -1050,7 +997,6 @@ static int erase_worker(struct ubi_devic - kfree(wl_wrk); - - spin_lock(&ubi->wl_lock); -- ubi->abs_ec += 1; - wl_tree_add(e, &ubi->free); - spin_unlock(&ubi->wl_lock); - -@@ -1058,7 +1004,7 @@ static int erase_worker(struct ubi_devic - * One more erase operation has happened, take care about - * protected physical eraseblocks. - */ -- check_protection_over(ubi); -+ serve_prot_queue(ubi); - - /* And take care about wear-leveling */ - err = ensure_wear_leveling(ubi); -@@ -1084,7 +1030,7 @@ static int erase_worker(struct ubi_devic - /* - * If this is not %-EIO, we have no idea what to do. Scheduling - * this physical eraseblock for erasure again would cause -- * errors again and again. Well, lets switch to RO mode. -+ * errors again and again. Well, lets switch to R/O mode. 
- */ - goto out_ro; - } -@@ -1190,12 +1136,19 @@ retry: - } else { - if (in_wl_tree(e, &ubi->used)) { - paranoid_check_in_wl_tree(e, &ubi->used); -- rb_erase(&e->rb, &ubi->used); -+ rb_erase(&e->u.rb, &ubi->used); - } else if (in_wl_tree(e, &ubi->scrub)) { - paranoid_check_in_wl_tree(e, &ubi->scrub); -- rb_erase(&e->rb, &ubi->scrub); -+ rb_erase(&e->u.rb, &ubi->scrub); -+ } else if (in_wl_tree(e, &ubi->erroneous)) { -+ paranoid_check_in_wl_tree(e, &ubi->erroneous); -+ rb_erase(&e->u.rb, &ubi->erroneous); -+ ubi->erroneous_peb_count -= 1; -+ ubi_assert(ubi->erroneous_peb_count >= 0); -+ /* Erroneous PEBs should be tortured */ -+ torture = 1; - } else { -- err = prot_tree_del(ubi, e->pnum); -+ err = prot_queue_del(ubi, e->pnum); - if (err) { - ubi_err("PEB %d not found", pnum); - ubi_ro_mode(ubi); -@@ -1255,11 +1208,11 @@ retry: - - if (in_wl_tree(e, &ubi->used)) { - paranoid_check_in_wl_tree(e, &ubi->used); -- rb_erase(&e->rb, &ubi->used); -+ rb_erase(&e->u.rb, &ubi->used); - } else { - int err; - -- err = prot_tree_del(ubi, e->pnum); -+ err = prot_queue_del(ubi, e->pnum); - if (err) { - ubi_err("PEB %d not found", pnum); - ubi_ro_mode(ubi); -@@ -1290,7 +1243,7 @@ int ubi_wl_flush(struct ubi_device *ubi) - int err; - - /* -- * Erase while the pending works queue is not empty, but not more then -+ * Erase while the pending works queue is not empty, but not more than - * the number of currently pending works. - */ - dbg_wl("flush (%d pending works)", ubi->works_count); -@@ -1308,7 +1261,7 @@ int ubi_wl_flush(struct ubi_device *ubi) - up_write(&ubi->work_sem); - - /* -- * And in case last was the WL worker and it cancelled the LEB -+ * And in case last was the WL worker and it canceled the LEB - * movement, flush again. - */ - while (ubi->works_count) { -@@ -1337,11 +1290,11 @@ static void tree_destroy(struct rb_root - else if (rb->rb_right) - rb = rb->rb_right; - else { -- e = rb_entry(rb, struct ubi_wl_entry, rb); -+ e = rb_entry(rb, struct ubi_wl_entry, u.rb); - - rb = rb_parent(rb); - if (rb) { -- if (rb->rb_left == &e->rb) -+ if (rb->rb_left == &e->u.rb) - rb->rb_left = NULL; - else - rb->rb_right = NULL; -@@ -1436,15 +1389,13 @@ static void cancel_pending(struct ubi_de - */ - int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) - { -- int err; -+ int err, i; - struct rb_node *rb1, *rb2; - struct ubi_scan_volume *sv; - struct ubi_scan_leb *seb, *tmp; - struct ubi_wl_entry *e; - -- -- ubi->used = ubi->free = ubi->scrub = RB_ROOT; -- ubi->prot.pnum = ubi->prot.aec = RB_ROOT; -+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; - spin_lock_init(&ubi->wl_lock); - mutex_init(&ubi->move_mutex); - init_rwsem(&ubi->work_sem); -@@ -1458,6 +1409,10 @@ int ubi_wl_init_scan(struct ubi_device * - if (!ubi->lookuptbl) - return err; - -+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) -+ INIT_LIST_HEAD(&ubi->pq[i]); -+ ubi->pq_head = 0; -+ - list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { - cond_resched(); - -@@ -1552,33 +1507,18 @@ out_free: - } - - /** -- * protection_trees_destroy - destroy the protection RB-trees. -+ * protection_queue_destroy - destroy the protection queue. 
- * @ubi: UBI device description object - */ --static void protection_trees_destroy(struct ubi_device *ubi) -+static void protection_queue_destroy(struct ubi_device *ubi) - { -- struct rb_node *rb; -- struct ubi_wl_prot_entry *pe; -- -- rb = ubi->prot.aec.rb_node; -- while (rb) { -- if (rb->rb_left) -- rb = rb->rb_left; -- else if (rb->rb_right) -- rb = rb->rb_right; -- else { -- pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec); -- -- rb = rb_parent(rb); -- if (rb) { -- if (rb->rb_left == &pe->rb_aec) -- rb->rb_left = NULL; -- else -- rb->rb_right = NULL; -- } -+ int i; -+ struct ubi_wl_entry *e, *tmp; - -- kmem_cache_free(ubi_wl_entry_slab, pe->e); -- kfree(pe); -+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { -+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { -+ list_del(&e->u.list); -+ kmem_cache_free(ubi_wl_entry_slab, e); - } - } - } -@@ -1591,8 +1531,9 @@ void ubi_wl_close(struct ubi_device *ubi - { - dbg_wl("close the WL sub-system"); - cancel_pending(ubi); -- protection_trees_destroy(ubi); -+ protection_queue_destroy(ubi); - tree_destroy(&ubi->used); -+ tree_destroy(&ubi->erroneous); - tree_destroy(&ubi->free); - tree_destroy(&ubi->scrub); - kfree(ubi->lookuptbl); -@@ -1661,4 +1602,27 @@ static int paranoid_check_in_wl_tree(str - return 1; - } - -+/** -+ * paranoid_check_in_pq - check if wear-leveling entry is in the protection -+ * queue. -+ * @ubi: UBI device description object -+ * @e: the wear-leveling entry to check -+ * -+ * This function returns zero if @e is in @ubi->pq and %1 if it is not. -+ */ -+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e) -+{ -+ struct ubi_wl_entry *p; -+ int i; -+ -+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) -+ list_for_each_entry(p, &ubi->pq[i], u.list) -+ if (p == e) -+ return 0; -+ -+ ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue", -+ e->pnum, e->ec); -+ ubi_dbg_dump_stack(); -+ return 1; -+} - #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/Kconfig linux-omap-2.6.28-nokia1/drivers/net/wireless/Kconfig ---- linux-omap-2.6.28-omap1/drivers/net/wireless/Kconfig 2011-06-22 13:14:19.313067732 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/Kconfig 2011-06-22 13:19:32.903063275 +0200 -@@ -717,5 +717,6 @@ source "drivers/net/wireless/b43/Kconfig - source "drivers/net/wireless/b43legacy/Kconfig" - source "drivers/net/wireless/zd1211rw/Kconfig" - source "drivers/net/wireless/rt2x00/Kconfig" -+source "drivers/net/wireless/wl12xx/Kconfig" - - endmenu -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/Makefile linux-omap-2.6.28-nokia1/drivers/net/wireless/Makefile ---- linux-omap-2.6.28-omap1/drivers/net/wireless/Makefile 2011-06-22 13:14:19.313067732 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/Makefile 2011-06-22 13:19:32.913063273 +0200 -@@ -67,3 +67,5 @@ obj-$(CONFIG_ATH5K) += ath5k/ - obj-$(CONFIG_ATH9K) += ath9k/ - - obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o -+ -+obj-$(CONFIG_WL12XX) += wl12xx/ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/Kconfig linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/Kconfig ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/Kconfig 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,31 @@ -+menuconfig WL12XX -+ boolean "TI wl12xx driver support" -+ depends on MAC80211 && WLAN_80211 && EXPERIMENTAL -+ ---help--- -+ This will enable TI wl12xx driver support. 
The drivers make -+ use of the mac80211 stack. -+ -+config WL1251 -+ tristate "TI wl1251 support" -+ depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS -+ select FW_LOADER -+ select CRC7 -+ ---help--- -+ This module adds support for wireless adapters based on -+ TI wl1251 chipset. -+ -+ If you choose to build a module, it'll be called wl1251. Say N if -+ unsure. -+ -+config WL1271 -+ tristate "TI wl1271 support" -+ depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS -+ select FW_LOADER -+ select CRC7 -+ ---help--- -+ This module adds support for wireless adapters based on -+ TI wl1271 chipset. -+ -+ If you choose to build a module, it'll be called wl1271. Say N if -+ unsure. -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/Makefile linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/Makefile ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/Makefile 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,13 @@ -+# FIXME: use wl12xx.ko for now to not break the user space -+wl12xx-objs = wl1251_main.o wl1251_netlink.o wl1251_event.o \ -+ wl1251_tx.o wl1251_rx.o wl1251_ps.o wl1251_cmd.o \ -+ wl1251_acx.o wl1251_boot.o wl1251_init.o \ -+ wl1251_debugfs.o wl1251_spi.o -+obj-$(CONFIG_WL1251) += wl12xx.o -+ -+wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \ -+ wl1271_netlink.o wl1271_event.o \ -+ wl1271_tx.o wl1271_rx.o wl1271_ps.o \ -+ wl1271_acx.o wl1271_boot.o wl1271_init.o \ -+ wl1271_debugfs.o -+obj-$(CONFIG_WL1271) += wl1271.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl12xx_80211.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl12xx_80211.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl12xx_80211.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl12xx_80211.h 2011-06-22 13:19:32.953063273 +0200 -@@ -0,0 +1,156 @@ -+#ifndef __WL12XX_80211_H__ -+#define __WL12XX_80211_H__ -+ -+#include /* ETH_ALEN */ -+ -+/* RATES */ -+#define IEEE80211_CCK_RATE_1MB 0x02 -+#define IEEE80211_CCK_RATE_2MB 0x04 -+#define IEEE80211_CCK_RATE_5MB 0x0B -+#define IEEE80211_CCK_RATE_11MB 0x16 -+#define IEEE80211_OFDM_RATE_6MB 0x0C -+#define IEEE80211_OFDM_RATE_9MB 0x12 -+#define IEEE80211_OFDM_RATE_12MB 0x18 -+#define IEEE80211_OFDM_RATE_18MB 0x24 -+#define IEEE80211_OFDM_RATE_24MB 0x30 -+#define IEEE80211_OFDM_RATE_36MB 0x48 -+#define IEEE80211_OFDM_RATE_48MB 0x60 -+#define IEEE80211_OFDM_RATE_54MB 0x6C -+#define IEEE80211_BASIC_RATE_MASK 0x80 -+ -+#define IEEE80211_CCK_RATE_1MB_MASK (1<<0) -+#define IEEE80211_CCK_RATE_2MB_MASK (1<<1) -+#define IEEE80211_CCK_RATE_5MB_MASK (1<<2) -+#define IEEE80211_CCK_RATE_11MB_MASK (1<<3) -+#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4) -+#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5) -+#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6) -+#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7) -+#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8) -+#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9) -+#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10) -+#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11) -+ -+#define IEEE80211_CCK_RATES_MASK 0x0000000F -+#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \ -+ IEEE80211_CCK_RATE_2MB_MASK) -+#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \ -+ IEEE80211_CCK_RATE_5MB_MASK | \ -+ IEEE80211_CCK_RATE_11MB_MASK) -+ -+#define IEEE80211_OFDM_RATES_MASK 0x00000FF0 -+#define IEEE80211_OFDM_BASIC_RATES_MASK 
(IEEE80211_OFDM_RATE_6MB_MASK | \ -+ IEEE80211_OFDM_RATE_12MB_MASK | \ -+ IEEE80211_OFDM_RATE_24MB_MASK) -+#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \ -+ IEEE80211_OFDM_RATE_9MB_MASK | \ -+ IEEE80211_OFDM_RATE_18MB_MASK | \ -+ IEEE80211_OFDM_RATE_36MB_MASK | \ -+ IEEE80211_OFDM_RATE_48MB_MASK | \ -+ IEEE80211_OFDM_RATE_54MB_MASK) -+#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \ -+ IEEE80211_CCK_DEFAULT_RATES_MASK) -+ -+ -+/* This really should be 8, but not for our firmware */ -+#define MAX_SUPPORTED_RATES 32 -+#define COUNTRY_STRING_LEN 3 -+#define MAX_COUNTRY_TRIPLETS 32 -+ -+/* Headers */ -+struct ieee80211_header { -+ __le16 frame_ctl; -+ __le16 duration_id; -+ u8 da[ETH_ALEN]; -+ u8 sa[ETH_ALEN]; -+ u8 bssid[ETH_ALEN]; -+ __le16 seq_ctl; -+ u8 payload[0]; -+} __attribute__ ((packed)); -+ -+struct wl12xx_ie_header { -+ u8 id; -+ u8 len; -+} __attribute__ ((packed)); -+ -+/* IEs */ -+ -+struct wl12xx_ie_ssid { -+ struct wl12xx_ie_header header; -+ char ssid[IW_ESSID_MAX_SIZE]; -+} __attribute__ ((packed)); -+ -+struct wl12xx_ie_rates { -+ struct wl12xx_ie_header header; -+ u8 rates[MAX_SUPPORTED_RATES]; -+} __attribute__ ((packed)); -+ -+struct wl12xx_ie_ds_params { -+ struct wl12xx_ie_header header; -+ u8 channel; -+} __attribute__ ((packed)); -+ -+struct country_triplet { -+ u8 channel; -+ u8 num_channels; -+ u8 max_tx_power; -+} __attribute__ ((packed)); -+ -+struct wl12xx_ie_country { -+ struct wl12xx_ie_header header; -+ u8 country_string[COUNTRY_STRING_LEN]; -+ struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; -+} __attribute__ ((packed)); -+ -+ -+/* Templates */ -+ -+struct wl12xx_beacon_template { -+ struct ieee80211_header header; -+ __le32 time_stamp[2]; -+ __le16 beacon_interval; -+ __le16 capability; -+ struct wl12xx_ie_ssid ssid; -+ struct wl12xx_ie_rates rates; -+ struct wl12xx_ie_rates ext_rates; -+ struct wl12xx_ie_ds_params ds_params; -+ struct wl12xx_ie_country country; -+} __attribute__ ((packed)); -+ -+struct wl12xx_null_data_template { -+ struct ieee80211_header header; -+} __attribute__ ((packed)); -+ -+struct wl12xx_ps_poll_template { -+ u16 fc; -+ u16 aid; -+ u8 bssid[ETH_ALEN]; -+ u8 ta[ETH_ALEN]; -+} __attribute__ ((packed)); -+ -+struct wl12xx_qos_null_data_template { -+ struct ieee80211_header header; -+ __le16 qos_ctl; -+} __attribute__ ((packed)); -+ -+struct wl12xx_probe_req_template { -+ struct ieee80211_header header; -+ struct wl12xx_ie_ssid ssid; -+ struct wl12xx_ie_rates rates; -+ struct wl12xx_ie_rates ext_rates; -+} __attribute__ ((packed)); -+ -+ -+struct wl12xx_probe_resp_template { -+ struct ieee80211_header header; -+ __le32 time_stamp[2]; -+ __le16 beacon_interval; -+ __le16 capability; -+ struct wl12xx_ie_ssid ssid; -+ struct wl12xx_ie_rates rates; -+ struct wl12xx_ie_rates ext_rates; -+ struct wl12xx_ie_ds_params ds_params; -+ struct wl12xx_ie_country country; -+} __attribute__ ((packed)); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_acx.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_acx.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_acx.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_acx.c 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,1106 @@ -+#include "wl1251_acx.h" -+ -+#include -+#include -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+#include "wl1251_ps.h" -+ -+int 
wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod, -+ u8 mgt_rate, u8 mgt_mod) -+{ -+ struct acx_fw_gen_frame_rates *rates; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx frame rates"); -+ -+ rates = kzalloc(sizeof(*rates), GFP_KERNEL); -+ if (!rates) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ rates->tx_ctrl_frame_rate = ctrl_rate; -+ rates->tx_ctrl_frame_mod = ctrl_mod; -+ rates->tx_mgt_frame_rate = mgt_rate; -+ rates->tx_mgt_frame_mod = mgt_mod; -+ -+ ret = wl1251_cmd_configure(wl, ACX_FW_GEN_FRAME_RATES, -+ rates, sizeof(*rates)); -+ if (ret < 0) { -+ wl1251_error("Failed to set FW rates and modulation"); -+ goto out; -+ } -+ -+out: -+ kfree(rates); -+ return ret; -+} -+ -+ -+int wl1251_acx_station_id(struct wl1251 *wl) -+{ -+ struct acx_dot11_station_id *mac; -+ int ret, i; -+ -+ wl1251_debug(DEBUG_ACX, "acx dot11_station_id"); -+ -+ mac = kzalloc(sizeof(*mac), GFP_KERNEL); -+ if (!mac) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ for (i = 0; i < ETH_ALEN; i++) -+ mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i]; -+ -+ ret = wl1251_cmd_configure(wl, DOT11_STATION_ID, mac, sizeof(*mac)); -+ if (ret < 0) -+ goto out; -+ -+out: -+ kfree(mac); -+ return ret; -+} -+ -+int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id) -+{ -+ struct acx_dot11_default_key *default_key; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id); -+ -+ default_key = kzalloc(sizeof(*default_key), GFP_KERNEL); -+ if (!default_key) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ default_key->id = key_id; -+ -+ ret = wl1251_cmd_configure(wl, DOT11_DEFAULT_KEY, -+ default_key, sizeof(*default_key)); -+ if (ret < 0) { -+ wl1251_error("Couldnt set default key"); -+ goto out; -+ } -+ -+ wl->default_key = key_id; -+ -+out: -+ kfree(default_key); -+ return ret; -+} -+ -+int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event, -+ u8 listen_interval) -+{ -+ struct acx_wake_up_condition *wake_up; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx wake up conditions"); -+ -+ wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL); -+ if (!wake_up) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wake_up->wake_up_event = wake_up_event; -+ wake_up->listen_interval = listen_interval; -+ -+ ret = wl1251_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, -+ wake_up, sizeof(*wake_up)); -+ if (ret < 0) { -+ wl1251_warning("could not set wake up conditions: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(wake_up); -+ return ret; -+} -+ -+int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth) -+{ -+ struct acx_sleep_auth *auth; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx sleep auth"); -+ -+ auth = kzalloc(sizeof(*auth), GFP_KERNEL); -+ if (!auth) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ auth->sleep_auth = sleep_auth; -+ -+ ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); -+ if (ret < 0) -+ return ret; -+ -+out: -+ kfree(auth); -+ return ret; -+} -+ -+int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len) -+{ -+ struct acx_revision *rev; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx fw rev"); -+ -+ rev = kzalloc(sizeof(*rev), GFP_KERNEL); -+ if (!rev) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ret = wl1251_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev)); -+ if (ret < 0) { -+ wl1251_warning("ACX_FW_REV interrogate failed"); -+ goto out; -+ } -+ -+ /* be careful with the buffer sizes */ -+ strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version))); -+ -+ /* -+ * if the firmware version string is exactly -+ * sizeof(rev->fw_version) long or fw_len is less 
than -+ * sizeof(rev->fw_version) it won't be null terminated -+ */ -+ buf[min(len, sizeof(rev->fw_version)) - 1] = '\0'; -+ -+out: -+ kfree(rev); -+ return ret; -+} -+ -+int wl1251_acx_tx_power(struct wl1251 *wl, int power) -+{ -+ struct acx_current_tx_power *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr"); -+ -+ if (power < 0 || power > 25) -+ return -EINVAL; -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->current_tx_power = power * 10; -+ -+ ret = wl1251_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("configure of tx power failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_feature_cfg(struct wl1251 *wl) -+{ -+ struct acx_feature_config *feature; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx feature cfg"); -+ -+ feature = kzalloc(sizeof(*feature), GFP_KERNEL); -+ if (!feature) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ -+ feature->data_flow_options = 0; -+ feature->options = 0; -+ -+ ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG, -+ feature, sizeof(*feature)); -+ if (ret < 0) { -+ wl1251_error("Couldnt set HW encryption"); -+ goto out; -+ } -+ -+out: -+ kfree(feature); -+ return ret; -+} -+ -+int wl1251_acx_mem_map(struct wl1251 *wl, struct acx_header *mem_map, -+ size_t len) -+{ -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx mem map"); -+ -+ ret = wl1251_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_acx_data_path_params(struct wl1251 *wl, -+ struct acx_data_path_params_resp *resp) -+{ -+ struct acx_data_path_params *params; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx data path params"); -+ -+ params = kzalloc(sizeof(*params), GFP_KERNEL); -+ if (!params) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ params->rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE; -+ params->tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE; -+ -+ params->rx_packet_ring_chunk_num = DP_RX_PACKET_RING_CHUNK_NUM; -+ params->tx_packet_ring_chunk_num = DP_TX_PACKET_RING_CHUNK_NUM; -+ -+ params->tx_complete_threshold = 1; -+ -+ params->tx_complete_ring_depth = FW_TX_CMPLT_BLOCK_SIZE; -+ -+ params->tx_complete_timeout = DP_TX_COMPLETE_TIME_OUT; -+ -+ ret = wl1251_cmd_configure(wl, ACX_DATA_PATH_PARAMS, -+ params, sizeof(*params)); -+ if (ret < 0) -+ goto out; -+ -+ /* FIXME: shouldn't this be ACX_DATA_PATH_RESP_PARAMS? 
*/ -+ ret = wl1251_cmd_interrogate(wl, ACX_DATA_PATH_PARAMS, -+ resp, sizeof(*resp)); -+ -+ if (ret < 0) { -+ wl1251_warning("failed to read data path parameters: %d", ret); -+ goto out; -+ } else if (resp->header.cmd.status != CMD_STATUS_SUCCESS) { -+ wl1251_warning("data path parameter acx status failed"); -+ ret = -EIO; -+ goto out; -+ } -+ -+out: -+ kfree(params); -+ return ret; -+} -+ -+int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time) -+{ -+ struct acx_rx_msdu_lifetime *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx rx msdu life time"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->lifetime = life_time; -+ ret = wl1251_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("failed to set rx msdu life time: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter) -+{ -+ struct acx_rx_config *rx_config; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx rx config"); -+ -+ rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL); -+ if (!rx_config) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ rx_config->config_options = config; -+ rx_config->filter_options = filter; -+ -+ ret = wl1251_cmd_configure(wl, ACX_RX_CFG, -+ rx_config, sizeof(*rx_config)); -+ if (ret < 0) { -+ wl1251_warning("failed to set rx config: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(rx_config); -+ return ret; -+} -+ -+int wl1251_acx_pd_threshold(struct wl1251 *wl) -+{ -+ struct acx_packet_detection *pd; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx data pd threshold"); -+ -+ pd = kzalloc(sizeof(*pd), GFP_KERNEL); -+ if (!pd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* FIXME: threshold value not set */ -+ -+ ret = wl1251_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); -+ if (ret < 0) { -+ wl1251_warning("failed to set pd threshold: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(pd); -+ return 0; -+} -+ -+int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time) -+{ -+ struct acx_slot *slot; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx slot"); -+ -+ slot = kzalloc(sizeof(*slot), GFP_KERNEL); -+ if (!slot) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ slot->wone_index = STATION_WONE_INDEX; -+ slot->slot_time = slot_time; -+ -+ ret = wl1251_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot)); -+ if (ret < 0) { -+ wl1251_warning("failed to set slot time: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(slot); -+ return ret; -+} -+ -+int wl1251_acx_group_address_tbl(struct wl1251 *wl, void *mc_list, -+ u32 mc_list_len, bool enable) -+{ -+ struct acx_dot11_grp_addr_tbl *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx group address tbl"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* MAC filtering */ -+ acx->enabled = enable; -+ acx->num_groups = mc_list_len; -+ memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); -+ -+ ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("failed to set group addr table: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_service_period_timeout(struct wl1251 *wl) -+{ -+ struct acx_rx_timeout *rx_timeout; -+ int ret; -+ -+ rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL); -+ if (!rx_timeout) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wl1251_debug(DEBUG_ACX, "acx service period timeout"); 
-+ -+ rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF; -+ rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF; -+ -+ ret = wl1251_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, -+ rx_timeout, sizeof(*rx_timeout)); -+ if (ret < 0) { -+ wl1251_warning("failed to set service period timeout: %d", -+ ret); -+ goto out; -+ } -+ -+out: -+ kfree(rx_timeout); -+ return ret; -+} -+ -+int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold) -+{ -+ struct acx_rts_threshold *rts; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx rts threshold"); -+ -+ rts = kzalloc(sizeof(*rts), GFP_KERNEL); -+ if (!rts) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ rts->threshold = rts_threshold; -+ -+ ret = wl1251_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); -+ if (ret < 0) { -+ wl1251_warning("failed to set rts threshold: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(rts); -+ return ret; -+} -+ -+int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter) -+{ -+ struct acx_beacon_filter_option *beacon_filter; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx beacon filter opt"); -+ -+ beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); -+ if (!beacon_filter) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ beacon_filter->enable = enable_filter; -+ beacon_filter->max_num_beacons = 0; -+ -+ ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_OPT, -+ beacon_filter, sizeof(*beacon_filter)); -+ if (ret < 0) { -+ wl1251_warning("failed to set beacon filter opt: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(beacon_filter); -+ return ret; -+} -+ -+int wl1251_acx_beacon_filter_table(struct wl1251 *wl) -+{ -+ struct acx_beacon_filter_ie_table *ie_table; -+ int idx = 0; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx beacon filter table"); -+ -+ ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL); -+ if (!ie_table) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* configure default beacon pass-through rules */ -+ ie_table->num_ie = 1; -+ ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN; -+ ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE; -+ -+ ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, -+ ie_table, sizeof(*ie_table)); -+ if (ret < 0) { -+ wl1251_warning("failed to set beacon filter table: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(ie_table); -+ return ret; -+} -+ -+int wl1251_acx_conn_monit_params(struct wl1251 *wl) -+{ -+ struct acx_conn_monit_params *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx connection monitor parameters"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD; -+ acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT; -+ -+ ret = wl1251_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("failed to set connection monitor " -+ "parameters: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_sg_enable(struct wl1251 *wl, u8 mode) -+{ -+ struct acx_bt_wlan_coex *pta; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx sg enable"); -+ -+ pta = kzalloc(sizeof(*pta), GFP_KERNEL); -+ if (!pta) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ pta->enable = mode; -+ -+ ret = wl1251_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); -+ if (ret < 0) { -+ wl1251_warning("failed to set softgemini enable: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(pta); -+ return ret; -+} -+ -+int wl1251_acx_sg_cfg(struct wl1251 *wl, u16 wake_up_beacon) -+{ -+ struct 
acx_bt_wlan_coex_param *param; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx sg cfg"); -+ -+ param = kzalloc(sizeof(*param), GFP_KERNEL); -+ if (!param) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* BT-WLAN coext parameters */ -+ param->min_rate = RATE_INDEX_24MBPS; -+ param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF; -+ param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF; -+ param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF; -+ param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF; -+ param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF; -+ param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF; -+ param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF; -+ param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF; -+ param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF; -+ param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF; -+ param->wake_up_beacon = wake_up_beacon; -+ param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF; -+ param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF; -+ param->antenna_type = PTA_ANTENNA_TYPE_DEF; -+ param->signal_type = PTA_SIGNALING_TYPE_DEF; -+ param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF; -+ param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF; -+ param->max_cts = PTA_MAX_NUM_CTS_DEF; -+ param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF; -+ param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF; -+ param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF; -+ param->wlan_elp_hp = PTA_ELP_HP_DEF; -+ param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF; -+ param->ack_mode_dual_ant = PTA_ACK_MODE_DEF; -+ param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF; -+ param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF; -+ param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF; -+ -+ ret = wl1251_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); -+ if (ret < 0) { -+ wl1251_warning("failed to set sg config: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(param); -+ return ret; -+} -+ -+int wl1251_acx_sg_configure(struct wl1251 *wl, bool force) -+{ -+ int ret; -+ -+ if (wl->state == WL1251_STATE_OFF && !force) -+ return 0; -+ -+ switch (wl->bt_coex_mode) { -+ case WL1251_BT_COEX_OFF: -+ ret = wl1251_acx_sg_enable(wl, SG_DISABLE); -+ if (ret) -+ break; -+ ret = wl1251_acx_sg_cfg(wl, 0); -+ break; -+ case WL1251_BT_COEX_ENABLE: -+ ret = wl1251_acx_sg_enable(wl, SG_ENABLE); -+ if (ret) -+ break; -+ ret = wl1251_acx_sg_cfg(wl, PTA_TIME_BEFORE_BEACON_DEF); -+ break; -+ case WL1251_BT_COEX_MONOAUDIO: -+ ret = wl1251_acx_sg_enable(wl, SG_ENABLE); -+ if (ret) -+ break; -+ ret = wl1251_acx_sg_cfg(wl, PTA_TIME_BEFORE_BEACON_MONO_AUDIO); -+ break; -+ default: -+ wl1251_error("Invalid BT co-ex mode!"); -+ ret = -EOPNOTSUPP; -+ break; -+ } -+ -+ return ret; -+} -+ -+int wl1251_acx_cca_threshold(struct wl1251 *wl) -+{ -+ struct acx_energy_detection *detection; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx cca threshold"); -+ -+ detection = kzalloc(sizeof(*detection), GFP_KERNEL); -+ if (!detection) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D; -+ detection->tx_energy_detection = 0; -+ -+ ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, -+ detection, sizeof(*detection)); -+ if (ret < 0) { -+ wl1251_warning("failed to set cca threshold: %d", ret); -+ return ret; -+ } -+ -+out: -+ kfree(detection); -+ return ret; -+} -+ -+int wl1251_acx_bcn_dtim_options(struct wl1251 *wl) -+{ -+ struct acx_beacon_broadcast *bb; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx bcn dtim options"); -+ -+ bb = 
kzalloc(sizeof(*bb), GFP_KERNEL); -+ if (!bb) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE; -+ bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE; -+ bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE; -+ bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF; -+ -+ ret = wl1251_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); -+ if (ret < 0) { -+ wl1251_warning("failed to set rx config: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(bb); -+ return ret; -+} -+ -+int wl1251_acx_aid(struct wl1251 *wl, u16 aid) -+{ -+ struct acx_aid *acx_aid; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx aid"); -+ -+ acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL); -+ if (!acx_aid) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx_aid->aid = aid; -+ -+ ret = wl1251_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); -+ if (ret < 0) { -+ wl1251_warning("failed to set aid: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx_aid); -+ return ret; -+} -+ -+int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask) -+{ -+ struct acx_event_mask *mask; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx event mbox mask"); -+ -+ mask = kzalloc(sizeof(*mask), GFP_KERNEL); -+ if (!mask) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* high event mask is unused */ -+ mask->high_event_mask = 0xffffffff; -+ -+ mask->event_mask = event_mask; -+ -+ ret = wl1251_cmd_configure(wl, ACX_EVENT_MBOX_MASK, -+ mask, sizeof(*mask)); -+ if (ret < 0) { -+ wl1251_warning("failed to set acx_event_mbox_mask: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(mask); -+ return ret; -+} -+ -+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight, -+ u8 depth, enum wl12xx_acx_low_rssi_type type) -+{ -+ struct acx_low_rssi *rssi; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx low rssi"); -+ -+ rssi = kzalloc(sizeof(*rssi), GFP_KERNEL); -+ if (!rssi) -+ return -ENOMEM; -+ -+ rssi->threshold = threshold; -+ rssi->weight = weight; -+ rssi->depth = depth; -+ rssi->type = type; -+ -+ ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi)); -+ if (ret < 0) -+ wl1251_warning("failed to set low rssi threshold: %d", ret); -+ -+ kfree(rssi); -+ return ret; -+} -+ -+int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble) -+{ -+ struct acx_preamble *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx_set_preamble"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->preamble = preamble; -+ -+ ret = wl1251_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("Setting of preamble failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_cts_protect(struct wl1251 *wl, -+ enum acx_ctsprotect_type ctsprotect) -+{ -+ struct acx_ctsprotect *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx_set_ctsprotect"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->ctsprotect = ctsprotect; -+ -+ ret = wl1251_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("Setting of ctsprotect failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime) -+{ -+ struct acx_tsf_info *tsf_info; -+ int ret; -+ -+ tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL); -+ if (!tsf_info) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ret = wl1251_cmd_interrogate(wl, 
ACX_TSF_INFO, -+ tsf_info, sizeof(*tsf_info)); -+ if (ret < 0) { -+ wl1251_warning("ACX_FW_REV interrogate failed"); -+ goto out; -+ } -+ -+ *mactime = tsf_info->current_tsf_lsb | -+ (tsf_info->current_tsf_msb << 31); -+ -+out: -+ kfree(tsf_info); -+ return ret; -+} -+ -+int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats) -+{ -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx statistics"); -+ -+ ret = wl1251_cmd_interrogate(wl, ACX_STATISTICS, stats, -+ sizeof(*stats)); -+ if (ret < 0) { -+ wl1251_warning("acx statistics failed: %d", ret); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+int wl1251_acx_rate_policies(struct wl1251 *wl) -+{ -+ struct acx_rate_policy *acx; -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_ACX, "acx rate policies"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* configure one default (one-size-fits-all) rate class */ -+ acx->rate_class_cnt = 1; -+ acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED; -+ acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT; -+ acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT; -+ acx->rate_class[0].aflags = 0; -+ -+ ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("Setting of rate policies failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_mem_cfg(struct wl1251 *wl) -+{ -+ struct wl1251_acx_config_memory *mem_conf; -+ int ret, i; -+ -+ wl1251_debug(DEBUG_ACX, "acx mem cfg"); -+ -+ mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL); -+ if (!mem_conf) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* memory config */ -+ mem_conf->mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS); -+ mem_conf->mem_config.rx_mem_block_num = 35; -+ mem_conf->mem_config.tx_min_mem_block_num = 64; -+ mem_conf->mem_config.num_tx_queues = MAX_TX_QUEUES; -+ mem_conf->mem_config.host_if_options = HOSTIF_PKT_RING; -+ mem_conf->mem_config.num_ssid_profiles = 1; -+ mem_conf->mem_config.debug_buffer_size = -+ cpu_to_le16(TRACE_BUFFER_MAX_SIZE); -+ -+ /* RX queue config */ -+ mem_conf->rx_queue_config.dma_address = 0; -+ mem_conf->rx_queue_config.num_descs = ACX_RX_DESC_DEF; -+ mem_conf->rx_queue_config.priority = DEFAULT_RXQ_PRIORITY; -+ mem_conf->rx_queue_config.type = DEFAULT_RXQ_TYPE; -+ -+ /* TX queue config */ -+ for (i = 0; i < MAX_TX_QUEUES; i++) { -+ mem_conf->tx_queue_config[i].num_descs = ACX_TX_DESC_DEF; -+ mem_conf->tx_queue_config[i].attributes = i; -+ } -+ -+ ret = wl1251_cmd_configure(wl, ACX_MEM_CFG, mem_conf, -+ sizeof(*mem_conf)); -+ if (ret < 0) { -+ wl1251_warning("wl1251 mem config failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(mem_conf); -+ return ret; -+} -+ -+int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim) -+{ -+ struct wl1251_acx_wr_tbtt_and_dtim *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx tbtt and dtim"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->tbtt = tbtt; -+ acx->dtim = dtim; -+ -+ ret = wl1251_cmd_configure(wl, ACX_WR_TBTT_AND_DTIM, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("failed to set tbtt and dtim: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode, -+ u8 max_consecutive) -+{ -+ struct wl1251_acx_bet_enable *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx bet enable"); -+ -+ acx = 
kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->enable = mode; -+ acx->max_consecutive = max_consecutive; -+ -+ ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("wl1251 acx bet enable failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1251_acx_ip_config(struct wl1251 *wl, bool enable, u8 *address, -+ u8 version) -+{ -+ struct wl1251_acx_arp_filter *acx; -+ int ret; -+ -+ wl1251_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->version = version; -+ acx->enable = enable; -+ -+ if (enable == true) { -+ if (version == IPV4_VERSION) -+ memcpy(acx->address, address, IPV4_ADDR_SIZE); -+ else if (version == IPV6_VERSION) -+ memcpy(acx->address, address, sizeof(acx->address)); -+ else -+ wl1251_error("Invalid IP version"); -+ } -+ ret = wl1251_cmd_configure(wl, ACX_ARP_IP_FILTER, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_warning("failed to set arp filter: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_acx.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_acx.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_acx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_acx.h 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,1414 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_ACX_H__ -+#define __WL1251_ACX_H__ -+ -+#include "wl1251.h" -+#include "wl1251_cmd.h" -+ -+/* Target's information element */ -+struct acx_header { -+ struct wl1251_cmd_header cmd; -+ -+ /* acx (or information element) header */ -+ u16 id; -+ -+ /* payload length (not including headers */ -+ u16 len; -+}; -+ -+struct acx_error_counter { -+ struct acx_header header; -+ -+ /* The number of PLCP errors since the last time this */ -+ /* information element was interrogated. This field is */ -+ /* automatically cleared when it is interrogated.*/ -+ u32 PLCP_error; -+ -+ /* The number of FCS errors since the last time this */ -+ /* information element was interrogated. This field is */ -+ /* automatically cleared when it is interrogated.*/ -+ u32 FCS_error; -+ -+ /* The number of MPDUs without PLCP header errors received*/ -+ /* since the last time this information element was interrogated. 
*/ -+ /* This field is automatically cleared when it is interrogated.*/ -+ u32 valid_frame; -+ -+ /* the number of missed sequence numbers in the squentially */ -+ /* values of frames seq numbers */ -+ u32 seq_num_miss; -+} __attribute__ ((packed)); -+ -+struct acx_revision { -+ struct acx_header header; -+ -+ /* -+ * The WiLink firmware version, an ASCII string x.x.x.x, -+ * that uniquely identifies the current firmware. -+ * The left most digit is incremented each time a -+ * significant change is made to the firmware, such as -+ * code redesign or new platform support. -+ * The second digit is incremented when major enhancements -+ * are added or major fixes are made. -+ * The third digit is incremented for each GA release. -+ * The fourth digit is incremented for each build. -+ * The first two digits identify a firmware release version, -+ * in other words, a unique set of features. -+ * The first three digits identify a GA release. -+ */ -+ char fw_version[20]; -+ -+ /* -+ * This 4 byte field specifies the WiLink hardware version. -+ * bits 0 - 15: Reserved. -+ * bits 16 - 23: Version ID - The WiLink version ID -+ * (1 = first spin, 2 = second spin, and so on). -+ * bits 24 - 31: Chip ID - The WiLink chip ID. -+ */ -+ u32 hw_version; -+} __attribute__ ((packed)); -+ -+enum wl1251_psm_mode { -+ /* Active mode */ -+ WL1251_PSM_CAM = 0, -+ -+ /* Power save mode */ -+ WL1251_PSM_PS = 1, -+ -+ /* Extreme low power */ -+ WL1251_PSM_ELP = 2, -+}; -+ -+struct acx_sleep_auth { -+ struct acx_header header; -+ -+ /* The sleep level authorization of the device. */ -+ /* 0 - Always active*/ -+ /* 1 - Power down mode: light / fast sleep*/ -+ /* 2 - ELP mode: Deep / Max sleep*/ -+ u8 sleep_auth; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+enum { -+ HOSTIF_PCI_MASTER_HOST_INDIRECT, -+ HOSTIF_PCI_MASTER_HOST_DIRECT, -+ HOSTIF_SLAVE, -+ HOSTIF_PKT_RING, -+ HOSTIF_DONTCARE = 0xFF -+}; -+ -+#define DEFAULT_UCAST_PRIORITY 0 -+#define DEFAULT_RX_Q_PRIORITY 0 -+#define DEFAULT_NUM_STATIONS 1 -+#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */ -+#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ -+#define TRACE_BUFFER_MAX_SIZE 256 -+ -+#define DP_RX_PACKET_RING_CHUNK_SIZE 1600 -+#define DP_TX_PACKET_RING_CHUNK_SIZE 1600 -+#define DP_RX_PACKET_RING_CHUNK_NUM 2 -+#define DP_TX_PACKET_RING_CHUNK_NUM 2 -+#define DP_TX_COMPLETE_TIME_OUT 20 -+#define FW_TX_CMPLT_BLOCK_SIZE 16 -+ -+struct acx_data_path_params { -+ struct acx_header header; -+ -+ u16 rx_packet_ring_chunk_size; -+ u16 tx_packet_ring_chunk_size; -+ -+ u8 rx_packet_ring_chunk_num; -+ u8 tx_packet_ring_chunk_num; -+ -+ /* -+ * Maximum number of packets that can be gathered -+ * in the TX complete ring before an interrupt -+ * is generated. -+ */ -+ u8 tx_complete_threshold; -+ -+ /* Number of pending TX complete entries in cyclic ring.*/ -+ u8 tx_complete_ring_depth; -+ -+ /* -+ * Max num microseconds since a packet enters the TX -+ * complete ring until an interrupt is generated. 
-+ */ -+ u32 tx_complete_timeout; -+} __attribute__ ((packed)); -+ -+ -+struct acx_data_path_params_resp { -+ struct acx_header header; -+ -+ u16 rx_packet_ring_chunk_size; -+ u16 tx_packet_ring_chunk_size; -+ -+ u8 rx_packet_ring_chunk_num; -+ u8 tx_packet_ring_chunk_num; -+ -+ u8 pad[2]; -+ -+ u32 rx_packet_ring_addr; -+ u32 tx_packet_ring_addr; -+ -+ u32 rx_control_addr; -+ u32 tx_control_addr; -+ -+ u32 tx_complete_addr; -+} __attribute__ ((packed)); -+ -+#define TX_MSDU_LIFETIME_MIN 0 -+#define TX_MSDU_LIFETIME_MAX 3000 -+#define TX_MSDU_LIFETIME_DEF 512 -+#define RX_MSDU_LIFETIME_MIN 0 -+#define RX_MSDU_LIFETIME_MAX 0xFFFFFFFF -+#define RX_MSDU_LIFETIME_DEF 512000 -+ -+struct acx_rx_msdu_lifetime { -+ struct acx_header header; -+ -+ /* -+ * The maximum amount of time, in TU, before the -+ * firmware discards the MSDU. -+ */ -+ u32 lifetime; -+} __attribute__ ((packed)); -+ -+/* -+ * RX Config Options Table -+ * Bit Definition -+ * === ========== -+ * 31:14 Reserved -+ * 13 Copy RX Status - when set, write three receive status words -+ * to top of rx'd MPDUs. -+ * When cleared, do not write three status words (added rev 1.5) -+ * 12 Reserved -+ * 11 RX Complete upon FCS error - when set, give rx complete -+ * interrupt for FCS errors, after the rx filtering, e.g. unicast -+ * frames not to us with FCS error will not generate an interrupt. -+ * 10 SSID Filter Enable - When set, the WiLink discards all beacon, -+ * probe request, and probe response frames with an SSID that does -+ * not match the SSID specified by the host in the START/JOIN -+ * command. -+ * When clear, the WiLink receives frames with any SSID. -+ * 9 Broadcast Filter Enable - When set, the WiLink discards all -+ * broadcast frames. When clear, the WiLink receives all received -+ * broadcast frames. -+ * 8:6 Reserved -+ * 5 BSSID Filter Enable - When set, the WiLink discards any frames -+ * with a BSSID that does not match the BSSID specified by the -+ * host. -+ * When clear, the WiLink receives frames from any BSSID. -+ * 4 MAC Addr Filter - When set, the WiLink discards any frames -+ * with a destination address that does not match the MAC address -+ * of the adaptor. -+ * When clear, the WiLink receives frames destined to any MAC -+ * address. -+ * 3 Promiscuous - When set, the WiLink receives all valid frames -+ * (i.e., all frames that pass the FCS check). -+ * When clear, only frames that pass the other filters specified -+ * are received. -+ * 2 FCS - When set, the WiLink includes the FCS with the received -+ * frame. -+ * When cleared, the FCS is discarded. -+ * 1 PLCP header - When set, write all data from baseband to frame -+ * buffer including PHY header. -+ * 0 Reserved - Always equal to 0. -+ * -+ * RX Filter Options Table -+ * Bit Definition -+ * === ========== -+ * 31:12 Reserved - Always equal to 0. -+ * 11 Association - When set, the WiLink receives all association -+ * related frames (association request/response, reassocation -+ * request/response, and disassociation). When clear, these frames -+ * are discarded. -+ * 10 Auth/De auth - When set, the WiLink receives all authentication -+ * and de-authentication frames. When clear, these frames are -+ * discarded. -+ * 9 Beacon - When set, the WiLink receives all beacon frames. -+ * When clear, these frames are discarded. -+ * 8 Contention Free - When set, the WiLink receives all contention -+ * free frames. -+ * When clear, these frames are discarded. -+ * 7 Control - When set, the WiLink receives all control frames. 
-+ * When clear, these frames are discarded. -+ * 6 Data - When set, the WiLink receives all data frames. -+ * When clear, these frames are discarded. -+ * 5 FCS Error - When set, the WiLink receives frames that have FCS -+ * errors. -+ * When clear, these frames are discarded. -+ * 4 Management - When set, the WiLink receives all management -+ * frames. -+ * When clear, these frames are discarded. -+ * 3 Probe Request - When set, the WiLink receives all probe request -+ * frames. -+ * When clear, these frames are discarded. -+ * 2 Probe Response - When set, the WiLink receives all probe -+ * response frames. -+ * When clear, these frames are discarded. -+ * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK -+ * frames. -+ * When clear, these frames are discarded. -+ * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames -+ * that have reserved frame types and sub types as defined by the -+ * 802.11 specification. -+ * When clear, these frames are discarded. -+ */ -+struct acx_rx_config { -+ struct acx_header header; -+ -+ u32 config_options; -+ u32 filter_options; -+} __attribute__ ((packed)); -+ -+enum { -+ QOS_AC_BE = 0, -+ QOS_AC_BK, -+ QOS_AC_VI, -+ QOS_AC_VO, -+ QOS_HIGHEST_AC_INDEX = QOS_AC_VO, -+}; -+ -+#define MAX_NUM_OF_AC (QOS_HIGHEST_AC_INDEX+1) -+#define FIRST_AC_INDEX QOS_AC_BE -+#define MAX_NUM_OF_802_1d_TAGS 8 -+#define AC_PARAMS_MAX_TSID 15 -+#define MAX_APSD_CONF 0xffff -+ -+#define QOS_TX_HIGH_MIN (0) -+#define QOS_TX_HIGH_MAX (100) -+ -+#define QOS_TX_HIGH_BK_DEF (25) -+#define QOS_TX_HIGH_BE_DEF (35) -+#define QOS_TX_HIGH_VI_DEF (35) -+#define QOS_TX_HIGH_VO_DEF (35) -+ -+#define QOS_TX_LOW_BK_DEF (15) -+#define QOS_TX_LOW_BE_DEF (25) -+#define QOS_TX_LOW_VI_DEF (25) -+#define QOS_TX_LOW_VO_DEF (25) -+ -+struct acx_tx_queue_qos_config { -+ struct acx_header header; -+ -+ u8 qid; -+ u8 pad[3]; -+ -+ /* Max number of blocks allowd in the queue */ -+ u16 high_threshold; -+ -+ /* Lowest memory blocks guaranteed for this queue */ -+ u16 low_threshold; -+} __attribute__ ((packed)); -+ -+struct acx_packet_detection { -+ struct acx_header header; -+ -+ u32 threshold; -+} __attribute__ ((packed)); -+ -+ -+enum acx_slot_type { -+ SLOT_TIME_LONG = 0, -+ SLOT_TIME_SHORT = 1, -+ DEFAULT_SLOT_TIME = SLOT_TIME_SHORT, -+ MAX_SLOT_TIMES = 0xFF -+}; -+ -+#define STATION_WONE_INDEX 0 -+ -+struct acx_slot { -+ struct acx_header header; -+ -+ u8 wone_index; /* Reserved */ -+ u8 slot_time; -+ u8 reserved[6]; -+} __attribute__ ((packed)); -+ -+ -+#define ACX_MC_ADDRESS_GROUP_MAX (8) -+#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX) -+ -+struct acx_dot11_grp_addr_tbl { -+ struct acx_header header; -+ -+ u8 enabled; -+ u8 num_groups; -+ u8 pad[2]; -+ u8 mac_table[ADDRESS_GROUP_MAX_LEN]; -+} __attribute__ ((packed)); -+ -+ -+#define RX_TIMEOUT_PS_POLL_MIN 0 -+#define RX_TIMEOUT_PS_POLL_MAX (200000) -+#define RX_TIMEOUT_PS_POLL_DEF (15) -+#define RX_TIMEOUT_UPSD_MIN 0 -+#define RX_TIMEOUT_UPSD_MAX (200000) -+#define RX_TIMEOUT_UPSD_DEF (15) -+ -+struct acx_rx_timeout { -+ struct acx_header header; -+ -+ /* -+ * The longest time the STA will wait to receive -+ * traffic from the AP after a PS-poll has been -+ * transmitted. -+ */ -+ u16 ps_poll_timeout; -+ -+ /* -+ * The longest time the STA will wait to receive -+ * traffic from the AP after a frame has been sent -+ * from an UPSD enabled queue. 
-+ */ -+ u16 upsd_timeout; -+} __attribute__ ((packed)); -+ -+#define RTS_THRESHOLD_MIN 0 -+#define RTS_THRESHOLD_MAX 4096 -+#define RTS_THRESHOLD_DEF 2347 -+ -+struct acx_rts_threshold { -+ struct acx_header header; -+ -+ u16 threshold; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+enum wl12xx_acx_low_rssi_type { -+ /* The event is a "Level" indication which keeps */ -+ /* triggering as long as the average RSSI is below*/ -+ /* the threshold.*/ -+ WL12XX_ACX_LOW_RSSI_TYPE_LEVEL = 0, -+ -+ /* The event is an "Edge" indication which triggers*/ -+ /* only when the RSSI threshold is crossed from above.*/ -+ WL12XX_ACX_LOW_RSSI_TYPE_EDGE = 1, -+}; -+ -+ -+struct acx_low_rssi { -+ struct acx_header header; -+ -+ /* -+ * The threshold (in dBm) below (or above after low rssi -+ * indication) which the firmware generates an interrupt to the -+ * host. This parameter is signed. -+ */ -+ s8 threshold; -+ -+ /* -+ * The weight of the current RSSI sample, before adding the new -+ * sample, that is used to calculate the average RSSI. -+ */ -+ u8 weight; -+ -+ /* -+ * The number of Beacons/Probe response frames that will be -+ * received before issuing the Low or Regained RSSI event. -+ */ -+ u8 depth; -+ -+ /* -+ * Configures how the Low RSSI Event is triggered. Refer to -+ * enum wl12xx_acx_low_rssi_type for more. -+ */ -+ u8 type; -+}; -+ -+struct acx_beacon_filter_option { -+ struct acx_header header; -+ -+ u8 enable; -+ -+ /* -+ * The number of beacons without the unicast TIM -+ * bit set that the firmware buffers before -+ * signaling the host about ready frames. -+ * When set to 0 and the filter is enabled, beacons -+ * without the unicast TIM bit set are dropped. -+ */ -+ u8 max_num_beacons; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+/* -+ * ACXBeaconFilterEntry (not 221) -+ * Byte Offset Size (Bytes) Definition -+ * =========== ============ ========== -+ * 0 1 IE identifier -+ * 1 1 Treatment bit mask -+ * -+ * ACXBeaconFilterEntry (221) -+ * Byte Offset Size (Bytes) Definition -+ * =========== ============ ========== -+ * 0 1 IE identifier -+ * 1 1 Treatment bit mask -+ * 2 3 OUI -+ * 5 1 Type -+ * 6 2 Version -+ * -+ * -+ * Treatment bit mask - The information element handling: -+ * bit 0 - The information element is compared and transferred -+ * in case of change. -+ * bit 1 - The information element is transferred to the host -+ * with each appearance or disappearance. -+ * Note that both bits can be set at the same time. 
-+ */ -+#define BEACON_FILTER_TABLE_MAX_IE_NUM (32) -+#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6) -+#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE (2) -+#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6) -+#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \ -+ BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \ -+ (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \ -+ BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE)) -+ -+#define BEACON_RULE_PASS_ON_CHANGE BIT(0) -+#define BEACON_RULE_PASS_ON_APPEARANCE BIT(1) -+ -+#define BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN (37) -+ -+struct acx_beacon_filter_ie_table { -+ struct acx_header header; -+ -+ u8 num_ie; -+ u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ -+#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ -+ -+struct acx_conn_monit_params { -+ struct acx_header header; -+ -+ u32 synch_fail_thold; /* number of beacons missed */ -+ u32 bss_lose_timeout; /* number of TU's from synch fail */ -+}; -+ -+enum { -+ SG_ENABLE = 0, -+ SG_DISABLE, -+ SG_SENSE_NO_ACTIVITY, -+ SG_SENSE_ACTIVE -+}; -+ -+struct acx_bt_wlan_coex { -+ struct acx_header header; -+ -+ /* -+ * 0 -> PTA enabled -+ * 1 -> PTA disabled -+ * 2 -> sense no active mode, i.e. -+ * an interrupt is sent upon -+ * BT activity. -+ * 3 -> PTA is switched on in response -+ * to the interrupt sending. -+ */ -+ u8 enable; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+#define PTA_ANTENNA_TYPE_DEF (0) -+#define PTA_BT_HP_MAXTIME_DEF (2000) -+#define PTA_WLAN_HP_MAX_TIME_DEF (5000) -+#define PTA_SENSE_DISABLE_TIMER_DEF (1350) -+#define PTA_PROTECTIVE_RX_TIME_DEF (1500) -+#define PTA_PROTECTIVE_TX_TIME_DEF (1500) -+#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000) -+#define PTA_SIGNALING_TYPE_DEF (1) -+#define PTA_AFH_LEVERAGE_ON_DEF (0) -+#define PTA_NUMBER_QUIET_CYCLE_DEF (0) -+#define PTA_MAX_NUM_CTS_DEF (3) -+#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2) -+#define PTA_NUMBER_OF_BT_PACKETS_DEF (2) -+#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500) -+#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000) -+#define PTA_CYCLE_TIME_FAST_DEF (8700) -+#define PTA_RX_FOR_AVALANCHE_DEF (5) -+#define PTA_ELP_HP_DEF (0) -+#define PTA_ANTI_STARVE_PERIOD_DEF (500) -+#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4) -+#define PTA_ALLOW_PA_SD_DEF (1) -+#define PTA_TIME_BEFORE_BEACON_DEF (500) -+#define PTA_TIME_BEFORE_BEACON_MONO_AUDIO (6300) -+#define PTA_HPDM_MAX_TIME_DEF (1600) -+#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550) -+#define PTA_AUTO_MODE_NO_CTS_DEF (0) -+#define PTA_BT_HP_RESPECTED_DEF (3) -+#define PTA_WLAN_RX_MIN_RATE_DEF (24) -+#define PTA_ACK_MODE_DEF (1) -+ -+struct acx_bt_wlan_coex_param { -+ struct acx_header header; -+ -+ /* -+ * The minimum rate of a received WLAN packet in the STA, -+ * during protective mode, of which a new BT-HP request -+ * during this Rx will always be respected and gain the antenna. -+ */ -+ u32 min_rate; -+ -+ /* Max time the BT HP will be respected. */ -+ u16 bt_hp_max_time; -+ -+ /* Max time the WLAN HP will be respected. */ -+ u16 wlan_hp_max_time; -+ -+ /* -+ * The time between the last BT activity -+ * and the moment when the sense mode returns -+ * to SENSE_INACTIVE. 
-+ */ -+ u16 sense_disable_timer; -+ -+ /* Time before the next BT HP instance */ -+ u16 rx_time_bt_hp; -+ u16 tx_time_bt_hp; -+ -+ /* range: 10-20000 default: 1500 */ -+ u16 rx_time_bt_hp_fast; -+ u16 tx_time_bt_hp_fast; -+ -+ /* range: 2000-65535 default: 8700 */ -+ u16 wlan_cycle_fast; -+ -+ /* range: 0 - 15000 (Msec) default: 1000 */ -+ u16 bt_anti_starvation_period; -+ -+ /* range 400-10000(Usec) default: 3000 */ -+ u16 next_bt_lp_packet; -+ -+ /* Deafult: worst case for BT DH5 traffic */ -+ u16 wake_up_beacon; -+ -+ /* range: 0-50000(Usec) default: 1050 */ -+ u16 hp_dm_max_guard_time; -+ -+ /* -+ * This is to prevent both BT & WLAN antenna -+ * starvation. -+ * Range: 100-50000(Usec) default:2550 -+ */ -+ u16 next_wlan_packet; -+ -+ /* 0 -> shared antenna */ -+ u8 antenna_type; -+ -+ /* -+ * 0 -> TI legacy -+ * 1 -> Palau -+ */ -+ u8 signal_type; -+ -+ /* -+ * BT AFH status -+ * 0 -> no AFH -+ * 1 -> from dedicated GPIO -+ * 2 -> AFH on (from host) -+ */ -+ u8 afh_leverage_on; -+ -+ /* -+ * The number of cycles during which no -+ * TX will be sent after 1 cycle of RX -+ * transaction in protective mode -+ */ -+ u8 quiet_cycle_num; -+ -+ /* -+ * The maximum number of CTSs that will -+ * be sent for receiving RX packet in -+ * protective mode -+ */ -+ u8 max_cts; -+ -+ /* -+ * The number of WLAN packets -+ * transferred in common mode before -+ * switching to BT. -+ */ -+ u8 wlan_packets_num; -+ -+ /* -+ * The number of BT packets -+ * transferred in common mode before -+ * switching to WLAN. -+ */ -+ u8 bt_packets_num; -+ -+ /* range: 1-255 default: 5 */ -+ u8 missed_rx_avalanche; -+ -+ /* range: 0-1 default: 1 */ -+ u8 wlan_elp_hp; -+ -+ /* range: 0 - 15 default: 4 */ -+ u8 bt_anti_starvation_cycles; -+ -+ u8 ack_mode_dual_ant; -+ -+ /* -+ * Allow PA_SD assertion/de-assertion -+ * during enabled BT activity. 
-+ */ -+ u8 pa_sd_enable; -+ -+ /* -+ * Enable/Disable PTA in auto mode: -+ * Support Both Active & P.S modes -+ */ -+ u8 pta_auto_mode_enable; -+ -+ /* range: 0 - 20 default: 1 */ -+ u8 bt_hp_respected_num; -+} __attribute__ ((packed)); -+ -+#define CCA_THRSH_ENABLE_ENERGY_D 0x140A -+#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF -+ -+struct acx_energy_detection { -+ struct acx_header header; -+ -+ /* The RX Clear Channel Assessment threshold in the PHY */ -+ u16 rx_cca_threshold; -+ u8 tx_energy_detection; -+ u8 pad; -+} __attribute__ ((packed)); -+ -+#define BCN_RX_TIMEOUT_DEF_VALUE 10000 -+#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 -+#define RX_BROADCAST_IN_PS_DEF_VALUE 1 -+#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4 -+ -+struct acx_beacon_broadcast { -+ struct acx_header header; -+ -+ u16 beacon_rx_timeout; -+ u16 broadcast_timeout; -+ -+ /* Enables receiving of broadcast packets in PS mode */ -+ u8 rx_broadcast_in_ps; -+ -+ /* Consecutive PS Poll failures before updating the host */ -+ u8 ps_poll_threshold; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct acx_event_mask { -+ struct acx_header header; -+ -+ u32 event_mask; -+ u32 high_event_mask; /* Unused */ -+} __attribute__ ((packed)); -+ -+#define CFG_RX_FCS BIT(2) -+#define CFG_RX_ALL_GOOD BIT(3) -+#define CFG_UNI_FILTER_EN BIT(4) -+#define CFG_BSSID_FILTER_EN BIT(5) -+#define CFG_MC_FILTER_EN BIT(6) -+#define CFG_MC_ADDR0_EN BIT(7) -+#define CFG_MC_ADDR1_EN BIT(8) -+#define CFG_BC_REJECT_EN BIT(9) -+#define CFG_SSID_FILTER_EN BIT(10) -+#define CFG_RX_INT_FCS_ERROR BIT(11) -+#define CFG_RX_INT_ENCRYPTED BIT(12) -+#define CFG_RX_WR_RX_STATUS BIT(13) -+#define CFG_RX_FILTER_NULTI BIT(14) -+#define CFG_RX_RESERVE BIT(15) -+#define CFG_RX_TIMESTAMP_TSF BIT(16) -+ -+#define CFG_RX_RSV_EN BIT(0) -+#define CFG_RX_RCTS_ACK BIT(1) -+#define CFG_RX_PRSP_EN BIT(2) -+#define CFG_RX_PREQ_EN BIT(3) -+#define CFG_RX_MGMT_EN BIT(4) -+#define CFG_RX_FCS_ERROR BIT(5) -+#define CFG_RX_DATA_EN BIT(6) -+#define CFG_RX_CTL_EN BIT(7) -+#define CFG_RX_CF_EN BIT(8) -+#define CFG_RX_BCN_EN BIT(9) -+#define CFG_RX_AUTH_EN BIT(10) -+#define CFG_RX_ASSOC_EN BIT(11) -+ -+#define SCAN_PASSIVE BIT(0) -+#define SCAN_5GHZ_BAND BIT(1) -+#define SCAN_TRIGGERED BIT(2) -+#define SCAN_PRIORITY_HIGH BIT(3) -+ -+struct acx_fw_gen_frame_rates { -+ struct acx_header header; -+ -+ u8 tx_ctrl_frame_rate; /* RATE_* */ -+ u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ -+ u8 tx_mgt_frame_rate; -+ u8 tx_mgt_frame_mod; -+} __attribute__ ((packed)); -+ -+/* STA MAC */ -+struct acx_dot11_station_id { -+ struct acx_header header; -+ -+ u8 mac[ETH_ALEN]; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct acx_feature_config { -+ struct acx_header header; -+ -+ u32 options; -+ u32 data_flow_options; -+} __attribute__ ((packed)); -+ -+struct acx_current_tx_power { -+ struct acx_header header; -+ -+ u8 current_tx_power; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+struct acx_dot11_default_key { -+ struct acx_header header; -+ -+ u8 id; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+struct acx_tsf_info { -+ struct acx_header header; -+ -+ u32 current_tsf_msb; -+ u32 current_tsf_lsb; -+ u32 last_TBTT_msb; -+ u32 last_TBTT_lsb; -+ u8 last_dtim_count; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+enum acx_wake_up_event { -+ WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ -+ WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/ -+ WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */ -+ WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth 
Beacon */ -+ WAKE_UP_EVENT_BITS_MASK = 0x0F -+}; -+ -+struct acx_wake_up_condition { -+ struct acx_header header; -+ -+ u8 wake_up_event; /* Only one bit can be set */ -+ u8 listen_interval; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct acx_aid { -+ struct acx_header header; -+ -+ /* -+ * To be set when associated with an AP. -+ */ -+ u16 aid; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+enum acx_preamble_type { -+ ACX_PREAMBLE_LONG = 0, -+ ACX_PREAMBLE_SHORT = 1 -+}; -+ -+struct acx_preamble { -+ struct acx_header header; -+ -+ /* -+ * When set, the WiLink transmits the frames with a short preamble and -+ * when cleared, the WiLink transmits the frames with a long preamble. -+ */ -+ u8 preamble; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+enum acx_ctsprotect_type { -+ CTSPROTECT_DISABLE = 0, -+ CTSPROTECT_ENABLE = 1 -+}; -+ -+struct acx_ctsprotect { -+ struct acx_header header; -+ u8 ctsprotect; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+struct acx_tx_statistics { -+ u32 internal_desc_overflow; -+} __attribute__ ((packed)); -+ -+struct acx_rx_statistics { -+ u32 out_of_mem; -+ u32 hdr_overflow; -+ u32 hw_stuck; -+ u32 dropped; -+ u32 fcs_err; -+ u32 xfr_hint_trig; -+ u32 path_reset; -+ u32 reset_counter; -+} __attribute__ ((packed)); -+ -+struct acx_dma_statistics { -+ u32 rx_requested; -+ u32 rx_errors; -+ u32 tx_requested; -+ u32 tx_errors; -+} __attribute__ ((packed)); -+ -+struct acx_isr_statistics { -+ /* host command complete */ -+ u32 cmd_cmplt; -+ -+ /* fiqisr() */ -+ u32 fiqs; -+ -+ /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ -+ u32 rx_headers; -+ -+ /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ -+ u32 rx_completes; -+ -+ /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ -+ u32 rx_mem_overflow; -+ -+ /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ -+ u32 rx_rdys; -+ -+ /* irqisr() */ -+ u32 irqs; -+ -+ /* (INT_STS_ND & INT_TRIG_TX_PROC) */ -+ u32 tx_procs; -+ -+ /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ -+ u32 decrypt_done; -+ -+ /* (INT_STS_ND & INT_TRIG_DMA0) */ -+ u32 dma0_done; -+ -+ /* (INT_STS_ND & INT_TRIG_DMA1) */ -+ u32 dma1_done; -+ -+ /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ -+ u32 tx_exch_complete; -+ -+ /* (INT_STS_ND & INT_TRIG_COMMAND) */ -+ u32 commands; -+ -+ /* (INT_STS_ND & INT_TRIG_RX_PROC) */ -+ u32 rx_procs; -+ -+ /* (INT_STS_ND & INT_TRIG_PM_802) */ -+ u32 hw_pm_mode_changes; -+ -+ /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ -+ u32 host_acknowledges; -+ -+ /* (INT_STS_ND & INT_TRIG_PM_PCI) */ -+ u32 pci_pm; -+ -+ /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ -+ u32 wakeups; -+ -+ /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ -+ u32 low_rssi; -+} __attribute__ ((packed)); -+ -+struct acx_wep_statistics { -+ /* WEP address keys configured */ -+ u32 addr_key_count; -+ -+ /* default keys configured */ -+ u32 default_key_count; -+ -+ u32 reserved; -+ -+ /* number of times that WEP key not found on lookup */ -+ u32 key_not_found; -+ -+ /* number of times that WEP key decryption failed */ -+ u32 decrypt_fail; -+ -+ /* WEP packets decrypted */ -+ u32 packets; -+ -+ /* WEP decrypt interrupts */ -+ u32 interrupt; -+} __attribute__ ((packed)); -+ -+#define ACX_MISSED_BEACONS_SPREAD 10 -+ -+struct acx_pwr_statistics { -+ /* the amount of enters into power save mode (both PD & ELP) */ -+ u32 ps_enter; -+ -+ /* the amount of enters into ELP mode */ -+ u32 elp_enter; -+ -+ /* the amount of missing beacon interrupts to the host */ -+ u32 missing_bcns; -+ -+ /* the amount of wake on host-access times */ -+ u32 wake_on_host; -+ -+ /* the amount of wake on timer-expire */ -+ u32 
wake_on_timer_exp; -+ -+ /* the number of packets that were transmitted with PS bit set */ -+ u32 tx_with_ps; -+ -+ /* the number of packets that were transmitted with PS bit clear */ -+ u32 tx_without_ps; -+ -+ /* the number of received beacons */ -+ u32 rcvd_beacons; -+ -+ /* the number of entering into PowerOn (power save off) */ -+ u32 power_save_off; -+ -+ /* the number of entries into power save mode */ -+ u16 enable_ps; -+ -+ /* -+ * the number of exits from power save, not including failed PS -+ * transitions -+ */ -+ u16 disable_ps; -+ -+ /* -+ * the number of times the TSF counter was adjusted because -+ * of drift -+ */ -+ u32 fix_tsf_ps; -+ -+ /* Gives statistics about the spread continuous missed beacons. -+ * The 16 LSB are dedicated for the PS mode. -+ * The 16 MSB are dedicated for the PS mode. -+ * cont_miss_bcns_spread[0] - single missed beacon. -+ * cont_miss_bcns_spread[1] - two continuous missed beacons. -+ * cont_miss_bcns_spread[2] - three continuous missed beacons. -+ * ... -+ * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. -+ */ -+ u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; -+ -+ /* the number of beacons in awake mode */ -+ u32 rcvd_awake_beacons; -+} __attribute__ ((packed)); -+ -+struct acx_mic_statistics { -+ u32 rx_pkts; -+ u32 calc_failure; -+} __attribute__ ((packed)); -+ -+struct acx_aes_statistics { -+ u32 encrypt_fail; -+ u32 decrypt_fail; -+ u32 encrypt_packets; -+ u32 decrypt_packets; -+ u32 encrypt_interrupt; -+ u32 decrypt_interrupt; -+} __attribute__ ((packed)); -+ -+struct acx_event_statistics { -+ u32 heart_beat; -+ u32 calibration; -+ u32 rx_mismatch; -+ u32 rx_mem_empty; -+ u32 rx_pool; -+ u32 oom_late; -+ u32 phy_transmit_error; -+ u32 tx_stuck; -+} __attribute__ ((packed)); -+ -+struct acx_ps_statistics { -+ u32 pspoll_timeouts; -+ u32 upsd_timeouts; -+ u32 upsd_max_sptime; -+ u32 upsd_max_apturn; -+ u32 pspoll_max_apturn; -+ u32 pspoll_utilization; -+ u32 upsd_utilization; -+} __attribute__ ((packed)); -+ -+struct acx_rxpipe_statistics { -+ u32 rx_prep_beacon_drop; -+ u32 descr_host_int_trig_rx_data; -+ u32 beacon_buffer_thres_host_int_trig_rx_data; -+ u32 missed_beacon_host_int_trig_rx_data; -+ u32 tx_xfr_host_int_trig_rx_data; -+} __attribute__ ((packed)); -+ -+struct acx_statistics { -+ struct acx_header header; -+ -+ struct acx_tx_statistics tx; -+ struct acx_rx_statistics rx; -+ struct acx_dma_statistics dma; -+ struct acx_isr_statistics isr; -+ struct acx_wep_statistics wep; -+ struct acx_pwr_statistics pwr; -+ struct acx_aes_statistics aes; -+ struct acx_mic_statistics mic; -+ struct acx_event_statistics event; -+ struct acx_ps_statistics ps; -+ struct acx_rxpipe_statistics rxpipe; -+} __attribute__ ((packed)); -+ -+#define ACX_MAX_RATE_CLASSES 8 -+#define ACX_RATE_MASK_UNSPECIFIED 0 -+#define ACX_RATE_RETRY_LIMIT 10 -+ -+struct acx_rate_class { -+ u32 enabled_rates; -+ u8 short_retry_limit; -+ u8 long_retry_limit; -+ u8 aflags; -+ u8 reserved; -+}; -+ -+struct acx_rate_policy { -+ struct acx_header header; -+ -+ u32 rate_class_cnt; -+ struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; -+} __attribute__ ((packed)); -+ -+struct wl1251_acx_memory { -+ __le16 num_stations; /* number of STAs to be supported. */ -+ u16 reserved_1; -+ -+ /* -+ * Nmber of memory buffers for the RX mem pool. -+ * The actual number may be less if there are -+ * not enough blocks left for the minimum num -+ * of TX ones. 
-+ */ -+ u8 rx_mem_block_num; -+ u8 reserved_2; -+ u8 num_tx_queues; /* From 1 to 16 */ -+ u8 host_if_options; /* HOST_IF* */ -+ u8 tx_min_mem_block_num; -+ u8 num_ssid_profiles; -+ __le16 debug_buffer_size; -+} __attribute__ ((packed)); -+ -+ -+#define ACX_RX_DESC_MIN 1 -+#define ACX_RX_DESC_MAX 127 -+#define ACX_RX_DESC_DEF 32 -+struct wl1251_acx_rx_queue_config { -+ u8 num_descs; -+ u8 pad; -+ u8 type; -+ u8 priority; -+ __le32 dma_address; -+} __attribute__ ((packed)); -+ -+#define ACX_TX_DESC_MIN 1 -+#define ACX_TX_DESC_MAX 127 -+#define ACX_TX_DESC_DEF 16 -+struct wl1251_acx_tx_queue_config { -+ u8 num_descs; -+ u8 pad[2]; -+ u8 attributes; -+} __attribute__ ((packed)); -+ -+#define MAX_TX_QUEUE_CONFIGS 5 -+#define MAX_TX_QUEUES 4 -+struct wl1251_acx_config_memory { -+ struct acx_header header; -+ -+ struct wl1251_acx_memory mem_config; -+ struct wl1251_acx_rx_queue_config rx_queue_config; -+ struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; -+} __attribute__ ((packed)); -+ -+struct wl1251_acx_mem_map { -+ struct acx_header header; -+ -+ void *code_start; -+ void *code_end; -+ -+ void *wep_defkey_start; -+ void *wep_defkey_end; -+ -+ void *sta_table_start; -+ void *sta_table_end; -+ -+ void *packet_template_start; -+ void *packet_template_end; -+ -+ void *queue_memory_start; -+ void *queue_memory_end; -+ -+ void *packet_memory_pool_start; -+ void *packet_memory_pool_end; -+ -+ void *debug_buffer1_start; -+ void *debug_buffer1_end; -+ -+ void *debug_buffer2_start; -+ void *debug_buffer2_end; -+ -+ /* Number of blocks FW allocated for TX packets */ -+ u32 num_tx_mem_blocks; -+ -+ /* Number of blocks FW allocated for RX packets */ -+ u32 num_rx_mem_blocks; -+} __attribute__ ((packed)); -+ -+struct wl1251_acx_wr_tbtt_and_dtim { -+ -+ struct acx_header header; -+ -+ /* Time in TUs between two consecutive beacons */ -+ u16 tbtt; -+ -+ /* -+ * DTIM period -+ * For BSS: Number of TBTTs in a DTIM period (range: 1-10) -+ * For IBSS: value shall be set to 1 -+ */ -+ u8 dtim; -+ u8 padding; -+} __attribute__ ((packed)); -+ -+#define IPV4_VERSION 4 -+#define IPV6_VERSION 6 -+#define IPV4_ADDR_SIZE 4 -+struct wl1251_acx_arp_filter { -+ struct acx_header header; -+ u8 version; /* The IP version: 4 - IPv4, 6 - IPv6.*/ -+ u8 enable; /* 1 - ARP filtering is enabled, 0 - disabled */ -+ u8 padding[2]; -+ u8 address[16]; /* The IP address used to filter ARP packets. -+ ARP packets that do not match this address are -+ dropped. When the IP Version is 4, the last 12 -+ bytes of the the address are ignored.*/ -+} __attribute__((packed)); -+ -+enum wl1251_acx_bet_mode { -+ WL1251_ACX_BET_DISABLE = 0, -+ WL1251_ACX_BET_ENABLE = 1, -+}; -+ -+struct wl1251_acx_bet_enable { -+ struct acx_header header; -+ -+ /* -+ * Specifies if beacon early termination procedure is enabled or -+ * disabled, see enum wl1251_acx_bet_mode. -+ */ -+ u8 enable; -+ -+ /* -+ * Specifies the maximum number of consecutive beacons that may be -+ * early terminated. After this number is reached at least one full -+ * beacon must be correctly received in FW before beacon ET -+ * resumes. Range 0 - 255. 
-+ */ -+ u8 max_consecutive; -+ -+ u8 padding[2]; -+} __attribute__ ((packed)); -+ -+/************************************************************************* -+ -+ Host Interrupt Register (WiLink -> Host) -+ -+**************************************************************************/ -+ -+/* RX packet is ready in Xfer buffer #0 */ -+#define WL1251_ACX_INTR_RX0_DATA BIT(0) -+ -+/* TX result(s) are in the TX complete buffer */ -+#define WL1251_ACX_INTR_TX_RESULT BIT(1) -+ -+/* OBSOLETE */ -+#define WL1251_ACX_INTR_TX_XFR BIT(2) -+ -+/* RX packet is ready in Xfer buffer #1 */ -+#define WL1251_ACX_INTR_RX1_DATA BIT(3) -+ -+/* Event was entered to Event MBOX #A */ -+#define WL1251_ACX_INTR_EVENT_A BIT(4) -+ -+/* Event was entered to Event MBOX #B */ -+#define WL1251_ACX_INTR_EVENT_B BIT(5) -+ -+/* OBSOLETE */ -+#define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6) -+ -+/* Trace meassge on MBOX #A */ -+#define WL1251_ACX_INTR_TRACE_A BIT(7) -+ -+/* Trace meassge on MBOX #B */ -+#define WL1251_ACX_INTR_TRACE_B BIT(8) -+ -+/* Command processing completion */ -+#define WL1251_ACX_INTR_CMD_COMPLETE BIT(9) -+ -+/* Init sequence is done */ -+#define WL1251_ACX_INTR_INIT_COMPLETE BIT(14) -+ -+#define WL1251_ACX_INTR_ALL 0xFFFFFFFF -+ -+enum { -+ ACX_WAKE_UP_CONDITIONS = 0x0002, -+ ACX_MEM_CFG = 0x0003, -+ ACX_SLOT = 0x0004, -+ ACX_QUEUE_HEAD = 0x0005, /* for MASTER mode only */ -+ ACX_AC_CFG = 0x0007, -+ ACX_MEM_MAP = 0x0008, -+ ACX_AID = 0x000A, -+ ACX_RADIO_PARAM = 0x000B, /* Not used */ -+ ACX_CFG = 0x000C, /* Not used */ -+ ACX_FW_REV = 0x000D, -+ ACX_MEDIUM_USAGE = 0x000F, -+ ACX_RX_CFG = 0x0010, -+ ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */ -+ ACX_BSS_IN_PS = 0x0012, /* for AP only */ -+ ACX_STATISTICS = 0x0013, /* Debug API */ -+ ACX_FEATURE_CFG = 0x0015, -+ ACX_MISC_CFG = 0x0017, /* Not used */ -+ ACX_TID_CFG = 0x001A, -+ ACX_BEACON_FILTER_OPT = 0x001F, -+ ACX_LOW_RSSI = 0x0020, -+ ACX_NOISE_HIST = 0x0021, -+ ACX_HDK_VERSION = 0x0022, /* ??? 
*/ -+ ACX_PD_THRESHOLD = 0x0023, -+ ACX_DATA_PATH_PARAMS = 0x0024, /* WO */ -+ ACX_DATA_PATH_RESP_PARAMS = 0x0024, /* RO */ -+ ACX_CCA_THRESHOLD = 0x0025, -+ ACX_EVENT_MBOX_MASK = 0x0026, -+#ifdef FW_RUNNING_AS_AP -+ ACX_DTIM_PERIOD = 0x0027, /* for AP only */ -+#else -+ ACX_WR_TBTT_AND_DTIM = 0x0027, /* STA only */ -+#endif -+ ACX_ACI_OPTION_CFG = 0x0029, /* OBSOLETE (for 1251)*/ -+ ACX_GPIO_CFG = 0x002A, /* Not used */ -+ ACX_GPIO_SET = 0x002B, /* Not used */ -+ ACX_PM_CFG = 0x002C, /* To Be Documented */ -+ ACX_CONN_MONIT_PARAMS = 0x002D, -+ ACX_AVERAGE_RSSI = 0x002E, /* Not used */ -+ ACX_CONS_TX_FAILURE = 0x002F, -+ ACX_BCN_DTIM_OPTIONS = 0x0031, -+ ACX_SG_ENABLE = 0x0032, -+ ACX_SG_CFG = 0x0033, -+ ACX_ANTENNA_DIVERSITY_CFG = 0x0035, /* To Be Documented */ -+ ACX_LOW_SNR = 0x0037, /* To Be Documented */ -+ ACX_BEACON_FILTER_TABLE = 0x0038, -+ ACX_ARP_IP_FILTER = 0x0039, -+ ACX_ROAMING_STATISTICS_TBL = 0x003B, -+ ACX_RATE_POLICY = 0x003D, -+ ACX_CTS_PROTECTION = 0x003E, -+ ACX_SLEEP_AUTH = 0x003F, -+ ACX_PREAMBLE_TYPE = 0x0040, -+ ACX_ERROR_CNT = 0x0041, -+ ACX_FW_GEN_FRAME_RATES = 0x0042, -+ ACX_IBSS_FILTER = 0x0044, -+ ACX_SERVICE_PERIOD_TIMEOUT = 0x0045, -+ ACX_TSF_INFO = 0x0046, -+ ACX_CONFIG_PS_WMM = 0x0049, -+ ACX_ENABLE_RX_DATA_FILTER = 0x004A, -+ ACX_SET_RX_DATA_FILTER = 0x004B, -+ ACX_GET_DATA_FILTER_STATISTICS = 0x004C, -+ ACX_POWER_LEVEL_TABLE = 0x004D, -+ ACX_BET_ENABLE = 0x0050, -+ DOT11_STATION_ID = 0x1001, -+ DOT11_RX_MSDU_LIFE_TIME = 0x1004, -+ DOT11_CUR_TX_PWR = 0x100D, -+ DOT11_DEFAULT_KEY = 0x1010, -+ DOT11_RX_DOT11_MODE = 0x1012, -+ DOT11_RTS_THRESHOLD = 0x1013, -+ DOT11_GROUP_ADDRESS_TBL = 0x1014, -+ -+ MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, -+ -+ MAX_IE = 0xFFFF -+}; -+ -+ -+int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod, -+ u8 mgt_rate, u8 mgt_mod); -+int wl1251_acx_station_id(struct wl1251 *wl); -+int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id); -+int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event, -+ u8 listen_interval); -+int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth); -+int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len); -+int wl1251_acx_tx_power(struct wl1251 *wl, int power); -+int wl1251_acx_feature_cfg(struct wl1251 *wl); -+int wl1251_acx_mem_map(struct wl1251 *wl, -+ struct acx_header *mem_map, size_t len); -+int wl1251_acx_data_path_params(struct wl1251 *wl, -+ struct acx_data_path_params_resp *data_path); -+int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time); -+int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter); -+int wl1251_acx_pd_threshold(struct wl1251 *wl); -+int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time); -+int wl1251_acx_group_address_tbl(struct wl1251 *wl, void *mc_list, -+ u32 mc_list_len, bool enable); -+int wl1251_acx_service_period_timeout(struct wl1251 *wl); -+int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold); -+int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter); -+int wl1251_acx_beacon_filter_table(struct wl1251 *wl); -+int wl1251_acx_conn_monit_params(struct wl1251 *wl); -+int wl1251_acx_sg_enable(struct wl1251 *wl, u8 mode); -+int wl1251_acx_sg_cfg(struct wl1251 *wl, u16 wake_up_beacon); -+int wl1251_acx_sg_configure(struct wl1251 *wl, bool force); -+int wl1251_acx_cca_threshold(struct wl1251 *wl); -+int wl1251_acx_bcn_dtim_options(struct wl1251 *wl); -+int wl1251_acx_aid(struct wl1251 *wl, u16 aid); -+int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 
event_mask); -+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight, -+ u8 depth, enum wl12xx_acx_low_rssi_type type); -+int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble); -+int wl1251_acx_cts_protect(struct wl1251 *wl, -+ enum acx_ctsprotect_type ctsprotect); -+int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats); -+int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime); -+int wl1251_acx_rate_policies(struct wl1251 *wl); -+int wl1251_acx_mem_cfg(struct wl1251 *wl); -+int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); -+int wl1251_acx_ip_config(struct wl1251 *wl, bool enable, u8 *address, -+ u8 version); -+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode, -+ u8 max_consecutive); -+ -+#endif /* __WL1251_ACX_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_boot.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_boot.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_boot.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_boot.c 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,552 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+ -+#include "wl1251_reg.h" -+#include "wl1251_boot.h" -+#include "wl1251_spi.h" -+#include "wl1251_event.h" -+#include "wl1251_acx.h" -+ -+static void wl1251_boot_enable_interrupts(struct wl1251 *wl) -+{ -+ enable_irq(wl->irq); -+} -+ -+void wl1251_boot_target_enable_interrupts(struct wl1251 *wl) -+{ -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask)); -+ wl1251_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL); -+} -+ -+int wl1251_boot_soft_reset(struct wl1251 *wl) -+{ -+ unsigned long timeout; -+ u32 boot_data; -+ -+ /* perform soft reset */ -+ wl1251_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); -+ -+ /* SOFT_RESET is self clearing */ -+ timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); -+ while (1) { -+ boot_data = wl1251_reg_read32(wl, ACX_REG_SLV_SOFT_RESET); -+ wl1251_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); -+ if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) -+ break; -+ -+ if (time_after(jiffies, timeout)) { -+ /* 1.2 check pWhalBus->uSelfClearTime if the -+ * timeout was reached */ -+ wl1251_error("soft reset timeout"); -+ return -1; -+ } -+ -+ udelay(SOFT_RESET_STALL_TIME); -+ } -+ -+ /* disable Rx/Tx */ -+ wl1251_reg_write32(wl, ENABLE, 0x0); -+ -+ /* disable auto calibration on start*/ -+ wl1251_reg_write32(wl, SPARE_A2, 0xffff); -+ -+ return 0; -+} -+ -+int wl1251_boot_init_seq(struct wl1251 *wl) -+{ -+ u32 scr_pad6, init_data, tmp, elp_cmd, ref_freq; -+ -+ /* -+ * col #1: INTEGER_DIVIDER -+ * col #2: FRACTIONAL_DIVIDER -+ * col #3: ATTN_BB -+ * col #4: ALPHA_BB -+ * col #5: STOP_TIME_BB -+ * col #6: BB_PLL_LOOP_FILTER -+ */ -+ static const u32 LUT[REF_FREQ_NUM][LUT_PARAM_NUM] = { -+ -+ { 83, 87381, 0xB, 5, 0xF00, 3}, /* REF_FREQ_19_2*/ -+ { 61, 141154, 0xB, 5, 0x1450, 2}, /* REF_FREQ_26_0*/ -+ { 41, 174763, 0xC, 6, 0x2D00, 1}, /* REF_FREQ_38_4*/ -+ { 40, 0, 0xC, 6, 0x2EE0, 1}, /* REF_FREQ_40_0*/ -+ { 47, 162280, 0xC, 6, 0x2760, 1} /* REF_FREQ_33_6 */ -+ }; -+ -+ /* read NVS params */ -+ scr_pad6 = wl1251_reg_read32(wl, SCR_PAD6); -+ wl1251_debug(DEBUG_BOOT, "scr_pad6 0x%x", scr_pad6); -+ -+ /* read ELP_CMD */ -+ elp_cmd = wl1251_reg_read32(wl, ELP_CMD); -+ wl1251_debug(DEBUG_BOOT, "elp_cmd 0x%x", elp_cmd); -+ -+ /* set the BB calibration time to be 300 usec (PLL_CAL_TIME) */ -+ ref_freq = scr_pad6 & 0x000000FF; -+ wl1251_debug(DEBUG_BOOT, "ref_freq 0x%x", ref_freq); -+ -+ wl1251_reg_write32(wl, PLL_CAL_TIME, 0x9); -+ -+ /* -+ * PG 1.2: set the clock buffer time to be 210 usec (CLK_BUF_TIME) -+ */ -+ wl1251_reg_write32(wl, CLK_BUF_TIME, 0x6); -+ -+ /* -+ * set the clock detect feature to work in the restart wu procedure -+ * (ELP_CFG_MODE[14]) and Select the clock source type -+ * (ELP_CFG_MODE[13:12]) -+ */ -+ tmp = ((scr_pad6 & 0x0000FF00) << 4) | 0x00004000; -+ wl1251_reg_write32(wl, ELP_CFG_MODE, tmp); -+ -+ /* PG 1.2: enable the BB PLL fix. 
Enable the PLL_LIMP_CLK_EN_CMD */ -+ elp_cmd |= 0x00000040; -+ wl1251_reg_write32(wl, ELP_CMD, elp_cmd); -+ -+ /* PG 1.2: Set the BB PLL stable time to be 1000usec -+ * (PLL_STABLE_TIME) */ -+ wl1251_reg_write32(wl, CFG_PLL_SYNC_CNT, 0x20); -+ -+ /* PG 1.2: read clock request time */ -+ init_data = wl1251_reg_read32(wl, CLK_REQ_TIME); -+ -+ /* -+ * PG 1.2: set the clock request time to be ref_clk_settling_time - -+ * 1ms = 4ms -+ */ -+ if (init_data > 0x21) -+ tmp = init_data - 0x21; -+ else -+ tmp = 0; -+ wl1251_reg_write32(wl, CLK_REQ_TIME, tmp); -+ -+ /* set BB PLL configurations in RF AFE */ -+ wl1251_reg_write32(wl, 0x003058cc, 0x4B5); -+ -+ /* set RF_AFE_REG_5 */ -+ wl1251_reg_write32(wl, 0x003058d4, 0x50); -+ -+ /* set RF_AFE_CTRL_REG_2 */ -+ wl1251_reg_write32(wl, 0x00305948, 0x11c001); -+ -+ /* -+ * change RF PLL and BB PLL divider for VCO clock and adjust VCO -+ * bais current(RF_AFE_REG_13) -+ */ -+ wl1251_reg_write32(wl, 0x003058f4, 0x1e); -+ -+ /* set BB PLL configurations */ -+ tmp = LUT[ref_freq][LUT_PARAM_INTEGER_DIVIDER] | 0x00017000; -+ wl1251_reg_write32(wl, 0x00305840, tmp); -+ -+ /* set fractional divider according to Appendix C-BB PLL -+ * Calculations -+ */ -+ tmp = LUT[ref_freq][LUT_PARAM_FRACTIONAL_DIVIDER]; -+ wl1251_reg_write32(wl, 0x00305844, tmp); -+ -+ /* set the initial data for the sigma delta */ -+ wl1251_reg_write32(wl, 0x00305848, 0x3039); -+ -+ /* -+ * set the accumulator attenuation value, calibration loop1 -+ * (alpha), calibration loop2 (beta), calibration loop3 (gamma) and -+ * the VCO gain -+ */ -+ tmp = (LUT[ref_freq][LUT_PARAM_ATTN_BB] << 16) | -+ (LUT[ref_freq][LUT_PARAM_ALPHA_BB] << 12) | 0x1; -+ wl1251_reg_write32(wl, 0x00305854, tmp); -+ -+ /* -+ * set the calibration stop time after holdoff time expires and set -+ * settling time HOLD_OFF_TIME_BB -+ */ -+ tmp = LUT[ref_freq][LUT_PARAM_STOP_TIME_BB] | 0x000A0000; -+ wl1251_reg_write32(wl, 0x00305858, tmp); -+ -+ /* -+ * set BB PLL Loop filter capacitor3- BB_C3[2:0] and set BB PLL -+ * constant leakage current to linearize PFD to 0uA - -+ * BB_ILOOPF[7:3] -+ */ -+ tmp = LUT[ref_freq][LUT_PARAM_BB_PLL_LOOP_FILTER] | 0x00000030; -+ wl1251_reg_write32(wl, 0x003058f8, tmp); -+ -+ /* -+ * set regulator output voltage for n divider to -+ * 1.35-BB_REFDIV[1:0], set charge pump current- BB_CPGAIN[4:2], -+ * set BB PLL Loop filter capacitor2- BB_C2[7:5], set gain of BB -+ * PLL auto-call to normal mode- BB_CALGAIN_3DB[8] -+ */ -+ wl1251_reg_write32(wl, 0x003058f0, 0x29); -+ -+ /* enable restart wakeup sequence (ELP_CMD[0]) */ -+ wl1251_reg_write32(wl, ELP_CMD, elp_cmd | 0x1); -+ -+ /* restart sequence completed */ -+ udelay(2000); -+ -+ return 0; -+} -+ -+static void wl1251_boot_set_ecpu_ctrl(struct wl1251 *wl, u32 flag) -+{ -+ u32 cpu_ctrl; -+ -+ /* 10.5.0 run the firmware (I) */ -+ cpu_ctrl = wl1251_reg_read32(wl, ACX_REG_ECPU_CONTROL); -+ -+ /* 10.5.1 run the firmware (II) */ -+ cpu_ctrl &= ~flag; -+ wl1251_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); -+} -+ -+int wl1251_boot_run_firmware(struct wl1251 *wl) -+{ -+ int loop, ret; -+ u32 chip_id, interrupt; -+ -+ wl1251_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); -+ -+ chip_id = wl1251_reg_read32(wl, CHIP_ID_B); -+ -+ wl1251_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); -+ -+ if (chip_id != wl->chip_id) { -+ wl1251_error("chip id doesn't match after firmware boot"); -+ return -EIO; -+ } -+ -+ /* wait for init to complete */ -+ loop = 0; -+ while (loop++ < INIT_LOOP) { -+ udelay(INIT_LOOP_DELAY); -+ interrupt = 
wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); -+ -+ if (interrupt == 0xffffffff) { -+ wl1251_error("error reading hardware complete " -+ "init indication"); -+ return -EIO; -+ } -+ /* check that ACX_INTR_INIT_COMPLETE is enabled */ -+ else if (interrupt & WL1251_ACX_INTR_INIT_COMPLETE) { -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK, -+ WL1251_ACX_INTR_INIT_COMPLETE); -+ break; -+ } -+ } -+ -+ if (loop >= INIT_LOOP) { -+ wl1251_error("timeout waiting for the hardware to " -+ "complete initialization"); -+ return -EIO; -+ } -+ -+ /* get hardware config command mail box */ -+ wl->cmd_box_addr = wl1251_reg_read32(wl, REG_COMMAND_MAILBOX_PTR); -+ -+ /* get hardware config event mail box */ -+ wl->event_box_addr = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR); -+ -+ /* set the working partition to its "running" mode offset */ -+ wl1251_set_partition(wl, WL1251_PART_WORK_MEM_START, -+ WL1251_PART_WORK_MEM_SIZE, -+ WL1251_PART_WORK_REG_START, -+ WL1251_PART_WORK_REG_SIZE); -+ -+ wl1251_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", -+ wl->cmd_box_addr, wl->event_box_addr); -+ -+ wl1251_acx_fw_version(wl, wl->fw_ver, sizeof(wl->fw_ver)); -+ -+ /* -+ * in case of full asynchronous mode the firmware event must be -+ * ready to receive event from the command mailbox -+ */ -+ -+ /* enable gpio interrupts */ -+ wl1251_boot_enable_interrupts(wl); -+ -+ /* Enable target's interrupts */ -+ wl->intr_mask = WL1251_ACX_INTR_RX0_DATA | -+ WL1251_ACX_INTR_RX1_DATA | -+ WL1251_ACX_INTR_TX_RESULT | -+ WL1251_ACX_INTR_EVENT_A | -+ WL1251_ACX_INTR_EVENT_B | -+ WL1251_ACX_INTR_INIT_COMPLETE; -+ wl1251_boot_target_enable_interrupts(wl); -+ -+ wl->event_mask = SCAN_COMPLETE_EVENT_ID | BSS_LOSE_EVENT_ID | -+ SYNCHRONIZATION_TIMEOUT_EVENT_ID | -+ ROAMING_TRIGGER_LOW_RSSI_EVENT_ID | -+ ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID | -+ REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID | -+ BT_PTA_PREDICTION_EVENT_ID | PS_REPORT_EVENT_ID; -+ -+ ret = wl1251_event_unmask(wl); -+ if (ret < 0) { -+ wl1251_error("EVENT mask setting failed"); -+ return ret; -+ } -+ -+ wl1251_event_mbox_config(wl); -+ -+ /* firmware startup completed */ -+ return 0; -+} -+ -+static int wl1251_boot_upload_firmware(struct wl1251 *wl) -+{ -+ int addr, chunk_num, partition_limit; -+ size_t fw_data_len, len; -+ u8 *p, *buf; -+ -+ /* whal_FwCtrl_LoadFwImageSm() */ -+ -+ wl1251_debug(DEBUG_BOOT, "chip id before fw upload: 0x%x", -+ wl1251_reg_read32(wl, CHIP_ID_B)); -+ -+ /* 10.0 check firmware length and set partition */ -+ fw_data_len = (wl->fw[4] << 24) | (wl->fw[5] << 16) | -+ (wl->fw[6] << 8) | (wl->fw[7]); -+ -+ wl1251_debug(DEBUG_BOOT, "fw_data_len %zu chunk_size %d", fw_data_len, -+ CHUNK_SIZE); -+ -+ if ((fw_data_len % 4) != 0) { -+ wl1251_error("firmware length not multiple of four"); -+ return -EIO; -+ } -+ -+ buf = kmalloc(CHUNK_SIZE, GFP_KERNEL); -+ if (!buf) { -+ wl1251_error("allocation for firmware upload chunk failed"); -+ return -ENOMEM; -+ } -+ -+ wl1251_set_partition(wl, WL1251_PART_DOWN_MEM_START, -+ WL1251_PART_DOWN_MEM_SIZE, -+ WL1251_PART_DOWN_REG_START, -+ WL1251_PART_DOWN_REG_SIZE); -+ -+ /* 10.1 set partition limit and chunk num */ -+ chunk_num = 0; -+ partition_limit = WL1251_PART_DOWN_MEM_SIZE; -+ -+ while (chunk_num < fw_data_len / CHUNK_SIZE) { -+ /* 10.2 update partition, if needed */ -+ addr = WL1251_PART_DOWN_MEM_START + -+ (chunk_num + 2) * CHUNK_SIZE; -+ if (addr > partition_limit) { -+ addr = WL1251_PART_DOWN_MEM_START + -+ chunk_num * CHUNK_SIZE; -+ partition_limit = chunk_num * CHUNK_SIZE + -+ 
WL1251_PART_DOWN_MEM_SIZE; -+ wl1251_set_partition(wl, -+ addr, -+ WL1251_PART_DOWN_MEM_SIZE, -+ WL1251_PART_DOWN_REG_START, -+ WL1251_PART_DOWN_REG_SIZE); -+ } -+ -+ /* 10.3 upload the chunk */ -+ addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE; -+ p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE; -+ wl1251_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", -+ p, addr); -+ -+ /* need to copy the chunk for dma */ -+ len = CHUNK_SIZE; -+ memcpy(buf, p, len); -+ wl1251_spi_mem_write(wl, addr, buf, len); -+ -+ chunk_num++; -+ } -+ -+ /* 10.4 upload the last chunk */ -+ addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE; -+ p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE; -+ -+ /* need to copy the chunk for dma */ -+ len = fw_data_len % CHUNK_SIZE; -+ memcpy(buf, p, len); -+ -+ wl1251_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x", -+ len, p, addr); -+ wl1251_spi_mem_write(wl, addr, buf, len); -+ -+ kfree(buf); -+ -+ return 0; -+} -+ -+static int wl1251_boot_upload_nvs(struct wl1251 *wl) -+{ -+ size_t nvs_len, nvs_bytes_written, burst_len; -+ int nvs_start, i; -+ u32 dest_addr, val; -+ u8 *nvs_ptr, *nvs; -+ -+ nvs = wl->nvs; -+ if (nvs == NULL) -+ return -ENODEV; -+ -+ nvs_ptr = nvs; -+ -+ nvs_len = wl->nvs_len; -+ nvs_start = wl->fw_len; -+ -+ /* -+ * Layout before the actual NVS tables: -+ * 1 byte : burst length. -+ * 2 bytes: destination address. -+ * n bytes: data to burst copy. -+ * -+ * This is ended by a 0 length, then the NVS tables. -+ */ -+ -+ while (nvs_ptr[0]) { -+ burst_len = nvs_ptr[0]; -+ dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); -+ -+ /* We move our pointer to the data */ -+ nvs_ptr += 3; -+ -+ for (i = 0; i < burst_len; i++) { -+ val = (nvs_ptr[0] | (nvs_ptr[1] << 8) -+ | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); -+ -+ wl1251_debug(DEBUG_BOOT, -+ "nvs burst write 0x%x: 0x%x", -+ dest_addr, val); -+ wl1251_mem_write32(wl, dest_addr, val); -+ -+ nvs_ptr += 4; -+ dest_addr += 4; -+ } -+ } -+ -+ /* -+ * We've reached the first zero length, the first NVS table -+ * is 7 bytes further. -+ */ -+ nvs_ptr += 7; -+ nvs_len -= nvs_ptr - nvs; -+ nvs_len = ALIGN(nvs_len, 4); -+ -+ /* Now we must set the partition correctly */ -+ wl1251_set_partition(wl, nvs_start, -+ WL1251_PART_DOWN_MEM_SIZE, -+ WL1251_PART_DOWN_REG_START, -+ WL1251_PART_DOWN_REG_SIZE); -+ -+ /* And finally we upload the NVS tables */ -+ nvs_bytes_written = 0; -+ while (nvs_bytes_written < nvs_len) { -+ val = (nvs_ptr[0] | (nvs_ptr[1] << 8) -+ | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); -+ -+ val = cpu_to_le32(val); -+ -+ wl1251_debug(DEBUG_BOOT, -+ "nvs write table 0x%x: 0x%x", -+ nvs_start, val); -+ wl1251_mem_write32(wl, nvs_start, val); -+ -+ nvs_ptr += 4; -+ nvs_bytes_written += 4; -+ nvs_start += 4; -+ } -+ -+ return 0; -+} -+ -+int wl1251_boot(struct wl1251 *wl) -+{ -+ int ret = 0, minor_minor_e2_ver; -+ u32 tmp, boot_data; -+ -+ ret = wl1251_boot_soft_reset(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* 2. start processing NVS file */ -+ ret = wl1251_boot_upload_nvs(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* write firmware's last address (ie. it's length) to -+ * ACX_EEPROMLESS_IND_REG */ -+ wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len); -+ -+ /* 6. read the EEPROM parameters */ -+ tmp = wl1251_reg_read32(wl, SCR_PAD2); -+ -+ /* 7. read bootdata */ -+ wl->boot_attr.radio_type = (tmp & 0x0000FF00) >> 8; -+ wl->boot_attr.major = (tmp & 0x00FF0000) >> 16; -+ tmp = wl1251_reg_read32(wl, SCR_PAD3); -+ -+ /* 8. 
check bootdata and call restart sequence */ -+ wl->boot_attr.minor = (tmp & 0x00FF0000) >> 16; -+ minor_minor_e2_ver = (tmp & 0xFF000000) >> 24; -+ -+ wl1251_debug(DEBUG_BOOT, "radioType 0x%x majorE2Ver 0x%x " -+ "minorE2Ver 0x%x minor_minor_e2_ver 0x%x", -+ wl->boot_attr.radio_type, wl->boot_attr.major, -+ wl->boot_attr.minor, minor_minor_e2_ver); -+ -+ ret = wl1251_boot_init_seq(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* 9. NVS processing done */ -+ boot_data = wl1251_reg_read32(wl, ACX_REG_ECPU_CONTROL); -+ -+ wl1251_debug(DEBUG_BOOT, "halt boot_data 0x%x", boot_data); -+ -+ /* 10. check that ECPU_CONTROL_HALT bits are set in -+ * pWhalBus->uBootData and start uploading firmware -+ */ -+ if ((boot_data & ECPU_CONTROL_HALT) == 0) { -+ wl1251_error("boot failed, ECPU_CONTROL_HALT not set"); -+ ret = -EIO; -+ goto out; -+ } -+ -+ ret = wl1251_boot_upload_firmware(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* 10.5 start firmware */ -+ ret = wl1251_boot_run_firmware(wl); -+ if (ret < 0) -+ goto out; -+ -+out: -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_boot.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_boot.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_boot.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_boot.h 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,41 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __BOOT_H__ -+#define __BOOT_H__ -+ -+#include "wl1251.h" -+ -+int wl1251_boot_soft_reset(struct wl1251 *wl); -+int wl1251_boot_init_seq(struct wl1251 *wl); -+int wl1251_boot_run_firmware(struct wl1251 *wl); -+void wl1251_boot_target_enable_interrupts(struct wl1251 *wl); -+int wl1251_boot(struct wl1251 *wl); -+ -+/* number of times we try to read the INIT interrupt */ -+#define INIT_LOOP 20000 -+ -+/* delay between retries */ -+#define INIT_LOOP_DELAY 50 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_cmd.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_cmd.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_cmd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_cmd.c 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,412 @@ -+#include "wl1251_cmd.h" -+ -+#include -+#include -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+#include "wl1251_ps.h" -+#include "wl1251_acx.h" -+ -+/** -+ * send command to firmware -+ * -+ * @wl: wl struct -+ * @id: command id -+ * @buf: buffer containing the command, must work with dma -+ * @len: length of the buffer -+ */ -+int wl1251_cmd_send(struct wl1251 *wl, u16 id, void *buf, size_t len) -+{ -+ struct wl1251_cmd_header *cmd; -+ unsigned long timeout; -+ u32 intr; -+ int ret = 0; -+ -+ cmd = buf; -+ cmd->id = id; -+ cmd->status = 0; -+ -+ WARN_ON(len % 4 != 0); -+ -+ wl1251_spi_mem_write(wl, wl->cmd_box_addr, buf, len); -+ -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); -+ -+ timeout = jiffies + msecs_to_jiffies(WL1251_COMMAND_TIMEOUT); -+ -+ intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); -+ while (!(intr & WL1251_ACX_INTR_CMD_COMPLETE)) { -+ if (time_after(jiffies, timeout)) { -+ wl1251_error("command complete timeout"); -+ ret = -ETIMEDOUT; -+ goto out; -+ } -+ -+ msleep(1); -+ -+ intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); -+ } -+ -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK, -+ WL1251_ACX_INTR_CMD_COMPLETE); -+ -+out: -+ return ret; -+} -+ -+/** -+ * send test command to firmware -+ * -+ * @wl: wl struct -+ * @buf: buffer containing the command, with all headers, must work with dma -+ * @len: length of the buffer -+ * @answer: is answer needed -+ */ -+int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer) -+{ -+ int ret; -+ -+ wl1251_debug(DEBUG_CMD, "cmd test"); -+ -+ ret = wl1251_cmd_send(wl, CMD_TEST, buf, buf_len); -+ -+ if (ret < 0) { -+ wl1251_warning("TEST command failed"); -+ return ret; -+ } -+ -+ if (answer) { -+ struct wl1251_command *cmd_answer; -+ -+ /* -+ * The test command got in, we can read the answer. -+ * The answer would be a wl1251_command, where the -+ * parameter array contains the actual answer. 
-+ */ -+ wl1251_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len); -+ -+ cmd_answer = buf; -+ -+ if (cmd_answer->header.status != CMD_STATUS_SUCCESS) -+ wl1251_error("TEST command answer error: %d", -+ cmd_answer->header.status); -+ } -+ -+ return 0; -+} -+ -+/** -+ * read acx from firmware -+ * -+ * @wl: wl struct -+ * @id: acx id -+ * @buf: buffer for the response, including all headers, must work with dma -+ * @len: lenght of buf -+ */ -+int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len) -+{ -+ struct acx_header *acx = buf; -+ int ret; -+ -+ wl1251_debug(DEBUG_CMD, "cmd interrogate"); -+ -+ acx->id = id; -+ -+ /* payload length, does not include any headers */ -+ acx->len = len - sizeof(*acx); -+ -+ ret = wl1251_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1251_error("INTERROGATE command failed"); -+ goto out; -+ } -+ -+ /* the interrogate command got in, we can read the answer */ -+ wl1251_spi_mem_read(wl, wl->cmd_box_addr, buf, len); -+ -+ acx = buf; -+ if (acx->cmd.status != CMD_STATUS_SUCCESS) -+ wl1251_error("INTERROGATE command error: %d", -+ acx->cmd.status); -+ -+out: -+ return ret; -+} -+ -+/** -+ * write acx value to firmware -+ * -+ * @wl: wl struct -+ * @id: acx id -+ * @buf: buffer containing acx, including all headers, must work with dma -+ * @len: length of buf -+ */ -+int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len) -+{ -+ struct acx_header *acx = buf; -+ int ret; -+ -+ wl1251_debug(DEBUG_CMD, "cmd configure"); -+ -+ acx->id = id; -+ -+ /* payload length, does not include any headers */ -+ acx->len = len - sizeof(*acx); -+ -+ ret = wl1251_cmd_send(wl, CMD_CONFIGURE, acx, len); -+ if (ret < 0) { -+ wl1251_warning("CONFIGURE command NOK"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity, -+ void *bitmap, u16 bitmap_len, u8 bitmap_control) -+{ -+ struct wl1251_cmd_vbm_update *vbm; -+ int ret; -+ -+ wl1251_debug(DEBUG_CMD, "cmd vbm"); -+ -+ vbm = kzalloc(sizeof(*vbm), GFP_KERNEL); -+ if (!vbm) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* Count and period will be filled by the target */ -+ vbm->tim.bitmap_ctrl = bitmap_control; -+ if (bitmap_len > PARTIAL_VBM_MAX) { -+ wl1251_warning("cmd vbm len is %d B, truncating to %d", -+ bitmap_len, PARTIAL_VBM_MAX); -+ bitmap_len = PARTIAL_VBM_MAX; -+ } -+ memcpy(vbm->tim.pvb_field, bitmap, bitmap_len); -+ vbm->tim.identity = identity; -+ vbm->tim.length = bitmap_len + 3; -+ -+ vbm->len = cpu_to_le16(bitmap_len + 5); -+ -+ ret = wl1251_cmd_send(wl, CMD_VBM, vbm, sizeof(*vbm)); -+ if (ret < 0) { -+ wl1251_error("VBM command failed"); -+ goto out; -+ } -+ -+out: -+ kfree(vbm); -+ return 0; -+} -+ -+int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable) -+{ -+ struct cmd_enabledisable_path *cmd; -+ int ret; -+ u16 cmd_rx, cmd_tx; -+ -+ wl1251_debug(DEBUG_CMD, "cmd data path"); -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ cmd->channel = channel; -+ -+ if (enable) { -+ cmd_rx = CMD_ENABLE_RX; -+ cmd_tx = CMD_ENABLE_TX; -+ } else { -+ cmd_rx = CMD_DISABLE_RX; -+ cmd_tx = CMD_DISABLE_TX; -+ } -+ -+ ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1251_error("rx %s cmd for channel %d failed", -+ enable ? "start" : "stop", channel); -+ goto out; -+ } -+ -+ wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d", -+ enable ? 
"start" : "stop", channel); -+ -+ ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1251_error("tx %s cmd for channel %d failed", -+ enable ? "start" : "stop", channel); -+ return ret; -+ } -+ -+ wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", -+ enable ? "start" : "stop", channel); -+ -+out: -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel, -+ u16 beacon_interval, u8 dtim_interval) -+{ -+ struct cmd_join *join; -+ int ret, i; -+ u8 *bssid; -+ -+ join = kzalloc(sizeof(*join), GFP_KERNEL); -+ if (!join) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wl1251_debug(DEBUG_CMD, "cmd join%s ch %d %d/%d", -+ bss_type == BSS_TYPE_IBSS ? " ibss" : "", -+ channel, beacon_interval, dtim_interval); -+ -+ /* Reverse order BSSID */ -+ bssid = (u8 *) &join->bssid_lsb; -+ for (i = 0; i < ETH_ALEN; i++) -+ bssid[i] = wl->bssid[ETH_ALEN - i - 1]; -+ -+ join->rx_config_options = wl->rx_config; -+ join->rx_filter_options = wl->rx_filter; -+ -+ join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | -+ RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; -+ -+ join->beacon_interval = beacon_interval; -+ join->dtim_interval = dtim_interval; -+ join->bss_type = bss_type; -+ join->channel = channel; -+ join->ssid_len = wl->ssid_len; -+ memcpy(join->ssid, wl->ssid, wl->ssid_len); -+ join->ctrl = JOIN_CMD_CTRL_TX_FLUSH; -+ -+ /* increment the session counter */ -+ wl->session_counter++; -+ if (wl->session_counter >= SESSION_COUNTER_MAX) -+ wl->session_counter = 0; -+ -+ ret = wl1251_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join)); -+ if (ret < 0) { -+ wl1251_error("failed to initiate cmd join"); -+ goto out; -+ } -+ -+out: -+ kfree(join); -+ return ret; -+} -+ -+int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode) -+{ -+ struct wl1251_cmd_ps_params *ps_params = NULL; -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_CMD, "cmd set ps mode"); -+ -+ ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL); -+ if (!ps_params) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ps_params->ps_mode = ps_mode; -+ ps_params->send_null_data = 1; -+ ps_params->retries = 5; -+ ps_params->hang_over_period = 128; -+ ps_params->null_data_rate = 1; /* 1 Mbps */ -+ -+ ret = wl1251_cmd_send(wl, CMD_SET_PS_MODE, ps_params, -+ sizeof(*ps_params)); -+ if (ret < 0) { -+ wl1251_error("cmd set_ps_mode failed"); -+ goto out; -+ } -+ -+out: -+ kfree(ps_params); -+ return ret; -+} -+ -+int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer, -+ size_t len) -+{ -+ struct cmd_read_write_memory *cmd; -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_CMD, "cmd read memory"); -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ WARN_ON(len > MAX_READ_SIZE); -+ len = min_t(size_t, len, MAX_READ_SIZE); -+ -+ cmd->addr = addr; -+ cmd->size = len; -+ -+ ret = wl1251_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1251_error("read memory command failed: %d", ret); -+ goto out; -+ } -+ -+ /* the read command got in, we can now read the answer */ -+ wl1251_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd)); -+ -+ if (cmd->header.status != CMD_STATUS_SUCCESS) -+ wl1251_error("error in read command result: %d", -+ cmd->header.status); -+ -+ memcpy(answer, cmd->value, len); -+ -+out: -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, -+ void *buf, size_t buf_len) -+{ -+ struct wl1251_cmd_packet_template *cmd; -+ size_t cmd_len; -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_CMD, "cmd 
template %d", cmd_id); -+ -+ WARN_ON(buf_len > WL1251_MAX_TEMPLATE_SIZE); -+ buf_len = min_t(size_t, buf_len, WL1251_MAX_TEMPLATE_SIZE); -+ cmd_len = ALIGN(sizeof(*cmd) + buf_len, 4); -+ -+ cmd = kzalloc(cmd_len, GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ cmd->size = cpu_to_le16(buf_len); -+ -+ if (buf) -+ memcpy(cmd->data, buf, buf_len); -+ -+ ret = wl1251_cmd_send(wl, cmd_id, cmd, cmd_len); -+ if (ret < 0) { -+ wl1251_warning("cmd set_template failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(cmd); -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_cmd.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_cmd.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_cmd.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_cmd.h 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,407 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_CMD_H__ -+#define __WL1251_CMD_H__ -+ -+#include "wl1251.h" -+ -+struct acx_header; -+ -+int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len); -+int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer); -+int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len); -+int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len); -+int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity, -+ void *bitmap, u16 bitmap_len, u8 bitmap_control); -+int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable); -+int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel, -+ u16 beacon_interval, u8 dtim_interval); -+int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode); -+int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer, -+ size_t len); -+int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, -+ void *buf, size_t buf_len); -+ -+/* unit ms */ -+#define WL1251_COMMAND_TIMEOUT 2000 -+ -+enum wl1251_commands { -+ CMD_RESET = 0, -+ CMD_INTERROGATE = 1, /*use this to read information elements*/ -+ CMD_CONFIGURE = 2, /*use this to write information elements*/ -+ CMD_ENABLE_RX = 3, -+ CMD_ENABLE_TX = 4, -+ CMD_DISABLE_RX = 5, -+ CMD_DISABLE_TX = 6, -+ CMD_SCAN = 8, -+ CMD_STOP_SCAN = 9, -+ CMD_VBM = 10, -+ CMD_START_JOIN = 11, -+ CMD_SET_KEYS = 12, -+ CMD_READ_MEMORY = 13, -+ CMD_WRITE_MEMORY = 14, -+ CMD_BEACON = 19, -+ CMD_PROBE_RESP = 20, -+ CMD_NULL_DATA = 21, -+ CMD_PROBE_REQ = 22, -+ CMD_TEST = 23, -+ CMD_RADIO_CALIBRATE = 25, /* OBSOLETE */ -+ CMD_ENABLE_RX_PATH = 27, /* OBSOLETE */ -+ CMD_NOISE_HIST = 28, -+ CMD_RX_RESET = 29, -+ CMD_PS_POLL = 30, -+ CMD_QOS_NULL_DATA = 31, -+ CMD_LNA_CONTROL = 32, -+ 
CMD_SET_BCN_MODE = 33, -+ CMD_MEASUREMENT = 34, -+ CMD_STOP_MEASUREMENT = 35, -+ CMD_DISCONNECT = 36, -+ CMD_SET_PS_MODE = 37, -+ CMD_CHANNEL_SWITCH = 38, -+ CMD_STOP_CHANNEL_SWICTH = 39, -+ CMD_AP_DISCOVERY = 40, -+ CMD_STOP_AP_DISCOVERY = 41, -+ CMD_SPS_SCAN = 42, -+ CMD_STOP_SPS_SCAN = 43, -+ CMD_HEALTH_CHECK = 45, -+ CMD_DEBUG = 46, -+ CMD_TRIGGER_SCAN_TO = 47, -+ -+ NUM_COMMANDS, -+ MAX_COMMAND_ID = 0xFFFF, -+}; -+ -+#define MAX_CMD_PARAMS 572 -+ -+struct wl1251_cmd_header { -+ u16 id; -+ u16 status; -+ /* payload */ -+ u8 data[0]; -+} __attribute__ ((packed)); -+ -+struct wl1251_command { -+ struct wl1251_cmd_header header; -+ u8 parameters[MAX_CMD_PARAMS]; -+}; -+ -+enum { -+ CMD_MAILBOX_IDLE = 0, -+ CMD_STATUS_SUCCESS = 1, -+ CMD_STATUS_UNKNOWN_CMD = 2, -+ CMD_STATUS_UNKNOWN_IE = 3, -+ CMD_STATUS_REJECT_MEAS_SG_ACTIVE = 11, -+ CMD_STATUS_RX_BUSY = 13, -+ CMD_STATUS_INVALID_PARAM = 14, -+ CMD_STATUS_TEMPLATE_TOO_LARGE = 15, -+ CMD_STATUS_OUT_OF_MEMORY = 16, -+ CMD_STATUS_STA_TABLE_FULL = 17, -+ CMD_STATUS_RADIO_ERROR = 18, -+ CMD_STATUS_WRONG_NESTING = 19, -+ CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/ -+ CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ -+ MAX_COMMAND_STATUS = 0xff -+}; -+ -+ -+/* -+ * CMD_READ_MEMORY -+ * -+ * The host issues this command to read the WiLink device memory/registers. -+ * -+ * Note: The Base Band address has special handling (16 bits registers and -+ * addresses). For more information, see the hardware specification. -+ */ -+/* -+ * CMD_WRITE_MEMORY -+ * -+ * The host issues this command to write the WiLink device memory/registers. -+ * -+ * The Base Band address has special handling (16 bits registers and -+ * addresses). For more information, see the hardware specification. -+ */ -+#define MAX_READ_SIZE 256 -+ -+struct cmd_read_write_memory { -+ struct wl1251_cmd_header header; -+ -+ /* The address of the memory to read from or write to.*/ -+ u32 addr; -+ -+ /* The amount of data in bytes to read from or write to the WiLink -+ * device.*/ -+ u32 size; -+ -+ /* The actual value read from or written to the Wilink. The source -+ of this field is the Host in WRITE command or the Wilink in READ -+ command. */ -+ u8 value[MAX_READ_SIZE]; -+}; -+ -+#define CMDMBOX_HEADER_LEN 4 -+#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 -+ -+ -+struct basic_scan_parameters { -+ u32 rx_config_options; -+ u32 rx_filter_options; -+ -+ /* -+ * Scan options: -+ * bit 0: When this bit is set, passive scan. -+ * bit 1: Band, when this bit is set we scan -+ * in the 5Ghz band. -+ * bit 2: voice mode, 0 for normal scan. -+ * bit 3: scan priority, 1 for high priority. -+ */ -+ u16 scan_options; -+ -+ /* Number of channels to scan */ -+ u8 num_channels; -+ -+ /* Number opf probe requests to send, per channel */ -+ u8 num_probe_requests; -+ -+ /* Rate and modulation for probe requests */ -+ u16 tx_rate; -+ -+ u8 tid_trigger; -+ u8 ssid_len; -+ u32 ssid[8]; -+ -+} __attribute__ ((packed)); -+ -+struct basic_scan_channel_parameters { -+ u32 min_duration; /* in TU */ -+ u32 max_duration; /* in TU */ -+ u32 bssid_lsb; -+ u16 bssid_msb; -+ -+ /* -+ * bits 0-3: Early termination count. -+ * bits 4-5: Early termination condition. 
-+ */ -+ u8 early_termination; -+ -+ u8 tx_power_att; -+ u8 channel; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+/* SCAN parameters */ -+#define SCAN_MAX_NUM_OF_CHANNELS 16 -+ -+struct cmd_scan { -+ struct wl1251_cmd_header header; -+ -+ struct basic_scan_parameters params; -+ struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; -+} __attribute__ ((packed)); -+ -+enum { -+ BSS_TYPE_IBSS = 0, -+ BSS_TYPE_STA_BSS = 2, -+ BSS_TYPE_AP_BSS = 3, -+ MAX_BSS_TYPE = 0xFF -+}; -+ -+#define JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ -+#define JOIN_CMD_CTRL_EARLY_WAKEUP_ENABLE 0x01 /* Early wakeup time */ -+ -+ -+struct cmd_join { -+ struct wl1251_cmd_header header; -+ -+ u32 bssid_lsb; -+ u16 bssid_msb; -+ u16 beacon_interval; /* in TBTTs */ -+ u32 rx_config_options; -+ u32 rx_filter_options; -+ -+ /* -+ * The target uses this field to determine the rate at -+ * which to transmit control frame responses (such as -+ * ACK or CTS frames). -+ */ -+ u16 basic_rate_set; -+ u8 dtim_interval; -+ u8 tx_ctrl_frame_rate; /* OBSOLETE */ -+ u8 tx_ctrl_frame_mod; /* OBSOLETE */ -+ /* -+ * bits 0-2: This bitwise field specifies the type -+ * of BSS to start or join (BSS_TYPE_*). -+ * bit 4: Band - The radio band in which to join -+ * or start. -+ * 0 - 2.4GHz band -+ * 1 - 5GHz band -+ * bits 3, 5-7: Reserved -+ */ -+ u8 bss_type; -+ u8 channel; -+ u8 ssid_len; -+ u8 ssid[IW_ESSID_MAX_SIZE]; -+ u8 ctrl; /* JOIN_CMD_CTRL_* */ -+ u8 tx_mgt_frame_rate; /* OBSOLETE */ -+ u8 tx_mgt_frame_mod; /* OBSOLETE */ -+ u8 reserved; -+} __attribute__ ((packed)); -+ -+struct cmd_enabledisable_path { -+ struct wl1251_cmd_header header; -+ -+ u8 channel; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+#define WL1251_MAX_TEMPLATE_SIZE 300 -+ -+struct wl1251_cmd_packet_template { -+ struct wl1251_cmd_header header; -+ -+ __le16 size; -+ u8 data[0]; -+} __attribute__ ((packed)); -+ -+#define TIM_ELE_ID 5 -+#define PARTIAL_VBM_MAX 251 -+ -+struct wl1251_tim { -+ u8 identity; -+ u8 length; -+ u8 dtim_count; -+ u8 dtim_period; -+ u8 bitmap_ctrl; -+ u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ -+} __attribute__ ((packed)); -+ -+/* Virtual Bit Map update */ -+struct wl1251_cmd_vbm_update { -+ struct wl1251_cmd_header header; -+ __le16 len; -+ u8 padding[2]; -+ struct wl1251_tim tim; -+} __attribute__ ((packed)); -+ -+enum wl1251_cmd_ps_mode { -+ STATION_ACTIVE_MODE, -+ STATION_POWER_SAVE_MODE -+}; -+ -+struct wl1251_cmd_ps_params { -+ struct wl1251_cmd_header header; -+ -+ u8 ps_mode; /* STATION_* */ -+ u8 send_null_data; /* Do we have to send NULL data packet ? */ -+ u8 retries; /* Number of retires for the initial NULL data packet */ -+ -+ /* -+ * TUs during which the target stays awake after switching -+ * to power save mode. 
-+ */ -+ u8 hang_over_period; -+ u16 null_data_rate; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct wl1251_cmd_trigger_scan_to { -+ struct wl1251_cmd_header header; -+ -+ u32 timeout; -+}; -+ -+/* HW encryption keys */ -+#define NUM_ACCESS_CATEGORIES_COPY 4 -+#define MAX_KEY_SIZE 32 -+ -+/* When set, disable HW encryption */ -+#define DF_ENCRYPTION_DISABLE 0x01 -+/* When set, disable HW decryption */ -+#define DF_SNIFF_MODE_ENABLE 0x80 -+ -+enum wl1251_cmd_key_action { -+ KEY_ADD_OR_REPLACE = 1, -+ KEY_REMOVE = 2, -+ KEY_SET_ID = 3, -+ MAX_KEY_ACTION = 0xffff, -+}; -+ -+enum wl1251_cmd_key_type { -+ KEY_WEP_DEFAULT = 0, -+ KEY_WEP_ADDR = 1, -+ KEY_AES_GROUP = 4, -+ KEY_AES_PAIRWISE = 5, -+ KEY_WEP_GROUP = 6, -+ KEY_TKIP_MIC_GROUP = 10, -+ KEY_TKIP_MIC_PAIRWISE = 11, -+}; -+ -+/* -+ * -+ * key_type_e key size key format -+ * ---------- --------- ---------- -+ * 0x00 5, 13, 29 Key data -+ * 0x01 5, 13, 29 Key data -+ * 0x04 16 16 bytes of key data -+ * 0x05 16 16 bytes of key data -+ * 0x0a 32 16 bytes of TKIP key data -+ * 8 bytes of RX MIC key data -+ * 8 bytes of TX MIC key data -+ * 0x0b 32 16 bytes of TKIP key data -+ * 8 bytes of RX MIC key data -+ * 8 bytes of TX MIC key data -+ * -+ */ -+ -+struct wl1251_cmd_set_keys { -+ struct wl1251_cmd_header header; -+ -+ /* Ignored for default WEP key */ -+ u8 addr[ETH_ALEN]; -+ -+ /* key_action_e */ -+ u16 key_action; -+ -+ u16 reserved_1; -+ -+ /* key size in bytes */ -+ u8 key_size; -+ -+ /* key_type_e */ -+ u8 key_type; -+ u8 ssid_profile; -+ -+ /* -+ * TKIP, AES: frame's key id field. -+ * For WEP default key: key id; -+ */ -+ u8 id; -+ u8 reserved_2[6]; -+ u8 key[MAX_KEY_SIZE]; -+ u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; -+ u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; -+} __attribute__ ((packed)); -+ -+ -+#endif /* __WL1251_CMD_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_debugfs.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_debugfs.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_debugfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_debugfs.c 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,518 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1251_debugfs.h" -+ -+#include -+ -+#include "wl1251.h" -+#include "wl1251_acx.h" -+#include "wl1251_ps.h" -+ -+/* ms */ -+#define WL1251_DEBUGFS_STATS_LIFETIME 1000 -+ -+/* debugfs macros idea from mac80211 */ -+ -+#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) 
\ -+static ssize_t name## _read(struct file *file, char __user *userbuf, \ -+ size_t count, loff_t *ppos) \ -+{ \ -+ struct wl1251 *wl = file->private_data; \ -+ char buf[buflen]; \ -+ int res; \ -+ \ -+ res = scnprintf(buf, buflen, fmt "\n", ##value); \ -+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ -+} \ -+ \ -+static const struct file_operations name## _ops = { \ -+ .read = name## _read, \ -+ .open = wl1251_open_file_generic, \ -+}; -+ -+#define DEBUGFS_ADD(name, parent) \ -+ wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ -+ wl, &name## _ops); \ -+ if (IS_ERR(wl->debugfs.name)) { \ -+ ret = PTR_ERR(wl->debugfs.name); \ -+ wl->debugfs.name = NULL; \ -+ goto out; \ -+ } -+ -+#define DEBUGFS_DEL(name) \ -+ do { \ -+ debugfs_remove(wl->debugfs.name); \ -+ wl->debugfs.name = NULL; \ -+ } while (0) -+ -+#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \ -+static ssize_t sub## _ ##name## _read(struct file *file, \ -+ char __user *userbuf, \ -+ size_t count, loff_t *ppos) \ -+{ \ -+ struct wl1251 *wl = file->private_data; \ -+ char buf[buflen]; \ -+ int res; \ -+ \ -+ wl1251_debugfs_update_stats(wl); \ -+ \ -+ res = scnprintf(buf, buflen, fmt "\n", \ -+ wl->stats.fw_stats->sub.name); \ -+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ -+} \ -+ \ -+static const struct file_operations sub## _ ##name## _ops = { \ -+ .read = sub## _ ##name## _read, \ -+ .open = wl1251_open_file_generic, \ -+}; -+ -+#define DEBUGFS_FWSTATS_ADD(sub, name) \ -+ DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics) -+ -+#define DEBUGFS_FWSTATS_DEL(sub, name) \ -+ DEBUGFS_DEL(sub## _ ##name) -+ -+static void wl1251_debugfs_update_stats(struct wl1251 *wl) -+{ -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ if (wl->state == WL1251_STATE_ON && -+ time_after(jiffies, wl->stats.fw_stats_update + -+ msecs_to_jiffies(WL1251_DEBUGFS_STATS_LIFETIME))) { -+ wl1251_acx_statistics(wl, wl->stats.fw_stats); -+ wl->stats.fw_stats_update = jiffies; -+ } -+ -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+static int wl1251_open_file_generic(struct inode *inode, struct file *file) -+{ -+ file->private_data = inode->i_private; -+ return 0; -+} -+ -+DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, 
commands, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u"); -+/* skipping wep.reserved */ -+DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u"); -+/* skipping cont_miss_bcns_spread for now */ -+DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, -+ 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u"); -+ -+DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count); -+DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u", -+ wl->stats.excessive_retries); -+ -+static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, -+ size_t count, loff_t *ppos) -+{ -+ struct wl1251 *wl = file->private_data; -+ u32 queue_len; -+ char buf[20]; -+ int res; -+ -+ queue_len = skb_queue_len(&wl->tx_queue); -+ -+ res = scnprintf(buf, sizeof(buf), "%u\n", queue_len); -+ return 
simple_read_from_buffer(userbuf, count, ppos, buf, res); -+} -+ -+static const struct file_operations tx_queue_len_ops = { -+ .read = tx_queue_len_read, -+ .open = wl1251_open_file_generic, -+}; -+ -+static void wl1251_debugfs_delete_files(struct wl1251 *wl) -+{ -+ DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); -+ -+ DEBUGFS_FWSTATS_DEL(rx, out_of_mem); -+ DEBUGFS_FWSTATS_DEL(rx, hdr_overflow); -+ DEBUGFS_FWSTATS_DEL(rx, hw_stuck); -+ DEBUGFS_FWSTATS_DEL(rx, dropped); -+ DEBUGFS_FWSTATS_DEL(rx, fcs_err); -+ DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig); -+ DEBUGFS_FWSTATS_DEL(rx, path_reset); -+ DEBUGFS_FWSTATS_DEL(rx, reset_counter); -+ -+ DEBUGFS_FWSTATS_DEL(dma, rx_requested); -+ DEBUGFS_FWSTATS_DEL(dma, rx_errors); -+ DEBUGFS_FWSTATS_DEL(dma, tx_requested); -+ DEBUGFS_FWSTATS_DEL(dma, tx_errors); -+ -+ DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt); -+ DEBUGFS_FWSTATS_DEL(isr, fiqs); -+ DEBUGFS_FWSTATS_DEL(isr, rx_headers); -+ DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow); -+ DEBUGFS_FWSTATS_DEL(isr, rx_rdys); -+ DEBUGFS_FWSTATS_DEL(isr, irqs); -+ DEBUGFS_FWSTATS_DEL(isr, tx_procs); -+ DEBUGFS_FWSTATS_DEL(isr, decrypt_done); -+ DEBUGFS_FWSTATS_DEL(isr, dma0_done); -+ DEBUGFS_FWSTATS_DEL(isr, dma1_done); -+ DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete); -+ DEBUGFS_FWSTATS_DEL(isr, commands); -+ DEBUGFS_FWSTATS_DEL(isr, rx_procs); -+ DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes); -+ DEBUGFS_FWSTATS_DEL(isr, host_acknowledges); -+ DEBUGFS_FWSTATS_DEL(isr, pci_pm); -+ DEBUGFS_FWSTATS_DEL(isr, wakeups); -+ DEBUGFS_FWSTATS_DEL(isr, low_rssi); -+ -+ DEBUGFS_FWSTATS_DEL(wep, addr_key_count); -+ DEBUGFS_FWSTATS_DEL(wep, default_key_count); -+ /* skipping wep.reserved */ -+ DEBUGFS_FWSTATS_DEL(wep, key_not_found); -+ DEBUGFS_FWSTATS_DEL(wep, decrypt_fail); -+ DEBUGFS_FWSTATS_DEL(wep, packets); -+ DEBUGFS_FWSTATS_DEL(wep, interrupt); -+ -+ DEBUGFS_FWSTATS_DEL(pwr, ps_enter); -+ DEBUGFS_FWSTATS_DEL(pwr, elp_enter); -+ DEBUGFS_FWSTATS_DEL(pwr, missing_bcns); -+ DEBUGFS_FWSTATS_DEL(pwr, wake_on_host); -+ DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp); -+ DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons); -+ DEBUGFS_FWSTATS_DEL(pwr, power_save_off); -+ DEBUGFS_FWSTATS_DEL(pwr, enable_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, disable_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps); -+ /* skipping cont_miss_bcns_spread for now */ -+ DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons); -+ -+ DEBUGFS_FWSTATS_DEL(mic, rx_pkts); -+ DEBUGFS_FWSTATS_DEL(mic, calc_failure); -+ -+ DEBUGFS_FWSTATS_DEL(aes, encrypt_fail); -+ DEBUGFS_FWSTATS_DEL(aes, decrypt_fail); -+ DEBUGFS_FWSTATS_DEL(aes, encrypt_packets); -+ DEBUGFS_FWSTATS_DEL(aes, decrypt_packets); -+ DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt); -+ DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt); -+ -+ DEBUGFS_FWSTATS_DEL(event, heart_beat); -+ DEBUGFS_FWSTATS_DEL(event, calibration); -+ DEBUGFS_FWSTATS_DEL(event, rx_mismatch); -+ DEBUGFS_FWSTATS_DEL(event, rx_mem_empty); -+ DEBUGFS_FWSTATS_DEL(event, rx_pool); -+ DEBUGFS_FWSTATS_DEL(event, oom_late); -+ DEBUGFS_FWSTATS_DEL(event, phy_transmit_error); -+ DEBUGFS_FWSTATS_DEL(event, tx_stuck); -+ -+ DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn); -+ DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn); -+ DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_utilization); -+ -+ DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop); -+ 
DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); -+ -+ DEBUGFS_DEL(tx_queue_len); -+ DEBUGFS_DEL(retry_count); -+ DEBUGFS_DEL(excessive_retries); -+} -+ -+static int wl1251_debugfs_add_files(struct wl1251 *wl) -+{ -+ int ret = 0; -+ -+ DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); -+ -+ DEBUGFS_FWSTATS_ADD(rx, out_of_mem); -+ DEBUGFS_FWSTATS_ADD(rx, hdr_overflow); -+ DEBUGFS_FWSTATS_ADD(rx, hw_stuck); -+ DEBUGFS_FWSTATS_ADD(rx, dropped); -+ DEBUGFS_FWSTATS_ADD(rx, fcs_err); -+ DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig); -+ DEBUGFS_FWSTATS_ADD(rx, path_reset); -+ DEBUGFS_FWSTATS_ADD(rx, reset_counter); -+ -+ DEBUGFS_FWSTATS_ADD(dma, rx_requested); -+ DEBUGFS_FWSTATS_ADD(dma, rx_errors); -+ DEBUGFS_FWSTATS_ADD(dma, tx_requested); -+ DEBUGFS_FWSTATS_ADD(dma, tx_errors); -+ -+ DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt); -+ DEBUGFS_FWSTATS_ADD(isr, fiqs); -+ DEBUGFS_FWSTATS_ADD(isr, rx_headers); -+ DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow); -+ DEBUGFS_FWSTATS_ADD(isr, rx_rdys); -+ DEBUGFS_FWSTATS_ADD(isr, irqs); -+ DEBUGFS_FWSTATS_ADD(isr, tx_procs); -+ DEBUGFS_FWSTATS_ADD(isr, decrypt_done); -+ DEBUGFS_FWSTATS_ADD(isr, dma0_done); -+ DEBUGFS_FWSTATS_ADD(isr, dma1_done); -+ DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete); -+ DEBUGFS_FWSTATS_ADD(isr, commands); -+ DEBUGFS_FWSTATS_ADD(isr, rx_procs); -+ DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes); -+ DEBUGFS_FWSTATS_ADD(isr, host_acknowledges); -+ DEBUGFS_FWSTATS_ADD(isr, pci_pm); -+ DEBUGFS_FWSTATS_ADD(isr, wakeups); -+ DEBUGFS_FWSTATS_ADD(isr, low_rssi); -+ -+ DEBUGFS_FWSTATS_ADD(wep, addr_key_count); -+ DEBUGFS_FWSTATS_ADD(wep, default_key_count); -+ /* skipping wep.reserved */ -+ DEBUGFS_FWSTATS_ADD(wep, key_not_found); -+ DEBUGFS_FWSTATS_ADD(wep, decrypt_fail); -+ DEBUGFS_FWSTATS_ADD(wep, packets); -+ DEBUGFS_FWSTATS_ADD(wep, interrupt); -+ -+ DEBUGFS_FWSTATS_ADD(pwr, ps_enter); -+ DEBUGFS_FWSTATS_ADD(pwr, elp_enter); -+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns); -+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_host); -+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp); -+ DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons); -+ DEBUGFS_FWSTATS_ADD(pwr, power_save_off); -+ DEBUGFS_FWSTATS_ADD(pwr, enable_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, disable_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps); -+ /* skipping cont_miss_bcns_spread for now */ -+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons); -+ -+ DEBUGFS_FWSTATS_ADD(mic, rx_pkts); -+ DEBUGFS_FWSTATS_ADD(mic, calc_failure); -+ -+ DEBUGFS_FWSTATS_ADD(aes, encrypt_fail); -+ DEBUGFS_FWSTATS_ADD(aes, decrypt_fail); -+ DEBUGFS_FWSTATS_ADD(aes, encrypt_packets); -+ DEBUGFS_FWSTATS_ADD(aes, decrypt_packets); -+ DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt); -+ DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt); -+ -+ DEBUGFS_FWSTATS_ADD(event, heart_beat); -+ DEBUGFS_FWSTATS_ADD(event, calibration); -+ DEBUGFS_FWSTATS_ADD(event, rx_mismatch); -+ DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); -+ DEBUGFS_FWSTATS_ADD(event, rx_pool); -+ DEBUGFS_FWSTATS_ADD(event, oom_late); -+ DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); -+ DEBUGFS_FWSTATS_ADD(event, tx_stuck); -+ -+ DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn); -+ DEBUGFS_FWSTATS_ADD(ps, 
pspoll_max_apturn); -+ DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_utilization); -+ -+ DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop); -+ DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); -+ -+ DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); -+ DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); -+ DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); -+ -+out: -+ if (ret < 0) -+ wl1251_debugfs_delete_files(wl); -+ -+ return ret; -+} -+ -+void wl1251_debugfs_reset(struct wl1251 *wl) -+{ -+ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); -+ wl->stats.retry_count = 0; -+ wl->stats.excessive_retries = 0; -+} -+ -+int wl1251_debugfs_init(struct wl1251 *wl) -+{ -+ int ret; -+ -+ wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); -+ -+ if (IS_ERR(wl->debugfs.rootdir)) { -+ ret = PTR_ERR(wl->debugfs.rootdir); -+ wl->debugfs.rootdir = NULL; -+ goto err; -+ } -+ -+ wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics", -+ wl->debugfs.rootdir); -+ -+ if (IS_ERR(wl->debugfs.fw_statistics)) { -+ ret = PTR_ERR(wl->debugfs.fw_statistics); -+ wl->debugfs.fw_statistics = NULL; -+ goto err_root; -+ } -+ -+ wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), -+ GFP_KERNEL); -+ -+ if (!wl->stats.fw_stats) { -+ ret = -ENOMEM; -+ goto err_fw; -+ } -+ -+ wl->stats.fw_stats_update = jiffies; -+ -+ ret = wl1251_debugfs_add_files(wl); -+ -+ if (ret < 0) -+ goto err_file; -+ -+ return 0; -+ -+err_file: -+ kfree(wl->stats.fw_stats); -+ wl->stats.fw_stats = NULL; -+ -+err_fw: -+ debugfs_remove(wl->debugfs.fw_statistics); -+ wl->debugfs.fw_statistics = NULL; -+ -+err_root: -+ debugfs_remove(wl->debugfs.rootdir); -+ wl->debugfs.rootdir = NULL; -+ -+err: -+ return ret; -+} -+ -+void wl1251_debugfs_exit(struct wl1251 *wl) -+{ -+ wl1251_debugfs_delete_files(wl); -+ -+ kfree(wl->stats.fw_stats); -+ wl->stats.fw_stats = NULL; -+ -+ debugfs_remove(wl->debugfs.fw_statistics); -+ wl->debugfs.fw_statistics = NULL; -+ -+ debugfs_remove(wl->debugfs.rootdir); -+ wl->debugfs.rootdir = NULL; -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_debugfs.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_debugfs.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_debugfs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_debugfs.h 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,33 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef WL1251_DEBUGFS_H -+#define WL1251_DEBUGFS_H -+ -+#include "wl1251.h" -+ -+int wl1251_debugfs_init(struct wl1251 *wl); -+void wl1251_debugfs_exit(struct wl1251 *wl); -+void wl1251_debugfs_reset(struct wl1251 *wl); -+ -+#endif /* WL1251_DEBUGFS_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_event.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_event.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_event.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_event.c 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,177 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1251.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+#include "wl1251_event.h" -+#include "wl1251_ps.h" -+ -+static int wl1251_event_scan_complete(struct wl1251 *wl, -+ struct event_mailbox *mbox) -+{ -+ if (wl->scanning) { -+ mutex_unlock(&wl->mutex); -+ ieee80211_scan_completed(wl->hw); -+ mutex_lock(&wl->mutex); -+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed"); -+ wl->scanning = false; -+ } -+ -+ return 0; -+} -+ -+#define WL1251_PS_ENTRY_RETRIES 3 -+static int wl1251_event_ps_report(struct wl1251 *wl, -+ struct event_mailbox *mbox) -+{ -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_EVENT, "ps status: %x", mbox->ps_status); -+ -+ switch (mbox->ps_status) { -+ case ENTER_POWER_SAVE_FAIL: -+ if (!wl->psm) { -+ wl->ps_entry_retry = 0; -+ break; -+ } -+ -+ if (wl->ps_entry_retry < WL1251_PS_ENTRY_RETRIES) { -+ ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); -+ wl->ps_entry_retry++; -+ } else { -+ wl1251_error("Power save entry failed, giving up"); -+ wl->ps_entry_retry = 0; -+ } -+ break; -+ case ENTER_POWER_SAVE_SUCCESS: -+ default: -+ wl->ps_entry_retry = 0; -+ break; -+ } -+ -+ return 0; -+} -+ -+static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox) -+{ -+ int ret; -+ u32 vector; -+ -+ vector = mbox->events_vector & ~(mbox->events_mask); -+ -+ if (vector & SCAN_COMPLETE_EVENT_ID) { -+ ret = wl1251_event_scan_complete(wl, mbox); -+ if (ret < 0) -+ return ret; -+ } -+ -+ if (vector & BSS_LOSE_EVENT_ID) { -+ wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); -+ -+ if (wl->psm_requested && wl->psm) { -+ ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); -+ if (ret < 0) -+ return ret; -+ } -+ } -+ -+ if (vector & PS_REPORT_EVENT_ID) { -+ wl1251_debug(DEBUG_EVENT, "PS_REPORT_EVENT_ID"); -+ ret = wl1251_event_ps_report(wl, 
mbox); -+ if (ret < 0) -+ return ret; -+ } -+ -+ if (wl->vif && (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID)) { -+ wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT"); -+ /* need to unlock mutex to avoid deadlocking with rtnl */ -+ mutex_unlock(&wl->mutex); -+ -+ /* indicate to the stack, that beacons have been lost */ -+ ieee80211_beacon_loss(wl->vif); -+ mutex_lock(&wl->mutex); -+ } -+ -+ if (vector & REGAINED_BSS_EVENT_ID) { -+ if (wl->psm_requested) { -+ ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); -+ if (ret < 0) -+ return ret; -+ } -+ } -+ -+ if (wl->vif && (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID)) { -+ wl1251_debug(DEBUG_EVENT, "ROAMING_TRIGGER_LOW_RSSI_EVENT"); -+ ieee80211_rssi_changed(wl->vif, IEEE80211_RSSI_STATE_LOW); -+ } -+ -+ if (wl->vif && (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID)) { -+ wl1251_debug(DEBUG_EVENT, -+ "ROAMING_TRIGGER_REGAINED_RSSI_EVENT"); -+ ieee80211_rssi_changed(wl->vif, IEEE80211_RSSI_STATE_HIGH); -+ } -+ -+ return 0; -+} -+ -+int wl1251_event_unmask(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_event_mbox_mask(wl, ~(wl->event_mask)); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+void wl1251_event_mbox_config(struct wl1251 *wl) -+{ -+ wl->mbox_ptr[0] = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR); -+ wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); -+} -+ -+int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num) -+{ -+ struct event_mailbox mbox; -+ int ret; -+ -+ if (mbox_num > 1) -+ return -EINVAL; -+ -+ /* first we read the mbox descriptor */ -+ wl1251_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, -+ sizeof(struct event_mailbox)); -+ -+ /* process the descriptor */ -+ ret = wl1251_event_process(wl, &mbox); -+ if (ret < 0) -+ return ret; -+ -+ /* then we let the firmware know it can go on...*/ -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); -+ -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_event.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_event.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_event.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_event.h 2011-06-22 13:19:32.923063273 +0200 -@@ -0,0 +1,128 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_EVENT_H__ -+#define __WL1251_EVENT_H__ -+ -+/* -+ * Mbox events -+ * -+ * The event mechanism is based on a pair of event buffers (buffers A and -+ * B) at fixed locations in the target's memory. The host processes one -+ * buffer while the other buffer continues to collect events. 
If the host -+ * is not processing events, an interrupt is issued to signal that a buffer -+ * is ready. Once the host is done with processing events from one buffer, -+ * it signals the target (with an ACK interrupt) that the event buffer is -+ * free. -+ */ -+ -+enum { -+ RESERVED1_EVENT_ID = BIT(0), -+ RESERVED2_EVENT_ID = BIT(1), -+ MEASUREMENT_START_EVENT_ID = BIT(2), -+ SCAN_COMPLETE_EVENT_ID = BIT(3), -+ CALIBRATION_COMPLETE_EVENT_ID = BIT(4), -+ ROAMING_TRIGGER_LOW_RSSI_EVENT_ID = BIT(5), -+ PS_REPORT_EVENT_ID = BIT(6), -+ SYNCHRONIZATION_TIMEOUT_EVENT_ID = BIT(7), -+ HEALTH_REPORT_EVENT_ID = BIT(8), -+ ACI_DETECTION_EVENT_ID = BIT(9), -+ DEBUG_REPORT_EVENT_ID = BIT(10), -+ MAC_STATUS_EVENT_ID = BIT(11), -+ DISCONNECT_EVENT_COMPLETE_ID = BIT(12), -+ JOIN_EVENT_COMPLETE_ID = BIT(13), -+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(14), -+ BSS_LOSE_EVENT_ID = BIT(15), -+ ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(16), -+ MEASUREMENT_COMPLETE_EVENT_ID = BIT(17), -+ AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(18), -+ SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(19), -+ PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(20), -+ RESET_BSS_EVENT_ID = BIT(21), -+ REGAINED_BSS_EVENT_ID = BIT(22), -+ ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID = BIT(23), -+ ROAMING_TRIGGER_LOW_SNR_EVENT_ID = BIT(24), -+ ROAMING_TRIGGER_REGAINED_SNR_EVENT_ID = BIT(25), -+ -+ DBG_EVENT_ID = BIT(26), -+ BT_PTA_SENSE_EVENT_ID = BIT(27), -+ BT_PTA_PREDICTION_EVENT_ID = BIT(28), -+ BT_PTA_AVALANCHE_EVENT_ID = BIT(29), -+ -+ PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(30), -+ -+ EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, -+}; -+ -+enum { -+ ENTER_POWER_SAVE_FAIL = 0, -+ ENTER_POWER_SAVE_SUCCESS, -+ EXIT_POWER_SAVE_FAIL, -+ EXIT_POWER_SAVE_SUCCESS -+}; -+ -+struct event_debug_report { -+ u8 debug_event_id; -+ u8 num_params; -+ u16 pad; -+ u32 report_1; -+ u32 report_2; -+ u32 report_3; -+} __attribute__ ((packed)); -+ -+struct event_mailbox { -+ u32 events_vector; -+ u32 events_mask; -+ u32 reserved_1; -+ u32 reserved_2; -+ -+ char average_rssi_level; -+ u8 ps_status; -+ u8 channel_switch_status; -+ u8 scheduled_scan_status; -+ -+ /* Channels scanned by the scheduled scan */ -+ u16 scheduled_scan_channels; -+ -+ /* If bit 0 is set -> target's fatal error */ -+ u16 health_report; -+ u16 bad_fft_counter; -+ u8 bt_pta_sense_info; -+ u8 bt_pta_protective_info; -+ u32 reserved; -+ u32 debug_report[2]; -+ -+ /* Number of FCS errors since last event */ -+ u32 fcs_err_counter; -+ -+ struct event_debug_report report; -+ u8 average_snr_level; -+ u8 padding[19]; -+} __attribute__ ((packed)); -+ -+int wl1251_event_unmask(struct wl1251 *wl); -+void wl1251_event_mbox_config(struct wl1251 *wl); -+int wl1251_event_handle(struct wl1251 *wl, u8 mbox); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251.h 2011-06-22 13:19:32.913063273 +0200 -@@ -0,0 +1,455 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008-2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_H__ -+#define __WL1251_H__ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRIVER_NAME "wl1251" -+#define DRIVER_PREFIX DRIVER_NAME ": " -+ -+enum { -+ DEBUG_NONE = 0, -+ DEBUG_IRQ = BIT(0), -+ DEBUG_SPI = BIT(1), -+ DEBUG_BOOT = BIT(2), -+ DEBUG_MAILBOX = BIT(3), -+ DEBUG_NETLINK = BIT(4), -+ DEBUG_EVENT = BIT(5), -+ DEBUG_TX = BIT(6), -+ DEBUG_RX = BIT(7), -+ DEBUG_SCAN = BIT(8), -+ DEBUG_CRYPT = BIT(9), -+ DEBUG_PSM = BIT(10), -+ DEBUG_MAC80211 = BIT(11), -+ DEBUG_CMD = BIT(12), -+ DEBUG_ACX = BIT(13), -+ DEBUG_ALL = ~0, -+}; -+ -+#define DEBUG_LEVEL (DEBUG_NONE) -+ -+#define DEBUG_DUMP_LIMIT 1024 -+ -+#define wl1251_error(fmt, arg...) \ -+ printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg) -+ -+#define wl1251_warning(fmt, arg...) \ -+ printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg) -+ -+#define wl1251_notice(fmt, arg...) \ -+ printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg) -+ -+#define wl1251_info(fmt, arg...) \ -+ printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg) -+ -+#define wl1251_debug(level, fmt, arg...) \ -+ do { \ -+ if (level & DEBUG_LEVEL) \ -+ printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \ -+ } while (0) -+ -+#define wl1251_dump(level, prefix, buf, len) \ -+ do { \ -+ if (level & DEBUG_LEVEL) \ -+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ -+ DUMP_PREFIX_OFFSET, 16, 1, \ -+ buf, \ -+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \ -+ 0); \ -+ } while (0) -+ -+#define wl1251_dump_ascii(level, prefix, buf, len) \ -+ do { \ -+ if (level & DEBUG_LEVEL) \ -+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ -+ DUMP_PREFIX_OFFSET, 16, 1, \ -+ buf, \ -+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \ -+ true); \ -+ } while (0) -+ -+#define WL1251_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ -+ CFG_BSSID_FILTER_EN | \ -+ CFG_MC_FILTER_EN) -+ -+#define WL1251_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN | \ -+ CFG_RX_MGMT_EN | \ -+ CFG_RX_DATA_EN | \ -+ CFG_RX_CTL_EN | \ -+ CFG_RX_BCN_EN | \ -+ CFG_RX_AUTH_EN | \ -+ CFG_RX_ASSOC_EN) -+ -+#define WL1251_BUSY_WORD_LEN 8 -+ -+struct boot_attr { -+ u32 radio_type; -+ u8 mac_clock; -+ u8 arm_clock; -+ int firmware_debug; -+ u32 minor; -+ u32 major; -+ u32 bugfix; -+}; -+ -+enum wl1251_state { -+ WL1251_STATE_OFF, -+ WL1251_STATE_ON, -+ WL1251_STATE_PLT, -+}; -+ -+enum wl1251_partition_type { -+ PART_DOWN, -+ PART_WORK, -+ PART_DRPW, -+ -+ PART_TABLE_LEN -+}; -+ -+struct wl1251_partition { -+ u32 size; -+ u32 start; -+}; -+ -+struct wl1251_partition_set { -+ struct wl1251_partition mem; -+ struct wl1251_partition reg; -+}; -+ -+struct wl1251_stats { -+ struct acx_statistics *fw_stats; -+ unsigned long fw_stats_update; -+ -+ unsigned int retry_count; -+ unsigned int excessive_retries; -+}; -+ -+struct wl1251_debugfs { -+ struct dentry *rootdir; -+ struct dentry *fw_statistics; -+ -+ struct dentry *tx_internal_desc_overflow; -+ -+ struct dentry *rx_out_of_mem; -+ struct dentry *rx_hdr_overflow; -+ struct dentry *rx_hw_stuck; -+ struct dentry *rx_dropped; -+ struct dentry *rx_fcs_err; -+ struct dentry 
*rx_xfr_hint_trig; -+ struct dentry *rx_path_reset; -+ struct dentry *rx_reset_counter; -+ -+ struct dentry *dma_rx_requested; -+ struct dentry *dma_rx_errors; -+ struct dentry *dma_tx_requested; -+ struct dentry *dma_tx_errors; -+ -+ struct dentry *isr_cmd_cmplt; -+ struct dentry *isr_fiqs; -+ struct dentry *isr_rx_headers; -+ struct dentry *isr_rx_mem_overflow; -+ struct dentry *isr_rx_rdys; -+ struct dentry *isr_irqs; -+ struct dentry *isr_tx_procs; -+ struct dentry *isr_decrypt_done; -+ struct dentry *isr_dma0_done; -+ struct dentry *isr_dma1_done; -+ struct dentry *isr_tx_exch_complete; -+ struct dentry *isr_commands; -+ struct dentry *isr_rx_procs; -+ struct dentry *isr_hw_pm_mode_changes; -+ struct dentry *isr_host_acknowledges; -+ struct dentry *isr_pci_pm; -+ struct dentry *isr_wakeups; -+ struct dentry *isr_low_rssi; -+ -+ struct dentry *wep_addr_key_count; -+ struct dentry *wep_default_key_count; -+ /* skipping wep.reserved */ -+ struct dentry *wep_key_not_found; -+ struct dentry *wep_decrypt_fail; -+ struct dentry *wep_packets; -+ struct dentry *wep_interrupt; -+ -+ struct dentry *pwr_ps_enter; -+ struct dentry *pwr_elp_enter; -+ struct dentry *pwr_missing_bcns; -+ struct dentry *pwr_wake_on_host; -+ struct dentry *pwr_wake_on_timer_exp; -+ struct dentry *pwr_tx_with_ps; -+ struct dentry *pwr_tx_without_ps; -+ struct dentry *pwr_rcvd_beacons; -+ struct dentry *pwr_power_save_off; -+ struct dentry *pwr_enable_ps; -+ struct dentry *pwr_disable_ps; -+ struct dentry *pwr_fix_tsf_ps; -+ /* skipping cont_miss_bcns_spread for now */ -+ struct dentry *pwr_rcvd_awake_beacons; -+ -+ struct dentry *mic_rx_pkts; -+ struct dentry *mic_calc_failure; -+ -+ struct dentry *aes_encrypt_fail; -+ struct dentry *aes_decrypt_fail; -+ struct dentry *aes_encrypt_packets; -+ struct dentry *aes_decrypt_packets; -+ struct dentry *aes_encrypt_interrupt; -+ struct dentry *aes_decrypt_interrupt; -+ -+ struct dentry *event_heart_beat; -+ struct dentry *event_calibration; -+ struct dentry *event_rx_mismatch; -+ struct dentry *event_rx_mem_empty; -+ struct dentry *event_rx_pool; -+ struct dentry *event_oom_late; -+ struct dentry *event_phy_transmit_error; -+ struct dentry *event_tx_stuck; -+ -+ struct dentry *ps_pspoll_timeouts; -+ struct dentry *ps_upsd_timeouts; -+ struct dentry *ps_upsd_max_sptime; -+ struct dentry *ps_upsd_max_apturn; -+ struct dentry *ps_pspoll_max_apturn; -+ struct dentry *ps_pspoll_utilization; -+ struct dentry *ps_upsd_utilization; -+ -+ struct dentry *rxpipe_rx_prep_beacon_drop; -+ struct dentry *rxpipe_descr_host_int_trig_rx_data; -+ struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data; -+ struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data; -+ struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; -+ -+ struct dentry *tx_queue_len; -+ -+ struct dentry *retry_count; -+ struct dentry *excessive_retries; -+}; -+ -+enum wl1251_bt_coex_mode { -+ WL1251_BT_COEX_OFF, -+ WL1251_BT_COEX_ENABLE, -+ WL1251_BT_COEX_MONOAUDIO -+}; -+ -+struct wl1251 { -+ struct ieee80211_hw *hw; -+ bool mac80211_registered; -+ -+ struct spi_device *spi; -+ -+ void (*set_power)(bool enable); -+ int irq; -+ -+ spinlock_t wl_lock; -+ -+ enum wl1251_state state; -+ struct mutex mutex; -+ -+ int physical_mem_addr; -+ int physical_reg_addr; -+ int virtual_mem_addr; -+ int virtual_reg_addr; -+ -+ int cmd_box_addr; -+ int event_box_addr; -+ struct boot_attr boot_attr; -+ -+ u8 *fw; -+ size_t fw_len; -+ u8 *nvs; -+ size_t nvs_len; -+ -+ u8 bssid[ETH_ALEN]; -+ u8 mac_addr[ETH_ALEN]; -+ u8 bss_type; -+ u8 
ssid[IW_ESSID_MAX_SIZE + 1]; -+ u8 ssid_len; -+ u8 listen_int; -+ int channel; -+ -+ void *target_mem_map; -+ struct acx_data_path_params_resp *data_path; -+ -+ /* Accounting for allocated / available TX blocks on HW */ -+ u32 tx_blocks_available; -+ u32 tx_descriptors_available; -+ u32 tx_results_count; -+ -+ /* Transmitted TX packets counter for chipset interface */ -+ int tx_packets_count; -+ -+ /* Time-offset between host and chipset clocks */ -+ int time_offset; -+ -+ /* Session counter for the chipset */ -+ int session_counter; -+ -+ /* Number of TX packets transferred to the FW, modulo 16 */ -+ u32 data_in_count; -+ -+ /* Frames scheduled for transmission, not handled yet */ -+ struct sk_buff_head tx_queue; -+ bool tx_queue_stopped; -+ -+ struct work_struct tx_work; -+ struct work_struct filter_work; -+ struct wl1251_filter_params *filter_params; -+ -+ /* Pending TX frames */ -+ struct sk_buff *tx_frames[16]; -+ -+ /* -+ * Index pointing to the next TX complete entry -+ * in the cyclic XT complete array we get from -+ * the FW. -+ */ -+ u32 next_tx_complete; -+ -+ /* FW Rx counter */ -+ u32 rx_counter; -+ -+ /* Rx frames handled */ -+ u32 rx_handled; -+ -+ /* Current double buffer */ -+ u32 rx_current_buffer; -+ u32 rx_last_id; -+ -+ /* The target interrupt mask */ -+ u32 intr_mask; -+ struct work_struct irq_work; -+ -+ /* The mbox event mask */ -+ u32 event_mask; -+ -+ /* Mailbox pointers */ -+ u32 mbox_ptr[2]; -+ -+ /* Are we currently scanning */ -+ bool scanning; -+ -+ unsigned long last_event; -+ -+ /* Our association ID */ -+ u16 aid; -+ -+ /* Default key (for WEP) */ -+ u32 default_key; -+ -+ unsigned int tx_mgmt_frm_rate; -+ unsigned int tx_mgmt_frm_mod; -+ -+ unsigned int rx_config; -+ unsigned int rx_filter; -+ -+ /* is firmware in elp mode */ -+ bool elp; -+ -+ struct delayed_work elp_work; -+ -+ /* we can be in psm, but not in elp, we have to differentiate */ -+ bool psm; -+ -+ /* PSM mode requested */ -+ bool psm_requested; -+ -+ u8 ps_entry_retry; -+ -+ u16 beacon_int; -+ u8 dtim_period; -+ -+ /* in dBm */ -+ int power_level; -+ -+ struct wl1251_stats stats; -+ struct wl1251_debugfs debugfs; -+ -+ u32 buffer_32; -+ u32 buffer_cmd; -+ u8 buffer_busyword[WL1251_BUSY_WORD_LEN]; -+ struct wl1251_rx_descriptor *rx_descriptor; -+ -+ struct wl1271_fw_status *fw_status; -+ -+ struct ieee80211_vif *vif; -+ -+ enum wl1251_bt_coex_mode bt_coex_mode; -+ -+ u32 chip_id; -+ char fw_ver[21]; -+}; -+ -+int wl1251_plt_start(struct wl1251 *wl); -+int wl1251_plt_stop(struct wl1251 *wl); -+ -+#define DEFAULT_HW_GEN_MODULATION_TYPE CCK_LONG /* Long Preamble */ -+#define DEFAULT_HW_GEN_TX_RATE RATE_2MBPS -+#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ -+ -+#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */ -+ -+#define WL1251_DEFAULT_POWER_LEVEL 20 -+ -+#define WL1251_TX_QUEUE_MAX_LENGTH 20 -+ -+#define WL1251_DEFAULT_BEACON_INT 100 -+#define WL1251_DEFAULT_DTIM_PERIOD 1 -+ -+#define WL1251_DEFAULT_CHANNEL 0 -+ -+#define WL1251_DEFAULT_BET_CONSECUTIVE 10 -+ -+#define CHIP_ID_1251_PG10 (0x7010101) -+#define CHIP_ID_1251_PG11 (0x7020101) -+#define CHIP_ID_1251_PG12 (0x7030101) -+#define CHIP_ID_1271_PG10 (0x4030101) -+#define CHIP_ID_1271_PG20 (0x4030111) -+ -+#define WL1251_FW_NAME "wl1251-fw.bin" -+#define WL1251_NVS_NAME "wl1251-nvs.bin" -+ -+#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */ -+ -+#define WL1251_PART_DOWN_MEM_START 0x0 -+#define WL1251_PART_DOWN_MEM_SIZE 0x16800 -+#define WL1251_PART_DOWN_REG_START REGISTERS_BASE -+#define 
WL1251_PART_DOWN_REG_SIZE REGISTERS_DOWN_SIZE -+ -+#define WL1251_PART_WORK_MEM_START 0x28000 -+#define WL1251_PART_WORK_MEM_SIZE 0x14000 -+#define WL1251_PART_WORK_REG_START REGISTERS_BASE -+#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE -+ -+#define WL12XX_DEFAULT_LOW_RSSI_THRESHOLD -75 -+#define WL12XX_DEFAULT_LOW_RSSI_WEIGHT 10 -+#define WL12XX_DEFAULT_LOW_RSSI_DEPTH 10 -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_init.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_init.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_init.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_init.c 2011-06-22 13:19:32.923063273 +0200 -@@ -0,0 +1,423 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+ -+#include "wl1251_init.h" -+#include "wl1251.h" -+#include "wl12xx_80211.h" -+#include "wl1251_acx.h" -+#include "wl1251_cmd.h" -+#include "wl1251_reg.h" -+ -+int wl1251_hw_init_hwenc_config(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_feature_cfg(wl); -+ if (ret < 0) { -+ wl1251_warning("couldn't set feature config"); -+ return ret; -+ } -+ -+ ret = wl1251_acx_default_key(wl, wl->default_key); -+ if (ret < 0) { -+ wl1251_warning("couldn't set default key"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+int wl1251_hw_init_templates_config(struct wl1251 *wl) -+{ -+ int ret; -+ u8 partial_vbm[PARTIAL_VBM_MAX]; -+ -+ /* send empty templates for fw memory reservation */ -+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL, -+ sizeof(struct wl12xx_probe_req_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL, -+ sizeof(struct wl12xx_null_data_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL, -+ sizeof(struct wl12xx_ps_poll_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL, -+ sizeof -+ (struct wl12xx_qos_null_data_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL, -+ sizeof -+ (struct wl12xx_probe_resp_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_template_set(wl, CMD_BEACON, NULL, -+ sizeof -+ (struct wl12xx_beacon_template)); -+ if (ret < 0) -+ return ret; -+ -+ /* tim templates, first reserve space then allocate an empty one */ -+ memset(partial_vbm, 0, PARTIAL_VBM_MAX); -+ ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter) -+{ -+ 
int ret; -+ -+ ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_rx_config(wl, config, filter); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_phy_config(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_pd_threshold(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_group_address_tbl(wl, NULL, 0, true); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_service_period_timeout(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_low_rssi(wl, WL12XX_DEFAULT_LOW_RSSI_THRESHOLD, -+ WL12XX_DEFAULT_LOW_RSSI_WEIGHT, -+ WL12XX_DEFAULT_LOW_RSSI_DEPTH, -+ WL12XX_ACX_LOW_RSSI_TYPE_EDGE); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_beacon_filter(struct wl1251 *wl) -+{ -+ int ret; -+ -+ /* disable beacon filtering at this stage */ -+ ret = wl1251_acx_beacon_filter_opt(wl, false); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_beacon_filter_table(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_pta(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_sg_configure(wl, true); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_energy_detection(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_cca_threshold(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_bcn_dtim_options(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_hw_init_power_auth(struct wl1251 *wl) -+{ -+ return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); -+} -+ -+int wl1251_hw_init_mem_config(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_acx_mem_cfg(wl); -+ if (ret < 0) -+ return ret; -+ -+ wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map), -+ GFP_KERNEL); -+ if (!wl->target_mem_map) { -+ wl1251_error("couldn't allocate target memory map"); -+ return -ENOMEM; -+ } -+ -+ /* we now ask for the firmware built memory map */ -+ ret = wl1251_acx_mem_map(wl, wl->target_mem_map, -+ sizeof(struct wl1251_acx_mem_map)); -+ if (ret < 0) { -+ wl1251_error("couldn't retrieve firmware memory map"); -+ kfree(wl->target_mem_map); -+ wl->target_mem_map = NULL; -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int wl1251_hw_init_txq_fill(u8 qid, -+ struct acx_tx_queue_qos_config *config, -+ u32 num_blocks) -+{ -+ config->qid = qid; -+ -+ switch (qid) { -+ case QOS_AC_BE: -+ config->high_threshold = -+ (QOS_TX_HIGH_BE_DEF * num_blocks) / 100; -+ config->low_threshold = -+ (QOS_TX_LOW_BE_DEF * num_blocks) / 100; -+ break; -+ case QOS_AC_BK: -+ config->high_threshold = -+ (QOS_TX_HIGH_BK_DEF * num_blocks) / 100; -+ config->low_threshold = -+ (QOS_TX_LOW_BK_DEF * num_blocks) / 100; -+ break; -+ case QOS_AC_VI: -+ config->high_threshold = -+ (QOS_TX_HIGH_VI_DEF * num_blocks) / 100; -+ config->low_threshold = -+ (QOS_TX_LOW_VI_DEF * num_blocks) / 100; -+ break; -+ case QOS_AC_VO: -+ config->high_threshold = -+ (QOS_TX_HIGH_VO_DEF * num_blocks) / 100; -+ config->low_threshold = -+ (QOS_TX_LOW_VO_DEF * num_blocks) / 100; -+ break; -+ default: -+ wl1251_error("Invalid TX queue id: %d", qid); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl) -+{ -+ struct 
acx_tx_queue_qos_config *config; -+ struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map; -+ int ret, i; -+ -+ wl1251_debug(DEBUG_ACX, "acx tx queue config"); -+ -+ config = kzalloc(sizeof(*config), GFP_KERNEL); -+ if (!config) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ for (i = 0; i < MAX_NUM_OF_AC; i++) { -+ ret = wl1251_hw_init_txq_fill(i, config, -+ wl_mem_map->num_tx_mem_blocks); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG, -+ config, sizeof(*config)); -+ if (ret < 0) -+ goto out; -+ } -+ -+out: -+ kfree(config); -+ return ret; -+} -+ -+static int wl1251_hw_init_data_path_config(struct wl1251 *wl) -+{ -+ int ret; -+ -+ /* asking for the data path parameters */ -+ wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp), -+ GFP_KERNEL); -+ if (!wl->data_path) { -+ wl1251_error("Couldnt allocate data path parameters"); -+ return -ENOMEM; -+ } -+ -+ ret = wl1251_acx_data_path_params(wl, wl->data_path); -+ if (ret < 0) { -+ kfree(wl->data_path); -+ wl->data_path = NULL; -+ return ret; -+ } -+ -+ return 0; -+} -+ -+ -+int wl1251_hw_init(struct wl1251 *wl) -+{ -+ struct wl1251_acx_mem_map *wl_mem_map; -+ int ret; -+ -+ ret = wl1251_hw_init_hwenc_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ /* Template settings */ -+ ret = wl1251_hw_init_templates_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ /* Default memory configuration */ -+ ret = wl1251_hw_init_mem_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ /* Default data path configuration */ -+ ret = wl1251_hw_init_data_path_config(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* RX config */ -+ ret = wl1251_hw_init_rx_config(wl, -+ RX_CFG_PROMISCUOUS | RX_CFG_TSF, -+ RX_FILTER_OPTION_DEF); -+ /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, -+ RX_FILTER_OPTION_FILTER_ALL); */ -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* TX queues config */ -+ ret = wl1251_hw_init_tx_queue_config(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* PHY layer config */ -+ ret = wl1251_hw_init_phy_config(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Initialize connection monitoring thresholds */ -+ ret = wl1251_acx_conn_monit_params(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Beacon filtering */ -+ ret = wl1251_hw_init_beacon_filter(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Bluetooth WLAN coexistence */ -+ ret = wl1251_hw_init_pta(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Energy detection */ -+ ret = wl1251_hw_init_energy_detection(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Beacons and boradcast settings */ -+ ret = wl1251_hw_init_beacon_broadcast(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Enable data path */ -+ ret = wl1251_cmd_data_path(wl, wl->channel, 1); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ /* Default power state */ -+ ret = wl1251_hw_init_power_auth(wl); -+ if (ret < 0) -+ goto out_free_data_path; -+ -+ wl_mem_map = wl->target_mem_map; -+ wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x", -+ wl_mem_map->num_tx_mem_blocks, -+ wl->data_path->tx_control_addr, -+ wl_mem_map->num_rx_mem_blocks, -+ wl->data_path->rx_control_addr); -+ -+ return 0; -+ -+ out_free_data_path: -+ kfree(wl->data_path); -+ -+ out_free_memmap: -+ kfree(wl->target_mem_map); -+ -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_init.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_init.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_init.h 
1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_init.h 2011-06-22 13:19:32.923063273 +0200 -@@ -0,0 +1,41 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_INIT_H__ -+#define __WL1251_INIT_H__ -+ -+#include "wl1251.h" -+ -+int wl1251_hw_init_hwenc_config(struct wl1251 *wl); -+int wl1251_hw_init_templates_config(struct wl1251 *wl); -+int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter); -+int wl1251_hw_init_phy_config(struct wl1251 *wl); -+int wl1251_hw_init_beacon_filter(struct wl1251 *wl); -+int wl1251_hw_init_pta(struct wl1251 *wl); -+int wl1251_hw_init_energy_detection(struct wl1251 *wl); -+int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl); -+int wl1251_hw_init_power_auth(struct wl1251 *wl); -+int wl1251_hw_init_mem_config(struct wl1251 *wl); -+int wl1251_hw_init(struct wl1251 *wl); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_main.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_main.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_main.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_main.c 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,2128 @@ -+ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2008-2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl12xx_80211.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+#include "wl1251_event.h" -+#include "wl1251_tx.h" -+#include "wl1251_rx.h" -+#include "wl1251_ps.h" -+#include "wl1251_init.h" -+#include "wl1251_netlink.h" -+#include "wl1251_debugfs.h" -+#include "wl1251_boot.h" -+ -+static ssize_t wl1251_sysfs_show_tx_mgmt_frm_rate(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct wl1251 *wl = dev_get_drvdata(dev); -+ ssize_t len; -+ int val; -+ -+ /* FIXME: what's the maximum length of buf? page size?*/ -+ len = 500; -+ -+ switch (wl->tx_mgmt_frm_rate) { -+ /* skip 1 and 12 Mbps because they have same value 0x0a */ -+ case RATE_2MBPS: -+ val = 20; -+ break; -+ case RATE_5_5MBPS: -+ val = 55; -+ break; -+ case RATE_11MBPS: -+ val = 110; -+ break; -+ case RATE_6MBPS: -+ val = 60; -+ break; -+ case RATE_9MBPS: -+ val = 90; -+ break; -+ case RATE_12MBPS: -+ val = 120; -+ break; -+ case RATE_18MBPS: -+ val = 180; -+ break; -+ case RATE_24MBPS: -+ val = 240; -+ break; -+ case RATE_36MBPS: -+ val = 360; -+ break; -+ case RATE_48MBPS: -+ val = 480; -+ break; -+ case RATE_54MBPS: -+ val = 540; -+ break; -+ default: -+ val = 10; -+ } -+ -+ /* for 1 and 12 Mbps we have to check the modulation */ -+ if (wl->tx_mgmt_frm_rate == RATE_1MBPS) { -+ switch (wl->tx_mgmt_frm_rate) { -+ case CCK_LONG: -+ val = 10; -+ break; -+ case OFDM: -+ val = 120; -+ break; -+ default: -+ val = 10; -+ break; -+ } -+ } -+ len = snprintf(buf, len, "%d", val); -+ -+ return len; -+} -+ -+static ssize_t wl1251_sysfs_store_tx_mgmt_frm_rate(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct wl1251 *wl = dev_get_drvdata(dev); -+ unsigned long res; -+ int ret; -+ -+ ret = strict_strtoul(buf, 10, &res); -+ -+ if (ret < 0) { -+ wl1251_warning("incorrect value written to tx_mgmt_frm_rate"); -+ return 0; -+ } -+ -+ switch (res) { -+ case 10: -+ wl->tx_mgmt_frm_rate = RATE_1MBPS; -+ wl->tx_mgmt_frm_mod = CCK_LONG; -+ break; -+ case 20: -+ wl->tx_mgmt_frm_rate = RATE_2MBPS; -+ wl->tx_mgmt_frm_mod = CCK_LONG; -+ break; -+ case 55: -+ wl->tx_mgmt_frm_rate = RATE_5_5MBPS; -+ wl->tx_mgmt_frm_mod = CCK_LONG; -+ break; -+ case 110: -+ wl->tx_mgmt_frm_rate = RATE_11MBPS; -+ wl->tx_mgmt_frm_mod = CCK_LONG; -+ break; -+ case 60: -+ wl->tx_mgmt_frm_rate = RATE_6MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 90: -+ wl->tx_mgmt_frm_rate = RATE_9MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 120: -+ wl->tx_mgmt_frm_rate = RATE_12MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 180: -+ wl->tx_mgmt_frm_rate = RATE_18MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 240: -+ wl->tx_mgmt_frm_rate = RATE_24MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 360: -+ wl->tx_mgmt_frm_rate = RATE_36MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 480: -+ wl->tx_mgmt_frm_rate = RATE_48MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ case 540: -+ wl->tx_mgmt_frm_rate = RATE_54MBPS; -+ wl->tx_mgmt_frm_mod = OFDM; -+ break; -+ default: -+ wl1251_warning("incorrect value written to tx_mgmt_frm_rate"); -+ return 0; -+ } -+ -+ return count; 
-+} -+ -+static ssize_t wl1251_sysfs_show_bt_coex_mode(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct wl1251 *wl = dev_get_drvdata(dev); -+ ssize_t len; -+ -+ /* FIXME: what's the maximum length of buf? page size?*/ -+ len = 500; -+ -+ mutex_lock(&wl->mutex); -+ len = snprintf(buf, len, "%d\n\n%d - off\n%d - on\n%d - monoaudio\n", -+ wl->bt_coex_mode, -+ WL1251_BT_COEX_OFF, -+ WL1251_BT_COEX_ENABLE, -+ WL1251_BT_COEX_MONOAUDIO); -+ mutex_unlock(&wl->mutex); -+ -+ return len; -+ -+} -+ -+static ssize_t wl1251_sysfs_store_bt_coex_mode(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct wl1251 *wl = dev_get_drvdata(dev); -+ unsigned long res; -+ int ret; -+ -+ ret = strict_strtoul(buf, 10, &res); -+ -+ if (ret < 0) { -+ wl1251_warning("incorrect value written to bt_coex_mode"); -+ return count; -+ } -+ -+ mutex_lock(&wl->mutex); -+ -+ if (res == wl->bt_coex_mode) -+ goto out; -+ -+ switch (res) { -+ case WL1251_BT_COEX_OFF: -+ case WL1251_BT_COEX_ENABLE: -+ case WL1251_BT_COEX_MONOAUDIO: -+ wl->bt_coex_mode = res; -+ break; -+ default: -+ wl1251_warning("incorrect value written to bt_coex_mode"); -+ goto out; -+ } -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl1251_acx_sg_configure(wl, false); -+ wl1251_ps_elp_sleep(wl); -+ -+ out: -+ mutex_unlock(&wl->mutex); -+ return count; -+} -+ -+static DEVICE_ATTR(tx_mgmt_frm_rate, S_IRUGO | S_IWUSR, -+ wl1251_sysfs_show_tx_mgmt_frm_rate, -+ wl1251_sysfs_store_tx_mgmt_frm_rate); -+ -+static DEVICE_ATTR(bt_coex_mode, S_IRUGO | S_IWUSR, -+ wl1251_sysfs_show_bt_coex_mode, -+ wl1251_sysfs_store_bt_coex_mode); -+ -+static void wl1251_disable_interrupts(struct wl1251 *wl) -+{ -+ disable_irq(wl->irq); -+} -+ -+static void wl1251_power_off(struct wl1251 *wl) -+{ -+ wl->set_power(false); -+} -+ -+static void wl1251_power_on(struct wl1251 *wl) -+{ -+ wl->set_power(true); -+} -+ -+static irqreturn_t wl1251_irq(int irq, void *cookie) -+{ -+ struct wl1251 *wl; -+ -+ wl1251_debug(DEBUG_IRQ, "IRQ"); -+ -+ wl = cookie; -+ -+ queue_work(wl->hw->workqueue, &wl->irq_work); -+ -+ return IRQ_HANDLED; -+} -+ -+static int wl1251_fetch_firmware(struct wl1251 *wl) -+{ -+ const struct firmware *fw; -+ int ret; -+ -+ ret = request_firmware(&fw, WL1251_FW_NAME, &wl->spi->dev); -+ -+ if (ret < 0) { -+ wl1251_error("could not get firmware: %d", ret); -+ return ret; -+ } -+ -+ if (fw->size % 4) { -+ wl1251_error("firmware size is not multiple of 32 bits: %zu", -+ fw->size); -+ ret = -EILSEQ; -+ goto out; -+ } -+ -+ wl->fw_len = fw->size; -+ wl->fw = vmalloc(wl->fw_len); -+ -+ if (!wl->fw) { -+ wl1251_error("could not allocate memory for the firmware"); -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ memcpy(wl->fw, fw->data, wl->fw_len); -+ -+ ret = 0; -+ -+out: -+ release_firmware(fw); -+ -+ return ret; -+} -+ -+static int wl1251_fetch_nvs(struct wl1251 *wl) -+{ -+ const struct firmware *fw; -+ int ret; -+ -+ ret = request_firmware(&fw, WL1251_NVS_NAME, &wl->spi->dev); -+ -+ if (ret < 0) { -+ wl1251_error("could not get nvs file: %d", ret); -+ return ret; -+ } -+ -+ if (fw->size % 4) { -+ wl1251_error("nvs size is not multiple of 32 bits: %zu", -+ fw->size); -+ ret = -EILSEQ; -+ goto out; -+ } -+ -+ wl->nvs_len = fw->size; -+ wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL); -+ -+ if (!wl->nvs) { -+ wl1251_error("could not allocate memory for the nvs file"); -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ memcpy(wl->nvs, fw->data, wl->nvs_len); -+ -+ ret = 0; -+ -+out: -+ 
release_firmware(fw); -+ -+ return ret; -+} -+ -+static void wl1251_fw_wakeup(struct wl1251 *wl) -+{ -+ u32 elp_reg; -+ -+ elp_reg = ELPCTRL_WAKE_UP; -+ wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); -+ elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); -+ -+ if (!(elp_reg & ELPCTRL_WLAN_READY)) -+ wl1251_warning("WLAN not ready"); -+} -+ -+static int wl1251_chip_wakeup(struct wl1251 *wl) -+{ -+ int ret = 0; -+ -+ wl1251_power_on(wl); -+ msleep(WL1251_POWER_ON_SLEEP); -+ wl1251_spi_reset(wl); -+ wl1251_spi_init(wl); -+ -+ /* We don't need a real memory partition here, because we only want -+ * to use the registers at this point. */ -+ wl1251_set_partition(wl, -+ 0x00000000, -+ 0x00000000, -+ REGISTERS_BASE, -+ REGISTERS_DOWN_SIZE); -+ -+ /* ELP module wake up */ -+ wl1251_fw_wakeup(wl); -+ -+ /* whal_FwCtrl_BootSm() */ -+ -+ /* 0. read chip id from CHIP_ID */ -+ wl->chip_id = wl1251_reg_read32(wl, CHIP_ID_B); -+ -+ /* 1. check if chip id is valid */ -+ -+ switch (wl->chip_id) { -+ case CHIP_ID_1251_PG12: -+ wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)", -+ wl->chip_id); -+ break; -+ case CHIP_ID_1251_PG10: -+ case CHIP_ID_1251_PG11: -+ default: -+ wl1251_error("unsupported chip id: 0x%x", wl->chip_id); -+ ret = -ENODEV; -+ goto out; -+ } -+ -+ if (wl->fw == NULL) { -+ ret = wl1251_fetch_firmware(wl); -+ if (ret < 0) -+ goto out; -+ } -+ -+ /* No NVS from netlink, try to get it from the filesystem */ -+ if (wl->nvs == NULL) { -+ ret = wl1251_fetch_nvs(wl); -+ if (ret < 0) -+ goto out; -+ } -+ -+out: -+ return ret; -+} -+ -+#define WL1251_EVENT_TIMEOUT 10000 -+#define WL1251_IRQ_LOOP_COUNT 10 -+static void wl1251_irq_work(struct work_struct *work) -+{ -+ u32 intr, ctr = WL1251_IRQ_LOOP_COUNT; -+ struct wl1251 *wl = -+ container_of(work, struct wl1251, irq_work); -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ wl1251_debug(DEBUG_IRQ, "IRQ work"); -+ -+ if (wl->state == WL1251_STATE_OFF) -+ goto out; -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1251_ACX_INTR_ALL); -+ -+ intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); -+ wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr); -+ -+ do { -+ if (wl->data_path) { -+ wl->rx_counter = wl1251_mem_read32( -+ wl, wl->data_path->rx_control_addr); -+ -+ /* We handle a frmware bug here */ -+ switch ((wl->rx_counter - wl->rx_handled) & 0xf) { -+ case 0: -+ wl1251_debug(DEBUG_IRQ, -+ "RX: FW and host in sync"); -+ intr &= ~WL1251_ACX_INTR_RX0_DATA; -+ intr &= ~WL1251_ACX_INTR_RX1_DATA; -+ break; -+ case 1: -+ wl1251_debug(DEBUG_IRQ, "RX: FW +1"); -+ intr |= WL1251_ACX_INTR_RX0_DATA; -+ intr &= ~WL1251_ACX_INTR_RX1_DATA; -+ break; -+ case 2: -+ wl1251_debug(DEBUG_IRQ, "RX: FW +2"); -+ intr |= WL1251_ACX_INTR_RX0_DATA; -+ intr |= WL1251_ACX_INTR_RX1_DATA; -+ break; -+ default: -+ wl1251_warning( -+ "RX: FW and host out of sync: %d", -+ wl->rx_counter - wl->rx_handled); -+ break; -+ } -+ -+ wl->rx_handled = wl->rx_counter; -+ -+ wl1251_debug(DEBUG_IRQ, "RX counter: %d", -+ wl->rx_counter); -+ } -+ -+ intr &= wl->intr_mask; -+ -+ if (intr == 0) { -+ wl1251_debug(DEBUG_IRQ, "INTR is 0"); -+ goto out_sleep; -+ } -+ -+ if (intr & WL1251_ACX_INTR_RX0_DATA) { -+ wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA"); -+ wl1251_rx(wl); -+ } -+ -+ if (intr & WL1251_ACX_INTR_RX1_DATA) { -+ wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA"); -+ wl1251_rx(wl); -+ } -+ -+ if (intr & WL1251_ACX_INTR_TX_RESULT) { -+ wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT"); -+ 
wl1251_tx_complete(wl); -+ } -+ -+ if (intr & (WL1251_ACX_INTR_EVENT_A | -+ WL1251_ACX_INTR_EVENT_B)) { -+ wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", -+ intr); -+ if (intr & WL1251_ACX_INTR_EVENT_A) -+ wl1251_event_handle(wl, 0); -+ else -+ wl1251_event_handle(wl, 1); -+ -+ wl->last_event = jiffies + -+ msecs_to_jiffies(WL1251_EVENT_TIMEOUT); -+ } -+ -+ if (intr & WL1251_ACX_INTR_INIT_COMPLETE) -+ wl1251_debug(DEBUG_IRQ, -+ "WL1251_ACX_INTR_INIT_COMPLETE"); -+ -+ if (--ctr == 0) -+ break; -+ -+ intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); -+ } while (intr); -+ -+out_sleep: -+ /* FIXME: -+ * Occasionally the firmware puts mailbox events into the mailbox -+ * for the host to read, but fails to flag the appropriate mailbox -+ * interrupt. This causes the event mailbox to get jammed. This -+ * work-a-round wakes the event queue periodically to avoid the jam. -+ * -+ * The real fix involves a firmware-side and host-side counter -+ * mechanism, similar to the one above for the RX path. -+ */ -+ if (time_after(jiffies, wl->last_event) && ctr) { -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, -+ INTR_TRIG_EVENT_ACK); -+ wl->last_event = jiffies + -+ msecs_to_jiffies(WL1251_EVENT_TIMEOUT); -+ } -+ wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask)); -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel, -+ u16 beacon_interval, u8 dtim_period) -+{ -+ int ret; -+ -+ ret = wl1251_acx_frame_rates(wl, DEFAULT_HW_GEN_TX_RATE, -+ DEFAULT_HW_GEN_MODULATION_TYPE, -+ wl->tx_mgmt_frm_rate, -+ wl->tx_mgmt_frm_mod); -+ if (ret < 0) -+ goto out; -+ -+ -+ ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval, -+ dtim_period); -+ if (ret < 0) -+ goto out; -+ -+ /* -+ * FIXME: we should wait for JOIN_EVENT_COMPLETE_ID but to simplify -+ * locking we just sleep instead, for now -+ */ -+ msleep(10); -+ -+out: -+ return ret; -+} -+struct wl1251_filter_params { -+ unsigned int filters; -+ unsigned int changed; -+ int mc_list_length; -+ u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; -+}; -+ -+#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ -+ FIF_ALLMULTI | \ -+ FIF_FCSFAIL | \ -+ FIF_BCN_PRBRESP_PROMISC | \ -+ FIF_CONTROL | \ -+ FIF_OTHER_BSS) -+ -+static void wl1251_filter_work(struct work_struct *work) -+{ -+ struct wl1251 *wl = -+ container_of(work, struct wl1251, filter_work); -+ struct wl1251_filter_params *fp; -+ unsigned long flags; -+ bool enabled = true; -+ int ret; -+ -+ /* first, get the filter parameters */ -+ spin_lock_irqsave(&wl->wl_lock, flags); -+ fp = wl->filter_params; -+ wl->filter_params = NULL; -+ spin_unlock_irqrestore(&wl->wl_lock, flags); -+ -+ if (!fp) -+ return; -+ -+ /* then, lock the mutex without risk of lock-up */ -+ mutex_lock(&wl->mutex); -+ -+ if (wl->state == WL1251_STATE_OFF) -+ goto out; -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* configure the mc filter regardless of the changed flags */ -+ if (fp->filters & FIF_ALLMULTI) -+ enabled = false; -+ -+ ret = wl1251_acx_group_address_tbl(wl, fp->mc_list, fp->mc_list_length, -+ enabled); -+ if (ret < 0) -+ goto out_sleep; -+ -+ /* determine, whether supported filter values have changed */ -+ if (fp->changed == 0) -+ goto out; -+ -+ /* apply configured filters */ -+ ret = wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter); -+ if (ret < 0) -+ goto out_sleep; -+ -+out_sleep: -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ kfree(fp); -+} -+ 
-+static int wl1251_plt_init(struct wl1251 *wl) -+{ -+ int ret; -+ -+ ret = wl1251_hw_init_mem_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_data_path(wl, wl->channel, 1); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1251_plt_start(struct wl1251 *wl) -+{ -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ wl1251_notice("power up"); -+ -+ if (wl->state != WL1251_STATE_OFF) { -+ wl1251_error("cannot go into PLT state because not " -+ "in off state: %d", wl->state); -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ wl->state = WL1251_STATE_PLT; -+ -+ ret = wl1251_chip_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_boot(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl1251_notice("firmware booted in PLT mode (%s)", wl->fw_ver); -+ -+ ret = wl1251_plt_init(wl); -+ if (ret < 0) -+ goto out; -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+int wl1251_plt_stop(struct wl1251 *wl) -+{ -+ int ret = 0; -+ -+ mutex_lock(&wl->mutex); -+ -+ wl1251_notice("power down"); -+ -+ if (wl->state != WL1251_STATE_PLT) { -+ wl1251_error("cannot power down because not in PLT " -+ "state: %d", wl->state); -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ wl1251_disable_interrupts(wl); -+ wl1251_power_off(wl); -+ -+ wl->state = WL1251_STATE_OFF; -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+ -+static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -+{ -+ struct wl1251 *wl = hw->priv; -+ -+ skb_queue_tail(&wl->tx_queue, skb); -+ -+ /* -+ * The chip specific setup must run before the first TX packet - -+ * before that, the tx_work will not be initialized! -+ */ -+ -+ queue_work(wl->hw->workqueue, &wl->tx_work); -+ -+ /* -+ * The workqueue is slow to process the tx_queue and we need stop -+ * the queue here, otherwise the queue will get too long. -+ */ -+ if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { -+ ieee80211_stop_queues(wl->hw); -+ -+ /* -+ * FIXME: this is racy, the variable is not properly -+ * protected. Maybe fix this by removing the stupid -+ * variable altogether and checking the real queue state? -+ */ -+ wl->tx_queue_stopped = true; -+ } -+ -+ return NETDEV_TX_OK; -+} -+ -+static int wl1251_dev_notify(struct notifier_block *me, unsigned long what, -+ void *arg) -+{ -+ struct net_device *dev; -+ struct wireless_dev *wdev; -+ struct wiphy *wiphy; -+ struct ieee80211_hw *hw; -+ struct wl1251 *wl; -+ struct in_ifaddr *ifa = arg; -+ int ret = 0; -+ -+ dev = ifa->ifa_dev->dev; -+ -+ wdev = dev->ieee80211_ptr; -+ if (wdev == NULL) -+ return -ENODEV; -+ -+ wiphy = wdev->wiphy; -+ if (wiphy == NULL) -+ return -ENODEV; -+ -+ hw = wiphy_priv(wiphy); -+ if (hw == NULL) -+ return -ENODEV; -+ -+ /* FIXME, we assume here that the notification was for wl12xx. -+ That is not true if there are multiple WLAN adapters in the device. -+ FIXME, we should probably not install ARP filter if the interface -+ has multiple addresses. 
-+ */ -+ wl = hw->priv; -+ -+ mutex_lock(&wl->mutex); -+ -+ if (wl->state == WL1251_STATE_OFF) -+ goto out; -+ -+ /* FIXME, add support for IPv6 */ -+ if (what == NETDEV_UP) { -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_acx_ip_config(wl, true, (u8 *)&ifa->ifa_address, -+ IPV4_VERSION); -+ wl1251_ps_elp_sleep(wl); -+ -+ } else if (what == NETDEV_DOWN) { -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ ret = wl1251_acx_ip_config(wl, false, NULL, IPV4_VERSION); -+ wl1251_ps_elp_sleep(wl); -+ } -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static struct notifier_block wl1251_dev_notifier = { -+ .notifier_call = wl1251_dev_notify, -+}; -+ -+static int wl1251_op_start(struct ieee80211_hw *hw) -+{ -+ struct wl1251 *wl = hw->priv; -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 start"); -+ -+ mutex_lock(&wl->mutex); -+ -+ if (wl->state != WL1251_STATE_OFF) { -+ wl1251_error("cannot start because not in off state: %d", -+ wl->state); -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ ret = wl1251_chip_wakeup(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_boot(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_hw_init(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_acx_station_id(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl->state = WL1251_STATE_ON; -+ -+ wl1251_info("firmware booted (%s)", wl->fw_ver); -+ -+out: -+ if (ret < 0) -+ wl1251_power_off(wl); -+ -+ mutex_unlock(&wl->mutex); -+ -+ register_inetaddr_notifier(&wl1251_dev_notifier); -+ -+ return ret; -+} -+ -+static void wl1251_op_stop(struct ieee80211_hw *hw) -+{ -+ struct wl1251 *wl = hw->priv; -+ unsigned long flags; -+ -+ wl1251_info("down"); -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 stop"); -+ -+ /* complete/cancel ongoing work */ -+ cancel_work_sync(&wl->filter_work); -+ spin_lock_irqsave(&wl->wl_lock, flags); -+ kfree(wl->filter_params); -+ wl->filter_params = NULL; -+ spin_unlock_irqrestore(&wl->wl_lock, flags); -+ -+ unregister_inetaddr_notifier(&wl1251_dev_notifier); -+ -+ mutex_lock(&wl->mutex); -+ -+ WARN_ON(wl->state != WL1251_STATE_ON); -+ -+ if (wl->scanning) { -+ mutex_unlock(&wl->mutex); -+ ieee80211_scan_completed(wl->hw); -+ mutex_lock(&wl->mutex); -+ wl->scanning = false; -+ } -+ -+ wl->state = WL1251_STATE_OFF; -+ -+ wl1251_disable_interrupts(wl); -+ -+ mutex_unlock(&wl->mutex); -+ -+ cancel_work_sync(&wl->irq_work); -+ cancel_work_sync(&wl->tx_work); -+ cancel_work_sync(&wl->filter_work); -+ -+ mutex_lock(&wl->mutex); -+ -+ /* let's notify MAC80211 about the remaining pending TX frames */ -+ wl1251_tx_flush(wl); -+ wl1251_power_off(wl); -+ -+ memset(wl->bssid, 0, ETH_ALEN); -+ memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); -+ wl->ssid_len = 0; -+ wl->listen_int = 1; -+ wl->bss_type = MAX_BSS_TYPE; -+ -+ wl->data_in_count = 0; -+ wl->rx_counter = 0; -+ wl->rx_handled = 0; -+ wl->rx_current_buffer = 0; -+ wl->rx_last_id = 0; -+ wl->next_tx_complete = 0; -+ wl->elp = false; -+ wl->psm = 0; -+ wl->ps_entry_retry = 0; -+ wl->tx_queue_stopped = false; -+ wl->power_level = WL1251_DEFAULT_POWER_LEVEL; -+ wl->channel = WL1251_DEFAULT_CHANNEL; -+ wl->last_event = 0; -+ -+ wl1251_debugfs_reset(wl); -+ -+ mutex_unlock(&wl->mutex); -+} -+ -+static int wl1251_op_add_interface(struct ieee80211_hw *hw, -+ struct ieee80211_if_init_conf *conf) -+{ -+ struct wl1251 *wl = hw->priv; -+ DECLARE_MAC_BUF(mac); -+ int ret = 0; -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %s", -+ conf->type, print_mac(mac, conf->mac_addr)); -+ -+ 
mutex_lock(&wl->mutex); -+ if (wl->vif) { -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ wl->vif = conf->vif; -+ -+ switch (conf->type) { -+ case NL80211_IFTYPE_STATION: -+ wl->bss_type = BSS_TYPE_STA_BSS; -+ break; -+ case NL80211_IFTYPE_ADHOC: -+ wl->bss_type = BSS_TYPE_IBSS; -+ break; -+ default: -+ ret = -EOPNOTSUPP; -+ goto out; -+ } -+ -+ if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) { -+ memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); -+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); -+ ret = wl1251_acx_station_id(wl); -+ if (ret < 0) -+ goto out; -+ } -+ -+out: -+ mutex_unlock(&wl->mutex); -+ return ret; -+} -+ -+static void wl1251_op_remove_interface(struct ieee80211_hw *hw, -+ struct ieee80211_if_init_conf *conf) -+{ -+ struct wl1251 *wl = hw->priv; -+ -+ mutex_lock(&wl->mutex); -+ wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); -+ wl->vif = NULL; -+ mutex_unlock(&wl->mutex); -+} -+ -+static int wl1251_build_null_data(struct wl1251 *wl) -+{ -+ struct wl12xx_null_data_template template; -+ -+ if (!is_zero_ether_addr(wl->bssid)) { -+ memcpy(template.header.da, wl->bssid, ETH_ALEN); -+ memcpy(template.header.bssid, wl->bssid, ETH_ALEN); -+ } else { -+ memset(template.header.da, 0xff, ETH_ALEN); -+ memset(template.header.bssid, 0xff, ETH_ALEN); -+ } -+ -+ memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); -+ template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | -+ IEEE80211_STYPE_NULLFUNC | -+ IEEE80211_FCTL_TODS); -+ -+ return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template, -+ sizeof(template)); -+ -+} -+ -+static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid) -+{ -+ struct wl12xx_ps_poll_template template; -+ -+ memcpy(template.bssid, wl->bssid, ETH_ALEN); -+ memcpy(template.ta, wl->mac_addr, ETH_ALEN); -+ -+ /* aid in PS-Poll has its two MSBs each set to 1 */ -+ template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid); -+ -+ template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); -+ -+ return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template, -+ sizeof(template)); -+ -+} -+ -+static void wl1251_update_support_rates(struct wl12xx_beacon_template *beacon) -+{ -+ int index, rate_len; -+ u16 size; -+ struct wl12xx_ie_rates *rates; -+ struct wl12xx_ie_ssid *ssid; -+ u8 *ptr; -+ -+ ptr = (u8 *)beacon; -+ size = sizeof(struct ieee80211_header); -+ -+ ptr += size; -+ /* Pass through time stamp, beacon interval and capability */ -+ ptr += (6 * sizeof(u16)); -+ -+ ssid = (struct wl12xx_ie_ssid *)ptr; -+ size = sizeof(struct wl12xx_ie_header) + ssid->header.len; -+ ptr += size; -+ -+ rates = (struct wl12xx_ie_rates *)ptr; -+ rate_len = rates->header.len; -+ size = sizeof(struct wl12xx_ie_header) + rate_len; -+ for (index = 0; index < rate_len; index++) { -+ if (rates->rates[index] == IEEE80211_CCK_RATE_1MB) { -+ rates->rates[index] = IEEE80211_BASIC_RATE_MASK | -+ IEEE80211_CCK_RATE_1MB; -+ } -+ if (rates->rates[index] == IEEE80211_CCK_RATE_2MB) { -+ rates->rates[index] = IEEE80211_BASIC_RATE_MASK | -+ IEEE80211_CCK_RATE_2MB; -+ } -+ if (rates->rates[index] == IEEE80211_CCK_RATE_5MB) { -+ rates->rates[index] = IEEE80211_BASIC_RATE_MASK | -+ IEEE80211_CCK_RATE_5MB; -+ } -+ if (rates->rates[index] == IEEE80211_CCK_RATE_11MB) { -+ rates->rates[index] = IEEE80211_BASIC_RATE_MASK | -+ IEEE80211_CCK_RATE_11MB; -+ } -+ } -+} -+ -+ -+static int wl1251_op_config_interface(struct ieee80211_hw *hw, -+ struct ieee80211_vif *vif, -+ struct ieee80211_if_conf *conf) -+{ -+ struct wl1251 *wl = hw->priv; -+ struct sk_buff *beacon; -+ DECLARE_MAC_BUF(mac); -+ bool 
do_join = false; -+ int ret; -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %s", -+ print_mac(mac, conf->bssid)); -+ wl1251_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid, -+ conf->ssid_len); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ if (!is_zero_ether_addr(conf->bssid)) -+ do_join = true; -+ -+ memcpy(wl->bssid, conf->bssid, ETH_ALEN); -+ -+ if (do_join) { -+ ret = wl1251_build_null_data(wl); -+ if (ret < 0) -+ goto out_sleep; -+ } -+ -+ wl->ssid_len = conf->ssid_len; -+ if (wl->ssid_len) -+ memcpy(wl->ssid, conf->ssid, wl->ssid_len); -+ -+ if (conf->changed & IEEE80211_IFCC_BEACON) { -+ beacon = ieee80211_beacon_get(hw, vif); -+ wl1251_update_support_rates((struct wl12xx_beacon_template *) -+ beacon->data); -+ -+ ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, -+ beacon->len); -+ -+ if (ret < 0) { -+ dev_kfree_skb(beacon); -+ goto out_sleep; -+ } -+ -+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data, -+ beacon->len); -+ -+ dev_kfree_skb(beacon); -+ -+ if (ret < 0) -+ goto out_sleep; -+ } -+ -+ if (do_join) { -+ ret = wl1251_join(wl, wl->bss_type, wl->channel, -+ wl->beacon_int, wl->dtim_period); -+ if (ret < 0) -+ goto out_sleep; -+ } -+ -+out_sleep: -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static int wl1251_op_config(struct ieee80211_hw *hw, -+ struct ieee80211_conf *conf) -+{ -+ struct wl1251 *wl = hw->priv; -+ int channel, ret = 0; -+ -+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq); -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", -+ channel, -+ conf->flags & IEEE80211_CONF_PS ? "on" : "off", -+ conf->power_level); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl->channel = channel; -+ -+ if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { -+ wl1251_debug(DEBUG_PSM, "psm enabled"); -+ -+ wl->psm_requested = true; -+ -+ /* -+ * We enter PSM only if we're already associated. -+ * If we're not, we'll enter it when joining an SSID, -+ * through the bss_info_changed() hook. 
-+ */ -+ -+ ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); -+ } else if (!(conf->flags & IEEE80211_CONF_PS) && -+ wl->psm_requested) { -+ wl1251_debug(DEBUG_PSM, "psm disabled"); -+ -+ wl->psm_requested = false; -+ -+ if (wl->psm) -+ ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); -+ } -+ -+ if (conf->power_level != wl->power_level) { -+ ret = wl1251_acx_tx_power(wl, conf->power_level); -+ if (ret < 0) -+ goto out_sleep; -+ -+ wl->power_level = conf->power_level; -+ } -+ -+out_sleep: -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static void wl1251_op_configure_filter(struct ieee80211_hw *hw, -+ unsigned int changed, -+ unsigned int *total, -+ int mc_count, -+ struct dev_addr_list *mc_list) -+{ -+ struct wl1251 *wl = hw->priv; -+ struct wl1251_filter_params *fp; -+ struct dev_addr_list *mc; -+ unsigned long flags; -+ int i; -+ DECLARE_MAC_BUF(mac); -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter"); -+ -+ *total &= WL1251_SUPPORTED_FILTERS; -+ changed &= WL1251_SUPPORTED_FILTERS; -+ -+ fp = kzalloc(sizeof(*fp), GFP_ATOMIC); -+ if (!fp) { -+ wl1251_error("Out of memory setting filters."); -+ return; -+ } -+ -+ /* store current filter config */ -+ fp->filters = *total; -+ fp->changed = changed; -+ -+ /* update multicast filtering parameters */ -+ if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) { -+ mc_count = 0; -+ fp->filters |= FIF_ALLMULTI; -+ } -+ -+ fp->mc_list_length = 0; -+ mc = mc_list; -+ for (i = 0; i < mc_count; i++) { -+ if (mc->da_addrlen == ETH_ALEN) { -+ wl1251_debug(DEBUG_MAC80211, "multicast mac %s", -+ print_mac(mac, mc->da_addr)); -+ memcpy(fp->mc_list[fp->mc_list_length], -+ mc->da_addr, ETH_ALEN); -+ fp->mc_list_length++; -+ } else { -+ wl1251_warning("Unknown mc address length."); -+ } -+ mc = mc->next; -+ } -+ -+ spin_lock_irqsave(&wl->wl_lock, flags); -+ kfree(wl->filter_params); -+ wl->filter_params = fp; -+ spin_unlock_irqrestore(&wl->wl_lock, flags); -+ -+ if (changed == 0) -+ /* no filters which we support changed */ -+ goto out; -+ -+ /* FIXME: wl->rx_config and wl->rx_filter are not protected */ -+ -+ wl->rx_config = WL1251_DEFAULT_RX_CONFIG; -+ wl->rx_filter = WL1251_DEFAULT_RX_FILTER; -+ -+ if (*total & FIF_PROMISC_IN_BSS) { -+ wl->rx_config |= CFG_BSSID_FILTER_EN; -+ wl->rx_config |= CFG_RX_ALL_GOOD; -+ } -+ if (*total & FIF_ALLMULTI) -+ /* -+ * CFG_MC_FILTER_EN in rx_config needs to be 0 to receive -+ * all multicast frames -+ */ -+ wl->rx_config &= ~CFG_MC_FILTER_EN; -+ if (*total & FIF_FCSFAIL) -+ wl->rx_filter |= CFG_RX_FCS_ERROR; -+ if (*total & FIF_BCN_PRBRESP_PROMISC) { -+ wl->rx_config &= ~CFG_BSSID_FILTER_EN; -+ wl->rx_config &= ~CFG_SSID_FILTER_EN; -+ } -+ if (*total & FIF_CONTROL) -+ wl->rx_filter |= CFG_RX_CTL_EN; -+ if (*total & FIF_OTHER_BSS) -+ wl->rx_filter &= ~CFG_BSSID_FILTER_EN; -+ -+out: -+ queue_work(wl->hw->workqueue, &wl->filter_work); -+} -+ -+/* HW encryption */ -+static int wl1251_set_key_type(struct wl1251 *wl, -+ struct wl1251_cmd_set_keys *key, -+ enum set_key_cmd cmd, -+ struct ieee80211_key_conf *mac80211_key, -+ const u8 *addr) -+{ -+ switch (mac80211_key->alg) { -+ case ALG_WEP: -+ if (is_broadcast_ether_addr(addr)) -+ key->key_type = KEY_WEP_DEFAULT; -+ else -+ key->key_type = KEY_WEP_ADDR; -+ -+ mac80211_key->hw_key_idx = mac80211_key->keyidx; -+ break; -+ case ALG_TKIP: -+ if (is_broadcast_ether_addr(addr)) -+ key->key_type = KEY_TKIP_MIC_GROUP; -+ else -+ key->key_type = KEY_TKIP_MIC_PAIRWISE; -+ -+ mac80211_key->hw_key_idx = mac80211_key->keyidx; -+ break; -+ 
case ALG_CCMP: -+ if (is_broadcast_ether_addr(addr)) -+ key->key_type = KEY_AES_GROUP; -+ else -+ key->key_type = KEY_AES_PAIRWISE; -+ mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; -+ break; -+ default: -+ wl1251_error("Unknown key algo 0x%x", mac80211_key->alg); -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, -+ const u8 *local_addr, const u8 *addr, -+ struct ieee80211_key_conf *key) -+{ -+ struct wl1251 *wl = hw->priv; -+ struct wl1251_cmd_set_keys *wl_cmd; -+ int ret; -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 set key"); -+ -+ wl_cmd = kzalloc(sizeof(*wl_cmd), GFP_KERNEL); -+ if (!wl_cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); -+ wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); -+ wl1251_dump(DEBUG_CRYPT, "LOCAL_ADDR: ", local_addr, ETH_ALEN); -+ wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", -+ key->alg, key->keyidx, key->keylen, key->flags); -+ wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen); -+ -+ if (is_zero_ether_addr(addr)) { -+ /* We dont support TX only encryption */ -+ ret = -EOPNOTSUPP; -+ goto out; -+ } -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out_unlock; -+ -+ switch (cmd) { -+ case SET_KEY: -+ wl_cmd->key_action = KEY_ADD_OR_REPLACE; -+ break; -+ case DISABLE_KEY: -+ wl_cmd->key_action = KEY_REMOVE; -+ break; -+ default: -+ wl1251_error("Unsupported key cmd 0x%x", cmd); -+ break; -+ } -+ -+ ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr); -+ if (ret < 0) { -+ wl1251_error("Set KEY type failed"); -+ goto out_sleep; -+ } -+ -+ if (wl_cmd->key_type != KEY_WEP_DEFAULT) -+ memcpy(wl_cmd->addr, addr, ETH_ALEN); -+ -+ if ((wl_cmd->key_type == KEY_TKIP_MIC_GROUP) || -+ (wl_cmd->key_type == KEY_TKIP_MIC_PAIRWISE)) { -+ /* -+ * We get the key in the following form: -+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) -+ * but the target is expecting: -+ * TKIP - RX MIC - TX MIC -+ */ -+ memcpy(wl_cmd->key, key->key, 16); -+ memcpy(wl_cmd->key + 16, key->key + 24, 8); -+ memcpy(wl_cmd->key + 24, key->key + 16, 8); -+ -+ } else { -+ memcpy(wl_cmd->key, key->key, key->keylen); -+ } -+ wl_cmd->key_size = key->keylen; -+ -+ wl_cmd->id = key->keyidx; -+ wl_cmd->ssid_profile = 0; -+ -+ wl1251_dump(DEBUG_CRYPT, "TARGET KEY: ", wl_cmd, sizeof(*wl_cmd)); -+ -+ ret = wl1251_cmd_send(wl, CMD_SET_KEYS, wl_cmd, sizeof(*wl_cmd)); -+ if (ret < 0) { -+ wl1251_warning("could not set keys"); -+ goto out_sleep; -+ } -+ -+out_sleep: -+ wl1251_ps_elp_sleep(wl); -+ -+out_unlock: -+ mutex_unlock(&wl->mutex); -+ -+out: -+ kfree(wl_cmd); -+ -+ return ret; -+} -+ -+static int wl1251_build_basic_rates(char *rates) -+{ -+ u8 index = 0; -+ -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; -+ -+ return index; -+} -+ -+static int wl1251_build_extended_rates(char *rates) -+{ -+ u8 index = 0; -+ -+ rates[index++] = IEEE80211_OFDM_RATE_6MB; -+ rates[index++] = IEEE80211_OFDM_RATE_9MB; -+ rates[index++] = IEEE80211_OFDM_RATE_12MB; -+ rates[index++] = IEEE80211_OFDM_RATE_18MB; -+ rates[index++] = IEEE80211_OFDM_RATE_24MB; -+ rates[index++] = IEEE80211_OFDM_RATE_36MB; -+ rates[index++] = IEEE80211_OFDM_RATE_48MB; -+ rates[index++] = 
IEEE80211_OFDM_RATE_54MB; -+ -+ return index; -+} -+ -+ -+static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len) -+{ -+ struct wl12xx_probe_req_template template; -+ struct wl12xx_ie_rates *rates; -+ char *ptr; -+ u16 size; -+ -+ ptr = (char *)&template; -+ size = sizeof(struct ieee80211_header); -+ -+ memset(template.header.da, 0xff, ETH_ALEN); -+ memset(template.header.bssid, 0xff, ETH_ALEN); -+ memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); -+ template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); -+ -+ /* IEs */ -+ /* SSID */ -+ template.ssid.header.id = WLAN_EID_SSID; -+ template.ssid.header.len = ssid_len; -+ if (ssid_len && ssid) -+ memcpy(template.ssid.ssid, ssid, ssid_len); -+ size += sizeof(struct wl12xx_ie_header) + ssid_len; -+ ptr += size; -+ -+ /* Basic Rates */ -+ rates = (struct wl12xx_ie_rates *)ptr; -+ rates->header.id = WLAN_EID_SUPP_RATES; -+ rates->header.len = wl1251_build_basic_rates(rates->rates); -+ size += sizeof(struct wl12xx_ie_header) + rates->header.len; -+ ptr += sizeof(struct wl12xx_ie_header) + rates->header.len; -+ -+ /* Extended rates */ -+ rates = (struct wl12xx_ie_rates *)ptr; -+ rates->header.id = WLAN_EID_EXT_SUPP_RATES; -+ rates->header.len = wl1251_build_extended_rates(rates->rates); -+ size += sizeof(struct wl12xx_ie_header) + rates->header.len; -+ -+ wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size); -+ -+ return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template, -+ size); -+} -+ -+static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len, -+ u8 active_scan, u8 high_prio, u8 num_channels, -+ u8 probe_requests) -+{ -+ struct wl1251_cmd_trigger_scan_to *trigger = NULL; -+ struct cmd_scan *params = NULL; -+ int i, ret; -+ u16 scan_options = 0; -+ -+ if (wl->scanning) -+ return -EINVAL; -+ -+ params = kzalloc(sizeof(*params), GFP_KERNEL); -+ if (!params) -+ return -ENOMEM; -+ -+ params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD); -+ params->params.rx_filter_options = -+ cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN); -+ -+ /* High priority scan */ -+ if (!active_scan) -+ scan_options |= SCAN_PASSIVE; -+ if (high_prio) -+ scan_options |= SCAN_PRIORITY_HIGH; -+ params->params.scan_options = scan_options; -+ -+ params->params.num_channels = num_channels; -+ params->params.num_probe_requests = probe_requests; -+ params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */ -+ params->params.tid_trigger = 0; -+ -+ for (i = 0; i < num_channels; i++) { -+ params->channels[i].min_duration = cpu_to_le32(30000); -+ params->channels[i].max_duration = cpu_to_le32(60000); -+ memset(&params->channels[i].bssid_lsb, 0xff, 4); -+ memset(&params->channels[i].bssid_msb, 0xff, 2); -+ params->channels[i].early_termination = 0; -+ params->channels[i].tx_power_att = 0; -+ params->channels[i].channel = i + 1; -+ memset(params->channels[i].pad, 0, 3); -+ } -+ -+ for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++) -+ memset(&params->channels[i], 0, -+ sizeof(struct basic_scan_channel_parameters)); -+ -+ if (len && ssid) { -+ params->params.ssid_len = len; -+ memcpy(params->params.ssid, ssid, len); -+ } else { -+ params->params.ssid_len = 0; -+ memset(params->params.ssid, 0, 32); -+ } -+ -+ ret = wl1251_build_probe_req(wl, ssid, len); -+ if (ret < 0) { -+ wl1251_error("PROBE request template failed"); -+ goto out; -+ } -+ -+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); -+ if (!trigger) -+ goto out; -+ -+ trigger->timeout = 0; -+ -+ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, -+ 
sizeof(*trigger)); -+ if (ret < 0) { -+ wl1251_error("trigger scan to failed for hw scan"); -+ goto out; -+ } -+ -+ wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); -+ -+ wl->scanning = true; -+ -+ ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); -+ if (ret < 0) -+ wl1251_error("SCAN failed"); -+ -+ wl1251_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params)); -+ -+ if (params->header.status != CMD_STATUS_SUCCESS) { -+ wl1251_error("TEST command answer error: %d", -+ params->header.status); -+ wl->scanning = false; -+ ret = -EIO; -+ goto out; -+ } -+ -+out: -+ kfree(params); -+ return ret; -+ -+} -+ -+static int wl1251_op_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) -+{ -+ struct wl1251 *wl = hw->priv; -+ int ret; -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan"); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_hw_scan(hw->priv, ssid, len, 1, 0, 13, 3); -+ -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static int wl1251_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) -+{ -+ struct wl1251 *wl = hw->priv; -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1251_acx_rts_threshold(wl, (u16) value); -+ if (ret < 0) -+ wl1251_warning("wl1251_op_set_rts_threshold failed: %d", ret); -+ -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, -+ struct ieee80211_vif *vif, -+ struct ieee80211_bss_conf *bss_conf, -+ u32 changed) -+{ -+ enum wl1251_cmd_ps_mode mode; -+ struct wl1251 *wl = hw->priv; -+ int ret; -+ -+ wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed"); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ if (changed & BSS_CHANGED_ASSOC) { -+ if (bss_conf->assoc) { -+ wl->beacon_int = bss_conf->beacon_int; -+ wl->dtim_period = bss_conf->dtim_period; -+ -+ ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int, -+ wl->dtim_period); -+ wl->aid = bss_conf->aid; -+ -+ ret = wl1251_build_ps_poll(wl, wl->aid); -+ if (ret < 0) -+ goto out_sleep; -+ -+ ret = wl1251_acx_aid(wl, wl->aid); -+ if (ret < 0) -+ goto out_sleep; -+ -+ /* If we want to go in PSM but we're not there yet */ -+ if (wl->psm_requested && !wl->psm) { -+ mode = STATION_POWER_SAVE_MODE; -+ ret = wl1251_ps_set_mode(wl, mode); -+ if (ret < 0) -+ goto out_sleep; -+ } -+ } else { -+ /* use defaults when not associated */ -+ wl->beacon_int = WL1251_DEFAULT_BEACON_INT; -+ wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; -+ } -+ } -+ if (changed & BSS_CHANGED_ERP_SLOT) { -+ if (bss_conf->use_short_slot) -+ ret = wl1251_acx_slot(wl, SLOT_TIME_SHORT); -+ else -+ ret = wl1251_acx_slot(wl, SLOT_TIME_LONG); -+ if (ret < 0) { -+ wl1251_warning("Set slot time failed %d", ret); -+ goto out_sleep; -+ } -+ } -+ -+ if (changed & BSS_CHANGED_ERP_PREAMBLE) { -+ if (bss_conf->use_short_preamble) -+ wl1251_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); -+ else -+ wl1251_acx_set_preamble(wl, ACX_PREAMBLE_LONG); -+ } -+ -+ if (changed & BSS_CHANGED_ERP_CTS_PROT) { -+ if (bss_conf->use_cts_prot) -+ ret = wl1251_acx_cts_protect(wl, CTSPROTECT_ENABLE); -+ else -+ ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE); -+ if (ret < 0) { -+ wl1251_warning("Set ctsprotect failed %d", ret); -+ goto out_sleep; -+ } -+ } -+ -+out_sleep: -+ wl1251_ps_elp_sleep(wl); -+ -+out: -+ 
mutex_unlock(&wl->mutex); -+} -+ -+/* can't be const, mac80211 writes to this */ -+static struct ieee80211_rate wl1251_rates[] = { -+ { .bitrate = 10, -+ .hw_value = 0x1, -+ .hw_value_short = 0x1, }, -+ { .bitrate = 20, -+ .hw_value = 0x2, -+ .hw_value_short = 0x2, -+ .flags = IEEE80211_RATE_SHORT_PREAMBLE }, -+ { .bitrate = 55, -+ .hw_value = 0x4, -+ .hw_value_short = 0x4, -+ .flags = IEEE80211_RATE_SHORT_PREAMBLE }, -+ { .bitrate = 110, -+ .hw_value = 0x20, -+ .hw_value_short = 0x20, -+ .flags = IEEE80211_RATE_SHORT_PREAMBLE }, -+ { .bitrate = 60, -+ .hw_value = 0x8, -+ .hw_value_short = 0x8, }, -+ { .bitrate = 90, -+ .hw_value = 0x10, -+ .hw_value_short = 0x10, }, -+ { .bitrate = 120, -+ .hw_value = 0x40, -+ .hw_value_short = 0x40, }, -+ { .bitrate = 180, -+ .hw_value = 0x80, -+ .hw_value_short = 0x80, }, -+ { .bitrate = 240, -+ .hw_value = 0x200, -+ .hw_value_short = 0x200, }, -+ { .bitrate = 360, -+ .hw_value = 0x400, -+ .hw_value_short = 0x400, }, -+ { .bitrate = 480, -+ .hw_value = 0x800, -+ .hw_value_short = 0x800, }, -+ { .bitrate = 540, -+ .hw_value = 0x1000, -+ .hw_value_short = 0x1000, }, -+}; -+ -+/* can't be const, mac80211 writes to this */ -+static struct ieee80211_channel wl1251_channels[] = { -+ { .hw_value = 1, .center_freq = 2412}, -+ { .hw_value = 2, .center_freq = 2417}, -+ { .hw_value = 3, .center_freq = 2422}, -+ { .hw_value = 4, .center_freq = 2427}, -+ { .hw_value = 5, .center_freq = 2432}, -+ { .hw_value = 6, .center_freq = 2437}, -+ { .hw_value = 7, .center_freq = 2442}, -+ { .hw_value = 8, .center_freq = 2447}, -+ { .hw_value = 9, .center_freq = 2452}, -+ { .hw_value = 10, .center_freq = 2457}, -+ { .hw_value = 11, .center_freq = 2462}, -+ { .hw_value = 12, .center_freq = 2467}, -+ { .hw_value = 13, .center_freq = 2472}, -+}; -+ -+/* can't be const, mac80211 writes to this */ -+static struct ieee80211_supported_band wl1251_band_2ghz = { -+ .channels = wl1251_channels, -+ .n_channels = ARRAY_SIZE(wl1251_channels), -+ .bitrates = wl1251_rates, -+ .n_bitrates = ARRAY_SIZE(wl1251_rates), -+}; -+ -+static const struct ieee80211_ops wl1251_ops = { -+ .start = wl1251_op_start, -+ .stop = wl1251_op_stop, -+ .add_interface = wl1251_op_add_interface, -+ .remove_interface = wl1251_op_remove_interface, -+ .config = wl1251_op_config, -+ .config_interface = wl1251_op_config_interface, -+ .configure_filter = wl1251_op_configure_filter, -+ .tx = wl1251_op_tx, -+ .set_key = wl1251_op_set_key, -+ .hw_scan = wl1251_op_hw_scan, -+ .bss_info_changed = wl1251_op_bss_info_changed, -+ .set_rts_threshold = wl1251_op_set_rts_threshold, -+}; -+ -+static int wl1251_register_hw(struct wl1251 *wl) -+{ -+ int ret; -+ -+ if (wl->mac80211_registered) -+ return 0; -+ -+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); -+ -+ ret = ieee80211_register_hw(wl->hw); -+ if (ret < 0) { -+ wl1251_error("unable to register mac80211 hw: %d", ret); -+ return ret; -+ } -+ -+ wl->mac80211_registered = true; -+ -+ wl1251_notice("loaded"); -+ -+ return 0; -+} -+ -+static int wl1251_init_ieee80211(struct wl1251 *wl) -+{ -+ /* The tx descriptor buffer and the TKIP space */ -+ wl->hw->extra_tx_headroom = sizeof(struct tx_double_buffer_desc) -+ + WL1251_TKIP_IV_SPACE; -+ -+ /* unit us */ -+ /* FIXME: find a proper value */ -+ wl->hw->channel_change_time = 10000; -+ -+ wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | -+ IEEE80211_HW_NOISE_DBM | -+ IEEE80211_HW_BEACON_FILTER; -+ -+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; -+ -+ SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); -+ -+ return 0; -+} -+ -+static 
void wl1251_device_release(struct device *dev) -+{ -+ -+} -+ -+static struct platform_device wl1251_device = { -+ /* FIXME: use wl12xx name to not break the user space */ -+ .name = "wl12xx", -+ .id = -1, -+ -+ /* device model insists to have a release function */ -+ .dev = { -+ .release = wl1251_device_release, -+ }, -+}; -+ -+static int __devinit wl1251_probe(struct spi_device *spi) -+{ -+ struct wl12xx_platform_data *pdata; -+ struct ieee80211_hw *hw; -+ struct wl1251 *wl; -+ int ret, i; -+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; -+ -+ pdata = spi->dev.platform_data; -+ if (!pdata) { -+ wl1251_error("no platform data"); -+ return -ENODEV; -+ } -+ -+ hw = ieee80211_alloc_hw(sizeof(*wl), &wl1251_ops); -+ if (!hw) { -+ wl1251_error("could not alloc ieee80211_hw"); -+ return -ENOMEM; -+ } -+ -+ wl = hw->priv; -+ memset(wl, 0, sizeof(*wl)); -+ -+ wl->hw = hw; -+ dev_set_drvdata(&spi->dev, wl); -+ wl->spi = spi; -+ -+ wl->data_in_count = 0; -+ -+ skb_queue_head_init(&wl->tx_queue); -+ -+ INIT_WORK(&wl->filter_work, wl1251_filter_work); -+ INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work); -+ wl->channel = WL1251_DEFAULT_CHANNEL; -+ wl->scanning = false; -+ wl->default_key = 0; -+ wl->listen_int = 1; -+ wl->rx_counter = 0; -+ wl->rx_handled = 0; -+ wl->rx_current_buffer = 0; -+ wl->rx_last_id = 0; -+ wl->rx_config = WL1251_DEFAULT_RX_CONFIG; -+ wl->rx_filter = WL1251_DEFAULT_RX_FILTER; -+ wl->elp = false; -+ wl->psm = 0; -+ wl->psm_requested = false; -+ wl->ps_entry_retry = 0; -+ wl->tx_queue_stopped = false; -+ wl->power_level = WL1251_DEFAULT_POWER_LEVEL; -+ wl->beacon_int = WL1251_DEFAULT_BEACON_INT; -+ wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; -+ wl->vif = NULL; -+ wl->bt_coex_mode = WL1251_BT_COEX_OFF; -+ wl->last_event = 0; -+ -+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) -+ wl->tx_frames[i] = NULL; -+ -+ wl->next_tx_complete = 0; -+ -+ INIT_WORK(&wl->irq_work, wl1251_irq_work); -+ INIT_WORK(&wl->tx_work, wl1251_tx_work); -+ -+ spin_lock_init(&wl->wl_lock); -+ -+ /* -+ * In case our MAC address is not correctly set, -+ * we use a random but Nokia MAC. 
-+ */ -+ memcpy(wl->mac_addr, nokia_oui, 3); -+ get_random_bytes(wl->mac_addr + 3, 3); -+ -+ wl->state = WL1251_STATE_OFF; -+ mutex_init(&wl->mutex); -+ -+ wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; -+ wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; -+ -+ wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL); -+ if (!wl->rx_descriptor) { -+ wl1251_error("could not allocate memory for rx descriptor"); -+ ret = -ENOMEM; -+ goto out_free; -+ } -+ -+ /* This is the only SPI value that we need to set here, the rest -+ * comes from the board-peripherals file */ -+ spi->bits_per_word = 32; -+ -+ ret = spi_setup(spi); -+ if (ret < 0) { -+ wl1251_error("spi_setup failed"); -+ goto out_free; -+ } -+ -+ wl->set_power = pdata->set_power; -+ if (!wl->set_power) { -+ wl1251_error("set power function missing in platform data"); -+ ret = -ENODEV; -+ goto out_free; -+ } -+ -+ wl->irq = spi->irq; -+ if (wl->irq < 0) { -+ wl1251_error("irq missing in platform data"); -+ ret = -ENODEV; -+ goto out_free; -+ } -+ -+ ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); -+ if (ret < 0) { -+ wl1251_error("request_irq() failed: %d", ret); -+ goto out_free; -+ } -+ -+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); -+ -+ disable_irq(wl->irq); -+ -+ ret = platform_device_register(&wl1251_device); -+ if (ret) { -+ wl1251_error("couldn't register platform device"); -+ goto out_irq; -+ } -+ dev_set_drvdata(&wl1251_device.dev, wl); -+ -+ ret = wl1251_init_ieee80211(wl); -+ if (ret) -+ goto out_platform; -+ -+ ret = wl1251_register_hw(wl); -+ if (ret) -+ goto out_platform; -+ -+ ret = wl1251_nl_register(); -+ if (ret) -+ goto out_register_hw; -+ -+ ret = device_create_file(&wl1251_device.dev, -+ &dev_attr_tx_mgmt_frm_rate); -+ if (ret < 0) { -+ wl1251_error("failed to create sysfs file tx_mgmt_frm_rate"); -+ goto out_register_hw; -+ } -+ -+ ret = device_create_file(&wl1251_device.dev, &dev_attr_bt_coex_mode); -+ if (ret < 0) { -+ wl1251_error("failed to create sysfs file bt_coex_mode"); -+ goto out_register_hw; -+ } -+ -+ wl1251_debugfs_init(wl); -+ -+ wl1251_notice("initialized"); -+ -+ return 0; -+ -+ out_register_hw: -+ ieee80211_unregister_hw(hw); -+ wl->mac80211_registered = false; -+ -+ out_platform: -+ platform_device_unregister(&wl1251_device); -+ -+ out_irq: -+ free_irq(wl->irq, wl); -+ -+ out_free: -+ kfree(wl->rx_descriptor); -+ wl->rx_descriptor = NULL; -+ -+ ieee80211_free_hw(hw); -+ -+ return ret; -+} -+ -+static int __devexit wl1251_remove(struct spi_device *spi) -+{ -+ struct wl1251 *wl = dev_get_drvdata(&spi->dev); -+ -+ ieee80211_unregister_hw(wl->hw); -+ -+ wl1251_debugfs_exit(wl); -+ platform_device_unregister(&wl1251_device); -+ free_irq(wl->irq, wl); -+ kfree(wl->target_mem_map); -+ kfree(wl->data_path); -+ vfree(wl->fw); -+ wl->fw = NULL; -+ kfree(wl->nvs); -+ wl->nvs = NULL; -+ -+ kfree(wl->rx_descriptor); -+ wl->rx_descriptor = NULL; -+ -+ kfree(wl->fw_status); -+ -+ ieee80211_free_hw(wl->hw); -+ wl1251_nl_unregister(); -+ -+ return 0; -+} -+ -+ -+static struct spi_driver wl1251_spi_driver = { -+ .driver = { -+ /* FIXME: use wl12xx name to not break the user space */ -+ .name = "wl12xx", -+ .bus = &spi_bus_type, -+ .owner = THIS_MODULE, -+ }, -+ -+ .probe = wl1251_probe, -+ .remove = __devexit_p(wl1251_remove), -+}; -+ -+static int __init wl1251_init(void) -+{ -+ int ret; -+ -+ ret = spi_register_driver(&wl1251_spi_driver); -+ if (ret < 0) { -+ wl1251_error("failed to register spi driver: %d", ret); -+ goto out; -+ } -+ -+out: -+ return ret; -+} -+ -+static void __exit 
wl1251_exit(void) -+{ -+ spi_unregister_driver(&wl1251_spi_driver); -+ -+ wl1251_notice("unloaded"); -+} -+ -+module_init(wl1251_init); -+module_exit(wl1251_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Kalle Valo "); -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_netlink.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_netlink.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_netlink.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_netlink.c 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,679 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+#include "wl1251_netlink.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl1251_spi.h" -+#include "wl1251_acx.h" -+ -+/* FIXME: this should be changed as soon as user space catches up */ -+#define WL1251_NL_NAME "wl1251" -+#define WL1251_NL_VERSION 1 -+ -+#define WL1251_MAX_TEST_LENGTH 1024 -+#define WL1251_MAX_NVS_LENGTH 1024 -+ -+enum wl1251_nl_commands { -+ WL1251_NL_CMD_UNSPEC, -+ WL1251_NL_CMD_TEST, -+ WL1251_NL_CMD_INTERROGATE, -+ WL1251_NL_CMD_CONFIGURE, -+ WL1251_NL_CMD_PHY_REG_READ, -+ WL1251_NL_CMD_NVS_PUSH, -+ WL1251_NL_CMD_REG_WRITE, -+ WL1251_NL_CMD_REG_READ, -+ WL1251_NL_CMD_SET_PLT_MODE, -+ -+ __WL1251_NL_CMD_AFTER_LAST -+}; -+#define WL1251_NL_CMD_MAX (__WL1251_NL_CMD_AFTER_LAST - 1) -+ -+enum wl1251_nl_attrs { -+ WL1251_NL_ATTR_UNSPEC, -+ WL1251_NL_ATTR_IFNAME, -+ WL1251_NL_ATTR_CMD_TEST_PARAM, -+ WL1251_NL_ATTR_CMD_TEST_ANSWER, -+ WL1251_NL_ATTR_CMD_IE, -+ WL1251_NL_ATTR_CMD_IE_LEN, -+ WL1251_NL_ATTR_CMD_IE_BUFFER, -+ WL1251_NL_ATTR_CMD_IE_ANSWER, -+ WL1251_NL_ATTR_REG_ADDR, -+ WL1251_NL_ATTR_REG_VAL, -+ WL1251_NL_ATTR_NVS_BUFFER, -+ WL1251_NL_ATTR_NVS_LEN, -+ WL1251_NL_ATTR_PLT_MODE, -+ -+ __WL1251_NL_ATTR_AFTER_LAST -+}; -+#define WL1251_NL_ATTR_MAX (__WL1251_NL_ATTR_AFTER_LAST - 1) -+ -+static struct genl_family wl1251_nl_family = { -+ .id = GENL_ID_GENERATE, -+ .name = WL1251_NL_NAME, -+ .hdrsize = 0, -+ .version = WL1251_NL_VERSION, -+ .maxattr = WL1251_NL_ATTR_MAX, -+}; -+ -+static struct net_device *ifname_to_netdev(struct net *net, -+ struct genl_info *info) -+{ -+ char *ifname; -+ -+ if (!info->attrs[WL1251_NL_ATTR_IFNAME]) -+ return NULL; -+ -+ ifname = nla_data(info->attrs[WL1251_NL_ATTR_IFNAME]); -+ -+ wl1251_debug(DEBUG_NETLINK, "Looking for %s", ifname); -+ -+ return dev_get_by_name(net, ifname); -+} -+ -+static struct wl1251 *ifname_to_wl1251(struct net *net, struct genl_info *info) -+{ -+ struct net_device *netdev; -+ struct wireless_dev *wdev; -+ struct wiphy *wiphy; -+ struct ieee80211_hw *hw; -+ -+ netdev = ifname_to_netdev(net, info); -+ if (netdev == NULL) { -+ wl1251_error("Wrong 
interface"); -+ return NULL; -+ } -+ -+ wdev = netdev->ieee80211_ptr; -+ if (wdev == NULL) { -+ wl1251_error("ieee80211_ptr is NULL"); -+ return NULL; -+ } -+ -+ wiphy = wdev->wiphy; -+ if (wiphy == NULL) { -+ wl1251_error("wiphy is NULL"); -+ return NULL; -+ } -+ -+ hw = wiphy_priv(wiphy); -+ if (hw == NULL) { -+ wl1251_error("hw is NULL"); -+ return NULL; -+ } -+ -+ dev_put(netdev); -+ -+ return hw->priv; -+} -+ -+static int wl1251_nl_test_cmd(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ struct wl1251_command *cmd; -+ char *buf; -+ int buf_len, ret, cmd_len; -+ u8 answer; -+ -+ if (!info->attrs[WL1251_NL_ATTR_CMD_TEST_PARAM]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ return -EINVAL; -+ } -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ buf = nla_data(info->attrs[WL1251_NL_ATTR_CMD_TEST_PARAM]); -+ buf_len = nla_len(info->attrs[WL1251_NL_ATTR_CMD_TEST_PARAM]); -+ answer = nla_get_u8(info->attrs[WL1251_NL_ATTR_CMD_TEST_ANSWER]); -+ -+ cmd->header.id = CMD_TEST; -+ memcpy(cmd->parameters, buf, buf_len); -+ cmd_len = sizeof(struct wl1251_cmd_header) + buf_len; -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1251_cmd_test(wl, cmd, cmd_len, answer); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto out; -+ } -+ -+ if (answer) { -+ struct sk_buff *msg; -+ void *hdr; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1251_nl_family, 0, WL1251_NL_CMD_TEST); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1251_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1251_NL_ATTR_IFNAME])); -+ NLA_PUT(msg, WL1251_NL_ATTR_CMD_TEST_ANSWER, -+ sizeof(*cmd), cmd); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ wl1251_debug(DEBUG_NETLINK, "TEST cmd sent, answer"); -+ ret = genlmsg_reply(msg, info); -+ goto out; -+ -+ nla_put_failure: -+ nlmsg_free(msg); -+ } else -+ wl1251_debug(DEBUG_NETLINK, "TEST cmd sent"); -+ -+out: -+ kfree(cmd); -+ return ret; -+} -+ -+static int wl1251_nl_interrogate(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ struct sk_buff *msg; -+ int ret = -ENOBUFS, cmd_ie, cmd_ie_len; -+ struct wl1251_command *cmd; -+ void *hdr; -+ -+ if (!info->attrs[WL1251_NL_ATTR_CMD_IE]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1251_NL_ATTR_CMD_IE_LEN]) -+ return -EINVAL; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ ret = -EINVAL; -+ goto nla_put_failure; -+ } -+ -+ /* acx id */ -+ cmd_ie = nla_get_u32(info->attrs[WL1251_NL_ATTR_CMD_IE]); -+ -+ /* maximum length of acx, including all headers */ -+ cmd_ie_len = nla_get_u32(info->attrs[WL1251_NL_ATTR_CMD_IE_LEN]); -+ -+ wl1251_debug(DEBUG_NETLINK, "Getting IE 0x%x (len %d)", -+ cmd_ie, cmd_ie_len); -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1251_cmd_interrogate(wl, cmd_ie, cmd, cmd_ie_len); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ 
&wl1251_nl_family, 0, WL1251_NL_CMD_INTERROGATE); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1251_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1251_NL_ATTR_IFNAME])); -+ NLA_PUT(msg, WL1251_NL_ATTR_CMD_IE_ANSWER, cmd_ie_len, cmd); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ kfree(cmd); -+ return genlmsg_reply(msg, info); -+ -+ nla_put_failure: -+ kfree(cmd); -+ nlmsg_free(msg); -+ -+ return ret; -+} -+ -+static int wl1251_nl_configure(struct sk_buff *skb, struct genl_info *info) -+{ -+ int ret = 0, cmd_ie_len, acx_len; -+ struct acx_header *acx = NULL; -+ struct sk_buff *msg; -+ struct wl1251 *wl; -+ void *cmd_ie; -+ u16 *id; -+ -+ if (!info->attrs[WL1251_NL_ATTR_CMD_IE_BUFFER]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1251_NL_ATTR_CMD_IE_LEN]) -+ return -EINVAL; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ ret = -EINVAL; -+ goto nla_put_failure; -+ } -+ -+ /* contains the acx header but not the cmd header */ -+ cmd_ie = nla_data(info->attrs[WL1251_NL_ATTR_CMD_IE_BUFFER]); -+ -+ cmd_ie_len = nla_get_u32(info->attrs[WL1251_NL_ATTR_CMD_IE_LEN]); -+ -+ /* acx id is in the first two bytes */ -+ id = cmd_ie; -+ -+ /* need to add acx_header before cmd_ie, so create a new command */ -+ acx_len = sizeof(struct acx_header) + cmd_ie_len; -+ acx = kzalloc(acx_len, GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto nla_put_failure; -+ } -+ -+ /* copy the acx header and the payload */ -+ memcpy(&acx->id, cmd_ie, cmd_ie_len); -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1251_cmd_configure(wl, *id, acx, acx_len); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ wl1251_debug(DEBUG_NETLINK, "CONFIGURE cmd sent"); -+ -+ nla_put_failure: -+ kfree(acx); -+ nlmsg_free(msg); -+ -+ return ret; -+} -+ -+static int wl1251_nl_phy_reg_read(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ struct sk_buff *msg; -+ u32 reg_addr, *reg_value = NULL; -+ int ret = 0; -+ void *hdr; -+ -+ if (!info->attrs[WL1251_NL_ATTR_REG_ADDR]) -+ return -EINVAL; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ ret = -EINVAL; -+ goto nla_put_failure; -+ } -+ -+ reg_value = kmalloc(sizeof(*reg_value), GFP_KERNEL); -+ if (!reg_value) { -+ ret = -ENOMEM; -+ goto nla_put_failure; -+ } -+ -+ reg_addr = nla_get_u32(info->attrs[WL1251_NL_ATTR_REG_ADDR]); -+ -+ wl1251_debug(DEBUG_NETLINK, "Reading PHY reg 0x%x", reg_addr); -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1251_cmd_read_memory(wl, reg_addr, reg_value, -+ sizeof(*reg_value)); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1251_nl_family, 0, WL1251_NL_CMD_PHY_REG_READ); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1251_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1251_NL_ATTR_IFNAME])); -+ -+ NLA_PUT_U32(msg, WL1251_NL_ATTR_REG_VAL, *reg_value); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ 
-+ kfree(reg_value); -+ -+ return genlmsg_reply(msg, info); -+ -+ nla_put_failure: -+ nlmsg_free(msg); -+ kfree(reg_value); -+ -+ return ret; -+} -+ -+static int wl1251_nl_nvs_push(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ int ret = 0; -+ -+ if (!info->attrs[WL1251_NL_ATTR_NVS_BUFFER]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1251_NL_ATTR_NVS_LEN]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ return -EINVAL; -+ } -+ -+ mutex_lock(&wl->mutex); -+ wl->nvs_len = nla_get_u32(info->attrs[WL1251_NL_ATTR_NVS_LEN]); -+ if (wl->nvs_len % 4) { -+ wl1251_error("NVS size is not multiple of 32: %d", wl->nvs_len); -+ ret = -EILSEQ; -+ goto out; -+ } -+ -+ /* If we already have an NVS, we should free it */ -+ kfree(wl->nvs); -+ -+ wl->nvs = kzalloc(wl->nvs_len, GFP_KERNEL); -+ if (wl->nvs == NULL) { -+ wl1251_error("Can't allocate NVS"); -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ memcpy(wl->nvs, -+ nla_data(info->attrs[WL1251_NL_ATTR_NVS_BUFFER]), -+ wl->nvs_len); -+ -+ wl1251_debug(DEBUG_NETLINK, "got NVS from userspace, %d bytes", -+ wl->nvs_len); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static int wl1251_nl_reg_read(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ u32 addr, val; -+ int ret = 0; -+ struct sk_buff *msg; -+ void *hdr; -+ -+ if (!info->attrs[WL1251_NL_ATTR_REG_ADDR]) -+ return -EINVAL; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ return -EINVAL; -+ } -+ -+ addr = nla_get_u32(info->attrs[WL1251_NL_ATTR_REG_ADDR]); -+ -+ mutex_lock(&wl->mutex); -+ val = wl1251_reg_read32(wl, addr); -+ mutex_unlock(&wl->mutex); -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1251_nl_family, 0, WL1251_NL_CMD_PHY_REG_READ); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1251_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1251_NL_ATTR_IFNAME])); -+ -+ NLA_PUT_U32(msg, WL1251_NL_ATTR_REG_VAL, val); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1251_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ return genlmsg_reply(msg, info); -+ -+ nla_put_failure: -+ nlmsg_free(msg); -+ -+ return ret; -+} -+ -+static int wl1251_nl_reg_write(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ u32 addr, val; -+ -+ if (!info->attrs[WL1251_NL_ATTR_REG_ADDR]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1251_NL_ATTR_REG_VAL]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ return -EINVAL; -+ } -+ -+ addr = nla_get_u32(info->attrs[WL1251_NL_ATTR_REG_ADDR]); -+ val = nla_get_u32(info->attrs[WL1251_NL_ATTR_REG_VAL]); -+ -+ mutex_lock(&wl->mutex); -+ wl1251_reg_write32(wl, addr, val); -+ mutex_unlock(&wl->mutex); -+ -+ return 0; -+} -+ -+static int wl1251_nl_set_plt_mode(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1251 *wl; -+ u32 val; -+ int ret; -+ -+ if (!info->attrs[WL1251_NL_ATTR_PLT_MODE]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1251(&init_net, info); -+ if (wl == NULL) { -+ wl1251_error("wl1251 not found"); -+ return -EINVAL; -+ } -+ -+ val = nla_get_u32(info->attrs[WL1251_NL_ATTR_PLT_MODE]); -+ -+ switch (val) { -+ case 0: -+ ret = wl1251_plt_stop(wl); -+ break; -+ case 1: -+ ret = wl1251_plt_start(wl); -+ 
break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ -+ return ret; -+} -+ -+static struct nla_policy wl1251_nl_policy[WL1251_NL_ATTR_MAX + 1] = { -+ [WL1251_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING, -+ .len = IFNAMSIZ-1 }, -+ [WL1251_NL_ATTR_CMD_TEST_PARAM] = { .type = NLA_BINARY, -+ .len = WL1251_MAX_TEST_LENGTH }, -+ [WL1251_NL_ATTR_CMD_TEST_ANSWER] = { .type = NLA_U8 }, -+ [WL1251_NL_ATTR_CMD_IE] = { .type = NLA_U32 }, -+ [WL1251_NL_ATTR_CMD_IE_LEN] = { .type = NLA_U32 }, -+ [WL1251_NL_ATTR_CMD_IE_BUFFER] = { .type = NLA_BINARY, -+ .len = WL1251_MAX_TEST_LENGTH }, -+ [WL1251_NL_ATTR_CMD_IE_ANSWER] = { .type = NLA_BINARY, -+ .len = WL1251_MAX_TEST_LENGTH }, -+ [WL1251_NL_ATTR_REG_ADDR] = { .type = NLA_U32 }, -+ [WL1251_NL_ATTR_REG_VAL] = { .type = NLA_U32 }, -+ [WL1251_NL_ATTR_NVS_BUFFER] = { .type = NLA_BINARY, -+ .len = WL1251_MAX_NVS_LENGTH }, -+ [WL1251_NL_ATTR_NVS_LEN] = { .type = NLA_U32 }, -+ [WL1251_NL_ATTR_PLT_MODE] = { .type = NLA_U32 }, -+}; -+ -+static struct genl_ops wl1251_nl_ops[] = { -+ { -+ .cmd = WL1251_NL_CMD_TEST, -+ .doit = wl1251_nl_test_cmd, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_INTERROGATE, -+ .doit = wl1251_nl_interrogate, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_CONFIGURE, -+ .doit = wl1251_nl_configure, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_PHY_REG_READ, -+ .doit = wl1251_nl_phy_reg_read, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_NVS_PUSH, -+ .doit = wl1251_nl_nvs_push, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_REG_WRITE, -+ .doit = wl1251_nl_reg_write, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_REG_READ, -+ .doit = wl1251_nl_reg_read, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1251_NL_CMD_SET_PLT_MODE, -+ .doit = wl1251_nl_set_plt_mode, -+ .policy = wl1251_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+}; -+ -+int wl1251_nl_register(void) -+{ -+ int err, i; -+ -+ err = genl_register_family(&wl1251_nl_family); -+ if (err) -+ return err; -+ -+ for (i = 0; i < ARRAY_SIZE(wl1251_nl_ops); i++) { -+ err = genl_register_ops(&wl1251_nl_family, &wl1251_nl_ops[i]); -+ if (err) -+ goto err_out; -+ } -+ return 0; -+ err_out: -+ genl_unregister_family(&wl1251_nl_family); -+ return err; -+} -+ -+void wl1251_nl_unregister(void) -+{ -+ genl_unregister_family(&wl1251_nl_family); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_netlink.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_netlink.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_netlink.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_netlink.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,30 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_NETLINK_H__ -+#define __WL1251_NETLINK_H__ -+ -+int wl1251_nl_register(void); -+void wl1251_nl_unregister(void); -+ -+#endif /* __WL1251_NETLINK_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_ps.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_ps.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_ps.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_ps.c 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,203 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1251_reg.h" -+#include "wl1251_ps.h" -+#include "wl1251_spi.h" -+ -+#define WL1251_WAKEUP_TIMEOUT 2000 -+ -+void wl1251_elp_work(struct work_struct *work) -+{ -+ struct delayed_work *dwork; -+ struct wl1251 *wl; -+ -+ dwork = container_of(work, struct delayed_work, work); -+ wl = container_of(dwork, struct wl1251, elp_work); -+ -+ wl1251_debug(DEBUG_PSM, "elp work"); -+ -+ mutex_lock(&wl->mutex); -+ -+ if (wl->elp || !wl->psm) -+ goto out; -+ -+ wl1251_debug(DEBUG_PSM, "chip to elp"); -+ wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); -+ wl->elp = true; -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+#define ELP_ENTRY_DELAY 5 -+ -+/* Routines to toggle sleep mode while in ELP */ -+void wl1251_ps_elp_sleep(struct wl1251 *wl) -+{ -+ if (wl->psm) { -+ cancel_delayed_work(&wl->elp_work); -+ queue_delayed_work(wl->hw->workqueue, &wl->elp_work, -+ msecs_to_jiffies(ELP_ENTRY_DELAY)); -+ } -+} -+ -+int wl1251_ps_elp_wakeup(struct wl1251 *wl) -+{ -+ unsigned long timeout; -+ u32 elp_reg; -+ -+ if (!wl->elp) -+ return 0; -+ -+ wl1251_debug(DEBUG_PSM, "waking up chip from elp"); -+ -+ timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); -+ -+ wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); -+ -+ elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); -+ -+ /* -+ * FIXME: we should wait for irq from chip but, as a temporary -+ * solution to simplify locking, let's poll instead -+ */ -+ while (!(elp_reg & ELPCTRL_WLAN_READY)) { -+ if (time_after(jiffies, timeout)) { -+ wl1251_error("elp wakeup timeout"); -+ return -ETIMEDOUT; -+ } -+ msleep(1); -+ elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); -+ } -+ -+ wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", -+ jiffies_to_msecs(jiffies) - -+ (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT)); -+ -+ wl->elp = false; -+ -+ return 0; -+} -+ -+static int wl1251_ps_set_elp(struct 
wl1251 *wl, bool enable) -+{ -+ int ret; -+ -+ if (enable) { -+ wl1251_debug(DEBUG_PSM, "sleep auth psm/elp"); -+ -+ ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP); -+ if (ret < 0) -+ return ret; -+ -+ wl1251_ps_elp_sleep(wl); -+ } else { -+ wl1251_debug(DEBUG_PSM, "sleep auth cam"); -+ -+ /* -+ * When the target is in ELP, we can only -+ * access the ELP control register. Thus, -+ * we have to wake the target up before -+ * changing the power authorization. -+ */ -+ -+ wl1251_ps_elp_wakeup(wl); -+ -+ ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) -+{ -+ int ret; -+ -+ switch (mode) { -+ case STATION_POWER_SAVE_MODE: -+ wl1251_debug(DEBUG_PSM, "entering psm"); -+ -+ /* enable beacon filtering */ -+ ret = wl1251_acx_beacon_filter_opt(wl, true); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_wake_up_conditions(wl, -+ WAKE_UP_EVENT_DTIM_BITMAP, -+ wl->listen_int); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE, -+ WL1251_DEFAULT_BET_CONSECUTIVE); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_ps_set_elp(wl, true); -+ if (ret < 0) -+ return ret; -+ -+ wl->psm = 1; -+ break; -+ case STATION_ACTIVE_MODE: -+ default: -+ wl1251_debug(DEBUG_PSM, "leaving psm"); -+ ret = wl1251_ps_set_elp(wl, false); -+ if (ret < 0) -+ return ret; -+ -+ /* disable BET */ -+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE, -+ WL1251_DEFAULT_BET_CONSECUTIVE); -+ if (ret < 0) -+ return ret; -+ -+ /* disable beacon filtering */ -+ ret = wl1251_acx_beacon_filter_opt(wl, false); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_acx_wake_up_conditions(wl, -+ WAKE_UP_EVENT_DTIM_BITMAP, -+ wl->listen_int); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_cmd_ps_mode(wl, STATION_ACTIVE_MODE); -+ if (ret < 0) -+ return ret; -+ -+ wl->psm = 0; -+ break; -+ } -+ -+ return ret; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_ps.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_ps.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_ps.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_ps.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,37 @@ -+#ifndef __WL1251_PS_H__ -+#define __WL1251_PS_H__ -+ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1251.h" -+#include "wl1251_acx.h" -+ -+int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode); -+void wl1251_ps_elp_sleep(struct wl1251 *wl); -+int wl1251_ps_elp_wakeup(struct wl1251 *wl); -+void wl1251_elp_work(struct work_struct *work); -+ -+ -+#endif /* __WL1251_PS_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_reg.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_reg.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_reg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_reg.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,744 @@ -+/* -+ * This file is part of wl12xx -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __REG_H__ -+#define __REG_H__ -+ -+#include -+ -+#define REGISTERS_BASE 0x00300000 -+#define DRPW_BASE 0x00310000 -+ -+#define REGISTERS_DOWN_SIZE 0x00008800 -+#define REGISTERS_WORK_SIZE 0x0000b000 -+ -+#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC -+ -+/* ELP register commands */ -+#define ELPCTRL_WAKE_UP 0x1 -+#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 -+#define ELPCTRL_SLEEP 0x0 -+/* ELP WLAN_READY bit */ -+#define ELPCTRL_WLAN_READY 0x2 -+ -+/* -+ * Interrupt registers. -+ * 64 bit interrupt sources registers ws ced. -+ * sme interupts were removed and new ones were added. -+ * Order was changed. 
-+ */ -+#define FIQ_MASK (REGISTERS_BASE + 0x0400) -+#define FIQ_MASK_L (REGISTERS_BASE + 0x0400) -+#define FIQ_MASK_H (REGISTERS_BASE + 0x0404) -+#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408) -+#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408) -+#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C) -+#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410) -+#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410) -+#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414) -+#define IRQ_MASK (REGISTERS_BASE + 0x0418) -+#define IRQ_MASK_L (REGISTERS_BASE + 0x0418) -+#define IRQ_MASK_H (REGISTERS_BASE + 0x041C) -+#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420) -+#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420) -+#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424) -+#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428) -+#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428) -+#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C) -+#define ECPU_MASK (REGISTERS_BASE + 0x0448) -+#define FIQ_STS_L (REGISTERS_BASE + 0x044C) -+#define FIQ_STS_H (REGISTERS_BASE + 0x0450) -+#define IRQ_STS_L (REGISTERS_BASE + 0x0454) -+#define IRQ_STS_H (REGISTERS_BASE + 0x0458) -+#define INT_STS_ND (REGISTERS_BASE + 0x0464) -+#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464) -+#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468) -+#define INT_STS_CLR (REGISTERS_BASE + 0x04B4) -+#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4) -+#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8) -+#define INT_ACK (REGISTERS_BASE + 0x046C) -+#define INT_ACK_L (REGISTERS_BASE + 0x046C) -+#define INT_ACK_H (REGISTERS_BASE + 0x0470) -+#define INT_TRIG (REGISTERS_BASE + 0x0474) -+#define INT_TRIG_L (REGISTERS_BASE + 0x0474) -+#define INT_TRIG_H (REGISTERS_BASE + 0x0478) -+#define HOST_STS_L (REGISTERS_BASE + 0x045C) -+#define HOST_STS_H (REGISTERS_BASE + 0x0460) -+#define HOST_MASK (REGISTERS_BASE + 0x0430) -+#define HOST_MASK_L (REGISTERS_BASE + 0x0430) -+#define HOST_MASK_H (REGISTERS_BASE + 0x0434) -+#define HOST_MASK_SET (REGISTERS_BASE + 0x0438) -+#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438) -+#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C) -+#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440) -+#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440) -+#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444) -+ -+/* Host Interrupts*/ -+#define HINT_MASK (REGISTERS_BASE + 0x0494) -+#define HINT_MASK_SET (REGISTERS_BASE + 0x0498) -+#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C) -+#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0) -+/*1150 spec calls this HINT_STS_RAW*/ -+#define HINT_STS_ND (REGISTERS_BASE + 0x04B0) -+#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4) -+#define HINT_ACK (REGISTERS_BASE + 0x04A8) -+#define HINT_TRIG (REGISTERS_BASE + 0x04AC) -+ -+/* Device Configuration registers*/ -+#define SOR_CFG (REGISTERS_BASE + 0x0800) -+#define ECPU_CTRL (REGISTERS_BASE + 0x0804) -+#define HI_CFG (REGISTERS_BASE + 0x0808) -+#define EE_START (REGISTERS_BASE + 0x080C) -+ -+#define CHIP_ID_B (REGISTERS_BASE + 0x5674) -+ -+#define CHIP_ID_1251_PG10 (0x7010101) -+#define CHIP_ID_1251_PG11 (0x7020101) -+#define CHIP_ID_1251_PG12 (0x7030101) -+ -+#define ENABLE (REGISTERS_BASE + 0x5450) -+ -+/* Power Management registers */ -+#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) -+#define ELP_CMD (REGISTERS_BASE + 0x5808) -+#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) -+#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) -+#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) -+ -+#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) -+ -+/* Scratch Pad registers*/ -+#define SCR_PAD0 (REGISTERS_BASE + 0x5608) -+#define SCR_PAD1 
(REGISTERS_BASE + 0x560C) -+#define SCR_PAD2 (REGISTERS_BASE + 0x5610) -+#define SCR_PAD3 (REGISTERS_BASE + 0x5614) -+#define SCR_PAD4 (REGISTERS_BASE + 0x5618) -+#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) -+#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) -+#define SCR_PAD5 (REGISTERS_BASE + 0x5624) -+#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) -+#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) -+#define SCR_PAD6 (REGISTERS_BASE + 0x5630) -+#define SCR_PAD7 (REGISTERS_BASE + 0x5634) -+#define SCR_PAD8 (REGISTERS_BASE + 0x5638) -+#define SCR_PAD9 (REGISTERS_BASE + 0x563C) -+ -+/* Spare registers*/ -+#define SPARE_A1 (REGISTERS_BASE + 0x0994) -+#define SPARE_A2 (REGISTERS_BASE + 0x0998) -+#define SPARE_A3 (REGISTERS_BASE + 0x099C) -+#define SPARE_A4 (REGISTERS_BASE + 0x09A0) -+#define SPARE_A5 (REGISTERS_BASE + 0x09A4) -+#define SPARE_A6 (REGISTERS_BASE + 0x09A8) -+#define SPARE_A7 (REGISTERS_BASE + 0x09AC) -+#define SPARE_A8 (REGISTERS_BASE + 0x09B0) -+#define SPARE_B1 (REGISTERS_BASE + 0x5420) -+#define SPARE_B2 (REGISTERS_BASE + 0x5424) -+#define SPARE_B3 (REGISTERS_BASE + 0x5428) -+#define SPARE_B4 (REGISTERS_BASE + 0x542C) -+#define SPARE_B5 (REGISTERS_BASE + 0x5430) -+#define SPARE_B6 (REGISTERS_BASE + 0x5434) -+#define SPARE_B7 (REGISTERS_BASE + 0x5438) -+#define SPARE_B8 (REGISTERS_BASE + 0x543C) -+ -+enum wl12xx_acx_int_reg { -+ ACX_REG_INTERRUPT_TRIG, -+ ACX_REG_INTERRUPT_TRIG_H, -+ -+/*============================================= -+ Host Interrupt Mask Register - 32bit (RW) -+ ------------------------------------------ -+ Setting a bit in this register masks the -+ corresponding interrupt to the host. -+ 0 - RX0 - Rx first dubble buffer Data Interrupt -+ 1 - TXD - Tx Data Interrupt -+ 2 - TXXFR - Tx Transfer Interrupt -+ 3 - RX1 - Rx second dubble buffer Data Interrupt -+ 4 - RXXFR - Rx Transfer Interrupt -+ 5 - EVENT_A - Event Mailbox interrupt -+ 6 - EVENT_B - Event Mailbox interrupt -+ 7 - WNONHST - Wake On Host Interrupt -+ 8 - TRACE_A - Debug Trace interrupt -+ 9 - TRACE_B - Debug Trace interrupt -+ 10 - CDCMP - Command Complete Interrupt -+ 11 - -+ 12 - -+ 13 - -+ 14 - ICOMP - Initialization Complete Interrupt -+ 16 - SG SE - Soft Gemini - Sense enable interrupt -+ 17 - SG SD - Soft Gemini - Sense disable interrupt -+ 18 - - -+ 19 - - -+ 20 - - -+ 21- - -+ Default: 0x0001 -+*==============================================*/ -+ ACX_REG_INTERRUPT_MASK, -+ -+/*============================================= -+ Host Interrupt Mask Set 16bit, (Write only) -+ ------------------------------------------ -+ Setting a bit in this register sets -+ the corresponding bin in ACX_HINT_MASK register -+ without effecting the mask -+ state of other bits (0 = no effect). -+==============================================*/ -+ ACX_REG_HINT_MASK_SET, -+ -+/*============================================= -+ Host Interrupt Mask Clear 16bit,(Write only) -+ ------------------------------------------ -+ Setting a bit in this register clears -+ the corresponding bin in ACX_HINT_MASK register -+ without effecting the mask -+ state of other bits (0 = no effect). -+=============================================*/ -+ ACX_REG_HINT_MASK_CLR, -+ -+/*============================================= -+ Host Interrupt Status Nondestructive Read -+ 16bit,(Read only) -+ ------------------------------------------ -+ The host can read this register to determine -+ which interrupts are active. -+ Reading this register doesn't -+ effect its content. 
-+=============================================*/ -+ ACX_REG_INTERRUPT_NO_CLEAR, -+ -+/*============================================= -+ Host Interrupt Status Clear on Read Register -+ 16bit,(Read only) -+ ------------------------------------------ -+ The host can read this register to determine -+ which interrupts are active. -+ Reading this register clears it, -+ thus making all interrupts inactive. -+==============================================*/ -+ ACX_REG_INTERRUPT_CLEAR, -+ -+/*============================================= -+ Host Interrupt Acknowledge Register -+ 16bit,(Write only) -+ ------------------------------------------ -+ The host can set individual bits in this -+ register to clear (acknowledge) the corresp. -+ interrupt status bits in the HINT_STS_CLR and -+ HINT_STS_ND registers, thus making the -+ assotiated interrupt inactive. (0-no effect) -+==============================================*/ -+ ACX_REG_INTERRUPT_ACK, -+ -+/*=============================================== -+ Host Software Reset - 32bit RW -+ ------------------------------------------ -+ [31:1] Reserved -+ 0 SOFT_RESET Soft Reset - When this bit is set, -+ it holds the Wlan hardware in a soft reset state. -+ This reset disables all MAC and baseband processor -+ clocks except the CardBus/PCI interface clock. -+ It also initializes all MAC state machines except -+ the host interface. It does not reload the -+ contents of the EEPROM. When this bit is cleared -+ (not self-clearing), the Wlan hardware -+ exits the software reset state. -+===============================================*/ -+ ACX_REG_SLV_SOFT_RESET, -+ -+/*=============================================== -+ EEPROM Burst Read Start - 32bit RW -+ ------------------------------------------ -+ [31:1] Reserved -+ 0 ACX_EE_START - EEPROM Burst Read Start 0 -+ Setting this bit starts a burst read from -+ the external EEPROM. -+ If this bit is set (after reset) before an EEPROM read/write, -+ the burst read starts at EEPROM address 0. -+ Otherwise, it starts at the address -+ following the address of the previous access. -+ TheWlan hardware hardware clears this bit automatically. -+ -+ Default: 0x00000000 -+*================================================*/ -+ ACX_REG_EE_START, -+ -+/* Embedded ARM CPU Control */ -+ -+/*=============================================== -+ Halt eCPU - 32bit RW -+ ------------------------------------------ -+ 0 HALT_ECPU Halt Embedded CPU - This bit is the -+ compliment of bit 1 (MDATA2) in the SOR_CFG register. -+ During a hardware reset, this bit holds -+ the inverse of MDATA2. -+ When downloading firmware from the host, -+ set this bit (pull down MDATA2). -+ The host clears this bit after downloading the firmware into -+ zero-wait-state SSRAM. -+ When loading firmware from Flash, clear this bit (pull up MDATA2) -+ so that the eCPU can run the bootloader code in Flash -+ HALT_ECPU eCPU State -+ -------------------- -+ 1 halt eCPU -+ 0 enable eCPU -+ ===============================================*/ -+ ACX_REG_ECPU_CONTROL, -+ -+ ACX_REG_TABLE_LEN -+}; -+ -+#define ACX_SLV_SOFT_RESET_BIT BIT(1) -+#define ACX_REG_EEPROM_START_BIT BIT(1) -+ -+/* Command/Information Mailbox Pointers */ -+ -+/*=============================================== -+ Command Mailbox Pointer - 32bit RW -+ ------------------------------------------ -+ This register holds the start address of -+ the command mailbox located in the Wlan hardware memory. -+ The host must read this pointer after a reset to -+ find the location of the command mailbox. 
-+ The Wlan hardware initializes the command mailbox -+ pointer with the default address of the command mailbox. -+ The command mailbox pointer is not valid until after -+ the host receives the Init Complete interrupt from -+ the Wlan hardware. -+ ===============================================*/ -+#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) -+ -+/*=============================================== -+ Information Mailbox Pointer - 32bit RW -+ ------------------------------------------ -+ This register holds the start address of -+ the information mailbox located in the Wlan hardware memory. -+ The host must read this pointer after a reset to find -+ the location of the information mailbox. -+ The Wlan hardware initializes the information mailbox pointer -+ with the default address of the information mailbox. -+ The information mailbox pointer is not valid -+ until after the host receives the Init Complete interrupt from -+ the Wlan hardware. -+ ===============================================*/ -+#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) -+ -+ -+/* Misc */ -+ -+#define REG_ENABLE_TX_RX (ENABLE) -+/* -+ * Rx configuration (filter) information element -+ * --------------------------------------------- -+ */ -+#define REG_RX_CONFIG (RX_CFG) -+#define REG_RX_FILTER (RX_FILTER_CFG) -+ -+ -+#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002 -+ -+/* promiscuous - receives all valid frames */ -+#define RX_CFG_PROMISCUOUS 0x0008 -+ -+/* receives frames from any BSSID */ -+#define RX_CFG_BSSID 0x0020 -+ -+/* receives frames destined to any MAC address */ -+#define RX_CFG_MAC 0x0010 -+ -+#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010 -+#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000 -+#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020 -+#define RX_CFG_ENABLE_ANY_BSSID 0x0000 -+ -+/* discards all broadcast frames */ -+#define RX_CFG_DISABLE_BCAST 0x0200 -+ -+#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400 -+#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800 -+#define RX_CFG_COPY_RX_STATUS 0x2000 -+#define RX_CFG_TSF 0x10000 -+ -+#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ -+ RX_CFG_ENABLE_ONLY_MY_BSSID) -+ -+#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ -+ | RX_CFG_ENABLE_ANY_BSSID) -+ -+#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ -+ RX_CFG_ENABLE_ANY_BSSID) -+ -+#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ -+ | RX_CFG_ENABLE_ONLY_MY_BSSID) -+ -+#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \ -+ | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \ -+ | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF) -+ -+#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC) -+ -+#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \ -+ RX_CFG_ENABLE_ONLY_MY_DEST_MAC) -+ -+#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \ -+ RX_CFG_ENABLE_ONLY_MY_DEST_MAC) -+ -+#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ -+ | CFG_RX_CTL_EN | CFG_RX_BCN_EN\ -+ | CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) -+ -+#define RX_FILTER_OPTION_FILTER_ALL 0 -+ -+#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\ -+ | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN) -+ -+#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ -+ | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\ -+ | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\ -+ | CFG_RX_PRSP_EN) -+ -+ -+/*=============================================== -+ Phy regs -+ ===============================================*/ -+#define ACX_PHY_ADDR_REG SBB_ADDR -+#define ACX_PHY_DATA_REG SBB_DATA 
-+#define ACX_PHY_CTRL_REG SBB_CTL -+#define ACX_PHY_REG_WR_MASK 0x00000001ul -+#define ACX_PHY_REG_RD_MASK 0x00000002ul -+ -+ -+/*=============================================== -+ EEPROM Read/Write Request 32bit RW -+ ------------------------------------------ -+ 1 EE_READ - EEPROM Read Request 1 - Setting this bit -+ loads a single byte of data into the EE_DATA -+ register from the EEPROM location specified in -+ the EE_ADDR register. -+ The Wlan hardware hardware clears this bit automatically. -+ EE_DATA is valid when this bit is cleared. -+ -+ 0 EE_WRITE - EEPROM Write Request - Setting this bit -+ writes a single byte of data from the EE_DATA register into the -+ EEPROM location specified in the EE_ADDR register. -+ The Wlan hardware hardware clears this bit automatically. -+*===============================================*/ -+#define ACX_EE_CTL_REG EE_CTL -+#define EE_WRITE 0x00000001ul -+#define EE_READ 0x00000002ul -+ -+/*=============================================== -+ EEPROM Address - 32bit RW -+ ------------------------------------------ -+ This register specifies the address -+ within the EEPROM from/to which to read/write data. -+ ===============================================*/ -+#define ACX_EE_ADDR_REG EE_ADDR -+ -+/*=============================================== -+ EEPROM Data - 32bit RW -+ ------------------------------------------ -+ This register either holds the read 8 bits of -+ data from the EEPROM or the write data -+ to be written to the EEPROM. -+ ===============================================*/ -+#define ACX_EE_DATA_REG EE_DATA -+ -+/*=============================================== -+ EEPROM Base Address - 32bit RW -+ ------------------------------------------ -+ This register holds the upper nine bits -+ [23:15] of the 24-bit Wlan hardware memory -+ address for burst reads from EEPROM accesses. -+ The EEPROM provides the lower 15 bits of this address. -+ The MSB of the address from the EEPROM is ignored. -+ ===============================================*/ -+#define ACX_EE_CFG EE_CFG -+ -+/*=============================================== -+ GPIO Output Values -32bit, RW -+ ------------------------------------------ -+ [31:16] Reserved -+ [15: 0] Specify the output values (at the output driver inputs) for -+ GPIO[15:0], respectively. 
-+ ===============================================*/ -+#define ACX_GPIO_OUT_REG GPIO_OUT -+#define ACX_MAX_GPIO_LINES 15 -+ -+/*=============================================== -+ Contention window -32bit, RW -+ ------------------------------------------ -+ [31:26] Reserved -+ [25:16] Max (0x3ff) -+ [15:07] Reserved -+ [06:00] Current contention window value - default is 0x1F -+ ===============================================*/ -+#define ACX_CONT_WIND_CFG_REG CONT_WIND_CFG -+#define ACX_CONT_WIND_MIN_MASK 0x0000007f -+#define ACX_CONT_WIND_MAX 0x03ff0000 -+ -+/* -+ * Indirect slave register/memory registers -+ * ---------------------------------------- -+ */ -+#define HW_SLAVE_REG_ADDR_REG 0x00000004 -+#define HW_SLAVE_REG_DATA_REG 0x00000008 -+#define HW_SLAVE_REG_CTRL_REG 0x0000000c -+ -+#define SLAVE_AUTO_INC 0x00010000 -+#define SLAVE_NO_AUTO_INC 0x00000000 -+#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000 -+ -+#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR -+#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA -+#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL -+#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL -+ -+#define HW_FUNC_EVENT_INT_EN 0x8000 -+#define HW_FUNC_EVENT_MASK_REG 0x00000034 -+ -+#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP) -+ -+/*=============================================== -+ HI_CFG Interface Configuration Register Values -+ ------------------------------------------ -+ ===============================================*/ -+#define HI_CFG_UART_ENABLE 0x00000004 -+#define HI_CFG_RST232_ENABLE 0x00000008 -+#define HI_CFG_CLOCK_REQ_SELECT 0x00000010 -+#define HI_CFG_HOST_INT_ENABLE 0x00000020 -+#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040 -+#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080 -+#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100 -+#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 -+#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 -+ -+/* -+ * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile -+ * for platforms using active high interrupt level -+ */ -+#ifdef USE_ACTIVE_HIGH -+#define HI_CFG_DEF_VAL \ -+ (HI_CFG_UART_ENABLE | \ -+ HI_CFG_RST232_ENABLE | \ -+ HI_CFG_CLOCK_REQ_SELECT | \ -+ HI_CFG_HOST_INT_ENABLE) -+#else -+#define HI_CFG_DEF_VAL \ -+ (HI_CFG_UART_ENABLE | \ -+ HI_CFG_RST232_ENABLE | \ -+ HI_CFG_CLOCK_REQ_SELECT | \ -+ HI_CFG_HOST_INT_ENABLE) -+ -+#endif -+ -+#define REF_FREQ_19_2 0 -+#define REF_FREQ_26_0 1 -+#define REF_FREQ_38_4 2 -+#define REF_FREQ_40_0 3 -+#define REF_FREQ_33_6 4 -+#define REF_FREQ_NUM 5 -+ -+#define LUT_PARAM_INTEGER_DIVIDER 0 -+#define LUT_PARAM_FRACTIONAL_DIVIDER 1 -+#define LUT_PARAM_ATTN_BB 2 -+#define LUT_PARAM_ALPHA_BB 3 -+#define LUT_PARAM_STOP_TIME_BB 4 -+#define LUT_PARAM_BB_PLL_LOOP_FILTER 5 -+#define LUT_PARAM_NUM 6 -+ -+#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) -+#define USE_EEPROM 0 -+#define SOFT_RESET_MAX_TIME 1000000 -+#define SOFT_RESET_STALL_TIME 1000 -+#define NVS_DATA_BUNDARY_ALIGNMENT 4 -+ -+ -+/* Firmware image load chunk size */ -+#define CHUNK_SIZE 512 -+ -+/* Firmware image header size */ -+#define FW_HDR_SIZE 8 -+ -+#define ECPU_CONTROL_HALT 0x00000101 -+ -+ -+/****************************************************************************** -+ -+ CHANNELS, BAND & REG DOMAINS definitions -+ -+******************************************************************************/ -+ -+ -+enum { -+ RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */ -+ RADIO_BAND_5GHZ = 1, /* 5 Ghz band */ -+ RADIO_BAND_JAPAN_4_9_GHZ = 2, -+ DEFAULT_BAND = RADIO_BAND_2_4GHZ, -+ INVALID_BAND = 0xFE, -+ MAX_RADIO_BANDS = 0xFF -+}; -+ -+enum { -+ NO_RATE = 0, 
-+ RATE_1MBPS = 0x0A, -+ RATE_2MBPS = 0x14, -+ RATE_5_5MBPS = 0x37, -+ RATE_6MBPS = 0x0B, -+ RATE_9MBPS = 0x0F, -+ RATE_11MBPS = 0x6E, -+ RATE_12MBPS = 0x0A, -+ RATE_18MBPS = 0x0E, -+ RATE_22MBPS = 0xDC, -+ RATE_24MBPS = 0x09, -+ RATE_36MBPS = 0x0D, -+ RATE_48MBPS = 0x08, -+ RATE_54MBPS = 0x0C -+}; -+ -+enum { -+ RATE_INDEX_1MBPS = 0, -+ RATE_INDEX_2MBPS = 1, -+ RATE_INDEX_5_5MBPS = 2, -+ RATE_INDEX_6MBPS = 3, -+ RATE_INDEX_9MBPS = 4, -+ RATE_INDEX_11MBPS = 5, -+ RATE_INDEX_12MBPS = 6, -+ RATE_INDEX_18MBPS = 7, -+ RATE_INDEX_22MBPS = 8, -+ RATE_INDEX_24MBPS = 9, -+ RATE_INDEX_36MBPS = 10, -+ RATE_INDEX_48MBPS = 11, -+ RATE_INDEX_54MBPS = 12, -+ RATE_INDEX_MAX = RATE_INDEX_54MBPS, -+ MAX_RATE_INDEX, -+ INVALID_RATE_INDEX = MAX_RATE_INDEX, -+ RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF -+}; -+ -+enum { -+ RATE_MASK_1MBPS = 0x1, -+ RATE_MASK_2MBPS = 0x2, -+ RATE_MASK_5_5MBPS = 0x4, -+ RATE_MASK_11MBPS = 0x20, -+}; -+ -+#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ -+#define OFDM_RATE_BIT BIT(6) -+#define PBCC_RATE_BIT BIT(7) -+ -+enum { -+ CCK_LONG = 0, -+ CCK_SHORT = SHORT_PREAMBLE_BIT, -+ PBCC_LONG = PBCC_RATE_BIT, -+ PBCC_SHORT = PBCC_RATE_BIT | SHORT_PREAMBLE_BIT, -+ OFDM = OFDM_RATE_BIT -+}; -+ -+/****************************************************************************** -+ -+Transmit-Descriptor RATE-SET field definitions... -+ -+Define a new "Rate-Set" for TX path that incorporates the -+Rate & Modulation info into a single 16-bit field. -+ -+TxdRateSet_t: -+b15 - Indicates Preamble type (1=SHORT, 0=LONG). -+ Notes: -+ Must be LONG (0) for 1Mbps rate. -+ Does not apply (set to 0) for RevG-OFDM rates. -+b14 - Indicates PBCC encoding (1=PBCC, 0=not). -+ Notes: -+ Does not apply (set to 0) for rates 1 and 2 Mbps. -+ Does not apply (set to 0) for RevG-OFDM rates. -+b13 - Unused (set to 0). -+b12-b0 - Supported Rate indicator bits as defined below. -+ -+******************************************************************************/ -+ -+ -+#define TNETW1251_CHIP_ID_PG1_0 0x07010101 -+#define TNETW1251_CHIP_ID_PG1_1 0x07020101 -+#define TNETW1251_CHIP_ID_PG1_2 0x07030101 -+ -+/************************************************************************* -+ -+ Interrupt Trigger Register (Host -> WiLink) -+ -+**************************************************************************/ -+ -+/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ -+ -+/* -+ * Host Command Interrupt. Setting this bit masks -+ * the interrupt that the host issues to inform -+ * the FW that it has sent a command -+ * to the Wlan hardware Command Mailbox. -+ */ -+#define INTR_TRIG_CMD BIT(0) -+ -+/* -+ * Host Event Acknowlegde Interrupt. The host -+ * sets this bit to acknowledge that it received -+ * the unsolicited information from the event -+ * mailbox. -+ */ -+#define INTR_TRIG_EVENT_ACK BIT(1) -+ -+/* -+ * The host sets this bit to inform the Wlan -+ * FW that a TX packet is in the XFER -+ * Buffer #0. -+ */ -+#define INTR_TRIG_TX_PROC0 BIT(2) -+ -+/* -+ * The host sets this bit to inform the FW -+ * that it read a packet from RX XFER -+ * Buffer #0. -+ */ -+#define INTR_TRIG_RX_PROC0 BIT(3) -+ -+#define INTR_TRIG_DEBUG_ACK BIT(4) -+ -+#define INTR_TRIG_STATE_CHANGED BIT(5) -+ -+ -+/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ -+ -+/* -+ * The host sets this bit to inform the FW -+ * that it read a packet from RX XFER -+ * Buffer #1. 
-+ */ -+#define INTR_TRIG_RX_PROC1 BIT(17) -+ -+/* -+ * The host sets this bit to inform the Wlan -+ * hardware that a TX packet is in the XFER -+ * Buffer #1. -+ */ -+#define INTR_TRIG_TX_PROC1 BIT(18) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_rx.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_rx.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_rx.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_rx.c 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,195 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+#include "wl1251_rx.h" -+#include "wl1251_acx.h" -+ -+static void wl1251_rx_header(struct wl1251 *wl, -+ struct wl1251_rx_descriptor *desc) -+{ -+ u32 rx_packet_ring_addr; -+ -+ rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr; -+ if (wl->rx_current_buffer) -+ rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; -+ -+ wl1251_spi_mem_read(wl, rx_packet_ring_addr, desc, sizeof(*desc)); -+} -+ -+static void wl1251_rx_status(struct wl1251 *wl, -+ struct wl1251_rx_descriptor *desc, -+ struct ieee80211_rx_status *status, -+ u8 beacon) -+{ -+ u64 mactime; -+ int ret; -+ -+ memset(status, 0, sizeof(struct ieee80211_rx_status)); -+ -+ status->band = IEEE80211_BAND_2GHZ; -+ status->mactime = desc->timestamp; -+ -+ /* -+ * The rx status timestamp is a 32 bits value while the TSF is a -+ * 64 bits one. -+ * For IBSS merging, TSF is mandatory, so we have to get it -+ * somehow, so we ask for ACX_TSF_INFO. -+ * That could be moved to the get_tsf() hook, but unfortunately, -+ * this one must be atomic, while our SPI routines can sleep. 
-+ */ -+ if ((wl->bss_type == BSS_TYPE_IBSS) && beacon) { -+ ret = wl1251_acx_tsf_info(wl, &mactime); -+ if (ret == 0) -+ status->mactime = mactime; -+ } -+ -+ status->signal = desc->rssi; -+ status->qual = (desc->rssi - WL1251_RX_MIN_RSSI) * 100 / -+ (WL1251_RX_MAX_RSSI - WL1251_RX_MIN_RSSI); -+ status->qual = min(status->qual, 100); -+ status->qual = max(status->qual, 0); -+ -+ /* -+ * FIXME: guessing that snr needs to be divided by two, otherwise -+ * the values don't make any sense -+ */ -+ status->noise = desc->rssi - desc->snr / 2; -+ -+ status->freq = ieee80211_channel_to_frequency(desc->channel); -+ -+ status->flag |= RX_FLAG_TSFT; -+ -+ if (desc->flags & RX_DESC_ENCRYPTION_MASK) { -+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; -+ -+ if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL))) -+ status->flag |= RX_FLAG_DECRYPTED; -+ -+ if (unlikely(desc->flags & RX_DESC_MIC_FAIL)) -+ status->flag |= RX_FLAG_MMIC_ERROR; -+ } -+ -+ if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) -+ status->flag |= RX_FLAG_FAILED_FCS_CRC; -+ -+ -+ /* FIXME: set status->rate_idx */ -+} -+ -+static void wl1251_rx_body(struct wl1251 *wl, -+ struct wl1251_rx_descriptor *desc) -+{ -+ struct sk_buff *skb; -+ struct ieee80211_rx_status status; -+ u8 *rx_buffer, beacon = 0; -+ u16 length, *fc; -+ u32 curr_id, last_id_inc, rx_packet_ring_addr; -+ -+ length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH); -+ curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT; -+ last_id_inc = (wl->rx_last_id + 1) % (RX_MAX_PACKET_ID + 1); -+ -+ if (last_id_inc != curr_id) { -+ wl1251_warning("curr ID:%d, last ID inc:%d", -+ curr_id, last_id_inc); -+ wl->rx_last_id = curr_id; -+ } else { -+ wl->rx_last_id = last_id_inc; -+ } -+ -+ rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr + -+ sizeof(struct wl1251_rx_descriptor) + 20; -+ if (wl->rx_current_buffer) -+ rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; -+ -+ skb = dev_alloc_skb(length); -+ if (!skb) { -+ wl1251_error("Couldn't allocate RX frame"); -+ return; -+ } -+ -+ rx_buffer = skb_put(skb, length); -+ wl1251_spi_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); -+ -+ /* The actual lenght doesn't include the target's alignment */ -+ skb->len = desc->length - PLCP_HEADER_LENGTH; -+ -+ fc = (u16 *)skb->data; -+ -+ if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) -+ beacon = 1; -+ -+ wl1251_rx_status(wl, desc, &status, beacon); -+ -+ wl1251_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, -+ beacon ? 
"beacon" : ""); -+ -+ ieee80211_rx(wl->hw, skb, &status); -+} -+ -+static void wl1251_rx_ack(struct wl1251 *wl) -+{ -+ u32 data, addr; -+ -+ if (wl->rx_current_buffer) { -+ addr = ACX_REG_INTERRUPT_TRIG_H; -+ data = INTR_TRIG_RX_PROC1; -+ } else { -+ addr = ACX_REG_INTERRUPT_TRIG; -+ data = INTR_TRIG_RX_PROC0; -+ } -+ -+ wl1251_reg_write32(wl, addr, data); -+ -+ /* Toggle buffer ring */ -+ wl->rx_current_buffer = !wl->rx_current_buffer; -+} -+ -+ -+void wl1251_rx(struct wl1251 *wl) -+{ -+ struct wl1251_rx_descriptor *rx_desc; -+ -+ if (wl->state != WL1251_STATE_ON) -+ return; -+ -+ rx_desc = wl->rx_descriptor; -+ -+ /* We first read the frame's header */ -+ wl1251_rx_header(wl, rx_desc); -+ -+ /* Now we can read the body */ -+ wl1251_rx_body(wl, rx_desc); -+ -+ /* Finally, we need to ACK the RX */ -+ wl1251_rx_ack(wl); -+ -+ return; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_rx.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_rx.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_rx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_rx.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,124 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_RX_H__ -+#define __WL1251_RX_H__ -+ -+#include -+ -+#include "wl1251.h" -+ -+/* -+ * RX PATH -+ * -+ * The Rx path uses a double buffer and an rx_contro structure, each located -+ * at a fixed address in the device memory. The host keeps track of which -+ * buffer is available and alternates between them on a per packet basis. -+ * The size of each of the two buffers is large enough to hold the longest -+ * 802.3 packet. -+ * The RX path goes like that: -+ * 1) The target generates an interrupt each time a new packet is received. -+ * There are 2 RX interrupts, one for each buffer. -+ * 2) The host reads the received packet from one of the double buffers. -+ * 3) The host triggers a target interrupt. -+ * 4) The target prepares the next RX packet. 
-+ */ -+ -+#define WL1251_RX_MAX_RSSI -30 -+#define WL1251_RX_MIN_RSSI -95 -+ -+#define WL1251_RX_ALIGN_TO 4 -+#define WL1251_RX_ALIGN(len) (((len) + WL1251_RX_ALIGN_TO - 1) & \ -+ ~(WL1251_RX_ALIGN_TO - 1)) -+ -+#define SHORT_PREAMBLE_BIT BIT(0) -+#define OFDM_RATE_BIT BIT(6) -+#define PBCC_RATE_BIT BIT(7) -+ -+#define PLCP_HEADER_LENGTH 8 -+#define RX_DESC_PACKETID_SHIFT 11 -+#define RX_MAX_PACKET_ID 3 -+ -+#define RX_DESC_VALID_FCS 0x0001 -+#define RX_DESC_MATCH_RXADDR1 0x0002 -+#define RX_DESC_MCAST 0x0004 -+#define RX_DESC_STAINTIM 0x0008 -+#define RX_DESC_VIRTUAL_BM 0x0010 -+#define RX_DESC_BCAST 0x0020 -+#define RX_DESC_MATCH_SSID 0x0040 -+#define RX_DESC_MATCH_BSSID 0x0080 -+#define RX_DESC_ENCRYPTION_MASK 0x0300 -+#define RX_DESC_MEASURMENT 0x0400 -+#define RX_DESC_SEQNUM_MASK 0x1800 -+#define RX_DESC_MIC_FAIL 0x2000 -+#define RX_DESC_DECRYPT_FAIL 0x4000 -+ -+struct wl1251_rx_descriptor { -+ u32 timestamp; /* In microseconds */ -+ u16 length; /* Paylod length, including headers */ -+ u16 flags; -+ -+ /* -+ * 0 - 802.11 -+ * 1 - 802.3 -+ * 2 - IP -+ * 3 - Raw Codec -+ */ -+ u8 type; -+ -+ /* -+ * Recevied Rate: -+ * 0x0A - 1MBPS -+ * 0x14 - 2MBPS -+ * 0x37 - 5_5MBPS -+ * 0x0B - 6MBPS -+ * 0x0F - 9MBPS -+ * 0x6E - 11MBPS -+ * 0x0A - 12MBPS -+ * 0x0E - 18MBPS -+ * 0xDC - 22MBPS -+ * 0x09 - 24MBPS -+ * 0x0D - 36MBPS -+ * 0x08 - 48MBPS -+ * 0x0C - 54MBPS -+ */ -+ u8 rate; -+ -+ u8 mod_pre; /* Modulation and preamble */ -+ u8 channel; -+ -+ /* -+ * 0 - 2.4 Ghz -+ * 1 - 5 Ghz -+ */ -+ u8 band; -+ -+ s8 rssi; /* in dB */ -+ u8 rcpi; /* in dB */ -+ u8 snr; /* in dB */ -+} __attribute__ ((packed)); -+ -+void wl1251_rx(struct wl1251 *wl); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_spi.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_spi.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_spi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_spi.c 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,409 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+ -+/* FIXME: this is static data nowadays and the table can be removed */ -+static enum wl12xx_acx_int_reg wl1251_acx_reg_table[ACX_REG_TABLE_LEN] = { -+ [ACX_REG_INTERRUPT_TRIG] = (REGISTERS_BASE + 0x0474), -+ [ACX_REG_INTERRUPT_TRIG_H] = (REGISTERS_BASE + 0x0478), -+ [ACX_REG_INTERRUPT_MASK] = (REGISTERS_BASE + 0x0494), -+ [ACX_REG_HINT_MASK_SET] = (REGISTERS_BASE + 0x0498), -+ [ACX_REG_HINT_MASK_CLR] = (REGISTERS_BASE + 0x049C), -+ [ACX_REG_INTERRUPT_NO_CLEAR] = (REGISTERS_BASE + 0x04B0), -+ [ACX_REG_INTERRUPT_CLEAR] = (REGISTERS_BASE + 0x04A4), -+ [ACX_REG_INTERRUPT_ACK] = (REGISTERS_BASE + 0x04A8), -+ [ACX_REG_SLV_SOFT_RESET] = (REGISTERS_BASE + 0x0000), -+ [ACX_REG_EE_START] = (REGISTERS_BASE + 0x080C), -+ [ACX_REG_ECPU_CONTROL] = (REGISTERS_BASE + 0x0804) -+}; -+ -+static int wl1251_translate_reg_addr(struct wl1251 *wl, int addr) -+{ -+ /* If the address is lower than REGISTERS_BASE, it means that this is -+ * a chip-specific register address, so look it up in the registers -+ * table */ -+ if (addr < REGISTERS_BASE) { -+ /* Make sure we don't go over the table */ -+ if (addr >= ACX_REG_TABLE_LEN) { -+ wl1251_error("address out of range (%d)", addr); -+ return -EINVAL; -+ } -+ addr = wl1251_acx_reg_table[addr]; -+ } -+ -+ return addr - wl->physical_reg_addr + wl->virtual_reg_addr; -+} -+ -+static int wl1251_translate_mem_addr(struct wl1251 *wl, int addr) -+{ -+ return addr - wl->physical_mem_addr + wl->virtual_mem_addr; -+} -+ -+ -+void wl1251_spi_reset(struct wl1251 *wl) -+{ -+ u8 *cmd; -+ struct spi_transfer t; -+ struct spi_message m; -+ -+ cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); -+ if (!cmd) { -+ wl1251_error("could not allocate cmd for spi reset"); -+ return; -+ } -+ -+ memset(&t, 0, sizeof(t)); -+ spi_message_init(&m); -+ -+ memset(cmd, 0xff, WSPI_INIT_CMD_LEN); -+ -+ t.tx_buf = cmd; -+ t.len = WSPI_INIT_CMD_LEN; -+ spi_message_add_tail(&t, &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); -+} -+ -+void wl1251_spi_init(struct wl1251 *wl) -+{ -+ u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; -+ struct spi_transfer t; -+ struct spi_message m; -+ -+ cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); -+ if (!cmd) { -+ wl1251_error("could not allocate cmd for spi init"); -+ return; -+ } -+ -+ memset(crc, 0, sizeof(crc)); -+ memset(&t, 0, sizeof(t)); -+ spi_message_init(&m); -+ -+ /* -+ * Set WSPI_INIT_COMMAND -+ * the data is being send from the MSB to LSB -+ */ -+ cmd[2] = 0xff; -+ cmd[3] = 0xff; -+ cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; -+ cmd[0] = 0; -+ cmd[7] = 0; -+ cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; -+ cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; -+ -+ if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) -+ cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; -+ else -+ cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; -+ -+ cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS -+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS; -+ -+ crc[0] = cmd[1]; -+ crc[1] = cmd[0]; -+ crc[2] = cmd[7]; -+ crc[3] = cmd[6]; -+ crc[4] = cmd[5]; -+ -+ cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; -+ cmd[4] |= WSPI_INIT_CMD_END; -+ -+ t.tx_buf = cmd; -+ t.len = 
WSPI_INIT_CMD_LEN; -+ spi_message_add_tail(&t, &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); -+} -+ -+/* Set the SPI partitions to access the chip addresses -+ * -+ * There are two VIRTUAL (SPI) partitions (the memory partition and the -+ * registers partition), which are mapped to two different areas of the -+ * PHYSICAL (hardware) memory. This function also makes other checks to -+ * ensure that the partitions are not overlapping. In the diagram below, the -+ * memory partition comes before the register partition, but the opposite is -+ * also supported. -+ * -+ * PHYSICAL address -+ * space -+ * -+ * | | -+ * ...+----+--> mem_start -+ * VIRTUAL address ... | | -+ * space ... | | [PART_0] -+ * ... | | -+ * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size -+ * | | ... | | -+ * |MEM | ... | | -+ * | | ... | | -+ * part_size <--+----+... | | {unused area) -+ * | | ... | | -+ * |REG | ... | | -+ * part_size | | ... | | -+ * + <--+----+... ...+----+--> reg_start -+ * reg_size ... | | -+ * ... | | [PART_1] -+ * ... | | -+ * ...+----+--> reg_start + reg_size -+ * | | -+ * -+ */ -+int wl1251_set_partition(struct wl1251 *wl, -+ u32 mem_start, u32 mem_size, -+ u32 reg_start, u32 reg_size) -+{ -+ struct wl1251_partition *partition; -+ struct spi_transfer t; -+ struct spi_message m; -+ size_t len, cmd_len; -+ u32 *cmd; -+ int addr; -+ -+ cmd_len = sizeof(u32) + 2 * sizeof(struct wl1251_partition); -+ cmd = kzalloc(cmd_len, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ spi_message_init(&m); -+ memset(&t, 0, sizeof(t)); -+ -+ partition = (struct wl1251_partition *) (cmd + 1); -+ addr = HW_ACCESS_PART0_SIZE_ADDR; -+ len = 2 * sizeof(struct wl1251_partition); -+ -+ *cmd |= WSPI_CMD_WRITE; -+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; -+ *cmd |= addr & WSPI_CMD_BYTE_ADDR; -+ -+ wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ -+ /* Make sure that the two partitions together don't exceed the -+ * address range */ -+ if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) { -+ wl1251_debug(DEBUG_SPI, "Total size exceeds maximum virtual" -+ " address range. Truncating partition[0]."); -+ mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size; -+ wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ } -+ -+ if ((mem_start < reg_start) && -+ ((mem_start + mem_size) > reg_start)) { -+ /* Guarantee that the memory partition doesn't overlap the -+ * registers partition */ -+ wl1251_debug(DEBUG_SPI, "End of partition[0] is " -+ "overlapping partition[1]. Adjusted."); -+ mem_size = reg_start - mem_start; -+ wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ } else if ((reg_start < mem_start) && -+ ((reg_start + reg_size) > mem_start)) { -+ /* Guarantee that the register partition doesn't overlap the -+ * memory partition */ -+ wl1251_debug(DEBUG_SPI, "End of partition[1] is" -+ " overlapping partition[0]. 
Adjusted."); -+ reg_size = mem_start - reg_start; -+ wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ } -+ -+ partition[0].start = mem_start; -+ partition[0].size = mem_size; -+ partition[1].start = reg_start; -+ partition[1].size = reg_size; -+ -+ wl->physical_mem_addr = mem_start; -+ wl->physical_reg_addr = reg_start; -+ -+ wl->virtual_mem_addr = 0; -+ wl->virtual_reg_addr = mem_size; -+ -+ t.tx_buf = cmd; -+ t.len = cmd_len; -+ spi_message_add_tail(&t, &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ kfree(cmd); -+ -+ return 0; -+} -+ -+void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf, -+ size_t len, bool fixed) -+{ -+ struct spi_transfer t[3]; -+ struct spi_message m; -+ u8 *busy_buf; -+ u32 *cmd; -+ -+ cmd = &wl->buffer_cmd; -+ busy_buf = wl->buffer_busyword; -+ -+ *cmd = 0; -+ *cmd |= WSPI_CMD_READ; -+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; -+ *cmd |= addr & WSPI_CMD_BYTE_ADDR; -+ -+ if (fixed) -+ *cmd |= WSPI_CMD_FIXED; -+ -+ spi_message_init(&m); -+ memset(t, 0, sizeof(t)); -+ -+ t[0].tx_buf = cmd; -+ t[0].len = 4; -+ spi_message_add_tail(&t[0], &m); -+ -+ /* Busy and non busy words read */ -+ t[1].rx_buf = busy_buf; -+ t[1].len = WL1251_BUSY_WORD_LEN; -+ spi_message_add_tail(&t[1], &m); -+ -+ t[2].rx_buf = buf; -+ t[2].len = len; -+ spi_message_add_tail(&t[2], &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ /* FIXME: check busy words */ -+ -+ wl1251_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); -+ wl1251_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); -+} -+ -+void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf, -+ size_t len, bool fixed) -+{ -+ struct spi_transfer t[2]; -+ struct spi_message m; -+ u32 *cmd; -+ -+ cmd = &wl->buffer_cmd; -+ -+ *cmd = 0; -+ *cmd |= WSPI_CMD_WRITE; -+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; -+ *cmd |= addr & WSPI_CMD_BYTE_ADDR; -+ -+ if (fixed) -+ *cmd |= WSPI_CMD_FIXED; -+ -+ spi_message_init(&m); -+ memset(t, 0, sizeof(t)); -+ -+ t[0].tx_buf = cmd; -+ t[0].len = sizeof(*cmd); -+ spi_message_add_tail(&t[0], &m); -+ -+ t[1].tx_buf = buf; -+ t[1].len = len; -+ spi_message_add_tail(&t[1], &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ wl1251_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); -+ wl1251_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); -+} -+ -+void wl1251_spi_mem_read(struct wl1251 *wl, int addr, void *buf, -+ size_t len) -+{ -+ int physical; -+ -+ physical = wl1251_translate_mem_addr(wl, addr); -+ -+ wl1251_spi_read(wl, physical, buf, len, false); -+} -+ -+void wl1251_spi_mem_write(struct wl1251 *wl, int addr, void *buf, -+ size_t len) -+{ -+ int physical; -+ -+ physical = wl1251_translate_mem_addr(wl, addr); -+ -+ wl1251_spi_write(wl, physical, buf, len, false); -+} -+ -+void wl1251_spi_reg_read(struct wl1251 *wl, int addr, void *buf, size_t len, -+ bool fixed) -+{ -+ int physical; -+ -+ physical = wl1251_translate_reg_addr(wl, addr); -+ -+ wl1251_spi_read(wl, physical, buf, len, fixed); -+} -+ -+void wl1251_spi_reg_write(struct wl1251 *wl, int addr, void *buf, size_t len, -+ bool fixed) -+{ -+ int physical; -+ -+ physical = wl1251_translate_reg_addr(wl, addr); -+ -+ wl1251_spi_write(wl, physical, buf, len, fixed); -+} -+ -+u32 wl1251_mem_read32(struct wl1251 *wl, int addr) -+{ -+ return wl1251_read32(wl, wl1251_translate_mem_addr(wl, addr)); -+} -+ -+void wl1251_mem_write32(struct wl1251 *wl, int addr, u32 val) -+{ -+ wl1251_write32(wl, 
wl1251_translate_mem_addr(wl, addr), val); -+} -+ -+u32 wl1251_reg_read32(struct wl1251 *wl, int addr) -+{ -+ return wl1251_read32(wl, wl1251_translate_reg_addr(wl, addr)); -+} -+ -+void wl1251_reg_write32(struct wl1251 *wl, int addr, u32 val) -+{ -+ wl1251_write32(wl, wl1251_translate_reg_addr(wl, addr), val); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_spi.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_spi.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_spi.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_spi.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,111 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_SPI_H__ -+#define __WL1251_SPI_H__ -+ -+#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 -+ -+#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0 -+#define HW_ACCESS_PART0_START_ADDR 0x1FFC4 -+#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8 -+#define HW_ACCESS_PART1_START_ADDR 0x1FFCC -+ -+#define HW_ACCESS_REGISTER_SIZE 4 -+ -+#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 -+ -+#define WSPI_CMD_READ 0x40000000 -+#define WSPI_CMD_WRITE 0x00000000 -+#define WSPI_CMD_FIXED 0x20000000 -+#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000 -+#define WSPI_CMD_BYTE_LENGTH_OFFSET 17 -+#define WSPI_CMD_BYTE_ADDR 0x0001FFFF -+ -+#define WSPI_INIT_CMD_CRC_LEN 5 -+ -+#define WSPI_INIT_CMD_START 0x00 -+#define WSPI_INIT_CMD_TX 0x40 -+/* the extra bypass bit is sampled by the TNET as '1' */ -+#define WSPI_INIT_CMD_BYPASS_BIT 0x80 -+#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07 -+#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80 -+#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00 -+#define WSPI_INIT_CMD_IOD 0x40 -+#define WSPI_INIT_CMD_IP 0x20 -+#define WSPI_INIT_CMD_CS 0x10 -+#define WSPI_INIT_CMD_WS 0x08 -+#define WSPI_INIT_CMD_WSPI 0x01 -+#define WSPI_INIT_CMD_END 0x01 -+ -+#define WSPI_INIT_CMD_LEN 8 -+ -+#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \ -+ ((WL1251_BUSY_WORD_LEN - 4) / sizeof(u32)) -+#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 -+ -+ -+/* Raw target IO, address is not translated */ -+void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf, -+ size_t len, bool fixed); -+void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf, -+ size_t len, bool fixed); -+ -+/* Memory target IO, address is tranlated to partition 0 */ -+void wl1251_spi_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len); -+void wl1251_spi_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len); -+u32 wl1251_mem_read32(struct wl1251 *wl, int addr); -+void wl1251_mem_write32(struct wl1251 *wl, int addr, u32 val); -+ -+/* Registers IO */ -+void wl1251_spi_reg_read(struct wl1251 *wl, int addr, void *buf, size_t len, -+ bool 
fixed); -+void wl1251_spi_reg_write(struct wl1251 *wl, int addr, void *buf, size_t len, -+ bool fixed); -+u32 wl1251_reg_read32(struct wl1251 *wl, int addr); -+void wl1251_reg_write32(struct wl1251 *wl, int addr, u32 val); -+ -+/* INIT and RESET words */ -+void wl1251_spi_reset(struct wl1251 *wl); -+void wl1251_spi_init(struct wl1251 *wl); -+int wl1251_set_partition(struct wl1251 *wl, -+ u32 part_start, u32 part_size, -+ u32 reg_start, u32 reg_size); -+ -+static inline u32 wl1251_read32(struct wl1251 *wl, int addr) -+{ -+ wl1251_spi_read(wl, addr, &wl->buffer_32, -+ sizeof(wl->buffer_32), false); -+ -+ return wl->buffer_32; -+} -+ -+static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val) -+{ -+ wl->buffer_32 = val; -+ wl1251_spi_write(wl, addr, &wl->buffer_32, -+ sizeof(wl->buffer_32), false); -+} -+ -+#endif /* __WL1251_SPI_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_tx.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_tx.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_tx.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_tx.c 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,564 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+ -+#include "wl1251.h" -+#include "wl1251_reg.h" -+#include "wl1251_spi.h" -+#include "wl1251_tx.h" -+#include "wl1251_ps.h" -+ -+static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count) -+{ -+ int used, data_in_count; -+ -+ data_in_count = wl->data_in_count; -+ -+ if (data_in_count < data_out_count) -+ /* data_in_count has wrapped */ -+ data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1; -+ -+ used = data_in_count - data_out_count; -+ -+ WARN_ON(used < 0); -+ WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM); -+ -+ if (used >= DP_TX_PACKET_RING_CHUNK_NUM) -+ return true; -+ else -+ return false; -+} -+ -+static int wl1251_tx_path_status(struct wl1251 *wl) -+{ -+ u32 status, addr, data_out_count; -+ bool busy; -+ -+ addr = wl->data_path->tx_control_addr; -+ status = wl1251_mem_read32(wl, addr); -+ data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK; -+ busy = wl1251_tx_double_buffer_busy(wl, data_out_count); -+ -+ if (busy) -+ return -EBUSY; -+ -+ return 0; -+} -+ -+static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb) -+{ -+ int i; -+ -+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) -+ if (wl->tx_frames[i] == NULL) { -+ wl->tx_frames[i] = skb; -+ return i; -+ } -+ -+ return -EBUSY; -+} -+ -+static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr, -+ struct ieee80211_tx_info *control, u16 fc) -+{ -+ *(u16 *)&tx_hdr->control = 0; -+ -+ tx_hdr->control.rate_policy = 0; -+ -+ /* 802.11 packets */ -+ tx_hdr->control.packet_type = 0; -+ -+ if (control->flags & IEEE80211_TX_CTL_NO_ACK) -+ tx_hdr->control.ack_policy = 1; -+ -+ tx_hdr->control.tx_complete = 1; -+ -+ if ((fc & IEEE80211_FTYPE_DATA) && -+ ((fc & IEEE80211_STYPE_QOS_DATA) || -+ (fc & IEEE80211_STYPE_QOS_NULLFUNC))) -+ tx_hdr->control.qos = 1; -+} -+ -+/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). 
*/ -+#define MAX_MSDU_SECURITY_LENGTH 16 -+#define MAX_MPDU_SECURITY_LENGTH 16 -+#define WLAN_QOS_HDR_LEN 26 -+#define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \ -+ WLAN_QOS_HDR_LEN) -+#define HW_BLOCK_SIZE 252 -+static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr) -+{ -+ u16 payload_len, frag_threshold, mem_blocks; -+ u16 num_mpdus, mem_blocks_per_frag; -+ -+ frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; -+ tx_hdr->frag_threshold = cpu_to_le16(frag_threshold); -+ -+ payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH; -+ -+ if (payload_len > frag_threshold) { -+ mem_blocks_per_frag = -+ ((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) / -+ HW_BLOCK_SIZE) + 1; -+ num_mpdus = payload_len / frag_threshold; -+ mem_blocks = num_mpdus * mem_blocks_per_frag; -+ payload_len -= num_mpdus * frag_threshold; -+ num_mpdus++; -+ -+ } else { -+ mem_blocks_per_frag = 0; -+ mem_blocks = 0; -+ num_mpdus = 1; -+ } -+ -+ mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1; -+ -+ if (num_mpdus > 1) -+ mem_blocks += min(num_mpdus, mem_blocks_per_frag); -+ -+ tx_hdr->num_mem_blocks = mem_blocks; -+} -+ -+static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb, -+ struct ieee80211_tx_info *control) -+{ -+ struct tx_double_buffer_desc *tx_hdr; -+ struct ieee80211_rate *rate; -+ int id; -+ u16 fc; -+ -+ if (!skb) -+ return -EINVAL; -+ -+ id = wl1251_tx_id(wl, skb); -+ if (id < 0) -+ return id; -+ -+ fc = *(u16 *)skb->data; -+ tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb, -+ sizeof(*tx_hdr)); -+ -+ tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr)); -+ rate = ieee80211_get_tx_rate(wl->hw, control); -+ tx_hdr->rate = cpu_to_le16(rate->hw_value); -+ tx_hdr->expiry_time = cpu_to_le32(1 << 16); -+ tx_hdr->id = id; -+ -+ /* FIXME: how to get the correct queue id? */ -+ tx_hdr->xmit_queue = 0; -+ -+ wl1251_tx_control(tx_hdr, control, fc); -+ wl1251_tx_frag_block_num(tx_hdr); -+ -+ return 0; -+} -+ -+/* We copy the packet to the target */ -+static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, -+ struct ieee80211_tx_info *control) -+{ -+ struct tx_double_buffer_desc *tx_hdr; -+ int len; -+ u32 addr; -+ -+ if (!skb) -+ return -EINVAL; -+ -+ tx_hdr = (struct tx_double_buffer_desc *) skb->data; -+ -+ if (control->control.hw_key && -+ control->control.hw_key->alg == ALG_TKIP) { -+ int hdrlen; -+ u16 fc; -+ u8 *pos; -+ -+ fc = *(u16 *)(skb->data + sizeof(*tx_hdr)); -+ tx_hdr->length += WL1251_TKIP_IV_SPACE; -+ -+ hdrlen = ieee80211_hdrlen(fc); -+ -+ pos = skb_push(skb, WL1251_TKIP_IV_SPACE); -+ memmove(pos, pos + WL1251_TKIP_IV_SPACE, -+ sizeof(*tx_hdr) + hdrlen); -+ } -+ -+ /* Revisit. This is a workaround for getting non-aligned packets. -+ This happens at least with EAPOL packets from the user space. -+ Our DMA requires packets to be aligned on a 4-byte boundary. 
-+ */ -+ if (unlikely((long)skb->data & 0x03)) { -+ int offset = (4 - (long)skb->data) & 0x03; -+ wl1251_debug(DEBUG_TX, "skb offset %d", offset); -+ -+ /* check whether the current skb can be used */ -+ if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { -+ unsigned char *src = skb->data; -+ -+ /* align the buffer on a 4-byte boundary */ -+ skb_reserve(skb, offset); -+ memmove(skb->data, src, skb->len); -+ } else { -+ wl1251_info("No handler, fixme!"); -+ return -EINVAL; -+ } -+ } -+ -+ /* Our skb->data at this point includes the HW header */ -+ len = WL1251_TX_ALIGN(skb->len); -+ -+ if (wl->data_in_count & 0x1) -+ addr = wl->data_path->tx_packet_ring_addr + -+ wl->data_path->tx_packet_ring_chunk_size; -+ else -+ addr = wl->data_path->tx_packet_ring_addr; -+ -+ wl1251_spi_mem_write(wl, addr, skb->data, len); -+ -+ wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x", -+ tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate); -+ -+ return 0; -+} -+ -+static void wl1251_tx_trigger(struct wl1251 *wl) -+{ -+ u32 data, addr; -+ -+ if (wl->data_in_count & 0x1) { -+ addr = ACX_REG_INTERRUPT_TRIG_H; -+ data = INTR_TRIG_TX_PROC1; -+ } else { -+ addr = ACX_REG_INTERRUPT_TRIG; -+ data = INTR_TRIG_TX_PROC0; -+ } -+ -+ wl1251_reg_write32(wl, addr, data); -+ -+ /* Bumping data in */ -+ wl->data_in_count = (wl->data_in_count + 1) & -+ TX_STATUS_DATA_OUT_COUNT_MASK; -+} -+ -+/* caller must hold wl->mutex */ -+static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb) -+{ -+ struct ieee80211_tx_info *info; -+ int ret = 0; -+ u8 idx; -+ -+ info = IEEE80211_SKB_CB(skb); -+ -+ if (info->control.hw_key) { -+ idx = info->control.hw_key->hw_key_idx; -+ if (unlikely(wl->default_key != idx)) { -+ ret = wl1251_acx_default_key(wl, idx); -+ if (ret < 0) -+ return ret; -+ } -+ } -+ -+ ret = wl1251_tx_path_status(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_tx_fill_hdr(wl, skb, info); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1251_tx_send_packet(wl, skb, info); -+ if (ret < 0) -+ return ret; -+ -+ wl1251_tx_trigger(wl); -+ -+ return ret; -+} -+ -+void wl1251_tx_work(struct work_struct *work) -+{ -+ struct wl1251 *wl = container_of(work, struct wl1251, tx_work); -+ struct sk_buff *skb; -+ bool woken_up = false; -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ if (unlikely(wl->state == WL1251_STATE_OFF)) -+ goto out; -+ -+ while ((skb = skb_dequeue(&wl->tx_queue))) { -+ if (!woken_up) { -+ ret = wl1251_ps_elp_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ woken_up = true; -+ } -+ -+ ret = wl1251_tx_frame(wl, skb); -+ if (ret == -EBUSY) { -+ /* firmware buffer is full, stop queues */ -+ wl1251_debug(DEBUG_TX, "tx_work: fw buffer full, " -+ "stop queues"); -+ ieee80211_stop_queues(wl->hw); -+ wl->tx_queue_stopped = true; -+ skb_queue_head(&wl->tx_queue, skb); -+ goto out; -+ } else if (ret < 0) { -+ dev_kfree_skb(skb); -+ goto out; -+ } -+ } -+ -+out: -+ if (woken_up) -+ wl1251_ps_elp_sleep(wl); -+ -+ mutex_unlock(&wl->mutex); -+} -+ -+static const char *wl1251_tx_parse_status(u8 status) -+{ -+ /* 8 bit status field, one character per bit plus null */ -+ static char buf[9]; -+ int i = 0; -+ -+ memset(buf, 0, sizeof(buf)); -+ -+ if (status & TX_DMA_ERROR) -+ buf[i++] = 'm'; -+ if (status & TX_DISABLED) -+ buf[i++] = 'd'; -+ if (status & TX_RETRY_EXCEEDED) -+ buf[i++] = 'r'; -+ if (status & TX_TIMEOUT) -+ buf[i++] = 't'; -+ if (status & TX_KEY_NOT_FOUND) -+ buf[i++] = 'k'; -+ if (status & TX_ENCRYPT_FAIL) -+ buf[i++] = 'e'; -+ if (status & TX_UNAVAILABLE_PRIORITY) -+ buf[i++] = 'p'; -+ -+ /* bit 0 
is unused apparently */ -+ -+ return buf; -+} -+ -+static void wl1251_tx_packet_cb(struct wl1251 *wl, -+ struct tx_result *result) -+{ -+ struct ieee80211_tx_info *info; -+ struct sk_buff *skb; -+ int hdrlen, ret; -+ u8 *frame; -+ -+ skb = wl->tx_frames[result->id]; -+ if (skb == NULL) { -+ wl1251_error("SKB for packet %d is NULL", result->id); -+ return; -+ } -+ -+ info = IEEE80211_SKB_CB(skb); -+ -+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { -+ if (result->status == TX_SUCCESS) -+ info->flags |= IEEE80211_TX_STAT_ACK; -+ if (result->status & TX_RETRY_EXCEEDED) { -+ info->status.excessive_retries = 1; -+ wl->stats.excessive_retries++; -+ } -+ } -+ -+ info->status.retry_count = result->ack_failures; -+ wl->stats.retry_count += result->ack_failures; -+ -+ /* -+ * We have to remove our private TX header before pushing -+ * the skb back to mac80211. -+ */ -+ frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc)); -+ if (info->control.hw_key && -+ info->control.hw_key->alg == ALG_TKIP) { -+ hdrlen = ieee80211_get_hdrlen_from_skb(skb); -+ memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen); -+ skb_pull(skb, WL1251_TKIP_IV_SPACE); -+ } -+ -+ wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" -+ " status 0x%x (%s)", -+ result->id, skb, result->ack_failures, result->rate, -+ result->status, wl1251_tx_parse_status(result->status)); -+ -+ -+ ieee80211_tx_status(wl->hw, skb); -+ -+ wl->tx_frames[result->id] = NULL; -+ -+ if (wl->tx_queue_stopped) { -+ wl1251_debug(DEBUG_TX, "cb: queue was stopped"); -+ -+ skb = skb_dequeue(&wl->tx_queue); -+ -+ /* The skb can be NULL because tx_work might have been -+ scheduled before the queue was stopped making the -+ queue empty */ -+ -+ if (skb) { -+ ret = wl1251_tx_frame(wl, skb); -+ if (ret == -EBUSY) { -+ /* firmware buffer is still full */ -+ wl1251_debug(DEBUG_TX, "cb: fw buffer " -+ "still full"); -+ skb_queue_head(&wl->tx_queue, skb); -+ return; -+ } else if (ret < 0) { -+ dev_kfree_skb(skb); -+ return; -+ } -+ } -+ -+ wl1251_debug(DEBUG_TX, "cb: waking queues"); -+ ieee80211_wake_queues(wl->hw); -+ wl->tx_queue_stopped = false; -+ } -+} -+ -+/* Called upon reception of a TX complete interrupt */ -+void wl1251_tx_complete(struct wl1251 *wl) -+{ -+ int i, result_index, num_complete = 0; -+ struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; -+ -+ if (unlikely(wl->state != WL1251_STATE_ON)) -+ return; -+ -+ /* First we read the result */ -+ wl1251_spi_mem_read(wl, wl->data_path->tx_complete_addr, -+ result, sizeof(result)); -+ -+ result_index = wl->next_tx_complete; -+ -+ for (i = 0; i < ARRAY_SIZE(result); i++) { -+ result_ptr = &result[result_index]; -+ -+ if (result_ptr->done_1 == 1 && -+ result_ptr->done_2 == 1) { -+ wl1251_tx_packet_cb(wl, result_ptr); -+ -+ result_ptr->done_1 = 0; -+ result_ptr->done_2 = 0; -+ -+ result_index = (result_index + 1) & -+ (FW_TX_CMPLT_BLOCK_SIZE - 1); -+ num_complete++; -+ } else { -+ break; -+ } -+ } -+ -+ /* Every completed frame needs to be acknowledged */ -+ if (num_complete) { -+ /* -+ * If we've wrapped, we have to clear -+ * the results in 2 steps. 
-+ */ -+ if (result_index > wl->next_tx_complete) { -+ /* Only 1 write is needed */ -+ wl1251_spi_mem_write(wl, -+ wl->data_path->tx_complete_addr + -+ (wl->next_tx_complete * -+ sizeof(struct tx_result)), -+ &result[wl->next_tx_complete], -+ num_complete * -+ sizeof(struct tx_result)); -+ -+ -+ } else if (result_index < wl->next_tx_complete) { -+ /* 2 writes are needed */ -+ wl1251_spi_mem_write(wl, -+ wl->data_path->tx_complete_addr + -+ (wl->next_tx_complete * -+ sizeof(struct tx_result)), -+ &result[wl->next_tx_complete], -+ (FW_TX_CMPLT_BLOCK_SIZE - -+ wl->next_tx_complete) * -+ sizeof(struct tx_result)); -+ -+ wl1251_spi_mem_write(wl, -+ wl->data_path->tx_complete_addr, -+ result, -+ (num_complete - -+ FW_TX_CMPLT_BLOCK_SIZE + -+ wl->next_tx_complete) * -+ sizeof(struct tx_result)); -+ -+ } else { -+ /* We have to write the whole array */ -+ wl1251_spi_mem_write(wl, -+ wl->data_path->tx_complete_addr, -+ result, -+ FW_TX_CMPLT_BLOCK_SIZE * -+ sizeof(struct tx_result)); -+ } -+ -+ } -+ -+ wl->next_tx_complete = result_index; -+} -+ -+/* caller must hold wl->mutex */ -+void wl1251_tx_flush(struct wl1251 *wl) -+{ -+ int i; -+ struct sk_buff *skb; -+ struct ieee80211_tx_info *info; -+ -+ /* TX failure */ -+/* control->flags = 0; FIXME */ -+ -+ while ((skb = skb_dequeue(&wl->tx_queue))) { -+ info = IEEE80211_SKB_CB(skb); -+ -+ wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb); -+ -+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) -+ continue; -+ -+ ieee80211_tx_status(wl->hw, skb); -+ } -+ -+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) -+ if (wl->tx_frames[i] != NULL) { -+ skb = wl->tx_frames[i]; -+ info = IEEE80211_SKB_CB(skb); -+ -+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) -+ continue; -+ -+ ieee80211_tx_status(wl->hw, skb); -+ wl->tx_frames[i] = NULL; -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_tx.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_tx.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1251_tx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1251_tx.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,216 @@ -+/* -+ * This file is part of wl1251 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1251_TX_H__ -+#define __WL1251_TX_H__ -+ -+#include -+ -+/* -+ * -+ * TX PATH -+ * -+ * The Tx path uses a double buffer and a tx_control structure, each located -+ * at a fixed address in the device's memory. On startup, the host retrieves -+ * the pointers to these addresses. A double buffer allows for continuous data -+ * flow towards the device. The host keeps track of which buffer is available -+ * and alternates between these two buffers on a per packet basis. 
-+ * -+ * The size of each of the two buffers is large enough to hold the longest -+ * 802.3 packet - maximum size Ethernet packet + header + descriptor. -+ * TX complete indication will be received a-synchronously in a TX done cyclic -+ * buffer which is composed of 16 tx_result descriptors structures and is used -+ * in a cyclic manner. -+ * -+ * The TX (HOST) procedure is as follows: -+ * 1. Read the Tx path status, that will give the data_out_count. -+ * 2. goto 1, if not possible. -+ * i.e. if data_in_count - data_out_count >= HwBuffer size (2 for double -+ * buffer). -+ * 3. Copy the packet (preceded by double_buffer_desc), if possible. -+ * i.e. if data_in_count - data_out_count < HwBuffer size (2 for double -+ * buffer). -+ * 4. increment data_in_count. -+ * 5. Inform the firmware by generating a firmware internal interrupt. -+ * 6. FW will increment data_out_count after it reads the buffer. -+ * -+ * The TX Complete procedure: -+ * 1. To get a TX complete indication the host enables the tx_complete flag in -+ * the TX descriptor Structure. -+ * 2. For each packet with a Tx Complete field set, the firmware adds the -+ * transmit results to the cyclic buffer (txDoneRing) and sets both done_1 -+ * and done_2 to 1 to indicate driver ownership. -+ * 3. The firmware sends a Tx Complete interrupt to the host to trigger the -+ * host to process the new data. Note: interrupt will be send per packet if -+ * TX complete indication was requested in tx_control or per crossing -+ * aggregation threshold. -+ * 4. After receiving the Tx Complete interrupt, the host reads the -+ * TxDescriptorDone information in a cyclic manner and clears both done_1 -+ * and done_2 fields. -+ * -+ */ -+ -+#define TX_COMPLETE_REQUIRED_BIT 0x80 -+#define TX_STATUS_DATA_OUT_COUNT_MASK 0xf -+ -+#define WL1251_TX_ALIGN_TO 4 -+#define WL1251_TX_ALIGN(len) (((len) + WL1251_TX_ALIGN_TO - 1) & \ -+ ~(WL1251_TX_ALIGN_TO - 1)) -+#define WL1251_TKIP_IV_SPACE 4 -+ -+struct tx_control { -+ /* Rate Policy (class) index */ -+ unsigned rate_policy:3; -+ -+ /* When set, no ack policy is expected */ -+ unsigned ack_policy:1; -+ -+ /* -+ * Packet type: -+ * 0 -> 802.11 -+ * 1 -> 802.3 -+ * 2 -> IP -+ * 3 -> raw codec -+ */ -+ unsigned packet_type:2; -+ -+ /* If set, this is a QoS-Null or QoS-Data frame */ -+ unsigned qos:1; -+ -+ /* -+ * If set, the target triggers the tx complete INT -+ * upon frame sending completion. -+ */ -+ unsigned tx_complete:1; -+ -+ /* 2 bytes padding before packet header */ -+ unsigned xfer_pad:1; -+ -+ unsigned reserved:7; -+} __attribute__ ((packed)); -+ -+ -+struct tx_double_buffer_desc { -+ /* Length of payload, including headers. */ -+ u16 length; -+ -+ /* -+ * A bit mask that specifies the initial rate to be used -+ * Possible values are: -+ * 0x0001 - 1Mbits -+ * 0x0002 - 2Mbits -+ * 0x0004 - 5.5Mbits -+ * 0x0008 - 6Mbits -+ * 0x0010 - 9Mbits -+ * 0x0020 - 11Mbits -+ * 0x0040 - 12Mbits -+ * 0x0080 - 18Mbits -+ * 0x0100 - 22Mbits -+ * 0x0200 - 24Mbits -+ * 0x0400 - 36Mbits -+ * 0x0800 - 48Mbits -+ * 0x1000 - 54Mbits -+ */ -+ u16 rate; -+ -+ /* Time in us that a packet can spend in the target */ -+ u32 expiry_time; -+ -+ /* index of the TX queue used for this packet */ -+ u8 xmit_queue; -+ -+ /* Used to identify a packet */ -+ u8 id; -+ -+ struct tx_control control; -+ -+ /* -+ * The FW should cut the packet into fragments -+ * of this size. 
-+ */ -+ u16 frag_threshold; -+ -+ /* Numbers of HW queue blocks to be allocated */ -+ u8 num_mem_blocks; -+ -+ u8 reserved; -+} __attribute__ ((packed)); -+ -+enum { -+ TX_SUCCESS = 0, -+ TX_DMA_ERROR = BIT(7), -+ TX_DISABLED = BIT(6), -+ TX_RETRY_EXCEEDED = BIT(5), -+ TX_TIMEOUT = BIT(4), -+ TX_KEY_NOT_FOUND = BIT(3), -+ TX_ENCRYPT_FAIL = BIT(2), -+ TX_UNAVAILABLE_PRIORITY = BIT(1), -+}; -+ -+struct tx_result { -+ /* -+ * Ownership synchronization between the host and -+ * the firmware. If done_1 and done_2 are cleared, -+ * owned by the FW (no info ready). -+ */ -+ u8 done_1; -+ -+ /* same as double_buffer_desc->id */ -+ u8 id; -+ -+ /* -+ * Total air access duration consumed by this -+ * packet, including all retries and overheads. -+ */ -+ u16 medium_usage; -+ -+ /* Total media delay (from 1st EDCA AIFS counter until TX Complete). */ -+ u32 medium_delay; -+ -+ /* Time between host xfer and tx complete */ -+ u32 fw_hnadling_time; -+ -+ /* The LS-byte of the last TKIP sequence number. */ -+ u8 lsb_seq_num; -+ -+ /* Retry count */ -+ u8 ack_failures; -+ -+ /* At which rate we got a ACK */ -+ u16 rate; -+ -+ u16 reserved; -+ -+ /* TX_* */ -+ u8 status; -+ -+ /* See done_1 */ -+ u8 done_2; -+} __attribute__ ((packed)); -+ -+void wl1251_tx_work(struct work_struct *work); -+void wl1251_tx_complete(struct wl1251 *wl); -+void wl1251_tx_flush(struct wl1251 *wl); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_acx.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_acx.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_acx.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_acx.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,938 @@ -+#include "wl1271_acx.h" -+ -+#include -+#include -+#include -+#include -+ -+#include "wl1271.h" -+#include "wl12xx_80211.h" -+#include "wl1271_reg.h" -+#include "wl1271_spi.h" -+#include "wl1271_ps.h" -+ -+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, -+ u8 listen_interval) -+{ -+ struct acx_wake_up_condition *wake_up; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx wake up conditions"); -+ -+ wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL); -+ if (!wake_up) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wake_up->wake_up_event = wake_up_event; -+ wake_up->listen_interval = listen_interval; -+ -+ ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, -+ wake_up, sizeof(*wake_up)); -+ if (ret < 0) { -+ wl1271_warning("could not set wake up conditions: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(wake_up); -+ return ret; -+} -+ -+int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) -+{ -+ struct acx_sleep_auth *auth; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx sleep auth"); -+ -+ auth = kzalloc(sizeof(*auth), GFP_KERNEL); -+ if (!auth) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ auth->sleep_auth = sleep_auth; -+ -+ ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); -+ if (ret < 0) -+ return ret; -+ -+out: -+ kfree(auth); -+ return ret; -+} -+ -+int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len) -+{ -+ struct acx_revision *rev; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx fw rev"); -+ -+ rev = kzalloc(sizeof(*rev), GFP_KERNEL); -+ if (!rev) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ret = wl1271_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev)); -+ if (ret < 0) { -+ wl1271_warning("ACX_FW_REV interrogate failed"); -+ goto out; -+ } -+ -+ /* be careful with the buffer sizes */ -+ 
strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version))); -+ -+ /* -+ * if the firmware version string is exactly -+ * sizeof(rev->fw_version) long or fw_len is less than -+ * sizeof(rev->fw_version) it won't be null terminated -+ */ -+ buf[min(len, sizeof(rev->fw_version)) - 1] = '\0'; -+ -+out: -+ kfree(rev); -+ return ret; -+} -+ -+int wl1271_acx_tx_power(struct wl1271 *wl, int power) -+{ -+ struct acx_current_tx_power *acx; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr"); -+ -+ if (power < 0 || power > 25) -+ return -EINVAL; -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->current_tx_power = power * 10; -+ -+ ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("configure of tx power failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_feature_cfg(struct wl1271 *wl) -+{ -+ struct acx_feature_config *feature; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx feature cfg"); -+ -+ feature = kzalloc(sizeof(*feature), GFP_KERNEL); -+ if (!feature) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ -+ feature->data_flow_options = 0; -+ feature->options = 0; -+ -+ ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG, -+ feature, sizeof(*feature)); -+ if (ret < 0) { -+ wl1271_error("Couldnt set HW encryption"); -+ goto out; -+ } -+ -+out: -+ kfree(feature); -+ return ret; -+} -+ -+int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map, -+ size_t len) -+{ -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx mem map"); -+ -+ ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time) -+{ -+ struct acx_rx_msdu_lifetime *acx; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx rx msdu life time"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->lifetime = life_time; -+ ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("failed to set rx msdu life time: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter) -+{ -+ struct acx_rx_config *rx_config; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx rx config"); -+ -+ rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL); -+ if (!rx_config) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ rx_config->config_options = config; -+ rx_config->filter_options = filter; -+ -+ ret = wl1271_cmd_configure(wl, ACX_RX_CFG, -+ rx_config, sizeof(*rx_config)); -+ if (ret < 0) { -+ wl1271_warning("failed to set rx config: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(rx_config); -+ return ret; -+} -+ -+int wl1271_acx_pd_threshold(struct wl1271 *wl) -+{ -+ struct acx_packet_detection *pd; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx data pd threshold"); -+ -+ pd = kzalloc(sizeof(*pd), GFP_KERNEL); -+ if (!pd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* FIXME: threshold value not set */ -+ -+ ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); -+ if (ret < 0) { -+ wl1271_warning("failed to set pd threshold: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(pd); -+ return 0; -+} -+ -+int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) -+{ -+ struct acx_slot *slot; -+ int ret; 
-+ -+ wl1271_debug(DEBUG_ACX, "acx slot"); -+ -+ slot = kzalloc(sizeof(*slot), GFP_KERNEL); -+ if (!slot) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ slot->wone_index = STATION_WONE_INDEX; -+ slot->slot_time = slot_time; -+ -+ ret = wl1271_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot)); -+ if (ret < 0) { -+ wl1271_warning("failed to set slot time: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(slot); -+ return ret; -+} -+ -+int wl1271_acx_group_address_tbl(struct wl1271 *wl) -+{ -+ struct acx_dot11_grp_addr_tbl *acx; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx group address tbl"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* MAC filtering */ -+ acx->enabled = 0; -+ acx->num_groups = 0; -+ memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN); -+ -+ ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, -+ acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("failed to set group addr table: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_service_period_timeout(struct wl1271 *wl) -+{ -+ struct acx_rx_timeout *rx_timeout; -+ int ret; -+ -+ rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL); -+ if (!rx_timeout) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wl1271_debug(DEBUG_ACX, "acx service period timeout"); -+ -+ rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF; -+ rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF; -+ -+ ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, -+ rx_timeout, sizeof(*rx_timeout)); -+ if (ret < 0) { -+ wl1271_warning("failed to set service period timeout: %d", -+ ret); -+ goto out; -+ } -+ -+out: -+ kfree(rx_timeout); -+ return ret; -+} -+ -+int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold) -+{ -+ struct acx_rts_threshold *rts; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx rts threshold"); -+ -+ rts = kzalloc(sizeof(*rts), GFP_KERNEL); -+ if (!rts) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ rts->threshold = rts_threshold; -+ -+ ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); -+ if (ret < 0) { -+ wl1271_warning("failed to set rts threshold: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(rts); -+ return ret; -+} -+ -+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl) -+{ -+ struct acx_beacon_filter_option *beacon_filter; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); -+ -+ beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); -+ if (!beacon_filter) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ beacon_filter->enable = 0; -+ beacon_filter->max_num_beacons = 0; -+ -+ ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, -+ beacon_filter, sizeof(*beacon_filter)); -+ if (ret < 0) { -+ wl1271_warning("failed to set beacon filter opt: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(beacon_filter); -+ return ret; -+} -+ -+int wl1271_acx_beacon_filter_table(struct wl1271 *wl) -+{ -+ struct acx_beacon_filter_ie_table *ie_table; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx beacon filter table"); -+ -+ ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL); -+ if (!ie_table) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ie_table->num_ie = 0; -+ memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE); -+ -+ ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, -+ ie_table, sizeof(*ie_table)); -+ if (ret < 0) { -+ wl1271_warning("failed to set beacon filter table: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(ie_table); -+ return ret; -+} -+ -+int wl1271_acx_sg_enable(struct wl1271 *wl) -+{ 
-+ struct acx_bt_wlan_coex *pta; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx sg enable"); -+ -+ pta = kzalloc(sizeof(*pta), GFP_KERNEL); -+ if (!pta) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ pta->enable = SG_ENABLE; -+ -+ ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); -+ if (ret < 0) { -+ wl1271_warning("failed to set softgemini enable: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(pta); -+ return ret; -+} -+ -+int wl1271_acx_sg_cfg(struct wl1271 *wl) -+{ -+ struct acx_bt_wlan_coex_param *param; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx sg cfg"); -+ -+ param = kzalloc(sizeof(*param), GFP_KERNEL); -+ if (!param) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* BT-WLAN coext parameters */ -+ param->min_rate = RATE_INDEX_24MBPS; -+ param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF; -+ param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF; -+ param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF; -+ param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF; -+ param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF; -+ param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF; -+ param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF; -+ param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF; -+ param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF; -+ param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF; -+ param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF; -+ param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF; -+ param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF; -+ param->antenna_type = PTA_ANTENNA_TYPE_DEF; -+ param->signal_type = PTA_SIGNALING_TYPE_DEF; -+ param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF; -+ param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF; -+ param->max_cts = PTA_MAX_NUM_CTS_DEF; -+ param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF; -+ param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF; -+ param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF; -+ param->wlan_elp_hp = PTA_ELP_HP_DEF; -+ param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF; -+ param->ack_mode_dual_ant = PTA_ACK_MODE_DEF; -+ param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF; -+ param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF; -+ param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF; -+ -+ ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); -+ if (ret < 0) { -+ wl1271_warning("failed to set sg config: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(param); -+ return ret; -+} -+ -+int wl1271_acx_cca_threshold(struct wl1271 *wl) -+{ -+ struct acx_energy_detection *detection; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx cca threshold"); -+ -+ detection = kzalloc(sizeof(*detection), GFP_KERNEL); -+ if (!detection) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D; -+ detection->tx_energy_detection = 0; -+ -+ ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, -+ detection, sizeof(*detection)); -+ if (ret < 0) { -+ wl1271_warning("failed to set cca threshold: %d", ret); -+ return ret; -+ } -+ -+out: -+ kfree(detection); -+ return ret; -+} -+ -+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl) -+{ -+ struct acx_beacon_broadcast *bb; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx bcn dtim options"); -+ -+ bb = kzalloc(sizeof(*bb), GFP_KERNEL); -+ if (!bb) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE; -+ bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE; -+ bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE; -+ bb->ps_poll_threshold = 
CONSECUTIVE_PS_POLL_FAILURE_DEF; -+ -+ ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); -+ if (ret < 0) { -+ wl1271_warning("failed to set rx config: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(bb); -+ return ret; -+} -+ -+int wl1271_acx_aid(struct wl1271 *wl, u16 aid) -+{ -+ struct acx_aid *acx_aid; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx aid"); -+ -+ acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL); -+ if (!acx_aid) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx_aid->aid = aid; -+ -+ ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); -+ if (ret < 0) { -+ wl1271_warning("failed to set aid: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx_aid); -+ return ret; -+} -+ -+int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask) -+{ -+ struct acx_event_mask *mask; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx event mbox mask"); -+ -+ mask = kzalloc(sizeof(*mask), GFP_KERNEL); -+ if (!mask) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* high event mask is unused */ -+ mask->high_event_mask = 0xffffffff; -+ -+ mask->event_mask = event_mask; -+ -+ ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, -+ mask, sizeof(*mask)); -+ if (ret < 0) { -+ wl1271_warning("failed to set acx_event_mbox_mask: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(mask); -+ return ret; -+} -+ -+int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble) -+{ -+ struct acx_preamble *acx; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx_set_preamble"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->preamble = preamble; -+ -+ ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of preamble failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_cts_protect(struct wl1271 *wl, -+ enum acx_ctsprotect_type ctsprotect) -+{ -+ struct acx_ctsprotect *acx; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx_set_ctsprotect"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->ctsprotect = ctsprotect; -+ -+ ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of ctsprotect failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) -+{ -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "acx statistics"); -+ -+ ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats, -+ sizeof(*stats)); -+ if (ret < 0) { -+ wl1271_warning("acx statistics failed: %d", ret); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+int wl1271_acx_rate_policies(struct wl1271 *wl) -+{ -+ struct acx_rate_policy *acx; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_ACX, "acx rate policies"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* configure one default (one-size-fits-all) rate class */ -+ acx->rate_class_cnt = 1; -+ acx->rate_class[0].enabled_rates = ACX_RATE_MASK_ALL; -+ acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT; -+ acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT; -+ acx->rate_class[0].aflags = 0; -+ -+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of rate policies failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int 
wl1271_acx_ac_cfg(struct wl1271 *wl) -+{ -+ struct acx_ac_cfg *acx; -+ int i, ret = 0; -+ -+ wl1271_debug(DEBUG_ACX, "acx access category config"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* -+ * FIXME: Configure each AC with appropriate values (most suitable -+ * values will probably be different for each AC. -+ */ -+ for (i = 0; i < WL1271_ACX_AC_COUNT; i++) { -+ acx->ac = i; -+ -+ /* -+ * FIXME: The following default values originate from -+ * the TI reference driver. What do they mean? -+ */ -+ acx->cw_min = 15; -+ acx->cw_max = 63; -+ acx->aifsn = 3; -+ acx->reserved = 0; -+ acx->tx_op_limit = 0; -+ -+ ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of access category " -+ "config: %d", ret); -+ goto out; -+ } -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_tid_cfg(struct wl1271 *wl) -+{ -+ struct acx_tid_config *acx; -+ int i, ret = 0; -+ -+ wl1271_debug(DEBUG_ACX, "acx tid config"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* FIXME: configure each TID with a different AC reference */ -+ for (i = 0; i < WL1271_ACX_TID_COUNT; i++) { -+ acx->queue_id = i; -+ acx->tsid = WL1271_ACX_AC_BE; -+ acx->ps_scheme = WL1271_ACX_PS_SCHEME_LEGACY; -+ acx->ack_policy = WL1271_ACX_ACK_POLICY_LEGACY; -+ -+ ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of tid config failed: %d", ret); -+ goto out; -+ } -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_frag_threshold(struct wl1271 *wl) -+{ -+ struct acx_frag_threshold *acx; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_ACX, "acx frag threshold"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; -+ ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of frag threshold failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_tx_config_options(struct wl1271 *wl) -+{ -+ struct acx_tx_config_options *acx; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_ACX, "acx tx config options"); -+ -+ acx = kzalloc(sizeof(*acx), GFP_KERNEL); -+ -+ if (!acx) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ acx->tx_compl_timeout = WL1271_ACX_TX_COMPL_TIMEOUT; -+ acx->tx_compl_threshold = WL1271_ACX_TX_COMPL_THRESHOLD; -+ ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_warning("Setting of tx options failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(acx); -+ return ret; -+} -+ -+int wl1271_acx_mem_cfg(struct wl1271 *wl) -+{ -+ struct wl1271_acx_config_memory *mem_conf; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); -+ -+ mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL); -+ if (!mem_conf) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* memory config */ -+ mem_conf->num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS); -+ mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; -+ mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; -+ mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; -+ mem_conf->total_tx_descriptors = ACX_TX_DESCRIPTORS; -+ -+ ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, -+ sizeof(*mem_conf)); -+ if (ret < 0) { -+ wl1271_warning("wl1271 mem config failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ 
kfree(mem_conf); -+ return ret; -+} -+ -+int wl1271_acx_init_mem_config(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_mem_cfg(wl); -+ if (ret < 0) -+ return ret; -+ -+ wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), -+ GFP_KERNEL); -+ if (!wl->target_mem_map) { -+ wl1271_error("couldn't allocate target memory map"); -+ return -ENOMEM; -+ } -+ -+ /* we now ask for the firmware built memory map */ -+ ret = wl1271_acx_mem_map(wl, (void *)wl->target_mem_map, -+ sizeof(struct wl1271_acx_mem_map)); -+ if (ret < 0) { -+ wl1271_error("couldn't retrieve firmware memory map"); -+ kfree(wl->target_mem_map); -+ wl->target_mem_map = NULL; -+ return ret; -+ } -+ -+ /* initialize TX block book keeping */ -+ wl->tx_blocks_available = wl->target_mem_map->num_tx_mem_blocks; -+ wl1271_debug(DEBUG_TX, "available tx blocks: %d", -+ wl->tx_blocks_available); -+ -+ return 0; -+} -+ -+int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) -+{ -+ struct wl1271_acx_rx_config_opt *rx_conf; -+ int ret; -+ -+ wl1271_debug(DEBUG_ACX, "wl1271 rx interrupt config"); -+ -+ rx_conf = kzalloc(sizeof(*rx_conf), GFP_KERNEL); -+ if (!rx_conf) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ rx_conf->threshold = WL1271_RX_INTR_THRESHOLD_DEF; -+ rx_conf->timeout = WL1271_RX_INTR_TIMEOUT_DEF; -+ rx_conf->mblk_threshold = USHORT_MAX; /* Disabled */ -+ rx_conf->queue_type = RX_QUEUE_TYPE_RX_LOW_PRIORITY; -+ -+ ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, -+ sizeof(*rx_conf)); -+ if (ret < 0) { -+ wl1271_warning("wl1271 rx config opt failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(rx_conf); -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_acx.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_acx.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_acx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_acx.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,1221 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_ACX_H__ -+#define __WL1271_ACX_H__ -+ -+#include "wl1271.h" -+#include "wl1271_cmd.h" -+ -+/************************************************************************* -+ -+ Host Interrupt Register (WiLink -> Host) -+ -+**************************************************************************/ -+/* HW Initiated interrupt Watchdog timer expiration */ -+#define WL1271_ACX_INTR_WATCHDOG BIT(0) -+/* Init sequence is done (masked interrupt, detection through polling only ) */ -+#define WL1271_ACX_INTR_INIT_COMPLETE BIT(1) -+/* Event was entered to Event MBOX #A*/ -+#define WL1271_ACX_INTR_EVENT_A BIT(2) -+/* Event was entered to Event MBOX #B*/ -+#define WL1271_ACX_INTR_EVENT_B BIT(3) -+/* Command processing completion*/ -+#define WL1271_ACX_INTR_CMD_COMPLETE BIT(4) -+/* Signaling the host on HW wakeup */ -+#define WL1271_ACX_INTR_HW_AVAILABLE BIT(5) -+/* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */ -+#define WL1271_ACX_INTR_DATA BIT(6) -+/* Trace meassge on MBOX #A */ -+#define WL1271_ACX_INTR_TRACE_A BIT(7) -+/* Trace meassge on MBOX #B */ -+#define WL1271_ACX_INTR_TRACE_B BIT(8) -+ -+#define WL1271_ACX_INTR_ALL 0xFFFFFFFF -+#define WL1271_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \ -+ WL1271_ACX_INTR_INIT_COMPLETE | \ -+ WL1271_ACX_INTR_EVENT_A | \ -+ WL1271_ACX_INTR_EVENT_B | \ -+ WL1271_ACX_INTR_CMD_COMPLETE | \ -+ WL1271_ACX_INTR_HW_AVAILABLE | \ -+ WL1271_ACX_INTR_DATA) -+ -+#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ -+ WL1271_ACX_INTR_EVENT_B | \ -+ WL1271_ACX_INTR_DATA) -+ -+/* Target's information element */ -+struct acx_header { -+ struct wl1271_cmd_header cmd; -+ -+ /* acx (or information element) header */ -+ u16 id; -+ -+ /* payload length (not including headers */ -+ u16 len; -+}; -+ -+struct acx_error_counter { -+ struct acx_header header; -+ -+ /* The number of PLCP errors since the last time this */ -+ /* information element was interrogated. This field is */ -+ /* automatically cleared when it is interrogated.*/ -+ u32 PLCP_error; -+ -+ /* The number of FCS errors since the last time this */ -+ /* information element was interrogated. This field is */ -+ /* automatically cleared when it is interrogated.*/ -+ u32 FCS_error; -+ -+ /* The number of MPDUs without PLCP header errors received*/ -+ /* since the last time this information element was interrogated. */ -+ /* This field is automatically cleared when it is interrogated.*/ -+ u32 valid_frame; -+ -+ /* the number of missed sequence numbers in the squentially */ -+ /* values of frames seq numbers */ -+ u32 seq_num_miss; -+} __attribute__ ((packed)); -+ -+struct acx_revision { -+ struct acx_header header; -+ -+ /* -+ * The WiLink firmware version, an ASCII string x.x.x.x, -+ * that uniquely identifies the current firmware. -+ * The left most digit is incremented each time a -+ * significant change is made to the firmware, such as -+ * code redesign or new platform support. -+ * The second digit is incremented when major enhancements -+ * are added or major fixes are made. -+ * The third digit is incremented for each GA release. -+ * The fourth digit is incremented for each build. -+ * The first two digits identify a firmware release version, -+ * in other words, a unique set of features. 
-+ * The first three digits identify a GA release. -+ */ -+ char fw_version[20]; -+ -+ /* -+ * This 4 byte field specifies the WiLink hardware version. -+ * bits 0 - 15: Reserved. -+ * bits 16 - 23: Version ID - The WiLink version ID -+ * (1 = first spin, 2 = second spin, and so on). -+ * bits 24 - 31: Chip ID - The WiLink chip ID. -+ */ -+ u32 hw_version; -+} __attribute__ ((packed)); -+ -+enum wl1271_psm_mode { -+ /* Active mode */ -+ WL1271_PSM_CAM = 0, -+ -+ /* Power save mode */ -+ WL1271_PSM_PS = 1, -+ -+ /* Extreme low power */ -+ WL1271_PSM_ELP = 2, -+}; -+ -+struct acx_sleep_auth { -+ struct acx_header header; -+ -+ /* The sleep level authorization of the device. */ -+ /* 0 - Always active*/ -+ /* 1 - Power down mode: light / fast sleep*/ -+ /* 2 - ELP mode: Deep / Max sleep*/ -+ u8 sleep_auth; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+enum { -+ HOSTIF_PCI_MASTER_HOST_INDIRECT, -+ HOSTIF_PCI_MASTER_HOST_DIRECT, -+ HOSTIF_SLAVE, -+ HOSTIF_PKT_RING, -+ HOSTIF_DONTCARE = 0xFF -+}; -+ -+#define DEFAULT_UCAST_PRIORITY 0 -+#define DEFAULT_RX_Q_PRIORITY 0 -+#define DEFAULT_NUM_STATIONS 1 -+#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */ -+#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ -+#define TRACE_BUFFER_MAX_SIZE 256 -+ -+#define DP_RX_PACKET_RING_CHUNK_SIZE 1600 -+#define DP_TX_PACKET_RING_CHUNK_SIZE 1600 -+#define DP_RX_PACKET_RING_CHUNK_NUM 2 -+#define DP_TX_PACKET_RING_CHUNK_NUM 2 -+#define DP_TX_COMPLETE_TIME_OUT 20 -+#define FW_TX_CMPLT_BLOCK_SIZE 16 -+ -+#define TX_MSDU_LIFETIME_MIN 0 -+#define TX_MSDU_LIFETIME_MAX 3000 -+#define TX_MSDU_LIFETIME_DEF 512 -+#define RX_MSDU_LIFETIME_MIN 0 -+#define RX_MSDU_LIFETIME_MAX 0xFFFFFFFF -+#define RX_MSDU_LIFETIME_DEF 512000 -+ -+struct acx_rx_msdu_lifetime { -+ struct acx_header header; -+ -+ /* -+ * The maximum amount of time, in TU, before the -+ * firmware discards the MSDU. -+ */ -+ u32 lifetime; -+} __attribute__ ((packed)); -+ -+/* -+ * RX Config Options Table -+ * Bit Definition -+ * === ========== -+ * 31:14 Reserved -+ * 13 Copy RX Status - when set, write three receive status words -+ * to top of rx'd MPDUs. -+ * When cleared, do not write three status words (added rev 1.5) -+ * 12 Reserved -+ * 11 RX Complete upon FCS error - when set, give rx complete -+ * interrupt for FCS errors, after the rx filtering, e.g. unicast -+ * frames not to us with FCS error will not generate an interrupt. -+ * 10 SSID Filter Enable - When set, the WiLink discards all beacon, -+ * probe request, and probe response frames with an SSID that does -+ * not match the SSID specified by the host in the START/JOIN -+ * command. -+ * When clear, the WiLink receives frames with any SSID. -+ * 9 Broadcast Filter Enable - When set, the WiLink discards all -+ * broadcast frames. When clear, the WiLink receives all received -+ * broadcast frames. -+ * 8:6 Reserved -+ * 5 BSSID Filter Enable - When set, the WiLink discards any frames -+ * with a BSSID that does not match the BSSID specified by the -+ * host. -+ * When clear, the WiLink receives frames from any BSSID. -+ * 4 MAC Addr Filter - When set, the WiLink discards any frames -+ * with a destination address that does not match the MAC address -+ * of the adaptor. -+ * When clear, the WiLink receives frames destined to any MAC -+ * address. -+ * 3 Promiscuous - When set, the WiLink receives all valid frames -+ * (i.e., all frames that pass the FCS check). -+ * When clear, only frames that pass the other filters specified -+ * are received. 
-+ * 2 FCS - When set, the WiLink includes the FCS with the received -+ * frame. -+ * When cleared, the FCS is discarded. -+ * 1 PLCP header - When set, write all data from baseband to frame -+ * buffer including PHY header. -+ * 0 Reserved - Always equal to 0. -+ * -+ * RX Filter Options Table -+ * Bit Definition -+ * === ========== -+ * 31:12 Reserved - Always equal to 0. -+ * 11 Association - When set, the WiLink receives all association -+ * related frames (association request/response, reassocation -+ * request/response, and disassociation). When clear, these frames -+ * are discarded. -+ * 10 Auth/De auth - When set, the WiLink receives all authentication -+ * and de-authentication frames. When clear, these frames are -+ * discarded. -+ * 9 Beacon - When set, the WiLink receives all beacon frames. -+ * When clear, these frames are discarded. -+ * 8 Contention Free - When set, the WiLink receives all contention -+ * free frames. -+ * When clear, these frames are discarded. -+ * 7 Control - When set, the WiLink receives all control frames. -+ * When clear, these frames are discarded. -+ * 6 Data - When set, the WiLink receives all data frames. -+ * When clear, these frames are discarded. -+ * 5 FCS Error - When set, the WiLink receives frames that have FCS -+ * errors. -+ * When clear, these frames are discarded. -+ * 4 Management - When set, the WiLink receives all management -+ * frames. -+ * When clear, these frames are discarded. -+ * 3 Probe Request - When set, the WiLink receives all probe request -+ * frames. -+ * When clear, these frames are discarded. -+ * 2 Probe Response - When set, the WiLink receives all probe -+ * response frames. -+ * When clear, these frames are discarded. -+ * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK -+ * frames. -+ * When clear, these frames are discarded. -+ * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames -+ * that have reserved frame types and sub types as defined by the -+ * 802.11 specification. -+ * When clear, these frames are discarded. -+ */ -+struct acx_rx_config { -+ struct acx_header header; -+ -+ u32 config_options; -+ u32 filter_options; -+} __attribute__ ((packed)); -+ -+struct acx_packet_detection { -+ struct acx_header header; -+ -+ u32 threshold; -+} __attribute__ ((packed)); -+ -+ -+enum acx_slot_type { -+ SLOT_TIME_LONG = 0, -+ SLOT_TIME_SHORT = 1, -+ DEFAULT_SLOT_TIME = SLOT_TIME_SHORT, -+ MAX_SLOT_TIMES = 0xFF -+}; -+ -+#define STATION_WONE_INDEX 0 -+ -+struct acx_slot { -+ struct acx_header header; -+ -+ u8 wone_index; /* Reserved */ -+ u8 slot_time; -+ u8 reserved[6]; -+} __attribute__ ((packed)); -+ -+ -+#define ADDRESS_GROUP_MAX (8) -+#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX) -+ -+struct acx_dot11_grp_addr_tbl { -+ struct acx_header header; -+ -+ u8 enabled; -+ u8 num_groups; -+ u8 pad[2]; -+ u8 mac_table[ADDRESS_GROUP_MAX_LEN]; -+} __attribute__ ((packed)); -+ -+ -+#define RX_TIMEOUT_PS_POLL_MIN 0 -+#define RX_TIMEOUT_PS_POLL_MAX (200000) -+#define RX_TIMEOUT_PS_POLL_DEF (15) -+#define RX_TIMEOUT_UPSD_MIN 0 -+#define RX_TIMEOUT_UPSD_MAX (200000) -+#define RX_TIMEOUT_UPSD_DEF (15) -+ -+struct acx_rx_timeout { -+ struct acx_header header; -+ -+ /* -+ * The longest time the STA will wait to receive -+ * traffic from the AP after a PS-poll has been -+ * transmitted. -+ */ -+ u16 ps_poll_timeout; -+ -+ /* -+ * The longest time the STA will wait to receive -+ * traffic from the AP after a frame has been sent -+ * from an UPSD enabled queue. 
-+ */ -+ u16 upsd_timeout; -+} __attribute__ ((packed)); -+ -+#define RTS_THRESHOLD_MIN 0 -+#define RTS_THRESHOLD_MAX 4096 -+#define RTS_THRESHOLD_DEF 2347 -+ -+struct acx_rts_threshold { -+ struct acx_header header; -+ -+ u16 threshold; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct acx_beacon_filter_option { -+ struct acx_header header; -+ -+ u8 enable; -+ -+ /* -+ * The number of beacons without the unicast TIM -+ * bit set that the firmware buffers before -+ * signaling the host about ready frames. -+ * When set to 0 and the filter is enabled, beacons -+ * without the unicast TIM bit set are dropped. -+ */ -+ u8 max_num_beacons; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+/* -+ * ACXBeaconFilterEntry (not 221) -+ * Byte Offset Size (Bytes) Definition -+ * =========== ============ ========== -+ * 0 1 IE identifier -+ * 1 1 Treatment bit mask -+ * -+ * ACXBeaconFilterEntry (221) -+ * Byte Offset Size (Bytes) Definition -+ * =========== ============ ========== -+ * 0 1 IE identifier -+ * 1 1 Treatment bit mask -+ * 2 3 OUI -+ * 5 1 Type -+ * 6 2 Version -+ * -+ * -+ * Treatment bit mask - The information element handling: -+ * bit 0 - The information element is compared and transferred -+ * in case of change. -+ * bit 1 - The information element is transferred to the host -+ * with each appearance or disappearance. -+ * Note that both bits can be set at the same time. -+ */ -+#define BEACON_FILTER_TABLE_MAX_IE_NUM (32) -+#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6) -+#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE (2) -+#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6) -+#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \ -+ BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \ -+ (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \ -+ BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE)) -+ -+struct acx_beacon_filter_ie_table { -+ struct acx_header header; -+ -+ u8 num_ie; -+ u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+enum { -+ SG_ENABLE = 0, -+ SG_DISABLE, -+ SG_SENSE_NO_ACTIVITY, -+ SG_SENSE_ACTIVE -+}; -+ -+struct acx_bt_wlan_coex { -+ struct acx_header header; -+ -+ /* -+ * 0 -> PTA enabled -+ * 1 -> PTA disabled -+ * 2 -> sense no active mode, i.e. -+ * an interrupt is sent upon -+ * BT activity. -+ * 3 -> PTA is switched on in response -+ * to the interrupt sending. 
-+ */ -+ u8 enable; -+ u8 pad[3]; -+} __attribute__ ((packed)); -+ -+#define PTA_ANTENNA_TYPE_DEF (0) -+#define PTA_BT_HP_MAXTIME_DEF (2000) -+#define PTA_WLAN_HP_MAX_TIME_DEF (5000) -+#define PTA_SENSE_DISABLE_TIMER_DEF (1350) -+#define PTA_PROTECTIVE_RX_TIME_DEF (1500) -+#define PTA_PROTECTIVE_TX_TIME_DEF (1500) -+#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000) -+#define PTA_SIGNALING_TYPE_DEF (1) -+#define PTA_AFH_LEVERAGE_ON_DEF (0) -+#define PTA_NUMBER_QUIET_CYCLE_DEF (0) -+#define PTA_MAX_NUM_CTS_DEF (3) -+#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2) -+#define PTA_NUMBER_OF_BT_PACKETS_DEF (2) -+#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500) -+#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000) -+#define PTA_CYCLE_TIME_FAST_DEF (8700) -+#define PTA_RX_FOR_AVALANCHE_DEF (5) -+#define PTA_ELP_HP_DEF (0) -+#define PTA_ANTI_STARVE_PERIOD_DEF (500) -+#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4) -+#define PTA_ALLOW_PA_SD_DEF (1) -+#define PTA_TIME_BEFORE_BEACON_DEF (6300) -+#define PTA_HPDM_MAX_TIME_DEF (1600) -+#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550) -+#define PTA_AUTO_MODE_NO_CTS_DEF (0) -+#define PTA_BT_HP_RESPECTED_DEF (3) -+#define PTA_WLAN_RX_MIN_RATE_DEF (24) -+#define PTA_ACK_MODE_DEF (1) -+ -+struct acx_bt_wlan_coex_param { -+ struct acx_header header; -+ -+ /* -+ * The minimum rate of a received WLAN packet in the STA, -+ * during protective mode, of which a new BT-HP request -+ * during this Rx will always be respected and gain the antenna. -+ */ -+ u32 min_rate; -+ -+ /* Max time the BT HP will be respected. */ -+ u16 bt_hp_max_time; -+ -+ /* Max time the WLAN HP will be respected. */ -+ u16 wlan_hp_max_time; -+ -+ /* -+ * The time between the last BT activity -+ * and the moment when the sense mode returns -+ * to SENSE_INACTIVE. -+ */ -+ u16 sense_disable_timer; -+ -+ /* Time before the next BT HP instance */ -+ u16 rx_time_bt_hp; -+ u16 tx_time_bt_hp; -+ -+ /* range: 10-20000 default: 1500 */ -+ u16 rx_time_bt_hp_fast; -+ u16 tx_time_bt_hp_fast; -+ -+ /* range: 2000-65535 default: 8700 */ -+ u16 wlan_cycle_fast; -+ -+ /* range: 0 - 15000 (Msec) default: 1000 */ -+ u16 bt_anti_starvation_period; -+ -+ /* range 400-10000(Usec) default: 3000 */ -+ u16 next_bt_lp_packet; -+ -+ /* Deafult: worst case for BT DH5 traffic */ -+ u16 wake_up_beacon; -+ -+ /* range: 0-50000(Usec) default: 1050 */ -+ u16 hp_dm_max_guard_time; -+ -+ /* -+ * This is to prevent both BT & WLAN antenna -+ * starvation. -+ * Range: 100-50000(Usec) default:2550 -+ */ -+ u16 next_wlan_packet; -+ -+ /* 0 -> shared antenna */ -+ u8 antenna_type; -+ -+ /* -+ * 0 -> TI legacy -+ * 1 -> Palau -+ */ -+ u8 signal_type; -+ -+ /* -+ * BT AFH status -+ * 0 -> no AFH -+ * 1 -> from dedicated GPIO -+ * 2 -> AFH on (from host) -+ */ -+ u8 afh_leverage_on; -+ -+ /* -+ * The number of cycles during which no -+ * TX will be sent after 1 cycle of RX -+ * transaction in protective mode -+ */ -+ u8 quiet_cycle_num; -+ -+ /* -+ * The maximum number of CTSs that will -+ * be sent for receiving RX packet in -+ * protective mode -+ */ -+ u8 max_cts; -+ -+ /* -+ * The number of WLAN packets -+ * transferred in common mode before -+ * switching to BT. -+ */ -+ u8 wlan_packets_num; -+ -+ /* -+ * The number of BT packets -+ * transferred in common mode before -+ * switching to WLAN. 
-+ */ -+ u8 bt_packets_num; -+ -+ /* range: 1-255 default: 5 */ -+ u8 missed_rx_avalanche; -+ -+ /* range: 0-1 default: 1 */ -+ u8 wlan_elp_hp; -+ -+ /* range: 0 - 15 default: 4 */ -+ u8 bt_anti_starvation_cycles; -+ -+ u8 ack_mode_dual_ant; -+ -+ /* -+ * Allow PA_SD assertion/de-assertion -+ * during enabled BT activity. -+ */ -+ u8 pa_sd_enable; -+ -+ /* -+ * Enable/Disable PTA in auto mode: -+ * Support Both Active & P.S modes -+ */ -+ u8 pta_auto_mode_enable; -+ -+ /* range: 0 - 20 default: 1 */ -+ u8 bt_hp_respected_num; -+} __attribute__ ((packed)); -+ -+#define CCA_THRSH_ENABLE_ENERGY_D 0x140A -+#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF -+ -+struct acx_energy_detection { -+ struct acx_header header; -+ -+ /* The RX Clear Channel Assessment threshold in the PHY */ -+ u16 rx_cca_threshold; -+ u8 tx_energy_detection; -+ u8 pad; -+} __attribute__ ((packed)); -+ -+#define BCN_RX_TIMEOUT_DEF_VALUE 10000 -+#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 -+#define RX_BROADCAST_IN_PS_DEF_VALUE 1 -+#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4 -+ -+struct acx_beacon_broadcast { -+ struct acx_header header; -+ -+ u16 beacon_rx_timeout; -+ u16 broadcast_timeout; -+ -+ /* Enables receiving of broadcast packets in PS mode */ -+ u8 rx_broadcast_in_ps; -+ -+ /* Consecutive PS Poll failures before updating the host */ -+ u8 ps_poll_threshold; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct acx_event_mask { -+ struct acx_header header; -+ -+ u32 event_mask; -+ u32 high_event_mask; /* Unused */ -+} __attribute__ ((packed)); -+ -+#define CFG_RX_FCS BIT(2) -+#define CFG_RX_ALL_GOOD BIT(3) -+#define CFG_UNI_FILTER_EN BIT(4) -+#define CFG_BSSID_FILTER_EN BIT(5) -+#define CFG_MC_FILTER_EN BIT(6) -+#define CFG_MC_ADDR0_EN BIT(7) -+#define CFG_MC_ADDR1_EN BIT(8) -+#define CFG_BC_REJECT_EN BIT(9) -+#define CFG_SSID_FILTER_EN BIT(10) -+#define CFG_RX_INT_FCS_ERROR BIT(11) -+#define CFG_RX_INT_ENCRYPTED BIT(12) -+#define CFG_RX_WR_RX_STATUS BIT(13) -+#define CFG_RX_FILTER_NULTI BIT(14) -+#define CFG_RX_RESERVE BIT(15) -+#define CFG_RX_TIMESTAMP_TSF BIT(16) -+ -+#define CFG_RX_RSV_EN BIT(0) -+#define CFG_RX_RCTS_ACK BIT(1) -+#define CFG_RX_PRSP_EN BIT(2) -+#define CFG_RX_PREQ_EN BIT(3) -+#define CFG_RX_MGMT_EN BIT(4) -+#define CFG_RX_FCS_ERROR BIT(5) -+#define CFG_RX_DATA_EN BIT(6) -+#define CFG_RX_CTL_EN BIT(7) -+#define CFG_RX_CF_EN BIT(8) -+#define CFG_RX_BCN_EN BIT(9) -+#define CFG_RX_AUTH_EN BIT(10) -+#define CFG_RX_ASSOC_EN BIT(11) -+ -+#define SCAN_PASSIVE BIT(0) -+#define SCAN_5GHZ_BAND BIT(1) -+#define SCAN_TRIGGERED BIT(2) -+#define SCAN_PRIORITY_HIGH BIT(3) -+ -+struct acx_feature_config { -+ struct acx_header header; -+ -+ u32 options; -+ u32 data_flow_options; -+} __attribute__ ((packed)); -+ -+struct acx_current_tx_power { -+ struct acx_header header; -+ -+ u8 current_tx_power; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+enum acx_wake_up_event { -+ WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ -+ WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/ -+ WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */ -+ WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth Beacon */ -+ WAKE_UP_EVENT_BITS_MASK = 0x0F -+}; -+ -+struct acx_wake_up_condition { -+ struct acx_header header; -+ -+ u8 wake_up_event; /* Only one bit can be set */ -+ u8 listen_interval; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+struct acx_aid { -+ struct acx_header header; -+ -+ /* -+ * To be set when associated with an AP. 
-+ */ -+ u16 aid; -+ u8 pad[2]; -+} __attribute__ ((packed)); -+ -+enum acx_preamble_type { -+ ACX_PREAMBLE_LONG = 0, -+ ACX_PREAMBLE_SHORT = 1 -+}; -+ -+struct acx_preamble { -+ struct acx_header header; -+ -+ /* -+ * When set, the WiLink transmits the frames with a short preamble and -+ * when cleared, the WiLink transmits the frames with a long preamble. -+ */ -+ u8 preamble; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+enum acx_ctsprotect_type { -+ CTSPROTECT_DISABLE = 0, -+ CTSPROTECT_ENABLE = 1 -+}; -+ -+struct acx_ctsprotect { -+ struct acx_header header; -+ u8 ctsprotect; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+struct acx_tx_statistics { -+ u32 internal_desc_overflow; -+} __attribute__ ((packed)); -+ -+struct acx_rx_statistics { -+ u32 out_of_mem; -+ u32 hdr_overflow; -+ u32 hw_stuck; -+ u32 dropped; -+ u32 fcs_err; -+ u32 xfr_hint_trig; -+ u32 path_reset; -+ u32 reset_counter; -+} __attribute__ ((packed)); -+ -+struct acx_dma_statistics { -+ u32 rx_requested; -+ u32 rx_errors; -+ u32 tx_requested; -+ u32 tx_errors; -+} __attribute__ ((packed)); -+ -+struct acx_isr_statistics { -+ /* host command complete */ -+ u32 cmd_cmplt; -+ -+ /* fiqisr() */ -+ u32 fiqs; -+ -+ /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ -+ u32 rx_headers; -+ -+ /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ -+ u32 rx_completes; -+ -+ /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ -+ u32 rx_mem_overflow; -+ -+ /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ -+ u32 rx_rdys; -+ -+ /* irqisr() */ -+ u32 irqs; -+ -+ /* (INT_STS_ND & INT_TRIG_TX_PROC) */ -+ u32 tx_procs; -+ -+ /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ -+ u32 decrypt_done; -+ -+ /* (INT_STS_ND & INT_TRIG_DMA0) */ -+ u32 dma0_done; -+ -+ /* (INT_STS_ND & INT_TRIG_DMA1) */ -+ u32 dma1_done; -+ -+ /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ -+ u32 tx_exch_complete; -+ -+ /* (INT_STS_ND & INT_TRIG_COMMAND) */ -+ u32 commands; -+ -+ /* (INT_STS_ND & INT_TRIG_RX_PROC) */ -+ u32 rx_procs; -+ -+ /* (INT_STS_ND & INT_TRIG_PM_802) */ -+ u32 hw_pm_mode_changes; -+ -+ /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ -+ u32 host_acknowledges; -+ -+ /* (INT_STS_ND & INT_TRIG_PM_PCI) */ -+ u32 pci_pm; -+ -+ /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ -+ u32 wakeups; -+ -+ /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ -+ u32 low_rssi; -+} __attribute__ ((packed)); -+ -+struct acx_wep_statistics { -+ /* WEP address keys configured */ -+ u32 addr_key_count; -+ -+ /* default keys configured */ -+ u32 default_key_count; -+ -+ u32 reserved; -+ -+ /* number of times that WEP key not found on lookup */ -+ u32 key_not_found; -+ -+ /* number of times that WEP key decryption failed */ -+ u32 decrypt_fail; -+ -+ /* WEP packets decrypted */ -+ u32 packets; -+ -+ /* WEP decrypt interrupts */ -+ u32 interrupt; -+} __attribute__ ((packed)); -+ -+#define ACX_MISSED_BEACONS_SPREAD 10 -+ -+struct acx_pwr_statistics { -+ /* the amount of enters into power save mode (both PD & ELP) */ -+ u32 ps_enter; -+ -+ /* the amount of enters into ELP mode */ -+ u32 elp_enter; -+ -+ /* the amount of missing beacon interrupts to the host */ -+ u32 missing_bcns; -+ -+ /* the amount of wake on host-access times */ -+ u32 wake_on_host; -+ -+ /* the amount of wake on timer-expire */ -+ u32 wake_on_timer_exp; -+ -+ /* the number of packets that were transmitted with PS bit set */ -+ u32 tx_with_ps; -+ -+ /* the number of packets that were transmitted with PS bit clear */ -+ u32 tx_without_ps; -+ -+ /* the number of received beacons */ -+ u32 rcvd_beacons; -+ -+ /* the number of entering into PowerOn (power save off) */ -+ u32 
power_save_off; -+ -+ /* the number of entries into power save mode */ -+ u16 enable_ps; -+ -+ /* -+ * the number of exits from power save, not including failed PS -+ * transitions -+ */ -+ u16 disable_ps; -+ -+ /* -+ * the number of times the TSF counter was adjusted because -+ * of drift -+ */ -+ u32 fix_tsf_ps; -+ -+ /* Gives statistics about the spread continuous missed beacons. -+ * The 16 LSB are dedicated for the PS mode. -+ * The 16 MSB are dedicated for the PS mode. -+ * cont_miss_bcns_spread[0] - single missed beacon. -+ * cont_miss_bcns_spread[1] - two continuous missed beacons. -+ * cont_miss_bcns_spread[2] - three continuous missed beacons. -+ * ... -+ * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. -+ */ -+ u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; -+ -+ /* the number of beacons in awake mode */ -+ u32 rcvd_awake_beacons; -+} __attribute__ ((packed)); -+ -+struct acx_mic_statistics { -+ u32 rx_pkts; -+ u32 calc_failure; -+} __attribute__ ((packed)); -+ -+struct acx_aes_statistics { -+ u32 encrypt_fail; -+ u32 decrypt_fail; -+ u32 encrypt_packets; -+ u32 decrypt_packets; -+ u32 encrypt_interrupt; -+ u32 decrypt_interrupt; -+} __attribute__ ((packed)); -+ -+struct acx_event_statistics { -+ u32 heart_beat; -+ u32 calibration; -+ u32 rx_mismatch; -+ u32 rx_mem_empty; -+ u32 rx_pool; -+ u32 oom_late; -+ u32 phy_transmit_error; -+ u32 tx_stuck; -+} __attribute__ ((packed)); -+ -+struct acx_ps_statistics { -+ u32 pspoll_timeouts; -+ u32 upsd_timeouts; -+ u32 upsd_max_sptime; -+ u32 upsd_max_apturn; -+ u32 pspoll_max_apturn; -+ u32 pspoll_utilization; -+ u32 upsd_utilization; -+} __attribute__ ((packed)); -+ -+struct acx_rxpipe_statistics { -+ u32 rx_prep_beacon_drop; -+ u32 descr_host_int_trig_rx_data; -+ u32 beacon_buffer_thres_host_int_trig_rx_data; -+ u32 missed_beacon_host_int_trig_rx_data; -+ u32 tx_xfr_host_int_trig_rx_data; -+} __attribute__ ((packed)); -+ -+struct acx_statistics { -+ struct acx_header header; -+ -+ struct acx_tx_statistics tx; -+ struct acx_rx_statistics rx; -+ struct acx_dma_statistics dma; -+ struct acx_isr_statistics isr; -+ struct acx_wep_statistics wep; -+ struct acx_pwr_statistics pwr; -+ struct acx_aes_statistics aes; -+ struct acx_mic_statistics mic; -+ struct acx_event_statistics event; -+ struct acx_ps_statistics ps; -+ struct acx_rxpipe_statistics rxpipe; -+} __attribute__ ((packed)); -+ -+#define ACX_MAX_RATE_CLASSES 8 -+#define ACX_RATE_MASK_UNSPECIFIED 0 -+#define ACX_RATE_MASK_ALL 0x1eff -+#define ACX_RATE_RETRY_LIMIT 10 -+ -+struct acx_rate_class { -+ u32 enabled_rates; -+ u8 short_retry_limit; -+ u8 long_retry_limit; -+ u8 aflags; -+ u8 reserved; -+}; -+ -+struct acx_rate_policy { -+ struct acx_header header; -+ -+ u32 rate_class_cnt; -+ struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; -+} __attribute__ ((packed)); -+ -+#define WL1271_ACX_AC_COUNT 4 -+ -+struct acx_ac_cfg { -+ struct acx_header header; -+ u8 ac; -+ u8 cw_min; -+ u16 cw_max; -+ u8 aifsn; -+ u8 reserved; -+ u16 tx_op_limit; -+} __attribute__ ((packed)); -+ -+enum wl1271_acx_ac { -+ WL1271_ACX_AC_BE = 0, -+ WL1271_ACX_AC_BK = 1, -+ WL1271_ACX_AC_VI = 2, -+ WL1271_ACX_AC_VO = 3, -+ WL1271_ACX_AC_CTS2SELF = 4, -+ WL1271_ACX_AC_ANY_TID = 0x1F, -+ WL1271_ACX_AC_INVALID = 0xFF, -+}; -+ -+enum wl1271_acx_ps_scheme { -+ WL1271_ACX_PS_SCHEME_LEGACY = 0, -+ WL1271_ACX_PS_SCHEME_UPSD_TRIGGER = 1, -+ WL1271_ACX_PS_SCHEME_LEGACY_PSPOLL = 2, -+ WL1271_ACX_PS_SCHEME_SAPSD = 3, -+}; -+ -+enum wl1271_acx_ack_policy { -+ WL1271_ACX_ACK_POLICY_LEGACY = 0, 
-+ WL1271_ACX_ACK_POLICY_NO_ACK = 1, -+ WL1271_ACX_ACK_POLICY_BLOCK = 2, -+}; -+ -+#define WL1271_ACX_TID_COUNT 7 -+ -+struct acx_tid_config { -+ struct acx_header header; -+ u8 queue_id; -+ u8 channel_type; -+ u8 tsid; -+ u8 ps_scheme; -+ u8 ack_policy; -+ u8 padding[3]; -+ u32 apsd_conf[2]; -+} __attribute__ ((packed)); -+ -+struct acx_frag_threshold { -+ struct acx_header header; -+ u16 frag_threshold; -+ u8 padding[2]; -+} __attribute__ ((packed)); -+ -+#define WL1271_ACX_TX_COMPL_TIMEOUT 5 -+#define WL1271_ACX_TX_COMPL_THRESHOLD 5 -+ -+struct acx_tx_config_options { -+ struct acx_header header; -+ u16 tx_compl_timeout; /* msec */ -+ u16 tx_compl_threshold; /* number of packets */ -+} __attribute__ ((packed)); -+ -+#define ACX_RX_MEM_BLOCKS 64 -+#define ACX_TX_MIN_MEM_BLOCKS 64 -+#define ACX_TX_DESCRIPTORS 32 -+#define ACX_NUM_SSID_PROFILES 1 -+ -+struct wl1271_acx_config_memory { -+ struct acx_header header; -+ -+ u8 rx_mem_block_num; -+ u8 tx_min_mem_block_num; -+ u8 num_stations; -+ u8 num_ssid_profiles; -+ u32 total_tx_descriptors; -+} __attribute__ ((packed)); -+ -+struct wl1271_acx_mem_map { -+ struct acx_header header; -+ -+ void *code_start; -+ void *code_end; -+ -+ void *wep_defkey_start; -+ void *wep_defkey_end; -+ -+ void *sta_table_start; -+ void *sta_table_end; -+ -+ void *packet_template_start; -+ void *packet_template_end; -+ -+ /* Address of the TX result interface (control block) */ -+ u32 tx_result; -+ u32 tx_result_queue_start; -+ -+ void *queue_memory_start; -+ void *queue_memory_end; -+ -+ u32 packet_memory_pool_start; -+ u32 packet_memory_pool_end; -+ -+ void *debug_buffer1_start; -+ void *debug_buffer1_end; -+ -+ void *debug_buffer2_start; -+ void *debug_buffer2_end; -+ -+ /* Number of blocks FW allocated for TX packets */ -+ u32 num_tx_mem_blocks; -+ -+ /* Number of blocks FW allocated for RX packets */ -+ u32 num_rx_mem_blocks; -+ -+ /* the following 4 fields are valid in SLAVE mode only */ -+ u8 *tx_cbuf; -+ u8 *rx_cbuf; -+ void *rx_ctrl; -+ void *tx_ctrl; -+} __attribute__ ((packed)); -+ -+enum wl1271_acx_rx_queue_type { -+ RX_QUEUE_TYPE_RX_LOW_PRIORITY, /* All except the high priority */ -+ RX_QUEUE_TYPE_RX_HIGH_PRIORITY, /* Management and voice packets */ -+ RX_QUEUE_TYPE_NUM, -+ RX_QUEUE_TYPE_MAX = USHORT_MAX -+}; -+ -+#define WL1271_RX_INTR_THRESHOLD_DEF 0 /* no pacing, send interrupt on -+ * every event */ -+#define WL1271_RX_INTR_THRESHOLD_MIN 0 -+#define WL1271_RX_INTR_THRESHOLD_MAX 15 -+ -+#define WL1271_RX_INTR_TIMEOUT_DEF 5 -+#define WL1271_RX_INTR_TIMEOUT_MIN 1 -+#define WL1271_RX_INTR_TIMEOUT_MAX 100 -+ -+struct wl1271_acx_rx_config_opt { -+ struct acx_header header; -+ -+ u16 mblk_threshold; -+ u16 threshold; -+ u16 timeout; -+ u8 queue_type; -+ u8 reserved; -+} __attribute__ ((packed)); -+ -+enum { -+ ACX_WAKE_UP_CONDITIONS = 0x0002, -+ ACX_MEM_CFG = 0x0003, -+ ACX_SLOT = 0x0004, -+ ACX_AC_CFG = 0x0007, -+ ACX_MEM_MAP = 0x0008, -+ ACX_AID = 0x000A, -+ /* ACX_FW_REV is missing in the ref driver, but seems to work */ -+ ACX_FW_REV = 0x000D, -+ ACX_MEDIUM_USAGE = 0x000F, -+ ACX_RX_CFG = 0x0010, -+ ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */ -+ ACX_STATISTICS = 0x0013, /* Debug API */ -+ ACX_PWR_CONSUMPTION_STATISTICS = 0x0014, -+ ACX_FEATURE_CFG = 0x0015, -+ ACX_TID_CFG = 0x001A, -+ ACX_PS_RX_STREAMING = 0x001B, -+ ACX_BEACON_FILTER_OPT = 0x001F, -+ ACX_NOISE_HIST = 0x0021, -+ ACX_HDK_VERSION = 0x0022, /* ??? 
*/ -+ ACX_PD_THRESHOLD = 0x0023, -+ ACX_TX_CONFIG_OPT = 0x0024, -+ ACX_CCA_THRESHOLD = 0x0025, -+ ACX_EVENT_MBOX_MASK = 0x0026, -+ ACX_CONN_MONIT_PARAMS = 0x002D, -+ ACX_CONS_TX_FAILURE = 0x002F, -+ ACX_BCN_DTIM_OPTIONS = 0x0031, -+ ACX_SG_ENABLE = 0x0032, -+ ACX_SG_CFG = 0x0033, -+ ACX_BEACON_FILTER_TABLE = 0x0038, -+ ACX_ARP_IP_FILTER = 0x0039, -+ ACX_ROAMING_STATISTICS_TBL = 0x003B, -+ ACX_RATE_POLICY = 0x003D, -+ ACX_CTS_PROTECTION = 0x003E, -+ ACX_SLEEP_AUTH = 0x003F, -+ ACX_PREAMBLE_TYPE = 0x0040, -+ ACX_ERROR_CNT = 0x0041, -+ ACX_IBSS_FILTER = 0x0044, -+ ACX_SERVICE_PERIOD_TIMEOUT = 0x0045, -+ ACX_TSF_INFO = 0x0046, -+ ACX_CONFIG_PS_WMM = 0x0049, -+ ACX_ENABLE_RX_DATA_FILTER = 0x004A, -+ ACX_SET_RX_DATA_FILTER = 0x004B, -+ ACX_GET_DATA_FILTER_STATISTICS = 0x004C, -+ ACX_RX_CONFIG_OPT = 0x004E, -+ ACX_FRAG_CFG = 0x004F, -+ ACX_BET_ENABLE = 0x0050, -+ ACX_RSSI_SNR_TRIGGER = 0x0051, -+ ACX_RSSI_SNR_WEIGHTS = 0x0051, -+ ACX_KEEP_ALIVE_MODE = 0x0052, -+ ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, -+ ACX_BA_SESSION_RESPONDER_POLICY = 0x0055, -+ ACX_BA_SESSION_INITIATOR_POLICY = 0x0056, -+ ACX_PEER_HT_CAP = 0x0057, -+ ACX_HT_BSS_OPERATION = 0x0058, -+ ACX_COEX_ACTIVITY = 0x0059, -+ DOT11_RX_MSDU_LIFE_TIME = 0x1004, -+ DOT11_CUR_TX_PWR = 0x100D, -+ DOT11_RX_DOT11_MODE = 0x1012, -+ DOT11_RTS_THRESHOLD = 0x1013, -+ DOT11_GROUP_ADDRESS_TBL = 0x1014, -+ -+ MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, -+ -+ MAX_IE = 0xFFFF -+}; -+ -+ -+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, -+ u8 listen_interval); -+int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); -+int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len); -+int wl1271_acx_tx_power(struct wl1271 *wl, int power); -+int wl1271_acx_feature_cfg(struct wl1271 *wl); -+int wl1271_acx_mem_map(struct wl1271 *wl, -+ struct acx_header *mem_map, size_t len); -+int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time); -+int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter); -+int wl1271_acx_pd_threshold(struct wl1271 *wl); -+int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); -+int wl1271_acx_group_address_tbl(struct wl1271 *wl); -+int wl1271_acx_service_period_timeout(struct wl1271 *wl); -+int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); -+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl); -+int wl1271_acx_beacon_filter_table(struct wl1271 *wl); -+int wl1271_acx_sg_enable(struct wl1271 *wl); -+int wl1271_acx_sg_cfg(struct wl1271 *wl); -+int wl1271_acx_cca_threshold(struct wl1271 *wl); -+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); -+int wl1271_acx_aid(struct wl1271 *wl, u16 aid); -+int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); -+int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); -+int wl1271_acx_cts_protect(struct wl1271 *wl, -+ enum acx_ctsprotect_type ctsprotect); -+int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); -+int wl1271_acx_rate_policies(struct wl1271 *wl); -+int wl1271_acx_ac_cfg(struct wl1271 *wl); -+int wl1271_acx_tid_cfg(struct wl1271 *wl); -+int wl1271_acx_frag_threshold(struct wl1271 *wl); -+int wl1271_acx_tx_config_options(struct wl1271 *wl); -+int wl1271_acx_mem_cfg(struct wl1271 *wl); -+int wl1271_acx_init_mem_config(struct wl1271 *wl); -+int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); -+ -+#endif /* __WL1271_ACX_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_boot.c 
linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_boot.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_boot.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_boot.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,540 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+ -+#include "wl1271_acx.h" -+#include "wl1271_reg.h" -+#include "wl1271_boot.h" -+#include "wl1271_spi.h" -+#include "wl1271_event.h" -+ -+static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { -+ [PART_DOWN] = { -+ .mem = { -+ .start = 0x00000000, -+ .size = 0x000177c0 -+ }, -+ .reg = { -+ .start = REGISTERS_BASE, -+ .size = 0x00008800 -+ }, -+ }, -+ -+ [PART_WORK] = { -+ .mem = { -+ .start = 0x00040000, -+ .size = 0x00014fc0 -+ }, -+ .reg = { -+ .start = REGISTERS_BASE, -+ .size = 0x0000b000 -+ }, -+ }, -+ -+ [PART_DRPW] = { -+ .mem = { -+ .start = 0x00040000, -+ .size = 0x00014fc0 -+ }, -+ .reg = { -+ .start = DRPW_BASE, -+ .size = 0x00006000 -+ } -+ } -+}; -+ -+static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) -+{ -+ u32 cpu_ctrl; -+ -+ /* 10.5.0 run the firmware (I) */ -+ cpu_ctrl = wl1271_reg_read32(wl, ACX_REG_ECPU_CONTROL); -+ -+ /* 10.5.1 run the firmware (II) */ -+ cpu_ctrl |= flag; -+ wl1271_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); -+} -+ -+static void wl1271_boot_fw_version(struct wl1271 *wl) -+{ -+ struct wl1271_static_data static_data; -+ -+ wl1271_spi_mem_read(wl, wl->cmd_box_addr, -+ &static_data, sizeof(static_data)); -+ -+ strncpy(wl->chip.fw_ver, static_data.fw_version, -+ sizeof(wl->chip.fw_ver)); -+ -+ /* make sure the string is NULL-terminated */ -+ wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0'; -+} -+ -+static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, -+ size_t fw_data_len, u32 dest) -+{ -+ int addr, chunk_num, partition_limit; -+ u8 *p; -+ -+ /* whal_FwCtrl_LoadFwImageSm() */ -+ -+ wl1271_debug(DEBUG_BOOT, "starting firmware upload"); -+ -+ wl1271_debug(DEBUG_BOOT, "fw_data_len %d chunk_size %d", fw_data_len, -+ CHUNK_SIZE); -+ -+ -+ if ((fw_data_len % 4) != 0) { -+ wl1271_error("firmware length not multiple of four"); -+ return -EIO; -+ } -+ -+ wl1271_set_partition(wl, dest, -+ part_table[PART_DOWN].mem.size, -+ part_table[PART_DOWN].reg.start, -+ part_table[PART_DOWN].reg.size); -+ -+ /* 10.1 set partition limit and chunk num */ -+ chunk_num = 0; -+ partition_limit = part_table[PART_DOWN].mem.size; -+ -+ while (chunk_num < fw_data_len / CHUNK_SIZE) { -+ /* 10.2 update partition, if needed */ -+ addr = dest + (chunk_num + 2) * CHUNK_SIZE; -+ if (addr > partition_limit) { -+ addr = dest + chunk_num * CHUNK_SIZE; -+ partition_limit = chunk_num * CHUNK_SIZE + -+ part_table[PART_DOWN].mem.size; -+ -+ 
/* FIXME: Over 80 chars! */ -+ wl1271_set_partition(wl, -+ addr, -+ part_table[PART_DOWN].mem.size, -+ part_table[PART_DOWN].reg.start, -+ part_table[PART_DOWN].reg.size); -+ } -+ -+ /* 10.3 upload the chunk */ -+ addr = dest + chunk_num * CHUNK_SIZE; -+ p = buf + chunk_num * CHUNK_SIZE; -+ wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", -+ p, addr); -+ wl1271_spi_mem_write(wl, addr, p, CHUNK_SIZE); -+ -+ chunk_num++; -+ } -+ -+ /* 10.4 upload the last chunk */ -+ addr = dest + chunk_num * CHUNK_SIZE; -+ p = buf + chunk_num * CHUNK_SIZE; -+ wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%d B) 0x%p to 0x%x", -+ fw_data_len % CHUNK_SIZE, p, addr); -+ wl1271_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE); -+ -+ return 0; -+} -+ -+static int wl1271_boot_upload_firmware(struct wl1271 *wl) -+{ -+ u32 chunks, addr, len; -+ u8 *fw; -+ -+ fw = wl->fw; -+ chunks = be32_to_cpup((u32 *) fw); -+ fw += sizeof(u32); -+ -+ wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); -+ -+ while (chunks--) { -+ addr = be32_to_cpup((u32 *) fw); -+ fw += sizeof(u32); -+ len = be32_to_cpup((u32 *) fw); -+ fw += sizeof(u32); -+ -+ if (len > 300000) { -+ wl1271_info("firmware chunk too long: %u", len); -+ return -EINVAL; -+ } -+ wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", -+ chunks, addr, len); -+ wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); -+ fw += len; -+ } -+ -+ return 0; -+} -+ -+static int wl1271_boot_upload_nvs(struct wl1271 *wl) -+{ -+ size_t nvs_len, burst_len; -+ int i; -+ u32 dest_addr, val; -+ u8 *nvs_ptr, *nvs, *nvs_aligned; -+ -+ nvs = wl->nvs; -+ if (nvs == NULL) -+ return -ENODEV; -+ -+ nvs_ptr = nvs; -+ -+ nvs_len = wl->nvs_len; -+ -+ /* Update the device MAC address into the nvs */ -+ nvs[11] = wl->mac_addr[0]; -+ nvs[10] = wl->mac_addr[1]; -+ nvs[6] = wl->mac_addr[2]; -+ nvs[5] = wl->mac_addr[3]; -+ nvs[4] = wl->mac_addr[4]; -+ nvs[3] = wl->mac_addr[5]; -+ -+ /* -+ * Layout before the actual NVS tables: -+ * 1 byte : burst length. -+ * 2 bytes: destination address. -+ * n bytes: data to burst copy. -+ * -+ * This is ended by a 0 length, then the NVS tables. -+ */ -+ -+ /* FIXME: Do we need to check here whether the LSB is 1? */ -+ while (nvs_ptr[0]) { -+ burst_len = nvs_ptr[0]; -+ dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); -+ -+ /* FIXME: Due to our new wl1271_translate_reg_addr function, -+ we need to add the REGISTER_BASE to the destination */ -+ dest_addr += REGISTERS_BASE; -+ -+ /* We move our pointer to the data */ -+ nvs_ptr += 3; -+ -+ for (i = 0; i < burst_len; i++) { -+ val = (nvs_ptr[0] | (nvs_ptr[1] << 8) -+ | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); -+ -+ wl1271_debug(DEBUG_BOOT, -+ "nvs burst write 0x%x: 0x%x", -+ dest_addr, val); -+ wl1271_reg_write32(wl, dest_addr, val); -+ -+ nvs_ptr += 4; -+ dest_addr += 4; -+ } -+ } -+ -+ /* -+ * We've reached the first zero length, the first NVS table -+ * is 7 bytes further. 
-+ */ -+ nvs_ptr += 7; -+ nvs_len -= nvs_ptr - nvs; -+ nvs_len = ALIGN(nvs_len, 4); -+ -+ /* FIXME: The driver sets the partition here, but this is not needed, -+ since it sets to the same one as currently in use */ -+ /* Now we must set the partition correctly */ -+ wl1271_set_partition(wl, -+ part_table[PART_WORK].mem.start, -+ part_table[PART_WORK].mem.size, -+ part_table[PART_WORK].reg.start, -+ part_table[PART_WORK].reg.size); -+ -+ /* Copy the NVS tables to a new block to ensure alignment */ -+ nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); -+ -+ /* And finally we upload the NVS tables */ -+ /* FIXME: In wl1271, we upload everything at once. -+ No endianness handling needed here?! The ref driver doesn't do -+ anything about it at this point */ -+ wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len); -+ -+ kfree(nvs_aligned); -+ return 0; -+} -+ -+static void wl1271_boot_enable_interrupts(struct wl1271 *wl) -+{ -+ enable_irq(wl->irq); -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(WL1271_INTR_MASK)); -+ wl1271_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL); -+} -+ -+static int wl1271_boot_soft_reset(struct wl1271 *wl) -+{ -+ unsigned long timeout; -+ u32 boot_data; -+ -+ /* perform soft reset */ -+ wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); -+ -+ /* SOFT_RESET is self clearing */ -+ timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); -+ while (1) { -+ boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET); -+ wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); -+ if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) -+ break; -+ -+ if (time_after(jiffies, timeout)) { -+ /* 1.2 check pWhalBus->uSelfClearTime if the -+ * timeout was reached */ -+ wl1271_error("soft reset timeout"); -+ return -1; -+ } -+ -+ udelay(SOFT_RESET_STALL_TIME); -+ } -+ -+ /* disable Rx/Tx */ -+ wl1271_reg_write32(wl, ENABLE, 0x0); -+ -+ /* disable auto calibration on start*/ -+ wl1271_reg_write32(wl, SPARE_A2, 0xffff); -+ -+ return 0; -+} -+ -+static int wl1271_boot_run_firmware(struct wl1271 *wl) -+{ -+ int loop, ret; -+ u32 chip_id, interrupt; -+ -+ wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); -+ -+ chip_id = wl1271_reg_read32(wl, CHIP_ID_B); -+ -+ wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); -+ -+ if (chip_id != wl->chip.id) { -+ wl1271_error("chip id doesn't match after firmware boot"); -+ return -EIO; -+ } -+ -+ /* wait for init to complete */ -+ loop = 0; -+ while (loop++ < INIT_LOOP) { -+ udelay(INIT_LOOP_DELAY); -+ interrupt = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); -+ -+ if (interrupt == 0xffffffff) { -+ wl1271_error("error reading hardware complete " -+ "init indication"); -+ return -EIO; -+ } -+ /* check that ACX_INTR_INIT_COMPLETE is enabled */ -+ else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, -+ WL1271_ACX_INTR_INIT_COMPLETE); -+ break; -+ } -+ } -+ -+ if (loop >= INIT_LOOP) { -+ wl1271_error("timeout waiting for the hardware to " -+ "complete initialization"); -+ return -EIO; -+ } -+ -+ /* get hardware config command mail box */ -+ wl->cmd_box_addr = wl1271_reg_read32(wl, REG_COMMAND_MAILBOX_PTR); -+ -+ /* get hardware config event mail box */ -+ wl->event_box_addr = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); -+ -+ /* set the working partition to its "running" mode offset */ -+ wl1271_set_partition(wl, -+ part_table[PART_WORK].mem.start, -+ part_table[PART_WORK].mem.size, -+ part_table[PART_WORK].reg.start, -+ 
part_table[PART_WORK].reg.size); -+ -+ wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", -+ wl->cmd_box_addr, wl->event_box_addr); -+ -+ wl1271_boot_fw_version(wl); -+ -+ /* -+ * in case of full asynchronous mode the firmware event must be -+ * ready to receive event from the command mailbox -+ */ -+ -+ /* enable gpio interrupts */ -+ wl1271_boot_enable_interrupts(wl); -+ -+ /* unmask all mbox events */ -+ wl->event_mask = 0xffffffff; -+ -+ ret = wl1271_event_unmask(wl); -+ if (ret < 0) { -+ wl1271_error("EVENT mask setting failed"); -+ return ret; -+ } -+ -+ wl1271_event_mbox_config(wl); -+ -+ /* firmware startup completed */ -+ return 0; -+} -+ -+static int wl1271_boot_write_irq_polarity(struct wl1271 *wl) -+{ -+ u32 polarity, status, i; -+ -+ wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY); -+ wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_READ); -+ -+ /* Wait until the command is complete (ie. bit 18 is set) */ -+ for (i = 0; i < OCP_CMD_LOOP; i++) { -+ polarity = wl1271_reg_read32(wl, OCP_DATA_READ); -+ if (polarity & OCP_READY_MASK) -+ break; -+ } -+ if (i == OCP_CMD_LOOP) { -+ wl1271_error("OCP command timeout!"); -+ return -EIO; -+ } -+ -+ status = polarity & OCP_STATUS_MASK; -+ if (status != OCP_STATUS_OK) { -+ wl1271_error("OCP command failed (%d)", status); -+ return -EIO; -+ } -+ -+ /* We use HIGH polarity, so unset the LOW bit */ -+ polarity &= ~POLARITY_LOW; -+ -+ wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY); -+ wl1271_reg_write32(wl, OCP_DATA_WRITE, polarity); -+ wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_WRITE); -+ -+ return 0; -+} -+ -+int wl1271_boot(struct wl1271 *wl) -+{ -+ int ret = 0; -+ u32 tmp, clk, pause; -+ -+ if (REF_CLOCK == 0 || REF_CLOCK == 2) -+ /* ref clk: 19.2/38.4 */ -+ clk = 0x3; -+ else if (REF_CLOCK == 1 || REF_CLOCK == 3) -+ /* ref clk: 26/52 */ -+ clk = 0x5; -+ -+ wl1271_reg_write32(wl, PLL_PARAMETERS, clk); -+ -+ pause = wl1271_reg_read32(wl, PLL_PARAMETERS); -+ -+ wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); -+ -+ pause &= ~(WU_COUNTER_PAUSE_VAL); /* FIXME: This should probably be -+ * WU_COUNTER_PAUSE_VAL instead of -+ * 0x3ff (magic number ). How does -+ * this work?! */ -+ pause |= WU_COUNTER_PAUSE_VAL; -+ wl1271_reg_write32(wl, WU_COUNTER_PAUSE, pause); -+ -+ /* Continue the ELP wake up sequence */ -+ wl1271_reg_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); -+ udelay(500); -+ -+ wl1271_set_partition(wl, -+ part_table[PART_DRPW].mem.start, -+ part_table[PART_DRPW].mem.size, -+ part_table[PART_DRPW].reg.start, -+ part_table[PART_DRPW].reg.size); -+ -+ /* Read-modify-write DRPW_SCRATCH_START register (see next state) -+ to be used by DRPw FW. The RTRIM value will be added by the FW -+ before taking DRPw out of reset */ -+ -+ wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); -+ clk = wl1271_reg_read32(wl, DRPW_SCRATCH_START); -+ -+ wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); -+ -+ /* 2 */ -+ clk |= (REF_CLOCK << 1) << 4; -+ wl1271_reg_write32(wl, DRPW_SCRATCH_START, clk); -+ -+ wl1271_set_partition(wl, -+ part_table[PART_WORK].mem.start, -+ part_table[PART_WORK].mem.size, -+ part_table[PART_WORK].reg.start, -+ part_table[PART_WORK].reg.size); -+ -+ /* Disable interrupts */ -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); -+ -+ ret = wl1271_boot_soft_reset(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* 2. start processing NVS file */ -+ ret = wl1271_boot_upload_nvs(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* write firmware's last address (ie. 
it's length) to -+ * ACX_EEPROMLESS_IND_REG */ -+ wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); -+ -+ wl1271_reg_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG); -+ -+ tmp = wl1271_reg_read32(wl, CHIP_ID_B); -+ -+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); -+ -+ /* 6. read the EEPROM parameters */ -+ tmp = wl1271_reg_read32(wl, SCR_PAD2); -+ -+ ret = wl1271_boot_write_irq_polarity(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* FIXME: Need to check whether this is really what we want */ -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, -+ WL1271_ACX_ALL_EVENTS_VECTOR); -+ -+ /* WL1271: The reference driver skips steps 7 to 10 (jumps directly -+ * to upload_fw) */ -+ -+ ret = wl1271_boot_upload_firmware(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* 10.5 start firmware */ -+ ret = wl1271_boot_run_firmware(wl); -+ if (ret < 0) -+ goto out; -+ -+ /* set the wl1271 default filters */ -+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG; -+ wl->rx_filter = WL1271_DEFAULT_RX_FILTER; -+ -+ wl1271_event_mbox_config(wl); -+ -+out: -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_boot.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_boot.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_boot.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_boot.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,72 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __BOOT_H__ -+#define __BOOT_H__ -+ -+#include "wl1271.h" -+ -+int wl1271_boot(struct wl1271 *wl); -+ -+#define WL1271_NO_SUBBANDS 8 -+#define WL1271_NO_POWER_LEVELS 4 -+#define WL1271_FW_VERSION_MAX_LEN 20 -+ -+struct wl1271_static_data { -+ u8 mac_address[ETH_ALEN]; -+ u8 padding[2]; -+ u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; -+ u32 hw_version; -+ u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; -+}; -+ -+/* number of times we try to read the INIT interrupt */ -+#define INIT_LOOP 20000 -+ -+/* delay between retries */ -+#define INIT_LOOP_DELAY 50 -+ -+#define REF_CLOCK 2 -+#define WU_COUNTER_PAUSE_VAL 0x3FF -+#define WELP_ARM_COMMAND_VAL 0x4 -+ -+#define OCP_CMD_LOOP 32 -+ -+#define OCP_CMD_WRITE 0x1 -+#define OCP_CMD_READ 0x2 -+ -+#define OCP_READY_MASK BIT(18) -+#define OCP_STATUS_MASK (BIT(16) | BIT(17)) -+ -+#define OCP_STATUS_NO_RESP 0x00000 -+#define OCP_STATUS_OK 0x10000 -+#define OCP_STATUS_REQ_FAILED 0x20000 -+#define OCP_STATUS_RESP_ERROR 0x30000 -+ -+#define OCP_REG_POLARITY 0x30032 -+ -+#define CMD_MBOX_ADDRESS 0x407B4 -+ -+#define POLARITY_LOW BIT(1) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_cmd.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_cmd.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_cmd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_cmd.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,813 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "wl1271.h" -+#include "wl1271_reg.h" -+#include "wl1271_spi.h" -+#include "wl1271_acx.h" -+#include "wl12xx_80211.h" -+#include "wl1271_cmd.h" -+ -+/* -+ * send command to firmware -+ * -+ * @wl: wl struct -+ * @id: command id -+ * @buf: buffer containing the command, must work with dma -+ * @len: length of the buffer -+ */ -+int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len) -+{ -+ struct wl1271_cmd_header *cmd; -+ unsigned long timeout; -+ u32 intr; -+ int ret = 0; -+ -+ cmd = buf; -+ cmd->id = id; -+ cmd->status = 0; -+ -+ WARN_ON(len % 4 != 0); -+ -+ wl1271_spi_mem_write(wl, wl->cmd_box_addr, buf, len); -+ -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); -+ -+ timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); -+ -+ intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); -+ while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { -+ if (time_after(jiffies, timeout)) { -+ wl1271_error("command complete timeout"); -+ ret = -ETIMEDOUT; -+ goto out; -+ } -+ -+ msleep(1); -+ -+ intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); -+ } -+ -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, -+ WL1271_ACX_INTR_CMD_COMPLETE); -+ -+out: -+ return ret; -+} -+ -+int wl1271_cmd_cal_channel_tune(struct wl1271 *wl) -+{ -+ struct wl1271_cmd_cal_channel_tune *cmd; -+ int ret = 0; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->test.id = TEST_CMD_CHANNEL_TUNE; -+ -+ cmd->band = WL1271_CHANNEL_TUNE_BAND_2_4; -+ /* set up any channel, 7 is in the middle of the range */ -+ cmd->channel = 7; -+ -+ ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); -+ if (ret < 0) -+ wl1271_warning("TEST_CMD_CHANNEL_TUNE failed"); -+ -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl) -+{ -+ struct wl1271_cmd_cal_update_ref_point *cmd; -+ int ret = 0; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->test.id = TEST_CMD_UPDATE_PD_REFERENCE_POINT; -+ -+ /* FIXME: still waiting for the correct values */ -+ cmd->ref_power = 0; -+ cmd->ref_detector = 0; -+ -+ cmd->sub_band = WL1271_PD_REFERENCE_POINT_BAND_B_G; -+ -+ ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); -+ if (ret < 0) -+ wl1271_warning("TEST_CMD_UPDATE_PD_REFERENCE_POINT failed"); -+ -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1271_cmd_cal_p2g(struct wl1271 *wl) -+{ -+ struct wl1271_cmd_cal_p2g *cmd; -+ int ret = 0; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ cmd->test.id = TEST_CMD_P2G_CAL; -+ -+ cmd->sub_band_mask = WL1271_CAL_P2G_BAND_B_G; -+ -+ ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); -+ if (ret < 0) -+ wl1271_warning("TEST_CMD_P2G_CAL failed"); -+ -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1271_cmd_cal(struct wl1271 *wl) -+{ -+ /* -+ * FIXME: we must make sure that we're not sleeping when calibration -+ * is done -+ */ -+ int ret; -+ -+ wl1271_notice("performing tx calibration"); -+ -+ ret = wl1271_cmd_cal_channel_tune(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_cal_update_ref_point(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_cal_p2g(wl); -+ if (ret < 0) -+ return ret; -+ -+ return ret; -+} -+ -+int 
wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, -+ u16 beacon_interval, u8 wait) -+{ -+ static bool do_cal = true; -+ unsigned long timeout; -+ struct wl1271_cmd_join *join; -+ int ret, i; -+ u8 *bssid; -+ -+ /* FIXME: remove when we get calibration from the factory */ -+ if (do_cal) { -+ ret = wl1271_cmd_cal(wl); -+ if (ret < 0) -+ wl1271_warning("couldn't calibrate"); -+ else -+ do_cal = false; -+ } -+ -+ -+ join = kzalloc(sizeof(*join), GFP_KERNEL); -+ if (!join) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ wl1271_debug(DEBUG_CMD, "cmd join"); -+ -+ /* Reverse order BSSID */ -+ bssid = (u8 *) &join->bssid_lsb; -+ for (i = 0; i < ETH_ALEN; i++) -+ bssid[i] = wl->bssid[ETH_ALEN - i - 1]; -+ -+ join->rx_config_options = wl->rx_config; -+ join->rx_filter_options = wl->rx_filter; -+ -+ join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | -+ RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; -+ -+ join->beacon_interval = beacon_interval; -+ join->dtim_interval = dtim_interval; -+ join->bss_type = bss_type; -+ join->channel = wl->channel; -+ join->ssid_len = wl->ssid_len; -+ memcpy(join->ssid, wl->ssid, wl->ssid_len); -+ join->ctrl = WL1271_JOIN_CMD_CTRL_TX_FLUSH; -+ -+ /* increment the session counter */ -+ wl->session_counter++; -+ if (wl->session_counter >= SESSION_COUNTER_MAX) -+ wl->session_counter = 0; -+ -+ join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; -+ -+ -+ ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join)); -+ if (ret < 0) { -+ wl1271_error("failed to initiate cmd join"); -+ goto out_free; -+ } -+ -+ timeout = msecs_to_jiffies(JOIN_TIMEOUT); -+ -+ /* -+ * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to -+ * simplify locking we just sleep instead, for now -+ */ -+ if (wait) -+ msleep(10); -+ -+out_free: -+ kfree(join); -+ -+out: -+ return ret; -+} -+ -+/** -+ * send test command to firmware -+ * -+ * @wl: wl struct -+ * @buf: buffer containing the command, with all headers, must work with dma -+ * @len: length of the buffer -+ * @answer: is answer needed -+ */ -+int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer) -+{ -+ int ret; -+ -+ wl1271_debug(DEBUG_CMD, "cmd test"); -+ -+ ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len); -+ -+ if (ret < 0) { -+ wl1271_warning("TEST command failed"); -+ return ret; -+ } -+ -+ if (answer) { -+ struct wl1271_command *cmd_answer; -+ -+ /* -+ * The test command got in, we can read the answer. -+ * The answer would be a wl1271_command, where the -+ * parameter array contains the actual answer. 
-+ */ -+ wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len); -+ -+ cmd_answer = buf; -+ -+ if (cmd_answer->header.status != CMD_STATUS_SUCCESS) -+ wl1271_error("TEST command answer error: %d", -+ cmd_answer->header.status); -+ } -+ -+ return 0; -+} -+ -+/** -+ * read acx from firmware -+ * -+ * @wl: wl struct -+ * @id: acx id -+ * @buf: buffer for the response, including all headers, must work with dma -+ * @len: lenght of buf -+ */ -+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len) -+{ -+ struct acx_header *acx = buf; -+ int ret; -+ -+ wl1271_debug(DEBUG_CMD, "cmd interrogate"); -+ -+ acx->id = id; -+ -+ /* payload length, does not include any headers */ -+ acx->len = len - sizeof(*acx); -+ -+ ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx)); -+ if (ret < 0) { -+ wl1271_error("INTERROGATE command failed"); -+ goto out; -+ } -+ -+ /* the interrogate command got in, we can read the answer */ -+ wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, len); -+ -+ acx = buf; -+ if (acx->cmd.status != CMD_STATUS_SUCCESS) -+ wl1271_error("INTERROGATE command error: %d", -+ acx->cmd.status); -+ -+out: -+ return ret; -+} -+ -+/** -+ * write acx value to firmware -+ * -+ * @wl: wl struct -+ * @id: acx id -+ * @buf: buffer containing acx, including all headers, must work with dma -+ * @len: length of buf -+ */ -+int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) -+{ -+ struct acx_header *acx = buf; -+ int ret; -+ -+ wl1271_debug(DEBUG_CMD, "cmd configure"); -+ -+ acx->id = id; -+ -+ /* payload length, does not include any headers */ -+ acx->len = len - sizeof(*acx); -+ -+ ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len); -+ if (ret < 0) { -+ wl1271_warning("CONFIGURE command NOK"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable) -+{ -+ struct cmd_enabledisable_path *cmd; -+ int ret; -+ u16 cmd_rx, cmd_tx; -+ -+ wl1271_debug(DEBUG_CMD, "cmd data path"); -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ cmd->channel = channel; -+ -+ if (enable) { -+ cmd_rx = CMD_ENABLE_RX; -+ cmd_tx = CMD_ENABLE_TX; -+ } else { -+ cmd_rx = CMD_DISABLE_RX; -+ cmd_tx = CMD_DISABLE_TX; -+ } -+ -+ ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1271_error("rx %s cmd for channel %d failed", -+ enable ? "start" : "stop", channel); -+ goto out; -+ } -+ -+ wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", -+ enable ? "start" : "stop", channel); -+ -+ ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1271_error("tx %s cmd for channel %d failed", -+ enable ? "start" : "stop", channel); -+ return ret; -+ } -+ -+ wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", -+ enable ? 
"start" : "stop", channel); -+ -+out: -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) -+{ -+ struct wl1271_cmd_ps_params *ps_params = NULL; -+ int ret = 0; -+ -+ /* FIXME: this should be in ps.c */ -+ ret = wl1271_acx_wake_up_conditions(wl, WAKE_UP_EVENT_DTIM_BITMAP, -+ wl->listen_int); -+ if (ret < 0) { -+ wl1271_error("couldn't set wake up conditions"); -+ goto out; -+ } -+ -+ wl1271_debug(DEBUG_CMD, "cmd set ps mode"); -+ -+ ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL); -+ if (!ps_params) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ ps_params->ps_mode = ps_mode; -+ ps_params->send_null_data = 1; -+ ps_params->retries = 5; -+ ps_params->hang_over_period = 128; -+ ps_params->null_data_rate = 1; /* 1 Mbps */ -+ -+ ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, -+ sizeof(*ps_params)); -+ if (ret < 0) { -+ wl1271_error("cmd set_ps_mode failed"); -+ goto out; -+ } -+ -+out: -+ kfree(ps_params); -+ return ret; -+} -+ -+int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, -+ size_t len) -+{ -+ struct cmd_read_write_memory *cmd; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_CMD, "cmd read memory"); -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ WARN_ON(len > MAX_READ_SIZE); -+ len = min_t(size_t, len, MAX_READ_SIZE); -+ -+ cmd->addr = addr; -+ cmd->size = len; -+ -+ ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1271_error("read memory command failed: %d", ret); -+ goto out; -+ } -+ -+ /* the read command got in, we can now read the answer */ -+ wl1271_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd)); -+ -+ if (cmd->header.status != CMD_STATUS_SUCCESS) -+ wl1271_error("error in read command result: %d", -+ cmd->header.status); -+ -+ memcpy(answer, cmd->value, len); -+ -+out: -+ kfree(cmd); -+ return ret; -+} -+ -+int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, -+ u8 active_scan, u8 high_prio, u8 num_channels, -+ u8 probe_requests) -+{ -+ -+ struct wl1271_cmd_trigger_scan_to *trigger = NULL; -+ struct wl1271_cmd_scan *params = NULL; -+ int i, ret; -+ u16 scan_options = 0; -+ -+ if (wl->scanning) -+ return -EINVAL; -+ -+ params = kzalloc(sizeof(*params), GFP_KERNEL); -+ if (!params) -+ return -ENOMEM; -+ -+ params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD); -+ params->params.rx_filter_options = -+ cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN); -+ -+ if (!active_scan) -+ scan_options |= WL1271_SCAN_OPT_PASSIVE; -+ if (high_prio) -+ scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH; -+ params->params.scan_options = scan_options; -+ -+ params->params.num_channels = num_channels; -+ params->params.num_probe_requests = probe_requests; -+ params->params.tx_rate = cpu_to_le32(RATE_MASK_2MBPS); -+ params->params.tid_trigger = 0; -+ params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; -+ -+ for (i = 0; i < num_channels; i++) { -+ params->channels[i].min_duration = -+ cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); -+ params->channels[i].max_duration = -+ cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); -+ memset(¶ms->channels[i].bssid_lsb, 0xff, 4); -+ memset(¶ms->channels[i].bssid_msb, 0xff, 2); -+ params->channels[i].early_termination = 0; -+ params->channels[i].tx_power_att = WL1271_SCAN_CURRENT_TX_PWR; -+ params->channels[i].channel = i + 1; -+ } -+ -+ if (len && ssid) { -+ params->params.ssid_len = len; -+ memcpy(params->params.ssid, ssid, len); -+ } -+ -+ ret = wl1271_cmd_build_probe_req(wl, ssid, len); -+ 
if (ret < 0) { -+ wl1271_error("PROBE request template failed"); -+ goto out; -+ } -+ -+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); -+ if (!trigger) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ /* disable the timeout */ -+ trigger->timeout = 0; -+ -+ ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, -+ sizeof(*trigger)); -+ if (ret < 0) { -+ wl1271_error("trigger scan to failed for hw scan"); -+ goto out; -+ } -+ -+ wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); -+ -+ wl->scanning = true; -+ -+ ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); -+ if (ret < 0) { -+ wl1271_error("SCAN failed"); -+ goto out; -+ } -+ -+ wl1271_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params)); -+ -+ if (params->header.status != CMD_STATUS_SUCCESS) { -+ wl1271_error("Scan command error: %d", -+ params->header.status); -+ wl->scanning = false; -+ ret = -EIO; -+ goto out; -+ } -+ -+out: -+ kfree(params); -+ return ret; -+} -+ -+int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, -+ void *buf, size_t buf_len) -+{ -+ struct wl1271_cmd_template_set *cmd; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_CMD, "cmd template_set %d", template_id); -+ -+ WARN_ON(buf_len > WL1271_CMD_TEMPL_MAX_SIZE); -+ buf_len = min_t(size_t, buf_len, WL1271_CMD_TEMPL_MAX_SIZE); -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ cmd->len = cpu_to_le16(buf_len); -+ cmd->template_type = template_id; -+ cmd->enabled_rates = ACX_RATE_MASK_UNSPECIFIED; -+ cmd->short_retry_limit = ACX_RATE_RETRY_LIMIT; -+ cmd->long_retry_limit = ACX_RATE_RETRY_LIMIT; -+ -+ if (buf) -+ memcpy(cmd->template_data, buf, buf_len); -+ -+ ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1271_warning("cmd set_template failed: %d", ret); -+ goto out_free; -+ } -+ -+out_free: -+ kfree(cmd); -+ -+out: -+ return ret; -+} -+ -+static int wl1271_build_basic_rates(char *rates) -+{ -+ u8 index = 0; -+ -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; -+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; -+ -+ return index; -+} -+ -+static int wl1271_build_extended_rates(char *rates) -+{ -+ u8 index = 0; -+ -+ rates[index++] = IEEE80211_OFDM_RATE_6MB; -+ rates[index++] = IEEE80211_OFDM_RATE_9MB; -+ rates[index++] = IEEE80211_OFDM_RATE_12MB; -+ rates[index++] = IEEE80211_OFDM_RATE_18MB; -+ rates[index++] = IEEE80211_OFDM_RATE_24MB; -+ rates[index++] = IEEE80211_OFDM_RATE_36MB; -+ rates[index++] = IEEE80211_OFDM_RATE_48MB; -+ rates[index++] = IEEE80211_OFDM_RATE_54MB; -+ -+ return index; -+} -+ -+int wl1271_cmd_build_null_data(struct wl1271 *wl) -+{ -+ struct wl12xx_null_data_template template; -+ -+ if (!is_zero_ether_addr(wl->bssid)) { -+ memcpy(template.header.da, wl->bssid, ETH_ALEN); -+ memcpy(template.header.bssid, wl->bssid, ETH_ALEN); -+ } else { -+ memset(template.header.da, 0xff, ETH_ALEN); -+ memset(template.header.bssid, 0xff, ETH_ALEN); -+ } -+ -+ memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); -+ template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | -+ IEEE80211_STYPE_NULLFUNC); -+ -+ return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, -+ sizeof(template)); -+ -+} -+ -+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) -+{ -+ struct wl12xx_ps_poll_template template; -+ -+ memcpy(template.bssid, wl->bssid, 
ETH_ALEN); -+ memcpy(template.ta, wl->mac_addr, ETH_ALEN); -+ template.aid = aid; -+ template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); -+ -+ return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, -+ sizeof(template)); -+ -+} -+ -+int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len) -+{ -+ struct wl12xx_probe_req_template template; -+ struct wl12xx_ie_rates *rates; -+ char *ptr; -+ u16 size; -+ -+ ptr = (char *)&template; -+ size = sizeof(struct ieee80211_header); -+ -+ memset(template.header.da, 0xff, ETH_ALEN); -+ memset(template.header.bssid, 0xff, ETH_ALEN); -+ memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); -+ template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); -+ -+ /* IEs */ -+ /* SSID */ -+ template.ssid.header.id = WLAN_EID_SSID; -+ template.ssid.header.len = ssid_len; -+ if (ssid_len && ssid) -+ memcpy(template.ssid.ssid, ssid, ssid_len); -+ size += sizeof(struct wl12xx_ie_header) + ssid_len; -+ ptr += size; -+ -+ /* Basic Rates */ -+ rates = (struct wl12xx_ie_rates *)ptr; -+ rates->header.id = WLAN_EID_SUPP_RATES; -+ rates->header.len = wl1271_build_basic_rates(rates->rates); -+ size += sizeof(struct wl12xx_ie_header) + rates->header.len; -+ ptr += sizeof(struct wl12xx_ie_header) + rates->header.len; -+ -+ /* Extended rates */ -+ rates = (struct wl12xx_ie_rates *)ptr; -+ rates->header.id = WLAN_EID_EXT_SUPP_RATES; -+ rates->header.len = wl1271_build_extended_rates(rates->rates); -+ size += sizeof(struct wl12xx_ie_header) + rates->header.len; -+ -+ wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size); -+ -+ return wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, -+ &template, size); -+} -+ -+int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) -+{ -+ struct wl1271_cmd_set_keys *cmd; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ cmd->id = id; -+ cmd->key_action = KEY_SET_ID; -+ cmd->key_type = KEY_WEP; -+ -+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1271_warning("cmd set_default_wep_key failed: %d", ret); -+ goto out; -+ } -+ -+out: -+ kfree(cmd); -+ -+ return ret; -+} -+ -+int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, -+ u8 key_size, const u8 *key, const u8 *addr) -+{ -+ struct wl1271_cmd_set_keys *cmd; -+ int ret = 0; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ if (key_type != KEY_WEP) -+ memcpy(cmd->addr, addr, ETH_ALEN); -+ -+ cmd->key_action = action; -+ cmd->key_size = key_size; -+ cmd->key_type = key_type; -+ -+ /* we have only one SSID profile */ -+ cmd->ssid_profile = 0; -+ -+ cmd->id = id; -+ -+ /* FIXME: this is from wl1251, needs to be checked */ -+ if (key_type == KEY_TKIP) { -+ /* -+ * We get the key in the following form: -+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) -+ * but the target is expecting: -+ * TKIP - RX MIC - TX MIC -+ */ -+ memcpy(cmd->key, key, 16); -+ memcpy(cmd->key + 16, key + 24, 8); -+ memcpy(cmd->key + 24, key + 16, 8); -+ -+ } else { -+ memcpy(cmd->key, key, key_size); -+ } -+ -+ wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd)); -+ -+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); -+ if (ret < 0) { -+ wl1271_warning("could not set keys"); -+ goto out; -+ } -+ -+out: -+ kfree(cmd); -+ -+ return ret; -+} -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_cmd.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_cmd.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_cmd.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_cmd.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,463 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_CMD_H__ -+#define __WL1271_CMD_H__ -+ -+#include "wl1271.h" -+ -+struct acx_header; -+ -+int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len); -+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, -+ u16 beacon_interval, u8 wait); -+int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); -+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); -+int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); -+int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable); -+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); -+int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, -+ size_t len); -+int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, -+ u8 active_scan, u8 high_prio, u8 num_channels, -+ u8 probe_requests); -+int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, -+ void *buf, size_t buf_len); -+int wl1271_cmd_build_null_data(struct wl1271 *wl); -+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); -+int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len); -+int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); -+int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, -+ u8 key_size, const u8 *key, const u8 *addr); -+ -+enum wl1271_commands { -+ CMD_INTERROGATE = 1, /*use this to read information elements*/ -+ CMD_CONFIGURE = 2, /*use this to write information elements*/ -+ CMD_ENABLE_RX = 3, -+ CMD_ENABLE_TX = 4, -+ CMD_DISABLE_RX = 5, -+ CMD_DISABLE_TX = 6, -+ CMD_SCAN = 8, -+ CMD_STOP_SCAN = 9, -+ CMD_START_JOIN = 11, -+ CMD_SET_KEYS = 12, -+ CMD_READ_MEMORY = 13, -+ CMD_WRITE_MEMORY = 14, -+ CMD_SET_TEMPLATE = 19, -+ CMD_TEST = 23, -+ CMD_NOISE_HIST = 28, -+ CMD_LNA_CONTROL = 32, -+ CMD_SET_BCN_MODE = 33, -+ CMD_MEASUREMENT = 34, -+ CMD_STOP_MEASUREMENT = 35, -+ CMD_DISCONNECT = 36, -+ CMD_SET_PS_MODE = 37, -+ CMD_CHANNEL_SWITCH = 38, -+ CMD_STOP_CHANNEL_SWICTH = 39, -+ CMD_AP_DISCOVERY = 40, -+ CMD_STOP_AP_DISCOVERY = 41, -+ CMD_SPS_SCAN = 42, -+ CMD_STOP_SPS_SCAN = 43, -+ CMD_HEALTH_CHECK = 45, -+ CMD_DEBUG = 46, -+ CMD_TRIGGER_SCAN_TO = 47, -+ CMD_CONNECTION_SCAN_CFG = 48, -+ CMD_CONNECTION_SCAN_SSID_CFG = 49, -+ CMD_START_PERIODIC_SCAN = 50, -+ CMD_STOP_PERIODIC_SCAN = 51, -+ CMD_SET_STA_STATE = 52, -+ -+ 
NUM_COMMANDS, -+ MAX_COMMAND_ID = 0xFFFF, -+}; -+ -+#define MAX_CMD_PARAMS 572 -+ -+enum cmd_templ { -+ CMD_TEMPL_NULL_DATA = 0, -+ CMD_TEMPL_BEACON, -+ CMD_TEMPL_CFG_PROBE_REQ_2_4, -+ CMD_TEMPL_CFG_PROBE_REQ_5, -+ CMD_TEMPL_PROBE_RESPONSE, -+ CMD_TEMPL_QOS_NULL_DATA, -+ CMD_TEMPL_PS_POLL, -+ CMD_TEMPL_KLV, -+ CMD_TEMPL_DISCONNECT, -+ CMD_TEMPL_PROBE_REQ_2_4, /* for firmware internal use only */ -+ CMD_TEMPL_PROBE_REQ_5, /* for firmware internal use only */ -+ CMD_TEMPL_BAR, /* for firmware internal use only */ -+ CMD_TEMPL_CTS, /* -+ * For CTS-to-self (FastCTS) mechanism -+ * for BT/WLAN coexistence (SoftGemini). */ -+ CMD_TEMPL_MAX = 0xff -+}; -+ -+/* unit ms */ -+#define WL1271_COMMAND_TIMEOUT 2000 -+#define WL1271_CMD_TEMPL_MAX_SIZE 252 -+ -+struct wl1271_cmd_header { -+ u16 id; -+ u16 status; -+ /* payload */ -+ u8 data[0]; -+} __attribute__ ((packed)); -+ -+#define WL1271_CMD_MAX_PARAMS 572 -+ -+struct wl1271_command { -+ struct wl1271_cmd_header header; -+ u8 parameters[WL1271_CMD_MAX_PARAMS]; -+} __attribute__ ((packed)); -+ -+enum { -+ CMD_MAILBOX_IDLE = 0, -+ CMD_STATUS_SUCCESS = 1, -+ CMD_STATUS_UNKNOWN_CMD = 2, -+ CMD_STATUS_UNKNOWN_IE = 3, -+ CMD_STATUS_REJECT_MEAS_SG_ACTIVE = 11, -+ CMD_STATUS_RX_BUSY = 13, -+ CMD_STATUS_INVALID_PARAM = 14, -+ CMD_STATUS_TEMPLATE_TOO_LARGE = 15, -+ CMD_STATUS_OUT_OF_MEMORY = 16, -+ CMD_STATUS_STA_TABLE_FULL = 17, -+ CMD_STATUS_RADIO_ERROR = 18, -+ CMD_STATUS_WRONG_NESTING = 19, -+ CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/ -+ CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ -+ MAX_COMMAND_STATUS = 0xff -+}; -+ -+ -+/* -+ * CMD_READ_MEMORY -+ * -+ * The host issues this command to read the WiLink device memory/registers. -+ * -+ * Note: The Base Band address has special handling (16 bits registers and -+ * addresses). For more information, see the hardware specification. -+ */ -+/* -+ * CMD_WRITE_MEMORY -+ * -+ * The host issues this command to write the WiLink device memory/registers. -+ * -+ * The Base Band address has special handling (16 bits registers and -+ * addresses). For more information, see the hardware specification. -+ */ -+#define MAX_READ_SIZE 256 -+ -+struct cmd_read_write_memory { -+ struct wl1271_cmd_header header; -+ -+ /* The address of the memory to read from or write to.*/ -+ u32 addr; -+ -+ /* The amount of data in bytes to read from or write to the WiLink -+ * device.*/ -+ u32 size; -+ -+ /* The actual value read from or written to the Wilink. The source -+ of this field is the Host in WRITE command or the Wilink in READ -+ command. */ -+ u8 value[MAX_READ_SIZE]; -+}; -+ -+#define CMDMBOX_HEADER_LEN 4 -+#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 -+ -+enum { -+ BSS_TYPE_IBSS = 0, -+ BSS_TYPE_STA_BSS = 2, -+ BSS_TYPE_AP_BSS = 3, -+ MAX_BSS_TYPE = 0xFF -+}; -+ -+#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ -+#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 -+ -+struct wl1271_cmd_join { -+ struct wl1271_cmd_header header; -+ -+ u32 bssid_lsb; -+ u16 bssid_msb; -+ u16 beacon_interval; /* in TBTTs */ -+ u32 rx_config_options; -+ u32 rx_filter_options; -+ -+ /* -+ * The target uses this field to determine the rate at -+ * which to transmit control frame responses (such as -+ * ACK or CTS frames). -+ */ -+ u32 basic_rate_set; -+ u8 dtim_interval; -+ /* -+ * bits 0-2: This bitwise field specifies the type -+ * of BSS to start or join (BSS_TYPE_*). -+ * bit 4: Band - The radio band in which to join -+ * or start. 
-+ * 0 - 2.4GHz band -+ * 1 - 5GHz band -+ * bits 3, 5-7: Reserved -+ */ -+ u8 bss_type; -+ u8 channel; -+ u8 ssid_len; -+ u8 ssid[IW_ESSID_MAX_SIZE]; -+ u8 ctrl; /* JOIN_CMD_CTRL_* */ -+ u8 reserved[3]; -+} __attribute__ ((packed)); -+ -+struct cmd_enabledisable_path { -+ struct wl1271_cmd_header header; -+ -+ u8 channel; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+struct wl1271_cmd_template_set { -+ struct wl1271_cmd_header header; -+ -+ u16 len; -+ u8 template_type; -+ u8 index; /* relevant only for KLV_TEMPLATE type */ -+ u32 enabled_rates; -+ u8 short_retry_limit; -+ u8 long_retry_limit; -+ u8 aflags; -+ u8 reserved; -+ u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; -+} __attribute__ ((packed)); -+ -+#define TIM_ELE_ID 5 -+#define PARTIAL_VBM_MAX 251 -+ -+struct wl1271_tim { -+ u8 identity; -+ u8 length; -+ u8 dtim_count; -+ u8 dtim_period; -+ u8 bitmap_ctrl; -+ u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ -+} __attribute__ ((packed)); -+ -+enum wl1271_cmd_ps_mode { -+ STATION_ACTIVE_MODE, -+ STATION_POWER_SAVE_MODE -+}; -+ -+struct wl1271_cmd_ps_params { -+ struct wl1271_cmd_header header; -+ -+ u8 ps_mode; /* STATION_* */ -+ u8 send_null_data; /* Do we have to send NULL data packet ? */ -+ u8 retries; /* Number of retires for the initial NULL data packet */ -+ -+ /* -+ * TUs during which the target stays awake after switching -+ * to power save mode. -+ */ -+ u8 hang_over_period; -+ u32 null_data_rate; -+} __attribute__ ((packed)); -+ -+/* HW encryption keys */ -+#define NUM_ACCESS_CATEGORIES_COPY 4 -+#define MAX_KEY_SIZE 32 -+ -+/* When set, disable HW encryption */ -+#define DF_ENCRYPTION_DISABLE 0x01 -+/* When set, disable HW decryption */ -+#define DF_SNIFF_MODE_ENABLE 0x80 -+ -+enum wl1271_cmd_key_action { -+ KEY_ADD_OR_REPLACE = 1, -+ KEY_REMOVE = 2, -+ KEY_SET_ID = 3, -+ MAX_KEY_ACTION = 0xffff, -+}; -+ -+enum wl1271_cmd_key_type { -+ KEY_NONE = 0, -+ KEY_WEP = 1, -+ KEY_TKIP = 2, -+ KEY_AES = 3, -+ KEY_GEM = 4 -+}; -+ -+/* FIXME: Add description for key-types */ -+ -+struct wl1271_cmd_set_keys { -+ struct wl1271_cmd_header header; -+ -+ /* Ignored for default WEP key */ -+ u8 addr[ETH_ALEN]; -+ -+ /* key_action_e */ -+ u16 key_action; -+ -+ u16 reserved_1; -+ -+ /* key size in bytes */ -+ u8 key_size; -+ -+ /* key_type_e */ -+ u8 key_type; -+ u8 ssid_profile; -+ -+ /* -+ * TKIP, AES: frame's key id field. 
-+ * For WEP default key: key id; -+ */ -+ u8 id; -+ u8 reserved_2[6]; -+ u8 key[MAX_KEY_SIZE]; -+ u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; -+ u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; -+} __attribute__ ((packed)); -+ -+ -+#define WL1271_SCAN_MAX_CHANNELS 24 -+#define WL1271_SCAN_DEFAULT_TAG 1 -+#define WL1271_SCAN_CURRENT_TX_PWR 0 -+#define WL1271_SCAN_OPT_ACTIVE 0 -+#define WL1271_SCAN_OPT_PASSIVE 1 -+#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 -+#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ -+#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ -+ -+struct basic_scan_params { -+ u32 rx_config_options; -+ u32 rx_filter_options; -+ /* Scan option flags (WL1271_SCAN_OPT_*) */ -+ u16 scan_options; -+ /* Number of scan channels in the list (maximum 30) */ -+ u8 num_channels; -+ /* This field indicates the number of probe requests to send -+ per channel for an active scan */ -+ u8 num_probe_requests; -+ /* Rate bit field for sending the probes */ -+ u32 tx_rate; -+ u8 tid_trigger; -+ u8 ssid_len; -+ /* in order to align */ -+ u8 padding1[2]; -+ u8 ssid[IW_ESSID_MAX_SIZE]; -+ /* Band to scan */ -+ u8 band; -+ u8 use_ssid_list; -+ u8 scan_tag; -+ u8 padding2; -+} __attribute__ ((packed)); -+ -+struct basic_scan_channel_params { -+ /* Duration in TU to wait for frames on a channel for active scan */ -+ u32 min_duration; -+ u32 max_duration; -+ u32 bssid_lsb; -+ u16 bssid_msb; -+ u8 early_termination; -+ u8 tx_power_att; -+ u8 channel; -+ /* FW internal use only! */ -+ u8 dfs_candidate; -+ u8 activity_detected; -+ u8 pad; -+} __attribute__ ((packed)); -+ -+struct wl1271_cmd_scan { -+ struct wl1271_cmd_header header; -+ -+ struct basic_scan_params params; -+ struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; -+} __attribute__ ((packed)); -+ -+struct wl1271_cmd_trigger_scan_to { -+ struct wl1271_cmd_header header; -+ -+ u32 timeout; -+}; -+ -+struct wl1271_cmd_test_header { -+ u8 id; -+ u8 padding[3]; -+}; -+ -+enum wl1271_channel_tune_bands { -+ WL1271_CHANNEL_TUNE_BAND_2_4, -+ WL1271_CHANNEL_TUNE_BAND_5, -+ WL1271_CHANNEL_TUNE_BAND_4_9 -+}; -+ -+#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 -+ -+#define TEST_CMD_P2G_CAL 0x02 -+#define TEST_CMD_CHANNEL_TUNE 0x0d -+#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d -+ -+struct wl1271_cmd_cal_channel_tune { -+ struct wl1271_cmd_header header; -+ -+ struct wl1271_cmd_test_header test; -+ -+ u8 band; -+ u8 channel; -+ -+ u16 radio_status; -+} __attribute__ ((packed)); -+ -+struct wl1271_cmd_cal_update_ref_point { -+ struct wl1271_cmd_header header; -+ -+ struct wl1271_cmd_test_header test; -+ -+ s32 ref_power; -+ s32 ref_detector; -+ u8 sub_band; -+ u8 padding[3]; -+} __attribute__ ((packed)); -+ -+#define MAX_TLV_LENGTH 400 -+#define MAX_NVS_VERSION_LENGTH 12 -+ -+#define WL1271_CAL_P2G_BAND_B_G BIT(0) -+ -+struct wl1271_cmd_cal_p2g { -+ struct wl1271_cmd_header header; -+ -+ struct wl1271_cmd_test_header test; -+ -+ u16 len; -+ u8 buf[MAX_TLV_LENGTH]; -+ u8 type; -+ u8 padding; -+ -+ s16 radio_status; -+ u8 nvs_version[MAX_NVS_VERSION_LENGTH]; -+ -+ u8 sub_band_mask; -+ u8 padding2; -+} __attribute__ ((packed)); -+ -+#endif /* __WL1271_CMD_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_debugfs.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_debugfs.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_debugfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_debugfs.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 
+1,518 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1271_debugfs.h" -+ -+#include -+ -+#include "wl1271.h" -+#include "wl1271_acx.h" -+#include "wl1271_ps.h" -+ -+/* ms */ -+#define WL1271_DEBUGFS_STATS_LIFETIME 1000 -+ -+/* debugfs macros idea from mac80211 */ -+ -+#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ -+static ssize_t name## _read(struct file *file, char __user *userbuf, \ -+ size_t count, loff_t *ppos) \ -+{ \ -+ struct wl1271 *wl = file->private_data; \ -+ char buf[buflen]; \ -+ int res; \ -+ \ -+ res = scnprintf(buf, buflen, fmt "\n", ##value); \ -+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ -+} \ -+ \ -+static const struct file_operations name## _ops = { \ -+ .read = name## _read, \ -+ .open = wl1271_open_file_generic, \ -+}; -+ -+#define DEBUGFS_ADD(name, parent) \ -+ wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ -+ wl, &name## _ops); \ -+ if (IS_ERR(wl->debugfs.name)) { \ -+ ret = PTR_ERR(wl->debugfs.name); \ -+ wl->debugfs.name = NULL; \ -+ goto out; \ -+ } -+ -+#define DEBUGFS_DEL(name) \ -+ do { \ -+ debugfs_remove(wl->debugfs.name); \ -+ wl->debugfs.name = NULL; \ -+ } while (0) -+ -+#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \ -+static ssize_t sub## _ ##name## _read(struct file *file, \ -+ char __user *userbuf, \ -+ size_t count, loff_t *ppos) \ -+{ \ -+ struct wl1271 *wl = file->private_data; \ -+ char buf[buflen]; \ -+ int res; \ -+ \ -+ wl1271_debugfs_update_stats(wl); \ -+ \ -+ res = scnprintf(buf, buflen, fmt "\n", \ -+ wl->stats.fw_stats->sub.name); \ -+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ -+} \ -+ \ -+static const struct file_operations sub## _ ##name## _ops = { \ -+ .read = sub## _ ##name## _read, \ -+ .open = wl1271_open_file_generic, \ -+}; -+ -+#define DEBUGFS_FWSTATS_ADD(sub, name) \ -+ DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics) -+ -+#define DEBUGFS_FWSTATS_DEL(sub, name) \ -+ DEBUGFS_DEL(sub## _ ##name) -+ -+static void wl1271_debugfs_update_stats(struct wl1271 *wl) -+{ -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ if (wl->state == WL1271_STATE_ON && -+ time_after(jiffies, wl->stats.fw_stats_update + -+ msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) { -+ wl1271_acx_statistics(wl, wl->stats.fw_stats); -+ wl->stats.fw_stats_update = jiffies; -+ } -+ -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+static int wl1271_open_file_generic(struct inode *inode, struct file *file) -+{ -+ file->private_data = inode->i_private; -+ return 0; -+} -+ -+DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, 
hdr_overflow, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u"); -+/* skipping wep.reserved */ -+DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u"); -+/* skipping cont_miss_bcns_spread for now */ -+DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, 
upsd_max_sptime, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u"); -+ -+DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, -+ 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u"); -+DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u"); -+ -+DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count); -+DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u", -+ wl->stats.excessive_retries); -+ -+static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, -+ size_t count, loff_t *ppos) -+{ -+ struct wl1271 *wl = file->private_data; -+ u32 queue_len; -+ char buf[20]; -+ int res; -+ -+ queue_len = skb_queue_len(&wl->tx_queue); -+ -+ res = scnprintf(buf, sizeof(buf), "%u\n", queue_len); -+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); -+} -+ -+static const struct file_operations tx_queue_len_ops = { -+ .read = tx_queue_len_read, -+ .open = wl1271_open_file_generic, -+}; -+ -+static void wl1271_debugfs_delete_files(struct wl1271 *wl) -+{ -+ DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); -+ -+ DEBUGFS_FWSTATS_DEL(rx, out_of_mem); -+ DEBUGFS_FWSTATS_DEL(rx, hdr_overflow); -+ DEBUGFS_FWSTATS_DEL(rx, hw_stuck); -+ DEBUGFS_FWSTATS_DEL(rx, dropped); -+ DEBUGFS_FWSTATS_DEL(rx, fcs_err); -+ DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig); -+ DEBUGFS_FWSTATS_DEL(rx, path_reset); -+ DEBUGFS_FWSTATS_DEL(rx, reset_counter); -+ -+ DEBUGFS_FWSTATS_DEL(dma, rx_requested); -+ DEBUGFS_FWSTATS_DEL(dma, rx_errors); -+ DEBUGFS_FWSTATS_DEL(dma, tx_requested); -+ DEBUGFS_FWSTATS_DEL(dma, tx_errors); -+ -+ DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt); -+ DEBUGFS_FWSTATS_DEL(isr, fiqs); -+ DEBUGFS_FWSTATS_DEL(isr, rx_headers); -+ DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow); -+ DEBUGFS_FWSTATS_DEL(isr, rx_rdys); -+ DEBUGFS_FWSTATS_DEL(isr, irqs); -+ DEBUGFS_FWSTATS_DEL(isr, tx_procs); -+ DEBUGFS_FWSTATS_DEL(isr, decrypt_done); -+ DEBUGFS_FWSTATS_DEL(isr, dma0_done); -+ DEBUGFS_FWSTATS_DEL(isr, dma1_done); -+ DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete); -+ DEBUGFS_FWSTATS_DEL(isr, commands); -+ DEBUGFS_FWSTATS_DEL(isr, rx_procs); -+ DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes); -+ DEBUGFS_FWSTATS_DEL(isr, host_acknowledges); -+ DEBUGFS_FWSTATS_DEL(isr, pci_pm); -+ DEBUGFS_FWSTATS_DEL(isr, wakeups); -+ DEBUGFS_FWSTATS_DEL(isr, low_rssi); -+ -+ DEBUGFS_FWSTATS_DEL(wep, addr_key_count); -+ DEBUGFS_FWSTATS_DEL(wep, default_key_count); -+ /* skipping wep.reserved */ -+ DEBUGFS_FWSTATS_DEL(wep, key_not_found); -+ DEBUGFS_FWSTATS_DEL(wep, decrypt_fail); -+ DEBUGFS_FWSTATS_DEL(wep, packets); -+ DEBUGFS_FWSTATS_DEL(wep, interrupt); -+ -+ DEBUGFS_FWSTATS_DEL(pwr, ps_enter); -+ DEBUGFS_FWSTATS_DEL(pwr, elp_enter); -+ DEBUGFS_FWSTATS_DEL(pwr, missing_bcns); -+ DEBUGFS_FWSTATS_DEL(pwr, wake_on_host); -+ DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp); -+ DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons); -+ DEBUGFS_FWSTATS_DEL(pwr, power_save_off); -+ DEBUGFS_FWSTATS_DEL(pwr, enable_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, disable_ps); -+ DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps); -+ /* skipping cont_miss_bcns_spread for now */ -+ DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons); 
-+ -+ DEBUGFS_FWSTATS_DEL(mic, rx_pkts); -+ DEBUGFS_FWSTATS_DEL(mic, calc_failure); -+ -+ DEBUGFS_FWSTATS_DEL(aes, encrypt_fail); -+ DEBUGFS_FWSTATS_DEL(aes, decrypt_fail); -+ DEBUGFS_FWSTATS_DEL(aes, encrypt_packets); -+ DEBUGFS_FWSTATS_DEL(aes, decrypt_packets); -+ DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt); -+ DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt); -+ -+ DEBUGFS_FWSTATS_DEL(event, heart_beat); -+ DEBUGFS_FWSTATS_DEL(event, calibration); -+ DEBUGFS_FWSTATS_DEL(event, rx_mismatch); -+ DEBUGFS_FWSTATS_DEL(event, rx_mem_empty); -+ DEBUGFS_FWSTATS_DEL(event, rx_pool); -+ DEBUGFS_FWSTATS_DEL(event, oom_late); -+ DEBUGFS_FWSTATS_DEL(event, phy_transmit_error); -+ DEBUGFS_FWSTATS_DEL(event, tx_stuck); -+ -+ DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn); -+ DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn); -+ DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization); -+ DEBUGFS_FWSTATS_DEL(ps, upsd_utilization); -+ -+ DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop); -+ DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); -+ -+ DEBUGFS_DEL(tx_queue_len); -+ DEBUGFS_DEL(retry_count); -+ DEBUGFS_DEL(excessive_retries); -+} -+ -+static int wl1271_debugfs_add_files(struct wl1271 *wl) -+{ -+ int ret = 0; -+ -+ DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); -+ -+ DEBUGFS_FWSTATS_ADD(rx, out_of_mem); -+ DEBUGFS_FWSTATS_ADD(rx, hdr_overflow); -+ DEBUGFS_FWSTATS_ADD(rx, hw_stuck); -+ DEBUGFS_FWSTATS_ADD(rx, dropped); -+ DEBUGFS_FWSTATS_ADD(rx, fcs_err); -+ DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig); -+ DEBUGFS_FWSTATS_ADD(rx, path_reset); -+ DEBUGFS_FWSTATS_ADD(rx, reset_counter); -+ -+ DEBUGFS_FWSTATS_ADD(dma, rx_requested); -+ DEBUGFS_FWSTATS_ADD(dma, rx_errors); -+ DEBUGFS_FWSTATS_ADD(dma, tx_requested); -+ DEBUGFS_FWSTATS_ADD(dma, tx_errors); -+ -+ DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt); -+ DEBUGFS_FWSTATS_ADD(isr, fiqs); -+ DEBUGFS_FWSTATS_ADD(isr, rx_headers); -+ DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow); -+ DEBUGFS_FWSTATS_ADD(isr, rx_rdys); -+ DEBUGFS_FWSTATS_ADD(isr, irqs); -+ DEBUGFS_FWSTATS_ADD(isr, tx_procs); -+ DEBUGFS_FWSTATS_ADD(isr, decrypt_done); -+ DEBUGFS_FWSTATS_ADD(isr, dma0_done); -+ DEBUGFS_FWSTATS_ADD(isr, dma1_done); -+ DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete); -+ DEBUGFS_FWSTATS_ADD(isr, commands); -+ DEBUGFS_FWSTATS_ADD(isr, rx_procs); -+ DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes); -+ DEBUGFS_FWSTATS_ADD(isr, host_acknowledges); -+ DEBUGFS_FWSTATS_ADD(isr, pci_pm); -+ DEBUGFS_FWSTATS_ADD(isr, wakeups); -+ DEBUGFS_FWSTATS_ADD(isr, low_rssi); -+ -+ DEBUGFS_FWSTATS_ADD(wep, addr_key_count); -+ DEBUGFS_FWSTATS_ADD(wep, default_key_count); -+ /* skipping wep.reserved */ -+ DEBUGFS_FWSTATS_ADD(wep, key_not_found); -+ DEBUGFS_FWSTATS_ADD(wep, decrypt_fail); -+ DEBUGFS_FWSTATS_ADD(wep, packets); -+ DEBUGFS_FWSTATS_ADD(wep, interrupt); -+ -+ DEBUGFS_FWSTATS_ADD(pwr, ps_enter); -+ DEBUGFS_FWSTATS_ADD(pwr, elp_enter); -+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns); -+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_host); -+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp); -+ DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons); -+ DEBUGFS_FWSTATS_ADD(pwr, power_save_off); -+ DEBUGFS_FWSTATS_ADD(pwr, enable_ps); -+ 
DEBUGFS_FWSTATS_ADD(pwr, disable_ps); -+ DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps); -+ /* skipping cont_miss_bcns_spread for now */ -+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons); -+ -+ DEBUGFS_FWSTATS_ADD(mic, rx_pkts); -+ DEBUGFS_FWSTATS_ADD(mic, calc_failure); -+ -+ DEBUGFS_FWSTATS_ADD(aes, encrypt_fail); -+ DEBUGFS_FWSTATS_ADD(aes, decrypt_fail); -+ DEBUGFS_FWSTATS_ADD(aes, encrypt_packets); -+ DEBUGFS_FWSTATS_ADD(aes, decrypt_packets); -+ DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt); -+ DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt); -+ -+ DEBUGFS_FWSTATS_ADD(event, heart_beat); -+ DEBUGFS_FWSTATS_ADD(event, calibration); -+ DEBUGFS_FWSTATS_ADD(event, rx_mismatch); -+ DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); -+ DEBUGFS_FWSTATS_ADD(event, rx_pool); -+ DEBUGFS_FWSTATS_ADD(event, oom_late); -+ DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); -+ DEBUGFS_FWSTATS_ADD(event, tx_stuck); -+ -+ DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn); -+ DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn); -+ DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization); -+ DEBUGFS_FWSTATS_ADD(ps, upsd_utilization); -+ -+ DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop); -+ DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); -+ DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); -+ -+ DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); -+ DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); -+ DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); -+ -+out: -+ if (ret < 0) -+ wl1271_debugfs_delete_files(wl); -+ -+ return ret; -+} -+ -+void wl1271_debugfs_reset(struct wl1271 *wl) -+{ -+ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); -+ wl->stats.retry_count = 0; -+ wl->stats.excessive_retries = 0; -+} -+ -+int wl1271_debugfs_init(struct wl1271 *wl) -+{ -+ int ret; -+ -+ wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); -+ -+ if (IS_ERR(wl->debugfs.rootdir)) { -+ ret = PTR_ERR(wl->debugfs.rootdir); -+ wl->debugfs.rootdir = NULL; -+ goto err; -+ } -+ -+ wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics", -+ wl->debugfs.rootdir); -+ -+ if (IS_ERR(wl->debugfs.fw_statistics)) { -+ ret = PTR_ERR(wl->debugfs.fw_statistics); -+ wl->debugfs.fw_statistics = NULL; -+ goto err_root; -+ } -+ -+ wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), -+ GFP_KERNEL); -+ -+ if (!wl->stats.fw_stats) { -+ ret = -ENOMEM; -+ goto err_fw; -+ } -+ -+ wl->stats.fw_stats_update = jiffies; -+ -+ ret = wl1271_debugfs_add_files(wl); -+ -+ if (ret < 0) -+ goto err_file; -+ -+ return 0; -+ -+err_file: -+ kfree(wl->stats.fw_stats); -+ wl->stats.fw_stats = NULL; -+ -+err_fw: -+ debugfs_remove(wl->debugfs.fw_statistics); -+ wl->debugfs.fw_statistics = NULL; -+ -+err_root: -+ debugfs_remove(wl->debugfs.rootdir); -+ wl->debugfs.rootdir = NULL; -+ -+err: -+ return ret; -+} -+ -+void wl1271_debugfs_exit(struct wl1271 *wl) -+{ -+ wl1271_debugfs_delete_files(wl); -+ -+ kfree(wl->stats.fw_stats); -+ wl->stats.fw_stats = NULL; -+ -+ debugfs_remove(wl->debugfs.fw_statistics); -+ wl->debugfs.fw_statistics = NULL; -+ -+ debugfs_remove(wl->debugfs.rootdir); -+ wl->debugfs.rootdir = NULL; -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_debugfs.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_debugfs.h ---- 
linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_debugfs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_debugfs.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,33 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef WL1271_DEBUGFS_H -+#define WL1271_DEBUGFS_H -+ -+#include "wl1271.h" -+ -+int wl1271_debugfs_init(struct wl1271 *wl); -+void wl1271_debugfs_exit(struct wl1271 *wl); -+void wl1271_debugfs_reset(struct wl1271 *wl); -+ -+#endif /* WL1271_DEBUGFS_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_event.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_event.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_event.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_event.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,126 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1271.h" -+#include "wl1271_reg.h" -+#include "wl1271_spi.h" -+#include "wl1271_event.h" -+#include "wl1271_ps.h" -+ -+static int wl1271_event_scan_complete(struct wl1271 *wl, -+ struct event_mailbox *mbox) -+{ -+ wl1271_debug(DEBUG_EVENT, "status: 0x%x", -+ mbox->scheduled_scan_status); -+ -+ if (wl->scanning) { -+ mutex_unlock(&wl->mutex); -+ ieee80211_scan_completed(wl->hw); -+ mutex_lock(&wl->mutex); -+ wl->scanning = false; -+ } -+ -+ return 0; -+} -+ -+static void wl1271_event_mbox_dump(struct event_mailbox *mbox) -+{ -+ wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); -+ wl1271_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector); -+ wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); -+} -+ -+static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) -+{ -+ int ret; -+ u32 vector; -+ -+ wl1271_event_mbox_dump(mbox); -+ -+ vector = mbox->events_vector & ~(mbox->events_mask); -+ wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); -+ -+ if (vector & SCAN_COMPLETE_EVENT_ID) { -+ ret = wl1271_event_scan_complete(wl, mbox); -+ if (ret < 0) -+ return ret; -+ } -+ -+ if (vector & BSS_LOSE_EVENT_ID) { -+ wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); -+ -+ if (wl->psm_requested && wl->psm) { -+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); -+ if (ret < 0) -+ return ret; -+ } -+ } -+ -+ return 0; -+} -+ -+int wl1271_event_unmask(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask)); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+void wl1271_event_mbox_config(struct wl1271 *wl) -+{ -+ wl->mbox_ptr[0] = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); -+ wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); -+ -+ wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", -+ wl->mbox_ptr[0], wl->mbox_ptr[1]); -+} -+ -+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) -+{ -+ struct event_mailbox mbox; -+ int ret; -+ -+ wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); -+ -+ if (mbox_num > 1) -+ return -EINVAL; -+ -+ /* first we read the mbox descriptor */ -+ wl1271_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, -+ sizeof(struct event_mailbox)); -+ -+ /* process the descriptor */ -+ ret = wl1271_event_process(wl, &mbox); -+ if (ret < 0) -+ return ret; -+ -+ /* then we let the firmware know it can go on...*/ -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); -+ -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_event.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_event.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_event.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_event.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,110 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. 
-+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_EVENT_H__ -+#define __WL1271_EVENT_H__ -+ -+/* -+ * Mbox events -+ * -+ * The event mechanism is based on a pair of event buffers (buffers A and -+ * B) at fixed locations in the target's memory. The host processes one -+ * buffer while the other buffer continues to collect events. If the host -+ * is not processing events, an interrupt is issued to signal that a buffer -+ * is ready. Once the host is done with processing events from one buffer, -+ * it signals the target (with an ACK interrupt) that the event buffer is -+ * free. -+ */ -+ -+enum { -+ MEASUREMENT_START_EVENT_ID = BIT(8), -+ MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), -+ SCAN_COMPLETE_EVENT_ID = BIT(10), -+ SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(11), -+ AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12), -+ PS_REPORT_EVENT_ID = BIT(13), -+ PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14), -+ DISCONNECT_EVENT_COMPLETE_ID = BIT(15), -+ JOIN_EVENT_COMPLETE_ID = BIT(16), -+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17), -+ BSS_LOSE_EVENT_ID = BIT(18), -+ REGAINED_BSS_EVENT_ID = BIT(19), -+ ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20), -+ SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), -+ SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23), -+ SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), -+ PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25), -+ DBG_EVENT_ID = BIT(26), -+ HEALTH_CHECK_REPLY_EVENT_ID = BIT(27), -+ PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28), -+ PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29), -+ BA_SESSION_TEAR_DOWN_EVENT_ID = BIT(30), -+ EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, -+}; -+ -+struct event_debug_report { -+ u8 debug_event_id; -+ u8 num_params; -+ u16 pad; -+ u32 report_1; -+ u32 report_2; -+ u32 report_3; -+} __attribute__ ((packed)); -+ -+#define NUM_OF_RSSI_SNR_TRIGGERS 8 -+ -+struct event_mailbox { -+ u32 events_vector; -+ u32 events_mask; -+ u32 reserved_1; -+ u32 reserved_2; -+ -+ u8 dbg_event_id; -+ u8 num_relevant_params; -+ u16 reserved_3; -+ u32 event_report_p1; -+ u32 event_report_p2; -+ u32 event_report_p3; -+ -+ u8 number_of_scan_results; -+ u8 scan_tag; -+ u8 reserved_4[2]; -+ u32 compl_scheduled_scan_status; -+ -+ u16 scheduled_scan_attended_channels; -+ u8 soft_gemini_sense_info; -+ u8 soft_gemini_protective_info; -+ s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; -+ u8 channel_switch_status; -+ u8 scheduled_scan_status; -+ u8 ps_status; -+ -+ u8 reserved_5[29]; -+} __attribute__ ((packed)); -+ -+int wl1271_event_unmask(struct wl1271 *wl); -+void wl1271_event_mbox_config(struct wl1271 *wl); -+int wl1271_event_handle(struct wl1271 *wl, u8 mbox); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271.h 2011-06-22 13:19:32.933063273 +0200 -@@ -0,0 +1,409 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 
2008-2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_H__ -+#define __WL1271_H__ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRIVER_NAME "wl1271" -+#define DRIVER_PREFIX DRIVER_NAME ": " -+ -+enum { -+ DEBUG_NONE = 0, -+ DEBUG_IRQ = BIT(0), -+ DEBUG_SPI = BIT(1), -+ DEBUG_BOOT = BIT(2), -+ DEBUG_MAILBOX = BIT(3), -+ DEBUG_NETLINK = BIT(4), -+ DEBUG_EVENT = BIT(5), -+ DEBUG_TX = BIT(6), -+ DEBUG_RX = BIT(7), -+ DEBUG_SCAN = BIT(8), -+ DEBUG_CRYPT = BIT(9), -+ DEBUG_PSM = BIT(10), -+ DEBUG_MAC80211 = BIT(11), -+ DEBUG_CMD = BIT(12), -+ DEBUG_ACX = BIT(13), -+ DEBUG_ALL = ~0, -+}; -+ -+#define DEBUG_LEVEL (DEBUG_NONE) -+ -+#define DEBUG_DUMP_LIMIT 1024 -+ -+#define wl1271_error(fmt, arg...) \ -+ printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg) -+ -+#define wl1271_warning(fmt, arg...) \ -+ printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg) -+ -+#define wl1271_notice(fmt, arg...) \ -+ printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg) -+ -+#define wl1271_info(fmt, arg...) \ -+ printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg) -+ -+#define wl1271_debug(level, fmt, arg...) 
\ -+ do { \ -+ if (level & DEBUG_LEVEL) \ -+ printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \ -+ } while (0) -+ -+#define wl1271_dump(level, prefix, buf, len) \ -+ do { \ -+ if (level & DEBUG_LEVEL) \ -+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ -+ DUMP_PREFIX_OFFSET, 16, 1, \ -+ buf, \ -+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \ -+ 0); \ -+ } while (0) -+ -+#define wl1271_dump_ascii(level, prefix, buf, len) \ -+ do { \ -+ if (level & DEBUG_LEVEL) \ -+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ -+ DUMP_PREFIX_OFFSET, 16, 1, \ -+ buf, \ -+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \ -+ true); \ -+ } while (0) -+ -+#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ -+ CFG_BSSID_FILTER_EN) -+ -+#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ -+ CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ -+ CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ -+ CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) -+ -+#define WL1271_FW_NAME "wl1271-fw.bin" -+#define WL1271_NVS_NAME "wl1271-nvs.bin" -+ -+#define WL1271_BUSY_WORD_LEN 8 -+ -+#define WL1271_ELP_HW_STATE_ASLEEP 0 -+#define WL1271_ELP_HW_STATE_IRQ 1 -+ -+enum wl1271_state { -+ WL1271_STATE_OFF, -+ WL1271_STATE_ON, -+ WL1271_STATE_PLT, -+}; -+ -+enum wl1271_partition_type { -+ PART_DOWN, -+ PART_WORK, -+ PART_DRPW, -+ -+ PART_TABLE_LEN -+}; -+ -+struct wl1271_partition { -+ u32 size; -+ u32 start; -+}; -+ -+struct wl1271_partition_set { -+ struct wl1271_partition mem; -+ struct wl1271_partition reg; -+}; -+ -+struct wl1271; -+ -+/* FIXME: I'm not sure about this structure name */ -+struct wl1271_chip { -+ u32 id; -+ char fw_ver[21]; -+}; -+ -+struct wl1271_stats { -+ struct acx_statistics *fw_stats; -+ unsigned long fw_stats_update; -+ -+ unsigned int retry_count; -+ unsigned int excessive_retries; -+}; -+ -+struct wl1271_debugfs { -+ struct dentry *rootdir; -+ struct dentry *fw_statistics; -+ -+ struct dentry *tx_internal_desc_overflow; -+ -+ struct dentry *rx_out_of_mem; -+ struct dentry *rx_hdr_overflow; -+ struct dentry *rx_hw_stuck; -+ struct dentry *rx_dropped; -+ struct dentry *rx_fcs_err; -+ struct dentry *rx_xfr_hint_trig; -+ struct dentry *rx_path_reset; -+ struct dentry *rx_reset_counter; -+ -+ struct dentry *dma_rx_requested; -+ struct dentry *dma_rx_errors; -+ struct dentry *dma_tx_requested; -+ struct dentry *dma_tx_errors; -+ -+ struct dentry *isr_cmd_cmplt; -+ struct dentry *isr_fiqs; -+ struct dentry *isr_rx_headers; -+ struct dentry *isr_rx_mem_overflow; -+ struct dentry *isr_rx_rdys; -+ struct dentry *isr_irqs; -+ struct dentry *isr_tx_procs; -+ struct dentry *isr_decrypt_done; -+ struct dentry *isr_dma0_done; -+ struct dentry *isr_dma1_done; -+ struct dentry *isr_tx_exch_complete; -+ struct dentry *isr_commands; -+ struct dentry *isr_rx_procs; -+ struct dentry *isr_hw_pm_mode_changes; -+ struct dentry *isr_host_acknowledges; -+ struct dentry *isr_pci_pm; -+ struct dentry *isr_wakeups; -+ struct dentry *isr_low_rssi; -+ -+ struct dentry *wep_addr_key_count; -+ struct dentry *wep_default_key_count; -+ /* skipping wep.reserved */ -+ struct dentry *wep_key_not_found; -+ struct dentry *wep_decrypt_fail; -+ struct dentry *wep_packets; -+ struct dentry *wep_interrupt; -+ -+ struct dentry *pwr_ps_enter; -+ struct dentry *pwr_elp_enter; -+ struct dentry *pwr_missing_bcns; -+ struct dentry *pwr_wake_on_host; -+ struct dentry *pwr_wake_on_timer_exp; -+ struct dentry *pwr_tx_with_ps; -+ struct dentry *pwr_tx_without_ps; -+ struct dentry *pwr_rcvd_beacons; -+ struct dentry *pwr_power_save_off; -+ struct dentry *pwr_enable_ps; -+ struct dentry 
*pwr_disable_ps; -+ struct dentry *pwr_fix_tsf_ps; -+ /* skipping cont_miss_bcns_spread for now */ -+ struct dentry *pwr_rcvd_awake_beacons; -+ -+ struct dentry *mic_rx_pkts; -+ struct dentry *mic_calc_failure; -+ -+ struct dentry *aes_encrypt_fail; -+ struct dentry *aes_decrypt_fail; -+ struct dentry *aes_encrypt_packets; -+ struct dentry *aes_decrypt_packets; -+ struct dentry *aes_encrypt_interrupt; -+ struct dentry *aes_decrypt_interrupt; -+ -+ struct dentry *event_heart_beat; -+ struct dentry *event_calibration; -+ struct dentry *event_rx_mismatch; -+ struct dentry *event_rx_mem_empty; -+ struct dentry *event_rx_pool; -+ struct dentry *event_oom_late; -+ struct dentry *event_phy_transmit_error; -+ struct dentry *event_tx_stuck; -+ -+ struct dentry *ps_pspoll_timeouts; -+ struct dentry *ps_upsd_timeouts; -+ struct dentry *ps_upsd_max_sptime; -+ struct dentry *ps_upsd_max_apturn; -+ struct dentry *ps_pspoll_max_apturn; -+ struct dentry *ps_pspoll_utilization; -+ struct dentry *ps_upsd_utilization; -+ -+ struct dentry *rxpipe_rx_prep_beacon_drop; -+ struct dentry *rxpipe_descr_host_int_trig_rx_data; -+ struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data; -+ struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data; -+ struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; -+ -+ struct dentry *tx_queue_len; -+ -+ struct dentry *retry_count; -+ struct dentry *excessive_retries; -+}; -+ -+#define NUM_TX_QUEUES 4 -+#define NUM_RX_PKT_DESC 8 -+ -+/* FW status registers */ -+struct wl1271_fw_status { -+ u32 intr; -+ u8 fw_rx_counter; -+ u8 drv_rx_counter; -+ u8 reserved; -+ u8 tx_results_counter; -+ u32 rx_pkt_descs[NUM_RX_PKT_DESC]; -+ u32 tx_released_blks[NUM_TX_QUEUES]; -+ u32 fw_localtime; -+ u32 padding[2]; -+} __attribute__ ((packed)); -+ -+struct wl1271_rx_mem_pool_addr { -+ u32 addr; -+ u32 addr_extra; -+}; -+ -+struct wl1271 { -+ struct ieee80211_hw *hw; -+ bool mac80211_registered; -+ -+ struct spi_device *spi; -+ -+ void (*set_power)(bool enable); -+ int irq; -+ -+ spinlock_t wl_lock; -+ -+ enum wl1271_state state; -+ struct mutex mutex; -+ -+ int physical_mem_addr; -+ int physical_reg_addr; -+ int virtual_mem_addr; -+ int virtual_reg_addr; -+ -+ struct wl1271_chip chip; -+ -+ int cmd_box_addr; -+ int event_box_addr; -+ -+ u8 *fw; -+ size_t fw_len; -+ u8 *nvs; -+ size_t nvs_len; -+ -+ u8 bssid[ETH_ALEN]; -+ u8 mac_addr[ETH_ALEN]; -+ u8 bss_type; -+ u8 ssid[IW_ESSID_MAX_SIZE + 1]; -+ u8 ssid_len; -+ u8 listen_int; -+ int channel; -+ -+ struct wl1271_acx_mem_map *target_mem_map; -+ -+ /* Accounting for allocated / available TX blocks on HW */ -+ u32 tx_blocks_freed[NUM_TX_QUEUES]; -+ u32 tx_blocks_available; -+ u8 tx_results_count; -+ -+ /* Transmitted TX packets counter for chipset interface */ -+ int tx_packets_count; -+ -+ /* Time-offset between host and chipset clocks */ -+ int time_offset; -+ -+ /* Session counter for the chipset */ -+ int session_counter; -+ -+ /* Frames scheduled for transmission, not handled yet */ -+ struct sk_buff_head tx_queue; -+ bool tx_queue_stopped; -+ -+ struct work_struct tx_work; -+ struct work_struct filter_work; -+ -+ /* Pending TX frames */ -+ struct sk_buff *tx_frames[16]; -+ -+ /* FW Rx counter */ -+ u32 rx_counter; -+ -+ /* Rx memory pool address */ -+ struct wl1271_rx_mem_pool_addr rx_mem_pool_addr; -+ -+ /* The target interrupt mask */ -+ struct work_struct irq_work; -+ -+ /* The mbox event mask */ -+ u32 event_mask; -+ -+ /* Mailbox pointers */ -+ u32 mbox_ptr[2]; -+ -+ /* Are we currently scanning */ -+ bool scanning; -+ -+ /* Our 
association ID */ -+ u16 aid; -+ -+ /* Default key (for WEP) */ -+ u32 default_key; -+ -+ unsigned int rx_config; -+ unsigned int rx_filter; -+ -+ /* is firmware in elp mode */ -+ bool elp; -+ -+ struct completion *elp_compl; -+ -+ /* we can be in psm, but not in elp, we have to differentiate */ -+ bool psm; -+ -+ /* PSM mode requested */ -+ bool psm_requested; -+ -+ /* in dBm */ -+ int power_level; -+ -+ struct wl1271_stats stats; -+ struct wl1271_debugfs debugfs; -+ -+ u32 buffer_32; -+ u32 buffer_cmd; -+ u8 buffer_busyword[WL1271_BUSY_WORD_LEN]; -+ struct wl1271_rx_descriptor *rx_descriptor; -+ -+ struct wl1271_fw_status *fw_status; -+ struct wl1271_tx_hw_res_if *tx_res_if; -+}; -+ -+int wl1271_plt_start(struct wl1271 *wl); -+int wl1271_plt_stop(struct wl1271 *wl); -+ -+#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ -+ -+#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */ -+ -+#define WL1271_DEFAULT_POWER_LEVEL 0 -+ -+#define WL1271_TX_QUEUE_MAX_LENGTH 20 -+ -+/* WL1271 needs a 200ms sleep after power on */ -+#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_init.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_init.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_init.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_init.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,397 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+ -+#include "wl1271_init.h" -+#include "wl12xx_80211.h" -+#include "wl1271_acx.h" -+#include "wl1271_cmd.h" -+#include "wl1271_reg.h" -+ -+static int wl1271_init_hwenc_config(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_feature_cfg(wl); -+ if (ret < 0) { -+ wl1271_warning("couldn't set feature config"); -+ return ret; -+ } -+ -+ ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key); -+ if (ret < 0) { -+ wl1271_warning("couldn't set default key"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int wl1271_init_templates_config(struct wl1271 *wl) -+{ -+ int ret; -+ -+ /* send empty templates for fw memory reservation */ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, -+ sizeof(struct wl12xx_probe_req_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, -+ sizeof(struct wl12xx_null_data_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL, -+ sizeof(struct wl12xx_ps_poll_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, -+ sizeof -+ (struct wl12xx_qos_null_data_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL, -+ sizeof -+ (struct wl12xx_probe_resp_template)); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL, -+ sizeof -+ (struct wl12xx_beacon_template)); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter) -+{ -+ int ret; -+ -+ ret = wl1271_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_rx_config(wl, config, filter); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_phy_config(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_pd_threshold(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_group_address_tbl(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_service_period_timeout(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_beacon_filter(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_beacon_filter_opt(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_beacon_filter_table(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_pta(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_sg_enable(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_acx_sg_cfg(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_energy_detection(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_cca_threshold(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_beacon_broadcast(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_bcn_dtim_options(wl); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static int wl1271_init_general_parms(struct wl1271 *wl) -+{ -+ struct wl1271_general_parms *gen_parms; -+ int 
ret; -+ -+ gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); -+ if (!gen_parms) -+ return -ENOMEM; -+ -+ gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM; -+ -+ gen_parms->ref_clk = REF_CLK_38_4_E; -+ /* FIXME: magic numbers */ -+ gen_parms->settling_time = 5; -+ gen_parms->clk_valid_on_wakeup = 0; -+ gen_parms->dc2dcmode = 0; -+ gen_parms->single_dual_band = 0; -+ gen_parms->tx_bip_fem_autodetect = 1; -+ gen_parms->tx_bip_fem_manufacturer = 1; -+ gen_parms->settings = 1; -+ -+ ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); -+ if (ret < 0) { -+ wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); -+ return ret; -+ } -+ -+ kfree(gen_parms); -+ return 0; -+} -+ -+static int wl1271_init_radio_parms(struct wl1271 *wl) -+{ -+ /* -+ * FIXME: All these magic numbers should be moved to some place where -+ * they can be configured (separate file?) -+ */ -+ -+ struct wl1271_radio_parms *radio_parms; -+ int ret; -+ u8 compensation[] = { 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 0xfc, 0x00, -+ 0x08, 0x10, 0xf0, 0xf8, 0x00, 0x0a, 0x14 }; -+ -+ u8 tx_rate_limits_normal[] = { 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 }; -+ u8 tx_rate_limits_degraded[] = { 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 }; -+ -+ u8 tx_channel_limits_11b[] = { 0x22, 0x50, 0x50, 0x50, -+ 0x50, 0x50, 0x50, 0x50, -+ 0x50, 0x50, 0x22, 0x50, -+ 0x22, 0x50 }; -+ -+ u8 tx_channel_limits_ofdm[] = { 0x20, 0x50, 0x50, 0x50, -+ 0x50, 0x50, 0x50, 0x50, -+ 0x50, 0x50, 0x20, 0x50, -+ 0x20, 0x50 }; -+ -+ u8 tx_pdv_rate_offsets[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; -+ -+ u8 tx_ibias[] = { 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 }; -+ -+ radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); -+ if (!radio_parms) -+ return -ENOMEM; -+ -+ radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM; -+ -+ /* Static radio parameters */ -+ radio_parms->rx_trace_loss = 10; -+ radio_parms->tx_trace_loss = 10; -+ memcpy(radio_parms->rx_rssi_and_proc_compens, compensation, -+ sizeof(compensation)); -+ -+ /* We don't set the 5GHz -- N/A */ -+ -+ /* Dynamic radio parameters */ -+ radio_parms->tx_ref_pd_voltage = cpu_to_le16(0x24e); -+ radio_parms->tx_ref_power = 0x78; -+ radio_parms->tx_offset_db = 0x0; -+ -+ memcpy(radio_parms->tx_rate_limits_normal, tx_rate_limits_normal, -+ sizeof(tx_rate_limits_normal)); -+ memcpy(radio_parms->tx_rate_limits_degraded, tx_rate_limits_degraded, -+ sizeof(tx_rate_limits_degraded)); -+ -+ memcpy(radio_parms->tx_channel_limits_11b, tx_channel_limits_11b, -+ sizeof(tx_channel_limits_11b)); -+ memcpy(radio_parms->tx_channel_limits_ofdm, tx_channel_limits_ofdm, -+ sizeof(tx_channel_limits_ofdm)); -+ memcpy(radio_parms->tx_pdv_rate_offsets, tx_pdv_rate_offsets, -+ sizeof(tx_pdv_rate_offsets)); -+ memcpy(radio_parms->tx_ibias, tx_ibias, -+ sizeof(tx_ibias)); -+ -+ radio_parms->rx_fem_insertion_loss = 0x14; -+ -+ ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); -+ if (ret < 0) -+ wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed"); -+ -+ kfree(radio_parms); -+ return ret; -+} -+ -+int wl1271_hw_init(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_init_general_parms(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_init_radio_parms(wl); -+ if (ret < 0) -+ return ret; -+ -+ /* Template settings */ -+ ret = wl1271_init_templates_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ /* Default memory configuration */ -+ ret = wl1271_acx_init_mem_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ /* RX config */ -+ ret = wl1271_init_rx_config(wl, -+ RX_CFG_PROMISCUOUS | RX_CFG_TSF, -+ RX_FILTER_OPTION_DEF); -+ /* 
RX_CONFIG_OPTION_ANY_DST_ANY_BSS, -+ RX_FILTER_OPTION_FILTER_ALL); */ -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* PHY layer config */ -+ ret = wl1271_init_phy_config(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Beacon filtering */ -+ ret = wl1271_init_beacon_filter(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Configure TX patch complete interrupt behavior */ -+ ret = wl1271_acx_tx_config_options(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* RX complete interrupt pacing */ -+ ret = wl1271_acx_init_rx_interrupt(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Bluetooth WLAN coexistence */ -+ ret = wl1271_init_pta(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Energy detection */ -+ ret = wl1271_init_energy_detection(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Beacons and boradcast settings */ -+ ret = wl1271_init_beacon_broadcast(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Default fragmentation threshold */ -+ ret = wl1271_acx_frag_threshold(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Default TID configuration */ -+ ret = wl1271_acx_tid_cfg(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Default AC configuration */ -+ ret = wl1271_acx_ac_cfg(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Configure TX rate classes */ -+ ret = wl1271_acx_rate_policies(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Enable data path */ -+ ret = wl1271_cmd_data_path(wl, wl->channel, 1); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Configure for ELP power saving */ -+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ /* Configure HW encryption */ -+ ret = wl1271_init_hwenc_config(wl); -+ if (ret < 0) -+ goto out_free_memmap; -+ -+ return 0; -+ -+ out_free_memmap: -+ kfree(wl->target_mem_map); -+ -+ return ret; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_init.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_init.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_init.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_init.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,115 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_INIT_H__ -+#define __WL1271_INIT_H__ -+ -+#include "wl1271.h" -+ -+int wl1271_hw_init_power_auth(struct wl1271 *wl); -+int wl1271_hw_init(struct wl1271 *wl); -+ -+/* These are not really a TEST_CMD, but the ref driver uses them as such */ -+#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 -+#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E -+ -+struct wl1271_general_parms { -+ u8 id; -+ u8 padding[3]; -+ -+ u8 ref_clk; -+ u8 settling_time; -+ u8 clk_valid_on_wakeup; -+ u8 dc2dcmode; -+ u8 single_dual_band; -+ -+ u8 tx_bip_fem_autodetect; -+ u8 tx_bip_fem_manufacturer; -+ u8 settings; -+} __attribute__ ((packed)); -+ -+enum ref_clk_enum { -+ REF_CLK_19_2_E, -+ REF_CLK_26_E, -+ REF_CLK_38_4_E, -+ REF_CLK_52_E -+}; -+ -+#define RSSI_AND_PROCESS_COMPENSATION_SIZE 15 -+#define NUMBER_OF_SUB_BANDS_5 7 -+#define NUMBER_OF_RATE_GROUPS 6 -+#define NUMBER_OF_CHANNELS_2_4 14 -+#define NUMBER_OF_CHANNELS_5 35 -+ -+struct wl1271_radio_parms { -+ u8 id; -+ u8 padding[3]; -+ -+ /* Static radio parameters */ -+ /* 2.4GHz */ -+ u8 rx_trace_loss; -+ u8 tx_trace_loss; -+ s8 rx_rssi_and_proc_compens[RSSI_AND_PROCESS_COMPENSATION_SIZE]; -+ -+ /* 5GHz */ -+ u8 rx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; -+ u8 tx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; -+ s8 rx_rssi_and_proc_compens_5[RSSI_AND_PROCESS_COMPENSATION_SIZE]; -+ -+ /* Dynamic radio parameters */ -+ /* 2.4GHz */ -+ s16 tx_ref_pd_voltage; -+ s8 tx_ref_power; -+ s8 tx_offset_db; -+ -+ s8 tx_rate_limits_normal[NUMBER_OF_RATE_GROUPS]; -+ s8 tx_rate_limits_degraded[NUMBER_OF_RATE_GROUPS]; -+ -+ s8 tx_channel_limits_11b[NUMBER_OF_CHANNELS_2_4]; -+ s8 tx_channel_limits_ofdm[NUMBER_OF_CHANNELS_2_4]; -+ s8 tx_pdv_rate_offsets[NUMBER_OF_RATE_GROUPS]; -+ -+ u8 tx_ibias[NUMBER_OF_RATE_GROUPS]; -+ u8 rx_fem_insertion_loss; -+ -+ u8 padding2; -+ -+ /* 5GHz */ -+ s16 tx_ref_pd_voltage_5[NUMBER_OF_SUB_BANDS_5]; -+ s8 tx_ref_power_5[NUMBER_OF_SUB_BANDS_5]; -+ s8 tx_offset_db_5[NUMBER_OF_SUB_BANDS_5]; -+ -+ s8 tx_rate_limits_normal_5[NUMBER_OF_RATE_GROUPS]; -+ s8 tx_rate_limits_degraded_5[NUMBER_OF_RATE_GROUPS]; -+ -+ s8 tx_channel_limits_ofdm_5[NUMBER_OF_CHANNELS_5]; -+ s8 tx_pdv_rate_offsets_5[NUMBER_OF_RATE_GROUPS]; -+ -+ /* FIXME: this is inconsistent with the types for 2.4GHz */ -+ s8 tx_ibias_5[NUMBER_OF_RATE_GROUPS]; -+ s8 rx_fem_insertion_loss_5[NUMBER_OF_SUB_BANDS_5]; -+ -+ u8 padding3[2]; -+} __attribute__ ((packed)); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_main.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_main.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_main.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_main.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,1390 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2008-2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "wl1271.h" -+#include "wl12xx_80211.h" -+#include "wl1271_reg.h" -+#include "wl1271_spi.h" -+#include "wl1271_event.h" -+#include "wl1271_tx.h" -+#include "wl1271_rx.h" -+#include "wl1271_ps.h" -+#include "wl1271_init.h" -+#include "wl1271_netlink.h" -+#include "wl1271_debugfs.h" -+#include "wl1271_cmd.h" -+#include "wl1271_boot.h" -+ -+static int wl1271_plt_init(struct wl1271 *wl) -+{ -+ int ret; -+ -+ ret = wl1271_acx_init_mem_config(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_data_path(wl, wl->channel, 1); -+ if (ret < 0) -+ return ret; -+ -+ return 0; -+} -+ -+static void wl1271_disable_interrupts(struct wl1271 *wl) -+{ -+ disable_irq(wl->irq); -+} -+ -+static void wl1271_power_off(struct wl1271 *wl) -+{ -+ wl->set_power(false); -+} -+ -+static void wl1271_power_on(struct wl1271 *wl) -+{ -+ wl->set_power(true); -+} -+ -+static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status) -+{ -+ u32 total = 0; -+ int i; -+ -+ /* -+ * FIXME: Reading the FW status directly from the registers seems to -+ * be the right thing to do, but it doesn't work. And in the -+ * reference driver, there is a workaround called -+ * USE_SDIO_24M_WORKAROUND, which reads the status from memory -+ * instead, so we do the same here. -+ */ -+ -+ wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status)); -+ -+ wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " -+ "drv_rx_counter = %d, tx_results_counter = %d)", -+ status->intr, -+ status->fw_rx_counter, -+ status->drv_rx_counter, -+ status->tx_results_counter); -+ -+ /* update number of available TX blocks */ -+ for (i = 0; i < NUM_TX_QUEUES; i++) { -+ u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i]; -+ wl->tx_blocks_freed[i] = status->tx_released_blks[i]; -+ wl->tx_blocks_available += cnt; -+ total += cnt; -+ } -+ -+ /* if more blocks are available now, schedule some tx work */ -+ if (total && !skb_queue_empty(&wl->tx_queue)) -+ schedule_work(&wl->tx_work); -+ -+ /* update the host-chipset time offset */ -+ wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime; -+} -+ -+#define WL1271_IRQ_MAX_LOOPS 10 -+static void wl1271_irq_work(struct work_struct *work) -+{ -+ u32 intr, ctr = WL1271_IRQ_MAX_LOOPS; -+ int ret; -+ struct wl1271 *wl = -+ container_of(work, struct wl1271, irq_work); -+ -+ mutex_lock(&wl->mutex); -+ -+ wl1271_debug(DEBUG_IRQ, "IRQ work"); -+ -+ if (wl->state == WL1271_STATE_OFF) -+ goto out; -+ -+ ret = wl1271_ps_elp_wakeup(wl, true); -+ if (ret < 0) -+ goto out; -+ -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); -+ -+ intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); -+ if (!intr) { -+ wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); -+ goto out_sleep; -+ } -+ -+ intr &= WL1271_INTR_MASK; -+ -+ do { -+ wl1271_fw_status(wl, wl->fw_status); -+ -+ -+ if (intr & (WL1271_ACX_INTR_EVENT_A | -+ WL1271_ACX_INTR_EVENT_B)) { -+ wl1271_debug(DEBUG_IRQ, -+ "WL1271_ACX_INTR_EVENT (0x%x)", intr); -+ if (intr & WL1271_ACX_INTR_EVENT_A) -+ wl1271_event_handle(wl, 0); -+ else -+ wl1271_event_handle(wl, 1); -+ } -+ -+ if (intr & 
WL1271_ACX_INTR_INIT_COMPLETE) -+ wl1271_debug(DEBUG_IRQ, -+ "WL1271_ACX_INTR_INIT_COMPLETE"); -+ -+ if (intr & WL1271_ACX_INTR_HW_AVAILABLE) -+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); -+ -+ if (intr & WL1271_ACX_INTR_DATA) { -+ u8 tx_res_cnt = wl->fw_status->tx_results_counter - -+ wl->tx_results_count; -+ -+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); -+ -+ /* check for tx results */ -+ if (tx_res_cnt) -+ wl1271_tx_complete(wl, tx_res_cnt); -+ -+ wl1271_rx(wl, wl->fw_status); -+ } -+ -+ intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); -+ intr &= WL1271_INTR_MASK; -+ } while (intr && --ctr); -+ -+out_sleep: -+ wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(WL1271_INTR_MASK)); -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+static irqreturn_t wl1271_irq(int irq, void *cookie) -+{ -+ struct wl1271 *wl; -+ unsigned long flags; -+ -+ wl1271_debug(DEBUG_IRQ, "IRQ"); -+ -+ wl = cookie; -+ -+ /* complete the ELP completion */ -+ spin_lock_irqsave(&wl->wl_lock, flags); -+ if (wl->elp_compl) { -+ complete(wl->elp_compl); -+ wl->elp_compl = NULL; -+ } -+ -+ schedule_work(&wl->irq_work); -+ spin_unlock_irqrestore(&wl->wl_lock, flags); -+ -+ return IRQ_HANDLED; -+} -+ -+static int wl1271_fetch_firmware(struct wl1271 *wl) -+{ -+ const struct firmware *fw; -+ int ret; -+ -+ ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev); -+ -+ if (ret < 0) { -+ wl1271_error("could not get firmware: %d", ret); -+ return ret; -+ } -+ -+ if (fw->size % 4) { -+ wl1271_error("firmware size is not multiple of 32 bits: %zu", -+ fw->size); -+ ret = -EILSEQ; -+ goto out; -+ } -+ -+ wl->fw_len = fw->size; -+ wl->fw = kmalloc(wl->fw_len, GFP_KERNEL); -+ -+ if (!wl->fw) { -+ wl1271_error("could not allocate memory for the firmware"); -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ memcpy(wl->fw, fw->data, wl->fw_len); -+ -+ ret = 0; -+ -+out: -+ release_firmware(fw); -+ -+ return ret; -+} -+ -+static int wl1271_fetch_nvs(struct wl1271 *wl) -+{ -+ const struct firmware *fw; -+ int ret; -+ -+ ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev); -+ -+ if (ret < 0) { -+ wl1271_error("could not get nvs file: %d", ret); -+ return ret; -+ } -+ -+ if (fw->size % 4) { -+ wl1271_error("nvs size is not multiple of 32 bits: %zu", -+ fw->size); -+ ret = -EILSEQ; -+ goto out; -+ } -+ -+ wl->nvs_len = fw->size; -+ wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL); -+ -+ if (!wl->nvs) { -+ wl1271_error("could not allocate memory for the nvs file"); -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ memcpy(wl->nvs, fw->data, wl->nvs_len); -+ -+ ret = 0; -+ -+out: -+ release_firmware(fw); -+ -+ return ret; -+} -+ -+static void wl1271_fw_wakeup(struct wl1271 *wl) -+{ -+ u32 elp_reg; -+ -+ elp_reg = ELPCTRL_WAKE_UP; -+ wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); -+} -+ -+static int wl1271_setup(struct wl1271 *wl) -+{ -+ wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL); -+ if (!wl->fw_status) -+ return -ENOMEM; -+ -+ wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); -+ if (!wl->tx_res_if) { -+ kfree(wl->fw_status); -+ return -ENOMEM; -+ } -+ -+ INIT_WORK(&wl->irq_work, wl1271_irq_work); -+ INIT_WORK(&wl->tx_work, wl1271_tx_work); -+ return 0; -+} -+ -+static int wl1271_chip_wakeup(struct wl1271 *wl) -+{ -+ int ret = 0; -+ -+ wl1271_power_on(wl); -+ msleep(WL1271_POWER_ON_SLEEP); -+ wl1271_spi_reset(wl); -+ wl1271_spi_init(wl); -+ -+ /* We don't need a real memory partition here, because we only want -+ * to use the registers at this point. 
*/ -+ wl1271_set_partition(wl, -+ 0x00000000, -+ 0x00000000, -+ REGISTERS_BASE, -+ REGISTERS_DOWN_SIZE); -+ -+ /* ELP module wake up */ -+ wl1271_fw_wakeup(wl); -+ -+ /* whal_FwCtrl_BootSm() */ -+ -+ /* 0. read chip id from CHIP_ID */ -+ wl->chip.id = wl1271_reg_read32(wl, CHIP_ID_B); -+ -+ /* 1. check if chip id is valid */ -+ -+ switch (wl->chip.id) { -+ case CHIP_ID_1271_PG10: -+ wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", -+ wl->chip.id); -+ -+ ret = wl1271_setup(wl); -+ if (ret < 0) -+ goto out; -+ break; -+ case CHIP_ID_1271_PG20: -+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", -+ wl->chip.id); -+ -+ ret = wl1271_setup(wl); -+ if (ret < 0) -+ goto out; -+ break; -+ default: -+ wl1271_error("unsupported chip id: 0x%x", wl->chip.id); -+ ret = -ENODEV; -+ goto out; -+ } -+ -+ if (wl->fw == NULL) { -+ ret = wl1271_fetch_firmware(wl); -+ if (ret < 0) -+ goto out; -+ } -+ -+ /* No NVS from netlink, try to get it from the filesystem */ -+ if (wl->nvs == NULL) { -+ ret = wl1271_fetch_nvs(wl); -+ if (ret < 0) -+ goto out; -+ } -+ -+out: -+ return ret; -+} -+ -+static void wl1271_filter_work(struct work_struct *work) -+{ -+ struct wl1271 *wl = -+ container_of(work, struct wl1271, filter_work); -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ if (wl->state == WL1271_STATE_OFF) -+ goto out; -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ /* FIXME: replace the magic numbers with proper definitions */ -+ ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0); -+ if (ret < 0) -+ goto out_sleep; -+ -+out_sleep: -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+int wl1271_plt_start(struct wl1271 *wl) -+{ -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ wl1271_notice("power up"); -+ -+ if (wl->state != WL1271_STATE_OFF) { -+ wl1271_error("cannot go into PLT state because not " -+ "in off state: %d", wl->state); -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ wl->state = WL1271_STATE_PLT; -+ -+ ret = wl1271_chip_wakeup(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1271_boot(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver); -+ -+ ret = wl1271_plt_init(wl); -+ if (ret < 0) -+ goto out; -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+int wl1271_plt_stop(struct wl1271 *wl) -+{ -+ int ret = 0; -+ -+ mutex_lock(&wl->mutex); -+ -+ wl1271_notice("power down"); -+ -+ if (wl->state != WL1271_STATE_PLT) { -+ wl1271_error("cannot power down because not in PLT " -+ "state: %d", wl->state); -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ wl1271_disable_interrupts(wl); -+ wl1271_power_off(wl); -+ -+ wl->state = WL1271_STATE_OFF; -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+ -+static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -+{ -+ struct wl1271 *wl = hw->priv; -+ -+ skb_queue_tail(&wl->tx_queue, skb); -+ -+ /* -+ * The chip specific setup must run before the first TX packet - -+ * before that, the tx_work will not be initialized! -+ */ -+ -+ schedule_work(&wl->tx_work); -+ -+ /* -+ * The workqueue is slow to process the tx_queue and we need stop -+ * the queue here, otherwise the queue will get too long. -+ */ -+ if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) { -+ ieee80211_stop_queues(wl->hw); -+ -+ /* -+ * FIXME: this is racy, the variable is not properly -+ * protected. Maybe fix this by removing the stupid -+ * variable altogether and checking the real queue state? 
-+ */ -+ wl->tx_queue_stopped = true; -+ } -+ -+ return NETDEV_TX_OK; -+} -+ -+static int wl1271_op_start(struct ieee80211_hw *hw) -+{ -+ struct wl1271 *wl = hw->priv; -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 start"); -+ -+ mutex_lock(&wl->mutex); -+ -+ if (wl->state != WL1271_STATE_OFF) { -+ wl1271_error("cannot start because not in off state: %d", -+ wl->state); -+ ret = -EBUSY; -+ goto out; -+ } -+ -+ ret = wl1271_chip_wakeup(wl); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_boot(wl); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1271_hw_init(wl); -+ if (ret < 0) -+ goto out; -+ -+ wl->state = WL1271_STATE_ON; -+ -+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver); -+ -+out: -+ if (ret < 0) -+ wl1271_power_off(wl); -+ -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static void wl1271_op_stop(struct ieee80211_hw *hw) -+{ -+ struct wl1271 *wl = hw->priv; -+ int i; -+ -+ wl1271_info("down"); -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); -+ -+ mutex_lock(&wl->mutex); -+ -+ WARN_ON(wl->state != WL1271_STATE_ON); -+ -+ if (wl->scanning) { -+ mutex_unlock(&wl->mutex); -+ ieee80211_scan_completed(wl->hw); -+ mutex_lock(&wl->mutex); -+ wl->scanning = false; -+ } -+ -+ wl->state = WL1271_STATE_OFF; -+ -+ wl1271_disable_interrupts(wl); -+ -+ mutex_unlock(&wl->mutex); -+ -+ cancel_work_sync(&wl->irq_work); -+ cancel_work_sync(&wl->tx_work); -+ cancel_work_sync(&wl->filter_work); -+ -+ mutex_lock(&wl->mutex); -+ -+ /* let's notify MAC80211 about the remaining pending TX frames */ -+ wl1271_tx_flush(wl); -+ wl1271_power_off(wl); -+ -+ memset(wl->bssid, 0, ETH_ALEN); -+ memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); -+ wl->ssid_len = 0; -+ wl->listen_int = 1; -+ wl->bss_type = MAX_BSS_TYPE; -+ -+ wl->rx_counter = 0; -+ wl->elp = false; -+ wl->psm = 0; -+ wl->tx_queue_stopped = false; -+ wl->power_level = WL1271_DEFAULT_POWER_LEVEL; -+ wl->tx_blocks_available = 0; -+ wl->tx_results_count = 0; -+ wl->tx_packets_count = 0; -+ wl->time_offset = 0; -+ wl->session_counter = 0; -+ for (i = 0; i < NUM_TX_QUEUES; i++) -+ wl->tx_blocks_freed[i] = 0; -+ -+ wl1271_debugfs_reset(wl); -+ mutex_unlock(&wl->mutex); -+} -+ -+static int wl1271_op_add_interface(struct ieee80211_hw *hw, -+ struct ieee80211_if_init_conf *conf) -+{ -+ struct wl1271 *wl = hw->priv; -+ DECLARE_MAC_BUF(mac); -+ int ret = 0; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %s", -+ conf->type, print_mac(mac, conf->mac_addr)); -+ -+ mutex_lock(&wl->mutex); -+ -+ switch (conf->type) { -+ case NL80211_IFTYPE_STATION: -+ wl->bss_type = BSS_TYPE_STA_BSS; -+ break; -+ case NL80211_IFTYPE_ADHOC: -+ wl->bss_type = BSS_TYPE_IBSS; -+ break; -+ default: -+ ret = -EOPNOTSUPP; -+ goto out; -+ } -+ -+ /* FIXME: what if conf->mac_addr changes? 
*/ -+ -+out: -+ mutex_unlock(&wl->mutex); -+ return ret; -+} -+ -+static void wl1271_op_remove_interface(struct ieee80211_hw *hw, -+ struct ieee80211_if_init_conf *conf) -+{ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); -+} -+ -+static int wl1271_op_config_interface(struct ieee80211_hw *hw, -+ struct ieee80211_vif *vif, -+ struct ieee80211_if_conf *conf) -+{ -+ struct wl1271 *wl = hw->priv; -+ struct sk_buff *beacon; -+ DECLARE_MAC_BUF(mac); -+ int ret; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %s", -+ print_mac(mac, conf->bssid)); -+ wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid, -+ conf->ssid_len); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ memcpy(wl->bssid, conf->bssid, ETH_ALEN); -+ -+ ret = wl1271_cmd_build_null_data(wl); -+ if (ret < 0) -+ goto out_sleep; -+ -+ wl->ssid_len = conf->ssid_len; -+ if (wl->ssid_len) -+ memcpy(wl->ssid, conf->ssid, wl->ssid_len); -+ -+ if (wl->bss_type != BSS_TYPE_IBSS) { -+ /* FIXME: replace the magic numbers with proper definitions */ -+ ret = wl1271_cmd_join(wl, wl->bss_type, 5, 100, 1); -+ if (ret < 0) -+ goto out_sleep; -+ } -+ -+ if (conf->changed & IEEE80211_IFCC_BEACON) { -+ beacon = ieee80211_beacon_get(hw, vif); -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, -+ beacon->data, beacon->len); -+ -+ if (ret < 0) { -+ dev_kfree_skb(beacon); -+ goto out_sleep; -+ } -+ -+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, -+ beacon->data, beacon->len); -+ -+ dev_kfree_skb(beacon); -+ -+ if (ret < 0) -+ goto out_sleep; -+ -+ /* FIXME: replace the magic numbers with proper definitions */ -+ ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0); -+ -+ if (ret < 0) -+ goto out_sleep; -+ } -+ -+out_sleep: -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static int wl1271_op_config(struct ieee80211_hw *hw, -+ struct ieee80211_conf *conf) -+{ -+ struct wl1271 *wl = hw->priv; -+ int channel, ret = 0; -+ -+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq); -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", -+ channel, -+ conf->flags & IEEE80211_CONF_PS ? "on" : "off", -+ conf->power_level); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ if (channel != wl->channel) { -+ u8 old_channel = wl->channel; -+ wl->channel = channel; -+ -+ /* FIXME: use beacon interval provided by mac80211 */ -+ ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0); -+ if (ret < 0) { -+ wl->channel = old_channel; -+ goto out_sleep; -+ } -+ } -+ -+ ret = wl1271_cmd_build_null_data(wl); -+ if (ret < 0) -+ goto out_sleep; -+ -+ if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { -+ wl1271_info("psm enabled"); -+ -+ wl->psm_requested = true; -+ -+ /* -+ * We enter PSM only if we're already associated. -+ * If we're not, we'll enter it when joining an SSID, -+ * through the bss_info_changed() hook. 
-+ */ -+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE); -+ } else if (!(conf->flags & IEEE80211_CONF_PS) && -+ wl->psm_requested) { -+ wl1271_info("psm disabled"); -+ -+ wl->psm_requested = false; -+ -+ if (wl->psm) { -+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); -+ } -+ } -+ -+ if (conf->power_level != wl->power_level) { -+ ret = wl1271_acx_tx_power(wl, conf->power_level); -+ if (ret < 0) -+ goto out; -+ -+ wl->power_level = conf->power_level; -+ } -+ -+out_sleep: -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ -+ FIF_ALLMULTI | \ -+ FIF_FCSFAIL | \ -+ FIF_BCN_PRBRESP_PROMISC | \ -+ FIF_CONTROL | \ -+ FIF_OTHER_BSS) -+ -+static void wl1271_op_configure_filter(struct ieee80211_hw *hw, -+ unsigned int changed, -+ unsigned int *total, -+ int mc_count, -+ struct dev_addr_list *mc_list) -+{ -+ struct wl1271 *wl = hw->priv; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); -+ -+ *total &= WL1271_SUPPORTED_FILTERS; -+ changed &= WL1271_SUPPORTED_FILTERS; -+ -+ if (changed == 0) -+ return; -+ -+ /* FIXME: wl->rx_config and wl->rx_filter are not protected */ -+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG; -+ wl->rx_filter = WL1271_DEFAULT_RX_FILTER; -+ -+ /* -+ * FIXME: workqueues need to be properly cancelled on stop(), for -+ * now let's just disable changing the filter settings. They will -+ * be updated any on config(). -+ */ -+ /* schedule_work(&wl->filter_work); */ -+} -+ -+static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, -+ const u8 *local_addr, const u8 *addr, -+ struct ieee80211_key_conf *key_conf) -+{ -+ struct wl1271 *wl = hw->priv; -+ int ret; -+ u8 key_type; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); -+ -+ wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); -+ wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); -+ wl1271_dump(DEBUG_CRYPT, "LOCAL_ADDR: ", local_addr, ETH_ALEN); -+ wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", -+ key_conf->alg, key_conf->keyidx, -+ key_conf->keylen, key_conf->flags); -+ wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); -+ -+ if (is_zero_ether_addr(addr)) { -+ /* We dont support TX only encryption */ -+ ret = -EOPNOTSUPP; -+ goto out; -+ } -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out_unlock; -+ -+ switch (key_conf->alg) { -+ case ALG_WEP: -+ key_type = KEY_WEP; -+ -+ key_conf->hw_key_idx = key_conf->keyidx; -+ break; -+ case ALG_TKIP: -+ key_type = KEY_TKIP; -+ -+ key_conf->hw_key_idx = key_conf->keyidx; -+ break; -+ case ALG_CCMP: -+ key_type = KEY_AES; -+ -+ key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; -+ break; -+ default: -+ wl1271_error("Unknown key algo 0x%x", key_conf->alg); -+ -+ ret = -EOPNOTSUPP; -+ goto out_sleep; -+ } -+ -+ switch (cmd) { -+ case SET_KEY: -+ ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, -+ key_conf->keyidx, key_type, -+ key_conf->keylen, key_conf->key, -+ addr); -+ if (ret < 0) { -+ wl1271_error("Could not add or replace key"); -+ goto out_sleep; -+ } -+ break; -+ -+ case DISABLE_KEY: -+ ret = wl1271_cmd_set_key(wl, KEY_REMOVE, -+ key_conf->keyidx, key_type, -+ key_conf->keylen, key_conf->key, -+ addr); -+ if (ret < 0) { -+ wl1271_error("Could not remove key"); -+ goto out_sleep; -+ } -+ break; -+ -+ default: -+ wl1271_error("Unsupported key cmd 0x%x", cmd); -+ ret = -EOPNOTSUPP; -+ goto out_sleep; -+ -+ break; -+ } -+ -+out_sleep: -+ 
wl1271_ps_elp_sleep(wl); -+ -+out_unlock: -+ mutex_unlock(&wl->mutex); -+ -+out: -+ return ret; -+} -+ -+static int wl1271_op_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) -+{ -+ struct wl1271 *wl = hw->priv; -+ int ret; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, 13, 3); -+ -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) -+{ -+ struct wl1271 *wl = hw->priv; -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ ret = wl1271_acx_rts_threshold(wl, (u16) value); -+ if (ret < 0) -+ wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); -+ -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, -+ struct ieee80211_vif *vif, -+ struct ieee80211_bss_conf *bss_conf, -+ u32 changed) -+{ -+ enum wl1271_cmd_ps_mode mode; -+ struct wl1271 *wl = hw->priv; -+ int ret; -+ -+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); -+ -+ mutex_lock(&wl->mutex); -+ -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ -+ if (changed & BSS_CHANGED_ASSOC) { -+ if (bss_conf->assoc) { -+ wl->aid = bss_conf->aid; -+ -+ ret = wl1271_cmd_build_ps_poll(wl, wl->aid); -+ if (ret < 0) -+ goto out_sleep; -+ -+ ret = wl1271_acx_aid(wl, wl->aid); -+ if (ret < 0) -+ goto out_sleep; -+ -+ /* If we want to go in PSM but we're not there yet */ -+ if (wl->psm_requested && !wl->psm) { -+ mode = STATION_POWER_SAVE_MODE; -+ ret = wl1271_ps_set_mode(wl, mode); -+ if (ret < 0) -+ goto out_sleep; -+ } -+ } -+ } -+ if (changed & BSS_CHANGED_ERP_SLOT) { -+ if (bss_conf->use_short_slot) -+ ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); -+ else -+ ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); -+ if (ret < 0) { -+ wl1271_warning("Set slot time failed %d", ret); -+ goto out_sleep; -+ } -+ } -+ -+ if (changed & BSS_CHANGED_ERP_PREAMBLE) { -+ if (bss_conf->use_short_preamble) -+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); -+ else -+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); -+ } -+ -+ if (changed & BSS_CHANGED_ERP_CTS_PROT) { -+ if (bss_conf->use_cts_prot) -+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); -+ else -+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); -+ if (ret < 0) { -+ wl1271_warning("Set ctsprotect failed %d", ret); -+ goto out_sleep; -+ } -+ } -+ -+out_sleep: -+ wl1271_ps_elp_sleep(wl); -+ -+out: -+ mutex_unlock(&wl->mutex); -+} -+ -+ -+/* can't be const, mac80211 writes to this */ -+static struct ieee80211_rate wl1271_rates[] = { -+ { .bitrate = 10, -+ .hw_value = 0x1, -+ .hw_value_short = 0x1, }, -+ { .bitrate = 20, -+ .hw_value = 0x2, -+ .hw_value_short = 0x2, -+ .flags = IEEE80211_RATE_SHORT_PREAMBLE }, -+ { .bitrate = 55, -+ .hw_value = 0x4, -+ .hw_value_short = 0x4, -+ .flags = IEEE80211_RATE_SHORT_PREAMBLE }, -+ { .bitrate = 110, -+ .hw_value = 0x20, -+ .hw_value_short = 0x20, -+ .flags = IEEE80211_RATE_SHORT_PREAMBLE }, -+ { .bitrate = 60, -+ .hw_value = 0x8, -+ .hw_value_short = 0x8, }, -+ { .bitrate = 90, -+ .hw_value = 0x10, -+ .hw_value_short = 0x10, }, -+ { .bitrate = 120, -+ .hw_value = 0x40, -+ .hw_value_short = 0x40, }, -+ { .bitrate = 180, -+ .hw_value = 0x80, -+ .hw_value_short = 0x80, }, -+ { 
.bitrate = 240, -+ .hw_value = 0x200, -+ .hw_value_short = 0x200, }, -+ { .bitrate = 360, -+ .hw_value = 0x400, -+ .hw_value_short = 0x400, }, -+ { .bitrate = 480, -+ .hw_value = 0x800, -+ .hw_value_short = 0x800, }, -+ { .bitrate = 540, -+ .hw_value = 0x1000, -+ .hw_value_short = 0x1000, }, -+}; -+ -+/* can't be const, mac80211 writes to this */ -+static struct ieee80211_channel wl1271_channels[] = { -+ { .hw_value = 1, .center_freq = 2412}, -+ { .hw_value = 2, .center_freq = 2417}, -+ { .hw_value = 3, .center_freq = 2422}, -+ { .hw_value = 4, .center_freq = 2427}, -+ { .hw_value = 5, .center_freq = 2432}, -+ { .hw_value = 6, .center_freq = 2437}, -+ { .hw_value = 7, .center_freq = 2442}, -+ { .hw_value = 8, .center_freq = 2447}, -+ { .hw_value = 9, .center_freq = 2452}, -+ { .hw_value = 10, .center_freq = 2457}, -+ { .hw_value = 11, .center_freq = 2462}, -+ { .hw_value = 12, .center_freq = 2467}, -+ { .hw_value = 13, .center_freq = 2472}, -+}; -+ -+/* can't be const, mac80211 writes to this */ -+static struct ieee80211_supported_band wl1271_band_2ghz = { -+ .channels = wl1271_channels, -+ .n_channels = ARRAY_SIZE(wl1271_channels), -+ .bitrates = wl1271_rates, -+ .n_bitrates = ARRAY_SIZE(wl1271_rates), -+}; -+ -+static const struct ieee80211_ops wl1271_ops = { -+ .start = wl1271_op_start, -+ .stop = wl1271_op_stop, -+ .add_interface = wl1271_op_add_interface, -+ .remove_interface = wl1271_op_remove_interface, -+ .config = wl1271_op_config, -+ .config_interface = wl1271_op_config_interface, -+ .configure_filter = wl1271_op_configure_filter, -+ .tx = wl1271_op_tx, -+ .set_key = wl1271_op_set_key, -+ .hw_scan = wl1271_op_hw_scan, -+ .bss_info_changed = wl1271_op_bss_info_changed, -+ .set_rts_threshold = wl1271_op_set_rts_threshold, -+}; -+ -+static int wl1271_register_hw(struct wl1271 *wl) -+{ -+ int ret; -+ -+ if (wl->mac80211_registered) -+ return 0; -+ -+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); -+ -+ ret = ieee80211_register_hw(wl->hw); -+ if (ret < 0) { -+ wl1271_error("unable to register mac80211 hw: %d", ret); -+ return ret; -+ } -+ -+ wl->mac80211_registered = true; -+ -+ wl1271_notice("loaded"); -+ -+ return 0; -+} -+ -+static int wl1271_init_ieee80211(struct wl1271 *wl) -+{ -+ /* -+ * The tx descriptor buffer and the TKIP space. 
-+ * -+ * FIXME: add correct 1271 descriptor size -+ */ -+ wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE; -+ -+ /* unit us */ -+ /* FIXME: find a proper value */ -+ wl->hw->channel_change_time = 10000; -+ -+ wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | -+ IEEE80211_HW_NOISE_DBM; -+ -+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; -+ -+ SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); -+ -+ return 0; -+} -+ -+static void wl1271_device_release(struct device *dev) -+{ -+ -+} -+ -+static struct platform_device wl1271_device = { -+ .name = "wl1271", -+ .id = -1, -+ -+ /* device model insists to have a release function */ -+ .dev = { -+ .release = wl1271_device_release, -+ }, -+}; -+ -+#define WL1271_DEFAULT_CHANNEL 0 -+static int __devinit wl1271_probe(struct spi_device *spi) -+{ -+ struct wl12xx_platform_data *pdata; -+ struct ieee80211_hw *hw; -+ struct wl1271 *wl; -+ int ret, i; -+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; -+ -+ pdata = spi->dev.platform_data; -+ if (!pdata) { -+ wl1271_error("no platform data"); -+ return -ENODEV; -+ } -+ -+ hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); -+ if (!hw) { -+ wl1271_error("could not alloc ieee80211_hw"); -+ return -ENOMEM; -+ } -+ -+ wl = hw->priv; -+ memset(wl, 0, sizeof(*wl)); -+ -+ wl->hw = hw; -+ dev_set_drvdata(&spi->dev, wl); -+ wl->spi = spi; -+ -+ skb_queue_head_init(&wl->tx_queue); -+ -+ INIT_WORK(&wl->filter_work, wl1271_filter_work); -+ wl->channel = WL1271_DEFAULT_CHANNEL; -+ wl->scanning = false; -+ wl->default_key = 0; -+ wl->listen_int = 1; -+ wl->rx_counter = 0; -+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG; -+ wl->rx_filter = WL1271_DEFAULT_RX_FILTER; -+ wl->elp = false; -+ wl->psm = 0; -+ wl->psm_requested = false; -+ wl->tx_queue_stopped = false; -+ wl->power_level = WL1271_DEFAULT_POWER_LEVEL; -+ -+ /* We use the default power on sleep time until we know which chip -+ * we're using */ -+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) -+ wl->tx_frames[i] = NULL; -+ -+ spin_lock_init(&wl->wl_lock); -+ -+ /* -+ * In case our MAC address is not correctly set, -+ * we use a random but Nokia MAC. 
-+ */ -+ memcpy(wl->mac_addr, nokia_oui, 3); -+ get_random_bytes(wl->mac_addr + 3, 3); -+ -+ wl->state = WL1271_STATE_OFF; -+ mutex_init(&wl->mutex); -+ -+ wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL); -+ if (!wl->rx_descriptor) { -+ wl1271_error("could not allocate memory for rx descriptor"); -+ ret = -ENOMEM; -+ goto out_free; -+ } -+ -+ /* This is the only SPI value that we need to set here, the rest -+ * comes from the board-peripherals file */ -+ spi->bits_per_word = 32; -+ -+ ret = spi_setup(spi); -+ if (ret < 0) { -+ wl1271_error("spi_setup failed"); -+ goto out_free; -+ } -+ -+ wl->set_power = pdata->set_power; -+ if (!wl->set_power) { -+ wl1271_error("set power function missing in platform data"); -+ ret = -ENODEV; -+ goto out_free; -+ } -+ -+ wl->irq = spi->irq; -+ if (wl->irq < 0) { -+ wl1271_error("irq missing in platform data"); -+ ret = -ENODEV; -+ goto out_free; -+ } -+ -+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); -+ if (ret < 0) { -+ wl1271_error("request_irq() failed: %d", ret); -+ goto out_free; -+ } -+ -+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); -+ -+ disable_irq(wl->irq); -+ -+ ret = platform_device_register(&wl1271_device); -+ if (ret) { -+ wl1271_error("couldn't register platform device"); -+ goto out_irq; -+ } -+ dev_set_drvdata(&wl1271_device.dev, wl); -+ -+ ret = wl1271_init_ieee80211(wl); -+ if (ret) -+ goto out_platform; -+ -+ ret = wl1271_register_hw(wl); -+ if (ret) -+ goto out_platform; -+ -+ ret = wl1271_nl_register(); -+ if (ret) -+ goto out_register_hw; -+ -+ wl1271_debugfs_init(wl); -+ -+ wl1271_notice("initialized"); -+ -+ return 0; -+ -+ out_register_hw: -+ ieee80211_unregister_hw(hw); -+ wl->mac80211_registered = false; -+ -+ out_platform: -+ platform_device_unregister(&wl1271_device); -+ -+ out_irq: -+ free_irq(wl->irq, wl); -+ -+ out_free: -+ kfree(wl->rx_descriptor); -+ wl->rx_descriptor = NULL; -+ -+ ieee80211_free_hw(hw); -+ -+ return ret; -+} -+ -+static int __devexit wl1271_remove(struct spi_device *spi) -+{ -+ struct wl1271 *wl = dev_get_drvdata(&spi->dev); -+ -+ ieee80211_unregister_hw(wl->hw); -+ -+ wl1271_debugfs_exit(wl); -+ platform_device_unregister(&wl1271_device); -+ free_irq(wl->irq, wl); -+ kfree(wl->target_mem_map); -+ kfree(wl->fw); -+ wl->fw = NULL; -+ kfree(wl->nvs); -+ wl->nvs = NULL; -+ -+ kfree(wl->rx_descriptor); -+ wl->rx_descriptor = NULL; -+ -+ kfree(wl->fw_status); -+ kfree(wl->tx_res_if); -+ -+ ieee80211_free_hw(wl->hw); -+ wl1271_nl_unregister(); -+ -+ return 0; -+} -+ -+ -+static struct spi_driver wl1271_spi_driver = { -+ .driver = { -+ .name = "wl1271", -+ .bus = &spi_bus_type, -+ .owner = THIS_MODULE, -+ }, -+ -+ .probe = wl1271_probe, -+ .remove = __devexit_p(wl1271_remove), -+}; -+ -+static int __init wl1271_init(void) -+{ -+ int ret; -+ -+ ret = spi_register_driver(&wl1271_spi_driver); -+ if (ret < 0) { -+ wl1271_error("failed to register spi driver: %d", ret); -+ goto out; -+ } -+ -+out: -+ return ret; -+} -+ -+static void __exit wl1271_exit(void) -+{ -+ spi_unregister_driver(&wl1271_spi_driver); -+ -+ wl1271_notice("unloaded"); -+} -+ -+module_init(wl1271_init); -+module_exit(wl1271_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Kalle Valo , " -+ "Luciano Coelho "); -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_netlink.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_netlink.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_netlink.c 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_netlink.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,679 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+#include "wl1271_netlink.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "wl1271.h" -+#include "wl1271_spi.h" -+#include "wl1271_acx.h" -+ -+/* FIXME: this should be changed as soon as user space catches up */ -+#define WL1271_NL_NAME "wl1251" -+#define WL1271_NL_VERSION 1 -+ -+#define WL1271_MAX_TEST_LENGTH 1024 -+#define WL1271_MAX_NVS_LENGTH 1024 -+ -+enum wl1271_nl_commands { -+ WL1271_NL_CMD_UNSPEC, -+ WL1271_NL_CMD_TEST, -+ WL1271_NL_CMD_INTERROGATE, -+ WL1271_NL_CMD_CONFIGURE, -+ WL1271_NL_CMD_PHY_REG_READ, -+ WL1271_NL_CMD_NVS_PUSH, -+ WL1271_NL_CMD_REG_WRITE, -+ WL1271_NL_CMD_REG_READ, -+ WL1271_NL_CMD_SET_PLT_MODE, -+ -+ __WL1271_NL_CMD_AFTER_LAST -+}; -+#define WL1271_NL_CMD_MAX (__WL1271_NL_CMD_AFTER_LAST - 1) -+ -+enum wl1271_nl_attrs { -+ WL1271_NL_ATTR_UNSPEC, -+ WL1271_NL_ATTR_IFNAME, -+ WL1271_NL_ATTR_CMD_TEST_PARAM, -+ WL1271_NL_ATTR_CMD_TEST_ANSWER, -+ WL1271_NL_ATTR_CMD_IE, -+ WL1271_NL_ATTR_CMD_IE_LEN, -+ WL1271_NL_ATTR_CMD_IE_BUFFER, -+ WL1271_NL_ATTR_CMD_IE_ANSWER, -+ WL1271_NL_ATTR_REG_ADDR, -+ WL1271_NL_ATTR_REG_VAL, -+ WL1271_NL_ATTR_NVS_BUFFER, -+ WL1271_NL_ATTR_NVS_LEN, -+ WL1271_NL_ATTR_PLT_MODE, -+ -+ __WL1271_NL_ATTR_AFTER_LAST -+}; -+#define WL1271_NL_ATTR_MAX (__WL1271_NL_ATTR_AFTER_LAST - 1) -+ -+static struct genl_family wl1271_nl_family = { -+ .id = GENL_ID_GENERATE, -+ .name = WL1271_NL_NAME, -+ .hdrsize = 0, -+ .version = WL1271_NL_VERSION, -+ .maxattr = WL1271_NL_ATTR_MAX, -+}; -+ -+static struct net_device *ifname_to_netdev(struct net *net, -+ struct genl_info *info) -+{ -+ char *ifname; -+ -+ if (!info->attrs[WL1271_NL_ATTR_IFNAME]) -+ return NULL; -+ -+ ifname = nla_data(info->attrs[WL1271_NL_ATTR_IFNAME]); -+ -+ wl1271_debug(DEBUG_NETLINK, "Looking for %s", ifname); -+ -+ return dev_get_by_name(net, ifname); -+} -+ -+static struct wl1271 *ifname_to_wl1271(struct net *net, struct genl_info *info) -+{ -+ struct net_device *netdev; -+ struct wireless_dev *wdev; -+ struct wiphy *wiphy; -+ struct ieee80211_hw *hw; -+ -+ netdev = ifname_to_netdev(net, info); -+ if (netdev == NULL) { -+ wl1271_error("Wrong interface"); -+ return NULL; -+ } -+ -+ wdev = netdev->ieee80211_ptr; -+ if (wdev == NULL) { -+ wl1271_error("ieee80211_ptr is NULL"); -+ return NULL; -+ } -+ -+ wiphy = wdev->wiphy; -+ if (wiphy == NULL) { -+ wl1271_error("wiphy is NULL"); -+ return NULL; -+ } -+ -+ hw = wiphy_priv(wiphy); -+ if (hw == NULL) { -+ wl1271_error("hw is NULL"); -+ return NULL; -+ } -+ -+ dev_put(netdev); -+ -+ return hw->priv; -+} -+ -+static int wl1271_nl_test_cmd(struct sk_buff *skb, struct genl_info *info) 
-+{ -+ struct wl1271 *wl; -+ struct wl1271_command *cmd; -+ char *buf; -+ int buf_len, ret, cmd_len; -+ u8 answer; -+ -+ if (!info->attrs[WL1271_NL_ATTR_CMD_TEST_PARAM]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ return -EINVAL; -+ } -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ buf = nla_data(info->attrs[WL1271_NL_ATTR_CMD_TEST_PARAM]); -+ buf_len = nla_len(info->attrs[WL1271_NL_ATTR_CMD_TEST_PARAM]); -+ answer = nla_get_u8(info->attrs[WL1271_NL_ATTR_CMD_TEST_ANSWER]); -+ -+ cmd->header.id = CMD_TEST; -+ memcpy(cmd->parameters, buf, buf_len); -+ cmd_len = sizeof(struct wl1271_cmd_header) + buf_len; -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1271_cmd_test(wl, cmd, cmd_len, answer); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto out; -+ } -+ -+ if (answer) { -+ struct sk_buff *msg; -+ void *hdr; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1271_nl_family, 0, WL1271_NL_CMD_TEST); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1271_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1271_NL_ATTR_IFNAME])); -+ NLA_PUT(msg, WL1271_NL_ATTR_CMD_TEST_ANSWER, -+ sizeof(*cmd), cmd); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ wl1271_debug(DEBUG_NETLINK, "TEST cmd sent, answer"); -+ ret = genlmsg_reply(msg, info); -+ goto out; -+ -+ nla_put_failure: -+ nlmsg_free(msg); -+ } else -+ wl1271_debug(DEBUG_NETLINK, "TEST cmd sent"); -+ -+out: -+ kfree(cmd); -+ return ret; -+} -+ -+static int wl1271_nl_interrogate(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1271 *wl; -+ struct sk_buff *msg; -+ int ret = -ENOBUFS, cmd_ie, cmd_ie_len; -+ struct wl1271_command *cmd; -+ void *hdr; -+ -+ if (!info->attrs[WL1271_NL_ATTR_CMD_IE]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1271_NL_ATTR_CMD_IE_LEN]) -+ return -EINVAL; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ ret = -EINVAL; -+ goto nla_put_failure; -+ } -+ -+ /* acx id */ -+ cmd_ie = nla_get_u32(info->attrs[WL1271_NL_ATTR_CMD_IE]); -+ -+ /* maximum length of acx, including all headers */ -+ cmd_ie_len = nla_get_u32(info->attrs[WL1271_NL_ATTR_CMD_IE_LEN]); -+ -+ wl1271_debug(DEBUG_NETLINK, "Getting IE 0x%x (len %d)", -+ cmd_ie, cmd_ie_len); -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1271_cmd_interrogate(wl, cmd_ie, cmd, cmd_ie_len); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1271_nl_family, 0, WL1271_NL_CMD_INTERROGATE); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1271_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1271_NL_ATTR_IFNAME])); -+ NLA_PUT(msg, WL1271_NL_ATTR_CMD_IE_ANSWER, cmd_ie_len, cmd); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ kfree(cmd); -+ return genlmsg_reply(msg, info); -+ -+ nla_put_failure: -+ kfree(cmd); 
-+ nlmsg_free(msg); -+ -+ return ret; -+} -+ -+static int wl1271_nl_configure(struct sk_buff *skb, struct genl_info *info) -+{ -+ int ret = 0, cmd_ie_len, acx_len; -+ struct acx_header *acx = NULL; -+ struct sk_buff *msg; -+ struct wl1271 *wl; -+ void *cmd_ie; -+ u16 *id; -+ -+ if (!info->attrs[WL1271_NL_ATTR_CMD_IE_BUFFER]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1271_NL_ATTR_CMD_IE_LEN]) -+ return -EINVAL; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ ret = -EINVAL; -+ goto nla_put_failure; -+ } -+ -+ /* contains the acx header but not the cmd header */ -+ cmd_ie = nla_data(info->attrs[WL1271_NL_ATTR_CMD_IE_BUFFER]); -+ -+ cmd_ie_len = nla_get_u32(info->attrs[WL1271_NL_ATTR_CMD_IE_LEN]); -+ -+ /* acx id is in the first two bytes */ -+ id = cmd_ie; -+ -+ /* need to add acx_header before cmd_ie, so create a new command */ -+ acx_len = sizeof(struct acx_header) + cmd_ie_len; -+ acx = kzalloc(acx_len, GFP_KERNEL); -+ if (!acx) { -+ ret = -ENOMEM; -+ goto nla_put_failure; -+ } -+ -+ /* copy the acx header and the payload */ -+ memcpy(&acx->id, cmd_ie, cmd_ie_len); -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1271_cmd_configure(wl, *id, acx, acx_len); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ wl1271_debug(DEBUG_NETLINK, "CONFIGURE cmd sent"); -+ -+ nla_put_failure: -+ kfree(acx); -+ nlmsg_free(msg); -+ -+ return ret; -+} -+ -+static int wl1271_nl_phy_reg_read(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1271 *wl; -+ struct sk_buff *msg; -+ u32 reg_addr, *reg_value = NULL; -+ int ret = 0; -+ void *hdr; -+ -+ if (!info->attrs[WL1271_NL_ATTR_REG_ADDR]) -+ return -EINVAL; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ ret = -EINVAL; -+ goto nla_put_failure; -+ } -+ -+ reg_value = kmalloc(sizeof(*reg_value), GFP_KERNEL); -+ if (!reg_value) { -+ ret = -ENOMEM; -+ goto nla_put_failure; -+ } -+ -+ reg_addr = nla_get_u32(info->attrs[WL1271_NL_ATTR_REG_ADDR]); -+ -+ wl1271_debug(DEBUG_NETLINK, "Reading PHY reg 0x%x", reg_addr); -+ -+ mutex_lock(&wl->mutex); -+ ret = wl1271_cmd_read_memory(wl, reg_addr, reg_value, -+ sizeof(*reg_value)); -+ mutex_unlock(&wl->mutex); -+ -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1271_nl_family, 0, WL1271_NL_CMD_PHY_REG_READ); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1271_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1271_NL_ATTR_IFNAME])); -+ -+ NLA_PUT_U32(msg, WL1271_NL_ATTR_REG_VAL, *reg_value); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ kfree(reg_value); -+ -+ return genlmsg_reply(msg, info); -+ -+ nla_put_failure: -+ nlmsg_free(msg); -+ kfree(reg_value); -+ -+ return ret; -+} -+ -+static int wl1271_nl_nvs_push(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1271 *wl; -+ int ret = 0; -+ -+ if (!info->attrs[WL1271_NL_ATTR_NVS_BUFFER]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1271_NL_ATTR_NVS_LEN]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not 
found"); -+ return -EINVAL; -+ } -+ -+ mutex_lock(&wl->mutex); -+ wl->nvs_len = nla_get_u32(info->attrs[WL1271_NL_ATTR_NVS_LEN]); -+ if (wl->nvs_len % 4) { -+ wl1271_error("NVS size is not multiple of 32: %d", wl->nvs_len); -+ ret = -EILSEQ; -+ goto out; -+ } -+ -+ /* If we already have an NVS, we should free it */ -+ kfree(wl->nvs); -+ -+ wl->nvs = kzalloc(wl->nvs_len, GFP_KERNEL); -+ if (wl->nvs == NULL) { -+ wl1271_error("Can't allocate NVS"); -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ memcpy(wl->nvs, -+ nla_data(info->attrs[WL1271_NL_ATTR_NVS_BUFFER]), -+ wl->nvs_len); -+ -+ wl1271_debug(DEBUG_NETLINK, "got NVS from userspace, %d bytes", -+ wl->nvs_len); -+ -+out: -+ mutex_unlock(&wl->mutex); -+ -+ return ret; -+} -+ -+static int wl1271_nl_reg_read(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1271 *wl; -+ u32 addr, val; -+ int ret = 0; -+ struct sk_buff *msg; -+ void *hdr; -+ -+ if (!info->attrs[WL1271_NL_ATTR_REG_ADDR]) -+ return -EINVAL; -+ -+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); -+ if (!msg) -+ return -ENOMEM; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ return -EINVAL; -+ } -+ -+ addr = nla_get_u32(info->attrs[WL1271_NL_ATTR_REG_ADDR]); -+ -+ mutex_lock(&wl->mutex); -+ val = wl1271_reg_read32(wl, addr); -+ mutex_unlock(&wl->mutex); -+ -+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, -+ &wl1271_nl_family, 0, WL1271_NL_CMD_PHY_REG_READ); -+ if (IS_ERR(hdr)) { -+ ret = PTR_ERR(hdr); -+ goto nla_put_failure; -+ } -+ -+ NLA_PUT_STRING(msg, WL1271_NL_ATTR_IFNAME, -+ nla_data(info->attrs[WL1271_NL_ATTR_IFNAME])); -+ -+ NLA_PUT_U32(msg, WL1271_NL_ATTR_REG_VAL, val); -+ -+ ret = genlmsg_end(msg, hdr); -+ if (ret < 0) { -+ wl1271_error("%s() failed", __func__); -+ goto nla_put_failure; -+ } -+ -+ return genlmsg_reply(msg, info); -+ -+ nla_put_failure: -+ nlmsg_free(msg); -+ -+ return ret; -+} -+ -+static int wl1271_nl_reg_write(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1271 *wl; -+ u32 addr, val; -+ -+ if (!info->attrs[WL1271_NL_ATTR_REG_ADDR]) -+ return -EINVAL; -+ -+ if (!info->attrs[WL1271_NL_ATTR_REG_VAL]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ return -EINVAL; -+ } -+ -+ addr = nla_get_u32(info->attrs[WL1271_NL_ATTR_REG_ADDR]); -+ val = nla_get_u32(info->attrs[WL1271_NL_ATTR_REG_VAL]); -+ -+ mutex_lock(&wl->mutex); -+ wl1271_reg_write32(wl, addr, val); -+ mutex_unlock(&wl->mutex); -+ -+ return 0; -+} -+ -+static int wl1271_nl_set_plt_mode(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wl1271 *wl; -+ u32 val; -+ int ret; -+ -+ if (!info->attrs[WL1271_NL_ATTR_PLT_MODE]) -+ return -EINVAL; -+ -+ wl = ifname_to_wl1271(&init_net, info); -+ if (wl == NULL) { -+ wl1271_error("wl1271 not found"); -+ return -EINVAL; -+ } -+ -+ val = nla_get_u32(info->attrs[WL1271_NL_ATTR_PLT_MODE]); -+ -+ switch (val) { -+ case 0: -+ ret = wl1271_plt_stop(wl); -+ break; -+ case 1: -+ ret = wl1271_plt_start(wl); -+ break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ -+ return ret; -+} -+ -+static struct nla_policy wl1271_nl_policy[WL1271_NL_ATTR_MAX + 1] = { -+ [WL1271_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING, -+ .len = IFNAMSIZ-1 }, -+ [WL1271_NL_ATTR_CMD_TEST_PARAM] = { .type = NLA_BINARY, -+ .len = WL1271_MAX_TEST_LENGTH }, -+ [WL1271_NL_ATTR_CMD_TEST_ANSWER] = { .type = NLA_U8 }, -+ [WL1271_NL_ATTR_CMD_IE] = { .type = NLA_U32 }, -+ [WL1271_NL_ATTR_CMD_IE_LEN] = { .type = NLA_U32 }, -+ 
[WL1271_NL_ATTR_CMD_IE_BUFFER] = { .type = NLA_BINARY, -+ .len = WL1271_MAX_TEST_LENGTH }, -+ [WL1271_NL_ATTR_CMD_IE_ANSWER] = { .type = NLA_BINARY, -+ .len = WL1271_MAX_TEST_LENGTH }, -+ [WL1271_NL_ATTR_REG_ADDR] = { .type = NLA_U32 }, -+ [WL1271_NL_ATTR_REG_VAL] = { .type = NLA_U32 }, -+ [WL1271_NL_ATTR_NVS_BUFFER] = { .type = NLA_BINARY, -+ .len = WL1271_MAX_NVS_LENGTH }, -+ [WL1271_NL_ATTR_NVS_LEN] = { .type = NLA_U32 }, -+ [WL1271_NL_ATTR_PLT_MODE] = { .type = NLA_U32 }, -+}; -+ -+static struct genl_ops wl1271_nl_ops[] = { -+ { -+ .cmd = WL1271_NL_CMD_TEST, -+ .doit = wl1271_nl_test_cmd, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_INTERROGATE, -+ .doit = wl1271_nl_interrogate, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_CONFIGURE, -+ .doit = wl1271_nl_configure, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_PHY_REG_READ, -+ .doit = wl1271_nl_phy_reg_read, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_NVS_PUSH, -+ .doit = wl1271_nl_nvs_push, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_REG_WRITE, -+ .doit = wl1271_nl_reg_write, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_REG_READ, -+ .doit = wl1271_nl_reg_read, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+ { -+ .cmd = WL1271_NL_CMD_SET_PLT_MODE, -+ .doit = wl1271_nl_set_plt_mode, -+ .policy = wl1271_nl_policy, -+ .flags = GENL_ADMIN_PERM, -+ }, -+}; -+ -+int wl1271_nl_register(void) -+{ -+ int err, i; -+ -+ err = genl_register_family(&wl1271_nl_family); -+ if (err) -+ return err; -+ -+ for (i = 0; i < ARRAY_SIZE(wl1271_nl_ops); i++) { -+ err = genl_register_ops(&wl1271_nl_family, &wl1271_nl_ops[i]); -+ if (err) -+ goto err_out; -+ } -+ return 0; -+ err_out: -+ genl_unregister_family(&wl1271_nl_family); -+ return err; -+} -+ -+void wl1271_nl_unregister(void) -+{ -+ genl_unregister_family(&wl1271_nl_family); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_netlink.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_netlink.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_netlink.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_netlink.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,30 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_NETLINK_H__ -+#define __WL1271_NETLINK_H__ -+ -+int wl1271_nl_register(void); -+void wl1271_nl_unregister(void); -+ -+#endif /* __WL1271_NETLINK_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_ps.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_ps.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_ps.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_ps.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,127 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1271_reg.h" -+#include "wl1271_ps.h" -+#include "wl1271_spi.h" -+ -+#define WL1271_WAKEUP_TIMEOUT 500 -+ -+/* Routines to toggle sleep mode while in ELP */ -+void wl1271_ps_elp_sleep(struct wl1271 *wl) -+{ -+ if (wl->elp || !wl->psm) -+ return; -+ -+ /* -+ * Go to ELP unless there is work already pending - pending work -+ * will immediately wakeup the chipset anyway. -+ */ -+ if (!work_pending(&wl->irq_work) && !work_pending(&wl->tx_work)) { -+ wl1271_debug(DEBUG_PSM, "chip to elp"); -+ wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); -+ wl->elp = true; -+ } -+} -+ -+int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake) -+{ -+ DECLARE_COMPLETION_ONSTACK(compl); -+ unsigned long flags; -+ int ret; -+ u32 start_time = jiffies; -+ bool pending = false; -+ -+ if (!wl->elp) -+ return 0; -+ -+ wl1271_debug(DEBUG_PSM, "waking up chip from elp"); -+ -+ /* -+ * The spinlock is required here to synchronize both the work and -+ * the completion variable in one entity. 
-+ */ -+ spin_lock_irqsave(&wl->wl_lock, flags); -+ if (work_pending(&wl->irq_work) || chip_awake) -+ pending = true; -+ else -+ wl->elp_compl = &compl; -+ spin_unlock_irqrestore(&wl->wl_lock, flags); -+ -+ wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); -+ -+ if (!pending) { -+ ret = wait_for_completion_timeout( -+ &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); -+ if (ret == 0) -+ wl1271_error("ELP wakeup timeout!"); -+ else if (ret < 0) { -+ wl1271_error("ELP wakeup completion error."); -+ return -EIO; -+ } -+ } -+ -+ wl->elp = false; -+ -+ wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", -+ jiffies_to_msecs(jiffies - start_time)); -+ -+ return 0; -+} -+ -+int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode) -+{ -+ int ret; -+ -+ switch (mode) { -+ case STATION_POWER_SAVE_MODE: -+ wl1271_debug(DEBUG_PSM, "entering psm"); -+ ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); -+ if (ret < 0) -+ return ret; -+ -+ wl1271_ps_elp_sleep(wl); -+ if (ret < 0) -+ return ret; -+ -+ wl->psm = 1; -+ break; -+ case STATION_ACTIVE_MODE: -+ default: -+ wl1271_debug(DEBUG_PSM, "leaving psm"); -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); -+ if (ret < 0) -+ return ret; -+ -+ wl->psm = 0; -+ break; -+ } -+ -+ return ret; -+} -+ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_ps.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_ps.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_ps.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_ps.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,36 @@ -+#ifndef __WL1271_PS_H__ -+#define __WL1271_PS_H__ -+ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1271.h" -+#include "wl1271_acx.h" -+ -+int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode); -+void wl1271_ps_elp_sleep(struct wl1271 *wl); -+int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); -+ -+ -+#endif /* __WL1271_PS_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_reg.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_reg.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_reg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_reg.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,758 @@ -+/* -+ * This file is part of wl12xx -+ * -+ * Copyright (C) 1998-2009 Texas Instruments Incorporated -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __REG_H__ -+#define __REG_H__ -+ -+#include -+ -+#define REGISTERS_BASE 0x00300000 -+#define DRPW_BASE 0x00310000 -+ -+#define REGISTERS_DOWN_SIZE 0x00008800 -+#define REGISTERS_WORK_SIZE 0x0000b000 -+ -+#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC -+#define STATUS_MEM_ADDRESS 0x40400 -+ -+/* ELP register commands */ -+#define ELPCTRL_WAKE_UP 0x1 -+#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 -+#define ELPCTRL_SLEEP 0x0 -+/* ELP WLAN_READY bit */ -+#define ELPCTRL_WLAN_READY 0x2 -+ -+/*=============================================== -+ Host Software Reset - 32bit RW -+ ------------------------------------------ -+ [31:1] Reserved -+ 0 SOFT_RESET Soft Reset - When this bit is set, -+ it holds the Wlan hardware in a soft reset state. -+ This reset disables all MAC and baseband processor -+ clocks except the CardBus/PCI interface clock. -+ It also initializes all MAC state machines except -+ the host interface. It does not reload the -+ contents of the EEPROM. When this bit is cleared -+ (not self-clearing), the Wlan hardware -+ exits the software reset state. -+===============================================*/ -+#define ACX_REG_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000) -+ -+#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) -+#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) -+#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) -+/* -+ * Interrupt registers. -+ * 64 bit interrupt sources registers ws ced. -+ * sme interupts were removed and new ones were added. -+ * Order was changed. 
-+ */ -+#define FIQ_MASK (REGISTERS_BASE + 0x0400) -+#define FIQ_MASK_L (REGISTERS_BASE + 0x0400) -+#define FIQ_MASK_H (REGISTERS_BASE + 0x0404) -+#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408) -+#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408) -+#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C) -+#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410) -+#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410) -+#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414) -+#define IRQ_MASK (REGISTERS_BASE + 0x0418) -+#define IRQ_MASK_L (REGISTERS_BASE + 0x0418) -+#define IRQ_MASK_H (REGISTERS_BASE + 0x041C) -+#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420) -+#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420) -+#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424) -+#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428) -+#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428) -+#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C) -+#define ECPU_MASK (REGISTERS_BASE + 0x0448) -+#define FIQ_STS_L (REGISTERS_BASE + 0x044C) -+#define FIQ_STS_H (REGISTERS_BASE + 0x0450) -+#define IRQ_STS_L (REGISTERS_BASE + 0x0454) -+#define IRQ_STS_H (REGISTERS_BASE + 0x0458) -+#define INT_STS_ND (REGISTERS_BASE + 0x0464) -+#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464) -+#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468) -+#define INT_STS_CLR (REGISTERS_BASE + 0x04B4) -+#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4) -+#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8) -+#define INT_ACK (REGISTERS_BASE + 0x046C) -+#define INT_ACK_L (REGISTERS_BASE + 0x046C) -+#define INT_ACK_H (REGISTERS_BASE + 0x0470) -+#define INT_TRIG (REGISTERS_BASE + 0x0474) -+#define INT_TRIG_L (REGISTERS_BASE + 0x0474) -+#define INT_TRIG_H (REGISTERS_BASE + 0x0478) -+#define HOST_STS_L (REGISTERS_BASE + 0x045C) -+#define HOST_STS_H (REGISTERS_BASE + 0x0460) -+#define HOST_MASK (REGISTERS_BASE + 0x0430) -+#define HOST_MASK_L (REGISTERS_BASE + 0x0430) -+#define HOST_MASK_H (REGISTERS_BASE + 0x0434) -+#define HOST_MASK_SET (REGISTERS_BASE + 0x0438) -+#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438) -+#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C) -+#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440) -+#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440) -+#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444) -+ -+#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) -+#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) -+ -+/* Host Interrupts*/ -+#define HINT_MASK (REGISTERS_BASE + 0x0494) -+#define HINT_MASK_SET (REGISTERS_BASE + 0x0498) -+#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C) -+#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0) -+/*1150 spec calls this HINT_STS_RAW*/ -+#define HINT_STS_ND (REGISTERS_BASE + 0x04B0) -+#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4) -+#define HINT_ACK (REGISTERS_BASE + 0x04A8) -+#define HINT_TRIG (REGISTERS_BASE + 0x04AC) -+ -+/*============================================= -+ Host Interrupt Mask Register - 32bit (RW) -+ ------------------------------------------ -+ Setting a bit in this register masks the -+ corresponding interrupt to the host. 
-+ 0 - RX0 - Rx first dubble buffer Data Interrupt -+ 1 - TXD - Tx Data Interrupt -+ 2 - TXXFR - Tx Transfer Interrupt -+ 3 - RX1 - Rx second dubble buffer Data Interrupt -+ 4 - RXXFR - Rx Transfer Interrupt -+ 5 - EVENT_A - Event Mailbox interrupt -+ 6 - EVENT_B - Event Mailbox interrupt -+ 7 - WNONHST - Wake On Host Interrupt -+ 8 - TRACE_A - Debug Trace interrupt -+ 9 - TRACE_B - Debug Trace interrupt -+ 10 - CDCMP - Command Complete Interrupt -+ 11 - -+ 12 - -+ 13 - -+ 14 - ICOMP - Initialization Complete Interrupt -+ 16 - SG SE - Soft Gemini - Sense enable interrupt -+ 17 - SG SD - Soft Gemini - Sense disable interrupt -+ 18 - - -+ 19 - - -+ 20 - - -+ 21- - -+ Default: 0x0001 -+*==============================================*/ -+#define ACX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC) -+ -+/*============================================= -+ Host Interrupt Mask Set 16bit, (Write only) -+ ------------------------------------------ -+ Setting a bit in this register sets -+ the corresponding bin in ACX_HINT_MASK register -+ without effecting the mask -+ state of other bits (0 = no effect). -+==============================================*/ -+#define ACX_REG_HINT_MASK_SET (REGISTERS_BASE + 0x04E0) -+ -+/*============================================= -+ Host Interrupt Mask Clear 16bit,(Write only) -+ ------------------------------------------ -+ Setting a bit in this register clears -+ the corresponding bin in ACX_HINT_MASK register -+ without effecting the mask -+ state of other bits (0 = no effect). -+=============================================*/ -+#define ACX_REG_HINT_MASK_CLR (REGISTERS_BASE + 0x04E4) -+ -+/*============================================= -+ Host Interrupt Status Nondestructive Read -+ 16bit,(Read only) -+ ------------------------------------------ -+ The host can read this register to determine -+ which interrupts are active. -+ Reading this register doesn't -+ effect its content. -+=============================================*/ -+#define ACX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8) -+ -+/*============================================= -+ Host Interrupt Status Clear on Read Register -+ 16bit,(Read only) -+ ------------------------------------------ -+ The host can read this register to determine -+ which interrupts are active. -+ Reading this register clears it, -+ thus making all interrupts inactive. -+==============================================*/ -+#define ACX_REG_INTERRUPT_CLEAR (REGISTERS_BASE + 0x04F8) -+ -+/*============================================= -+ Host Interrupt Acknowledge Register -+ 16bit,(Write only) -+ ------------------------------------------ -+ The host can set individual bits in this -+ register to clear (acknowledge) the corresp. -+ interrupt status bits in the HINT_STS_CLR and -+ HINT_STS_ND registers, thus making the -+ assotiated interrupt inactive. (0-no effect) -+==============================================*/ -+#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) -+ -+#define RX_DRIVER_DUMMY_WRITE_ADDRESS (REGISTERS_BASE + 0x0534) -+#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) -+ -+/* Device Configuration registers*/ -+#define SOR_CFG (REGISTERS_BASE + 0x0800) -+ -+/* Embedded ARM CPU Control */ -+ -+/*=============================================== -+ Halt eCPU - 32bit RW -+ ------------------------------------------ -+ 0 HALT_ECPU Halt Embedded CPU - This bit is the -+ compliment of bit 1 (MDATA2) in the SOR_CFG register. -+ During a hardware reset, this bit holds -+ the inverse of MDATA2. 
-+ When downloading firmware from the host, -+ set this bit (pull down MDATA2). -+ The host clears this bit after downloading the firmware into -+ zero-wait-state SSRAM. -+ When loading firmware from Flash, clear this bit (pull up MDATA2) -+ so that the eCPU can run the bootloader code in Flash -+ HALT_ECPU eCPU State -+ -------------------- -+ 1 halt eCPU -+ 0 enable eCPU -+ ===============================================*/ -+#define ACX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804) -+ -+#define HI_CFG (REGISTERS_BASE + 0x0808) -+ -+/*=============================================== -+ EEPROM Burst Read Start - 32bit RW -+ ------------------------------------------ -+ [31:1] Reserved -+ 0 ACX_EE_START - EEPROM Burst Read Start 0 -+ Setting this bit starts a burst read from -+ the external EEPROM. -+ If this bit is set (after reset) before an EEPROM read/write, -+ the burst read starts at EEPROM address 0. -+ Otherwise, it starts at the address -+ following the address of the previous access. -+ TheWlan hardware hardware clears this bit automatically. -+ -+ Default: 0x00000000 -+*================================================*/ -+#define ACX_REG_EE_START (REGISTERS_BASE + 0x080C) -+ -+#define OCP_POR_CTR (REGISTERS_BASE + 0x09B4) -+#define OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8) -+#define OCP_DATA_READ (REGISTERS_BASE + 0x09BC) -+#define OCP_CMD (REGISTERS_BASE + 0x09C0) -+ -+#define WL1271_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8) -+ -+#define CHIP_ID_B (REGISTERS_BASE + 0x5674) -+ -+#define CHIP_ID_1271_PG10 (0x4030101) -+#define CHIP_ID_1271_PG20 (0x4030111) -+ -+#define ENABLE (REGISTERS_BASE + 0x5450) -+ -+/* Power Management registers */ -+#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) -+#define ELP_CMD (REGISTERS_BASE + 0x5808) -+#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) -+#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) -+#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) -+ -+#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) -+ -+/* Scratch Pad registers*/ -+#define SCR_PAD0 (REGISTERS_BASE + 0x5608) -+#define SCR_PAD1 (REGISTERS_BASE + 0x560C) -+#define SCR_PAD2 (REGISTERS_BASE + 0x5610) -+#define SCR_PAD3 (REGISTERS_BASE + 0x5614) -+#define SCR_PAD4 (REGISTERS_BASE + 0x5618) -+#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) -+#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) -+#define SCR_PAD5 (REGISTERS_BASE + 0x5624) -+#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) -+#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) -+#define SCR_PAD6 (REGISTERS_BASE + 0x5630) -+#define SCR_PAD7 (REGISTERS_BASE + 0x5634) -+#define SCR_PAD8 (REGISTERS_BASE + 0x5638) -+#define SCR_PAD9 (REGISTERS_BASE + 0x563C) -+ -+/* Spare registers*/ -+#define SPARE_A1 (REGISTERS_BASE + 0x0994) -+#define SPARE_A2 (REGISTERS_BASE + 0x0998) -+#define SPARE_A3 (REGISTERS_BASE + 0x099C) -+#define SPARE_A4 (REGISTERS_BASE + 0x09A0) -+#define SPARE_A5 (REGISTERS_BASE + 0x09A4) -+#define SPARE_A6 (REGISTERS_BASE + 0x09A8) -+#define SPARE_A7 (REGISTERS_BASE + 0x09AC) -+#define SPARE_A8 (REGISTERS_BASE + 0x09B0) -+#define SPARE_B1 (REGISTERS_BASE + 0x5420) -+#define SPARE_B2 (REGISTERS_BASE + 0x5424) -+#define SPARE_B3 (REGISTERS_BASE + 0x5428) -+#define SPARE_B4 (REGISTERS_BASE + 0x542C) -+#define SPARE_B5 (REGISTERS_BASE + 0x5430) -+#define SPARE_B6 (REGISTERS_BASE + 0x5434) -+#define SPARE_B7 (REGISTERS_BASE + 0x5438) -+#define SPARE_B8 (REGISTERS_BASE + 0x543C) -+ -+#define PLL_PARAMETERS (REGISTERS_BASE + 0x6040) -+#define WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008) -+#define WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100) 
-+#define DRPW_SCRATCH_START (DRPW_BASE + 0x002C) -+ -+ -+#define ACX_SLV_SOFT_RESET_BIT BIT(1) -+#define ACX_REG_EEPROM_START_BIT BIT(1) -+ -+/* Command/Information Mailbox Pointers */ -+ -+/*=============================================== -+ Command Mailbox Pointer - 32bit RW -+ ------------------------------------------ -+ This register holds the start address of -+ the command mailbox located in the Wlan hardware memory. -+ The host must read this pointer after a reset to -+ find the location of the command mailbox. -+ The Wlan hardware initializes the command mailbox -+ pointer with the default address of the command mailbox. -+ The command mailbox pointer is not valid until after -+ the host receives the Init Complete interrupt from -+ the Wlan hardware. -+ ===============================================*/ -+#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) -+ -+/*=============================================== -+ Information Mailbox Pointer - 32bit RW -+ ------------------------------------------ -+ This register holds the start address of -+ the information mailbox located in the Wlan hardware memory. -+ The host must read this pointer after a reset to find -+ the location of the information mailbox. -+ The Wlan hardware initializes the information mailbox pointer -+ with the default address of the information mailbox. -+ The information mailbox pointer is not valid -+ until after the host receives the Init Complete interrupt from -+ the Wlan hardware. -+ ===============================================*/ -+#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) -+ -+ -+/* Misc */ -+ -+#define REG_ENABLE_TX_RX (ENABLE) -+/* -+ * Rx configuration (filter) information element -+ * --------------------------------------------- -+ */ -+#define REG_RX_CONFIG (RX_CFG) -+#define REG_RX_FILTER (RX_FILTER_CFG) -+ -+ -+#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002 -+ -+/* promiscuous - receives all valid frames */ -+#define RX_CFG_PROMISCUOUS 0x0008 -+ -+/* receives frames from any BSSID */ -+#define RX_CFG_BSSID 0x0020 -+ -+/* receives frames destined to any MAC address */ -+#define RX_CFG_MAC 0x0010 -+ -+#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010 -+#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000 -+#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020 -+#define RX_CFG_ENABLE_ANY_BSSID 0x0000 -+ -+/* discards all broadcast frames */ -+#define RX_CFG_DISABLE_BCAST 0x0200 -+ -+#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400 -+#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800 -+#define RX_CFG_COPY_RX_STATUS 0x2000 -+#define RX_CFG_TSF 0x10000 -+ -+#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ -+ RX_CFG_ENABLE_ONLY_MY_BSSID) -+ -+#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ -+ | RX_CFG_ENABLE_ANY_BSSID) -+ -+#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ -+ RX_CFG_ENABLE_ANY_BSSID) -+ -+#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ -+ | RX_CFG_ENABLE_ONLY_MY_BSSID) -+ -+#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \ -+ | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \ -+ | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF) -+ -+#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC) -+ -+#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \ -+ RX_CFG_ENABLE_ONLY_MY_DEST_MAC) -+ -+#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \ -+ RX_CFG_ENABLE_ONLY_MY_DEST_MAC) -+ -+#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ -+ | CFG_RX_CTL_EN | CFG_RX_BCN_EN\ -+ | CFG_RX_AUTH_EN | 
CFG_RX_ASSOC_EN) -+ -+#define RX_FILTER_OPTION_FILTER_ALL 0 -+ -+#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\ -+ | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN) -+ -+#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ -+ | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\ -+ | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\ -+ | CFG_RX_PRSP_EN) -+ -+ -+/*=============================================== -+ Phy regs -+ ===============================================*/ -+#define ACX_PHY_ADDR_REG SBB_ADDR -+#define ACX_PHY_DATA_REG SBB_DATA -+#define ACX_PHY_CTRL_REG SBB_CTL -+#define ACX_PHY_REG_WR_MASK 0x00000001ul -+#define ACX_PHY_REG_RD_MASK 0x00000002ul -+ -+ -+/*=============================================== -+ EEPROM Read/Write Request 32bit RW -+ ------------------------------------------ -+ 1 EE_READ - EEPROM Read Request 1 - Setting this bit -+ loads a single byte of data into the EE_DATA -+ register from the EEPROM location specified in -+ the EE_ADDR register. -+ The Wlan hardware hardware clears this bit automatically. -+ EE_DATA is valid when this bit is cleared. -+ -+ 0 EE_WRITE - EEPROM Write Request - Setting this bit -+ writes a single byte of data from the EE_DATA register into the -+ EEPROM location specified in the EE_ADDR register. -+ The Wlan hardware hardware clears this bit automatically. -+*===============================================*/ -+#define ACX_EE_CTL_REG EE_CTL -+#define EE_WRITE 0x00000001ul -+#define EE_READ 0x00000002ul -+ -+/*=============================================== -+ EEPROM Address - 32bit RW -+ ------------------------------------------ -+ This register specifies the address -+ within the EEPROM from/to which to read/write data. -+ ===============================================*/ -+#define ACX_EE_ADDR_REG EE_ADDR -+ -+/*=============================================== -+ EEPROM Data - 32bit RW -+ ------------------------------------------ -+ This register either holds the read 8 bits of -+ data from the EEPROM or the write data -+ to be written to the EEPROM. -+ ===============================================*/ -+#define ACX_EE_DATA_REG EE_DATA -+ -+/*=============================================== -+ EEPROM Base Address - 32bit RW -+ ------------------------------------------ -+ This register holds the upper nine bits -+ [23:15] of the 24-bit Wlan hardware memory -+ address for burst reads from EEPROM accesses. -+ The EEPROM provides the lower 15 bits of this address. -+ The MSB of the address from the EEPROM is ignored. -+ ===============================================*/ -+#define ACX_EE_CFG EE_CFG -+ -+/*=============================================== -+ GPIO Output Values -32bit, RW -+ ------------------------------------------ -+ [31:16] Reserved -+ [15: 0] Specify the output values (at the output driver inputs) for -+ GPIO[15:0], respectively. 
-+ ===============================================*/ -+#define ACX_GPIO_OUT_REG GPIO_OUT -+#define ACX_MAX_GPIO_LINES 15 -+ -+/*=============================================== -+ Contention window -32bit, RW -+ ------------------------------------------ -+ [31:26] Reserved -+ [25:16] Max (0x3ff) -+ [15:07] Reserved -+ [06:00] Current contention window value - default is 0x1F -+ ===============================================*/ -+#define ACX_CONT_WIND_CFG_REG CONT_WIND_CFG -+#define ACX_CONT_WIND_MIN_MASK 0x0000007f -+#define ACX_CONT_WIND_MAX 0x03ff0000 -+ -+/* -+ * Indirect slave register/memory registers -+ * ---------------------------------------- -+ */ -+#define HW_SLAVE_REG_ADDR_REG 0x00000004 -+#define HW_SLAVE_REG_DATA_REG 0x00000008 -+#define HW_SLAVE_REG_CTRL_REG 0x0000000c -+ -+#define SLAVE_AUTO_INC 0x00010000 -+#define SLAVE_NO_AUTO_INC 0x00000000 -+#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000 -+ -+#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR -+#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA -+#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL -+#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL -+ -+#define HW_FUNC_EVENT_INT_EN 0x8000 -+#define HW_FUNC_EVENT_MASK_REG 0x00000034 -+ -+#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP) -+ -+/*=============================================== -+ HI_CFG Interface Configuration Register Values -+ ------------------------------------------ -+ ===============================================*/ -+#define HI_CFG_UART_ENABLE 0x00000004 -+#define HI_CFG_RST232_ENABLE 0x00000008 -+#define HI_CFG_CLOCK_REQ_SELECT 0x00000010 -+#define HI_CFG_HOST_INT_ENABLE 0x00000020 -+#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040 -+#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080 -+#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100 -+#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 -+#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 -+ -+/* -+ * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile -+ * for platforms using active high interrupt level -+ */ -+#ifdef USE_ACTIVE_HIGH -+#define HI_CFG_DEF_VAL \ -+ (HI_CFG_UART_ENABLE | \ -+ HI_CFG_RST232_ENABLE | \ -+ HI_CFG_CLOCK_REQ_SELECT | \ -+ HI_CFG_HOST_INT_ENABLE) -+#else -+#define HI_CFG_DEF_VAL \ -+ (HI_CFG_UART_ENABLE | \ -+ HI_CFG_RST232_ENABLE | \ -+ HI_CFG_CLOCK_REQ_SELECT | \ -+ HI_CFG_HOST_INT_ENABLE) -+ -+#endif -+ -+#define REF_FREQ_19_2 0 -+#define REF_FREQ_26_0 1 -+#define REF_FREQ_38_4 2 -+#define REF_FREQ_40_0 3 -+#define REF_FREQ_33_6 4 -+#define REF_FREQ_NUM 5 -+ -+#define LUT_PARAM_INTEGER_DIVIDER 0 -+#define LUT_PARAM_FRACTIONAL_DIVIDER 1 -+#define LUT_PARAM_ATTN_BB 2 -+#define LUT_PARAM_ALPHA_BB 3 -+#define LUT_PARAM_STOP_TIME_BB 4 -+#define LUT_PARAM_BB_PLL_LOOP_FILTER 5 -+#define LUT_PARAM_NUM 6 -+ -+#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) -+#define USE_EEPROM 0 -+#define SOFT_RESET_MAX_TIME 1000000 -+#define SOFT_RESET_STALL_TIME 1000 -+#define NVS_DATA_BUNDARY_ALIGNMENT 4 -+ -+ -+/* Firmware image load chunk size */ -+#define CHUNK_SIZE 512 -+ -+/* Firmware image header size */ -+#define FW_HDR_SIZE 8 -+ -+#define ECPU_CONTROL_HALT 0x00000101 -+ -+ -+/****************************************************************************** -+ -+ CHANNELS, BAND & REG DOMAINS definitions -+ -+******************************************************************************/ -+ -+ -+enum { -+ RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */ -+ RADIO_BAND_5GHZ = 1, /* 5 Ghz band */ -+ RADIO_BAND_JAPAN_4_9_GHZ = 2, -+ DEFAULT_BAND = RADIO_BAND_2_4GHZ, -+ INVALID_BAND = 0xFE, -+ MAX_RADIO_BANDS = 0xFF -+}; -+ -+enum { -+ NO_RATE = 0, 
-+ RATE_1MBPS = 0x0A, -+ RATE_2MBPS = 0x14, -+ RATE_5_5MBPS = 0x37, -+ RATE_6MBPS = 0x0B, -+ RATE_9MBPS = 0x0F, -+ RATE_11MBPS = 0x6E, -+ RATE_12MBPS = 0x0A, -+ RATE_18MBPS = 0x0E, -+ RATE_22MBPS = 0xDC, -+ RATE_24MBPS = 0x09, -+ RATE_36MBPS = 0x0D, -+ RATE_48MBPS = 0x08, -+ RATE_54MBPS = 0x0C -+}; -+ -+enum { -+ RATE_INDEX_1MBPS = 0, -+ RATE_INDEX_2MBPS = 1, -+ RATE_INDEX_5_5MBPS = 2, -+ RATE_INDEX_6MBPS = 3, -+ RATE_INDEX_9MBPS = 4, -+ RATE_INDEX_11MBPS = 5, -+ RATE_INDEX_12MBPS = 6, -+ RATE_INDEX_18MBPS = 7, -+ RATE_INDEX_22MBPS = 8, -+ RATE_INDEX_24MBPS = 9, -+ RATE_INDEX_36MBPS = 10, -+ RATE_INDEX_48MBPS = 11, -+ RATE_INDEX_54MBPS = 12, -+ RATE_INDEX_MAX = RATE_INDEX_54MBPS, -+ MAX_RATE_INDEX, -+ INVALID_RATE_INDEX = MAX_RATE_INDEX, -+ RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF -+}; -+ -+enum { -+ RATE_MASK_1MBPS = 0x1, -+ RATE_MASK_2MBPS = 0x2, -+ RATE_MASK_5_5MBPS = 0x4, -+ RATE_MASK_11MBPS = 0x20, -+}; -+ -+#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ -+#define OFDM_RATE_BIT BIT(6) -+#define PBCC_RATE_BIT BIT(7) -+ -+enum { -+ CCK_LONG = 0, -+ CCK_SHORT = SHORT_PREAMBLE_BIT, -+ PBCC_LONG = PBCC_RATE_BIT, -+ PBCC_SHORT = PBCC_RATE_BIT | SHORT_PREAMBLE_BIT, -+ OFDM = OFDM_RATE_BIT -+}; -+ -+/****************************************************************************** -+ -+Transmit-Descriptor RATE-SET field definitions... -+ -+Define a new "Rate-Set" for TX path that incorporates the -+Rate & Modulation info into a single 16-bit field. -+ -+TxdRateSet_t: -+b15 - Indicates Preamble type (1=SHORT, 0=LONG). -+ Notes: -+ Must be LONG (0) for 1Mbps rate. -+ Does not apply (set to 0) for RevG-OFDM rates. -+b14 - Indicates PBCC encoding (1=PBCC, 0=not). -+ Notes: -+ Does not apply (set to 0) for rates 1 and 2 Mbps. -+ Does not apply (set to 0) for RevG-OFDM rates. -+b13 - Unused (set to 0). -+b12-b0 - Supported Rate indicator bits as defined below. -+ -+******************************************************************************/ -+ -+ -+#define TNETW1251_CHIP_ID_PG1_0 0x07010101 -+#define TNETW1251_CHIP_ID_PG1_1 0x07020101 -+#define TNETW1251_CHIP_ID_PG1_2 0x07030101 -+ -+/************************************************************************* -+ -+ Interrupt Trigger Register (Host -> WiLink) -+ -+**************************************************************************/ -+ -+/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ -+ -+/* -+ * Host Command Interrupt. Setting this bit masks -+ * the interrupt that the host issues to inform -+ * the FW that it has sent a command -+ * to the Wlan hardware Command Mailbox. -+ */ -+#define INTR_TRIG_CMD BIT(0) -+ -+/* -+ * Host Event Acknowlegde Interrupt. The host -+ * sets this bit to acknowledge that it received -+ * the unsolicited information from the event -+ * mailbox. -+ */ -+#define INTR_TRIG_EVENT_ACK BIT(1) -+ -+/* -+ * The host sets this bit to inform the Wlan -+ * FW that a TX packet is in the XFER -+ * Buffer #0. -+ */ -+#define INTR_TRIG_TX_PROC0 BIT(2) -+ -+/* -+ * The host sets this bit to inform the FW -+ * that it read a packet from RX XFER -+ * Buffer #0. -+ */ -+#define INTR_TRIG_RX_PROC0 BIT(3) -+ -+#define INTR_TRIG_DEBUG_ACK BIT(4) -+ -+#define INTR_TRIG_STATE_CHANGED BIT(5) -+ -+ -+/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ -+ -+/* -+ * The host sets this bit to inform the FW -+ * that it read a packet from RX XFER -+ * Buffer #1. 
-+ */ -+#define INTR_TRIG_RX_PROC1 BIT(17) -+ -+/* -+ * The host sets this bit to inform the Wlan -+ * hardware that a TX packet is in the XFER -+ * Buffer #1. -+ */ -+#define INTR_TRIG_TX_PROC1 BIT(18) -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_rx.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_rx.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_rx.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_rx.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,199 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include "wl1271.h" -+#include "wl1271_acx.h" -+#include "wl1271_reg.h" -+#include "wl1271_rx.h" -+#include "wl1271_spi.h" -+ -+static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, -+ u32 drv_rx_counter) -+{ -+ return status->rx_pkt_descs[drv_rx_counter] & RX_MEM_BLOCK_MASK; -+} -+ -+static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, -+ u32 drv_rx_counter) -+{ -+ return (status->rx_pkt_descs[drv_rx_counter] & RX_BUF_SIZE_MASK) >> -+ RX_BUF_SIZE_SHIFT_DIV; -+} -+ -+/* The values of this table must match the wl1271_rates[] array */ -+static u8 wl1271_rx_rate_to_idx[] = { -+ /* MCS rates are used only with 11n */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */ -+ -+ 11, /* WL1271_RATE_54 */ -+ 10, /* WL1271_RATE_48 */ -+ 9, /* WL1271_RATE_36 */ -+ 8, /* WL1271_RATE_24 */ -+ -+ /* TI-specific rate */ -+ WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */ -+ -+ 7, /* WL1271_RATE_18 */ -+ 6, /* WL1271_RATE_12 */ -+ 3, /* WL1271_RATE_11 */ -+ 5, /* WL1271_RATE_9 */ -+ 4, /* WL1271_RATE_6 */ -+ 2, /* WL1271_RATE_5_5 */ -+ 1, /* WL1271_RATE_2 */ -+ 0 /* WL1271_RATE_1 */ -+}; -+ -+static void wl1271_rx_status(struct wl1271 *wl, -+ struct wl1271_rx_descriptor *desc, -+ struct ieee80211_rx_status *status, -+ u8 beacon) -+{ -+ memset(status, 0, sizeof(struct ieee80211_rx_status)); -+ -+ if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) -+ status->band = IEEE80211_BAND_2GHZ; -+ else -+ wl1271_warning("unsupported band 0x%x", -+ desc->flags & WL1271_RX_DESC_BAND_MASK); -+ -+ /* -+ * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the -+ * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we -+ * only need the mactime for monitor mode. 
For now the mactime is -+ * not valid, so RX_FLAG_TSFT should not be set -+ */ -+ status->signal = desc->rssi; -+ -+ /* FIXME: Should this be optimized? */ -+ status->qual = (desc->rssi - WL1271_RX_MIN_RSSI) * 100 / -+ (WL1271_RX_MAX_RSSI - WL1271_RX_MIN_RSSI); -+ status->qual = min(status->qual, 100); -+ status->qual = max(status->qual, 0); -+ -+ /* -+ * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we -+ * need to divide by two for now, but TI has been discussing about -+ * changing it. This needs to be rechecked. -+ */ -+ status->noise = desc->rssi - (desc->snr >> 1); -+ -+ status->freq = ieee80211_channel_to_frequency(desc->channel); -+ -+ if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { -+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; -+ -+ if (likely(!(desc->flags & WL1271_RX_DESC_DECRYPT_FAIL))) -+ status->flag |= RX_FLAG_DECRYPTED; -+ -+ if (unlikely(desc->flags & WL1271_RX_DESC_MIC_FAIL)) -+ status->flag |= RX_FLAG_MMIC_ERROR; -+ } -+ -+ status->rate_idx = wl1271_rx_rate_to_idx[desc->rate]; -+ -+ if (status->rate_idx == WL1271_RX_RATE_UNSUPPORTED) -+ wl1271_warning("unsupported rate"); -+} -+ -+static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) -+{ -+ struct ieee80211_rx_status rx_status; -+ struct wl1271_rx_descriptor *desc; -+ struct sk_buff *skb; -+ u16 *fc; -+ u8 *buf; -+ u8 beacon = 0; -+ -+ skb = dev_alloc_skb(length); -+ if (!skb) { -+ wl1271_error("Couldn't allocate RX frame"); -+ return; -+ } -+ -+ buf = skb_put(skb, length); -+ wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); -+ -+ /* the data read starts with the descriptor */ -+ desc = (struct wl1271_rx_descriptor *) buf; -+ -+ /* now we pull the descriptor out of the buffer */ -+ skb_pull(skb, sizeof(*desc)); -+ -+ fc = (u16 *)skb->data; -+ if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) -+ beacon = 1; -+ -+ wl1271_rx_status(wl, desc, &rx_status, beacon); -+ -+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, -+ beacon ? 
"beacon" : ""); -+ -+ ieee80211_rx(wl->hw, skb, &rx_status); -+} -+ -+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) -+{ -+ struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; -+ u32 buf_size; -+ u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; -+ u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; -+ u32 mem_block; -+ -+ while (drv_rx_counter != fw_rx_counter) { -+ mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter); -+ buf_size = wl1271_rx_get_buf_size(status, drv_rx_counter); -+ -+ if (buf_size == 0) { -+ wl1271_warning("received empty data"); -+ break; -+ } -+ -+ wl->rx_mem_pool_addr.addr = -+ (mem_block << 8) + wl_mem_map->packet_memory_pool_start; -+ wl->rx_mem_pool_addr.addr_extra = -+ wl->rx_mem_pool_addr.addr + 4; -+ -+ /* Choose the block we want to read */ -+ wl1271_spi_reg_write(wl, WL1271_SLV_REG_DATA, -+ &wl->rx_mem_pool_addr, -+ sizeof(wl->rx_mem_pool_addr), false); -+ -+ wl1271_rx_handle_data(wl, buf_size); -+ -+ wl->rx_counter++; -+ drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; -+ } -+ -+ wl1271_reg_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); -+ -+ /* This is a workaround for some problems in the chip */ -+ wl1271_reg_write32(wl, RX_DRIVER_DUMMY_WRITE_ADDRESS, 0x1); -+ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_rx.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_rx.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_rx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_rx.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,121 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_RX_H__ -+#define __WL1271_RX_H__ -+ -+#include -+ -+#define WL1271_RX_MAX_RSSI -30 -+#define WL1271_RX_MIN_RSSI -95 -+ -+#define WL1271_RX_ALIGN_TO 4 -+#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \ -+ ~(WL1271_RX_ALIGN_TO - 1)) -+ -+#define SHORT_PREAMBLE_BIT BIT(0) -+#define OFDM_RATE_BIT BIT(6) -+#define PBCC_RATE_BIT BIT(7) -+ -+#define PLCP_HEADER_LENGTH 8 -+#define RX_DESC_PACKETID_SHIFT 11 -+#define RX_MAX_PACKET_ID 3 -+ -+#define NUM_RX_PKT_DESC_MOD_MASK 7 -+#define WL1271_RX_RATE_UNSUPPORTED 0xFF -+ -+#define RX_DESC_VALID_FCS 0x0001 -+#define RX_DESC_MATCH_RXADDR1 0x0002 -+#define RX_DESC_MCAST 0x0004 -+#define RX_DESC_STAINTIM 0x0008 -+#define RX_DESC_VIRTUAL_BM 0x0010 -+#define RX_DESC_BCAST 0x0020 -+#define RX_DESC_MATCH_SSID 0x0040 -+#define RX_DESC_MATCH_BSSID 0x0080 -+#define RX_DESC_ENCRYPTION_MASK 0x0300 -+#define RX_DESC_MEASURMENT 0x0400 -+#define RX_DESC_SEQNUM_MASK 0x1800 -+#define RX_DESC_MIC_FAIL 0x2000 -+#define RX_DESC_DECRYPT_FAIL 0x4000 -+ -+/* -+ * RX Descriptor flags: -+ * -+ * Bits 0-1 - band -+ * Bit 2 - STBC -+ * Bit 3 - A-MPDU -+ * Bit 4 - HT -+ * Bits 5-7 - encryption -+ */ -+#define WL1271_RX_DESC_BAND_MASK 0x03 -+#define WL1271_RX_DESC_ENCRYPT_MASK 0xE0 -+ -+#define WL1271_RX_DESC_BAND_BG 0x00 -+#define WL1271_RX_DESC_BAND_J 0x01 -+#define WL1271_RX_DESC_BAND_A 0x02 -+ -+#define WL1271_RX_DESC_STBC BIT(2) -+#define WL1271_RX_DESC_A_MPDU BIT(3) -+#define WL1271_RX_DESC_HT BIT(4) -+ -+#define WL1271_RX_DESC_ENCRYPT_WEP 0x20 -+#define WL1271_RX_DESC_ENCRYPT_TKIP 0x40 -+#define WL1271_RX_DESC_ENCRYPT_AES 0x60 -+#define WL1271_RX_DESC_ENCRYPT_GEM 0x80 -+ -+/* -+ * RX Descriptor status -+ * -+ * Bits 0-2 - status -+ * Bits 3-7 - reserved -+ */ -+#define WL1271_RX_DESC_STATUS_MASK 0x07 -+ -+#define WL1271_RX_DESC_SUCCESS 0x00 -+#define WL1271_RX_DESC_DECRYPT_FAIL 0x01 -+#define WL1271_RX_DESC_MIC_FAIL 0x02 -+#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03 -+ -+#define RX_MEM_BLOCK_MASK 0xFF -+#define RX_BUF_SIZE_MASK 0xFFF00 -+#define RX_BUF_SIZE_SHIFT_DIV 6 -+ -+struct wl1271_rx_descriptor { -+ u16 length; -+ u8 status; -+ u8 flags; -+ u8 rate; -+ u8 channel; -+ s8 rssi; -+ u8 snr; -+ u32 timestamp; -+ u8 packet_class; -+ u8 process_id; -+ u8 pad_len; -+ u8 reserved; -+} __attribute__ ((packed)); -+ -+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_spi.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_spi.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_spi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_spi.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,382 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "wl1271.h" -+#include "wl12xx_80211.h" -+#include "wl1271_spi.h" -+ -+static int wl1271_translate_reg_addr(struct wl1271 *wl, int addr) -+{ -+ return addr - wl->physical_reg_addr + wl->virtual_reg_addr; -+} -+ -+static int wl1271_translate_mem_addr(struct wl1271 *wl, int addr) -+{ -+ return addr - wl->physical_mem_addr + wl->virtual_mem_addr; -+} -+ -+ -+void wl1271_spi_reset(struct wl1271 *wl) -+{ -+ u8 *cmd; -+ struct spi_transfer t; -+ struct spi_message m; -+ -+ cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); -+ if (!cmd) { -+ wl1271_error("could not allocate cmd for spi reset"); -+ return; -+ } -+ -+ memset(&t, 0, sizeof(t)); -+ spi_message_init(&m); -+ -+ memset(cmd, 0xff, WSPI_INIT_CMD_LEN); -+ -+ t.tx_buf = cmd; -+ t.len = WSPI_INIT_CMD_LEN; -+ spi_message_add_tail(&t, &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); -+} -+ -+void wl1271_spi_init(struct wl1271 *wl) -+{ -+ u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; -+ struct spi_transfer t; -+ struct spi_message m; -+ -+ cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); -+ if (!cmd) { -+ wl1271_error("could not allocate cmd for spi init"); -+ return; -+ } -+ -+ memset(crc, 0, sizeof(crc)); -+ memset(&t, 0, sizeof(t)); -+ spi_message_init(&m); -+ -+ /* -+ * Set WSPI_INIT_COMMAND -+ * the data is being send from the MSB to LSB -+ */ -+ cmd[2] = 0xff; -+ cmd[3] = 0xff; -+ cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; -+ cmd[0] = 0; -+ cmd[7] = 0; -+ cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; -+ cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; -+ -+ if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) -+ cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; -+ else -+ cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; -+ -+ cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS -+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS; -+ -+ crc[0] = cmd[1]; -+ crc[1] = cmd[0]; -+ crc[2] = cmd[7]; -+ crc[3] = cmd[6]; -+ crc[4] = cmd[5]; -+ -+ cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; -+ cmd[4] |= WSPI_INIT_CMD_END; -+ -+ t.tx_buf = cmd; -+ t.len = WSPI_INIT_CMD_LEN; -+ spi_message_add_tail(&t, &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); -+} -+ -+/* Set the SPI partitions to access the chip addresses -+ * -+ * There are two VIRTUAL (SPI) partitions (the memory partition and the -+ * registers partition), which are mapped to two different areas of the -+ * PHYSICAL (hardware) memory. This function also makes other checks to -+ * ensure that the partitions are not overlapping. In the diagram below, the -+ * memory partition comes before the register partition, but the opposite is -+ * also supported. -+ * -+ * PHYSICAL address -+ * space -+ * -+ * | | -+ * ...+----+--> mem_start -+ * VIRTUAL address ... | | -+ * space ... | | [PART_0] -+ * ... | | -+ * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size -+ * | | ... | | -+ * |MEM | ... | | -+ * | | ... | | -+ * part_size <--+----+... | | {unused area) -+ * | | ... | | -+ * |REG | ... | | -+ * part_size | | ... | | -+ * + <--+----+... ...+----+--> reg_start -+ * reg_size ... | | -+ * ... | | [PART_1] -+ * ... 
| | -+ * ...+----+--> reg_start + reg_size -+ * | | -+ * -+ */ -+int wl1271_set_partition(struct wl1271 *wl, -+ u32 mem_start, u32 mem_size, -+ u32 reg_start, u32 reg_size) -+{ -+ struct wl1271_partition *partition; -+ struct spi_transfer t; -+ struct spi_message m; -+ size_t len, cmd_len; -+ u32 *cmd; -+ int addr; -+ -+ cmd_len = sizeof(u32) + 2 * sizeof(struct wl1271_partition); -+ cmd = kzalloc(cmd_len, GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; -+ -+ spi_message_init(&m); -+ memset(&t, 0, sizeof(t)); -+ -+ partition = (struct wl1271_partition *) (cmd + 1); -+ addr = HW_ACCESS_PART0_SIZE_ADDR; -+ len = 2 * sizeof(struct wl1271_partition); -+ -+ *cmd |= WSPI_CMD_WRITE; -+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; -+ *cmd |= addr & WSPI_CMD_BYTE_ADDR; -+ -+ wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ -+ /* Make sure that the two partitions together don't exceed the -+ * address range */ -+ if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) { -+ wl1271_debug(DEBUG_SPI, "Total size exceeds maximum virtual" -+ " address range. Truncating partition[0]."); -+ mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size; -+ wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ } -+ -+ if ((mem_start < reg_start) && -+ ((mem_start + mem_size) > reg_start)) { -+ /* Guarantee that the memory partition doesn't overlap the -+ * registers partition */ -+ wl1271_debug(DEBUG_SPI, "End of partition[0] is " -+ "overlapping partition[1]. Adjusted."); -+ mem_size = reg_start - mem_start; -+ wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ } else if ((reg_start < mem_start) && -+ ((reg_start + reg_size) > mem_start)) { -+ /* Guarantee that the register partition doesn't overlap the -+ * memory partition */ -+ wl1271_debug(DEBUG_SPI, "End of partition[1] is" -+ " overlapping partition[0]. 
Adjusted."); -+ reg_size = mem_start - reg_start; -+ wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", -+ mem_start, mem_size); -+ wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", -+ reg_start, reg_size); -+ } -+ -+ partition[0].start = mem_start; -+ partition[0].size = mem_size; -+ partition[1].start = reg_start; -+ partition[1].size = reg_size; -+ -+ wl->physical_mem_addr = mem_start; -+ wl->physical_reg_addr = reg_start; -+ -+ wl->virtual_mem_addr = 0; -+ wl->virtual_reg_addr = mem_size; -+ -+ t.tx_buf = cmd; -+ t.len = cmd_len; -+ spi_message_add_tail(&t, &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ kfree(cmd); -+ -+ return 0; -+} -+ -+void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, -+ size_t len, bool fixed) -+{ -+ struct spi_transfer t[3]; -+ struct spi_message m; -+ u8 *busy_buf; -+ u32 *cmd; -+ -+ cmd = &wl->buffer_cmd; -+ busy_buf = wl->buffer_busyword; -+ -+ *cmd = 0; -+ *cmd |= WSPI_CMD_READ; -+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; -+ *cmd |= addr & WSPI_CMD_BYTE_ADDR; -+ -+ if (fixed) -+ *cmd |= WSPI_CMD_FIXED; -+ -+ spi_message_init(&m); -+ memset(t, 0, sizeof(t)); -+ -+ t[0].tx_buf = cmd; -+ t[0].len = 4; -+ spi_message_add_tail(&t[0], &m); -+ -+ /* Busy and non busy words read */ -+ t[1].rx_buf = busy_buf; -+ t[1].len = WL1271_BUSY_WORD_LEN; -+ spi_message_add_tail(&t[1], &m); -+ -+ t[2].rx_buf = buf; -+ t[2].len = len; -+ spi_message_add_tail(&t[2], &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ /* FIXME: check busy words */ -+ -+ wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); -+ wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); -+} -+ -+void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, -+ size_t len, bool fixed) -+{ -+ struct spi_transfer t[2]; -+ struct spi_message m; -+ u32 *cmd; -+ -+ cmd = &wl->buffer_cmd; -+ -+ *cmd = 0; -+ *cmd |= WSPI_CMD_WRITE; -+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; -+ *cmd |= addr & WSPI_CMD_BYTE_ADDR; -+ -+ if (fixed) -+ *cmd |= WSPI_CMD_FIXED; -+ -+ spi_message_init(&m); -+ memset(t, 0, sizeof(t)); -+ -+ t[0].tx_buf = cmd; -+ t[0].len = sizeof(*cmd); -+ spi_message_add_tail(&t[0], &m); -+ -+ t[1].tx_buf = buf; -+ t[1].len = len; -+ spi_message_add_tail(&t[1], &m); -+ -+ spi_sync(wl->spi, &m); -+ -+ wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); -+ wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); -+} -+ -+void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, -+ size_t len) -+{ -+ int physical; -+ -+ physical = wl1271_translate_mem_addr(wl, addr); -+ -+ wl1271_spi_read(wl, physical, buf, len, false); -+} -+ -+void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, -+ size_t len) -+{ -+ int physical; -+ -+ physical = wl1271_translate_mem_addr(wl, addr); -+ -+ wl1271_spi_write(wl, physical, buf, len, false); -+} -+ -+void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, -+ bool fixed) -+{ -+ int physical; -+ -+ physical = wl1271_translate_reg_addr(wl, addr); -+ -+ wl1271_spi_read(wl, physical, buf, len, fixed); -+} -+ -+void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len, -+ bool fixed) -+{ -+ int physical; -+ -+ physical = wl1271_translate_reg_addr(wl, addr); -+ -+ wl1271_spi_write(wl, physical, buf, len, fixed); -+} -+ -+u32 wl1271_mem_read32(struct wl1271 *wl, int addr) -+{ -+ return wl1271_read32(wl, wl1271_translate_mem_addr(wl, addr)); -+} -+ -+void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val) -+{ -+ wl1271_write32(wl, 
wl1271_translate_mem_addr(wl, addr), val); -+} -+ -+u32 wl1271_reg_read32(struct wl1271 *wl, int addr) -+{ -+ return wl1271_read32(wl, wl1271_translate_reg_addr(wl, addr)); -+} -+ -+void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val) -+{ -+ wl1271_write32(wl, wl1271_translate_reg_addr(wl, addr), val); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_spi.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_spi.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_spi.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_spi.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,113 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (c) 1998-2007 Texas Instruments Incorporated -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_SPI_H__ -+#define __WL1271_SPI_H__ -+ -+#include "wl1271_reg.h" -+ -+#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 -+ -+#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0 -+#define HW_ACCESS_PART0_START_ADDR 0x1FFC4 -+#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8 -+#define HW_ACCESS_PART1_START_ADDR 0x1FFCC -+ -+#define HW_ACCESS_REGISTER_SIZE 4 -+ -+#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 -+ -+#define WSPI_CMD_READ 0x40000000 -+#define WSPI_CMD_WRITE 0x00000000 -+#define WSPI_CMD_FIXED 0x20000000 -+#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000 -+#define WSPI_CMD_BYTE_LENGTH_OFFSET 17 -+#define WSPI_CMD_BYTE_ADDR 0x0001FFFF -+ -+#define WSPI_INIT_CMD_CRC_LEN 5 -+ -+#define WSPI_INIT_CMD_START 0x00 -+#define WSPI_INIT_CMD_TX 0x40 -+/* the extra bypass bit is sampled by the TNET as '1' */ -+#define WSPI_INIT_CMD_BYPASS_BIT 0x80 -+#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07 -+#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80 -+#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00 -+#define WSPI_INIT_CMD_IOD 0x40 -+#define WSPI_INIT_CMD_IP 0x20 -+#define WSPI_INIT_CMD_CS 0x10 -+#define WSPI_INIT_CMD_WS 0x08 -+#define WSPI_INIT_CMD_WSPI 0x01 -+#define WSPI_INIT_CMD_END 0x01 -+ -+#define WSPI_INIT_CMD_LEN 8 -+ -+#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \ -+ ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) -+#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 -+ -+ -+/* Raw target IO, address is not translated */ -+void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, -+ size_t len, bool fixed); -+void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, -+ size_t len, bool fixed); -+ -+/* Memory target IO, address is tranlated to partition 0 */ -+void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, size_t len); -+void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, size_t len); -+u32 wl1271_mem_read32(struct wl1271 *wl, int addr); -+void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val); -+ -+/* Registers IO */ -+void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void 
*buf, size_t len, -+ bool fixed); -+void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len, -+ bool fixed); -+u32 wl1271_reg_read32(struct wl1271 *wl, int addr); -+void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val); -+ -+/* INIT and RESET words */ -+void wl1271_spi_reset(struct wl1271 *wl); -+void wl1271_spi_init(struct wl1271 *wl); -+int wl1271_set_partition(struct wl1271 *wl, -+ u32 part_start, u32 part_size, -+ u32 reg_start, u32 reg_size); -+ -+static inline u32 wl1271_read32(struct wl1271 *wl, int addr) -+{ -+ wl1271_spi_read(wl, addr, &wl->buffer_32, -+ sizeof(wl->buffer_32), false); -+ -+ return wl->buffer_32; -+} -+ -+static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) -+{ -+ wl->buffer_32 = val; -+ wl1271_spi_write(wl, addr, &wl->buffer_32, -+ sizeof(wl->buffer_32), false); -+} -+ -+#endif /* __WL1271_SPI_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_tx.c linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_tx.c ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_tx.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_tx.c 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,376 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#include -+#include -+ -+#include "wl1271.h" -+#include "wl1271_spi.h" -+#include "wl1271_reg.h" -+#include "wl1271_ps.h" -+#include "wl1271_tx.h" -+ -+static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) -+{ -+ int i; -+ -+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) -+ if (wl->tx_frames[i] == NULL) { -+ wl->tx_frames[i] = skb; -+ return i; -+ } -+ -+ return -EBUSY; -+} -+ -+static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra) -+{ -+ struct wl1271_tx_hw_descr *desc; -+ u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; -+ u32 total_blocks, excluded; -+ int id, ret = -EBUSY; -+ -+ /* allocate free identifier for the packet */ -+ id = wl1271_tx_id(wl, skb); -+ if (id < 0) -+ return id; -+ -+ /* approximate the number of blocks required for this packet -+ in the firmware */ -+ /* FIXME: try to figure out what is done here and make it cleaner */ -+ total_blocks = (skb->len) >> TX_HW_BLOCK_SHIFT_DIV; -+ excluded = (total_blocks << 2) + (skb->len & 0xff) + 34; -+ total_blocks += (excluded > 252) ? 
2 : 1; -+ total_blocks += TX_HW_BLOCK_SPARE; -+ -+ if (total_blocks <= wl->tx_blocks_available) { -+ desc = (struct wl1271_tx_hw_descr *)skb_push( -+ skb, total_len - skb->len); -+ -+ desc->extra_mem_blocks = TX_HW_BLOCK_SPARE; -+ desc->total_mem_blocks = total_blocks; -+ desc->id = id; -+ -+ wl->tx_blocks_available -= total_blocks; -+ -+ ret = 0; -+ -+ wl1271_debug(DEBUG_TX, -+ "tx_allocate: size: %d, blocks: %d, id: %d", -+ total_len, total_blocks, id); -+ } else -+ wl->tx_frames[id] = NULL; -+ -+ return ret; -+} -+ -+static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, -+ u32 extra, struct ieee80211_tx_info *control) -+{ -+ struct wl1271_tx_hw_descr *desc; -+ int pad; -+ -+ desc = (struct wl1271_tx_hw_descr *) skb->data; -+ -+ /* configure packet life time */ -+ desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset; -+ desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU; -+ -+ /* configure the tx attributes */ -+ desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; -+ /* FIXME: do we know the packet priority? can we identify mgmt -+ packets, and use max prio for them at least? */ -+ desc->tid = 0; -+ desc->aid = TX_HW_DEFAULT_AID; -+ desc->reserved = 0; -+ -+ /* align the length (and store in terms of words) */ -+ pad = WL1271_TX_ALIGN(skb->len); -+ desc->length = pad >> 2; -+ -+ /* calculate number of padding bytes */ -+ pad = pad - skb->len; -+ desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; -+ -+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); -+ return 0; -+} -+ -+static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb, -+ struct ieee80211_tx_info *control) -+{ -+ -+ struct wl1271_tx_hw_descr *desc; -+ int len; -+ -+ /* FIXME: This is a workaround for getting non-aligned packets. -+ This happens at least with EAPOL packets from the user space. -+ Our DMA requires packets to be aligned on a 4-byte boundary. -+ */ -+ if (unlikely((long)skb->data & 0x03)) { -+ int offset = (4 - (long)skb->data) & 0x03; -+ wl1271_debug(DEBUG_TX, "skb offset %d", offset); -+ -+ /* check whether the current skb can be used */ -+ if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { -+ unsigned char *src = skb->data; -+ -+ /* align the buffer on a 4-byte boundary */ -+ skb_reserve(skb, offset); -+ memmove(skb->data, src, skb->len); -+ } else { -+ wl1271_info("No handler, fixme!"); -+ return -EINVAL; -+ } -+ } -+ -+ len = WL1271_TX_ALIGN(skb->len); -+ -+ /* perform a fixed address block write with the packet */ -+ wl1271_spi_reg_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true); -+ -+ /* write packet new counter into the write access register */ -+ wl->tx_packets_count++; -+ wl1271_reg_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); -+ -+ desc = (struct wl1271_tx_hw_descr *) skb->data; -+ wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", -+ desc->id, skb, len, desc->length); -+ -+ return 0; -+} -+ -+/* caller must hold wl->mutex */ -+static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb) -+{ -+ struct ieee80211_tx_info *info; -+ u32 extra = 0; -+ int ret = 0; -+ u8 idx; -+ -+ if (!skb) -+ return -EINVAL; -+ -+ info = IEEE80211_SKB_CB(skb); -+ -+ if (info->control.hw_key && -+ info->control.hw_key->alg == ALG_TKIP) -+ extra = WL1271_TKIP_IV_SPACE; -+ -+ if (info->control.hw_key) { -+ idx = info->control.hw_key->hw_key_idx; -+ -+ /* FIXME: do we have to do this if we're not using WEP? 
*/ -+ if (unlikely(wl->default_key != idx)) { -+ ret = wl1271_cmd_set_default_wep_key(wl, idx); -+ if (ret < 0) -+ return ret; -+ } -+ } -+ -+ ret = wl1271_tx_allocate(wl, skb, extra); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_tx_fill_hdr(wl, skb, extra, info); -+ if (ret < 0) -+ return ret; -+ -+ ret = wl1271_tx_send_packet(wl, skb, info); -+ if (ret < 0) -+ return ret; -+ -+ return ret; -+} -+ -+void wl1271_tx_work(struct work_struct *work) -+{ -+ struct wl1271 *wl = container_of(work, struct wl1271, tx_work); -+ struct sk_buff *skb; -+ bool woken_up = false; -+ int ret; -+ -+ mutex_lock(&wl->mutex); -+ -+ if (unlikely(wl->state == WL1271_STATE_OFF)) -+ goto out; -+ -+ while ((skb = skb_dequeue(&wl->tx_queue))) { -+ if (!woken_up) { -+ ret = wl1271_ps_elp_wakeup(wl, false); -+ if (ret < 0) -+ goto out; -+ woken_up = true; -+ } -+ -+ ret = wl1271_tx_frame(wl, skb); -+ if (ret == -EBUSY) { -+ /* firmware buffer is full, stop queues */ -+ wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, " -+ "stop queues"); -+ ieee80211_stop_queues(wl->hw); -+ wl->tx_queue_stopped = true; -+ skb_queue_head(&wl->tx_queue, skb); -+ goto out; -+ } else if (ret < 0) { -+ dev_kfree_skb(skb); -+ goto out; -+ } else if (wl->tx_queue_stopped) { -+ /* firmware buffer has space, restart queues */ -+ wl1271_debug(DEBUG_TX, -+ "complete_packet: waking queues"); -+ ieee80211_wake_queues(wl->hw); -+ wl->tx_queue_stopped = false; -+ } -+ } -+ -+out: -+ if (woken_up) -+ wl1271_ps_elp_sleep(wl); -+ -+ mutex_unlock(&wl->mutex); -+} -+ -+static void wl1271_tx_complete_packet(struct wl1271 *wl, -+ struct wl1271_tx_hw_res_descr *result) -+{ -+ -+ struct ieee80211_tx_info *info; -+ struct sk_buff *skb; -+ u32 header_len; -+ int id = result->id; -+ -+ /* check for id legality */ -+ if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) { -+ wl1271_warning("TX result illegal id: %d", id); -+ return; -+ } -+ -+ skb = wl->tx_frames[id]; -+ info = IEEE80211_SKB_CB(skb); -+ -+ /* update packet status */ -+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { -+ if (result->status == TX_SUCCESS) -+ info->flags |= IEEE80211_TX_STAT_ACK; -+ if (result->status & TX_RETRY_EXCEEDED) { -+ info->status.excessive_retries = 1; -+ wl->stats.excessive_retries++; -+ } -+ } -+ -+ info->status.retry_count = result->ack_failures; -+ wl->stats.retry_count += result->ack_failures; -+ -+ /* get header len */ -+ if (info->control.hw_key && -+ info->control.hw_key->alg == ALG_TKIP) -+ header_len = WL1271_TKIP_IV_SPACE + -+ sizeof(struct wl1271_tx_hw_descr); -+ else -+ header_len = sizeof(struct wl1271_tx_hw_descr); -+ -+ wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" -+ " status 0x%x", -+ result->id, skb, result->ack_failures, -+ result->rate_class_index, result->status); -+ -+ /* remove private header from packet */ -+ skb_pull(skb, header_len); -+ -+ /* return the packet to the stack */ -+ ieee80211_tx_status(wl->hw, skb); -+ wl->tx_frames[result->id] = NULL; -+} -+ -+/* Called upon reception of a TX complete interrupt */ -+void wl1271_tx_complete(struct wl1271 *wl, u32 count) -+{ -+ struct wl1271_acx_mem_map *memmap = -+ (struct wl1271_acx_mem_map *)wl->target_mem_map; -+ u32 i; -+ -+ wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); -+ -+ /* read the tx results from the chipset */ -+ wl1271_spi_mem_read(wl, memmap->tx_result, -+ wl->tx_res_if, sizeof(*wl->tx_res_if)); -+ -+ /* verify that the result buffer is not getting overrun */ -+ if (count > TX_HW_RESULT_QUEUE_LEN) { -+ wl1271_warning("TX 
result overflow from chipset: %d", count); -+ count = TX_HW_RESULT_QUEUE_LEN; -+ } -+ -+ /* process the results */ -+ for (i = 0; i < count; i++) { -+ struct wl1271_tx_hw_res_descr *result; -+ u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK; -+ -+ /* process the packet */ -+ result = &(wl->tx_res_if->tx_results_queue[offset]); -+ wl1271_tx_complete_packet(wl, result); -+ -+ wl->tx_results_count++; -+ } -+ -+ /* write host counter to chipset (to ack) */ -+ wl1271_mem_write32(wl, memmap->tx_result + -+ offsetof(struct wl1271_tx_hw_res_if, -+ tx_result_host_counter), -+ wl->tx_res_if->tx_result_fw_counter); -+} -+ -+/* caller must hold wl->mutex */ -+void wl1271_tx_flush(struct wl1271 *wl) -+{ -+ int i; -+ struct sk_buff *skb; -+ struct ieee80211_tx_info *info; -+ -+ /* TX failure */ -+/* control->flags = 0; FIXME */ -+ -+ while ((skb = skb_dequeue(&wl->tx_queue))) { -+ info = IEEE80211_SKB_CB(skb); -+ -+ wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb); -+ -+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) -+ continue; -+ -+ ieee80211_tx_status(wl->hw, skb); -+ } -+ -+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) -+ if (wl->tx_frames[i] != NULL) { -+ skb = wl->tx_frames[i]; -+ info = IEEE80211_SKB_CB(skb); -+ -+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) -+ continue; -+ -+ ieee80211_tx_status(wl->hw, skb); -+ wl->tx_frames[i] = NULL; -+ } -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_tx.h linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_tx.h ---- linux-omap-2.6.28-omap1/drivers/net/wireless/wl12xx/wl1271_tx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/net/wireless/wl12xx/wl1271_tx.h 2011-06-22 13:19:32.943063273 +0200 -@@ -0,0 +1,129 @@ -+/* -+ * This file is part of wl1271 -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * -+ * Contact: Kalle Valo -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef __WL1271_TX_H__ -+#define __WL1271_TX_H__ -+ -+#define TX_HW_BLOCK_SPARE 2 -+#define TX_HW_BLOCK_SHIFT_DIV 8 -+ -+#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 -+/* The chipset reference driver states, that the "aid" value 1 -+ * is for infra-BSS, but is still always used */ -+#define TX_HW_DEFAULT_AID 1 -+ -+#define TX_HW_ATTR_SAVE_RETRIES BIT(0) -+#define TX_HW_ATTR_HEADER_PAD BIT(1) -+#define TX_HW_ATTR_SESSION_COUNTER (BIT(2) | BIT(3) | BIT(4)) -+#define TX_HW_ATTR_RATE_POLICY (BIT(5) | BIT(6) | BIT(7) | \ -+ BIT(8) | BIT(9)) -+#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11)) -+#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12) -+ -+#define TX_HW_ATTR_OFST_SAVE_RETRIES 0 -+#define TX_HW_ATTR_OFST_HEADER_PAD 1 -+#define TX_HW_ATTR_OFST_SESSION_COUNTER 2 -+#define TX_HW_ATTR_OFST_RATE_POLICY 5 -+#define TX_HW_ATTR_OFST_LAST_WORD_PAD 10 -+#define TX_HW_ATTR_OFST_TX_CMPLT_REQ 12 -+ -+#define TX_HW_RESULT_QUEUE_LEN 16 -+#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf -+ -+#define WL1271_TX_ALIGN_TO 4 -+#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \ -+ ~(WL1271_TX_ALIGN_TO - 1)) -+#define WL1271_TKIP_IV_SPACE 4 -+ -+struct wl1271_tx_hw_descr { -+ /* Length of packet in words, including descriptor+header+data */ -+ u16 length; -+ /* Number of extra memory blocks to allocate for this packet in -+ addition to the number of blocks derived from the packet length */ -+ u8 extra_mem_blocks; -+ /* Total number of memory blocks allocated by the host for this packet. -+ Must be equal or greater than the actual blocks number allocated by -+ HW!! */ -+ u8 total_mem_blocks; -+ /* Device time (in us) when the packet arrived to the driver */ -+ u32 start_time; -+ /* Max delay in TUs until transmission. The last device time the -+ packet can be transmitted is: startTime+(1024*LifeTime) */ -+ u16 life_time; -+ /* Bitwise fields - see TX_ATTR... definitions above. */ -+ u16 tx_attr; -+ /* Packet identifier used also in the Tx-Result. */ -+ u8 id; -+ /* The packet TID value (as User-Priority) */ -+ u8 tid; -+ /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ -+ u8 aid; -+ u8 reserved; -+} __attribute__ ((packed)); -+ -+enum wl1271_tx_hw_res_status { -+ TX_SUCCESS = 0, -+ TX_HW_ERROR = 1, -+ TX_DISABLED = 2, -+ TX_RETRY_EXCEEDED = 3, -+ TX_TIMEOUT = 4, -+ TX_KEY_NOT_FOUND = 5, -+ TX_PEER_NOT_FOUND = 6, -+ TX_SESSION_MISMATCH = 7 -+}; -+ -+struct wl1271_tx_hw_res_descr { -+ /* Packet Identifier - same value used in the Tx descriptor.*/ -+ u8 id; -+ /* The status of the transmission, indicating success or one of -+ several possible reasons for failure. */ -+ u8 status; -+ /* Total air access duration including all retrys and overheads.*/ -+ u16 medium_usage; -+ /* The time passed from host xfer to Tx-complete.*/ -+ u32 fw_handling_time; -+ /* Total media delay -+ (from 1st EDCA AIFS counter until TX Complete). */ -+ u32 medium_delay; -+ /* LS-byte of last TKIP seq-num (saved per AC for recovery). */ -+ u8 lsb_security_sequence_number; -+ /* Retry count - number of transmissions without successful ACK.*/ -+ u8 ack_failures; -+ /* The rate that succeeded getting ACK -+ (Valid only if status=SUCCESS). */ -+ u8 rate_class_index; -+ /* for 4-byte alignment. 
*/ -+ u8 spare; -+} __attribute__ ((packed)); -+ -+struct wl1271_tx_hw_res_if { -+ u32 tx_result_fw_counter; -+ u32 tx_result_host_counter; -+ struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; -+} __attribute__ ((packed)); -+ -+void wl1271_tx_work(struct work_struct *work); -+void wl1271_tx_complete(struct wl1271 *wl, u32 count); -+void wl1271_tx_flush(struct wl1271 *wl); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/regulator/twl4030-regulator.c linux-omap-2.6.28-nokia1/drivers/regulator/twl4030-regulator.c ---- linux-omap-2.6.28-omap1/drivers/regulator/twl4030-regulator.c 2011-06-22 13:14:19.593067728 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/regulator/twl4030-regulator.c 2011-06-22 13:19:32.993063272 +0200 -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - - /* -@@ -81,6 +82,69 @@ twl4030reg_write(struct twlreg_info *inf - value, info->base + offset); - } - -+static int twl4030_wait_pb_ready(void) -+{ -+ -+ u8 pb_status; -+ int status, timeout = 10; -+ -+ do { -+ status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, -+ &pb_status, 0x14); -+ if (status < 0) -+ return status; -+ -+ if (!(pb_status & 1)) -+ return 0; -+ -+ mdelay(1); -+ timeout--; -+ -+ } while (timeout); -+ -+ return -ETIMEDOUT; -+} -+ -+static int twl4030_send_pb_msg(unsigned msg) -+{ -+ -+ u8 pb_state; -+ int status; -+ -+ /* save powerbus configuration */ -+ status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, -+ &pb_state, 0x14); -+ if (status < 0) -+ return status; -+ -+ /* Enable I2C access to powerbus */ -+ status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, -+ pb_state | (1<<1), 0x14); -+ if (status < 0) -+ return status; -+ -+ status = twl4030_wait_pb_ready(); -+ if (status < 0) -+ return status; -+ -+ status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, msg >> 8, -+ 0x15 /* PB_WORD_MSB */); -+ if (status < 0) -+ return status; -+ -+ status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, msg & 0xff, -+ 0x16 /* PB_WORD_LSB */); -+ if (status < 0) -+ return status; -+ -+ status = twl4030_wait_pb_ready(); -+ if (status < 0) -+ return status; -+ -+ /* Restore powerbus configuration */ -+ return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, pb_state, 0x14); -+} -+ - /*----------------------------------------------------------------------*/ - - /* generic power resource operations, which work on all regulators */ -@@ -113,14 +177,21 @@ static int twl4030reg_is_enabled(struct - static int twl4030reg_enable(struct regulator_dev *rdev) - { - struct twlreg_info *info = rdev_get_drvdata(rdev); -- int grp; -+ int grp, status; -+ unsigned message; - - grp = twl4030reg_read(info, VREG_GRP); - if (grp < 0) - return grp; - - grp |= P1_GRP; -- return twl4030reg_write(info, VREG_GRP, grp); -+ status = twl4030reg_write(info, VREG_GRP, grp); -+ if (status < 0) -+ return status; -+ -+ message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_ACTIVE); -+ -+ return twl4030_send_pb_msg(message); - } - - static int twl4030reg_disable(struct regulator_dev *rdev) -@@ -177,13 +248,7 @@ static int twl4030reg_set_mode(struct re - if (!(status & (P3_GRP | P2_GRP | P1_GRP))) - return -EACCES; - -- status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, -- message >> 8, 0x15 /* PB_WORD_MSB */ ); -- if (status >= 0) -- return status; -- -- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, -- message, 0x16 /* PB_WORD_LSB */ ); -+ return twl4030_send_pb_msg(message); - } - - /*----------------------------------------------------------------------*/ -diff -Nurp 
linux-omap-2.6.28-omap1/drivers/serial/8250.c linux-omap-2.6.28-nokia1/drivers/serial/8250.c ---- linux-omap-2.6.28-omap1/drivers/serial/8250.c 2011-06-22 13:14:20.383067718 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/serial/8250.c 2011-06-22 13:19:33.003063272 +0200 -@@ -1456,6 +1456,16 @@ static void serial8250_handle_port(struc - if (status & UART_LSR_THRE) - transmit_chars(up); - -+#ifdef CONFIG_ARCH_OMAP -+ /* -+ * OMAP3 UART has a special RX_FIFO_STATUS bit that will stall -+ * RX transfer on FIFO overflow until the RX fifo is cleared. -+ */ -+ if (cpu_is_omap34xx() && is_omap_port(up) && -+ status & UART_LSR_RX_FIFO_STS) -+ serial_outp(up, UART_FCR, uart_config[up->port.type].fcr | -+ UART_FCR_CLEAR_RCVR); -+#endif - spin_unlock_irqrestore(&up->port.lock, flags); - } - -diff -Nurp linux-omap-2.6.28-omap1/drivers/spi/omap2_mcspi.c linux-omap-2.6.28-nokia1/drivers/spi/omap2_mcspi.c ---- linux-omap-2.6.28-omap1/drivers/spi/omap2_mcspi.c 2011-06-22 13:14:20.463067715 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/spi/omap2_mcspi.c 2011-06-22 13:19:33.013063272 +0200 -@@ -41,6 +41,9 @@ - - #define OMAP2_MCSPI_MAX_FREQ 48000000 - -+/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */ -+#define OMAP2_MCSPI_MAX_CTRL 4 -+ - #define OMAP2_MCSPI_REVISION 0x00 - #define OMAP2_MCSPI_SYSCONFIG 0x10 - #define OMAP2_MCSPI_SYSSTATUS 0x14 -@@ -131,8 +134,23 @@ struct omap2_mcspi_cs { - void __iomem *base; - unsigned long phys; - int word_len; -+ struct list_head node; -+ /* Context save and restore shadow register */ -+ u32 chconf0; -+}; -+ -+/* used for context save and restore, structure members to be updated whenever -+ * corresponding registers are modified. -+ */ -+struct omap2_mcspi_regs { -+ u32 sysconfig; -+ u32 modulctrl; -+ u32 wakeupenable; -+ struct list_head cs; - }; - -+static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL]; -+ - static struct workqueue_struct *omap2_mcspi_wq; - - #define MOD_REG_BIT(val, mask, set) do { \ -@@ -172,12 +190,27 @@ static inline u32 mcspi_read_cs_reg(cons - return __raw_readl(cs->base + idx); - } - -+static inline u32 mcspi_cached_chconf0(const struct spi_device *spi) -+{ -+ struct omap2_mcspi_cs *cs = spi->controller_state; -+ -+ return cs->chconf0; -+} -+ -+static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val) -+{ -+ struct omap2_mcspi_cs *cs = spi->controller_state; -+ -+ cs->chconf0 = val; -+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); -+} -+ - static void omap2_mcspi_set_dma_req(const struct spi_device *spi, - int is_read, int enable) - { - u32 l, rw; - -- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); -+ l = mcspi_cached_chconf0(spi); - - if (is_read) /* 1 is read, 0 write */ - rw = OMAP2_MCSPI_CHCONF_DMAR; -@@ -185,7 +218,7 @@ static void omap2_mcspi_set_dma_req(cons - rw = OMAP2_MCSPI_CHCONF_DMAW; - - MOD_REG_BIT(l, rw, enable); -- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); -+ mcspi_write_chconf0(spi, l); - } - - static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) -@@ -200,9 +233,9 @@ static void omap2_mcspi_force_cs(struct - { - u32 l; - -- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); -+ l = mcspi_cached_chconf0(spi); - MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active); -- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); -+ mcspi_write_chconf0(spi, l); - } - - static void omap2_mcspi_set_master_mode(struct spi_master *master) -@@ -217,6 +250,46 @@ static void omap2_mcspi_set_master_mode( - MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0); - MOD_REG_BIT(l, 
OMAP2_MCSPI_MODULCTRL_SINGLE, 1); - mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); -+ -+ omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l; -+} -+ -+static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) -+{ -+ struct spi_master *spi_cntrl; -+ struct omap2_mcspi_cs *cs; -+ spi_cntrl = mcspi->master; -+ -+ /* McSPI: context restore */ -+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, -+ omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); -+ -+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG, -+ omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig); -+ -+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, -+ omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); -+ -+ list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs, -+ node) -+ __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); -+} -+static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) -+{ -+ clk_disable(mcspi->ick); -+ clk_disable(mcspi->fck); -+} -+ -+static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) -+{ -+ if (clk_enable(mcspi->ick)) -+ return -ENODEV; -+ if (clk_enable(mcspi->fck)) -+ return -ENODEV; -+ -+ omap2_mcspi_restore_ctx(mcspi); -+ -+ return 0; - } - - static unsigned -@@ -338,7 +411,7 @@ omap2_mcspi_txrx_pio(struct spi_device * - c = count; - word_len = cs->word_len; - -- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); -+ l = mcspi_cached_chconf0(spi); - l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; - - /* We store the pre-calculated register addresses on stack to speed -@@ -378,8 +451,7 @@ omap2_mcspi_txrx_pio(struct spi_device * - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) -- mcspi_write_cs_reg(spi, -- OMAP2_MCSPI_CHCONF0, l); -+ mcspi_write_chconf0(spi, l); - *rx++ = __raw_readl(rx_reg); - #ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %02x\n", -@@ -417,8 +489,7 @@ omap2_mcspi_txrx_pio(struct spi_device * - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) -- mcspi_write_cs_reg(spi, -- OMAP2_MCSPI_CHCONF0, l); -+ mcspi_write_chconf0(spi, l); - *rx++ = __raw_readl(rx_reg); - #ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", -@@ -456,8 +527,7 @@ omap2_mcspi_txrx_pio(struct spi_device * - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) -- mcspi_write_cs_reg(spi, -- OMAP2_MCSPI_CHCONF0, l); -+ mcspi_write_chconf0(spi, l); - *rx++ = __raw_readl(rx_reg); - #ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", -@@ -486,10 +556,12 @@ static int omap2_mcspi_setup_transfer(st - { - struct omap2_mcspi_cs *cs = spi->controller_state; - struct omap2_mcspi *mcspi; -+ struct spi_master *spi_cntrl; - u32 l = 0, div = 0; - u8 word_len = spi->bits_per_word; - - mcspi = spi_master_get_devdata(spi->master); -+ spi_cntrl = mcspi->master; - - if (t != NULL && t->bits_per_word) - word_len = t->bits_per_word; -@@ -503,7 +575,7 @@ static int omap2_mcspi_setup_transfer(st - } else - div = 15; - -- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); -+ l = mcspi_cached_chconf0(spi); - - /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS - * REVISIT: this controller could support SPI_3WIRE mode. 
-@@ -535,7 +607,7 @@ static int omap2_mcspi_setup_transfer(st - else - l &= ~OMAP2_MCSPI_CHCONF_PHA; - -- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l); -+ mcspi_write_chconf0(spi, l); - - dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", - OMAP2_MCSPI_MAX_FREQ / (1 << div), -@@ -639,7 +711,11 @@ static int omap2_mcspi_setup(struct spi_ - return -ENOMEM; - cs->base = mcspi->base + spi->chip_select * 0x14; - cs->phys = mcspi->phys + spi->chip_select * 0x14; -+ cs->chconf0 = 0; - spi->controller_state = cs; -+ /* Link this to context save list */ -+ list_add_tail(&cs->node, -+ &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs); - } - - if (mcspi_dma->dma_rx_channel == -1 -@@ -649,11 +725,11 @@ static int omap2_mcspi_setup(struct spi_ - return ret; - } - -- clk_enable(mcspi->ick); -- clk_enable(mcspi->fck); -+ if (omap2_mcspi_enable_clocks(mcspi)) -+ return -ENODEV; -+ - ret = omap2_mcspi_setup_transfer(spi, NULL); -- clk_disable(mcspi->fck); -- clk_disable(mcspi->ick); -+ omap2_mcspi_disable_clocks(mcspi); - - return ret; - } -@@ -662,10 +738,15 @@ static void omap2_mcspi_cleanup(struct s - { - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; -+ struct omap2_mcspi_cs *cs; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - -+ /* Unlink controller state from context save list */ -+ cs = spi->controller_state; -+ list_del(&cs->node); -+ - kfree(spi->controller_state); - - if (mcspi_dma->dma_rx_channel != -1) { -@@ -685,8 +766,8 @@ static void omap2_mcspi_work(struct work - mcspi = container_of(work, struct omap2_mcspi, work); - spin_lock_irq(&mcspi->lock); - -- clk_enable(mcspi->ick); -- clk_enable(mcspi->fck); -+ if (omap2_mcspi_enable_clocks(mcspi)) -+ goto out; - - /* We only enable one channel at a time -- the one whose message is - * at the head of the queue -- although this controller would gladly -@@ -733,13 +814,13 @@ static void omap2_mcspi_work(struct work - cs_active = 1; - } - -- chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); -+ chconf = mcspi_cached_chconf0(spi); - chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; - if (t->tx_buf == NULL) - chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; - else if (t->rx_buf == NULL) - chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; -- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf); -+ mcspi_write_chconf0(spi, chconf); - - if (t->len) { - unsigned count; -@@ -788,9 +869,9 @@ static void omap2_mcspi_work(struct work - spin_lock_irq(&mcspi->lock); - } - -- clk_disable(mcspi->fck); -- clk_disable(mcspi->ick); -+ omap2_mcspi_disable_clocks(mcspi); - -+out: - spin_unlock_irq(&mcspi->lock); - } - -@@ -877,8 +958,8 @@ static int __init omap2_mcspi_reset(stru - struct spi_master *master = mcspi->master; - u32 tmp; - -- clk_enable(mcspi->ick); -- clk_enable(mcspi->fck); -+ if (omap2_mcspi_enable_clocks(mcspi)) -+ return -1; - - mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, - OMAP2_MCSPI_SYSCONFIG_SOFTRESET); -@@ -886,18 +967,18 @@ static int __init omap2_mcspi_reset(stru - tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS); - } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); - -- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, -- OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | -- OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | -- OMAP2_MCSPI_SYSCONFIG_SMARTIDLE); -- -- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, -- OMAP2_MCSPI_WAKEUPENABLE_WKEN); -+ tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | -+ OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | -+ OMAP2_MCSPI_SYSCONFIG_SMARTIDLE; -+ mcspi_write_reg(master, 
OMAP2_MCSPI_SYSCONFIG, tmp); -+ omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp; -+ -+ tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; -+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); -+ omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp; - - omap2_mcspi_set_master_mode(master); -- -- clk_disable(mcspi->fck); -- clk_disable(mcspi->ick); -+ omap2_mcspi_disable_clocks(mcspi); - return 0; - } - -@@ -1027,6 +1108,7 @@ static int __init omap2_mcspi_probe(stru - - spin_lock_init(&mcspi->lock); - INIT_LIST_HEAD(&mcspi->msg_queue); -+ INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); - - mcspi->ick = clk_get(&pdev->dev, "mcspi_ick"); - if (IS_ERR(mcspi->ick)) { -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/core/Kconfig linux-omap-2.6.28-nokia1/drivers/usb/core/Kconfig ---- linux-omap-2.6.28-omap1/drivers/usb/core/Kconfig 2011-06-22 13:14:20.973067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/core/Kconfig 2011-06-22 13:19:33.023063272 +0200 -@@ -104,11 +104,10 @@ config USB_SUSPEND - - config USB_OTG - bool -- depends on USB && EXPERIMENTAL -+ depends on USB - select USB_SUSPEND - default n - -- - config USB_OTG_WHITELIST - bool "Rely on OTG Targeted Peripherals List" - depends on USB_OTG || EMBEDDED -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/core/otg_whitelist.h linux-omap-2.6.28-nokia1/drivers/usb/core/otg_whitelist.h ---- linux-omap-2.6.28-omap1/drivers/usb/core/otg_whitelist.h 2011-06-22 13:10:45.523070771 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/core/otg_whitelist.h 2011-06-22 13:19:33.033063272 +0200 -@@ -9,104 +9,12 @@ - * (at your option) any later version. - */ - --/* -- * This OTG Whitelist is the OTG "Targeted Peripheral List". It should -- * mostly use of USB_DEVICE() or USB_DEVICE_VER() entries.. -- * -- * YOU _SHOULD_ CHANGE THIS LIST TO MATCH YOUR PRODUCT AND ITS TESTING! -- */ -- --static struct usb_device_id whitelist_table [] = { -- --/* hubs are optional in OTG, but very handy ... */ --{ USB_DEVICE_INFO(USB_CLASS_HUB, 0, 0), }, --{ USB_DEVICE_INFO(USB_CLASS_HUB, 0, 1), }, -- --#ifdef CONFIG_USB_PRINTER /* ignoring nonstatic linkage! */ --/* FIXME actually, printers are NOT supposed to use device classes; -- * they're supposed to use interface classes... -- */ --{ USB_DEVICE_INFO(7, 1, 1) }, --{ USB_DEVICE_INFO(7, 1, 2) }, --{ USB_DEVICE_INFO(7, 1, 3) }, --#endif -- --#ifdef CONFIG_USB_NET_CDCETHER --/* Linux-USB CDC Ethernet gadget */ --{ USB_DEVICE(0x0525, 0xa4a1), }, --/* Linux-USB CDC Ethernet + RNDIS gadget */ --{ USB_DEVICE(0x0525, 0xa4a2), }, --#endif -- --#if defined(CONFIG_USB_TEST) || defined(CONFIG_USB_TEST_MODULE) --/* gadget zero, for testing */ --{ USB_DEVICE(0x0525, 0xa4a0), }, --#endif -- --{ } /* Terminating entry */ --}; -- --static int is_targeted(struct usb_device *dev) --{ -- struct usb_device_id *id = whitelist_table; -- -- /* possible in developer configs only! */ -- if (!dev->bus->otg_port) -- return 1; -- -- /* HNP test device is _never_ targeted (see OTG spec 6.6.6) */ -- if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && -- le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) -- return 0; -- -- /* NOTE: can't use usb_match_id() since interface caches -- * aren't set up yet. this is cut/paste from that code. 
-- */ -- for (id = whitelist_table; id->match_flags; id++) { -- if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && -- id->idVendor != le16_to_cpu(dev->descriptor.idVendor)) -- continue; -- -- if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) && -- id->idProduct != le16_to_cpu(dev->descriptor.idProduct)) -- continue; -- -- /* No need to test id->bcdDevice_lo != 0, since 0 is never -- greater than any unsigned number. */ -- if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) && -- (id->bcdDevice_lo > le16_to_cpu(dev->descriptor.bcdDevice))) -- continue; -- -- if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) && -- (id->bcdDevice_hi < le16_to_cpu(dev->descriptor.bcdDevice))) -- continue; -- -- if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) && -- (id->bDeviceClass != dev->descriptor.bDeviceClass)) -- continue; -- -- if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) && -- (id->bDeviceSubClass != dev->descriptor.bDeviceSubClass)) -- continue; -- -- if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) && -- (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol)) -- continue; -- -- return 1; -- } -- -- /* add other match criteria here ... */ -- -- -- /* OTG MESSAGE: report errors here, customize to match your product */ -- dev_err(&dev->dev, "device v%04x p%04x is not supported\n", -- le16_to_cpu(dev->descriptor.idVendor), -- le16_to_cpu(dev->descriptor.idProduct)); - #ifdef CONFIG_USB_OTG_WHITELIST -- return 0; -+extern int is_targeted(struct usb_device *); - #else -- return 1; --#endif -+static inline int is_targeted(struct usb_device *d) -+{ -+ return 0; - } -+#endif - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/composite.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/composite.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/composite.c 2011-06-22 13:14:20.983067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/composite.c 2011-06-22 13:19:33.033063272 +0200 -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - #include - -@@ -69,6 +70,41 @@ static char *iSerialNumber; - module_param(iSerialNumber, charp, 0); - MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); - -+/** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * Windows can only handle 1 usb configuration at a time. -+ * -+ * In order to work around that issue, we will have a retry -+ * method implemented in such a way that we try one configuration -+ * at a time until one works. -+ * -+ * What we do is that we connect with 500mA configuration, if that -+ * doesn't work, we disconnect from the bus, change to 100mA and try -+ * again, if that still doesn't work, we disconnect and try 8mA, -+ * if that doesn't work we give up. -+ */ -+ -+/* To determine whether a configuration worked or no, we use a timer. -+ * If the time required to get a SET_CONFIG request exceeds the timeout, -+ * it means the configuration failed. We then use the next config. 
-+ */ -+ -+static struct timer_list cdev_set_config_timer; -+ -+static void cdev_set_config_timeout(unsigned long _gadget) -+{ -+ struct usb_gadget *gadget = (void *) _gadget; -+ -+ /* Configuration failed, so disconnect from bus and use next config */ -+ gadget->get_config = 0; -+ usb_gadget_disconnect(gadget); -+ /* sleep to allow host see our disconnect */ -+ mdelay(500); -+ gadget->cindex++; -+ usb_gadget_connect(gadget); -+} -+ - /*-------------------------------------------------------------------------*/ - - /** -@@ -149,16 +185,17 @@ done: - int usb_function_deactivate(struct usb_function *function) - { - struct usb_composite_dev *cdev = function->config->cdev; -+ unsigned long flags; - int status = 0; - -- spin_lock(&cdev->lock); -+ spin_lock_irqsave(&cdev->lock, flags); - - if (cdev->deactivations == 0) - status = usb_gadget_disconnect(cdev->gadget); - if (status == 0) - cdev->deactivations++; - -- spin_unlock(&cdev->lock); -+ spin_unlock_irqrestore(&cdev->lock, flags); - return status; - } - -@@ -280,12 +317,15 @@ static int config_buf(struct usb_configu - return len; - } - -+static int count_configs(struct usb_composite_dev *cdev, unsigned type); -+ - static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) - { - struct usb_gadget *gadget = cdev->gadget; - struct usb_configuration *c; - u8 type = w_value >> 8; - enum usb_device_speed speed = USB_SPEED_UNKNOWN; -+ u8 index; - - if (gadget_is_dualspeed(gadget)) { - int hs = 0; -@@ -300,7 +340,27 @@ static int config_desc(struct usb_compos - } - - /* This is a lookup by config *INDEX* */ -- w_value &= 0xff; -+ index = w_value & 0xFF; -+ if (!index) { -+ u8 num_configs; -+ -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * This is us giving up, if this one doesn't work -+ * then user will have to take action, we can't -+ * got any further -+ */ -+ index = gadget->cindex; -+ num_configs = count_configs(cdev, USB_DT_DEVICE); -+ if (index >= num_configs - 1) { -+ del_timer(&cdev_set_config_timer); -+ gadget->set_config = 1; -+ /* Restrict to the last configuration */ -+ index = num_configs - 1; -+ } -+ } -+ - list_for_each_entry(c, &cdev->configs, list) { - /* ignore configs that won't work at this speed */ - if (speed == USB_SPEED_HIGH) { -@@ -310,10 +370,17 @@ static int config_desc(struct usb_compos - if (!c->fullspeed) - continue; - } -- if (w_value == 0) -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * We need to keep track of which configuration to try this -+ * time in order to make Windows happy. 
-+ */ -+ if (index == 0) - return config_buf(c, speed, cdev->req->buf, type); -- w_value--; -+ index--; - } -+ - return -EINVAL; - } - -@@ -357,7 +424,7 @@ static void device_qual(struct usb_compo - qual->bDeviceProtocol = cdev->desc.bDeviceProtocol; - /* ASSUME same EP0 fifo size at both speeds */ - qual->bMaxPacketSize0 = cdev->desc.bMaxPacketSize0; -- qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER); -+ qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE); - qual->bRESERVED = 0; - } - -@@ -707,6 +774,7 @@ composite_setup(struct usb_gadget *gadge - case USB_DT_DEVICE: - cdev->desc.bNumConfigurations = - count_configs(cdev, USB_DT_DEVICE); -+ - value = min(w_length, (u16) sizeof cdev->desc); - memcpy(req->buf, &cdev->desc, value); - break; -@@ -725,6 +793,13 @@ composite_setup(struct usb_gadget *gadge - value = config_desc(cdev, w_value); - if (value >= 0) - value = min(w_length, (u16) value); -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * Note that we got a get_config -+ */ -+ gadget->get_config = 1; -+ DBG(cdev, "get_config = 1\n"); - break; - case USB_DT_STRING: - value = get_string(cdev, req->buf, -@@ -750,6 +825,15 @@ composite_setup(struct usb_gadget *gadge - spin_lock(&cdev->lock); - value = set_config(cdev, ctrl, w_value); - spin_unlock(&cdev->lock); -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * We got a SetConfiguration, meaning Windows accepted -+ * our configuration descriptor, so stop the retry -+ * timer and let device work. -+ */ -+ gadget->set_config = 1; -+ DBG(cdev, "set_config = 1\n"); - break; - case USB_REQ_GET_CONFIGURATION: - if (ctrl->bRequestType != USB_DIR_IN) -@@ -842,11 +926,31 @@ done: - return value; - } - -+/** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * This hook was introduced to differentiate between -+ * BUS RESET and DISCONNECT events. All we do here -+ * is delete our retry timer so we don't retry -+ * forever. -+ */ -+static void composite_vbus_disconnect(struct usb_gadget *gadget) -+{ -+ struct usb_composite_dev *cdev = get_gadget_data(gadget); -+ -+ DBG(cdev, "%s\n", __func__); -+ del_timer(&cdev_set_config_timer); -+ gadget->cindex = 0; -+ gadget->set_config = 0; -+ gadget->get_config = 0; -+} -+ - static void composite_disconnect(struct usb_gadget *gadget) - { - struct usb_composite_dev *cdev = get_gadget_data(gadget); - unsigned long flags; - -+ DBG(cdev, "%s\n", __func__); - /* REVISIT: should we have config and device level - * disconnect callbacks? - */ -@@ -854,6 +958,16 @@ static void composite_disconnect(struct - if (cdev->config) - reset_config(cdev); - spin_unlock_irqrestore(&cdev->lock, flags); -+ -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. 
-+ * -+ * Port RESET so maintain cindex but reset get_config -+ * and set_config flags so we can try other configurations -+ */ -+ -+ gadget->set_config = 0; -+ gadget->get_config = 0; - } - - /*-------------------------------------------------------------------------*/ -@@ -942,6 +1056,9 @@ static int __init composite_bind(struct - cdev->gadget = gadget; - set_gadget_data(gadget, cdev); - INIT_LIST_HEAD(&cdev->configs); -+ cdev->gadget->cindex = 0; -+ cdev->gadget->set_config = 0; -+ cdev->gadget->get_config = 0; - - /* preallocate control response and buffer */ - cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL); -@@ -956,8 +1073,6 @@ static int __init composite_bind(struct - cdev->bufsiz = USB_BUFSIZ; - cdev->driver = composite; - -- usb_gadget_set_selfpowered(gadget); -- - /* interface and string IDs start at zero via kzalloc. - * we force endpoints to start unassigned; few controller - * drivers will zero ep->driver_data. -@@ -996,6 +1111,8 @@ static int __init composite_bind(struct - string_override(composite->strings, - cdev->desc.iSerialNumber, iSerialNumber); - -+ setup_timer(&cdev_set_config_timer, cdev_set_config_timeout, -+ (unsigned long) gadget); - INFO(cdev, "%s ready\n", composite->name); - return 0; - -@@ -1006,6 +1123,11 @@ fail: - - /*-------------------------------------------------------------------------*/ - -+/** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * we use suspend to try another configuration -+ */ - static void - composite_suspend(struct usb_gadget *gadget) - { -@@ -1022,6 +1144,17 @@ composite_suspend(struct usb_gadget *gad - f->suspend(f); - } - } -+ -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * we try another configuration if we have received -+ * a get_config but not a set_config -+ */ -+ if (gadget->get_config && !gadget->set_config) { -+ mod_timer(&cdev_set_config_timer, -+ jiffies + msecs_to_jiffies(10)); -+ } - } - - static void -@@ -1052,6 +1185,7 @@ static struct usb_gadget_driver composit - - .setup = composite_setup, - .disconnect = composite_disconnect, -+ .vbus_disconnect = composite_vbus_disconnect, - - .suspend = composite_suspend, - .resume = composite_resume, -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/f_acm.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_acm.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/f_acm.c 2011-06-22 13:14:20.983067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_acm.c 2011-06-22 13:19:33.033063272 +0200 -@@ -340,10 +340,13 @@ static int acm_setup(struct usb_function - - value = 0; - -- /* FIXME we should not allow data to flow until the -- * host sets the ACM_CTRL_DTR bit; and when it clears -- * that bit, we should return to that no-flow state. -+ /* REVISIT Hangup would be the right way, but since the hooks -+ * are not there we need to connect/disconnect. 
- */ -+ if (w_value & ACM_CTRL_DTR) -+ gserial_connect(&acm->port, acm->port_num); -+ else -+ gserial_disconnect(&acm->port); - acm->port_handshake_bits = w_value; - break; - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/file_storage.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/file_storage.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/file_storage.c 2011-06-22 13:14:21.003067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/file_storage.c 2011-06-22 13:19:33.033063272 +0200 -@@ -87,6 +87,8 @@ - * removable Default false, boolean for removable media - * luns=N Default N = number of filenames, number of - * LUNs to support -+ * fua=N Default N = 0, boolean for ignoring FUA flag -+ * in SCSI WRITE(10,12) commands - * stall Default determined according to the type of - * USB device controller (usually true), - * boolean to permit the driver to halt -@@ -103,14 +105,14 @@ - * PAGE_CACHE_SIZE) - * - * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro", -- * "removable", "luns", and "stall" options are available; default values -- * are used for everything else. -+ * "removable", "luns", "fua" and "stall" options are available; default -+ * values are used for everything else. - * - * The pathnames of the backing files and the ro settings are available in -- * the attribute files "file" and "ro" in the lun subdirectory of the -- * gadget's sysfs directory. If the "removable" option is set, writing to -- * these files will simulate ejecting/loading the medium (writing an empty -- * line means eject) and adjusting a write-enable tab. Changes to the ro -+ * the attribute files "file", "ro" and "fua" in the lun subdirectory of -+ * the gadget's sysfs directory. If the "removable" option is set, writing -+ * to "file" will simulate ejecting/loading the medium (writing an empty -+ * line means eject) and adjusting a write-enable tab. Changes to the "ro" - * setting are not allowed when the medium is loaded. - * - * This gadget driver is heavily based on "Gadget Zero" by David Brownell. 
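/*
 * Illustrative aside (not from the patch): the documentation hunk above
 * introduces the new "fua" option.  Later in this file the patched
 * do_write() honours the Force Unit Access flag of WRITE(10)/WRITE(12)
 * only when that option is set, forcing synchronous writeback for the
 * command.  A tiny stand-alone C sketch of that decision; the CDB layout
 * is the one the driver itself checks (byte 1, bit 3):
 */
#include <stdbool.h>
#include <stdio.h>

#define SCSI_CDB_FUA    0x08    /* byte 1, bit 3 of WRITE(10)/WRITE(12) */

static bool want_sync_write(const unsigned char *cdb, bool lun_fua_enabled)
{
        /* Mirrors "(fsg->cmnd[1] & 0x08) && curlun->fua" in do_write(). */
        return (cdb[1] & SCSI_CDB_FUA) && lun_fua_enabled;
}

int main(void)
{
        unsigned char write10[10] = { 0x2a, 0x08 };     /* WRITE(10), FUA set */

        printf("fua honoured: %d\n", want_sync_write(write10, true));
        printf("fua ignored:  %d\n", want_sync_write(write10, false));
        return 0;
}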
-@@ -238,10 +240,13 @@ - #include - #include - #include -+#include - - #include - #include - -+#include -+ - #include "gadget_chips.h" - - -@@ -266,6 +271,13 @@ - static const char longname[] = DRIVER_DESC; - static const char shortname[] = DRIVER_NAME; - -+static const char manufacturer_nokia[] = "Nokia"; -+static const char longname_770[] = "Nokia 770"; -+static const char longname_n800[] = "Nokia N800 Internet Tablet"; -+static const char longname_n810[] = "Nokia N810 Internet Tablet"; -+static const char longname_n810_wimax[] = "Nokia N810 Internet Tablet WiMAX Edition"; -+static const char longname_rx51[] = "N900 (Storage Mode)"; -+ - MODULE_DESCRIPTION(DRIVER_DESC); - MODULE_AUTHOR("Alan Stern"); - MODULE_LICENSE("Dual BSD/GPL"); -@@ -341,6 +353,7 @@ static struct { - - int removable; - int can_stall; -+ int fua; - - char *transport_parm; - char *protocol_parm; -@@ -357,12 +370,13 @@ static struct { - } mod_data = { // Default values - .transport_parm = "BBB", - .protocol_parm = "SCSI", -- .removable = 0, -- .can_stall = 1, -+ .removable = 1, -+ .can_stall = 0, -+ .fua = 0, - .vendor = DRIVER_VENDOR_ID, - .product = DRIVER_PRODUCT_ID, - .release = 0xffff, // Use controller chip type -- .buflen = 16384, -+ .buflen = 64 * 1024, - }; - - -@@ -382,6 +396,8 @@ MODULE_PARM_DESC(removable, "true to sim - module_param_named(stall, mod_data.can_stall, bool, S_IRUGO); - MODULE_PARM_DESC(stall, "false to prevent bulk stalls"); - -+module_param_named(fua, mod_data.fua, bool, S_IRUGO); -+MODULE_PARM_DESC(fua, "true to obey SCSI WRITE(6,10,12) FUA bit"); - - /* In the non-TEST version, only the module parameters listed above - * are available. */ -@@ -553,6 +569,8 @@ struct lun { - unsigned int prevent_medium_removal : 1; - unsigned int registered : 1; - unsigned int info_valid : 1; -+ unsigned int fua:1; -+ unsigned int direct:1; - - u32 sense_data; - u32 sense_data_info; -@@ -574,7 +592,10 @@ static struct lun *dev_to_lun(struct dev - #define DELAYED_STATUS (EP0_BUFSIZE + 999) // An impossibly large value - - /* Number of buffers we will use. 
2 is enough for double-buffering */ --#define NUM_BUFFERS 2 -+/* FIXME: fsg_buffhd's should be allocated dynamically */ -+#define NUM_BUFFERS 256 -+/* FIXME: MEMLIMIT should be a parameter */ -+#define MEMLIMIT (1024 * 1024) - - enum fsg_buffer_state { - BUF_STATE_EMPTY = 0, -@@ -582,8 +603,15 @@ enum fsg_buffer_state { - BUF_STATE_BUSY - }; - -+struct fsg_dev; -+ - struct fsg_buffhd { -+ struct rb_node rb_node; -+ sector_t sector; -+ int sectors; -+ - void *buf; -+ size_t buflen; - enum fsg_buffer_state state; - struct fsg_buffhd *next; - -@@ -596,6 +624,8 @@ struct fsg_buffhd { - int inreq_busy; - struct usb_request *outreq; - int outreq_busy; -+ -+ struct fsg_dev *fsg; - }; - - enum fsg_state { -@@ -666,6 +696,10 @@ struct fsg_dev { - struct fsg_buffhd *next_buffhd_to_fill; - struct fsg_buffhd *next_buffhd_to_drain; - struct fsg_buffhd buffhds[NUM_BUFFERS]; -+ int num_buffers; -+ -+ /* Tree to find direct I/O's with overlapping sectors */ -+ struct rb_root bio_tree; - - int thread_wakeup_needed; - struct completion thread_notifier; -@@ -718,7 +752,6 @@ static struct fsg_dev *the_fsg; - static struct usb_gadget_driver fsg_driver; - - static void close_backing_file(struct lun *curlun); --static void close_all_backing_files(struct fsg_dev *fsg); - - - /*-------------------------------------------------------------------------*/ -@@ -816,11 +849,13 @@ static void put_be32(u8 *buf, u32 val) - #define STRING_MANUFACTURER 1 - #define STRING_PRODUCT 2 - #define STRING_SERIAL 3 --#define STRING_CONFIG 4 --#define STRING_INTERFACE 5 -- --/* There is only one configuration. */ --#define CONFIG_VALUE 1 -+#define STRING_CONFIG_MAXPOWER 4 -+#define STRING_CONFIG_SELFPOWERED 5 -+#define STRING_INTERFACE 6 -+ -+/* The configurations */ -+#define CONFIG_VALUE_MAXPOWER 1 -+#define CONFIG_VALUE_SELFPOWERED 2 - - static struct usb_device_descriptor - device_desc = { -@@ -838,20 +873,33 @@ device_desc = { - .iManufacturer = STRING_MANUFACTURER, - .iProduct = STRING_PRODUCT, - .iSerialNumber = STRING_SERIAL, -- .bNumConfigurations = 1, -+ .bNumConfigurations = 2, - }; - - static struct usb_config_descriptor --config_desc = { -- .bLength = sizeof config_desc, -+config_desc_500 = { -+ .bLength = sizeof config_desc_500, - .bDescriptorType = USB_DT_CONFIG, - - /* wTotalLength computed by usb_gadget_config_buf() */ - .bNumInterfaces = 1, -- .bConfigurationValue = CONFIG_VALUE, -- .iConfiguration = STRING_CONFIG, -+ .bConfigurationValue = CONFIG_VALUE_MAXPOWER, -+ .iConfiguration = STRING_CONFIG_MAXPOWER, -+ .bmAttributes = USB_CONFIG_ATT_ONE, /* Bus powered */ -+ .bMaxPower = 250, /* 500mA */ -+}; -+ -+static struct usb_config_descriptor -+config_desc_100 = { -+ .bLength = sizeof config_desc_100, -+ .bDescriptorType = USB_DT_CONFIG, -+ -+ /* wTotalLength computed by usb_gadget_config_buf() */ -+ .bNumInterfaces = 1, -+ .bConfigurationValue = CONFIG_VALUE_SELFPOWERED, -+ .iConfiguration = STRING_CONFIG_SELFPOWERED, - .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, -- .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2, -+ .bMaxPower = 50, /* 100mA */ - }; - - static struct usb_otg_descriptor -@@ -937,7 +985,7 @@ dev_qualifier = { - .bcdUSB = __constant_cpu_to_le16(0x0200), - .bDeviceClass = USB_CLASS_PER_INTERFACE, - -- .bNumConfigurations = 1, -+ .bNumConfigurations = 2, - }; - - static struct usb_endpoint_descriptor -@@ -1003,7 +1051,8 @@ static struct usb_string strings[] = { - {STRING_MANUFACTURER, manufacturer}, - {STRING_PRODUCT, longname}, - {STRING_SERIAL, serial}, -- {STRING_CONFIG, 
"Self-powered"}, -+ {STRING_CONFIG_MAXPOWER, "Max power"}, -+ {STRING_CONFIG_SELFPOWERED, "Self-powered"}, - {STRING_INTERFACE, "Mass Storage"}, - {} - }; -@@ -1013,6 +1062,93 @@ static struct usb_gadget_strings stringt - .strings = strings, - }; - -+/* -+ * Find overlapped bio in fsg->bio_tree rb tree. -+ */ -+static int fsg_rbtree_find(struct fsg_dev *fsg, sector_t s, -+ unsigned int sectors) -+{ -+ struct rb_node *n; -+ struct fsg_buffhd *tmp; -+ int found = 0; -+ -+ spin_lock_irq(&fsg->lock); -+ n = fsg->bio_tree.rb_node; -+ while (n) { -+ tmp = rb_entry(n, struct fsg_buffhd, rb_node); -+ if (s + sectors <= tmp->sector) -+ n = n->rb_left; -+ else if (s >= tmp->sector + tmp->sectors) -+ n = n->rb_right; -+ else { -+ found = 1; -+ break; -+ } -+ } -+ spin_unlock_irq(&fsg->lock); -+ return found; -+} -+ -+/* -+ * Insert a node into the fsg->bio_tree rb tree. -+ */ -+static void fsg_rbtree_insert(struct fsg_dev *fsg, struct fsg_buffhd *node) -+{ -+ struct rb_node **p; -+ struct rb_node *parent = NULL; -+ struct fsg_buffhd *tmp; -+ -+ spin_lock_irq(&fsg->lock); -+ p = &fsg->bio_tree.rb_node; -+ -+ while (*p) { -+ parent = *p; -+ tmp = rb_entry(parent, struct fsg_buffhd, rb_node); -+ if (node->sector < tmp->sector) -+ p = &(*p)->rb_left; -+ else -+ p = &(*p)->rb_right; -+ } -+ rb_link_node(&node->rb_node, parent, p); -+ rb_insert_color(&node->rb_node, &fsg->bio_tree); -+ spin_unlock_irq(&fsg->lock); -+} -+ -+/** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * Windows can only handle 1 usb configuration at a time. -+ * -+ * In order to work around that issue, we will have a retry -+ * method implemented in such a way that we try one configuration -+ * at a time until one works. -+ * -+ * What we do is that we connect with 500mA configuration, if that -+ * doesn't work, we disconnect from the bus, change to 100mA and try -+ * again, if that still doesn't work, we disconnect and try 8mA, -+ * if that doesn't work we give up. -+ */ -+ -+/* To determine whether a configuration worked or no, we use a timer. -+ * If the time required to get a SET_CONFIG request exceeds the timeout, -+ * it means the configuration failed. We then use the next config. -+ */ -+static struct timer_list fsg_set_config_timer; -+ -+static void fsg_set_config_timeout(unsigned long _gadget) -+{ -+ struct usb_gadget *gadget = (void *) _gadget; -+ struct fsg_dev *fsg = get_gadget_data(gadget); -+ -+ /* Configuration failed, so disconnect from bus and use next config */ -+ fsg->gadget->get_config = 0; -+ usb_gadget_disconnect(gadget); -+ /* sleep to allow host see our disconnect */ -+ mdelay(500); -+ gadget->cindex++; -+ usb_gadget_connect(gadget); -+ DBG(fsg, "%s cindex %d\n", __func__, gadget->cindex); -+} - - /* - * Config descriptors must agree with the code that sets configurations -@@ -1025,10 +1161,36 @@ static int populate_config_buf(struct us - enum usb_device_speed speed = gadget->speed; - int len; - const struct usb_descriptor_header **function; -+ struct usb_config_descriptor *config; - -- if (index > 0) -+ if (index > 1) - return -EINVAL; - -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * We need to keep track of which configuration to try this -+ * time in order to make Windows happy. we don't implement -+ * the hack if host sends non zero index. -+ */ -+ if (!index) { -+ index = gadget->cindex; -+ -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. 
-+ * -+ * This is us giving up, if this one doesn't work -+ * then user will have to take action, we can't -+ * got any further -+ */ -+ if (index >= 1) { -+ del_timer(&fsg_set_config_timer); -+ gadget->set_config = 1; -+ /* Restrict to the last configuration */ -+ index = 1; -+ } -+ } -+ - if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG) - speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed; - if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH) -@@ -1036,11 +1198,23 @@ static int populate_config_buf(struct us - else - function = fs_function; - -+ switch (index) { -+ case 0: -+ config = &config_desc_500; -+ break; -+ case 1: -+ default: -+ config = &config_desc_100; -+ break; -+ } -+ - /* for now, don't advertise srp-only devices */ -- if (!gadget_is_otg(gadget)) -+ if (machine_is_nokia770() || machine_is_nokia_n800() -+ || machine_is_nokia_rx51() -+ || !gadget_is_otg(gadget)) - function++; - -- len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function); -+ len = usb_gadget_config_buf(config, buf, EP0_BUFSIZE, function); - ((struct usb_config_descriptor *) buf)->bDescriptorType = type; - return len; - } -@@ -1092,6 +1266,33 @@ static void fsg_disconnect(struct usb_ga - - DBG(fsg, "disconnect or port reset\n"); - raise_exception(fsg, FSG_STATE_DISCONNECT); -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * We need to know we're gonna enumerate so we can -+ * apply our retry method. -+ */ -+ gadget->set_config = 0; -+ gadget->get_config = 0; -+} -+ -+/** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * This hook was introduced to differentiate between -+ * BUS RESET and DISCONNECT events. All we do here -+ * is delete our retry timer so we don't retry -+ * forever. -+ */ -+static void fsg_vbus_disconnect(struct usb_gadget *gadget) -+{ -+ struct fsg_dev *fsg = get_gadget_data(gadget); -+ -+ DBG(fsg, "%s\n", __func__); -+ del_timer(&fsg_set_config_timer); -+ gadget->cindex = 0; -+ gadget->set_config = 0; -+ gadget->get_config = 0; - } - - -@@ -1379,6 +1580,13 @@ get_config: - req->buf, - w_value >> 8, - w_value & 0xff); -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * Note that we got a get_config -+ */ -+ fsg->gadget->get_config = 1; -+ DBG(fsg, "get_config = 1\n"); - break; - - case USB_DT_STRING: -@@ -1397,7 +1605,8 @@ get_config: - USB_RECIP_DEVICE)) - break; - VDBG(fsg, "set configuration\n"); -- if (w_value == CONFIG_VALUE || w_value == 0) { -+ if (w_value == CONFIG_VALUE_MAXPOWER || w_value == 0 -+ || w_value == CONFIG_VALUE_SELFPOWERED) { - fsg->new_config = w_value; - - /* Raise an exception to wipe out previous transaction -@@ -1405,6 +1614,15 @@ get_config: - raise_exception(fsg, FSG_STATE_CONFIG_CHANGE); - value = DELAYED_STATUS; - } -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * We got a SetConfiguration, meaning Windows accepted -+ * our configuration descriptor, so stop the retry -+ * timer and let device work. 
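/*
 * Illustrative aside (not from the patch): the "UGLY UGLY HACK" hunks in
 * file_storage.c boil down to a small state machine built from the fields
 * this series adds to struct usb_gadget (cindex, get_config, set_config)
 * plus one timer.  The condensed restatement below reuses the patch's own
 * names and calls; it is a sketch of the control flow, not a drop-in
 * replacement for the real hooks.
 */
/* GET_DESCRIPTOR(CONFIGURATION): the host at least looked at us. */
static void retry_saw_get_config(struct usb_gadget *gadget)
{
        gadget->get_config = 1;
}

/* SET_CONFIGURATION: the host accepted a configuration, stop retrying. */
static void retry_saw_set_config(struct usb_gadget *gadget)
{
        gadget->set_config = 1;
        del_timer(&fsg_set_config_timer);
}

/* Suspend after a get_config without a set_config: arm the retry timer. */
static void retry_on_suspend(struct usb_gadget *gadget)
{
        if (gadget->get_config && !gadget->set_config)
                mod_timer(&fsg_set_config_timer,
                          jiffies + msecs_to_jiffies(10));
}

/* Timer expiry: drop off the bus and come back offering the next config. */
static void retry_timeout(struct usb_gadget *gadget)
{
        gadget->get_config = 0;
        usb_gadget_disconnect(gadget);
        mdelay(500);            /* long enough for the host to notice */
        gadget->cindex++;
        usb_gadget_connect(gadget);
}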
-+ */ -+ fsg->gadget->set_config = 1; -+ DBG(fsg, "set_config = 1\n"); - break; - case USB_REQ_GET_CONFIGURATION: - if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | -@@ -1548,6 +1766,96 @@ static int sleep_thread(struct fsg_dev * - - /*-------------------------------------------------------------------------*/ - -+static void direct_read_end_io(struct bio *bio, int err) -+{ -+ if (err) -+ clear_bit(BIO_UPTODATE, &bio->bi_flags); -+ -+ complete(bio->bi_private); -+} -+ -+/* -+ * FIXME: Caller expects entire 'amount' to be read which means either: -+ * a) the maximum buflen must be less-than-or-equal the maximum I/O size -+ * or b) more than one bio must be submitted -+ */ -+/* FIXME: Needs an equivalent of readahead */ -+static ssize_t direct_read(struct file *file, struct fsg_buffhd *bh, -+ size_t amount, loff_t *pos) -+{ -+ DECLARE_COMPLETION_ONSTACK(wait); -+ unsigned max_pages = (amount >> PAGE_SHIFT) + 1; -+ unsigned remains = amount; -+ ssize_t totlen = 0; -+ struct page *page; -+ struct bio *bio; -+ char *p = bh->buf; -+ int rc; -+ -+ if (!amount) -+ return 0; -+ -+ if (*pos & 511 || amount & 511) -+ return -EINVAL; -+ -+ bio = bio_alloc(GFP_KERNEL, max_pages); -+ if (!bio) -+ return -ENOMEM; -+ -+ bio->bi_sector = *pos >> 9; -+ bio->bi_bdev = file->f_path.dentry->d_inode->i_bdev; -+ bio->bi_end_io = direct_read_end_io; -+ bio->bi_private = &wait; -+ -+ while (remains) { -+ unsigned offset, len; -+ -+ page = virt_to_page(p); -+ offset = offset_in_page(p); -+ len = PAGE_SIZE - offset; -+ if (len > remains) -+ len = remains; -+ len = bio_add_page(bio, page, len, offset); -+ if (!len) -+ break; -+ remains -= len; -+ totlen += len; -+ p += len; -+ } -+ -+ if (!totlen) { -+ bio_put(bio); -+ return -EINVAL; -+ } -+ -+ while (fsg_rbtree_find(bh->fsg, bio->bi_sector, -+ bio_sectors(bio))) { -+ rc = sleep_thread(bh->fsg); -+ if (rc) { -+ bio_put(bio); -+ return rc; -+ } -+ } -+ -+ submit_bio(READ, bio); -+ -+ wait_for_completion(&wait); -+ -+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { -+ totlen = -EIO; -+ goto out; -+ } -+ -+ *pos += totlen; -+out: -+ bio_put(bio); -+ -+ return totlen; -+} -+ -+ -+/*-------------------------------------------------------------------------*/ -+ - static int do_read(struct fsg_dev *fsg) - { - struct lun *curlun = fsg->curlun; -@@ -1588,6 +1896,14 @@ static int do_read(struct fsg_dev *fsg) - - for (;;) { - -+ /* Wait for the next buffer to become available */ -+ bh = fsg->next_buffhd_to_fill; -+ while (bh->state != BUF_STATE_EMPTY) { -+ rc = sleep_thread(fsg); -+ if (rc) -+ return rc; -+ } -+ - /* Figure out how much we need to read: - * Try to read the remaining amount. - * But don't read more than the buffer size. -@@ -1596,7 +1912,7 @@ static int do_read(struct fsg_dev *fsg) - * the next page. - * If this means reading 0 then we were asked to read past - * the end of file. */ -- amount = min((unsigned int) amount_left, mod_data.buflen); -+ amount = min((unsigned int) amount_left, bh->buflen); - amount = min((loff_t) amount, - curlun->file_length - file_offset); - partial_page = file_offset & (PAGE_CACHE_SIZE - 1); -@@ -1604,14 +1920,6 @@ static int do_read(struct fsg_dev *fsg) - amount = min(amount, (unsigned int) PAGE_CACHE_SIZE - - partial_page); - -- /* Wait for the next buffer to become available */ -- bh = fsg->next_buffhd_to_fill; -- while (bh->state != BUF_STATE_EMPTY) { -- rc = sleep_thread(fsg); -- if (rc) -- return rc; -- } -- - /* If we were asked to read past the end of file, - * end with an empty buffer. 
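/*
 * Illustrative aside (not from the patch): direct_read() above is a
 * synchronous, single-bio read; after checking the rb-tree for
 * overlapping in-flight writes it submits the bio and waits on an
 * on-stack completion that the end_io callback signals.  The fragment
 * below shows just that submit-and-wait shape with the 2.6.28-era bio
 * API the patch uses; the sketch_* names are made up for the example.
 */
#include <linux/bio.h>
#include <linux/completion.h>

static void sketch_end_io(struct bio *bio, int err)
{
        if (err)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        complete(bio->bi_private);              /* wake the submitter */
}

static int sketch_sync_read(struct block_device *bdev, sector_t sector,
                            struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        int ret = 0;

        if (!bio)
                return -ENOMEM;

        bio->bi_sector = sector;
        bio->bi_bdev = bdev;
        bio->bi_end_io = sketch_end_io;
        bio->bi_private = &done;
        if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
                bio_put(bio);
                return -EINVAL;
        }

        submit_bio(READ, bio);
        wait_for_completion(&done);             /* block until end_io ran */

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                ret = -EIO;
        bio_put(bio);
        return ret;
}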
*/ - if (amount == 0) { -@@ -1626,9 +1934,13 @@ static int do_read(struct fsg_dev *fsg) - - /* Perform the read */ - file_offset_tmp = file_offset; -- nread = vfs_read(curlun->filp, -- (char __user *) bh->buf, -- amount, &file_offset_tmp); -+ if (curlun->direct) -+ nread = direct_read(curlun->filp, bh, -+ amount, &file_offset_tmp); -+ else -+ nread = vfs_read(curlun->filp, -+ (char __user *) bh->buf, -+ amount, &file_offset_tmp); - VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, - (unsigned long long) file_offset, - (int) nread); -@@ -1674,6 +1986,102 @@ static int do_read(struct fsg_dev *fsg) - - /*-------------------------------------------------------------------------*/ - -+static void direct_write_end_io(struct bio *bio, int err) -+{ -+ struct fsg_buffhd *bh = bio->bi_private; -+ struct fsg_dev *fsg = bh->fsg; -+ unsigned long flags; -+ -+ if (err) { -+ /* FIXME: how to let host know about this error */ -+ printk(KERN_ERR "direct_write_end_io: err %d\n", err); -+ clear_bit(BIO_UPTODATE, &bio->bi_flags); -+ } -+ -+ /* FIXME: smp barriers are not necessary for this this driver */ -+ smp_wmb(); -+ spin_lock_irqsave(&fsg->lock, flags); -+ rb_erase(&bh->rb_node, &fsg->bio_tree); -+ bh->state = BUF_STATE_EMPTY; -+ wakeup_thread(fsg); -+ spin_unlock_irqrestore(&fsg->lock, flags); -+ -+ bio_put(bio); -+} -+ -+/* -+ * FIXME: Caller expects entire 'amount' to be written which means either: -+ * a) the maximum buflen must be less-than-or-equal the maximum I/O size -+ * or b) more than one bio must be submitted -+ */ -+static ssize_t direct_write(struct file *file, struct fsg_buffhd *bh, size_t amount, loff_t *pos) -+{ -+ unsigned max_pages = (amount >> PAGE_SHIFT) + 1; -+ unsigned remains = amount; -+ ssize_t totlen = 0; -+ struct page *page; -+ struct bio *bio; -+ char *p = bh->buf; -+ int rc; -+ -+ if (!amount) -+ return 0; -+ -+ if (*pos & 511 || amount & 511) -+ return -EINVAL; -+ -+ bio = bio_alloc(GFP_KERNEL, max_pages); -+ if (!bio) -+ return -ENOMEM; -+ -+ bio->bi_sector = *pos >> 9; -+ bio->bi_bdev = file->f_path.dentry->d_inode->i_bdev; -+ bio->bi_end_io = direct_write_end_io; -+ bio->bi_private = bh; -+ -+ while (remains) { -+ unsigned offset, len; -+ -+ page = virt_to_page(p); -+ offset = offset_in_page(p); -+ len = PAGE_SIZE - offset; -+ if (len > remains) -+ len = remains; -+ len = bio_add_page(bio, page, len, offset); -+ if (!len) -+ break; -+ remains -= len; -+ totlen += len; -+ p += len; -+ } -+ -+ if (!totlen) { -+ bio_put(bio); -+ return -EINVAL; -+ } -+ -+ bh->state = BUF_STATE_BUSY; -+ bh->sector = bio->bi_sector; -+ bh->sectors = bio_sectors(bio); -+ while (fsg_rbtree_find(bh->fsg, bh->sector, bh->sectors)) { -+ rc = sleep_thread(bh->fsg); -+ if (rc) { -+ bio_put(bio); -+ return rc; -+ } -+ } -+ fsg_rbtree_insert(bh->fsg, bh); -+ -+ submit_bio(WRITE, bio); -+ -+ *pos += totlen; -+ -+ return totlen; -+} -+ -+ -+/*-------------------------------------------------------------------------*/ -+ - static int do_write(struct fsg_dev *fsg) - { - struct lun *curlun = fsg->curlun; -@@ -1708,7 +2116,8 @@ static int do_write(struct fsg_dev *fsg) - curlun->sense_data = SS_INVALID_FIELD_IN_CDB; - return -EINVAL; - } -- if (fsg->cmnd[1] & 0x08) // FUA -+ /* FUA */ -+ if ((fsg->cmnd[1] & 0x08) && curlun->fua) - curlun->filp->f_flags |= O_SYNC; - } - if (lba >= curlun->num_sectors) { -@@ -1736,7 +2145,7 @@ static int do_write(struct fsg_dev *fsg) - * If this means getting 0, then we were asked - * to write past the end of file. - * Finally, round down to a block boundary. 
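/*
 * Illustrative aside (not from the patch): buffers may now be smaller
 * than mod_data.buflen, because the rewritten allocation loop later in
 * this file halves the request size when large kmallocs fail.  do_read(),
 * do_write() and do_verify() therefore clamp each transfer to the
 * individual buffer's bh->buflen.  A stand-alone sketch of that part of
 * the min() chain (the page-boundary rounding is omitted):
 */
#include <stdio.h>

static unsigned int clamp_chunk(unsigned int amount_left,
                                unsigned int buf_capacity,
                                unsigned long long bytes_to_eof)
{
        unsigned int amount = amount_left;

        if (amount > buf_capacity)
                amount = buf_capacity;
        if (amount > bytes_to_eof)
                amount = (unsigned int) bytes_to_eof;
        return amount;
}

int main(void)
{
        /* A buffer that fell back to 4 KiB limits a 64 KiB request. */
        printf("%u\n", clamp_chunk(64 * 1024, 4096, 1ULL << 20));
        return 0;
}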
*/ -- amount = min(amount_left_to_req, mod_data.buflen); -+ amount = min(amount_left_to_req, bh->buflen); - amount = min((loff_t) amount, curlun->file_length - - usb_offset); - partial_page = usb_offset & (PAGE_CACHE_SIZE - 1); -@@ -1807,9 +2216,13 @@ static int do_write(struct fsg_dev *fsg) - - /* Perform the write */ - file_offset_tmp = file_offset; -- nwritten = vfs_write(curlun->filp, -- (char __user *) bh->buf, -- amount, &file_offset_tmp); -+ if (curlun->direct) -+ nwritten = direct_write(curlun->filp, bh, -+ amount, &file_offset_tmp); -+ else -+ nwritten = vfs_write(curlun->filp, -+ (char __user *) bh->buf, -+ amount, &file_offset_tmp); - VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, - (unsigned long long) file_offset, - (int) nwritten); -@@ -1971,7 +2384,7 @@ static int do_verify(struct fsg_dev *fsg - * And don't try to read past the end of the file. - * If this means reading 0 then we were asked to read - * past the end of file. */ -- amount = min((unsigned int) amount_left, mod_data.buflen); -+ amount = min((unsigned int) amount_left, bh->buflen); - amount = min((loff_t) amount, - curlun->file_length - file_offset); - if (amount == 0) { -@@ -2024,6 +2437,21 @@ static int do_inquiry(struct fsg_dev *fs - static char vendor_id[] = "Linux "; - static char product_id[] = "File-Stor Gadget"; - -+#if defined(CONFIG_MACH_NOKIA770) || defined(CONFIG_MACH_NOKIA_N800) \ -+ || defined(CONFIG_MACH_NOKIA_N810) || defined(CONFIG_MACH_NOKIA_N810_WIMAX) \ -+ || defined(CONFIG_MACH_NOKIA_RX51) -+ sprintf(vendor_id, "Nokia "); -+ if (machine_is_nokia770()) -+ sprintf(product_id, "770 "); -+ else if (machine_is_nokia_n800()) -+ sprintf(product_id, "N800 "); -+ else if (machine_is_nokia_n810()) -+ sprintf(product_id, "N810 "); -+ else if (machine_is_nokia_n810_wimax()) -+ sprintf(product_id, "N810 WiMAX "); -+ else -+ sprintf(product_id, "N900 "); -+#endif - if (!fsg->curlun) { // Unsupported LUNs are okay - fsg->bad_lun_okay = 1; - memset(buf, 0, 36); -@@ -2153,7 +2581,7 @@ static int do_mode_sense(struct fsg_dev - } else { // SC_MODE_SENSE_10 - buf[3] = (curlun->ro ? 
0x80 : 0x00); // WP, DPOFUA - buf += 8; -- limit = 65535; // Should really be mod_data.buflen -+ limit = bh->buflen - 1; - } - - /* No block descriptors */ -@@ -2319,29 +2747,6 @@ static int halt_bulk_in_endpoint(struct - return rc; - } - --static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) --{ -- int rc; -- -- DBG(fsg, "bulk-in set wedge\n"); -- rc = usb_ep_set_wedge(fsg->bulk_in); -- if (rc == -EAGAIN) -- VDBG(fsg, "delayed bulk-in endpoint wedge\n"); -- while (rc != 0) { -- if (rc != -EAGAIN) { -- WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc); -- rc = 0; -- break; -- } -- -- /* Wait for a short time and then try again */ -- if (msleep_interruptible(100) != 0) -- return -EINTR; -- rc = usb_ep_set_wedge(fsg->bulk_in); -- } -- return rc; --} -- - static int pad_with_zeros(struct fsg_dev *fsg) - { - struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; -@@ -2360,7 +2765,7 @@ static int pad_with_zeros(struct fsg_dev - return rc; - } - -- nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen); -+ nsend = min(fsg->usb_amount_left, (u32) bh->buflen); - memset(bh->buf + nkeep, 0, nsend - nkeep); - bh->inreq->length = nsend; - bh->inreq->zero = 0; -@@ -2401,7 +2806,7 @@ static int throw_away_data(struct fsg_de - bh = fsg->next_buffhd_to_fill; - if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) { - amount = min(fsg->usb_amount_left, -- (u32) mod_data.buflen); -+ (u32) bh->buflen); - - /* amount is always divisible by 512, hence by - * the bulk-out maxpacket size */ -@@ -3005,8 +3410,14 @@ static int received_cbw(struct fsg_dev * - * We aren't required to halt the OUT endpoint; instead - * we can simply accept and discard any data received - * until the next reset. */ -- wedge_bulk_in_endpoint(fsg); -- set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); -+ -+ /* USBCV tool expects Clear-Feature(HALT) to be processed -+ * so don't wedge the IN endpoint, just stall it -+ */ -+ -+ if (mod_data.can_stall) -+ halt_bulk_in_endpoint(fsg); -+ - return -EINVAL; - } - -@@ -3144,7 +3555,7 @@ static int do_set_interface(struct fsg_d - - reset: - /* Deallocate the requests */ -- for (i = 0; i < NUM_BUFFERS; ++i) { -+ for (i = 0; i < fsg->num_buffers; ++i) { - struct fsg_buffhd *bh = &fsg->buffhds[i]; - - if (bh->inreq) { -@@ -3202,7 +3613,7 @@ reset: - } - - /* Allocate the requests */ -- for (i = 0; i < NUM_BUFFERS; ++i) { -+ for (i = 0; i < fsg->num_buffers; ++i) { - struct fsg_buffhd *bh = &fsg->buffhds[i]; - - if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0) -@@ -3237,6 +3648,7 @@ reset: - */ - static int do_set_config(struct fsg_dev *fsg, u8 new_config) - { -+ unsigned power; - int rc = 0; - - /* Disable the single interface */ -@@ -3262,7 +3674,21 @@ static int do_set_config(struct fsg_dev - } - INFO(fsg, "%s speed config #%d\n", speed, fsg->config); - } -+ -+ switch (new_config) { -+ case CONFIG_VALUE_MAXPOWER: -+ power = 2 * config_desc_500.bMaxPower; -+ break; -+ case CONFIG_VALUE_SELFPOWERED: -+ power = 2 * config_desc_100.bMaxPower; -+ break; -+ default: -+ power = gadget_is_otg(fsg->gadget) ? 
8 : 100; -+ } -+ -+ usb_gadget_vbus_draw(fsg->gadget, power); - } -+ - return rc; - } - -@@ -3298,7 +3724,7 @@ static void handle_exception(struct fsg_ - /* Cancel all the pending transfers */ - if (fsg->intreq_busy) - usb_ep_dequeue(fsg->intr_in, fsg->intreq); -- for (i = 0; i < NUM_BUFFERS; ++i) { -+ for (i = 0; i < fsg->num_buffers; ++i) { - bh = &fsg->buffhds[i]; - if (bh->inreq_busy) - usb_ep_dequeue(fsg->bulk_in, bh->inreq); -@@ -3309,7 +3735,7 @@ static void handle_exception(struct fsg_ - /* Wait until everything is idle */ - for (;;) { - num_active = fsg->intreq_busy; -- for (i = 0; i < NUM_BUFFERS; ++i) { -+ for (i = 0; i < fsg->num_buffers; ++i) { - bh = &fsg->buffhds[i]; - num_active += bh->inreq_busy + bh->outreq_busy; - } -@@ -3331,7 +3757,7 @@ static void handle_exception(struct fsg_ - * state, and the exception. Then invoke the handler. */ - spin_lock_irq(&fsg->lock); - -- for (i = 0; i < NUM_BUFFERS; ++i) { -+ for (i = 0; i < fsg->num_buffers; ++i) { - bh = &fsg->buffhds[i]; - bh->state = BUF_STATE_EMPTY; - } -@@ -3489,12 +3915,10 @@ static int fsg_main_thread(void *fsg_) - fsg->thread_task = NULL; - spin_unlock_irq(&fsg->lock); - -- /* In case we are exiting because of a signal, unregister the -- * gadget driver and close the backing file. */ -- if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) { -+ /* If we are exiting because of a signal, unregister the -+ * gadget driver. */ -+ if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) - usb_gadget_unregister_driver(&fsg_driver); -- close_all_backing_files(fsg); -- } - - /* Let the unbind and cleanup routines know the thread has exited */ - complete_and_exit(&fsg->thread_notifier, 0); -@@ -3534,7 +3958,10 @@ static int open_backing_file(struct lun - - if (filp->f_path.dentry) - inode = filp->f_path.dentry->d_inode; -+ curlun->direct = 0; - if (inode && S_ISBLK(inode->i_mode)) { -+ /* FIXME: memory-limiting mode should be optional */ -+ curlun->direct = 1; - if (bdev_read_only(inode->i_bdev)) - ro = 1; - } else if (!inode || !S_ISREG(inode->i_mode)) { -@@ -3564,12 +3991,35 @@ static int open_backing_file(struct lun - goto out; - } - -+ if (curlun->direct) { -+ /* -+ * We are going to go around the caches, so make sure they -+ * are sync'ed and invalidated. Note that typically, the block -+ * device had a file system on it, which has just been -+ * unmounted and the unmount has already cleared the caches -+ * anyway. 
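/*
 * Illustrative aside (not from the patch): open_backing_file() above only
 * switches a LUN to the new direct-bio path when the backing object is a
 * block device (S_ISBLK); regular files keep the buffered
 * vfs_read()/vfs_write() path.  The user-space analogue below shows the
 * same classification; it is a hypothetical helper, not driver code.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
        struct stat st;

        if (argc < 2 || stat(argv[1], &st) != 0) {
                perror("stat");
                return 1;
        }
        if (S_ISBLK(st.st_mode))
                printf("%s: block device -> direct bio I/O\n", argv[1]);
        else if (S_ISREG(st.st_mode))
                printf("%s: regular file -> buffered I/O\n", argv[1]);
        else
                printf("%s: unsupported backing object\n", argv[1]);
        return 0;
}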
-+ */ -+ curlun->ro = ro; -+ curlun->filp = filp; -+ rc = fsync_sub(curlun); -+ if (rc) { -+ LINFO(curlun, "could not fsync: %s\n", filename); -+ curlun->filp = NULL; -+ goto out; -+ } -+ invalidate_mapping_pages(inode->i_mapping, 0, -1); -+ invalidate_bdev(inode->i_bdev); -+ } -+ - get_file(filp); - curlun->ro = ro; - curlun->filp = filp; - curlun->file_length = size; - curlun->num_sectors = num_sectors; - LDBG(curlun, "open backing file: %s\n", filename); -+ if (curlun->direct) -+ LDBG(curlun, "using direct I/O with %u bytes memory limit\n", -+ MEMLIMIT); - rc = 0; - - out: -@@ -3587,20 +4037,20 @@ static void close_backing_file(struct lu - } - } - --static void close_all_backing_files(struct fsg_dev *fsg) -+ -+static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *buf) - { -- int i; -+ struct lun *curlun = dev_to_lun(dev); - -- for (i = 0; i < fsg->nluns; ++i) -- close_backing_file(&fsg->luns[i]); -+ return sprintf(buf, "%d\n", curlun->ro); - } - -- --static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *buf) -+static ssize_t show_fua(struct device *dev, struct device_attribute *attr, -+ char *buf) - { - struct lun *curlun = dev_to_lun(dev); - -- return sprintf(buf, "%d\n", curlun->ro); -+ return sprintf(buf, "%u\n", curlun->fua); - } - - static ssize_t show_file(struct device *dev, struct device_attribute *attr, -@@ -3691,9 +4141,27 @@ static ssize_t store_file(struct device - } - - -+static ssize_t store_fua(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct lun *curlun = dev_to_lun(dev); -+ unsigned long attr_val = 0; -+ -+ if (strict_strtoul(buf, 2, &attr_val)) -+ return -EINVAL; -+ -+ if (!(curlun->fua)) -+ fsync_sub(curlun); -+ -+ curlun->fua = attr_val ? 1 : 0; -+ -+ return count; -+} -+ - /* The write permissions and store_xxx pointers are set in fsg_bind() */ - static DEVICE_ATTR(ro, 0444, show_ro, NULL); - static DEVICE_ATTR(file, 0444, show_file, NULL); -+static DEVICE_ATTR(fua, 0644, show_fua, store_fua); - - - /*-------------------------------------------------------------------------*/ -@@ -3729,6 +4197,7 @@ static void /* __init_or_exit */ fsg_unb - if (curlun->registered) { - device_remove_file(&curlun->dev, &dev_attr_ro); - device_remove_file(&curlun->dev, &dev_attr_file); -+ close_backing_file(curlun); - device_unregister(&curlun->dev); - curlun->registered = 0; - } -@@ -3744,7 +4213,7 @@ static void /* __init_or_exit */ fsg_unb - } - - /* Free the data buffers */ -- for (i = 0; i < NUM_BUFFERS; ++i) -+ for (i = 0; i < fsg->num_buffers; ++i) - kfree(fsg->buffhds[i].buf); - - /* Free the request and buffer for endpoint 0 */ -@@ -3851,12 +4320,23 @@ static int __init fsg_bind(struct usb_ga - struct usb_ep *ep; - struct usb_request *req; - char *pathbuf, *p; -+ ssize_t memlimit; - - fsg->gadget = gadget; - set_gadget_data(gadget, fsg); - fsg->ep0 = gadget->ep0; - fsg->ep0->driver_data = fsg; - -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * On bind time, init our variables to zero so we -+ * know our starting point. 
-+ */ -+ fsg->gadget->cindex = 0; -+ fsg->gadget->set_config = 0; -+ fsg->gadget->get_config = 0; -+ - if ((rc = check_parameters(fsg)) != 0) - goto out; - -@@ -3888,6 +4368,7 @@ static int __init fsg_bind(struct usb_ga - for (i = 0; i < fsg->nluns; ++i) { - curlun = &fsg->luns[i]; - curlun->ro = mod_data.ro[i]; -+ curlun->fua = mod_data.fua; - curlun->dev.release = lun_release; - curlun->dev.parent = &gadget->dev; - curlun->dev.driver = &fsg_driver.driver; -@@ -3902,7 +4383,9 @@ static int __init fsg_bind(struct usb_ga - if ((rc = device_create_file(&curlun->dev, - &dev_attr_ro)) != 0 || - (rc = device_create_file(&curlun->dev, -- &dev_attr_file)) != 0) { -+ &dev_attr_file)) != 0 || -+ (rc = device_create_file(&curlun->dev, -+ &dev_attr_fua)) != 0) { - device_unregister(&curlun->dev); - goto out; - } -@@ -3942,10 +4425,36 @@ static int __init fsg_bind(struct usb_ga - fsg->intr_in = ep; - } - -- /* Fix up the descriptors */ -- device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket; -+#if defined(CONFIG_MACH_NOKIA770) || defined(CONFIG_MACH_NOKIA_N800) \ -+ || defined(CONFIG_MACH_NOKIA_N810) || defined(CONFIG_MACH_NOKIA_N810_WIMAX) \ -+ || defined(CONFIG_MACH_NOKIA_RX51) -+ /* REVISIT: Get configuration from platform_data in board-*.c files */ -+ strings[0].s = manufacturer_nokia; -+ device_desc.idVendor = 0x0421; /* Nokia */ -+ -+ if (machine_is_nokia770()) { -+ strings[1].s = longname_770; -+ device_desc.idProduct = 0x0431; /* 770 */ -+ } else if (machine_is_nokia_n800()) { -+ strings[1].s = longname_n800; -+ device_desc.idProduct = 0x04c3; /* N800 */ -+ } else if (machine_is_nokia_n810()) { -+ strings[1].s = longname_n810; -+ device_desc.idProduct = 0x0096; /* N810 */ -+ } else if (machine_is_nokia_n810_wimax()){ -+ strings[1].s = longname_n810_wimax; -+ device_desc.idProduct = 0x0189; /* N810 WiMAX*/ -+ } else { -+ strings[1].s = longname_rx51; -+ device_desc.idProduct = 0x01c7; /* N900 */ -+ } -+#else - device_desc.idVendor = cpu_to_le16(mod_data.vendor); - device_desc.idProduct = cpu_to_le16(mod_data.product); -+#endif -+ -+ /* Fix up the descriptors */ -+ device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket; - device_desc.bcdDevice = cpu_to_le16(mod_data.release); - - i = (transport_is_cbi() ? 3 : 2); // Number of endpoints -@@ -3984,21 +4493,54 @@ static int __init fsg_bind(struct usb_ga - req->complete = ep0_complete; - - /* Allocate the data buffers */ -- for (i = 0; i < NUM_BUFFERS; ++i) { -+ /* FIXME: memory-limiting should be optional */ -+ /* FIXME: buffers should be allocated and freed -+ * when the first / last backing file is opened / closed -+ */ -+ memlimit = MEMLIMIT; -+ fsg->num_buffers = 0; -+ for (i = 0; i < NUM_BUFFERS && memlimit > 0; ++i) { - struct fsg_buffhd *bh = &fsg->buffhds[i]; -+ unsigned int buflen = mod_data.buflen; - - /* Allocate for the bulk-in endpoint. We assume that - * the buffer will also work with the bulk-out (and -- * interrupt-in) endpoint. */ -- bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL); -- if (!bh->buf) -+ * interrupt-in) endpoint. 
-+ * -+ * We try to workaround problems with memory fragmentation -+ * but we're not miracle men, so we stop trying allocation -+ * when it can't allocate 4k buffers -+ */ -+ while (buflen >= PAGE_SIZE) { -+ gfp_t flags = GFP_KERNEL; -+ -+ if (buflen == PAGE_SIZE) -+ flags |= __GFP_NOWARN; -+ -+ bh->buf = kmalloc(buflen, flags); -+ if (bh->buf) -+ break; -+ -+ buflen >>= 1; -+ } -+ -+ if (buflen == PAGE_SIZE && bh->buf) -+ dev_dbg(&gadget->dev, "unable to allocate large buffer" -+ " fall back to small transfers\n"); -+ -+ bh->buflen = buflen; -+ bh->fsg = fsg; -+ -+ if (!bh->buf) { -+ dev_err(&gadget->dev, "unable to allocate memory\n"); - goto out; -+ } -+ memlimit -= buflen; -+ fsg->num_buffers += 1; - bh->next = bh + 1; - } -- fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0]; -- -- /* This should reflect the actual gadget power source */ -- usb_gadget_set_selfpowered(gadget); -+ fsg->buffhds[fsg->num_buffers - 1].next = &fsg->buffhds[0]; -+ fsg->bio_tree = RB_ROOT; - - snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", - init_utsname()->sysname, init_utsname()->release, -@@ -4022,7 +4564,8 @@ static int __init fsg_bind(struct usb_ga - } - - INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n"); -- INFO(fsg, "Number of LUNs=%d\n", fsg->nluns); -+ INFO(fsg, "Number of LUNs=%d Number of buffers=%d\n", -+ fsg->nluns, fsg->num_buffers); - - pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); - for (i = 0; i < fsg->nluns; ++i) { -@@ -4035,8 +4578,8 @@ static int __init fsg_bind(struct usb_ga - if (IS_ERR(p)) - p = NULL; - } -- LINFO(curlun, "ro=%d, file: %s\n", -- curlun->ro, (p ? p : "(error)")); -+ LINFO(curlun, "ro=%d, fua=%d file: %s\n", -+ curlun->ro, curlun->fua, (p ? p : "(error)")); - } - } - kfree(pathbuf); -@@ -4056,6 +4599,14 @@ static int __init fsg_bind(struct usb_ga - - /* Tell the thread to start working */ - wake_up_process(fsg->thread_task); -+ -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * setup our retry timer. -+ */ -+ setup_timer(&fsg_set_config_timer, fsg_set_config_timeout, -+ (unsigned long) gadget); - return 0; - - autoconf_fail: -@@ -4065,19 +4616,35 @@ autoconf_fail: - out: - fsg->state = FSG_STATE_TERMINATED; // The thread is dead - fsg_unbind(gadget); -- close_all_backing_files(fsg); -+ complete(&fsg->thread_notifier); - return rc; - } - - - /*-------------------------------------------------------------------------*/ - -+/** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * when we suspend, we disconnect and retry with another configuration -+ */ - static void fsg_suspend(struct usb_gadget *gadget) - { - struct fsg_dev *fsg = get_gadget_data(gadget); - - DBG(fsg, "suspend\n"); - set_bit(SUSPENDED, &fsg->atomic_bitflags); -+ -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. 
-+ * -+ * we try another configuration if we have received -+ * a get_config but not a set_config -+ */ -+ if (gadget->get_config && !gadget->set_config) { -+ mod_timer(&fsg_set_config_timer, -+ jiffies + msecs_to_jiffies(10)); -+ } - } - - static void fsg_resume(struct usb_gadget *gadget) -@@ -4101,6 +4668,7 @@ static struct usb_gadget_driver fsg_dri - .bind = fsg_bind, - .unbind = fsg_unbind, - .disconnect = fsg_disconnect, -+ .vbus_disconnect = fsg_vbus_disconnect, - .setup = fsg_setup, - .suspend = fsg_suspend, - .resume = fsg_resume, -@@ -4158,7 +4726,6 @@ static void __exit fsg_cleanup(void) - /* Wait for the thread to finish up */ - wait_for_completion(&fsg->thread_notifier); - -- close_all_backing_files(fsg); - kref_put(&fsg->ref, fsg_release); - } - module_exit(fsg_cleanup); -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/f_obex.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_obex.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/f_obex.c 2011-06-22 13:14:20.993067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_obex.c 2011-06-22 13:19:33.033063272 +0200 -@@ -50,6 +50,7 @@ struct f_obex { - u8 data_id; - u8 port_num; - u8 can_activate; -+ u8 connected; - - struct obex_ep_descs fs; - struct obex_ep_descs hs; -@@ -69,10 +70,14 @@ static inline struct f_obex *port_to_obe - - #define OBEX_CTRL_IDX 0 - #define OBEX_DATA_IDX 1 -+#define OBEX_CTRL0_IDX 2 -+#define OBEX_CTRL1_IDX 3 - - static struct usb_string obex_string_defs[] = { - [OBEX_CTRL_IDX].s = "CDC Object Exchange (OBEX)", - [OBEX_DATA_IDX].s = "CDC OBEX Data", -+ [OBEX_CTRL0_IDX].s = "PC Suite Services", -+ [OBEX_CTRL1_IDX].s = "SYNCML-SYNC", - { }, /* end of list */ - }; - -@@ -237,6 +242,10 @@ static int obex_set_alt(struct usb_funct - - if (alt == 1) { - DBG(cdev, "activate obex ttyGS%d\n", obex->port_num); -+ obex->port.in_desc = ep_choose(cdev->gadget, -+ obex->hs.obex_in, obex->fs.obex_in); -+ obex->port.out_desc = ep_choose(cdev->gadget, -+ obex->hs.obex_out, obex->fs.obex_out); - gserial_connect(&obex->port, obex->port_num); - } - -@@ -270,38 +279,6 @@ static void obex_disable(struct usb_func - - /*-------------------------------------------------------------------------*/ - --static void obex_connect(struct gserial *g) --{ -- struct f_obex *obex = port_to_obex(g); -- struct usb_composite_dev *cdev = g->func.config->cdev; -- int status; -- -- if (!obex->can_activate) -- return; -- -- status = usb_function_activate(&g->func); -- if (status) -- DBG(cdev, "obex ttyGS%d function activate --> %d\n", -- obex->port_num, status); --} -- --static void obex_disconnect(struct gserial *g) --{ -- struct f_obex *obex = port_to_obex(g); -- struct usb_composite_dev *cdev = g->func.config->cdev; -- int status; -- -- if (!obex->can_activate) -- return; -- -- status = usb_function_deactivate(&g->func); -- if (status) -- DBG(cdev, "obex ttyGS%d function deactivate --> %d\n", -- obex->port_num, status); --} -- --/*-------------------------------------------------------------------------*/ -- - static int __init - obex_bind(struct usb_configuration *c, struct usb_function *f) - { -@@ -366,22 +343,11 @@ obex_bind(struct usb_configuration *c, s - f->hs_descriptors = usb_copy_descriptors(hs_function); - - obex->hs.obex_in = usb_find_endpoint(hs_function, -- f->descriptors, &obex_hs_ep_in_desc); -+ f->hs_descriptors, &obex_hs_ep_in_desc); - obex->hs.obex_out = usb_find_endpoint(hs_function, -- f->descriptors, &obex_hs_ep_out_desc); -+ f->hs_descriptors, &obex_hs_ep_out_desc); - } - -- /* Avoid letting this gadget enumerate 
until the userspace -- * OBEX server is active. -- */ -- status = usb_function_deactivate(f); -- if (status < 0) -- WARNING(cdev, "obex ttyGS%d: can't prevent enumeration, %d\n", -- obex->port_num, status); -- else -- obex->can_activate = true; -- -- - DBG(cdev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n", - obex->port_num, - gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", -@@ -452,15 +418,35 @@ int __init obex_bind_config(struct usb_c - return status; - obex_string_defs[OBEX_CTRL_IDX].id = status; - -- obex_control_intf.iInterface = status; -- - status = usb_string_id(c->cdev); - if (status < 0) - return status; - obex_string_defs[OBEX_DATA_IDX].id = status; - -- obex_data_nop_intf.iInterface = -- obex_data_intf.iInterface = status; -+ status = usb_string_id(c->cdev); -+ if (status < 0) -+ return status; -+ obex_string_defs[OBEX_CTRL0_IDX].id = status; -+ -+ status = usb_string_id(c->cdev); -+ if (status < 0) -+ return status; -+ obex_string_defs[OBEX_CTRL1_IDX].id = status; -+ } -+ -+ obex_data_nop_intf.iInterface = obex_string_defs[OBEX_DATA_IDX].id; -+ obex_data_intf.iInterface = obex_string_defs[OBEX_DATA_IDX].id; -+ -+ switch (port_num) { -+ case 0: -+ obex_control_intf.iInterface = obex_string_defs[OBEX_CTRL0_IDX].id; -+ break; -+ case 1: -+ obex_control_intf.iInterface = obex_string_defs[OBEX_CTRL1_IDX].id; -+ break; -+ default: -+ obex_control_intf.iInterface = obex_string_defs[OBEX_CTRL_IDX].id; -+ break; - } - - /* allocate and initialize one new instance */ -@@ -470,9 +456,6 @@ int __init obex_bind_config(struct usb_c - - obex->port_num = port_num; - -- obex->port.connect = obex_connect; -- obex->port.disconnect = obex_disconnect; -- - obex->port.func.name = "obex"; - obex->port.func.strings = obex_strings; - /* descriptors are per-instance copies */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/f_phonet.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_phonet.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/f_phonet.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_phonet.c 2011-06-22 13:19:33.033063272 +0200 -@@ -0,0 +1,623 @@ -+/* -+ * f_phonet.c -- USB CDC Phonet function -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Author: Rémi Denis-Courmont -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "u_phonet.h" -+ -+#define PN_MEDIA_USB 0x1B -+ -+/*-------------------------------------------------------------------------*/ -+ -+struct phonet_port { -+ struct f_phonet *usb; -+ spinlock_t lock; -+}; -+ -+struct f_phonet { -+ struct usb_function function; -+ struct { -+ struct sk_buff *skb; -+ spinlock_t lock; -+ } rx; -+ struct net_device *dev; -+ struct usb_ep *in_ep, *out_ep; -+ -+ struct usb_request *in_req; -+ struct usb_request *out_reqv[0]; -+}; -+ -+static int phonet_rxq_size = 17; -+ -+static inline struct f_phonet *func_to_pn(struct usb_function *f) -+{ -+ return container_of(f, struct f_phonet, function); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+#define USB_CDC_SUBCLASS_PHONET 0xfe -+#define USB_CDC_PHONET_TYPE 0xab -+ -+static struct usb_interface_descriptor -+pn_control_intf_desc = { -+ .bLength = sizeof pn_control_intf_desc, -+ .bDescriptorType = USB_DT_INTERFACE, -+ -+ /* .bInterfaceNumber = DYNAMIC, */ -+ .bInterfaceClass = USB_CLASS_COMM, -+ .bInterfaceSubClass = USB_CDC_SUBCLASS_PHONET, -+}; -+ -+static const struct usb_cdc_header_desc -+pn_header_desc = { -+ .bLength = sizeof pn_header_desc, -+ .bDescriptorType = USB_DT_CS_INTERFACE, -+ .bDescriptorSubType = USB_CDC_HEADER_TYPE, -+ .bcdCDC = __constant_cpu_to_le16(0x0110), -+}; -+ -+static const struct usb_cdc_header_desc -+pn_phonet_desc = { -+ .bLength = sizeof pn_phonet_desc, -+ .bDescriptorType = USB_DT_CS_INTERFACE, -+ .bDescriptorSubType = USB_CDC_PHONET_TYPE, -+ .bcdCDC = __constant_cpu_to_le16(0x1505), /* ??? 
*/ -+}; -+ -+static struct usb_cdc_union_desc -+pn_union_desc = { -+ .bLength = sizeof pn_union_desc, -+ .bDescriptorType = USB_DT_CS_INTERFACE, -+ .bDescriptorSubType = USB_CDC_UNION_TYPE, -+ -+ /* .bMasterInterface0 = DYNAMIC, */ -+ /* .bSlaveInterface0 = DYNAMIC, */ -+}; -+ -+static struct usb_interface_descriptor -+pn_data_nop_intf_desc = { -+ .bLength = sizeof pn_data_nop_intf_desc, -+ .bDescriptorType = USB_DT_INTERFACE, -+ -+ /* .bInterfaceNumber = DYNAMIC, */ -+ .bAlternateSetting = 0, -+ .bNumEndpoints = 0, -+ .bInterfaceClass = USB_CLASS_CDC_DATA, -+}; -+ -+static struct usb_interface_descriptor -+pn_data_intf_desc = { -+ .bLength = sizeof pn_data_intf_desc, -+ .bDescriptorType = USB_DT_INTERFACE, -+ -+ /* .bInterfaceNumber = DYNAMIC, */ -+ .bAlternateSetting = 1, -+ .bNumEndpoints = 2, -+ .bInterfaceClass = USB_CLASS_CDC_DATA, -+}; -+ -+static struct usb_endpoint_descriptor -+pn_fs_sink_desc = { -+ .bLength = USB_DT_ENDPOINT_SIZE, -+ .bDescriptorType = USB_DT_ENDPOINT, -+ -+ .bEndpointAddress = USB_DIR_OUT, -+ .bmAttributes = USB_ENDPOINT_XFER_BULK, -+}; -+ -+static struct usb_endpoint_descriptor -+pn_hs_sink_desc = { -+ .bLength = USB_DT_ENDPOINT_SIZE, -+ .bDescriptorType = USB_DT_ENDPOINT, -+ -+ .bEndpointAddress = USB_DIR_OUT, -+ .bmAttributes = USB_ENDPOINT_XFER_BULK, -+ .wMaxPacketSize = __constant_cpu_to_le16(512), -+}; -+ -+static struct usb_endpoint_descriptor -+pn_fs_source_desc = { -+ .bLength = USB_DT_ENDPOINT_SIZE, -+ .bDescriptorType = USB_DT_ENDPOINT, -+ -+ .bEndpointAddress = USB_DIR_IN, -+ .bmAttributes = USB_ENDPOINT_XFER_BULK, -+}; -+ -+static struct usb_endpoint_descriptor -+pn_hs_source_desc = { -+ .bLength = USB_DT_ENDPOINT_SIZE, -+ .bDescriptorType = USB_DT_ENDPOINT, -+ -+ .bEndpointAddress = USB_DIR_IN, -+ .bmAttributes = USB_ENDPOINT_XFER_BULK, -+ .wMaxPacketSize = __constant_cpu_to_le16(512), -+}; -+ -+static struct usb_descriptor_header *fs_pn_function[] = { -+ (struct usb_descriptor_header *) &pn_control_intf_desc, -+ (struct usb_descriptor_header *) &pn_header_desc, -+ (struct usb_descriptor_header *) &pn_phonet_desc, -+ (struct usb_descriptor_header *) &pn_union_desc, -+ (struct usb_descriptor_header *) &pn_data_nop_intf_desc, -+ (struct usb_descriptor_header *) &pn_data_intf_desc, -+ (struct usb_descriptor_header *) &pn_fs_sink_desc, -+ (struct usb_descriptor_header *) &pn_fs_source_desc, -+ NULL, -+}; -+ -+static struct usb_descriptor_header *hs_pn_function[] = { -+ (struct usb_descriptor_header *) &pn_control_intf_desc, -+ (struct usb_descriptor_header *) &pn_header_desc, -+ (struct usb_descriptor_header *) &pn_phonet_desc, -+ (struct usb_descriptor_header *) &pn_union_desc, -+ (struct usb_descriptor_header *) &pn_data_nop_intf_desc, -+ (struct usb_descriptor_header *) &pn_data_intf_desc, -+ (struct usb_descriptor_header *) &pn_hs_sink_desc, -+ (struct usb_descriptor_header *) &pn_hs_source_desc, -+ NULL, -+}; -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int pn_net_open(struct net_device *dev) -+{ -+ netif_wake_queue(dev); -+ return 0; -+} -+ -+static int pn_net_close(struct net_device *dev) -+{ -+ netif_stop_queue(dev); -+ return 0; -+} -+ -+static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req) -+{ -+ struct f_phonet *fp = ep->driver_data; -+ struct net_device *dev = fp->dev; -+ struct sk_buff *skb = req->context; -+ -+ switch (req->status) { -+ case 0: -+ dev->stats.tx_packets++; -+ dev->stats.tx_bytes += skb->len; -+ break; -+ -+ case -ESHUTDOWN: /* disconnected */ -+ case 
-ECONNRESET: /* disabled */ -+ dev->stats.tx_aborted_errors++; -+ default: -+ dev->stats.tx_errors++; -+ } -+ -+ dev_kfree_skb_any(skb); -+ netif_wake_queue(dev); -+} -+ -+static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct phonet_port *port = netdev_priv(dev); -+ struct f_phonet *fp; -+ struct usb_request *req; -+ unsigned long flags; -+ -+ if (skb->protocol != htons(ETH_P_PHONET)) -+ goto out; -+ -+ spin_lock_irqsave(&port->lock, flags); -+ fp = port->usb; -+ if (unlikely(!fp)) /* race with carrier loss */ -+ goto out_unlock; -+ -+ req = fp->in_req; -+ req->buf = skb->data; -+ req->length = skb->len; -+ req->complete = pn_tx_complete; -+ req->zero = 1; -+ req->context = skb; -+ -+ if (unlikely(usb_ep_queue(fp->in_ep, req, GFP_ATOMIC))) -+ goto out_unlock; -+ -+ netif_stop_queue(dev); -+ skb = NULL; -+ -+out_unlock: -+ spin_unlock_irqrestore(&port->lock, flags); -+out: -+ if (unlikely(skb)) { -+ dev_kfree_skb(skb); -+ dev->stats.tx_dropped++; -+ } -+ return 0; -+} -+ -+static int pn_net_mtu(struct net_device *dev, int new_mtu) -+{ -+ if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU)) -+ return -EINVAL; -+ dev->mtu = new_mtu; -+ return 0; -+} -+ -+static void pn_net_setup(struct net_device *dev) -+{ -+ dev->features = 0; -+ dev->type = ARPHRD_PHONET; -+ dev->flags = IFF_POINTOPOINT | IFF_NOARP; -+ dev->mtu = (60 * 1024), -+ dev->hard_header_len = 1; -+ dev->dev_addr[0] = PN_MEDIA_USB; -+ dev->addr_len = 1; -+ dev->tx_queue_len = 1; -+ -+ dev->open = pn_net_open; -+ dev->stop = pn_net_close; -+ dev->hard_start_xmit = pn_net_xmit; -+ dev->change_mtu = pn_net_mtu; -+ dev->destructor = free_netdev; -+ dev->header_ops = &phonet_header_ops; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* -+ * Queue buffer for data from the host -+ */ -+static int -+pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) -+{ -+ struct net_device *dev = fp->dev; -+ struct page *page; -+ int err; -+ -+ page = __netdev_alloc_page(dev, gfp_flags); -+ if (!page) -+ return -ENOMEM; -+ -+ req->buf = page_address(page); -+ req->length = PAGE_SIZE; -+ req->context = page; -+ -+ err = usb_ep_queue(fp->out_ep, req, gfp_flags); -+ if (unlikely(err)) -+ netdev_free_page(dev, page); -+ return err; -+} -+ -+static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) -+{ -+ struct f_phonet *fp = ep->driver_data; -+ struct net_device *dev = fp->dev; -+ struct page *page = req->context; -+ struct sk_buff *skb; -+ unsigned long flags; -+ int status = req->status; -+ -+ switch (status) { -+ case 0: -+ spin_lock_irqsave(&fp->rx.lock, flags); -+ skb = fp->rx.skb; -+ if (!skb) -+ skb = fp->rx.skb = netdev_alloc_skb(dev, 12); -+ if (req->actual < req->length) /* Last fragment */ -+ fp->rx.skb = NULL; -+ spin_unlock_irqrestore(&fp->rx.lock, flags); -+ -+ if (unlikely(!skb)) -+ break; -+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, -+ req->actual); -+ page = NULL; -+ -+ if (req->actual < req->length) { /* Last fragment */ -+ skb->protocol = htons(ETH_P_PHONET); -+ skb_reset_mac_header(skb); -+ pskb_pull(skb, 1); -+ skb->dev = dev; -+ dev->stats.rx_packets++; -+ dev->stats.rx_bytes += skb->len; -+ -+ netif_rx(skb); -+ } -+ break; -+ -+ /* Do not resubmit in these cases: */ -+ case -ESHUTDOWN: /* disconnect */ -+ case -ECONNABORTED: /* hw reset */ -+ case -ECONNRESET: /* dequeued (unlink or netif down) */ -+ req = NULL; -+ break; -+ -+ /* Do resubmit in these cases: */ -+ case -EOVERFLOW: /* request buffer 
overflow */ -+ dev->stats.rx_over_errors++; -+ default: -+ dev->stats.rx_errors++; -+ break; -+ } -+ -+ if (page) -+ netdev_free_page(dev, page); -+ if (req) -+ pn_rx_submit(fp, req, GFP_ATOMIC); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void __pn_reset(struct usb_function *f) -+{ -+ struct f_phonet *fp = func_to_pn(f); -+ struct net_device *dev = fp->dev; -+ struct phonet_port *port = netdev_priv(dev); -+ -+ port->usb = NULL; -+ -+ usb_ep_disable(fp->out_ep); -+ usb_ep_disable(fp->in_ep); -+ if (fp->rx.skb) { -+ dev_kfree_skb_irq(fp->rx.skb); -+ fp->rx.skb = NULL; -+ } -+} -+ -+static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) -+{ -+ struct f_phonet *fp = func_to_pn(f); -+ struct usb_gadget *gadget = fp->function.config->cdev->gadget; -+ -+ if (intf == pn_control_intf_desc.bInterfaceNumber) -+ /* control interface, no altsetting */ -+ return (alt > 0) ? -EINVAL : 0; -+ -+ if (intf == pn_data_intf_desc.bInterfaceNumber) { -+ struct net_device *dev = fp->dev; -+ struct phonet_port *port = netdev_priv(dev); -+ -+ /* data intf (0: inactive, 1: active) */ -+ if (alt > 1) -+ return -EINVAL; -+ -+ spin_lock(&port->lock); -+ __pn_reset(f); -+ if (alt == 1) { -+ struct usb_endpoint_descriptor *out, *in; -+ int i; -+ -+ out = ep_choose(gadget, -+ &pn_hs_sink_desc, -+ &pn_fs_sink_desc); -+ in = ep_choose(gadget, -+ &pn_hs_source_desc, -+ &pn_fs_source_desc); -+ usb_ep_enable(fp->out_ep, out); -+ usb_ep_enable(fp->in_ep, in); -+ -+ port->usb = fp; -+ fp->out_ep->driver_data = fp; -+ fp->in_ep->driver_data = fp; -+ -+ for (i = 0; i < phonet_rxq_size; i++) -+ pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC); -+ } -+ spin_unlock(&port->lock); -+ return 0; -+ } -+ -+ return -EINVAL; -+} -+ -+static int pn_get_alt(struct usb_function *f, unsigned intf) -+{ -+ struct f_phonet *fp = func_to_pn(f); -+ -+ if (intf == pn_control_intf_desc.bInterfaceNumber) -+ return 0; -+ -+ if (intf == pn_data_intf_desc.bInterfaceNumber) { -+ struct phonet_port *port = netdev_priv(fp->dev); -+ u8 alt; -+ -+ spin_lock(&port->lock); -+ alt = port->usb != NULL; -+ spin_unlock(&port->lock); -+ return alt; -+ } -+ -+ return -EINVAL; -+} -+ -+static void pn_disconnect(struct usb_function *f) -+{ -+ struct f_phonet *fp = func_to_pn(f); -+ struct phonet_port *port = netdev_priv(fp->dev); -+ unsigned long flags; -+ -+ /* remain disabled until set_alt */ -+ spin_lock_irqsave(&port->lock, flags); -+ __pn_reset(f); -+ spin_unlock_irqrestore(&port->lock, flags); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static __init -+int pn_bind(struct usb_configuration *c, struct usb_function *f) -+{ -+ struct usb_composite_dev *cdev = c->cdev; -+ struct usb_gadget *gadget = cdev->gadget; -+ struct f_phonet *fp = func_to_pn(f); -+ struct usb_ep *ep; -+ int status, i; -+ -+ /* Reserve interface IDs */ -+ status = usb_interface_id(c, f); -+ if (status < 0) -+ goto err; -+ pn_control_intf_desc.bInterfaceNumber = status; -+ pn_union_desc.bMasterInterface0 = status; -+ -+ status = usb_interface_id(c, f); -+ if (status < 0) -+ goto err; -+ pn_data_nop_intf_desc.bInterfaceNumber = status; -+ pn_data_intf_desc.bInterfaceNumber = status; -+ pn_union_desc.bSlaveInterface0 = status; -+ -+ /* Reserve endpoints */ -+ status = -ENODEV; -+ ep = usb_ep_autoconfig(gadget, &pn_fs_sink_desc); -+ if (!ep) -+ goto err; -+ fp->out_ep = ep; -+ ep->driver_data = fp; /* Claim */ -+ -+ ep = usb_ep_autoconfig(gadget, &pn_fs_source_desc); -+ if (!ep) -+ 
goto err; -+ fp->in_ep = ep; -+ ep->driver_data = fp; /* Claim */ -+ -+ pn_hs_sink_desc.bEndpointAddress = -+ pn_fs_sink_desc.bEndpointAddress; -+ pn_hs_source_desc.bEndpointAddress = -+ pn_fs_source_desc.bEndpointAddress; -+ -+ /* Do not try to bind Phonet twice... */ -+ fp->function.descriptors = fs_pn_function; -+ fp->function.hs_descriptors = hs_pn_function; -+ -+ /* Incoming USB requests */ -+ status = -ENOMEM; -+ for (i = 0; i < phonet_rxq_size; i++) { -+ struct usb_request *req; -+ -+ req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL); -+ if (!req) -+ goto err; -+ -+ req->complete = pn_rx_complete; -+ fp->out_reqv[i] = req; -+ } -+ -+ /* Outgoing USB requests */ -+ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL); -+ if (!fp->in_req) -+ goto err; -+ -+ INFO(cdev, "USB CDC Phonet function\n"); -+ INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name, -+ fp->out_ep->name, fp->in_ep->name); -+ return 0; -+ -+err: -+ if (fp->out_ep) -+ fp->out_ep->driver_data = NULL; -+ if (fp->in_ep) -+ fp->in_ep->driver_data = NULL; -+ ERROR(cdev, "USB CDC Phonet: cannot autoconfigure\n"); -+ return status; -+} -+ -+static void -+pn_unbind(struct usb_configuration *c, struct usb_function *f) -+{ -+ struct f_phonet *fp = func_to_pn(f); -+ int i; -+ -+ /* We are already disconnected */ -+ if (fp->in_req) -+ usb_ep_free_request(fp->in_ep, fp->in_req); -+ for (i = 0; i < phonet_rxq_size; i++) -+ if (fp->out_reqv[i]) -+ usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); -+ -+ kfree(fp); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static struct net_device *dev; -+ -+int __init phonet_bind_config(struct usb_configuration *c) -+{ -+ struct f_phonet *fp; -+ int err, size; -+ -+ size = sizeof(*fp) + (phonet_rxq_size * sizeof(struct usb_request *)); -+ fp = kzalloc(size, GFP_KERNEL); -+ if (!fp) -+ return -ENOMEM; -+ -+ fp->dev = dev; -+ fp->function.name = "phonet"; -+ fp->function.bind = pn_bind; -+ fp->function.unbind = pn_unbind; -+ fp->function.set_alt = pn_set_alt; -+ fp->function.get_alt = pn_get_alt; -+ fp->function.disable = pn_disconnect; -+ spin_lock_init(&fp->rx.lock); -+ -+ err = usb_add_function(c, &fp->function); -+ if (err) -+ kfree(fp); -+ return err; -+} -+ -+int __init gphonet_setup(struct usb_gadget *gadget) -+{ -+ struct phonet_port *port; -+ int err; -+ -+ /* Create net device */ -+ BUG_ON(dev); -+ dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup); -+ if (!dev) -+ return -ENOMEM; -+ -+ port = netdev_priv(dev); -+ spin_lock_init(&port->lock); -+ netif_stop_queue(dev); -+ SET_NETDEV_DEV(dev, &gadget->dev); -+ -+ err = register_netdev(dev); -+ if (err) -+ free_netdev(dev); -+ return err; -+} -+ -+void gphonet_cleanup(void) -+{ -+ unregister_netdev(dev); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/f_raw.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_raw.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/f_raw.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/f_raw.c 2011-06-22 13:19:33.033063272 +0200 -@@ -0,0 +1,779 @@ -+/* -+ * f_raw.c -- USB Raw Access Function Driver -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Contact: Felipe Balbi -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+/* #define VERBOSE_DEBUG */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "gadget_chips.h" -+ -+struct graw { -+ struct cdev chdev; -+ -+ struct usb_gadget *gadget; -+ struct usb_function func; -+ -+ unsigned major; -+ dev_t dev; -+}; -+ -+static struct graw *the_graw; -+static struct f_raw *the_raw; -+ -+struct raw_request { -+ struct usb_request *req; -+ struct list_head list; -+ wait_queue_head_t wait; -+ unsigned long len; -+ -+ unsigned queued:1, completed:1; -+ int nr; -+}; -+ -+struct raw_ep_descs { -+ struct usb_endpoint_descriptor *raw_out; -+}; -+ -+struct f_raw { -+ /* pool of read requests */ -+ struct list_head read_pool; -+ int nr_reqs; -+ -+ /* synchronize with userland access */ -+ struct mutex mutex; -+ -+ struct usb_ep *out; -+ struct raw_request *allocated_req; -+ struct class *class; -+ -+ struct raw_ep_descs fs; -+ struct raw_ep_descs hs; -+ -+ struct graw graw; -+ -+ unsigned vmas; -+ unsigned connected:1; -+ unsigned can_activate:1; -+ -+ u8 intf_id; -+}; -+ -+static inline struct f_raw *func_to_raw(struct usb_function *f) -+{ -+ return container_of(f, struct f_raw, graw.func); -+} -+ -+static u64 raw_dmamask = DMA_BIT_MASK(64); -+ -+/*-------------------------------------------------------------------------*/ -+ -+#define RAW_INTF_IDX 1 -+ -+static struct usb_string raw_string_defs[] = { -+ [RAW_INTF_IDX].s = "Device Upgrade Interface", -+ { }, /* end of list */ -+}; -+ -+static struct usb_gadget_strings raw_string_table = { -+ .language = 0x0409, /* en-US */ -+ .strings = raw_string_defs, -+}; -+ -+static struct usb_gadget_strings *raw_strings[] = { -+ &raw_string_table, -+ NULL, -+}; -+ -+/*-------------------------------------------------------------------------*/ -+ -+static struct usb_interface_descriptor raw_intf __initdata = { -+ .bLength = sizeof(raw_intf), -+ .bDescriptorType = USB_DT_INTERFACE, -+ .bInterfaceNumber = 0, -+ -+ .bAlternateSetting = 0, -+ .bNumEndpoints = 1, -+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC, -+}; -+ -+/* High-Speed Support */ -+ -+static struct usb_endpoint_descriptor raw_hs_ep_out_desc = { -+ .bLength = USB_DT_ENDPOINT_SIZE, -+ .bDescriptorType = USB_DT_ENDPOINT, -+ -+ .bEndpointAddress = USB_DIR_OUT, -+ .bmAttributes = USB_ENDPOINT_XFER_BULK, -+ .wMaxPacketSize = __constant_cpu_to_le16(512), -+}; -+ -+static struct usb_descriptor_header *hs_function[] __initdata = { -+ (struct usb_descriptor_header *) &raw_intf, -+ (struct usb_descriptor_header *) &raw_hs_ep_out_desc, -+ NULL, -+}; -+ -+/* Full-Speed Support */ -+ -+static struct usb_endpoint_descriptor raw_fs_ep_out_desc = { -+ .bLength = USB_DT_ENDPOINT_SIZE, -+ .bDescriptorType = USB_DT_ENDPOINT, -+ -+ .bEndpointAddress = USB_DIR_OUT, -+ .bmAttributes = USB_ENDPOINT_XFER_BULK, -+}; -+ -+static struct usb_descriptor_header *fs_function[] __initdata = { -+ (struct usb_descriptor_header *) &raw_intf, -+ (struct usb_descriptor_header *) &raw_fs_ep_out_desc, -+ NULL, -+}; -+ 
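Aside from the hunks themselves: f_raw, like f_phonet above, carries parallel full-speed and high-speed endpoint descriptor tables and selects between them when the interface is activated. The helper below is only an illustrative sketch of what the ep_choose() call used later by pn_set_alt() and enable_raw() reduces to on this kernel; the name pick_ep_desc and the standalone framing are assumptions, not code added by this series.

#include <linux/usb/gadget.h>
#include "gadget_chips.h"

/*
 * Sketch: use the high-speed descriptor only when the controller is
 * dual-speed and the link actually enumerated at high speed; otherwise
 * fall back to the full-speed variant before calling usb_ep_enable().
 */
static struct usb_endpoint_descriptor *
pick_ep_desc(struct usb_gadget *g,
	     struct usb_endpoint_descriptor *hs,
	     struct usb_endpoint_descriptor *fs)
{
	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
		return hs;
	return fs;
}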
-+/*-------------------------------------------------------------------------*/ -+ -+static void raw_complete(struct usb_ep *ep, struct usb_request *req); -+ -+static struct raw_request *raw_alloc_request(struct f_raw *raw, unsigned buflen) -+{ -+ struct list_head *pool = &raw->read_pool; -+ struct usb_request *req; -+ struct raw_request *raw_req; -+ void *buf; -+ -+ raw_req = kzalloc(sizeof(*raw_req), GFP_KERNEL); -+ if (raw_req == NULL) -+ goto fail1; -+ -+ INIT_LIST_HEAD(&raw_req->list); -+ -+ req = usb_ep_alloc_request(raw->out, GFP_KERNEL); -+ if (req == NULL) -+ goto fail2; -+ -+ req->length = buflen; -+ req->complete = raw_complete; -+ req->context = raw_req; -+ -+ buf = dma_alloc_coherent(&raw->graw.gadget->dev, buflen, -+ &req->dma, GFP_KERNEL); -+ if (IS_ERR(buf)) -+ goto fail3; -+ req->buf = buf; -+ -+ raw_req->req = req; -+ raw_req->len = buflen; -+ -+ if (raw->nr_reqs == MAX_NR_REQUESTS) -+ goto fail4; -+ -+ raw_req->nr = raw->nr_reqs; -+ raw->nr_reqs++; -+ list_add_tail(&raw_req->list, pool); -+ -+ return raw_req; -+ -+fail4: -+ dma_free_coherent(&raw->graw.gadget->dev, buflen, -+ buf, req->dma); -+ -+fail3: -+ usb_ep_free_request(raw->out, req); -+ -+fail2: -+ kfree(raw_req); -+ -+fail1: -+ return NULL; -+} -+ -+static void raw_complete(struct usb_ep *ep, struct usb_request *req) -+{ -+ struct f_raw *raw = ep->driver_data; -+ struct raw_request *raw_req = req->context; -+ struct usb_composite_dev *cdev = raw->graw.func.config->cdev; -+ int status = req->status; -+ -+ switch (status) { -+ case 0: /* normal completion */ -+ break; -+ case -ECONNABORTED: /* hardware forced ep reset */ -+ case -ECONNRESET: /* request dequeued */ -+ case -ESHUTDOWN: /* disconnected from host */ -+ VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status, -+ req->actual, req->length); -+ return; -+ case -EOVERFLOW: /* not big enough buffer */ -+ default: -+ DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name, -+ status, req->actual, req->length); -+ case -EREMOTEIO: /* short read */ -+ break; -+ } -+ -+ raw_req->queued = 0; -+ raw_req->completed = 1; -+ wake_up_interruptible(&raw_req->wait); -+} -+ -+static struct raw_request *find_request(struct f_raw *raw, int value) -+{ -+ struct raw_request *req; -+ -+ list_for_each_entry(req, &raw->read_pool, list) -+ if (req->nr == value) -+ return req; -+ -+ return NULL; -+} -+ -+static inline int enable_raw(struct usb_composite_dev *cdev, struct f_raw *raw) -+{ -+ const struct usb_endpoint_descriptor *out_desc; -+ struct usb_ep *ep; -+ -+ int status = 0; -+ -+ /* choose endpoint */ -+ out_desc = ep_choose(cdev->gadget, &raw_hs_ep_out_desc, -+ &raw_fs_ep_out_desc); -+ -+ /* enable it */ -+ ep = raw->out; -+ status = usb_ep_enable(ep, out_desc); -+ if (status < 0) -+ return status; -+ ep->driver_data = raw; -+ -+ DBG(cdev, "%s enabled\n", raw->graw.func.name); -+ -+ return 0; -+} -+ -+static inline void disable_raw(struct f_raw *raw) -+{ -+ struct usb_composite_dev *cdev; -+ struct usb_ep *ep; -+ -+ int status; -+ -+ cdev = raw->graw.func.config->cdev; -+ -+ ep = raw->out; -+ if (ep->driver_data) { -+ status = usb_ep_disable(ep); -+ if (status < 0) -+ DBG(cdev, "disable %s --> %d\n", -+ ep->name, status); -+ ep->driver_data = NULL; -+ } -+ -+ VDBG(cdev, "%s disabled\n", raw->graw.func.name); -+} -+ -+static int raw_set_alt(struct usb_function *f, unsigned intf, unsigned alt) -+{ -+ struct usb_composite_dev *cdev = f->config->cdev; -+ struct f_raw *raw = func_to_raw(f); -+ -+ /* we konw alt is zero */ -+ if (raw->out->driver_data) -+ disable_raw(raw); -+ -+ return 
enable_raw(cdev, raw); -+} -+ -+static void raw_disable(struct usb_function *f) -+{ -+ struct f_raw *raw = func_to_raw(f); -+ -+ disable_raw(raw); -+} -+ -+static int raw_queue_request(struct f_raw *raw, struct raw_queue_request *qr) -+{ -+ struct usb_ep *ep = raw->out; -+ struct raw_request *raw_req; -+ int status = 0; -+ -+ raw_req = find_request(raw, qr->nr); -+ if (raw_req == NULL) -+ return -ENOENT; -+ -+ if (qr->nr_bytes > raw_req->len) -+ return -EINVAL; -+ -+ /* FIXME: lock with irqsave and check if transfer already in progress, -+ * bail out if so. */ -+ -+ raw_req->req->length = qr->nr_bytes; -+ -+ init_waitqueue_head(&raw_req->wait); -+ raw_req->completed = 0; -+ raw_req->queued = 1; -+ status = usb_ep_queue(ep, raw_req->req, GFP_KERNEL); -+ if (status) { -+ struct usb_composite_dev *cdev; -+ -+ cdev = raw->graw.func.config->cdev; -+ ERROR(cdev, "start %s %s --> %d\n", "OUT", ep->name, status); -+ raw_req->queued = 0; -+ } -+ -+ return status; -+} -+ -+static int raw_free_request(struct f_raw *raw, int nr) -+{ -+ struct raw_request *raw_req; -+ struct usb_request *req; -+ -+ raw_req = find_request(raw, nr); -+ if (raw_req == NULL) -+ return -ENOENT; -+ -+ if (raw->allocated_req == raw_req) -+ raw->allocated_req = NULL; -+ /* FIXME: munmap? */ -+ -+ req = raw_req->req; -+ /* FIXME: spinlocking? */ -+ if (raw_req->queued) -+ usb_ep_dequeue(raw->out, req); -+ raw_req->queued = 0; -+ dma_free_coherent(&raw->graw.gadget->dev, raw_req->len, req->buf, -+ req->dma); -+ usb_ep_free_request(raw->out, req); -+ list_del(&raw_req->list); -+ kfree(raw_req); -+ -+ return 0; -+} -+ -+static int raw_get_request_status(struct f_raw *raw, -+ struct raw_request_status *st) -+{ -+ struct raw_request *raw_req; -+ -+ raw_req = find_request(raw, st->nr); -+ if (raw_req == NULL) -+ return -ENOENT; -+ -+ if (!raw_req->queued) { -+ st->status = raw_req->req->status; -+ st->nr_bytes = raw_req->req->actual; -+ raw_req->completed = 0; -+ } else { -+ st->status = -EBUSY; -+ st->nr_bytes = 0; -+ } -+ -+ return 0; -+} -+ -+static void get_completion_map(struct f_raw *raw, unsigned int *mask_out) -+{ -+ struct raw_request *req; -+ unsigned int mask = 0; -+ -+ list_for_each_entry(req, &raw->read_pool, list) -+ if (req->completed) -+ mask |= (1 << req->nr); -+ -+ *mask_out = mask; -+} -+ -+static long fraw_ioctl(struct file *filp, unsigned code, unsigned long value) -+{ -+ struct f_raw *raw = filp->private_data; -+ struct usb_ep *ep = raw->out; -+ unsigned int map; -+ int status = 0; -+ struct raw_request_status req_st; -+ struct raw_queue_request que_req; -+ -+ if (unlikely(!ep)) -+ return -EINVAL; -+ -+ mutex_lock(&raw->mutex); -+ -+ switch (code) { -+ case RAW_FIFO_STATUS: -+ status = usb_ep_fifo_status(ep); -+ break; -+ case RAW_FIFO_FLUSH: -+ usb_ep_fifo_flush(ep); -+ break; -+ case RAW_CLEAR_HALT: -+ status = usb_ep_clear_halt(ep); -+ break; -+ case RAW_ALLOC_REQUEST: -+ if (raw->allocated_req != NULL) { -+ status = -EBUSY; -+ break; -+ } -+ if (value > MAX_REQUEST_LEN || (value % PAGE_SIZE) != 0) { -+ status = -EINVAL; -+ break; -+ } -+ raw->allocated_req = raw_alloc_request(raw, value); -+ if (raw->allocated_req == NULL) { -+ status = -ENOMEM; -+ break; -+ } -+ status = raw->allocated_req->nr; -+ break; -+ case RAW_QUEUE_REQUEST: -+ status = copy_from_user(&que_req, (void __user *) value, -+ sizeof(que_req)); -+ if (status) -+ break; -+ status = raw_queue_request(raw, &que_req); -+ break; -+ case RAW_FREE_REQUEST: -+ status = raw_free_request(raw, value); -+ break; -+ case RAW_GET_COMPLETION_MAP: -+ 
get_completion_map(raw, &map); -+ status = put_user(map, (unsigned int __user *) value); -+ break; -+ case RAW_GET_REQUEST_STATUS: -+ status = copy_from_user(&req_st, (void __user *) value, -+ sizeof(req_st)); -+ if (status) -+ break; -+ status = raw_get_request_status(raw, &req_st); -+ if (status) -+ break; -+ status = copy_to_user((void __user *) value, &req_st, -+ sizeof(req_st)); -+ } -+ -+ mutex_unlock(&raw->mutex); -+ -+ return status; -+} -+ -+static int fraw_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ size_t size = vma->vm_end - vma->vm_start; -+ struct f_raw *raw = filp->private_data; -+ struct raw_request *raw_req; -+ struct usb_request *req; -+ int ret; -+ -+ mutex_lock(&raw->mutex); -+ raw_req = raw->allocated_req; -+ if (raw_req == NULL) { -+ ret = -ENXIO; -+ goto out; -+ } -+ req = raw_req->req; -+ -+ if (size != raw_req->len) { -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ vma->vm_private_data = raw; -+ -+ ret = dma_mmap_coherent(&raw->graw.gadget->dev, vma, req->buf, -+ req->dma, raw_req->len); -+ if (ret < 0) -+ goto out; -+ -+ raw->allocated_req = NULL; -+ -+out: -+ mutex_unlock(&raw->mutex); -+ -+ return 0; -+} -+ -+static int fraw_open(struct inode *inode, struct file *filp) -+{ -+ struct f_raw *raw; -+ -+ raw = the_raw; -+ filp->private_data = the_raw; -+ -+ return 0; -+} -+ -+static int fraw_release(struct inode *inode, struct file *filp) -+{ -+ struct f_raw *raw = filp->private_data; -+ -+ while (!list_empty(&raw->read_pool)) { -+ struct raw_request *req; -+ -+ req = list_first_entry(&raw->read_pool, struct raw_request, -+ list); -+ raw_free_request(raw, req->nr); -+ } -+ raw->nr_reqs = 0; -+ filp->private_data = NULL; -+ -+ return 0; -+} -+ -+static unsigned int fraw_poll(struct file *filp, struct poll_table_struct *pt) -+{ -+ struct f_raw *raw = filp->private_data; -+ struct raw_request *req; -+ int ret = 0; -+ -+ mutex_lock(&raw->mutex); -+ list_for_each_entry(req, &raw->read_pool, list) { -+ poll_wait(filp, &req->wait, pt); -+ -+ if (req->completed) { -+ ret = POLLIN | POLLRDNORM; -+ break; -+ } -+ } -+ mutex_unlock(&raw->mutex); -+ -+ return ret; -+} -+ -+static struct file_operations fraw_fops = { -+ .owner = THIS_MODULE, -+ .open = fraw_open, -+ .release = fraw_release, -+ .unlocked_ioctl = fraw_ioctl, -+ .mmap = fraw_mmap, -+ .poll = fraw_poll, -+}; -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int __init raw_bind(struct usb_configuration *c, struct usb_function *f) -+{ -+ struct usb_composite_dev *cdev = c->cdev; -+ struct f_raw *raw = func_to_raw(f); -+ struct usb_ep *ep; -+ -+ int status; -+ -+ /* allocate instance-specific interface IDs and patch descriptors */ -+ -+ status = usb_interface_id(c, f); -+ if (status < 0) -+ goto fail; -+ raw->intf_id = status; -+ -+ raw_intf.bInterfaceNumber = status; -+ -+ /* allocate instance-specific endpoints */ -+ -+ ep = usb_ep_autoconfig(cdev->gadget, &raw_fs_ep_out_desc); -+ if (!ep) -+ goto fail; -+ raw->out = ep; -+ ep->driver_data = cdev; /* claim */ -+ -+ /* copy descriptors and track endpoint copies */ -+ f->descriptors = usb_copy_descriptors(fs_function); -+ -+ raw->fs.raw_out = usb_find_endpoint(fs_function, -+ f->descriptors, &raw_fs_ep_out_desc); -+ -+ /* support all relevant hardware speeds... 
we expect that when -+ * hardware is dual speed, all bulk-capable endpoints work at -+ * both speeds -+ */ -+ if (gadget_is_dualspeed(c->cdev->gadget)) { -+ raw_hs_ep_out_desc.bEndpointAddress = -+ raw_fs_ep_out_desc.bEndpointAddress; -+ -+ /* copy descriptors and track endpoint copies */ -+ f->hs_descriptors = usb_copy_descriptors(hs_function); -+ -+ raw->hs.raw_out = usb_find_endpoint(hs_function, -+ f->hs_descriptors, &raw_hs_ep_out_desc); -+ } -+ -+ INIT_LIST_HEAD(&raw->read_pool); -+ mutex_init(&raw->mutex); -+ -+ /* create device nodes */ -+ raw->class = class_create(THIS_MODULE, "fraw"); -+ device_create(raw->class, &cdev->gadget->dev, -+ MKDEV(raw->graw.major, 0), raw, "%s", f->name); -+ -+ cdev->gadget->dev.dma_mask = &raw_dmamask; -+ cdev->gadget->dev.coherent_dma_mask = DMA_64BIT_MASK; -+ raw->graw.gadget = cdev->gadget; -+ the_raw = raw; -+ -+ DBG(cdev, "raw: %s speed OUT/%s\n", -+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", -+ raw->out->name); -+ -+ return 0; -+ -+fail: -+ if (raw->class) -+ class_destroy(raw->class); -+ -+ if (raw->out) -+ raw->out->driver_data = NULL; -+ -+ ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); -+ -+ return status; -+} -+ -+static void raw_unbind(struct usb_configuration *c, struct usb_function *f) -+{ -+ struct f_raw *raw = func_to_raw(f); -+ -+ if (gadget_is_dualspeed(c->cdev->gadget)) -+ usb_free_descriptors(f->hs_descriptors); -+ usb_free_descriptors(f->descriptors); -+ device_destroy(raw->class, MKDEV(raw->graw.major, 0)); -+ class_destroy(raw->class); -+ kfree(raw); -+} -+ -+/** -+ * raw_bind_config - add a RAW function to a configuration -+ * @c: the configuration to support the RAW instance -+ * Context: single threaded during gadget setup -+ * -+ * Returns zero on success, else negative errno. -+ */ -+static int __init raw_bind_config(struct usb_configuration *c) -+{ -+ struct f_raw *raw; -+ int status; -+ -+ if (raw_string_defs[RAW_INTF_IDX].id == 0) { -+ status = usb_string_id(c->cdev); -+ if (status < 0) -+ return status; -+ -+ raw_string_defs[RAW_INTF_IDX].id = status; -+ raw_intf.iInterface = status; -+ } -+ -+ /* allocate and initialize one new instance */ -+ raw = kzalloc(sizeof(*raw), GFP_KERNEL); -+ if (!raw) -+ return -ENOMEM; -+ -+ raw->graw.func.name = "raw"; -+ raw->graw.func.strings = raw_strings; -+ /* descriptors are per-instance copies */ -+ raw->graw.func.bind = raw_bind; -+ raw->graw.func.unbind = raw_unbind; -+ raw->graw.func.set_alt = raw_set_alt; -+ raw->graw.func.disable = raw_disable; -+ -+ status = usb_add_function(c, &raw->graw.func); -+ if (status) -+ kfree(raw); -+ -+ return status; -+} -+ -+/** -+ * graw_setup - initialize character driver for one rx -+ * @g: gadget to associate with -+ * Contex: may sleep -+ * -+ * Returns negative errno or zero. 
-+ */ -+static int __init graw_setup(struct usb_gadget *g) -+{ -+ struct graw *graw; -+ -+ int status; -+ int major; -+ -+ dev_t dev; -+ -+ if (the_graw) -+ return -EBUSY; -+ -+ graw = kzalloc(sizeof(*graw), GFP_KERNEL); -+ if (!graw) { -+ status = -ENOMEM; -+ goto fail1; -+ } -+ -+ status = alloc_chrdev_region(&dev, 0, 1, "fraw"); -+ if (status) -+ goto fail2; -+ -+ major = MAJOR(dev); -+ -+ cdev_init(&graw->chdev, &fraw_fops); -+ graw->chdev.owner = THIS_MODULE; -+ graw->dev = dev; -+ graw->major = major; -+ -+ status = cdev_add(&graw->chdev, dev, 1); -+ if (status) -+ goto fail3; -+ -+ the_graw = graw; -+ -+ return 0; -+ -+fail3: -+ /* cdev_put(&graw->cdev); */ -+ unregister_chrdev_region(graw->dev, 1); -+ -+fail2: -+ kfree(graw); -+ -+fail1: -+ return status; -+} -+ -+static void __exit graw_cleanup(void) -+{ -+ struct graw *graw = the_graw; -+ -+ if (!graw) -+ return; -+ -+ cdev_del(&graw->chdev); -+ /* cdev_put(&graw->chdev); */ -+ unregister_chrdev_region(graw->dev, 1); -+ kfree(graw); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/Kconfig linux-omap-2.6.28-nokia1/drivers/usb/gadget/Kconfig ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/Kconfig 2011-06-22 13:14:20.983067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/Kconfig 2011-06-22 13:19:33.033063272 +0200 -@@ -208,17 +208,6 @@ config USB_OMAP - default USB_GADGET - select USB_GADGET_SELECTED - --config USB_OTG -- boolean "OTG Support" -- depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD -- help -- The most notable feature of USB OTG is support for a -- "Dual-Role" device, which can act as either a device -- or a host. The initial role choice can be changed -- later, when two dual-role devices talk to each other. -- -- Select this only if your OMAP board has a Mini-AB connector. -- - config USB_GADGET_PXA25X - boolean "PXA 25x or IXP 4xx" - depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX -@@ -673,6 +662,26 @@ config USB_CDC_COMPOSITE - Say "y" to link the driver statically, or "m" to build a - dynamically linked module. - -+config USB_G_NOKIA -+ tristate "Nokia composite gadget" -+ depends on PHONET -+ help -+ The Nokia composite gadget provides support for acm, obex -+ and phonet in only one gadget driver. -+ -+ It's only really useful for RX51 hardware. If you're building -+ a kernel for RX51, say Y or M here. If unsure, say N. -+ -+config USB_G_SOFTUPD -+ tristate "Nokia Firmware Upgrade Composite Kernel Driver" -+ depends on PHONET -+ help -+ The Nokia upgrade gadget provides support for Firmware upgrading -+ using the userland application softupd. -+ -+ It's only really useful for RX51 hardware. If you're building -+ a kernel for RX51, say Y or M here. If unsure, say N. -+ - # put drivers that need isochronous transfer support (for audio - # or video class gadget drivers), or specific hardware, here. 
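For context, the f_raw function above is driven entirely from userland through its character device: RAW_ALLOC_REQUEST returns a request number, the DMA-coherent buffer is then mmap()ed, RAW_QUEUE_REQUEST arms the OUT transfer, poll() signals completion, and RAW_GET_REQUEST_STATUS reports the result. The sketch below walks that flow; the uapi header name, the /dev/raw node (whatever udev creates from the "fraw" class) and the exact field types are assumptions, while the ioctl names and struct fields mirror fraw_ioctl() above.

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/usb/f_raw.h>	/* assumed header exporting the RAW_* ioctls */

int main(void)
{
	struct raw_queue_request qr;
	struct raw_request_status st;
	struct pollfd pfd;
	void *buf;
	int fd, nr;

	fd = open("/dev/raw", O_RDWR);		/* node name assumed */
	if (fd < 0)
		return 1;

	/* size must be a multiple of PAGE_SIZE and no more than MAX_REQUEST_LEN */
	nr = ioctl(fd, RAW_ALLOC_REQUEST, 4096);
	buf = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	qr.nr = nr;
	qr.nr_bytes = 4096;
	ioctl(fd, RAW_QUEUE_REQUEST, &qr);	/* arm the OUT transfer */

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);			/* wait until a request completes */

	st.nr = nr;
	ioctl(fd, RAW_GET_REQUEST_STATUS, &st);
	printf("request %d: status %d, %lu bytes received\n",
	       st.nr, st.status, (unsigned long)st.nr_bytes);

	munmap(buf, 4096);
	ioctl(fd, RAW_FREE_REQUEST, nr);
	close(fd);
	return 0;
}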
- -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/Makefile linux-omap-2.6.28-nokia1/drivers/usb/gadget/Makefile ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/Makefile 2011-06-22 13:14:20.983067708 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/Makefile 2011-06-22 13:19:33.033063272 +0200 -@@ -31,6 +31,8 @@ gadgetfs-objs := inode.o - g_file_storage-objs := file_storage.o - g_printer-objs := printer.o - g_cdc-objs := cdc2.o -+g_nokia-objs += nokia.o -+g_softupd-objs += softupd.o - - obj-$(CONFIG_USB_ZERO) += g_zero.o - obj-$(CONFIG_USB_ETH) += g_ether.o -@@ -40,4 +42,6 @@ obj-$(CONFIG_USB_G_SERIAL) += g_serial.o - obj-$(CONFIG_USB_G_PRINTER) += g_printer.o - obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o - obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o -+obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o -+obj-$(CONFIG_USB_G_SOFTUPD) += g_softupd.o - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/nokia.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/nokia.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/nokia.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/nokia.c 2011-06-22 13:19:33.033063272 +0200 -@@ -0,0 +1,263 @@ -+/* -+ * nokia.c -- Nokia Composite Gadget Driver -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Contact: Felipe Balbi -+ * -+ * This gadget driver borrows from serial.c which is: -+ * -+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) -+ * Copyright (C) 2008 by David Brownell -+ * Copyright (C) 2008 by Nokia Corporation -+ * -+ * This software is distributed under the terms of the GNU General -+ * Public License ("GPL") as published by the Free Software Foundation, -+ * version 2 of that License. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "u_serial.h" -+#include "u_ether.h" -+#include "u_phonet.h" -+#include "gadget_chips.h" -+ -+#include -+ -+/* Defines */ -+ -+#define NOKIA_VERSION_NUM 0x0211 -+#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)" -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* -+ * Kbuild is not very cooperative with respect to linking separately -+ * compiled library objects into one module. So for now we won't use -+ * separate compilation ... ensuring init/exit sections work to shrink -+ * the runtime footprint, and giving us at least some parts of what -+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
-+ */ -+#include "composite.c" -+#include "usbstring.c" -+#include "config.c" -+#include "epautoconf.c" -+ -+#include "f_acm.c" -+#include "f_ecm.c" -+#include "f_obex.c" -+#include "f_serial.c" -+#include "f_phonet.c" -+#include "u_serial.c" -+#include "u_ether.c" -+ -+/*-------------------------------------------------------------------------*/ -+ -+#define NOKIA_VENDOR_ID 0x0421 /* Nokia */ -+#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */ -+ -+/* string IDs are assigned dynamically */ -+ -+#define STRING_MANUFACTURER_IDX 0 -+#define STRING_PRODUCT_IDX 1 -+#define STRING_DESCRIPTION_IDX 2 -+ -+static char manufacturer_nokia[] = "Nokia"; -+static const char product_nokia[] = NOKIA_LONG_NAME; -+static const char description_nokia[] = "PC-Suite Configuration"; -+ -+static struct usb_string strings_dev[] = { -+ [STRING_MANUFACTURER_IDX].s = manufacturer_nokia, -+ [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME, -+ [STRING_DESCRIPTION_IDX].s = description_nokia, -+ { } /* end of list */ -+}; -+ -+static struct usb_gadget_strings stringtab_dev = { -+ .language = 0x0409, /* en-us */ -+ .strings = strings_dev, -+}; -+ -+static struct usb_gadget_strings *dev_strings[] = { -+ &stringtab_dev, -+ NULL, -+}; -+ -+static struct usb_device_descriptor device_desc = { -+ .bLength = USB_DT_DEVICE_SIZE, -+ .bDescriptorType = USB_DT_DEVICE, -+ .bcdUSB = __constant_cpu_to_le16(0x0200), -+ .bDeviceClass = USB_CLASS_COMM, -+ .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID), -+ .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID), -+ /* .iManufacturer = DYNAMIC */ -+ /* .iProduct = DYNAMIC */ -+ .bNumConfigurations = 1, -+}; -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* Module */ -+MODULE_DESCRIPTION("Nokia composite gadget driver for N900"); -+MODULE_AUTHOR("Felipe Balbi"); -+MODULE_LICENSE("GPL"); -+ -+/*-------------------------------------------------------------------------*/ -+ -+static u8 hostaddr[ETH_ALEN]; -+ -+static int __init nokia_bind_config(struct usb_configuration *c) -+{ -+ int status = 0; -+ -+ status = phonet_bind_config(c); -+ if (status) -+ printk(KERN_DEBUG "could not bind phonet config\n"); -+ -+ status = obex_bind_config(c, 0); -+ if (status) -+ printk(KERN_DEBUG "could not bind obex config %d\n", 0); -+ -+ status = obex_bind_config(c, 1); -+ if (status) -+ printk(KERN_DEBUG "could not bind obex config %d\n", 0); -+ -+ status = acm_bind_config(c, 2); -+ if (status) -+ printk(KERN_DEBUG "could not bind acm config\n"); -+ -+ status = ecm_bind_config(c, hostaddr); -+ if (status) -+ printk(KERN_DEBUG "could not bind ecm config\n"); -+ -+ return status; -+} -+ -+static struct usb_configuration nokia_config_500ma_driver = { -+ .label = "nokia1", -+ .bind = nokia_bind_config, -+ .bConfigurationValue = 1, -+ /* .iConfiguration = DYNAMIC */ -+ .bmAttributes = USB_CONFIG_ATT_ONE, -+ .bMaxPower = 250, /* 500mA */ -+}; -+ -+static struct usb_configuration nokia_config_100ma_driver = { -+ .label = "nokia2", -+ .bind = nokia_bind_config, -+ .bConfigurationValue = 2, -+ /* .iConfiguration = DYNAMIC */ -+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, -+ .bMaxPower = 50, /* 100 mA */ -+}; -+ -+static int __init nokia_bind(struct usb_composite_dev *cdev) -+{ -+ int gcnum; -+ struct usb_gadget *gadget = cdev->gadget; -+ int status; -+ -+ status = gphonet_setup(cdev->gadget); -+ if (status < 0) -+ goto err_phonet; -+ -+ status = gserial_setup(cdev->gadget, 3); -+ if (status < 0) -+ goto err_serial; -+ -+ status = gether_setup(cdev->gadget, 
hostaddr); -+ if (status < 0) -+ goto err_ether; -+ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_MANUFACTURER_IDX].id = status; -+ -+ device_desc.iManufacturer = status; -+ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_PRODUCT_IDX].id = status; -+ -+ device_desc.iProduct = status; -+ -+ /* config description */ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_DESCRIPTION_IDX].id = status; -+ -+ nokia_config_500ma_driver.iConfiguration = status; -+ nokia_config_100ma_driver.iConfiguration = status; -+ -+ /* set up other descriptors */ -+ gcnum = usb_gadget_controller_number(gadget); -+ if (gcnum >= 0) -+ device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM); -+ else { -+ /* this should only work with hw that supports altsettings -+ * and several endpoints, anything else, panic. -+ */ -+ pr_err("nokia_bind: controller '%s' not recognized\n", -+ gadget->name); -+ goto err_usb; -+ } -+ -+ /* finaly register the configuration */ -+ status = usb_add_config(cdev, &nokia_config_500ma_driver); -+ if (status < 0) -+ goto err_usb; -+ -+ status = usb_add_config(cdev, &nokia_config_100ma_driver); -+ if (status < 0) -+ goto err_usb; -+ -+ INFO(cdev, "%s\n", NOKIA_LONG_NAME); -+ -+ return 0; -+ -+err_usb: -+ gether_cleanup(); -+err_ether: -+ gserial_cleanup(); -+err_serial: -+ gphonet_cleanup(); -+err_phonet: -+ return status; -+} -+ -+static int __exit nokia_unbind(struct usb_composite_dev *cdev) -+{ -+ gphonet_cleanup(); -+ gserial_cleanup(); -+ gether_cleanup(); -+ -+ return 0; -+} -+ -+static struct usb_composite_driver nokia_driver = { -+ .name = "g_nokia", -+ .dev = &device_desc, -+ .strings = dev_strings, -+ .bind = nokia_bind, -+ .unbind = __exit_p(nokia_unbind), -+}; -+ -+static int __init nokia_init(void) -+{ -+ return usb_composite_register(&nokia_driver); -+} -+module_init(nokia_init); -+ -+static void __exit nokia_cleanup(void) -+{ -+ usb_composite_unregister(&nokia_driver); -+} -+module_exit(nokia_cleanup); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/softupd.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/softupd.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/softupd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/softupd.c 2011-06-22 13:19:33.033063272 +0200 -@@ -0,0 +1,243 @@ -+/* -+ * softupd.c -- Nokia Software Update Gadget -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Contact: Felipe Balbi -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "gadget_chips.h" -+ -+/* Defines */ -+ -+#define NOKIA_VERSION_NUM 0x0100 -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* -+ * Kbuild is not very cooperative with respect to linking separately -+ * compiled library objects into one module. So for now we won't use -+ * separate compilation ... ensuring init/exit sections work to shrink -+ * the runtime footprint, and giving us at least some parts of what -+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would. -+ */ -+#include "composite.c" -+#include "usbstring.c" -+#include "config.c" -+#include "epautoconf.c" -+ -+#include "u_phonet.h" -+ -+#include "f_raw.c" -+#include "f_phonet.c" -+ -+/*-------------------------------------------------------------------------*/ -+ -+#define NOKIA_VENDOR_ID 0x0421 /* Nokia */ -+#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Update Gadget */ -+ -+/* string IDs are assigned dynamically */ -+ -+#define STRING_MANUFACTURER_IDX 0 -+#define STRING_PRODUCT_IDX 1 -+#define STRING_DESCRIPTION_IDX 2 -+#define STRING_SERIAL_IDX 3 -+ -+static char manufacturer_nokia[] = "Nokia"; -+static const char product_nokia[] = "N900 (PC-Suite Mode)"; -+static const char description_nokia[] = "Firmware Upgrade Configuration"; -+ -+static struct usb_string strings_dev[] = { -+ [STRING_MANUFACTURER_IDX].s = manufacturer_nokia, -+ [STRING_PRODUCT_IDX].s = product_nokia, -+ [STRING_DESCRIPTION_IDX].s = description_nokia, -+ [STRING_SERIAL_IDX].s = "", -+ { } /* end of list */ -+}; -+ -+static struct usb_gadget_strings stringtab_dev = { -+ .language = 0x0409, /* en-us */ -+ .strings = strings_dev, -+}; -+ -+static struct usb_gadget_strings *dev_strings[] = { -+ &stringtab_dev, -+ NULL, -+}; -+ -+static struct usb_device_descriptor device_desc = { -+ .bLength = USB_DT_DEVICE_SIZE, -+ .bDescriptorType = USB_DT_DEVICE, -+ -+ .bcdUSB = __constant_cpu_to_le16(0x0200), -+ -+ .bDeviceClass = USB_CLASS_COMM, -+ .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID), -+ .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID), -+ -+ /* .iManufacturer = DYNAMIC */ -+ /* .iProduct = DYNAMIC */ -+ /* .iSerialNumber = DYNAMIC */ -+ -+ .bNumConfigurations = 1, -+}; -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* Module */ -+MODULE_DESCRIPTION("Nokia Firmware Upgrade Gadget Driver"); -+MODULE_AUTHOR("Felipe Balbi"); -+MODULE_LICENSE("GPL"); -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int __init softupd_bind_config(struct usb_configuration *c) -+{ -+ int status = 0; -+ -+ status = phonet_bind_config(c); -+ if (status) { -+ struct usb_composite_dev *cdev = c->cdev; -+ -+ dev_err(&cdev->gadget->dev, "could not bind phonet config\n"); -+ } -+ -+ status = raw_bind_config(c); -+ if (status) -+ dev_err(&c->cdev->gadget->dev, "could not bind raw config\n"); -+ -+ return status; -+} -+ -+static struct usb_configuration softupd_config_driver = { -+ .label = "softupd", -+ .bind = softupd_bind_config, -+ -+ .bmAttributes = USB_CONFIG_ATT_ONE, -+ .bMaxPower = 250, -+ -+ /* .iConfiguration = DYNAMIC */ -+ .bConfigurationValue = 1, -+}; -+ -+static int __init softupd_bind(struct usb_composite_dev *cdev) -+{ -+ struct usb_gadget *gadget = 
cdev->gadget; -+ int status; -+ int gcnum; -+ -+ status = gphonet_setup(cdev->gadget); -+ if (status < 0) -+ goto err_phonet; -+ -+ status = graw_setup(cdev->gadget); -+ if (status < 0) -+ goto err_raw; -+ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_MANUFACTURER_IDX].id = status; -+ -+ device_desc.iManufacturer = status; -+ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_PRODUCT_IDX].id = status; -+ -+ device_desc.iProduct = status; -+ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_SERIAL_IDX].id = status; -+ -+ device_desc.iSerialNumber = status; -+ -+ /* config description */ -+ status = usb_string_id(cdev); -+ if (status < 0) -+ goto err_usb; -+ strings_dev[STRING_DESCRIPTION_IDX].id = status; -+ -+ softupd_config_driver.iConfiguration = status; -+ -+ /* set up other descriptors */ -+ gcnum = usb_gadget_controller_number(gadget); -+ if (gcnum >= 0) -+ device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM); -+ else { -+ /* this should only work with hw that supports altsettings -+ * and several endpoints, anything else, panic. -+ */ -+ pr_err("%s: controller '%s' not recognized\n", -+ __func__, gadget->name); -+ goto err_usb; -+ } -+ -+ /* finaly register the configuration */ -+ status = usb_add_config(cdev, &softupd_config_driver); -+ if (status < 0) -+ goto err_usb; -+ -+ INFO(cdev, "%s\n", product_nokia); -+ -+ return 0; -+ -+err_usb: -+ graw_cleanup(); -+ -+err_raw: -+ gphonet_cleanup(); -+ -+err_phonet: -+ return status; -+} -+ -+static int softupd_unbind(struct usb_composite_dev *cdev) -+{ -+ graw_cleanup(); -+ gphonet_cleanup(); -+ -+ return 0; -+} -+ -+static struct usb_composite_driver softupd_driver = { -+ .name = "g_softupd", -+ .dev = &device_desc, -+ .strings = dev_strings, -+ .bind = softupd_bind, -+ .unbind = softupd_unbind, -+}; -+ -+static int __init softupd_init(void) -+{ -+ return usb_composite_register(&softupd_driver); -+} -+module_init(softupd_init); -+ -+static void __exit softupd_cleanup(void) -+{ -+ usb_composite_unregister(&softupd_driver); -+} -+module_exit(softupd_cleanup); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/u_ether.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/u_ether.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/u_ether.c 2011-06-22 13:14:21.023067707 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/u_ether.c 2011-06-22 13:19:33.033063272 +0200 -@@ -116,7 +116,6 @@ static inline int qlen(struct usb_gadget - #undef DBG - #undef VDBG - #undef ERROR --#undef INFO - - #define xprintk(d, level, fmt, args...) \ - printk(level "%s: " fmt , (d)->net->name , ## args) -@@ -139,7 +138,7 @@ static inline int qlen(struct usb_gadget - - #define ERROR(dev, fmt, args...) \ - xprintk(dev , KERN_ERR , fmt , ## args) --#define INFO(dev, fmt, args...) \ -+#define ETH_INFO(dev, fmt, args...) 
\ - xprintk(dev , KERN_INFO , fmt , ## args) - - /*-------------------------------------------------------------------------*/ -@@ -789,8 +788,8 @@ int __init gether_setup(struct usb_gadge - } else { - DECLARE_MAC_BUF(tmp); - -- INFO(dev, "MAC %s\n", print_mac(tmp, net->dev_addr)); -- INFO(dev, "HOST MAC %s\n", print_mac(tmp, dev->host_mac)); -+ ETH_INFO(dev, "MAC %s\n", print_mac(tmp, net->dev_addr)); -+ ETH_INFO(dev, "HOST MAC %s\n", print_mac(tmp, dev->host_mac)); - - the_dev = dev; - } -@@ -809,6 +808,8 @@ void gether_cleanup(void) - if (!the_dev) - return; - -+ netif_stop_queue(the_dev->net); -+ netif_carrier_off(the_dev->net); - unregister_netdev(the_dev->net); - free_netdev(the_dev->net); - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/u_phonet.h linux-omap-2.6.28-nokia1/drivers/usb/gadget/u_phonet.h ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/u_phonet.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/u_phonet.h 2011-06-22 13:19:33.033063272 +0200 -@@ -0,0 +1,21 @@ -+/* -+ * u_phonet.h - interface to Phonet -+ * -+ * Copyright (C) 2007-2008 by Nokia Corporation -+ * -+ * This software is distributed under the terms of the GNU General -+ * Public License ("GPL") as published by the Free Software Foundation, -+ * either version 2 of that License or (at your option) any later version. -+ */ -+ -+#ifndef __U_PHONET_H -+#define __U_PHONET_H -+ -+#include -+#include -+ -+int gphonet_setup(struct usb_gadget *gadget); -+int phonet_bind_config(struct usb_configuration *c); -+void gphonet_cleanup(void); -+ -+#endif /* __U_PHONET_H */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/gadget/u_serial.c linux-omap-2.6.28-nokia1/drivers/usb/gadget/u_serial.c ---- linux-omap-2.6.28-omap1/drivers/usb/gadget/u_serial.c 2011-06-22 13:14:21.023067707 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/gadget/u_serial.c 2011-06-22 13:19:33.033063272 +0200 -@@ -81,6 +81,7 @@ - /* circular buffer */ - struct gs_buf { - unsigned buf_size; -+ unsigned buf_full; - char *buf_buf; - char *buf_get; - char *buf_put; -@@ -110,6 +111,7 @@ struct gs_port { - struct list_head write_pool; - struct gs_buf port_write_buf; - wait_queue_head_t drain_wait; /* wait while writes drain */ -+ wait_queue_head_t full_wait; /* wait while buffer is full */ - - /* REVISIT this state ... 
*/ - struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ -@@ -124,6 +126,7 @@ static struct portmaster { - static unsigned n_ports; - - #define GS_CLOSE_TIMEOUT 15 /* seconds */ -+#define GS_FULL_TIMEOUT 2 /* seconds */ - - - -@@ -151,6 +154,7 @@ static int gs_buf_alloc(struct gs_buf *g - return -ENOMEM; - - gb->buf_size = size; -+ gb->buf_full = false; - gb->buf_put = gb->buf_buf; - gb->buf_get = gb->buf_buf; - -@@ -166,6 +170,7 @@ static void gs_buf_free(struct gs_buf *g - { - kfree(gb->buf_buf); - gb->buf_buf = NULL; -+ gb->buf_full = false; - } - - /* -@@ -176,6 +181,7 @@ static void gs_buf_free(struct gs_buf *g - static void gs_buf_clear(struct gs_buf *gb) - { - gb->buf_get = gb->buf_put; -+ gb->buf_full = false; - /* equivalent to a get of all data available */ - } - -@@ -187,6 +193,9 @@ static void gs_buf_clear(struct gs_buf * - */ - static unsigned gs_buf_data_avail(struct gs_buf *gb) - { -+ if (gb->buf_full) -+ return gb->buf_size; -+ - return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size; - } - -@@ -198,7 +207,13 @@ static unsigned gs_buf_data_avail(struct - */ - static unsigned gs_buf_space_avail(struct gs_buf *gb) - { -- return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size; -+ if (gb->buf_full) -+ return 0; -+ -+ if (gb->buf_get == gb->buf_put) -+ return gb->buf_size; -+ -+ return (gb->buf_size + gb->buf_get - gb->buf_put) % gb->buf_size; - } - - /* -@@ -215,8 +230,10 @@ gs_buf_put(struct gs_buf *gb, const char - unsigned len; - - len = gs_buf_space_avail(gb); -- if (count > len) -+ if (count >= len) { - count = len; -+ gb->buf_full = true; -+ } - - if (count == 0) - return 0; -@@ -269,6 +286,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, - else /* count == len */ - gb->buf_get = gb->buf_buf; - } -+ gb->buf_full = false; - - return count; - } -@@ -325,13 +343,10 @@ void gs_free_req(struct usb_ep *ep, stru - static unsigned - gs_send_packet(struct gs_port *port, char *packet, unsigned size) - { -- unsigned len; - -- len = gs_buf_data_avail(&port->port_write_buf); -- if (len < size) -- size = len; -- if (size != 0) -- size = gs_buf_get(&port->port_write_buf, packet, size); -+ size = gs_buf_get(&port->port_write_buf, packet, size); -+ wake_up_interruptible(&port->full_wait); -+ - return size; - } - -@@ -371,6 +386,7 @@ __acquires(&port->port_lock) - - req->length = len; - list_del(&req->list); -+ req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0); - - pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", - port->port_num, len, *((u8 *)req->buf), -@@ -889,7 +905,17 @@ static int gs_write(struct tty_struct *t - pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n", - port->port_num, tty, count); - -+ if (port->port_write_buf.buf_full) -+ wait_event_interruptible_timeout(port->full_wait, -+ !port->port_write_buf.buf_full, -+ GS_FULL_TIMEOUT * HZ); -+ - spin_lock_irqsave(&port->port_lock, flags); -+ if (unlikely(port->port_write_buf.buf_buf == NULL)) { -+ spin_unlock_irqrestore(&port->port_lock, flags); -+ return 0; -+ } -+ - if (count) - count = gs_buf_put(&port->port_write_buf, buf, count); - /* treat count == 0 as flush_chars() */ -@@ -1026,6 +1052,7 @@ gs_port_alloc(unsigned port_num, struct - spin_lock_init(&port->port_lock); - init_waitqueue_head(&port->close_wait); - init_waitqueue_head(&port->drain_wait); -+ init_waitqueue_head(&port->full_wait); - - tasklet_init(&port->push, gs_rx_push, (unsigned long) port); - -@@ -1326,5 +1353,6 @@ void gserial_disconnect(struct gserial * - gs_free_requests(gser->out, &port->read_pool); - 
gs_free_requests(gser->out, &port->read_queue); - gs_free_requests(gser->in, &port->write_pool); -+ wake_up_interruptible(&port->full_wait); - spin_unlock_irqrestore(&port->port_lock, flags); - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/blackfin.c linux-omap-2.6.28-nokia1/drivers/usb/musb/blackfin.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/blackfin.c 2011-06-22 13:14:21.153067707 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/blackfin.c 2011-06-22 13:19:33.083063271 +0200 -@@ -143,7 +143,7 @@ static void musb_conn_timer_handler(unsi - u16 val; - - spin_lock_irqsave(&musb->lock, flags); -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_IDLE: - case OTG_STATE_A_WAIT_BCON: - /* Start a new session */ -@@ -154,7 +154,7 @@ static void musb_conn_timer_handler(unsi - val = musb_readw(musb->mregs, MUSB_DEVCTL); - if (!(val & MUSB_DEVCTL_BDEVICE)) { - gpio_set_value(musb->config->gpio_vrsel, 1); -- musb->xceiv.state = OTG_STATE_A_WAIT_BCON; -+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON; - } else { - gpio_set_value(musb->config->gpio_vrsel, 0); - -@@ -232,6 +232,7 @@ void musb_platform_set_mode(struct musb - - int __init musb_platform_init(struct musb *musb) - { -+ struct otg_transceiver xceiv; - - /* - * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE -@@ -285,15 +286,23 @@ int __init musb_platform_init(struct mus - EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); - SSYNC(); - -+ memset(&xceiv, 0, sizeof(xceiv)); -+ xceiv.label = "blackfin"; -+ xceiv.dev = musb->controller; -+ - if (is_host_enabled(musb)) { - musb->board_set_vbus = bfin_set_vbus; - setup_timer(&musb_conn_timer, - musb_conn_timer_handler, (unsigned long) musb); - } - if (is_peripheral_enabled(musb)) -- musb->xceiv.set_power = bfin_set_power; -+ xceiv.set_power = bfin_set_power; -+ -+ otg_set_transceiver(&xceiv); -+ musb->xceiv = &xceiv; - - musb->isr = blackfin_interrupt; -+ musb->suspendm = true; - - return 0; - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/davinci.c linux-omap-2.6.28-nokia1/drivers/usb/musb/davinci.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/davinci.c 2011-06-22 13:14:21.163067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/davinci.c 2011-06-22 13:19:33.083063271 +0200 -@@ -32,9 +32,9 @@ - #include - #include - --#include --#include --#include -+#include -+#include -+#include - #include - - #include "musb_core.h" -@@ -199,7 +199,7 @@ static void otg_timer(unsigned long _mus - DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); - - spin_lock_irqsave(&musb->lock, flags); -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_WAIT_VFALL: - /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL - * seems to mis-handle session "start" otherwise (or in our -@@ -210,7 +210,7 @@ static void otg_timer(unsigned long _mus - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - break; - } -- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, - MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); - break; -@@ -235,7 +235,7 @@ static void otg_timer(unsigned long _mus - if (devctl & MUSB_DEVCTL_BDEVICE) - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - else -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - break; - default: - break; -@@ -315,21 +315,21 @@ static irqreturn_t davinci_interrupt(int - * to stop registering in devctl. 
- */ - musb->int_usb &= ~MUSB_INTR_VBUSERROR; -- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - WARNING("VBUS error workaround (delay coming)\n"); - } else if (is_host_enabled(musb) && drvvbus) { - musb->is_active = 1; - MUSB_HST_MODE(musb); -- musb->xceiv.default_a = 1; -- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; -+ musb->xceiv->default_a = 1; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; - portstate(musb->port1_status |= USB_PORT_STAT_POWER); - del_timer(&otg_workaround); - } else { - musb->is_active = 0; - MUSB_DEV_MODE(musb); -- musb->xceiv.default_a = 0; -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->default_a = 0; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); - } - -@@ -351,7 +351,7 @@ static irqreturn_t davinci_interrupt(int - - /* poll for ID change */ - if (is_otg_enabled(musb) -- && musb->xceiv.state == OTG_STATE_B_IDLE) -+ && musb->xceiv->state == OTG_STATE_B_IDLE) - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - - spin_unlock_irqrestore(&musb->lock, flags); -@@ -378,6 +378,7 @@ int musb_platform_set_mode(struct musb * - - int __init musb_platform_init(struct musb *musb) - { -+ struct otg_transceiver xceiv; - void __iomem *tibase = musb->ctrl_base; - u32 revision; - -@@ -395,6 +396,12 @@ int __init musb_platform_init(struct mus - return -ENODEV; - #endif - -+ memset(&xceiv, 0, sizeof(xceiv)); -+ xceiv.label = "davinci"; -+ xceiv.dev = musb->controller; -+ otg_set_transceiver(&xceiv); -+ musb->xceiv = &xceiv; -+ - /* returns zero if e.g. not clocked */ - revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); - if (revision == 0) -@@ -421,6 +428,8 @@ int __init musb_platform_init(struct mus - musb_readb(tibase, DAVINCI_USB_CTRL_REG)); - - musb->isr = davinci_interrupt; -+ musb->suspendm = true; -+ - return 0; - } - -@@ -432,7 +441,7 @@ int musb_platform_exit(struct musb *musb - davinci_source_power(musb, 0 /*off*/, 1); - - /* delay, to avoid problems with module reload */ -- if (is_host_enabled(musb) && musb->xceiv.default_a) { -+ if (is_host_enabled(musb) && musb->xceiv->default_a) { - int maxdelay = 30; - u8 devctl, warn = 0; - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/isp1704.h linux-omap-2.6.28-nokia1/drivers/usb/musb/isp1704.h ---- linux-omap-2.6.28-omap1/drivers/usb/musb/isp1704.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/isp1704.h 2011-06-22 13:19:33.083063271 +0200 -@@ -0,0 +1,81 @@ -+/* -+ * isp1704.h - ISP 1704 Register -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED -+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN -+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF -+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ */ -+ -+#ifndef __ISP1704_H__ -+#define __ISP1704_H__ -+ -+#define ISP1704_VENDOR_ID_LOW 0x00 -+#define ISP1704_VENDOR_ID_HIGH 0x01 -+#define ISP1704_PRODUCT_ID_LOW 0x02 -+#define ISP1704_PRODUCT_ID_HIGH 0x03 -+#define ISP1704_FUNC_CTRL 0x04 -+#define ISP1704_OTG_CTRL 0x0a -+#define ISP1704_USB_INTRISE 0x0d -+#define ISP1704_USB_INTFALL 0x10 -+#define ISP1704_DEBUG 0x15 -+#define ISP1704_SCRATCH 0x16 -+#define ISP1704_PWR_CTRL 0x3d -+ -+/* Function control */ -+#define ISP1704_FUNC_CTRL_FULL_SPEED (1 << 0) -+#define ISP1704_FUNC_CTRL_XCVRSELECT 0x3 -+#define ISP1704_FUNC_CTRL_XCVRSELECT_SHIFT (1 << 0) -+#define ISP1704_FUNC_CTRL_TERMSELECT (1 << 2) -+#define ISP1704_FUNC_CTRL_OPMODE (1 << 3) -+#define ISP1704_FUNC_CTRL_OPMODE_SHIFT 3 -+#define ISP1704_FUNC_CTRL_RESET (1 << 5) -+#define ISP1704_FUNC_CTRL_SUSPENDM (1 << 6) -+ -+/* OTG Control */ -+#define ISP1704_OTG_CTRL_IDPULLUP (1 << 0) -+#define ISP1704_OTG_CTRL_DP_PULLDOWN (1 << 1) -+#define ISP1704_OTG_CTRL_DM_PULLDOWN (1 << 2) -+#define ISP1704_OTG_CTRL_DISCHRG_VBUS (1 << 3) -+#define ISP1704_OTG_CTRL_CHRG_VBUS (1 << 4) -+#define ISP1704_OTG_CTRL_DRV_VBUS_EXT (1 << 6) -+#define ISP1704_OTG_CTRL_USB_EXT_VBUS (1 << 7) -+ -+/* Debug */ -+#define ISP1704_DEBUG_LINESTATE0 (1 << 0) -+#define ISP1704_DEBUG_LINESTATE1 (1 << 1) -+ -+/* Power control */ -+#define ISP1704_PWR_CTRL_SWCTRL (1 << 0) -+#define ISP1704_PWR_CTRL_DET_COMP (1 << 1) -+#define ISP1704_PWR_CTRL_BVALID_RISE (1 << 2) -+#define ISP1704_PWR_CTRL_BVALID_FALL (1 << 3) -+#define ISP1704_PWR_CTRL_DP_WKPU_EN (1 << 4) -+#define ISP1704_PWR_CTRL_VDAT_DET (1 << 5) -+#define ISP1704_PWR_CTRL_DPVSRC_EN (1 << 6) -+#define ISP1704_PWR_CTRL_HWDETECT (1 << 7) -+ -+#endif /* __ISP1704_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/Kconfig linux-omap-2.6.28-nokia1/drivers/usb/musb/Kconfig ---- linux-omap-2.6.28-omap1/drivers/usb/musb/Kconfig 2011-06-22 13:14:21.153067707 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/Kconfig 2011-06-22 13:19:33.083063271 +0200 -@@ -11,6 +11,8 @@ config USB_MUSB_HDRC - depends on (USB || USB_GADGET) && HAVE_CLK - depends on !SUPERH - select TWL4030_USB if MACH_OMAP_3430SDP -+ select TWL4030_USB if MACH_NOKIA_RX51 -+ select USB_OTG_UTILS - tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' - help - Say Y here if your system has a dual role high speed USB -@@ -178,3 +180,11 @@ config USB_MUSB_DEBUG - This enables musb debugging. To set the logging level use the debug - module parameter. 
Starting at level 3, per-transfer (urb, usb_request, - packet, or dma transfer) tracing may kick in. -+ -+config MUSB_PROC_FS -+ depends on USB_MUSB_HDRC && PROC_FS -+ bool "Enabled old musb procfs interface" -+ default n -+ help -+ This enables the old musb procfs entry. Ideally this would move to a userland application -+ talking to musb via usbfs and issueing the correct control messages. -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/Makefile linux-omap-2.6.28-nokia1/drivers/usb/musb/Makefile ---- linux-omap-2.6.28-omap1/drivers/usb/musb/Makefile 2011-06-22 13:14:21.153067707 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/Makefile 2011-06-22 13:19:33.083063271 +0200 -@@ -74,4 +74,9 @@ endif - - ifeq ($(CONFIG_USB_MUSB_DEBUG),y) - EXTRA_CFLAGS += -DDEBUG -+ -+endif -+ -+ifeq ($(CONFIG_MUSB_PROC_FS),y) -+ musb_hdrc-objs += musb_procfs.o - endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_core.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_core.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_core.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_core.c 2011-06-22 13:19:33.083063271 +0200 -@@ -112,10 +112,19 @@ - #include "davinci.h" - #endif - -+static struct musb *the_musb; -+static struct musb_ctx ctx; - -+#ifndef CONFIG_MUSB_PIO_ONLY -+static int __initdata use_dma = 1; -+#else -+static int __initdata use_dma; -+#endif -+module_param(use_dma, bool, 0); -+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); - --unsigned musb_debug; --module_param(musb_debug, uint, S_IRUGO | S_IWUSR); -+unsigned musb_debug = 0; -+module_param_named(debug, musb_debug, uint, S_IRUGO | S_IWUSR); - MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); - - #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" -@@ -133,6 +142,187 @@ MODULE_AUTHOR(DRIVER_AUTHOR); - MODULE_LICENSE("GPL"); - MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); - -+static inline int musb_verify_charger(void __iomem *addr) -+{ -+ u8 r, ret = 0; -+ -+ /* Reset the transceiver */ -+ r = musb_ulpi_readb(addr, ISP1704_FUNC_CTRL); -+ r |= ISP1704_FUNC_CTRL_RESET; -+ musb_ulpi_writeb(addr, ISP1704_FUNC_CTRL, r); -+ msleep(1); -+ -+ /* Set normal mode */ -+ r &= ~(ISP1704_FUNC_CTRL_RESET | (3 << ISP1704_FUNC_CTRL_OPMODE)); -+ musb_ulpi_writeb(addr, ISP1704_FUNC_CTRL, r); -+ -+ /* Clear the DP and DM pull-down bits */ -+ r = musb_ulpi_readb(addr, ISP1704_OTG_CTRL); -+ r &= ~(ISP1704_OTG_CTRL_DP_PULLDOWN | ISP1704_OTG_CTRL_DM_PULLDOWN); -+ musb_ulpi_writeb(addr, ISP1704_OTG_CTRL, r); -+ -+ /* Enable strong pull-up on DP (1.5K) and reset */ -+ r = musb_ulpi_readb(addr, ISP1704_FUNC_CTRL); -+ r |= ISP1704_FUNC_CTRL_TERMSELECT | ISP1704_FUNC_CTRL_RESET; -+ musb_ulpi_writeb(addr, ISP1704_FUNC_CTRL, r); -+ msleep(1); -+ -+ /* Read the line state */ -+ if (musb_ulpi_readb(addr, ISP1704_DEBUG)) { -+ /* Is it a charger or PS2 connection */ -+ -+ /* Enable weak pull-up resistor on DP */ -+ r = musb_ulpi_readb(addr, ISP1704_PWR_CTRL); -+ r |= ISP1704_PWR_CTRL_DP_WKPU_EN; -+ musb_ulpi_writeb(addr, ISP1704_PWR_CTRL, r); -+ -+ /* Disable strong pull-up on DP (1.5K) */ -+ r = musb_ulpi_readb(addr, ISP1704_FUNC_CTRL); -+ r &= ~ISP1704_FUNC_CTRL_TERMSELECT; -+ musb_ulpi_writeb(addr, ISP1704_FUNC_CTRL, r); -+ -+ /* Enable weak pull-down resistor on DM */ -+ r = musb_ulpi_readb(addr, ISP1704_OTG_CTRL); -+ r |= ISP1704_OTG_CTRL_DM_PULLDOWN; -+ musb_ulpi_writeb(addr, ISP1704_OTG_CTRL, r); -+ -+ /* It's a charger if the line states are clear */ -+ if (!(musb_ulpi_readb(addr, 
ISP1704_DEBUG))) -+ ret = 1; -+ -+ /* Disable weak pull-up resistor on DP */ -+ r = musb_ulpi_readb(addr, ISP1704_PWR_CTRL); -+ r &= ~ISP1704_PWR_CTRL_DP_WKPU_EN; -+ musb_ulpi_writeb(addr, ISP1704_PWR_CTRL, r); -+ } else { -+ ret = 1; -+ -+ /* Disable strong pull-up on DP (1.5K) */ -+ r = musb_ulpi_readb(addr, ISP1704_FUNC_CTRL); -+ r &= ~ISP1704_FUNC_CTRL_TERMSELECT; -+ musb_ulpi_writeb(addr, ISP1704_FUNC_CTRL, r); -+ } -+ -+ return ret; -+} -+ -+/* Bad connections with the charger may lead into the transceiver -+ * thinking that a device was just connected. We can wait for 5 ms to -+ * ensure that these cases will generate SUSPEND interrupt and not -+ * RESET. Reading and writing to the transceiver may still cause -+ * RESET interrupts. We mask out RESET/RESUME interrupts to -+ * recover from this. -+ */ -+static int check_charger; -+static int musb_charger_detect(struct musb *musb) -+{ -+ unsigned long timeout; -+ -+ u8 vdat = 0; -+ u8 r; -+ -+ msleep(5); -+ -+ /* Using ulpi with musb is quite tricky. The following code -+ * was written based on the ulpi application note. -+ * -+ * The order of reads and writes and quite important, don't -+ * change it unless you really know what you're doing -+ */ -+ -+ switch(musb->xceiv->state) { -+ case OTG_STATE_B_IDLE: -+ /* we always reset transceiver */ -+ check_charger = 1; -+ -+ /* HACK: ULPI tends to get stuck when booting with -+ * the cable connected -+ */ -+ r = musb_readb(musb->mregs, MUSB_DEVCTL); -+ if ((r & MUSB_DEVCTL_VBUS) -+ == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { -+ musb_save_ctx_and_suspend(&musb->g, 0); -+ musb_restore_ctx_and_resume(&musb->g); -+ if (musb->board && musb->board->set_pm_limits) -+ musb->board->set_pm_limits( -+ musb->controller, 1); -+ } -+ -+ /* disable RESET and RESUME interrupts */ -+ r = musb_readb(musb->mregs, MUSB_INTRUSBE); -+ r &= ~(MUSB_INTR_RESUME | MUSB_INTR_RESET); -+ musb_writeb(musb->mregs, MUSB_INTRUSBE, r); -+ -+ if (musb->board && musb->board->xceiv_reset) -+ musb->board->xceiv_reset(); -+ -+ /* then we resume to sync with controller */ -+ r = musb_readb(musb->mregs, MUSB_POWER); -+ musb_writeb(musb->mregs, MUSB_POWER, -+ r | MUSB_POWER_RESUME); -+ msleep(10); -+ musb_writeb(musb->mregs, MUSB_POWER, -+ r & ~MUSB_POWER_RESUME); -+ -+ /* now we set SW control bit in PWR_CTRL register */ -+ musb_ulpi_writeb(musb->mregs, ISP1704_PWR_CTRL, -+ ISP1704_PWR_CTRL_SWCTRL); -+ -+ r = musb_ulpi_readb(musb->mregs, ISP1704_PWR_CTRL); -+ r |= (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN); -+ -+ /* and finally enable manual charger detection */ -+ musb_ulpi_writeb(musb->mregs, ISP1704_PWR_CTRL, r); -+ msleep(10); -+ -+ timeout = jiffies + msecs_to_jiffies(300); -+ while (!time_after(jiffies, timeout)) { -+ /* Check if there is a charger */ -+ vdat = !!(musb_ulpi_readb(musb->mregs, ISP1704_PWR_CTRL) -+ & ISP1704_PWR_CTRL_VDAT_DET); -+ if (vdat) -+ break; -+ } -+ if (vdat) -+ vdat = musb_verify_charger(musb->mregs); -+ -+ r &= ~ISP1704_PWR_CTRL_DPVSRC_EN; -+ -+ /* Clear DPVSRC_EN, otherwise usb communication doesn't work */ -+ musb_ulpi_writeb(musb->mregs, ISP1704_PWR_CTRL, r); -+ break; -+ -+ default: -+ vdat = 0; -+ break; -+ } -+ -+ if (vdat) { -+ /* REVISIT: This code works only with dedicated chargers! -+ * When support for HOST/HUB chargers is added, don't -+ * forget this. 
-+ */ -+ musb_stop(musb); -+ /* Regulators off */ -+ otg_set_suspend(musb->xceiv, 1); -+ musb->is_charger = 1; -+ } else { -+ /* enable interrupts */ -+ musb_writeb(musb->mregs, MUSB_INTRUSBE, ctx.intrusbe); -+ -+ /* Make sure the communication starts normally */ -+ r = musb_readb(musb->mregs, MUSB_POWER); -+ musb_writeb(musb->mregs, MUSB_POWER, -+ r | MUSB_POWER_RESUME); -+ msleep(10); -+ musb_writeb(musb->mregs, MUSB_POWER, -+ r & ~MUSB_POWER_RESUME); -+ } -+ -+ check_charger = 0; -+ -+ return vdat; -+} - - /*-------------------------------------------------------------------------*/ - -@@ -267,7 +457,7 @@ void musb_load_testpacket(struct musb *m - - const char *otg_state_string(struct musb *musb) - { -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_IDLE: return "a_idle"; - case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; - case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; -@@ -302,11 +492,11 @@ void musb_otg_timer_func(unsigned long d - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_B_WAIT_ACON: - DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); - musb_g_disconnect(musb); -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - musb->is_active = 0; - break; - case OTG_STATE_A_WAIT_BCON: -@@ -331,20 +521,20 @@ void musb_hnp_stop(struct musb *musb) - void __iomem *mbase = musb->mregs; - u8 reg; - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_PERIPHERAL: - case OTG_STATE_A_WAIT_VFALL: - case OTG_STATE_A_WAIT_BCON: - DBG(1, "HNP: Switching back to A-host\n"); - musb_g_disconnect(musb); -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - MUSB_HST_MODE(musb); - musb->is_active = 0; - break; - case OTG_STATE_B_HOST: - DBG(1, "HNP: Disabling HR\n"); - hcd->self.is_b_host = 0; -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - MUSB_DEV_MODE(musb); - reg = musb_readb(mbase, MUSB_POWER); - reg |= MUSB_POWER_SUSPENDM; -@@ -379,15 +569,12 @@ void musb_hnp_stop(struct musb *musb) - * @param power - */ - --#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ -- | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ -- | MUSB_INTR_RESET) -- - static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, - u8 devctl, u8 power) - { - irqreturn_t handled = IRQ_NONE; - void __iomem *mbase = musb->mregs; -+ u8 r; - - DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, - int_usb); -@@ -402,7 +589,7 @@ static irqreturn_t musb_stage0_irq(struc - - if (devctl & MUSB_DEVCTL_HM) { - #ifdef CONFIG_USB_MUSB_HDRC_HCD -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_SUSPEND: - /* remote wakeup? 
later, GetPortStatus - * will stop RESUME signaling -@@ -425,12 +612,12 @@ static irqreturn_t musb_stage0_irq(struc - musb->rh_timer = jiffies - + msecs_to_jiffies(20); - -- musb->xceiv.state = OTG_STATE_A_HOST; -+ musb->xceiv->state = OTG_STATE_A_HOST; - musb->is_active = 1; - usb_hcd_resume_root_hub(musb_to_hcd(musb)); - break; - case OTG_STATE_B_WAIT_ACON: -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - musb->is_active = 1; - MUSB_DEV_MODE(musb); - break; -@@ -441,11 +628,11 @@ static irqreturn_t musb_stage0_irq(struc - } - #endif - } else { -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - #ifdef CONFIG_USB_MUSB_HDRC_HCD - case OTG_STATE_A_SUSPEND: - /* possibly DISCONNECT is upcoming */ -- musb->xceiv.state = OTG_STATE_A_HOST; -+ musb->xceiv->state = OTG_STATE_A_HOST; - usb_hcd_resume_root_hub(musb_to_hcd(musb)); - break; - #endif -@@ -487,12 +674,19 @@ static irqreturn_t musb_stage0_irq(struc - * - go through A_WAIT_VRISE - * - ... to A_WAIT_BCON. - * a_wait_vrise_tmout triggers VBUS_ERROR transitions -+ * NOTE : Spurious SESS_REQ int's detected, which should -+ * be discarded silently. - */ -- musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); -- musb->ep0_stage = MUSB_EP0_START; -- musb->xceiv.state = OTG_STATE_A_IDLE; -- MUSB_HST_MODE(musb); -- musb_set_vbus(musb, 1); -+ if ((devctl & MUSB_DEVCTL_VBUS) -+ && !(devctl & MUSB_DEVCTL_BDEVICE)) { -+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); -+ musb->ep0_stage = MUSB_EP0_START; -+ musb->xceiv->state = OTG_STATE_A_IDLE; -+ MUSB_HST_MODE(musb); -+ musb_set_vbus(musb, 1); -+ } else { -+ DBG(5,"discarding SESSREQ INT: VBUS < SessEnd\n"); -+ } - - handled = IRQ_HANDLED; - } -@@ -516,7 +710,7 @@ static irqreturn_t musb_stage0_irq(struc - * REVISIT: do delays from lots of DEBUG_KERNEL checks - * make trouble here, keeping VBUS < 4.4V ? - */ -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_HOST: - /* recovery is dicey once we've gotten past the - * initial stages of enumeration, but if VBUS -@@ -565,6 +759,58 @@ static irqreturn_t musb_stage0_irq(struc - handled = IRQ_HANDLED; - } - -+ if (int_usb & MUSB_INTR_SUSPEND) { -+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", -+ otg_state_string(musb), devctl, power); -+ handled = IRQ_HANDLED; -+ -+ switch (musb->xceiv->state) { -+#ifdef CONFIG_USB_MUSB_OTG -+ case OTG_STATE_A_PERIPHERAL: -+ /* -+ * We cannot stop HNP here, devctl BDEVICE might be -+ * still set. 
-+ */ -+ break; -+#endif -+ case OTG_STATE_B_IDLE: -+ if (!musb->is_active) -+ break; -+ case OTG_STATE_B_PERIPHERAL: -+ musb_g_suspend(musb); -+ musb->is_active = is_otg_enabled(musb) -+ && musb->xceiv->gadget->b_hnp_enable; -+ if (musb->is_active) { -+#ifdef CONFIG_USB_MUSB_OTG -+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON; -+ DBG(1, "HNP: Setting timer for b_ase0_brst\n"); -+ musb_otg_timer.data = (unsigned long)musb; -+ mod_timer(&musb_otg_timer, jiffies -+ + msecs_to_jiffies(TB_ASE0_BRST)); -+#endif -+ } -+ break; -+ case OTG_STATE_A_WAIT_BCON: -+ if (musb->a_wait_bcon != 0) -+ musb_platform_try_idle(musb, jiffies -+ + msecs_to_jiffies(musb->a_wait_bcon)); -+ break; -+ case OTG_STATE_A_HOST: -+ musb->xceiv->state = OTG_STATE_A_SUSPEND; -+ musb->is_active = is_otg_enabled(musb) -+ && musb->xceiv->host->b_hnp_enable; -+ break; -+ case OTG_STATE_B_HOST: -+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ -+ DBG(1, "REVISIT: SUSPEND as B_HOST\n"); -+ break; -+ default: -+ /* "should not happen" */ -+ musb->is_active = 0; -+ break; -+ } -+ } -+ - if (int_usb & MUSB_INTR_CONNECT) { - struct usb_hcd *hcd = musb_to_hcd(musb); - -@@ -602,11 +848,11 @@ static irqreturn_t musb_stage0_irq(struc - MUSB_HST_MODE(musb); - - /* indicate new connection to OTG machine */ -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_B_PERIPHERAL: - if (int_usb & MUSB_INTR_SUSPEND) { - DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n"); -- musb->xceiv.state = OTG_STATE_B_HOST; -+ musb->xceiv->state = OTG_STATE_B_HOST; - hcd->self.is_b_host = 1; - int_usb &= ~MUSB_INTR_SUSPEND; - } else -@@ -614,13 +860,13 @@ static irqreturn_t musb_stage0_irq(struc - break; - case OTG_STATE_B_WAIT_ACON: - DBG(1, "HNP: Waiting to switch to b_host state\n"); -- musb->xceiv.state = OTG_STATE_B_HOST; -+ musb->xceiv->state = OTG_STATE_B_HOST; - hcd->self.is_b_host = 1; - break; - default: - if ((devctl & MUSB_DEVCTL_VBUS) - == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { -- musb->xceiv.state = OTG_STATE_A_HOST; -+ musb->xceiv->state = OTG_STATE_A_HOST; - hcd->self.is_b_host = 0; - } - break; -@@ -630,10 +876,78 @@ static irqreturn_t musb_stage0_irq(struc - } - #endif /* CONFIG_USB_MUSB_HDRC_HCD */ - -+ if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { -+ DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", -+ otg_state_string(musb), -+ MUSB_MODE(musb), devctl); -+ handled = IRQ_HANDLED; -+ -+ switch (musb->xceiv->state) { -+#ifdef CONFIG_USB_MUSB_HDRC_HCD -+ case OTG_STATE_A_HOST: -+ case OTG_STATE_A_SUSPEND: -+ usb_hcd_resume_root_hub(musb_to_hcd(musb)); -+ musb_root_disconnect(musb); -+ if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) -+ musb_platform_try_idle(musb, jiffies -+ + msecs_to_jiffies(musb->a_wait_bcon)); -+ break; -+#endif /* HOST */ -+#ifdef CONFIG_USB_MUSB_OTG -+ case OTG_STATE_B_HOST: -+ musb_hnp_stop(musb); -+ break; -+ case OTG_STATE_A_PERIPHERAL: -+ musb_hnp_stop(musb); -+ musb_root_disconnect(musb); -+ /* FALLTHROUGH */ -+ case OTG_STATE_B_WAIT_ACON: -+ /* FALLTHROUGH */ -+#endif /* OTG */ -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ case OTG_STATE_B_PERIPHERAL: -+ case OTG_STATE_B_IDLE: -+ /* Workaround for a problem of Vbus quickly dropping -+ * during Certification tests. 
-+ * -+ * Undo the workaround on disconnect -+ */ -+ -+ /* Disable suspend so we can write to ULPI */ -+ r = musb_readb(musb->mregs, MUSB_POWER); -+ musb_writeb(musb->mregs, MUSB_POWER, -+ r & ~MUSB_POWER_ENSUSPEND); -+ musb_ulpi_writeb(musb->mregs, -+ ISP1704_USB_INTFALL, 0x1f); -+ musb_ulpi_writeb(musb->mregs, -+ ISP1704_USB_INTRISE, 0x1f); -+ musb_writeb(musb->mregs, MUSB_POWER, -+ r | MUSB_POWER_ENSUSPEND); -+ -+ musb_g_disconnect(musb); -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * This is necessary to notify gadget driver this was -+ * a physical disconnection and not only a port reset. -+ */ -+ if (musb->gadget_driver->vbus_disconnect) -+ musb->gadget_driver->vbus_disconnect(&musb->g); -+ -+ break; -+#endif /* GADGET */ -+ default: -+ WARNING("unhandled DISCONNECT transition (%s)\n", -+ otg_state_string(musb)); -+ break; -+ } -+ } -+ - /* mentor saves a bit: bus reset and babble share the same irq. - * only host sees babble; only peripheral sees bus reset. - */ - if (int_usb & MUSB_INTR_RESET) { -+ handled = IRQ_HANDLED; - if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { - /* - * Looks like non-HS BABBLE can be ignored, but -@@ -646,11 +960,11 @@ static irqreturn_t musb_stage0_irq(struc - DBG(1, "BABBLE devctl: %02x\n", devctl); - else { - ERR("Stopping host session -- babble\n"); -- musb_writeb(mbase, MUSB_DEVCTL, 0); -+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0); - } - } else if (is_peripheral_capable()) { - DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - #ifdef CONFIG_USB_OTG - case OTG_STATE_A_SUSPEND: - /* We need to ignore disconnect on suspend -@@ -673,14 +987,35 @@ static irqreturn_t musb_stage0_irq(struc - case OTG_STATE_B_WAIT_ACON: - DBG(1, "HNP: RESET (%s), to b_peripheral\n", - otg_state_string(musb)); -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - musb_g_reset(musb); - break; - #endif - case OTG_STATE_B_IDLE: -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ /* Workaround the charger detection problems */ -+ if ((devctl & MUSB_DEVCTL_VBUS) -+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)) -+ break; -+ if (check_charger) -+ break; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - /* FALLTHROUGH */ - case OTG_STATE_B_PERIPHERAL: -+ /* Workaround for a problem of Vbus quickly dropping -+ * during Certification tests. -+ * -+ * The guess is that vbus drops due to the circuitry -+ * for overcurrent protection and that makes transceiver -+ * think VBUS is not valid anymore. Transceiver will -+ * then send an RXCMD to PHY which will cause it to -+ * disconnect from the bus even though we disable the -+ * DISCONNECT IRQ -+ */ -+ musb_ulpi_writeb(musb->mregs, -+ ISP1704_USB_INTFALL, 0x1d); -+ musb_ulpi_writeb(musb->mregs, -+ ISP1704_USB_INTRISE, 0x1d); -+ - musb_g_reset(musb); - break; - default: -@@ -688,29 +1023,7 @@ static irqreturn_t musb_stage0_irq(struc - otg_state_string(musb)); - } - } -- -- handled = IRQ_HANDLED; - } -- schedule_work(&musb->irq_work); -- -- return handled; --} -- --/* -- * Interrupt Service Routine to record USB "global" interrupts. 
-- * Since these do not happen often and signify things of -- * paramount importance, it seems OK to check them individually; -- * the order of the tests is specified in the manual -- * -- * @param musb instance pointer -- * @param int_usb register contents -- * @param devctl -- * @param power -- */ --static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, -- u8 devctl, u8 power) --{ -- irqreturn_t handled = IRQ_NONE; - - #if 0 - /* REVISIT ... this would be for multiplexing periodic endpoints, or -@@ -757,98 +1070,7 @@ static irqreturn_t musb_stage2_irq(struc - } - #endif - -- if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { -- DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", -- otg_state_string(musb), -- MUSB_MODE(musb), devctl); -- handled = IRQ_HANDLED; -- -- switch (musb->xceiv.state) { --#ifdef CONFIG_USB_MUSB_HDRC_HCD -- case OTG_STATE_A_HOST: -- case OTG_STATE_A_SUSPEND: -- musb_root_disconnect(musb); -- if (musb->a_wait_bcon != 0) -- musb_platform_try_idle(musb, jiffies -- + msecs_to_jiffies(musb->a_wait_bcon)); -- break; --#endif /* HOST */ --#ifdef CONFIG_USB_MUSB_OTG -- case OTG_STATE_B_HOST: -- musb_hnp_stop(musb); -- break; -- case OTG_STATE_A_PERIPHERAL: -- musb_hnp_stop(musb); -- musb_root_disconnect(musb); -- /* FALLTHROUGH */ -- case OTG_STATE_B_WAIT_ACON: -- /* FALLTHROUGH */ --#endif /* OTG */ --#ifdef CONFIG_USB_GADGET_MUSB_HDRC -- case OTG_STATE_B_PERIPHERAL: -- case OTG_STATE_B_IDLE: -- musb_g_disconnect(musb); -- break; --#endif /* GADGET */ -- default: -- WARNING("unhandled DISCONNECT transition (%s)\n", -- otg_state_string(musb)); -- break; -- } -- -- schedule_work(&musb->irq_work); -- } -- -- if (int_usb & MUSB_INTR_SUSPEND) { -- DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", -- otg_state_string(musb), devctl, power); -- handled = IRQ_HANDLED; -- -- switch (musb->xceiv.state) { --#ifdef CONFIG_USB_MUSB_OTG -- case OTG_STATE_A_PERIPHERAL: -- /* -- * We cannot stop HNP here, devctl BDEVICE might be -- * still set. 
-- */ -- break; --#endif -- case OTG_STATE_B_PERIPHERAL: -- musb_g_suspend(musb); -- musb->is_active = is_otg_enabled(musb) -- && musb->xceiv.gadget->b_hnp_enable; -- if (musb->is_active) { --#ifdef CONFIG_USB_MUSB_OTG -- musb->xceiv.state = OTG_STATE_B_WAIT_ACON; -- DBG(1, "HNP: Setting timer for b_ase0_brst\n"); -- musb_otg_timer.data = (unsigned long)musb; -- mod_timer(&musb_otg_timer, jiffies -- + msecs_to_jiffies(TB_ASE0_BRST)); --#endif -- } -- break; -- case OTG_STATE_A_WAIT_BCON: -- if (musb->a_wait_bcon != 0) -- musb_platform_try_idle(musb, jiffies -- + msecs_to_jiffies(musb->a_wait_bcon)); -- break; -- case OTG_STATE_A_HOST: -- musb->xceiv.state = OTG_STATE_A_SUSPEND; -- musb->is_active = is_otg_enabled(musb) -- && musb->xceiv.host->b_hnp_enable; -- break; -- case OTG_STATE_B_HOST: -- /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ -- DBG(1, "REVISIT: SUSPEND as B_HOST\n"); -- break; -- default: -- /* "should not happen" */ -- musb->is_active = 0; -- break; -- } -- schedule_work(&musb->irq_work); -- } -- -+ schedule_work(&musb->irq_work); - - return handled; - } -@@ -862,9 +1084,16 @@ void musb_start(struct musb *musb) - { - void __iomem *regs = musb->mregs; - u8 devctl = musb_readb(regs, MUSB_DEVCTL); -+ u8 power; - - DBG(2, "<== devctl %02x\n", devctl); - -+ /* Ensure the clocks are on */ -+ if (musb->set_clock) -+ musb->set_clock(musb->clock, 1); -+ else -+ clk_enable(musb->clock); -+ - /* Set INT enable registers, enable interrupts */ - musb_writew(regs, MUSB_INTRTXE, musb->epmask); - musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); -@@ -872,13 +1101,15 @@ void musb_start(struct musb *musb) - - musb_writeb(regs, MUSB_TESTMODE, 0); - -+ power = MUSB_POWER_ISOUPDATE | MUSB_POWER_SOFTCONN -+ | MUSB_POWER_HSENAB; -+ -+ /* ENSUSPEND wedges tusb */ -+ if (musb->suspendm) -+ power |= MUSB_POWER_ENSUSPEND; -+ - /* put into basic highspeed mode and start session */ -- musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE -- | MUSB_POWER_SOFTCONN -- | MUSB_POWER_HSENAB -- /* ENSUSPEND wedges tusb */ -- /* | MUSB_POWER_ENSUSPEND */ -- ); -+ musb_writeb(regs, MUSB_POWER, power); - - musb->is_active = 0; - devctl = musb_readb(regs, MUSB_DEVCTL); -@@ -913,6 +1144,12 @@ static void musb_generic_disable(struct - void __iomem *mbase = musb->mregs; - u16 temp; - -+ /* Clocks need to be turned on with OFF-mode */ -+ if (musb->set_clock) -+ musb->set_clock(musb->clock, 1); -+ else -+ clk_enable(musb->clock); -+ - /* disable interrupts */ - musb_writeb(mbase, MUSB_INTRUSBE, 0); - musb_writew(mbase, MUSB_INTRTXE, 0); -@@ -928,6 +1165,15 @@ static void musb_generic_disable(struct - - } - -+void musb_emergency_stop(void) -+{ -+ if (!the_musb) -+ return; -+ -+ musb_stop(the_musb); -+} -+EXPORT_SYMBOL_GPL(musb_emergency_stop); -+ - /* - * Make the HDRC stop (disable interrupts, etc.); - * reversible by musb_start -@@ -1064,21 +1310,58 @@ static struct fifo_cfg __initdata mode_4 - { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, - { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, - { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, --{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, -+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 64, }, - { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, --{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, -+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 64, }, - { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, --{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, }, --{ .hw_ep_num = 11, .style = FIFO_TX, 
.maxpacket = 512, }, --{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, }, --{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, }, --{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, }, --{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, }, --{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, }, -+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, }, -+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, }, -+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 256, }, -+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, }, -+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 256, }, -+{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 256, }, -+{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 4096, }, - { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, - { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, - }; - -+/* mode 5 - fits in 16KB */ -+static struct fifo_cfg __initdata mode_5_cfg[] = { -+/* phonet or mass storage */ -+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+ -+/* obex 1 */ -+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+ -+/* obex 2 */ -+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+ -+/* acm 1 */ -+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 16, }, -+ -+/* ecm */ -+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_SINGLE, }, -+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 16, }, -+ -+/* extras */ -+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, -+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, -+ -+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, -+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, -+ -+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, -+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, -+ -+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, }, -+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, -+}; - - /* - * configure a fifo; for non-shared endpoints, this may be called -@@ -1129,12 +1412,16 @@ fifo_setup(struct musb *musb, struct mus - musb_write_txfifoadd(mbase, c_off); - hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); - hw_ep->max_packet_sz_tx = maxpacket; -+ ctx.txfifosz[hw_ep->epnum] = c_size; -+ ctx.txfifoadd[hw_ep->epnum] = c_off; - break; - case FIFO_RX: - musb_write_rxfifosz(mbase, c_size); - musb_write_rxfifoadd(mbase, c_off); - hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); - hw_ep->max_packet_sz_rx = maxpacket; -+ ctx.rxfifosz[hw_ep->epnum] = c_size; -+ ctx.rxfifoadd[hw_ep->epnum] = c_off; - break; - case FIFO_RXTX: - musb_write_txfifosz(mbase, c_size); -@@ -1147,6 +1434,12 @@ fifo_setup(struct musb *musb, struct mus - hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; - hw_ep->max_packet_sz_tx = maxpacket; - -+ /* Save the context of endpoints. 
*/ -+ ctx.rxfifosz[hw_ep->epnum] = c_size; -+ ctx.txfifosz[hw_ep->epnum] = c_size; -+ ctx.txfifoadd[hw_ep->epnum] = c_off; -+ ctx.rxfifoadd[hw_ep->epnum] = c_off; -+ - hw_ep->is_shared_fifo = true; - break; - } -@@ -1170,6 +1463,9 @@ static int __init ep_config_from_table(s - int offset; - struct musb_hw_ep *hw_ep = musb->endpoints; - -+ if (machine_is_nokia_rx51()) -+ fifo_mode = 5; -+ - switch (fifo_mode) { - default: - fifo_mode = 0; -@@ -1194,6 +1490,10 @@ static int __init ep_config_from_table(s - cfg = mode_4_cfg; - n = ARRAY_SIZE(mode_4_cfg); - break; -+ case 5: -+ cfg = mode_5_cfg; -+ n = ARRAY_SIZE(mode_5_cfg); -+ break; - } - - printk(KERN_DEBUG "%s: setup fifo_mode %d\n", -@@ -1429,6 +1729,9 @@ static int __init musb_core_init(u16 mus - - hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; - #ifdef CONFIG_USB_MUSB_HDRC_HCD -+ /* init list of in and out qhs */ -+ INIT_LIST_HEAD(&hw_ep->in_list); -+ INIT_LIST_HEAD(&hw_ep->out_list); - hw_ep->target_regs = musb_read_target_reg_base(i, mbase); - hw_ep->rx_reinit = 1; - hw_ep->tx_reinit = 1; -@@ -1475,8 +1778,8 @@ static irqreturn_t generic_interrupt(int - musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); - musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); - -- if (musb->int_usb || musb->int_tx || musb->int_rx) -- retval = musb_interrupt(musb); -+ while (musb->int_usb || musb->int_tx || musb->int_rx) -+ retval |= musb_interrupt(musb); - - spin_unlock_irqrestore(&musb->lock, flags); - -@@ -1503,7 +1806,7 @@ static irqreturn_t generic_interrupt(int - irqreturn_t musb_interrupt(struct musb *musb) - { - irqreturn_t retval = IRQ_NONE; -- u8 devctl, power; -+ u8 devctl, power, int_usb; - int ep_num; - u32 reg; - -@@ -1514,80 +1817,85 @@ irqreturn_t musb_interrupt(struct musb * - (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", - musb->int_usb, musb->int_tx, musb->int_rx); - -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ if (is_otg_enabled(musb)|| is_peripheral_enabled(musb)) -+ if (!musb->gadget_driver) { -+ DBG(5, "No gadget driver loaded\n"); -+ musb->int_usb = 0; -+ musb->int_tx = 0; -+ musb->int_rx = 0; -+ return IRQ_HANDLED; -+ } -+#endif -+ - /* the core can interrupt us for multiple reasons; docs have - * a generic interrupt flowchart to follow - */ -- if (musb->int_usb & STAGE0_MASK) -- retval |= musb_stage0_irq(musb, musb->int_usb, -- devctl, power); -+ int_usb = musb->int_usb; -+ musb->int_usb = 0; -+ int_usb &= ~MUSB_INTR_SOF; -+ if (int_usb) -+ retval |= musb_stage0_irq(musb, int_usb, devctl, power); - - /* "stage 1" is handling endpoint irqs */ - - /* handle endpoint 0 first */ - if (musb->int_tx & 1) { -+ musb->int_tx &= ~1; - if (devctl & MUSB_DEVCTL_HM) - retval |= musb_h_ep0_irq(musb); - else - retval |= musb_g_ep0_irq(musb); - } - -- /* RX on endpoints 1-15 */ -- reg = musb->int_rx >> 1; -+ /* TX on endpoints 1-15 */ -+ reg = musb->int_tx >> 1; -+ musb->int_tx = 0; - ep_num = 1; - while (reg) { - if (reg & 1) { - /* musb_ep_select(musb->mregs, ep_num); */ -- /* REVISIT just retval = ep->rx_irq(...) */ -+ /* REVISIT just retval |= ep->tx_irq(...) 
*/ - retval = IRQ_HANDLED; - if (devctl & MUSB_DEVCTL_HM) { - if (is_host_capable()) -- musb_host_rx(musb, ep_num); -+ musb_host_tx(musb, ep_num); - } else { - if (is_peripheral_capable()) -- musb_g_rx(musb, ep_num); -+ musb_g_tx(musb, ep_num); - } - } -- - reg >>= 1; - ep_num++; - } - -- /* TX on endpoints 1-15 */ -- reg = musb->int_tx >> 1; -+ /* RX on endpoints 1-15 */ -+ reg = musb->int_rx >> 1; -+ musb->int_rx = 0; - ep_num = 1; - while (reg) { - if (reg & 1) { - /* musb_ep_select(musb->mregs, ep_num); */ -- /* REVISIT just retval |= ep->tx_irq(...) */ -+ /* REVISIT just retval = ep->rx_irq(...) */ - retval = IRQ_HANDLED; - if (devctl & MUSB_DEVCTL_HM) { - if (is_host_capable()) -- musb_host_tx(musb, ep_num); -+ musb_host_rx(musb, ep_num); - } else { - if (is_peripheral_capable()) -- musb_g_tx(musb, ep_num); -+ musb_g_rx(musb, ep_num, false); - } - } -+ - reg >>= 1; - ep_num++; - } - -- /* finish handling "global" interrupts after handling fifos */ -- if (musb->int_usb) -- retval |= musb_stage2_irq(musb, -- musb->int_usb, devctl, power); -- - return retval; - } - - - #ifndef CONFIG_MUSB_PIO_ONLY --static int __initdata use_dma = 1; -- --/* "modprobe ... use_dma=0" etc */ --module_param(use_dma, bool, 0); --MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); -- - void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) - { - u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); -@@ -1621,14 +1929,11 @@ void musb_dma_completion(struct musb *mu - musb_host_rx(musb, epnum); - } else { - if (is_peripheral_capable()) -- musb_g_rx(musb, epnum); -+ musb_g_rx(musb, epnum, true); - } - } - } - } -- --#else --#define use_dma 0 - #endif - - /*-------------------------------------------------------------------------*/ -@@ -1636,28 +1941,93 @@ void musb_dma_completion(struct musb *mu - #ifdef CONFIG_SYSFS - - static ssize_t -+musb_charger_show(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct musb *musb = dev_to_musb(dev); -+ -+ return sprintf(buf, "%d\n", (musb->is_charger ? 
-+ musb->is_charger : musb_charger_detect(musb))); -+} -+static DEVICE_ATTR(charger, 0444, musb_charger_show, NULL); -+ -+static ssize_t -+musb_amp_show(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct musb *musb = dev_to_musb(dev); -+ -+ return sprintf(buf, "%d\n", musb->power_draw); -+} -+static DEVICE_ATTR(mA, 0444, musb_amp_show, NULL); -+ -+static ssize_t - musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) - { - struct musb *musb = dev_to_musb(dev); -- unsigned long flags; - int ret = -EINVAL; - -- spin_lock_irqsave(&musb->lock, flags); -+ mutex_lock(&musb->mutex); - ret = sprintf(buf, "%s\n", otg_state_string(musb)); -+ mutex_unlock(&musb->mutex); -+ -+ return ret; -+} -+ -+static ssize_t -+musb_connect_show(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct musb *musb = dev_to_musb(dev); -+ unsigned long flags; -+ int ret = -EINVAL; -+ -+ spin_lock_irqsave(&musb->lock, flags); -+ ret = sprintf(buf, "%d\n", musb->softconnect); - spin_unlock_irqrestore(&musb->lock, flags); - - return ret; - } - - static ssize_t --musb_mode_store(struct device *dev, struct device_attribute *attr, -+musb_connect_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t n) - { - struct musb *musb = dev_to_musb(dev); - unsigned long flags; -+ unsigned val; - int status; -+ u8 power; -+ -+ status = sscanf(buf, "%u", &val); -+ if (status < 1) { -+ printk(KERN_ERR "invalid parameter, %d\n", status); -+ return -EINVAL; -+ } - - spin_lock_irqsave(&musb->lock, flags); -+ -+ power = musb_readb(musb->mregs, MUSB_POWER); -+ -+ if (val) -+ power |= MUSB_POWER_SOFTCONN; -+ else -+ power &= ~MUSB_POWER_SOFTCONN; -+ -+ musb->softconnect = !!val; -+ musb_writeb(musb->mregs, MUSB_POWER, power); -+ -+ spin_unlock_irqrestore(&musb->lock, flags); -+ -+ return n; -+} -+static DEVICE_ATTR(connect, 0644, musb_connect_show, musb_connect_store); -+ -+static ssize_t -+musb_mode_store(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t n) -+{ -+ struct musb *musb = dev_to_musb(dev); -+ int status; -+ -+ mutex_lock(&musb->mutex); - if (sysfs_streq(buf, "host")) - status = musb_platform_set_mode(musb, MUSB_HOST); - else if (sysfs_streq(buf, "peripheral")) -@@ -1666,7 +2036,7 @@ musb_mode_store(struct device *dev, stru - status = musb_platform_set_mode(musb, MUSB_OTG); - else - status = -EINVAL; -- spin_unlock_irqrestore(&musb->lock, flags); -+ mutex_unlock(&musb->mutex); - - return (status == 0) ? n : status; - } -@@ -1687,7 +2057,7 @@ musb_vbus_store(struct device *dev, stru - - spin_lock_irqsave(&musb->lock, flags); - musb->a_wait_bcon = val; -- if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON) -+ if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON) - musb->is_active = 0; - musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); - spin_unlock_irqrestore(&musb->lock, flags); -@@ -1715,6 +2085,15 @@ static DEVICE_ATTR(vbus, 0644, musb_vbus - - #ifdef CONFIG_USB_GADGET_MUSB_HDRC - -+static ssize_t -+musb_suspend_show(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ struct musb *musb = dev_to_musb(dev); -+ -+ return sprintf(buf, "%d\n", musb->is_suspended); -+} -+static DEVICE_ATTR(suspend, 0444, musb_suspend_show, NULL); -+ - /* Gadget drivers can't know that a host is connected so they might want - * to start SRP, but users can. This allows userspace to trigger SRP. 
- */ -@@ -1746,12 +2125,22 @@ static DEVICE_ATTR(srp, 0644, NULL, musb - static void musb_irq_work(struct work_struct *data) - { - struct musb *musb = container_of(data, struct musb, irq_work); -- static int old_state; -+ static int old_state, old_ma, old_suspend; - -- if (musb->xceiv.state != old_state) { -- old_state = musb->xceiv.state; -+ if (musb->xceiv->state != old_state) { -+ old_state = musb->xceiv->state; - sysfs_notify(&musb->controller->kobj, NULL, "mode"); - } -+ if (musb->power_draw != old_ma) { -+ old_ma = musb->power_draw; -+ sysfs_notify(&musb->controller->kobj, NULL, "mA"); -+ } -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ if (old_suspend != musb->is_suspended) { -+ old_suspend = musb->is_suspended; -+ sysfs_notify(&musb->controller->kobj, NULL, "suspend"); -+ } -+#endif - } - - /* -------------------------------------------------------------------------- -@@ -1768,15 +2157,12 @@ allocate_instance(struct device *dev, - #ifdef CONFIG_USB_MUSB_HDRC_HCD - struct usb_hcd *hcd; - -- hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id); -+ hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); - if (!hcd) - return NULL; - /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ - - musb = hcd_to_musb(hcd); -- INIT_LIST_HEAD(&musb->control); -- INIT_LIST_HEAD(&musb->in_bulk); -- INIT_LIST_HEAD(&musb->out_bulk); - - hcd->uses_new_polling = 1; - -@@ -1813,9 +2199,13 @@ static void musb_free(struct musb *musb) - */ - - #ifdef CONFIG_SYSFS -+ device_remove_file(musb->controller, &dev_attr_mA); -+ device_remove_file(musb->controller, &dev_attr_connect); -+ device_remove_file(musb->controller, &dev_attr_charger); - device_remove_file(musb->controller, &dev_attr_mode); - device_remove_file(musb->controller, &dev_attr_vbus); --#ifdef CONFIG_USB_MUSB_OTG -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ device_remove_file(musb->controller, &dev_attr_suspend); - device_remove_file(musb->controller, &dev_attr_srp); - #endif - #endif -@@ -1824,8 +2214,9 @@ static void musb_free(struct musb *musb) - musb_gadget_cleanup(musb); - #endif - -- if (musb->nIrq >= 0 && musb->irq_wake) { -- disable_irq_wake(musb->nIrq); -+ if (musb->nIrq >= 0) { -+ if (musb->irq_wake) -+ disable_irq_wake(musb->nIrq); - free_irq(musb->nIrq, musb); - } - if (is_dma_capable() && musb->dma_controller) { -@@ -1845,7 +2236,7 @@ static void musb_free(struct musb *musb) - } - - #ifdef CONFIG_USB_MUSB_OTG -- put_device(musb->xceiv.dev); -+ put_device(musb->xceiv->dev); - #endif - - #ifdef CONFIG_USB_MUSB_HDRC_HCD -@@ -1906,11 +2297,16 @@ bad_config: - if (!musb) - return -ENOMEM; - -+ the_musb = musb; -+ - spin_lock_init(&musb->lock); -+ mutex_init(&musb->mutex); -+ musb->board = plat->board; - musb->board_mode = plat->mode; - musb->board_set_power = plat->set_power; - musb->set_clock = plat->set_clock; - musb->min_power = plat->min_power; -+ musb->use_dma = use_dma; - - /* Clock usage is chip-specific ... functional clock (DaVinci, - * OMAP2430), or PHY ref (some TUSB6010 boards). 
All this core -@@ -1952,7 +2348,7 @@ bad_config: - } - #endif - /* ideally this would be abstracted in platform setup */ -- if (!is_dma_capable() || !musb->dma_controller) -+ if (!musb->use_dma || !musb->dma_controller) - dev->dma_mask = NULL; - - /* be sure interrupts are disabled before connecting ISR */ -@@ -1970,7 +2366,7 @@ bad_config: - INIT_WORK(&musb->irq_work, musb_irq_work); - - /* attach to the IRQ */ -- if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) { -+ if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { - dev_err(dev, "request_irq %d failed!\n", nIrq); - status = -ENODEV; - goto fail2; -@@ -2004,7 +2400,7 @@ bad_config: - - if (musb->board_mode == MUSB_OTG) - hcd->self.otg_port = 1; -- musb->xceiv.host = &hcd->self; -+ musb->xceiv->host = &hcd->self; - hcd->power_budget = 2 * (plat->power ? : 250); - } - #endif /* CONFIG_USB_MUSB_HDRC_HCD */ -@@ -2015,8 +2411,8 @@ bad_config: - */ - if (!is_otg_enabled(musb) && is_host_enabled(musb)) { - MUSB_HST_MODE(musb); -- musb->xceiv.default_a = 1; -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->default_a = 1; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - - status = usb_add_hcd(musb_to_hcd(musb), -1, 0); - if (status) -@@ -2031,8 +2427,8 @@ bad_config: - - } else /* peripheral is enabled */ { - MUSB_DEV_MODE(musb); -- musb->xceiv.default_a = 0; -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->default_a = 0; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - - status = musb_gadget_setup(musb); - if (status) -@@ -2045,10 +2441,17 @@ bad_config: - - } - -+ if (!(musb_debug_create("driver/musb_hdrc", musb))) -+ DBG(1, "could not create procfs entry\n"); -+ - #ifdef CONFIG_SYSFS -+ status = device_create_file(dev, &dev_attr_mA); -+ status = device_create_file(dev, &dev_attr_connect); -+ status = device_create_file(dev, &dev_attr_charger); - status = device_create_file(dev, &dev_attr_mode); - status = device_create_file(dev, &dev_attr_vbus); - #ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ status = device_create_file(dev, &dev_attr_suspend); - status = device_create_file(dev, &dev_attr_srp); - #endif /* CONFIG_USB_GADGET_MUSB_HDRC */ - status = 0; -@@ -2056,13 +2459,23 @@ bad_config: - if (status) - goto fail2; - -+ /* Resets the controller. Has to be done. 
Without this, most likely -+ * the state machine inside the transceiver doesn't get fixed properly -+ */ -+ musb_save_ctx_and_suspend(&musb->g, 0); -+ musb_restore_ctx_and_resume(&musb->g); -+ - return 0; - - fail2: - #ifdef CONFIG_SYSFS -+ device_remove_file(dev, &dev_attr_mA); -+ device_remove_file(dev, &dev_attr_connect); -+ device_remove_file(dev, &dev_attr_charger); - device_remove_file(musb->controller, &dev_attr_mode); - device_remove_file(musb->controller, &dev_attr_vbus); --#ifdef CONFIG_USB_MUSB_OTG -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ device_remove_file(musb->controller, &dev_attr_suspend); - device_remove_file(musb->controller, &dev_attr_srp); - #endif - #endif -@@ -2111,6 +2524,9 @@ static int __init musb_probe(struct plat - /* clobbered by use_dma=n */ - orig_dma_mask = dev->dma_mask; - #endif -+ /* Store initial mask for USB interrupts */ -+ ctx.intrusbe = 0xf7; -+ - return musb_init_controller(dev, irq, base); - } - -@@ -2125,6 +2541,7 @@ static int __devexit musb_remove(struct - * - OTG mode: both roles are deactivated (or never-activated) - */ - musb_shutdown(pdev); -+ musb_debug_delete("driver/musb_hdrc", musb); - #ifdef CONFIG_USB_MUSB_HDRC_HCD - if (musb->board_mode == MUSB_HOST) - usb_remove_hcd(musb_to_hcd(musb)); -@@ -2140,6 +2557,37 @@ static int __devexit musb_remove(struct - - #ifdef CONFIG_PM - -+void musb_save_ctx(struct musb *musb) -+{ -+ ctx.power = musb_readb(musb->mregs, MUSB_POWER); -+ ctx.intrtxe = musb_readw(musb->mregs, MUSB_INTRTXE); -+ ctx.intrrxe = musb_readw(musb->mregs, MUSB_INTRRXE); -+ ctx.intrusbe = musb_readb(musb->mregs, MUSB_INTRUSBE); -+ ctx.devctl = musb_readb(musb->mregs, MUSB_DEVCTL); -+} -+ -+void musb_restore_ctx(struct musb *musb) -+{ -+ int i; -+ musb_writeb(musb->mregs, MUSB_POWER, ctx.power); -+ musb_writew(musb->mregs, MUSB_INTRTX, 0x00); -+ musb_writew(musb->mregs, MUSB_INTRTXE, ctx.intrtxe); -+ musb_writew(musb->mregs, MUSB_INTRRX, 0x00); -+ musb_writew(musb->mregs, MUSB_INTRRXE, ctx.intrrxe); -+ musb_writeb(musb->mregs, MUSB_INTRUSB, 0x00); -+ musb_writeb(musb->mregs, MUSB_INTRUSBE, ctx.intrusbe); -+ musb_writeb(musb->mregs, MUSB_DEVCTL, ctx.devctl); -+ -+ /* iterate over every endpoint, select it and restore its context */ -+ for (i = 0; i < musb->config->num_eps; i++) { -+ musb_writeb(musb->mregs, MUSB_INDEX, i); -+ musb_writeb(musb->mregs, MUSB_RXFIFOSZ, ctx.rxfifosz[i]); -+ musb_writeb(musb->mregs, MUSB_TXFIFOSZ, ctx.txfifosz[i]); -+ musb_writew(musb->mregs, MUSB_TXFIFOADD, ctx.txfifoadd[i]); -+ musb_writew(musb->mregs, MUSB_RXFIFOADD, ctx.rxfifoadd[i]); -+ }; -+} -+ - static int musb_suspend(struct platform_device *pdev, pm_message_t message) - { - unsigned long flags; -@@ -2160,10 +2608,14 @@ static int musb_suspend(struct platform_ - */ - } - -+ /* save context */ -+ musb_save_ctx(musb); -+ - if (musb->set_clock) - musb->set_clock(musb->clock, 0); - else - clk_disable(musb->clock); -+ - spin_unlock_irqrestore(&musb->lock, flags); - return 0; - } -@@ -2183,6 +2635,9 @@ static int musb_resume(struct platform_d - else - clk_enable(musb->clock); - -+ /* restore context */ -+ musb_restore_ctx(musb); -+ - /* for static cmos like DaVinci, register values were preserved - * unless for some reason the whole soc powered down and we're - * not treating that as a whole-system restart (e.g. 
swsusp) -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_core.h linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_core.h ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_core.h 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_core.h 2011-06-22 13:19:33.083063271 +0200 -@@ -64,7 +64,6 @@ struct musb_ep; - #include "musb_host.h" - - -- - #ifdef CONFIG_USB_MUSB_OTG - - #define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) -@@ -109,7 +108,7 @@ struct musb_ep; - - extern irqreturn_t musb_g_ep0_irq(struct musb *); - extern void musb_g_tx(struct musb *, u8); --extern void musb_g_rx(struct musb *, u8); -+extern void musb_g_rx(struct musb *, u8, bool); - extern void musb_g_reset(struct musb *); - extern void musb_g_suspend(struct musb *); - extern void musb_g_resume(struct musb *); -@@ -271,6 +270,12 @@ struct musb_hw_ep { - struct musb_qh *in_qh; - struct musb_qh *out_qh; - -+ /* list of rx and tx qhs, control transfer needs only -+ * one list thus only in_list is used for control. -+ */ -+ struct list_head in_list; -+ struct list_head out_list; -+ - u8 rx_reinit; - u8 tx_reinit; - #endif -@@ -300,15 +305,62 @@ static inline struct usb_request *next_o - #endif - } - -+#define MUSB_MAX_EPS 16 -+ -+struct musb_ctx { -+ /* common register */ -+ u16 intrtxe; -+ u16 intrrxe; -+ -+ u8 intrusbe; -+ -+ u8 faddr; -+ u8 power; -+ -+ u8 frame; -+ u8 index; -+ u8 testmode; -+ u8 devctl; -+ u8 misc; -+ -+ /* indexed registers */ -+ u16 txmaxp[MUSB_MAX_EPS]; -+ u16 txcsr[MUSB_MAX_EPS]; -+ -+ u16 rxmaxp[MUSB_MAX_EPS]; -+ u16 rxcsr[MUSB_MAX_EPS]; -+ -+ u16 csr0; /* select ep0 to read/write this register */ -+ -+ u8 txtype[MUSB_MAX_EPS]; -+ u8 txinterval[MUSB_MAX_EPS]; -+ -+ u8 rxtype[MUSB_MAX_EPS]; -+ u8 rxinterval[MUSB_MAX_EPS]; -+ -+ u8 fifosize[MUSB_MAX_EPS]; -+ -+ u8 rxfifosz[MUSB_MAX_EPS]; -+ u8 txfifosz[MUSB_MAX_EPS]; -+ u16 txfifoadd[MUSB_MAX_EPS]; -+ u16 rxfifoadd[MUSB_MAX_EPS]; -+ -+ u8 count0; -+ u8 type0; -+ u8 naklimit0; -+}; -+ - /* - * struct musb - Driver instance data. 
- */ - struct musb { - /* device lock */ - spinlock_t lock; -+ struct mutex mutex; - struct clk *clock; - irqreturn_t (*isr)(int, void *); - struct work_struct irq_work; -+ struct work_struct vbus_work; - - /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ - #define MUSB_PORT_STAT_RESUME (1 << 31) -@@ -328,9 +380,6 @@ struct musb { - */ - struct musb_hw_ep *bulk_ep; - -- struct list_head control; /* of musb_qh */ -- struct list_head in_bulk; /* of musb_qh */ -- struct list_head out_bulk; /* of musb_qh */ - struct musb_qh *periodic[32]; /* tree of interrupt+iso */ - #endif - -@@ -356,7 +405,7 @@ struct musb { - u16 int_rx; - u16 int_tx; - -- struct otg_transceiver xceiv; -+ struct otg_transceiver *xceiv; - - int nIrq; - unsigned irq_wake:1; -@@ -369,6 +418,7 @@ struct musb { - u16 epmask; - u8 nr_endpoints; - -+ struct musb_board_data *board; - u8 board_mode; /* enum musb_mode */ - int (*board_set_power)(int state); - -@@ -376,6 +426,8 @@ struct musb { - - u8 min_power; /* vbus for periph, in mA/2 */ - -+ unsigned power_draw; /* current power draw, gadget only */ -+ - bool is_host; - - int a_wait_bcon; /* VBUS timeout in msecs */ -@@ -429,11 +481,18 @@ struct musb { - struct usb_gadget_driver *gadget_driver; /* its driver */ - #endif - -+ /* true if this chip can enable SUSPENDM */ -+ unsigned suspendm:1; -+ -+ /* true if we're using dma */ -+ unsigned use_dma:1; -+ - struct musb_hdrc_config *config; - - #ifdef MUSB_CONFIG_PROC_FS - struct proc_dir_entry *proc_entry; - #endif -+ unsigned is_charger:1; - }; - - static inline void musb_set_vbus(struct musb *musb, int is_on) -@@ -547,7 +606,37 @@ extern int musb_platform_get_vbus_status - #define musb_platform_get_vbus_status(x) 0 - #endif - -+#ifdef CONFIG_PM -+extern void musb_save_ctx(struct musb *musb); -+extern void musb_restore_ctx(struct musb *musb); -+extern void musb_save_ctx_and_suspend(struct usb_gadget *, int); -+extern void musb_restore_ctx_and_resume(struct usb_gadget *); -+#else -+static inline void musb_save_ctx(struct musb *musb) {} -+static inline void musb_restore_ctx(struct musb *musb) {} -+static inline void musb_save_ctx_and_suspend(struct usb_gadget *, int) {} -+static inline void musb_restore_ctx_and_resume(struct usb_gadget *) {} -+#endif -+ - extern int __init musb_platform_init(struct musb *musb); - extern int musb_platform_exit(struct musb *musb); - -+/*-------------------------- ProcFS definitions ---------------------*/ -+ -+struct proc_dir_entry; -+ -+#ifdef CONFIG_MUSB_PROC_FS -+extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data); -+extern void musb_debug_delete(char *name, struct musb *data); -+#else -+static inline struct proc_dir_entry * -+musb_debug_create(char *name, struct musb *data) -+{ -+ return NULL; -+} -+static inline void musb_debug_delete(char *name, struct musb *data) -+{ -+} -+#endif -+ - #endif /* __MUSB_CORE_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_debug.h linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_debug.h ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_debug.h 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_debug.h 2011-06-22 13:19:33.083063271 +0200 -@@ -42,9 +42,13 @@ - #define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) - #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) - -+extern const char *otg_state_string(struct musb *); -+ -+#ifdef CONFIG_USB_MUSB_DEBUG -+ - #define xprintk(level, facility, format, args...) 
do { \ - if (_dbg_level(level)) { \ -- printk(facility "%s %d: " format , \ -+ printk(facility "%-20s %4d: " format , \ - __func__, __LINE__ , ## args); \ - } } while (0) - -@@ -54,9 +58,10 @@ static inline int _dbg_level(unsigned l) - { - return musb_debug >= l; - } -- - #define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) -+#else -+#define DBG(level, fmt, args...) do {} while(0) -+#endif /* CONFIG_USB_MUSB_DEBUG */ - --extern const char *otg_state_string(struct musb *); - - #endif /* __MUSB_LINUX_DEBUG_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_gadget.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_gadget.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_gadget.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_gadget.c 2011-06-22 13:19:33.083063271 +0200 -@@ -106,50 +106,207 @@ __acquires(ep->musb->lock) - { - struct musb_request *req; - struct musb *musb; -- int busy = ep->busy; - - req = to_musb_request(request); -+ req->complete = false; - - list_del(&request->list); - if (req->request.status == -EINPROGRESS) - req->request.status = status; - musb = req->musb; - -- ep->busy = 1; - spin_unlock(&musb->lock); -- if (is_dma_capable()) { -- if (req->mapped) { -- dma_unmap_single(musb->controller, -- req->request.dma, -- req->request.length, -- req->tx -- ? DMA_TO_DEVICE -- : DMA_FROM_DEVICE); -- req->request.dma = DMA_ADDR_INVALID; -- req->mapped = 0; -- } else if (req->request.dma != DMA_ADDR_INVALID) -- dma_sync_single_for_cpu(musb->controller, -- req->request.dma, -- req->request.length, -- req->tx -- ? DMA_TO_DEVICE -- : DMA_FROM_DEVICE); -- } -- if (request->status == 0) -+ if (request->status == 0) { - DBG(5, "%s done request %p, %d/%d\n", -- ep->end_point.name, request, -- req->request.actual, req->request.length); -- else -+ ep->name, request, req->request.actual, -+ req->request.length); -+ } else - DBG(2, "%s request %p, %d/%d fault %d\n", -- ep->end_point.name, request, -+ ep->name, request, - req->request.actual, req->request.length, - request->status); - req->request.complete(&req->ep->end_point, &req->request); - spin_lock(&musb->lock); -- ep->busy = busy; - } - --/* ----------------------------------------------------------------------- */ -+/** -+ * start_dma - starts dma for a transfer -+ * @musb: musb controller pointer -+ * @epnum: endpoint number to kick dma -+ * @req: musb request to be received -+ * -+ * Context: controller locked, IRQs blocked, endpoint selected -+ */ -+static int start_dma(struct musb *musb, struct musb_request *req) -+{ -+ struct musb_ep *musb_ep = req->ep; -+ struct dma_controller *cntr = musb->dma_controller; -+ struct musb_hw_ep *hw_ep = musb_ep->hw_ep; -+ struct dma_channel *dma; -+ void __iomem *epio; -+ size_t transfer_size; -+ int packet_sz; -+ u16 csr; -+ -+ if (!musb->use_dma || musb->dma_controller == NULL) -+ return -1; -+ -+ if (musb_ep->type == USB_ENDPOINT_XFER_INT) { -+ DBG(5, "not allocating dma for interrupt endpoint\n"); -+ return -1; -+ } -+ -+ if (((unsigned long) req->request.buf) & 0x01) { -+ DBG(5, "unaligned buffer %p for %s\n", req->request.buf, -+ musb_ep->name); -+ return -1; -+ } -+ -+ packet_sz = musb_ep->packet_sz; -+ transfer_size = req->request.length; -+ -+ if (transfer_size < packet_sz || -+ (transfer_size == packet_sz && packet_sz < 512)) { -+ DBG(4, "small transfer, using pio\n"); -+ return -1; -+ } -+ -+ epio = musb->endpoints[musb_ep->current_epnum].regs; -+ if (!musb_ep->is_in) { -+ csr = musb_readw(epio, MUSB_RXCSR); -+ -+ 
/* If RXPKTRDY we might have something already waiting -+ * in the fifo. If that something is less than packet_sz -+ * it means we only have a short packet waiting in the fifo -+ * so we unload it with pio. -+ */ -+ if (csr & MUSB_RXCSR_RXPKTRDY) { -+ u16 count; -+ -+ count = musb_readw(epio, MUSB_RXCOUNT); -+ if (count < packet_sz) { -+ DBG(4, "small packet in FIFO (%d bytes), " -+ "using PIO\n", count); -+ return -1; -+ } -+ } -+ } -+ -+ dma = cntr->channel_alloc(cntr, hw_ep, musb_ep->is_in); -+ if (dma == NULL) { -+ DBG(4, "unable to allocate dma channel for %s\n", -+ musb_ep->name); -+ return -1; -+ } -+ -+ if (transfer_size > dma->max_len) -+ transfer_size = dma->max_len; -+ -+ if (req->request.dma == DMA_ADDR_INVALID) { -+ req->request.dma = dma_map_single(musb->controller, -+ req->request.buf, -+ transfer_size, -+ musb_ep->is_in ? -+ DMA_TO_DEVICE : -+ DMA_FROM_DEVICE); -+ req->mapped = 1; -+ } else { -+ dma_sync_single_for_device(musb->controller, -+ req->request.dma, -+ transfer_size, -+ musb_ep->is_in ? DMA_TO_DEVICE : -+ DMA_FROM_DEVICE); -+ req->mapped = 0; -+ } -+ -+ if (musb_ep->is_in) { -+ csr = musb_readw(epio, MUSB_TXCSR); -+ csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE; -+ csr |= MUSB_TXCSR_AUTOSET | MUSB_TXCSR_MODE; -+ csr &= ~MUSB_TXCSR_P_UNDERRUN; -+ musb_writew(epio, MUSB_TXCSR, csr); -+ } else { -+ /* We only use mode1 dma and assume we never know the size of -+ * the data we're receiving. For anything else, we're gonna use -+ * pio. -+ */ -+ -+ /* this special sequence is necessary to get DMAReq to -+ * activate -+ */ -+ csr = musb_readw(epio, MUSB_RXCSR); -+ csr |= MUSB_RXCSR_AUTOCLEAR; -+ musb_writew(epio, MUSB_RXCSR, csr); -+ -+ csr |= MUSB_RXCSR_DMAENAB; -+ musb_writew(epio, MUSB_RXCSR, csr); -+ -+ csr |= MUSB_RXCSR_DMAMODE; -+ musb_writew(epio, MUSB_RXCSR, csr); -+ musb_writew(epio, MUSB_RXCSR, csr); -+ -+ csr = musb_readw(epio, MUSB_RXCSR); -+ } -+ -+ musb_ep->dma = dma; -+ -+ (void) cntr->channel_program(dma, packet_sz, true, req->request.dma, -+ transfer_size); -+ -+ DBG(4, "%s dma started (addr 0x%08x, len %u, CSR %04x)\n", -+ musb_ep->name, req->request.dma, transfer_size, csr); -+ -+ return 0; -+} -+ -+/** -+ * stop_dma - stops a dma transfer and unmaps a buffer -+ * @musb: the musb controller pointer -+ * @ep: the enpoint being used -+ * @req: the request to stop -+ */ -+static void stop_dma(struct musb *musb, struct musb_ep *ep, -+ struct musb_request *req) -+{ -+ void __iomem *epio; -+ -+ DBG(4, "%s dma stopped (addr 0x%08x, len %d)\n", ep->name, -+ req->request.dma, req->request.actual); -+ -+ if (req->mapped) { -+ dma_unmap_single(musb->controller, req->request.dma, -+ req->request.actual, req->tx ? -+ DMA_TO_DEVICE : DMA_FROM_DEVICE); -+ req->request.dma = DMA_ADDR_INVALID; -+ req->mapped = 0; -+ } else { -+ dma_sync_single_for_cpu(musb->controller, req->request.dma, -+ req->request.actual, req->tx ? 
-+ DMA_TO_DEVICE : DMA_FROM_DEVICE); -+ } -+ -+ epio = musb->endpoints[ep->current_epnum].regs; -+ if (req->tx) { -+ u16 csr; -+ -+ csr = musb_readw(epio, MUSB_TXCSR); -+ csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); -+ musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS); -+ csr &= ~MUSB_TXCSR_DMAMODE; -+ musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS); -+ } else { -+ u16 csr; -+ -+ csr = musb_readw(epio, MUSB_RXCSR); -+ csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); -+ musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_P_WZC_BITS); -+ csr &= ~MUSB_RXCSR_DMAMODE; -+ musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_P_WZC_BITS); -+ } -+ -+ musb->dma_controller->channel_release(ep->dma); -+ ep->dma = NULL; -+} - - /* - * Abort requests queued to an endpoint using the status. Synchronous. -@@ -157,31 +314,55 @@ __acquires(ep->musb->lock) - */ - static void nuke(struct musb_ep *ep, const int status) - { -+ void __iomem *epio; - struct musb_request *req = NULL; -- void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; -+ struct musb *musb; - -+ musb = ep->musb; -+ epio = musb->endpoints[ep->current_epnum].regs; - ep->busy = 1; - -- if (is_dma_capable() && ep->dma) { -- struct dma_controller *c = ep->musb->dma_controller; -- int value; -+ DBG(2, "%s nuke, DMA %p RxCSR %04x TxCSR %04x\n", ep->name, ep->dma, -+ musb_readw(epio, MUSB_RXCSR), musb_readw(epio, MUSB_TXCSR)); -+ if (ep->dma) { -+ struct dma_controller *c = musb->dma_controller; -+ -+ BUG_ON(next_request(ep) == NULL); -+ req = to_musb_request(next_request(ep)); -+ (void) c->channel_abort(ep->dma); -+ stop_dma(musb, ep, req); -+ - if (ep->is_in) { -- musb_writew(epio, MUSB_TXCSR, -- 0 | MUSB_TXCSR_FLUSHFIFO); -- musb_writew(epio, MUSB_TXCSR, -- 0 | MUSB_TXCSR_FLUSHFIFO); -+ u16 csr; -+ -+ csr = musb_readw(epio, MUSB_TXCSR); -+ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_DMAENAB -+ | MUSB_TXCSR_FLUSHFIFO); -+ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_FLUSHFIFO); -+ if (csr & MUSB_TXCSR_TXPKTRDY) { -+ /* If TxPktRdy was set, an extra IRQ was just -+ * generated. This IRQ will confuse things if -+ * a we don't handle it before a new TX request -+ * is started. So we clear it here, in a bit -+ * unsafe fashion (if nuke() is called outside -+ * musb_interrupt(), we might have a delay in -+ * handling other TX EPs.) */ -+ musb->int_tx |= musb_readw(musb->mregs, -+ MUSB_INTRTX); -+ musb->int_tx &= ~(1 << ep->current_epnum); -+ } - } else { -- musb_writew(epio, MUSB_RXCSR, -- 0 | MUSB_RXCSR_FLUSHFIFO); -- musb_writew(epio, MUSB_RXCSR, -- 0 | MUSB_RXCSR_FLUSHFIFO); -+ musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_DMAENAB -+ | MUSB_RXCSR_FLUSHFIFO); -+ musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_FLUSHFIFO); - } -- -- value = c->channel_abort(ep->dma); -- DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); -- c->channel_release(ep->dma); -- ep->dma = NULL; - } -+ if (ep->is_in) -+ musb_writew(epio, MUSB_TXCSR, 0); -+ else -+ musb_writew(epio, MUSB_RXCSR, 0); -+ -+ ep->rx_pending = false; - - while (!list_empty(&(ep->req_list))) { - req = container_of(ep->req_list.next, struct musb_request, -@@ -207,81 +388,43 @@ static inline int max_ep_writesize(struc - return ep->packet_sz; - } - -- --#ifdef CONFIG_USB_INVENTRA_DMA -- --/* Peripheral tx (IN) using Mentor DMA works as follows: -- Only mode 0 is used for transfers <= wPktSize, -- mode 1 is used for larger transfers, -- -- One of the following happens: -- - Host sends IN token which causes an endpoint interrupt -- -> TxAvail -- -> if DMA is currently busy, exit. 
-- -> if queue is non-empty, txstate(). -- -- - Request is queued by the gadget driver. -- -> if queue was previously empty, txstate() -- -- txstate() -- -> start -- /\ -> setup DMA -- | (data is transferred to the FIFO, then sent out when -- | IN token(s) are recd from Host. -- | -> DMA interrupt on completion -- | calls TxAvail. -- | -> stop DMA, ~DmaEenab, -- | -> set TxPktRdy for last short pkt or zlp -- | -> Complete Request -- | -> Continue next request (call txstate) -- |___________________________________| -- -- * Non-Mentor DMA engines can of course work differently, such as by -- * upleveling from irq-per-packet to irq-per-buffer. -- */ -- --#endif -- --/* -- * An endpoint is transmitting data. This can be called either from -- * the IRQ routine or from ep.queue() to kickstart a request on an -- * endpoint. -+/** -+ * do_pio_tx - kicks TX pio transfer -+ * @musb: musb controller pointer -+ * @req: the request to be transfered via pio -+ * -+ * An endpoint is transmitting data. This can be called from -+ * the IRQ routine. - * - * Context: controller locked, IRQs blocked, endpoint selected - */ --static void txstate(struct musb *musb, struct musb_request *req) -+static void do_pio_tx(struct musb *musb, struct musb_request *req) - { - u8 epnum = req->epnum; - struct musb_ep *musb_ep; - void __iomem *epio = musb->endpoints[epnum].regs; - struct usb_request *request; - u16 fifo_count = 0, csr; -- int use_dma = 0; - - musb_ep = req->ep; - -- /* we shouldn't get here while DMA is active ... but we do ... */ -- if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { -- DBG(4, "dma pending...\n"); -- return; -- } -- - /* read TXCSR before */ - csr = musb_readw(epio, MUSB_TXCSR); - - request = &req->request; -+ - fifo_count = min(max_ep_writesize(musb, musb_ep), - (int)(request->length - request->actual)); - - if (csr & MUSB_TXCSR_TXPKTRDY) { - DBG(5, "%s old packet still ready , txcsr %03x\n", -- musb_ep->end_point.name, csr); -+ musb_ep->name, csr); - return; - } - - if (csr & MUSB_TXCSR_P_SENDSTALL) { - DBG(5, "%s stalling, txcsr %03x\n", -- musb_ep->end_point.name, csr); -+ musb_ep->name, csr); - return; - } - -@@ -289,108 +432,17 @@ static void txstate(struct musb *musb, s - epnum, musb_ep->packet_sz, fifo_count, - csr); - --#ifndef CONFIG_MUSB_PIO_ONLY -- if (is_dma_capable() && musb_ep->dma) { -- struct dma_controller *c = musb->dma_controller; -- -- use_dma = (request->dma != DMA_ADDR_INVALID); -- -- /* MUSB_TXCSR_P_ISO is still set correctly */ -- -- if (musb_inventra_dma()) { -- size_t request_size; -- -- /* setup DMA, then program endpoint CSR */ -- request_size = min(request->length, -- musb_ep->dma->max_len); -- if (request_size <= musb_ep->packet_sz) -- musb_ep->dma->desired_mode = 0; -- else -- musb_ep->dma->desired_mode = 1; -- -- use_dma = use_dma && c->channel_program( -- musb_ep->dma, musb_ep->packet_sz, -- musb_ep->dma->desired_mode, -- request->dma, request_size); -- if (use_dma) { -- if (musb_ep->dma->desired_mode == 0) { -- /* ASSERT: DMAENAB is clear */ -- csr &= ~(MUSB_TXCSR_AUTOSET | -- MUSB_TXCSR_DMAMODE); -- csr |= (MUSB_TXCSR_DMAENAB | -- MUSB_TXCSR_MODE); -- /* against programming guide */ -- } else -- csr |= (MUSB_TXCSR_AUTOSET -- | MUSB_TXCSR_DMAENAB -- | MUSB_TXCSR_DMAMODE -- | MUSB_TXCSR_MODE); -- -- csr &= ~MUSB_TXCSR_P_UNDERRUN; -- musb_writew(epio, MUSB_TXCSR, csr); -- } -- } -- -- if (cppi_ti_dma()) { -- /* program endpoint CSR first, then setup DMA */ -- csr &= ~(MUSB_TXCSR_AUTOSET -- | MUSB_TXCSR_DMAMODE -- | MUSB_TXCSR_P_UNDERRUN -- | 
MUSB_TXCSR_TXPKTRDY); -- csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; -- musb_writew(epio, MUSB_TXCSR, -- (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) -- | csr); -- -- /* ensure writebuffer is empty */ -- csr = musb_readw(epio, MUSB_TXCSR); -- -- /* NOTE host side sets DMAENAB later than this; both are -- * OK since the transfer dma glue (between CPPI and Mentor -- * fifos) just tells CPPI it could start. Data only moves -- * to the USB TX fifo when both fifos are ready. -- */ -- -- /* "mode" is irrelevant here; handle terminating ZLPs like -- * PIO does, since the hardware RNDIS mode seems unreliable -- * except for the last-packet-is-already-short case. -- */ -- use_dma = use_dma && c->channel_program( -- musb_ep->dma, musb_ep->packet_sz, -- 0, -- request->dma, -- request->length); -- if (!use_dma) { -- c->channel_release(musb_ep->dma); -- musb_ep->dma = NULL; -- /* ASSERT: DMAENAB clear */ -- csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); -- /* invariant: prequest->buf is non-null */ -- } -- } -- -- if (tusb_dma_omap()) { -- use_dma = use_dma && c->channel_program( -- musb_ep->dma, musb_ep->packet_sz, -- request->zero, -- request->dma, -- request->length); -- } -- } --#endif -- -- if (!use_dma) { -- musb_write_fifo(musb_ep->hw_ep, fifo_count, -- (u8 *) (request->buf + request->actual)); -- request->actual += fifo_count; -- csr |= MUSB_TXCSR_TXPKTRDY; -- csr &= ~MUSB_TXCSR_P_UNDERRUN; -- musb_writew(epio, MUSB_TXCSR, csr); -- } -+ musb_write_fifo(musb_ep->hw_ep, fifo_count, -+ (u8 *) (request->buf + request->actual)); -+ request->actual += fifo_count; -+ csr |= MUSB_TXCSR_TXPKTRDY; -+ /* REVISIT wasn't this cleared by musb_g_tx() ? */ -+ csr &= ~MUSB_TXCSR_P_UNDERRUN; -+ musb_writew(epio, MUSB_TXCSR, csr); - - /* host may already have the data when this message shows... */ -- DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", -- musb_ep->end_point.name, use_dma ? "dma" : "pio", -+ DBG(3, "%s TX/IN pio len %d/%d, txcsr %04x, fifo %d/%d\n", -+ musb_ep->name, - request->actual, request->length, - musb_readw(epio, MUSB_TXCSR), - fifo_count, -@@ -398,333 +450,241 @@ static void txstate(struct musb *musb, s - } - - /* -+ * Context: controller locked, IRQs blocked. -+ */ -+static void musb_ep_restart(struct musb *musb, struct musb_request *req) -+{ -+ DBG(3, "<== TX/IN request %p len %u on hw_ep%d%s\n", -+ &req->request, req->request.length, req->epnum, -+ req->ep->dma ? " (dma)" : "(pio)"); -+ -+ musb_ep_select(musb->mregs, req->epnum); -+ -+ if (start_dma(musb, req) < 0) -+ do_pio_tx(musb, req); -+} -+ -+/* - * FIFO state update (e.g. data ready). - * Called from IRQ, with controller locked. - */ - void musb_g_tx(struct musb *musb, u8 epnum) - { - u16 csr; -+ struct musb_request *req; - struct usb_request *request; - u8 __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; - void __iomem *epio = musb->endpoints[epnum].regs; - struct dma_channel *dma; -+ int count; - - musb_ep_select(mbase, epnum); - request = next_request(musb_ep); - - csr = musb_readw(epio, MUSB_TXCSR); -- DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); -+ dma = musb_ep->dma; -+ DBG(4, "<== %s, TxCSR %04x, DMA %p\n", musb_ep->name, csr, dma); - -- dma = is_dma_capable() ? 
musb_ep->dma : NULL; -- do { -- /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX -- * probably rates reporting as a host error -- */ -- if (csr & MUSB_TXCSR_P_SENTSTALL) { -- csr |= MUSB_TXCSR_P_WZC_BITS; -- csr &= ~MUSB_TXCSR_P_SENTSTALL; -- musb_writew(epio, MUSB_TXCSR, csr); -- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -- dma->status = MUSB_DMA_STATUS_CORE_ABORT; -- musb->dma_controller->channel_abort(dma); -- } -- -- if (request) -- musb_g_giveback(musb_ep, request, -EPIPE); -+ if (csr & MUSB_TXCSR_P_SENDSTALL) { -+ DBG(5, "%s stalling, txcsr %04x\n", -+ musb_ep->name, csr); -+ return; -+ } - -- break; -+ /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX -+ * probably rates reporting as a host error -+ */ -+ if (csr & MUSB_TXCSR_P_SENTSTALL) { -+ DBG(5, "ep%d is halted, cannot transfer\n", epnum); -+ csr |= MUSB_TXCSR_P_WZC_BITS; -+ csr &= ~MUSB_TXCSR_P_SENTSTALL; -+ musb_writew(epio, MUSB_TXCSR, csr); -+ if (dma != NULL) { -+ BUG_ON(request == NULL); -+ dma->status = MUSB_DMA_STATUS_CORE_ABORT; -+ musb->dma_controller->channel_abort(dma); -+ stop_dma(musb, musb_ep, to_musb_request(request)); -+ dma = NULL; - } - -- if (csr & MUSB_TXCSR_P_UNDERRUN) { -- /* we NAKed, no big deal ... little reason to care */ -- csr |= MUSB_TXCSR_P_WZC_BITS; -- csr &= ~(MUSB_TXCSR_P_UNDERRUN -- | MUSB_TXCSR_TXPKTRDY); -- musb_writew(epio, MUSB_TXCSR, csr); -- DBG(20, "underrun on ep%d, req %p\n", epnum, request); -- } -+ if (request && musb_ep->stalled) -+ musb_g_giveback(musb_ep, request, -EPIPE); - -- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -- /* SHOULD NOT HAPPEN ... has with cppi though, after -- * changing SENDSTALL (and other cases); harmless? -- */ -- DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); -- break; -+ return; -+ } -+ -+ if (csr & MUSB_TXCSR_P_UNDERRUN) { -+ /* we NAKed, no big deal ... little reason to care */ -+ csr |= MUSB_TXCSR_P_WZC_BITS; -+ csr &= ~MUSB_TXCSR_P_UNDERRUN; -+ musb_writew(epio, MUSB_TXCSR, csr); -+ DBG(2, "underrun on ep%d, req %p\n", epnum, request); -+ } -+ -+ /* The interrupt is generated when this bit gets cleared, -+ * if we fall here while TXPKTRDY is still set, then that's -+ * a really messed up case. One such case seems to be due to -+ * the HW -- sometimes the IRQ is generated early. -+ */ -+ count = 0; -+ while (csr & MUSB_TXCSR_TXPKTRDY) { -+ count++; -+ if (count == 1000) { -+ DBG(1, "TX IRQ while TxPktRdy still set " -+ "(CSR %04x)\n", csr); -+ return; - } -+ csr = musb_readw(epio, MUSB_TXCSR); -+ } - -- if (request) { -- u8 is_dma = 0; -+ if (dma != NULL && dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -+ /* SHOULD NOT HAPPEN ... has with cppi though, after -+ * changing SENDSTALL (and other cases); harmless? -+ */ -+ DBG(3, "%s dma still busy?\n", musb_ep->name); -+ return; -+ } - -- if (dma && (csr & MUSB_TXCSR_DMAENAB)) { -- is_dma = 1; -- csr |= MUSB_TXCSR_P_WZC_BITS; -- csr &= ~(MUSB_TXCSR_DMAENAB -- | MUSB_TXCSR_P_UNDERRUN -- | MUSB_TXCSR_TXPKTRDY); -- musb_writew(epio, MUSB_TXCSR, csr); -- /* ensure writebuffer is empty */ -- csr = musb_readw(epio, MUSB_TXCSR); -- request->actual += musb_ep->dma->actual_len; -- DBG(4, "TXCSR%d %04x, dma off, " -- "len %zu, req %p\n", -- epnum, csr, -- musb_ep->dma->actual_len, -- request); -- } -+ if (request == NULL) { -+ DBG(2, "%s, spurious TX IRQ", musb_ep->name); -+ return; -+ } - -- if (is_dma || request->actual == request->length) { -+ req = to_musb_request(request); - -- /* First, maybe a terminating short packet. 
-- * Some DMA engines might handle this by -- * themselves. -- */ -- if ((request->zero -- && request->length -- && (request->length -- % musb_ep->packet_sz) -- == 0) --#ifdef CONFIG_USB_INVENTRA_DMA -- || (is_dma && -- ((!dma->desired_mode) || -- (request->actual & -- (musb_ep->packet_sz - 1)))) --#endif -- ) { -- /* on dma completion, fifo may not -- * be available yet ... -- */ -- if (csr & MUSB_TXCSR_TXPKTRDY) -- break; -- -- DBG(4, "sending zero pkt\n"); -- musb_writew(epio, MUSB_TXCSR, -- MUSB_TXCSR_MODE -- | MUSB_TXCSR_TXPKTRDY); -- request->zero = 0; -- } -- -- /* ... or if not, then complete it */ -- musb_g_giveback(musb_ep, request, 0); -- -- /* kickstart next transfer if appropriate; -- * the packet that just completed might not -- * be transmitted for hours or days. -- * REVISIT for double buffering... -- * FIXME revisit for stalls too... -- */ -- musb_ep_select(mbase, epnum); -- csr = musb_readw(epio, MUSB_TXCSR); -- if (csr & MUSB_TXCSR_FIFONOTEMPTY) -- break; -- request = musb_ep->desc -- ? next_request(musb_ep) -- : NULL; -- if (!request) { -- DBG(4, "%s idle now\n", -- musb_ep->end_point.name); -- break; -- } -- } -+ if (dma) { -+ int short_packet = 0; - -- txstate(musb, to_musb_request(request)); -- } -+ BUG_ON(!(csr & MUSB_TXCSR_DMAENAB)); - -- } while (0); --} -+ request->actual += dma->actual_len; -+ DBG(4, "TxCSR%d %04x, dma finished, len %zu, req %p\n", -+ epnum, csr, dma->actual_len, request); - --/* ------------------------------------------------------------ */ -+ stop_dma(musb, musb_ep, req); - --#ifdef CONFIG_USB_INVENTRA_DMA -+ WARN(request->actual != request->length, -+ "actual %d length %d\n", request->actual, -+ request->length); - --/* Peripheral rx (OUT) using Mentor DMA works as follows: -- - Only mode 0 is used. -+ if (request->length % musb_ep->packet_sz) -+ short_packet = 1; - -- - Request is queued by the gadget class driver. -- -> if queue was previously empty, rxstate() -+ req->complete = true; -+ if (request->zero || short_packet) { -+ csr = musb_readw(epio, MUSB_TXCSR); -+ DBG(4, "sending zero pkt, DMA, TxCSR %04x\n", csr); -+ musb_writew(epio, MUSB_TXCSR, -+ csr | MUSB_TXCSR_TXPKTRDY); -+ return; -+ } -+ } - -- - Host sends OUT token which causes an endpoint interrupt -- /\ -> RxReady -- | -> if request queued, call rxstate -- | /\ -> setup DMA -- | | -> DMA interrupt on completion -- | | -> RxReady -- | | -> stop DMA -- | | -> ack the read -- | | -> if data recd = max expected -- | | by the request, or host -- | | sent a short packet, -- | | complete the request, -- | | and start the next one. -- | |_____________________________________| -- | else just wait for the host -- | to send the next OUT token. -- |__________________________________________________| -+ if (request->actual == request->length) { -+ if (!req->complete) { -+ /* Maybe we have to send a zero length packet */ -+ if (request->zero && request->length && -+ (request->length % musb_ep->packet_sz) == 0) { -+ csr = musb_readw(epio, MUSB_TXCSR); -+ DBG(4, "sending zero pkt, TxCSR %04x\n", csr); -+ musb_writew(epio, MUSB_TXCSR, -+ csr | MUSB_TXCSR_TXPKTRDY); -+ req->complete = true; -+ return; -+ } -+ } -+ musb_ep->busy = 1; -+ musb_g_giveback(musb_ep, request, 0); -+ musb_ep->busy = 0; - -- * Non-Mentor DMA engines can of course work differently. -- */ -+ request = musb_ep->desc ? 
next_request(musb_ep) : NULL; -+ if (!request) { -+ DBG(4, "%s idle now\n", musb_ep->name); -+ return; -+ } -+ musb_ep_restart(musb, to_musb_request(request)); -+ return; -+ } - --#endif -+ do_pio_tx(musb, to_musb_request(request)); -+} - --/* -+/* ------------------------------------------------------------ */ -+ -+/** -+ * do_pio_rx - kicks RX pio transfer -+ * @musb: musb controller pointer -+ * @req: the request to be transfered via pio -+ * - * Context: controller locked, IRQs blocked, endpoint selected - */ --static void rxstate(struct musb *musb, struct musb_request *req) -+static void do_pio_rx(struct musb *musb, struct musb_request *req) - { - u16 csr = 0; - const u8 epnum = req->epnum; - struct usb_request *request = &req->request; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; -- struct dma_controller *c = musb->dma_controller; -- struct dma_channel *channel = musb_ep->dma; - void __iomem *epio = musb->endpoints[epnum].regs; -- u16 fifo_count = 0; -- u16 len = musb_ep->packet_sz; -- int use_dma = 0; -+ unsigned fifo_count = 0; -+ u16 count = musb_ep->packet_sz; -+ int retries = 1000; - - csr = musb_readw(epio, MUSB_RXCSR); - -- if (cppi_ti_dma() && musb_ep->dma) { -- /* NOTE: CPPI won't actually stop advancing the DMA -- * queue after short packet transfers, so this is almost -- * always going to run as IRQ-per-packet DMA so that -- * faults will be handled correctly. -- */ -- use_dma = c->channel_program(channel, -- musb_ep->packet_sz, -- !request->short_not_ok, -- request->dma + request->actual, -- request->length - request->actual); -- -- if (use_dma) { -- /* make sure that if an rxpkt arrived after the irq, -- * the cppi engine will be ready to take it as soon -- * as DMA is enabled -- */ -- csr &= ~(MUSB_RXCSR_AUTOCLEAR -- | MUSB_RXCSR_DMAMODE); -- csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; -- musb_writew(epio, MUSB_RXCSR, csr); -+ /* RxPktRdy should be the only possibility here. -+ * Sometimes the IRQ is generated before -+ * RxPktRdy gets set, so we'll wait a while. */ -+ while (!(csr & MUSB_RXCSR_RXPKTRDY)) { -+ if (retries-- == 0) { -+ DBG(1, "RxPktRdy did not get set (CSR %04x)\n", csr); -+ BUG_ON(!(csr & MUSB_RXCSR_RXPKTRDY)); - } -+ csr = musb_readw(epio, MUSB_RXCSR); - } - -- if (csr & MUSB_RXCSR_RXPKTRDY) { -- len = musb_readw(epio, MUSB_RXCOUNT); -- if (request->actual < request->length) { -- -- /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in -- * mode 0 only. So we do not get endpoint interrupts due to DMA -- * completion. We only get interrupts from DMA controller. -- * -- * We could operate in DMA mode 1 if we knew the size of the tranfer -- * in advance. For mass storage class, request->length = what the host -- * sends, so that'd work. But for pretty much everything else, -- * request->length is routinely more than what the host sends. For -- * most these gadgets, end of is signified either by a short packet, -- * or filling the last byte of the buffer. (Sending extra data in -- * that last pckate should trigger an overflow fault.) But in mode 1, -- * we don't get DMA completion interrrupt for short packets. -- * -- * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), -- * to get endpoint interrupt on every DMA req, but that didn't seem -- * to work reliably. -- * -- * REVISIT an updated g_file_storage can set req->short_not_ok, which -- * then becomes usable as a runtime "use mode 1" hint... 
-- */ -- if (musb_inventra_dma() && musb_ep->dma) { -- struct dma_controller *c; -- struct dma_channel *channel; -- int use_dma = 0; -- -- c = musb->dma_controller; -- channel = musb_ep->dma; -- -- csr |= MUSB_RXCSR_DMAENAB; --#ifdef USE_MODE1 -- csr |= MUSB_RXCSR_AUTOCLEAR; -- /* csr |= MUSB_RXCSR_DMAMODE; */ -- -- /* this special sequence (enabling and then -- * disabling MUSB_RXCSR_DMAMODE) is required -- * to get DMAReq to activate -- */ -- musb_writew(epio, MUSB_RXCSR, -- csr | MUSB_RXCSR_DMAMODE); --#endif -- musb_writew(epio, MUSB_RXCSR, csr); -+ musb_ep->busy = 1; - -- if (request->actual < request->length) { -- int transfer_size = 0; --#ifdef USE_MODE1 -- transfer_size = min(request->length, -- channel->max_len); --#else -- transfer_size = len; --#endif -- if (transfer_size <= musb_ep->packet_sz) -- musb_ep->dma->desired_mode = 0; -- else -- musb_ep->dma->desired_mode = 1; -- -- use_dma = c->channel_program( -- channel, -- musb_ep->packet_sz, -- channel->desired_mode, -- request->dma -- + request->actual, -- transfer_size); -- if (use_dma) -- return; -- } -- } -+ count = musb_readw(epio, MUSB_RXCOUNT); -+ if (request->actual < request->length) { -+ fifo_count = request->length - request->actual; -+ DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", -+ musb_ep->name, -+ count, fifo_count, -+ musb_ep->packet_sz); - -- if (tusb_dma_omap() && musb_ep->dma) { -- u32 dma_addr = request->dma + request->actual; -+ fifo_count = min_t(unsigned, count, fifo_count); - -- use_dma = c->channel_program(channel, -- musb_ep->packet_sz, -- channel->desired_mode, -- dma_addr, -- fifo_count); -- if (use_dma) -- return; -- } -- -- fifo_count = request->length - request->actual; -- DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", -- musb_ep->end_point.name, -- len, fifo_count, -- musb_ep->packet_sz); -- -- fifo_count = min(len, fifo_count); -- -- musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) -- (request->buf + request->actual)); -- request->actual += fifo_count; -+ musb_read_fifo(musb_ep->hw_ep, fifo_count, -+ (u8 *) (request->buf + request->actual)); -+ request->actual += fifo_count; - -- /* REVISIT if we left anything in the fifo, flush -- * it and report -EOVERFLOW -- */ -+ /* REVISIT if we left anything in the fifo, flush -+ * it and report -EOVERFLOW -+ */ - -- /* ack the read! */ -- csr |= MUSB_RXCSR_P_WZC_BITS; -- csr &= ~MUSB_RXCSR_RXPKTRDY; -- musb_writew(epio, MUSB_RXCSR, csr); -- } -+ /* ack the read! 
*/ -+ csr |= MUSB_RXCSR_P_WZC_BITS; -+ csr &= ~MUSB_RXCSR_RXPKTRDY; -+ musb_writew(epio, MUSB_RXCSR, csr); - } - -- /* reach the end or short packet detected */ -- if (request->actual == request->length || len < musb_ep->packet_sz) -+ musb_ep->busy = 0; -+ -+ /* we just received a short packet, it's ok to -+ * giveback() the request already -+ */ -+ if (request->actual == request->length || count < musb_ep->packet_sz) - musb_g_giveback(musb_ep, request, 0); - } - - /* - * Data ready for a request; called from IRQ - */ --void musb_g_rx(struct musb *musb, u8 epnum) -+void musb_g_rx(struct musb *musb, u8 epnum, bool is_dma) - { - u16 csr; -+ struct musb_request *req; - struct usb_request *request; - void __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; -@@ -733,28 +693,42 @@ void musb_g_rx(struct musb *musb, u8 epn - - musb_ep_select(mbase, epnum); - -+ csr = musb_readw(epio, MUSB_RXCSR); -+restart: -+ if (csr == 0) { -+ DBG(3, "spurious IRQ\n"); -+ return; -+ } -+ - request = next_request(musb_ep); -+ if (!request) { -+ DBG(1, "waiting for request for %s (csr %04x)\n", -+ musb_ep->name, csr); -+ musb_ep->rx_pending = true; -+ return; -+ } - -- csr = musb_readw(epio, MUSB_RXCSR); -- dma = is_dma_capable() ? musb_ep->dma : NULL; -+ dma = musb_ep->dma; - -- DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, -- csr, dma ? " (dma)" : "", request); -+ DBG(4, "<== %s, rxcsr %04x %p (dma %s, %s)\n", musb_ep->name, -+ csr, request, dma ? "enabled" : "disabled", -+ is_dma ? "true" : "false"); - - if (csr & MUSB_RXCSR_P_SENTSTALL) { -- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -- dma->status = MUSB_DMA_STATUS_CORE_ABORT; -- (void) musb->dma_controller->channel_abort(dma); -- request->actual += musb_ep->dma->actual_len; -- } -- -+ DBG(5, "ep%d is halted, cannot transfer\n", epnum); - csr |= MUSB_RXCSR_P_WZC_BITS; - csr &= ~MUSB_RXCSR_P_SENTSTALL; - musb_writew(epio, MUSB_RXCSR, csr); - -- if (request) -+ if (dma != NULL && -+ dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -+ dma->status = MUSB_DMA_STATUS_CORE_ABORT; -+ musb->dma_controller->channel_abort(dma); -+ } -+ -+ if (musb_ep->stalled) - musb_g_giveback(musb_ep, request, -EPIPE); -- goto done; -+ return; - } - - if (csr & MUSB_RXCSR_P_OVERRUN) { -@@ -763,76 +737,64 @@ void musb_g_rx(struct musb *musb, u8 epn - musb_writew(epio, MUSB_RXCSR, csr); - - DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); -- if (request && request->status == -EINPROGRESS) -+ if (request->status == -EINPROGRESS) - request->status = -EOVERFLOW; - } -+ - if (csr & MUSB_RXCSR_INCOMPRX) { - /* REVISIT not necessarily an error */ -- DBG(4, "%s, incomprx\n", musb_ep->end_point.name); -+ DBG(4, "%s, incomprx\n", musb_ep->name); - } - -- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -- /* "should not happen"; likely RXPKTRDY pending for DMA */ -- DBG((csr & MUSB_RXCSR_DMAENAB) ? 
4 : 1, -- "%s busy, csr %04x\n", -- musb_ep->end_point.name, csr); -- goto done; -- } -+ req = to_musb_request(request); - -- if (dma && (csr & MUSB_RXCSR_DMAENAB)) { -- csr &= ~(MUSB_RXCSR_AUTOCLEAR -- | MUSB_RXCSR_DMAENAB -- | MUSB_RXCSR_DMAMODE); -- musb_writew(epio, MUSB_RXCSR, -- MUSB_RXCSR_P_WZC_BITS | csr); -+ BUG_ON(dma == NULL && (csr & MUSB_RXCSR_DMAENAB)); - -- request->actual += musb_ep->dma->actual_len; -+ if (dma != NULL) { -+ u32 len; - -- DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", -- epnum, csr, -- musb_readw(epio, MUSB_RXCSR), -- musb_ep->dma->actual_len, request); -- -- /* Autoclear doesn't clear RxPktRdy for short packets */ -- if ((tusb_dma_omap() || musb_inventra_dma()) -- && ((dma->desired_mode == 0) -- || (dma->actual_len -- & (musb_ep->packet_sz - 1)))) { -- /* ack the read! */ -- csr &= ~MUSB_RXCSR_RXPKTRDY; -- musb_writew(epio, MUSB_RXCSR, csr); -- } -- -- /* incomplete, and not short? wait for next IN packet */ -- if ((request->actual < request->length) -- && (musb_ep->dma->actual_len -- == musb_ep->packet_sz)) -- goto done; -+ /* We don't handle stalls yet. */ -+ BUG_ON(csr & MUSB_RXCSR_P_SENDSTALL); - -- musb_g_giveback(musb_ep, request, 0); -+ /* We abort() so dma->actual_len gets updated */ -+ musb->dma_controller->channel_abort(dma); - -- request = next_request(musb_ep); -- if (!request) -- goto done; -+ /* We only expect full packets. */ -+ BUG_ON(dma->actual_len & (musb_ep->packet_sz - 1)); - -- /* don't start more i/o till the stall clears */ -- musb_ep_select(mbase, epnum); -- csr = musb_readw(epio, MUSB_RXCSR); -- if (csr & MUSB_RXCSR_P_SENDSTALL) -- goto done; -- } -+ request->actual += dma->actual_len; -+ len = dma->actual_len; - -+ stop_dma(musb, musb_ep, req); -+ dma = NULL; - -- /* analyze request if the ep is hot */ -- if (request) -- rxstate(musb, to_musb_request(request)); -- else -- DBG(3, "packet waiting for %s%s request\n", -- musb_ep->desc ? 
"" : "inactive ", -- musb_ep->end_point.name); -+ DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", -+ epnum, csr, musb_readw(epio, MUSB_RXCSR), len, request); - --done: -- return; -+ if (!is_dma) { -+ /* Unload with pio */ -+ do_pio_rx(musb, req); -+ } else { -+ BUG_ON(request->actual != request->length); -+ musb_g_giveback(musb_ep, request, 0); -+ } -+ return; -+ } -+ -+ if (dma == NULL && musb->use_dma) { -+ if (start_dma(musb, req) == 0) -+ dma = musb_ep->dma; -+ } -+ -+ if (dma == NULL) { -+ do_pio_rx(musb, req); -+ csr = musb_readw(epio, MUSB_RXCSR); -+ if (csr & MUSB_RXCSR_RXPKTRDY) { -+ DBG(2, "new packet in FIFO, restarting RX " -+ "(CSR %04x)\n", csr); -+ goto restart; -+ } -+ } - } - - /* ------------------------------------------------------------ */ -@@ -847,13 +809,15 @@ static int musb_gadget_enable(struct usb - struct musb *musb; - void __iomem *mbase; - u8 epnum; -- u16 csr; -+ u16 csr = 0; - unsigned tmp; - int status = -EINVAL; - - if (!ep || !desc) - return -EINVAL; - -+ DBG(1, "===> enabling %s\n", ep->name); -+ - musb_ep = to_musb_ep(ep); - hw_ep = musb_ep->hw_ep; - regs = hw_ep->regs; -@@ -867,10 +831,10 @@ static int musb_gadget_enable(struct usb - status = -EBUSY; - goto fail; - } -- musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; -+ musb_ep->type = usb_endpoint_type(desc); - - /* check direction and (later) maxpacket size against endpoint */ -- if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum) -+ if (usb_endpoint_num(desc) != epnum) - goto fail; - - /* REVISIT this rules out high bandwidth periodic transfers */ -@@ -883,7 +847,7 @@ static int musb_gadget_enable(struct usb - * packet size (or fail), set the mode, clear the fifo - */ - musb_ep_select(mbase, epnum); -- if (desc->bEndpointAddress & USB_DIR_IN) { -+ if (usb_endpoint_dir_in(desc)) { - u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); - - if (hw_ep->is_shared_fifo) -@@ -901,18 +865,15 @@ static int musb_gadget_enable(struct usb - */ - musb_writew(regs, MUSB_TXMAXP, tmp); - -+ /* clear DATAx toggle */ - csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; -+ - if (musb_readw(regs, MUSB_TXCSR) - & MUSB_TXCSR_FIFONOTEMPTY) - csr |= MUSB_TXCSR_FLUSHFIFO; -- if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) -+ if (usb_endpoint_xfer_isoc(desc)) - csr |= MUSB_TXCSR_P_ISO; -- -- /* set twice in case of double buffering */ -- musb_writew(regs, MUSB_TXCSR, csr); -- /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ - musb_writew(regs, MUSB_TXCSR, csr); -- - } else { - u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); - -@@ -938,34 +899,26 @@ static int musb_gadget_enable(struct usb - musb_writew(regs, MUSB_TXCSR, csr); - } - -+ /* clear DATAx toggle */ - csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; -- if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) -+ -+ if (usb_endpoint_xfer_isoc(desc)) - csr |= MUSB_RXCSR_P_ISO; -- else if (musb_ep->type == USB_ENDPOINT_XFER_INT) -+ else if (usb_endpoint_xfer_int(desc)) - csr |= MUSB_RXCSR_DISNYET; -- -- /* set twice in case of double buffering */ -- musb_writew(regs, MUSB_RXCSR, csr); - musb_writew(regs, MUSB_RXCSR, csr); - } - - /* NOTE: all the I/O code _should_ work fine without DMA, in case - * for some reason you run out of channels here. 
- */ -- if (is_dma_capable() && musb->dma_controller) { -- struct dma_controller *c = musb->dma_controller; -- -- musb_ep->dma = c->channel_alloc(c, hw_ep, -- (desc->bEndpointAddress & USB_DIR_IN)); -- } else -- musb_ep->dma = NULL; -- -+ musb_ep->dma = NULL; - musb_ep->desc = desc; - musb_ep->busy = 0; - status = 0; - - pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", -- musb_driver_name, musb_ep->end_point.name, -+ musb_driver_name, musb_ep->name, - ({ char *s; switch (musb_ep->type) { - case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; - case USB_ENDPOINT_XFER_INT: s = "int"; break; -@@ -978,6 +931,7 @@ static int musb_gadget_enable(struct usb - schedule_work(&musb->irq_work); - - fail: -+ musb_ep_select(mbase, 0); - spin_unlock_irqrestore(&musb->lock, flags); - return status; - } -@@ -995,6 +949,7 @@ static int musb_gadget_disable(struct us - int status = 0; - - musb_ep = to_musb_ep(ep); -+ DBG(4, "disabling %s\n", musb_ep->name); - musb = musb_ep->musb; - epnum = musb_ep->current_epnum; - epio = musb->endpoints[epnum].regs; -@@ -1008,11 +963,13 @@ static int musb_gadget_disable(struct us - int_txe &= ~(1 << epnum); - musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); - musb_writew(epio, MUSB_TXMAXP, 0); -+ musb_writew(epio, MUSB_TXCSR, 0); - } else { - u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); - int_rxe &= ~(1 << epnum); - musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); - musb_writew(epio, MUSB_RXMAXP, 0); -+ musb_writew(epio, MUSB_RXCSR, 0); - } - - musb_ep->desc = NULL; -@@ -1024,7 +981,7 @@ static int musb_gadget_disable(struct us - - spin_unlock_irqrestore(&(musb->lock), flags); - -- DBG(2, "%s\n", musb_ep->end_point.name); -+ DBG(2, "%s\n", musb_ep->name); - - return status; - } -@@ -1036,16 +993,20 @@ static int musb_gadget_disable(struct us - struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) - { - struct musb_ep *musb_ep = to_musb_ep(ep); -+ struct musb *musb = musb_ep->musb; - struct musb_request *request = NULL; - - request = kzalloc(sizeof *request, gfp_flags); -- if (request) { -- INIT_LIST_HEAD(&request->request.list); -- request->request.dma = DMA_ADDR_INVALID; -- request->epnum = musb_ep->current_epnum; -- request->ep = musb_ep; -+ if (!request) { -+ dev_err(musb->controller, "not enough memory\n"); -+ return NULL; - } - -+ INIT_LIST_HEAD(&request->request.list); -+ request->request.dma = DMA_ADDR_INVALID; -+ request->epnum = musb_ep->current_epnum; -+ request->ep = musb_ep; -+ - return &request->request; - } - -@@ -1067,22 +1028,6 @@ struct free_record { - dma_addr_t dma; - }; - --/* -- * Context: controller locked, IRQs blocked. -- */ --static void musb_ep_restart(struct musb *musb, struct musb_request *req) --{ -- DBG(3, "<== %s request %p len %u on hw_ep%d\n", -- req->tx ? "TX/IN" : "RX/OUT", -- &req->request, req->request.length, req->epnum); -- -- musb_ep_select(musb->mregs, req->epnum); -- if (req->tx) -- txstate(musb, req); -- else -- rxstate(musb, req); --} -- - static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, - gfp_t gfp_flags) - { -@@ -1106,37 +1051,14 @@ static int musb_gadget_queue(struct usb_ - if (request->ep != musb_ep) - return -EINVAL; - -- DBG(4, "<== to %s request=%p\n", ep->name, req); -+ DBG(4, "<== to %s request %p length %d\n", ep->name, req, req->length); - - /* request is mine now... 
*/ - request->request.actual = 0; - request->request.status = -EINPROGRESS; - request->epnum = musb_ep->current_epnum; - request->tx = musb_ep->is_in; -- -- if (is_dma_capable() && musb_ep->dma) { -- if (request->request.dma == DMA_ADDR_INVALID) { -- request->request.dma = dma_map_single( -- musb->controller, -- request->request.buf, -- request->request.length, -- request->tx -- ? DMA_TO_DEVICE -- : DMA_FROM_DEVICE); -- request->mapped = 1; -- } else { -- dma_sync_single_for_device(musb->controller, -- request->request.dma, -- request->request.length, -- request->tx -- ? DMA_TO_DEVICE -- : DMA_FROM_DEVICE); -- request->mapped = 0; -- } -- } else if (!req->buf) { -- return -ENODATA; -- } else -- request->mapped = 0; -+ request->mapped = 0; - - spin_lock_irqsave(&musb->lock, lockflags); - -@@ -1151,9 +1073,23 @@ static int musb_gadget_queue(struct usb_ - /* add request to the list */ - list_add_tail(&(request->request.list), &(musb_ep->req_list)); - -- /* it this is the head of the queue, start i/o ... */ -- if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) -+ /* we can only start i/o if this is the head of the queue and -+ * endpoint is not stalled (halted) or busy -+ */ -+ if (!musb_ep->stalled && !musb_ep->busy && -+ &request->request.list == musb_ep->req_list.next && -+ request->tx) { -+ DBG(1, "restarting\n"); - musb_ep_restart(musb, request); -+ } -+ -+ /* if we received an RX packet before the request was queued, -+ * process it here. */ -+ if (!request->tx && musb_ep->rx_pending) { -+ DBG(1, "processing pending RX\n"); -+ musb_ep->rx_pending = false; -+ musb_g_rx(musb, musb_ep->current_epnum, false); -+ } - - cleanup: - spin_unlock_irqrestore(&musb->lock, lockflags); -@@ -1168,6 +1104,7 @@ static int musb_gadget_dequeue(struct us - int status = 0; - struct musb *musb = musb_ep->musb; - -+ DBG(4, "%s, dequeueing request %p\n", ep->name, request); - if (!ep || !request || to_musb_request(request)->ep != musb_ep) - return -EINVAL; - -@@ -1184,11 +1121,10 @@ static int musb_gadget_dequeue(struct us - } - - /* if the hardware doesn't have the request, easy ... */ -- if (musb_ep->req_list.next != &request->list || musb_ep->busy) -+ if (musb_ep->req_list.next != &request->list) { - musb_g_giveback(musb_ep, request, -ECONNRESET); -- - /* ... else abort the dma transfer ... 
*/ -- else if (is_dma_capable() && musb_ep->dma) { -+ } else if (musb_ep->dma) { - struct dma_controller *c = musb->dma_controller; - - musb_ep_select(musb->mregs, musb_ep->current_epnum); -@@ -1196,6 +1132,7 @@ static int musb_gadget_dequeue(struct us - status = c->channel_abort(musb_ep->dma); - else - status = -EBUSY; -+ stop_dma(musb, musb_ep, to_musb_request(request)); - if (status == 0) - musb_g_giveback(musb_ep, request, -ECONNRESET); - } else { -@@ -1281,10 +1218,12 @@ int musb_gadget_set_halt(struct usb_ep * - musb_writew(epio, MUSB_RXCSR, csr); - } - -+ musb_ep->stalled = value; -+ - done: - - /* maybe start the first request in the queue */ -- if (!musb_ep->busy && !value && request) { -+ if (!musb_ep->stalled && request) { - DBG(3, "restarting the request\n"); - musb_ep_restart(musb, request); - } -@@ -1387,7 +1326,7 @@ static int musb_gadget_wakeup(struct usb - - spin_lock_irqsave(&musb->lock, flags); - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_B_PERIPHERAL: - /* NOTE: OTG state machine doesn't include B_SUSPENDED; - * that's part of the standard usb 1.1 state machine, and -@@ -1459,10 +1398,39 @@ static void musb_pullup(struct musb *mus - u8 power; - - power = musb_readb(musb->mregs, MUSB_POWER); -- if (is_on) -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * This is necessary to prevent a RESET irq to -+ * come when we fake a usb disconnection in order -+ * to change the configuration on the gadget driver. -+ */ -+ if (is_on) { -+ u8 r; - power |= MUSB_POWER_SOFTCONN; -- else -+ -+ r = musb_readb(musb->mregs, MUSB_INTRUSBE); -+ /* disable RESET interrupt */ -+ musb_writeb(musb->mregs, MUSB_INTRUSBE, ~(r & BIT(1))); -+ -+ /* send resume */ -+ r = musb_readb(musb->mregs, MUSB_POWER); -+ r |= MUSB_POWER_RESUME; -+ musb_writeb(musb->mregs, MUSB_POWER, r); -+ -+ /* ...for 10 ms */ -+ mdelay(10); -+ r &= ~MUSB_POWER_RESUME; -+ musb_writeb(musb->mregs, MUSB_POWER, r); -+ -+ /* enable interrupts */ -+ musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7); -+ -+ /* some delay required for this to work */ -+ mdelay(10); -+ } else { - power &= ~MUSB_POWER_SOFTCONN; -+ } - - /* FIXME if on, HdrcStart; if off, HdrcStop */ - -@@ -1489,9 +1457,13 @@ static int musb_gadget_vbus_draw(struct - { - struct musb *musb = gadget_to_musb(gadget); - -- if (!musb->xceiv.set_power) -+ if (!musb->xceiv->set_power) - return -EOPNOTSUPP; -- return otg_set_power(&musb->xceiv, mA); -+ -+ musb->power_draw = mA; -+ schedule_work(&musb->irq_work); -+ -+ return otg_set_power(musb->xceiv, mA); - } - - static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) -@@ -1626,7 +1598,7 @@ int __init musb_gadget_setup(struct musb - musb->g.speed = USB_SPEED_UNKNOWN; - - /* this "gadget" abstracts/virtualizes the controller */ -- strcpy(musb->g.dev.bus_id, "gadget"); -+ dev_set_name(&musb->g.dev, "gadget"); - musb->g.dev.parent = musb->controller; - musb->g.dev.dma_mask = musb->controller->dma_mask; - musb->g.dev.release = musb_gadget_release; -@@ -1704,6 +1676,12 @@ int usb_gadget_register_driver(struct us - spin_unlock_irqrestore(&musb->lock, flags); - - if (retval == 0) { -+ /* Clocks need to be turned on with OFF mode */ -+ if (musb->set_clock) -+ musb->set_clock(musb->clock, 1); -+ else -+ clk_enable(musb->clock); -+ - retval = driver->bind(&musb->g); - if (retval != 0) { - DBG(3, "bind to driver %s failed --> %d\n", -@@ -1717,8 +1695,8 @@ int usb_gadget_register_driver(struct us - /* REVISIT always use otg_set_peripheral(), handling - * 
issues including the root hub one below ... - */ -- musb->xceiv.gadget = &musb->g; -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->gadget = &musb->g; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - musb->is_active = 1; - - /* FIXME this ignores the softconnect flag. Drivers are -@@ -1743,14 +1721,15 @@ int usb_gadget_register_driver(struct us - if (retval < 0) { - DBG(1, "add_hcd failed, %d\n", retval); - spin_lock_irqsave(&musb->lock, flags); -- musb->xceiv.gadget = NULL; -- musb->xceiv.state = OTG_STATE_UNDEFINED; -+ musb->xceiv->gadget = NULL; -+ musb->xceiv->state = OTG_STATE_UNDEFINED; - musb->gadget_driver = NULL; - musb->g.dev.driver = NULL; - spin_unlock_irqrestore(&musb->lock, flags); - } - } - } -+ musb_save_ctx(musb); - - return retval; - } -@@ -1819,6 +1798,11 @@ int usb_gadget_unregister_driver(struct - - spin_lock_irqsave(&musb->lock, flags); - -+ if (musb->set_clock) -+ musb->set_clock(musb->clock, 1); -+ else -+ clk_enable(musb->clock); -+ - #ifdef CONFIG_USB_MUSB_OTG - musb_hnp_stop(musb); - #endif -@@ -1827,7 +1811,7 @@ int usb_gadget_unregister_driver(struct - - (void) musb_gadget_vbus_draw(&musb->g, 0); - -- musb->xceiv.state = OTG_STATE_UNDEFINED; -+ musb->xceiv->state = OTG_STATE_UNDEFINED; - stop_activity(musb, driver); - - DBG(3, "unregistering driver %s\n", driver->function); -@@ -1851,6 +1835,7 @@ int usb_gadget_unregister_driver(struct - * that currently misbehaves. - */ - } -+ musb_save_ctx(musb); - - return retval; - } -@@ -1864,7 +1849,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driv - void musb_g_resume(struct musb *musb) - { - musb->is_suspended = 0; -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_B_IDLE: - break; - case OTG_STATE_B_WAIT_ACON: -@@ -1890,10 +1875,10 @@ void musb_g_suspend(struct musb *musb) - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - DBG(3, "devctl %02x\n", devctl); - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_B_IDLE: - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - break; - case OTG_STATE_B_PERIPHERAL: - musb->is_suspended = 1; -@@ -1939,22 +1924,22 @@ void musb_g_disconnect(struct musb *musb - spin_lock(&musb->lock); - } - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - default: - #ifdef CONFIG_USB_MUSB_OTG - DBG(2, "Unhandled disconnect %s, setting a_idle\n", - otg_state_string(musb)); -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - break; - case OTG_STATE_A_PERIPHERAL: -- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; - break; - case OTG_STATE_B_WAIT_ACON: - case OTG_STATE_B_HOST: - #endif - case OTG_STATE_B_PERIPHERAL: - case OTG_STATE_B_IDLE: -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - break; - case OTG_STATE_B_SRP_INIT: - break; -@@ -2010,10 +1995,10 @@ __acquires(musb->lock) - * or else after HNP, as A-Device - */ - if (devctl & MUSB_DEVCTL_BDEVICE) { -- musb->xceiv.state = OTG_STATE_B_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - musb->g.is_a_peripheral = 0; - } else if (is_otg_enabled(musb)) { -- musb->xceiv.state = OTG_STATE_A_PERIPHERAL; -+ musb->xceiv->state = OTG_STATE_A_PERIPHERAL; - musb->g.is_a_peripheral = 1; - } else - WARN_ON(1); -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_gadget_ep0.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_gadget_ep0.c ---- 
linux-omap-2.6.28-omap1/drivers/usb/musb/musb_gadget_ep0.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_gadget_ep0.c 2011-06-22 13:19:33.083063271 +0200 -@@ -55,7 +55,7 @@ - * the gadget driver, or adjusting endpoint halt status. - */ - --static char *decode_ep0stage(u8 stage) -+static inline char *decode_ep0stage(u8 stage) - { - switch (stage) { - case MUSB_EP0_STAGE_SETUP: return "idle"; -@@ -197,7 +197,6 @@ service_in_request(struct musb *musb, co - static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) - { - musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); -- musb->ep0_state = MUSB_EP0_STAGE_SETUP; - } - - /* -@@ -405,7 +404,7 @@ stall: - csr |= MUSB_RXCSR_P_SENDSTALL - | MUSB_RXCSR_FLUSHFIFO - | MUSB_RXCSR_CLRDATATOG -- | MUSB_TXCSR_P_WZC_BITS; -+ | MUSB_RXCSR_P_WZC_BITS; - musb_writew(regs, MUSB_RXCSR, - csr); - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_gadget.h linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_gadget.h ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_gadget.h 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_gadget.h 2011-06-22 13:19:33.083063271 +0200 -@@ -42,6 +42,7 @@ struct musb_request { - u8 tx; /* endpoint direction */ - u8 epnum; - u8 mapped; -+ u8 complete; /* true when completed and zero-length needed */ - }; - - static inline struct musb_request *to_musb_request(struct usb_request *req) -@@ -76,7 +77,11 @@ struct musb_ep { - struct list_head req_list; - - /* true if lock must be dropped but req_list may not be advanced */ -- u8 busy; -+ u8 busy:1; -+ u8 rx_pending:1; -+ -+ /* true if endpoint is stalled */ -+ unsigned stalled:1; - }; - - static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) -@@ -94,7 +99,7 @@ static inline struct usb_request *next_r - } - - extern void musb_g_tx(struct musb *musb, u8 epnum); --extern void musb_g_rx(struct musb *musb, u8 epnum); -+extern void musb_g_rx(struct musb *musb, u8 epnum, bool); - - extern const struct usb_ep_ops musb_g_ep0_ops; - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_host.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_host.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_host.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_host.c 2011-06-22 13:19:33.083063271 +0200 -@@ -176,8 +176,6 @@ musb_start_urb(struct musb *musb, int is - void __iomem *mbase = musb->mregs; - struct urb *urb = next_urb(qh); - struct musb_hw_ep *hw_ep = qh->hw_ep; -- unsigned pipe = urb->pipe; -- u8 address = usb_pipedevice(pipe); - int epnum = hw_ep->epnum; - - /* initialize software qh state */ -@@ -206,7 +204,7 @@ musb_start_urb(struct musb *musb, int is - } - - DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", -- qh, urb, address, qh->epnum, -+ qh, urb, usb_pipedevice(urb->pipe), qh->epnum, - is_in ? 
"in" : "out", - ({char *s; switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: s = ""; break; -@@ -335,16 +333,11 @@ musb_save_toggle(struct musb_hw_ep *ep, - static struct musb_qh * - musb_giveback(struct musb_qh *qh, struct urb *urb, int status) - { -- int is_in; - struct musb_hw_ep *ep = qh->hw_ep; - struct musb *musb = ep->musb; -+ int is_in = usb_pipein(urb->pipe); - int ready = qh->is_ready; - -- if (ep->is_shared_fifo) -- is_in = 1; -- else -- is_in = usb_pipein(urb->pipe); -- - /* save toggle eagerly, for paranoia */ - switch (qh->type) { - case USB_ENDPOINT_XFER_BULK: -@@ -432,7 +425,7 @@ musb_advance_schedule(struct musb *musb, - else - qh = musb_giveback(qh, urb, urb->status); - -- if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { -+ if (qh != NULL && qh->is_ready) { - DBG(4, "... next ep%d %cX urb %p\n", - hw_ep->epnum, is_in ? 'R' : 'T', - next_urb(qh)); -@@ -940,8 +933,8 @@ static bool musb_h_ep0_continue(struct m - switch (musb->ep0_stage) { - case MUSB_EP0_IN: - fifo_dest = urb->transfer_buffer + urb->actual_length; -- fifo_count = min(len, ((u16) (urb->transfer_buffer_length -- - urb->actual_length))); -+ fifo_count = min_t(size_t, len, urb->transfer_buffer_length - -+ urb->actual_length); - if (fifo_count < len) - urb->status = -EOVERFLOW; - -@@ -974,10 +967,9 @@ static bool musb_h_ep0_continue(struct m - } - /* FALLTHROUGH */ - case MUSB_EP0_OUT: -- fifo_count = min(qh->maxpacket, ((u16) -- (urb->transfer_buffer_length -- - urb->actual_length))); -- -+ fifo_count = min_t(size_t, qh->maxpacket, -+ urb->transfer_buffer_length - -+ urb->actual_length); - if (fifo_count) { - fifo_dest = (u8 *) (urb->transfer_buffer - + urb->actual_length); -@@ -1159,7 +1151,8 @@ void musb_host_tx(struct musb *musb, u8 - struct urb *urb; - struct musb_hw_ep *hw_ep = musb->endpoints + epnum; - void __iomem *epio = hw_ep->regs; -- struct musb_qh *qh = hw_ep->out_qh; -+ struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh -+ : hw_ep->out_qh; - u32 status = 0; - void __iomem *mbase = musb->mregs; - struct dma_channel *dma; -@@ -1201,7 +1194,7 @@ void musb_host_tx(struct musb *musb, u8 - * transfer, if there's some other (nonperiodic) tx urb - * that could use this fifo. (dma complicates it...) - * -- * if (bulk && qh->ring.next != &musb->out_bulk), then -+ * if (bulk && qh->ring.next != &hw_ep->out_list), then - * we have a candidate... NAKing is *NOT* an error - */ - musb_ep_select(mbase, epnum); -@@ -1306,7 +1299,8 @@ void musb_host_tx(struct musb *musb, u8 - * packets before updating TXCSR ... other docs disagree ... - */ - /* PIO: start next packet in this URB */ -- wLength = min(qh->maxpacket, (u16) wLength); -+ if (wLength > qh->maxpacket) -+ wLength = qh->maxpacket; - musb_write_fifo(hw_ep, wLength, buf); - qh->segsize = wLength; - -@@ -1360,6 +1354,50 @@ finish: - - #endif - -+/* Schedule next qh from ep->in_list and add the current qh at tail -+ * to avoid endpoint starvation. -+ */ -+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep) -+{ -+ struct dma_channel *dma; -+ struct urb *urb; -+ void __iomem *mbase = musb->mregs; -+ void __iomem *epio = ep->regs; -+ struct musb_qh *cur_qh, *next_qh; -+ u16 rx_csr; -+ -+ musb_ep_select(mbase, ep->epnum); -+ dma = is_dma_capable() ? 
ep->rx_channel : NULL; -+ -+ /* clear nak timeout bit */ -+ rx_csr = musb_readw(epio, MUSB_RXCSR); -+ rx_csr &= ~MUSB_RXCSR_DATAERROR; -+ musb_writew(epio, MUSB_RXCSR, rx_csr); -+ -+ cur_qh = first_qh(&ep->in_list); -+ if (cur_qh) { -+ urb = next_urb(cur_qh); -+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { -+ dma->status = MUSB_DMA_STATUS_CORE_ABORT; -+ musb->dma_controller->channel_abort(dma); -+ urb->actual_length += dma->actual_len; -+ dma->actual_len = 0L; -+ } -+ musb_save_toggle(ep, 1, urb); -+ -+ /* delete cur_qh and add to tail to ep->in_list */ -+ list_del(&cur_qh->ring); -+ list_add_tail(&cur_qh->ring, &ep->in_list); -+ -+ /* get the next qh from ep->in_list */ -+ next_qh = first_qh(&ep->in_list); -+ -+ /* set rx_reinit and schedule the next qh */ -+ ep->rx_reinit = 1; -+ musb_start_urb(musb, 1, next_qh); -+ } -+} -+ - /* - * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, - * and high-bandwidth IN transfer cases. -@@ -1427,14 +1465,18 @@ void musb_host_rx(struct musb *musb, u8 - * transfer, if there's some other (nonperiodic) rx urb - * that could use this fifo. (dma complicates it...) - * -- * if (bulk && qh->ring.next != &musb->in_bulk), then -+ * if (bulk && qh->ring.next != &hw_ep->in_list), then - * we have a candidate... NAKing is *NOT* an error - */ - DBG(6, "RX end %d NAK timeout\n", epnum); -+ if (usb_pipebulk(urb->pipe) && qh->mux == 1 && -+ !list_is_singular(&hw_ep->in_list)) { -+ musb_bulk_nak_timeout(musb, hw_ep); -+ return; -+ } - musb_ep_select(mbase, epnum); -- musb_writew(epio, MUSB_RXCSR, -- MUSB_RXCSR_H_WZC_BITS -- | MUSB_RXCSR_H_REQPKT); -+ rx_csr &= ~MUSB_RXCSR_DATAERROR; -+ musb_writew(epio, MUSB_RXCSR, rx_csr); - - goto finish; - } else { -@@ -1442,6 +1484,10 @@ void musb_host_rx(struct musb *musb, u8 - /* packet error reported later */ - iso_err = true; - } -+ } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { -+ DBG(3, "end %d Highbandwidth incomplete ISO packet received\n", -+ epnum); -+ status = -EPROTO; - } - - /* faults abort the transfer */ -@@ -1647,7 +1693,13 @@ void musb_host_rx(struct musb *musb, u8 - val &= ~MUSB_RXCSR_H_AUTOREQ; - else - val |= MUSB_RXCSR_H_AUTOREQ; -- val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; -+ -+ if (qh->maxpacket & ~0x7ff) -+ /* Autoclear doesn't work in high bandwidth iso */ -+ val |= MUSB_RXCSR_DMAENAB; -+ else -+ val |= MUSB_RXCSR_AUTOCLEAR -+ | MUSB_RXCSR_DMAENAB; - - musb_writew(epio, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS | val); -@@ -1700,11 +1752,12 @@ static int musb_schedule( - int best_end, epnum; - struct musb_hw_ep *hw_ep = NULL; - struct list_head *head = NULL; -+ u16 maxpacket; - - /* use fixed hardware for control and bulk */ - if (qh->type == USB_ENDPOINT_XFER_CONTROL) { -- head = &musb->control; - hw_ep = musb->control_ep; -+ head = &hw_ep->in_list; - goto success; - } - -@@ -1729,6 +1782,13 @@ static int musb_schedule( - best_diff = 4096; - best_end = -1; - -+ if (qh->maxpacket & (1 << 11)) -+ maxpacket = 2 * (qh->maxpacket & 0x7ff); -+ else if (qh->maxpacket & (1 << 12)) -+ maxpacket = 3 * (qh->maxpacket & 0x7ff); -+ else -+ maxpacket = (qh->maxpacket & 0x7ff); -+ - for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { - int diff; - -@@ -1739,9 +1799,9 @@ static int musb_schedule( - continue; - - if (is_in) -- diff = hw_ep->max_packet_sz_rx - qh->maxpacket; -+ diff = hw_ep->max_packet_sz_rx - maxpacket; - else -- diff = hw_ep->max_packet_sz_tx - qh->maxpacket; -+ diff = hw_ep->max_packet_sz_tx - maxpacket; - - if (diff >= 0 && best_diff > diff) { - best_diff = diff; -@@ 
-1752,9 +1812,19 @@ static int musb_schedule( - if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { - hw_ep = musb->bulk_ep; - if (is_in) -- head = &musb->in_bulk; -+ head = &hw_ep->in_list; - else -- head = &musb->out_bulk; -+ head = &hw_ep->out_list; -+ /* Enable bulk NAK time out scheme when bulk requests are -+ * multiplxed. This scheme doesn't work in high speed to full -+ * speed scenario as NAK interrupts are not coming from a -+ * full speed device connected to a high speed device. -+ * NAK timeout interval is 8 (128 uframe or 16ms) for HS and -+ * 4 (8 frame or 8ms) for FS device. -+ */ -+ if (is_in && qh->dev) -+ qh->intv_reg = -+ (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; - goto success; - } else if (best_end < 0) { - return -ENOSPC; -@@ -1786,7 +1856,7 @@ static int musb_urb_enqueue( - unsigned long flags; - struct musb *musb = hcd_to_musb(hcd); - struct usb_host_endpoint *hep = urb->ep; -- struct musb_qh *qh = hep->hcpriv; -+ struct musb_qh *qh; - struct usb_endpoint_descriptor *epd = &hep->desc; - int ret; - unsigned type_reg; -@@ -1798,22 +1868,21 @@ static int musb_urb_enqueue( - - spin_lock_irqsave(&musb->lock, flags); - ret = usb_hcd_link_urb_to_ep(hcd, urb); -+ qh = ret ? NULL : hep->hcpriv; -+ if (qh) -+ urb->hcpriv = qh; - spin_unlock_irqrestore(&musb->lock, flags); -- if (ret) -- return ret; - - /* DMA mapping was already done, if needed, and this urb is on -- * hep->urb_list ... so there's little to do unless hep wasn't -- * yet scheduled onto a live qh. -+ * hep->urb_list now ... so we're done, unless hep wasn't yet -+ * scheduled onto a live qh. - * -- * REVISIT best to keep hep->hcpriv valid until the endpoint gets -+ * REVISIT best to keep urb->hcpriv valid until the endpoint gets - * disabled, testing for empty qh->ring and avoiding qh setup costs - * except for the first urb queued after a config change. - */ -- if (qh) { -- urb->hcpriv = qh; -- return 0; -- } -+ if (qh || ret) -+ return ret; - - /* Allocate and initialize qh, minimizing the work done each time - * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. -@@ -1836,14 +1905,8 @@ static int musb_urb_enqueue( - - qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); - -- /* no high bandwidth support yet */ -- if (qh->maxpacket & ~0x7ff) { -- ret = -EMSGSIZE; -- goto done; -- } -- -- qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; -- qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; -+ qh->epnum = usb_endpoint_num(epd); -+ qh->type = usb_endpoint_type(epd); - - /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ - qh->addr_reg = (u8) usb_pipedevice(urb->pipe); -@@ -1862,19 +1925,21 @@ static int musb_urb_enqueue( - } - qh->type_reg = type_reg; - -- /* precompute rxinterval/txinterval register */ -- interval = min((u8)16, epd->bInterval); /* log encoding */ -+ /* Precompute RXINTERVAL/TXINTERVAL register */ - switch (qh->type) { - case USB_ENDPOINT_XFER_INT: -- /* fullspeed uses linear encoding */ -- if (USB_SPEED_FULL == urb->dev->speed) { -- interval = epd->bInterval; -- if (!interval) -- interval = 1; -+ /* -+ * Full/low speeds use the linear encoding, -+ * high speed uses the logarithmic encoding. 
-+ */ -+ if (urb->dev->speed <= USB_SPEED_FULL) { -+ interval = max_t(u8, epd->bInterval, 1); -+ break; - } - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_ISOC: -- /* iso always uses log encoding */ -+ /* ISO always uses logarithmic encoding */ -+ interval = min_t(u8, epd->bInterval, 16); - break; - default: - /* REVISIT we actually want to use NAK limits, hinting to the -@@ -1939,7 +2004,6 @@ static int musb_urb_enqueue( - } - spin_unlock_irqrestore(&musb->lock, flags); - --done: - if (ret != 0) { - spin_lock_irqsave(&musb->lock, flags); - usb_hcd_unlink_urb_from_ep(hcd, urb); -@@ -2032,9 +2096,9 @@ static int musb_urb_dequeue(struct usb_h - goto done; - - /* Any URB not actively programmed into endpoint hardware can be -- * immediately given back. Such an URB must be at the head of its -+ * immediately given back; that's any URB not at the head of an - * endpoint queue, unless someday we get real DMA queues. And even -- * then, it might not be known to the hardware... -+ * if it's at the head, it might not be known to the hardware... - * - * Otherwise abort current transfer, pending dma, etc.; urb->status - * has already been updated. This is a synchronous abort; it'd be -@@ -2045,14 +2109,14 @@ static int musb_urb_dequeue(struct usb_h - else { - switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: -- sched = &musb->control; -+ sched = &musb->control_ep->in_list; - break; - case USB_ENDPOINT_XFER_BULK: - if (qh->mux == 1) { - if (usb_pipein(urb->pipe)) -- sched = &musb->in_bulk; -+ sched = &musb->bulk_ep->in_list; - else -- sched = &musb->out_bulk; -+ sched = &musb->bulk_ep->out_list; - break; - } - default: -@@ -2073,6 +2137,15 @@ static int musb_urb_dequeue(struct usb_h - qh->is_ready = 0; - __musb_giveback(musb, urb, 0); - qh->is_ready = ready; -+ -+ /* If nothing else (usually musb_giveback) is using it -+ * and its URB list has emptied, recycle this qh. -+ */ -+ if (ready && list_empty(&qh->hep->urb_list)) { -+ qh->hep->hcpriv = NULL; -+ list_del(&qh->ring); -+ kfree(qh); -+ } - } else - ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); - done: -@@ -2088,25 +2161,26 @@ musb_h_disable(struct usb_hcd *hcd, stru - unsigned long flags; - struct musb *musb = hcd_to_musb(hcd); - u8 is_in = epnum & USB_DIR_IN; -- struct musb_qh *qh = hep->hcpriv; -- struct urb *urb, *tmp; -+ struct musb_qh *qh; -+ struct urb *urb; - struct list_head *sched; - -- if (!qh) -- return; -- - spin_lock_irqsave(&musb->lock, flags); - -+ qh = hep->hcpriv; -+ if (qh == NULL) -+ goto exit; -+ - switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: -- sched = &musb->control; -+ sched = &musb->control_ep->in_list; - break; - case USB_ENDPOINT_XFER_BULK: - if (qh->mux == 1) { - if (is_in) -- sched = &musb->in_bulk; -+ sched = &musb->bulk_ep->in_list; - else -- sched = &musb->out_bulk; -+ sched = &musb->bulk_ep->out_list; - break; - } - default: -@@ -2130,13 +2204,28 @@ musb_h_disable(struct usb_hcd *hcd, stru - - /* cleanup */ - musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); -- } else -- urb = NULL; - -- /* then just nuke all the others */ -- list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) -- musb_giveback(qh, urb, -ESHUTDOWN); -+ /* Then nuke all the others ... and advance the -+ * queue on hw_ep (e.g. bulk ring) when we're done. 
-+ */ -+ while (!list_empty(&hep->urb_list)) { -+ urb = next_urb(qh); -+ urb->status = -ESHUTDOWN; -+ musb_advance_schedule(musb, urb, qh->hw_ep, is_in); -+ } -+ } else { -+ /* Just empty the queue; the hardware is busy with -+ * other transfers, and since !qh->is_ready nothing -+ * will activate any of these as it advances. -+ */ -+ while (!list_empty(&hep->urb_list)) -+ __musb_giveback(musb, next_urb(qh), -ESHUTDOWN); - -+ hep->hcpriv = NULL; -+ list_del(&qh->ring); -+ kfree(qh); -+ } -+exit: - spin_unlock_irqrestore(&musb->lock, flags); - } - -@@ -2169,7 +2258,7 @@ static int musb_bus_suspend(struct usb_h - { - struct musb *musb = hcd_to_musb(hcd); - -- if (musb->xceiv.state == OTG_STATE_A_SUSPEND) -+ if (musb->xceiv->state == OTG_STATE_A_SUSPEND) - return 0; - - if (is_host_active(musb) && musb->is_active) { -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musbhsdma.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musbhsdma.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musbhsdma.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musbhsdma.c 2011-06-22 13:19:33.113063272 +0200 -@@ -79,23 +79,29 @@ static struct dma_channel *dma_channel_a - struct dma_channel *channel = NULL; - u8 bit; - -- for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) { -- if (!(controller->used_channels & (1 << bit))) { -- controller->used_channels |= (1 << bit); -- musb_channel = &(controller->channel[bit]); -- musb_channel->controller = controller; -- musb_channel->idx = bit; -- musb_channel->epnum = hw_ep->epnum; -- musb_channel->transmit = transmit; -- channel = &(musb_channel->channel); -- channel->private_data = musb_channel; -- channel->status = MUSB_DMA_STATUS_FREE; -- channel->max_len = 0x10000; -- /* Tx => mode 1; Rx => mode 0 */ -- channel->desired_mode = transmit; -- channel->actual_len = 0; -- break; -- } -+ /* musb on omap3 has a problem with using dma channels simultaneously -+ * so we will only allocate 1 dma channel at a time to avoid problems -+ * related to that bug -+ */ -+ for (bit = 0; bit < 1; bit++) { -+ if (controller->used_channels & (1 << bit)) -+ continue; -+ -+ controller->used_channels |= (1 << bit); -+ musb_channel = &(controller->channel[bit]); -+ musb_channel->controller = controller; -+ musb_channel->idx = bit; -+ musb_channel->epnum = hw_ep->epnum; -+ musb_channel->transmit = transmit; -+ channel = &(musb_channel->channel); -+ channel->private_data = musb_channel; -+ channel->status = MUSB_DMA_STATUS_FREE; -+ channel->max_len = 0x7fffffff; -+ /* always use mode1 */ -+ channel->desired_mode = true; -+ channel->actual_len = 0; -+ -+ break; - } - - return channel; -@@ -128,6 +134,9 @@ static void configure_channel(struct dma - DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", - channel, packet_sz, dma_addr, len, mode); - -+ if (mode) -+ csr |= MUSB_HSDMA_MODE1; -+ - if (packet_sz >= 64) - csr |= MUSB_HSDMA_BURSTMODE_INCR16; - else if (packet_sz >= 32) -@@ -136,7 +145,6 @@ static void configure_channel(struct dma - csr |= MUSB_HSDMA_BURSTMODE_INCR4; - - csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) -- | MUSB_HSDMA_MODE1 - | MUSB_HSDMA_ENABLE - | MUSB_HSDMA_IRQENABLE - | (musb_channel->transmit -@@ -183,8 +191,9 @@ static int dma_channel_abort(struct dma_ - struct musb_dma_channel *musb_channel = channel->private_data; - void __iomem *mbase = musb_channel->controller->base; - -- u8 bchannel = musb_channel->idx; -+ u32 addr = 0; - u16 csr; -+ u8 bchannel = musb_channel->idx; - - if (channel->status == MUSB_DMA_STATUS_BUSY) { - if 
(musb_channel->transmit) { -@@ -193,8 +202,11 @@ static int dma_channel_abort(struct dma_ - MUSB_EP_OFFSET(musb_channel->epnum, - MUSB_TXCSR)); - csr &= ~(MUSB_TXCSR_AUTOSET | -- MUSB_TXCSR_DMAENAB | -- MUSB_TXCSR_DMAMODE); -+ MUSB_TXCSR_DMAENAB); -+ musb_writew(mbase, -+ MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR), -+ csr); -+ csr &= ~MUSB_TXCSR_DMAMODE; - musb_writew(mbase, - MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR), - csr); -@@ -203,11 +215,16 @@ static int dma_channel_abort(struct dma_ - MUSB_EP_OFFSET(musb_channel->epnum, - MUSB_RXCSR)); - csr &= ~(MUSB_RXCSR_AUTOCLEAR | -- MUSB_RXCSR_DMAENAB | -- MUSB_RXCSR_DMAMODE); -+ MUSB_RXCSR_DMAENAB); -+ musb_writew(mbase, -+ MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR), -+ csr); -+ csr &= ~MUSB_RXCSR_DMAMODE; - musb_writew(mbase, - MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR), - csr); -+ addr = musb_read_hsdma_addr(mbase, bchannel); -+ channel->actual_len = addr - musb_channel->start_addr; - } - - musb_writew(mbase, -@@ -225,7 +242,7 @@ static irqreturn_t dma_controller_irq(in - { - struct musb_dma_controller *controller = private_data; - struct musb *musb = controller->private_data; -- struct musb_dma_channel *musb_channel; -+ struct musb_dma_channel *mchannel; - struct dma_channel *channel; - - void __iomem *mbase = controller->base; -@@ -247,59 +264,55 @@ static irqreturn_t dma_controller_irq(in - goto done; - - for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { -- if (int_hsdma & (1 << bchannel)) { -- musb_channel = (struct musb_dma_channel *) -- &(controller->channel[bchannel]); -- channel = &musb_channel->channel; -+ u8 devctl; - -- csr = musb_readw(mbase, -- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, -- MUSB_HSDMA_CONTROL)); -+ if (!(int_hsdma & (1 << bchannel))) -+ continue; -+ -+ mchannel = &(controller->channel[bchannel]); -+ channel = &mchannel->channel; -+ -+ csr = musb_readw(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, -+ MUSB_HSDMA_CONTROL)); -+ -+ if (csr & MUSB_HSDMA_BUSERROR) { -+ mchannel->channel.status = MUSB_DMA_STATUS_BUS_ABORT; -+ goto done; -+ } - -- if (csr & MUSB_HSDMA_BUSERROR) { -- musb_channel->channel.status = -- MUSB_DMA_STATUS_BUS_ABORT; -- } else { -- u8 devctl; -- -- addr = musb_read_hsdma_addr(mbase, -- bchannel); -- channel->actual_len = addr -- - musb_channel->start_addr; -- -- DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", -- channel, musb_channel->start_addr, -- addr, channel->actual_len, -- musb_channel->len, -- (channel->actual_len -- < musb_channel->len) ? -- "=> reconfig 0" : "=> complete"); -- -- devctl = musb_readb(mbase, MUSB_DEVCTL); -- -- channel->status = MUSB_DMA_STATUS_FREE; -- -- /* completed */ -- if ((devctl & MUSB_DEVCTL_HM) -- && (musb_channel->transmit) -- && ((channel->desired_mode == 0) -- || (channel->actual_len & -- (musb_channel->max_packet_sz - 1))) -- ) { -- /* Send out the packet */ -- musb_ep_select(mbase, -- musb_channel->epnum); -- musb_writew(mbase, MUSB_EP_OFFSET( -- musb_channel->epnum, -- MUSB_TXCSR), -- MUSB_TXCSR_TXPKTRDY); -- } else { -- musb_dma_completion( -- musb, -- musb_channel->epnum, -- musb_channel->transmit); -- } -- } -+ addr = musb_read_hsdma_addr(mbase, bchannel); -+ channel->actual_len = addr - mchannel->start_addr; -+ -+ DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", channel, -+ mchannel->start_addr, addr, -+ channel->actual_len, mchannel->len, -+ (channel->actual_len < mchannel->len) ? 
-+ "=> reconfig 0" : "=> complete"); -+ -+ devctl = musb_readb(mbase, MUSB_DEVCTL); -+ channel->status = MUSB_DMA_STATUS_FREE; -+ -+ /* completed */ -+ if ((devctl & MUSB_DEVCTL_HM) && (mchannel->transmit) -+ && ((channel->desired_mode == 0) -+ || (channel->actual_len & -+ (mchannel->max_packet_sz - 1)))) { -+ u8 txcsr; -+ -+ musb_ep_select(mbase, mchannel->epnum); -+ txcsr = musb_readw(mbase, MUSB_EP_OFFSET( -+ mchannel->epnum, MUSB_TXCSR)); -+ txcsr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); -+ musb_writew(mbase, MUSB_EP_OFFSET(mchannel->epnum, -+ MUSB_TXCSR), txcsr); -+ txcsr &= ~MUSB_TXCSR_DMAMODE; -+ txcsr |= MUSB_TXCSR_TXPKTRDY; -+ /* Send out the packet */ -+ musb_writew(mbase, MUSB_EP_OFFSET(mchannel->epnum, -+ MUSB_TXCSR), txcsr); -+ } else { -+ musb_dma_completion(musb, mchannel->epnum, -+ mchannel->transmit); - } - } - -@@ -357,7 +370,7 @@ dma_controller_create(struct musb *musb, - controller->controller.channel_abort = dma_channel_abort; - - if (request_irq(irq, dma_controller_irq, IRQF_DISABLED, -- musb->controller->bus_id, &controller->controller)) { -+ dev_name(musb->controller), &controller->controller)) { - dev_err(dev, "request_irq %d failed!\n", irq); - dma_controller_destroy(&controller->controller); - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_procfs.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_procfs.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_procfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_procfs.c 2011-06-22 13:19:33.113063272 +0200 -@@ -0,0 +1,775 @@ -+/* -+ * MUSB OTG driver debug support -+ * -+ * Copyright 2005 Mentor Graphics Corporation -+ * Copyright (C) 2005-2006 by Texas Instruments -+ * Copyright (C) 2006-2007 Nokia Corporation -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED -+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN -+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF -+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include /* FIXME remove procfs writes */ -+#include -+ -+#include "musb_core.h" -+ -+#include "davinci.h" -+ -+extern unsigned musb_debug; -+ -+#ifdef CONFIG_USB_MUSB_HDRC_HCD -+ -+static int dump_qh(struct musb_qh *qh, char *buf, unsigned max) -+{ -+ int count; -+ int tmp; -+ struct usb_host_endpoint *hep = qh->hep; -+ struct urb *urb; -+ -+ count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n", -+ qh, qh->dev->devnum, qh->epnum, -+ ({ char *s; switch (qh->type) { -+ case USB_ENDPOINT_XFER_BULK: -+ s = "-bulk"; break; -+ case USB_ENDPOINT_XFER_INT: -+ s = "-int"; break; -+ case USB_ENDPOINT_XFER_CONTROL: -+ s = ""; break; -+ default: -+ s = "iso"; break; -+ }; s; }), -+ qh->maxpacket); -+ if (count <= 0) -+ return 0; -+ buf += count; -+ max -= count; -+ -+ list_for_each_entry(urb, &hep->urb_list, urb_list) { -+ tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n", -+ usb_pipein(urb->pipe) ? "in" : "out", -+ urb, urb->actual_length, -+ urb->transfer_buffer_length); -+ if (tmp <= 0) -+ break; -+ tmp = min(tmp, (int)max); -+ count += tmp; -+ buf += tmp; -+ max -= tmp; -+ } -+ return count; -+} -+ -+static int -+dump_queue(struct list_head *q, char *buf, unsigned max) -+{ -+ int count = 0; -+ struct musb_qh *qh; -+ -+ list_for_each_entry(qh, q, ring) { -+ int tmp; -+ -+ tmp = dump_qh(qh, buf, max); -+ if (tmp <= 0) -+ break; -+ tmp = min(tmp, (int)max); -+ count += tmp; -+ buf += tmp; -+ max -= tmp; -+ } -+ return count; -+} -+ -+#endif /* HCD */ -+ -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max) -+{ -+ char *buf = buffer; -+ int code = 0; -+ void __iomem *regs = ep->hw_ep->regs; -+ char *mode = "1buf"; -+ -+ if (ep->is_in) { -+ if (ep->hw_ep->tx_double_buffered) -+ mode = "2buf"; -+ } else { -+ if (ep->hw_ep->rx_double_buffered) -+ mode = "2buf"; -+ } -+ -+ do { -+ struct usb_request *req; -+ -+ code = snprintf(buf, max, -+ "\n%s (hw%d): %s%s, csr %04x maxp %04x\n", -+ ep->name, ep->current_epnum, -+ mode, ep->dma ? " dma" : "", -+ musb_readw(regs, -+ (ep->is_in || !ep->current_epnum) -+ ? MUSB_TXCSR -+ : MUSB_RXCSR), -+ musb_readw(regs, ep->is_in -+ ? MUSB_TXMAXP -+ : MUSB_RXMAXP) -+ ); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ -+ if (cppi_ti_dma() && ep->current_epnum) { -+ unsigned cppi = ep->current_epnum - 1; -+ void __iomem *base = ep->musb->ctrl_base; -+ unsigned off1 = cppi << 2; -+ void __iomem *ram = base; -+ char tmp[16]; -+ -+ if (ep->is_in) { -+ ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi); -+ tmp[0] = 0; -+ } else { -+ ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi); -+ snprintf(tmp, sizeof tmp, "%d left, ", -+ musb_readl(base, -+ DAVINCI_RXCPPI_BUFCNT0_REG + off1)); -+ } -+ -+ code = snprintf(buf, max, "%cX DMA%d: %s" -+ "%08x %08x, %08x %08x; " -+ "%08x %08x %08x .. %08x\n", -+ ep->is_in ? 
'T' : 'R', -+ ep->current_epnum - 1, tmp, -+ musb_readl(ram, 0 * 4), -+ musb_readl(ram, 1 * 4), -+ musb_readl(ram, 2 * 4), -+ musb_readl(ram, 3 * 4), -+ musb_readl(ram, 4 * 4), -+ musb_readl(ram, 5 * 4), -+ musb_readl(ram, 6 * 4), -+ musb_readl(ram, 7 * 4)); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ -+ if (list_empty(&ep->req_list)) { -+ code = snprintf(buf, max, "\t(queue empty)\n"); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ break; -+ } -+ list_for_each_entry(req, &ep->req_list, list) { -+ code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n", -+ req, -+ req->zero ? "zero, " : "", -+ req->short_not_ok ? "!short, " : "", -+ req->actual, req->length); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ } while (0); -+ return buf - buffer; -+} -+#endif -+ -+static int -+dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max) -+{ -+ int code = 0; -+ char *buf = aBuffer; -+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; -+ -+ do { -+ musb_ep_select(musb->mregs, epnum); -+#ifdef CONFIG_USB_MUSB_HDRC_HCD -+ if (is_host_active(musb)) { -+ int dump_rx, dump_tx; -+ void __iomem *regs = hw_ep->regs; -+ -+ /* TEMPORARY (!) until we have a real periodic -+ * schedule tree ... -+ */ -+ if (!epnum) { -+ /* control is shared, uses RX queue -+ * but (mostly) shadowed tx registers -+ */ -+ dump_tx = !list_empty(&hw_ep->in_list); -+ dump_rx = 0; -+ } else if (hw_ep == musb->bulk_ep) { -+ dump_tx = !list_empty(&hw_ep->out_list); -+ dump_rx = !list_empty(&hw_ep->in_list); -+ } else -+ break; -+ /* END TEMPORARY */ -+ -+ -+ if (dump_rx) { -+ code = snprintf(buf, max, -+ "\nRX%d: %s rxcsr %04x interval %02x " -+ "max %04x type %02x; " -+ "dev %d hub %d port %d" -+ "\n", -+ epnum, -+ hw_ep->rx_double_buffered -+ ? "2buf" : "1buf", -+ musb_readw(regs, MUSB_RXCSR), -+ musb_readb(regs, MUSB_RXINTERVAL), -+ musb_readw(regs, MUSB_RXMAXP), -+ musb_readb(regs, MUSB_RXTYPE), -+ /* FIXME: assumes multipoint */ -+ musb_readb(musb->mregs, -+ MUSB_BUSCTL_OFFSET(epnum, -+ MUSB_RXFUNCADDR)), -+ musb_readb(musb->mregs, -+ MUSB_BUSCTL_OFFSET(epnum, -+ MUSB_RXHUBADDR)), -+ musb_readb(musb->mregs, -+ MUSB_BUSCTL_OFFSET(epnum, -+ MUSB_RXHUBPORT)) -+ ); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ -+ if (cppi_ti_dma() -+ && epnum -+ && hw_ep->rx_channel) { -+ unsigned cppi = epnum - 1; -+ unsigned off1 = cppi << 2; -+ void __iomem *base; -+ void __iomem *ram; -+ char tmp[16]; -+ -+ base = musb->ctrl_base; -+ ram = DAVINCI_RXCPPI_STATERAM_OFFSET( -+ cppi) + base; -+ snprintf(tmp, sizeof tmp, "%d left, ", -+ musb_readl(base, -+ DAVINCI_RXCPPI_BUFCNT0_REG -+ + off1)); -+ -+ code = snprintf(buf, max, -+ " rx dma%d: %s" -+ "%08x %08x, %08x %08x; " -+ "%08x %08x %08x .. 
%08x\n", -+ cppi, tmp, -+ musb_readl(ram, 0 * 4), -+ musb_readl(ram, 1 * 4), -+ musb_readl(ram, 2 * 4), -+ musb_readl(ram, 3 * 4), -+ musb_readl(ram, 4 * 4), -+ musb_readl(ram, 5 * 4), -+ musb_readl(ram, 6 * 4), -+ musb_readl(ram, 7 * 4)); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ -+ if (hw_ep == musb->bulk_ep -+ && !list_empty( -+ &hw_ep->in_list)) { -+ code = dump_queue(&hw_ep->in_list, -+ buf, max); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ } -+ -+ if (dump_tx) { -+ code = snprintf(buf, max, -+ "\nTX%d: %s txcsr %04x interval %02x " -+ "max %04x type %02x; " -+ "dev %d hub %d port %d" -+ "\n", -+ epnum, -+ hw_ep->tx_double_buffered -+ ? "2buf" : "1buf", -+ musb_readw(regs, MUSB_TXCSR), -+ musb_readb(regs, MUSB_TXINTERVAL), -+ musb_readw(regs, MUSB_TXMAXP), -+ musb_readb(regs, MUSB_TXTYPE), -+ /* FIXME: assumes multipoint */ -+ musb_readb(musb->mregs, -+ MUSB_BUSCTL_OFFSET(epnum, -+ MUSB_TXFUNCADDR)), -+ musb_readb(musb->mregs, -+ MUSB_BUSCTL_OFFSET(epnum, -+ MUSB_TXHUBADDR)), -+ musb_readb(musb->mregs, -+ MUSB_BUSCTL_OFFSET(epnum, -+ MUSB_TXHUBPORT)) -+ ); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ -+ if (cppi_ti_dma() -+ && epnum -+ && hw_ep->tx_channel) { -+ unsigned cppi = epnum - 1; -+ void __iomem *base; -+ void __iomem *ram; -+ -+ base = musb->ctrl_base; -+ ram = DAVINCI_RXCPPI_STATERAM_OFFSET( -+ cppi) + base; -+ code = snprintf(buf, max, -+ " tx dma%d: " -+ "%08x %08x, %08x %08x; " -+ "%08x %08x %08x .. %08x\n", -+ cppi, -+ musb_readl(ram, 0 * 4), -+ musb_readl(ram, 1 * 4), -+ musb_readl(ram, 2 * 4), -+ musb_readl(ram, 3 * 4), -+ musb_readl(ram, 4 * 4), -+ musb_readl(ram, 5 * 4), -+ musb_readl(ram, 6 * 4), -+ musb_readl(ram, 7 * 4)); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ -+ if (hw_ep == musb->control_ep -+ && !list_empty( -+ &hw_ep->in_list)) { -+ code = dump_queue(&hw_ep->in_list, -+ buf, max); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } else if (hw_ep == musb->bulk_ep -+ && !list_empty( -+ &hw_ep->out_list)) { -+ code = dump_queue(&hw_ep->out_list, -+ buf, max); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ } -+ } -+#endif -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ if (is_peripheral_active(musb)) { -+ code = 0; -+ -+ if (hw_ep->ep_in.desc || !epnum) { -+ code = dump_ep(&hw_ep->ep_in, buf, max); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ if (hw_ep->ep_out.desc) { -+ code = dump_ep(&hw_ep->ep_out, buf, max); -+ if (code <= 0) -+ break; -+ code = min(code, (int) max); -+ buf += code; -+ max -= code; -+ } -+ } -+#endif -+ } while (0); -+ -+ return buf - aBuffer; -+} -+ -+/* Dump the current status and compile options. -+ * @param musb the device driver instance -+ * @param buffer where to dump the status; it must be big enough to hold the -+ * result otherwise "BAD THINGS HAPPENS(TM)". -+ */ -+static int dump_header_stats(struct musb *musb, char *buffer) -+{ -+ int code, count = 0; -+ const void __iomem *mbase = musb->mregs; -+ -+ *buffer = 0; -+ count = sprintf(buffer, "Status: %sHDRC, Mode=%s " -+ "(Power=%02x, DevCtl=%02x)\n", -+ (musb->is_multipoint ? 
"M" : ""), MUSB_MODE(musb), -+ musb_readb(mbase, MUSB_POWER), -+ musb_readb(mbase, MUSB_DEVCTL)); -+ if (count <= 0) -+ return 0; -+ buffer += count; -+ -+ code = sprintf(buffer, "OTG state: %s; %sactive\n", -+ otg_state_string(musb), -+ musb->is_active ? "" : "in"); -+ if (code <= 0) -+ goto done; -+ buffer += code; -+ count += code; -+ -+ code = sprintf(buffer, -+ "Options: " -+#ifdef CONFIG_MUSB_PIO_ONLY -+ "pio" -+#elif defined(CONFIG_USB_TI_CPPI_DMA) -+ "cppi-dma" -+#elif defined(CONFIG_USB_INVENTRA_DMA) -+ "musb-dma" -+#elif defined(CONFIG_USB_TUSB_OMAP_DMA) -+ "tusb-omap-dma" -+#else -+ "?dma?" -+#endif -+ ", " -+#ifdef CONFIG_USB_MUSB_OTG -+ "otg (peripheral+host)" -+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC) -+ "peripheral" -+#elif defined(CONFIG_USB_MUSB_HDRC_HCD) -+ "host" -+#endif -+ ", debug=%d [eps=%d]\n", -+ musb_debug, -+ musb->nr_endpoints); -+ if (code <= 0) -+ goto done; -+ count += code; -+ buffer += code; -+ -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ code = sprintf(buffer, "Peripheral address: %02x\n", -+ musb_readb(musb->ctrl_base, MUSB_FADDR)); -+ if (code <= 0) -+ goto done; -+ buffer += code; -+ count += code; -+#endif -+ -+#ifdef CONFIG_USB_MUSB_HDRC_HCD -+ code = sprintf(buffer, "Root port status: %08x\n", -+ musb->port1_status); -+ if (code <= 0) -+ goto done; -+ buffer += code; -+ count += code; -+#endif -+ -+#ifdef CONFIG_ARCH_DAVINCI -+ code = sprintf(buffer, -+ "DaVinci: ctrl=%02x stat=%1x phy=%03x\n" -+ "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x" -+ "\n", -+ musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG), -+ musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG), -+ __raw_readl((void __force __iomem *) -+ IO_ADDRESS(USBPHY_CTL_PADDR)), -+ musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG), -+ musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_USB_INT_SOURCE_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_USB_INT_MASK_REG)); -+ if (code <= 0) -+ goto done; -+ count += code; -+ buffer += code; -+#endif /* DAVINCI */ -+ -+#ifdef CONFIG_USB_TUSB6010 -+ code = sprintf(buffer, -+ "TUSB6010: devconf %08x, phy enable %08x drive %08x" -+ "\n\totg %03x timer %08x" -+ "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x" -+ "\n", -+ musb_readl(musb->ctrl_base, TUSB_DEV_CONF), -+ musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE), -+ musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL), -+ musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT), -+ musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER), -+ musb_readl(musb->ctrl_base, TUSB_PRCM_CONF), -+ musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT), -+ musb_readl(musb->ctrl_base, TUSB_INT_SRC), -+ musb_readl(musb->ctrl_base, TUSB_INT_MASK)); -+ if (code <= 0) -+ goto done; -+ count += code; -+ buffer += code; -+#endif /* DAVINCI */ -+ -+ if (cppi_ti_dma() && musb->dma_controller) { -+ code = sprintf(buffer, -+ "CPPI: txcr=%d txsrc=%01x txena=%01x; " -+ "rxcr=%d rxsrc=%01x rxena=%01x " -+ "\n", -+ musb_readl(musb->ctrl_base, -+ DAVINCI_TXCPPI_CTRL_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_TXCPPI_RAW_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_TXCPPI_INTENAB_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_RXCPPI_CTRL_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_RXCPPI_RAW_REG), -+ musb_readl(musb->ctrl_base, -+ DAVINCI_RXCPPI_INTENAB_REG)); -+ if (code <= 0) -+ goto done; -+ count += code; -+ buffer += code; -+ } -+ -+#ifdef CONFIG_USB_GADGET_MUSB_HDRC -+ if (is_peripheral_enabled(musb)) { -+ code = sprintf(buffer, "Gadget driver: %s\n", -+ musb->gadget_driver -+ ? 
musb->gadget_driver->driver.name -+ : "(none)"); -+ if (code <= 0) -+ goto done; -+ count += code; -+ buffer += code; -+ } -+#endif -+ -+done: -+ return count; -+} -+ -+/* Write to ProcFS -+ * -+ * C soft-connect -+ * c soft-disconnect -+ * I enable HS -+ * i disable HS -+ * s stop session -+ * F force session (OTG-unfriendly) -+ * E rElinquish bus (OTG) -+ * H request host mode -+ * h cancel host request -+ * T start sending TEST_PACKET -+ * D set/query the debug level -+ */ -+static int musb_proc_write(struct file *file, const char __user *buffer, -+ unsigned long count, void *data) -+{ -+ char cmd; -+ u8 reg; -+ struct musb *musb = (struct musb *)data; -+ void __iomem *mbase = musb->mregs; -+ -+ /* MOD_INC_USE_COUNT; */ -+ -+ if (unlikely(copy_from_user(&cmd, buffer, 1))) -+ return -EFAULT; -+ -+ switch (cmd) { -+ case 'C': -+ if (mbase) { -+ reg = musb_readb(mbase, MUSB_POWER) -+ | MUSB_POWER_SOFTCONN; -+ musb_writeb(mbase, MUSB_POWER, reg); -+ } -+ break; -+ -+ case 'c': -+ if (mbase) { -+ reg = musb_readb(mbase, MUSB_POWER) -+ & ~MUSB_POWER_SOFTCONN; -+ musb_writeb(mbase, MUSB_POWER, reg); -+ } -+ break; -+ -+ case 'I': -+ if (mbase) { -+ reg = musb_readb(mbase, MUSB_POWER) -+ | MUSB_POWER_HSENAB; -+ musb_writeb(mbase, MUSB_POWER, reg); -+ } -+ break; -+ -+ case 'i': -+ if (mbase) { -+ reg = musb_readb(mbase, MUSB_POWER) -+ & ~MUSB_POWER_HSENAB; -+ musb_writeb(mbase, MUSB_POWER, reg); -+ } -+ break; -+ -+ case 'F': -+ reg = musb_readb(mbase, MUSB_DEVCTL); -+ reg |= MUSB_DEVCTL_SESSION; -+ musb_writeb(mbase, MUSB_DEVCTL, reg); -+ break; -+ -+ case 'H': -+ if (mbase) { -+ reg = musb_readb(mbase, MUSB_DEVCTL); -+ reg |= MUSB_DEVCTL_HR; -+ musb_writeb(mbase, MUSB_DEVCTL, reg); -+ /* MUSB_HST_MODE( ((struct musb*)data) ); */ -+ /* WARNING("Host Mode\n"); */ -+ } -+ break; -+ -+ case 'h': -+ if (mbase) { -+ reg = musb_readb(mbase, MUSB_DEVCTL); -+ reg &= ~MUSB_DEVCTL_HR; -+ musb_writeb(mbase, MUSB_DEVCTL, reg); -+ } -+ break; -+ -+ case 'T': -+ if (mbase) { -+ musb_load_testpacket(musb); -+ musb_writeb(mbase, MUSB_TESTMODE, -+ MUSB_TEST_PACKET); -+ } -+ break; -+ -+ case '?': -+ INFO("?: you are seeing it\n"); -+ INFO("C/c: soft connect enable/disable\n"); -+ INFO("I/i: hispeed enable/disable\n"); -+ INFO("F: force session start\n"); -+ INFO("H: host mode\n"); -+ INFO("T: start sending TEST_PACKET\n"); -+ break; -+ -+ default: -+ ERR("Command %c not implemented\n", cmd); -+ break; -+ } -+ -+ musb_platform_try_idle(musb, 0); -+ -+ return count; -+} -+ -+static int musb_proc_read(char *page, char **start, -+ off_t off, int count, int *eof, void *data) -+{ -+ char *buffer = page; -+ int code = 0; -+ unsigned long flags; -+ struct musb *musb = data; -+ unsigned epnum; -+ -+ count -= off; -+ count -= 1; /* for NUL at end */ -+ if (count <= 0) -+ return -EINVAL; -+ -+ spin_lock_irqsave(&musb->lock, flags); -+ -+ code = dump_header_stats(musb, buffer); -+ if (code > 0) { -+ buffer += code; -+ count -= code; -+ } -+ -+ /* generate the report for the end points */ -+ /* REVISIT ... not unless something's connected! 
*/ -+ for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints; -+ epnum++) { -+ code = dump_end_info(musb, epnum, buffer, count); -+ if (code > 0) { -+ buffer += code; -+ count -= code; -+ } -+ } -+ -+ musb_platform_try_idle(musb, 0); -+ -+ spin_unlock_irqrestore(&musb->lock, flags); -+ *eof = 1; -+ -+ return buffer - page; -+} -+ -+void __devexit musb_debug_delete(char *name, struct musb *musb) -+{ -+ if (musb->proc_entry) -+ remove_proc_entry(name, NULL); -+} -+ -+struct proc_dir_entry *__init -+musb_debug_create(char *name, struct musb *data) -+{ -+ struct proc_dir_entry *pde; -+ -+ /* FIXME convert everything to seq_file; then later, debugfs */ -+ -+ if (!name) -+ return NULL; -+ -+ pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL); -+ data->proc_entry = pde; -+ if (pde) { -+ pde->data = data; -+ /* pde->owner = THIS_MODULE; */ -+ -+ pde->read_proc = musb_proc_read; -+ pde->write_proc = musb_proc_write; -+ -+ pde->size = 0; -+ -+ pr_debug("Registered /proc/%s\n", name); -+ } else { -+ pr_debug("Cannot create a valid proc file entry"); -+ } -+ -+ return pde; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_regs.h linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_regs.h ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_regs.h 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_regs.h 2011-06-22 13:19:33.113063272 +0200 -@@ -35,6 +35,8 @@ - #ifndef __MUSB_REGS_H__ - #define __MUSB_REGS_H__ - -+#include "isp1704.h" -+ - #define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */ - - /* -@@ -72,6 +74,11 @@ - #define MUSB_DEVCTL_HR 0x02 - #define MUSB_DEVCTL_SESSION 0x01 - -+/* ULPI_REG_CONTROL */ -+#define ULPI_REG_REQ (1 << 0) -+#define ULPI_REG_CMPLT (1 << 1) -+#define ULPI_RDN_WR (1 << 2) -+ - /* TESTMODE */ - #define MUSB_TEST_FORCE_HOST 0x80 - #define MUSB_TEST_FIFO_ACCESS 0x40 -@@ -224,6 +231,7 @@ - #define MUSB_FRAME 0x0C - #define MUSB_INDEX 0x0E /* 8 bit */ - #define MUSB_TESTMODE 0x0F /* 8 bit */ -+#define MUSB_MISC 0x61 /* 8 bit */ - - /* Get offset for a given FIFO from musb->mregs */ - #ifdef CONFIG_USB_TUSB6010 -@@ -247,6 +255,16 @@ - /* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ - #define MUSB_HWVERS 0x6C /* 8 bit */ - -+/* ULPI Registers */ -+#define ULPI_VBUS_CONTROL 0x70 /* 8 bit */ -+#define ULPI_CARKIT_CONTROL 0x71 /* 8 bit */ -+#define ULPI_INT_MASK 0x72 /* 8 bit */ -+#define ULPI_INT_SRC 0x73 /* 8 bit */ -+#define ULPI_REG_DATA 0x74 /* 8 bit */ -+#define ULPI_REG_ADDR 0x75 /* 8 bit */ -+#define ULPI_REG_CONTROL 0x76 /* 8 bit */ -+#define ULPI_RAW_DATA 0x77 /* 8 bit */ -+ - #define MUSB_EPINFO 0x78 /* 8 bit */ - #define MUSB_RAMINFO 0x79 /* 8 bit */ - #define MUSB_LINKINFO 0x7a /* 8 bit */ -@@ -301,6 +319,52 @@ - #define MUSB_BUSCTL_OFFSET(_epnum, _offset) \ - (0x80 + (8*(_epnum)) + (_offset)) - -+static inline u8 musb_ulpi_readb(void __iomem *addr, u8 offset) -+{ -+ int i = 0; -+ u8 r; -+ -+ musb_writeb(addr, ULPI_REG_ADDR, offset); -+ musb_writeb(addr, ULPI_REG_CONTROL, ULPI_REG_REQ | ULPI_RDN_WR); -+ -+ while (!(musb_readb(addr, ULPI_REG_CONTROL) & ULPI_REG_CMPLT)) { -+ i++; -+ if (i == 10000) { -+ DBG(3, "ULPI read timed out\n"); -+ return 0; -+ } -+ -+ } -+ r = musb_readb(addr, ULPI_REG_CONTROL); -+ r &= ~ULPI_REG_CMPLT; -+ musb_writeb(addr, ULPI_REG_CONTROL, r); -+ -+ return musb_readb(addr, ULPI_REG_DATA); -+} -+ -+static inline void musb_ulpi_writeb(void __iomem *addr, -+ u8 offset, u8 data) -+{ -+ int i = 0; -+ u8 r = 0; -+ -+ musb_writeb(addr, ULPI_REG_ADDR, offset); -+ 
musb_writeb(addr, ULPI_REG_DATA, data); -+ musb_writeb(addr, ULPI_REG_CONTROL, ULPI_REG_REQ); -+ -+ while(!(musb_readb(addr, ULPI_REG_CONTROL) & ULPI_REG_CMPLT)) { -+ i++; -+ if (i == 10000) { -+ DBG(3, "ULPI write timed out\n"); -+ return; -+ } -+ } -+ -+ r = musb_readb(addr, ULPI_REG_CONTROL); -+ r &= ~ULPI_REG_CMPLT; -+ musb_writeb(addr, ULPI_REG_CONTROL, r); -+} -+ - static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size) - { - musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); -@@ -447,6 +511,16 @@ static inline void musb_write_txhubport - #define clk_enable(clock) do {} while (0) - #define clk_disable(clock) do {} while (0) - -+static inline u8 musb_ulpi_readb(void __iomem *addr, u8 offset) -+{ -+ return 0 -+} -+ -+static inline void musb_ulpi_writeb(void __iomem *addr, -+ u8 offset, u8 data) -+{ -+} -+ - static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size) - { - } -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/musb_virthub.c linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_virthub.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/musb_virthub.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/musb_virthub.c 2011-06-22 13:19:33.113063272 +0200 -@@ -78,18 +78,18 @@ static void musb_port_suspend(struct mus - DBG(3, "Root port suspended, power %02x\n", power); - - musb->port1_status |= USB_PORT_STAT_SUSPEND; -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_HOST: -- musb->xceiv.state = OTG_STATE_A_SUSPEND; -+ musb->xceiv->state = OTG_STATE_A_SUSPEND; - musb->is_active = is_otg_enabled(musb) -- && musb->xceiv.host->b_hnp_enable; -+ && musb->xceiv->host->b_hnp_enable; - musb_platform_try_idle(musb, 0); - break; - #ifdef CONFIG_USB_MUSB_OTG - case OTG_STATE_B_HOST: -- musb->xceiv.state = OTG_STATE_B_WAIT_ACON; -+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON; - musb->is_active = is_otg_enabled(musb) -- && musb->xceiv.host->b_hnp_enable; -+ && musb->xceiv->host->b_hnp_enable; - musb_platform_try_idle(musb, 0); - break; - #endif -@@ -116,7 +116,7 @@ static void musb_port_reset(struct musb - void __iomem *mbase = musb->mregs; - - #ifdef CONFIG_USB_MUSB_OTG -- if (musb->xceiv.state == OTG_STATE_B_IDLE) { -+ if (musb->xceiv->state == OTG_STATE_B_IDLE) { - DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); - musb->port1_status &= ~USB_PORT_STAT_RESET; - return; -@@ -186,14 +186,14 @@ void musb_root_disconnect(struct musb *m - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - musb->is_active = 0; - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_HOST: - case OTG_STATE_A_SUSPEND: -- musb->xceiv.state = OTG_STATE_A_WAIT_BCON; -+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON; - musb->is_active = 0; - break; - case OTG_STATE_A_WAIT_VFALL: -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - break; - default: - DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); -@@ -332,7 +332,7 @@ int musb_hub_control( - musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - /* NOTE: it might really be A_WAIT_BCON ... 
*/ -- musb->xceiv.state = OTG_STATE_A_HOST; -+ musb->xceiv->state = OTG_STATE_A_HOST; - } - - put_unaligned(cpu_to_le32(musb->port1_status -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/omap2430.c linux-omap-2.6.28-nokia1/drivers/usb/musb/omap2430.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/omap2430.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/omap2430.c 2011-06-22 13:19:33.113063272 +0200 -@@ -49,6 +49,17 @@ - - static struct timer_list musb_idle_timer; - -+static void musb_vbus_work(struct work_struct *data) -+{ -+ struct musb *musb = container_of(data, struct musb, vbus_work); -+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); -+ -+ /* clear/set requirements for musb to work with DPS on omap3 */ -+ if (musb->board && musb->board->set_pm_limits && !musb->is_charger) -+ musb->board->set_pm_limits(musb->controller, -+ (devctl & MUSB_DEVCTL_VBUS)); -+} -+ - static void musb_do_idle(unsigned long _musb) - { - struct musb *musb = (void *)_musb; -@@ -62,17 +73,17 @@ static void musb_do_idle(unsigned long _ - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_WAIT_BCON: - devctl &= ~MUSB_DEVCTL_SESSION; - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) { -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - MUSB_DEV_MODE(musb); - } else { -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - MUSB_HST_MODE(musb); - } - break; -@@ -90,7 +101,7 @@ static void musb_do_idle(unsigned long _ - musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - /* NOTE: it might really be A_WAIT_BCON ... */ -- musb->xceiv.state = OTG_STATE_A_HOST; -+ musb->xceiv->state = OTG_STATE_A_HOST; - } - break; - #endif -@@ -98,9 +109,9 @@ static void musb_do_idle(unsigned long _ - case OTG_STATE_A_HOST: - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - else -- musb->xceiv.state = OTG_STATE_A_WAIT_BCON; -+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON; - #endif - default: - break; -@@ -119,7 +130,7 @@ void musb_platform_try_idle(struct musb - - /* Never idle if active, or when VBUS timeout is not set as host */ - if (musb->is_active || ((musb->a_wait_bcon == 0) -- && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { -+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { - DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); - del_timer(&musb_idle_timer); - last_timer = jiffies; -@@ -164,8 +175,8 @@ static void omap_set_vbus(struct musb *m - - if (is_on) { - musb->is_active = 1; -- musb->xceiv.default_a = 1; -- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; -+ musb->xceiv->default_a = 1; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; - devctl |= MUSB_DEVCTL_SESSION; - - MUSB_HST_MODE(musb); -@@ -176,8 +187,8 @@ static void omap_set_vbus(struct musb *m - * jumping right to B_IDLE... 
- */ - -- musb->xceiv.default_a = 0; -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->default_a = 0; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - devctl &= ~MUSB_DEVCTL_SESSION; - - MUSB_DEV_MODE(musb); -@@ -198,7 +209,9 @@ static int musb_platform_resume(struct m - - int musb_platform_set_mode(struct musb *musb, u8 musb_mode) - { -- u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); -+ struct usb_hcd *hcd; -+ struct usb_bus *host; -+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - - devctl |= MUSB_DEVCTL_SESSION; - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); -@@ -206,12 +219,15 @@ int musb_platform_set_mode(struct musb * - switch (musb_mode) { - #ifdef CONFIG_USB_MUSB_HDRC_HCD - case MUSB_HOST: -- otg_set_host(&musb->xceiv, musb->xceiv.host); -+ hcd = musb_to_hcd(musb); -+ host = hcd_to_bus(hcd); -+ -+ otg_set_host(musb->xceiv, host); - break; - #endif - #ifdef CONFIG_USB_GADGET_MUSB_HDRC - case MUSB_PERIPHERAL: -- otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget); -+ otg_set_peripheral(musb->xceiv, &musb->g); - break; - #endif - #ifdef CONFIG_USB_MUSB_OTG -@@ -233,7 +249,8 @@ int __init musb_platform_init(struct mus - omap_cfg_reg(AE5_2430_USB0HS_STP); - #endif - -- musb->xceiv = *x; -+ musb->suspendm = true; -+ musb->xceiv = x; - musb_platform_resume(musb); - - l = omap_readl(OTG_SYSCONFIG); -@@ -243,7 +260,12 @@ int __init musb_platform_init(struct mus - l &= ~AUTOIDLE; /* disable auto idle */ - l &= ~NOIDLE; /* remove possible noidle */ - l |= SMARTIDLE; /* enable smart idle */ -- l |= AUTOIDLE; /* enable auto idle */ -+ /* -+ * MUSB AUTOIDLE don't work in 3430. -+ * Workaround by Richard Woodruff/TI -+ */ -+ if (!cpu_is_omap3430()) -+ l |= AUTOIDLE; /* enable auto idle */ - omap_writel(l, OTG_SYSCONFIG); - - l = omap_readl(OTG_INTERFSEL); -@@ -261,10 +283,11 @@ int __init musb_platform_init(struct mus - if (is_host_enabled(musb)) - musb->board_set_vbus = omap_set_vbus; - if (is_peripheral_enabled(musb)) -- musb->xceiv.set_power = omap_set_power; -+ musb->xceiv->set_power = omap_set_power; - musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON; - - setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); -+ INIT_WORK(&musb->vbus_work, musb_vbus_work); - - return 0; - } -@@ -285,8 +308,8 @@ int musb_platform_suspend(struct musb *m - l |= ENABLEWAKEUP; /* enable wakeup */ - omap_writel(l, OTG_SYSCONFIG); - -- if (musb->xceiv.set_suspend) -- musb->xceiv.set_suspend(&musb->xceiv, 1); -+ if (musb->xceiv->set_suspend) -+ musb->xceiv->set_suspend(musb->xceiv, 1); - - if (musb->set_clock) - musb->set_clock(musb->clock, 0); -@@ -303,8 +326,8 @@ static int musb_platform_resume(struct m - if (!musb->clock) - return 0; - -- if (musb->xceiv.set_suspend) -- musb->xceiv.set_suspend(&musb->xceiv, 0); -+ if (musb->xceiv->set_suspend) -+ musb->xceiv->set_suspend(musb->xceiv, 0); - - if (musb->set_clock) - musb->set_clock(musb->clock, 1); -@@ -335,3 +358,112 @@ int musb_platform_exit(struct musb *musb - - return 0; - } -+ -+#ifdef CONFIG_PM -+ -+void musb_save_ctx_and_suspend(struct usb_gadget *gadget, int overwrite) -+{ -+ struct musb *musb = gadget_to_musb(gadget); -+ u32 l; -+ unsigned long flags; -+ unsigned long tmo; -+ -+ spin_lock_irqsave(&musb->lock, flags); -+ if (overwrite) -+ /* Save register context */ -+ musb_save_ctx(musb); -+ spin_unlock_irqrestore(&musb->lock, flags); -+ -+ DBG(3, "allow sleep\n"); -+ /* Do soft reset. 
This needs to be done with broken AUTOIDLE */ -+ tmo = jiffies + msecs_to_jiffies(300); -+ omap_writel(SOFTRST, OTG_SYSCONFIG); -+ while (!omap_readl(OTG_SYSSTATUS)) { -+ if (time_after(jiffies, tmo)) { -+ WARN(1, "musb failed to recover from reset!"); -+ break; -+ } -+ } -+ -+ l = omap_readl(OTG_FORCESTDBY); -+ l |= ENABLEFORCE; /* enable MSTANDBY */ -+ omap_writel(l, OTG_FORCESTDBY); -+ -+ l = ENABLEWAKEUP; /* enable wakeup */ -+ omap_writel(l, OTG_SYSCONFIG); -+ /* Use AUTOIDLE here or the device may fail to hit sleep */ -+ l |= AUTOIDLE; -+ omap_writel(l, OTG_SYSCONFIG); -+ -+ if (musb->board && musb->board->xceiv_power) -+ musb->board->xceiv_power(0); -+ /* Now it's safe to get rid of the buggy AUTOIDLE */ -+ l &= ~AUTOIDLE; -+ omap_writel(l, OTG_SYSCONFIG); -+ -+ musb->is_charger = 0; -+ -+ /* clear constraints */ -+ if (musb->board && musb->board->set_pm_limits) -+ musb->board->set_pm_limits(musb->controller, 0); -+} -+EXPORT_SYMBOL_GPL(musb_save_ctx_and_suspend); -+ -+void musb_restore_ctx_and_resume(struct usb_gadget *gadget) -+{ -+ struct musb *musb = gadget_to_musb(gadget); -+ u32 l; -+ u8 r; -+ unsigned long flags; -+ -+ DBG(3, "restoring register context\n"); -+ -+ if (musb->board && musb->board->xceiv_power) -+ musb->board->xceiv_power(1); -+ -+ spin_lock_irqsave(&musb->lock, flags); -+ if (musb->set_clock) -+ musb->set_clock(musb->clock, 1); -+ else -+ clk_enable(musb->clock); -+ -+ /* Recover OTG control */ -+ r = musb_ulpi_readb(musb->mregs, ISP1704_OTG_CTRL); -+ r |= ISP1704_OTG_CTRL_IDPULLUP | ISP1704_OTG_CTRL_DP_PULLDOWN; -+ musb_ulpi_writeb(musb->mregs, ISP1704_OTG_CTRL, r); -+ -+ /* Recover FUNC control */ -+ r = ISP1704_FUNC_CTRL_FULL_SPEED; -+ r |= ISP1704_FUNC_CTRL_SUSPENDM | ISP1704_FUNC_CTRL_RESET; -+ musb_ulpi_writeb(musb->mregs, ISP1704_FUNC_CTRL, r); -+ -+ l = omap_readl(OTG_SYSCONFIG); -+ l &= ~ENABLEWAKEUP; /* disable wakeup */ -+ omap_writel(l, OTG_SYSCONFIG); -+ -+ l = omap_readl(OTG_FORCESTDBY); -+ l &= ~ENABLEFORCE; /* disable MSTANDBY */ -+ omap_writel(l, OTG_FORCESTDBY); -+ -+ l = omap_readl(OTG_SYSCONFIG); -+ l &= ~ENABLEWAKEUP; /* disable wakeup */ -+ l &= ~NOSTDBY; /* remove possible nostdby */ -+ l |= SMARTSTDBY; /* enable smart standby */ -+ l &= ~AUTOIDLE; /* disable auto idle */ -+ l &= ~NOIDLE; /* remove possible noidle */ -+ l |= SMARTIDLE; /* enable smart idle */ -+ omap_writel(l, OTG_SYSCONFIG); -+ -+ l = omap_readl(OTG_INTERFSEL); -+ l |= ULPI_12PIN; -+ omap_writel(l, OTG_INTERFSEL); -+ -+ /* Restore register context */ -+ musb_restore_ctx(musb); -+ -+ /* set constraints */ -+ schedule_work(&musb->vbus_work); -+ spin_unlock_irqrestore(&musb->lock, flags); -+} -+EXPORT_SYMBOL_GPL(musb_restore_ctx_and_resume); -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/musb/tusb6010.c linux-omap-2.6.28-nokia1/drivers/usb/musb/tusb6010.c ---- linux-omap-2.6.28-omap1/drivers/usb/musb/tusb6010.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/musb/tusb6010.c 2011-06-22 13:19:33.113063272 +0200 -@@ -270,7 +270,7 @@ void musb_read_fifo(struct musb_hw_ep *h - */ - static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) - { -- struct musb *musb = container_of(x, struct musb, xceiv); -+ struct musb *musb = dev_get_drvdata(x->dev); - void __iomem *tbase = musb->ctrl_base; - u32 reg; - -@@ -420,7 +420,7 @@ static void musb_do_idle(unsigned long _ - - spin_lock_irqsave(&musb->lock, flags); - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_WAIT_BCON: - if ((musb->a_wait_bcon != 0) 
- && (musb->idle_timeout == 0 -@@ -484,7 +484,7 @@ void musb_platform_try_idle(struct musb - - /* Never idle if active, or when VBUS timeout is not set as host */ - if (musb->is_active || ((musb->a_wait_bcon == 0) -- && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { -+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { - DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); - del_timer(&musb_idle_timer); - last_timer = jiffies; -@@ -533,8 +533,8 @@ static void tusb_source_power(struct mus - if (musb->set_clock) - musb->set_clock(musb->clock, 1); - timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); -- musb->xceiv.default_a = 1; -- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; -+ musb->xceiv->default_a = 1; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; - devctl |= MUSB_DEVCTL_SESSION; - - conf |= TUSB_DEV_CONF_USB_HOST_MODE; -@@ -547,24 +547,24 @@ static void tusb_source_power(struct mus - /* If ID pin is grounded, we want to be a_idle */ - otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_WAIT_VRISE: - case OTG_STATE_A_WAIT_BCON: -- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; -+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; - break; - case OTG_STATE_A_WAIT_VFALL: -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - break; - default: -- musb->xceiv.state = OTG_STATE_A_IDLE; -+ musb->xceiv->state = OTG_STATE_A_IDLE; - } - musb->is_active = 0; -- musb->xceiv.default_a = 1; -+ musb->xceiv->default_a = 1; - MUSB_HST_MODE(musb); - } else { - musb->is_active = 0; -- musb->xceiv.default_a = 0; -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->default_a = 0; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - MUSB_DEV_MODE(musb); - } - -@@ -675,7 +675,7 @@ tusb_otg_ints(struct musb *musb, u32 int - else - default_a = is_host_enabled(musb); - DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); -- musb->xceiv.default_a = default_a; -+ musb->xceiv->default_a = default_a; - tusb_source_power(musb, default_a); - - /* Don't allow idling immediately */ -@@ -687,7 +687,7 @@ tusb_otg_ints(struct musb *musb, u32 int - if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { - - /* B-dev state machine: no vbus ~= disconnect */ -- if ((is_otg_enabled(musb) && !musb->xceiv.default_a) -+ if ((is_otg_enabled(musb) && !musb->xceiv->default_a) - || !is_host_enabled(musb)) { - #ifdef CONFIG_USB_MUSB_HDRC_HCD - /* ? musb_root_disconnect(musb); */ -@@ -702,9 +702,9 @@ tusb_otg_ints(struct musb *musb, u32 int - - if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { - DBG(1, "Forcing disconnect (no interrupt)\n"); -- if (musb->xceiv.state != OTG_STATE_B_IDLE) { -+ if (musb->xceiv->state != OTG_STATE_B_IDLE) { - /* INTR_DISCONNECT can hide... 
*/ -- musb->xceiv.state = OTG_STATE_B_IDLE; -+ musb->xceiv->state = OTG_STATE_B_IDLE; - musb->int_usb |= MUSB_INTR_DISCONNECT; - } - musb->is_active = 0; -@@ -718,7 +718,7 @@ tusb_otg_ints(struct musb *musb, u32 int - DBG(2, "vbus change, %s, otg %03x\n", - otg_state_string(musb), otg_stat); - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_IDLE: - DBG(2, "Got SRP, turning on VBUS\n"); - musb_set_vbus(musb, 1); -@@ -766,7 +766,7 @@ tusb_otg_ints(struct musb *musb, u32 int - - DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); - -- switch (musb->xceiv.state) { -+ switch (musb->xceiv->state) { - case OTG_STATE_A_WAIT_VRISE: - /* VBUS has probably been valid for a while now, - * but may well have bounced out of range a bit -@@ -778,7 +778,7 @@ tusb_otg_ints(struct musb *musb, u32 int - DBG(2, "devctl %02x\n", devctl); - break; - } -- musb->xceiv.state = OTG_STATE_A_WAIT_BCON; -+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON; - musb->is_active = 0; - idle_timeout = jiffies - + msecs_to_jiffies(musb->a_wait_bcon); -@@ -1092,6 +1092,7 @@ err: - - int __init musb_platform_init(struct musb *musb) - { -+ struct otg_transceiver xceiv; - struct platform_device *pdev; - struct resource *mem; - void __iomem *sync; -@@ -1103,6 +1104,8 @@ int __init musb_platform_init(struct mus - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - musb->async = mem->start; - -+ musb->suspendm = false; -+ - /* dma address for sync dma */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!mem) { -@@ -1131,10 +1134,17 @@ int __init musb_platform_init(struct mus - } - musb->isr = tusb_interrupt; - -+ memset(&xceiv, 0, sizeof(xceiv)); -+ xceiv.label = "tusb6010"; -+ xceiv.dev = musb->controller; -+ - if (is_host_enabled(musb)) - musb->board_set_vbus = tusb_source_power; - if (is_peripheral_enabled(musb)) -- musb->xceiv.set_power = tusb_draw_power; -+ xceiv.set_power = tusb_draw_power; -+ -+ otg_set_transceiver(&xceiv); -+ musb->xceiv = &xceiv; - - setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); - -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/otg/Kconfig linux-omap-2.6.28-nokia1/drivers/usb/otg/Kconfig ---- linux-omap-2.6.28-omap1/drivers/usb/otg/Kconfig 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/otg/Kconfig 2011-06-22 13:19:33.113063272 +0200 -@@ -43,7 +43,7 @@ config ISP1301_OMAP - - config TWL4030_USB - tristate "TWL4030 USB Transceiver Driver" -- depends on TWL4030_CORE -+ depends on TWL4030_CORE && REGULATOR_TWL4030 - select USB_OTG_UTILS - help - Enable this to support the USB OTG transceiver on TWL4030 -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/otg/otg.c linux-omap-2.6.28-nokia1/drivers/usb/otg/otg.c ---- linux-omap-2.6.28-omap1/drivers/usb/otg/otg.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/otg/otg.c 2011-06-22 13:19:33.113063272 +0200 -@@ -11,9 +11,14 @@ - - #include - #include -- -+#include -+#include -+#include -+#include - #include - -+#include "../core/otg_whitelist.h" -+ - static struct otg_transceiver *xceiv; - - /** -@@ -63,3 +68,105 @@ int otg_set_transceiver(struct otg_trans - return 0; - } - EXPORT_SYMBOL(otg_set_transceiver); -+ -+#ifdef CONFIG_USB_OTG_WHITELIST -+ -+/* -+ * This OTG Whitelist is the OTG "Targeted Peripheral List". It should -+ * mostly use of USB_DEVICE() or USB_DEVICE_VER() entries.. -+ * -+ * YOU _SHOULD_ CHANGE THIS LIST TO MATCH YOUR PRODUCT AND ITS TESTING! 
-+ */ -+ -+static struct usb_device_id whitelist_table[] = { -+ -+/* hubs are optional in OTG, but very handy ... */ -+{ USB_DEVICE_INFO(USB_CLASS_HUB, 0, 0), }, -+{ USB_DEVICE_INFO(USB_CLASS_HUB, 0, 1), }, -+ -+#ifdef CONFIG_USB_PRINTER /* ignoring nonstatic linkage! */ -+/* FIXME actually, printers are NOT supposed to use device classes; -+ * they're supposed to use interface classes... -+ */ -+{ USB_DEVICE_INFO(7, 1, 1) }, -+{ USB_DEVICE_INFO(7, 1, 2) }, -+{ USB_DEVICE_INFO(7, 1, 3) }, -+#endif -+ -+#ifdef CONFIG_USB_NET_CDCETHER -+/* Linux-USB CDC Ethernet gadget */ -+{ USB_DEVICE(0x0525, 0xa4a1), }, -+/* Linux-USB CDC Ethernet + RNDIS gadget */ -+{ USB_DEVICE(0x0525, 0xa4a2), }, -+#endif -+ -+#if defined(CONFIG_USB_TEST) || defined(CONFIG_USB_TEST_MODULE) -+/* gadget zero, for testing */ -+{ USB_DEVICE(0x0525, 0xa4a0), }, -+#endif -+ -+{ } /* Terminating entry */ -+}; -+ -+int is_targeted(struct usb_device *dev) -+{ -+ struct usb_device_id *id = whitelist_table; -+ -+ /* possible in developer configs only! */ -+ if (!dev->bus->otg_port) -+ return 1; -+ -+ /* HNP test device is _never_ targeted (see OTG spec 6.6.6) */ -+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && -+ le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) -+ return 0; -+ -+ /* NOTE: can't use usb_match_id() since interface caches -+ * aren't set up yet. this is cut/paste from that code. -+ */ -+ for (id = whitelist_table; id->match_flags; id++) { -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && -+ id->idVendor != le16_to_cpu(dev->descriptor.idVendor)) -+ continue; -+ -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) && -+ id->idProduct != le16_to_cpu(dev->descriptor.idProduct)) -+ continue; -+ -+ /* No need to test id->bcdDevice_lo != 0, since 0 is never -+ greater than any unsigned number. */ -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) && -+ (id->bcdDevice_lo > le16_to_cpu(dev->descriptor.bcdDevice))) -+ continue; -+ -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) && -+ (id->bcdDevice_hi < le16_to_cpu(dev->descriptor.bcdDevice))) -+ continue; -+ -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) && -+ (id->bDeviceClass != dev->descriptor.bDeviceClass)) -+ continue; -+ -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) && -+ (id->bDeviceSubClass != dev->descriptor.bDeviceSubClass)) -+ continue; -+ -+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) && -+ (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol)) -+ continue; -+ -+ return 1; -+ } -+ -+ /* add other match criteria here ... 
*/ -+ -+ -+ /* OTG MESSAGE: report errors here, customize to match your product */ -+ dev_err(&dev->dev, "device v%04x p%04x is not supported\n", -+ le16_to_cpu(dev->descriptor.idVendor), -+ le16_to_cpu(dev->descriptor.idProduct)); -+ -+ return 0; -+} -+ -+#endif /* CONFIG_USB_OTG_WHITELIST */ -diff -Nurp linux-omap-2.6.28-omap1/drivers/usb/otg/twl4030-usb.c linux-omap-2.6.28-nokia1/drivers/usb/otg/twl4030-usb.c ---- linux-omap-2.6.28-omap1/drivers/usb/otg/twl4030-usb.c 2011-06-22 13:14:21.173067705 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/usb/otg/twl4030-usb.c 2011-06-22 13:19:33.113063272 +0200 -@@ -34,6 +34,8 @@ - #include - #include - #include -+#include -+#include - - - /* Register defines */ -@@ -246,6 +248,11 @@ struct twl4030_usb { - struct otg_transceiver otg; - struct device *dev; - -+ /* TWL4030 internal USB regulator supplies */ -+ struct regulator *usb1v5; -+ struct regulator *usb1v8; -+ struct regulator *usb3v1; -+ - /* for vbus reporting with irqs disabled */ - spinlock_t lock; - -@@ -428,12 +435,31 @@ static void twl4030_i2c_access(struct tw - } - } - -+/* REVISIT regulator framework should provide a sleep() call */ -+static void twl4030_usb3v1_sleep(int sleep) -+{ -+ /* -+ * We don't disable usb3v1 regulator as it would break some MADC -+ * readings. Instead of disabling it, we keep it on and put it -+ * on sleep mode when cable is detached. -+ */ -+ if (sleep) -+ twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ (1 << 3), VUSB_DEDICATED2); -+ else -+ twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, -+ VUSB_DEDICATED2); -+} -+ - static void twl4030_phy_power(struct twl4030_usb *twl, int on) - { - u8 pwr; - - pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); - if (on) { -+ twl4030_usb3v1_sleep(false); -+ regulator_enable(twl->usb1v8); -+ regulator_enable(twl->usb1v5); - pwr &= ~PHY_PWR_PHYPWD; - WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); - twl4030_usb_write(twl, PHY_CLK_CTRL, -@@ -443,16 +469,26 @@ static void twl4030_phy_power(struct twl - } else { - pwr |= PHY_PWR_PHYPWD; - WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); -+ regulator_disable(twl->usb1v5); -+ regulator_disable(twl->usb1v8); -+ twl4030_usb3v1_sleep(true); - } - } - -+extern void musb_save_ctx_and_suspend(struct usb_gadget *gadget, int overwrite); -+extern void musb_restore_ctx_and_resume(struct usb_gadget *gadget); -+ - static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off) - { - if (twl->asleep) - return; - - twl4030_phy_power(twl, 0); -- twl->asleep = 1; -+ if (!controller_off) -+ twl->asleep = 1; -+ -+ if (twl->otg.gadget) -+ musb_save_ctx_and_suspend(twl->otg.gadget, 0); - } - - static void twl4030_phy_resume(struct twl4030_usb *twl) -@@ -466,9 +502,12 @@ static void twl4030_phy_resume(struct tw - if (twl->usb_mode == T2_USB_MODE_ULPI) - twl4030_i2c_access(twl, 0); - twl->asleep = 0; -+ -+ if (twl->otg.gadget) -+ musb_restore_ctx_and_resume(twl->otg.gadget); - } - --static void twl4030_usb_ldo_init(struct twl4030_usb *twl) -+static int twl4030_usb_ldo_init(struct twl4030_usb *twl) - { - /* Enable writing to power configuration registers */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); -@@ -480,20 +519,48 @@ static void twl4030_usb_ldo_init(struct - /* input to VUSB3V1 LDO is from VBAT, not VBUS */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); - -- /* turn on 3.1V regulator */ -- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x20, VUSB3V1_DEV_GRP); -+ /* Initialize 3.1V regulator */ -+ 
twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP); -+ -+ twl->usb3v1 = regulator_get(twl->dev, "usb3v1"); -+ if (IS_ERR(twl->usb3v1)) -+ return -ENODEV; -+ -+ /* enable early and only put it on sleep instead of disabling it */ -+ regulator_enable(twl->usb3v1); -+ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE); - -- /* turn on 1.5V regulator */ -- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x20, VUSB1V5_DEV_GRP); -+ /* Initialize 1.5V regulator */ -+ twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP); -+ -+ twl->usb1v5 = regulator_get(twl->dev, "usb1v5"); -+ if (IS_ERR(twl->usb1v5)) -+ goto fail1; -+ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE); - -- /* turn on 1.8V regulator */ -- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x20, VUSB1V8_DEV_GRP); -+ /* Initialize 1.8V regulator */ -+ twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP); -+ -+ twl->usb1v8 = regulator_get(twl->dev, "usb1v8"); -+ if (IS_ERR(twl->usb1v8)) -+ goto fail2; -+ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE); - - /* disable access to power configuration registers */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY); -+ -+ return 0; -+ -+fail2: -+ regulator_put(twl->usb1v5); -+ twl->usb1v5 = NULL; -+fail1: -+ regulator_put(twl->usb3v1); -+ twl->usb3v1 = NULL; -+ return -ENODEV; - } - - static ssize_t twl4030_usb_vbus_show(struct device *dev, -@@ -504,14 +571,42 @@ static ssize_t twl4030_usb_vbus_show(str - int ret = -EINVAL; - - spin_lock_irqsave(&twl->lock, flags); -- ret = sprintf(buf, "%s\n", -- (twl->linkstat == USB_LINK_VBUS) ? "on" : "off"); -+ ret = sprintf(buf, "%d\n", -+ (twl->linkstat == USB_LINK_VBUS) ? 1 : 0); - spin_unlock_irqrestore(&twl->lock, flags); - - return ret; - } - static DEVICE_ATTR(vbus, 0444, twl4030_usb_vbus_show, NULL); - -+static ssize_t twl4030_usb_linkstat_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct twl4030_usb *twl = dev_get_drvdata(dev); -+ unsigned long flags; -+ char *link; -+ -+ spin_lock_irqsave(&twl->lock, flags); -+ switch (twl->linkstat) { -+ case USB_LINK_VBUS: -+ link = "vbus"; -+ break; -+ case USB_LINK_ID: -+ link = "idpin"; -+ break; -+ case USB_LINK_NONE: -+ link = "none"; -+ break; -+ default: -+ link = "unknown"; -+ break; -+ } -+ spin_unlock_irqrestore(&twl->lock, flags); -+ -+ return sprintf(buf, "%s\n", link); -+} -+static DEVICE_ATTR(linkstat, 0444, twl4030_usb_linkstat_show, NULL); -+ - static irqreturn_t twl4030_usb_irq(int irq, void *_twl) - { - struct twl4030_usb *twl = _twl; -@@ -547,6 +642,7 @@ static irqreturn_t twl4030_usb_irq(int i - twl4030_phy_resume(twl); - } - sysfs_notify(&twl->dev->kobj, NULL, "vbus"); -+ sysfs_notify(&twl->dev->kobj, NULL, "linkstat"); - - return IRQ_HANDLED; - } -@@ -598,7 +694,7 @@ static int __init twl4030_usb_probe(stru - { - struct twl4030_usb_data *pdata = pdev->dev.platform_data; - struct twl4030_usb *twl; -- int status; -+ int status, err; - - if (!pdata) { - dev_dbg(&pdev->dev, "platform_data not available\n"); -@@ -622,12 +718,20 @@ static int __init twl4030_usb_probe(stru - /* init spinlock for workqueue */ - spin_lock_init(&twl->lock); - -- twl4030_usb_ldo_init(twl); -+ err = twl4030_usb_ldo_init(twl); -+ if (err) { -+ dev_err(&pdev->dev, "ldo init failed\n"); -+ kfree(twl); -+ return err; -+ } - otg_set_transceiver(&twl->otg); - - platform_set_drvdata(pdev, twl); - if (device_create_file(&pdev->dev, &dev_attr_vbus)) -- dev_warn(&pdev->dev, 
"could not create sysfs file\n"); -+ dev_warn(&pdev->dev, "could not create vbus sysfs file\n"); -+ -+ if (device_create_file(&pdev->dev, &dev_attr_linkstat)) -+ dev_warn(&pdev->dev, "could not create linkstat sysfs file\n"); - - /* Our job is to use irqs and status from the power module - * to keep the transceiver disabled when nothing's connected. -@@ -669,6 +773,7 @@ static int __exit twl4030_usb_remove(str - - free_irq(twl->irq, twl); - device_remove_file(twl->dev, &dev_attr_vbus); -+ device_remove_file(twl->dev, &dev_attr_linkstat); - - /* set transceiver mode to power on defaults */ - twl4030_usb_set_mode(twl, -1); -@@ -688,6 +793,9 @@ static int __exit twl4030_usb_remove(str - twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); - - twl4030_phy_power(twl, 0); -+ regulator_put(twl->usb1v5); -+ regulator_put(twl->usb1v8); -+ regulator_put(twl->usb3v1); - - kfree(twl); - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/Kconfig linux-omap-2.6.28-nokia1/drivers/video/Kconfig ---- linux-omap-2.6.28-omap1/drivers/video/Kconfig 2011-06-22 13:14:21.253067704 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/Kconfig 2011-06-22 13:19:33.143063270 +0200 -@@ -7,7 +7,7 @@ menu "Graphics support" - - source "drivers/char/agp/Kconfig" - --source "drivers/gpu/drm/Kconfig" -+source "drivers/gpu/Kconfig" - - config VGASTATE - tristate -@@ -2116,6 +2116,7 @@ config FB_PRE_INIT_FB - the bootloader. - - source "drivers/video/omap/Kconfig" -+source "drivers/video/omap2/Kconfig" - - source "drivers/video/backlight/Kconfig" - source "drivers/video/display/Kconfig" -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/Makefile linux-omap-2.6.28-nokia1/drivers/video/Makefile ---- linux-omap-2.6.28-omap1/drivers/video/Makefile 2011-06-22 13:14:21.253067704 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/Makefile 2011-06-22 13:19:33.143063270 +0200 -@@ -120,6 +120,7 @@ obj-$(CONFIG_FB_SM501) += sm5 - obj-$(CONFIG_FB_XILINX) += xilinxfb.o - obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o - obj-$(CONFIG_FB_OMAP) += omap/ -+obj-$(CONFIG_OMAP2_DSS) += omap2/ - obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o - obj-$(CONFIG_FB_CARMINE) += carminefb.o - obj-$(CONFIG_FB_MB862XX) += mb862xx/ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/blizzard.c linux-omap-2.6.28-nokia1/drivers/video/omap/blizzard.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/blizzard.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/blizzard.c 2011-06-22 13:19:33.143063270 +0200 -@@ -25,9 +25,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #include "dispc.h" -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/dispc.c linux-omap-2.6.28-nokia1/drivers/video/omap/dispc.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/dispc.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/dispc.c 2011-06-22 13:19:33.143063270 +0200 -@@ -24,17 +24,35 @@ - #include - #include - #include -+#include -+#include - - #include --#include - #include -+#include - - #include "dispc.h" - - #define MODULE_NAME "dispc" - - #define DSS_BASE 0x48050000 -+ - #define DSS_SYSCONFIG 0x0010 -+#define DSS_CONTROL 0x0040 -+#define DSS_SDI_CONTROL 0x0044 -+#define DSS_PLL_CONTROL 0x0048 -+#define DSS_SDI_STATUS 0x005c -+ -+ -+static struct { -+ int reg; -+ u32 val; -+} dss_context[] = { -+ { DSS_SYSCONFIG, }, -+ { DSS_CONTROL, }, -+ { DSS_SDI_CONTROL, }, -+ { DSS_PLL_CONTROL, }, -+}; - - #define DISPC_BASE 0x48050400 - -@@ -64,6 +82,31 @@ - #define 
DISPC_DATA_CYCLE2 0x01D8 - #define DISPC_DATA_CYCLE3 0x01DC - -+static struct { -+ int reg; -+ u32 val; -+} dispc_context[] = { -+ { DISPC_SYSCONFIG, }, -+ { DISPC_IRQENABLE, }, -+ { DISPC_CONTROL, }, -+ -+ { DISPC_CONFIG, }, -+ { DISPC_DEFAULT_COLOR0, }, -+ { DISPC_DEFAULT_COLOR1, }, -+ { DISPC_TRANS_COLOR0, }, -+ { DISPC_TRANS_COLOR1, }, -+ { DISPC_LINE_NUMBER, }, -+ { DISPC_TIMING_H, }, -+ { DISPC_TIMING_V, }, -+ { DISPC_POL_FREQ, }, -+ { DISPC_DIVISOR, }, -+ { DISPC_SIZE_DIG, }, -+ { DISPC_SIZE_LCD, }, -+ { DISPC_DATA_CYCLE1, }, -+ { DISPC_DATA_CYCLE2, }, -+ { DISPC_DATA_CYCLE3, }, -+}; -+ - /* DISPC GFX plane */ - #define DISPC_GFX_BA0 0x0080 - #define DISPC_GFX_BA1 0x0084 -@@ -77,6 +120,22 @@ - #define DISPC_GFX_WINDOW_SKIP 0x00B4 - #define DISPC_GFX_TABLE_BA 0x00B8 - -+static struct { -+ int reg; -+ u32 val; -+} gfx_context[] = { -+ { DISPC_GFX_BA0, }, -+ { DISPC_GFX_BA1, }, -+ { DISPC_GFX_POSITION, }, -+ { DISPC_GFX_SIZE, }, -+ { DISPC_GFX_ATTRIBUTES, }, -+ { DISPC_GFX_FIFO_THRESHOLD, }, -+ { DISPC_GFX_ROW_INC, }, -+ { DISPC_GFX_PIXEL_INC, }, -+ { DISPC_GFX_WINDOW_SKIP, }, -+ { DISPC_GFX_TABLE_BA, }, -+}; -+ - /* DISPC Video plane 1/2 */ - #define DISPC_VID1_BASE 0x00BC - #define DISPC_VID2_BASE 0x014C -@@ -103,6 +162,41 @@ - /* 5 elements in 4 byte increments */ - #define DISPC_VID_CONV_COEF0 0x0074 - -+static struct { -+ int reg; -+ u32 val; -+} vid_context[] = { -+ { DISPC_VID_BA0, }, -+ { DISPC_VID_BA1, }, -+ { DISPC_VID_POSITION, }, -+ { DISPC_VID_SIZE, }, -+ { DISPC_VID_ATTRIBUTES, }, -+ { DISPC_VID_FIFO_THRESHOLD, }, -+ { DISPC_VID_ROW_INC, }, -+ { DISPC_VID_PIXEL_INC, }, -+ { DISPC_VID_FIR, }, -+ { DISPC_VID_PICTURE_SIZE, }, -+ { DISPC_VID_ACCU0, }, -+ { DISPC_VID_ACCU1, }, -+}; -+ -+ -+static struct { -+ int reg; -+ u32 val; -+} vid_fir_context[2 * 2 * 8] = { /* 2 planes * 2 coef * 8 instance */ -+ { DISPC_VID_FIR_COEF_H0, }, -+ { DISPC_VID_FIR_COEF_HV0, }, -+}; -+ -+static struct { -+ int reg; -+ u32 val; -+} vid_conv_context[2 * 1 * 5] = { /* 2 planes * 1 coef * 5 instance */ -+ { DISPC_VID_CONV_COEF0, }, -+}; -+ -+ - #define DISPC_IRQ_FRAMEMASK 0x0001 - #define DISPC_IRQ_VSYNC 0x0002 - #define DISPC_IRQ_EVSYNC_EVEN 0x0004 -@@ -118,13 +212,25 @@ - #define DISPC_IRQ_VID2_FIFO_UNDERFLOW 0x1000 - #define DISPC_IRQ_VID2_END_WIN 0x2000 - #define DISPC_IRQ_SYNC_LOST 0x4000 -+#define DISPC_IRQ_SYNC_LOST_DIGITAL 0x8000 -+ -+#define DISPC_IRQ_MASK_ALL 0xffff - --#define DISPC_IRQ_MASK_ALL 0x7fff -+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -+#define DISPC_VID_FIR_COEF_H(n, i) (0x00F0 + (n)*0x90 + (i)*0x8) -+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -+#define DISPC_VID_FIR_COEF_HV(n, i) (0x00F4 + (n)*0x90 + (i)*0x8) -+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -+#define DISPC_VID_FIR_COEF_V(n, i) (0x01E0 + (n)*0x20 + (i)*0x4) -+/* coef index i = {0, 1, 2, 3, 4} */ -+#define DISPC_VID_CONV_COEF(n, i) (0x0130 + (n)*0x90 + (i)*0x4) - - #define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ -+ DISPC_IRQ_OCP_ERR | \ - DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ - DISPC_IRQ_VID2_FIFO_UNDERFLOW | \ -- DISPC_IRQ_SYNC_LOST) -+ DISPC_IRQ_SYNC_LOST | \ -+ DISPC_IRQ_SYNC_LOST_DIGITAL) - - #define RFBI_CONTROL 0x48050040 - -@@ -174,6 +280,8 @@ static struct { - void (*callback)(void *); - void *data; - } irq_handlers[MAX_IRQ_HANDLERS]; -+ spinlock_t lock; -+ struct completion vsync_done; - struct completion frame_done; - - int fir_hinc[OMAPFB_PLANE_NUM]; -@@ -190,6 +298,16 @@ static struct { - - static void enable_lcd_clocks(int enable); - -+static void inline dss_write_reg(int idx, 
u32 val) -+{ -+ omap_writel(val, DSS_BASE + idx); -+} -+ -+static u32 inline dss_read_reg(int idx) -+{ -+ return omap_readl(DSS_BASE + idx); -+} -+ - static void inline dispc_write_reg(int idx, u32 val) - { - __raw_writel(val, dispc.base + idx); -@@ -201,6 +319,178 @@ static u32 inline dispc_read_reg(int idx - return l; - } - -+static void save_dss_context(void) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dss_context); i++) -+ dss_context[i].val = dss_read_reg(dss_context[i].reg); -+} -+ -+static void restore_dss_context(void) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dss_context); i++) -+ dss_write_reg(dss_context[i].reg, dss_context[i].val); -+} -+ -+static void save_dispc_context(void) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dispc_context); i++) -+ dispc_context[i].val = dispc_read_reg(dispc_context[i].reg); -+} -+ -+static void restore_dispc_context(void) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dispc_context); i++) -+ dispc_write_reg(dispc_context[i].reg, dispc_context[i].val); -+} -+ -+static void save_gfx_context(void) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(gfx_context); i++) -+ gfx_context[i].val = dispc_read_reg(gfx_context[i].reg); -+} -+ -+static void restore_gfx_context(void) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(gfx_context); i++) -+ dispc_write_reg(gfx_context[i].reg, gfx_context[i].val); -+} -+ -+static void save_vid_context(int video_plane) -+{ -+ int i; -+ u32 base; -+ -+ if (video_plane == 1) -+ base = DISPC_VID1_BASE; -+ else -+ base = DISPC_VID2_BASE; -+ for (i = 0; i < ARRAY_SIZE(vid_context); i++) -+ vid_context[i].val = dispc_read_reg(base + vid_context[i].reg); -+} -+ -+static void restore_vid_context(int video_plane) -+{ -+ u32 base; -+ int i; -+ -+ if (video_plane == 1) -+ base = DISPC_VID1_BASE; -+ else -+ base = DISPC_VID2_BASE; -+ -+ for (i = 0; i < ARRAY_SIZE(vid_context); i++) -+ dispc_write_reg(base + vid_context[i].reg, vid_context[i].val); -+} -+ -+static void save_vid_fir_context(int video_plane) -+{ -+ int i; -+ u32 base; -+ -+ if (video_plane == 1) -+ base = DISPC_VID1_BASE; -+ else -+ base = DISPC_VID2_BASE; -+ -+ for (i = 0; i < 8; i++) { -+ vid_fir_context[i * 2].val = dispc_read_reg(base + -+ vid_fir_context[0].reg + i * 8); -+ vid_fir_context[i * 2 + 1].val = dispc_read_reg(base + -+ vid_fir_context[1].reg + i * 8); -+ } -+} -+ -+static void restore_vid_fir_context(int video_plane) -+{ -+ int i; -+ u32 base; -+ -+ if (video_plane == 1) -+ base = DISPC_VID1_BASE; -+ else -+ base = DISPC_VID2_BASE; -+ -+ -+ for (i = 0; i < 8; i++) { -+ dispc_write_reg(base + vid_fir_context[0].reg + i * 8, -+ vid_fir_context[i * 2].val); -+ dispc_write_reg(base + vid_fir_context[1].reg + i * 8, -+ vid_fir_context[i * 2 + 1].val); -+ } -+} -+ -+static void save_vid_conv_context(int video_plane) -+{ -+ int i; -+ u32 base; -+ -+ if (video_plane == 1) -+ base = DISPC_VID1_BASE; -+ else -+ base = DISPC_VID2_BASE; -+ -+ -+ for (i = 0; i < 5; i++) { -+ vid_conv_context[i].val = dispc_read_reg(base + -+ vid_conv_context[0].reg + i * 4); -+ } -+} -+ -+static void restore_vid_conv_context(int video_plane) -+{ -+ int i; -+ u32 base; -+ -+ if (video_plane == 1) -+ base = DISPC_VID1_BASE; -+ else -+ base = DISPC_VID2_BASE; -+ -+ -+ for (i = 0; i < 5; i++) { -+ dispc_write_reg(base + vid_conv_context[0].reg + i * 4, -+ vid_conv_context[i].val); -+ } -+} -+ -+static void save_all_context(void) -+{ -+ save_dss_context(); -+ save_dispc_context(); -+ save_gfx_context(); -+ save_vid_context(1); -+ save_vid_context(2); -+ save_vid_fir_context(1); -+ 
save_vid_fir_context(2); -+ save_vid_conv_context(1); -+ save_vid_conv_context(2); -+} -+ -+static void restore_all_context(void) -+{ -+ restore_dss_context(); -+ restore_dispc_context(); -+ restore_gfx_context(); -+ restore_vid_context(1); -+ restore_vid_context(2); -+ restore_vid_fir_context(1); -+ restore_vid_fir_context(2); -+ restore_vid_conv_context(1); -+ restore_vid_conv_context(2); -+} -+ - /* Select RFBI or bypass mode */ - static void enable_rfbi_mode(int enable) - { -@@ -249,6 +539,117 @@ static void set_lcd_data_lines(int data_ - dispc_write_reg(DISPC_CONTROL, l); - } - -+static void omap_dispc_go(enum omapfb_channel_out channel_out) -+{ -+ int bit; -+ -+ bit = channel_out == OMAPFB_CHANNEL_OUT_LCD ? (1 << 5) : (1 << 6); -+ -+ MOD_REG_FLD(DISPC_CONTROL, bit, bit); -+} -+ -+static void omap_dispc_vsync_done(void *data) -+{ -+ complete(&dispc.vsync_done); -+} -+ -+/** -+ * omap_dispc_sync - wait for the vsync signal -+ * @channel_out: specifies whether to wait for the LCD or DIGIT out vsync -+ * signal -+ * -+ * Sleeps until receiving the appropriate vsync signal. If the output is not -+ * enabled return immediately. -+ */ -+static int omap_dispc_sync(enum omapfb_channel_out channel_out) -+{ -+ u32 irq_mask; -+ u32 l; -+ -+ l = dispc_read_reg(DISPC_CONTROL); -+ -+ switch (channel_out) { -+ case OMAPFB_CHANNEL_OUT_LCD: -+ irq_mask = DISPC_IRQ_VSYNC; -+ if (!(l & 1)) -+ return 0; -+ break; -+ case OMAPFB_CHANNEL_OUT_DIGIT: -+ irq_mask = DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD; -+ if (!(l & (1 << 1))) -+ return 0; -+ break; -+ default: -+ return -ENODEV; -+ } -+ -+ init_completion(&dispc.vsync_done); -+ if (omap_dispc_request_irq(irq_mask, omap_dispc_vsync_done, NULL) < 0) -+ BUG(); -+ if (!wait_for_completion_timeout(&dispc.vsync_done, -+ msecs_to_jiffies(100))) { -+ if (printk_ratelimit()) { -+ dev_err(dispc.fbdev->dev, -+ "timeout waiting for VSYNC\n"); -+ } -+ } -+ omap_dispc_free_irq(irq_mask, omap_dispc_vsync_done, NULL); -+ -+ return 0; -+} -+ -+/** -+ * omap_dispc_wait_update - wait for a pending shadow->internal register update -+ * @channel_out: specifies whether to wait for LCD or DIGIT out updates to -+ * finish. -+ * -+ * If there is a pending update sleep until it finishes. If output is not -+ * enabled or GO bit is not set (no pending update) return imediately. -+ */ -+static void omap_dispc_wait_update(enum omapfb_channel_out channel_out) -+{ -+ int enable_bit; -+ int go_bit; -+ u32 l; -+ int tmo = 100000; -+ -+ if (channel_out == OMAPFB_CHANNEL_OUT_LCD) { -+ enable_bit = 1 << 0; -+ go_bit = 1 << 5; -+ } else { -+ enable_bit = 1 << 1; -+ go_bit = 1 << 6; -+ } -+ -+ l = dispc_read_reg(DISPC_CONTROL); -+ if (!(l & enable_bit) || !(l & go_bit)) -+ /* Output is disabled or GO bit is not set, so no pending -+ * updates */ -+ return; -+ /* GO bit is set, the update will happen at the next vsync time. */ -+ omap_dispc_sync(channel_out); -+ while (l & go_bit) { -+ cpu_relax(); -+ if (!tmo--) { -+ dev_err(dispc.fbdev->dev, -+ "timeout waiting for %s\n", -+ channel_out == OMAPFB_CHANNEL_OUT_LCD ? 
-+ "GOLCD" : "GODIGIT"); -+ break; -+ } -+ l = dispc_read_reg(DISPC_CONTROL); -+ } -+} -+ -+static int omap_dispc_sync_wrapper(enum omapfb_channel_out channel_out) -+{ -+ int r; -+ enable_lcd_clocks(1); -+ r = omap_dispc_sync(channel_out); -+ enable_lcd_clocks(0); -+ return r; -+} -+ - static void set_load_mode(int mode) - { - BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY | -@@ -291,13 +692,8 @@ static void setup_plane_fifo(int plane, - - l = dispc_read_reg(fsz_reg[plane]); - l &= FLD_MASK(0, 11); -- if (ext_mode) { -- low = l * 3 / 4; -- high = l; -- } else { -- low = l / 4; -- high = l * 3 / 4; -- } -+ low = l * 3 / 4; -+ high = l - 1; - MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 12) | FLD_MASK(0, 12), - (high << 16) | low); - } -@@ -318,16 +714,52 @@ void omap_dispc_enable_digit_out(int ena - } - EXPORT_SYMBOL(omap_dispc_enable_digit_out); - --static inline int _setup_plane(int plane, int channel_out, -+extern void omap_dispc_set_plane_base(int plane, u32 paddr) -+{ -+ u32 reg; -+ -+ enable_lcd_clocks(1); -+ -+ switch (plane) { -+ case 0: -+ reg = DISPC_GFX_BA0; -+ break; -+ case 1: -+ reg = DISPC_VID1_BASE + DISPC_VID_BA0; -+ break; -+ case 2: -+ reg = DISPC_VID2_BASE + DISPC_VID_BA0; -+ break; -+ default: -+ BUG(); -+ return; -+ } -+ -+ dispc_write_reg(reg, paddr); -+ -+ omap_dispc_go(OMAPFB_CHANNEL_OUT_LCD); -+ enable_lcd_clocks(0); -+} -+EXPORT_SYMBOL(omap_dispc_set_plane_base); -+ -+static int omap_dispc_set_scale(int plane, -+ int orig_width, int orig_height, -+ int out_width, int out_height, int ilace); -+static void setup_color_conv_coef(void); -+ -+static inline int _setup_plane(int plane, int enabled, int channel_out, - u32 paddr, int screen_width, - int pos_x, int pos_y, int width, int height, -- int color_mode) -+ int out_width, int out_height, -+ int color_mode, int rotate) - { - const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES, - DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES, - DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES }; -- const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0, -+ const u32 ba0_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0, - DISPC_VID2_BASE + DISPC_VID_BA0 }; -+ const u32 ba1_reg[] = { DISPC_GFX_BA1, DISPC_VID1_BASE + DISPC_VID_BA1, -+ DISPC_VID2_BASE + DISPC_VID_BA1 }; - const u32 ps_reg[] = { DISPC_GFX_POSITION, - DISPC_VID1_BASE + DISPC_VID_POSITION, - DISPC_VID2_BASE + DISPC_VID_POSITION }; -@@ -345,17 +777,41 @@ static inline int _setup_plane(int plane - int color_code; - int bpp; - int cconv_en; -- int set_vsize; - u32 l; -+ int ilace = channel_out == OMAPFB_CHANNEL_OUT_DIGIT; -+ int fieldmode = 0; -+ -+ /* -+ * Some definitions: -+ * -+ * ilace == the actual output is interlaced, image is draw as -+ * two separate fields with other containing odd horizontal -+ * lines and the other even horizontal lines. -+ * -+ * fieldmode == the input data from framebuffer is also fed to -+ * venc as two separate fields. -+ * -+ * Why fieldmode can be disabled with interlacing? -+ * -+ * When scaling up, we must not skip any lines from the -+ * framebuffer, otherwise the scaling unit cannot interpolate -+ * missing lines properly. Furthermore, since the venc is in -+ * interlaced mode, each output field has only half of the -+ * vertical resolution, thus we may end up actually -+ * downsampling even though the original image has less -+ * physical lines than the output. 
-+ * -+ */ - - #ifdef VERBOSE - dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d" -- " pos_x %d pos_y %d width %d height %d color_mode %d\n", -- plane, channel_out, paddr, screen_width, pos_x, pos_y, -- width, height, color_mode); -+ " pos_x %d pos_y %d width %d height %d color_mode %d " -+ " out_width %d out height %d " -+ "interlaced %d\n", -+ plane, channel_out, paddr, screen_width, pos_x, pos_y, -+ width, height, color_mode, out_width, out_height, ilace); - #endif - -- set_vsize = 0; - switch (plane) { - case OMAPFB_PLANE_GFX: - burst_shift = 6; -@@ -365,12 +821,19 @@ static inline int _setup_plane(int plane - case OMAPFB_PLANE_VID2: - burst_shift = 14; - chout_shift = 16; -- set_vsize = 1; - break; - default: - return -EINVAL; - } - -+ if (!enabled) { -+ /* just disable it, without configuring the rest */ -+ l = dispc_read_reg(at_reg[plane]); -+ l &= ~1; -+ dispc_write_reg(at_reg[plane], l); -+ goto out; -+ } -+ - switch (channel_out) { - case OMAPFB_CHANNEL_OUT_LCD: - chout_val = 0; -@@ -406,6 +869,39 @@ static inline int _setup_plane(int plane - return -EINVAL; - } - -+ if (plane == OMAPFB_PLANE_GFX) { -+ if (width != out_width || height != out_height) -+ return -EINVAL; -+ } -+ -+ if (ilace) { -+ /* -+ * FIXME the downscaling ratio really isn't a good -+ * indicator whether fieldmode should be used. -+ * fieldmode should be user controllable. -+ * -+ * In general the downscaling ratio could be reduced -+ * by simply skipping some of the source lines -+ * regardless of fieldmode. -+ */ -+ if (height >= (out_height << (cpu_is_omap34xx() ? 1 : 0))) -+ fieldmode = 1; -+ } -+ -+ if (fieldmode) -+ height /= 2; -+ if (ilace) { -+ pos_y /= 2; -+ out_height /= 2; -+ } -+ -+ if (plane != OMAPFB_PLANE_GFX) { -+ l = omap_dispc_set_scale(plane, width, height, -+ out_width, out_height, ilace); -+ if (l) -+ return l; -+ } -+ - l = dispc_read_reg(at_reg[plane]); - - l &= ~(0x0f << 1); -@@ -419,141 +915,309 @@ static inline int _setup_plane(int plane - l &= ~(1 << chout_shift); - l |= chout_val << chout_shift; - -+ l |= 1; /* Enable plane */ -+ - dispc_write_reg(at_reg[plane], l); - -- dispc_write_reg(ba_reg[plane], paddr); -+ if (cconv_en) -+ setup_color_conv_coef(); -+ -+ dispc_write_reg(ba0_reg[plane], paddr); -+ -+ if (fieldmode) -+ dispc_write_reg(ba1_reg[plane], -+ paddr + (screen_width) * bpp / 8); -+ else -+ dispc_write_reg(ba1_reg[plane], paddr); -+ - MOD_REG_FLD(ps_reg[plane], - FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x); - - MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11), - ((height - 1) << 16) | (width - 1)); - -- if (set_vsize) { -- /* Set video size if set_scale hasn't set it */ -- if (!dispc.fir_vinc[plane]) -- MOD_REG_FLD(vs_reg[plane], -- FLD_MASK(16, 11), (height - 1) << 16); -- if (!dispc.fir_hinc[plane]) -- MOD_REG_FLD(vs_reg[plane], -- FLD_MASK(0, 11), width - 1); -- } -+ MOD_REG_FLD(vs_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11), -+ ((out_height - 1) << 16) | (out_width - 1)); -+ -+ dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + -+ (fieldmode ? 
screen_width * bpp / 8 : 0) + 1); - -- dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1); -+out: -+ omap_dispc_go(channel_out); - - return height * screen_width * bpp / 8; - } - --static int omap_dispc_setup_plane(int plane, int channel_out, -- unsigned long offset, -+static int omap_dispc_setup_plane(int plane, int enable, int channel_out, -+ unsigned long paddr, - int screen_width, - int pos_x, int pos_y, int width, int height, -- int color_mode) -+ int out_width, int out_height, -+ int color_mode, int rotate) - { -- u32 paddr; - int r; - - if ((unsigned)plane > dispc.mem_desc.region_cnt) - return -EINVAL; -- paddr = dispc.mem_desc.region[plane].paddr + offset; - enable_lcd_clocks(1); -- r = _setup_plane(plane, channel_out, paddr, -+ omap_dispc_wait_update(channel_out); -+ r = _setup_plane(plane, enable, channel_out, paddr, - screen_width, -- pos_x, pos_y, width, height, color_mode); -+ pos_x, pos_y, width, height, -+ out_width, out_height, color_mode, rotate); -+ omap_dispc_wait_update(channel_out); - enable_lcd_clocks(0); - return r; - } - - static void write_firh_reg(int plane, int reg, u32 value) - { -- u32 base; -- -- if (plane == 1) -- base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0; -- else -- base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0; -- dispc_write_reg(base + reg * 8, value); -+ dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value); - } - - static void write_firhv_reg(int plane, int reg, u32 value) - { -- u32 base; -+ dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value); -+} - -- if (plane == 1) -- base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0; -- else -- base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0; -- dispc_write_reg(base + reg * 8, value); -+static void write_firv_reg(int plane, int reg, u32 value) -+{ -+ dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value); - } - --static void set_upsampling_coef_table(int plane) -+static void set_sampling_coef_tables(int plane, int hscaleup, int vscaleup, int five_taps) - { -- const u32 coef[][2] = { -- { 0x00800000, 0x00800000 }, -- { 0x0D7CF800, 0x037B02FF }, -- { 0x1E70F5FF, 0x0C6F05FE }, -- { 0x335FF5FE, 0x205907FB }, -- { 0xF74949F7, 0x00404000 }, -- { 0xF55F33FB, 0x075920FE }, -- { 0xF5701EFE, 0x056F0CFF }, -- { 0xF87C0DFF, 0x027B0300 }, -+ /* Coefficients for horizontal up-sampling */ -+ static const u32 coef_hup[8] = { -+ 0x00800000, -+ 0x0D7CF800, -+ 0x1E70F5FF, -+ 0x335FF5FE, -+ 0xF74949F7, -+ 0xF55F33FB, -+ 0xF5701EFE, -+ 0xF87C0DFF, -+ }; -+ -+ /* Coefficients for horizontal down-sampling */ -+ static const u32 coef_hdown[8] = { -+ 0x24382400, -+ 0x28371FFE, -+ 0x2C361BFB, -+ 0x303516F9, -+ 0x11343311, -+ 0x1635300C, -+ 0x1B362C08, -+ 0x1F372804, -+ }; -+ -+ /* Coefficients for horizontal and vertical up-sampling */ -+ static const u32 coef_hvup[2][8] = { -+ { -+ 0x00800000, -+ 0x037B02FF, -+ 0x0C6F05FE, -+ 0x205907FB, -+ 0x00404000, -+ 0x075920FE, -+ 0x056F0CFF, -+ 0x027B0300, -+ }, -+ { -+ 0x00800000, -+ 0x0D7CF8FF, -+ 0x1E70F5FE, -+ 0x335FF5FB, -+ 0xF7404000, -+ 0xF55F33FE, -+ 0xF5701EFF, -+ 0xF87C0D00, -+ }, -+ }; -+ -+ /* Coefficients for horizontal and vertical down-sampling */ -+ static const u32 coef_hvdown[2][8] = { -+ { -+ 0x24382400, -+ 0x28391F04, -+ 0x2D381B08, -+ 0x3237170C, -+ 0x123737F7, -+ 0x173732F9, -+ 0x1B382DFB, -+ 0x1F3928FE, -+ }, -+ { -+ 0x24382400, -+ 0x28371F04, -+ 0x2C361B08, -+ 0x3035160C, -+ 0x113433F7, -+ 0x163530F9, -+ 0x1B362CFB, -+ 0x1F3728FE, -+ }, - }; -+ -+ /* Coefficients for vertical up-sampling */ -+ static const u32 coef_vup[8] = { -+ 
0x00000000, -+ 0x0000FF00, -+ 0x0000FEFF, -+ 0x0000FBFE, -+ 0x000000F7, -+ 0x0000FEFB, -+ 0x0000FFFE, -+ 0x000000FF, -+ }; -+ -+ /* Coefficients for vertical down-sampling */ -+ static const u32 coef_vdown[8] = { -+ 0x00000000, -+ 0x000004FE, -+ 0x000008FB, -+ 0x00000CF9, -+ 0x0000F711, -+ 0x0000F90C, -+ 0x0000FB08, -+ 0x0000FE04, -+ }; -+ -+ const u32 *h_coef; -+ const u32 *hv_coef; -+ const u32 *hv_coef_mod; -+ const u32 *v_coef; - int i; - -+ if (hscaleup) -+ h_coef = coef_hup; -+ else -+ h_coef = coef_hdown; -+ -+ if (vscaleup) { -+ hv_coef = coef_hvup[five_taps]; -+ v_coef = coef_vup; -+ -+ if (hscaleup) -+ hv_coef_mod = NULL; -+ else -+ hv_coef_mod = coef_hvdown[five_taps]; -+ } else { -+ hv_coef = coef_hvdown[five_taps]; -+ v_coef = coef_vdown; -+ -+ if (hscaleup) -+ hv_coef_mod = coef_hvup[five_taps]; -+ else -+ hv_coef_mod = NULL; -+ } -+ -+ for (i = 0; i < 8; i++) { -+ u32 h, hv; -+ -+ h = h_coef[i]; -+ -+ hv = hv_coef[i]; -+ -+ if (hv_coef_mod) { -+ hv &= 0xffffff00; -+ hv |= (hv_coef_mod[i] & 0xff); -+ } -+ -+ write_firh_reg(plane, i, h); -+ write_firhv_reg(plane, i, hv); -+ } -+ -+ if (!five_taps) -+ return; -+ - for (i = 0; i < 8; i++) { -- write_firh_reg(plane, i, coef[i][0]); -- write_firhv_reg(plane, i, coef[i][1]); -+ u32 v; -+ -+ v = v_coef[i]; -+ -+ write_firv_reg(plane, i, v); - } - } - - static int omap_dispc_set_scale(int plane, - int orig_width, int orig_height, -- int out_width, int out_height) -+ int out_width, int out_height, int ilace) - { - const u32 at_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES, - DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES }; -- const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE, -- DISPC_VID2_BASE + DISPC_VID_SIZE }; - const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR, - DISPC_VID2_BASE + DISPC_VID_FIR }; -- -+ const u32 accu0_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ACCU0, -+ DISPC_VID2_BASE + DISPC_VID_ACCU0 }; -+ const u32 accu1_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ACCU1, -+ DISPC_VID2_BASE + DISPC_VID_ACCU1 }; - u32 l; - int fir_hinc; - int fir_vinc; -+ int hscaleup, vscaleup, five_taps; -+ int accu0 = 0; -+ int accu1 = 0; -+ -+ hscaleup = orig_width <= out_width; -+ vscaleup = orig_height <= out_height; -+ five_taps = orig_height > out_height * 2; - -- if ((unsigned)plane > OMAPFB_PLANE_NUM) -+ if ((unsigned)plane >= OMAPFB_PLANE_NUM) - return -ENODEV; - -- if (out_width != orig_width || out_height != orig_height) -+ if (plane == OMAPFB_PLANE_GFX) -+ return 0; -+ -+ if (orig_width > (2048 >> five_taps)) - return -EINVAL; - -- enable_lcd_clocks(1); -- if (orig_width < out_width) { -- /* -- * Upsampling. -- * Currently you can only scale both dimensions in one way. 
-- */ -- if (orig_height > out_height || -- orig_width * 8 < out_width || -- orig_height * 8 < out_height) { -- enable_lcd_clocks(0); -+ if (hscaleup) { -+ if (orig_width * 8 < out_width) - return -EINVAL; -- } -- set_upsampling_coef_table(plane); -- } else if (orig_width > out_width) { -- /* Downsampling not yet supported -- */ -+ } else { -+ if (!cpu_is_omap34xx() && orig_width > out_width * 2) -+ return -EINVAL; -+ if (orig_width > out_width * 4) -+ return -EINVAL; -+ } - -- enable_lcd_clocks(0); -- return -EINVAL; -+ if (vscaleup) { -+ if (orig_height * 8 < out_height) -+ return -EINVAL; -+ } else { -+ if (!cpu_is_omap34xx() && orig_height > out_height * 2) -+ return -EINVAL; -+ if (orig_height > out_height * 4) -+ return -EINVAL; - } -+ - if (!orig_width || orig_width == out_width) - fir_hinc = 0; - else - fir_hinc = 1024 * orig_width / out_width; -+ - if (!orig_height || orig_height == out_height) - fir_vinc = 0; - else - fir_vinc = 1024 * orig_height / out_height; -+ -+ if (ilace) { -+ accu0 = 0; -+ accu1 = fir_vinc / 2; -+ if (accu1 >= 1024 / 2) { -+ accu0 = 1024 / 2; -+ accu1 -= accu0; -+ } -+ } -+ -+ enable_lcd_clocks(1); -+ -+ set_sampling_coef_tables(plane, hscaleup, vscaleup, five_taps); -+ -+ dispc_write_reg(accu0_reg[plane], FLD_MASK(16, 26) || (accu0 << 16)); -+ dispc_write_reg(accu1_reg[plane], FLD_MASK(16, 26) || (accu1 << 16)); -+ - dispc.fir_hinc[plane] = fir_hinc; - dispc.fir_vinc[plane] = fir_vinc; - -@@ -567,35 +1231,22 @@ static int omap_dispc_set_scale(int plan - out_width, out_height, orig_width, orig_height, - fir_hinc, fir_vinc); - -- MOD_REG_FLD(vs_reg[plane], -- FLD_MASK(16, 11) | FLD_MASK(0, 11), -- ((out_height - 1) << 16) | (out_width - 1)); -- - l = dispc_read_reg(at_reg[plane]); - l &= ~(0x03 << 5); - l |= fir_hinc ? (1 << 5) : 0; - l |= fir_vinc ? (1 << 6) : 0; -+ l &= ~(0x3 << 7); -+ l |= hscaleup ? 0 : (1 << 7); -+ l |= vscaleup ? 0 : (1 << 8); -+ l &= ~(0x3 << 21); -+ l |= five_taps ? (1 << 21) : 0; -+ l |= five_taps ? (1 << 22) : 0; - dispc_write_reg(at_reg[plane], l); - - enable_lcd_clocks(0); - return 0; - } - --static int omap_dispc_enable_plane(int plane, int enable) --{ -- const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES, -- DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES, -- DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES }; -- if ((unsigned int)plane > dispc.mem_desc.region_cnt) -- return -EINVAL; -- -- enable_lcd_clocks(1); -- MOD_REG_FLD(at_reg[plane], 1, enable ? 
1 : 0); -- enable_lcd_clocks(0); -- -- return 0; --} -- - static int omap_dispc_set_color_key(struct omapfb_color_key *ck) - { - u32 df_reg, tr_reg; -@@ -634,6 +1285,8 @@ static int omap_dispc_set_color_key(stru - if (val != 0) - dispc_write_reg(tr_reg, ck->trans_key); - dispc_write_reg(df_reg, ck->background); -+ -+ omap_dispc_go(ck->channel_out); - enable_lcd_clocks(0); - - dispc.color_key = *ck; -@@ -651,6 +1304,11 @@ static void load_palette(void) - { - } - -+static void omap_dispc_frame_done(void *data) -+{ -+ complete(&dispc.frame_done); -+} -+ - static int omap_dispc_set_update_mode(enum omapfb_update_mode mode) - { - int r = 0; -@@ -665,12 +1323,17 @@ static int omap_dispc_set_update_mode(en - break; - case OMAPFB_UPDATE_DISABLED: - init_completion(&dispc.frame_done); -+ if (omap_dispc_request_irq(DISPC_IRQ_FRAMEMASK, -+ omap_dispc_frame_done, NULL) < 0) -+ BUG(); - omap_dispc_enable_lcd_out(0); - if (!wait_for_completion_timeout(&dispc.frame_done, - msecs_to_jiffies(500))) { - dev_err(dispc.fbdev->dev, - "timeout waiting for FRAME DONE\n"); - } -+ omap_dispc_free_irq(DISPC_IRQ_FRAMEMASK, -+ omap_dispc_frame_done, NULL); - dispc.update_mode = mode; - enable_lcd_clocks(0); - break; -@@ -776,7 +1439,7 @@ static void set_lcd_timings(void) - { - u32 l; - int lck_div, pck_div; -- struct lcd_panel *panel = dispc.fbdev->panel; -+ struct lcd_panel *panel = dispc.fbdev->lcd_panel; - int is_tft = panel->config & OMAP_LCDC_PANEL_TFT; - unsigned long fck; - -@@ -812,7 +1475,15 @@ static void set_lcd_timings(void) - panel->pixel_clock = fck / lck_div / pck_div / 1000; - } - --static void recalc_irq_mask(void) -+/** -+ * _recalc_irq_mask - calculate the new set of enabled IRQs -+ * -+ * Calculate the new set of enabled IRQs which is a combination of all -+ * handlers' IRQs. -+ * -+ * dispc.lock must be held. 
-+ */ -+static void _recalc_irq_mask(void) - { - int i; - unsigned long irq_mask = DISPC_IRQ_MASK_ERROR; -@@ -825,17 +1496,40 @@ static void recalc_irq_mask(void) - } - - enable_lcd_clocks(1); -- MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask); -+ MOD_REG_FLD(DISPC_IRQENABLE, DISPC_IRQ_MASK_ALL, irq_mask); -+ enable_lcd_clocks(0); -+} -+ -+static void recalc_irq_mask(void) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dispc.lock, flags); -+ _recalc_irq_mask(); -+ spin_unlock_irqrestore(&dispc.lock, flags); -+} -+ -+static inline void _clear_irq(u32 irq_mask) -+{ -+ dispc_write_reg(DISPC_IRQSTATUS, irq_mask); -+} -+ -+static inline void clear_irq(u32 irq_mask) -+{ -+ enable_lcd_clocks(1); -+ _clear_irq(irq_mask); - enable_lcd_clocks(0); - } - - int omap_dispc_request_irq(unsigned long irq_mask, void (*callback)(void *data), - void *data) - { -+ unsigned long flags; - int i; - - BUG_ON(callback == NULL); - -+ spin_lock_irqsave(&dispc.lock, flags); - for (i = 0; i < MAX_IRQ_HANDLERS; i++) { - if (dispc.irq_handlers[i].callback) - continue; -@@ -843,11 +1537,14 @@ int omap_dispc_request_irq(unsigned long - dispc.irq_handlers[i].irq_mask = irq_mask; - dispc.irq_handlers[i].callback = callback; - dispc.irq_handlers[i].data = data; -- recalc_irq_mask(); -+ clear_irq(irq_mask); -+ _recalc_irq_mask(); - -+ spin_unlock_irqrestore(&dispc.lock, flags); - return 0; - } - -+ spin_unlock_irqrestore(&dispc.lock, flags); - return -EBUSY; - } - EXPORT_SYMBOL(omap_dispc_request_irq); -@@ -856,17 +1553,22 @@ void omap_dispc_free_irq(unsigned long i - void *data) - { - int i; -+ unsigned long flags; - -+ spin_lock_irqsave(&dispc.lock, flags); - for (i = 0; i < MAX_IRQ_HANDLERS; i++) { - if (dispc.irq_handlers[i].callback == callback && - dispc.irq_handlers[i].data == data) { - dispc.irq_handlers[i].irq_mask = 0; - dispc.irq_handlers[i].callback = NULL; - dispc.irq_handlers[i].data = NULL; -- recalc_irq_mask(); -+ _recalc_irq_mask(); -+ -+ spin_unlock_irqrestore(&dispc.lock, flags); - return; - } - } -+ spin_unlock_irqrestore(&dispc.lock, flags); - - BUG(); - } -@@ -880,21 +1582,26 @@ static irqreturn_t omap_dispc_irq_handle - enable_lcd_clocks(1); - - stat = dispc_read_reg(DISPC_IRQSTATUS); -- if (stat & DISPC_IRQ_FRAMEMASK) -- complete(&dispc.frame_done); - - if (stat & DISPC_IRQ_MASK_ERROR) { - if (printk_ratelimit()) { - dev_err(dispc.fbdev->dev, "irq error status %04x\n", -- stat & 0x7fff); -+ stat & DISPC_IRQ_MASK_ALL); - } - } - -+ spin_lock(&dispc.lock); - for (i = 0; i < MAX_IRQ_HANDLERS; i++) { -- if (unlikely(dispc.irq_handlers[i].callback && -- (stat & dispc.irq_handlers[i].irq_mask))) -- dispc.irq_handlers[i].callback(dispc.irq_handlers[i].data); -+ void (*cb)(void *) = dispc.irq_handlers[i].callback; -+ unsigned long cb_irqs = dispc.irq_handlers[i].irq_mask; -+ void *cb_data = dispc.irq_handlers[i].data; -+ -+ spin_unlock(&dispc.lock); -+ if (unlikely(cb != NULL && (stat & cb_irqs))) -+ cb(cb_data); -+ spin_lock(&dispc.lock); - } -+ spin_unlock(&dispc.lock); - - dispc_write_reg(DISPC_IRQSTATUS, stat); - -@@ -943,7 +1650,11 @@ static void enable_lcd_clocks(int enable - if (enable) { - clk_enable(dispc.dss_ick); - clk_enable(dispc.dss1_fck); -+ if (dispc.dss1_fck->usecount == 1) -+ restore_all_context(); - } else { -+ if (dispc.dss1_fck->usecount == 1) -+ save_all_context(); - clk_disable(dispc.dss1_fck); - clk_disable(dispc.dss_ick); - } -@@ -957,20 +1668,100 @@ static void enable_digit_clocks(int enab - clk_disable(dispc.dss_54m_fck); - } - -+#ifdef DEBUG -+static void omap_dispc_dump(void) 
-+{ -+ int i; -+ -+ static const struct { -+ const char *name; -+ int idx; -+ } dss_regs[] = { -+ { "DSS_SYSCONFIG", 0x0010}, -+ { "DSS_CONTROL", 0x0040}, -+ { "DSS_SDI_CONTROL", 0x0044}, -+ { "DSS_PLL_CONTROL", 0x0048}, -+ { "DSS_SDI_STATUS", 0x005c}, -+ }; -+ -+ static const struct { -+ const char *name; -+ int idx; -+ } dispc_regs[] = { -+ { "DISPC_REVISION", 0x0000}, -+ { "DISPC_SYSCONFIG", 0x0010}, -+ { "DISPC_SYSSTATUS", 0x0014}, -+ { "DISPC_IRQSTATUS", 0x0018}, -+ { "DISPC_IRQENABLE", 0x001C}, -+ { "DISPC_CONTROL", 0x0040}, -+ { "DISPC_CONFIG", 0x0044}, -+ { "DISPC_CAPABLE", 0x0048}, -+ { "DISPC_DEFAULT_COLOR0", 0x004C}, -+ { "DISPC_DEFAULT_COLOR1", 0x0050}, -+ { "DISPC_TRANS_COLOR0", 0x0054}, -+ { "DISPC_TRANS_COLOR1", 0x0058}, -+ { "DISPC_LINE_STATUS", 0x005C}, -+ { "DISPC_LINE_NUMBER", 0x0060}, -+ { "DISPC_TIMING_H", 0x0064}, -+ { "DISPC_TIMING_V", 0x0068}, -+ { "DISPC_POL_FREQ", 0x006C}, -+ { "DISPC_DIVISOR", 0x0070}, -+ { "DISPC_SIZE_DIG", 0x0078}, -+ { "DISPC_SIZE_LCD", 0x007C}, -+ { "DISPC_DATA_CYCLE1", 0x01D4}, -+ { "DISPC_DATA_CYCLE2", 0x01D8}, -+ { "DISPC_DATA_CYCLE3", 0x01DC}, -+ { "DISPC_GFX_BA0", 0x0080}, -+ { "DISPC_GFX_BA1", 0x0084}, -+ { "DISPC_GFX_POSITION", 0x0088}, -+ { "DISPC_GFX_SIZE", 0x008C}, -+ { "DISPC_GFX_ATTRIBUTES", 0x00A0}, -+ { "DISPC_GFX_FIFO_THRESHOLD", 0x00A4}, -+ { "DISPC_GFX_FIFO_SIZE_STATUS", 0x00A8}, -+ { "DISPC_GFX_ROW_INC", 0x00AC}, -+ { "DISPC_GFX_PIXEL_INC", 0x00B0}, -+ { "DISPC_GFX_WINDOW_SKIP", 0x00B4}, -+ { "DISPC_GFX_TABLE_BA", 0x00B8}, -+ }; -+ for (i = 0; i < ARRAY_SIZE(dss_regs); i++) { -+ printk(KERN_DEBUG "%-20s: %08x\n", -+ dss_regs[i].name, dss_read_reg(dss_regs[i].idx)); -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(dispc_regs); i++) { -+ printk(KERN_DEBUG "%-20s: %08x\n", -+ dispc_regs[i].name, dispc_read_reg(dispc_regs[i].idx)); -+ } -+} -+#else -+static inline void omap_dispc_dump(void) -+{ -+} -+#endif -+ - static void omap_dispc_suspend(void) - { - if (dispc.update_mode == OMAPFB_AUTO_UPDATE) { -+ omap_dispc_dump(); - init_completion(&dispc.frame_done); -+ if (omap_dispc_request_irq(DISPC_IRQ_FRAMEMASK, -+ omap_dispc_frame_done, NULL) < 0) -+ BUG(); - omap_dispc_enable_lcd_out(0); -+ - if (!wait_for_completion_timeout(&dispc.frame_done, - msecs_to_jiffies(500))) { - dev_err(dispc.fbdev->dev, - "timeout waiting for FRAME DONE\n"); - } -+ omap_dispc_free_irq(DISPC_IRQ_FRAMEMASK, -+ omap_dispc_frame_done, NULL); - enable_lcd_clocks(0); - } - } - -+static void sdi_enable(void); -+ - static void omap_dispc_resume(void) - { - if (dispc.update_mode == OMAPFB_AUTO_UPDATE) { -@@ -979,11 +1770,12 @@ static void omap_dispc_resume(void) - set_lcd_timings(); - load_palette(); - } -+ sdi_enable(); - omap_dispc_enable_lcd_out(1); -+ omap_dispc_dump(); - } - } - -- - static int omap_dispc_update_window(struct fb_info *fbi, - struct omapfb_update_window *win, - void (*complete_callback)(void *arg), -@@ -1361,12 +2153,109 @@ static void cleanup_fbmem(void) - } - } - -+ -+#ifdef CONFIG_FB_OMAP_VENC -+void omap_dispc_set_venc_clocks(void) -+{ -+ unsigned int l; -+ enable_lcd_clocks(1); -+ -+ l = dss_read_reg(DSS_CONTROL); -+ l |= 1 << 4; /* venc dac demen */ -+ l |= 1 << 3; /* venc clock 4x enable */ -+ l &= ~3; /* venc clock mode */ -+ dss_write_reg(DSS_CONTROL, l); -+ -+ enable_lcd_clocks(0); -+} -+ -+void omap_dispc_set_venc_output(enum omap_dispc_venc_type type) -+{ -+ unsigned int l; -+ -+ /* venc out selection. 
0 = comp, 1 = svideo */ -+ if (type == OMAP_DISPC_VENC_TYPE_COMPOSITE) -+ l = 0; -+ else if (type == OMAP_DISPC_VENC_TYPE_SVIDEO) -+ l = 1 << 6; -+ else -+ BUG(); -+ -+ enable_lcd_clocks(1); -+ l = dss_read_reg(DSS_CONTROL); -+ l = (l & ~(1 << 6)) | l; -+ dss_write_reg(DSS_CONTROL, l); -+ enable_lcd_clocks(0); -+} -+ -+void omap_dispc_set_dac_pwrdn_bgz(int enable) -+{ -+ int l; -+ -+ enable_lcd_clocks(1); -+ /* DAC Power-Down Control */ -+ l = dss_read_reg(DSS_CONTROL); -+ l = (l & ~(1 << 5)) | (enable ? 1 << 5 : 0); -+ dss_write_reg(DSS_CONTROL, l); -+ enable_lcd_clocks(0); -+} -+#endif -+ -+static void sdi_init(void) -+{ -+ u32 l; -+ -+ l = dss_read_reg(DSS_SDI_CONTROL); -+ l |= (0xF << 15) | (0x1 << 2) | (0x2 << 0); -+ dss_write_reg(DSS_SDI_CONTROL, l); -+ -+ l = dss_read_reg(DSS_PLL_CONTROL); -+ /* FSEL | NDIV | MDIV */ -+ l |= (0x7 << 22) | (0xB << 11) | (0xB4 << 1); -+ dss_write_reg(DSS_PLL_CONTROL, l); -+ -+ /* Reset SDI PLL */ -+ l |= (1 << 18); -+ dss_write_reg(DSS_PLL_CONTROL, l); -+ udelay(1); -+ -+ /* Lock SDI PLL */ -+ l |= (1 << 28); -+ dss_write_reg(DSS_PLL_CONTROL, l); -+ -+ /* Waiting for PLL lock request to complete */ -+ while(dss_read_reg(DSS_SDI_STATUS) & (1 << 6)); -+ -+ /* Clearing PLL_GO bit */ -+ l &= ~(1 << 28); -+ dss_write_reg(DSS_PLL_CONTROL, l); -+ -+ /* Waiting for PLL to lock */ -+ while(!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))); -+} -+ -+static void sdi_enable(void) -+{ -+ u32 l; -+ -+ l = dispc_read_reg(DISPC_CONTROL); -+ l |= (1 << 29) | (1 << 27) | (1 << 3); -+ dispc_write_reg(DISPC_CONTROL, l); -+ -+ sdi_init(); -+ -+ /* Enable SDI */ -+ l |= (1 << 28); -+ dispc_write_reg(DISPC_CONTROL, l); -+ mdelay(2); -+} -+ - static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode, - struct omapfb_mem_desc *req_vram) - { - int r; - u32 l; -- struct lcd_panel *panel = fbdev->panel; -+ struct lcd_panel *panel = fbdev->lcd_panel; - int tmo = 10000; - int skip_init = 0; - int i; -@@ -1383,12 +2272,18 @@ static int omap_dispc_init(struct omapfb - dispc.ext_mode = ext_mode; - - init_completion(&dispc.frame_done); -+ spin_lock_init(&dispc.lock); - - if ((r = get_dss_clocks()) < 0) - goto fail0; - - enable_lcd_clocks(1); - -+/* If built as module we _have_ to reset, since boot time arch code -+ * disabled the clocks, thus the configuration done by the bootloader -+ * is lost. -+ */ -+#ifndef CONFIG_FB_OMAP_MODULE - #ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT - l = dispc_read_reg(DISPC_CONTROL); - /* LCD enabled ? */ -@@ -1397,6 +2292,7 @@ static int omap_dispc_init(struct omapfb - skip_init = 1; - } - #endif -+#endif - - if (!skip_init) { - /* Reset monitoring works only w/ the 54M clk */ -@@ -1463,8 +2359,6 @@ static int omap_dispc_init(struct omapfb - setup_plane_fifo(1, ext_mode); - setup_plane_fifo(2, ext_mode); - -- setup_color_conv_coef(); -- - set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT); - set_load_mode(DISPC_LOAD_FRAME_ONLY); - -@@ -1474,12 +2368,22 @@ static int omap_dispc_init(struct omapfb - set_lcd_timings(); - } else - set_lcd_data_lines(panel->bpp); -+ - enable_rfbi_mode(ext_mode); -+ -+ sdi_enable(); - } - - l = dispc_read_reg(DISPC_REVISION); - pr_info("omapfb: DISPC version %d.%d initialized\n", - l >> 4 & 0x0f, l & 0x0f); -+ if (skip_init && !ext_mode) { -+ /* Since the bootloader already enabled the display, and the -+ * clocks are enabled by the arch FB code, we can set the -+ * update mode already here. 
-+ */ -+ dispc.update_mode = OMAPFB_AUTO_UPDATE; -+ } - enable_lcd_clocks(0); - - return 0; -@@ -1497,12 +2401,8 @@ fail0: - - static void omap_dispc_cleanup(void) - { -- int i; -- - omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED); - /* This will also disable clocks that are on */ -- for (i = 0; i < dispc.mem_desc.region_cnt; i++) -- omap_dispc_enable_plane(i, 0); - cleanup_fbmem(); - free_palette_ram(); - free_irq(INT_24XX_DSS_IRQ, dispc.fbdev); -@@ -1518,12 +2418,11 @@ const struct lcd_ctrl omap2_int_ctrl = { - .set_update_mode = omap_dispc_set_update_mode, - .get_update_mode = omap_dispc_get_update_mode, - .update_window = omap_dispc_update_window, -+ .sync = omap_dispc_sync_wrapper, - .suspend = omap_dispc_suspend, - .resume = omap_dispc_resume, - .setup_plane = omap_dispc_setup_plane, - .setup_mem = omap_dispc_setup_mem, -- .set_scale = omap_dispc_set_scale, -- .enable_plane = omap_dispc_enable_plane, - .set_color_key = omap_dispc_set_color_key, - .get_color_key = omap_dispc_get_color_key, - .mmap = omap_dispc_mmap_user, -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/dispc.h linux-omap-2.6.28-nokia1/drivers/video/omap/dispc.h ---- linux-omap-2.6.28-omap1/drivers/video/omap/dispc.h 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/dispc.h 2011-06-22 13:19:33.143063270 +0200 -@@ -2,6 +2,7 @@ - #define _DISPC_H - - #include -+#include - - #define DISPC_PLANE_GFX 0 - #define DISPC_PLANE_VID1 1 -@@ -32,7 +33,20 @@ - #define DISPC_TFT_DATA_LINES_18 2 - #define DISPC_TFT_DATA_LINES_24 3 - -+enum omap_dispc_venc_type { -+ OMAP_DISPC_VENC_TYPE_COMPOSITE, -+ OMAP_DISPC_VENC_TYPE_SVIDEO, -+}; -+ -+extern void omap_dispc_set_venc_output(enum omap_dispc_venc_type type); -+extern void omap_dispc_set_dac_pwrdn_bgz(int enable); -+extern void omap_dispc_set_venc_clocks(void); -+ -+extern void omap_dispc_set_plane_base(int plane, u32 paddr); -+extern void omap_dispc_get_plane_base(int plane, u32 *paddr); -+ - extern void omap_dispc_set_lcd_size(int width, int height); -+extern void omap_dispc_set_digit_size(int x, int y); - - extern void omap_dispc_enable_lcd_out(int enable); - extern void omap_dispc_enable_digit_out(int enable); -@@ -44,4 +58,26 @@ extern void omap_dispc_free_irq(unsigned - - extern const struct lcd_ctrl omap2_int_ctrl; - -+#ifdef CONFIG_FB_OMAP_VENC -+/* in venc.c */ -+enum omapfb_tv_std { -+ OMAPFB_TV_STD_PAL = 0, -+ OMAPFB_TV_STD_NTSC, -+}; -+ -+extern int venc_init(struct lcd_panel **digital_panel); -+extern void venc_exit(void); -+extern int venc_change_tv_standard(enum omapfb_tv_std tv_standard); -+extern enum omapfb_tv_std venc_query_tv_standard(void); -+#else -+static inline int venc_init(struct lcd_panel **digital_panel) -+{ -+ return 0; -+} -+ -+static inline void venc_exit(void) -+{ -+} -+#endif -+ - #endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/hwa742.c linux-omap-2.6.28-nokia1/drivers/video/omap/hwa742.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/hwa742.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/hwa742.c 2011-06-22 13:19:33.143063270 +0200 -@@ -25,9 +25,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #define HWA742_REV_CODE_REG 0x0 -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/Kconfig linux-omap-2.6.28-nokia1/drivers/video/omap/Kconfig ---- linux-omap-2.6.28-omap1/drivers/video/omap/Kconfig 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/Kconfig 2011-06-22 13:19:33.143063270 
+0200 -@@ -1,6 +1,7 @@ - config FB_OMAP - tristate "OMAP frame buffer support (EXPERIMENTAL)" -- depends on FB && ARCH_OMAP -+ depends on FB && ARCH_OMAP && (OMAP2_DSS = "n") -+ - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT -@@ -63,16 +64,18 @@ config FB_OMAP_MANUAL_UPDATE - the external frame buffer is required. If unsure, say N. - - config FB_OMAP_LCD_MIPID -- bool "MIPI DBI-C/DCS compatible LCD support" -- depends on FB_OMAP && SPI_MASTER && CBUS_TAHVO -+ tristate "MIPI DBI-C/DCS compatible LCD support" -+ depends on FB_OMAP && SPI_MASTER -+ select BACKLIGHT_CLASS_DEVICE - help - Say Y here if you want to have support for LCDs compatible with -- the Mobile Industry Processor Interface DBI-C/DCS -- specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3) -+ the Mobile Industry Processor Interface DBI-C/DCS specification. -+ (Supported LCDs: Philips LPH8923, Sharp LS041Y3, Epson L4F00311, -+ Sony ACX565AKM) - - config FB_OMAP_BOOTLOADER_INIT - bool "Check bootloader initialization" -- depends on FB_OMAP -+ depends on FB_OMAP || FB_OMAP2 - help - Say Y here if you want to enable checking if the bootloader has - already initialized the display controller. In this case the -@@ -99,4 +102,9 @@ config FB_OMAP_DMA_TUNE - answer yes. Answer no if you have a dedicated video - memory, or don't use any of the accelerated features. - -+config FB_OMAP_VENC -+ bool "OMAP Video Encoder support" -+ depends on FB_OMAP && (ARCH_OMAP2 || ARCH_OMAP3) -+ help -+ TV-out support - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_ams_delta.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_ams_delta.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_ams_delta.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_ams_delta.c 2011-06-22 13:19:33.143063270 +0200 -@@ -24,13 +24,13 @@ - - #include - #include -+#include - - #include - #include - - #include - #include --#include - - #define AMS_DELTA_DEFAULT_CONTRAST 112 - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_apollon.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_apollon.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_apollon.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_apollon.c 2011-06-22 13:19:33.143063270 +0200 -@@ -23,10 +23,10 @@ - - #include - #include -+#include - - #include - #include --#include - - /* #define USE_35INCH_LCD 1 */ - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcdc.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcdc.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcdc.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcdc.c 2011-06-22 13:19:33.153063270 +0200 -@@ -28,9 +28,9 @@ - #include - #include - #include -+#include - - #include --#include - - #include - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_h3.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_h3.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_h3.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_h3.c 2011-06-22 13:19:33.153063270 +0200 -@@ -22,9 +22,9 @@ - #include - #include - #include -+#include - - #include --#include - - #define MODULE_NAME "omapfb-lcd_h3" - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_h4.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_h4.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_h4.c 2011-06-22 13:14:21.393067702 +0200 -+++ 
linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_h4.c 2011-06-22 13:19:33.153063270 +0200 -@@ -21,8 +21,7 @@ - - #include - #include -- --#include -+#include - - static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) - { -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_inn1510.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_inn1510.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_inn1510.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_inn1510.c 2011-06-22 13:19:33.153063270 +0200 -@@ -22,9 +22,9 @@ - #include - #include - #include -+#include - - #include --#include - - static int innovator1510_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_inn1610.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_inn1610.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_inn1610.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_inn1610.c 2011-06-22 13:19:33.153063270 +0200 -@@ -21,9 +21,9 @@ - - #include - #include -+#include - - #include --#include - - #define MODULE_NAME "omapfb-lcd_h3" - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_ldp.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_ldp.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_ldp.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_ldp.c 2011-06-22 13:19:33.153063270 +0200 -@@ -25,10 +25,10 @@ - #include - #include - #include -+#include - - #include - #include --#include - #include - - #define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES) -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_mipid.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_mipid.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_mipid.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_mipid.c 2011-06-22 13:19:33.153063270 +0200 -@@ -22,12 +22,10 @@ - #include - #include - #include -+#include - --#include - #include - --#include "../../cbus/tahvo.h" -- - #define MIPID_MODULE_NAME "lcd_mipid" - - #define MIPID_CMD_READ_DISP_ID 0x04 -@@ -40,20 +38,36 @@ - #define MIPID_CMD_SLEEP_OUT 0x11 - #define MIPID_CMD_DISP_OFF 0x28 - #define MIPID_CMD_DISP_ON 0x29 -- --#define MIPID_VER_LPH8923 3 --#define MIPID_VER_LS041Y3 4 -+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51 -+#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52 -+#define MIPID_CMD_WRITE_CTRL_DISP 0x53 -+ -+#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5) -+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4) -+#define CTRL_DISP_BACKLIGHT_ON (1 << 2) -+#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1) -+ -+#define MIPID_CMD_READ_CTRL_DISP 0x54 -+#define MIPID_CMD_WRITE_CABC 0x55 -+#define MIPID_CMD_READ_CABC 0x56 -+ -+#define MIPID_VER_LPH8923 1 -+#define MIPID_VER_LS041Y3 2 -+#define MIPID_VER_L4F00311 3 -+#define MIPID_VER_ACX565AKM 4 - - #define MIPID_ESD_CHECK_PERIOD msecs_to_jiffies(5000) - - #define to_mipid_device(p) container_of(p, struct mipid_device, \ - panel) - struct mipid_device { -+ struct backlight_device *bl_dev; - int enabled; - int model; - int revision; - u8 display_id[3]; -- unsigned int saved_bklight_level; -+ int has_bc:1; -+ int has_cabc:1; - unsigned long hw_guard_end; /* next value of jiffies - when we can issue the - next sleep in/out command */ -@@ -73,8 +87,7 @@ static void mipid_transfer(struct mipid_ - int wlen, u8 *rbuf, int rlen) - { - struct spi_message m; -- struct spi_transfer *x, xfer[4]; -- 
u16 w; -+ struct spi_transfer *x, xfer[5]; - int r; - - BUG_ON(md->spi == NULL); -@@ -88,6 +101,16 @@ static void mipid_transfer(struct mipid_ - x->tx_buf = &cmd; - x->bits_per_word= 9; - x->len = 2; -+ -+ if (rlen > 1 && wlen == 0) { -+ /* -+ * Between the command and the response data there is a -+ * dummy clock cycle. Add an extra bit after the command -+ * word to account for this. -+ */ -+ x->bits_per_word = 10; -+ cmd <<= 1; -+ } - spi_message_add_tail(x, &m); - - if (wlen) { -@@ -100,30 +123,14 @@ static void mipid_transfer(struct mipid_ - - if (rlen) { - x++; -- x->rx_buf = &w; -- x->len = 1; -+ x->rx_buf = rbuf; -+ x->len = rlen; - spi_message_add_tail(x, &m); -- -- if (rlen > 1) { -- /* Arrange for the extra clock before the first -- * data bit. -- */ -- x->bits_per_word = 9; -- x->len = 2; -- -- x++; -- x->rx_buf = &rbuf[1]; -- x->len = rlen - 1; -- spi_message_add_tail(x, &m); -- } - } - - r = spi_sync(md->spi, &m); - if (r < 0) - dev_dbg(&md->spi->dev, "spi_sync %d\n", r); -- -- if (rlen) -- rbuf[0] = w & 0xff; - } - - static inline void mipid_cmd(struct mipid_device *md, int cmd) -@@ -213,31 +220,130 @@ static void set_display_state(struct mip - mipid_cmd(md, cmd); - } - -+static void enable_backlight_ctrl(struct mipid_device *md, int enable) -+{ -+ u16 ctrl; -+ -+ mipid_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1); -+ if (enable) { -+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON | -+ CTRL_DISP_BACKLIGHT_ON; -+ } else { -+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON | -+ CTRL_DISP_BACKLIGHT_ON); -+ } -+ -+ ctrl |= 1 << 8; -+ mipid_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2); -+} -+ -+static void mipid_set_brightness(struct mipid_device *md, int level) -+{ -+ int bv; -+ -+ bv = level | (1 << 8); -+ mipid_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2); -+ -+ if (level) -+ enable_backlight_ctrl(md, 1); -+ else -+ enable_backlight_ctrl(md, 0); -+} -+ -+static int mipid_get_actual_brightness(struct mipid_device *md) -+{ -+ u8 bv; -+ -+ mipid_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1); -+ -+ return bv; -+} -+ -+static int mipid_bl_update_status(struct backlight_device *dev) -+{ -+ struct mipid_device *md = dev_get_drvdata(&dev->dev); -+ struct mipid_platform_data *pd = md->spi->dev.platform_data; -+ int r; -+ int level; -+ -+ dev_dbg(&md->spi->dev, "%s\n", __func__); -+ -+ mutex_lock(&md->mutex); -+ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) -+ level = dev->props.brightness; -+ else -+ level = 0; -+ -+ r = 0; -+ if (md->has_bc) -+ mipid_set_brightness(md, level); -+ else -+ if (pd->set_bklight_level != NULL) -+ pd->set_bklight_level(pd, level); -+ else -+ r = -ENODEV; -+ -+ mutex_unlock(&md->mutex); -+ -+ return r; -+} -+ -+static int mipid_bl_get_intensity(struct backlight_device *dev) -+{ -+ struct mipid_device *md = dev_get_drvdata(&dev->dev); -+ struct mipid_platform_data *pd = md->spi->dev.platform_data; -+ -+ dev_dbg(&dev->dev, "%s\n", __func__); -+ -+ if (!md->has_bc && pd->set_bklight_level == NULL) -+ return -ENODEV; -+ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) { -+ if (md->has_bc) -+ return mipid_get_actual_brightness(md); -+ else -+ return dev->props.brightness; -+ } -+ -+ return 0; -+} -+ -+static struct backlight_ops mipid_bl_ops = { -+ .get_brightness = mipid_bl_get_intensity, -+ .update_status = mipid_bl_update_status, -+}; -+ -+/* Old backlight interface, to be removed */ - static int mipid_set_bklight_level(struct lcd_panel *panel, unsigned int level) 
- { - struct mipid_device *md = to_mipid_device(panel); -+ struct backlight_device *bldev = md->bl_dev; - -- if (level > tahvo_get_max_backlight_level()) -+ if (level > bldev->props.max_brightness) - return -EINVAL; -- if (!md->enabled) { -- md->saved_bklight_level = level; -- return 0; -- } -- tahvo_set_backlight_level(level); - -- return 0; -+ bldev->props.brightness = level; -+ return mipid_bl_update_status(bldev); - } - - static unsigned int mipid_get_bklight_level(struct lcd_panel *panel) - { -- return tahvo_get_backlight_level(); -+ struct mipid_device *md = to_mipid_device(panel); -+ struct backlight_device *bldev = md->bl_dev; -+ -+ return bldev->props.brightness; - } - - static unsigned int mipid_get_bklight_max(struct lcd_panel *panel) - { -- return tahvo_get_max_backlight_level(); --} -+ struct mipid_device *md = to_mipid_device(panel); -+ struct backlight_device *bldev = md->bl_dev; - -+ return bldev->props.max_brightness; -+} - - static unsigned long mipid_get_caps(struct lcd_panel *panel) - { -@@ -265,6 +371,7 @@ static u16 read_first_pixel(struct mipid - (blue >> 3); - break; - default: -+ pixel = 0; - BUG(); - } - -@@ -391,7 +498,7 @@ static void mipid_esd_stop_check(struct - - static void mipid_esd_work(struct work_struct *work) - { -- struct mipid_device *md = container_of(work, struct mipid_device, esd_work.work); -+ struct mipid_device *md =container_of(work, struct mipid_device, esd_work.work); - - mutex_lock(&md->mutex); - md->esd_check(md); -@@ -413,7 +520,6 @@ static int mipid_enable(struct lcd_panel - md->enabled = 1; - send_init_string(md); - set_display_state(md, 1); -- mipid_set_bklight_level(panel, md->saved_bklight_level); - mipid_esd_start_check(md); - - mutex_unlock(&md->mutex); -@@ -435,8 +541,6 @@ static void mipid_disable(struct lcd_pan - mutex_unlock(&md->mutex); - return; - } -- md->saved_bklight_level = mipid_get_bklight_level(panel); -- mipid_set_bklight_level(panel, 0); - set_display_state(md, 0); - set_sleep_mode(md, 1); - md->enabled = 0; -@@ -458,10 +562,129 @@ static int panel_enabled(struct mipid_de - return enabled; - } - -+static void set_cabc_mode(struct mipid_device *md, int mode) -+{ -+ u16 cabc_ctrl; -+ -+ cabc_ctrl = 0; -+ mipid_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1); -+ cabc_ctrl &= ~3; -+ cabc_ctrl |= (1 << 8) | (mode & 3); -+ mipid_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2); -+} -+ -+static int get_cabc_mode(struct mipid_device *md) -+{ -+ u8 cabc_ctrl; -+ -+ mipid_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1); -+ return cabc_ctrl & 3; -+} -+ -+static const char *cabc_modes[] = { -+ "off", /* used also always when CABC is not supported */ -+ "ui", -+ "still-image", -+ "moving-image", -+}; -+ -+static ssize_t show_cabc_mode(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct mipid_device *md = dev_get_drvdata(dev); -+ const char *mode_str; -+ int mode; -+ int len; -+ -+ if (!md->has_cabc) -+ mode = 0; -+ else -+ mode = get_cabc_mode(md); -+ mode_str = "unknown"; -+ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) -+ mode_str = cabc_modes[mode]; -+ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); -+ -+ return len < PAGE_SIZE - 1 ? 
len : PAGE_SIZE - 1; -+} -+ -+static ssize_t store_cabc_mode(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct mipid_device *md = dev_get_drvdata(dev); -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { -+ const char *mode_str = cabc_modes[i]; -+ int cmp_len = strlen(mode_str); -+ -+ if (count > 0 && buf[count - 1] == '\n') -+ count--; -+ if (count != cmp_len) -+ continue; -+ -+ if (strncmp(buf, mode_str, cmp_len) == 0) -+ break; -+ } -+ -+ if (i == ARRAY_SIZE(cabc_modes)) -+ return -EINVAL; -+ -+ if (!md->has_cabc && i != 0) -+ return -EINVAL; -+ -+ mutex_lock(&md->mutex); -+ set_cabc_mode(md, i); -+ mutex_unlock(&md->mutex); -+ -+ return count; -+} -+ -+static ssize_t show_cabc_available_modes(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct mipid_device *md = dev_get_drvdata(dev); -+ int len; -+ int i; -+ -+ if (!md->has_cabc) -+ return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); -+ -+ for (i = 0, len = 0; -+ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) -+ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", -+ i ? " " : "", cabc_modes[i], -+ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); -+ -+ return len < PAGE_SIZE ? len : PAGE_SIZE - 1; -+} -+ -+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, -+ show_cabc_mode, store_cabc_mode); -+static DEVICE_ATTR(cabc_available_modes, S_IRUGO, -+ show_cabc_available_modes, NULL); -+ -+static struct attribute *bldev_attrs[] = { -+ &dev_attr_cabc_mode.attr, -+ &dev_attr_cabc_available_modes.attr, -+ NULL, -+}; -+ -+static struct attribute_group bldev_attr_group = { -+ .attrs = bldev_attrs, -+}; -+ - static int mipid_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) - { - struct mipid_device *md = to_mipid_device(panel); -+ struct mipid_platform_data *pd = md->spi->dev.platform_data; -+ struct backlight_device *bldev; -+ int max_brightness; -+ int r; - - md->fbdev = fbdev; - md->esd_wq = create_singlethread_workqueue("mipid_esd"); -@@ -474,10 +697,48 @@ static int mipid_init(struct lcd_panel * - - md->enabled = panel_enabled(md); - -+ if (!pd->bc_connected) { -+ md->has_bc = 0; -+ md->has_cabc = 0; -+ } -+ -+ bldev = backlight_device_register("acx565akm", &md->spi->dev, -+ md, &mipid_bl_ops); -+ md->bl_dev = bldev; -+ -+ if (md->has_cabc) { -+ r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); -+ if (r) { -+ dev_err(&bldev->dev, "failed to create sysfs files\n"); -+ backlight_device_unregister(bldev); -+ return r; -+ } -+ } -+ -+ bldev->props.fb_blank = FB_BLANK_UNBLANK; -+ bldev->props.power = FB_BLANK_UNBLANK; -+ -+ if (md->has_bc) -+ max_brightness = 255; -+ else -+ max_brightness = pd->get_bklight_max(pd); -+ -+ bldev->props.max_brightness = max_brightness; -+ bldev->props.brightness = max_brightness; -+ -+ mipid_bl_update_status(bldev); -+ -+ if (md->has_bc && pd->set_bklight_level != NULL) { -+ /* -+ * If both LCD BC and some other board specific BC is present -+ * make sure that only LCD BC is active. 
-+ */ -+ msleep(30); -+ pd->set_bklight_level(pd, 0); -+ } -+ - if (md->enabled) - mipid_esd_start_check(md); -- else -- md->saved_bklight_level = mipid_get_bklight_level(panel); - - return 0; - } -@@ -489,9 +750,15 @@ static void mipid_cleanup(struct lcd_pan - if (md->enabled) - mipid_esd_stop_check(md); - destroy_workqueue(md->esd_wq); -+ -+ if (md->has_cabc) -+ sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group); -+ -+ backlight_device_unregister(md->bl_dev); - } - - static struct lcd_panel mipid_panel = { -+ .name = "mipid", - .config = OMAP_LCDC_PANEL_TFT, - - .bpp = 16, -@@ -531,6 +798,23 @@ static int mipid_detect(struct mipid_dev - md->display_id[0], md->display_id[1], md->display_id[2]); - - switch (md->display_id[0]) { -+ case 0x10: -+ md->model = MIPID_VER_ACX565AKM; -+ md->panel.name = "acx565akm"; -+ md->has_bc = 1; -+ md->has_cabc = 1; -+ md->panel.pixel_clock = 24360; -+ md->panel.hsw = 4; -+ md->panel.hfp = 16; -+ md->panel.hbp = 12; -+ md->panel.vsw = 3; -+ md->panel.vfp = 3; -+ md->panel.vbp = 3; -+ break; -+ case 0x29: -+ md->model = MIPID_VER_L4F00311; -+ md->panel.name = "l4f00311"; -+ break; - case 0x45: - md->model = MIPID_VER_LPH8923; - md->panel.name = "lph8923"; -@@ -548,8 +832,8 @@ static int mipid_detect(struct mipid_dev - - md->revision = md->display_id[1]; - md->panel.data_lines = pdata->data_lines; -- pr_info("omapfb: %s rev %02x LCD detected\n", -- md->panel.name, md->revision); -+ pr_info("omapfb: %s rev %02x LCD detected, %d data lines\n", -+ md->panel.name, md->revision, md->panel.data_lines); - - return 0; - } -@@ -565,7 +849,7 @@ static int mipid_spi_probe(struct spi_de - return -ENOMEM; - } - -- spi->mode = SPI_MODE_0; -+ spi->mode = SPI_MODE_3; - md->spi = spi; - dev_set_drvdata(&spi->dev, md); - md->panel = mipid_panel; -@@ -583,7 +867,7 @@ static int mipid_spi_remove(struct spi_d - { - struct mipid_device *md = dev_get_drvdata(&spi->dev); - -- mipid_disable(&md->panel); -+ omapfb_unregister_panel(&md->panel, md->fbdev); - kfree(md); - - return 0; -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_omap2evm.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_omap2evm.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_omap2evm.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_omap2evm.c 2011-06-22 13:19:33.153063270 +0200 -@@ -25,9 +25,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #define LCD_PANEL_ENABLE_GPIO 154 -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_omap3beagle.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_omap3beagle.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_omap3beagle.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_omap3beagle.c 2011-06-22 13:19:33.153063270 +0200 -@@ -24,9 +24,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #define LCD_PANEL_ENABLE_GPIO 170 -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_omap3evm.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_omap3evm.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_omap3evm.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_omap3evm.c 2011-06-22 13:19:33.153063270 +0200 -@@ -24,9 +24,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #define LCD_PANEL_ENABLE_GPIO 153 -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_osk.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_osk.c 
---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_osk.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_osk.c 2011-06-22 13:19:33.153063270 +0200 -@@ -22,10 +22,10 @@ - - #include - #include -+#include - - #include - #include --#include - - static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) - { -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_overo.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_overo.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_overo.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_overo.c 2011-06-22 13:19:33.153063270 +0200 -@@ -22,10 +22,10 @@ - #include - #include - #include -+#include - - #include - #include --#include - #include - - #define LCD_ENABLE 144 -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_palmte.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_palmte.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_palmte.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_palmte.c 2011-06-22 13:19:33.153063270 +0200 -@@ -22,9 +22,9 @@ - #include - #include - #include -+#include - - #include --#include - - static int palmte_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_palmtt.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_palmtt.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_palmtt.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_palmtt.c 2011-06-22 13:19:33.153063270 +0200 -@@ -28,9 +28,9 @@ GPIO13 - screen blanking - #include - #include - #include -+#include - - #include --#include - - static int palmtt_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_palmz71.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_palmz71.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_palmz71.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_palmz71.c 2011-06-22 13:19:33.153063270 +0200 -@@ -23,8 +23,7 @@ - #include - #include - #include -- --#include -+#include - - static int palmz71_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_p2.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_p2.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_p2.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_p2.c 2011-06-22 13:19:33.153063270 +0200 -@@ -24,10 +24,10 @@ - #include - #include - #include -+#include - - #include - #include --#include - - /* - * File: epson-md-tft.h -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/lcd_2430sdp.c linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_2430sdp.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/lcd_2430sdp.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/lcd_2430sdp.c 2011-06-22 13:19:33.143063270 +0200 -@@ -26,9 +26,9 @@ - #include - #include - #include -+#include - - #include --#include - #include - - #define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91 -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/Makefile linux-omap-2.6.28-nokia1/drivers/video/omap/Makefile ---- linux-omap-2.6.28-omap1/drivers/video/omap/Makefile 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/Makefile 2011-06-22 13:19:33.143063270 +0200 -@@ -6,6 +6,8 
@@ obj-$(CONFIG_FB_OMAP) += omapfb.o - - objs-yy := omapfb_main.o - -+objs-y$(CONFIG_FB_OMAP_VENC) += venc.o -+ - objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o - objs-y$(CONFIG_ARCH_OMAP2) += dispc.o - objs-y$(CONFIG_ARCH_OMAP3) += dispc.o -@@ -33,8 +35,8 @@ objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp - objs-y$(CONFIG_MACH_OMAP2EVM) += lcd_omap2evm.o - objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o - objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o --objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o - objs-y$(CONFIG_MACH_OVERO) += lcd_overo.o - --omapfb-objs := $(objs-yy) -+obj-$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o - -+omapfb-objs := $(objs-yy) -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/omapfb_main.c linux-omap-2.6.28-nokia1/drivers/video/omap/omapfb_main.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/omapfb_main.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/omapfb_main.c 2011-06-22 13:19:33.153063270 +0200 -@@ -27,9 +27,9 @@ - #include - #include - #include -+#include - - #include --#include - - #include "lcdc.h" - #include "dispc.h" -@@ -163,7 +163,7 @@ static int ctrl_init(struct omapfb_devic - } - - if (!fbdev->mem_desc.region_cnt) { -- struct lcd_panel *panel = fbdev->panel; -+ struct lcd_panel *panel = fbdev->lcd_panel; - int def_size; - int bpp = panel->bpp; - -@@ -198,36 +198,89 @@ static void ctrl_cleanup(struct omapfb_d - fbdev->ctrl->cleanup(); - } - -+static int ctrl_change_mode(struct fb_info *fbi); -+static void set_fb_fix(struct fb_info *fbi); -+ -+/* -+ * Check if some other plane is cloning this one, and if so, check if -+ * they differ and whether the changes should be propagated to the -+ * cloned framebuffer as well. -+ */ -+static void update_cloned_var(struct fb_info *fbi) -+{ -+ struct omapfb_plane_struct *plane = fbi->par; -+ struct omapfb_device *fbdev = plane->fbdev; -+ struct fb_var_screeninfo *var = &fbi->var; -+ -+ struct fb_info *fbinfo; -+ struct omapfb_plane_struct *pla; -+ struct fb_var_screeninfo *clone_var; -+ int i; -+ -+ for (i = 0; i < OMAPFB_PLANE_NUM; i++) { -+ if (i == plane->idx) -+ continue; -+ fbinfo = fbdev->fb_info[i]; -+ pla = fbinfo->par; -+ clone_var = &fbinfo->var; -+ -+ if (!(pla->info.clone_idx & OMAPFB_CLONE_ENABLED)) -+ continue; -+ -+ if ((pla->info.clone_idx & OMAPFB_CLONE_MASK) != plane->idx) -+ continue; -+ -+ /* -+ * The original and cloned framebuffers must have at -+ * least the same resolution and color mode, otherwise -+ * the image gets corrupted. -+ */ -+ if ((clone_var->xres_virtual == var->xres_virtual) && -+ (clone_var->yres_virtual == var->yres_virtual) && -+ (clone_var->bits_per_pixel == var->bits_per_pixel) && -+ (clone_var->nonstd == var->nonstd)) { -+ continue; -+ } -+ -+ *clone_var = *var; -+ set_fb_fix(fbinfo); -+ pla->color_mode = plane->color_mode; -+ ctrl_change_mode(fbinfo); -+ } -+} -+ - /* Must be called with fbdev->rqueue_mutex held. 
*/ - static int ctrl_change_mode(struct fb_info *fbi) - { - int r; - unsigned long offset; -+ unsigned int paddr; - struct omapfb_plane_struct *plane = fbi->par; - struct omapfb_device *fbdev = plane->fbdev; - struct fb_var_screeninfo *var = &fbi->var; - -+ if (plane->info.clone_idx & OMAPFB_CLONE_ENABLED) { -+ unsigned int clone_idx; -+ -+ clone_idx = plane->info.clone_idx & OMAPFB_CLONE_MASK; -+ paddr = fbdev->mem_desc.region[clone_idx].paddr; -+ } else { -+ update_cloned_var(fbi); -+ paddr = fbdev->mem_desc.region[plane->idx].paddr; -+ } -+ - offset = var->yoffset * fbi->fix.line_length + - var->xoffset * var->bits_per_pixel / 8; - -- if (fbdev->ctrl->sync) -- fbdev->ctrl->sync(); -- r = fbdev->ctrl->setup_plane(plane->idx, plane->info.channel_out, -- offset, var->xres_virtual, -- plane->info.pos_x, plane->info.pos_y, -- var->xres, var->yres, plane->color_mode); -- if (r < 0) -- return r; -- -- if (fbdev->ctrl->set_rotate != NULL) -- if((r = fbdev->ctrl->set_rotate(var->rotate)) < 0) -- return r; -+ paddr += offset; - -- if ((fbdev->ctrl->set_scale != NULL) && (plane->idx > 0)) -- r = fbdev->ctrl->set_scale(plane->idx, -- var->xres, var->yres, -- plane->info.out_width, -- plane->info.out_height); -+ r = fbdev->ctrl->setup_plane(plane->idx, plane->info.enabled, -+ plane->info.channel_out, -+ paddr, var->xres_virtual, -+ plane->info.pos_x, plane->info.pos_y, -+ var->xres, var->yres, -+ plane->info.out_width, plane->info.out_height, -+ plane->color_mode, var->rotate); - if (r < 0) - return r; - -@@ -352,7 +405,7 @@ static int omapfb_blank(int blank, struc - if (fbdev->state == OMAPFB_SUSPENDED) { - if (fbdev->ctrl->resume) - fbdev->ctrl->resume(); -- fbdev->panel->enable(fbdev->panel); -+ plane->panel->enable(plane->panel); - fbdev->state = OMAPFB_ACTIVE; - if (fbdev->ctrl->get_update_mode() == - OMAPFB_MANUAL_UPDATE) -@@ -361,7 +414,7 @@ static int omapfb_blank(int blank, struc - break; - case VESA_POWERDOWN: - if (fbdev->state == OMAPFB_ACTIVE) { -- fbdev->panel->disable(fbdev->panel); -+ plane->panel->disable(plane->panel); - if (fbdev->ctrl->suspend) - fbdev->ctrl->suspend(); - fbdev->state = OMAPFB_SUSPENDED; -@@ -385,7 +438,7 @@ static void omapfb_sync(struct fb_info * - - omapfb_rqueue_lock(fbdev); - if (fbdev->ctrl->sync) -- fbdev->ctrl->sync(); -+ fbdev->ctrl->sync(plane->info.channel_out); - omapfb_rqueue_unlock(fbdev); - } - -@@ -401,7 +454,12 @@ static void set_fb_fix(struct fb_info *f - struct omapfb_mem_region *rg; - int bpp; - -- rg = &plane->fbdev->mem_desc.region[plane->idx]; -+ if (plane->info.clone_idx & OMAPFB_CLONE_ENABLED) { -+ rg = &plane->fbdev->mem_desc.region[ -+ plane->info.clone_idx & OMAPFB_CLONE_MASK]; -+ } else { -+ rg = &plane->fbdev->mem_desc.region[plane->idx]; -+ } - fbi->screen_base = rg->vaddr; - fix->smem_start = rg->paddr; - fix->smem_len = rg->size; -@@ -487,9 +545,25 @@ static int set_fb_var(struct fb_info *fb - unsigned long line_size; - int xres_min, xres_max; - int yres_min, yres_max; -+ int can_scale; - struct omapfb_plane_struct *plane = fbi->par; - struct omapfb_device *fbdev = plane->fbdev; -- struct lcd_panel *panel = fbdev->panel; -+ struct lcd_panel *panel = plane->panel; -+ int mem_idx = plane->idx; -+ -+ can_scale = plane->idx != OMAPFB_PLANE_GFX; -+ -+ if (plane->info.clone_idx & OMAPFB_CLONE_ENABLED) { -+ int clone_idx = plane->info.clone_idx & OMAPFB_CLONE_MASK; -+ struct fb_info *orig_fbi = fbdev->fb_info[clone_idx]; -+ struct fb_var_screeninfo *orig_var = &orig_fbi->var; -+ -+ var->xres_virtual = orig_var->xres_virtual; -+ 
var->yres_virtual = orig_var->yres_virtual; -+ var->bits_per_pixel = orig_var->bits_per_pixel; -+ var->nonstd = orig_var->nonstd; -+ mem_idx = clone_idx; -+ } - - if (set_color_mode(plane, var) < 0) - return -EINVAL; -@@ -525,6 +599,14 @@ static int set_fb_var(struct fb_info *fb - return -EINVAL; - } - -+ /* -+ * Video planes can downscale the screen to fit in view, so -+ * there is no need to restrict the framebuffer resolution to -+ * be smaller than the display resolution. -+ */ -+ if (can_scale) -+ xres_max = yres_max = 2048; -+ - if (var->xres < xres_min) - var->xres = xres_min; - if (var->yres < yres_min) -@@ -538,7 +620,7 @@ static int set_fb_var(struct fb_info *fb - var->xres_virtual = var->xres; - if (var->yres_virtual < var->yres) - var->yres_virtual = var->yres; -- max_frame_size = fbdev->mem_desc.region[plane->idx].size; -+ max_frame_size = fbdev->mem_desc.region[mem_idx].size; - line_size = var->xres_virtual * bpp / 8; - if (line_size * var->yres_virtual > max_frame_size) { - /* Try to keep yres_virtual first */ -@@ -684,7 +766,7 @@ static int omapfb_check_var(struct fb_va - - omapfb_rqueue_lock(fbdev); - if (fbdev->ctrl->sync != NULL) -- fbdev->ctrl->sync(); -+ fbdev->ctrl->sync(plane->info.channel_out); - r = set_fb_var(fbi, var); - omapfb_rqueue_unlock(fbdev); - -@@ -722,13 +804,13 @@ int omapfb_update_window_async(struct fb - switch (var->rotate) { - case 0: - case 180: -- xres = fbdev->panel->x_res; -- yres = fbdev->panel->y_res; -+ xres = plane->panel->x_res; -+ yres = plane->panel->y_res; - break; - case 90: - case 270: -- xres = fbdev->panel->y_res; -- yres = fbdev->panel->x_res; -+ xres = plane->panel->y_res; -+ yres = plane->panel->x_res; - break; - default: - return -EINVAL; -@@ -802,16 +884,33 @@ static int omapfb_setup_plane(struct fb_ - { - struct omapfb_plane_struct *plane = fbi->par; - struct omapfb_device *fbdev = plane->fbdev; -- struct lcd_panel *panel = fbdev->panel; -+ struct lcd_panel *panel = plane->panel; - struct omapfb_plane_info old_info; -+ int mem_idx = plane->idx; - int r = 0; - -+ omapfb_rqueue_lock(fbdev); -+ -+ if (pi->channel_out == OMAPFB_CHANNEL_OUT_DIGIT) { -+ if (fbdev->digital_panel == NULL) { -+ r = -EINVAL; -+ goto out; -+ } -+ panel = plane->panel = fbdev->digital_panel; -+ } else { -+ panel = plane->panel = fbdev->lcd_panel; -+ } -+ - if (pi->pos_x + pi->out_width > panel->x_res || -- pi->pos_y + pi->out_height > panel->y_res) -- return -EINVAL; -+ pi->pos_y + pi->out_height > panel->y_res) { -+ r = -EINVAL; -+ goto out; -+ } - -- omapfb_rqueue_lock(fbdev); -- if (pi->enabled && !fbdev->mem_desc.region[plane->idx].size) { -+ if (pi->clone_idx & OMAPFB_CLONE_ENABLED) -+ mem_idx = pi->clone_idx & OMAPFB_CLONE_MASK; -+ -+ if (pi->enabled && !fbdev->mem_desc.region[mem_idx].size) { - /* - * This plane's memory was freed, can't enable it - * until it's reallocated. 
-@@ -819,20 +918,58 @@ static int omapfb_setup_plane(struct fb_ - r = -EINVAL; - goto out; - } -+ -+ if ((pi->clone_idx & OMAPFB_CLONE_MASK) >= OMAPFB_PLANE_NUM) { -+ r = -EINVAL; -+ goto out; -+ } -+ - old_info = plane->info; - plane->info = *pi; -- if (pi->enabled) { -- r = ctrl_change_mode(fbi); -- if (r < 0) { -- plane->info = old_info; -- goto out; -- } -+ if ((pi->clone_idx & OMAPFB_CLONE_ENABLED) && -+ (!(old_info.clone_idx & OMAPFB_CLONE_ENABLED) || -+ (old_info.clone_idx & OMAPFB_CLONE_MASK) != (pi->clone_idx & OMAPFB_CLONE_MASK))) { -+ struct omapfb_plane_struct *s_plane = -+ fbdev->fb_info[pi->clone_idx & OMAPFB_CLONE_MASK]->par; -+ /* -+ * When the cloning is enabled, we copy over the -+ * fb_var_screeninfo from the source framebuffer. We -+ * don't need to call set_fb_var for it because we -+ * copy it from a valid source. -+ */ -+ fbi->var = -+ fbdev->fb_info[pi->clone_idx & OMAPFB_CLONE_MASK]->var; -+ plane->color_mode = s_plane->color_mode; -+ -+ /* Sync framebuffer vaddr pointer with the source */ -+ set_fb_fix(fbi); -+ } else if ((old_info.clone_idx & OMAPFB_CLONE_ENABLED) && -+ !(pi->clone_idx & OMAPFB_CLONE_ENABLED)) { -+ /* -+ * When disabling cloning, revalidate the -+ * fb_var_screeninfo and revert the vaddr pointer. -+ */ -+ set_fb_var(fbi, &fbi->var); -+ set_fb_fix(fbi); - } -- r = fbdev->ctrl->enable_plane(plane->idx, pi->enabled); -+ -+ r = ctrl_change_mode(fbi); - if (r < 0) { - plane->info = old_info; - goto out; - } -+ -+ if (pi->channel_out == OMAPFB_CHANNEL_OUT_DIGIT) { -+ /* -+ * FIXME: We will not consider the possibility that -+ * there might be multiple planes routed to digital -+ * panel. -+ */ -+ if (pi->enabled) -+ plane->panel->enable(plane->panel); -+ else -+ plane->panel->disable(plane->panel); -+ } - out: - omapfb_rqueue_unlock(fbdev); - return r; -@@ -889,7 +1026,7 @@ static int omapfb_setup_mem(struct fb_in - } - - if (fbdev->ctrl->sync) -- fbdev->ctrl->sync(); -+ fbdev->ctrl->sync(plane->info.channel_out); - r = fbdev->ctrl->setup_mem(plane->idx, size, mi->type, &paddr); - if (r < 0) { - /* Revert changes. 
*/ -@@ -1055,7 +1192,7 @@ static void omapfb_get_caps(struct omapf - { - memset(caps, 0, sizeof(*caps)); - fbdev->ctrl->get_caps(plane, caps); -- caps->ctrl |= fbdev->panel->get_caps(fbdev->panel); -+ caps->ctrl |= fbdev->lcd_panel->get_caps(fbdev->lcd_panel); - } - - /* For lcd testing */ -@@ -1201,11 +1338,12 @@ static int omapfb_ioctl(struct fb_info * - r = -EFAULT; - break; - } -- if (!fbdev->panel->run_test) { -+ if (!fbdev->lcd_panel->run_test) { - r = -EINVAL; - break; - } -- r = fbdev->panel->run_test(fbdev->panel, test_num); -+ r = fbdev->lcd_panel->run_test(fbdev->lcd_panel, -+ test_num); - break; - } - case OMAPFB_CTRL_TEST: -@@ -1342,7 +1480,7 @@ static ssize_t omapfb_show_panel_name(st - { - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; - -- return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name); -+ return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->lcd_panel->name); - } - - static ssize_t omapfb_show_bklight_level(struct device *dev, -@@ -1352,9 +1490,10 @@ static ssize_t omapfb_show_bklight_level - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; - int r; - -- if (fbdev->panel->get_bklight_level) { -+ if (fbdev->lcd_panel->get_bklight_level) { - r = snprintf(buf, PAGE_SIZE, "%d\n", -- fbdev->panel->get_bklight_level(fbdev->panel)); -+ fbdev->lcd_panel->get_bklight_level( -+ fbdev->lcd_panel)); - } else - r = -ENODEV; - return r; -@@ -1367,12 +1506,12 @@ static ssize_t omapfb_store_bklight_leve - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; - int r; - -- if (fbdev->panel->set_bklight_level) { -+ if (fbdev->lcd_panel->set_bklight_level) { - unsigned int level; - - if (sscanf(buf, "%10d", &level) == 1) { -- r = fbdev->panel->set_bklight_level(fbdev->panel, -- level); -+ r = fbdev->lcd_panel->set_bklight_level( -+ fbdev->lcd_panel, level); - } else - r = -EINVAL; - } else -@@ -1386,9 +1525,10 @@ static ssize_t omapfb_show_bklight_max(s - struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data; - int r; - -- if (fbdev->panel->get_bklight_level) { -+ if (fbdev->lcd_panel->get_bklight_level) { - r = snprintf(buf, PAGE_SIZE, "%d\n", -- fbdev->panel->get_bklight_max(fbdev->panel)); -+ fbdev->lcd_panel->get_bklight_max( -+ fbdev->lcd_panel)); - } else - r = -ENODEV; - return r; -@@ -1434,6 +1574,64 @@ static struct attribute_group ctrl_attr_ - .attrs = ctrl_attrs, - }; - -+#ifdef CONFIG_FB_OMAP_VENC -+ -+static ssize_t omapfb_show_venc_tv_standard(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ enum omapfb_tv_std standard; -+ char *std_str = 0; -+ -+ standard = venc_query_tv_standard(); -+ switch (standard) { -+ case OMAPFB_TV_STD_PAL: -+ std_str = "pal"; -+ break; -+ case OMAPFB_TV_STD_NTSC: -+ std_str = "ntsc"; -+ break; -+ default: -+ std_str = "unknown"; -+ break; -+ } -+ -+ return snprintf(buf, PAGE_SIZE, "%s\n", std_str); -+} -+ -+static ssize_t omapfb_store_venc_tv_standard(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t size) -+{ -+ enum omapfb_tv_std standard; -+ int r = 0, s; -+ char str[8]; -+ -+ strncpy(str, buf, size < sizeof(str) ? size : sizeof(str)); -+ str[sizeof(str) - 1] = 0; -+ for (s = 0; s < sizeof(str); s++) -+ if (str[s] == '\n') { -+ str[s] = 0; -+ break; -+ } -+ -+ if (!strcmp(str, "pal")) -+ standard = OMAPFB_TV_STD_PAL; -+ else if (!strcmp(str, "ntsc")) -+ standard = OMAPFB_TV_STD_NTSC; -+ else -+ r = -EINVAL; -+ -+ if (!r) -+ r = venc_change_tv_standard(standard); -+ -+ return r ? 
r : size; -+} -+ -+static DEVICE_ATTR(venc_tv_standard, 0664, -+ omapfb_show_venc_tv_standard, omapfb_store_venc_tv_standard); -+#endif -+ - static int omapfb_register_sysfs(struct omapfb_device *fbdev) - { - int r; -@@ -1449,8 +1647,19 @@ static int omapfb_register_sysfs(struct - - if ((r = sysfs_create_group(&fbdev->dev->kobj, &ctrl_attr_grp))) - goto fail3; -+#ifdef CONFIG_FB_OMAP_VENC -+ r = device_create_file(fbdev->dev, &dev_attr_venc_tv_standard); -+ if (r) -+ goto fail4; -+#endif - - return 0; -+ -+#ifdef CONFIG_FB_OMAP_VENC -+fail4: -+ sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp); -+#endif -+ - fail3: - sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp); - fail2: -@@ -1464,6 +1673,9 @@ fail0: - - static void omapfb_unregister_sysfs(struct omapfb_device *fbdev) - { -+#ifdef CONFIG_FB_OMAP_VENC -+ device_remove_file(fbdev->dev, &dev_attr_venc_tv_standard); -+#endif - sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp); - sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp); - device_remove_file(fbdev->dev, &dev_attr_caps_num); -@@ -1497,7 +1709,7 @@ static int fbinfo_init(struct omapfb_dev - var->xres_virtual = def_vxres; - var->yres_virtual = def_vyres; - var->rotate = def_rotate; -- var->bits_per_pixel = fbdev->panel->bpp; -+ var->bits_per_pixel = fbdev->lcd_panel->bpp; - - set_fb_var(info, var); - set_fb_fix(info); -@@ -1547,6 +1759,9 @@ static int planes_init(struct omapfb_dev - plane->idx = i; - plane->fbdev = fbdev; - plane->info.mirror = def_mirror; -+ if (i == 0) /* GFX plane is enabled by default */ -+ plane->info.enabled = 1; -+ plane->panel = fbdev->lcd_panel; - fbdev->fb_info[i] = fbi; - - if ((r = fbinfo_init(fbdev, fbi)) < 0) { -@@ -1572,18 +1787,26 @@ static void omapfb_free_resources(struct - case OMAPFB_ACTIVE: - for (i = 0; i < fbdev->mem_desc.region_cnt; i++) - unregister_framebuffer(fbdev->fb_info[i]); -- case 7: -+ case 8: - omapfb_unregister_sysfs(fbdev); -+ case 7: -+ fbdev->lcd_panel->disable(fbdev->lcd_panel); -+ -+ if (fbdev->digital_panel) -+ fbdev->digital_panel->disable(fbdev->digital_panel); - case 6: -- fbdev->panel->disable(fbdev->panel); -+ venc_exit(); - case 5: - omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED); - case 4: -+ - planes_cleanup(fbdev); - case 3: - ctrl_cleanup(fbdev); - case 2: -- fbdev->panel->cleanup(fbdev->panel); -+ fbdev->lcd_panel->cleanup(fbdev->lcd_panel); -+ if (fbdev->digital_panel && fbdev->digital_panel->cleanup) -+ fbdev->digital_panel->cleanup(fbdev->digital_panel); - case 1: - dev_set_drvdata(fbdev->dev, NULL); - kfree(fbdev); -@@ -1632,10 +1855,10 @@ static int omapfb_find_ctrl(struct omapf - static void check_required_callbacks(struct omapfb_device *fbdev) - { - #define _C(x) (fbdev->ctrl->x != NULL) --#define _P(x) (fbdev->panel->x != NULL) -- BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL); -+#define _P(x) (fbdev->lcd_panel->x != NULL) -+ BUG_ON(fbdev->ctrl == NULL || fbdev->lcd_panel == NULL); - BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) && -- _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) && -+ _C(set_update_mode) && _C(setup_plane) && - _P(init) && _P(cleanup) && _P(enable) && _P(disable) && - _P(get_caps))); - #undef _P -@@ -1689,7 +1912,7 @@ static int omapfb_do_probe(struct platfo - init_state++; - - fbdev->dev = &pdev->dev; -- fbdev->panel = panel; -+ fbdev->lcd_panel = panel; - platform_set_drvdata(pdev, fbdev); - - mutex_init(&fbdev->rqueue_mutex); -@@ -1712,14 +1935,14 @@ static int omapfb_do_probe(struct platfo - goto cleanup; - } - -- r = 
fbdev->panel->init(fbdev->panel, fbdev); -+ r = fbdev->lcd_panel->init(fbdev->lcd_panel, fbdev); - if (r) - goto cleanup; - -- pr_info("omapfb: configured for panel %s\n", fbdev->panel->name); -+ pr_info("omapfb: configured for panel %s\n", fbdev->lcd_panel->name); - -- def_vxres = def_vxres ? def_vxres : fbdev->panel->x_res; -- def_vyres = def_vyres ? def_vyres : fbdev->panel->y_res; -+ def_vxres = def_vxres ? def_vxres : fbdev->lcd_panel->x_res; -+ def_vyres = def_vyres ? def_vyres : fbdev->lcd_panel->y_res; - - init_state++; - -@@ -1743,22 +1966,24 @@ static int omapfb_do_probe(struct platfo - omap_set_dma_priority(0, OMAP_DMA_PORT_EMIFF, 15); - #endif - -+ /* Change mode only for GFX plane, since this is the only enabled -+ * by default. -+ */ - r = ctrl_change_mode(fbdev->fb_info[0]); - if (r) { - dev_err(fbdev->dev, "mode setting failed\n"); - goto cleanup; - } - -- /* GFX plane is enabled by default */ -- r = fbdev->ctrl->enable_plane(OMAPFB_PLANE_GFX, 1); -- if (r) -- goto cleanup; -- - omapfb_set_update_mode(fbdev, manual_update ? - OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE); - init_state++; - -- r = fbdev->panel->enable(fbdev->panel); -+ r = venc_init(&fbdev->digital_panel); -+ if (r) -+ goto cleanup; -+ -+ r = fbdev->lcd_panel->enable(fbdev->lcd_panel); - if (r) - goto cleanup; - init_state++; -@@ -1781,7 +2006,7 @@ static int omapfb_do_probe(struct platfo - - fbdev->state = OMAPFB_ACTIVE; - -- panel = fbdev->panel; -+ panel = fbdev->lcd_panel; - phz = panel->pixel_clock * 1000; - hhz = phz * 10 / (panel->hfp + panel->x_res + panel->hbp + panel->hsw); - vhz = hhz / (panel->vfp + panel->y_res + panel->vbp + panel->vsw); -@@ -1821,12 +2046,34 @@ void omapfb_register_panel(struct lcd_pa - if (fbdev_pdev != NULL) - omapfb_do_probe(fbdev_pdev, fbdev_panel); - } -+EXPORT_SYMBOL_GPL(omapfb_register_panel); -+ -+static int omapfb_remove(struct platform_device *pdev); -+ -+void omapfb_unregister_panel(struct lcd_panel *panel, -+ struct omapfb_device *fbdev) -+{ -+ struct platform_device *pdev; -+ -+ BUG_ON(fbdev_panel == NULL); -+ -+ pdev = to_platform_device(fbdev->dev); -+ omapfb_remove(pdev); -+ fbdev_panel = NULL; -+} -+EXPORT_SYMBOL_GPL(omapfb_unregister_panel); - --/* Called when the device is being detached from the driver */ - static int omapfb_remove(struct platform_device *pdev) - { -- struct omapfb_device *fbdev = platform_get_drvdata(pdev); -- enum omapfb_state saved_state = fbdev->state; -+ struct omapfb_device *fbdev; -+ enum omapfb_state saved_state; -+ -+ /* Panel not registered yet, thus omapfb_do_probe is not called. 
*/ -+ if (fbdev_panel == NULL) -+ return 0; -+ -+ fbdev = platform_get_drvdata(pdev); -+ saved_state = fbdev->state; - - /* FIXME: wait till completion of pending events */ - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/rfbi.c linux-omap-2.6.28-nokia1/drivers/video/omap/rfbi.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/rfbi.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/rfbi.c 2011-06-22 13:19:33.153063270 +0200 -@@ -26,8 +26,7 @@ - #include - #include - #include -- --#include -+#include - - #include "dispc.h" - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/sossi.c linux-omap-2.6.28-nokia1/drivers/video/omap/sossi.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/sossi.c 2011-06-22 13:14:21.393067702 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/sossi.c 2011-06-22 13:19:33.153063270 +0200 -@@ -23,9 +23,9 @@ - #include - #include - #include -+#include - - #include --#include - - #include "lcdc.h" - -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap/venc.c linux-omap-2.6.28-nokia1/drivers/video/omap/venc.c ---- linux-omap-2.6.28-omap1/drivers/video/omap/venc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap/venc.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,557 @@ -+/* -+ * linux/arch/arm/plat-omap/dss/venc.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * VENC settings from TI's DSS driver -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dispc.h" -+ -+#define VENC_ERR(format, ...) \ -+ printk(KERN_ERR "venc error: " format, ## __VA_ARGS__) -+ -+#define VENC_DBG(format, ...) 
\
-+ printk(KERN_DEBUG "venc: " format, ## __VA_ARGS__)
-+
-+#define VENC_BASE 0x48050C00
-+
-+/* Venc registers */
-+#define VENC_REV_ID 0x00
-+#define VENC_STATUS 0x04
-+#define VENC_F_CONTROL 0x08
-+#define VENC_VIDOUT_CTRL 0x10
-+#define VENC_SYNC_CTRL 0x14
-+#define VENC_LLEN 0x1C
-+#define VENC_FLENS 0x20
-+#define VENC_HFLTR_CTRL 0x24
-+#define VENC_CC_CARR_WSS_CARR 0x28
-+#define VENC_C_PHASE 0x2C
-+#define VENC_GAIN_U 0x30
-+#define VENC_GAIN_V 0x34
-+#define VENC_GAIN_Y 0x38
-+#define VENC_BLACK_LEVEL 0x3C
-+#define VENC_BLANK_LEVEL 0x40
-+#define VENC_X_COLOR 0x44
-+#define VENC_M_CONTROL 0x48
-+#define VENC_BSTAMP_WSS_DATA 0x4C
-+#define VENC_S_CARR 0x50
-+#define VENC_LINE21 0x54
-+#define VENC_LN_SEL 0x58
-+#define VENC_L21__WC_CTL 0x5C
-+#define VENC_HTRIGGER_VTRIGGER 0x60
-+#define VENC_SAVID__EAVID 0x64
-+#define VENC_FLEN__FAL 0x68
-+#define VENC_LAL__PHASE_RESET 0x6C
-+#define VENC_HS_INT_START_STOP_X 0x70
-+#define VENC_HS_EXT_START_STOP_X 0x74
-+#define VENC_VS_INT_START_X 0x78
-+#define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C
-+#define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80
-+#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84
-+#define VENC_VS_EXT_STOP_Y 0x88
-+#define VENC_AVID_START_STOP_X 0x90
-+#define VENC_AVID_START_STOP_Y 0x94
-+#define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0
-+#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4
-+#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8
-+#define VENC_TVDETGP_INT_START_STOP_X 0xB0
-+#define VENC_TVDETGP_INT_START_STOP_Y 0xB4
-+#define VENC_GEN_CTRL 0xB8
-+#define VENC_OUTPUT_CONTROL 0xC4
-+#define VENC_DAC_B__DAC_C 0xC8
-+
-+struct venc_config {
-+ u32 f_control;
-+ u32 vidout_ctrl;
-+ u32 sync_ctrl;
-+ u32 llen;
-+ u32 flens;
-+ u32 hfltr_ctrl;
-+ u32 cc_carr_wss_carr;
-+ u32 c_phase;
-+ u32 gain_u;
-+ u32 gain_v;
-+ u32 gain_y;
-+ u32 black_level;
-+ u32 blank_level;
-+ u32 x_color;
-+ u32 m_control;
-+ u32 bstamp_wss_data;
-+ u32 s_carr;
-+ u32 line21;
-+ u32 ln_sel;
-+ u32 l21__wc_ctl;
-+ u32 htrigger_vtrigger;
-+ u32 savid__eavid;
-+ u32 flen__fal;
-+ u32 lal__phase_reset;
-+ u32 hs_int_start_stop_x;
-+ u32 hs_ext_start_stop_x;
-+ u32 vs_int_start_x;
-+ u32 vs_int_stop_x__vs_int_start_y;
-+ u32 vs_int_stop_y__vs_ext_start_x;
-+ u32 vs_ext_stop_x__vs_ext_start_y;
-+ u32 vs_ext_stop_y;
-+ u32 avid_start_stop_x;
-+ u32 avid_start_stop_y;
-+ u32 fid_int_start_x__fid_int_start_y;
-+ u32 fid_int_offset_y__fid_ext_start_x;
-+ u32 fid_ext_start_y__fid_ext_offset_y;
-+ u32 tvdetgp_int_start_stop_x;
-+ u32 tvdetgp_int_start_stop_y;
-+ u32 gen_ctrl;
-+
-+ int width;
-+ int height;
-+};
-+
-+/* from TRM */
-+static const struct venc_config venc_config_pal_trm = {
-+ .f_control = 0,
-+ .vidout_ctrl = 1,
-+ .sync_ctrl = 0x40,
-+ .llen = 0x35F, /* 863 */
-+ .flens = 0x270, /* 624 */
-+ .hfltr_ctrl = 0,
-+ .cc_carr_wss_carr = 0x2F7225ED,
-+ .c_phase = 0,
-+ .gain_u = 0x111,
-+ .gain_v = 0x181,
-+ .gain_y = 0x140,
-+ .black_level = 0x3B,
-+ .blank_level = 0x3B,
-+ .x_color = 0x7,
-+ .m_control = 0x2,
-+ .bstamp_wss_data = 0x3F,
-+ .s_carr = 0x2A098ACB,
-+ .line21 = 0,
-+ .ln_sel = 0x01290015,
-+ .l21__wc_ctl = 0x0000F603,
-+ .htrigger_vtrigger = 0,
-+
-+ .savid__eavid = 0x06A70108,
-+ .flen__fal = 0x00180270,
-+ .lal__phase_reset = 0x00180270,
-+ .hs_int_start_stop_x = 0x00880358,
-+ .hs_ext_start_stop_x = 0x000F035F,
-+ .vs_int_start_x = 0x01A70000,
-+ .vs_int_stop_x__vs_int_start_y = 0x000001A7,
-+ .vs_int_stop_y__vs_ext_start_x = 0x01AF0000,
-+ .vs_ext_stop_x__vs_ext_start_y = 0x000101AF,
-+ .vs_ext_stop_y = 0x00000025,
-+ .avid_start_stop_x = 0x03530083,
-+ .avid_start_stop_y = 0x026C002E,
-+ .fid_int_start_x__fid_int_start_y = 0x0001008A,
-+ .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
-+ .fid_ext_start_y__fid_ext_offset_y = 0x01380001,
-+
-+ .tvdetgp_int_start_stop_x = 0x00140001,
-+ .tvdetgp_int_start_stop_y = 0x00010001,
-+ .gen_ctrl = 0x00FF0000,
-+
-+ .width = 720,
-+ .height = 574, /* for some reason, this isn't 576 */
-+};
-+
-+/* from TRM */
-+static const struct venc_config venc_config_ntsc_trm = {
-+ .f_control = 0,
-+ .vidout_ctrl = 1,
-+ .sync_ctrl = 0x8040,
-+ .llen = 0x359,
-+ .flens = 0x20C,
-+ .hfltr_ctrl = 0,
-+ .cc_carr_wss_carr = 0x043F2631,
-+ .c_phase = 0,
-+ .gain_u = 0x102,
-+ .gain_v = 0x16C,
-+ .gain_y = 0x12F,
-+ .black_level = 0x43,
-+ .blank_level = 0x38,
-+ .x_color = 0x7,
-+ .m_control = 0x1,
-+ .bstamp_wss_data = 0x38,
-+ .s_carr = 0x21F07C1F,
-+ .line21 = 0,
-+ .ln_sel = 0x01310011,
-+ .l21__wc_ctl = 0x0000F003,
-+ .htrigger_vtrigger = 0,
-+
-+ .savid__eavid = 0x069300F4,
-+ .flen__fal = 0x0016020C,
-+ .lal__phase_reset = 0x00060107,
-+ .hs_int_start_stop_x = 0x008E0350,
-+ .hs_ext_start_stop_x = 0x000F0359,
-+ .vs_int_start_x = 0x01A00000,
-+ .vs_int_stop_x__vs_int_start_y = 0x020701A0,
-+ .vs_int_stop_y__vs_ext_start_x = 0x01AC0024,
-+ .vs_ext_stop_x__vs_ext_start_y = 0x020D01AC,
-+ .vs_ext_stop_y = 0x00000006,
-+ .avid_start_stop_x = 0x03480078,
-+ .avid_start_stop_y = 0x02060024,
-+ .fid_int_start_x__fid_int_start_y = 0x0001008A,
-+ .fid_int_offset_y__fid_ext_start_x = 0x01AC0106,
-+ .fid_ext_start_y__fid_ext_offset_y = 0x01060006,
-+
-+ .tvdetgp_int_start_stop_x = 0x00140001,
-+ .tvdetgp_int_start_stop_y = 0x00010001,
-+ .gen_ctrl = 0x00F90000,
-+
-+ .width = 720,
-+ .height = 482,
-+};
-+
-+static const struct venc_config venc_config_pal_bdghi = {
-+ .f_control = 0,
-+ .vidout_ctrl = 0,
-+ .sync_ctrl = 0,
-+ .hfltr_ctrl = 0,
-+ .x_color = 0,
-+ .line21 = 0,
-+ .ln_sel = 21,
-+ .htrigger_vtrigger = 0,
-+ .tvdetgp_int_start_stop_x = 0x00140001,
-+ .tvdetgp_int_start_stop_y = 0x00010001,
-+ .gen_ctrl = 0x00FB0000,
-+
-+ .llen = 864-1,
-+ .flens = 625-1,
-+ .cc_carr_wss_carr = 0x2F7625ED,
-+ .c_phase = 0xDF,
-+ .gain_u = 0x111,
-+ .gain_v = 0x181,
-+ .gain_y = 0x140,
-+ .black_level = 0x3e,
-+ .blank_level = 0x3e,
-+ .m_control = 0<<2 | 1<<1,
-+ .bstamp_wss_data = 0x42,
-+ .s_carr = 0x2a098acb,
-+ .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
-+ .savid__eavid = 0x06A70108,
-+ .flen__fal = 23<<16 | 624<<0,
-+ .lal__phase_reset = 2<<17 | 310<<0,
-+ .hs_int_start_stop_x = 0x00920358,
-+ .hs_ext_start_stop_x = 0x000F035F,
-+ .vs_int_start_x = 0x1a7<<16,
-+ .vs_int_stop_x__vs_int_start_y = 0x000601A7,
-+ .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
-+ .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
-+ .vs_ext_stop_y = 0x05,
-+ .avid_start_stop_x = 0x03530082,
-+ .avid_start_stop_y = 0x0270002E,
-+ .fid_int_start_x__fid_int_start_y = 0x0005008A,
-+ .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
-+ .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
-+
-+ .width = 720,
-+ .height = 576,
-+};
-+
-+static struct {
-+ void __iomem *base;
-+ struct clk *dss_54m_fck;
-+ struct clk *dss_96m_fck;
-+ struct clk *dss_ick;
-+ struct clk *dss1_fck;
-+ const struct venc_config *config;
-+ int enabled;
-+ struct mutex lock;
-+} venc;
-+
-+static inline void venc_write_reg(int idx, u32 val)
-+{
-+ __raw_writel(val, venc.base + idx);
-+}
-+
-+static inline u32 venc_read_reg(int idx)
-+{
-+ u32 l = __raw_readl(venc.base + idx);
-+ return l;
-+}
-+
-+static void venc_write_config(const struct venc_config
*config) -+{ -+ venc_write_reg(VENC_LLEN, config->llen); -+ venc_write_reg(VENC_FLENS, config->flens); -+ venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr); -+ venc_write_reg(VENC_C_PHASE, config->c_phase); -+ venc_write_reg(VENC_GAIN_U, config->gain_u); -+ venc_write_reg(VENC_GAIN_V, config->gain_v); -+ venc_write_reg(VENC_GAIN_Y, config->gain_y); -+ venc_write_reg(VENC_BLACK_LEVEL, config->black_level); -+ venc_write_reg(VENC_BLANK_LEVEL, config->blank_level); -+ venc_write_reg(VENC_M_CONTROL, config->m_control); -+ venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data); -+ venc_write_reg(VENC_S_CARR, config->s_carr); -+ venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl); -+ venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid); -+ venc_write_reg(VENC_FLEN__FAL, config->flen__fal); -+ venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset); -+ venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x); -+ venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x); -+ venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x); -+ venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y, -+ config->vs_int_stop_x__vs_int_start_y); -+ venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X, -+ config->vs_int_stop_y__vs_ext_start_x); -+ venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y, -+ config->vs_ext_stop_x__vs_ext_start_y); -+ venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y); -+ venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x); -+ venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y); -+ venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y, -+ config->fid_int_start_x__fid_int_start_y); -+ venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X, -+ config->fid_int_offset_y__fid_ext_start_x); -+ venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y, -+ config->fid_ext_start_y__fid_ext_offset_y); -+ -+ venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C)); -+ venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl); -+ venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl); -+ venc_write_reg(VENC_X_COLOR, config->x_color); -+ venc_write_reg(VENC_LINE21, config->line21); -+ venc_write_reg(VENC_LN_SEL, config->ln_sel); -+ venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger); -+ venc_write_reg(VENC_TVDETGP_INT_START_STOP_X, -+ config->tvdetgp_int_start_stop_x); -+ venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y, -+ config->tvdetgp_int_start_stop_y); -+ venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl); -+ venc_write_reg(VENC_F_CONTROL, config->f_control); -+ venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl); -+} -+ -+static void venc_reset(void) -+{ -+ int t = 1000; -+ -+ venc_write_reg(VENC_F_CONTROL, venc_read_reg(VENC_F_CONTROL) | (1<<8)); -+ while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) { -+ if (--t == 0) { -+ VENC_ERR("Failed to reset venc\n"); -+ return; -+ } -+ } -+ msleep(20); -+} -+ -+static void venc_enable_clocks(int enable) -+{ -+ if (enable) { -+ clk_enable(venc.dss_ick); -+ clk_enable(venc.dss1_fck); -+ clk_enable(venc.dss_54m_fck); -+ clk_enable(venc.dss_96m_fck); -+ } else { -+ clk_disable(venc.dss_96m_fck); -+ clk_disable(venc.dss_54m_fck); -+ clk_disable(venc.dss1_fck); -+ clk_disable(venc.dss_ick); -+ } -+} -+ -+static int venc_get_clocks(void) -+{ -+ int i; -+ const struct { -+ struct clk **clock; -+ char *name; -+ } clocks[4] = { -+ { &venc.dss_ick, "dss_ick" }, -+ { &venc.dss1_fck, "dss1_alwon_fck" }, -+ { &venc.dss_54m_fck, "dss_tv_fck" }, -+ { &venc.dss_96m_fck, 
"dss_96m_fck" }, -+ }; -+ -+ for (i = 0; i < ARRAY_SIZE(clocks); i++) { -+ struct clk *clk; -+ char *clock_name = clocks[i].name; -+ -+ clk = clk_get(NULL, clock_name); -+ if (IS_ERR(clk)) { -+ VENC_ERR("Can't get clock %s\n", clock_name); -+ BUG(); -+ } -+ *clocks[i].clock = clk; -+ VENC_DBG("clk %s, rate %ld\n", clock_name, clk_get_rate(clk)); -+ } -+ -+ return 0; -+} -+ -+static int venc_enable_display(struct lcd_panel *panel) -+{ -+ mutex_lock(&venc.lock); -+ -+ if (venc.enabled) { -+ mutex_unlock(&venc.lock); -+ return 0; -+ } -+ venc.enabled++; -+ -+ venc_enable_clocks(1); -+ -+ omap_dispc_set_venc_output(OMAP_DISPC_VENC_TYPE_COMPOSITE); -+ omap_dispc_set_dac_pwrdn_bgz(1); -+ -+ venc_write_config(venc.config); -+ -+ if (1) { /* composite mode */ -+ if (cpu_is_omap24xx()) -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0x2); -+ else -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0xa); -+ } else { /* S-Video */ -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0xd); -+ } -+ -+ omap_dispc_set_digit_size(venc.config->width, venc.config->height/2); -+ -+ omap_dispc_enable_digit_out(1); -+ -+ mutex_unlock(&venc.lock); -+ -+ return 0; -+} -+ -+static void venc_disable_display(struct lcd_panel *panel) -+{ -+ mutex_lock(&venc.lock); -+ -+ if (!venc.enabled) { -+ mutex_unlock(&venc.lock); -+ return; -+ } -+ venc.enabled--; -+ -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0); -+ omap_dispc_set_dac_pwrdn_bgz(0); -+ -+ omap_dispc_enable_digit_out(0); -+ -+ venc_enable_clocks(0); -+ -+ mutex_unlock(&venc.lock); -+} -+ -+static struct lcd_panel venc_panel = { -+ .name = "tv-out", -+ .enable = venc_enable_display, -+ .disable = venc_disable_display, -+ .pixel_clock = 13500, -+}; -+ -+int venc_change_tv_standard(enum omapfb_tv_std standard) -+{ -+ int ret = 0; -+ -+ mutex_lock(&venc.lock); -+ -+ if (venc.enabled) { -+ mutex_unlock(&venc.lock); -+ return -EBUSY; -+ } -+ -+ switch (standard) { -+ case OMAPFB_TV_STD_PAL: -+ venc.config = &venc_config_pal_trm; -+ break; -+ case OMAPFB_TV_STD_NTSC: -+ venc.config = &venc_config_ntsc_trm; -+ break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ -+ venc_panel.x_res = venc.config->width; -+ venc_panel.y_res = venc.config->height; -+ -+ venc_enable_clocks(1); -+ venc_reset(); -+ venc_write_config(venc.config); -+ venc_enable_clocks(0); -+ -+ mutex_unlock(&venc.lock); -+ return ret; -+} -+ -+enum omapfb_tv_std venc_query_tv_standard(void) -+{ -+ int r = 0; -+ -+ mutex_lock(&venc.lock); -+ -+ if (venc.config == &venc_config_pal_trm) -+ r = OMAPFB_TV_STD_PAL; -+ else if (venc.config == &venc_config_ntsc_trm) -+ r = OMAPFB_TV_STD_NTSC; -+ else -+ BUG(); -+ -+ mutex_unlock(&venc.lock); -+ -+ return r; -+} -+ -+int venc_init(struct lcd_panel **digital_panel) -+{ -+ u8 rev_id; -+ -+ venc.base = ioremap(VENC_BASE, SZ_1K); -+ if (!venc.base) { -+ VENC_ERR("can't ioremap VENC\n"); -+ return -ENOMEM; -+ } -+ -+ mutex_init(&venc.lock); -+ -+ omap_dispc_set_venc_clocks(); -+ venc_get_clocks(); -+ -+ /* venc is reset in venc_change_tv_standard */ -+ venc_change_tv_standard(OMAPFB_TV_STD_PAL); -+ -+ venc_enable_clocks(1); -+ -+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); -+ printk(KERN_INFO "OMAP VENC rev %d\n", rev_id); -+ -+ venc_enable_clocks(0); -+ -+ *digital_panel = &venc_panel; -+ -+ return 0; -+} -+ -+void venc_exit(void) -+{ -+ iounmap(venc.base); -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/ctrl-zonda.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/ctrl-zonda.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/ctrl-zonda.c 1970-01-01 01:00:00.000000000 
+0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/ctrl-zonda.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,631 @@ -+/* -+ * TC358731XBG, eDisco -+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+/* DSI Virtual channel. Hardcoded for now. */ -+#define ZCH 0 -+ -+#define DCS_GET_POWER_MODE 0x0a -+#define DCS_GET_ADDR_MODE 0x0b -+#define DCS_GET_PIX_FORMAT 0x0c -+#define DCS_ENTER_SLEEP_MODE 0x10 -+#define DCS_EXIT_SLEEP_MODE 0x11 -+#define DCS_DISP_OFF 0x28 -+#define DCS_DISP_ON 0x29 -+#define DCS_WRITE_MEM_START 0x2c -+#define DCS_SET_TEAR_OFF 0x34 -+#define DCS_SET_TEAR_ON 0x35 -+#define DCS_SET_ADDR_MODE 0x36 -+#define DCS_SET_PIX_FORMAT 0x3a -+#define DCS_THSSI_OFF 0x80 -+#define DCS_THSSI_ON 0x81 -+#define DCS_SET_IOCTRL 0x82 -+#define DCS_GET_IOCTRL 0x83 -+#define DCS_SET_TE_TIMING 0x84 -+#define DCS_SET_VTIMING 0x8b -+#define DCS_SET_HTIMING 0x92 -+#define DCS_SET_PIX_CLOCK 0x9e -+#define DCS_GET_ID1 0xda -+#define DCS_GET_ID2 0xdb -+#define DCS_GET_ID3 0xdc -+ -+#define DCS_WRITE_IDX 0xfb -+#define DCS_READ_EDISCO 0xfc -+#define DCS_WRITE_EDISCO 0xfd -+#define DCS_ENABLE_EDISCO 0x9d -+ -+#ifdef DEBUG -+#define DBG(format, ...) printk(KERN_DEBUG "Zonda: " format, ## __VA_ARGS__) -+#else -+#define DBG(format, ...) -+#endif -+ -+static struct { -+ bool enabled; -+ u8 rotate; -+ bool mirror; -+} zonda; -+ -+static int zonda_dcs_read_1(u8 dcs_cmd, u8 *data) -+{ -+ int r; -+ u8 buf[1]; -+ -+ r = dsi_vc_dcs_read(ZCH, dcs_cmd, buf, 1); -+ -+ if (r < 0) { -+ printk(KERN_ERR "Zonda read error\n"); -+ return r; -+ } -+ -+ *data = buf[0]; -+ -+ return 0; -+} -+ -+static int zonda_dcs_write_0(u8 dcs_cmd) -+{ -+ return dsi_vc_dcs_write(ZCH, &dcs_cmd, 1); -+} -+ -+static int zonda_dcs_write_1(u8 dcs_cmd, u8 param) -+{ -+ u8 buf[2]; -+ buf[0] = dcs_cmd; -+ buf[1] = param; -+ return dsi_vc_dcs_write(ZCH, buf, 2); -+} -+ -+#if 0 -+static void zonda_edisco_toggle(void) -+{ -+ u8 buf[5]; -+ buf[0] = DCS_ENABLE_EDISCO; -+ buf[1] = 0x03; -+ buf[2] = 0x7f; -+ buf[3] = 0x5c; -+ buf[4] = 0x33; -+ dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+} -+ -+static u8 zonda_edisco_read(u8 reg) -+{ -+ int r; -+ u8 buf[2]; -+ -+ buf[0] = DCS_WRITE_IDX; -+ buf[1] = reg; -+ dsi_vc_dcs_write(ZCH, buf, 2); -+ -+ r = dsi_vc_dcs_read(ZCH, DCS_READ_EDISCO, buf, 1); -+ if (r != 1) -+ printk(KERN_ERR "ERRor reading edisco\n"); -+ -+ return buf[0]; -+} -+ -+static void zonda_edisco_write(u8 reg, u32 data) -+{ -+ int val; -+ u8 buf[5]; -+ buf[0] = DCS_WRITE_EDISCO; -+ buf[1] = reg; -+ buf[2] = (data >> 16) & 0xf; -+ buf[3] = (data >> 8) & 0xf; -+ buf[4] = (data >> 0) & 0xf; -+ val = dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+} -+ -+static void zonda_enable_colorbar(bool enable) -+{ -+ zonda_edisco_toggle(); -+ -+ /* enable colorbar (10-8 bits) */ -+ zonda_edisco_write(0x20, -+ (1<<2) | ((enable ? 
2 : 0)<<8)); -+ zonda_edisco_write(0x28, 1); /* confirm lcdc settings */ -+ -+ zonda_edisco_toggle(); -+} -+#endif -+ -+static void zonda_set_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ display->panel->timings = *timings; -+} -+ -+static int zonda_check_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ return 0; -+} -+ -+static void zonda_get_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ *timings = display->panel->timings; -+} -+ -+static void zonda_get_resolution(struct omap_display *display, -+ u16 *xres, u16 *yres) -+{ -+ if (zonda.rotate == 0 || zonda.rotate == 2) { -+ *xres = display->panel->timings.x_res; -+ *yres = display->panel->timings.y_res; -+ } else { -+ *yres = display->panel->timings.x_res; -+ *xres = display->panel->timings.y_res; -+ } -+} -+ -+static int zonda_ctrl_init(struct omap_display *display) -+{ -+ DBG("zonda_ctrl_init\n"); -+ -+ display->set_timings = zonda_set_timings; -+ display->check_timings = zonda_check_timings; -+ display->get_timings = zonda_get_timings; -+ -+ display->get_resolution = zonda_get_resolution; -+ -+ return 0; -+} -+ -+static int zonda_sleep_enable(bool enable) -+{ -+ u8 cmd; -+ int r; -+ -+ if (enable) { -+ cmd = DCS_ENTER_SLEEP_MODE; -+ r = dsi_vc_dcs_write_nosync(ZCH, &cmd, 1); -+ } else { -+ cmd = DCS_EXIT_SLEEP_MODE; -+ r = dsi_vc_dcs_write(ZCH, &cmd, 1); -+ } -+ -+ if (r) -+ return r; -+ -+ if (!enable) -+ msleep(5); -+ -+ r = dsi_vc_send_null(ZCH); -+ if (r) -+ return r; -+ -+ return 0; -+} -+ -+static int zonda_display_enable(bool enable) -+{ -+ u8 cmd; -+ -+ if (enable) -+ cmd = DCS_DISP_ON; -+ else -+ cmd = DCS_DISP_OFF; -+ -+ return zonda_dcs_write_0(cmd); -+} -+ -+static int zonda_sdi_enable(bool enable) -+{ -+ u8 cmd; -+ -+ if (enable) -+ cmd = DCS_THSSI_ON; -+ else -+ cmd = DCS_THSSI_OFF; -+ -+ return zonda_dcs_write_0(cmd); -+} -+ -+static int zonda_get_id(void) -+{ -+ u8 id1, id2, id3; -+ int r; -+ -+ r = zonda_dcs_read_1(DCS_GET_ID1, &id1); -+ if (r) -+ return r; -+ r = zonda_dcs_read_1(DCS_GET_ID2, &id2); -+ if (r) -+ return r; -+ r = zonda_dcs_read_1(DCS_GET_ID3, &id3); -+ if (r) -+ return r; -+ -+ printk(KERN_INFO "Zonda version %d.%d.%d\n", id1, id2, id3); -+ return 0; -+} -+ -+static void zonda_set_te_timing(void) -+{ -+ u8 buf[7]; -+ int start = 0, end = 0; -+ -+ buf[0] = DCS_SET_TE_TIMING; -+ buf[1] = (start >> 16) & 0xff; -+ buf[2] = (start >> 8) & 0xff; -+ buf[3] = start & 0xff; -+ buf[4] = (end >> 16) & 0xff; -+ buf[5] = (end >> 8) & 0xff; -+ buf[6] = end & 0xff; -+ dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+} -+ -+static void zonda_set_video_timings(struct omap_display *display) -+{ -+ u8 buf[7]; -+ int res; -+ -+ res = display->panel->timings.y_res; -+ buf[0] = DCS_SET_VTIMING; -+ buf[1] = display->panel->timings.vfp; -+ buf[2] = display->panel->timings.vsw; -+ buf[3] = display->panel->timings.vbp; -+ buf[4] = (res >> 16) & 0xff; -+ buf[5] = (res >> 8) & 0xff; -+ buf[6] = res & 0xff; -+ dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+ -+ res = display->panel->timings.x_res; -+ buf[0] = DCS_SET_HTIMING; -+ buf[1] = display->panel->timings.hfp; -+ buf[2] = display->panel->timings.hsw; -+ buf[3] = display->panel->timings.hbp; -+ buf[4] = (res >> 16) & 0xff; -+ buf[5] = (res >> 8) & 0xff; -+ buf[6] = res & 0xff; -+ dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+} -+ -+static void zonda_set_pixel_clock(void) -+{ -+ /* nevada supports only 10MHz pclk for now. 
broken.*/ -+ /*zonda_dcs_write_1(DCS_SET_PIX_CLOCK, 0x41);*/ -+ zonda_dcs_write_1(DCS_SET_PIX_CLOCK, 0x1b); -+ /*zonda_dcs_write_1(DCS_SET_PIX_CLOCK, 0x3);*/ -+} -+ -+static void zonda_set_sdi_channels(int numchannels) -+{ -+ BUG_ON(numchannels != 1 && numchannels != 2); -+ -+ zonda_dcs_write_1(DCS_SET_IOCTRL, numchannels == 1 ? 0 : 1); -+} -+ -+static void zonda_dump_debug(void) -+{ -+ u8 val; -+ -+ dsi_vc_dcs_read(ZCH, DCS_GET_IOCTRL, &val, 1); -+ DBG("ioctrl %#x, thssi %s, thssi channels %d\n", val, -+ val & (1<<7) ? "on" : "off", -+ (val & 3) + 1); -+ -+ dsi_vc_dcs_read(ZCH, DCS_GET_POWER_MODE, &val, 1); -+ DBG("powermode %#x: sleep %s, display %s\n", val, -+ val & (1<<4) ? "off" : "on", -+ val & (1<<2) ? "on" : "off"); -+ -+} -+ -+static void zonda_dump_video_timings(void) -+{ -+ u8 zonda_dcs_read(u8 dcs_cmd) -+ { -+ u8 tmp = 0; -+ int r; -+ r = zonda_dcs_read_1(dcs_cmd, &tmp); -+ if (r) -+ printk(KERN_ERR "Zonda read error\n"); -+ return tmp; -+ } -+ -+ u8 fp = zonda_dcs_read(0x8c); -+ u8 sp = zonda_dcs_read(0x8d); -+ u8 bp = zonda_dcs_read(0x8e); -+ u8 amsb = zonda_dcs_read(0x8f); -+ u8 acsb = zonda_dcs_read(0x90); -+ u8 alsb = zonda_dcs_read(0x91); -+ -+ DBG("vfp %d, vsp %d, vbp %d, lines %d\n", -+ fp, sp, bp, -+ (amsb << 16) | (acsb << 8) | (alsb << 0)); -+ -+ fp = zonda_dcs_read(0x93); -+ sp = zonda_dcs_read(0x94); -+ bp = zonda_dcs_read(0x95); -+ amsb = zonda_dcs_read(0x96); -+ acsb = zonda_dcs_read(0x97); -+ alsb = zonda_dcs_read(0x98); -+ -+ DBG("hfp %d, hsp %d, hbp %d, cols %d\n", -+ fp, sp, bp, -+ (amsb << 16) | (acsb << 8) | (alsb << 0)); -+} -+ -+static int zonda_set_addr_mode(int rotate, int mirror) -+{ -+ int r; -+ u8 mode; -+ int b5, b6, b7; -+ -+ r = zonda_dcs_read_1(DCS_GET_ADDR_MODE, &mode); -+ if (r) -+ return r; -+ -+ switch (rotate) { -+ default: -+ case 0: -+ b7 = 0; -+ b6 = 0; -+ b5 = 0; -+ break; -+ case 1: -+ b7 = 0; -+ b6 = 1; -+ b5 = 1; -+ break; -+ case 2: -+ b7 = 1; -+ b6 = 1; -+ b5 = 0; -+ break; -+ case 3: -+ b7 = 1; -+ b6 = 0; -+ b5 = 1; -+ break; -+ } -+ -+ if (mirror) -+ b6 = !b6; -+ -+ mode &= ~((1<<7) | (1<<6) | (1<<5)); -+ mode |= (b7 << 7) | (b6 << 6) | (b5 << 5); -+ -+ return zonda_dcs_write_1(DCS_SET_ADDR_MODE, mode); -+} -+ -+static int zonda_ctrl_rotate(struct omap_display *display, u8 rotate) -+{ -+ int r; -+ -+ if (zonda.enabled) { -+ r = zonda_set_addr_mode(rotate, zonda.mirror); -+ -+ if (r) -+ return r; -+ } -+ -+ zonda.rotate = rotate; -+ -+ return 0; -+} -+ -+static u8 zonda_ctrl_get_rotate(struct omap_display *display) -+{ -+ return zonda.rotate; -+} -+ -+static int zonda_ctrl_mirror(struct omap_display *display, bool enable) -+{ -+ int r; -+ -+ if (zonda.enabled) { -+ r = zonda_set_addr_mode(zonda.rotate, enable); -+ -+ if (r) -+ return r; -+ } -+ -+ zonda.mirror = enable; -+ -+ return 0; -+} -+ -+static bool zonda_ctrl_get_mirror(struct omap_display *display) -+{ -+ return zonda.mirror; -+} -+ -+ -+static int zonda_ctrl_enable(struct omap_display *display) -+{ -+ int r; -+ -+ DBG("zonda_ctrl_enable\n"); -+ -+ if (display->hw_config.ctrl_enable) { -+ r = display->hw_config.ctrl_enable(display); -+ if (r) -+ return r; -+ } -+ -+ /* it seems we have to wait a bit until zonda is ready */ -+ msleep(5); -+ -+ r = zonda_sleep_enable(0); -+ if (r) -+ return r; -+ -+ /*dsi_vc_set_max_rx_packet_size(0, 64);*/ -+ -+ r = zonda_get_id(); -+ if (r) -+ return r; -+ -+ /*dsi_vc_enable_hs(0, 1);*/ -+ -+ zonda_set_video_timings(display); -+ -+ zonda_set_pixel_clock(); -+ -+ zonda_set_sdi_channels(2); -+ -+ zonda_set_te_timing(); -+ -+ 
/*zonda_enable_colorbar(1);*/ -+ -+ zonda_dcs_write_1(DCS_SET_PIX_FORMAT, 0x77); -+ -+ zonda_set_addr_mode(zonda.rotate, zonda.mirror); -+ -+ zonda_display_enable(1); -+ zonda_sdi_enable(1); -+ -+ zonda_dump_debug(); -+ -+ zonda_dump_video_timings(); -+ -+ zonda.enabled = 1; -+ -+ return 0; -+} -+ -+static void zonda_ctrl_disable(struct omap_display *display) -+{ -+ zonda_sdi_enable(0); -+ zonda_display_enable(0); -+ zonda_sleep_enable(1); -+ -+ /* wait a bit so that the message goes through */ -+ msleep(10); -+ -+ if (display->hw_config.ctrl_disable) -+ display->hw_config.ctrl_disable(display); -+ -+ zonda.enabled = 0; -+} -+ -+static void zonda_set_update_area(int x, int y, int w, int h) -+{ -+ int x1 = x; -+ int x2 = x + w - 1; -+ int y1 = y; -+ int y2 = y + h - 1; -+ -+ u8 buf[7]; -+ buf[0] = 0x2a; /* 0x2a == set_column_address */ -+ buf[1] = (x1 >> 16) & 0xff; -+ buf[2] = (x1 >> 8) & 0xff; -+ buf[3] = (x1 >> 0) & 0xff; -+ buf[4] = (x2 >> 16) & 0xff; -+ buf[5] = (x2 >> 8) & 0xff; -+ buf[6] = (x2 >> 0) & 0xff; -+ -+ dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+ -+ buf[0] = 0x2b; /* 0x2b == set_column_address */ -+ buf[1] = (y1 >> 16) & 0xff; -+ buf[2] = (y1 >> 8) & 0xff; -+ buf[3] = (y1 >> 0) & 0xff; -+ buf[4] = (y2 >> 16) & 0xff; -+ buf[5] = (y2 >> 8) & 0xff; -+ buf[6] = (y2 >> 0) & 0xff; -+ -+ dsi_vc_dcs_write(ZCH, buf, sizeof(buf)); -+} -+ -+static void zonda_ctrl_setup_update(struct omap_display *display, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ u8 tmpbuf[3+1]; -+ -+ zonda_set_update_area(x, y, w, h); -+ -+ /* zonda errata: in high-speed mode, with 2 lanes, -+ * start_mem_write has to be sent twice, first is ignored */ -+ tmpbuf[0] = 0x2c; /* start mem write */ -+ dsi_vc_dcs_write(ZCH, tmpbuf, sizeof(tmpbuf)); -+} -+ -+static int zonda_ctrl_enable_te(struct omap_display *display, bool enable) -+{ -+ u8 buf[2]; -+ -+ if (enable) { -+ buf[0] = DCS_SET_TEAR_ON; -+ buf[1] = 0; /* only vertical sync */ -+ dsi_vc_dcs_write(ZCH, buf, 2); -+ } else { -+ buf[0] = DCS_SET_TEAR_OFF; -+ dsi_vc_dcs_write(ZCH, buf, 1); -+ } -+ -+ return 0; -+} -+ -+static int zonda_run_test(struct omap_display *display, int test_num) -+{ -+ u8 id1, id2, id3; -+ int r; -+ -+ r = zonda_dcs_read_1(DCS_GET_ID1, &id1); -+ if (r) -+ return r; -+ r = zonda_dcs_read_1(DCS_GET_ID2, &id2); -+ if (r) -+ return r; -+ r = zonda_dcs_read_1(DCS_GET_ID3, &id3); -+ if (r) -+ return r; -+ -+ if (id1 != 41 || id2 != 129 || id3 != 1) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static struct omap_ctrl zonda_ctrl = { -+ .owner = THIS_MODULE, -+ .name = "ctrl-zonda", -+ .init = zonda_ctrl_init, -+ .enable = zonda_ctrl_enable, -+ .disable = zonda_ctrl_disable, -+ .setup_update = zonda_ctrl_setup_update, -+ .enable_te = zonda_ctrl_enable_te, -+ .set_rotate = zonda_ctrl_rotate, -+ .get_rotate = zonda_ctrl_get_rotate, -+ .set_mirror = zonda_ctrl_mirror, -+ .get_mirror = zonda_ctrl_get_mirror, -+ .run_test = zonda_run_test, -+ .pixel_size = 24, -+}; -+ -+ -+static int __init zonda_init(void) -+{ -+ DBG("zonda_init\n"); -+ omap_dss_register_ctrl(&zonda_ctrl); -+ return 0; -+} -+ -+static void __exit zonda_exit(void) -+{ -+ DBG("zonda_exit\n"); -+ -+ omap_dss_unregister_ctrl(&zonda_ctrl); -+} -+ -+module_init(zonda_init); -+module_exit(zonda_exit); -+ -+MODULE_AUTHOR("Tomi Valkeinen "); -+MODULE_DESCRIPTION("Zonda Driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/Kconfig linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/Kconfig ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/Kconfig 
1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/Kconfig 2011-06-22 13:19:33.153063270 +0200
-@@ -0,0 +1,45 @@
-+menu "OMAP2/3 Display Device Drivers"
-+ depends on OMAP2_DSS
-+
-+config PANEL_NEVADA
-+ tristate "Nevada Panel"
-+ help
-+ Nevada LCD
-+
-+config CTRL_ZONDA
-+ tristate "Zonda ctrl"
-+ depends on OMAP2_DSS_DSI
-+ help
-+ Zonda Ctrl
-+
-+config PANEL_TAAL
-+ tristate "Taal Panel"
-+ depends on OMAP2_DSS_DSI
-+ help
-+ Taal panel with integrated controller
-+
-+config PANEL_ACX565AKM
-+ tristate "ACX565AKM LCD Panel"
-+ depends on OMAP2_DSS_SDI
-+ select BACKLIGHT_CLASS_DEVICE
-+ help
-+ LCD Panel used in Rover
-+
-+config PANEL_GENERIC
-+ tristate "Generic Panel"
-+ help
-+ Generic panel driver.
-+ Used for DVI output for Beagle and OMAP3 SDP.
-+
-+config PANEL_SAMSUNG_LTE430WQ_F0C
-+ tristate "Samsung LTE430WQ-F0C LCD Panel"
-+ depends on OMAP2_DSS
-+ help
-+ LCD Panel used on Overo Palo43
-+
-+config PANEL_SHARP_LS037V7DW01
-+ tristate "Sharp LS037V7DW01 LCD Panel"
-+ depends on OMAP2_DSS
-+ help
-+ LCD Panel used in TI's SDP3430 and EVM boards
-+endmenu
-diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/Makefile linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/Makefile
---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/Makefile 1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/Makefile 2011-06-22 13:19:33.153063270 +0200
-@@ -0,0 +1,9 @@
-+obj-$(CONFIG_PANEL_NEVADA) += panel-nevada.o
-+obj-$(CONFIG_CTRL_ZONDA) += ctrl-zonda.o
-+
-+obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
-+obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
-+
-+obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
-+obj-$(CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C) += panel-samsung-lte430wq-f0c.o
-+obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-acx565akm.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-acx565akm.c
---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-acx565akm.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-acx565akm.c 2011-06-22 13:19:33.153063270 +0200
-@@ -0,0 +1,737 @@
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+
-+#include
-+#include
-+
-+#include "panel-acx565akm.h"
-+
-+#define MIPID_CMD_READ_DISP_ID 0x04
-+#define MIPID_CMD_READ_RED 0x06
-+#define MIPID_CMD_READ_GREEN 0x07
-+#define MIPID_CMD_READ_BLUE 0x08
-+#define MIPID_CMD_READ_DISP_STATUS 0x09
-+#define MIPID_CMD_RDDSDR 0x0F
-+#define MIPID_CMD_SLEEP_IN 0x10
-+#define MIPID_CMD_SLEEP_OUT 0x11
-+#define MIPID_CMD_DISP_OFF 0x28
-+#define MIPID_CMD_DISP_ON 0x29
-+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
-+#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
-+#define MIPID_CMD_WRITE_CTRL_DISP 0x53
-+
-+#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
-+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
-+#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
-+#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
-+
-+#define MIPID_CMD_READ_CTRL_DISP 0x54
-+#define MIPID_CMD_WRITE_CABC 0x55
-+#define MIPID_CMD_READ_CABC 0x56
-+
-+#define MIPID_VER_LPH8923 3
-+#define MIPID_VER_LS041Y3 4
-+#define MIPID_VER_L4F00311 8
-+#define MIPID_VER_ACX565AKM 9
-+
-+struct acx565akm_device {
-+ struct backlight_device *bl_dev;
-+ int enabled;
-+ int model;
-+ int revision;
-+ u8 display_id[3];
-+ unsigned has_bc:1;
-+ unsigned has_cabc:1;
-+
unsigned cabc_mode; -+ unsigned long hw_guard_end; /* next value of jiffies -+ when we can issue the -+ next sleep in/out command */ -+ unsigned long hw_guard_wait; /* max guard time in jiffies */ -+ -+ struct spi_device *spi; -+ struct mutex mutex; -+ struct omap_panel panel; -+ struct omap_display *display; -+}; -+ -+static int acx565akm_bl_update_status(struct backlight_device *dev); -+ -+static void acx565akm_transfer(struct acx565akm_device *md, int cmd, -+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen) -+{ -+ struct spi_message m; -+ struct spi_transfer *x, xfer[5]; -+ int r; -+ -+ BUG_ON(md->spi == NULL); -+ -+ spi_message_init(&m); -+ -+ memset(xfer, 0, sizeof(xfer)); -+ x = &xfer[0]; -+ -+ cmd &= 0xff; -+ x->tx_buf = &cmd; -+ x->bits_per_word= 9; -+ x->len = 2; -+ -+ if (rlen > 1 && wlen == 0) { -+ /* -+ * Between the command and the response data there is a -+ * dummy clock cycle. Add an extra bit after the command -+ * word to account for this. -+ */ -+ x->bits_per_word = 10; -+ cmd <<= 1; -+ } -+ spi_message_add_tail(x, &m); -+ -+ if (wlen) { -+ x++; -+ x->tx_buf = wbuf; -+ x->len = wlen; -+ x->bits_per_word= 9; -+ spi_message_add_tail(x, &m); -+ } -+ -+ if (rlen) { -+ x++; -+ x->rx_buf = rbuf; -+ x->len = rlen; -+ spi_message_add_tail(x, &m); -+ } -+ -+ r = spi_sync(md->spi, &m); -+ if (r < 0) -+ dev_dbg(&md->spi->dev, "spi_sync %d\n", r); -+} -+ -+static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd) -+{ -+ acx565akm_transfer(md, cmd, NULL, 0, NULL, 0); -+} -+ -+static inline void acx565akm_write(struct acx565akm_device *md, -+ int reg, const u8 *buf, int len) -+{ -+ acx565akm_transfer(md, reg, buf, len, NULL, 0); -+} -+ -+static inline void acx565akm_read(struct acx565akm_device *md, -+ int reg, u8 *buf, int len) -+{ -+ acx565akm_transfer(md, reg, NULL, 0, buf, len); -+} -+ -+static void hw_guard_start(struct acx565akm_device *md, int guard_msec) -+{ -+ md->hw_guard_wait = msecs_to_jiffies(guard_msec); -+ md->hw_guard_end = jiffies + md->hw_guard_wait; -+} -+ -+static void hw_guard_wait(struct acx565akm_device *md) -+{ -+ unsigned long wait = md->hw_guard_end - jiffies; -+ -+ if ((long)wait > 0 && wait <= md->hw_guard_wait) { -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ schedule_timeout(wait); -+ } -+} -+ -+static void set_sleep_mode(struct acx565akm_device *md, int on) -+{ -+ int cmd; -+ -+ if (on) -+ cmd = MIPID_CMD_SLEEP_IN; -+ else -+ cmd = MIPID_CMD_SLEEP_OUT; -+ /* -+ * We have to keep 120msec between sleep in/out commands. -+ * (8.2.15, 8.2.16). -+ */ -+ hw_guard_wait(md); -+ acx565akm_cmd(md, cmd); -+ hw_guard_start(md, 120); -+} -+ -+static void set_display_state(struct acx565akm_device *md, int enabled) -+{ -+ int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF; -+ -+ acx565akm_cmd(md, cmd); -+} -+ -+static int panel_enabled(struct acx565akm_device *md) -+{ -+ u32 disp_status; -+ int enabled; -+ -+ acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4); -+ disp_status = __be32_to_cpu(disp_status); -+ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10)); -+ dev_dbg(&md->spi->dev, -+ "LCD panel %senabled by bootloader (status 0x%04x)\n", -+ enabled ? 
"" : "not ", disp_status); -+ return enabled; -+} -+ -+static void enable_backlight_ctrl(struct acx565akm_device *md, int enable) -+{ -+ u16 ctrl; -+ -+ acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1); -+ if (enable) { -+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON | -+ CTRL_DISP_BACKLIGHT_ON; -+ } else { -+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON | -+ CTRL_DISP_BACKLIGHT_ON); -+ } -+ -+ ctrl |= 1 << 8; -+ acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2); -+} -+ -+static void set_cabc_mode(struct acx565akm_device *md, unsigned mode) -+{ -+ u16 cabc_ctrl; -+ -+ md->cabc_mode = mode; -+ if (!md->enabled) -+ return; -+ cabc_ctrl = 0; -+ acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1); -+ cabc_ctrl &= ~3; -+ cabc_ctrl |= (1 << 8) | (mode & 3); -+ acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2); -+} -+ -+static unsigned get_cabc_mode(struct acx565akm_device *md) -+{ -+ return md->cabc_mode; -+} -+ -+static unsigned get_hw_cabc_mode(struct acx565akm_device *md) -+{ -+ u8 cabc_ctrl; -+ -+ acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1); -+ return cabc_ctrl & 3; -+} -+ -+static int panel_detect(struct acx565akm_device *md) -+{ -+ acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3); -+ dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n", -+ md->display_id[0], md->display_id[1], md->display_id[2]); -+ -+ switch (md->display_id[0]) { -+ case 0x10: -+ md->model = MIPID_VER_ACX565AKM; -+ md->panel.name = "acx565akm"; -+ md->has_bc = 1; -+ md->has_cabc = 1; -+ break; -+ case 0x29: -+ md->model = MIPID_VER_L4F00311; -+ md->panel.name = "l4f00311"; -+ break; -+ case 0x45: -+ md->model = MIPID_VER_LPH8923; -+ md->panel.name = "lph8923"; -+ break; -+ case 0x83: -+ md->model = MIPID_VER_LS041Y3; -+ md->panel.name = "ls041y3"; -+ break; -+ default: -+ md->panel.name = "unknown"; -+ dev_err(&md->spi->dev, "invalid display ID\n"); -+ return -ENODEV; -+ } -+ -+ md->revision = md->display_id[1]; -+ -+ pr_info("omapfb: %s rev %02x LCD detected\n", -+ md->panel.name, md->revision); -+ -+ return 0; -+} -+ -+static int acx565akm_panel_enable(struct omap_display *display) -+{ -+ struct acx565akm_device *md = -+ (struct acx565akm_device*)display->panel->priv; -+ -+ dev_dbg(&md->spi->dev, "%s\n", __func__); -+ -+ mutex_lock(&md->mutex); -+ -+ if (display->hw_config.panel_enable != NULL) -+ display->hw_config.panel_enable(display); -+ -+ if (md->enabled) { -+ dev_dbg(&md->spi->dev, "panel already enabled\n"); -+ mutex_unlock(&md->mutex); -+ return 0; -+ } -+ -+ /* -+ * We have to meet all the following delay requirements: -+ * 1. tRW: reset pulse width 10usec (7.12.1) -+ * 2. tRT: reset cancel time 5msec (7.12.1) -+ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst -+ * case (7.6.2) -+ * 4. 120msec before the sleep out command (7.12.1) -+ */ -+ msleep(120); -+ -+ set_sleep_mode(md, 0); -+ md->enabled = 1; -+ -+ /* 5msec between sleep out and the next command. 
(8.2.16) */ -+ msleep(5); -+ set_display_state(md, 1); -+ set_cabc_mode(md, md->cabc_mode); -+ -+ mutex_unlock(&md->mutex); -+ -+ return acx565akm_bl_update_status(md->bl_dev); -+} -+ -+static void acx565akm_panel_disable(struct omap_display *display) -+{ -+ struct acx565akm_device *md = -+ (struct acx565akm_device*)display->panel->priv; -+ -+ dev_dbg(&md->spi->dev, "%s\n", __func__); -+ -+ mutex_lock(&md->mutex); -+ -+ if (!md->enabled) { -+ mutex_unlock(&md->mutex); -+ return; -+ } -+ set_display_state(md, 0); -+ set_sleep_mode(md, 1); -+ md->enabled = 0; -+ /* -+ * We have to provide PCLK,HS,VS signals for 2 frames (worst case -+ * ~50msec) after sending the sleep in command and asserting the -+ * reset signal. We probably could assert the reset w/o the delay -+ * but we still delay to avoid possible artifacts. (7.6.1) -+ */ -+ msleep(50); -+ -+ if (display->hw_config.panel_disable != NULL) -+ display->hw_config.panel_disable(display); -+ -+ mutex_unlock(&md->mutex); -+} -+ -+#if 0 -+static void acx565akm_set_mode(struct omap_display *display, -+ int x_res, int y_res, int bpp) -+{ -+ struct acx565akm_device *md = -+ (struct acx565akm_device*)display->panel->priv; -+ u16 par; -+ -+ switch (bpp) { -+ case 16: -+ par = 0x150; -+ break; -+ case 18: -+ par = 0x160; -+ break; -+ case 24: -+ par = 0x170; -+ break; -+ } -+ -+ acx565akm_write(md, 0x3a, (u8 *)&par, 2); -+} -+#endif -+ -+static int acx565akm_panel_suspend(struct omap_display *display) -+{ -+ acx565akm_panel_disable(display); -+ return 0; -+} -+ -+static int acx565akm_panel_resume(struct omap_display *display) -+{ -+ return acx565akm_panel_enable(display); -+} -+ -+static void acx565akm_set_brightness(struct acx565akm_device *md, int level) -+{ -+ int bv; -+ -+ bv = level | (1 << 8); -+ acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2); -+ -+ if (level) -+ enable_backlight_ctrl(md, 1); -+ else -+ enable_backlight_ctrl(md, 0); -+} -+ -+static int acx565akm_get_actual_brightness(struct acx565akm_device *md) -+{ -+ u8 bv; -+ -+ acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1); -+ -+ return bv; -+} -+ -+static int acx565akm_bl_update_status(struct backlight_device *dev) -+{ -+ struct acx565akm_device *md = dev_get_drvdata(&dev->dev); -+ struct omap_display *display = md->display; -+ int r; -+ int level; -+ -+ dev_dbg(&md->spi->dev, "%s\n", __func__); -+ -+ mutex_lock(&md->mutex); -+ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) -+ level = dev->props.brightness; -+ else -+ level = 0; -+ -+ r = 0; -+ if (md->has_bc) -+ acx565akm_set_brightness(md, level); -+ else if (display->hw_config.set_backlight != NULL) -+ r = display->hw_config.set_backlight(display, level); -+ else -+ r = -ENODEV; -+ -+ mutex_unlock(&md->mutex); -+ -+ return r; -+} -+ -+static int acx565akm_bl_get_intensity(struct backlight_device *dev) -+{ -+ struct acx565akm_device *md = dev_get_drvdata(&dev->dev); -+ struct omap_display *display = md->display; -+ -+ dev_dbg(&dev->dev, "%s\n", __func__); -+ -+ if (!md->has_bc && display->hw_config.set_backlight == NULL) -+ return -ENODEV; -+ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) { -+ if (md->has_bc) -+ return acx565akm_get_actual_brightness(md); -+ else -+ return dev->props.brightness; -+ } -+ -+ return 0; -+} -+ -+static struct backlight_ops acx565akm_bl_ops = { -+ .get_brightness = acx565akm_bl_get_intensity, -+ .update_status = acx565akm_bl_update_status, -+}; -+ -+static const char *cabc_modes[] = { -+ 
"off", /* used also always when CABC is not supported */ -+ "ui", -+ "still-image", -+ "moving-image", -+}; -+ -+static ssize_t show_cabc_mode(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct acx565akm_device *md = dev_get_drvdata(dev); -+ const char *mode_str; -+ int mode; -+ int len; -+ -+ if (!md->has_cabc) -+ mode = 0; -+ else -+ mode = get_cabc_mode(md); -+ mode_str = "unknown"; -+ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) -+ mode_str = cabc_modes[mode]; -+ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); -+ -+ return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1; -+} -+ -+static ssize_t store_cabc_mode(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct acx565akm_device *md = dev_get_drvdata(dev); -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { -+ const char *mode_str = cabc_modes[i]; -+ int cmp_len = strlen(mode_str); -+ -+ if (count > 0 && buf[count - 1] == '\n') -+ count--; -+ if (count != cmp_len) -+ continue; -+ -+ if (strncmp(buf, mode_str, cmp_len) == 0) -+ break; -+ } -+ -+ if (i == ARRAY_SIZE(cabc_modes)) -+ return -EINVAL; -+ -+ if (!md->has_cabc && i != 0) -+ return -EINVAL; -+ -+ mutex_lock(&md->mutex); -+ set_cabc_mode(md, i); -+ mutex_unlock(&md->mutex); -+ -+ return count; -+} -+ -+static ssize_t show_cabc_available_modes(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct acx565akm_device *md = dev_get_drvdata(dev); -+ int len; -+ int i; -+ -+ if (!md->has_cabc) -+ return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); -+ -+ for (i = 0, len = 0; -+ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) -+ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", -+ i ? " " : "", cabc_modes[i], -+ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); -+ -+ return len < PAGE_SIZE ? len : PAGE_SIZE - 1; -+} -+ -+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, -+ show_cabc_mode, store_cabc_mode); -+static DEVICE_ATTR(cabc_available_modes, S_IRUGO, -+ show_cabc_available_modes, NULL); -+ -+static struct attribute *bldev_attrs[] = { -+ &dev_attr_cabc_mode.attr, -+ &dev_attr_cabc_available_modes.attr, -+ NULL, -+}; -+ -+static struct attribute_group bldev_attr_group = { -+ .attrs = bldev_attrs, -+}; -+ -+static int acx565akm_panel_init(struct omap_display *display) -+{ -+ struct omap_panel *panel = display->panel; -+ struct acx565akm_panel_data *panel_data = display->hw_config.panel_data; -+ struct acx565akm_device *md = (struct acx565akm_device*)panel->priv; -+ -+ struct backlight_device *bldev; -+ int brightness; -+ int max_brightness; -+ int r; -+ -+ dev_dbg(&md->spi->dev, "%s\n", __func__); -+ -+ if (!panel_data) { -+ dev_err(&md->spi->dev, "no panel data\n"); -+ return -ENODEV; -+ } -+ -+ mutex_init(&md->mutex); -+ md->display = display; -+ -+ if (display->hw_config.panel_enable != NULL) -+ display->hw_config.panel_enable(display); -+ /* -+ * After reset we have to wait 5 msec before the first -+ * command can be sent. 
-+ */ -+ msleep(5); -+ -+ md->enabled = panel_enabled(md); -+ -+ r = panel_detect(md); -+ if (r) { -+ if (!md->enabled && display->hw_config.panel_disable != NULL) -+ display->hw_config.panel_disable(display); -+ mutex_unlock(&md->mutex); -+ return r; -+ } -+ -+ if (!panel_data->bc_connected) { -+ md->has_bc = 0; -+ md->has_cabc = 0; -+ } -+ -+#if 0 -+ acx565akm_set_mode(display, panel->timings.x_res, panel->timings.y_res, -+ panel->bpp); -+#endif -+ -+ if (!md->enabled) -+ display->hw_config.panel_disable(display); -+ -+ bldev = backlight_device_register("acx565akm", &md->spi->dev, -+ md, &acx565akm_bl_ops); -+ md->bl_dev = bldev; -+ -+ if (md->has_cabc) { -+ r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); -+ if (r) { -+ dev_err(&bldev->dev, "failed to create sysfs files\n"); -+ backlight_device_unregister(bldev); -+ return r; -+ } -+ md->cabc_mode = get_hw_cabc_mode(md); -+ } -+ -+ bldev->props.fb_blank = FB_BLANK_UNBLANK; -+ bldev->props.power = FB_BLANK_UNBLANK; -+ -+ if (md->has_bc) -+ max_brightness = 255; -+ else -+ max_brightness = display->hw_config.max_backlight_level; -+ -+ if (md->has_bc) -+ brightness = acx565akm_get_actual_brightness(md); -+ else if (display->hw_config.get_backlight != NULL) -+ brightness = display->hw_config.get_backlight(display); -+ else -+ brightness = 0; -+ -+ bldev->props.max_brightness = max_brightness; -+ bldev->props.brightness = brightness; -+ acx565akm_bl_update_status(bldev); -+ -+ return 0; -+} -+ -+static struct omap_panel acx565akm_panel = { -+ .name = "panel-acx565akm", -+ .init = acx565akm_panel_init, -+ .suspend = acx565akm_panel_suspend, -+ .resume = acx565akm_panel_resume, -+ .enable = acx565akm_panel_enable, -+ .disable = acx565akm_panel_disable, -+ -+ .timings = { -+ .x_res = 800, -+ .y_res = 480, -+ -+ .pixel_clock = 24000, -+ -+ .hsw = 4, -+ .hfp = 28, -+ .hbp = 24, -+ -+ .vsw = 3, -+ .vfp = 3, -+ .vbp = 4, -+ }, -+ -+ .config = OMAP_DSS_LCD_TFT | -+ OMAP_DSS_LCD_IVS | -+ OMAP_DSS_LCD_IHS, -+ -+ .recommended_bpp = 16, -+ -+ /* -+ * supported modes: 12bpp(444), 16bpp(565), 18bpp(666), 24bpp(888) -+ * resolutions. 
-+ */ -+}; -+ -+static int acx565akm_spi_probe(struct spi_device *spi) -+{ -+ struct acx565akm_device *md; -+ -+ dev_dbg(&spi->dev, "%s\n", __func__); -+ -+ md = kzalloc(sizeof(*md), GFP_KERNEL); -+ if (md == NULL) { -+ dev_err(&spi->dev, "out of memory\n"); -+ return -ENOMEM; -+ } -+ -+ spi->mode = SPI_MODE_3; -+ md->spi = spi; -+ dev_set_drvdata(&spi->dev, md); -+ md->panel = acx565akm_panel; -+ acx565akm_panel.priv = md; -+ -+ omap_dss_register_panel(&acx565akm_panel); -+ -+ return 0; -+} -+ -+static int acx565akm_spi_remove(struct spi_device *spi) -+{ -+ struct acx565akm_device *md = dev_get_drvdata(&spi->dev); -+ -+ dev_dbg(&md->spi->dev, "%s\n", __func__); -+ -+ sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group); -+ backlight_device_unregister(md->bl_dev); -+ omap_dss_unregister_panel(&acx565akm_panel); -+ -+ kfree(md); -+ -+ return 0; -+} -+ -+static struct spi_driver acx565akm_spi_driver = { -+ .driver = { -+ .name = "acx565akm", -+ .bus = &spi_bus_type, -+ .owner = THIS_MODULE, -+ }, -+ .probe = acx565akm_spi_probe, -+ .remove = __devexit_p(acx565akm_spi_remove), -+}; -+ -+static int __init acx565akm_init(void) -+{ -+ return spi_register_driver(&acx565akm_spi_driver); -+} -+ -+static void __exit acx565akm_exit(void) -+{ -+ spi_unregister_driver(&acx565akm_spi_driver); -+} -+ -+module_init(acx565akm_init); -+module_exit(acx565akm_exit); -+ -+MODULE_AUTHOR("Tomi Valkeinen "); -+MODULE_DESCRIPTION("acx565akm LCD Driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-acx565akm.h linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-acx565akm.h ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-acx565akm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-acx565akm.h 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,9 @@ -+#ifndef __DRIVERS_VIDEO_OMAP2_DISPLAYS_PANEL_ACX565AKM_H -+#define __DRIVERS_VIDEO_OMAP2_DISPLAYS_PANEL_ACX565AKM_H -+ -+struct acx565akm_panel_data { -+ unsigned bc_connected : 1; -+}; -+ -+#endif -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-generic.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-generic.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-generic.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-generic.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,96 @@ -+/* -+ * Generic panel support -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#include -+#include -+ -+#include -+ -+static int generic_panel_init(struct omap_display *display) -+{ -+ return 0; -+} -+ -+static int generic_panel_enable(struct omap_display *display) -+{ -+ int r = 0; -+ -+ if (display->hw_config.panel_enable) -+ r = display->hw_config.panel_enable(display); -+ -+ return r; -+} -+ -+static void generic_panel_disable(struct omap_display *display) -+{ -+ if (display->hw_config.panel_disable) -+ display->hw_config.panel_disable(display); -+} -+ -+static int generic_panel_suspend(struct omap_display *display) -+{ -+ generic_panel_disable(display); -+ return 0; -+} -+ -+static int generic_panel_resume(struct omap_display *display) -+{ -+ return generic_panel_enable(display); -+} -+ -+static struct omap_panel generic_panel = { -+ .owner = THIS_MODULE, -+ .name = "panel-generic", -+ .init = generic_panel_init, -+ .enable = generic_panel_enable, -+ .disable = generic_panel_disable, -+ .suspend = generic_panel_suspend, -+ .resume = generic_panel_resume, -+ -+ .timings = { -+ /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */ -+ .x_res = 640, -+ .y_res = 480, -+ .pixel_clock = 23500, -+ .hfp = 48, -+ .hsw = 32, -+ .hbp = 80, -+ .vfp = 3, -+ .vsw = 4, -+ .vbp = 7, -+ }, -+ -+ .config = OMAP_DSS_LCD_TFT, -+}; -+ -+ -+static int __init generic_panel_drv_init(void) -+{ -+ omap_dss_register_panel(&generic_panel); -+ return 0; -+} -+ -+static void __exit generic_panel_drv_exit(void) -+{ -+ omap_dss_unregister_panel(&generic_panel); -+} -+ -+module_init(generic_panel_drv_init); -+module_exit(generic_panel_drv_exit); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-nevada.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-nevada.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-nevada.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-nevada.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,558 @@ -+/* -+ * DMIF-S99AL-V225 -+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#define MIPID_CMD_READ_DISP_ID 0x04 -+#define MIPID_CMD_READ_RED 0x06 -+#define MIPID_CMD_READ_GREEN 0x07 -+#define MIPID_CMD_READ_BLUE 0x08 -+#define MIPID_CMD_READ_DISP_STATUS 0x09 -+#define MIPID_CMD_RDDSDR 0x0F -+#define MIPID_CMD_SLEEP_IN 0x10 -+#define MIPID_CMD_SLEEP_OUT 0x11 -+#define MIPID_CMD_DISP_OFF 0x28 -+#define MIPID_CMD_DISP_ON 0x29 -+ -+#define MIPID_VER_LPH8923 3 -+#define MIPID_VER_LS041Y3 4 -+#define MIPID_VER_L4F00311 8 -+ -+#ifdef DEBUG -+#define DBG(format, ...) printk(KERN_DEBUG "Nevada: " format, ## __VA_ARGS__) -+#else -+#define DBG(format, ...) 
-+#endif -+ -+struct nevada_device { -+ struct backlight_device *bl_dev; -+ int enabled; -+ int model; -+ int revision; -+ u8 display_id[3]; -+ unsigned int saved_bklight_level; -+ unsigned long hw_guard_end; /* next value of jiffies -+ when we can issue the -+ next sleep in/out command */ -+ unsigned long hw_guard_wait; /* max guard time in jiffies */ -+ -+ struct spi_device *spi; -+ struct mutex mutex; -+ struct omap_panel panel; -+ struct omap_display *display; -+}; -+ -+ -+static void nevada_transfer(struct nevada_device *md, int cmd, -+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen) -+{ -+ struct spi_message m; -+ struct spi_transfer *x, xfer[4]; -+ u16 w; -+ int r; -+ -+ BUG_ON(md->spi == NULL); -+ -+ spi_message_init(&m); -+ -+ memset(xfer, 0, sizeof(xfer)); -+ x = &xfer[0]; -+ -+ cmd &= 0xff; -+ x->tx_buf = &cmd; -+ x->bits_per_word = 9; -+ x->len = 2; -+ spi_message_add_tail(x, &m); -+ -+ if (wlen) { -+ x++; -+ x->tx_buf = wbuf; -+ x->len = wlen; -+ x->bits_per_word = 9; -+ spi_message_add_tail(x, &m); -+ } -+ -+ if (rlen) { -+ x++; -+ x->rx_buf = &w; -+ x->len = 1; -+ spi_message_add_tail(x, &m); -+ -+ if (rlen > 1) { -+ /* Arrange for the extra clock before the first -+ * data bit. -+ */ -+ x->bits_per_word = 9; -+ x->len = 2; -+ -+ x++; -+ x->rx_buf = &rbuf[1]; -+ x->len = rlen - 1; -+ spi_message_add_tail(x, &m); -+ } -+ } -+ -+ r = spi_sync(md->spi, &m); -+ if (r < 0) -+ dev_dbg(&md->spi->dev, "spi_sync %d\n", r); -+ -+ if (rlen) -+ rbuf[0] = w & 0xff; -+} -+ -+static inline void nevada_cmd(struct nevada_device *md, int cmd) -+{ -+ nevada_transfer(md, cmd, NULL, 0, NULL, 0); -+} -+ -+static inline void nevada_write(struct nevada_device *md, -+ int reg, const u8 *buf, int len) -+{ -+ nevada_transfer(md, reg, buf, len, NULL, 0); -+} -+ -+static inline void nevada_read(struct nevada_device *md, -+ int reg, u8 *buf, int len) -+{ -+ nevada_transfer(md, reg, NULL, 0, buf, len); -+} -+ -+#if 0 -+static void send_init_string(struct nevada_device *md) -+{ -+ u8 initpar1[] = { 0xa1, 0x90, 0x86, 0x00, 0x00, 0x00 }; -+ u8 initpar2[] = { 0xa0, 0x9f, 0x80, 0x8e, 0xae, 0x90, 0x8e, 0 }; -+ u8 initpar3[] = { 0x0c, 0x0c, 0x00, 0x00, 0x0a, 0x0a }; -+ -+ DBG("nevada: sending init string\n"); -+ -+ nevada_write(md, 0xc2, initpar1, sizeof(initpar1)); -+ nevada_write(md, 0xb8, initpar2, sizeof(initpar2)); -+ nevada_write(md, 0xc0, initpar3, sizeof(initpar3)); -+} -+#endif -+ -+static void hw_guard_start(struct nevada_device *md, int guard_msec) -+{ -+ md->hw_guard_wait = msecs_to_jiffies(guard_msec); -+ md->hw_guard_end = jiffies + md->hw_guard_wait; -+} -+ -+static void hw_guard_wait(struct nevada_device *md) -+{ -+ unsigned long wait = md->hw_guard_end - jiffies; -+ -+ if ((long)wait > 0 && wait <= md->hw_guard_wait) { -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ schedule_timeout(wait); -+ } -+} -+ -+static void set_sleep_mode(struct nevada_device *md, int on) -+{ -+ int cmd, sleep_time; -+ -+ if (on) -+ cmd = MIPID_CMD_SLEEP_IN; -+ else -+ cmd = MIPID_CMD_SLEEP_OUT; -+ hw_guard_wait(md); -+ nevada_cmd(md, cmd); -+ hw_guard_start(md, 120); -+ /* -+ * When disabling the -+ * panel we'll sleep for the duration of 2 frames, so that the -+ * controller can still provide the PCLK,HS,VS signals. */ -+ if (on) -+ sleep_time = 50; -+ else -+ sleep_time = 5; -+ msleep(sleep_time); -+} -+ -+static void set_display_state(struct nevada_device *md, int enabled) -+{ -+ int cmd = enabled ? 
MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF; -+ -+ nevada_cmd(md, cmd); -+} -+ -+ -+ -+static int panel_enabled(struct nevada_device *md) -+{ -+ u32 disp_status; -+ int enabled; -+ -+ nevada_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4); -+ disp_status = __be32_to_cpu(disp_status); -+ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10)); -+ dev_dbg(&md->spi->dev, -+ "LCD panel %s enabled by bootloader (status 0x%04x)\n", -+ enabled ? "" : "not ", disp_status); -+ DBG("status %#08x\n", disp_status); -+ return enabled; -+} -+ -+static void panel_test_dump(struct nevada_device *md) -+{ -+ { -+ u8 r, g, b; -+ nevada_read(md, 0x6, &r, 1); -+ nevada_read(md, 0x7, &g, 1); -+ nevada_read(md, 0x8, &b, 1); -+ DBG("rgb %x,%x,%x\n", r, g, b); -+ -+ } -+ -+ { -+ u32 val; -+ nevada_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&val, 4); -+ val = __be32_to_cpu(val); -+ DBG("status %#08x\n", val); -+ } -+ -+ { -+ u8 val; -+ nevada_read(md, 0x5, (u8 *)&val, 1); -+ DBG("parity errors %#x\n", val); -+ } -+ -+ { -+ u8 val; -+ nevada_read(md, 0xc, (u8 *)&val, 1); -+ DBG("pixformat %#x == 0x71\n", val); -+ } -+ { -+ u8 val; -+ nevada_read(md, 0xe, (u8 *)&val, 1); -+ DBG("signal mode %#x: %s %s %s %s %s\n", val, -+ (val & (1<<5)) ? "HS" : "", -+ (val & (1<<4)) ? "VS" : "", -+ (val & (1<<3)) ? "PC" : "", -+ (val & (1<<2)) ? "DE" : "", -+ (val & (1<<0)) ? "parity error" : "" -+ ); -+ } -+ { -+ u8 val; -+ nevada_read(md, 0xa, (u8 *)&val, 1); -+ DBG("power mode %#x: %s %s %s %s %s\n", val, -+ (val & (1<<7)) ? "Booster" : "", -+ (val & (1<<5)) ? "Partial" : "", -+ (val & (1<<4)) ? "SleepOut" : "SleepIn", -+ (val & (1<<3)) ? "Normal" : "", -+ (val & (1<<2)) ? "DispOn" : "DispOff" -+ ); -+ } -+} -+ -+ -+static int panel_detect(struct nevada_device *md) -+{ -+ nevada_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3); -+ dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n", -+ md->display_id[0], md->display_id[1], md->display_id[2]); -+ -+ DBG("MIPI display ID: %02x%02x%02x\n", -+ md->display_id[0], md->display_id[1], md->display_id[2]); -+ -+ /* only TX is connected, we can't read from nevada */ -+#if 0 -+ switch (md->display_id[0]) { -+ case 0xe3: -+ md->model = MIPID_VER_L4F00311; -+ md->panel.name = "nevada"; -+ break; -+ default: -+ md->panel.name = "unknown"; -+ dev_err(&md->spi->dev, "invalid display ID\n"); -+ return -ENODEV; -+ } -+#else -+ md->model = MIPID_VER_L4F00311; -+ md->panel.name = "nevada"; -+#endif -+ -+ md->revision = md->display_id[1]; -+ -+ pr_info("omapfb: %s rev %02x LCD detected\n", -+ md->panel.name, md->revision); -+ -+ return 0; -+} -+ -+ -+ -+static int nevada_panel_enable(struct omap_display *display) -+{ -+ int r; -+ struct nevada_device *md = -+ (struct nevada_device *)display->panel->priv; -+ -+ DBG("nevada_panel_enable\n"); -+ -+ mutex_lock(&md->mutex); -+ -+ if (display->hw_config.panel_enable) -+ display->hw_config.panel_enable(display); -+ -+ r = panel_detect(md); -+ if (r) { -+ mutex_unlock(&md->mutex); -+ return r; -+ } -+ -+ md->enabled = panel_enabled(md); -+ -+ if (md->enabled) { -+ DBG("panel already enabled\n"); -+ ; /*nevada_esd_start_check(md);*/ -+ } else { -+ ; /*md->saved_bklight_level = nevada_get_bklight_level(panel);*/ -+ } -+ -+ -+ if (md->enabled) { -+ mutex_unlock(&md->mutex); -+ return 0; -+ } -+ -+ /*nevada_cmd(md, 0x1);*/ /* SW reset */ -+ /*msleep(120);*/ -+ -+ /*send_init_string(md);*/ -+ -+ set_sleep_mode(md, 0); -+ md->enabled = 1; -+ -+ /*panel_test_dump(md);*/ -+ -+ /*for(r = 0; r < 500; r++)*/ -+ /*send_init_string(md);*/ -+ -+ 
set_display_state(md, 1); -+ /*nevada_set_bklight_level(panel, md->saved_bklight_level);*/ -+ -+ panel_test_dump(md); -+ nevada_cmd(md, 0x13); /* normal mode XXX */ -+ -+ /*msleep(500);*/ -+ panel_test_dump(md); -+ -+ mutex_unlock(&md->mutex); -+ return 0; -+} -+ -+static void nevada_panel_disable(struct omap_display *display) -+{ -+ struct nevada_device *md = -+ (struct nevada_device *)display->panel->priv; -+ -+ DBG("nevada_panel_disable\n"); -+ -+ mutex_lock(&md->mutex); -+ -+ if (!md->enabled) { -+ mutex_unlock(&md->mutex); -+ return; -+ } -+ /*md->saved_bklight_level = nevada_get_bklight_level(panel);*/ -+ /*nevada_set_bklight_level(panel, 0);*/ -+ -+ if (display->hw_config.set_backlight) -+ display->hw_config.set_backlight(display, 0); -+ -+ set_display_state(md, 0); -+ set_sleep_mode(md, 1); -+ md->enabled = 0; -+ -+ -+ if (display->hw_config.panel_disable) -+ display->hw_config.panel_disable(display); -+ -+ mutex_unlock(&md->mutex); -+} -+ -+static int nevada_bl_update_status(struct backlight_device *dev) -+{ -+ struct nevada_device *md = dev_get_drvdata(&dev->dev); -+ struct omap_display *display = md->display; -+ int r; -+ int level; -+ -+ if (!display->hw_config.set_backlight) -+ return -EINVAL; -+ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) -+ level = dev->props.brightness; -+ else -+ level = 0; -+ -+ r = display->hw_config.set_backlight(display, level); -+ if (r) -+ return r; -+ -+ return 0; -+} -+ -+static int nevada_bl_get_intensity(struct backlight_device *dev) -+{ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) -+ return dev->props.brightness; -+ -+ return 0; -+} -+ -+static struct backlight_ops omapbl_ops = { -+ .get_brightness = nevada_bl_get_intensity, -+ .update_status = nevada_bl_update_status, -+}; -+ -+static int nevada_panel_init(struct omap_display *display) -+{ -+ struct nevada_device *md = -+ (struct nevada_device *)display->panel->priv; -+ struct backlight_device *bldev; -+ -+ DBG("nevada_panel_init\n"); -+ -+ mutex_init(&md->mutex); -+ md->display = display; -+ -+ bldev = backlight_device_register("nevada", &md->spi->dev, -+ md, &omapbl_ops); -+ md->bl_dev = bldev; -+ -+ bldev->props.fb_blank = FB_BLANK_UNBLANK; -+ bldev->props.power = FB_BLANK_UNBLANK; -+ bldev->props.max_brightness = 127; -+ bldev->props.brightness = 127; -+ -+ nevada_bl_update_status(bldev); -+ -+ return 0; -+} -+ -+static int nevada_run_test(struct omap_display *display, int test_num) -+{ -+ return 0; -+} -+ -+static struct omap_panel nevada_panel = { -+ .owner = THIS_MODULE, -+ .name = "panel-nevada", -+ .init = nevada_panel_init, -+ /*.remove = nevada_cleanup,*/ -+ .enable = nevada_panel_enable, -+ .disable = nevada_panel_disable, -+ .run_test = nevada_run_test, -+ -+ /* -+ * 640*360 = 230400 pixels -+ * 640*360*60 = 13824000 pixels per second -+ * -+ */ -+ .timings = { -+ .x_res = 640, -+ .y_res = 360, -+ -+ .pixel_clock = ((640+4+4+4) * (360+4+4+4) * 60) / 1000, -+ .hsw = 2, -+ .hfp = 10, -+ .hbp = 20, -+ -+ .vsw = 3, -+ .vfp = 3, -+ .vbp = 3, -+ }, -+ .config = OMAP_DSS_LCD_TFT, -+ -+ /* supported modes: 12bpp(444), 16bpp(565), 18bpp(666), 24bpp(888) -+ * resolutions */ -+}; -+ -+static int nevada_spi_probe(struct spi_device *spi) -+{ -+ struct nevada_device *md; -+ -+ DBG("nevada_spi_probe\n"); -+ -+ md = kzalloc(sizeof(*md), GFP_KERNEL); -+ if (md == NULL) { -+ dev_err(&spi->dev, "out of memory\n"); -+ return -ENOMEM; -+ } -+ -+ spi->mode = SPI_MODE_0; -+ md->spi = spi; -+ 
dev_set_drvdata(&spi->dev, md); -+ md->panel = nevada_panel; -+ nevada_panel.priv = md; -+ -+ omap_dss_register_panel(&nevada_panel); -+ -+ return 0; -+} -+ -+static int nevada_spi_remove(struct spi_device *spi) -+{ -+ struct nevada_device *md = dev_get_drvdata(&spi->dev); -+ struct backlight_device *dev = md->bl_dev; -+ -+ DBG("nevada_spi_remove\n"); -+ -+ backlight_device_unregister(dev); -+ omap_dss_unregister_panel(&nevada_panel); -+ -+ /*nevada_disable(&md->panel);*/ -+ kfree(md); -+ -+ return 0; -+} -+ -+static struct spi_driver nevada_spi_driver = { -+ .driver = { -+ .name = "nevada", -+ .bus = &spi_bus_type, -+ .owner = THIS_MODULE, -+ }, -+ .probe = nevada_spi_probe, -+ .remove = __devexit_p(nevada_spi_remove), -+}; -+ -+static int __init nevada_init(void) -+{ -+ DBG("nevada_init\n"); -+ return spi_register_driver(&nevada_spi_driver); -+} -+ -+static void __exit nevada_exit(void) -+{ -+ DBG("nevada_exit\n"); -+ spi_unregister_driver(&nevada_spi_driver); -+} -+ -+module_init(nevada_init); -+module_exit(nevada_exit); -+ -+MODULE_AUTHOR("Tomi Valkeinen "); -+MODULE_DESCRIPTION("Caucasus LCD Driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-samsung-lte430wq-f0c.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-samsung-lte430wq-f0c.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-samsung-lte430wq-f0c.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-samsung-lte430wq-f0c.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,108 @@ -+/* -+ * LCD panel driver for Samsung LTE430WQ-F0C -+ * -+ * Author: Steve Sakoman -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#include -+#include -+ -+#include -+ -+static int samsung_lte_panel_init(struct omap_display *display) -+{ -+ return 0; -+} -+ -+static void samsung_lte_panel_cleanup(struct omap_display *display) -+{ -+} -+ -+static int samsung_lte_panel_enable(struct omap_display *display) -+{ -+ int r = 0; -+ -+ /* wait couple of vsyncs until enabling the LCD */ -+ msleep(50); -+ -+ if (display->hw_config.panel_enable) -+ r = display->hw_config.panel_enable(display); -+ -+ return r; -+} -+ -+static void samsung_lte_panel_disable(struct omap_display *display) -+{ -+ if (display->hw_config.panel_disable) -+ display->hw_config.panel_disable(display); -+ -+ /* wait at least 5 vsyncs after disabling the LCD */ -+ msleep(100); -+} -+ -+static int samsung_lte_panel_suspend(struct omap_display *display) -+{ -+ samsung_lte_panel_disable(display); -+ return 0; -+} -+ -+static int samsung_lte_panel_resume(struct omap_display *display) -+{ -+ return samsung_lte_panel_enable(display); -+} -+ -+static struct omap_panel samsung_lte_panel = { -+ .owner = THIS_MODULE, -+ .name = "samsung-lte430wq-f0c", -+ .init = samsung_lte_panel_init, -+ .cleanup = samsung_lte_panel_cleanup, -+ .enable = samsung_lte_panel_enable, -+ .disable = samsung_lte_panel_disable, -+ .suspend = samsung_lte_panel_suspend, -+ .resume = samsung_lte_panel_resume, -+ -+ .timings = { -+ .x_res = 480, -+ .y_res = 272, -+ -+ .pixel_clock = 9200, -+ -+ .hsw = 41, -+ .hfp = 8, -+ .hbp = 45-41, -+ -+ .vsw = 10, -+ .vfp = 4, -+ .vbp = 12-10, -+ }, -+ -+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IVS, -+}; -+ -+ -+static int __init samsung_lte_panel_drv_init(void) -+{ -+ omap_dss_register_panel(&samsung_lte_panel); -+ return 0; -+} -+ -+static void __exit samsung_lte_panel_drv_exit(void) -+{ -+ omap_dss_unregister_panel(&samsung_lte_panel); -+} -+ -+module_init(samsung_lte_panel_drv_init); -+module_exit(samsung_lte_panel_drv_exit); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,112 @@ -+/* -+ * LCD panel driver for Sharp LS037V7DW01 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#include -+#include -+ -+#include -+ -+static int sharp_ls_panel_init(struct omap_display *display) -+{ -+ return 0; -+} -+ -+static void sharp_ls_panel_cleanup(struct omap_display *display) -+{ -+} -+ -+static int sharp_ls_panel_enable(struct omap_display *display) -+{ -+ int r = 0; -+ -+ /* wait couple of vsyncs until enabling the LCD */ -+ msleep(50); -+ -+ if (display->hw_config.panel_enable) -+ r = display->hw_config.panel_enable(display); -+ -+ return r; -+} -+ -+static void sharp_ls_panel_disable(struct omap_display *display) -+{ -+ if (display->hw_config.panel_disable) -+ display->hw_config.panel_disable(display); -+ -+ /* wait at least 5 vsyncs after disabling the LCD */ -+ -+ msleep(100); -+} -+ -+static int sharp_ls_panel_suspend(struct omap_display *display) -+{ -+ sharp_ls_panel_disable(display); -+ return 0; -+} -+ -+static int sharp_ls_panel_resume(struct omap_display *display) -+{ -+ return sharp_ls_panel_enable(display); -+} -+ -+static struct omap_panel sharp_ls_panel = { -+ .owner = THIS_MODULE, -+ .name = "sharp-ls037v7dw01", -+ .init = sharp_ls_panel_init, -+ .cleanup = sharp_ls_panel_cleanup, -+ .enable = sharp_ls_panel_enable, -+ .disable = sharp_ls_panel_disable, -+ .suspend = sharp_ls_panel_suspend, -+ .resume = sharp_ls_panel_resume, -+ -+ .timings = { -+ .x_res = 480, -+ .y_res = 640, -+ -+ .pixel_clock = 19200, -+ -+ .hsw = 2, -+ .hfp = 1, -+ .hbp = 28, -+ -+ .vsw = 1, -+ .vfp = 1, -+ .vbp = 1, -+ }, -+ -+ .acb = 0x28, -+ -+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS, -+}; -+ -+ -+static int __init sharp_ls_panel_drv_init(void) -+{ -+ omap_dss_register_panel(&sharp_ls_panel); -+ return 0; -+} -+ -+static void __exit sharp_ls_panel_drv_exit(void) -+{ -+ omap_dss_unregister_panel(&sharp_ls_panel); -+} -+ -+module_init(sharp_ls_panel_drv_init); -+module_exit(sharp_ls_panel_drv_exit); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-taal.c linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-taal.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/displays/panel-taal.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/displays/panel-taal.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,620 @@ -+/* -+ * Taal -+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+/* DSI Virtual channel. Hardcoded for now. */ -+#define TCH 0 -+ -+#define DCS_READ_POWER_MODE 0x0a -+#define DCS_READ_MADCTL 0x0b -+#define DCS_READ_PIXEL_FORMAT 0x0c -+#define DCS_SLEEP_IN 0x10 -+#define DCS_SLEEP_OUT 0x11 -+#define DCS_DISPLAY_OFF 0x28 -+#define DCS_DISPLAY_ON 0x29 -+#define DCS_COLUMN_ADDR 0x2a -+#define DCS_PAGE_ADDR 0x2b -+#define DCS_MEMORY_WRITE 0x2c -+#define DCS_TEAR_OFF 0x34 -+#define DCS_TEAR_ON 0x35 -+#define DCS_MEM_ACC_CTRL 0x36 -+#define DCS_PIXEL_FORMAT 0x3a -+#define DCS_GET_ID1 0xda -+#define DCS_GET_ID2 0xdb -+#define DCS_GET_ID3 0xdc -+ -+#ifdef DEBUG -+#define DBG(format, ...) printk(KERN_DEBUG "Taal: " format, ## __VA_ARGS__) -+#else -+#define DBG(format, ...) 
-+#endif -+ -+struct taal_data { -+ struct backlight_device *bldev; -+ -+ unsigned long hw_guard_end; /* next value of jiffies when we can -+ * issue the next sleep in/out command -+ */ -+ unsigned long hw_guard_wait; /* max guard time in jiffies */ -+ -+ struct omap_display *display; -+ -+ bool enabled; -+ u8 rotate; -+ bool mirror; -+}; -+ -+static int taal_dcs_read_1(u8 dcs_cmd, u8 *data) -+{ -+ int r; -+ u8 buf[1]; -+ -+ r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1); -+ -+ if (r < 0) { -+ printk(KERN_ERR "Taal read error\n"); -+ return r; -+ } -+ -+ *data = buf[0]; -+ -+ return 0; -+} -+ -+static int taal_dcs_write_0(u8 dcs_cmd) -+{ -+ return dsi_vc_dcs_write(TCH, &dcs_cmd, 1); -+} -+ -+static int taal_dcs_write_1(u8 dcs_cmd, u8 param) -+{ -+ u8 buf[2]; -+ buf[0] = dcs_cmd; -+ buf[1] = param; -+ return dsi_vc_dcs_write(TCH, buf, 2); -+} -+ -+static void taal_get_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ *timings = display->panel->timings; -+} -+ -+static void taal_get_resolution(struct omap_display *display, -+ u16 *xres, u16 *yres) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ -+ if (td->rotate == 0 || td->rotate == 2) { -+ *xres = display->panel->timings.x_res; -+ *yres = display->panel->timings.y_res; -+ } else { -+ *yres = display->panel->timings.x_res; -+ *xres = display->panel->timings.y_res; -+ } -+} -+ -+static int taal_ctrl_init(struct omap_display *display) -+{ -+ struct taal_data *td; -+ -+ DBG("taal_ctrl_init\n"); -+ -+ td = kzalloc(sizeof(*td), GFP_KERNEL); -+ if (td == NULL) -+ return -ENOMEM; -+ -+ td->display = display; -+ display->ctrl->priv = td; -+ -+ display->get_timings = taal_get_timings; -+ -+ display->get_resolution = taal_get_resolution; -+ -+ return 0; -+} -+ -+static void taal_ctrl_cleanup(struct omap_display *display) -+{ -+ if (display->ctrl->priv) -+ kfree(display->ctrl->priv); -+} -+ -+static void hw_guard_start(struct taal_data *td, int guard_msec) -+{ -+ td->hw_guard_wait = msecs_to_jiffies(guard_msec); -+ td->hw_guard_end = jiffies + td->hw_guard_wait; -+} -+ -+static void hw_guard_wait(struct taal_data *td) -+{ -+ unsigned long wait = td->hw_guard_end - jiffies; -+ -+ if ((long)wait > 0 && wait <= td->hw_guard_wait) { -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ schedule_timeout(wait); -+ } -+} -+ -+static int taal_sleep_enable(struct omap_display *display, bool enable) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ u8 cmd; -+ int r; -+ -+ hw_guard_wait(td); -+ -+ if (enable) { -+ cmd = DCS_SLEEP_IN; -+ r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1); -+ } else { -+ cmd = DCS_SLEEP_OUT; -+ r = dsi_vc_dcs_write(TCH, &cmd, 1); -+ } -+ -+ if (r) -+ return r; -+ -+ hw_guard_start(td, 120); -+ -+ r = dsi_vc_send_null(TCH); -+ if (r) -+ return r; -+ -+ msleep(5); -+ -+ return 0; -+} -+ -+static int taal_get_id(void) -+{ -+ u8 id1, id2, id3; -+ int r; -+ -+ r = taal_dcs_read_1(DCS_GET_ID1, &id1); -+ if (r) -+ return r; -+ r = taal_dcs_read_1(DCS_GET_ID2, &id2); -+ if (r) -+ return r; -+ r = taal_dcs_read_1(DCS_GET_ID3, &id3); -+ if (r) -+ return r; -+ -+ return 0; -+} -+ -+static int taal_set_addr_mode(u8 rotate, bool mirror) -+{ -+ int r; -+ u8 mode; -+ int b5, b6, b7; -+ -+ r = taal_dcs_read_1(DCS_READ_MADCTL, &mode); -+ if (r) -+ return r; -+ -+ switch (rotate) { -+ default: -+ case 0: -+ b7 = 0; -+ b6 = 0; -+ b5 = 0; -+ break; -+ case 1: -+ b7 = 0; -+ b6 = 1; -+ b5 = 1; -+ break; -+ case 2: -+ b7 = 1; -+ b6 = 1; -+ b5 = 0; -+ break; -+ case 3: -+ b7 = 1; -+ b6 = 0; 
-+ b5 = 1; -+ break; -+ } -+ -+ if (mirror) -+ b6 = !b6; -+ -+ mode &= ~((1<<7) | (1<<6) | (1<<5)); -+ mode |= (b7 << 7) | (b6 << 6) | (b5 << 5); -+ -+ return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode); -+} -+ -+static int taal_ctrl_enable(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ int r; -+ -+ DBG("taal_ctrl_enable\n"); -+ -+ if (display->hw_config.ctrl_enable) { -+ r = display->hw_config.ctrl_enable(display); -+ if (r) -+ return r; -+ } -+ -+ /* it seems we have to wait a bit until taal is ready */ -+ msleep(5); -+ -+ r = taal_sleep_enable(display, 0); -+ if (r) -+ return r; -+ -+ r = taal_get_id(); -+ if (r) -+ return r; -+ -+ taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */ -+ -+ taal_set_addr_mode(td->rotate, td->mirror); -+ -+ taal_dcs_write_0(DCS_DISPLAY_ON); -+ -+ td->enabled = 1; -+ -+ return 0; -+} -+ -+static void taal_ctrl_disable(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ -+ taal_dcs_write_0(DCS_DISPLAY_OFF); -+ taal_sleep_enable(display, 1); -+ -+ /* wait a bit so that the message goes through */ -+ msleep(10); -+ -+ if (display->hw_config.ctrl_disable) -+ display->hw_config.ctrl_disable(display); -+ -+ td->enabled = 0; -+} -+ -+static void taal_ctrl_setup_update(struct omap_display *display, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ u16 x1 = x; -+ u16 x2 = x + w - 1; -+ u16 y1 = y; -+ u16 y2 = y + h - 1; -+ -+ u8 buf[5]; -+ buf[0] = DCS_COLUMN_ADDR; -+ buf[1] = (x1 >> 8) & 0xff; -+ buf[2] = (x1 >> 0) & 0xff; -+ buf[3] = (x2 >> 8) & 0xff; -+ buf[4] = (x2 >> 0) & 0xff; -+ -+ dsi_vc_dcs_write(TCH, buf, sizeof(buf)); -+ -+ buf[0] = DCS_PAGE_ADDR; -+ buf[1] = (y1 >> 8) & 0xff; -+ buf[2] = (y1 >> 0) & 0xff; -+ buf[3] = (y2 >> 8) & 0xff; -+ buf[4] = (y2 >> 0) & 0xff; -+ -+ dsi_vc_dcs_write(TCH, buf, sizeof(buf)); -+} -+ -+static int taal_ctrl_enable_te(struct omap_display *display, bool enable) -+{ -+ u8 buf[2]; -+ -+ if (enable) { -+ buf[0] = DCS_TEAR_ON; -+ buf[1] = 0; /* only vertical sync */ -+ dsi_vc_dcs_write(TCH, buf, 2); -+ } else { -+ buf[0] = DCS_TEAR_OFF; -+ dsi_vc_dcs_write(TCH, buf, 1); -+ } -+ -+ return 0; -+} -+ -+static int taal_ctrl_rotate(struct omap_display *display, u8 rotate) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ int r; -+ -+ DBG("taal_ctrl_rotate %d\n", rotate); -+ -+ if (td->enabled) { -+ r = taal_set_addr_mode(rotate, td->mirror); -+ -+ if (r) -+ return r; -+ } -+ -+ td->rotate = rotate; -+ -+ return 0; -+} -+ -+static u8 taal_ctrl_get_rotate(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ return td->rotate; -+} -+ -+static int taal_ctrl_mirror(struct omap_display *display, bool enable) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ int r; -+ -+ DBG("taal_ctrl_mirror %d\n", enable); -+ -+ if (td->enabled) { -+ r = taal_set_addr_mode(td->rotate, enable); -+ -+ if (r) -+ return r; -+ } -+ -+ td->mirror = enable; -+ -+ return 0; -+} -+ -+static bool taal_ctrl_get_mirror(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ return td->mirror; -+} -+ -+static int taal_run_test(struct omap_display *display, int test_num) -+{ -+ u8 id1, id2, id3; -+ int r; -+ -+ r = taal_dcs_read_1(DCS_GET_ID1, &id1); -+ if (r) -+ return r; -+ r = taal_dcs_read_1(DCS_GET_ID2, &id2); -+ if (r) -+ return r; -+ r = taal_dcs_read_1(DCS_GET_ID3, &id3); -+ if (r) -+ return r; -+ -+ return 0; -+} -+ 
-+static int taal_ctrl_memory_read(struct omap_display *display, -+ void *buf, size_t size, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ int r; -+ int first = 1; -+ int plen; -+ unsigned buf_used = 0; -+ -+ if (size < w * h * 3) -+ return -ENOMEM; -+ -+ size = min(w * h * 3, -+ display->panel->timings.x_res * -+ display->panel->timings.y_res * 3); -+ -+ /* plen 1 or 2 goes into short packet. until checksum error is fixed, use -+ * short packets. plen 32 works, but bigger packets seem to cause an -+ * error. */ -+ if (size % 2) -+ plen = 1; -+ else -+ plen = 2; -+ -+ taal_ctrl_setup_update(display, x, y, w, h); -+ -+ r = dsi_vc_set_max_rx_packet_size(TCH, plen); -+ if (r) -+ return r; -+ -+ while (buf_used < size) { -+ u8 dcs_cmd = first ? 0x2e : 0x3e; -+ first = 0; -+ -+ r = dsi_vc_dcs_read(TCH, dcs_cmd, -+ buf + buf_used, size - buf_used); -+ -+ if (r < 0) { -+ printk(KERN_ERR "Taal read error\n"); -+ goto err; -+ } -+ -+ buf_used += r; -+ -+ if (r < plen) { -+ printk("short read\n"); -+ break; -+ } -+ } -+ -+ r = buf_used; -+ -+err: -+ dsi_vc_set_max_rx_packet_size(TCH, 1); -+ -+ return r; -+} -+ -+static struct omap_ctrl taal_ctrl = { -+ .owner = THIS_MODULE, -+ .name = "ctrl-taal", -+ .init = taal_ctrl_init, -+ .cleanup = taal_ctrl_cleanup, -+ .enable = taal_ctrl_enable, -+ .disable = taal_ctrl_disable, -+ .setup_update = taal_ctrl_setup_update, -+ .enable_te = taal_ctrl_enable_te, -+ .set_rotate = taal_ctrl_rotate, -+ .get_rotate = taal_ctrl_get_rotate, -+ .set_mirror = taal_ctrl_mirror, -+ .get_mirror = taal_ctrl_get_mirror, -+ .run_test = taal_run_test, -+ .memory_read = taal_ctrl_memory_read, -+ .pixel_size = 24, -+}; -+ -+ -+/* PANEL */ -+static int taal_bl_update_status(struct backlight_device *dev) -+{ -+ struct omap_display *display = dev_get_drvdata(&dev->dev); -+ int r; -+ int level; -+ -+ if (!display->hw_config.set_backlight) -+ return -EINVAL; -+ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) -+ level = dev->props.brightness; -+ else -+ level = 0; -+ -+ r = display->hw_config.set_backlight(display, level); -+ if (r) -+ return r; -+ -+ return 0; -+} -+ -+static int taal_bl_get_intensity(struct backlight_device *dev) -+{ -+ if (dev->props.fb_blank == FB_BLANK_UNBLANK && -+ dev->props.power == FB_BLANK_UNBLANK) -+ return dev->props.brightness; -+ -+ return 0; -+} -+ -+static struct backlight_ops taal_bl_ops = { -+ .get_brightness = taal_bl_get_intensity, -+ .update_status = taal_bl_update_status, -+}; -+ -+static int taal_panel_init(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ struct backlight_device *bldev; -+ -+ BUG_ON(display->ctrl->priv == NULL); -+ -+ bldev = backlight_device_register("taal", NULL, display, &taal_bl_ops); -+ td->bldev = bldev; -+ -+ bldev->props.fb_blank = FB_BLANK_UNBLANK; -+ bldev->props.power = FB_BLANK_UNBLANK; -+ bldev->props.max_brightness = 127; -+ bldev->props.brightness = 127; -+ -+ taal_bl_update_status(bldev); -+ -+ return 0; -+} -+ -+static void taal_panel_cleanup(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ struct backlight_device *bldev = td->bldev; -+ -+ bldev->props.power = FB_BLANK_POWERDOWN; -+ taal_bl_update_status(bldev); -+ -+ backlight_device_unregister(bldev); -+} -+ -+static int taal_panel_enable(struct omap_display *display) -+{ -+ return 0; -+} -+ -+static void taal_panel_disable(struct omap_display *display) -+{ -+} -+ -+static int taal_panel_suspend(struct omap_display 
*display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ struct backlight_device *bldev = td->bldev; -+ -+ bldev->props.power = FB_BLANK_POWERDOWN; -+ taal_bl_update_status(bldev); -+ -+ return 0; -+} -+ -+static int taal_panel_resume(struct omap_display *display) -+{ -+ struct taal_data *td = (struct taal_data *)display->ctrl->priv; -+ struct backlight_device *bldev = td->bldev; -+ -+ bldev->props.power = FB_BLANK_UNBLANK; -+ taal_bl_update_status(bldev); -+ -+ return 0; -+} -+ -+static struct omap_panel taal_panel = { -+ .owner = THIS_MODULE, -+ .name = "panel-taal", -+ .init = taal_panel_init, -+ .cleanup = taal_panel_cleanup, -+ .enable = taal_panel_enable, -+ .disable = taal_panel_disable, -+ .suspend = taal_panel_suspend, -+ .resume = taal_panel_resume, -+ -+ .config = OMAP_DSS_LCD_TFT, -+ -+ .timings = { -+ .x_res = 864, -+ .y_res = 480, -+ }, -+}; -+ -+static int __init taal_init(void) -+{ -+ DBG("taal_init\n"); -+ -+ omap_dss_register_ctrl(&taal_ctrl); -+ omap_dss_register_panel(&taal_panel); -+ -+ return 0; -+} -+ -+static void __exit taal_exit(void) -+{ -+ DBG("taal_exit\n"); -+ -+ omap_dss_unregister_panel(&taal_panel); -+ omap_dss_unregister_ctrl(&taal_ctrl); -+} -+ -+module_init(taal_init); -+module_exit(taal_exit); -+ -+MODULE_AUTHOR("Tomi Valkeinen "); -+MODULE_DESCRIPTION("Taal Driver"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/core.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/core.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/core.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/core.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,854 @@ -+/* -+ * linux/drivers/video/omap2/dss/core.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#define DSS_SUBSYS_NAME "CORE" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "dss.h" -+ -+static struct { -+ struct platform_device *pdev; -+ int ctx_id; -+ -+ struct clk *dss_ick; -+ struct clk *dss1_fck; -+ struct clk *dss2_fck; -+ struct clk *dss_54m_fck; -+ struct clk *dss_96m_fck; -+ unsigned num_clks_enabled; -+ -+ struct delayed_work bus_tput_work; -+ unsigned int bus_tput; -+ -+ bool reset_pending; -+ spinlock_t reset_lock; -+ struct work_struct reset_work; -+ -+ struct mutex dss_lock; -+} core; -+ -+static void dss_clk_enable_all_no_ctx(void); -+static void dss_clk_disable_all_no_ctx(void); -+static void dss_clk_enable_no_ctx(enum dss_clock clks); -+static void dss_clk_disable_no_ctx(enum dss_clock clks); -+ -+static char *def_disp_name; -+module_param_named(def_disp, def_disp_name, charp, 0); -+MODULE_PARM_DESC(def_disp_name, "default display name"); -+ -+#ifdef DEBUG -+unsigned int dss_debug; -+module_param_named(debug, dss_debug, bool, 0644); -+#endif -+ -+/* CONTEXT */ -+static int dss_get_ctx_id(void) -+{ -+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; -+ int r; -+ -+ if (!pdata->get_last_off_on_transaction_id) -+ return 0; -+ r = pdata->get_last_off_on_transaction_id(&core.pdev->dev); -+ if (r < 0) { -+ dev_err(&core.pdev->dev, -+ "getting transaction ID failed, will force context restore\n"); -+ r = -1; -+ } -+ return r; -+} -+ -+int dss_need_ctx_restore(void) -+{ -+ int id = dss_get_ctx_id(); -+ -+ if (id < 0 || id != core.ctx_id) { -+ DSSDBG("ctx id %d -> id %d\n", -+ core.ctx_id, id); -+ core.ctx_id = id; -+ return 1; -+ } else { -+ /* Hack to workaround context loss */ -+ if (dss_check_context()) { -+ DSSERR("unexpected HW context loss, will force context restore (id=%d)\n", -+ id); -+ return 1; -+ } -+ -+ return 0; -+ } -+} -+ -+static void save_all_ctx(void) -+{ -+ DSSDBG("save context\n"); -+ -+ dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ /* Hack to workaround context loss */ -+ if (dss_check_context()) { -+ DSSERR("HW context corrupted, skipping save\n"); -+ goto out; -+ } -+ -+ dss_save_context(); -+ dispc_save_context(); -+#ifdef CONFIG_OMAP2_DSS_DSI -+ dsi_save_context(); -+#endif -+ -+ out: -+ dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1); -+} -+ -+static void restore_all_ctx(void) -+{ -+ DSSDBG("restore context\n"); -+ -+ dss_clk_enable_all_no_ctx(); -+ -+ dss_restore_context(); -+ dispc_restore_context(); -+#ifdef CONFIG_OMAP2_DSS_DSI -+ dsi_restore_context(); -+#endif -+ -+ dss_clk_disable_all_no_ctx(); -+} -+ -+/* CLOCKS */ -+void dss_dump_clocks(struct seq_file *s) -+{ -+ int i; -+ struct clk *clocks[5] = { -+ core.dss_ick, -+ core.dss1_fck, -+ core.dss2_fck, -+ core.dss_54m_fck, -+ core.dss_96m_fck -+ }; -+ -+ seq_printf(s, "- dss -\n"); -+ -+ seq_printf(s, "internal clk count\t%u\n", core.num_clks_enabled); -+ -+ for (i = 0; i < 5; i++) { -+ if (!clocks[i]) -+ continue; -+ seq_printf(s, "%-15s\t%lu\t%d\n", -+ clocks[i]->name, -+ clk_get_rate(clocks[i]), -+ clocks[i]->usecount); -+ } -+} -+ -+static int dss_get_clocks(void) -+{ -+ const struct { -+ struct clk **clock; -+ char *omap2_name; -+ char *omap3_name; -+ } clocks[5] = { -+ { &core.dss_ick, "dss_ick", "dss_ick" }, /* L3 & L4 ick */ -+ { &core.dss1_fck, "dss1_fck", "dss1_alwon_fck" }, -+ { &core.dss2_fck, "dss2_fck", "dss2_alwon_fck" }, -+ { &core.dss_54m_fck, "dss_54m_fck", "dss_tv_fck" }, -+ { &core.dss_96m_fck, NULL, "dss_96m_fck" }, 
-+ }; -+ -+ int r = 0; -+ int i; -+ const int num_clocks = 5; -+ -+ for (i = 0; i < num_clocks; i++) -+ *clocks[i].clock = NULL; -+ -+ for (i = 0; i < num_clocks; i++) { -+ struct clk *clk; -+ const char *clk_name; -+ -+ clk_name = cpu_is_omap34xx() ? clocks[i].omap3_name -+ : clocks[i].omap2_name; -+ -+ if (!clk_name) -+ continue; -+ -+ clk = clk_get(NULL, clk_name); -+ -+ if (IS_ERR(clk)) { -+ DSSERR("can't get clock %s", clk_name); -+ r = PTR_ERR(clk); -+ goto err; -+ } -+ -+ DSSDBG("clk %s, rate %ld\n", -+ clk_name, clk_get_rate(clk)); -+ -+ *clocks[i].clock = clk; -+ } -+ -+ return 0; -+ -+err: -+ for (i = 0; i < num_clocks; i++) { -+ if (!IS_ERR(*clocks[i].clock)) -+ clk_put(*clocks[i].clock); -+ } -+ -+ return r; -+} -+ -+static void dss_put_clocks(void) -+{ -+ if (core.dss_96m_fck) -+ clk_put(core.dss_96m_fck); -+ clk_put(core.dss_54m_fck); -+ clk_put(core.dss1_fck); -+ clk_put(core.dss2_fck); -+ clk_put(core.dss_ick); -+} -+ -+unsigned long dss_clk_get_rate(enum dss_clock clk) -+{ -+ switch (clk) { -+ case DSS_CLK_ICK: -+ return clk_get_rate(core.dss_ick); -+ case DSS_CLK_FCK1: -+ return clk_get_rate(core.dss1_fck); -+ case DSS_CLK_FCK2: -+ return clk_get_rate(core.dss2_fck); -+ case DSS_CLK_54M: -+ return clk_get_rate(core.dss_54m_fck); -+ case DSS_CLK_96M: -+ return clk_get_rate(core.dss_96m_fck); -+ } -+ -+ BUG(); -+ return 0; -+} -+ -+static unsigned count_clk_bits(enum dss_clock clks) -+{ -+ unsigned num_clks = 0; -+ -+ if (clks & DSS_CLK_ICK) -+ ++num_clks; -+ if (clks & DSS_CLK_FCK1) -+ ++num_clks; -+ if (clks & DSS_CLK_FCK2) -+ ++num_clks; -+ if (clks & DSS_CLK_54M) -+ ++num_clks; -+ if (clks & DSS_CLK_96M) -+ ++num_clks; -+ -+ return num_clks; -+} -+ -+static void dss_clk_enable_no_ctx(enum dss_clock clks) -+{ -+ unsigned num_clks = count_clk_bits(clks); -+ -+ if (clks & DSS_CLK_ICK) -+ clk_enable(core.dss_ick); -+ if (clks & DSS_CLK_FCK1) -+ clk_enable(core.dss1_fck); -+ if (clks & DSS_CLK_FCK2) -+ clk_enable(core.dss2_fck); -+ if (clks & DSS_CLK_54M) -+ clk_enable(core.dss_54m_fck); -+ if (clks & DSS_CLK_96M) -+ clk_enable(core.dss_96m_fck); -+ -+ core.num_clks_enabled += num_clks; -+} -+ -+void dss_clk_enable(enum dss_clock clks) -+{ -+ dss_clk_enable_no_ctx(clks); -+ -+ if (cpu_is_omap34xx() && dss_need_ctx_restore()) -+ restore_all_ctx(); -+} -+ -+static void dss_clk_disable_no_ctx(enum dss_clock clks) -+{ -+ unsigned num_clks = count_clk_bits(clks); -+ -+ if (clks & DSS_CLK_ICK) -+ clk_disable(core.dss_ick); -+ if (clks & DSS_CLK_FCK1) -+ clk_disable(core.dss1_fck); -+ if (clks & DSS_CLK_FCK2) -+ clk_disable(core.dss2_fck); -+ if (clks & DSS_CLK_54M) -+ clk_disable(core.dss_54m_fck); -+ if (clks & DSS_CLK_96M) -+ clk_disable(core.dss_96m_fck); -+ -+ core.num_clks_enabled -= num_clks; -+} -+ -+void dss_clk_disable(enum dss_clock clks) -+{ -+ if (cpu_is_omap34xx()) { -+ unsigned num_clks = count_clk_bits(clks); -+ -+ BUG_ON(core.num_clks_enabled < num_clks); -+ -+ if (core.num_clks_enabled == num_clks) -+ save_all_ctx(); -+ } -+ -+ dss_clk_disable_no_ctx(clks); -+} -+ -+static void dss_clk_enable_all_no_ctx(void) -+{ -+ enum dss_clock clks; -+ -+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M; -+ if (cpu_is_omap34xx()) -+ clks |= DSS_CLK_96M; -+ dss_clk_enable_no_ctx(clks); -+} -+ -+static void dss_clk_disable_all_no_ctx(void) -+{ -+ enum dss_clock clks; -+ -+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M; -+ if (cpu_is_omap34xx()) -+ clks |= DSS_CLK_96M; -+ dss_clk_disable_no_ctx(clks); -+} -+ -+static void dss_clk_enable_all(void) 
-+{ -+ enum dss_clock clks; -+ -+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M; -+ if (cpu_is_omap34xx()) -+ clks |= DSS_CLK_96M; -+ dss_clk_enable(clks); -+} -+ -+static void dss_clk_disable_all(void) -+{ -+ enum dss_clock clks; -+ -+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M; -+ if (cpu_is_omap34xx()) -+ clks |= DSS_CLK_96M; -+ dss_clk_disable(clks); -+} -+ -+/* DEBUGFS */ -+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) -+static void dss_debug_dump_clocks(struct seq_file *s) -+{ -+ dss_dump_clocks(s); -+ dispc_dump_clocks(s); -+#ifdef CONFIG_OMAP2_DSS_DSI -+ dsi_dump_clocks(s); -+#endif -+} -+ -+static int dss_debug_show(struct seq_file *s, void *unused) -+{ -+ void (*func)(struct seq_file *) = s->private; -+ func(s); -+ return 0; -+} -+ -+static int dss_debug_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, dss_debug_show, inode->i_private); -+} -+ -+static const struct file_operations dss_debug_fops = { -+ .open = dss_debug_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static struct dentry *dss_debugfs_dir; -+ -+static int dss_initialize_debugfs(void) -+{ -+ dss_debugfs_dir = debugfs_create_dir("omapdss", NULL); -+ if (IS_ERR(dss_debugfs_dir)) { -+ int err = PTR_ERR(dss_debugfs_dir); -+ dss_debugfs_dir = NULL; -+ return err; -+ } -+ -+ debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, -+ &dss_debug_dump_clocks, &dss_debug_fops); -+ -+ debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, -+ &dss_dump_regs, &dss_debug_fops); -+ debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir, -+ &dispc_dump_regs, &dss_debug_fops); -+#ifdef CONFIG_OMAP2_DSS_RFBI -+ debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir, -+ &rfbi_dump_regs, &dss_debug_fops); -+#endif -+#ifdef CONFIG_OMAP2_DSS_DSI -+ debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir, -+ &dsi_dump_regs, &dss_debug_fops); -+#endif -+#ifdef CONFIG_OMAP2_DSS_VENC -+ debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir, -+ &venc_dump_regs, &dss_debug_fops); -+#endif -+ return 0; -+} -+ -+static void dss_uninitialize_debugfs(void) -+{ -+ if (dss_debugfs_dir) -+ debugfs_remove_recursive(dss_debugfs_dir); -+} -+#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ -+ -+void omap_dss_lock(void) -+{ -+ mutex_lock(&core.dss_lock); -+} -+EXPORT_SYMBOL(omap_dss_lock); -+ -+void omap_dss_unlock(void) -+{ -+ mutex_unlock(&core.dss_lock); -+} -+EXPORT_SYMBOL(omap_dss_unlock); -+ -+/* RESET */ -+ -+void dss_schedule_reset(void) -+{ -+ unsigned long flags; -+ -+ DSSDBG("schduling a soft reset\n"); -+ -+ spin_lock_irqsave(&core.reset_lock, flags); -+ if (core.reset_pending) { -+ spin_unlock_irqrestore(&core.reset_lock, flags); -+ return; -+ } -+ -+ core.reset_pending = true; -+ schedule_work(&core.reset_work); -+ -+ spin_unlock_irqrestore(&core.reset_lock, flags); -+} -+ -+static void reset_work_func(struct work_struct *work) -+{ -+ DSSDBG("performing soft reset\n"); -+ -+ spin_lock_irq(&core.reset_lock); -+ if (!core.reset_pending) { -+ spin_unlock_irq(&core.reset_lock); -+ return; -+ } -+ spin_unlock_irq(&core.reset_lock); -+ -+ omap_dss_lock(); -+ dss_clk_enable_all(); -+ dss_suspend_all_displays(); -+ save_all_ctx(); -+ -+ dss_reset(); -+ -+ restore_all_ctx(); -+ dss_resume_all_displays(); -+ dss_clk_disable_all(); -+ omap_dss_unlock(); -+ -+ spin_lock_irq(&core.reset_lock); -+ core.reset_pending = false; -+ spin_unlock_irq(&core.reset_lock); -+ -+ DSSDBG("done with soft reset\n"); -+} -+ -+/* DVFS */ 
-+ -+static void bus_tput_work_func(struct work_struct *work) -+{ -+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; -+ -+ DSSDBG("setting bus throughput to %d KiB/s\n", core.bus_tput); -+ pdata->set_min_bus_tput(&core.pdev->dev, -+ OCP_INITIATOR_AGENT, core.bus_tput); -+} -+ -+static void set_min_bus_tput(unsigned int num_overlays) -+{ -+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; -+ /* -+ * Magic value 400000 chosen so that on OMAP3 OPP3 is used. -+ */ -+ unsigned int tput_max = 400000; -+ unsigned int tput = num_overlays ? 400000 : 0; -+ -+ if (!pdata->set_min_bus_tput || tput == core.bus_tput) -+ return; -+ -+ cancel_delayed_work_sync(&core.bus_tput_work); -+ -+ core.bus_tput = tput; -+ -+ /* Switch to the maximum when the FIFOs are empty. */ -+ DSSDBG("setting bus throughput to %d KiB/s\n", tput_max); -+ pdata->set_min_bus_tput(&core.pdev->dev, OCP_INITIATOR_AGENT, tput_max); -+ -+ if (tput == tput_max) -+ return; -+ -+ /* Switch to whatever was requested after things have stabilized. */ -+ schedule_delayed_work(&core.bus_tput_work, msecs_to_jiffies(2000)); -+} -+ -+void omap_dss_maximize_min_bus_tput(void) -+{ -+ set_min_bus_tput(omap_dss_get_num_overlays()); -+} -+ -+void omap_dss_update_min_bus_tput(void) -+{ -+ int i; -+ struct omap_display *display; -+ struct omap_overlay *ovl; -+ int num_overlays = 0; -+ -+ DSSDBG("dss_update_min_bus_tput()\n"); -+ -+ /* Determine how many overlays are actually fetching data */ -+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) { -+ ovl = omap_dss_get_overlay(i); -+ -+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC)) -+ continue; -+ -+ if (!ovl->info.enabled || !ovl->manager) -+ continue; -+ -+ display = ovl->manager->display; -+ if (!display || display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ continue; -+ -+ num_overlays++; -+ } -+ -+ set_min_bus_tput(num_overlays); -+} -+ -+/* DSI powers */ -+int dss_dsi_power_up(void) -+{ -+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; -+ -+ if (!pdata->dsi_power_up) -+ return 0; /* presume power is always on then */ -+ -+ return pdata->dsi_power_up(); -+} -+ -+void dss_dsi_power_down(void) -+{ -+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; -+ -+ if (!pdata->dsi_power_down) -+ return; -+ -+ pdata->dsi_power_down(); -+} -+ -+const char *dss_get_def_disp_name(void) -+{ -+ return def_disp_name ? def_disp_name : ""; -+} -+ -+ -+ -+/* PLATFORM DEVICE */ -+static int omap_dss_probe(struct platform_device *pdev) -+{ -+ int skip_init = 0; -+ int r; -+ -+ core.pdev = pdev; -+ -+ r = dss_get_clocks(); -+ if (r) -+ goto fail0; -+ -+ dss_clk_enable_all_no_ctx(); -+ -+ core.ctx_id = dss_get_ctx_id(); -+ DSSDBG("initial ctx id %u\n", core.ctx_id); -+ -+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT -+ /* DISPC_CONTROL */ -+ if (omap_readl(0x48050440) & 1) /* LCD enabled? 
*/ -+ skip_init = 1; -+#endif -+ -+ r = dss_init(skip_init); -+ if (r) { -+ DSSERR("Failed to initialize DSS\n"); -+ goto fail0; -+ } -+ -+#ifdef CONFIG_OMAP2_DSS_RFBI -+ r = rfbi_init(); -+ if (r) { -+ DSSERR("Failed to initialize rfbi\n"); -+ goto fail0; -+ } -+#endif -+ -+ r = dpi_init(); -+ if (r) { -+ DSSERR("Failed to initialize dpi\n"); -+ goto fail0; -+ } -+ -+ r = dispc_init(); -+ if (r) { -+ DSSERR("Failed to initialize dispc\n"); -+ goto fail0; -+ } -+#ifdef CONFIG_OMAP2_DSS_VENC -+ r = venc_init(core.pdev); -+ if (r) { -+ DSSERR("Failed to initialize venc\n"); -+ goto fail0; -+ } -+#endif -+ if (cpu_is_omap34xx()) { -+#ifdef CONFIG_OMAP2_DSS_SDI -+ r = sdi_init(skip_init); -+ if (r) { -+ DSSERR("Failed to initialize SDI\n"); -+ goto fail0; -+ } -+#endif -+#ifdef CONFIG_OMAP2_DSS_DSI -+ r = dsi_init(); -+ if (r) { -+ DSSERR("Failed to initialize DSI\n"); -+ goto fail0; -+ } -+#endif -+ } -+ -+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) -+ r = dss_initialize_debugfs(); -+ if (r) -+ goto fail0; -+#endif -+ -+ dss_init_displays(pdev); -+ dss_init_overlay_managers(pdev); -+ dss_init_overlays(pdev); -+ -+ dss_clk_disable_all(); -+ -+ INIT_DELAYED_WORK(&core.bus_tput_work, bus_tput_work_func); -+ -+ core.reset_pending = false; -+ spin_lock_init(&core.reset_lock); -+ INIT_WORK(&core.reset_work, reset_work_func); -+ -+ mutex_init(&core.dss_lock); -+ -+ return 0; -+ -+ /* XXX fail correctly */ -+fail0: -+ return r; -+} -+ -+static int omap_dss_remove(struct platform_device *pdev) -+{ -+ struct omap_dss_board_info *pdata = pdev->dev.platform_data; -+ int c; -+ -+ cancel_work_sync(&core.reset_work); -+ core.reset_pending = false; -+ -+ cancel_delayed_work_sync(&core.bus_tput_work); -+ if (pdata->set_min_bus_tput) -+ pdata->set_min_bus_tput(&core.pdev->dev, OCP_INITIATOR_AGENT, 0); -+ -+ dss_uninit_overlays(pdev); -+ dss_uninit_overlay_managers(pdev); -+ dss_uninit_displays(pdev); -+ -+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) -+ dss_uninitialize_debugfs(); -+#endif -+ -+#ifdef CONFIG_OMAP2_DSS_VENC -+ venc_exit(); -+#endif -+ dispc_exit(); -+ dpi_exit(); -+#ifdef CONFIG_OMAP2_DSS_RFBI -+ rfbi_exit(); -+#endif -+ if (cpu_is_omap34xx()) { -+#ifdef CONFIG_OMAP2_DSS_DSI -+ dsi_exit(); -+#endif -+#ifdef CONFIG_OMAP2_DSS_SDI -+ sdi_exit(); -+#endif -+ } -+ -+ dss_exit(); -+ -+ /* these should be removed at some point */ -+ c = core.dss_ick->usecount; -+ if (c > 0) { -+ DSSERR("warning: dss_ick usecount %d, disabling\n", c); -+ while (c-- > 0) -+ clk_disable(core.dss_ick); -+ } -+ -+ c = core.dss1_fck->usecount; -+ if (c > 0) { -+ DSSERR("warning: dss1_fck usecount %d, disabling\n", c); -+ while (c-- > 0) -+ clk_disable(core.dss1_fck); -+ } -+ -+ c = core.dss2_fck->usecount; -+ if (c > 0) { -+ DSSERR("warning: dss2_fck usecount %d, disabling\n", c); -+ while (c-- > 0) -+ clk_disable(core.dss2_fck); -+ } -+ -+ c = core.dss_54m_fck->usecount; -+ if (c > 0) { -+ DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c); -+ while (c-- > 0) -+ clk_disable(core.dss_54m_fck); -+ } -+ -+ if (core.dss_96m_fck) { -+ c = core.dss_96m_fck->usecount; -+ if (c > 0) { -+ DSSERR("warning: dss_96m_fck usecount %d, disabling\n", -+ c); -+ while (c-- > 0) -+ clk_disable(core.dss_96m_fck); -+ } -+ } -+ -+ dss_put_clocks(); -+ -+ return 0; -+} -+ -+static void omap_dss_shutdown(struct platform_device *pdev) -+{ -+ DSSDBG("shutdown\n"); -+} -+ -+static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state) -+{ -+ int ret; -+ -+ DSSDBG("suspend 
%d\n", state.event); -+ -+ omap_dss_lock(); -+ ret = dss_suspend_all_displays(); -+ omap_dss_unlock(); -+ -+ return ret; -+ -+} -+ -+static int omap_dss_resume(struct platform_device *pdev) -+{ -+ int ret; -+ -+ DSSDBG("resume\n"); -+ -+ omap_dss_lock(); -+ ret = dss_resume_all_displays(); -+ omap_dss_unlock(); -+ -+ return ret; -+} -+ -+static struct platform_driver omap_dss_driver = { -+ .probe = omap_dss_probe, -+ .remove = omap_dss_remove, -+ .shutdown = omap_dss_shutdown, -+ .suspend = omap_dss_suspend, -+ .resume = omap_dss_resume, -+ .driver = { -+ .name = "omapdss", -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init omap_dss_init(void) -+{ -+ return platform_driver_register(&omap_dss_driver); -+} -+ -+static void __exit omap_dss_exit(void) -+{ -+ platform_driver_unregister(&omap_dss_driver); -+} -+ -+device_initcall(omap_dss_init); -+module_exit(omap_dss_exit); -+ -+ -+MODULE_AUTHOR("Tomi Valkeinen "); -+MODULE_DESCRIPTION("OMAP2/3 Display Subsystem"); -+MODULE_LICENSE("GPL v2"); -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dispc.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dispc.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dispc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dispc.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,3568 @@ -+/* -+ * linux/drivers/video/omap2/dss/dispc.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#define DSS_SUBSYS_NAME "DISPC" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "dss.h" -+ -+/* DISPC */ -+#define DISPC_BASE 0x48050400 -+ -+#define DISPC_SZ_REGS SZ_1K -+ -+struct dispc_reg { u16 idx; }; -+ -+#define DISPC_REG(idx) ((const struct dispc_reg) { idx }) -+ -+/* DISPC common */ -+#define DISPC_REVISION DISPC_REG(0x0000) -+#define DISPC_SYSCONFIG DISPC_REG(0x0010) -+#define DISPC_SYSSTATUS DISPC_REG(0x0014) -+#define DISPC_IRQSTATUS DISPC_REG(0x0018) -+#define DISPC_IRQENABLE DISPC_REG(0x001C) -+#define DISPC_CONTROL DISPC_REG(0x0040) -+#define DISPC_CONFIG DISPC_REG(0x0044) -+#define DISPC_CAPABLE DISPC_REG(0x0048) -+#define DISPC_DEFAULT_COLOR0 DISPC_REG(0x004C) -+#define DISPC_DEFAULT_COLOR1 DISPC_REG(0x0050) -+#define DISPC_TRANS_COLOR0 DISPC_REG(0x0054) -+#define DISPC_TRANS_COLOR1 DISPC_REG(0x0058) -+#define DISPC_LINE_STATUS DISPC_REG(0x005C) -+#define DISPC_LINE_NUMBER DISPC_REG(0x0060) -+#define DISPC_TIMING_H DISPC_REG(0x0064) -+#define DISPC_TIMING_V DISPC_REG(0x0068) -+#define DISPC_POL_FREQ DISPC_REG(0x006C) -+#define DISPC_DIVISOR DISPC_REG(0x0070) -+#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074) -+#define DISPC_SIZE_DIG DISPC_REG(0x0078) -+#define DISPC_SIZE_LCD DISPC_REG(0x007C) -+ -+/* DISPC GFX plane */ -+#define DISPC_GFX_BA0 DISPC_REG(0x0080) -+#define DISPC_GFX_BA1 DISPC_REG(0x0084) -+#define DISPC_GFX_POSITION DISPC_REG(0x0088) -+#define DISPC_GFX_SIZE DISPC_REG(0x008C) -+#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0) -+#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4) -+#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8) -+#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC) -+#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0) -+#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4) -+#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8) -+ -+#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4) -+#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8) -+#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC) -+ -+#define DISPC_CPR_COEF_R DISPC_REG(0x0220) -+#define DISPC_CPR_COEF_G DISPC_REG(0x0224) -+#define DISPC_CPR_COEF_B DISPC_REG(0x0228) -+ -+#define DISPC_GFX_PRELOAD DISPC_REG(0x022C) -+ -+/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */ -+#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx) -+ -+#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000) -+#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004) -+#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008) -+#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C) -+#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010) -+#define DISPC_VID_FIFO_THRESHOLD(n) DISPC_VID_REG(n, 0x0014) -+#define DISPC_VID_FIFO_SIZE_STATUS(n) DISPC_VID_REG(n, 0x0018) -+#define DISPC_VID_ROW_INC(n) DISPC_VID_REG(n, 0x001C) -+#define DISPC_VID_PIXEL_INC(n) DISPC_VID_REG(n, 0x0020) -+#define DISPC_VID_FIR(n) DISPC_VID_REG(n, 0x0024) -+#define DISPC_VID_PICTURE_SIZE(n) DISPC_VID_REG(n, 0x0028) -+#define DISPC_VID_ACCU0(n) DISPC_VID_REG(n, 0x002C) -+#define DISPC_VID_ACCU1(n) DISPC_VID_REG(n, 0x0030) -+ -+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -+#define DISPC_VID_FIR_COEF_H(n, i) DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8) -+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -+#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8) -+/* coef index i = {0, 1, 2, 3, 4} */ -+#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4) -+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -+#define DISPC_VID_FIR_COEF_V(n, i) 
DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4) -+ -+#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04) -+ -+ -+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ -+ DISPC_IRQ_OCP_ERR | \ -+ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ -+ DISPC_IRQ_VID2_FIFO_UNDERFLOW | \ -+ DISPC_IRQ_SYNC_LOST | \ -+ DISPC_IRQ_SYNC_LOST_DIGIT) -+ -+#define DISPC_MAX_NR_ISRS 8 -+ -+struct omap_dispc_isr_data { -+ omap_dispc_isr_t isr; -+ void *arg; -+ u32 mask; -+}; -+ -+#define REG_GET(idx, start, end) \ -+ FLD_GET(dispc_read_reg(idx), start, end) -+ -+#define REG_FLD_MOD(idx, val, start, end) \ -+ dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end)) -+ -+static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES, -+ DISPC_VID_ATTRIBUTES(0), -+ DISPC_VID_ATTRIBUTES(1) }; -+ -+static struct { -+ void __iomem *base; -+ -+ struct clk *dpll4_m4_ck; -+ -+ unsigned long cache_req_pck; -+ unsigned long cache_prate; -+ struct dispc_clock_info cache_cinfo; -+ -+ spinlock_t irq_lock; -+ u32 irq_error_mask; -+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS]; -+ u32 error_irqs; -+ struct work_struct error_work; -+ -+ u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; -+} dispc; -+ -+static void _omap_dispc_set_irqs(void); -+ -+static inline void dispc_write_reg(const struct dispc_reg idx, u32 val) -+{ -+ __raw_writel(val, dispc.base + idx.idx); -+} -+ -+static inline u32 dispc_read_reg(const struct dispc_reg idx) -+{ -+ return __raw_readl(dispc.base + idx.idx); -+} -+ -+#define SR(reg) \ -+ dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg) -+#define RR(reg) \ -+ dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)]) -+ -+void dispc_save_context(void) -+{ -+ if (cpu_is_omap24xx()) -+ return; -+ -+ SR(SYSCONFIG); -+ SR(IRQENABLE); -+ SR(CONTROL); -+ SR(CONFIG); -+ SR(DEFAULT_COLOR0); -+ SR(DEFAULT_COLOR1); -+ SR(TRANS_COLOR0); -+ SR(TRANS_COLOR1); -+ SR(LINE_NUMBER); -+ SR(TIMING_H); -+ SR(TIMING_V); -+ SR(POL_FREQ); -+ SR(DIVISOR); -+ SR(GLOBAL_ALPHA); -+ SR(SIZE_DIG); -+ SR(SIZE_LCD); -+ -+ SR(GFX_BA0); -+ SR(GFX_BA1); -+ SR(GFX_POSITION); -+ SR(GFX_SIZE); -+ SR(GFX_ATTRIBUTES); -+ SR(GFX_FIFO_THRESHOLD); -+ SR(GFX_ROW_INC); -+ SR(GFX_PIXEL_INC); -+ SR(GFX_WINDOW_SKIP); -+ SR(GFX_TABLE_BA); -+ -+ SR(DATA_CYCLE1); -+ SR(DATA_CYCLE2); -+ SR(DATA_CYCLE3); -+ -+ SR(CPR_COEF_R); -+ SR(CPR_COEF_G); -+ SR(CPR_COEF_B); -+ -+ SR(GFX_PRELOAD); -+ -+ /* VID1 */ -+ SR(VID_BA0(0)); -+ SR(VID_BA1(0)); -+ SR(VID_POSITION(0)); -+ SR(VID_SIZE(0)); -+ SR(VID_ATTRIBUTES(0)); -+ SR(VID_FIFO_THRESHOLD(0)); -+ SR(VID_ROW_INC(0)); -+ SR(VID_PIXEL_INC(0)); -+ SR(VID_FIR(0)); -+ SR(VID_PICTURE_SIZE(0)); -+ SR(VID_ACCU0(0)); -+ SR(VID_ACCU1(0)); -+ -+ SR(VID_FIR_COEF_H(0, 0)); -+ SR(VID_FIR_COEF_H(0, 1)); -+ SR(VID_FIR_COEF_H(0, 2)); -+ SR(VID_FIR_COEF_H(0, 3)); -+ SR(VID_FIR_COEF_H(0, 4)); -+ SR(VID_FIR_COEF_H(0, 5)); -+ SR(VID_FIR_COEF_H(0, 6)); -+ SR(VID_FIR_COEF_H(0, 7)); -+ -+ SR(VID_FIR_COEF_HV(0, 0)); -+ SR(VID_FIR_COEF_HV(0, 1)); -+ SR(VID_FIR_COEF_HV(0, 2)); -+ SR(VID_FIR_COEF_HV(0, 3)); -+ SR(VID_FIR_COEF_HV(0, 4)); -+ SR(VID_FIR_COEF_HV(0, 5)); -+ SR(VID_FIR_COEF_HV(0, 6)); -+ SR(VID_FIR_COEF_HV(0, 7)); -+ -+ SR(VID_CONV_COEF(0, 0)); -+ SR(VID_CONV_COEF(0, 1)); -+ SR(VID_CONV_COEF(0, 2)); -+ SR(VID_CONV_COEF(0, 3)); -+ SR(VID_CONV_COEF(0, 4)); -+ -+ SR(VID_FIR_COEF_V(0, 0)); -+ SR(VID_FIR_COEF_V(0, 1)); -+ SR(VID_FIR_COEF_V(0, 2)); -+ SR(VID_FIR_COEF_V(0, 3)); -+ SR(VID_FIR_COEF_V(0, 4)); -+ SR(VID_FIR_COEF_V(0, 5)); -+ SR(VID_FIR_COEF_V(0, 6)); -+ 
SR(VID_FIR_COEF_V(0, 7)); -+ -+ SR(VID_PRELOAD(0)); -+ -+ /* VID2 */ -+ SR(VID_BA0(1)); -+ SR(VID_BA1(1)); -+ SR(VID_POSITION(1)); -+ SR(VID_SIZE(1)); -+ SR(VID_ATTRIBUTES(1)); -+ SR(VID_FIFO_THRESHOLD(1)); -+ SR(VID_ROW_INC(1)); -+ SR(VID_PIXEL_INC(1)); -+ SR(VID_FIR(1)); -+ SR(VID_PICTURE_SIZE(1)); -+ SR(VID_ACCU0(1)); -+ SR(VID_ACCU1(1)); -+ -+ SR(VID_FIR_COEF_H(1, 0)); -+ SR(VID_FIR_COEF_H(1, 1)); -+ SR(VID_FIR_COEF_H(1, 2)); -+ SR(VID_FIR_COEF_H(1, 3)); -+ SR(VID_FIR_COEF_H(1, 4)); -+ SR(VID_FIR_COEF_H(1, 5)); -+ SR(VID_FIR_COEF_H(1, 6)); -+ SR(VID_FIR_COEF_H(1, 7)); -+ -+ SR(VID_FIR_COEF_HV(1, 0)); -+ SR(VID_FIR_COEF_HV(1, 1)); -+ SR(VID_FIR_COEF_HV(1, 2)); -+ SR(VID_FIR_COEF_HV(1, 3)); -+ SR(VID_FIR_COEF_HV(1, 4)); -+ SR(VID_FIR_COEF_HV(1, 5)); -+ SR(VID_FIR_COEF_HV(1, 6)); -+ SR(VID_FIR_COEF_HV(1, 7)); -+ -+ SR(VID_CONV_COEF(1, 0)); -+ SR(VID_CONV_COEF(1, 1)); -+ SR(VID_CONV_COEF(1, 2)); -+ SR(VID_CONV_COEF(1, 3)); -+ SR(VID_CONV_COEF(1, 4)); -+ -+ SR(VID_FIR_COEF_V(1, 0)); -+ SR(VID_FIR_COEF_V(1, 1)); -+ SR(VID_FIR_COEF_V(1, 2)); -+ SR(VID_FIR_COEF_V(1, 3)); -+ SR(VID_FIR_COEF_V(1, 4)); -+ SR(VID_FIR_COEF_V(1, 5)); -+ SR(VID_FIR_COEF_V(1, 6)); -+ SR(VID_FIR_COEF_V(1, 7)); -+ -+ SR(VID_PRELOAD(1)); -+} -+ -+void dispc_restore_context(void) -+{ -+ RR(SYSCONFIG); -+ RR(IRQENABLE); -+ /*RR(CONTROL);*/ -+ RR(CONFIG); -+ RR(DEFAULT_COLOR0); -+ RR(DEFAULT_COLOR1); -+ RR(TRANS_COLOR0); -+ RR(TRANS_COLOR1); -+ RR(LINE_NUMBER); -+ RR(TIMING_H); -+ RR(TIMING_V); -+ RR(POL_FREQ); -+ RR(DIVISOR); -+ RR(GLOBAL_ALPHA); -+ RR(SIZE_DIG); -+ RR(SIZE_LCD); -+ -+ RR(GFX_BA0); -+ RR(GFX_BA1); -+ RR(GFX_POSITION); -+ RR(GFX_SIZE); -+ RR(GFX_ATTRIBUTES); -+ RR(GFX_FIFO_THRESHOLD); -+ RR(GFX_ROW_INC); -+ RR(GFX_PIXEL_INC); -+ RR(GFX_WINDOW_SKIP); -+ RR(GFX_TABLE_BA); -+ -+ RR(DATA_CYCLE1); -+ RR(DATA_CYCLE2); -+ RR(DATA_CYCLE3); -+ -+ RR(CPR_COEF_R); -+ RR(CPR_COEF_G); -+ RR(CPR_COEF_B); -+ -+ RR(GFX_PRELOAD); -+ -+ /* VID1 */ -+ RR(VID_BA0(0)); -+ RR(VID_BA1(0)); -+ RR(VID_POSITION(0)); -+ RR(VID_SIZE(0)); -+ RR(VID_ATTRIBUTES(0)); -+ RR(VID_FIFO_THRESHOLD(0)); -+ RR(VID_ROW_INC(0)); -+ RR(VID_PIXEL_INC(0)); -+ RR(VID_FIR(0)); -+ RR(VID_PICTURE_SIZE(0)); -+ RR(VID_ACCU0(0)); -+ RR(VID_ACCU1(0)); -+ -+ RR(VID_FIR_COEF_H(0, 0)); -+ RR(VID_FIR_COEF_H(0, 1)); -+ RR(VID_FIR_COEF_H(0, 2)); -+ RR(VID_FIR_COEF_H(0, 3)); -+ RR(VID_FIR_COEF_H(0, 4)); -+ RR(VID_FIR_COEF_H(0, 5)); -+ RR(VID_FIR_COEF_H(0, 6)); -+ RR(VID_FIR_COEF_H(0, 7)); -+ -+ RR(VID_FIR_COEF_HV(0, 0)); -+ RR(VID_FIR_COEF_HV(0, 1)); -+ RR(VID_FIR_COEF_HV(0, 2)); -+ RR(VID_FIR_COEF_HV(0, 3)); -+ RR(VID_FIR_COEF_HV(0, 4)); -+ RR(VID_FIR_COEF_HV(0, 5)); -+ RR(VID_FIR_COEF_HV(0, 6)); -+ RR(VID_FIR_COEF_HV(0, 7)); -+ -+ RR(VID_CONV_COEF(0, 0)); -+ RR(VID_CONV_COEF(0, 1)); -+ RR(VID_CONV_COEF(0, 2)); -+ RR(VID_CONV_COEF(0, 3)); -+ RR(VID_CONV_COEF(0, 4)); -+ -+ RR(VID_FIR_COEF_V(0, 0)); -+ RR(VID_FIR_COEF_V(0, 1)); -+ RR(VID_FIR_COEF_V(0, 2)); -+ RR(VID_FIR_COEF_V(0, 3)); -+ RR(VID_FIR_COEF_V(0, 4)); -+ RR(VID_FIR_COEF_V(0, 5)); -+ RR(VID_FIR_COEF_V(0, 6)); -+ RR(VID_FIR_COEF_V(0, 7)); -+ -+ RR(VID_PRELOAD(0)); -+ -+ /* VID2 */ -+ RR(VID_BA0(1)); -+ RR(VID_BA1(1)); -+ RR(VID_POSITION(1)); -+ RR(VID_SIZE(1)); -+ RR(VID_ATTRIBUTES(1)); -+ RR(VID_FIFO_THRESHOLD(1)); -+ RR(VID_ROW_INC(1)); -+ RR(VID_PIXEL_INC(1)); -+ RR(VID_FIR(1)); -+ RR(VID_PICTURE_SIZE(1)); -+ RR(VID_ACCU0(1)); -+ RR(VID_ACCU1(1)); -+ -+ RR(VID_FIR_COEF_H(1, 0)); -+ RR(VID_FIR_COEF_H(1, 1)); -+ RR(VID_FIR_COEF_H(1, 2)); -+ RR(VID_FIR_COEF_H(1, 3)); -+ RR(VID_FIR_COEF_H(1, 4)); -+ 
RR(VID_FIR_COEF_H(1, 5)); -+ RR(VID_FIR_COEF_H(1, 6)); -+ RR(VID_FIR_COEF_H(1, 7)); -+ -+ RR(VID_FIR_COEF_HV(1, 0)); -+ RR(VID_FIR_COEF_HV(1, 1)); -+ RR(VID_FIR_COEF_HV(1, 2)); -+ RR(VID_FIR_COEF_HV(1, 3)); -+ RR(VID_FIR_COEF_HV(1, 4)); -+ RR(VID_FIR_COEF_HV(1, 5)); -+ RR(VID_FIR_COEF_HV(1, 6)); -+ RR(VID_FIR_COEF_HV(1, 7)); -+ -+ RR(VID_CONV_COEF(1, 0)); -+ RR(VID_CONV_COEF(1, 1)); -+ RR(VID_CONV_COEF(1, 2)); -+ RR(VID_CONV_COEF(1, 3)); -+ RR(VID_CONV_COEF(1, 4)); -+ -+ RR(VID_FIR_COEF_V(1, 0)); -+ RR(VID_FIR_COEF_V(1, 1)); -+ RR(VID_FIR_COEF_V(1, 2)); -+ RR(VID_FIR_COEF_V(1, 3)); -+ RR(VID_FIR_COEF_V(1, 4)); -+ RR(VID_FIR_COEF_V(1, 5)); -+ RR(VID_FIR_COEF_V(1, 6)); -+ RR(VID_FIR_COEF_V(1, 7)); -+ -+ RR(VID_PRELOAD(1)); -+ -+ /* enable last, because LCD & DIGIT enable are here */ -+ RR(CONTROL); -+} -+ -+#undef SR -+#undef RR -+ -+static inline void enable_clocks(bool enable) -+{ -+ if (enable) -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ else -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+} -+ -+void dispc_go(enum omap_channel channel) -+{ -+ int bit; -+ unsigned long tmo; -+ -+ enable_clocks(1); -+ -+ if (channel == OMAP_DSS_CHANNEL_LCD) -+ bit = 0; /* LCDENABLE */ -+ else -+ bit = 1; /* DIGITALENABLE */ -+ -+ /* if the channel is not enabled, we don't need GO */ -+ if (REG_GET(DISPC_CONTROL, bit, bit) == 0) -+ goto end; -+ -+ if (channel == OMAP_DSS_CHANNEL_LCD) -+ bit = 5; /* GOLCD */ -+ else -+ bit = 6; /* GODIGIT */ -+ -+ tmo = jiffies + msecs_to_jiffies(200); -+ while (REG_GET(DISPC_CONTROL, bit, bit) == 1) { -+ if (time_after(jiffies, tmo)) { -+ DSSERR("timeout waiting GO flag\n"); -+ goto end; -+ } -+ cpu_relax(); -+ } -+ -+ DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT"); -+ -+ REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); -+end: -+ enable_clocks(0); -+} -+ -+static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) -+{ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value); -+} -+ -+static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value) -+{ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value); -+} -+ -+static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value) -+{ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value); -+} -+ -+static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup, -+ int vscaleup, int five_taps) -+{ -+ /* Coefficients for horizontal up-sampling */ -+ static const u32 coef_hup[8] = { -+ 0x00800000, -+ 0x0D7CF800, -+ 0x1E70F5FF, -+ 0x335FF5FE, -+ 0xF74949F7, -+ 0xF55F33FB, -+ 0xF5701EFE, -+ 0xF87C0DFF, -+ }; -+ -+ /* Coefficients for horizontal down-sampling */ -+ static const u32 coef_hdown[8] = { -+ 0x24382400, -+ 0x28371FFE, -+ 0x2C361BFB, -+ 0x303516F9, -+ 0x11343311, -+ 0x1635300C, -+ 0x1B362C08, -+ 0x1F372804, -+ }; -+ -+ /* Coefficients for horizontal and vertical up-sampling */ -+ static const u32 coef_hvup[2][8] = { -+ { -+ 0x00800000, -+ 0x037B02FF, -+ 0x0C6F05FE, -+ 0x205907FB, -+ 0x00404000, -+ 0x075920FE, -+ 0x056F0CFF, -+ 0x027B0300, -+ }, -+ { -+ 0x00800000, -+ 0x0D7CF8FF, -+ 0x1E70F5FE, -+ 0x335FF5FB, -+ 0xF7404000, -+ 0xF55F33FE, -+ 0xF5701EFF, -+ 0xF87C0D00, -+ }, -+ }; -+ -+ /* Coefficients for horizontal and vertical down-sampling */ -+ static const u32 coef_hvdown[2][8] = { -+ { -+ 0x24382400, -+ 0x28391F04, -+ 0x2D381B08, -+ 0x3237170C, -+ 0x123737F7, -+ 0x173732F9, -+ 0x1B382DFB, -+ 0x1F3928FE, -+ }, -+ { -+ 
0x24382400, -+ 0x28371F04, -+ 0x2C361B08, -+ 0x3035160C, -+ 0x113433F7, -+ 0x163530F9, -+ 0x1B362CFB, -+ 0x1F3728FE, -+ }, -+ }; -+ -+ /* Coefficients for vertical up-sampling */ -+ static const u32 coef_vup[8] = { -+ 0x00000000, -+ 0x0000FF00, -+ 0x0000FEFF, -+ 0x0000FBFE, -+ 0x000000F7, -+ 0x0000FEFB, -+ 0x0000FFFE, -+ 0x000000FF, -+ }; -+ -+ -+ /* Coefficients for vertical down-sampling */ -+ static const u32 coef_vdown[8] = { -+ 0x00000000, -+ 0x000004FE, -+ 0x000008FB, -+ 0x00000CF9, -+ 0x0000F711, -+ 0x0000F90C, -+ 0x0000FB08, -+ 0x0000FE04, -+ }; -+ -+ const u32 *h_coef; -+ const u32 *hv_coef; -+ const u32 *hv_coef_mod; -+ const u32 *v_coef; -+ int i; -+ -+ if (hscaleup) -+ h_coef = coef_hup; -+ else -+ h_coef = coef_hdown; -+ -+ if (vscaleup) { -+ hv_coef = coef_hvup[five_taps]; -+ v_coef = coef_vup; -+ -+ if (hscaleup) -+ hv_coef_mod = NULL; -+ else -+ hv_coef_mod = coef_hvdown[five_taps]; -+ } else { -+ hv_coef = coef_hvdown[five_taps]; -+ v_coef = coef_vdown; -+ -+ if (hscaleup) -+ hv_coef_mod = coef_hvup[five_taps]; -+ else -+ hv_coef_mod = NULL; -+ } -+ -+ for (i = 0; i < 8; i++) { -+ u32 h, hv; -+ -+ h = h_coef[i]; -+ -+ hv = hv_coef[i]; -+ -+ if (hv_coef_mod) { -+ hv &= 0xffffff00; -+ hv |= (hv_coef_mod[i] & 0xff); -+ } -+ -+ _dispc_write_firh_reg(plane, i, h); -+ _dispc_write_firhv_reg(plane, i, hv); -+ } -+ -+ if (!five_taps) -+ return; -+ -+ for (i = 0; i < 8; i++) { -+ u32 v; -+ v = v_coef[i]; -+ _dispc_write_firv_reg(plane, i, v); -+ } -+} -+ -+static void _dispc_setup_color_conv_coef(void) -+{ -+ const struct color_conv_coef { -+ int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb; -+ int full_range; -+ } ctbl_bt601_5 = { -+ 298, 409, 0, 298, -208, -100, 298, 0, 517, 0, -+ }; -+ -+ const struct color_conv_coef *ct; -+ -+#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) -+ -+ ct = &ctbl_bt601_5; -+ -+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy, ct->rcb)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0, ct->bcb)); -+ -+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy, ct->rcb)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by)); -+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb)); -+ -+#undef CVAL -+ -+ REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11); -+ REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11); -+} -+ -+ -+static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr) -+{ -+ const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0, -+ DISPC_VID_BA0(0), -+ DISPC_VID_BA0(1) }; -+ -+ dispc_write_reg(ba0_reg[plane], paddr); -+} -+ -+void omap_dispc_set_plane_ba0(enum omap_channel channel, enum omap_plane plane, -+ u32 paddr) -+{ -+ enable_clocks(1); -+ _dispc_set_plane_ba0(plane, paddr); -+ dispc_go(channel); -+ enable_clocks(0); -+} -+EXPORT_SYMBOL_GPL(omap_dispc_set_plane_ba0); -+ -+static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr) -+{ -+ const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1, -+ DISPC_VID_BA1(0), -+ DISPC_VID_BA1(1) }; -+ -+ dispc_write_reg(ba1_reg[plane], paddr); -+} -+ -+static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y) -+{ -+ const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION, -+ 
DISPC_VID_POSITION(0), -+ DISPC_VID_POSITION(1) }; -+ -+ u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0); -+ dispc_write_reg(pos_reg[plane], val); -+} -+ -+static void _dispc_set_pic_size(enum omap_plane plane, int width, int height) -+{ -+ const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE, -+ DISPC_VID_PICTURE_SIZE(0), -+ DISPC_VID_PICTURE_SIZE(1) }; -+ u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); -+ dispc_write_reg(siz_reg[plane], val); -+} -+ -+static void _dispc_set_vid_size(enum omap_plane plane, int width, int height) -+{ -+ u32 val; -+ const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0), -+ DISPC_VID_SIZE(1) }; -+ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); -+ dispc_write_reg(vsi_reg[plane-1], val); -+} -+ -+static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha) -+{ -+ -+ BUG_ON(plane == OMAP_DSS_VIDEO1); -+ -+ if (cpu_is_omap24xx()) -+ return; -+ -+ if (plane == OMAP_DSS_GFX) -+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0); -+ else if (plane == OMAP_DSS_VIDEO2) -+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16); -+} -+ -+static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc) -+{ -+ const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC, -+ DISPC_VID_PIXEL_INC(0), -+ DISPC_VID_PIXEL_INC(1) }; -+ -+ dispc_write_reg(ri_reg[plane], inc); -+} -+ -+static void _dispc_set_row_inc(enum omap_plane plane, s32 inc) -+{ -+ const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC, -+ DISPC_VID_ROW_INC(0), -+ DISPC_VID_ROW_INC(1) }; -+ -+ dispc_write_reg(ri_reg[plane], inc); -+} -+ -+static s32 _dispc_get_pix_inc(enum omap_plane plane) -+{ -+ const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC, -+ DISPC_VID_PIXEL_INC(0), -+ DISPC_VID_PIXEL_INC(1) }; -+ -+ return dispc_read_reg(ri_reg[plane]); -+} -+ -+static s32 _dispc_get_row_inc(enum omap_plane plane) -+{ -+ const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC, -+ DISPC_VID_ROW_INC(0), -+ DISPC_VID_ROW_INC(1) }; -+ -+ return dispc_read_reg(ri_reg[plane]); -+} -+ -+static void _dispc_set_color_mode(enum omap_plane plane, -+ enum omap_color_mode color_mode) -+{ -+ u32 m = 0; -+ -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_CLUT1: -+ m = 0x0; break; -+ case OMAP_DSS_COLOR_CLUT2: -+ m = 0x1; break; -+ case OMAP_DSS_COLOR_CLUT4: -+ m = 0x2; break; -+ case OMAP_DSS_COLOR_CLUT8: -+ m = 0x3; break; -+ case OMAP_DSS_COLOR_RGB12U: -+ m = 0x4; break; -+ case OMAP_DSS_COLOR_ARGB16: -+ m = 0x5; break; -+ case OMAP_DSS_COLOR_RGB16: -+ m = 0x6; break; -+ case OMAP_DSS_COLOR_RGB24U: -+ m = 0x8; break; -+ case OMAP_DSS_COLOR_RGB24P: -+ m = 0x9; break; -+ case OMAP_DSS_COLOR_YUV2: -+ m = 0xa; break; -+ case OMAP_DSS_COLOR_UYVY: -+ m = 0xb; break; -+ case OMAP_DSS_COLOR_ARGB32: -+ m = 0xc; break; -+ case OMAP_DSS_COLOR_RGBA32: -+ m = 0xd; break; -+ case OMAP_DSS_COLOR_RGBX32: -+ m = 0xe; break; -+ default: -+ BUG(); break; -+ } -+ -+ REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1); -+} -+ -+static enum omap_color_mode _dispc_get_color_mode(enum omap_plane plane) -+{ -+ u32 m = REG_GET(dispc_reg_att[plane], 4, 1); -+ -+ switch (m) { -+ case 0x0: -+ return OMAP_DSS_COLOR_CLUT1; -+ case 0x1: -+ return OMAP_DSS_COLOR_CLUT2; -+ case 0x2: -+ return OMAP_DSS_COLOR_CLUT4; -+ case 0x3: -+ return OMAP_DSS_COLOR_CLUT8; -+ case 0x4: -+ return OMAP_DSS_COLOR_RGB12U; -+ case 0x5: -+ return OMAP_DSS_COLOR_ARGB16; -+ case 0x6: -+ return OMAP_DSS_COLOR_RGB16; -+ case 0x8: -+ return OMAP_DSS_COLOR_RGB24U; -+ case 0x9: -+ return OMAP_DSS_COLOR_RGB24P; 
-+ case 0xa: -+ return OMAP_DSS_COLOR_YUV2; -+ case 0xb: -+ return OMAP_DSS_COLOR_UYVY; -+ case 0xc: -+ return OMAP_DSS_COLOR_ARGB32; -+ case 0xd: -+ return OMAP_DSS_COLOR_RGBA32; -+ case 0xe: -+ return OMAP_DSS_COLOR_RGBX32; -+ default: -+ BUG(); -+ } -+} -+ -+static void _dispc_set_channel_out(enum omap_plane plane, -+ enum omap_channel channel) -+{ -+ int shift; -+ u32 val; -+ -+ switch (plane) { -+ case OMAP_DSS_GFX: -+ shift = 8; -+ break; -+ case OMAP_DSS_VIDEO1: -+ case OMAP_DSS_VIDEO2: -+ shift = 16; -+ break; -+ default: -+ BUG(); -+ return; -+ } -+ -+ val = dispc_read_reg(dispc_reg_att[plane]); -+ val = FLD_MOD(val, channel, shift, shift); -+ dispc_write_reg(dispc_reg_att[plane], val); -+} -+ -+void dispc_set_burst_size(enum omap_plane plane, -+ enum omap_burst_size burst_size) -+{ -+ int shift; -+ u32 val; -+ -+ enable_clocks(1); -+ -+ switch (plane) { -+ case OMAP_DSS_GFX: -+ shift = 6; -+ break; -+ case OMAP_DSS_VIDEO1: -+ case OMAP_DSS_VIDEO2: -+ shift = 14; -+ break; -+ default: -+ BUG(); -+ return; -+ } -+ -+ val = dispc_read_reg(dispc_reg_att[plane]); -+ val = FLD_MOD(val, burst_size, shift+1, shift); -+ dispc_write_reg(dispc_reg_att[plane], val); -+ -+ enable_clocks(0); -+} -+ -+static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable) -+{ -+ u32 val; -+ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ val = dispc_read_reg(dispc_reg_att[plane]); -+ val = FLD_MOD(val, enable, 9, 9); -+ dispc_write_reg(dispc_reg_att[plane], val); -+} -+ -+void dispc_enable_replication(enum omap_plane plane, bool enable) -+{ -+ int bit; -+ -+ if (plane == OMAP_DSS_GFX) -+ bit = 5; -+ else -+ bit = 10; -+ -+ enable_clocks(1); -+ REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit); -+ enable_clocks(0); -+} -+ -+void dispc_set_lcd_size(u16 width, u16 height) -+{ -+ u32 val; -+ BUG_ON((width > (1 << 11)) || (height > (1 << 11))); -+ val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); -+ enable_clocks(1); -+ dispc_write_reg(DISPC_SIZE_LCD, val); -+ enable_clocks(0); -+} -+ -+void dispc_set_digit_size(u16 width, u16 height) -+{ -+ u32 val; -+ BUG_ON((width > (1 << 11)) || (height > (1 << 11))); -+ val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); -+ enable_clocks(1); -+ dispc_write_reg(DISPC_SIZE_DIG, val); -+ enable_clocks(0); -+} -+ -+u32 dispc_get_plane_fifo_size(enum omap_plane plane) -+{ -+ const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS, -+ DISPC_VID_FIFO_SIZE_STATUS(0), -+ DISPC_VID_FIFO_SIZE_STATUS(1) }; -+ u32 size; -+ -+ enable_clocks(1); -+ -+ if (cpu_is_omap24xx()) -+ size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 8, 0); -+ else if (cpu_is_omap34xx()) -+ size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 10, 0); -+ else -+ BUG(); -+ -+ if (cpu_is_omap34xx()) { -+ /* FIFOMERGE */ -+ if (REG_GET(DISPC_CONFIG, 14, 14)) -+ size *= 3; -+ } -+ -+ enable_clocks(0); -+ -+ return size; -+} -+ -+void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) -+{ -+ const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD, -+ DISPC_VID_FIFO_THRESHOLD(0), -+ DISPC_VID_FIFO_THRESHOLD(1) }; -+ u32 size; -+ -+ enable_clocks(1); -+ -+ size = dispc_get_plane_fifo_size(plane); -+ -+ BUG_ON(low > size || high > size); -+ -+ DSSDBG("fifo(%d) size %d, low/high old %u/%u, new %u/%u\n", -+ plane, size, -+ REG_GET(ftrs_reg[plane], 11, 0), -+ REG_GET(ftrs_reg[plane], 27, 16), -+ low, high); -+ -+ if (cpu_is_omap24xx()) -+ dispc_write_reg(ftrs_reg[plane], -+ FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0)); -+ else -+ dispc_write_reg(ftrs_reg[plane], -+ 
FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0)); -+ -+ enable_clocks(0); -+} -+ -+void dispc_enable_fifomerge(bool enable) -+{ -+ enable_clocks(1); -+ -+ DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); -+ REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14); -+ -+ enable_clocks(0); -+} -+ -+static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc) -+{ -+ u32 val; -+ const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0), -+ DISPC_VID_FIR(1) }; -+ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ if (cpu_is_omap24xx()) -+ val = FLD_VAL(vinc, 27, 16) | FLD_VAL(hinc, 11, 0); -+ else -+ val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0); -+ dispc_write_reg(fir_reg[plane-1], val); -+} -+ -+static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu) -+{ -+ u32 val; -+ const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0), -+ DISPC_VID_ACCU0(1) }; -+ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0); -+ dispc_write_reg(ac0_reg[plane-1], val); -+} -+ -+static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu) -+{ -+ u32 val; -+ const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0), -+ DISPC_VID_ACCU1(1) }; -+ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0); -+ dispc_write_reg(ac1_reg[plane-1], val); -+} -+ -+ -+static void _dispc_set_scaling(enum omap_plane plane, -+ u16 orig_width, u16 orig_height, -+ u16 out_width, u16 out_height, -+ bool ilace, bool five_taps, -+ bool fieldmode) -+{ -+ int fir_hinc; -+ int fir_vinc; -+ int hscaleup, vscaleup; -+ int accu0 = 0; -+ int accu1 = 0; -+ u32 l; -+ -+ BUG_ON(plane == OMAP_DSS_GFX); -+ -+ hscaleup = orig_width <= out_width; -+ vscaleup = orig_height <= out_height; -+ -+ _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps); -+ -+ if (!orig_width || orig_width == out_width) -+ fir_hinc = 0; -+ else -+ fir_hinc = 1024 * orig_width / out_width; -+ -+ if (!orig_height || orig_height == out_height) -+ fir_vinc = 0; -+ else -+ fir_vinc = 1024 * orig_height / out_height; -+ -+ _dispc_set_fir(plane, fir_hinc, fir_vinc); -+ -+ l = dispc_read_reg(dispc_reg_att[plane]); -+ l &= ~((0x0f << 5) | (0x3 << 21)); -+ -+ l |= fir_hinc ? (1 << 5) : 0; -+ l |= fir_vinc ? (1 << 6) : 0; -+ -+ l |= hscaleup ? 0 : (1 << 7); -+ l |= vscaleup ? 0 : (1 << 8); -+ -+ l |= five_taps ? (1 << 21) : 0; -+ l |= five_taps ? 
(1 << 22) : 0; -+ -+ dispc_write_reg(dispc_reg_att[plane], l); -+ -+ /* -+ * field 0 = even field = bottom field -+ * field 1 = odd field = top field -+ */ -+ if (ilace && !fieldmode) { -+ accu1 = 0; -+ accu0 = (fir_vinc / 2) & 0x3ff; -+ if (accu0 >= 1024/2) { -+ accu1 = 1024/2; -+ accu0 -= accu1; -+ } -+ } -+ -+ _dispc_set_vid_accu0(plane, 0, accu0); -+ _dispc_set_vid_accu1(plane, 0, accu1); -+} -+ -+static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation, -+ bool mirroring, enum omap_color_mode color_mode) -+{ -+ if (color_mode == OMAP_DSS_COLOR_YUV2 || -+ color_mode == OMAP_DSS_COLOR_UYVY) { -+ int vidrot = 0; -+ -+ if (mirroring) { -+ switch (rotation) { -+ case OMAP_DSS_ROT_0: -+ vidrot = 2; -+ break; -+ case OMAP_DSS_ROT_90: -+ vidrot = 1; -+ break; -+ case OMAP_DSS_ROT_180: -+ vidrot = 0; -+ break; -+ case OMAP_DSS_ROT_270: -+ vidrot = 3; -+ break; -+ } -+ } else { -+ switch (rotation) { -+ case OMAP_DSS_ROT_0: -+ vidrot = 0; -+ break; -+ case OMAP_DSS_ROT_90: -+ vidrot = 1; -+ break; -+ case OMAP_DSS_ROT_180: -+ vidrot = 2; -+ break; -+ case OMAP_DSS_ROT_270: -+ vidrot = 3; -+ break; -+ } -+ } -+ -+ REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12); -+ -+ if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270) -+ REG_FLD_MOD(dispc_reg_att[plane], 0x1, 18, 18); -+ else -+ REG_FLD_MOD(dispc_reg_att[plane], 0x0, 18, 18); -+ } else { -+ REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12); -+ REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18); -+ } -+} -+ -+static int color_mode_to_bpp(enum omap_color_mode color_mode) -+{ -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_CLUT1: -+ return 1; -+ case OMAP_DSS_COLOR_CLUT2: -+ return 2; -+ case OMAP_DSS_COLOR_CLUT4: -+ return 4; -+ case OMAP_DSS_COLOR_CLUT8: -+ return 8; -+ case OMAP_DSS_COLOR_RGB12U: -+ case OMAP_DSS_COLOR_RGB16: -+ case OMAP_DSS_COLOR_ARGB16: -+ case OMAP_DSS_COLOR_YUV2: -+ case OMAP_DSS_COLOR_UYVY: -+ return 16; -+ case OMAP_DSS_COLOR_RGB24P: -+ return 24; -+ case OMAP_DSS_COLOR_RGB24U: -+ case OMAP_DSS_COLOR_ARGB32: -+ case OMAP_DSS_COLOR_RGBA32: -+ case OMAP_DSS_COLOR_RGBX32: -+ return 32; -+ default: -+ BUG(); -+ } -+} -+ -+static s32 calc_gfx_window_skip(void) -+{ -+ enum omap_channel channel; -+ enum omap_color_mode color_mode; -+ unsigned int x0, x1, y0, y1, x, y; -+ unsigned int gfxx, gfxy, gfxw, gfxh; -+ unsigned int vid1x, vid1y, vid1w, vid1h; -+ s32 pix_inc, row_inc, skip; -+ u16 ps; -+ -+ /* GFXENABLE */ -+ if (REG_GET(dispc_reg_att[OMAP_DSS_GFX], 0, 0) == 0) { -+ DSSDBG("gfx_window_skip: GFX not enabled\n"); -+ return 0; -+ } -+ -+ /* VIDENABLE */ -+ if (REG_GET(dispc_reg_att[OMAP_DSS_VIDEO1], 0, 0) == 0) { -+ DSSDBG("gfx_window_skip: VID1 not enabled\n"); -+ return 0; -+ } -+ -+ /* GFXCHANNELOUT and VIDCHANNELOUT */ -+ channel = REG_GET(dispc_reg_att[OMAP_DSS_GFX], 8, 8); -+ if (channel != REG_GET(dispc_reg_att[OMAP_DSS_VIDEO1], 16, 16)) { -+ DSSDBG("gfx_window_skip: GFX and VID1 on different channels\n"); -+ return 0; -+ } -+ -+ if (dispc_trans_key_enabled(channel)) { -+ DSSDBG("gfx_window_skip: transparency color key enabled\n"); -+ return 0; -+ } -+ -+ if (dispc_alpha_blending_enabled(channel)) { -+ DSSDBG("gfx_window_skip: alpha blender enabled\n"); -+ return 0; -+ } -+ -+ /* FIXME RGB12U, RGBX32, ARGB16, ARGB32, RGBA32 formats OK? 
*/ -+ color_mode = _dispc_get_color_mode(OMAP_DSS_GFX); -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_CLUT8: -+ case OMAP_DSS_COLOR_RGB16: -+ case OMAP_DSS_COLOR_YUV2: -+ case OMAP_DSS_COLOR_UYVY: -+ case OMAP_DSS_COLOR_RGB24P: -+ case OMAP_DSS_COLOR_RGB24U: -+ case OMAP_DSS_COLOR_RGB12U: -+ case OMAP_DSS_COLOR_RGBX32: -+ case OMAP_DSS_COLOR_ARGB16: -+ case OMAP_DSS_COLOR_ARGB32: -+ case OMAP_DSS_COLOR_RGBA32: -+ break; -+ default: -+ DSSDBG("gfx_window_skip: unsupported GFX format\n"); -+ return 0; -+ } -+ -+ /* FIXME RGB12U, RGBX32 formats OK? */ -+ switch (_dispc_get_color_mode(OMAP_DSS_VIDEO1)) { -+ case OMAP_DSS_COLOR_RGB16: -+ case OMAP_DSS_COLOR_YUV2: -+ case OMAP_DSS_COLOR_UYVY: -+ case OMAP_DSS_COLOR_RGB24P: -+ case OMAP_DSS_COLOR_RGB24U: -+ case OMAP_DSS_COLOR_RGB12U: -+ case OMAP_DSS_COLOR_RGBX32: -+ break; -+ default: -+ DSSDBG("gfx_window_skip: unsupported VID1 format\n"); -+ return 0; -+ } -+ -+ gfxx = REG_GET(DISPC_GFX_POSITION, 10, 0); -+ gfxy = REG_GET(DISPC_GFX_POSITION, 26, 16); -+ gfxw = REG_GET(DISPC_GFX_SIZE, 10, 0) + 1; -+ gfxh = REG_GET(DISPC_GFX_SIZE, 26, 16) + 1; -+ -+ vid1x = REG_GET(DISPC_VID_POSITION(0), 10, 0); -+ vid1y = REG_GET(DISPC_VID_POSITION(0), 26, 16); -+ vid1w = REG_GET(DISPC_VID_SIZE(0), 10, 0) + 1; -+ vid1h = REG_GET(DISPC_VID_SIZE(0), 26, 16) + 1; -+ -+ x0 = max(vid1x, gfxx); -+ y0 = max(vid1y, gfxy); -+ x1 = min(vid1x + vid1w, gfxx + gfxw); -+ y1 = min(vid1y + vid1h, gfxy + gfxh); -+ -+ if (x1 <= x0 || y1 <= y0) { -+ DSSDBG("gfx_window_skip: GFX and VID1 do not overlap\n"); -+ return 0; -+ } -+ -+ pix_inc = _dispc_get_pix_inc(OMAP_DSS_GFX); -+ row_inc = _dispc_get_row_inc(OMAP_DSS_GFX); -+ ps = color_mode_to_bpp(color_mode) / 8; -+ -+ x = x1 - x0; -+ y = y1 - y0; -+ -+ DSSDBG("gfx_window_skip: GFX w=%u, VID1 w=%u, " -+ "x=%u, y=%u, pix_inc=%u, row_inc=%u, ps=%u\n", -+ gfxw, vid1w, x, y, pix_inc, row_inc, ps); -+ -+ if (x == gfxw) -+ /* Skip full lines */ -+ skip = y * (x * (pix_inc - 1 + ps) + row_inc - 1); -+ else if (gfxx < vid1x && gfxx + gfxw > vid1x + vid1w) -+ /* Skip the middle of the line */ -+ skip = x * (pix_inc - 1 + ps) + 1; -+ else -+ /* Skip the beginning or the end of the line */ -+ skip = x * (pix_inc - 1 + ps); -+ -+ DSSDBG("gfx_window_skip: skipping %d bytes\n", skip); -+ -+ return skip; -+} -+ -+void dispc_set_overlay_optimization(void) -+{ -+ s32 skip = calc_gfx_window_skip(); -+ -+ REG_FLD_MOD(DISPC_CONTROL, skip ? 
1 : 0, 12, 12); -+ dispc_write_reg(DISPC_GFX_WINDOW_SKIP, skip); -+} -+ -+static s32 pixinc(int pixels, u8 ps) -+{ -+ if (pixels == 1) -+ return 1; -+ else if (pixels > 1) -+ return 1 + (pixels - 1) * ps; -+ else if (pixels < 0) -+ return 1 - (-pixels + 1) * ps; -+ else -+ BUG(); -+} -+ -+static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, -+ u16 screen_width, -+ u16 width, u16 height, -+ enum omap_color_mode color_mode, bool fieldmode, -+ unsigned int field_offset, -+ unsigned *offset0, unsigned *offset1, -+ s32 *row_inc, s32 *pix_inc) -+{ -+ u8 ps; -+ -+ /* FIXME CLUT formats */ -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_CLUT1: -+ case OMAP_DSS_COLOR_CLUT2: -+ case OMAP_DSS_COLOR_CLUT4: -+ case OMAP_DSS_COLOR_CLUT8: -+ BUG(); -+ return; -+ case OMAP_DSS_COLOR_YUV2: -+ case OMAP_DSS_COLOR_UYVY: -+ ps = 4; -+ break; -+ default: -+ ps = color_mode_to_bpp(color_mode) / 8; -+ break; -+ } -+ -+ DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width, -+ width, height); -+ -+ /* -+ * field 0 = even field = bottom field -+ * field 1 = odd field = top field -+ */ -+ switch (rotation + mirror * 4) { -+ case OMAP_DSS_ROT_0: -+ case OMAP_DSS_ROT_180: -+ /* -+ * If the pixel format is YUV or UYVY divide the width -+ * of the image by 2 for 0 and 180 degree rotation. -+ */ -+ if (color_mode == OMAP_DSS_COLOR_YUV2 || -+ color_mode == OMAP_DSS_COLOR_UYVY) -+ width = width >> 1; -+ case OMAP_DSS_ROT_90: -+ case OMAP_DSS_ROT_270: -+ *offset1 = 0; -+ if (field_offset) -+ *offset0 = field_offset * screen_width * ps; -+ else -+ *offset0 = 0; -+ -+ *row_inc = pixinc(1 + (screen_width - width) + -+ (fieldmode ? screen_width : 0), -+ ps); -+ *pix_inc = pixinc(1, ps); -+ break; -+ -+ case OMAP_DSS_ROT_0 + 4: -+ case OMAP_DSS_ROT_180 + 4: -+ /* If the pixel format is YUV or UYVY divide the width -+ * of the image by 2 for 0 degree and 180 degree -+ */ -+ if (color_mode == OMAP_DSS_COLOR_YUV2 || -+ color_mode == OMAP_DSS_COLOR_UYVY) -+ width = width >> 1; -+ case OMAP_DSS_ROT_90 + 4: -+ case OMAP_DSS_ROT_270 + 4: -+ *offset1 = 0; -+ if (field_offset) -+ *offset0 = field_offset * screen_width * ps; -+ else -+ *offset0 = 0; -+ *row_inc = pixinc(1 - (screen_width + width) - -+ (fieldmode ? screen_width : 0), -+ ps); -+ *pix_inc = pixinc(1, ps); -+ break; -+ -+ default: -+ BUG(); -+ } -+} -+ -+static void calc_dma_rotation_offset(u8 rotation, bool mirror, -+ u16 screen_width, -+ u16 width, u16 height, -+ enum omap_color_mode color_mode, bool fieldmode, -+ unsigned int field_offset, -+ unsigned *offset0, unsigned *offset1, -+ s32 *row_inc, s32 *pix_inc) -+{ -+ u8 ps; -+ u16 fbw, fbh; -+ -+ /* FIXME CLUT formats */ -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_CLUT1: -+ case OMAP_DSS_COLOR_CLUT2: -+ case OMAP_DSS_COLOR_CLUT4: -+ case OMAP_DSS_COLOR_CLUT8: -+ BUG(); -+ return; -+ default: -+ ps = color_mode_to_bpp(color_mode) / 8; -+ break; -+ } -+ -+ DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width, -+ width, height); -+ -+ /* width & height are overlay sizes, convert to fb sizes */ -+ -+ if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) { -+ fbw = width; -+ fbh = height; -+ } else { -+ fbw = height; -+ fbh = width; -+ } -+ -+ /* -+ * field 0 = even field = bottom field -+ * field 1 = odd field = top field -+ */ -+ switch (rotation + mirror * 4) { -+ case OMAP_DSS_ROT_0: -+ *offset1 = 0; -+ if (field_offset) -+ *offset0 = *offset1 + field_offset * screen_width * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(1 + (screen_width - fbw) + -+ (fieldmode ? 
screen_width : 0), -+ ps); -+ *pix_inc = pixinc(1, ps); -+ break; -+ case OMAP_DSS_ROT_90: -+ *offset1 = screen_width * (fbh - 1) * ps; -+ if (field_offset) -+ *offset0 = *offset1 + field_offset * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(screen_width * (fbh - 1) + 1 + -+ (fieldmode ? 1 : 0), ps); -+ *pix_inc = pixinc(-screen_width, ps); -+ break; -+ case OMAP_DSS_ROT_180: -+ *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps; -+ if (field_offset) -+ *offset0 = *offset1 - field_offset * screen_width * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(-1 - -+ (screen_width - fbw) - -+ (fieldmode ? screen_width : 0), -+ ps); -+ *pix_inc = pixinc(-1, ps); -+ break; -+ case OMAP_DSS_ROT_270: -+ *offset1 = (fbw - 1) * ps; -+ if (field_offset) -+ *offset0 = *offset1 - field_offset * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(-screen_width * (fbh - 1) - 1 - -+ (fieldmode ? 1 : 0), ps); -+ *pix_inc = pixinc(screen_width, ps); -+ break; -+ -+ /* mirroring */ -+ case OMAP_DSS_ROT_0 + 4: -+ *offset1 = (fbw - 1) * ps; -+ if (field_offset) -+ *offset0 = *offset1 + field_offset * screen_width * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(screen_width * 2 - 1 + -+ (fieldmode ? screen_width : 0), -+ ps); -+ *pix_inc = pixinc(-1, ps); -+ break; -+ -+ case OMAP_DSS_ROT_90 + 4: -+ *offset1 = 0; -+ if (field_offset) -+ *offset0 = *offset1 + field_offset * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(-screen_width * (fbh - 1) + 1 + -+ (fieldmode ? 1 : 0), -+ ps); -+ *pix_inc = pixinc(screen_width, ps); -+ break; -+ -+ case OMAP_DSS_ROT_180 + 4: -+ *offset1 = screen_width * (fbh - 1) * ps; -+ if (field_offset) -+ *offset0 = *offset1 - field_offset * screen_width * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(1 - screen_width * 2 - -+ (fieldmode ? screen_width : 0), -+ ps); -+ *pix_inc = pixinc(1, ps); -+ break; -+ -+ case OMAP_DSS_ROT_270 + 4: -+ *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps; -+ if (field_offset) -+ *offset0 = *offset1 - field_offset * ps; -+ else -+ *offset0 = *offset1; -+ *row_inc = pixinc(screen_width * (fbh - 1) - 1 - -+ (fieldmode ? 
1 : 0), -+ ps); -+ *pix_inc = pixinc(-screen_width, ps); -+ break; -+ -+ default: -+ BUG(); -+ } -+} -+ -+static struct omap_overlay_manager * -+manager_for_channel(enum omap_channel channel_out) -+{ -+ int i; -+ -+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { -+ struct omap_overlay_manager *mgr; -+ mgr = omap_dss_get_overlay_manager(i); -+ -+ if (mgr->id == channel_out) -+ return mgr; -+ } -+ -+ return NULL; -+} -+ -+static u32 get_pixel_clock(enum omap_channel channel_out) -+{ -+ struct omap_video_timings t; -+ struct omap_overlay_manager *mgr = manager_for_channel(channel_out); -+ -+ if (!mgr || !mgr->display || !mgr->display->get_timings) -+ return 0; -+ -+ mgr->display->get_timings(mgr->display, &t); -+ -+ DSSDBG("PCLK = %u\n", t.pixel_clock); -+ -+ return t.pixel_clock; -+} -+ -+static u32 get_display_width(enum omap_channel channel_out) -+{ -+ struct omap_video_timings t; -+ struct omap_overlay_manager *mgr = manager_for_channel(channel_out); -+ -+ if (!mgr || !mgr->display || !mgr->display->get_timings) -+ return 0; -+ -+ mgr->display->get_timings(mgr->display, &t); -+ -+ DSSDBG("PPL = %u\n", t.x_res); -+ -+ return t.x_res; -+} -+ -+static void dispc_get_lcd_divisor(int *lck_div, int *pck_div); -+ -+static int check_hblank_len(u16 width, u16 height, -+ u16 out_width, u16 out_height, -+ enum omap_channel channel_out) -+{ -+ static const u8 limits[3] = { 8, 10, 20 }; -+ int i = 0; -+ int lcd, pcd; -+ struct omap_video_timings t; -+ struct omap_overlay_manager *mgr = manager_for_channel(channel_out); -+ -+ if (!mgr || !mgr->display || !mgr->display->get_timings) -+ return -ENODEV; -+ -+ mgr->display->get_timings(mgr->display, &t); -+ -+ enable_clocks(1); -+ -+ dispc_get_lcd_divisor(&lcd, &pcd); -+ -+ enable_clocks(0); -+ -+ if (out_height < height) -+ i++; -+ if (out_width < width) -+ i++; -+ -+ DSSDBG("(hbp + hsw + hfp) * pcd = %u (limit = %u)\n", -+ (t.hbp + t.hsw + t.hfp) * pcd, limits[i]); -+ -+ if ((t.hbp + t.hsw + t.hfp) * pcd <= limits[i]) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static unsigned long calc_fclk_five_taps(u16 width, u16 height, -+ u16 out_width, u16 out_height, enum omap_color_mode color_mode, -+ enum omap_channel channel_out) -+{ -+ u32 fclk = 0; -+ u64 tmp, pclk = get_pixel_clock(channel_out); -+ -+ if (!out_height || !out_height) -+ return 0; -+ -+ if (height > out_height) { -+ unsigned int ppl = get_display_width(channel_out); -+ if (!ppl) -+ return 0; -+ -+ tmp = pclk * height * out_width; -+ do_div(tmp, 2 * out_height * ppl); -+ fclk = tmp; -+ -+ if (height > 2 * out_height && ppl != out_width) { -+ tmp = pclk * (height - 2 * out_height) * out_width; -+ do_div(tmp, 2 * out_height * (ppl - out_width)); -+ fclk = max(fclk, (u32) tmp); -+ } -+ } -+ -+ if (width > out_width) { -+ tmp = pclk * width; -+ do_div(tmp, out_width); -+ fclk = max(fclk, (u32) tmp); -+ -+ if (color_mode == OMAP_DSS_COLOR_RGB24U) -+ fclk <<= 1; -+ } -+ -+ return fclk; -+} -+ -+static unsigned long calc_fclk(u16 width, u16 height, -+ u16 out_width, u16 out_height, -+ enum omap_channel channel_out) -+{ -+ unsigned int hf, vf; -+ -+ /* -+ * FIXME how to determine the 'A' factor -+ * for the no downscaling case ? 
-+ */ -+ -+ if (width > 3 * out_width) -+ hf = 4; -+ else if (width > 2 * out_width) -+ hf = 3; -+ else if (width > out_width) -+ hf = 2; -+ else -+ hf = 1; -+ -+ if (height > out_height) -+ vf = 2; -+ else -+ vf = 1; -+ -+ return get_pixel_clock(channel_out) * vf * hf; -+} -+ -+static int _dispc_setup_plane(enum omap_plane plane, -+ enum omap_channel channel_out, -+ u32 paddr, u16 screen_width, -+ u16 pos_x, u16 pos_y, -+ u16 width, u16 height, -+ u16 out_width, u16 out_height, -+ enum omap_color_mode color_mode, -+ bool ilace, -+ enum omap_dss_rotation_type rotation_type, -+ u8 rotation, int mirror, -+ u8 global_alpha) -+{ -+ const int maxdownscale = cpu_is_omap34xx() ? 4 : 2; -+ bool five_taps = 0; -+ bool fieldmode = 0; -+ int cconv = 0; -+ unsigned offset0, offset1; -+ s32 row_inc; -+ s32 pix_inc; -+ u16 frame_height = height; -+ unsigned int field_offset = 0; -+ -+ if (paddr == 0) -+ return -EINVAL; -+ -+ if (ilace && height == out_height) -+ fieldmode = 1; -+ -+ if (ilace) { -+ if (fieldmode) -+ height /= 2; -+ pos_y /= 2; -+ out_height /= 2; -+ -+ DSSDBG("adjusting for ilace: height %d, pos_y %d, " -+ "out_height %d\n", -+ height, pos_y, out_height); -+ } -+ -+ if (plane == OMAP_DSS_GFX) { -+ if (width != out_width || height != out_height) -+ return -EINVAL; -+ -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_ARGB16: -+ case OMAP_DSS_COLOR_ARGB32: -+ case OMAP_DSS_COLOR_RGBA32: -+ case OMAP_DSS_COLOR_RGBX32: -+ if (cpu_is_omap24xx()) -+ return -EINVAL; -+ /* fall through */ -+ case OMAP_DSS_COLOR_RGB12U: -+ case OMAP_DSS_COLOR_RGB16: -+ case OMAP_DSS_COLOR_RGB24P: -+ case OMAP_DSS_COLOR_RGB24U: -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ } else { -+ /* video plane */ -+ -+ unsigned long fclk = 0; -+ -+ if (out_width < width / maxdownscale || -+ out_width > width * 8) -+ return -EINVAL; -+ -+ if (out_height < height / maxdownscale || -+ out_height > height * 8) -+ return -EINVAL; -+ -+ switch (color_mode) { -+ case OMAP_DSS_COLOR_RGBX32: -+ case OMAP_DSS_COLOR_RGB12U: -+ if (cpu_is_omap24xx()) -+ return -EINVAL; -+ /* fall through */ -+ case OMAP_DSS_COLOR_RGB16: -+ case OMAP_DSS_COLOR_RGB24P: -+ case OMAP_DSS_COLOR_RGB24U: -+ break; -+ -+ case OMAP_DSS_COLOR_ARGB16: -+ case OMAP_DSS_COLOR_ARGB32: -+ case OMAP_DSS_COLOR_RGBA32: -+ if (cpu_is_omap24xx()) -+ return -EINVAL; -+ if (plane == OMAP_DSS_VIDEO1) -+ return -EINVAL; -+ break; -+ -+ case OMAP_DSS_COLOR_YUV2: -+ case OMAP_DSS_COLOR_UYVY: -+ cconv = 1; -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ if (check_hblank_len(width, height, out_width, out_height, -+ channel_out)) -+ return -EINVAL; -+ -+ /* Must use 5-tap filter? */ -+ five_taps = height > out_height * 2; -+ -+ if (!five_taps) { -+ fclk = calc_fclk(width, height, -+ out_width, out_height, channel_out); -+ -+ /* Try 5-tap filter if 3-tap fclk is too high */ -+ if (cpu_is_omap34xx() && height > out_height && -+ fclk > dispc_fclk_rate()) -+ five_taps = true; -+ } -+ -+ if (width > (2048 >> five_taps)) -+ return -EINVAL; -+ -+ if (five_taps) -+ fclk = calc_fclk_five_taps(width, height, -+ out_width, out_height, -+ color_mode, channel_out); -+ -+ DSSDBG("required fclk rate = %lu Hz\n", fclk); -+ DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); -+ -+ if (fclk > dispc_fclk_rate()) -+ return -EINVAL; -+ } -+ -+ if (ilace && !fieldmode) { -+ /* -+ * when downscaling the bottom field may have to start several -+ * source lines below the top field. 
Unfortunately ACCUI -+ * registers will only hold the fractional part of the offset -+ * so the integer part must be added to the base address of the -+ * bottom field. -+ */ -+ if (!height || height == out_height) -+ field_offset = 0; -+ else -+ field_offset = height / out_height / 2; -+ } -+ -+ /* Fields are independent but interleaved in memory. */ -+ if (fieldmode) -+ field_offset = 1; -+ -+ if (rotation_type == OMAP_DSS_ROT_DMA) -+ calc_dma_rotation_offset(rotation, mirror, -+ screen_width, width, frame_height, color_mode, -+ fieldmode, field_offset, -+ &offset0, &offset1, &row_inc, &pix_inc); -+ else -+ calc_vrfb_rotation_offset(rotation, mirror, -+ screen_width, width, frame_height, color_mode, -+ fieldmode, field_offset, -+ &offset0, &offset1, &row_inc, &pix_inc); -+ -+ DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n", -+ offset0, offset1, row_inc, pix_inc); -+ -+ _dispc_set_channel_out(plane, channel_out); -+ _dispc_set_color_mode(plane, color_mode); -+ -+ _dispc_set_plane_ba0(plane, paddr + offset0); -+ _dispc_set_plane_ba1(plane, paddr + offset1); -+ -+ _dispc_set_row_inc(plane, row_inc); -+ _dispc_set_pix_inc(plane, pix_inc); -+ -+ DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, width, height, -+ out_width, out_height); -+ -+ _dispc_set_plane_pos(plane, pos_x, pos_y); -+ -+ _dispc_set_pic_size(plane, width, height); -+ -+ if (plane != OMAP_DSS_GFX) { -+ _dispc_set_scaling(plane, width, height, -+ out_width, out_height, -+ ilace, five_taps, fieldmode); -+ _dispc_set_vid_size(plane, out_width, out_height); -+ _dispc_set_vid_color_conv(plane, cconv); -+ } -+ -+ _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode); -+ -+ if (plane != OMAP_DSS_VIDEO1) -+ _dispc_setup_global_alpha(plane, global_alpha); -+ -+ return 0; -+} -+ -+static void _dispc_enable_plane(enum omap_plane plane, bool enable) -+{ -+ REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0); -+} -+ -+static void dispc_disable_isr(void *data, u32 mask) -+{ -+ struct completion *compl = data; -+ complete(compl); -+} -+ -+static void _enable_lcd_out(bool enable) -+{ -+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0); -+} -+ -+void dispc_enable_lcd_out(bool enable) -+{ -+ struct completion frame_done_completion; -+ bool is_on; -+ int r; -+ -+ enable_clocks(1); -+ -+ /* When we disable LCD output, we need to wait until frame is done. -+ * Otherwise the DSS is still working, and turning off the clocks -+ * prevents DSS from going to OFF mode */ -+ is_on = REG_GET(DISPC_CONTROL, 0, 0); -+ -+ if (!enable && is_on) { -+ init_completion(&frame_done_completion); -+ -+ r = omap_dispc_register_isr(dispc_disable_isr, -+ &frame_done_completion, -+ DISPC_IRQ_FRAMEDONE); -+ -+ if (r) -+ DSSERR("failed to register FRAMEDONE isr\n"); -+ } -+ -+ _enable_lcd_out(enable); -+ -+ if (!enable && is_on) { -+ if (!wait_for_completion_timeout(&frame_done_completion, -+ msecs_to_jiffies(100))) -+ DSSERR("timeout waiting for FRAME DONE\n"); -+ -+ r = omap_dispc_unregister_isr(dispc_disable_isr, -+ &frame_done_completion, -+ DISPC_IRQ_FRAMEDONE); -+ -+ if (r) -+ DSSERR("failed to unregister FRAMEDONE isr\n"); -+ } -+ -+ enable_clocks(0); -+} -+ -+static void _enable_digit_out(bool enable) -+{ -+ REG_FLD_MOD(DISPC_CONTROL, enable ? 
1 : 0, 1, 1); -+} -+ -+void dispc_enable_digit_errors(int enable) -+{ -+ unsigned long flags; -+ -+ enable_clocks(1); -+ -+ spin_lock_irqsave(&dispc.irq_lock, flags); -+ -+ if (!enable) { -+ /* When we enable digit output, we'll get an extra digit -+ * sync lost interrupt, that we need to ignore */ -+ dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT; -+ } else { -+ dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; -+ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT); -+ } -+ -+ _omap_dispc_set_irqs(); -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+ -+ enable_clocks(0); -+} -+ -+void dispc_enable_digit_out(bool enable) -+{ -+ struct completion frame_done_completion; -+ int r; -+ -+ enable_clocks(1); -+ -+ if (REG_GET(DISPC_CONTROL, 1, 1) == enable) { -+ enable_clocks(0); -+ return; -+ } -+ -+ /* When we disable digit output, we need to wait until fields are done. -+ * Otherwise the DSS is still working, and turning off the clocks -+ * prevents DSS from going to OFF mode. And when enabling, we need to -+ * wait for the extra sync losts */ -+ init_completion(&frame_done_completion); -+ -+ r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion, -+ DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD); -+ if (r) -+ DSSERR("failed to register EVSYNC isr\n"); -+ -+ _enable_digit_out(enable); -+ -+ /* XXX I understand from TRM that we should only wait for the -+ * current field to complete. But it seems we have to wait -+ * for both fields */ -+ if (!wait_for_completion_timeout(&frame_done_completion, -+ msecs_to_jiffies(100))) -+ DSSERR("timeout waiting for EVSYNC\n"); -+ -+ if (!wait_for_completion_timeout(&frame_done_completion, -+ msecs_to_jiffies(100))) -+ DSSERR("timeout waiting for EVSYNC\n"); -+ -+ r = omap_dispc_unregister_isr(dispc_disable_isr, -+ &frame_done_completion, -+ DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD); -+ if (r) -+ DSSERR("failed to unregister EVSYNC isr\n"); -+ -+ enable_clocks(0); -+} -+ -+void dispc_lcd_enable_signal_polarity(bool act_high) -+{ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); -+ enable_clocks(0); -+} -+ -+void dispc_lcd_enable_signal(bool enable) -+{ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); -+ enable_clocks(0); -+} -+ -+void dispc_pck_free_enable(bool enable) -+{ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27); -+ enable_clocks(0); -+} -+ -+void dispc_enable_fifohandcheck(bool enable) -+{ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONFIG, enable ? 
1 : 0, 16, 16); -+ enable_clocks(0); -+} -+ -+ -+void dispc_set_lcd_display_type(enum omap_lcd_display_type type) -+{ -+ int mode; -+ -+ switch (type) { -+ case OMAP_DSS_LCD_DISPLAY_STN: -+ mode = 0; -+ break; -+ -+ case OMAP_DSS_LCD_DISPLAY_TFT: -+ mode = 1; -+ break; -+ -+ default: -+ BUG(); -+ return; -+ } -+ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); -+ enable_clocks(0); -+} -+ -+void dispc_set_loadmode(enum omap_dss_load_mode mode) -+{ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1); -+ enable_clocks(0); -+} -+ -+ -+void dispc_set_default_color(enum omap_channel channel, u32 color) -+{ -+ const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0, -+ DISPC_DEFAULT_COLOR1 }; -+ -+ enable_clocks(1); -+ dispc_write_reg(def_reg[channel], color); -+ enable_clocks(0); -+} -+ -+u32 dispc_get_default_color(enum omap_channel channel) -+{ -+ const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0, -+ DISPC_DEFAULT_COLOR1 }; -+ u32 l; -+ -+ BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT && -+ channel != OMAP_DSS_CHANNEL_LCD); -+ -+ enable_clocks(1); -+ l = dispc_read_reg(def_reg[channel]); -+ enable_clocks(0); -+ -+ return l; -+} -+ -+void dispc_set_trans_key(enum omap_channel ch, -+ enum omap_dss_color_key_type type, -+ u32 trans_key) -+{ -+ const struct dispc_reg tr_reg[] = { -+ DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 }; -+ -+ enable_clocks(1); -+ if (ch == OMAP_DSS_CHANNEL_LCD) -+ REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); -+ else /* OMAP_DSS_CHANNEL_DIGIT */ -+ REG_FLD_MOD(DISPC_CONFIG, type, 13, 13); -+ -+ dispc_write_reg(tr_reg[ch], trans_key); -+ enable_clocks(0); -+} -+ -+void dispc_get_trans_key(enum omap_channel ch, -+ enum omap_dss_color_key_type *type, -+ u32 *trans_key) -+{ -+ const struct dispc_reg tr_reg[] = { -+ DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 }; -+ -+ enable_clocks(1); -+ if (type) { -+ if (ch == OMAP_DSS_CHANNEL_LCD) -+ *type = REG_GET(DISPC_CONFIG, 11, 11); -+ else if (ch == OMAP_DSS_CHANNEL_DIGIT) -+ *type = REG_GET(DISPC_CONFIG, 13, 13); -+ else -+ BUG(); -+ } -+ -+ if (trans_key) -+ *trans_key = dispc_read_reg(tr_reg[ch]); -+ enable_clocks(0); -+} -+ -+void dispc_enable_trans_key(enum omap_channel ch, bool enable) -+{ -+ enable_clocks(1); -+ if (ch == OMAP_DSS_CHANNEL_LCD) -+ REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); -+ else /* OMAP_DSS_CHANNEL_DIGIT */ -+ REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); -+ dispc_set_overlay_optimization(); -+ dispc_go(ch); -+ enable_clocks(0); -+} -+void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) -+{ -+ if (cpu_is_omap24xx()) -+ return; -+ -+ enable_clocks(1); -+ if (ch == OMAP_DSS_CHANNEL_LCD) -+ REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); -+ else /* OMAP_DSS_CHANNEL_DIGIT */ -+ REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); -+ dispc_set_overlay_optimization(); -+ dispc_go(ch); -+ enable_clocks(0); -+} -+bool dispc_alpha_blending_enabled(enum omap_channel ch) -+{ -+ bool enabled; -+ -+ if (cpu_is_omap24xx()) -+ return false; -+ -+ enable_clocks(1); -+ if (ch == OMAP_DSS_CHANNEL_LCD) -+ enabled = REG_GET(DISPC_CONFIG, 18, 18); -+ else if (ch == OMAP_DSS_CHANNEL_DIGIT) -+ enabled = REG_GET(DISPC_CONFIG, 18, 18); -+ else -+ BUG(); -+ enable_clocks(0); -+ -+ return enabled; -+ -+} -+ -+ -+bool dispc_trans_key_enabled(enum omap_channel ch) -+{ -+ bool enabled; -+ -+ enable_clocks(1); -+ if (ch == OMAP_DSS_CHANNEL_LCD) -+ enabled = REG_GET(DISPC_CONFIG, 10, 10); -+ else if (ch == OMAP_DSS_CHANNEL_DIGIT) -+ enabled = REG_GET(DISPC_CONFIG, 12, 12); -+ else BUG(); -+ enable_clocks(0); -+ -+ return 
enabled; -+} -+ -+ -+void dispc_set_tft_data_lines(u8 data_lines) -+{ -+ int code; -+ -+ switch (data_lines) { -+ case 12: -+ code = 0; -+ break; -+ case 16: -+ code = 1; -+ break; -+ case 18: -+ code = 2; -+ break; -+ case 24: -+ code = 3; -+ break; -+ default: -+ BUG(); -+ return; -+ } -+ -+ enable_clocks(1); -+ REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); -+ enable_clocks(0); -+} -+ -+void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode) -+{ -+ u32 l; -+ int stallmode; -+ int gpout0 = 1; -+ int gpout1; -+ -+ switch (mode) { -+ case OMAP_DSS_PARALLELMODE_BYPASS: -+ stallmode = 0; -+ gpout1 = 1; -+ break; -+ -+ case OMAP_DSS_PARALLELMODE_RFBI: -+ stallmode = 1; -+ gpout1 = 0; -+ break; -+ -+ case OMAP_DSS_PARALLELMODE_DSI: -+ stallmode = 1; -+ gpout1 = 1; -+ break; -+ -+ default: -+ BUG(); -+ return; -+ } -+ -+ enable_clocks(1); -+ -+ l = dispc_read_reg(DISPC_CONTROL); -+ -+ l = FLD_MOD(l, stallmode, 11, 11); -+ l = FLD_MOD(l, gpout0, 15, 15); -+ l = FLD_MOD(l, gpout1, 16, 16); -+ -+ dispc_write_reg(DISPC_CONTROL, l); -+ -+ enable_clocks(0); -+} -+ -+static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp, -+ int vsw, int vfp, int vbp) -+{ -+ u32 timing_h, timing_v; -+ -+ if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { -+ BUG_ON(hsw < 1 || hsw > 64); -+ BUG_ON(hfp < 1 || hfp > 256); -+ BUG_ON(hbp < 1 || hbp > 256); -+ -+ BUG_ON(vsw < 1 || vsw > 64); -+ BUG_ON(vfp < 0 || vfp > 255); -+ BUG_ON(vbp < 0 || vbp > 255); -+ -+ timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) | -+ FLD_VAL(hbp-1, 27, 20); -+ -+ timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) | -+ FLD_VAL(vbp, 27, 20); -+ } else { -+ BUG_ON(hsw < 1 || hsw > 256); -+ BUG_ON(hfp < 1 || hfp > 4096); -+ BUG_ON(hbp < 1 || hbp > 4096); -+ -+ BUG_ON(vsw < 1 || vsw > 256); -+ BUG_ON(vfp < 0 || vfp > 4095); -+ BUG_ON(vbp < 0 || vbp > 4095); -+ -+ timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) | -+ FLD_VAL(hbp-1, 31, 20); -+ -+ timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) | -+ FLD_VAL(vbp, 31, 20); -+ } -+ -+ enable_clocks(1); -+ dispc_write_reg(DISPC_TIMING_H, timing_h); -+ dispc_write_reg(DISPC_TIMING_V, timing_v); -+ enable_clocks(0); -+} -+ -+/* change name to mode? 
*/ -+void dispc_set_lcd_timings(struct omap_video_timings *timings) -+{ -+ unsigned xtot, ytot; -+ unsigned long ht, vt; -+ -+ _dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp, -+ timings->vsw, timings->vfp, timings->vbp); -+ -+ dispc_set_lcd_size(timings->x_res, timings->y_res); -+ -+ xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp; -+ ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp; -+ -+ ht = (timings->pixel_clock * 1000) / xtot; -+ vt = (timings->pixel_clock * 1000) / xtot / ytot; -+ -+ DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res); -+ DSSDBG("pck %u\n", timings->pixel_clock); -+ DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", -+ timings->hsw, timings->hfp, timings->hbp, -+ timings->vsw, timings->vfp, timings->vbp); -+ -+ DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); -+} -+ -+void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div) -+{ -+ BUG_ON(lck_div < 1); -+ BUG_ON(pck_div < 2); -+ -+ enable_clocks(1); -+ dispc_write_reg(DISPC_DIVISOR, -+ FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); -+ enable_clocks(0); -+} -+ -+static void dispc_get_lcd_divisor(int *lck_div, int *pck_div) -+{ -+ u32 l; -+ l = dispc_read_reg(DISPC_DIVISOR); -+ *lck_div = FLD_GET(l, 23, 16); -+ *pck_div = FLD_GET(l, 7, 0); -+} -+ -+unsigned long dispc_fclk_rate(void) -+{ -+ unsigned long r = 0; -+ -+ if (dss_get_dispc_clk_source() == 0) -+ r = dss_clk_get_rate(DSS_CLK_FCK1); -+ else -+#ifdef CONFIG_OMAP2_DSS_DSI -+ r = dsi_get_dsi1_pll_rate(); -+#else -+ BUG(); -+#endif -+ return r; -+} -+ -+unsigned long dispc_lclk_rate(void) -+{ -+ int lcd; -+ unsigned long r; -+ u32 l; -+ -+ l = dispc_read_reg(DISPC_DIVISOR); -+ -+ lcd = FLD_GET(l, 23, 16); -+ -+ r = dispc_fclk_rate(); -+ -+ return r / lcd; -+} -+ -+unsigned long dispc_pclk_rate(void) -+{ -+ int lcd, pcd; -+ unsigned long r; -+ u32 l; -+ -+ l = dispc_read_reg(DISPC_DIVISOR); -+ -+ lcd = FLD_GET(l, 23, 16); -+ pcd = FLD_GET(l, 7, 0); -+ -+ r = dispc_fclk_rate(); -+ -+ return r / lcd / pcd; -+} -+ -+void dispc_dump_clocks(struct seq_file *s) -+{ -+ int lcd, pcd; -+ -+ enable_clocks(1); -+ -+ dispc_get_lcd_divisor(&lcd, &pcd); -+ -+ seq_printf(s, "- dispc -\n"); -+ -+ seq_printf(s, "dispc fclk source = %s\n", -+ dss_get_dispc_clk_source() == 0 ? 
-+ "dss1_alwon_fclk" : "dsi1_pll_fclk"); -+ -+ seq_printf(s, "pixel clk = %lu / %d / %d = %lu\n", -+ dispc_fclk_rate(), -+ lcd, pcd, -+ dispc_pclk_rate()); -+ -+ enable_clocks(0); -+} -+ -+void dispc_dump_regs(struct seq_file *s) -+{ -+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ DUMPREG(DISPC_REVISION); -+ DUMPREG(DISPC_SYSCONFIG); -+ DUMPREG(DISPC_SYSSTATUS); -+ DUMPREG(DISPC_IRQSTATUS); -+ DUMPREG(DISPC_IRQENABLE); -+ DUMPREG(DISPC_CONTROL); -+ DUMPREG(DISPC_CONFIG); -+ DUMPREG(DISPC_CAPABLE); -+ DUMPREG(DISPC_DEFAULT_COLOR0); -+ DUMPREG(DISPC_DEFAULT_COLOR1); -+ DUMPREG(DISPC_TRANS_COLOR0); -+ DUMPREG(DISPC_TRANS_COLOR1); -+ DUMPREG(DISPC_LINE_STATUS); -+ DUMPREG(DISPC_LINE_NUMBER); -+ DUMPREG(DISPC_TIMING_H); -+ DUMPREG(DISPC_TIMING_V); -+ DUMPREG(DISPC_POL_FREQ); -+ DUMPREG(DISPC_DIVISOR); -+ DUMPREG(DISPC_GLOBAL_ALPHA); -+ DUMPREG(DISPC_SIZE_DIG); -+ DUMPREG(DISPC_SIZE_LCD); -+ -+ DUMPREG(DISPC_GFX_BA0); -+ DUMPREG(DISPC_GFX_BA1); -+ DUMPREG(DISPC_GFX_POSITION); -+ DUMPREG(DISPC_GFX_SIZE); -+ DUMPREG(DISPC_GFX_ATTRIBUTES); -+ DUMPREG(DISPC_GFX_FIFO_THRESHOLD); -+ DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS); -+ DUMPREG(DISPC_GFX_ROW_INC); -+ DUMPREG(DISPC_GFX_PIXEL_INC); -+ DUMPREG(DISPC_GFX_WINDOW_SKIP); -+ DUMPREG(DISPC_GFX_TABLE_BA); -+ -+ DUMPREG(DISPC_DATA_CYCLE1); -+ DUMPREG(DISPC_DATA_CYCLE2); -+ DUMPREG(DISPC_DATA_CYCLE3); -+ -+ DUMPREG(DISPC_CPR_COEF_R); -+ DUMPREG(DISPC_CPR_COEF_G); -+ DUMPREG(DISPC_CPR_COEF_B); -+ -+ DUMPREG(DISPC_GFX_PRELOAD); -+ -+ DUMPREG(DISPC_VID_BA0(0)); -+ DUMPREG(DISPC_VID_BA1(0)); -+ DUMPREG(DISPC_VID_POSITION(0)); -+ DUMPREG(DISPC_VID_SIZE(0)); -+ DUMPREG(DISPC_VID_ATTRIBUTES(0)); -+ DUMPREG(DISPC_VID_FIFO_THRESHOLD(0)); -+ DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0)); -+ DUMPREG(DISPC_VID_ROW_INC(0)); -+ DUMPREG(DISPC_VID_PIXEL_INC(0)); -+ DUMPREG(DISPC_VID_FIR(0)); -+ DUMPREG(DISPC_VID_PICTURE_SIZE(0)); -+ DUMPREG(DISPC_VID_ACCU0(0)); -+ DUMPREG(DISPC_VID_ACCU1(0)); -+ -+ DUMPREG(DISPC_VID_BA0(1)); -+ DUMPREG(DISPC_VID_BA1(1)); -+ DUMPREG(DISPC_VID_POSITION(1)); -+ DUMPREG(DISPC_VID_SIZE(1)); -+ DUMPREG(DISPC_VID_ATTRIBUTES(1)); -+ DUMPREG(DISPC_VID_FIFO_THRESHOLD(1)); -+ DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1)); -+ DUMPREG(DISPC_VID_ROW_INC(1)); -+ DUMPREG(DISPC_VID_PIXEL_INC(1)); -+ DUMPREG(DISPC_VID_FIR(1)); -+ DUMPREG(DISPC_VID_PICTURE_SIZE(1)); -+ DUMPREG(DISPC_VID_ACCU0(1)); -+ DUMPREG(DISPC_VID_ACCU1(1)); -+ -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 0)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 1)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 2)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 3)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 5)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 6)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 7)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7)); -+ DUMPREG(DISPC_VID_CONV_COEF(0, 0)); -+ DUMPREG(DISPC_VID_CONV_COEF(0, 1)); -+ DUMPREG(DISPC_VID_CONV_COEF(0, 2)); -+ DUMPREG(DISPC_VID_CONV_COEF(0, 3)); -+ DUMPREG(DISPC_VID_CONV_COEF(0, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 0)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 1)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 2)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 3)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 5)); 
-+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 6)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 7)); -+ -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 0)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 1)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 2)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 3)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 5)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 6)); -+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 7)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6)); -+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7)); -+ DUMPREG(DISPC_VID_CONV_COEF(1, 0)); -+ DUMPREG(DISPC_VID_CONV_COEF(1, 1)); -+ DUMPREG(DISPC_VID_CONV_COEF(1, 2)); -+ DUMPREG(DISPC_VID_CONV_COEF(1, 3)); -+ DUMPREG(DISPC_VID_CONV_COEF(1, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 0)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 1)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 2)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 3)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 4)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 5)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 6)); -+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 7)); -+ -+ DUMPREG(DISPC_VID_PRELOAD(0)); -+ DUMPREG(DISPC_VID_PRELOAD(1)); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+#undef DUMPREG -+} -+ -+static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc, -+ bool ihs, bool ivs, u8 acbi, u8 acb) -+{ -+ u32 l = 0; -+ -+ DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n", -+ onoff, rf, ieo, ipc, ihs, ivs, acbi, acb); -+ -+ l |= FLD_VAL(onoff, 17, 17); -+ l |= FLD_VAL(rf, 16, 16); -+ l |= FLD_VAL(ieo, 15, 15); -+ l |= FLD_VAL(ipc, 14, 14); -+ l |= FLD_VAL(ihs, 13, 13); -+ l |= FLD_VAL(ivs, 12, 12); -+ l |= FLD_VAL(acbi, 11, 8); -+ l |= FLD_VAL(acb, 7, 0); -+ -+ enable_clocks(1); -+ dispc_write_reg(DISPC_POL_FREQ, l); -+ enable_clocks(0); -+} -+ -+void dispc_set_pol_freq(struct omap_panel *panel) -+{ -+ _dispc_set_pol_freq((panel->config & OMAP_DSS_LCD_ONOFF) != 0, -+ (panel->config & OMAP_DSS_LCD_RF) != 0, -+ (panel->config & OMAP_DSS_LCD_IEO) != 0, -+ (panel->config & OMAP_DSS_LCD_IPC) != 0, -+ (panel->config & OMAP_DSS_LCD_IHS) != 0, -+ (panel->config & OMAP_DSS_LCD_IVS) != 0, -+ panel->acbi, panel->acb); -+} -+ -+void find_lck_pck_divs(bool is_tft, unsigned long req_pck, unsigned long fck, -+ u16 *lck_div, u16 *pck_div) -+{ -+ u16 pcd_min = is_tft ? 
2 : 3; -+ unsigned long best_pck; -+ u16 best_ld, cur_ld; -+ u16 best_pd, cur_pd; -+ -+ best_pck = 0; -+ best_ld = 0; -+ best_pd = 0; -+ -+ for (cur_ld = 1; cur_ld <= 255; ++cur_ld) { -+ unsigned long lck = fck / cur_ld; -+ -+ for (cur_pd = pcd_min; cur_pd <= 255; ++cur_pd) { -+ unsigned long pck = lck / cur_pd; -+ long old_delta = abs(best_pck - req_pck); -+ long new_delta = abs(pck - req_pck); -+ -+ if (best_pck == 0 || new_delta < old_delta) { -+ best_pck = pck; -+ best_ld = cur_ld; -+ best_pd = cur_pd; -+ -+ if (pck == req_pck) -+ goto found; -+ } -+ -+ if (pck < req_pck) -+ break; -+ } -+ -+ if (lck / pcd_min < req_pck) -+ break; -+ } -+ -+found: -+ *lck_div = best_ld; -+ *pck_div = best_pd; -+} -+ -+int dispc_calc_clock_div(bool is_tft, unsigned long req_pck, -+ struct dispc_clock_info *cinfo) -+{ -+ unsigned long prate; -+ struct dispc_clock_info cur, best; -+ int match = 0; -+ int min_fck_per_pck; -+ unsigned long fck_rate = dss_clk_get_rate(DSS_CLK_FCK1); -+ -+ if (cpu_is_omap34xx()) -+ prate = clk_get_rate(clk_get_parent(dispc.dpll4_m4_ck)); -+ else -+ prate = 0; -+ -+ if (req_pck == dispc.cache_req_pck && -+ ((cpu_is_omap34xx() && prate == dispc.cache_prate) || -+ dispc.cache_cinfo.fck == fck_rate)) { -+ DSSDBG("dispc clock info found from cache.\n"); -+ *cinfo = dispc.cache_cinfo; -+ return 0; -+ } -+ -+ min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK; -+ -+ if (min_fck_per_pck && -+ req_pck * min_fck_per_pck > DISPC_MAX_FCK) { -+ DSSERR("Requested pixel clock not possible with the current " -+ "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning " -+ "the constraint off.\n"); -+ min_fck_per_pck = 0; -+ } -+ -+retry: -+ memset(&cur, 0, sizeof(cur)); -+ memset(&best, 0, sizeof(best)); -+ -+ if (cpu_is_omap24xx()) { -+ /* XXX can we change the clock on omap2? 
*/ -+ cur.fck = dss_clk_get_rate(DSS_CLK_FCK1); -+ cur.fck_div = 1; -+ -+ match = 1; -+ -+ find_lck_pck_divs(is_tft, req_pck, cur.fck, -+ &cur.lck_div, &cur.pck_div); -+ -+ cur.lck = cur.fck / cur.lck_div; -+ cur.pck = cur.lck / cur.pck_div; -+ -+ best = cur; -+ -+ goto found; -+ } else if (cpu_is_omap34xx()) { -+ for (cur.fck_div = 16; cur.fck_div > 0; --cur.fck_div) { -+ cur.fck = prate / cur.fck_div * 2; -+ -+ if (cur.fck > DISPC_MAX_FCK) -+ continue; -+ -+ if (min_fck_per_pck && -+ cur.fck < req_pck * min_fck_per_pck) -+ continue; -+ -+ match = 1; -+ -+ find_lck_pck_divs(is_tft, req_pck, cur.fck, -+ &cur.lck_div, &cur.pck_div); -+ -+ cur.lck = cur.fck / cur.lck_div; -+ cur.pck = cur.lck / cur.pck_div; -+ -+ if (abs(cur.pck - req_pck) < abs(best.pck - req_pck)) { -+ best = cur; -+ -+ if (cur.pck == req_pck) -+ goto found; -+ } -+ } -+ } else { -+ BUG(); -+ } -+ -+found: -+ if (!match) { -+ if (min_fck_per_pck) { -+ DSSERR("Could not find suitable clock settings.\n" -+ "Turning FCK/PCK constraint off and" -+ "trying again.\n"); -+ min_fck_per_pck = 0; -+ goto retry; -+ } -+ -+ DSSERR("Could not find suitable clock settings.\n"); -+ -+ return -EINVAL; -+ } -+ -+ if (cinfo) -+ *cinfo = best; -+ -+ dispc.cache_req_pck = req_pck; -+ dispc.cache_prate = prate; -+ dispc.cache_cinfo = best; -+ -+ return 0; -+} -+ -+int dispc_set_clock_div(struct dispc_clock_info *cinfo) -+{ -+ unsigned long prate; -+ int r; -+ -+ if (cpu_is_omap34xx()) { -+ prate = clk_get_rate(clk_get_parent(dispc.dpll4_m4_ck)); -+ DSSDBG("dpll4_m4 = %ld\n", prate); -+ } -+ -+ DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div); -+ DSSDBG("lck = %ld (%d)\n", cinfo->lck, cinfo->lck_div); -+ DSSDBG("pck = %ld (%d)\n", cinfo->pck, cinfo->pck_div); -+ -+ if (cpu_is_omap34xx()) { -+ r = clk_set_rate(dispc.dpll4_m4_ck, prate / cinfo->fck_div); -+ if (r) -+ return r; -+ } -+ -+ dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div); -+ -+ return 0; -+} -+ -+int dispc_get_clock_div(struct dispc_clock_info *cinfo) -+{ -+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1); -+ -+ if (cpu_is_omap34xx()) { -+ unsigned long prate; -+ prate = clk_get_rate(clk_get_parent(dispc.dpll4_m4_ck)); -+ cinfo->fck_div = prate / (cinfo->fck / 2); -+ } else { -+ cinfo->fck_div = 0; -+ } -+ -+ cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16); -+ cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0); -+ -+ cinfo->lck = cinfo->fck / cinfo->lck_div; -+ cinfo->pck = cinfo->lck / cinfo->pck_div; -+ -+ return 0; -+} -+ -+/* dispc.irq_lock has to be locked by the caller */ -+static void _omap_dispc_set_irqs(void) -+{ -+ u32 mask; -+ u32 old_mask; -+ int i; -+ struct omap_dispc_isr_data *isr_data; -+ -+ mask = dispc.irq_error_mask; -+ -+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { -+ isr_data = &dispc.registered_isr[i]; -+ -+ if (isr_data->isr == NULL) -+ continue; -+ -+ mask |= isr_data->mask; -+ } -+ -+ enable_clocks(1); -+ -+ old_mask = dispc_read_reg(DISPC_IRQENABLE); -+ /* clear the irqstatus for newly enabled irqs */ -+ dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask); -+ -+ dispc_write_reg(DISPC_IRQENABLE, mask); -+ -+ enable_clocks(0); -+} -+ -+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) -+{ -+ int i; -+ int ret; -+ unsigned long flags; -+ struct omap_dispc_isr_data *isr_data; -+ -+ if (isr == NULL) -+ return -EINVAL; -+ -+ spin_lock_irqsave(&dispc.irq_lock, flags); -+ -+ /* check for duplicate entry */ -+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { -+ isr_data = &dispc.registered_isr[i]; -+ if (isr_data->isr == isr && isr_data->arg == 
arg && -+ isr_data->mask == mask) { -+ ret = -EINVAL; -+ goto err; -+ } -+ } -+ -+ isr_data = NULL; -+ ret = -EBUSY; -+ -+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { -+ isr_data = &dispc.registered_isr[i]; -+ -+ if (isr_data->isr != NULL) -+ continue; -+ -+ isr_data->isr = isr; -+ isr_data->arg = arg; -+ isr_data->mask = mask; -+ ret = 0; -+ -+ break; -+ } -+ -+ _omap_dispc_set_irqs(); -+ -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+ -+ return 0; -+err: -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+ -+ return ret; -+} -+EXPORT_SYMBOL(omap_dispc_register_isr); -+ -+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask) -+{ -+ int i; -+ unsigned long flags; -+ int ret = -EINVAL; -+ struct omap_dispc_isr_data *isr_data; -+ -+ spin_lock_irqsave(&dispc.irq_lock, flags); -+ -+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { -+ isr_data = &dispc.registered_isr[i]; -+ if (isr_data->isr != isr || isr_data->arg != arg || -+ isr_data->mask != mask) -+ continue; -+ -+ /* found the correct isr */ -+ -+ isr_data->isr = NULL; -+ isr_data->arg = NULL; -+ isr_data->mask = 0; -+ -+ ret = 0; -+ break; -+ } -+ -+ if (ret == 0) -+ _omap_dispc_set_irqs(); -+ -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+ -+ return ret; -+} -+EXPORT_SYMBOL(omap_dispc_unregister_isr); -+ -+#ifdef DEBUG -+static void print_irq_status(u32 status) -+{ -+ if ((status & dispc.irq_error_mask) == 0) -+ return; -+ -+ printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status); -+ -+#define PIS(x) \ -+ if (status & DISPC_IRQ_##x) \ -+ printk(#x " "); -+ PIS(GFX_FIFO_UNDERFLOW); -+ PIS(OCP_ERR); -+ PIS(VID1_FIFO_UNDERFLOW); -+ PIS(VID2_FIFO_UNDERFLOW); -+ PIS(SYNC_LOST); -+ PIS(SYNC_LOST_DIGIT); -+#undef PIS -+ -+ printk("\n"); -+} -+#endif -+ -+/* Called from dss.c. Note that we don't touch clocks here, -+ * but we presume they are on because we got an IRQ. However, -+ * an irq handler may turn the clocks off, so we may not have -+ * clock later in the function. */ -+void dispc_irq_handler(void) -+{ -+ int i; -+ u32 irqstatus; -+ u32 handledirqs = 0; -+ u32 unhandled_errors; -+ struct omap_dispc_isr_data *isr_data; -+ -+ spin_lock(&dispc.irq_lock); -+ -+ irqstatus = dispc_read_reg(DISPC_IRQSTATUS); -+ -+#ifdef DEBUG -+ if (dss_debug) -+ print_irq_status(irqstatus); -+#endif -+ /* Ack the interrupt. 
Do it here before clocks are possibly turned -+ * off */ -+ dispc_write_reg(DISPC_IRQSTATUS, irqstatus); -+ -+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { -+ isr_data = &dispc.registered_isr[i]; -+ -+ if (!isr_data->isr) -+ continue; -+ -+ if (isr_data->mask & irqstatus) { -+ isr_data->isr(isr_data->arg, irqstatus); -+ handledirqs |= isr_data->mask; -+ } -+ } -+ -+ unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask; -+ -+ if (unhandled_errors) { -+ dispc.error_irqs |= unhandled_errors; -+ -+ dispc.irq_error_mask &= ~unhandled_errors; -+ _omap_dispc_set_irqs(); -+ -+ schedule_work(&dispc.error_work); -+ } -+ -+ spin_unlock(&dispc.irq_lock); -+} -+ -+static void dispc_error_worker(struct work_struct *work) -+{ -+ int i; -+ u32 errors; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dispc.irq_lock, flags); -+ errors = dispc.error_irqs; -+ dispc.error_irqs = 0; -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+ -+ if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) -+ DSSERR("GFX_FIFO_UNDERFLOW\n"); -+ -+ if (errors & DISPC_IRQ_VID1_FIFO_UNDERFLOW) -+ DSSERR("VID1_FIFO_UNDERFLOW\n"); -+ -+ if (errors & DISPC_IRQ_VID2_FIFO_UNDERFLOW) -+ DSSERR("VID2_FIFO_UNDERFLOW\n"); -+ -+ if (errors & DISPC_IRQ_SYNC_LOST) { -+ DSSERR("SYNC_LOST, going to perform a soft reset\n"); -+ dss_schedule_reset(); -+ } -+ -+ if (errors & DISPC_IRQ_SYNC_LOST_DIGIT) { -+ DSSERR("SYNC_LOST_DIGIT, going to perform a soft reset\n"); -+ dss_schedule_reset(); -+ } -+ -+ if (errors & DISPC_IRQ_OCP_ERR) { -+ DSSERR("OCP_ERR\n"); -+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { -+ struct omap_overlay_manager *mgr; -+ mgr = omap_dss_get_overlay_manager(i); -+ -+ if (mgr->caps & OMAP_DSS_OVL_CAP_DISPC) -+ mgr->display->disable(mgr->display); -+ } -+ } -+ -+ spin_lock_irqsave(&dispc.irq_lock, flags); -+ dispc.irq_error_mask |= errors; -+ _omap_dispc_set_irqs(); -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+} -+ -+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout) -+{ -+ void dispc_irq_wait_handler(void *data, u32 mask) -+ { -+ complete((struct completion *)data); -+ } -+ -+ int r; -+ DECLARE_COMPLETION_ONSTACK(completion); -+ -+ r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion, -+ irqmask); -+ -+ if (r) -+ return r; -+ -+ timeout = wait_for_completion_timeout(&completion, timeout); -+ -+ omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask); -+ -+ if (timeout == 0) -+ return -ETIMEDOUT; -+ -+ if (timeout == -ERESTARTSYS) -+ return -ERESTARTSYS; -+ -+ return 0; -+} -+ -+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, -+ unsigned long timeout) -+{ -+ void dispc_irq_wait_handler(void *data, u32 mask) -+ { -+ complete((struct completion *)data); -+ } -+ -+ int r; -+ DECLARE_COMPLETION_ONSTACK(completion); -+ -+ r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion, -+ irqmask); -+ -+ if (r) -+ return r; -+ -+ timeout = wait_for_completion_interruptible_timeout(&completion, -+ timeout); -+ -+ omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask); -+ -+ if (timeout == 0) -+ return -ETIMEDOUT; -+ -+ if (timeout == -ERESTARTSYS) -+ return -ERESTARTSYS; -+ -+ return 0; -+} -+ -+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC -+void dispc_fake_vsync_irq(void) -+{ -+ u32 irqstatus = DISPC_IRQ_VSYNC; -+ int i; -+ -+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { -+ struct omap_dispc_isr_data *isr_data; -+ isr_data = &dispc.registered_isr[i]; -+ -+ if (!isr_data->isr) -+ continue; -+ -+ if (isr_data->mask & irqstatus) -+ 
isr_data->isr(isr_data->arg, irqstatus); -+ } -+} -+#endif -+ -+static void _omap_dispc_initialize_irq(void) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dispc.irq_lock, flags); -+ -+ memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr)); -+ -+ dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; -+ -+ /* there's SYNC_LOST_DIGIT waiting after enabling the DSS, -+ * so clear it */ -+ dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS)); -+ -+ _omap_dispc_set_irqs(); -+ -+ spin_unlock_irqrestore(&dispc.irq_lock, flags); -+} -+ -+void dispc_enable_sidle(void) -+{ -+ REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3); /* SIDLEMODE: smart idle */ -+} -+ -+void dispc_disable_sidle(void) -+{ -+ REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ -+} -+ -+static void _omap_dispc_initial_config(void) -+{ -+ u32 l; -+ -+ l = dispc_read_reg(DISPC_SYSCONFIG); -+ l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */ -+ l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */ -+ l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */ -+ l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */ -+ dispc_write_reg(DISPC_SYSCONFIG, l); -+ -+ /* FUNCGATED */ -+ REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); -+ -+ /* L3 firewall setting: enable access to OCM RAM */ -+ if (cpu_is_omap24xx()) -+ __raw_writel(0x402000b0, IO_ADDRESS(0x680050a0)); -+ -+ _dispc_setup_color_conv_coef(); -+ -+ dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); -+} -+ -+int dispc_init(void) -+{ -+ u32 rev; -+ -+ spin_lock_init(&dispc.irq_lock); -+ -+ INIT_WORK(&dispc.error_work, dispc_error_worker); -+ -+ dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS); -+ if (!dispc.base) { -+ DSSERR("can't ioremap DISPC\n"); -+ return -ENOMEM; -+ } -+ -+ if (cpu_is_omap34xx()) { -+ dispc.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck"); -+ if (IS_ERR(dispc.dpll4_m4_ck)) { -+ DSSERR("Failed to get dpll4_m4_ck\n"); -+ return -ENODEV; -+ } -+ } -+ -+ enable_clocks(1); -+ -+ _omap_dispc_initial_config(); -+ -+ _omap_dispc_initialize_irq(); -+ -+ dispc_save_context(); -+ -+ rev = dispc_read_reg(DISPC_REVISION); -+ printk(KERN_INFO "OMAP DISPC rev %d.%d\n", -+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); -+ -+ enable_clocks(0); -+ -+ return 0; -+} -+ -+void dispc_exit(void) -+{ -+ if (cpu_is_omap34xx()) -+ clk_put(dispc.dpll4_m4_ck); -+ iounmap(dispc.base); -+} -+ -+int dispc_enable_plane(enum omap_plane plane, bool enable) -+{ -+ DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); -+ -+ enable_clocks(1); -+ _dispc_enable_plane(plane, enable); -+ enable_clocks(0); -+ -+ return 0; -+} -+ -+int dispc_setup_plane(enum omap_plane plane, enum omap_channel channel_out, -+ u32 paddr, u16 screen_width, -+ u16 pos_x, u16 pos_y, -+ u16 width, u16 height, -+ u16 out_width, u16 out_height, -+ enum omap_color_mode color_mode, -+ bool ilace, -+ enum omap_dss_rotation_type rotation_type, -+ u8 rotation, bool mirror, u8 global_alpha) -+{ -+ int r = 0; -+ -+ DSSDBG("dispc_setup_plane %d, ch %d, pa %x, sw %d, %d,%d, %dx%d -> " -+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n", -+ plane, channel_out, paddr, screen_width, pos_x, pos_y, -+ width, height, -+ out_width, out_height, -+ ilace, color_mode, -+ rotation, mirror); -+ -+ enable_clocks(1); -+ -+ r = _dispc_setup_plane(plane, channel_out, -+ paddr, screen_width, -+ pos_x, pos_y, -+ width, height, -+ out_width, out_height, -+ color_mode, ilace, -+ rotation_type, -+ rotation, mirror, -+ global_alpha); -+ -+ enable_clocks(0); -+ -+ return r; -+} -+ -+static int dispc_is_intersecting(int x1, int y1, int w1, int h1, -+ int x2, int y2, int w2, int h2) -+{ -+ if 
(x1 >= (x2+w2)) -+ return 0; -+ -+ if ((x1+w1) <= x2) -+ return 0; -+ -+ if (y1 >= (y2+h2)) -+ return 0; -+ -+ if ((y1+h1) <= y2) -+ return 0; -+ -+ return 1; -+} -+ -+static int dispc_is_overlay_scaled(struct omap_overlay_info *pi) -+{ -+ if (pi->width != pi->out_width) -+ return 1; -+ -+ if (pi->height != pi->out_height) -+ return 1; -+ -+ return 0; -+} -+ -+/* returns the area that needs updating */ -+void dispc_setup_partial_planes(struct omap_display *display, -+ u16 *xi, u16 *yi, u16 *wi, u16 *hi) -+{ -+ struct omap_overlay_manager *mgr; -+ int i; -+ -+ int x, y, w, h; -+ -+ x = *xi; -+ y = *yi; -+ w = *wi; -+ h = *hi; -+ -+ DSSDBG("dispc_setup_partial_planes %d,%d %dx%d\n", -+ *xi, *yi, *wi, *hi); -+ -+ -+ mgr = display->manager; -+ -+ if (!mgr) { -+ DSSDBG("no manager\n"); -+ return; -+ } -+ -+ for (i = 0; i < mgr->num_overlays; i++) { -+ struct omap_overlay *ovl; -+ struct omap_overlay_info *pi; -+ ovl = mgr->overlays[i]; -+ -+ if (ovl->manager != mgr) -+ continue; -+ -+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) -+ continue; -+ -+ pi = &ovl->info; -+ -+ if (!pi->enabled) -+ continue; -+ /* -+ * If the plane is intersecting and scaled, we -+ * enlarge the update region to accomodate the -+ * whole area -+ */ -+ -+ if (dispc_is_intersecting(x, y, w, h, -+ pi->pos_x, pi->pos_y, -+ pi->out_width, pi->out_height)) { -+ if (dispc_is_overlay_scaled(pi)) { -+ -+ int x1, y1, x2, y2; -+ -+ if (x > pi->pos_x) -+ x1 = pi->pos_x; -+ else -+ x1 = x; -+ -+ if (y > pi->pos_y) -+ y1 = pi->pos_y; -+ else -+ y1 = y; -+ -+ if ((x + w) < (pi->pos_x + pi->out_width)) -+ x2 = pi->pos_x + pi->out_width; -+ else -+ x2 = x + w; -+ -+ if ((y + h) < (pi->pos_y + pi->out_height)) -+ y2 = pi->pos_y + pi->out_height; -+ else -+ y2 = y + h; -+ -+ x = x1; -+ y = y1; -+ w = x2 - x1; -+ h = y2 - y1; -+ -+ DSSDBG("Update area after enlarge due to " -+ "scaling %d, %d %dx%d\n", -+ x, y, w, h); -+ } -+ } -+ } -+ -+ for (i = 0; i < mgr->num_overlays; i++) { -+ struct omap_overlay *ovl = mgr->overlays[i]; -+ struct omap_overlay_info *pi = &ovl->info; -+ -+ int px = pi->pos_x; -+ int py = pi->pos_y; -+ int pw = pi->width; -+ int ph = pi->height; -+ int pow = pi->out_width; -+ int poh = pi->out_height; -+ u32 pa = pi->paddr; -+ int psw = pi->screen_width; -+ int bpp; -+ -+ if (ovl->manager != mgr) -+ continue; -+ -+ /* -+ * If plane is not enabled or the update region -+ * does not intersect with the plane in question, -+ * we really disable the plane from hardware -+ */ -+ -+ if (!pi->enabled || -+ !dispc_is_intersecting(x, y, w, h, -+ px, py, pow, poh)) { -+ dispc_enable_plane(ovl->id, 0); -+ continue; -+ } -+ -+ /* FIXME CLUT formats */ -+ switch (pi->color_mode) { -+ case OMAP_DSS_COLOR_RGB12U: -+ case OMAP_DSS_COLOR_RGB16: -+ case OMAP_DSS_COLOR_ARGB16: -+ case OMAP_DSS_COLOR_YUV2: -+ case OMAP_DSS_COLOR_UYVY: -+ case OMAP_DSS_COLOR_RGB24P: -+ case OMAP_DSS_COLOR_RGB24U: -+ case OMAP_DSS_COLOR_ARGB32: -+ case OMAP_DSS_COLOR_RGBA32: -+ case OMAP_DSS_COLOR_RGBX32: -+ bpp = color_mode_to_bpp(pi->color_mode); -+ break; -+ -+ default: -+ BUG(); -+ return; -+ } -+ -+ if (x > pi->pos_x) { -+ px = 0; -+ pw -= (x - pi->pos_x); -+ pa += (x - pi->pos_x) * bpp / 8; -+ } else { -+ px = pi->pos_x - x; -+ } -+ -+ if (y > pi->pos_y) { -+ py = 0; -+ ph -= (y - pi->pos_y); -+ pa += (y - pi->pos_y) * psw * bpp / 8; -+ } else { -+ py = pi->pos_y - y; -+ } -+ -+ if (w < (px+pw)) -+ pw -= (px+pw) - (w); -+ -+ if (h < (py+ph)) -+ ph -= (py+ph) - (h); -+ -+ /* Can't scale the GFX plane */ -+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 
0 || -+ dispc_is_overlay_scaled(pi) == 0) { -+ pow = pw; -+ poh = ph; -+ } -+ -+ DSSDBG("calc plane %d, %x, sw %d, %d,%d, %dx%d -> %dx%d\n", -+ ovl->id, pa, psw, px, py, pw, ph, pow, poh); -+ -+ dispc_setup_plane(ovl->id, mgr->id, -+ pa, psw, -+ px, py, -+ pw, ph, -+ pow, poh, -+ pi->color_mode, 0, -+ pi->rotation_type, -+ pi->rotation, -+ pi->mirror, -+ pi->global_alpha); -+ -+ if (dss_use_replication(display, ovl->info.color_mode)) -+ dispc_enable_replication(ovl->id, true); -+ else -+ dispc_enable_replication(ovl->id, false); -+ -+ dispc_enable_plane(ovl->id, 1); -+ } -+ -+ *xi = x; -+ *yi = y; -+ *wi = w; -+ *hi = h; -+ -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/display.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/display.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/display.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/display.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,800 @@ -+/* -+ * linux/drivers/video/omap2/dss/display.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#define DSS_SUBSYS_NAME "DISPLAY" -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include "dss.h" -+ -+static int num_displays; -+static LIST_HEAD(display_list); -+ -+static ssize_t display_name_show(struct omap_display *display, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", display->name); -+} -+ -+static ssize_t display_enabled_show(struct omap_display *display, char *buf) -+{ -+ bool enabled = display->state != OMAP_DSS_DISPLAY_DISABLED; -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", enabled); -+} -+ -+static ssize_t display_enabled_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ bool enabled, r = 0; -+ -+ enabled = simple_strtoul(buf, NULL, 10); -+ -+ omap_dss_lock(); -+ -+ if (enabled != (display->state != OMAP_DSS_DISPLAY_DISABLED)) { -+ if (enabled) { -+ omap_dss_maximize_min_bus_tput(); -+ r = display->enable(display); -+ omap_dss_update_min_bus_tput(); -+ if (r) -+ goto unlock; -+ } else { -+ display->disable(display); -+ } -+ } -+ -+unlock: -+ omap_dss_unlock(); -+ -+ return r ? 
r : size; -+} -+ -+static ssize_t display_upd_mode_show(struct omap_display *display, char *buf) -+{ -+ enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO; -+ if (display->get_update_mode) -+ mode = display->get_update_mode(display); -+ return snprintf(buf, PAGE_SIZE, "%d\n", mode); -+} -+ -+static ssize_t display_upd_mode_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ int val, r; -+ enum omap_dss_update_mode mode; -+ -+ val = simple_strtoul(buf, NULL, 10); -+ -+ switch (val) { -+ case OMAP_DSS_UPDATE_DISABLED: -+ case OMAP_DSS_UPDATE_AUTO: -+ case OMAP_DSS_UPDATE_MANUAL: -+ mode = (enum omap_dss_update_mode)val; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ if ((r = display->set_update_mode(display, mode))) -+ return r; -+ -+ return size; -+} -+ -+static ssize_t display_tear_show(struct omap_display *display, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d\n", -+ display->get_te ? display->get_te(display) : 0); -+} -+ -+static ssize_t display_tear_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ unsigned long te; -+ int r; -+ -+ if (!display->enable_te || !display->get_te) -+ return -ENOENT; -+ -+ te = simple_strtoul(buf, NULL, 0); -+ -+ if ((r = display->enable_te(display, te))) -+ return r; -+ -+ return size; -+} -+ -+static ssize_t display_timings_show(struct omap_display *display, char *buf) -+{ -+ struct omap_video_timings t; -+ -+ if (!display->get_timings) -+ return -ENOENT; -+ -+ display->get_timings(display, &t); -+ -+ return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n", -+ t.pixel_clock, -+ t.x_res, t.hfp, t.hbp, t.hsw, -+ t.y_res, t.vfp, t.vbp, t.vsw); -+} -+ -+static ssize_t display_timings_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ struct omap_video_timings t; -+ int r, found; -+ -+ if (!display->set_timings || !display->check_timings) -+ return -ENOENT; -+ -+ found = 0; -+#ifdef CONFIG_OMAP2_DSS_VENC -+ if (strncmp("pal", buf, 3) == 0) { -+ t = omap_dss_pal_timings; -+ found = 1; -+ } else if (strncmp("ntsc", buf, 4) == 0) { -+ t = omap_dss_ntsc_timings; -+ found = 1; -+ } -+#endif -+ if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu", -+ &t.pixel_clock, -+ &t.x_res, &t.hfp, &t.hbp, &t.hsw, -+ &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9) -+ return -EINVAL; -+ -+ if ((r = display->check_timings(display, &t))) -+ return r; -+ -+ omap_dss_lock(); -+ display->set_timings(display, &t); -+ omap_dss_unlock(); -+ -+ return size; -+} -+ -+static ssize_t display_rotate_show(struct omap_display *display, char *buf) -+{ -+ int rotate; -+ if (!display->get_rotate) -+ return -ENOENT; -+ rotate = display->get_rotate(display); -+ return snprintf(buf, PAGE_SIZE, "%u\n", rotate); -+} -+ -+static ssize_t display_rotate_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ unsigned long rot; -+ int r; -+ -+ if (!display->set_rotate || !display->get_rotate) -+ return -ENOENT; -+ -+ rot = simple_strtoul(buf, NULL, 0); -+ -+ if ((r = display->set_rotate(display, rot))) -+ return r; -+ -+ return size; -+} -+ -+static ssize_t display_mirror_show(struct omap_display *display, char *buf) -+{ -+ int mirror; -+ if (!display->get_mirror) -+ return -ENOENT; -+ mirror = display->get_mirror(display); -+ return snprintf(buf, PAGE_SIZE, "%u\n", mirror); -+} -+ -+static ssize_t display_mirror_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ unsigned long mirror; -+ int r; -+ -+ if (!display->set_mirror || !display->get_mirror) -+ return 
-ENOENT; -+ -+ mirror = simple_strtoul(buf, NULL, 0); -+ -+ if ((r = display->set_mirror(display, mirror))) -+ return r; -+ -+ return size; -+} -+ -+static ssize_t display_panel_name_show(struct omap_display *display, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", -+ display->panel ? display->panel->name : ""); -+} -+ -+static ssize_t display_ctrl_name_show(struct omap_display *display, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", -+ display->ctrl ? display->ctrl->name : ""); -+} -+ -+struct display_attribute { -+ struct attribute attr; -+ ssize_t (*show)(struct omap_display *, char *); -+ ssize_t (*store)(struct omap_display *, const char *, size_t); -+}; -+ -+static ssize_t display_wss_show(struct omap_display *display, char *buf) -+{ -+ unsigned int wss; -+ -+ if (!display->get_wss) -+ return -ENOENT; -+ -+ wss = display->get_wss(display); -+ -+ return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); -+} -+ -+static ssize_t display_wss_store(struct omap_display *display, -+ const char *buf, size_t size) -+{ -+ unsigned long wss; -+ int r; -+ -+ if (!display->get_wss || !display->set_wss) -+ return -ENOENT; -+ -+ if (strict_strtoul(buf, 0, &wss)) -+ return -EINVAL; -+ -+ if (wss > 0xfffff) -+ return -EINVAL; -+ -+ omap_dss_lock(); -+ r = display->set_wss(display, wss); -+ omap_dss_unlock(); -+ -+ if (r) -+ return r; -+ -+ return size; -+} -+ -+#define DISPLAY_ATTR(_name, _mode, _show, _store) \ -+ struct display_attribute display_attr_##_name = \ -+ __ATTR(_name, _mode, _show, _store) -+ -+static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); -+static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, -+ display_enabled_show, display_enabled_store); -+static DISPLAY_ATTR(update_mode, S_IRUGO|S_IWUSR, -+ display_upd_mode_show, display_upd_mode_store); -+static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, -+ display_tear_show, display_tear_store); -+static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, -+ display_timings_show, display_timings_store); -+static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, -+ display_rotate_show, display_rotate_store); -+static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, -+ display_mirror_show, display_mirror_store); -+static DISPLAY_ATTR(panel_name, S_IRUGO, display_panel_name_show, NULL); -+static DISPLAY_ATTR(ctrl_name, S_IRUGO, display_ctrl_name_show, NULL); -+static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, -+ display_wss_show, display_wss_store); -+ -+static struct attribute *display_sysfs_attrs[] = { -+ &display_attr_name.attr, -+ &display_attr_enabled.attr, -+ &display_attr_update_mode.attr, -+ &display_attr_tear_elim.attr, -+ &display_attr_timings.attr, -+ &display_attr_rotate.attr, -+ &display_attr_mirror.attr, -+ &display_attr_panel_name.attr, -+ &display_attr_ctrl_name.attr, -+ &display_attr_wss.attr, -+ NULL -+}; -+ -+static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) -+{ -+ struct omap_display *display; -+ struct display_attribute *display_attr; -+ -+ display = container_of(kobj, struct omap_display, kobj); -+ display_attr = container_of(attr, struct display_attribute, attr); -+ -+ if (!display_attr->show) -+ return -ENOENT; -+ -+ if (display->state == OMAP_DSS_DISPLAY_UNINITIALIZED) -+ return -ENODEV; -+ -+ return display_attr->show(display, buf); -+} -+ -+static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, -+ const char *buf, size_t size) -+{ -+ struct omap_display *display; -+ struct display_attribute *display_attr; -+ -+ display = container_of(kobj, struct omap_display, kobj); -+ display_attr = 
container_of(attr, struct display_attribute, attr); -+ -+ if (!display_attr->store) -+ return -ENOENT; -+ -+ if (display->state == OMAP_DSS_DISPLAY_UNINITIALIZED) -+ return -ENODEV; -+ -+ return display_attr->store(display, buf, size); -+} -+ -+static struct sysfs_ops display_sysfs_ops = { -+ .show = display_attr_show, -+ .store = display_attr_store, -+}; -+ -+static struct kobj_type display_ktype = { -+ .sysfs_ops = &display_sysfs_ops, -+ .default_attrs = display_sysfs_attrs, -+}; -+ -+static void default_get_resolution(struct omap_display *display, -+ u16 *xres, u16 *yres) -+{ -+ *xres = display->panel->timings.x_res; -+ *yres = display->panel->timings.y_res; -+} -+ -+static void default_configure_overlay(struct omap_overlay *ovl) -+{ -+ unsigned low, high, size; -+ enum omap_burst_size burst; -+ enum omap_plane plane = ovl->id; -+ -+ burst = OMAP_DSS_BURST_16x32; -+ size = 16 * 32 / 8; -+ -+ dispc_set_burst_size(plane, burst); -+ -+ high = dispc_get_plane_fifo_size(plane) - 1; -+ low = dispc_get_plane_fifo_size(plane) - size; -+ -+ if (ovl->info.fifo_threshold_high && -+ ovl->info.fifo_threshold_high < high) -+ high = ovl->info.fifo_threshold_high; -+ if (ovl->info.fifo_threshold_low && -+ ovl->info.fifo_threshold_low < low) -+ low = ovl->info.fifo_threshold_low; -+ -+ dispc_setup_plane_fifo(plane, low, high); -+} -+ -+static int default_wait_vsync(struct omap_display *display) -+{ -+ unsigned long timeout = msecs_to_jiffies(500); -+ u32 irq; -+ -+ if (display->type == OMAP_DISPLAY_TYPE_VENC) -+ irq = DISPC_IRQ_EVSYNC_ODD; -+ else -+ irq = DISPC_IRQ_VSYNC; -+ -+ return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); -+} -+ -+static int default_get_recommended_bpp(struct omap_display *display) -+{ -+ if (display->panel->recommended_bpp) -+ return display->panel->recommended_bpp; -+ -+ switch (display->type) { -+ case OMAP_DISPLAY_TYPE_DPI: -+ if (display->hw_config.u.dpi.data_lines == 24) -+ return 24; -+ else -+ return 16; -+ -+ case OMAP_DISPLAY_TYPE_DBI: -+ case OMAP_DISPLAY_TYPE_DSI: -+ if (display->ctrl->pixel_size == 24) -+ return 24; -+ else -+ return 16; -+ case OMAP_DISPLAY_TYPE_VENC: -+ case OMAP_DISPLAY_TYPE_SDI: -+ return 24; -+ return 24; -+ default: -+ BUG(); -+ } -+} -+ -+/* Checks if replication logic should be used. 
Only use for active matrix, -+ * when overlay is in RGB12U or RGB16 mode, and LCD interface is -+ * 18bpp or 24bpp */ -+bool dss_use_replication(struct omap_display *display, -+ enum omap_color_mode mode) -+{ -+ int bpp; -+ -+ if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) -+ return false; -+ -+ if (display->type == OMAP_DISPLAY_TYPE_DPI && -+ (display->panel->config & OMAP_DSS_LCD_TFT) == 0) -+ return false; -+ -+ switch (display->type) { -+ case OMAP_DISPLAY_TYPE_DPI: -+ bpp = display->hw_config.u.dpi.data_lines; -+ break; -+ case OMAP_DISPLAY_TYPE_VENC: -+ case OMAP_DISPLAY_TYPE_SDI: -+ bpp = 24; -+ break; -+ case OMAP_DISPLAY_TYPE_DBI: -+ case OMAP_DISPLAY_TYPE_DSI: -+ bpp = display->ctrl->pixel_size; -+ break; -+ default: -+ BUG(); -+ } -+ -+ return bpp > 16; -+} -+ -+void dss_init_displays(struct platform_device *pdev) -+{ -+ struct omap_dss_board_info *pdata = pdev->dev.platform_data; -+ int i, r = 0; -+ -+ INIT_LIST_HEAD(&display_list); -+ -+ num_displays = 0; -+ -+ for (i = 0; i < pdata->num_displays; ++i) { -+ struct omap_display *display; -+ -+ switch (pdata->displays[i]->type) { -+ case OMAP_DISPLAY_TYPE_DPI: -+#ifdef CONFIG_OMAP2_DSS_RFBI -+ case OMAP_DISPLAY_TYPE_DBI: -+#endif -+#ifdef CONFIG_OMAP2_DSS_SDI -+ case OMAP_DISPLAY_TYPE_SDI: -+#endif -+#ifdef CONFIG_OMAP2_DSS_DSI -+ case OMAP_DISPLAY_TYPE_DSI: -+#endif -+#ifdef CONFIG_OMAP2_DSS_VENC -+ case OMAP_DISPLAY_TYPE_VENC: -+#endif -+ break; -+ default: -+ DSSERR("Support for display '%s' not compiled in.\n", -+ pdata->displays[i]->name); -+ continue; -+ } -+ -+ display = kzalloc(sizeof(*display), GFP_KERNEL); -+ -+ /*atomic_set(&display->ref_count, 0);*/ -+ display->ref_count = 0; -+ -+ display->hw_config = *pdata->displays[i]; -+ display->type = pdata->displays[i]->type; -+ display->name = pdata->displays[i]->name; -+ -+ display->get_resolution = default_get_resolution; -+ display->get_recommended_bpp = default_get_recommended_bpp; -+ display->configure_overlay = default_configure_overlay; -+ display->wait_vsync = default_wait_vsync; -+ -+ switch (display->type) { -+ case OMAP_DISPLAY_TYPE_DPI: -+ r = dpi_init_display(display); -+ break; -+#ifdef CONFIG_OMAP2_DSS_RFBI -+ case OMAP_DISPLAY_TYPE_DBI: -+ r = rfbi_init_display(display); -+ break; -+#endif -+#ifdef CONFIG_OMAP2_DSS_VENC -+ case OMAP_DISPLAY_TYPE_VENC: -+ r = venc_init_display(display); -+ break; -+#endif -+#ifdef CONFIG_OMAP2_DSS_SDI -+ case OMAP_DISPLAY_TYPE_SDI: -+ r = sdi_init_display(display); -+ break; -+#endif -+#ifdef CONFIG_OMAP2_DSS_DSI -+ case OMAP_DISPLAY_TYPE_DSI: -+ r = dsi_init_display(display); -+ break; -+#endif -+ default: -+ BUG(); -+ } -+ -+ if (r) { -+ DSSERR("failed to init display%d\n", i); -+ continue; -+ } -+ -+ r = kobject_init_and_add(&display->kobj, &display_ktype, -+ &pdev->dev.kobj, "display%d", num_displays); -+ -+ if (r) { -+ DSSERR("failed to create sysfs file\n"); -+ continue; -+ } -+ -+ num_displays++; -+ -+ list_add_tail(&display->list, &display_list); -+ } -+} -+ -+void dss_uninit_displays(struct platform_device *pdev) -+{ -+ struct omap_display *display; -+ -+ while (!list_empty(&display_list)) { -+ display = list_first_entry(&display_list, -+ struct omap_display, list); -+ list_del(&display->list); -+ kobject_del(&display->kobj); -+ kobject_put(&display->kobj); -+ kfree(display); -+ } -+ -+ num_displays = 0; -+} -+ -+int dss_suspend_all_displays(void) -+{ -+ int r; -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (display->state != 
OMAP_DSS_DISPLAY_ACTIVE) { -+ display->activate_after_resume = 0; -+ continue; -+ } -+ -+ if (!display->suspend) { -+ DSSERR("display '%s' doesn't implement suspend\n", -+ display->name); -+ r = -ENOSYS; -+ goto err; -+ } -+ -+ r = display->suspend(display); -+ -+ if (r) -+ goto err; -+ -+ display->activate_after_resume = 1; -+ } -+ -+ return 0; -+err: -+ /* resume all displays that were suspended */ -+ dss_resume_all_displays(); -+ return r; -+} -+ -+int dss_resume_all_displays(void) -+{ -+ int r; -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (display->activate_after_resume && display->resume) { -+ r = display->resume(display); -+ if (r) -+ return r; -+ } -+ -+ display->activate_after_resume = 0; -+ } -+ -+ return 0; -+} -+ -+int omap_dss_get_num_displays(void) -+{ -+ return num_displays; -+} -+EXPORT_SYMBOL(omap_dss_get_num_displays); -+ -+struct omap_display *dss_get_display(int no) -+{ -+ int i = 0; -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (i++ == no) -+ return display; -+ } -+ -+ return NULL; -+} -+ -+struct omap_display *omap_dss_get_display(int no) -+{ -+ struct omap_display *display; -+ -+ display = dss_get_display(no); -+ -+ if (!display) -+ return NULL; -+ -+ switch (display->type) { -+ case OMAP_DISPLAY_TYPE_VENC: -+ break; -+ -+ case OMAP_DISPLAY_TYPE_DPI: -+ case OMAP_DISPLAY_TYPE_SDI: -+ if (display->panel == NULL) -+ return NULL; -+ break; -+ -+ case OMAP_DISPLAY_TYPE_DBI: -+ case OMAP_DISPLAY_TYPE_DSI: -+ if (display->panel == NULL || display->ctrl == NULL) -+ return NULL; -+ break; -+ -+ default: -+ return NULL; -+ } -+ -+ if (display->ctrl) { -+ if (!try_module_get(display->ctrl->owner)) -+ goto err0; -+ -+ if (display->ctrl->init) -+ if (display->ctrl->init(display) != 0) -+ goto err1; -+ } -+ -+ if (display->panel) { -+ if (!try_module_get(display->panel->owner)) -+ goto err2; -+ -+ if (display->panel->init) -+ if (display->panel->init(display) != 0) -+ goto err3; -+ } -+ -+ dss_recheck_connections(display, -+ !strcmp(display->name, dss_get_def_disp_name())); -+ -+ display->ref_count++; -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+ /* -+ if (atomic_cmpxchg(&display->ref_count, 0, 1) != 0) -+ return 0; -+*/ -+ -+ return display; -+err3: -+ if (display->panel) -+ module_put(display->panel->owner); -+err2: -+ if (display->ctrl && display->ctrl->cleanup) -+ display->ctrl->cleanup(display); -+err1: -+ if (display->ctrl) -+ module_put(display->ctrl->owner); -+err0: -+ return NULL; -+} -+EXPORT_SYMBOL(omap_dss_get_display); -+ -+void omap_dss_put_display(struct omap_display *display) -+{ -+ if (--display->ref_count > 0) -+ return; -+ -+ display->state = OMAP_DSS_DISPLAY_UNINITIALIZED; -+/* -+ if (atomic_cmpxchg(&display->ref_count, 1, 0) != 1) -+ return; -+*/ -+ if (display->ctrl) { -+ if (display->ctrl->cleanup) -+ display->ctrl->cleanup(display); -+ module_put(display->ctrl->owner); -+ } -+ -+ if (display->panel) { -+ if (display->panel->cleanup) -+ display->panel->cleanup(display); -+ module_put(display->panel->owner); -+ } -+} -+EXPORT_SYMBOL(omap_dss_put_display); -+ -+void omap_dss_register_ctrl(struct omap_ctrl *ctrl) -+{ -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (display->hw_config.ctrl_name && -+ strcmp(display->hw_config.ctrl_name, ctrl->name) == 0) { -+ display->ctrl = ctrl; -+ DSSDBG("ctrl '%s' registered\n", ctrl->name); -+ } -+ } -+} -+EXPORT_SYMBOL(omap_dss_register_ctrl); -+ -+void 
omap_dss_register_panel(struct omap_panel *panel) -+{ -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (display->hw_config.panel_name && -+ strcmp(display->hw_config.panel_name, panel->name) == 0) { -+ display->panel = panel; -+ DSSDBG("panel '%s' registered\n", panel->name); -+ } -+ } -+} -+EXPORT_SYMBOL(omap_dss_register_panel); -+ -+void omap_dss_unregister_ctrl(struct omap_ctrl *ctrl) -+{ -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (display->hw_config.ctrl_name && -+ strcmp(display->hw_config.ctrl_name, ctrl->name) == 0) -+ display->ctrl = NULL; -+ } -+} -+EXPORT_SYMBOL(omap_dss_unregister_ctrl); -+ -+void omap_dss_unregister_panel(struct omap_panel *panel) -+{ -+ struct omap_display *display; -+ -+ list_for_each_entry(display, &display_list, list) { -+ if (display->hw_config.panel_name && -+ strcmp(display->hw_config.panel_name, panel->name) == 0) -+ display->panel = NULL; -+ } -+} -+EXPORT_SYMBOL(omap_dss_unregister_panel); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dpi.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dpi.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dpi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dpi.c 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,395 @@ -+/* -+ * linux/drivers/video/omap2/dss/dpi.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "dss.h" -+ -+static struct { -+ int update_enabled; -+} dpi; -+ -+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -+static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req, -+ unsigned long *fck, int *lck_div, int *pck_div) -+{ -+ struct dsi_clock_info cinfo; -+ int r; -+ -+ r = dsi_pll_calc_pck(is_tft, pck_req, &cinfo); -+ if (r) -+ return r; -+ -+ r = dsi_pll_program(&cinfo); -+ if (r) -+ return r; -+ -+ dss_select_clk_source(0, 1); -+ -+ dispc_set_lcd_divisor(cinfo.lck_div, cinfo.pck_div); -+ -+ *fck = cinfo.dsi1_pll_fclk; -+ *lck_div = cinfo.lck_div; -+ *pck_div = cinfo.pck_div; -+ -+ return 0; -+} -+#else -+static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req, -+ unsigned long *fck, int *lck_div, int *pck_div) -+{ -+ struct dispc_clock_info cinfo; -+ int r; -+ -+ r = dispc_calc_clock_div(is_tft, pck_req, &cinfo); -+ if (r) -+ return r; -+ -+ r = dispc_set_clock_div(&cinfo); -+ if (r) -+ return r; -+ -+ *fck = cinfo.fck; -+ *lck_div = cinfo.lck_div; -+ *pck_div = cinfo.pck_div; -+ -+ return 0; -+} -+#endif -+ -+static int dpi_set_mode(struct omap_display *display) -+{ -+ struct omap_panel *panel = display->panel; -+ int lck_div, pck_div; -+ unsigned long fck; -+ unsigned long pck; -+ bool is_tft; -+ int r = 0; -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ dispc_set_pol_freq(panel); -+ -+ is_tft = (display->panel->config & OMAP_DSS_LCD_TFT) != 0; -+ -+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -+ r = dpi_set_dsi_clk(is_tft, panel->timings.pixel_clock * 1000, -+ &fck, &lck_div, &pck_div); -+#else -+ r = dpi_set_dispc_clk(is_tft, panel->timings.pixel_clock * 1000, -+ &fck, &lck_div, &pck_div); -+#endif -+ if (r) -+ goto err0; -+ -+ pck = fck / lck_div / pck_div / 1000; -+ -+ if (pck != panel->timings.pixel_clock) { -+ DSSWARN("Could not find exact pixel clock. " -+ "Requested %d kHz, got %lu kHz\n", -+ panel->timings.pixel_clock, pck); -+ -+ panel->timings.pixel_clock = pck; -+ } -+ -+ dispc_set_lcd_timings(&panel->timings); -+ -+err0: -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ return r; -+} -+ -+static int dpi_basic_init(struct omap_display *display) -+{ -+ bool is_tft; -+ -+ is_tft = (display->panel->config & OMAP_DSS_LCD_TFT) != 0; -+ -+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS); -+ dispc_set_lcd_display_type(is_tft ? 
OMAP_DSS_LCD_DISPLAY_TFT : -+ OMAP_DSS_LCD_DISPLAY_STN); -+ dispc_set_tft_data_lines(display->hw_config.u.dpi.data_lines); -+ -+ return 0; -+} -+ -+static int dpi_display_enable(struct omap_display *display) -+{ -+ struct omap_panel *panel = display->panel; -+ int r; -+ -+ if (display->state != OMAP_DSS_DISPLAY_DISABLED) { -+ DSSERR("display already enabled\n"); -+ return -EINVAL; -+ } -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ r = dpi_basic_init(display); -+ if (r) -+ goto err0; -+ -+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -+ dss_clk_enable(DSS_CLK_FCK2); -+ r = dsi_pll_init(0, 1); -+ if (r) -+ goto err1; -+#endif -+ r = dpi_set_mode(display); -+ if (r) -+ goto err2; -+ -+ mdelay(2); -+ -+ dispc_enable_lcd_out(1); -+ -+ r = panel->enable(display); -+ if (r) -+ goto err3; -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+ -+ return 0; -+ -+err3: -+ dispc_enable_lcd_out(0); -+err2: -+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -+ dsi_pll_uninit(); -+err1: -+ dss_clk_disable(DSS_CLK_FCK2); -+#endif -+err0: -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ return r; -+} -+ -+static int dpi_display_resume(struct omap_display *display); -+ -+static void dpi_display_disable(struct omap_display *display) -+{ -+ if (display->state == OMAP_DSS_DISPLAY_DISABLED) -+ return; -+ -+ if (display->state == OMAP_DSS_DISPLAY_SUSPENDED) -+ dpi_display_resume(display); -+ -+ display->panel->disable(display); -+ -+ dispc_enable_lcd_out(0); -+ -+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -+ dss_select_clk_source(0, 0); -+ dsi_pll_uninit(); -+ dss_clk_disable(DSS_CLK_FCK2); -+#endif -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+} -+ -+static int dpi_display_suspend(struct omap_display *display) -+{ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return -EINVAL; -+ -+ DSSDBG("dpi_display_suspend\n"); -+ -+ if (display->panel->suspend) -+ display->panel->suspend(display); -+ -+ dispc_enable_lcd_out(0); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ display->state = OMAP_DSS_DISPLAY_SUSPENDED; -+ -+ return 0; -+} -+ -+static int dpi_display_resume(struct omap_display *display) -+{ -+ if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) -+ return -EINVAL; -+ -+ DSSDBG("dpi_display_resume\n"); -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ dispc_enable_lcd_out(1); -+ -+ if (display->panel->resume) -+ display->panel->resume(display); -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+ -+ return 0; -+} -+ -+static void dpi_set_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ DSSDBG("dpi_set_timings\n"); -+ display->panel->timings = *timings; -+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { -+ dpi_set_mode(display); -+ dispc_go(OMAP_DSS_CHANNEL_LCD); -+ } -+} -+ -+static int dpi_check_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ bool is_tft; -+ int r; -+ int lck_div, pck_div; -+ unsigned long fck; -+ unsigned long pck; -+ -+ if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { -+ if (timings->hsw < 1 || timings->hsw > 64 || -+ timings->hfp < 1 || timings->hfp > 256 || -+ timings->hbp < 1 || timings->hbp > 256) { -+ return -EINVAL; -+ } -+ -+ if (timings->vsw < 1 || timings->vsw > 64 || -+ timings->vfp > 255 || timings->vbp > 255) { -+ return -EINVAL; -+ } -+ } else { -+ if (timings->hsw < 1 || timings->hsw > 256 || -+ timings->hfp < 1 || timings->hfp > 4096 || -+ timings->hbp < 1 || timings->hbp > 4096) { -+ return -EINVAL; -+ } -+ -+ if (timings->vsw < 1 || 
timings->vsw > 64 || -+ timings->vfp > 4095 || timings->vbp > 4095) { -+ return -EINVAL; -+ } -+ } -+ -+ if (timings->pixel_clock == 0) -+ return -EINVAL; -+ -+ is_tft = (display->panel->config & OMAP_DSS_LCD_TFT) != 0; -+ -+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL -+ { -+ struct dsi_clock_info cinfo; -+ r = dsi_pll_calc_pck(is_tft, timings->pixel_clock * 1000, -+ &cinfo); -+ -+ if (r) -+ return r; -+ -+ fck = cinfo.dsi1_pll_fclk; -+ lck_div = cinfo.lck_div; -+ pck_div = cinfo.pck_div; -+ } -+#else -+ { -+ struct dispc_clock_info cinfo; -+ r = dispc_calc_clock_div(is_tft, timings->pixel_clock * 1000, -+ &cinfo); -+ -+ if (r) -+ return r; -+ -+ fck = cinfo.fck; -+ lck_div = cinfo.lck_div; -+ pck_div = cinfo.pck_div; -+ } -+#endif -+ -+ pck = fck / lck_div / pck_div / 1000; -+ -+ timings->pixel_clock = pck; -+ -+ return 0; -+} -+ -+static void dpi_get_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ *timings = display->panel->timings; -+} -+ -+static int dpi_display_set_update_mode(struct omap_display *display, -+ enum omap_dss_update_mode mode) -+{ -+ if (mode == OMAP_DSS_UPDATE_MANUAL) -+ return -EINVAL; -+ -+ if (mode == OMAP_DSS_UPDATE_DISABLED) { -+ dispc_enable_lcd_out(0); -+ dpi.update_enabled = 0; -+ } else { -+ dispc_enable_lcd_out(1); -+ dpi.update_enabled = 1; -+ } -+ -+ return 0; -+} -+ -+static enum omap_dss_update_mode dpi_display_get_update_mode( -+ struct omap_display *display) -+{ -+ return dpi.update_enabled ? OMAP_DSS_UPDATE_AUTO : -+ OMAP_DSS_UPDATE_DISABLED; -+} -+ -+int dpi_init_display(struct omap_display *display) -+{ -+ DSSDBG("DPI init_display\n"); -+ -+ display->enable = dpi_display_enable; -+ display->disable = dpi_display_disable; -+ display->suspend = dpi_display_suspend; -+ display->resume = dpi_display_resume; -+ display->set_timings = dpi_set_timings; -+ display->check_timings = dpi_check_timings; -+ display->get_timings = dpi_get_timings; -+ display->set_update_mode = dpi_display_set_update_mode; -+ display->get_update_mode = dpi_display_get_update_mode; -+ -+ return 0; -+} -+ -+int dpi_init(void) -+{ -+ return 0; -+} -+ -+void dpi_exit(void) -+{ -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dsi.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dsi.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dsi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dsi.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,3873 @@ -+/* -+ * linux/drivers/video/omap2/dss/dsi.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#define DSS_SUBSYS_NAME "DSI" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "dss.h" -+ -+/*#define VERBOSE_IRQ*/ -+ -+#define DSI_BASE 0x4804FC00 -+ -+struct dsi_reg { u16 idx; }; -+ -+#define DSI_REG(idx) ((const struct dsi_reg) { idx }) -+ -+#define DSI_SZ_REGS SZ_1K -+/* DSI Protocol Engine */ -+ -+#define DSI_REVISION DSI_REG(0x0000) -+#define DSI_SYSCONFIG DSI_REG(0x0010) -+#define DSI_SYSSTATUS DSI_REG(0x0014) -+#define DSI_IRQSTATUS DSI_REG(0x0018) -+#define DSI_IRQENABLE DSI_REG(0x001C) -+#define DSI_CTRL DSI_REG(0x0040) -+#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048) -+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C) -+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050) -+#define DSI_CLK_CTRL DSI_REG(0x0054) -+#define DSI_TIMING1 DSI_REG(0x0058) -+#define DSI_TIMING2 DSI_REG(0x005C) -+#define DSI_VM_TIMING1 DSI_REG(0x0060) -+#define DSI_VM_TIMING2 DSI_REG(0x0064) -+#define DSI_VM_TIMING3 DSI_REG(0x0068) -+#define DSI_CLK_TIMING DSI_REG(0x006C) -+#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070) -+#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074) -+#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078) -+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C) -+#define DSI_VM_TIMING4 DSI_REG(0x0080) -+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084) -+#define DSI_VM_TIMING5 DSI_REG(0x0088) -+#define DSI_VM_TIMING6 DSI_REG(0x008C) -+#define DSI_VM_TIMING7 DSI_REG(0x0090) -+#define DSI_STOPCLK_TIMING DSI_REG(0x0094) -+#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20)) -+#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20)) -+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20)) -+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20)) -+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20)) -+#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20)) -+#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20)) -+ -+/* DSIPHY_SCP */ -+ -+#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000) -+#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004) -+#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008) -+#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014) -+ -+/* DSI_PLL_CTRL_SCP */ -+ -+#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000) -+#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004) -+#define DSI_PLL_GO DSI_REG(0x300 + 0x0008) -+#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C) -+#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010) -+ -+#define REG_GET(idx, start, end) \ -+ FLD_GET(dsi_read_reg(idx), start, end) -+ -+#define REG_FLD_MOD(idx, val, start, end) \ -+ dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end)) -+ -+/* Global interrupts */ -+#define DSI_IRQ_VC0 (1 << 0) -+#define DSI_IRQ_VC1 (1 << 1) -+#define DSI_IRQ_VC2 (1 << 2) -+#define DSI_IRQ_VC3 (1 << 3) -+#define DSI_IRQ_WAKEUP (1 << 4) -+#define DSI_IRQ_RESYNC (1 << 5) -+#define DSI_IRQ_PLL_LOCK (1 << 7) -+#define DSI_IRQ_PLL_UNLOCK (1 << 8) -+#define DSI_IRQ_PLL_RECALL (1 << 9) -+#define DSI_IRQ_COMPLEXIO_ERR (1 << 10) -+#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14) -+#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15) -+#define DSI_IRQ_TE_TRIGGER (1 << 16) -+#define DSI_IRQ_ACK_TRIGGER (1 << 17) -+#define DSI_IRQ_SYNC_LOST (1 << 18) -+#define DSI_IRQ_LDO_POWER_GOOD (1 << 19) -+#define DSI_IRQ_TA_TIMEOUT (1 << 20) -+#define DSI_IRQ_ERROR_MASK \ -+ (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \ -+ DSI_IRQ_TA_TIMEOUT) -+#define DSI_IRQ_CHANNEL_MASK 0xf -+ -+/* Virtual channel interrupts */ 
-+#define DSI_VC_IRQ_CS (1 << 0) -+#define DSI_VC_IRQ_ECC_CORR (1 << 1) -+#define DSI_VC_IRQ_PACKET_SENT (1 << 2) -+#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3) -+#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4) -+#define DSI_VC_IRQ_BTA (1 << 5) -+#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6) -+#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7) -+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8) -+#define DSI_VC_IRQ_ERROR_MASK \ -+ (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \ -+ DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \ -+ DSI_VC_IRQ_FIFO_TX_UDF) -+ -+/* ComplexIO interrupts */ -+#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) -+#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) -+#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) -+#define DSI_CIO_IRQ_ERRESC1 (1 << 5) -+#define DSI_CIO_IRQ_ERRESC2 (1 << 6) -+#define DSI_CIO_IRQ_ERRESC3 (1 << 7) -+#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) -+#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) -+#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) -+#define DSI_CIO_IRQ_STATEULPS1 (1 << 15) -+#define DSI_CIO_IRQ_STATEULPS2 (1 << 16) -+#define DSI_CIO_IRQ_STATEULPS3 (1 << 17) -+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) -+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) -+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) -+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) -+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) -+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) -+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) -+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) -+ -+#define DSI_DT_DCS_SHORT_WRITE_0 0x05 -+#define DSI_DT_DCS_SHORT_WRITE_1 0x15 -+#define DSI_DT_DCS_READ 0x06 -+#define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37 -+#define DSI_DT_NULL_PACKET 0x09 -+#define DSI_DT_DCS_LONG_WRITE 0x39 -+ -+#define DSI_DT_RX_ACK_WITH_ERR 0x02 -+#define DSI_DT_RX_DCS_LONG_READ 0x1c -+#define DSI_DT_RX_SHORT_READ_1 0x21 -+#define DSI_DT_RX_SHORT_READ_2 0x22 -+ -+#define FINT_MAX 2100000 -+#define FINT_MIN 750000 -+#define REGN_MAX (1 << 7) -+#define REGM_MAX ((1 << 11) - 1) -+#define REGM3_MAX (1 << 4) -+#define REGM4_MAX (1 << 4) -+ -+enum fifo_size { -+ DSI_FIFO_SIZE_0 = 0, -+ DSI_FIFO_SIZE_32 = 1, -+ DSI_FIFO_SIZE_64 = 2, -+ DSI_FIFO_SIZE_96 = 3, -+ DSI_FIFO_SIZE_128 = 4, -+}; -+ -+#define DSI_CMD_FIFO_LEN 16 -+ -+struct dsi_cmd_update { -+ int bytespp; -+ u16 x; -+ u16 y; -+ u16 w; -+ u16 h; -+}; -+ -+struct dsi_cmd_mem_read { -+ void *buf; -+ size_t size; -+ u16 x; -+ u16 y; -+ u16 w; -+ u16 h; -+ size_t *ret_size; -+ struct completion *completion; -+}; -+ -+struct dsi_cmd_test { -+ int test_num; -+ int *result; -+ struct completion *completion; -+}; -+ -+enum dsi_cmd { -+ DSI_CMD_UPDATE, -+ DSI_CMD_AUTOUPDATE, -+ DSI_CMD_SYNC, -+ DSI_CMD_MEM_READ, -+ DSI_CMD_TEST, -+ DSI_CMD_SET_TE, -+ DSI_CMD_SET_UPDATE_MODE, -+ DSI_CMD_SET_ROTATE, -+ DSI_CMD_SET_MIRROR, -+}; -+ -+struct dsi_cmd_item { -+ struct omap_display *display; -+ -+ enum dsi_cmd cmd; -+ -+ union { -+ struct dsi_cmd_update r; -+ struct completion *sync; -+ struct dsi_cmd_mem_read mem_read; -+ struct dsi_cmd_test test; -+ int te; -+ enum omap_dss_update_mode update_mode; -+ int rotate; -+ int mirror; -+ } u; -+}; -+ -+static struct -+{ -+ void __iomem *base; -+ -+ unsigned long dsi1_pll_fclk; /* Hz */ -+ unsigned long dsi2_pll_fclk; /* Hz */ -+ unsigned long dsiphy; /* Hz */ -+ unsigned long ddr_clk; /* Hz */ -+ -+ struct { -+ struct omap_display *display; -+ enum fifo_size fifo_size; -+ int dest_per; /* destination peripheral 0-3 */ -+ } vc[4]; -+ -+ struct mutex lock; -+ -+ unsigned pll_locked; -+ -+ struct completion bta_completion; -+ 
-+ struct work_struct framedone_work; -+ struct work_struct process_work; -+ struct workqueue_struct *workqueue; -+ -+ enum omap_dss_update_mode user_update_mode; -+ enum omap_dss_update_mode target_update_mode; -+ enum omap_dss_update_mode update_mode; -+ bool use_te; -+ bool use_ext_te; -+ int framedone_scheduled; /* helps to catch strange framedone bugs */ -+ -+ unsigned long cache_req_pck; -+ unsigned long cache_clk_freq; -+ struct dsi_clock_info cache_cinfo; -+ -+ struct kfifo *cmd_fifo; -+ spinlock_t cmd_lock; -+ struct completion cmd_done; -+ atomic_t cmd_fifo_full; -+ atomic_t cmd_pending; -+ -+ bool autoupdate_setup; -+ -+ u32 errors; -+ spinlock_t errors_lock; -+#ifdef DEBUG -+ ktime_t perf_setup_time; -+ ktime_t perf_start_time; -+ ktime_t perf_start_time_auto; -+ int perf_measure_frames; -+ -+ struct { -+ int x, y, w, h; -+ int bytespp; -+ } update_region; -+ -+#endif -+ int debug_process; -+ int debug_read; -+ int debug_write; -+} dsi; -+ -+#ifdef DEBUG -+static unsigned int dsi_perf; -+module_param_named(dsi_perf, dsi_perf, bool, 0644); -+#endif -+ -+static void dsi_process_cmd_fifo(struct work_struct *work); -+static void dsi_push_update(struct omap_display *display, -+ int x, int y, int w, int h); -+static void dsi_push_autoupdate(struct omap_display *display); -+ -+static inline void dsi_write_reg(const struct dsi_reg idx, u32 val) -+{ -+ __raw_writel(val, dsi.base + idx.idx); -+} -+ -+static inline u32 dsi_read_reg(const struct dsi_reg idx) -+{ -+ return __raw_readl(dsi.base + idx.idx); -+} -+ -+ -+void dsi_save_context(void) -+{ -+} -+ -+void dsi_restore_context(void) -+{ -+} -+ -+static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum, -+ int value) -+{ -+ int t = 100000; -+ -+ while (REG_GET(idx, bitnum, bitnum) != value) { -+ if (--t == 0) -+ return !value; -+ } -+ -+ return value; -+} -+ -+#ifdef DEBUG -+static void perf_mark_setup(void) -+{ -+ dsi.perf_setup_time = ktime_get(); -+} -+ -+static void perf_mark_start(void) -+{ -+ dsi.perf_start_time = ktime_get(); -+} -+ -+static void perf_mark_start_auto(void) -+{ -+ dsi.perf_start_time_auto = ktime_get(); -+} -+ -+static void perf_show(const char *name) -+{ -+ ktime_t t, setup_time, trans_time; -+ u32 total_bytes; -+ u32 setup_us, trans_us, total_us; -+ -+ if (!dsi_perf) -+ return; -+ -+ if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED) -+ return; -+ -+ t = ktime_get(); -+ -+ setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time); -+ setup_us = (u32)ktime_to_us(setup_time); -+ if (setup_us == 0) -+ setup_us = 1; -+ -+ trans_time = ktime_sub(t, dsi.perf_start_time); -+ trans_us = (u32)ktime_to_us(trans_time); -+ if (trans_us == 0) -+ trans_us = 1; -+ -+ total_us = setup_us + trans_us; -+ -+ total_bytes = dsi.update_region.w * -+ dsi.update_region.h * -+ dsi.update_region.bytespp; -+ -+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) { -+ static u32 s_total_trans_us, s_total_setup_us; -+ static u32 s_min_trans_us = 0xffffffff, s_min_setup_us; -+ static u32 s_max_trans_us, s_max_setup_us; -+ const int numframes = 100; -+ ktime_t total_time_auto; -+ u32 total_time_auto_us; -+ -+ dsi.perf_measure_frames++; -+ -+ if (setup_us < s_min_setup_us) -+ s_min_setup_us = setup_us; -+ -+ if (setup_us > s_max_setup_us) -+ s_max_setup_us = setup_us; -+ -+ s_total_setup_us += setup_us; -+ -+ if (trans_us < s_min_trans_us) -+ s_min_trans_us = trans_us; -+ -+ if (trans_us > s_max_trans_us) -+ s_max_trans_us = trans_us; -+ -+ s_total_trans_us += trans_us; -+ -+ if (dsi.perf_measure_frames < numframes) -+ return; 
-+ -+ total_time_auto = ktime_sub(t, dsi.perf_start_time_auto); -+ total_time_auto_us = (u32)ktime_to_us(total_time_auto); -+ -+ printk("DSI(%s): %u fps, setup %u/%u/%u, trans %u/%u/%u\n", -+ name, -+ 1000 * 1000 * numframes / total_time_auto_us, -+ s_min_setup_us, -+ s_max_setup_us, -+ s_total_setup_us / numframes, -+ s_min_trans_us, -+ s_max_trans_us, -+ s_total_trans_us / numframes); -+ -+ dsi.perf_measure_frames = 0; -+ s_total_setup_us = 0; -+ s_min_setup_us = 0xffffffff; -+ s_max_setup_us = 0; -+ s_total_trans_us = 0; -+ s_min_trans_us = 0xffffffff; -+ s_max_trans_us = 0; -+ perf_mark_start_auto(); -+ } else { -+ printk("DSI(%s): %u us + %u us = %u us (%uHz), %u bytes, " -+ "%u kbytes/sec\n", -+ name, -+ setup_us, -+ trans_us, -+ total_us, -+ 1000*1000 / total_us, -+ total_bytes, -+ total_bytes * 1000 / total_us); -+ } -+} -+#else -+#define perf_mark_setup() -+#define perf_mark_start() -+#define perf_show(x) -+#endif -+ -+static void print_irq_status(u32 status) -+{ -+#ifndef VERBOSE_IRQ -+ if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0) -+ return; -+#endif -+ printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status); -+ -+#define PIS(x) \ -+ if (status & DSI_IRQ_##x) \ -+ printk(#x " "); -+#ifdef VERBOSE_IRQ -+ PIS(VC0); -+ PIS(VC1); -+ PIS(VC2); -+ PIS(VC3); -+#endif -+ PIS(WAKEUP); -+ PIS(RESYNC); -+ PIS(PLL_LOCK); -+ PIS(PLL_UNLOCK); -+ PIS(PLL_RECALL); -+ PIS(COMPLEXIO_ERR); -+ PIS(HS_TX_TIMEOUT); -+ PIS(LP_RX_TIMEOUT); -+ PIS(TE_TRIGGER); -+ PIS(ACK_TRIGGER); -+ PIS(SYNC_LOST); -+ PIS(LDO_POWER_GOOD); -+ PIS(TA_TIMEOUT); -+#undef PIS -+ -+ printk("\n"); -+} -+ -+static void print_irq_status_vc(int channel, u32 status) -+{ -+#ifndef VERBOSE_IRQ -+ if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0) -+ return; -+#endif -+ printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status); -+ -+#define PIS(x) \ -+ if (status & DSI_VC_IRQ_##x) \ -+ printk(#x " "); -+ PIS(CS); -+ PIS(ECC_CORR); -+#ifdef VERBOSE_IRQ -+ PIS(PACKET_SENT); -+#endif -+ PIS(FIFO_TX_OVF); -+ PIS(FIFO_RX_OVF); -+ PIS(BTA); -+ PIS(ECC_NO_CORR); -+ PIS(FIFO_TX_UDF); -+ PIS(PP_BUSY_CHANGE); -+#undef PIS -+ printk("\n"); -+} -+ -+static void print_irq_status_cio(u32 status) -+{ -+ printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status); -+ -+#define PIS(x) \ -+ if (status & DSI_CIO_IRQ_##x) \ -+ printk(#x " "); -+ PIS(ERRSYNCESC1); -+ PIS(ERRSYNCESC2); -+ PIS(ERRSYNCESC3); -+ PIS(ERRESC1); -+ PIS(ERRESC2); -+ PIS(ERRESC3); -+ PIS(ERRCONTROL1); -+ PIS(ERRCONTROL2); -+ PIS(ERRCONTROL3); -+ PIS(STATEULPS1); -+ PIS(STATEULPS2); -+ PIS(STATEULPS3); -+ PIS(ERRCONTENTIONLP0_1); -+ PIS(ERRCONTENTIONLP1_1); -+ PIS(ERRCONTENTIONLP0_2); -+ PIS(ERRCONTENTIONLP1_2); -+ PIS(ERRCONTENTIONLP0_3); -+ PIS(ERRCONTENTIONLP1_3); -+ PIS(ULPSACTIVENOT_ALL0); -+ PIS(ULPSACTIVENOT_ALL1); -+#undef PIS -+ -+ printk("\n"); -+} -+ -+static int debug_irq; -+ -+/* called from dss */ -+void dsi_irq_handler(void) -+{ -+ u32 irqstatus, vcstatus, ciostatus; -+ int i; -+ -+ irqstatus = dsi_read_reg(DSI_IRQSTATUS); -+ -+ if (irqstatus & DSI_IRQ_ERROR_MASK) { -+ DSSERR("DSI error, irqstatus %x\n", irqstatus); -+ print_irq_status(irqstatus); -+ spin_lock(&dsi.errors_lock); -+ dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK; -+ spin_unlock(&dsi.errors_lock); -+ } else if (debug_irq) { -+ print_irq_status(irqstatus); -+ } -+ -+ for (i = 0; i < 4; ++i) { -+ if ((irqstatus & (1<hw_config.u.dsi.lp_clk_hz; -+ -+ for (n = 1; n < (1 << 13) - 1; ++n) { -+ lp_clk = dsi_fclk / 2 / n; -+ if (lp_clk <= lp_clk_req) -+ break; -+ } -+ -+ if (n == (1 << 13) - 1) { -+ DSSERR("Failed to find 
LP_CLK_DIVISOR\n"); -+ return -EINVAL; -+ } -+ -+ DSSDBG("LP_CLK_DIV %u, LP_CLK %lu (req %lu)\n", n, lp_clk, lp_clk_req); -+ -+ REG_FLD_MOD(DSI_CLK_CTRL, n, 12, 0); /* LP_CLK_DIVISOR */ -+ if (dsi_fclk > 30*1000*1000) -+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 21, 21); /* LP_RX_SYNCHRO_ENABLE */ -+ -+ return 0; -+} -+ -+ -+enum dsi_pll_power_state { -+ DSI_PLL_POWER_OFF = 0x0, -+ DSI_PLL_POWER_ON_HSCLK = 0x1, -+ DSI_PLL_POWER_ON_ALL = 0x2, -+ DSI_PLL_POWER_ON_DIV = 0x3, -+}; -+ -+static int dsi_pll_power(enum dsi_pll_power_state state) -+{ -+ int t = 0; -+ -+ REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */ -+ -+ /* PLL_PWR_STATUS */ -+ while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { -+ udelay(1); -+ if (t++ > 1000) { -+ DSSERR("Failed to set DSI PLL power mode to %d\n", -+ state); -+ return -ENODEV; -+ } -+ } -+ -+ return 0; -+} -+ -+int dsi_pll_calc_pck(bool is_tft, unsigned long req_pck, -+ struct dsi_clock_info *cinfo) -+{ -+ struct dsi_clock_info cur, best; -+ int min_fck_per_pck; -+ int match = 0; -+ -+ if (req_pck == dsi.cache_req_pck && -+ dsi.cache_cinfo.clkin == dss_clk_get_rate(DSS_CLK_FCK2)) { -+ DSSDBG("DSI clock info found from cache\n"); -+ *cinfo = dsi.cache_cinfo; -+ return 0; -+ } -+ -+ min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK; -+ -+ if (min_fck_per_pck && -+ req_pck * min_fck_per_pck > DISPC_MAX_FCK) { -+ DSSERR("Requested pixel clock not possible with the current " -+ "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning " -+ "the constraint off.\n"); -+ min_fck_per_pck = 0; -+ } -+ -+ DSSDBG("dsi_pll_calc\n"); -+ -+retry: -+ memset(&best, 0, sizeof(best)); -+ -+ memset(&cur, 0, sizeof(cur)); -+ cur.clkin = dss_clk_get_rate(DSS_CLK_FCK2); -+ cur.use_dss2_fck = 1; -+ cur.highfreq = 0; -+ -+ /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ -+ /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ -+ /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ -+ for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) { -+ if (cur.highfreq == 0) -+ cur.fint = cur.clkin / cur.regn; -+ else -+ cur.fint = cur.clkin / (2 * cur.regn); -+ -+ if (cur.fint > FINT_MAX || cur.fint < FINT_MIN) -+ continue; -+ -+ /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ -+ for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) { -+ unsigned long a, b; -+ -+ a = 2 * cur.regm * (cur.clkin/1000); -+ b = cur.regn * (cur.highfreq + 1); -+ cur.dsiphy = a / b * 1000; -+ -+ if (cur.dsiphy > 1800 * 1000 * 1000) -+ break; -+ -+ /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */ -+ for (cur.regm3 = 1; cur.regm3 < REGM3_MAX; -+ ++cur.regm3) { -+ cur.dsi1_pll_fclk = cur.dsiphy / cur.regm3; -+ -+ /* this will narrow down the search a bit, -+ * but still give pixclocks below what was -+ * requested */ -+ if (cur.dsi1_pll_fclk < req_pck) -+ break; -+ -+ if (cur.dsi1_pll_fclk > DISPC_MAX_FCK) -+ continue; -+ -+ if (min_fck_per_pck && -+ cur.dsi1_pll_fclk < -+ req_pck * min_fck_per_pck) -+ continue; -+ -+ match = 1; -+ -+ find_lck_pck_divs(is_tft, req_pck, -+ cur.dsi1_pll_fclk, -+ &cur.lck_div, -+ &cur.pck_div); -+ -+ cur.lck = cur.dsi1_pll_fclk / cur.lck_div; -+ cur.pck = cur.lck / cur.pck_div; -+ -+ if (abs(cur.pck - req_pck) < -+ abs(best.pck - req_pck)) { -+ best = cur; -+ -+ if (cur.pck == req_pck) -+ goto found; -+ } -+ } -+ } -+ } -+found: -+ if (!match) { -+ if (min_fck_per_pck) { -+ DSSERR("Could not find suitable clock settings.\n" -+ "Turning FCK/PCK constraint off and" -+ "trying again.\n"); -+ min_fck_per_pck = 0; -+ goto retry; -+ } -+ -+ DSSERR("Could not 
find suitable clock settings.\n"); -+ -+ return -EINVAL; -+ } -+ -+ /* DSI2_PLL_FCLK (regm4) is not used. Set it to something sane. */ -+ best.regm4 = best.dsiphy / 48000000; -+ if (best.regm4 > REGM4_MAX) -+ best.regm4 = REGM4_MAX; -+ else if (best.regm4 == 0) -+ best.regm4 = 1; -+ best.dsi2_pll_fclk = best.dsiphy / best.regm4; -+ -+ if (cinfo) -+ *cinfo = best; -+ -+ dsi.cache_req_pck = req_pck; -+ dsi.cache_clk_freq = 0; -+ dsi.cache_cinfo = best; -+ -+ return 0; -+} -+ -+static int dsi_pll_calc_ddrfreq(unsigned long clk_freq, -+ struct dsi_clock_info *cinfo) -+{ -+ struct dsi_clock_info cur, best; -+ const bool use_dss2_fck = 1; -+ unsigned long datafreq; -+ -+ DSSDBG("dsi_pll_calc_ddrfreq\n"); -+ -+ if (clk_freq == dsi.cache_clk_freq && -+ dsi.cache_cinfo.clkin == dss_clk_get_rate(DSS_CLK_FCK2)) { -+ DSSDBG("DSI clock info found from cache\n"); -+ *cinfo = dsi.cache_cinfo; -+ return 0; -+ } -+ -+ datafreq = clk_freq * 4; -+ -+ memset(&best, 0, sizeof(best)); -+ -+ memset(&cur, 0, sizeof(cur)); -+ cur.use_dss2_fck = use_dss2_fck; -+ if (use_dss2_fck) { -+ cur.clkin = dss_clk_get_rate(DSS_CLK_FCK2); -+ cur.highfreq = 0; -+ } else { -+ cur.clkin = dispc_pclk_rate(); -+ if (cur.clkin < 32000000) -+ cur.highfreq = 0; -+ else -+ cur.highfreq = 1; -+ } -+ -+ /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ -+ /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ -+ /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ -+ for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) { -+ if (cur.highfreq == 0) -+ cur.fint = cur.clkin / cur.regn; -+ else -+ cur.fint = cur.clkin / (2 * cur.regn); -+ -+ if (cur.fint > FINT_MAX || cur.fint < FINT_MIN) -+ continue; -+ -+ /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ -+ for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) { -+ unsigned long a, b; -+ -+ a = 2 * cur.regm * (cur.clkin/1000); -+ b = cur.regn * (cur.highfreq + 1); -+ cur.dsiphy = a / b * 1000; -+ -+ if (cur.dsiphy > 1800 * 1000 * 1000) -+ break; -+ -+ if (abs(cur.dsiphy - datafreq) < -+ abs(best.dsiphy - datafreq)) { -+ best = cur; -+ /* DSSDBG("best %ld\n", best.dsiphy); */ -+ } -+ -+ if (cur.dsiphy == datafreq) -+ goto found; -+ } -+ } -+found: -+ /* DSI1_PLL_FCLK (regm3) is not used. Set it to something sane. */ -+ best.regm3 = best.dsiphy / 48000000; -+ if (best.regm3 > REGM3_MAX) -+ best.regm3 = REGM3_MAX; -+ else if (best.regm3 == 0) -+ best.regm3 = 1; -+ best.dsi1_pll_fclk = best.dsiphy / best.regm3; -+ -+ /* DSI2_PLL_FCLK (regm4) is not used. Set it to something sane. */ -+ best.regm4 = best.dsiphy / 48000000; -+ if (best.regm4 > REGM4_MAX) -+ best.regm4 = REGM4_MAX; -+ else if (best.regm4 == 0) -+ best.regm4 = 1; -+ best.dsi2_pll_fclk = best.dsiphy / best.regm4; -+ -+ if (cinfo) -+ *cinfo = best; -+ -+ dsi.cache_clk_freq = clk_freq; -+ dsi.cache_req_pck = 0; -+ dsi.cache_cinfo = best; -+ -+ return 0; -+} -+ -+int dsi_pll_program(struct dsi_clock_info *cinfo) -+{ -+ int r = 0; -+ u32 l; -+ -+ DSSDBG("dsi_pll_program\n"); -+ -+ dsi.dsiphy = cinfo->dsiphy; -+ dsi.ddr_clk = dsi.dsiphy / 4; -+ dsi.dsi1_pll_fclk = cinfo->dsi1_pll_fclk; -+ dsi.dsi2_pll_fclk = cinfo->dsi2_pll_fclk; -+ -+ DSSDBG("DSI Fint %ld\n", cinfo->fint); -+ -+ DSSDBG("clkin (%s) rate %ld, highfreq %d\n", -+ cinfo->use_dss2_fck ? 
"dss2_fck" : "pclkfree", -+ cinfo->clkin, -+ cinfo->highfreq); -+ -+ /* DSIPHY == CLKIN4DDR */ -+ DSSDBG("DSIPHY = 2 * %d / %d * %lu / %d = %lu\n", -+ cinfo->regm, -+ cinfo->regn, -+ cinfo->clkin, -+ cinfo->highfreq + 1, -+ cinfo->dsiphy); -+ -+ DSSDBG("Data rate on 1 DSI lane %ld Mbps\n", -+ dsi.dsiphy / 1000 / 1000 / 2); -+ -+ DSSDBG("Clock lane freq %ld Hz\n", dsi.ddr_clk); -+ -+ DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n", -+ cinfo->regm3, cinfo->dsi1_pll_fclk); -+ DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n", -+ cinfo->regm4, cinfo->dsi2_pll_fclk); -+ -+ REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */ -+ -+ l = dsi_read_reg(DSI_PLL_CONFIGURATION1); -+ l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */ -+ l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */ -+ l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */ -+ l = FLD_MOD(l, cinfo->regm3 - 1, 22, 19); /* DSI_CLOCK_DIV */ -+ l = FLD_MOD(l, cinfo->regm4 - 1, 26, 23); /* DSIPROTO_CLOCK_DIV */ -+ dsi_write_reg(DSI_PLL_CONFIGURATION1, l); -+ -+ l = dsi_read_reg(DSI_PLL_CONFIGURATION2); -+ l = FLD_MOD(l, 7, 4, 1); /* DSI_PLL_FREQSEL */ -+ /* DSI_PLL_CLKSEL */ -+ l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1, 11, 11); -+ l = FLD_MOD(l, cinfo->highfreq, 12, 12); /* DSI_PLL_HIGHFREQ */ -+ l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ -+ l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ -+ l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ -+ dsi_write_reg(DSI_PLL_CONFIGURATION2, l); -+ -+ REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ -+ -+ if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) { -+ DSSERR("dsi pll go bit not going down.\n"); -+ r = -EIO; -+ goto err; -+ } -+ -+ if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) { -+ DSSERR("cannot lock PLL\n"); -+ r = -EIO; -+ goto err; -+ } -+ -+ dsi.pll_locked = 1; -+ -+ l = dsi_read_reg(DSI_PLL_CONFIGURATION2); -+ l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ -+ l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */ -+ l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */ -+ l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */ -+ l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */ -+ l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */ -+ l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ -+ l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */ -+ l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */ -+ l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */ -+ l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */ -+ l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */ -+ l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */ -+ l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */ -+ dsi_write_reg(DSI_PLL_CONFIGURATION2, l); -+ -+ DSSDBG("PLL config done\n"); -+err: -+ return r; -+} -+ -+int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv) -+{ -+ int r = 0; -+ enum dsi_pll_power_state pwstate; -+ struct dispc_clock_info cinfo; -+ -+ DSSDBG("PLL init\n"); -+ -+ enable_clocks(1); -+ dsi_enable_pll_clock(1); -+ -+ /* XXX this should be calculated depending on the screen size, -+ * required framerate and DSI speed. -+ * For now 48MHz is enough for 864x480@60 with 360Mbps/lane -+ * with two lanes */ -+ r = dispc_calc_clock_div(1, 48 * 1000 * 1000, &cinfo); -+ if (r) -+ goto err0; -+ -+ r = dispc_set_clock_div(&cinfo); -+ if (r) { -+ DSSERR("Failed to set basic clocks\n"); -+ goto err0; -+ } -+ -+ r = dss_dsi_power_up(); -+ if (r) -+ goto err0; -+ -+ /* XXX PLL does not come out of reset without this... 
*/ -+ dispc_pck_free_enable(1); -+ -+ if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) { -+ DSSERR("PLL not coming out of reset.\n"); -+ r = -ENODEV; -+ goto err1; -+ } -+ -+ /* XXX ... but if left on, we get problems when planes do not -+ * fill the whole display. No idea about this */ -+ dispc_pck_free_enable(0); -+ -+ if (enable_hsclk && enable_hsdiv) -+ pwstate = DSI_PLL_POWER_ON_ALL; -+ else if (enable_hsclk) -+ pwstate = DSI_PLL_POWER_ON_HSCLK; -+ else if (enable_hsdiv) -+ pwstate = DSI_PLL_POWER_ON_DIV; -+ else -+ pwstate = DSI_PLL_POWER_OFF; -+ -+ r = dsi_pll_power(pwstate); -+ -+ if (r) -+ goto err1; -+ -+ DSSDBG("PLL init done\n"); -+ -+ return 0; -+err1: -+ dss_dsi_power_down(); -+err0: -+ enable_clocks(0); -+ dsi_enable_pll_clock(0); -+ return r; -+} -+ -+void dsi_pll_uninit(void) -+{ -+ enable_clocks(0); -+ dsi_enable_pll_clock(0); -+ -+ dsi.pll_locked = 0; -+ dsi_pll_power(DSI_PLL_POWER_OFF); -+ dss_dsi_power_down(); -+ DSSDBG("PLL uninit done\n"); -+} -+ -+unsigned long dsi_get_dsi1_pll_rate(void) -+{ -+ return dsi.dsi1_pll_fclk; -+} -+ -+unsigned long dsi_get_dsi2_pll_rate(void) -+{ -+ return dsi.dsi2_pll_fclk; -+} -+ -+void dsi_dump_clocks(struct seq_file *s) -+{ -+ int clksel; -+ -+ enable_clocks(1); -+ -+ clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11); -+ -+ seq_printf(s, "- dsi -\n"); -+ -+ seq_printf(s, "dsi fclk source = %s\n", -+ dss_get_dsi_clk_source() == 0 ? -+ "dss1_alwon_fclk" : "dsi2_pll_fclk"); -+ -+ seq_printf(s, "dsi pll source = %s\n", -+ clksel == 0 ? -+ "dss2_alwon_fclk" : "pclkfree"); -+ -+ seq_printf(s, "DSIPHY\t\t%lu\nDDR_CLK\t\t%lu\n", -+ dsi.dsiphy, dsi.ddr_clk); -+ -+ seq_printf(s, "dsi1_pll_fck\t%lu (%s)\n" -+ "dsi2_pll_fck\t%lu (%s)\n", -+ dsi.dsi1_pll_fclk, -+ dss_get_dispc_clk_source() == 0 ? "off" : "on", -+ dsi.dsi2_pll_fclk, -+ dss_get_dsi_clk_source() == 0 ? 
"off" : "on"); -+ -+ enable_clocks(0); -+} -+ -+void dsi_dump_regs(struct seq_file *s) -+{ -+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ DUMPREG(DSI_REVISION); -+ DUMPREG(DSI_SYSCONFIG); -+ DUMPREG(DSI_SYSSTATUS); -+ DUMPREG(DSI_IRQSTATUS); -+ DUMPREG(DSI_IRQENABLE); -+ DUMPREG(DSI_CTRL); -+ DUMPREG(DSI_COMPLEXIO_CFG1); -+ DUMPREG(DSI_COMPLEXIO_IRQ_STATUS); -+ DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE); -+ DUMPREG(DSI_CLK_CTRL); -+ DUMPREG(DSI_TIMING1); -+ DUMPREG(DSI_TIMING2); -+ DUMPREG(DSI_VM_TIMING1); -+ DUMPREG(DSI_VM_TIMING2); -+ DUMPREG(DSI_VM_TIMING3); -+ DUMPREG(DSI_CLK_TIMING); -+ DUMPREG(DSI_TX_FIFO_VC_SIZE); -+ DUMPREG(DSI_RX_FIFO_VC_SIZE); -+ DUMPREG(DSI_COMPLEXIO_CFG2); -+ DUMPREG(DSI_RX_FIFO_VC_FULLNESS); -+ DUMPREG(DSI_VM_TIMING4); -+ DUMPREG(DSI_TX_FIFO_VC_EMPTINESS); -+ DUMPREG(DSI_VM_TIMING5); -+ DUMPREG(DSI_VM_TIMING6); -+ DUMPREG(DSI_VM_TIMING7); -+ DUMPREG(DSI_STOPCLK_TIMING); -+ -+ DUMPREG(DSI_VC_CTRL(0)); -+ DUMPREG(DSI_VC_TE(0)); -+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(0)); -+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0)); -+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0)); -+ DUMPREG(DSI_VC_IRQSTATUS(0)); -+ DUMPREG(DSI_VC_IRQENABLE(0)); -+ -+ DUMPREG(DSI_VC_CTRL(1)); -+ DUMPREG(DSI_VC_TE(1)); -+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(1)); -+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1)); -+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1)); -+ DUMPREG(DSI_VC_IRQSTATUS(1)); -+ DUMPREG(DSI_VC_IRQENABLE(1)); -+ -+ DUMPREG(DSI_VC_CTRL(2)); -+ DUMPREG(DSI_VC_TE(2)); -+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(2)); -+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2)); -+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2)); -+ DUMPREG(DSI_VC_IRQSTATUS(2)); -+ DUMPREG(DSI_VC_IRQENABLE(2)); -+ -+ DUMPREG(DSI_VC_CTRL(3)); -+ DUMPREG(DSI_VC_TE(3)); -+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(3)); -+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3)); -+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3)); -+ DUMPREG(DSI_VC_IRQSTATUS(3)); -+ DUMPREG(DSI_VC_IRQENABLE(3)); -+ -+ DUMPREG(DSI_DSIPHY_CFG0); -+ DUMPREG(DSI_DSIPHY_CFG1); -+ DUMPREG(DSI_DSIPHY_CFG2); -+ DUMPREG(DSI_DSIPHY_CFG5); -+ -+ DUMPREG(DSI_PLL_CONTROL); -+ DUMPREG(DSI_PLL_STATUS); -+ DUMPREG(DSI_PLL_GO); -+ DUMPREG(DSI_PLL_CONFIGURATION1); -+ DUMPREG(DSI_PLL_CONFIGURATION2); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+#undef DUMPREG -+} -+ -+enum dsi_complexio_power_state { -+ DSI_COMPLEXIO_POWER_OFF = 0x0, -+ DSI_COMPLEXIO_POWER_ON = 0x1, -+ DSI_COMPLEXIO_POWER_ULPS = 0x2, -+}; -+ -+static int dsi_complexio_power(enum dsi_complexio_power_state state) -+{ -+ int t = 0; -+ -+ /* PWR_CMD */ -+ REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27); -+ -+ /* PWR_STATUS */ -+ while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { -+ udelay(1); -+ if (t++ > 1000) { -+ DSSERR("failed to set complexio power state to " -+ "%d\n", state); -+ return -ENODEV; -+ } -+ } -+ -+ return 0; -+} -+ -+static void dsi_complexio_config(struct omap_display *display) -+{ -+ u32 r; -+ -+ int clk_lane = display->hw_config.u.dsi.clk_lane; -+ int data1_lane = display->hw_config.u.dsi.data1_lane; -+ int data2_lane = display->hw_config.u.dsi.data2_lane; -+ int clk_pol = display->hw_config.u.dsi.clk_pol; -+ int data1_pol = display->hw_config.u.dsi.data1_pol; -+ int data2_pol = display->hw_config.u.dsi.data2_pol; -+ -+ r = dsi_read_reg(DSI_COMPLEXIO_CFG1); -+ r = FLD_MOD(r, clk_lane, 2, 0); -+ r = FLD_MOD(r, clk_pol, 3, 3); -+ r = FLD_MOD(r, data1_lane, 6, 4); -+ r = FLD_MOD(r, data1_pol, 7, 7); -+ r = FLD_MOD(r, data2_lane, 10, 8); -+ r = FLD_MOD(r, 
data2_pol, 11, 11); -+ dsi_write_reg(DSI_COMPLEXIO_CFG1, r); -+ -+ /* The configuration of the DSI complex I/O (number of data lanes, -+ position, differential order) should not be changed while -+ DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for -+ the hardware to take into account a new configuration of the complex -+ I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to -+ follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, -+ then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set -+ DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the -+ DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the -+ DSI complex I/O configuration is unknown. */ -+ -+ /* -+ REG_FLD_MOD(DSI_CTRL, 1, 0, 0); -+ REG_FLD_MOD(DSI_CTRL, 0, 0, 0); -+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); -+ REG_FLD_MOD(DSI_CTRL, 1, 0, 0); -+ */ -+} -+ -+static inline unsigned ns2ddr(unsigned ns) -+{ -+ /* convert time in ns to ddr ticks, rounding up */ -+ return (ns * (dsi.ddr_clk/1000/1000) + 999) / 1000; -+} -+ -+static inline unsigned ddr2ns(unsigned ddr) -+{ -+ return ddr * 1000 * 1000 / (dsi.ddr_clk / 1000); -+} -+ -+static void dsi_complexio_timings(void) -+{ -+ u32 r; -+ u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; -+ u32 tlpx_half, tclk_trail, tclk_zero; -+ u32 tclk_prepare; -+ -+ /* calculate timings */ -+ -+ /* 1 * DDR_CLK = 2 * UI */ -+ -+ /* min 40ns + 4*UI max 85ns + 6*UI */ -+ ths_prepare = ns2ddr(70) + 2; -+ -+ /* min 145ns + 10*UI */ -+ ths_prepare_ths_zero = ns2ddr(175) + 2; -+ -+ /* min max(8*UI, 60ns+4*UI) */ -+ ths_trail = ns2ddr(60) + 5; -+ -+ /* min 100ns */ -+ ths_exit = ns2ddr(145); -+ -+ /* tlpx min 50n */ -+ tlpx_half = ns2ddr(25); -+ -+ /* min 60ns */ -+ tclk_trail = ns2ddr(60) + 2; -+ -+ /* min 38ns, max 95ns */ -+ tclk_prepare = ns2ddr(65); -+ -+ /* min tclk-prepare + tclk-zero = 300ns */ -+ tclk_zero = ns2ddr(260); -+ -+ DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", -+ ths_prepare, ddr2ns(ths_prepare), -+ ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero)); -+ DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", -+ ths_trail, ddr2ns(ths_trail), -+ ths_exit, ddr2ns(ths_exit)); -+ -+ DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " -+ "tclk_zero %u (%uns)\n", -+ tlpx_half, ddr2ns(tlpx_half), -+ tclk_trail, ddr2ns(tclk_trail), -+ tclk_zero, ddr2ns(tclk_zero)); -+ DSSDBG("tclk_prepare %u (%uns)\n", -+ tclk_prepare, ddr2ns(tclk_prepare)); -+ -+ /* program timings */ -+ -+ r = dsi_read_reg(DSI_DSIPHY_CFG0); -+ r = FLD_MOD(r, ths_prepare, 31, 24); -+ r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); -+ r = FLD_MOD(r, ths_trail, 15, 8); -+ r = FLD_MOD(r, ths_exit, 7, 0); -+ dsi_write_reg(DSI_DSIPHY_CFG0, r); -+ -+ r = dsi_read_reg(DSI_DSIPHY_CFG1); -+ r = FLD_MOD(r, tlpx_half, 22, 16); -+ r = FLD_MOD(r, tclk_trail, 15, 8); -+ r = FLD_MOD(r, tclk_zero, 7, 0); -+ dsi_write_reg(DSI_DSIPHY_CFG1, r); -+ -+ r = dsi_read_reg(DSI_DSIPHY_CFG2); -+ r = FLD_MOD(r, tclk_prepare, 7, 0); -+ dsi_write_reg(DSI_DSIPHY_CFG2, r); -+} -+ -+ -+static int dsi_complexio_init(struct omap_display *display) -+{ -+ int r = 0; -+ -+ DSSDBG("dsi_complexio_init\n"); -+ -+ /* CIO_CLK_ICG, enable L3 clk to CIO */ -+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14); -+ -+ /* A dummy read using the SCP interface to any DSIPHY register is -+ * required after DSIPHY reset to complete the reset of the DSI complex -+ * I/O. 
*/ -+ dsi_read_reg(DSI_DSIPHY_CFG5); -+ -+ if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) { -+ DSSERR("ComplexIO PHY not coming out of reset.\n"); -+ r = -ENODEV; -+ goto err; -+ } -+ -+ dsi_complexio_config(display); -+ -+ r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON); -+ -+ if (r) -+ goto err; -+ -+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) { -+ DSSERR("ComplexIO not coming out of reset.\n"); -+ r = -ENODEV; -+ goto err; -+ } -+ -+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) { -+ DSSERR("ComplexIO LDO power down.\n"); -+ r = -ENODEV; -+ goto err; -+ } -+ -+ dsi_complexio_timings(); -+ -+ /* -+ The configuration of the DSI complex I/O (number of data lanes, -+ position, differential order) should not be changed while -+ DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the -+ hardware to recognize a new configuration of the complex I/O (done -+ in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow -+ this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next -+ reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20] -+ LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN -+ bit to 1. If the sequence is not followed, the DSi complex I/O -+ configuration is undetermined. -+ */ -+ dsi_if_enable(1); -+ dsi_if_enable(0); -+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ -+ dsi_if_enable(1); -+ dsi_if_enable(0); -+ -+ DSSDBG("CIO init done\n"); -+err: -+ return r; -+} -+ -+static void dsi_complexio_uninit(void) -+{ -+ dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF); -+} -+ -+static int _dsi_wait_reset(void) -+{ -+ int i = 0; -+ -+ while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { -+ if (i++ > 5) { -+ DSSERR("soft reset failed\n"); -+ return -ENODEV; -+ } -+ udelay(1); -+ } -+ -+ return 0; -+} -+ -+static int _dsi_reset(void) -+{ -+ /* Soft reset */ -+ REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1); -+ return _dsi_wait_reset(); -+} -+ -+ -+static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, -+ enum fifo_size size3, enum fifo_size size4) -+{ -+ u32 r = 0; -+ int add = 0; -+ int i; -+ -+ dsi.vc[0].fifo_size = size1; -+ dsi.vc[1].fifo_size = size2; -+ dsi.vc[2].fifo_size = size3; -+ dsi.vc[3].fifo_size = size4; -+ -+ for (i = 0; i < 4; i++) { -+ u8 v; -+ int size = dsi.vc[i].fifo_size; -+ -+ if (add + size > 4) { -+ DSSERR("Illegal FIFO configuration\n"); -+ BUG(); -+ } -+ -+ v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); -+ r |= v << (8 * i); -+ /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */ -+ add += size; -+ } -+ -+ dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r); -+} -+ -+static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, -+ enum fifo_size size3, enum fifo_size size4) -+{ -+ u32 r = 0; -+ int add = 0; -+ int i; -+ -+ dsi.vc[0].fifo_size = size1; -+ dsi.vc[1].fifo_size = size2; -+ dsi.vc[2].fifo_size = size3; -+ dsi.vc[3].fifo_size = size4; -+ -+ for (i = 0; i < 4; i++) { -+ u8 v; -+ int size = dsi.vc[i].fifo_size; -+ -+ if (add + size > 4) { -+ DSSERR("Illegal FIFO configuration\n"); -+ BUG(); -+ } -+ -+ v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); -+ r |= v << (8 * i); -+ /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */ -+ add += size; -+ } -+ -+ dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r); -+} -+ -+static int dsi_force_tx_stop_mode_io(void) -+{ -+ u32 r; -+ -+ r = dsi_read_reg(DSI_TIMING1); -+ r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ -+ dsi_write_reg(DSI_TIMING1, r); -+ -+ if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) { -+ 
DSSERR("TX_STOP bit not going down\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static void dsi_vc_print_status(int channel) -+{ -+ u32 r; -+ -+ r = dsi_read_reg(DSI_VC_CTRL(channel)); -+ DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, " -+ "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ", -+ channel, -+ FLD_GET(r, 5, 5), -+ FLD_GET(r, 6, 6), -+ FLD_GET(r, 15, 15), -+ FLD_GET(r, 16, 16), -+ FLD_GET(r, 20, 20)); -+ -+ r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS); -+ DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff); -+} -+ -+static void dsi_vc_config(int channel) -+{ -+ u32 r; -+ -+ DSSDBG("dsi_vc_config %d\n", channel); -+ -+ r = dsi_read_reg(DSI_VC_CTRL(channel)); -+ -+ r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */ -+ r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */ -+ r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */ -+ r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */ -+ r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ -+ r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ -+ r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */ -+ -+ r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ -+ r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ -+ -+ dsi_write_reg(DSI_VC_CTRL(channel), r); -+} -+ -+static void dsi_vc_config_vp(int channel) -+{ -+ u32 r; -+ -+ DSSDBG("dsi_vc_config_vp\n"); -+ -+ r = dsi_read_reg(DSI_VC_CTRL(channel)); -+ -+ r = FLD_MOD(r, 1, 1, 1); /* SOURCE, 1 = video port */ -+ r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */ -+ r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */ -+ r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */ -+ r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ -+ r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ -+ r = FLD_MOD(r, 1, 9, 9); /* MODE_SPEED, high speed on/off */ -+ -+ r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ -+ r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ -+ -+ dsi_write_reg(DSI_VC_CTRL(channel), r); -+} -+ -+ -+static int dsi_vc_enable(int channel, bool enable) -+{ -+ DSSDBG("dsi_vc_enable channel %d, enable %d\n", channel, enable); -+ -+ enable = enable ? 
1 : 0; -+ -+ REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0); -+ -+ if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) { -+ DSSERR("Failed to set dsi_vc_enable to %d\n", enable); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static void dsi_vc_enable_hs(int channel, bool enable) -+{ -+ DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); -+ -+ dsi_vc_enable(channel, 0); -+ dsi_if_enable(0); -+ -+ REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9); -+ -+ dsi_vc_enable(channel, 1); -+ dsi_if_enable(1); -+ -+ dsi_force_tx_stop_mode_io(); -+} -+ -+static void dsi_vc_flush_long_data(int channel) -+{ -+ while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { -+ u32 val; -+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); -+ DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", -+ (val >> 0) & 0xff, -+ (val >> 8) & 0xff, -+ (val >> 16) & 0xff, -+ (val >> 24) & 0xff); -+ } -+} -+ -+static void dsi_show_rx_ack_with_err(u16 err) -+{ -+ DSSERR("\tACK with ERROR (%#x):\n", err); -+ if (err & (1 << 0)) -+ DSSERR("\t\tSoT Error\n"); -+ if (err & (1 << 1)) -+ DSSERR("\t\tSoT Sync Error\n"); -+ if (err & (1 << 2)) -+ DSSERR("\t\tEoT Sync Error\n"); -+ if (err & (1 << 3)) -+ DSSERR("\t\tEscape Mode Entry Command Error\n"); -+ if (err & (1 << 4)) -+ DSSERR("\t\tLP Transmit Sync Error\n"); -+ if (err & (1 << 5)) -+ DSSERR("\t\tHS Receive Timeout Error\n"); -+ if (err & (1 << 6)) -+ DSSERR("\t\tFalse Control Error\n"); -+ if (err & (1 << 7)) -+ DSSERR("\t\t(reserved7)\n"); -+ if (err & (1 << 8)) -+ DSSERR("\t\tECC Error, single-bit (corrected)\n"); -+ if (err & (1 << 9)) -+ DSSERR("\t\tECC Error, multi-bit (not corrected)\n"); -+ if (err & (1 << 10)) -+ DSSERR("\t\tChecksum Error\n"); -+ if (err & (1 << 11)) -+ DSSERR("\t\tData type not recognized\n"); -+ if (err & (1 << 12)) -+ DSSERR("\t\tInvalid VC ID\n"); -+ if (err & (1 << 13)) -+ DSSERR("\t\tInvalid Transmission Length\n"); -+ if (err & (1 << 14)) -+ DSSERR("\t\t(reserved14)\n"); -+ if (err & (1 << 15)) -+ DSSERR("\t\tDSI Protocol Violation\n"); -+} -+ -+static u16 dsi_vc_flush_receive_data(int channel) -+{ -+ /* RX_FIFO_NOT_EMPTY */ -+ while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { -+ u32 val; -+ u8 dt; -+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); -+ DSSDBG("\trawval %#08x\n", val); -+ dt = FLD_GET(val, 5, 0); -+ if (dt == DSI_DT_RX_ACK_WITH_ERR) { -+ u16 err = FLD_GET(val, 23, 8); -+ dsi_show_rx_ack_with_err(err); -+ } else if (dt == DSI_DT_RX_SHORT_READ_1) { -+ DSSDBG("\tDCS short response, 1 byte: %#x\n", -+ FLD_GET(val, 23, 8)); -+ } else if (dt == DSI_DT_RX_SHORT_READ_2) { -+ DSSDBG("\tDCS short response, 2 byte: %#x\n", -+ FLD_GET(val, 23, 8)); -+ } else if (dt == DSI_DT_RX_DCS_LONG_READ) { -+ DSSDBG("\tDCS long response, len %d\n", -+ FLD_GET(val, 23, 8)); -+ dsi_vc_flush_long_data(channel); -+ } else { -+ DSSERR("\tunknown datatype 0x%02x\n", dt); -+ } -+ } -+ return 0; -+} -+ -+static int dsi_vc_send_bta(int channel) -+{ -+ unsigned long tmo; -+ -+ /*DSSDBG("dsi_vc_send_bta_sync %d\n", channel); */ -+ -+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ -+ DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); -+ dsi_vc_flush_receive_data(channel); -+ } -+ -+ REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ -+ -+ tmo = jiffies + msecs_to_jiffies(10); -+ while (REG_GET(DSI_VC_CTRL(channel), 6, 6) == 1) { -+ if (time_after(jiffies, tmo)) { -+ DSSERR("Failed to send BTA\n"); -+ return -EIO; -+ } -+ } -+ -+ return 0; -+} -+ -+static int dsi_vc_send_bta_sync(int channel) -+{ -+ int r 
= 0; -+ u32 err; -+ -+ init_completion(&dsi.bta_completion); -+ -+ dsi_vc_enable_bta_irq(channel); -+ -+ r = dsi_vc_send_bta(channel); -+ if (r) -+ goto err; -+ -+ if (wait_for_completion_timeout(&dsi.bta_completion, -+ msecs_to_jiffies(500)) == 0) { -+ DSSERR("Failed to receive BTA\n"); -+ r = -EIO; -+ goto err; -+ } -+ -+ err = dsi_get_errors(); -+ if (err) { -+ DSSERR("Error while sending BTA: %x\n", err); -+ r = -EIO; -+ goto err; -+ } -+err: -+ dsi_vc_disable_bta_irq(channel); -+ -+ return r; -+} -+ -+static inline void dsi_vc_write_long_header(int channel, u8 data_type, -+ u16 len, u8 ecc) -+{ -+ u32 val; -+ u8 data_id; -+ -+ /*data_id = data_type | channel << 6; */ -+ data_id = data_type | dsi.vc[channel].dest_per << 6; -+ -+ val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | -+ FLD_VAL(ecc, 31, 24); -+ -+ dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val); -+} -+ -+static inline void dsi_vc_write_long_payload(int channel, -+ u8 b1, u8 b2, u8 b3, u8 b4) -+{ -+ u32 val; -+ -+ val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0; -+ -+/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", -+ b1, b2, b3, b4, val); */ -+ -+ dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val); -+} -+ -+static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, -+ u8 ecc) -+{ -+ /*u32 val; */ -+ int i; -+ u8 *p; -+ int r = 0; -+ u8 b1, b2, b3, b4; -+ -+ if (dsi.debug_write) -+ DSSDBG("dsi_vc_send_long, %d bytes\n", len); -+ -+ /* len + header */ -+ if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) { -+ DSSERR("unable to send long packet: packet too long.\n"); -+ return -EINVAL; -+ } -+ -+ dsi_vc_write_long_header(channel, data_type, len, ecc); -+ -+ /*dsi_vc_print_status(0); */ -+ -+ p = data; -+ for (i = 0; i < len >> 2; i++) { -+ if (dsi.debug_write) -+ DSSDBG("\tsending full packet %d\n", i); -+ /*dsi_vc_print_status(0); */ -+ -+ b1 = *p++; -+ b2 = *p++; -+ b3 = *p++; -+ b4 = *p++; -+ -+ dsi_vc_write_long_payload(channel, b1, b2, b3, b4); -+ } -+ -+ i = len % 4; -+ if (i) { -+ b1 = 0; b2 = 0; b3 = 0; -+ -+ if (dsi.debug_write) -+ DSSDBG("\tsending remainder bytes %d\n", i); -+ -+ switch (i) { -+ case 3: -+ b1 = *p++; -+ b2 = *p++; -+ b3 = *p++; -+ break; -+ case 2: -+ b1 = *p++; -+ b2 = *p++; -+ break; -+ case 1: -+ b1 = *p++; -+ break; -+ } -+ -+ dsi_vc_write_long_payload(channel, b1, b2, b3, 0); -+ } -+ -+ return r; -+} -+ -+static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) -+{ -+ u32 r; -+ u8 data_id; -+ -+ if (dsi.debug_write) -+ DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", -+ channel, -+ data_type, data & 0xff, (data >> 8) & 0xff); -+ -+ if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) { -+ DSSERR("ERROR FIFO FULL, aborting transfer\n"); -+ return -EINVAL; -+ } -+ -+ data_id = data_type | channel << 6; -+ -+ r = (data_id << 0) | (data << 8) | (ecc << 24); -+ -+ dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r); -+ -+ return 0; -+} -+ -+int dsi_vc_send_null(int channel) -+{ -+ u8 nullpkg[] = {0, 0, 0, 0}; -+ return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0); -+} -+EXPORT_SYMBOL(dsi_vc_send_null); -+ -+int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len) -+{ -+ int r; -+ -+ BUG_ON(len == 0); -+ -+ if (len == 1) { -+ r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0, -+ data[0], 0); -+ } else if (len == 2) { -+ r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1, -+ data[0] | (data[1] << 8), 0); -+ } else { -+ /* 0x39 = DCS Long Write */ -+ r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE, -+ data, 
len, 0); -+ } -+ -+ return r; -+} -+EXPORT_SYMBOL(dsi_vc_dcs_write_nosync); -+ -+int dsi_vc_dcs_write(int channel, u8 *data, int len) -+{ -+ int r; -+ -+ r = dsi_vc_dcs_write_nosync(channel, data, len); -+ if (r) -+ return r; -+ -+ /* Some devices need time to process the msg in low power mode. -+ This also makes the write synchronous, and checks that -+ the peripheral is still alive */ -+ r = dsi_vc_send_bta_sync(channel); -+ -+ return r; -+} -+EXPORT_SYMBOL(dsi_vc_dcs_write); -+ -+int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) -+{ -+ u32 val; -+ u8 dt; -+ int r; -+ -+ if (dsi.debug_read) -+ DSSDBG("dsi_vc_dcs_read\n"); -+ -+ r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); -+ if (r) -+ return r; -+ -+ r = dsi_vc_send_bta_sync(channel); -+ if (r) -+ return r; -+ -+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) { /* RX_FIFO_NOT_EMPTY */ -+ DSSERR("RX fifo empty when trying to read.\n"); -+ return -EIO; -+ } -+ -+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); -+ if (dsi.debug_read) -+ DSSDBG("\theader: %08x\n", val); -+ dt = FLD_GET(val, 5, 0); -+ if (dt == DSI_DT_RX_ACK_WITH_ERR) { -+ u16 err = FLD_GET(val, 23, 8); -+ dsi_show_rx_ack_with_err(err); -+ return -1; -+ -+ } else if (dt == DSI_DT_RX_SHORT_READ_1) { -+ u8 data = FLD_GET(val, 15, 8); -+ if (dsi.debug_read) -+ DSSDBG("\tDCS short response, 1 byte: %02x\n", data); -+ -+ if (buflen < 1) -+ return -1; -+ -+ buf[0] = data; -+ -+ return 1; -+ } else if (dt == DSI_DT_RX_SHORT_READ_2) { -+ u16 data = FLD_GET(val, 23, 8); -+ if (dsi.debug_read) -+ DSSDBG("\tDCS short response, 2 byte: %04x\n", data); -+ -+ if (buflen < 2) -+ return -1; -+ -+ buf[0] = data & 0xff; -+ buf[1] = (data >> 8) & 0xff; -+ -+ return 2; -+ } else if (dt == DSI_DT_RX_DCS_LONG_READ) { -+ int w; -+ int len = FLD_GET(val, 23, 8); -+ if (dsi.debug_read) -+ DSSDBG("\tDCS long response, len %d\n", len); -+ -+ if (len > buflen) -+ return -1; -+ -+ /* two byte checksum ends the packet, not included in len */ -+ for (w = 0; w < len + 2;) { -+ int b; -+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); -+ if (dsi.debug_read) -+ DSSDBG("\t\t%02x %02x %02x %02x\n", -+ (val >> 0) & 0xff, -+ (val >> 8) & 0xff, -+ (val >> 16) & 0xff, -+ (val >> 24) & 0xff); -+ -+ for (b = 0; b < 4; ++b) { -+ if (w < len) -+ buf[w] = (val >> (b * 8)) & 0xff; -+ /* we discard the 2 byte checksum */ -+ ++w; -+ } -+ } -+ -+ return len; -+ -+ } else { -+ DSSERR("\tunknown datatype 0x%02x\n", dt); -+ return -1; -+ } -+} -+EXPORT_SYMBOL(dsi_vc_dcs_read); -+ -+ -+int dsi_vc_set_max_rx_packet_size(int channel, u16 len) -+{ -+ return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE, -+ len, 0); -+} -+EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size); -+ -+ -+static int dsi_set_lp_rx_timeout(int ns, int x4, int x16) -+{ -+ u32 r; -+ unsigned long fck; -+ int ticks; -+ -+ /* ticks in DSI_FCK */ -+ -+ fck = dsi_fclk_rate(); -+ ticks = (fck / 1000 / 1000) * ns / 1000; -+ -+ if (ticks > 0x1fff) { -+ DSSERR("LP_TX_TO too high\n"); -+ return -EINVAL; -+ } -+ -+ r = dsi_read_reg(DSI_TIMING2); -+ r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ -+ r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */ -+ r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */ -+ r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ -+ dsi_write_reg(DSI_TIMING2, r); -+ -+ DSSDBG("LP_RX_TO %ld ns (%#x ticks)\n", -+ (ticks * (x16 ? 16 : 1) * (x4 ? 
4 : 1) * 1000) / -+ (fck / 1000 / 1000), -+ ticks); -+ -+ return 0; -+} -+ -+static int dsi_set_ta_timeout(int ns, int x8, int x16) -+{ -+ u32 r; -+ unsigned long fck; -+ int ticks; -+ -+ /* ticks in DSI_FCK */ -+ -+ fck = dsi_fclk_rate(); -+ ticks = (fck / 1000 / 1000) * ns / 1000; -+ -+ if (ticks > 0x1fff) { -+ DSSERR("TA_TO too high\n"); -+ return -EINVAL; -+ } -+ -+ r = dsi_read_reg(DSI_TIMING1); -+ r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ -+ r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */ -+ r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */ -+ r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ -+ dsi_write_reg(DSI_TIMING1, r); -+ -+ DSSDBG("TA_TO %ld ns (%#x ticks)\n", -+ (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) / -+ (fck / 1000 / 1000), -+ ticks); -+ -+ return 0; -+} -+ -+static int dsi_set_stop_state_counter(int ns, int x4, int x16) -+{ -+ u32 r; -+ unsigned long fck; -+ int ticks; -+ -+ /* ticks in DSI_FCK */ -+ -+ fck = dsi_fclk_rate(); -+ ticks = (fck / 1000 / 1000) * ns / 1000; -+ -+ if (ticks > 0x1fff) { -+ DSSERR("STOP_STATE_COUNTER_IO too high\n"); -+ return -EINVAL; -+ } -+ -+ r = dsi_read_reg(DSI_TIMING1); -+ r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ -+ r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */ -+ r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */ -+ r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ -+ dsi_write_reg(DSI_TIMING1, r); -+ -+ DSSDBG("STOP_STATE_COUNTER %ld ns (%#x ticks)\n", -+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) / -+ (fck / 1000 / 1000), -+ ticks); -+ -+ return 0; -+} -+ -+static int dsi_set_hs_tx_timeout(int ns, int x4, int x16) -+{ -+ u32 r; -+ unsigned long fck; -+ int ticks; -+ -+ /* ticks in TxByteClkHS */ -+ -+ fck = dsi.ddr_clk / 4; -+ ticks = (fck / 1000 / 1000) * ns / 1000; -+ -+ if (ticks > 0x1fff) { -+ DSSERR("HS_TX_TO too high\n"); -+ return -EINVAL; -+ } -+ -+ r = dsi_read_reg(DSI_TIMING2); -+ r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ -+ r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */ -+ r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */ -+ r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ -+ dsi_write_reg(DSI_TIMING2, r); -+ -+ DSSDBG("HS_TX_TO %ld ns (%#x ticks)\n", -+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) / -+ (fck / 1000 / 1000), -+ ticks); -+ -+ return 0; -+} -+static int dsi_proto_config(struct omap_display *display) -+{ -+ u32 r; -+ int buswidth = 0; -+ int div; -+ -+ dsi_config_tx_fifo(DSI_FIFO_SIZE_128, -+ DSI_FIFO_SIZE_0, -+ DSI_FIFO_SIZE_0, -+ DSI_FIFO_SIZE_0); -+ -+ dsi_config_rx_fifo(DSI_FIFO_SIZE_128, -+ DSI_FIFO_SIZE_0, -+ DSI_FIFO_SIZE_0, -+ DSI_FIFO_SIZE_0); -+ -+ /* XXX what values for the timeouts? */ -+ dsi_set_stop_state_counter(1000, 0, 0); -+ -+ dsi_set_ta_timeout(50000, 1, 1); -+ -+ /* 3000ns * 16 */ -+ dsi_set_lp_rx_timeout(3000, 0, 1); -+ -+ /* 10000ns * 4 */ -+ dsi_set_hs_tx_timeout(10000, 1, 0); -+ -+ switch (display->ctrl->pixel_size) { -+ case 16: -+ buswidth = 0; -+ break; -+ case 18: -+ buswidth = 1; -+ break; -+ case 24: -+ buswidth = 2; -+ break; -+ default: -+ BUG(); -+ } -+ -+ r = dsi_read_reg(DSI_CTRL); -+ r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ -+ r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ -+ r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ -+ -+ div = dispc_lclk_rate() / dispc_pclk_rate(); -+ r = FLD_MOD(r, div == 2 ? 
0 : 1, 4, 4); /* VP_CLK_RATIO */ -+ r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */ -+ r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */ -+ r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */ -+ r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */ -+ r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */ -+ r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */ -+ r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */ -+ -+ dsi_write_reg(DSI_CTRL, r); -+ -+ /* we configure vc0 for L4 communication, and -+ * vc1 for dispc */ -+ dsi_vc_config(0); -+ dsi_vc_config_vp(1); -+ -+ /* set all vc targets to peripheral 0 */ -+ dsi.vc[0].dest_per = 0; -+ dsi.vc[1].dest_per = 0; -+ dsi.vc[2].dest_per = 0; -+ dsi.vc[3].dest_per = 0; -+ -+ return 0; -+} -+ -+static void dsi_proto_timings(struct omap_display *display) -+{ -+ unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; -+ unsigned tclk_pre, tclk_post; -+ unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; -+ unsigned ths_trail, ths_exit; -+ unsigned ddr_clk_pre, ddr_clk_post; -+ unsigned enter_hs_mode_lat, exit_hs_mode_lat; -+ unsigned ths_eot; -+ u32 r; -+ -+ r = dsi_read_reg(DSI_DSIPHY_CFG0); -+ ths_prepare = FLD_GET(r, 31, 24); -+ ths_prepare_ths_zero = FLD_GET(r, 23, 16); -+ ths_zero = ths_prepare_ths_zero - ths_prepare; -+ ths_trail = FLD_GET(r, 15, 8); -+ ths_exit = FLD_GET(r, 7, 0); -+ -+ r = dsi_read_reg(DSI_DSIPHY_CFG1); -+ tlpx = FLD_GET(r, 22, 16) * 2; -+ tclk_trail = FLD_GET(r, 15, 8); -+ tclk_zero = FLD_GET(r, 7, 0); -+ -+ r = dsi_read_reg(DSI_DSIPHY_CFG2); -+ tclk_prepare = FLD_GET(r, 7, 0); -+ -+ /* min 8*UI */ -+ tclk_pre = 20; -+ /* min 60ns + 52*UI */ -+ tclk_post = ns2ddr(60) + 26; -+ -+ /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */ -+ if (display->hw_config.u.dsi.data1_lane != 0 && -+ display->hw_config.u.dsi.data2_lane != 0) -+ ths_eot = 2; -+ else -+ ths_eot = 4; -+ -+ ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare, -+ 4); -+ ddr_clk_post = DIV_ROUND_UP(tclk_post + tclk_trail, 4) + ths_eot; -+ -+ BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); -+ BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); -+ -+ r = dsi_read_reg(DSI_CLK_TIMING); -+ r = FLD_MOD(r, ddr_clk_pre, 15, 8); -+ r = FLD_MOD(r, ddr_clk_post, 7, 0); -+ dsi_write_reg(DSI_CLK_TIMING, r); -+ -+ DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", -+ ddr_clk_pre, -+ ddr_clk_post); -+ -+ enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) + -+ DIV_ROUND_UP(ths_prepare, 4) + -+ DIV_ROUND_UP(ths_zero + 3, 4); -+ exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot; -+ -+ r = FLD_VAL(enter_hs_mode_lat, 31, 16) | -+ FLD_VAL(exit_hs_mode_lat, 15, 0); -+ dsi_write_reg(DSI_VM_TIMING7, r); -+ -+ DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", -+ enter_hs_mode_lat, exit_hs_mode_lat); -+} -+ -+ -+#define DSI_DECL_VARS \ -+ int __dsi_cb = 0; u32 __dsi_cv = 0; -+ -+#define DSI_FLUSH(ch) \ -+ if (__dsi_cb > 0) { \ -+ /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \ -+ dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ -+ __dsi_cb = __dsi_cv = 0; \ -+ } -+ -+#define DSI_PUSH(ch, data) \ -+ do { \ -+ __dsi_cv |= (data) << (__dsi_cb * 8); \ -+ /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \ -+ if (++__dsi_cb > 3) \ -+ DSI_FLUSH(ch); \ -+ } while (0) -+ -+static int dsi_update_screen_l4(struct omap_display *display, -+ int x, int y, int w, int h) -+{ -+ /* Note: supports only 24bit colors in 32bit container */ -+ int first = 1; -+ int fifo_stalls = 0; -+ int max_dsi_packet_size; -+ int max_data_per_packet; -+ int 
max_pixels_per_packet; -+ int pixels_left; -+ int bytespp = 3; -+ int scr_width; -+ u32 __iomem *data; -+ int start_offset; -+ int horiz_inc; -+ int current_x; -+ struct omap_overlay *ovl; -+ -+ debug_irq = 0; -+ -+ DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n", -+ x, y, w, h); -+ -+ ovl = display->manager->overlays[0]; -+ -+ if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U) -+ return -EINVAL; -+ -+ if (display->ctrl->pixel_size != 24) -+ return -EINVAL; -+ -+ scr_width = ovl->info.screen_width; -+ data = ovl->info.vaddr; -+ -+ start_offset = scr_width * y + x; -+ horiz_inc = scr_width - w; -+ current_x = x; -+ -+ /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes -+ * in fifo */ -+ -+ /* When using CPU, max long packet size is TX buffer size */ -+ max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4; -+ -+ /* we seem to get better perf if we divide the tx fifo to half, -+ and while the other half is being sent, we fill the other half -+ max_dsi_packet_size /= 2; */ -+ -+ max_data_per_packet = max_dsi_packet_size - 4 - 1; -+ -+ max_pixels_per_packet = max_data_per_packet / bytespp; -+ -+ DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet); -+ -+ display->ctrl->setup_update(display, x, y, w, h); -+ -+ pixels_left = w * h; -+ -+ DSSDBG("total pixels %d\n", pixels_left); -+ -+ data += start_offset; -+ -+#ifdef DEBUG -+ dsi.update_region.x = x; -+ dsi.update_region.y = y; -+ dsi.update_region.w = w; -+ dsi.update_region.h = h; -+ dsi.update_region.bytespp = bytespp; -+#endif -+ -+ perf_mark_start(); -+ -+ while (pixels_left > 0) { -+ /* 0x2c = write_memory_start */ -+ /* 0x3c = write_memory_continue */ -+ u8 dcs_cmd = first ? 0x2c : 0x3c; -+ int pixels; -+ DSI_DECL_VARS; -+ first = 0; -+ -+#if 1 -+ /* using fifo not empty */ -+ /* TX_FIFO_NOT_EMPTY */ -+ while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { -+ udelay(1); -+ fifo_stalls++; -+ if (fifo_stalls > 0xfffff) { -+ DSSERR("fifo stalls overflow, pixels left %d\n", -+ pixels_left); -+ dsi_if_enable(0); -+ return -EIO; -+ } -+ } -+#elif 1 -+ /* using fifo emptiness */ -+ while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < -+ max_dsi_packet_size) { -+ fifo_stalls++; -+ if (fifo_stalls > 0xfffff) { -+ DSSERR("fifo stalls overflow, pixels left %d\n", -+ pixels_left); -+ dsi_if_enable(0); -+ return -EIO; -+ } -+ } -+#else -+ while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) { -+ fifo_stalls++; -+ if (fifo_stalls > 0xfffff) { -+ DSSERR("fifo stalls overflow, pixels left %d\n", -+ pixels_left); -+ dsi_if_enable(0); -+ return -EIO; -+ } -+ } -+#endif -+ pixels = min(max_pixels_per_packet, pixels_left); -+ -+ pixels_left -= pixels; -+ -+ dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE, -+ 1 + pixels * bytespp, 0); -+ -+ DSI_PUSH(0, dcs_cmd); -+ -+ while (pixels-- > 0) { -+ u32 pix = __raw_readl(data++); -+ -+ DSI_PUSH(0, (pix >> 16) & 0xff); -+ DSI_PUSH(0, (pix >> 8) & 0xff); -+ DSI_PUSH(0, (pix >> 0) & 0xff); -+ -+ current_x++; -+ if (current_x == x+w) { -+ current_x = x; -+ data += horiz_inc; -+ } -+ } -+ -+ DSI_FLUSH(0); -+ } -+ -+ perf_show("L4"); -+ -+ return 0; -+} -+ -+#if 0 -+static void dsi_clear_screen_l4(struct omap_display *display, -+ int x, int y, int w, int h) -+{ -+ int first = 1; -+ int fifo_stalls = 0; -+ int max_dsi_packet_size; -+ int max_data_per_packet; -+ int max_pixels_per_packet; -+ int pixels_left; -+ int bytespp = 3; -+ int pixnum; -+ -+ debug_irq = 0; -+ -+ DSSDBG("dsi_clear_screen_l4 (%d,%d %dx%d)\n", -+ x, y, w, h); -+ -+ if (display->ctrl->bpp != 24) -+ return -EINVAL; -+ -+ /* We 
need header(4) + DCSCMD(1) + pixels(numpix*bytespp) -+ * bytes in fifo */ -+ -+ /* When using CPU, max long packet size is TX buffer size */ -+ max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4; -+ -+ max_data_per_packet = max_dsi_packet_size - 4 - 1; -+ -+ max_pixels_per_packet = max_data_per_packet / bytespp; -+ -+ enable_clocks(1); -+ -+ display->ctrl->setup_update(display, x, y, w, h); -+ -+ pixels_left = w * h; -+ -+ dsi.update_region.x = x; -+ dsi.update_region.y = y; -+ dsi.update_region.w = w; -+ dsi.update_region.h = h; -+ dsi.update_region.bytespp = bytespp; -+ -+ start_measuring(); -+ -+ pixnum = 0; -+ -+ while (pixels_left > 0) { -+ /* 0x2c = write_memory_start */ -+ /* 0x3c = write_memory_continue */ -+ u8 dcs_cmd = first ? 0x2c : 0x3c; -+ int pixels; -+ DSI_DECL_VARS; -+ first = 0; -+ -+ /* TX_FIFO_NOT_EMPTY */ -+ while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { -+ fifo_stalls++; -+ if (fifo_stalls > 0xfffff) { -+ DSSERR("fifo stalls overflow\n"); -+ dsi_if_enable(0); -+ enable_clocks(0); -+ return; -+ } -+ } -+ -+ pixels = min(max_pixels_per_packet, pixels_left); -+ -+ pixels_left -= pixels; -+ -+ dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE, -+ 1 + pixels * bytespp, 0); -+ -+ DSI_PUSH(0, dcs_cmd); -+ -+ while (pixels-- > 0) { -+ u32 pix; -+ -+ pix = 0x000000; -+ -+ DSI_PUSH(0, (pix >> 16) & 0xff); -+ DSI_PUSH(0, (pix >> 8) & 0xff); -+ DSI_PUSH(0, (pix >> 0) & 0xff); -+ } -+ -+ DSI_FLUSH(0); -+ } -+ -+ enable_clocks(0); -+ -+ end_measuring("L4 CLEAR"); -+} -+#endif -+ -+static void dsi_setup_update_dispc(struct omap_display *display, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ DSSDBG("dsi_setup_update_dispc(%d,%d %dx%d)\n", -+ x, y, w, h); -+ -+#ifdef DEBUG -+ dsi.update_region.x = x; -+ dsi.update_region.y = y; -+ dsi.update_region.w = w; -+ dsi.update_region.h = h; -+ dsi.update_region.bytespp = 3; // XXX -+#endif -+ -+ dispc_setup_partial_planes(display, &x, &y, &w, &h); -+ -+ dispc_set_lcd_size(w, h); -+} -+ -+static void dsi_setup_autoupdate_dispc(struct omap_display *display) -+{ -+ u16 w, h; -+ -+ display->get_resolution(display, &w, &h); -+ -+#ifdef DEBUG -+ dsi.update_region.x = 0; -+ dsi.update_region.y = 0; -+ dsi.update_region.w = w; -+ dsi.update_region.h = h; -+ dsi.update_region.bytespp = 3; // XXX -+#endif -+ -+ /* the overlay settings may not have been applied, if we were in manual -+ * mode earlier, so do it here */ -+ display->manager->apply(display->manager); -+ -+ dispc_set_lcd_size(w, h); -+ -+ dsi.autoupdate_setup = 0; -+} -+ -+static void dsi_update_screen_dispc(struct omap_display *display, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ int bytespp = 3; -+ int len; -+ int total_len; -+ int packet_payload; -+ int packet_len; -+ u32 l; -+ -+ if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL) -+ DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n", -+ x, y, w, h); -+ -+ len = w * h * bytespp; -+ -+ /* XXX: one packet could be longer, I think? Line buffer is -+ * 1024 x 24bits, but we have to put DCS cmd there also. -+ * 1023 * 3 should work, but causes strange color effects. 
*/ -+ packet_payload = min(w, (u16)1020) * bytespp; -+ -+ packet_len = packet_payload + 1; /* 1 byte for DCS cmd */ -+ total_len = (len / packet_payload) * packet_len; -+ -+ if (len % packet_payload) -+ total_len += (len % packet_payload) + 1; -+ -+ display->ctrl->setup_update(display, x, y, w, h); -+ -+ if (dsi.use_ext_te && display->ctrl->wait_for_te) -+ display->ctrl->wait_for_te(display); -+ -+ if (0) -+ dsi_vc_print_status(1); -+ -+ perf_mark_start(); -+ -+ l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ -+ dsi_write_reg(DSI_VC_TE(1), l); -+ -+ dsi_vc_write_long_header(1, DSI_DT_DCS_LONG_WRITE, packet_len, 0); -+ -+ if (dsi.use_te) -+ l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ -+ else -+ l = FLD_MOD(l, 1, 31, 31); /* TE_START */ -+ dsi_write_reg(DSI_VC_TE(1), l); -+ -+ dispc_disable_sidle(); -+ -+ dispc_enable_lcd_out(1); -+ -+ if (dsi.use_te) -+ dsi_vc_send_bta(1); -+} -+ -+static void framedone_callback(void *data, u32 mask) -+{ -+ if (dsi.framedone_scheduled) { -+ DSSERR("Framedone already scheduled. Bogus FRAMEDONE IRQ?\n"); -+ return; -+ } -+ -+ dispc_enable_sidle(); -+ -+ dsi.framedone_scheduled = 1; -+ -+ /* We get FRAMEDONE when DISPC has finished sending pixels and turns -+ * itself off. However, DSI still has the pixels in its buffers, and -+ * is sending the data. Thus we have to wait until we can do a new -+ * transfer or turn the clocks off. We do that in a separate work -+ * func. */ -+ queue_work(dsi.workqueue, &dsi.framedone_work); -+} -+ -+static void framedone_worker(struct work_struct *work) -+{ -+ u32 l; -+ unsigned long tmo; -+ int i = 0; -+ -+ l = REG_GET(DSI_VC_TE(1), 23, 0); /* TE_SIZE */ -+ -+ /* There shouldn't be much stuff in DSI buffers, if any, so we'll -+ * just busyloop */ -+ if (l > 0) { -+ tmo = jiffies + msecs_to_jiffies(50); -+ while (REG_GET(DSI_VC_TE(1), 23, 0) > 0) { /* TE_SIZE */ -+ i++; -+ if (time_after(jiffies, tmo)) { -+ DSSERR("timeout waiting TE_SIZE to zero\n"); -+ break; -+ } -+ cpu_relax(); -+ } -+ } -+ -+ if (REG_GET(DSI_VC_TE(1), 30, 30)) -+ DSSERR("TE_EN not zero\n"); -+ -+ if (REG_GET(DSI_VC_TE(1), 31, 31)) -+ DSSERR("TE_START not zero\n"); -+ -+ perf_show("DISPC"); -+ -+ if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL) -+ DSSDBG("FRAMEDONE\n"); -+ -+#if 0 -+ if (l) -+ DSSWARN("FRAMEDONE irq too early, %d bytes, %d loops\n", l, i); -+#else -+ if (l > 1024*3) -+ DSSWARN("FRAMEDONE irq too early, %d bytes, %d loops\n", l, i); -+#endif -+ -+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC -+ dispc_fake_vsync_irq(); -+#endif -+ dsi.framedone_scheduled = 0; -+ -+ /* XXX check that fifo is not full. otherwise we would sleep and never -+ * get to process_cmd_fifo below */ -+ /* We check for target_update_mode, not update_mode. 
No reason to push -+ * new updates if we're turning auto update off */ -+ if (dsi.target_update_mode == OMAP_DSS_UPDATE_AUTO) -+ dsi_push_autoupdate(dsi.vc[1].display); -+ -+ atomic_set(&dsi.cmd_pending, 0); -+ dsi_process_cmd_fifo(NULL); -+} -+ -+static void dsi_start_auto_update(struct omap_display *display) -+{ -+ DSSDBG("starting auto update\n"); -+ -+ dsi.autoupdate_setup = 1; -+ -+ dsi_push_autoupdate(display); -+ -+ perf_mark_start_auto(); -+} -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+/* FIFO functions */ -+ -+static void dsi_signal_fifo_waiters(void) -+{ -+ if (atomic_read(&dsi.cmd_fifo_full) > 0) { -+ DSSDBG("SIGNALING: Fifo not full for waiter!\n"); -+ complete(&dsi.cmd_done); -+ atomic_dec(&dsi.cmd_fifo_full); -+ } -+} -+ -+/* returns 1 for async op, and 0 for sync op */ -+static int dsi_do_update(struct omap_display *display, -+ struct dsi_cmd_update *upd) -+{ -+ int r; -+ u16 x = upd->x, y = upd->y, w = upd->w, h = upd->h; -+ u16 dw, dh; -+ -+ if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED) -+ return 0; -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return 0; -+ -+ display->get_resolution(display, &dw, &dh); -+ if (x > dw || y > dh) -+ return 0; -+ -+ if (x + w > dw) -+ w = dw - x; -+ -+ if (y + h > dh) -+ h = dh - y; -+ -+ DSSDBGF("%d,%d %dx%d", x, y, w, h); -+ -+ perf_mark_setup(); -+ -+ if (display->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { -+ dsi_setup_update_dispc(display, x, y, w, h); -+ dsi_update_screen_dispc(display, x, y, w, h); -+ return 1; -+ } else { -+ r = dsi_update_screen_l4(display, x, y, w, h); -+ if (r) -+ DSSERR("L4 update failed\n"); -+ return 0; -+ } -+} -+ -+/* returns 1 for async op, and 0 for sync op */ -+static int dsi_do_autoupdate(struct omap_display *display) -+{ -+ int r; -+ u16 w, h; -+ -+ if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED) -+ return 0; -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return 0; -+ -+ display->get_resolution(display, &w, &h); -+ -+ perf_mark_setup(); -+ -+ if (display->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { -+ if (dsi.autoupdate_setup) -+ dsi_setup_autoupdate_dispc(display); -+ dsi_update_screen_dispc(display, 0, 0, w, h); -+ return 1; -+ } else { -+ r = dsi_update_screen_l4(display, 0, 0, w, h); -+ if (r) -+ DSSERR("L4 update failed\n"); -+ return 0; -+ } -+} -+ -+static void dsi_do_cmd_mem_read(struct omap_display *display, -+ struct dsi_cmd_mem_read *mem_read) -+{ -+ int r; -+ r = display->ctrl->memory_read(display, -+ mem_read->buf, -+ mem_read->size, -+ mem_read->x, -+ mem_read->y, -+ mem_read->w, -+ mem_read->h); -+ -+ *mem_read->ret_size = (size_t)r; -+ complete(mem_read->completion); -+} -+ -+static void dsi_do_cmd_test(struct omap_display *display, -+ struct dsi_cmd_test *test) -+{ -+ int r = 0; -+ -+ DSSDBGF(""); -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return; -+ -+ /* run test first in low speed mode */ -+ dsi_vc_enable_hs(0, 0); -+ -+ if (display->ctrl->run_test) { -+ r = display->ctrl->run_test(display, test->test_num); -+ if (r) -+ goto end; -+ } -+ -+ if (display->panel->run_test) { -+ r = display->panel->run_test(display, test->test_num); -+ if (r) -+ goto end; -+ } -+ -+ /* then in high speed */ -+ dsi_vc_enable_hs(0, 1); -+ -+ if (display->ctrl->run_test) { -+ r = display->ctrl->run_test(display, test->test_num); -+ if (r) -+ goto end; -+ } -+ -+ if (display->panel->run_test) -+ r = display->panel->run_test(display, test->test_num); -+ -+end: -+ dsi_vc_enable_hs(0, 1); -+ -+ *test->result = r; -+ complete(test->completion); -+ -+ DSSDBG("test end\n"); -+} 
-+ -+static void dsi_do_cmd_set_te(struct omap_display *display, bool enable) -+{ -+ if (!display->hw_config.u.dsi.ext_te) -+ dsi.use_te = enable; -+ else -+ dsi.use_ext_te = enable; -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return; -+ -+ display->ctrl->enable_te(display, enable); -+ -+ if (!display->hw_config.u.dsi.ext_te) { -+ if (enable) { -+ /* disable LP_RX_TO, so that we can receive TE. -+ * Time to wait for TE is longer than the timer allows */ -+ REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ -+ } else { -+ REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ -+ } -+ } -+} -+ -+static void dsi_do_cmd_set_update_mode(struct omap_display *display, -+ enum omap_dss_update_mode mode) -+{ -+ dsi.update_mode = mode; -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return; -+ -+ if (mode == OMAP_DSS_UPDATE_AUTO) -+ dsi_start_auto_update(display); -+} -+ -+static void dsi_process_cmd_fifo(struct work_struct *work) -+{ -+ int len; -+ struct dsi_cmd_item p; -+ unsigned long flags; -+ struct omap_display *display; -+ int exit = 0; -+ -+ if (dsi.debug_process) -+ DSSDBGF(""); -+ -+ if (atomic_cmpxchg(&dsi.cmd_pending, 0, 1) == 1) { -+ if (dsi.debug_process) -+ DSSDBG("cmd pending, skip process\n"); -+ return; -+ } -+ -+ while (!exit) { -+ spin_lock_irqsave(dsi.cmd_fifo->lock, flags); -+ -+ len = __kfifo_get(dsi.cmd_fifo, (unsigned char *)&p, -+ sizeof(p)); -+ if (len == 0) { -+ if (dsi.debug_process) -+ DSSDBG("nothing more in fifo, atomic clear\n"); -+ atomic_set(&dsi.cmd_pending, 0); -+ spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags); -+ break; -+ } -+ -+ spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags); -+ -+ BUG_ON(len != sizeof(p)); -+ -+ display = p.display; -+ -+ if (dsi.debug_process) -+ DSSDBG("processing cmd %d\n", p.cmd); -+ -+ switch (p.cmd) { -+ case DSI_CMD_UPDATE: -+ if (dsi_do_update(display, &p.u.r)) { -+ if (dsi.debug_process) -+ DSSDBG("async update\n"); -+ exit = 1; -+ } else { -+ if (dsi.debug_process) -+ DSSDBG("sync update\n"); -+ } -+ break; -+ -+ case DSI_CMD_AUTOUPDATE: -+ if (dsi_do_autoupdate(display)) { -+ if (dsi.debug_process) -+ DSSDBG("async autoupdate\n"); -+ exit = 1; -+ } else { -+ if (dsi.debug_process) -+ DSSDBG("sync autoupdate\n"); -+ } -+ break; -+ -+ case DSI_CMD_SYNC: -+ if (dsi.debug_process) -+ DSSDBG("Signaling SYNC done!\n"); -+ complete(p.u.sync); -+ break; -+ -+ case DSI_CMD_MEM_READ: -+ dsi_do_cmd_mem_read(display, &p.u.mem_read); -+ break; -+ -+ case DSI_CMD_TEST: -+ dsi_do_cmd_test(display, &p.u.test); -+ break; -+ -+ case DSI_CMD_SET_TE: -+ dsi_do_cmd_set_te(display, p.u.te); -+ break; -+ -+ case DSI_CMD_SET_UPDATE_MODE: -+ dsi_do_cmd_set_update_mode(display, p.u.update_mode); -+ break; -+ -+ case DSI_CMD_SET_ROTATE: -+ display->ctrl->set_rotate(display, p.u.rotate); -+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) -+ dsi.autoupdate_setup = 1; -+ break; -+ -+ case DSI_CMD_SET_MIRROR: -+ display->ctrl->set_mirror(display, p.u.mirror); -+ break; -+ -+ default: -+ BUG(); -+ } -+ } -+ -+ if (dsi.debug_process) -+ DSSDBG("exit dsi_process_cmd_fifo\n"); -+ -+ dsi_signal_fifo_waiters(); -+} -+ -+static void dsi_push_cmd(struct dsi_cmd_item *p) -+{ -+ int ret; -+ -+ if (dsi.debug_process) -+ DSSDBGF(""); -+ -+ while (1) { -+ unsigned long flags; -+ unsigned avail, used; -+ -+ spin_lock_irqsave(dsi.cmd_fifo->lock, flags); -+ used = __kfifo_len(dsi.cmd_fifo) / sizeof(struct dsi_cmd_item); -+ avail = DSI_CMD_FIFO_LEN - used; -+ -+ if (dsi.debug_process) -+ DSSDBG("%u/%u items left in fifo\n", avail, used); -+ -+ if (avail == 
0) { -+ if (dsi.debug_process) -+ DSSDBG("cmd fifo full, waiting...\n"); -+ spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags); -+ atomic_inc(&dsi.cmd_fifo_full); -+ wait_for_completion(&dsi.cmd_done); -+ if (dsi.debug_process) -+ DSSDBG("cmd fifo not full, woke up\n"); -+ continue; -+ } -+ -+ ret = __kfifo_put(dsi.cmd_fifo, (unsigned char *)p, -+ sizeof(*p)); -+ -+ spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags); -+ -+ BUG_ON(ret != sizeof(*p)); -+ -+ break; -+ } -+ -+ queue_work(dsi.workqueue, &dsi.process_work); -+} -+ -+static void dsi_push_update(struct omap_display *display, -+ int x, int y, int w, int h) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_UPDATE; -+ -+ p.u.r.x = x; -+ p.u.r.y = y; -+ p.u.r.w = w; -+ p.u.r.h = h; -+ -+ DSSDBG("pushing UPDATE %d,%d %dx%d\n", x, y, w, h); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_autoupdate(struct omap_display *display) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_AUTOUPDATE; -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_sync(struct omap_display *display, -+ struct completion *sync_comp) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_SYNC; -+ p.u.sync = sync_comp; -+ -+ DSSDBG("pushing SYNC\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_mem_read(struct omap_display *display, -+ struct dsi_cmd_mem_read *mem_read) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_MEM_READ; -+ p.u.mem_read = *mem_read; -+ -+ DSSDBG("pushing MEM_READ\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_test(struct omap_display *display, int test_num, -+ int *result, struct completion *completion) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_TEST; -+ p.u.test.test_num = test_num; -+ p.u.test.result = result; -+ p.u.test.completion = completion; -+ -+ DSSDBG("pushing TEST\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_set_te(struct omap_display *display, bool enable) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_SET_TE; -+ p.u.te = enable; -+ -+ DSSDBG("pushing SET_TE\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_set_update_mode(struct omap_display *display, -+ enum omap_dss_update_mode mode) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_SET_UPDATE_MODE; -+ p.u.update_mode = mode; -+ -+ DSSDBG("pushing SET_UPDATE_MODE\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_set_rotate(struct omap_display *display, int rotate) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_SET_ROTATE; -+ p.u.rotate = rotate; -+ -+ DSSDBG("pushing SET_ROTATE\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static void dsi_push_set_mirror(struct omap_display *display, int mirror) -+{ -+ struct dsi_cmd_item p; -+ -+ p.display = display; -+ p.cmd = DSI_CMD_SET_MIRROR; -+ p.u.mirror = mirror; -+ -+ DSSDBG("pushing SET_MIRROR\n"); -+ -+ dsi_push_cmd(&p); -+} -+ -+static int dsi_wait_sync(struct omap_display *display) -+{ -+ long wait = msecs_to_jiffies(2000); -+ struct completion compl; -+ -+ DSSDBGF(""); -+ -+ init_completion(&compl); -+ dsi_push_sync(display, &compl); -+ -+ DSSDBG("Waiting for SYNC to happen...\n"); -+ wait = wait_for_completion_timeout(&compl, wait); -+ DSSDBG("Released from SYNC\n"); -+ -+ if (wait == 0) { -+ DSSERR("timeout waiting sync\n"); -+ return -ETIME; -+ } -+ -+ return 0; -+} -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+/* Display funcs */ -+ -+static int 
dsi_display_init_dispc(struct omap_display *display) -+{ -+ int r; -+ -+ r = omap_dispc_register_isr(framedone_callback, NULL, -+ DISPC_IRQ_FRAMEDONE); -+ if (r) { -+ DSSERR("can't get FRAMEDONE irq\n"); -+ return r; -+ } -+ -+ dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT); -+ -+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI); -+ dispc_enable_fifohandcheck(1); -+ -+ dispc_set_tft_data_lines(display->ctrl->pixel_size); -+ -+ { -+ struct omap_video_timings timings = { -+ .hsw = 1, -+ .hfp = 1, -+ .hbp = 1, -+ .vsw = 1, -+ .vfp = 0, -+ .vbp = 0, -+ }; -+ -+ dispc_set_lcd_timings(&timings); -+ } -+ -+ return 0; -+} -+ -+static void dsi_display_uninit_dispc(struct omap_display *display) -+{ -+ omap_dispc_unregister_isr(framedone_callback, NULL, -+ DISPC_IRQ_FRAMEDONE); -+} -+ -+static int dsi_display_init_dsi(struct omap_display *display) -+{ -+ struct dsi_clock_info cinfo; -+ int r; -+ -+ _dsi_print_reset_status(); -+ -+ r = dsi_pll_init(1, 0); -+ if (r) -+ goto err0; -+ -+ r = dsi_pll_calc_ddrfreq(display->hw_config.u.dsi.ddr_clk_hz, &cinfo); -+ if (r) -+ goto err1; -+ -+ r = dsi_pll_program(&cinfo); -+ if (r) -+ goto err1; -+ -+ DSSDBG("PLL OK\n"); -+ -+ r = dsi_complexio_init(display); -+ if (r) -+ goto err1; -+ -+ _dsi_print_reset_status(); -+ -+ dsi_proto_timings(display); -+ dsi_set_lp_clk_divisor(display); -+ -+ if (1) -+ _dsi_print_reset_status(); -+ -+ r = dsi_proto_config(display); -+ if (r) -+ goto err2; -+ -+ /* enable interface */ -+ dsi_vc_enable(0, 1); -+ dsi_vc_enable(1, 1); -+ dsi_if_enable(1); -+ dsi_force_tx_stop_mode_io(); -+ -+ if (display->ctrl && display->ctrl->enable) { -+ r = display->ctrl->enable(display); -+ if (r) -+ goto err3; -+ } -+ -+ if (display->panel && display->panel->enable) { -+ r = display->panel->enable(display); -+ if (r) -+ goto err4; -+ } -+ -+ /* enable high-speed after initial config */ -+ dsi_vc_enable_hs(0, 1); -+ -+ return 0; -+err4: -+ if (display->ctrl && display->ctrl->disable) -+ display->ctrl->disable(display); -+err3: -+ dsi_if_enable(0); -+err2: -+ dsi_complexio_uninit(); -+err1: -+ dsi_pll_uninit(); -+err0: -+ return r; -+} -+ -+static void dsi_display_uninit_dsi(struct omap_display *display) -+{ -+ if (display->panel && display->panel->disable) -+ display->panel->disable(display); -+ if (display->ctrl && display->ctrl->disable) -+ display->ctrl->disable(display); -+ -+ dsi_complexio_uninit(); -+ dsi_pll_uninit(); -+} -+ -+static int dsi_core_init(void) -+{ -+ /* Autoidle */ -+ REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0); -+ -+ /* ENWAKEUP */ -+ REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2); -+ -+ /* SIDLEMODE smart-idle */ -+ REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3); -+ -+ _dsi_initialize_irq(); -+ -+ return 0; -+} -+ -+static int dsi_display_enable(struct omap_display *display) -+{ -+ int r = 0; -+ -+ DSSDBG("dsi_display_enable\n"); -+ -+ mutex_lock(&dsi.lock); -+ -+ if (display->state != OMAP_DSS_DISPLAY_DISABLED) { -+ DSSERR("display already enabled\n"); -+ r = -EINVAL; -+ goto err0; -+ } -+ -+ enable_clocks(1); -+ dsi_enable_pll_clock(1); -+ -+ r = _dsi_reset(); -+ if (r) -+ return r; -+ -+ dsi_core_init(); -+ -+ r = dsi_display_init_dispc(display); -+ if (r) -+ goto err1; -+ -+ r = dsi_display_init_dsi(display); -+ if (r) -+ goto err2; -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+ -+ if (dsi.use_te || dsi.use_ext_te) -+ dsi_push_set_te(display, 1); -+ -+ dsi_push_set_update_mode(display, dsi.user_update_mode); -+ dsi.target_update_mode = dsi.user_update_mode; -+ -+ mutex_unlock(&dsi.lock); -+ -+ return dsi_wait_sync(display); -+ 
-+err2: -+ dsi_display_uninit_dispc(display); -+err1: -+ enable_clocks(0); -+ dsi_enable_pll_clock(0); -+err0: -+ mutex_unlock(&dsi.lock); -+ DSSDBG("dsi_display_enable FAILED\n"); -+ return r; -+} -+ -+static void dsi_display_disable(struct omap_display *display) -+{ -+ DSSDBG("dsi_display_disable\n"); -+ -+ mutex_lock(&dsi.lock); -+ -+ if (display->state == OMAP_DSS_DISPLAY_DISABLED || -+ display->state == OMAP_DSS_DISPLAY_SUSPENDED) -+ goto end; -+ -+ if (dsi.target_update_mode != OMAP_DSS_UPDATE_DISABLED) { -+ dsi_push_set_update_mode(display, OMAP_DSS_UPDATE_DISABLED); -+ dsi.target_update_mode = OMAP_DSS_UPDATE_DISABLED; -+ } -+ -+ dsi_wait_sync(display); -+ -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+ -+ dsi_display_uninit_dispc(display); -+ -+ dsi_display_uninit_dsi(display); -+ -+ enable_clocks(0); -+ dsi_enable_pll_clock(0); -+end: -+ mutex_unlock(&dsi.lock); -+} -+ -+static int dsi_display_suspend(struct omap_display *display) -+{ -+ DSSDBG("dsi_display_suspend\n"); -+ -+ dsi_display_disable(display); -+ -+ display->state = OMAP_DSS_DISPLAY_SUSPENDED; -+ -+ return 0; -+} -+ -+static int dsi_display_resume(struct omap_display *display) -+{ -+ DSSDBG("dsi_display_resume\n"); -+ -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+ return dsi_display_enable(display); -+} -+ -+static int dsi_display_update(struct omap_display *display, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h); -+ -+ if (w == 0 || h == 0) -+ return 0; -+ -+ mutex_lock(&dsi.lock); -+ -+ if (dsi.target_update_mode == OMAP_DSS_UPDATE_MANUAL) -+ dsi_push_update(display, x, y, w, h); -+ /* XXX else return error? */ -+ -+ mutex_unlock(&dsi.lock); -+ -+ return 0; -+} -+ -+static int dsi_display_sync(struct omap_display *display) -+{ -+ DSSDBGF(""); -+ return dsi_wait_sync(display); -+} -+ -+static int dsi_display_set_update_mode(struct omap_display *display, -+ enum omap_dss_update_mode mode) -+{ -+ DSSDBGF("%d", mode); -+ -+ mutex_lock(&dsi.lock); -+ -+ if (dsi.target_update_mode != mode) { -+ dsi_push_set_update_mode(display, mode); -+ -+ dsi.target_update_mode = mode; -+ dsi.user_update_mode = mode; -+ } -+ -+ mutex_unlock(&dsi.lock); -+ -+ return dsi_wait_sync(display); -+} -+ -+static enum omap_dss_update_mode dsi_display_get_update_mode( -+ struct omap_display *display) -+{ -+ return dsi.update_mode; -+} -+ -+static int dsi_display_enable_te(struct omap_display *display, bool enable) -+{ -+ DSSDBGF("%d", enable); -+ -+ if (!display->ctrl->enable_te) -+ return -ENOENT; -+ -+ dsi_push_set_te(display, enable); -+ -+ return dsi_wait_sync(display); -+} -+ -+static int dsi_display_get_te(struct omap_display *display) -+{ -+ return dsi.use_te | dsi.use_ext_te; -+} -+ -+ -+ -+static int dsi_display_set_rotate(struct omap_display *display, u8 rotate) -+{ -+ DSSDBGF("%d", rotate); -+ -+ if (!display->ctrl->set_rotate || !display->ctrl->get_rotate) -+ return -EINVAL; -+ -+ dsi_push_set_rotate(display, rotate); -+ -+ return dsi_wait_sync(display); -+} -+ -+static u8 dsi_display_get_rotate(struct omap_display *display) -+{ -+ if (!display->ctrl->set_rotate || !display->ctrl->get_rotate) -+ return 0; -+ -+ return display->ctrl->get_rotate(display); -+} -+ -+static int dsi_display_set_mirror(struct omap_display *display, bool mirror) -+{ -+ DSSDBGF("%d", mirror); -+ -+ if (!display->ctrl->set_mirror || !display->ctrl->get_mirror) -+ return -EINVAL; -+ -+ dsi_push_set_mirror(display, mirror); -+ -+ return dsi_wait_sync(display); -+} -+ -+static bool dsi_display_get_mirror(struct 
omap_display *display) -+{ -+ if (!display->ctrl->set_mirror || !display->ctrl->get_mirror) -+ return 0; -+ -+ return display->ctrl->get_mirror(display); -+} -+ -+static int dsi_display_run_test(struct omap_display *display, int test_num) -+{ -+ long wait = msecs_to_jiffies(60000); -+ struct completion compl; -+ int result; -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return -EIO; -+ -+ DSSDBGF("%d", test_num); -+ -+ init_completion(&compl); -+ -+ dsi_push_test(display, test_num, &result, &compl); -+ -+ DSSDBG("Waiting for SYNC to happen...\n"); -+ wait = wait_for_completion_timeout(&compl, wait); -+ DSSDBG("Released from SYNC\n"); -+ -+ if (wait == 0) { -+ DSSERR("timeout waiting test sync\n"); -+ return -ETIME; -+ } -+ -+ return result; -+} -+ -+static int dsi_display_memory_read(struct omap_display *display, -+ void *buf, size_t size, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ long wait = msecs_to_jiffies(60000); -+ struct completion compl; -+ struct dsi_cmd_mem_read mem_read; -+ size_t ret_size; -+ -+ DSSDBGF(""); -+ -+ if (!display->ctrl->memory_read) -+ return -EINVAL; -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return -EIO; -+ -+ init_completion(&compl); -+ -+ mem_read.x = x; -+ mem_read.y = y; -+ mem_read.w = w; -+ mem_read.h = h; -+ mem_read.buf = buf; -+ mem_read.size = size; -+ mem_read.ret_size = &ret_size; -+ mem_read.completion = &compl; -+ -+ dsi_push_mem_read(display, &mem_read); -+ -+ DSSDBG("Waiting for SYNC to happen...\n"); -+ wait = wait_for_completion_timeout(&compl, wait); -+ DSSDBG("Released from SYNC\n"); -+ -+ if (wait == 0) { -+ DSSERR("timeout waiting mem read sync\n"); -+ return -ETIME; -+ } -+ -+ return ret_size; -+} -+ -+static void dsi_configure_overlay(struct omap_overlay *ovl) -+{ -+ unsigned low, high, size; -+ enum omap_burst_size burst; -+ enum omap_plane plane = ovl->id; -+ -+ burst = OMAP_DSS_BURST_16x32; -+ size = 16 * 32 / 8; -+ -+ dispc_set_burst_size(plane, burst); -+ -+ high = dispc_get_plane_fifo_size(plane) - size; -+ low = 0; -+ dispc_setup_plane_fifo(plane, low, high); -+} -+ -+int dsi_init_display(struct omap_display *display) -+{ -+ DSSDBG("DSI init\n"); -+ -+ display->enable = dsi_display_enable; -+ display->disable = dsi_display_disable; -+ display->suspend = dsi_display_suspend; -+ display->resume = dsi_display_resume; -+ display->update = dsi_display_update; -+ display->sync = dsi_display_sync; -+ display->set_update_mode = dsi_display_set_update_mode; -+ display->get_update_mode = dsi_display_get_update_mode; -+ display->enable_te = dsi_display_enable_te; -+ display->get_te = dsi_display_get_te; -+ -+ display->get_rotate = dsi_display_get_rotate; -+ display->set_rotate = dsi_display_set_rotate; -+ -+ display->get_mirror = dsi_display_get_mirror; -+ display->set_mirror = dsi_display_set_mirror; -+ -+ display->run_test = dsi_display_run_test; -+ display->memory_read = dsi_display_memory_read; -+ -+ display->configure_overlay = dsi_configure_overlay; -+ -+ display->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; -+ -+ dsi.vc[0].display = display; -+ dsi.vc[1].display = display; -+ -+ return 0; -+} -+ -+int dsi_init(void) -+{ -+ u32 rev; -+ -+ spin_lock_init(&dsi.errors_lock); -+ dsi.errors = 0; -+ -+ spin_lock_init(&dsi.cmd_lock); -+ dsi.cmd_fifo = kfifo_alloc( -+ DSI_CMD_FIFO_LEN * sizeof(struct dsi_cmd_item), -+ GFP_KERNEL, -+ &dsi.cmd_lock); -+ -+ init_completion(&dsi.cmd_done); -+ atomic_set(&dsi.cmd_fifo_full, 0); -+ atomic_set(&dsi.cmd_pending, 0); -+ -+ init_completion(&dsi.bta_completion); -+ -+ dsi.workqueue = 
create_singlethread_workqueue("dsi"); -+ INIT_WORK(&dsi.framedone_work, framedone_worker); -+ INIT_WORK(&dsi.process_work, dsi_process_cmd_fifo); -+ -+ mutex_init(&dsi.lock); -+ -+ dsi.target_update_mode = OMAP_DSS_UPDATE_DISABLED; -+ dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED; -+ -+ dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS); -+ if (!dsi.base) { -+ DSSERR("can't ioremap DSI\n"); -+ return -ENOMEM; -+ } -+ -+ enable_clocks(1); -+ -+ rev = dsi_read_reg(DSI_REVISION); -+ printk(KERN_INFO "OMAP DSI rev %d.%d\n", -+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); -+ -+ enable_clocks(0); -+ -+ return 0; -+} -+ -+void dsi_exit(void) -+{ -+ flush_workqueue(dsi.workqueue); -+ destroy_workqueue(dsi.workqueue); -+ -+ iounmap(dsi.base); -+ -+ kfifo_free(dsi.cmd_fifo); -+ -+ DSSDBG("omap_dsi_exit\n"); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dss.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dss.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dss.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dss.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,387 @@ -+/* -+ * linux/drivers/video/omap2/dss/dss.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#define DSS_SUBSYS_NAME "DSS" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include "dss.h" -+ -+#define DSS_BASE 0x48050000 -+ -+#define DSS_SZ_REGS SZ_512 -+ -+struct dss_reg { -+ u16 idx; -+}; -+ -+#define DSS_REG(idx) ((const struct dss_reg) { idx }) -+ -+#define DSS_REVISION DSS_REG(0x0000) -+#define DSS_SYSCONFIG DSS_REG(0x0010) -+#define DSS_SYSSTATUS DSS_REG(0x0014) -+#define DSS_IRQSTATUS DSS_REG(0x0018) -+#define DSS_CONTROL DSS_REG(0x0040) -+#define DSS_SDI_CONTROL DSS_REG(0x0044) -+#define DSS_PLL_CONTROL DSS_REG(0x0048) -+#define DSS_SDI_STATUS DSS_REG(0x005C) -+ -+#define REG_GET(idx, start, end) \ -+ FLD_GET(dss_read_reg(idx), start, end) -+ -+#define REG_FLD_MOD(idx, val, start, end) \ -+ dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end)) -+ -+static struct { -+ void __iomem *base; -+ -+ u32 ctx[DSS_SZ_REGS / sizeof(u32)]; -+} dss; -+ -+static int _omap_dss_wait_reset(void); -+ -+static inline void dss_write_reg(const struct dss_reg idx, u32 val) -+{ -+ __raw_writel(val, dss.base + idx.idx); -+} -+ -+static inline u32 dss_read_reg(const struct dss_reg idx) -+{ -+ return __raw_readl(dss.base + idx.idx); -+} -+ -+#define SR(reg) \ -+ dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg) -+#define RR(reg) \ -+ dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)]) -+ -+int dss_check_context(void) -+{ -+ if (cpu_is_omap24xx()) -+ return 0; -+ -+ if (dss_read_reg(DSS_CONTROL) == 0) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+void dss_save_context(void) -+{ -+ if (cpu_is_omap24xx()) -+ return; -+ -+ SR(SYSCONFIG); -+ SR(CONTROL); -+ -+#ifdef CONFIG_OMAP2_DSS_SDI -+ SR(SDI_CONTROL); -+ SR(PLL_CONTROL); -+#endif -+} -+ -+void dss_restore_context(void) -+{ -+ if (_omap_dss_wait_reset()) -+ DSSERR("DSS not coming out of reset after sleep\n"); -+ -+ RR(SYSCONFIG); -+ RR(CONTROL); -+ -+#ifdef CONFIG_OMAP2_DSS_SDI -+ RR(SDI_CONTROL); -+ RR(PLL_CONTROL); -+#endif -+} -+ -+#undef SR -+#undef RR -+ -+void dss_sdi_init(u8 datapairs) -+{ -+ u32 l; -+ -+ BUG_ON(datapairs > 3 || datapairs < 1); -+ -+ l = dss_read_reg(DSS_SDI_CONTROL); -+ l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */ -+ l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */ -+ l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */ -+ dss_write_reg(DSS_SDI_CONTROL, l); -+ -+ l = dss_read_reg(DSS_PLL_CONTROL); -+ l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */ -+ l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */ -+ l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */ -+ dss_write_reg(DSS_PLL_CONTROL, l); -+} -+ -+int dss_sdi_enable(void) -+{ -+ unsigned long timeout; -+ -+ dispc_pck_free_enable(1); -+ -+ /* Reset SDI PLL */ -+ REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */ -+ udelay(1); /* wait 2x PCLK */ -+ -+ /* Lock SDI PLL */ -+ REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */ -+ -+ /* Waiting for PLL lock request to complete */ -+ timeout = jiffies + msecs_to_jiffies(500); -+ while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) { -+ if (time_after_eq(jiffies, timeout)) { -+ DSSERR("PLL lock request timed out\n"); -+ goto err1; -+ } -+ } -+ -+ /* Clearing PLL_GO bit */ -+ REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28); -+ -+ /* Waiting for PLL to lock */ -+ timeout = jiffies + msecs_to_jiffies(500); -+ while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) { -+ if (time_after_eq(jiffies, timeout)) { -+ DSSERR("PLL lock timed out\n"); -+ goto err1; -+ } -+ } -+ -+ dispc_lcd_enable_signal(1); -+ -+ /* Waiting for SDI reset to complete */ -+ timeout = 
jiffies + msecs_to_jiffies(500); -+ while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) { -+ if (time_after_eq(jiffies, timeout)) { -+ DSSERR("SDI reset timed out\n"); -+ goto err2; -+ } -+ } -+ -+ return 0; -+ -+ err2: -+ dispc_lcd_enable_signal(0); -+ err1: -+ /* Reset SDI PLL */ -+ REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ -+ -+ dispc_pck_free_enable(0); -+ -+ return -ETIMEDOUT; -+} -+ -+void dss_sdi_disable(void) -+{ -+ dispc_lcd_enable_signal(0); -+ -+ dispc_pck_free_enable(0); -+ -+ /* Reset SDI PLL */ -+ REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ -+} -+ -+void dss_dump_regs(struct seq_file *s) -+{ -+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ DUMPREG(DSS_REVISION); -+ DUMPREG(DSS_SYSCONFIG); -+ DUMPREG(DSS_SYSSTATUS); -+ DUMPREG(DSS_IRQSTATUS); -+ DUMPREG(DSS_CONTROL); -+ DUMPREG(DSS_SDI_CONTROL); -+ DUMPREG(DSS_PLL_CONTROL); -+ DUMPREG(DSS_SDI_STATUS); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+#undef DUMPREG -+} -+ -+void dss_select_clk_source(bool dsi, bool dispc) -+{ -+ u32 r; -+ r = dss_read_reg(DSS_CONTROL); -+ r = FLD_MOD(r, dsi, 1, 1); /* DSI_CLK_SWITCH */ -+ r = FLD_MOD(r, dispc, 0, 0); /* DISPC_CLK_SWITCH */ -+ dss_write_reg(DSS_CONTROL, r); -+} -+ -+int dss_get_dsi_clk_source(void) -+{ -+ return FLD_GET(dss_read_reg(DSS_CONTROL), 1, 1); -+} -+ -+int dss_get_dispc_clk_source(void) -+{ -+ return FLD_GET(dss_read_reg(DSS_CONTROL), 0, 0); -+} -+ -+static irqreturn_t dss_irq_handler_omap2(int irq, void *arg) -+{ -+ dispc_irq_handler(); -+ -+ return IRQ_HANDLED; -+} -+ -+static irqreturn_t dss_irq_handler_omap3(int irq, void *arg) -+{ -+ u32 irqstatus; -+ -+ irqstatus = dss_read_reg(DSS_IRQSTATUS); -+ -+ if (irqstatus & (1<<0)) /* DISPC_IRQ */ -+ dispc_irq_handler(); -+#ifdef CONFIG_OMAP2_DSS_DSI -+ if (irqstatus & (1<<1)) /* DSI_IRQ */ -+ dsi_irq_handler(); -+#endif -+ -+ return IRQ_HANDLED; -+} -+ -+static int _omap_dss_wait_reset(void) -+{ -+ unsigned timeout = 1000; -+ -+ while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) { -+ udelay(1); -+ if (!--timeout) { -+ DSSERR("soft reset failed\n"); -+ return -ENODEV; -+ } -+ } -+ -+ return 0; -+} -+ -+int dss_reset(void) -+{ -+ /* Soft reset */ -+ REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1); -+ return _omap_dss_wait_reset(); -+} -+ -+void dss_set_venc_output(enum omap_dss_venc_type type) -+{ -+ int l = 0; -+ -+ if (type == OMAP_DSS_VENC_TYPE_COMPOSITE) -+ l = 0; -+ else if (type == OMAP_DSS_VENC_TYPE_SVIDEO) -+ l = 1; -+ else -+ BUG(); -+ -+ /* venc out selection. 0 = comp, 1 = svideo */ -+ REG_FLD_MOD(DSS_CONTROL, l, 6, 6); -+} -+ -+void dss_set_dac_pwrdn_bgz(bool enable) -+{ -+ REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */ -+} -+ -+int dss_init(bool skip_init) -+{ -+ int r; -+ u32 rev; -+ -+ dss.base = ioremap(DSS_BASE, DSS_SZ_REGS); -+ if (!dss.base) { -+ DSSERR("can't ioremap DSS\n"); -+ r = -ENOMEM; -+ goto fail0; -+ } -+ -+ if (!skip_init) { -+ /* disable LCD and DIGIT output. This seems to fix the synclost -+ * problem that we get, if the bootloader starts the DSS and -+ * the kernel resets it */ -+ omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); -+ -+ /* We need to wait here a bit, otherwise we sometimes start to -+ * get synclost errors, and after that only power cycle will -+ * restore DSS functionality. I have no idea why this happens. -+ * And we have to wait _before_ resetting the DSS, but after -+ * enabling clocks. 
-+ */ -+ msleep(50); -+ -+ dss_reset(); -+ } -+ -+ /* autoidle */ -+ REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0); -+ -+ /* Select DPLL */ -+ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); -+ -+#ifdef CONFIG_OMAP2_DSS_VENC -+ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ -+ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ -+ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ -+#endif -+ -+ r = request_irq(INT_24XX_DSS_IRQ, -+ cpu_is_omap24xx() -+ ? dss_irq_handler_omap2 -+ : dss_irq_handler_omap3, -+ 0, "OMAP DSS", NULL); -+ -+ if (r < 0) { -+ DSSERR("omap2 dss: request_irq failed\n"); -+ goto fail1; -+ } -+ -+ dss_save_context(); -+ -+ rev = dss_read_reg(DSS_REVISION); -+ printk(KERN_INFO "OMAP DSS rev %d.%d\n", -+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); -+ -+ return 0; -+ -+fail1: -+ iounmap(dss.base); -+fail0: -+ return r; -+} -+ -+void dss_exit(void) -+{ -+ free_irq(INT_24XX_DSS_IRQ, NULL); -+ -+ iounmap(dss.base); -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dss.h linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dss.h ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/dss.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/dss.h 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,352 @@ -+/* -+ * linux/drivers/video/omap2/dss/dss.h -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#ifndef __OMAP2_DSS_H -+#define __OMAP2_DSS_H -+ -+#ifdef CONFIG_OMAP2_DSS_DEBUG_SUPPORT -+#define DEBUG -+#endif -+ -+#ifdef DEBUG -+extern unsigned int dss_debug; -+#ifdef DSS_SUBSYS_NAME -+#define DSSDBG(format, ...) \ -+ if (dss_debug) \ -+ printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME ": " format, \ -+ ## __VA_ARGS__) -+#else -+#define DSSDBG(format, ...) \ -+ if (dss_debug) \ -+ printk(KERN_DEBUG "omapdss: " format, ## __VA_ARGS__) -+#endif -+ -+#ifdef DSS_SUBSYS_NAME -+#define DSSDBGF(format, ...) \ -+ if (dss_debug) \ -+ printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME \ -+ ": %s(" format ")\n", \ -+ __func__, \ -+ ## __VA_ARGS__) -+#else -+#define DSSDBGF(format, ...) \ -+ if (dss_debug) \ -+ printk(KERN_DEBUG "omapdss: " \ -+ ": %s(" format ")\n", \ -+ __func__, \ -+ ## __VA_ARGS__) -+#endif -+ -+#else /* DEBUG */ -+#define DSSDBG(format, ...) -+#define DSSDBGF(format, ...) -+#endif -+ -+ -+#ifdef DSS_SUBSYS_NAME -+#define DSSERR(format, ...) \ -+ printk(KERN_ERR "omapdss " DSS_SUBSYS_NAME " error: " format, \ -+ ## __VA_ARGS__) -+#else -+#define DSSERR(format, ...) \ -+ printk(KERN_ERR "omapdss error: " format, ## __VA_ARGS__) -+#endif -+ -+#ifdef DSS_SUBSYS_NAME -+#define DSSINFO(format, ...) \ -+ printk(KERN_INFO "omapdss " DSS_SUBSYS_NAME ": " format, \ -+ ## __VA_ARGS__) -+#else -+#define DSSINFO(format, ...) \ -+ printk(KERN_INFO "omapdss: " format, ## __VA_ARGS__) -+#endif -+ -+#ifdef DSS_SUBSYS_NAME -+#define DSSWARN(format, ...) 
\ -+ printk(KERN_WARNING "omapdss " DSS_SUBSYS_NAME ": " format, \ -+ ## __VA_ARGS__) -+#else -+#define DSSWARN(format, ...) \ -+ printk(KERN_WARNING "omapdss: " format, ## __VA_ARGS__) -+#endif -+ -+/* OMAP TRM gives bitfields as start:end, where start is the higher bit -+ number. For example 7:0 */ -+#define FLD_MASK(start, end) (((1 << (start - end + 1)) - 1) << (end)) -+#define FLD_VAL(val, start, end) (((val) << end) & FLD_MASK(start, end)) -+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) -+#define FLD_MOD(orig, val, start, end) \ -+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) -+ -+#define DISPC_MAX_FCK 173000000 -+ -+enum omap_burst_size { -+ OMAP_DSS_BURST_4x32 = 0, -+ OMAP_DSS_BURST_8x32 = 1, -+ OMAP_DSS_BURST_16x32 = 2, -+}; -+ -+enum omap_parallel_interface_mode { -+ OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */ -+ OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */ -+ OMAP_DSS_PARALLELMODE_DSI, -+}; -+ -+enum dss_clock { -+ DSS_CLK_ICK = 1 << 0, -+ DSS_CLK_FCK1 = 1 << 1, -+ DSS_CLK_FCK2 = 1 << 2, -+ DSS_CLK_54M = 1 << 3, -+ DSS_CLK_96M = 1 << 4, -+}; -+ -+struct dispc_clock_info { -+ /* rates that we get with dividers below */ -+ unsigned long fck; -+ unsigned long lck; -+ unsigned long pck; -+ -+ /* dividers */ -+ u16 fck_div; -+ u16 lck_div; -+ u16 pck_div; -+}; -+ -+struct dsi_clock_info { -+ /* rates that we get with dividers below */ -+ unsigned long fint; -+ unsigned long dsiphy; -+ unsigned long clkin; -+ unsigned long dsi1_pll_fclk; -+ unsigned long dsi2_pll_fclk; -+ unsigned long lck; -+ unsigned long pck; -+ -+ /* dividers */ -+ u16 regn; -+ u16 regm; -+ u16 regm3; -+ u16 regm4; -+ -+ u16 lck_div; -+ u16 pck_div; -+ -+ u8 highfreq; -+ bool use_dss2_fck; -+}; -+ -+struct seq_file; -+struct platform_device; -+ -+/* core */ -+void dss_clk_enable(enum dss_clock clks); -+void dss_clk_disable(enum dss_clock clks); -+unsigned long dss_clk_get_rate(enum dss_clock clk); -+int dss_need_ctx_restore(void); -+void dss_dump_clocks(struct seq_file *s); -+const char *dss_get_def_disp_name(void); -+ -+ -+int dss_dsi_power_up(void); -+void dss_dsi_power_down(void); -+ -+void dss_schedule_reset(void); -+ -+/* display */ -+void dss_init_displays(struct platform_device *pdev); -+void dss_uninit_displays(struct platform_device *pdev); -+int dss_suspend_all_displays(void); -+int dss_resume_all_displays(void); -+struct omap_display *dss_get_display(int no); -+bool dss_use_replication(struct omap_display *display, -+ enum omap_color_mode mode); -+ -+/* manager */ -+int dss_init_overlay_managers(struct platform_device *pdev); -+void dss_uninit_overlay_managers(struct platform_device *pdev); -+ -+/* overlay */ -+void dss_init_overlays(struct platform_device *pdev); -+void dss_uninit_overlays(struct platform_device *pdev); -+void dss_recheck_connections(struct omap_display *display, bool force); -+int dss_check_overlay(struct omap_overlay *ovl, struct omap_display *display); -+void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr); -+ -+/* DSS */ -+int dss_init(bool skip_init); -+void dss_exit(void); -+ -+int dss_check_context(void); -+void dss_save_context(void); -+void dss_restore_context(void); -+ -+int dss_reset(void); -+ -+void dss_dump_regs(struct seq_file *s); -+ -+void dss_sdi_init(u8 datapairs); -+int dss_sdi_enable(void); -+void dss_sdi_disable(void); -+ -+void dss_select_clk_source(bool dsi, bool dispc); -+int dss_get_dsi_clk_source(void); -+int dss_get_dispc_clk_source(void); -+void dss_set_venc_output(enum omap_dss_venc_type type); 
-+void dss_set_dac_pwrdn_bgz(bool enable); -+ -+/* SDI */ -+int sdi_init(bool skip_init); -+void sdi_exit(void); -+int sdi_init_display(struct omap_display *display); -+ -+/* DSI */ -+int dsi_init(void); -+void dsi_exit(void); -+ -+void dsi_dump_clocks(struct seq_file *s); -+void dsi_dump_regs(struct seq_file *s); -+ -+void dsi_save_context(void); -+void dsi_restore_context(void); -+ -+int dsi_init_display(struct omap_display *display); -+void dsi_irq_handler(void); -+unsigned long dsi_get_dsi1_pll_rate(void); -+unsigned long dsi_get_dsi2_pll_rate(void); -+int dsi_pll_calc_pck(bool is_tft, unsigned long req_pck, -+ struct dsi_clock_info *cinfo); -+int dsi_pll_program(struct dsi_clock_info *cinfo); -+int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv); -+void dsi_pll_uninit(void); -+ -+/* DPI */ -+int dpi_init(void); -+void dpi_exit(void); -+int dpi_init_display(struct omap_display *display); -+ -+/* DISPC */ -+int dispc_init(void); -+void dispc_exit(void); -+void dispc_dump_clocks(struct seq_file *s); -+void dispc_dump_regs(struct seq_file *s); -+void dispc_irq_handler(void); -+void dispc_fake_vsync_irq(void); -+ -+void dispc_save_context(void); -+void dispc_restore_context(void); -+ -+void dispc_enable_sidle(void); -+void dispc_disable_sidle(void); -+ -+void dispc_lcd_enable_signal_polarity(bool act_high); -+void dispc_lcd_enable_signal(bool enable); -+void dispc_pck_free_enable(bool enable); -+void dispc_enable_fifohandcheck(bool enable); -+ -+void dispc_set_lcd_size(u16 width, u16 height); -+void dispc_set_digit_size(u16 width, u16 height); -+u32 dispc_get_plane_fifo_size(enum omap_plane plane); -+void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high); -+void dispc_enable_fifomerge(bool enable); -+void dispc_set_overlay_optimization(void); -+void dispc_set_burst_size(enum omap_plane plane, -+ enum omap_burst_size burst_size); -+ -+void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr); -+void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr); -+void dispc_set_plane_pos(enum omap_plane plane, u16 x, u16 y); -+void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height); -+ -+int dispc_setup_plane(enum omap_plane plane, enum omap_channel channel_out, -+ u32 paddr, u16 screen_width, -+ u16 pos_x, u16 pos_y, -+ u16 width, u16 height, -+ u16 out_width, u16 out_height, -+ enum omap_color_mode color_mode, -+ bool ilace, -+ enum omap_dss_rotation_type rotation_type, -+ u8 rotation, bool mirror, -+ u8 global_alpha); -+ -+void dispc_go(enum omap_channel channel); -+void dispc_enable_lcd_out(bool enable); -+void dispc_enable_digit_out(bool enable); -+void dispc_enable_digit_errors(int enable); -+int dispc_enable_plane(enum omap_plane plane, bool enable); -+void dispc_enable_replication(enum omap_plane plane, bool enable); -+ -+void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode); -+void dispc_set_tft_data_lines(u8 data_lines); -+void dispc_set_lcd_display_type(enum omap_lcd_display_type type); -+void dispc_set_loadmode(enum omap_dss_load_mode mode); -+ -+void dispc_set_default_color(enum omap_channel channel, u32 color); -+u32 dispc_get_default_color(enum omap_channel channel); -+void dispc_set_trans_key(enum omap_channel ch, -+ enum omap_dss_color_key_type type, -+ u32 trans_key); -+void dispc_get_trans_key(enum omap_channel ch, -+ enum omap_dss_color_key_type *type, -+ u32 *trans_key); -+void dispc_enable_trans_key(enum omap_channel ch, bool enable); -+void dispc_enable_alpha_blending(enum omap_channel ch, bool enable); -+bool 
dispc_trans_key_enabled(enum omap_channel ch); -+bool dispc_alpha_blending_enabled(enum omap_channel ch); -+ -+void dispc_set_lcd_timings(struct omap_video_timings *timings); -+unsigned long dispc_fclk_rate(void); -+unsigned long dispc_lclk_rate(void); -+unsigned long dispc_pclk_rate(void); -+void dispc_set_pol_freq(struct omap_panel *panel); -+void find_lck_pck_divs(bool is_tft, unsigned long req_pck, unsigned long fck, -+ u16 *lck_div, u16 *pck_div); -+int dispc_calc_clock_div(bool is_tft, unsigned long req_pck, -+ struct dispc_clock_info *cinfo); -+int dispc_set_clock_div(struct dispc_clock_info *cinfo); -+int dispc_get_clock_div(struct dispc_clock_info *cinfo); -+void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div); -+ -+void dispc_setup_partial_planes(struct omap_display *display, -+ u16 *x, u16 *y, u16 *w, u16 *h); -+void dispc_draw_partial_planes(struct omap_display *display); -+ -+ -+/* VENC */ -+int venc_init(struct platform_device *pdev); -+void venc_exit(void); -+void venc_dump_regs(struct seq_file *s); -+int venc_init_display(struct omap_display *display); -+ -+/* RFBI */ -+int rfbi_init(void); -+void rfbi_exit(void); -+void rfbi_dump_regs(struct seq_file *s); -+ -+int rfbi_configure(int rfbi_module, int bpp, int lines); -+void rfbi_enable_rfbi(bool enable); -+void rfbi_transfer_area(u16 width, u16 height, -+ void (callback)(void *data), void *data); -+void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t); -+unsigned long rfbi_get_max_tx_rate(void); -+int rfbi_init_display(struct omap_display *display); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/Kconfig linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/Kconfig ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/Kconfig 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,99 @@ -+menuconfig OMAP2_DSS -+ tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)" -+ depends on ARCH_OMAP2 || ARCH_OMAP3 -+ help -+ OMAP2/3 Display Subsystem support. -+ -+if OMAP2_DSS -+ -+config OMAP2_DSS_VRAM_SIZE -+ int "VRAM size (MB)" -+ range 0 32 -+ default 4 -+ help -+ The amount of SDRAM to reserve at boot time for video RAM use. -+ This VRAM will be used by omapfb and other drivers that need -+ large continuous RAM area for video use. -+ -+ You can also set this with "vram=" kernel argument, or -+ in the board file. -+ -+config OMAP2_DSS_DEBUG_SUPPORT -+ bool "Debug support" -+ default y -+ help -+ This enables debug messages. You need to enable printing -+ with 'debug' module parameter. -+ -+config OMAP2_DSS_RFBI -+ bool "RFBI support" -+ default n -+ help -+ MIPI DBI, or RFBI (Remote Framebuffer Interface), support. -+ -+config OMAP2_DSS_VENC -+ bool "VENC support" -+ default y -+ help -+ OMAP Video Encoder support. -+ -+config OMAP2_DSS_SDI -+ bool "SDI support" -+ depends on ARCH_OMAP3 -+ default n -+ help -+ SDI (Serial Display Interface) support. -+ -+config OMAP2_DSS_DSI -+ bool "DSI support" -+ depends on ARCH_OMAP3 -+ default n -+ help -+ MIPI DSI support. -+ -+config OMAP2_DSS_USE_DSI_PLL -+ bool "Use DSI PLL for PCLK (EXPERIMENTAL)" -+ default n -+ depends on OMAP2_DSS_DSI -+ help -+ Use DSI PLL to generate pixel clock. Currently only for DPI output. -+ DSI PLL can be used to generate higher and more precise pixel clocks. 
-+ -+config OMAP2_DSS_FAKE_VSYNC -+ bool "Fake VSYNC irq from manual update displays" -+ default n -+ help -+ If this is selected, DSI will generate a fake DISPC VSYNC interrupt -+ when DSI has sent a frame. This is only needed with DSI or RFBI -+ displays using manual mode, and you want VSYNC to, for example, -+ time animation. -+ -+config OMAP2_DSS_MIN_FCK_PER_PCK -+ int "Minimum FCK/PCK ratio (for scaling)" -+ range 0 32 -+ default 0 -+ help -+ This can be used to adjust the minimum FCK/PCK ratio. -+ -+ With this you can make sure that DISPC FCK is at least -+ n x PCK. Video plane scaling requires higher FCK than -+ normally. -+ -+ If this is set to 0, there's no extra constraint on the -+ DISPC FCK. However, the FCK will at minimum be -+ 2xPCK (if active matrix) or 3xPCK (if passive matrix). -+ -+ Max FCK is 173MHz, so this doesn't work if your PCK -+ is very high. -+ -+config FB_OMAP_CONSISTENT_DMA_SIZE -+ int "Consistent DMA memory size (MB)" -+ range 1 14 -+ default 2 -+ help -+ Increase the DMA consistent memory size according to your video -+ memory needs, for example if you want to use multiple planes. -+ The size must be 2MB aligned. -+ If unsure say 1. -+ -+endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/Makefile linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/Makefile ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/Makefile 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,6 @@ -+obj-$(CONFIG_OMAP2_DSS) += omapdss.o -+omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o -+omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o -+omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o -+omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o -+omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/manager.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/manager.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/manager.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/manager.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,747 @@ -+/* -+ * linux/drivers/video/omap2/dss/manager.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#define DSS_SUBSYS_NAME "MANAGER" -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "dss.h" -+ -+static int num_managers; -+static struct list_head manager_list; -+ -+static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name); -+} -+ -+static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", -+ mgr->display ? 
mgr->display->name : ""); -+} -+ -+static ssize_t manager_display_store(struct omap_overlay_manager *mgr, const char *buf, size_t size) -+{ -+ int r, i; -+ int len = size; -+ struct omap_display *display = NULL; -+ -+ if (buf[size-1] == '\n') -+ --len; -+ -+ if (len > 0) { -+ for (i = 0; i < omap_dss_get_num_displays(); ++i) { -+ display = dss_get_display(i); -+ -+ if (strncmp(buf, display->name, len) == 0) -+ break; -+ -+ display = NULL; -+ } -+ } -+ -+ if (len > 0 && display == NULL) -+ return -EINVAL; -+ -+ if (display) -+ DSSDBG("display %s found\n", display->name); -+ -+ if (display && display->state == OMAP_DSS_DISPLAY_UNINITIALIZED) { -+ DSSERR("display %s not initialized\n", display->name); -+ return -ENODEV; -+ } -+ -+ if (mgr->display) { -+ r = mgr->unset_display(mgr); -+ if (r) { -+ DSSERR("failed to unset display\n"); -+ return r; -+ } -+ } -+ -+ if (display) { -+ r = mgr->set_display(mgr, display); -+ if (r) { -+ DSSERR("failed to set manager\n"); -+ return r; -+ } -+ -+ r = mgr->apply(mgr); -+ if (r) { -+ DSSERR("failed to apply dispc config\n"); -+ return r; -+ } -+ } -+ -+ return size; -+} -+ -+static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr, -+ char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d", -+ mgr->get_default_color(mgr)); -+} -+ -+static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr, -+ const char *buf, size_t size) -+{ -+ u32 default_color; -+ -+ if (sscanf(buf, "%d", &default_color) != 1) -+ return -EINVAL; -+ omap_dss_lock(); -+ dispc_set_default_color(mgr->id, default_color); -+ omap_dss_unlock(); -+ -+ return size; -+} -+ -+static const char *color_key_type_str[] = { -+ "gfx-destination", -+ "video-source", -+}; -+ -+static ssize_t manager_color_key_type_show(struct omap_overlay_manager *mgr, -+ char *buf) -+{ -+ enum omap_dss_color_key_type key_type; -+ -+ omap_dss_lock(); -+ mgr->get_trans_key_type_and_value(mgr, &key_type, NULL); -+ omap_dss_unlock(); -+ BUG_ON(key_type >= ARRAY_SIZE(color_key_type_str)); -+ -+ return snprintf(buf, PAGE_SIZE, "%s\n", color_key_type_str[key_type]); -+} -+ -+static ssize_t manager_color_key_type_store(struct omap_overlay_manager *mgr, -+ const char *buf, size_t size) -+{ -+ enum omap_dss_color_key_type key_type; -+ u32 key_value; -+ -+ for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST; -+ key_type < ARRAY_SIZE(color_key_type_str); key_type++) { -+ if (sysfs_streq(buf, color_key_type_str[key_type])) -+ break; -+ } -+ if (key_type == ARRAY_SIZE(color_key_type_str)) -+ return -EINVAL; -+ /* OMAP does not support destination color key and alpha blending -+ * simultaneously. 
So if alpha blending and color keying both are -+ * enabled then refrain from setting the color key type to -+ * gfx-destination -+ */ -+ omap_dss_lock(); -+ if (!key_type) { -+ bool color_key_enabled; -+ bool alpha_blending_enabled; -+ color_key_enabled = mgr->get_trans_key_status(mgr); -+ alpha_blending_enabled = mgr->get_alpha_blending_status(mgr); -+ if (color_key_enabled && alpha_blending_enabled) { -+ omap_dss_unlock(); -+ return -EINVAL; -+ } -+ } -+ -+ mgr->get_trans_key_type_and_value(mgr, NULL, &key_value); -+ mgr->set_trans_key_type_and_value(mgr, key_type, key_value); -+ omap_dss_unlock(); -+ -+ return size; -+} -+ -+static ssize_t manager_color_key_value_show(struct omap_overlay_manager *mgr, -+ char *buf) -+{ -+ u32 key_value; -+ -+ omap_dss_lock(); -+ mgr->get_trans_key_type_and_value(mgr, NULL, &key_value); -+ omap_dss_unlock(); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", key_value); -+} -+ -+static ssize_t manager_color_key_value_store(struct omap_overlay_manager *mgr, -+ const char *buf, size_t size) -+{ -+ enum omap_dss_color_key_type key_type; -+ u32 key_value; -+ -+ if (sscanf(buf, "%d", &key_value) != 1) -+ return -EINVAL; -+ omap_dss_lock(); -+ mgr->get_trans_key_type_and_value(mgr, &key_type, NULL); -+ mgr->set_trans_key_type_and_value(mgr, key_type, key_value); -+ omap_dss_unlock(); -+ -+ return size; -+} -+ -+static ssize_t manager_color_key_enabled_show(struct omap_overlay_manager *mgr, -+ char *buf) -+{ -+ int status; -+ -+ omap_dss_lock(); -+ status = mgr->get_trans_key_status(mgr); -+ omap_dss_unlock(); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", status); -+} -+ -+static ssize_t manager_color_key_enabled_store(struct omap_overlay_manager *mgr, -+ const char *buf, size_t size) -+{ -+ int enable; -+ -+ if (sscanf(buf, "%d", &enable) != 1) -+ return -EINVAL; -+ -+ /* OMAP does not support destination color keying and -+ * alpha blending simultaneously. so if alpha blending -+ * is enabled refrain from enabling destination color -+ * keying. -+ */ -+ omap_dss_lock(); -+ if (enable) { -+ bool enabled; -+ enabled = mgr->get_alpha_blending_status(mgr); -+ if (enabled) { -+ enum omap_dss_color_key_type key_type; -+ mgr->get_trans_key_type_and_value(mgr, -+ &key_type, NULL); -+ if (!key_type) { -+ omap_dss_unlock(); -+ return -EINVAL; -+ } -+ } -+ -+ } -+ mgr->enable_trans_key(mgr, enable); -+ omap_dss_unlock(); -+ -+ return size; -+} -+static ssize_t manager_alpha_blending_enabled_show( -+ struct omap_overlay_manager *mgr, char *buf) -+{ -+ int status; -+ -+ omap_dss_lock(); -+ status = mgr->get_alpha_blending_status(mgr); -+ omap_dss_unlock(); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", status); -+} -+static ssize_t manager_alpha_blending_enabled_store( -+ struct omap_overlay_manager *mgr, -+ const char *buf, size_t size) -+{ -+ int enable; -+ if (sscanf(buf, "%d", &enable) != 1) -+ return -EINVAL; -+ /* OMAP does not support destination color keying and -+ * alpha blending simultaneously. 
so if destination -+ * color keying is enabled refrain from enabling -+ * alpha blending -+ */ -+ omap_dss_lock(); -+ if (enable) { -+ bool enabled; -+ enabled = mgr->get_trans_key_status(mgr); -+ if (enabled) { -+ enum omap_dss_color_key_type key_type; -+ mgr->get_trans_key_type_and_value(mgr, &key_type, NULL); -+ if (!key_type) { -+ omap_dss_unlock(); -+ return -EINVAL; -+ } -+ } -+ -+ } -+ mgr->enable_alpha_blending(mgr, enable); -+ omap_dss_unlock(); -+ -+ return size; -+} -+ -+static ssize_t reset_show( -+ struct omap_overlay_manager *mgr, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d\n", 0); -+} -+static ssize_t reset_store( -+ struct omap_overlay_manager *mgr, -+ const char *buf, size_t size) -+{ -+ int enable; -+ if (sscanf(buf, "%d", &enable) != 1) -+ return -EINVAL; -+ if (enable != 0 && enable != 1) -+ return -EINVAL; -+ -+ dss_schedule_reset(); -+ -+ return size; -+} -+ -+struct manager_attribute { -+ struct attribute attr; -+ ssize_t (*show)(struct omap_overlay_manager *, char *); -+ ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t); -+}; -+ -+#define MANAGER_ATTR(_name, _mode, _show, _store) \ -+ struct manager_attribute manager_attr_##_name = \ -+ __ATTR(_name, _mode, _show, _store) -+ -+static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL); -+static MANAGER_ATTR(display, S_IRUGO|S_IWUSR, -+ manager_display_show, manager_display_store); -+static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR, -+ manager_default_color_show, manager_default_color_store); -+static MANAGER_ATTR(color_key_type, S_IRUGO|S_IWUSR, -+ manager_color_key_type_show, manager_color_key_type_store); -+static MANAGER_ATTR(color_key_value, S_IRUGO|S_IWUSR, -+ manager_color_key_value_show, manager_color_key_value_store); -+static MANAGER_ATTR(color_key_enabled, S_IRUGO|S_IWUSR, -+ manager_color_key_enabled_show, manager_color_key_enabled_store); -+static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR, -+ manager_alpha_blending_enabled_show, -+ manager_alpha_blending_enabled_store); -+static MANAGER_ATTR(reset, S_IRUGO|S_IWUSR, -+ reset_show, -+ reset_store); -+ -+ -+static struct attribute *manager_sysfs_attrs[] = { -+ &manager_attr_name.attr, -+ &manager_attr_display.attr, -+ &manager_attr_default_color.attr, -+ &manager_attr_color_key_type.attr, -+ &manager_attr_color_key_value.attr, -+ &manager_attr_color_key_enabled.attr, -+ &manager_attr_alpha_blending_enabled.attr, -+ &manager_attr_reset.attr, -+ NULL -+}; -+ -+static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) -+{ -+ struct omap_overlay_manager *manager; -+ struct manager_attribute *manager_attr; -+ -+ manager = container_of(kobj, struct omap_overlay_manager, kobj); -+ manager_attr = container_of(attr, struct manager_attribute, attr); -+ -+ if (!manager_attr->show) -+ return -ENOENT; -+ -+ return manager_attr->show(manager, buf); -+} -+ -+static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr, -+ const char *buf, size_t size) -+{ -+ struct omap_overlay_manager *manager; -+ struct manager_attribute *manager_attr; -+ -+ manager = container_of(kobj, struct omap_overlay_manager, kobj); -+ manager_attr = container_of(attr, struct manager_attribute, attr); -+ -+ if (!manager_attr->store) -+ return -ENOENT; -+ -+ return manager_attr->store(manager, buf, size); -+} -+ -+static struct sysfs_ops manager_sysfs_ops = { -+ .show = manager_attr_show, -+ .store = manager_attr_store, -+}; -+ -+static struct kobj_type manager_ktype = { -+ .sysfs_ops = &manager_sysfs_ops, 
-+ .default_attrs = manager_sysfs_attrs, -+}; -+ -+static int omap_dss_set_display(struct omap_overlay_manager *mgr, -+ struct omap_display *display) -+{ -+ int i; -+ int r; -+ -+ if (display->manager) { -+ DSSERR("display '%s' already has a manager '%s'\n", -+ display->name, display->manager->name); -+ return -EINVAL; -+ } -+ -+ if ((mgr->supported_displays & display->type) == 0) { -+ DSSERR("display '%s' does not support manager '%s'\n", -+ display->name, mgr->name); -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < mgr->num_overlays; i++) { -+ struct omap_overlay *ovl = mgr->overlays[i]; -+ -+ if (ovl->manager != mgr) -+ continue; -+ -+ r = dss_check_overlay(ovl, display); -+ if (r) -+ return r; -+ } -+ -+ display->manager = mgr; -+ mgr->display = display; -+ -+ return 0; -+} -+ -+static int omap_dss_unset_display(struct omap_overlay_manager *mgr) -+{ -+ if (!mgr->display) { -+ DSSERR("failed to unset display, display not set.\n"); -+ return -EINVAL; -+ } -+ -+ mgr->display->manager = NULL; -+ mgr->display = NULL; -+ -+ return 0; -+} -+ -+ -+static int overlay_enabled(struct omap_overlay *ovl) -+{ -+ return ovl->info.enabled && ovl->manager && ovl->manager->display; -+} -+ -+/* We apply settings to both managers here so that we can use optimizations -+ * like fifomerge. Shadow registers can be changed first and the non-shadowed -+ * should be changed last, at the same time with GO */ -+static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) -+{ -+ int i; -+ int ret = 0; -+ enum omap_dss_update_mode mode; -+ struct omap_display *display; -+ struct omap_overlay *ovl; -+ bool ilace; -+ int outw, outh; -+ int r; -+ int num_planes_enabled = 0; -+ -+ DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ /* Configure normal overlay parameters and disable unused overlays */ -+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) { -+ ovl = omap_dss_get_overlay(i); -+ -+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC)) -+ continue; -+ -+ if (!overlay_enabled(ovl)) { -+ dispc_enable_plane(ovl->id, 0); -+ continue; -+ } -+ -+ display = ovl->manager->display; -+ -+ if (dss_check_overlay(ovl, display)) { -+ dispc_enable_plane(ovl->id, 0); -+ continue; -+ } -+ -+ ++num_planes_enabled; -+ -+ /* On a manual update display, in manual update mode, update() -+ * handles configuring planes */ -+ mode = OMAP_DSS_UPDATE_AUTO; -+ if (display->get_update_mode) -+ mode = display->get_update_mode(mgr->display); -+ -+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE && -+ mode != OMAP_DSS_UPDATE_AUTO) -+ continue; -+ -+ ilace = display->type == OMAP_DISPLAY_TYPE_VENC; -+ -+ if (ovl->info.out_width == 0) -+ outw = ovl->info.width; -+ else -+ outw = ovl->info.out_width; -+ -+ if (ovl->info.out_height == 0) -+ outh = ovl->info.height; -+ else -+ outh = ovl->info.out_height; -+ -+ r = dispc_setup_plane(ovl->id, ovl->manager->id, -+ ovl->info.paddr, -+ ovl->info.screen_width, -+ ovl->info.pos_x, -+ ovl->info.pos_y, -+ ovl->info.width, -+ ovl->info.height, -+ outw, -+ outh, -+ ovl->info.color_mode, -+ ilace, -+ ovl->info.rotation_type, -+ ovl->info.rotation, -+ ovl->info.mirror, -+ ovl->info.global_alpha); -+ -+ if (r) { -+ DSSERR("dispc_setup_plane failed for ovl %d\n", -+ ovl->id); -+ dispc_enable_plane(ovl->id, 0); -+ continue; -+ } -+ -+ if (dss_use_replication(display, ovl->info.color_mode)) -+ dispc_enable_replication(ovl->id, true); -+ else -+ dispc_enable_replication(ovl->id, false); -+ -+ dispc_enable_plane(ovl->id, 1); -+ } -+ -+ 
dispc_set_overlay_optimization(); -+ -+ /* Enable fifo merge if possible */ -+ dispc_enable_fifomerge(num_planes_enabled == 1); -+ -+ /* Go through overlays again. This time we configure fifos. We have to -+ * do this after enabling/disabling fifomerge so that we have correct -+ * knowledge of fifo sizes */ -+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) { -+ ovl = omap_dss_get_overlay(i); -+ -+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC)) -+ continue; -+ -+ if (!overlay_enabled(ovl)) { -+ continue; -+ } -+ -+ ovl->manager->display->configure_overlay(ovl); -+ } -+ -+ /* Try to prevent FIFO undeflows. */ -+ omap_dss_update_min_bus_tput(); -+ -+ /* Issue GO for managers */ -+ list_for_each_entry(mgr, &manager_list, list) { -+ if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC)) -+ continue; -+ -+ display = mgr->display; -+ -+ if (!display) -+ continue; -+ -+ /* We don't need GO with manual update display. LCD iface will -+ * always be turned off after frame, and new settings will -+ * be taken in to use at next update */ -+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) -+ continue; -+ -+ dispc_go(mgr->id); -+ } -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ return ret; -+} -+ -+static void omap_dss_mgr_set_def_color(struct omap_overlay_manager *mgr, -+ u32 color) -+{ -+ dispc_set_default_color(mgr->id, color); -+} -+ -+static void omap_dss_mgr_set_trans_key_type_and_value( -+ struct omap_overlay_manager *mgr, -+ enum omap_dss_color_key_type type, -+ u32 trans_key) -+{ -+ dispc_set_trans_key(mgr->id, type, trans_key); -+} -+static void omap_dss_mgr_get_trans_key_type_and_value( -+ struct omap_overlay_manager *mgr, -+ enum omap_dss_color_key_type *type, -+ u32 *trans_key) -+{ -+ dispc_get_trans_key(mgr->id, type, trans_key); -+} -+ -+static void omap_dss_mgr_enable_trans_key(struct omap_overlay_manager *mgr, -+ bool enable) -+{ -+ dispc_enable_trans_key(mgr->id, enable); -+} -+static void omap_dss_mgr_enable_alpha_blending(struct omap_overlay_manager *mgr, -+ bool enable) -+{ -+ dispc_enable_alpha_blending(mgr->id, enable); -+} -+static bool omap_dss_mgr_get_alpha_blending_status( -+ struct omap_overlay_manager *mgr) -+{ -+ return dispc_alpha_blending_enabled(mgr->id); -+} -+static u32 omap_dss_mgr_get_default_color(struct omap_overlay_manager *mgr) -+{ -+ return dispc_get_default_color(mgr->id); -+} -+static bool omap_dss_mgr_get_trans_key_status(struct omap_overlay_manager *mgr) -+{ -+ return dispc_trans_key_enabled(mgr->id); -+} -+ -+static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager) -+{ -+ ++num_managers; -+ list_add_tail(&manager->list, &manager_list); -+} -+ -+int dss_init_overlay_managers(struct platform_device *pdev) -+{ -+ int i, r; -+ -+ INIT_LIST_HEAD(&manager_list); -+ -+ num_managers = 0; -+ -+ for (i = 0; i < 2; ++i) { -+ struct omap_overlay_manager *mgr; -+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); -+ -+ BUG_ON(mgr == NULL); -+ -+ switch (i) { -+ case 0: -+ mgr->name = "lcd"; -+ mgr->id = OMAP_DSS_CHANNEL_LCD; -+ mgr->supported_displays = -+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | -+ OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI; -+ break; -+ case 1: -+ mgr->name = "tv"; -+ mgr->id = OMAP_DSS_CHANNEL_DIGIT; -+ mgr->supported_displays = OMAP_DISPLAY_TYPE_VENC; -+ break; -+ } -+ -+ mgr->set_display = &omap_dss_set_display; -+ mgr->unset_display = &omap_dss_unset_display; -+ mgr->apply = &omap_dss_mgr_apply; -+ mgr->set_default_color = &omap_dss_mgr_set_def_color; -+ mgr->set_trans_key_type_and_value = -+ 
&omap_dss_mgr_set_trans_key_type_and_value; -+ mgr->get_trans_key_type_and_value = -+ &omap_dss_mgr_get_trans_key_type_and_value; -+ mgr->enable_trans_key = &omap_dss_mgr_enable_trans_key; -+ mgr->get_trans_key_status = &omap_dss_mgr_get_trans_key_status; -+ mgr->enable_alpha_blending = -+ &omap_dss_mgr_enable_alpha_blending; -+ mgr->get_alpha_blending_status = -+ omap_dss_mgr_get_alpha_blending_status; -+ mgr->get_default_color = &omap_dss_mgr_get_default_color; -+ mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC; -+ -+ dss_overlay_setup_dispc_manager(mgr); -+ -+ omap_dss_add_overlay_manager(mgr); -+ -+ r = kobject_init_and_add(&mgr->kobj, &manager_ktype, -+ &pdev->dev.kobj, "manager%d", i); -+ -+ if (r) { -+ DSSERR("failed to create sysfs file\n"); -+ continue; -+ } -+ } -+ -+ return 0; -+} -+ -+void dss_uninit_overlay_managers(struct platform_device *pdev) -+{ -+ struct omap_overlay_manager *mgr; -+ -+ while (!list_empty(&manager_list)) { -+ mgr = list_first_entry(&manager_list, -+ struct omap_overlay_manager, list); -+ list_del(&mgr->list); -+ kobject_del(&mgr->kobj); -+ kobject_put(&mgr->kobj); -+ kfree(mgr); -+ } -+ -+ num_managers = 0; -+} -+ -+int omap_dss_get_num_overlay_managers(void) -+{ -+ return num_managers; -+} -+EXPORT_SYMBOL(omap_dss_get_num_overlay_managers); -+ -+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num) -+{ -+ int i = 0; -+ struct omap_overlay_manager *mgr; -+ -+ list_for_each_entry(mgr, &manager_list, list) { -+ if (i++ == num) -+ return mgr; -+ } -+ -+ return NULL; -+} -+EXPORT_SYMBOL(omap_dss_get_overlay_manager); -+ -+#ifdef L4_EXAMPLE -+static int ovl_mgr_apply_l4(struct omap_overlay_manager *mgr) -+{ -+ DSSDBG("omap_dss_mgr_apply_l4(%s)\n", mgr->name); -+ -+ return 0; -+} -+#endif -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/overlay.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/overlay.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/overlay.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/overlay.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,655 @@ -+/* -+ * linux/drivers/video/omap2/dss/overlay.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#define DSS_SUBSYS_NAME "OVERLAY" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "dss.h" -+ -+static int num_overlays; -+static struct list_head overlay_list; -+ -+static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name); -+} -+ -+static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%s\n", -+ ovl->manager ? 
ovl->manager->name : ""); -+} -+ -+static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf, size_t size) -+{ -+ int i, r; -+ struct omap_overlay_manager *mgr = NULL; -+ int len = size; -+ -+ if (buf[size-1] == '\n') -+ --len; -+ -+ if (len > 0) { -+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { -+ mgr = omap_dss_get_overlay_manager(i); -+ -+ if (strncmp(buf, mgr->name, len) == 0) -+ break; -+ -+ mgr = NULL; -+ } -+ } -+ -+ if (len > 0 && mgr == NULL) -+ return -EINVAL; -+ -+ if (mgr) -+ DSSDBG("manager %s found\n", mgr->name); -+ -+ if (mgr != ovl->manager) { -+ /* detach old manager */ -+ if (ovl->manager) { -+ r = ovl->unset_manager(ovl); -+ if (r) { -+ DSSERR("detach failed\n"); -+ return r; -+ } -+ } -+ -+ if (mgr) { -+ r = ovl->set_manager(ovl, mgr); -+ if (r) { -+ DSSERR("Failed to attach overlay\n"); -+ return r; -+ } -+ } -+ } -+ -+ if (ovl->manager) { -+ omap_dss_lock(); -+ r = ovl->manager->apply(ovl->manager); -+ omap_dss_unlock(); -+ if (r) -+ return r; -+ } -+ -+ return size; -+} -+ -+static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d,%d\n", -+ ovl->info.width, ovl->info.height); -+} -+ -+static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width); -+} -+ -+static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d,%d\n", -+ ovl->info.pos_x, ovl->info.pos_y); -+} -+ -+static ssize_t overlay_position_store(struct omap_overlay *ovl, -+ const char *buf, size_t size) -+{ -+ int r; -+ char *last; -+ struct omap_overlay_info info; -+ -+ ovl->get_overlay_info(ovl, &info); -+ -+ info.pos_x = simple_strtoul(buf, &last, 10); -+ ++last; -+ if (last - buf >= size) -+ return -EINVAL; -+ -+ info.pos_y = simple_strtoul(last, &last, 10); -+ -+ if ((r = ovl->set_overlay_info(ovl, &info))) -+ return r; -+ -+ if (ovl->manager) { -+ omap_dss_lock(); -+ r = ovl->manager->apply(ovl->manager); -+ omap_dss_unlock(); -+ if (r) -+ return r; -+ } -+ -+ return size; -+} -+ -+static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d,%d\n", -+ ovl->info.out_width, ovl->info.out_height); -+} -+ -+static ssize_t overlay_output_size_store(struct omap_overlay *ovl, -+ const char *buf, size_t size) -+{ -+ int r; -+ char *last; -+ struct omap_overlay_info info; -+ -+ ovl->get_overlay_info(ovl, &info); -+ -+ info.out_width = simple_strtoul(buf, &last, 10); -+ ++last; -+ if (last - buf >= size) -+ return -EINVAL; -+ -+ info.out_height = simple_strtoul(last, &last, 10); -+ -+ if ((r = ovl->set_overlay_info(ovl, &info))) -+ return r; -+ -+ if (ovl->manager) { -+ omap_dss_lock(); -+ r = ovl->manager->apply(ovl->manager); -+ omap_dss_unlock(); -+ if (r) -+ return r; -+ } -+ -+ return size; -+} -+ -+static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled); -+} -+ -+static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, size_t size) -+{ -+ int r; -+ struct omap_overlay_info info; -+ -+ ovl->get_overlay_info(ovl, &info); -+ -+ info.enabled = simple_strtoul(buf, NULL, 10); -+ -+ if ((r = ovl->set_overlay_info(ovl, &info))) -+ return r; -+ -+ if (ovl->manager) { -+ omap_dss_lock(); -+ r = ovl->manager->apply(ovl->manager); -+ omap_dss_unlock(); -+ -+ if (r) -+ return r; -+ } -+ -+ return size; -+} -+ -+static 
ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "%d\n", -+ ovl->info.global_alpha); -+} -+ -+static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, -+ const char *buf, size_t size) -+{ -+ int r; -+ struct omap_overlay_info info; -+ -+ ovl->get_overlay_info(ovl, &info); -+ -+ /* Video1 plane does not support global alpha -+ * to always make it 255 completely opaque -+ */ -+ if (ovl->id == OMAP_DSS_VIDEO1) -+ info.global_alpha = 255; -+ else -+ info.global_alpha = simple_strtoul(buf, NULL, 10); -+ -+ if ((r = ovl->set_overlay_info(ovl, &info))) -+ return r; -+ -+ if (ovl->manager) { -+ omap_dss_lock(); -+ r = ovl->manager->apply(ovl->manager); -+ omap_dss_unlock(); -+ if (r) -+ return r; -+ } -+ -+ return size; -+} -+ -+struct overlay_attribute { -+ struct attribute attr; -+ ssize_t (*show)(struct omap_overlay *, char *); -+ ssize_t (*store)(struct omap_overlay *, const char *, size_t); -+}; -+ -+#define OVERLAY_ATTR(_name, _mode, _show, _store) \ -+ struct overlay_attribute overlay_attr_##_name = \ -+ __ATTR(_name, _mode, _show, _store) -+ -+static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL); -+static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR, -+ overlay_manager_show, overlay_manager_store); -+static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL); -+static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL); -+static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR, -+ overlay_position_show, overlay_position_store); -+static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR, -+ overlay_output_size_show, overlay_output_size_store); -+static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR, -+ overlay_enabled_show, overlay_enabled_store); -+static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR, -+ overlay_global_alpha_show, overlay_global_alpha_store); -+ -+static struct attribute *overlay_sysfs_attrs[] = { -+ &overlay_attr_name.attr, -+ &overlay_attr_manager.attr, -+ &overlay_attr_input_size.attr, -+ &overlay_attr_screen_width.attr, -+ &overlay_attr_position.attr, -+ &overlay_attr_output_size.attr, -+ &overlay_attr_enabled.attr, -+ &overlay_attr_global_alpha.attr, -+ NULL -+}; -+ -+static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) -+{ -+ struct omap_overlay *overlay; -+ struct overlay_attribute *overlay_attr; -+ -+ overlay = container_of(kobj, struct omap_overlay, kobj); -+ overlay_attr = container_of(attr, struct overlay_attribute, attr); -+ -+ if (!overlay_attr->show) -+ return -ENOENT; -+ -+ return overlay_attr->show(overlay, buf); -+} -+ -+static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr, -+ const char *buf, size_t size) -+{ -+ struct omap_overlay *overlay; -+ struct overlay_attribute *overlay_attr; -+ -+ overlay = container_of(kobj, struct omap_overlay, kobj); -+ overlay_attr = container_of(attr, struct overlay_attribute, attr); -+ -+ if (!overlay_attr->store) -+ return -ENOENT; -+ -+ return overlay_attr->store(overlay, buf, size); -+} -+ -+static struct sysfs_ops overlay_sysfs_ops = { -+ .show = overlay_attr_show, -+ .store = overlay_attr_store, -+}; -+ -+static struct kobj_type overlay_ktype = { -+ .sysfs_ops = &overlay_sysfs_ops, -+ .default_attrs = overlay_sysfs_attrs, -+}; -+ -+/* Check if overlay parameters are compatible with display */ -+int dss_check_overlay(struct omap_overlay *ovl, struct omap_display *display) -+{ -+ struct omap_overlay_info *info; -+ u16 outw, outh; -+ u16 dw, dh; -+ -+ if (!display) -+ return 0; -+ -+ if 
(!ovl->info.enabled) -+ return 0; -+ -+ info = &ovl->info; -+ -+ if (info->paddr == 0) { -+ DSSDBG("check_overlay failed: paddr 0\n"); -+ return -EINVAL; -+ } -+ -+ display->get_resolution(display, &dw, &dh); -+ -+ DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n", -+ ovl->id, -+ info->pos_x, info->pos_y, -+ info->width, info->height, -+ info->out_width, info->out_height, -+ dw, dh); -+ -+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { -+ outw = info->width; -+ outh = info->height; -+ } else { -+ if (info->out_width == 0) -+ outw = info->width; -+ else -+ outw = info->out_width; -+ -+ if (info->out_height == 0) -+ outh = info->height; -+ else -+ outh = info->out_height; -+ } -+ -+ if (dw < info->pos_x + outw) { -+ DSSDBG("check_overlay failed 1: %d < %d + %d\n", -+ dw, info->pos_x, outw); -+ return -EINVAL; -+ } -+ -+ if (dh < info->pos_y + outh) { -+ DSSDBG("check_overlay failed 2: %d < %d + %d\n", -+ dh, info->pos_y, outh); -+ return -EINVAL; -+ } -+ -+ if ((ovl->supported_modes & info->color_mode) == 0) { -+ DSSERR("overlay doesn't support mode %d\n", info->color_mode); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int dss_ovl_set_overlay_info(struct omap_overlay *ovl, -+ struct omap_overlay_info *info) -+{ -+ int r; -+ struct omap_overlay_info old_info; -+ -+ old_info = ovl->info; -+ ovl->info = *info; -+ -+ if (ovl->manager) { -+ r = dss_check_overlay(ovl, ovl->manager->display); -+ if (r) { -+ ovl->info = old_info; -+ return r; -+ } -+ } -+ -+ return 0; -+} -+ -+static void dss_ovl_get_overlay_info(struct omap_overlay *ovl, -+ struct omap_overlay_info *info) -+{ -+ *info = ovl->info; -+} -+ -+static int omap_dss_set_manager(struct omap_overlay *ovl, -+ struct omap_overlay_manager *mgr) -+{ -+ int r; -+ -+ if (ovl->manager) { -+ DSSERR("overlay '%s' already has a manager '%s'\n", -+ ovl->name, ovl->manager->name); -+ } -+ -+ r = dss_check_overlay(ovl, mgr->display); -+ if (r) -+ return r; -+ -+ ovl->manager = mgr; -+ -+ return 0; -+} -+ -+static int omap_dss_unset_manager(struct omap_overlay *ovl) -+{ -+ if (!ovl->manager) { -+ DSSERR("failed to detach overlay: manager not set\n"); -+ return -EINVAL; -+ } -+ -+ ovl->manager = NULL; -+ -+ return 0; -+} -+ -+int omap_dss_get_num_overlays(void) -+{ -+ return num_overlays; -+} -+EXPORT_SYMBOL(omap_dss_get_num_overlays); -+ -+struct omap_overlay *omap_dss_get_overlay(int num) -+{ -+ int i = 0; -+ struct omap_overlay *ovl; -+ -+ list_for_each_entry(ovl, &overlay_list, list) { -+ if (i++ == num) -+ return ovl; -+ } -+ -+ return NULL; -+} -+EXPORT_SYMBOL(omap_dss_get_overlay); -+ -+static void omap_dss_add_overlay(struct omap_overlay *overlay) -+{ -+ ++num_overlays; -+ list_add_tail(&overlay->list, &overlay_list); -+} -+ -+static struct omap_overlay *dispc_overlays[3]; -+ -+void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr) -+{ -+ mgr->num_overlays = 3; -+ mgr->overlays = dispc_overlays; -+} -+ -+void dss_init_overlays(struct platform_device *pdev) -+{ -+ int i, r; -+ struct omap_overlay_manager *lcd_mgr; -+ struct omap_overlay_manager *tv_mgr; -+ struct omap_dss_board_info *pdata = pdev->dev.platform_data; -+ -+ INIT_LIST_HEAD(&overlay_list); -+ -+ num_overlays = 0; -+ -+ for (i = 0; i < 3; ++i) { -+ struct omap_overlay *ovl; -+ ovl = kzalloc(sizeof(*ovl), GFP_KERNEL); -+ -+ BUG_ON(ovl == NULL); -+ -+ switch (i) { -+ case 0: -+ ovl->name = "gfx"; -+ ovl->id = OMAP_DSS_GFX; -+ ovl->supported_modes = cpu_is_omap34xx() ? 
-+ OMAP_DSS_COLOR_GFX_OMAP3 : -+ OMAP_DSS_COLOR_GFX_OMAP2; -+ ovl->caps = OMAP_DSS_OVL_CAP_DISPC; -+ ovl->info.global_alpha = 255; -+ ovl->info.fifo_threshold_low = -+ pdata->fifo_thresholds[OMAP_DSS_GFX].low; -+ ovl->info.fifo_threshold_high = -+ pdata->fifo_thresholds[OMAP_DSS_GFX].high; -+ break; -+ case 1: -+ ovl->name = "vid1"; -+ ovl->id = OMAP_DSS_VIDEO1; -+ ovl->supported_modes = cpu_is_omap34xx() ? -+ OMAP_DSS_COLOR_VID1_OMAP3 : -+ OMAP_DSS_COLOR_VID_OMAP2; -+ ovl->caps = OMAP_DSS_OVL_CAP_SCALE | -+ OMAP_DSS_OVL_CAP_DISPC; -+ ovl->info.global_alpha = 255; -+ ovl->info.fifo_threshold_low = -+ pdata->fifo_thresholds[OMAP_DSS_VIDEO1].low; -+ ovl->info.fifo_threshold_high = -+ pdata->fifo_thresholds[OMAP_DSS_VIDEO1].high; -+ break; -+ case 2: -+ ovl->name = "vid2"; -+ ovl->id = OMAP_DSS_VIDEO2; -+ ovl->supported_modes = cpu_is_omap34xx() ? -+ OMAP_DSS_COLOR_VID2_OMAP3 : -+ OMAP_DSS_COLOR_VID_OMAP2; -+ ovl->caps = OMAP_DSS_OVL_CAP_SCALE | -+ OMAP_DSS_OVL_CAP_DISPC; -+ ovl->info.global_alpha = 255; -+ ovl->info.fifo_threshold_low = -+ pdata->fifo_thresholds[OMAP_DSS_VIDEO2].low; -+ ovl->info.fifo_threshold_high = -+ pdata->fifo_thresholds[OMAP_DSS_VIDEO2].high; -+ break; -+ } -+ -+ ovl->set_manager = &omap_dss_set_manager; -+ ovl->unset_manager = &omap_dss_unset_manager; -+ ovl->set_overlay_info = &dss_ovl_set_overlay_info; -+ ovl->get_overlay_info = &dss_ovl_get_overlay_info; -+ -+ omap_dss_add_overlay(ovl); -+ -+ r = kobject_init_and_add(&ovl->kobj, &overlay_ktype, -+ &pdev->dev.kobj, "overlay%d", i); -+ -+ if (r) { -+ DSSERR("failed to create sysfs file\n"); -+ continue; -+ } -+ -+ dispc_overlays[i] = ovl; -+ } -+ -+ lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); -+ tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); -+ -+#ifdef L4_EXAMPLE -+ /* setup L4 overlay as an example */ -+ { -+ static struct omap_overlay ovl = { -+ .name = "l4-ovl", -+ .supported_modes = OMAP_DSS_COLOR_RGB24U, -+ .set_manager = &omap_dss_set_manager, -+ .unset_manager = &omap_dss_unset_manager, -+ .setup_input = &omap_dss_setup_overlay_input, -+ .setup_output = &omap_dss_setup_overlay_output, -+ .enable = &omap_dss_enable_overlay, -+ }; -+ -+ static struct omap_overlay_manager mgr = { -+ .name = "l4", -+ .num_overlays = 1, -+ .overlays = &ovl, -+ .set_display = &omap_dss_set_display, -+ .unset_display = &omap_dss_unset_display, -+ .apply = &ovl_mgr_apply_l4, -+ .supported_displays = -+ OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI, -+ }; -+ -+ omap_dss_add_overlay(&ovl); -+ omap_dss_add_overlay_manager(&mgr); -+ omap_dss_set_manager(&ovl, &mgr); -+ } -+#endif -+} -+ -+/* connect overlays to the new device, if not already connected. if force -+ * selected, connect always. 
*/ -+void dss_recheck_connections(struct omap_display *display, bool force) -+{ -+ int i; -+ struct omap_overlay_manager *lcd_mgr; -+ struct omap_overlay_manager *tv_mgr; -+ struct omap_overlay_manager *mgr = NULL; -+ -+ lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); -+ tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); -+ -+ if (display->type != OMAP_DISPLAY_TYPE_VENC) { -+ if (!lcd_mgr->display || force) { -+ if (lcd_mgr->display) -+ lcd_mgr->unset_display(lcd_mgr); -+ lcd_mgr->set_display(lcd_mgr, display); -+ mgr = lcd_mgr; -+ } -+ } -+ -+ if (display->type == OMAP_DISPLAY_TYPE_VENC) { -+ if (!tv_mgr->display || force) { -+ if (tv_mgr->display) -+ tv_mgr->unset_display(tv_mgr); -+ tv_mgr->set_display(tv_mgr, display); -+ mgr = tv_mgr; -+ } -+ } -+ -+ if (mgr) { -+ for (i = 0; i < 3; i++) { -+ struct omap_overlay *ovl; -+ ovl = omap_dss_get_overlay(i); -+ if (!ovl->manager || force) { -+ if (ovl->manager) -+ omap_dss_unset_manager(ovl); -+ omap_dss_set_manager(ovl, mgr); -+ } -+ } -+ } -+} -+ -+void dss_uninit_overlays(struct platform_device *pdev) -+{ -+ struct omap_overlay *ovl; -+ -+ while (!list_empty(&overlay_list)) { -+ ovl = list_first_entry(&overlay_list, -+ struct omap_overlay, list); -+ list_del(&ovl->list); -+ kobject_del(&ovl->kobj); -+ kobject_put(&ovl->kobj); -+ kfree(ovl); -+ } -+ -+ num_overlays = 0; -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/rfbi.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/rfbi.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/rfbi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/rfbi.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,1306 @@ -+/* -+ * linux/drivers/video/omap2/dss/rfbi.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#define DSS_SUBSYS_NAME "RFBI" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include "dss.h" -+ -+/*#define MEASURE_PERF*/ -+ -+#define RFBI_BASE 0x48050800 -+ -+struct rfbi_reg { u16 idx; }; -+ -+#define RFBI_REG(idx) ((const struct rfbi_reg) { idx }) -+ -+#define RFBI_REVISION RFBI_REG(0x0000) -+#define RFBI_SYSCONFIG RFBI_REG(0x0010) -+#define RFBI_SYSSTATUS RFBI_REG(0x0014) -+#define RFBI_CONTROL RFBI_REG(0x0040) -+#define RFBI_PIXEL_CNT RFBI_REG(0x0044) -+#define RFBI_LINE_NUMBER RFBI_REG(0x0048) -+#define RFBI_CMD RFBI_REG(0x004c) -+#define RFBI_PARAM RFBI_REG(0x0050) -+#define RFBI_DATA RFBI_REG(0x0054) -+#define RFBI_READ RFBI_REG(0x0058) -+#define RFBI_STATUS RFBI_REG(0x005c) -+ -+#define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18) -+#define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18) -+#define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18) -+#define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18) -+#define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18) -+#define RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18) -+ -+#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090) -+#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094) -+ -+#define RFBI_CMD_FIFO_LEN_BYTES (16 * sizeof(struct update_param)) -+ -+#define REG_FLD_MOD(idx, val, start, end) \ -+ rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end)) -+ -+/* To work around an RFBI transfer rate limitation */ -+#define OMAP_RFBI_RATE_LIMIT 1 -+ -+enum omap_rfbi_cycleformat { -+ OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0, -+ OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1, -+ OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2, -+ OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3, -+}; -+ -+enum omap_rfbi_datatype { -+ OMAP_DSS_RFBI_DATATYPE_12 = 0, -+ OMAP_DSS_RFBI_DATATYPE_16 = 1, -+ OMAP_DSS_RFBI_DATATYPE_18 = 2, -+ OMAP_DSS_RFBI_DATATYPE_24 = 3, -+}; -+ -+enum omap_rfbi_parallelmode { -+ OMAP_DSS_RFBI_PARALLELMODE_8 = 0, -+ OMAP_DSS_RFBI_PARALLELMODE_9 = 1, -+ OMAP_DSS_RFBI_PARALLELMODE_12 = 2, -+ OMAP_DSS_RFBI_PARALLELMODE_16 = 3, -+}; -+ -+enum update_cmd { -+ RFBI_CMD_UPDATE = 0, -+ RFBI_CMD_SYNC = 1, -+}; -+ -+static int rfbi_convert_timings(struct rfbi_timings *t); -+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div); -+static void process_cmd_fifo(void); -+ -+static struct { -+ void __iomem *base; -+ -+ unsigned long l4_khz; -+ -+ enum omap_rfbi_datatype datatype; -+ enum omap_rfbi_parallelmode parallelmode; -+ -+ enum omap_rfbi_te_mode te_mode; -+ int te_enabled; -+ -+ void (*framedone_callback)(void *data); -+ void *framedone_callback_data; -+ -+ struct omap_display *display[2]; -+ -+ struct kfifo *cmd_fifo; -+ spinlock_t cmd_lock; -+ struct completion cmd_done; -+ atomic_t cmd_fifo_full; -+ atomic_t cmd_pending; -+#ifdef MEASURE_PERF -+ unsigned perf_bytes; -+ ktime_t perf_setup_time; -+ ktime_t perf_start_time; -+#endif -+} rfbi; -+ -+struct update_region { -+ u16 x; -+ u16 y; -+ u16 w; -+ u16 h; -+}; -+ -+struct update_param { -+ u8 rfbi_module; -+ u8 cmd; -+ -+ union { -+ struct update_region r; -+ struct completion *sync; -+ } par; -+}; -+ -+static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) -+{ -+ __raw_writel(val, rfbi.base + idx.idx); -+} -+ -+static inline u32 rfbi_read_reg(const struct rfbi_reg idx) -+{ -+ return __raw_readl(rfbi.base + idx.idx); -+} -+ -+static void rfbi_enable_clocks(bool enable) -+{ -+ if (enable) -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ else -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+} -+ -+void 
omap_rfbi_write_command(const void *buf, u32 len) -+{ -+ rfbi_enable_clocks(1); -+ switch (rfbi.parallelmode) { -+ case OMAP_DSS_RFBI_PARALLELMODE_8: -+ { -+ const u8 *b = buf; -+ for (; len; len--) -+ rfbi_write_reg(RFBI_CMD, *b++); -+ break; -+ } -+ -+ case OMAP_DSS_RFBI_PARALLELMODE_16: -+ { -+ const u16 *w = buf; -+ BUG_ON(len & 1); -+ for (; len; len -= 2) -+ rfbi_write_reg(RFBI_CMD, *w++); -+ break; -+ } -+ -+ case OMAP_DSS_RFBI_PARALLELMODE_9: -+ case OMAP_DSS_RFBI_PARALLELMODE_12: -+ default: -+ BUG(); -+ } -+ rfbi_enable_clocks(0); -+} -+EXPORT_SYMBOL(omap_rfbi_write_command); -+ -+void omap_rfbi_read_data(void *buf, u32 len) -+{ -+ rfbi_enable_clocks(1); -+ switch (rfbi.parallelmode) { -+ case OMAP_DSS_RFBI_PARALLELMODE_8: -+ { -+ u8 *b = buf; -+ for (; len; len--) { -+ rfbi_write_reg(RFBI_READ, 0); -+ *b++ = rfbi_read_reg(RFBI_READ); -+ } -+ break; -+ } -+ -+ case OMAP_DSS_RFBI_PARALLELMODE_16: -+ { -+ u16 *w = buf; -+ BUG_ON(len & ~1); -+ for (; len; len -= 2) { -+ rfbi_write_reg(RFBI_READ, 0); -+ *w++ = rfbi_read_reg(RFBI_READ); -+ } -+ break; -+ } -+ -+ case OMAP_DSS_RFBI_PARALLELMODE_9: -+ case OMAP_DSS_RFBI_PARALLELMODE_12: -+ default: -+ BUG(); -+ } -+ rfbi_enable_clocks(0); -+} -+EXPORT_SYMBOL(omap_rfbi_read_data); -+ -+void omap_rfbi_write_data(const void *buf, u32 len) -+{ -+ rfbi_enable_clocks(1); -+ switch (rfbi.parallelmode) { -+ case OMAP_DSS_RFBI_PARALLELMODE_8: -+ { -+ const u8 *b = buf; -+ for (; len; len--) -+ rfbi_write_reg(RFBI_PARAM, *b++); -+ break; -+ } -+ -+ case OMAP_DSS_RFBI_PARALLELMODE_16: -+ { -+ const u16 *w = buf; -+ BUG_ON(len & 1); -+ for (; len; len -= 2) -+ rfbi_write_reg(RFBI_PARAM, *w++); -+ break; -+ } -+ -+ case OMAP_DSS_RFBI_PARALLELMODE_9: -+ case OMAP_DSS_RFBI_PARALLELMODE_12: -+ default: -+ BUG(); -+ -+ } -+ rfbi_enable_clocks(0); -+} -+EXPORT_SYMBOL(omap_rfbi_write_data); -+ -+void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, -+ u16 x, u16 y, -+ u16 w, u16 h) -+{ -+ int start_offset = scr_width * y + x; -+ int horiz_offset = scr_width - w; -+ int i; -+ -+ rfbi_enable_clocks(1); -+ -+ if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && -+ rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { -+ const u16 __iomem *pd = buf; -+ pd += start_offset; -+ -+ for (; h; --h) { -+ for (i = 0; i < w; ++i) { -+ const u8 __iomem *b = (const u8 __iomem *)pd; -+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1)); -+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0)); -+ ++pd; -+ } -+ pd += horiz_offset; -+ } -+ } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 && -+ rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { -+ const u32 __iomem *pd = buf; -+ pd += start_offset; -+ -+ for (; h; --h) { -+ for (i = 0; i < w; ++i) { -+ const u8 __iomem *b = (const u8 __iomem *)pd; -+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2)); -+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1)); -+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0)); -+ ++pd; -+ } -+ pd += horiz_offset; -+ } -+ } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && -+ rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) { -+ const u16 __iomem *pd = buf; -+ pd += start_offset; -+ -+ for (; h; --h) { -+ for (i = 0; i < w; ++i) { -+ rfbi_write_reg(RFBI_PARAM, __raw_readw(pd)); -+ ++pd; -+ } -+ pd += horiz_offset; -+ } -+ } else { -+ BUG(); -+ } -+ -+ rfbi_enable_clocks(0); -+} -+EXPORT_SYMBOL(omap_rfbi_write_pixels); -+ -+#ifdef MEASURE_PERF -+static void perf_mark_setup(void) -+{ -+ rfbi.perf_setup_time = ktime_get(); -+} -+ -+static void perf_mark_start(void) -+{ -+ rfbi.perf_start_time = 
ktime_get(); -+} -+ -+static void perf_show(const char *name) -+{ -+ ktime_t t, setup_time, trans_time; -+ u32 total_bytes; -+ u32 setup_us, trans_us, total_us; -+ -+ t = ktime_get(); -+ -+ setup_time = ktime_sub(rfbi.perf_start_time, rfbi.perf_setup_time); -+ setup_us = (u32)ktime_to_us(setup_time); -+ if (setup_us == 0) -+ setup_us = 1; -+ -+ trans_time = ktime_sub(t, rfbi.perf_start_time); -+ trans_us = (u32)ktime_to_us(trans_time); -+ if (trans_us == 0) -+ trans_us = 1; -+ -+ total_us = setup_us + trans_us; -+ -+ total_bytes = rfbi.perf_bytes; -+ -+ DSSINFO("%s update %u us + %u us = %u us (%uHz), %u bytes, " -+ "%u kbytes/sec\n", -+ name, -+ setup_us, -+ trans_us, -+ total_us, -+ 1000*1000 / total_us, -+ total_bytes, -+ total_bytes * 1000 / total_us); -+} -+#else -+#define perf_mark_setup() -+#define perf_mark_start() -+#define perf_show(x) -+#endif -+ -+void rfbi_transfer_area(u16 width, u16 height, -+ void (callback)(void *data), void *data) -+{ -+ u32 l; -+ -+ /*BUG_ON(callback == 0);*/ -+ BUG_ON(rfbi.framedone_callback != NULL); -+ -+ DSSDBG("rfbi_transfer_area %dx%d\n", width, height); -+ -+ dispc_set_lcd_size(width, height); -+ -+ dispc_enable_lcd_out(1); -+ -+ rfbi.framedone_callback = callback; -+ rfbi.framedone_callback_data = data; -+ -+ rfbi_enable_clocks(1); -+ -+ rfbi_write_reg(RFBI_PIXEL_CNT, width * height); -+ -+ l = rfbi_read_reg(RFBI_CONTROL); -+ l = FLD_MOD(l, 1, 0, 0); /* enable */ -+ if (!rfbi.te_enabled) -+ l = FLD_MOD(l, 1, 4, 4); /* ITE */ -+ -+ perf_mark_start(); -+ -+ rfbi_write_reg(RFBI_CONTROL, l); -+} -+ -+static void framedone_callback(void *data, u32 mask) -+{ -+ void (*callback)(void *data); -+ -+ DSSDBG("FRAMEDONE\n"); -+ -+ perf_show("DISPC"); -+ -+ REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0); -+ -+ rfbi_enable_clocks(0); -+ -+ callback = rfbi.framedone_callback; -+ rfbi.framedone_callback = NULL; -+ -+ /*callback(rfbi.framedone_callback_data);*/ -+ -+ atomic_set(&rfbi.cmd_pending, 0); -+ -+ process_cmd_fifo(); -+} -+ -+#if 1 /* VERBOSE */ -+static void rfbi_print_timings(void) -+{ -+ u32 l; -+ u32 time; -+ -+ l = rfbi_read_reg(RFBI_CONFIG(0)); -+ time = 1000000000 / rfbi.l4_khz; -+ if (l & (1 << 4)) -+ time *= 2; -+ -+ DSSDBG("Tick time %u ps\n", time); -+ l = rfbi_read_reg(RFBI_ONOFF_TIME(0)); -+ DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, " -+ "REONTIME %d, REOFFTIME %d\n", -+ l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f, -+ (l >> 20) & 0x0f, (l >> 24) & 0x3f); -+ -+ l = rfbi_read_reg(RFBI_CYCLE_TIME(0)); -+ DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, " -+ "ACCESSTIME %d\n", -+ (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f, -+ (l >> 22) & 0x3f); -+} -+#else -+static void rfbi_print_timings(void) {} -+#endif -+ -+ -+ -+ -+static u32 extif_clk_period; -+ -+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div) -+{ -+ int bus_tick = extif_clk_period * div; -+ return (ps + bus_tick - 1) / bus_tick * bus_tick; -+} -+ -+static int calc_reg_timing(struct rfbi_timings *t, int div) -+{ -+ t->clk_div = div; -+ -+ t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div); -+ -+ t->we_on_time = round_to_extif_ticks(t->we_on_time, div); -+ t->we_off_time = round_to_extif_ticks(t->we_off_time, div); -+ t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div); -+ -+ t->re_on_time = round_to_extif_ticks(t->re_on_time, div); -+ t->re_off_time = round_to_extif_ticks(t->re_off_time, div); -+ t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div); -+ -+ t->access_time = 
round_to_extif_ticks(t->access_time, div); -+ t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div); -+ t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div); -+ -+ DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n", -+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); -+ DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n", -+ t->we_on_time, t->we_off_time, t->re_cycle_time, -+ t->we_cycle_time); -+ DSSDBG("[reg]rdaccess %d cspulse %d\n", -+ t->access_time, t->cs_pulse_width); -+ -+ return rfbi_convert_timings(t); -+} -+ -+static int calc_extif_timings(struct rfbi_timings *t) -+{ -+ u32 max_clk_div; -+ int div; -+ -+ rfbi_get_clk_info(&extif_clk_period, &max_clk_div); -+ for (div = 1; div <= max_clk_div; div++) { -+ if (calc_reg_timing(t, div) == 0) -+ break; -+ } -+ -+ if (div <= max_clk_div) -+ return 0; -+ -+ DSSERR("can't setup timings\n"); -+ return -1; -+} -+ -+ -+void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) -+{ -+ int r; -+ -+ if (!t->converted) { -+ r = calc_extif_timings(t); -+ if (r < 0) -+ DSSERR("Failed to calc timings\n"); -+ } -+ -+ BUG_ON(!t->converted); -+ -+ rfbi_enable_clocks(1); -+ rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]); -+ rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]); -+ -+ /* TIMEGRANULARITY */ -+ REG_FLD_MOD(RFBI_CONFIG(rfbi_module), -+ (t->tim[2] ? 1 : 0), 4, 4); -+ -+ rfbi_print_timings(); -+ rfbi_enable_clocks(0); -+} -+ -+static int ps_to_rfbi_ticks(int time, int div) -+{ -+ unsigned long tick_ps; -+ int ret; -+ -+ /* Calculate in picosecs to yield more exact results */ -+ tick_ps = 1000000000 / (rfbi.l4_khz) * div; -+ -+ ret = (time + tick_ps - 1) / tick_ps; -+ -+ return ret; -+} -+ -+#ifdef OMAP_RFBI_RATE_LIMIT -+unsigned long rfbi_get_max_tx_rate(void) -+{ -+ unsigned long l4_rate, dss1_rate; -+ int min_l4_ticks = 0; -+ int i; -+ -+ /* According to TI this can't be calculated so make the -+ * adjustments for a couple of known frequencies and warn for -+ * others. -+ */ -+ static const struct { -+ unsigned long l4_clk; /* HZ */ -+ unsigned long dss1_clk; /* HZ */ -+ unsigned long min_l4_ticks; -+ } ftab[] = { -+ { 55, 132, 7, }, /* 7.86 MPix/s */ -+ { 110, 110, 12, }, /* 9.16 MPix/s */ -+ { 110, 132, 10, }, /* 11 Mpix/s */ -+ { 120, 120, 10, }, /* 12 Mpix/s */ -+ { 133, 133, 10, }, /* 13.3 Mpix/s */ -+ }; -+ -+ l4_rate = rfbi.l4_khz / 1000; -+ dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000; -+ -+ for (i = 0; i < ARRAY_SIZE(ftab); i++) { -+ /* Use a window instead of an exact match, to account -+ * for different DPLL multiplier / divider pairs. -+ */ -+ if (abs(ftab[i].l4_clk - l4_rate) < 3 && -+ abs(ftab[i].dss1_clk - dss1_rate) < 3) { -+ min_l4_ticks = ftab[i].min_l4_ticks; -+ break; -+ } -+ } -+ if (i == ARRAY_SIZE(ftab)) { -+ /* Can't be sure, return anyway the maximum not -+ * rate-limited. This might cause a problem only for the -+ * tearing synchronisation. 
-+ */ -+ DSSERR("can't determine maximum RFBI transfer rate\n"); -+ return rfbi.l4_khz * 1000; -+ } -+ return rfbi.l4_khz * 1000 / min_l4_ticks; -+} -+#else -+int rfbi_get_max_tx_rate(void) -+{ -+ return rfbi.l4_khz * 1000; -+} -+#endif -+ -+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) -+{ -+ *clk_period = 1000000000 / rfbi.l4_khz; -+ *max_clk_div = 2; -+} -+ -+static int rfbi_convert_timings(struct rfbi_timings *t) -+{ -+ u32 l; -+ int reon, reoff, weon, weoff, cson, csoff, cs_pulse; -+ int actim, recyc, wecyc; -+ int div = t->clk_div; -+ -+ if (div <= 0 || div > 2) -+ return -1; -+ -+ /* Make sure that after conversion it still holds that: -+ * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff, -+ * csoff > cson, csoff >= max(weoff, reoff), actim > reon -+ */ -+ weon = ps_to_rfbi_ticks(t->we_on_time, div); -+ weoff = ps_to_rfbi_ticks(t->we_off_time, div); -+ if (weoff <= weon) -+ weoff = weon + 1; -+ if (weon > 0x0f) -+ return -1; -+ if (weoff > 0x3f) -+ return -1; -+ -+ reon = ps_to_rfbi_ticks(t->re_on_time, div); -+ reoff = ps_to_rfbi_ticks(t->re_off_time, div); -+ if (reoff <= reon) -+ reoff = reon + 1; -+ if (reon > 0x0f) -+ return -1; -+ if (reoff > 0x3f) -+ return -1; -+ -+ cson = ps_to_rfbi_ticks(t->cs_on_time, div); -+ csoff = ps_to_rfbi_ticks(t->cs_off_time, div); -+ if (csoff <= cson) -+ csoff = cson + 1; -+ if (csoff < max(weoff, reoff)) -+ csoff = max(weoff, reoff); -+ if (cson > 0x0f) -+ return -1; -+ if (csoff > 0x3f) -+ return -1; -+ -+ l = cson; -+ l |= csoff << 4; -+ l |= weon << 10; -+ l |= weoff << 14; -+ l |= reon << 20; -+ l |= reoff << 24; -+ -+ t->tim[0] = l; -+ -+ actim = ps_to_rfbi_ticks(t->access_time, div); -+ if (actim <= reon) -+ actim = reon + 1; -+ if (actim > 0x3f) -+ return -1; -+ -+ wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div); -+ if (wecyc < weoff) -+ wecyc = weoff; -+ if (wecyc > 0x3f) -+ return -1; -+ -+ recyc = ps_to_rfbi_ticks(t->re_cycle_time, div); -+ if (recyc < reoff) -+ recyc = reoff; -+ if (recyc > 0x3f) -+ return -1; -+ -+ cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div); -+ if (cs_pulse > 0x3f) -+ return -1; -+ -+ l = wecyc; -+ l |= recyc << 6; -+ l |= cs_pulse << 12; -+ l |= actim << 22; -+ -+ t->tim[1] = l; -+ -+ t->tim[2] = div - 1; -+ -+ t->converted = 1; -+ -+ return 0; -+} -+ -+/* xxx FIX module selection missing */ -+int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, -+ unsigned hs_pulse_time, unsigned vs_pulse_time, -+ int hs_pol_inv, int vs_pol_inv, int extif_div) -+{ -+ int hs, vs; -+ int min; -+ u32 l; -+ -+ hs = ps_to_rfbi_ticks(hs_pulse_time, 1); -+ vs = ps_to_rfbi_ticks(vs_pulse_time, 1); -+ if (hs < 2) -+ return -EDOM; -+ if (mode == OMAP_DSS_RFBI_TE_MODE_2) -+ min = 2; -+ else /* OMAP_DSS_RFBI_TE_MODE_1 */ -+ min = 4; -+ if (vs < min) -+ return -EDOM; -+ if (vs == hs) -+ return -EINVAL; -+ rfbi.te_mode = mode; -+ DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n", -+ mode, hs, vs, hs_pol_inv, vs_pol_inv); -+ -+ rfbi_enable_clocks(1); -+ rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); -+ rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); -+ -+ l = rfbi_read_reg(RFBI_CONFIG(0)); -+ if (hs_pol_inv) -+ l &= ~(1 << 21); -+ else -+ l |= 1 << 21; -+ if (vs_pol_inv) -+ l &= ~(1 << 20); -+ else -+ l |= 1 << 20; -+ rfbi_enable_clocks(0); -+ -+ return 0; -+} -+EXPORT_SYMBOL(omap_rfbi_setup_te); -+ -+/* xxx FIX module selection missing */ -+int omap_rfbi_enable_te(bool enable, unsigned line) -+{ -+ u32 l; -+ -+ DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode); -+ if (line > (1 << 11) - 1) -+ return 
-EINVAL; -+ -+ rfbi_enable_clocks(1); -+ l = rfbi_read_reg(RFBI_CONFIG(0)); -+ l &= ~(0x3 << 2); -+ if (enable) { -+ rfbi.te_enabled = 1; -+ l |= rfbi.te_mode << 2; -+ } else -+ rfbi.te_enabled = 0; -+ rfbi_write_reg(RFBI_CONFIG(0), l); -+ rfbi_write_reg(RFBI_LINE_NUMBER, line); -+ rfbi_enable_clocks(0); -+ -+ return 0; -+} -+EXPORT_SYMBOL(omap_rfbi_enable_te); -+ -+#if 0 -+static void rfbi_enable_config(int enable1, int enable2) -+{ -+ u32 l; -+ int cs = 0; -+ -+ if (enable1) -+ cs |= 1<<0; -+ if (enable2) -+ cs |= 1<<1; -+ -+ rfbi_enable_clocks(1); -+ -+ l = rfbi_read_reg(RFBI_CONTROL); -+ -+ l = FLD_MOD(l, cs, 3, 2); -+ l = FLD_MOD(l, 0, 1, 1); -+ -+ rfbi_write_reg(RFBI_CONTROL, l); -+ -+ -+ l = rfbi_read_reg(RFBI_CONFIG(0)); -+ l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */ -+ /*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */ -+ /*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */ -+ -+ l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */ -+ l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */ -+ l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */ -+ -+ l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0); -+ rfbi_write_reg(RFBI_CONFIG(0), l); -+ -+ rfbi_enable_clocks(0); -+} -+#endif -+ -+int rfbi_configure(int rfbi_module, int bpp, int lines) -+{ -+ u32 l; -+ int cycle1 = 0, cycle2 = 0, cycle3 = 0; -+ enum omap_rfbi_cycleformat cycleformat; -+ enum omap_rfbi_datatype datatype; -+ enum omap_rfbi_parallelmode parallelmode; -+ -+ switch (bpp) { -+ case 12: -+ datatype = OMAP_DSS_RFBI_DATATYPE_12; -+ break; -+ case 16: -+ datatype = OMAP_DSS_RFBI_DATATYPE_16; -+ break; -+ case 18: -+ datatype = OMAP_DSS_RFBI_DATATYPE_18; -+ break; -+ case 24: -+ datatype = OMAP_DSS_RFBI_DATATYPE_24; -+ break; -+ default: -+ BUG(); -+ return 1; -+ } -+ rfbi.datatype = datatype; -+ -+ switch (lines) { -+ case 8: -+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8; -+ break; -+ case 9: -+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9; -+ break; -+ case 12: -+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12; -+ break; -+ case 16: -+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16; -+ break; -+ default: -+ BUG(); -+ return 1; -+ } -+ rfbi.parallelmode = parallelmode; -+ -+ if ((bpp % lines) == 0) { -+ switch (bpp / lines) { -+ case 1: -+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1; -+ break; -+ case 2: -+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1; -+ break; -+ case 3: -+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1; -+ break; -+ default: -+ BUG(); -+ return 1; -+ } -+ } else if ((2 * bpp % lines) == 0) { -+ if ((2 * bpp / lines) == 3) -+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2; -+ else { -+ BUG(); -+ return 1; -+ } -+ } else { -+ BUG(); -+ return 1; -+ } -+ -+ switch (cycleformat) { -+ case OMAP_DSS_RFBI_CYCLEFORMAT_1_1: -+ cycle1 = lines; -+ break; -+ -+ case OMAP_DSS_RFBI_CYCLEFORMAT_2_1: -+ cycle1 = lines; -+ cycle2 = lines; -+ break; -+ -+ case OMAP_DSS_RFBI_CYCLEFORMAT_3_1: -+ cycle1 = lines; -+ cycle2 = lines; -+ cycle3 = lines; -+ break; -+ -+ case OMAP_DSS_RFBI_CYCLEFORMAT_3_2: -+ cycle1 = lines; -+ cycle2 = (lines / 2) | ((lines / 2) << 16); -+ cycle3 = (lines << 16); -+ break; -+ } -+ -+ rfbi_enable_clocks(1); -+ -+ REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */ -+ -+ l = 0; -+ l |= FLD_VAL(parallelmode, 1, 0); -+ l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */ -+ l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */ -+ l |= FLD_VAL(datatype, 6, 5); -+ /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */ -+ l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */ -+ l |= FLD_VAL(cycleformat, 10, 9); -+ l |= FLD_VAL(0, 12, 11); /* 
UNUSEDBITS */ -+ l |= FLD_VAL(0, 16, 16); /* A0POLARITY */ -+ l |= FLD_VAL(0, 17, 17); /* REPOLARITY */ -+ l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */ -+ l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */ -+ l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */ -+ l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */ -+ rfbi_write_reg(RFBI_CONFIG(rfbi_module), l); -+ -+ rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1); -+ rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2); -+ rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3); -+ -+ -+ l = rfbi_read_reg(RFBI_CONTROL); -+ l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */ -+ l = FLD_MOD(l, 0, 1, 1); /* clear bypass */ -+ rfbi_write_reg(RFBI_CONTROL, l); -+ -+ -+ DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n", -+ bpp, lines, cycle1, cycle2, cycle3); -+ -+ rfbi_enable_clocks(0); -+ -+ return 0; -+} -+EXPORT_SYMBOL(rfbi_configure); -+ -+static int rfbi_find_display(struct omap_display *disp) -+{ -+ if (disp == rfbi.display[0]) -+ return 0; -+ -+ if (disp == rfbi.display[1]) -+ return 1; -+ -+ BUG(); -+ return -1; -+} -+ -+ -+static void signal_fifo_waiters(void) -+{ -+ if (atomic_read(&rfbi.cmd_fifo_full) > 0) { -+ /* DSSDBG("SIGNALING: Fifo not full for waiter!\n"); */ -+ complete(&rfbi.cmd_done); -+ atomic_dec(&rfbi.cmd_fifo_full); -+ } -+} -+ -+/* returns 1 for async op, and 0 for sync op */ -+static int do_update(struct omap_display *display, struct update_region *upd) -+{ -+ u16 x = upd->x; -+ u16 y = upd->y; -+ u16 w = upd->w; -+ u16 h = upd->h; -+ -+ perf_mark_setup(); -+ -+ if (display->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { -+ /*display->ctrl->enable_te(display, 1); */ -+ dispc_setup_partial_planes(display, &x, &y, &w, &h); -+ } -+ -+#ifdef MEASURE_PERF -+ rfbi.perf_bytes = w * h * 2; /* XXX always 16bit */ -+#endif -+ -+ display->ctrl->setup_update(display, x, y, w, h); -+ -+ if (display->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { -+ rfbi_transfer_area(w, h, NULL, NULL); -+ return 1; -+ } else { -+ struct omap_overlay *ovl; -+ void __iomem *addr; -+ int scr_width; -+ -+ ovl = display->manager->overlays[0]; -+ scr_width = ovl->info.screen_width; -+ addr = ovl->info.vaddr; -+ -+ omap_rfbi_write_pixels(addr, scr_width, x, y, w, h); -+ -+ perf_show("L4"); -+ -+ return 0; -+ } -+} -+ -+static void process_cmd_fifo(void) -+{ -+ int len; -+ struct update_param p; -+ struct omap_display *display; -+ unsigned long flags; -+ -+ if (atomic_inc_return(&rfbi.cmd_pending) != 1) -+ return; -+ -+ while (true) { -+ spin_lock_irqsave(rfbi.cmd_fifo->lock, flags); -+ -+ len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p, -+ sizeof(struct update_param)); -+ if (len == 0) { -+ DSSDBG("nothing more in fifo\n"); -+ atomic_set(&rfbi.cmd_pending, 0); -+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); -+ break; -+ } -+ -+ /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/ -+ -+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); -+ -+ BUG_ON(len != sizeof(struct update_param)); -+ BUG_ON(p.rfbi_module > 1); -+ -+ display = rfbi.display[p.rfbi_module]; -+ -+ if (p.cmd == RFBI_CMD_UPDATE) { -+ if (do_update(display, &p.par.r)) -+ break; /* async op */ -+ } else if (p.cmd == RFBI_CMD_SYNC) { -+ DSSDBG("Signaling SYNC done!\n"); -+ complete(p.par.sync); -+ } else -+ BUG(); -+ } -+ -+ signal_fifo_waiters(); -+} -+ -+static void rfbi_push_cmd(struct update_param *p) -+{ -+ int ret; -+ -+ while (1) { -+ unsigned long flags; -+ int available; -+ -+ spin_lock_irqsave(rfbi.cmd_fifo->lock, flags); -+ available = RFBI_CMD_FIFO_LEN_BYTES - -+ 
__kfifo_len(rfbi.cmd_fifo); -+ -+/* DSSDBG("%d bytes left in fifo\n", available); */ -+ if (available < sizeof(struct update_param)) { -+ DSSDBG("Going to wait because FIFO FULL..\n"); -+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); -+ atomic_inc(&rfbi.cmd_fifo_full); -+ wait_for_completion(&rfbi.cmd_done); -+ /*DSSDBG("Woke up because fifo not full anymore\n");*/ -+ continue; -+ } -+ -+ ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p, -+ sizeof(struct update_param)); -+/* DSSDBG("pushed %d bytes\n", ret);*/ -+ -+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); -+ -+ BUG_ON(ret != sizeof(struct update_param)); -+ -+ break; -+ } -+} -+ -+static void rfbi_push_update(int rfbi_module, int x, int y, int w, int h) -+{ -+ struct update_param p; -+ -+ p.rfbi_module = rfbi_module; -+ p.cmd = RFBI_CMD_UPDATE; -+ -+ p.par.r.x = x; -+ p.par.r.y = y; -+ p.par.r.w = w; -+ p.par.r.h = h; -+ -+ DSSDBG("RFBI pushed %d,%d %dx%d\n", x, y, w, h); -+ -+ rfbi_push_cmd(&p); -+ -+ process_cmd_fifo(); -+} -+ -+static void rfbi_push_sync(int rfbi_module, struct completion *sync_comp) -+{ -+ struct update_param p; -+ -+ p.rfbi_module = rfbi_module; -+ p.cmd = RFBI_CMD_SYNC; -+ p.par.sync = sync_comp; -+ -+ rfbi_push_cmd(&p); -+ -+ DSSDBG("RFBI sync pushed to cmd fifo\n"); -+ -+ process_cmd_fifo(); -+} -+ -+void rfbi_dump_regs(struct seq_file *s) -+{ -+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) -+ -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ DUMPREG(RFBI_REVISION); -+ DUMPREG(RFBI_SYSCONFIG); -+ DUMPREG(RFBI_SYSSTATUS); -+ DUMPREG(RFBI_CONTROL); -+ DUMPREG(RFBI_PIXEL_CNT); -+ DUMPREG(RFBI_LINE_NUMBER); -+ DUMPREG(RFBI_CMD); -+ DUMPREG(RFBI_PARAM); -+ DUMPREG(RFBI_DATA); -+ DUMPREG(RFBI_READ); -+ DUMPREG(RFBI_STATUS); -+ -+ DUMPREG(RFBI_CONFIG(0)); -+ DUMPREG(RFBI_ONOFF_TIME(0)); -+ DUMPREG(RFBI_CYCLE_TIME(0)); -+ DUMPREG(RFBI_DATA_CYCLE1(0)); -+ DUMPREG(RFBI_DATA_CYCLE2(0)); -+ DUMPREG(RFBI_DATA_CYCLE3(0)); -+ -+ DUMPREG(RFBI_CONFIG(1)); -+ DUMPREG(RFBI_ONOFF_TIME(1)); -+ DUMPREG(RFBI_CYCLE_TIME(1)); -+ DUMPREG(RFBI_DATA_CYCLE1(1)); -+ DUMPREG(RFBI_DATA_CYCLE2(1)); -+ DUMPREG(RFBI_DATA_CYCLE3(1)); -+ -+ DUMPREG(RFBI_VSYNC_WIDTH); -+ DUMPREG(RFBI_HSYNC_WIDTH); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+#undef DUMPREG -+} -+ -+int rfbi_init(void) -+{ -+ u32 rev; -+ u32 l; -+ -+ spin_lock_init(&rfbi.cmd_lock); -+ rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL, -+ &rfbi.cmd_lock); -+ if (IS_ERR(rfbi.cmd_fifo)) -+ return -ENOMEM; -+ -+ init_completion(&rfbi.cmd_done); -+ atomic_set(&rfbi.cmd_fifo_full, 0); -+ atomic_set(&rfbi.cmd_pending, 0); -+ -+ rfbi.base = ioremap(RFBI_BASE, SZ_256); -+ if (!rfbi.base) { -+ DSSERR("can't ioremap RFBI\n"); -+ return -ENOMEM; -+ } -+ -+ rfbi_enable_clocks(1); -+ -+ msleep(10); -+ -+ rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000; -+ -+ /* Enable autoidle and smart-idle */ -+ l = rfbi_read_reg(RFBI_SYSCONFIG); -+ l |= (1 << 0) | (2 << 3); -+ rfbi_write_reg(RFBI_SYSCONFIG, l); -+ -+ rev = rfbi_read_reg(RFBI_REVISION); -+ printk(KERN_INFO "OMAP RFBI rev %d.%d\n", -+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); -+ -+ rfbi_enable_clocks(0); -+ -+ return 0; -+} -+ -+void rfbi_exit(void) -+{ -+ DSSDBG("rfbi_exit\n"); -+ -+ kfifo_free(rfbi.cmd_fifo); -+ -+ iounmap(rfbi.base); -+} -+ -+/* struct omap_display support */ -+static int rfbi_display_update(struct omap_display *display, -+ u16 x, u16 y, u16 w, u16 h) -+{ -+ int rfbi_module; -+ -+ if (w == 0 || h == 0) -+ return 0; -+ -+ rfbi_module = 
rfbi_find_display(display); -+ -+ rfbi_push_update(rfbi_module, x, y, w, h); -+ -+ return 0; -+} -+ -+static int rfbi_display_sync(struct omap_display *display) -+{ -+ struct completion sync_comp; -+ int rfbi_module; -+ -+ rfbi_module = rfbi_find_display(display); -+ -+ init_completion(&sync_comp); -+ rfbi_push_sync(rfbi_module, &sync_comp); -+ DSSDBG("Waiting for SYNC to happen...\n"); -+ wait_for_completion(&sync_comp); -+ DSSDBG("Released from SYNC\n"); -+ return 0; -+} -+ -+static int rfbi_display_enable_te(struct omap_display *display, bool enable) -+{ -+ display->ctrl->enable_te(display, enable); -+ return 0; -+} -+ -+static int rfbi_display_enable(struct omap_display *display) -+{ -+ int r; -+ -+ BUG_ON(display->panel == NULL || display->ctrl == NULL); -+ -+ r = omap_dispc_register_isr(framedone_callback, NULL, -+ DISPC_IRQ_FRAMEDONE); -+ if (r) { -+ DSSERR("can't get FRAMEDONE irq\n"); -+ return r; -+ } -+ -+ dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT); -+ -+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI); -+ -+ dispc_set_tft_data_lines(display->ctrl->pixel_size); -+ -+ rfbi_configure(display->hw_config.u.rfbi.channel, -+ display->ctrl->pixel_size, -+ display->hw_config.u.rfbi.data_lines); -+ -+ rfbi_set_timings(display->hw_config.u.rfbi.channel, -+ &display->ctrl->timings); -+ -+ -+ if (display->ctrl && display->ctrl->enable) { -+ r = display->ctrl->enable(display); -+ if (r) -+ goto err; -+ } -+ -+ if (display->panel && display->panel->enable) { -+ r = display->panel->enable(display); -+ if (r) -+ goto err; -+ } -+ -+ return 0; -+err: -+ return -ENODEV; -+} -+ -+static void rfbi_display_disable(struct omap_display *display) -+{ -+ display->ctrl->disable(display); -+ omap_dispc_unregister_isr(framedone_callback, NULL, -+ DISPC_IRQ_FRAMEDONE); -+} -+ -+int rfbi_init_display(struct omap_display *display) -+{ -+ display->enable = rfbi_display_enable; -+ display->disable = rfbi_display_disable; -+ display->update = rfbi_display_update; -+ display->sync = rfbi_display_sync; -+ display->enable_te = rfbi_display_enable_te; -+ -+ rfbi.display[display->hw_config.u.rfbi.channel] = display; -+ -+ display->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; -+ -+ return 0; -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/sdi.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/sdi.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/sdi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/sdi.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,387 @@ -+/* -+ * linux/drivers/video/omap2/dss/sdi.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#define DSS_SUBSYS_NAME "SDI" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "dss.h" -+ -+#define CONTROL_PADCONF_BASE 0x48002000 -+ -+#define OMAP_SDI_PAD_DIS(pe,pu) ((7 << 0) | /* MODE 7 = safe */ \ -+ (((pe) ? 
1 : 0) << 3) | /* PULL_ENA */ \ -+ (((pu) ? 1 : 0) << 4) | /* PULL_UP */ \ -+ (1 << 8)) /* INPUT_EN */ -+ -+#define OMAP_SDI_PAD_EN (1 << 0) /* MODE 1 = SDI_xx */ -+ -+#define OMAP_SDI_PAD_MASK OMAP_SDI_PAD_DIS(1, 1) -+ -+static struct { -+ bool skip_init; -+ bool update_enabled; -+} sdi; -+ -+/* CONTROL_PADCONF_DSS_DATAXX */ -+static const u16 sdi_pads[] = -+{ -+ 0x0f0, /* 10[ 7..0]:SDI_DAT1N */ -+ 0x0f2, /* 10[15..0]:SDI_DAT1P */ -+ 0x0f4, /* 12[ 7..0]:SDI_DAT2N */ -+ 0x0f6, /* 12[15..0]:SDI_DAT2P */ -+ 0x0f8, /* 14[ 7..0]:SDI_DAT3N */ -+ 0x0fa, /* 14[15..0]:SDI_DAT3P */ -+ 0x108, /* 22[ 7..0]:SDI_CLKN */ -+ 0x10a, /* 22[15..0]:SDI_CLKP */ -+}; -+ -+/* -+ * Check if bootloader / platform code has configured the SDI pads properly. -+ * This means it either configured all required pads for SDI mode, or that it -+ * left all the required pads unconfigured. -+ */ -+static int sdi_pad_init(struct omap_display *display) -+{ -+ unsigned req_map; -+ bool configured = false; -+ bool unconfigured = false; -+ int data_pairs; -+ int i; -+ -+ data_pairs = display->hw_config.u.sdi.datapairs; -+ req_map = (1 << (data_pairs * 2)) - 1; /* data lanes */ -+ req_map |= 3 << 6; /* clk lane */ -+ for (i = 0; i < ARRAY_SIZE(sdi_pads); i++) { -+ u32 reg; -+ u32 val; -+ -+ if (!((1 << i) & req_map)) -+ /* Ignore unneded pads. */ -+ continue; -+ reg = CONTROL_PADCONF_BASE + sdi_pads[i]; -+ val = omap_readw(reg); -+ switch (val & 0x07) { /* pad mode */ -+ case 1: -+ if (unconfigured) -+ break; -+ /* Is the pull configuration ok for SDI mode? */ -+ if ((val & OMAP_SDI_PAD_MASK) != OMAP_SDI_PAD_EN) -+ break; -+ configured = true; -+ break; -+ case 0: -+ case 7: -+ if (configured) -+ break; -+ unconfigured = true; -+ break; -+ default: -+ break; -+ } -+ } -+ if (i != ARRAY_SIZE(sdi_pads)) { -+ DSSERR("SDI: invalid pad configuration\n"); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static void sdi_pad_config(struct omap_display *display, bool enable) -+{ -+ int data_pairs; -+ bool pad_off_pe, pad_off_pu; -+ unsigned req_map; -+ int i; -+ -+ data_pairs = display->hw_config.u.sdi.datapairs; -+ pad_off_pe = display->hw_config.u.sdi.pad_off_pe; -+ pad_off_pu = display->hw_config.u.sdi.pad_off_pu; -+ req_map = (1 << (data_pairs * 2)) - 1; /* data lanes */ -+ req_map |= 3 << 6; /* clk lane */ -+ for (i = 0; i < ARRAY_SIZE(sdi_pads); i++) { -+ u32 reg; -+ u16 val; -+ -+ if (!((1 << i) & req_map)) -+ continue; -+ if (enable) -+ val = OMAP_SDI_PAD_EN; -+ else -+ val = OMAP_SDI_PAD_DIS(pad_off_pe, pad_off_pu); -+ reg = CONTROL_PADCONF_BASE + sdi_pads[i]; -+ omap_writew(val, reg); -+ } -+} -+ -+static void sdi_basic_init(void) -+{ -+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS); -+ -+ dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT); -+ dispc_set_tft_data_lines(24); -+ dispc_lcd_enable_signal_polarity(1); -+} -+ -+static int sdi_display_enable(struct omap_display *display) -+{ -+ struct dispc_clock_info cinfo; -+ u16 lck_div, pck_div; -+ unsigned long fck; -+ struct omap_panel *panel = display->panel; -+ unsigned long pck; -+ int r; -+ -+ if (display->state != OMAP_DSS_DISPLAY_DISABLED) { -+ DSSERR("display already enabled\n"); -+ return -EINVAL; -+ } -+ -+ twl4030_enable_regulator(RES_VAUX1); -+ -+ sdi_pad_config(display, 1); -+ -+ /* In case of skip_init sdi_init has already enabled the clocks */ -+ if (!sdi.skip_init) -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ sdi_basic_init(); -+ -+ /* 15.5.9.1.2 */ -+ panel->config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF; -+ -+ dispc_set_pol_freq(panel); -+ 
-+ if (!sdi.skip_init) -+ r = dispc_calc_clock_div(1, panel->timings.pixel_clock * 1000, -+ &cinfo); -+ else -+ r = dispc_get_clock_div(&cinfo); -+ -+ if (r) -+ goto err0; -+ -+ fck = cinfo.fck; -+ lck_div = cinfo.lck_div; -+ pck_div = cinfo.pck_div; -+ -+ pck = fck / lck_div / pck_div / 1000; -+ -+ if (pck != panel->timings.pixel_clock) { -+ DSSWARN("Could not find exact pixel clock. Requested %d kHz, " -+ "got %lu kHz\n", -+ panel->timings.pixel_clock, pck); -+ -+ panel->timings.pixel_clock = pck; -+ } -+ -+ -+ dispc_set_lcd_timings(&panel->timings); -+ -+ r = dispc_set_clock_div(&cinfo); -+ if (r) -+ goto err1; -+ -+ if (!sdi.skip_init) { -+ dss_sdi_init(display->hw_config.u.sdi.datapairs); -+ r = dss_sdi_enable(); -+ if (r) -+ goto err1; -+ mdelay(2); -+ } -+ -+ dispc_enable_lcd_out(1); -+ -+ r = panel->enable(display); -+ if (r) -+ goto err2; -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+ -+ sdi.skip_init = 0; -+ -+ return 0; -+err2: -+ dispc_enable_lcd_out(0); -+err1: -+err0: -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ twl4030_disable_regulator(RES_VAUX1); -+ -+ return r; -+} -+ -+static int sdi_display_resume(struct omap_display *display); -+ -+static void sdi_display_disable(struct omap_display *display) -+{ -+ if (display->state == OMAP_DSS_DISPLAY_DISABLED) -+ return; -+ -+ if (display->state == OMAP_DSS_DISPLAY_SUSPENDED) { -+ if (sdi_display_resume(display)) -+ return; -+ } -+ -+ display->panel->disable(display); -+ -+ dispc_enable_lcd_out(0); -+ -+ dss_sdi_disable(); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ sdi_pad_config(display, 0); -+ -+ twl4030_disable_regulator(RES_VAUX1); -+ -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+} -+ -+static int sdi_display_suspend(struct omap_display *display) -+{ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ return -EINVAL; -+ -+ if (display->panel->suspend) -+ display->panel->suspend(display); -+ -+ dispc_enable_lcd_out(0); -+ -+ dss_sdi_disable(); -+ -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ sdi_pad_config(display, 0); -+ -+ twl4030_disable_regulator(RES_VAUX1); -+ -+ display->state = OMAP_DSS_DISPLAY_SUSPENDED; -+ -+ return 0; -+} -+ -+static int sdi_display_resume(struct omap_display *display) -+{ -+ int r; -+ -+ if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) -+ return -EINVAL; -+ -+ twl4030_enable_regulator(RES_VAUX1); -+ -+ sdi_pad_config(display, 1); -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ -+ r = dss_sdi_enable(); -+ if (r) -+ goto err; -+ mdelay(2); -+ -+ dispc_enable_lcd_out(1); -+ -+ if (display->panel->resume) -+ display->panel->resume(display); -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+ -+ return 0; -+ -+ err: -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ sdi_pad_config(display, 0); -+ -+ twl4030_disable_regulator(RES_VAUX1); -+ -+ return r; -+} -+ -+static int sdi_display_set_update_mode(struct omap_display *display, -+ enum omap_dss_update_mode mode) -+{ -+ if (mode == OMAP_DSS_UPDATE_MANUAL) -+ return -EINVAL; -+ -+ if (mode == OMAP_DSS_UPDATE_DISABLED) { -+ dispc_enable_lcd_out(0); -+ sdi.update_enabled = 0; -+ } else { -+ dispc_enable_lcd_out(1); -+ sdi.update_enabled = 1; -+ } -+ -+ return 0; -+} -+ -+static enum omap_dss_update_mode sdi_display_get_update_mode( -+ struct omap_display *display) -+{ -+ return sdi.update_enabled ? 
OMAP_DSS_UPDATE_AUTO : -+ OMAP_DSS_UPDATE_DISABLED; -+} -+ -+static void sdi_get_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ *timings = display->panel->timings; -+} -+ -+int sdi_init_display(struct omap_display *display) -+{ -+ DSSDBG("SDI init\n"); -+ -+ display->enable = sdi_display_enable; -+ display->disable = sdi_display_disable; -+ display->suspend = sdi_display_suspend; -+ display->resume = sdi_display_resume; -+ display->set_update_mode = sdi_display_set_update_mode; -+ display->get_update_mode = sdi_display_get_update_mode; -+ display->get_timings = sdi_get_timings; -+ -+ return sdi_pad_init(display); -+} -+ -+int sdi_init(bool skip_init) -+{ -+ /* we store this for first display enable, then clear it */ -+ sdi.skip_init = skip_init; -+ -+ /* -+ * Enable clocks already here, otherwise there would be a toggle -+ * of them until sdi_display_enable is called. -+ */ -+ if (skip_init) { -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1); -+ dss_boottime_disable_clocks(); -+ dss_boottime_put_clocks(); -+ } -+ return 0; -+} -+ -+void sdi_exit(void) -+{ -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/dss/venc.c linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/venc.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/dss/venc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/dss/venc.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,718 @@ -+/* -+ * linux/drivers/video/omap2/dss/venc.c -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * VENC settings from TI's DSS driver -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . 
-+ */ -+ -+#define DSS_SUBSYS_NAME "VENC" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "dss.h" -+ -+#define VENC_BASE 0x48050C00 -+ -+/* Venc registers */ -+#define VENC_REV_ID 0x00 -+#define VENC_STATUS 0x04 -+#define VENC_F_CONTROL 0x08 -+#define VENC_VIDOUT_CTRL 0x10 -+#define VENC_SYNC_CTRL 0x14 -+#define VENC_LLEN 0x1C -+#define VENC_FLENS 0x20 -+#define VENC_HFLTR_CTRL 0x24 -+#define VENC_CC_CARR_WSS_CARR 0x28 -+#define VENC_C_PHASE 0x2C -+#define VENC_GAIN_U 0x30 -+#define VENC_GAIN_V 0x34 -+#define VENC_GAIN_Y 0x38 -+#define VENC_BLACK_LEVEL 0x3C -+#define VENC_BLANK_LEVEL 0x40 -+#define VENC_X_COLOR 0x44 -+#define VENC_M_CONTROL 0x48 -+#define VENC_BSTAMP_WSS_DATA 0x4C -+#define VENC_S_CARR 0x50 -+#define VENC_LINE21 0x54 -+#define VENC_LN_SEL 0x58 -+#define VENC_L21__WC_CTL 0x5C -+#define VENC_HTRIGGER_VTRIGGER 0x60 -+#define VENC_SAVID__EAVID 0x64 -+#define VENC_FLEN__FAL 0x68 -+#define VENC_LAL__PHASE_RESET 0x6C -+#define VENC_HS_INT_START_STOP_X 0x70 -+#define VENC_HS_EXT_START_STOP_X 0x74 -+#define VENC_VS_INT_START_X 0x78 -+#define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C -+#define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80 -+#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84 -+#define VENC_VS_EXT_STOP_Y 0x88 -+#define VENC_AVID_START_STOP_X 0x90 -+#define VENC_AVID_START_STOP_Y 0x94 -+#define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0 -+#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4 -+#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8 -+#define VENC_TVDETGP_INT_START_STOP_X 0xB0 -+#define VENC_TVDETGP_INT_START_STOP_Y 0xB4 -+#define VENC_GEN_CTRL 0xB8 -+#define VENC_OUTPUT_CONTROL 0xC4 -+#define VENC_OUTPUT_TEST 0xC8 -+#define VENC_DAC_B__DAC_C 0xC8 -+ -+struct venc_config { -+ u32 f_control; -+ u32 vidout_ctrl; -+ u32 sync_ctrl; -+ u32 llen; -+ u32 flens; -+ u32 hfltr_ctrl; -+ u32 cc_carr_wss_carr; -+ u32 c_phase; -+ u32 gain_u; -+ u32 gain_v; -+ u32 gain_y; -+ u32 black_level; -+ u32 blank_level; -+ u32 x_color; -+ u32 m_control; -+ u32 bstamp_wss_data; -+ u32 s_carr; -+ u32 line21; -+ u32 ln_sel; -+ u32 l21__wc_ctl; -+ u32 htrigger_vtrigger; -+ u32 savid__eavid; -+ u32 flen__fal; -+ u32 lal__phase_reset; -+ u32 hs_int_start_stop_x; -+ u32 hs_ext_start_stop_x; -+ u32 vs_int_start_x; -+ u32 vs_int_stop_x__vs_int_start_y; -+ u32 vs_int_stop_y__vs_ext_start_x; -+ u32 vs_ext_stop_x__vs_ext_start_y; -+ u32 vs_ext_stop_y; -+ u32 avid_start_stop_x; -+ u32 avid_start_stop_y; -+ u32 fid_int_start_x__fid_int_start_y; -+ u32 fid_int_offset_y__fid_ext_start_x; -+ u32 fid_ext_start_y__fid_ext_offset_y; -+ u32 tvdetgp_int_start_stop_x; -+ u32 tvdetgp_int_start_stop_y; -+ u32 gen_ctrl; -+}; -+ -+/* from TRM */ -+static const struct venc_config venc_config_pal_trm = { -+ .f_control = 0, -+ .vidout_ctrl = 1, -+ .sync_ctrl = 0x40, -+ .llen = 0x35F, /* 863 */ -+ .flens = 0x270, /* 624 */ -+ .hfltr_ctrl = 0, -+ .cc_carr_wss_carr = 0x2F7225ED, -+ .c_phase = 0, -+ .gain_u = 0x111, -+ .gain_v = 0x181, -+ .gain_y = 0x140, -+ .black_level = 0x3B, -+ .blank_level = 0x3B, -+ .x_color = 0x7, -+ .m_control = 0x2, -+ .bstamp_wss_data = 0x3F, -+ .s_carr = 0x2A098ACB, -+ .line21 = 0, -+ .ln_sel = 0x01290015, -+ .l21__wc_ctl = 0x0000F603, -+ .htrigger_vtrigger = 0, -+ -+ .savid__eavid = 0x06A70108, -+ .flen__fal = 0x00180270, -+ .lal__phase_reset = 0x00040135, -+ .hs_int_start_stop_x = 0x00880358, -+ .hs_ext_start_stop_x = 0x000F035F, -+ .vs_int_start_x = 0x01A70000, -+ 
.vs_int_stop_x__vs_int_start_y = 0x000001A7, -+ .vs_int_stop_y__vs_ext_start_x = 0x01AF0000, -+ .vs_ext_stop_x__vs_ext_start_y = 0x000101AF, -+ .vs_ext_stop_y = 0x00000025, -+ .avid_start_stop_x = 0x03530083, -+ .avid_start_stop_y = 0x026C002E, -+ .fid_int_start_x__fid_int_start_y = 0x0001008A, -+ .fid_int_offset_y__fid_ext_start_x = 0x002E0138, -+ .fid_ext_start_y__fid_ext_offset_y = 0x01380001, -+ -+ .tvdetgp_int_start_stop_x = 0x00140001, -+ .tvdetgp_int_start_stop_y = 0x00010001, -+ .gen_ctrl = 0x00FF0000, -+}; -+ -+/* from TRM */ -+static const struct venc_config venc_config_ntsc_trm = { -+ .f_control = 0, -+ .vidout_ctrl = 1, -+ .sync_ctrl = 0x8040, -+ .llen = 0x359, -+ .flens = 0x20C, -+ .hfltr_ctrl = 0, -+ .cc_carr_wss_carr = 0x043F2631, -+ .c_phase = 0, -+ .gain_u = 0x102, -+ .gain_v = 0x16C, -+ .gain_y = 0x12F, -+ .black_level = 0x43, -+ .blank_level = 0x38, -+ .x_color = 0x7, -+ .m_control = 0x1, -+ .bstamp_wss_data = 0x38, -+ .s_carr = 0x21F07C1F, -+ .line21 = 0, -+ .ln_sel = 0x01310011, -+ .l21__wc_ctl = 0x0000F003, -+ .htrigger_vtrigger = 0, -+ -+ .savid__eavid = 0x069300F4, -+ .flen__fal = 0x0016020C, -+ .lal__phase_reset = 0x00060107, -+ .hs_int_start_stop_x = 0x008E0350, -+ .hs_ext_start_stop_x = 0x000F0359, -+ .vs_int_start_x = 0x01A00000, -+ .vs_int_stop_x__vs_int_start_y = 0x020701A0, -+ .vs_int_stop_y__vs_ext_start_x = 0x01AC0024, -+ .vs_ext_stop_x__vs_ext_start_y = 0x020D01AC, -+ .vs_ext_stop_y = 0x00000006, -+ .avid_start_stop_x = 0x03480078, -+ .avid_start_stop_y = 0x02060024, -+ .fid_int_start_x__fid_int_start_y = 0x0001008A, -+ .fid_int_offset_y__fid_ext_start_x = 0x01AC0106, -+ .fid_ext_start_y__fid_ext_offset_y = 0x01060006, -+ -+ .tvdetgp_int_start_stop_x = 0x00140001, -+ .tvdetgp_int_start_stop_y = 0x00010001, -+ .gen_ctrl = 0x00F90000, -+}; -+ -+static const struct venc_config venc_config_pal_bdghi = { -+ .f_control = 0, -+ .vidout_ctrl = 0, -+ .sync_ctrl = 0, -+ .hfltr_ctrl = 0, -+ .x_color = 0, -+ .line21 = 0, -+ .ln_sel = 21, -+ .htrigger_vtrigger = 0, -+ .tvdetgp_int_start_stop_x = 0x00140001, -+ .tvdetgp_int_start_stop_y = 0x00010001, -+ .gen_ctrl = 0x00FB0000, -+ -+ .llen = 864-1, -+ .flens = 625-1, -+ .cc_carr_wss_carr = 0x2F7625ED, -+ .c_phase = 0xDF, -+ .gain_u = 0x111, -+ .gain_v = 0x181, -+ .gain_y = 0x140, -+ .black_level = 0x3e, -+ .blank_level = 0x3e, -+ .m_control = 0<<2 | 1<<1, -+ .bstamp_wss_data = 0x42, -+ .s_carr = 0x2a098acb, -+ .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0, -+ .savid__eavid = 0x06A70108, -+ .flen__fal = 23<<16 | 624<<0, -+ .lal__phase_reset = 2<<17 | 310<<0, -+ .hs_int_start_stop_x = 0x00920358, -+ .hs_ext_start_stop_x = 0x000F035F, -+ .vs_int_start_x = 0x1a7<<16, -+ .vs_int_stop_x__vs_int_start_y = 0x000601A7, -+ .vs_int_stop_y__vs_ext_start_x = 0x01AF0036, -+ .vs_ext_stop_x__vs_ext_start_y = 0x27101af, -+ .vs_ext_stop_y = 0x05, -+ .avid_start_stop_x = 0x03530082, -+ .avid_start_stop_y = 0x0270002E, -+ .fid_int_start_x__fid_int_start_y = 0x0005008A, -+ .fid_int_offset_y__fid_ext_start_x = 0x002E0138, -+ .fid_ext_start_y__fid_ext_offset_y = 0x01380005, -+}; -+ -+const struct omap_video_timings omap_dss_pal_timings = { -+ .x_res = 720, -+ .y_res = 574, -+ .pixel_clock = 13500, -+ .hsw = 64, -+ .hfp = 12, -+ .hbp = 68, -+ .vsw = 5, -+ .vfp = 5, -+ .vbp = 41, -+}; -+EXPORT_SYMBOL(omap_dss_pal_timings); -+ -+const struct omap_video_timings omap_dss_ntsc_timings = { -+ .x_res = 720, -+ .y_res = 482, -+ .pixel_clock = 13500, -+ .hsw = 64, -+ .hfp = 16, -+ .hbp = 58, -+ .vsw = 6, -+ .vfp = 6, -+ .vbp = 31, -+}; 
-+EXPORT_SYMBOL(omap_dss_ntsc_timings); -+ -+static struct { -+ void __iomem *base; -+ struct mutex venc_lock; -+ struct regulator *vdac_reg; -+ u32 wss_data; -+} venc; -+ -+static struct omap_panel venc_panel = { -+ .name = "tv-out", -+}; -+ -+static inline void venc_write_reg(int idx, u32 val) -+{ -+ __raw_writel(val, venc.base + idx); -+} -+ -+static inline u32 venc_read_reg(int idx) -+{ -+ u32 l = __raw_readl(venc.base + idx); -+ return l; -+} -+ -+static void venc_write_config(const struct venc_config *config) -+{ -+ DSSDBG("write venc conf\n"); -+ -+ venc_write_reg(VENC_LLEN, config->llen); -+ venc_write_reg(VENC_FLENS, config->flens); -+ venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr); -+ venc_write_reg(VENC_C_PHASE, config->c_phase); -+ venc_write_reg(VENC_GAIN_U, config->gain_u); -+ venc_write_reg(VENC_GAIN_V, config->gain_v); -+ venc_write_reg(VENC_GAIN_Y, config->gain_y); -+ venc_write_reg(VENC_BLACK_LEVEL, config->black_level); -+ venc_write_reg(VENC_BLANK_LEVEL, config->blank_level); -+ venc_write_reg(VENC_M_CONTROL, config->m_control); -+ venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | venc.wss_data); -+ venc_write_reg(VENC_S_CARR, config->s_carr); -+ venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl); -+ venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid); -+ venc_write_reg(VENC_FLEN__FAL, config->flen__fal); -+ venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset); -+ venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x); -+ venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x); -+ venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x); -+ venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y, -+ config->vs_int_stop_x__vs_int_start_y); -+ venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X, -+ config->vs_int_stop_y__vs_ext_start_x); -+ venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y, -+ config->vs_ext_stop_x__vs_ext_start_y); -+ venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y); -+ venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x); -+ venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y); -+ venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y, -+ config->fid_int_start_x__fid_int_start_y); -+ venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X, -+ config->fid_int_offset_y__fid_ext_start_x); -+ venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y, -+ config->fid_ext_start_y__fid_ext_offset_y); -+ -+ venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C)); -+ venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl); -+ venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl); -+ venc_write_reg(VENC_X_COLOR, config->x_color); -+ venc_write_reg(VENC_LINE21, config->line21); -+ venc_write_reg(VENC_LN_SEL, config->ln_sel); -+ venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger); -+ venc_write_reg(VENC_TVDETGP_INT_START_STOP_X, -+ config->tvdetgp_int_start_stop_x); -+ venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y, -+ config->tvdetgp_int_start_stop_y); -+ venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl); -+ venc_write_reg(VENC_F_CONTROL, config->f_control); -+ venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl); -+} -+ -+static void venc_reset(void) -+{ -+ int t = 1000; -+ -+ venc_write_reg(VENC_F_CONTROL, 1<<8); -+ while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) { -+ if (--t == 0) { -+ DSSERR("Failed to reset venc\n"); -+ return; -+ } -+ } -+ -+ /* the magical sleep that makes things work */ -+ msleep(20); -+} -+ -+static void venc_enable_clocks(int 
enable) -+{ -+ if (enable) -+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M | -+ DSS_CLK_96M); -+ else -+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M | -+ DSS_CLK_96M); -+} -+ -+static const struct venc_config *venc_timings_to_config( -+ struct omap_video_timings *timings) -+{ -+ if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0) -+ return &venc_config_pal_trm; -+ -+ if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0) -+ return &venc_config_ntsc_trm; -+ -+ BUG(); -+} -+ -+int venc_init(struct platform_device *pdev) -+{ -+ u8 rev_id; -+ -+ mutex_init(&venc.venc_lock); -+ -+ venc.wss_data = 0; -+ -+ venc_panel.timings = omap_dss_pal_timings; -+ -+ venc.base = ioremap(VENC_BASE, SZ_1K); -+ if (!venc.base) { -+ DSSERR("can't ioremap VENC\n"); -+ return -ENOMEM; -+ } -+ -+ venc.vdac_reg = regulator_get(&pdev->dev, "vdac"); -+ -+ if (IS_ERR(venc.vdac_reg)) { -+ DSSERR("Can't get vdac regulator as source\n"); -+ venc.vdac_reg = NULL; -+ return -ENODEV; -+ } -+ -+ venc_enable_clocks(1); -+ -+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); -+ printk(KERN_INFO "OMAP VENC rev %d\n", rev_id); -+ -+ venc_enable_clocks(0); -+ -+ return 0; -+} -+ -+void venc_exit(void) -+{ -+ regulator_put(venc.vdac_reg); -+ iounmap(venc.base); -+} -+ -+static void venc_power_on(struct omap_display *display) -+{ -+ regulator_enable(venc.vdac_reg); -+ -+ dispc_enable_digit_errors(0); -+ -+ venc_enable_clocks(1); -+ -+ venc_reset(); -+ venc_write_config(venc_timings_to_config(&display->panel->timings)); -+ -+ dss_set_venc_output(display->hw_config.u.venc.type); -+ dss_set_dac_pwrdn_bgz(1); -+ -+ if (display->hw_config.u.venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE) { -+ if (cpu_is_omap24xx()) -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0x2); -+ else -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0xa); -+ } else { /* S-Video */ -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0xd); -+ } -+ -+ dispc_set_digit_size(display->panel->timings.x_res, -+ display->panel->timings.y_res/2); -+ -+ if (display->hw_config.panel_enable) -+ display->hw_config.panel_enable(display); -+ -+ dispc_enable_digit_out(1); -+ -+ dispc_enable_digit_errors(1); -+} -+ -+static void venc_power_off(struct omap_display *display) -+{ -+ venc_write_reg(VENC_OUTPUT_CONTROL, 0); -+ dss_set_dac_pwrdn_bgz(0); -+ -+ dispc_enable_digit_out(0); -+ -+ if (display->hw_config.panel_disable) -+ display->hw_config.panel_disable(display); -+ -+ venc_enable_clocks(0); -+ -+ regulator_disable(venc.vdac_reg); -+} -+ -+static int venc_enable_display(struct omap_display *display) -+{ -+ int r = 0; -+ -+ DSSDBG("venc_enable_display\n"); -+ -+ mutex_lock(&venc.venc_lock); -+ -+ if (display->state != OMAP_DSS_DISPLAY_DISABLED) { -+ r = -EINVAL; -+ goto err; -+ } -+ -+ venc_power_on(display); -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+err: -+ mutex_unlock(&venc.venc_lock); -+ -+ return r; -+} -+ -+static void venc_disable_display(struct omap_display *display) -+{ -+ DSSDBG("venc_disable_display\n"); -+ -+ mutex_lock(&venc.venc_lock); -+ -+ if (display->state == OMAP_DSS_DISPLAY_DISABLED) -+ goto end; -+ -+ if (display->state == OMAP_DSS_DISPLAY_SUSPENDED) { -+ /* suspended is the same as disabled with venc */ -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+ goto end; -+ } -+ -+ venc_power_off(display); -+ -+ display->state = OMAP_DSS_DISPLAY_DISABLED; -+end: -+ mutex_unlock(&venc.venc_lock); -+} -+ -+static int venc_display_suspend(struct omap_display *display) -+{ -+ int r = 0; -+ -+ DSSDBG("venc_display_suspend\n"); -+ -+ 
mutex_lock(&venc.venc_lock); -+ -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) { -+ r = -EINVAL; -+ goto err; -+ } -+ -+ venc_power_off(display); -+ -+ display->state = OMAP_DSS_DISPLAY_SUSPENDED; -+err: -+ mutex_unlock(&venc.venc_lock); -+ -+ return r; -+} -+ -+static int venc_display_resume(struct omap_display *display) -+{ -+ int r = 0; -+ -+ DSSDBG("venc_display_resume\n"); -+ -+ mutex_lock(&venc.venc_lock); -+ -+ if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) { -+ r = -EINVAL; -+ goto err; -+ } -+ -+ venc_power_on(display); -+ -+ display->state = OMAP_DSS_DISPLAY_ACTIVE; -+err: -+ mutex_unlock(&venc.venc_lock); -+ -+ return r; -+} -+ -+static void venc_get_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ *timings = venc_panel.timings; -+} -+ -+static void venc_set_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ DSSDBG("venc_set_timings\n"); -+ -+ /* Reset WSS data when the TV standard changes. */ -+ if (memcmp(&display->panel->timings, timings, sizeof(*timings))) -+ venc.wss_data = 0; -+ -+ display->panel->timings = *timings; -+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { -+ /* turn the venc off and on to get new timings to use */ -+ venc_disable_display(display); -+ venc_enable_display(display); -+ } -+} -+ -+static int venc_check_timings(struct omap_display *display, -+ struct omap_video_timings *timings) -+{ -+ DSSDBG("venc_check_timings\n"); -+ -+ if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0) -+ return 0; -+ -+ if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0) -+ return 0; -+ -+ return -EINVAL; -+} -+ -+static u32 venc_get_wss(struct omap_display *display) -+{ -+ /* Invert due to VENC_L21_WC_CTL:INV=1 */ -+ return (venc.wss_data >> 8) ^ 0xfffff; -+} -+ -+static int venc_set_wss(struct omap_display *display, -+ u32 wss) -+{ -+ const struct venc_config *config; -+ -+ DSSDBG("venc_set_wss\n"); -+ -+ mutex_lock(&venc.venc_lock); -+ -+ config = venc_timings_to_config(&display->panel->timings); -+ -+ /* Invert due to VENC_L21_WC_CTL:INV=1 */ -+ venc.wss_data = (wss ^ 0xfffff) << 8; -+ -+ venc_enable_clocks(1); -+ -+ venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | venc.wss_data); -+ -+ venc_enable_clocks(0); -+ -+ mutex_unlock(&venc.venc_lock); -+ -+ return 0; -+} -+ -+int venc_init_display(struct omap_display *display) -+{ -+ display->panel = &venc_panel; -+ display->enable = venc_enable_display; -+ display->disable = venc_disable_display; -+ display->suspend = venc_display_suspend; -+ display->resume = venc_display_resume; -+ display->get_timings = venc_get_timings; -+ display->set_timings = venc_set_timings; -+ display->check_timings = venc_check_timings; -+ display->get_wss = venc_get_wss; -+ display->set_wss = venc_set_wss; -+ -+ return 0; -+} -+ -+void venc_dump_regs(struct seq_file *s) -+{ -+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) -+ -+ venc_enable_clocks(1); -+ -+ DUMPREG(VENC_F_CONTROL); -+ DUMPREG(VENC_VIDOUT_CTRL); -+ DUMPREG(VENC_SYNC_CTRL); -+ DUMPREG(VENC_LLEN); -+ DUMPREG(VENC_FLENS); -+ DUMPREG(VENC_HFLTR_CTRL); -+ DUMPREG(VENC_CC_CARR_WSS_CARR); -+ DUMPREG(VENC_C_PHASE); -+ DUMPREG(VENC_GAIN_U); -+ DUMPREG(VENC_GAIN_V); -+ DUMPREG(VENC_GAIN_Y); -+ DUMPREG(VENC_BLACK_LEVEL); -+ DUMPREG(VENC_BLANK_LEVEL); -+ DUMPREG(VENC_X_COLOR); -+ DUMPREG(VENC_M_CONTROL); -+ DUMPREG(VENC_BSTAMP_WSS_DATA); -+ DUMPREG(VENC_S_CARR); -+ DUMPREG(VENC_LINE21); -+ DUMPREG(VENC_LN_SEL); -+ DUMPREG(VENC_L21__WC_CTL); -+ 
DUMPREG(VENC_HTRIGGER_VTRIGGER); -+ DUMPREG(VENC_SAVID__EAVID); -+ DUMPREG(VENC_FLEN__FAL); -+ DUMPREG(VENC_LAL__PHASE_RESET); -+ DUMPREG(VENC_HS_INT_START_STOP_X); -+ DUMPREG(VENC_HS_EXT_START_STOP_X); -+ DUMPREG(VENC_VS_INT_START_X); -+ DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y); -+ DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X); -+ DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y); -+ DUMPREG(VENC_VS_EXT_STOP_Y); -+ DUMPREG(VENC_AVID_START_STOP_X); -+ DUMPREG(VENC_AVID_START_STOP_Y); -+ DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y); -+ DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X); -+ DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y); -+ DUMPREG(VENC_TVDETGP_INT_START_STOP_X); -+ DUMPREG(VENC_TVDETGP_INT_START_STOP_Y); -+ DUMPREG(VENC_GEN_CTRL); -+ DUMPREG(VENC_OUTPUT_CONTROL); -+ DUMPREG(VENC_OUTPUT_TEST); -+ -+ venc_enable_clocks(0); -+ -+#undef DUMPREG -+} -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/Kconfig linux-omap-2.6.28-nokia1/drivers/video/omap2/Kconfig ---- linux-omap-2.6.28-omap1/drivers/video/omap2/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/Kconfig 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,3 @@ -+source "drivers/video/omap2/dss/Kconfig" -+source "drivers/video/omap2/displays/Kconfig" -+source "drivers/video/omap2/omapfb/Kconfig" -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/Makefile linux-omap-2.6.28-nokia1/drivers/video/omap2/Makefile ---- linux-omap-2.6.28-omap1/drivers/video/omap2/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/Makefile 2011-06-22 13:19:33.153063270 +0200 -@@ -0,0 +1,4 @@ -+# OMAP2/3 Display Subsystem -+obj-y += dss/ -+obj-y += displays/ -+obj-y += omapfb/ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/Kconfig linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/Kconfig ---- linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/Kconfig 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,35 @@ -+menuconfig FB_OMAP2 -+ tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)" -+ depends on FB && OMAP2_DSS -+ -+ select FB_CFB_FILLRECT -+ select FB_CFB_COPYAREA -+ select FB_CFB_IMAGEBLIT -+ help -+ Frame buffer driver for OMAP2/3 based boards. -+ -+config FB_OMAP2_DEBUG_SUPPORT -+ bool "Debug support for OMAP2/3 FB" -+ default y -+ depends on FB_OMAP2 -+ help -+ Support for debug output. You have to enable the actual printing -+ with debug module parameter. -+ -+config FB_OMAP2_FORCE_AUTO_UPDATE -+ bool "Force main display to automatic update mode" -+ depends on FB_OMAP2 -+ help -+ Forces main display to automatic update mode (if possible), -+ and also enables tearsync (if possible). By default -+ displays that support manual update are started in manual -+ update mode. -+ -+config FB_OMAP2_NUM_FBS -+ int "Number of framebuffers" -+ range 1 10 -+ default 3 -+ depends on FB_OMAP2 -+ help -+ Select the number of framebuffers created. OMAP2/3 has 3 overlays -+ so normally this would be 3. 
-diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/Makefile linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/Makefile ---- linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/Makefile 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,2 @@ -+obj-$(CONFIG_FB_OMAP2) += omapfb.o -+omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb.h linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb.h ---- linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb.h 2011-06-22 13:19:33.183063271 +0200 -@@ -0,0 +1,149 @@ -+/* -+ * linux/drivers/video/omap2/omapfb.h -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#ifndef __DRIVERS_VIDEO_OMAP2_OMAPFB_H__ -+#define __DRIVERS_VIDEO_OMAP2_OMAPFB_H__ -+ -+#ifdef CONFIG_FB_OMAP2_DEBUG_SUPPORT -+#define DEBUG -+#endif -+ -+#include -+ -+#ifdef DEBUG -+extern unsigned int omapfb_debug; -+#define DBG(format, ...) \ -+ if (omapfb_debug) \ -+ printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__) -+#else -+#define DBG(format, ...) 
-+#endif -+ -+#define FB2OFB(fb_info) ((struct omapfb_info *)(fb_info->par)) -+ -+/* max number of overlays to which a framebuffer data can be direct */ -+#define OMAPFB_MAX_OVL_PER_FB 3 -+ -+struct omapfb2_mem_region { -+ u32 paddr; -+ void __iomem *vaddr; -+ struct vrfb vrfb; -+ unsigned long size; -+ u8 type; /* OMAPFB_PLANE_MEM_* */ -+ bool alloc; /* allocated by the driver */ -+ bool map; /* kernel mapped by the driver */ -+}; -+ -+/* appended to fb_info */ -+struct omapfb_info { -+ int id; -+ struct omapfb2_mem_region region; -+ atomic_t map_count; -+ int num_overlays; -+ struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB]; -+ struct omapfb2_device *fbdev; -+ enum omap_dss_rotation_type rotation_type; -+ u8 rotation[OMAPFB_MAX_OVL_PER_FB]; -+ bool mirror; -+}; -+ -+struct omapfb2_device { -+ struct device *dev; -+ struct mutex mtx; -+ -+ u32 pseudo_palette[17]; -+ -+ int state; -+ -+ unsigned num_fbs; -+ struct fb_info *fbs[10]; -+ -+ unsigned num_displays; -+ struct omap_display *displays[10]; -+ unsigned num_overlays; -+ struct omap_overlay *overlays[10]; -+ unsigned num_managers; -+ struct omap_overlay_manager *managers[10]; -+}; -+ -+struct omapfb_colormode { -+ enum omap_color_mode dssmode; -+ u32 bits_per_pixel; -+ u32 nonstd; -+ struct fb_bitfield red; -+ struct fb_bitfield green; -+ struct fb_bitfield blue; -+ struct fb_bitfield transp; -+}; -+ -+void set_fb_fix(struct fb_info *fbi); -+int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var); -+int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type); -+int omapfb_apply_changes(struct fb_info *fbi, int init); -+int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi); -+ -+int omapfb_create_sysfs(struct omapfb2_device *fbdev); -+void omapfb_remove_sysfs(struct omapfb2_device *fbdev); -+ -+int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg); -+ -+int omapfb_mode_to_timings(const char *mode_str, -+ struct omap_video_timings *timings, u8 *bpp); -+int dss_mode_to_fb_mode(enum omap_color_mode dssmode, -+ struct fb_var_screeninfo *var); -+ -+/* find the display connected to this fb, if any */ -+static inline struct omap_display *fb2display(struct fb_info *fbi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ int i; -+ -+ /* XXX: returns the display connected to first attached overlay */ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ if (ofbi->overlays[i]->manager) -+ return ofbi->overlays[i]->manager->display; -+ } -+ -+ return NULL; -+} -+ -+static inline void omapfb_lock(struct omapfb2_device *fbdev) -+{ -+ mutex_lock(&fbdev->mtx); -+} -+ -+static inline void omapfb_unlock(struct omapfb2_device *fbdev) -+{ -+ mutex_unlock(&fbdev->mtx); -+} -+ -+static inline int omapfb_overlay_enable(struct omap_overlay *ovl, -+ int enable) -+{ -+ struct omap_overlay_info info; -+ -+ ovl->get_overlay_info(ovl, &info); -+ info.enabled = enable; -+ return ovl->set_overlay_info(ovl, &info); -+} -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb-ioctl.c linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb-ioctl.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb-ioctl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb-ioctl.c 2011-06-22 13:19:33.163063270 +0200 -@@ -0,0 +1,760 @@ -+/* -+ * linux/drivers/video/omap2/omapfb-ioctl.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by 
Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "omapfb.h" -+ -+static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ struct omap_overlay *ovl; -+ struct omap_overlay_info info; -+ int r = 0; -+ -+ DBG("omapfb_setup_plane\n"); -+ -+ omapfb_lock(fbdev); -+ -+ if (ofbi->num_overlays != 1) { -+ r = -EINVAL; -+ goto out; -+ } -+ -+ /* XXX uses only the first overlay */ -+ ovl = ofbi->overlays[0]; -+ -+ if (pi->enabled && !ofbi->region.size) { -+ /* -+ * This plane's memory was freed, can't enable it -+ * until it's reallocated. -+ */ -+ r = -EINVAL; -+ goto out; -+ } -+ -+ ovl->get_overlay_info(ovl, &info); -+ -+ info.pos_x = pi->pos_x; -+ info.pos_y = pi->pos_y; -+ info.out_width = pi->out_width; -+ info.out_height = pi->out_height; -+ info.enabled = pi->enabled; -+ -+ omap_dss_lock(); -+ -+ r = ovl->set_overlay_info(ovl, &info); -+ if (r) -+ goto unlock; -+ -+ if (ovl->manager) { -+ r = ovl->manager->apply(ovl->manager); -+ if (r) -+ goto unlock; -+ } -+ -+ if (display) { -+ u16 w, h; -+ -+ if (display->sync) -+ display->sync(display); -+ -+ display->get_resolution(display, &w, &h); -+ -+ if (display->update) -+ display->update(display, 0, 0, w, h); -+ } -+ -+unlock: -+ omap_dss_unlock(); -+out: -+ omapfb_unlock(fbdev); -+ if (r) -+ dev_err(fbdev->dev, "setup_plane failed\n"); -+ return r; -+} -+ -+static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ -+ omapfb_lock(fbdev); -+ -+ if (ofbi->num_overlays != 1) { -+ memset(pi, 0, sizeof(*pi)); -+ } else { -+ struct omap_overlay_info *ovli; -+ struct omap_overlay *ovl; -+ -+ ovl = ofbi->overlays[0]; -+ ovli = &ovl->info; -+ -+ pi->pos_x = ovli->pos_x; -+ pi->pos_y = ovli->pos_y; -+ pi->enabled = ovli->enabled; -+ pi->channel_out = 0; /* xxx */ -+ pi->mirror = 0; -+ pi->out_width = ovli->out_width; -+ pi->out_height = ovli->out_height; -+ } -+ -+ omapfb_unlock(fbdev); -+ -+ return 0; -+} -+ -+static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omapfb2_mem_region *rg; -+ int r, i; -+ size_t size; -+ -+ if (mi->type > OMAPFB_MEMTYPE_MAX) -+ return -EINVAL; -+ -+ size = PAGE_ALIGN(mi->size); -+ -+ rg = &ofbi->region; -+ -+ omapfb_lock(fbdev); -+ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ if (ofbi->overlays[i]->info.enabled) { -+ r = -EBUSY; -+ goto out; -+ } -+ } -+ -+ if (rg->size != size || rg->type != mi->type) { -+ r = omapfb_realloc_fbmem(fbi, size, mi->type); -+ if (r) { -+ dev_err(fbdev->dev, "realloc fbmem failed\n"); -+ goto out; -+ } -+ } -+ -+ r = 0; 
-+out: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omapfb2_mem_region *rg; -+ -+ rg = &ofbi->region; -+ memset(mi, 0, sizeof(*mi)); -+ -+ omapfb_lock(fbdev); -+ mi->size = rg->size; -+ mi->type = rg->type; -+ omapfb_unlock(fbdev); -+ -+ return 0; -+} -+ -+static int omapfb_update_window(struct fb_info *fbi, -+ u32 x, u32 y, u32 w, u32 h) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ u16 dw, dh; -+ -+ if (!display) -+ return 0; -+ -+ if (w == 0 || h == 0) -+ return 0; -+ -+ display->get_resolution(display, &dw, &dh); -+ -+ if (x + w > dw || y + h > dh) -+ return -EINVAL; -+ -+ omapfb_lock(fbdev); -+ omap_dss_lock(); -+ display->update(display, x, y, w, h); -+ omap_dss_unlock(); -+ omapfb_unlock(fbdev); -+ -+ return 0; -+} -+ -+static int omapfb_set_update_mode(struct fb_info *fbi, -+ enum omapfb_update_mode mode) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ enum omap_dss_update_mode um; -+ int r; -+ -+ if (!display || !display->set_update_mode) -+ return -EINVAL; -+ -+ switch (mode) { -+ case OMAPFB_UPDATE_DISABLED: -+ um = OMAP_DSS_UPDATE_DISABLED; -+ break; -+ -+ case OMAPFB_AUTO_UPDATE: -+ um = OMAP_DSS_UPDATE_AUTO; -+ break; -+ -+ case OMAPFB_MANUAL_UPDATE: -+ um = OMAP_DSS_UPDATE_MANUAL; -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ omapfb_lock(fbdev); -+ omap_dss_lock(); -+ r = display->set_update_mode(display, um); -+ omap_dss_unlock(); -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static int omapfb_get_update_mode(struct fb_info *fbi, -+ enum omapfb_update_mode *mode) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ enum omap_dss_update_mode m; -+ -+ if (!display || !display->get_update_mode) -+ return -EINVAL; -+ -+ omapfb_lock(fbdev); -+ m = display->get_update_mode(display); -+ omapfb_unlock(fbdev); -+ -+ switch (m) { -+ case OMAP_DSS_UPDATE_DISABLED: -+ *mode = OMAPFB_UPDATE_DISABLED; -+ break; -+ case OMAP_DSS_UPDATE_AUTO: -+ *mode = OMAPFB_AUTO_UPDATE; -+ break; -+ case OMAP_DSS_UPDATE_MANUAL: -+ *mode = OMAPFB_MANUAL_UPDATE; -+ break; -+ default: -+ BUG(); -+ } -+ -+ return 0; -+} -+ -+/* XXX this color key handling is a hack... 
*/ -+static struct omapfb_color_key omapfb_color_keys[2]; -+ -+static int _omapfb_set_color_key(struct omap_overlay_manager *mgr, -+ struct omapfb_color_key *ck) -+{ -+ enum omap_dss_color_key_type kt; -+ -+ if (!mgr->set_default_color || -+ !mgr->set_trans_key_type_and_value || -+ !mgr->enable_trans_key) -+ return 0; -+ -+ if (ck->key_type == OMAPFB_COLOR_KEY_DISABLED) { -+ mgr->enable_trans_key(mgr, 0); -+ omapfb_color_keys[mgr->id] = *ck; -+ return 0; -+ } -+ -+ switch(ck->key_type) { -+ case OMAPFB_COLOR_KEY_GFX_DST: -+ kt = OMAP_DSS_COLOR_KEY_GFX_DST; -+ break; -+ case OMAPFB_COLOR_KEY_VID_SRC: -+ kt = OMAP_DSS_COLOR_KEY_VID_SRC; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ omap_dss_lock(); -+ mgr->set_default_color(mgr, ck->background); -+ mgr->set_trans_key_type_and_value(mgr, kt, ck->trans_key); -+ mgr->enable_trans_key(mgr, 1); -+ omap_dss_unlock(); -+ -+ omapfb_color_keys[mgr->id] = *ck; -+ -+ return 0; -+} -+ -+static int omapfb_set_color_key(struct fb_info *fbi, -+ struct omapfb_color_key *ck) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ int r; -+ int i; -+ struct omap_overlay_manager *mgr = NULL; -+ -+ omapfb_lock(fbdev); -+ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ if (ofbi->overlays[i]->manager) { -+ mgr = ofbi->overlays[i]->manager; -+ break; -+ } -+ } -+ -+ if (!mgr) { -+ r = -EINVAL; -+ goto err; -+ } -+ -+ if (!mgr->set_default_color || -+ !mgr->set_trans_key_type_and_value || -+ !mgr->enable_trans_key) { -+ r = -ENODEV; -+ goto err; -+ } -+ -+ r = _omapfb_set_color_key(mgr, ck); -+err: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static int omapfb_get_color_key(struct fb_info *fbi, -+ struct omapfb_color_key *ck) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_overlay_manager *mgr = NULL; -+ int r = 0; -+ int i; -+ -+ omapfb_lock(fbdev); -+ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ if (ofbi->overlays[i]->manager) { -+ mgr = ofbi->overlays[i]->manager; -+ break; -+ } -+ } -+ -+ if (!mgr) { -+ r = -EINVAL; -+ goto err; -+ } -+ -+ if (!mgr->set_default_color || -+ !mgr->set_trans_key_type_and_value || -+ !mgr->enable_trans_key) { -+ r = -ENODEV; -+ goto err; -+ } -+ -+ *ck = omapfb_color_keys[mgr->id]; -+err: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static int omapfb_memory_read(struct fb_info *fbi, -+ struct omapfb_memory_read *mr) -+{ -+ struct omap_display *display = fb2display(fbi); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ void *buf; -+ int r; -+ -+ if (!display || !display->memory_read) -+ return -ENOENT; -+ -+ if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) -+ return -EFAULT; -+ -+ if (mr->w * mr->h * 3 > mr->buffer_size) -+ return -EINVAL; -+ -+ buf = vmalloc(mr->buffer_size); -+ if (!buf) { -+ DBG("vmalloc failed\n"); -+ return -ENOMEM; -+ } -+ -+ omapfb_lock(fbdev); -+ -+ omap_dss_lock(); -+ r = display->memory_read(display, buf, mr->buffer_size, -+ mr->x, mr->y, mr->w, mr->h); -+ -+ omap_dss_unlock(); -+ if (r > 0) { -+ if (copy_to_user(mr->buffer, buf, mr->buffer_size)) -+ r = -EFAULT; -+ } -+ -+ vfree(buf); -+ -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+int omapfb_get_ovl_colormode(struct omapfb2_device *fbdev, -+ struct omapfb_ovl_colormode *mode) -+{ -+ int ovl_idx = mode->overlay_idx; -+ int mode_idx = mode->mode_idx; -+ struct omap_overlay *ovl; -+ enum omap_color_mode supported_modes; -+ struct fb_var_screeninfo var; -+ int i; -+ -+ if 
(ovl_idx >= fbdev->num_overlays) -+ return -ENODEV; -+ ovl = fbdev->overlays[ovl_idx]; -+ supported_modes = ovl->supported_modes; -+ -+ mode_idx = mode->mode_idx; -+ -+ for (i = 0; i < sizeof(supported_modes) * 8; i++) { -+ if (!(supported_modes & (1 << i))) -+ continue; -+ /* -+ * It's possible that the FB doesn't support a mode -+ * that is supported by the overlay, so call the -+ * following here. -+ */ -+ if (dss_mode_to_fb_mode(1 << i, &var) < 0) -+ continue; -+ -+ mode_idx--; -+ if (mode_idx < 0) -+ break; -+ } -+ -+ if (i == sizeof(supported_modes) * 8) -+ return -ENOENT; -+ -+ mode->bits_per_pixel = var.bits_per_pixel; -+ mode->nonstd = var.nonstd; -+ mode->red = var.red; -+ mode->green = var.green; -+ mode->blue = var.blue; -+ mode->transp = var.transp; -+ -+ return 0; -+} -+ -+int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ -+ union { -+ struct omapfb_update_window_old uwnd_o; -+ struct omapfb_update_window uwnd; -+ struct omapfb_plane_info plane_info; -+ struct omapfb_caps caps; -+ struct omapfb_mem_info mem_info; -+ struct omapfb_color_key color_key; -+ struct omapfb_ovl_colormode ovl_colormode; -+ enum omapfb_update_mode update_mode; -+ int test_num; -+ struct omapfb_memory_read memory_read; -+ struct omapfb_vram_info vram_info; -+ } p; -+ -+ int r = 0; -+ -+ switch (cmd) { -+ case OMAPFB_SYNC_GFX: -+ DBG("ioctl SYNC_GFX\n"); -+ if (!display || !display->sync) { -+ /* DSS1 never returns an error here, so we neither */ -+ /*r = -EINVAL;*/ -+ break; -+ } -+ -+ omapfb_lock(fbdev); -+ omap_dss_lock(); -+ r = display->sync(display); -+ omap_dss_unlock(); -+ omapfb_unlock(fbdev); -+ break; -+ -+ case OMAPFB_UPDATE_WINDOW_OLD: -+ DBG("ioctl UPDATE_WINDOW_OLD\n"); -+ if (!display || !display->update) { -+ r = -EINVAL; -+ break; -+ } -+ -+ if (copy_from_user(&p.uwnd_o, -+ (void __user *)arg, -+ sizeof(p.uwnd_o))) { -+ r = -EFAULT; -+ break; -+ } -+ -+ r = omapfb_update_window(fbi, p.uwnd_o.x, p.uwnd_o.y, -+ p.uwnd_o.width, p.uwnd_o.height); -+ break; -+ -+ case OMAPFB_UPDATE_WINDOW: -+ DBG("ioctl UPDATE_WINDOW\n"); -+ if (!display || !display->update) { -+ r = -EINVAL; -+ break; -+ } -+ -+ if (copy_from_user(&p.uwnd, (void __user *)arg, -+ sizeof(p.uwnd))) { -+ r = -EFAULT; -+ break; -+ } -+ -+ r = omapfb_update_window(fbi, p.uwnd.x, p.uwnd.y, -+ p.uwnd.width, p.uwnd.height); -+ break; -+ -+ case OMAPFB_SETUP_PLANE: -+ DBG("ioctl SETUP_PLANE\n"); -+ if (copy_from_user(&p.plane_info, (void __user *)arg, -+ sizeof(p.plane_info))) -+ r = -EFAULT; -+ else -+ r = omapfb_setup_plane(fbi, &p.plane_info); -+ break; -+ -+ case OMAPFB_QUERY_PLANE: -+ DBG("ioctl QUERY_PLANE\n"); -+ r = omapfb_query_plane(fbi, &p.plane_info); -+ if (r < 0) -+ break; -+ if (copy_to_user((void __user *)arg, &p.plane_info, -+ sizeof(p.plane_info))) -+ r = -EFAULT; -+ break; -+ -+ case OMAPFB_SETUP_MEM: -+ DBG("ioctl SETUP_MEM\n"); -+ if (copy_from_user(&p.mem_info, (void __user *)arg, -+ sizeof(p.mem_info))) -+ r = -EFAULT; -+ else -+ r = omapfb_setup_mem(fbi, &p.mem_info); -+ break; -+ -+ case OMAPFB_QUERY_MEM: -+ DBG("ioctl QUERY_MEM\n"); -+ r = omapfb_query_mem(fbi, &p.mem_info); -+ if (r < 0) -+ break; -+ if (copy_to_user((void __user *)arg, &p.mem_info, -+ sizeof(p.mem_info))) -+ r = -EFAULT; -+ break; -+ -+ case OMAPFB_GET_CAPS: -+ DBG("ioctl GET_CAPS\n"); -+ if (!display) { -+ r = -EINVAL; -+ break; -+ } -+ -+ memset(&p.caps, 0, 
sizeof(p.caps)); -+ p.caps.ctrl = display->caps; -+ -+ if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps))) -+ r = -EFAULT; -+ break; -+ -+ case OMAPFB_GET_OVERLAY_COLORMODE: -+ DBG("ioctl GET_OVERLAY_COLORMODE\n"); -+ if (copy_from_user(&p.ovl_colormode, (void __user *)arg, -+ sizeof(p.ovl_colormode))) { -+ r = -EFAULT; -+ break; -+ } -+ r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode); -+ if (r < 0) -+ break; -+ if (copy_to_user((void __user *)arg, &p.ovl_colormode, -+ sizeof(p.ovl_colormode))) -+ r = -EFAULT; -+ break; -+ -+ case OMAPFB_SET_UPDATE_MODE: -+ DBG("ioctl SET_UPDATE_MODE\n"); -+ if (get_user(p.update_mode, (int __user *)arg)) -+ r = -EFAULT; -+ else -+ r = omapfb_set_update_mode(fbi, p.update_mode); -+ break; -+ -+ case OMAPFB_GET_UPDATE_MODE: -+ DBG("ioctl GET_UPDATE_MODE\n"); -+ r = omapfb_get_update_mode(fbi, &p.update_mode); -+ if (r) -+ break; -+ if (put_user(p.update_mode, -+ (enum omapfb_update_mode __user *)arg)) -+ r = -EFAULT; -+ break; -+ -+ case OMAPFB_SET_COLOR_KEY: -+ DBG("ioctl SET_COLOR_KEY\n"); -+ if (copy_from_user(&p.color_key, (void __user *)arg, -+ sizeof(p.color_key))) -+ r = -EFAULT; -+ else -+ r = omapfb_set_color_key(fbi, &p.color_key); -+ break; -+ -+ case OMAPFB_GET_COLOR_KEY: -+ DBG("ioctl GET_COLOR_KEY\n"); -+ if ((r = omapfb_get_color_key(fbi, &p.color_key)) < 0) -+ break; -+ if (copy_to_user((void __user *)arg, &p.color_key, -+ sizeof(p.color_key))) -+ r = -EFAULT; -+ break; -+ -+ case OMAPFB_WAITFORVSYNC: -+ DBG("ioctl WAITFORVSYNC\n"); -+ if (!display) { -+ r = -EINVAL; -+ break; -+ } -+ -+ omap_dss_lock(); -+ r = display->wait_vsync(display); -+ omap_dss_unlock(); -+ break; -+ -+ /* LCD and CTRL tests do the same thing for backward -+ * compatibility */ -+ case OMAPFB_LCD_TEST: -+ DBG("ioctl LCD_TEST\n"); -+ if (get_user(p.test_num, (int __user *)arg)) { -+ r = -EFAULT; -+ break; -+ } -+ if (!display || !display->run_test) { -+ r = -EINVAL; -+ break; -+ } -+ -+ omap_dss_lock(); -+ r = display->run_test(display, p.test_num); -+ omap_dss_unlock(); -+ -+ break; -+ -+ case OMAPFB_CTRL_TEST: -+ DBG("ioctl CTRL_TEST\n"); -+ if (get_user(p.test_num, (int __user *)arg)) { -+ r = -EFAULT; -+ break; -+ } -+ if (!display || !display->run_test) { -+ r = -EINVAL; -+ break; -+ } -+ -+ omap_dss_lock(); -+ r = display->run_test(display, p.test_num); -+ omap_dss_unlock(); -+ -+ break; -+ -+ case OMAPFB_MEMORY_READ: -+ DBG("ioctl MEMORY_READ\n"); -+ -+ if (copy_from_user(&p.memory_read, (void __user *)arg, -+ sizeof(p.memory_read))) { -+ r = -EFAULT; -+ break; -+ } -+ -+ r = omapfb_memory_read(fbi, &p.memory_read); -+ -+ break; -+ -+ case OMAPFB_GET_VRAM_INFO: { -+ unsigned long vram, free, largest; -+ -+ DBG("ioctl GET_VRAM_INFO\n"); -+ -+ omap_vram_get_info(&vram, &free, &largest); -+ p.vram_info.total = vram; -+ p.vram_info.free = free; -+ p.vram_info.largest_free_block = largest; -+ -+ if (copy_to_user((void __user *)arg, &p.vram_info, -+ sizeof(p.vram_info))) -+ r = -EFAULT; -+ break; -+ } -+ -+ default: -+ dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd); -+ r = -EINVAL; -+ } -+ -+ if (r < 0) -+ DBG("ioctl failed: %d\n", r); -+ -+ return r; -+} -+ -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb-main.c linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb-main.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb-main.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb-main.c 2011-06-22 13:19:33.183063271 +0200 -@@ -0,0 +1,2232 @@ -+/* -+ * 
linux/drivers/video/omap2/omapfb-main.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "omapfb.h" -+ -+#define MODULE_NAME "omapfb" -+ -+static char *def_mode; -+static char *def_vram; -+static int def_vrfb; -+static int def_rotate; -+static int def_mirror; -+ -+#ifdef DEBUG -+unsigned int omapfb_debug; -+module_param_named(debug, omapfb_debug, bool, 0644); -+static unsigned int omapfb_test_pattern; -+module_param_named(test, omapfb_test_pattern, bool, 0644); -+#endif -+ -+#ifdef DEBUG -+static void draw_pixel(struct fb_info *fbi, int x, int y, unsigned color) -+{ -+ struct fb_var_screeninfo *var = &fbi->var; -+ struct fb_fix_screeninfo *fix = &fbi->fix; -+ void __iomem *addr = fbi->screen_base; -+ const unsigned bytespp = var->bits_per_pixel >> 3; -+ const unsigned line_len = fix->line_length / bytespp; -+ -+ int r = (color >> 16) & 0xff; -+ int g = (color >> 8) & 0xff; -+ int b = (color >> 0) & 0xff; -+ -+ if (var->bits_per_pixel == 16) { -+ u16 __iomem *p = (u16 __iomem *)addr; -+ p += y * line_len + x; -+ -+ r = r * 32 / 256; -+ g = g * 64 / 256; -+ b = b * 32 / 256; -+ -+ __raw_writew((r << 11) | (g << 5) | (b << 0), p); -+ } else if (var->bits_per_pixel == 24) { -+ u8 __iomem *p = (u8 __iomem *)addr; -+ p += (y * line_len + x) * 3; -+ -+ __raw_writeb(b, p + 0); -+ __raw_writeb(g, p + 1); -+ __raw_writeb(r, p + 2); -+ } else if (var->bits_per_pixel == 32) { -+ u32 __iomem *p = (u32 __iomem *)addr; -+ p += y * line_len + x; -+ __raw_writel(color, p); -+ } -+} -+ -+static void fill_fb(struct fb_info *fbi) -+{ -+ struct fb_var_screeninfo *var = &fbi->var; -+ const short w = var->xres_virtual; -+ const short h = var->yres_virtual; -+ void __iomem *addr = fbi->screen_base; -+ int y, x; -+ -+ if (!addr) -+ return; -+ -+ DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length); -+ -+ for (y = 0; y < h; y++) { -+ for (x = 0; x < w; x++) { -+ if (x < 20 && y < 20) -+ draw_pixel(fbi, x, y, 0xffffff); -+ else if (x < 20 && (y > 20 && y < h - 20)) -+ draw_pixel(fbi, x, y, 0xff); -+ else if (y < 20 && (x > 20 && x < w - 20)) -+ draw_pixel(fbi, x, y, 0xff00); -+ else if (x > w - 20 && (y > 20 && y < h - 20)) -+ draw_pixel(fbi, x, y, 0xff0000); -+ else if (y > h - 20 && (x > 20 && x < w - 20)) -+ draw_pixel(fbi, x, y, 0xffff00); -+ else if (x == 20 || x == w - 20 || -+ y == 20 || y == h - 20) -+ draw_pixel(fbi, x, y, 0xffffff); -+ else if (x == y || w - x == h - y) -+ draw_pixel(fbi, x, y, 0xff00ff); -+ else if (w - x == y || x == h - y) -+ draw_pixel(fbi, x, y, 0x00ffff); -+ else if (x > 20 && y > 20 && x < w - 20 && y < h - 20) { -+ int t = x * 3 / w; -+ unsigned r = 0, g = 0, b = 0; -+ unsigned c; -+ if (var->bits_per_pixel == 16) { -+ if (t == 0) -+ b = (y % 
32) * 256 / 32; -+ else if (t == 1) -+ g = (y % 64) * 256 / 64; -+ else if (t == 2) -+ r = (y % 32) * 256 / 32; -+ } else { -+ if (t == 0) -+ b = (y % 256); -+ else if (t == 1) -+ g = (y % 256); -+ else if (t == 2) -+ r = (y % 256); -+ } -+ c = (r << 16) | (g << 8) | (b << 0); -+ draw_pixel(fbi, x, y, c); -+ } else { -+ draw_pixel(fbi, x, y, 0); -+ } -+ } -+ } -+} -+#endif -+ -+static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot, -+ const struct fb_var_screeninfo *var) -+{ -+ const struct vrfb *vrfb = &ofbi->region.vrfb; -+ unsigned offset; -+ unsigned int yoffset = var->yres_virtual - var->yres + vrfb->yoffset; -+ unsigned int xoffset = var->xres_virtual - var->xres + vrfb->xoffset; -+ -+ switch (rot) { -+ case FB_ROTATE_UR: -+ offset = 0; -+ offset += var->yoffset * OMAP_VRFB_LINE_LEN; -+ offset += var->xoffset; -+ break; -+ case FB_ROTATE_CW: -+ offset = yoffset; -+ offset += var->xoffset * OMAP_VRFB_LINE_LEN; -+ offset -= var->yoffset; -+ break; -+ case FB_ROTATE_UD: -+ offset = yoffset * OMAP_VRFB_LINE_LEN + xoffset; -+ offset -= var->xoffset; -+ offset -= var->yoffset * OMAP_VRFB_LINE_LEN; -+ break; -+ case FB_ROTATE_CCW: -+ offset = xoffset * OMAP_VRFB_LINE_LEN; -+ offset -= var->xoffset * OMAP_VRFB_LINE_LEN; -+ offset += var->yoffset; -+ break; -+ default: -+ BUG(); -+ } -+ -+ offset *= vrfb->bytespp; -+ -+ return offset; -+} -+ -+static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot, -+ const struct fb_var_screeninfo *var) -+{ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ return ofbi->region.vrfb.paddr[rot] -+ + omapfb_get_vrfb_offset(ofbi, rot, var); -+ } else { -+ return ofbi->region.paddr; -+ } -+} -+ -+static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi) -+{ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) -+ return ofbi->region.vrfb.paddr[0]; -+ else -+ return ofbi->region.paddr; -+} -+ -+static void __iomem *omapfb_get_region_vaddr(struct omapfb_info *ofbi) -+{ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) -+ return ofbi->region.vrfb.vaddr[0]; -+ else -+ return ofbi->region.vaddr; -+} -+ -+static struct omapfb_colormode omapfb_colormodes[] = { -+ { -+ .dssmode = OMAP_DSS_COLOR_UYVY, -+ .bits_per_pixel = 16, -+ .nonstd = OMAPFB_COLOR_YUV422, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_YUV2, -+ .bits_per_pixel = 16, -+ .nonstd = OMAPFB_COLOR_YUY422, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_ARGB16, -+ .bits_per_pixel = 16, -+ .red = { .length = 4, .offset = 8, .msb_right = 0 }, -+ .green = { .length = 4, .offset = 4, .msb_right = 0 }, -+ .blue = { .length = 4, .offset = 0, .msb_right = 0 }, -+ .transp = { .length = 4, .offset = 12, .msb_right = 0 }, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_RGB16, -+ .bits_per_pixel = 16, -+ .red = { .length = 5, .offset = 11, .msb_right = 0 }, -+ .green = { .length = 6, .offset = 5, .msb_right = 0 }, -+ .blue = { .length = 5, .offset = 0, .msb_right = 0 }, -+ .transp = { .length = 0, .offset = 0, .msb_right = 0 }, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_RGB24P, -+ .bits_per_pixel = 24, -+ .red = { .length = 8, .offset = 16, .msb_right = 0 }, -+ .green = { .length = 8, .offset = 8, .msb_right = 0 }, -+ .blue = { .length = 8, .offset = 0, .msb_right = 0 }, -+ .transp = { .length = 0, .offset = 0, .msb_right = 0 }, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_RGB24U, -+ .bits_per_pixel = 32, -+ .red = { .length = 8, .offset = 16, .msb_right = 0 }, -+ .green = { .length = 8, .offset = 8, .msb_right = 0 }, -+ .blue = { .length = 8, .offset = 0, .msb_right = 0 }, -+ .transp = { .length = 0, .offset = 0, 
.msb_right = 0 }, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_ARGB32, -+ .bits_per_pixel = 32, -+ .red = { .length = 8, .offset = 16, .msb_right = 0 }, -+ .green = { .length = 8, .offset = 8, .msb_right = 0 }, -+ .blue = { .length = 8, .offset = 0, .msb_right = 0 }, -+ .transp = { .length = 8, .offset = 24, .msb_right = 0 }, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_RGBA32, -+ .bits_per_pixel = 32, -+ .red = { .length = 8, .offset = 24, .msb_right = 0 }, -+ .green = { .length = 8, .offset = 16, .msb_right = 0 }, -+ .blue = { .length = 8, .offset = 8, .msb_right = 0 }, -+ .transp = { .length = 8, .offset = 0, .msb_right = 0 }, -+ }, { -+ .dssmode = OMAP_DSS_COLOR_RGBX32, -+ .bits_per_pixel = 32, -+ .red = { .length = 8, .offset = 24, .msb_right = 0 }, -+ .green = { .length = 8, .offset = 16, .msb_right = 0 }, -+ .blue = { .length = 8, .offset = 8, .msb_right = 0 }, -+ .transp = { .length = 0, .offset = 0, .msb_right = 0 }, -+ }, -+}; -+ -+static bool cmp_var_to_colormode(struct fb_var_screeninfo *var, -+ struct omapfb_colormode *color) -+{ -+ bool cmp_component(struct fb_bitfield *f1, struct fb_bitfield *f2) -+ { -+ return f1->length == f2->length && -+ f1->offset == f2->offset && -+ f1->msb_right == f2->msb_right; -+ } -+ -+ if (var->bits_per_pixel == 0 || -+ var->red.length == 0 || -+ var->blue.length == 0 || -+ var->green.length == 0) -+ return 0; -+ -+ return var->bits_per_pixel == color->bits_per_pixel && -+ cmp_component(&var->red, &color->red) && -+ cmp_component(&var->green, &color->green) && -+ cmp_component(&var->blue, &color->blue) && -+ cmp_component(&var->transp, &color->transp); -+} -+ -+static void assign_colormode_to_var(struct fb_var_screeninfo *var, -+ struct omapfb_colormode *color) -+{ -+ var->bits_per_pixel = color->bits_per_pixel; -+ var->nonstd = color->nonstd; -+ var->red = color->red; -+ var->green = color->green; -+ var->blue = color->blue; -+ var->transp = color->transp; -+} -+ -+static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var) -+{ -+ enum omap_color_mode dssmode; -+ int i; -+ -+ /* first match with nonstd field */ -+ if (var->nonstd) { -+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { -+ struct omapfb_colormode *mode = &omapfb_colormodes[i]; -+ if (var->nonstd == mode->nonstd) { -+ assign_colormode_to_var(var, mode); -+ return mode->dssmode; -+ } -+ } -+ -+ return -EINVAL; -+ } -+ -+ /* then try exact match of bpp and colors */ -+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { -+ struct omapfb_colormode *mode = &omapfb_colormodes[i]; -+ if (cmp_var_to_colormode(var, mode)) { -+ assign_colormode_to_var(var, mode); -+ return mode->dssmode; -+ } -+ } -+ -+ /* fail if the colors were specified but the didn't match */ -+ if (var->red.length != 0 || var->blue.length != 0 || -+ var->green.length != 0 || var->transp.length != 0) -+ return -EINVAL; -+ -+ /* match with bpp if user has not filled color fields -+ * properly */ -+ switch (var->bits_per_pixel) { -+ case 1: -+ dssmode = OMAP_DSS_COLOR_CLUT1; -+ break; -+ case 2: -+ dssmode = OMAP_DSS_COLOR_CLUT2; -+ break; -+ case 4: -+ dssmode = OMAP_DSS_COLOR_CLUT4; -+ break; -+ case 8: -+ dssmode = OMAP_DSS_COLOR_CLUT8; -+ break; -+ case 12: -+ dssmode = OMAP_DSS_COLOR_RGB12U; -+ break; -+ case 16: -+ dssmode = OMAP_DSS_COLOR_RGB16; -+ break; -+ case 24: -+ dssmode = OMAP_DSS_COLOR_RGB24P; -+ break; -+ case 32: -+ dssmode = OMAP_DSS_COLOR_RGB24U; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { -+ struct omapfb_colormode *mode = &omapfb_colormodes[i]; -+ 
if (dssmode == mode->dssmode) { -+ assign_colormode_to_var(var, mode); -+ return mode->dssmode; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+static int check_fb_res_bounds(struct fb_var_screeninfo *var) -+{ -+ int xres_min = OMAPFB_PLANE_XRES_MIN; -+ int xres_max = 2048; -+ int yres_min = OMAPFB_PLANE_YRES_MIN; -+ int yres_max = 2048; -+ -+ /* XXX: some applications seem to set virtual res to 0. */ -+ if (var->xres_virtual == 0) -+ var->xres_virtual = var->xres; -+ -+ if (var->yres_virtual == 0) -+ var->yres_virtual = var->yres; -+ -+ if (var->xres_virtual < xres_min || var->yres_virtual < yres_min) -+ return -EINVAL; -+ -+ if (var->xres < xres_min) -+ var->xres = xres_min; -+ if (var->yres < yres_min) -+ var->yres = yres_min; -+ if (var->xres > xres_max) -+ var->xres = xres_max; -+ if (var->yres > yres_max) -+ var->yres = yres_max; -+ -+ if (var->xres > var->xres_virtual) -+ var->xres = var->xres_virtual; -+ if (var->yres > var->yres_virtual) -+ var->yres = var->yres_virtual; -+ -+ return 0; -+} -+ -+static void shrink_height(unsigned long max_frame_size, -+ struct fb_var_screeninfo *var) -+{ -+ DBG("can't fit FB into memory, reducing y\n"); -+ var->yres_virtual = max_frame_size / -+ (var->xres_virtual * var->bits_per_pixel >> 3); -+ -+ if (var->yres_virtual < OMAPFB_PLANE_YRES_MIN) -+ var->yres_virtual = OMAPFB_PLANE_YRES_MIN; -+ -+ if (var->yres > var->yres_virtual) -+ var->yres = var->yres_virtual; -+} -+ -+static void shrink_width(unsigned long max_frame_size, -+ struct fb_var_screeninfo *var) -+{ -+ DBG("can't fit FB into memory, reducing x\n"); -+ var->xres_virtual = max_frame_size / var->yres_virtual / -+ (var->bits_per_pixel >> 3); -+ -+ if (var->xres_virtual < OMAPFB_PLANE_XRES_MIN) -+ var->xres_virtual = OMAPFB_PLANE_XRES_MIN; -+ -+ if (var->xres > var->xres_virtual) -+ var->xres = var->xres_virtual; -+} -+ -+static int check_vrfb_fb_size(unsigned long region_size, -+ const struct fb_var_screeninfo *var) -+{ -+ unsigned long min_phys_size = omap_vrfb_min_phys_size(var->xres_virtual, -+ var->yres_virtual, var->bits_per_pixel >> 3); -+ -+ return min_phys_size > region_size ? -EINVAL : 0; -+} -+ -+static int check_fb_size(const struct omapfb_info *ofbi, -+ struct fb_var_screeninfo *var) -+{ -+ unsigned long max_frame_size = ofbi->region.size; -+ int bytespp = var->bits_per_pixel >> 3; -+ unsigned long line_size = var->xres_virtual * bytespp; -+ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ /* One needs to check for both VRFB and OMAPFB limitations. */ -+ if (check_vrfb_fb_size(max_frame_size, var)) -+ shrink_height(omap_vrfb_max_height( -+ max_frame_size, var->xres_virtual, bytespp) * -+ line_size, var); -+ -+ if (check_vrfb_fb_size(max_frame_size, var)) { -+ DBG("cannot fit FB to memory\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+ } -+ -+ DBG("max frame size %lu, line size %lu\n", max_frame_size, line_size); -+ -+ if (line_size * var->yres_virtual > max_frame_size) -+ shrink_height(max_frame_size, var); -+ -+ if (line_size * var->yres_virtual > max_frame_size) { -+ shrink_width(max_frame_size, var); -+ line_size = var->xres_virtual * bytespp; -+ } -+ -+ if (line_size * var->yres_virtual > max_frame_size) { -+ DBG("cannot fit FB to memory\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+/* -+ * Consider if VRFB assisted rotation is in use and if the virtual space for -+ * the zero degree view needs to be mapped. The need for mapping also acts as -+ * the trigger for setting up the hardware on the context in question. 
This -+ * ensures that one does not attempt to access the virtual view before the -+ * hardware is serving the address translations. -+ */ -+static int check_rotation(struct fb_info *fbi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_mem_region *rg = &ofbi->region; -+ -+ if (rg->size && ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ struct fb_var_screeninfo *var = &fbi->var; -+ struct fb_fix_screeninfo *fix = &fbi->fix; -+ struct vrfb *vrfb = &rg->vrfb; -+ -+ /* -+ * In case of resolution change, one needs to reset the HW -+ * invalidating the 0 angle view and any existing mapping. -+ */ -+ if ((vrfb->xres != var->xres_virtual || -+ vrfb->yres != var->yres_virtual || -+ vrfb->bytespp != var->bits_per_pixel >> 3) && vrfb->vaddr[0]) { -+ fbi->screen_base = NULL; -+ fix->smem_start = 0; -+ fix->smem_len = 0; -+ iounmap(vrfb->vaddr[0]); -+ vrfb->vaddr[0] = NULL; -+ } -+ -+ if (!vrfb->vaddr[0]) { -+ int r; -+ -+ omap_vrfb_setup(&rg->vrfb, rg->paddr, -+ var->xres_virtual, -+ var->yres_virtual, -+ fb_mode_to_dss_mode(var)); -+ -+ /* Now one can ioremap the 0 angle view */ -+ r = omap_vrfb_map_angle(vrfb, var->yres_virtual, 0); -+ if (r) -+ return r; -+ -+ /* used by open/write in fbmem.c */ -+ fbi->screen_base = ofbi->region.vrfb.vaddr[0]; -+ -+ fix->smem_start = ofbi->region.vrfb.paddr[0]; -+ fix->smem_len = var->yres_virtual * OMAP_VRFB_LINE_LEN * -+ var->bits_per_pixel >> 3; -+ } -+ } -+ -+ return 0; -+} -+ -+int dss_mode_to_fb_mode(enum omap_color_mode dssmode, -+ struct fb_var_screeninfo *var) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { -+ struct omapfb_colormode *mode = &omapfb_colormodes[i]; -+ if (dssmode == mode->dssmode) { -+ assign_colormode_to_var(var, mode); -+ return 0; -+ } -+ } -+ return -ENOENT; -+} -+ -+void set_fb_fix(struct fb_info *fbi) -+{ -+ struct fb_fix_screeninfo *fix = &fbi->fix; -+ struct fb_var_screeninfo *var = &fbi->var; -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ -+ DBG("set_fb_fix\n"); -+ DBG("changing rotation to %d\n", var->rotate); -+ -+ /* used by mmap in fbmem.c */ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ switch (var->nonstd) { -+ case OMAPFB_COLOR_YUV422: -+ case OMAPFB_COLOR_YUY422: -+ fix->line_length = -+ (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2; -+ break; -+ default: -+ fix->line_length = -+ (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3; -+ break; -+ } -+ } else { -+ /* used by open/write in fbmem.c */ -+ fbi->screen_base = -+ (char __iomem *)omapfb_get_region_vaddr(ofbi); -+ -+ fix->line_length = -+ (var->xres_virtual * var->bits_per_pixel) >> 3; -+ fix->smem_start = ofbi->region.paddr; -+ fix->smem_len = ofbi->region.size; -+ } -+ -+ fix->type = FB_TYPE_PACKED_PIXELS; -+ -+ if (var->nonstd) -+ fix->visual = FB_VISUAL_PSEUDOCOLOR; -+ else { -+ switch (var->bits_per_pixel) { -+ case 32: -+ case 24: -+ case 16: -+ case 12: -+ fix->visual = FB_VISUAL_TRUECOLOR; -+ /* 12bpp is stored in 16 bits */ -+ break; -+ case 1: -+ case 2: -+ case 4: -+ case 8: -+ fix->visual = FB_VISUAL_PSEUDOCOLOR; -+ break; -+ } -+ } -+ -+ fix->accel = FB_ACCEL_NONE; -+ -+ fix->xpanstep = 1; -+ fix->ypanstep = 1; -+} -+ -+/* check new var and possibly modify it to be ok */ -+int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omap_display *display = fb2display(fbi); -+ enum omap_color_mode mode = 0; -+ int i; -+ -+ DBG("check_fb_var %d\n", ofbi->id); -+ -+ if (ofbi->region.size == 0) -+ return 0; -+ -+ mode = fb_mode_to_dss_mode(var); -+ 
if (mode < 0) { -+ DBG("cannot convert var to omap dss mode\n"); -+ return -EINVAL; -+ } -+ -+ for (i = 0; i < ofbi->num_overlays; ++i) { -+ if ((ofbi->overlays[i]->supported_modes & mode) == 0) { -+ DBG("invalid mode\n"); -+ return -EINVAL; -+ } -+ } -+ -+ if (var->rotate < 0 || var->rotate > 3) -+ return -EINVAL; -+ -+ if (check_fb_res_bounds(var)) -+ return -EINVAL; -+ -+ if (check_fb_size(ofbi, var)) -+ return -EINVAL; -+ -+ if (var->xres + var->xoffset > var->xres_virtual) -+ var->xoffset = var->xres_virtual - var->xres; -+ if (var->yres + var->yoffset > var->yres_virtual) -+ var->yoffset = var->yres_virtual - var->yres; -+ -+ DBG("xres = %d, yres = %d, vxres = %d, vyres = %d\n", -+ var->xres, var->yres, -+ var->xres_virtual, var->yres_virtual); -+ -+ var->height = -1; -+ var->width = -1; -+ var->grayscale = 0; -+ -+ if (display && display->get_timings) { -+ struct omap_video_timings timings; -+ display->get_timings(display, &timings); -+ -+ /* pixclock in ps, the rest in pixclock */ -+ var->pixclock = timings.pixel_clock != 0 ? -+ KHZ2PICOS(timings.pixel_clock) : -+ 0; -+ var->left_margin = timings.hfp; -+ var->right_margin = timings.hbp; -+ var->upper_margin = timings.vfp; -+ var->lower_margin = timings.vbp; -+ var->hsync_len = timings.hsw; -+ var->vsync_len = timings.vsw; -+ } else { -+ var->pixclock = 0; -+ var->left_margin = 0; -+ var->right_margin = 0; -+ var->upper_margin = 0; -+ var->lower_margin = 0; -+ var->hsync_len = 0; -+ var->vsync_len = 0; -+ } -+ -+ /* TODO: get these from panel->config */ -+ var->vmode = FB_VMODE_NONINTERLACED; -+ var->sync = 0; -+ -+ return 0; -+} -+ -+/* -+ * --------------------------------------------------------------------------- -+ * fbdev framework callbacks -+ * --------------------------------------------------------------------------- -+ */ -+static int omapfb_open(struct fb_info *fbi, int user) -+{ -+ return 0; -+} -+ -+static int omapfb_release(struct fb_info *fbi, int user) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ -+ DBG("Closing fb with plane index %d\n", ofbi->id); -+ -+ omapfb_lock(fbdev); -+ -+ omap_dss_lock(); -+#if 1 -+ if (display && display->get_update_mode && display->update) { -+ /* XXX this update should be removed, I think. 
But it's -+ * good for debugging */ -+ if (display->get_update_mode(display) == -+ OMAP_DSS_UPDATE_MANUAL) { -+ u16 w, h; -+ -+ if (display->sync) -+ display->sync(display); -+ -+ display->get_resolution(display, &w, &h); -+ display->update(display, 0, 0, w, h); -+ } -+ } -+#endif -+ -+ if (display && display->sync) -+ display->sync(display); -+ -+ omap_dss_unlock(); -+ -+ omapfb_unlock(fbdev); -+ -+ return 0; -+} -+ -+/* setup overlay according to the fb */ -+static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, -+ u16 posx, u16 posy, u16 outw, u16 outh) -+{ -+ int r = 0; -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct fb_var_screeninfo *var = &fbi->var; -+ struct fb_fix_screeninfo *fix = &fbi->fix; -+ int mode; -+ u32 data_start_p; -+ void __iomem *data_start_v; -+ struct omap_overlay_info info; -+ int xres, yres; -+ int screen_width; -+ int mirror; -+ int rotation = var->rotate; -+ int i; -+ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ if (ovl != ofbi->overlays[i]) -+ continue; -+ -+ rotation = (rotation + ofbi->rotation[i]) % 4; -+ break; -+ } -+ -+ DBG("setup_overlay %d, posx %d, posy %d, outw %d, outh %d\n", ofbi->id, -+ posx, posy, outw, outh); -+ -+ if (rotation == FB_ROTATE_CW || rotation == FB_ROTATE_CCW) { -+ xres = var->yres; -+ yres = var->xres; -+ } else { -+ xres = var->xres; -+ yres = var->yres; -+ } -+ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation, var); -+ data_start_v = NULL; -+ } else { -+ unsigned int offset = var->yoffset * fix->line_length + -+ ((var->xoffset * var->bits_per_pixel) >> 3); -+ -+ data_start_p = omapfb_get_region_paddr(ofbi) + offset; -+ data_start_v = omapfb_get_region_vaddr(ofbi) + offset; -+ } -+ -+ mode = fb_mode_to_dss_mode(var); -+ -+ if (mode == -EINVAL) { -+ DBG("fb_mode_to_dss_mode failed"); -+ r = -EINVAL; -+ goto err; -+ } -+ -+ switch (var->nonstd) { -+ case OMAPFB_COLOR_YUV422: -+ case OMAPFB_COLOR_YUY422: -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ screen_width = fix->line_length -+ / (var->bits_per_pixel >> 2); -+ break; -+ } -+ default: -+ screen_width = fix->line_length / (var->bits_per_pixel >> 3); -+ break; -+ } -+ -+ ovl->get_overlay_info(ovl, &info); -+ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) -+ mirror = 0; -+ else -+ mirror = ofbi->mirror; -+ -+ info.paddr = data_start_p; -+ info.vaddr = data_start_v; -+ info.screen_width = screen_width; -+ info.width = xres; -+ info.height = yres; -+ info.color_mode = mode; -+ info.rotation_type = ofbi->rotation_type; -+ info.rotation = rotation; -+ info.mirror = mirror; -+ -+ info.pos_x = posx; -+ info.pos_y = posy; -+ info.out_width = outw; -+ info.out_height = outh; -+ -+ r = ovl->set_overlay_info(ovl, &info); -+ if (r) { -+ DBG("ovl->setup_overlay_info failed\n"); -+ goto err; -+ } -+ -+ return 0; -+ -+err: -+ DBG("setup_overlay failed\n"); -+ return r; -+} -+ -+/* apply var to the overlay */ -+int omapfb_apply_changes(struct fb_info *fbi, int init) -+{ -+ int r = 0; -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct fb_var_screeninfo *var = &fbi->var; -+ struct omap_overlay *ovl; -+ struct omap_overlay_manager *mgr = NULL; -+ u16 posx, posy; -+ u16 outw, outh; -+ int i; -+ -+#ifdef DEBUG -+ if (omapfb_test_pattern) -+ fill_fb(fbi); -+#endif -+ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ ovl = ofbi->overlays[i]; -+ -+ DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id); -+ -+ if (ofbi->region.size == 0) { -+ /* the fb is not available. 
disable the overlay */ -+ omapfb_overlay_enable(ovl, 0); -+ if (!init && ovl->manager) { -+ omap_dss_lock(); -+ ovl->manager->apply(ovl->manager); -+ omap_dss_unlock(); -+ } -+ continue; -+ } -+ -+ if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { -+ int rotation = (var->rotate + ofbi->rotation[i]) % 4; -+ if (rotation == FB_ROTATE_CW || -+ rotation == FB_ROTATE_CCW) { -+ outw = var->yres; -+ outh = var->xres; -+ } else { -+ outw = var->xres; -+ outh = var->yres; -+ } -+ } else { -+ outw = ovl->info.out_width; -+ outh = ovl->info.out_height; -+ } -+ -+ if (init) { -+ posx = 0; -+ posy = 0; -+ } else { -+ posx = ovl->info.pos_x; -+ posy = ovl->info.pos_y; -+ } -+ -+ r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh); -+ if (r) -+ goto err; -+ -+ /* -+ * Do not call mgr->apply() for all managers here. -+ * mgr->apply() will itself go over all the managers so it's -+ * enough to call it just once after all overlays are setup -+ * properly. -+ */ -+ if (!mgr) -+ mgr = ovl->manager; -+ } -+ -+ if (!init && mgr) { -+ omap_dss_lock(); -+ mgr->apply(mgr); -+ omap_dss_unlock(); -+ } -+ -+ return 0; -+err: -+ DBG("apply_changes failed\n"); -+ return r; -+} -+ -+/* checks var and eventually tweaks it to something supported, -+ * DO NOT MODIFY PAR */ -+static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) -+{ -+ int r; -+ -+ DBG("check_var(%d)\n", FB2OFB(fbi)->id); -+ -+ r = check_fb_var(fbi, var); -+ -+ return r; -+} -+ -+/* set the video mode according to info->var */ -+static int omapfb_set_par(struct fb_info *fbi) -+{ -+ int r; -+ -+ DBG("set_par(%d)\n", FB2OFB(fbi)->id); -+ -+ set_fb_fix(fbi); -+ -+ r = check_rotation(fbi); -+ if (r) -+ return r; -+ -+ r = omapfb_apply_changes(fbi, 0); -+ -+ return r; -+} -+ -+static int omapfb_pan_display(struct fb_var_screeninfo *var, -+ struct fb_info *fbi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct fb_var_screeninfo new_var; -+ int r; -+ -+ DBG("pan_display(%d)\n", ofbi->id); -+ -+ omapfb_lock(fbdev); -+ -+ if (var->xoffset == fbi->var.xoffset && -+ var->yoffset == fbi->var.yoffset) { -+ omapfb_unlock(fbdev); -+ return 0; -+ } -+ -+ new_var = fbi->var; -+ new_var.xoffset = var->xoffset; -+ new_var.yoffset = var->yoffset; -+ -+ fbi->var = new_var; -+ -+ r = omapfb_apply_changes(fbi, 0); -+ -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static void mmap_user_open(struct vm_area_struct *vma) -+{ -+ struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data; -+ -+ atomic_inc(&ofbi->map_count); -+} -+ -+static void mmap_user_close(struct vm_area_struct *vma) -+{ -+ struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data; -+ -+ atomic_dec(&ofbi->map_count); -+} -+ -+static struct vm_operations_struct mmap_user_ops = { -+ .open = mmap_user_open, -+ .close = mmap_user_close, -+}; -+ -+static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct fb_fix_screeninfo *fix = &fbi->fix; -+ unsigned long off; -+ unsigned long start; -+ u32 len; -+ -+ if (vma->vm_end - vma->vm_start == 0) -+ return 0; -+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) -+ return -EINVAL; -+ off = vma->vm_pgoff << PAGE_SHIFT; -+ -+ start = omapfb_get_region_paddr(ofbi); -+ len = fix->smem_len; -+ if (off >= len) -+ return -EINVAL; -+ if ((vma->vm_end - vma->vm_start + off) > len) -+ return -EINVAL; -+ -+ off += start; -+ -+ DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off); -+ -+ 
vma->vm_pgoff = off >> PAGE_SHIFT; -+ vma->vm_flags |= VM_IO | VM_RESERVED; -+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); -+ vma->vm_ops = &mmap_user_ops; -+ vma->vm_private_data = ofbi; -+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, -+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) -+ return -EAGAIN; -+ /* vm_ops.open won't be called for mmap itself. */ -+ atomic_inc(&ofbi->map_count); -+ return 0; -+} -+ -+/* Store a single color palette entry into a pseudo palette or the hardware -+ * palette if one is available. For now we support only 16bpp and thus store -+ * the entry only to the pseudo palette. -+ */ -+static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green, -+ u_int blue, u_int transp, int update_hw_pal) -+{ -+ /*struct omapfb_info *ofbi = FB2OFB(fbi);*/ -+ /*struct omapfb2_device *fbdev = ofbi->fbdev;*/ -+ struct fb_var_screeninfo *var = &fbi->var; -+ int r = 0; -+ -+ enum omapfb_color_format mode = OMAPFB_COLOR_RGB24U; /* XXX */ -+ -+ /*switch (plane->color_mode) {*/ -+ switch (mode) { -+ case OMAPFB_COLOR_YUV422: -+ case OMAPFB_COLOR_YUV420: -+ case OMAPFB_COLOR_YUY422: -+ r = -EINVAL; -+ break; -+ case OMAPFB_COLOR_CLUT_8BPP: -+ case OMAPFB_COLOR_CLUT_4BPP: -+ case OMAPFB_COLOR_CLUT_2BPP: -+ case OMAPFB_COLOR_CLUT_1BPP: -+ /* -+ if (fbdev->ctrl->setcolreg) -+ r = fbdev->ctrl->setcolreg(regno, red, green, blue, -+ transp, update_hw_pal); -+ */ -+ /* Fallthrough */ -+ r = -EINVAL; -+ break; -+ case OMAPFB_COLOR_RGB565: -+ case OMAPFB_COLOR_RGB444: -+ case OMAPFB_COLOR_RGB24P: -+ case OMAPFB_COLOR_RGB24U: -+ if (r != 0) -+ break; -+ -+ if (regno < 0) { -+ r = -EINVAL; -+ break; -+ } -+ -+ if (regno < 16) { -+ u16 pal; -+ pal = ((red >> (16 - var->red.length)) << -+ var->red.offset) | -+ ((green >> (16 - var->green.length)) << -+ var->green.offset) | -+ (blue >> (16 - var->blue.length)); -+ ((u32 *)(fbi->pseudo_palette))[regno] = pal; -+ } -+ break; -+ default: -+ BUG(); -+ } -+ return r; -+} -+ -+static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, -+ u_int transp, struct fb_info *info) -+{ -+ DBG("setcolreg\n"); -+ -+ return _setcolreg(info, regno, red, green, blue, transp, 1); -+} -+ -+static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info) -+{ -+ int count, index, r; -+ u16 *red, *green, *blue, *transp; -+ u16 trans = 0xffff; -+ -+ DBG("setcmap\n"); -+ -+ red = cmap->red; -+ green = cmap->green; -+ blue = cmap->blue; -+ transp = cmap->transp; -+ index = cmap->start; -+ -+ for (count = 0; count < cmap->len; count++) { -+ if (transp) -+ trans = *transp++; -+ r = _setcolreg(info, index++, *red++, *green++, *blue++, trans, -+ count == cmap->len - 1); -+ if (r != 0) -+ return r; -+ } -+ -+ return 0; -+} -+ -+static int omapfb_blank(int blank, struct fb_info *fbi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ int do_update = 0; -+ int r = 0; -+ -+ omapfb_lock(fbdev); -+ -+ omap_dss_lock(); -+ -+ switch (blank) { -+ case FB_BLANK_UNBLANK: -+ if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) -+ goto exit; -+ -+ omap_dss_maximize_min_bus_tput(); -+ -+ if (display->resume) -+ r = display->resume(display); -+ -+ if (r == 0 && display->get_update_mode && -+ display->get_update_mode(display) == -+ OMAP_DSS_UPDATE_MANUAL) -+ do_update = 1; -+ -+ omap_dss_update_min_bus_tput(); -+ break; -+ -+ case FB_BLANK_NORMAL: -+ /* FB_BLANK_NORMAL could be implemented. -+ * Needs DSS additions. 
*/ -+ case FB_BLANK_VSYNC_SUSPEND: -+ case FB_BLANK_HSYNC_SUSPEND: -+ case FB_BLANK_POWERDOWN: -+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE) -+ goto exit; -+ -+ if (display->suspend) -+ r = display->suspend(display); -+ -+ omap_dss_update_min_bus_tput(); -+ break; -+ -+ default: -+ r = -EINVAL; -+ } -+ -+exit: -+ if (r == 0 && do_update && display->update) { -+ u16 w, h; -+ display->get_resolution(display, &w, &h); -+ -+ r = display->update(display, 0, 0, w, h); -+ } -+ -+ omap_dss_unlock(); -+ -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+#if 0 -+/* XXX fb_read and fb_write are needed for VRFB */ -+ssize_t omapfb_write(struct fb_info *info, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ DBG("omapfb_write %d, %lu\n", count, (unsigned long)*ppos); -+ // XXX needed for VRFB -+ return count; -+} -+#endif -+ -+static struct fb_ops omapfb_ops = { -+ .owner = THIS_MODULE, -+ .fb_open = omapfb_open, -+ .fb_release = omapfb_release, -+ .fb_fillrect = cfb_fillrect, -+ .fb_copyarea = cfb_copyarea, -+ .fb_imageblit = cfb_imageblit, -+ .fb_blank = omapfb_blank, -+ .fb_ioctl = omapfb_ioctl, -+ .fb_check_var = omapfb_check_var, -+ .fb_set_par = omapfb_set_par, -+ .fb_pan_display = omapfb_pan_display, -+ .fb_mmap = omapfb_mmap, -+ .fb_setcolreg = omapfb_setcolreg, -+ .fb_setcmap = omapfb_setcmap, -+ //.fb_write = omapfb_write, -+}; -+ -+static void omapfb_free_fbmem(struct fb_info *fbi) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omapfb2_mem_region *rg; -+ -+ rg = &ofbi->region; -+ -+ if (rg->paddr) -+ if (omap_vram_free(rg->paddr, rg->size)) -+ dev_err(fbdev->dev, "VRAM FREE failed\n"); -+ -+ if (rg->vaddr) -+ iounmap(rg->vaddr); -+ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+ /* unmap the 0 angle rotation */ -+ if (rg->vrfb.vaddr[0]) { -+ iounmap(rg->vrfb.vaddr[0]); -+ rg->vrfb.vaddr[0] = NULL; -+ omap_vrfb_release_ctx(&rg->vrfb); -+ } -+ } -+ -+ rg->vaddr = NULL; -+ rg->paddr = 0; -+ rg->alloc = 0; -+ rg->size = 0; -+} -+ -+static void clear_fb_info(struct fb_info *fbi) -+{ -+ memset(&fbi->var, 0, sizeof(fbi->var)); -+ memset(&fbi->fix, 0, sizeof(fbi->fix)); -+ strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id)); -+} -+ -+static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev) -+{ -+ int i; -+ -+ DBG("free all fbmem\n"); -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ struct fb_info *fbi = fbdev->fbs[i]; -+ omapfb_free_fbmem(fbi); -+ clear_fb_info(fbi); -+ } -+ -+ return 0; -+} -+ -+static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, -+ unsigned long paddr) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omapfb2_mem_region *rg; -+ void __iomem *vaddr; -+ int r; -+ -+ rg = &ofbi->region; -+ memset(rg, 0, sizeof(*rg)); -+ -+ size = PAGE_ALIGN(size); -+ -+ if (!paddr) { -+ DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); -+ r = omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, size, &paddr); -+ } else { -+ DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr, -+ ofbi->id); -+ r = omap_vram_reserve(paddr, size); -+ } -+ -+ if (r) { -+ dev_err(fbdev->dev, "failed to allocate framebuffer\n"); -+ return -ENOMEM; -+ } -+ -+ if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) { -+ vaddr = ioremap_wc(paddr, size); -+ -+ if (!vaddr) { -+ dev_err(fbdev->dev, "failed to ioremap framebuffer\n"); -+ omap_vram_free(paddr, size); -+ return -ENOMEM; -+ } -+ -+ DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr); -+ } else { -+ r = 
omap_vrfb_request_ctx(&rg->vrfb); -+ if (r) { -+ dev_err(fbdev->dev, "vrfb create ctx failed\n"); -+ return r; -+ } -+ -+ vaddr = NULL; -+ } -+ -+ rg->paddr = paddr; -+ rg->vaddr = vaddr; -+ rg->size = size; -+ rg->alloc = 1; -+ -+ return 0; -+} -+ -+/* allocate fbmem using display resolution as reference */ -+static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size, -+ unsigned long paddr) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omap_display *display; -+ int bytespp; -+ -+ display = fb2display(fbi); -+ -+ if (!display) -+ return 0; -+ -+ switch (display->get_recommended_bpp(display)) { -+ case 16: -+ bytespp = 2; -+ break; -+ case 24: -+ bytespp = 4; -+ break; -+ default: -+ bytespp = 4; -+ break; -+ } -+ -+ if (!size) { -+ u16 w, h; -+ -+ display->get_resolution(display, &w, &h); -+ -+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { -+#ifdef DEBUG -+ int oldw = w, oldh = h; -+#endif -+ -+ omap_vrfb_adjust_size(&w, &h, bytespp); -+ -+ /* Because we change the resolution of the 0 degree view, -+ * we need to alloc max(w, h) for height */ -+ h = max(w, h); -+ w = OMAP_VRFB_LINE_LEN; -+ -+ DBG("adjusting fb mem size for VRFB, %dx%d -> %dx%d\n", -+ oldw, oldh, w, h); -+ } -+ -+ size = w * h * bytespp; -+ } -+ -+ return omapfb_alloc_fbmem(fbi, size, paddr); -+} -+ -+static enum omap_color_mode fb_format_to_dss_mode(enum omapfb_color_format format) -+{ -+ enum omap_color_mode mode; -+ -+ switch (format) { -+ case OMAPFB_COLOR_RGB565: -+ mode = OMAP_DSS_COLOR_RGB16; -+ break; -+ case OMAPFB_COLOR_YUV422: -+ mode = OMAP_DSS_COLOR_YUV2; -+ break; -+ case OMAPFB_COLOR_CLUT_8BPP: -+ mode = OMAP_DSS_COLOR_CLUT8; -+ break; -+ case OMAPFB_COLOR_CLUT_4BPP: -+ mode = OMAP_DSS_COLOR_CLUT4; -+ break; -+ case OMAPFB_COLOR_CLUT_2BPP: -+ mode = OMAP_DSS_COLOR_CLUT2; -+ break; -+ case OMAPFB_COLOR_CLUT_1BPP: -+ mode = OMAP_DSS_COLOR_CLUT1; -+ break; -+ case OMAPFB_COLOR_RGB444: -+ mode = OMAP_DSS_COLOR_RGB12U; -+ break; -+ case OMAPFB_COLOR_YUY422: -+ mode = OMAP_DSS_COLOR_UYVY; -+ break; -+ case OMAPFB_COLOR_ARGB16: -+ mode = OMAP_DSS_COLOR_ARGB16; -+ break; -+ case OMAPFB_COLOR_RGB24U: -+ mode = OMAP_DSS_COLOR_RGB24U; -+ break; -+ case OMAPFB_COLOR_RGB24P: -+ mode = OMAP_DSS_COLOR_RGB24P; -+ break; -+ case OMAPFB_COLOR_ARGB32: -+ mode = OMAP_DSS_COLOR_ARGB32; -+ break; -+ case OMAPFB_COLOR_RGBA32: -+ mode = OMAP_DSS_COLOR_RGBA32; -+ break; -+ case OMAPFB_COLOR_RGBX32: -+ mode = OMAP_DSS_COLOR_RGBX32; -+ break; -+ default: -+ mode = -EINVAL; -+ } -+ -+ return mode; -+} -+ -+static int omapfb_parse_vram_param(const char *param, int max_entries, -+ unsigned long *sizes, unsigned long *paddrs) -+{ -+ int fbnum; -+ unsigned long size; -+ unsigned long paddr = 0; -+ char *p, *start; -+ -+ start = (char *)param; -+ -+ while (1) { -+ p = start; -+ -+ fbnum = simple_strtoul(p, &p, 10); -+ -+ if (p == param) -+ return -EINVAL; -+ -+ if (*p != ':') -+ return -EINVAL; -+ -+ if (fbnum >= max_entries) -+ return -EINVAL; -+ -+ size = memparse(p + 1, &p); -+ -+ if (!size) -+ return -EINVAL; -+ -+ paddr = 0; -+ -+ if (*p == '@') { -+ paddr = simple_strtoul(p + 1, &p, 16); -+ -+ if (!paddr) -+ return -EINVAL; -+ -+ } -+ -+ paddrs[fbnum] = paddr; -+ sizes[fbnum] = size; -+ -+ if (*p == 0) -+ break; -+ -+ if (*p != ',') -+ return -EINVAL; -+ -+ ++p; -+ -+ start = p; -+ } -+ -+ return 0; -+} -+ -+static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev) -+{ -+ int i, r; -+ unsigned long vram_sizes[10]; -+ unsigned long vram_paddrs[10]; -+ -+ memset(&vram_sizes, 0, sizeof(vram_sizes)); 
-+ memset(&vram_paddrs, 0, sizeof(vram_paddrs)); -+ -+ if (def_vram && omapfb_parse_vram_param(def_vram, 10, -+ vram_sizes, vram_paddrs)) { -+ dev_err(fbdev->dev, "failed to parse vram parameter\n"); -+ -+ memset(&vram_sizes, 0, sizeof(vram_sizes)); -+ memset(&vram_paddrs, 0, sizeof(vram_paddrs)); -+ } -+ -+ if (fbdev->dev->platform_data) { -+ struct omapfb_platform_data *opd; -+ opd = fbdev->dev->platform_data; -+ for (i = 0; i < opd->mem_desc.region_cnt; ++i) { -+ if (!vram_sizes[i]) { -+ unsigned long size; -+ unsigned long paddr; -+ -+ size = opd->mem_desc.region[i].size; -+ paddr = opd->mem_desc.region[i].paddr; -+ -+ vram_sizes[i] = size; -+ vram_paddrs[i] = paddr; -+ } -+ } -+ } -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ /* allocate memory automatically only for fb0, or if -+ * excplicitly defined with vram or plat data option */ -+ if (i == 0 || vram_sizes[i] != 0) { -+ r = omapfb_alloc_fbmem_display(fbdev->fbs[i], -+ vram_sizes[i], vram_paddrs[i]); -+ -+ if (r) -+ return r; -+ } -+ } -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); -+ struct omapfb2_mem_region *rg; -+ rg = &ofbi->region; -+ -+ DBG("region%d phys %08x virt %p size=%lu\n", -+ i, -+ rg->paddr, -+ rg->vaddr, -+ rg->size); -+ } -+ -+ return 0; -+} -+ -+int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type) -+{ -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_display *display = fb2display(fbi); -+ struct omapfb2_mem_region *rg = &ofbi->region; -+ unsigned long old_size = rg->size; -+ unsigned long old_paddr = rg->paddr; -+ int old_type = rg->type; -+ int r; -+ -+ if (type > OMAPFB_MEMTYPE_MAX) -+ return -EINVAL; -+ -+ size = PAGE_ALIGN(size); -+ -+ if (old_size == size && old_type == type) -+ return 0; -+ -+ if (display && display->sync) { -+ omap_dss_lock(); -+ display->sync(display); -+ omap_dss_unlock(); -+ } -+ -+ omapfb_free_fbmem(fbi); -+ -+ if (size == 0) { -+ clear_fb_info(fbi); -+ return 0; -+ } -+ -+ r = omapfb_alloc_fbmem(fbi, size, 0); -+ -+ if (r) { -+ if (old_size) -+ omapfb_alloc_fbmem(fbi, old_size, old_paddr); -+ -+ if (rg->size == 0) -+ clear_fb_info(fbi); -+ -+ return r; -+ } -+ -+ if (old_size == size) -+ return 0; -+ -+ if (old_size == 0) { -+ DBG("initializing fb %d\n", ofbi->id); -+ r = omapfb_fb_init(fbdev, fbi); -+ if (r) { -+ DBG("omapfb_fb_init failed\n"); -+ goto err; -+ } -+ r = omapfb_apply_changes(fbi, 1); -+ if (r) { -+ DBG("omapfb_apply_changes failed\n"); -+ goto err; -+ } -+ } else { -+ struct fb_var_screeninfo new_var; -+ memcpy(&new_var, &fbi->var, sizeof(new_var)); -+ r = check_fb_var(fbi, &new_var); -+ if (r) -+ goto err; -+ memcpy(&fbi->var, &new_var, sizeof(fbi->var)); -+ set_fb_fix(fbi); -+ r = check_rotation(fbi); -+ if (r) -+ goto err; -+ } -+ -+ return 0; -+err: -+ omapfb_free_fbmem(fbi); -+ clear_fb_info(fbi); -+ return r; -+} -+ -+/* initialize fb_info, var, fix to something sane based on the display */ -+int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) -+{ -+ struct fb_var_screeninfo *var = &fbi->var; -+ struct omap_display *display = fb2display(fbi); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ int r = 0; -+ -+ fbi->fbops = &omapfb_ops; -+ fbi->flags = FBINFO_FLAG_DEFAULT; -+ fbi->pseudo_palette = fbdev->pseudo_palette; -+ -+ if (ofbi->region.size == 0) { -+ clear_fb_info(fbi); -+ return 0; -+ } -+ -+ var->nonstd = 0; -+ var->bits_per_pixel = 0; -+ -+ var->rotate = def_rotate; -+ -+ /* -+ * Check if there is a default 
color format set in the board file, -+ * and use this format instead the default deducted from the -+ * display bpp. -+ */ -+ if (fbdev->dev->platform_data) { -+ struct omapfb_platform_data *opd; -+ int id = ofbi->id; -+ -+ opd = fbdev->dev->platform_data; -+ if (opd->mem_desc.region[id].format_used) { -+ enum omap_color_mode mode; -+ enum omapfb_color_format format; -+ -+ format = opd->mem_desc.region[id].format; -+ mode = fb_format_to_dss_mode(format); -+ if (mode < 0) { -+ r = mode; -+ goto err; -+ } -+ r = dss_mode_to_fb_mode(mode, var); -+ if (r < 0) -+ goto err; -+ } -+ } -+ -+ if (display) { -+ u16 w, h; -+ int rotation = (var->rotate + ofbi->rotation[0]) % 4; -+ -+ display->get_resolution(display, &w, &h); -+ -+ if (rotation == FB_ROTATE_CW || -+ rotation == FB_ROTATE_CCW) { -+ var->xres = h; -+ var->yres = w; -+ } else { -+ var->xres = w; -+ var->yres = h; -+ } -+ -+ var->xres_virtual = var->xres; -+ var->yres_virtual = var->yres; -+ -+ if (!var->bits_per_pixel) { -+ switch (display->get_recommended_bpp(display)) { -+ case 16: -+ var->bits_per_pixel = 16; -+ break; -+ case 24: -+ var->bits_per_pixel = 32; -+ break; -+ default: -+ dev_err(fbdev->dev, "illegal display bpp\n"); -+ return -EINVAL; -+ } -+ } -+ } else { -+ /* if there's no display, let's just guess some basic values */ -+ var->xres = 320; -+ var->yres = 240; -+ var->xres_virtual = var->xres; -+ var->yres_virtual = var->yres; -+ if (!var->bits_per_pixel) -+ var->bits_per_pixel = 16; -+ } -+ -+ r = check_fb_var(fbi, var); -+ if (r) -+ goto err; -+ -+ set_fb_fix(fbi); -+ r = check_rotation(fbi); -+ if (r) -+ goto err; -+ -+ r = fb_alloc_cmap(&fbi->cmap, 256, 0); -+ if (r) -+ dev_err(fbdev->dev, "unable to allocate color map memory\n"); -+ -+err: -+ return r; -+} -+ -+static void fbinfo_cleanup(struct omapfb2_device *fbdev, struct fb_info *fbi) -+{ -+ fb_dealloc_cmap(&fbi->cmap); -+} -+ -+ -+static void omapfb_free_resources(struct omapfb2_device *fbdev) -+{ -+ int i; -+ -+ DBG("free_resources\n"); -+ -+ if (fbdev == NULL) -+ return; -+ -+ for (i = 0; i < fbdev->num_fbs; i++) -+ unregister_framebuffer(fbdev->fbs[i]); -+ -+ /* free the reserved fbmem */ -+ omapfb_free_all_fbmem(fbdev); -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ fbinfo_cleanup(fbdev, fbdev->fbs[i]); -+ framebuffer_release(fbdev->fbs[i]); -+ } -+ -+ for (i = 0; i < fbdev->num_displays; i++) { -+ if (!fbdev->displays[i]) -+ continue; -+ -+ if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED) -+ fbdev->displays[i]->disable(fbdev->displays[i]); -+ -+ omap_dss_put_display(fbdev->displays[i]); -+ } -+ -+ dev_set_drvdata(fbdev->dev, NULL); -+ kfree(fbdev); -+} -+ -+static int omapfb_create_framebuffers(struct omapfb2_device *fbdev) -+{ -+ int r, i; -+ -+ fbdev->num_fbs = 0; -+ -+ DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS); -+ -+ /* allocate fb_infos */ -+ for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) { -+ struct fb_info *fbi; -+ struct omapfb_info *ofbi; -+ -+ fbi = framebuffer_alloc(sizeof(struct omapfb_info), -+ fbdev->dev); -+ -+ if (fbi == NULL) { -+ dev_err(fbdev->dev, -+ "unable to allocate memory for plane info\n"); -+ return -ENOMEM; -+ } -+ -+ clear_fb_info(fbi); -+ -+ fbdev->fbs[i] = fbi; -+ -+ ofbi = FB2OFB(fbi); -+ ofbi->fbdev = fbdev; -+ ofbi->id = i; -+ -+ /* assign these early, so that fb alloc can use them */ -+ ofbi->rotation_type = def_vrfb ? 
OMAP_DSS_ROT_VRFB : -+ OMAP_DSS_ROT_DMA; -+ ofbi->mirror = def_mirror; -+ -+ fbdev->num_fbs++; -+ } -+ -+ DBG("fb_infos allocated\n"); -+ -+ /* assign overlays for the fbs */ -+ for (i = 0; i < min(fbdev->num_fbs, fbdev->num_overlays); i++) { -+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); -+ -+ ofbi->overlays[0] = fbdev->overlays[i]; -+ ofbi->num_overlays = 1; -+ } -+ -+ /* allocate fb memories */ -+ r = omapfb_allocate_all_fbs(fbdev); -+ if (r) { -+ dev_err(fbdev->dev, "failed to allocate fbmem\n"); -+ return r; -+ } -+ -+ DBG("fbmems allocated\n"); -+ -+ /* setup fb_infos */ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ r = omapfb_fb_init(fbdev, fbdev->fbs[i]); -+ if (r) { -+ dev_err(fbdev->dev, "failed to setup fb_info\n"); -+ return r; -+ } -+ } -+ -+ DBG("fb_infos initialized\n"); -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ r = register_framebuffer(fbdev->fbs[i]); -+ if (r != 0) { -+ dev_err(fbdev->dev, -+ "registering framebuffer %d failed\n", i); -+ return r; -+ } -+ } -+ -+ DBG("framebuffers registered\n"); -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ r = omapfb_apply_changes(fbdev->fbs[i], 1); -+ if (r) { -+ dev_err(fbdev->dev, "failed to change mode\n"); -+ return r; -+ } -+ } -+ -+ DBG("create sysfs for fbs\n"); -+ r = omapfb_create_sysfs(fbdev); -+ if (r) { -+ dev_err(fbdev->dev, "failed to create sysfs entries\n"); -+ return r; -+ } -+ -+ /* Enable fb0 */ -+ if (fbdev->num_fbs > 0) { -+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[0]); -+ -+ if (ofbi->num_overlays > 0 ) { -+ struct omap_overlay *ovl = ofbi->overlays[0]; -+ -+ r = omapfb_overlay_enable(ovl, 1); -+ -+ if (r) { -+ dev_err(fbdev->dev, -+ "failed to enable overlay\n"); -+ return r; -+ } -+ } -+ } -+ -+ DBG("create_framebuffers done\n"); -+ -+ return 0; -+} -+ -+int omapfb_mode_to_timings(const char *mode_str, -+ struct omap_video_timings *timings, u8 *bpp) -+{ -+ struct fb_info fbi; -+ struct fb_var_screeninfo var; -+ struct fb_ops fbops; -+ int r; -+ -+#ifdef CONFIG_OMAP2_DSS_VENC -+ if (strcmp(mode_str, "pal") == 0) { -+ *timings = omap_dss_pal_timings; -+ *bpp = 0; -+ return 0; -+ } else if (strcmp(mode_str, "ntsc") == 0) { -+ *timings = omap_dss_ntsc_timings; -+ *bpp = 0; -+ return 0; -+ } -+#endif -+ -+ /* this is quite a hack, but I wanted to use the modedb and for -+ * that we need fb_info and var, so we create dummy ones */ -+ -+ memset(&fbi, 0, sizeof(fbi)); -+ memset(&var, 0, sizeof(var)); -+ memset(&fbops, 0, sizeof(fbops)); -+ fbi.fbops = &fbops; -+ -+ r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24); -+ -+ if (r != 0) { -+ timings->pixel_clock = PICOS2KHZ(var.pixclock); -+ timings->hfp = var.left_margin; -+ timings->hbp = var.right_margin; -+ timings->vfp = var.upper_margin; -+ timings->vbp = var.lower_margin; -+ timings->hsw = var.hsync_len; -+ timings->vsw = var.vsync_len; -+ timings->x_res = var.xres; -+ timings->y_res = var.yres; -+ -+ switch (var.bits_per_pixel) { -+ case 16: -+ *bpp = 16; -+ break; -+ case 24: -+ case 32: -+ default: -+ *bpp = 24; -+ break; -+ } -+ -+ return 0; -+ } else { -+ return -EINVAL; -+ } -+} -+ -+static int omapfb_set_def_mode(struct omap_display *display, char *mode_str) -+{ -+ int r; -+ u8 bpp; -+ struct omap_video_timings timings; -+ -+ r = omapfb_mode_to_timings(mode_str, &timings, &bpp); -+ if (r) -+ return r; -+ -+ display->panel->recommended_bpp = bpp; -+ -+ if (!display->check_timings || !display->set_timings) -+ return -EINVAL; -+ -+ r = display->check_timings(display, &timings); -+ if (r) -+ return r; -+ -+ omap_dss_lock(); -+ 
display->set_timings(display, &timings); -+ omap_dss_unlock(); -+ -+ return 0; -+} -+ -+static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) -+{ -+ char *str, *options, *this_opt; -+ int r = 0; -+ -+ str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL); -+ strcpy(str, def_mode); -+ options = str; -+ -+ while (!r && (this_opt = strsep(&options, ",")) != NULL) { -+ char *p, *display_str, *mode_str; -+ struct omap_display *display; -+ int i; -+ -+ p = strchr(this_opt, ':'); -+ if (!p) { -+ r = -EINVAL; -+ break; -+ } -+ -+ *p = 0; -+ display_str = this_opt; -+ mode_str = p + 1; -+ -+ display = NULL; -+ for (i = 0; i < fbdev->num_displays; ++i) { -+ if (strcmp(fbdev->displays[i]->name, -+ display_str) == 0) { -+ display = fbdev->displays[i]; -+ break; -+ } -+ } -+ -+ if (!display) { -+ r = -EINVAL; -+ break; -+ } -+ -+ r = omapfb_set_def_mode(display, mode_str); -+ if (r) -+ break; -+ } -+ -+ kfree(str); -+ -+ return r; -+} -+ -+static int omapfb_probe(struct platform_device *pdev) -+{ -+ struct omapfb2_device *fbdev = NULL; -+ int r = 0; -+ int i, t; -+ struct omap_overlay *ovl; -+ struct omap_display *def_display; -+ -+ DBG("omapfb_probe\n"); -+ -+ if (pdev->num_resources != 0) { -+ dev_err(&pdev->dev, "probed for an unknown device\n"); -+ r = -ENODEV; -+ goto err0; -+ } -+ -+ fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL); -+ if (fbdev == NULL) { -+ r = -ENOMEM; -+ goto err0; -+ } -+ -+ mutex_init(&fbdev->mtx); -+ -+ fbdev->dev = &pdev->dev; -+ platform_set_drvdata(pdev, fbdev); -+ -+ fbdev->num_displays = 0; -+ t = omap_dss_get_num_displays(); -+ for (i = 0; i < t; i++) { -+ struct omap_display *display; -+ display = omap_dss_get_display(i); -+ if (!display) -+ dev_warn(&pdev->dev, "can't get display %d\n", i); -+ -+ fbdev->displays[fbdev->num_displays++] = display; -+ } -+ -+ if (fbdev->num_displays == 0) { -+ dev_err(&pdev->dev, "no displays\n"); -+ r = -EINVAL; -+ goto cleanup; -+ } -+ -+ fbdev->num_overlays = omap_dss_get_num_overlays(); -+ for (i = 0; i < fbdev->num_overlays; i++) -+ fbdev->overlays[i] = omap_dss_get_overlay(i); -+ -+ fbdev->num_managers = omap_dss_get_num_overlay_managers(); -+ for (i = 0; i < fbdev->num_managers; i++) -+ fbdev->managers[i] = omap_dss_get_overlay_manager(i); -+ -+ if (def_mode && strlen(def_mode) > 0) { -+ if (omapfb_parse_def_modes(fbdev)) -+ dev_warn(&pdev->dev, "cannot parse default modes\n"); -+ } -+ -+ r = omapfb_create_framebuffers(fbdev); -+ if (r) -+ goto cleanup; -+ -+ omap_dss_lock(); -+ -+ for (i = 0; i < fbdev->num_managers; i++) { -+ struct omap_overlay_manager *mgr; -+ mgr = fbdev->managers[i]; -+ r = mgr->apply(mgr); -+ if (r) -+ dev_warn(fbdev->dev, "failed to apply dispc config\n"); -+ } -+ -+ DBG("mgr->apply'ed\n"); -+ -+ /* gfx overlay should be the default one. 
find a display -+ * connected to that, and use it as default display */ -+ ovl = omap_dss_get_overlay(0); -+ if (ovl->manager && ovl->manager->display) { -+ def_display = ovl->manager->display; -+ } else { -+ dev_warn(&pdev->dev, "cannot find default display\n"); -+ def_display = NULL; -+ } -+ -+ if (def_display) { -+ u16 w, h; -+ r = def_display->enable(def_display); -+ if (r) -+ dev_warn(fbdev->dev, "Failed to enable display '%s'\n", -+ def_display->name); -+ -+ /* set the update mode */ -+ if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { -+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE -+ if (def_display->enable_te) -+ def_display->enable_te(def_display, 1); -+ if (def_display->set_update_mode) -+ def_display->set_update_mode(def_display, -+ OMAP_DSS_UPDATE_AUTO); -+#else /* MANUAL_UPDATE */ -+ if (def_display->enable_te) -+ def_display->enable_te(def_display, 0); -+ if (def_display->set_update_mode) -+ def_display->set_update_mode(def_display, -+ OMAP_DSS_UPDATE_MANUAL); -+ -+ def_display->get_resolution(def_display, &w, &h); -+ def_display->update(def_display, 0, 0, w, h); -+#endif -+ } else { -+ if (def_display->set_update_mode) -+ def_display->set_update_mode(def_display, -+ OMAP_DSS_UPDATE_AUTO); -+ } -+ } -+ -+ omap_dss_unlock(); -+ -+ return 0; -+ -+cleanup: -+ omapfb_free_resources(fbdev); -+err0: -+ dev_err(&pdev->dev, "failed to setup omapfb\n"); -+ return r; -+} -+ -+static int omapfb_remove(struct platform_device *pdev) -+{ -+ struct omapfb2_device *fbdev = platform_get_drvdata(pdev); -+ -+ /* FIXME: wait till completion of pending events */ -+ -+ omapfb_remove_sysfs(fbdev); -+ -+ omapfb_free_resources(fbdev); -+ -+ return 0; -+} -+ -+static struct platform_driver omapfb_driver = { -+ .probe = omapfb_probe, -+ .remove = omapfb_remove, -+ .driver = { -+ .name = "omapfb", -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init omapfb_init(void) -+{ -+ DBG("omapfb_init\n"); -+ -+ if (platform_driver_register(&omapfb_driver)) { -+ printk(KERN_ERR "failed to register omapfb driver\n"); -+ return -ENODEV; -+ } -+ -+ return 0; -+} -+ -+static void __exit omapfb_exit(void) -+{ -+ DBG("omapfb_exit\n"); -+ platform_driver_unregister(&omapfb_driver); -+} -+ -+module_param_named(mode, def_mode, charp, 0); -+module_param_named(vram, def_vram, charp, 0); -+module_param_named(rotate, def_rotate, int, 0); -+module_param_named(vrfb, def_vrfb, bool, 0); -+module_param_named(mirror, def_mirror, bool, 0); -+ -+/* late_initcall to let panel/ctrl drivers loaded first. -+ * I guess better option would be a more dynamic approach, -+ * so that omapfb reacts to new panels when they are loaded */ -+late_initcall(omapfb_init); -+/*module_init(omapfb_init);*/ -+module_exit(omapfb_exit); -+ -+MODULE_AUTHOR("Tomi Valkeinen "); -+MODULE_DESCRIPTION("OMAP2/3 Framebuffer"); -+MODULE_LICENSE("GPL v2"); -diff -Nurp linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb-sysfs.c linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb-sysfs.c ---- linux-omap-2.6.28-omap1/drivers/video/omap2/omapfb/omapfb-sysfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/video/omap2/omapfb/omapfb-sysfs.c 2011-06-22 13:19:33.183063271 +0200 -@@ -0,0 +1,496 @@ -+/* -+ * linux/drivers/video/omap2/omapfb-sysfs.c -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Author: Tomi Valkeinen -+ * -+ * Some code and ideas taken from drivers/video/omap/ driver -+ * by Imre Deak. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "omapfb.h" -+ -+static ssize_t show_rotate_type(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type); -+} -+ -+static ssize_t store_rotate_type(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ enum omap_dss_rotation_type rot_type; -+ int r; -+ -+ rot_type = simple_strtoul(buf, NULL, 0); -+ -+ if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) -+ return -EINVAL; -+ -+ omapfb_lock(fbdev); -+ -+ r = 0; -+ if (rot_type == ofbi->rotation_type) -+ goto out; -+ -+ if (ofbi->region.size) { -+ r = -EBUSY; -+ goto out; -+ } -+ -+ ofbi->rotation_type = rot_type; -+ -+ /* -+ * Since the VRAM for this FB is not allocated at the moment we don't need to -+ * do any further parameter checking at this point. -+ */ -+out: -+ omapfb_unlock(fbdev); -+ -+ return r ? r : count; -+} -+ -+ -+static ssize_t show_mirror(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ -+ return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror); -+} -+ -+static ssize_t store_mirror(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ bool mirror; -+ int r; -+ struct fb_var_screeninfo new_var; -+ -+ mirror = simple_strtoul(buf, NULL, 0); -+ -+ if (mirror != 0 && mirror != 1) -+ return -EINVAL; -+ -+ omapfb_lock(fbdev); -+ -+ ofbi->mirror = mirror; -+ -+ memcpy(&new_var, &fbi->var, sizeof(new_var)); -+ r = check_fb_var(fbi, &new_var); -+ if (r) -+ goto out; -+ memcpy(&fbi->var, &new_var, sizeof(fbi->var)); -+ -+ set_fb_fix(fbi); -+ -+ r = omapfb_apply_changes(fbi, 0); -+ if (r) -+ goto out; -+ -+ r = count; -+out: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static ssize_t show_overlays(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ ssize_t l = 0; -+ int t; -+ -+ for (t = 0; t < ofbi->num_overlays; t++) { -+ struct omap_overlay *ovl = ofbi->overlays[t]; -+ int ovlnum; -+ -+ for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum) -+ if (ovl == fbdev->overlays[ovlnum]) -+ break; -+ -+ l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", -+ t == 0 ? 
"" : ",", ovlnum); -+ } -+ -+ l += snprintf(buf + l, PAGE_SIZE - l, "\n"); -+ -+ return l; -+} -+ -+static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev, -+ struct omap_overlay *ovl) -+{ -+ int i, t; -+ -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); -+ -+ for (t = 0; t < ofbi->num_overlays; t++) { -+ if (ofbi->overlays[t] == ovl) -+ return ofbi; -+ } -+ } -+ -+ return NULL; -+} -+ -+static ssize_t store_overlays(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB]; -+ struct omap_overlay *ovl; -+ int num_ovls, r, i; -+ int len; -+ bool added = false; -+ -+ num_ovls = 0; -+ -+ len = strlen(buf); -+ if (buf[len - 1] == '\n') -+ len = len - 1; -+ -+ omapfb_lock(fbdev); -+ -+ if (len > 0) { -+ char *p = (char *)buf; -+ int ovlnum; -+ -+ while (p < buf + len) { -+ int found; -+ if (num_ovls == OMAPFB_MAX_OVL_PER_FB) { -+ r = -EINVAL; -+ goto out; -+ } -+ -+ ovlnum = simple_strtoul(p, &p, 0); -+ if (ovlnum > fbdev->num_overlays) { -+ r = -EINVAL; -+ goto out; -+ } -+ -+ found = 0; -+ for (i = 0; i < num_ovls; ++i) { -+ if (ovls[i] == fbdev->overlays[ovlnum]) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (!found) -+ ovls[num_ovls++] = fbdev->overlays[ovlnum]; -+ -+ p++; -+ } -+ } -+ -+ for (i = 0; i < num_ovls; ++i) { -+ struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]); -+ if (ofbi2 && ofbi2 != ofbi) { -+ dev_err(fbdev->dev, "overlay already in use\n"); -+ r = -EINVAL; -+ goto out; -+ } -+ } -+ -+ /* detach unused overlays */ -+ for (i = 0; i < ofbi->num_overlays; ++i) { -+ int t, found; -+ -+ ovl = ofbi->overlays[i]; -+ -+ found = 0; -+ -+ for (t = 0; t < num_ovls; ++t) { -+ if (ovl == ovls[t]) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) -+ continue; -+ -+ DBG("detaching %d\n", ofbi->overlays[i]->id); -+ -+ omapfb_overlay_enable(ovl, 0); -+ -+ if (ovl->manager) -+ ovl->manager->apply(ovl->manager); -+ -+ for (t = i + 1; t < ofbi->num_overlays; t++) { -+ ofbi->rotation[t-1] = ofbi->rotation[t]; -+ ofbi->overlays[t-1] = ofbi->overlays[t]; -+ } -+ -+ ofbi->num_overlays--; -+ i--; -+ } -+ -+ for (i = 0; i < num_ovls; ++i) { -+ int t, found; -+ -+ ovl = ovls[i]; -+ -+ found = 0; -+ -+ for (t = 0; t < ofbi->num_overlays; ++t) { -+ if (ovl == ofbi->overlays[t]) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) -+ continue; -+ ofbi->rotation[ofbi->num_overlays] = 0; -+ ofbi->overlays[ofbi->num_overlays++] = ovl; -+ -+ added = true; -+ } -+ -+ if (added) { -+ r = omapfb_apply_changes(fbi, 0); -+ if (r) -+ goto out; -+ } -+ -+ r = count; -+out: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static ssize_t show_overlays_rotate(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ ssize_t l = 0; -+ int t; -+ -+ for (t = 0; t < ofbi->num_overlays; t++) { -+ l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", -+ t == 0 ? 
"" : ",", ofbi->rotation[t]); -+ } -+ -+ l += snprintf(buf + l, PAGE_SIZE - l, "\n"); -+ -+ return l; -+} -+ -+static ssize_t store_overlays_rotate(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ int num_ovls = 0, r, i; -+ int len; -+ bool changed = false; -+ u8 rotation[OMAPFB_MAX_OVL_PER_FB]; -+ -+ len = strlen(buf); -+ if (buf[len - 1] == '\n') -+ len = len - 1; -+ -+ omapfb_lock(fbdev); -+ -+ if (len > 0) { -+ char *p = (char *)buf; -+ -+ while (p < buf + len) { -+ int rot; -+ -+ if (num_ovls == ofbi->num_overlays) { -+ r = -EINVAL; -+ goto out; -+ } -+ -+ rot = simple_strtoul(p, &p, 0); -+ if (rot < 0 || rot > 3) { -+ r = -EINVAL; -+ goto out; -+ } -+ -+ if (ofbi->rotation[num_ovls] != rot) -+ changed = true; -+ -+ rotation[num_ovls++] = rot; -+ -+ p++; -+ } -+ } -+ -+ if (num_ovls != ofbi->num_overlays) { -+ r = -EINVAL; -+ goto out; -+ } -+ -+ if (changed) { -+ for (i = 0; i < num_ovls; ++i) -+ ofbi->rotation[i] = rotation[i]; -+ -+ r = omapfb_apply_changes(fbi, 0); -+ if (r) -+ goto out; -+ -+ /* FIXME error handling? */ -+ } -+ -+ r = count; -+out: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static ssize_t show_size(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ -+ return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region.size); -+} -+ -+static ssize_t store_size(struct device *dev, struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ struct omapfb2_device *fbdev = ofbi->fbdev; -+ unsigned long size; -+ int r; -+ int i; -+ -+ size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0)); -+ -+ omapfb_lock(fbdev); -+ -+ for (i = 0; i < ofbi->num_overlays; i++) { -+ if (ofbi->overlays[i]->info.enabled) { -+ r = -EBUSY; -+ goto out; -+ } -+ } -+ -+ if (size != ofbi->region.size) { -+ r = omapfb_realloc_fbmem(fbi, size, ofbi->region.type); -+ if (r) { -+ dev_err(dev, "realloc fbmem failed\n"); -+ goto out; -+ } -+ } -+ -+ r = count; -+out: -+ omapfb_unlock(fbdev); -+ -+ return r; -+} -+ -+static ssize_t show_phys(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ -+ return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region.paddr); -+} -+ -+static ssize_t show_virt(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fb_info *fbi = dev_get_drvdata(dev); -+ struct omapfb_info *ofbi = FB2OFB(fbi); -+ -+ return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region.vaddr); -+} -+ -+static struct device_attribute omapfb_attrs[] = { -+ __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, store_rotate_type), -+ __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror), -+ __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size), -+ __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays), -+ __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate, store_overlays_rotate), -+ __ATTR(phys_addr, S_IRUGO, show_phys, NULL), -+ __ATTR(virt_addr, S_IRUGO, show_virt, NULL), -+}; -+ -+int omapfb_create_sysfs(struct omapfb2_device *fbdev) -+{ -+ int i; -+ int r; -+ -+ DBG("create sysfs for fbs\n"); -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ int t; -+ for (t = 0; t < 
ARRAY_SIZE(omapfb_attrs); t++) { -+ r = device_create_file(fbdev->fbs[i]->dev, -+ &omapfb_attrs[t]); -+ -+ if (r) { -+ dev_err(fbdev->dev, "failed to create sysfs file\n"); -+ return r; -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+void omapfb_remove_sysfs(struct omapfb2_device *fbdev) -+{ -+ int i, t; -+ -+ DBG("remove sysfs for fbs\n"); -+ for (i = 0; i < fbdev->num_fbs; i++) { -+ for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) -+ device_remove_file(fbdev->fbs[i]->dev, -+ &omapfb_attrs[t]); -+ } -+} -+ -diff -Nurp linux-omap-2.6.28-omap1/drivers/watchdog/Kconfig linux-omap-2.6.28-nokia1/drivers/watchdog/Kconfig ---- linux-omap-2.6.28-omap1/drivers/watchdog/Kconfig 2011-06-22 13:14:21.663067698 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/watchdog/Kconfig 2011-06-22 13:19:33.183063271 +0200 -@@ -233,6 +233,13 @@ config ORION5X_WATCHDOG - To compile this driver as a module, choose M here: the - module will be called orion5x_wdt. - -+config TWL4030_WATCHDOG -+ tristate "TWL4030 Watchdog" -+ depends on TWL4030_CORE -+ help -+ Support for TI TWL4030 watchdog. Say 'Y' here to enable the -+ watchdog timer support for TWL4030 chips. -+ - # ARM26 Architecture - - # AVR32 Architecture -diff -Nurp linux-omap-2.6.28-omap1/drivers/watchdog/Makefile linux-omap-2.6.28-nokia1/drivers/watchdog/Makefile ---- linux-omap-2.6.28-omap1/drivers/watchdog/Makefile 2011-06-22 13:14:21.663067698 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/watchdog/Makefile 2011-06-22 13:19:33.183063271 +0200 -@@ -28,6 +28,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb. - obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o - obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o - obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o -+obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o - obj-$(CONFIG_21285_WATCHDOG) += wdt285.o - obj-$(CONFIG_977_WATCHDOG) += wdt977.o - obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o -diff -Nurp linux-omap-2.6.28-omap1/drivers/watchdog/omap_wdt.c linux-omap-2.6.28-nokia1/drivers/watchdog/omap_wdt.c ---- linux-omap-2.6.28-omap1/drivers/watchdog/omap_wdt.c 2011-06-22 13:14:21.673067698 +0200 -+++ linux-omap-2.6.28-nokia1/drivers/watchdog/omap_wdt.c 2011-06-22 13:19:33.183063271 +0200 -@@ -53,18 +53,27 @@ static unsigned timer_margin; - module_param(timer_margin, uint, 0); - MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)"); - -+static int nowayout = WATCHDOG_NOWAYOUT; -+module_param(nowayout, int, 0); -+MODULE_PARM_DESC(nowayout, -+ "Watchdog cannot be stopped once started (default=" -+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -+ -+ - static unsigned int wdt_trgr_pattern = 0x1234; - static spinlock_t wdt_lock; - -+#define OMAP_WDT_STATE_OPENED_BIT 1 -+#define OMAP_WDT_STATE_ACTIVATED_BIT 8 -+ - struct omap_wdt_dev { -- void __iomem *base; /* physical */ -- struct device *dev; -- int omap_wdt_users; -- struct clk *armwdt_ck; -- struct clk *mpu_wdt_ick; -- struct clk *mpu_wdt_fck; -- struct resource *mem; -- struct miscdevice omap_wdt_miscdev; -+ void __iomem *base; /* physical */ -+ struct device *dev; -+ unsigned long omap_wdt_state; -+ struct clk *mpu_wdt_ick; -+ struct clk *mpu_wdt_fck; -+ struct resource *mem; -+ struct miscdevice omap_wdt_miscdev; - }; - - static void omap_wdt_ping(struct omap_wdt_dev *wdev) -@@ -84,6 +93,16 @@ static void omap_wdt_ping(struct omap_wd - /* reloaded WCRR from WLDR */ - } - -+static void omap_wdt_ick_enable(struct clk *ick, int enable) -+{ -+ if (ick) { -+ if (enable) -+ clk_enable(ick); -+ else -+ clk_disable(ick); -+ } -+} -+ - static void omap_wdt_enable(struct 
omap_wdt_dev *wdev) - { - void __iomem *base = wdev->base; -@@ -143,29 +162,31 @@ static int omap_wdt_open(struct inode *i - struct omap_wdt_dev *wdev = platform_get_drvdata(omap_wdt_dev); - void __iomem *base = wdev->base; - -- if (test_and_set_bit(1, (unsigned long *)&(wdev->omap_wdt_users))) -+ if (test_and_set_bit(OMAP_WDT_STATE_OPENED_BIT, -+ &wdev->omap_wdt_state)) - return -EBUSY; - -- if (cpu_is_omap16xx()) -- clk_enable(wdev->armwdt_ck); /* Enable the clock */ -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); -+ if (wdev->omap_wdt_state & (1 << OMAP_WDT_STATE_ACTIVATED_BIT)) -+ omap_wdt_ping(wdev); -+ else { -+ clk_enable(wdev->mpu_wdt_fck); - -- if (cpu_is_omap24xx() || cpu_is_omap34xx()) { -- clk_enable(wdev->mpu_wdt_ick); /* Enable the interface clock */ -- clk_enable(wdev->mpu_wdt_fck); /* Enable the functional clock */ -+ /* initialize prescaler */ -+ while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) -+ cpu_relax(); -+ -+ __raw_writel((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL); -+ while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) -+ cpu_relax(); -+ wdev->omap_wdt_state |= (1 << OMAP_WDT_STATE_ACTIVATED_BIT); - } - -- /* initialize prescaler */ -- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) -- cpu_relax(); -- -- __raw_writel((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL); -- while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01) -- cpu_relax(); -- - file->private_data = (void *) wdev; - - omap_wdt_set_timeout(wdev); - omap_wdt_enable(wdev); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); - - return nonseekable_open(inode, file); - } -@@ -173,25 +194,22 @@ static int omap_wdt_open(struct inode *i - static int omap_wdt_release(struct inode *inode, struct file *file) - { - struct omap_wdt_dev *wdev = file->private_data; -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); -+ if (nowayout) { -+ /* Give the user application some time to recover -+ * in case of crash. -+ */ -+ omap_wdt_ping(wdev); -+ dev_err(wdev->omap_wdt_miscdev.parent, -+ "Unexpected close, not stopping!\n"); -+ } else { -+ omap_wdt_disable(wdev); - -- /* -- * Shut off the timer unless NOWAYOUT is defined. -- */ --#ifndef CONFIG_WATCHDOG_NOWAYOUT -- -- omap_wdt_disable(wdev); -- -- if (cpu_is_omap16xx()) -- clk_disable(wdev->armwdt_ck); /* Disable the clock */ -- -- if (cpu_is_omap24xx() || cpu_is_omap34xx()) { -- clk_disable(wdev->mpu_wdt_ick); /* Disable the clock */ -- clk_disable(wdev->mpu_wdt_fck); /* Disable the clock */ -+ clk_disable(wdev->mpu_wdt_fck); -+ wdev->omap_wdt_state &= ~(1 << OMAP_WDT_STATE_ACTIVATED_BIT); - } --#else -- printk(KERN_CRIT "omap_wdt: Unexpected close, not stopping!\n"); --#endif -- wdev->omap_wdt_users = 0; -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); -+ wdev->omap_wdt_state &= ~(1 << OMAP_WDT_STATE_OPENED_BIT); - - return 0; - } -@@ -203,9 +221,11 @@ static ssize_t omap_wdt_write(struct fil - - /* Refresh LOAD_TIME. 
*/ - if (len) { -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); - spin_lock(&wdt_lock); - omap_wdt_ping(wdev); - spin_unlock(&wdt_lock); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); - } - return len; - } -@@ -237,15 +257,18 @@ static long omap_wdt_ioctl(struct file * - return put_user(omap_prcm_get_reset_sources(), - (int __user *)arg); - case WDIOC_KEEPALIVE: -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); - spin_lock(&wdt_lock); - omap_wdt_ping(wdev); - spin_unlock(&wdt_lock); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); - return 0; - case WDIOC_SETTIMEOUT: - if (get_user(new_margin, (int __user *)arg)) - return -EFAULT; - omap_wdt_adjust_timeout(new_margin); - -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); - spin_lock(&wdt_lock); - omap_wdt_disable(wdev); - omap_wdt_set_timeout(wdev); -@@ -253,6 +276,7 @@ static long omap_wdt_ioctl(struct file * - - omap_wdt_ping(wdev); - spin_unlock(&wdt_lock); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); - /* Fall */ - case WDIOC_GETTIMEOUT: - return put_user(timer_margin, (int __user *)arg); -@@ -269,7 +293,7 @@ static const struct file_operations omap - .release = omap_wdt_release, - }; - --static int __init omap_wdt_probe(struct platform_device *pdev) -+static int __devinit omap_wdt_probe(struct platform_device *pdev) - { - struct resource *res, *mem; - struct omap_wdt_dev *wdev; -@@ -294,20 +318,20 @@ static int __init omap_wdt_probe(struct - goto err_busy; - } - -- wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL); -+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); - if (!wdev) { - ret = -ENOMEM; - goto err_kzalloc; - } - -- wdev->omap_wdt_users = 0; -+ wdev->omap_wdt_state = 0; - wdev->mem = mem; - - if (cpu_is_omap16xx()) { -- wdev->armwdt_ck = clk_get(&pdev->dev, "armwdt_ck"); -- if (IS_ERR(wdev->armwdt_ck)) { -- ret = PTR_ERR(wdev->armwdt_ck); -- wdev->armwdt_ck = NULL; -+ wdev->mpu_wdt_fck = clk_get(&pdev->dev, "armwdt_ck"); -+ if (IS_ERR(wdev->mpu_wdt_fck)) { -+ ret = PTR_ERR(wdev->mpu_wdt_fck); -+ wdev->mpu_wdt_fck = NULL; - goto err_clk; - } - } -@@ -347,8 +371,13 @@ static int __init omap_wdt_probe(struct - goto err_ioremap; - } - -+ spin_lock_init(&wdt_lock); - platform_set_drvdata(pdev, wdev); - -+ /* enable clocks for register access */ -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); -+ clk_enable(wdev->mpu_wdt_fck); -+ - omap_wdt_disable(wdev); - omap_wdt_adjust_timeout(timer_margin); - -@@ -361,18 +390,26 @@ static int __init omap_wdt_probe(struct - if (ret) - goto err_misc; - -- pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n", -+ dev_info(wdev->omap_wdt_miscdev.parent, -+ "OMAP Watchdog Timer Rev 0x%02x: initial " -+ "timeout %d sec, nowayout is %s\n", - __raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF, -- timer_margin); -+ timer_margin, (nowayout ? 
"on" : "off")); - - /* autogate OCP interface clock */ - __raw_writel(0x01, wdev->base + OMAP_WATCHDOG_SYS_CONFIG); - -+ /* disable clocks since we don't need them now */ -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); -+ clk_disable(wdev->mpu_wdt_fck); -+ - omap_wdt_dev = pdev; - - return 0; - - err_misc: -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); -+ clk_disable(wdev->mpu_wdt_fck); - platform_set_drvdata(pdev, NULL); - iounmap(wdev->base); - -@@ -380,8 +417,6 @@ err_ioremap: - wdev->base = NULL; - - err_clk: -- if (wdev->armwdt_ck) -- clk_put(wdev->armwdt_ck); - if (wdev->mpu_wdt_ick) - clk_put(wdev->mpu_wdt_ick); - if (wdev->mpu_wdt_fck) -@@ -401,11 +436,14 @@ static void omap_wdt_shutdown(struct pla - { - struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); - -- if (wdev->omap_wdt_users) -+ if (wdev->omap_wdt_state & (1<mpu_wdt_ick, 1); - omap_wdt_disable(wdev); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); -+ } - } - --static int omap_wdt_remove(struct platform_device *pdev) -+static int __devexit omap_wdt_remove(struct platform_device *pdev) - { - struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -@@ -417,11 +455,6 @@ static int omap_wdt_remove(struct platfo - release_mem_region(res->start, res->end - res->start + 1); - platform_set_drvdata(pdev, NULL); - -- if (wdev->armwdt_ck) { -- clk_put(wdev->armwdt_ck); -- wdev->armwdt_ck = NULL; -- } -- - if (wdev->mpu_wdt_ick) { - clk_put(wdev->mpu_wdt_ick); - wdev->mpu_wdt_ick = NULL; -@@ -441,18 +474,16 @@ static int omap_wdt_remove(struct platfo - - #ifdef CONFIG_PM - --/* REVISIT ... not clear this is the best way to handle system suspend; and -- * it's very inappropriate for selective device suspend (e.g. suspending this -- * through sysfs rather than by stopping the watchdog daemon). Also, this -- * may not play well enough with NOWAYOUT... 
-- */ -- - static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state) - { - struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); - -- if (wdev->omap_wdt_users) -+ if (wdev->omap_wdt_state & (1<mpu_wdt_ick, 1); - omap_wdt_disable(wdev); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); -+ clk_disable(wdev->mpu_wdt_fck); -+ } - - return 0; - } -@@ -461,9 +492,12 @@ static int omap_wdt_resume(struct platfo - { - struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); - -- if (wdev->omap_wdt_users) { -+ if (wdev->omap_wdt_state & (1<mpu_wdt_fck); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 1); - omap_wdt_enable(wdev); - omap_wdt_ping(wdev); -+ omap_wdt_ick_enable(wdev->mpu_wdt_ick, 0); - } - - return 0; -@@ -476,7 +510,7 @@ static int omap_wdt_resume(struct platfo - - static struct platform_driver omap_wdt_driver = { - .probe = omap_wdt_probe, -- .remove = omap_wdt_remove, -+ .remove = __devexit_p(omap_wdt_remove), - .shutdown = omap_wdt_shutdown, - .suspend = omap_wdt_suspend, - .resume = omap_wdt_resume, -@@ -488,7 +522,6 @@ static struct platform_driver omap_wdt_d - - static int __init omap_wdt_init(void) - { -- spin_lock_init(&wdt_lock); - return platform_driver_register(&omap_wdt_driver); - } - -diff -Nurp linux-omap-2.6.28-omap1/drivers/watchdog/twl4030_wdt.c linux-omap-2.6.28-nokia1/drivers/watchdog/twl4030_wdt.c ---- linux-omap-2.6.28-omap1/drivers/watchdog/twl4030_wdt.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/drivers/watchdog/twl4030_wdt.c 2011-06-22 13:19:33.183063271 +0200 -@@ -0,0 +1,299 @@ -+/* -+ * Copyright (C) Nokia Corporation -+ * -+ * Written by Timo Kokkonen -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND 1 -+ -+ -+#ifdef TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND -+#include -+#endif -+ -+#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3 -+ -+static int nowayout = WATCHDOG_NOWAYOUT; -+module_param(nowayout, int, 0); -+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " -+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -+ -+ -+ -+#define TWL4030_WDT_STATE_OPEN_BIT 0x0 -+#define TWL4030_WDT_STATE_OPEN (1 << TWL4030_WDT_STATE_OPEN_BIT) -+#define TWL4030_WDT_STATE_ACTIVE 0x8 -+ -+static struct platform_device *twl4030_wdt_dev; -+ -+struct twl4030_wdt { -+ struct miscdevice miscdev; -+ int timer_margin; -+ unsigned long state; -+#ifdef TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND -+ u8 poweroff_on_suspend; -+#endif -+}; -+ -+static int twl4030_wdt_write(unsigned char val) -+{ -+ return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val, -+ TWL4030_WATCHDOG_CFG_REG_OFFS); -+} -+ -+static int twl4030_wdt_ping(struct twl4030_wdt *wdt) -+{ -+ return twl4030_wdt_write(wdt->timer_margin + 1); -+} -+ -+static int twl4030_wdt_enable(struct twl4030_wdt *wdt) -+{ -+ return twl4030_wdt_ping(wdt); -+} -+ -+static int twl4030_wdt_disable(struct twl4030_wdt *wdt) -+{ -+ return twl4030_wdt_write(0); -+} -+ -+static int twl4030_wdt_set_timeout(struct twl4030_wdt *wdt, int timeout) -+{ -+ if (timeout < 0 || timeout > 30) { -+ dev_warn(wdt->miscdev.parent, -+ "Timeout can only be in the range [0-30] seconds"); -+ return -EINVAL; -+ } -+ wdt->timer_margin = timeout; -+ return twl4030_wdt_ping(wdt); -+} -+ -+#ifdef TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND -+static struct dentry *twl4030_wdt_debugfs; -+#endif -+ -+static ssize_t twl4030_wdt_write_fop(struct file *file, -+ const char __user *data, size_t len, loff_t *ppos) -+{ -+ struct twl4030_wdt *wdt = file->private_data; -+ if (twl4030_wdt_ping(wdt)) -+ return -EAGAIN; -+ return len; -+} -+ -+static long twl4030_wdt_ioctl(struct file *file, -+ unsigned int cmd, unsigned long arg) -+{ -+ int new_margin; -+ struct twl4030_wdt *wdt = file->private_data; -+ -+ static const struct watchdog_info twl4030_wd_ident = { -+ .identity = "TWL4030 Watchdog", -+ .options = WDIOF_SETTIMEOUT, -+ .firmware_version = 0 -+ }; -+ -+ switch (cmd) { -+ case WDIOC_GETSUPPORT: -+ return copy_to_user((struct watchdog_info __user *) arg, -+ &twl4030_wd_ident, sizeof(twl4030_wd_ident)) ? 
-+ -EFAULT : 0; -+ case WDIOC_SETTIMEOUT: -+ if (get_user(new_margin, (int __user *) arg)) -+ return -EFAULT; -+ if (twl4030_wdt_set_timeout(wdt, new_margin)) -+ return -EINVAL; -+ if (put_user(wdt->timer_margin, (int __user *) arg)) -+ return -EFAULT; -+ break; -+ -+ case WDIOC_GETTIMEOUT: -+ return put_user(wdt->timer_margin, (int __user *) arg); -+ -+ case WDIOC_KEEPALIVE: -+ if (twl4030_wdt_ping(wdt)) -+ return -EFAULT; -+ break; -+ -+ default: -+ return -ENOTTY; -+ } -+ -+ return 0; -+} -+ -+static int twl4030_wdt_open(struct inode *inode, struct file *file) -+{ -+ struct twl4030_wdt *wdt = platform_get_drvdata(twl4030_wdt_dev); -+ -+ if (test_and_set_bit(TWL4030_WDT_STATE_OPEN_BIT, &wdt->state)) -+ return -EBUSY; -+ -+ wdt->state |= TWL4030_WDT_STATE_ACTIVE; -+ -+ if (twl4030_wdt_ping(wdt)) { -+ wdt->state &= ~(TWL4030_WDT_STATE_ACTIVE | -+ TWL4030_WDT_STATE_OPEN); -+ return -EAGAIN; -+ } -+ file->private_data = wdt; -+ return nonseekable_open(inode, file); -+} -+ -+static int twl4030_wdt_release(struct inode *inode, struct file *file) -+{ -+ struct twl4030_wdt *wdt = file->private_data; -+ if (nowayout) { -+ dev_alert(wdt->miscdev.parent, -+ "Unexpected close, watchdog still running!\n"); -+ } else { -+ if (twl4030_wdt_disable(wdt)) -+ return -EAGAIN; -+ wdt->state &= ~TWL4030_WDT_STATE_ACTIVE; -+ } -+ -+ wdt->state &= ~TWL4030_WDT_STATE_OPEN; -+ return 0; -+} -+ -+static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state) -+{ -+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev); -+#ifdef TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND -+ if (wdt->poweroff_on_suspend > 0 && wdt->poweroff_on_suspend < 31) -+ return twl4030_wdt_write(wdt->poweroff_on_suspend + 1); -+#endif -+ -+ return (wdt->state & TWL4030_WDT_STATE_ACTIVE) ? -+ twl4030_wdt_disable(wdt) : 0; -+} -+ -+static int twl4030_wdt_resume(struct platform_device *pdev) -+{ -+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev); -+ return (wdt->state & TWL4030_WDT_STATE_ACTIVE) ? 
-+ twl4030_wdt_enable(wdt) : 0; -+} -+ -+static const struct file_operations twl4030_wdt_fops = { -+ .owner = THIS_MODULE, -+ .open = twl4030_wdt_open, -+ .release = twl4030_wdt_release, -+ .unlocked_ioctl = twl4030_wdt_ioctl, -+ .write = twl4030_wdt_write_fop, -+}; -+ -+static int __devinit twl4030_wdt_probe(struct platform_device *pdev) -+{ -+ int ret = 0; -+ struct twl4030_wdt *wdt; -+ -+ wdt = kzalloc(sizeof(struct twl4030_wdt), GFP_KERNEL); -+ if (!wdt) -+ return -ENOMEM; -+ -+ wdt->state = 0; -+ wdt->timer_margin = 30; -+ wdt->miscdev.parent = &pdev->dev; -+ wdt->miscdev.fops = &twl4030_wdt_fops; -+ -+ /* RX51 HACK: Make it register as yet another misc device, -+ * as we do already have the omap_wdt registered as watchdog */ -+#ifdef CONFIG_MACH_NOKIA_RX51 -+ wdt->miscdev.minor = 142; -+ wdt->miscdev.name = "twl4030_wdt"; -+#else -+ wdt->miscdev.minor = WATCHDOG_MINOR; -+ wdt->miscdev.name = "watchdog"; -+#endif -+ -+ ret = misc_register(&wdt->miscdev); -+ if (ret) { -+ dev_err(wdt->miscdev.parent, -+ "Failed to register misc device\n"); -+ kfree(wdt); -+ return ret; -+ } -+ -+ platform_set_drvdata(pdev, wdt); -+ -+#ifdef TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND -+ twl4030_wdt_debugfs = debugfs_create_dir("twl4030_wdt", NULL); -+ if (!IS_ERR(twl4030_wdt_debugfs) && twl4030_wdt_debugfs) -+ debugfs_create_u8("poweroff_on_suspend", 0644, -+ twl4030_wdt_debugfs, &wdt->poweroff_on_suspend); -+#endif -+ twl4030_wdt_dev = pdev; -+ return 0; -+} -+ -+static int __devexit twl4030_wdt_remove(struct platform_device *pdev) -+{ -+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev); -+ -+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE) -+ if (twl4030_wdt_disable(wdt)) -+ return -EFAULT; -+#ifdef TWL4030_WDT_ENABLE_POWEROFF_ON_SUSPEND -+ if (!IS_ERR(twl4030_wdt_debugfs) && twl4030_wdt_debugfs) -+ debugfs_remove_recursive(twl4030_wdt_debugfs); -+#endif -+ -+ -+ misc_deregister(&wdt->miscdev); -+ -+ platform_set_drvdata(pdev, NULL); -+ kfree(wdt); -+ twl4030_wdt_dev = NULL; -+ -+ return 0; -+} -+ -+static struct platform_driver twl4030_wdt_driver = { -+ .probe = twl4030_wdt_probe, -+ .remove = __devexit_p(twl4030_wdt_remove), -+ .suspend = twl4030_wdt_suspend, -+ .resume = twl4030_wdt_resume, -+ .driver = { -+ .name = "twl4030_wdt", -+ }, -+}; -+ -+static int __devinit twl4030_wdt_init(void) -+{ -+ return platform_driver_register(&twl4030_wdt_driver); -+} -+module_init(twl4030_wdt_init); -+ -+static void __devexit twl4030_wdt_exit(void) -+{ -+ platform_driver_unregister(&twl4030_wdt_driver); -+} -+module_exit(twl4030_wdt_exit); -+ -+MODULE_AUTHOR("Nokia Corporation"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("platform:twl4030_wdt"); -diff -Nurp linux-omap-2.6.28-omap1/fs/affs/bitmap.c linux-omap-2.6.28-nokia1/fs/affs/bitmap.c ---- linux-omap-2.6.28-omap1/fs/affs/bitmap.c 2011-06-22 13:14:21.783067698 +0200 -+++ linux-omap-2.6.28-nokia1/fs/affs/bitmap.c 2011-06-22 13:19:33.203063269 +0200 -@@ -102,7 +102,7 @@ affs_free_block(struct super_block *sb, - *(__be32 *)bh->b_data = cpu_to_be32(tmp - mask); - - mark_buffer_dirty(bh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - bm->bm_free++; - - mutex_unlock(&sbi->s_bmlock); -@@ -247,7 +247,7 @@ find_bit: - *(__be32 *)bh->b_data = cpu_to_be32(tmp + mask); - - mark_buffer_dirty(bh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - mutex_unlock(&sbi->s_bmlock); - -diff -Nurp linux-omap-2.6.28-omap1/fs/affs/super.c linux-omap-2.6.28-nokia1/fs/affs/super.c ---- linux-omap-2.6.28-omap1/fs/affs/super.c 2011-06-22 13:14:21.783067698 +0200 -+++ 
linux-omap-2.6.28-nokia1/fs/affs/super.c 2011-06-22 13:19:33.203063269 +0200 -@@ -60,9 +60,13 @@ affs_write_super(struct super_block *sb) - &AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->disk_change); - affs_fix_checksum(sb, sbi->s_root_bh); - mark_buffer_dirty(sbi->s_root_bh); -- sb->s_dirt = !clean; /* redo until bitmap synced */ -+ /* redo until bitmap synced */ -+ if (clean) -+ mark_sb_clean(sb); -+ else -+ mark_sb_dirty(sb); - } else -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean); - } -@@ -518,8 +522,8 @@ affs_remount(struct super_block *sb, int - if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) - return 0; - if (*flags & MS_RDONLY) { -- sb->s_dirt = 1; -- while (sb->s_dirt) -+ mark_sb_dirty(sb); -+ while (is_sb_dirty(sb)) - affs_write_super(sb); - affs_free_bitmap(sb); - } else -diff -Nurp linux-omap-2.6.28-omap1/fs/bfs/inode.c linux-omap-2.6.28-nokia1/fs/bfs/inode.c ---- linux-omap-2.6.28-omap1/fs/bfs/inode.c 2011-06-22 13:14:21.843067696 +0200 -+++ linux-omap-2.6.28-nokia1/fs/bfs/inode.c 2011-06-22 13:19:33.203063269 +0200 -@@ -244,7 +244,7 @@ static void bfs_write_super(struct super - mutex_lock(&info->bfs_lock); - if (!(s->s_flags & MS_RDONLY)) - mark_buffer_dirty(info->si_sbh); -- s->s_dirt = 0; -+ mark_sb_clean(s); - mutex_unlock(&info->bfs_lock); - } - -@@ -411,7 +411,7 @@ static int bfs_fill_super(struct super_b - brelse(bh); - if (!(s->s_flags & MS_RDONLY)) { - mark_buffer_dirty(info->si_sbh); -- s->s_dirt = 1; -+ mark_sb_dirty(s); - } - dump_imap("read_super", s); - mutex_init(&info->bfs_lock); -diff -Nurp linux-omap-2.6.28-omap1/fs/eventpoll.c linux-omap-2.6.28-nokia1/fs/eventpoll.c ---- linux-omap-2.6.28-omap1/fs/eventpoll.c 2011-06-22 13:14:22.073067694 +0200 -+++ linux-omap-2.6.28-nokia1/fs/eventpoll.c 2011-06-22 13:19:33.203063269 +0200 -@@ -234,8 +234,6 @@ struct ep_pqueue { - /* - * Configuration options available inside /proc/sys/fs/epoll/ - */ --/* Maximum number of epoll devices, per user */ --static int max_user_instances __read_mostly; - /* Maximum number of epoll watched descriptors, per user */ - static int max_user_watches __read_mostly; - -@@ -261,14 +259,6 @@ static int zero; - - ctl_table epoll_table[] = { - { -- .procname = "max_user_instances", -- .data = &max_user_instances, -- .maxlen = sizeof(int), -- .mode = 0644, -- .proc_handler = &proc_dointvec_minmax, -- .extra1 = &zero, -- }, -- { - .procname = "max_user_watches", - .data = &max_user_watches, - .maxlen = sizeof(int), -@@ -491,7 +481,6 @@ static void ep_free(struct eventpoll *ep - - mutex_unlock(&epmutex); - mutex_destroy(&ep->mtx); -- atomic_dec(&ep->user->epoll_devs); - free_uid(ep->user); - kfree(ep); - } -@@ -581,10 +570,6 @@ static int ep_alloc(struct eventpoll **p - struct eventpoll *ep; - - user = get_current_user(); -- error = -EMFILE; -- if (unlikely(atomic_read(&user->epoll_devs) >= -- max_user_instances)) -- goto free_uid; - error = -ENOMEM; - ep = kzalloc(sizeof(*ep), GFP_KERNEL); - if (unlikely(!ep)) -@@ -1141,7 +1126,6 @@ asmlinkage long sys_epoll_create1(int fl - flags & O_CLOEXEC); - if (fd < 0) - ep_free(ep); -- atomic_inc(&ep->user->epoll_devs); - - error_return: - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", -@@ -1366,8 +1350,10 @@ static int __init eventpoll_init(void) - struct sysinfo si; - - si_meminfo(&si); -- max_user_instances = 128; -- max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) / -+ /* -+ * Allows top 4% of lomem to be allocated for epoll watches (per 
user). -+ */ -+ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / - EP_ITEM_COST; - - /* Initialize the structure used to perform safe poll wait head wake ups */ -diff -Nurp linux-omap-2.6.28-omap1/fs/exec.c linux-omap-2.6.28-nokia1/fs/exec.c ---- linux-omap-2.6.28-omap1/fs/exec.c 2011-06-22 13:14:22.073067694 +0200 -+++ linux-omap-2.6.28-nokia1/fs/exec.c 2011-06-22 13:19:33.203063269 +0200 -@@ -1778,6 +1778,14 @@ int do_coredump(long signr, int exit_cod - goto fail_unlock; - - if (ispipe) { -+ char env_exe[64]; -+ char env_pid[16], env_sig[16]; -+ char env_uid[16], env_gid[16]; -+ char *envp[] = { env_exe, -+ env_pid, env_sig, -+ env_uid, env_gid, -+ NULL }; -+ - helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc); - /* Terminate the string before the first option */ - delimit = strchr(corename, ' '); -@@ -1796,8 +1804,14 @@ int do_coredump(long signr, int exit_cod - - core_limit = RLIM_INFINITY; - -+ snprintf(env_exe, sizeof(env_exe), "CORE_EXE=%s", current->comm); -+ snprintf(env_pid, sizeof(env_pid), "CORE_PID=%d", current->tgid); -+ snprintf(env_sig, sizeof(env_sig), "CORE_SIG=%ld", signr); -+ snprintf(env_uid, sizeof(env_uid), "CORE_UID=%u", current->uid); -+ snprintf(env_gid, sizeof(env_gid), "CORE_GID=%u", current->gid); -+ - /* SIGPIPE can happen, but it's just never processed */ -- if (call_usermodehelper_pipe(corename+1, helper_argv, NULL, -+ if (call_usermodehelper_pipe(corename+1, helper_argv, envp, - &file)) { - printk(KERN_INFO "Core dump to %s pipe failed\n", - corename); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext2/balloc.c linux-omap-2.6.28-nokia1/fs/ext2/balloc.c ---- linux-omap-2.6.28-omap1/fs/ext2/balloc.c 2011-06-22 13:14:22.083067692 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext2/balloc.c 2011-06-22 13:19:33.213063269 +0200 -@@ -164,7 +164,7 @@ static void release_blocks(struct super_ - struct ext2_sb_info *sbi = EXT2_SB(sb); - - percpu_counter_add(&sbi->s_freeblocks_counter, count); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - } - -@@ -179,7 +179,7 @@ static void group_adjust_blocks(struct s - free_blocks = le16_to_cpu(desc->bg_free_blocks_count); - desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count); - spin_unlock(sb_bgl_lock(sbi, group_no)); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mark_buffer_dirty(bh); - } - } -diff -Nurp linux-omap-2.6.28-omap1/fs/ext2/ialloc.c linux-omap-2.6.28-nokia1/fs/ext2/ialloc.c ---- linux-omap-2.6.28-omap1/fs/ext2/ialloc.c 2011-06-22 13:14:22.103067692 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext2/ialloc.c 2011-06-22 13:19:33.213063269 +0200 -@@ -81,7 +81,7 @@ static void ext2_release_inode(struct su - spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); - if (dir) - percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mark_buffer_dirty(bh); - } - -@@ -548,7 +548,7 @@ got: - } - spin_unlock(sb_bgl_lock(sbi, group)); - -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mark_buffer_dirty(bh2); - inode->i_uid = current->fsuid; - if (test_opt (sb, GRPID)) -diff -Nurp linux-omap-2.6.28-omap1/fs/ext2/super.c linux-omap-2.6.28-nokia1/fs/ext2/super.c ---- linux-omap-2.6.28-omap1/fs/ext2/super.c 2011-06-22 13:14:22.103067692 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext2/super.c 2011-06-22 13:19:33.213063269 +0200 -@@ -1094,7 +1094,7 @@ static void ext2_commit_super (struct su - { - es->s_wtime = cpu_to_le32(get_seconds()); - mark_buffer_dirty(EXT2_SB(sb)->s_sbh); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - static void ext2_sync_super(struct super_block *sb, struct 
ext2_super_block *es) -@@ -1104,7 +1104,7 @@ static void ext2_sync_super(struct super - es->s_wtime = cpu_to_le32(get_seconds()); - mark_buffer_dirty(EXT2_SB(sb)->s_sbh); - sync_dirty_buffer(EXT2_SB(sb)->s_sbh); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - /* -@@ -1135,7 +1135,7 @@ void ext2_write_super (struct super_bloc - } else - ext2_commit_super (sb, es); - } -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - unlock_kernel(); - } - -diff -Nurp linux-omap-2.6.28-omap1/fs/ext2/xattr.c linux-omap-2.6.28-nokia1/fs/ext2/xattr.c ---- linux-omap-2.6.28-omap1/fs/ext2/xattr.c 2011-06-22 13:14:22.113067692 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext2/xattr.c 2011-06-22 13:19:33.213063269 +0200 -@@ -343,7 +343,7 @@ static void ext2_xattr_update_super_bloc - return; - - EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mark_buffer_dirty(EXT2_SB(sb)->s_sbh); - } - -diff -Nurp linux-omap-2.6.28-omap1/fs/ext3/balloc.c linux-omap-2.6.28-nokia1/fs/ext3/balloc.c ---- linux-omap-2.6.28-omap1/fs/ext3/balloc.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext3/balloc.c 2011-06-22 13:19:33.223063269 +0200 -@@ -649,7 +649,7 @@ do_more: - count = overflow; - goto do_more; - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - error_return: - brelse(bitmap_bh); - ext3_std_error(sb, err); -@@ -1708,7 +1708,7 @@ allocated: - if (!fatal) - fatal = err; - -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - if (fatal) - goto out; - -diff -Nurp linux-omap-2.6.28-omap1/fs/ext3/ialloc.c linux-omap-2.6.28-nokia1/fs/ext3/ialloc.c ---- linux-omap-2.6.28-omap1/fs/ext3/ialloc.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext3/ialloc.c 2011-06-22 13:19:33.223063269 +0200 -@@ -181,7 +181,7 @@ void ext3_free_inode (handle_t *handle, - err = ext3_journal_dirty_metadata(handle, bitmap_bh); - if (!fatal) - fatal = err; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - error_return: - brelse(bitmap_bh); - ext3_std_error(sb, fatal); -@@ -537,7 +537,7 @@ got: - percpu_counter_dec(&sbi->s_freeinodes_counter); - if (S_ISDIR(mode)) - percpu_counter_inc(&sbi->s_dirs_counter); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - inode->i_uid = current->fsuid; - if (test_opt (sb, GRPID)) -diff -Nurp linux-omap-2.6.28-omap1/fs/ext3/inode.c linux-omap-2.6.28-nokia1/fs/ext3/inode.c ---- linux-omap-2.6.28-omap1/fs/ext3/inode.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext3/inode.c 2011-06-22 13:19:33.223063269 +0200 -@@ -2924,7 +2924,7 @@ static int ext3_do_update_inode(handle_t - ext3_update_dynamic_rev(sb); - EXT3_SET_RO_COMPAT_FEATURE(sb, - EXT3_FEATURE_RO_COMPAT_LARGE_FILE); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - handle->h_sync = 1; - err = ext3_journal_dirty_metadata(handle, - EXT3_SB(sb)->s_sbh); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext3/resize.c linux-omap-2.6.28-nokia1/fs/ext3/resize.c ---- linux-omap-2.6.28-omap1/fs/ext3/resize.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext3/resize.c 2011-06-22 13:19:33.223063269 +0200 -@@ -934,7 +934,7 @@ int ext3_group_add(struct super_block *s - EXT3_INODES_PER_GROUP(sb)); - - ext3_journal_dirty_metadata(handle, sbi->s_sbh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - exit_journal: - unlock_super(sb); -@@ -1066,7 +1066,7 @@ int ext3_group_extend(struct super_block - } - es->s_blocks_count = cpu_to_le32(o_blocks_count + add); - ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - unlock_super(sb); - ext3_debug("freeing blocks 
%lu through "E3FSBLK"\n", o_blocks_count, - o_blocks_count + add); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext3/super.c linux-omap-2.6.28-nokia1/fs/ext3/super.c ---- linux-omap-2.6.28-omap1/fs/ext3/super.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext3/super.c 2011-06-22 13:19:33.223063269 +0200 -@@ -2221,7 +2221,7 @@ static int ext3_load_journal(struct supe - if (journal_devnum && - journal_devnum != le32_to_cpu(es->s_journal_dev)) { - es->s_journal_dev = cpu_to_le32(journal_devnum); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - /* Make sure we flush the recovery flag to disk. */ - ext3_commit_super(sb, es, 1); -@@ -2264,7 +2264,7 @@ static int ext3_create_journal(struct su - EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL); - - es->s_journal_inum = cpu_to_le32(journal_inum); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - /* Make sure we flush the recovery flag to disk. */ - ext3_commit_super(sb, es, 1); -@@ -2308,7 +2308,7 @@ static void ext3_mark_recovery_complete( - if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && - sb->s_flags & MS_RDONLY) { - EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - ext3_commit_super(sb, es, 1); - } - unlock_super(sb); -@@ -2367,7 +2367,7 @@ int ext3_force_commit(struct super_block - return 0; - - journal = EXT3_SB(sb)->s_journal; -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - ret = ext3_journal_force_commit(journal); - return ret; - } -@@ -2382,12 +2382,12 @@ static void ext3_write_super (struct sup - { - if (mutex_trylock(&sb->s_lock) != 0) - BUG(); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - static int ext3_sync_fs(struct super_block *sb, int wait) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - if (wait) - ext3_force_commit(sb); - else -@@ -2402,7 +2402,7 @@ static int ext3_sync_fs(struct super_blo - */ - static void ext3_write_super_lockfs(struct super_block *sb) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - if (!(sb->s_flags & MS_RDONLY)) { - journal_t *journal = EXT3_SB(sb)->s_journal; -diff -Nurp linux-omap-2.6.28-omap1/fs/ext3/xattr.c linux-omap-2.6.28-nokia1/fs/ext3/xattr.c ---- linux-omap-2.6.28-omap1/fs/ext3/xattr.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext3/xattr.c 2011-06-22 13:19:33.223063269 +0200 -@@ -463,7 +463,7 @@ static void ext3_xattr_update_super_bloc - - if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) { - EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); - } - } -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/balloc.c linux-omap-2.6.28-nokia1/fs/ext4/balloc.c ---- linux-omap-2.6.28-omap1/fs/ext4/balloc.c 2011-06-22 13:14:22.143067693 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/balloc.c 2011-06-22 13:19:33.223063269 +0200 -@@ -544,7 +544,7 @@ do_more: - count = overflow; - goto do_more; - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - error_return: - brelse(bitmap_bh); - ext4_std_error(sb, err); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/ialloc.c linux-omap-2.6.28-nokia1/fs/ext4/ialloc.c ---- linux-omap-2.6.28-omap1/fs/ext4/ialloc.c 2011-06-22 13:14:22.183067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/ialloc.c 2011-06-22 13:19:33.223063269 +0200 -@@ -261,7 +261,7 @@ void ext4_free_inode(handle_t *handle, s - err = ext4_journal_dirty_metadata(handle, bitmap_bh); - if (!fatal) - fatal = err; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - error_return: - brelse(bitmap_bh); - 
ext4_std_error(sb, fatal); -@@ -778,7 +778,7 @@ got: - percpu_counter_dec(&sbi->s_freeinodes_counter); - if (S_ISDIR(mode)) - percpu_counter_inc(&sbi->s_dirs_counter); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - if (sbi->s_log_groups_per_flex) { - flex_group = ext4_flex_group(sbi, group); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/inode.c linux-omap-2.6.28-nokia1/fs/ext4/inode.c ---- linux-omap-2.6.28-omap1/fs/ext4/inode.c 2011-06-22 13:14:22.183067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/inode.c 2011-06-22 13:19:33.223063269 +0200 -@@ -4309,7 +4309,7 @@ static int ext4_do_update_inode(handle_t - ext4_update_dynamic_rev(sb); - EXT4_SET_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_LARGE_FILE); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - handle->h_sync = 1; - err = ext4_journal_dirty_metadata(handle, - EXT4_SB(sb)->s_sbh); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/mballoc.c linux-omap-2.6.28-nokia1/fs/ext4/mballoc.c ---- linux-omap-2.6.28-omap1/fs/ext4/mballoc.c 2011-06-22 13:14:22.183067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/mballoc.c 2011-06-22 13:19:33.223063269 +0200 -@@ -2905,7 +2905,7 @@ ext4_mb_mark_diskspace_used(struct ext4_ - err = ext4_journal_dirty_metadata(handle, gdp_bh); - - out_err: -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - brelse(bitmap_bh); - return err; - } -@@ -4647,7 +4647,7 @@ do_more: - put_bh(bitmap_bh); - goto do_more; - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - error_return: - brelse(bitmap_bh); - ext4_std_error(sb, err); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/resize.c linux-omap-2.6.28-nokia1/fs/ext4/resize.c ---- linux-omap-2.6.28-omap1/fs/ext4/resize.c 2011-06-22 13:14:22.183067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/resize.c 2011-06-22 13:19:33.223063269 +0200 -@@ -938,7 +938,7 @@ int ext4_group_add(struct super_block *s - } - - ext4_journal_dirty_metadata(handle, sbi->s_sbh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - exit_journal: - unlock_super(sb); -@@ -1072,7 +1072,7 @@ int ext4_group_extend(struct super_block - } - ext4_blocks_count_set(es, o_blocks_count + add); - ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - unlock_super(sb); - ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, - o_blocks_count + add); -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/super.c linux-omap-2.6.28-nokia1/fs/ext4/super.c ---- linux-omap-2.6.28-omap1/fs/ext4/super.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/super.c 2011-06-22 13:19:33.223063269 +0200 -@@ -2710,7 +2710,7 @@ static int ext4_load_journal(struct supe - if (journal_devnum && - journal_devnum != le32_to_cpu(es->s_journal_dev)) { - es->s_journal_dev = cpu_to_le32(journal_devnum); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - /* Make sure we flush the recovery flag to disk. */ - ext4_commit_super(sb, es, 1); -@@ -2753,7 +2753,7 @@ static int ext4_create_journal(struct su - EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL); - - es->s_journal_inum = cpu_to_le32(journal_inum); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - /* Make sure we flush the recovery flag to disk. 
*/ - ext4_commit_super(sb, es, 1); -@@ -2817,7 +2817,7 @@ static void ext4_mark_recovery_complete( - if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) && - sb->s_flags & MS_RDONLY) { - EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - ext4_commit_super(sb, es, 1); - } - unlock_super(sb); -@@ -2876,7 +2876,7 @@ int ext4_force_commit(struct super_block - return 0; - - journal = EXT4_SB(sb)->s_journal; -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - ret = ext4_journal_force_commit(journal); - return ret; - } -@@ -2891,7 +2891,7 @@ static void ext4_write_super(struct supe - { - if (mutex_trylock(&sb->s_lock) != 0) - BUG(); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - static int ext4_sync_fs(struct super_block *sb, int wait) -@@ -2899,7 +2899,7 @@ static int ext4_sync_fs(struct super_blo - int ret = 0; - - trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - if (wait) - ret = ext4_force_commit(sb); - else -@@ -2913,7 +2913,7 @@ static int ext4_sync_fs(struct super_blo - */ - static void ext4_write_super_lockfs(struct super_block *sb) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - if (!(sb->s_flags & MS_RDONLY)) { - journal_t *journal = EXT4_SB(sb)->s_journal; -diff -Nurp linux-omap-2.6.28-omap1/fs/ext4/xattr.c linux-omap-2.6.28-nokia1/fs/ext4/xattr.c ---- linux-omap-2.6.28-omap1/fs/ext4/xattr.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ext4/xattr.c 2011-06-22 13:19:33.223063269 +0200 -@@ -456,7 +456,7 @@ static void ext4_xattr_update_super_bloc - - if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { - EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); - } - } -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/cache.c linux-omap-2.6.28-nokia1/fs/fat/cache.c ---- linux-omap-2.6.28-omap1/fs/fat/cache.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/cache.c 2011-06-22 13:19:33.233063269 +0200 -@@ -241,7 +241,7 @@ int fat_get_cluster(struct inode *inode, - while (*fclus < cluster) { - /* prevent the infinite loop of cluster chain */ - if (*fclus > limit) { -- fat_fs_panic(sb, "%s: detected the cluster chain loop" -+ fat_fs_error(sb, "%s: detected the cluster chain loop" - " (i_pos %lld)", __func__, - MSDOS_I(inode)->i_pos); - nr = -EIO; -@@ -252,7 +252,7 @@ int fat_get_cluster(struct inode *inode, - if (nr < 0) - goto out; - else if (nr == FAT_ENT_FREE) { -- fat_fs_panic(sb, "%s: invalid cluster chain" -+ fat_fs_error(sb, "%s: invalid cluster chain" - " (i_pos %lld)", __func__, - MSDOS_I(inode)->i_pos); - nr = -EIO; -@@ -285,7 +285,7 @@ static int fat_bmap_cluster(struct inode - if (ret < 0) - return ret; - else if (ret == FAT_ENT_EOF) { -- fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)", -+ fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)", - __func__, MSDOS_I(inode)->i_pos); - return -EIO; - } -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/dir.c linux-omap-2.6.28-nokia1/fs/fat/dir.c ---- linux-omap-2.6.28-omap1/fs/fat/dir.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/dir.c 2011-06-22 13:19:33.233063269 +0200 -@@ -1335,7 +1335,7 @@ found: - goto error_remove; - } - if (dir->i_size & (sbi->cluster_size - 1)) { -- fat_fs_panic(sb, "Odd directory size"); -+ fat_fs_error(sb, "Odd directory size"); - dir->i_size = (dir->i_size + sbi->cluster_size - 1) - & 
~((loff_t)sbi->cluster_size - 1); - } -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/fatent.c linux-omap-2.6.28-nokia1/fs/fat/fatent.c ---- linux-omap-2.6.28-omap1/fs/fat/fatent.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/fatent.c 2011-06-22 13:19:33.233063269 +0200 -@@ -345,7 +345,7 @@ int fat_ent_read(struct inode *inode, st - - if (entry < FAT_START_ENT || sbi->max_cluster <= entry) { - fatent_brelse(fatent); -- fat_fs_panic(sb, "invalid access to FAT (entry 0x%08x)", entry); -+ fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry); - return -EIO; - } - -@@ -495,7 +495,7 @@ int fat_alloc_clusters(struct inode *ino - sbi->prev_free = entry; - if (sbi->free_clusters != -1) - sbi->free_clusters--; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - cluster[idx_clus] = entry; - idx_clus++; -@@ -517,7 +517,7 @@ int fat_alloc_clusters(struct inode *ino - /* Couldn't allocate the free entries */ - sbi->free_clusters = 0; - sbi->free_clus_valid = 1; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - err = -ENOSPC; - - out: -@@ -557,7 +557,7 @@ int fat_free_clusters(struct inode *inod - err = cluster; - goto error; - } else if (cluster == FAT_ENT_FREE) { -- fat_fs_panic(sb, "%s: deleting FAT entry beyond EOF", -+ fat_fs_error(sb, "%s: deleting FAT entry beyond EOF", - __func__); - err = -EIO; - goto error; -@@ -578,7 +578,7 @@ int fat_free_clusters(struct inode *inod - ops->ent_put(&fatent, FAT_ENT_FREE); - if (sbi->free_clusters != -1) { - sbi->free_clusters++; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - - if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) { -@@ -668,7 +668,7 @@ int fat_count_free_clusters(struct super - } - sbi->free_clusters = free; - sbi->free_clus_valid = 1; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - fatent_brelse(&fatent); - out: - unlock_fat(sbi); -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/fat.h linux-omap-2.6.28-nokia1/fs/fat/fat.h ---- linux-omap-2.6.28-omap1/fs/fat/fat.h 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/fat.h 2011-06-22 13:19:33.233063269 +0200 -@@ -17,6 +17,10 @@ - #define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ - #define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ - -+#define FAT_ERRORS_CONT 1 /* ignore error and continue */ -+#define FAT_ERRORS_PANIC 2 /* panic on error */ -+#define FAT_ERRORS_RO 3 /* remount r/o on error */ -+ - struct fat_mount_options { - uid_t fs_uid; - gid_t fs_gid; -@@ -26,6 +30,7 @@ struct fat_mount_options { - char *iocharset; /* Charset used for filename input/display */ - unsigned short shortname; /* flags for shortname display/create rule */ - unsigned char name_check; /* r = relaxed, n = normal, s = strict */ -+ unsigned char errors; /* On error: continue, panic, remount-ro */ - unsigned short allow_utime;/* permission for setting the [am]time */ - unsigned quiet:1, /* set = fake successful chmods and chowns */ - showexec:1, /* set = only set x bit for com/exe/bat */ -@@ -310,7 +315,7 @@ extern int fat_fill_super(struct super_b - extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, - struct inode *i2); - /* fat/misc.c */ --extern void fat_fs_panic(struct super_block *s, const char *fmt, ...) -+extern void fat_fs_error(struct super_block *s, const char *fmt, ...) 
- __attribute__ ((format (printf, 2, 3))) __cold; - extern void fat_clusters_flush(struct super_block *sb); - extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/file.c linux-omap-2.6.28-nokia1/fs/fat/file.c ---- linux-omap-2.6.28-omap1/fs/fat/file.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/file.c 2011-06-22 13:19:33.233063269 +0200 -@@ -213,7 +213,7 @@ static int fat_free(struct inode *inode, - fatent_brelse(&fatent); - return 0; - } else if (ret == FAT_ENT_FREE) { -- fat_fs_panic(sb, -+ fat_fs_error(sb, - "%s: invalid cluster chain (i_pos %lld)", - __func__, MSDOS_I(inode)->i_pos); - ret = -EIO; -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/inode.c linux-omap-2.6.28-nokia1/fs/fat/inode.c ---- linux-omap-2.6.28-omap1/fs/fat/inode.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/inode.c 2011-06-22 13:19:33.233063269 +0200 -@@ -76,7 +76,7 @@ static inline int __fat_get_block(struct - return 0; - - if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) { -- fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)", -+ fat_fs_error(sb, "corrupted file size (i_pos %lld, %lld)", - MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private); - return -EIO; - } -@@ -441,7 +441,7 @@ static void fat_clear_inode(struct inode - - static void fat_write_super(struct super_block *sb) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - if (!(sb->s_flags & MS_RDONLY)) - fat_clusters_flush(sb); -@@ -828,6 +828,12 @@ static int fat_show_options(struct seq_f - seq_puts(m, ",flush"); - if (opts->tz_utc) - seq_puts(m, ",tz=UTC"); -+ if (opts->errors == FAT_ERRORS_CONT) -+ seq_puts(m, ",errors=continue"); -+ else if (opts->errors == FAT_ERRORS_PANIC) -+ seq_puts(m, ",errors=panic"); -+ else -+ seq_puts(m, ",errors=remount-ro"); - - return 0; - } -@@ -840,7 +846,8 @@ enum { - Opt_charset, Opt_shortname_lower, Opt_shortname_win95, - Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes, - Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes, -- Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err, -+ Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont, -+ Opt_err_panic, Opt_err_ro, Opt_err, - }; - - static const match_table_t fat_tokens = { -@@ -876,6 +883,9 @@ static const match_table_t fat_tokens = - {Opt_obsolate, "posix"}, - {Opt_flush, "flush"}, - {Opt_tz_utc, "tz=UTC"}, -+ {Opt_err_cont, "errors=continue"}, -+ {Opt_err_panic, "errors=panic"}, -+ {Opt_err_ro, "errors=remount-ro"}, - {Opt_err, NULL}, - }; - static const match_table_t msdos_tokens = { -@@ -945,6 +955,7 @@ static int parse_options(char *options, - opts->numtail = 1; - opts->usefree = opts->nocase = 0; - opts->tz_utc = 0; -+ opts->errors = FAT_ERRORS_RO; - *debug = 0; - - if (!options) -@@ -1037,6 +1048,15 @@ static int parse_options(char *options, - case Opt_tz_utc: - opts->tz_utc = 1; - break; -+ case Opt_err_cont: -+ opts->errors = FAT_ERRORS_CONT; -+ break; -+ case Opt_err_panic: -+ opts->errors = FAT_ERRORS_PANIC; -+ break; -+ case Opt_err_ro: -+ opts->errors = FAT_ERRORS_RO; -+ break; - - /* msdos specific */ - case Opt_dots: -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/misc.c linux-omap-2.6.28-nokia1/fs/fat/misc.c ---- linux-omap-2.6.28-omap1/fs/fat/misc.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/misc.c 2011-06-22 13:19:33.233063269 +0200 -@@ -12,14 +12,19 @@ - #include "fat.h" - - /* -- * fat_fs_panic reports a severe file system problem and sets 
the file system -- * read-only. The file system can be made writable again by remounting it. -+ * fat_fs_error reports a file system problem that might indicate fa data -+ * corruption/inconsistency. Depending on 'errors' mount option the -+ * panic() is called, or error message is printed FAT and nothing is done, -+ * or filesystem is remounted read-only (default behavior). -+ * In case the file system is remounted read-only, it can be made writable -+ * again by remounting it. - */ --void fat_fs_panic(struct super_block *s, const char *fmt, ...) -+void fat_fs_error(struct super_block *s, const char *fmt, ...) - { - va_list args; -+ struct msdos_sb_info *sbi = MSDOS_SB(s); - -- printk(KERN_ERR "FAT: Filesystem panic (dev %s)\n", s->s_id); -+ printk(KERN_ERR "FAT: Filesystem error (dev %s)\n", s->s_id); - - printk(KERN_ERR " "); - va_start(args, fmt); -@@ -27,13 +32,15 @@ void fat_fs_panic(struct super_block *s, - va_end(args); - printk("\n"); - -- if (!(s->s_flags & MS_RDONLY)) { -+ if (sbi->options.errors == FAT_ERRORS_PANIC) -+ panic(" FAT fs panic from previous error\n"); -+ if ((sbi->options.errors == FAT_ERRORS_RO) && -+ !(s->s_flags & MS_RDONLY)) { - s->s_flags |= MS_RDONLY; - printk(KERN_ERR " File system has been set read-only\n"); - } - } -- --EXPORT_SYMBOL_GPL(fat_fs_panic); -+EXPORT_SYMBOL_GPL(fat_fs_error); - - /* Flushes the number of free clusters on FAT32 */ - /* XXX: Need to write one per FSINFO block. Currently only writes 1 */ -@@ -124,7 +131,7 @@ int fat_chain_add(struct inode *inode, i - mark_inode_dirty(inode); - } - if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) { -- fat_fs_panic(sb, "clusters badly computed (%d != %llu)", -+ fat_fs_error(sb, "clusters badly computed (%d != %llu)", - new_fclus, - (llu)(inode->i_blocks >> (sbi->cluster_bits - 9))); - fat_cache_inval_inode(inode); -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/namei_msdos.c linux-omap-2.6.28-nokia1/fs/fat/namei_msdos.c ---- linux-omap-2.6.28-omap1/fs/fat/namei_msdos.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/namei_msdos.c 2011-06-22 13:19:33.233063269 +0200 -@@ -608,7 +608,7 @@ error_inode: - sinfo.bh = NULL; - } - if (corrupt < 0) { -- fat_fs_panic(new_dir->i_sb, -+ fat_fs_error(new_dir->i_sb, - "%s: Filesystem corrupted (i_pos %lld)", - __func__, sinfo.i_pos); - } -diff -Nurp linux-omap-2.6.28-omap1/fs/fat/namei_vfat.c linux-omap-2.6.28-nokia1/fs/fat/namei_vfat.c ---- linux-omap-2.6.28-omap1/fs/fat/namei_vfat.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fat/namei_vfat.c 2011-06-22 13:19:33.233063269 +0200 -@@ -1030,7 +1030,7 @@ error_inode: - sinfo.bh = NULL; - } - if (corrupt < 0) { -- fat_fs_panic(new_dir->i_sb, -+ fat_fs_error(new_dir->i_sb, - "%s: Filesystem corrupted (i_pos %lld)", - __func__, sinfo.i_pos); - } -diff -Nurp linux-omap-2.6.28-omap1/fs/fs-writeback.c linux-omap-2.6.28-nokia1/fs/fs-writeback.c ---- linux-omap-2.6.28-omap1/fs/fs-writeback.c 2011-06-22 13:14:22.193067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/fs-writeback.c 2011-06-22 13:19:33.233063269 +0200 -@@ -65,6 +65,24 @@ static void writeback_release(struct bac - } - - /** -+ * enable_pwb - enable periodic write-back after an inode was marked as dirty. -+ * @inode: the inode which was marked as dirty -+ * -+ * This is a helper function for '__mark_inode_dirty()' which enables the -+ * periodic write-back, unless: -+ * * the backing device @inode belongs to does not support write-back; -+ * * periodic write-back is already enabled. 
-+ */ -+static void enable_pwb(struct inode *inode) -+{ -+ struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; -+ -+ if (bdi_cap_writeback_dirty(bdi) && -+ atomic_add_unless(&periodic_wb_enabled, 1, 1)) -+ enable_periodic_wb(); -+} -+ -+/** - * __mark_inode_dirty - internal function - * @inode: inode to mark - * @flags: what kind of dirty (i.e. I_DIRTY_SYNC) -@@ -164,6 +182,7 @@ void __mark_inode_dirty(struct inode *in - if (!was_dirty) { - inode->dirtied_when = jiffies; - list_move(&inode->i_list, &sb->s_dirty); -+ enable_pwb(inode); - } - } - out: -@@ -172,6 +191,28 @@ out: - - EXPORT_SYMBOL(__mark_inode_dirty); - -+/** -+ * mark_sb_dirty - mark super block as dirty. -+ * @sb: the super block to mark as dirty -+ * -+ * This function marks super block @sb as dirty and enables the periodic -+ * write-back, unless it is already enabled. Note, VFS does not serialize the -+ * super block clean/dirty (@sb->s_dirt) state changes, and each FS is -+ * responsible for doing its own serialization. -+ */ -+void mark_sb_dirty(struct super_block *sb) -+{ -+ sb->s_dirt = 1; -+ /* -+ * If 'periodic_wb_enabled' is 0, set it to 1 and enable the periodic -+ * write-back. -+ */ -+ if (atomic_add_unless(&periodic_wb_enabled, 1, 1)) -+ enable_periodic_wb(); -+} -+ -+EXPORT_SYMBOL(mark_sb_dirty); -+ - static int write_inode(struct inode *inode, int sync) - { - if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) -diff -Nurp linux-omap-2.6.28-omap1/fs/gfs2/log.c linux-omap-2.6.28-nokia1/fs/gfs2/log.c ---- linux-omap-2.6.28-omap1/fs/gfs2/log.c 2011-06-22 13:14:22.203067691 +0200 -+++ linux-omap-2.6.28-nokia1/fs/gfs2/log.c 2011-06-22 13:19:33.233063269 +0200 -@@ -765,7 +765,7 @@ void __gfs2_log_flush(struct gfs2_sbd *s - } - gfs2_log_unlock(sdp); - -- sdp->sd_vfs->s_dirt = 0; -+ makr_sb_clean(sdp->sd_vfs); - up_write(&sdp->sd_log_flush_lock); - - kfree(ai); -@@ -824,7 +824,7 @@ void gfs2_log_commit(struct gfs2_sbd *sd - log_refund(sdp, tr); - buf_lo_incore_commit(sdp, tr); - -- sdp->sd_vfs->s_dirt = 1; -+ mark_sb_dirty(sdp->sd_vfs); - up_read(&sdp->sd_log_flush_lock); - - gfs2_log_lock(sdp); -diff -Nurp linux-omap-2.6.28-omap1/fs/gfs2/ops_super.c linux-omap-2.6.28-nokia1/fs/gfs2/ops_super.c ---- linux-omap-2.6.28-omap1/fs/gfs2/ops_super.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/gfs2/ops_super.c 2011-06-22 13:19:33.233063269 +0200 -@@ -196,7 +196,7 @@ static void gfs2_put_super(struct super_ - - static void gfs2_write_super(struct super_block *sb) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - /** -@@ -208,7 +208,7 @@ static void gfs2_write_super(struct supe - - static int gfs2_sync_fs(struct super_block *sb, int wait) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - if (wait && sb->s_fs_info) - gfs2_log_flush(sb->s_fs_info, NULL); - return 0; -diff -Nurp linux-omap-2.6.28-omap1/fs/hfs/extent.c linux-omap-2.6.28-nokia1/fs/hfs/extent.c ---- linux-omap-2.6.28-omap1/fs/hfs/extent.c 2011-06-22 13:10:46.463070758 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfs/extent.c 2011-06-22 13:19:33.233063269 +0200 -@@ -432,7 +432,7 @@ out: - if (inode->i_ino < HFS_FIRSTUSER_CNID) - set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags); - set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - return res; - -diff -Nurp linux-omap-2.6.28-omap1/fs/hfs/hfs_fs.h linux-omap-2.6.28-nokia1/fs/hfs/hfs_fs.h ---- linux-omap-2.6.28-omap1/fs/hfs/hfs_fs.h 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfs/hfs_fs.h 2011-06-22 
13:19:33.233063269 +0200 -@@ -251,7 +251,7 @@ static inline const char *hfs_mdb_name(s - static inline void hfs_bitmap_dirty(struct super_block *sb) - { - set_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - - static inline void hfs_buffer_sync(struct buffer_head *bh) -diff -Nurp linux-omap-2.6.28-omap1/fs/hfs/inode.c linux-omap-2.6.28-nokia1/fs/hfs/inode.c ---- linux-omap-2.6.28-omap1/fs/hfs/inode.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfs/inode.c 2011-06-22 13:19:33.233063269 +0200 -@@ -194,7 +194,7 @@ struct inode *hfs_new_inode(struct inode - insert_inode_hash(inode); - mark_inode_dirty(inode); - set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - return inode; - } -@@ -209,7 +209,7 @@ void hfs_delete_inode(struct inode *inod - if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID)) - HFS_SB(sb)->root_dirs--; - set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - return; - } - HFS_SB(sb)->file_count--; -@@ -222,7 +222,7 @@ void hfs_delete_inode(struct inode *inod - } - } - set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - - void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, -diff -Nurp linux-omap-2.6.28-omap1/fs/hfs/super.c linux-omap-2.6.28-nokia1/fs/hfs/super.c ---- linux-omap-2.6.28-omap1/fs/hfs/super.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfs/super.c 2011-06-22 13:19:33.233063269 +0200 -@@ -49,7 +49,7 @@ MODULE_LICENSE("GPL"); - */ - static void hfs_write_super(struct super_block *sb) - { -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - if (sb->s_flags & MS_RDONLY) - return; - /* sync everything to the buffers */ -diff -Nurp linux-omap-2.6.28-omap1/fs/hfsplus/bitmap.c linux-omap-2.6.28-nokia1/fs/hfsplus/bitmap.c ---- linux-omap-2.6.28-omap1/fs/hfsplus/bitmap.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfsplus/bitmap.c 2011-06-22 13:19:33.233063269 +0200 -@@ -151,7 +151,7 @@ done: - kunmap(page); - *max = offset + (curr - pptr) * 32 + i - start; - HFSPLUS_SB(sb).free_blocks -= *max; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); - out: - mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex); -@@ -225,7 +225,7 @@ out: - set_page_dirty(page); - kunmap(page); - HFSPLUS_SB(sb).free_blocks += len; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex); - - return 0; -diff -Nurp linux-omap-2.6.28-omap1/fs/hfsplus/dir.c linux-omap-2.6.28-nokia1/fs/hfsplus/dir.c ---- linux-omap-2.6.28-omap1/fs/hfsplus/dir.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfsplus/dir.c 2011-06-22 13:19:33.233063269 +0200 -@@ -305,7 +305,7 @@ static int hfsplus_link(struct dentry *s - inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - HFSPLUS_SB(sb).file_count++; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - return 0; - } -diff -Nurp linux-omap-2.6.28-omap1/fs/hfsplus/inode.c linux-omap-2.6.28-nokia1/fs/hfsplus/inode.c ---- linux-omap-2.6.28-omap1/fs/hfsplus/inode.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfsplus/inode.c 2011-06-22 13:19:33.233063269 +0200 -@@ -333,7 +333,7 @@ struct inode *hfsplus_new_inode(struct s - HFSPLUS_SB(sb).file_count++; - insert_inode_hash(inode); - mark_inode_dirty(inode); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - return inode; - } -@@ -344,7 +344,7 @@ void 
hfsplus_delete_inode(struct inode * - - if (S_ISDIR(inode->i_mode)) { - HFSPLUS_SB(sb).folder_count--; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - return; - } - HFSPLUS_SB(sb).file_count--; -@@ -357,7 +357,7 @@ void hfsplus_delete_inode(struct inode * - inode->i_size = 0; - hfsplus_file_truncate(inode); - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - - void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork) -diff -Nurp linux-omap-2.6.28-omap1/fs/hfsplus/super.c linux-omap-2.6.28-nokia1/fs/hfsplus/super.c ---- linux-omap-2.6.28-omap1/fs/hfsplus/super.c 2011-06-22 13:14:22.253067690 +0200 -+++ linux-omap-2.6.28-nokia1/fs/hfsplus/super.c 2011-06-22 13:19:33.233063269 +0200 -@@ -104,7 +104,7 @@ static int hfsplus_write_inode(struct in - case HFSPLUS_EXT_CNID: - if (vhdr->ext_file.total_size != cpu_to_be64(inode->i_size)) { - HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; -- inode->i_sb->s_dirt = 1; -+ mark_sb_dirty(inode->i_sb); - } - hfsplus_inode_write_fork(inode, &vhdr->ext_file); - hfs_btree_write(HFSPLUS_SB(inode->i_sb).ext_tree); -@@ -112,7 +112,7 @@ static int hfsplus_write_inode(struct in - case HFSPLUS_CAT_CNID: - if (vhdr->cat_file.total_size != cpu_to_be64(inode->i_size)) { - HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; -- inode->i_sb->s_dirt = 1; -+ mark_sb_dirty(inode->i_sb); - } - hfsplus_inode_write_fork(inode, &vhdr->cat_file); - hfs_btree_write(HFSPLUS_SB(inode->i_sb).cat_tree); -@@ -120,21 +120,21 @@ static int hfsplus_write_inode(struct in - case HFSPLUS_ALLOC_CNID: - if (vhdr->alloc_file.total_size != cpu_to_be64(inode->i_size)) { - HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; -- inode->i_sb->s_dirt = 1; -+ mark_sb_dirty(inode->i_sb); - } - hfsplus_inode_write_fork(inode, &vhdr->alloc_file); - break; - case HFSPLUS_START_CNID: - if (vhdr->start_file.total_size != cpu_to_be64(inode->i_size)) { - HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; -- inode->i_sb->s_dirt = 1; -+ mark_sb_dirty(inode->i_sb); - } - hfsplus_inode_write_fork(inode, &vhdr->start_file); - break; - case HFSPLUS_ATTR_CNID: - if (vhdr->attr_file.total_size != cpu_to_be64(inode->i_size)) { - HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP; -- inode->i_sb->s_dirt = 1; -+ mark_sb_dirty(inode->i_sb); - } - hfsplus_inode_write_fork(inode, &vhdr->attr_file); - hfs_btree_write(HFSPLUS_SB(inode->i_sb).attr_tree); -@@ -157,7 +157,7 @@ static void hfsplus_write_super(struct s - struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr; - - dprint(DBG_SUPER, "hfsplus_write_super\n"); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - if (sb->s_flags & MS_RDONLY) - /* warn? 
*/ - return; -diff -Nurp linux-omap-2.6.28-omap1/fs/jffs2/fs.c linux-omap-2.6.28-nokia1/fs/jffs2/fs.c ---- linux-omap-2.6.28-omap1/fs/jffs2/fs.c 2011-06-22 13:14:22.433067687 +0200 -+++ linux-omap-2.6.28-nokia1/fs/jffs2/fs.c 2011-06-22 13:19:33.233063269 +0200 -@@ -405,7 +405,7 @@ int jffs2_remount_fs (struct super_block - void jffs2_write_super (struct super_block *sb) - { - struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - if (sb->s_flags & MS_RDONLY) - return; -diff -Nurp linux-omap-2.6.28-omap1/fs/jffs2/os-linux.h linux-omap-2.6.28-nokia1/fs/jffs2/os-linux.h ---- linux-omap-2.6.28-omap1/fs/jffs2/os-linux.h 2011-06-22 13:14:22.483067687 +0200 -+++ linux-omap-2.6.28-nokia1/fs/jffs2/os-linux.h 2011-06-22 13:19:33.233063269 +0200 -@@ -147,7 +147,7 @@ void jffs2_nor_wbuf_flash_cleanup(struct - /* erase.c */ - static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c) - { -- OFNI_BS_2SFFJ(c)->s_dirt = 1; -+ mark_sb_dirty(OFNI_BS_2SFFJ(c)); - } - - /* background.c */ -diff -Nurp linux-omap-2.6.28-omap1/fs/ocfs2/super.c linux-omap-2.6.28-nokia1/fs/ocfs2/super.c ---- linux-omap-2.6.28-omap1/fs/ocfs2/super.c 2011-06-22 13:14:23.293067675 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ocfs2/super.c 2011-06-22 13:19:33.243063269 +0200 -@@ -190,7 +190,7 @@ static void ocfs2_write_super(struct sup - { - if (mutex_trylock(&sb->s_lock) != 0) - BUG(); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - static int ocfs2_sync_fs(struct super_block *sb, int wait) -@@ -199,7 +199,7 @@ static int ocfs2_sync_fs(struct super_bl - tid_t target; - struct ocfs2_super *osb = OCFS2_SB(sb); - -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - if (ocfs2_is_hard_readonly(osb)) - return -EROFS; -diff -Nurp linux-omap-2.6.28-omap1/fs/proc/generic.c linux-omap-2.6.28-nokia1/fs/proc/generic.c ---- linux-omap-2.6.28-omap1/fs/proc/generic.c 2011-06-22 13:14:23.353067674 +0200 -+++ linux-omap-2.6.28-nokia1/fs/proc/generic.c 2011-06-22 13:19:33.243063269 +0200 -@@ -14,7 +14,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -379,7 +378,6 @@ struct dentry *proc_lookup_de(struct pro - struct inode *inode = NULL; - int error = -ENOENT; - -- lock_kernel(); - spin_lock(&proc_subdir_lock); - for (de = de->subdir; de ; de = de->next) { - if (de->namelen != dentry->d_name.len) -@@ -397,7 +395,6 @@ struct dentry *proc_lookup_de(struct pro - } - spin_unlock(&proc_subdir_lock); - out_unlock: -- unlock_kernel(); - - if (inode) { - dentry->d_op = &proc_dentry_operations; -@@ -432,8 +429,6 @@ int proc_readdir_de(struct proc_dir_entr - struct inode *inode = filp->f_path.dentry->d_inode; - int ret = 0; - -- lock_kernel(); -- - ino = inode->i_ino; - i = filp->f_pos; - switch (i) { -@@ -487,7 +482,7 @@ int proc_readdir_de(struct proc_dir_entr - spin_unlock(&proc_subdir_lock); - } - ret = 1; --out: unlock_kernel(); -+out: - return ret; - } - -@@ -504,6 +499,7 @@ int proc_readdir(struct file *filp, void - * the /proc directory. 
- */ - static const struct file_operations proc_dir_operations = { -+ .llseek = generic_file_llseek, - .read = generic_read_dir, - .readdir = proc_readdir, - }; -diff -Nurp linux-omap-2.6.28-omap1/fs/proc/inode.c linux-omap-2.6.28-nokia1/fs/proc/inode.c ---- linux-omap-2.6.28-omap1/fs/proc/inode.c 2011-06-22 13:14:23.353067674 +0200 -+++ linux-omap-2.6.28-nokia1/fs/proc/inode.c 2011-06-22 13:19:33.243063269 +0200 -@@ -35,16 +35,13 @@ struct proc_dir_entry *de_get(struct pro - */ - void de_put(struct proc_dir_entry *de) - { -- lock_kernel(); - if (!atomic_read(&de->count)) { - printk("de_put: entry %s already free!\n", de->name); -- unlock_kernel(); - return; - } - - if (atomic_dec_and_test(&de->count)) - free_proc_entry(de); -- unlock_kernel(); - } - - /* -diff -Nurp linux-omap-2.6.28-omap1/fs/proc/proc_net.c linux-omap-2.6.28-nokia1/fs/proc/proc_net.c ---- linux-omap-2.6.28-omap1/fs/proc/proc_net.c 2011-06-22 13:14:23.353067674 +0200 -+++ linux-omap-2.6.28-nokia1/fs/proc/proc_net.c 2011-06-22 13:19:33.243063269 +0200 -@@ -18,7 +18,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -172,6 +171,7 @@ static int proc_tgid_net_readdir(struct - } - - const struct file_operations proc_net_operations = { -+ .llseek = generic_file_llseek, - .read = generic_read_dir, - .readdir = proc_tgid_net_readdir, - }; -diff -Nurp linux-omap-2.6.28-omap1/fs/proc/root.c linux-omap-2.6.28-nokia1/fs/proc/root.c ---- linux-omap-2.6.28-omap1/fs/proc/root.c 2011-06-22 13:14:23.353067674 +0200 -+++ linux-omap-2.6.28-nokia1/fs/proc/root.c 2011-06-22 13:19:33.243063269 +0200 -@@ -16,7 +16,6 @@ - #include - #include - #include --#include - #include - #include - -@@ -162,17 +161,12 @@ static int proc_root_readdir(struct file - unsigned int nr = filp->f_pos; - int ret; - -- lock_kernel(); -- - if (nr < FIRST_PROCESS_ENTRY) { - int error = proc_readdir(filp, dirent, filldir); -- if (error <= 0) { -- unlock_kernel(); -+ if (error <= 0) - return error; -- } - filp->f_pos = FIRST_PROCESS_ENTRY; - } -- unlock_kernel(); - - ret = proc_pid_readdir(filp, dirent, filldir); - return ret; -diff -Nurp linux-omap-2.6.28-omap1/fs/qnx4/inode.c linux-omap-2.6.28-nokia1/fs/qnx4/inode.c ---- linux-omap-2.6.28-omap1/fs/qnx4/inode.c 2011-06-22 13:14:23.353067674 +0200 -+++ linux-omap-2.6.28-nokia1/fs/qnx4/inode.c 2011-06-22 13:19:33.243063269 +0200 -@@ -74,7 +74,7 @@ static void qnx4_write_super(struct supe - { - lock_kernel(); - QNX4DEBUG(("qnx4: write_super\n")); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - unlock_kernel(); - } - -diff -Nurp linux-omap-2.6.28-omap1/fs/reiserfs/journal.c linux-omap-2.6.28-nokia1/fs/reiserfs/journal.c ---- linux-omap-2.6.28-omap1/fs/reiserfs/journal.c 2011-06-22 13:14:23.403067675 +0200 -+++ linux-omap-2.6.28-nokia1/fs/reiserfs/journal.c 2011-06-22 13:19:33.243063269 +0200 -@@ -3251,7 +3251,7 @@ int journal_mark_dirty(struct reiserfs_t - th->t_trans_id, journal->j_trans_id); - } - -- p_s_sb->s_dirt = 1; -+ mark_sb_dirty(p_s_sb); - - prepared = test_clear_buffer_journal_prepared(bh); - clear_buffer_journal_restore_dirty(bh); -@@ -3545,7 +3545,7 @@ int reiserfs_flush_old_commits(struct su - do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT); - } - } -- return p_s_sb->s_dirt; -+ return is_sb_dirty(p_s_sb); - } - - /* -@@ -3974,7 +3974,7 @@ static int do_journal_end(struct reiserf - ** it tells us if we should continue with the journal_end, or just return - */ - if (!check_journal_end(th, p_s_sb, nblocks, flags)) { -- p_s_sb->s_dirt = 1; -+ mark_sb_dirty(p_s_sb); - 
wake_queued_writers(p_s_sb); - reiserfs_async_progress_wait(p_s_sb); - goto out; -diff -Nurp linux-omap-2.6.28-omap1/fs/reiserfs/resize.c linux-omap-2.6.28-nokia1/fs/reiserfs/resize.c ---- linux-omap-2.6.28-omap1/fs/reiserfs/resize.c 2011-06-22 13:14:23.413067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/reiserfs/resize.c 2011-06-22 13:19:33.243063269 +0200 -@@ -202,7 +202,7 @@ int reiserfs_resize(struct super_block * - (bmap_nr_new - bmap_nr))); - PUT_SB_BLOCK_COUNT(s, block_count_new); - PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new); -- s->s_dirt = 1; -+ mark_sb_dirty(s); - - journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); - -diff -Nurp linux-omap-2.6.28-omap1/fs/reiserfs/super.c linux-omap-2.6.28-nokia1/fs/reiserfs/super.c ---- linux-omap-2.6.28-omap1/fs/reiserfs/super.c 2011-06-22 13:14:23.413067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/reiserfs/super.c 2011-06-22 13:19:33.243063269 +0200 -@@ -69,11 +69,11 @@ static int reiserfs_sync_fs(struct super - if (!journal_begin(&th, s, 1)) - if (!journal_end_sync(&th, s, 1)) - reiserfs_flush_old_commits(s); -- s->s_dirt = 0; /* Even if it's not true. -- * We'll loop forever in sync_supers otherwise */ -+ mark_sb_clean(s); /* Even if it's not true. -+ * We'll loop forever in sync_supers otherwise */ - reiserfs_write_unlock(s); - } else { -- s->s_dirt = 0; -+ mark_sb_clean(s); - } - return 0; - } -@@ -99,7 +99,7 @@ static void reiserfs_write_super_lockfs( - journal_end_sync(&th, s, 1); - } - } -- s->s_dirt = 0; -+ mark_sb_clean(s); - reiserfs_write_unlock(s); - } - -@@ -1287,7 +1287,7 @@ static int reiserfs_remount(struct super - err = journal_end(&th, s, 10); - if (err) - goto out_err; -- s->s_dirt = 0; -+ mark_sb_clean(s); - - if (!(*mount_flags & MS_RDONLY)) { - finish_unfinished(s); -diff -Nurp linux-omap-2.6.28-omap1/fs/super.c linux-omap-2.6.28-nokia1/fs/super.c ---- linux-omap-2.6.28-omap1/fs/super.c 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/super.c 2011-06-22 13:19:33.243063269 +0200 -@@ -251,7 +251,7 @@ void __fsync_super(struct super_block *s - sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); - lock_super(sb); -- if (sb->s_dirt && sb->s_op->write_super) -+ if (is_sb_dirty(sb) && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - if (sb->s_op->sync_fs) -@@ -298,7 +298,7 @@ void generic_shutdown_super(struct super - invalidate_inodes(sb); - lock_kernel(); - -- if (sop->write_super && sb->s_dirt) -+ if (sop->write_super && is_sb_dirty(sb)) - sop->write_super(sb); - if (sop->put_super) - sop->put_super(sb); -@@ -388,7 +388,7 @@ EXPORT_SYMBOL(drop_super); - static inline void write_super(struct super_block *sb) - { - lock_super(sb); -- if (sb->s_root && sb->s_dirt) -+ if (sb->s_root && is_sb_dirty(sb)) - if (sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); -@@ -406,7 +406,7 @@ void sync_supers(void) - spin_lock(&sb_lock); - restart: - list_for_each_entry(sb, &super_blocks, s_list) { -- if (sb->s_dirt) { -+ if (is_sb_dirty(sb)) { - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); -@@ -461,7 +461,7 @@ restart: - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); -- if (sb->s_root && (wait || sb->s_dirt)) -+ if (sb->s_root && (wait || is_sb_dirty(sb))) - sb->s_op->sync_fs(sb, wait); - up_read(&sb->s_umount); - /* restart only when sb is no longer on the list */ -diff -Nurp linux-omap-2.6.28-omap1/fs/sync.c linux-omap-2.6.28-nokia1/fs/sync.c ---- linux-omap-2.6.28-omap1/fs/sync.c 2011-06-22 13:14:23.433067673 +0200 -+++ 
linux-omap-2.6.28-nokia1/fs/sync.c 2011-06-22 13:19:33.243063269 +0200 -@@ -64,7 +64,7 @@ int file_fsync(struct file *filp, struct - /* sync the superblock to buffers */ - sb = inode->i_sb; - lock_super(sb); -- if (sb->s_dirt && sb->s_op->write_super) -+ if (is_sb_dirty(sb) && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - -diff -Nurp linux-omap-2.6.28-omap1/fs/sysv/inode.c linux-omap-2.6.28-nokia1/fs/sysv/inode.c ---- linux-omap-2.6.28-omap1/fs/sysv/inode.c 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/sysv/inode.c 2011-06-22 13:19:33.243063269 +0200 -@@ -30,7 +30,7 @@ - #include - #include "sysv.h" - --/* This is only called on sync() and umount(), when s_dirt=1. */ -+/* This is only called on sync() and umount(), when the super block is dirty. */ - static void sysv_write_super(struct super_block *sb) - { - struct sysv_sb_info *sbi = SYSV_SB(sb); -@@ -53,7 +53,7 @@ static void sysv_write_super(struct supe - mark_buffer_dirty(sbi->s_bh2); - } - clean: -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - unlock_kernel(); - } - -@@ -63,7 +63,7 @@ static int sysv_remount(struct super_blo - if (sbi->s_forced_ro) - *flags |= MS_RDONLY; - if (!(*flags & MS_RDONLY)) -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - return 0; - } - -diff -Nurp linux-omap-2.6.28-omap1/fs/sysv/super.c linux-omap-2.6.28-nokia1/fs/sysv/super.c ---- linux-omap-2.6.28-omap1/fs/sysv/super.c 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/sysv/super.c 2011-06-22 13:19:33.243063269 +0200 -@@ -347,7 +347,7 @@ static int complete_read_super(struct su - sb->s_flags |= MS_RDONLY; - if (sbi->s_truncate) - sb->s_root->d_op = &sysv_dentry_operations; -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - return 1; - } - -diff -Nurp linux-omap-2.6.28-omap1/fs/sysv/sysv.h linux-omap-2.6.28-nokia1/fs/sysv/sysv.h ---- linux-omap-2.6.28-omap1/fs/sysv/sysv.h 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/sysv/sysv.h 2011-06-22 13:19:33.243063269 +0200 -@@ -118,7 +118,7 @@ static inline void dirty_sb(struct super - mark_buffer_dirty(sbi->s_bh1); - if (sbi->s_bh1 != sbi->s_bh2) - mark_buffer_dirty(sbi->s_bh2); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - - -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/budget.c linux-omap-2.6.28-nokia1/fs/ubifs/budget.c ---- linux-omap-2.6.28-omap1/fs/ubifs/budget.c 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/budget.c 2011-06-22 13:19:33.243063269 +0200 -@@ -32,18 +32,15 @@ - - #include "ubifs.h" - #include --#include -+#include - - /* - * When pessimistic budget calculations say that there is no enough space, - * UBIFS starts writing back dirty inodes and pages, doing garbage collection, -- * or committing. The below constants define maximum number of times UBIFS -+ * or committing. The below constant defines maximum number of times UBIFS - * repeats the operations. - */ --#define MAX_SHRINK_RETRIES 8 --#define MAX_GC_RETRIES 4 --#define MAX_CMT_RETRIES 2 --#define MAX_NOSPC_RETRIES 1 -+#define MAX_MKSPC_RETRIES 3 - - /* - * The below constant defines amount of dirty pages which should be written -@@ -52,30 +49,6 @@ - #define NR_TO_WRITE 16 - - /** -- * struct retries_info - information about re-tries while making free space. 
-- * @prev_liability: previous liability -- * @shrink_cnt: how many times the liability was shrinked -- * @shrink_retries: count of liability shrink re-tries (increased when -- * liability does not shrink) -- * @try_gc: GC should be tried first -- * @gc_retries: how many times GC was run -- * @cmt_retries: how many times commit has been done -- * @nospc_retries: how many times GC returned %-ENOSPC -- * -- * Since we consider budgeting to be the fast-path, and this structure has to -- * be allocated on stack and zeroed out, we make it smaller using bit-fields. -- */ --struct retries_info { -- long long prev_liability; -- unsigned int shrink_cnt; -- unsigned int shrink_retries:5; -- unsigned int try_gc:1; -- unsigned int gc_retries:4; -- unsigned int cmt_retries:3; -- unsigned int nospc_retries:1; --}; -- --/** - * shrink_liability - write-back some dirty pages/inodes. - * @c: UBIFS file-system description object - * @nr_to_write: how many dirty pages to write-back -@@ -147,9 +120,25 @@ static int run_gc(struct ubifs_info *c) - } - - /** -+ * get_liability - calculate current liability. -+ * @c: UBIFS file-system description object -+ * -+ * This function calculates and returns current UBIFS liability, i.e. the -+ * amount of bytes UBIFS has "promised" to write to the media. -+ */ -+static long long get_liability(struct ubifs_info *c) -+{ -+ long long liab; -+ -+ spin_lock(&c->space_lock); -+ liab = c->budg_idx_growth + c->budg_data_growth + c->budg_dd_growth; -+ spin_unlock(&c->space_lock); -+ return liab; -+} -+ -+/** - * make_free_space - make more free space on the file-system. - * @c: UBIFS file-system description object -- * @ri: information about previous invocations of this function - * - * This function is called when an operation cannot be budgeted because there - * is supposedly no free space. But in most cases there is some free space: -@@ -165,87 +154,42 @@ static int run_gc(struct ubifs_info *c) - * Returns %-ENOSPC if it couldn't do more free space, and other negative error - * codes on failures. - */ --static int make_free_space(struct ubifs_info *c, struct retries_info *ri) -+static int make_free_space(struct ubifs_info *c) - { -- int err; -- -- /* -- * If we have some dirty pages and inodes (liability), try to write -- * them back unless this was tried too many times without effect -- * already. -- */ -- if (ri->shrink_retries < MAX_SHRINK_RETRIES && !ri->try_gc) { -- long long liability; -- -- spin_lock(&c->space_lock); -- liability = c->budg_idx_growth + c->budg_data_growth + -- c->budg_dd_growth; -- spin_unlock(&c->space_lock); -+ int err, retries = 0; -+ long long liab1, liab2; - -- if (ri->prev_liability >= liability) { -- /* Liability does not shrink, next time try GC then */ -- ri->shrink_retries += 1; -- if (ri->gc_retries < MAX_GC_RETRIES) -- ri->try_gc = 1; -- dbg_budg("liability did not shrink: retries %d of %d", -- ri->shrink_retries, MAX_SHRINK_RETRIES); -- } -- -- dbg_budg("force write-back (count %d)", ri->shrink_cnt); -- shrink_liability(c, NR_TO_WRITE + ri->shrink_cnt); -+ do { -+ liab1 = get_liability(c); -+ /* -+ * We probably have some dirty pages or inodes (liability), try -+ * to write them back. -+ */ -+ dbg_budg("liability %lld, run write-back", liab1); -+ shrink_liability(c, NR_TO_WRITE); - -- ri->prev_liability = liability; -- ri->shrink_cnt += 1; -- return -EAGAIN; -- } -+ liab2 = get_liability(c); -+ if (liab2 < liab1) -+ return -EAGAIN; - -- /* -- * Try to run garbage collector unless it was already tried too many -- * times. 
-- */ -- if (ri->gc_retries < MAX_GC_RETRIES) { -- ri->gc_retries += 1; -- dbg_budg("run GC, retries %d of %d", -- ri->gc_retries, MAX_GC_RETRIES); -+ dbg_budg("new liability %lld (not shrinked)", liab2); - -- ri->try_gc = 0; -+ /* Liability did not shrink again, try GC */ -+ dbg_budg("Run GC"); - err = run_gc(c); - if (!err) - return -EAGAIN; - -- if (err == -EAGAIN) { -- dbg_budg("GC asked to commit"); -- err = ubifs_run_commit(c); -- if (err) -- return err; -- return -EAGAIN; -- } -- -- if (err != -ENOSPC) -- return err; -- -- /* -- * GC could not make any progress. If this is the first time, -- * then it makes sense to try to commit, because it might make -- * some dirty space. -- */ -- dbg_budg("GC returned -ENOSPC, retries %d", -- ri->nospc_retries); -- if (ri->nospc_retries >= MAX_NOSPC_RETRIES) -+ if (err != -EAGAIN && err != -ENOSPC) -+ /* Some real error happened */ - return err; -- ri->nospc_retries += 1; -- } - -- /* Neither GC nor write-back helped, try to commit */ -- if (ri->cmt_retries < MAX_CMT_RETRIES) { -- ri->cmt_retries += 1; -- dbg_budg("run commit, retries %d of %d", -- ri->cmt_retries, MAX_CMT_RETRIES); -+ dbg_budg("Run commit (retries %d)", retries); - err = ubifs_run_commit(c); - if (err) - return err; -- return -EAGAIN; -- } -+ } while (retries++ < MAX_MKSPC_RETRIES); -+ - return -ENOSPC; - } - -@@ -258,8 +202,8 @@ static int make_free_space(struct ubifs_ - */ - int ubifs_calc_min_idx_lebs(struct ubifs_info *c) - { -- int ret; -- uint64_t idx_size; -+ int idx_lebs, eff_leb_size = c->leb_size - c->max_idx_node_sz; -+ long long idx_size; - - idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx; - -@@ -271,23 +215,16 @@ int ubifs_calc_min_idx_lebs(struct ubifs - * pair, nor similarly the two variables for the new index size, so we - * have to do this costly 64-bit division on fast-path. - */ -- if (do_div(idx_size, c->leb_size - c->max_idx_node_sz)) -- ret = idx_size + 1; -- else -- ret = idx_size; -+ idx_size += eff_leb_size - 1; -+ idx_lebs = div_u64(idx_size, eff_leb_size); - /* - * The index head is not available for the in-the-gaps method, so add an - * extra LEB to compensate. - */ -- ret += 1; -- /* -- * At present the index needs at least 2 LEBs: one for the index head -- * and one for in-the-gaps method (which currently does not cater for -- * the index head and so excludes it from consideration). 
-- */ -- if (ret < 2) -- ret = 2; -- return ret; -+ idx_lebs += 1; -+ if (idx_lebs < MIN_INDEX_LEBS) -+ idx_lebs = MIN_INDEX_LEBS; -+ return idx_lebs; - } - - /** -@@ -530,8 +467,7 @@ static int calc_dd_growth(const struct u - int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) - { - int uninitialized_var(cmt_retries), uninitialized_var(wb_retries); -- int err, idx_growth, data_growth, dd_growth; -- struct retries_info ri; -+ int err, idx_growth, data_growth, dd_growth, retried = 0; - - ubifs_assert(req->new_page <= 1); - ubifs_assert(req->dirtied_page <= 1); -@@ -549,7 +485,6 @@ int ubifs_budget_space(struct ubifs_info - if (!data_growth && !dd_growth) - return 0; - idx_growth = calc_idx_growth(c, req); -- memset(&ri, 0, sizeof(struct retries_info)); - - again: - spin_lock(&c->space_lock); -@@ -587,12 +522,17 @@ again: - return err; - } - -- err = make_free_space(c, &ri); -+ err = make_free_space(c); -+ cond_resched(); - if (err == -EAGAIN) { - dbg_budg("try again"); -- cond_resched(); - goto again; - } else if (err == -ENOSPC) { -+ if (!retried) { -+ retried = 1; -+ dbg_budg("-ENOSPC, but anyway try once again"); -+ goto again; -+ } - dbg_budg("FS is full, -ENOSPC"); - c->nospace = 1; - if (can_use_rp(c) || c->rp_size == 0) -@@ -691,7 +631,7 @@ void ubifs_convert_page_budget(struct ub - * - * This function releases budget corresponding to a dirty inode. It is usually - * called when after the inode has been written to the media and marked as -- * clean. -+ * clean. It also causes the "no space" flags to be cleared. - */ - void ubifs_release_dirty_inode_budget(struct ubifs_info *c, - struct ubifs_inode *ui) -@@ -699,6 +639,7 @@ void ubifs_release_dirty_inode_budget(st - struct ubifs_budget_req req; - - memset(&req, 0, sizeof(struct ubifs_budget_req)); -+ /* The "no space" flags will be cleared because dd_growth is > 0 */ - req.dd_growth = c->inode_budget + ALIGN(ui->data_len, 8); - ubifs_release_budget(c, &req); - } -@@ -712,9 +653,9 @@ void ubifs_release_dirty_inode_budget(st - * user-space. User-space application tend to expect that if the file-system - * (e.g., via the 'statfs()' call) reports that it has N bytes available, they - * are able to write a file of size N. UBIFS attaches node headers to each data -- * node and it has to write indexind nodes as well. This introduces additional -- * overhead, and UBIFS it has to report sligtly less free space to meet the -- * above expectetion. -+ * node and it has to write indexing nodes as well. This introduces additional -+ * overhead, and UBIFS has to report slightly less free space to meet the above -+ * expectations. - * - * This function assumes free space is made up of uncompressed data nodes and - * full index nodes (one per data node, tripled because we always allow enough -@@ -723,7 +664,7 @@ void ubifs_release_dirty_inode_budget(st - * Note, the calculation is pessimistic, which means that most of the time - * UBIFS reports less space than it actually has. - */ --long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free) -+long long ubifs_reported_space(const struct ubifs_info *c, long long free) - { - int divisor, factor, f; - -@@ -737,7 +678,7 @@ long long ubifs_reported_space(const str - * of data nodes, f - fanout. Because effective UBIFS fanout is twice - * as less than maximum fanout, we assume that each data node - * introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes. 
-- * Note, the multiplier 3 is because UBIFS reseves thrice as more space -+ * Note, the multiplier 3 is because UBIFS reserves thrice as more space - * for the index. - */ - f = c->fanout > 3 ? c->fanout >> 1 : 2; -@@ -745,45 +686,33 @@ long long ubifs_reported_space(const str - divisor = UBIFS_MAX_DATA_NODE_SZ; - divisor += (c->max_idx_node_sz * 3) / (f - 1); - free *= factor; -- do_div(free, divisor); -- return free; -+ return div_u64(free, divisor); - } - - /** -- * ubifs_get_free_space - return amount of free space. -+ * ubifs_get_free_space_nolock - return amount of free space. - * @c: UBIFS file-system description object - * - * This function calculates amount of free space to report to user-space. - * - * Because UBIFS may introduce substantial overhead (the index, node headers, -- * alighment, wastage at the end of eraseblocks, etc), it cannot report real -+ * alignment, wastage at the end of eraseblocks, etc), it cannot report real - * amount of free flash space it has (well, because not all dirty space is -- * reclamable, UBIFS does not actually know the real amount). If UBIFS did so, -- * it would bread user expectetion about what free space is. Users seem to -+ * reclaimable, UBIFS does not actually know the real amount). If UBIFS did so, -+ * it would bread user expectations about what free space is. Users seem to - * accustomed to assume that if the file-system reports N bytes of free space, - * they would be able to fit a file of N bytes to the FS. This almost works for - * traditional file-systems, because they have way less overhead than UBIFS. - * So, to keep users happy, UBIFS tries to take the overhead into account. - */ --long long ubifs_get_free_space(struct ubifs_info *c) -+long long ubifs_get_free_space_nolock(struct ubifs_info *c) - { -- int min_idx_lebs, rsvd_idx_lebs, lebs; -+ int rsvd_idx_lebs, lebs; - long long available, outstanding, free; - -- spin_lock(&c->space_lock); -- min_idx_lebs = ubifs_calc_min_idx_lebs(c); -+ ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); - outstanding = c->budg_data_growth + c->budg_dd_growth; -- -- /* -- * Force the amount available to the total size reported if the used -- * space is zero. -- */ -- if (c->lst.total_used <= UBIFS_INO_NODE_SZ && !outstanding) { -- spin_unlock(&c->space_lock); -- return (long long)c->block_cnt << UBIFS_BLOCK_SHIFT; -- } -- -- available = ubifs_calc_available(c, min_idx_lebs); -+ available = ubifs_calc_available(c, c->min_idx_lebs); - - /* - * When reporting free space to user-space, UBIFS guarantees that it is -@@ -796,15 +725,14 @@ long long ubifs_get_free_space(struct ub - * Note, the calculations below are similar to what we have in - * 'do_budget_space()', so refer there for comments. - */ -- if (min_idx_lebs > c->lst.idx_lebs) -- rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs; -+ if (c->min_idx_lebs > c->lst.idx_lebs) -+ rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; - else - rsvd_idx_lebs = 0; - lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - - c->lst.taken_empty_lebs; - lebs -= rsvd_idx_lebs; - available += lebs * (c->dark_wm - c->leb_overhead); -- spin_unlock(&c->space_lock); - - if (available > outstanding) - free = ubifs_reported_space(c, available - outstanding); -@@ -812,3 +740,21 @@ long long ubifs_get_free_space(struct ub - free = 0; - return free; - } -+ -+/** -+ * ubifs_get_free_space - return amount of free space. 
-+ * @c: UBIFS file-system description object -+ * -+ * This function calculates and retuns amount of free space to report to -+ * user-space. -+ */ -+long long ubifs_get_free_space(struct ubifs_info *c) -+{ -+ long long free; -+ -+ spin_lock(&c->space_lock); -+ free = ubifs_get_free_space_nolock(c); -+ spin_unlock(&c->space_lock); -+ -+ return free; -+} -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/commit.c linux-omap-2.6.28-nokia1/fs/ubifs/commit.c ---- linux-omap-2.6.28-omap1/fs/ubifs/commit.c 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/commit.c 2011-06-22 13:19:33.243063269 +0200 -@@ -470,12 +470,12 @@ int dbg_old_index_check_init(struct ubif - { - struct ubifs_idx_node *idx; - int lnum, offs, len, err = 0; -+ struct ubifs_debug_info *d = c->dbg; - -- c->old_zroot = *zroot; -- -- lnum = c->old_zroot.lnum; -- offs = c->old_zroot.offs; -- len = c->old_zroot.len; -+ d->old_zroot = *zroot; -+ lnum = d->old_zroot.lnum; -+ offs = d->old_zroot.offs; -+ len = d->old_zroot.len; - - idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); - if (!idx) -@@ -485,8 +485,8 @@ int dbg_old_index_check_init(struct ubif - if (err) - goto out; - -- c->old_zroot_level = le16_to_cpu(idx->level); -- c->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); -+ d->old_zroot_level = le16_to_cpu(idx->level); -+ d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); - out: - kfree(idx); - return err; -@@ -509,6 +509,7 @@ int dbg_check_old_index(struct ubifs_inf - { - int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt; - int first = 1, iip; -+ struct ubifs_debug_info *d = c->dbg; - union ubifs_key lower_key, upper_key, l_key, u_key; - unsigned long long uninitialized_var(last_sqnum); - struct ubifs_idx_node *idx; -@@ -525,9 +526,9 @@ int dbg_check_old_index(struct ubifs_inf - UBIFS_IDX_NODE_SZ; - - /* Start at the old zroot */ -- lnum = c->old_zroot.lnum; -- offs = c->old_zroot.offs; -- len = c->old_zroot.len; -+ lnum = d->old_zroot.lnum; -+ offs = d->old_zroot.offs; -+ len = d->old_zroot.len; - iip = 0; - - /* -@@ -560,11 +561,11 @@ int dbg_check_old_index(struct ubifs_inf - if (first) { - first = 0; - /* Check root level and sqnum */ -- if (le16_to_cpu(idx->level) != c->old_zroot_level) { -+ if (le16_to_cpu(idx->level) != d->old_zroot_level) { - err = 2; - goto out_dump; - } -- if (le64_to_cpu(idx->ch.sqnum) != c->old_zroot_sqnum) { -+ if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) { - err = 3; - goto out_dump; - } -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/compress.c linux-omap-2.6.28-nokia1/fs/ubifs/compress.c ---- linux-omap-2.6.28-omap1/fs/ubifs/compress.c 2011-06-22 13:14:23.433067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/compress.c 2011-06-22 13:19:33.243063269 +0200 -@@ -33,7 +33,7 @@ - /* Fake description object for the "none" compressor */ - static struct ubifs_compressor none_compr = { - .compr_type = UBIFS_COMPR_NONE, -- .name = "no compression", -+ .name = "none", - .capi_name = "", - }; - -@@ -43,13 +43,26 @@ static DEFINE_MUTEX(lzo_mutex); - static struct ubifs_compressor lzo_compr = { - .compr_type = UBIFS_COMPR_LZO, - .comp_mutex = &lzo_mutex, -- .name = "LZO", -+ .name = "lzo", - .capi_name = "lzo", - }; -+ -+static DEFINE_MUTEX(lzo999_mutex); -+ -+static struct ubifs_compressor lzo999_compr = { -+ .compr_type = UBIFS_COMPR_LZO999, -+ .comp_mutex = &lzo999_mutex, -+ .name = "lzo999", -+ .capi_name = "lzo999", -+}; - #else - static struct ubifs_compressor lzo_compr = { - .compr_type = UBIFS_COMPR_LZO, -- .name = "LZO", -+ .name = "lzo", -+}; -+static struct 
ubifs_compressor lzo_compr = { -+ .compr_type = UBIFS_COMPR_LZO999, -+ .name = "lzo999", - }; - #endif - -@@ -108,7 +121,7 @@ void ubifs_compress(const void *in_buf, - if (compr->comp_mutex) - mutex_lock(compr->comp_mutex); - err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf, -- out_len); -+ (unsigned int *)out_len); - if (compr->comp_mutex) - mutex_unlock(compr->comp_mutex); - if (unlikely(err)) { -@@ -119,12 +132,15 @@ void ubifs_compress(const void *in_buf, - } - - /* -- * Presently, we just require that compression results in less data, -- * rather than any defined minimum compression ratio or amount. -+ * If the data compressed only slightly, it is better to leave it -+ * uncompressed to improve read speed. - */ -- if (ALIGN(*out_len, 8) >= ALIGN(in_len, 8)) -+ if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF) - goto no_compr; - -+ if (*compr_type == UBIFS_COMPR_LZO999) -+ *compr_type = UBIFS_COMPR_LZO; -+ - return; - - no_compr: -@@ -172,7 +188,7 @@ int ubifs_decompress(const void *in_buf, - if (compr->decomp_mutex) - mutex_lock(compr->decomp_mutex); - err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, -- out_len); -+ (unsigned int *)out_len); - if (compr->decomp_mutex) - mutex_unlock(compr->decomp_mutex); - if (err) -@@ -229,13 +245,19 @@ int __init ubifs_compressors_init(void) - if (err) - return err; - -- err = compr_init(&zlib_compr); -+ err = compr_init(&lzo999_compr); - if (err) - goto out_lzo; - -+ err = compr_init(&zlib_compr); -+ if (err) -+ goto out_lzo999; -+ - ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr; - return 0; - -+out_lzo999: -+ compr_exit(&lzo999_compr); - out_lzo: - compr_exit(&lzo_compr); - return err; -@@ -244,8 +266,9 @@ out_lzo: - /** - * ubifs_compressors_exit - de-initialize UBIFS compressors. 
- */ --void __exit ubifs_compressors_exit(void) -+void ubifs_compressors_exit(void) - { -+ compr_exit(&lzo999_compr); - compr_exit(&lzo_compr); - compr_exit(&zlib_compr); - } -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/debug.c linux-omap-2.6.28-nokia1/fs/ubifs/debug.c ---- linux-omap-2.6.28-omap1/fs/ubifs/debug.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/debug.c 2011-06-22 13:19:33.243063269 +0200 -@@ -32,6 +32,8 @@ - #include "ubifs.h" - #include - #include -+#include -+#include - - #ifdef CONFIG_UBIFS_FS_DEBUG - -@@ -596,7 +598,9 @@ void dbg_dump_budg(struct ubifs_info *c) - struct rb_node *rb; - struct ubifs_bud *bud; - struct ubifs_gced_idx_leb *idx_gc; -+ long long available, outstanding, free; - -+ ubifs_assert(spin_is_locked(&c->space_lock)); - spin_lock(&dbg_lock); - printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, " - "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid, -@@ -616,9 +620,11 @@ void dbg_dump_budg(struct ubifs_info *c) - c->dark_wm, c->dead_wm, c->max_idx_node_sz); - printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n", - c->gc_lnum, c->ihead_lnum); -- for (i = 0; i < c->jhead_cnt; i++) -- printk(KERN_DEBUG "\tjhead %d\t LEB %d\n", -- c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum); -+ /* If we are in R/O mode, journal heads do not exist */ -+ if (c->jheads) -+ for (i = 0; i < c->jhead_cnt; i++) -+ printk(KERN_DEBUG "\tjhead %d\t LEB %d\n", -+ c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum); - for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { - bud = rb_entry(rb, struct ubifs_bud, rb); - printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum); -@@ -629,6 +635,14 @@ void dbg_dump_budg(struct ubifs_info *c) - printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n", - idx_gc->lnum, idx_gc->unmap); - printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state); -+ -+ /* Print budgeting predictions */ -+ available = ubifs_calc_available(c, c->min_idx_lebs); -+ outstanding = c->budg_data_growth + c->budg_dd_growth; -+ free = ubifs_get_free_space_nolock(c); -+ printk(KERN_DEBUG "Budgeting predictions:\n"); -+ printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", -+ available, outstanding, free); - spin_unlock(&dbg_lock); - } - -@@ -645,7 +659,8 @@ void dbg_dump_lprops(struct ubifs_info * - struct ubifs_lprops lp; - struct ubifs_lp_stats lst; - -- printk(KERN_DEBUG "(pid %d) Dumping LEB properties\n", current->pid); -+ printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n", -+ current->pid); - ubifs_get_lp_stats(c, &lst); - dbg_dump_lstats(&lst); - -@@ -656,6 +671,8 @@ void dbg_dump_lprops(struct ubifs_info * - - dbg_dump_lprop(c, &lp); - } -+ printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n", -+ current->pid); - } - - void dbg_dump_lpt_info(struct ubifs_info *c) -@@ -663,6 +680,7 @@ void dbg_dump_lpt_info(struct ubifs_info - int i; - - spin_lock(&dbg_lock); -+ printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid); - printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz); - printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz); - printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz); -@@ -684,7 +702,8 @@ void dbg_dump_lpt_info(struct ubifs_info - printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); - printk(KERN_DEBUG "\tLPT head is at %d:%d\n", - c->nhead_lnum, c->nhead_offs); -- printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs); -+ printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", -+ c->ltab_lnum, c->ltab_offs); - if (c->big_lpt) - printk(KERN_DEBUG "\tLPT 
lsave is at %d:%d\n", - c->lsave_lnum, c->lsave_offs); -@@ -703,9 +722,9 @@ void dbg_dump_leb(const struct ubifs_inf - if (dbg_failure_mode) - return; - -- printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum); -- -- sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); -+ printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", -+ current->pid, lnum); -+ sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); - if (IS_ERR(sleb)) { - ubifs_err("scan error %d", (int)PTR_ERR(sleb)); - return; -@@ -721,6 +740,8 @@ void dbg_dump_leb(const struct ubifs_inf - dbg_dump_node(c, snod->node); - } - -+ printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", -+ current->pid, lnum); - ubifs_scan_destroy(sleb); - return; - } -@@ -768,7 +789,7 @@ void dbg_dump_heap(struct ubifs_info *c, - { - int i; - -- printk(KERN_DEBUG "(pid %d) Dumping heap cat %d (%d elements)\n", -+ printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n", - current->pid, cat, heap->cnt); - for (i = 0; i < heap->cnt; i++) { - struct ubifs_lprops *lprops = heap->arr[i]; -@@ -777,6 +798,7 @@ void dbg_dump_heap(struct ubifs_info *c, - "flags %d\n", i, lprops->lnum, lprops->hpos, - lprops->free, lprops->dirty, lprops->flags); - } -+ printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid); - } - - void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, -@@ -784,7 +806,7 @@ void dbg_dump_pnode(struct ubifs_info *c - { - int i; - -- printk(KERN_DEBUG "(pid %d) Dumping pnode:\n", current->pid); -+ printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid); - printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n", - (size_t)pnode, (size_t)parent, (size_t)pnode->cnext); - printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n", -@@ -803,7 +825,7 @@ void dbg_dump_tnc(struct ubifs_info *c) - int level; - - printk(KERN_DEBUG "\n"); -- printk(KERN_DEBUG "(pid %d) Dumping the TNC tree\n", current->pid); -+ printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid); - znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); - level = znode->level; - printk(KERN_DEBUG "== Level %d ==\n", level); -@@ -815,8 +837,7 @@ void dbg_dump_tnc(struct ubifs_info *c) - dbg_dump_znode(c, znode); - znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); - } -- -- printk(KERN_DEBUG "\n"); -+ printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid); - } - - static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, -@@ -839,6 +860,65 @@ void dbg_dump_index(struct ubifs_info *c - } - - /** -+ * dbg_save_space_info - save information about flash space. -+ * @c: UBIFS file-system description object -+ * -+ * This function saves information about UBIFS free space, dirty space, etc, in -+ * order to check it later. -+ */ -+void dbg_save_space_info(struct ubifs_info *c) -+{ -+ struct ubifs_debug_info *d = c->dbg; -+ -+ ubifs_get_lp_stats(c, &d->saved_lst); -+ -+ spin_lock(&c->space_lock); -+ d->saved_free = ubifs_get_free_space_nolock(c); -+ spin_unlock(&c->space_lock); -+} -+ -+/** -+ * dbg_check_space_info - check flash space information. -+ * @c: UBIFS file-system description object -+ * -+ * This function compares current flash space information with the information -+ * which was saved when the 'dbg_save_space_info()' function was called. -+ * Returns zero if the information has not changed, and %-EINVAL it it has -+ * changed. 
-+ */ -+int dbg_check_space_info(struct ubifs_info *c) -+{ -+ struct ubifs_debug_info *d = c->dbg; -+ struct ubifs_lp_stats lst; -+ long long avail, free; -+ -+ spin_lock(&c->space_lock); -+ avail = ubifs_calc_available(c, c->min_idx_lebs); -+ spin_unlock(&c->space_lock); -+ free = ubifs_get_free_space(c); -+ -+ if (free != d->saved_free) { -+ ubifs_err("free space changed from %lld to %lld", -+ d->saved_free, free); -+ goto out; -+ } -+ -+ return 0; -+ -+out: -+ ubifs_msg("saved lprops statistics dump"); -+ dbg_dump_lstats(&d->saved_lst); -+ ubifs_get_lp_stats(c, &lst); -+ ubifs_msg("current lprops statistics dump"); -+ dbg_dump_lstats(&d->saved_lst); -+ spin_lock(&c->space_lock); -+ dbg_dump_budg(c); -+ spin_unlock(&c->space_lock); -+ dump_stack(); -+ return -EINVAL; -+} -+ -+/** - * dbg_check_synced_i_size - check synchronized inode size. - * @inode: inode to check - * -@@ -992,8 +1072,8 @@ static int dbg_check_key_order(struct ub - zbr1->offs, DBGKEY(&key)); - dbg_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); -- dbg_dump_node(c, dent1); -- goto out_free; -+ dbg_dump_node(c, dent1); -+ goto out_free; - } - - key_read(c, &dent2->key, &key); -@@ -1002,8 +1082,8 @@ static int dbg_check_key_order(struct ub - zbr1->offs, DBGKEY(&key)); - dbg_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); -- dbg_dump_node(c, dent2); -- goto out_free; -+ dbg_dump_node(c, dent2); -+ goto out_free; - } - - nlen1 = le16_to_cpu(dent1->nlen); -@@ -1020,9 +1100,9 @@ static int dbg_check_key_order(struct ub - dbg_err("bad order of colliding key %s", - DBGKEY(&key)); - -- dbg_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); -+ ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); - dbg_dump_node(c, dent1); -- dbg_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); -+ ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); - dbg_dump_node(c, dent2); - - out_free: -@@ -1327,7 +1407,7 @@ int dbg_check_tnc(struct ubifs_info *c, - * @c: UBIFS file-system description object - * @leaf_cb: called for each leaf node - * @znode_cb: called for each indexing node -- * @priv: private date which is passed to callbacks -+ * @priv: private data which is passed to callbacks - * - * This function walks the UBIFS index and calls the @leaf_cb for each leaf - * node and @znode_cb for each indexing node. 
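[Editor's sketch, not part of the patch] The new dbg_save_space_info()/dbg_check_space_info() pair is a snapshot-and-compare check: record the free-space figure before an operation that is supposed to be space-neutral, recompute it afterwards, and dump the saved and current lprops statistics plus the budgeting state if the two differ. A minimal user-space sketch of the same pattern, with the UBIFS calls replaced by a stand-in since only the shape of the check is being shown:

    #include <stdio.h>

    /* Stand-in for ubifs_get_free_space(); a real filesystem would recompute
     * the value, here it is a fixed number so the example links and runs. */
    static long long get_free_space(void)
    {
            return 1024 * 1024;
    }

    static long long saved_free;

    /* Analogue of dbg_save_space_info(): snapshot the figure to compare later. */
    static void save_space_info(void)
    {
            saved_free = get_free_space();
    }

    /* Analogue of dbg_check_space_info(): 0 if unchanged, -1 (think -EINVAL) if not. */
    static int check_space_info(void)
    {
            long long free_now = get_free_space();

            if (free_now != saved_free) {
                    fprintf(stderr, "free space changed from %lld to %lld\n",
                            saved_free, free_now);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            save_space_info();
            /* ... an operation that must leave free space untouched ... */
            return check_space_info() ? 1 : 0;
    }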
Returns zero in case of success -@@ -2097,13 +2177,13 @@ static int simple_rand(void) - return (next >> 16) & 32767; - } - --void dbg_failure_mode_registration(struct ubifs_info *c) -+static void failure_mode_init(struct ubifs_info *c) - { - struct failure_mode_info *fmi; - - fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS); - if (!fmi) { -- dbg_err("Failed to register failure mode - no memory"); -+ ubifs_err("Failed to register failure mode - no memory"); - return; - } - fmi->c = c; -@@ -2112,7 +2192,7 @@ void dbg_failure_mode_registration(struc - spin_unlock(&fmi_lock); - } - --void dbg_failure_mode_deregistration(struct ubifs_info *c) -+static void failure_mode_exit(struct ubifs_info *c) - { - struct failure_mode_info *fmi, *tmp; - -@@ -2146,42 +2226,44 @@ static int in_failure_mode(struct ubi_vo - struct ubifs_info *c = dbg_find_info(desc); - - if (c && dbg_failure_mode) -- return c->failure_mode; -+ return c->dbg->failure_mode; - return 0; - } - - static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) - { - struct ubifs_info *c = dbg_find_info(desc); -+ struct ubifs_debug_info *d; - - if (!c || !dbg_failure_mode) - return 0; -- if (c->failure_mode) -+ d = c->dbg; -+ if (d->failure_mode) - return 1; -- if (!c->fail_cnt) { -+ if (!d->fail_cnt) { - /* First call - decide delay to failure */ - if (chance(1, 2)) { - unsigned int delay = 1 << (simple_rand() >> 11); - - if (chance(1, 2)) { -- c->fail_delay = 1; -- c->fail_timeout = jiffies + -+ d->fail_delay = 1; -+ d->fail_timeout = jiffies + - msecs_to_jiffies(delay); - dbg_rcvry("failing after %ums", delay); - } else { -- c->fail_delay = 2; -- c->fail_cnt_max = delay; -+ d->fail_delay = 2; -+ d->fail_cnt_max = delay; - dbg_rcvry("failing after %u calls", delay); - } - } -- c->fail_cnt += 1; -+ d->fail_cnt += 1; - } - /* Determine if failure delay has expired */ -- if (c->fail_delay == 1) { -- if (time_before(jiffies, c->fail_timeout)) -+ if (d->fail_delay == 1) { -+ if (time_before(jiffies, d->fail_timeout)) - return 0; -- } else if (c->fail_delay == 2) -- if (c->fail_cnt++ < c->fail_cnt_max) -+ } else if (d->fail_delay == 2) -+ if (d->fail_cnt++ < d->fail_cnt_max) - return 0; - if (lnum == UBIFS_SB_LNUM) { - if (write) { -@@ -2239,7 +2321,7 @@ static int do_fail(struct ubi_volume_des - dbg_rcvry("failing in bud LEB %d commit not running", lnum); - } - ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum); -- c->failure_mode = 1; -+ d->failure_mode = 1; - dump_stack(); - return 1; - } -@@ -2344,4 +2426,177 @@ int dbg_leb_map(struct ubi_volume_desc * - return 0; - } - -+/** -+ * ubifs_debugging_init - initialize UBIFS debugging. -+ * @c: UBIFS file-system description object -+ * -+ * This function initializes debugging-related data for the file system. -+ * Returns zero in case of success and a negative error code in case of -+ * failure. -+ */ -+int ubifs_debugging_init(struct ubifs_info *c) -+{ -+ c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL); -+ if (!c->dbg) -+ return -ENOMEM; -+ -+ c->dbg->buf = vmalloc(c->leb_size); -+ if (!c->dbg->buf) -+ goto out; -+ -+ failure_mode_init(c); -+ return 0; -+ -+out: -+ kfree(c->dbg); -+ return -ENOMEM; -+} -+ -+/** -+ * ubifs_debugging_exit - free debugging data. -+ * @c: UBIFS file-system description object -+ */ -+void ubifs_debugging_exit(struct ubifs_info *c) -+{ -+ failure_mode_exit(c); -+ vfree(c->dbg->buf); -+ kfree(c->dbg); -+} -+ -+/* -+ * Root directory for UBIFS stuff in debugfs. 
Contains sub-directories which -+ * contain the stuff specific to particular file-system mounts. -+ */ -+static struct dentry *dfs_rootdir; -+ -+/** -+ * dbg_debugfs_init - initialize debugfs file-system. -+ * -+ * UBIFS uses debugfs file-system to expose various debugging knobs to -+ * user-space. This function creates "ubifs" directory in the debugfs -+ * file-system. Returns zero in case of success and a negative error code in -+ * case of failure. -+ */ -+int dbg_debugfs_init(void) -+{ -+ dfs_rootdir = debugfs_create_dir("ubifs", NULL); -+ if (IS_ERR(dfs_rootdir)) { -+ int err = PTR_ERR(dfs_rootdir); -+ ubifs_err("cannot create \"ubifs\" debugfs directory, " -+ "error %d\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/** -+ * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system. -+ */ -+void dbg_debugfs_exit(void) -+{ -+ debugfs_remove(dfs_rootdir); -+} -+ -+static int open_debugfs_file(struct inode *inode, struct file *file) -+{ -+ file->private_data = inode->i_private; -+ return 0; -+} -+ -+static ssize_t write_debugfs_file(struct file *file, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct ubifs_info *c = file->private_data; -+ struct ubifs_debug_info *d = c->dbg; -+ -+ if (file->f_path.dentry == d->dfs_dump_lprops) -+ dbg_dump_lprops(c); -+ else if (file->f_path.dentry == d->dfs_dump_budg) { -+ spin_lock(&c->space_lock); -+ dbg_dump_budg(c); -+ spin_unlock(&c->space_lock); -+ } else if (file->f_path.dentry == d->dfs_dump_tnc) { -+ mutex_lock(&c->tnc_mutex); -+ dbg_dump_tnc(c); -+ mutex_unlock(&c->tnc_mutex); -+ } else -+ return -EINVAL; -+ -+ *ppos += count; -+ return count; -+} -+ -+static const struct file_operations dfs_fops = { -+ .open = open_debugfs_file, -+ .write = write_debugfs_file, -+ .owner = THIS_MODULE, -+}; -+ -+/** -+ * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance. -+ * @c: UBIFS file-system description object -+ * -+ * This function creates all debugfs files for this instance of UBIFS. Returns -+ * zero in case of success and a negative error code in case of failure. -+ * -+ * Note, the only reason we have not merged this function with the -+ * 'ubifs_debugging_init()' function is because it is better to initialize -+ * debugfs interfaces at the very end of the mount process, and remove them at -+ * the very beginning of the mount process. 
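[Editor's sketch, not part of the patch] The write handler above only dispatches on which knob was written to; the dump itself goes to the kernel log via printk, so triggering a dump from user space is a one-line write. A small sketch, assuming debugfs is mounted at /sys/kernel/debug and a per-volume directory named ubi0_0 (the ubi%d_%d pattern this patch uses):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Path is an assumption: debugfs mount point and ubi0, volume 0. */
            const char *knob = "/sys/kernel/debug/ubifs/ubi0_0/dump_budg";
            int fd = open(knob, O_WRONLY);

            if (fd < 0) {
                    perror(knob);
                    return 1;
            }
            /* Any write triggers the dump; the written data itself is ignored. */
            if (write(fd, "1", 1) < 0)
                    perror("write");
            close(fd);
            return 0;
    }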
-+ */ -+int dbg_debugfs_init_fs(struct ubifs_info *c) -+{ -+ int err; -+ const char *fname; -+ struct dentry *dent; -+ struct ubifs_debug_info *d = c->dbg; -+ -+ sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); -+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir); -+ if (IS_ERR(d->dfs_dir)) { -+ err = PTR_ERR(d->dfs_dir); -+ ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", -+ d->dfs_dir_name, err); -+ goto out; -+ } -+ -+ fname = "dump_lprops"; -+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); -+ if (IS_ERR(dent)) -+ goto out_remove; -+ d->dfs_dump_lprops = dent; -+ -+ fname = "dump_budg"; -+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); -+ if (IS_ERR(dent)) -+ goto out_remove; -+ d->dfs_dump_budg = dent; -+ -+ fname = "dump_tnc"; -+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); -+ if (IS_ERR(dent)) -+ goto out_remove; -+ d->dfs_dump_tnc = dent; -+ -+ return 0; -+ -+out_remove: -+ err = PTR_ERR(dent); -+ ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", -+ fname, err); -+ debugfs_remove_recursive(d->dfs_dir); -+out: -+ return err; -+} -+ -+/** -+ * dbg_debugfs_exit_fs - remove all debugfs files. -+ * @c: UBIFS file-system description object -+ */ -+void dbg_debugfs_exit_fs(struct ubifs_info *c) -+{ -+ debugfs_remove_recursive(c->dbg->dfs_dir); -+} -+ - #endif /* CONFIG_UBIFS_FS_DEBUG */ -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/debug.h linux-omap-2.6.28-nokia1/fs/ubifs/debug.h ---- linux-omap-2.6.28-omap1/fs/ubifs/debug.h 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/debug.h 2011-06-22 13:19:33.243063269 +0200 -@@ -25,7 +25,61 @@ - - #ifdef CONFIG_UBIFS_FS_DEBUG - --#define UBIFS_DBG(op) op -+/** -+ * ubifs_debug_info - per-FS debugging information. 
-+ * @buf: a buffer of LEB size, used for various purposes -+ * @old_zroot: old index root - used by 'dbg_check_old_index()' -+ * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' -+ * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' -+ * @failure_mode: failure mode for recovery testing -+ * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls -+ * @fail_timeout: time in jiffies when delay of failure mode expires -+ * @fail_cnt: current number of calls to failure mode I/O functions -+ * @fail_cnt_max: number of calls by which to delay failure mode -+ * @chk_lpt_sz: used by LPT tree size checker -+ * @chk_lpt_sz2: used by LPT tree size checker -+ * @chk_lpt_wastage: used by LPT tree size checker -+ * @chk_lpt_lebs: used by LPT tree size checker -+ * @new_nhead_offs: used by LPT tree size checker -+ * @new_ihead_lnum: used by debugging to check @c->ihead_lnum -+ * @new_ihead_offs: used by debugging to check @c->ihead_offs -+ * -+ * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()') -+ * @saved_free: saved free space (used by 'dbg_save_space_info()') -+ * -+ * dfs_dir_name: name of debugfs directory containing this file-system's files -+ * dfs_dir: direntry object of the file-system debugfs directory -+ * dfs_dump_lprops: "dump lprops" debugfs knob -+ * dfs_dump_budg: "dump budgeting information" debugfs knob -+ * dfs_dump_tnc: "dump TNC" debugfs knob -+ */ -+struct ubifs_debug_info { -+ void *buf; -+ struct ubifs_zbranch old_zroot; -+ int old_zroot_level; -+ unsigned long long old_zroot_sqnum; -+ int failure_mode; -+ int fail_delay; -+ unsigned long fail_timeout; -+ unsigned int fail_cnt; -+ unsigned int fail_cnt_max; -+ long long chk_lpt_sz; -+ long long chk_lpt_sz2; -+ long long chk_lpt_wastage; -+ int chk_lpt_lebs; -+ int new_nhead_offs; -+ int new_ihead_lnum; -+ int new_ihead_offs; -+ -+ struct ubifs_lp_stats saved_lst; -+ long long saved_free; -+ -+ char dfs_dir_name[100]; -+ struct dentry *dfs_dir; -+ struct dentry *dfs_dump_lprops; -+ struct dentry *dfs_dump_budg; -+ struct dentry *dfs_dump_tnc; -+}; - - #define ubifs_assert(expr) do { \ - if (unlikely(!(expr))) { \ -@@ -211,14 +265,18 @@ extern unsigned int ubifs_msg_flags; - extern unsigned int ubifs_chk_flags; - extern unsigned int ubifs_tst_flags; - --/* Dump functions */ -+int ubifs_debugging_init(struct ubifs_info *c); -+void ubifs_debugging_exit(struct ubifs_info *c); - -+/* Dump functions */ - const char *dbg_ntype(int type); - const char *dbg_cstate(int cmt_state); - const char *dbg_get_key_dump(const struct ubifs_info *c, - const union ubifs_key *key); - void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode); - void dbg_dump_node(const struct ubifs_info *c, const void *node); -+void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum, -+ int offs); - void dbg_dump_budget_req(const struct ubifs_budget_req *req); - void dbg_dump_lstats(const struct ubifs_lp_stats *lst); - void dbg_dump_budg(struct ubifs_info *c); -@@ -233,9 +291,9 @@ void dbg_dump_pnode(struct ubifs_info *c - struct ubifs_nnode *parent, int iip); - void dbg_dump_tnc(struct ubifs_info *c); - void dbg_dump_index(struct ubifs_info *c); -+void dbg_dump_lpt_lebs(const struct ubifs_info *c); - - /* Checking helper functions */ -- - typedef int (*dbg_leaf_callback)(struct ubifs_info *c, - struct ubifs_zbranch *zbr, void *priv); - typedef int (*dbg_znode_callback)(struct ubifs_info *c, -@@ -244,7 +302,8 @@ int dbg_walk_index(struct ubifs_info *c, - 
dbg_znode_callback znode_cb, void *priv); - - /* Checking functions */ -- -+void dbg_save_space_info(struct ubifs_info *c); -+int dbg_check_space_info(struct ubifs_info *c); - int dbg_check_lprops(struct ubifs_info *c); - int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot); - int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot); -@@ -274,9 +333,6 @@ int dbg_force_in_the_gaps(void); - - #define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY) - --void dbg_failure_mode_registration(struct ubifs_info *c); --void dbg_failure_mode_deregistration(struct ubifs_info *c); -- - #ifndef UBIFS_DBG_PRESERVE_UBI - - #define ubi_leb_read dbg_leb_read -@@ -318,9 +374,13 @@ static inline int dbg_change(struct ubi_ - return dbg_leb_change(desc, lnum, buf, len, UBI_UNKNOWN); - } - --#else /* !CONFIG_UBIFS_FS_DEBUG */ -+/* Debugfs-related stuff */ -+int dbg_debugfs_init(void); -+void dbg_debugfs_exit(void); -+int dbg_debugfs_init_fs(struct ubifs_info *c); -+void dbg_debugfs_exit_fs(struct ubifs_info *c); - --#define UBIFS_DBG(op) -+#else /* !CONFIG_UBIFS_FS_DEBUG */ - - /* Use "if (0)" to make compiler check arguments even if debugging is off */ - #define ubifs_assert(expr) do { \ -@@ -360,26 +420,33 @@ static inline int dbg_change(struct ubi_ - #define DBGKEY(key) ((char *)(key)) - #define DBGKEY1(key) ((char *)(key)) - --#define dbg_ntype(type) "" --#define dbg_cstate(cmt_state) "" --#define dbg_get_key_dump(c, key) ({}) --#define dbg_dump_inode(c, inode) ({}) --#define dbg_dump_node(c, node) ({}) --#define dbg_dump_budget_req(req) ({}) --#define dbg_dump_lstats(lst) ({}) --#define dbg_dump_budg(c) ({}) --#define dbg_dump_lprop(c, lp) ({}) --#define dbg_dump_lprops(c) ({}) --#define dbg_dump_lpt_info(c) ({}) --#define dbg_dump_leb(c, lnum) ({}) --#define dbg_dump_znode(c, znode) ({}) --#define dbg_dump_heap(c, heap, cat) ({}) --#define dbg_dump_pnode(c, pnode, parent, iip) ({}) --#define dbg_dump_tnc(c) ({}) --#define dbg_dump_index(c) ({}) -+#define ubifs_debugging_init(c) 0 -+#define ubifs_debugging_exit(c) ({}) -+ -+#define dbg_ntype(type) "" -+#define dbg_cstate(cmt_state) "" -+#define dbg_get_key_dump(c, key) ({}) -+#define dbg_dump_inode(c, inode) ({}) -+#define dbg_dump_node(c, node) ({}) -+#define dbg_dump_lpt_node(c, node, lnum, offs) ({}) -+#define dbg_dump_budget_req(req) ({}) -+#define dbg_dump_lstats(lst) ({}) -+#define dbg_dump_budg(c) ({}) -+#define dbg_dump_lprop(c, lp) ({}) -+#define dbg_dump_lprops(c) ({}) -+#define dbg_dump_lpt_info(c) ({}) -+#define dbg_dump_leb(c, lnum) ({}) -+#define dbg_dump_znode(c, znode) ({}) -+#define dbg_dump_heap(c, heap, cat) ({}) -+#define dbg_dump_pnode(c, pnode, parent, iip) ({}) -+#define dbg_dump_tnc(c) ({}) -+#define dbg_dump_index(c) ({}) -+#define dbg_dump_lpt_lebs(c) ({}) - - #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 - #define dbg_old_index_check_init(c, zroot) 0 -+#define dbg_save_space_info(c) ({}) -+#define dbg_check_space_info(c) 0 - #define dbg_check_old_index(c, zroot) 0 - #define dbg_check_cats(c) 0 - #define dbg_check_ltab(c) 0 -@@ -396,9 +463,11 @@ static inline int dbg_change(struct ubi_ - #define dbg_force_in_the_gaps_enabled 0 - #define dbg_force_in_the_gaps() 0 - #define dbg_failure_mode 0 --#define dbg_failure_mode_registration(c) ({}) --#define dbg_failure_mode_deregistration(c) ({}) - --#endif /* !CONFIG_UBIFS_FS_DEBUG */ -+#define dbg_debugfs_init() 0 -+#define dbg_debugfs_exit() -+#define dbg_debugfs_init_fs(c) 0 -+#define dbg_debugfs_exit_fs(c) 0 - -+#endif /* 
!CONFIG_UBIFS_FS_DEBUG */ - #endif /* !__UBIFS_DEBUG_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/dir.c linux-omap-2.6.28-nokia1/fs/ubifs/dir.c ---- linux-omap-2.6.28-omap1/fs/ubifs/dir.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/dir.c 2011-06-22 13:19:33.243063269 +0200 -@@ -482,30 +482,29 @@ static int ubifs_dir_release(struct inod - } - - /** -- * lock_2_inodes - lock two UBIFS inodes. -+ * lock_2_inodes - a wrapper for locking two UBIFS inodes. - * @inode1: first inode - * @inode2: second inode -+ * -+ * We do not implement any tricks to guarantee strict lock ordering, because -+ * VFS has already done it for us on the @i_mutex. So this is just a simple -+ * wrapper function. - */ - static void lock_2_inodes(struct inode *inode1, struct inode *inode2) - { -- if (inode1->i_ino < inode2->i_ino) { -- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_2); -- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_3); -- } else { -- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); -- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_3); -- } -+ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); -+ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); - } - - /** -- * unlock_2_inodes - unlock two UBIFS inodes inodes. -+ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes. - * @inode1: first inode - * @inode2: second inode - */ - static void unlock_2_inodes(struct inode *inode1, struct inode *inode2) - { -- mutex_unlock(&ubifs_inode(inode1)->ui_mutex); - mutex_unlock(&ubifs_inode(inode2)->ui_mutex); -+ mutex_unlock(&ubifs_inode(inode1)->ui_mutex); - } - - static int ubifs_link(struct dentry *old_dentry, struct inode *dir, -@@ -527,6 +526,27 @@ static int ubifs_link(struct dentry *old - dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu", - dentry->d_name.len, dentry->d_name.name, inode->i_ino, - inode->i_nlink, dir->i_ino); -+ ubifs_assert(mutex_is_locked(&dir->i_mutex)); -+ ubifs_assert(mutex_is_locked(&inode->i_mutex)); -+ -+ /* -+ * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing -+ * otherwise has the potential to corrupt the orphan inode list. -+ * -+ * Indeed, consider a scenario when 'vfs_link(dirA/fileA)' and -+ * 'vfs_unlink(dirA/fileA, dirB/fileB)' race. 'vfs_link()' does not -+ * lock 'dirA->i_mutex', so this is possible. Both of the functions -+ * lock 'fileA->i_mutex' though. Suppose 'vfs_unlink()' wins, and takes -+ * 'fileA->i_mutex' mutex first. Suppose 'fileA->i_nlink' is 1. In this -+ * case 'ubifs_unlink()' will drop the last reference, and put 'inodeA' -+ * to the list of orphans. After this, 'vfs_link()' will link -+ * 'dirB/fileB' to 'inodeA'. This is a problem because, for example, -+ * the subsequent 'vfs_unlink(dirB/fileB)' will add the same inode -+ * to the list of orphans. 
-+ */ -+ if (inode->i_nlink == 0) -+ return -ENOENT; -+ - err = dbg_check_synced_i_size(inode); - if (err) - return err; -@@ -580,6 +600,8 @@ static int ubifs_unlink(struct inode *di - dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu", - dentry->d_name.len, dentry->d_name.name, inode->i_ino, - inode->i_nlink, dir->i_ino); -+ ubifs_assert(mutex_is_locked(&dir->i_mutex)); -+ ubifs_assert(mutex_is_locked(&inode->i_mutex)); - err = dbg_check_synced_i_size(inode); - if (err) - return err; -@@ -667,7 +689,8 @@ static int ubifs_rmdir(struct inode *dir - - dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len, - dentry->d_name.name, inode->i_ino, dir->i_ino); -- -+ ubifs_assert(mutex_is_locked(&dir->i_mutex)); -+ ubifs_assert(mutex_is_locked(&inode->i_mutex)); - err = check_dir_empty(c, dentry->d_inode); - if (err) - return err; -@@ -922,59 +945,30 @@ out_budg: - } - - /** -- * lock_3_inodes - lock three UBIFS inodes for rename. -+ * lock_3_inodes - a wrapper for locking three UBIFS inodes. - * @inode1: first inode - * @inode2: second inode - * @inode3: third inode - * -- * For 'ubifs_rename()', @inode1 may be the same as @inode2 whereas @inode3 may -- * be null. -+ * This function is used for 'ubifs_rename()' and @inode1 may be the same as -+ * @inode2 whereas @inode3 may be %NULL. -+ * -+ * We do not implement any tricks to guarantee strict lock ordering, because -+ * VFS has already done it for us on the @i_mutex. So this is just a simple -+ * wrapper function. - */ - static void lock_3_inodes(struct inode *inode1, struct inode *inode2, - struct inode *inode3) - { -- struct inode *i1, *i2, *i3; -- -- if (!inode3) { -- if (inode1 != inode2) { -- lock_2_inodes(inode1, inode2); -- return; -- } -- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); -- return; -- } -- -- if (inode1 == inode2) { -- lock_2_inodes(inode1, inode3); -- return; -- } -- -- /* 3 different inodes */ -- if (inode1 < inode2) { -- i3 = inode2; -- if (inode1 < inode3) { -- i1 = inode1; -- i2 = inode3; -- } else { -- i1 = inode3; -- i2 = inode1; -- } -- } else { -- i3 = inode1; -- if (inode2 < inode3) { -- i1 = inode2; -- i2 = inode3; -- } else { -- i1 = inode3; -- i2 = inode2; -- } -- } -- mutex_lock_nested(&ubifs_inode(i1)->ui_mutex, WB_MUTEX_1); -- lock_2_inodes(i2, i3); -+ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); -+ if (inode2 != inode1) -+ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); -+ if (inode3) -+ mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3); - } - - /** -- * unlock_3_inodes - unlock three UBIFS inodes for rename. -+ * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename. 
- * @inode1: first inode - * @inode2: second inode - * @inode3: third inode -@@ -982,11 +976,11 @@ static void lock_3_inodes(struct inode * - static void unlock_3_inodes(struct inode *inode1, struct inode *inode2, - struct inode *inode3) - { -- mutex_unlock(&ubifs_inode(inode1)->ui_mutex); -- if (inode1 != inode2) -- mutex_unlock(&ubifs_inode(inode2)->ui_mutex); - if (inode3) - mutex_unlock(&ubifs_inode(inode3)->ui_mutex); -+ if (inode1 != inode2) -+ mutex_unlock(&ubifs_inode(inode2)->ui_mutex); -+ mutex_unlock(&ubifs_inode(inode1)->ui_mutex); - } - - static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, -@@ -1020,6 +1014,11 @@ static int ubifs_rename(struct inode *ol - "dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name, - old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len, - new_dentry->d_name.name, new_dir->i_ino); -+ ubifs_assert(mutex_is_locked(&old_dir->i_mutex)); -+ ubifs_assert(mutex_is_locked(&new_dir->i_mutex)); -+ if (unlink) -+ ubifs_assert(mutex_is_locked(&new_inode->i_mutex)); -+ - - if (unlink && is_dir) { - err = check_dir_empty(c, new_inode); -@@ -1199,7 +1198,7 @@ int ubifs_getattr(struct vfsmount *mnt, - return 0; - } - --struct inode_operations ubifs_dir_inode_operations = { -+const struct inode_operations ubifs_dir_inode_operations = { - .lookup = ubifs_lookup, - .create = ubifs_create, - .link = ubifs_link, -@@ -1219,7 +1218,7 @@ struct inode_operations ubifs_dir_inode_ - #endif - }; - --struct file_operations ubifs_dir_operations = { -+const struct file_operations ubifs_dir_operations = { - .llseek = ubifs_dir_llseek, - .release = ubifs_dir_release, - .read = generic_read_dir, -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/file.c linux-omap-2.6.28-nokia1/fs/ubifs/file.c ---- linux-omap-2.6.28-omap1/fs/ubifs/file.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/file.c 2011-06-22 13:19:33.243063269 +0200 -@@ -72,8 +72,8 @@ static int read_block(struct inode *inod - return err; - } - -- ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); -- -+ ubifs_assert(le64_to_cpu(dn->ch.sqnum) > -+ ubifs_inode(inode)->creat_sqnum); - len = le32_to_cpu(dn->size); - if (len <= 0 || len > UBIFS_BLOCK_SIZE) - goto dump; -@@ -254,7 +254,7 @@ static int write_begin_slow(struct addre - } - - if (!PageUptodate(page)) { -- if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) -+ if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) - SetPageChecked(page); - else { - err = do_readpage(page); -@@ -429,9 +429,9 @@ static int ubifs_write_begin(struct file - struct ubifs_inode *ui = ubifs_inode(inode); - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - int uninitialized_var(err), appending = !!(pos + len > inode->i_size); -+ int skipped_read = 0; - struct page *page; - -- - ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); - - if (unlikely(c->ro_media)) -@@ -444,7 +444,7 @@ static int ubifs_write_begin(struct file - - if (!PageUptodate(page)) { - /* The page is not loaded from the flash */ -- if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) -+ if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { - /* - * We change whole page so no need to load it. But we - * have to set the @PG_checked flag to make the further -@@ -453,7 +453,8 @@ static int ubifs_write_begin(struct file - * the media. 
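[Editor's sketch, not part of the patch] The mask change in write_begin_slow() and ubifs_write_begin() above is easy to misread: PAGE_CACHE_MASK is ~(PAGE_CACHE_SIZE - 1), so pos & PAGE_CACHE_MASK yields the page-aligned base (non-zero for anything past the first page), while the intended "is pos on a page boundary?" test needs the complement. A tiny stand-alone demonstration with an assumed 4 KiB page size:

    #include <stdio.h>

    #define PAGE_CACHE_SIZE 4096UL
    #define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))   /* as in the kernel */

    int main(void)
    {
            unsigned long pos = 2 * PAGE_CACHE_SIZE;   /* a page-aligned offset */

            /* Old test: non-zero for every position past the first page. */
            printf("pos & PAGE_CACHE_MASK  = %#lx\n", pos & PAGE_CACHE_MASK);
            /* Fixed test: zero exactly when pos sits on a page boundary. */
            printf("pos & ~PAGE_CACHE_MASK = %#lx\n", pos & ~PAGE_CACHE_MASK);
            return 0;
    }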
- */ - SetPageChecked(page); -- else { -+ skipped_read = 1; -+ } else { - err = do_readpage(page); - if (err) { - unlock_page(page); -@@ -470,6 +471,14 @@ static int ubifs_write_begin(struct file - if (unlikely(err)) { - ubifs_assert(err == -ENOSPC); - /* -+ * If we skipped reading the page because we were going to -+ * write all of it, then it is not up to date. -+ */ -+ if (skipped_read) { -+ ClearPageChecked(page); -+ ClearPageUptodate(page); -+ } -+ /* - * Budgeting failed which means it would have to force - * write-back but didn't, because we set the @fast flag in the - * request. Write-back cannot be done now, while we have the -@@ -1540,7 +1549,7 @@ static int ubifs_file_mmap(struct file * - return 0; - } - --struct address_space_operations ubifs_file_address_operations = { -+const struct address_space_operations ubifs_file_address_operations = { - .readpage = ubifs_readpage, - .writepage = ubifs_writepage, - .write_begin = ubifs_write_begin, -@@ -1550,7 +1559,7 @@ struct address_space_operations ubifs_fi - .releasepage = ubifs_releasepage, - }; - --struct inode_operations ubifs_file_inode_operations = { -+const struct inode_operations ubifs_file_inode_operations = { - .setattr = ubifs_setattr, - .getattr = ubifs_getattr, - #ifdef CONFIG_UBIFS_FS_XATTR -@@ -1561,14 +1570,14 @@ struct inode_operations ubifs_file_inode - #endif - }; - --struct inode_operations ubifs_symlink_inode_operations = { -+const struct inode_operations ubifs_symlink_inode_operations = { - .readlink = generic_readlink, - .follow_link = ubifs_follow_link, - .setattr = ubifs_setattr, - .getattr = ubifs_getattr, - }; - --struct file_operations ubifs_file_operations = { -+const struct file_operations ubifs_file_operations = { - .llseek = generic_file_llseek, - .read = do_sync_read, - .write = do_sync_write, -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/gc.c linux-omap-2.6.28-nokia1/fs/ubifs/gc.c ---- linux-omap-2.6.28-omap1/fs/ubifs/gc.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/gc.c 2011-06-22 13:19:33.243063269 +0200 -@@ -31,6 +31,26 @@ - * to be reused. Garbage collection will cause the number of dirty index nodes - * to grow, however sufficient space is reserved for the index to ensure the - * commit will never run out of space. -+ * -+ * Notes about dead watermark. At current UBIFS implementation we assume that -+ * LEBs which have less than @c->dead_wm bytes of free + dirty space are full -+ * and not worth garbage-collecting. The dead watermark is one min. I/O unit -+ * size, or min. UBIFS node size, depending on what is greater. Indeed, UBIFS -+ * Garbage Collector has to synchronize the GC head's write buffer before -+ * returning, so this is about wasting one min. I/O unit. However, UBIFS GC can -+ * actually reclaim even very small pieces of dirty space by garbage collecting -+ * enough dirty LEBs, but we do not bother doing this at this implementation. -+ * -+ * Notes about dark watermark. The results of GC work depends on how big are -+ * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed, -+ * if GC move data from LEB A to LEB B and nodes in LEB A are large, GC would -+ * have to waste large pieces of free space at the end of LEB B, because nodes -+ * from LEB A would not fit. And the worst situation is when all nodes are of -+ * maximum size. So dark watermark is the amount of free + dirty space in LEB -+ * which are guaranteed to be reclaimable. If LEB has less space, the GC migh -+ * be unable to reclaim it. 
So, LEBs with free + dirty greater than dark -+ * watermark are "good" LEBs from GC's point of few. The other LEBs are not so -+ * good, and GC takes extra care when moving them. - */ - - #include -@@ -381,7 +401,7 @@ int ubifs_garbage_collect_leb(struct ubi - - /* - * Don't release the LEB until after the next commit, because -- * it may contain date which is needed for recovery. So -+ * it may contain data which is needed for recovery. So - * although we freed this LEB, it will become usable only after - * the commit. - */ -@@ -810,8 +830,9 @@ out: - * ubifs_destroy_idx_gc - destroy idx_gc list. - * @c: UBIFS file-system description object - * -- * This function destroys the idx_gc list. It is called when unmounting or -- * remounting read-only so locks are not needed. -+ * This function destroys the @c->idx_gc list. It is called when unmounting -+ * so locks are not needed. Returns zero in case of success and a negative -+ * error code in case of failure. - */ - void ubifs_destroy_idx_gc(struct ubifs_info *c) - { -@@ -824,7 +845,6 @@ void ubifs_destroy_idx_gc(struct ubifs_i - list_del(&idx_gc->list); - kfree(idx_gc); - } -- - } - - /** -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/io.c linux-omap-2.6.28-nokia1/fs/ubifs/io.c ---- linux-omap-2.6.28-omap1/fs/ubifs/io.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/io.c 2011-06-22 13:19:33.243063269 +0200 -@@ -29,7 +29,7 @@ - * would have been wasted for padding to the nearest minimal I/O unit boundary. - * Instead, data first goes to the write-buffer and is flushed when the - * buffer is full or when it is not used for some time (by timer). This is -- * similarto the mechanism is used by JFFS2. -+ * similar to the mechanism is used by JFFS2. - * - * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by - * mutexes defined inside these objects. Since sometimes upper-level code -@@ -75,7 +75,7 @@ void ubifs_ro_mode(struct ubifs_info *c, - * @lnum: logical eraseblock number - * @offs: offset within the logical eraseblock - * @quiet: print no messages -- * @chk_crc: indicates whether to always check the CRC -+ * @must_chk_crc: indicates whether to always check the CRC - * - * This function checks node magic number and CRC checksum. This function also - * validates node length to prevent UBIFS from becoming crazy when an attacker -@@ -83,11 +83,17 @@ void ubifs_ro_mode(struct ubifs_info *c, - * node length in the common header could cause UBIFS to read memory outside of - * allocated buffer when checking the CRC checksum. - * -- * This function returns zero in case of success %-EUCLEAN in case of bad CRC -- * or magic. -+ * This function may skip data nodes CRC checking if @c->no_chk_data_crc is -+ * true, which is controlled by corresponding UBIFS mount option. However, if -+ * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is -+ * checked. Similarly, if @c->always_chk_crc is true, @c->no_chk_data_crc is -+ * ignored and CRC is checked. -+ * -+ * This function returns zero in case of success and %-EUCLEAN in case of bad -+ * CRC or magic. 
- */ - int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, -- int offs, int quiet, int chk_crc) -+ int offs, int quiet, int must_chk_crc) - { - int err = -EINVAL, type, node_len; - uint32_t crc, node_crc, magic; -@@ -123,9 +129,9 @@ int ubifs_check_node(const struct ubifs_ - node_len > c->ranges[type].max_len) - goto out_len; - -- if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc) -- if (c->no_chk_data_crc) -- return 0; -+ if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc && -+ c->no_chk_data_crc) -+ return 0; - - crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); - node_crc = le32_to_cpu(ch->crc); -@@ -287,13 +293,15 @@ void ubifs_prep_grp_node(struct ubifs_in - * - * This function is called when the write-buffer timer expires. - */ --static void wbuf_timer_callback_nolock(unsigned long data) -+static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer) - { -- struct ubifs_wbuf *wbuf = (struct ubifs_wbuf *)data; -+ struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); - -+ dbg_io("jhead %d", wbuf->jhead); - wbuf->need_sync = 1; - wbuf->c->need_wbuf_sync = 1; - ubifs_wake_up_bgt(wbuf->c); -+ return HRTIMER_NORESTART; - } - - /** -@@ -302,13 +310,16 @@ static void wbuf_timer_callback_nolock(u - */ - static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) - { -- ubifs_assert(!timer_pending(&wbuf->timer)); -+ ubifs_assert(!hrtimer_active(&wbuf->timer)); - -- if (!wbuf->timeout) -+ if (wbuf->no_timer) - return; -- -- wbuf->timer.expires = jiffies + wbuf->timeout; -- add_timer(&wbuf->timer); -+ dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead, -+ div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC), -+ div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta, -+ USEC_PER_SEC)); -+ hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, -+ HRTIMER_MODE_REL); - } - - /** -@@ -317,13 +328,10 @@ static void new_wbuf_timer_nolock(struct - */ - static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) - { -- /* -- * If the syncer is waiting for the lock (from the background thread's -- * context) and another task is changing write-buffer then the syncing -- * should be canceled. -- */ -+ if (wbuf->no_timer) -+ return; - wbuf->need_sync = 0; -- del_timer(&wbuf->timer); -+ hrtimer_cancel(&wbuf->timer); - } - - /** -@@ -343,8 +351,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_ - /* Write-buffer is empty or not seeked */ - return 0; - -- dbg_io("LEB %d:%d, %d bytes", -- wbuf->lnum, wbuf->offs, wbuf->used); -+ dbg_io("LEB %d:%d, %d bytes, jhead %d", -+ wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead); - ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); - ubifs_assert(!(wbuf->avail & 7)); - ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size); -@@ -384,7 +392,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_ - * @offs: logical eraseblock offset to seek to - * @dtype: data type - * -- * This function targets the write buffer to logical eraseblock @lnum:@offs. -+ * This function targets the write-buffer to logical eraseblock @lnum:@offs. - * The write-buffer is synchronized if it is not empty. Returns zero in case of - * success and a negative error code in case of failure. 
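[Editor's sketch, not part of the patch] The rewritten condition in ubifs_check_node() above folds the old nested test into a single expression: a data node's CRC is skipped only when the caller did not pass must_chk_crc, the always_chk_crc override is off, and the no_chk_data_crc mount option is set. A small stand-alone check of that predicate (UBIFS_DATA_NODE is given a placeholder value, only the comparison matters):

    #include <stdio.h>

    #define UBIFS_DATA_NODE 1   /* placeholder value for illustration */

    /* Mirrors the skip condition in ubifs_check_node(). */
    static int skip_data_crc(int must_chk_crc, int type,
                             int always_chk_crc, int no_chk_data_crc)
    {
            return !must_chk_crc && type == UBIFS_DATA_NODE &&
                   !always_chk_crc && no_chk_data_crc;
    }

    int main(void)
    {
            /* no_chk_data_crc set, caller does not force the check: skipped */
            printf("%d\n", skip_data_crc(0, UBIFS_DATA_NODE, 0, 1));
            /* caller passes must_chk_crc: always checked */
            printf("%d\n", skip_data_crc(1, UBIFS_DATA_NODE, 0, 1));
            return 0;
    }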
- */ -@@ -393,7 +401,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_ - { - const struct ubifs_info *c = wbuf->c; - -- dbg_io("LEB %d:%d", lnum, offs); -+ dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead); - ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); - ubifs_assert(offs >= 0 && offs <= c->leb_size); - ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); -@@ -500,9 +508,9 @@ int ubifs_wbuf_write_nolock(struct ubifs - struct ubifs_info *c = wbuf->c; - int err, written, n, aligned_len = ALIGN(len, 8), offs; - -- dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len, -- dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum, -- wbuf->offs + wbuf->used); -+ dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len, -+ dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead, -+ wbuf->lnum, wbuf->offs + wbuf->used); - ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); - ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); - ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); -@@ -527,8 +535,8 @@ int ubifs_wbuf_write_nolock(struct ubifs - memcpy(wbuf->buf + wbuf->used, buf, len); - - if (aligned_len == wbuf->avail) { -- dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, -- wbuf->offs); -+ dbg_io("flush jhead %d wbuf to LEB %d:%d", -+ wbuf->jhead, wbuf->lnum, wbuf->offs); - err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, - wbuf->offs, c->min_io_size, - wbuf->dtype); -@@ -556,7 +564,8 @@ int ubifs_wbuf_write_nolock(struct ubifs - * minimal I/O unit. We have to fill and flush write-buffer and switch - * to the next min. I/O unit. - */ -- dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs); -+ dbg_io("flush jhead %d wbuf to LEB %d:%d", -+ wbuf->jhead, wbuf->lnum, wbuf->offs); - memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); - err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, - c->min_io_size, wbuf->dtype); -@@ -689,7 +698,8 @@ int ubifs_read_node_wbuf(struct ubifs_wb - int err, rlen, overlap; - struct ubifs_ch *ch = buf; - -- dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); -+ dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs, -+ dbg_ntype(type), len, wbuf->jhead); - ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); - ubifs_assert(!(offs & 7) && offs < c->leb_size); - ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); -@@ -813,7 +823,7 @@ out: - * @c: UBIFS file-system description object - * @wbuf: write-buffer to initialize - * -- * This function initializes write buffer. Returns zero in case of success -+ * This function initializes write-buffer. Returns zero in case of success - * %-ENOMEM in case of failure. - */ - int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) -@@ -839,20 +849,21 @@ int ubifs_wbuf_init(struct ubifs_info *c - wbuf->sync_callback = NULL; - mutex_init(&wbuf->io_mutex); - spin_lock_init(&wbuf->lock); -- - wbuf->c = c; -- init_timer(&wbuf->timer); -- wbuf->timer.function = wbuf_timer_callback_nolock; -- wbuf->timer.data = (unsigned long)wbuf; -- wbuf->timeout = DEFAULT_WBUF_TIMEOUT; - wbuf->next_ino = 0; - -+ hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ wbuf->timer.function = wbuf_timer_callback_nolock; -+ wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0); -+ wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT; -+ wbuf->delta *= 1000000000ULL; -+ ubifs_assert(wbuf->delta <= ULONG_MAX); - return 0; - } - - /** - * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. 
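[Editor's sketch, not part of the patch] The io.c hunks above replace the jiffies-based write-buffer timer with an hrtimer armed through hrtimer_start_range_ns(), i.e. a soft deadline plus a slack window so nearby expirations can be coalesced. A minimal kernel-module sketch of the same arming pattern; the 3/5 second limits are illustrative, the real values come from WBUF_TIMEOUT_SOFTLIMIT/WBUF_TIMEOUT_HARDLIMIT in the patched tree:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define SOFTLIMIT_SECS  3   /* illustrative */
    #define HARDLIMIT_SECS  5   /* illustrative */

    static struct hrtimer demo_timer;

    static enum hrtimer_restart demo_timer_cb(struct hrtimer *timer)
    {
            pr_info("write-buffer would be marked for syncing here\n");
            return HRTIMER_NORESTART;        /* one-shot, like the wbuf timer */
    }

    static int __init demo_init(void)
    {
            ktime_t softlimit = ktime_set(SOFTLIMIT_SECS, 0);
            unsigned long delta = (HARDLIMIT_SECS - SOFTLIMIT_SECS) * NSEC_PER_SEC;

            hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            demo_timer.function = demo_timer_cb;
            /* Fire anywhere in [softlimit, softlimit + delta]. */
            hrtimer_start_range_ns(&demo_timer, softlimit, delta, HRTIMER_MODE_REL);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            hrtimer_cancel(&demo_timer);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");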
-- * @wbuf: the write-buffer whereto add -+ * @wbuf: the write-buffer where to add - * @inum: the inode number - * - * This function adds an inode number to the inode array of the write-buffer. -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/ioctl.c linux-omap-2.6.28-nokia1/fs/ubifs/ioctl.c ---- linux-omap-2.6.28-omap1/fs/ubifs/ioctl.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/ioctl.c 2011-06-22 13:19:33.243063269 +0200 -@@ -154,6 +154,7 @@ long ubifs_ioctl(struct file *file, unsi - case FS_IOC_GETFLAGS: - flags = ubifs2ioctl(ubifs_inode(inode)->flags); - -+ dbg_gen("get flags: %#x, i_flags %#x", flags, inode->i_flags); - return put_user(flags, (int __user *) arg); - - case FS_IOC_SETFLAGS: { -@@ -176,6 +177,7 @@ long ubifs_ioctl(struct file *file, unsi - err = mnt_want_write(file->f_path.mnt); - if (err) - return err; -+ dbg_gen("set flags: %#x, i_flags %#x", flags, inode->i_flags); - err = setflags(inode, flags); - mnt_drop_write(file->f_path.mnt); - return err; -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/journal.c linux-omap-2.6.28-nokia1/fs/ubifs/journal.c ---- linux-omap-2.6.28-omap1/fs/ubifs/journal.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/journal.c 2011-06-22 13:19:33.243063269 +0200 -@@ -208,7 +208,7 @@ again: - offs = 0; - - out: -- err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, UBI_SHORTTERM); -+ err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype); - if (err) - goto out_unlock; - -@@ -469,7 +469,10 @@ static void pack_inode(struct ubifs_info - ino->flags = cpu_to_le32(ui->flags); - ino->size = cpu_to_le64(ui->ui_size); - ino->nlink = cpu_to_le32(inode->i_nlink); -- ino->compr_type = cpu_to_le16(ui->compr_type); -+ if (ui->compr_type == UBIFS_COMPR_LZO999) -+ ino->compr_type = cpu_to_le16(UBIFS_COMPR_LZO); -+ else -+ ino->compr_type = cpu_to_le16(ui->compr_type); - ino->data_len = cpu_to_le32(ui->data_len); - ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt); - ino->xattr_size = cpu_to_le32(ui->xattr_size); -@@ -704,7 +707,7 @@ int ubifs_jnl_write_data(struct ubifs_in - data->size = cpu_to_le32(len); - zero_data_node_unused(data); - -- if (!(ui->flags && UBIFS_COMPR_FL)) -+ if (!(ui->flags & UBIFS_COMPR_FL)) - /* Compression is disabled for this inode */ - compr_type = UBIFS_COMPR_NONE; - else -@@ -1220,7 +1223,7 @@ int ubifs_jnl_truncate(struct ubifs_info - data_key_init(c, &key, inum, blk); - - bit = old_size & (UBIFS_BLOCK_SIZE - 1); -- blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0: 1); -+ blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1); - data_key_init(c, &to_key, inum, blk); - - err = ubifs_tnc_remove_range(c, &key, &to_key); -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/key.h linux-omap-2.6.28-nokia1/fs/ubifs/key.h ---- linux-omap-2.6.28-omap1/fs/ubifs/key.h 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/key.h 2011-06-22 13:19:33.243063269 +0200 -@@ -38,6 +38,22 @@ - #define __UBIFS_KEY_H__ - - /** -+ * key_mask_hash - mask a valid hash value. -+ * @val: value to be masked -+ * -+ * We use hash values as offset in directories, so values %0 and %1 are -+ * reserved for "." and "..". %2 is reserved for "end of readdir" marker. This -+ * function makes sure the reserved values are not used. -+ */ -+static inline uint32_t key_mask_hash(uint32_t hash) -+{ -+ hash &= UBIFS_S_KEY_HASH_MASK; -+ if (unlikely(hash <= 2)) -+ hash += 3; -+ return hash; -+} -+ -+/** - * key_r5_hash - R5 hash function (borrowed from reiserfs). 
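[Editor's sketch, not part of the patch] key_mask_hash() above exists because UBIFS reuses directory-entry hash values as readdir offsets, and offsets 0, 1 and 2 are reserved for ".", ".." and the end-of-readdir marker. A stand-alone copy of the logic; the 30-bit UBIFS_S_KEY_HASH_MASK value is an assumption here, taken from the usual UBIFS key layout:

    #include <stdio.h>
    #include <stdint.h>

    #define UBIFS_S_KEY_HASH_MASK 0x3fffffff   /* assumed 30-bit hash space */

    /* Same logic as key_mask_hash() in the hunk above. */
    static uint32_t key_mask_hash(uint32_t hash)
    {
            hash &= UBIFS_S_KEY_HASH_MASK;
            if (hash <= 2)          /* 0, 1, 2 are ".", ".." and end-of-readdir */
                    hash += 3;
            return hash;
    }

    int main(void)
    {
            /* Prints "3 5 7": reserved values are bumped, others pass through. */
            printf("%u %u %u\n", key_mask_hash(0), key_mask_hash(2),
                   key_mask_hash(0x40000007));
            return 0;
    }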
- * @s: direntry name - * @len: name length -@@ -54,16 +70,7 @@ static inline uint32_t key_r5_hash(const - str++; - } - -- a &= UBIFS_S_KEY_HASH_MASK; -- -- /* -- * We use hash values as offset in directories, so values %0 and %1 are -- * reserved for "." and "..". %2 is reserved for "end of readdir" -- * marker. -- */ -- if (unlikely(a >= 0 && a <= 2)) -- a += 3; -- return a; -+ return key_mask_hash(a); - } - - /** -@@ -77,10 +84,7 @@ static inline uint32_t key_test_hash(con - - len = min_t(uint32_t, len, 4); - memcpy(&a, str, len); -- a &= UBIFS_S_KEY_HASH_MASK; -- if (unlikely(a >= 0 && a <= 2)) -- a += 3; -- return a; -+ return key_mask_hash(a); - } - - /** -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/lprops.c linux-omap-2.6.28-nokia1/fs/ubifs/lprops.c ---- linux-omap-2.6.28-omap1/fs/ubifs/lprops.c 2011-06-22 13:14:23.443067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/lprops.c 2011-06-22 13:19:33.243063269 +0200 -@@ -520,13 +520,13 @@ static int is_lprops_dirty(struct ubifs_ - * @flags: new flags - * @idx_gc_cnt: change to the count of idx_gc list - * -- * This function changes LEB properties. This function does not change a LEB -- * property (@free, @dirty or @flag) if the value passed is %LPROPS_NC. -- * -- * This function returns a pointer to the updated LEB properties on success -- * and a negative error code on failure. N.B. the LEB properties may have had to -- * be copied (due to COW) and consequently the pointer returned may not be the -- * same as the pointer passed. -+ * This function changes LEB properties (@free, @dirty or @flag). However, the -+ * property which has the %LPROPS_NC value is not changed. Returns a pointer to -+ * the updated LEB properties on success and a negative error code on failure. -+ * -+ * Note, the LEB properties may have had to be copied (due to COW) and -+ * consequently the pointer returned may not be the same as the pointer -+ * passed. 
- */ - const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, - const struct ubifs_lprops *lp, -@@ -635,10 +635,10 @@ const struct ubifs_lprops *ubifs_change_ - * @c: UBIFS file-system description object - * @st: return statistics - */ --void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *st) -+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) - { - spin_lock(&c->space_lock); -- memcpy(st, &c->lst, sizeof(struct ubifs_lp_stats)); -+ memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats)); - spin_unlock(&c->space_lock); - } - -@@ -678,6 +678,9 @@ int ubifs_change_one_lp(struct ubifs_inf - - out: - ubifs_release_lprops(c); -+ if (err) -+ ubifs_err("cannot change properties of LEB %d, error %d", -+ lnum, err); - return err; - } - -@@ -714,6 +717,9 @@ int ubifs_update_one_lp(struct ubifs_inf - - out: - ubifs_release_lprops(c); -+ if (err) -+ ubifs_err("cannot update properties of LEB %d, error %d", -+ lnum, err); - return err; - } - -@@ -737,6 +743,8 @@ int ubifs_read_one_lp(struct ubifs_info - lpp = ubifs_lpt_lookup(c, lnum); - if (IS_ERR(lpp)) { - err = PTR_ERR(lpp); -+ ubifs_err("cannot read properties of LEB %d, error %d", -+ lnum, err); - goto out; - } - -@@ -1088,7 +1096,7 @@ static int scan_check_cb(struct ubifs_in - } - } - -- sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); -+ sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); - if (IS_ERR(sleb)) { - /* - * After an unclean unmount, empty and freeable LEBs -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/lpt.c linux-omap-2.6.28-nokia1/fs/ubifs/lpt.c ---- linux-omap-2.6.28-omap1/fs/ubifs/lpt.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/lpt.c 2011-06-22 13:19:33.243063269 +0200 -@@ -36,15 +36,16 @@ - * can be written into a single eraseblock. In that case, garbage collection - * consists of just writing the whole table, which therefore makes all other - * eraseblocks reusable. In the case of the big model, dirty eraseblocks are -- * selected for garbage collection, which consists are marking the nodes in -+ * selected for garbage collection, which consists of marking the clean nodes in - * that LEB as dirty, and then only the dirty nodes are written out. Also, in - * the case of the big model, a table of LEB numbers is saved so that the entire - * LPT does not to be scanned looking for empty eraseblocks when UBIFS is first - * mounted. - */ - --#include - #include "ubifs.h" -+#include -+#include - - /** - * do_calc_lpt_geom - calculate sizes for the LPT area. 
-@@ -135,15 +136,13 @@ static void do_calc_lpt_geom(struct ubif - int ubifs_calc_lpt_geom(struct ubifs_info *c) - { - int lebs_needed; -- uint64_t sz; -+ long long sz; - - do_calc_lpt_geom(c); - - /* Verify that lpt_lebs is big enough */ - sz = c->lpt_sz * 2; /* Must have at least 2 times the size */ -- sz += c->leb_size - 1; -- do_div(sz, c->leb_size); -- lebs_needed = sz; -+ lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); - if (lebs_needed > c->lpt_lebs) { - ubifs_err("too few LPT LEBs"); - return -EINVAL; -@@ -156,7 +155,6 @@ int ubifs_calc_lpt_geom(struct ubifs_inf - } - - c->check_lpt_free = c->big_lpt; -- - return 0; - } - -@@ -176,7 +174,7 @@ static int calc_dflt_lpt_geom(struct ubi - int *big_lpt) - { - int i, lebs_needed; -- uint64_t sz; -+ long long sz; - - /* Start by assuming the minimum number of LPT LEBs */ - c->lpt_lebs = UBIFS_MIN_LPT_LEBS; -@@ -203,9 +201,7 @@ static int calc_dflt_lpt_geom(struct ubi - /* Now check there are enough LPT LEBs */ - for (i = 0; i < 64 ; i++) { - sz = c->lpt_sz * 4; /* Allow 4 times the size */ -- sz += c->leb_size - 1; -- do_div(sz, c->leb_size); -- lebs_needed = sz; -+ lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); - if (lebs_needed > c->lpt_lebs) { - /* Not enough LPT LEBs so try again with more */ - c->lpt_lebs = lebs_needed; -@@ -558,7 +554,7 @@ static int calc_nnode_num(int row, int c - * This function calculates and returns the nnode number based on the parent's - * nnode number and the index in parent. - */ --static int calc_nnode_num_from_parent(struct ubifs_info *c, -+static int calc_nnode_num_from_parent(const struct ubifs_info *c, - struct ubifs_nnode *parent, int iip) - { - int num, shft; -@@ -583,7 +579,7 @@ static int calc_nnode_num_from_parent(st - * This function calculates and returns the pnode number based on the parent's - * nnode number and the index in parent. - */ --static int calc_pnode_num_from_parent(struct ubifs_info *c, -+static int calc_pnode_num_from_parent(const struct ubifs_info *c, - struct ubifs_nnode *parent, int iip) - { - int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0; -@@ -966,7 +962,7 @@ static int check_lpt_type(uint8_t **addr - * - * This function returns %0 on success and a negative error code on failure. - */ --static int unpack_pnode(struct ubifs_info *c, void *buf, -+static int unpack_pnode(const struct ubifs_info *c, void *buf, - struct ubifs_pnode *pnode) - { - uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; -@@ -996,15 +992,15 @@ static int unpack_pnode(struct ubifs_inf - } - - /** -- * unpack_nnode - unpack a nnode. -+ * ubifs_unpack_nnode - unpack a nnode. - * @c: UBIFS file-system description object - * @buf: buffer containing packed nnode to unpack - * @nnode: nnode structure to fill - * - * This function returns %0 on success and a negative error code on failure. - */ --static int unpack_nnode(struct ubifs_info *c, void *buf, -- struct ubifs_nnode *nnode) -+int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, -+ struct ubifs_nnode *nnode) - { - uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; - int i, pos = 0, err; -@@ -1036,7 +1032,7 @@ static int unpack_nnode(struct ubifs_inf - * - * This function returns %0 on success and a negative error code on failure. 
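[Editor's sketch, not part of the patch] The lpt.c geometry changes above swap the do_div() dance for div_u64(), the kernel helper that divides a 64-bit value by a 32-bit divisor and returns the quotient, so the usual round-up idiom (add divisor - 1 before dividing) fits on one line. A user-space illustration with a stand-in div_u64() and made-up sizes; 126976 bytes is a plausible usable LEB size for a 128 KiB eraseblock, but the numbers are only examples:

    #include <stdio.h>
    #include <stdint.h>

    /* User-space stand-in for the kernel's div_u64(): 64-bit / 32-bit divide. */
    static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
    {
            return dividend / divisor;
    }

    int main(void)
    {
            uint64_t lpt_sz   = 130000;      /* made-up LPT size in bytes */
            uint32_t leb_size = 126976;      /* example usable LEB size */
            uint64_t sz = lpt_sz * 2;        /* "must have at least 2 times the size" */

            /* Round up: add (leb_size - 1) before dividing, as in the patch. */
            uint64_t lebs_needed = div_u64(sz + leb_size - 1, leb_size);

            printf("%llu LEBs needed\n", (unsigned long long)lebs_needed);
            return 0;
    }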
- */ --static int unpack_ltab(struct ubifs_info *c, void *buf) -+static int unpack_ltab(const struct ubifs_info *c, void *buf) - { - uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; - int i, pos = 0, err; -@@ -1068,7 +1064,7 @@ static int unpack_ltab(struct ubifs_info - * - * This function returns %0 on success and a negative error code on failure. - */ --static int unpack_lsave(struct ubifs_info *c, void *buf) -+static int unpack_lsave(const struct ubifs_info *c, void *buf) - { - uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; - int i, pos = 0, err; -@@ -1096,7 +1092,7 @@ static int unpack_lsave(struct ubifs_inf - * - * This function returns %0 on success and a negative error code on failure. - */ --static int validate_nnode(struct ubifs_info *c, struct ubifs_nnode *nnode, -+static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode, - struct ubifs_nnode *parent, int iip) - { - int i, lvl, max_offs; -@@ -1140,7 +1136,7 @@ static int validate_nnode(struct ubifs_i - * - * This function returns %0 on success and a negative error code on failure. - */ --static int validate_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, -+static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode, - struct ubifs_nnode *parent, int iip) - { - int i; -@@ -1174,7 +1170,8 @@ static int validate_pnode(struct ubifs_i - * This function calculates the LEB numbers for the LEB properties it contains - * based on the pnode number. - */ --static void set_pnode_lnum(struct ubifs_info *c, struct ubifs_pnode *pnode) -+static void set_pnode_lnum(const struct ubifs_info *c, -+ struct ubifs_pnode *pnode) - { - int i, lnum; - -@@ -1227,7 +1224,7 @@ int ubifs_read_nnode(struct ubifs_info * - err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz); - if (err) - goto out; -- err = unpack_nnode(c, buf, nnode); -+ err = ubifs_unpack_nnode(c, buf, nnode); - if (err) - goto out; - } -@@ -1816,7 +1813,7 @@ static struct ubifs_nnode *scan_get_nnod - c->nnode_sz); - if (err) - return ERR_PTR(err); -- err = unpack_nnode(c, buf, nnode); -+ err = ubifs_unpack_nnode(c, buf, nnode); - if (err) - return ERR_PTR(err); - } -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/lpt_commit.c linux-omap-2.6.28-nokia1/fs/ubifs/lpt_commit.c ---- linux-omap-2.6.28-omap1/fs/ubifs/lpt_commit.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/lpt_commit.c 2011-06-22 13:19:33.243063269 +0200 -@@ -229,7 +229,7 @@ static int layout_cnodes(struct ubifs_in - while (offs + len > c->leb_size) { - alen = ALIGN(offs, c->min_io_size); - upd_ltab(c, lnum, c->leb_size - alen, alen - offs); -- dbg_chk_lpt_sz(c, 2, alen - offs); -+ dbg_chk_lpt_sz(c, 2, c->leb_size - offs); - err = alloc_lpt_leb(c, &lnum); - if (err) - goto no_space; -@@ -272,7 +272,7 @@ static int layout_cnodes(struct ubifs_in - if (offs + c->lsave_sz > c->leb_size) { - alen = ALIGN(offs, c->min_io_size); - upd_ltab(c, lnum, c->leb_size - alen, alen - offs); -- dbg_chk_lpt_sz(c, 2, alen - offs); -+ dbg_chk_lpt_sz(c, 2, c->leb_size - offs); - err = alloc_lpt_leb(c, &lnum); - if (err) - goto no_space; -@@ -292,7 +292,7 @@ static int layout_cnodes(struct ubifs_in - if (offs + c->ltab_sz > c->leb_size) { - alen = ALIGN(offs, c->min_io_size); - upd_ltab(c, lnum, c->leb_size - alen, alen - offs); -- dbg_chk_lpt_sz(c, 2, alen - offs); -+ dbg_chk_lpt_sz(c, 2, c->leb_size - offs); - err = alloc_lpt_leb(c, &lnum); - if (err) - goto no_space; -@@ -320,6 +320,8 @@ no_space: - dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " - "done_lsave %d", 
lnum, offs, len, done_ltab, done_lsave); - dbg_dump_lpt_info(c); -+ dbg_dump_lpt_lebs(c); -+ dump_stack(); - return err; - } - -@@ -414,14 +416,12 @@ static int write_cnodes(struct ubifs_inf - alen, UBI_SHORTTERM); - if (err) - return err; -- dbg_chk_lpt_sz(c, 4, alen - wlen); - } -- dbg_chk_lpt_sz(c, 2, 0); -+ dbg_chk_lpt_sz(c, 2, c->leb_size - offs); - err = realloc_lpt_leb(c, &lnum); - if (err) - goto no_space; -- offs = 0; -- from = 0; -+ offs = from = 0; - ubifs_assert(lnum >= c->lpt_first && - lnum <= c->lpt_last); - err = ubifs_leb_unmap(c, lnum); -@@ -475,11 +475,11 @@ static int write_cnodes(struct ubifs_inf - UBI_SHORTTERM); - if (err) - return err; -- dbg_chk_lpt_sz(c, 2, alen - wlen); -+ dbg_chk_lpt_sz(c, 2, c->leb_size - offs); - err = realloc_lpt_leb(c, &lnum); - if (err) - goto no_space; -- offs = 0; -+ offs = from = 0; - ubifs_assert(lnum >= c->lpt_first && - lnum <= c->lpt_last); - err = ubifs_leb_unmap(c, lnum); -@@ -502,11 +502,11 @@ static int write_cnodes(struct ubifs_inf - UBI_SHORTTERM); - if (err) - return err; -- dbg_chk_lpt_sz(c, 2, alen - wlen); -+ dbg_chk_lpt_sz(c, 2, c->leb_size - offs); - err = realloc_lpt_leb(c, &lnum); - if (err) - goto no_space; -- offs = 0; -+ offs = from = 0; - ubifs_assert(lnum >= c->lpt_first && - lnum <= c->lpt_last); - err = ubifs_leb_unmap(c, lnum); -@@ -546,29 +546,31 @@ static int write_cnodes(struct ubifs_inf - no_space: - ubifs_err("LPT out of space mismatch"); - dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " -- "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); -+ "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); - dbg_dump_lpt_info(c); -+ dbg_dump_lpt_lebs(c); -+ dump_stack(); - return err; - } - - /** -- * next_pnode - find next pnode. -+ * next_pnode_to_dirty - find next pnode to dirty. - * @c: UBIFS file-system description object - * @pnode: pnode - * -- * This function returns the next pnode or %NULL if there are no more pnodes. -+ * This function returns the next pnode to dirty or %NULL if there are no more -+ * pnodes. Note that pnodes that have never been written (lnum == 0) are -+ * skipped. - */ --static struct ubifs_pnode *next_pnode(struct ubifs_info *c, -- struct ubifs_pnode *pnode) -+static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c, -+ struct ubifs_pnode *pnode) - { - struct ubifs_nnode *nnode; - int iip; - - /* Try to go right */ - nnode = pnode->parent; -- iip = pnode->iip + 1; -- if (iip < UBIFS_LPT_FANOUT) { -- /* We assume here that LEB zero is never an LPT LEB */ -+ for (iip = pnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) { - if (nnode->nbranch[iip].lnum) - return ubifs_get_pnode(c, nnode, iip); - } -@@ -579,8 +581,11 @@ static struct ubifs_pnode *next_pnode(st - nnode = nnode->parent; - if (!nnode) - return NULL; -- /* We assume here that LEB zero is never an LPT LEB */ -- } while (iip >= UBIFS_LPT_FANOUT || !nnode->nbranch[iip].lnum); -+ for (; iip < UBIFS_LPT_FANOUT; iip++) { -+ if (nnode->nbranch[iip].lnum) -+ break; -+ } -+ } while (iip >= UBIFS_LPT_FANOUT); - - /* Go right */ - nnode = ubifs_get_nnode(c, nnode, iip); -@@ -589,12 +594,29 @@ static struct ubifs_pnode *next_pnode(st - - /* Go down to level 1 */ - while (nnode->level > 1) { -- nnode = ubifs_get_nnode(c, nnode, 0); -+ for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) { -+ if (nnode->nbranch[iip].lnum) -+ break; -+ } -+ if (iip >= UBIFS_LPT_FANOUT) { -+ /* -+ * Should not happen, but we need to keep going -+ * if it does. 
-+ */ -+ iip = 0; -+ } -+ nnode = ubifs_get_nnode(c, nnode, iip); - if (IS_ERR(nnode)) - return (void *)nnode; - } - -- return ubifs_get_pnode(c, nnode, 0); -+ for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) -+ if (nnode->nbranch[iip].lnum) -+ break; -+ if (iip >= UBIFS_LPT_FANOUT) -+ /* Should not happen, but we need to keep going if it does */ -+ iip = 0; -+ return ubifs_get_pnode(c, nnode, iip); - } - - /** -@@ -684,7 +706,7 @@ static int make_tree_dirty(struct ubifs_ - pnode = pnode_lookup(c, 0); - while (pnode) { - do_make_pnode_dirty(c, pnode); -- pnode = next_pnode(c, pnode); -+ pnode = next_pnode_to_dirty(c, pnode); - if (IS_ERR(pnode)) - return PTR_ERR(pnode); - } -@@ -749,7 +771,7 @@ static void lpt_tgc_start(struct ubifs_i - * LPT trivial garbage collection is where a LPT LEB contains only dirty and - * free space and so may be reused as soon as the next commit is completed. - * This function is called after the commit is completed (master node has been -- * written) and unmaps LPT LEBs that were marked for trivial GC. -+ * written) and un-maps LPT LEBs that were marked for trivial GC. - */ - static int lpt_tgc_end(struct ubifs_info *c) - { -@@ -1025,7 +1047,7 @@ static int make_node_dirty(struct ubifs_ - * @c: UBIFS file-system description object - * @node_type: LPT node type - */ --static int get_lpt_node_len(struct ubifs_info *c, int node_type) -+static int get_lpt_node_len(const struct ubifs_info *c, int node_type) - { - switch (node_type) { - case UBIFS_LPT_NNODE: -@@ -1046,7 +1068,7 @@ static int get_lpt_node_len(struct ubifs - * @buf: buffer - * @len: length of buffer - */ --static int get_pad_len(struct ubifs_info *c, uint8_t *buf, int len) -+static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len) - { - int offs, pad_len; - -@@ -1063,7 +1085,8 @@ static int get_pad_len(struct ubifs_info - * @buf: buffer - * @node_num: node number is returned here - */ --static int get_lpt_node_type(struct ubifs_info *c, uint8_t *buf, int *node_num) -+static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf, -+ int *node_num) - { - uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; - int pos = 0, node_type; -@@ -1081,7 +1104,7 @@ static int get_lpt_node_type(struct ubif - * - * This function returns %1 if the buffer contains a node or %0 if it does not. - */ --static int is_a_node(struct ubifs_info *c, uint8_t *buf, int len) -+static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len) - { - uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; - int pos = 0, node_type, node_len; -@@ -1105,7 +1128,6 @@ static int is_a_node(struct ubifs_info * - return 1; - } - -- - /** - * lpt_gc_lnum - garbage collect a LPT LEB. - * @c: UBIFS file-system description object -@@ -1463,7 +1485,7 @@ void ubifs_lpt_free(struct ubifs_info *c - #ifdef CONFIG_UBIFS_FS_DEBUG - - /** -- * dbg_is_all_ff - determine if a buffer contains only 0xff bytes. -+ * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes. 
- * @buf: buffer - * @len: buffer length - */ -@@ -1488,7 +1510,7 @@ static int dbg_is_nnode_dirty(struct ubi - struct ubifs_nnode *nnode; - int hght; - -- /* Entire tree is in memory so first_nnode / next_nnode are ok */ -+ /* Entire tree is in memory so first_nnode / next_nnode are OK */ - nnode = first_nnode(c, &hght); - for (; nnode; nnode = next_nnode(c, nnode, &hght)) { - struct ubifs_nbranch *branch; -@@ -1602,7 +1624,10 @@ static int dbg_check_ltab_lnum(struct ub - { - int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len; - int ret; -- void *buf = c->dbg_buf; -+ void *buf = c->dbg->buf; -+ -+ if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) -+ return 0; - - dbg_lp("LEB %d", lnum); - err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); -@@ -1704,6 +1729,9 @@ int dbg_chk_lpt_free_spc(struct ubifs_in - long long free = 0; - int i; - -+ if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) -+ return 0; -+ - for (i = 0; i < c->lpt_lebs; i++) { - if (c->ltab[i].tgc || c->ltab[i].cmt) - continue; -@@ -1716,6 +1744,8 @@ int dbg_chk_lpt_free_spc(struct ubifs_in - dbg_err("LPT space error: free %lld lpt_sz %lld", - free, c->lpt_sz); - dbg_dump_lpt_info(c); -+ dbg_dump_lpt_lebs(c); -+ dump_stack(); - return -EINVAL; - } - return 0; -@@ -1724,22 +1754,32 @@ int dbg_chk_lpt_free_spc(struct ubifs_in - /** - * dbg_chk_lpt_sz - check LPT does not write more than LPT size. - * @c: the UBIFS file-system description object -- * @action: action -+ * @action: what to do - * @len: length written - * - * This function returns %0 on success and a negative error code on failure. -+ * The @action argument may be one of: -+ * o %0 - LPT debugging checking starts, initialize debugging variables; -+ * o %1 - wrote an LPT node, increase LPT size by @len bytes; -+ * o %2 - switched to a different LEB and wasted @len bytes; -+ * o %3 - check that we've written the right number of bytes. 
-+ * o %4 - wasted @len bytes; - */ - int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) - { -+ struct ubifs_debug_info *d = c->dbg; - long long chk_lpt_sz, lpt_sz; - int err = 0; - -+ if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) -+ return 0; -+ - switch (action) { - case 0: -- c->chk_lpt_sz = 0; -- c->chk_lpt_sz2 = 0; -- c->chk_lpt_lebs = 0; -- c->chk_lpt_wastage = 0; -+ d->chk_lpt_sz = 0; -+ d->chk_lpt_sz2 = 0; -+ d->chk_lpt_lebs = 0; -+ d->chk_lpt_wastage = 0; - if (c->dirty_pn_cnt > c->pnode_cnt) { - dbg_err("dirty pnodes %d exceed max %d", - c->dirty_pn_cnt, c->pnode_cnt); -@@ -1752,35 +1792,35 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, - } - return err; - case 1: -- c->chk_lpt_sz += len; -+ d->chk_lpt_sz += len; - return 0; - case 2: -- c->chk_lpt_sz += len; -- c->chk_lpt_wastage += len; -- c->chk_lpt_lebs += 1; -+ d->chk_lpt_sz += len; -+ d->chk_lpt_wastage += len; -+ d->chk_lpt_lebs += 1; - return 0; - case 3: - chk_lpt_sz = c->leb_size; -- chk_lpt_sz *= c->chk_lpt_lebs; -+ chk_lpt_sz *= d->chk_lpt_lebs; - chk_lpt_sz += len - c->nhead_offs; -- if (c->chk_lpt_sz != chk_lpt_sz) { -+ if (d->chk_lpt_sz != chk_lpt_sz) { - dbg_err("LPT wrote %lld but space used was %lld", -- c->chk_lpt_sz, chk_lpt_sz); -+ d->chk_lpt_sz, chk_lpt_sz); - err = -EINVAL; - } -- if (c->chk_lpt_sz > c->lpt_sz) { -+ if (d->chk_lpt_sz > c->lpt_sz) { - dbg_err("LPT wrote %lld but lpt_sz is %lld", -- c->chk_lpt_sz, c->lpt_sz); -+ d->chk_lpt_sz, c->lpt_sz); - err = -EINVAL; - } -- if (c->chk_lpt_sz2 && c->chk_lpt_sz != c->chk_lpt_sz2) { -+ if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) { - dbg_err("LPT layout size %lld but wrote %lld", -- c->chk_lpt_sz, c->chk_lpt_sz2); -+ d->chk_lpt_sz, d->chk_lpt_sz2); - err = -EINVAL; - } -- if (c->chk_lpt_sz2 && c->new_nhead_offs != len) { -+ if (d->chk_lpt_sz2 && d->new_nhead_offs != len) { - dbg_err("LPT new nhead offs: expected %d was %d", -- c->new_nhead_offs, len); -+ d->new_nhead_offs, len); - err = -EINVAL; - } - lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; -@@ -1788,26 +1828,146 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, - lpt_sz += c->ltab_sz; - if (c->big_lpt) - lpt_sz += c->lsave_sz; -- if (c->chk_lpt_sz - c->chk_lpt_wastage > lpt_sz) { -+ if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) { - dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", -- c->chk_lpt_sz, c->chk_lpt_wastage, lpt_sz); -+ d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); - err = -EINVAL; - } -- if (err) -+ if (err) { - dbg_dump_lpt_info(c); -- c->chk_lpt_sz2 = c->chk_lpt_sz; -- c->chk_lpt_sz = 0; -- c->chk_lpt_wastage = 0; -- c->chk_lpt_lebs = 0; -- c->new_nhead_offs = len; -+ dbg_dump_lpt_lebs(c); -+ dump_stack(); -+ } -+ d->chk_lpt_sz2 = d->chk_lpt_sz; -+ d->chk_lpt_sz = 0; -+ d->chk_lpt_wastage = 0; -+ d->chk_lpt_lebs = 0; -+ d->new_nhead_offs = len; - return err; - case 4: -- c->chk_lpt_sz += len; -- c->chk_lpt_wastage += len; -+ d->chk_lpt_sz += len; -+ d->chk_lpt_wastage += len; - return 0; - default: - return -EINVAL; - } - } - -+/** -+ * dbg_dump_lpt_leb - dump an LPT LEB. -+ * @c: UBIFS file-system description object -+ * @lnum: LEB number to dump -+ * -+ * This function dumps an LEB from LPT area. Nodes in this area are very -+ * different to nodes in the main area (e.g., they do not have common headers, -+ * they do not have 8-byte alignments, etc), so we have a separate function to -+ * dump LPT area LEBs. Note, LPT has to be locked by the caller. 
-+ */ -+static void dump_lpt_leb(const struct ubifs_info *c, int lnum) -+{ -+ int err, len = c->leb_size, node_type, node_num, node_len, offs; -+ void *buf = c->dbg->buf; -+ -+ printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", -+ current->pid, lnum); -+ err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); -+ if (err) { -+ ubifs_err("cannot read LEB %d, error %d", lnum, err); -+ return; -+ } -+ while (1) { -+ offs = c->leb_size - len; -+ if (!is_a_node(c, buf, len)) { -+ int pad_len; -+ -+ pad_len = get_pad_len(c, buf, len); -+ if (pad_len) { -+ printk(KERN_DEBUG "LEB %d:%d, pad %d bytes\n", -+ lnum, offs, pad_len); -+ buf += pad_len; -+ len -= pad_len; -+ continue; -+ } -+ if (len) -+ printk(KERN_DEBUG "LEB %d:%d, free %d bytes\n", -+ lnum, offs, len); -+ break; -+ } -+ -+ node_type = get_lpt_node_type(c, buf, &node_num); -+ switch (node_type) { -+ case UBIFS_LPT_PNODE: -+ { -+ node_len = c->pnode_sz; -+ if (c->big_lpt) -+ printk(KERN_DEBUG "LEB %d:%d, pnode num %d\n", -+ lnum, offs, node_num); -+ else -+ printk(KERN_DEBUG "LEB %d:%d, pnode\n", -+ lnum, offs); -+ break; -+ } -+ case UBIFS_LPT_NNODE: -+ { -+ int i; -+ struct ubifs_nnode nnode; -+ -+ node_len = c->nnode_sz; -+ if (c->big_lpt) -+ printk(KERN_DEBUG "LEB %d:%d, nnode num %d, ", -+ lnum, offs, node_num); -+ else -+ printk(KERN_DEBUG "LEB %d:%d, nnode, ", -+ lnum, offs); -+ err = ubifs_unpack_nnode(c, buf, &nnode); -+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) { -+ printk("%d:%d", nnode.nbranch[i].lnum, -+ nnode.nbranch[i].offs); -+ if (i != UBIFS_LPT_FANOUT - 1) -+ printk(", "); -+ } -+ printk("\n"); -+ break; -+ } -+ case UBIFS_LPT_LTAB: -+ node_len = c->ltab_sz; -+ printk(KERN_DEBUG "LEB %d:%d, ltab\n", -+ lnum, offs); -+ break; -+ case UBIFS_LPT_LSAVE: -+ node_len = c->lsave_sz; -+ printk(KERN_DEBUG "LEB %d:%d, lsave len\n", lnum, offs); -+ break; -+ default: -+ ubifs_err("LPT node type %d not recognized", node_type); -+ return; -+ } -+ -+ buf += node_len; -+ len -= node_len; -+ } -+ -+ printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", -+ current->pid, lnum); -+} -+ -+/** -+ * dbg_dump_lpt_lebs - dump LPT lebs. -+ * @c: UBIFS file-system description object -+ * -+ * This function dumps all LPT LEBs. The caller has to make sure the LPT is -+ * locked. -+ */ -+void dbg_dump_lpt_lebs(const struct ubifs_info *c) -+{ -+ int i; -+ -+ printk(KERN_DEBUG "(pid %d) start dumping all LPT LEBs\n", -+ current->pid); -+ for (i = 0; i < c->lpt_lebs; i++) -+ dump_lpt_leb(c, i + c->lpt_first); -+ printk(KERN_DEBUG "(pid %d) finish dumping all LPT LEBs\n", -+ current->pid); -+} -+ - #endif /* CONFIG_UBIFS_FS_DEBUG */ -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/master.c linux-omap-2.6.28-nokia1/fs/ubifs/master.c ---- linux-omap-2.6.28-omap1/fs/ubifs/master.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/master.c 2011-06-22 13:19:33.243063269 +0200 -@@ -354,7 +354,7 @@ int ubifs_write_master(struct ubifs_info - int err, lnum, offs, len; - - if (c->ro_media) -- return -EINVAL; -+ return -EROFS; - - lnum = UBIFS_MST_LNUM; - offs = c->mst_offs + c->mst_node_alsz; -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/orphan.c linux-omap-2.6.28-nokia1/fs/ubifs/orphan.c ---- linux-omap-2.6.28-omap1/fs/ubifs/orphan.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/orphan.c 2011-06-22 13:19:33.243063269 +0200 -@@ -46,7 +46,7 @@ - * Orphans are accumulated in a rb-tree. When an inode's link count drops to - * zero, the inode number is added to the rb-tree. 
It is removed from the tree - * when the inode is deleted. Any new orphans that are in the orphan tree when -- * the commit is run, are written to the orphan area in 1 or more orph nodes. -+ * the commit is run, are written to the orphan area in 1 or more orphan nodes. - * If the orphan area is full, it is consolidated to make space. There is - * always enough space because validation prevents the user from creating more - * than the maximum number of orphans allowed. -@@ -231,7 +231,7 @@ static int tot_avail_orphs(struct ubifs_ - } - - /** -- * do_write_orph_node - write a node -+ * do_write_orph_node - write a node to the orphan head. - * @c: UBIFS file-system description object - * @len: length of node - * @atomic: write atomically -@@ -264,11 +264,11 @@ static int do_write_orph_node(struct ubi - } - - /** -- * write_orph_node - write an orph node -+ * write_orph_node - write an orphan node. - * @c: UBIFS file-system description object - * @atomic: write atomically - * -- * This function builds an orph node from the cnext list and writes it to the -+ * This function builds an orphan node from the cnext list and writes it to the - * orphan head. On success, %0 is returned, otherwise a negative error code - * is returned. - */ -@@ -326,11 +326,11 @@ static int write_orph_node(struct ubifs_ - } - - /** -- * write_orph_nodes - write orph nodes until there are no more to commit -+ * write_orph_nodes - write orphan nodes until there are no more to commit. - * @c: UBIFS file-system description object - * @atomic: write atomically - * -- * This function writes orph nodes for all the orphans to commit. On success, -+ * This function writes orphan nodes for all the orphans to commit. On success, - * %0 is returned, otherwise a negative error code is returned. - */ - static int write_orph_nodes(struct ubifs_info *c, int atomic) -@@ -478,14 +478,14 @@ int ubifs_orphan_end_commit(struct ubifs - } - - /** -- * clear_orphans - erase all LEBs used for orphans. -+ * ubifs_clear_orphans - erase all LEBs used for orphans. - * @c: UBIFS file-system description object - * - * If recovery is not required, then the orphans from the previous session - * are not needed. This function locates the LEBs used to record - * orphans, and un-maps them. - */ --static int clear_orphans(struct ubifs_info *c) -+int ubifs_clear_orphans(struct ubifs_info *c) - { - int lnum, err; - -@@ -547,9 +547,9 @@ static int insert_dead_orphan(struct ubi - * do_kill_orphans - remove orphan inodes from the index. - * @c: UBIFS file-system description object - * @sleb: scanned LEB -- * @last_cmt_no: cmt_no of last orph node read is passed and returned here -+ * @last_cmt_no: cmt_no of last orphan node read is passed and returned here - * @outofdate: whether the LEB is out of date is returned here -- * @last_flagged: whether the end orph node is encountered -+ * @last_flagged: whether the end orphan node is encountered - * - * This function is a helper to the 'kill_orphans()' function. It goes through - * every orphan node in a LEB and for every inode number recorded, removes -@@ -580,8 +580,8 @@ static int do_kill_orphans(struct ubifs_ - /* - * The commit number on the master node may be less, because - * of a failed commit. 
If there are several failed commits in a -- * row, the commit number written on orph nodes will continue to -- * increase (because the commit number is adjusted here) even -+ * row, the commit number written on orphan nodes will continue -+ * to increase (because the commit number is adjusted here) even - * though the commit number on the master node stays the same - * because the master node has not been re-written. - */ -@@ -589,9 +589,9 @@ static int do_kill_orphans(struct ubifs_ - c->cmt_no = cmt_no; - if (cmt_no < *last_cmt_no && *last_flagged) { - /* -- * The last orph node had a higher commit number and was -- * flagged as the last written for that commit number. -- * That makes this orph node, out of date. -+ * The last orphan node had a higher commit number and -+ * was flagged as the last written for that commit -+ * number. That makes this orphan node, out of date. - */ - if (!first) { - ubifs_err("out of order commit number %llu in " -@@ -658,10 +658,10 @@ static int kill_orphans(struct ubifs_inf - /* - * Orph nodes always start at c->orph_first and are written to each - * successive LEB in turn. Generally unused LEBs will have been unmapped -- * but may contain out of date orph nodes if the unmap didn't go -- * through. In addition, the last orph node written for each commit is -+ * but may contain out of date orphan nodes if the unmap didn't go -+ * through. In addition, the last orphan node written for each commit is - * marked (top bit of orph->cmt_no is set to 1). It is possible that -- * there are orph nodes from the next commit (i.e. the commit did not -+ * there are orphan nodes from the next commit (i.e. the commit did not - * complete successfully). In that case, no orphans will have been lost - * due to the way that orphans are written, and any orphans added will - * be valid orphans anyway and so can be deleted. -@@ -718,7 +718,7 @@ int ubifs_mount_orphans(struct ubifs_inf - if (unclean) - err = kill_orphans(c); - else if (!read_only) -- err = clear_orphans(c); -+ err = ubifs_clear_orphans(c); - - return err; - } -@@ -899,7 +899,7 @@ static int dbg_scan_orphans(struct ubifs - for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { - struct ubifs_scan_leb *sleb; - -- sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); -+ sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); - if (IS_ERR(sleb)) { - err = PTR_ERR(sleb); - break; -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/recovery.c linux-omap-2.6.28-nokia1/fs/ubifs/recovery.c ---- linux-omap-2.6.28-omap1/fs/ubifs/recovery.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/recovery.c 2011-06-22 13:19:33.243063269 +0200 -@@ -425,59 +425,35 @@ static void clean_buf(const struct ubifs - * @lnum: LEB number of the LEB from which @buf was read - * @offs: offset from which @buf was read - * -- * This function scans @buf for more nodes and returns %0 is a node is found and -- * %1 if no more nodes are found. -+ * This function ensures that the corrupted node at @offs is the last thing -+ * written to a LEB. This function returns %1 if more data is not found and -+ * %0 if more data is found. 
- */ - static int no_more_nodes(const struct ubifs_info *c, void *buf, int len, - int lnum, int offs) - { -- int skip, next_offs = 0; -+ struct ubifs_ch *ch = buf; -+ int skip, dlen = le32_to_cpu(ch->len); - -- if (len > UBIFS_DATA_NODE_SZ) { -- struct ubifs_ch *ch = buf; -- int dlen = le32_to_cpu(ch->len); -- -- if (ch->node_type == UBIFS_DATA_NODE && dlen >= UBIFS_CH_SZ && -- dlen <= UBIFS_MAX_DATA_NODE_SZ) -- /* The corrupt node looks like a data node */ -- next_offs = ALIGN(offs + dlen, 8); -- } -- -- if (c->min_io_size == 1) -- skip = 8; -- else -- skip = ALIGN(offs + 1, c->min_io_size) - offs; -- -- offs += skip; -- buf += skip; -- len -= skip; -- while (len > 8) { -- struct ubifs_ch *ch = buf; -- uint32_t magic = le32_to_cpu(ch->magic); -- int ret; -- -- if (magic == UBIFS_NODE_MAGIC) { -- ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1); -- if (ret == SCANNED_A_NODE || ret > 0) { -- /* -- * There is a small chance this is just data in -- * a data node, so check that possibility. e.g. -- * this is part of a file that itself contains -- * a UBIFS image. -- */ -- if (next_offs && offs + le32_to_cpu(ch->len) <= -- next_offs) -- continue; -- dbg_rcvry("unexpected node at %d:%d", lnum, -- offs); -- return 0; -- } -- } -- offs += 8; -- buf += 8; -- len -= 8; -+ /* Check for empty space after the corrupt node's common header */ -+ skip = ALIGN(offs + UBIFS_CH_SZ, c->min_io_size) - offs; -+ if (is_empty(buf + skip, len - skip)) -+ return 1; -+ /* -+ * The area after the common header size is not empty, so the common -+ * header must be intact. Check it. -+ */ -+ if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) { -+ dbg_rcvry("unexpected bad common header at %d:%d", lnum, offs); -+ return 0; - } -- return 1; -+ /* Now we know the corrupt node's length we can skip over it */ -+ skip = ALIGN(offs + dlen, c->min_io_size) - offs; -+ /* After which there should be empty space */ -+ if (is_empty(buf + skip, len - skip)) -+ return 1; -+ dbg_rcvry("unexpected data at %d:%d", lnum, offs + skip); -+ return 0; - } - - /** -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/replay.c linux-omap-2.6.28-nokia1/fs/ubifs/replay.c ---- linux-omap-2.6.28-omap1/fs/ubifs/replay.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/replay.c 2011-06-22 13:19:33.243063269 +0200 -@@ -144,7 +144,7 @@ static int set_bud_lprops(struct ubifs_i - /* - * If the replay order was perfect the dirty space would now be - * zero. The order is not perfect because the the journal heads -- * race with eachother. This is not a problem but is does mean -+ * race with each other. This is not a problem but is does mean - * that the dirty space may temporarily exceed c->leb_size - * during the replay. - */ -@@ -656,7 +656,7 @@ out_dump: - * @dirty: amount of dirty space from padding and deletion nodes - * - * This function inserts a reference node to the replay tree and returns zero -- * in case of success ort a negative error code in case of failure. -+ * in case of success or a negative error code in case of failure. - */ - static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, - unsigned long long sqnum, int free, int dirty) -@@ -883,7 +883,7 @@ static int replay_log_leb(struct ubifs_i - * This means that we reached end of log and now - * look to the older log data, which was already - * committed but the eraseblock was not erased (UBIFS -- * only unmaps it). So this basically means we have to -+ * only un-maps it). So this basically means we have to - * exit with "end of log" code. 
- */ - err = 1; -@@ -957,7 +957,7 @@ out: - return err; - - out_dump: -- ubifs_err("log error detected while replying the log at LEB %d:%d", -+ ubifs_err("log error detected while replaying the log at LEB %d:%d", - lnum, offs + snod->offs); - dbg_dump_node(c, snod->node); - ubifs_scan_destroy(sleb); -@@ -1062,6 +1062,15 @@ int ubifs_replay_journal(struct ubifs_in - if (err) - goto out; - -+ /* -+ * UBIFS budgeting calculations use @c->budg_uncommitted_idx variable -+ * to roughly estimate index growth. Things like @c->min_idx_lebs -+ * depend on it. This means we have to initialize it to make sure -+ * budgeting works properly. -+ */ -+ c->budg_uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt); -+ c->budg_uncommitted_idx *= c->max_idx_node_sz; -+ - ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); - dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " - "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/sb.c linux-omap-2.6.28-nokia1/fs/ubifs/sb.c ---- linux-omap-2.6.28-omap1/fs/ubifs/sb.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/sb.c 2011-06-22 13:19:33.243063269 +0200 -@@ -28,6 +28,7 @@ - - #include "ubifs.h" - #include -+#include - - /* - * Default journal size in logical eraseblocks as a percent of total -@@ -80,7 +81,7 @@ static int create_default_filesystem(str - int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first; - int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; - int min_leb_cnt = UBIFS_MIN_LEB_CNT; -- uint64_t tmp64, main_bytes; -+ long long tmp64, main_bytes; - __le64 tmp_le64; - - /* Some functions called from here depend on the @c->key_len filed */ -@@ -160,7 +161,7 @@ static int create_default_filesystem(str - if (!sup) - return -ENOMEM; - -- tmp64 = (uint64_t)max_buds * c->leb_size; -+ tmp64 = (long long)max_buds * c->leb_size; - if (big_lpt) - sup_flags |= UBIFS_FLG_BIGLPT; - -@@ -179,14 +180,19 @@ static int create_default_filesystem(str - sup->fanout = cpu_to_le32(DEFAULT_FANOUT); - sup->lsave_cnt = cpu_to_le32(c->lsave_cnt); - sup->fmt_version = cpu_to_le32(UBIFS_FORMAT_VERSION); -- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); - sup->time_gran = cpu_to_le32(DEFAULT_TIME_GRAN); -+ if (c->mount_opts.override_compr) { -+ if (c->mount_opts.compr_type == UBIFS_COMPR_LZO999) -+ sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); -+ else -+ sup->default_compr = cpu_to_le16(c->mount_opts.compr_type); -+ } else -+ sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); - - generate_random_uuid(sup->uuid); - -- main_bytes = (uint64_t)main_lebs * c->leb_size; -- tmp64 = main_bytes * DEFAULT_RP_PERCENT; -- do_div(tmp64, 100); -+ main_bytes = (long long)main_lebs * c->leb_size; -+ tmp64 = div_u64(main_bytes * DEFAULT_RP_PERCENT, 100); - if (tmp64 > DEFAULT_MAX_RP_SIZE) - tmp64 = DEFAULT_MAX_RP_SIZE; - sup->rp_size = cpu_to_le64(tmp64); -@@ -582,16 +588,15 @@ int ubifs_read_superblock(struct ubifs_i - c->jhead_cnt = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT; - c->fanout = le32_to_cpu(sup->fanout); - c->lsave_cnt = le32_to_cpu(sup->lsave_cnt); -- c->default_compr = le16_to_cpu(sup->default_compr); - c->rp_size = le64_to_cpu(sup->rp_size); - c->rp_uid = le32_to_cpu(sup->rp_uid); - c->rp_gid = le32_to_cpu(sup->rp_gid); - sup_flags = le32_to_cpu(sup->flags); -+ if (!c->mount_opts.override_compr) -+ c->default_compr = le16_to_cpu(sup->default_compr); - - c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran); -- - memcpy(&c->uuid, 
&sup->uuid, 16); -- - c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT); - - /* Automatically increase file system size to the maximum size */ -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/super.c linux-omap-2.6.28-nokia1/fs/ubifs/super.c ---- linux-omap-2.6.28-omap1/fs/ubifs/super.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/super.c 2011-06-22 13:19:33.243063269 +0200 -@@ -34,6 +34,8 @@ - #include - #include - #include -+#include -+#include - #include "ubifs.h" - - /* -@@ -358,6 +360,11 @@ static void ubifs_delete_inode(struct in - out: - if (ui->dirty) - ubifs_release_dirty_inode_budget(c, ui); -+ else { -+ /* We've deleted something - clean the "no space" flags */ -+ c->nospace = c->nospace_rp = 0; -+ smp_wmb(); -+ } - clear_inode(inode); - } - -@@ -395,6 +402,7 @@ static int ubifs_statfs(struct dentry *d - buf->f_namelen = UBIFS_MAX_NLEN; - buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]); - buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]); -+ ubifs_assert(buf->f_bfree <= c->block_cnt); - return 0; - } - -@@ -417,39 +425,62 @@ static int ubifs_show_options(struct seq - else if (c->mount_opts.chk_data_crc == 1) - seq_printf(s, ",no_chk_data_crc"); - -+ if (c->mount_opts.override_compr) { -+ seq_printf(s, ",compr="); -+ seq_printf(s, ubifs_compr_name(c->mount_opts.compr_type)); -+ } -+ - return 0; - } - - static int ubifs_sync_fs(struct super_block *sb, int wait) - { -+ int i, err; - struct ubifs_info *c = sb->s_fs_info; -- int i, ret = 0, err; -- long long bud_bytes; -+ struct writeback_control wbc = { -+ .sync_mode = WB_SYNC_ALL, -+ .range_start = 0, -+ .range_end = LLONG_MAX, -+ .nr_to_write = LONG_MAX, -+ }; - -- if (c->jheads) { -- for (i = 0; i < c->jhead_cnt; i++) { -- err = ubifs_wbuf_sync(&c->jheads[i].wbuf); -- if (err && !ret) -- ret = err; -- } -+ /* -+ * Zero @wait is just an advisory thing to help the file system shove -+ * lots of data into the queues, and there will be the second -+ * '->sync_fs()' call, with non-zero @wait. -+ */ -+ if (!wait) -+ return 0; - -- /* Commit the journal unless it has too little data */ -- spin_lock(&c->buds_lock); -- bud_bytes = c->bud_bytes; -- spin_unlock(&c->buds_lock); -- if (bud_bytes > c->leb_size) { -- err = ubifs_run_commit(c); -- if (err) -- return err; -- } -- } -+ if (sb->s_flags & MS_RDONLY) -+ return 0; - - /* -- * We ought to call sync for c->ubi but it does not have one. If it had -- * it would in turn call mtd->sync, however mtd operations are -- * synchronous anyway, so we don't lose any sleep here. -+ * VFS calls '->sync_fs()' before synchronizing all dirty inodes and -+ * pages, so synchronize them first, then commit the journal. Strictly -+ * speaking, it is not necessary to commit the journal here, -+ * synchronizing write-buffers would be enough. But committing makes -+ * UBIFS free space predictions much more accurate, so we want to let -+ * the user be able to get more accurate results of 'statfs()' after -+ * they synchronize the file system. - */ -- return ret; -+ generic_sync_sb_inodes(sb, &wbc); -+ -+ /* -+ * Synchronize write buffers, because 'ubifs_run_commit()' does not -+ * do this if it waits for an already running commit. 
-+ */ -+ for (i = 0; i < c->jhead_cnt; i++) { -+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf); -+ if (err) -+ return err; -+ } -+ -+ err = ubifs_run_commit(c); -+ if (err) -+ return err; -+ -+ return ubi_sync(c->vi.ubi_num); - } - - /** -@@ -548,15 +579,8 @@ static int init_constants_early(struct u - c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX; - - /* -- * Initialize dead and dark LEB space watermarks. -- * -- * Dead space is the space which cannot be used. Its watermark is -- * equivalent to min. I/O unit or minimum node size if it is greater -- * then min. I/O unit. -- * -- * Dark space is the space which might be used, or might not, depending -- * on which node should be written to the LEB. Its watermark is -- * equivalent to maximum UBIFS node size. -+ * Initialize dead and dark LEB space watermarks. See gc.c for comments -+ * about these values. - */ - c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); - c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); -@@ -596,7 +620,7 @@ static int bud_wbuf_callback(struct ubif - } - - /* -- * init_constants_late - initialize UBIFS constants. -+ * init_constants_sb - initialize UBIFS constants. - * @c: UBIFS file-system description object - * - * This is a helper function which initializes various UBIFS constants after -@@ -604,10 +628,10 @@ static int bud_wbuf_callback(struct ubif - * makes sure they are all right. Returns zero in case of success and a - * negative error code in case of failure. - */ --static int init_constants_late(struct ubifs_info *c) -+static int init_constants_sb(struct ubifs_info *c) - { - int tmp, err; -- uint64_t tmp64; -+ long long tmp64; - - c->main_bytes = (long long)c->main_lebs * c->leb_size; - c->max_znode_sz = sizeof(struct ubifs_znode) + -@@ -634,9 +658,8 @@ static int init_constants_late(struct ub - * Make sure that the log is large enough to fit reference nodes for - * all buds plus one reserved LEB. - */ -- tmp64 = c->max_bud_bytes; -- tmp = do_div(tmp64, c->leb_size); -- c->max_bud_cnt = tmp64 + !!tmp; -+ tmp64 = c->max_bud_bytes + c->leb_size - 1; -+ c->max_bud_cnt = div_u64(tmp64, c->leb_size); - tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1); - tmp /= c->leb_size; - tmp += 1; -@@ -672,7 +695,7 @@ static int init_constants_late(struct ub - * Consequently, if the journal is too small, UBIFS will treat it as - * always full. - */ -- tmp64 = (uint64_t)(c->jhead_cnt + 1) * c->leb_size + 1; -+ tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1; - if (c->bg_bud_bytes < tmp64) - c->bg_bud_bytes = tmp64; - if (c->max_bud_bytes < tmp64 + c->leb_size) -@@ -682,6 +705,21 @@ static int init_constants_late(struct ub - if (err) - return err; - -+ return 0; -+} -+ -+/* -+ * init_constants_master - initialize UBIFS constants. -+ * @c: UBIFS file-system description object -+ * -+ * This is a helper function which initializes various UBIFS constants after -+ * the master node has been read. It also checks various UBIFS parameters and -+ * makes sure they are all right. -+ */ -+static void init_constants_master(struct ubifs_info *c) -+{ -+ long long tmp64; -+ - c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); - - /* -@@ -690,26 +728,25 @@ static int init_constants_late(struct ub - * necessary to report something for the 'statfs()' call. - * - * Subtract the LEB reserved for GC, the LEB which is reserved for -- * deletions, and assume only one journal head is available. -+ * deletions, minimum LEBs for the index, and assume only one journal -+ * head is available. 
- */ -- tmp64 = c->main_lebs - 2 - c->jhead_cnt + 1; -- tmp64 *= (uint64_t)c->leb_size - c->leb_overhead; -+ tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; -+ tmp64 *= (long long)c->leb_size - c->leb_overhead; - tmp64 = ubifs_reported_space(c, tmp64); - c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; -- -- return 0; - } - - /** - * take_gc_lnum - reserve GC LEB. - * @c: UBIFS file-system description object - * -- * This function ensures that the LEB reserved for garbage collection is -- * unmapped and is marked as "taken" in lprops. We also have to set free space -- * to LEB size and dirty space to zero, because lprops may contain out-of-date -- * information if the file-system was un-mounted before it has been committed. -- * This function returns zero in case of success and a negative error code in -- * case of failure. -+ * This function ensures that the LEB reserved for garbage collection is marked -+ * as "taken" in lprops. We also have to set free space to LEB size and dirty -+ * space to zero, because lprops may contain out-of-date information if the -+ * file-system was un-mounted before it has been committed. This function -+ * returns zero in case of success and a negative error code in case of -+ * failure. - */ - static int take_gc_lnum(struct ubifs_info *c) - { -@@ -720,10 +757,6 @@ static int take_gc_lnum(struct ubifs_inf - return -EINVAL; - } - -- err = ubifs_leb_unmap(c, c->gc_lnum); -- if (err) -- return err; -- - /* And we have to tell lprops that this LEB is taken */ - err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0, - LPROPS_TAKEN, 0, 0); -@@ -763,7 +796,7 @@ static int alloc_wbufs(struct ubifs_info - * does not need to be synchronized by timer. - */ - c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; -- c->jheads[GCHD].wbuf.timeout = 0; -+ c->jheads[GCHD].wbuf.no_timer = 1; - - return 0; - } -@@ -878,6 +911,7 @@ static int check_volume_empty(struct ubi - * Opt_no_bulk_read: disable bulk-reads - * Opt_chk_data_crc: check CRCs when reading data nodes - * Opt_no_chk_data_crc: do not check CRCs when reading data nodes -+ * Opt_override_compr: override default compressor - * Opt_err: just end of array marker - */ - enum { -@@ -887,6 +921,7 @@ enum { - Opt_no_bulk_read, - Opt_chk_data_crc, - Opt_no_chk_data_crc, -+ Opt_override_compr, - Opt_err, - }; - -@@ -897,6 +932,7 @@ static const match_table_t tokens = { - {Opt_no_bulk_read, "no_bulk_read"}, - {Opt_chk_data_crc, "chk_data_crc"}, - {Opt_no_chk_data_crc, "no_chk_data_crc"}, -+ {Opt_override_compr, "compr=%s"}, - {Opt_err, NULL}, - }; - -@@ -926,13 +962,16 @@ static int ubifs_parse_options(struct ub - - token = match_token(p, tokens, args); - switch (token) { -+ /* -+ * %Opt_fast_unmount and %Opt_norm_unmount options are ignored. -+ * We accept them in order to be backward-compatible. But this -+ * should be removed at some point. 
-+ */ - case Opt_fast_unmount: - c->mount_opts.unmount_mode = 2; -- c->fast_unmount = 1; - break; - case Opt_norm_unmount: - c->mount_opts.unmount_mode = 1; -- c->fast_unmount = 0; - break; - case Opt_bulk_read: - c->mount_opts.bulk_read = 2; -@@ -950,6 +989,30 @@ static int ubifs_parse_options(struct ub - c->mount_opts.chk_data_crc = 1; - c->no_chk_data_crc = 1; - break; -+ case Opt_override_compr: -+ { -+ char *name = match_strdup(&args[0]); -+ -+ if (!name) -+ return -ENOMEM; -+ if (!strcmp(name, "none")) -+ c->mount_opts.compr_type = UBIFS_COMPR_NONE; -+ else if (!strcmp(name, "lzo")) -+ c->mount_opts.compr_type = UBIFS_COMPR_LZO; -+ else if (!strcmp(name, "zlib")) -+ c->mount_opts.compr_type = UBIFS_COMPR_ZLIB; -+ else if (!strcmp(name, "lzo999")) -+ c->mount_opts.compr_type = UBIFS_COMPR_LZO999; -+ else { -+ ubifs_err("unknown compressor \"%s\"", name); -+ kfree(name); -+ return -EINVAL; -+ } -+ kfree(name); -+ c->mount_opts.override_compr = 1; -+ c->default_compr = c->mount_opts.compr_type; -+ break; -+ } - default: - ubifs_err("unrecognized mount option \"%s\" " - "or missing value", p); -@@ -1019,6 +1082,25 @@ again: - } - - /** -+ * check_free_space - check if there is enough free space to mount. -+ * @c: UBIFS file-system description object -+ * -+ * This function makes sure UBIFS has enough free space to be mounted in -+ * read/write mode. UBIFS must always have some free space to allow deletions. -+ */ -+static int check_free_space(struct ubifs_info *c) -+{ -+ ubifs_assert(c->dark_wm > 0); -+ if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { -+ ubifs_err("insufficient free space to mount in read/write mode"); -+ dbg_dump_budg(c); -+ dbg_dump_lprops(c); -+ return -ENOSPC; -+ } -+ return 0; -+} -+ -+/** - * mount_ubifs - mount UBIFS file-system. - * @c: UBIFS file-system description object - * -@@ -1039,11 +1121,9 @@ static int mount_ubifs(struct ubifs_info - if (err) - return err; - --#ifdef CONFIG_UBIFS_FS_DEBUG -- c->dbg_buf = vmalloc(c->leb_size); -- if (!c->dbg_buf) -- return -ENOMEM; --#endif -+ err = ubifs_debugging_init(c); -+ if (err) -+ return err; - - err = check_volume_empty(c); - if (err) -@@ -1100,27 +1180,25 @@ static int mount_ubifs(struct ubifs_info - goto out_free; - - /* -- * Make sure the compressor which is set as the default on in the -- * superblock was actually compiled in. -+ * Make sure the compressor which is set as default in the superblock -+ * or overridden by mount options is actually compiled in. 
- */ - if (!ubifs_compr_present(c->default_compr)) { -- ubifs_warn("'%s' compressor is set by superblock, but not " -- "compiled in", ubifs_compr_name(c->default_compr)); -- c->default_compr = UBIFS_COMPR_NONE; -+ ubifs_err("'compressor \"%s\" is not compiled in", -+ ubifs_compr_name(c->default_compr)); -+ goto out_free; - } - -- dbg_failure_mode_registration(c); -- -- err = init_constants_late(c); -+ err = init_constants_sb(c); - if (err) -- goto out_dereg; -+ goto out_free; - - sz = ALIGN(c->max_idx_node_sz, c->min_io_size); - sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size); - c->cbuf = kmalloc(sz, GFP_NOFS); - if (!c->cbuf) { - err = -ENOMEM; -- goto out_dereg; -+ goto out_free; - } - - sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); -@@ -1145,6 +1223,8 @@ static int mount_ubifs(struct ubifs_info - if (err) - goto out_master; - -+ init_constants_master(c); -+ - if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { - ubifs_msg("recovery needed"); - c->need_recovery = 1; -@@ -1183,12 +1263,9 @@ static int mount_ubifs(struct ubifs_info - if (!mounted_read_only) { - int lnum; - -- /* Check for enough free space */ -- if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { -- ubifs_err("insufficient available space"); -- err = -EINVAL; -+ err = check_free_space(c); -+ if (err) - goto out_orphans; -- } - - /* Check for enough log space */ - lnum = c->lhead_lnum + 1; -@@ -1205,10 +1282,19 @@ static int mount_ubifs(struct ubifs_info - if (err) - goto out_orphans; - err = ubifs_rcvry_gc_commit(c); -- } else -+ } else { - err = take_gc_lnum(c); -- if (err) -- goto out_orphans; -+ if (err) -+ goto out_orphans; -+ -+ /* -+ * GC LEB may contain garbage if there was an unclean -+ * reboot, and it should be un-mapped. -+ */ -+ err = ubifs_leb_unmap(c, c->gc_lnum); -+ if (err) -+ return err; -+ } - - err = dbg_check_lprops(c); - if (err) -@@ -1217,6 +1303,16 @@ static int mount_ubifs(struct ubifs_info - err = ubifs_recover_size(c); - if (err) - goto out_orphans; -+ } else { -+ /* -+ * Even if we mount read-only, we have to set space in GC LEB -+ * to proper value because this affects UBIFS free space -+ * reporting. We do not want to have a situation when -+ * re-mounting from R/O to R/W changes amount of free space. -+ */ -+ err = take_gc_lnum(c); -+ if (err) -+ goto out_orphans; - } - - spin_lock(&ubifs_infos_lock); -@@ -1229,13 +1325,20 @@ static int mount_ubifs(struct ubifs_info - else { - c->need_recovery = 0; - ubifs_msg("recovery completed"); -+ /* GC LEB has to be empty and taken at this point */ -+ ubifs_assert(c->lst.taken_empty_lebs == 1); - } -- } -+ } else -+ ubifs_assert(c->lst.taken_empty_lebs == 1); - - err = dbg_check_filesystem(c); - if (err) - goto out_infos; - -+ err = dbg_debugfs_init_fs(c); -+ if (err) -+ goto out_infos; -+ - c->always_chk_crc = 0; - - ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"", -@@ -1266,7 +1369,6 @@ static int mount_ubifs(struct ubifs_info - c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7], - c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11], - c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]); -- dbg_msg("fast unmount: %d", c->fast_unmount); - dbg_msg("big_lpt %d", c->big_lpt); - dbg_msg("log LEBs: %d (%d - %d)", - c->log_lebs, UBIFS_LOG_LNUM, c->log_last); -@@ -1283,8 +1385,20 @@ static int mount_ubifs(struct ubifs_info - dbg_msg("tree fanout: %d", c->fanout); - dbg_msg("reserved GC LEB: %d", c->gc_lnum); - dbg_msg("first main LEB: %d", c->main_first); -+ dbg_msg("max. 
znode size %d", c->max_znode_sz); -+ dbg_msg("max. index node size %d", c->max_idx_node_sz); -+ dbg_msg("node sizes: data %zu, inode %zu, dentry %zu", -+ UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ); -+ dbg_msg("node sizes: trun %zu, sb %zu, master %zu", -+ UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ); -+ dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu", -+ UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); -+ dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu", -+ UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ, -+ UBIFS_MAX_DENT_NODE_SZ); - dbg_msg("dead watermark: %d", c->dead_wm); - dbg_msg("dark watermark: %d", c->dark_wm); -+ dbg_msg("LEB overhead: %d", c->leb_overhead); - x = (long long)c->main_lebs * c->dark_wm; - dbg_msg("max. dark space: %lld (%lld KiB, %lld MiB)", - x, x >> 10, x >> 20); -@@ -1320,14 +1434,12 @@ out_wbufs: - free_wbufs(c); - out_cbuf: - kfree(c->cbuf); --out_dereg: -- dbg_failure_mode_deregistration(c); - out_free: - kfree(c->bu.buf); - vfree(c->ileb_buf); - vfree(c->sbuf); - kfree(c->bottom_up_buf); -- UBIFS_DBG(vfree(c->dbg_buf)); -+ ubifs_debugging_exit(c); - return err; - } - -@@ -1345,6 +1457,7 @@ static void ubifs_umount(struct ubifs_in - dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num, - c->vi.vol_id); - -+ dbg_debugfs_exit_fs(c); - spin_lock(&ubifs_infos_lock); - list_del(&c->infos_list); - spin_unlock(&ubifs_infos_lock); -@@ -1364,8 +1477,7 @@ static void ubifs_umount(struct ubifs_in - vfree(c->ileb_buf); - vfree(c->sbuf); - kfree(c->bottom_up_buf); -- UBIFS_DBG(vfree(c->dbg_buf)); -- dbg_failure_mode_deregistration(c); -+ ubifs_debugging_exit(c); - } - - /** -@@ -1380,19 +1492,14 @@ static int ubifs_remount_rw(struct ubifs - { - int err, lnum; - -- if (c->ro_media) -- return -EINVAL; -- - mutex_lock(&c->umount_mutex); -+ dbg_save_space_info(c); - c->remounting_rw = 1; - c->always_chk_crc = 1; - -- /* Check for enough free space */ -- if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { -- ubifs_err("insufficient available space"); -- err = -EINVAL; -+ err = check_free_space(c); -+ if (err) - goto out; -- } - - if (c->old_leb_cnt != c->leb_cnt) { - struct ubifs_sb_node *sup; -@@ -1422,6 +1529,12 @@ static int ubifs_remount_rw(struct ubifs - err = ubifs_recover_inl_heads(c, c->sbuf); - if (err) - goto out; -+ } else { -+ /* A readonly mount is not allowed to have orphans */ -+ ubifs_assert(c->tot_orphans == 0); -+ err = ubifs_clear_orphans(c); -+ if (err) -+ goto out; - } - - if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) { -@@ -1477,7 +1590,7 @@ static int ubifs_remount_rw(struct ubifs - if (c->need_recovery) - err = ubifs_rcvry_gc_commit(c); - else -- err = take_gc_lnum(c); -+ err = ubifs_leb_unmap(c, c->gc_lnum); - if (err) - goto out; - -@@ -1490,8 +1603,9 @@ static int ubifs_remount_rw(struct ubifs - c->vfs_sb->s_flags &= ~MS_RDONLY; - c->remounting_rw = 0; - c->always_chk_crc = 0; -+ err = dbg_check_space_info(c); - mutex_unlock(&c->umount_mutex); -- return 0; -+ return err; - - out: - vfree(c->orph_buf); -@@ -1511,39 +1625,18 @@ out: - } - - /** -- * commit_on_unmount - commit the journal when un-mounting. -- * @c: UBIFS file-system description object -- * -- * This function is called during un-mounting and re-mounting, and it commits -- * the journal unless the "fast unmount" mode is enabled. It also avoids -- * committing the journal if it contains too few data. 
-- */ --static void commit_on_unmount(struct ubifs_info *c) --{ -- if (!c->fast_unmount) { -- long long bud_bytes; -- -- spin_lock(&c->buds_lock); -- bud_bytes = c->bud_bytes; -- spin_unlock(&c->buds_lock); -- if (bud_bytes > c->leb_size) -- ubifs_run_commit(c); -- } --} -- --/** - * ubifs_remount_ro - re-mount in read-only mode. - * @c: UBIFS file-system description object - * -- * We rely on VFS to have stopped writing. Possibly the background thread could -- * be running a commit, however kthread_stop will wait in that case. -+ * We assume VFS has stopped writing. Possibly the background thread could be -+ * running a commit, however kthread_stop will wait in that case. - */ - static void ubifs_remount_ro(struct ubifs_info *c) - { - int i, err; - - ubifs_assert(!c->need_recovery); -- commit_on_unmount(c); -+ ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); - - mutex_lock(&c->umount_mutex); - if (c->bgt) { -@@ -1551,27 +1644,29 @@ static void ubifs_remount_ro(struct ubif - c->bgt = NULL; - } - -+ dbg_save_space_info(c); -+ - for (i = 0; i < c->jhead_cnt; i++) { - ubifs_wbuf_sync(&c->jheads[i].wbuf); -- del_timer_sync(&c->jheads[i].wbuf.timer); -+ hrtimer_cancel(&c->jheads[i].wbuf.timer); - } - -- if (!c->ro_media) { -- c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); -- c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); -- c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); -- err = ubifs_write_master(c); -- if (err) -- ubifs_ro_mode(c, err); -- } -+ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); -+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); -+ c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); -+ err = ubifs_write_master(c); -+ if (err) -+ ubifs_ro_mode(c, err); - -- ubifs_destroy_idx_gc(c); - free_wbufs(c); - vfree(c->orph_buf); - c->orph_buf = NULL; - vfree(c->ileb_buf); - c->ileb_buf = NULL; - ubifs_lpt_free(c, 1); -+ err = dbg_check_space_info(c); -+ if (err) -+ ubifs_ro_mode(c, err); - mutex_unlock(&c->umount_mutex); - } - -@@ -1611,10 +1706,8 @@ static void ubifs_put_super(struct super - - /* Synchronize write-buffers */ - if (c->jheads) -- for (i = 0; i < c->jhead_cnt; i++) { -+ for (i = 0; i < c->jhead_cnt; i++) - ubifs_wbuf_sync(&c->jheads[i].wbuf); -- del_timer_sync(&c->jheads[i].wbuf.timer); -- } - - /* - * On fatal errors c->ro_media is set to 1, in which case we do -@@ -1664,11 +1757,20 @@ static int ubifs_remount_fs(struct super - } - - if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { -+ if (c->ro_media) { -+ ubifs_msg("cannot re-mount due to prior errors"); -+ return -EROFS; -+ } - err = ubifs_remount_rw(c); - if (err) - return err; -- } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) -+ } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) { -+ if (c->ro_media) { -+ ubifs_msg("cannot re-mount due to prior errors"); -+ return -EROFS; -+ } - ubifs_remount_ro(c); -+ } - - if (c->bulk_read == 1) - bu_init(c); -@@ -1678,10 +1780,11 @@ static int ubifs_remount_fs(struct super - c->bu.buf = NULL; - } - -+ ubifs_assert(c->lst.taken_empty_lebs == 1); - return 0; - } - --struct super_operations ubifs_super_operations = { -+const struct super_operations ubifs_super_operations = { - .alloc_inode = ubifs_alloc_inode, - .destroy_inode = ubifs_destroy_inode, - .put_super = ubifs_put_super, -@@ -1849,7 +1952,6 @@ static int ubifs_fill_super(struct super - goto out_iput; - - mutex_unlock(&c->umount_mutex); -- - return 0; - - out_iput: -@@ -1949,15 +2051,6 @@ out_close: - - static void ubifs_kill_sb(struct super_block *sb) - { -- struct 
ubifs_info *c = sb->s_fs_info; -- -- /* -- * We do 'commit_on_unmount()' here instead of 'ubifs_put_super()' -- * in order to be outside BKL. -- */ -- if (sb->s_root && !(sb->s_flags & MS_RDONLY)) -- commit_on_unmount(c); -- /* The un-mount routine is actually done in put_super() */ - generic_shutdown_super(sb); - } - -@@ -2021,6 +2114,14 @@ static int __init ubifs_init(void) - BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64); - - /* -+ * We use 2 bit wide bit-fields to store compression type, which should -+ * be amended if more compressors are added. The bit-fields are: -+ * @compr_type in 'struct ubifs_inode', @default_compr in -+ * 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'. -+ */ -+ BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); -+ -+ /* - * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to - * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. - */ -@@ -2049,11 +2150,17 @@ static int __init ubifs_init(void) - - err = ubifs_compressors_init(); - if (err) -+ goto out_shrinker; -+ -+ err = dbg_debugfs_init(); -+ if (err) - goto out_compr; - - return 0; - - out_compr: -+ ubifs_compressors_exit(); -+out_shrinker: - unregister_shrinker(&ubifs_shrinker_info); - kmem_cache_destroy(ubifs_inode_slab); - out_reg: -@@ -2068,6 +2175,7 @@ static void __exit ubifs_exit(void) - ubifs_assert(list_empty(&ubifs_infos)); - ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0); - -+ dbg_debugfs_exit(); - ubifs_compressors_exit(); - unregister_shrinker(&ubifs_shrinker_info); - kmem_cache_destroy(ubifs_inode_slab); -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/tnc.c linux-omap-2.6.28-nokia1/fs/ubifs/tnc.c ---- linux-omap-2.6.28-omap1/fs/ubifs/tnc.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/tnc.c 2011-06-22 13:19:33.253063270 +0200 -@@ -443,6 +443,11 @@ static int tnc_read_node_nm(struct ubifs - * This function performs that same function as ubifs_read_node except that - * it does not require that there is actually a node present and instead - * the return code indicates if a node was read. -+ * -+ * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc -+ * is true (it is controlled by corresponding mount option). However, if -+ * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and CRC is always -+ * checked. - */ - static int try_read_node(const struct ubifs_info *c, void *buf, int type, - int len, int lnum, int offs) -@@ -470,9 +475,8 @@ static int try_read_node(const struct ub - if (node_len != len) - return 0; - -- if (type == UBIFS_DATA_NODE && !c->always_chk_crc) -- if (c->no_chk_data_crc) -- return 0; -+ if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc) -+ return 1; - - crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); - node_crc = le32_to_cpu(ch->crc); -@@ -1506,7 +1510,7 @@ out: - * - * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function - * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares -- * maxumum possible amount of nodes for bulk-read. -+ * maximum possible amount of nodes for bulk-read. 
- */ - int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) - { -@@ -2245,12 +2249,11 @@ int ubifs_tnc_replace(struct ubifs_info - if (found) { - /* Ensure the znode is dirtied */ - if (znode->cnext || !ubifs_zn_dirty(znode)) { -- znode = dirty_cow_bottom_up(c, -- znode); -- if (IS_ERR(znode)) { -- err = PTR_ERR(znode); -- goto out_unlock; -- } -+ znode = dirty_cow_bottom_up(c, znode); -+ if (IS_ERR(znode)) { -+ err = PTR_ERR(znode); -+ goto out_unlock; -+ } - } - zbr = &znode->zbranch[n]; - lnc_free(zbr); -@@ -2317,11 +2320,11 @@ int ubifs_tnc_add_nm(struct ubifs_info * - - /* Ensure the znode is dirtied */ - if (znode->cnext || !ubifs_zn_dirty(znode)) { -- znode = dirty_cow_bottom_up(c, znode); -- if (IS_ERR(znode)) { -- err = PTR_ERR(znode); -- goto out_unlock; -- } -+ znode = dirty_cow_bottom_up(c, znode); -+ if (IS_ERR(znode)) { -+ err = PTR_ERR(znode); -+ goto out_unlock; -+ } - } - - if (found == 1) { -@@ -2627,11 +2630,11 @@ int ubifs_tnc_remove_range(struct ubifs_ - - /* Ensure the znode is dirtied */ - if (znode->cnext || !ubifs_zn_dirty(znode)) { -- znode = dirty_cow_bottom_up(c, znode); -- if (IS_ERR(znode)) { -- err = PTR_ERR(znode); -- goto out_unlock; -- } -+ znode = dirty_cow_bottom_up(c, znode); -+ if (IS_ERR(znode)) { -+ err = PTR_ERR(znode); -+ goto out_unlock; -+ } - } - - /* Remove all keys in range except the first */ -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/tnc_commit.c linux-omap-2.6.28-nokia1/fs/ubifs/tnc_commit.c ---- linux-omap-2.6.28-omap1/fs/ubifs/tnc_commit.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/tnc_commit.c 2011-06-22 13:19:33.253063270 +0200 -@@ -553,8 +553,8 @@ static int layout_in_empty_space(struct - } - - #ifdef CONFIG_UBIFS_FS_DEBUG -- c->new_ihead_lnum = lnum; -- c->new_ihead_offs = buf_offs; -+ c->dbg->new_ihead_lnum = lnum; -+ c->dbg->new_ihead_offs = buf_offs; - #endif - - return 0; -@@ -802,8 +802,10 @@ int ubifs_tnc_start_commit(struct ubifs_ - * budgeting subsystem to assume the index is already committed, - * even though it is not. - */ -+ ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); - c->old_idx_sz = c->calc_idx_sz; - c->budg_uncommitted_idx = 0; -+ c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); - spin_unlock(&c->space_lock); - mutex_unlock(&c->tnc_mutex); - -@@ -1002,7 +1004,8 @@ static int write_index(struct ubifs_info - } - - #ifdef CONFIG_UBIFS_FS_DEBUG -- if (lnum != c->new_ihead_lnum || buf_offs != c->new_ihead_offs) { -+ if (lnum != c->dbg->new_ihead_lnum || -+ buf_offs != c->dbg->new_ihead_offs) { - ubifs_err("inconsistent ihead"); - return -EINVAL; - } -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/ubifs.h linux-omap-2.6.28-nokia1/fs/ubifs/ubifs.h ---- linux-omap-2.6.28-omap1/fs/ubifs/ubifs.h 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/ubifs.h 2011-06-22 13:19:33.253063270 +0200 -@@ -63,6 +63,14 @@ - #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL - #define SQNUM_WATERMARK 0xFFFFFFFFFF000000ULL - -+/* -+ * Minimum amount of LEBs reserved for the index. At present the index needs at -+ * least 2 LEBs: one for the index head and one for in-the-gaps method (which -+ * currently does not cater for the index head and so excludes it from -+ * consideration). 
-+ */ -+#define MIN_INDEX_LEBS 2 -+ - /* Minimum amount of data UBIFS writes to the flash */ - #define MIN_WRITE_SZ (UBIFS_DATA_NODE_SZ + 8) - -@@ -87,8 +95,9 @@ - */ - #define BGT_NAME_PATTERN "ubifs_bgt%d_%d" - --/* Default write-buffer synchronization timeout (5 secs) */ --#define DEFAULT_WBUF_TIMEOUT (5 * HZ) -+/* Write-buffer synchronization timeout interval in seconds */ -+#define WBUF_TIMEOUT_SOFTLIMIT 3 -+#define WBUF_TIMEOUT_HARDLIMIT 5 - - /* Maximum possible inode number (only 32-bit inodes are supported now) */ - #define MAX_INUM 0xFFFFFFFF -@@ -386,12 +395,12 @@ struct ubifs_inode { - unsigned int dirty:1; - unsigned int xattr:1; - unsigned int bulk_read:1; -+ unsigned int compr_type:2; - struct mutex ui_mutex; - spinlock_t ui_lock; - loff_t synced_i_size; - loff_t ui_size; - int flags; -- int compr_type; - pgoff_t last_page_read; - pgoff_t read_in_a_row; - int data_len; -@@ -418,9 +427,9 @@ struct ubifs_unclean_leb { - * LEB properties flags. - * - * LPROPS_UNCAT: not categorized -- * LPROPS_DIRTY: dirty > 0, not index -- * LPROPS_DIRTY_IDX: dirty + free > UBIFS_CH_SZ and index -- * LPROPS_FREE: free > 0, not empty, not index -+ * LPROPS_DIRTY: dirty > free, dirty >= @c->dead_wm, not index -+ * LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sze and index -+ * LPROPS_FREE: free > 0, dirty < @c->dead_wm, not empty, not index - * LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs - * LPROPS_EMPTY: LEB is empty, not taken - * LPROPS_FREEABLE: free + dirty == leb_size, not index, not taken -@@ -473,8 +482,8 @@ struct ubifs_lprops { - struct ubifs_lpt_lprops { - int free; - int dirty; -- unsigned tgc : 1; -- unsigned cmt : 1; -+ unsigned tgc:1; -+ unsigned cmt:1; - }; - - /** -@@ -482,24 +491,26 @@ struct ubifs_lpt_lprops { - * @empty_lebs: number of empty LEBs - * @taken_empty_lebs: number of taken LEBs - * @idx_lebs: number of indexing LEBs -- * @total_free: total free space in bytes -- * @total_dirty: total dirty space in bytes -- * @total_used: total used space in bytes (includes only data LEBs) -- * @total_dead: total dead space in bytes (includes only data LEBs) -- * @total_dark: total dark space in bytes (includes only data LEBs) -- * -- * N.B. total_dirty and total_used are different to other total_* fields, -- * because they account _all_ LEBs, not just data LEBs. -- * -- * 'taken_empty_lebs' counts the LEBs that are in the transient state of having -- * been 'taken' for use but not yet written to. 'taken_empty_lebs' is needed -- * to account correctly for gc_lnum, otherwise 'empty_lebs' could be used -- * by itself (in which case 'unused_lebs' would be a better name). In the case -- * of gc_lnum, it is 'taken' at mount time or whenever a LEB is retained by GC, -- * but unlike other empty LEBs that are 'taken', it may not be written straight -- * away (i.e. before the next commit start or unmount), so either gc_lnum must -- * be specially accounted for, or the current approach followed i.e. count it -- * under 'taken_empty_lebs'. -+ * @total_free: total free space in bytes (includes all LEBs) -+ * @total_dirty: total dirty space in bytes (includes all LEBs) -+ * @total_used: total used space in bytes (does not include index LEBs) -+ * @total_dead: total dead space in bytes (does not include index LEBs) -+ * @total_dark: total dark space in bytes (does not include index LEBs) -+ * -+ * The @taken_empty_lebs field counts the LEBs that are in the transient state -+ * of having been "taken" for use but not yet written to. 
@taken_empty_lebs is -+ * needed to account correctly for @gc_lnum, otherwise @empty_lebs could be -+ * used by itself (in which case 'unused_lebs' would be a better name). In the -+ * case of @gc_lnum, it is "taken" at mount time or whenever a LEB is retained -+ * by GC, but unlike other empty LEBs that are "taken", it may not be written -+ * straight away (i.e. before the next commit start or unmount), so either -+ * @gc_lnum must be specially accounted for, or the current approach followed -+ * i.e. count it under @taken_empty_lebs. -+ * -+ * @empty_lebs includes @taken_empty_lebs. -+ * -+ * @total_used, @total_dead and @total_dark fields do not account indexing -+ * LEBs. - */ - struct ubifs_lp_stats { - int empty_lebs; -@@ -640,9 +651,12 @@ typedef int (*ubifs_lpt_scan_callback)(s - * @io_mutex: serializes write-buffer I/O - * @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes - * fields -+ * @softlimit: soft write-buffer timeout interval -+ * @delta: hard and soft timeouts delta (the timer expire inteval is @softlimit -+ * and @softlimit + @delta) - * @timer: write-buffer timer -- * @timeout: timer expire interval in jiffies -- * @need_sync: it is set if its timer expired and needs sync -+ * @no_timer: non-zero if this write-buffer does not have a timer -+ * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing - * @next_ino: points to the next position of the following inode number - * @inodes: stores the inode numbers of the nodes which are in wbuf - * -@@ -668,9 +682,11 @@ struct ubifs_wbuf { - int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad); - struct mutex io_mutex; - spinlock_t lock; -- struct timer_list timer; -- int timeout; -- int need_sync; -+ ktime_t softlimit; -+ unsigned long long delta; -+ struct hrtimer timer; -+ unsigned int no_timer:1; -+ unsigned int need_sync:1; - int next_ino; - ino_t *inodes; - }; -@@ -893,15 +909,25 @@ struct ubifs_orphan { - /** - * struct ubifs_mount_opts - UBIFS-specific mount options information. - * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast) -- * @bulk_read: enable bulk-reads -- * @chk_data_crc: check CRCs when reading data nodes -+ * @bulk_read: enable/disable bulk-reads (%0 default, %1 disabe, %2 enable) -+ * @chk_data_crc: enable/disable CRC data checking when reading data nodes -+ * (%0 default, %1 disabe, %2 enable) -+ * @override_compr: override default compressor (%0 - do not override and use -+ * superblock compressor, %1 - override and use compressor -+ * specified in @compr_type) -+ * @compr_type: compressor type to override the superblock compressor with -+ * (%UBIFS_COMPR_NONE, etc) - */ - struct ubifs_mount_opts { - unsigned int unmount_mode:2; - unsigned int bulk_read:2; - unsigned int chk_data_crc:2; -+ unsigned int override_compr:1; -+ unsigned int compr_type:2; - }; - -+struct ubifs_debug_info; -+ - /** - * struct ubifs_info - UBIFS file-system description data structure - * (per-superblock). 
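/*
 * A minimal sketch of how the @override_compr/@compr_type mount options
 * documented above could take effect after option parsing.  The field names
 * come from the hunks in this patch; the helper itself is hypothetical and
 * shown only for illustration.
 */
static void apply_compr_override(struct ubifs_info *c)
{
        /* 0 - keep the compressor recorded in the superblock */
        if (!c->mount_opts.override_compr)
                return;
        /* 1 - use the compressor requested with the mount option */
        c->default_compr = c->mount_opts.compr_type;
}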
-@@ -941,11 +967,11 @@ struct ubifs_mount_opts { - * @cs_lock: commit state lock - * @cmt_wq: wait queue to sleep on if the log is full and a commit is running - * -- * @fast_unmount: do not run journal commit before un-mounting - * @big_lpt: flag that LPT is too big to write whole during commit - * @no_chk_data_crc: do not check CRCs when reading data nodes (except during - * recovery) - * @bulk_read: enable bulk-reads -+ * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) - * - * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and - * @calc_idx_sz -@@ -963,8 +989,6 @@ struct ubifs_mount_opts { - * @ileb_nxt: next pre-allocated index LEBs - * @old_idx: tree of index nodes obsoleted since the last commit start - * @bottom_up_buf: a buffer which is used by 'dirty_cow_bottom_up()' in tnc.c -- * @new_ihead_lnum: used by debugging to check ihead_lnum -- * @new_ihead_offs: used by debugging to check ihead_offs - * - * @mst_node: master node - * @mst_offs: offset of valid master node -@@ -986,7 +1010,6 @@ struct ubifs_mount_opts { - * @main_lebs: count of LEBs in the main area - * @main_first: first LEB of the main area - * @main_bytes: main area size in bytes -- * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) - * - * @key_hash_type: type of the key hash - * @key_hash: direntry key hash function -@@ -1149,15 +1172,7 @@ struct ubifs_mount_opts { - * @always_chk_crc: always check CRCs (while mounting and remounting rw) - * @mount_opts: UBIFS-specific mount options - * -- * @dbg_buf: a buffer of LEB size used for debugging purposes -- * @old_zroot: old index root - used by 'dbg_check_old_index()' -- * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' -- * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' -- * @failure_mode: failure mode for recovery testing -- * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls -- * @fail_timeout: time in jiffies when delay of failure mode expires -- * @fail_cnt: current number of calls to failure mode I/O functions -- * @fail_cnt_max: number of calls by which to delay failure mode -+ * @dbg: debugging-related information - */ - struct ubifs_info { - struct super_block *vfs_sb; -@@ -1192,10 +1207,10 @@ struct ubifs_info { - spinlock_t cs_lock; - wait_queue_head_t cmt_wq; - -- unsigned int fast_unmount:1; - unsigned int big_lpt:1; - unsigned int no_chk_data_crc:1; - unsigned int bulk_read:1; -+ unsigned int default_compr:2; - - struct mutex tnc_mutex; - struct ubifs_zbranch zroot; -@@ -1212,10 +1227,6 @@ struct ubifs_info { - int ileb_nxt; - struct rb_root old_idx; - int *bottom_up_buf; --#ifdef CONFIG_UBIFS_FS_DEBUG -- int new_ihead_lnum; -- int new_ihead_offs; --#endif - - struct ubifs_mst_node *mst_node; - int mst_offs; -@@ -1237,7 +1248,6 @@ struct ubifs_info { - int main_lebs; - int main_first; - long long main_bytes; -- int default_compr; - - uint8_t key_hash_type; - uint32_t (*key_hash)(const char *str, int len); -@@ -1315,8 +1325,8 @@ struct ubifs_info { - void *sbuf; - struct list_head idx_gc; - int idx_gc_cnt; -- volatile int gc_seq; -- volatile int gced_lnum; -+ int gc_seq; -+ int gced_lnum; - - struct list_head infos_list; - struct mutex umount_mutex; -@@ -1391,21 +1401,7 @@ struct ubifs_info { - struct ubifs_mount_opts mount_opts; - - #ifdef CONFIG_UBIFS_FS_DEBUG -- void *dbg_buf; -- struct ubifs_zbranch old_zroot; -- int old_zroot_level; -- unsigned long long old_zroot_sqnum; -- int failure_mode; -- int fail_delay; -- 
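/*
 * The hunks above replace the write-buffer's jiffies timer with an hrtimer
 * that has a soft limit (@softlimit) and a hard limit (@softlimit + @delta).
 * A minimal sketch of arming such a range timer, assuming the range-capable
 * hrtimer API is available in this tree; the helper name is hypothetical.
 */
static void wbuf_arm_timer(struct ubifs_wbuf *wbuf)
{
        if (wbuf->no_timer)
                return;
        /* fire no earlier than softlimit and no later than softlimit + delta */
        hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
                               HRTIMER_MODE_REL);
}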
unsigned long fail_timeout; -- unsigned int fail_cnt; -- unsigned int fail_cnt_max; -- long long chk_lpt_sz; -- long long chk_lpt_sz2; -- long long chk_lpt_wastage; -- int chk_lpt_lebs; -- int new_nhead_lnum; -- int new_nhead_offs; -+ struct ubifs_debug_info *dbg; - #endif - }; - -@@ -1413,13 +1409,13 @@ extern struct list_head ubifs_infos; - extern spinlock_t ubifs_infos_lock; - extern atomic_long_t ubifs_clean_zn_cnt; - extern struct kmem_cache *ubifs_inode_slab; --extern struct super_operations ubifs_super_operations; --extern struct address_space_operations ubifs_file_address_operations; --extern struct file_operations ubifs_file_operations; --extern struct inode_operations ubifs_file_inode_operations; --extern struct file_operations ubifs_dir_operations; --extern struct inode_operations ubifs_dir_inode_operations; --extern struct inode_operations ubifs_symlink_inode_operations; -+extern const struct super_operations ubifs_super_operations; -+extern const struct address_space_operations ubifs_file_address_operations; -+extern const struct file_operations ubifs_file_operations; -+extern const struct inode_operations ubifs_file_inode_operations; -+extern const struct file_operations ubifs_dir_operations; -+extern const struct inode_operations ubifs_dir_inode_operations; -+extern const struct inode_operations ubifs_symlink_inode_operations; - extern struct backing_dev_info ubifs_backing_dev_info; - extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; - -@@ -1436,7 +1432,7 @@ int ubifs_read_node_wbuf(struct ubifs_wb - int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, - int offs, int dtype); - int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, -- int offs, int quiet, int chk_crc); -+ int offs, int quiet, int must_chk_crc); - void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); - void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last); - int ubifs_io_init(struct ubifs_info *c); -@@ -1503,9 +1499,10 @@ void ubifs_release_ino_dirty(struct ubif - void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, - struct ubifs_budget_req *req); - long long ubifs_get_free_space(struct ubifs_info *c); -+long long ubifs_get_free_space_nolock(struct ubifs_info *c); - int ubifs_calc_min_idx_lebs(struct ubifs_info *c); - void ubifs_convert_page_budget(struct ubifs_info *c); --long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free); -+long long ubifs_reported_space(const struct ubifs_info *c, long long free); - long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs); - - /* find.c */ -@@ -1611,6 +1608,7 @@ void ubifs_delete_orphan(struct ubifs_in - int ubifs_orphan_start_commit(struct ubifs_info *c); - int ubifs_orphan_end_commit(struct ubifs_info *c); - int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only); -+int ubifs_clear_orphans(struct ubifs_info *c); - - /* lpt.c */ - int ubifs_calc_lpt_geom(struct ubifs_info *c); -@@ -1639,6 +1637,9 @@ void ubifs_add_lpt_dirt(struct ubifs_inf - void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode); - uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits); - struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght); -+/* Needed only in debugging code in lpt_commit.c */ -+int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, -+ struct ubifs_nnode *nnode); - - /* lpt_commit.c */ - int ubifs_lpt_start_commit(struct ubifs_info *c); -@@ -1651,7 
+1652,7 @@ const struct ubifs_lprops *ubifs_change_ - const struct ubifs_lprops *lp, - int free, int dirty, int flags, - int idx_gc_cnt); --void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *stats); -+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst); - void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, - int cat); - void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, -@@ -1714,7 +1715,7 @@ long ubifs_compat_ioctl(struct file *fil - - /* compressor.c */ - int __init ubifs_compressors_init(void); --void __exit ubifs_compressors_exit(void); -+void ubifs_compressors_exit(void); - void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, - int *compr_type); - int ubifs_decompress(const void *buf, int len, void *out, int *out_len, -diff -Nurp linux-omap-2.6.28-omap1/fs/ubifs/ubifs-media.h linux-omap-2.6.28-nokia1/fs/ubifs/ubifs-media.h ---- linux-omap-2.6.28-omap1/fs/ubifs/ubifs-media.h 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ubifs/ubifs-media.h 2011-06-22 13:19:33.253063270 +0200 -@@ -51,6 +51,13 @@ - */ - #define UBIFS_MIN_COMPR_LEN 128 - -+/* -+ * If compressed data length is less than %UBIFS_MIN_COMPRESS_DIFF bytes -+ * shorter than uncompressed data length, UBIFS preferes to leave this data -+ * node uncompress, because it'll be read faster. -+ */ -+#define UBIFS_MIN_COMPRESS_DIFF 64 -+ - /* Root inode number */ - #define UBIFS_ROOT_INO 1 - -@@ -296,12 +303,14 @@ enum { - * UBIFS_COMPR_NONE: no compression - * UBIFS_COMPR_LZO: LZO compression - * UBIFS_COMPR_ZLIB: ZLIB compression -+ * UBIFS_COMPR_LZO999: LZO999 compression - * UBIFS_COMPR_TYPES_CNT: count of supported compression types - */ - enum { - UBIFS_COMPR_NONE, - UBIFS_COMPR_LZO, - UBIFS_COMPR_ZLIB, -+ UBIFS_COMPR_LZO999, - UBIFS_COMPR_TYPES_CNT, - }; - -diff -Nurp linux-omap-2.6.28-omap1/fs/udf/balloc.c linux-omap-2.6.28-nokia1/fs/udf/balloc.c ---- linux-omap-2.6.28-omap1/fs/udf/balloc.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/udf/balloc.c 2011-06-22 13:19:33.253063270 +0200 -@@ -218,7 +218,7 @@ static void udf_bitmap_free_blocks(struc - } while (overflow); - - error_return: -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - if (sbi->s_lvid_bh) - mark_buffer_dirty(sbi->s_lvid_bh); - mutex_unlock(&sbi->s_alloc_mutex); -@@ -279,7 +279,7 @@ static int udf_bitmap_prealloc_blocks(st - out: - if (udf_add_free_space(sbi, partition, -alloc_count)) - mark_buffer_dirty(sbi->s_lvid_bh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mutex_unlock(&sbi->s_alloc_mutex); - return alloc_count; - } -@@ -411,7 +411,7 @@ got_block: - - if (udf_add_free_space(sbi, partition, -1)) - mark_buffer_dirty(sbi->s_lvid_bh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mutex_unlock(&sbi->s_alloc_mutex); - *err = 0; - return newblock; -@@ -653,7 +653,7 @@ static void udf_table_free_blocks(struct - brelse(oepos.bh); - - error_return: -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - mutex_unlock(&sbi->s_alloc_mutex); - return; - } -@@ -720,7 +720,7 @@ static int udf_table_prealloc_blocks(str - - if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) { - mark_buffer_dirty(sbi->s_lvid_bh); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - mutex_unlock(&sbi->s_alloc_mutex); - return alloc_count; -@@ -822,7 +822,7 @@ static int udf_table_new_block(struct su - if (udf_add_free_space(sbi, partition, -1)) - mark_buffer_dirty(sbi->s_lvid_bh); - -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - 
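/*
 * Worked example of the new UBIFS_MIN_COMPRESS_DIFF constant defined above:
 * if compression saves fewer than 64 bytes, the data node is left as plain
 * text so it can be read back without a decompression pass.  The variable
 * and function names here are illustrative, not taken from the driver.
 */
static void pick_compr_type(int in_len, int out_len, int *compr_type)
{
        if (in_len - out_len < UBIFS_MIN_COMPRESS_DIFF)
                *compr_type = UBIFS_COMPR_NONE; /* store uncompressed */
}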
mutex_unlock(&sbi->s_alloc_mutex); - *err = 0; - return newblock; -diff -Nurp linux-omap-2.6.28-omap1/fs/udf/super.c linux-omap-2.6.28-nokia1/fs/udf/super.c ---- linux-omap-2.6.28-omap1/fs/udf/super.c 2011-06-22 13:14:23.453067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/udf/super.c 2011-06-22 13:19:33.253063270 +0200 -@@ -546,7 +546,7 @@ static void udf_write_super(struct super - - if (!(sb->s_flags & MS_RDONLY)) - udf_open_lvid(sb); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - unlock_kernel(); - } -@@ -1924,7 +1924,7 @@ static int udf_fill_super(struct super_b - sb->s_op = &udf_sb_ops; - sb->s_export_op = &udf_export_ops; - sb->dq_op = NULL; -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - sb->s_magic = UDF_SUPER_MAGIC; - sb->s_time_gran = 1000; - -@@ -2037,7 +2037,7 @@ static void udf_error(struct super_block - - if (!(sb->s_flags & MS_RDONLY)) { - /* mark sb error */ -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); -diff -Nurp linux-omap-2.6.28-omap1/fs/ufs/balloc.c linux-omap-2.6.28-nokia1/fs/ufs/balloc.c ---- linux-omap-2.6.28-omap1/fs/ufs/balloc.c 2011-06-22 13:14:23.463067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ufs/balloc.c 2011-06-22 13:19:33.253063270 +0200 -@@ -122,7 +122,7 @@ void ufs_free_fragments(struct inode *in - ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); - ubh_wait_on_buffer (UCPI_UBH(ucpi)); - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - unlock_super (sb); - UFSD("EXIT\n"); -@@ -223,7 +223,7 @@ do_more: - goto do_more; - } - -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - unlock_super (sb); - UFSD("EXIT\n"); - return; -@@ -571,7 +571,7 @@ static u64 ufs_add_fragments(struct inod - ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); - ubh_wait_on_buffer (UCPI_UBH(ucpi)); - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment); - -@@ -698,7 +698,7 @@ succed: - ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); - ubh_wait_on_buffer (UCPI_UBH(ucpi)); - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - result += cgno * uspi->s_fpg; - UFSD("EXIT3, result %llu\n", (unsigned long long)result); -diff -Nurp linux-omap-2.6.28-omap1/fs/ufs/ialloc.c linux-omap-2.6.28-nokia1/fs/ufs/ialloc.c ---- linux-omap-2.6.28-omap1/fs/ufs/ialloc.c 2011-06-22 13:14:23.463067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ufs/ialloc.c 2011-06-22 13:19:33.253063270 +0200 -@@ -124,7 +124,7 @@ void ufs_free_inode (struct inode * inod - ubh_wait_on_buffer (UCPI_UBH(ucpi)); - } - -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - unlock_super (sb); - UFSD("EXIT\n"); - } -@@ -300,7 +300,7 @@ cg_found: - ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); - ubh_wait_on_buffer (UCPI_UBH(ucpi)); - } -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - - inode->i_ino = cg * uspi->s_ipg + bit; - inode->i_mode = mode; -diff -Nurp linux-omap-2.6.28-omap1/fs/ufs/super.c linux-omap-2.6.28-nokia1/fs/ufs/super.c ---- linux-omap-2.6.28-omap1/fs/ufs/super.c 2011-06-22 13:14:23.463067673 +0200 -+++ linux-omap-2.6.28-nokia1/fs/ufs/super.c 2011-06-22 13:19:33.253063270 +0200 -@@ -237,7 +237,7 @@ void ufs_error (struct super_block * sb, - if (!(sb->s_flags & MS_RDONLY)) { - usb1->fs_clean = UFS_FSBAD; - ubh_mark_buffer_dirty(USPI_UBH(uspi)); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - sb->s_flags |= MS_RDONLY; - } - va_start (args, fmt); -@@ -269,7 +269,7 @@ void ufs_panic (struct super_block * sb, - if (!(sb->s_flags & MS_RDONLY)) { - usb1->fs_clean = UFS_FSBAD; - ubh_mark_buffer_dirty(USPI_UBH(uspi)); -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - } - va_start (args, 
fmt); - vsnprintf (error_buf, sizeof(error_buf), fmt, args); -@@ -1130,7 +1130,7 @@ static void ufs_write_super(struct super - UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); - ufs_put_cstotal(sb); - } -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - UFSD("EXIT\n"); - unlock_kernel(); - } -@@ -1199,7 +1199,7 @@ static int ufs_remount (struct super_blo - ufs_set_fs_state(sb, usb1, usb3, - UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); - ubh_mark_buffer_dirty (USPI_UBH(uspi)); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - sb->s_flags |= MS_RDONLY; - } else { - /* -diff -Nurp linux-omap-2.6.28-omap1/fs/xfs/linux-2.6/xfs_super.c linux-omap-2.6.28-nokia1/fs/xfs/linux-2.6/xfs_super.c ---- linux-omap-2.6.28-omap1/fs/xfs/linux-2.6/xfs_super.c 2011-06-22 13:14:23.483067672 +0200 -+++ linux-omap-2.6.28-nokia1/fs/xfs/linux-2.6/xfs_super.c 2011-06-22 13:19:33.253063270 +0200 -@@ -1162,7 +1162,7 @@ xfs_fs_write_super( - { - if (!(sb->s_flags & MS_RDONLY)) - xfs_sync(XFS_M(sb), SYNC_FSDATA); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - } - - STATIC int -@@ -1200,7 +1200,7 @@ xfs_fs_sync_super( - flags = SYNC_FSDATA; - - error = xfs_sync(mp, flags); -- sb->s_dirt = 0; -+ mark_sb_clean(sb); - - if (unlikely(laptop_mode)) { - int prev_sync_seq = mp->m_sync_seq; -@@ -1749,7 +1749,7 @@ xfs_fs_fill_super( - - XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname); - -- sb->s_dirt = 1; -+ mark_sb_dirty(sb); - sb->s_magic = XFS_SB_MAGIC; - sb->s_blocksize = mp->m_sb.sb_blocksize; - sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; -diff -Nurp linux-omap-2.6.28-omap1/fs/xfs/xfs_trans.c linux-omap-2.6.28-nokia1/fs/xfs/xfs_trans.c ---- linux-omap-2.6.28-omap1/fs/xfs/xfs_trans.c 2011-06-22 13:14:23.793067668 +0200 -+++ linux-omap-2.6.28-nokia1/fs/xfs/xfs_trans.c 2011-06-22 13:19:33.253063270 +0200 -@@ -629,7 +629,7 @@ xfs_trans_apply_sb_deltas( - offsetof(xfs_dsb_t, sb_frextents) + - sizeof(sbp->sb_frextents) - 1); - -- tp->t_mountp->m_super->s_dirt = 1; -+ mark_sb_dirty(tp->t_mountp->m_super); - } - - /* -diff -Nurp linux-omap-2.6.28-omap1/include/drm/Kbuild linux-omap-2.6.28-nokia1/include/drm/Kbuild ---- linux-omap-2.6.28-omap1/include/drm/Kbuild 2011-06-22 13:14:23.853067667 +0200 -+++ linux-omap-2.6.28-nokia1/include/drm/Kbuild 2011-06-22 13:19:33.253063270 +0200 -@@ -3,6 +3,7 @@ unifdef-y += i810_drm.h - unifdef-y += i830_drm.h - unifdef-y += i915_drm.h - unifdef-y += mga_drm.h -+unifdef-y += pvr2d_drm.h - unifdef-y += r128_drm.h - unifdef-y += radeon_drm.h - unifdef-y += sis_drm.h -diff -Nurp linux-omap-2.6.28-omap1/include/drm/pvr2d_drm.h linux-omap-2.6.28-nokia1/include/drm/pvr2d_drm.h ---- linux-omap-2.6.28-omap1/include/drm/pvr2d_drm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/drm/pvr2d_drm.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,45 @@ -+/* pvr2d_drm.h -- Public header for the PVR2D helper module -*- linux-c -*- -+ * -+ * Copyright (C) 2008 Nokia Corporation. All rights reserved. 
-+ */ -+ -+#ifndef __PVR2D_DRM_H__ -+#define __PVR2D_DRM_H__ -+ -+ -+/* This wouldn't work with 64 bit userland */ -+struct drm_pvr2d_buf_lock { -+ uint32_t virt; -+ uint32_t length; -+ uint32_t phys_array; -+ uint32_t handle; -+}; -+ -+struct drm_pvr2d_buf_release { -+ uint32_t handle; -+}; -+ -+enum drm_pvr2d_cflush_type { -+ DRM_PVR2D_CFLUSH_FROM_GPU = 1, -+ DRM_PVR2D_CFLUSH_TO_GPU = 2 -+}; -+ -+struct drm_pvr2d_cflush { -+ enum drm_pvr2d_cflush_type type; -+ uint32_t virt; -+ uint32_t length; -+}; -+ -+#define DRM_PVR2D_BUF_LOCK 0x0 -+#define DRM_PVR2D_BUF_RELEASE 0x1 -+#define DRM_PVR2D_CFLUSH 0x2 -+ -+#define DRM_IOCTL_PVR2D_BUF_LOCK DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR2D_BUF_LOCK, \ -+ struct drm_pvr2d_buf_lock) -+#define DRM_IOCTL_PVR2D_BUF_RELEASE DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_BUF_RELEASE, \ -+ struct drm_pvr2d_buf_release) -+#define DRM_IOCTL_PVR2D_CFLUSH DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_CFLUSH, \ -+ struct drm_pvr2d_cflush) -+ -+ -+#endif /* __PVR2D_DRM_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/camera_button.h linux-omap-2.6.28-nokia1/include/linux/camera_button.h ---- linux-omap-2.6.28-omap1/include/linux/camera_button.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/camera_button.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,13 @@ -+/* -+ * Camera Button Interface -+ */ -+#ifndef _LINUX_CAMERA_BUTTON_H -+#define _LINUX_CAMERA_BUTTON_H -+ -+struct camera_button_platform_data { -+ int shutter; -+ int focus; -+}; -+ -+#endif /* _LINUX_CAMERA_BUTTON_H */ -+ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/clockchips.h linux-omap-2.6.28-nokia1/include/linux/clockchips.h ---- linux-omap-2.6.28-omap1/include/linux/clockchips.h 2011-06-22 13:14:24.003067665 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/clockchips.h 2011-06-22 13:19:33.253063270 +0200 -@@ -76,7 +76,7 @@ enum clock_event_nofitiers { - struct clock_event_device { - const char *name; - unsigned int features; -- unsigned long max_delta_ns; -+ unsigned long long max_delta_ns; - unsigned long min_delta_ns; - unsigned long mult; - int shift; -@@ -115,7 +115,7 @@ static inline unsigned long div_sc(unsig - } - - /* Clock event layer functions */ --extern unsigned long clockevent_delta2ns(unsigned long latch, -+extern unsigned long long clockevent_delta2ns(unsigned long latch, - struct clock_event_device *evt); - extern void clockevents_register_device(struct clock_event_device *dev); - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/cs-protocol.h linux-omap-2.6.28-nokia1/include/linux/cs-protocol.h ---- linux-omap-2.6.28-omap1/include/linux/cs-protocol.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/cs-protocol.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,128 @@ -+/* -+ * cs-protocol.h -+ * -+ * Part of the CMT speech driver. Protocol definitions. -+ * -+ * Copyright (C) 2008,2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Kai Vehmanen -+ * Original author: Peter Ujfalusi -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef _CS_PROTOCOL_H -+#define _CS_PROTOCOL_H -+ -+#include -+#include -+ -+/* chardev parameters -+ * ***********************************/ -+ -+#define CS_DEV_FILE_NAME "/dev/cmt_speech" -+ -+/* APE kernel <-> user space messages -+ * ***********************************/ -+ -+#define CS_CMD_SHIFT 28 -+#define CS_DOMAIN_SHIFT 24 -+ -+#define CS_CMD_MASK (0xff000000) -+#define CS_PARAM_MASK (0x00ffffff) -+ -+#define CS_CDSP_RESET_DONE (0x1<s_dirt = 0; -+} -+static inline int is_sb_dirty(const struct super_block *sb) -+{ -+ return sb->s_dirt; -+} -+ - /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ - #define fops_get(fops) \ - (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) -diff -Nurp linux-omap-2.6.28-omap1/include/linux/if_arp.h linux-omap-2.6.28-nokia1/include/linux/if_arp.h ---- linux-omap-2.6.28-omap1/include/linux/if_arp.h 2011-06-22 13:14:24.153067663 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/if_arp.h 2011-06-22 13:19:33.253063270 +0200 -@@ -87,6 +87,9 @@ - #define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ - #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ - -+#define ARPHRD_PHONET 820 /* PhoNet media type */ -+#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ -+ - #define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ - #define ARPHRD_NONE 0xFFFE /* zero header length */ - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/input.h linux-omap-2.6.28-nokia1/include/linux/input.h ---- linux-omap-2.6.28-omap1/include/linux/input.h 2011-06-22 13:14:24.163067663 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/input.h 2011-06-22 13:19:33.253063270 +0200 -@@ -659,6 +659,9 @@ struct input_absinfo { - #define SW_RADIO SW_RFKILL_ALL /* deprecated */ - #define SW_MICROPHONE_INSERT 0x04 /* set = inserted */ - #define SW_DOCK 0x05 /* set = plugged into dock */ -+#define SW_LINEOUT_INSERT 0x06 /* set = inserted */ -+#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */ -+#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */ - #define SW_MAX 0x0f - #define SW_CNT (SW_MAX+1) - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/i2c/tpa6130a2.h linux-omap-2.6.28-nokia1/include/linux/i2c/tpa6130a2.h ---- linux-omap-2.6.28-omap1/include/linux/i2c/tpa6130a2.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/i2c/tpa6130a2.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,34 @@ -+/* -+ * TPA6130A2 driver headers -+ * -+ * Copyright (C) Nokia Corporation -+ * -+ * Written by Timo Kokkonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
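/*
 * Part of the include/linux/fs.h hunk above was lost when this patch was
 * extracted; only mark_sb_clean() and is_sb_dirty() survive.  Judging from
 * the way the rest of the patch converts "sb->s_dirt = 1" into
 * mark_sb_dirty(sb), the missing helper presumably looks like this sketch
 * (a reconstruction, not verbatim patch content):
 */
static inline void mark_sb_dirty(struct super_block *sb)
{
        sb->s_dirt = 1;
}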
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifndef TPA6130A2_H -+#define TPA6130A2_H -+ -+struct tpa6130a2_platform_data { -+ int (*set_power)(int state); -+}; -+ -+void tpa6130a2_set_enabled(int enabled); -+int tpa6130a2_get_volume(void); -+int tpa6130a2_set_volume(int vol); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/i2c/tsl2563.h linux-omap-2.6.28-nokia1/include/linux/i2c/tsl2563.h ---- linux-omap-2.6.28-omap1/include/linux/i2c/tsl2563.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/i2c/tsl2563.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,9 @@ -+#ifndef __LINUX_TSL2563_H -+#define __LINUX_TSL2563_H -+ -+struct tsl2563_platform_data { -+ int cover_comp_gain; -+}; -+ -+#endif /* __LINUX_TSL2563_H */ -+ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/i2c/twl4030.h linux-omap-2.6.28-nokia1/include/linux/i2c/twl4030.h ---- linux-omap-2.6.28-omap1/include/linux/i2c/twl4030.h 2011-06-22 13:14:24.113067665 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/i2c/twl4030.h 2011-06-22 13:19:33.253063270 +0200 -@@ -243,6 +243,37 @@ int twl4030_i2c_read(u8 mod_no, u8 *valu - #define RES_STATE_SLEEP 0x8 - #define RES_STATE_OFF 0x0 - -+/* Power resources */ -+ -+#define RES_VAUX1 1 -+#define RES_VAUX2 2 -+#define RES_VAUX3 3 -+#define RES_VAUX4 4 -+#define RES_VMMC1 5 -+#define RES_VMMC2 6 -+#define RES_VPLL1 7 -+#define RES_VPLL2 8 -+#define RES_VSIM 9 -+#define RES_VDAC 10 -+#define RES_VINTANA1 11 -+#define RES_VINTANA2 12 -+#define RES_VINTDIG 13 -+#define RES_VIO 14 -+#define RES_VDD1 15 -+#define RES_VDD2 16 -+#define RES_VUSB_1v5 17 -+#define RES_VUSB_1v8 18 -+#define RES_VUSB_3v1 19 -+#define RES_VUSBCP 20 -+#define RES_REGEN 21 -+#define RES_NRES_PWRON 22 -+#define RES_CLKEN 23 -+#define RES_SYSEN 24 -+#define RES_HFCLKOUT 25 -+#define RES_32KCLKOUT 26 -+#define RES_RESET 27 -+#define RES_Main_Ref 28 -+ - /* - * Power Bus Message Format ... 
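/*
 * A small usage sketch for the TPA6130A2 headphone-amplifier interface
 * declared in the i2c/tpa6130a2.h hunk above.  The calling context and the
 * volume value are made up for illustration; the volume scale is defined by
 * the driver, not by this header.
 */
static void headphone_path_enable(void)
{
        tpa6130a2_set_enabled(1);       /* power up the amplifier */
        tpa6130a2_set_volume(40);       /* driver-specific volume step */
}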
these can be sent individually by Linux, - * but are usually part of downloaded scripts that are run when various -@@ -263,6 +294,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *valu - #define MSG_SINGULAR(devgrp, id, state) \ - ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) - -+ - /*----------------------------------------------------------------------*/ - - struct twl4030_bci_platform_data { -@@ -324,19 +356,40 @@ struct twl4030_ins { - u8 delay; - }; - -+#define MAX_EVENTS 4 -+ -+enum twl4030_event { -+ TRITON_SLEEP = 1, -+ TRITON_WAKEUP12 = 2, -+ TRITON_WAKEUP3 = 3, -+ TRITON_WRST = 4, -+}; -+ -+struct twl4030_script_event { -+ /* offset from the start of the script allow overlapping */ -+ u8 offset; -+ enum twl4030_event event; -+}; -+ - struct twl4030_script { - struct twl4030_ins *script; - unsigned size; -- u8 flags; --#define TRITON_WRST_SCRIPT (1<<0) --#define TRITON_WAKEUP12_SCRIPT (1<<1) --#define TRITON_WAKEUP3_SCRIPT (1<<2) --#define TRITON_SLEEP_SCRIPT (1<<3) -+ unsigned number_of_events; -+ struct twl4030_script_event events[MAX_EVENTS]; -+}; -+ -+struct twl4030_resconfig { -+ int resource; -+ int devgroup; -+ int type; -+ int type2; -+ int remap; - }; - - struct twl4030_power_data { - struct twl4030_script **scripts; -- unsigned size; -+ unsigned scripts_size; -+ struct twl4030_resconfig *resource_config; - }; - - struct twl4030_platform_data { -@@ -371,11 +424,25 @@ int twl4030_sih_setup(int module); - #define TWL4030_VDAC_DEV_GRP 0x3B - #define TWL4030_VDAC_DEDICATED 0x3E - #define TWL4030_VAUX1_DEV_GRP 0x17 -+#define TWL4030_VAUX1_TYPE 0x18 -+#define TWL4030_VAUX1_REMAP 0x19 - #define TWL4030_VAUX1_DEDICATED 0x1A - #define TWL4030_VAUX2_DEV_GRP 0x1B -+#define TWL4030_VAUX2_TYPE 0x1C -+#define TWL4030_VAUX2_REMAP 0x1D - #define TWL4030_VAUX2_DEDICATED 0x1E - #define TWL4030_VAUX3_DEV_GRP 0x1F -+#define TWL4030_VAUX3_TYPE 0x20 -+#define TWL4030_VAUX3_REMAP 0x21 - #define TWL4030_VAUX3_DEDICATED 0x22 -+#define TWL4030_VAUX4_DEV_GRP 0x23 -+#define TWL4030_VAUX4_TYPE 0x24 -+#define TWL4030_VAUX4_REMAP 0x25 -+#define TWL4030_VAUX4_DEDICATED 0x26 -+#define TWL4030_VMMC2_DEV_GRP 0x2b -+#define TWL4030_VMMC2_TYPE 0x2c -+#define TWL4030_VMMC2_REMAP 0x2d -+#define TWL4030_VMMC2_DEDICATED 0x2e - - #if defined(CONFIG_TWL4030_BCI_BATTERY) || \ - defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) -@@ -418,4 +485,6 @@ int twl4030_sih_setup(int module); - #define TWL4030_REG_VUSB1V8 18 - #define TWL4030_REG_VUSB3V1 19 - -+extern int twl4030_enable_regulator(int res); -+extern int twl4030_disable_regulator(int res); - #endif /* End of __TWL4030_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/leds-lp5523.h linux-omap-2.6.28-nokia1/include/linux/leds-lp5523.h ---- linux-omap-2.6.28-omap1/include/linux/leds-lp5523.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/leds-lp5523.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,18 @@ -+#ifndef __LINUX_LP5523_H -+#define __LINUX_LP5523_H -+ -+struct lp5523_led_config { -+ const char *name; -+ u8 led_nr; -+ u8 led_current; /* mA x10 */ -+}; -+ -+struct lp5523_platform_data { -+ struct lp5523_led_config *led_config; -+ u8 num_leds; -+ int irq; -+ int chip_en; -+}; -+ -+#endif /* __LINUX_LP5523_H */ -+ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/lis302dl.h linux-omap-2.6.28-nokia1/include/linux/lis302dl.h ---- linux-omap-2.6.28-omap1/include/linux/lis302dl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/lis302dl.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,9 @@ -+#ifndef 
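/*
 * With the reworked struct twl4030_script above, one instruction sequence
 * can be bound to several trigger events, each starting at an offset inside
 * the sequence.  A hypothetical board definition might look like the sketch
 * below; the instruction and device-group values are placeholders, not taken
 * from any real board file.
 */
static struct twl4030_ins sleep_on_seq[] = {
        /* put the HF clock output to sleep, then wait 2 ticks */
        { MSG_SINGULAR(1, RES_HFCLKOUT, RES_STATE_SLEEP), 2 },
};

static struct twl4030_script sleep_on_script = {
        .script           = sleep_on_seq,
        .size             = ARRAY_SIZE(sleep_on_seq),
        .number_of_events = 1,
        .events           = {
                { .offset = 0, .event = TRITON_SLEEP },
        },
};

static struct twl4030_script *board_scripts[] = {
        &sleep_on_script,
};

static struct twl4030_power_data board_power_data = {
        .scripts      = board_scripts,
        .scripts_size = ARRAY_SIZE(board_scripts),
};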
_LINUX_LIS302DL_H -+#define _LINUX_LIS302DL_H -+ -+struct lis302dl_platform_data { -+ int int1_gpio; -+ int int2_gpio; -+}; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/lzo.h linux-omap-2.6.28-nokia1/include/linux/lzo.h ---- linux-omap-2.6.28-omap1/include/linux/lzo.h 2011-06-22 13:10:46.873070752 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/lzo.h 2011-06-22 13:19:33.253063270 +0200 -@@ -17,6 +17,8 @@ - #define LZO1X_MEM_COMPRESS (16384 * sizeof(unsigned char *)) - #define LZO1X_1_MEM_COMPRESS LZO1X_MEM_COMPRESS - -+#define LZO1X_999_MEM_COMPRESS ((unsigned) (14 * 16384L * sizeof(short))) -+ - #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3) - - /* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */ -@@ -27,6 +29,27 @@ int lzo1x_1_compress(const unsigned char - int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len); - -+int -+lzo1x_999_compress ( const unsigned char *src, unsigned src_len, -+ unsigned char *dst, unsigned * dst_len, -+ void * wrkmem ); -+ -+int -+lzo1x_999_compress_dict ( const unsigned char *in , unsigned in_len, -+ unsigned char *out, unsigned * out_len, -+ void * wrkmem, -+ const unsigned char *dict, unsigned dict_len ); -+ -+typedef void ( *lzo_progress_callback_t) (unsigned, unsigned); -+ -+int -+lzo1x_999_compress_level ( const unsigned char *in , unsigned in_len, -+ unsigned char *out, unsigned * out_len, -+ void * wrkmem, -+ const unsigned char *dict, unsigned dict_len, -+ lzo_progress_callback_t cb, -+ int compression_level ); -+ - /* - * Return values (< 0 = Error) - */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/mmc/card.h linux-omap-2.6.28-nokia1/include/linux/mmc/card.h ---- linux-omap-2.6.28-omap1/include/linux/mmc/card.h 2011-06-22 13:14:24.213067662 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/mmc/card.h 2011-06-22 13:19:33.253063270 +0200 -@@ -40,6 +40,8 @@ struct mmc_csd { - }; - - struct mmc_ext_csd { -+ u8 rev; -+ unsigned int sa_timeout; /* Units: 100ns */ - unsigned int hs_max_dtr; - unsigned int sectors; - }; -diff -Nurp linux-omap-2.6.28-omap1/include/linux/mmc/core.h linux-omap-2.6.28-nokia1/include/linux/mmc/core.h ---- linux-omap-2.6.28-omap1/include/linux/mmc/core.h 2011-06-22 13:14:24.213067662 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/mmc/core.h 2011-06-22 13:19:33.253063270 +0200 -@@ -139,6 +139,7 @@ extern unsigned int mmc_align_data_size( - - extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); - extern void mmc_release_host(struct mmc_host *host); -+extern int mmc_try_claim_host(struct mmc_host *host); - - /** - * mmc_claim_host - exclusively claim a host -diff -Nurp linux-omap-2.6.28-omap1/include/linux/mmc/host.h linux-omap-2.6.28-nokia1/include/linux/mmc/host.h ---- linux-omap-2.6.28-omap1/include/linux/mmc/host.h 2011-06-22 13:14:24.213067662 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/mmc/host.h 2011-06-22 13:19:33.253063270 +0200 -@@ -41,6 +41,7 @@ struct mmc_ios { - - #define MMC_BUS_WIDTH_1 0 - #define MMC_BUS_WIDTH_4 2 -+#define MMC_BUS_WIDTH_8 3 - - unsigned char timing; /* timing specification used */ - -@@ -50,6 +51,35 @@ struct mmc_ios { - }; - - struct mmc_host_ops { -+ /* -+ * Hosts that support power saving can use the 'enable' and 'disable' -+ * methods to exit and enter power saving states. 'enable' is called -+ * when the host is claimed and 'disable' is called (or scheduled with -+ * a delay) when the host is released. 
The 'disable' is scheduled if -+ * the disable delay set by 'mmc_set_disable_delay()' is non-zero, -+ * otherwise 'disable' is called immediately. 'disable' may be -+ * scheduled repeatedly, to permit ever greater power saving at the -+ * expense of ever greater latency to re-enable. Rescheduling is -+ * determined by the return value of the 'disable' method. A positive -+ * value gives the delay in jiffies. -+ * -+ * In the case where a host function (like set_ios) may be called -+ * with or without the host claimed, enabling and disabling can be -+ * done directly and will nest correctly. Call 'mmc_host_enable()' and -+ * 'mmc_host_lazy_disable()' for this purpose, but note that these -+ * functions must be paired. -+ * -+ * Alternatively, 'mmc_host_enable()' may be paired with -+ * 'mmc_host_disable()' which calls 'disable' immediately. In this -+ * case the 'disable' method will be called with 'lazy' set to 0. -+ * This is mainly useful for error paths. -+ * -+ * Because lazy disble may be called from a work queue, the 'disable' -+ * method must claim the host when 'lazy' != 0, which will work -+ * correctly because recursion is detected and handled. -+ */ -+ int (*enable)(struct mmc_host *host); -+ int (*disable)(struct mmc_host *host, int lazy); - void (*request)(struct mmc_host *host, struct mmc_request *req); - /* - * Avoid calling these three functions too often or in a "fast path", -@@ -116,6 +146,16 @@ struct mmc_host { - #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ - #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ - #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ -+#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ -+#define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ -+#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. 
eMMC */ -+#define MMC_CAP_NOT_SDIO (1 << 9) /* Card cannot be SDIO */ -+#define MMC_CAP_NOT_SD (1 << 10) /* Card cannot be SD */ -+#define MMC_CAP_NOT_MMC (1 << 11) /* Card cannot be MMC */ -+ -+#define MMC_CAP_SDIO_ONLY (MMC_CAP_NOT_SD | MMC_CAP_NOT_MMC) -+#define MMC_CAP_SD_ONLY (MMC_CAP_NOT_SDIO | MMC_CAP_NOT_MMC) -+#define MMC_CAP_MMC_ONLY (MMC_CAP_NOT_SDIO | MMC_CAP_NOT_SD) - - /* host specific block data */ - unsigned int max_seg_size; /* see blk_queue_max_segment_size */ -@@ -140,9 +180,18 @@ struct mmc_host { - unsigned int removed:1; /* host is being removed */ - #endif - -+ /* Only used with MMC_CAP_DISABLE */ -+ int enabled; /* host is enabled */ -+ int nesting_cnt; /* "enable" nesting count */ -+ int en_dis_recurs; /* detect recursion */ -+ unsigned int disable_delay; /* disable delay in msecs */ -+ struct delayed_work disable; /* disabling work */ -+ - struct mmc_card *card; /* device attached to this host */ - - wait_queue_head_t wq; -+ struct task_struct *claimer; /* task that has host claimed */ -+ int claim_cnt; /* "claim" nesting count */ - - struct delayed_work detect; - -@@ -181,6 +230,9 @@ static inline void *mmc_priv(struct mmc_ - extern int mmc_suspend_host(struct mmc_host *, pm_message_t); - extern int mmc_resume_host(struct mmc_host *); - -+extern void mmc_power_save_host(struct mmc_host *host); -+extern void mmc_power_restore_host(struct mmc_host *host); -+ - extern void mmc_detect_change(struct mmc_host *, unsigned long delay); - extern void mmc_request_done(struct mmc_host *, struct mmc_request *); - -@@ -190,5 +242,19 @@ static inline void mmc_signal_sdio_irq(s - wake_up_process(host->sdio_irq_thread); - } - -+int mmc_card_awake(struct mmc_host *host); -+int mmc_card_sleep(struct mmc_host *host); -+int mmc_card_can_sleep(struct mmc_host *host); -+ -+int mmc_host_enable(struct mmc_host *host); -+int mmc_host_disable(struct mmc_host *host); -+int mmc_host_lazy_disable(struct mmc_host *host); -+ -+static inline void mmc_set_disable_delay(struct mmc_host *host, -+ unsigned int disable_delay) -+{ -+ host->disable_delay = disable_delay; -+} -+ - #endif - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/mmc/mmc.h linux-omap-2.6.28-nokia1/include/linux/mmc/mmc.h ---- linux-omap-2.6.28-omap1/include/linux/mmc/mmc.h 2011-06-22 13:14:24.213067662 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/mmc/mmc.h 2011-06-22 13:19:33.253063270 +0200 -@@ -31,6 +31,7 @@ - #define MMC_ALL_SEND_CID 2 /* bcr R2 */ - #define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ - #define MMC_SET_DSR 4 /* bc [31:16] RCA */ -+#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ - #define MMC_SWITCH 6 /* ac [31:0] See below R1b */ - #define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ - #define MMC_SEND_EXT_CSD 8 /* adtc R1 */ -@@ -127,6 +128,7 @@ - #define R1_STATUS(x) (x & 0xFFFFE000) - #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ - #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ -+#define R1_SWITCH_ERROR (1 << 7) /* sx, c */ - #define R1_APP_CMD (1 << 5) /* sr, c */ - - /* -@@ -254,6 +256,7 @@ struct _mmc_csd { - #define EXT_CSD_CARD_TYPE 196 /* RO */ - #define EXT_CSD_REV 192 /* RO */ - #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ -+#define EXT_CSD_S_A_TIMEOUT 217 - - /* - * EXT_CSD field definitions -diff -Nurp linux-omap-2.6.28-omap1/include/linux/mtd/mtd.h linux-omap-2.6.28-nokia1/include/linux/mtd/mtd.h ---- linux-omap-2.6.28-omap1/include/linux/mtd/mtd.h 2011-06-22 13:14:24.223067662 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/mtd/mtd.h 2011-06-22 
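/*
 * The mmc_host_ops comment above spells out how the new host power-saving
 * hooks are meant to be paired: mmc_host_enable() with mmc_host_lazy_disable()
 * in the normal path, mmc_host_disable() for error paths, and the disable is
 * only deferred when a non-zero delay was set with mmc_set_disable_delay().
 * A sketch of that calling convention; do_controller_work() is hypothetical
 * and only stands in for whatever the host function actually does.
 */
static int host_do_work(struct mmc_host *host)
{
        int err;

        err = mmc_host_enable(host);            /* leave power saving */
        if (err)
                return err;

        err = do_controller_work(host);         /* hypothetical work */
        if (err) {
                mmc_host_disable(host);         /* immediate disable on error */
                return err;
        }

        mmc_host_lazy_disable(host);            /* re-schedule power saving */
        return 0;
}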
13:19:33.253063270 +0200 -@@ -219,6 +219,14 @@ struct mtd_info { - * supposed to be called by MTD users */ - int (*get_device) (struct mtd_info *mtd); - void (*put_device) (struct mtd_info *mtd); -+ -+ uint32_t read_cnt; -+ uint32_t read_sz; -+ uint32_t write_cnt; -+ uint32_t write_sz; -+ uint32_t erase_cnt; -+ uint32_t erase_sz; -+ uint32_t other_cnt; - }; - - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/mtd/onenand.h linux-omap-2.6.28-nokia1/include/linux/mtd/onenand.h ---- linux-omap-2.6.28-omap1/include/linux/mtd/onenand.h 2011-06-22 13:14:24.223067662 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/mtd/onenand.h 2011-06-22 13:19:33.253063270 +0200 -@@ -176,6 +176,7 @@ struct onenand_chip { - * OneNAND Flash Manufacturer ID Codes - */ - #define ONENAND_MFR_SAMSUNG 0xec -+#define ONENAND_MFR_NUMONYX 0x20 - - /** - * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure -diff -Nurp linux-omap-2.6.28-omap1/include/linux/nokia-av.h linux-omap-2.6.28-nokia1/include/linux/nokia-av.h ---- linux-omap-2.6.28-omap1/include/linux/nokia-av.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/nokia-av.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,29 @@ -+/* -+ * nokia-av.h - Nokia AV accessory detection -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -+ */ -+ -+#ifndef __NOKIA_AV_H -+#define __NOKIA_AV_H -+ -+struct nokia_av_platform_data { -+ int eci0_gpio; -+ int eci1_gpio; -+ int headph_gpio; -+}; -+ -+#endif /* __NOKIA_AV_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/omapfb.h linux-omap-2.6.28-nokia1/include/linux/omapfb.h ---- linux-omap-2.6.28-omap1/include/linux/omapfb.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/omapfb.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,450 @@ -+/* -+ * Framebuffer driver for TI OMAP boards -+ * -+ * Copyright (C) 2004 Nokia Corporation -+ * Author: Imre Deak -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -+ */ -+ -+#ifndef __OMAPFB_H -+#define __OMAPFB_H -+ -+#include -+ -+#include -+#include -+ -+/* IOCTL commands. 
*/ -+ -+#define OMAP_IOW(num, dtype) _IOW('O', num, dtype) -+#define OMAP_IOR(num, dtype) _IOR('O', num, dtype) -+#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype) -+#define OMAP_IO(num) _IO('O', num) -+ -+#define OMAPFB_MIRROR OMAP_IOW(31, int) -+#define OMAPFB_SYNC_GFX OMAP_IO(37) -+#define OMAPFB_VSYNC OMAP_IO(38) -+#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int) -+#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps) -+#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int) -+#define OMAPFB_LCD_TEST OMAP_IOW(45, int) -+#define OMAPFB_CTRL_TEST OMAP_IOW(46, int) -+#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old) -+#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key) -+#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key) -+#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info) -+#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info) -+#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window) -+#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info) -+#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info) -+#define OMAPFB_WAITFORVSYNC OMAP_IO(57) -+#define OMAPFB_MEMORY_READ OMAP_IOR(58, struct omapfb_memory_read) -+#define OMAPFB_GET_OVERLAY_COLORMODE OMAP_IOR(59, struct omapfb_ovl_colormode) -+#define OMAPFB_GET_VRAM_INFO OMAP_IOR(61, struct omapfb_vram_info) -+ -+#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff -+#define OMAPFB_CAPS_LCDC_MASK 0x00fff000 -+#define OMAPFB_CAPS_PANEL_MASK 0xff000000 -+ -+#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000 -+#define OMAPFB_CAPS_TEARSYNC 0x00002000 -+#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000 -+#define OMAPFB_CAPS_PLANE_SCALE 0x00008000 -+#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000 -+#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000 -+#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000 -+#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000 -+#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000 -+ -+/* Values from DSP must map to lower 16-bits */ -+#define OMAPFB_FORMAT_MASK 0x00ff -+#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100 -+#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200 -+#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400 -+#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800 -+#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000 -+ -+#define OMAPFB_EVENT_READY 1 -+#define OMAPFB_EVENT_DISABLED 2 -+ -+#define OMAPFB_MEMTYPE_SDRAM 0 -+#define OMAPFB_MEMTYPE_SRAM 1 -+#define OMAPFB_MEMTYPE_MAX 1 -+ -+enum omapfb_color_format { -+ OMAPFB_COLOR_RGB565 = 0, -+ OMAPFB_COLOR_YUV422, -+ OMAPFB_COLOR_YUV420, -+ OMAPFB_COLOR_CLUT_8BPP, -+ OMAPFB_COLOR_CLUT_4BPP, -+ OMAPFB_COLOR_CLUT_2BPP, -+ OMAPFB_COLOR_CLUT_1BPP, -+ OMAPFB_COLOR_RGB444, -+ OMAPFB_COLOR_YUY422, -+ -+ OMAPFB_COLOR_ARGB16, -+ OMAPFB_COLOR_RGB24U, /* RGB24, 32-bit container */ -+ OMAPFB_COLOR_RGB24P, /* RGB24, 24-bit container */ -+ OMAPFB_COLOR_ARGB32, -+ OMAPFB_COLOR_RGBA32, -+ OMAPFB_COLOR_RGBX32, -+}; -+ -+struct omapfb_update_window { -+ __u32 x, y; -+ __u32 width, height; -+ __u32 format; -+ __u32 out_x, out_y; -+ __u32 out_width, out_height; -+ __u32 reserved[8]; -+}; -+ -+struct omapfb_update_window_old { -+ __u32 x, y; -+ __u32 width, height; -+ __u32 format; -+}; -+ -+enum omapfb_plane { -+ OMAPFB_PLANE_GFX = 0, -+ OMAPFB_PLANE_VID1, -+ OMAPFB_PLANE_VID2, -+}; -+ -+enum omapfb_channel_out { -+ OMAPFB_CHANNEL_OUT_LCD = 0, -+ OMAPFB_CHANNEL_OUT_DIGIT, -+}; -+ -+#define OMAPFB_CLONE_ENABLED 0x4 -+#define OMAPFB_CLONE_MASK 0x3 -+ -+struct omapfb_plane_info { -+ __u32 pos_x; -+ __u32 pos_y; -+ __u8 enabled; -+ __u8 
channel_out; -+ __u8 mirror; -+ __u8 clone_idx; /* if OMAPFB_CLONE_ENABLED bit is set -+ * cloned plane will be stored under -+ * OMAPFB_CLONE_MASK */ -+ __u32 out_width; -+ __u32 out_height; -+ __u32 reserved2[12]; -+}; -+ -+struct omapfb_mem_info { -+ __u32 size; -+ __u8 type; -+ __u8 reserved[3]; -+}; -+ -+struct omapfb_caps { -+ __u32 ctrl; -+ __u32 plane_color; -+ __u32 wnd_color; -+}; -+ -+enum omapfb_color_key_type { -+ OMAPFB_COLOR_KEY_DISABLED = 0, -+ OMAPFB_COLOR_KEY_GFX_DST, -+ OMAPFB_COLOR_KEY_VID_SRC, -+}; -+ -+struct omapfb_color_key { -+ __u8 channel_out; -+ __u32 background; -+ __u32 trans_key; -+ __u8 key_type; -+}; -+ -+enum omapfb_update_mode { -+ OMAPFB_UPDATE_DISABLED = 0, -+ OMAPFB_AUTO_UPDATE, -+ OMAPFB_MANUAL_UPDATE -+}; -+ -+struct omapfb_memory_read { -+ __u16 x; -+ __u16 y; -+ __u16 w; -+ __u16 h; -+ size_t buffer_size; -+ void __user *buffer; -+}; -+ -+struct omapfb_ovl_colormode { -+ __u8 overlay_idx; -+ __u8 mode_idx; -+ __u32 bits_per_pixel; -+ __u32 nonstd; -+ struct fb_bitfield red; -+ struct fb_bitfield green; -+ struct fb_bitfield blue; -+ struct fb_bitfield transp; -+}; -+ -+struct omapfb_vram_info { -+ __u32 total; -+ __u32 free; -+ __u32 largest_free_block; -+ __u32 reserved[5]; -+}; -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+ -+#include -+ -+#define OMAP_LCDC_INV_VSYNC 0x0001 -+#define OMAP_LCDC_INV_HSYNC 0x0002 -+#define OMAP_LCDC_INV_PIX_CLOCK 0x0004 -+#define OMAP_LCDC_INV_OUTPUT_EN 0x0008 -+#define OMAP_LCDC_HSVS_RISING_EDGE 0x0010 -+#define OMAP_LCDC_HSVS_OPPOSITE 0x0020 -+ -+#define OMAP_LCDC_SIGNAL_MASK 0x003f -+ -+#define OMAP_LCDC_PANEL_TFT 0x0100 -+ -+#define OMAPFB_PLANE_XRES_MIN 8 -+#define OMAPFB_PLANE_YRES_MIN 8 -+ -+#ifdef CONFIG_ARCH_OMAP1 -+#define OMAPFB_PLANE_NUM 1 -+#else -+#define OMAPFB_PLANE_NUM 3 -+#endif -+ -+struct omapfb_device; -+ -+struct lcd_panel { -+ const char *name; -+ int config; /* TFT/STN, signal inversion */ -+ int bpp; /* Pixel format in fb mem */ -+ int data_lines; /* Lines on LCD HW interface */ -+ -+ int x_res, y_res; -+ int pixel_clock; /* In kHz */ -+ int hsw; /* Horizontal synchronization -+ pulse width */ -+ int hfp; /* Horizontal front porch */ -+ int hbp; /* Horizontal back porch */ -+ int vsw; /* Vertical synchronization -+ pulse width */ -+ int vfp; /* Vertical front porch */ -+ int vbp; /* Vertical back porch */ -+ int acb; /* ac-bias pin frequency */ -+ int pcd; /* pixel clock divider. 
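/*
 * User-space sketch of the manual-update path exposed by the omapfb.h header
 * added above: on a manual-update panel an application pushes a dirty
 * rectangle with OMAPFB_UPDATE_WINDOW.  The device node and geometry are made
 * up for the example.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/omapfb.h>

static int push_rect(int x, int y, int w, int h)
{
        struct omapfb_update_window win;
        int fd, ret;

        fd = open("/dev/fb0", O_RDWR);
        if (fd < 0)
                return -1;

        memset(&win, 0, sizeof(win));
        win.x = x;
        win.y = y;
        win.width = w;
        win.height = h;
        win.out_x = x;
        win.out_y = y;
        win.out_width = w;
        win.out_height = h;
        win.format = OMAPFB_COLOR_RGB565 | OMAPFB_FORMAT_FLAG_TEARSYNC;

        ret = ioctl(fd, OMAPFB_UPDATE_WINDOW, &win);
        close(fd);
        return ret;
}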
-+ Obsolete use pixel_clock instead */ -+ -+ int (*init) (struct lcd_panel *panel, -+ struct omapfb_device *fbdev); -+ void (*cleanup) (struct lcd_panel *panel); -+ int (*enable) (struct lcd_panel *panel); -+ void (*disable) (struct lcd_panel *panel); -+ unsigned long (*get_caps) (struct lcd_panel *panel); -+ int (*set_bklight_level)(struct lcd_panel *panel, -+ unsigned int level); -+ unsigned int (*get_bklight_level)(struct lcd_panel *panel); -+ unsigned int (*get_bklight_max) (struct lcd_panel *panel); -+ int (*run_test) (struct lcd_panel *panel, int test_num); -+}; -+ -+struct extif_timings { -+ int cs_on_time; -+ int cs_off_time; -+ int we_on_time; -+ int we_off_time; -+ int re_on_time; -+ int re_off_time; -+ int we_cycle_time; -+ int re_cycle_time; -+ int cs_pulse_width; -+ int access_time; -+ -+ int clk_div; -+ -+ u32 tim[5]; /* set by extif->convert_timings */ -+ -+ int converted; -+}; -+ -+struct lcd_ctrl_extif { -+ int (*init) (struct omapfb_device *fbdev); -+ void (*cleanup) (void); -+ void (*get_clk_info) (u32 *clk_period, u32 *max_clk_div); -+ unsigned long (*get_max_tx_rate)(void); -+ int (*convert_timings) (struct extif_timings *timings); -+ void (*set_timings) (const struct extif_timings *timings); -+ void (*set_bits_per_cycle)(int bpc); -+ void (*write_command) (const void *buf, unsigned int len); -+ void (*read_data) (void *buf, unsigned int len); -+ void (*write_data) (const void *buf, unsigned int len); -+ void (*transfer_area) (int width, int height, -+ void (callback)(void * data), void *data); -+ int (*setup_tearsync) (unsigned pin_cnt, -+ unsigned hs_pulse_time, unsigned vs_pulse_time, -+ int hs_pol_inv, int vs_pol_inv, int div); -+ int (*enable_tearsync) (int enable, unsigned line); -+ -+ unsigned long max_transmit_size; -+}; -+ -+struct omapfb_notifier_block { -+ struct notifier_block nb; -+ void *data; -+ int plane_idx; -+}; -+ -+typedef int (*omapfb_notifier_callback_t)(struct notifier_block *, -+ unsigned long event, -+ void *fbi); -+ -+struct omapfb_mem_region { -+ u32 paddr; -+ void __iomem *vaddr; -+ unsigned long size; -+ u8 type; /* OMAPFB_PLANE_MEM_* */ -+ enum omapfb_color_format format;/* OMAPFB_COLOR_* */ -+ unsigned format_used:1; /* Must be set when format is set. 
-+ * Needed b/c of the badly chosen 0 -+ * base for OMAPFB_COLOR_* values -+ */ -+ unsigned alloc:1; /* allocated by the driver */ -+ unsigned map:1; /* kernel mapped by the driver */ -+}; -+ -+struct omapfb_mem_desc { -+ int region_cnt; -+ struct omapfb_mem_region region[OMAPFB_PLANE_NUM]; -+}; -+ -+struct lcd_ctrl { -+ const char *name; -+ void *data; -+ -+ int (*init) (struct omapfb_device *fbdev, -+ int ext_mode, -+ struct omapfb_mem_desc *req_md); -+ void (*cleanup) (void); -+ void (*bind_client) (struct omapfb_notifier_block *nb); -+ void (*get_caps) (int plane, struct omapfb_caps *caps); -+ int (*set_update_mode)(enum omapfb_update_mode mode); -+ enum omapfb_update_mode (*get_update_mode)(void); -+ int (*setup_plane) (int plane, int enable, -+ int channel_out, -+ unsigned long baddr, -+ int screen_width, -+ int pos_x, int pos_y, int width, -+ int height, int out_width, -+ int out_height, int color_mode, -+ int rotate); -+ int (*setup_mem) (int plane, size_t size, -+ int mem_type, unsigned long *paddr); -+ int (*mmap) (struct fb_info *info, -+ struct vm_area_struct *vma); -+ int (*update_window) (struct fb_info *fbi, -+ struct omapfb_update_window *win, -+ void (*callback)(void *), -+ void *callback_data); -+ int (*sync) (enum omapfb_channel_out channel_out); -+ void (*suspend) (void); -+ void (*resume) (void); -+ int (*run_test) (int test_num); -+ int (*setcolreg) (u_int regno, u16 red, u16 green, -+ u16 blue, u16 transp, -+ int update_hw_mem); -+ int (*set_color_key) (struct omapfb_color_key *ck); -+ int (*get_color_key) (struct omapfb_color_key *ck); -+}; -+ -+enum omapfb_state { -+ OMAPFB_DISABLED = 0, -+ OMAPFB_SUSPENDED= 99, -+ OMAPFB_ACTIVE = 100 -+}; -+ -+struct omapfb_plane_struct { -+ int idx; -+ struct omapfb_plane_info info; -+ enum omapfb_color_format color_mode; -+ struct omapfb_device *fbdev; -+ struct lcd_panel *panel; -+}; -+ -+struct omapfb_device { -+ int state; -+ int ext_lcdc; /* Using external -+ LCD controller */ -+ struct mutex rqueue_mutex; -+ -+ int palette_size; -+ u32 pseudo_palette[17]; -+ -+ struct lcd_panel *lcd_panel; /* LCD panel */ -+ const struct lcd_ctrl *ctrl; /* LCD controller */ -+ const struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */ -+ struct lcd_ctrl_extif *ext_if; /* LCD ctrl external -+ interface */ -+ struct lcd_panel *digital_panel; /* panel for VENC */ -+ -+ struct device *dev; -+ struct fb_var_screeninfo new_var; /* for mode changes */ -+ -+ struct omapfb_mem_desc mem_desc; -+ struct fb_info *fb_info[OMAPFB_PLANE_NUM]; -+}; -+ -+struct omapfb_platform_data { -+ struct omap_lcd_config lcd; -+ struct omapfb_mem_desc mem_desc; -+ void *ctrl_platform_data; -+}; -+ -+#ifdef CONFIG_ARCH_OMAP1 -+extern struct lcd_ctrl omap1_lcd_ctrl; -+#else -+extern struct lcd_ctrl omap2_disp_ctrl; -+#endif -+ -+extern void omapfb_set_platform_data(struct omapfb_platform_data *data); -+ -+extern void omapfb_reserve_sdram(void); -+extern void omapfb_register_panel(struct lcd_panel *panel); -+extern void omapfb_unregister_panel(struct lcd_panel *panel, -+ struct omapfb_device *fbdev); -+extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval); -+extern void omapfb_notify_clients(struct omapfb_device *fbdev, -+ unsigned long event); -+extern int omapfb_register_client(struct omapfb_notifier_block *nb, -+ omapfb_notifier_callback_t callback, -+ void *callback_data); -+extern int omapfb_unregister_client(struct omapfb_notifier_block *nb); -+extern int omapfb_update_window_async(struct fb_info *fbi, -+ struct omapfb_update_window *win, -+ 
void (*callback)(void *), -+ void *callback_data); -+ -+/* in arch/arm/plat-omap/fb.c */ -+extern void omapfb_set_ctrl_platform_data(void *pdata); -+ -+#endif /* __KERNEL__ */ -+ -+#endif /* __OMAPFB_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/omap34xxcam-daemon.h linux-omap-2.6.28-nokia1/include/linux/omap34xxcam-daemon.h ---- linux-omap-2.6.28-omap1/include/linux/omap34xxcam-daemon.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/omap34xxcam-daemon.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,133 @@ -+/* -+ * drivers/media/video/omap/omap34xcam-daemon.h -+ * -+ * OMAP 3 camera driver daemon support. -+ * -+ * Copyright (C) 2008 Nokia Corporation. -+ * -+ * Contact: Sakari Ailus -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifndef OMAP34XXCAM_DAEMON_H -+#define OMAP34XXCAM_DAEMON_H -+ -+#define OMAP34XXCAM_DAEMON_REQ_MAX_SIZE (1 << 18) -+#define OMAP34XXCAM_DAEMON_REQ_STACK_ALLOC 2048 -+ -+#include -+ -+struct omap34xxcam_videodev; -+ -+/* User application -> driver */ -+#define VIDIOC_DAEMON_REQ \ -+ _IOWR('V', BASE_VIDIOC_PRIVATE + 20, struct omap34xxcam_daemon_req) -+ -+/* Daemon -> driver */ -+#define VIDIOC_DAEMON_INSTALL \ -+ _IO('V', BASE_VIDIOC_PRIVATE + 21) -+#define VIDIOC_DAEMON_SET_EVENTS \ -+ _IOW('V', BASE_VIDIOC_PRIVATE + 22, __u32) -+#define VIDIOC_DAEMON_DAEMON_REQ_GET \ -+ _IOWR('V', BASE_VIDIOC_PRIVATE + 23, \ -+ struct omap34xxcam_daemon_daemon_req) -+#define VIDIOC_DAEMON_DAEMON_REQ_COMPLETE \ -+ _IOW('V', BASE_VIDIOC_PRIVATE + 24, \ -+ struct omap34xxcam_daemon_daemon_req) -+ -+/* -+ * @max_size: allocated size of the blob -+ * @size: actual size of the blob -+ */ -+struct omap34xxcam_daemon_req { -+ __u32 max_size; -+ __u32 size; -+ __u32 type; -+ void *blob; -+}; -+ -+/* -+ * @sync: Is this a synchronous req? -+ * @req: request -+ */ -+struct omap34xxcam_daemon_daemon_req { -+ union { -+ __u32 sync; /* get */ -+ int rval; /* complete */ -+ } u; -+ struct omap34xxcam_daemon_req req; -+}; -+ -+#include "omap34xxcam-daemon-req.h" -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+#define OMAP34XXCAM_DAEMON_SYNC (1<<0) -+#define OMAP34XXCAM_DAEMON_ASYNC (1<<1) -+ -+#define OMAP34XXCAM_DAEMON_REQUEST_USER_START 1 -+#define OMAP34XXCAM_DAEMON_REQUEST_DAEMON_START 2 -+#define OMAP34XXCAM_DAEMON_REQUEST_DAEMON_FINISH 3 -+#define OMAP34XXCAM_DAEMON_REQUEST_USER_FINISH 0 -+ -+struct omap34xxcam_daemon { -+ /* Synchronise access to daemon / this structure. */ -+ struct mutex mutex; -+ /* Daemon file if it's there. */ -+ struct file *file; -+ /* Mutual exclusion from daemon events. 
*/ -+ struct mutex request_mutex; -+ int request_state; -+ wait_queue_head_t poll_wait; -+ struct semaphore begin; -+ struct semaphore finish; -+ struct omap34xxcam_daemon_req *req; -+ int req_rval; -+ /* event_lock serialises the rest of the fields */ -+ spinlock_t event_lock; -+ u32 req_pending; /* sync / async */ -+ u32 event_mask; -+ struct omap34xxcam_daemon_event event; -+}; -+ -+int omap34xxcam_daemon_req(struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_req *req, -+ struct file *file); -+int omap34xxcam_daemon_req_user(struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_req *_req, -+ struct file *file); -+int omap34xxcam_daemon_daemon_req_get_user( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *get, -+ struct file *file); -+int omap34xxcam_daemon_daemon_req_complete_user( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *complete, -+ struct file *file); -+void omap34xxcam_daemon_init(struct omap34xxcam_videodev *vdev); -+int omap34xxcam_daemon_install(struct file *file); -+int omap34xxcam_daemon_release(struct omap34xxcam_videodev *vdev, -+ struct file *file); -+void omap34xxcam_daemon_event_cb(unsigned long status, int (*arg1) -+ (struct videobuf_buffer *vb), void *arg2); -+int omap34xxcam_daemon_set_events(struct omap34xxcam_videodev *vdev, u32 *mask, -+ struct file *file); -+ -+#endif /* __KERNEL__ */ -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/omap34xxcam-daemon-req.h linux-omap-2.6.28-nokia1/include/linux/omap34xxcam-daemon-req.h ---- linux-omap-2.6.28-omap1/include/linux/omap34xxcam-daemon-req.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/omap34xxcam-daemon-req.h 2011-06-22 13:19:33.253063270 +0200 -@@ -0,0 +1,77 @@ -+/* -+ * drivers/media/video/omap/omap34xcam-daemon-req.h -+ * -+ * OMAP 3 camera driver daemon support. -+ * -+ * Copyright (C) 2008 Nokia Corporation. -+ * -+ * Contact: Sakari Ailus -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifndef OMAP34XXCAM_DAEMON_REQ_H -+#define OMAP34XXCAM_DAEMON_REQ_H -+ -+/* Synchronous requests */ -+#define OMAP34XXCAM_DAEMON_REQ_HW_RECONFIG 1000 -+#define OMAP34XXCAM_DAEMON_REQ_HW_INIT 1001 -+ -+/* -+ * Please start numbering requests defined in daemon source code from -+ * this. 
-+ */ -+#define OMAP34XXCAM_DAEMON_REQ_PRIV_START 10000 -+ -+/* Asynchronous requests */ -+#define OMAP34XXCAM_DAEMON_REQ_EVENTS 10 -+ -+#define OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMON (1<<0) -+#define OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMOFF (1<<1) -+#define OMAP34XXCAM_DAEMON_HW_RECONFIG_CROP (1<<2) -+#define OMAP34XXCAM_DAEMON_HW_RECONFIG_FMT (1<<4) -+struct omap34xxcam_daemon_req_hw_reconfig { -+ __u32 mask; -+}; -+ -+/* Driver events */ -+#define OMAP34XXCAM_DAEMON_EVENT_HIST_DONE (1<<0) -+#define OMAP34XXCAM_DAEMON_EVENT_H3A_AWB_DONE (1<<1) -+#define OMAP34XXCAM_DAEMON_EVENT_H3A_AF_DONE (1<<2) -+#define OMAP34XXCAM_DAEMON_EVENT_HS_VS (1<<3) -+struct omap34xxcam_daemon_event { -+ __u32 mask; -+ struct timeval hist_done_stamp; -+ struct timeval h3a_awb_done_stamp; -+ struct timeval h3a_af_done_stamp; -+ struct timeval hs_vs_stamp; -+}; -+ -+#ifdef __KERNEL__ -+ -+int omap34xxcam_daemon_req_hw_init(struct omap34xxcam_videodev *vdev); -+int omap34xxcam_daemon_req_hw_reconfig(struct omap34xxcam_videodev *vdev, -+ u32 what); -+ -+int omap34xxcam_daemon_daemon_req_sync( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *get); -+int omap34xxcam_daemon_daemon_req_async( -+ struct omap34xxcam_videodev *vdev, -+ struct omap34xxcam_daemon_daemon_req *get); -+ -+#endif -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/serial.h linux-omap-2.6.28-nokia1/include/linux/serial.h ---- linux-omap-2.6.28-omap1/include/linux/serial.h 2011-06-22 13:14:24.403067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/serial.h 2011-06-22 13:19:33.263063268 +0200 -@@ -10,8 +10,9 @@ - #ifndef _LINUX_SERIAL_H - #define _LINUX_SERIAL_H - --#ifdef __KERNEL__ - #include -+ -+#ifdef __KERNEL__ - #include - - /* -diff -Nurp linux-omap-2.6.28-omap1/include/linux/serial_reg.h linux-omap-2.6.28-nokia1/include/linux/serial_reg.h ---- linux-omap-2.6.28-omap1/include/linux/serial_reg.h 2011-06-22 13:14:24.403067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/serial_reg.h 2011-06-22 13:19:33.263063268 +0200 -@@ -111,6 +111,7 @@ - #define UART_MCR_DTR 0x01 /* DTR complement */ - - #define UART_LSR 5 /* In: Line Status Register */ -+#define UART_LSR_RX_FIFO_STS 0x80 /* RX fifo status */ - #define UART_LSR_TEMT 0x40 /* Transmitter empty */ - #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ - #define UART_LSR_BI 0x10 /* Break interrupt indicator */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/sched.h linux-omap-2.6.28-nokia1/include/linux/sched.h ---- linux-omap-2.6.28-omap1/include/linux/sched.h 2011-06-22 13:14:24.403067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/sched.h 2011-06-22 13:19:33.263063268 +0200 -@@ -631,7 +631,6 @@ struct user_struct { - atomic_t inotify_devs; /* How many inotify devs does this user have opened? 
*/ - #endif - #ifdef CONFIG_EPOLL -- atomic_t epoll_devs; /* The number of epoll descriptors currently open */ - atomic_t epoll_watches; /* The number of file descriptors currently watched */ - #endif - #ifdef CONFIG_POSIX_MQUEUE -diff -Nurp linux-omap-2.6.28-omap1/include/linux/slub_def.h linux-omap-2.6.28-nokia1/include/linux/slub_def.h ---- linux-omap-2.6.28-omap1/include/linux/slub_def.h 2011-06-22 13:14:24.433067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/slub_def.h 2011-06-22 13:19:33.263063268 +0200 -@@ -139,8 +139,10 @@ static __always_inline int kmalloc_index - return KMALLOC_SHIFT_LOW; - - #if KMALLOC_MIN_SIZE <= 64 -+#if KMALLOC_MIN_SIZE <= 32 - if (size > 64 && size <= 96) - return 1; -+#endif - if (size > 128 && size <= 192) - return 2; - #endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/spi/tsc2005.h linux-omap-2.6.28-nokia1/include/linux/spi/tsc2005.h ---- linux-omap-2.6.28-omap1/include/linux/spi/tsc2005.h 2011-06-22 13:14:24.443067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/spi/tsc2005.h 2011-06-22 13:19:33.263063268 +0200 -@@ -4,9 +4,6 @@ - #include - - struct tsc2005_platform_data { -- s16 reset_gpio; -- s16 dav_gpio; -- s16 pen_int_gpio; - u16 ts_x_plate_ohm; - u32 ts_stab_time; /* voltage settling time */ - u8 ts_hw_avg; /* HW assiseted averaging. Can be -@@ -23,7 +20,11 @@ struct tsc2005_platform_data { - u32 ts_y_max; - u32 ts_y_fudge; - -+ u32 esd_timeout; /* msec of inactivity before we check */ -+ - unsigned ts_ignore_last : 1; -+ -+ void (*set_reset)(bool enable); - }; - - #endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/spi/wl12xx.h linux-omap-2.6.28-nokia1/include/linux/spi/wl12xx.h ---- linux-omap-2.6.28-omap1/include/linux/spi/wl12xx.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/spi/wl12xx.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,8 @@ -+#ifndef _LINUX_SPI_WL12XX_H -+#define _LINUX_SPI_WL12XX_H -+ -+struct wl12xx_platform_data { -+ void (*set_power)(bool enable); -+}; -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/include/linux/ssi_driver_if.h linux-omap-2.6.28-nokia1/include/linux/ssi_driver_if.h ---- linux-omap-2.6.28-omap1/include/linux/ssi_driver_if.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/ssi_driver_if.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,164 @@ -+/* -+ * ssi_driver_if.h -+ * -+ * Header for the SSI driver low level interface. -+ * -+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Carlos Chinea -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+#ifndef __SSI_H__ -+#define __SSI_H__ -+ -+#include -+#include -+#include -+ -+/* The number of ports handled by the driver. 
(MAX:2) */ -+#define SSI_MAX_PORTS 1 -+ -+#define SSI_MAX_FRAME_SIZE 0x1f -+#define SSI_MAX_RX_TIMEOUT 0x1ff -+#define SSI_MAX_TX_DIVISOR 0x7f -+ -+#define ANY_SSI_CONTROLLER -1 -+#define ANY_CHANNEL -1 -+#define CHANNEL(channel) (1 << (channel)) -+ -+enum { -+ SSI_EVENT_BREAK_DETECTED = 0, -+ SSI_EVENT_ERROR, -+ SSI_EVENT_PRE_SPEED_CHANGE, -+ SSI_EVENT_POST_SPEED_CHANGE, -+ SSI_EVENT_CAWAKE_UP, -+ SSI_EVENT_CAWAKE_DOWN, -+ SSI_EVENT_SSR_DATAAVAILABLE, -+}; -+ -+enum { -+ SSI_IOCTL_WAKE_UP, -+ SSI_IOCTL_WAKE_DOWN, -+ SSI_IOCTL_SEND_BREAK, -+ SSI_IOCTL_WAKE, -+ SSI_IOCTL_FLUSH_RX, -+ SSI_IOCTL_FLUSH_TX, -+ SSI_IOCTL_CAWAKE, -+ SSI_IOCTL_SET_RX, -+ SSI_IOCTL_GET_RX, -+ SSI_IOCTL_SET_TX, -+ SSI_IOCTL_GET_TX, -+ SSI_IOCTL_TX_CH_FULL, -+ SSI_IOCTL_CH_DATAACCEPT, -+}; -+ -+/* Forward references */ -+struct ssi_device; -+struct ssi_channel; -+ -+/* DPS */ -+struct sst_ctx { -+ u32 mode; -+ u32 frame_size; -+ u32 divisor; -+ u32 arb_mode; -+ u32 channels; -+}; -+ -+struct ssr_ctx { -+ u32 mode; -+ u32 frame_size; -+ u32 timeout; -+ u32 channels; -+}; -+ -+struct port_ctx { -+ u32 sys_mpu_enable[2]; -+ struct sst_ctx sst; -+ struct ssr_ctx ssr; -+}; -+ -+/** -+ * struct ctrl_ctx - ssi controller regs context -+ * @loss_count: ssi last loss count -+ * @sysconfig: keeps sysconfig reg state -+ * @pctx: array of port context -+ */ -+struct ctrl_ctx { -+ int loss_count; -+ u32 sysconfig; -+ u32 gdd_gcr; -+ struct port_ctx *pctx; -+}; -+/* END DPS */ -+ -+struct ssi_platform_data { -+ void (*set_min_bus_tput)(struct device *dev, u8 agent_id, -+ unsigned long r); -+ int (*clk_notifier_register)(struct clk *clk, -+ struct notifier_block *nb); -+ int (*clk_notifier_unregister)(struct clk *clk, -+ struct notifier_block *nb); -+ u8 num_ports; -+ struct ctrl_ctx ctx; /* FIXME */ -+}; -+ -+struct ssi_device { -+ int n_ctrl; -+ unsigned int n_p; -+ unsigned int n_ch; -+ char modalias[BUS_ID_SIZE]; -+ struct ssi_channel *ch; -+ struct device device; -+}; -+ -+#define to_ssi_device(dev) container_of(dev, struct ssi_device, device) -+ -+struct ssi_device_driver { -+ unsigned long ctrl_mask; -+ unsigned long ch_mask[SSI_MAX_PORTS]; -+ void (*port_event) (struct ssi_device *dev, -+ unsigned int event, void *arg); -+ int (*probe)(struct ssi_device *dev); -+ int (*remove)(struct ssi_device *dev); -+ int (*suspend)(struct ssi_device *dev, -+ pm_message_t mesg); -+ int (*resume)(struct ssi_device *dev); -+ struct device_driver driver; -+}; -+ -+#define to_ssi_device_driver(drv) container_of(drv, \ -+ struct ssi_device_driver, \ -+ driver) -+ -+int register_ssi_driver(struct ssi_device_driver *driver); -+void unregister_ssi_driver(struct ssi_device_driver *driver); -+int ssi_open(struct ssi_device *dev); -+int ssi_write(struct ssi_device *dev, u32 *data, unsigned int count); -+void ssi_write_cancel(struct ssi_device *dev); -+int ssi_read(struct ssi_device *dev, u32 *data, unsigned int w_count); -+void ssi_read_cancel(struct ssi_device *dev); -+int ssi_poll(struct ssi_device *dev); -+int ssi_ioctl(struct ssi_device *dev, unsigned int command, void *arg); -+void ssi_close(struct ssi_device *dev); -+void ssi_set_read_cb(struct ssi_device *dev, -+ void (*read_cb)(struct ssi_device *dev)); -+void ssi_set_write_cb(struct ssi_device *dev, -+ void (*write_cb)(struct ssi_device *dev)); -+void ssi_set_port_event_cb(struct ssi_device *dev, -+ void (*port_event_cb)(struct ssi_device *dev, -+ unsigned int event, void *arg)); -+#endif /* __SSI_H__ */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/ssi_char.h 
linux-omap-2.6.28-nokia1/include/linux/ssi_char.h ---- linux-omap-2.6.28-omap1/include/linux/ssi_char.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/ssi_char.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,71 @@ -+/* -+ * ssi_char.h -+ * -+ * Part of the SSI character device driver. -+ * -+ * Copyright (C) 2009 Nokia Corporation. All rights reserved. -+ * -+ * Contact: Andras Domokos -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+ -+#ifndef SSI_CHAR_H -+#define SSI_CHAR_H -+ -+#define SSI_CHAR_BASE 'S' -+#define CS_IOW(num, dtype) _IOW(SSI_CHAR_BASE, num, dtype) -+#define CS_IOR(num, dtype) _IOR(SSI_CHAR_BASE, num, dtype) -+#define CS_IOWR(num, dtype) _IOWR(SSI_CHAR_BASE, num, dtype) -+#define CS_IO(num) _IO(SSI_CHAR_BASE, num) -+ -+#define CS_SEND_BREAK CS_IO(1) -+#define CS_FLUSH_RX CS_IO(2) -+#define CS_FLUSH_TX CS_IO(3) -+#define CS_BOOTSTRAP CS_IO(4) -+#define CS_SET_WAKELINE CS_IOW(5, unsigned int) -+#define CS_GET_WAKELINE CS_IOR(6, unsigned int) -+#define CS_SET_RX CS_IOW(7, struct ssi_rx_config) -+#define CS_GET_RX CS_IOW(8, struct ssi_rx_config) -+#define CS_SET_TX CS_IOW(9, struct ssi_tx_config) -+#define CS_GET_TX CS_IOW(10, struct ssi_tx_config) -+ -+#define SSI_MODE_SLEEP 0 -+#define SSI_MODE_STREAM 1 -+#define SSI_MODE_FRAME 2 -+ -+#define SSI_ARBMODE_RR 0 -+#define SSI_ARBMODE_PRIO 1 -+ -+#define WAKE_UP 0 -+#define WAKE_DOWN 1 -+ -+struct ssi_tx_config { -+ u32 mode; -+ u32 frame_size; -+ u32 channels; -+ u32 divisor; -+ u32 arb_mode; -+}; -+ -+struct ssi_rx_config { -+ u32 mode; -+ u32 frame_size; -+ u32 channels; -+ u32 timeout; -+}; -+ -+#endif /* SSI_CHAR_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/swap.h linux-omap-2.6.28-nokia1/include/linux/swap.h ---- linux-omap-2.6.28-omap1/include/linux/swap.h 2011-06-22 13:14:24.453067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/swap.h 2011-06-22 13:19:33.263063268 +0200 -@@ -142,6 +142,21 @@ struct swap_info_struct { - struct swap_extent *curr_swap_extent; - unsigned old_block_size; - unsigned short * swap_map; -+ /* -+ * swap_remap is dual-purpose. The bottome 31 bits contain the -+ * re-mapped page number. The top bit determines if the re-mapped page -+ * itself is in use. e.g. -+ * say swap_remap[5] = 0x00000009 -+ * and swap_remap[9] = 0x80000000 -+ * then page 5 is re-mapped to page 9, which is therefore in use. -+ * Page 9, on the other hand, is not re-mapped. 
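A minimal user-space sketch (not part of the patch) showing how the ssi_char ioctls declared above could be used to bring up the TX path. The device node name and the numeric values are assumptions, and the header's u32 types would need the usual userspace typedefs to compile.

/* Illustrative only -- not part of the patch. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ssi_char.h>	/* assumed install path */

static int ssi_tx_setup(const char *node)	/* e.g. "/dev/ssi_char0" (hypothetical name) */
{
	struct ssi_tx_config tx = {
		.mode       = SSI_MODE_FRAME,
		.frame_size = 31,		/* at most SSI_MAX_FRAME_SIZE (0x1f) */
		.channels   = 1,
		.divisor    = 1,
		.arb_mode   = SSI_ARBMODE_RR,
	};
	unsigned int wake = WAKE_UP;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, CS_SET_WAKELINE, &wake) < 0 ||
	    ioctl(fd, CS_SET_TX, &tx) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}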
-+ */ -+ unsigned int *swap_remap; -+ spinlock_t remap_lock; /* Protects swap_remap */ -+ struct mutex remap_mutex; /* Protects find_gap() */ -+ unsigned int gap_next; -+ unsigned int gap_end; -+ unsigned int gaps_exist; - unsigned int lowest_bit; - unsigned int highest_bit; - unsigned int cluster_next; -@@ -303,7 +318,7 @@ extern void swap_free(swp_entry_t); - extern void free_swap_and_cache(swp_entry_t); - extern int swap_type_of(dev_t, sector_t, struct block_device **); - extern unsigned int count_swap_pages(int, int); --extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); -+extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t, int); - extern sector_t swapdev_block(int, pgoff_t); - extern struct swap_info_struct *get_swap_info_struct(unsigned); - extern int can_share_swap_page(struct page *); -diff -Nurp linux-omap-2.6.28-omap1/include/linux/tty_ldisc.h linux-omap-2.6.28-nokia1/include/linux/tty_ldisc.h ---- linux-omap-2.6.28-omap1/include/linux/tty_ldisc.h 2011-06-22 13:14:24.453067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/tty_ldisc.h 2011-06-22 13:19:33.263063268 +0200 -@@ -133,7 +133,7 @@ struct tty_ldisc_ops { - /* - * The following routines are called from below. - */ -- void (*receive_buf)(struct tty_struct *, const unsigned char *cp, -+ int (*receive_buf)(struct tty_struct *, const unsigned char *cp, - char *fp, int count); - void (*write_wakeup)(struct tty_struct *); - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/usb/gadget.h linux-omap-2.6.28-nokia1/include/linux/usb/gadget.h ---- linux-omap-2.6.28-omap1/include/linux/usb/gadget.h 2011-06-22 13:14:24.463067660 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/usb/gadget.h 2011-06-22 13:19:33.263063268 +0200 -@@ -484,6 +484,17 @@ struct usb_gadget { - unsigned b_hnp_enable:1; - unsigned a_hnp_support:1; - unsigned a_alt_hnp_support:1; -+ /** UGLY UGLY HACK: Windows problems with multiple -+ * configurations. -+ * -+ * We're adding the next three fields in order to: -+ * (a) keep track when we get a get_config -+ * (b) keep track when we get a set_config -+ * (c) keep track of config index. 
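To spell out the swap_remap[] encoding described in the comment above, a small sketch (not part of the patch); the macro and helper names are invented for illustration, the bit layout follows the comment's own example.

/* Bit 31: page is in use as a re-map target; bits 30..0: page this
 * slot was re-mapped to, 0 meaning "not re-mapped" (as in the example
 * where swap_remap[9] == 0x80000000). Illustrative only. */
#define SWAP_REMAP_USED		0x80000000u
#define SWAP_REMAP_TARGET(e)	((e) & 0x7fffffffu)

static inline int swap_slot_is_remapped(unsigned int entry)
{
	return SWAP_REMAP_TARGET(entry) != 0;
}

static inline int swap_page_is_remap_target(unsigned int entry)
{
	return (entry & SWAP_REMAP_USED) != 0;
}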
-+ */ -+ unsigned get_config:1; -+ unsigned set_config:1; -+ unsigned cindex; - const char *name; - struct device dev; - }; -@@ -771,6 +782,7 @@ struct usb_gadget_driver { - int (*setup)(struct usb_gadget *, - const struct usb_ctrlrequest *); - void (*disconnect)(struct usb_gadget *); -+ void (*vbus_disconnect)(struct usb_gadget *); - void (*suspend)(struct usb_gadget *); - void (*resume)(struct usb_gadget *); - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/usb/musb.h linux-omap-2.6.28-nokia1/include/linux/usb/musb.h ---- linux-omap-2.6.28-omap1/include/linux/usb/musb.h 2011-06-22 13:14:24.463067660 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/usb/musb.h 2011-06-22 13:19:33.263063268 +0200 -@@ -46,6 +46,11 @@ struct musb_hdrc_config { - u8 dma_req_chan; /* bitmask for required dma channels */ - u8 ram_bits; /* ram address size */ - -+#ifdef CONFIG_BLACKFIN -+ /* A GPIO controlling VRSEL in Blackfin */ -+ unsigned int gpio_vrsel; -+#endif -+ - struct musb_hdrc_eps_bits *eps_bits; - #ifdef CONFIG_BLACKFIN - /* A GPIO controlling VRSEL in Blackfin */ -@@ -54,6 +59,16 @@ struct musb_hdrc_config { - - }; - -+struct musb_board_data { -+ /* reset the transceiver */ -+ int (*xceiv_reset)(void); -+ -+ int (*xceiv_power)(bool power); -+ -+ /* related to omap3 power management */ -+ void (*set_pm_limits)(struct device *dev, bool set); -+}; -+ - struct musb_hdrc_platform_data { - /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ - u8 mode; -@@ -81,6 +96,9 @@ struct musb_hdrc_platform_data { - - /* MUSB configuration-specific details */ - struct musb_hdrc_config *config; -+ -+ /* MUSB board-specific details */ -+ struct musb_board_data *board; - }; - - -diff -Nurp linux-omap-2.6.28-omap1/include/linux/usb/raw.h linux-omap-2.6.28-nokia1/include/linux/usb/raw.h ---- linux-omap-2.6.28-omap1/include/linux/usb/raw.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/linux/usb/raw.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,50 @@ -+/* -+ * raw.h -- USB Raw Access Header -+ * -+ * Copyright (C) 2009 Nokia Corporation -+ * Contact: Felipe Balbi -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ */ -+ -+#ifndef __LINUX_USB_RAW_H -+#define __LINUX_USB_RAW_H -+ -+#define MAX_REQUEST_LEN (128 * 1024) -+#define MAX_NR_REQUESTS 31 -+ -+#define RAW_FIFO_STATUS 0x01 -+#define RAW_FIFO_FLUSH 0x02 -+#define RAW_CLEAR_HALT 0x03 -+#define RAW_ALLOC_REQUEST 0x10 -+#define RAW_QUEUE_REQUEST 0x11 -+#define RAW_FREE_REQUEST 0x12 -+#define RAW_GET_COMPLETION_MAP 0x13 -+#define RAW_GET_REQUEST_STATUS 0x14 -+#define RAW_STOP_REQUEST 0x15 -+ -+struct raw_request_status { -+ int nr; -+ int status; -+ unsigned int nr_bytes; -+}; -+ -+struct raw_queue_request { -+ int nr; -+ unsigned int nr_bytes; -+}; -+ -+#endif /* __LINUX_USB_RAW_H */ -+ -diff -Nurp linux-omap-2.6.28-omap1/include/linux/videodev2.h linux-omap-2.6.28-nokia1/include/linux/videodev2.h ---- linux-omap-2.6.28-omap1/include/linux/videodev2.h 2011-06-22 13:14:24.473067658 +0200 -+++ linux-omap-2.6.28-nokia1/include/linux/videodev2.h 2011-06-22 13:19:33.263063268 +0200 -@@ -793,6 +793,7 @@ struct v4l2_ext_controls { - #define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ - #define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ - #define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */ -+#define V4L2_CTRL_CLASS_MODE 0x009b0000 /* Sensor mode information */ - - #define V4L2_CTRL_ID_MASK (0x0fffffff) - #define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL) -@@ -876,8 +877,15 @@ enum v4l2_power_line_frequency { - #define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) - #define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) - #define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) -+#define V4L2_CID_COLORFX (V4L2_CID_BASE+31) -+enum v4l2_colorfx { -+ V4L2_COLORFX_NONE = 0, -+ V4L2_COLORFX_BW = 1, -+ V4L2_COLORFX_SEPIA = 2, -+}; -+ - /* last CID + 1 */ --#define V4L2_CID_LASTP1 (V4L2_CID_BASE+31) -+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+32) - - /* MPEG-class control IDs defined by V4L2 */ - #define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) -@@ -1117,6 +1125,37 @@ enum v4l2_exposure_auto_type { - #define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) - #define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) - -+/* Flash and privacy (indicator) light controls */ -+#define V4L2_CID_FLASH_STROBE (V4L2_CID_CAMERA_CLASS_BASE+13) -+#define V4L2_CID_FLASH_TIMEOUT (V4L2_CID_CAMERA_CLASS_BASE+14) -+#define V4L2_CID_FLASH_INTENSITY (V4L2_CID_CAMERA_CLASS_BASE+15) -+#define V4L2_CID_TORCH_INTENSITY (V4L2_CID_CAMERA_CLASS_BASE+16) -+#define V4L2_CID_INDICATOR_INTENSITY (V4L2_CID_CAMERA_CLASS_BASE+17) -+ -+#define V4L2_CID_TEST_PATTERN (V4L2_CTRL_CLASS_CAMERA | 0x107e) -+ -+/* SMIA-type sensor information */ -+#define V4L2_CID_MODE_CLASS_BASE (V4L2_CTRL_CLASS_MODE | 0x900) -+#define V4L2_CID_MODE_CLASS (V4L2_CTRL_CLASS_MODE | 1) -+#define V4L2_CID_MODE_FRAME_WIDTH (V4L2_CID_MODE_CLASS_BASE+1) -+#define V4L2_CID_MODE_FRAME_HEIGHT (V4L2_CID_MODE_CLASS_BASE+2) -+#define V4L2_CID_MODE_VISIBLE_WIDTH (V4L2_CID_MODE_CLASS_BASE+3) -+#define V4L2_CID_MODE_VISIBLE_HEIGHT (V4L2_CID_MODE_CLASS_BASE+4) -+#define V4L2_CID_MODE_PIXELCLOCK (V4L2_CID_MODE_CLASS_BASE+5) -+#define V4L2_CID_MODE_SENSITIVITY (V4L2_CID_MODE_CLASS_BASE+6) -+ -+/* Control IDs specific to the AD5820 driver as defined by V4L2 */ -+#define V4L2_CID_FOCUS_AD5820_BASE (V4L2_CTRL_CLASS_CAMERA | 0x10af) -+#define 
V4L2_CID_FOCUS_AD5820_RAMP_TIME (V4L2_CID_FOCUS_AD5820_BASE+0) -+#define V4L2_CID_FOCUS_AD5820_RAMP_MODE (V4L2_CID_FOCUS_AD5820_BASE+1) -+ -+/* Control IDs specific to the ADP1653 flash driver as defined by V4L2 */ -+#define V4L2_CID_FLASH_ADP1653_BASE (V4L2_CTRL_CLASS_CAMERA | 0x10f1) -+#define V4L2_CID_FLASH_ADP1653_FAULT_SCP (V4L2_CID_FLASH_ADP1653_BASE+0) -+#define V4L2_CID_FLASH_ADP1653_FAULT_OT (V4L2_CID_FLASH_ADP1653_BASE+1) -+#define V4L2_CID_FLASH_ADP1653_FAULT_TMR (V4L2_CID_FLASH_ADP1653_BASE+2) -+#define V4L2_CID_FLASH_ADP1653_FAULT_OV (V4L2_CID_FLASH_ADP1653_BASE+3) -+ - /* - * T U N I N G - */ -@@ -1385,6 +1424,14 @@ struct v4l2_chip_ident { - __u32 revision; /* chip revision, chip specific */ - }; - -+/* VIDIOC_ENUM_SLAVES */ -+struct v4l2_slave_info { -+ __u32 index; -+ __u8 driver[16]; -+ __u8 bus_info[32]; -+ __u8 version[16]; -+}; -+ - /* - * I O C T L C O D E S F O R V I D E O D E V I C E S - * -@@ -1468,6 +1515,7 @@ struct v4l2_chip_ident { - #define VIDIOC_G_AUDOUT_OLD _IOWR('V', 49, struct v4l2_audioout) - #define VIDIOC_CROPCAP_OLD _IOR('V', 58, struct v4l2_cropcap) - #endif -+#define VIDIOC_ENUM_SLAVES _IOWR ('V', 82, struct v4l2_slave_info) - - #define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ - -diff -Nurp linux-omap-2.6.28-omap1/include/media/adp1653.h linux-omap-2.6.28-nokia1/include/media/adp1653.h ---- linux-omap-2.6.28-omap1/include/media/adp1653.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/media/adp1653.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,86 @@ -+/* -+ * include/media/adp1653.h -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
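The new flash controls are exercised through the standard V4L2 control ioctls; a short user-space sketch (not part of the patch) follows. The intensity value is chosen arbitrarily here — real limits come from VIDIOC_QUERYCTRL or the board's adp1653 platform data.

/* Illustrative only -- not part of the patch. */
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int flash_fire(int fd, int intensity)
{
	struct v4l2_control ctrl;

	ctrl.id = V4L2_CID_FLASH_INTENSITY;
	ctrl.value = intensity;
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		return -1;

	ctrl.id = V4L2_CID_FLASH_STROBE;
	ctrl.value = 1;			/* trigger the strobe */
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}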
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef ADP1653_H -+#define ADP1653_H -+ -+#include -+#include -+#include -+#include -+ -+#define ADP1653_NAME "adp1653" -+#define ADP1653_I2C_ADDR (0x60 >> 1) -+ -+/* Register definitions */ -+#define ADP1653_REG_OUT_SEL 0x00 -+#define ADP1653_REG_OUT_SEL_HPLED_MAX 0x1f -+#define ADP1653_REG_OUT_SEL_HPLED_SHIFT 3 -+#define ADP1653_REG_OUT_SEL_ILED_MAX 0x07 -+#define ADP1653_REG_OUT_SEL_ILED_SHIFT 0 -+ -+#define ADP1653_REG_CONFIG 0x01 -+#define ADP1653_REG_CONFIG_TMR_CFG (1 << 4) -+#define ADP1653_REG_CONFIG_TMR_SET_MAX 0x0f -+#define ADP1653_REG_CONFIG_TMR_SET_SHIFT 0 -+ -+#define ADP1653_REG_SW_STROBE 0x02 -+#define ADP1653_REG_SW_STROBE_SW_STROBE (1 << 0) -+ -+#define ADP1653_REG_FAULT 0x03 -+#define ADP1653_REG_FAULT_FLT_SCP (1 << 3) -+#define ADP1653_REG_FAULT_FLT_OT (1 << 2) -+#define ADP1653_REG_FAULT_FLT_TMR (1 << 1) -+#define ADP1653_REG_FAULT_FLT_OV (1 << 0) -+ -+#define ADP1653_TORCH_INTENSITY_MAX 11 -+ -+struct adp1653_platform_data { -+ int (*g_priv)(struct v4l2_int_device *s, void *priv); -+ int (*power_on)(struct v4l2_int_device *s); -+ int (*power_off)(struct v4l2_int_device *s); -+ int (*strobe)(struct v4l2_int_device *s); /* If NULL, use SW strobe */ -+ u32 max_flash_timeout; /* flash light timeout in us */ -+ u32 max_flash_intensity; /* led intensity, flash mode */ -+ u32 max_torch_intensity; /* led intensity, torch mode */ -+ u32 max_indicator_intensity; /* indicator led intensity */ -+}; -+ -+struct adp1653_flash { -+ struct i2c_client *i2c_client; -+ struct i2c_driver driver; -+ -+ u32 flash_timeout; -+ u32 flash_intensity; -+ u32 torch_intensity; -+ u32 indicator_intensity; -+ -+ struct v4l2_int_device *v4l2_int_device; -+ -+ struct adp1653_platform_data *platform_data; -+ -+ enum v4l2_power power; /* Requested power state */ -+}; -+ -+#endif /* ADP1653_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/media/ad5820.h linux-omap-2.6.28-nokia1/include/media/ad5820.h ---- linux-omap-2.6.28-omap1/include/media/ad5820.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/media/ad5820.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,56 @@ -+/* -+ * include/media/ad5820.h -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * Copyright (C) 2007 Texas Instruments -+ * -+ * Contact: Tuukka Toivonen -+ * Sakari Ailus -+ * -+ * Based on af_d88.c by Texas Instruments. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
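A small helper sketch (not part of the patch) making the OUT_SEL register layout explicit, assuming the two fields are the high-power (flash/torch) and indicator LED currents as the register names suggest.

/* Illustrative only -- not part of the patch. */
static inline u8 adp1653_out_sel(u8 hpled_current, u8 iled_current)
{
	if (hpled_current > ADP1653_REG_OUT_SEL_HPLED_MAX)
		hpled_current = ADP1653_REG_OUT_SEL_HPLED_MAX;
	if (iled_current > ADP1653_REG_OUT_SEL_ILED_MAX)
		iled_current = ADP1653_REG_OUT_SEL_ILED_MAX;
	/* HPLED current in bits 7..3, indicator LED current in bits 2..0. */
	return (hpled_current << ADP1653_REG_OUT_SEL_HPLED_SHIFT) |
	       (iled_current << ADP1653_REG_OUT_SEL_ILED_SHIFT);
}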
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifndef AD5820_H -+#define AD5820_H -+ -+#include -+ -+#include -+ -+#include -+ -+#define AD5820_NAME "ad5820" -+#define AD5820_I2C_ADDR (0x18 >> 1) -+ -+struct ad5820_platform_data { -+ int (*g_priv)(struct v4l2_int_device *s, void *priv); -+ int (*s_power)(struct v4l2_int_device *s, enum v4l2_power state); -+}; -+ -+struct ad5820_device { -+ /* client->adapter is non-NULL if driver is registered to -+ * I2C subsystem and omap camera driver, otherwise NULL */ -+ struct i2c_client *i2c_client; -+ s32 focus_absolute; /* Current values of V4L2 controls */ -+ s32 focus_ramp_time; -+ s32 focus_ramp_mode; -+ enum v4l2_power power; -+ struct ad5820_platform_data *platform_data; -+ struct v4l2_int_device *v4l2_int_device; -+}; -+ -+#endif /* AD5820_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/media/radio-bcm2048.h linux-omap-2.6.28-nokia1/include/media/radio-bcm2048.h ---- linux-omap-2.6.28-omap1/include/media/radio-bcm2048.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/media/radio-bcm2048.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,30 @@ -+/* -+ * drivers/media/radio/radio-bcm2048.h -+ * -+ * Property and command definitions for bcm2048 radio receiver chip. -+ * -+ * Copyright (C) Nokia Corporation -+ * Contact: Eero Nurkkala -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifndef BCM2048_H -+#define BCM2048_H -+ -+#define BCM2048_NAME "bcm2048" -+#define BCM2048_I2C_ADDR 0x22 -+ -+#endif /* ifndef BCM2048_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/media/smiaregs.h linux-omap-2.6.28-nokia1/include/media/smiaregs.h ---- linux-omap-2.6.28-omap1/include/media/smiaregs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/include/media/smiaregs.h 2011-06-22 13:19:33.263063268 +0200 -@@ -0,0 +1,148 @@ -+/* -+ * include/media/smiaregs.h -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Sakari Ailus -+ * Tuukka Toivonen -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ */ -+ -+#ifndef SMIAREGS_H -+#define SMIAREGS_H -+ -+#include -+#include -+#include -+ -+#define SMIA_MAGIC 0x531A0002 -+ -+struct smia_mode { -+ /* Physical sensor resolution and current image window */ -+ __u16 sensor_width; -+ __u16 sensor_height; -+ __u16 sensor_window_origin_x; -+ __u16 sensor_window_origin_y; -+ __u16 sensor_window_width; -+ __u16 sensor_window_height; -+ -+ /* Image data coming from sensor (after scaling) */ -+ __u16 width; -+ __u16 height; -+ __u16 window_origin_x; -+ __u16 window_origin_y; -+ __u16 window_width; -+ __u16 window_height; -+ -+ __u32 pixel_clock; /* in Hz */ -+ __u32 ext_clock; /* in Hz */ -+ struct v4l2_fract timeperframe; -+ __u32 max_exp; /* Maximum exposure value */ -+ __u32 pixel_format; /* V4L2_PIX_FMT_xxx */ -+ __u32 sensitivity; /* 16.16 fixed point */ -+}; -+ -+#define SMIA_REG_8BIT 1 -+#define SMIA_REG_16BIT 2 -+#define SMIA_REG_32BIT 4 -+#define SMIA_REG_DELAY 100 -+#define SMIA_REG_TERM 0xff -+struct smia_reg { -+ u16 type; -+ u16 reg; /* 16-bit offset */ -+ u32 val; /* 8/16/32-bit value */ -+}; -+ -+/* Possible struct smia_reglist types. */ -+#define SMIA_REGLIST_STANDBY 0 -+#define SMIA_REGLIST_POWERON 1 -+#define SMIA_REGLIST_RESUME 2 -+#define SMIA_REGLIST_STREAMON 3 -+#define SMIA_REGLIST_STREAMOFF 4 -+#define SMIA_REGLIST_DISABLED 5 -+ -+#define SMIA_REGLIST_MODE 10 -+ -+#define SMIA_REGLIST_LSC_ENABLE 100 -+#define SMIA_REGLIST_LSC_DISABLE 101 -+#define SMIA_REGLIST_ANR_ENABLE 102 -+#define SMIA_REGLIST_ANR_DISABLE 103 -+ -+struct smia_reglist { -+ u32 type; -+ struct smia_mode mode; -+ struct smia_reg regs[]; -+}; -+ -+#define SMIA_MAX_LEN 32 -+struct smia_meta_reglist { -+ u32 magic; -+ char version[SMIA_MAX_LEN]; -+ /* -+ * When we generate a reglist, the objcopy program will put -+ * here the list of addresses to reglists local to that object -+ * file. -+ * -+ * In the kernel they serve as offsets inside the the register -+ * list binary. -+ * -+ * The list must be NULL-terminated. That is expected by the -+ * drivers. 
-+ */ -+ union { -+ uintptr_t offset; -+ struct smia_reglist *ptr; -+ } reglist[]; -+}; -+ -+int smia_ctrl_find(struct v4l2_queryctrl *ctrls, size_t nctrls, int id); -+int smia_ctrl_find_next(struct v4l2_queryctrl *ctrls, size_t nctrls, int id); -+int smia_ctrl_query(struct v4l2_queryctrl *ctrls, size_t nctrls, -+ struct v4l2_queryctrl *a); -+int smia_mode_query(const __u32 *ctrls, size_t nctrls, struct v4l2_queryctrl *a); -+int smia_mode_g_ctrl(const __u32 *ctrls, size_t nctrls, struct v4l2_control *vc, -+ const struct smia_mode *sm); -+ -+int smia_reglist_import(struct smia_meta_reglist *meta); -+struct smia_reglist *smia_reglist_find_type(struct smia_meta_reglist *meta, -+ u16 type); -+struct smia_reglist **smia_reglist_first(struct smia_meta_reglist *meta); -+struct smia_reglist *smia_reglist_find_mode_fmt( -+ struct smia_meta_reglist *meta, -+ struct smia_reglist *current_reglist, -+ struct v4l2_format *f); -+struct smia_reglist *smia_reglist_find_mode_streamparm( -+ struct smia_meta_reglist *meta, -+ struct smia_reglist *current_reglist, -+ struct v4l2_streamparm *a); -+int smia_reglist_enum_fmt(struct smia_meta_reglist *meta, -+ struct v4l2_fmtdesc *f); -+int smia_reglist_enum_framesizes(struct smia_meta_reglist *meta, -+ struct v4l2_frmsizeenum *frm); -+int smia_reglist_enum_frameintervals(struct smia_meta_reglist *meta, -+ struct v4l2_frmivalenum *frm); -+ -+int smia_i2c_read_reg(struct i2c_client *client, u16 data_length, -+ u16 reg, u32 *val); -+int smia_i2c_write_reg(struct i2c_client *client, u16 data_length, u16 reg, -+ u32 val); -+int smia_i2c_write_regs(struct i2c_client *client, -+ const struct smia_reg reglist[]); -+int smia_i2c_reglist_find_write(struct i2c_client *client, -+ struct smia_meta_reglist *meta, u16 type); -+ -+#endif -diff -Nurp linux-omap-2.6.28-omap1/include/media/v4l2-int-device.h linux-omap-2.6.28-nokia1/include/media/v4l2-int-device.h ---- linux-omap-2.6.28-omap1/include/media/v4l2-int-device.h 2011-06-22 13:14:24.523067658 +0200 -+++ linux-omap-2.6.28-nokia1/include/media/v4l2-int-device.h 2011-06-22 13:19:33.263063268 +0200 -@@ -84,6 +84,8 @@ struct v4l2_int_device { - void *priv; - }; - -+struct v4l2_int_device *v4l2_int_device_dummy(void); -+ - void v4l2_int_device_try_attach_all(void); - - int v4l2_int_device_register(struct v4l2_int_device *d); -@@ -171,11 +173,13 @@ enum v4l2_int_ioctl_num { - * "Proper" V4L ioctls, as in struct video_device. 
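A kernel-side sketch (not part of the patch) of walking the NULL-terminated reglist table with the helpers declared above; it assumes smia_reglist_import() has already turned the stored offsets into pointers, which is an inference from the offset/ptr union rather than something the patch states.

/* Illustrative only -- not part of the patch. */
static int smia_count_modes(struct smia_meta_reglist *meta)
{
	struct smia_reglist **list;
	int n = 0;

	/* The table is NULL-terminated, as the header comment requires. */
	for (list = smia_reglist_first(meta); *list != NULL; list++)
		if ((*list)->type == SMIA_REGLIST_MODE)
			n++;

	return n;
}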
- * - */ -- vidioc_int_enum_fmt_cap_num = 1, -+ vidioc_int_querycap_num = 1, -+ vidioc_int_enum_fmt_cap_num, - vidioc_int_g_fmt_cap_num, - vidioc_int_s_fmt_cap_num, - vidioc_int_try_fmt_cap_num, - vidioc_int_queryctrl_num, -+ vidioc_int_querymenu_num, - vidioc_int_g_ctrl_num, - vidioc_int_s_ctrl_num, - vidioc_int_cropcap_num, -@@ -206,6 +210,7 @@ enum v4l2_int_ioctl_num { - vidioc_int_g_needs_reset_num, - vidioc_int_enum_framesizes_num, - vidioc_int_enum_frameintervals_num, -+ vidioc_int_enum_slaves_num, - - /* - * -@@ -272,11 +277,13 @@ enum v4l2_int_ioctl_num { - return desc; \ - } - -+V4L2_INT_WRAPPER_1(querycap, struct v4l2_capability, *); - V4L2_INT_WRAPPER_1(enum_fmt_cap, struct v4l2_fmtdesc, *); - V4L2_INT_WRAPPER_1(g_fmt_cap, struct v4l2_format, *); - V4L2_INT_WRAPPER_1(s_fmt_cap, struct v4l2_format, *); - V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *); - V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *); -+V4L2_INT_WRAPPER_1(querymenu, struct v4l2_querymenu, *); - V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *); - V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *); - V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *); -@@ -293,6 +300,7 @@ V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2 - V4L2_INT_WRAPPER_1(g_needs_reset, void, *); - V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *); - V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *); -+V4L2_INT_WRAPPER_1(enum_slaves, struct v4l2_slave_info, *); - - V4L2_INT_WRAPPER_0(reset); - V4L2_INT_WRAPPER_0(init); -diff -Nurp linux-omap-2.6.28-omap1/include/net/bluetooth/bluetooth.h linux-omap-2.6.28-nokia1/include/net/bluetooth/bluetooth.h ---- linux-omap-2.6.28-omap1/include/net/bluetooth/bluetooth.h 2011-06-22 13:14:24.533067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/bluetooth/bluetooth.h 2011-06-22 13:19:33.263063268 +0200 -@@ -53,9 +53,20 @@ - #define SOL_SCO 17 - #define SOL_RFCOMM 18 - -+#define BT_SECURITY 4 -+struct bt_security { -+ __u8 level; -+}; -+#define BT_SECURITY_SDP 0 -+#define BT_SECURITY_LOW 1 -+#define BT_SECURITY_MEDIUM 2 -+#define BT_SECURITY_HIGH 3 -+ -+#define BT_DEFER_SETUP 7 -+ - #define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg) --#define BT_DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __func__ , ## arg) --#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg) -+#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg) -+#define BT_DBG(fmt, arg...) 
pr_debug("%s: " fmt "\n" , __func__ , ## arg) - - /* Connection and socket states */ - enum { -@@ -108,6 +119,7 @@ struct bt_sock { - bdaddr_t dst; - struct list_head accept_q; - struct sock *parent; -+ u32 defer_setup; - }; - - struct bt_sock_list { -diff -Nurp linux-omap-2.6.28-omap1/include/net/bluetooth/hci_core.h linux-omap-2.6.28-nokia1/include/net/bluetooth/hci_core.h ---- linux-omap-2.6.28-omap1/include/net/bluetooth/hci_core.h 2011-06-22 13:14:24.533067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/bluetooth/hci_core.h 2011-06-22 13:19:33.263063268 +0200 -@@ -169,7 +169,9 @@ struct hci_conn { - __u16 link_policy; - __u32 link_mode; - __u8 auth_type; -+ __u8 sec_level; - __u8 power_save; -+ __u16 disc_timeout; - unsigned long pend; - - unsigned int sent; -@@ -179,7 +181,8 @@ struct hci_conn { - struct timer_list disc_timer; - struct timer_list idle_timer; - -- struct work_struct work; -+ struct work_struct work_add; -+ struct work_struct work_del; - - struct device dev; - -@@ -325,12 +328,11 @@ int hci_conn_del(struct hci_conn *conn); - void hci_conn_hash_flush(struct hci_dev *hdev); - void hci_conn_check_pending(struct hci_dev *hdev); - --struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type); -+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); - int hci_conn_check_link_mode(struct hci_conn *conn); --int hci_conn_auth(struct hci_conn *conn); --int hci_conn_encrypt(struct hci_conn *conn); -+int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); - int hci_conn_change_link_key(struct hci_conn *conn); --int hci_conn_switch_role(struct hci_conn *conn, uint8_t role); -+int hci_conn_switch_role(struct hci_conn *conn, __u8 role); - - void hci_conn_enter_active_mode(struct hci_conn *conn); - void hci_conn_enter_sniff_mode(struct hci_conn *conn); -@@ -348,13 +350,16 @@ static inline void hci_conn_put(struct h - if (conn->type == ACL_LINK) { - del_timer(&conn->idle_timer); - if (conn->state == BT_CONNECTED) { -- timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT); -+ timeo = msecs_to_jiffies(conn->disc_timeout); - if (!conn->out) -- timeo *= 5; -+ timeo *= 2; - } else - timeo = msecs_to_jiffies(10); - } else - timeo = msecs_to_jiffies(10); -+ -+ if (test_bit(HCI_DUT_MODE, &conn->hdev->flags) && !conn->out) -+ return; - mod_timer(&conn->disc_timer, jiffies + timeo); - } - } -@@ -455,6 +460,7 @@ int hci_recv_fragment(struct hci_dev *hd - - int hci_register_sysfs(struct hci_dev *hdev); - void hci_unregister_sysfs(struct hci_dev *hdev); -+void hci_conn_init_sysfs(struct hci_conn *conn); - void hci_conn_add_sysfs(struct hci_conn *conn); - void hci_conn_del_sysfs(struct hci_conn *conn); - -@@ -470,26 +476,26 @@ void hci_conn_del_sysfs(struct hci_conn - - /* ----- HCI protocols ----- */ - struct hci_proto { -- char *name; -+ char *name; - unsigned int id; - unsigned long flags; - - void *priv; - -- int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type); -+ int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type); - int (*connect_cfm) (struct hci_conn *conn, __u8 status); -- int (*disconn_ind) (struct hci_conn *conn, __u8 reason); -+ int (*disconn_ind) (struct hci_conn *conn); -+ int (*disconn_cfm) (struct hci_conn *conn, __u8 reason); - int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags); - int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); -- int (*auth_cfm) (struct hci_conn *conn, __u8 status); -- int 
(*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); -+ int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); - }; - - static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) - { - register struct hci_proto *hp; - int mask = 0; -- -+ - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->connect_ind) - mask |= hp->connect_ind(hdev, bdaddr, type); -@@ -514,30 +520,52 @@ static inline void hci_proto_connect_cfm - hp->connect_cfm(conn, status); - } - --static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason) -+static inline int hci_proto_disconn_ind(struct hci_conn *conn) - { - register struct hci_proto *hp; -+ int reason = 0x13; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->disconn_ind) -- hp->disconn_ind(conn, reason); -+ reason = hp->disconn_ind(conn); - - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->disconn_ind) -- hp->disconn_ind(conn, reason); -+ reason = hp->disconn_ind(conn); -+ -+ return reason; -+} -+ -+static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) -+{ -+ register struct hci_proto *hp; -+ -+ hp = hci_proto[HCI_PROTO_L2CAP]; -+ if (hp && hp->disconn_cfm) -+ hp->disconn_cfm(conn, reason); -+ -+ hp = hci_proto[HCI_PROTO_SCO]; -+ if (hp && hp->disconn_cfm) -+ hp->disconn_cfm(conn, reason); - } - - static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) - { - register struct hci_proto *hp; -+ __u8 encrypt; -+ -+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) -+ return; -+ -+ encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; - - hp = hci_proto[HCI_PROTO_L2CAP]; -- if (hp && hp->auth_cfm) -- hp->auth_cfm(conn, status); -+ if (hp && hp->security_cfm) -+ hp->security_cfm(conn, status, encrypt); - - hp = hci_proto[HCI_PROTO_SCO]; -- if (hp && hp->auth_cfm) -- hp->auth_cfm(conn, status); -+ if (hp && hp->security_cfm) -+ hp->security_cfm(conn, status, encrypt); - } - - static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) -@@ -545,12 +573,12 @@ static inline void hci_proto_encrypt_cfm - register struct hci_proto *hp; - - hp = hci_proto[HCI_PROTO_L2CAP]; -- if (hp && hp->encrypt_cfm) -- hp->encrypt_cfm(conn, status, encrypt); -+ if (hp && hp->security_cfm) -+ hp->security_cfm(conn, status, encrypt); - - hp = hci_proto[HCI_PROTO_SCO]; -- if (hp && hp->encrypt_cfm) -- hp->encrypt_cfm(conn, status, encrypt); -+ if (hp && hp->security_cfm) -+ hp->security_cfm(conn, status, encrypt); - } - - int hci_register_proto(struct hci_proto *hproto); -@@ -562,8 +590,7 @@ struct hci_cb { - - char *name; - -- void (*auth_cfm) (struct hci_conn *conn, __u8 status); -- void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); -+ void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); - void (*key_change_cfm) (struct hci_conn *conn, __u8 status); - void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); - }; -@@ -571,14 +598,20 @@ struct hci_cb { - static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) - { - struct list_head *p; -+ __u8 encrypt; - - hci_proto_auth_cfm(conn, status); - -+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) -+ return; -+ -+ encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 
0x01 : 0x00; -+ - read_lock_bh(&hci_cb_list_lock); - list_for_each(p, &hci_cb_list) { - struct hci_cb *cb = list_entry(p, struct hci_cb, list); -- if (cb->auth_cfm) -- cb->auth_cfm(conn, status); -+ if (cb->security_cfm) -+ cb->security_cfm(conn, status, encrypt); - } - read_unlock_bh(&hci_cb_list_lock); - } -@@ -587,13 +620,16 @@ static inline void hci_encrypt_cfm(struc - { - struct list_head *p; - -+ if (conn->sec_level == BT_SECURITY_SDP) -+ conn->sec_level = BT_SECURITY_LOW; -+ - hci_proto_encrypt_cfm(conn, status, encrypt); - - read_lock_bh(&hci_cb_list_lock); - list_for_each(p, &hci_cb_list) { - struct hci_cb *cb = list_entry(p, struct hci_cb, list); -- if (cb->encrypt_cfm) -- cb->encrypt_cfm(conn, status, encrypt); -+ if (cb->security_cfm) -+ cb->security_cfm(conn, status, encrypt); - } - read_unlock_bh(&hci_cb_list_lock); - } -diff -Nurp linux-omap-2.6.28-omap1/include/net/bluetooth/hci.h linux-omap-2.6.28-nokia1/include/net/bluetooth/hci.h ---- linux-omap-2.6.28-omap1/include/net/bluetooth/hci.h 2011-06-22 13:14:24.533067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/bluetooth/hci.h 2011-06-22 13:19:33.263063268 +0200 -@@ -54,7 +54,7 @@ - - /* HCI device quirks */ - enum { -- HCI_QUIRK_RESET_ON_INIT, -+ HCI_QUIRK_NO_RESET, - HCI_QUIRK_RAW_DEVICE, - HCI_QUIRK_FIXUP_BUFFER_SIZE - }; -@@ -72,6 +72,8 @@ enum { - HCI_INQUIRY, - - HCI_RAW, -+ -+ HCI_DUT_MODE, - }; - - /* HCI ioctl defines */ -@@ -101,6 +103,7 @@ enum { - /* HCI timeouts */ - #define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ - #define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ -+#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ - #define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ - #define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ - -@@ -133,8 +136,13 @@ enum { - #define ESCO_EV3 0x0008 - #define ESCO_EV4 0x0010 - #define ESCO_EV5 0x0020 -+#define ESCO_2EV3 0x0040 -+#define ESCO_3EV3 0x0080 -+#define ESCO_2EV5 0x0100 -+#define ESCO_3EV5 0x0200 - - #define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) -+#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) - - /* ACL flags */ - #define ACL_CONT 0x01 -@@ -176,6 +184,9 @@ enum { - #define LMP_EV5 0x02 - - #define LMP_SNIFF_SUBR 0x02 -+#define LMP_EDR_ESCO_2M 0x20 -+#define LMP_EDR_ESCO_3M 0x40 -+#define LMP_EDR_3S_ESCO 0x80 - - #define LMP_SIMPLE_PAIR 0x08 - -@@ -577,6 +588,11 @@ struct hci_rp_read_bd_addr { - bdaddr_t bdaddr; - } __attribute__ ((packed)); - -+#define HCI_OP_ENABLE_DUT_MODE 0x1803 -+struct hci_rp_enable_dut_mode { -+ __u8 status; -+} __attribute__ ((packed)); -+ - /* ---- HCI Events ---- */ - #define HCI_EV_INQUIRY_COMPLETE 0x01 - -diff -Nurp linux-omap-2.6.28-omap1/include/net/bluetooth/l2cap.h linux-omap-2.6.28-nokia1/include/net/bluetooth/l2cap.h ---- linux-omap-2.6.28-omap1/include/net/bluetooth/l2cap.h 2011-06-22 13:14:24.533067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/bluetooth/l2cap.h 2011-06-22 13:19:33.263063268 +0200 -@@ -37,6 +37,7 @@ struct sockaddr_l2 { - sa_family_t l2_family; - __le16 l2_psm; - bdaddr_t l2_bdaddr; -+ __le16 l2_cid; - }; - - /* L2CAP socket options */ -@@ -185,6 +186,7 @@ struct l2cap_info_rsp { - /* info type */ - #define L2CAP_IT_CL_MTU 0x0001 - #define L2CAP_IT_FEAT_MASK 0x0002 -+#define L2CAP_IT_FIXED_CHAN 0x0003 - - /* info result */ - #define L2CAP_IR_SUCCESS 0x0000 -@@ -219,11 +221,14 @@ struct l2cap_conn { - __u8 rx_ident; - __u8 tx_ident; - -+ __u8 disc_reason; -+ - struct l2cap_chan_list chan_list; - }; - - #define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 --#define 
L2CAP_INFO_FEAT_MASK_REQ_SENT 0x02 -+#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04 -+#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08 - - /* ----- L2CAP channel and socket info ----- */ - #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) -@@ -237,8 +242,9 @@ struct l2cap_pinfo { - __u16 imtu; - __u16 omtu; - __u16 flush_to; -- -- __u32 link_mode; -+ __u8 sec_level; -+ __u8 role_switch; -+ __u8 force_reliable; - - __u8 conf_req[64]; - __u8 conf_len; -@@ -257,6 +263,7 @@ struct l2cap_pinfo { - #define L2CAP_CONF_REQ_SENT 0x01 - #define L2CAP_CONF_INPUT_DONE 0x02 - #define L2CAP_CONF_OUTPUT_DONE 0x04 -+#define L2CAP_CONF_CONNECT_PEND 0x80 - - #define L2CAP_CONF_MAX_RETRIES 2 - -diff -Nurp linux-omap-2.6.28-omap1/include/net/bluetooth/rfcomm.h linux-omap-2.6.28-nokia1/include/net/bluetooth/rfcomm.h ---- linux-omap-2.6.28-omap1/include/net/bluetooth/rfcomm.h 2011-06-22 13:14:24.533067659 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/bluetooth/rfcomm.h 2011-06-22 13:19:33.263063268 +0200 -@@ -29,6 +29,7 @@ - #define RFCOMM_CONN_TIMEOUT (HZ * 30) - #define RFCOMM_DISC_TIMEOUT (HZ * 20) - #define RFCOMM_AUTH_TIMEOUT (HZ * 25) -+#define RFCOMM_IDLE_TIMEOUT (HZ * 2) - - #define RFCOMM_DEFAULT_MTU 127 - #define RFCOMM_DEFAULT_CREDITS 7 -@@ -154,6 +155,7 @@ struct rfcomm_msc { - struct rfcomm_session { - struct list_head list; - struct socket *sock; -+ struct timer_list timer; - unsigned long state; - unsigned long flags; - atomic_t refcnt; -@@ -183,8 +185,9 @@ struct rfcomm_dlc { - u8 remote_v24_sig; - u8 mscex; - u8 out; -- -- u32 link_mode; -+ u8 sec_level; -+ u8 role_switch; -+ u32 defer_setup; - - uint mtu; - uint cfc; -@@ -202,10 +205,12 @@ struct rfcomm_dlc { - #define RFCOMM_RX_THROTTLED 0 - #define RFCOMM_TX_THROTTLED 1 - #define RFCOMM_TIMED_OUT 2 --#define RFCOMM_MSC_PENDING 3 --#define RFCOMM_AUTH_PENDING 4 --#define RFCOMM_AUTH_ACCEPT 5 --#define RFCOMM_AUTH_REJECT 6 -+#define RFCOMM_MSC_PENDING 3 -+#define RFCOMM_SEC_PENDING 4 -+#define RFCOMM_AUTH_PENDING 5 -+#define RFCOMM_AUTH_ACCEPT 6 -+#define RFCOMM_AUTH_REJECT 7 -+#define RFCOMM_DEFER_SETUP 8 - - /* Scheduling flags and events */ - #define RFCOMM_SCHED_STATE 0 -@@ -239,6 +244,7 @@ int rfcomm_dlc_close(struct rfcomm_dlc - int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb); - int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig); - int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig); -+void rfcomm_dlc_accept(struct rfcomm_dlc *d); - - #define rfcomm_dlc_lock(d) spin_lock(&d->lock) - #define rfcomm_dlc_unlock(d) spin_unlock(&d->lock) -@@ -304,7 +310,8 @@ struct rfcomm_pinfo { - struct bt_sock bt; - struct rfcomm_dlc *dlc; - u8 channel; -- u32 link_mode; -+ u8 sec_level; -+ u8 role_switch; - }; - - int rfcomm_init_sockets(void); -@@ -333,7 +340,6 @@ struct rfcomm_dev_req { - bdaddr_t src; - bdaddr_t dst; - u8 channel; -- - }; - - struct rfcomm_dev_info { -diff -Nurp linux-omap-2.6.28-omap1/include/net/mac80211.h linux-omap-2.6.28-nokia1/include/net/mac80211.h ---- linux-omap-2.6.28-omap1/include/net/mac80211.h 2011-06-22 13:14:24.583067657 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/mac80211.h 2011-06-22 13:19:33.263063268 +0200 -@@ -172,8 +172,12 @@ enum ieee80211_bss_change { - * @assoc: association status - * @aid: association ID number, valid only when @assoc is true - * @use_cts_prot: use CTS protection -- * @use_short_preamble: use 802.11b short preamble -- * @use_short_slot: use short slot time (only relevant for ERP) -+ * @use_short_preamble: use 802.11b short preamble; -+ * if the hardware 
cannot handle this it must set the -+ * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag -+ * @use_short_slot: use short slot time (only relevant for ERP); -+ * if the hardware cannot handle this it must set the -+ * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag - * @dtim_period: num of beacons before the next DTIM, for PSM - * @timestamp: beacon timestamp - * @beacon_int: beacon interval -@@ -434,23 +438,23 @@ struct ieee80211_rx_status { - * - * Flags to define PHY configuration options - * -- * @IEEE80211_CONF_SHORT_SLOT_TIME: use 802.11g short slot time - * @IEEE80211_CONF_RADIOTAP: add radiotap header at receive time (if supported) - * @IEEE80211_CONF_SUPPORT_HT_MODE: use 802.11n HT capabilities (if supported) - * @IEEE80211_CONF_PS: Enable 802.11 power save mode - */ - enum ieee80211_conf_flags { -- /* -- * TODO: IEEE80211_CONF_SHORT_SLOT_TIME will be removed once drivers -- * have been converted to use bss_info_changed() for slot time -- * configuration -- */ -- IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0), -- IEEE80211_CONF_RADIOTAP = (1<<1), -- IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2), -- IEEE80211_CONF_PS = (1<<3), -+ IEEE80211_CONF_RADIOTAP = (1<<0), -+ IEEE80211_CONF_SUPPORT_HT_MODE = (1<<1), -+ IEEE80211_CONF_PS = (1<<2), - }; - -+/* XXX: remove all this once drivers stop trying to use it */ -+static inline int __deprecated __IEEE80211_CONF_SHORT_SLOT_TIME(void) -+{ -+ return 0; -+} -+#define IEEE80211_CONF_SHORT_SLOT_TIME (__IEEE80211_CONF_SHORT_SLOT_TIME()) -+ - /** - * struct ieee80211_conf - configuration of the device - * -@@ -769,6 +773,14 @@ enum ieee80211_tkip_key_type { - * @IEEE80211_HW_SPECTRUM_MGMT: - * Hardware supports spectrum management defined in 802.11h - * Measurement, Channel Switch, Quieting, TPC -+ * -+ * @IEEE80211_HW_NO_STACK_DYNAMIC_PS: -+ * Hardware which has dynamic power save support, meaning -+ * that power save is enabled in idle periods, and don't need support -+ * from stack. -+ * @IEEE80211_HW_BEACON_FILTER: -+ * Hardware supports dropping of irrelevant beacon frames to -+ * avoid waking up cpu. - */ - enum ieee80211_hw_flags { - IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, -@@ -780,6 +792,9 @@ enum ieee80211_hw_flags { - IEEE80211_HW_SIGNAL_DBM = 1<<7, - IEEE80211_HW_NOISE_DBM = 1<<8, - IEEE80211_HW_SPECTRUM_MGMT = 1<<9, -+ IEEE80211_HW_AMPDU_AGGREGATION = 1<<10, -+ IEEE80211_HW_NO_STACK_DYNAMIC_PS = 1<<11, -+ IEEE80211_HW_BEACON_FILTER = 1<<14, - }; - - /** -@@ -972,6 +987,24 @@ ieee80211_get_alt_retry_rate(const struc - */ - - /** -+ * DOC: Beacon filter support -+ * -+ * Some hardware have beacon filter support to reduce host cpu wakeups -+ * which will reduce system power consumption. It usuallly works so that -+ * the firmware creates a checksum of the beacon but omits all constantly -+ * changing elements (TSF, TIM etc). Whenever the checksum changes the -+ * beacon is forwarded to the host, otherwise it will be just dropped. That -+ * way the host will only receive beacons where some relevant information -+ * (for example ERP protection or WMM settings) have changed. -+ * -+ * Beacon filter support is informed with %IEEE80211_HW_BEACON_FILTER flag. -+ * The driver needs to enable beacon filter support whenever power save is -+ * enabled, that is %IEEE80211_CONF_PS is set. When power save is enabled, -+ * the stack will not check for beacon miss at all and the driver needs to -+ * notify about complete loss of beacons with ieee80211_beacon_loss(). 
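A driver-side sketch (not part of the patch) of the reporting path described in the beacon filter documentation above; the private structure, its fields and the firmware event hook are assumptions — only ieee80211_beacon_loss() and the configuration flag come from this patch.

/* Hypothetical driver private data, for illustration only. */
struct wl_example_priv {
	struct ieee80211_hw *hw;
	struct ieee80211_vif *vif;
};

static void wl_example_fw_beacon_loss_event(struct wl_example_priv *priv)
{
	/* Only meaningful while mac80211 has power save enabled. */
	if (priv->hw->conf.flags & IEEE80211_CONF_PS)
		ieee80211_beacon_loss(priv->vif);
}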
-+ */ -+ -+/** - * DOC: Frame filtering - * - * mac80211 requires to see many management frames for proper -@@ -1801,6 +1834,16 @@ void ieee80211_stop_tx_ba_cb_irqsafe(str - struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, - const u8 *addr); - -+/** -+ * ieee80211_beacon_loss - inform hardware does not receive beacons -+ * -+ * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. -+ * -+ * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and -+ * IEEE80211_CONF_PS is set, the driver needs to inform whenever the -+ * hardware is not receiving beacons with this function. -+ */ -+void ieee80211_beacon_loss(struct ieee80211_vif *vif); - - /* Rate control API */ - /** -@@ -1869,4 +1912,12 @@ rate_lowest_index(struct ieee80211_suppo - int ieee80211_rate_control_register(struct rate_control_ops *ops); - void ieee80211_rate_control_unregister(struct rate_control_ops *ops); - -+enum ieee80211_rssi_state { -+ IEEE80211_RSSI_STATE_HIGH, -+ IEEE80211_RSSI_STATE_LOW, -+}; -+ -+void ieee80211_rssi_changed(struct ieee80211_vif *vif, -+ enum ieee80211_rssi_state state); -+ - #endif /* MAC80211_H */ -diff -Nurp linux-omap-2.6.28-omap1/include/net/neighbour.h linux-omap-2.6.28-nokia1/include/net/neighbour.h ---- linux-omap-2.6.28-omap1/include/net/neighbour.h 2011-06-22 13:14:24.583067657 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/neighbour.h 2011-06-22 13:19:33.263063268 +0200 -@@ -24,6 +24,7 @@ - - #include - #include -+#include - #include - - /* -@@ -167,7 +168,7 @@ struct neigh_table - int gc_thresh2; - int gc_thresh3; - unsigned long last_flush; -- struct timer_list gc_timer; -+ struct delayed_work gc_work; - struct timer_list proxy_timer; - struct sk_buff_head proxy_queue; - atomic_t entries; -@@ -178,7 +179,6 @@ struct neigh_table - struct neighbour **hash_buckets; - unsigned int hash_mask; - __u32 hash_rnd; -- unsigned int hash_chain_gc; - struct pneigh_entry **phash_buckets; - #ifdef CONFIG_PROC_FS - struct proc_dir_entry *pde; -diff -Nurp linux-omap-2.6.28-omap1/include/net/phonet/pep.h linux-omap-2.6.28-nokia1/include/net/phonet/pep.h ---- linux-omap-2.6.28-omap1/include/net/phonet/pep.h 2011-06-22 13:14:24.603067658 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/phonet/pep.h 2011-06-22 13:19:33.263063268 +0200 -@@ -35,12 +35,12 @@ struct pep_sock { - struct sock *listener; - struct sk_buff_head ctrlreq_queue; - #define PNPIPE_CTRLREQ_MAX 10 -+ atomic_t tx_credits; - int ifindex; - u16 peer_type; /* peer type/subtype */ - u8 pipe_handle; - - u8 rx_credits; -- u8 tx_credits; - u8 rx_fc; /* RX flow control */ - u8 tx_fc; /* TX flow control */ - u8 init_enable; /* auto-enable at creation */ -diff -Nurp linux-omap-2.6.28-omap1/include/net/phonet/phonet.h linux-omap-2.6.28-nokia1/include/net/phonet/phonet.h ---- linux-omap-2.6.28-omap1/include/net/phonet/phonet.h 2011-06-22 13:14:24.603067658 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/phonet/phonet.h 2011-06-22 13:19:33.263063268 +0200 -@@ -46,7 +46,7 @@ static inline struct pn_sock *pn_sk(stru - - extern const struct proto_ops phonet_dgram_ops; - --struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *sa); -+struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa); - void phonet_get_local_port_range(int *min, int *max); - void pn_sock_hash(struct sock *sk); - void pn_sock_unhash(struct sock *sk); -@@ -105,7 +105,6 @@ void phonet_proto_unregister(int protoco - - int phonet_sysctl_init(void); - void phonet_sysctl_exit(void); --void 
phonet_netlink_register(void); - int isi_register(void); - void isi_unregister(void); - -diff -Nurp linux-omap-2.6.28-omap1/include/net/phonet/pn_dev.h linux-omap-2.6.28-nokia1/include/net/phonet/pn_dev.h ---- linux-omap-2.6.28-omap1/include/net/phonet/pn_dev.h 2011-06-22 13:14:24.603067658 +0200 -+++ linux-omap-2.6.28-nokia1/include/net/phonet/pn_dev.h 2011-06-22 13:19:33.263063268 +0200 -@@ -28,7 +28,7 @@ struct phonet_device_list { - spinlock_t lock; - }; - --extern struct phonet_device_list pndevs; -+struct phonet_device_list *phonet_device_list(struct net *net); - - struct phonet_device { - struct list_head list; -@@ -36,14 +36,15 @@ struct phonet_device { - DECLARE_BITMAP(addrs, 64); - }; - --void phonet_device_init(void); -+int phonet_device_init(void); - void phonet_device_exit(void); -+int phonet_netlink_register(void); - struct net_device *phonet_device_get(struct net *net); - - int phonet_address_add(struct net_device *dev, u8 addr); - int phonet_address_del(struct net_device *dev, u8 addr); - u8 phonet_address_get(struct net_device *dev, u8 addr); --int phonet_address_lookup(u8 addr); -+int phonet_address_lookup(struct net *net, u8 addr); - - #define PN_NO_ADDR 0xff - -diff -Nurp linux-omap-2.6.28-omap1/include/sound/jack.h linux-omap-2.6.28-nokia1/include/sound/jack.h ---- linux-omap-2.6.28-omap1/include/sound/jack.h 2011-06-22 13:14:24.623067656 +0200 -+++ linux-omap-2.6.28-nokia1/include/sound/jack.h 2011-06-22 13:19:33.263063268 +0200 -@@ -30,11 +30,18 @@ struct input_dev; - /** - * Jack types which can be reported. These values are used as a - * bitmask. -+ * -+ * Note that this must be kept in sync with the lookup table in -+ * sound/core/jack.c. - */ - enum snd_jack_types { - SND_JACK_HEADPHONE = 0x0001, - SND_JACK_MICROPHONE = 0x0002, - SND_JACK_HEADSET = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE, -+ SND_JACK_LINEOUT = 0x0004, -+ SND_JACK_MECHANICAL = 0x0008, /* If detected separately */ -+ SND_JACK_VIDEOOUT = 0x0010, -+ SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT, - }; - - struct snd_jack { -diff -Nurp linux-omap-2.6.28-omap1/kernel/cgroup.c linux-omap-2.6.28-nokia1/kernel/cgroup.c ---- linux-omap-2.6.28-omap1/kernel/cgroup.c 2011-06-22 13:14:24.773067654 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/cgroup.c 2011-06-22 13:19:33.263063268 +0200 -@@ -2942,7 +2942,11 @@ int cgroup_clone(struct task_struct *tsk - parent = task_cgroup(tsk, subsys->subsys_id); - - /* Pin the hierarchy */ -- atomic_inc(&parent->root->sb->s_active); -+ if (!atomic_inc_not_zero(&parent->root->sb->s_active)) { -+ /* We race with the final deactivate_super() */ -+ mutex_unlock(&cgroup_mutex); -+ return 0; -+ } - - /* Keep the cgroup alive */ - get_css_set(cg); -diff -Nurp linux-omap-2.6.28-omap1/kernel/hrtimer.c linux-omap-2.6.28-nokia1/kernel/hrtimer.c ---- linux-omap-2.6.28-omap1/kernel/hrtimer.c 2011-06-22 13:14:24.803067654 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/hrtimer.c 2011-06-22 13:19:33.263063268 +0200 -@@ -333,6 +333,8 @@ ktime_t ktime_add_safe(const ktime_t lhs - return res; - } - -+EXPORT_SYMBOL_GPL(ktime_add_safe); -+ - #ifdef CONFIG_DEBUG_OBJECTS_TIMERS - - static struct debug_obj_descr hrtimer_debug_descr; -diff -Nurp linux-omap-2.6.28-omap1/kernel/panic.c linux-omap-2.6.28-nokia1/kernel/panic.c ---- linux-omap-2.6.28-omap1/kernel/panic.c 2011-06-22 13:14:24.893067652 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/panic.c 2011-06-22 13:19:33.263063268 +0200 -@@ -73,7 +73,6 @@ NORET_TYPE void panic(const char * fmt, - vsnprintf(buf, sizeof(buf), fmt, args); - va_end(args); - 
printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); -- bust_spinlocks(0); - - /* - * If we have crashed and we have a crash kernel loaded let it handle -@@ -93,6 +92,8 @@ NORET_TYPE void panic(const char * fmt, - - atomic_notifier_call_chain(&panic_notifier_list, 0, buf); - -+ bust_spinlocks(0); -+ - if (!panic_blink) - panic_blink = no_blink; - -diff -Nurp linux-omap-2.6.28-omap1/kernel/posix-timers.c linux-omap-2.6.28-nokia1/kernel/posix-timers.c ---- linux-omap-2.6.28-omap1/kernel/posix-timers.c 2011-06-22 13:14:24.893067652 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/posix-timers.c 2011-06-22 13:19:33.263063268 +0200 -@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itim - return -EOPNOTSUPP; - } - -+static int no_nsleep(const clockid_t which_clock, int flags, -+ struct timespec *tsave, struct timespec __user *rmtp) -+{ -+ return -EOPNOTSUPP; -+} -+ - /* - * Return nonzero if we know a priori this clockid_t value is bogus. - */ -@@ -254,6 +260,7 @@ static __init int init_posix_timers(void - .clock_get = posix_get_monotonic_raw, - .clock_set = do_posix_clock_nosettime, - .timer_create = no_timer_create, -+ .nsleep = no_nsleep, - }; - - register_posix_clock(CLOCK_REALTIME, &clock_realtime); -diff -Nurp linux-omap-2.6.28-omap1/kernel/time/clockevents.c linux-omap-2.6.28-nokia1/kernel/time/clockevents.c ---- linux-omap-2.6.28-omap1/kernel/time/clockevents.c 2011-06-22 13:14:25.153067649 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/time/clockevents.c 2011-06-22 13:19:33.273063268 +0200 -@@ -36,7 +36,7 @@ static DEFINE_SPINLOCK(clockevents_lock) - * - * Math helper, returns latch value converted to nanoseconds (bound checked) - */ --unsigned long clockevent_delta2ns(unsigned long latch, -+unsigned long long clockevent_delta2ns(unsigned long latch, - struct clock_event_device *evt) - { - u64 clc = ((u64) latch << evt->shift); -@@ -49,10 +49,10 @@ unsigned long clockevent_delta2ns(unsign - do_div(clc, evt->mult); - if (clc < 1000) - clc = 1000; -- if (clc > LONG_MAX) -- clc = LONG_MAX; -+ if (clc > LLONG_MAX) -+ clc = LLONG_MAX; - -- return (unsigned long) clc; -+ return (unsigned long long) clc; - } - - /** -diff -Nurp linux-omap-2.6.28-omap1/kernel/time/tick-sched.c linux-omap-2.6.28-nokia1/kernel/time/tick-sched.c ---- linux-omap-2.6.28-omap1/kernel/time/tick-sched.c 2011-06-22 13:14:25.153067649 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/time/tick-sched.c 2011-06-22 13:19:33.273063268 +0200 -@@ -222,6 +222,15 @@ void tick_nohz_stop_sched_tick(int inidl - - cpu = smp_processor_id(); - ts = &per_cpu(tick_cpu_sched, cpu); -+ -+ /* -+ * Call to tick_nohz_start_idle stops the last_update_time from being -+ * updated. Thus, it must not be called in the event we are called from -+ * irq_exit() with the prior state different than idle. 
-+ */ -+ if (!inidle && !ts->inidle) -+ goto end; -+ - now = tick_nohz_start_idle(ts); - - /* -@@ -239,9 +248,6 @@ void tick_nohz_stop_sched_tick(int inidl - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) - goto end; - -- if (!inidle && !ts->inidle) -- goto end; -- - ts->inidle = 1; - - if (need_resched()) -@@ -282,8 +288,31 @@ void tick_nohz_stop_sched_tick(int inidl - /* Schedule the tick, if we are at least one jiffie off */ - if ((long)delta_jiffies >= 1) { - -+ /* -+ * calculate the expiry time for the next timer wheel -+ * timer -+ */ -+ expires = ktime_add_ns(last_update, tick_period.tv64 * -+ delta_jiffies); -+ -+ /* -+ * If this cpu is the one which updates jiffies, then -+ * give up the assignment and let it be taken by the -+ * cpu which runs the tick timer next, which might be -+ * this cpu as well. If we don't drop this here the -+ * jiffies might be stale and do_timer() never -+ * invoked. -+ */ -+ if (cpu == tick_do_timer_cpu) -+ tick_do_timer_cpu = TICK_DO_TIMER_NONE; -+ - if (delta_jiffies > 1) - cpu_set(cpu, nohz_cpu_mask); -+ -+ /* Skip reprogram of event if its not changed */ -+ if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) -+ goto out; -+ - /* - * nohz_stop_sched_tick can be called several times before - * the nohz_restart_sched_tick is called. This happens when -@@ -306,17 +335,6 @@ void tick_nohz_stop_sched_tick(int inidl - rcu_enter_nohz(); - } - -- /* -- * If this cpu is the one which updates jiffies, then -- * give up the assignment and let it be taken by the -- * cpu which runs the tick timer next, which might be -- * this cpu as well. If we don't drop this here the -- * jiffies might be stale and do_timer() never -- * invoked. -- */ -- if (cpu == tick_do_timer_cpu) -- tick_do_timer_cpu = TICK_DO_TIMER_NONE; -- - ts->idle_sleeps++; - - /* -@@ -332,12 +350,7 @@ void tick_nohz_stop_sched_tick(int inidl - goto out; - } - -- /* -- * calculate the expiry time for the next timer wheel -- * timer -- */ -- expires = ktime_add_ns(last_update, tick_period.tv64 * -- delta_jiffies); -+ /* Mark expiries */ - ts->idle_expires = expires; - - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { -diff -Nurp linux-omap-2.6.28-omap1/kernel/time/timer_list.c linux-omap-2.6.28-nokia1/kernel/time/timer_list.c ---- linux-omap-2.6.28-omap1/kernel/time/timer_list.c 2011-06-22 13:14:25.153067649 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/time/timer_list.c 2011-06-22 13:19:33.273063268 +0200 -@@ -204,7 +204,7 @@ print_tickdevice(struct seq_file *m, str - return; - } - SEQ_printf(m, "%s\n", dev->name); -- SEQ_printf(m, " max_delta_ns: %lu\n", dev->max_delta_ns); -+ SEQ_printf(m, " max_delta_ns: %llu\n", dev->max_delta_ns); - SEQ_printf(m, " min_delta_ns: %lu\n", dev->min_delta_ns); - SEQ_printf(m, " mult: %lu\n", dev->mult); - SEQ_printf(m, " shift: %d\n", dev->shift); -diff -Nurp linux-omap-2.6.28-omap1/kernel/timer.c linux-omap-2.6.28-nokia1/kernel/timer.c ---- linux-omap-2.6.28-omap1/kernel/timer.c 2011-06-22 13:14:25.153067649 +0200 -+++ linux-omap-2.6.28-nokia1/kernel/timer.c 2011-06-22 13:19:33.273063268 +0200 -@@ -930,6 +930,9 @@ cascade: - index = slot = timer_jiffies & TVN_MASK; - do { - list_for_each_entry(nte, varp->vec + slot, entry) { -+ if (tbase_get_deferrable(nte->base)) -+ continue; -+ - found = 1; - if (time_before(nte->expires, expires)) - expires = nte->expires; -diff -Nurp linux-omap-2.6.28-omap1/lib/bust_spinlocks.c linux-omap-2.6.28-nokia1/lib/bust_spinlocks.c ---- linux-omap-2.6.28-omap1/lib/bust_spinlocks.c 2011-06-22 13:14:25.263067647 +0200 -+++ 
linux-omap-2.6.28-nokia1/lib/bust_spinlocks.c 2011-06-22 13:19:33.273063268 +0200 -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - - - void __attribute__((weak)) bust_spinlocks(int yes) -@@ -22,6 +23,7 @@ void __attribute__((weak)) bust_spinlock - #ifdef CONFIG_VT - unblank_screen(); - #endif -+ console_unblank(); - if (--oops_in_progress == 0) - wake_up_klogd(); - } -diff -Nurp linux-omap-2.6.28-omap1/lib/Kconfig.debug linux-omap-2.6.28-nokia1/lib/Kconfig.debug ---- linux-omap-2.6.28-omap1/lib/Kconfig.debug 2011-06-22 13:14:25.263067647 +0200 -+++ linux-omap-2.6.28-nokia1/lib/Kconfig.debug 2011-06-22 13:19:33.273063268 +0200 -@@ -731,6 +731,12 @@ config FAULT_INJECTION_DEBUG_FS - help - Enable configuration of fault-injection capabilities via debugfs. - -+config PANIC_INFO_BUFF -+ tristate "Buffer to be printed at panic" -+ depends on DEBUG_FS -+ help -+ Provide a small buffer which will be printed at panic. -+ - config FAULT_INJECTION_STACKTRACE_FILTER - bool "stacktrace filter for fault-injection capabilities" - depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT -diff -Nurp linux-omap-2.6.28-omap1/lib/lzo/lzo1x_9x.c linux-omap-2.6.28-nokia1/lib/lzo/lzo1x_9x.c ---- linux-omap-2.6.28-omap1/lib/lzo/lzo1x_9x.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/lib/lzo/lzo1x_9x.c 2011-06-22 13:19:33.273063268 +0200 -@@ -0,0 +1,1272 @@ -+/* lzo1x_9x.c -- implementation of the LZO1X-999 compression algorithm -+ -+ This file is part of the LZO real-time data compression library. -+ -+ Copyright (C) 1996-2002 Markus Franz Xaver Johannes Oberhumer -+ All Rights Reserved. -+ -+ The LZO library is free software; you can redistribute it and/or -+ modify it under the terms of version 2 of the GNU General Public -+ License as published by the Free Software Foundation. -+ -+ The LZO library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License -+ along with the LZO library; see the file COPYING. -+ If not, write to the Free Software Foundation, Inc., -+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -+ -+ Markus F.X.J. 
Oberhumer -+ -+ */ -+ -+#include -+#include -+ -+typedef unsigned int lzo_uint32; -+typedef int lzo_int32; -+typedef unsigned int lzo_uint; -+typedef int lzo_int; -+typedef int lzo_bool; -+ -+typedef void ( *lzo_progress_callback_t) (unsigned, unsigned); -+ -+typedef struct -+{ -+ int init; -+ -+ lzo_uint look; -+ -+ lzo_uint m_len; -+ lzo_uint m_off; -+ -+ lzo_uint last_m_len; -+ lzo_uint last_m_off; -+ -+ const unsigned char *bp; -+ const unsigned char *ip; -+ const unsigned char *in; -+ const unsigned char *in_end; -+ unsigned char *out; -+ -+ lzo_progress_callback_t cb; -+ -+ lzo_uint textsize; -+ lzo_uint codesize; -+ lzo_uint printcount; -+ -+ -+ unsigned long lit_bytes; -+ unsigned long match_bytes; -+ unsigned long rep_bytes; -+ unsigned long lazy; -+ -+ lzo_uint r1_lit; -+ lzo_uint r1_m_len; -+ -+ unsigned long m1a_m, m1b_m, m2_m, m3_m, m4_m; -+ unsigned long lit1_r, lit2_r, lit3_r; -+} -+lzo1x_999_t; -+ -+typedef unsigned short swd_uint; -+ -+typedef struct -+{ -+ lzo_uint n; -+ lzo_uint f; -+ lzo_uint threshold; -+ -+ lzo_uint max_chain; -+ lzo_uint nice_length; -+ lzo_bool use_best_off; -+ lzo_uint lazy_insert; -+ -+ lzo_uint m_len; -+ lzo_uint m_off; -+ lzo_uint look; -+ int b_char; -+ -+ lzo_uint best_off[ (((8) >= (33) ? ((8) >= (9) ? (8) : (9)) : ((33) >= (9) ? (33) : (9))) + 1) ]; -+ -+ lzo1x_999_t *c; -+ lzo_uint m_pos; -+ -+ lzo_uint best_pos[ (((8) >= (33) ? ((8) >= (9) ? (8) : (9)) : ((33) >= (9) ? (33) : (9))) + 1) ]; -+ -+ const unsigned char *dict; -+ const unsigned char *dict_end; -+ lzo_uint dict_len; -+ -+ lzo_uint ip; -+ lzo_uint bp; -+ lzo_uint rp; -+ lzo_uint b_size; -+ -+ unsigned char *b_wrap; -+ -+ lzo_uint node_count; -+ lzo_uint first_rp; -+ -+ unsigned char b [ 0xbfff + 2048 + 2048 ]; -+ swd_uint head3 [ 16384 ]; -+ swd_uint succ3 [ 0xbfff + 2048 ]; -+ swd_uint best3 [ 0xbfff + 2048 ]; -+ swd_uint llen3 [ 16384 ]; -+ -+ swd_uint head2 [ 65536L ]; -+} -+lzo1x_999_swd_t; -+ -+static -+void swd_initdict(lzo1x_999_swd_t *s, const unsigned char *dict, lzo_uint dict_len) -+{ -+ s->dict = s->dict_end = ((void *)0); -+ s->dict_len = 0; -+ -+ if (!dict || dict_len <= 0) -+ return; -+ if (dict_len > s->n) -+ { -+ dict += dict_len - s->n; -+ dict_len = s->n; -+ } -+ -+ s->dict = dict; -+ s->dict_len = dict_len; -+ s->dict_end = dict + dict_len; -+ memcpy(s->b,dict,dict_len); -+ s->ip = dict_len; -+} -+ -+ -+static -+void swd_insertdict(lzo1x_999_swd_t *s, lzo_uint node, lzo_uint len) -+{ -+ lzo_uint key; -+ -+ s->node_count = s->n - len; -+ s->first_rp = node; -+ -+ while (len-- > 0) -+ { -+ key = (((0x9f5f*(((((lzo_uint32)s->b[node]<<5)^s->b[node+1])<<5)^s->b[node+2]))>>5) & (16384 -1)); -+ s->succ3[node] = s->head3[key]; -+ s->head3[key] = ((swd_uint)(node)); -+ s->best3[node] = ((swd_uint)(s->f + 1)); -+ s->llen3[key]++; -+ ((void) (0)); -+ -+ -+ key = (* (unsigned short *) &(s->b[node])); -+ s->head2[key] = ((swd_uint)(node)); -+ -+ -+ node++; -+ } -+} -+ -+ -+ -+ -+ -+ -+static -+int swd_init(lzo1x_999_swd_t *s, const unsigned char *dict, lzo_uint dict_len) -+{ -+ lzo_uint i = 0; -+ int c = 0; -+ -+ s->n = 0xbfff; -+ s->f = 2048; -+ s->threshold = 1; -+ -+ -+ s->max_chain = 2048; -+ s->nice_length = 2048; -+ s->use_best_off = 0; -+ s->lazy_insert = 0; -+ -+ s->b_size = s->n + s->f; -+ if (2 * s->f >= s->n || s->b_size + s->f >= (32767 * 2 + 1)) -+ return (-1); -+ s->b_wrap = s->b + s->b_size; -+ s->node_count = s->n; -+ -+ memset(s->llen3, 0, sizeof(s->llen3[0]) * 16384); -+ -+ -+ memset(s->head2, 0xff, sizeof(s->head2[0]) * 65536L); -+ ((void) (0)); -+ -+ -+ 
-+ -+ -+ -+ s->ip = 0; -+ swd_initdict(s,dict,dict_len); -+ s->bp = s->ip; -+ s->first_rp = s->ip; -+ -+ ((void) (0)); -+ -+ s->look = (lzo_uint) (s->c->in_end - s->c->ip); -+ if (s->look > 0) -+ { -+ if (s->look > s->f) -+ s->look = s->f; -+ memcpy(&s->b[s->ip],s->c->ip,s->look); -+ s->c->ip += s->look; -+ s->ip += s->look; -+ } -+ -+ if (s->ip == s->b_size) -+ s->ip = 0; -+ -+ if (s->look >= 2 && s->dict_len > 0) -+ swd_insertdict(s,0,s->dict_len); -+ -+ s->rp = s->first_rp; -+ if (s->rp >= s->node_count) -+ s->rp -= s->node_count; -+ else -+ s->rp += s->b_size - s->node_count; -+ -+ ((void)&i); -+ ((void)&c); -+ return 0; -+} -+ -+ -+static -+void swd_exit(lzo1x_999_swd_t *s) -+{ -+ -+ ((void)&s); -+ -+} -+ -+static __inline__ -+void swd_getbyte(lzo1x_999_swd_t *s) -+{ -+ int c; -+ -+ if ((c = ((*(s->c)).ip < (*(s->c)).in_end ? *((*(s->c)).ip)++ : (-1))) < 0) -+ { -+ if (s->look > 0) -+ --s->look; -+ -+ -+ -+ -+ -+ -+ } -+ else -+ { -+ s->b[s->ip] = ((unsigned char) ((c) & 0xff)); -+ if (s->ip < s->f) -+ s->b_wrap[s->ip] = ((unsigned char) ((c) & 0xff)); -+ } -+ if (++s->ip == s->b_size) -+ s->ip = 0; -+ if (++s->bp == s->b_size) -+ s->bp = 0; -+ if (++s->rp == s->b_size) -+ s->rp = 0; -+} -+ -+ -+ -+ -+ -+ -+static __inline__ -+void swd_remove_node(lzo1x_999_swd_t *s, lzo_uint node) -+{ -+ if (s->node_count == 0) -+ { -+ lzo_uint key; -+ -+ key = (((0x9f5f*(((((lzo_uint32)s->b[node]<<5)^s->b[node+1])<<5)^s->b[node+2]))>>5) & (16384 -1)); -+ ((void) (0)); -+ --s->llen3[key]; -+ -+ -+ key = (* (unsigned short *) &(s->b[node])); -+ ((void) (0)); -+ if ((lzo_uint) s->head2[key] == node) -+ s->head2[key] = (32767 * 2 + 1); -+ -+ } -+ else -+ --s->node_count; -+} -+ -+ -+ -+ -+ -+ -+static -+void swd_accept(lzo1x_999_swd_t *s, lzo_uint n) -+{ -+ ((void) (0)); -+ -+ while (n--) -+ { -+ lzo_uint key; -+ -+ swd_remove_node(s,s->rp); -+ -+ -+ key = (((0x9f5f*(((((lzo_uint32)s->b[s->bp]<<5)^s->b[s->bp+1])<<5)^s->b[s->bp+2]))>>5) & (16384 -1)); -+ s->succ3[s->bp] = s->head3[key]; -+ s->head3[key] = ((swd_uint)(s->bp)); -+ s->best3[s->bp] = ((swd_uint)(s->f + 1)); -+ s->llen3[key]++; -+ ((void) (0)); -+ -+ -+ -+ key = (* (unsigned short *) &(s->b[s->bp])); -+ s->head2[key] = ((swd_uint)(s->bp)); -+ -+ -+ swd_getbyte(s); -+ } -+} -+ -+ -+ -+ -+ -+ -+static -+void swd_search(lzo1x_999_swd_t *s, lzo_uint node, lzo_uint cnt) -+{ -+ -+ -+ -+ -+ -+ const unsigned char *p1; -+ const unsigned char *p2; -+ const unsigned char *px; -+ -+ lzo_uint m_len = s->m_len; -+ const unsigned char * b = s->b; -+ const unsigned char * bp = s->b + s->bp; -+ const unsigned char * bx = s->b + s->bp + s->look; -+ unsigned char scan_end1; -+ -+ ((void) (0)); -+ -+ scan_end1 = bp[m_len - 1]; -+ for ( ; cnt-- > 0; node = s->succ3[node]) -+ { -+ p1 = bp; -+ p2 = b + node; -+ px = bx; -+ -+ ((void) (0)); -+ -+ if ( -+ -+ p2[m_len - 1] == scan_end1 && -+ p2[m_len] == p1[m_len] && -+ -+ p2[0] == p1[0] && -+ p2[1] == p1[1]) -+ { -+ lzo_uint i; -+ ((void) (0)); -+ -+ p1 += 2; p2 += 2; -+ do {} while (++p1 < px && *p1 == *++p2); -+ -+ i = p1 - bp; -+ -+ -+ -+ -+ -+ -+ -+ ((void) (0)); -+ -+ -+ if (i < (((8) >= (33) ? ((8) >= (9) ? (8) : (9)) : ((33) >= (9) ? 
(33) : (9))) + 1)) -+ { -+ if (s->best_pos[i] == 0) -+ s->best_pos[i] = node + 1; -+ } -+ -+ if (i > m_len) -+ { -+ s->m_len = m_len = i; -+ s->m_pos = node; -+ if (m_len == s->look) -+ return; -+ if (m_len >= s->nice_length) -+ return; -+ if (m_len > (lzo_uint) s->best3[node]) -+ return; -+ scan_end1 = bp[m_len - 1]; -+ } -+ } -+ } -+} -+ -+static -+lzo_bool swd_search2(lzo1x_999_swd_t *s) -+{ -+ lzo_uint key; -+ -+ ((void) (0)); -+ ((void) (0)); -+ -+ key = s->head2[ (* (unsigned short *) &(s->b[s->bp])) ]; -+ if (key == (32767 * 2 + 1)) -+ return 0; -+ -+ -+ -+ -+ -+ ((void) (0)); -+ -+ if (s->best_pos[2] == 0) -+ s->best_pos[2] = key + 1; -+ -+ -+ if (s->m_len < 2) -+ { -+ s->m_len = 2; -+ s->m_pos = key; -+ } -+ return 1; -+} -+ -+static -+void swd_findbest(lzo1x_999_swd_t *s) -+{ -+ lzo_uint key; -+ lzo_uint cnt, node; -+ lzo_uint len; -+ -+ ((void) (0)); -+ -+ -+ key = (((0x9f5f*(((((lzo_uint32)s->b[s->bp]<<5)^s->b[s->bp+1])<<5)^s->b[s->bp+2]))>>5) & (16384 -1)); -+ node = s->succ3[s->bp] = s->head3[key]; -+ cnt = s->llen3[key]++; -+ ((void) (0)); -+ if (cnt > s->max_chain && s->max_chain > 0) -+ cnt = s->max_chain; -+ s->head3[key] = ((swd_uint)(s->bp)); -+ -+ s->b_char = s->b[s->bp]; -+ len = s->m_len; -+ if (s->m_len >= s->look) -+ { -+ if (s->look == 0) -+ s->b_char = -1; -+ s->m_off = 0; -+ s->best3[s->bp] = ((swd_uint)(s->f + 1)); -+ } -+ else -+ { -+ -+ if (swd_search2(s)) -+ -+ if (s->look >= 3) -+ swd_search(s,node,cnt); -+ if (s->m_len > len) -+ s->m_off = (s->bp > (s->m_pos) ? s->bp - (s->m_pos) : s->b_size - ((s->m_pos) - s->bp)); -+ s->best3[s->bp] = ((swd_uint)(s->m_len)); -+ -+ -+ if (s->use_best_off) -+ { -+ int i; -+ for (i = 2; i < (((8) >= (33) ? ((8) >= (9) ? (8) : (9)) : ((33) >= (9) ? (33) : (9))) + 1); i++) -+ if (s->best_pos[i] > 0) -+ s->best_off[i] = (s->bp > (s->best_pos[i]-1) ? s->bp - (s->best_pos[i]-1) : s->b_size - ((s->best_pos[i]-1) - s->bp)); -+ else -+ s->best_off[i] = 0; -+ } -+ -+ } -+ -+ swd_remove_node(s,s->rp); -+ -+ -+ -+ key = (* (unsigned short *) &(s->b[s->bp])); -+ s->head2[key] = ((swd_uint)(s->bp)); -+ -+} -+ -+ -+ -+ -+ -+ -+ -+ -+static int -+init_match ( lzo1x_999_t *c, lzo1x_999_swd_t *s, -+ const unsigned char *dict, lzo_uint dict_len, -+ lzo_uint32 flags ) -+{ -+ int r; -+ -+ ((void) (0)); -+ c->init = 1; -+ -+ s->c = c; -+ -+ c->last_m_len = c->last_m_off = 0; -+ -+ c->textsize = c->codesize = c->printcount = 0; -+ c->lit_bytes = c->match_bytes = c->rep_bytes = 0; -+ c->lazy = 0; -+ -+ r = swd_init(s,dict,dict_len); -+ if (r != 0) -+ return r; -+ -+ s->use_best_off = (flags & 1) ? 
1 : 0; -+ return r; -+} -+ -+ -+ -+ -+ -+ -+static int -+find_match ( lzo1x_999_t *c, lzo1x_999_swd_t *s, -+ lzo_uint this_len, lzo_uint skip ) -+{ -+ ((void) (0)); -+ -+ if (skip > 0) -+ { -+ ((void) (0)); -+ swd_accept(s, this_len - skip); -+ c->textsize += this_len - skip + 1; -+ } -+ else -+ { -+ ((void) (0)); -+ c->textsize += this_len - skip; -+ } -+ -+ s->m_len = 1; -+ s->m_len = 1; -+ -+ if (s->use_best_off) -+ memset(s->best_pos,0,sizeof(s->best_pos)); -+ -+ swd_findbest(s); -+ c->m_len = s->m_len; -+ c->m_off = s->m_off; -+ -+ swd_getbyte(s); -+ -+ if (s->b_char < 0) -+ { -+ c->look = 0; -+ c->m_len = 0; -+ swd_exit(s); -+ } -+ else -+ { -+ c->look = s->look + 1; -+ } -+ c->bp = c->ip - c->look; -+ -+ if (c->cb && c->textsize > c->printcount) -+ { -+ (*c->cb)(c->textsize,c->codesize); -+ c->printcount += 1024; -+ } -+ -+ return 0; -+} -+ -+ -+ -+ -+static int -+lzo1x_999_compress_internal ( const unsigned char *in , lzo_uint in_len, -+ unsigned char *out, lzo_uint * out_len, -+ void * wrkmem, -+ const unsigned char *dict, lzo_uint dict_len, -+ lzo_progress_callback_t cb, -+ int try_lazy, -+ lzo_uint good_length, -+ lzo_uint max_lazy, -+ lzo_uint nice_length, -+ lzo_uint max_chain, -+ lzo_uint32 flags ); -+ -+ -+ -+ -+ -+ -+static unsigned char * -+code_match ( lzo1x_999_t *c, unsigned char *op, lzo_uint m_len, lzo_uint m_off ) -+{ -+ lzo_uint x_len = m_len; -+ lzo_uint x_off = m_off; -+ -+ c->match_bytes += m_len; -+ -+ ((void) (0)); -+ if (m_len == 2) -+ { -+ ((void) (0)); -+ ((void) (0)); ((void) (0)); -+ m_off -= 1; -+ -+ -+ -+ -+ *op++ = ((unsigned char) ((0 | ((m_off & 3) << 2)) & 0xff)); -+ *op++ = ((unsigned char) ((m_off >> 2) & 0xff)); -+ -+ c->m1a_m++; -+ } -+ -+ -+ -+ else if (m_len <= 8 && m_off <= 0x0800) -+ -+ { -+ ((void) (0)); -+ -+ m_off -= 1; -+ *op++ = ((unsigned char) ((((m_len - 1) << 5) | ((m_off & 7) << 2)) & 0xff)); -+ *op++ = ((unsigned char) ((m_off >> 3) & 0xff)); -+ ((void) (0)); -+ -+ c->m2_m++; -+ } -+ else if (m_len == 3 && m_off <= (0x0400 + 0x0800) && c->r1_lit >= 4) -+ { -+ ((void) (0)); -+ ((void) (0)); -+ m_off -= 1 + 0x0800; -+ -+ -+ -+ -+ *op++ = ((unsigned char) ((0 | ((m_off & 3) << 2)) & 0xff)); -+ *op++ = ((unsigned char) ((m_off >> 2) & 0xff)); -+ -+ c->m1b_m++; -+ } -+ else if (m_off <= 0x4000) -+ { -+ ((void) (0)); -+ m_off -= 1; -+ if (m_len <= 33) -+ *op++ = ((unsigned char) ((32 | (m_len - 2)) & 0xff)); -+ else -+ { -+ m_len -= 33; -+ *op++ = 32 | 0; -+ while (m_len > 255) -+ { -+ m_len -= 255; -+ *op++ = 0; -+ } -+ ((void) (0)); -+ *op++ = ((unsigned char) ((m_len) & 0xff)); -+ } -+ -+ -+ -+ -+ *op++ = ((unsigned char) ((m_off << 2) & 0xff)); -+ *op++ = ((unsigned char) ((m_off >> 6) & 0xff)); -+ -+ c->m3_m++; -+ } -+ else -+ { -+ lzo_uint k; -+ -+ ((void) (0)); -+ ((void) (0)); ((void) (0)); -+ m_off -= 0x4000; -+ k = (m_off & 0x4000) >> 11; -+ if (m_len <= 9) -+ *op++ = ((unsigned char) ((16 | k | (m_len - 2)) & 0xff)); -+ else -+ { -+ m_len -= 9; -+ *op++ = ((unsigned char) ((16 | k | 0) & 0xff)); -+ while (m_len > 255) -+ { -+ m_len -= 255; -+ *op++ = 0; -+ } -+ ((void) (0)); -+ *op++ = ((unsigned char) ((m_len) & 0xff)); -+ } -+ -+ -+ -+ -+ *op++ = ((unsigned char) ((m_off << 2) & 0xff)); -+ *op++ = ((unsigned char) ((m_off >> 6) & 0xff)); -+ -+ c->m4_m++; -+ } -+ -+ c->last_m_len = x_len; -+ c->last_m_off = x_off; -+ return op; -+} -+ -+ -+static unsigned char * -+STORE_RUN ( lzo1x_999_t *c, unsigned char *op, const unsigned char *ii, lzo_uint t ) -+{ -+ c->lit_bytes += t; -+ -+ if (op == c->out && t <= 238) -+ { -+ *op++ = 
((unsigned char) ((17 + t) & 0xff)); -+ } -+ else if (t <= 3) -+ { -+ -+ -+ -+ op[-2] |= ((unsigned char) ((t) & 0xff)); -+ -+ c->lit1_r++; -+ } -+ else if (t <= 18) -+ { -+ *op++ = ((unsigned char) ((t - 3) & 0xff)); -+ c->lit2_r++; -+ } -+ else -+ { -+ lzo_uint tt = t - 18; -+ -+ *op++ = 0; -+ while (tt > 255) -+ { -+ tt -= 255; -+ *op++ = 0; -+ } -+ ((void) (0)); -+ *op++ = ((unsigned char) ((tt) & 0xff)); -+ c->lit3_r++; -+ } -+ do *op++ = *ii++; while (--t > 0); -+ -+ return op; -+} -+ -+ -+static unsigned char * -+code_run ( lzo1x_999_t *c, unsigned char *op, const unsigned char *ii, -+ lzo_uint lit, lzo_uint m_len ) -+{ -+ if (lit > 0) -+ { -+ ((void) (0)); -+ op = STORE_RUN(c,op,ii,lit); -+ c->r1_m_len = m_len; -+ c->r1_lit = lit; -+ } -+ else -+ { -+ ((void) (0)); -+ c->r1_m_len = 0; -+ c->r1_lit = 0; -+ } -+ -+ return op; -+} -+ -+ -+ -+ -+ -+ -+static int -+len_of_coded_match ( lzo_uint m_len, lzo_uint m_off, lzo_uint lit ) -+{ -+ int n = 4; -+ -+ if (m_len < 2) -+ return -1; -+ if (m_len == 2) -+ return (m_off <= 0x0400 && lit > 0 && lit < 4) ? 2 : -1; -+ if (m_len <= 8 && m_off <= 0x0800) -+ return 2; -+ if (m_len == 3 && m_off <= (0x0400 + 0x0800) && lit >= 4) -+ return 2; -+ if (m_off <= 0x4000) -+ { -+ if (m_len <= 33) -+ return 3; -+ m_len -= 33; -+ while (m_len > 255) -+ { -+ m_len -= 255; -+ n++; -+ } -+ return n; -+ } -+ if (m_off <= 0xbfff) -+ { -+ if (m_len <= 9) -+ return 3; -+ m_len -= 9; -+ while (m_len > 255) -+ { -+ m_len -= 255; -+ n++; -+ } -+ return n; -+ } -+ return -1; -+} -+ -+ -+static lzo_int -+min_gain(lzo_uint ahead, lzo_uint lit1, lzo_uint lit2, int l1, int l2, int l3) -+{ -+ lzo_int lazy_match_min_gain = 0; -+ -+ ((void) (0)); -+ lazy_match_min_gain += ahead; -+ -+ -+ -+ -+ -+ -+ if (lit1 <= 3) -+ lazy_match_min_gain += (lit2 <= 3) ? 0 : 2; -+ else if (lit1 <= 18) -+ lazy_match_min_gain += (lit2 <= 18) ? 
0 : 1; -+ -+ lazy_match_min_gain += (l2 - l1) * 2; -+ if (l3 > 0) -+ lazy_match_min_gain -= (ahead - l3) * 2; -+ -+ if (lazy_match_min_gain < 0) -+ lazy_match_min_gain = 0; -+ -+ -+ -+ -+ -+ -+ -+ return lazy_match_min_gain; -+} -+ -+static void -+better_match ( const lzo1x_999_swd_t *swd, lzo_uint *m_len, lzo_uint *m_off ) -+{ -+ -+ -+ -+ -+ if (*m_len <= 3) -+ return; -+ -+ if (*m_off <= 0x0800) -+ return; -+ -+ -+ -+ if (*m_off > 0x0800 && -+ *m_len >= 3 + 1 && *m_len <= 8 + 1 && -+ swd->best_off[*m_len-1] && swd->best_off[*m_len-1] <= 0x0800) -+ { -+ *m_len = *m_len - 1; -+ *m_off = swd->best_off[*m_len]; -+ return; -+ } -+ -+ -+ -+ -+ if (*m_off > 0x4000 && -+ *m_len >= 9 + 1 && *m_len <= 8 + 2 && -+ swd->best_off[*m_len-2] && swd->best_off[*m_len-2] <= 0x0800) -+ { -+ *m_len = *m_len - 2; -+ *m_off = swd->best_off[*m_len]; -+ return; -+ } -+ -+ -+ -+ -+ if (*m_off > 0x4000 && -+ *m_len >= 9 + 1 && *m_len <= 33 + 1 && -+ swd->best_off[*m_len-1] && swd->best_off[*m_len-1] <= 0x4000) -+ { -+ *m_len = *m_len - 1; -+ *m_off = swd->best_off[*m_len]; -+ } -+ -+} -+ -+ int -+lzo1x_999_compress_internal ( const unsigned char *in , lzo_uint in_len, -+ unsigned char *out, lzo_uint * out_len, -+ void * wrkmem, -+ const unsigned char *dict, lzo_uint dict_len, -+ lzo_progress_callback_t cb, -+ int try_lazy, -+ lzo_uint good_length, -+ lzo_uint max_lazy, -+ lzo_uint nice_length, -+ lzo_uint max_chain, -+ lzo_uint32 flags ) -+{ -+ unsigned char *op; -+ const unsigned char *ii; -+ lzo_uint lit; -+ lzo_uint m_len, m_off; -+ lzo1x_999_t cc; -+ lzo1x_999_t * const c = &cc; -+ lzo1x_999_swd_t * const swd = (lzo1x_999_swd_t *) wrkmem; -+ int r; -+ -+ -+ -+ -+ -+ -+ -+ if (!(((lzo_uint32) (14 * 16384L * sizeof(short))) >= ((lzo_uint) (sizeof(lzo1x_999_swd_t))))) -+ return (-1); -+ -+ -+ -+ if (try_lazy < 0) -+ try_lazy = 1; -+ -+ if (good_length <= 0) -+ good_length = 32; -+ -+ if (max_lazy <= 0) -+ max_lazy = 32; -+ -+ if (nice_length <= 0) -+ nice_length = 0; -+ -+ if (max_chain <= 0) -+ max_chain = 2048; -+ -+ c->init = 0; -+ c->ip = c->in = in; -+ c->in_end = in + in_len; -+ c->out = out; -+ c->cb = cb; -+ c->m1a_m = c->m1b_m = c->m2_m = c->m3_m = c->m4_m = 0; -+ c->lit1_r = c->lit2_r = c->lit3_r = 0; -+ -+ op = out; -+ ii = c->ip; -+ lit = 0; -+ c->r1_lit = c->r1_m_len = 0; -+ -+ r = init_match(c,swd,dict,dict_len,flags); -+ if (r != 0) -+ return r; -+ if (max_chain > 0) -+ swd->max_chain = max_chain; -+ if (nice_length > 0) -+ swd->nice_length = nice_length; -+ -+ r = find_match(c,swd,0,0); -+ if (r != 0) -+ return r; -+ while (c->look > 0) -+ { -+ lzo_uint ahead; -+ lzo_uint max_ahead; -+ int l1, l2, l3; -+ -+ c->codesize = op - out; -+ -+ m_len = c->m_len; -+ m_off = c->m_off; -+ -+ ((void) (0)); -+ ((void) (0)); -+ if (lit == 0) -+ ii = c->bp; -+ ((void) (0)); -+ ((void) (0)); -+ -+ if ( m_len < 2 || -+ (m_len == 2 && (m_off > 0x0400 || lit == 0 || lit >= 4)) || -+ -+ -+ -+ -+ -+ (m_len == 2 && op == out) || -+ -+ (op == out && lit == 0)) -+ { -+ -+ m_len = 0; -+ } -+ else if (m_len == 3) -+ { -+ -+ if (m_off > (0x0400 + 0x0800) && lit >= 4) -+ m_len = 0; -+ } -+ -+ if (m_len == 0) -+ { -+ -+ lit++; -+ swd->max_chain = max_chain; -+ r = find_match(c,swd,1,0); -+ ((void) (0)); -+ continue; -+ } -+ -+ -+ -+ if (swd->use_best_off) -+ better_match(swd,&m_len,&m_off); -+ -+ ((void)0); -+ -+ -+ -+ -+ ahead = 0; -+ if (try_lazy <= 0 || m_len >= max_lazy) -+ { -+ -+ l1 = 0; -+ max_ahead = 0; -+ } -+ else -+ { -+ -+ l1 = len_of_coded_match(m_len,m_off,lit); -+ ((void) (0)); -+ -+ max_ahead = ((try_lazy) <= 
(l1 - 1) ? (try_lazy) : (l1 - 1)); -+ -+ -+ -+ } -+ -+ -+ while (ahead < max_ahead && c->look > m_len) -+ { -+ lzo_int lazy_match_min_gain; -+ -+ if (m_len >= good_length) -+ swd->max_chain = max_chain >> 2; -+ else -+ swd->max_chain = max_chain; -+ r = find_match(c,swd,1,0); -+ ahead++; -+ -+ ((void) (0)); -+ ((void) (0)); -+ ((void) (0)); -+ -+ -+ -+ -+ -+ -+ if (c->m_len < m_len) -+ continue; -+ -+ if (c->m_len == m_len && c->m_off >= m_off) -+ continue; -+ -+ -+ if (swd->use_best_off) -+ better_match(swd,&c->m_len,&c->m_off); -+ -+ l2 = len_of_coded_match(c->m_len,c->m_off,lit+ahead); -+ if (l2 < 0) -+ continue; -+ -+ l3 = (op == out) ? -1 : len_of_coded_match(ahead,m_off,lit); -+ -+ -+ -+ -+ lazy_match_min_gain = min_gain(ahead,lit,lit+ahead,l1,l2,l3); -+ if (c->m_len >= m_len + lazy_match_min_gain) -+ { -+ c->lazy++; -+ ((void)0); -+ -+ if (l3 > 0) -+ { -+ -+ op = code_run(c,op,ii,lit,ahead); -+ lit = 0; -+ -+ op = code_match(c,op,ahead,m_off); -+ } -+ else -+ { -+ lit += ahead; -+ ((void) (0)); -+ } -+ goto lazy_match_done; -+ } -+ } -+ -+ -+ ((void) (0)); -+ -+ -+ op = code_run(c,op,ii,lit,m_len); -+ lit = 0; -+ -+ -+ op = code_match(c,op,m_len,m_off); -+ swd->max_chain = max_chain; -+ r = find_match(c,swd,m_len,1+ahead); -+ ((void) (0)); -+ -+lazy_match_done: ; -+ } -+ -+ -+ -+ if (lit > 0) -+ op = STORE_RUN(c,op,ii,lit); -+ -+ -+ *op++ = 16 | 1; -+ *op++ = 0; -+ *op++ = 0; -+ -+ -+ c->codesize = op - out; -+ ((void) (0)); -+ -+ *out_len = op - out; -+ -+ if (c->cb) -+ (*c->cb)(c->textsize,c->codesize); -+ -+ -+ -+ -+ -+ -+ -+ ((void) (0)); -+ -+ return 0; -+} -+ -+ -+ -+ -+ -+ -+ int -+lzo1x_999_compress_level ( const unsigned char *in , unsigned in_len, -+ unsigned char *out, unsigned * out_len, -+ void * wrkmem, -+ const unsigned char *dict, unsigned dict_len, -+ lzo_progress_callback_t cb, -+ int compression_level ) -+{ -+ static const struct -+ { -+ int try_lazy; -+ lzo_uint good_length; -+ lzo_uint max_lazy; -+ lzo_uint nice_length; -+ lzo_uint max_chain; -+ lzo_uint32 flags; -+ } c[9] = { -+ { 0, 0, 0, 8, 4, 0 }, -+ { 0, 0, 0, 16, 8, 0 }, -+ { 0, 0, 0, 32, 16, 0 }, -+ -+ { 1, 4, 4, 16, 16, 0 }, -+ { 1, 8, 16, 32, 32, 0 }, -+ { 1, 8, 16, 128, 128, 0 }, -+ -+ { 2, 8, 32, 128, 256, 0 }, -+ { 2, 32, 128, 2048, 2048, 1 }, -+ { 2, 2048, 2048, 2048, 4096, 1 } -+ }; -+ -+ if (compression_level < 1 || compression_level > 9) -+ return (-1); -+ -+ compression_level -= 1; -+ return lzo1x_999_compress_internal(in, in_len, out, out_len, wrkmem, -+ dict, dict_len, cb, -+ c[compression_level].try_lazy, -+ c[compression_level].good_length, -+ c[compression_level].max_lazy, -+ -+ -+ -+ 0, -+ -+ c[compression_level].max_chain, -+ c[compression_level].flags); -+} -+EXPORT_SYMBOL_GPL(lzo1x_999_compress_level); -+ -+ -+ -+ -+ -+ int -+lzo1x_999_compress_dict ( const unsigned char *in , unsigned in_len, -+ unsigned char *out, unsigned * out_len, -+ void * wrkmem, -+ const unsigned char *dict, unsigned dict_len ) -+{ -+ return lzo1x_999_compress_level(in, in_len, out, out_len, wrkmem, -+ dict, dict_len, 0, 8); -+} -+EXPORT_SYMBOL_GPL(lzo1x_999_compress_dict); -+ -+ int -+lzo1x_999_compress ( const unsigned char *in , unsigned in_len, -+ unsigned char *out, unsigned * out_len, -+ void * wrkmem ) -+{ -+ return lzo1x_999_compress_level(in, in_len, out, out_len, wrkmem, -+ ((void *)0), 0, 0, 8); -+} -+EXPORT_SYMBOL_GPL(lzo1x_999_compress); -+ -diff -Nurp linux-omap-2.6.28-omap1/lib/lzo/Makefile linux-omap-2.6.28-nokia1/lib/lzo/Makefile ---- linux-omap-2.6.28-omap1/lib/lzo/Makefile 2011-06-22 
13:10:47.043070751 +0200 -+++ linux-omap-2.6.28-nokia1/lib/lzo/Makefile 2011-06-22 13:19:33.273063268 +0200 -@@ -1,4 +1,4 @@ --lzo_compress-objs := lzo1x_compress.o -+lzo_compress-objs := lzo1x_compress.o lzo1x_9x.o - lzo_decompress-objs := lzo1x_decompress.o - - obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o -diff -Nurp linux-omap-2.6.28-omap1/mm/memory.c linux-omap-2.6.28-nokia1/mm/memory.c ---- linux-omap-2.6.28-omap1/mm/memory.c 2011-06-22 13:14:25.343067646 +0200 -+++ linux-omap-2.6.28-nokia1/mm/memory.c 2011-06-22 13:19:33.273063268 +0200 -@@ -1110,6 +1110,7 @@ no_page_table: - } - return page; - } -+EXPORT_SYMBOL_GPL(follow_page); - - /* Can we do the FOLL_ANON optimization? */ - static inline int use_zero_page(struct vm_area_struct *vma) -diff -Nurp linux-omap-2.6.28-omap1/mm/page_io.c linux-omap-2.6.28-nokia1/mm/page_io.c ---- linux-omap-2.6.28-omap1/mm/page_io.c 2011-06-22 13:14:25.353067646 +0200 -+++ linux-omap-2.6.28-nokia1/mm/page_io.c 2011-06-22 13:19:33.273063268 +0200 -@@ -20,7 +20,8 @@ - #include - - static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, -- struct page *page, bio_end_io_t end_io) -+ struct page *page, bio_end_io_t end_io, -+ unsigned long rw) - { - struct bio *bio; - -@@ -30,8 +31,9 @@ static struct bio *get_swap_bio(gfp_t gf - swp_entry_t entry = { .val = index, }; - - sis = get_swap_info_struct(swp_type(entry)); -- bio->bi_sector = map_swap_page(sis, swp_offset(entry)) * -- (PAGE_SIZE >> 9); -+ bio->bi_sector = -+ map_swap_page(sis, swp_offset(entry), rw & WRITE) * -+ (PAGE_SIZE >> 9); - bio->bi_bdev = sis->bdev; - bio->bi_io_vec[0].bv_page = page; - bio->bi_io_vec[0].bv_len = PAGE_SIZE; -@@ -103,7 +105,7 @@ int swap_writepage(struct page *page, st - goto out; - } - bio = get_swap_bio(GFP_NOIO, page_private(page), page, -- end_swap_bio_write); -+ end_swap_bio_write, rw); - if (bio == NULL) { - set_page_dirty(page); - unlock_page(page); -@@ -128,7 +130,7 @@ int swap_readpage(struct file *file, str - BUG_ON(!PageLocked(page)); - BUG_ON(PageUptodate(page)); - bio = get_swap_bio(GFP_KERNEL, page_private(page), page, -- end_swap_bio_read); -+ end_swap_bio_read, 0); - if (bio == NULL) { - unlock_page(page); - ret = -ENOMEM; -diff -Nurp linux-omap-2.6.28-omap1/mm/page-writeback.c linux-omap-2.6.28-nokia1/mm/page-writeback.c ---- linux-omap-2.6.28-omap1/mm/page-writeback.c 2011-06-22 13:14:25.353067646 +0200 -+++ linux-omap-2.6.28-nokia1/mm/page-writeback.c 2011-06-22 13:19:33.273063268 +0200 -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - - /* - * The maximum number of pages to writeout in a single bdflush/kupdate -@@ -661,11 +662,114 @@ int wakeup_pdflush(long nr_pages) - return pdflush_operation(background_writeout, nr_pages); - } - --static void wb_timer_fn(unsigned long unused); -+static enum hrtimer_restart wb_timer_fn(struct hrtimer *timer); - static void laptop_timer_fn(unsigned long unused); - --static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0); -+struct hrtimer wb_timer; - static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0); -+static DEFINE_SPINLOCK(wb_timer_lock); -+ -+/* Whether the atomic write-back is enabled or not */ -+atomic_t periodic_wb_enabled; -+ -+/* -+ * This is a helper function which sets up the next periodic write-back timer -+ * event. The @wb_timer is set up as a range timer with soft limit 25% less -+ * than @expires and the hard limit equivalent to @expires. This means that the -+ * kernel may group this timer with other events and lessen number of -+ * wakeups. 
-+ */ -+static void setup_wb_timer(unsigned long expires) -+{ -+ u64 hardlimit, delta; -+ -+ hardlimit = jiffies_to_usecs(expires) * 1000LLU; -+ delta = hardlimit >> 2; -+ if (delta > ULONG_MAX) -+ delta = ULONG_MAX; -+ -+ hrtimer_start_range_ns(&wb_timer, ns_to_ktime(hardlimit - delta), delta, -+ HRTIMER_MODE_REL); -+} -+ -+/* -+ * Enable the periodic write-back. This function is usually called when -+ * an inode or a super block becomes dirty. -+ */ -+void enable_periodic_wb(void) -+{ -+ if (dirty_writeback_interval) { -+ spin_lock(&wb_timer_lock); -+ setup_wb_timer(dirty_writeback_interval); -+ spin_unlock(&wb_timer_lock); -+ } -+} -+ -+static int sb_supports_wb(struct super_block *sb) -+{ -+ struct inode *inode; -+ struct backing_dev_info *bdi; -+ int res; -+ -+ spin_lock(&inode_lock); -+ inode = list_entry(sb->s_inodes.next, struct inode, i_sb_list); -+ bdi = inode->i_mapping->backing_dev_info; -+ res = bdi_cap_writeback_dirty(bdi); -+ spin_unlock(&inode_lock); -+ return res; -+} -+ -+static void set_next_wb_timer(unsigned long expires) -+{ -+ int all_clean = 1; -+ struct super_block *sb; -+ -+ atomic_set(&periodic_wb_enabled, 0); -+ -+ spin_lock(&sb_lock); -+restart: -+ list_for_each_entry(sb, &super_blocks, s_list) { -+ sb->s_count++; -+ spin_unlock(&sb_lock); -+ -+ if (down_read_trylock(&sb->s_umount)) { -+ spin_lock(&sb_lock); -+ if (is_sb_dirty(sb)) -+ all_clean = 0; -+ else if (sb->s_root && sb_supports_wb(sb) && -+ sb_has_dirty_inodes(sb)) -+ all_clean = 0; -+ up_read(&sb->s_umount); -+ } else { -+ all_clean = 0; -+ spin_lock(&sb_lock); -+ } -+ -+ if (__put_super_and_need_restart(sb)) -+ goto restart; -+ -+ if (!all_clean) -+ break; -+ } -+ spin_unlock(&sb_lock); -+ -+ spin_lock(&wb_timer_lock); -+ if (all_clean && !atomic_read(&periodic_wb_enabled)) { -+ /* -+ * There are no dirty data, and no one marked an inode or -+ * super block as dirty. The periodic update timer may be -+ * deleted. Note, if we race with some other task which has -+ * just marked something as dirty and just set -+ * 'periodic_wb_enabled' to 1, then this task will call -+ * 'enable_periodic_wb()' which will re-enable the 'wb_timer'. -+ */ -+ hrtimer_cancel(&wb_timer); -+ } else { -+ atomic_set(&periodic_wb_enabled, 1); -+ setup_wb_timer(expires); -+ } -+ spin_unlock(&wb_timer_lock); -+} - - /* - * Periodic writeback of "old" data. 
-@@ -719,10 +823,16 @@ static void wb_kupdate(unsigned long arg - } - nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; - } -- if (time_before(next_jif, jiffies + HZ)) -- next_jif = jiffies + HZ; -- if (dirty_writeback_interval) -- mod_timer(&wb_timer, next_jif); -+ -+ if (dirty_writeback_interval) { -+ unsigned long expires; -+ -+ if (time_before(next_jif, jiffies + HZ)) -+ expires = HZ; -+ else -+ expires = next_jif - jiffies; -+ set_next_wb_timer(expires); -+ } - } + # CONFIG_XIP_KERNEL is not set + CONFIG_KEXEC=y + CONFIG_ATAGS_PROC=y +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/board-rx51.c kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/board-rx51.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/board-rx51.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/board-rx51.c 2011-09-04 11:37:54.000000000 +0200 +@@ -84,11 +84,16 @@ static void __init rx51_init_irq(void) + static void __init rx51_pm_init(void) + { + struct prm_setup_times prm_setup = { +- .clksetup = 81, ++ .clksetup = 111, /* must equal Volt offset + voltsetup2 */ + .voltsetup_time1 = 270, + .voltsetup_time2 = 150, +- .voltoffset = 17, +- .voltsetup2 = 37, ++ /* Time between wakeup event to when the sysoff goes high */ ++ .voltoffset = 16, ++ /* The following is for a 2.25ms ramp time of the oscillator ++ * Documented 2ms, added .25 as margin. NOTE: scripts ++ * change as oscillator changes ++ */ ++ .voltsetup2 = 95, + }; + omap3_set_prm_setup_times(&prm_setup); +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/board-rx51-peripherals.c kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/board-rx51-peripherals.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/board-rx51-peripherals.c 2011-09-04 11:37:54.000000000 +0200 +@@ -415,8 +415,16 @@ static struct twl4030_gpio_platform_data + static struct twl4030_ins sleep_on_seq[] = { /* -@@ -733,16 +843,17 @@ int dirty_writeback_centisecs_handler(ct - { - proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos); - if (dirty_writeback_interval) -- mod_timer(&wb_timer, jiffies + dirty_writeback_interval); -+ setup_wb_timer(dirty_writeback_interval); - else -- del_timer(&wb_timer); -+ hrtimer_cancel(&wb_timer); - return 0; - } - --static void wb_timer_fn(unsigned long unused) -+static enum hrtimer_restart wb_timer_fn(struct hrtimer *timer) - { - if (pdflush_operation(wb_kupdate, 0) < 0) -- mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */ -+ setup_wb_timer(HZ); /* delay 1 second */ -+ return HRTIMER_NORESTART; - } - - static void laptop_flush(unsigned long unused) -@@ -835,7 +946,8 @@ void __init page_writeback_init(void) - { - int shift; - -- mod_timer(&wb_timer, jiffies + dirty_writeback_interval); -+ hrtimer_init(&wb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ wb_timer.function = wb_timer_fn; - writeback_set_ratelimit(); - register_cpu_notifier(&ratelimit_nb); - -diff -Nurp linux-omap-2.6.28-omap1/mm/slub.c linux-omap-2.6.28-nokia1/mm/slub.c ---- linux-omap-2.6.28-omap1/mm/slub.c 2011-06-22 13:14:25.393067645 +0200 -+++ linux-omap-2.6.28-nokia1/mm/slub.c 2011-06-22 13:19:33.283063268 +0200 -@@ -9,6 +9,7 @@ + * Turn off everything. 
++#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ ++ ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ ++ | (type) << 4 | (state)) ++#define MSG_SINGULAR(devgrp, id, state) \ ++ ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) ++ 0x14 - Corresponds to 500uSec */ +- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_SLEEP), 2}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_HFCLKOUT, RES_STATE_SLEEP), 0x14}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 4, 1, RES_STATE_SLEEP), 2}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 4, 2, RES_STATE_SLEEP), 2}, + }; - #include -+#include /* struct reclaim_state */ - #include - #include - #include -@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cach - - __ClearPageSlab(page); - reset_page_mapcount(page); -+ if (current->reclaim_state) -+ current->reclaim_state->reclaimed_slab += pages; - __free_pages(page, order); - } - -@@ -1591,6 +1594,7 @@ static __always_inline void *slab_alloc( - unsigned long flags; - unsigned int objsize; - -+ might_sleep_if(gfpflags & __GFP_WAIT); - local_irq_save(flags); - c = get_cpu_slab(s, smp_processor_id()); - objsize = c->objsize; -@@ -1986,7 +1990,7 @@ static struct kmem_cache_cpu *alloc_kmem - static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) - { - if (c < per_cpu(kmem_cache_cpu, cpu) || -- c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { -+ c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { - kfree(c); - return; - } -@@ -2969,10 +2973,12 @@ void __init kmem_cache_init(void) - slab_state = PARTIAL; - - /* Caches that are not of the two-to-the-power-of size */ -- if (KMALLOC_MIN_SIZE <= 64) { -+ if (KMALLOC_MIN_SIZE <= 32) { - create_kmalloc_cache(&kmalloc_caches[1], - "kmalloc-96", 96, GFP_KERNEL); - caches++; -+ } -+ if (KMALLOC_MIN_SIZE <= 64) { - create_kmalloc_cache(&kmalloc_caches[2], - "kmalloc-192", 192, GFP_KERNEL); - caches++; -@@ -2999,10 +3005,17 @@ void __init kmem_cache_init(void) - BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || - (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); - -- for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) -+ for (i = 8; i < min(KMALLOC_MIN_SIZE, 192 + 8); i += 8) - size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; - -- if (KMALLOC_MIN_SIZE == 128) { -+ if (KMALLOC_MIN_SIZE == 64) { -+ /* -+ * The 96 byte size cache is not used if the alignment -+ * is 64 byte. -+ */ -+ for (i = 64 + 8; i <= 96; i += 8) -+ size_index[(i - 1) / 8] = 7; -+ } else if (KMALLOC_MIN_SIZE == 128) { - /* - * The 192 byte sized cache is not used if the alignment - * is 128 byte. 
Redirect kmalloc to use the 256 byte cache -@@ -3123,8 +3136,12 @@ struct kmem_cache *kmem_cache_create(con - s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); - up_write(&slub_lock); - -- if (sysfs_slab_alias(s, name)) -+ if (sysfs_slab_alias(s, name)) { -+ down_write(&slub_lock); -+ s->refcount--; -+ up_write(&slub_lock); - goto err; -+ } - return s; - } - -@@ -3134,8 +3151,13 @@ struct kmem_cache *kmem_cache_create(con - size, align, flags, ctor)) { - list_add(&s->list, &slab_caches); - up_write(&slub_lock); -- if (sysfs_slab_add(s)) -+ if (sysfs_slab_add(s)) { -+ down_write(&slub_lock); -+ list_del(&s->list); -+ up_write(&slub_lock); -+ kfree(s); - goto err; -+ } - return s; - } - kfree(s); -diff -Nurp linux-omap-2.6.28-omap1/mm/swapfile.c linux-omap-2.6.28-nokia1/mm/swapfile.c ---- linux-omap-2.6.28-omap1/mm/swapfile.c 2011-06-22 13:14:25.393067645 +0200 -+++ linux-omap-2.6.28-nokia1/mm/swapfile.c 2011-06-22 13:19:33.283063268 +0200 -@@ -273,22 +273,41 @@ out: - static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) - { - int count = p->swap_map[offset]; -+ unsigned old; - -- if (count < SWAP_MAP_MAX) { -- count--; -- p->swap_map[offset] = count; -- if (!count) { -- if (offset < p->lowest_bit) -- p->lowest_bit = offset; -- if (offset > p->highest_bit) -- p->highest_bit = offset; -- if (p->prio > swap_info[swap_list.next].prio) -- swap_list.next = p - swap_info; -- nr_swap_pages++; -- p->inuse_pages--; -- } -- } -- return count; -+ if (count >= SWAP_MAP_MAX) -+ return count; -+ -+ count--; -+ p->swap_map[offset] = count; -+ if (count) -+ return count; -+ -+ spin_lock(&p->remap_lock); -+ -+ if (offset < p->lowest_bit) -+ p->lowest_bit = offset; -+ if (offset > p->highest_bit) -+ p->highest_bit = offset; -+ if (p->prio > swap_info[swap_list.next].prio) -+ swap_list.next = p - swap_info; -+ nr_swap_pages++; -+ p->inuse_pages--; -+ -+ /* Re-map the page number */ -+ old = p->swap_remap[offset] & 0x7FFFFFFF; -+ /* Zero means it was not re-mapped */ -+ if (!old) -+ goto out; -+ /* Clear the re-mapping */ -+ p->swap_remap[offset] &= 0x80000000; -+ /* Mark the re-mapped page as unused */ -+ p->swap_remap[old] &= 0x7FFFFFFF; -+ /* Record how many free pages there are */ -+ p->gaps_exist += 1; -+out: -+ spin_unlock(&p->remap_lock); -+ return 0; - } - + static struct twl4030_script sleep_on_script = { +@@ -433,7 +441,10 @@ static struct twl4030_ins wakeup_seq[] = /* -@@ -977,14 +996,123 @@ static void drain_mmlist(void) - spin_unlock(&mmlist_lock); - } + * Reenable everything. 
+ */ +- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2}, ++ /* 0x32= 2.25 max(32khz) delay */ ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 2, RES_STATE_ACTIVE), 0x32}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_HFCLKOUT, RES_STATE_ACTIVE), 1}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 1, RES_STATE_ACTIVE), 2}, + }; -+/* Find the largest sequence of free pages */ -+int find_gap(struct swap_info_struct *sis) -+{ -+ unsigned i, uninitialized_var(start), uninitialized_var(gap_next); -+ unsigned uninitialized_var(gap_end), gap_size = 0; -+ int in_gap = 0; -+ -+ spin_unlock(&sis->remap_lock); -+ cond_resched(); -+ mutex_lock(&sis->remap_mutex); -+ -+ /* Check if a gap was found while we waited for the mutex */ -+ spin_lock(&sis->remap_lock); -+ if (sis->gap_next <= sis->gap_end) { -+ mutex_unlock(&sis->remap_mutex); -+ return 0; -+ } -+ if (!sis->gaps_exist) { -+ mutex_unlock(&sis->remap_mutex); -+ return -1; -+ } -+ spin_unlock(&sis->remap_lock); -+ -+ /* -+ * There is no current gap, so no new re-mappings can be made without -+ * going through this function (find_gap) which is protected by the -+ * remap_mutex. -+ */ -+ for (i = 1; i < sis->max; i++) { -+ if (in_gap) { -+ if (!(sis->swap_remap[i] & 0x80000000)) -+ continue; -+ if (i - start > gap_size) { -+ gap_next = start; -+ gap_end = i - 1; -+ gap_size = i - start; -+ } -+ in_gap = 0; -+ } else { -+ if (sis->swap_remap[i] & 0x80000000) -+ continue; -+ in_gap = 1; -+ start = i; -+ } -+ cond_resched(); -+ } -+ spin_lock(&sis->remap_lock); -+ if (in_gap && i - start > gap_size) { -+ sis->gap_next = start; -+ sis->gap_end = i - 1; -+ } else { -+ sis->gap_next = gap_next; -+ sis->gap_end = gap_end; -+ } -+ mutex_unlock(&sis->remap_mutex); -+ return 0; -+} -+ - /* - * Use this swapdev's extent info to locate the (PAGE_SIZE) block which - * corresponds to page offset `offset'. + static struct twl4030_script wakeup_script = { +@@ -461,14 +472,10 @@ static struct twl4030_ins wrst_seq[] = { + * Enable sysclk output. + * Reenable twl4030. 
*/ --sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) -+sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset, int write) - { - struct swap_extent *se = sis->curr_swap_extent; - struct swap_extent *start_se = se; -+ unsigned old; +- {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 2}, +- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 1, RES_STATE_ACTIVE), +- 0x13}, +- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 3, RES_STATE_OFF), 0x13}, +- {MSG_SINGULAR(DEV_GRP_NULL, RES_VDD1, RES_STATE_WRST), 0x13}, +- {MSG_SINGULAR(DEV_GRP_NULL, RES_VDD2, RES_STATE_WRST), 0x13}, +- {MSG_SINGULAR(DEV_GRP_NULL, RES_VPLL1, RES_STATE_WRST), 0x35}, +- {MSG_SINGULAR(DEV_GRP_P3, RES_HFCLKOUT, RES_STATE_ACTIVE), 2}, ++ {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 1}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 2, RES_STATE_WRST), 1}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 3, RES_STATE_OFF), 0x34}, ++ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 1, RES_STATE_WRST), 1}, + {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE), 2}, + }; + +@@ -490,54 +497,31 @@ static struct twl4030_script *twl4030_sc + + static struct twl4030_resconfig twl4030_rconfig[] = { + +- { .resource = RES_VDD1, .devgroup = -1, .type = 1, .type2 = -1, +- .remap = 0 }, +- { .resource = RES_VDD2, .devgroup = -1, .type = 1, .type2 = -1, +- .remap = 0 }, +- { .resource = RES_VPLL1, .devgroup = -1, .type = 1, .type2 = -1, +- .remap = 0 }, +- { .resource = RES_VPLL2, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VAUX1, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VAUX2, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VAUX3, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VAUX4, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VMMC1, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VMMC2, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VDAC, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VSIM, .devgroup = -1, .type = -1, .type2 = 3, +- .remap = -1 }, +- { .resource = RES_VINTANA1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = -1, .type2 = -1, .remap = -1 }, +- { .resource = RES_VINTANA2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = 1, .type2 = -1, .remap = -1 }, +- { .resource = RES_VINTDIG, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = -1, .type2 = -1, .remap = -1 }, +- { .resource = RES_VIO, .devgroup = DEV_GRP_P3, +- .type = 1, .type2 = -1, .remap = -1 }, +- { .resource = RES_CLKEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = 1, .type2 = -1 , .remap = -1 }, +- { .resource = RES_REGEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = 1, .type2 = -1, .remap = -1 }, +- { .resource = RES_NRES_PWRON, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = 1, .type2 = -1, .remap = -1 }, +- { .resource = RES_SYSEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, +- .type = 1, .type2 = -1, .remap = -1 }, +- { .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P3, .type = 1, +- .type2 = -1, .remap = -1 }, +- { .resource = RES_32KCLKOUT, .devgroup = -1, .type = 1, .type2 = -1, +- .remap = -1 }, +- { .resource = RES_RESET, .devgroup = -1, .type = 1, .type2 = -1, +- .remap = -1 }, +- { .resource = RES_Main_Ref, .devgroup = -1, .type = 1, .type2 = -1, +- .remap = -1 }, ++ /* Default p1*/ ++ { .resource = RES_VDD1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 4, 
.type2 = 1, .remap = 0 }, ++ { .resource = RES_VDD2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 3, .type2 = 1, .remap = 0 }, ++ { .resource = RES_VPLL1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 3, .type2 = 1, .remap = 0 }, ++ { .resource = RES_VPLL2, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VAUX1, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VAUX2, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VAUX3, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VAUX4, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VMMC1, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VMMC2, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VDAC, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VSIM, .devgroup = -1, .type = 0, .type2 = 3, .remap = 8 }, ++ { .resource = RES_VINTANA1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 1, .type2 = 2, .remap = 8 }, ++ { .resource = RES_VINTANA2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 0, .type2 = 2, .remap = 8 }, ++ { .resource = RES_VINTDIG, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 1, .type2 = 2, .remap = 8 }, ++ { .resource = RES_VIO, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 2, .type2 = 2, .remap = 8 }, ++ { .resource = RES_CLKEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 3, .type2 = 2, .remap = 8 }, ++ { .resource = RES_REGEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 2, .type2 = 1, .remap = 8 }, ++ { .resource = RES_NRES_PWRON, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 0, .type2 = 1, .remap = 8 }, ++ { .resource = RES_SYSEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 6, .type2 = 1, .remap = 8 }, ++ { .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P1 | DEV_GRP_P3, .type = 0, .type2 = 1, .remap = 8 }, ++ { .resource = RES_32KCLKOUT, .devgroup = DEV_GRP_P1 | DEV_GRP_P2 | DEV_GRP_P3, .type = 0, .type2 = 0, .remap = 8 }, ++ { .resource = RES_RESET, .devgroup = DEV_GRP_P1 | DEV_GRP_P2 | DEV_GRP_P3, .type = 6, .type2 = 0, .remap = 8 }, ++ { .resource = RES_Main_Ref, .devgroup = DEV_GRP_P1 | DEV_GRP_P2 | DEV_GRP_P3, .type = 0, .type2 = 0, .remap = 8 }, + { 0, 0}, + }; + +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/cpuidle34xx.c kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/cpuidle34xx.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/cpuidle34xx.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/cpuidle34xx.c 2011-09-04 11:37:54.000000000 +0200 +@@ -180,8 +180,8 @@ void omap_init_power_states(void) + /* C1 . MPU WFI + Core active */ + omap3_power_states[OMAP3_STATE_C1].valid = 1; + omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1; +- omap3_power_states[OMAP3_STATE_C1].sleep_latency = 58; +- omap3_power_states[OMAP3_STATE_C1].wakeup_latency = 52; ++ omap3_power_states[OMAP3_STATE_C1].sleep_latency = 110; ++ omap3_power_states[OMAP3_STATE_C1].wakeup_latency = 162; + omap3_power_states[OMAP3_STATE_C1].threshold = 5; + omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON; + omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON; +@@ -190,9 +190,9 @@ void omap_init_power_states(void) + /* C2 . 
MPU WFI + Core inactive */ + omap3_power_states[OMAP3_STATE_C2].valid = 1; + omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2; +- omap3_power_states[OMAP3_STATE_C2].sleep_latency = 73; +- omap3_power_states[OMAP3_STATE_C2].wakeup_latency = 164; +- omap3_power_states[OMAP3_STATE_C2].threshold = 30; ++ omap3_power_states[OMAP3_STATE_C2].sleep_latency = 106; ++ omap3_power_states[OMAP3_STATE_C2].wakeup_latency = 180; ++ omap3_power_states[OMAP3_STATE_C2].threshold = 309; + omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON; + omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON; + omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID | +@@ -201,9 +201,9 @@ void omap_init_power_states(void) + /* C3 . MPU CSWR + Core inactive */ + omap3_power_states[OMAP3_STATE_C3].valid = 0; + omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3; +- omap3_power_states[OMAP3_STATE_C3].sleep_latency = 90; +- omap3_power_states[OMAP3_STATE_C3].wakeup_latency = 267; +- omap3_power_states[OMAP3_STATE_C3].threshold = 113872; /* vs. C2 */ ++ omap3_power_states[OMAP3_STATE_C3].sleep_latency = 107; ++ omap3_power_states[OMAP3_STATE_C3].wakeup_latency = 410; ++ omap3_power_states[OMAP3_STATE_C3].threshold = 46057; + omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET; + omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON; + omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID | +@@ -212,9 +212,9 @@ void omap_init_power_states(void) + /* C4 . MPU OFF + Core inactive */ + omap3_power_states[OMAP3_STATE_C4].valid = 0; + omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4; +- omap3_power_states[OMAP3_STATE_C4].sleep_latency = 4130; +- omap3_power_states[OMAP3_STATE_C4].wakeup_latency = 2130; +- omap3_power_states[OMAP3_STATE_C4].threshold = 619328; /* vs. C2 */ ++ omap3_power_states[OMAP3_STATE_C4].sleep_latency = 121; ++ omap3_power_states[OMAP3_STATE_C4].wakeup_latency = 3374; ++ omap3_power_states[OMAP3_STATE_C4].threshold = 46057; + omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF; + omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON; + omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID | +@@ -223,9 +223,9 @@ void omap_init_power_states(void) + /* C5 . MPU CSWR + Core CSWR*/ + omap3_power_states[OMAP3_STATE_C5].valid = 1; + omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5; +- omap3_power_states[OMAP3_STATE_C5].sleep_latency = 596; +- omap3_power_states[OMAP3_STATE_C5].wakeup_latency = 1000; +- omap3_power_states[OMAP3_STATE_C5].threshold = 7971; ++ omap3_power_states[OMAP3_STATE_C5].sleep_latency = 855; ++ omap3_power_states[OMAP3_STATE_C5].wakeup_latency = 1146; ++ omap3_power_states[OMAP3_STATE_C5].threshold = 46057; + omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET; + omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET; + omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID | +@@ -234,9 +234,9 @@ void omap_init_power_states(void) + /* C6 . MPU OFF + Core CSWR */ + omap3_power_states[OMAP3_STATE_C6].valid = 0; + omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6; +- omap3_power_states[OMAP3_STATE_C6].sleep_latency = 4600; +- omap3_power_states[OMAP3_STATE_C6].wakeup_latency = 2850; +- omap3_power_states[OMAP3_STATE_C6].threshold = 2801100; /* vs. 
C5 */ ++ omap3_power_states[OMAP3_STATE_C6].sleep_latency = 7580; ++ omap3_power_states[OMAP3_STATE_C6].wakeup_latency = 4134; ++ omap3_power_states[OMAP3_STATE_C6].threshold = 484329; + omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF; + omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET; + omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID | +@@ -245,9 +245,9 @@ void omap_init_power_states(void) + /* C7 . MPU OFF + Core OFF */ + omap3_power_states[OMAP3_STATE_C7].valid = 1; + omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7; +- omap3_power_states[OMAP3_STATE_C7].sleep_latency = 4760; +- omap3_power_states[OMAP3_STATE_C7].wakeup_latency = 7780; +- omap3_power_states[OMAP3_STATE_C7].threshold = 610082; ++ omap3_power_states[OMAP3_STATE_C7].sleep_latency = 7505; ++ omap3_power_states[OMAP3_STATE_C7].wakeup_latency = 15274; ++ omap3_power_states[OMAP3_STATE_C7].threshold = 484329; + omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF; + omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF; + omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID | +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/mmc-twl4030.c kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/mmc-twl4030.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/mmc-twl4030.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/mmc-twl4030.c 2011-09-04 11:37:54.000000000 +0200 +@@ -36,6 +36,12 @@ + #define MMCHS_SYSSTATUS 0x0014 + #define MMCHS_SYSSTATUS_RESETDONE (1 << 0) + ++#define OMAP343X_PADCONF_MMC2_CMD (OMAP2_CONTROL_PADCONFS + 0x12A) ++#define OMAP343X_PADCONF_MMC2_DAT0 (OMAP2_CONTROL_PADCONFS + 0x12C) ++#define OMAP343X_PADCONF_MMC2_DAT2 (OMAP2_CONTROL_PADCONFS + 0x130) ++#define OMAP343X_PADCONF_MMC2_DAT4 (OMAP2_CONTROL_PADCONFS + 0x134) ++#define OMAP343X_PADCONF_MMC2_DAT6 (OMAP2_CONTROL_PADCONFS + 0x138) + -+ /* -+ * Instead of using the offset we are given, re-map it to the next -+ * sequential position. -+ */ -+ spin_lock(&sis->remap_lock); -+ /* Get the old re-mapping */ -+ old = sis->swap_remap[offset] & 0x7FFFFFFF; -+ if (write) { -+ /* See if we have free pages */ -+ if (sis->gap_next > sis->gap_end) { -+ /* The gap is used up. Find another one */ -+ if (!sis->gaps_exist || find_gap(sis) < 0) { -+ /* -+ * Out of space, so this page must have a -+ * re-mapping, so use that. -+ */ -+ BUG_ON(!old); -+ sis->gap_next = sis->gap_end = old; -+ } -+ } -+ /* Zero means it was not re-mapped previously */ -+ if (old) { -+ /* Clear the re-mapping */ -+ sis->swap_remap[offset] &= 0x80000000; -+ /* Mark the re-mapped page as unused */ -+ sis->swap_remap[old] &= 0x7FFFFFFF; -+ } else { -+ /* Record how many free pages there are */ -+ sis->gaps_exist -= 1; + static struct platform_device dummy_pdev = { + .dev = { + .bus = &platform_bus_type, +@@ -599,6 +605,14 @@ static int twl_mmc2_set_power(struct dev + * transceiver is used, DAT3..7 are muxed as transceiver control pins. 
+ */ + if (power_on) { ++ if (!cpu_is_omap2430()) { ++ /* Pull up */ ++ omap_ctrl_writew( 0x118, OMAP343X_PADCONF_MMC2_CMD); ++ omap_ctrl_writel(0x1180118, OMAP343X_PADCONF_MMC2_DAT0); ++ omap_ctrl_writel(0x1180118, OMAP343X_PADCONF_MMC2_DAT2); ++ omap_ctrl_writel(0x1180118, OMAP343X_PADCONF_MMC2_DAT4); ++ omap_ctrl_writel(0x1180118, OMAP343X_PADCONF_MMC2_DAT6); + } -+ /* Create the re-mapping to the next free page */ -+ sis->swap_remap[offset] |= sis->gap_next; -+ /* Mark it as used */ -+ sis->swap_remap[sis->gap_next] |= 0x80000000; -+ /* Use the re-mapped page number */ -+ offset = sis->gap_next; -+ /* Update the free pages gap */ -+ sis->gap_next += 1; -+ } else { -+ /* -+ * Always read from the existing re-mapping -+ * if there is one. There may not be because -+ * 'swapin_readahead()' has won a race with -+ * 'add_to_swap()'. -+ */ -+ if (old) -+ offset = old; -+ } -+ spin_unlock(&sis->remap_lock); - - for ( ; ; ) { - struct list_head *lh; -@@ -1015,7 +1143,8 @@ sector_t swapdev_block(int swap_type, pg - return 0; - - sis = swap_info + swap_type; -- return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0; -+#error map_swap_page does not support hibernation -+ return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset, 0) : 0; - } - #endif /* CONFIG_HIBERNATION */ + if (mmc->slots[0].internal_clock) { + u32 reg; -@@ -1342,6 +1471,7 @@ asmlinkage long sys_swapoff(const char _ - p->flags = 0; - spin_unlock(&swap_lock); - mutex_unlock(&swapon_mutex); -+ vfree(p->swap_remap); - vfree(swap_map); - inode = mapping->host; - if (S_ISBLK(inode->i_mode)) { -@@ -1485,6 +1615,7 @@ asmlinkage long sys_swapon(const char __ - unsigned long maxpages = 1; - int swapfilesize; - unsigned short *swap_map = NULL; -+ unsigned int *swap_remap = NULL; - struct page *page = NULL; - struct inode *inode = NULL; - int did_down = 0; -@@ -1654,9 +1785,15 @@ asmlinkage long sys_swapon(const char __ - error = -ENOMEM; - goto bad_swap; +@@ -608,6 +622,14 @@ static int twl_mmc2_set_power(struct dev } -+ swap_remap = vmalloc(maxpages * sizeof(unsigned)); -+ if (!swap_remap) { -+ error = -ENOMEM; -+ goto bad_swap; + ret = twl_mmc_set_voltage(c, vdd); + } else { ++ if (!cpu_is_omap2430()) { ++ /* Pull down */ ++ omap_ctrl_writew( 0x108, OMAP343X_PADCONF_MMC2_CMD); ++ omap_ctrl_writel(0x1080108, OMAP343X_PADCONF_MMC2_DAT0); ++ omap_ctrl_writel(0x1080108, OMAP343X_PADCONF_MMC2_DAT2); ++ omap_ctrl_writel(0x1080108, OMAP343X_PADCONF_MMC2_DAT4); ++ omap_ctrl_writel(0x1080108, OMAP343X_PADCONF_MMC2_DAT6); + } - - error = 0; - memset(swap_map, 0, maxpages * sizeof(short)); -+ memset(swap_remap, 0, maxpages * sizeof(unsigned)); - for (i = 0; i < swap_header->info.nr_badpages; i++) { - int page_nr = swap_header->info.badpages[i]; - if (page_nr <= 0 || page_nr >= swap_header->info.last_page) -@@ -1696,6 +1833,12 @@ asmlinkage long sys_swapon(const char __ - else - p->prio = --least_priority; - p->swap_map = swap_map; -+ p->swap_remap = swap_remap; -+ p->gap_next = 1; -+ p->gap_end = p->max - 1; -+ p->gaps_exist = p->max - 1; -+ spin_lock_init(&p->remap_lock); -+ mutex_init(&p->remap_mutex); - p->flags = SWP_ACTIVE; - nr_swap_pages += nr_good_pages; - total_swap_pages += nr_good_pages; -@@ -1734,6 +1877,7 @@ bad_swap_2: - p->swap_file = NULL; - p->flags = 0; - spin_unlock(&swap_lock); -+ vfree(swap_remap); - vfree(swap_map); - if (swap_file) - filp_close(swap_file, NULL); -diff -Nurp linux-omap-2.6.28-omap1/mm/vmalloc.c linux-omap-2.6.28-nokia1/mm/vmalloc.c ---- linux-omap-2.6.28-omap1/mm/vmalloc.c 2011-06-22 
13:14:25.393067645 +0200 -+++ linux-omap-2.6.28-nokia1/mm/vmalloc.c 2011-06-22 13:19:33.283063268 +0200 -@@ -1085,6 +1085,7 @@ struct vm_struct *get_vm_area(unsigned l - return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, - -1, GFP_KERNEL, __builtin_return_address(0)); - } -+EXPORT_SYMBOL_GPL(get_vm_area); - - struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, - void *caller) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/af_bluetooth.c linux-omap-2.6.28-nokia1/net/bluetooth/af_bluetooth.c ---- linux-omap-2.6.28-omap1/net/bluetooth/af_bluetooth.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/af_bluetooth.c 2011-06-22 13:19:33.283063268 +0200 -@@ -41,18 +41,13 @@ - - #include - --#ifndef CONFIG_BT_SOCK_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- --#define VERSION "2.13" -+#define VERSION "2.15" - - /* Bluetooth sockets */ - #define BT_MAX_PROTO 8 - static struct net_proto_family *bt_proto[BT_MAX_PROTO]; -+static DEFINE_RWLOCK(bt_proto_lock); - --static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; - static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; - static const char *bt_key_strings[BT_MAX_PROTO] = { - "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP", -@@ -65,6 +60,7 @@ static const char *bt_key_strings[BT_MAX - "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", - }; - -+static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; - static const char *bt_slock_key_strings[BT_MAX_PROTO] = { - "slock-AF_BLUETOOTH-BTPROTO_L2CAP", - "slock-AF_BLUETOOTH-BTPROTO_HCI", -@@ -75,7 +71,20 @@ static const char *bt_slock_key_strings[ - "slock-AF_BLUETOOTH-BTPROTO_HIDP", - "slock-AF_BLUETOOTH-BTPROTO_AVDTP", - }; --static DEFINE_RWLOCK(bt_proto_lock); -+ -+static inline void bt_sock_reclassify_lock(struct socket *sock, int proto) -+{ -+ struct sock *sk = sock->sk; -+ -+ if (!sk) -+ return; -+ -+ BUG_ON(sock_owned_by_user(sk)); -+ -+ sock_lock_init_class_and_name(sk, -+ bt_slock_key_strings[proto], &bt_slock_key[proto], -+ bt_key_strings[proto], &bt_lock_key[proto]); -+} - - int bt_sock_register(int proto, struct net_proto_family *ops) - { -@@ -117,21 +126,6 @@ int bt_sock_unregister(int proto) - } - EXPORT_SYMBOL(bt_sock_unregister); - --static void bt_reclassify_sock_lock(struct socket *sock, int proto) --{ -- struct sock *sk = sock->sk; -- -- if (!sk) -- return; -- BUG_ON(sock_owned_by_user(sk)); -- -- sock_lock_init_class_and_name(sk, -- bt_slock_key_strings[proto], -- &bt_slock_key[proto], -- bt_key_strings[proto], -- &bt_lock_key[proto]); --} -- - static int bt_sock_create(struct net *net, struct socket *sock, int proto) - { - int err; -@@ -151,7 +145,7 @@ static int bt_sock_create(struct net *ne - - if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { - err = bt_proto[proto]->create(net, sock, proto); -- bt_reclassify_sock_lock(sock, proto); -+ bt_sock_reclassify_lock(sock, proto); - module_put(bt_proto[proto]->owner); - } - -@@ -217,7 +211,8 @@ struct sock *bt_accept_dequeue(struct so - continue; - } - -- if (sk->sk_state == BT_CONNECTED || !newsock) { -+ if (sk->sk_state == BT_CONNECTED || !newsock || -+ bt_sk(parent)->defer_setup) { - bt_accept_unlink(sk); - if (newsock) - sock_graft(sk, newsock); -@@ -232,7 +227,7 @@ struct sock *bt_accept_dequeue(struct so - EXPORT_SYMBOL(bt_accept_dequeue); - - int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, -- struct msghdr *msg, size_t len, int flags) -+ struct msghdr *msg, size_t len, int flags) - { - int noblock = flags & MSG_DONTWAIT; - struct sock *sk = 
sock->sk; -@@ -240,7 +235,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, - size_t copied; - int err; - -- BT_DBG("sock %p sk %p len %d", sock, sk, len); -+ BT_DBG("sock %p sk %p len %zu", sock, sk, len); - - if (flags & (MSG_OOB)) - return -EOPNOTSUPP; -@@ -277,7 +272,9 @@ static inline unsigned int bt_accept_pol - - list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { - sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); -- if (sk->sk_state == BT_CONNECTED) -+ if (sk->sk_state == BT_CONNECTED || -+ (bt_sk(parent)->defer_setup && -+ sk->sk_state == BT_CONNECT2)) - return POLLIN | POLLRDNORM; + ret = twl_mmc_set_voltage(c, 0); } -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/bnep/bnep.h linux-omap-2.6.28-nokia1/net/bluetooth/bnep/bnep.h ---- linux-omap-2.6.28-omap1/net/bluetooth/bnep/bnep.h 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/bnep/bnep.h 2011-06-22 13:19:33.283063268 +0200 -@@ -161,7 +161,7 @@ struct bnep_session { - struct msghdr msg; - - struct bnep_proto_filter proto_filter[BNEP_MAX_PROTO_FILTERS]; -- u64 mc_filter; -+ unsigned long long mc_filter; - - struct socket *sock; - struct net_device *dev; -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/bnep/core.c linux-omap-2.6.28-nokia1/net/bluetooth/bnep/core.c ---- linux-omap-2.6.28-omap1/net/bluetooth/bnep/core.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/bnep/core.c 2011-06-22 13:19:33.283063268 +0200 -@@ -52,11 +52,6 @@ - - #include "bnep.h" - --#ifndef CONFIG_BT_BNEP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "1.3" - - static int compress_src = 1; -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/bnep/netdev.c linux-omap-2.6.28-nokia1/net/bluetooth/bnep/netdev.c ---- linux-omap-2.6.28-omap1/net/bluetooth/bnep/netdev.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/bnep/netdev.c 2011-06-22 13:19:33.283063268 +0200 -@@ -41,11 +41,6 @@ - - #include "bnep.h" +@@ -649,12 +671,14 @@ static int twl_mmc2_set_sleep(struct dev + return twl_mmc_regulator_set_mode(c->twl_vmmc_dev_grp, sleep); --#ifndef CONFIG_BT_BNEP_DEBUG --#undef BT_DBG --#define BT_DBG( A... ) --#endif -- - #define BNEP_TX_QUEUE_LEN 20 - - static int bnep_net_open(struct net_device *dev) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/bnep/sock.c linux-omap-2.6.28-nokia1/net/bluetooth/bnep/sock.c ---- linux-omap-2.6.28-omap1/net/bluetooth/bnep/sock.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/bnep/sock.c 2011-06-22 13:19:33.283063268 +0200 -@@ -46,11 +46,6 @@ - - #include "bnep.h" - --#ifndef CONFIG_BT_BNEP_DEBUG --#undef BT_DBG --#define BT_DBG( A... ) --#endif -- - static int bnep_sock_release(struct socket *sock) - { - struct sock *sk = sock->sk; -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/cmtp/capi.c linux-omap-2.6.28-nokia1/net/bluetooth/cmtp/capi.c ---- linux-omap-2.6.28-omap1/net/bluetooth/cmtp/capi.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/cmtp/capi.c 2011-06-22 13:19:33.283063268 +0200 -@@ -42,11 +42,6 @@ - - #include "cmtp.h" - --#ifndef CONFIG_BT_CMTP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) 
--#endif -- - #define CAPI_INTEROPERABILITY 0x20 - - #define CAPI_INTEROPERABILITY_REQ CAPICMD(CAPI_INTEROPERABILITY, CAPI_REQ) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/cmtp/core.c linux-omap-2.6.28-nokia1/net/bluetooth/cmtp/core.c ---- linux-omap-2.6.28-omap1/net/bluetooth/cmtp/core.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/cmtp/core.c 2011-06-22 13:19:33.283063268 +0200 -@@ -44,11 +44,6 @@ - - #include "cmtp.h" - --#ifndef CONFIG_BT_CMTP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "1.0" - - static DECLARE_RWSEM(cmtp_session_sem); -@@ -131,8 +126,7 @@ static inline void cmtp_add_msgpart(stru - - session->reassembly[id] = nskb; - -- if (skb) -- kfree_skb(skb); -+ kfree_skb(skb); - } - - static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *skb) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/cmtp/sock.c linux-omap-2.6.28-nokia1/net/bluetooth/cmtp/sock.c ---- linux-omap-2.6.28-omap1/net/bluetooth/cmtp/sock.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/cmtp/sock.c 2011-06-22 13:19:33.283063268 +0200 -@@ -43,11 +43,6 @@ - - #include "cmtp.h" - --#ifndef CONFIG_BT_CMTP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - static int cmtp_sock_release(struct socket *sock) - { - struct sock *sk = sock->sk; -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hci_conn.c linux-omap-2.6.28-nokia1/net/bluetooth/hci_conn.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hci_conn.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hci_conn.c 2011-06-22 13:19:33.283063268 +0200 -@@ -45,11 +45,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCI_CORE_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - void hci_acl_connect(struct hci_conn *conn) - { - struct hci_dev *hdev = conn->hdev; -@@ -128,6 +123,8 @@ void hci_add_sco(struct hci_conn *conn, - conn->state = BT_CONNECT; - conn->out = 1; - -+ conn->attempt++; -+ - cp.handle = cpu_to_le16(handle); - cp.pkt_type = cpu_to_le16(conn->pkt_type); - -@@ -144,6 +141,8 @@ void hci_setup_sync(struct hci_conn *con - conn->state = BT_CONNECT; - conn->out = 1; - -+ conn->attempt++; + if (cardsleep) { ++ struct twl_mmc_controller *c = &hsmmc[1]; + - cp.handle = cpu_to_le16(handle); - cp.pkt_type = cpu_to_le16(conn->pkt_type); - -@@ -160,6 +159,7 @@ static void hci_conn_timeout(unsigned lo - { - struct hci_conn *conn = (void *) arg; - struct hci_dev *hdev = conn->hdev; -+ __u8 reason; - - BT_DBG("conn %p state %d", conn, conn->state); - -@@ -171,14 +171,13 @@ static void hci_conn_timeout(unsigned lo - switch (conn->state) { - case BT_CONNECT: - case BT_CONNECT2: -- if (conn->type == ACL_LINK) -+ if (conn->type == ACL_LINK && conn->out) - hci_acl_connect_cancel(conn); -- else -- hci_acl_disconn(conn, 0x13); - break; - case BT_CONFIG: - case BT_CONNECTED: -- hci_acl_disconn(conn, 0x13); -+ reason = hci_proto_disconn_ind(conn); -+ hci_acl_disconn(conn, reason); - break; - default: - conn->state = BT_CLOSED; -@@ -212,8 +211,10 @@ struct hci_conn *hci_conn_add(struct hci - conn->type = type; - conn->mode = HCI_CM_ACTIVE; - conn->state = BT_OPEN; -+ conn->auth_type = HCI_AT_GENERAL_BONDING; - - conn->power_save = 1; -+ conn->disc_timeout = HCI_DISCONN_TIMEOUT; - - switch (type) { - case ACL_LINK: -@@ -221,12 +222,13 @@ struct hci_conn *hci_conn_add(struct hci - break; - case SCO_LINK: - if (lmp_esco_capable(hdev)) -- conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK; -+ conn->pkt_type = 
(hdev->esco_type & SCO_ESCO_MASK) | -+ (hdev->esco_type & EDR_ESCO_MASK); + /* VCC can be turned off if card is asleep */ + c->vsim_18v = 0; + if (sleep) +- err = twl_mmc2_set_power(dev, slot, 0, 0); ++ err = twl_mmc_set_voltage(c, 0); else - conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; - break; - case ESCO_LINK: -- conn->pkt_type = hdev->esco_type; -+ conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; - break; - } - -@@ -245,6 +247,8 @@ struct hci_conn *hci_conn_add(struct hci - if (hdev->notify) - hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); - -+ hci_conn_init_sysfs(conn); -+ - tasklet_enable(&hdev->tx_task); - - return conn; -@@ -285,6 +289,10 @@ int hci_conn_del(struct hci_conn *conn) - - skb_queue_purge(&conn->data_q); +- err = twl_mmc2_set_power(dev, slot, 1, vdd); ++ err = twl_mmc_set_voltage(c, vdd); + c->vsim_18v = 1; + } else + err = twl_mmc_regulator_set_mode(c->twl_vmmc_dev_grp, sleep); +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/pm-debug.c kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/pm-debug.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/pm-debug.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/pm-debug.c 2011-09-04 11:37:54.000000000 +0200 +@@ -169,7 +169,8 @@ static int pm_dbg_init_done; + enum { + DEBUG_FILE_COUNTERS = 0, + DEBUG_FILE_TIMERS, +- DEBUG_FILE_RESOURCES ++ DEBUG_FILE_RESOURCES, ++ DEBUG_FILE_WAIT_SDRC_COUNT + }; -+ hci_conn_del_sysfs(conn); -+ -+ hci_dev_put(hdev); -+ + struct pm_module_def { +@@ -428,9 +429,21 @@ static int pm_dbg_show_timers(struct seq return 0; } -@@ -330,7 +338,7 @@ EXPORT_SYMBOL(hci_get_route); - - /* Create SCO or ACL connection. - * Device _must_ be locked */ --struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type) -+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) - { - struct hci_conn *acl; - struct hci_conn *sco; -@@ -345,6 +353,7 @@ struct hci_conn *hci_connect(struct hci_ - hci_conn_hold(acl); - - if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { -+ acl->sec_level = sec_level; - acl->auth_type = auth_type; - hci_acl_connect(acl); - } -@@ -390,51 +399,56 @@ int hci_conn_check_link_mode(struct hci_ - EXPORT_SYMBOL(hci_conn_check_link_mode); - - /* Authenticate remote device */ --int hci_conn_auth(struct hci_conn *conn) -+static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) - { - BT_DBG("conn %p", conn); - -- if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) { -- if (!(conn->auth_type & 0x01)) { -- conn->auth_type |= 0x01; -- conn->link_mode &= ~HCI_LM_AUTH; -- } -- } -- -- if (conn->link_mode & HCI_LM_AUTH) -+ if (sec_level > conn->sec_level) -+ conn->sec_level = sec_level; -+ else if (conn->link_mode & HCI_LM_AUTH) - return 1; - -+ conn->auth_type = auth_type; ++static int pm_dbg_show_sdrc_wait_count(struct seq_file *s, void *unused) ++{ ++ unsigned int *sdrc_counters = omap3_get_sdrc_counters(); + - if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { - struct hci_cp_auth_requested cp; - cp.handle = cpu_to_le16(conn->handle); - hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, - sizeof(cp), &cp); - } ++ seq_printf(s, "dll kick count: %u\n", sdrc_counters[0]); ++ seq_printf(s, "wait dll lock count: %u\n", sdrc_counters[1]); + - return 0; - } --EXPORT_SYMBOL(hci_conn_auth); - --/* Enable encryption */ --int hci_conn_encrypt(struct hci_conn *conn) -+/* Enable security */ -+int hci_conn_security(struct hci_conn *conn, __u8 
sec_level, __u8 auth_type) ++ return 0; ++} + static int pm_dbg_open(struct inode *inode, struct file *file) { - BT_DBG("conn %p", conn); + switch ((int)inode->i_private) { ++ case DEBUG_FILE_WAIT_SDRC_COUNT: ++ return single_open(file, pm_dbg_show_sdrc_wait_count, ++ &inode->i_private); + case DEBUG_FILE_COUNTERS: + return single_open(file, pm_dbg_show_counters, + &inode->i_private); +@@ -548,6 +561,8 @@ static int __init pm_dbg_init(void) + d, (void *)DEBUG_FILE_TIMERS, &debug_fops); + (void) debugfs_create_file("resources", S_IRUGO, + d, (void *)DEBUG_FILE_RESOURCES, &debug_fops); ++ (void) debugfs_create_file("wait_sdrc_count", S_IRUGO, ++ d, (void *)DEBUG_FILE_WAIT_SDRC_COUNT, &debug_fops); -+ if (sec_level == BT_SECURITY_SDP) -+ return 1; -+ -+ if (sec_level == BT_SECURITY_LOW && -+ (!conn->ssp_mode || !conn->hdev->ssp_mode)) -+ return 1; -+ - if (conn->link_mode & HCI_LM_ENCRYPT) -- return hci_conn_auth(conn); -+ return hci_conn_auth(conn, sec_level, auth_type); + pwrdm_for_each(pwrdms_setup, (void *)d); - if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) - return 0; +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/pm.h kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/pm.h +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/pm.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/pm.h 2011-09-04 11:37:54.000000000 +0200 +@@ -50,6 +50,7 @@ extern void omap3_pm_off_mode_enable(int + extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm); + extern int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state); + extern void omap3_set_prm_setup_times(struct prm_setup_times *setup_times); ++extern unsigned int *omap3_get_sdrc_counters(void); + #else + #define omap3_pm_off_mode_enable(int) do {} while (0); + #define omap3_pm_get_suspend_state(pwrdm) do {} while (0); +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/pm34xx.c kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/pm34xx.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/pm34xx.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/pm34xx.c 2011-09-04 11:37:54.000000000 +0200 +@@ -127,6 +127,9 @@ static int (*_omap_save_secure_sram)(u32 -- if (hci_conn_auth(conn)) { -+ if (hci_conn_auth(conn, sec_level, auth_type)) { - struct hci_cp_set_conn_encrypt cp; - cp.handle = cpu_to_le16(conn->handle); - cp.encrypt = 1; - hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, - sizeof(cp), &cp); - } -+ - return 0; - } --EXPORT_SYMBOL(hci_conn_encrypt); -+EXPORT_SYMBOL(hci_conn_security); + static void (*saved_idle)(void); - /* Change link key */ - int hci_conn_change_link_key(struct hci_conn *conn) -@@ -447,12 +461,13 @@ int hci_conn_change_link_key(struct hci_ - hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, - sizeof(cp), &cp); - } ++static unsigned int *_sdrc_counters; ++static unsigned int save_sdrc_counters[2]; + - return 0; - } - EXPORT_SYMBOL(hci_conn_change_link_key); - - /* Switch role */ --int hci_conn_switch_role(struct hci_conn *conn, uint8_t role) -+int hci_conn_switch_role(struct hci_conn *conn, __u8 role) - { - BT_DBG("conn %p", conn); - -@@ -465,6 +480,7 @@ int hci_conn_switch_role(struct hci_conn - cp.role = role; - hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); - } + static struct powerdomain *mpu_pwrdm, *neon_pwrdm; + static struct powerdomain *core_pwrdm, *per_pwrdm; + static struct powerdomain *cam_pwrdm, *iva2_pwrdm, *dss_pwrdm, *usb_pwrdm; +@@ -301,7 +304,15 @@ static void 
omap3_core_save_context(void + /* wait for the save to complete */ + while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS) + & PADCONF_SAVE_DONE)) +- ; ++ udelay(1); + - return 0; - } - EXPORT_SYMBOL(hci_conn_switch_role); -@@ -479,7 +495,7 @@ void hci_conn_enter_active_mode(struct h - if (test_bit(HCI_RAW, &hdev->flags)) - return; - -- if (conn->mode != HCI_CM_SNIFF || !conn->power_save) -+ if (conn->mode != HCI_CM_SNIFF) - goto timer; - - if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { -@@ -547,9 +563,7 @@ void hci_conn_hash_flush(struct hci_dev - - c->state = BT_CLOSED; - -- hci_conn_del_sysfs(c); -- -- hci_proto_disconn_ind(c, 0x16); -+ hci_proto_disconn_cfm(c, 0x16); - hci_conn_del(c); ++ /* ++ * Force write last pad into memory, as this can fail in some ++ * cases according to errata XYZ ++ */ ++ omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14), ++ OMAP343X_CONTROL_MEM_WKUP + 0x2a0); ++ + /* Save the Interrupt controller context */ + omap3_intc_save_context(); + /* Save the GPMC context */ +@@ -668,6 +679,8 @@ void omap_sram_idle(void) + OMAP3_PRM_CLKSETUP_OFFSET); } - } -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hci_core.c linux-omap-2.6.28-nokia1/net/bluetooth/hci_core.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hci_core.c 2011-06-22 13:14:25.623067642 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hci_core.c 2011-06-22 13:19:33.283063268 +0200 -@@ -48,11 +48,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCI_CORE_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - static void hci_cmd_task(unsigned long arg); - static void hci_rx_task(unsigned long arg); - static void hci_tx_task(unsigned long arg); -@@ -205,7 +200,7 @@ static void hci_init_req(struct hci_dev - /* Mandatory initialization */ - - /* Reset */ -- if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) -+ if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) - hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); - /* Read Local Supported Features */ -@@ -290,7 +285,7 @@ static void hci_linkpol_req(struct hci_d - { - __le16 policy = cpu_to_le16(opt); - -- BT_DBG("%s %x", hdev->name, opt); -+ BT_DBG("%s %x", hdev->name, policy); - - /* Default link policy */ - hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); -@@ -756,7 +751,7 @@ int hci_get_dev_list(void __user *arg) - - size = sizeof(*dl) + dev_num * sizeof(*dr); - -- if (!(dl = kmalloc(size, GFP_KERNEL))) -+ if (!(dl = kzalloc(size, GFP_KERNEL))) - return -ENOMEM; - - dr = dl->dev_req; -@@ -1570,8 +1565,7 @@ static void hci_cmd_task(unsigned long a - - /* Send queued commands */ - if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { -- if (hdev->sent_cmd) -- kfree_skb(hdev->sent_cmd); -+ kfree_skb(hdev->sent_cmd); - - if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) { - atomic_dec(&hdev->cmd_cnt); -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hci_event.c linux-omap-2.6.28-nokia1/net/bluetooth/hci_event.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hci_event.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hci_event.c 2011-06-22 13:19:33.283063268 +0200 -@@ -45,11 +45,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCI_CORE_DEBUG --#undef BT_DBG --#define BT_DBG(D...) 
--#endif -- - /* Handle HCI Event packets */ - - static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) -@@ -489,6 +484,15 @@ static void hci_cc_read_local_features(s - if (hdev->features[4] & LMP_EV5) - hdev->esco_type |= (ESCO_EV5); - -+ if (hdev->features[5] & LMP_EDR_ESCO_2M) -+ hdev->esco_type |= (ESCO_2EV3); -+ -+ if (hdev->features[5] & LMP_EDR_ESCO_3M) -+ hdev->esco_type |= (ESCO_3EV3); -+ -+ if (hdev->features[5] & LMP_EDR_3S_ESCO) -+ hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); ++ memcpy(save_sdrc_counters, _sdrc_counters, sizeof(save_sdrc_counters)); + - BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, - hdev->features[0], hdev->features[1], - hdev->features[2], hdev->features[3], -@@ -535,6 +539,14 @@ static void hci_cc_read_bd_addr(struct h - hci_req_complete(hdev, rp->status); + /* + * omap3_arm_context is the location where ARM registers + * get saved. The restore path then reads from this +@@ -697,6 +710,7 @@ void omap_sram_idle(void) + omap3_sram_restore_context(); + omap2_sms_restore_context(); + reset_ssi(); ++ memcpy(_sdrc_counters, save_sdrc_counters, sizeof(save_sdrc_counters)); + } + omap_uart_resume_idle(0); + omap_uart_resume_idle(1); +@@ -802,6 +816,12 @@ err: + return ret; } -+static void hci_cc_enable_dut_mode(struct hci_dev *hdev, struct sk_buff *skb) ++/* return a pointer to the sdrc counters */ ++unsigned int *omap3_get_sdrc_counters(void) +{ -+ struct hci_rp_enable_dut_mode *rp = (void *) skb->data; -+ -+ if (!rp->status) -+ set_bit(HCI_DUT_MODE, &hdev->flags); ++ return _sdrc_counters; +} + - static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) - { - BT_DBG("%s status 0x%x", hdev->name, status); -@@ -862,8 +874,16 @@ static inline void hci_conn_complete_evt - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); -- if (!conn) -- goto unlock; -+ if (!conn) { -+ if (ev->link_type != SCO_LINK) -+ goto unlock; -+ -+ conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); -+ if (!conn) -+ goto unlock; -+ -+ conn->type = SCO_LINK; -+ } - - if (!ev->status) { - conn->handle = __le16_to_cpu(ev->handle); -@@ -871,6 +891,7 @@ static inline void hci_conn_complete_evt - if (conn->type == ACL_LINK) { - conn->state = BT_CONFIG; - hci_conn_hold(conn); -+ conn->disc_timeout = HCI_DISCONN_TIMEOUT; - } else - conn->state = BT_CONNECTED; - -@@ -919,7 +940,8 @@ static inline void hci_conn_complete_evt - if (ev->status) { - hci_proto_connect_cfm(conn, ev->status); - hci_conn_del(conn); -- } -+ } else if (ev->link_type != ACL_LINK) -+ hci_proto_connect_cfm(conn, ev->status); - - unlock: - hci_dev_unlock(hdev); -@@ -1014,9 +1036,7 @@ static inline void hci_disconn_complete_ - if (conn) { - conn->state = BT_CLOSED; - -- hci_conn_del_sysfs(conn); -- -- hci_proto_disconn_ind(conn, ev->reason); -+ hci_proto_disconn_cfm(conn, ev->reason); - hci_conn_del(conn); - } - -@@ -1052,9 +1072,14 @@ static inline void hci_auth_complete_evt - hci_proto_connect_cfm(conn, ev->status); - hci_conn_put(conn); - } -- } else -+ } else { - hci_auth_cfm(conn, ev->status); - -+ hci_conn_hold(conn); -+ conn->disc_timeout = HCI_DISCONN_TIMEOUT; -+ hci_conn_put(conn); -+ } -+ - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { - if (!ev->status) { - struct hci_cp_set_conn_encrypt cp; -@@ -1294,6 +1319,10 @@ static inline void hci_cmd_complete_evt( - hci_cc_read_bd_addr(hdev, skb); - break; - -+ case HCI_OP_ENABLE_DUT_MODE: -+ hci_cc_enable_dut_mode(hdev, skb); -+ break; -+ - default: - BT_DBG("%s opcode 
0x%x", hdev->name, opcode); - break; -@@ -1468,7 +1497,21 @@ static inline void hci_mode_change_evt(s - - static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) - { -+ struct hci_ev_pin_code_req *ev = (void *) skb->data; -+ struct hci_conn *conn; -+ - BT_DBG("%s", hdev->name); -+ -+ hci_dev_lock(hdev); -+ -+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); -+ if (conn && conn->state == BT_CONNECTED) { -+ hci_conn_hold(conn); -+ conn->disc_timeout = HCI_PAIRING_TIMEOUT; -+ hci_conn_put(conn); -+ } -+ -+ hci_dev_unlock(hdev); - } - - static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) -@@ -1478,7 +1521,21 @@ static inline void hci_link_key_request_ - - static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) - { -+ struct hci_ev_link_key_notify *ev = (void *) skb->data; -+ struct hci_conn *conn; -+ - BT_DBG("%s", hdev->name); -+ -+ hci_dev_lock(hdev); -+ -+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); -+ if (conn) { -+ hci_conn_hold(conn); -+ conn->disc_timeout = HCI_DISCONN_TIMEOUT; -+ hci_conn_put(conn); -+ } -+ -+ hci_dev_unlock(hdev); - } - - static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) -@@ -1605,7 +1662,8 @@ static inline void hci_remote_ext_featur - - if (conn->state == BT_CONFIG) { - if (!ev->status && hdev->ssp_mode > 0 && -- conn->ssp_mode > 0 && conn->out) { -+ conn->ssp_mode > 0 && conn->out && -+ conn->sec_level != BT_SECURITY_SDP) { - struct hci_cp_auth_requested cp; - cp.handle = ev->handle; - hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, -@@ -1642,13 +1700,28 @@ static inline void hci_sync_conn_complet - conn->type = SCO_LINK; - } - -- if (!ev->status) { -+ switch (ev->status) { -+ case 0x00: - conn->handle = __le16_to_cpu(ev->handle); - conn->state = BT_CONNECTED; - - hci_conn_add_sysfs(conn); -- } else -+ break; -+ -+ case 0x1c: /* SCO interval rejected */ -+ case 0x1f: /* Unspecified error */ -+ if (conn->out && conn->attempt < 2) { -+ conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | -+ (hdev->esco_type & EDR_ESCO_MASK); -+ hci_setup_sync(conn, conn->link->handle); -+ goto unlock; -+ } -+ /* fall through */ -+ -+ default: - conn->state = BT_CLOSED; -+ break; -+ } - - hci_proto_connect_cfm(conn, ev->status); - if (ev->status) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hci_sock.c linux-omap-2.6.28-nokia1/net/bluetooth/hci_sock.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hci_sock.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hci_sock.c 2011-06-22 13:19:33.283063268 +0200 -@@ -49,11 +49,6 @@ - #include - #include - --#ifndef CONFIG_BT_HCI_SOCK_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - /* ----- HCI socket interface ----- */ - - static inline int hci_test_bit(int nr, void *addr) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hci_sysfs.c linux-omap-2.6.28-nokia1/net/bluetooth/hci_sysfs.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hci_sysfs.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hci_sysfs.c 2011-06-22 13:19:33.283063268 +0200 -@@ -6,16 +6,10 @@ - #include - #include - --#ifndef CONFIG_BT_HCI_CORE_DEBUG --#undef BT_DBG --#define BT_DBG(D...) 
--#endif -- - struct class *bt_class = NULL; - EXPORT_SYMBOL_GPL(bt_class); - --static struct workqueue_struct *btaddconn; --static struct workqueue_struct *btdelconn; -+static struct workqueue_struct *bt_workq; - - static inline char *link_typetostr(int type) + static void omap3_pm_idle(void) { -@@ -93,36 +87,20 @@ static struct device_type bt_link = { - - static void add_conn(struct work_struct *work) + local_irq_disable(); +@@ -1251,6 +1271,10 @@ void omap_push_sram_idle(void) { -- struct hci_conn *conn = container_of(work, struct hci_conn, work); -+ struct hci_conn *conn = container_of(work, struct hci_conn, work_add); -+ struct hci_dev *hdev = conn->hdev; - -- flush_workqueue(btdelconn); -+ /* ensure previous del is complete */ -+ flush_work(&conn->work_del); + _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, + omap34xx_cpu_suspend_sz); ++ /* the sdrc counters are always at the end of the omap34xx_cpu_suspend ++ * block */ ++ _sdrc_counters = (unsigned *)((u8 *)_omap_sram_idle + omap34xx_cpu_suspend_sz - 8); + -+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); - - if (device_add(&conn->dev) < 0) { - BT_ERR("Failed to register connection device"); - return; - } --} -- --void hci_conn_add_sysfs(struct hci_conn *conn) --{ -- struct hci_dev *hdev = conn->hdev; -- -- BT_DBG("conn %p", conn); -- -- conn->dev.type = &bt_link; -- conn->dev.class = bt_class; -- conn->dev.parent = &hdev->dev; -- -- snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d", -- hdev->name, conn->handle); -- -- dev_set_drvdata(&conn->dev, conn); -- -- device_initialize(&conn->dev); -- -- INIT_WORK(&conn->work, add_conn); - -- queue_work(btaddconn, &conn->work); -+ hci_dev_hold(hdev); - } + if (omap_type() != OMAP2_DEVICE_TYPE_GP) + _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, + save_secure_ram_context_sz); +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/sleep34xx.S kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/sleep34xx.S +--- kernel-2.6.28-20094102.6+0m5/arch/arm/mach-omap2/sleep34xx.S 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/mach-omap2/sleep34xx.S 2011-09-04 11:37:54.000000000 +0200 +@@ -40,6 +40,7 @@ + #define PM_PREPWSTST_MPU_V OMAP34XX_PRM_REGADDR(MPU_MOD, \ + OMAP3430_PM_PREPWSTST) + #define CM_IDLEST1_CORE_V IO_ADDRESS(OMAP3430_CM_BASE + 0x220) ++#define CM_IDLEST_CKGEN_V IO_ADDRESS(OMAP3430_CM_BASE + 0x520) /* -@@ -132,14 +110,20 @@ void hci_conn_add_sysfs(struct hci_conn - */ - static int __match_tty(struct device *dev, void *data) - { -- return !strncmp(dev->bus_id, "rfcomm", 6); -+ return !strncmp(dev_name(dev), "rfcomm", 6); - } - - static void del_conn(struct work_struct *work) - { -- struct hci_conn *conn = container_of(work, struct hci_conn, work); -+ struct hci_conn *conn = container_of(work, struct hci_conn, work_del); - struct hci_dev *hdev = conn->hdev; - -+ /* ensure previous add is complete */ -+ flush_work(&conn->work_add); -+ -+ if (!device_is_registered(&conn->dev)) -+ return; -+ - while (1) { - struct device *dev; - -@@ -152,19 +136,40 @@ static void del_conn(struct work_struct - - device_del(&conn->dev); - put_device(&conn->dev); -+ - hci_dev_put(hdev); - } - --void hci_conn_del_sysfs(struct hci_conn *conn) -+void hci_conn_init_sysfs(struct hci_conn *conn) - { -+ struct hci_dev *hdev = conn->hdev; -+ - BT_DBG("conn %p", conn); - -- if (!device_is_registered(&conn->dev)) -- return; -+ conn->dev.type = &bt_link; -+ conn->dev.class = bt_class; -+ conn->dev.parent = &hdev->dev; + * This is the physical address 
of the register as specified +@@ -703,35 +704,70 @@ skip_l2_inval: -- INIT_WORK(&conn->work, del_conn); -+ dev_set_drvdata(&conn->dev, conn); + /* Make sure SDRC accesses are ok */ + wait_sdrc_ok: + -+ device_initialize(&conn->dev); - -- queue_work(btdelconn, &conn->work); -+ INIT_WORK(&conn->work_add, add_conn); -+ INIT_WORK(&conn->work_del, del_conn); -+} -+ -+void hci_conn_add_sysfs(struct hci_conn *conn) -+{ -+ BT_DBG("conn %p", conn); -+ -+ queue_work(bt_workq, &conn->work_add); -+} -+ -+void hci_conn_del_sysfs(struct hci_conn *conn) -+{ -+ BT_DBG("conn %p", conn); ++/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this. */ ++ ldr r4, cm_idlest_ckgen ++wait_dpll3_lock: ++ ldr r5, [r4] ++ tst r5, #1 ++ beq wait_dpll3_lock + -+ queue_work(bt_workq, &conn->work_del); - } - - static inline char *host_typetostr(int type) -@@ -421,7 +426,7 @@ int hci_register_sysfs(struct hci_dev *h - dev->class = bt_class; - dev->parent = hdev->parent; - -- strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE); -+ dev_set_name(dev, "%s", hdev->name); - - dev_set_drvdata(dev, hdev); - -@@ -441,20 +446,13 @@ void hci_unregister_sysfs(struct hci_dev - - int __init bt_sysfs_init(void) - { -- btaddconn = create_singlethread_workqueue("btaddconn"); -- if (!btaddconn) -+ bt_workq = create_singlethread_workqueue("bluetooth"); -+ if (!bt_workq) - return -ENOMEM; + ldr r4, cm_idlest1_core ++wait_sdrc_ready: + ldr r5, [r4] +- and r5, r5, #0x2 +- cmp r5, #0 +- bne wait_sdrc_ok ++ tst r5, #0x2 ++ bne wait_sdrc_ready ++ /* allow DLL powerdown upon hw idle req */ + ldr r4, sdrc_power + ldr r5, [r4] + bic r5, r5, #0x40 + str r5, [r4] +-wait_dll_lock: +- /* Is dll in lock mode? */ ++is_dll_in_lock_mode: + ldr r4, sdrc_dlla_ctrl + ldr r5, [r4] + tst r5, #0x4 + bxne lr + /* wait till dll locks */ ++wait_dll_lock_timed: ++ ldr r4, wait_dll_lock_counter ++ add r4, r4, #1 ++ str r4, wait_dll_lock_counter + ldr r4, sdrc_dlla_status ++ mov r6, #8 /* Wait 20uS for lock */ ++wait_dll_lock: ++ subs r6, r6, #0x1 ++ beq kick_dll + ldr r5, [r4] + and r5, r5, #0x4 + cmp r5, #0x4 + bne wait_dll_lock + bx lr -- btdelconn = create_singlethread_workqueue("btdelconn"); -- if (!btdelconn) { -- destroy_workqueue(btaddconn); -- return -ENOMEM; -- } -- - bt_class = class_create(THIS_MODULE, "bluetooth"); - if (IS_ERR(bt_class)) { -- destroy_workqueue(btdelconn); -- destroy_workqueue(btaddconn); -+ destroy_workqueue(bt_workq); - return PTR_ERR(bt_class); ++ /* disable/reenable DLL if locked */ ++kick_dll: ++ ldr r4, sdrc_dlla_ctrl /* get dlla addr */ ++ ldr r5, [r4] /* grab value */ ++ mov r6, r5 /* save value */ ++ bic r6, #(1<<3) /* disable dll */ ++ str r6, [r4] ++ dsb ++ orr r6, r6, #(1<<3) /* enable dll */ ++ str r6, [r4] ++ dsb ++ str r5, [r4] /* restore old value */ ++ ldr r4, kick_counter ++ add r4, r4, #1 ++ str r4, kick_counter ++ b wait_dll_lock_timed ++ + phys_offset: + .word PHYS_OFFSET + page_offset: + .word PAGE_OFFSET + cm_idlest1_core: + .word CM_IDLEST1_CORE_V ++cm_idlest_ckgen: ++ .word CM_IDLEST_CKGEN_V + sdrc_dlla_status: + .word SDRC_DLLA_STATUS_V + sdrc_dlla_ctrl: +@@ -766,5 +802,10 @@ cache_pred_disable_mask: + .word 0xFFFFE7FB + control_stat: + .word CONTROL_STAT ++ /* these 2 words need to be at the end !!! */ ++kick_counter: ++ .word 0 ++wait_dll_lock_counter: ++ .word 0 + ENTRY(omap34xx_cpu_suspend_sz) + .word . 
- omap34xx_cpu_suspend +diff -Nurp kernel-2.6.28-20094102.6+0m5/arch/arm/plat-omap/iovmm.c kernel-2.6.28-20094803.3+0m5/arch/arm/plat-omap/iovmm.c +--- kernel-2.6.28-20094102.6+0m5/arch/arm/plat-omap/iovmm.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/arch/arm/plat-omap/iovmm.c 2011-09-04 11:37:54.000000000 +0200 +@@ -47,7 +47,7 @@ + * 'va': mpu virtual address + * + * 'c': contiguous memory area +- * 'd': dicontiguous memory area ++ * 'd': discontiguous memory area + * 'a': anonymous memory allocation + * '()': optional feature + * +@@ -385,14 +385,13 @@ static void sgtable_fill_vmalloc(struct } -@@ -463,8 +461,7 @@ int __init bt_sysfs_init(void) - - void bt_sysfs_cleanup(void) - { -- destroy_workqueue(btaddconn); -- destroy_workqueue(btdelconn); -+ destroy_workqueue(bt_workq); - - class_destroy(bt_class); + va_end = _va + PAGE_SIZE * i; +- flush_cache_vmap(_va, va_end); } -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hidp/core.c linux-omap-2.6.28-nokia1/net/bluetooth/hidp/core.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hidp/core.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hidp/core.c 2011-06-22 13:19:33.283063268 +0200 -@@ -47,11 +47,6 @@ - - #include "hidp.h" - --#ifndef CONFIG_BT_HIDP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "1.2" - - static DECLARE_RWSEM(hidp_session_sem); -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/hidp/sock.c linux-omap-2.6.28-nokia1/net/bluetooth/hidp/sock.c ---- linux-omap-2.6.28-omap1/net/bluetooth/hidp/sock.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/hidp/sock.c 2011-06-22 13:19:33.283063268 +0200 -@@ -39,11 +39,6 @@ - #include "hidp.h" - --#ifndef CONFIG_BT_HIDP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - static int hidp_sock_release(struct socket *sock) + static inline void sgtable_drain_vmalloc(struct sg_table *sgt) { - struct sock *sk = sock->sk; -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/l2cap.c linux-omap-2.6.28-nokia1/net/bluetooth/l2cap.c ---- linux-omap-2.6.28-omap1/net/bluetooth/l2cap.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/l2cap.c 2011-06-22 13:19:33.283063268 +0200 -@@ -50,14 +50,10 @@ - #include - #include - --#ifndef CONFIG_BT_L2CAP_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif + /* + * Actually this is not necessary at all, just exists for +- * consistency of the code readibility. ++ * consistency of the code readability. 
+ */ + BUG_ON(!sgt); + } +@@ -420,15 +419,13 @@ static void sgtable_fill_kmalloc(struct + len -= bytes; + } + BUG_ON(len); - --#define VERSION "2.11" -+#define VERSION "2.13" - - static u32 l2cap_feat_mask = 0x0000; -+static u8 l2cap_fixed_chan[8] = { 0x02, }; - - static const struct proto_ops l2cap_sock_ops; - -@@ -82,9 +78,10 @@ static void l2cap_sock_timeout(unsigned - - bh_lock_sock(sk); - -- if (sk->sk_state == BT_CONNECT && -- (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH | -- L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE))) -+ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG) -+ reason = ECONNREFUSED; -+ else if (sk->sk_state == BT_CONNECT && -+ l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) - reason = ECONNREFUSED; - else - reason = ETIMEDOUT; -@@ -209,6 +206,8 @@ static void __l2cap_chan_add(struct l2ca - - BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); - -+ conn->disc_reason = 0x13; -+ - l2cap_pi(sk)->conn = conn; - - if (sk->sk_type == SOCK_SEQPACKET) { -@@ -264,18 +263,35 @@ static void l2cap_chan_del(struct sock * +- clean_dcache_area(va, len); } - /* Service level security */ --static inline int l2cap_check_link_mode(struct sock *sk) -+static inline int l2cap_check_security(struct sock *sk) + static inline void sgtable_drain_kmalloc(struct sg_table *sgt) { - struct l2cap_conn *conn = l2cap_pi(sk)->conn; -+ __u8 auth_type; - -- if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) || -- (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) -- return hci_conn_encrypt(conn->hcon); -+ if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { -+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) -+ auth_type = HCI_AT_NO_BONDING_MITM; -+ else -+ auth_type = HCI_AT_NO_BONDING; - -- if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) -- return hci_conn_auth(conn->hcon); -+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) -+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; -+ } else { -+ switch (l2cap_pi(sk)->sec_level) { -+ case BT_SECURITY_HIGH: -+ auth_type = HCI_AT_GENERAL_BONDING_MITM; -+ break; -+ case BT_SECURITY_MEDIUM: -+ auth_type = HCI_AT_GENERAL_BONDING; -+ break; -+ default: -+ auth_type = HCI_AT_NO_BONDING; -+ break; -+ } -+ } - -- return 1; -+ return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, -+ auth_type); + /* + * Actually this is not necessary at all, just exists for +- * consistency of the code readibility ++ * consistency of the code readability + */ + BUG_ON(!sgt); } +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/dsp/bridge/rmgr/proc.c kernel-2.6.28-20094803.3+0m5/drivers/dsp/bridge/rmgr/proc.c +--- kernel-2.6.28-20094102.6+0m5/drivers/dsp/bridge/rmgr/proc.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/dsp/bridge/rmgr/proc.c 2011-09-04 11:37:54.000000000 +0200 +@@ -159,6 +159,8 @@ + #define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */ + #define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ - static inline u8 l2cap_get_ident(struct l2cap_conn *conn) -@@ -317,7 +333,10 @@ static void l2cap_do_start(struct sock * - struct l2cap_conn *conn = l2cap_pi(sk)->conn; - - if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { -- if (l2cap_check_link_mode(sk)) { -+ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) -+ return; ++#define DSP_CACHE_LINE 128 + -+ if (l2cap_check_security(sk)) { - struct l2cap_conn_req req; - req.scid = cpu_to_le16(l2cap_pi(sk)->scid); - req.psm = l2cap_pi(sk)->psm; -@@ -361,7 +380,7 @@ static void l2cap_conn_start(struct l2ca - } + extern char *iva_img; - if (sk->sk_state == 
BT_CONNECT) { -- if (l2cap_check_link_mode(sk)) { -+ if (l2cap_check_security(sk)) { - struct l2cap_conn_req req; - req.scid = cpu_to_le16(l2cap_pi(sk)->scid); - req.psm = l2cap_pi(sk)->psm; -@@ -376,10 +395,18 @@ static void l2cap_conn_start(struct l2ca - rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); - rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); + /* ----------------------------------- Globals */ +@@ -679,8 +681,48 @@ DSP_STATUS PROC_EnumNodes(DSP_HPROCESSOR + return status; + } -- if (l2cap_check_link_mode(sk)) { -- sk->sk_state = BT_CONFIG; -- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); -- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); -+ if (l2cap_check_security(sk)) { -+ if (bt_sk(sk)->defer_setup) { -+ struct sock *parent = bt_sk(sk)->parent; -+ rsp.result = cpu_to_le16(L2CAP_CR_PEND); -+ rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); -+ parent->sk_data_ready(parent, 0); ++/* Cache operation against kernel address instead of users */ ++static int memory_sync_page(struct vm_area_struct *vma, unsigned long start, ++ ssize_t len, enum DSP_FLUSHTYPE ftype) ++{ ++ struct page *page; ++ void *kaddr; ++ unsigned long offset; ++ ssize_t rest; + -+ } else { -+ sk->sk_state = BT_CONFIG; -+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); -+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); -+ } - } else { - rsp.result = cpu_to_le16(L2CAP_CR_PEND); - rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); -@@ -431,7 +458,7 @@ static void l2cap_conn_unreliable(struct - read_lock(&l->lock); - - for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { -- if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE) -+ if (l2cap_pi(sk)->force_reliable) - sk->sk_err = err; - } - -@@ -442,6 +469,7 @@ static void l2cap_info_timeout(unsigned - { - struct l2cap_conn *conn = (void *) arg; - -+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; - conn->info_ident = 0; - - l2cap_conn_start(conn); -@@ -475,6 +503,8 @@ static struct l2cap_conn *l2cap_conn_add - spin_lock_init(&conn->lock); - rwlock_init(&conn->chan_list.lock); - -+ conn->disc_reason = 0x13; ++#ifdef CHECK_DSP_CACHE_LINE ++ if ((start & DSP_CACHE_LINE) || (len & DSP_CACHE_LINE)) ++ pr_warning("%s: not aligned: %08lx(%d)\n", __func__, ++ start, len); ++#endif ++ while (len) { ++ page = follow_page(vma, start, FOLL_GET); ++ if (!page) { ++ pr_err("%s: no page for %08lx\n", __func__, start); ++ return -EINVAL; ++ } else if (IS_ERR(page)) { ++ pr_err("%s: err page for %08lx(%lu)\n", __func__, start, ++ IS_ERR(page)); ++ return IS_ERR(page); ++ } + - return conn; - } - -@@ -488,8 +518,7 @@ static void l2cap_conn_del(struct hci_co - - BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); - -- if (conn->rx_skb) -- kfree_skb(conn->rx_skb); -+ kfree_skb(conn->rx_skb); - - /* Kill channels */ - while ((sk = conn->chan_list.head)) { -@@ -613,7 +642,6 @@ static void __l2cap_sock_close(struct so - - case BT_CONNECTED: - case BT_CONFIG: -- case BT_CONNECT2: - if (sk->sk_type == SOCK_SEQPACKET) { - struct l2cap_conn *conn = l2cap_pi(sk)->conn; - struct l2cap_disconn_req req; -@@ -629,6 +657,27 @@ static void __l2cap_sock_close(struct so - l2cap_chan_del(sk, reason); - break; - -+ case BT_CONNECT2: -+ if (sk->sk_type == SOCK_SEQPACKET) { -+ struct l2cap_conn *conn = l2cap_pi(sk)->conn; -+ struct l2cap_conn_rsp rsp; -+ __u16 result; ++ offset = start & ~PAGE_MASK; ++ kaddr = page_address(page) + offset; ++ rest = min_t(ssize_t, PAGE_SIZE - offset, len); + -+ if (bt_sk(sk)->defer_setup) -+ result = L2CAP_CR_SEC_BLOCK; -+ else -+ result = L2CAP_CR_BAD_PSM; ++ MEM_FlushCache(kaddr, rest, ftype); + -+ 
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); -+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); -+ rsp.result = cpu_to_le16(result); -+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); -+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident, -+ L2CAP_CONN_RSP, sizeof(rsp), &rsp); -+ } else -+ l2cap_chan_del(sk, reason); -+ break; ++ put_page(page); ++ len -= rest; ++ start += rest; ++ } + - case BT_CONNECT: - case BT_DISCONN: - l2cap_chan_del(sk, reason); -@@ -658,13 +707,19 @@ static void l2cap_sock_init(struct sock - - if (parent) { - sk->sk_type = parent->sk_type; -+ bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; ++ return 0; ++} + - pi->imtu = l2cap_pi(parent)->imtu; - pi->omtu = l2cap_pi(parent)->omtu; -- pi->link_mode = l2cap_pi(parent)->link_mode; -+ pi->sec_level = l2cap_pi(parent)->sec_level; -+ pi->role_switch = l2cap_pi(parent)->role_switch; -+ pi->force_reliable = l2cap_pi(parent)->force_reliable; - } else { - pi->imtu = L2CAP_DEFAULT_MTU; - pi->omtu = 0; -- pi->link_mode = 0; -+ pi->sec_level = BT_SECURITY_LOW; -+ pi->role_switch = 0; -+ pi->force_reliable = 0; - } - - /* Default config options */ -@@ -728,17 +783,24 @@ static int l2cap_sock_create(struct net - return 0; - } - --static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) -+static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) + /* Check if the given area blongs to process virtul memory address space */ +-static int memory_check_vma(unsigned long start, u32 len) ++static int memory_sync_vma(unsigned long start, u32 len, ++ enum DSP_FLUSHTYPE ftype) { -- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; - struct sock *sk = sock->sk; -- int err = 0; -+ struct sockaddr_l2 la; -+ int len, err = 0; - -- BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm); -+ BT_DBG("sk %p", sk); - - if (!addr || addr->sa_family != AF_BLUETOOTH) + int err = 0; + unsigned long end; +@@ -690,14 +732,19 @@ static int memory_check_vma(unsigned lon + if (end <= start) return -EINVAL; -+ memset(&la, 0, sizeof(la)); -+ len = min_t(unsigned int, sizeof(la), alen); -+ memcpy(&la, addr, len); +- down_read(¤t->mm->mmap_sem); +- + while ((vma = find_vma(current->mm, start)) != NULL) { ++ ssize_t size; + -+ if (la.l2_cid) -+ return -EINVAL; ++ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) ++ return -EINVAL; + - lock_sock(sk); ++ if (vma->vm_start > start) ++ return -EINVAL; - if (sk->sk_state != BT_OPEN) { -@@ -746,7 +808,7 @@ static int l2cap_sock_bind(struct socket - goto done; - } +- if (vma->vm_start > start) { +- err = -EINVAL; ++ size = min_t(ssize_t, vma->vm_end - start, len); ++ err = memory_sync_page(vma, start, size, ftype); ++ if (err) + break; +- } -- if (la->l2_psm && btohs(la->l2_psm) < 0x1001 && -+ if (la.l2_psm && btohs(la.l2_psm) < 0x1001 && - !capable(CAP_NET_BIND_SERVICE)) { - err = -EACCES; - goto done; -@@ -754,14 +816,17 @@ static int l2cap_sock_bind(struct socket + if (end <= vma->vm_end) + break; +@@ -708,8 +755,6 @@ static int memory_check_vma(unsigned lon + if (!vma) + err = -EINVAL; - write_lock_bh(&l2cap_sk_list.lock); +- up_read(¤t->mm->mmap_sem); +- + return err; + } -- if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) { -+ if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) { - err = -EADDRINUSE; - } else { - /* Save source address */ -- bacpy(&bt_sk(sk)->src, &la->l2_bdaddr); -- l2cap_pi(sk)->psm = la->l2_psm; -- l2cap_pi(sk)->sport = la->l2_psm; -+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); -+ l2cap_pi(sk)->psm = la.l2_psm; -+ 
l2cap_pi(sk)->sport = la.l2_psm; - sk->sk_state = BT_BOUND; -+ -+ if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003) -+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; +@@ -734,18 +779,15 @@ static DSP_STATUS proc_memory_sync(DSP_H + goto err_out; } - write_unlock_bh(&l2cap_sk_list.lock); -@@ -781,7 +846,8 @@ static int l2cap_do_connect(struct sock - __u8 auth_type; - int err = 0; - -- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm); -+ BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), -+ l2cap_pi(sk)->psm); - - if (!(hdev = hci_get_route(dst, src))) - return -EHOSTUNREACH; -@@ -790,21 +856,42 @@ static int l2cap_do_connect(struct sock - - err = -ENOMEM; - -- if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH || -- l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT || -- l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) { -- if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) -+ if (sk->sk_type == SOCK_RAW) { -+ switch (l2cap_pi(sk)->sec_level) { -+ case BT_SECURITY_HIGH: -+ auth_type = HCI_AT_DEDICATED_BONDING_MITM; -+ break; -+ case BT_SECURITY_MEDIUM: -+ auth_type = HCI_AT_DEDICATED_BONDING; -+ break; -+ default: -+ auth_type = HCI_AT_NO_BONDING; -+ break; -+ } -+ } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { -+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) - auth_type = HCI_AT_NO_BONDING_MITM; - else -- auth_type = HCI_AT_GENERAL_BONDING_MITM; -- } else { -- if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) - auth_type = HCI_AT_NO_BONDING; -- else +- if (memory_check_vma((u32)pMpuAddr, ulSize)) { +- GT_3trace(PROC_DebugMask, GT_7CLASS, +- "%s: InValid address parameters\n", +- __func__, pMpuAddr, ulSize); ++ down_read(¤t->mm->mmap_sem); + -+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) -+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; -+ } else { -+ switch (l2cap_pi(sk)->sec_level) { -+ case BT_SECURITY_HIGH: -+ auth_type = HCI_AT_GENERAL_BONDING_MITM; -+ break; -+ case BT_SECURITY_MEDIUM: - auth_type = HCI_AT_GENERAL_BONDING; -+ break; -+ default: -+ auth_type = HCI_AT_NO_BONDING; -+ break; -+ } ++ if (memory_sync_vma((u32)pMpuAddr, ulSize, FlushMemType)) { ++ pr_err("%s: InValid address parameters %p %x\n", ++ __func__, pMpuAddr, ulSize); + status = DSP_EHANDLE; +- goto err_out; } -- hcon = hci_connect(hdev, ACL_LINK, dst, auth_type); -+ hcon = hci_connect(hdev, ACL_LINK, dst, -+ l2cap_pi(sk)->sec_level, auth_type); - if (!hcon) - goto done; - -@@ -840,20 +927,25 @@ done: - - static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) - { -- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; - struct sock *sk = sock->sk; -- int err = 0; +- (void)SYNC_EnterCS(hProcLock); +- MEM_FlushCache(pMpuAddr, ulSize, FlushMemType); +- (void)SYNC_LeaveCS(hProcLock); - -- lock_sock(sk); -+ struct sockaddr_l2 la; -+ int len, err = 0; - - BT_DBG("sk %p", sk); ++ up_read(¤t->mm->mmap_sem); + err_out: + GT_2trace(PROC_DebugMask, GT_ENTER, + "Leaving %s [0x%x]", __func__, status); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/bridged_pvr_bridge.c kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/bridged_pvr_bridge.c +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/bridged_pvr_bridge.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/bridged_pvr_bridge.c 2011-09-04 11:37:54.000000000 +0200 +@@ -791,25 +791,25 @@ PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui3 + } -- if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) { -- err = -EINVAL; -- goto done; -- } -+ if (!addr || 
addr->sa_family != AF_BLUETOOTH) -+ return -EINVAL; + static int +-FlushCacheDRI(IMG_UINT32 ui32Type, IMG_UINT32 ui32Virt, IMG_UINT32 ui32Length) ++FlushCacheDRI(IMG_UINT32 ui32Type, IMG_VOID *pvVirt, IMG_UINT32 ui32Length) + { + switch (ui32Type) { + case DRM_PVR2D_CFLUSH_FROM_GPU: + PVR_DPF((PVR_DBG_MESSAGE, + "DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n", +- ui32Virt, ui32Length)); ++ pvVirt, ui32Length)); + #ifdef CONFIG_ARM +- dmac_inv_range((const void *)ui32Virt, +- (const void *)(ui32Virt + ui32Length)); ++ dmac_inv_range((const void *)pvVirt, ++ (const void *)(pvVirt + ui32Length)); + #endif + return 0; + case DRM_PVR2D_CFLUSH_TO_GPU: + PVR_DPF((PVR_DBG_MESSAGE, + "DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n", +- ui32Virt, ui32Length)); ++ pvVirt, ui32Length)); + #ifdef CONFIG_ARM +- dmac_clean_range((const void *)ui32Virt, +- (const void *)(ui32Virt + ui32Length)); ++ dmac_clean_range((const void *)pvVirt, ++ (const void *)(pvVirt + ui32Length)); + #endif + return 0; + default: +@@ -821,18 +821,51 @@ FlushCacheDRI(IMG_UINT32 ui32Type, IMG_U + return 0; + } -- if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) { -+ memset(&la, 0, sizeof(la)); -+ len = min_t(unsigned int, sizeof(la), alen); -+ memcpy(&la, addr, len); ++PVRSRV_ERROR ++PVRSRVIsWrappedExtMemoryBW(PVRSRV_PER_PROCESS_DATA *psPerProc, ++ PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER *psCacheFlushIN) ++{ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE hDevCookieInt; + -+ if (la.l2_cid) -+ return -EINVAL; ++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, ++ psCacheFlushIN->hDevCookie, ++ PVRSRV_HANDLE_TYPE_DEV_NODE); + -+ lock_sock(sk); ++ eError = PVRSRVIsWrappedExtMemoryKM( ++ hDevCookieInt, ++ psPerProc, ++ &(psCacheFlushIN->ui32Length), ++ &(psCacheFlushIN->pvVirt)); + -+ if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) { - err = -EINVAL; - goto done; - } -@@ -880,8 +972,8 @@ static int l2cap_sock_connect(struct soc - } - - /* Set destination address and psm */ -- bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr); -- l2cap_pi(sk)->psm = la->l2_psm; -+ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); -+ l2cap_pi(sk)->psm = la.l2_psm; - - if ((err = l2cap_do_connect(sk))) - goto done; -@@ -1005,12 +1097,16 @@ static int l2cap_sock_getname(struct soc - addr->sa_family = AF_BLUETOOTH; - *len = sizeof(struct sockaddr_l2); ++ return eError; ++} ++ + static int + PVRSRVCacheFlushDRIBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER * psCacheFlushIN, + PVRSRV_BRIDGE_RETURN * psRetOUT, + PVRSRV_PER_PROCESS_DATA * psPerProc) + { ++ PVRSRV_ERROR eError; + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CACHE_FLUSH_DRM); -- if (peer) -+ if (peer) { -+ la->l2_psm = l2cap_pi(sk)->psm; - bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); -- else -+ la->l2_cid = htobs(l2cap_pi(sk)->dcid); +- psRetOUT->eError = FlushCacheDRI(psCacheFlushIN->ui32Type, +- psCacheFlushIN->ui32Virt, +- psCacheFlushIN->ui32Length); ++ down_read(¤t->mm->mmap_sem); ++ ++ eError = PVRSRVIsWrappedExtMemoryBW(psPerProc, psCacheFlushIN); ++ ++ if (eError == PVRSRV_OK) { ++ psRetOUT->eError = FlushCacheDRI(psCacheFlushIN->ui32Type, ++ psCacheFlushIN->pvVirt, ++ psCacheFlushIN->ui32Length); + } else { -+ la->l2_psm = l2cap_pi(sk)->sport; - bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); -+ la->l2_cid = htobs(l2cap_pi(sk)->scid); ++ printk(KERN_WARNING ++ ": PVRSRVCacheFlushDRIBW: Start address 0x%08x and length 0x%08x not wrapped \n", ++ (unsigned int)(psCacheFlushIN->pvVirt), ++ (unsigned int)(psCacheFlushIN->ui32Length)); + } -- la->l2_psm = l2cap_pi(sk)->psm; ++ 
up_read(¤t->mm->mmap_sem); return 0; } -@@ -1111,11 +1207,38 @@ static int l2cap_sock_sendmsg(struct kio - return err; +@@ -987,7 +1020,7 @@ PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32Bri + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + IMG_UINT32 ui32PageTableSize = 0; +- IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;; ++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY); + +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/buffer_manager.c kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/buffer_manager.c +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/buffer_manager.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/buffer_manager.c 2011-09-04 11:37:54.000000000 +0200 +@@ -30,6 +30,8 @@ + #include "ra.h" + #include "pdump_km.h" + ++#include ++ + #define MIN(a,b) (a > b ? b : a) + + static IMG_BOOL +@@ -988,6 +990,34 @@ BM_IsWrapped(IMG_HANDLE hDevMemHeap, } --static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) -+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) + IMG_BOOL ++BM_IsWrappedCheckSize(IMG_HANDLE hDevMemHeap, ++ IMG_UINT32 ui32Offset, ++ IMG_SYS_PHYADDR sSysAddr, ++ IMG_UINT32 ui32ByteSize) +{ -+ struct sock *sk = sock->sk; -+ -+ lock_sock(sk); -+ -+ if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { -+ struct l2cap_conn_rsp rsp; ++ BM_BUF *pBuf; ++ BM_CONTEXT *psBMContext; ++ BM_HEAP *psBMHeap; + -+ sk->sk_state = BT_CONFIG; ++ IMG_BOOL ret = IMG_FALSE; + -+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); -+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); -+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); -+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); -+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident, -+ L2CAP_CONN_RSP, sizeof(rsp), &rsp); ++ psBMHeap = (BM_HEAP *) hDevMemHeap; ++ psBMContext = psBMHeap->pBMContext; ++ sSysAddr.uiAddr += ui32Offset; ++ pBuf = (BM_BUF *) HASH_Retrieve(psBMContext->pBufferHash, ++ (IMG_UINTPTR_T) sSysAddr.uiAddr); + -+ release_sock(sk); -+ return 0; ++ if (pBuf != NULL) { ++ if (pBuf->pMapping->uSize >= ui32ByteSize) ++ ret = IMG_TRUE; ++ else ++ ret = IMG_FALSE; + } + -+ release_sock(sk); -+ -+ return bt_sock_recvmsg(iocb, sock, msg, len, flags); ++ return ret; +} + -+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen) - { - struct sock *sk = sock->sk; - struct l2cap_options opts; -- int err = 0, len; -+ int len, err = 0; - u32 opt; ++IMG_BOOL + BM_Wrap(IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Offset, +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/buffer_manager.h kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/buffer_manager.h +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/buffer_manager.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/buffer_manager.h 2011-09-04 11:37:54.000000000 +0200 +@@ -138,6 +138,12 @@ + IMG_UINT32 ui32Offset, IMG_SYS_PHYADDR sSysAddr); + + IMG_BOOL ++ BM_IsWrappedCheckSize(IMG_HANDLE hDevMemHeap, ++ IMG_UINT32 ui32Offset, ++ IMG_SYS_PHYADDR sSysAddr, ++ IMG_UINT32 ui32ByteSize); ++ ++ IMG_BOOL + BM_Wrap(IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Offset, +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/devicemem.c kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/devicemem.c +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/devicemem.c 
2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/devicemem.c 2011-09-04 11:37:54.000000000 +0200 +@@ -34,6 +34,7 @@ + #include "pvr_bridge_km.h" - BT_DBG("sk %p", sk); -@@ -1145,7 +1268,15 @@ static int l2cap_sock_setsockopt(struct - break; - } + #include "linux/kernel.h" ++#include "linux/pagemap.h" -- l2cap_pi(sk)->link_mode = opt; -+ if (opt & L2CAP_LM_AUTH) -+ l2cap_pi(sk)->sec_level = BT_SECURITY_LOW; -+ if (opt & L2CAP_LM_ENCRYPT) -+ l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM; -+ if (opt & L2CAP_LM_SECURE) -+ l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH; -+ -+ l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER); -+ l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE); - break; + static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, +@@ -595,13 +596,92 @@ static PVRSRV_ERROR UnwrapExtMemoryCallB + } - default: -@@ -1157,12 +1288,77 @@ static int l2cap_sock_setsockopt(struct - return err; + if (hOSWrapMem) { +- OSReleasePhysPageAddr(hOSWrapMem); ++ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); + } + + return eError; } --static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) -+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) + IMG_EXPORT ++ PVRSRV_ERROR IMG_CALLCONV PVRSRVIsWrappedExtMemoryKM(IMG_HANDLE hDevCookie, ++ PVRSRV_PER_PROCESS_DATA ++ *psPerProc, ++ IMG_UINT32 ++ *pui32ByteSize, ++ IMG_VOID ++ **pvLinAddr) +{ -+ struct sock *sk = sock->sk; -+ struct bt_security sec; -+ int len, err = 0; -+ u32 opt; -+ -+ BT_DBG("sk %p", sk); -+ -+ if (level == SOL_L2CAP) -+ return l2cap_sock_setsockopt_old(sock, optname, optval, optlen); ++ DEVICE_MEMORY_INFO *psDevMemoryInfo; ++ IMG_UINT32 ui32HostPageSize = HOST_PAGESIZE(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ PVRSRV_ERROR eError; ++ IMG_SYS_PHYADDR sIntSysPAddr; ++ IMG_HANDLE hOSWrapMem = IMG_NULL; ++ IMG_HANDLE hDevMemHeap; ++ IMG_UINT32 ui32PageOffset = 0; + -+ if (level != SOL_BLUETOOTH) -+ return -ENOPROTOOPT; ++ IMG_UINT32 ui32ReturnedByteSize = *pui32ByteSize; + -+ lock_sock(sk); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; + -+ switch (optname) { -+ case BT_SECURITY: -+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) { -+ err = -EINVAL; -+ break; -+ } ++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; ++ PVR_ASSERT(psDeviceNode != IMG_NULL); ++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + -+ sec.level = BT_SECURITY_LOW; ++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].hDevMemHeap; + -+ len = min_t(unsigned int, sizeof(sec), optlen); -+ if (copy_from_user((char *) &sec, optval, len)) { -+ err = -EFAULT; -+ break; -+ } ++ if (pvLinAddr) { ++ ui32PageOffset = ((IMG_UINT32)*pvLinAddr) & ~PAGE_MASK; ++ *pvLinAddr = (IMG_VOID *)((IMG_UINT32)*pvLinAddr & PAGE_MASK); ++ ui32ReturnedByteSize += ui32PageOffset; + -+ if (sec.level < BT_SECURITY_LOW || -+ sec.level > BT_SECURITY_HIGH) { -+ err = -EINVAL; -+ break; ++ /* let's start by getting the address of the first page */ ++ eError = OSAcquirePhysPageAddr(*pvLinAddr, ++ ui32HostPageSize, ++ &sIntSysPAddr, ++ &hOSWrapMem, ++ IMG_FALSE); ++ if (eError != PVRSRV_OK) { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVIsWrappedExtMemoryKM: Failed to alloc memory for block")); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto ErrorExitPhase1; + } + -+ l2cap_pi(sk)->sec_level = sec.level; -+ break; ++ OSReleasePhysPageAddr(hOSWrapMem, IMG_FALSE); ++ hOSWrapMem = IMG_NULL; 
+ -+ case BT_DEFER_SETUP: -+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { -+ err = -EINVAL; -+ break; -+ } ++ /* now check if this memory address is already wrapped */ ++ if (BM_IsWrappedCheckSize(hDevMemHeap, ++ ui32PageOffset, ++ sIntSysPAddr, ++ *pui32ByteSize)) { ++ /* already wrapped */ ++ eError = PVRSRV_OK; ++ } else { ++ /* not mapped in this heap */ ++ /* try the alternative heap */ ++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[SGX_ALT_MAPPING_HEAP_ID].hDevMemHeap; + -+ if (get_user(opt, (u32 __user *) optval)) { -+ err = -EFAULT; -+ break; ++ if (BM_IsWrappedCheckSize(hDevMemHeap, ++ ui32PageOffset, ++ sIntSysPAddr, ++ *pui32ByteSize)) { ++ /* already wrapped */ ++ eError = PVRSRV_OK; ++ } else { ++ eError = PVRSRV_ERROR_BAD_MAPPING; ++ } + } ++ } + -+ bt_sk(sk)->defer_setup = opt; -+ break; ++ErrorExitPhase1: + -+ default: -+ err = -ENOPROTOOPT; -+ break; -+ } ++ *pui32ByteSize = ui32ReturnedByteSize; + -+ release_sock(sk); -+ return err; ++ return eError; +} + -+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) ++IMG_EXPORT + PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA * + psPerProc, +@@ -631,6 +711,13 @@ IMG_EXPORT + IMG_SYS_PHYADDR *pPageList = psExtSysPAddr; + IMG_UINT32 ui32PageCount; + ++ IMG_UINT32 ui32CalculatedPageOffset = ((IMG_UINT32)pvLinAddr) & ~PAGE_MASK; ++ if (ui32CalculatedPageOffset != ui32PageOffset) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "PVRSRVWrapExtMemoryKM: offset from address not match offset param")); ++ return PVRSRV_ERROR_BAD_MAPPING; ++ } ++ + psDeviceNode = (PVRSRV_DEVICE_NODE *) hDevCookie; + PVR_ASSERT(psDeviceNode != IMG_NULL); + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; +@@ -657,7 +744,7 @@ IMG_EXPORT + /* let's start by getting the address of the first page */ + eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr, + ui32HostPageSize, +- psIntSysPAddr, &hOSWrapMem); ++ psIntSysPAddr, &hOSWrapMem, IMG_TRUE); + if (eError != PVRSRV_OK) { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); +@@ -667,10 +754,10 @@ IMG_EXPORT + /* now check if this memory address is already wrapped */ + if (BM_IsWrapped(hDevMemHeap, ui32PageOffset, psIntSysPAddr[0])) { + /* already wrapped */ +- OSReleasePhysPageAddr(hOSWrapMem); ++ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); + hOSWrapMem = IMG_NULL; + } else if (ui32PageCount > 1) { +- OSReleasePhysPageAddr(hOSWrapMem); ++ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); + hOSWrapMem = IMG_NULL; + /* the memory is going to wrapped for the first time, + * so we need full page list */ +@@ -678,7 +765,8 @@ IMG_EXPORT + ui32PageCount * + ui32HostPageSize, + psIntSysPAddr, +- &hOSWrapMem); ++ &hOSWrapMem, ++ IMG_TRUE); + if (eError != PVRSRV_OK) { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); +@@ -783,7 +871,7 @@ ErrorExitPhase2: + } + + if (hOSWrapMem) +- OSReleasePhysPageAddr(hOSWrapMem); ++ OSReleasePhysPageAddr(hOSWrapMem, IMG_TRUE); + ErrorExitPhase1: + if (psIntSysPAddr) { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/osfunc.c kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/osfunc.c +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/osfunc.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/osfunc.c 2011-09-04 11:37:54.000000000 +0200 +@@ -1126,6 +1126,7 @@ PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hT + + psTimerCBData->bActive = 
IMG_TRUE; + ++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; + add_timer(&psTimerCBData->sTimer); + + return PVRSRV_OK; +@@ -1400,7 +1401,8 @@ exit_unlock: + return psPage; + } + +-PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem) ++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem, ++ IMG_BOOL bUseLock) { - struct sock *sk = sock->sk; - struct l2cap_options opts; - struct l2cap_conninfo cinfo; - int len, err = 0; -+ u32 opt; + sWrapMemInfo *psInfo = (sWrapMemInfo *) hOSWrapMem; + unsigned ui; +@@ -1415,7 +1417,8 @@ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_H + { + struct vm_area_struct *psVMArea; - BT_DBG("sk %p", sk); +- down_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ down_read(¤t->mm->mmap_sem); -@@ -1185,12 +1381,36 @@ static int l2cap_sock_getsockopt(struct - break; + psVMArea = find_vma(current->mm, psInfo->ulStartAddr); + if (psVMArea == NULL) { +@@ -1423,7 +1426,9 @@ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_H + ": OSCpuVToPageListRelease: Couldn't find memory region containing start address %lx", + psInfo->ulStartAddr); - case L2CAP_LM: -- if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval)) -+ switch (l2cap_pi(sk)->sec_level) { -+ case BT_SECURITY_LOW: -+ opt = L2CAP_LM_AUTH; -+ break; -+ case BT_SECURITY_MEDIUM: -+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT; -+ break; -+ case BT_SECURITY_HIGH: -+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | -+ L2CAP_LM_SECURE; -+ break; -+ default: -+ opt = 0; -+ break; -+ } -+ -+ if (l2cap_pi(sk)->role_switch) -+ opt |= L2CAP_LM_MASTER; -+ -+ if (l2cap_pi(sk)->force_reliable) -+ opt |= L2CAP_LM_RELIABLE; +- up_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ up_read(¤t->mm->mmap_sem); + -+ if (put_user(opt, (u32 __user *) optval)) - err = -EFAULT; - break; + break; + } + +@@ -1459,7 +1464,9 @@ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_H + psVMArea->vm_flags); + } - case L2CAP_CONNINFO: -- if (sk->sk_state != BT_CONNECTED) { -+ if (sk->sk_state != BT_CONNECTED && -+ !(sk->sk_state == BT_CONNECT2 && -+ bt_sk(sk)->defer_setup)) { - err = -ENOTCONN; +- up_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ up_read(¤t->mm->mmap_sem); ++ break; } -@@ -1213,6 +1433,60 @@ static int l2cap_sock_getsockopt(struct - return err; + default: +@@ -1514,10 +1521,11 @@ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_H + return PVRSRV_OK; } -+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) -+{ -+ struct sock *sk = sock->sk; -+ struct bt_security sec; -+ int len, err = 0; -+ -+ BT_DBG("sk %p", sk); -+ -+ if (level == SOL_L2CAP) -+ return l2cap_sock_getsockopt_old(sock, optname, optval, optlen); -+ -+ if (level != SOL_BLUETOOTH) -+ return -ENOPROTOOPT; -+ -+ if (get_user(len, optlen)) -+ return -EFAULT; -+ -+ lock_sock(sk); -+ -+ switch (optname) { -+ case BT_SECURITY: -+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) { -+ err = -EINVAL; -+ break; -+ } -+ -+ sec.level = l2cap_pi(sk)->sec_level; -+ -+ len = min_t(unsigned int, len, sizeof(sec)); -+ if (copy_to_user(optval, (char *) &sec, len)) -+ err = -EFAULT; -+ -+ break; -+ -+ case BT_DEFER_SETUP: -+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { -+ err = -EINVAL; -+ break; -+ } +-PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID * pvCPUVAddr, ++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID *pvCPUVAddr, + IMG_UINT32 ui32Bytes, +- IMG_SYS_PHYADDR * psSysPAddr, +- IMG_HANDLE * phOSWrapMem) ++ IMG_SYS_PHYADDR *psSysPAddr, ++ IMG_HANDLE *phOSWrapMem, ++ IMG_BOOL bUseLock) + { + unsigned long ulStartAddrOrig = 
(unsigned long)pvCPUVAddr; + unsigned long ulAddrRangeOrig = (unsigned long)ui32Bytes; +@@ -1538,7 +1546,7 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL); + if (psInfo == NULL) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Couldn't allocate information structure"); ++ ": OSCpuVToPageList: Couldn't allocate information structure\n"); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + memset(psInfo, 0, sizeof(*psInfo)); +@@ -1556,7 +1564,7 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + GFP_KERNEL); + if (psInfo->psPhysAddr == NULL) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Couldn't allocate page array"); ++ ": OSCpuVToPageList: Couldn't allocate page array\n"); + goto error_free; + } + +@@ -1564,21 +1572,26 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + kmalloc(psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL); + if (psInfo->ppsPages == NULL) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Couldn't allocate page array"); ++ ": OSCpuVToPageList: Couldn't allocate page array\n"); + goto error_free; + } + +- down_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ down_read(¤t->mm->mmap_sem); + -+ if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) -+ err = -EFAULT; + iNumPagesMapped = + get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, + 1, 0, psInfo->ppsPages, NULL); +- up_read(¤t->mm->mmap_sem); + -+ break; ++ if (bUseLock) ++ up_read(¤t->mm->mmap_sem); + -+ default: -+ err = -ENOPROTOOPT; -+ break; -+ } + + if (iNumPagesMapped >= 0) { + + if (iNumPagesMapped != psInfo->iNumPages) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Couldn't map all the pages needed (wanted: %d, got %d)", ++ ": OSCpuVToPageList: Couldn't map all the pages needed (wanted: %d, got %d \n)", + psInfo->iNumPages, iNumPagesMapped); + + for (ui = 0; ui < iNumPagesMapped; ui++) { +@@ -1605,15 +1618,16 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + } + + printk(KERN_WARNING +- ": OSCpuVToPageList: get_user_pages failed (%d), trying something else", ++ ": OSCpuVToPageList: get_user_pages failed (%d), trying something else \n", + iNumPagesMapped); + +- down_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ down_read(¤t->mm->mmap_sem); + + psVMArea = find_vma(current->mm, ulStartAddrOrig); + if (psVMArea == NULL) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Couldn't find memory region containing start address %lx", ++ ": OSCpuVToPageList: Couldn't find memory region containing start address %lx \n", + ulStartAddrOrig); + + goto error_release_mmap_sem; +@@ -1624,14 +1638,14 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + + if (ulStartAddrOrig < psVMArea->vm_start) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Start address %lx is outside of the region returned by find_vma", ++ ": OSCpuVToPageList: Start address %lx is outside of the region returned by find_vma\n", + ulStartAddrOrig); + goto error_release_mmap_sem; + } + + if (ulBeyondEndAddrOrig > psVMArea->vm_end) { + printk(KERN_WARNING +- ": OSCpuVToPageList: End address %lx is outside of the region returned by find_vma", ++ ": OSCpuVToPageList: End address %lx is outside of the region returned by find_vma\n", + ulBeyondEndAddrOrig); + goto error_release_mmap_sem; + } +@@ -1639,14 +1653,14 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != + (VM_IO | VM_RESERVED)) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", ++ ": OSCpuVToPageList: Memory region does not represent memory mapped I/O (VMA 
flags: 0x%lx)\n", + psVMArea->vm_flags); + goto error_release_mmap_sem; + } + + if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) { + printk(KERN_WARNING +- ": OSCpuVToPageList: No read/write access to memory region (VMA flags: 0x%lx)", ++ ": OSCpuVToPageList: No read/write access to memory region (VMA flags: 0x%lx)\n", + psVMArea->vm_flags); + goto error_release_mmap_sem; + } +@@ -1662,7 +1676,7 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + unsigned uj; + + printk(KERN_WARNING +- ": OSCpuVToPageList: Couldn't lookup page structure for address 0x%lx, trying something else", ++ ": OSCpuVToPageList: Couldn't lookup page structure for address 0x%lx, trying something else\n", + ulAddr); + + for (uj = 0; uj < ui; uj++) { +@@ -1693,7 +1707,7 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + + if ((psVMArea->vm_flags & VM_PFNMAP) == 0) { + printk(KERN_WARNING +- ": OSCpuVToPageList: Region isn't a raw PFN mapping. Giving up."); ++ ": OSCpuVToPageList: Region isn't a raw PFN mapping. Giving up.\n"); + goto error_release_mmap_sem; + } + +@@ -1714,10 +1728,11 @@ PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_V + psInfo->eType = WRAP_TYPE_FIND_VMA_PFN; + + printk(KERN_WARNING +- ": OSCpuVToPageList: Region can't be locked down"); ++ ": OSCpuVToPageList: Region can't be locked down\n"); + } + +- up_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ up_read(¤t->mm->mmap_sem); + + exit_check: + CheckPagesContiguous(psInfo); +@@ -1727,9 +1742,11 @@ exit_check: + return PVRSRV_OK; + + error_release_mmap_sem: +- up_read(¤t->mm->mmap_sem); ++ if (bUseLock) ++ up_read(¤t->mm->mmap_sem); + -+ release_sock(sk); -+ return err; + error_free: + psInfo->eType = WRAP_TYPE_CLEANUP; +- OSReleasePhysPageAddr((IMG_HANDLE) psInfo); ++ OSReleasePhysPageAddr((IMG_HANDLE) psInfo, bUseLock); + return PVRSRV_ERROR_GENERIC; + } +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/osfunc.h kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/osfunc.h +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/osfunc.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/osfunc.h 2011-09-04 11:37:54.000000000 +0200 +@@ -291,8 +291,10 @@ + PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID * pvCPUVAddr, + IMG_UINT32 ui32Bytes, + IMG_SYS_PHYADDR * psSysPAddr, +- IMG_HANDLE * phOSWrapMem); +- PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem); ++ IMG_HANDLE * phOSWrapMem, ++ IMG_BOOL bUseLock); ++ PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem, ++ IMG_BOOL bUseLock); + + + #endif +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/power.c kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/power.c +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/power.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/power.c 2011-09-04 11:37:54.000000000 +0200 +@@ -106,12 +106,16 @@ IMG_EXPORT IMG_VOID PVRSRVDvfsUnlock(IMG + mutex_unlock(&hPowerAndFreqLock); + } + ++static IMG_BOOL IsPowerLocked(void) ++{ ++ return mutex_is_locked(&hPowerAndFreqLock) || gbDvfsActive; +} + - static int l2cap_sock_shutdown(struct socket *sock, int how) + IMG_EXPORT + PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, + IMG_BOOL bSystemPowerEvent) { - struct sock *sk = sock->sk; -@@ -1275,11 +1549,6 @@ static void l2cap_chan_ready(struct sock - */ - parent->sk_data_ready(parent, 0); +- if ((ui32CallerID == TIMER_ID) && +- (mutex_is_locked(&hPowerAndFreqLock) || gbDvfsActive)) ++ if ((ui32CallerID == TIMER_ID) && IsPowerLocked()) + return PVRSRV_ERROR_RETRY; + mutex_lock(&hPowerAndFreqLock); + 
while (gbDvfsActive) { +@@ -553,11 +557,8 @@ IMG_EXPORT IMG_BOOL PVRSRVIsDevicePowere + return IMG_FALSE; } -- -- if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) { -- struct l2cap_conn *conn = l2cap_pi(sk)->conn; -- hci_conn_change_link_key(conn->hcon); + +- if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) +- || OSIsResourceLocked(&psSysData->sPowerStateChangeResource, +- ISR_ID)) { ++ if (IsPowerLocked()) + return IMG_FALSE; - } - } - /* Copy frame to all raw sockets on that connection */ -@@ -1554,8 +1823,11 @@ static inline int l2cap_command_rej(stru + psPowerDevice = psSysData->psPowerDeviceList; + while (psPowerDevice) { +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/pvr_bridge.h kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/pvr_bridge.h +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/pvr_bridge.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/pvr_bridge.h 2011-09-04 11:37:54.000000000 +0200 +@@ -307,8 +307,9 @@ - if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && - cmd->ident == conn->info_ident) { -- conn->info_ident = 0; - del_timer(&conn->info_timer); -+ -+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; -+ conn->info_ident = 0; -+ - l2cap_conn_start(conn); - } + typedef struct PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER_TAG { + IMG_UINT32 ui32BridgeFlags; ++ IMG_HANDLE hDevCookie; + IMG_UINT32 ui32Type; +- IMG_UINT32 ui32Virt; ++ IMG_VOID *pvVirt; + IMG_UINT32 ui32Length; -@@ -1585,6 +1857,7 @@ static inline int l2cap_connect_req(stru - /* Check if the ACL is secure enough (if not SDP) */ - if (psm != cpu_to_le16(0x0001) && - !hci_conn_check_link_mode(conn->hcon)) { -+ conn->disc_reason = 0x05; - result = L2CAP_CR_SEC_BLOCK; - goto response; - } -@@ -1626,11 +1899,18 @@ static inline int l2cap_connect_req(stru + } PVRSRV_BRIDGE_IN_CACHEFLUSHDRMFROMUSER; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/pvr_bridge_km.h kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/pvr_bridge_km.h +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/pvr_bridge_km.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/pvr_bridge_km.h 2011-09-04 11:37:54.000000000 +0200 +@@ -186,6 +186,13 @@ - l2cap_pi(sk)->ident = cmd->ident; + IMG_IMPORT + PVRSRV_ERROR IMG_CALLCONV ++ PVRSRVIsWrappedExtMemoryKM(IMG_HANDLE hDevCookie, ++ PVRSRV_PER_PROCESS_DATA *psPerProc, ++ IMG_UINT32 *pui32ByteSize, ++ IMG_VOID **pvLinAddr); ++ ++ IMG_IMPORT ++ PVRSRV_ERROR IMG_CALLCONV + PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO * psMemInfo); -- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { -- if (l2cap_check_link_mode(sk)) { -- sk->sk_state = BT_CONFIG; -- result = L2CAP_CR_SUCCESS; -- status = L2CAP_CS_NO_INFO; -+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { -+ if (l2cap_check_security(sk)) { -+ if (bt_sk(sk)->defer_setup) { -+ sk->sk_state = BT_CONNECT2; -+ result = L2CAP_CR_PEND; -+ status = L2CAP_CS_AUTHOR_PEND; -+ parent->sk_data_ready(parent, 0); -+ } else { -+ sk->sk_state = BT_CONFIG; -+ result = L2CAP_CR_SUCCESS; -+ status = L2CAP_CS_NO_INFO; -+ } - } else { - sk->sk_state = BT_CONNECT2; - result = L2CAP_CR_PEND; -@@ -1700,11 +1980,14 @@ static inline int l2cap_connect_rsp(stru - l2cap_pi(sk)->dcid = dcid; - l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; + IMG_IMPORT +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/sgxconfig.h kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/sgxconfig.h +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/sgxconfig.h 2011-09-04 
11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/sgxconfig.h 2011-09-04 11:37:54.000000000 +0200 +@@ -44,10 +44,10 @@ + #define SGX_GENERAL_HEAP_SIZE (0x05000000-0x00401000) -+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND; -+ - l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, - l2cap_build_conf_req(sk, req), req); - break; + #define SGX_GENERAL_MAPPING_HEAP_BASE 0x05000000 +-#define SGX_GENERAL_MAPPING_HEAP_SIZE (0x06C00000-0x05001000) ++#define SGX_GENERAL_MAPPING_HEAP_SIZE (0x06800000-0x05001000) - case L2CAP_CR_PEND: -+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; - break; +-#define SGX_FB_MAPPING_HEAP_BASE 0x06C00000 +-#define SGX_FB_MAPPING_HEAP_SIZE (0x07000000-0x06C01000) ++#define SGX_FB_MAPPING_HEAP_BASE 0x06800000 ++#define SGX_FB_MAPPING_HEAP_SIZE (0x07000000-0x06801000) - default: -@@ -1913,6 +2196,14 @@ static inline int l2cap_information_req( - put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data); - l2cap_send_cmd(conn, cmd->ident, - L2CAP_INFO_RSP, sizeof(buf), buf); -+ } else if (type == L2CAP_IT_FIXED_CHAN) { -+ u8 buf[12]; -+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; -+ rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); -+ rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); -+ memcpy(buf + 4, l2cap_fixed_chan, 8); -+ l2cap_send_cmd(conn, cmd->ident, -+ L2CAP_INFO_RSP, sizeof(buf), buf); - } else { - struct l2cap_info_rsp rsp; - rsp.type = cpu_to_le16(type); -@@ -1934,14 +2225,31 @@ static inline int l2cap_information_rsp( + #define SGX_TADATA_HEAP_BASE 0x07000000 + #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/sgxinit.c kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/sgxinit.c +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/sgxinit.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/sgxinit.c 2011-09-04 11:37:54.000000000 +0200 +@@ -225,8 +225,12 @@ static IMG_VOID SGXGetTimingInfo(PVRSRV_ + } + if (psDevInfo->hTimer == IMG_NULL) { - BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); ++ /* ++ * the magic calculation below sets the hardware lock-up ++ * detection and recovery timer interval to ~150msecs ++ */ + psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode, +- 1000 * 50 / ++ 1000 * 150 / + psSGXTimingInfo-> + ui32uKernelFreq); + if (psDevInfo->hTimer == IMG_NULL) { +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/sysconfig.h kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/sysconfig.h +--- kernel-2.6.28-20094102.6+0m5/drivers/gpu/pvr/sysconfig.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/gpu/pvr/sysconfig.h 2011-09-04 11:37:54.000000000 +0200 +@@ -34,7 +34,7 @@ + #define SYS_SGX_CLOCK_SPEED 110666666 + #define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100) + #define SYS_SGX_PDS_TIMER_FREQ (1000) +-#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (500) ++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (100) -- conn->info_ident = 0; -- - del_timer(&conn->info_timer); + #define SYS_OMAP3430_VDD2_OPP3_SGX_CLOCK_SPEED SYS_SGX_CLOCK_SPEED + #define SYS_OMAP3430_VDD2_OPP2_SGX_CLOCK_SPEED (SYS_SGX_CLOCK_SPEED / 2) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/et8ek8.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/et8ek8.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/et8ek8.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/et8ek8.c 2011-09-04 11:37:54.000000000 +0200 +@@ -777,10 +777,11 @@ static int 
et8ek8_ioctl_s_power(struct v + enum v4l2_power new_state) + { + struct et8ek8_sensor *sensor = s->priv; ++ enum v4l2_power old_state = sensor->power; + int rval = 0; -- if (type == L2CAP_IT_FEAT_MASK) -+ if (type == L2CAP_IT_FEAT_MASK) { - conn->feat_mask = get_unaligned_le32(rsp->data); + /* If we are already in this mode, do nothing */ +- if (sensor->power == new_state) ++ if (old_state == new_state) + return 0; -- l2cap_conn_start(conn); -+ if (conn->feat_mask & 0x0080) { -+ struct l2cap_info_req req; -+ req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); -+ -+ conn->info_ident = l2cap_get_ident(conn); -+ -+ l2cap_send_cmd(conn, conn->info_ident, -+ L2CAP_INFO_REQ, sizeof(req), &req); -+ } else { -+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; -+ conn->info_ident = 0; -+ -+ l2cap_conn_start(conn); -+ } -+ } else if (type == L2CAP_IT_FIXED_CHAN) { -+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; -+ conn->info_ident = 0; -+ -+ l2cap_conn_start(conn); -+ } + /* Disable power if so requested (it was enabled) */ +@@ -796,7 +797,7 @@ static int et8ek8_ioctl_s_power(struct v + /* Either STANDBY or ON requested */ - return 0; - } -@@ -2148,10 +2456,15 @@ static int l2cap_connect_ind(struct hci_ - continue; + /* Enable power and move to standby if it was off */ +- if (sensor->power == V4L2_POWER_OFF) { ++ if (old_state == V4L2_POWER_OFF) { + rval = et8ek8_power_on(s); + if (rval) + goto out; +@@ -806,10 +807,11 @@ static int et8ek8_ioctl_s_power(struct v - if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { -- lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode); -+ lm1 |= HCI_LM_ACCEPT; -+ if (l2cap_pi(sk)->role_switch) -+ lm1 |= HCI_LM_MASTER; - exact++; -- } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) -- lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode); -+ } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { -+ lm2 |= HCI_LM_ACCEPT; -+ if (l2cap_pi(sk)->role_switch) -+ lm2 |= HCI_LM_MASTER; -+ } + if (new_state == V4L2_POWER_ON) { + /* Standby -> streaming */ ++ sensor->power = V4L2_POWER_ON; + rval = et8ek8_configure(s); + if (rval) { + et8ek8_stream_off(s); +- if (sensor->power == V4L2_POWER_OFF) ++ if (old_state == V4L2_POWER_OFF) + et8ek8_power_off(s); + goto out; + } +@@ -820,9 +822,7 @@ static int et8ek8_ioctl_s_power(struct v } - read_unlock(&l2cap_sk_list.lock); -@@ -2177,89 +2490,48 @@ static int l2cap_connect_cfm(struct hci_ - return 0; + out: +- if (rval == 0) +- sensor->power = new_state; +- ++ sensor->power = (rval == 0) ? 
new_state : old_state; + return rval; } --static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason) -+static int l2cap_disconn_ind(struct hci_conn *hcon) - { -- BT_DBG("hcon %p reason %d", hcon, reason); -+ struct l2cap_conn *conn = hcon->l2cap_data; - -- if (hcon->type != ACL_LINK) -- return 0; -+ BT_DBG("hcon %p", hcon); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp_af.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp_af.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp_af.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp_af.c 2011-09-04 11:37:54.000000000 +0200 +@@ -194,6 +194,7 @@ void isp_af_config_registers(struct isp_ + ~AF_PCR_MASK, pcr); -- l2cap_conn_del(hcon, bt_err(reason)); -+ if (hcon->type != ACL_LINK || !conn) -+ return 0x13; + isp_af->update = 0; ++ isp_af->stat.config_counter++; + ispstat_bufs_set_size(&isp_af->stat, isp_af->buf_size); -- return 0; -+ return conn->disc_reason; + spin_unlock_irqrestore(isp_af->lock, irqflags); +@@ -299,8 +300,8 @@ void isp_af_try_enable(struct isp_af_dev } --static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) -+static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) + /* Function to perform hardware set up */ +-int omap34xx_isp_af_config(struct isp_af_device *isp_af, +- struct af_configuration *afconfig) ++int isp_af_config(struct isp_af_device *isp_af, ++ struct af_configuration *afconfig) { -- struct l2cap_chan_list *l; -- struct l2cap_conn *conn = hcon->l2cap_data; -- struct sock *sk; -+ BT_DBG("hcon %p reason %d", hcon, reason); + struct device *dev = to_device(isp_af); + int result; +@@ -337,14 +338,14 @@ int omap34xx_isp_af_config(struct isp_af + /* Success */ + return 0; + } +-EXPORT_SYMBOL(omap34xx_isp_af_config); ++EXPORT_SYMBOL(isp_af_config); -- if (!conn) -+ if (hcon->type != ACL_LINK) - return 0; + /* + * This API allows the user to update White Balance gains, as well as + * exposure time and analog gain. It is also used to request frame + * statistics. 
+ */ +-int omap34xx_isp_af_request_statistics(struct isp_af_device *isp_af, ++int isp_af_request_statistics(struct isp_af_device *isp_af, + struct isp_af_data *afdata) + { + struct device *dev = to_device(isp_af); +@@ -374,7 +375,7 @@ int omap34xx_isp_af_request_statistics(s -- l = &conn->chan_list; -- -- BT_DBG("conn %p", conn); -- -- read_lock(&l->lock); -- -- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { -- struct l2cap_pinfo *pi = l2cap_pi(sk); -- -- bh_lock_sock(sk); -- -- if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) && -- !(hcon->link_mode & HCI_LM_ENCRYPT) && -- !status) { -- bh_unlock_sock(sk); -- continue; -- } -- -- if (sk->sk_state == BT_CONNECT) { -- if (!status) { -- struct l2cap_conn_req req; -- req.scid = cpu_to_le16(l2cap_pi(sk)->scid); -- req.psm = l2cap_pi(sk)->psm; -- -- l2cap_pi(sk)->ident = l2cap_get_ident(conn); -- -- l2cap_send_cmd(conn, l2cap_pi(sk)->ident, -- L2CAP_CONN_REQ, sizeof(req), &req); -- } else { -- l2cap_sock_clear_timer(sk); -- l2cap_sock_set_timer(sk, HZ / 10); -- } -- } else if (sk->sk_state == BT_CONNECT2) { -- struct l2cap_conn_rsp rsp; -- __u16 result; -+ l2cap_conn_del(hcon, bt_err(reason)); + return 0; + } +-EXPORT_SYMBOL(omap34xx_isp_af_request_statistics); ++EXPORT_SYMBOL(isp_af_request_statistics); -- if (!status) { -- sk->sk_state = BT_CONFIG; -- result = L2CAP_CR_SUCCESS; -- } else { -- sk->sk_state = BT_DISCONN; -- l2cap_sock_set_timer(sk, HZ / 10); -- result = L2CAP_CR_SEC_BLOCK; -- } -+ return 0; -+} + /* This function will handle the AF buffer. */ + int isp_af_buf_process(struct isp_af_device *isp_af) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp_af.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp_af.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp_af.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp_af.h 2011-09-04 11:37:54.000000000 +0200 +@@ -131,9 +131,8 @@ void isp_af_suspend(struct isp_af_device + void isp_af_resume(struct isp_af_device *); + int isp_af_busy(struct isp_af_device *); + void isp_af_config_registers(struct isp_af_device *isp_af); +-int omap34xx_isp_af_request_statistics(struct isp_af_device *, +- struct isp_af_data *afdata); +-int omap34xx_isp_af_config(struct isp_af_device *, +- struct af_configuration *afconfig); ++int isp_af_request_statistics(struct isp_af_device *, ++ struct isp_af_data *afdata); ++int isp_af_config(struct isp_af_device *, struct af_configuration *afconfig); -- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); -- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); -- rsp.result = cpu_to_le16(result); -- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); -- l2cap_send_cmd(conn, l2cap_pi(sk)->ident, -- L2CAP_CONN_RSP, sizeof(rsp), &rsp); -- } -+static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt) -+{ -+ if (sk->sk_type != SOCK_SEQPACKET) -+ return; + #endif /* OMAP_ISP_AF_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp.c 2011-09-04 11:37:54.000000000 +0200 +@@ -294,7 +294,7 @@ static void isp_enable_interrupts(struct + u32 irq0enable; -- bh_unlock_sock(sk); -+ if (encrypt == 0x00) { -+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) { -+ l2cap_sock_clear_timer(sk); -+ l2cap_sock_set_timer(sk, HZ * 5); -+ } else if (l2cap_pi(sk)->sec_level 
== BT_SECURITY_HIGH) -+ __l2cap_sock_close(sk, ECONNREFUSED); -+ } else { -+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) -+ l2cap_sock_clear_timer(sk); - } -- -- read_unlock(&l->lock); -- -- return 0; + irq0enable = IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ +- | IRQ0ENABLE_CCDC_VD0_IRQ ++ | IRQ0ENABLE_CCDC_VD0_IRQ | IRQ0ENABLE_HS_VS_IRQ + | IRQ0ENABLE_CSIA_IRQ + | IRQ0ENABLE_CSIB_IRQ | IRQ0ENABLE_HIST_DONE_IRQ + | IRQ0ENABLE_H3A_AWB_DONE_IRQ | IRQ0ENABLE_H3A_AF_DONE_IRQ +@@ -711,7 +711,7 @@ int isp_configure_interface(struct devic } + EXPORT_SYMBOL(isp_configure_interface); --static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) -+static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) +-void omap34xx_isp_hist_dma_done(struct device *dev) ++void isp_hist_dma_done(struct device *dev) { - struct l2cap_chan_list *l; - struct l2cap_conn *conn = hcon->l2cap_data; -@@ -2275,15 +2547,16 @@ static int l2cap_encrypt_cfm(struct hci_ - read_lock(&l->lock); + struct isp_device *isp = dev_get_drvdata(dev); + struct isp_irq *irqdis = &isp->irq; +@@ -734,7 +734,7 @@ void omap34xx_isp_hist_dma_done(struct d + static void isp_buf_process(struct device *dev, struct isp_bufs *bufs); - for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { -- struct l2cap_pinfo *pi = l2cap_pi(sk); -- - bh_lock_sock(sk); + /** +- * omap34xx_isp_isr - Interrupt Service Routine for Camera ISP module. ++ * isp_isr - Interrupt Service Routine for Camera ISP module. + * @irq: Not used currently. + * @ispirq_disp: Pointer to the object that is passed while request_irq is + * called. This is the isp->irq object containing info on the +@@ -745,7 +745,7 @@ static void isp_buf_process(struct devic + * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the + * IRQ wasn't handled. + **/ +-static irqreturn_t omap34xx_isp_isr(int irq, void *_pdev) ++static irqreturn_t isp_isr(int irq, void *_pdev) + { + struct device *dev = &((struct platform_device *)_pdev)->dev; + struct isp_device *isp = dev_get_drvdata(dev); +@@ -755,7 +755,7 @@ static irqreturn_t omap34xx_isp_isr(int + unsigned long flags; + u32 irqstatus = 0; + u32 sbl_pcr; +- int wait_hs_vs = 0; ++ int wait_hs_vs; + int ret; -- if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) && -- (sk->sk_state == BT_CONNECTED || -- sk->sk_state == BT_CONFIG) && -- !status && encrypt == 0x00) { -- __l2cap_sock_close(sk, ECONNREFUSED); -+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) { -+ bh_unlock_sock(sk); -+ continue; -+ } -+ -+ if (!status && (sk->sk_state == BT_CONNECTED || -+ sk->sk_state == BT_CONFIG)) { -+ l2cap_check_encryption(sk, encrypt); - bh_unlock_sock(sk); - continue; + if (isp->running == ISP_STOPPED) +@@ -773,6 +773,8 @@ static irqreturn_t omap34xx_isp_isr(int + wait_hs_vs = bufs->wait_hs_vs; + if (irqstatus & CCDC_VD0 && bufs->wait_hs_vs) + bufs->wait_hs_vs--; ++ if (irqstatus & HS_VS && bufs->wait_stats && !bufs->wait_hs_vs) ++ bufs->wait_stats = 0; + /* + * We need to wait for the first HS_VS interrupt from CCDC. + * Otherwise our frame (and everything else) might be bad. +@@ -783,12 +785,18 @@ static irqreturn_t omap34xx_isp_isr(int + * Enable preview for the first time. We just have + * missed the start-of-frame so we can do it now. 
+ */ +- if (irqstatus & CCDC_VD0 && +- !RAW_CAPTURE(isp) && +- !(isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR) & +- (ISPPRV_PCR_BUSY | ISPPRV_PCR_EN))) { +- isppreview_config_shadow_registers(&isp->isp_prev); +- isppreview_enable(&isp->isp_prev, 1); ++ if (irqstatus & CCDC_VD0) { ++ isp_af_try_enable(&isp->isp_af); ++ isph3a_aewb_try_enable(&isp->isp_h3a); ++ isp_hist_try_enable(&isp->isp_hist); ++ if (!RAW_CAPTURE(isp) && ++ !(isp_reg_readl(dev, OMAP3_ISP_IOMEM_PREV, ++ ISPPRV_PCR) & ++ (ISPPRV_PCR_BUSY | ISPPRV_PCR_EN))) { ++ isppreview_config_shadow_registers( ++ &isp->isp_prev); ++ isppreview_enable(&isp->isp_prev, 1); ++ } } -@@ -2381,14 +2654,17 @@ static int l2cap_recv_acldata(struct hci - goto drop; - - skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), -- skb->len); -+ skb->len); - conn->rx_len = len - skb->len; - } else { - BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); + default: + /* +@@ -811,6 +819,13 @@ static irqreturn_t omap34xx_isp_isr(int - if (!conn->rx_len) { -+ /* Hack hack - workaround for bcm2048 bug. It sends -+ empty frames every now and then without reason. - BT_ERR("Unexpected continuation frame (len %d)", skb->len); - l2cap_conn_unreliable(conn, ECOMM); -+ */ - goto drop; - } + goto out_ignore_buff; + case 0: ++ if (bufs->wait_stats) { ++ if (irqstatus & (H3A_AWB_DONE | H3A_AF_DONE)) ++ irqstatus &= ~(H3A_AWB_DONE | H3A_AF_DONE); ++ if (irqstatus & HIST_DONE) ++ isp_hist_mark_invalid_buf(&isp->isp_hist); ++ } ++ + break; + } -@@ -2403,7 +2679,7 @@ static int l2cap_recv_acldata(struct hci +@@ -965,7 +980,6 @@ static irqreturn_t omap34xx_isp_isr(int + "busy.\n"); + /* current and next buffer might have invalid data */ + isp_hist_mark_invalid_buf(&isp->isp_hist); +- irqstatus &= ~HIST_DONE; + ret = HIST_NO_BUF; } + if (ret != HIST_BUF_WAITING_DMA) +@@ -974,7 +988,7 @@ static irqreturn_t omap34xx_isp_isr(int + irqstatus &= ~HIST_DONE; + } - skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), -- skb->len); -+ skb->len); - conn->rx_len -= skb->len; +- if (irqdis->isp_callbk[CBK_CATCHALL]) { ++ if (irqdis->isp_callbk[CBK_CATCHALL] && irqstatus) { + irqdis->isp_callbk[CBK_CATCHALL]( + irqstatus, + irqdis->isp_callbk_arg1[CBK_CATCHALL], +@@ -1123,6 +1137,11 @@ static int __isp_disable_modules(struct + isp_af_enable(&isp->isp_af, 0); + isph3a_aewb_enable(&isp->isp_h3a, 0); + isp_hist_enable(&isp->isp_hist, 0); ++ ++ /* FIXME: find me a better interface */ ++ isp->isp_af.config.af_config = 0; ++ isp->isp_h3a.aewb_config_local.aewb_enable = 0; ++ isp->isp_hist.config.enable = 0; + } + ispresizer_enable(&isp->isp_res, 0); + isppreview_enable(&isp->isp_prev, 0); +@@ -1424,6 +1443,7 @@ static void isp_buf_init(struct device * + bufs->queue = 0; + bufs->done = 0; + bufs->wait_hs_vs = isp->config->wait_hs_vs; ++ bufs->wait_stats = bufs->wait_hs_vs; + for (sg = 0; sg < NUM_BUFS; sg++) { + if (bufs->buf[sg].vb) { + bufs->buf[sg].vb->state = VIDEOBUF_ERROR; +@@ -1528,11 +1548,9 @@ int isp_buf_queue(struct device *dev, st + * receiving a frame. 
+ */ + bufs->wait_hs_vs++; ++ bufs->wait_stats = 1; + isp_enable_interrupts(dev, RAW_CAPTURE(isp)); + isp_set_buf(dev, buf); +- isp_af_try_enable(&isp->isp_af); +- isph3a_aewb_try_enable(&isp->isp_h3a); +- isp_hist_try_enable(&isp->isp_hist); + ispccdc_enable(&isp->isp_ccdc, 1); + } - if (!conn->rx_len) { -@@ -2429,10 +2705,10 @@ static ssize_t l2cap_sysfs_show(struct c - sk_for_each(sk, node, &l2cap_sk_list.head) { - struct l2cap_pinfo *pi = l2cap_pi(sk); +@@ -1814,48 +1832,48 @@ int isp_handle_private(struct device *de -- str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n", -+ str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", - batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), - sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid, -- pi->imtu, pi->omtu, pi->link_mode); -+ pi->imtu, pi->omtu, pi->sec_level); + switch (cmd) { + case VIDIOC_PRIVATE_ISP_CCDC_CFG: +- rval = omap34xx_isp_ccdc_config(&isp->isp_ccdc, arg); ++ rval = ispccdc_config(&isp->isp_ccdc, arg); + break; + case VIDIOC_PRIVATE_ISP_PRV_CFG: +- rval = omap34xx_isp_preview_config(&isp->isp_prev, arg); ++ rval = isppreview_config(&isp->isp_prev, arg); + break; + case VIDIOC_PRIVATE_ISP_AEWB_CFG: { + struct isph3a_aewb_config *params; + params = (struct isph3a_aewb_config *)arg; +- rval = omap34xx_isph3a_aewb_config(&isp->isp_h3a, params); ++ rval = isph3a_aewb_config(&isp->isp_h3a, params); + } + break; + case VIDIOC_PRIVATE_ISP_AEWB_REQ: { + struct isph3a_aewb_data *data; + data = (struct isph3a_aewb_data *)arg; +- rval = omap34xx_isph3a_aewb_request_statistics(&isp->isp_h3a, ++ rval = isph3a_aewb_request_statistics(&isp->isp_h3a, + data); + } + break; + case VIDIOC_PRIVATE_ISP_HIST_CFG: { + struct isp_hist_config *params; + params = (struct isp_hist_config *)arg; +- rval = omap34xx_isp_hist_config(&isp->isp_hist, params); ++ rval = isp_hist_config(&isp->isp_hist, params); } + break; + case VIDIOC_PRIVATE_ISP_HIST_REQ: { + struct isp_hist_data *data; + data = (struct isp_hist_data *)arg; +- rval = omap34xx_isp_hist_request_statistics(&isp->isp_hist, ++ rval = isp_hist_request_statistics(&isp->isp_hist, + data); + } + break; + case VIDIOC_PRIVATE_ISP_AF_CFG: { + struct af_configuration *params; + params = (struct af_configuration *)arg; +- rval = omap34xx_isp_af_config(&isp->isp_af, params); ++ rval = isp_af_config(&isp->isp_af, params); - read_unlock_bh(&l2cap_sk_list.lock); -@@ -2452,7 +2728,7 @@ static const struct proto_ops l2cap_sock - .accept = l2cap_sock_accept, - .getname = l2cap_sock_getname, - .sendmsg = l2cap_sock_sendmsg, -- .recvmsg = bt_sock_recvmsg, -+ .recvmsg = l2cap_sock_recvmsg, - .poll = bt_sock_poll, - .ioctl = bt_sock_ioctl, - .mmap = sock_no_mmap, -@@ -2474,8 +2750,8 @@ static struct hci_proto l2cap_hci_proto - .connect_ind = l2cap_connect_ind, - .connect_cfm = l2cap_connect_cfm, - .disconn_ind = l2cap_disconn_ind, -- .auth_cfm = l2cap_auth_cfm, -- .encrypt_cfm = l2cap_encrypt_cfm, -+ .disconn_cfm = l2cap_disconn_cfm, -+ .security_cfm = l2cap_security_cfm, - .recv_acldata = l2cap_recv_acldata - }; + } + break; + case VIDIOC_PRIVATE_ISP_AF_REQ: { + struct isp_af_data *data; + data = (struct isp_af_data *)arg; +- rval = omap34xx_isp_af_request_statistics(&isp->isp_af, data); ++ rval = isp_af_request_statistics(&isp->isp_af, data); + } + break; + default: +@@ -2029,7 +2047,7 @@ static void isp_save_ctx(struct device * + ispccdc_save_context(dev); + if (isp->iommu) + iommu_save_ctx(isp->iommu); +- isphist_save_context(dev); ++ isp_hist_save_context(dev); + isph3a_save_context(dev); + 
isppreview_save_context(dev); + ispresizer_save_context(dev); +@@ -2049,7 +2067,7 @@ static void isp_restore_ctx(struct devic + ispccdc_restore_context(dev); + if (isp->iommu) + iommu_restore_ctx(isp->iommu); +- isphist_restore_context(dev); ++ isp_hist_restore_context(dev); + isph3a_restore_context(dev); + isppreview_restore_context(dev); + ispresizer_restore_context(dev); +@@ -2389,7 +2407,7 @@ static int isp_probe(struct platform_dev + goto out_clk_get_l3_ick; + } -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/rfcomm/core.c linux-omap-2.6.28-nokia1/net/bluetooth/rfcomm/core.c ---- linux-omap-2.6.28-omap1/net/bluetooth/rfcomm/core.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/rfcomm/core.c 2011-06-22 13:19:33.283063268 +0200 -@@ -46,12 +46,7 @@ - #include - #include +- if (request_irq(isp->irq_num, omap34xx_isp_isr, IRQF_SHARED, ++ if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, + "Omap 3 Camera ISP", pdev)) { + dev_err(isp->dev, "could not install isr\n"); + ret_err = -EINVAL; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/ispccdc.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/ispccdc.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/ispccdc.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/ispccdc.c 2011-09-04 11:37:54.000000000 +0200 +@@ -29,6 +29,7 @@ + #include "ispccdc.h" --#ifndef CONFIG_BT_RFCOMM_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- --#define VERSION "1.10" -+#define VERSION "1.11" + #define LSC_TABLE_INIT_SIZE 50052 ++#define PTR_FREE ((u32)(-ENOMEM)) - static int disable_cfc = 0; - static int channel_mtu = -1; -@@ -228,19 +223,52 @@ static int rfcomm_l2sock_create(struct s - return err; - } + /* Structure for saving/restoring CCDC module registers*/ + static struct isp_reg ispccdc_reg_list[] = { +@@ -76,1008 +77,905 @@ static struct isp_reg ispccdc_reg_list[] + }; --static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d) -+static inline int rfcomm_check_security(struct rfcomm_dlc *d) + /** +- * omap34xx_isp_ccdc_config - Sets CCDC configuration from userspace +- * @userspace_add: Structure containing CCDC configuration sent from userspace. ++ * ispccdc_print_status - Prints the values of the CCDC Module registers + * +- * Returns 0 if successful, -EINVAL if the pointer to the configuration +- * structure is null, or the copy_from_user function fails to copy user space +- * memory to kernel space memory. ++ * Also prints other debug information stored in the CCDC module. 
+ **/ +-int omap34xx_isp_ccdc_config(struct isp_ccdc_device *isp_ccdc, +- void *userspace_add) ++static void ispccdc_print_status(struct isp_ccdc_device *isp_ccdc, ++ struct isp_pipeline *pipe) { - struct sock *sk = d->session->sock->sk; -+ __u8 auth_type; +- struct isp_device *isp = to_isp_device(isp_ccdc); +- struct ispccdc_bclamp bclamp_t; +- struct ispccdc_blcomp blcomp_t; +- struct ispccdc_fpc fpc_t; +- struct ispccdc_culling cull_t; +- struct ispccdc_update_config *ccdc_struct; +- +- if (userspace_add == NULL) +- return -EINVAL; +- +- ccdc_struct = userspace_add; +- +- if (ISP_ABS_CCDC_ALAW & ccdc_struct->flag) { +- if (ISP_ABS_CCDC_ALAW & ccdc_struct->update) +- ispccdc_config_alaw(isp_ccdc, ccdc_struct->alawip); +- ispccdc_enable_alaw(isp_ccdc, 1); +- } else if (ISP_ABS_CCDC_ALAW & ccdc_struct->update) +- ispccdc_enable_alaw(isp_ccdc, 0); +- +- if (ISP_ABS_CCDC_LPF & ccdc_struct->flag) +- ispccdc_enable_lpf(isp_ccdc, 1); +- else +- ispccdc_enable_lpf(isp_ccdc, 0); +- +- if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->flag) { +- if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) { +- if (copy_from_user(&bclamp_t, (struct ispccdc_bclamp *) +- ccdc_struct->bclamp, +- sizeof(struct ispccdc_bclamp))) +- goto copy_from_user_err; +- +- ispccdc_enable_black_clamp(isp_ccdc, 1); +- ispccdc_config_black_clamp(isp_ccdc, bclamp_t); +- } else +- ispccdc_enable_black_clamp(isp_ccdc, 1); +- } else { +- if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) { +- if (copy_from_user(&bclamp_t, (struct ispccdc_bclamp *) +- ccdc_struct->bclamp, +- sizeof(struct ispccdc_bclamp))) +- goto copy_from_user_err; +- +- ispccdc_enable_black_clamp(isp_ccdc, 0); +- ispccdc_config_black_clamp(isp_ccdc, bclamp_t); +- } +- } +- +- if (ISP_ABS_CCDC_BCOMP & ccdc_struct->update) { +- if (copy_from_user(&blcomp_t, (struct ispccdc_blcomp *) +- ccdc_struct->blcomp, +- sizeof(blcomp_t))) +- goto copy_from_user_err; +- +- ispccdc_config_black_comp(isp_ccdc, blcomp_t); +- } +- +- if (ISP_ABS_CCDC_FPC & ccdc_struct->flag) { +- if (ISP_ABS_CCDC_FPC & ccdc_struct->update) { +- if (copy_from_user(&fpc_t, (struct ispccdc_fpc *) +- ccdc_struct->fpc, +- sizeof(fpc_t))) +- goto copy_from_user_err; +- isp_ccdc->fpc_table_add = kmalloc(64 + fpc_t.fpnum * 4, +- GFP_KERNEL | GFP_DMA); +- if (!isp_ccdc->fpc_table_add) { +- dev_err(to_device(isp_ccdc), +- "ccdc: Cannot allocate memory for" +- " FPC table"); +- return -ENOMEM; +- } +- while (((unsigned long)isp_ccdc->fpc_table_add +- & 0xFFFFFFC0) +- != (unsigned long)isp_ccdc->fpc_table_add) +- isp_ccdc->fpc_table_add++; +- +- isp_ccdc->fpc_table_add_m = iommu_kmap( +- isp->iommu, +- 0, +- virt_to_phys(isp_ccdc->fpc_table_add), +- fpc_t.fpnum * 4, +- IOMMU_FLAG); +- /* FIXME: Correct unwinding */ +- BUG_ON(IS_ERR_VALUE(isp_ccdc->fpc_table_add_m)); +- +- if (copy_from_user(isp_ccdc->fpc_table_add, +- (u32 *)fpc_t.fpcaddr, +- fpc_t.fpnum * 4)) +- goto copy_from_user_err; +- +- fpc_t.fpcaddr = isp_ccdc->fpc_table_add_m; +- ispccdc_config_fpc(isp_ccdc, fpc_t); +- } +- ispccdc_enable_fpc(isp_ccdc, 1); +- } else if (ISP_ABS_CCDC_FPC & ccdc_struct->update) +- ispccdc_enable_fpc(isp_ccdc, 0); ++ if (!is_ispccdc_debug_enabled()) ++ return; + +- if (ISP_ABS_CCDC_CULL & ccdc_struct->update) { +- if (copy_from_user(&cull_t, (struct ispccdc_culling *) +- ccdc_struct->cull, +- sizeof(cull_t))) +- goto copy_from_user_err; +- ispccdc_config_culling(isp_ccdc, cull_t); ++ DPRINTK_ISPCCDC("Module in use =%d\n", isp_ccdc->ccdc_inuse); ++ DPRINTK_ISPCCDC("Accepted CCDC Input (width = %d,Height = %d)\n", ++ isp_ccdc->ccdcin_w, 
++ isp_ccdc->ccdcin_h); ++ DPRINTK_ISPCCDC("Accepted CCDC Output (width = %d,Height = %d)\n", ++ isp_ccdc->ccdcout_w, ++ isp_ccdc->ccdcout_h); ++ DPRINTK_ISPCCDC("###CCDC PCR=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_PCR)); ++ DPRINTK_ISPCCDC("ISP_CTRL =0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ++ ISP_CTRL)); ++ switch (pipe->ccdc_in) { ++ case CCDC_RAW: ++ DPRINTK_ISPCCDC("ccdc input format is CCDC_RAW\n"); ++ break; ++ case CCDC_YUV_SYNC: ++ DPRINTK_ISPCCDC("ccdc input format is CCDC_YUV_SYNC\n"); ++ break; ++ case CCDC_YUV_BT: ++ DPRINTK_ISPCCDC("ccdc input format is CCDC_YUV_BT\n"); ++ break; ++ default: ++ break; + } -- if (d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) { -- if (!hci_conn_encrypt(l2cap_pi(sk)->conn->hcon)) -- return 1; -- } else if (d->link_mode & RFCOMM_LM_AUTH) { -- if (!hci_conn_auth(l2cap_pi(sk)->conn->hcon)) -- return 1; -+ switch (d->sec_level) { -+ case BT_SECURITY_HIGH: -+ auth_type = HCI_AT_GENERAL_BONDING_MITM; +- if (is_isplsc_activated()) { +- if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->flag) { +- if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->update) { +- if (copy_from_user( +- &isp_ccdc->lsc_config, +- (struct ispccdc_lsc_config *) +- ccdc_struct->lsc_cfg, +- sizeof(struct ispccdc_lsc_config))) +- goto copy_from_user_err; +- ispccdc_config_lsc(isp_ccdc, +- &isp_ccdc->lsc_config); +- } +- ispccdc_enable_lsc(isp_ccdc, 1); +- } else if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->update) { +- ispccdc_enable_lsc(isp_ccdc, 0); +- } +- if (ISP_ABS_TBL_LSC & ccdc_struct->update) { +- if (copy_from_user(isp_ccdc->lsc_gain_table, +- ccdc_struct->lsc, +- isp_ccdc->lsc_config.size)) +- goto copy_from_user_err; +- ispccdc_load_lsc(isp_ccdc, isp_ccdc->lsc_gain_table, +- isp_ccdc->lsc_config.size); +- } ++ switch (pipe->ccdc_out) { ++ case CCDC_OTHERS_VP: ++ DPRINTK_ISPCCDC("ccdc output format is CCDC_OTHERS_VP\n"); ++ break; ++ case CCDC_OTHERS_MEM: ++ DPRINTK_ISPCCDC("ccdc output format is CCDC_OTHERS_MEM\n"); + break; -+ case BT_SECURITY_MEDIUM: -+ auth_type = HCI_AT_GENERAL_BONDING; ++ case CCDC_YUV_RSZ: ++ DPRINTK_ISPCCDC("ccdc output format is CCDC_YUV_RSZ\n"); + break; + default: -+ auth_type = HCI_AT_NO_BONDING; + break; } +- if (ISP_ABS_CCDC_COLPTN & ccdc_struct->update) +- ispccdc_config_imgattr(isp_ccdc, ccdc_struct->colptn); +- - return 0; -+ return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level, -+ auth_type); -+} -+ -+static void rfcomm_session_timeout(unsigned long arg) -+{ -+ struct rfcomm_session *s = (void *) arg; -+ -+ BT_DBG("session %p state %ld", s, s->state); -+ -+ set_bit(RFCOMM_TIMED_OUT, &s->flags); -+ rfcomm_session_put(s); -+ rfcomm_schedule(RFCOMM_SCHED_TIMEO); -+} -+ -+static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) -+{ -+ BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); -+ -+ if (!mod_timer(&s->timer, jiffies + timeout)) -+ rfcomm_session_hold(s); -+} -+ -+static void rfcomm_session_clear_timer(struct rfcomm_session *s) -+{ -+ BT_DBG("session %p state %ld", s, s->state); -+ -+ if (timer_pending(&s->timer) && del_timer(&s->timer)) -+ rfcomm_session_put(s); +- +-copy_from_user_err: +- dev_err(isp->dev, "ccdc: Config: copy from user error"); +- return -EINVAL ; +-} +- +-/** +- * Set the value to be used for CCDC_CFG.WENLOG. +- * w - Value of wenlog. 
+- */ +-void ispccdc_set_wenlog(struct isp_ccdc_device *isp_ccdc, u32 wenlog) +-{ +- isp_ccdc->wenlog = wenlog; ++ DPRINTK_ISPCCDC("###ISP_CTRL in ccdc =0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ++ ISP_CTRL)); ++ DPRINTK_ISPCCDC("###ISP_IRQ0ENABLE in ccdc =0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ++ ISP_IRQ0ENABLE)); ++ DPRINTK_ISPCCDC("###ISP_IRQ0STATUS in ccdc =0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, ++ ISP_IRQ0STATUS)); ++ DPRINTK_ISPCCDC("###CCDC SYN_MODE=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_SYN_MODE)); ++ DPRINTK_ISPCCDC("###CCDC HORZ_INFO=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_HORZ_INFO)); ++ DPRINTK_ISPCCDC("###CCDC VERT_START=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_VERT_START)); ++ DPRINTK_ISPCCDC("###CCDC VERT_LINES=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_VERT_LINES)); ++ DPRINTK_ISPCCDC("###CCDC CULLING=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_CULLING)); ++ DPRINTK_ISPCCDC("###CCDC HSIZE_OFF=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_HSIZE_OFF)); ++ DPRINTK_ISPCCDC("###CCDC SDOFST=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_SDOFST)); ++ DPRINTK_ISPCCDC("###CCDC SDR_ADDR=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_SDR_ADDR)); ++ DPRINTK_ISPCCDC("###CCDC CLAMP=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_CLAMP)); ++ DPRINTK_ISPCCDC("###CCDC COLPTN=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_COLPTN)); ++ DPRINTK_ISPCCDC("###CCDC CFG=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_CFG)); ++ DPRINTK_ISPCCDC("###CCDC VP_OUT=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_VP_OUT)); ++ DPRINTK_ISPCCDC("###CCDC_SDR_ADDR= 0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_SDR_ADDR)); ++ DPRINTK_ISPCCDC("###CCDC FMTCFG=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_FMTCFG)); ++ DPRINTK_ISPCCDC("###CCDC FMT_HORZ=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_FMT_HORZ)); ++ DPRINTK_ISPCCDC("###CCDC FMT_VERT=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_FMT_VERT)); ++ DPRINTK_ISPCCDC("###CCDC LSC_CONFIG=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_CONFIG)); ++ DPRINTK_ISPCCDC("###CCDC LSC_INIT=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_INITIAL)); ++ DPRINTK_ISPCCDC("###CCDC LSC_TABLE BASE=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_TABLE_BASE)); ++ DPRINTK_ISPCCDC("###CCDC LSC TABLE OFFSET=0x%x\n", ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_TABLE_OFFSET)); } - /* ---- RFCOMM DLCs ---- */ -@@ -319,6 +347,7 @@ static void rfcomm_dlc_link(struct rfcom + /** +- * ispccdc_request - Reserves the CCDC module. +- * +- * Reserves the CCDC module and assures that is used only once at a time. ++ * ispccdc_config_black_clamp - Configures the clamp parameters in CCDC. ++ * @bclamp: Structure containing the optical black average gain, optical black ++ * sample length, sample lines, and the start pixel position of the ++ * samples w.r.t the HS pulse. ++ * Configures the clamp parameters in CCDC. Either if its being used the ++ * optical black clamp, or the digital clamp. If its a digital clamp, then ++ * assures to put a valid DC substraction level. + * +- * Returns 0 if successful, or -EBUSY if CCDC module is busy. ++ * Returns always 0 when completed. 
+ **/ +-int ispccdc_request(struct isp_ccdc_device *isp_ccdc) ++static int ispccdc_config_black_clamp(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_bclamp bclamp) + { + struct device *dev = to_device(isp_ccdc); ++ u32 bclamp_val = 0; - rfcomm_session_hold(s); +- mutex_lock(&isp_ccdc->mutexlock); +- if (isp_ccdc->ccdc_inuse) { +- mutex_unlock(&isp_ccdc->mutexlock); +- DPRINTK_ISPCCDC("ISP_ERR : CCDC Module Busy\n"); +- return -EBUSY; ++ if (isp_ccdc->obclamp_en) { ++ bclamp_val |= bclamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT; ++ bclamp_val |= bclamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT; ++ bclamp_val |= bclamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT; ++ bclamp_val |= bclamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT; ++ isp_reg_writel(dev, bclamp_val, ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP); ++ } else { ++ if (omap_rev() < OMAP3430_REV_ES2_0) ++ if (isp_ccdc->syncif_ipmod == YUV16 || ++ isp_ccdc->syncif_ipmod == YUV8 || ++ isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_REC656IF) & ++ ISPCCDC_REC656IF_R656ON) ++ bclamp.dcsubval = 0; ++ isp_reg_writel(dev, bclamp.dcsubval, ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB); + } +- +- isp_ccdc->ccdc_inuse = 1; +- mutex_unlock(&isp_ccdc->mutexlock); +- isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, +- ISPCTRL_CCDC_RAM_EN | ISPCTRL_CCDC_CLK_EN | +- ISPCTRL_SBL_WR1_RAM_EN); +- isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, +- ISPCCDC_CFG_VDLC); + return 0; + } -+ rfcomm_session_clear_timer(s); - rfcomm_dlc_hold(d); - list_add(&d->list, &s->dlcs); - d->session = s; -@@ -334,6 +363,9 @@ static void rfcomm_dlc_unlink(struct rfc - d->session = NULL; - rfcomm_dlc_put(d); + /** +- * ispccdc_free - Frees the CCDC module. +- * +- * Frees the CCDC module so it can be used by another process. ++ * ispccdc_enable_black_clamp - Enables/Disables the optical black clamp. ++ * @enable: 0 Disables optical black clamp, 1 Enables optical black clamp. + * +- * Returns 0 if successful, or -EINVAL if module has been already freed. ++ * Enables or disables the optical black clamp. When disabled, the digital ++ * clamp operates. + **/ +-int ispccdc_free(struct isp_ccdc_device *isp_ccdc) ++static void ispccdc_enable_black_clamp(struct isp_ccdc_device *isp_ccdc, ++ u8 enable) + { +- mutex_lock(&isp_ccdc->mutexlock); +- if (!isp_ccdc->ccdc_inuse) { +- mutex_unlock(&isp_ccdc->mutexlock); +- DPRINTK_ISPCCDC("ISP_ERR: CCDC Module already freed\n"); +- return -EINVAL; +- } ++ struct device *dev = to_device(isp_ccdc); -+ if (list_empty(&s->dlcs)) -+ rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); -+ - rfcomm_session_put(s); +- isp_ccdc->ccdc_inuse = 0; +- mutex_unlock(&isp_ccdc->mutexlock); +- isp_reg_and(to_device(isp_ccdc), OMAP3_ISP_IOMEM_MAIN, +- ISP_CTRL, ~(ISPCTRL_CCDC_CLK_EN | +- ISPCTRL_CCDC_RAM_EN | +- ISPCTRL_SBL_WR1_RAM_EN)); +- return 0; ++ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP, ++ ~ISPCCDC_CLAMP_CLAMPEN, ++ enable ? ISPCCDC_CLAMP_CLAMPEN : 0); ++ isp_ccdc->obclamp_en = enable; } -@@ -393,10 +425,10 @@ static int __rfcomm_dlc_open(struct rfco - d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc; + /** +- * ispccdc_free_lsc - Frees Lens Shading Compensation table ++ * ispccdc_config_fpc - Configures the Faulty Pixel Correction parameters. ++ * @fpc: Structure containing the number of faulty pixels corrected in the ++ * frame, address of the FPC table. + * +- * Always returns 0. ++ * Returns 0 if successful, or -EINVAL if FPC Address is not on the 64 byte ++ * boundary. 
+ **/ +-static int ispccdc_free_lsc(struct isp_ccdc_device *isp_ccdc) ++static int ispccdc_config_fpc(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_fpc fpc) + { +- struct isp_device *isp = to_isp_device(isp_ccdc); ++ struct device *dev = to_device(isp_ccdc); ++ u32 fpc_val = 0; - if (s->state == BT_CONNECTED) { -- if (rfcomm_check_link_mode(d)) -- set_bit(RFCOMM_AUTH_PENDING, &d->flags); -- else -+ if (rfcomm_check_security(d)) - rfcomm_send_pn(s, 1, d); -+ else -+ set_bit(RFCOMM_AUTH_PENDING, &d->flags); - } +- if (!isp_ccdc->lsc_ispmmu_addr) +- return 0; ++ fpc_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); - rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); -@@ -426,9 +458,16 @@ static int __rfcomm_dlc_close(struct rfc - d, d->state, d->dlci, err, s); +- ispccdc_enable_lsc(isp_ccdc, 0); +- isp_ccdc->lsc_initialized = 0; +- isp_reg_writel(to_device(isp_ccdc), 0, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_TABLE_BASE); +- iommu_vfree(isp->iommu, isp_ccdc->lsc_ispmmu_addr); +- isp_ccdc->lsc_gain_table = NULL; ++ if ((fpc.fpcaddr & 0xFFFFFFC0) == fpc.fpcaddr) { ++ isp_reg_writel(dev, fpc_val & (~ISPCCDC_FPC_FPCEN), ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); ++ isp_reg_writel(dev, fpc.fpcaddr, ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR); ++ } else { ++ DPRINTK_ISPCCDC("FPC Address should be on 64byte boundary\n"); ++ return -EINVAL; ++ } ++ isp_reg_writel(dev, fpc_val | (fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT), ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); + return 0; + } - switch (d->state) { -- case BT_CONNECTED: -- case BT_CONFIG: - case BT_CONNECT: -+ case BT_CONFIG: -+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { -+ set_bit(RFCOMM_AUTH_REJECT, &d->flags); -+ rfcomm_schedule(RFCOMM_SCHED_AUTH); -+ break; -+ } -+ /* Fall through */ -+ -+ case BT_CONNECTED: - d->state = BT_DISCONN; - if (skb_queue_empty(&d->tx_queue)) { - rfcomm_send_disc(s, d->dlci); -@@ -439,6 +478,15 @@ static int __rfcomm_dlc_close(struct rfc - } - break; + /** +- * ispccdc_allocate_lsc - Allocate space for Lens Shading Compensation table +- * @table_size: LSC gain table size. +- * +- * Returns 0 if successful, -ENOMEM of its no memory available, or -EINVAL if +- * table_size is zero. ++ * ispccdc_enable_fpc - Enables the Faulty Pixel Correction. ++ * @enable: 0 Disables FPC, 1 Enables FPC. + **/ +-static int ispccdc_allocate_lsc(struct isp_ccdc_device *isp_ccdc, +- u32 table_size) ++static void ispccdc_enable_fpc(struct isp_ccdc_device *isp_ccdc, u8 enable) + { +- struct isp_device *isp = to_isp_device(isp_ccdc); +- +- if (table_size == 0) +- return -EINVAL; ++ struct device *dev = to_device(isp_ccdc); -+ case BT_OPEN: -+ case BT_CONNECT2: -+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { -+ set_bit(RFCOMM_AUTH_REJECT, &d->flags); -+ rfcomm_schedule(RFCOMM_SCHED_AUTH); -+ break; -+ } -+ /* Fall through */ -+ - default: - rfcomm_dlc_clear_timer(d); +- if ((isp_ccdc->lsc_config.size >= table_size) +- && isp_ccdc->lsc_gain_table) +- return 0; ++ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, ++ ~ISPCCDC_FPC_FPCEN, enable ? ISPCCDC_FPC_FPCEN : 0); ++} -@@ -550,6 +598,8 @@ static struct rfcomm_session *rfcomm_ses +- ispccdc_free_lsc(isp_ccdc); ++/** ++ * ispccdc_config_black_comp - Configures Black Level Compensation parameters. ++ * @blcomp: Structure containing the black level compensation value for RGrGbB ++ * pixels. in 2's complement. 
++ **/ ++static void ispccdc_config_black_comp(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_blcomp blcomp) ++{ ++ struct device *dev = to_device(isp_ccdc); ++ u32 blcomp_val = 0; - BT_DBG("session %p sock %p", s, sock); +- isp_ccdc->lsc_ispmmu_addr = iommu_vmalloc(isp->iommu, 0, table_size, +- IOMMU_FLAG); +- if (IS_ERR_VALUE(isp_ccdc->lsc_ispmmu_addr)) { +- dev_err(to_device(isp_ccdc), +- "ccdc: Cannot allocate memory for gain tables\n"); +- isp_ccdc->lsc_ispmmu_addr = 0; +- return -ENOMEM; +- } +- isp_ccdc->lsc_gain_table = da_to_va(isp->iommu, +- (u32)isp_ccdc->lsc_ispmmu_addr); ++ blcomp_val |= blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT; ++ blcomp_val |= blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT; ++ blcomp_val |= blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT; ++ blcomp_val |= blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT; -+ setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long)s); -+ - INIT_LIST_HEAD(&s->dlcs); - s->state = state; - s->sock = sock; -@@ -581,6 +631,7 @@ static void rfcomm_session_del(struct rf - if (state == BT_CONNECTED) - rfcomm_send_disc(s, 0); +- return 0; ++ isp_reg_writel(dev, blcomp_val, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_BLKCMP); + } -+ rfcomm_session_clear_timer(s); - sock_release(s->sock); - kfree(s); + /** +- * ispccdc_program_lsc - Program Lens Shading Compensation table. +- * @table_size: LSC gain table size. +- * +- * Returns 0 if successful, or -EINVAL if there's no mapped address for the +- * table yet. ++ * ispccdc_config_vp - Configures the Video Port Configuration parameters. ++ * @vpcfg: Structure containing the Video Port input frequency, and the 10 bit ++ * format. + **/ +-static int ispccdc_program_lsc(struct isp_ccdc_device *isp_ccdc) ++static void ispccdc_config_vp(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_vp vpcfg) + { +- if (!isp_ccdc->lsc_ispmmu_addr) +- return -EINVAL; ++ struct device *dev = to_device(isp_ccdc); ++ u32 fmtcfg_vp = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_FMTCFG); -@@ -622,6 +673,7 @@ static void rfcomm_session_close(struct - __rfcomm_dlc_close(d, err); - } +- if (isp_ccdc->lsc_initialized) +- return 0; ++ fmtcfg_vp &= ISPCCDC_FMTCFG_VPIN_MASK & ISPCCDC_FMTCFG_VPIF_FRQ_MASK; -+ rfcomm_session_clear_timer(s); - rfcomm_session_put(s); +- isp_reg_writel(to_device(isp_ccdc), isp_ccdc->lsc_ispmmu_addr, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE); +- isp_ccdc->lsc_initialized = 1; +- return 0; ++ switch (vpcfg.bitshift_sel) { ++ case BIT9_0: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0; ++ break; ++ case BIT10_1: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1; ++ break; ++ case BIT11_2: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2; ++ break; ++ case BIT12_3: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3; ++ break; ++ }; ++ switch (vpcfg.freq_sel) { ++ case PIXCLKBY2: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY2; ++ break; ++ case PIXCLKBY3_5: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY3; ++ break; ++ case PIXCLKBY4_5: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY4; ++ break; ++ case PIXCLKBY5_5: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY5; ++ break; ++ case PIXCLKBY6_5: ++ fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY6; ++ break; ++ }; ++ isp_reg_writel(dev, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); } -@@ -641,6 +693,7 @@ static struct rfcomm_session *rfcomm_ses - bacpy(&addr.l2_bdaddr, src); - addr.l2_family = AF_BLUETOOTH; - addr.l2_psm = 0; -+ addr.l2_cid = 0; - *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); - if (*err < 0) - goto failed; -@@ -662,6 +715,7 @@ static struct rfcomm_session *rfcomm_ses - 
bacpy(&addr.l2_bdaddr, dst); - addr.l2_family = AF_BLUETOOTH; - addr.l2_psm = htobs(RFCOMM_PSM); -+ addr.l2_cid = 0; - *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); - if (*err == 0 || *err == -EINPROGRESS) - return s; -@@ -1167,7 +1221,7 @@ static int rfcomm_recv_disc(struct rfcom - return 0; + /** +- * ispccdc_load_lsc - Load Lens Shading Compensation table. +- * @table_addr: LSC gain table MMU Mapped address. +- * @table_size: LSC gain table size. +- * +- * Returns 0 if successful, -ENOMEM of its no memory available, or -EINVAL if +- * table_size is zero. ++ * ispccdc_enable_vp - Enables the Video Port. ++ * @enable: 0 Disables VP, 1 Enables VP + **/ +-int ispccdc_load_lsc(struct isp_ccdc_device *isp_ccdc, u8 *table_addr, +- u32 table_size) ++static void ispccdc_enable_vp(struct isp_ccdc_device *isp_ccdc, u8 enable) + { +- int ret; +- +- if (!is_isplsc_activated()) +- return 0; +- +- if (!table_addr) +- return -EINVAL; ++ struct device *dev = to_device(isp_ccdc); + +- ret = ispccdc_allocate_lsc(isp_ccdc, table_size); +- if (ret) +- return ret; +- +- if (table_addr != isp_ccdc->lsc_gain_table) +- memcpy(isp_ccdc->lsc_gain_table, table_addr, table_size); +- ret = ispccdc_program_lsc(isp_ccdc); +- if (ret) +- return ret; +- return 0; ++ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG, ++ ~ISPCCDC_FMTCFG_VPEN, ++ enable ? ISPCCDC_FMTCFG_VPEN : 0); } --static void rfcomm_dlc_accept(struct rfcomm_dlc *d) -+void rfcomm_dlc_accept(struct rfcomm_dlc *d) + /** +- * ispccdc_config_lsc - Configures the lens shading compensation module +- * @lsc_cfg: LSC configuration structure ++ * ispccdc_config_culling - Configures the culling parameters. ++ * @cull: Structure containing the vertical culling pattern, and horizontal ++ * culling pattern for odd and even lines. 
+ **/ +-void ispccdc_config_lsc(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_lsc_config *lsc_cfg) ++static void ispccdc_config_culling(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_culling cull) { - struct sock *sk = d->session->sock->sk; + struct device *dev = to_device(isp_ccdc); +- int reg; -@@ -1175,17 +1229,38 @@ static void rfcomm_dlc_accept(struct rfc +- if (!is_isplsc_activated()) +- return; +- +- ispccdc_enable_lsc(isp_ccdc, 0); +- isp_reg_writel(dev, lsc_cfg->offset, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_TABLE_OFFSET); ++ u32 culling_val = 0; - rfcomm_send_ua(d->session, d->dlci); +- reg = 0; +- reg |= lsc_cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT; +- reg |= lsc_cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT; +- reg |= lsc_cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT; +- isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG); ++ culling_val |= cull.v_pattern << ISPCCDC_CULLING_CULV_SHIFT; ++ culling_val |= cull.h_even << ISPCCDC_CULLING_CULHEVN_SHIFT; ++ culling_val |= cull.h_odd << ISPCCDC_CULLING_CULHODD_SHIFT; -+ rfcomm_dlc_clear_timer(d); -+ - rfcomm_dlc_lock(d); - d->state = BT_CONNECTED; - d->state_change(d, 0); - rfcomm_dlc_unlock(d); +- reg = 0; +- reg &= ~ISPCCDC_LSC_INITIAL_X_MASK; +- reg |= lsc_cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT; +- reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK; +- reg |= lsc_cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT; +- isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_INITIAL); ++ isp_reg_writel(dev, culling_val, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_CULLING); + } -- if (d->link_mode & RFCOMM_LM_MASTER) -+ if (d->role_switch) - hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); + /** +- * ispccdc_enable_lsc - Enables/Disables the Lens Shading Compensation module. +- * @enable: 0 Disables LSC, 1 Enables LSC. ++ * ispccdc_enable_lpf - Enables the Low-Pass Filter (LPF). ++ * @enable: 0 Disables LPF, 1 Enables LPF + **/ +-void ispccdc_enable_lsc(struct isp_ccdc_device *isp_ccdc, u8 enable) ++static void ispccdc_enable_lpf(struct isp_ccdc_device *isp_ccdc, u8 enable) + { + struct device *dev = to_device(isp_ccdc); - rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); +- if (!is_isplsc_activated()) +- return; +- +- if (enable) { +- if (!ispccdc_busy(isp_ccdc)) { +- isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, +- ISP_CTRL, ISPCTRL_SBL_SHARED_RPORTB +- | ISPCTRL_SBL_RD_RAM_EN); +- +- isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_CONFIG, 0x1); +- +- isp_ccdc->lsc_state = 1; +- } else { +- /* Postpone enabling LSC */ +- isp_ccdc->lsc_enable = 1; +- } +- } else { +- isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_CONFIG, 0xFFFE); +- isp_ccdc->lsc_state = 0; +- isp_ccdc->lsc_enable = 0; +- } ++ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE, ++ ~ISPCCDC_SYN_MODE_LPF, ++ enable ? ISPCCDC_SYN_MODE_LPF : 0); } -+static void rfcomm_check_accept(struct rfcomm_dlc *d) -+{ -+ if (rfcomm_check_security(d)) { -+ if (d->defer_setup) { -+ set_bit(RFCOMM_DEFER_SETUP, &d->flags); -+ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -+ -+ rfcomm_dlc_lock(d); -+ d->state = BT_CONNECT2; -+ d->state_change(d, 0); -+ rfcomm_dlc_unlock(d); -+ } else -+ rfcomm_dlc_accept(d); -+ } else { -+ set_bit(RFCOMM_AUTH_PENDING, &d->flags); -+ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -+ } -+} -+ - static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci) +-void ispccdc_lsc_error_handler(struct isp_ccdc_device *isp_ccdc) ++/** ++ * ispccdc_config_alaw - Configures the input width for A-law. 
++ * @ipwidth: Input width for A-law ++ **/ ++static void ispccdc_config_alaw(struct isp_ccdc_device *isp_ccdc, ++ enum alaw_ipwidth ipwidth) { - struct rfcomm_dlc *d; -@@ -1208,11 +1283,7 @@ static int rfcomm_recv_sabm(struct rfcom - if (d) { - if (d->state == BT_OPEN) { - /* DLC was previously opened by PN request */ -- if (rfcomm_check_link_mode(d)) { -- set_bit(RFCOMM_AUTH_PENDING, &d->flags); -- rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -- } else -- rfcomm_dlc_accept(d); -+ rfcomm_check_accept(d); - } - return 0; - } -@@ -1224,11 +1295,7 @@ static int rfcomm_recv_sabm(struct rfcom - d->addr = __addr(s->initiator, dlci); - rfcomm_dlc_link(s, d); +- int lsc_enable = isp_ccdc->lsc_state; +- +- ispccdc_enable_lsc(isp_ccdc, 0); ++ struct device *dev = to_device(isp_ccdc); -- if (rfcomm_check_link_mode(d)) { -- set_bit(RFCOMM_AUTH_PENDING, &d->flags); -- rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -- } else -- rfcomm_dlc_accept(d); -+ rfcomm_check_accept(d); - } else { - rfcomm_send_dm(s, dlci); - } -@@ -1642,11 +1709,12 @@ static void rfcomm_process_connect(struc - d = list_entry(p, struct rfcomm_dlc, list); - if (d->state == BT_CONFIG) { - d->mtu = s->mtu; -- if (rfcomm_check_link_mode(d)) { -+ if (rfcomm_check_security(d)) { -+ rfcomm_send_pn(s, 1, d); -+ } else { - set_bit(RFCOMM_AUTH_PENDING, &d->flags); - rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -- } else -- rfcomm_send_pn(s, 1, d); -+ } - } - } +- isp_ccdc->lsc_enable = lsc_enable; ++ isp_reg_writel(dev, ipwidth << ISPCCDC_ALAW_GWDI_SHIFT, ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW); } -@@ -1722,11 +1790,17 @@ static inline void rfcomm_process_dlcs(s - if (d->out) { - rfcomm_send_pn(s, 1, d); - rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); -- } else -- rfcomm_dlc_accept(d); -- if (d->link_mode & RFCOMM_LM_SECURE) { -- struct sock *sk = s->sock->sk; -- hci_conn_change_link_key(l2cap_pi(sk)->conn->hcon); -+ } else { -+ if (d->defer_setup) { -+ set_bit(RFCOMM_DEFER_SETUP, &d->flags); -+ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -+ -+ rfcomm_dlc_lock(d); -+ d->state = BT_CONNECT2; -+ d->state_change(d, 0); -+ rfcomm_dlc_unlock(d); -+ } else -+ rfcomm_dlc_accept(d); - } - continue; - } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) { -@@ -1739,6 +1813,9 @@ static inline void rfcomm_process_dlcs(s - continue; - } - -+ if (test_bit(RFCOMM_SEC_PENDING, &d->flags)) -+ continue; -+ - if (test_bit(RFCOMM_TX_THROTTLED, &s->flags)) - continue; -@@ -1837,6 +1914,12 @@ static inline void rfcomm_process_sessio - struct rfcomm_session *s; - s = list_entry(p, struct rfcomm_session, list); + /** +- * ispccdc_config_crop - Configures crop parameters for the ISP CCDC. +- * @left: Left offset of the crop area. +- * @top: Top offset of the crop area. +- * @height: Height of the crop area. +- * @width: Width of the crop area. +- * +- * The following restrictions are applied for the crop settings. If incoming +- * values do not follow these restrictions then we map the settings to the +- * closest acceptable crop value. +- * 1) Left offset is always odd. This can be avoided if we enable byte swap +- * option for incoming data into CCDC. +- * 2) Top offset is always even. +- * 3) Crop height is always even. +- * 4) Crop width is always a multiple of 16 pixels ++ * ispccdc_enable_alaw - Enables the A-law compression. 
++ * @enable: 0 - Disables A-law, 1 - Enables A-law + **/ +-void ispccdc_config_crop(struct isp_ccdc_device *isp_ccdc, u32 left, u32 top, +- u32 height, u32 width) ++static void ispccdc_enable_alaw(struct isp_ccdc_device *isp_ccdc, u8 enable) + { +- isp_ccdc->ccdcin_woffset = left + (left % 2); +- isp_ccdc->ccdcin_hoffset = top + (top % 2); +- +- isp_ccdc->crop_w = width - (width % 16); +- isp_ccdc->crop_h = height + (height % 2); ++ struct device *dev = to_device(isp_ccdc); -+ if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { -+ s->state = BT_DISCONN; -+ rfcomm_send_disc(s, 0); -+ continue; -+ } -+ - if (s->state == BT_LISTEN) { - rfcomm_accept_connection(s); - continue; -@@ -1881,6 +1964,7 @@ static int rfcomm_add_listener(bdaddr_t - bacpy(&addr.l2_bdaddr, ba); - addr.l2_family = AF_BLUETOOTH; - addr.l2_psm = htobs(RFCOMM_PSM); -+ addr.l2_cid = 0; - err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); - if (err < 0) { - BT_ERR("Bind failed %d", err); -@@ -1952,42 +2036,7 @@ static int rfcomm_run(void *unused) - return 0; +- DPRINTK_ISPCCDC("\n\tOffsets L %d T %d W %d H %d\n", +- isp_ccdc->ccdcin_woffset, +- isp_ccdc->ccdcin_hoffset, +- isp_ccdc->crop_w, +- isp_ccdc->crop_h); ++ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW, ++ ~ISPCCDC_ALAW_CCDTBL, ++ enable ? ISPCCDC_ALAW_CCDTBL : 0); } --static void rfcomm_auth_cfm(struct hci_conn *conn, u8 status) --{ -- struct rfcomm_session *s; -- struct rfcomm_dlc *d; -- struct list_head *p, *n; -- -- BT_DBG("conn %p status 0x%02x", conn, status); + /** +- * ispccdc_config_datapath - Specifies the input and output modules for CCDC. +- * @input: Indicates the module that inputs the image to the CCDC. +- * @output: Indicates the module to which the CCDC outputs the image. +- * +- * Configures the default configuration for the CCDC to work with. +- * +- * The valid values for the input are CCDC_RAW (0), CCDC_YUV_SYNC (1), +- * CCDC_YUV_BT (2), and CCDC_OTHERS (3). +- * +- * The valid values for the output are CCDC_YUV_RSZ (0), CCDC_YUV_MEM_RSZ (1), +- * CCDC_OTHERS_VP (2), CCDC_OTHERS_MEM (3), CCDC_OTHERS_VP_MEM (4). +- * +- * Returns 0 if successful, or -EINVAL if wrong I/O combination or wrong input +- * or output values. ++ * ispccdc_config_imgattr - Configures the sensor image specific attributes. ++ * @colptn: Color pattern of the sensor. 
+ **/ +-static int ispccdc_config_datapath(struct isp_ccdc_device *isp_ccdc, +- struct isp_pipeline *pipe) ++static void ispccdc_config_imgattr(struct isp_ccdc_device *isp_ccdc, u32 colptn) + { + struct device *dev = to_device(isp_ccdc); + +- u32 syn_mode = 0; +- struct ispccdc_vp vpcfg; +- struct ispccdc_syncif syncif; +- struct ispccdc_bclamp blkcfg; +- +- u32 colptn = ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT | +- ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT | +- ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT | +- ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT | +- ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT | +- ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT | +- ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT | +- ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT | +- ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT | +- ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT | +- ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT | +- ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT | +- ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT | +- ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT | +- ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT | +- ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT; +- +- syn_mode = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); +- +- switch (pipe->ccdc_out) { +- case CCDC_YUV_RSZ: +- syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ; +- syn_mode &= ~ISPCCDC_SYN_MODE_WEN; +- break; - -- s = rfcomm_session_get(&conn->hdev->bdaddr, &conn->dst); -- if (!s) -- return; +- case CCDC_YUV_MEM_RSZ: +- syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ; +- isp_ccdc->wen = 1; +- syn_mode |= ISPCCDC_SYN_MODE_WEN; +- break; - -- rfcomm_session_hold(s); +- case CCDC_OTHERS_VP: +- syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; +- syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; +- syn_mode &= ~ISPCCDC_SYN_MODE_WEN; +- vpcfg.bitshift_sel = BIT9_0; +- vpcfg.freq_sel = PIXCLKBY2; +- ispccdc_config_vp(isp_ccdc, vpcfg); +- ispccdc_enable_vp(isp_ccdc, 1); +- break; - -- list_for_each_safe(p, n, &s->dlcs) { -- d = list_entry(p, struct rfcomm_dlc, list); +- case CCDC_OTHERS_MEM: +- syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; +- syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; +- syn_mode |= ISPCCDC_SYN_MODE_WEN; +- syn_mode &= ~ISPCCDC_SYN_MODE_EXWEN; +- isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, +- ~ISPCCDC_CFG_WENLOG); +- vpcfg.bitshift_sel = BIT11_2; +- vpcfg.freq_sel = PIXCLKBY2; +- ispccdc_config_vp(isp_ccdc, vpcfg); +- ispccdc_enable_vp(isp_ccdc, 0); +- break; - -- if ((d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) && -- !(conn->link_mode & HCI_LM_ENCRYPT) && !status) -- continue; +- case CCDC_OTHERS_VP_MEM: +- syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; +- syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; +- syn_mode |= ISPCCDC_SYN_MODE_WEN; +- syn_mode &= ~ISPCCDC_SYN_MODE_EXWEN; +- +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, +- ~ISPCCDC_CFG_WENLOG, isp_ccdc->wenlog); +- vpcfg.bitshift_sel = BIT9_0; +- vpcfg.freq_sel = PIXCLKBY2; +- ispccdc_config_vp(isp_ccdc, vpcfg); +- ispccdc_enable_vp(isp_ccdc, 1); +- break; +- default: +- DPRINTK_ISPCCDC("ISP_ERR: Wrong CCDC Output\n"); +- return -EINVAL; +- }; - -- if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) -- continue; +- isp_reg_writel(dev, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); +- +- switch (pipe->ccdc_in) { +- case CCDC_RAW: +- syncif.ccdc_mastermode = 0; +- syncif.datapol = 0; +- syncif.datsz = DAT10; +- syncif.fldmode = 0; +- syncif.fldout = 0; +- syncif.fldpol = 0; 
+- syncif.fldstat = 0; +- syncif.hdpol = 0; +- syncif.ipmod = RAW; +- syncif.vdpol = 0; +- ispccdc_config_sync_if(isp_ccdc, syncif); +- ispccdc_config_imgattr(isp_ccdc, colptn); +- blkcfg.oblen = 0; +- blkcfg.dcsubval = 64; +- ispccdc_config_black_clamp(isp_ccdc, blkcfg); +- if (is_isplsc_activated()) { +- ispccdc_config_lsc(isp_ccdc, &isp_ccdc->lsc_config); +- ispccdc_load_lsc(isp_ccdc, isp_ccdc->lsc_gain_table_tmp, +- LSC_TABLE_INIT_SIZE); +- } - -- if (!status) -- set_bit(RFCOMM_AUTH_ACCEPT, &d->flags); -- else -- set_bit(RFCOMM_AUTH_REJECT, &d->flags); +- break; +- case CCDC_YUV_SYNC: +- syncif.ccdc_mastermode = 0; +- syncif.datapol = 0; +- syncif.datsz = DAT8; +- syncif.fldmode = 0; +- syncif.fldout = 0; +- syncif.fldpol = 0; +- syncif.fldstat = 0; +- syncif.hdpol = 0; +- syncif.ipmod = YUV16; +- syncif.vdpol = 1; +- ispccdc_config_imgattr(isp_ccdc, 0); +- ispccdc_config_sync_if(isp_ccdc, syncif); +- blkcfg.oblen = 0; +- blkcfg.dcsubval = 0; +- ispccdc_config_black_clamp(isp_ccdc, blkcfg); +- break; +- case CCDC_YUV_BT: +- break; +- case CCDC_OTHERS: +- break; +- default: +- DPRINTK_ISPCCDC("ISP_ERR: Wrong CCDC Input\n"); +- return -EINVAL; - } - -- rfcomm_session_put(s); +- ispccdc_print_status(isp_ccdc, pipe); +- isp_print_status(dev); +- return 0; ++ isp_reg_writel(dev, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN); + } + + /** +- * ispccdc_config_sync_if - Sets the sync i/f params between sensor and CCDC. +- * @syncif: Structure containing the sync parameters like field state, CCDC in +- * master/slave mode, raw/yuv data, polarity of data, field, hs, vs +- * signals. ++ * ispccdc_validate_config_lsc - Check that LSC configuration is valid. ++ * @lsc_cfg: the LSC configuration to check. ++ * @pipe: if not NULL, verify the table size against CCDC input size. ++ * ++ * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid. 
+ **/ +-void ispccdc_config_sync_if(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_syncif syncif) ++static int ispccdc_validate_config_lsc(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_lsc_config *lsc_cfg, ++ struct isp_pipeline *pipe) + { + struct device *dev = to_device(isp_ccdc); +- u32 syn_mode = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_SYN_MODE); - -- rfcomm_schedule(RFCOMM_SCHED_AUTH); --} +- syn_mode |= ISPCCDC_SYN_MODE_VDHDEN; - --static void rfcomm_encrypt_cfm(struct hci_conn *conn, u8 status, u8 encrypt) -+static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) - { - struct rfcomm_session *s; - struct rfcomm_dlc *d; -@@ -2004,18 +2053,29 @@ static void rfcomm_encrypt_cfm(struct hc - list_for_each_safe(p, n, &s->dlcs) { - d = list_entry(p, struct rfcomm_dlc, list); - -- if ((d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) && -- (d->state == BT_CONNECTED || -- d->state == BT_CONFIG) && -- !status && encrypt == 0x00) { -- __rfcomm_dlc_close(d, ECONNREFUSED); -- continue; -+ if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { -+ rfcomm_dlc_clear_timer(d); -+ if (status || encrypt == 0x00) { -+ __rfcomm_dlc_close(d, ECONNREFUSED); -+ continue; -+ } -+ } -+ -+ if (d->state == BT_CONNECTED && !status && encrypt == 0x00) { -+ if (d->sec_level == BT_SECURITY_MEDIUM) { -+ set_bit(RFCOMM_SEC_PENDING, &d->flags); -+ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); -+ continue; -+ } else if (d->sec_level == BT_SECURITY_HIGH) { -+ __rfcomm_dlc_close(d, ECONNREFUSED); -+ continue; -+ } - } +- if (syncif.fldstat) +- syn_mode |= ISPCCDC_SYN_MODE_FLDSTAT; +- else +- syn_mode &= ~ISPCCDC_SYN_MODE_FLDSTAT; ++ unsigned int paxel_width, paxel_height; ++ unsigned int paxel_shift_x, paxel_shift_y; ++ unsigned int min_width, min_height, min_size; ++ unsigned int input_width, input_height; - if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) - continue; +- syn_mode &= ISPCCDC_SYN_MODE_INPMOD_MASK; +- isp_ccdc->syncif_ipmod = syncif.ipmod; ++ paxel_shift_x = lsc_cfg->gain_mode_m; ++ paxel_shift_y = lsc_cfg->gain_mode_n; -- if (!status && encrypt) -+ if (!status) - set_bit(RFCOMM_AUTH_ACCEPT, &d->flags); - else - set_bit(RFCOMM_AUTH_REJECT, &d->flags); -@@ -2028,8 +2088,7 @@ static void rfcomm_encrypt_cfm(struct hc +- switch (syncif.ipmod) { +- case RAW: +- break; +- case YUV16: +- syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16; +- break; +- case YUV8: +- syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR8; +- break; +- }; ++ if ((paxel_shift_x < 2) || (paxel_shift_x > 6) || ++ (paxel_shift_y < 2) || (paxel_shift_y > 6)) { ++ dev_dbg(dev, "CCDC: LSC: Invalid paxel size\n"); ++ return -EINVAL; ++ } - static struct hci_cb rfcomm_cb = { - .name = "RFCOMM", -- .auth_cfm = rfcomm_auth_cfm, -- .encrypt_cfm = rfcomm_encrypt_cfm -+ .security_cfm = rfcomm_security_cfm - }; +- syn_mode &= ISPCCDC_SYN_MODE_DATSIZ_MASK; +- switch (syncif.datsz) { +- case DAT8: +- syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8; +- break; +- case DAT10: +- syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10; +- break; +- case DAT11: +- syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11; +- break; +- case DAT12: +- syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12; +- break; +- }; ++ if (lsc_cfg->offset & 3) { ++ dev_dbg(dev, "CCDC: LSC: Offset must be a multiple of 4\n"); ++ return -EINVAL; ++ } - static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf) -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/rfcomm/sock.c linux-omap-2.6.28-nokia1/net/bluetooth/rfcomm/sock.c ---- linux-omap-2.6.28-omap1/net/bluetooth/rfcomm/sock.c 
2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/rfcomm/sock.c 2011-06-22 13:19:33.283063268 +0200 -@@ -50,11 +50,6 @@ - #include - #include +- if (syncif.fldmode) +- syn_mode |= ISPCCDC_SYN_MODE_FLDMODE; +- else +- syn_mode &= ~ISPCCDC_SYN_MODE_FLDMODE; ++ if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) { ++ dev_dbg(dev, "CCDC: LSC: initial_x and y must be even\n"); ++ return -EINVAL; ++ } --#ifndef CONFIG_BT_RFCOMM_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - static const struct proto_ops rfcomm_sock_ops; +- if (syncif.datapol) +- syn_mode |= ISPCCDC_SYN_MODE_DATAPOL; +- else +- syn_mode &= ~ISPCCDC_SYN_MODE_DATAPOL; ++ if (!pipe) ++ return 0; - static struct bt_sock_list rfcomm_sk_list = { -@@ -266,12 +261,19 @@ static void rfcomm_sock_init(struct sock +- if (syncif.fldpol) +- syn_mode |= ISPCCDC_SYN_MODE_FLDPOL; +- else +- syn_mode &= ~ISPCCDC_SYN_MODE_FLDPOL; ++ input_width = pipe->ccdc_in_w; ++ input_height = pipe->ccdc_in_h; - if (parent) { - sk->sk_type = parent->sk_type; -- pi->link_mode = rfcomm_pi(parent)->link_mode; -+ pi->dlc->defer_setup = bt_sk(parent)->defer_setup; -+ -+ pi->sec_level = rfcomm_pi(parent)->sec_level; -+ pi->role_switch = rfcomm_pi(parent)->role_switch; - } else { -- pi->link_mode = 0; -+ pi->dlc->defer_setup = 0; +- if (syncif.hdpol) +- syn_mode |= ISPCCDC_SYN_MODE_HDPOL; +- else +- syn_mode &= ~ISPCCDC_SYN_MODE_HDPOL; ++ /* Calculate minimum bytesize for validation */ ++ paxel_width = 1 << paxel_shift_x; ++ min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1) ++ >> paxel_shift_x) + 1; + -+ pi->sec_level = BT_SECURITY_LOW; -+ pi->role_switch = 0; - } - -- pi->dlc->link_mode = pi->link_mode; -+ pi->dlc->sec_level = pi->sec_level; -+ pi->dlc->role_switch = pi->role_switch; - } - - static struct proto rfcomm_proto = { -@@ -411,7 +413,8 @@ static int rfcomm_sock_connect(struct so - bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); - rfcomm_pi(sk)->channel = sa->rc_channel; - -- d->link_mode = rfcomm_pi(sk)->link_mode; -+ d->sec_level = rfcomm_pi(sk)->sec_level; -+ d->role_switch = rfcomm_pi(sk)->role_switch; - - err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); - if (!err) -@@ -559,6 +562,9 @@ static int rfcomm_sock_sendmsg(struct ki - struct sk_buff *skb; - int sent = 0; - -+ if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) -+ return -ENOTCONN; ++ paxel_height = 1 << paxel_shift_y; ++ min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1) ++ >> paxel_shift_y) + 1; + - if (msg->msg_flags & MSG_OOB) - return -EOPNOTSUPP; - -@@ -575,8 +581,11 @@ static int rfcomm_sock_sendmsg(struct ki - - skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, - msg->msg_flags & MSG_DONTWAIT, &err); -- if (!skb) -+ if (!skb) { -+ if (sent == 0) -+ sent = err; - break; -+ } - skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); - - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); -@@ -635,16 +644,22 @@ static int rfcomm_sock_recvmsg(struct ki - struct msghdr *msg, size_t size, int flags) - { - struct sock *sk = sock->sk; -+ struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; - int err = 0; - size_t target, copied = 0; - long timeo; - -+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { -+ rfcomm_dlc_accept(d); -+ return 0; ++ min_size = 4 * min_width * min_height; ++ if (min_size > lsc_cfg->size) { ++ dev_dbg(dev, "CCDC: LSC: too small table\n"); ++ return -EINVAL; + } -+ - if (flags & MSG_OOB) - return -EOPNOTSUPP; ++ if (lsc_cfg->offset < (min_width * 4)) { ++ dev_dbg(dev, "CCDC: 
LSC: Offset is too small\n"); ++ return -EINVAL; ++ } ++ if ((lsc_cfg->size / lsc_cfg->offset) < min_height) { ++ dev_dbg(dev, "CCDC: LSC: Wrong size/offset combination\n"); ++ return -EINVAL; ++ } ++ return 0; ++} + +- if (syncif.vdpol) +- syn_mode |= ISPCCDC_SYN_MODE_VDPOL; +- else +- syn_mode &= ~ISPCCDC_SYN_MODE_VDPOL; ++/** ++ * ispccdc_program_lsc - Program Lens Shading Compensation table address. ++ **/ ++static void ispccdc_program_lsc(struct isp_ccdc_device *isp_ccdc) ++{ ++ isp_reg_writel(to_device(isp_ccdc), isp_ccdc->lsc_table_inuse, ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE); ++} - msg->msg_namelen = 0; +- if (syncif.ccdc_mastermode) { +- syn_mode |= ISPCCDC_SYN_MODE_FLDOUT | ISPCCDC_SYN_MODE_VDHDOUT; +- isp_reg_writel(dev, +- syncif.hs_width << ISPCCDC_HD_VD_WID_HDW_SHIFT +- | syncif.vs_width << ISPCCDC_HD_VD_WID_VDW_SHIFT, +- OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_HD_VD_WID); ++/** ++ * ispccdc_config_lsc - Configures the lens shading compensation module ++ **/ ++static void ispccdc_config_lsc(struct isp_ccdc_device *isp_ccdc) ++{ ++ struct device *dev = to_device(isp_ccdc); ++ struct ispccdc_lsc_config *lsc_cfg = &isp_ccdc->lsc_config; ++ int reg; -- BT_DBG("sk %p size %d", sk, size); -+ BT_DBG("sk %p size %zu", sk, size); +- isp_reg_writel(dev, +- syncif.ppln << ISPCCDC_PIX_LINES_PPLN_SHIFT +- | syncif.hlprf << ISPCCDC_PIX_LINES_HLPRF_SHIFT, +- OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_PIX_LINES); +- } else +- syn_mode &= ~(ISPCCDC_SYN_MODE_FLDOUT | +- ISPCCDC_SYN_MODE_VDHDOUT); ++ isp_reg_writel(dev, lsc_cfg->offset, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_TABLE_OFFSET); - lock_sock(sk); +- isp_reg_writel(dev, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); ++ reg = 0; ++ reg |= lsc_cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT; ++ reg |= lsc_cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT; ++ reg |= lsc_cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT; ++ isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG); -@@ -715,7 +730,7 @@ out: - return copied ? : err; +- if (!(syncif.bt_r656_en)) { +- isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_REC656IF, ~ISPCCDC_REC656IF_R656ON); +- } ++ reg = 0; ++ reg &= ~ISPCCDC_LSC_INITIAL_X_MASK; ++ reg |= lsc_cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT; ++ reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK; ++ reg |= lsc_cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT; ++ isp_reg_writel(dev, reg, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_INITIAL); } --static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) -+static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen) + /** +- * ispccdc_config_black_clamp - Configures the clamp parameters in CCDC. +- * @bclamp: Structure containing the optical black average gain, optical black +- * sample length, sample lines, and the start pixel position of the +- * samples w.r.t the HS pulse. +- * Configures the clamp parameters in CCDC. Either if its being used the +- * optical black clamp, or the digital clamp. If its a digital clamp, then +- * assures to put a valid DC substraction level. +- * +- * Returns always 0 when completed. ++ * ispccdc_enable_lsc - Enables/Disables the Lens Shading Compensation module. ++ * @enable: 0 Disables LSC, 1 Enables LSC. 
+ **/ +-int ispccdc_config_black_clamp(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_bclamp bclamp) ++static void ispccdc_enable_lsc(struct isp_ccdc_device *isp_ccdc, u8 enable) { - struct sock *sk = sock->sk; - int err = 0; -@@ -732,7 +747,14 @@ static int rfcomm_sock_setsockopt(struct - break; - } + struct device *dev = to_device(isp_ccdc); +- u32 bclamp_val = 0; -- rfcomm_pi(sk)->link_mode = opt; -+ if (opt & RFCOMM_LM_AUTH) -+ rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; -+ if (opt & RFCOMM_LM_ENCRYPT) -+ rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; -+ if (opt & RFCOMM_LM_SECURE) -+ rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; +- if (isp_ccdc->obclamp_en) { +- bclamp_val |= bclamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT; +- bclamp_val |= bclamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT; +- bclamp_val |= bclamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT; +- bclamp_val |= bclamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT; +- isp_reg_writel(dev, bclamp_val, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP); ++ if (enable) { ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, ++ ISP_CTRL, ISPCTRL_SBL_SHARED_RPORTB ++ | ISPCTRL_SBL_RD_RAM_EN); + -+ rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); - break; - - default: -@@ -744,12 +766,76 @@ static int rfcomm_sock_setsockopt(struct - return err; ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE); + } else { +- if (omap_rev() < OMAP3430_REV_ES2_0) +- if (isp_ccdc->syncif_ipmod == YUV16 || +- isp_ccdc->syncif_ipmod == YUV8 || +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_REC656IF) & +- ISPCCDC_REC656IF_R656ON) +- bclamp.dcsubval = 0; +- isp_reg_writel(dev, bclamp.dcsubval, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB); ++ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_LSC_CONFIG, ~ISPCCDC_LSC_ENABLE); + } +- return 0; } --static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) -+static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) + /** +- * ispccdc_enable_black_clamp - Enables/Disables the optical black clamp. +- * @enable: 0 Disables optical black clamp, 1 Enables optical black clamp. ++ * ispccdc_setup_lsc - apply user LSC settings ++ * Consume the new LSC configuration and table set by user space application ++ * and program to CCDC. This function must be called from process context ++ * before streamon when ISP is not yet running. This function does not yet ++ * actually enable LSC, that has to be done separately. 
++ */ ++static void ispccdc_setup_lsc(struct isp_ccdc_device *isp_ccdc, ++ struct isp_pipeline *pipe) +{ -+ struct sock *sk = sock->sk; -+ struct bt_security sec; -+ int len, err = 0; -+ u32 opt; -+ -+ BT_DBG("sk %p", sk); -+ -+ if (level == SOL_RFCOMM) -+ return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); -+ -+ if (level != SOL_BLUETOOTH) -+ return -ENOPROTOOPT; -+ -+ lock_sock(sk); -+ -+ switch (optname) { -+ case BT_SECURITY: -+ if (sk->sk_type != SOCK_STREAM) { -+ err = -EINVAL; -+ break; -+ } -+ -+ sec.level = BT_SECURITY_LOW; -+ -+ len = min_t(unsigned int, sizeof(sec), optlen); -+ if (copy_from_user((char *) &sec, optval, len)) { -+ err = -EFAULT; -+ break; -+ } -+ -+ if (sec.level > BT_SECURITY_HIGH) { -+ err = -EINVAL; -+ break; -+ } -+ -+ rfcomm_pi(sk)->sec_level = sec.level; -+ break; -+ -+ case BT_DEFER_SETUP: -+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { -+ err = -EINVAL; -+ break; -+ } -+ -+ if (get_user(opt, (u32 __user *) optval)) { -+ err = -EFAULT; -+ break; ++ ispccdc_enable_lsc(isp_ccdc, 0); /* Disable LSC */ ++ if (pipe->ccdc_in == CCDC_RAW && isp_ccdc->lsc_request_enable) { ++ /* LSC is requested to be enabled, so configure it */ ++ if (isp_ccdc->update_lsc_table) { ++ struct isp_device *isp = to_isp_device(isp_ccdc); ++ BUG_ON(isp_ccdc->lsc_table_new == PTR_FREE); ++ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_inuse); ++ isp_ccdc->lsc_table_inuse = isp_ccdc->lsc_table_new; ++ isp_ccdc->lsc_table_new = PTR_FREE; ++ isp_ccdc->update_lsc_table = 0; + } -+ -+ bt_sk(sk)->defer_setup = opt; -+ break; -+ -+ default: -+ err = -ENOPROTOOPT; -+ break; ++ ispccdc_config_lsc(isp_ccdc); ++ ispccdc_program_lsc(isp_ccdc); + } -+ -+ release_sock(sk); -+ return err; ++ isp_ccdc->update_lsc_config = 0; +} + -+static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) - { - struct sock *sk = sock->sk; - struct sock *l2cap_sk; - struct rfcomm_conninfo cinfo; - int len, err = 0; -+ u32 opt; - - BT_DBG("sk %p", sk); - -@@ -760,12 +846,32 @@ static int rfcomm_sock_getsockopt(struct - - switch (optname) { - case RFCOMM_LM: -- if (put_user(rfcomm_pi(sk)->link_mode, (u32 __user *) optval)) -+ switch (rfcomm_pi(sk)->sec_level) { -+ case BT_SECURITY_LOW: -+ opt = RFCOMM_LM_AUTH; -+ break; -+ case BT_SECURITY_MEDIUM: -+ opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; -+ break; -+ case BT_SECURITY_HIGH: -+ opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | -+ RFCOMM_LM_SECURE; -+ break; -+ default: -+ opt = 0; -+ break; -+ } -+ -+ if (rfcomm_pi(sk)->role_switch) -+ opt |= RFCOMM_LM_MASTER; -+ -+ if (put_user(opt, (u32 __user *) optval)) - err = -EFAULT; - break; - - case RFCOMM_CONNINFO: -- if (sk->sk_state != BT_CONNECTED) { -+ if (sk->sk_state != BT_CONNECTED && -+ !rfcomm_pi(sk)->dlc->defer_setup) { - err = -ENOTCONN; - break; - } -@@ -790,6 +896,60 @@ static int rfcomm_sock_getsockopt(struct - return err; - } - -+static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) ++void ispccdc_lsc_error_handler(struct isp_ccdc_device *isp_ccdc) +{ -+ struct sock *sk = sock->sk; -+ struct bt_security sec; -+ int len, err = 0; -+ -+ BT_DBG("sk %p", sk); -+ -+ if (level == SOL_RFCOMM) -+ return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); -+ -+ if (level != SOL_BLUETOOTH) -+ return -ENOPROTOOPT; -+ -+ if (get_user(len, optlen)) -+ return -EFAULT; -+ -+ lock_sock(sk); -+ -+ switch (optname) { -+ case BT_SECURITY: -+ if (sk->sk_type != SOCK_STREAM) { -+ err = 
-EINVAL; -+ break; -+ } -+ -+ sec.level = rfcomm_pi(sk)->sec_level; -+ -+ len = min_t(unsigned int, len, sizeof(sec)); -+ if (copy_to_user(optval, (char *) &sec, len)) -+ err = -EFAULT; -+ -+ break; -+ -+ case BT_DEFER_SETUP: -+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { -+ err = -EINVAL; -+ break; -+ } -+ -+ if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) -+ err = -EFAULT; -+ -+ break; -+ -+ default: -+ err = -ENOPROTOOPT; -+ break; -+ } -+ -+ release_sock(sk); -+ return err; ++ ispccdc_enable_lsc(isp_ccdc, 0); +} + - static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) - { - struct sock *sk = sock->sk; -@@ -893,6 +1053,10 @@ int rfcomm_connect_ind(struct rfcomm_ses - - done: - bh_unlock_sock(parent); -+ -+ if (bt_sk(parent)->defer_setup) -+ parent->sk_state_change(parent); -+ - return result; - } - -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/rfcomm/tty.c linux-omap-2.6.28-nokia1/net/bluetooth/rfcomm/tty.c ---- linux-omap-2.6.28-omap1/net/bluetooth/rfcomm/tty.c 2011-06-22 13:14:25.653067643 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/rfcomm/tty.c 2011-06-22 13:19:33.283063268 +0200 -@@ -39,11 +39,6 @@ - #include - #include - --#ifndef CONFIG_BT_RFCOMM_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define RFCOMM_TTY_MAGIC 0x6d02 /* magic number for rfcomm struct */ - #define RFCOMM_TTY_PORTS RFCOMM_MAX_DEV /* whole lotta rfcomm devices */ - #define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */ -@@ -58,7 +53,7 @@ struct rfcomm_dev { - char name[12]; - int id; - unsigned long flags; -- int opened; -+ atomic_t opened; - int err; - - bdaddr_t src; -@@ -261,6 +256,8 @@ static int rfcomm_dev_add(struct rfcomm_ - dev->flags = req->flags & - ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); - -+ atomic_set(&dev->opened, 0); -+ - init_waitqueue_head(&dev->wait); - tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); - -@@ -330,10 +327,10 @@ static void rfcomm_dev_del(struct rfcomm ++/** ++ * ispccdc_config_crop - Configures crop parameters for the ISP CCDC. ++ * @left: Left offset of the crop area. ++ * @top: Top offset of the crop area. ++ * @height: Height of the crop area. ++ * @width: Width of the crop area. + * +- * Enables or disables the optical black clamp. When disabled, the digital +- * clamp operates. ++ * The following restrictions are applied for the crop settings. If incoming ++ * values do not follow these restrictions then we map the settings to the ++ * closest acceptable crop value. ++ * 1) Left offset is always odd. This can be avoided if we enable byte swap ++ * option for incoming data into CCDC. ++ * 2) Top offset is always even. ++ * 3) Crop height is always even. 
++ * 4) Crop width is always a multiple of 16 pixels + **/ +-void ispccdc_enable_black_clamp(struct isp_ccdc_device *isp_ccdc, u8 enable) ++static void ispccdc_config_crop(struct isp_ccdc_device *isp_ccdc, ++ u32 left, u32 top, u32 height, u32 width) { - BT_DBG("dev %p", dev); - -- if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) -- BUG_ON(1); -- else -- set_bit(RFCOMM_TTY_RELEASED, &dev->flags); -+ BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); -+ -+ if (atomic_read(&dev->opened) > 0) -+ return; - - write_lock_bh(&rfcomm_dev_lock); - list_del_init(&dev->list); -@@ -689,9 +686,10 @@ static int rfcomm_tty_open(struct tty_st - if (!dev) - return -ENODEV; - -- BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), dev->channel, dev->opened); -+ BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), -+ dev->channel, atomic_read(&dev->opened)); - -- if (dev->opened++ != 0) -+ if (atomic_inc_return(&dev->opened) > 1) - return 0; - - dlc = dev->dlc; -@@ -747,9 +745,10 @@ static void rfcomm_tty_close(struct tty_ - if (!dev) - return; - -- BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->opened); -+ BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, -+ atomic_read(&dev->opened)); - -- if (--dev->opened == 0) { -+ if (atomic_dec_and_test(&dev->opened)) { - if (dev->tty_dev->parent) - device_move(dev->tty_dev, NULL); +- struct device *dev = to_device(isp_ccdc); ++ isp_ccdc->ccdcin_woffset = left + (left % 2); ++ isp_ccdc->ccdcin_hoffset = top + (top % 2); -@@ -763,6 +762,14 @@ static void rfcomm_tty_close(struct tty_ - tty->driver_data = NULL; - dev->tty = NULL; - rfcomm_dlc_unlock(dev->dlc); -+ -+ if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) { -+ write_lock_bh(&rfcomm_dev_lock); -+ list_del_init(&dev->list); -+ write_unlock_bh(&rfcomm_dev_lock); +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP, +- ~ISPCCDC_CLAMP_CLAMPEN, +- enable ? ISPCCDC_CLAMP_CLAMPEN : 0); +- isp_ccdc->obclamp_en = enable; ++ isp_ccdc->crop_w = width - (width % 16); ++ isp_ccdc->crop_h = height + (height % 2); + -+ rfcomm_dev_put(dev); -+ } - } - - rfcomm_dev_put(dev); -diff -Nurp linux-omap-2.6.28-omap1/net/bluetooth/sco.c linux-omap-2.6.28-nokia1/net/bluetooth/sco.c ---- linux-omap-2.6.28-omap1/net/bluetooth/sco.c 2011-06-22 13:14:25.663067641 +0200 -+++ linux-omap-2.6.28-nokia1/net/bluetooth/sco.c 2011-06-22 13:19:33.283063268 +0200 -@@ -48,11 +48,6 @@ - #include - #include - --#ifndef CONFIG_BT_SCO_DEBUG --#undef BT_DBG --#define BT_DBG(D...) --#endif -- - #define VERSION "0.6" - - static int disable_esco = 0; -@@ -200,7 +195,7 @@ static int sco_connect(struct sock *sk) - else - type = SCO_LINK; - -- hcon = hci_connect(hdev, type, dst, HCI_AT_NO_BONDING); -+ hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); - if (!hcon) - goto done; - -@@ -364,20 +359,9 @@ static void sco_sock_kill(struct sock *s - sock_put(sk); ++ DPRINTK_ISPCCDC("\n\tOffsets L %d T %d W %d H %d\n", ++ isp_ccdc->ccdcin_woffset, ++ isp_ccdc->ccdcin_hoffset, ++ isp_ccdc->crop_w, ++ isp_ccdc->crop_h); } --/* Close socket. -- * Must be called on unlocked socket. -- */ --static void sco_sock_close(struct sock *sk) -+static void __sco_sock_close(struct sock *sk) + /** +- * ispccdc_config_fpc - Configures the Faulty Pixel Correction parameters. +- * @fpc: Structure containing the number of faulty pixels corrected in the +- * frame, address of the FPC table. 
++ * ispccdc_config_outlineoffset - Configures the output line offset ++ * @offset: Must be twice the Output width and aligned on 32 byte boundary ++ * @oddeven: Specifies the odd/even line pattern to be chosen to store the ++ * output. ++ * @numlines: Set the value 0-3 for +1-4lines, 4-7 for -1-4lines. + * +- * Returns 0 if successful, or -EINVAL if FPC Address is not on the 64 byte ++ * - Configures the output line offset when stored in memory ++ * - Sets the odd/even line pattern to store the output ++ * (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4)) ++ * - Configures the number of even and odd line fields in case of rearranging ++ * the lines. ++ * ++ * Returns 0 if successful, or -EINVAL if the offset is not in 32 byte + * boundary. + **/ +-int ispccdc_config_fpc(struct isp_ccdc_device *isp_ccdc, struct ispccdc_fpc fpc) ++static int ispccdc_config_outlineoffset(struct isp_ccdc_device *isp_ccdc, ++ u32 offset, u8 oddeven, u8 numlines) { -- struct sco_conn *conn; -- -- sco_sock_clear_timer(sk); -- -- lock_sock(sk); + struct device *dev = to_device(isp_ccdc); +- u32 fpc_val = 0; - -- conn = sco_pi(sk)->conn; -- -- BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket); -+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); +- fpc_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); - switch (sk->sk_state) { - case BT_LISTEN: -@@ -395,9 +379,15 @@ static void sco_sock_close(struct sock * - sock_set_flag(sk, SOCK_ZAPPED); - break; +- if ((fpc.fpcaddr & 0xFFFFFFC0) == fpc.fpcaddr) { +- isp_reg_writel(dev, fpc_val & (~ISPCCDC_FPC_FPCEN), +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); +- isp_reg_writel(dev, fpc.fpcaddr, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR); ++ if ((offset & ISP_32B_BOUNDARY_OFFSET) == offset) { ++ isp_reg_writel(dev, (offset & 0xFFFF), ++ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF); + } else { +- DPRINTK_ISPCCDC("FPC Address should be on 64byte boundary\n"); ++ DPRINTK_ISPCCDC("ISP_ERR : Offset should be in 32 byte" ++ " boundary\n"); + return -EINVAL; } -+} - -+/* Must be called on unlocked socket. */ -+static void sco_sock_close(struct sock *sk) -+{ -+ sco_sock_clear_timer(sk); -+ lock_sock(sk); -+ __sco_sock_close(sk); - release_sock(sk); -- - sco_sock_kill(sk); - } - -@@ -673,7 +663,7 @@ static int sco_sock_setsockopt(struct so - return err; - } +- isp_reg_writel(dev, fpc_val | (fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT), +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); +- return 0; +-} --static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) -+static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) - { - struct sock *sk = sock->sk; - struct sco_options opts; -@@ -728,6 +718,55 @@ static int sco_sock_getsockopt(struct so - return err; - } +-/** +- * ispccdc_enable_fpc - Enables the Faulty Pixel Correction. +- * @enable: 0 Disables FPC, 1 Enables FPC. 
+- **/ +-void ispccdc_enable_fpc(struct isp_ccdc_device *isp_ccdc, u8 enable) +-{ +- struct device *dev = to_device(isp_ccdc); ++ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, ++ ~ISPCCDC_SDOFST_FINV); -+static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) -+{ -+ struct sock *sk = sock->sk; -+ int len, err = 0; -+ -+ BT_DBG("sk %p", sk); -+ -+ if (level == SOL_SCO) -+ return sco_sock_getsockopt_old(sock, optname, optval, optlen); -+ -+ if (get_user(len, optlen)) -+ return -EFAULT; -+ -+ lock_sock(sk); +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, +- ~ISPCCDC_FPC_FPCEN, enable ? ISPCCDC_FPC_FPCEN : 0); ++ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, ++ ~ISPCCDC_SDOFST_FOFST_4L); + -+ switch (optname) { ++ switch (oddeven) { ++ case EVENEVEN: ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, ++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT); ++ break; ++ case ODDEVEN: ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, ++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT); ++ break; ++ case EVENODD: ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, ++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT); ++ break; ++ case ODDODD: ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, ++ (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT); ++ break; + default: -+ err = -ENOPROTOOPT; + break; + } -+ -+ release_sock(sk); -+ return err; -+} -+ -+static int sco_sock_shutdown(struct socket *sock, int how) -+{ -+ struct sock *sk = sock->sk; -+ int err = 0; -+ -+ BT_DBG("sock %p, sk %p", sock, sk); -+ -+ if (!sk) -+ return 0; -+ -+ lock_sock(sk); -+ if (!sk->sk_shutdown) { -+ sk->sk_shutdown = SHUTDOWN_MASK; -+ sco_sock_clear_timer(sk); -+ __sco_sock_close(sk); -+ -+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) -+ err = bt_sock_wait_state(sk, BT_CLOSED, -+ sk->sk_lingertime); -+ } -+ release_sock(sk); -+ return err; -+} -+ - static int sco_sock_release(struct socket *sock) - { - struct sock *sk = sock->sk; -@@ -837,10 +876,30 @@ done: - /* ----- SCO interface with lower layer (HCI) ----- */ - static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) - { -+ register struct sock *sk; -+ struct hlist_node *node; -+ int lm = 0; -+ -+ if (type != SCO_LINK && type != ESCO_LINK) -+ return 0; -+ - BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); - -- /* Always accept connection */ -- return HCI_LM_ACCEPT; -+ /* Find listening sockets */ -+ read_lock(&sco_sk_list.lock); -+ sk_for_each(sk, node, &sco_sk_list.head) { -+ if (sk->sk_state != BT_LISTEN) -+ continue; -+ -+ if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || -+ !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { -+ lm |= HCI_LM_ACCEPT; -+ break; -+ } -+ } -+ read_unlock(&sco_sk_list.lock); -+ -+ return lm; - } - - static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) -@@ -862,7 +921,7 @@ static int sco_connect_cfm(struct hci_co - return 0; ++ return 0; } --static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason) -+static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) + /** +- * ispccdc_config_black_comp - Configures Black Level Compensation parameters. +- * @blcomp: Structure containing the black level compensation value for RGrGbB +- * pixels. in 2's complement. ++ * ispccdc_set_outaddr - Sets the memory address where the output will be saved ++ * @addr: 32-bit memory address aligned on 32 byte boundary. ++ * ++ * Sets the memory address where the output will be saved. 
++ * ++ * Returns 0 if successful, or -EINVAL if the address is not in the 32 byte ++ * boundary. + **/ +-void ispccdc_config_black_comp(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_blcomp blcomp) ++int ispccdc_set_outaddr(struct isp_ccdc_device *isp_ccdc, u32 addr) { - BT_DBG("hcon %p reason %d", hcon, reason); - -@@ -929,7 +988,7 @@ static const struct proto_ops sco_sock_o - .ioctl = bt_sock_ioctl, - .mmap = sock_no_mmap, - .socketpair = sock_no_socketpair, -- .shutdown = sock_no_shutdown, -+ .shutdown = sco_sock_shutdown, - .setsockopt = sco_sock_setsockopt, - .getsockopt = sco_sock_getsockopt - }; -@@ -945,7 +1004,7 @@ static struct hci_proto sco_hci_proto = - .id = HCI_PROTO_SCO, - .connect_ind = sco_connect_ind, - .connect_cfm = sco_connect_cfm, -- .disconn_ind = sco_disconn_ind, -+ .disconn_cfm = sco_disconn_cfm, - .recv_scodata = sco_recv_scodata - }; + struct device *dev = to_device(isp_ccdc); +- u32 blcomp_val = 0; -diff -Nurp linux-omap-2.6.28-omap1/net/core/dev.c linux-omap-2.6.28-nokia1/net/core/dev.c ---- linux-omap-2.6.28-omap1/net/core/dev.c 2011-06-22 13:14:25.693067641 +0200 -+++ linux-omap-2.6.28-nokia1/net/core/dev.c 2011-06-22 13:19:33.283063268 +0200 -@@ -281,8 +281,8 @@ static const unsigned short netdev_lock_ - ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, - ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, - ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, -- ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID, -- ARPHRD_NONE}; -+ ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, -+ ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE}; - - static const char *netdev_lock_name[] = - {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", -@@ -298,8 +298,8 @@ static const char *netdev_lock_name[] = - "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", - "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", - "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", -- "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID", -- "_xmit_NONE"}; -+ "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", -+ "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"}; +- blcomp_val |= blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT; +- blcomp_val |= blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT; +- blcomp_val |= blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT; +- blcomp_val |= blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT; ++ if ((addr & ISP_32B_BOUNDARY_BUF) == addr) { ++ isp_reg_writel(dev, addr, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_SDR_ADDR); ++ return 0; ++ } else { ++ DPRINTK_ISPCCDC("ISP_ERR : Address should be in 32 byte" ++ " boundary\n"); ++ return -EINVAL; ++ } - static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; - static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; -diff -Nurp linux-omap-2.6.28-omap1/net/core/neighbour.c linux-omap-2.6.28-nokia1/net/core/neighbour.c ---- linux-omap-2.6.28-omap1/net/core/neighbour.c 2011-06-22 13:14:25.693067641 +0200 -+++ linux-omap-2.6.28-nokia1/net/core/neighbour.c 2011-06-22 13:19:33.283063268 +0200 -@@ -694,75 +694,74 @@ static void neigh_connect(struct neighbo - hh->hh_output = neigh->ops->hh_output; +- isp_reg_writel(dev, blcomp_val, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_BLKCMP); } --static void neigh_periodic_timer(unsigned long arg) -+static void neigh_periodic_work(struct work_struct *work) + /** +- * ispccdc_config_vp - Configures the Video Port Configuration parameters. 
+- * @vpcfg: Structure containing the Video Port input frequency, and the 10 bit +- * format. ++ * ispccdc_config_sync_if - Sets the sync i/f params between sensor and CCDC. ++ * @syncif: Structure containing the sync parameters like field state, CCDC in ++ * master/slave mode, raw/yuv data, polarity of data, field, hs, vs ++ * signals. + **/ +-void ispccdc_config_vp(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_vp vpcfg) ++static void ispccdc_config_sync_if(struct isp_ccdc_device *isp_ccdc, ++ struct ispccdc_syncif syncif) { -- struct neigh_table *tbl = (struct neigh_table *)arg; -+ struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); - struct neighbour *n, **np; -- unsigned long expire, now = jiffies; -+ unsigned int i; + struct device *dev = to_device(isp_ccdc); +- u32 fmtcfg_vp = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_FMTCFG); ++ u32 syn_mode = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_SYN_MODE); - NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); +- fmtcfg_vp &= ISPCCDC_FMTCFG_VPIN_MASK & ISPCCDC_FMTCFG_VPIF_FRQ_MASK; ++ syn_mode |= ISPCCDC_SYN_MODE_VDHDEN; -- write_lock(&tbl->lock); -+ write_lock_bh(&tbl->lock); +- switch (vpcfg.bitshift_sel) { +- case BIT9_0: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0; +- break; +- case BIT10_1: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1; ++ if (syncif.fldstat) ++ syn_mode |= ISPCCDC_SYN_MODE_FLDSTAT; ++ else ++ syn_mode &= ~ISPCCDC_SYN_MODE_FLDSTAT; ++ ++ syn_mode &= ISPCCDC_SYN_MODE_INPMOD_MASK; ++ isp_ccdc->syncif_ipmod = syncif.ipmod; ++ ++ switch (syncif.ipmod) { ++ case RAW: + break; +- case BIT11_2: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2; ++ case YUV16: ++ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16; + break; +- case BIT12_3: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3; ++ case YUV8: ++ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR8; + break; + }; +- switch (vpcfg.freq_sel) { +- case PIXCLKBY2: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY2; +- break; +- case PIXCLKBY3_5: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY3; ++ ++ syn_mode &= ISPCCDC_SYN_MODE_DATSIZ_MASK; ++ switch (syncif.datsz) { ++ case DAT8: ++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8; + break; +- case PIXCLKBY4_5: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY4; ++ case DAT10: ++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10; + break; +- case PIXCLKBY5_5: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY5; ++ case DAT11: ++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11; + break; +- case PIXCLKBY6_5: +- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIF_FRQ_BY6; ++ case DAT12: ++ syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12; + break; + }; +- isp_reg_writel(dev, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); +-} - /* - * periodically recompute ReachableTime from random function - */ +-/** +- * ispccdc_enable_vp - Enables the Video Port. +- * @enable: 0 Disables VP, 1 Enables VP +- **/ +-void ispccdc_enable_vp(struct isp_ccdc_device *isp_ccdc, u8 enable) +-{ +- struct device *dev = to_device(isp_ccdc); ++ if (syncif.fldmode) ++ syn_mode |= ISPCCDC_SYN_MODE_FLDMODE; ++ else ++ syn_mode &= ~ISPCCDC_SYN_MODE_FLDMODE; -- if (time_after(now, tbl->last_rand + 300 * HZ)) { -+ if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { - struct neigh_parms *p; -- tbl->last_rand = now; -+ tbl->last_rand = jiffies; - for (p = &tbl->parms; p; p = p->next) - p->reachable_time = - neigh_rand_reach_time(p->base_reachable_time); - } +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG, +- ~ISPCCDC_FMTCFG_VPEN, +- enable ? 
ISPCCDC_FMTCFG_VPEN : 0); +-} ++ if (syncif.datapol) ++ syn_mode |= ISPCCDC_SYN_MODE_DATAPOL; ++ else ++ syn_mode &= ~ISPCCDC_SYN_MODE_DATAPOL; -- np = &tbl->hash_buckets[tbl->hash_chain_gc]; -- tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask); -+ for (i = 0 ; i <= tbl->hash_mask; i++) { -+ np = &tbl->hash_buckets[i]; +-/** +- * ispccdc_config_reformatter - Configures the Reformatter. +- * @refmt: Structure containing the memory address to format and the bit fields +- * for the reformatter registers. +- * +- * Configures the Reformatter register values if line alternating is disabled. +- * Else, just enabling line alternating is enough. +- **/ +-void ispccdc_config_reformatter(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_refmt refmt) +-{ +- struct device *dev = to_device(isp_ccdc); +- u32 fmtcfg_val = 0; +- +- fmtcfg_val = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); +- +- if (refmt.lnalt) +- fmtcfg_val |= ISPCCDC_FMTCFG_LNALT; +- else { +- fmtcfg_val &= ~ISPCCDC_FMTCFG_LNALT; +- fmtcfg_val &= 0xFFFFF003; +- fmtcfg_val |= refmt.lnum << ISPCCDC_FMTCFG_LNUM_SHIFT; +- fmtcfg_val |= refmt.plen_even << +- ISPCCDC_FMTCFG_PLEN_EVEN_SHIFT; +- fmtcfg_val |= refmt.plen_odd << ISPCCDC_FMTCFG_PLEN_ODD_SHIFT; +- +- isp_reg_writel(dev, refmt.prgeven0, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGEVEN0); +- isp_reg_writel(dev, refmt.prgeven1, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGEVEN1); +- isp_reg_writel(dev, refmt.prgodd0, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGODD0); +- isp_reg_writel(dev, refmt.prgodd1, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PRGODD1); +- isp_reg_writel(dev, refmt.fmtaddr0, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR0); +- isp_reg_writel(dev, refmt.fmtaddr1, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR1); +- isp_reg_writel(dev, refmt.fmtaddr2, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR2); +- isp_reg_writel(dev, refmt.fmtaddr3, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR3); +- isp_reg_writel(dev, refmt.fmtaddr4, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR4); +- isp_reg_writel(dev, refmt.fmtaddr5, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR5); +- isp_reg_writel(dev, refmt.fmtaddr6, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR6); +- isp_reg_writel(dev, refmt.fmtaddr7, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_ADDR7); +- } +- isp_reg_writel(dev, fmtcfg_val, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG); +-} ++ if (syncif.fldpol) ++ syn_mode |= ISPCCDC_SYN_MODE_FLDPOL; ++ else ++ syn_mode &= ~ISPCCDC_SYN_MODE_FLDPOL; -- while ((n = *np) != NULL) { -- unsigned int state; -+ while ((n = *np) != NULL) { -+ unsigned int state; +-/** +- * ispccdc_enable_reformatter - Enables the Reformatter. +- * @enable: 0 Disables Reformatter, 1- Enables Data Reformatter +- **/ +-void ispccdc_enable_reformatter(struct isp_ccdc_device *isp_ccdc, u8 enable) +-{ +- struct device *dev = to_device(isp_ccdc); ++ if (syncif.hdpol) ++ syn_mode |= ISPCCDC_SYN_MODE_HDPOL; ++ else ++ syn_mode &= ~ISPCCDC_SYN_MODE_HDPOL; -- write_lock(&n->lock); -+ write_lock(&n->lock); +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG, +- ~ISPCCDC_FMTCFG_FMTEN, +- enable ? 
ISPCCDC_FMTCFG_FMTEN : 0); +- isp_ccdc->refmt_en = enable; +-} ++ if (syncif.vdpol) ++ syn_mode |= ISPCCDC_SYN_MODE_VDPOL; ++ else ++ syn_mode &= ~ISPCCDC_SYN_MODE_VDPOL; -- state = n->nud_state; -- if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { -- write_unlock(&n->lock); -- goto next_elt; -- } -+ state = n->nud_state; -+ if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { -+ write_unlock(&n->lock); -+ goto next_elt; -+ } +-/** +- * ispccdc_config_culling - Configures the culling parameters. +- * @cull: Structure containing the vertical culling pattern, and horizontal +- * culling pattern for odd and even lines. +- **/ +-void ispccdc_config_culling(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_culling cull) +-{ +- struct device *dev = to_device(isp_ccdc); ++ if (syncif.ccdc_mastermode) { ++ syn_mode |= ISPCCDC_SYN_MODE_FLDOUT | ISPCCDC_SYN_MODE_VDHDOUT; ++ isp_reg_writel(dev, ++ syncif.hs_width << ISPCCDC_HD_VD_WID_HDW_SHIFT ++ | syncif.vs_width << ISPCCDC_HD_VD_WID_VDW_SHIFT, ++ OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_HD_VD_WID); -- if (time_before(n->used, n->confirmed)) -- n->used = n->confirmed; -+ if (time_before(n->used, n->confirmed)) -+ n->used = n->confirmed; +- u32 culling_val = 0; ++ isp_reg_writel(dev, ++ syncif.ppln << ISPCCDC_PIX_LINES_PPLN_SHIFT ++ | syncif.hlprf << ISPCCDC_PIX_LINES_HLPRF_SHIFT, ++ OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_PIX_LINES); ++ } else ++ syn_mode &= ~(ISPCCDC_SYN_MODE_FLDOUT | ++ ISPCCDC_SYN_MODE_VDHDOUT); -- if (atomic_read(&n->refcnt) == 1 && -- (state == NUD_FAILED || -- time_after(now, n->used + n->parms->gc_staletime))) { -- *np = n->next; -- n->dead = 1; -+ if (atomic_read(&n->refcnt) == 1 && -+ (state == NUD_FAILED || -+ time_after(jiffies, n->used + n->parms->gc_staletime))) { -+ *np = n->next; -+ n->dead = 1; -+ write_unlock(&n->lock); -+ neigh_cleanup_and_release(n); -+ continue; -+ } - write_unlock(&n->lock); -- neigh_cleanup_and_release(n); -- continue; -- } -- write_unlock(&n->lock); +- culling_val |= cull.v_pattern << ISPCCDC_CULLING_CULV_SHIFT; +- culling_val |= cull.h_even << ISPCCDC_CULLING_CULHEVN_SHIFT; +- culling_val |= cull.h_odd << ISPCCDC_CULLING_CULHODD_SHIFT; ++ isp_reg_writel(dev, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); - next_elt: -- np = &n->next; -+ np = &n->next; -+ } -+ /* -+ * It's fine to release lock here, even if hash table -+ * grows while we are preempted. -+ */ -+ write_unlock_bh(&tbl->lock); -+ cond_resched(); -+ write_lock_bh(&tbl->lock); - } -- - /* Cycle through all hash buckets every base_reachable_time/2 ticks. - * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 - * base_reachable_time. 
- */ -- expire = tbl->parms.base_reachable_time >> 1; -- expire /= (tbl->hash_mask + 1); -- if (!expire) -- expire = 1; -- -- if (expire>HZ) -- mod_timer(&tbl->gc_timer, round_jiffies(now + expire)); -- else -- mod_timer(&tbl->gc_timer, now + expire); -- -- write_unlock(&tbl->lock); -+ schedule_delayed_work(&tbl->gc_work, -+ tbl->parms.base_reachable_time >> 1); -+ write_unlock_bh(&tbl->lock); +- isp_reg_writel(dev, culling_val, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_CULLING); ++ if (!(syncif.bt_r656_en)) { ++ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ++ ISPCCDC_REC656IF, ~ISPCCDC_REC656IF_R656ON); ++ } } - static __inline__ int neigh_max_probes(struct neighbour *n) -@@ -1444,10 +1443,8 @@ void neigh_table_init_no_netlink(struct - get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); - - rwlock_init(&tbl->lock); -- setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl); -- tbl->gc_timer.expires = now + 1; -- add_timer(&tbl->gc_timer); + /** +- * ispccdc_enable_lpf - Enables the Low-Pass Filter (LPF). +- * @enable: 0 Disables LPF, 1 Enables LPF +- **/ +-void ispccdc_enable_lpf(struct isp_ccdc_device *isp_ccdc, u8 enable) ++ * Set the value to be used for CCDC_CFG.WENLOG. ++ * w - Value of wenlog. ++ */ ++void ispccdc_set_wenlog(struct isp_ccdc_device *isp_ccdc, u32 wenlog) + { +- struct device *dev = to_device(isp_ccdc); - -+ INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); -+ schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); - setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); - skb_queue_head_init_class(&tbl->proxy_queue, - &neigh_table_proxy_queue_class); -@@ -1484,7 +1481,8 @@ int neigh_table_clear(struct neigh_table - struct neigh_table **tp; - - /* It is not clean... Fix it to unload IPv6 module safely */ -- del_timer_sync(&tbl->gc_timer); -+ cancel_delayed_work(&tbl->gc_work); -+ flush_scheduled_work(); - del_timer_sync(&tbl->proxy_timer); - pneigh_queue_purge(&tbl->proxy_queue); - neigh_ifdown(tbl, NULL); -@@ -1750,7 +1748,6 @@ static int neightbl_fill_info(struct sk_ - .ndtc_last_rand = jiffies_to_msecs(rand_delta), - .ndtc_hash_rnd = tbl->hash_rnd, - .ndtc_hash_mask = tbl->hash_mask, -- .ndtc_hash_chain_gc = tbl->hash_chain_gc, - .ndtc_proxy_qlen = tbl->proxy_queue.qlen, - }; - -diff -Nurp linux-omap-2.6.28-omap1/net/core/sock.c linux-omap-2.6.28-nokia1/net/core/sock.c ---- linux-omap-2.6.28-omap1/net/core/sock.c 2011-06-22 13:14:25.693067641 +0200 -+++ linux-omap-2.6.28-nokia1/net/core/sock.c 2011-06-22 13:19:33.283063268 +0200 -@@ -886,8 +886,23 @@ static struct sock *sk_prot_alloc(struct - struct kmem_cache *slab; - - slab = prot->slab; -- if (slab != NULL) -- sk = kmem_cache_alloc(slab, priority); -+ if (slab != NULL) { -+ sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); -+ if (!sk) -+ return sk; -+ if (priority & __GFP_ZERO) { -+ /* -+ * caches using SLAB_DESTROY_BY_RCU should let -+ * sk_node.next un-modified. Special care is taken -+ * when initializing object to zero. -+ */ -+ if (offsetof(struct sock, sk_node.next) != 0) -+ memset(sk, 0, offsetof(struct sock, sk_node.next)); -+ memset(&sk->sk_node.pprev, 0, -+ prot->obj_size - offsetof(struct sock, -+ sk_node.pprev)); -+ } -+ } - else - sk = kmalloc(prot->obj_size, priority); +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE, +- ~ISPCCDC_SYN_MODE_LPF, +- enable ? 
ISPCCDC_SYN_MODE_LPF : 0); ++ isp_ccdc->wenlog = wenlog; + } -diff -Nurp linux-omap-2.6.28-omap1/net/ipv4/netfilter/iphb.c linux-omap-2.6.28-nokia1/net/ipv4/netfilter/iphb.c ---- linux-omap-2.6.28-omap1/net/ipv4/netfilter/iphb.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/net/ipv4/netfilter/iphb.c 2011-06-22 13:19:33.283063268 +0200 -@@ -0,0 +1,439 @@ -+/* -+ * linux/net/ipv4/netfilter/iphb.c -+ * -+ * Netfilter module to delay outgoing TCP keepalive messages. + /** +- * ispccdc_config_alaw - Configures the input width for A-law. +- * @ipwidth: Input width for A-law ++ * ispccdc_config_datapath - Specifies the input and output modules for CCDC. ++ * @input: Indicates the module that inputs the image to the CCDC. ++ * @output: Indicates the module to which the CCDC outputs the image. + * -+ * Copyright (C) 2008 Nokia Corporation. All rights reserved. -+ * Written by Jukka Rissanen ++ * Configures the default configuration for the CCDC to work with. + * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. ++ * The valid values for the input are CCDC_RAW (0), CCDC_YUV_SYNC (1), ++ * CCDC_YUV_BT (2), and CCDC_OTHERS (3). + * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. ++ * The valid values for the output are CCDC_YUV_RSZ (0), CCDC_YUV_MEM_RSZ (1), ++ * CCDC_OTHERS_VP (2), CCDC_OTHERS_MEM (3), CCDC_OTHERS_VP_MEM (4). + * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ */ -+ -+#ifdef IP_NF_HB_DEBUG -+#define DEBUG -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+#define MY_NAME "iphb" -+#define MAX_KEEPALIVES 20 -+#define IPV6_HDR_LEN 40 -+#define MAX_FLUSH_VALUE 180 /* 3 minutes */ -+#define MAX_RECV_LEN 15 -+ -+ -+/* Linked list that keeps track of the queued keepalives */ -+struct keepalives { -+ struct list_head list; -+ int num_keeps; /* current number of keepalives in the list */ -+ struct sk_buff *skb; -+ int (*okfn)(struct sk_buff *); -+}; -+ -+ -+/* -+ * Separate list of packets to be sent. This is a separate list so that -+ * no message is sent while being holding the bh lock. 
-+ */ -+struct packets { -+ struct list_head list; -+ struct sk_buff *skb; -+ int (*okfn)(struct sk_buff *); -+}; -+ -+ -+static struct keepalives keepalives; /* List of keepalive messages */ -+static DEFINE_SPINLOCK(keepalives_lock); /* protects the keepalive list */ -+ -+static unsigned long last_notification; -+static unsigned int flush_notification = MAX_FLUSH_VALUE; -+static int trigger_poll; -+static DECLARE_WAIT_QUEUE_HEAD(iphb_pollq); -+ -+static int iphb_is_enabled; -+static struct device *iphb_dev; -+ -+ -+static void flush_keepalives(int notify) -+{ -+ struct list_head *p, *q; -+ struct keepalives *entry; -+ struct packets packets, *packet; -+ -+ if (keepalives.num_keeps > 0) -+ dev_dbg(iphb_dev, "Flush (%d)\n", keepalives.num_keeps); -+ -+ /* -+ * If notify is set to 1, then userspace is notified about -+ * the flush. -+ */ -+ if (notify) { -+ unsigned long current_time = get_seconds(); -+ if (current_time > -+ (last_notification + flush_notification)) { -+ dev_dbg(iphb_dev, "Wake up daemon\n"); -+ last_notification = current_time; -+ trigger_poll = 1; -+ wake_up_interruptible(&iphb_pollq); -+ } -+ } -+ -+ -+ INIT_LIST_HEAD(&packets.list); -+ -+ spin_lock_bh(&keepalives_lock); -+ -+ list_for_each_safe(p, q, &keepalives.list) { -+ entry = list_entry(p, struct keepalives, list); -+ -+ /* -+ * Create a separate list for keepalives to be -+ * sent outside. -+ */ -+ packet = kzalloc(sizeof(struct packets), GFP_ATOMIC); -+ if (!packet) { -+ spin_unlock_bh(&keepalives_lock); -+ return; -+ } -+ packet->skb = entry->skb; -+ packet->okfn = entry->okfn; -+ list_add_tail(&packet->list, &packets.list); -+ -+ keepalives.num_keeps--; -+ list_del(p); -+ kfree(entry); -+ } -+ keepalives.num_keeps = 0; -+ -+ spin_unlock_bh(&keepalives_lock); ++ * Returns 0 if successful, or -EINVAL if wrong I/O combination or wrong input ++ * or output values. + **/ +-void ispccdc_config_alaw(struct isp_ccdc_device *isp_ccdc, +- enum alaw_ipwidth ipwidth) ++static int ispccdc_config_datapath(struct isp_ccdc_device *isp_ccdc, ++ struct isp_pipeline *pipe) + { + struct device *dev = to_device(isp_ccdc); + +- isp_reg_writel(dev, ipwidth << ISPCCDC_ALAW_GWDI_SHIFT, +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW); +-} ++ u32 syn_mode = 0; ++ struct ispccdc_vp vpcfg; ++ struct ispccdc_syncif syncif; ++ struct ispccdc_bclamp blkcfg; + +-/** +- * ispccdc_enable_alaw - Enables the A-law compression. 
+- * @enable: 0 - Disables A-law, 1 - Enables A-law +- **/ +-void ispccdc_enable_alaw(struct isp_ccdc_device *isp_ccdc, u8 enable) +-{ +- struct device *dev = to_device(isp_ccdc); ++ u32 colptn = ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT | ++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT | ++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT | ++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT | ++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT | ++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT | ++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT | ++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT | ++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT | ++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT | ++ ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT | ++ ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT | ++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT | ++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT | ++ ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT | ++ ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT; + ++ syn_mode = isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); + -+ /* -+ * Send the packets to net from outside of locked area -+ * so that we wont be scheduled while atomic. -+ */ -+ list_for_each_safe(p, q, &packets.list) { -+ int ret; -+ packet = list_entry(p, struct packets, list); -+ /* Send the keepalive to network */ -+ ret = packet->okfn(packet->skb); -+ list_del(p); -+ kfree(packet); -+ } -+} ++ switch (pipe->ccdc_out) { ++ case CCDC_YUV_RSZ: ++ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ; ++ syn_mode &= ~ISPCCDC_SYN_MODE_WEN; ++ break; + ++ case CCDC_YUV_MEM_RSZ: ++ syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ; ++ isp_ccdc->wen = 1; ++ syn_mode |= ISPCCDC_SYN_MODE_WEN; ++ break; + -+static int iphbd_open(struct inode *inode, struct file *file) -+{ -+ if (iphb_is_enabled) -+ return -EBUSY; ++ case CCDC_OTHERS_VP: ++ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; ++ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; ++ syn_mode &= ~ISPCCDC_SYN_MODE_WEN; ++ vpcfg.bitshift_sel = BIT9_0; ++ vpcfg.freq_sel = PIXCLKBY2; ++ ispccdc_config_vp(isp_ccdc, vpcfg); ++ ispccdc_enable_vp(isp_ccdc, 1); ++ break; + -+ iphb_is_enabled = 1; ++ case CCDC_OTHERS_MEM: ++ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; ++ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; ++ syn_mode |= ISPCCDC_SYN_MODE_WEN; ++ syn_mode &= ~ISPCCDC_SYN_MODE_EXWEN; ++ isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ++ ~ISPCCDC_CFG_WENLOG); ++ vpcfg.bitshift_sel = BIT11_2; ++ vpcfg.freq_sel = PIXCLKBY2; ++ ispccdc_config_vp(isp_ccdc, vpcfg); ++ ispccdc_enable_vp(isp_ccdc, 0); ++ break; + +- isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW, +- ~ISPCCDC_ALAW_CCDTBL, +- enable ? ISPCCDC_ALAW_CCDTBL : 0); +-} ++ case CCDC_OTHERS_VP_MEM: ++ syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR; ++ syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ; ++ syn_mode |= ISPCCDC_SYN_MODE_WEN; ++ syn_mode &= ~ISPCCDC_SYN_MODE_EXWEN; + +-/** +- * ispccdc_config_imgattr - Configures the sensor image specific attributes. +- * @colptn: Color pattern of the sensor. 
+- **/ +-void ispccdc_config_imgattr(struct isp_ccdc_device *isp_ccdc, u32 colptn) +-{ +- struct device *dev = to_device(isp_ccdc); ++ isp_reg_and_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ++ ~ISPCCDC_CFG_WENLOG, isp_ccdc->wenlog); ++ vpcfg.bitshift_sel = BIT9_0; ++ vpcfg.freq_sel = PIXCLKBY2; ++ ispccdc_config_vp(isp_ccdc, vpcfg); ++ ispccdc_enable_vp(isp_ccdc, 1); ++ break; ++ default: ++ DPRINTK_ISPCCDC("ISP_ERR: Wrong CCDC Output\n"); ++ return -EINVAL; ++ }; + +- isp_reg_writel(dev, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN); +-} ++ isp_reg_writel(dev, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE); + +-void ispccdc_config_shadow_registers(struct isp_ccdc_device *isp_ccdc) +-{ +- if (isp_ccdc->lsc_enable) { +- ispccdc_enable_lsc(isp_ccdc, 1); +- isp_ccdc->lsc_enable = 0; ++ switch (pipe->ccdc_in) { ++ case CCDC_RAW: ++ syncif.ccdc_mastermode = 0; ++ syncif.datapol = 0; ++ syncif.datsz = DAT10; ++ syncif.fldmode = 0; ++ syncif.fldout = 0; ++ syncif.fldpol = 0; ++ syncif.fldstat = 0; ++ syncif.hdpol = 0; ++ syncif.ipmod = RAW; ++ syncif.vdpol = 0; ++ ispccdc_config_sync_if(isp_ccdc, syncif); ++ ispccdc_config_imgattr(isp_ccdc, colptn); ++ blkcfg.oblen = 0; ++ blkcfg.dcsubval = 64; ++ ispccdc_config_black_clamp(isp_ccdc, blkcfg); ++ break; ++ case CCDC_YUV_SYNC: ++ syncif.ccdc_mastermode = 0; ++ syncif.datapol = 0; ++ syncif.datsz = DAT8; ++ syncif.fldmode = 0; ++ syncif.fldout = 0; ++ syncif.fldpol = 0; ++ syncif.fldstat = 0; ++ syncif.hdpol = 0; ++ syncif.ipmod = YUV16; ++ syncif.vdpol = 1; ++ ispccdc_config_imgattr(isp_ccdc, 0); ++ ispccdc_config_sync_if(isp_ccdc, syncif); ++ blkcfg.oblen = 0; ++ blkcfg.dcsubval = 0; ++ ispccdc_config_black_clamp(isp_ccdc, blkcfg); ++ break; ++ case CCDC_YUV_BT: ++ break; ++ case CCDC_OTHERS: ++ break; ++ default: ++ DPRINTK_ISPCCDC("ISP_ERR: Wrong CCDC Input\n"); ++ return -EINVAL; + } + ++ ispccdc_print_status(isp_ccdc, pipe); ++ isp_print_status(dev); + return 0; -+} -+ -+ -+static unsigned int iphbd_poll(struct file *file, poll_table *wait) + } + + /** +@@ -1199,156 +1097,339 @@ int ispccdc_s_pipeline(struct isp_ccdc_d + OMAP3_ISP_IOMEM_CCDC, + ISPCCDC_VP_OUT); + +- if (is_isplsc_activated()) { +- if (pipe->ccdc_in == CCDC_RAW) { +- ispccdc_config_lsc(isp_ccdc, &isp_ccdc->lsc_config); +- ispccdc_load_lsc(isp_ccdc, isp_ccdc->lsc_gain_table, +- isp_ccdc->lsc_config.size); +- } +- } ++ ispccdc_setup_lsc(isp_ccdc, pipe); + + return 0; + } + + /** +- * ispccdc_config_outlineoffset - Configures the output line offset +- * @offset: Must be twice the Output width and aligned on 32 byte boundary +- * @oddeven: Specifies the odd/even line pattern to be chosen to store the +- * output. +- * @numlines: Set the value 0-3 for +1-4lines, 4-7 for -1-4lines. +- * +- * - Configures the output line offset when stored in memory +- * - Sets the odd/even line pattern to store the output +- * (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4)) +- * - Configures the number of even and odd line fields in case of rearranging +- * the lines. ++ * ispccdc_enable - Enables the CCDC module. ++ * @enable: 0 Disables CCDC, 1 Enables CCDC + * +- * Returns 0 if successful, or -EINVAL if the offset is not in 32 byte +- * boundary. ++ * Client should configure all the sub modules in CCDC before this. 
+ **/ +-int ispccdc_config_outlineoffset(struct isp_ccdc_device *isp_ccdc, u32 offset, +- u8 oddeven, u8 numlines) ++void ispccdc_enable(struct isp_ccdc_device *isp_ccdc, u8 enable) +{ -+ unsigned int mask = 0; -+ poll_wait(file, &iphb_pollq, wait); -+ if (trigger_poll) { -+ mask |= POLLIN | POLLRDNORM; -+ mask |= POLLOUT | POLLWRNORM; -+ trigger_poll = 0; -+ } -+ return mask; -+} -+ ++ struct isp_device *isp = to_isp_device(isp_ccdc); ++ int enable_lsc; + -+static int iphbd_release(struct inode *inode, struct file *file) -+{ -+ flush_keepalives(0); -+ last_notification = 0; -+ trigger_poll = 0; -+ iphb_is_enabled = 0; -+ return 0; ++ enable_lsc = enable && ++ isp->pipeline.ccdc_in == CCDC_RAW && ++ isp_ccdc->lsc_request_enable && ++ ispccdc_validate_config_lsc(isp_ccdc, ++ &isp_ccdc->lsc_config, &isp->pipeline) == 0; ++ ispccdc_enable_lsc(isp_ccdc, enable_lsc); ++ isp_reg_and_or(isp->dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR, ++ ~ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0); +} + ++/* ++ * Returns zero if the CCDC is idle and the image has been written to ++ * memory, too. ++ */ ++int ispccdc_sbl_busy(void *_isp_ccdc) + { ++ struct isp_ccdc_device *isp_ccdc = _isp_ccdc; + struct device *dev = to_device(isp_ccdc); + +- if ((offset & ISP_32B_BOUNDARY_OFFSET) == offset) { +- isp_reg_writel(dev, (offset & 0xFFFF), +- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF); +- } else { +- DPRINTK_ISPCCDC("ISP_ERR : Offset should be in 32 byte" +- " boundary\n"); ++ return ispccdc_busy(isp_ccdc) ++ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) & ++ ISPSBL_CCDC_WR_0_DATA_READY) ++ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) & ++ ISPSBL_CCDC_WR_0_DATA_READY) ++ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) & ++ ISPSBL_CCDC_WR_0_DATA_READY) ++ | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) & ++ ISPSBL_CCDC_WR_0_DATA_READY); ++} + -+static ssize_t iphbd_write(struct file *filp, -+ const char *buff, -+ size_t len, -+ loff_t *off) ++/** ++ * ispccdc_busy - Gets busy state of the CCDC. ++ **/ ++int ispccdc_busy(struct isp_ccdc_device *isp_ccdc) +{ -+ /* If userland writes to the device, then we flush. 
*/ -+ long val; -+ char received[MAX_RECV_LEN + 1]; -+ snprintf(received, min((size_t)MAX_RECV_LEN, len + 1), "%s", buff); -+ -+ /* conversion errors are ignored and they will cause a flush */ -+ strict_strtol(received, 10, &val); ++ struct device *dev = to_device(isp_ccdc); + -+ if (val > 0) { -+ if (val > MAX_FLUSH_VALUE) -+ val = MAX_FLUSH_VALUE; -+ dev_dbg(iphb_dev, "Setting flush notification to %ld secs\n", -+ val); -+ flush_notification = (unsigned int)val; -+ } -+ flush_keepalives(0); -+ return 0; ++ return isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) & ++ ISPCCDC_PCR_BUSY; +} + -+ -+static unsigned int net_out_hook(unsigned int hook, -+ struct sk_buff *skb, -+ const struct net_device *indev, -+ const struct net_device *outdev, -+ int (*okfn)(struct sk_buff *)) ++void ispccdc_config_shadow_registers(struct isp_ccdc_device *isp_ccdc) +{ -+ struct keepalives *keepalive; -+ struct iphdr *ip; -+ struct ipv6hdr *ip6 = NULL; -+ struct tcphdr *tcp; -+ struct tcp_sock *tsk; -+ unsigned char proto; -+ unsigned int len, hlen; -+ unsigned char version; -+ -+ if (!iphb_is_enabled) -+ return NF_ACCEPT; -+ -+ if (hook != NF_INET_POST_ROUTING) -+ return NF_ACCEPT; -+ -+ if (keepalives.num_keeps >= MAX_KEEPALIVES) { -+ dev_dbg(iphb_dev, "Max keepalives (%d), flushing\n", -+ keepalives.num_keeps); -+ flush_keepalives(1); -+ return NF_ACCEPT; -+ } -+ -+ ip = ip_hdr(skb); -+ ip6 = ipv6_hdr(skb); -+ if (ip) -+ version = ip->version; -+ else { -+ flush_keepalives(1); -+ return NF_ACCEPT; -+ } ++ unsigned long flags; + -+ if (version == 6) { -+ len = ntohs(ip6->payload_len); -+ hlen = IPV6_HDR_LEN; -+ proto = (unsigned char)ip6->nexthdr; -+ } else if (version == 4) { -+ len = ntohs(ip->tot_len); -+ hlen = ip->ihl << 2; -+ proto = (unsigned char)ip->protocol; -+ } else { -+ flush_keepalives(1); -+ return NF_ACCEPT; -+ } ++ spin_lock_irqsave(&isp_ccdc->lock, flags); ++ if (isp_ccdc->shadow_update) ++ goto out; + -+ /* We are only interested in TCP traffic (keepalives) */ -+ if (proto != IPPROTO_TCP) { -+ flush_keepalives(1); -+ return NF_ACCEPT; ++#if 0 /* FIXME: Do not support on-the-fly-LSC configuration yet */ ++ if (isp_ccdc->update_lsc_config) { ++ ispccdc_config_lsc(isp_ccdc); ++ ispccdc_enable_lsc(isp_ccdc, isp_ccdc->lsc_request_enable); ++ isp_ccdc->update_lsc_config = 0; + } + -+ tsk = tcp_sk(skb->sk); -+ tcp = tcp_hdr(skb); -+ -+ len -= hlen; /* ip4/6 header len */ -+ len -= tcp->doff << 2; /* tcp header + options */ -+ -+ /* Is it keepalive? 
*/ -+ if (!(tcp->ack && (len == 0 || len == 1) && -+ (ntohl(tcp->seq) == (tsk->snd_nxt - 1)) && -+ !(tcp->syn || tcp->fin || tcp->rst))) { -+ flush_keepalives(1); -+ return NF_ACCEPT; ++ if (isp_ccdc->update_lsc_table) { ++ u32 n = isp_ccdc->lsc_table_new; ++ /* Swap tables--no need to vfree in interrupt context */ ++ isp_ccdc->lsc_table_new = isp_ccdc->lsc_table_inuse; ++ isp_ccdc->lsc_table_inuse = n; ++ ispccdc_program_lsc(isp_ccdc); ++ isp_ccdc->update_lsc_table = 0; + } ++#endif + -+ keepalive = kzalloc(sizeof(struct keepalives), GFP_ATOMIC); -+ if (!keepalive) -+ return NF_ACCEPT; -+ -+ keepalive->skb = skb; -+ keepalive->okfn = okfn; -+ -+ spin_lock_bh(&keepalives_lock); -+ keepalives.num_keeps++; -+ list_add_tail(&keepalive->list, &keepalives.list); -+ spin_unlock_bh(&keepalives_lock); -+ -+ return NF_STOLEN; ++out: ++ spin_unlock_irqrestore(&isp_ccdc->lock, flags); +} + -+ -+ -+static unsigned int net_in_hook(unsigned int hook, -+ struct sk_buff *skb, -+ const struct net_device *indev, -+ const struct net_device *outdev, -+ int (*okfn)(struct sk_buff *)) ++/** ++ * ispccdc_config - Sets CCDC configuration from userspace ++ * @userspace_add: Structure containing CCDC configuration sent from userspace. ++ * ++ * Returns 0 if successful, -EINVAL if the pointer to the configuration ++ * structure is null, or the copy_from_user function fails to copy user space ++ * memory to kernel space memory. ++ **/ ++int ispccdc_config(struct isp_ccdc_device *isp_ccdc, ++ void *userspace_add) +{ -+ if (!iphb_is_enabled) -+ return NF_ACCEPT; -+ -+ /* -+ * Packets coming in will automatically flush output queue -+ * because radio is now on. -+ */ -+ flush_keepalives(1); -+ -+ return NF_ACCEPT; -+} -+ -+ -+/* -+ * The user space daemon (iphbd) needs the interface for communicating -+ * with this module. 
-+ */ -+static struct file_operations iphb_fops = { -+ .owner = THIS_MODULE, -+ .write = iphbd_write, -+ .poll = iphbd_poll, -+ .open = iphbd_open, -+ .release = iphbd_release -+}; ++ struct isp_device *isp = to_isp_device(isp_ccdc); ++ struct ispccdc_bclamp bclamp_t; ++ struct ispccdc_blcomp blcomp_t; ++ struct ispccdc_fpc fpc_t; ++ struct ispccdc_culling cull_t; ++ struct ispccdc_update_config *ccdc_struct; ++ unsigned long flags; ++ int ret = 0; + -+static struct miscdevice iphb_misc = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = MY_NAME, -+ .fops = &iphb_fops -+}; ++ if (userspace_add == NULL) + return -EINVAL; + -+/* hook for packets sent to interface */ -+static struct nf_hook_ops net_out_ops = { -+ .list = { NULL, NULL }, -+ .owner = THIS_MODULE, -+ .hook = net_out_hook, -+ .pf = PF_INET, -+ .hooknum = NF_INET_POST_ROUTING, -+ .priority = NF_IP_PRI_LAST -+}; -+static struct nf_hook_ops net_out6_ops = { -+ .list = { NULL, NULL }, -+ .owner = THIS_MODULE, -+ .hook = net_out_hook, -+ .pf = PF_INET6, -+ .hooknum = NF_INET_POST_ROUTING, -+ .priority = NF_IP6_PRI_LAST -+}; ++ ccdc_struct = userspace_add; + -+/* hook for packets received from interface */ -+static struct nf_hook_ops net_in_ops = { -+ .list = { NULL, NULL }, -+ .owner = THIS_MODULE, -+ .hook = net_in_hook, -+ .pf = PF_INET, -+ .hooknum = NF_INET_PRE_ROUTING, -+ .priority = NF_IP_PRI_FIRST -+}; -+static struct nf_hook_ops net_in6_ops = { -+ .list = { NULL, NULL }, -+ .owner = THIS_MODULE, -+ .hook = net_in_hook, -+ .pf = PF_INET6, -+ .hooknum = NF_INET_PRE_ROUTING, -+ .priority = NF_IP6_PRI_FIRST -+}; ++ spin_lock_irqsave(&isp_ccdc->lock, flags); ++ isp_ccdc->shadow_update = 1; ++ spin_unlock_irqrestore(&isp_ccdc->lock, flags); + ++ if (ISP_ABS_CCDC_ALAW & ccdc_struct->flag) { ++ if (ISP_ABS_CCDC_ALAW & ccdc_struct->update) ++ ispccdc_config_alaw(isp_ccdc, ccdc_struct->alawip); ++ ispccdc_enable_alaw(isp_ccdc, 1); ++ } else if (ISP_ABS_CCDC_ALAW & ccdc_struct->update) ++ ispccdc_enable_alaw(isp_ccdc, 0); + -+static int __init init(void) -+{ -+ int ret; -+ char *uts_release = (utsname())->release; ++ if (ISP_ABS_CCDC_LPF & ccdc_struct->flag) ++ ispccdc_enable_lpf(isp_ccdc, 1); ++ else ++ ispccdc_enable_lpf(isp_ccdc, 0); + -+ INIT_LIST_HEAD(&keepalives.list); ++ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->flag) { ++ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) { ++ if (copy_from_user(&bclamp_t, (struct ispccdc_bclamp *) ++ ccdc_struct->bclamp, ++ sizeof(struct ispccdc_bclamp))) { ++ ret = -EFAULT; ++ goto out; ++ } + -+ ret = misc_register(&iphb_misc); -+ if (ret < 0) { -+ pr_err(MY_NAME ": Cannot create device (%d)\n", ret); -+ return -ENODEV; -+ } ++ ispccdc_enable_black_clamp(isp_ccdc, 1); ++ ispccdc_config_black_clamp(isp_ccdc, bclamp_t); ++ } else ++ ispccdc_enable_black_clamp(isp_ccdc, 1); ++ } else { ++ if (ISP_ABS_CCDC_BLCLAMP & ccdc_struct->update) { ++ if (copy_from_user(&bclamp_t, (struct ispccdc_bclamp *) ++ ccdc_struct->bclamp, ++ sizeof(struct ispccdc_bclamp))) { ++ ret = -EFAULT; ++ goto out; ++ } + -+ iphb_dev = iphb_misc.this_device; -+ if (!iphb_dev) { -+ pr_err(MY_NAME ": Cannot create device\n"); -+ return -ENODEV; ++ ispccdc_enable_black_clamp(isp_ccdc, 0); ++ ispccdc_config_black_clamp(isp_ccdc, bclamp_t); ++ } + } + -+ nf_register_hook(&net_out_ops); -+ nf_register_hook(&net_in_ops); -+ nf_register_hook(&net_out6_ops); -+ nf_register_hook(&net_in6_ops); -+ -+ if (strcmp(uts_release, UTS_RELEASE) == 0) -+ dev_info(iphb_dev, "Module registered in %s, built %s %s\n", -+ uts_release, __DATE__, __TIME__); -+ 
else -+ dev_info(iphb_dev, -+ "Module registered in %s, compiled in %s, " -+ "built %s %s\n", -+ uts_release, UTS_RELEASE, __DATE__, __TIME__); -+ -+ return 0; -+} -+ ++ if (ISP_ABS_CCDC_BCOMP & ccdc_struct->update) { ++ if (copy_from_user(&blcomp_t, (struct ispccdc_blcomp *) ++ ccdc_struct->blcomp, ++ sizeof(blcomp_t))) { ++ ret = -EFAULT; ++ goto out; ++ } + -+static void __exit fini(void) -+{ -+ nf_unregister_hook(&net_out_ops); -+ nf_unregister_hook(&net_in_ops); -+ nf_unregister_hook(&net_out6_ops); -+ nf_unregister_hook(&net_in6_ops); ++ ispccdc_config_black_comp(isp_ccdc, blcomp_t); ++ } + -+ flush_keepalives(0); ++ if (ISP_ABS_CCDC_FPC & ccdc_struct->flag) { ++ if (ISP_ABS_CCDC_FPC & ccdc_struct->update) { ++ if (copy_from_user(&fpc_t, (struct ispccdc_fpc *) ++ ccdc_struct->fpc, ++ sizeof(fpc_t))) { ++ ret = -EFAULT; ++ goto out; ++ } ++ isp_ccdc->fpc_table_add = kmalloc(64 + fpc_t.fpnum * 4, ++ GFP_KERNEL | GFP_DMA); ++ if (!isp_ccdc->fpc_table_add) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ while (((unsigned long)isp_ccdc->fpc_table_add ++ & 0xFFFFFFC0) ++ != (unsigned long)isp_ccdc->fpc_table_add) ++ isp_ccdc->fpc_table_add++; + -+ misc_deregister(&iphb_misc); ++ isp_ccdc->fpc_table_add_m = iommu_kmap( ++ isp->iommu, ++ 0, ++ virt_to_phys(isp_ccdc->fpc_table_add), ++ fpc_t.fpnum * 4, ++ IOMMU_FLAG); ++ /* FIXME: Correct unwinding */ ++ BUG_ON(IS_ERR_VALUE(isp_ccdc->fpc_table_add_m)); + -+ iphb_is_enabled = 0; ++ if (copy_from_user(isp_ccdc->fpc_table_add, ++ (u32 *)fpc_t.fpcaddr, ++ fpc_t.fpnum * 4)) { ++ ret = -EFAULT; ++ goto out; ++ } + -+ pr_info(MY_NAME ": Keepalive handler module unregistered\n"); -+} ++ fpc_t.fpcaddr = isp_ccdc->fpc_table_add_m; ++ ispccdc_config_fpc(isp_ccdc, fpc_t); ++ } ++ ispccdc_enable_fpc(isp_ccdc, 1); ++ } else if (ISP_ABS_CCDC_FPC & ccdc_struct->update) ++ ispccdc_enable_fpc(isp_ccdc, 0); + -+module_init(init); -+module_exit(fini); ++ if (ISP_ABS_CCDC_CULL & ccdc_struct->update) { ++ if (copy_from_user(&cull_t, (struct ispccdc_culling *) ++ ccdc_struct->cull, ++ sizeof(cull_t))) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ispccdc_config_culling(isp_ccdc, cull_t); ++ } + -+MODULE_AUTHOR("Jukka Rissanen "); -+MODULE_DESCRIPTION("netfilter module for delaying TCP keepalive packets"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/net/ipv4/netfilter/Kconfig linux-omap-2.6.28-nokia1/net/ipv4/netfilter/Kconfig ---- linux-omap-2.6.28-omap1/net/ipv4/netfilter/Kconfig 2011-06-22 13:14:25.883067636 +0200 -+++ linux-omap-2.6.28-nokia1/net/ipv4/netfilter/Kconfig 2011-06-22 13:19:33.283063268 +0200 -@@ -407,5 +407,29 @@ config IP_NF_ARP_MANGLE ++ if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->update) { ++ if (ISP_ABS_CCDC_CONFIG_LSC & ccdc_struct->flag) { ++ struct ispccdc_lsc_config cfg; ++ if (copy_from_user(&cfg, ccdc_struct->lsc_cfg, ++ sizeof(cfg))) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ret = ispccdc_validate_config_lsc(isp_ccdc, &cfg, ++ isp->running == ISP_RUNNING ? 
++ &isp->pipeline : NULL); ++ if (ret) ++ goto out; ++ memcpy(&isp_ccdc->lsc_config, &cfg, ++ sizeof(isp_ccdc->lsc_config)); ++ isp_ccdc->lsc_request_enable = 1; ++ } else { ++ isp_ccdc->lsc_request_enable = 0; ++ } ++ isp_ccdc->update_lsc_config = 1; + } + +- isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, +- ~ISPCCDC_SDOFST_FINV); ++ if (ISP_ABS_TBL_LSC & ccdc_struct->update) { ++ void *n; ++ if (isp_ccdc->lsc_table_new != PTR_FREE) ++ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_new); ++ isp_ccdc->lsc_table_new = iommu_vmalloc(isp->iommu, 0, ++ isp_ccdc->lsc_config.size, IOMMU_FLAG); ++ if (IS_ERR_VALUE(isp_ccdc->lsc_table_new)) { ++ /* Disable LSC if table can not be allocated */ ++ isp_ccdc->lsc_table_new = PTR_FREE; ++ isp_ccdc->lsc_request_enable = 0; ++ isp_ccdc->update_lsc_config = 1; ++ ret = -ENOMEM; ++ goto out; ++ } ++ n = da_to_va(isp->iommu, isp_ccdc->lsc_table_new); ++ if (copy_from_user(n, ccdc_struct->lsc, ++ isp_ccdc->lsc_config.size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ isp_ccdc->update_lsc_table = 1; ++ } - endif # IP_NF_ARPTABLES +- isp_reg_and(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, +- ~ISPCCDC_SDOFST_FOFST_4L); ++ if (isp->running == ISP_STOPPED && ++ (isp_ccdc->update_lsc_table || isp_ccdc->update_lsc_config)) ++ ispccdc_setup_lsc(isp_ccdc, &isp->pipeline); -+config IP_NF_HB -+ tristate "IP heartbeat support (EXPERIMENTAL)" -+ depends on NETFILTER_ADVANCED -+ ---help--- -+ IP heartbeat support will let you queue TCP keepalive messages. -+ The idea is to delay outgoing TCP keepalive (heartbeat) messages -+ until they are sent when triggered by user space daemon (iphbd). -+ The keepalives are also sent if we receive packets from the network -+ or if the keepalive queue is full. The feature is used in wireless -+ networks so that we do not wake up radio all the time and also save -+ battery when keepalives are synchronized. The keepalive queueing -+ is only activated if iphbd is running and active, default behaviour -+ is to pass keepalives as is. -+ -+if IP_NF_HB +- switch (oddeven) { +- case EVENEVEN: +- isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, +- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT); +- break; +- case ODDEVEN: +- isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, +- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT); +- break; +- case EVENODD: +- isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, +- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT); +- break; +- case ODDODD: +- isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST, +- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT); +- break; +- default: +- break; +- } +- return 0; ++ if (ISP_ABS_CCDC_COLPTN & ccdc_struct->update) ++ ispccdc_config_imgattr(isp_ccdc, ccdc_struct->colptn); + -+config IP_NF_HB_DEBUG -+ bool "IP heartbeat debugging" -+ ---help--- -+ Say Y here if you want to get additional messages useful in -+ debugging the IP heartbeat code. ++out: ++ if (ret == -EFAULT) ++ dev_err(to_device(isp_ccdc), ++ "ccdc: user provided bad configuration data address"); + -+endif # IP_HB ++ if (ret == -ENOMEM) ++ dev_err(to_device(isp_ccdc), ++ "ccdc: can not allocate memory"); + - endmenu ++ isp_ccdc->shadow_update = 0; ++ return ret; + } + + /** +- * ispccdc_set_outaddr - Sets the memory address where the output will be saved +- * @addr: 32-bit memory address aligned on 32 byte boundary. ++ * ispccdc_request - Reserves the CCDC module. + * +- * Sets the memory address where the output will be saved. 
++ * Reserves the CCDC module and assures that is used only once at a time. + * +- * Returns 0 if successful, or -EINVAL if the address is not in the 32 byte +- * boundary. ++ * Returns 0 if successful, or -EBUSY if CCDC module is busy. + **/ +-int ispccdc_set_outaddr(struct isp_ccdc_device *isp_ccdc, u32 addr) ++int ispccdc_request(struct isp_ccdc_device *isp_ccdc) + { + struct device *dev = to_device(isp_ccdc); -diff -Nurp linux-omap-2.6.28-omap1/net/ipv4/netfilter/Makefile linux-omap-2.6.28-nokia1/net/ipv4/netfilter/Makefile ---- linux-omap-2.6.28-omap1/net/ipv4/netfilter/Makefile 2011-06-22 13:14:25.883067636 +0200 -+++ linux-omap-2.6.28-nokia1/net/ipv4/netfilter/Makefile 2011-06-22 13:19:33.283063268 +0200 -@@ -74,3 +74,4 @@ obj-$(CONFIG_IP_NF_ARPFILTER) += arptabl +- if ((addr & ISP_32B_BOUNDARY_BUF) == addr) { +- isp_reg_writel(dev, addr, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_SDR_ADDR); +- return 0; +- } else { +- DPRINTK_ISPCCDC("ISP_ERR : Address should be in 32 byte" +- " boundary\n"); +- return -EINVAL; ++ mutex_lock(&isp_ccdc->mutexlock); ++ if (isp_ccdc->ccdc_inuse) { ++ mutex_unlock(&isp_ccdc->mutexlock); ++ DPRINTK_ISPCCDC("ISP_ERR : CCDC Module Busy\n"); ++ return -EBUSY; + } - obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o ++ isp_ccdc->ccdc_inuse = 1; ++ mutex_unlock(&isp_ccdc->mutexlock); ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, ++ ISPCTRL_CCDC_RAM_EN | ISPCTRL_CCDC_CLK_EN | ++ ISPCTRL_SBL_WR1_RAM_EN); ++ isp_reg_or(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ++ ISPCCDC_CFG_VDLC); ++ return 0; + } -+obj-$(CONFIG_IP_NF_HB) += iphb.o -diff -Nurp linux-omap-2.6.28-omap1/net/Kconfig linux-omap-2.6.28-nokia1/net/Kconfig ---- linux-omap-2.6.28-omap1/net/Kconfig 2011-06-22 13:14:25.583067644 +0200 -+++ linux-omap-2.6.28-nokia1/net/Kconfig 2011-06-22 13:19:33.283063268 +0200 -@@ -190,6 +190,7 @@ source "net/x25/Kconfig" - source "net/lapb/Kconfig" - source "net/econet/Kconfig" - source "net/wanrouter/Kconfig" -+source "net/phonet/Kconfig" - source "net/sched/Kconfig" + /** +- * ispccdc_enable - Enables the CCDC module. +- * @enable: 0 Disables CCDC, 1 Enables CCDC ++ * ispccdc_free - Frees the CCDC module. + * +- * Client should configure all the sub modules in CCDC before this. ++ * Frees the CCDC module so it can be used by another process. ++ * ++ * Returns 0 if successful, or -EINVAL if module has been already freed. + **/ +-void ispccdc_enable(struct isp_ccdc_device *isp_ccdc, u8 enable) ++int ispccdc_free(struct isp_ccdc_device *isp_ccdc) + { +- struct isp_device *isp = to_isp_device(isp_ccdc); +- +- if (enable) { +- if (isp_ccdc->lsc_enable +- && isp->pipeline.ccdc_in == CCDC_RAW) +- ispccdc_enable_lsc(isp_ccdc, 1); +- +- } else { +- int lsc_enable = isp_ccdc->lsc_state; +- +- ispccdc_enable_lsc(isp_ccdc, 0); +- isp_ccdc->lsc_enable = lsc_enable; ++ mutex_lock(&isp_ccdc->mutexlock); ++ if (!isp_ccdc->ccdc_inuse) { ++ mutex_unlock(&isp_ccdc->mutexlock); ++ DPRINTK_ISPCCDC("ISP_ERR: CCDC Module already freed\n"); ++ return -EINVAL; + } - menu "Network testing" -@@ -233,7 +234,6 @@ source "net/can/Kconfig" - source "net/irda/Kconfig" - source "net/bluetooth/Kconfig" - source "net/rxrpc/Kconfig" --source "net/phonet/Kconfig" +- isp_reg_and_or(isp->dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR, +- ~ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0); +-} +- +-/* +- * Returns zero if the CCDC is idle and the image has been written to +- * memory, too. 
+- */ +-int ispccdc_sbl_busy(void *_isp_ccdc) +-{ +- struct isp_ccdc_device *isp_ccdc = _isp_ccdc; +- struct device *dev = to_device(isp_ccdc); +- +- return ispccdc_busy(isp_ccdc) +- | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) & +- ISPSBL_CCDC_WR_0_DATA_READY) +- | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) & +- ISPSBL_CCDC_WR_0_DATA_READY) +- | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) & +- ISPSBL_CCDC_WR_0_DATA_READY) +- | (isp_reg_readl(dev, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) & +- ISPSBL_CCDC_WR_0_DATA_READY); +-} +- +-/** +- * ispccdc_busy - Gets busy state of the CCDC. +- **/ +-int ispccdc_busy(struct isp_ccdc_device *isp_ccdc) +-{ +- struct device *dev = to_device(isp_ccdc); +- +- return isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) & +- ISPCCDC_PCR_BUSY; ++ isp_ccdc->ccdc_inuse = 0; ++ mutex_unlock(&isp_ccdc->mutexlock); ++ isp_reg_and(to_device(isp_ccdc), OMAP3_ISP_IOMEM_MAIN, ++ ISP_CTRL, ~(ISPCTRL_CCDC_CLK_EN | ++ ISPCTRL_CCDC_RAM_EN | ++ ISPCTRL_SBL_WR1_RAM_EN)); ++ return 0; + } - config FIB_RULES - bool -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/debugfs.c linux-omap-2.6.28-nokia1/net/mac80211/debugfs.c ---- linux-omap-2.6.28-omap1/net/mac80211/debugfs.c 2011-06-22 13:14:26.043067636 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/debugfs.c 2011-06-22 13:19:33.283063268 +0200 -@@ -190,6 +190,64 @@ DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount - DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); + /** +@@ -1370,129 +1451,6 @@ void ispccdc_restore_context(struct devi + } + /** +- * ispccdc_print_status - Prints the values of the CCDC Module registers +- * +- * Also prints other debug information stored in the CCDC module. +- **/ +-void ispccdc_print_status(struct isp_ccdc_device *isp_ccdc, +- struct isp_pipeline *pipe) +-{ +- if (!is_ispccdc_debug_enabled()) +- return; +- +- DPRINTK_ISPCCDC("Module in use =%d\n", isp_ccdc->ccdc_inuse); +- DPRINTK_ISPCCDC("Accepted CCDC Input (width = %d,Height = %d)\n", +- isp_ccdc->ccdcin_w, +- isp_ccdc->ccdcin_h); +- DPRINTK_ISPCCDC("Accepted CCDC Output (width = %d,Height = %d)\n", +- isp_ccdc->ccdcout_w, +- isp_ccdc->ccdcout_h); +- DPRINTK_ISPCCDC("###CCDC PCR=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_PCR)); +- DPRINTK_ISPCCDC("ISP_CTRL =0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, +- ISP_CTRL)); +- switch (pipe->ccdc_in) { +- case CCDC_RAW: +- DPRINTK_ISPCCDC("ccdc input format is CCDC_RAW\n"); +- break; +- case CCDC_YUV_SYNC: +- DPRINTK_ISPCCDC("ccdc input format is CCDC_YUV_SYNC\n"); +- break; +- case CCDC_YUV_BT: +- DPRINTK_ISPCCDC("ccdc input format is CCDC_YUV_BT\n"); +- break; +- default: +- break; +- } +- +- switch (pipe->ccdc_out) { +- case CCDC_OTHERS_VP: +- DPRINTK_ISPCCDC("ccdc output format is CCDC_OTHERS_VP\n"); +- break; +- case CCDC_OTHERS_MEM: +- DPRINTK_ISPCCDC("ccdc output format is CCDC_OTHERS_MEM\n"); +- break; +- case CCDC_YUV_RSZ: +- DPRINTK_ISPCCDC("ccdc output format is CCDC_YUV_RSZ\n"); +- break; +- default: +- break; +- } +- +- DPRINTK_ISPCCDC("###ISP_CTRL in ccdc =0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, +- ISP_CTRL)); +- DPRINTK_ISPCCDC("###ISP_IRQ0ENABLE in ccdc =0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, +- ISP_IRQ0ENABLE)); +- DPRINTK_ISPCCDC("###ISP_IRQ0STATUS in ccdc =0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_MAIN, +- ISP_IRQ0STATUS)); +- DPRINTK_ISPCCDC("###CCDC SYN_MODE=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_SYN_MODE)); +- DPRINTK_ISPCCDC("###CCDC HORZ_INFO=0x%x\n", +- 
isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_HORZ_INFO)); +- DPRINTK_ISPCCDC("###CCDC VERT_START=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_VERT_START)); +- DPRINTK_ISPCCDC("###CCDC VERT_LINES=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_VERT_LINES)); +- DPRINTK_ISPCCDC("###CCDC CULLING=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_CULLING)); +- DPRINTK_ISPCCDC("###CCDC HSIZE_OFF=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_HSIZE_OFF)); +- DPRINTK_ISPCCDC("###CCDC SDOFST=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_SDOFST)); +- DPRINTK_ISPCCDC("###CCDC SDR_ADDR=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_SDR_ADDR)); +- DPRINTK_ISPCCDC("###CCDC CLAMP=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_CLAMP)); +- DPRINTK_ISPCCDC("###CCDC COLPTN=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_COLPTN)); +- DPRINTK_ISPCCDC("###CCDC CFG=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_CFG)); +- DPRINTK_ISPCCDC("###CCDC VP_OUT=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_VP_OUT)); +- DPRINTK_ISPCCDC("###CCDC_SDR_ADDR= 0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_SDR_ADDR)); +- DPRINTK_ISPCCDC("###CCDC FMTCFG=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_FMTCFG)); +- DPRINTK_ISPCCDC("###CCDC FMT_HORZ=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_FMT_HORZ)); +- DPRINTK_ISPCCDC("###CCDC FMT_VERT=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_FMT_VERT)); +- DPRINTK_ISPCCDC("###CCDC LSC_CONFIG=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_CONFIG)); +- DPRINTK_ISPCCDC("###CCDC LSC_INIT=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_INITIAL)); +- DPRINTK_ISPCCDC("###CCDC LSC_TABLE BASE=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_TABLE_BASE)); +- DPRINTK_ISPCCDC("###CCDC LSC TABLE OFFSET=0x%x\n", +- isp_reg_readl(dev, OMAP3_ISP_IOMEM_CCDC, +- ISPCCDC_LSC_TABLE_OFFSET)); +-} +- +-/** + * isp_ccdc_init - CCDC module initialization. 
+ * + * Always returns 0 +@@ -1501,24 +1459,34 @@ int __init isp_ccdc_init(struct device * + { + struct isp_device *isp = dev_get_drvdata(dev); + struct isp_ccdc_device *isp_ccdc = &isp->isp_ccdc; ++ void *p; -+static ssize_t rssisignal_read(struct file *file, char __user *user_buf, -+ size_t count, loff_t *ppos) -+{ -+ struct ieee80211_local *local = file->private_data; -+ char buf[100]; -+ int res; -+ -+ res = scnprintf(buf, sizeof(buf), "empty\n"); -+ return simple_read_from_buffer(user_buf, count, ppos, buf, res); -+} -+ -+static ssize_t rssisignal_write(struct file *file, const char __user *user_buf, -+ size_t count, loff_t *ppos) -+{ -+ struct ieee80211_local *local = file->private_data; -+ struct ieee80211_sub_if_data *sdata; -+ union iwreq_data wrqu; -+ char buf[100], *endp; -+ size_t buf_len; -+ int val; -+ -+ memset(buf, 0, sizeof(buf)); -+ buf_len = min(count, sizeof(buf) - 1); -+ -+ if (copy_from_user(buf, user_buf, buf_len)) -+ return -EFAULT; -+ -+ val = strict_strtoul(buf, &endp, 10); -+ if (endp == buf) -+ return -EFAULT; -+ -+ switch (val) { -+ case 0: -+ buf_len = snprintf(buf, sizeof(buf), "LOWSIGNAL"); -+ break; -+ case 1: -+ default: -+ buf_len = snprintf(buf, sizeof(buf), "HIGHSIGNAL"); -+ break; -+ } -+ -+ /* we should hold the RTNL here so can safely walk the list */ -+ sdata = list_first_entry(&local->interfaces, -+ struct ieee80211_sub_if_data, list); -+ -+ memset(&wrqu, 0, sizeof(wrqu)); -+ wrqu.data.length = buf_len; -+ wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf); + isp_ccdc->ccdc_inuse = 0; + ispccdc_config_crop(isp_ccdc, 0, 0, 0, 0); + mutex_init(&isp_ccdc->mutexlock); + +- if (is_isplsc_activated()) { +- isp_ccdc->lsc_gain_table_tmp = kmalloc(LSC_TABLE_INIT_SIZE, +- GFP_KERNEL | GFP_DMA); +- memset(isp_ccdc->lsc_gain_table_tmp, 0x40, LSC_TABLE_INIT_SIZE); +- isp_ccdc->lsc_config.initial_x = 0; +- isp_ccdc->lsc_config.initial_y = 0; +- isp_ccdc->lsc_config.gain_mode_n = 0x6; +- isp_ccdc->lsc_config.gain_mode_m = 0x6; +- isp_ccdc->lsc_config.gain_format = 0x4; +- isp_ccdc->lsc_config.offset = 0x60; +- isp_ccdc->lsc_config.size = LSC_TABLE_INIT_SIZE; +- isp_ccdc->lsc_enable = 1; +- } ++ isp_ccdc->update_lsc_config = 0; ++ isp_ccdc->lsc_request_enable = 1; + -+ return count; -+} ++ isp_ccdc->lsc_config.initial_x = 0; ++ isp_ccdc->lsc_config.initial_y = 0; ++ isp_ccdc->lsc_config.gain_mode_n = 0x6; ++ isp_ccdc->lsc_config.gain_mode_m = 0x6; ++ isp_ccdc->lsc_config.gain_format = 0x4; ++ isp_ccdc->lsc_config.offset = 0x60; ++ isp_ccdc->lsc_config.size = LSC_TABLE_INIT_SIZE; + -+static const struct file_operations rssisignal_ops = { -+ .read = rssisignal_read, -+ .write = rssisignal_write, -+ .open = mac80211_open_file_generic, -+}; ++ isp_ccdc->update_lsc_table = 0; ++ isp_ccdc->lsc_table_new = PTR_FREE; ++ isp_ccdc->lsc_table_inuse = iommu_vmalloc(isp->iommu, 0, ++ LSC_TABLE_INIT_SIZE, IOMMU_FLAG); ++ if (IS_ERR_VALUE(isp_ccdc->lsc_table_inuse)) ++ return -ENOMEM; ++ p = da_to_va(isp->iommu, isp_ccdc->lsc_table_inuse); ++ memset(p, 0x40, LSC_TABLE_INIT_SIZE); + - void debugfs_hw_add(struct ieee80211_local *local) - { - struct dentry *phyd = local->hw.wiphy->debugfsdir; -@@ -211,6 +269,10 @@ void debugfs_hw_add(struct ieee80211_loc - DEBUGFS_ADD(total_ps_buffered); - DEBUGFS_ADD(wep_iv); ++ isp_ccdc->shadow_update = 0; ++ spin_lock_init(&isp_ccdc->lock); -+ local->debugfs.rssisignal = debugfs_create_file("rssisignal", 0600, -+ phyd, -+ local, &rssisignal_ops); -+ - statsd = debugfs_create_dir("statistics", phyd); - local->debugfs.statistics = statsd; + 
return 0; + } +@@ -1531,10 +1499,9 @@ void isp_ccdc_cleanup(struct device *dev + struct isp_device *isp = dev_get_drvdata(dev); + struct isp_ccdc_device *isp_ccdc = &isp->isp_ccdc; -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/ieee80211_i.h linux-omap-2.6.28-nokia1/net/mac80211/ieee80211_i.h ---- linux-omap-2.6.28-omap1/net/mac80211/ieee80211_i.h 2011-06-22 13:14:26.043067636 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/ieee80211_i.h 2011-06-22 13:19:33.283063268 +0200 -@@ -107,6 +107,7 @@ struct ieee80211_bss { - * otherwise, you probably don't want to use them. */ - int has_erp_value; - u8 erp_value; -+ bool hold; - }; +- if (is_isplsc_activated()) { +- ispccdc_free_lsc(isp_ccdc); +- kfree(isp_ccdc->lsc_gain_table_tmp); +- } ++ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_inuse); ++ if (isp_ccdc->lsc_table_new != PTR_FREE) ++ iommu_vfree(isp->iommu, isp_ccdc->lsc_table_new); - static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss) -@@ -274,7 +275,6 @@ struct mesh_config { - u16 dot11MeshHWMPnetDiameterTraversalTime; - }; + if (isp_ccdc->fpc_table_add_m != 0) { + iommu_kunmap(isp->iommu, isp_ccdc->fpc_table_add_m); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/ispccdc.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/ispccdc.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/ispccdc.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/ispccdc.h 2011-09-04 11:37:54.000000000 +0200 +@@ -24,8 +24,6 @@ + + #include +-#define is_isplsc_activated() 1 - - /* flags used in struct ieee80211_if_sta.flags */ - #define IEEE80211_STA_SSID_SET BIT(0) - #define IEEE80211_STA_BSSID_SET BIT(1) -@@ -314,6 +314,8 @@ enum ieee80211_sta_mlme_state { - struct ieee80211_if_sta { - struct timer_list timer; - struct work_struct work; -+ struct work_struct beacon_loss_work; -+ - u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; - u8 ssid[IEEE80211_MAX_SSID_LEN]; - enum ieee80211_sta_mlme_state state; -@@ -339,6 +341,7 @@ struct ieee80211_if_sta { - unsigned long request; + /* Enumeration constants for CCDC input output format */ + enum ccdc_input { + CCDC_RAW, +@@ -152,6 +150,14 @@ struct ispccdc_refmt { + * @syncif_ipmod: Image + * @obclamp_en: Data input format. + * @mutexlock: Mutex used to get access to the CCDC. 
++ * @update_lsc_config: Set when user changes lsc_config ++ * @lsc_request_enable: Whether LSC is requested to be enabled ++ * @lsc_config: LSC config set by user ++ * @update_lsc_table: Set when user provides a new LSC table to lsc_table_new ++ * @lsc_table_new: LSC table set by user, ISP address ++ * @lsc_table_inuse: LSC table currently in use, ISP address ++ * @shadow_update: non-zero when user is updating CCDC configuration ++ * @lock: serializes shadow_update with interrupt handler + */ + struct isp_ccdc_device { + u8 ccdc_inuse; +@@ -166,102 +172,39 @@ struct isp_ccdc_device { + u8 ccdcslave; + u8 syncif_ipmod; + u8 obclamp_en; +- u8 lsc_enable; +- u8 lsc_initialized; +- int lsc_state; + struct mutex mutexlock; /* For checking/modifying ccdc_inuse */ + u32 wenlog; +- u8 *lsc_gain_table_tmp; +- unsigned long lsc_ispmmu_addr; +- u8 *lsc_gain_table; +- struct ispccdc_lsc_config lsc_config; + unsigned long fpc_table_add_m; + u32 *fpc_table_add; +-}; +- +-int ispccdc_request(struct isp_ccdc_device *isp_ccdc); +- +-int ispccdc_free(struct isp_ccdc_device *isp_ccdc); +- +-void ispccdc_config_crop(struct isp_ccdc_device *isp_ccdc, u32 left, u32 top, +- u32 height, u32 width); +- +-void ispccdc_config_sync_if(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_syncif syncif); +- +-int ispccdc_config_black_clamp(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_bclamp bclamp); +- +-void ispccdc_enable_black_clamp(struct isp_ccdc_device *isp_ccdc, u8 enable); +- +-int ispccdc_config_fpc(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_fpc fpc); +- +-void ispccdc_enable_fpc(struct isp_ccdc_device *isp_ccdc, u8 enable); - unsigned long last_probe; -+ unsigned long last_beacon; +-void ispccdc_config_black_comp(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_blcomp blcomp); +- +-void ispccdc_config_vp(struct isp_ccdc_device *isp_ccdc, struct ispccdc_vp vp); +- +-void ispccdc_enable_vp(struct isp_ccdc_device *isp_ccdc, u8 enable); +- +-void ispccdc_config_reformatter(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_refmt refmt); +- +-void ispccdc_enable_reformatter(struct isp_ccdc_device *isp_ccdc, u8 enable); +- +-void ispccdc_config_culling(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_culling culling); +- +-void ispccdc_enable_lpf(struct isp_ccdc_device *isp_ccdc, u8 enable); +- +-void ispccdc_config_alaw(struct isp_ccdc_device *isp_ccdc, +- enum alaw_ipwidth ipwidth); +- +-void ispccdc_enable_alaw(struct isp_ccdc_device *isp_ccdc, u8 enable); +- +-int ispccdc_load_lsc(struct isp_ccdc_device *isp_ccdc, u8 *table_addr, +- u32 table_size); +- +-void ispccdc_config_lsc(struct isp_ccdc_device *isp_ccdc, +- struct ispccdc_lsc_config *lsc_cfg); ++ /* LSC related fields */ ++ u8 update_lsc_config; ++ u8 lsc_request_enable; ++ struct ispccdc_lsc_config lsc_config; ++ u8 update_lsc_table; ++ u32 lsc_table_new; ++ u32 lsc_table_inuse; - unsigned int flags; +-void ispccdc_enable_lsc(struct isp_ccdc_device *isp_ccdc, u8 enable); ++ int shadow_update; ++ spinlock_t lock; ++}; -@@ -351,6 +354,10 @@ struct ieee80211_if_sta { - u32 supp_rates_bits[IEEE80211_NUM_BANDS]; + void ispccdc_lsc_error_handler(struct isp_ccdc_device *isp_ccdc); +- +-void ispccdc_config_imgattr(struct isp_ccdc_device *isp_ccdc, u32 colptn); +- +-void ispccdc_config_shadow_registers(struct isp_ccdc_device *isp_ccdc); +- ++int ispccdc_set_outaddr(struct isp_ccdc_device *isp_ccdc, u32 addr); ++void ispccdc_set_wenlog(struct isp_ccdc_device *isp_ccdc, u32 wenlog); + int ispccdc_try_pipeline(struct isp_ccdc_device 
*isp_ccdc, + struct isp_pipeline *pipe); +- + int ispccdc_s_pipeline(struct isp_ccdc_device *isp_ccdc, + struct isp_pipeline *pipe); +- +-int ispccdc_config_outlineoffset(struct isp_ccdc_device *isp_ccdc, u32 offset, +- u8 oddeven, u8 numlines); +- +-int ispccdc_set_outaddr(struct isp_ccdc_device *isp_ccdc, u32 addr); +- + void ispccdc_enable(struct isp_ccdc_device *isp_ccdc, u8 enable); +- + int ispccdc_sbl_busy(void *_isp_ccdc); +- + int ispccdc_busy(struct isp_ccdc_device *isp_ccdc); +- ++void ispccdc_config_shadow_registers(struct isp_ccdc_device *isp_ccdc); ++int ispccdc_config(struct isp_ccdc_device *isp_ccdc, ++ void *userspace_add); ++int ispccdc_request(struct isp_ccdc_device *isp_ccdc); ++int ispccdc_free(struct isp_ccdc_device *isp_ccdc); + void ispccdc_save_context(struct device *dev); +- + void ispccdc_restore_context(struct device *dev); - int wmm_last_param_set; -+ -+ int num_beacons; /* number of TXed beacon frames by this STA */ -+ unsigned int roam_threshold_count; -+ enum ieee80211_rssi_state rssi_state; +-void ispccdc_print_status(struct isp_ccdc_device *isp_ccdc, +- struct isp_pipeline *pipe); +- +-int omap34xx_isp_ccdc_config(struct isp_ccdc_device *isp_ccdc, +- void *userspace_add); +- +-void ispccdc_set_wenlog(struct isp_ccdc_device *isp_ccdc, u32 wenlog); +- + #endif /* OMAP_ISP_CCDC_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isp.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isp.h 2011-09-04 11:37:54.000000000 +0200 +@@ -249,6 +249,8 @@ struct isp_bufs { + int done; + /* Wait for this many hs_vs before anything else. */ + int wait_hs_vs; ++ /* Ignore statistic's interrupts until first good hs_vs. */ ++ int wait_stats; }; - struct ieee80211_if_mesh { -@@ -570,6 +577,11 @@ enum { - IEEE80211_ADDBA_MSG = 4, + /** +@@ -361,7 +363,7 @@ struct isp_device { + struct iommu *iommu; }; -+enum queue_stop_reason { -+ IEEE80211_QUEUE_STOP_REASON_DRIVER, -+ IEEE80211_QUEUE_STOP_REASON_PS, -+}; -+ - /* maximum number of hardware queues we support. 
*/ - #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES) - -@@ -586,7 +598,8 @@ struct ieee80211_local { - const struct ieee80211_ops *ops; +-void omap34xx_isp_hist_dma_done(struct device *dev); ++void isp_hist_dma_done(struct device *dev); - unsigned long queue_pool[BITS_TO_LONGS(QD_MAX_QUEUES)]; -- -+ unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; -+ spinlock_t queue_stop_reason_lock; - struct net_device *mdev; /* wmaster# - "master" 802.11 device */ - int open_count; - int monitors, cooked_mntrs; -@@ -722,6 +735,12 @@ struct ieee80211_local { - int wifi_wme_noack_test; - unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ + void isp_flush(struct device *dev); -+ bool powersave; -+ int dynamic_ps_timeout; -+ struct work_struct dynamic_ps_enable_work; -+ struct work_struct dynamic_ps_disable_work; -+ struct timer_list dynamic_ps_timer; -+ - #ifdef CONFIG_MAC80211_DEBUGFS - struct local_debugfsdentries { - struct dentry *rcdir; -@@ -735,6 +754,7 @@ struct ieee80211_local { - struct dentry *long_retry_limit; - struct dentry *total_ps_buffered; - struct dentry *wep_iv; -+ struct dentry *rssisignal; - struct dentry *statistics; - struct local_debugfsdentries_statsdentries { - struct dentry *transmitted_fragment_count; -@@ -1014,6 +1034,18 @@ int ieee80211_set_freq(struct ieee80211_ - u64 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isphist.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isphist.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isphist.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isphist.c 2011-09-04 11:37:54.000000000 +0200 +@@ -229,6 +229,7 @@ void isp_hist_config_registers(struct is + isp_hist_dma_config(isp_hist); -+void ieee80211_dynamic_ps_enable_work(struct work_struct *work); -+void ieee80211_dynamic_ps_disable_work(struct work_struct *work); -+void ieee80211_dynamic_ps_timer(unsigned long data); -+void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, -+ struct ieee80211_hdr *hdr); -+void ieee80211_beacon_loss_work(struct work_struct *work); -+ -+void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, -+ enum queue_stop_reason reason); -+void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, -+ enum queue_stop_reason reason); -+ - #ifdef CONFIG_MAC80211_NOINLINE - #define debug_noinline noinline - #else -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/iface.c linux-omap-2.6.28-nokia1/net/mac80211/iface.c ---- linux-omap-2.6.28-omap1/net/mac80211/iface.c 2011-06-22 13:14:26.123067635 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/iface.c 2011-06-22 13:19:33.283063268 +0200 -@@ -154,6 +154,16 @@ static int ieee80211_open(struct net_dev - goto err_del_bss; - need_hw_reconfig = 1; - ieee80211_led_radio(local, local->hw.conf.radio_enabled); -+ /* -+ * In the case of IBSS whenever interface is restarted, random -+ * BSSID should be generated when creating ad-hoc network with -+ * same ssid. As the bss_list is not cleared anywhere, it uses -+ * previously cached random BSSID. This is a workaround to -+ * clear bss_list during interface down and initialize during -+ * interface up. 
-+ */ -+ -+ ieee80211_rx_bss_list_init(local); + isp_hist->update = 0; ++ isp_hist->stat.config_counter++; + spin_unlock_irqrestore(&isp_hist->lock, irqflags); + + isp_hist_print_status(isp_hist); +@@ -251,7 +252,7 @@ static void isp_hist_dma_cb(int lch, u16 + isp_reg_and(dev, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, + ~ISPHIST_CNT_CLR_EN); + if (!ret) +- omap34xx_isp_hist_dma_done(dev); ++ isp_hist_dma_done(dev); } + isp_hist->waiting_dma = 0; + } +@@ -568,8 +569,8 @@ static void isp_hist_update_params(struc + * + * Returns 0 on success configuration. + **/ +-int omap34xx_isp_hist_config(struct isp_hist_device *isp_hist, +- struct isp_hist_config *histcfg) ++int isp_hist_config(struct isp_hist_device *isp_hist, ++ struct isp_hist_config *histcfg) + { + struct device *dev = to_device(isp_hist); + unsigned long irqflags; +@@ -632,15 +633,15 @@ int omap34xx_isp_hist_config(struct isp_ + } - /* -@@ -434,6 +444,8 @@ static int ieee80211_stop(struct net_dev - * it no longer is. - */ - cancel_work_sync(&sdata->u.sta.work); -+ cancel_work_sync(&sdata->u.sta.beacon_loss_work); -+ - /* - * When we get here, the interface is marked down. - * Call synchronize_rcu() to wait for the RX path -@@ -499,6 +511,11 @@ static int ieee80211_stop(struct net_dev - local->ops->stop(local_to_hw(local)); + /** +- * omap34xx_isp_hist_request_statistics - Request statistics in Histogram. ++ * isp_hist_request_statistics - Request statistics in Histogram. + * @histdata: Pointer to data structure. + * + * This API allows the user to request for histogram statistics. + * + * Returns 0 on successful request. + **/ +-int omap34xx_isp_hist_request_statistics(struct isp_hist_device *isp_hist, +- struct isp_hist_data *histdata) ++int isp_hist_request_statistics(struct isp_hist_device *isp_hist, ++ struct isp_hist_data *histdata) + { + struct device *dev = to_device(isp_hist); + struct ispstat_buffer *buf; +@@ -718,17 +719,17 @@ void isp_hist_cleanup(struct device *dev + } - ieee80211_led_radio(local, 0); -+ /* -+ * Clear the bss_list so that random BSSID is generated when -+ * creating ad-hoc network with same bssid. -+ */ -+ ieee80211_rx_bss_list_deinit(local); + /** +- * isphist_save_context - Saves the values of the histogram module registers. ++ * isp_hist_save_context - Saves the values of the histogram module registers. + **/ +-void isphist_save_context(struct device *dev) ++void isp_hist_save_context(struct device *dev) + { + isp_save_context(dev, isphist_reg_list); + } + + /** +- * isphist_restore_context - Restores the values of the histogram module regs. ++ * isp_hist_restore_context - Restores the values of the histogram module regs. 
+ **/ +-void isphist_restore_context(struct device *dev) ++void isp_hist_restore_context(struct device *dev) + { + isp_restore_context(dev, isphist_reg_list); + } +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isphist.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isphist.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isphist.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isphist.h 2011-09-04 11:37:54.000000000 +0200 +@@ -149,11 +149,11 @@ void isp_hist_mark_invalid_buf(struct is + void isp_hist_config_registers(struct isp_hist_device *isp_hist); + void isp_hist_suspend(struct isp_hist_device *isp_hist); + void isp_hist_resume(struct isp_hist_device *isp_hist); +-void isphist_save_context(struct device *dev); +-void isphist_restore_context(struct device *dev); +-int omap34xx_isp_hist_config(struct isp_hist_device *isp_hist, +- struct isp_hist_config *histcfg); +-int omap34xx_isp_hist_request_statistics(struct isp_hist_device *isp_hist, +- struct isp_hist_data *histdata); ++void isp_hist_save_context(struct device *dev); ++void isp_hist_restore_context(struct device *dev); ++int isp_hist_config(struct isp_hist_device *isp_hist, ++ struct isp_hist_config *histcfg); ++int isp_hist_request_statistics(struct isp_hist_device *isp_hist, ++ struct isp_hist_data *histdata); + + #endif /* OMAP_ISP_HIST */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isph3a.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isph3a.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isph3a.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isph3a.c 2011-09-04 11:37:54.000000000 +0200 +@@ -194,6 +194,7 @@ void isph3a_aewb_config_registers(struct + + ispstat_bufs_set_size(&isp_h3a->stat, isp_h3a->buf_size); + isp_h3a->update = 0; ++ isp_h3a->stat.config_counter++; + + spin_unlock_irqrestore(isp_h3a->lock, irqflags); + } +@@ -404,14 +405,14 @@ static void isph3a_aewb_set_params(struc + } - flush_workqueue(local->hw.workqueue); + /** +- * omap34xx_isph3a_aewb_config - Configure AEWB regs, enable/disable H3A engine. ++ * isph3a_aewb_config - Configure AEWB regs, enable/disable H3A engine. + * @aewbcfg: Pointer to AEWB config structure. + * + * Returns 0 if successful, -EINVAL if aewbcfg pointer is NULL, -ENOMEM if + * was unable to allocate memory for the buffer, of other errors if H3A + * callback is not set or the parameters for AEWB are invalid. 
+ **/ +-int omap34xx_isph3a_aewb_config(struct isp_h3a_device *isp_h3a, ++int isph3a_aewb_config(struct isp_h3a_device *isp_h3a, + struct isph3a_aewb_config *aewbcfg) + { + struct device *dev = to_device(isp_h3a); +@@ -454,7 +455,7 @@ int omap34xx_isph3a_aewb_config(struct i -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/main.c linux-omap-2.6.28-nokia1/net/mac80211/main.c ---- linux-omap-2.6.28-omap1/net/mac80211/main.c 2011-06-22 13:14:26.123067635 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/main.c 2011-06-22 13:19:33.283063268 +0200 -@@ -342,9 +342,12 @@ void ieee80211_bss_info_change_notify(st + return 0; + } +-EXPORT_SYMBOL(omap34xx_isph3a_aewb_config); ++EXPORT_SYMBOL(isph3a_aewb_config); - u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) + /** + * isph3a_aewb_request_statistics - REquest statistics and update gains in AEWB +@@ -467,8 +468,8 @@ EXPORT_SYMBOL(omap34xx_isph3a_aewb_confi + * Returns 0 if successful, -EINVAL when H3A engine is not enabled, or other + * errors when setting gains. + **/ +-int omap34xx_isph3a_aewb_request_statistics(struct isp_h3a_device *isp_h3a, +- struct isph3a_aewb_data *aewbdata) ++int isph3a_aewb_request_statistics(struct isp_h3a_device *isp_h3a, ++ struct isph3a_aewb_data *aewbdata) { -- sdata->bss_conf.use_cts_prot = 0; -- sdata->bss_conf.use_short_preamble = 0; -- return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; -+ sdata->bss_conf.use_cts_prot = false; -+ sdata->bss_conf.use_short_preamble = false; -+ sdata->bss_conf.use_short_slot = false; -+ return BSS_CHANGED_ERP_CTS_PROT | -+ BSS_CHANGED_ERP_PREAMBLE | -+ BSS_CHANGED_ERP_SLOT; + struct device *dev = to_device(isp_h3a); + unsigned long irqflags; +@@ -517,7 +518,7 @@ int omap34xx_isph3a_aewb_request_statist + + return ret; } +-EXPORT_SYMBOL(omap34xx_isph3a_aewb_request_statistics); ++EXPORT_SYMBOL(isph3a_aewb_request_statistics); + + /** + * isph3a_aewb_init - Module Initialisation. 
+diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isph3a.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isph3a.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isph3a.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isph3a.h 2011-09-04 11:37:54.000000000 +0200 +@@ -130,11 +130,11 @@ struct isp_h3a_device { + struct ispstat stat; + }; + +-int omap34xx_isph3a_aewb_config(struct isp_h3a_device *isp_h3a, +- struct isph3a_aewb_config *aewbcfg); ++int isph3a_aewb_config(struct isp_h3a_device *isp_h3a, ++ struct isph3a_aewb_config *aewbcfg); + +-int omap34xx_isph3a_aewb_request_statistics(struct isp_h3a_device *isp_h3a, +- struct isph3a_aewb_data *aewbdata); ++int isph3a_aewb_request_statistics(struct isp_h3a_device *isp_h3a, ++ struct isph3a_aewb_data *aewbdata); - void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, -@@ -765,8 +768,17 @@ struct ieee80211_hw *ieee80211_alloc_hw( + void isph3a_save_context(struct device *dev); - spin_lock_init(&local->key_lock); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isppreview.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isppreview.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isppreview.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isppreview.c 2011-09-04 11:37:54.000000000 +0200 +@@ -184,17 +184,16 @@ static u32 luma_enhance_table[] = { + #include "luma_enhance_table.h" + }; -+ spin_lock_init(&local->queue_stop_reason_lock); -+ - INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); +-static int omap34xx_isp_tables_update(struct isp_prev_device *isp_prev, +- struct isptables_update *isptables_struct); ++static int isppreview_tables_update(struct isp_prev_device *isp_prev, ++ struct isptables_update *isptables_struct); -+ INIT_WORK(&local->dynamic_ps_enable_work, -+ ieee80211_dynamic_ps_enable_work); -+ INIT_WORK(&local->dynamic_ps_disable_work, -+ ieee80211_dynamic_ps_disable_work); -+ setup_timer(&local->dynamic_ps_timer, -+ ieee80211_dynamic_ps_timer, (unsigned long) local); -+ - sta_info_init(local); - tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/mlme.c linux-omap-2.6.28-nokia1/net/mac80211/mlme.c ---- linux-omap-2.6.28-omap1/net/mac80211/mlme.c 2011-06-22 13:14:26.153067636 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/mlme.c 2011-06-22 13:19:33.283063268 +0200 -@@ -34,7 +34,6 @@ - #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) - #define IEEE80211_ASSOC_MAX_TRIES 3 - #define IEEE80211_MONITORING_INTERVAL (2 * HZ) --#define IEEE80211_PROBE_INTERVAL (60 * HZ) - #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) - #define IEEE80211_SCAN_INTERVAL (2 * HZ) - #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) -@@ -45,6 +44,8 @@ + /** +- * omap34xx_isp_preview_config - Abstraction layer Preview configuration. ++ * isppreview_config - Abstraction layer Preview configuration. + * @userspace_add: Pointer from Userspace to structure with flags and data to + * update. 
+ **/ +-int omap34xx_isp_preview_config(struct isp_prev_device *isp_prev, +- void *userspace_add) ++int isppreview_config(struct isp_prev_device *isp_prev, void *userspace_add) + { + struct isp_device *isp = to_isp_device(isp_prev); + struct device *dev = to_device(isp_prev); +@@ -348,32 +347,26 @@ out_config_shadow: + isp_table_update.prev_cfa = config->prev_cfa; + isp_table_update.prev_wbal = config->prev_wbal; - #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 +- if (omap34xx_isp_tables_update(isp_prev, &isp_table_update)) ++ if (isppreview_tables_update(isp_prev, &isp_table_update)) + goto err_copy_from_user; -+#define RSSI_HIGHSIGNAL "HIGHSIGNAL" -+#define RSSI_LOWSIGNAL "LOWSIGNAL" +- spin_lock_irqsave(&isp_prev->lock, flags); + isp_prev->shadow_update = 0; +- spin_unlock_irqrestore(&isp_prev->lock, flags); +- + return 0; - /* utils */ - static int ecw2cw(int ecw) -@@ -568,9 +569,8 @@ static void ieee80211_sta_wmm_params(str - } + err_copy_from_user: +- spin_lock_irqsave(&isp_prev->lock, flags); + isp_prev->shadow_update = 0; +- spin_unlock_irqrestore(&isp_prev->lock, flags); +- + dev_err(dev, "preview: Config: Copy From User Error\n"); + return -EFAULT; } +-EXPORT_SYMBOL_GPL(omap34xx_isp_preview_config); ++EXPORT_SYMBOL_GPL(isppreview_config); --static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, -- bool use_protection, -- bool use_short_preamble) -+static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, -+ u16 capab, bool erp_valid, u8 erp) + /** +- * omap34xx_isp_tables_update - Abstraction layer Tables update. ++ * isppreview_tables_update - Abstraction layer Tables update. + * @isptables_struct: Pointer from Userspace to structure with flags and table + * data to update. + **/ +-static int omap34xx_isp_tables_update(struct isp_prev_device *isp_prev, +- struct isptables_update *isptables_struct) ++static int isppreview_tables_update(struct isp_prev_device *isp_prev, ++ struct isptables_update *isptables_struct) { - struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; - #ifdef CONFIG_MAC80211_VERBOSE_DEBUG -@@ -578,6 +578,18 @@ static u32 ieee80211_handle_protect_prea - DECLARE_MAC_BUF(mac); - #endif - u32 changed = 0; -+ bool use_protection; -+ bool use_short_preamble; -+ bool use_short_slot; -+ -+ if (erp_valid) { -+ use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0; -+ use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0; -+ } else { -+ use_protection = false; -+ use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE); -+ } -+ use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); + struct device *dev = to_device(isp_prev); - if (use_protection != bss_conf->use_cts_prot) { - #ifdef CONFIG_MAC80211_VERBOSE_DEBUG -@@ -607,30 +619,18 @@ static u32 ieee80211_handle_protect_prea - changed |= BSS_CHANGED_ERP_PREAMBLE; - } +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isppreview.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isppreview.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/isppreview.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/isppreview.h 2011-09-04 11:37:54.000000000 +0200 +@@ -419,8 +419,7 @@ void isppreview_restore_context(struct d + static inline void isppreview_restore_context(struct device *dev) {} + #endif -- return changed; --} -- --static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, -- u8 erp_value) --{ -- bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; -- bool 
use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0; -- -- return ieee80211_handle_protect_preamb(sdata, -- use_protection, use_short_preamble); --} -- --static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, -- struct ieee80211_bss *bss) --{ -- u32 changed = 0; -- -- if (bss->has_erp_value) -- changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value); -- else { -- u16 capab = bss->capability; -- changed |= ieee80211_handle_protect_preamb(sdata, false, -- (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); -+ if (use_short_slot != bss_conf->use_short_slot) { -+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG -+ if (net_ratelimit()) { -+ printk(KERN_DEBUG "%s: switched to %s slot" -+ " (BSSID=%s)\n", -+ sdata->dev->name, -+ use_short_slot ? "short" : "long", -+ ifsta->bssid); -+ } -+#endif -+ bss_conf->use_short_slot = use_short_slot; -+ changed |= BSS_CHANGED_ERP_SLOT; - } +-int omap34xx_isp_preview_config(struct isp_prev_device *isp_prev, +- void *userspace_add); ++int isppreview_config(struct isp_prev_device *isp_prev, void *userspace_add); - return changed; -@@ -723,7 +723,10 @@ static void ieee80211_set_associated(str - sdata->bss_conf.timestamp = bss->timestamp; - sdata->bss_conf.dtim_period = bss->dtim_period; + void isppreview_set_skip(struct isp_prev_device *isp_prev, u32 h, u32 v); -- changed |= ieee80211_handle_bss_capability(sdata, bss); -+ changed |= ieee80211_handle_bss_capability(sdata, -+ bss->capability, bss->has_erp_value, bss->erp_value); -+ -+ bss->hold = true; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/ispreg.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/ispreg.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/isp/ispreg.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/isp/ispreg.h 2011-09-04 11:37:54.000000000 +0200 +@@ -1275,6 +1275,7 @@ - ieee80211_rx_bss_put(local, bss); - } -@@ -739,6 +742,9 @@ static void ieee80211_set_associated(str - memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); - ieee80211_sta_send_associnfo(sdata, ifsta); + #define ISPHIST_HV_INFO_MASK 0x3FFF3FFF -+ ifsta->rssi_state = IEEE80211_RSSI_STATE_HIGH; -+ ifsta->roam_threshold_count = 0; ++#define ISPCCDC_LSC_ENABLE 1 + #define ISPCCDC_LSC_GAIN_MODE_N_MASK 0x700 + #define ISPCCDC_LSC_GAIN_MODE_N_SHIFT 8 + #define ISPCCDC_LSC_GAIN_MODE_M_MASK 0x3800 +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/smia-sensor.c kernel-2.6.28-20094803.3+0m5/drivers/media/video/smia-sensor.c +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/smia-sensor.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/smia-sensor.c 2011-09-04 11:37:54.000000000 +0200 +@@ -38,7 +38,17 @@ + #include "smia-sensor.h" + + #define DEFAULT_XCLK 9600000 /* [Hz] */ +-#define DEFAULT_EXPOSURE 33946 /* [us] */ + - ifsta->last_probe = jiffies; - ieee80211_led_assoc(local, 1); - -@@ -751,6 +757,16 @@ static void ieee80211_set_associated(str - changed |= BSS_CHANGED_BASIC_RATES; - ieee80211_bss_info_change_notify(sdata, changed); - -+ if (local->powersave) { -+ if (local->dynamic_ps_timeout > 0) -+ mod_timer(&local->dynamic_ps_timer, jiffies + -+ msecs_to_jiffies(local->dynamic_ps_timeout)); -+ else { -+ conf->flags |= IEEE80211_CONF_PS; -+ ieee80211_hw_config(local); -+ } -+ } ++#define SMIA_CTRL_GAIN 0 ++#define SMIA_CTRL_EXPOSURE 1 ++#define SMIA_NCTRLS 2 + - netif_tx_start_all_queues(sdata->dev); - netif_carrier_on(sdata->dev); - -@@ -767,6 +783,8 @@ static void 
ieee80211_direct_probe(struc - printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n", - sdata->dev->name, print_mac(mac, ifsta->bssid)); - ifsta->state = IEEE80211_STA_MLME_DISABLED; -+ ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; -+ ieee80211_sta_send_apinfo(sdata, ifsta); - return; - } - -@@ -799,6 +817,8 @@ static void ieee80211_authenticate(struc - " timed out\n", - sdata->dev->name, print_mac(mac, ifsta->bssid)); - ifsta->state = IEEE80211_STA_MLME_DISABLED; -+ ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; -+ ieee80211_sta_send_apinfo(sdata, ifsta); - return; - } - -@@ -816,6 +836,8 @@ static void ieee80211_set_disassoc(struc - bool self_disconnected, u16 reason) - { - struct ieee80211_local *local = sdata->local; -+ struct ieee80211_conf *conf = &local_to_hw(local)->conf; -+ struct ieee80211_bss *bss; - struct sta_info *sta; - u32 changed = BSS_CHANGED_ASSOC; ++#define CID_TO_CTRL(id) ((id) == V4L2_CID_GAIN ? SMIA_CTRL_GAIN : \ ++ (id) == V4L2_CID_EXPOSURE ? \ ++ SMIA_CTRL_EXPOSURE : \ ++ -EINVAL) ++ ++#define VS6555_RESET_SHIFT_HACK 1 -@@ -839,6 +861,15 @@ static void ieee80211_set_disassoc(struc + /* Register definitions */ - ieee80211_sta_tear_down_BA_sessions(sdata, sta->sta.addr); +@@ -53,6 +63,50 @@ + #define REG_COARSE_EXPOSURE 0x0202 + #define REG_ANALOG_GAIN 0x0204 -+ bss = ieee80211_rx_bss_get(local, ifsta->bssid, -+ conf->channel->center_freq, -+ ifsta->ssid, ifsta->ssid_len); ++struct smia_sensor; + -+ if (bss) { -+ bss->hold = false; -+ ieee80211_rx_bss_put(local, bss); -+ } ++struct smia_sensor_type { ++ u8 manufacturer_id; ++ u16 model_id; ++ char *name; ++ int ev_table_size; ++ u16 *ev_table; ++}; + - if (self_disconnected) { - if (deauth) - ieee80211_send_deauth_disassoc(sdata, -@@ -870,6 +901,14 @@ static void ieee80211_set_disassoc(struc - - rcu_read_unlock(); ++/* Current values for V4L2 controls */ ++struct smia_control { ++ s32 minimum; ++ s32 maximum; ++ s32 step; ++ s32 default_value; ++ s32 value; ++ int (*set)(struct smia_sensor *, s32 value); ++}; ++ ++struct smia_sensor { ++ struct i2c_client *i2c_client; ++ struct i2c_driver driver; ++ ++ /* Sensor information */ ++ struct smia_sensor_type *type; ++ u8 revision_number; ++ u8 smia_version; ++ ++ /* V4L2 current control values */ ++ struct smia_control controls[SMIA_NCTRLS]; ++ ++ struct smia_reglist *current_reglist; ++ struct v4l2_int_device *v4l2_int_device; ++ struct v4l2_fract timeperframe; ++ ++ struct smia_sensor_platform_data *platform_data; ++ ++ const struct firmware *fw; ++ struct smia_meta_reglist *meta_reglist; ++ ++ enum v4l2_power power; ++}; ++ + static int smia_ioctl_queryctrl(struct v4l2_int_device *s, + struct v4l2_queryctrl *a); + static int smia_ioctl_g_ctrl(struct v4l2_int_device *s, +@@ -77,6 +131,106 @@ static int smia_ioctl_enum_frameinterval + struct v4l2_frmivalenum *frm); + static int smia_ioctl_dev_init(struct v4l2_int_device *s); -+ del_timer_sync(&local->dynamic_ps_timer); -+ cancel_work_sync(&local->dynamic_ps_enable_work); ++/* SMIA-model gain is stored in precalculated tables here. In the model, ++ * reg = (c0-gain*c1) / (gain*m1-m0) ++ * gain = 2^ev ++ * The constants c0, m0, c1 and m1 depend on sensor. ++ */ + -+ if (local->hw.conf.flags & IEEE80211_CONF_PS) { -+ local->hw.conf.flags &= ~IEEE80211_CONF_PS; -+ ieee80211_hw_config(local); -+ } ++/* Analog gain table for VS6555. 
++ * m0 = 0 ++ * c0 = 256 ++ * m1 = -1 (erroneously -16 in silicon) ++ * c1 = 256 ++ * step = 16 ++ */ ++static u16 smia_gain_vs6555[] = { ++/* reg EV gain */ ++ 0, /* 0.0 1.00000 */ ++ 16, /* 0.1 1.07177 */ ++ 32, /* 0.2 1.14870 */ ++ 48, /* 0.3 1.23114 */ ++ 64, /* 0.4 1.31951 */ ++ 80, /* 0.5 1.41421 */ ++ 80, /* 0.6 1.51572 */ ++ 96, /* 0.7 1.62450 */ ++ 112, /* 0.8 1.74110 */ ++ 112, /* 0.9 1.86607 */ ++ 128, /* 1.0 2.00000 */ ++ 144, /* 1.1 2.14355 */ ++ 144, /* 1.2 2.29740 */ ++ 160, /* 1.3 2.46229 */ ++ 160, /* 1.4 2.63902 */ ++ 160, /* 1.5 2.82843 */ ++ 176, /* 1.6 3.03143 */ ++ 176, /* 1.7 3.24901 */ ++ 176, /* 1.8 3.48220 */ ++ 192, /* 1.9 3.73213 */ ++ 192, /* 2.0 4.00000 */ ++ 192, /* 2.1 4.28709 */ ++ 208, /* 2.2 4.59479 */ ++ 208, /* 2.3 4.92458 */ ++ 208, /* 2.4 5.27803 */ ++ 208, /* 2.5 5.65685 */ ++ 208, /* 2.6 6.06287 */ ++ 224, /* 2.7 6.49802 */ ++ 224, /* 2.8 6.96440 */ ++ 224, /* 2.9 7.46426 */ ++ 224, /* 3.0 8.00000 */ ++ 224, /* 3.1 8.57419 */ ++ 224, /* 3.2 9.18959 */ ++ 224, /* 3.3 9.84916 */ ++ 224, /* 3.4 10.55606 */ ++ 240, /* 3.5 11.31371 */ ++ 240, /* 3.6 12.12573 */ ++ 240, /* 3.7 12.99604 */ ++ 240, /* 3.8 13.92881 */ ++ 240, /* 3.9 14.92853 */ ++ 240, /* 4.0 16.00000 */ ++}; + - sta_info_destroy(sta); - } ++/* Analog gain table for TCM8330MD. ++ * m0 = 1 ++ * c0 = 0 ++ * m1 = 0 ++ * c1 = 36 (MMS uses 29) ++ * step = 1 ++ */ ++static u16 smia_gain_tcm8330md[] = { ++/* reg EV gain */ ++ 36, /* 0.0 1.00000 */ ++ 39, /* 0.1 1.07177 */ ++ 41, /* 0.2 1.14870 */ ++ 44, /* 0.3 1.23114 */ ++ 48, /* 0.4 1.31951 */ ++ 51, /* 0.5 1.41421 */ ++ 55, /* 0.6 1.51572 */ ++ 58, /* 0.7 1.62450 */ ++ 63, /* 0.8 1.74110 */ ++ 67, /* 0.9 1.86607 */ ++ 72, /* 1.0 2.00000 */ ++ 77, /* 1.1 2.14355 */ ++ 83, /* 1.2 2.29740 */ ++ 89, /* 1.3 2.46229 */ ++ 95, /* 1.4 2.63902 */ ++ 102, /* 1.5 2.82843 */ ++ 109, /* 1.6 3.03143 */ ++ 117, /* 1.7 3.24901 */ ++ 125, /* 1.8 3.48220 */ ++ 134, /* 1.9 3.73213 */ ++ 144, /* 2.0 4.00000 */ ++ 154, /* 2.1 4.28709 */ ++ 165, /* 2.2 4.59479 */ ++ 177, /* 2.3 4.92458 */ ++ 190, /* 2.4 5.27803 */ ++ 204, /* 2.5 5.65685 */ ++ 218, /* 2.6 6.06287 */ ++ 234, /* 2.7 6.49802 */ ++ 251, /* 2.8 6.96440 */ ++ 269, /* 2.9 7.46426 */ ++ 288, /* 3.0 8.00000 */ ++}; ++ + static struct v4l2_int_ioctl_desc smia_ioctl_desc[] = { + { vidioc_int_enum_fmt_cap_num, + (v4l2_int_ioctl_func *)smia_ioctl_enum_fmt_cap }, +@@ -125,13 +279,16 @@ static struct v4l2_int_device smia_int_d + }, + }; -@@ -922,6 +961,8 @@ static void ieee80211_associate(struct i - " timed out\n", - sdata->dev->name, print_mac(mac, ifsta->bssid)); - ifsta->state = IEEE80211_STA_MLME_DISABLED; -+ ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; -+ ieee80211_sta_send_apinfo(sdata, ifsta); - return; - } +-static struct { +- u8 manufacturer_id; +- u16 model_id; +- char *name; +-} smia_sensors[] = { +- { 0x01, 0x022b, "vs6555" }, +- { 0x0c, 0x208a, "tcm8330md" }, ++static struct smia_sensor_type smia_sensors[] = { ++ { 0, 0, "unknown", 0, NULL }, ++ { ++ 0x01, 0x022b, "vs6555", ++ ARRAY_SIZE(smia_gain_vs6555), smia_gain_vs6555 ++ }, ++ { ++ 0x0c, 0x208a, "tcm8330md", ++ ARRAY_SIZE(smia_gain_tcm8330md), smia_gain_tcm8330md ++ }, + }; -@@ -940,13 +981,113 @@ static void ieee80211_associate(struct i - mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); + static const __u32 smia_mode_ctrls[] = { +@@ -198,6 +355,47 @@ static int smia_exposure_rows_to_us(stru + return (smia_get_row_time(sensor) * rows + (1 << 7)) >> 8; } -+void ieee80211_rssi_changed(struct ieee80211_vif *vif, -+ enum ieee80211_rssi_state 
state) ++/* Called to change the V4L2 gain control value. This function ++ * rounds and clamps the given value and updates the V4L2 control value. ++ * If power is on, also updates the sensor analog gain. ++ */ ++static int smia_set_gain(struct smia_sensor *sensor, s32 gain) +{ -+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); -+ struct ieee80211_if_sta *ifsta = NULL; -+ const char *event = NULL; -+ union iwreq_data wrqu; -+ char *buf = NULL; -+ -+ ifsta = &sdata->u.sta; ++ gain = clamp(gain, ++ sensor->controls[SMIA_CTRL_GAIN].minimum, ++ sensor->controls[SMIA_CTRL_GAIN].maximum); ++ sensor->controls[SMIA_CTRL_GAIN].value = gain; + -+ if (!ifsta || ifsta->state != IEEE80211_STA_MLME_ASSOCIATED) -+ return; ++ if (sensor->power == V4L2_POWER_OFF) ++ return 0; + -+ if (ifsta->rssi_state == state) -+ return; ++ return smia_i2c_write_reg(sensor->i2c_client, ++ SMIA_REG_16BIT, REG_ANALOG_GAIN, ++ sensor->type->ev_table[gain]); ++} + -+ ifsta->rssi_state = state; ++/* Called to change the V4L2 exposure control value. This function ++ * rounds and clamps the given value and updates the V4L2 control value. ++ * If power is on, also update the sensor exposure time. ++ * exptime is in microseconds. ++ */ ++static int smia_set_exposure(struct smia_sensor *sensor, s32 exptime) ++{ ++ int exposure_rows; + -+ switch (state) { -+ case IEEE80211_RSSI_STATE_HIGH: -+ event = RSSI_HIGHSIGNAL; -+ break; -+ case IEEE80211_RSSI_STATE_LOW: -+ event = RSSI_LOWSIGNAL; -+ break; -+ default: -+ WARN_ON(1); -+ return; -+ } ++ exptime = clamp(exptime, sensor->controls[SMIA_CTRL_EXPOSURE].minimum, ++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum); + -+ buf = kstrdup(event, GFP_ATOMIC); -+ printk(KERN_DEBUG "%s: roaming signal from driver, sending %s\n", -+ sdata->dev->name, buf); -+ memset(&wrqu, 0, sizeof(wrqu)); -+ wrqu.data.length = strlen(buf); -+ wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf); -+ kfree(buf); -+} -+EXPORT_SYMBOL(ieee80211_rssi_changed); ++ exposure_rows = smia_exposure_us_to_rows(sensor, &exptime); ++ sensor->controls[SMIA_CTRL_EXPOSURE].value = exptime; + -+void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, -+ struct ieee80211_hdr *hdr) -+{ -+ struct ieee80211_local *local = sdata->local; ++ if (sensor->power == V4L2_POWER_OFF) ++ return 0; + -+ /* -+ * We can postpone the sta.timer whenever receiving unicast frames -+ * from AP because we know that the connection is working both ways -+ * at that time. But multicast frames (and hence also beacons) must -+ * be ignored here, because we need to trigger the timer during -+ * data idle periods for sending the periodical probe request to -+ * the AP. 
-+ */ -+ if (!is_multicast_ether_addr(hdr->addr1) && -+ !(local->hw.flags & IEEE80211_HW_BEACON_FILTER)) -+ mod_timer(&sdata->u.sta.timer, -+ jiffies + IEEE80211_MONITORING_INTERVAL); ++ return smia_i2c_write_reg(sensor->i2c_client, ++ SMIA_REG_16BIT, REG_COARSE_EXPOSURE, exposure_rows); +} + -+void ieee80211_beacon_loss_work(struct work_struct *work) + static int smia_stream_on(struct v4l2_int_device *s) + { + struct smia_sensor *sensor = s->priv; +@@ -212,6 +410,37 @@ static int smia_stream_off(struct v4l2_i + SMIA_REG_8BIT, 0x0100, 0x00); + } + ++static int smia_update_controls(struct v4l2_int_device *s) +{ -+ struct ieee80211_sub_if_data *sdata = -+ container_of(work, struct ieee80211_sub_if_data, -+ u.sta.beacon_loss_work); -+ struct ieee80211_if_sta *ifsta = &sdata->u.sta; -+ struct ieee80211_local *local = sdata->local; -+ -+ if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATED) { -+ printk(KERN_DEBUG "%s reports beacon loss when not " -+ "associated\n", sdata->dev->name); -+ return; -+ } ++ struct smia_sensor *sensor = s->priv; ++ int i; + -+ printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " -+ "- sending probe request\n", sdata->dev->name, -+ sdata->u.sta.bssid); ++ sensor->controls[SMIA_CTRL_EXPOSURE].minimum = 0; ++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum = ++ smia_exposure_rows_to_us(sensor, ++ sensor->current_reglist->mode.max_exp); ++ sensor->controls[SMIA_CTRL_EXPOSURE].step = ++ smia_exposure_rows_to_us(sensor, 1); ++ sensor->controls[SMIA_CTRL_EXPOSURE].default_value = ++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum; ++ if (sensor->controls[SMIA_CTRL_EXPOSURE].value == 0) ++ sensor->controls[SMIA_CTRL_EXPOSURE].value = ++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum; + -+ ifsta->flags |= IEEE80211_STA_PROBEREQ_POLL; ++ /* Adjust V4L2 control values and write them to the sensor */ + -+ if (local->powersave) { -+ /* disable power save before sending the probe request */ -+ local->hw.conf.flags &= ~IEEE80211_CONF_PS; -+ ieee80211_hw_config(local); ++ for (i = 0; i < ARRAY_SIZE(sensor->controls); i++) { ++ int rval; ++ if (!sensor->controls[i].set) ++ continue; ++ rval = sensor->controls[i].set(sensor, ++ sensor->controls[i].value); ++ if (rval) ++ return rval; + } -+ -+ ieee80211_send_probe_req(sdata, ifsta->bssid, ifsta->ssid, -+ ifsta->ssid_len); -+ -+ mod_timer(&ifsta->timer, jiffies + IEEE80211_MONITORING_INTERVAL); ++ return 0; +} + -+void ieee80211_beacon_loss(struct ieee80211_vif *vif) -+{ -+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); -+ -+ queue_work(sdata->local->hw.workqueue, -+ &sdata->u.sta.beacon_loss_work); -+} -+EXPORT_SYMBOL(ieee80211_beacon_loss); - - static void ieee80211_associated(struct ieee80211_sub_if_data *sdata, - struct ieee80211_if_sta *ifsta) + /* Must be called with power already enabled on the sensor */ + static int smia_configure(struct v4l2_int_device *s) { - struct ieee80211_local *local = sdata->local; - struct sta_info *sta; -- int disassoc; -+ bool disassoc = false, send_probe = false; - DECLARE_MAC_BUF(mac); +@@ -223,17 +452,6 @@ static int smia_configure(struct v4l2_in + if (rval) + goto fail; - /* TODO: start monitoring current AP signal quality and number of -@@ -960,42 +1101,57 @@ static void ieee80211_associated(struct +- rval = smia_i2c_write_reg(sensor->i2c_client, SMIA_REG_8BIT, +- REG_ANALOG_GAIN+1, +- sensor->ctrl_gain << 4); +- if (rval) +- goto fail; +- rval = smia_i2c_write_reg(sensor->i2c_client, +- SMIA_REG_16BIT, REG_COARSE_EXPOSURE, +- smia_exposure_us_to_rows(sensor, &sensor->ctrl_exposure)); 
+- if (rval) +- goto fail; +- + /* + * FIXME: remove stream_off from here as soon as camera-firmware + * is modified to not enable streaming automatically. +@@ -242,6 +460,10 @@ static int smia_configure(struct v4l2_in + if (rval) + goto fail; - sta = sta_info_get(local, ifsta->bssid); - if (!sta) { -- printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", -- sdata->dev->name, print_mac(mac, ifsta->bssid)); -- disassoc = 1; -- } else { -- disassoc = 0; -- if (time_after(jiffies, -- sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { -- if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { -- printk(KERN_DEBUG "%s: No ProbeResp from " -- "current AP %s - assume out of " -- "range\n", -- sdata->dev->name, print_mac(mac, ifsta->bssid)); -- disassoc = 1; -- } else -- ieee80211_send_probe_req(sdata, ifsta->bssid, -- ifsta->ssid, -- ifsta->ssid_len); -- ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; -- } else { -- ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; -- if (time_after(jiffies, ifsta->last_probe + -- IEEE80211_PROBE_INTERVAL)) { -- ifsta->last_probe = jiffies; -- ieee80211_send_probe_req(sdata, ifsta->bssid, -- ifsta->ssid, -- ifsta->ssid_len); -- } -- } -+ printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", -+ sdata->dev->name, ifsta->bssid); -+ disassoc = true; -+ goto unlock; ++ rval = smia_update_controls(s); ++ if (rval) ++ goto fail; ++ + rval = sensor->platform_data->configure_interface( + s, + sensor->current_reglist->mode.window_width, +@@ -313,21 +535,13 @@ static struct v4l2_queryctrl smia_ctrls[ + { + .id = V4L2_CID_GAIN, + .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Analog gain", +- .minimum = 0, +- .maximum = 15, +- .step = 1, +- .default_value = 0, ++ .name = "Analog gain [0.1 EV]", + .flags = V4L2_CTRL_FLAG_SLIDER, + }, + { + .id = V4L2_CID_EXPOSURE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Exposure time [us]", +- .minimum = 0, +- .maximum = DEFAULT_EXPOSURE, +- .step = 1, +- .default_value = DEFAULT_EXPOSURE, + .flags = V4L2_CTRL_FLAG_SLIDER, + }, + }; +@@ -336,7 +550,7 @@ static int smia_ioctl_queryctrl(struct v + struct v4l2_queryctrl *a) + { + struct smia_sensor *sensor = s->priv; +- int rval; ++ int rval, ctrl; + + rval = smia_ctrl_query(smia_ctrls, ARRAY_SIZE(smia_ctrls), a); + if (rval) { +@@ -344,16 +558,16 @@ static int smia_ioctl_queryctrl(struct v + ARRAY_SIZE(smia_mode_ctrls), a); } -+ if ((ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) && -+ time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { -+ printk(KERN_DEBUG "%s: no probe response from AP %pM " -+ "- disassociating\n", -+ sdata->dev->name, ifsta->bssid); -+ disassoc = true; -+ ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; -+ goto unlock; -+ } -+ -+ /* -+ * Beacon filtering is only enabled with power save and then the -+ * stack should not check for beacon loss. 
-+ */ -+ if (!(local->hw.flags & IEEE80211_HW_BEACON_FILTER) && -+ time_after(jiffies, -+ ifsta->last_beacon + IEEE80211_MONITORING_INTERVAL)) { -+ printk(KERN_DEBUG "%s: beacon loss from AP %pM " -+ "- sending probe request\n", -+ sdata->dev->name, ifsta->bssid); -+ ifsta->flags |= IEEE80211_STA_PROBEREQ_POLL; -+ send_probe = true; -+ goto unlock; -+ -+ } +- switch (a->id) { +- case V4L2_CID_EXPOSURE: +- if (sensor->current_reglist) { +- a->maximum = smia_exposure_rows_to_us(sensor, +- sensor->current_reglist->mode.max_exp); +- a->step = smia_exposure_rows_to_us(sensor, 1); +- a->default_value = a->maximum; +- } +- break; +- } ++ ctrl = CID_TO_CTRL(a->id); ++ if (ctrl < 0) ++ return ctrl; ++ if (!sensor->controls[ctrl].set) ++ return -EINVAL; + -+ unlock: - rcu_read_unlock(); ++ a->minimum = sensor->controls[ctrl].minimum; ++ a->maximum = sensor->controls[ctrl].maximum; ++ a->step = sensor->controls[ctrl].step; ++ a->default_value = sensor->controls[ctrl].default_value; -+ /* config() can sleep so call it after unlock */ -+ if (send_probe) { -+ if (local->powersave) { -+ /* disable ps before sending the probe request */ -+ local->hw.conf.flags &= ~IEEE80211_CONF_PS; -+ ieee80211_hw_config(local); -+ } -+ -+ ieee80211_send_probe_req(sdata, ifsta->bssid, ifsta->ssid, -+ ifsta->ssid_len); -+ } -+ - if (disassoc) - ieee80211_set_disassoc(sdata, ifsta, true, true, - WLAN_REASON_PREV_AUTH_NOT_VALID); -- else -+ else if (!(local->hw.flags & IEEE80211_HW_BEACON_FILTER)) - mod_timer(&ifsta->timer, jiffies + - IEEE80211_MONITORING_INTERVAL); + return 0; } -@@ -1139,8 +1295,7 @@ static void ieee80211_rx_mgmt_deauth(str - printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name); +@@ -362,6 +576,7 @@ static int smia_ioctl_g_ctrl(struct v4l2 + struct v4l2_control *vc) + { + struct smia_sensor *sensor = s->priv; ++ int ctrl; - if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE || -- ifsta->state == IEEE80211_STA_MLME_ASSOCIATE || -- ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) { -+ ifsta->state == IEEE80211_STA_MLME_ASSOCIATE) { - ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; - mod_timer(&ifsta->timer, jiffies + - IEEE80211_RETRY_AUTH_INTERVAL); -@@ -1370,6 +1525,12 @@ static void ieee80211_rx_mgmt_assoc_resp - bss_conf->assoc_capability = capab_info; - ieee80211_set_associated(sdata, ifsta); + int rval = smia_mode_g_ctrl(smia_mode_ctrls, + ARRAY_SIZE(smia_mode_ctrls), +@@ -369,16 +584,13 @@ static int smia_ioctl_g_ctrl(struct v4l2 + if (rval == 0) + return 0; -+ /* -+ * initialise the time of last beacon to be the association time, -+ * otherwise beacon loss check will trigger immediately -+ */ -+ ifsta->last_beacon = jiffies; +- switch (vc->id) { +- case V4L2_CID_GAIN: +- vc->value = sensor->ctrl_gain; +- break; +- case V4L2_CID_EXPOSURE: +- vc->value = sensor->ctrl_exposure; +- break; +- default: ++ ctrl = CID_TO_CTRL(vc->id); ++ if (ctrl < 0) ++ return ctrl; ++ if (!sensor->controls[ctrl].set) + return -EINVAL; +- } ++ vc->value = sensor->controls[ctrl].value; + - ieee80211_associated(sdata, ifsta); + return 0; } -@@ -1639,6 +1800,7 @@ static void ieee80211_rx_mgmt_probe_resp - size_t baselen; - struct ieee802_11_elems elems; - struct ieee80211_if_sta *ifsta = &sdata->u.sta; -+ struct ieee80211_local *local = sdata->local; +@@ -386,30 +598,13 @@ static int smia_ioctl_s_ctrl(struct v4l2 + struct v4l2_control *vc) + { + struct smia_sensor *sensor = s->priv; +- int exposure_rows; +- int r = 0; - if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) - return; /* ignore ProbeResp to foreign 
address */ -@@ -1659,6 +1821,19 @@ static void ieee80211_rx_mgmt_probe_resp - sdata->dev->name); - ieee80211_authenticate(sdata, ifsta); - } -+ -+ if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { -+ if (local->powersave) { -+ /* -+ * re-enable power save now that probe response was -+ * received -+ */ -+ local->hw.conf.flags |= IEEE80211_CONF_PS; -+ ieee80211_hw_config(local); -+ } -+ -+ ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; -+ } +- switch (vc->id) { +- case V4L2_CID_GAIN: +- sensor->ctrl_gain = clamp(vc->value, 0, 15); +- if (sensor->power == V4L2_POWER_ON) +- r = smia_i2c_write_reg(sensor->i2c_client, +- SMIA_REG_8BIT, REG_ANALOG_GAIN+1, +- sensor->ctrl_gain << 4); +- break; +- case V4L2_CID_EXPOSURE: +- sensor->ctrl_exposure = vc->value; +- exposure_rows = smia_exposure_us_to_rows(sensor, +- &sensor->ctrl_exposure); +- if (sensor->power == V4L2_POWER_ON) +- r = smia_i2c_write_reg(sensor->i2c_client, +- SMIA_REG_16BIT, REG_COARSE_EXPOSURE, +- exposure_rows); +- break; +- default: ++ int ctrl = CID_TO_CTRL(vc->id); ++ if (ctrl < 0) ++ return ctrl; ++ if (!sensor->controls[ctrl].set) + return -EINVAL; +- } +- return r; ++ return sensor->controls[ctrl].set(sensor, vc->value); } + static int smia_ioctl_enum_fmt_cap(struct v4l2_int_device *s, +@@ -440,13 +635,10 @@ static int smia_ioctl_s_fmt_cap(struct v -@@ -1673,6 +1848,8 @@ static void ieee80211_rx_mgmt_beacon(str - struct ieee80211_local *local = sdata->local; - struct ieee80211_conf *conf = &local->hw.conf; - u32 changed = 0; -+ bool erp_valid; -+ u8 erp_value = 0; + reglist = smia_reglist_find_mode_fmt(sensor->meta_reglist, + sensor->current_reglist, f); +- +- if (reglist) { +- sensor->current_reglist = reglist; +- return 0; +- } +- +- return -EINVAL; ++ if (!reglist) ++ return -EINVAL; ++ sensor->current_reglist = reglist; ++ return smia_update_controls(s); + } - /* Process beacon from the current BSS */ - baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; -@@ -1694,13 +1871,16 @@ static void ieee80211_rx_mgmt_beacon(str - ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param, - elems.wmm_param_len); + static int smia_ioctl_g_parm(struct v4l2_int_device *s, +@@ -476,12 +668,10 @@ static int smia_ioctl_s_parm(struct v4l2 + reglist = smia_reglist_find_mode_streamparm(sensor->meta_reglist, + sensor->current_reglist, a); -- if (elems.erp_info && elems.erp_info_len >= 1) -- changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); -- else { -- u16 capab = le16_to_cpu(mgmt->u.beacon.capab_info); -- changed |= ieee80211_handle_protect_preamb(sdata, false, -- (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); -+ -+ if (elems.erp_info && elems.erp_info_len >= 1) { -+ erp_valid = true; -+ erp_value = elems.erp_info[0]; -+ } else { -+ erp_valid = false; +- if (reglist) { +- sensor->current_reglist = reglist; +- return 0; +- } +- +- return -EINVAL; ++ if (!reglist) ++ return -EINVAL; ++ sensor->current_reglist = reglist; ++ return smia_update_controls(s); + } + + static int smia_ioctl_dev_init(struct v4l2_int_device *s) +@@ -508,35 +698,56 @@ static int smia_ioctl_dev_init(struct v4 + goto out_poweroff; } -+ changed |= ieee80211_handle_bss_capability(sdata, -+ le16_to_cpu(mgmt->u.beacon.capab_info), -+ erp_valid, erp_value); - if (elems.ht_cap_elem && elems.ht_info_elem && - elems.wmm_param && conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { -@@ -2318,6 +2498,7 @@ void ieee80211_sta_setup_sdata(struct ie +- sensor->model_id = model_id; + sensor->revision_number = revision_number; +- sensor->manufacturer_id = manufacturer_id; + 
sensor->smia_version = smia_version; - ifsta = &sdata->u.sta; - INIT_WORK(&ifsta->work, ieee80211_sta_work); -+ INIT_WORK(&ifsta->beacon_loss_work, ieee80211_beacon_loss_work); - setup_timer(&ifsta->timer, ieee80211_sta_timer, - (unsigned long) sdata); - skb_queue_head_init(&ifsta->skb_queue); -@@ -2560,3 +2741,39 @@ void ieee80211_mlme_notify_scan_complete - ieee80211_restart_sta_timer(sdata); - rcu_read_unlock(); - } +- if (sensor->smia_version != 10) { ++ if (smia_version != 10) { + /* We support only SMIA version 1.0 at the moment */ + dev_err(&sensor->i2c_client->dev, + "unknown sensor 0x%04x detected (smia ver %i.%i)\n", +- sensor->model_id, +- sensor->smia_version / 10, sensor->smia_version % 10); ++ model_id, smia_version / 10, smia_version % 10); + rval = -ENODEV; + goto out_poweroff; + } + +- /* Update identification string */ +- for (i = 0; i < ARRAY_SIZE(smia_sensors); i++) { +- if (smia_sensors[i].manufacturer_id == sensor->manufacturer_id +- && smia_sensors[i].model_id == sensor->model_id) ++ /* Detect which sensor we have */ ++ for (i = 1; i < ARRAY_SIZE(smia_sensors); i++) { ++ if (smia_sensors[i].manufacturer_id == manufacturer_id ++ && smia_sensors[i].model_id == model_id) + break; + } +- if (i < ARRAY_SIZE(smia_sensors)) +- strncpy(s->name, smia_sensors[i].name, V4L2NAMESIZE); ++ if (i >= ARRAY_SIZE(smia_sensors)) ++ i = 0; /* Unknown sensor */ ++ sensor->type = &smia_sensors[i]; ++ ++ /* Initialize V4L2 controls */ ++ ++ /* Gain is initialized here permanently */ ++ sensor->controls[SMIA_CTRL_GAIN].minimum = 0; ++ sensor->controls[SMIA_CTRL_GAIN].maximum = ++ sensor->type->ev_table_size - 1; ++ sensor->controls[SMIA_CTRL_GAIN].step = 1; ++ sensor->controls[SMIA_CTRL_GAIN].default_value = 0; ++ sensor->controls[SMIA_CTRL_GAIN].value = 0; ++ sensor->controls[SMIA_CTRL_GAIN].set = ++ sensor->type->ev_table ? smia_set_gain : NULL; ++ ++ /* Exposure parameters may change at each mode change, just zero here */ ++ sensor->controls[SMIA_CTRL_EXPOSURE].minimum = 0; ++ sensor->controls[SMIA_CTRL_EXPOSURE].maximum = 0; ++ sensor->controls[SMIA_CTRL_EXPOSURE].step = 0; ++ sensor->controls[SMIA_CTRL_EXPOSURE].default_value = 0; ++ sensor->controls[SMIA_CTRL_EXPOSURE].value = 0; ++ sensor->controls[SMIA_CTRL_EXPOSURE].set = smia_set_exposure; + -+void ieee80211_dynamic_ps_disable_work(struct work_struct *work) ++ /* Update identification string */ ++ strncpy(s->name, sensor->type->name, V4L2NAMESIZE); + s->name[V4L2NAMESIZE-1] = 0; /* Ensure NULL terminated string */ + + /* Import firmware */ + snprintf(name, FIRMWARE_NAME_MAX, "%s-%02x-%04x-%02x.bin", +- SMIA_SENSOR_NAME, sensor->manufacturer_id, +- sensor->model_id, sensor->revision_number); ++ SMIA_SENSOR_NAME, sensor->type->manufacturer_id, ++ sensor->type->model_id, sensor->revision_number); + + if (request_firmware(&sensor->fw, name, + &sensor->i2c_client->dev)) { +@@ -585,10 +796,74 @@ out_poweroff: + return rval; + } + ++#if VS6555_RESET_SHIFT_HACK ++/* ++ * Check if certain undocumented registers have values we expect. ++ * If not, reset sensor and recheck. ++ * This should be called when streaming is already enabled. 
++ */ ++static int smia_vs6555_reset_shift_hack(struct v4l2_int_device *s) +{ -+ struct ieee80211_local *local = -+ container_of(work, struct ieee80211_local, -+ dynamic_ps_disable_work); ++ struct smia_sensor *sensor = s->priv; ++ int count = 10; ++ int r381c = 0; ++ int r381d = 0; ++ int r381e = 0; ++ int r381f = 0; ++ int rval; + -+ if (local->hw.conf.flags & IEEE80211_CONF_PS) { -+ local->hw.conf.flags &= ~IEEE80211_CONF_PS; -+ ieee80211_hw_config(local); -+ } ++ do { ++ rval = smia_i2c_read_reg(sensor->i2c_client, ++ SMIA_REG_8BIT, 0x381c, &r381c); ++ if (rval) ++ return rval; ++ rval = smia_i2c_read_reg(sensor->i2c_client, ++ SMIA_REG_8BIT, 0x381d, &r381d); ++ if (rval) ++ return rval; ++ rval = smia_i2c_read_reg(sensor->i2c_client, ++ SMIA_REG_8BIT, 0x381e, &r381e); ++ if (rval) ++ return rval; ++ rval = smia_i2c_read_reg(sensor->i2c_client, ++ SMIA_REG_8BIT, 0x381f, &r381f); ++ if (rval) ++ return rval; + -+ ieee80211_wake_queues_by_reason(&local->hw, -+ IEEE80211_QUEUE_STOP_REASON_PS); -+} ++ if (r381d != 0 && r381f != 0 && ++ r381c == 0 && r381e == 0) ++ return 0; + -+void ieee80211_dynamic_ps_enable_work(struct work_struct *work) -+{ -+ struct ieee80211_local *local = -+ container_of(work, struct ieee80211_local, -+ dynamic_ps_enable_work); ++ dev_dbg(&sensor->i2c_client->dev, "VS6555 HW misconfigured--" ++ "trying to reset (%02X%02X%02X%02X)\n", ++ r381c, r381d, r381e, r381f); + -+ if (local->hw.conf.flags & IEEE80211_CONF_PS) -+ return; ++ smia_stream_off(s); ++ smia_power_off(s); ++ msleep(2); ++ rval = smia_power_on(s); ++ if (rval) ++ return rval; ++ rval = smia_configure(s); ++ if (rval) ++ return rval; ++ rval = smia_stream_on(s); ++ if (rval) ++ return rval; ++ } while (--count > 0); + -+ local->hw.conf.flags |= IEEE80211_CONF_PS; ++ dev_warn(&sensor->i2c_client->dev, ++ "VS6555 reset failed--expect bad image\n"); + -+ ieee80211_hw_config(local); ++ return 0; /* Return zero nevertheless -- at least we tried */ +} ++#endif + -+void ieee80211_dynamic_ps_timer(unsigned long data) -+{ -+ struct ieee80211_local *local = (void *) data; -+ -+ queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work); -+} -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/rc80211_minstrel.c linux-omap-2.6.28-nokia1/net/mac80211/rc80211_minstrel.c ---- linux-omap-2.6.28-omap1/net/mac80211/rc80211_minstrel.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/rc80211_minstrel.c 2011-06-22 13:19:33.293063268 +0200 -@@ -389,13 +389,16 @@ minstrel_rate_init(void *priv, struct ie + static int smia_ioctl_s_power(struct v4l2_int_device *s, + enum v4l2_power new_state) { - struct minstrel_sta_info *mi = priv_sta; - struct minstrel_priv *mp = priv; -- struct minstrel_rate *mr_ctl; -+ struct ieee80211_local *local = hw_to_local(mp->hw); -+ struct ieee80211_rate *ctl_rate; - unsigned int i, n = 0; - unsigned int t_slot = 9; /* FIXME: get real slot time */ - - mi->lowest_rix = rate_lowest_index(sband, sta); -- mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)]; -- mi->sp_ack_dur = mr_ctl->ack_time; -+ ctl_rate = &sband->bitrates[mi->lowest_rix]; -+ mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate, -+ !!(ctl_rate->flags & -+ IEEE80211_RATE_ERP_G), 1); + struct smia_sensor *sensor = s->priv; ++ enum v4l2_power old_state = sensor->power; + int rval = 0; - for (i = 0; i < sband->n_bitrates; i++) { - struct minstrel_rate *mr = &mi->r[n]; -@@ -410,8 +413,7 @@ minstrel_rate_init(void *priv, struct ie + /* +@@ -599,7 +874,7 @@ static int smia_ioctl_s_power(struct v4l 
+ new_state = V4L2_POWER_OFF; - mr->rix = i; - mr->bitrate = sband->bitrates[i].bitrate / 5; -- calc_rate_durations(mi, hw_to_local(mp->hw), mr, -- &sband->bitrates[i]); -+ calc_rate_durations(mi, local, mr, &sband->bitrates[i]); + /* If we are already in this mode, do nothing */ +- if (sensor->power == new_state) ++ if (old_state == new_state) + return 0; - /* calculate maximum number of retransmissions before - * fallback (based on maximum segment size) */ -@@ -467,8 +469,8 @@ minstrel_alloc_sta(void *priv, struct ie - return NULL; + /* Disable power if so requested (it was enabled) */ +@@ -615,7 +890,7 @@ static int smia_ioctl_s_power(struct v4l + /* Either STANDBY or ON requested */ - for (i = 0; i < IEEE80211_NUM_BANDS; i++) { -- sband = hw->wiphy->bands[hw->conf.channel->band]; -- if (sband->n_bitrates > max_rates) -+ sband = hw->wiphy->bands[i]; -+ if (sband && sband->n_bitrates > max_rates) - max_rates = sband->n_bitrates; - } + /* Enable power and move to standby if it was off */ +- if (sensor->power == V4L2_POWER_OFF) { ++ if (old_state == V4L2_POWER_OFF) { + rval = smia_power_on(s); + if (rval) + goto out; +@@ -625,23 +900,26 @@ static int smia_ioctl_s_power(struct v4l -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/rx.c linux-omap-2.6.28-nokia1/net/mac80211/rx.c ---- linux-omap-2.6.28-omap1/net/mac80211/rx.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/rx.c 2011-06-22 13:19:33.293063268 +0200 -@@ -733,12 +733,19 @@ ieee80211_rx_h_sta_process(struct ieee80 - * Mesh beacons will update last_rx when if they are found to - * match the current local configuration when processed. - */ -- sta->last_rx = jiffies; -+ if (rx->sdata->vif.type == NL80211_IFTYPE_STATION && -+ ieee80211_is_beacon(hdr->frame_control)) { -+ rx->sdata->u.sta.last_beacon = jiffies; -+ } else -+ sta->last_rx = jiffies; + if (new_state == V4L2_POWER_ON) { + /* Standby -> streaming */ ++ sensor->power = V4L2_POWER_ON; + rval = smia_configure(s); + if (rval) { + smia_stream_off(s); +- if (sensor->power == V4L2_POWER_OFF) ++ if (old_state == V4L2_POWER_OFF) + smia_power_off(s); + goto out; + } + rval = smia_stream_on(s); ++#if VS6555_RESET_SHIFT_HACK ++ if (rval == 0 && sensor->type->manufacturer_id == 0x01) ++ rval = smia_vs6555_reset_shift_hack(s); ++#endif + } else { + /* Streaming -> standby */ + rval = smia_stream_off(s); } - if (!(rx->flags & IEEE80211_RX_RA_MATCH)) - return RX_CONTINUE; - -+ if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) -+ ieee80211_sta_rx_notify(rx->sdata, hdr); -+ - sta->rx_fragments++; - sta->rx_bytes += rx->skb->len; - sta->last_signal = rx->status->signal; -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/scan.c linux-omap-2.6.28-nokia1/net/mac80211/scan.c ---- linux-omap-2.6.28-omap1/net/mac80211/scan.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/scan.c 2011-06-22 13:19:33.293063268 +0200 -@@ -759,7 +759,12 @@ ieee80211_scan_result(struct ieee80211_l - struct iw_event iwe; - char *buf; - -- if (time_after(jiffies, -+ /* -+ * we don't return old entries, unless the bss is in hold state due -+ * to beacon filter -+ */ -+ if (!bss->hold && -+ time_after(jiffies, - bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) - return current_ev; - -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/sta_info.c linux-omap-2.6.28-nokia1/net/mac80211/sta_info.c ---- linux-omap-2.6.28-omap1/net/mac80211/sta_info.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/sta_info.c 2011-06-22 13:19:33.293063268 +0200 
-@@ -538,7 +538,7 @@ static inline int sta_info_buffer_expire + out: +- if (rval == 0) +- sensor->power = new_state; +- ++ sensor->power = (rval == 0) ? new_state : old_state; + return rval; } +@@ -717,9 +995,6 @@ static int smia_probe(struct i2c_client + sensor->i2c_client = client; + i2c_set_clientdata(client, sensor); --static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, -+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, - struct sta_info *sta) - { - unsigned long flags; -@@ -547,7 +547,7 @@ static void sta_info_cleanup_expire_buff - DECLARE_MAC_BUF(mac); +- sensor->ctrl_gain = 0; +- sensor->ctrl_exposure = DEFAULT_EXPOSURE; +- + rval = v4l2_int_device_register(sensor->v4l2_int_device); + if (rval) + i2c_set_clientdata(client, NULL); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/media/video/smia-sensor.h kernel-2.6.28-20094803.3+0m5/drivers/media/video/smia-sensor.h +--- kernel-2.6.28-20094102.6+0m5/drivers/media/video/smia-sensor.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/media/video/smia-sensor.h 2011-09-04 11:37:54.000000000 +0200 +@@ -24,7 +24,6 @@ + #ifndef SMIA_SENSOR_H + #define SMIA_SENSOR_H - if (skb_queue_empty(&sta->ps_tx_buf)) -- return; -+ return false; +-#include + #include - for (;;) { - spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); -@@ -572,6 +572,8 @@ static void sta_info_cleanup_expire_buff - if (skb_queue_empty(&sta->ps_tx_buf)) - sta_info_clear_tim_bit(sta); - } -+ -+ return true; - } + #define SMIA_SENSOR_NAME "smia-sensor" +@@ -39,30 +38,5 @@ struct smia_sensor_platform_data { + int (*power_off)(struct v4l2_int_device *s); + }; +-struct smia_sensor { +- struct i2c_client *i2c_client; +- struct i2c_driver driver; +- +- /* Sensor information */ +- u16 model_id; +- u8 revision_number; +- u8 manufacturer_id; +- u8 smia_version; +- +- /* V4L2 current control values */ +- s32 ctrl_exposure; +- s32 ctrl_gain; +- +- struct smia_reglist *current_reglist; +- struct v4l2_int_device *v4l2_int_device; +- struct v4l2_fract timeperframe; +- +- struct smia_sensor_platform_data *platform_data; +- +- const struct firmware *fw; +- struct smia_meta_reglist *meta_reglist; +- +- enum v4l2_power power; +-}; -@@ -579,15 +581,22 @@ static void sta_info_cleanup(unsigned lo - { - struct ieee80211_local *local = (struct ieee80211_local *) data; - struct sta_info *sta; -+ bool need_timer = false; + #endif /* SMIA_SENSOR_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/mfd/twl4030-power.c kernel-2.6.28-20094803.3+0m5/drivers/mfd/twl4030-power.c +--- kernel-2.6.28-20094102.6+0m5/drivers/mfd/twl4030-power.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/mfd/twl4030-power.c 2011-09-04 11:37:54.000000000 +0200 +@@ -69,6 +69,11 @@ static u8 triton_next_free_address = 0x2 + #define KEY_1 0xC0 + #define KEY_2 0x0C - rcu_read_lock(); -- list_for_each_entry_rcu(sta, &local->sta_list, list) -- sta_info_cleanup_expire_buffered(local, sta); -+ list_for_each_entry_rcu(sta, &local->sta_list, list) { -+ bool res = sta_info_cleanup_expire_buffered(local, sta); -+ if (res) -+ need_timer = true; -+ } - rcu_read_unlock(); ++#define R_VDD1_OSC 0x5C ++#define R_VDD2_OSC 0x6A ++#define R_VIO_OSC 0x52 ++#define EXT_FS_CLK_EN (0x1 << 6) ++ + /* resource configuration registers */ -- local->sta_cleanup.expires = -- round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); -- add_timer(&local->sta_cleanup); -+ /* If the queues are empty, don't add a new timer */ -+ if (need_timer) { -+ 
local->sta_cleanup.expires = -+ round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); -+ add_timer(&local->sta_cleanup); -+ } + #define DEVGROUP_OFFSET 0 +@@ -467,6 +472,31 @@ int twl4030_disable_regulator(int res) } + EXPORT_SYMBOL(twl4030_disable_regulator); - #ifdef CONFIG_MAC80211_DEBUGFS -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/tx.c linux-omap-2.6.28-nokia1/net/mac80211/tx.c ---- linux-omap-2.6.28-omap1/net/mac80211/tx.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/tx.c 2011-06-22 13:19:33.293063268 +0200 -@@ -330,6 +330,7 @@ ieee80211_tx_h_unicast_ps_buf(struct iee - struct sta_info *sta = tx->sta; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; -+ struct ieee80211_local *local = tx->local; - u32 staflags; - DECLARE_MAC_BUF(mac); - -@@ -367,6 +368,8 @@ ieee80211_tx_h_unicast_ps_buf(struct iee - - info->control.jiffies = jiffies; - skb_queue_tail(&sta->ps_tx_buf, tx->skb); -+ mod_timer(&local->sta_cleanup, -+ round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); - return TX_QUEUED; - } - #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG -@@ -1465,6 +1468,19 @@ int ieee80211_subif_start_xmit(struct sk - goto fail; - } - -+ if (!(local->hw.flags & IEEE80211_HW_NO_STACK_DYNAMIC_PS) && -+ local->dynamic_ps_timeout > 0) { -+ if (local->hw.conf.flags & IEEE80211_CONF_PS) { -+ ieee80211_stop_queues_by_reason(&local->hw, -+ IEEE80211_QUEUE_STOP_REASON_PS); -+ queue_work(local->hw.workqueue, -+ &local->dynamic_ps_disable_work); -+ } -+ -+ mod_timer(&local->dynamic_ps_timer, jiffies + -+ msecs_to_jiffies(local->dynamic_ps_timeout)); ++/** ++ * @brief twl_workaround - implement errata XYZ ++ * XYZ errata workaround requires the TWL DCDCs to use ++ * HFCLK - for this you need to write to all OSC regs to ++ * enable this path ++ * WARNING: you SHOULD change your board dependent script ++ * file to handle RET and OFF mode sequences correctly ++ * ++ * @return ++ */ ++static void __init twl_workaround(void) ++{ ++ u8 val; ++ u8 reg[]={R_VDD1_OSC, R_VDD2_OSC, R_VIO_OSC}; ++ int i; ++ int err = 0; ++ for (i = 0; i < sizeof(reg); i++) { ++ err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &val, reg[i]); ++ val |= EXT_FS_CLK_EN; ++ err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val, reg[i]); + } ++ if (err) ++ pr_warning("TWL4030: workaround setup failed!\n"); ++} + - nh_pos = skb_network_header(skb) - skb->data; - h_pos = skb_transport_header(skb) - skb->data; - -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/util.c linux-omap-2.6.28-nokia1/net/mac80211/util.c ---- linux-omap-2.6.28-omap1/net/mac80211/util.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/util.c 2011-06-22 13:19:33.293063268 +0200 -@@ -330,10 +330,20 @@ __le16 ieee80211_ctstoself_duration(stru - } - EXPORT_SYMBOL(ieee80211_ctstoself_duration); - --void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) -+static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, -+ enum queue_stop_reason reason) + void __init twl4030_power_init(struct twl4030_power_data *triton2_scripts) { - struct ieee80211_local *local = hw_to_local(hw); + int err = 0; +@@ -502,6 +532,8 @@ void __init twl4030_power_init(struct tw + } + } + ++ /* TODO: introduce workaround based on TWL4030 revision */ ++ twl_workaround(); + if (twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY)) + printk(KERN_ERR + "TWL4030 Unable to relock registers\n"); +diff -Nurp 
kernel-2.6.28-20094102.6+0m5/drivers/misc/Kconfig kernel-2.6.28-20094803.3+0m5/drivers/misc/Kconfig +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/Kconfig 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/Kconfig 2011-09-04 11:37:54.000000000 +0200 +@@ -523,5 +523,6 @@ source "drivers/misc/c2port/Kconfig" + source "drivers/misc/ssi/Kconfig" + source "drivers/misc/mcsaab/Kconfig" + source "drivers/misc/cmt-speech/Kconfig" ++source "drivers/misc/ssi-char/Kconfig" -+ /* we don't need to track ampdu queues */ -+ if (queue < ieee80211_num_regular_queues(hw)) { -+ __clear_bit(reason, &local->queue_stop_reasons[queue]); + endif # MISC_DEVICES +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/Makefile kernel-2.6.28-20094803.3+0m5/drivers/misc/Makefile +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/Makefile 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/Makefile 2011-09-04 11:37:54.000000000 +0200 +@@ -38,3 +38,5 @@ obj-$(CONFIG_C2PORT) += c2port/ + obj-$(CONFIG_OMAP_SSI) += ssi/ + obj-$(CONFIG_SSI_MCSAAB_IMP) += mcsaab/ + obj-$(CONFIG_SSI_CMT_SPEECH) += cmt-speech/ ++obj-$(CONFIG_SSI_CHAR) += ssi-char/ ++obj-$(CONFIG_PANIC_INFO_BUFF) += panic_info_buff.o +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/panic_info_buff.c kernel-2.6.28-20094803.3+0m5/drivers/misc/panic_info_buff.c +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/panic_info_buff.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/panic_info_buff.c 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,93 @@ ++/* ++ * Copyright (C) Nokia Corporation ++ * ++ * Contact: Atal Shargorodsky ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ + -+ if (local->queue_stop_reasons[queue] != 0) -+ /* someone still has this queue stopped */ -+ return; -+ } ++#include ++#include ++#include ++#include ++#include + - if (test_bit(queue, local->queues_pending)) { - set_bit(queue, local->queues_pending_run); - tasklet_schedule(&local->tx_pending_tasklet); -@@ -341,22 +351,74 @@ void ieee80211_wake_queue(struct ieee802 - netif_wake_subqueue(local->mdev, queue); - } - } ++#define PANIC_BUFFER_MAX_LEN 1024 ++static char panic_info_buff[PANIC_BUFFER_MAX_LEN]; ++static struct dentry *panic_info_buff_debugfs; + -+void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, -+ enum queue_stop_reason reason) ++static int panic_info_buff_open(struct inode *inode, struct file *file) +{ -+ struct ieee80211_local *local = hw_to_local(hw); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); -+ __ieee80211_wake_queue(hw, queue, reason); -+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); ++ return 0; +} + -+void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) ++static ssize_t panic_info_buff_write(struct file *file, ++ const char __user *buf, size_t len, loff_t *off) +{ -+ ieee80211_wake_queue_by_reason(hw, queue, -+ IEEE80211_QUEUE_STOP_REASON_DRIVER); ++ if (len >= PANIC_BUFFER_MAX_LEN) ++ return -EINVAL; ++ if (copy_from_user(panic_info_buff, buf, len)) ++ return -EFAULT; ++ panic_info_buff[len] = '\0'; ++ return len; +} - EXPORT_SYMBOL(ieee80211_wake_queue); - --void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) -+static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, -+ enum queue_stop_reason reason) - { - struct ieee80211_local *local = hw_to_local(hw); - -+ /* we don't need to track ampdu queues */ -+ if (queue < ieee80211_num_regular_queues(hw)) -+ __set_bit(reason, &local->queue_stop_reasons[queue]); + - netif_stop_subqueue(local->mdev, queue); - } ++static struct file_operations panic_info_buff_fops = { ++ .open = panic_info_buff_open, ++ .write = panic_info_buff_write, ++ .llseek = no_llseek, ++ .owner = THIS_MODULE, ++}; + -+void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, -+ enum queue_stop_reason reason) ++static int panic_info_buff_event(struct notifier_block *this, ++ unsigned long event, void *ptr) +{ -+ struct ieee80211_local *local = hw_to_local(hw); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); -+ __ieee80211_stop_queue(hw, queue, reason); -+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); ++ if (panic_info_buff[0] == '\0') { ++ printk(KERN_EMERG "Panic info buffer is empty.\n"); ++ } else { ++ printk(KERN_EMERG "Panic info buffer:\n"); ++ printk(KERN_EMERG "%s\n", panic_info_buff); ++ } ++ return NOTIFY_OK; +} + -+void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) ++static struct notifier_block panic_info_buff_block = { ++ .notifier_call = panic_info_buff_event, ++ .priority = 1, ++}; ++ ++static int __devinit panic_info_buff_init(void) +{ -+ ieee80211_stop_queue_by_reason(hw, queue, -+ IEEE80211_QUEUE_STOP_REASON_DRIVER); ++ panic_info_buff_debugfs = debugfs_create_file("panic_info_buff", ++ S_IFREG | S_IWUSR | S_IWGRP, ++ NULL, NULL, &panic_info_buff_fops); ++ atomic_notifier_chain_register(&panic_notifier_list, ++ 
&panic_info_buff_block); ++ return 0; +} - EXPORT_SYMBOL(ieee80211_stop_queue); - --void ieee80211_stop_queues(struct ieee80211_hw *hw) -+void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, -+ enum queue_stop_reason reason) - { -+ struct ieee80211_local *local = hw_to_local(hw); -+ unsigned long flags; - int i; - -+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); ++module_init(panic_info_buff_init); + - for (i = 0; i < ieee80211_num_queues(hw); i++) -- ieee80211_stop_queue(hw, i); -+ __ieee80211_stop_queue(hw, i, reason); ++static void __devexit panic_info_buff_exit(void) ++{ ++ debugfs_remove(panic_info_buff_debugfs); ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &panic_info_buff_block); + -+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} ++module_exit(panic_info_buff_exit); + -+void ieee80211_stop_queues(struct ieee80211_hw *hw) -+{ -+ ieee80211_stop_queues_by_reason(hw, -+ IEEE80211_QUEUE_STOP_REASON_DRIVER); - } - EXPORT_SYMBOL(ieee80211_stop_queues); ++MODULE_AUTHOR("Nokia Corporation"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("panic_info_buff"); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi/ssi_driver.h kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi/ssi_driver.h +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi/ssi_driver.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi/ssi_driver.h 2011-09-04 11:37:54.000000000 +0200 +@@ -44,6 +44,8 @@ -@@ -367,12 +429,24 @@ int ieee80211_queue_stopped(struct ieee8 - } - EXPORT_SYMBOL(ieee80211_queue_stopped); + /* Channel states */ + #define SSI_CH_OPEN 0x01 ++#define SSI_CH_RX_POLL 0x10 ++ + /* + * The number of channels to use by the driver in the ports, or the highest + * port channel number (+1) used. 
(MAX:8) +@@ -87,8 +89,8 @@ struct ssi_channel { + u8 channel_number; + rwlock_t rw_lock; + struct ssi_device *dev; +- void (*write_done)(struct ssi_device *dev); +- void (*read_done)(struct ssi_device *dev); ++ void (*write_done) (struct ssi_device *dev); ++ void (*read_done) (struct ssi_device *dev); + void (*port_event)(struct ssi_device *dev, unsigned int event, + void *arg); + }; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi/ssi_driver_if.c kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi/ssi_driver_if.c +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi/ssi_driver_if.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi/ssi_driver_if.c 2011-09-04 11:37:54.000000000 +0200 +@@ -24,6 +24,127 @@ --void ieee80211_wake_queues(struct ieee80211_hw *hw) -+void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, -+ enum queue_stop_reason reason) - { -+ struct ieee80211_local *local = hw_to_local(hw); -+ unsigned long flags; - int i; + #include "ssi_driver.h" -+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); ++#define NOT_SET (-1) + - for (i = 0; i < hw->queues + hw->ampdu_queues; i++) -- ieee80211_wake_queue(hw, i); -+ __ieee80211_wake_queue(hw, i, reason); ++int ssi_set_rx(struct ssi_port *sport, struct ssr_ctx *cfg) ++{ ++ struct ssi_dev *ssi_ctrl = sport->ssi_controller; ++ void __iomem *base = ssi_ctrl->base; ++ int port = sport->port_number; + -+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); -+} ++ if ((cfg->mode != SSI_MODE_STREAM) && ++ (cfg->mode != SSI_MODE_FRAME) && ++ (cfg->mode != SSI_MODE_SLEEP) && ++ (cfg->mode != NOT_SET)) ++ return -EINVAL; + -+void ieee80211_wake_queues(struct ieee80211_hw *hw) -+{ -+ ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_DRIVER); - } - EXPORT_SYMBOL(ieee80211_wake_queues); - -diff -Nurp linux-omap-2.6.28-omap1/net/mac80211/wext.c linux-omap-2.6.28-nokia1/net/mac80211/wext.c ---- linux-omap-2.6.28-omap1/net/mac80211/wext.c 2011-06-22 13:14:26.163067634 +0200 -+++ linux-omap-2.6.28-nokia1/net/mac80211/wext.c 2011-06-22 13:19:33.293063268 +0200 -@@ -864,15 +864,20 @@ static int ieee80211_ioctl_siwmlme(struc - { - struct ieee80211_sub_if_data *sdata; - struct iw_mlme *mlme = (struct iw_mlme *) extra; -+ struct ieee80211_if_sta *ifsta; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_STATION && - sdata->vif.type != NL80211_IFTYPE_ADHOC) - return -EINVAL; - -+ ifsta = &sdata->u.sta; ++ if ((cfg->frame_size > SSI_MAX_FRAME_SIZE) && ++ (cfg->frame_size != NOT_SET)) ++ return -EINVAL; + - switch (mlme->cmd) { - case IW_MLME_DEAUTH: - /* TODO: mlme->addr.sa_data */ -+ ifsta->flags &= ~IEEE80211_STA_BSSID_SET; -+ ifsta->flags &= ~IEEE80211_STA_SSID_SET; - return ieee80211_sta_deauthenticate(sdata, mlme->reason_code); - case IW_MLME_DISASSOC: - /* TODO: mlme->addr.sa_data */ -@@ -983,25 +988,56 @@ static int ieee80211_ioctl_siwpower(stru - struct iw_param *wrq, - char *extra) - { -+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_conf *conf = &local->hw.conf; -+ int ret = 0, timeout = 0; -+ bool ps; ++ if ((cfg->channels == 0) || ++ ((cfg->channels > SSI_CHANNELS_DEFAULT) && ++ (cfg->channels != NOT_SET))) ++ return -EINVAL; + -+ if (sdata->vif.type != NL80211_IFTYPE_STATION) ++ if ((cfg->timeout > SSI_MAX_RX_TIMEOUT) && (cfg->timeout != NOT_SET)) + return -EINVAL; - - if (wrq->disabled) { -- conf->flags &= ~IEEE80211_CONF_PS; -- return 
ieee80211_hw_config(local); -+ ps = false; -+ timeout = 0; -+ goto set; - } - - switch (wrq->flags & IW_POWER_MODE) { - case IW_POWER_ON: /* If not specified */ - case IW_POWER_MODE: /* If set all mask */ - case IW_POWER_ALL_R: /* If explicitely state all */ -- conf->flags |= IEEE80211_CONF_PS; -+ ps = true; -+ break; -+ default: /* Otherwise we ignore */ - break; -- default: /* Otherwise we don't support it */ -- return -EINVAL; - } - -- return ieee80211_hw_config(local); -+ if (wrq->flags & IW_POWER_TIMEOUT) -+ timeout = wrq->value / 1000; + -+set: -+ if (ps == local->powersave && timeout == local->dynamic_ps_timeout) -+ return ret; ++ if (cfg->mode != NOT_SET) ++ ssi_outl(cfg->mode, base, SSI_SSR_MODE_REG(port)); + -+ local->powersave = ps; -+ local->dynamic_ps_timeout = timeout; ++ if (cfg->frame_size != NOT_SET) ++ ssi_outl(cfg->frame_size, base, SSI_SSR_FRAMESIZE_REG(port)); + -+ if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { -+ if (!(local->hw.flags & IEEE80211_HW_NO_STACK_DYNAMIC_PS) && -+ local->dynamic_ps_timeout > 0) -+ mod_timer(&local->dynamic_ps_timer, jiffies + -+ msecs_to_jiffies(local->dynamic_ps_timeout)); -+ else { -+ if (local->powersave) -+ conf->flags |= IEEE80211_CONF_PS; -+ else -+ conf->flags &= ~IEEE80211_CONF_PS; -+ } -+ ret = ieee80211_hw_config(local); ++ if (cfg->channels != NOT_SET) { ++ if ((cfg->channels & (-cfg->channels)) ^ cfg->channels) ++ return -EINVAL; ++ else ++ ssi_outl(cfg->channels, base, ++ SSI_SSR_CHANNELS_REG(port)); + } + -+ return ret; - } - - static int ieee80211_ioctl_giwpower(struct net_device *dev, -@@ -1010,9 +1046,8 @@ static int ieee80211_ioctl_giwpower(stru - char *extra) - { - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); -- struct ieee80211_conf *conf = &local->hw.conf; - -- wrqu->power.disabled = !(conf->flags & IEEE80211_CONF_PS); -+ wrqu->power.disabled = !local->powersave; - - return 0; - } -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/af_phonet.c linux-omap-2.6.28-nokia1/net/phonet/af_phonet.c ---- linux-omap-2.6.28-omap1/net/phonet/af_phonet.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/af_phonet.c 2011-06-22 13:19:33.293063268 +0200 -@@ -67,9 +67,6 @@ static int pn_socket_create(struct net * - struct phonet_protocol *pnp; - int err; - -- if (net != &init_net) -- return -EAFNOSUPPORT; -- - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - -@@ -278,8 +275,6 @@ static inline int can_respond(struct sk_ - return 0; - - ph = pn_hdr(skb); -- if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev) -- return 0; /* we are not the destination */ - if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) - return 0; - if (ph->pn_res == PN_COMMGR) /* indications */ -@@ -347,14 +342,11 @@ static int phonet_rcv(struct sk_buff *sk - struct packet_type *pkttype, - struct net_device *orig_dev) - { -+ struct net *net = dev_net(dev); - struct phonethdr *ph; -- struct sock *sk; - struct sockaddr_pn sa; - u16 len; - -- if (dev_net(dev) != &init_net) -- goto out; -- - /* check we have at least a full Phonet header */ - if (!pskb_pull(skb, sizeof(struct phonethdr))) - goto out; -@@ -370,28 +362,28 @@ static int phonet_rcv(struct sk_buff *sk - skb_reset_transport_header(skb); - - pn_skb_get_dst_sockaddr(skb, &sa); -- if (pn_sockaddr_get_addr(&sa) == 0) -- goto out; /* currently, we cannot be device 0 */ - -- sk = pn_find_sock_by_sa(&sa); -- if (sk == NULL) { -+ /* check if we are the destination */ -+ if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { -+ /* Phonet packet input 
*/ -+ struct sock *sk = pn_find_sock_by_sa(net, &sa); -+ -+ if (sk) -+ return sk_receive_skb(sk, skb, 0); ++ if (cfg->timeout != NOT_SET) ++ ssi_outl(cfg->timeout, base, SSI_SSR_TIMEOUT_REG(port)); + - if (can_respond(skb)) { - send_obj_unreachable(skb); - send_reset_indications(skb); - } -- goto out; - } - -- /* Push data to the socket (or other sockets connected to it). */ -- return sk_receive_skb(sk, skb, 0); -- - out: - kfree_skb(skb); - return NET_RX_DROP; - } - - static struct packet_type phonet_packet_type = { -- .type = __constant_htons(ETH_P_PHONET), -+ .type = cpu_to_be16(ETH_P_PHONET), - .dev = NULL, - .func = phonet_rcv, - }; -@@ -434,16 +426,18 @@ static int __init phonet_init(void) - { - int err; - -+ err = phonet_device_init(); -+ if (err) -+ return err; ++ return 0; ++} + - err = sock_register(&phonet_proto_family); - if (err) { - printk(KERN_ALERT - "phonet protocol family initialization failed\n"); -- return err; -+ goto err_sock; - } - -- phonet_device_init(); - dev_add_pack(&phonet_packet_type); -- phonet_netlink_register(); - phonet_sysctl_init(); - - err = isi_register(); -@@ -455,6 +449,7 @@ err: - phonet_sysctl_exit(); - sock_unregister(PF_PHONET); - dev_remove_pack(&phonet_packet_type); -+err_sock: - phonet_device_exit(); - return err; - } -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/pep.c linux-omap-2.6.28-nokia1/net/phonet/pep.c ---- linux-omap-2.6.28-omap1/net/phonet/pep.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/pep.c 2011-06-22 13:19:33.293063268 +0200 -@@ -225,6 +225,7 @@ static int pipe_rcv_status(struct sock * - { - struct pep_sock *pn = pep_sk(sk); - struct pnpipehdr *hdr = pnp_hdr(skb); -+ int wake = 0; - - if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) - return -EINVAL; -@@ -241,16 +242,16 @@ static int pipe_rcv_status(struct sock * - case PN_LEGACY_FLOW_CONTROL: - switch (hdr->data[4]) { - case PEP_IND_BUSY: -- pn->tx_credits = 0; -+ atomic_set(&pn->tx_credits, 0); - break; - case PEP_IND_READY: -- pn->tx_credits = 1; -+ atomic_set(&pn->tx_credits, wake = 1); - break; - } - break; - case PN_ONE_CREDIT_FLOW_CONTROL: - if (hdr->data[4] == PEP_IND_READY) -- pn->tx_credits = 1; -+ atomic_set(&pn->tx_credits, wake = 1); - break; - } - break; -@@ -258,10 +259,7 @@ static int pipe_rcv_status(struct sock * - case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: - if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) - break; -- if (pn->tx_credits + hdr->data[4] > 0xff) -- pn->tx_credits = 0xff; -- else -- pn->tx_credits += hdr->data[4]; -+ atomic_add(wake = hdr->data[4], &pn->tx_credits); - break; - - default: -@@ -269,7 +267,7 @@ static int pipe_rcv_status(struct sock * - (unsigned)hdr->data[1]); - return -EOPNOTSUPP; - } -- if (pn->tx_credits) -+ if (wake) - sk->sk_write_space(sk); - return 0; - } -@@ -327,6 +325,7 @@ static int pipe_do_rcv(struct sock *sk, - case PNS_PEP_ENABLE_REQ: - /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */ - pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); -+ pn->init_enable = 1; - break; - - case PNS_PEP_RESET_REQ: -@@ -343,8 +342,9 @@ static int pipe_do_rcv(struct sock *sk, - } - /* fall through */ - case PNS_PEP_DISABLE_REQ: -- pn->tx_credits = 0; -+ atomic_set(&pn->tx_credits, 0); - pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); -+ pn->init_enable = 0; - break; - - case PNS_PEP_CTRL_REQ: -@@ -368,6 +368,14 @@ static int pipe_do_rcv(struct sock *sk, - break; - } - pn->rx_credits--; -+ if (pn->rx_fc == PN_MULTI_CREDIT_FLOW_CONTROL) { -+ if (pn->rx_credits == 0) -+ printk(KERN_ERR"pn_pep: RX 
congestion\n"); -+ else if (pn->rx_credits < (CREDITS_MAX - CREDITS_THR)) -+ printk(KERN_WARNING -+ "pn_pep: %u RX credits left\n", -+ pn->rx_credits); -+ } - queue = &sk->sk_receive_queue; - goto queue; - -@@ -376,9 +384,6 @@ static int pipe_do_rcv(struct sock *sk, - break; - - case PNS_PIPE_REDIRECTED_IND: -- err = pipe_rcv_created(sk, skb); -- break; -- - case PNS_PIPE_CREATED_IND: - err = pipe_rcv_created(sk, skb); - if (err) -@@ -390,7 +395,7 @@ static int pipe_do_rcv(struct sock *sk, - /* fall through */ - case PNS_PIPE_ENABLED_IND: - if (!pn_flow_safe(pn->tx_fc)) { -- pn->tx_credits = 1; -+ atomic_set(&pn->tx_credits, 1); - sk->sk_write_space(sk); - } - if (sk->sk_state == TCP_ESTABLISHED) -@@ -504,8 +509,9 @@ static int pep_connreq_rcv(struct sock * - newpn->pn_sk.resource = pn->pn_sk.resource; - skb_queue_head_init(&newpn->ctrlreq_queue); - newpn->pipe_handle = pipe_handle; -+ atomic_set(&newpn->tx_credits, 0); - newpn->peer_type = peer_type; -- newpn->rx_credits = newpn->tx_credits = 0; -+ newpn->rx_credits = 0; - newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; - newpn->init_enable = enabled; - -@@ -554,7 +560,7 @@ static int pep_do_rcv(struct sock *sk, s - { - struct pep_sock *pn = pep_sk(sk); - struct sock *sknode; -- struct pnpipehdr *hdr = pnp_hdr(skb); -+ struct pnpipehdr *hdr; - struct sockaddr_pn dst; - int err = NET_RX_SUCCESS; - u8 pipe_handle; -@@ -821,14 +827,18 @@ static int pipe_skb_send(struct sock *sk - struct pep_sock *pn = pep_sk(sk); - struct pnpipehdr *ph; - -+ if (pn_flow_safe(pn->tx_fc) && -+ !atomic_add_unless(&pn->tx_credits, -1, 0)) { -+ kfree_skb(skb); -+ return -ENOBUFS; -+ } ++void ssi_get_rx(struct ssi_port *sport, struct ssr_ctx *cfg) ++{ ++ struct ssi_dev *ssi_ctrl = sport->ssi_controller; ++ void __iomem *base = ssi_ctrl->base; ++ int port = sport->port_number; + - skb_push(skb, 3); - skb_reset_transport_header(skb); - ph = pnp_hdr(skb); - ph->utid = 0; - ph->message_id = PNS_PIPE_DATA; - ph->pipe_handle = pn->pipe_handle; -- if (pn_flow_safe(pn->tx_fc) && pn->tx_credits) -- pn->tx_credits--; - - return pn_skb_send(sk, skb, &pipe_srv); - } -@@ -866,7 +876,7 @@ disabled: - BUG_ON(sk->sk_state != TCP_ESTABLISHED); - - /* Wait until flow control allows TX */ -- done = pn->tx_credits > 0; -+ done = atomic_read(&pn->tx_credits); - while (!done) { - DEFINE_WAIT(wait); - -@@ -881,7 +891,7 @@ disabled: - - prepare_to_wait(&sk->sk_socket->wait, &wait, - TASK_INTERRUPTIBLE); -- done = sk_wait_event(sk, &timeo, pn->tx_credits > 0); -+ done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); - finish_wait(&sk->sk_socket->wait, &wait); - - if (sk->sk_state != TCP_ESTABLISHED) -@@ -895,7 +905,8 @@ disabled: - goto out; - skb_reserve(skb, MAX_PHONET_HEADER + 3); - -- if (sk->sk_state != TCP_ESTABLISHED || !pn->tx_credits) -+ if (sk->sk_state != TCP_ESTABLISHED || -+ !atomic_read(&pn->tx_credits)) - goto disabled; /* sock_alloc_send_skb might sleep */ - } - -@@ -917,7 +928,7 @@ int pep_writeable(struct sock *sk) - { - struct pep_sock *pn = pep_sk(sk); - -- return (sk->sk_state == TCP_ESTABLISHED) ? 
pn->tx_credits : 0; -+ return atomic_read(&pn->tx_credits); - } - - int pep_write(struct sock *sk, struct sk_buff *skb) -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/pep-gprs.c linux-omap-2.6.28-nokia1/net/phonet/pep-gprs.c ---- linux-omap-2.6.28-omap1/net/phonet/pep-gprs.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/pep-gprs.c 2011-06-22 13:19:33.293063268 +0200 -@@ -40,23 +40,17 @@ struct gprs_dev { - void (*old_data_ready)(struct sock *, int); - void (*old_write_space)(struct sock *); - -- struct net_device *net; -- struct net_device_stats stats; -- -- struct sk_buff_head tx_queue; -- struct work_struct tx_work; -- spinlock_t tx_lock; -- unsigned tx_max; -+ struct net_device *dev; - }; - --static int gprs_type_trans(struct sk_buff *skb) -+static __be16 gprs_type_trans(struct sk_buff *skb) - { - const u8 *pvfc; - u8 buf; - - pvfc = skb_header_pointer(skb, 0, 1, &buf); - if (!pvfc) -- return 0; -+ return htons(0); - /* Look at IP version field */ - switch (*pvfc >> 4) { - case 4: -@@ -64,7 +58,15 @@ static int gprs_type_trans(struct sk_buf - case 6: - return htons(ETH_P_IPV6); - } -- return 0; -+ return htons(0); ++ cfg->mode = ssi_inl(base, SSI_SSR_MODE_REG(port)); ++ cfg->frame_size = ssi_inl(base, SSI_SSR_FRAMESIZE_REG(port)); ++ cfg->channels = ssi_inl(base, SSI_SSR_CHANNELS_REG(port)); ++ cfg->timeout = ssi_inl(base, SSI_SSR_TIMEOUT_REG(port)); +} + -+static void gprs_writeable(struct gprs_dev *gp) ++int ssi_set_tx(struct ssi_port *sport, struct sst_ctx *cfg) +{ -+ struct net_device *dev = gp->dev; ++ struct ssi_dev *ssi_ctrl = sport->ssi_controller; ++ void __iomem *base = ssi_ctrl->base; ++ int port = sport->port_number; + -+ if (pep_writeable(gp->sk)) -+ netif_wake_queue(dev); - } - - /* -@@ -73,18 +75,21 @@ static int gprs_type_trans(struct sk_buf - - static void gprs_state_change(struct sock *sk) - { -- struct gprs_dev *dev = sk->sk_user_data; -+ struct gprs_dev *gp = sk->sk_user_data; - - if (sk->sk_state == TCP_CLOSE_WAIT) { -- netif_stop_queue(dev->net); -- netif_carrier_off(dev->net); -+ struct net_device *dev = gp->dev; ++ if ((cfg->mode != SSI_MODE_STREAM) && ++ (cfg->mode != SSI_MODE_FRAME) && ++ (cfg->mode != NOT_SET)) ++ return -EINVAL; + -+ netif_stop_queue(dev); -+ netif_carrier_off(dev); - } - } - --static int gprs_recv(struct gprs_dev *dev, struct sk_buff *skb) -+static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb) - { -+ struct net_device *dev = gp->dev; - int err = 0; -- u16 protocol = gprs_type_trans(skb); -+ __be16 protocol = gprs_type_trans(skb); - - if (!protocol) { - err = -EINVAL; -@@ -99,7 +104,7 @@ static int gprs_recv(struct gprs_dev *de - * so wrap the IP packet as a single fragment of an head-less - * socket buffer. The network stack will pull what it needs, - * but at least, the whole IP payload is not memcpy'd. 
*/ -- rskb = netdev_alloc_skb(dev->net, 0); -+ rskb = netdev_alloc_skb(dev, 0); - if (!rskb) { - err = -ENOBUFS; - goto drop; -@@ -123,9 +128,9 @@ static int gprs_recv(struct gprs_dev *de - - skb->protocol = protocol; - skb_reset_mac_header(skb); -- skb->dev = dev->net; -+ skb->dev = dev; - -- if (likely(dev->net->flags & IFF_UP)) { -+ if (likely(dev->flags & IFF_UP)) { - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - netif_rx(skb); -@@ -143,26 +148,21 @@ drop: - - static void gprs_data_ready(struct sock *sk, int len) - { -- struct gprs_dev *dev = sk->sk_user_data; -+ struct gprs_dev *gp = sk->sk_user_data; - struct sk_buff *skb; - - while ((skb = pep_read(sk)) != NULL) { - skb_orphan(skb); -- gprs_recv(dev, skb); -+ gprs_recv(gp, skb); - } - } - - static void gprs_write_space(struct sock *sk) - { -- struct gprs_dev *dev = sk->sk_user_data; -- struct net_device *net = dev->net; -- unsigned credits = pep_writeable(sk); -- -- spin_lock_bh(&dev->tx_lock); -- dev->tx_max = credits; -- if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net)) -- netif_wake_queue(net); -- spin_unlock_bh(&dev->tx_lock); -+ struct gprs_dev *gp = sk->sk_user_data; ++ if ((cfg->frame_size > SSI_MAX_FRAME_SIZE) && ++ (cfg->frame_size != NOT_SET)) ++ return -EINVAL; + -+ if (netif_running(gp->dev)) -+ gprs_writeable(gp); - } - - /* -@@ -173,22 +173,21 @@ static int gprs_open(struct net_device * - { - struct gprs_dev *gp = netdev_priv(dev); - -- gprs_write_space(gp->sk); -+ gprs_writeable(gp); - return 0; - } - - static int gprs_close(struct net_device *dev) - { -- struct gprs_dev *gp = netdev_priv(dev); -- - netif_stop_queue(dev); -- flush_work(&gp->tx_work); - return 0; - } - --static int gprs_xmit(struct sk_buff *skb, struct net_device *net) -+static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) - { -- struct gprs_dev *dev = netdev_priv(net); -+ struct gprs_dev *gp = netdev_priv(dev); -+ struct sock *sk = gp->sk; -+ int len, err; - - switch (skb->protocol) { - case htons(ETH_P_IP): -@@ -199,84 +198,50 @@ static int gprs_xmit(struct sk_buff *skb - return 0; - } - -- spin_lock(&dev->tx_lock); -- if (likely(skb_queue_len(&dev->tx_queue) < dev->tx_max)) { -- skb_queue_tail(&dev->tx_queue, skb); -- skb = NULL; -- } -- if (skb_queue_len(&dev->tx_queue) >= dev->tx_max) -- netif_stop_queue(net); -- spin_unlock(&dev->tx_lock); -- -- schedule_work(&dev->tx_work); -- if (unlikely(skb)) -- dev_kfree_skb(skb); -- return 0; --} -- --static void gprs_tx(struct work_struct *work) --{ -- struct gprs_dev *dev = container_of(work, struct gprs_dev, tx_work); -- struct sock *sk = dev->sk; -- struct sk_buff *skb; -- -- while ((skb = skb_dequeue(&dev->tx_queue)) != NULL) { -- int err; -- -- dev->stats.tx_bytes += skb->len; -+ skb_orphan(skb); -+ skb_set_owner_w(skb, sk); -+ len = skb->len; -+ err = pep_write(sk, skb); -+ if (err) { -+ LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n", -+ dev->name, err); -+ dev->stats.tx_aborted_errors++; -+ dev->stats.tx_errors++; -+ } else { - dev->stats.tx_packets++; -- -- skb_orphan(skb); -- skb_set_owner_w(skb, sk); -- -- lock_sock(sk); -- err = pep_write(sk, skb); -- if (err) { -- LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n", -- dev->net->name, err); -- dev->stats.tx_aborted_errors++; -- dev->stats.tx_errors++; -- } -- release_sock(sk); -+ dev->stats.tx_bytes += len; - } - -- lock_sock(sk); -- gprs_write_space(sk); -- release_sock(sk); -+ netif_stop_queue(dev); -+ if (pep_writeable(sk)) -+ netif_wake_queue(dev); -+ return 0; - } - --static int 
gprs_set_mtu(struct net_device *net, int new_mtu) -+static int gprs_set_mtu(struct net_device *dev, int new_mtu) - { - if ((new_mtu < 576) || (new_mtu > (PHONET_MAX_MTU - 11))) - return -EINVAL; - -- net->mtu = new_mtu; -+ dev->mtu = new_mtu; - return 0; - } - --static struct net_device_stats *gprs_get_stats(struct net_device *net) -+static void gprs_setup(struct net_device *dev) - { -- struct gprs_dev *dev = netdev_priv(net); -- -- return &dev->stats; --} -- --static void gprs_setup(struct net_device *net) --{ -- net->features = NETIF_F_FRAGLIST; -- net->type = ARPHRD_NONE; -- net->flags = IFF_POINTOPOINT | IFF_NOARP; -- net->mtu = GPRS_DEFAULT_MTU; -- net->hard_header_len = 0; -- net->addr_len = 0; -- net->tx_queue_len = 10; -- -- net->destructor = free_netdev; -- net->open = gprs_open; -- net->stop = gprs_close; -- net->hard_start_xmit = gprs_xmit; /* mandatory */ -- net->change_mtu = gprs_set_mtu; -- net->get_stats = gprs_get_stats; -+ dev->features = NETIF_F_FRAGLIST; -+ dev->type = ARPHRD_PHONET_PIPE; -+ dev->flags = IFF_POINTOPOINT | IFF_NOARP; -+ dev->mtu = GPRS_DEFAULT_MTU; -+ dev->hard_header_len = 0; -+ dev->addr_len = 0; -+ dev->tx_queue_len = 10; ++ if ((cfg->channels == 0) || ++ ((cfg->channels > SSI_CHANNELS_DEFAULT) && ++ (cfg->channels != NOT_SET))) ++ return -EINVAL; ++ ++ if ((cfg->divisor > SSI_MAX_TX_DIVISOR) && (cfg->divisor != NOT_SET)) ++ return -EINVAL; ++ ++ if ((cfg->arb_mode != SSI_ARBMODE_ROUNDROBIN) && ++ (cfg->arb_mode != SSI_ARBMODE_PRIORITY) && ++ (cfg->mode != NOT_SET)) ++ return -EINVAL; ++ ++ if (cfg->mode != NOT_SET) ++ ssi_outl(cfg->channels, base, SSI_SST_CHANNELS_REG(port)); ++ ++ if (cfg->frame_size != NOT_SET) ++ ssi_outl(cfg->frame_size, base, SSI_SST_FRAMESIZE_REG(port)); ++ ++ if (cfg->channels != NOT_SET) { ++ if ((cfg->channels & (-cfg->channels)) ^ cfg->channels) ++ return -EINVAL; ++ else ++ ssi_outl(cfg->mode, base, SSI_SST_MODE_REG(port)); ++ } ++ ++ if (cfg->divisor != NOT_SET) ++ ssi_outl(cfg->divisor, base, SSI_SST_DIVISOR_REG(port)); + -+ dev->destructor = free_netdev; -+ dev->open = gprs_open; -+ dev->stop = gprs_close; -+ dev->hard_start_xmit = gprs_xmit; /* mandatory */ -+ dev->change_mtu = gprs_set_mtu; - } - - /* -@@ -290,28 +255,25 @@ static void gprs_setup(struct net_device - int gprs_attach(struct sock *sk) - { - static const char ifname[] = "gprs%d"; -- struct gprs_dev *dev; -- struct net_device *net; -+ struct gprs_dev *gp; -+ struct net_device *dev; - int err; - - if (unlikely(sk->sk_type == SOCK_STREAM)) - return -EINVAL; /* need packet boundaries */ - - /* Create net device */ -- net = alloc_netdev(sizeof(*dev), ifname, gprs_setup); -- if (!net) -+ dev = alloc_netdev(sizeof(*gp), ifname, gprs_setup); -+ if (!dev) - return -ENOMEM; -- dev = netdev_priv(net); -- dev->net = net; -- dev->tx_max = 0; -- spin_lock_init(&dev->tx_lock); -- skb_queue_head_init(&dev->tx_queue); -- INIT_WORK(&dev->tx_work, gprs_tx); -+ gp = netdev_priv(dev); -+ gp->sk = sk; -+ gp->dev = dev; - -- netif_stop_queue(net); -- err = register_netdev(net); -+ netif_stop_queue(dev); -+ err = register_netdev(dev); - if (err) { -- free_netdev(net); -+ free_netdev(dev); - return err; - } - -@@ -325,40 +287,38 @@ int gprs_attach(struct sock *sk) - err = -EINVAL; - goto out_rel; - } -- sk->sk_user_data = dev; -- dev->old_state_change = sk->sk_state_change; -- dev->old_data_ready = sk->sk_data_ready; -- dev->old_write_space = sk->sk_write_space; -+ sk->sk_user_data = gp; -+ gp->old_state_change = sk->sk_state_change; -+ gp->old_data_ready = sk->sk_data_ready; -+ 
gp->old_write_space = sk->sk_write_space; - sk->sk_state_change = gprs_state_change; - sk->sk_data_ready = gprs_data_ready; - sk->sk_write_space = gprs_write_space; - release_sock(sk); -- - sock_hold(sk); -- dev->sk = sk; - -- printk(KERN_DEBUG"%s: attached\n", net->name); -- return net->ifindex; -+ printk(KERN_DEBUG"%s: attached\n", dev->name); -+ return dev->ifindex; - - out_rel: - release_sock(sk); -- unregister_netdev(net); -+ unregister_netdev(dev); - return err; - } - - void gprs_detach(struct sock *sk) - { -- struct gprs_dev *dev = sk->sk_user_data; -- struct net_device *net = dev->net; -+ struct gprs_dev *gp = sk->sk_user_data; -+ struct net_device *dev = gp->dev; - - lock_sock(sk); - sk->sk_user_data = NULL; -- sk->sk_state_change = dev->old_state_change; -- sk->sk_data_ready = dev->old_data_ready; -- sk->sk_write_space = dev->old_write_space; -+ sk->sk_state_change = gp->old_state_change; -+ sk->sk_data_ready = gp->old_data_ready; -+ sk->sk_write_space = gp->old_write_space; - release_sock(sk); - -- printk(KERN_DEBUG"%s: detached\n", net->name); -- unregister_netdev(net); -+ printk(KERN_DEBUG"%s: detached\n", dev->name); -+ unregister_netdev(dev); - sock_put(sk); - } -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/pn_dev.c linux-omap-2.6.28-nokia1/net/phonet/pn_dev.c ---- linux-omap-2.6.28-omap1/net/phonet/pn_dev.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/pn_dev.c 2011-06-22 13:19:33.293063268 +0200 -@@ -28,32 +28,41 @@ - #include - #include - #include -+#include - #include - --/* when accessing, remember to lock with spin_lock(&pndevs.lock); */ --struct phonet_device_list pndevs = { -- .list = LIST_HEAD_INIT(pndevs.list), -- .lock = __SPIN_LOCK_UNLOCKED(pndevs.lock), -+struct phonet_net { -+ struct phonet_device_list pndevs; - }; - -+int phonet_net_id; ++ if (cfg->arb_mode != NOT_SET) ++ ssi_outl(cfg->arb_mode, base, SSI_SST_ARBMODE_REG(port)); ++ ++ return 0; ++} + -+struct phonet_device_list *phonet_device_list(struct net *net) ++void ssi_get_tx(struct ssi_port *sport, struct sst_ctx *cfg) +{ -+ struct phonet_net *pnn = net_generic(net, phonet_net_id); -+ return &pnn->pndevs; ++ struct ssi_dev *ssi_ctrl = sport->ssi_controller; ++ void __iomem *base = ssi_ctrl->base; ++ int port = sport->port_number; ++ ++ cfg->mode = ssi_inl(base, SSI_SST_MODE_REG(port)); ++ cfg->frame_size = ssi_inl(base, SSI_SST_FRAMESIZE_REG(port)); ++ cfg->channels = ssi_inl(base, SSI_SST_CHANNELS_REG(port)); ++ cfg->divisor = ssi_inl(base, SSI_SST_DIVISOR_REG(port)); ++ cfg->arb_mode = ssi_inl(base, SSI_SST_ARBMODE_REG(port)); +} + - /* Allocate new Phonet device. 
*/ - static struct phonet_device *__phonet_device_alloc(struct net_device *dev) - { -+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); - struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC); - if (pnd == NULL) - return NULL; - pnd->netdev = dev; - bitmap_zero(pnd->addrs, 64); - -- list_add(&pnd->list, &pndevs.list); -+ list_add(&pnd->list, &pndevs->list); - return pnd; - } - - static struct phonet_device *__phonet_get(struct net_device *dev) - { -+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); - struct phonet_device *pnd; - -- list_for_each_entry(pnd, &pndevs.list, list) { -+ list_for_each_entry(pnd, &pndevs->list, list) { - if (pnd->netdev == dev) - return pnd; - } -@@ -68,32 +77,33 @@ static void __phonet_device_free(struct - - struct net_device *phonet_device_get(struct net *net) - { -+ struct phonet_device_list *pndevs = phonet_device_list(net); - struct phonet_device *pnd; - struct net_device *dev; - -- spin_lock_bh(&pndevs.lock); -- list_for_each_entry(pnd, &pndevs.list, list) { -+ spin_lock_bh(&pndevs->lock); -+ list_for_each_entry(pnd, &pndevs->list, list) { - dev = pnd->netdev; - BUG_ON(!dev); - -- if (dev_net(dev) == net && -- (dev->reg_state == NETREG_REGISTERED) && -+ if ((dev->reg_state == NETREG_REGISTERED) && - ((pnd->netdev->flags & IFF_UP)) == IFF_UP) - break; - dev = NULL; - } - if (dev) - dev_hold(dev); -- spin_unlock_bh(&pndevs.lock); -+ spin_unlock_bh(&pndevs->lock); - return dev; - } - - int phonet_address_add(struct net_device *dev, u8 addr) - { -+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); - struct phonet_device *pnd; - int err = 0; - -- spin_lock_bh(&pndevs.lock); -+ spin_lock_bh(&pndevs->lock); - /* Find or create Phonet-specific device data */ - pnd = __phonet_get(dev); - if (pnd == NULL) -@@ -102,31 +112,33 @@ int phonet_address_add(struct net_device - err = -ENOMEM; - else if (test_and_set_bit(addr >> 2, pnd->addrs)) - err = -EEXIST; -- spin_unlock_bh(&pndevs.lock); -+ spin_unlock_bh(&pndevs->lock); - return err; - } - - int phonet_address_del(struct net_device *dev, u8 addr) - { -+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); - struct phonet_device *pnd; - int err = 0; - -- spin_lock_bh(&pndevs.lock); -+ spin_lock_bh(&pndevs->lock); - pnd = __phonet_get(dev); - if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) - err = -EADDRNOTAVAIL; - else if (bitmap_empty(pnd->addrs, 64)) - __phonet_device_free(pnd); -- spin_unlock_bh(&pndevs.lock); -+ spin_unlock_bh(&pndevs->lock); - return err; - } - - /* Gets a source address toward a destination, through a interface. 
*/ - u8 phonet_address_get(struct net_device *dev, u8 addr) - { -+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); - struct phonet_device *pnd; - -- spin_lock_bh(&pndevs.lock); -+ spin_lock_bh(&pndevs->lock); - pnd = __phonet_get(dev); - if (pnd) { - BUG_ON(bitmap_empty(pnd->addrs, 64)); -@@ -136,28 +148,31 @@ u8 phonet_address_get(struct net_device - addr = find_first_bit(pnd->addrs, 64) << 2; - } else - addr = PN_NO_ADDR; -- spin_unlock_bh(&pndevs.lock); -+ spin_unlock_bh(&pndevs->lock); - return addr; - } - --int phonet_address_lookup(u8 addr) -+int phonet_address_lookup(struct net *net, u8 addr) - { -+ struct phonet_device_list *pndevs = phonet_device_list(net); - struct phonet_device *pnd; -+ int err = -EADDRNOTAVAIL; - -- spin_lock_bh(&pndevs.lock); -- list_for_each_entry(pnd, &pndevs.list, list) { -+ spin_lock_bh(&pndevs->lock); -+ list_for_each_entry(pnd, &pndevs->list, list) { - /* Don't allow unregistering devices! */ - if ((pnd->netdev->reg_state != NETREG_REGISTERED) || - ((pnd->netdev->flags & IFF_UP)) != IFF_UP) - continue; - - if (test_bit(addr >> 2, pnd->addrs)) { -- spin_unlock_bh(&pndevs.lock); -- return 0; -+ err = 0; -+ goto found; - } - } -- spin_unlock_bh(&pndevs.lock); -- return -EADDRNOTAVAIL; -+found: -+ spin_unlock_bh(&pndevs->lock); -+ return err; - } - - /* notify Phonet of device events */ -@@ -167,14 +182,16 @@ static int phonet_device_notify(struct n - struct net_device *dev = arg; - - if (what == NETDEV_UNREGISTER) { -+ struct phonet_device_list *pndevs; - struct phonet_device *pnd; - - /* Destroy phonet-specific device data */ -- spin_lock_bh(&pndevs.lock); -+ pndevs = phonet_device_list(dev_net(dev)); -+ spin_lock_bh(&pndevs->lock); - pnd = __phonet_get(dev); - if (pnd) - __phonet_device_free(pnd); -- spin_unlock_bh(&pndevs.lock); -+ spin_unlock_bh(&pndevs->lock); - } - return 0; - -@@ -185,24 +202,52 @@ static struct notifier_block phonet_devi - .priority = 0, - }; + /** + * ssi_open - open a ssi device channel. + * @dev - Reference to the ssi device channel to be openned. +@@ -225,6 +346,38 @@ void ssi_read_cancel(struct ssi_device * + EXPORT_SYMBOL(ssi_read_cancel); --/* Initialize Phonet devices list */ --void phonet_device_init(void) -+/* Per-namespace Phonet devices handling */ -+static int phonet_init_net(struct net *net) - { -- register_netdevice_notifier(&phonet_device_notifier); -+ struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL); -+ if (!pnn) -+ return -ENOMEM; + /** ++ * ssi_poll - SSI poll ++ * @dev - ssi device channel reference to apply the I/O control ++ * (or port associated to it) ++ * ++ * Return 0 on sucess, a negative value on failure. 
++ * ++ */ ++int ssi_poll(struct ssi_device *dev) ++{ ++ struct ssi_channel *ch; ++ int err; + -+ INIT_LIST_HEAD(&pnn->pndevs.list); -+ spin_lock_init(&pnn->pndevs.lock); -+ net_assign_generic(net, phonet_net_id, pnn); -+ return 0; - } - --void phonet_device_exit(void) -+static void phonet_exit_net(struct net *net) - { -+ struct phonet_net *pnn = net_generic(net, phonet_net_id); - struct phonet_device *pnd, *n; - -- rtnl_unregister_all(PF_PHONET); -- rtnl_lock(); -- spin_lock_bh(&pndevs.lock); -- -- list_for_each_entry_safe(pnd, n, &pndevs.list, list) -+ list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list) - __phonet_device_free(pnd); - -- spin_unlock_bh(&pndevs.lock); -- rtnl_unlock(); -+ kfree(pnn); -+} ++ if (unlikely(!dev || !dev->ch)) ++ return -EINVAL; + -+static struct pernet_operations phonet_net_ops = { -+ .init = phonet_init_net, -+ .exit = phonet_exit_net, -+}; ++ if (unlikely(!(dev->ch->flags & SSI_CH_OPEN))) { ++ dev_err(&dev->device, "SSI device NOT open\n"); ++ return -EINVAL; ++ } + -+/* Initialize Phonet devices list */ -+int __init phonet_device_init(void) -+{ -+ int err = register_pernet_gen_device(&phonet_net_id, &phonet_net_ops); -+ if (err) -+ return err; ++ ch = dev->ch; ++ spin_lock_bh(&ch->ssi_port->ssi_controller->lock); ++ ch->flags |= SSI_CH_RX_POLL; ++ err = ssi_driver_read_interrupt(ch, NULL); ++ spin_unlock_bh(&ch->ssi_port->ssi_controller->lock); + -+ register_netdevice_notifier(&phonet_device_notifier); -+ err = phonet_netlink_register(); -+ if (err) -+ phonet_device_exit(); + return err; +} ++EXPORT_SYMBOL(ssi_poll); + -+void phonet_device_exit(void) -+{ -+ rtnl_unregister_all(PF_PHONET); - unregister_netdevice_notifier(&phonet_device_notifier); -+ unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); - } -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/pn_netlink.c linux-omap-2.6.28-nokia1/net/phonet/pn_netlink.c ---- linux-omap-2.6.28-omap1/net/phonet/pn_netlink.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/pn_netlink.c 2011-06-22 13:19:33.293063268 +0200 -@@ -123,17 +123,16 @@ nla_put_failure: - - static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) - { -- struct net *net = sock_net(skb->sk); -+ struct phonet_device_list *pndevs; - struct phonet_device *pnd; - int dev_idx = 0, dev_start_idx = cb->args[0]; - int addr_idx = 0, addr_start_idx = cb->args[1]; - -- spin_lock_bh(&pndevs.lock); -- list_for_each_entry(pnd, &pndevs.list, list) { -+ pndevs = phonet_device_list(sock_net(skb->sk)); -+ spin_lock_bh(&pndevs->lock); -+ list_for_each_entry(pnd, &pndevs->list, list) { - u8 addr; - -- if (!net_eq(dev_net(pnd->netdev), net)) -- continue; - if (dev_idx > dev_start_idx) - addr_start_idx = 0; - if (dev_idx++ < dev_start_idx) -@@ -153,16 +152,21 @@ static int getaddr_dumpit(struct sk_buff - } - - out: -- spin_unlock_bh(&pndevs.lock); -+ spin_unlock_bh(&pndevs->lock); - cb->args[0] = dev_idx; - cb->args[1] = addr_idx; - - return skb->len; - } - --void __init phonet_netlink_register(void) -+int __init phonet_netlink_register(void) - { -- rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); -- rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); -- rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); -+ int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); -+ if (err) -+ return err; + -+ /* Further __rtnl_register() cannot fail */ -+ __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); -+ __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); -+ return 0; - 
} -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/socket.c linux-omap-2.6.28-nokia1/net/phonet/socket.c ---- linux-omap-2.6.28-omap1/net/phonet/socket.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/socket.c 2011-06-22 13:19:33.293063268 +0200 -@@ -57,7 +57,7 @@ static struct { - * Find address based on socket address, match only certain fields. - * Also grab sock if it was found. Remember to sock_put it later. - */ --struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *spn) -+struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) - { - struct hlist_node *node; - struct sock *sknode; -@@ -71,6 +71,8 @@ struct sock *pn_find_sock_by_sa(const st - struct pn_sock *pn = pn_sk(sknode); - BUG_ON(!pn->sobject); /* unbound socket */ - -+ if (!net_eq(sock_net(sknode), net)) -+ continue; - if (pn_port(obj)) { - /* Look up socket by port */ - if (pn_port(pn->sobject) != pn_port(obj)) -@@ -130,7 +132,7 @@ static int pn_socket_bind(struct socket - - handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr); - saddr = pn_addr(handle); -- if (saddr && phonet_address_lookup(saddr)) -+ if (saddr && phonet_address_lookup(sock_net(sk), saddr)) - return -EADDRNOTAVAIL; - - lock_sock(sk); -@@ -225,7 +227,7 @@ static unsigned int pn_socket_poll(struc - if (!mask && sk->sk_state == TCP_CLOSE_WAIT) - return POLLHUP; - -- if (sk->sk_state == TCP_ESTABLISHED && pn->tx_credits) -+ if (sk->sk_state == TCP_ESTABLISHED && atomic_read(&pn->tx_credits)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; - - return mask; -@@ -361,6 +363,7 @@ static DEFINE_MUTEX(port_mutex); - int pn_sock_get_port(struct sock *sk, unsigned short sport) - { - static int port_cur; -+ struct net *net = sock_net(sk); - struct pn_sock *pn = pn_sk(sk); - struct sockaddr_pn try_sa; - struct sock *tmpsk; -@@ -381,7 +384,7 @@ int pn_sock_get_port(struct sock *sk, un - port_cur = pmin; - - pn_sockaddr_set_port(&try_sa, port_cur); -- tmpsk = pn_find_sock_by_sa(&try_sa); -+ tmpsk = pn_find_sock_by_sa(net, &try_sa); - if (tmpsk == NULL) { - sport = port_cur; - goto found; -@@ -391,7 +394,7 @@ int pn_sock_get_port(struct sock *sk, un - } else { - /* try to find specific port */ - pn_sockaddr_set_port(&try_sa, sport); -- tmpsk = pn_find_sock_by_sa(&try_sa); -+ tmpsk = pn_find_sock_by_sa(net, &try_sa); - if (tmpsk == NULL) - /* No sock there! We can use that port... 
*/ - goto found; -diff -Nurp linux-omap-2.6.28-omap1/net/phonet/sysctl.c linux-omap-2.6.28-nokia1/net/phonet/sysctl.c ---- linux-omap-2.6.28-omap1/net/phonet/sysctl.c 2011-06-22 13:14:26.273067633 +0200 -+++ linux-omap-2.6.28-nokia1/net/phonet/sysctl.c 2011-06-22 13:19:33.293063268 +0200 -@@ -89,13 +89,13 @@ static struct ctl_table phonet_table[] = - .data = &local_port_range, - .maxlen = sizeof(local_port_range), - .mode = 0644, -- .proc_handler = &proc_local_port_range, -+ .proc_handler = proc_local_port_range, - .strategy = NULL, - }, - { .ctl_name = 0 } - }; - --struct ctl_path phonet_ctl_path[] = { -+static struct ctl_path phonet_ctl_path[] = { - { .procname = "net", .ctl_name = CTL_NET, }, - { .procname = "phonet", .ctl_name = CTL_UNNUMBERED, }, - { }, -diff -Nurp linux-omap-2.6.28-omap1/net/socket.c linux-omap-2.6.28-nokia1/net/socket.c ---- linux-omap-2.6.28-omap1/net/socket.c 2011-06-22 13:14:26.343067632 +0200 -+++ linux-omap-2.6.28-nokia1/net/socket.c 2011-06-22 13:19:33.293063268 +0200 -@@ -695,7 +695,7 @@ static ssize_t sock_sendpage(struct file - if (more) - flags |= MSG_MORE; - -- return sock->ops->sendpage(sock, page, offset, size, flags); -+ return kernel_sendpage(sock, page, offset, size, flags); - } - - static ssize_t sock_splice_read(struct file *file, loff_t *ppos, -diff -Nurp linux-omap-2.6.28-omap1/net/wireless/nl80211.c linux-omap-2.6.28-nokia1/net/wireless/nl80211.c ---- linux-omap-2.6.28-omap1/net/wireless/nl80211.c 2011-06-22 13:14:26.413067631 +0200 -+++ linux-omap-2.6.28-nokia1/net/wireless/nl80211.c 2011-06-22 13:19:33.293063268 +0200 -@@ -619,7 +619,7 @@ static int nl80211_get_key(struct sk_buf - - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); -- goto out; -+ goto free_msg; - } - - cookie.msg = msg; -@@ -635,7 +635,7 @@ static int nl80211_get_key(struct sk_buf - rtnl_unlock(); - - if (err) -- goto out; -+ goto free_msg; - - if (cookie.error) - goto nla_put_failure; -@@ -646,6 +646,7 @@ static int nl80211_get_key(struct sk_buf - - nla_put_failure: - err = -ENOBUFS; -+ free_msg: - nlmsg_free(msg); - out: - cfg80211_put_dev(drv); -diff -Nurp linux-omap-2.6.28-omap1/net/wireless/reg.c linux-omap-2.6.28-nokia1/net/wireless/reg.c ---- linux-omap-2.6.28-omap1/net/wireless/reg.c 2011-06-22 13:14:26.423067631 +0200 -+++ linux-omap-2.6.28-nokia1/net/wireless/reg.c 2011-06-22 13:19:33.293063268 +0200 -@@ -98,7 +98,7 @@ static const struct ieee80211_regdomain - .alpha2 = "US", - .reg_rules = { - /* IEEE 802.11b/g, channels 1..11 */ -- REG_RULE(2412-10, 2462+10, 40, 6, 27, 0), -+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), - /* IEEE 802.11a, channel 36 */ - REG_RULE(5180-10, 5180+10, 40, 6, 23, 0), - /* IEEE 802.11a, channel 40 */ -@@ -156,7 +156,7 @@ static const struct ieee80211_regdomain - } - }; - --static const struct ieee80211_regdomain *static_regdom(char *alpha2) -+static const struct ieee80211_regdomain *static_regdom(const char *alpha2) - { - if (alpha2[0] == 'U' && alpha2[1] == 'S') - return &us_regdom; -@@ -277,29 +277,6 @@ static bool regdom_changed(const char *a - return true; - } ++/** + * ssi_ioctl - SSI I/O control + * @dev - ssi device channel reference to apply the I/O control + * (or port associated to it) +@@ -305,6 +458,33 @@ int ssi_ioctl(struct ssi_device *dev, un + } + *(unsigned int *)arg = ssi_cawake(dev->ch->ssi_port); + break; ++ case SSI_IOCTL_SET_RX: ++ if (!arg) { ++ err = -EINVAL; ++ goto out; ++ } ++ err = ssi_set_rx(dev->ch->ssi_port, (struct ssr_ctx *)arg); ++ break; ++ case SSI_IOCTL_GET_RX: ++ if (!arg) { ++ err = -EINVAL; ++ goto out; ++ } 
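The new SSI_IOCTL_SET_RX/SSI_IOCTL_GET_RX cases here, together with the SSI_IOCTL_SET_TX/SSI_IOCTL_GET_TX cases that follow, let a client driver read back and reprogram the receiver and transmitter context at run time. A minimal sketch of a caller, assuming the ssi_ioctl() prototype takes a void * argument and using only the sst_ctx fields that ssi_get_tx() fills in earlier in this file (mode, frame_size, channels, divisor, arb_mode); it also relies on the GET_TX case ending with a break, which the hunk below appears to drop before the pre-existing SSI_IOCTL_TX_CH_FULL case:

	/* Sketch only: halve the transmit clock by doubling the divisor. */
	static int example_slow_down_tx(struct ssi_device *dev)
	{
		struct sst_ctx tx;
		int err;

		err = ssi_ioctl(dev, SSI_IOCTL_GET_TX, &tx);
		if (err < 0)
			return err;

		tx.divisor *= 2;	/* keep mode, frame_size, channels, arb_mode as read back */

		return ssi_ioctl(dev, SSI_IOCTL_SET_TX, &tx);
	}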
++ ssi_get_rx(dev->ch->ssi_port, (struct ssr_ctx *)arg); ++ break; ++ case SSI_IOCTL_SET_TX: ++ if (!arg) { ++ err = -EINVAL; ++ goto out; ++ } ++ err = ssi_set_tx(dev->ch->ssi_port, (struct sst_ctx *)arg); ++ break; ++ case SSI_IOCTL_GET_TX: ++ if (!arg) { ++ err = -EINVAL; ++ goto out; ++ } ++ ssi_get_tx(dev->ch->ssi_port, (struct sst_ctx *)arg); + case SSI_IOCTL_TX_CH_FULL: + if (!arg) { + err = -EINVAL; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi/ssi_driver_int.c kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi/ssi_driver_int.c +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi/ssi_driver_int.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi/ssi_driver_int.c 2011-09-04 11:37:54.000000000 +0200 +@@ -25,9 +25,19 @@ --/* This lets us keep regulatory code which is updated on a regulatory -- * basis in userspace. */ --static int call_crda(const char *alpha2) --{ -- char country_env[9 + 2] = "COUNTRY="; -- char *envp[] = { -- country_env, -- NULL -- }; -- -- if (!is_world_regdom((char *) alpha2)) -- printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n", -- alpha2[0], alpha2[1]); -- else -- printk(KERN_INFO "cfg80211: Calling CRDA to update world " -- "regulatory domain\n"); -- -- country_env[8] = alpha2[0]; -- country_env[9] = alpha2[1]; -- -- return kobject_uevent_env(®_pdev->dev.kobj, KOBJ_CHANGE, envp); --} -- - /* This has the logic which determines when a new request - * should be ignored. */ - static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by, -@@ -582,6 +559,7 @@ void wiphy_update_regulatory(struct wiph - int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by, - const char *alpha2, struct ieee80211_regdomain *rd) + void ssi_reset_ch_read(struct ssi_channel *ch) { -+ const struct ieee80211_regdomain *rdd; - struct regulatory_request *request; - char *rd_alpha2; - int r = 0; -@@ -613,11 +591,12 @@ int __regulatory_hint(struct wiphy *wiph - list_add_tail(&request->list, ®ulatory_requests); - if (rd) - break; -- r = call_crda(alpha2); --#ifndef CONFIG_WIRELESS_OLD_REGULATORY -- if (r) -- printk(KERN_ERR "cfg80211: Failed calling CRDA\n"); ++ struct ssi_port *p = ch->ssi_port; ++ struct ssi_dev *ssi_ctrl = p->ssi_controller; ++ unsigned int channel = ch->channel_number; ++ void __iomem *base = ssi_ctrl->base; ++ unsigned int port = p->port_number; ++ unsigned int irq = p->n_irq; + -+#ifdef CONFIG_WIRELESS_OLD_REGULATORY -+ rdd = static_regdom(alpha2); -+ r = set_regdom(rdd); - #endif + ch->read_data.addr = NULL; + ch->read_data.size = 0; + ch->read_data.lch = -1; + - break; - default: - r = -ENOTSUPP; -@@ -658,7 +637,7 @@ static void print_rd_rules(const struct - const struct ieee80211_freq_range *freq_range = NULL; - const struct ieee80211_power_rule *power_rule = NULL; - -- printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), " -+ printk(KERN_DEBUG "\t(start_freq - end_freq @ bandwidth), " - "(max_antenna_gain, max_eirp)\n"); - - for (i = 0; i < rd->n_reg_rules; i++) { -@@ -669,7 +648,7 @@ static void print_rd_rules(const struct - /* There may not be documentation for max antenna gain - * in certain regions */ - if (power_rule->max_antenna_gain) -- printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), " -+ printk(KERN_DEBUG "\t(%d KHz - %d KHz @ %d KHz), " - "(%d mBi, %d mBm)\n", - freq_range->start_freq_khz, - freq_range->end_freq_khz, -@@ -677,7 +656,7 @@ static void print_rd_rules(const struct - power_rule->max_antenna_gain, - power_rule->max_eirp); - else -- printk(KERN_INFO "\t(%d KHz - %d KHz @ 
%d KHz), " -+ printk(KERN_DEBUG "\t(%d KHz - %d KHz @ %d KHz), " - "(N/A, %d mBm)\n", - freq_range->start_freq_khz, - freq_range->end_freq_khz, -@@ -690,15 +669,15 @@ static void print_regdomain(const struct - { - - if (is_world_regdom(rd->alpha2)) -- printk(KERN_INFO "cfg80211: World regulatory " -+ printk(KERN_DEBUG "cfg80211: World regulatory " - "domain updated:\n"); - else { - if (is_unknown_alpha2(rd->alpha2)) -- printk(KERN_INFO "cfg80211: Regulatory domain " -+ printk(KERN_DEBUG "cfg80211: Regulatory domain " - "changed to driver built-in settings " - "(unknown country)\n"); - else -- printk(KERN_INFO "cfg80211: Regulatory domain " -+ printk(KERN_DEBUG "cfg80211: Regulatory domain " - "changed to country: %c%c\n", - rd->alpha2[0], rd->alpha2[1]); - } -@@ -707,7 +686,7 @@ static void print_regdomain(const struct - - void print_regdomain_info(const struct ieee80211_regdomain *rd) - { -- printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n", -+ printk(KERN_DEBUG "cfg80211: Regulatory domain: %c%c\n", - rd->alpha2[0], rd->alpha2[1]); - print_rd_rules(rd); ++ ssi_outl(SSI_SSR_DATAAVAILABLE(channel), base, ++ SSI_SYS_MPU_STATUS_REG(port, irq)); } -@@ -752,12 +731,6 @@ static int __set_regdom(const struct iee - case REGDOM_SET_BY_CORE: - case REGDOM_SET_BY_DRIVER: - case REGDOM_SET_BY_USER: -- if (!is_valid_rd(rd)) { -- printk(KERN_ERR "cfg80211: Invalid " -- "regulatory domain detected:\n"); -- print_regdomain_info(rd); -- return -EINVAL; -- } - break; - case REGDOM_SET_BY_COUNTRY_IE: /* Not yet */ - WARN_ON(1); -@@ -832,7 +805,7 @@ int regulatory_init(void) - #ifdef CONFIG_WIRELESS_OLD_REGULATORY - cfg80211_regdomain = static_regdom(ieee80211_regdom); - -- printk(KERN_INFO "cfg80211: Using static regulatory domain info\n"); -+ printk(KERN_DEBUG "cfg80211: Using static regulatory domain info\n"); - print_regdomain_info(cfg80211_regdomain); - /* The old code still requests for a new regdomain and if - * you have CRDA you get it updated, otherwise you get -diff -Nurp linux-omap-2.6.28-omap1/scripts/package/Makefile linux-omap-2.6.28-nokia1/scripts/package/Makefile ---- linux-omap-2.6.28-omap1/scripts/package/Makefile 2011-06-22 13:14:26.473067630 +0200 -+++ linux-omap-2.6.28-nokia1/scripts/package/Makefile 2011-06-22 13:19:33.293063268 +0200 -@@ -74,7 +74,7 @@ deb-pkg: FORCE - $(MAKE) KBUILD_SRC= - $(CONFIG_SHELL) $(srctree)/scripts/package/builddeb - --clean-dirs += $(objtree)/debian/ -+#clean-dirs += $(objtree)/debian/ + void ssi_reset_ch_write(struct ssi_channel *ch) +@@ -145,6 +155,8 @@ static void do_channel_rx(struct ssi_cha + unsigned int n_ch; + unsigned int n_p; + unsigned int irq; ++ int rx_poll = 0; ++ int data_read = 0; - # tarball targets -diff -Nurp linux-omap-2.6.28-omap1/security/device_cgroup.c linux-omap-2.6.28-nokia1/security/device_cgroup.c ---- linux-omap-2.6.28-omap1/security/device_cgroup.c 2011-06-22 13:14:26.533067629 +0200 -+++ linux-omap-2.6.28-nokia1/security/device_cgroup.c 2011-06-22 13:19:33.293063268 +0200 -@@ -513,6 +513,9 @@ int devcgroup_inode_mknod(int mode, dev_ - struct dev_cgroup *dev_cgroup; - struct dev_whitelist_item *wh; - -+ if (!S_ISBLK(mode) && !S_ISCHR(mode)) -+ return 0; -+ - rcu_read_lock(); + n_ch = ch->channel_number; + n_p = ch->ssi_port->port_number; +@@ -152,7 +164,14 @@ static void do_channel_rx(struct ssi_cha - dev_cgroup = task_devcgroup(current); -diff -Nurp linux-omap-2.6.28-omap1/sound/core/jack.c linux-omap-2.6.28-nokia1/sound/core/jack.c ---- linux-omap-2.6.28-omap1/sound/core/jack.c 2011-06-22 13:14:26.633067628 +0200 -+++ 
linux-omap-2.6.28-nokia1/sound/core/jack.c 2011-06-22 13:19:33.293063268 +0200 -@@ -23,6 +23,14 @@ - #include - #include + spin_lock(&ssi_ctrl->lock); -+static int jack_types[] = { -+ SW_HEADPHONE_INSERT, -+ SW_MICROPHONE_INSERT, -+ SW_LINEOUT_INSERT, -+ SW_JACK_PHYSICAL_INSERT, -+ SW_VIDEOOUT_INSERT, -+}; +- *(ch->read_data.addr) = ssi_inl(base, SSI_SSR_BUFFER_CH_REG(n_p, n_ch)); ++ if (ch->flags & SSI_CH_RX_POLL) ++ rx_poll = 1; + - static int snd_jack_dev_free(struct snd_device *device) - { - struct snd_jack *jack = device->device_data; -@@ -78,6 +86,7 @@ int snd_jack_new(struct snd_card *card, - { - struct snd_jack *jack; - int err; -+ int i; - static struct snd_device_ops ops = { - .dev_free = snd_jack_dev_free, - .dev_register = snd_jack_dev_register, -@@ -99,12 +108,10 @@ int snd_jack_new(struct snd_card *card, ++ if (ch->read_data.addr) { ++ data_read = 1; ++ *(ch->read_data.addr) = ssi_inl(base, ++ SSI_SSR_BUFFER_CH_REG(n_p, n_ch)); ++ } - jack->type = type; + ssi_outl_and(~SSI_SSR_DATAAVAILABLE(n_ch), base, + SSI_SYS_MPU_ENABLE_REG(n_p, irq)); +@@ -160,7 +179,13 @@ static void do_channel_rx(struct ssi_cha -- if (type & SND_JACK_HEADPHONE) -- input_set_capability(jack->input_dev, EV_SW, -- SW_HEADPHONE_INSERT); -- if (type & SND_JACK_MICROPHONE) -- input_set_capability(jack->input_dev, EV_SW, -- SW_MICROPHONE_INSERT); -+ for (i = 0; i < ARRAY_SIZE(jack_types); i++) -+ if (type & (1 << i)) -+ input_set_capability(jack->input_dev, EV_SW, -+ jack_types[i]); + spin_unlock(&ssi_ctrl->lock); - err = snd_device_new(card, SNDRV_DEV_JACK, jack, &ops); - if (err < 0) -@@ -147,15 +154,17 @@ EXPORT_SYMBOL(snd_jack_set_parent); - */ - void snd_jack_report(struct snd_jack *jack, int status) - { -+ int i; +- (*ch->read_done)(ch->dev); ++ if (rx_poll) ++ ssi_port_event_handler(ch->ssi_port, ++ SSI_EVENT_SSR_DATAAVAILABLE, ++ (void *)n_ch); + - if (!jack) - return; - -- if (jack->type & SND_JACK_HEADPHONE) -- input_report_switch(jack->input_dev, SW_HEADPHONE_INSERT, -- status & SND_JACK_HEADPHONE); -- if (jack->type & SND_JACK_MICROPHONE) -- input_report_switch(jack->input_dev, SW_MICROPHONE_INSERT, -- status & SND_JACK_MICROPHONE); -+ for (i = 0; i < ARRAY_SIZE(jack_types); i++) { -+ int testbit = 1 << i; -+ if (jack->type & testbit) -+ input_report_switch(jack->input_dev, jack_types[i], -+ status & testbit); -+ } - - input_sync(jack->input_dev); ++ if (data_read) ++ (*ch->read_done)(ch->dev); } -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/codecs/tlv320aic3x.c linux-omap-2.6.28-nokia1/sound/soc/codecs/tlv320aic3x.c ---- linux-omap-2.6.28-omap1/sound/soc/codecs/tlv320aic3x.c 2011-06-22 13:14:27.173067620 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/codecs/tlv320aic3x.c 2011-06-22 13:19:33.293063268 +0200 -@@ -45,15 +45,20 @@ - #include - #include - #include -+#include - - #include "tlv320aic3x.h" - - #define AIC3X_VERSION "0.2" -+static int hp_dac_lim = 9; -+module_param(hp_dac_lim, int, 0); + static void do_ssi_tasklet(unsigned long ssi_port) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/Kconfig kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/Kconfig +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/Kconfig 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,18 @@ ++# ++# OMAP SSI kernel configuration ++# + - /* codec private data */ - struct aic3x_priv { - unsigned int sysclk; - int master; -+ int prepare_reset; - }; - - /* -@@ -71,7 +76,7 @@ static const u8 
aic3x_reg[AIC3X_CACHEREG - 0x78, 0x78, 0x78, 0x78, /* 20 */ - 0x78, 0x00, 0x00, 0xfe, /* 24 */ - 0x00, 0x00, 0xfe, 0x00, /* 28 */ -- 0x18, 0x18, 0x00, 0x00, /* 32 */ -+ 0x00, 0x00, 0x00, 0x00, /* 32 */ - 0x00, 0x00, 0x00, 0x00, /* 36 */ - 0x00, 0x00, 0x00, 0x80, /* 40 */ - 0x80, 0x00, 0x00, 0x00, /* 44 */ -@@ -144,6 +149,10 @@ static int aic3x_read(struct snd_soc_cod - u8 *value) - { - *value = reg & 0xff; ++config SSI_CHAR ++ tristate "SSI character driver" ++ depends on OMAP_SSI ++ ---help--- ++ If you say Y here, you will enable the CMT character driver. ++ This driver provides a simple character device interface for ++ serial communication with the cellular modem over the SSI bus. + -+ /* No read access is recommended if the chip is reset after use */ -+ printk(KERN_ERR "%s(): Values are may be incorrect!\n", __func__); ++config SSI_CHAR_DEBUG ++ bool "Debug CMT character driver" ++ depends on SSI_CHAR && DEBUG_KERNEL ++ default n ++ ---help--- ++ Enable the debug information in the CMT character driver. +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/Makefile kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/Makefile +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/Makefile 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,8 @@ ++# ++# Makefile for SSI CHAR driver ++# ++#EXTRA_CFLAGS := -I$(src)/../../../include + - if (codec->hw_read(codec->control_data, value, 1) != 1) - return -EIO; - -@@ -151,6 +160,25 @@ static int aic3x_read(struct snd_soc_cod - return 0; - } - ++obj-$(CONFIG_SSI_CHAR) += ssi_char.o ++ ++ssi_char-objs := ssi-char.o ssi-if.o +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-char.c kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-char.c +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-char.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-char.c 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,493 @@ +/* -+ * Reset for getting low power consumption after bypass paths ++ * ssi-char.c ++ * ++ * SSI character device driver, implements the character device ++ * interface. ++ * ++ * Copyright (C) 2009 Nokia Corporation. All rights reserved. ++ * ++ * Contact: Andras Domokos ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA + */ -+static void aic3x_reset(struct snd_soc_codec *codec) -+{ -+ u8 *cache = codec->reg_cache; -+ u8 data[2]; -+ int i; + -+ aic3x_write(codec, AIC3X_RESET, SOFT_RESET); ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include + -+ /* We do not rewrite page select nor reset again */ -+ for (i = AIC3X_SAMPLE_RATE_SEL_REG; i < ARRAY_SIZE(aic3x_reg); i++) { -+ data[0] = i; -+ data[1] = cache[i]; -+ codec->hw_write(codec->control_data, data, 2); -+ } -+} ++#include "ssi-char-debug.h" ++#include "ssi-char.h" + - #define SOC_DAPM_SINGLE_AIC3X(xname, reg, shift, mask, invert) \ - { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ - .info = snd_soc_info_volsw, \ -@@ -165,10 +193,13 @@ static int snd_soc_dapm_put_volsw_aic3x( - struct snd_ctl_elem_value *ucontrol) - { - struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol); -- int reg = kcontrol->private_value & 0xff; -- int shift = (kcontrol->private_value >> 8) & 0x0f; -- int mask = (kcontrol->private_value >> 16) & 0xff; -- int invert = (kcontrol->private_value >> 24) & 0x01; -+ struct soc_mixer_control *mc = -+ (struct soc_mixer_control *)kcontrol->private_value; -+ unsigned int reg = mc->reg; -+ unsigned int shift = mc->shift; -+ int max = mc->max; -+ unsigned int mask = (1 << fls(max)) - 1; -+ unsigned int invert = mc->invert; - unsigned short val, val_mask; - int ret; - struct snd_soc_dapm_path *path; -@@ -247,44 +278,114 @@ static const struct soc_enum aic3x_enum[ - SOC_ENUM_DOUBLE(AIC3X_CODEC_DFILT_CTRL, 6, 4, 4, aic3x_adc_hpf), - }; - -+/* -+ * DAC digital volumes. From -63.5 to 0 dB in 0.5 dB steps -+ */ -+static DECLARE_TLV_DB_SCALE(dac_tlv, -6350, 50, 0); -+/* ADC PGA gain volumes. From 0 to 59.5 dB in 0.5 dB steps */ -+static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 50, 0); -+/* HP DAC Output gain values. From 0 to 9.0 dB in 1 dB steps */ -+static DECLARE_TLV_DB_SCALE(hpout_tlv, 0, 100, 0); -+/* -+ * Output stage volumes. From -78.3 to 0 dB. Muted below -78.3 dB. -+ * Step size is approximately 0.5 dB over most of the scale but increasing -+ * near the very low levels. -+ * Define dB scale so that it is mostly correct for range about -55 to 0 dB -+ * but having increasing dB difference below that (and where it doesn't count -+ * so much). This setting shows -50 dB (actual is -50.3 dB) for register -+ * value 100 and -58.5 dB (actual is -78.3 dB) for register value 117. 
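For reference, the figures above follow directly from the scale declared just below: DECLARE_TLV_DB_SCALE(output_stage_tlv, -5900, 50, 1) declares a minimum of -59.0 dB in 0.5 dB steps with the lowest control value reported as mute, and the SOC_DOUBLE_R_TLV controls that use it are inverted with max = 118, so a register value r is reported as -59.0 dB + 0.5 dB * (118 - r); r = 100 gives -50.0 dB and r = 117 gives -58.5 dB, even though the hardware is really at -50.3 dB and -78.3 dB at those settings.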
-+ */ -+static DECLARE_TLV_DB_SCALE(output_stage_tlv, -5900, 50, 1); ++#define DRIVER_VERSION "0.1.0" + -+#define SOC_DOUBLE_R_TLV_TLV320ALC3X(xname, reg_left, reg_right, xshift, xmax,\ -+ xinvert, tlv_array) \ -+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ -+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ -+ SNDRV_CTL_ELEM_ACCESS_READWRITE,\ -+ .tlv.p = (tlv_array), \ -+ .info = tlv320alc3x_info_volsw, \ -+ .get = snd_soc_get_volsw_2r,\ -+ .put = snd_soc_put_volsw_2r,\ -+ .private_value = (unsigned long)&(struct soc_mixer_control) \ -+ {.reg = reg_left, .rreg = reg_right, .shift = xshift, \ -+ .max = xmax, .invert = xinvert} } ++static unsigned int port = 1; ++module_param(port, uint, 1); ++MODULE_PARM_DESC(port, "SSI port to be probed"); + -+static int tlv320alc3x_info_volsw(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_info *uinfo) ++static unsigned int channels_map[SSI_MAX_CHAR_DEVS] = {1}; ++module_param_array(channels_map, uint, NULL, 0); ++MODULE_PARM_DESC(channels_map, "SSI channels to be probed"); ++ ++dev_t ssi_char_dev; ++ ++struct char_queue { ++ struct list_head list; ++ u32 *data; ++ unsigned int count; ++}; ++ ++struct ssi_char { ++ unsigned int opened; ++ int poll_event; ++ struct list_head rx_queue; ++ struct list_head tx_queue; ++ spinlock_t lock; ++ struct fasync_struct *async_queue; ++ wait_queue_head_t rx_wait; ++ wait_queue_head_t tx_wait; ++ wait_queue_head_t poll_wait; ++}; ++ ++static struct ssi_char ssi_char_data[SSI_MAX_CHAR_DEVS]; ++ ++void if_notify(int ch, struct ssi_event *ev) +{ -+ struct soc_mixer_control *mc = -+ (struct soc_mixer_control *)kcontrol->private_value; -+ int max = mc->max; ++ struct char_queue *entry; ++ ++ spin_lock(&ssi_char_data[ch].lock); ++ ++ if (!ssi_char_data[ch].opened) { ++ printk(KERN_DEBUG "device not opened\n!"); ++ spin_unlock(&ssi_char_data[ch].lock); ++ return; ++ } ++ ++ switch (SSI_EV_TYPE(ev->event)) { ++ case SSI_EV_IN: ++ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); ++ if (!entry) { ++ pr_err("SSI-CHAR: entry allocation failed.\n"); ++ spin_unlock(&ssi_char_data[ch].lock); ++ return; ++ } ++ entry->data = ev->data; ++ entry->count = ev->count; ++ list_add_tail(&entry->list, &ssi_char_data[ch].rx_queue); ++ spin_unlock(&ssi_char_data[ch].lock); ++ wake_up_interruptible(&ssi_char_data[ch].rx_wait); ++ break; ++ case SSI_EV_OUT: ++ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); ++ if (!entry) { ++ pr_err("SSI-CHAR: entry allocation failed.\n"); ++ spin_unlock(&ssi_char_data[ch].lock); ++ return; ++ } ++ entry->data = ev->data; ++ entry->count = ev->count; ++ ssi_char_data[ch].poll_event |= (POLLOUT | POLLWRNORM); ++ list_add_tail(&entry->list, &ssi_char_data[ch].tx_queue); ++ spin_unlock(&ssi_char_data[ch].lock); ++ wake_up_interruptible(&ssi_char_data[ch].tx_wait); ++ break; ++ case SSI_EV_EXCEP: ++ ssi_char_data[ch].poll_event |= POLLPRI; ++ spin_unlock(&ssi_char_data[ch].lock); ++ wake_up_interruptible(&ssi_char_data[ch].poll_wait); ++ break; ++ case SSI_EV_AVAIL: ++ ssi_char_data[ch].poll_event |= (POLLIN | POLLRDNORM); ++ spin_unlock(&ssi_char_data[ch].lock); ++ wake_up_interruptible(&ssi_char_data[ch].poll_wait); ++ break; ++ default: ++ spin_unlock(&ssi_char_data[ch].lock); ++ break; ++ } ++} + -+ if (hp_dac_lim != max && hp_dac_lim >= 2 && hp_dac_lim <= 9) -+ max = hp_dac_lim; + -+ if (max == 1) -+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; ++static int ssi_char_fasync(int fd, struct file *file, int on) ++{ ++ int ch = (int)file->private_data; ++ if (fasync_helper(fd, file, on, 
&ssi_char_data[ch].async_queue) >= 0) ++ return 0; + else -+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; -+ -+ uinfo->count = 2; -+ uinfo->value.integer.min = 0; -+ uinfo->value.integer.max = max; -+ return 0; ++ return -EIO; +} + - static const struct snd_kcontrol_new aic3x_snd_controls[] = { - /* Output */ -- SOC_DOUBLE_R("PCM Playback Volume", LDAC_VOL, RDAC_VOL, 0, 0x7f, 1), -+ SOC_DOUBLE_R_TLV("PCM Playback Volume", -+ LDAC_VOL, RDAC_VOL, 0, 0x7f, 1, dac_tlv), - -- SOC_DOUBLE_R("Line DAC Playback Volume", DACL1_2_LLOPM_VOL, -- DACR1_2_RLOPM_VOL, 0, 0x7f, 1), -+ SOC_DOUBLE_R_TLV("Line DAC Playback Volume", -+ DACL1_2_LLOPM_VOL, DACR1_2_RLOPM_VOL, -+ 0, 118, 1, output_stage_tlv), - SOC_DOUBLE_R("Line DAC Playback Switch", LLOPM_CTRL, RLOPM_CTRL, 3, - 0x01, 0), -- SOC_DOUBLE_R("Line PGA Bypass Playback Volume", PGAL_2_LLOPM_VOL, -- PGAR_2_RLOPM_VOL, 0, 0x7f, 1), -- SOC_DOUBLE_R("Line Line2 Bypass Playback Volume", LINE2L_2_LLOPM_VOL, -- LINE2R_2_RLOPM_VOL, 0, 0x7f, 1), -- -- SOC_DOUBLE_R("Mono DAC Playback Volume", DACL1_2_MONOLOPM_VOL, -- DACR1_2_MONOLOPM_VOL, 0, 0x7f, 1), -+ SOC_DOUBLE_R_TLV("Line PGA Bypass Playback Volume", -+ PGAL_2_LLOPM_VOL, PGAR_2_RLOPM_VOL, -+ 0, 118, 1, output_stage_tlv), -+ SOC_DOUBLE_R_TLV("Line Line2 Bypass Playback Volume", -+ LINE2L_2_LLOPM_VOL, LINE2R_2_RLOPM_VOL, -+ 0, 118, 1, output_stage_tlv), + -+ SOC_DOUBLE_R_TLV("Mono DAC Playback Volume", -+ DACL1_2_MONOLOPM_VOL, DACR1_2_MONOLOPM_VOL, -+ 0, 118, 1, output_stage_tlv), - SOC_SINGLE("Mono DAC Playback Switch", MONOLOPM_CTRL, 3, 0x01, 0), -- SOC_DOUBLE_R("Mono PGA Bypass Playback Volume", PGAL_2_MONOLOPM_VOL, -- PGAR_2_MONOLOPM_VOL, 0, 0x7f, 1), -- SOC_DOUBLE_R("Mono Line2 Bypass Playback Volume", LINE2L_2_MONOLOPM_VOL, -- LINE2R_2_MONOLOPM_VOL, 0, 0x7f, 1), -- -- SOC_DOUBLE_R("HP DAC Playback Volume", DACL1_2_HPLOUT_VOL, -- DACR1_2_HPROUT_VOL, 0, 0x7f, 1), -+ SOC_DOUBLE_R_TLV("Mono PGA Bypass Playback Volume", -+ PGAL_2_MONOLOPM_VOL, PGAR_2_MONOLOPM_VOL, -+ 0, 118, 1, output_stage_tlv), -+ SOC_DOUBLE_R_TLV("Mono Line2 Bypass Playback Volume", -+ LINE2L_2_MONOLOPM_VOL, LINE2R_2_MONOLOPM_VOL, -+ 0, 118, 1, output_stage_tlv), ++static unsigned int ssi_char_poll(struct file *file, poll_table *wait) ++{ ++ int ch = (int)file->private_data; ++ unsigned int ret = 0; + -+ SOC_DOUBLE_R_TLV("HP DAC Playback Volume", -+ DACL1_2_HPLOUT_VOL, DACR1_2_HPROUT_VOL, -+ 0, 118, 1, output_stage_tlv), - SOC_DOUBLE_R("HP DAC Playback Switch", HPLOUT_CTRL, HPROUT_CTRL, 3, - 0x01, 0), -- SOC_DOUBLE_R("HP PGA Bypass Playback Volume", PGAL_2_HPLOUT_VOL, -- PGAR_2_HPROUT_VOL, 0, 0x7f, 1), -- SOC_DOUBLE_R("HP Line2 Bypass Playback Volume", LINE2L_2_HPLOUT_VOL, -- LINE2R_2_HPROUT_VOL, 0, 0x7f, 1), -- -- SOC_DOUBLE_R("HPCOM DAC Playback Volume", DACL1_2_HPLCOM_VOL, -- DACR1_2_HPRCOM_VOL, 0, 0x7f, 1), -+ SOC_DOUBLE_R_TLV_TLV320ALC3X("HP DAC Output Volume", HPLOUT_CTRL, -+ HPROUT_CTRL, 4, 9, 0, hpout_tlv), -+ SOC_DOUBLE_R_TLV("HP PGA Bypass Playback Volume", -+ PGAL_2_HPLOUT_VOL, PGAR_2_HPROUT_VOL, -+ 0, 118, 1, output_stage_tlv), -+ SOC_DOUBLE_R_TLV("HP Line2 Bypass Playback Volume", -+ LINE2L_2_HPLOUT_VOL, LINE2R_2_HPROUT_VOL, -+ 0, 118, 1, output_stage_tlv), ++ poll_wait(file, &ssi_char_data[ch].poll_wait, wait); ++ poll_wait(file, &ssi_char_data[ch].tx_wait, wait); ++ spin_lock_bh(&ssi_char_data[ch].lock); ++ ret = ssi_char_data[ch].poll_event; ++ spin_unlock_bh(&ssi_char_data[ch].lock); + -+ SOC_DOUBLE_R_TLV("HPCOM DAC Playback Volume", -+ DACL1_2_HPLCOM_VOL, DACR1_2_HPRCOM_VOL, -+ 0, 118, 1, output_stage_tlv), - 
SOC_DOUBLE_R("HPCOM DAC Playback Switch", HPLCOM_CTRL, HPRCOM_CTRL, 3, - 0x01, 0), -- SOC_DOUBLE_R("HPCOM PGA Bypass Playback Volume", PGAL_2_HPLCOM_VOL, -- PGAR_2_HPRCOM_VOL, 0, 0x7f, 1), -- SOC_DOUBLE_R("HPCOM Line2 Bypass Playback Volume", LINE2L_2_HPLCOM_VOL, -- LINE2R_2_HPRCOM_VOL, 0, 0x7f, 1), -+ SOC_DOUBLE_R_TLV_TLV320ALC3X("HPCOM DAC Output Volume", HPLCOM_CTRL, -+ HPRCOM_CTRL, 4, 9, 0, hpout_tlv), -+ SOC_DOUBLE_R_TLV("HPCOM PGA Bypass Playback Volume", -+ PGAL_2_HPLCOM_VOL, PGAR_2_HPRCOM_VOL, -+ 0, 118, 1, output_stage_tlv), -+ SOC_DOUBLE_R_TLV("HPCOM Line2 Bypass Playback Volume", -+ LINE2L_2_HPLCOM_VOL, LINE2R_2_HPRCOM_VOL, -+ 0, 118, 1, output_stage_tlv), - - /* - * Note: enable Automatic input Gain Controller with care. It can -@@ -293,7 +394,8 @@ static const struct snd_kcontrol_new aic - SOC_DOUBLE_R("AGC Switch", LAGC_CTRL_A, RAGC_CTRL_A, 7, 0x01, 0), - - /* Input */ -- SOC_DOUBLE_R("PGA Capture Volume", LADC_VOL, RADC_VOL, 0, 0x7f, 0), -+ SOC_DOUBLE_R_TLV("PGA Capture Volume", LADC_VOL, RADC_VOL, -+ 0, 119, 0, adc_tlv), - SOC_DOUBLE_R("PGA Capture Switch", LADC_VOL, RADC_VOL, 7, 0x01, 1), - - SOC_ENUM("ADC HPF Cut-off", aic3x_enum[ADC_HPF_ENUM]), -@@ -315,6 +417,59 @@ static int aic3x_add_controls(struct snd - return 0; - } - -+static int reset_after_bypass(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *kcontrol, int event) ++ return ret; ++} ++ ++ ++static ssize_t ssi_char_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) +{ -+ struct aic3x_priv *aic3x = w->codec->private_data; -+ struct soc_mixer_control *mc = NULL; -+ unsigned int reg = 0; ++ int ch = (int)file->private_data; ++ DECLARE_WAITQUEUE(wait, current); ++ u32 *data; ++ unsigned int data_len; ++ struct char_queue *entry; ++ ssize_t ret; ++ ++ /* only 32bit data is supported for now */ ++ if ((count < 4) || (count & 3)) ++ return -EINVAL; + -+ if (kcontrol) -+ mc = (struct soc_mixer_control *)kcontrol->private_value; -+ if (mc) -+ reg = mc->reg; ++ data = kmalloc(count, GFP_ATOMIC); + -+ if (reg == PGAL_2_LLOPM_VOL || reg == PGAR_2_RLOPM_VOL || -+ reg == PGAL_2_HPLOUT_VOL || reg == PGAR_2_HPROUT_VOL) { -+ if (w->value & 0x80) { -+ /* Prepare reset on the chip */ -+ if (reg == PGAL_2_LLOPM_VOL) -+ aic3x->prepare_reset |= 0x01; -+ else if (reg == PGAR_2_RLOPM_VOL) -+ aic3x->prepare_reset |= 0x02; -+ else if (reg == PGAL_2_HPLOUT_VOL) -+ aic3x->prepare_reset |= 0x04; -+ else if (reg == PGAR_2_HPROUT_VOL) -+ aic3x->prepare_reset |= 0x08; -+ } else { -+ if (aic3x->prepare_reset) { -+ if (reg == PGAL_2_LLOPM_VOL) -+ aic3x->prepare_reset &= ~0x01; -+ else if (reg == PGAR_2_RLOPM_VOL) -+ aic3x->prepare_reset &= ~0x02; -+ else if (reg == PGAL_2_HPLOUT_VOL) -+ aic3x->prepare_reset &= ~0x04; -+ else if (reg == PGAR_2_HPROUT_VOL) -+ aic3x->prepare_reset &= ~0x08; -+ /* -+ * Controls may have now been turned off, -+ * once they were on, so schedule or -+ * issue a reset on the chip. 
-+ */ -+ if (!aic3x->prepare_reset) { -+ if (!((w->codec->bias_level == -+ SND_SOC_BIAS_ON) || -+ (w->codec->bias_level == -+ SND_SOC_BIAS_PREPARE))) -+ aic3x_reset(w->codec); -+ } -+ } -+ } ++ ret = if_ssi_read(ch, data, count); ++ if (ret < 0) { ++ kfree(data); ++ goto out2; + } + -+ return 0; -+} ++ add_wait_queue(&ssi_char_data[ch].rx_wait, &wait); + - /* Left DAC Mux */ - static const struct snd_kcontrol_new aic3x_left_dac_mux_controls = - SOC_DAPM_ENUM("Route", aic3x_enum[LDAC_ENUM]); -@@ -357,6 +512,7 @@ static const struct snd_kcontrol_new aic - /* Right PGA Mixer */ - static const struct snd_kcontrol_new aic3x_right_pga_mixer_controls[] = { - SOC_DAPM_SINGLE_AIC3X("Line1R Switch", LINE1R_2_RADC_CTRL, 3, 1, 1), -+ SOC_DAPM_SINGLE_AIC3X("Line1L Switch", LINE1L_2_RADC_CTRL, 3, 1, 1), - SOC_DAPM_SINGLE_AIC3X("Line2R Switch", LINE2R_2_RADC_CTRL, 3, 1, 1), - SOC_DAPM_SINGLE_AIC3X("Mic3R Switch", MIC3LR_2_RADC_CTRL, 0, 1, 1), - }; -@@ -455,6 +611,8 @@ static const struct snd_soc_dapm_widget - SND_SOC_DAPM_MIXER("Right PGA Mixer", SND_SOC_NOPM, 0, 0, - &aic3x_right_pga_mixer_controls[0], - ARRAY_SIZE(aic3x_right_pga_mixer_controls)), -+ SND_SOC_DAPM_MUX("Right Line1L Mux", SND_SOC_NOPM, 0, 0, -+ &aic3x_right_line1_mux_controls), - SND_SOC_DAPM_MUX("Right Line1R Mux", SND_SOC_NOPM, 0, 0, - &aic3x_right_line1_mux_controls), - SND_SOC_DAPM_MUX("Right Line2R Mux", SND_SOC_NOPM, 0, 0, -@@ -490,14 +648,16 @@ static const struct snd_soc_dapm_widget - MICBIAS_CTRL, 6, 3, 3, 0), - - /* Left PGA to Left Output bypass */ -- SND_SOC_DAPM_MIXER("Left PGA Bypass Mixer", SND_SOC_NOPM, 0, 0, -+ SND_SOC_DAPM_MIXER_E("Left PGA Bypass Mixer", SND_SOC_NOPM, 0, 0, - &aic3x_left_pga_bp_mixer_controls[0], -- ARRAY_SIZE(aic3x_left_pga_bp_mixer_controls)), -+ ARRAY_SIZE(aic3x_left_pga_bp_mixer_controls), -+ reset_after_bypass, SND_SOC_DAPM_POST_REG), - - /* Right PGA to Right Output bypass */ -- SND_SOC_DAPM_MIXER("Right PGA Bypass Mixer", SND_SOC_NOPM, 0, 0, -+ SND_SOC_DAPM_MIXER_E("Right PGA Bypass Mixer", SND_SOC_NOPM, 0, 0, - &aic3x_right_pga_bp_mixer_controls[0], -- ARRAY_SIZE(aic3x_right_pga_bp_mixer_controls)), -+ ARRAY_SIZE(aic3x_right_pga_bp_mixer_controls), -+ reset_after_bypass, SND_SOC_DAPM_POST_REG), - - /* Left Line2 to Left Output bypass */ - SND_SOC_DAPM_MIXER("Left Line2 Bypass Mixer", SND_SOC_NOPM, 0, 0, -@@ -599,12 +759,16 @@ static const struct snd_soc_dapm_route i - {"Left ADC", NULL, "GPIO1 dmic modclk"}, - - /* Right Input */ -+ {"Right Line1L Mux", "single-ended", "LINE1L"}, -+ {"Right Line1L Mux", "differential", "LINE1L"}, ++ for ( ; ; ) { ++ data = NULL; ++ data_len = 0; + - {"Right Line1R Mux", "single-ended", "LINE1R"}, - {"Right Line1R Mux", "differential", "LINE1R"}, - - {"Right Line2R Mux", "single-ended", "LINE2R"}, - {"Right Line2R Mux", "differential", "LINE2R"}, - -+ {"Right PGA Mixer", "Line1L Switch", "Right Line1L Mux"}, - {"Right PGA Mixer", "Line1R Switch", "Right Line1R Mux"}, - {"Right PGA Mixer", "Line2R Switch", "Right Line2R Mux"}, - {"Right PGA Mixer", "Mic3R Switch", "MIC3R"}, -@@ -846,6 +1010,7 @@ static int aic3x_set_dai_fmt(struct snd_ - struct snd_soc_codec *codec = codec_dai->codec; - struct aic3x_priv *aic3x = codec->private_data; - u8 iface_areg, iface_breg; -+ int delay = 0; - - iface_areg = aic3x_read_reg_cache(codec, AIC3X_ASD_INTF_CTRLA) & 0x3f; - iface_breg = aic3x_read_reg_cache(codec, AIC3X_ASD_INTF_CTRLB) & 0x3f; -@@ -871,6 +1036,8 @@ static int aic3x_set_dai_fmt(struct snd_ - SND_SOC_DAIFMT_INV_MASK)) { - case (SND_SOC_DAIFMT_I2S | 
SND_SOC_DAIFMT_NB_NF): - break; -+ case (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF): -+ delay = 1; - case (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF): - iface_breg |= (0x01 << 6); - break; -@@ -887,6 +1054,7 @@ static int aic3x_set_dai_fmt(struct snd_ - /* set iface */ - aic3x_write(codec, AIC3X_ASD_INTF_CTRLA, iface_areg); - aic3x_write(codec, AIC3X_ASD_INTF_CTRLB, iface_breg); -+ aic3x_write(codec, AIC3X_ASD_INTF_CTRLC, delay); - - return 0; - } -@@ -905,6 +1073,12 @@ static int aic3x_set_bias_level(struct s - reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG); - aic3x_write(codec, AIC3X_PLL_PROGA_REG, - reg | PLL_ENABLE); -+ /* -+ * ensure that bit and word clocks are running also if -+ * DAC and ADC are shutdown -+ */ -+ reg = aic3x_read_reg_cache(codec, AIC3X_ASD_INTF_CTRLA); -+ aic3x_write(codec, AIC3X_ASD_INTF_CTRLA, reg | 0x10); - } - break; - case SND_SOC_BIAS_PREPARE: -@@ -915,11 +1089,16 @@ static int aic3x_set_bias_level(struct s - * so output power is safe if bypass was set - */ - if (aic3x->master) { -+ reg = aic3x_read_reg_cache(codec, AIC3X_ASD_INTF_CTRLA); -+ aic3x_write(codec, AIC3X_ASD_INTF_CTRLA, reg & ~0x10); - /* disable pll */ - reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG); - aic3x_write(codec, AIC3X_PLL_PROGA_REG, - reg & ~PLL_ENABLE); - } -+ /* Reset cannot be issued, if bypass paths are in use */ -+ if (!aic3x->prepare_reset) -+ aic3x_reset(codec); - break; - case SND_SOC_BIAS_OFF: - /* force all power off */ -@@ -950,6 +1129,8 @@ static int aic3x_set_bias_level(struct s - aic3x_write(codec, RLOPM_CTRL, reg & ~RLOPM_PWR_ON); - - if (aic3x->master) { -+ reg = aic3x_read_reg_cache(codec, AIC3X_ASD_INTF_CTRLA); -+ aic3x_write(codec, AIC3X_ASD_INTF_CTRLA, reg & ~0x10); - /* disable pll */ - reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG); - aic3x_write(codec, AIC3X_PLL_PROGA_REG, -@@ -1023,7 +1204,7 @@ static int aic3x_suspend(struct platform - struct snd_soc_device *socdev = platform_get_drvdata(pdev); - struct snd_soc_codec *codec = socdev->codec; - -- aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF); -+ aic3x_write(codec, AIC3X_RESET, SOFT_RESET); - - return 0; - } -@@ -1324,7 +1505,8 @@ static int aic3x_remove(struct platform_ - snd_soc_free_pcms(socdev); - snd_soc_dapm_free(socdev); - #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) -- i2c_unregister_device(codec->control_data); -+ if (codec->control_data) -+ i2c_unregister_device(codec->control_data); - i2c_del_driver(&aic3x_i2c_driver); - #endif - kfree(codec->private_data); -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/codecs/tlv320aic3x.h linux-omap-2.6.28-nokia1/sound/soc/codecs/tlv320aic3x.h ---- linux-omap-2.6.28-omap1/sound/soc/codecs/tlv320aic3x.h 2011-06-22 13:14:27.173067620 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/codecs/tlv320aic3x.h 2011-06-22 13:19:33.293063268 +0200 -@@ -35,6 +35,8 @@ - #define AIC3X_ASD_INTF_CTRLA 8 - /* Audio serial data interface control register B */ - #define AIC3X_ASD_INTF_CTRLB 9 -+/* Audio serial data interface control register C */ -+#define AIC3X_ASD_INTF_CTRLC 10 - /* Audio overflow status and PLL R value programming register */ - #define AIC3X_OVRF_STATUS_AND_PLLR_REG 11 - /* Audio codec digital filter control register */ -@@ -49,6 +51,7 @@ - /* Line1 Input control registers */ - #define LINE1L_2_LADC_CTRL 19 - #define LINE1R_2_RADC_CTRL 22 -+#define LINE1L_2_RADC_CTRL 24 - /* Line2 Input control registers */ - #define LINE2L_2_LADC_CTRL 20 - #define LINE2R_2_RADC_CTRL 23 -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/aic34b_dummy.c 
linux-omap-2.6.28-nokia1/sound/soc/omap/aic34b_dummy.c ---- linux-omap-2.6.28-omap1/sound/soc/omap/aic34b_dummy.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/aic34b_dummy.c 2011-06-22 13:19:33.293063268 +0200 -@@ -0,0 +1,270 @@ -+/* -+ * aic34b_dummy.c -- Dummy driver for AIC34 block B parts used in Nokia RX51 -+ * -+ * Purpose for this driver is to cover few audio connections on Nokia RX51 HW -+ * which are connected into block B of TLV320AIC34 dual codec. -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * Contact: Jarkko Nikula -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -+ * 02110-1301 USA -+ * -+ * TODO: -+ * - Get rid of this driver, at least when ASoC v2 is merged and when -+ * we can support multiple codec instances in tlv320aic3x.c driver. -+ * This driver is hacked only for Nokia RX51 HW. -+ */ ++ set_current_state(TASK_INTERRUPTIBLE); + -+#include -+#include -+#include -+#include -+#include ++ spin_lock_bh(&ssi_char_data[ch].lock); ++ if (!list_empty(&ssi_char_data[ch].rx_queue)) { ++ entry = list_entry(ssi_char_data[ch].rx_queue.next, ++ struct char_queue, list); ++ data = entry->data; ++ data_len = entry->count; ++ list_del(&entry->list); ++ kfree(entry); ++ } ++ spin_unlock_bh(&ssi_char_data[ch].lock); ++ ++ if (data_len) { ++ spin_lock_bh(&ssi_char_data[ch].lock); ++ ssi_char_data[ch].poll_event &= ~(POLLIN | POLLRDNORM | ++ POLLPRI); ++ if_ssi_poll(ch); ++ spin_unlock_bh(&ssi_char_data[ch].lock); ++ break; ++ } else if (file->f_flags & O_NONBLOCK) { ++ ret = -EAGAIN; ++ goto out; ++ } else if (signal_pending(current)) { ++ ret = -EAGAIN; ++ if_ssi_cancel_read(ch); ++ break; ++ } + -+#include "../codecs/tlv320aic3x.h" ++ schedule(); ++ } + -+struct i2c_client *aic34b_client; -+static DEFINE_MUTEX(aic34b_mutex); -+static DEFINE_MUTEX(button_press_mutex); -+static ktime_t button_press_denial_start; -+static int aic34b_volume; -+static int button_press_denied; -+static int aic34b_bias; ++ if (data_len) { ++ ret = copy_to_user((void __user *)buf, data, data_len); ++ if (!ret) ++ ret = data_len; ++ } + ++ kfree(data); + -+static int aic34b_read(struct i2c_client *client, unsigned int reg, -+ u8 *value) -+{ -+ int err; ++out: ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&ssi_char_data[ch].rx_wait, &wait); + -+ err = i2c_smbus_read_byte_data(client, reg); -+ *value = err; -+ return (err >= 0) ? 
0 : err; ++out2: ++ return ret; +} + -+static int aic34b_write(struct i2c_client *client, unsigned int reg, -+ u8 value) ++static ssize_t ssi_char_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) +{ -+ u8 data[2]; ++ int ch = (int)file->private_data; ++ DECLARE_WAITQUEUE(wait, current); ++ u32 *data; ++ unsigned int data_len = 0; ++ struct char_queue *entry; ++ ssize_t ret; + -+ data[0] = reg & 0xff; -+ data[1] = value & 0xff; ++ /* only 32bit data is supported for now */ ++ if ((count < 4) || (count & 3)) ++ return -EINVAL; + -+ return (i2c_master_send(client, data, 2) == 2) ? 0 : -EIO; -+} ++ data = kmalloc(count, GFP_ATOMIC); + -+/* -+ * Introduce a derivative FIR filter to detect unnecessary button -+ * presses caused by a change in the MICBIAS. The filter returns -+ * TRUE in the event there has not been a change in MICBIAS within -+ * the time window (500ms). If the rate of change within the window -+ * is >= 1, all button presses are denied. In addition, if bias is -+ * zero, then all button presses are also denied explicitly. -+ */ -+int allow_button_press(void) -+{ -+ /* If bias is not on, no chance for button presses */ -+ if (!aic34b_bias) -+ return 0; ++ if (copy_from_user(data, (void __user *)buf, count)) { ++ ret = -EFAULT; ++ kfree(data); ++ } else { ++ ret = count; ++ } ++ ++ spin_lock_bh(&ssi_char_data[ch].lock); ++ ret = if_ssi_write(ch, data, count); ++ if (ret < 0) { ++ spin_unlock_bh(&ssi_char_data[ch].lock); ++ kfree(data); ++ goto out2; ++ } ++ ssi_char_data[ch].poll_event &= ~(POLLOUT | POLLWRNORM); ++ spin_unlock_bh(&ssi_char_data[ch].lock); ++ ++ add_wait_queue(&ssi_char_data[ch].tx_wait, &wait); ++ ++ for ( ; ; ) { ++ data = NULL; ++ data_len = 0; + -+ /* If explicitly granted a button press */ -+ if (!button_press_denied) { -+ return 1; -+ } else { -+ int64_t delta; -+ /* This is the FIR portion with specified time window */ -+ mutex_lock(&button_press_mutex); -+ delta = ktime_to_ns(ktime_sub(ktime_get(), -+ button_press_denial_start)); ++ set_current_state(TASK_INTERRUPTIBLE); + -+ if (delta < 0) { -+ button_press_denied = 0; -+ /* If the clock ever wraps */ -+ button_press_denial_start.tv.sec = 0; -+ button_press_denial_start.tv.nsec = 0; -+ mutex_unlock(&button_press_mutex); -+ return 1; ++ spin_lock_bh(&ssi_char_data[ch].lock); ++ if (!list_empty(&ssi_char_data[ch].tx_queue)) { ++ entry = list_entry(ssi_char_data[ch].tx_queue.next, ++ struct char_queue, list); ++ data = entry->data; ++ data_len = entry->count; ++ list_del(&entry->list); ++ kfree(entry); + } -+ do_div(delta, 1000000); -+ /* Time window is 500ms */ -+ if (delta >= 500) { -+ button_press_denied = 0; -+ mutex_unlock(&button_press_mutex); -+ return 1; ++ spin_unlock_bh(&ssi_char_data[ch].lock); ++ ++ if (data_len) { ++ ret = data_len; ++ break; ++ } else if (file->f_flags & O_NONBLOCK) { ++ ret = -EAGAIN; ++ goto out; ++ } else if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ goto out; + } -+ mutex_unlock(&button_press_mutex); ++ ++ schedule(); + } + -+ /* There was a change in MICBIAS within time window */ -+ return 0; -+} -+EXPORT_SYMBOL(allow_button_press); ++ kfree(data); + -+static void deny_button_press(void) -+{ -+ mutex_lock(&button_press_mutex); -+ button_press_denied = 1; -+ button_press_denial_start = ktime_get(); -+ mutex_unlock(&button_press_mutex); ++out: ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&ssi_char_data[ch].tx_wait, &wait); ++ ++out2: ++ return ret; +} + -+void aic34b_set_mic_bias(int bias) ++static int 
ssi_char_ioctl(struct inode *inode, struct file *file, ++ unsigned int cmd, unsigned long arg) +{ -+ if (aic34b_client == NULL) -+ return; ++ int ch = (int)file->private_data; ++ unsigned int state; ++ struct ssi_rx_config rx_cfg; ++ struct ssi_tx_config tx_cfg; ++ int ret = 0; ++ ++ switch (cmd) { ++ case CS_SEND_BREAK: ++ if_ssi_send_break(ch); ++ break; ++ case CS_FLUSH_RX: ++ if_ssi_flush_rx(ch); ++ break; ++ case CS_FLUSH_TX: ++ if_ssi_flush_tx(ch); ++ break; ++ case CS_SET_WAKELINE: ++ if (copy_from_user(&state, (void __user *)arg, ++ sizeof(state))) ++ ret = -EFAULT; ++ else ++ if_ssi_set_wakeline(ch, state); ++ break; ++ case CS_GET_WAKELINE: ++ if_ssi_get_wakeline(ch, &state); ++ if (copy_to_user((void __user *)arg, &state, sizeof(state))) ++ ret = -EFAULT; ++ break; ++ case CS_SET_RX: { ++ if (copy_from_user(&rx_cfg, (void __user *)arg, ++ sizeof(rx_cfg))) ++ ret = -EFAULT; ++ else ++ ret = if_ssi_set_rx(ch, &rx_cfg); ++ } ++ break; ++ case CS_GET_RX: ++ if_ssi_get_rx(ch, &rx_cfg); ++ if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg))) ++ ret = -EFAULT; ++ break; ++ case CS_SET_TX: ++ if (copy_from_user(&tx_cfg, (void __user *)arg, ++ sizeof(tx_cfg))) ++ ret = -EFAULT; ++ else ++ ret = if_ssi_set_tx(ch, &tx_cfg); ++ break; ++ case CS_GET_TX: ++ if_ssi_get_tx(ch, &tx_cfg); ++ if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg))) ++ ret = -EFAULT; ++ break; ++ default: ++ return -ENOIOCTLCMD; ++ break; ++ } + -+ mutex_lock(&aic34b_mutex); -+ aic34b_write(aic34b_client, MICBIAS_CTRL, (bias & 0x3) << 6); -+ aic34b_bias = bias; -+ deny_button_press(); -+ mutex_unlock(&aic34b_mutex); ++ return ret; +} -+EXPORT_SYMBOL(aic34b_set_mic_bias); + -+int aic34b_set_volume(u8 volume) ++static int ssi_char_open(struct inode *inode, struct file *file) +{ -+ u8 val; -+ -+ if (aic34b_client == NULL) -+ return 0; -+ -+ mutex_lock(&aic34b_mutex); ++ int ret = 0, ch = iminor(inode); + -+ /* Volume control for Right PGA to HPLOUT */ -+ aic34b_read(aic34b_client, 49, &val); -+ val &= ~0x7f; -+ aic34b_write(aic34b_client, 49, val | (~volume & 0x7f)); ++ if (!channels_map[ch]) ++ return -ENODEV; + -+ /* Volume control for Right PGA to HPLCOM */ -+ aic34b_read(aic34b_client, 56, &val); -+ val &= ~0x7f; -+ aic34b_write(aic34b_client, 56, val | (~volume & 0x7f)); ++ spin_lock_bh(&ssi_char_data[ch].lock); ++#if 0 ++ if (ssi_char_data[ch].opened) { ++ spin_unlock_bh(&ssi_char_data[ch].lock); ++ return -EBUSY; ++ } ++#endif ++ file->private_data = (void *)ch; ++ ssi_char_data[ch].opened++; ++ ssi_char_data[ch].poll_event = (POLLOUT | POLLWRNORM); ++ spin_unlock_bh(&ssi_char_data[ch].lock); + -+ aic34b_volume = volume; -+ mutex_unlock(&aic34b_mutex); ++ ret = if_ssi_start(ch); + -+ return 0; ++ return ret; +} -+EXPORT_SYMBOL(aic34b_set_volume); + -+void aic34b_ear_enable(int enable) ++static int ssi_char_release(struct inode *inode, struct file *file) +{ -+ u8 val; ++ int ch = (int)file->private_data; ++ struct char_queue *entry; ++ struct list_head *cursor, *next; + -+ if (aic34b_client == NULL) -+ return; ++ if_ssi_stop(ch); ++ spin_lock_bh(&ssi_char_data[ch].lock); ++ ssi_char_data[ch].opened--; + -+ mutex_lock(&aic34b_mutex); -+ if (enable) { -+ /* Connect LINE2R to RADC */ -+ aic34b_write(aic34b_client, LINE2R_2_RADC_CTRL, 0x80); -+ /* Unmute Right ADC-PGA */ -+ aic34b_write(aic34b_client, RADC_VOL, 0x00); -+ /* Right PGA -> HPLOUT */ -+ aic34b_read(aic34b_client, 49, &val); -+ aic34b_write(aic34b_client, 49, val | 0x80); -+ /* Unmute HPLOUT with 1 dB gain */ -+ 
aic34b_write(aic34b_client, HPLOUT_CTRL, 0x19); -+ /* Right PGA -> HPLCOM */ -+ aic34b_read(aic34b_client, 56, &val); -+ aic34b_write(aic34b_client, 56, val | 0x80); -+ /* Unmute HPLCOM with 1 dB gain */ -+ aic34b_write(aic34b_client, HPLCOM_CTRL, 0x19); -+ } else { -+ /* Disconnect LINE2R from RADC */ -+ aic34b_write(aic34b_client, LINE2R_2_RADC_CTRL, 0xF8); -+ /* Mute Right ADC-PGA */ -+ aic34b_write(aic34b_client, RADC_VOL, 0x80); -+ /* Detach Right PGA from HPLOUT */ -+ aic34b_write(aic34b_client, 49, (~aic34b_volume & 0x7f)); -+ /* Power down HPLOUT */ -+ aic34b_write(aic34b_client, HPLOUT_CTRL, 0x06); -+ /* Detach Right PGA from HPLCOM */ -+ aic34b_write(aic34b_client, 56, (~aic34b_volume & 0x7f)); -+ /* Power down HPLCOM */ -+ aic34b_write(aic34b_client, HPLCOM_CTRL, 0x06); -+ /* Deny any possible keypresses for a second */ -+ deny_button_press(); -+ /* To regain low power consumption, reset is needed */ -+ aic34b_write(aic34b_client, AIC3X_RESET, SOFT_RESET); -+ /* And need to restore volume level */ -+ aic34b_write(aic34b_client, 49, (~aic34b_volume & 0x7f)); -+ aic34b_write(aic34b_client, 56, (~aic34b_volume & 0x7f)); -+ /* Need to restore MICBIAS if set */ -+ if (aic34b_bias) -+ aic34b_write(aic34b_client, MICBIAS_CTRL, -+ (aic34b_bias & 0x3) << 6); ++ if (!list_empty(&ssi_char_data[ch].rx_queue)) { ++ list_for_each_safe(cursor, next, &ssi_char_data[ch].rx_queue) { ++ entry = list_entry(cursor, struct char_queue, list); ++ list_del(&entry->list); ++ kfree(entry); ++ } + } -+ mutex_unlock(&aic34b_mutex); -+} -+EXPORT_SYMBOL(aic34b_ear_enable); -+ -+static int aic34b_dummy_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ u8 val; + -+ if (aic34b_read(client, AIC3X_PLL_PROGA_REG, &val) || val != 0x10) { -+ /* Chip not present */ -+ return -ENODEV; ++ if (!list_empty(&ssi_char_data[ch].tx_queue)) { ++ list_for_each_safe(cursor, next, &ssi_char_data[ch].tx_queue) { ++ entry = list_entry(cursor, struct char_queue, list); ++ list_del(&entry->list); ++ kfree(entry); ++ } + } -+ aic34b_client = client; + -+ /* Configure LINE2R for differential mode */ -+ aic34b_read(client, LINE2R_2_RADC_CTRL, &val); -+ aic34b_write(client, LINE2R_2_RADC_CTRL, val | 0x80); ++ spin_unlock_bh(&ssi_char_data[ch].lock); + + return 0; +} + -+static int aic34b_dummy_remove(struct i2c_client *client) ++static const struct file_operations ssi_char_fops = { ++ .owner = THIS_MODULE, ++ .read = ssi_char_read, ++ .write = ssi_char_write, ++ .poll = ssi_char_poll, ++ .ioctl = ssi_char_ioctl, ++ .open = ssi_char_open, ++ .release = ssi_char_release, ++ .fasync = ssi_char_fasync, ++}; ++ ++static struct cdev ssi_char_cdev; ++ ++static int __init ssi_char_init(void) +{ -+ aic34b_client = NULL; ++ char devname[] = "ssi_char"; ++ int ret, i; + -+ return 0; -+} ++ pr_info("SSI character device version " DRIVER_VERSION "\n"); + -+static const struct i2c_device_id aic34b_dummy_id[] = { -+ { "aic34b_dummy", 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, aic34b_dummy_id); ++ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { ++ init_waitqueue_head(&ssi_char_data[i].rx_wait); ++ init_waitqueue_head(&ssi_char_data[i].tx_wait); ++ init_waitqueue_head(&ssi_char_data[i].poll_wait); ++ spin_lock_init(&ssi_char_data[i].lock); ++ ssi_char_data[i].opened = 0; ++ INIT_LIST_HEAD(&ssi_char_data[i].rx_queue); ++ INIT_LIST_HEAD(&ssi_char_data[i].tx_queue); ++ } + -+static struct i2c_driver aic34b_dummy_driver = { -+ .driver = { -+ .name = "aic34b_dummy" -+ }, -+ .probe = aic34b_dummy_probe, -+ .remove = aic34b_dummy_remove, -+ 
.id_table = aic34b_dummy_id, -+}; ++ ret = if_ssi_init(port, channels_map); ++ if (ret) ++ return ret; + -+static int __init aic34b_dummy_init(void) -+{ -+ return i2c_add_driver(&aic34b_dummy_driver); ++ ret = alloc_chrdev_region(&ssi_char_dev, 0, SSI_MAX_CHAR_DEVS, devname); ++ if (ret < 0) { ++ pr_err("SSI character driver: Failed to register\n"); ++ return ret; ++ } ++ ++ cdev_init(&ssi_char_cdev, &ssi_char_fops); ++ cdev_add(&ssi_char_cdev, ssi_char_dev, SSI_MAX_CHAR_DEVS); ++ ++ return 0; +} + -+static void __exit aic34b_dummy_exit(void) ++static void __exit ssi_char_exit(void) +{ -+ i2c_del_driver(&aic34b_dummy_driver); ++ cdev_del(&ssi_char_cdev); ++ unregister_chrdev_region(ssi_char_dev, SSI_MAX_CHAR_DEVS); ++ if_ssi_exit(); +} + -+MODULE_AUTHOR(); -+MODULE_DESCRIPTION("Dummy driver for AIC34 block B parts used on Nokia RX51"); ++MODULE_AUTHOR("Andras Domokos "); ++MODULE_DESCRIPTION("SSI character device"); +MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRIVER_VERSION); + -+module_init(aic34b_dummy_init); -+module_exit(aic34b_dummy_exit); -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/aic34b_dummy.h linux-omap-2.6.28-nokia1/sound/soc/omap/aic34b_dummy.h ---- linux-omap-2.6.28-omap1/sound/soc/omap/aic34b_dummy.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/aic34b_dummy.h 2011-06-22 13:19:33.293063268 +0200 -@@ -0,0 +1,31 @@ ++module_init(ssi_char_init); ++module_exit(ssi_char_exit); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-char-debug.h kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-char-debug.h +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-char-debug.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-char-debug.h 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,39 @@ +/* -+ * aic34b_dummy.h ++ * ssi-char-debug.h ++ * ++ * Part of the SSI character driver. Debugging related definitions. + * -+ * Copyright (C) 2008 Nokia Corporation ++ * Copyright (C) 2009 Nokia Corporation. All rights reserved. + * -+ * Contact: Jarkko Nikula ++ * Contact: Andras Domokos + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License @@ -284034,410 +6298,75 @@ diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/aic34b_dummy.h linux-omap-2.6. + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA -+ * + */ + -+#ifndef __AIC34B_DUMMY__ -+#define __AIC34B_DUMMY__ + -+extern void aic34b_ear_enable(int enable); -+void aic34b_set_mic_bias(int bias); -+int aic34b_set_volume(u8 volume); ++#ifndef _SSI_CHAR_DEBUG_H ++#define _SSI_CHAR_DEBUG_H + ++#ifdef CONFIG_SSI_CHAR_DEBUG ++#define DPRINTK(fmt, arg...) printk(KERN_DEBUG "%s(): " fmt, __func__, ##arg) ++#define DENTER() printk(KERN_DEBUG "ENTER %s()\n", __func__) ++#define DLEAVE(a) printk(KERN_DEBUG "LEAVE %s() %d\n", __func__, a) ++#else ++#define DPRINTK(fmt, arg...) while (0) ++#define DENTER() while (0) ++#define DLEAVE(a) while (0) +#endif -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/Kconfig linux-omap-2.6.28-nokia1/sound/soc/omap/Kconfig ---- linux-omap-2.6.28-omap1/sound/soc/omap/Kconfig 2011-06-22 13:14:27.223067619 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/Kconfig 2011-06-22 13:19:33.293063268 +0200 -@@ -14,6 +14,16 @@ config SND_OMAP_SOC_N810 - help - Say Y if you want to add support for SoC audio on Nokia N810. 
- -+config SND_OMAP_SOC_RX51 -+ tristate "SoC Audio support for Nokia RX51" -+ depends on SND_OMAP_SOC && MACH_NOKIA_RX51 -+ select OMAP_MCBSP -+ select SND_OMAP_SOC_MCBSP -+ select SND_SOC_TLV320AIC3X -+ select TPA6130A2 -+ help -+ Say Y if you want to add support for SoC audio on Nokia RX51. -+ - config SND_OMAP_SOC_OMAP3_BEAGLE - tristate "SoC Audio support for OMAP3 Beagle" - depends on SND_OMAP_SOC && MACH_OMAP3_BEAGLE -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/Makefile linux-omap-2.6.28-nokia1/sound/soc/omap/Makefile ---- linux-omap-2.6.28-omap1/sound/soc/omap/Makefile 2011-06-22 13:14:27.223067619 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/Makefile 2011-06-22 13:19:33.293063268 +0200 -@@ -7,11 +7,13 @@ obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd- - - # OMAP Machine Support - snd-soc-n810-objs := n810.o -+snd-soc-rx51-objs := rx51.o - snd-soc-omap3beagle-objs := omap3beagle.o - snd-soc-osk5912-objs := osk5912.o - snd-soc-overo-objs := overo.o - - obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o -+obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o aic34b_dummy.o - obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o - obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o - obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/omap-mcbsp.c linux-omap-2.6.28-nokia1/sound/soc/omap/omap-mcbsp.c ---- linux-omap-2.6.28-omap1/sound/soc/omap/omap-mcbsp.c 2011-06-22 13:14:27.223067619 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/omap-mcbsp.c 2011-06-22 13:19:33.293063268 +0200 -@@ -36,9 +36,7 @@ - #include "omap-mcbsp.h" - #include "omap-pcm.h" - --#define OMAP_MCBSP_RATES (SNDRV_PCM_RATE_44100 | \ -- SNDRV_PCM_RATE_48000 | \ -- SNDRV_PCM_RATE_KNOT) -+#define OMAP_MCBSP_RATES (SNDRV_PCM_RATE_8000_96000) - - struct omap_mcbsp_data { - unsigned int bus_id; -@@ -178,6 +176,11 @@ static int omap_mcbsp_dai_trigger(struct - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - if (!mcbsp_data->active++) - omap_mcbsp_start(mcbsp_data->bus_id); -+ /* Make sure data transfer is frame synchronized */ -+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -+ omap_mcbsp_xmit_enable(mcbsp_data->bus_id, 1); -+ else -+ omap_mcbsp_recv_enable(mcbsp_data->bus_id, 1); - break; - - case SNDRV_PCM_TRIGGER_STOP: -@@ -201,8 +204,9 @@ static int omap_mcbsp_dai_hw_params(stru - struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); - struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; - int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id; -- int wlen; -+ int wlen, channels, wpf; - unsigned long port; -+ unsigned int format; - - if (cpu_class_is_omap1()) { - dma = omap1_dma_reqs[bus_id][substream->stream]; -@@ -230,13 +234,25 @@ static int omap_mcbsp_dai_hw_params(stru - return 0; - } - -- switch (params_channels(params)) { -+ format = mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK; -+ wpf = channels = params_channels(params); -+ switch (channels) { - case 2: -- /* Set 1 word per (McBPSP) frame and use dual-phase frames */ -- regs->rcr2 |= RFRLEN2(1 - 1) | RPHASE; -- regs->rcr1 |= RFRLEN1(1 - 1); -- regs->xcr2 |= XFRLEN2(1 - 1) | XPHASE; -- regs->xcr1 |= XFRLEN1(1 - 1); -+ if (format == SND_SOC_DAIFMT_I2S || -+ format == SND_SOC_DAIFMT_LEFT_J) { -+ /* Use dual-phase frames */ -+ regs->rcr2 |= RPHASE; -+ regs->xcr2 |= XPHASE; -+ /* Set 1 word per (McBSP) frame for phase1 and phase2 */ -+ wpf--; -+ regs->rcr2 |= RFRLEN2(wpf - 1); -+ regs->xcr2 |= XFRLEN2(wpf - 1); -+ } -+ case 1: -+ case 4: -+ /* Set word per (McBSP) frame for phase1 */ 
-+ regs->rcr1 |= RFRLEN1(wpf - 1); -+ regs->xcr1 |= XFRLEN1(wpf - 1); - break; - default: - /* Unsupported number of channels */ -@@ -258,14 +274,16 @@ static int omap_mcbsp_dai_hw_params(stru - } - - /* Set FS period and length in terms of bit clock periods */ -- switch (mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK) { -+ switch (format) { - case SND_SOC_DAIFMT_I2S: -+ case SND_SOC_DAIFMT_LEFT_J: - regs->srgr2 |= FPER(wlen * 2 - 1); - regs->srgr1 |= FWID(wlen - 1); - break; - case SND_SOC_DAIFMT_DSP_A: -- regs->srgr2 |= FPER(wlen * 2 - 1); -- regs->srgr1 |= FWID(wlen * 2 - 2); -+ case SND_SOC_DAIFMT_DSP_B: -+ regs->srgr2 |= FPER(wlen * channels - 1); -+ regs->srgr1 |= FWID(0); - break; - } - -@@ -284,6 +302,7 @@ static int omap_mcbsp_dai_set_dai_fmt(st - { - struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); - struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; -+ unsigned int temp_fmt = fmt; - - if (mcbsp_data->configured) - return 0; -@@ -293,19 +312,42 @@ static int omap_mcbsp_dai_set_dai_fmt(st - /* Generic McBSP register settings */ - regs->spcr2 |= XINTM(3) | FREE; - regs->spcr1 |= RINTM(3); -- regs->rcr2 |= RFIG; -- regs->xcr2 |= XFIG; -+ /* RFIG and XFIG are not defined in 34xx */ -+ if (!cpu_is_omap34xx()) { -+ regs->rcr2 |= RFIG; -+ regs->xcr2 |= XFIG; -+ } - - switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { - case SND_SOC_DAIFMT_I2S: - /* 1-bit data delay */ - regs->rcr2 |= RDATDLY(1); - regs->xcr2 |= XDATDLY(1); -+ regs->rccr |= RFULL_CYCLE | RDMAEN | RDISABLE; -+ regs->xccr |= (DXENDLY(1) | XDMAEN | XDISABLE); -+ break; -+ case SND_SOC_DAIFMT_LEFT_J: -+ regs->rcr2 |= RDATDLY(0); -+ regs->xcr2 |= XDATDLY(0); -+ regs->spcr1 |= RJUST(2); -+ regs->rccr |= RFULL_CYCLE | RDMAEN | RDISABLE; -+ regs->xccr |= (DXENDLY(1) | XDMAEN | XDISABLE); - break; - case SND_SOC_DAIFMT_DSP_A: -+ /* 1-bit data delay */ -+ regs->rcr2 |= RDATDLY(1); -+ regs->xcr2 |= XDATDLY(1); -+ regs->rccr |= RFULL_CYCLE | RDMAEN | RDISABLE; -+ regs->xccr |= (DXENDLY(1) | XDMAEN | XDISABLE); -+ temp_fmt ^= SND_SOC_DAIFMT_NB_IF; -+ break; -+ case SND_SOC_DAIFMT_DSP_B: - /* 0-bit data delay */ - regs->rcr2 |= RDATDLY(0); - regs->xcr2 |= XDATDLY(0); -+ regs->rccr |= RFULL_CYCLE | RDMAEN | RDISABLE; -+ regs->xccr |= (DXENDLY(1) | XDMAEN | XDISABLE); -+ temp_fmt ^= SND_SOC_DAIFMT_NB_IF; - break; - default: - /* Unsupported data format */ -@@ -329,7 +371,7 @@ static int omap_mcbsp_dai_set_dai_fmt(st - } - - /* Set bit clock (CLKX/CLKR) and FS polarities */ -- switch (fmt & SND_SOC_DAIFMT_INV_MASK) { -+ switch (temp_fmt & SND_SOC_DAIFMT_INV_MASK) { - case SND_SOC_DAIFMT_NB_NF: - /* - * Normal BCLK + FS. 
-@@ -452,18 +494,18 @@ static int omap_mcbsp_dai_set_dai_sysclk - - #define OMAP_MCBSP_DAI_BUILDER(link_id) \ - { \ -- .name = "omap-mcbsp-dai-(link_id)", \ -+ .name = "omap-mcbsp-dai-"#link_id, \ - .id = (link_id), \ - .type = SND_SOC_DAI_I2S, \ - .playback = { \ -- .channels_min = 2, \ -- .channels_max = 2, \ -+ .channels_min = 1, \ -+ .channels_max = 4, \ - .rates = OMAP_MCBSP_RATES, \ - .formats = SNDRV_PCM_FMTBIT_S16_LE, \ - }, \ - .capture = { \ -- .channels_min = 2, \ -- .channels_max = 2, \ -+ .channels_min = 1, \ -+ .channels_max = 4, \ - .rates = OMAP_MCBSP_RATES, \ - .formats = SNDRV_PCM_FMTBIT_S16_LE, \ - }, \ -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/omap-pcm.c linux-omap-2.6.28-nokia1/sound/soc/omap/omap-pcm.c ---- linux-omap-2.6.28-omap1/sound/soc/omap/omap-pcm.c 2011-06-22 13:14:27.223067619 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/omap-pcm.c 2011-06-22 13:19:33.293063268 +0200 -@@ -27,10 +27,11 @@ - #include - #include - -+#include - #include - #include "omap-pcm.h" - --static const struct snd_pcm_hardware omap_pcm_hardware = { -+static struct snd_pcm_hardware omap_pcm_hardware = { - .info = SNDRV_PCM_INFO_MMAP | - SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_INTERLEAVED | -@@ -38,7 +39,6 @@ static const struct snd_pcm_hardware oma - SNDRV_PCM_INFO_RESUME, - .formats = SNDRV_PCM_FMTBIT_S16_LE, - .period_bytes_min = 32, -- .period_bytes_max = 64 * 1024, - .periods_min = 2, - .periods_max = 255, - .buffer_bytes_max = 128 * 1024, -@@ -49,6 +49,7 @@ struct omap_runtime_data { - struct omap_pcm_dma_data *dma_data; - int dma_ch; - int period_index; -+ int dma_op_mode; - }; - - static void omap_pcm_dma_irq(int ch, u16 stat, void *data) -@@ -97,7 +98,7 @@ static int omap_pcm_hw_params(struct snd - prtd->dma_data = dma_data; - err = omap_request_dma(dma_data->dma_req, dma_data->name, - omap_pcm_dma_irq, substream, &prtd->dma_ch); -- if (!err & !cpu_is_omap1510()) { -+ if (!err && !cpu_is_omap1510()) { - /* - * Link channel with itself so DMA doesn't need any - * reprogramming while looping the buffer -@@ -132,15 +133,24 @@ static int omap_pcm_prepare(struct snd_p - struct omap_runtime_data *prtd = runtime->private_data; - struct omap_pcm_dma_data *dma_data = prtd->dma_data; - struct omap_dma_channel_params dma_params; -+ int sync_mode; - - memset(&dma_params, 0, sizeof(dma_params)); -+ -+ /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */ -+ if (cpu_is_omap34xx() && -+ (prtd->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)) -+ sync_mode = OMAP_DMA_SYNC_FRAME; -+ else -+ sync_mode = OMAP_DMA_SYNC_ELEMENT; + - /* - * Note: Regardless of interface data formats supported by OMAP McBSP - * or EAC blocks, internal representation is always fixed 16-bit/sample - */ - dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; - dma_params.trigger = dma_data->dma_req; -- dma_params.sync_mode = OMAP_DMA_SYNC_ELEMENT; -+ dma_params.sync_mode = sync_mode; - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; - dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; -@@ -168,6 +178,9 @@ static int omap_pcm_prepare(struct snd_p - - omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ); - -+ omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16); -+ omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16); -+ - return 0; - } - -@@ -175,14 +188,31 @@ static int omap_pcm_trigger(struct snd_p - { - struct snd_pcm_runtime *runtime = substream->runtime; - struct omap_runtime_data *prtd = runtime->private_data; -+ struct snd_soc_pcm_runtime 
*rtd = substream->private_data; -+ unsigned long flags; - int ret = 0; -+ unsigned int bus_id = *(unsigned int *)rtd->dai->cpu_dai->private_data; -+ u16 samples; ++#endif /* _SSI_CHAR_DEBUG_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-char.h kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-char.h +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-char.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-char.h 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,36 @@ ++/* ++ * ssi-char.h ++ * ++ * Part of the SSI character device driver. ++ * ++ * Copyright (C) 2009 Nokia Corporation. All rights reserved. ++ * ++ * Contact: Andras Domokos ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ + -+ spin_lock_irqsave(&prtd->lock, flags); + -+ /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */ -+ if (prtd->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) -+ samples = snd_pcm_lib_period_bytes(substream) >> 1; -+ else -+ samples = 1; - -- spin_lock_irq(&prtd->lock); - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - prtd->period_index = 0; -+ /* Configure McBSP internal buffer usage */ -+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -+ omap_mcbsp_set_tx_threshold(bus_id, samples - 1); -+ else -+ omap_mcbsp_set_rx_threshold(bus_id, samples - 1); ++#ifndef _SSI_CHAR_H ++#define _SSI_CHAR_H + - omap_start_dma(prtd->dma_ch); - break; - -@@ -195,7 +225,7 @@ static int omap_pcm_trigger(struct snd_p - default: - ret = -EINVAL; - } -- spin_unlock_irq(&prtd->lock); -+ spin_unlock_irqrestore(&prtd->lock, flags); - - return ret; - } -@@ -223,7 +253,25 @@ static int omap_pcm_open(struct snd_pcm_ - { - struct snd_pcm_runtime *runtime = substream->runtime; - struct omap_runtime_data *prtd; -+ struct snd_soc_pcm_runtime *rtd = substream->private_data; -+ unsigned int bus_id = *(unsigned int *)rtd->dai->cpu_dai->private_data; -+ int dma_op_mode = omap_mcbsp_get_dma_op_mode(bus_id); - int ret; -+ int max_period; ++#include "ssi-if.h" + -+ /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */ -+ if (cpu_is_omap34xx() && (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)) { -+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -+ max_period = omap_mcbsp_get_max_tx_threshold(bus_id); -+ else -+ max_period = omap_mcbsp_get_max_rx_threshold(bus_id); -+ max_period++; -+ max_period <<= 1; -+ } else { -+ max_period = 64 * 1024; -+ } ++/* how many char devices would be created at most */ ++#define SSI_MAX_CHAR_DEVS 8 + -+ omap_pcm_hardware.period_bytes_max = max_period; - - snd_soc_set_runtime_hwparams(substream, &omap_pcm_hardware); - -@@ -238,6 +286,7 @@ static int omap_pcm_open(struct snd_pcm_ - ret = -ENOMEM; - goto out; - } -+ prtd->dma_op_mode = dma_op_mode; - spin_lock_init(&prtd->lock); - runtime->private_data = prtd; - -@@ -276,7 +325,7 @@ struct 
snd_pcm_ops omap_pcm_ops = { - .mmap = omap_pcm_mmap, - }; - --static u64 omap_pcm_dmamask = DMA_BIT_MASK(32); -+static u64 omap_pcm_dmamask = DMA_BIT_MASK(64); - - static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, - int stream) -@@ -326,7 +375,7 @@ int omap_pcm_new(struct snd_card *card, - if (!card->dev->dma_mask) - card->dev->dma_mask = &omap_pcm_dmamask; - if (!card->dev->coherent_dma_mask) -- card->dev->coherent_dma_mask = DMA_32BIT_MASK; -+ card->dev->coherent_dma_mask = DMA_64BIT_MASK; - - if (dai->playback.channels_min) { - ret = omap_pcm_preallocate_dma_buffer(pcm, -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/rx51.c linux-omap-2.6.28-nokia1/sound/soc/omap/rx51.c ---- linux-omap-2.6.28-omap1/sound/soc/omap/rx51.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/rx51.c 2011-06-22 13:19:33.293063268 +0200 -@@ -0,0 +1,923 @@ ++void if_notify(int ch, struct ssi_event *ev); ++ ++#endif /* _SSI_CHAR_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-if.c kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-if.c +--- kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-if.c 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-if.c 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,625 @@ +/* -+ * n810.c -- SoC audio for Nokia RX51 ++ * ssi-if.c ++ * ++ * Part of the SSI character driver, implements the SSI interface. + * -+ * Copyright (C) 2008 Nokia Corporation ++ * Copyright (C) 2009 Nokia Corporation. All rights reserved. + * -+ * Contact: Jarkko Nikula ++ * Contact: Andras Domokos + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License @@ -284452,1032 +6381,1930 @@ diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/rx51.c linux-omap-2.6.28-nokia + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA -+ * + */ + -+#include -+#include ++#include ++#include ++#include ++#include ++#include +#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include +#include -+#include -+#include -+#include ++#include ++#include ++#include ++#include ++ ++#include ++#include + -+#include "omap-mcbsp.h" -+#include "omap-pcm.h" -+#include "../codecs/tlv320aic3x.h" -+#include "aic34b_dummy.h" ++#include "ssi-char-debug.h" ++#include "ssi-char.h" ++#include "ssi-if.h" + -+#define RX51_CODEC_RESET_GPIO 60 -+#define RX51_TVOUT_SEL_GPIO 40 -+#define RX51_ECI_SWITCH_1_GPIO 178 -+#define RX51_ECI_SWITCH_2_GPIO 182 -+/* REVISIT: TWL4030 GPIO base in RX51. 
Now statically defined to 192 */ -+#define RX51_SPEAKER_AMP_TWL_GPIO (192 + 7) ++#define SSI_CHANNEL_STATE_UNAVAIL (1 << 0) ++#define SSI_CHANNEL_STATE_READING (1 << 1) ++#define SSI_CHANNEL_STATE_WRITING (1 << 2) + -+enum { -+ RX51_JACK_DISABLED, -+ RX51_JACK_HP, /* headphone: stereo output, no mic */ -+ RX51_JACK_HS, /* headset: stereo output with mic */ -+ RX51_JACK_MIC, /* mic input only */ -+ RX51_JACK_ECI, /* ECI headset */ -+ RX51_JACK_TVOUT, /* stereo output with tv-out */ ++#define PORT1 0 ++#define PORT2 1 ++ ++#define SSI_RX_PARAM(cfg, mod, fsize, n, tmo) \ ++ do { \ ++ (cfg)->mode = mod; \ ++ (cfg)->frame_size = fsize; \ ++ (cfg)->channels = n; \ ++ (cfg)->timeout = tmo; \ ++ } while (0) ++ ++#define SSI_TX_PARAM(cfg, mod, fsize, n, div, arb) \ ++ do { \ ++ (cfg)->mode = mod; \ ++ (cfg)->frame_size = fsize; \ ++ (cfg)->channels = n; \ ++ (cfg)->divisor = div; \ ++ (cfg)->arb_mode = arb; \ ++ } while (0) ++ ++#define RXCONV(dst, src) \ ++ do { \ ++ (dst)->mode = (src)->mode; \ ++ (dst)->frame_size = (src)->frame_size; \ ++ (dst)->channels = (src)->channels; \ ++ (dst)->timeout = (src)->timeout; \ ++ } while (0) ++ ++#define TXCONV(dst, src) \ ++ do { \ ++ (dst)->mode = (src)->mode; \ ++ (dst)->frame_size = (src)->frame_size; \ ++ (dst)->channels = (src)->channels; \ ++ (dst)->divisor = (src)->divisor; \ ++ (dst)->arb_mode = (src)->arb_mode; \ ++ } while (0) ++ ++struct if_ssi_channel { ++ struct ssi_device *dev; ++ unsigned int channel_id; ++ u32 *tx_data; ++ unsigned int tx_count; ++ u32 *rx_data; ++ unsigned int rx_count; ++ unsigned int opened; ++ unsigned int state; ++ spinlock_t lock; +}; + -+static int hp_lim = 63; -+module_param(hp_lim, int, 0); ++struct if_ssi_iface { ++ struct if_ssi_channel channels[SSI_MAX_CHAR_DEVS]; ++ int bootstrap; ++ spinlock_t lock; ++}; ++ ++static void if_ssi_port_event(struct ssi_device *dev, unsigned int event, ++ void *arg); ++static int __devinit if_ssi_probe(struct ssi_device *dev); ++static int __devexit if_ssi_remove(struct ssi_device *dev); + -+static int rx51_new_hw_audio; -+static int rx51_spk_func; -+static int rx51_jack_func; -+static int rx51_fmtx_func; -+static int rx51_dmic_func; -+static int rx51_ear_func; -+static struct snd_jack *rx51_jack; ++static struct ssi_device_driver if_ssi_char_driver = { ++ .ctrl_mask = ANY_SSI_CONTROLLER, ++ .probe = if_ssi_probe, ++ .remove = __devexit_p(if_ssi_remove), ++ .driver = { ++ .name = "ssi_char" ++ }, ++}; + -+static DEFINE_MUTEX(eci_mutex); -+static int rx51_eci_mode = 1; -+static int rx51_dapm_jack_bias; -+static int tpa6130_volume = -1; -+static int tpa6130_enable; -+static int aic34b_volume; ++static struct if_ssi_iface ssi_iface; + -+static void rx51_set_eci_switches(int mode) ++static int if_ssi_read_on(int ch, u32 *data, unsigned int count) +{ -+ switch (mode) { -+ case 0: /* Bias off */ -+ case 1: /* Bias according to rx51_dapm_jack_bias */ -+ case 4: /* Bias on */ -+ /* Codec connected to mic/bias line */ -+ gpio_set_value(RX51_ECI_SWITCH_1_GPIO, 0); -+ gpio_set_value(RX51_ECI_SWITCH_2_GPIO, 1); -+ break; -+ case 2: -+ /* ECI INT#2 detect connected to mic/bias line */ -+ gpio_set_value(RX51_ECI_SWITCH_1_GPIO, 0); -+ gpio_set_value(RX51_ECI_SWITCH_2_GPIO, 0); -+ break; -+ case 3: -+ /* ECI RX/TX connected to mic/bias line */ -+ gpio_set_value(RX51_ECI_SWITCH_1_GPIO, 1); -+ gpio_set_value(RX51_ECI_SWITCH_2_GPIO, 0); -+ break; ++ struct if_ssi_channel *channel; ++ int ret; ++ ++ channel = &ssi_iface.channels[ch]; ++ ++ spin_lock(&channel->lock); ++ if (channel->state & 
SSI_CHANNEL_STATE_READING) { ++ pr_err("Read still pending on channel %d\n", ch); ++ spin_unlock(&channel->lock); ++ return -EBUSY; + } ++ channel->state |= SSI_CHANNEL_STATE_READING; ++ channel->rx_data = data; ++ channel->rx_count = count; ++ spin_unlock(&channel->lock); ++ ++ ret = ssi_read(channel->dev, data, count/4); ++ ++ return ret; +} + -+static void rx51_set_jack_bias(void) ++static void if_ssi_read_done(struct ssi_device *dev) +{ -+ int enable_bias = 0; ++ struct if_ssi_channel *channel; ++ struct ssi_event ev; + -+ mutex_lock(&eci_mutex); -+ if ((rx51_eci_mode == 1 && rx51_dapm_jack_bias) || rx51_eci_mode == 4) -+ enable_bias = 1; -+ else if (rx51_eci_mode == 1 && rx51_jack_func == RX51_JACK_ECI) -+ enable_bias = 1; -+ mutex_unlock(&eci_mutex); -+ if (enable_bias) -+ aic34b_set_mic_bias(2); /* 2.5 V */ -+ else -+ aic34b_set_mic_bias(0); ++ channel = &ssi_iface.channels[dev->n_ch]; ++ spin_lock(&channel->lock); ++ channel->state &= ~SSI_CHANNEL_STATE_READING; ++ ev.event = SSI_EV_IN; ++ ev.data = channel->rx_data; ++ ev.count = channel->rx_count; ++ spin_unlock(&channel->lock); ++ if_notify(dev->n_ch, &ev); ++} ++ ++int if_ssi_read(int ch, u32 *data, unsigned int count) ++{ ++ int ret = 0; ++ spin_lock_bh(&ssi_iface.lock); ++ ret = if_ssi_read_on(ch, data, count); ++ spin_unlock_bh(&ssi_iface.lock); ++ return ret; +} + -+static void rx51_set_jack_bias_handler(struct work_struct *unused) ++int if_ssi_poll(int ch) +{ -+ rx51_set_jack_bias(); ++ struct if_ssi_channel *channel; ++ int ret = 0; ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ret = ssi_poll(channel->dev); ++ spin_unlock_bh(&ssi_iface.lock); ++ return ret; +} -+DECLARE_WORK(rx51_jack_bias_work, rx51_set_jack_bias_handler); + -+static void rx51_ext_control(struct snd_soc_codec *codec) ++static int if_ssi_write_on(int ch, u32 *address, unsigned int count) +{ -+ int hp = 0, mic = 0, tvout = 0; ++ struct if_ssi_channel *channel; ++ int ret; + -+ switch (rx51_jack_func) { -+ case RX51_JACK_ECI: -+ case RX51_JACK_HS: -+ mic = 1; -+ case RX51_JACK_HP: -+ hp = 1; -+ break; -+ case RX51_JACK_MIC: -+ mic = 1; -+ break; -+ case RX51_JACK_TVOUT: -+ hp = 1; -+ tvout = 1; -+ break; ++ channel = &ssi_iface.channels[ch]; ++ ++ spin_lock(&channel->lock); ++ if (channel->state & SSI_CHANNEL_STATE_WRITING) { ++ pr_err("Write still pending on channel %d\n", ch); ++ spin_unlock(&channel->lock); ++ return -EBUSY; + } + -+ gpio_set_value(RX51_TVOUT_SEL_GPIO, tvout); ++ channel->tx_data = address; ++ channel->tx_count = count; ++ channel->state |= SSI_CHANNEL_STATE_WRITING; ++ spin_unlock(&channel->lock); ++ ret = ssi_write(channel->dev, address, count/4); ++ return ret; ++} ++ ++static void if_ssi_write_done(struct ssi_device *dev) ++{ ++ struct if_ssi_channel *channel; ++ struct ssi_event ev; + -+ if (rx51_spk_func) -+ snd_soc_dapm_enable_pin(codec, "Ext Spk"); -+ else -+ snd_soc_dapm_disable_pin(codec, "Ext Spk"); -+ if (hp) -+ snd_soc_dapm_enable_pin(codec, "Headphone Jack"); -+ else -+ snd_soc_dapm_disable_pin(codec, "Headphone Jack"); -+ if (mic) -+ snd_soc_dapm_enable_pin(codec, "Mic Jack"); -+ else -+ snd_soc_dapm_disable_pin(codec, "Mic Jack"); -+ if (rx51_fmtx_func) -+ snd_soc_dapm_enable_pin(codec, "FM Transmitter"); -+ else -+ snd_soc_dapm_disable_pin(codec, "FM Transmitter"); -+ if (rx51_dmic_func) -+ snd_soc_dapm_enable_pin(codec, "DMic"); -+ else -+ snd_soc_dapm_disable_pin(codec, "DMic"); -+ if (rx51_ear_func) -+ snd_soc_dapm_enable_pin(codec, "Earphone"); -+ else -+ snd_soc_dapm_disable_pin(codec, 
"Earphone"); ++ channel = &ssi_iface.channels[dev->n_ch]; + -+ snd_soc_dapm_sync(codec); ++ spin_lock(&channel->lock); ++ channel->state &= ~SSI_CHANNEL_STATE_WRITING; ++ ev.event = SSI_EV_OUT; ++ ev.data = channel->tx_data; ++ ev.count = channel->tx_count; ++ spin_unlock(&channel->lock); ++ if_notify(dev->n_ch, &ev); +} + -+int rx51_set_eci_mode(int mode) ++int if_ssi_write(int ch, u32 *data, unsigned int count) +{ -+ if (mode < 0 || mode > 4) -+ return -EINVAL; ++ int ret = 0; + -+ mutex_lock(&eci_mutex); -+ if (rx51_eci_mode == mode) { -+ mutex_unlock(&eci_mutex); -+ return 0; -+ } ++ spin_lock_bh(&ssi_iface.lock); ++ ret = if_ssi_write_on(ch, data, count); ++ spin_unlock_bh(&ssi_iface.lock); ++ return ret; ++} + -+ rx51_eci_mode = mode; -+ rx51_set_eci_switches(rx51_eci_mode); -+ mutex_unlock(&eci_mutex); ++void if_ssi_send_break(int ch) ++{ ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, SSI_IOCTL_SEND_BREAK, NULL); ++ spin_unlock_bh(&ssi_iface.lock); ++} + -+ rx51_set_jack_bias(); ++void if_ssi_flush_rx(int ch) ++{ ++ struct if_ssi_channel *channel; + -+ return 0; ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, SSI_IOCTL_FLUSH_RX, NULL); ++ spin_unlock_bh(&ssi_iface.lock); +} -+EXPORT_SYMBOL(rx51_set_eci_mode); + -+static ssize_t eci_mode_show(struct device *dev, struct device_attribute *attr, -+ char *buf) ++void if_ssi_flush_ch(int ch) +{ -+ return sprintf(buf, "%d\n", rx51_eci_mode); ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock(&channel->lock); ++ spin_unlock(&channel->lock); +} + -+static ssize_t eci_mode_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++void if_ssi_flush_tx(int ch) +{ -+ int mode, retval; -+ if (sscanf(buf, "%d", &mode) != 1) -+ return -EINVAL; -+ retval = rx51_set_eci_mode(mode); ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, SSI_IOCTL_FLUSH_TX, NULL); ++ spin_unlock_bh(&ssi_iface.lock); ++} + -+ return (retval < 0) ? 
retval : count; ++void if_ssi_get_wakeline(int ch, unsigned int *state) ++{ ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, SSI_IOCTL_WAKE, state); ++ spin_unlock_bh(&ssi_iface.lock); ++} ++ ++void if_ssi_set_wakeline(int ch, unsigned int state) ++{ ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, state, NULL); ++ spin_unlock_bh(&ssi_iface.lock); +} + -+static DEVICE_ATTR(eci_mode, S_IRUGO | S_IWUSR, -+ eci_mode_show, eci_mode_store); ++int if_ssi_set_rx(int ch, struct ssi_rx_config *cfg) ++{ ++ int ret; ++ struct if_ssi_channel *channel; ++ struct ssr_ctx ctx; ++ ++ RXCONV(&ctx, cfg); ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ret = ssi_ioctl(channel->dev, SSI_IOCTL_SET_RX, &ctx); ++ spin_unlock_bh(&ssi_iface.lock); ++ return ret; ++} + -+void rx51_jack_report(int status) ++void if_ssi_get_rx(int ch, struct ssi_rx_config *cfg) +{ -+ snd_jack_report(rx51_jack, status); ++ struct if_ssi_channel *channel; ++ struct ssr_ctx ctx; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, SSI_IOCTL_GET_RX, &ctx); ++ RXCONV(cfg, &ctx); ++ spin_unlock_bh(&ssi_iface.lock); +} -+EXPORT_SYMBOL(rx51_jack_report); + -+static int rx51_startup(struct snd_pcm_substream *substream) ++int if_ssi_set_tx(int ch, struct ssi_tx_config *cfg) +{ -+ struct snd_pcm_runtime *runtime = substream->runtime; -+ struct snd_soc_pcm_runtime *rtd = substream->private_data; -+ struct snd_soc_codec *codec = rtd->socdev->codec; ++ int ret; ++ struct if_ssi_channel *channel; ++ struct sst_ctx ctx; + -+ snd_pcm_hw_constraint_minmax(runtime, -+ SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2); ++ TXCONV(&ctx, cfg); ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ret = ssi_ioctl(channel->dev, SSI_IOCTL_SET_TX, &ctx); ++ spin_unlock_bh(&ssi_iface.lock); ++ return ret; ++} + -+ rx51_ext_control(codec); ++void if_ssi_get_tx(int ch, struct ssi_tx_config *cfg) ++{ ++ struct if_ssi_channel *channel; ++ struct sst_ctx ctx; + -+ return 0; ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ ssi_ioctl(channel->dev, SSI_IOCTL_GET_TX, &ctx); ++ TXCONV(cfg, &ctx); ++ spin_unlock_bh(&ssi_iface.lock); +} + -+static void rx51_shutdown(struct snd_pcm_substream *substream) ++void if_ssi_cancel_read(int ch) +{ ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock(&channel->lock); ++ if (channel->state & SSI_CHANNEL_STATE_READING) ++ ssi_read_cancel(channel->dev); ++ channel->state &= ~SSI_CHANNEL_STATE_READING; ++ spin_unlock(&channel->lock); +} + -+static int pre_events; ++void if_ssi_cancel_write(int ch) ++{ ++ struct if_ssi_channel *channel; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock(&channel->lock); ++ if (channel->state & SSI_CHANNEL_STATE_WRITING) ++ ssi_write_cancel(channel->dev); ++ channel->state &= ~SSI_CHANNEL_STATE_WRITING; ++ spin_unlock(&channel->lock); ++} + -+static int rx51_hw_params(struct snd_pcm_substream *substream, -+ struct snd_pcm_hw_params *params) ++static int if_ssi_openchannel(struct if_ssi_channel *channel) +{ -+ struct snd_soc_pcm_runtime *rtd = substream->private_data; -+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; -+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; -+ int err; ++ int ret = 0; + -+ if (rx51_new_hw_audio) { -+ if (!pre_events) { -+ pre_events = 1; -+ err = 
twl4030_enable_regulator(RES_VMMC2); -+ if (err < 0) -+ return err; -+ } ++ spin_lock(&channel->lock); ++ ++ if (channel->state == SSI_CHANNEL_STATE_UNAVAIL) ++ return -ENODEV; ++ ++ if (channel->opened) { ++ ret = -EBUSY; ++ goto leave; + } + -+ /* Set codec DAI configuration */ -+ err = snd_soc_dai_set_fmt(codec_dai, -+ SND_SOC_DAIFMT_DSP_A | -+ SND_SOC_DAIFMT_IB_NF | -+ SND_SOC_DAIFMT_CBM_CFM); -+ if (err < 0) -+ return err; ++ if (!channel->dev) { ++ pr_err("Channel %d is not ready??\n", ++ channel->channel_id); ++ ret = -ENODEV; ++ goto leave; ++ } + -+ /* Set cpu DAI configuration */ -+ err = snd_soc_dai_set_fmt(cpu_dai, -+ SND_SOC_DAIFMT_DSP_A | -+ SND_SOC_DAIFMT_IB_NF | -+ SND_SOC_DAIFMT_CBM_CFM); -+ if (err < 0) -+ return err; ++ ret = ssi_open(channel->dev); ++ if (ret < 0) { ++ pr_err("Could not open channel %d\n", ++ channel->channel_id); ++ goto leave; ++ } ++ ++ channel->opened = 1; + -+ /* Set the codec system clock for DAC and ADC */ -+ return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000, -+ SND_SOC_CLOCK_IN); ++leave: ++ spin_unlock(&channel->lock); ++ return ret; +} + -+static int rx51_bt_hw_params(struct snd_pcm_substream *substream, -+ struct snd_pcm_hw_params *params) ++ ++static int if_ssi_closechannel(struct if_ssi_channel *channel) +{ -+ struct snd_soc_pcm_runtime *rtd = substream->private_data; -+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; ++ int ret = 0; ++ ++ spin_lock(&channel->lock); ++ ++ if (!channel->opened) ++ goto leave; ++ ++ if (!channel->dev) { ++ pr_err("Channel %d is not ready??\n", ++ channel->channel_id); ++ ret = -ENODEV; ++ goto leave; ++ } ++ ++ /* Stop any pending read/write */ ++ if (channel->state & SSI_CHANNEL_STATE_READING) { ++ ssi_read_cancel(channel->dev); ++ channel->state &= ~SSI_CHANNEL_STATE_READING; ++ } ++ if (channel->state & SSI_CHANNEL_STATE_WRITING) { ++ ssi_write_cancel(channel->dev); ++ channel->state &= ~SSI_CHANNEL_STATE_WRITING; ++ } ++ ++ ssi_close(channel->dev); + -+ /* Set cpu DAI configuration */ -+ return cpu_dai->dai_ops.set_fmt(cpu_dai, -+ SND_SOC_DAIFMT_DSP_A | -+ SND_SOC_DAIFMT_IB_NF | -+ SND_SOC_DAIFMT_CBM_CFM); ++ channel->opened = 0; ++leave: ++ spin_unlock(&channel->lock); ++ return ret; +} + -+static struct snd_soc_ops rx51_bt_ops = { -+ .hw_params = rx51_bt_hw_params, -+}; + -+static struct snd_soc_ops rx51_ops = { -+ .startup = rx51_startup, -+ .hw_params = rx51_hw_params, -+ .shutdown = rx51_shutdown, -+}; ++int if_ssi_start(int ch) ++{ ++ struct if_ssi_channel *channel; ++ int ret = 0; ++ ++ channel = &ssi_iface.channels[ch]; ++ spin_lock_bh(&ssi_iface.lock); ++ channel->state = 0; ++ ret = if_ssi_openchannel(channel); ++ if (ret < 0) { ++ pr_err("Could not open channel %d\n", ch); ++ spin_unlock_bh(&ssi_iface.lock); ++ goto error; ++ } ++ if_ssi_poll(ch); ++ spin_unlock_bh(&ssi_iface.lock); ++ ++error: ++ return ret; ++} + -+static int rx51_get_spk(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) ++void if_ssi_stop(int ch) +{ -+ ucontrol->value.integer.value[0] = rx51_spk_func; ++ struct if_ssi_channel *channel; ++ channel = &ssi_iface.channels[ch]; ++ if_ssi_set_wakeline(ch, 1); ++ spin_lock_bh(&ssi_iface.lock); ++ if_ssi_closechannel(channel); ++ spin_unlock_bh(&ssi_iface.lock); ++} + -+ return 0; ++static int __devinit if_ssi_probe(struct ssi_device *dev) ++{ ++ struct if_ssi_channel *channel; ++ unsigned long *address; ++ int ret = -ENXIO, port; ++ ++ for (port = 0; port < SSI_MAX_PORTS; port++) { ++ if (if_ssi_char_driver.ch_mask[port]) ++ break; ++ } ++ ++ if (port == 
SSI_MAX_PORTS) ++ return -ENXIO; ++ ++ address = &if_ssi_char_driver.ch_mask[port]; ++ ++ spin_lock_bh(&ssi_iface.lock); ++ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) { ++ ssi_set_read_cb(dev, if_ssi_read_done); ++ ssi_set_write_cb(dev, if_ssi_write_done); ++ ssi_set_port_event_cb(dev, if_ssi_port_event); ++ channel = &ssi_iface.channels[dev->n_ch]; ++ channel->dev = dev; ++ channel->state = 0; ++ ret = 0; ++ } ++ spin_unlock_bh(&ssi_iface.lock); ++ ++ return ret; +} + -+static int rx51_set_spk(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) ++static int __devexit if_ssi_remove(struct ssi_device *dev) +{ -+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); ++ struct if_ssi_channel *channel; ++ unsigned long *address; ++ int ret = -ENXIO, port; + -+ if (rx51_spk_func == ucontrol->value.integer.value[0]) -+ return 0; ++ for (port = 0; port < SSI_MAX_PORTS; port++) { ++ if (if_ssi_char_driver.ch_mask[port]) ++ break; ++ } ++ ++ if (port == SSI_MAX_PORTS) ++ return -ENXIO; ++ ++ address = &if_ssi_char_driver.ch_mask[port]; + -+ rx51_spk_func = ucontrol->value.integer.value[0]; -+ rx51_ext_control(codec); ++ spin_lock_bh(&ssi_iface.lock); ++ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) { ++ ssi_set_read_cb(dev, NULL); ++ ssi_set_write_cb(dev, NULL); ++ channel = &ssi_iface.channels[dev->n_ch]; ++ channel->dev = NULL; ++ channel->state = SSI_CHANNEL_STATE_UNAVAIL; ++ ret = 0; ++ } ++ spin_unlock_bh(&ssi_iface.lock); + -+ return 1; ++ return ret; +} + -+static int rx51_spk_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) ++static void if_ssi_port_event(struct ssi_device *dev, unsigned int event, ++ void *arg) +{ -+ if (SND_SOC_DAPM_EVENT_ON(event)) -+ gpio_set_value(RX51_SPEAKER_AMP_TWL_GPIO, 1); -+ else -+ gpio_set_value(RX51_SPEAKER_AMP_TWL_GPIO, 0); ++ struct ssi_event ev; ++ int i; + -+ return 0; ++ ev.event = SSI_EV_EXCEP; ++ ev.data = (u32 *)0; ++ ev.count = 0; ++ ++ switch (event) { ++ case SSI_EVENT_BREAK_DETECTED: ++ ev.data = (u32 *)SSI_HWBREAK; ++ spin_lock_bh(&ssi_iface.lock); ++ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { ++ if (ssi_iface.channels[i].opened) ++ if_notify(i, &ev); ++ } ++ spin_unlock_bh(&ssi_iface.lock); ++ break; ++ case SSI_EVENT_SSR_DATAAVAILABLE: ++ i = (int)arg; ++ ev.event = SSI_EV_AVAIL; ++ spin_lock_bh(&ssi_iface.lock); ++ if (ssi_iface.channels[i].opened) ++ if_notify(i, &ev); ++ spin_unlock_bh(&ssi_iface.lock); ++ break; ++ case SSI_EVENT_CAWAKE_UP: ++ break; ++ case SSI_EVENT_CAWAKE_DOWN: ++ break; ++ case SSI_EVENT_ERROR: ++ break; ++ default: ++ printk(KERN_DEBUG "%s, Unknown event(%d)\n", __func__, event); ++ break; ++ } +} + -+static int rx51_get_jack(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) ++int __init if_ssi_init(unsigned int port, unsigned int *channels_map) +{ -+ ucontrol->value.integer.value[0] = rx51_jack_func; -+ -+ return 0; -+} ++ struct if_ssi_channel *channel; ++ int i, ret = 0; + -+static int rx51_set_jack(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); ++ port -= 1; ++ if (port >= SSI_MAX_PORTS) ++ return -EINVAL; + -+ if (rx51_jack_func == ucontrol->value.integer.value[0]) -+ return 0; ++ ssi_iface.bootstrap = 1; ++ spin_lock_init(&ssi_iface.lock); + -+ rx51_jack_func = ucontrol->value.integer.value[0]; ++ for (i = 0; i < SSI_MAX_PORTS; i++) ++ if_ssi_char_driver.ch_mask[i] = 0; + -+ mutex_lock(&eci_mutex); -+ if (rx51_jack_func == 
RX51_JACK_ECI) { -+ /* Set ECI switches according to ECI mode */ -+ rx51_set_eci_switches(rx51_eci_mode); -+ schedule_work(&rx51_jack_bias_work); -+ } else { -+ /* -+ * Let the codec always be connected to mic/bias line when -+ * jack is in non-ECI function -+ */ -+ rx51_set_eci_switches(1); -+ schedule_work(&rx51_jack_bias_work); ++ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { ++ channel = &ssi_iface.channels[i]; ++ channel->dev = NULL; ++ channel->opened = 0; ++ channel->state = 0; ++ channel->channel_id = i; ++ spin_lock_init(&channel->lock); ++ channel->state = SSI_CHANNEL_STATE_UNAVAIL; + } -+ mutex_unlock(&eci_mutex); + -+ rx51_ext_control(codec); -+ -+ return 1; -+} -+ -+static int rx51_jack_hp_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) -+{ -+ /* -+ * Note: HP amp and fmtx must not be enabled at the same -+ * time. We keep a shadow copy of the desired tpa_enable value but -+ * keep the hpamp really disabled whenever fmtx is enabled. If -+ * hpamp is requested on but fmtx is enabled, hpamp is kept -+ * disabled and enabled later from rx51_set_fmtx function when -+ * user disables fmtx. -+ */ -+ if (SND_SOC_DAPM_EVENT_ON(event)) { -+ if (!rx51_fmtx_func) -+ tpa6130a2_set_enabled(1); -+ tpa6130_enable = 1; -+ } else { -+ tpa6130a2_set_enabled(0); -+ tpa6130_enable = 0; ++ for (i = 0; (i < SSI_MAX_CHAR_DEVS) && channels_map[i]; i++) { ++ if ((channels_map[i] - 1) < SSI_MAX_CHAR_DEVS) ++ if_ssi_char_driver.ch_mask[port] |= (1 << ((channels_map[i] - 1))); + } + -+ return 0; -+} -+ -+static int rx51_jack_mic_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) -+{ -+ if (SND_SOC_DAPM_EVENT_ON(event)) -+ rx51_dapm_jack_bias = 1; -+ else -+ rx51_dapm_jack_bias = 0; -+ schedule_work(&rx51_jack_bias_work); -+ -+ return 0; -+} -+ -+static int rx51_get_fmtx(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ ucontrol->value.integer.value[0] = rx51_fmtx_func; ++ ret = register_ssi_driver(&if_ssi_char_driver); ++ if (ret) ++ pr_err("Error while registering SSI driver %d", ret); + -+ return 0; ++ return ret; +} + -+static int rx51_set_fmtx(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) ++int __exit if_ssi_exit(void) +{ -+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); -+ -+ if (rx51_fmtx_func == ucontrol->value.integer.value[0]) -+ return 0; -+ -+ rx51_fmtx_func = ucontrol->value.integer.value[0]; -+ rx51_ext_control(codec); ++ struct if_ssi_channel *channel; ++ unsigned long *address; ++ int i, port; + -+ /* fmtx and tpa must not be enabled at the same time */ -+ if (rx51_fmtx_func && tpa6130_enable) -+ tpa6130a2_set_enabled(0); -+ if (!rx51_fmtx_func && tpa6130_enable) -+ tpa6130a2_set_enabled(1); ++ for (port = 0; port < SSI_MAX_PORTS; port++) { ++ if (if_ssi_char_driver.ch_mask[port]) ++ break; ++ } + -+ return 1; -+} ++ if (port == SSI_MAX_PORTS) ++ return -ENXIO; + -+static int rx51_get_input(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ ucontrol->value.integer.value[0] = rx51_dmic_func; ++ address = &if_ssi_char_driver.ch_mask[port]; + ++ for (i = 0; i < SSI_MAX_CHAR_DEVS; i++) { ++ channel = &ssi_iface.channels[i]; ++ if (channel->opened) { ++ if_ssi_set_wakeline(i, 1); ++ if_ssi_closechannel(channel); ++ } ++ } ++ unregister_ssi_driver(&if_ssi_char_driver); + return 0; +} +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-if.h kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-if.h +--- 
kernel-2.6.28-20094102.6+0m5/drivers/misc/ssi-char/ssi-if.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/misc/ssi-char/ssi-if.h 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,70 @@ ++/* ++ * ssi-if.h ++ * ++ * Part of the SSI character driver. ++ * ++ * Copyright (C) 2009 Nokia Corporation. All rights reserved. ++ * ++ * Contact: Andras Domokos ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ + -+static int rx51_set_input(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + -+ if (rx51_dmic_func == ucontrol->value.integer.value[0]) -+ return 0; ++#ifndef _SSI_IF_H ++#define _SSI_IF_H + -+ rx51_dmic_func = ucontrol->value.integer.value[0]; -+ rx51_ext_control(codec); ++#define SSI_EV_MASK (0xffff << 0) ++#define SSI_EV_TYPE_MASK (0x0f << 16) ++#define SSI_EV_IN (0x01 << 16) ++#define SSI_EV_OUT (0x02 << 16) ++#define SSI_EV_EXCEP (0x03 << 16) ++#define SSI_EV_AVAIL (0x04 << 16) ++#define SSI_EV_TYPE(event) ((event) & SSI_EV_TYPE_MASK) + -+ return 1; -+} ++#define SSI_HWBREAK 1 ++#define SSI_ERROR 2 + -+static int rx51_get_ear(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ ucontrol->value.integer.value[0] = rx51_ear_func; ++struct ssi_event { ++ unsigned int event; ++ u32 *data; ++ unsigned int count; ++}; + -+ return 0; -+} ++int if_ssi_init(unsigned int port, unsigned int *channels_map); ++int if_ssi_exit(void); + -+static int rx51_set_ear(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); ++int if_ssi_start(int ch); ++void if_ssi_stop(int ch); + -+ if (rx51_ear_func == ucontrol->value.integer.value[0]) -+ return 0; ++void if_ssi_send_break(int ch); ++void if_ssi_flush_rx(int ch); ++void if_ssi_flush_tx(int ch); ++void if_ssi_bootstrap(int ch); ++void if_ssi_set_wakeline(int ch, unsigned int state); ++void if_ssi_get_wakeline(int ch, unsigned int *state); ++int if_ssi_set_rx(int ch, struct ssi_rx_config *cfg); ++void if_ssi_get_rx(int ch, struct ssi_rx_config *cfg); ++int if_ssi_set_tx(int ch, struct ssi_tx_config *cfg); ++void if_ssi_get_tx(int ch, struct ssi_tx_config *cfg); + -+ rx51_ear_func = ucontrol->value.integer.value[0]; -+ rx51_ext_control(codec); ++int if_ssi_read(int ch, u32 *data, unsigned int count); ++int if_ssi_poll(int ch); ++int if_ssi_write(int ch, u32 *data, unsigned int count); + -+ return 1; -+} ++void if_ssi_cancel_read(int ch); ++void if_ssi_cancel_write(int ch); + -+static int rx51_ear_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) -+{ -+ if (SND_SOC_DAPM_EVENT_ON(event)) -+ aic34b_ear_enable(1); -+ else -+ aic34b_ear_enable(0); ++#endif /* _SSI_IF_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/mmc/card/block.c kernel-2.6.28-20094803.3+0m5/drivers/mmc/card/block.c +--- 
kernel-2.6.28-20094102.6+0m5/drivers/mmc/card/block.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/mmc/card/block.c 2011-09-04 11:37:54.000000000 +0200 +@@ -83,7 +83,14 @@ static void mmc_blk_put(struct mmc_blk_d + mutex_lock(&open_lock); + md->usage--; + if (md->usage == 0) { ++ int devmaj = MAJOR(disk_devt(md->disk)); + int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; + -+ return 0; -+} ++ if (!devmaj) ++ devidx = md->disk->first_minor >> MMC_SHIFT; + -+static int rx51_pre_spk_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) -+{ -+ if (!rx51_new_hw_audio) -+ return 0; ++ blk_cleanup_queue(md->queue.queue); + -+ if (SND_SOC_DAPM_EVENT_ON(event)) -+ return twl4030_enable_regulator(RES_VMMC2); + __clear_bit(devidx, dev_use); + + put_disk(md->disk); +@@ -312,6 +319,15 @@ static int mmc_blk_issue_rq(struct mmc_q + + mmc_wait_for_req(card->host, &brq.mrq); + ++ /* Give up early if the card has gone away */ ++ if (brq.cmd.error == -ENODEV || brq.data.error == -ENODEV || brq.stop.error == -ENODEV) { ++ req->cmd_flags |= REQ_QUIET; ++ spin_lock_irq(&md->lock); ++ ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); ++ spin_unlock_irq(&md->lock); ++ break; ++ } + -+ return 0; -+} + mmc_queue_bounce_post(mq); + + /* +@@ -363,6 +379,11 @@ static int mmc_blk_issue_rq(struct mmc_q + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + err = mmc_wait_for_cmd(card->host, &cmd, 5); ++ if (err == -ENODEV) { ++ /* Card was removed so quiet errors */ ++ req->cmd_flags |= REQ_QUIET; ++ goto cmd_err; ++ } + if (err) { + printk(KERN_ERR "%s: error %d requesting status\n", + req->rq_disk->disk_name, err); +@@ -583,8 +604,11 @@ static int mmc_blk_probe(struct mmc_card + return PTR_ERR(md); + + err = mmc_blk_set_blksize(md, card); +- if (err) +- goto out; ++ if (err) { ++ mmc_cleanup_queue(&md->queue); ++ mmc_blk_put(md); ++ return err; ++ } + + string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, + cap_str, sizeof(cap_str)); +@@ -595,11 +619,6 @@ static int mmc_blk_probe(struct mmc_card + mmc_set_drvdata(card, md); + add_disk(md->disk); + return 0; +- +- out: +- mmc_blk_put(md); +- +- return err; + } + + static void mmc_blk_remove(struct mmc_card *card) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/mmc/card/queue.c kernel-2.6.28-20094803.3+0m5/drivers/mmc/card/queue.c +--- kernel-2.6.28-20094102.6+0m5/drivers/mmc/card/queue.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/drivers/mmc/card/queue.c 2011-09-04 11:37:54.000000000 +0200 +@@ -91,9 +91,9 @@ static void mmc_request(struct request_q + int ret; + + if (!mq) { +- printk(KERN_ERR "MMC: killing requests for dead queue\n"); + while ((req = elv_next_request(q)) != NULL) { + do { ++ req->cmd_flags |= REQ_QUIET; + ret = __blk_end_request(req, -EIO, + blk_rq_cur_bytes(req)); + } while (ret); +@@ -228,17 +228,18 @@ void mmc_cleanup_queue(struct mmc_queue + struct request_queue *q = mq->queue; + unsigned long flags; + +- /* Mark that we should start throwing out stragglers */ +- spin_lock_irqsave(q->queue_lock, flags); +- q->queuedata = NULL; +- spin_unlock_irqrestore(q->queue_lock, flags); +- + /* Make sure the queue isn't suspended, as that will deadlock */ + mmc_queue_resume(mq); + + /* Then terminate our worker thread */ + kthread_stop(mq->thread); + ++ /* Empty the queue */ ++ spin_lock_irqsave(q->queue_lock, flags); ++ q->queuedata = NULL; ++ blk_start_queue(q); ++ spin_unlock_irqrestore(q->queue_lock, flags); + -+static int 
rx51_post_spk_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) + if (mq->bounce_sg) + kfree(mq->bounce_sg); + mq->bounce_sg = NULL; +@@ -250,8 +251,6 @@ void mmc_cleanup_queue(struct mmc_queue + kfree(mq->bounce_buf); + mq->bounce_buf = NULL; + +- blk_cleanup_queue(mq->queue); +- + mq->card = NULL; + } + EXPORT_SYMBOL(mmc_cleanup_queue); +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/mmc/host/omap_hsmmc.c kernel-2.6.28-20094803.3+0m5/drivers/mmc/host/omap_hsmmc.c +--- kernel-2.6.28-20094102.6+0m5/drivers/mmc/host/omap_hsmmc.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/mmc/host/omap_hsmmc.c 2011-09-04 11:37:54.000000000 +0200 +@@ -1058,9 +1058,9 @@ static void omap_hsmmc_request(struct mm + ; + host->reqs_blocked += 1; + } +- req->cmd->error = -EBADF; ++ req->cmd->error = -ENODEV; + if (req->data) +- req->data->error = -EBADF; ++ req->data->error = -ENODEV; + enable_irq(host->irq); + mmc_request_done(mmc, req); + return; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c 2011-09-04 11:37:54.000000000 +0200 +@@ -1040,6 +1040,34 @@ out: + return ret; + } + ++int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode, ++ u8 max_consecutive) +{ -+ if (!rx51_new_hw_audio) -+ return 0; -+ -+ if (!SND_SOC_DAPM_EVENT_ON(event)) -+ return twl4030_disable_regulator(RES_VMMC2); -+ -+ return 0; -+} ++ struct wl1251_acx_bet_enable *acx; ++ int ret; + -+static int rx51_pre_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) -+{ -+ if (!rx51_new_hw_audio) -+ return 0; ++ wl1251_debug(DEBUG_ACX, "acx bet enable"); + -+ if (SND_SOC_DAPM_EVENT_ON(event)) { -+ if (!pre_events) { -+ pre_events = 1; -+ return twl4030_enable_regulator(RES_VMMC2); -+ } ++ acx = kzalloc(sizeof(*acx), GFP_KERNEL); ++ if (!acx) { ++ ret = -ENOMEM; ++ goto out; + } + -+ return 0; -+} -+ -+static int rx51_post_event(struct snd_soc_dapm_widget *w, -+ struct snd_kcontrol *k, int event) -+{ -+ if (!rx51_new_hw_audio) -+ return 0; ++ acx->enable = mode; ++ acx->max_consecutive = max_consecutive; + -+ if (!SND_SOC_DAPM_EVENT_ON(event)) { -+ if (pre_events && !w->codec->active) { -+ pre_events = 0; -+ return twl4030_disable_regulator(RES_VMMC2); -+ } ++ ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx)); ++ if (ret < 0) { ++ wl1251_warning("wl1251 acx bet enable failed: %d", ret); ++ goto out; + } + -+ return 0; ++out: ++ kfree(acx); ++ return ret; +} + -+enum { -+ RX51_EXT_API_TPA6130, -+ RX51_EXT_API_AIC34B, + int wl1251_acx_ip_config(struct wl1251 *wl, bool enable, u8 *address, + u8 version) + { +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_acx.h kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_acx.h +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_acx.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_acx.h 2011-09-04 11:37:54.000000000 +0200 +@@ -1191,7 +1191,6 @@ struct wl1251_acx_mem_map { + u32 num_rx_mem_blocks; + } __attribute__ ((packed)); + +- + struct wl1251_acx_wr_tbtt_and_dtim { + + struct acx_header header; +@@ -1222,6 +1221,31 @@ struct wl1251_acx_arp_filter { + bytes of the the address are ignored.*/ + } 
__attribute__((packed)); + ++enum wl1251_acx_bet_mode { ++ WL1251_ACX_BET_DISABLE = 0, ++ WL1251_ACX_BET_ENABLE = 1, +}; -+#define SOC_RX51_EXT_SINGLE_TLV(xname, ext_api, max, tlv_array) \ -+{ \ -+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ -+ .name = xname, \ -+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ -+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ -+ .tlv.p = (tlv_array), \ -+ .info = rx51_ext_info_volsw, \ -+ .get = rx51_ext_get_volsw, \ -+ .put = rx51_ext_put_volsw, \ -+ .private_value = (ext_api) << 26 | (max) << 16, \ -+} + -+static int rx51_ext_info_volsw(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_info *uinfo) -+{ -+ int ext_api = (kcontrol->private_value >> 26) & 0x0f; -+ int max = (kcontrol->private_value >> 16) & 0xff; ++struct wl1251_acx_bet_enable { ++ struct acx_header header; + -+ if (ext_api == RX51_EXT_API_TPA6130) -+ if (hp_lim != max && hp_lim >= 2 && hp_lim <= 63) { -+ kcontrol->private_value &= ~(0xff << 16); -+ kcontrol->private_value |= (hp_lim << 16); -+ max = hp_lim; -+ } ++ /* ++ * Specifies if beacon early termination procedure is enabled or ++ * disabled, see enum wl1251_acx_bet_mode. ++ */ ++ u8 enable; + -+ if (max == 1) -+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; -+ else -+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; ++ /* ++ * Specifies the maximum number of consecutive beacons that may be ++ * early terminated. After this number is reached at least one full ++ * beacon must be correctly received in FW before beacon ET ++ * resumes. Range 0 - 255. ++ */ ++ u8 max_consecutive; + -+ uinfo->count = 1; -+ uinfo->value.integer.min = 0; -+ uinfo->value.integer.max = max; ++ u8 padding[2]; ++} __attribute__ ((packed)); + -+ return 0; -+} + /************************************************************************* + + Host Interrupt Register (WiLink -> Host) +@@ -1384,4 +1408,7 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl + int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); + int wl1251_acx_ip_config(struct wl1251 *wl, bool enable, u8 *address, + u8 version); ++int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode, ++ u8 max_consecutive); + -+static int rx51_ext_get_volsw(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) + #endif /* __WL1251_ACX_H__ */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_boot.c kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_boot.c +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_boot.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_boot.c 2011-09-04 11:37:54.000000000 +0200 +@@ -305,7 +305,7 @@ int wl1251_boot_run_firmware(struct wl12 + ROAMING_TRIGGER_LOW_RSSI_EVENT_ID | + ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID | + REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID | +- BT_PTA_PREDICTION_EVENT_ID; ++ BT_PTA_PREDICTION_EVENT_ID | PS_REPORT_EVENT_ID; + + ret = wl1251_event_unmask(wl); + if (ret < 0) { +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_event.c kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_event.c +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_event.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_event.c 2011-09-04 11:37:54.000000000 +0200 +@@ -31,10 +31,6 @@ + static int wl1251_event_scan_complete(struct wl1251 *wl, + struct event_mailbox *mbox) + { +- wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d", +- 
mbox->scheduled_scan_status, +- mbox->scheduled_scan_channels); +- + if (wl->scanning) { + mutex_unlock(&wl->mutex); + ieee80211_scan_completed(wl->hw); +@@ -46,11 +42,36 @@ static int wl1251_event_scan_complete(st + return 0; + } + +-static void wl1251_event_mbox_dump(struct event_mailbox *mbox) +-{ +- wl1251_debug(DEBUG_EVENT, "MBOX DUMP:"); +- wl1251_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector); +- wl1251_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); ++#define WL1251_PS_ENTRY_RETRIES 3 ++static int wl1251_event_ps_report(struct wl1251 *wl, ++ struct event_mailbox *mbox) +{ -+ int ext_api = (kcontrol->private_value >> 26) & 0x0f; -+ -+ switch (ext_api) { -+ case RX51_EXT_API_TPA6130: -+ if (tpa6130_volume < 0) -+ tpa6130_volume = tpa6130a2_get_volume(); -+ ucontrol->value.integer.value[0] = tpa6130_volume; -+ break; -+ case RX51_EXT_API_AIC34B: -+ ucontrol->value.integer.value[0] = aic34b_volume; -+ break; -+ default: -+ return -EINVAL; -+ } ++ int ret = 0; + -+ return 0; -+} ++ wl1251_debug(DEBUG_EVENT, "ps status: %x", mbox->ps_status); + -+static int rx51_ext_put_volsw(struct snd_kcontrol *kcontrol, -+ struct snd_ctl_elem_value *ucontrol) -+{ -+ int ext_api = (kcontrol->private_value >> 26) & 0x0f; -+ int change = 0; ++ switch (mbox->ps_status) { ++ case ENTER_POWER_SAVE_FAIL: ++ if (!wl->psm) { ++ wl->ps_entry_retry = 0; ++ break; ++ } + -+ switch (ext_api) { -+ case RX51_EXT_API_TPA6130: -+ change = (tpa6130_volume != ucontrol->value.integer.value[0]); -+ tpa6130_volume = ucontrol->value.integer.value[0]; -+ tpa6130a2_set_volume(tpa6130_volume); -+ break; -+ case RX51_EXT_API_AIC34B: -+ change = (aic34b_volume != ucontrol->value.integer.value[0]); -+ aic34b_volume = ucontrol->value.integer.value[0]; -+ aic34b_set_volume(aic34b_volume); ++ if (wl->ps_entry_retry < WL1251_PS_ENTRY_RETRIES) { ++ ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); ++ wl->ps_entry_retry++; ++ } else { ++ wl1251_error("Power save entry failed, giving up"); ++ wl->ps_entry_retry = 0; ++ } + break; ++ case ENTER_POWER_SAVE_SUCCESS: + default: -+ return -EINVAL; ++ wl->ps_entry_retry = 0; ++ break; + } + -+ return change; -+} ++ return 0; + } + + static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox) +@@ -58,10 +79,7 @@ static int wl1251_event_process(struct w + int ret; + u32 vector; + +- wl1251_event_mbox_dump(mbox); +- + vector = mbox->events_vector & ~(mbox->events_mask); +- wl1251_debug(DEBUG_EVENT, "vector: 0x%x", vector); + + if (vector & SCAN_COMPLETE_EVENT_ID) { + ret = wl1251_event_scan_complete(wl, mbox); +@@ -79,7 +97,14 @@ static int wl1251_event_process(struct w + } + } + +- if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID && wl->psm) { ++ if (vector & PS_REPORT_EVENT_ID) { ++ wl1251_debug(DEBUG_EVENT, "PS_REPORT_EVENT_ID"); ++ ret = wl1251_event_ps_report(wl, mbox); ++ if (ret < 0) ++ return ret; ++ } + -+static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = { -+ SND_SOC_DAPM_POST("Post event", rx51_post_event), -+ SND_SOC_DAPM_SPK("Post spk", rx51_post_spk_event), -+ SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event), -+ SND_SOC_DAPM_SPK("Headphone Jack", rx51_jack_hp_event), -+ SND_SOC_DAPM_MIC("Mic Jack", rx51_jack_mic_event), -+ SND_SOC_DAPM_OUTPUT("FM Transmitter"), -+ SND_SOC_DAPM_MIC("DMic", NULL), -+ SND_SOC_DAPM_SPK("Earphone", rx51_ear_event), -+ SND_SOC_DAPM_SPK("Pre spk", rx51_pre_spk_event), -+ SND_SOC_DAPM_PRE("Pre event", rx51_pre_event), ++ if (wl->vif && (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID)) { + wl1251_debug(DEBUG_EVENT, 
"SYNCHRONIZATION_TIMEOUT_EVENT"); + /* need to unlock mutex to avoid deadlocking with rtnl */ + mutex_unlock(&wl->mutex); +@@ -97,11 +122,16 @@ static int wl1251_event_process(struct w + } + } + +- if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) ++ if (wl->vif && (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID)) { ++ wl1251_debug(DEBUG_EVENT, "ROAMING_TRIGGER_LOW_RSSI_EVENT"); + ieee80211_rssi_changed(wl->vif, IEEE80211_RSSI_STATE_LOW); ++ } + +- if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) ++ if (wl->vif && (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID)) { ++ wl1251_debug(DEBUG_EVENT, ++ "ROAMING_TRIGGER_REGAINED_RSSI_EVENT"); + ieee80211_rssi_changed(wl->vif, IEEE80211_RSSI_STATE_HIGH); ++ } + + return 0; + } +@@ -121,9 +151,6 @@ void wl1251_event_mbox_config(struct wl1 + { + wl->mbox_ptr[0] = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR); + wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); +- +- wl1251_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", +- wl->mbox_ptr[0], wl->mbox_ptr[1]); + } + + int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num) +@@ -131,8 +158,6 @@ int wl1251_event_handle(struct wl1251 *w + struct event_mailbox mbox; + int ret; + +- wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); +- + if (mbox_num > 1) + return -EINVAL; + +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_event.h kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_event.h +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_event.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_event.h 2011-09-04 11:37:54.000000000 +0200 +@@ -75,6 +75,13 @@ enum { + EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, + }; + ++enum { ++ ENTER_POWER_SAVE_FAIL = 0, ++ ENTER_POWER_SAVE_SUCCESS, ++ EXIT_POWER_SAVE_FAIL, ++ EXIT_POWER_SAVE_SUCCESS +}; + -+static const struct snd_soc_dapm_route audio_map[] = { -+ {"Post spk", NULL, "LLOUT"}, -+ {"Post spk", NULL, "RLOUT"}, -+ -+ {"Ext Spk", NULL, "HPLOUT"}, -+ {"Ext Spk", NULL, "HPROUT"}, -+ -+ {"Headphone Jack", NULL, "LLOUT"}, -+ {"Headphone Jack", NULL, "RLOUT"}, -+ {"LINE1L", NULL, "Mic Jack"}, + struct event_debug_report { + u8 debug_event_id; + u8 num_params; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251.h kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251.h +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251.h 2011-09-04 11:37:54.000000000 +0200 +@@ -356,7 +356,7 @@ struct wl1251 { + /* Are we currently scanning */ + bool scanning; + +- u32 last_event; ++ unsigned long last_event; + + /* Our association ID */ + u16 aid; +@@ -381,6 +381,8 @@ struct wl1251 { + /* PSM mode requested */ + bool psm_requested; + ++ u8 ps_entry_retry; + -+ {"FM Transmitter", NULL, "LLOUT"}, -+ {"FM Transmitter", NULL, "RLOUT"}, + u16 beacon_int; + u8 dtim_period; + +@@ -423,6 +425,8 @@ int wl1251_plt_stop(struct wl1251 *wl); + + #define WL1251_DEFAULT_CHANNEL 0 + ++#define WL1251_DEFAULT_BET_CONSECUTIVE 10 + -+ {"Earphone", NULL, "MONO_LOUT"}, + #define CHIP_ID_1251_PG10 (0x7010101) + #define CHIP_ID_1251_PG11 (0x7020101) + #define CHIP_ID_1251_PG12 (0x7030101) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_main.c kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_main.c +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_main.c 2011-09-04 
11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_main.c 2011-09-04 11:37:54.000000000 +0200 +@@ -939,6 +939,7 @@ static void wl1251_op_stop(struct ieee80 + wl->next_tx_complete = 0; + wl->elp = false; + wl->psm = 0; ++ wl->ps_entry_retry = 0; + wl->tx_queue_stopped = false; + wl->power_level = WL1251_DEFAULT_POWER_LEVEL; + wl->channel = WL1251_DEFAULT_CHANNEL; +@@ -1092,6 +1093,7 @@ static int wl1251_op_config_interface(st + struct wl1251 *wl = hw->priv; + struct sk_buff *beacon; + DECLARE_MAC_BUF(mac); ++ bool do_join = false; + int ret; + + wl1251_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %s", +@@ -1105,23 +1107,21 @@ static int wl1251_op_config_interface(st + if (ret < 0) + goto out; + ++ if (!is_zero_ether_addr(conf->bssid)) ++ do_join = true; + -+ {"DMic Rate 64", NULL, "Mic Bias 2V"}, -+ {"Mic Bias 2V", NULL, "DMic"}, + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + +- ret = wl1251_build_null_data(wl); +- if (ret < 0) +- goto out_sleep; ++ if (do_join) { ++ ret = wl1251_build_null_data(wl); ++ if (ret < 0) ++ goto out_sleep; ++ } + + wl->ssid_len = conf->ssid_len; + if (wl->ssid_len) + memcpy(wl->ssid, conf->ssid, wl->ssid_len); + +- if (wl->bss_type != BSS_TYPE_IBSS) { +- ret = wl1251_join(wl, wl->bss_type, wl->channel, +- wl->beacon_int, wl->dtim_period); +- if (ret < 0) +- goto out_sleep; +- } +- + if (conf->changed & IEEE80211_IFCC_BEACON) { + beacon = ieee80211_beacon_get(hw, vif); + wl1251_update_support_rates((struct wl12xx_beacon_template *) +@@ -1142,7 +1142,9 @@ static int wl1251_op_config_interface(st + + if (ret < 0) + goto out_sleep; ++ } + ++ if (do_join) { + ret = wl1251_join(wl, wl->bss_type, wl->channel, + wl->beacon_int, wl->dtim_period); + if (ret < 0) +@@ -1177,18 +1179,7 @@ static int wl1251_op_config(struct ieee8 + if (ret < 0) + goto out; + +- if (channel != wl->channel) { +- wl->channel = channel; +- +- ret = wl1251_join(wl, wl->bss_type, wl->channel, +- wl->beacon_int, wl->dtim_period); +- if (ret < 0) +- goto out_sleep; +- } +- +- ret = wl1251_build_null_data(wl); +- if (ret < 0) +- goto out_sleep; ++ wl->channel = channel; + + if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { + wl1251_debug(DEBUG_PSM, "psm enabled"); +@@ -1215,7 +1206,7 @@ static int wl1251_op_config(struct ieee8 + if (conf->power_level != wl->power_level) { + ret = wl1251_acx_tx_power(wl, conf->power_level); + if (ret < 0) +- goto out; ++ goto out_sleep; + + wl->power_level = conf->power_level; + } +@@ -1939,6 +1930,7 @@ static int __devinit wl1251_probe(struct + wl->elp = false; + wl->psm = 0; + wl->psm_requested = false; ++ wl->ps_entry_retry = 0; + wl->tx_queue_stopped = false; + wl->power_level = WL1251_DEFAULT_POWER_LEVEL; + wl->beacon_int = WL1251_DEFAULT_BEACON_INT; +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_ps.c kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_ps.c +--- kernel-2.6.28-20094102.6+0m5/drivers/net/wireless/wl12xx/wl1251_ps.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/net/wireless/wl12xx/wl1251_ps.c 2011-09-04 11:37:54.000000000 +0200 +@@ -151,6 +151,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl + if (ret < 0) + return ret; + ++ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE, ++ WL1251_DEFAULT_BET_CONSECUTIVE); ++ if (ret < 0) ++ return ret; + -+ {"Pre spk", NULL, "LLOUT"}, -+ {"Pre spk", NULL, "RLOUT"}, -+}; + ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); + if (ret < 0) + return ret; +@@ -168,6 +173,12 @@ 
int wl1251_ps_set_mode(struct wl1251 *wl + if (ret < 0) + return ret; + ++ /* disable BET */ ++ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE, ++ WL1251_DEFAULT_BET_CONSECUTIVE); ++ if (ret < 0) ++ return ret; + -+static const char *spk_function[] = {"Off", "On"}; -+static const char *jack_function[] = {"Off", "Headphone", "Headset", -+ "Mic", "ECI Headset", "TV-OUT"}; -+static const char *fmtx_function[] = {"Off", "On"}; -+static const char *input_function[] = {"ADC", "Digital Mic"}; -+static const char *ear_function[] = {"Off", "On"}; + /* disable beacon filtering */ + ret = wl1251_acx_beacon_filter_opt(wl, false); + if (ret < 0) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/usb/gadget/file_storage.c kernel-2.6.28-20094803.3+0m5/drivers/usb/gadget/file_storage.c +--- kernel-2.6.28-20094102.6+0m5/drivers/usb/gadget/file_storage.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/usb/gadget/file_storage.c 2011-09-04 11:37:54.000000000 +0200 +@@ -606,6 +606,10 @@ enum fsg_buffer_state { + struct fsg_dev; + + struct fsg_buffhd { ++ struct rb_node rb_node; ++ sector_t sector; ++ int sectors; + -+static const struct soc_enum rx51_enum[] = { -+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function), -+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(jack_function), jack_function), -+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(fmtx_function), fmtx_function), -+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function), -+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ear_function), ear_function), -+}; + void *buf; + size_t buflen; + enum fsg_buffer_state state; +@@ -694,6 +698,9 @@ struct fsg_dev { + struct fsg_buffhd buffhds[NUM_BUFFERS]; + int num_buffers; + ++ /* Tree to find direct I/O's with overlapping sectors */ ++ struct rb_root bio_tree; + + int thread_wakeup_needed; + struct completion thread_notifier; + struct task_struct *thread_task; +@@ -1055,6 +1062,58 @@ static struct usb_gadget_strings stringt + .strings = strings, + }; + +/* -+ * TPA6130 volume. From -59.5 to 4 dB with increasing step size when going -+ * down in gain. Justify scale so that it is quite correct from -20 dB and -+ * up. This setting shows -30 dB at minimum, -12.95 dB at 49 % (actual -+ * is -10.3 dB) and 4.65 dB at maximum (actual is 4 dB). ++ * Find overlapped bio in fsg->bio_tree rb tree. + */ -+static const unsigned int tpa6130_tlv[] = { -+ TLV_DB_RANGE_HEAD(10), -+ 0, 1, TLV_DB_SCALE_ITEM(-5950, 600, 0), -+ 2, 3, TLV_DB_SCALE_ITEM(-5000, 250, 0), -+ 4, 5, TLV_DB_SCALE_ITEM(-4550, 160, 0), -+ 6, 7, TLV_DB_SCALE_ITEM(-4140, 190, 0), -+ 8, 9, TLV_DB_SCALE_ITEM(-3650, 120, 0), -+ 10, 11, TLV_DB_SCALE_ITEM(-3330, 160, 0), -+ 12, 13, TLV_DB_SCALE_ITEM(-3040, 180, 0), -+ 14, 20, TLV_DB_SCALE_ITEM(-2710, 110, 0), -+ 21, 37, TLV_DB_SCALE_ITEM(-1960, 74, 0), -+ 38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0), -+}; ++static int fsg_rbtree_find(struct fsg_dev *fsg, sector_t s, ++ unsigned int sectors) ++{ ++ struct rb_node *n; ++ struct fsg_buffhd *tmp; ++ int found = 0; ++ ++ spin_lock_irq(&fsg->lock); ++ n = fsg->bio_tree.rb_node; ++ while (n) { ++ tmp = rb_entry(n, struct fsg_buffhd, rb_node); ++ if (s + sectors <= tmp->sector) ++ n = n->rb_left; ++ else if (s >= tmp->sector + tmp->sectors) ++ n = n->rb_right; ++ else { ++ found = 1; ++ break; ++ } ++ } ++ spin_unlock_irq(&fsg->lock); ++ return found; ++} + +/* -+ * TLV320AIC3x output stage volumes. From -78.3 to 0 dB. Muted below -78.3 dB. -+ * Step size is approximately 0.5 dB over most of the scale but increasing -+ * near the very low levels. 
-+ * Define dB scale so that it is mostly correct for range about -55 to 0 dB -+ * but having increasing dB difference below that (and where it doesn't count -+ * so much). This setting shows -50 dB (actual is -50.3 dB) for register -+ * value 100 and -58.5 dB (actual is -78.3 dB) for register value 117. ++ * Insert a node into the fsg->bio_tree rb tree. + */ -+static DECLARE_TLV_DB_SCALE(aic3x_output_stage_tlv, -5900, 50, 1); -+ -+static const struct snd_kcontrol_new aic34_rx51_controls[] = { -+ SOC_ENUM_EXT("Speaker Function", rx51_enum[0], -+ rx51_get_spk, rx51_set_spk), -+ SOC_ENUM_EXT("Jack Function", rx51_enum[1], -+ rx51_get_jack, rx51_set_jack), -+ SOC_ENUM_EXT("FMTX Function", rx51_enum[2], -+ rx51_get_fmtx, rx51_set_fmtx), -+ SOC_ENUM_EXT("Input Select", rx51_enum[3], -+ rx51_get_input, rx51_set_input), -+ SOC_ENUM_EXT("Earphone Function", rx51_enum[4], -+ rx51_get_ear, rx51_set_ear), -+ SOC_RX51_EXT_SINGLE_TLV("Headphone Playback Volume", -+ RX51_EXT_API_TPA6130, 63, -+ tpa6130_tlv), -+ SOC_RX51_EXT_SINGLE_TLV("Earphone Playback Volume", -+ RX51_EXT_API_AIC34B, 118, -+ aic3x_output_stage_tlv), -+}; -+ -+static int rx51_aic34_init(struct snd_soc_codec *codec) ++static void fsg_rbtree_insert(struct fsg_dev *fsg, struct fsg_buffhd *node) +{ -+ int i, err; -+ -+ /* set up NC codec pins */ -+ snd_soc_dapm_nc_pin(codec, "MIC3L"); -+ snd_soc_dapm_nc_pin(codec, "MIC3R"); -+ snd_soc_dapm_nc_pin(codec, "LINE1R"); ++ struct rb_node **p; ++ struct rb_node *parent = NULL; ++ struct fsg_buffhd *tmp; + -+ /* Create jack for accessory reporting */ -+ err = snd_jack_new(codec->card, "Jack", SND_JACK_MECHANICAL | -+ SND_JACK_HEADSET | SND_JACK_AVOUT, &rx51_jack); -+ if (err < 0) -+ return err; ++ spin_lock_irq(&fsg->lock); ++ p = &fsg->bio_tree.rb_node; + -+ /* Add RX51 specific controls */ -+ for (i = 0; i < ARRAY_SIZE(aic34_rx51_controls); i++) { -+ err = snd_ctl_add(codec->card, -+ snd_soc_cnew(&aic34_rx51_controls[i], codec, NULL)); -+ if (err < 0) -+ return err; ++ while (*p) { ++ parent = *p; ++ tmp = rb_entry(parent, struct fsg_buffhd, rb_node); ++ if (node->sector < tmp->sector) ++ p = &(*p)->rb_left; ++ else ++ p = &(*p)->rb_right; + } -+ -+ /* Add RX51 specific widgets */ -+ snd_soc_dapm_new_controls(codec, aic34_dapm_widgets, -+ ARRAY_SIZE(aic34_dapm_widgets)); -+ -+ /* Set up RX51 specific audio path audio_map */ -+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); -+ -+ snd_soc_dapm_enable_pin(codec, "Earphone"); -+ -+ snd_soc_dapm_sync(codec); -+ -+ return 0; ++ rb_link_node(&node->rb_node, parent, p); ++ rb_insert_color(&node->rb_node, &fsg->bio_tree); ++ spin_unlock_irq(&fsg->lock); +} + -+/* Since all codec control is done by Bluetooth hardware -+ only some constrains need to be set for it */ -+struct snd_soc_dai btcodec_dai = { -+ .name = "Bluetooth codec", -+ .playback = { -+ .stream_name = "BT Playback", -+ .channels_min = 1, -+ .channels_max = 1, -+ .rates = SNDRV_PCM_RATE_8000, -+ .formats = SNDRV_PCM_FMTBIT_S16_LE,}, -+ .capture = { -+ .stream_name = "BT Capture", -+ .channels_min = 1, -+ .channels_max = 1, -+ .rates = SNDRV_PCM_RATE_8000, -+ .formats = SNDRV_PCM_FMTBIT_S16_LE,}, -+}; -+ -+/* Digital audio interface glue - connects codec <--> CPU */ -+static struct snd_soc_dai_link rx51_dai[] = { -+ { -+ .name = "TLV320AIC34", -+ .stream_name = "AIC34", -+ .cpu_dai = &omap_mcbsp_dai[0], -+ .codec_dai = &aic3x_dai, -+ .init = rx51_aic34_init, -+ .ops = &rx51_ops, -+ }, { -+ .name = "Bluetooth PCM", -+ .stream_name = "Bluetooth", -+ .cpu_dai = 
&omap_mcbsp_dai[1], -+ .codec_dai = &btcodec_dai, -+ .ops = &rx51_bt_ops, + /** UGLY UGLY HACK: Windows problems with multiple + * configurations. + * +@@ -1721,7 +1780,8 @@ static void direct_read_end_io(struct bi + * or b) more than one bio must be submitted + */ + /* FIXME: Needs an equivalent of readahead */ +-static ssize_t direct_read(struct file *file, char *buf, size_t amount, loff_t *pos) ++static ssize_t direct_read(struct file *file, struct fsg_buffhd *bh, ++ size_t amount, loff_t *pos) + { + DECLARE_COMPLETION_ONSTACK(wait); + unsigned max_pages = (amount >> PAGE_SHIFT) + 1; +@@ -1729,7 +1789,8 @@ static ssize_t direct_read(struct file * + ssize_t totlen = 0; + struct page *page; + struct bio *bio; +- char *p = buf; ++ char *p = bh->buf; ++ int rc; + + if (!amount) + return 0; +@@ -1767,6 +1828,15 @@ static ssize_t direct_read(struct file * + return -EINVAL; + } + ++ while (fsg_rbtree_find(bh->fsg, bio->bi_sector, ++ bio_sectors(bio))) { ++ rc = sleep_thread(bh->fsg); ++ if (rc) { ++ bio_put(bio); ++ return rc; ++ } + } -+}; -+ -+/* Audio machine driver */ -+static struct snd_soc_machine snd_soc_machine_rx51 = { -+ .name = "RX51", -+ .dai_link = rx51_dai, -+ .num_links = ARRAY_SIZE(rx51_dai), -+}; -+ -+/* Audio private data */ -+static struct aic3x_setup_data rx51_aic34_setup = { -+ .i2c_bus = 2, -+ .i2c_address = 0x18, -+ .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED, -+ .gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT, -+}; -+ -+/* Audio subsystem */ -+static struct snd_soc_device rx51_snd_devdata = { -+ .machine = &snd_soc_machine_rx51, -+ .platform = &omap_soc_platform, -+ .codec_dev = &soc_codec_dev_aic3x, -+ .codec_data = &rx51_aic34_setup, -+}; -+ -+static struct platform_device *rx51_snd_device; -+ -+#define REMAP_OFFSET 2 -+#define DEDICATED_OFFSET 3 -+#define VMMC2_DEV_GRP 0x2B -+#define VMMC2_285V 0x0a -+ -+static int __init rx51_soc_init(void) -+{ -+ int err; -+ struct device *dev; + -+ if (!machine_is_nokia_rx51()) -+ return -ENODEV; -+ -+ if ((system_rev >= 0x08 && system_rev <= 0x13) || /* Macros */ -+ system_rev >= 0x1901) { -+ rx51_new_hw_audio = 1; -+ err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, -+ VMMC2_285V, -+ VMMC2_DEV_GRP + DEDICATED_OFFSET); -+ err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0xee, -+ VMMC2_DEV_GRP + REMAP_OFFSET); -+ if (err) { -+ printk(KERN_ERR "%s rx51 audio failed!\n", __func__); -+ return -ENODEV; + submit_bio(READ, bio); + + wait_for_completion(&wait); +@@ -1865,7 +1935,7 @@ static int do_read(struct fsg_dev *fsg) + /* Perform the read */ + file_offset_tmp = file_offset; + if (curlun->direct) +- nread = direct_read(curlun->filp, bh->buf, ++ nread = direct_read(curlun->filp, bh, + amount, &file_offset_tmp); + else + nread = vfs_read(curlun->filp, +@@ -1920,6 +1990,7 @@ static void direct_write_end_io(struct b + { + struct fsg_buffhd *bh = bio->bi_private; + struct fsg_dev *fsg = bh->fsg; ++ unsigned long flags; + + if (err) { + /* FIXME: how to let host know about this error */ +@@ -1927,11 +1998,13 @@ static void direct_write_end_io(struct b + clear_bit(BIO_UPTODATE, &bio->bi_flags); + } + ++ /* FIXME: smp barriers are not necessary for this this driver */ + smp_wmb(); +- spin_lock(&fsg->lock); ++ spin_lock_irqsave(&fsg->lock, flags); ++ rb_erase(&bh->rb_node, &fsg->bio_tree); + bh->state = BUF_STATE_EMPTY; + wakeup_thread(fsg); +- spin_unlock(&fsg->lock); ++ spin_unlock_irqrestore(&fsg->lock, flags); + + bio_put(bio); + } +@@ -1949,6 +2022,7 @@ static ssize_t direct_write(struct file + struct page *page; + struct 
bio *bio; + char *p = bh->buf; ++ int rc; + + if (!amount) + return 0; +@@ -1987,6 +2061,16 @@ static ssize_t direct_write(struct file + } + + bh->state = BUF_STATE_BUSY; ++ bh->sector = bio->bi_sector; ++ bh->sectors = bio_sectors(bio); ++ while (fsg_rbtree_find(bh->fsg, bh->sector, bh->sectors)) { ++ rc = sleep_thread(bh->fsg); ++ if (rc) { ++ bio_put(bio); ++ return rc; + } + } ++ fsg_rbtree_insert(bh->fsg, bh); + + submit_bio(WRITE, bio); + +@@ -4456,6 +4540,7 @@ static int __init fsg_bind(struct usb_ga + bh->next = bh + 1; + } + fsg->buffhds[fsg->num_buffers - 1].next = &fsg->buffhds[0]; ++ fsg->bio_tree = RB_ROOT; + + snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", + init_utsname()->sysname, init_utsname()->release, +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/usb/musb/musb_core.c kernel-2.6.28-20094803.3+0m5/drivers/usb/musb/musb_core.c +--- kernel-2.6.28-20094102.6+0m5/drivers/usb/musb/musb_core.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/usb/musb/musb_core.c 2011-09-04 11:37:54.000000000 +0200 +@@ -235,6 +235,19 @@ static int musb_charger_detect(struct mu + /* we always reset transceiver */ + check_charger = 1; + ++ /* HACK: ULPI tends to get stuck when booting with ++ * the cable connected ++ */ ++ r = musb_readb(musb->mregs, MUSB_DEVCTL); ++ if ((r & MUSB_DEVCTL_VBUS) ++ == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { ++ musb_save_ctx_and_suspend(&musb->g, 0); ++ musb_restore_ctx_and_resume(&musb->g); ++ if (musb->board && musb->board->set_pm_limits) ++ musb->board->set_pm_limits( ++ musb->controller, 1); ++ } + -+ if (gpio_request(RX51_CODEC_RESET_GPIO, NULL) < 0) -+ BUG(); -+ if (gpio_request(RX51_TVOUT_SEL_GPIO, "tvout_sel") < 0) -+ BUG(); -+ if (gpio_request(RX51_ECI_SWITCH_1_GPIO, "ECI switch 1") < 0) -+ BUG(); -+ if (gpio_request(RX51_ECI_SWITCH_2_GPIO, "ECI switch 2") < 0) -+ BUG(); -+ gpio_direction_output(RX51_CODEC_RESET_GPIO, 0); -+ gpio_direction_output(RX51_TVOUT_SEL_GPIO, 0); -+ gpio_direction_output(RX51_ECI_SWITCH_1_GPIO, 0); -+ gpio_direction_output(RX51_ECI_SWITCH_2_GPIO, 1); + /* disable RESET and RESUME interrupts */ + r = musb_readb(musb->mregs, MUSB_INTRUSBE); + r &= ~(MUSB_INTR_RESUME | MUSB_INTR_RESET); +@@ -2113,19 +2126,8 @@ static void musb_irq_work(struct work_st + { + struct musb *musb = container_of(data, struct musb, irq_work); + static int old_state, old_ma, old_suspend; +- u8 devctl; + + if (musb->xceiv->state != old_state) { +- devctl = musb_readb(musb->mregs, MUSB_DEVCTL); +- if (musb->xceiv->state == OTG_STATE_B_IDLE && +- (devctl & MUSB_DEVCTL_VBUS)) +- goto keep_limit; +- +- /* clear/set requirements for musb to work with DPS on omap3 */ +- if (musb->board && musb->board->set_pm_limits) +- musb->board->set_pm_limits(musb->controller, +- (musb->xceiv->state == OTG_STATE_B_PERIPHERAL)); +-keep_limit: + old_state = musb->xceiv->state; + sysfs_notify(&musb->controller->kobj, NULL, "mode"); + } +@@ -2457,6 +2459,12 @@ bad_config: + if (status) + goto fail2; + ++ /* Resets the controller. Has to be done. 
Without this, most likely ++ * the state machine inside the transceiver doesn't get fixed properly ++ */ ++ musb_save_ctx_and_suspend(&musb->g, 0); ++ musb_restore_ctx_and_resume(&musb->g); + -+ gpio_set_value(RX51_CODEC_RESET_GPIO, 0); -+ udelay(1); -+ gpio_set_value(RX51_CODEC_RESET_GPIO, 1); -+ msleep(1); + return 0; + + fail2: +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/usb/musb/musb_core.h kernel-2.6.28-20094803.3+0m5/drivers/usb/musb/musb_core.h +--- kernel-2.6.28-20094102.6+0m5/drivers/usb/musb/musb_core.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/usb/musb/musb_core.h 2011-09-04 11:37:54.000000000 +0200 +@@ -360,6 +360,7 @@ struct musb { + struct clk *clock; + irqreturn_t (*isr)(int, void *); + struct work_struct irq_work; ++ struct work_struct vbus_work; + + /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ + #define MUSB_PORT_STAT_RESUME (1 << 31) +diff -Nurp kernel-2.6.28-20094102.6+0m5/drivers/usb/musb/omap2430.c kernel-2.6.28-20094803.3+0m5/drivers/usb/musb/omap2430.c +--- kernel-2.6.28-20094102.6+0m5/drivers/usb/musb/omap2430.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/drivers/usb/musb/omap2430.c 2011-09-04 11:37:54.000000000 +0200 +@@ -49,6 +49,17 @@ + + static struct timer_list musb_idle_timer; + ++static void musb_vbus_work(struct work_struct *data) ++{ ++ struct musb *musb = container_of(data, struct musb, vbus_work); ++ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + -+ if (gpio_request(RX51_SPEAKER_AMP_TWL_GPIO, NULL) < 0) -+ BUG(); -+ gpio_direction_output(RX51_SPEAKER_AMP_TWL_GPIO, 0); ++ /* clear/set requirements for musb to work with DPS on omap3 */ ++ if (musb->board && musb->board->set_pm_limits && !musb->is_charger) ++ musb->board->set_pm_limits(musb->controller, ++ (devctl & MUSB_DEVCTL_VBUS)); ++} + -+ rx51_snd_device = platform_device_alloc("soc-audio", -1); -+ if (!rx51_snd_device) -+ return -ENOMEM; + static void musb_do_idle(unsigned long _musb) + { + struct musb *musb = (void *)_musb; +@@ -276,6 +287,7 @@ int __init musb_platform_init(struct mus + musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON; + + setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); ++ INIT_WORK(&musb->vbus_work, musb_vbus_work); + + return 0; + } +@@ -448,6 +460,9 @@ void musb_restore_ctx_and_resume(struct + + /* Restore register context */ + musb_restore_ctx(musb); + -+ platform_set_drvdata(rx51_snd_device, &rx51_snd_devdata); -+ rx51_snd_devdata.dev = &rx51_snd_device->dev; -+ err = platform_device_add(rx51_snd_device); -+ if (err) -+ goto err1; ++ /* set constraints */ ++ schedule_work(&musb->vbus_work); + spin_unlock_irqrestore(&musb->lock, flags); + } + EXPORT_SYMBOL_GPL(musb_restore_ctx_and_resume); +diff -Nurp kernel-2.6.28-20094102.6+0m5/include/linux/ssi_driver_if.h kernel-2.6.28-20094803.3+0m5/include/linux/ssi_driver_if.h +--- kernel-2.6.28-20094102.6+0m5/include/linux/ssi_driver_if.h 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/include/linux/ssi_driver_if.h 2011-09-04 11:37:54.000000000 +0200 +@@ -31,6 +31,10 @@ + /* The number of ports handled by the driver. 
(MAX:2) */ + #define SSI_MAX_PORTS 1 + ++#define SSI_MAX_FRAME_SIZE 0x1f ++#define SSI_MAX_RX_TIMEOUT 0x1ff ++#define SSI_MAX_TX_DIVISOR 0x7f + -+ dev = &rx51_snd_device->dev; + #define ANY_SSI_CONTROLLER -1 + #define ANY_CHANNEL -1 + #define CHANNEL(channel) (1 << (channel)) +@@ -42,6 +46,7 @@ enum { + SSI_EVENT_POST_SPEED_CHANGE, + SSI_EVENT_CAWAKE_UP, + SSI_EVENT_CAWAKE_DOWN, ++ SSI_EVENT_SSR_DATAAVAILABLE, + }; + + enum { +@@ -52,6 +57,10 @@ enum { + SSI_IOCTL_FLUSH_RX, + SSI_IOCTL_FLUSH_TX, + SSI_IOCTL_CAWAKE, ++ SSI_IOCTL_SET_RX, ++ SSI_IOCTL_GET_RX, ++ SSI_IOCTL_SET_TX, ++ SSI_IOCTL_GET_TX, + SSI_IOCTL_TX_CH_FULL, + SSI_IOCTL_CH_DATAACCEPT, + }; +@@ -142,6 +151,7 @@ int ssi_write(struct ssi_device *dev, u3 + void ssi_write_cancel(struct ssi_device *dev); + int ssi_read(struct ssi_device *dev, u32 *data, unsigned int w_count); + void ssi_read_cancel(struct ssi_device *dev); ++int ssi_poll(struct ssi_device *dev); + int ssi_ioctl(struct ssi_device *dev, unsigned int command, void *arg); + void ssi_close(struct ssi_device *dev); + void ssi_set_read_cb(struct ssi_device *dev, +diff -Nurp kernel-2.6.28-20094102.6+0m5/include/linux/ssi_char.h kernel-2.6.28-20094803.3+0m5/include/linux/ssi_char.h +--- kernel-2.6.28-20094102.6+0m5/include/linux/ssi_char.h 1970-01-01 01:00:00.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/include/linux/ssi_char.h 2011-09-04 11:37:54.000000000 +0200 +@@ -0,0 +1,71 @@ ++/* ++ * ssi_char.h ++ * ++ * Part of the SSI character device driver. ++ * ++ * Copyright (C) 2009 Nokia Corporation. All rights reserved. ++ * ++ * Contact: Andras Domokos ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ + -+ *(unsigned int *)rx51_dai[0].cpu_dai->private_data = 1; -+ *(unsigned int *)rx51_dai[1].cpu_dai->private_data = 2; + -+ err = device_create_file(dev, &dev_attr_eci_mode); -+ if (err) -+ goto err2; ++#ifndef SSI_CHAR_H ++#define SSI_CHAR_H + -+ return err; -+err2: -+ platform_device_del(rx51_snd_device); -+err1: -+ platform_device_put(rx51_snd_device); ++#define SSI_CHAR_BASE 'S' ++#define CS_IOW(num, dtype) _IOW(SSI_CHAR_BASE, num, dtype) ++#define CS_IOR(num, dtype) _IOR(SSI_CHAR_BASE, num, dtype) ++#define CS_IOWR(num, dtype) _IOWR(SSI_CHAR_BASE, num, dtype) ++#define CS_IO(num) _IO(SSI_CHAR_BASE, num) + -+ return err; ++#define CS_SEND_BREAK CS_IO(1) ++#define CS_FLUSH_RX CS_IO(2) ++#define CS_FLUSH_TX CS_IO(3) ++#define CS_BOOTSTRAP CS_IO(4) ++#define CS_SET_WAKELINE CS_IOW(5, unsigned int) ++#define CS_GET_WAKELINE CS_IOR(6, unsigned int) ++#define CS_SET_RX CS_IOW(7, struct ssi_rx_config) ++#define CS_GET_RX CS_IOW(8, struct ssi_rx_config) ++#define CS_SET_TX CS_IOW(9, struct ssi_tx_config) ++#define CS_GET_TX CS_IOW(10, struct ssi_tx_config) + -+} ++#define SSI_MODE_SLEEP 0 ++#define SSI_MODE_STREAM 1 ++#define SSI_MODE_FRAME 2 + -+static void __exit rx51_soc_exit(void) -+{ -+ platform_device_unregister(rx51_snd_device); -+} ++#define SSI_ARBMODE_RR 0 ++#define SSI_ARBMODE_PRIO 1 + -+module_init(rx51_soc_init); -+module_exit(rx51_soc_exit); ++#define WAKE_UP 0 ++#define WAKE_DOWN 1 + -+MODULE_AUTHOR("Jarkko Nikula "); -+MODULE_DESCRIPTION("ALSA SoC Nokia RX51"); -+MODULE_LICENSE("GPL"); -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/omap/rx51.h linux-omap-2.6.28-nokia1/sound/soc/omap/rx51.h ---- linux-omap-2.6.28-omap1/sound/soc/omap/rx51.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-omap-2.6.28-nokia1/sound/soc/omap/rx51.h 2011-06-22 13:19:33.293063268 +0200 -@@ -0,0 +1,27 @@ -+#ifndef _RX51_H_ -+#define _RX51_H_ ++struct ssi_tx_config { ++ u32 mode; ++ u32 frame_size; ++ u32 channels; ++ u32 divisor; ++ u32 arb_mode; ++}; + -+/* -+ * rx51.h - SoC audio for Nokia RX51 -+ * -+ * Copyright (C) 2008 Nokia Corporation -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ ++struct ssi_rx_config { ++ u32 mode; ++ u32 frame_size; ++ u32 channels; ++ u32 timeout; ++}; + -+int rx51_set_eci_mode(int mode); -+void rx51_jack_report(int status); -+int allow_button_press(void); ++#endif /* SSI_CHAR_H */ +diff -Nurp kernel-2.6.28-20094102.6+0m5/lib/Kconfig.debug kernel-2.6.28-20094803.3+0m5/lib/Kconfig.debug +--- kernel-2.6.28-20094102.6+0m5/lib/Kconfig.debug 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/lib/Kconfig.debug 2011-09-04 11:37:54.000000000 +0200 +@@ -731,6 +731,12 @@ config FAULT_INJECTION_DEBUG_FS + help + Enable configuration of fault-injection capabilities via debugfs. + ++config PANIC_INFO_BUFF ++ tristate "Buffer to be printed at panic" ++ depends on DEBUG_FS ++ help ++ Provide a small buffer which will be printed at panic. + -+#endif /* _RX51_H_ */ -diff -Nurp linux-omap-2.6.28-omap1/sound/soc/soc-core.c linux-omap-2.6.28-nokia1/sound/soc/soc-core.c ---- linux-omap-2.6.28-omap1/sound/soc/soc-core.c 2011-06-22 13:14:27.273067620 +0200 -+++ linux-omap-2.6.28-nokia1/sound/soc/soc-core.c 2011-06-22 13:19:33.293063268 +0200 -@@ -270,13 +270,14 @@ static void close_delayed_work(struct wo - { - struct snd_soc_device *socdev = - container_of(work, struct snd_soc_device, delayed_work.work); -+ struct snd_soc_machine *machine = socdev->machine; - struct snd_soc_codec *codec = socdev->codec; - struct snd_soc_dai *codec_dai; - int i; + config FAULT_INJECTION_STACKTRACE_FILTER + bool "stacktrace filter for fault-injection capabilities" + depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT +diff -Nurp kernel-2.6.28-20094102.6+0m5/mm/memory.c kernel-2.6.28-20094803.3+0m5/mm/memory.c +--- kernel-2.6.28-20094102.6+0m5/mm/memory.c 2008-12-25 00:26:37.000000000 +0100 ++++ kernel-2.6.28-20094803.3+0m5/mm/memory.c 2011-09-04 11:37:54.000000000 +0200 +@@ -1110,6 +1110,7 @@ no_page_table: + } + return page; + } ++EXPORT_SYMBOL_GPL(follow_page); + + /* Can we do the FOLL_ANON optimization? */ + static inline int use_zero_page(struct vm_area_struct *vma) +diff -Nurp kernel-2.6.28-20094102.6+0m5/mm/swapfile.c kernel-2.6.28-20094803.3+0m5/mm/swapfile.c +--- kernel-2.6.28-20094102.6+0m5/mm/swapfile.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/mm/swapfile.c 2011-09-04 11:37:54.000000000 +0200 +@@ -1103,9 +1103,14 @@ sector_t map_swap_page(struct swap_info_ + /* Update the free pages gap */ + sis->gap_next += 1; + } else { +- /* Always read from the existing re-mapping */ +- BUG_ON(!old); +- offset = old; ++ /* ++ * Always read from the existing re-mapping ++ * if there is one. There may not be because ++ * 'swapin_readahead()' has won a race with ++ * 'add_to_swap()'. 
++ */ ++ if (old) ++ offset = old; + } + spin_unlock(&sis->remap_lock); - mutex_lock(&pcm_mutex); -- for (i = 0; i < codec->num_dai; i++) { -- codec_dai = &codec->dai[i]; -+ for (i = 0; i < machine->num_links; i++) { -+ codec_dai = machine->dai_link[i].codec_dai; +diff -Nurp kernel-2.6.28-20094102.6+0m5/net/bluetooth/hci_conn.c kernel-2.6.28-20094803.3+0m5/net/bluetooth/hci_conn.c +--- kernel-2.6.28-20094102.6+0m5/net/bluetooth/hci_conn.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/net/bluetooth/hci_conn.c 2011-09-04 11:37:54.000000000 +0200 +@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci + conn->type = type; + conn->mode = HCI_CM_ACTIVE; + conn->state = BT_OPEN; ++ conn->auth_type = HCI_AT_GENERAL_BONDING; - dbg("pop wq checking: %s status: %s waiting: %s\n", - codec_dai->playback.stream_name, -@@ -428,51 +429,42 @@ static int soc_pcm_prepare(struct snd_pc - } + conn->power_save = 1; + conn->disc_timeout = HCI_DISCONN_TIMEOUT; +diff -Nurp kernel-2.6.28-20094102.6+0m5/net/mac80211/mlme.c kernel-2.6.28-20094803.3+0m5/net/mac80211/mlme.c +--- kernel-2.6.28-20094102.6+0m5/net/mac80211/mlme.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/net/mac80211/mlme.c 2011-09-04 11:37:54.000000000 +0200 +@@ -34,7 +34,6 @@ + #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) + #define IEEE80211_ASSOC_MAX_TRIES 3 + #define IEEE80211_MONITORING_INTERVAL (2 * HZ) +-#define IEEE80211_PROBE_IDLE_TIME (60 * HZ) + #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) + #define IEEE80211_SCAN_INTERVAL (2 * HZ) + #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) +@@ -784,6 +783,7 @@ static void ieee80211_direct_probe(struc + printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n", + sdata->dev->name, print_mac(mac, ifsta->bssid)); + ifsta->state = IEEE80211_STA_MLME_DISABLED; ++ ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; + ieee80211_sta_send_apinfo(sdata, ifsta); + return; } +@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL(ieee80211_rssi_changed); + void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr) + { ++ struct ieee80211_local *local = sdata->local; ++ + /* + * We can postpone the sta.timer whenever receiving unicast frames + * from AP because we know that the connection is working both ways +@@ -1033,7 +1035,8 @@ void ieee80211_sta_rx_notify(struct ieee + * data idle periods for sending the periodical probe request to + * the AP. 
+ */ +- if (!is_multicast_ether_addr(hdr->addr1)) ++ if (!is_multicast_ether_addr(hdr->addr1) && ++ !(local->hw.flags & IEEE80211_HW_BEACON_FILTER)) + mod_timer(&sdata->u.sta.timer, + jiffies + IEEE80211_MONITORING_INTERVAL); + } +@@ -1046,6 +1049,12 @@ void ieee80211_beacon_loss_work(struct w + struct ieee80211_if_sta *ifsta = &sdata->u.sta; + struct ieee80211_local *local = sdata->local; -- /* we only want to start a DAPM playback stream if we are not waiting -- * on an existing one stopping */ -- if (codec_dai->pop_wait) { -- /* we are waiting for the delayed work to start */ -- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) -- snd_soc_dapm_stream_event(socdev->codec, -- codec_dai->capture.stream_name, -- SND_SOC_DAPM_STREAM_START); -- else { -- codec_dai->pop_wait = 0; -- cancel_delayed_work(&socdev->delayed_work); -- snd_soc_dai_digital_mute(codec_dai, 0); -- } -- } else { -- /* no delayed work - do we need to power up codec */ -- if (codec->bias_level != SND_SOC_BIAS_ON) { -+ /* cancel any delayed stream shutdown that is pending */ -+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && -+ codec_dai->pop_wait) { -+ codec_dai->pop_wait = 0; -+ cancel_delayed_work(&socdev->delayed_work); ++ if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATED) { ++ printk(KERN_DEBUG "%s reports beacon loss when not " ++ "associated\n", sdata->dev->name); ++ return; + } ++ + printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " + "- sending probe request\n", sdata->dev->name, + sdata->u.sta.bssid); +@@ -1112,8 +1121,7 @@ static void ieee80211_associated(struct + * Beacon filtering is only enabled with power save and then the + * stack should not check for beacon loss. + */ +- if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) && +- (local->hw.conf.flags & IEEE80211_CONF_PS)) && ++ if (!(local->hw.flags & IEEE80211_HW_BEACON_FILTER) && + time_after(jiffies, + ifsta->last_beacon + IEEE80211_MONITORING_INTERVAL)) { + printk(KERN_DEBUG "%s: beacon loss from AP %pM " +@@ -1125,11 +1133,6 @@ static void ieee80211_associated(struct -- snd_soc_dapm_set_bias_level(socdev, -- SND_SOC_BIAS_PREPARE); -+ /* do we need to power up codec */ -+ if (codec->bias_level != SND_SOC_BIAS_ON) { -+ snd_soc_dapm_set_bias_level(socdev, -+ SND_SOC_BIAS_PREPARE); + } -- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -- snd_soc_dapm_stream_event(codec, -+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -+ snd_soc_dapm_stream_event(codec, - codec_dai->playback.stream_name, - SND_SOC_DAPM_STREAM_START); -- else -- snd_soc_dapm_stream_event(codec, -+ else -+ snd_soc_dapm_stream_event(codec, - codec_dai->capture.stream_name, - SND_SOC_DAPM_STREAM_START); +- if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { +- ifsta->flags |= IEEE80211_STA_PROBEREQ_POLL; +- send_probe = true; +- } +- + unlock: + rcu_read_unlock(); + +@@ -1148,7 +1151,7 @@ static void ieee80211_associated(struct + if (disassoc) + ieee80211_set_disassoc(sdata, ifsta, true, true, + WLAN_REASON_PREV_AUTH_NOT_VALID); +- else ++ else if (!(local->hw.flags & IEEE80211_HW_BEACON_FILTER)) + mod_timer(&ifsta->timer, jiffies + + IEEE80211_MONITORING_INTERVAL); + } +diff -Nurp kernel-2.6.28-20094102.6+0m5/net/wireless/reg.c kernel-2.6.28-20094803.3+0m5/net/wireless/reg.c +--- kernel-2.6.28-20094102.6+0m5/net/wireless/reg.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/net/wireless/reg.c 2011-09-04 11:37:54.000000000 +0200 +@@ -637,7 +637,7 @@ static void print_rd_rules(const struct + const struct ieee80211_freq_range 
*freq_range = NULL; + const struct ieee80211_power_rule *power_rule = NULL; -- snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_ON); -- snd_soc_dai_digital_mute(codec_dai, 0); -+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_ON); -+ snd_soc_dai_digital_mute(codec_dai, 0); +- printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), " ++ printk(KERN_DEBUG "\t(start_freq - end_freq @ bandwidth), " + "(max_antenna_gain, max_eirp)\n"); -- } else { -- /* codec already powered - power on widgets */ -- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -- snd_soc_dapm_stream_event(codec, -+ } else { -+ /* codec already powered - power on widgets */ -+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -+ snd_soc_dapm_stream_event(codec, - codec_dai->playback.stream_name, - SND_SOC_DAPM_STREAM_START); -- else -- snd_soc_dapm_stream_event(codec, -+ else -+ snd_soc_dapm_stream_event(codec, - codec_dai->capture.stream_name, - SND_SOC_DAPM_STREAM_START); + for (i = 0; i < rd->n_reg_rules; i++) { +@@ -648,7 +648,7 @@ static void print_rd_rules(const struct + /* There may not be documentation for max antenna gain + * in certain regions */ + if (power_rule->max_antenna_gain) +- printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), " ++ printk(KERN_DEBUG "\t(%d KHz - %d KHz @ %d KHz), " + "(%d mBi, %d mBm)\n", + freq_range->start_freq_khz, + freq_range->end_freq_khz, +@@ -656,7 +656,7 @@ static void print_rd_rules(const struct + power_rule->max_antenna_gain, + power_rule->max_eirp); + else +- printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), " ++ printk(KERN_DEBUG "\t(%d KHz - %d KHz @ %d KHz), " + "(N/A, %d mBm)\n", + freq_range->start_freq_khz, + freq_range->end_freq_khz, +@@ -669,15 +669,15 @@ static void print_regdomain(const struct + { -- snd_soc_dai_digital_mute(codec_dai, 0); -- } -+ snd_soc_dai_digital_mute(codec_dai, 0); + if (is_world_regdom(rd->alpha2)) +- printk(KERN_INFO "cfg80211: World regulatory " ++ printk(KERN_DEBUG "cfg80211: World regulatory " + "domain updated:\n"); + else { + if (is_unknown_alpha2(rd->alpha2)) +- printk(KERN_INFO "cfg80211: Regulatory domain " ++ printk(KERN_DEBUG "cfg80211: Regulatory domain " + "changed to driver built-in settings " + "(unknown country)\n"); + else +- printk(KERN_INFO "cfg80211: Regulatory domain " ++ printk(KERN_DEBUG "cfg80211: Regulatory domain " + "changed to country: %c%c\n", + rd->alpha2[0], rd->alpha2[1]); } +@@ -686,7 +686,7 @@ static void print_regdomain(const struct - out: + void print_regdomain_info(const struct ieee80211_regdomain *rd) + { +- printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n", ++ printk(KERN_DEBUG "cfg80211: Regulatory domain: %c%c\n", + rd->alpha2[0], rd->alpha2[1]); + print_rd_rules(rd); + } +@@ -805,7 +805,7 @@ int regulatory_init(void) + #ifdef CONFIG_WIRELESS_OLD_REGULATORY + cfg80211_regdomain = static_regdom(ieee80211_regdom); + +- printk(KERN_INFO "cfg80211: Using static regulatory domain info\n"); ++ printk(KERN_DEBUG "cfg80211: Using static regulatory domain info\n"); + print_regdomain_info(cfg80211_regdomain); + /* The old code still requests for a new regdomain and if + * you have CRDA you get it updated, otherwise you get +diff -Nurp kernel-2.6.28-20094102.6+0m5/sound/soc/codecs/tlv320aic3x.c kernel-2.6.28-20094803.3+0m5/sound/soc/codecs/tlv320aic3x.c +--- kernel-2.6.28-20094102.6+0m5/sound/soc/codecs/tlv320aic3x.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/sound/soc/codecs/tlv320aic3x.c 2011-09-04 11:37:54.000000000 +0200 +@@ -51,6 +51,9 @@ + + #define AIC3X_VERSION "0.2" + 
++static int hp_dac_lim = 9; ++module_param(hp_dac_lim, int, 0); ++ + /* codec private data */ + struct aic3x_priv { + unsigned int sysclk; +@@ -294,6 +297,40 @@ static DECLARE_TLV_DB_SCALE(hpout_tlv, 0 + */ + static DECLARE_TLV_DB_SCALE(output_stage_tlv, -5900, 50, 1); + ++#define SOC_DOUBLE_R_TLV_TLV320ALC3X(xname, reg_left, reg_right, xshift, xmax,\ ++ xinvert, tlv_array) \ ++{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ ++ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ ++ SNDRV_CTL_ELEM_ACCESS_READWRITE,\ ++ .tlv.p = (tlv_array), \ ++ .info = tlv320alc3x_info_volsw, \ ++ .get = snd_soc_get_volsw_2r,\ ++ .put = snd_soc_put_volsw_2r,\ ++ .private_value = (unsigned long)&(struct soc_mixer_control) \ ++ {.reg = reg_left, .rreg = reg_right, .shift = xshift, \ ++ .max = xmax, .invert = xinvert} } ++ ++static int tlv320alc3x_info_volsw(struct snd_kcontrol *kcontrol, ++ struct snd_ctl_elem_info *uinfo) ++{ ++ struct soc_mixer_control *mc = ++ (struct soc_mixer_control *)kcontrol->private_value; ++ int max = mc->max; ++ ++ if (hp_dac_lim != max && hp_dac_lim >= 2 && hp_dac_lim <= 9) ++ max = hp_dac_lim; ++ ++ if (max == 1) ++ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; ++ else ++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; ++ ++ uinfo->count = 2; ++ uinfo->value.integer.min = 0; ++ uinfo->value.integer.max = max; ++ return 0; ++} ++ + static const struct snd_kcontrol_new aic3x_snd_controls[] = { + /* Output */ + SOC_DOUBLE_R_TLV("PCM Playback Volume", +@@ -327,8 +364,8 @@ static const struct snd_kcontrol_new aic + 0, 118, 1, output_stage_tlv), + SOC_DOUBLE_R("HP DAC Playback Switch", HPLOUT_CTRL, HPROUT_CTRL, 3, + 0x01, 0), +- SOC_DOUBLE_R_TLV("HP DAC Output Volume", HPLOUT_CTRL, HPROUT_CTRL, 4, +- 9, 0, hpout_tlv), ++ SOC_DOUBLE_R_TLV_TLV320ALC3X("HP DAC Output Volume", HPLOUT_CTRL, ++ HPROUT_CTRL, 4, 9, 0, hpout_tlv), + SOC_DOUBLE_R_TLV("HP PGA Bypass Playback Volume", + PGAL_2_HPLOUT_VOL, PGAR_2_HPROUT_VOL, + 0, 118, 1, output_stage_tlv), +@@ -341,8 +378,8 @@ static const struct snd_kcontrol_new aic + 0, 118, 1, output_stage_tlv), + SOC_DOUBLE_R("HPCOM DAC Playback Switch", HPLCOM_CTRL, HPRCOM_CTRL, 3, + 0x01, 0), +- SOC_DOUBLE_R_TLV("HPCOM DAC Output Volume", HPLCOM_CTRL, HPRCOM_CTRL, +- 4, 9, 0, hpout_tlv), ++ SOC_DOUBLE_R_TLV_TLV320ALC3X("HPCOM DAC Output Volume", HPLCOM_CTRL, ++ HPRCOM_CTRL, 4, 9, 0, hpout_tlv), + SOC_DOUBLE_R_TLV("HPCOM PGA Bypass Playback Volume", + PGAL_2_HPLCOM_VOL, PGAR_2_HPRCOM_VOL, + 0, 118, 1, output_stage_tlv), +diff -Nurp kernel-2.6.28-20094102.6+0m5/sound/soc/omap/rx51.c kernel-2.6.28-20094803.3+0m5/sound/soc/omap/rx51.c +--- kernel-2.6.28-20094102.6+0m5/sound/soc/omap/rx51.c 2011-09-04 11:36:23.000000000 +0200 ++++ kernel-2.6.28-20094803.3+0m5/sound/soc/omap/rx51.c 2011-09-04 11:37:54.000000000 +0200 +@@ -59,6 +59,9 @@ enum { + RX51_JACK_TVOUT, /* stereo output with tv-out */ + }; + ++static int hp_lim = 63; ++module_param(hp_lim, int, 0); ++ + static int rx51_new_hw_audio; + static int rx51_spk_func; + static int rx51_jack_func; +@@ -563,8 +566,16 @@ enum { + static int rx51_ext_info_volsw(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) + { ++ int ext_api = (kcontrol->private_value >> 26) & 0x0f; + int max = (kcontrol->private_value >> 16) & 0xff; + ++ if (ext_api == RX51_EXT_API_TPA6130) ++ if (hp_lim != max && hp_lim >= 2 && hp_lim <= 63) { ++ kcontrol->private_value &= ~(0xff << 16); ++ kcontrol->private_value |= (hp_lim << 16); ++ max = hp_lim; ++ } ++ + if (max == 1) + uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; + else diff --git 
a/kernel-bfs-2.6.28/debian/patches/power-supply-ignore-enodata.diff b/kernel-bfs-2.6.28/debian/patches/power-supply-ignore-enodata.diff new file mode 100644 index 0000000..7f3b14e --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/power-supply-ignore-enodata.diff @@ -0,0 +1,37 @@ +From f722e17fdb2c97bbec2563636dd88489cdb1428b Mon Sep 17 00:00:00 2001 +From: Lars-Peter Clausen +Date: Sat, 8 Jan 2011 19:12:26 +0100 +Subject: [PATCH] power_supply: Ignore -ENODATA errors when generating uevents + +Sometimes a driver can not report a meaningful value for a certain property +and returns -ENODATA. + +Currently when generating a uevent and a property return -ENODATA it is +treated as an error an no uevent is generated at all. This is not an +desirable behavior. + +This patch adds a special case for -ENODATA and ignores properties which +return this error code when generating the uevent. + +Signed-off-by: Lars-Peter Clausen +Tested-by: Grazvydas Ignotas +--- + drivers/power/power_supply_sysfs.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c +index cd1f907..605514a 100644 +--- a/drivers/power/power_supply_sysfs.c ++++ b/drivers/power/power_supply_sysfs.c +@@ -270,7 +270,7 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) + attr = &power_supply_attrs[psy->properties[j]]; + + ret = power_supply_show_property(dev, attr, prop_buf); +- if (ret == -ENODEV) { ++ if (ret == -ENODEV || ret == -ENODATA) { + /* When a battery is absent, we expect -ENODEV. Don't abort; + send the uevent with at least the the PRESENT=0 property */ + ret = 0; +-- +1.7.6.2 + diff --git a/kernel-bfs-2.6.28/debian/patches/radio-si4713.diff b/kernel-bfs-2.6.28/debian/patches/radio-si4713.diff new file mode 100644 index 0000000..e100338 --- /dev/null +++ b/kernel-bfs-2.6.28/debian/patches/radio-si4713.diff @@ -0,0 +1,195 @@ +--- kernel-power-2.6.28/drivers/media/radio/radio-si4713.c 2011-09-14 23:56:12.320096297 +0200 ++++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.c 2011-09-15 00:19:37.030080507 +0200 +@@ -55,23 +55,10 @@ + static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */ + + /* properties lock for write operations */ +-static int config_locked; ++static int config_locked = 1; + +-/* saved power levels */ +-static unsigned int max_pl; +-static unsigned int min_pl; +- +-/* structure for pid registration */ +-struct pid_list { +- pid_t pid; +- struct list_head plist; +-}; +- +-#define APP_MAX_NUM 2 +- +-static int pid_count; +-static LIST_HEAD(pid_list_head); +-static struct si4713_device *si4713_dev; ++/* module param for initial power level */ ++static int init_power_level = 120; + + /* + * Sysfs properties +@@ -193,16 +180,7 @@ static ssize_t si4713_lock_write(struct + const char *buf, + size_t count) + { +- int l; +- +- if (config_locked) +- return -EPERM; +- +- sscanf(buf, "%d", &l); +- +- if (l != 0) +- config_locked = 1; +- ++ sscanf(buf, "%d", &config_locked); + return count; + } + +@@ -219,7 +197,7 @@ static DEVICE_ATTR(lock, S_IRUGO | S_IWU + /* + * Power level property + */ +-/* power_level (rw) 88 - 115 or 0 */ ++/* power_level (rw) 88 - 120 or 0 */ + static ssize_t si4713_power_level_write(struct device *dev, + struct device_attribute *attr, + const char *buf, +@@ -420,109 +398,13 @@ static irqreturn_t si4713_handler(int ir + return IRQ_HANDLED; + } + +-static int register_pid(pid_t pid) +-{ +- struct pid_list *pitem; +- +- list_for_each_entry(pitem, &pid_list_head, 
plist) { +- if (pitem->pid == pid) +- return -EINVAL; +- } +- +- pitem = kmalloc(sizeof(struct pid_list), GFP_KERNEL); +- +- if (!pitem) +- return -ENOMEM; +- +- pitem->pid = pid; +- +- list_add(&(pitem->plist), &pid_list_head); +- pid_count++; +- +- return 0; +-} +- +-static int unregister_pid(pid_t pid) +-{ +- struct pid_list *pitem, *n; +- +- list_for_each_entry_safe(pitem, n, &pid_list_head, plist) { +- if (pitem->pid == pid) { +- list_del(&(pitem->plist)); +- pid_count--; +- +- kfree(pitem); +- +- return 0; +- } +- } +- return -EINVAL; +-} +- + static int si4713_priv_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) + { +- unsigned int pow; +- int pl, rval; +- + if (cmd != LOCK_LOW_POWER && cmd != RELEASE_LOW_POWER) + return video_ioctl2(inode, file, cmd, arg); +- +- pl = si4713_get_power_level(si4713_dev); +- +- if (pl < 0) { +- rval = pl; +- goto exit; +- } +- +- if (copy_from_user(&pow, (void __user *)arg, sizeof(pow))) { +- rval = -EFAULT; +- goto exit; +- } +- +- if (cmd == LOCK_LOW_POWER) { +- +- if (pid_count == APP_MAX_NUM) { +- rval = -EPERM; +- goto exit; +- } +- +- if (pid_count == 0) { +- if (pow > pl) { +- rval = -EINVAL; +- goto exit; +- } else { +- /* Set max possible power level */ +- max_pl = pl; +- min_pl = pow; +- } +- } +- +- rval = register_pid(current->pid); +- +- if (rval) +- goto exit; +- +- /* Lower min power level if asked */ +- if (pow < min_pl) +- min_pl = pow; +- else +- pow = min_pl; +- +- } else { /* RELEASE_LOW_POWER */ +- rval = unregister_pid(current->pid); +- +- if (rval) +- goto exit; +- +- if (pid_count == 0) { +- if (pow > max_pl) +- pow = max_pl; +- } +- } +- rval = si4713_set_power_level(si4713_dev, pow); +-exit: +- return rval; ++ else ++ return 0; + } + + /* +@@ -906,8 +788,11 @@ static int si4713_i2c_driver_probe(struc + goto free_sysfs; + } + +- /* save to global pointer for it to be accesible from ioctl() call */ +- si4713_dev = sdev; ++ rval = si4713_set_power_level(sdev, init_power_level); ++ if (rval < 0) { ++ dev_dbg(&client->dev, "Failed to set initial power level.\n"); ++ goto free_sysfs; ++ } + + return 0; + +@@ -1013,6 +898,12 @@ module_param(radio_nr, int, 0); + MODULE_PARM_DESC(radio_nr, + "Minor number for radio device (-1 ==> auto assign)"); + ++module_param(init_power_level, int, 120); ++MODULE_PARM_DESC(init_power_level, "Initial value of power level (default 120)"); ++ ++module_param(config_locked, int, 1); ++MODULE_PARM_DESC(config_locked, "Lock power level configuration on init (default 1 - locked)"); ++ + MODULE_LICENSE("GPL"); + MODULE_AUTHOR(DRIVER_AUTHOR); + MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/kernel-bfs-2.6.28/debian/patches/series b/kernel-bfs-2.6.28/debian/patches/series index 7fc5e7f..7106bd0 100644 --- a/kernel-bfs-2.6.28/debian/patches/series +++ b/kernel-bfs-2.6.28/debian/patches/series @@ -1,6 +1,9 @@ ################################# # Patches as in kernel-power 2.6.28-omap1.diff +nokia-20093908+0m5.diff +nokia-20094102.3+0m5.diff +nokia-20094102.6+0m5.diff nokia-20094803.3+0m5.diff nokia-20100903+0m5.diff nokia-20101501+0m5.diff @@ -31,13 +34,16 @@ overclock.diff #bq24150-sniff.diff armthumb.diff wl12xx_rohar.diff -fmtx.unlock.diff +#fmtx.unlock.diff radio-bcm2048.diff #i2c-battery.diff usbhostmode.diff bt-mice.diff -board-rx51-peripherals.diff -bq27x00_battery.diff +power-supply-ignore-enodata.diff +bq27x00-rx51-board.diff +bq27x00-upstream.diff +bq27x00-maemo.diff +bq27x00-reg.diff l2cap_parent.diff wl12xx-rx-fix.diff anti-io-stalling.diff @@ -47,11 +53,11 @@ 
phys_to_page.diff ext4-data-corruption.diff patch_swap_notify_core_support_2.6.28.diff class10sd_dto14_fix.diff -patch_swap_notify_core_support_2_2.6.28.diff -update_mmu_cache_arm.diff adding-ramzswap-driver.diff -fmtx_lock_power.diff dspbridge_ioctl_buffer_overrun.diff +patch_swap_notify_core_support_2_2.6.28.diff +update_mmu_cache_arm.diff +radio-si4713.diff ################################# # BFS Patches diff --git a/kernel-bfs-2.6.28/debian/rules b/kernel-bfs-2.6.28/debian/rules index 01fb110..ef0203a 100644 --- a/kernel-bfs-2.6.28/debian/rules +++ b/kernel-bfs-2.6.28/debian/rules @@ -83,8 +83,6 @@ install-kernel: mkdir -p $(CURDIR)/debian/$(KERNEL_PACKAGE)/boot fiasco-gen -o $(CURDIR)/debian/$(KERNEL_PACKAGE)/boot/zImage-$(RELEASE).fiasco -g -k $(KSRC)/arch/arm/boot/zImage -v $(RELEASE) chmod 644 $(CURDIR)/debian/$(KERNEL_PACKAGE)/boot/zImage-$(RELEASE).fiasco - mkdir -p $(CURDIR)/debian/$(KERNEL_PACKAGE)/etc/event.d - install -m644 $(CURDIR)/debian/fmtxpower-bfs $(CURDIR)/debian/$(KERNEL_PACKAGE)/etc/event.d # we can't use the same file as kernel-power install-bootimg: dh_testdir -- 1.7.9.5